Diffstat
-rw-r--r--Documentation/ABI/stable/sysfs-class-tpm14
-rw-r--r--Documentation/ABI/stable/sysfs-driver-speakup37
-rw-r--r--Documentation/ABI/testing/debugfs-driver-habanalabs50
-rw-r--r--Documentation/ABI/testing/ima_policy7
-rw-r--r--Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x8
-rw-r--r--Documentation/ABI/testing/sysfs-bus-cxl26
-rw-r--r--Documentation/ABI/testing/sysfs-bus-dfl-devices-emif25
-rw-r--r--Documentation/ABI/testing/sysfs-bus-dfl-devices-n3000-nios47
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio11
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-dac-ad576631
-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci-devices-pvpanic24
-rw-r--r--Documentation/ABI/testing/sysfs-bus-thunderbolt22
-rw-r--r--Documentation/ABI/testing/sysfs-class-led-trigger-tty6
-rw-r--r--Documentation/ABI/testing/sysfs-class-net15
-rw-r--r--Documentation/ABI/testing/sysfs-class-net-dsa11
-rw-r--r--Documentation/ABI/testing/sysfs-class-net-qmi10
-rw-r--r--Documentation/ABI/testing/sysfs-class-power-ltc4162l82
-rw-r--r--Documentation/ABI/testing/sysfs-class-typec20
-rw-r--r--Documentation/ABI/testing/sysfs-devices-memory58
-rw-r--r--Documentation/ABI/testing/sysfs-devices-xenbus41
-rw-r--r--Documentation/ABI/testing/sysfs-driver-habanalabs58
-rw-r--r--Documentation/ABI/testing/sysfs-driver-input-cros-ec-keyb6
-rw-r--r--Documentation/ABI/testing/sysfs-driver-intel-m10-bmc21
-rw-r--r--Documentation/ABI/testing/sysfs-driver-ufs11
-rw-r--r--Documentation/ABI/testing/sysfs-firmware-acpi43
-rw-r--r--Documentation/ABI/testing/sysfs-firmware-sfi15
-rw-r--r--Documentation/ABI/testing/sysfs-fs-f2fs32
-rw-r--r--Documentation/ABI/testing/sysfs-platform-ideapad-laptop26
-rw-r--r--Documentation/ABI/testing/sysfs-platform-kim2
-rw-r--r--Documentation/ABI/testing/sysfs-platform_profile28
-rw-r--r--Documentation/Makefile2
-rw-r--r--Documentation/PCI/endpoint/function/binding/pci-ntb.rst38
-rw-r--r--Documentation/PCI/endpoint/index.rst3
-rw-r--r--Documentation/PCI/endpoint/pci-endpoint-cfs.rst10
-rw-r--r--Documentation/PCI/endpoint/pci-ntb-function.rst348
-rw-r--r--Documentation/PCI/endpoint/pci-ntb-howto.rst161
-rw-r--r--Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.rst4
-rw-r--r--Documentation/RCU/Design/Requirements/Requirements.rst732
-rw-r--r--Documentation/RCU/NMI-RCU.rst3
-rw-r--r--Documentation/RCU/RTFP.txt94
-rw-r--r--Documentation/RCU/checklist.rst10
-rw-r--r--Documentation/RCU/rcubarrier.rst6
-rw-r--r--Documentation/RCU/stallwarn.rst27
-rw-r--r--Documentation/RCU/whatisRCU.rst10
-rw-r--r--Documentation/accounting/cgroupstats.rst4
-rw-r--r--Documentation/admin-guide/README.rst7
-rw-r--r--Documentation/admin-guide/auxdisplay/cfag12864b.rst2
-rw-r--r--Documentation/admin-guide/auxdisplay/ks0108.rst2
-rw-r--r--Documentation/admin-guide/cgroup-v1/memory.rst14
-rw-r--r--Documentation/admin-guide/cgroup-v2.rst76
-rw-r--r--Documentation/admin-guide/cifs/authors.rst6
-rw-r--r--Documentation/admin-guide/cifs/changes.rst5
-rw-r--r--Documentation/admin-guide/cifs/introduction.rst30
-rw-r--r--Documentation/admin-guide/cifs/todo.rst34
-rw-r--r--Documentation/admin-guide/cifs/usage.rst2
-rw-r--r--Documentation/admin-guide/cpu-load.rst2
-rw-r--r--Documentation/admin-guide/device-mapper/dm-crypt.rst2
-rw-r--r--Documentation/admin-guide/device-mapper/dm-integrity.rst15
-rw-r--r--Documentation/admin-guide/kernel-parameters.rst2
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt156
-rw-r--r--Documentation/admin-guide/kernel-per-CPU-kthreads.rst2
-rw-r--r--Documentation/admin-guide/laptops/thinkpad-acpi.rst25
-rw-r--r--Documentation/admin-guide/media/rkisp1.rst16
-rw-r--r--Documentation/admin-guide/mm/memory-hotplug.rst20
-rw-r--r--Documentation/admin-guide/perf-security.rst2
-rw-r--r--Documentation/admin-guide/perf/arm-cmn.rst2
-rw-r--r--Documentation/admin-guide/spkguide.txt48
-rw-r--r--Documentation/admin-guide/syscall-user-dispatch.rst4
-rw-r--r--Documentation/admin-guide/sysctl/fs.rst4
-rw-r--r--Documentation/admin-guide/sysctl/vm.rst10
-rw-r--r--Documentation/admin-guide/thunderbolt.rst23
-rw-r--r--Documentation/admin-guide/xfs.rst58
-rw-r--r--Documentation/arm/booting.rst2
-rw-r--r--Documentation/arm/index.rst2
-rw-r--r--Documentation/arm/marvell.rst (renamed from Documentation/arm/marvel.rst)7
-rw-r--r--Documentation/asm-annotations.rst5
-rw-r--r--Documentation/block/bfq-iosched.rst4
-rw-r--r--Documentation/block/biovecs.rst2
-rw-r--r--Documentation/block/inline-encryption.rst12
-rw-r--r--Documentation/block/queue-sysfs.rst13
-rw-r--r--Documentation/bpf/bpf_design_QA.rst6
-rw-r--r--Documentation/bpf/bpf_devel_QA.rst11
-rw-r--r--Documentation/conf.py78
-rw-r--r--Documentation/core-api/dma-api.rst64
-rw-r--r--Documentation/core-api/mm-api.rst7
-rw-r--r--Documentation/crypto/api-skcipher.rst4
-rw-r--r--Documentation/dev-tools/index.rst1
-rw-r--r--Documentation/dev-tools/kasan.rst35
-rw-r--r--Documentation/dev-tools/kfence.rst298
-rw-r--r--Documentation/dev-tools/kunit/index.rst2
-rw-r--r--Documentation/dev-tools/kunit/start.rst7
-rw-r--r--Documentation/dev-tools/kunit/tips.rst115
-rw-r--r--Documentation/devicetree/bindings/Makefile10
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic.yaml6
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-mx-secbus2.yaml42
-rw-r--r--Documentation/devicetree/bindings/arm/arm,scmi.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-sysregs.txt7
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/brcm,bcm4908.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/coresight.txt5
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.yaml23
-rw-r--r--Documentation/devicetree/bindings/arm/marvell/ap80x-system-controller.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek.yaml4
-rw-r--r--Documentation/devicetree/bindings/arm/msm/qcom,llcc.yaml1
-rw-r--r--Documentation/devicetree/bindings/arm/pmu.yaml1
-rw-r--r--Documentation/devicetree/bindings/arm/qcom.yaml12
-rw-r--r--Documentation/devicetree/bindings/arm/renesas.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/rockchip.yaml6
-rw-r--r--Documentation/devicetree/bindings/arm/sirf.yaml30
-rw-r--r--Documentation/devicetree/bindings/arm/socionext/socionext,uniphier-system-cache.yaml4
-rw-r--r--Documentation/devicetree/bindings/arm/ste-u300.txt46
-rw-r--r--Documentation/devicetree/bindings/arm/sunxi.yaml19
-rw-r--r--Documentation/devicetree/bindings/arm/tegra.yaml8
-rw-r--r--Documentation/devicetree/bindings/arm/xilinx.yaml6
-rw-r--r--Documentation/devicetree/bindings/arm/zte,sysctrl.txt30
-rw-r--r--Documentation/devicetree/bindings/arm/zte.yaml28
-rw-r--r--Documentation/devicetree/bindings/ata/sata_highbank.yaml1
-rw-r--r--Documentation/devicetree/bindings/auxdisplay/holtek,ht16k33.yaml77
-rw-r--r--Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml4
-rw-r--r--Documentation/devicetree/bindings/c6x/clocks.txt40
-rw-r--r--Documentation/devicetree/bindings/c6x/dscr.txt127
-rw-r--r--Documentation/devicetree/bindings/c6x/emifa.txt62
-rw-r--r--Documentation/devicetree/bindings/c6x/soc.txt28
-rw-r--r--Documentation/devicetree/bindings/clock/adi,axi-clkgen.yaml1
-rw-r--r--Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml4
-rw-r--r--Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-usb-clks.yaml (renamed from Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-usb-clocks.yaml)4
-rw-r--r--Documentation/devicetree/bindings/clock/arm,syscon-icst.yaml4
-rw-r--r--Documentation/devicetree/bindings/clock/canaan,k210-clk.yaml1
-rw-r--r--Documentation/devicetree/bindings/clock/csr,atlas7-car.txt55
-rw-r--r--Documentation/devicetree/bindings/clock/idt,versaclock5.yaml6
-rw-r--r--Documentation/devicetree/bindings/clock/imx27-clock.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/imx31-clock.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/imx5-clock.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/intel,easic-n5x.yaml46
-rw-r--r--Documentation/devicetree/bindings/clock/mstar,msc313-mpll.yaml46
-rw-r--r--Documentation/devicetree/bindings/clock/prima2-clock.txt73
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,a7pll.yaml51
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-sc7280.yaml92
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-sc8180x.yaml76
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-sm8350.yaml96
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gpucc-sdm660.yaml76
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,mmcc.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.yaml3
-rw-r--r--Documentation/devicetree/bindings/clock/silabs,si570.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/ste-u300-syscon-clock.txt80
-rw-r--r--Documentation/devicetree/bindings/clock/tango4-clock.txt23
-rw-r--r--Documentation/devicetree/bindings/clock/zx296702-clk.txt34
-rw-r--r--Documentation/devicetree/bindings/clock/zx296718-clk.txt37
-rw-r--r--Documentation/devicetree/bindings/connector/usb-connector.yaml29
-rw-r--r--Documentation/devicetree/bindings/crypto/allwinner,sun8i-ce.yaml3
-rw-r--r--Documentation/devicetree/bindings/crypto/intel,keembay-ocs-hcu.yaml46
-rw-r--r--Documentation/devicetree/bindings/crypto/samsung-slimsss.yaml1
-rw-r--r--Documentation/devicetree/bindings/crypto/samsung-sss.yaml1
-rw-r--r--Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml23
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml25
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml110
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/brcm,bcm2835-dpi.yaml7
-rw-r--r--Documentation/devicetree/bindings/display/brcm,bcm2835-dsi0.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/brcm,bcm2835-hdmi.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/brcm,bcm2835-hvs.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/bridge/anx6345.yaml18
-rw-r--r--Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml22
-rw-r--r--Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml14
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml70
-rw-r--r--Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml18
-rw-r--r--Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml41
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ps8640.yaml24
-rw-r--r--Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml24
-rw-r--r--Documentation/devicetree/bindings/display/bridge/sii902x.txt2
-rw-r--r--Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml18
-rw-r--r--Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml7
-rw-r--r--Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml35
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml45
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ti,tfp410.yaml24
-rw-r--r--Documentation/devicetree/bindings/display/bridge/toshiba,tc358762.yaml52
-rw-r--r--Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml48
-rw-r--r--Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/connector/analog-tv-connector.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/connector/dp-connector.yaml55
-rw-r--r--Documentation/devicetree/bindings/display/connector/dvi-connector.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/connector/hdmi-connector.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/connector/vga-connector.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/ht16k33.txt40
-rw-r--r--Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/ingenic,ipu.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/ingenic,lcd.yaml10
-rw-r--r--Documentation/devicetree/bindings/display/intel,keembay-display.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt16
-rw-r--r--Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt18
-rw-r--r--Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt18
-rw-r--r--Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml21
-rw-r--r--Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/panel/mantix,mlaf057we51-x.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-common.yaml11
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-simple.yaml21
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml16
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/st,stm32-dsi.yaml12
-rw-r--r--Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml8
-rw-r--r--Documentation/devicetree/bindings/display/ste,mcde.txt104
-rw-r--r--Documentation/devicetree/bindings/display/ste,mcde.yaml168
-rw-r--r--Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt4
-rw-r--r--Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml23
-rw-r--r--Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml3
-rw-r--r--Documentation/devicetree/bindings/dma/ingenic,dma.yaml2
-rw-r--r--Documentation/devicetree/bindings/dma/intel,ldma.yaml116
-rw-r--r--Documentation/devicetree/bindings/dma/owl-dma.yaml7
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml76
-rw-r--r--Documentation/devicetree/bindings/dma/sirfsoc-dma.txt44
-rw-r--r--Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt39
-rw-r--r--Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml126
-rw-r--r--Documentation/devicetree/bindings/dma/ste-coh901318.txt32
-rw-r--r--Documentation/devicetree/bindings/dma/zxdma.txt38
-rw-r--r--Documentation/devicetree/bindings/dsp/fsl,dsp.yaml2
-rw-r--r--Documentation/devicetree/bindings/eeprom/at24.yaml9
-rw-r--r--Documentation/devicetree/bindings/eeprom/at25.yaml3
-rw-r--r--Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml2
-rw-r--r--Documentation/devicetree/bindings/extcon/wlf,arizona.yaml1
-rw-r--r--Documentation/devicetree/bindings/firmware/qcom,scm.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-atlas7.txt50
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-davinci.txt1
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml4
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-stericsson-coh901.txt7
-rw-r--r--Documentation/devicetree/bindings/gpio/mrvl-gpio.yaml3
-rw-r--r--Documentation/devicetree/bindings/gpio/mstar,msc313-gpio.yaml2
-rw-r--r--Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml3
-rw-r--r--Documentation/devicetree/bindings/gpio/sifive,gpio.yaml29
-rw-r--r--Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml70
-rw-r--r--Documentation/devicetree/bindings/gpio/zx296702-gpio.txt24
-rw-r--r--Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt33
-rw-r--r--Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml75
-rw-r--r--Documentation/devicetree/bindings/graph.txt129
-rw-r--r--Documentation/devicetree/bindings/hwlock/ti,omap-hwspinlock.yaml1
-rw-r--r--Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml1
-rw-r--r--Documentation/devicetree/bindings/hwmon/baikal,bt1-pvt.yaml8
-rw-r--r--Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml1
-rw-r--r--Documentation/devicetree/bindings/hwmon/ti,tps23861.yaml51
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-gpio.yaml2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-sirf.txt19
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-stu300.txt15
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-zx2967.txt22
-rw-r--r--Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml21
-rw-r--r--Documentation/devicetree/bindings/i2c/nuvoton,npcm7xx-i2c.yaml2
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,i2c.txt1
-rw-r--r--Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml3
-rw-r--r--Documentation/devicetree/bindings/i3c/i3c.txt140
-rw-r--r--Documentation/devicetree/bindings/i3c/i3c.yaml179
-rw-r--r--Documentation/devicetree/bindings/i3c/mipi-i3c-hci.yaml9
-rw-r--r--Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml60
-rw-r--r--Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/aspeed,ast2400-adc.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/maxim,max9611.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti,palmas-gpadc.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml12
-rw-r--r--Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt49
-rw-r--r--Documentation/devicetree/bindings/iio/dac/adi,ad5696.yaml (renamed from Documentation/devicetree/bindings/iio/dac/adi,ad5686.yaml)10
-rw-r--r--Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml41
-rw-r--r--Documentation/devicetree/bindings/iio/dac/adi,ad5766.yaml63
-rw-r--r--Documentation/devicetree/bindings/iio/dac/microchip,mcp4725.yaml31
-rw-r--r--Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt45
-rw-r--r--Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml70
-rw-r--r--Documentation/devicetree/bindings/iio/health/maxim,max30100.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/health/ti,afe4404.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt67
-rw-r--r--Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml104
-rw-r--r--Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/magnetometer/yamaha,yas530.yaml112
-rw-r--r--Documentation/devicetree/bindings/iio/potentiometer/adi,ad5272.yaml1
-rw-r--r--Documentation/devicetree/bindings/input/adc-keys.txt22
-rw-r--r--Documentation/devicetree/bindings/input/goodix,gt7375p.yaml65
-rw-r--r--Documentation/devicetree/bindings/input/google,cros-ec-keyb.yaml24
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/elan,elants_i2c.yaml1
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/goodix.yaml1
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml2
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,qcs404.yaml77
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml (renamed from Documentation/devicetree/bindings/interconnect/qcom,msm8916.yaml)22
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml6
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/allwinner,sun6i-a31-r-intc.yaml67
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml11
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/fsl,intmux.yaml2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.yaml1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/realtek,rtl-intc.yaml57
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml13
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt48
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ti,c64x+megamod-pic.txt103
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ti,pruss-intc.yaml3
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu.yaml2
-rw-r--r--Documentation/devicetree/bindings/iommu/mediatek,iommu.txt105
-rw-r--r--Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml183
-rw-r--r--Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml14
-rw-r--r--Documentation/devicetree/bindings/leds/leds-lgm.yaml113
-rw-r--r--Documentation/devicetree/bindings/leds/richtek,rt8515.yaml111
-rw-r--r--Documentation/devicetree/bindings/leds/ti,tca6507.yaml1
-rw-r--r--Documentation/devicetree/bindings/mailbox/omap-mailbox.txt4
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml34
-rw-r--r--Documentation/devicetree/bindings/media/allegro,al5e.yaml105
-rw-r--r--Documentation/devicetree/bindings/media/allegro.txt43
-rw-r--r--Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml11
-rw-r--r--Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml12
-rw-r--r--Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml3
-rw-r--r--Documentation/devicetree/bindings/media/i2c/adv7180.yaml36
-rw-r--r--Documentation/devicetree/bindings/media/i2c/adv7604.yaml37
-rw-r--r--Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.yaml4
-rw-r--r--Documentation/devicetree/bindings/media/i2c/imi,rdacm2x-gmsl.yaml30
-rw-r--r--Documentation/devicetree/bindings/media/i2c/imx219.yaml22
-rw-r--r--Documentation/devicetree/bindings/media/i2c/imx258.yaml134
-rw-r--r--Documentation/devicetree/bindings/media/i2c/maxim,max9286.yaml117
-rw-r--r--Documentation/devicetree/bindings/media/i2c/mipi-ccs.yaml17
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ov8856.yaml22
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml29
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml6
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov5647.yaml (renamed from Documentation/devicetree/bindings/media/i2c/ov5647.yaml)25
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov5648.yaml109
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov772x.yaml9
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov8865.yaml118
-rw-r--r--Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml25
-rw-r--r--Documentation/devicetree/bindings/media/i2c/sony,imx274.yaml3
-rw-r--r--Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml90
-rw-r--r--Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml20
-rw-r--r--Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.txt2
-rw-r--r--Documentation/devicetree/bindings/media/mediatek-jpeg-encoder.txt2
-rw-r--r--Documentation/devicetree/bindings/media/mediatek-mdp.txt2
-rw-r--r--Documentation/devicetree/bindings/media/nxp,imx7-csi.yaml5
-rw-r--r--Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml32
-rw-r--r--Documentation/devicetree/bindings/media/renesas,ceu.yaml17
-rw-r--r--Documentation/devicetree/bindings/media/renesas,csi2.yaml54
-rw-r--r--Documentation/devicetree/bindings/media/renesas,vin.yaml113
-rw-r--r--Documentation/devicetree/bindings/media/rockchip-isp1.yaml40
-rw-r--r--Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml18
-rw-r--r--Documentation/devicetree/bindings/media/ti,cal.yaml55
-rw-r--r--Documentation/devicetree/bindings/media/video-interface-devices.yaml406
-rw-r--r--Documentation/devicetree/bindings/media/video-interfaces.txt640
-rw-r--r--Documentation/devicetree/bindings/media/video-interfaces.yaml344
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.yaml39
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/exynos-srom.yaml2
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml6
-rw-r--r--Documentation/devicetree/bindings/mfd/bd9571mwv.txt4
-rw-r--r--Documentation/devicetree/bindings/mfd/canaan,k210-sysctl.yaml109
-rw-r--r--Documentation/devicetree/bindings/mfd/ene-kb930.yaml65
-rw-r--r--Documentation/devicetree/bindings/mfd/gateworks-gsc.yaml3
-rw-r--r--Documentation/devicetree/bindings/mfd/iqs62x.yaml2
-rw-r--r--Documentation/devicetree/bindings/mips/lantiq/lantiq,cgu.yaml32
-rw-r--r--Documentation/devicetree/bindings/mips/lantiq/lantiq,dma-xway.yaml32
-rw-r--r--Documentation/devicetree/bindings/mips/lantiq/lantiq,ebu.yaml32
-rw-r--r--Documentation/devicetree/bindings/mips/lantiq/lantiq,pmu.yaml32
-rw-r--r--Documentation/devicetree/bindings/mips/realtek-rtl.yaml24
-rw-r--r--Documentation/devicetree/bindings/misc/eeprom-93xx46.txt1
-rw-r--r--Documentation/devicetree/bindings/misc/fsl,dpaa2-console.yaml1
-rw-r--r--Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml8
-rw-r--r--Documentation/devicetree/bindings/mmc/arm,pl18x.yaml223
-rw-r--r--Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt1
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-controller.yaml3
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml2
-rw-r--r--Documentation/devicetree/bindings/mmc/mmci.txt74
-rw-r--r--Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml3
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-am654.yaml19
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-msm.txt8
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-sirf.txt18
-rw-r--r--Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt31
-rw-r--r--Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt91
-rw-r--r--Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml102
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/brcm,bcm4908-partitions.yaml70
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml33
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/partition.yaml47
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/qcom,smem-part.yaml33
-rw-r--r--Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml55
-rw-r--r--Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml48
-rw-r--r--Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt101
-rw-r--r--Documentation/devicetree/bindings/net/btusb.txt4
-rw-r--r--Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml11
-rw-r--r--Documentation/devicetree/bindings/net/can/rcar_canfd.txt2
-rw-r--r--Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml73
-rw-r--r--Documentation/devicetree/bindings/net/dsa/brcm,sf2.yaml173
-rw-r--r--Documentation/devicetree/bindings/net/dsa/mt7530.txt6
-rw-r--r--Documentation/devicetree/bindings/net/ethernet-controller.yaml8
-rw-r--r--Documentation/devicetree/bindings/net/marvell-pp2.txt6
-rw-r--r--Documentation/devicetree/bindings/net/qca,ar803x.yaml16
-rw-r--r--Documentation/devicetree/bindings/net/qcom,ipa.yaml15
-rw-r--r--Documentation/devicetree/bindings/net/renesas,etheravb.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/snps,dwmac.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml51
-rw-r--r--Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml8
-rw-r--r--Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml85
-rw-r--r--Documentation/devicetree/bindings/net/xilinx_axienet.txt4
-rw-r--r--Documentation/devicetree/bindings/nvmem/rmem.yaml49
-rw-r--r--Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml37
-rw-r--r--Documentation/devicetree/bindings/pci/layerscape-pci.txt1
-rw-r--r--Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml92
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie.txt17
-rw-r--r--Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/allwinner,sun50i-a64-usb-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/allwinner,sun5i-a13-usb-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-usb-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/allwinner,sun8i-a23-usb-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/allwinner,sun8i-a83t-usb-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/allwinner,sun8i-h3-usb-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/allwinner,sun8i-r40-usb-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/allwinner,sun8i-v3s-usb-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/allwinner,sun9i-a80-usb-phy.yaml19
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.txt86
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.yaml196
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml3
-rw-r--r--Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml85
-rw-r--r--Documentation/devicetree/bindings/phy/mediatek,hdmi-phy.yaml92
-rw-r--r--Documentation/devicetree/bindings/phy/mediatek,tphy.yaml260
-rw-r--r--Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml64
-rw-r--r--Documentation/devicetree/bindings/phy/mediatek,xsphy.yaml199
-rw-r--r--Documentation/devicetree/bindings/phy/phy-mtk-tphy.txt162
-rw-r--r--Documentation/devicetree/bindings/phy/phy-mtk-ufs.txt38
-rw-r--r--Documentation/devicetree/bindings/phy/phy-mtk-xsphy.txt109
-rw-r--r--Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml22
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml97
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,usb-hs-28nm.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml5
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt10
-rw-r--r--Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/ti,phy-gmii-sel.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml17
-rw-r--r--Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml5
-rw-r--r--Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml6
-rw-r--r--Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt8
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,ns2-pinmux.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,nsp-pinmux.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/canaan,k210-fpioa.yaml171
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/microchip,sparx5-sgpio.yaml4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-atlas7.txt109
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt12
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-zx.txt84
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sc8180x-pinctrl.yaml152
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sm8350-pinctrl.yaml145
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,tlmm-common.yaml85
-rw-r--r--Documentation/devicetree/bindings/pinctrl/ralink,rt2880-pinmux.yaml62
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml12
-rw-r--r--Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/power/brcm,bcm-pmb.yaml50
-rw-r--r--Documentation/devicetree/bindings/power/mediatek,power-controller.yaml11
-rw-r--r--Documentation/devicetree/bindings/power/qcom,rpmpd.yaml1
-rw-r--r--Documentation/devicetree/bindings/power/renesas,apmu.yaml2
-rw-r--r--Documentation/devicetree/bindings/power/supply/battery.yaml3
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq2515x.yaml1
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq256xx.yaml110
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq25980.yaml1
-rw-r--r--Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml69
-rw-r--r--Documentation/devicetree/bindings/ptp/ptp-idtcm.yaml4
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-sifive.yaml9
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-zx.txt22
-rw-r--r--Documentation/devicetree/bindings/regulator/dlg,da9121.yaml1
-rw-r--r--Documentation/devicetree/bindings/regulator/fixed-regulator.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/max8997-regulator.txt1
-rw-r--r--Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt3
-rw-r--r--Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml69
-rw-r--r--Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml5
-rw-r--r--Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml17
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt2
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom-labibb-regulator.yaml30
-rw-r--r--Documentation/devicetree/bindings/regulator/richtek,rt4831-regulator.yaml35
-rw-r--r--Documentation/devicetree/bindings/remoteproc/ingenic,vpu.yaml2
-rw-r--r--Documentation/devicetree/bindings/remoteproc/mtk,scp.txt8
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt12
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt1
-rw-r--r--Documentation/devicetree/bindings/remoteproc/ti,omap-remoteproc.yaml3
-rw-r--r--Documentation/devicetree/bindings/reset/brcm,bcm4908-misc-pcie-reset.yaml39
-rw-r--r--Documentation/devicetree/bindings/reset/canaan,k210-rst.yaml40
-rw-r--r--Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.txt44
-rw-r--r--Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.yaml77
-rw-r--r--Documentation/devicetree/bindings/reset/sirf,rstc.txt42
-rw-r--r--Documentation/devicetree/bindings/reset/zte,zx2967-reset.txt20
-rw-r--r--Documentation/devicetree/bindings/riscv/canaan.yaml47
-rw-r--r--Documentation/devicetree/bindings/riscv/cpus.yaml8
-rw-r--r--Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml35
-rw-r--r--Documentation/devicetree/bindings/riscv/sifive.yaml17
-rw-r--r--Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml1
-rw-r--r--Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.yaml1
-rw-r--r--Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml51
-rw-r--r--Documentation/devicetree/bindings/rtc/pcf8563.txt3
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc.yaml2
-rw-r--r--Documentation/devicetree/bindings/rtc/sirf,prima2-sysrtc.txt13
-rw-r--r--Documentation/devicetree/bindings/rtc/stericsson,coh901331.txt16
-rw-r--r--Documentation/devicetree/bindings/rtc/trivial-rtc.yaml6
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-imx-uart.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-mxs-auart.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/pl011.yaml4
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,hscif.yaml3
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,scif.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,scifa.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,scifb.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/sifive-serial.yaml1
-rw-r--r--Documentation/devicetree/bindings/serial/sirf-uart.txt34
-rw-r--r--Documentation/devicetree/bindings/serial/st,stm32-uart.yaml13
-rw-r--r--Documentation/devicetree/bindings/soc/imx/imx8m-soc.yaml86
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt1
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smem.txt57
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smem.yaml72
-rw-r--r--Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml76
-rw-r--r--Documentation/devicetree/bindings/soc/zte/pd-2967xx.txt19
-rw-r--r--Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/audio-graph-port.yaml3
-rw-r--r--Documentation/devicetree/bindings/sound/google,sc7180-trogdor.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/ingenic,codec.yaml11
-rw-r--r--Documentation/devicetree/bindings/sound/intel,keembay-i2s.yaml13
-rw-r--r--Documentation/devicetree/bindings/sound/mt8192-mt6359-rt1015-rt5682.yaml9
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml190
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra186-dspk.yaml18
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-admaif.yaml13
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-ahub.yaml13
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-dmic.yaml18
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-i2s.yaml18
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml62
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,lpass-tx-macro.yaml67
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rsnd.yaml6
-rw-r--r--Documentation/devicetree/bindings/sound/rt5659.txt11
-rw-r--r--Documentation/devicetree/bindings/sound/samsung,aries-wm8994.yaml3
-rw-r--r--Documentation/devicetree/bindings/sound/samsung,midas-audio.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/sgtl5000.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/sirf-audio-codec.txt17
-rw-r--r--Documentation/devicetree/bindings/sound/sirf-usp.txt27
-rw-r--r--Documentation/devicetree/bindings/sound/st,stm32-i2s.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/tas2562.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/tas2770.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/tlv320adcx140.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/wm8962.txt4
-rw-r--r--Documentation/devicetree/bindings/sound/zte,tdm.txt30
-rw-r--r--Documentation/devicetree/bindings/sound/zte,zx-aud96p22.txt24
-rw-r--r--Documentation/devicetree/bindings/sound/zte,zx-i2s.txt45
-rw-r--r--Documentation/devicetree/bindings/sound/zte,zx-spdif.txt27
-rw-r--r--Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml1
-rw-r--r--Documentation/devicetree/bindings/spi/cadence-quadspi.txt (renamed from Documentation/devicetree/bindings/mtd/cadence-quadspi.txt)1
-rw-r--r--Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml117
-rw-r--r--Documentation/devicetree/bindings/spi/realtek,rtl-spi.yaml41
-rw-r--r--Documentation/devicetree/bindings/spi/renesas,rspi.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml1
-rw-r--r--Documentation/devicetree/bindings/spi/spi-controller.yaml6
-rw-r--r--Documentation/devicetree/bindings/spi/spi-sirf.txt42
-rw-r--r--Documentation/devicetree/bindings/spi/spi-zynq-qspi.txt25
-rw-r--r--Documentation/devicetree/bindings/spi/xlnx,zynq-qspi.yaml59
-rw-r--r--Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml1
-rw-r--r--Documentation/devicetree/bindings/sram/sram.yaml4
-rw-r--r--Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml12
-rw-r--r--Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm5.yaml153
-rw-r--r--Documentation/devicetree/bindings/thermal/tango-thermal.txt17
-rw-r--r--Documentation/devicetree/bindings/thermal/zx2967-thermal.txt116
-rw-r--r--Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml2
-rw-r--r--Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.yaml3
-rw-r--r--Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml2
-rw-r--r--Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt3
-rw-r--r--Documentation/devicetree/bindings/timer/sifive,clint.yaml12
-rw-r--r--Documentation/devicetree/bindings/timer/snps,dw-apb-timer.yaml3
-rw-r--r--Documentation/devicetree/bindings/timer/stericsson-u300-apptimer.txt18
-rw-r--r--Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt25
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.yaml10
-rw-r--r--Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml6
-rw-r--r--Documentation/devicetree/bindings/usb/brcm,usb-pinmap.yaml3
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3-st.txt4
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3-xilinx.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3.txt128
-rw-r--r--Documentation/devicetree/bindings/usb/exynos-usb.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml105
-rw-r--r--Documentation/devicetree/bindings/usb/generic-ehci.yaml53
-rw-r--r--Documentation/devicetree/bindings/usb/generic-ohci.yaml36
-rw-r--r--Documentation/devicetree/bindings/usb/generic-xhci.yaml65
-rw-r--r--Documentation/devicetree/bindings/usb/generic.txt57
-rw-r--r--Documentation/devicetree/bindings/usb/ingenic,musb.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml9
-rw-r--r--Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt121
-rw-r--r--Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml188
-rw-r--r--Documentation/devicetree/bindings/usb/mediatek,mtu3.txt108
-rw-r--r--Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml287
-rw-r--r--Documentation/devicetree/bindings/usb/mediatek,musb.txt57
-rw-r--r--Documentation/devicetree/bindings/usb/mediatek,musb.yaml114
-rw-r--r--Documentation/devicetree/bindings/usb/omap-usb.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/qcom,dwc3.yaml13
-rw-r--r--Documentation/devicetree/bindings/usb/renesas,usb-xhci.yaml4
-rw-r--r--Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml7
-rw-r--r--Documentation/devicetree/bindings/usb/renesas,usbhs.yaml1
-rw-r--r--Documentation/devicetree/bindings/usb/rockchip,dwc3.txt56
-rw-r--r--Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml108
-rw-r--r--Documentation/devicetree/bindings/usb/snps,dwc3.yaml332
-rw-r--r--Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml8
-rw-r--r--Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml7
-rw-r--r--Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml6
-rw-r--r--Documentation/devicetree/bindings/usb/usb-device.txt102
-rw-r--r--Documentation/devicetree/bindings/usb/usb-device.yaml124
-rw-r--r--Documentation/devicetree/bindings/usb/usb-drd.yaml78
-rw-r--r--Documentation/devicetree/bindings/usb/usb-hcd.yaml36
-rw-r--r--Documentation/devicetree/bindings/usb/usb-xhci.txt41
-rw-r--r--Documentation/devicetree/bindings/usb/usb-xhci.yaml42
-rw-r--r--Documentation/devicetree/bindings/usb/usb.yaml63
-rw-r--r--Documentation/devicetree/bindings/usb/usbmisc-imx.txt2
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml20
-rw-r--r--Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml12
-rw-r--r--Documentation/devicetree/bindings/watchdog/intel,keembay-wdt.yaml57
-rw-r--r--Documentation/devicetree/bindings/watchdog/mtk-wdt.txt5
-rw-r--r--Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml1
-rw-r--r--Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml1
-rw-r--r--Documentation/devicetree/bindings/watchdog/sigma,smp8642-wdt.txt18
-rw-r--r--Documentation/devicetree/bindings/watchdog/sirfsoc_wdt.txt18
-rw-r--r--Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml6
-rw-r--r--Documentation/devicetree/bindings/watchdog/stericsson-coh901327.txt19
-rw-r--r--Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml4
-rw-r--r--Documentation/devicetree/bindings/watchdog/watchdog.yaml1
-rw-r--r--Documentation/devicetree/bindings/watchdog/zte,zx2967-wdt.txt32
-rw-r--r--Documentation/devicetree/usage-model.rst2
-rw-r--r--Documentation/doc-guide/sphinx.rst30
-rw-r--r--Documentation/driver-api/auxiliary_bus.rst2
-rw-r--r--Documentation/driver-api/cxl/index.rst12
-rw-r--r--Documentation/driver-api/cxl/memory-devices.rst46
-rw-r--r--Documentation/driver-api/gpio/consumer.rst5
-rw-r--r--Documentation/driver-api/gpio/driver.rst4
-rw-r--r--Documentation/driver-api/gpio/intro.rst8
-rw-r--r--Documentation/driver-api/index.rst3
-rw-r--r--Documentation/driver-api/media/camera-sensor.rst20
-rw-r--r--Documentation/driver-api/media/cec-core.rst2
-rw-r--r--Documentation/driver-api/media/csi2.rst4
-rw-r--r--Documentation/driver-api/media/drivers/ccs/ccs.rst13
-rw-r--r--Documentation/driver-api/media/v4l2-clocks.rst31
-rw-r--r--Documentation/driver-api/media/v4l2-core.rst1
-rw-r--r--Documentation/driver-api/media/v4l2-subdev.rst63
-rw-r--r--Documentation/driver-api/men-chameleon-bus.rst12
-rw-r--r--Documentation/driver-api/pti_intel_mid.rst108
-rw-r--r--Documentation/driver-api/surface_aggregator/client-api.rst38
-rw-r--r--Documentation/driver-api/surface_aggregator/client.rst393
-rw-r--r--Documentation/driver-api/surface_aggregator/clients/cdev.rst87
-rw-r--r--Documentation/driver-api/surface_aggregator/clients/index.rst21
-rw-r--r--Documentation/driver-api/surface_aggregator/clients/san.rst44
-rw-r--r--Documentation/driver-api/surface_aggregator/index.rst21
-rw-r--r--Documentation/driver-api/surface_aggregator/internal-api.rst67
-rw-r--r--Documentation/driver-api/surface_aggregator/internal.rst577
-rw-r--r--Documentation/driver-api/surface_aggregator/overview.rst77
-rw-r--r--Documentation/driver-api/surface_aggregator/ssh.rst344
-rw-r--r--Documentation/driver-api/thermal/sysfs-api.rst19
-rw-r--r--Documentation/features/core/cBPF-JIT/arch-support.txt1
-rw-r--r--Documentation/features/core/eBPF-JIT/arch-support.txt1
-rw-r--r--Documentation/features/core/generic-idle-thread/arch-support.txt1
-rw-r--r--Documentation/features/core/jump-labels/arch-support.txt1
-rw-r--r--Documentation/features/core/tracehook/arch-support.txt1
-rw-r--r--Documentation/features/debug/KASAN/arch-support.txt1
-rw-r--r--Documentation/features/debug/debug-vm-pgtable/arch-support.txt1
-rw-r--r--Documentation/features/debug/gcov-profile-all/arch-support.txt3
-rw-r--r--Documentation/features/debug/kcov/arch-support.txt1
-rw-r--r--Documentation/features/debug/kgdb/arch-support.txt1
-rw-r--r--Documentation/features/debug/kmemleak/arch-support.txt3
-rw-r--r--Documentation/features/debug/kprobes-on-ftrace/arch-support.txt3
-rw-r--r--Documentation/features/debug/kprobes/arch-support.txt3
-rw-r--r--Documentation/features/debug/kretprobes/arch-support.txt3
-rw-r--r--Documentation/features/debug/optprobes/arch-support.txt1
-rw-r--r--Documentation/features/debug/stackprotector/arch-support.txt1
-rw-r--r--Documentation/features/debug/uprobes/arch-support.txt3
-rw-r--r--Documentation/features/debug/user-ret-profiler/arch-support.txt1
-rw-r--r--Documentation/features/io/dma-contiguous/arch-support.txt1
-rw-r--r--Documentation/features/locking/cmpxchg-local/arch-support.txt1
-rw-r--r--Documentation/features/locking/lockdep/arch-support.txt1
-rw-r--r--Documentation/features/locking/queued-rwlocks/arch-support.txt1
-rw-r--r--Documentation/features/locking/queued-spinlocks/arch-support.txt1
-rw-r--r--Documentation/features/perf/kprobes-event/arch-support.txt3
-rw-r--r--Documentation/features/perf/perf-regs/arch-support.txt3
-rw-r--r--Documentation/features/perf/perf-stackdump/arch-support.txt3
-rw-r--r--Documentation/features/sched/membarrier-sync-core/arch-support.txt1
-rw-r--r--Documentation/features/sched/numa-balancing/arch-support.txt3
-rw-r--r--Documentation/features/seccomp/seccomp-filter/arch-support.txt1
-rw-r--r--Documentation/features/time/arch-tick-broadcast/arch-support.txt1
-rw-r--r--Documentation/features/time/clockevents/arch-support.txt1
-rw-r--r--Documentation/features/time/context-tracking/arch-support.txt1
-rw-r--r--Documentation/features/time/irq-time-acct/arch-support.txt1
-rw-r--r--Documentation/features/time/virt-cpuacct/arch-support.txt1
-rw-r--r--Documentation/features/vm/ELF-ASLR/arch-support.txt1
-rw-r--r--Documentation/features/vm/PG_uncached/arch-support.txt1
-rw-r--r--Documentation/features/vm/THP/arch-support.txt1
-rw-r--r--Documentation/features/vm/TLB/arch-support.txt1
-rw-r--r--Documentation/features/vm/huge-vmap/arch-support.txt1
-rw-r--r--Documentation/features/vm/ioremap_prot/arch-support.txt1
-rw-r--r--Documentation/features/vm/pte_special/arch-support.txt1
-rw-r--r--Documentation/filesystems/afs.rst8
-rw-r--r--Documentation/filesystems/dax.txt17
-rw-r--r--Documentation/filesystems/f2fs.rst19
-rw-r--r--Documentation/filesystems/fsverity.rst76
-rw-r--r--Documentation/filesystems/index.rst1
-rw-r--r--Documentation/filesystems/locking.rst7
-rw-r--r--Documentation/filesystems/overlayfs.rst8
-rw-r--r--Documentation/filesystems/porting.rst25
-rw-r--r--Documentation/filesystems/proc.rst9
-rw-r--r--Documentation/filesystems/seq_file.rst6
-rw-r--r--Documentation/filesystems/vfs.rst26
-rw-r--r--Documentation/firmware-guide/acpi/debug.rst19
-rw-r--r--Documentation/firmware-guide/acpi/gpio-properties.rst1
-rw-r--r--Documentation/fpga/dfl.rst28
-rw-r--r--Documentation/gpu/drm-kms.rst52
-rw-r--r--Documentation/gpu/drm-uapi.rst3
-rw-r--r--Documentation/gpu/i915.rst2
-rw-r--r--Documentation/gpu/todo.rst21
-rw-r--r--Documentation/gpu/vkms.rst82
-rw-r--r--Documentation/hid/amd-sfh-hid.rst22
-rw-r--r--Documentation/hid/hid-alps.rst4
-rw-r--r--Documentation/hid/hid-sensor.rst18
-rw-r--r--Documentation/hid/hid-transport.rst12
-rw-r--r--Documentation/hid/hiddev.rst10
-rw-r--r--Documentation/hid/hidraw.rst5
-rw-r--r--Documentation/hid/intel-ish-hid.rst78
-rw-r--r--Documentation/hid/uhid.rst34
-rw-r--r--Documentation/hwmon/ab8500.rst26
-rw-r--r--Documentation/hwmon/abx500.rst32
-rw-r--r--Documentation/hwmon/aht10.rst46
-rw-r--r--Documentation/hwmon/ina2xx.rst2
-rw-r--r--Documentation/hwmon/index.rst4
-rw-r--r--Documentation/hwmon/max16601.rst197
-rw-r--r--Documentation/hwmon/nct6683.rst1
-rw-r--r--Documentation/hwmon/tps23861.rst41
-rw-r--r--Documentation/i2c/slave-testunit-backend.rst23
-rw-r--r--Documentation/iio/ep93xx_adc.rst2
-rw-r--r--Documentation/index.rst11
-rw-r--r--Documentation/input/event-codes.rst15
-rw-r--r--Documentation/input/multi-touch-protocol.rst4
-rw-r--r--Documentation/kbuild/gcc-plugins.rst47
-rw-r--r--Documentation/kbuild/llvm.rst44
-rw-r--r--Documentation/kbuild/makefiles.rst15
-rw-r--r--Documentation/kernel-hacking/hacking.rst4
-rw-r--r--Documentation/kernel-hacking/locking.rst2
-rw-r--r--Documentation/livepatch/index.rst1
-rw-r--r--Documentation/livepatch/livepatch.rst15
-rw-r--r--Documentation/livepatch/module-elf-format.rst10
-rw-r--r--Documentation/livepatch/reliable-stacktrace.rst309
-rw-r--r--Documentation/networking/bonding.rst13
-rw-r--r--Documentation/networking/caif/caif.rst1
-rw-r--r--Documentation/networking/device_drivers/ethernet/index.rst1
-rw-r--r--Documentation/networking/device_drivers/ethernet/intel/ice.rst1027
-rw-r--r--Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst70
-rw-r--r--Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst217
-rw-r--r--Documentation/networking/device_drivers/ethernet/ti/am65_nuss_cpsw_switchdev.rst143
-rw-r--r--Documentation/networking/device_drivers/index.rst1
-rw-r--r--Documentation/networking/device_drivers/qlogic/index.rst18
-rw-r--r--Documentation/networking/device_drivers/qlogic/qlge.rst118
-rw-r--r--Documentation/networking/devlink/am65-nuss-cpsw-switch.rst26
-rw-r--r--Documentation/networking/devlink/devlink-port.rst199
-rw-r--r--Documentation/networking/devlink/devlink-resource.rst14
-rw-r--r--Documentation/networking/devlink/devlink-trap.rst5
-rw-r--r--Documentation/networking/devlink/index.rst2
-rw-r--r--Documentation/networking/dsa/dsa.rst4
-rw-r--r--Documentation/networking/ethtool-netlink.rst11
-rw-r--r--Documentation/networking/filter.rst67
-rw-r--r--Documentation/networking/ip-sysctl.rst92
-rw-r--r--Documentation/networking/netdev-FAQ.rst16
-rw-r--r--Documentation/networking/netdev-features.rst21
-rw-r--r--Documentation/networking/phy.rst13
-rw-r--r--Documentation/networking/sfp-phylink.rst2
-rw-r--r--Documentation/networking/snmp_counter.rst28
-rw-r--r--Documentation/networking/timestamping.rst3
-rw-r--r--Documentation/power/freezing-of-tasks.rst2
-rw-r--r--Documentation/power/index.rst1
-rw-r--r--Documentation/power/powercap/dtpm.rst212
-rw-r--r--Documentation/power/runtime_pm.rst14
-rw-r--r--Documentation/powerpc/syscall64-abi.rst51
-rw-r--r--Documentation/process/4.Coding.rst2
-rw-r--r--Documentation/process/adding-syscalls.rst20
-rw-r--r--Documentation/process/coding-style.rst20
-rw-r--r--Documentation/process/howto.rst20
-rw-r--r--Documentation/process/magic-number.rst3
-rw-r--r--Documentation/process/submit-checklist.rst16
-rw-r--r--Documentation/process/submitting-patches.rst29
-rw-r--r--Documentation/scheduler/sched-bwc.rst17
-rw-r--r--Documentation/scheduler/sched-deadline.rst2
-rw-r--r--Documentation/scheduler/sched-design-CFS.rst6
-rw-r--r--Documentation/scheduler/schedutil.txt169
-rw-r--r--Documentation/scsi/libsas.rst9
-rw-r--r--Documentation/scsi/scsi-parameters.rst5
-rw-r--r--Documentation/security/keys/core.rst4
-rw-r--r--Documentation/security/lsm-development.rst2
-rw-r--r--Documentation/sound/designs/index.rst1
-rw-r--r--Documentation/sound/designs/jack-injection.rst166
-rw-r--r--Documentation/sphinx/automarkup.py7
-rw-r--r--Documentation/sphinx/cdomain.py8
-rw-r--r--Documentation/sphinx/kernel_abi.py27
-rw-r--r--Documentation/sphinx/kernel_feat.py25
-rw-r--r--Documentation/sphinx/kerneldoc.py26
-rw-r--r--Documentation/sphinx/kernellog.py26
-rw-r--r--Documentation/sphinx/kfigure.py14
-rwxr-xr-xDocumentation/sphinx/maintainers_include.py2
-rw-r--r--Documentation/sphinx/requirements.txt1
-rwxr-xr-xDocumentation/sphinx/rstFlatTable.py10
-rw-r--r--Documentation/timers/timers-howto.rst2
-rw-r--r--Documentation/trace/coresight/coresight.rst32
-rw-r--r--Documentation/trace/ftrace.rst6
-rw-r--r--Documentation/translations/it_IT/process/4.Coding.rst2
-rw-r--r--Documentation/translations/it_IT/process/adding-syscalls.rst18
-rw-r--r--Documentation/translations/it_IT/process/magic-number.rst1
-rw-r--r--Documentation/translations/it_IT/process/submit-checklist.rst2
-rw-r--r--Documentation/translations/it_IT/process/submitting-patches.rst4
-rw-r--r--Documentation/translations/ja_JP/SubmittingPatches4
-rw-r--r--Documentation/translations/ko_KR/howto.rst4
-rw-r--r--Documentation/translations/ko_KR/index.rst15
-rw-r--r--Documentation/translations/zh_CN/admin-guide/cpu-load.rst2
-rw-r--r--Documentation/translations/zh_CN/arm/Booting2
-rw-r--r--Documentation/translations/zh_CN/iio/ep93xx_adc.rst46
-rw-r--r--Documentation/translations/zh_CN/iio/iio_configfs.rst102
-rw-r--r--Documentation/translations/zh_CN/iio/index.rst20
-rw-r--r--Documentation/translations/zh_CN/mips/booting.rst31
-rw-r--r--Documentation/translations/zh_CN/mips/features.rst10
-rw-r--r--Documentation/translations/zh_CN/mips/index.rst26
-rw-r--r--Documentation/translations/zh_CN/mips/ingenic-tcu.rst69
-rw-r--r--Documentation/translations/zh_CN/process/4.Coding.rst2
-rw-r--r--Documentation/translations/zh_CN/process/magic-number.rst1
-rw-r--r--Documentation/translations/zh_CN/process/submitting-patches.rst4
-rw-r--r--Documentation/usb/gadget-testing.rst30
-rw-r--r--Documentation/usb/raw-gadget.rst102
-rw-r--r--Documentation/userspace-api/index.rst1
-rw-r--r--Documentation/userspace-api/ioctl/ioctl-number.rst6
-rw-r--r--Documentation/userspace-api/media/drivers/ccs.rst110
-rw-r--r--Documentation/userspace-api/media/drivers/index.rst1
-rw-r--r--Documentation/userspace-api/media/dvb/dvbstb.svg2
-rw-r--r--Documentation/userspace-api/media/mediactl/media-types.rst7
-rw-r--r--Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst81
-rw-r--r--Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst6
-rw-r--r--Documentation/userspace-api/sysfs-platform_profile.rst42
-rw-r--r--Documentation/virt/acrn/cpuid.rst46
-rw-r--r--Documentation/virt/acrn/index.rst12
-rw-r--r--Documentation/virt/acrn/introduction.rst43
-rw-r--r--Documentation/virt/acrn/io-request.rst97
-rw-r--r--Documentation/virt/index.rst1
-rw-r--r--Documentation/virt/kvm/amd-memory-encryption.rst21
-rw-r--r--Documentation/virt/kvm/api.rst362
-rw-r--r--Documentation/virt/kvm/arm/hyp-abi.rst9
-rw-r--r--Documentation/virt/kvm/locking.rst9
-rw-r--r--Documentation/virt/kvm/nested-vmx.rst6
-rw-r--r--Documentation/virt/kvm/running-nested-guests.rst2
-rw-r--r--Documentation/virt/kvm/s390-pv-boot.rst2
-rw-r--r--Documentation/vm/arch_pgtable_helpers.rst8
-rw-r--r--Documentation/vm/split_page_table_lock.rst2
-rw-r--r--Documentation/x86/boot.rst2
-rw-r--r--MAINTAINERS556
-rw-r--r--Makefile201
-rw-r--r--arch/Kconfig148
-rw-r--r--arch/alpha/Kconfig2
-rw-r--r--arch/alpha/Makefile1
-rw-r--r--arch/alpha/configs/defconfig1
-rw-r--r--arch/alpha/kernel/process.c2
-rw-r--r--arch/alpha/kernel/syscalls/Makefile13
-rw-r--r--arch/alpha/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/alpha/oprofile/Makefile20
-rw-r--r--arch/alpha/oprofile/common.c189
-rw-r--r--arch/alpha/oprofile/op_impl.h55
-rw-r--r--arch/alpha/oprofile/op_model_ev4.c114
-rw-r--r--arch/alpha/oprofile/op_model_ev5.c209
-rw-r--r--arch/alpha/oprofile/op_model_ev6.c101
-rw-r--r--arch/alpha/oprofile/op_model_ev67.c261
-rw-r--r--arch/arc/Kconfig1
-rw-r--r--arch/arc/Makefile2
-rw-r--r--arch/arc/kernel/process.c2
-rw-r--r--arch/arc/oprofile/Makefile10
-rw-r--r--arch/arc/oprofile/common.c23
-rw-r--r--arch/arm/Kconfig30
-rw-r--r--arch/arm/Kconfig.debug135
-rw-r--r--arch/arm/Makefile8
-rw-r--r--arch/arm/boot/compressed/Makefile5
-rw-r--r--arch/arm/boot/compressed/atags_to_fdt.c3
-rw-r--r--arch/arm/boot/compressed/fdt_check_mem_start.c131
-rw-r--r--arch/arm/boot/compressed/head.S45
-rw-r--r--arch/arm/boot/dts/Makefile39
-rw-r--r--arch/arm/boot/dts/am335x-evm.dts13
-rw-r--r--arch/arm/boot/dts/am335x-evmsk.dts14
-rw-r--r--arch/arm/boot/dts/am335x-icev2.dts14
-rw-r--r--arch/arm/boot/dts/am335x-myirtech-myc.dtsi267
-rw-r--r--arch/arm/boot/dts/am335x-myirtech-myd.dts536
-rw-r--r--arch/arm/boot/dts/am33xx-l4.dtsi49
-rw-r--r--arch/arm/boot/dts/am574x-idk.dts4
-rw-r--r--arch/arm/boot/dts/armada-385-linksys.dtsi2
-rw-r--r--arch/arm/boot/dts/armada-388-helios4.dts28
-rw-r--r--arch/arm/boot/dts/aspeed-ast2600-evb.dts8
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-ampere-mtjade.dts558
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-bytedance-g220a.dts10
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts775
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts3
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-inspur-fp5280g2.dts37
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts16
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-mowgli.dts5
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-supermicro-x11spi.dts137
-rw-r--r--arch/arm/boot/dts/aspeed-g4.dtsi1
-rw-r--r--arch/arm/boot/dts/aspeed-g5.dtsi1
-rw-r--r--arch/arm/boot/dts/aspeed-g6.dtsi1
-rw-r--r--arch/arm/boot/dts/at91-kizbox3_common.dtsi1
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_som1_ek.dts1
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi22
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts1
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_icp.dts1
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts1
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_xplained.dts1
-rw-r--r--arch/arm/boot/dts/atlas6-evb.dts78
-rw-r--r--arch/arm/boot/dts/atlas6.dtsi800
-rw-r--r--arch/arm/boot/dts/atlas7-evb.dts127
-rw-r--r--arch/arm/boot/dts/atlas7.dtsi1955
-rw-r--r--arch/arm/boot/dts/bcm21664.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm2711-rpi-4-b.dts17
-rw-r--r--arch/arm/boot/dts/bcm2711.dtsi31
-rw-r--r--arch/arm/boot/dts/berlin2.dtsi12
-rw-r--r--arch/arm/boot/dts/berlin2cd-google-chromecast.dts6
-rw-r--r--arch/arm/boot/dts/berlin2cd.dtsi12
-rw-r--r--arch/arm/boot/dts/berlin2q.dtsi12
-rw-r--r--arch/arm/boot/dts/cros-ec-keyboard.dtsi93
-rw-r--r--arch/arm/boot/dts/dra71-evm.dts2
-rw-r--r--arch/arm/boot/dts/dra76x.dtsi36
-rw-r--r--arch/arm/boot/dts/e60k02.dtsi6
-rw-r--r--arch/arm/boot/dts/efm32gg-dk3750.dts88
-rw-r--r--arch/arm/boot/dts/efm32gg.dtsi177
-rw-r--r--arch/arm/boot/dts/exynos3250-artik5.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos3250-monk.dts2
-rw-r--r--arch/arm/boot/dts/exynos3250-rinato.dts2
-rw-r--r--arch/arm/boot/dts/exynos4210-i9100.dts30
-rw-r--r--arch/arm/boot/dts/exynos5250-spring.dts2
-rw-r--r--arch/arm/boot/dts/exynos5420-arndale-octa.dts2
-rw-r--r--arch/arm/boot/dts/exynos5422-odroid-core.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos54xx.dtsi6
-rw-r--r--arch/arm/boot/dts/imx28.dtsi10
-rw-r--r--arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi1
-rw-r--r--arch/arm/boot/dts/imx6dl-plybas.dts394
-rw-r--r--arch/arm/boot/dts/imx6dl-plym2m.dts446
-rw-r--r--arch/arm/boot/dts/imx6dl-prtmvt.dts852
-rw-r--r--arch/arm/boot/dts/imx6dl-victgo.dts852
-rw-r--r--arch/arm/boot/dts/imx6dl-vicut1.dts13
-rw-r--r--arch/arm/boot/dts/imx6q-tbs2910.dts7
-rw-r--r--arch/arm/boot/dts/imx6q-vicut1.dts17
-rw-r--r--arch/arm/boot/dts/imx6q.dtsi20
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw52xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi6
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabreauto.dtsi1
-rw-r--r--arch/arm/boot/dts/imx6qdl-sr-som.dtsi14
-rw-r--r--arch/arm/boot/dts/imx6qdl-vicut1.dtsi803
-rw-r--r--arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi42
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi25
-rw-r--r--arch/arm/boot/dts/imx6qp-vicutp.dts13
-rw-r--r--arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts18
-rw-r--r--arch/arm/boot/dts/imx6sl-tolino-shine3.dts15
-rw-r--r--arch/arm/boot/dts/imx6sll-kobo-clarahd.dts13
-rw-r--r--arch/arm/boot/dts/imx6sx-sdb.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6ul-14x14-evk.dtsi54
-rw-r--r--arch/arm/boot/dts/imx6ul-prti6g.dts356
-rw-r--r--arch/arm/boot/dts/imx6ul.dtsi2
-rw-r--r--arch/arm/boot/dts/imx7d-flex-concentrator.dts1
-rw-r--r--arch/arm/boot/dts/imx7s.dtsi1
-rw-r--r--arch/arm/boot/dts/keystone-k2e.dtsi6
-rw-r--r--arch/arm/boot/dts/keystone.dtsi4
-rw-r--r--arch/arm/boot/dts/lpc32xx.dtsi3
-rw-r--r--arch/arm/boot/dts/meson.dtsi19
-rw-r--r--arch/arm/boot/dts/meson8.dtsi75
-rw-r--r--arch/arm/boot/dts/meson8b-ec100.dts5
-rw-r--r--arch/arm/boot/dts/meson8b-mxq.dts5
-rw-r--r--arch/arm/boot/dts/meson8b-odroidc1.dts5
-rw-r--r--arch/arm/boot/dts/meson8b.dtsi75
-rw-r--r--arch/arm/boot/dts/meson8m2-mxiii-plus.dts5
-rw-r--r--arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts8
-rw-r--r--arch/arm/boot/dts/mmp2.dtsi5
-rw-r--r--arch/arm/boot/dts/mmp3-dell-ariel.dts33
-rw-r--r--arch/arm/boot/dts/mmp3.dtsi8
-rw-r--r--arch/arm/boot/dts/motorola-mapphone-common.dtsi23
-rw-r--r--arch/arm/boot/dts/mstar-infinity-breadbee-common.dtsi49
-rw-r--r--arch/arm/boot/dts/mstar-infinity-msc313-breadbee_crust.dts1
-rw-r--r--arch/arm/boot/dts/mstar-infinity3-msc313e-breadbee.dts1
-rw-r--r--arch/arm/boot/dts/omap3-echo.dts67
-rw-r--r--arch/arm/boot/dts/omap3-gta04.dtsi7
-rw-r--r--arch/arm/boot/dts/omap3-igep.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-igep0020-common.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-igep0020-rev-f.dts2
-rw-r--r--arch/arm/boot/dts/omap3-igep0020.dts2
-rw-r--r--arch/arm/boot/dts/omap3-igep0030-common.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-igep0030-rev-g.dts2
-rw-r--r--arch/arm/boot/dts/omap3-igep0030.dts2
-rw-r--r--arch/arm/boot/dts/omap36xx.dtsi1
-rw-r--r--arch/arm/boot/dts/omap4-droid4-xt894.dts5
-rw-r--r--arch/arm/boot/dts/omap443x.dtsi6
-rw-r--r--arch/arm/boot/dts/omap5.dtsi6
-rw-r--r--arch/arm/boot/dts/owl-s500-cubieboard6.dts7
-rw-r--r--arch/arm/boot/dts/owl-s500-guitar-bb-rev-b.dts7
-rw-r--r--arch/arm/boot/dts/owl-s500-labrador-base-m.dts7
-rw-r--r--arch/arm/boot/dts/owl-s500-roseapplepi.dts97
-rw-r--r--arch/arm/boot/dts/owl-s500-sparky.dts7
-rw-r--r--arch/arm/boot/dts/owl-s500.dtsi140
-rw-r--r--arch/arm/boot/dts/picoxcell-pc3x2.dtsi243
-rw-r--r--arch/arm/boot/dts/picoxcell-pc3x3.dtsi355
-rw-r--r--arch/arm/boot/dts/picoxcell-pc7302-pc3x2.dts78
-rw-r--r--arch/arm/boot/dts/picoxcell-pc7302-pc3x3.dts84
-rw-r--r--arch/arm/boot/dts/prima2-evb.dts37
-rw-r--r--arch/arm/boot/dts/prima2.dtsi838
-rw-r--r--arch/arm/boot/dts/qcom-apq8060-dragonboard.dts12
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts8
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts8
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-ifc6410.dts10
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-sony-xperia-yuga.dts4
-rw-r--r--arch/arm/boot/dts/qcom-apq8074-dragonboard.dts10
-rw-r--r--arch/arm/boot/dts/qcom-ipq4018-ap120c-ac-bit.dts28
-rw-r--r--arch/arm/boot/dts/qcom-ipq4018-ap120c-ac.dts27
-rw-r--r--arch/arm/boot/dts/qcom-ipq4018-ap120c-ac.dtsi254
-rw-r--r--arch/arm/boot/dts/qcom-ipq4018-jalapeno.dts214
-rw-r--r--arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi18
-rw-r--r--arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts4
-rw-r--r--arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi10
-rw-r--r--arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1-c1.dts8
-rw-r--r--arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1-c2.dts2
-rw-r--r--arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1.dtsi10
-rw-r--r--arch/arm/boot/dts/qcom-ipq4019.dtsi90
-rw-r--r--arch/arm/boot/dts/qcom-ipq8064-ap148.dts2
-rw-r--r--arch/arm/boot/dts/qcom-ipq8064-v1.0.dtsi12
-rw-r--r--arch/arm/boot/dts/qcom-ipq8064.dtsi31
-rw-r--r--arch/arm/boot/dts/qcom-mdm9615-wp8548.dtsi16
-rw-r--r--arch/arm/boot/dts/qcom-msm8660-surf.dts4
-rw-r--r--arch/arm/boot/dts/qcom-msm8960-cdp.dts8
-rw-r--r--arch/arm/boot/dts/qcom-msm8974-fairphone-fp2.dts14
-rw-r--r--arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts32
-rw-r--r--arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts115
-rw-r--r--arch/arm/boot/dts/qcom-msm8974-sony-xperia-amami.dts14
-rw-r--r--arch/arm/boot/dts/qcom-msm8974-sony-xperia-castor.dts18
-rw-r--r--arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts16
-rw-r--r--arch/arm/boot/dts/qcom-msm8974.dtsi43
-rw-r--r--arch/arm/boot/dts/qcom-msm8974pro.dtsi5
-rw-r--r--arch/arm/boot/dts/qcom-pmx55.dtsi84
-rw-r--r--arch/arm/boot/dts/qcom-sdx55-mtp.dts251
-rw-r--r--arch/arm/boot/dts/qcom-sdx55.dtsi505
-rw-r--r--arch/arm/boot/dts/rk3036.dtsi83
-rw-r--r--arch/arm/boot/dts/rk322x.dtsi32
-rw-r--r--arch/arm/boot/dts/rk3288-miqi.dts5
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi115
-rw-r--r--arch/arm/boot/dts/rk3xxx.dtsi106
-rw-r--r--arch/arm/boot/dts/rv1108.dtsi14
-rw-r--r--arch/arm/boot/dts/socfpga_arria10.dtsi13
-rw-r--r--arch/arm/boot/dts/ste-ab8500.dtsi64
-rw-r--r--arch/arm/boot/dts/ste-ab8505.dtsi67
-rw-r--r--arch/arm/boot/dts/ste-db8500.dtsi38
-rw-r--r--arch/arm/boot/dts/ste-db8520.dtsi38
-rw-r--r--arch/arm/boot/dts/ste-db9500.dtsi35
-rw-r--r--arch/arm/boot/dts/ste-dbx5x0.dtsi12
-rw-r--r--arch/arm/boot/dts/ste-href.dtsi23
-rw-r--r--arch/arm/boot/dts/ste-href520-tvk.dts42
-rw-r--r--arch/arm/boot/dts/ste-hrefprev60-stuib.dts19
-rw-r--r--arch/arm/boot/dts/ste-hrefprev60-tvk.dts19
-rw-r--r--arch/arm/boot/dts/ste-hrefprev60.dtsi8
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus-stuib.dts39
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus-tvk.dts39
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus.dtsi76
-rw-r--r--arch/arm/boot/dts/ste-nomadik-nhk15.dts2
-rw-r--r--arch/arm/boot/dts/ste-nomadik-s8815.dts2
-rw-r--r--arch/arm/boot/dts/ste-nomadik-stn8815.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-snowball.dts10
-rw-r--r--arch/arm/boot/dts/ste-u300.dts464
-rw-r--r--arch/arm/boot/dts/ste-ux500-samsung-golden.dts42
-rw-r--r--arch/arm/boot/dts/ste-ux500-samsung-janice.dts930
-rw-r--r--arch/arm/boot/dts/ste-ux500-samsung-skomer.dts41
-rw-r--r--arch/arm/boot/dts/stm32f429.dtsi2
-rw-r--r--arch/arm/boot/dts/stm32f746.dtsi4
-rw-r--r--arch/arm/boot/dts/stm32h743.dtsi2
-rw-r--r--arch/arm/boot/dts/stm32mp15-pinctrl.dtsi40
-rw-r--r--arch/arm/boot/dts/stm32mp151.dtsi9
-rw-r--r--arch/arm/boot/dts/stm32mp157a-stinger96.dtsi4
-rw-r--r--arch/arm/boot/dts/stm32mp157c-ed1.dts4
-rw-r--r--arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts13
-rw-r--r--arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi18
-rw-r--r--arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi4
-rw-r--r--arch/arm/boot/dts/stm32mp15xx-dhcom-picoitx.dtsi16
-rw-r--r--arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi23
-rw-r--r--arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi4
-rw-r--r--arch/arm/boot/dts/stm32mp15xx-dkx.dtsi4
-rw-r--r--arch/arm/boot/dts/sun4i-a10-a1000.dts4
-rw-r--r--arch/arm/boot/dts/sun4i-a10-cubieboard.dts4
-rw-r--r--arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10-inet1.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts2
-rw-r--r--arch/arm/boot/dts/sun4i-a10-marsboard.dts8
-rw-r--r--arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts2
-rw-r--r--arch/arm/boot/dts/sun4i-a10-pcduino.dts4
-rw-r--r--arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts1
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-auxtek-t003.dts2
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts2
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-mk802.dts2
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts2
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-r7-tv-dongle.dts2
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts2
-rw-r--r--arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts1
-rw-r--r--arch/arm/boot/dts/sun5i-a13-licheepi-one.dts6
-rw-r--r--arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts2
-rw-r--r--arch/arm/boot/dts/sun5i-a13-olinuxino.dts2
-rw-r--r--arch/arm/boot/dts/sun5i-a13-pocketbook-touch-lux-3.dts3
-rw-r--r--arch/arm/boot/dts/sun5i-a13.dtsi2
-rw-r--r--arch/arm/boot/dts/sun5i-gr8-evb.dts2
-rw-r--r--arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi1
-rw-r--r--arch/arm/boot/dts/sun5i.dtsi12
-rw-r--r--arch/arm/boot/dts/sun6i-a31-hummingbird.dts2
-rw-r--r--arch/arm/boot/dts/sun6i-a31-i7.dts2
-rw-r--r--arch/arm/boot/dts/sun6i-a31-m9.dts4
-rw-r--r--arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts4
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi4
-rw-r--r--arch/arm/boot/dts/sun6i-a31s-primo81.dts3
-rw-r--r--arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi2
-rw-r--r--arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts79
-rw-r--r--arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts2
-rw-r--r--arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi2
-rw-r--r--arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts4
-rw-r--r--arch/arm/boot/dts/sun7i-a20-bananapi.dts2
-rw-r--r--arch/arm/boot/dts/sun7i-a20-bananapro.dts6
-rw-r--r--arch/arm/boot/dts/sun7i-a20-cubieboard2.dts4
-rw-r--r--arch/arm/boot/dts/sun7i-a20-cubietruck.dts8
-rw-r--r--arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts4
-rw-r--r--arch/arm/boot/dts/sun7i-a20-itead-ibox.dts4
-rw-r--r--arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts2
-rw-r--r--arch/arm/boot/dts/sun7i-a20-m3.dts2
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts2
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olimex-som204-evb.dts6
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts2
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts2
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts2
-rw-r--r--arch/arm/boot/dts/sun7i-a20-orangepi-mini.dts4
-rw-r--r--arch/arm/boot/dts/sun7i-a20-orangepi.dts2
-rw-r--r--arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts6
-rw-r--r--arch/arm/boot/dts/sun7i-a20-pcduino3.dts4
-rw-r--r--arch/arm/boot/dts/sun7i-a20-wexler-tab7200.dts1
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi2
-rw-r--r--arch/arm/boot/dts/sun8i-a23-a33.dtsi2
-rw-r--r--arch/arm/boot/dts/sun8i-a33-inet-d978-rev2.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-a33-olinuxino.dts4
-rw-r--r--arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts3
-rw-r--r--arch/arm/boot/dts/sun8i-a33.dtsi2
-rw-r--r--arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts4
-rw-r--r--arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts8
-rw-r--r--arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-a83t.dtsi3
-rw-r--r--arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts30
-rw-r--r--arch/arm/boot/dts/sun8i-h3-beelink-x2.dts4
-rw-r--r--arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts4
-rw-r--r--arch/arm/boot/dts/sun8i-h3-nanopi-neo-air.dts4
-rw-r--r--arch/arm/boot/dts/sun8i-h3-nanopi-r1.dts4
-rw-r--r--arch/arm/boot/dts/sun8i-h3-nanopi.dtsi4
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-zero-plus2.dts4
-rw-r--r--arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts8
-rw-r--r--arch/arm/boot/dts/sun8i-r16-parrot.dts10
-rw-r--r--arch/arm/boot/dts/sun8i-r40.dtsi19
-rw-r--r--arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi3
-rw-r--r--arch/arm/boot/dts/sun8i-s3-elimo-impetus.dtsi2
-rw-r--r--arch/arm/boot/dts/sun8i-s3-pinecube.dts11
-rw-r--r--arch/arm/boot/dts/sun8i-v3-sl631-imx179.dts12
-rw-r--r--arch/arm/boot/dts/sun8i-v3-sl631.dtsi138
-rw-r--r--arch/arm/boot/dts/sun8i-v3s.dtsi17
-rw-r--r--arch/arm/boot/dts/sun9i-a80-cubieboard4.dts4
-rw-r--r--arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi1
-rw-r--r--arch/arm/boot/dts/tango4-common.dtsi184
-rw-r--r--arch/arm/boot/dts/tango4-smp8758.dtsi57
-rw-r--r--arch/arm/boot/dts/tango4-vantage-1172.dts42
-rw-r--r--arch/arm/boot/dts/tegra30-ouya.dts4
-rw-r--r--arch/arm/boot/dts/zx296702-ad1.dts48
-rw-r--r--arch/arm/boot/dts/zx296702.dtsi142
-rw-r--r--arch/arm/boot/dts/zynq-ebaz4205.dts132
-rw-r--r--arch/arm/common/locomo.c5
-rw-r--r--arch/arm/common/sa1111.c6
-rw-r--r--arch/arm/configs/at91_dt_defconfig18
-rw-r--r--arch/arm/configs/bcm2835_defconfig2
-rw-r--r--arch/arm/configs/cns3420vb_defconfig1
-rw-r--r--arch/arm/configs/corgi_defconfig1
-rw-r--r--arch/arm/configs/efm32_defconfig98
-rw-r--r--arch/arm/configs/imx_v4_v5_defconfig1
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig1
-rw-r--r--arch/arm/configs/keystone_defconfig1
-rw-r--r--arch/arm/configs/multi_v5_defconfig2
-rw-r--r--arch/arm/configs/multi_v7_defconfig15
-rw-r--r--arch/arm/configs/mv78xx0_defconfig1
-rw-r--r--arch/arm/configs/mvebu_v5_defconfig1
-rw-r--r--arch/arm/configs/mxs_defconfig1
-rw-r--r--arch/arm/configs/omap1_defconfig1
-rw-r--r--arch/arm/configs/omap2plus_defconfig25
-rw-r--r--arch/arm/configs/orion5x_defconfig1
-rw-r--r--arch/arm/configs/prima2_defconfig72
-rw-r--r--arch/arm/configs/pxa_defconfig1
-rw-r--r--arch/arm/configs/qcom_defconfig19
-rw-r--r--arch/arm/configs/sama5_defconfig15
-rw-r--r--arch/arm/configs/socfpga_defconfig1
-rw-r--r--arch/arm/configs/spitz_defconfig1
-rw-r--r--arch/arm/configs/tango4_defconfig93
-rw-r--r--arch/arm/configs/tegra_defconfig4
-rw-r--r--arch/arm/configs/u300_defconfig65
-rw-r--r--arch/arm/configs/vexpress_defconfig1
-rw-r--r--arch/arm/configs/zx_defconfig122
-rw-r--r--arch/arm/crypto/Kconfig19
-rw-r--r--arch/arm/crypto/Makefile4
-rw-r--r--arch/arm/crypto/aes-neonbs-glue.c3
-rw-r--r--arch/arm/crypto/blake2b-neon-core.S347
-rw-r--r--arch/arm/crypto/blake2b-neon-glue.c105
-rw-r--r--arch/arm/crypto/blake2s-core.S285
-rw-r--r--arch/arm/crypto/blake2s-glue.c78
-rw-r--r--arch/arm/include/asm/archrandom.h10
-rw-r--r--arch/arm/include/asm/assembler.h17
-rw-r--r--arch/arm/include/asm/hardware/locomo.h2
-rw-r--r--arch/arm/include/asm/hardware/sa1111.h2
-rw-r--r--arch/arm/include/asm/kexec-internal.h12
-rw-r--r--arch/arm/include/debug/brcmstb.S30
-rw-r--r--arch/arm/include/debug/efm32.S45
-rw-r--r--arch/arm/include/debug/sirf.S40
-rw-r--r--arch/arm/include/debug/sti.S26
-rw-r--r--arch/arm/include/debug/tegra.S54
-rw-r--r--arch/arm/kernel/asm-offsets.c5
-rw-r--r--arch/arm/kernel/machine_kexec.c20
-rw-r--r--arch/arm/kernel/process.c2
-rw-r--r--arch/arm/kernel/relocate_kernel.S38
-rw-r--r--arch/arm/kernel/signal.c14
-rw-r--r--arch/arm/kernel/smp.c3
-rw-r--r--arch/arm/kernel/sys_oabi-compat.c15
-rw-r--r--arch/arm/mach-at91/pm_suspend.S2
-rw-r--r--arch/arm/mach-bcm/Kconfig1
-rw-r--r--arch/arm/mach-efm32/Makefile.boot4
-rw-r--r--arch/arm/mach-efm32/dtmachine.c16
-rw-r--r--arch/arm/mach-exynos/Kconfig1
-rw-r--r--arch/arm/mach-footbridge/dc21285.c12
-rw-r--r--arch/arm/mach-imx/common.h1
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6sl.c1
-rw-r--r--arch/arm/mach-imx/hardware.h4
-rw-r--r--arch/arm/mach-imx/mach-imx6ul.c25
-rw-r--r--arch/arm/mach-imx/pm-imx6.c1
-rw-r--r--arch/arm/mach-imx/suspend-imx6.S1
-rw-r--r--arch/arm/mach-ixp4xx/Kconfig1
-rw-r--r--arch/arm/mach-omap1/board-osk.c2
-rw-r--r--arch/arm/mach-omap2/Kconfig28
-rw-r--r--arch/arm/mach-omap2/clockdomain.c4
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c16
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c1
-rw-r--r--arch/arm/mach-picoxcell/Kconfig9
-rw-r--r--arch/arm/mach-picoxcell/common.c81
-rw-r--r--arch/arm/mach-prima2/Kconfig48
-rw-r--r--arch/arm/mach-prima2/Makefile9
-rw-r--r--arch/arm/mach-prima2/common.c64
-rw-r--r--arch/arm/mach-prima2/common.h32
-rw-r--r--arch/arm/mach-prima2/headsmp.S36
-rw-r--r--arch/arm/mach-prima2/hotplug.c38
-rw-r--r--arch/arm/mach-prima2/platsmp.c123
-rw-r--r--arch/arm/mach-prima2/pm.c153
-rw-r--r--arch/arm/mach-prima2/pm.h28
-rw-r--r--arch/arm/mach-prima2/rstc.c107
-rw-r--r--arch/arm/mach-prima2/rtciobrg.c179
-rw-r--r--arch/arm/mach-prima2/sleep.S63
-rw-r--r--arch/arm/mach-pxa/devices.c8
-rw-r--r--arch/arm/mach-pxa/mioa701.c1
-rw-r--r--arch/arm/mach-pxa/palm27x.c1
-rw-r--r--arch/arm/mach-pxa/palmte2.c1
-rw-r--r--arch/arm/mach-pxa/z2.c24
-rw-r--r--arch/arm/mach-s3c/irq-s3c24xx-fiq.S9
-rw-r--r--arch/arm/mach-s3c/irq-s3c24xx.c8
-rw-r--r--arch/arm/mach-sa1100/collie.c6
-rw-r--r--arch/arm/mach-spear/generic.h12
-rw-r--r--arch/arm/mach-spear/spear13xx.c1
-rw-r--r--arch/arm/mach-sunxi/Kconfig2
-rw-r--r--arch/arm/mach-tango/Kconfig13
-rw-r--r--arch/arm/mach-tango/Makefile4
-rw-r--r--arch/arm/mach-tango/platsmp.c52
-rw-r--r--arch/arm/mach-tango/pm.c31
-rw-r--r--arch/arm/mach-tango/pm.h7
-rw-r--r--arch/arm/mach-tango/setup.c20
-rw-r--r--arch/arm/mach-tango/smc.S12
-rw-r--r--arch/arm/mach-tango/smc.h9
-rw-r--r--arch/arm/mach-tegra/sleep-tegra20.S38
-rw-r--r--arch/arm/mach-tegra/sleep-tegra30.S94
-rw-r--r--arch/arm/mach-u300/Kconfig32
-rw-r--r--arch/arm/mach-u300/Makefile8
-rw-r--r--arch/arm/mach-u300/core.c413
-rw-r--r--arch/arm/mach-u300/regulator.c134
-rw-r--r--arch/arm/mach-zx/Kconfig21
-rw-r--r--arch/arm/mach-zx/Makefile3
-rw-r--r--arch/arm/mach-zx/core.h16
-rw-r--r--arch/arm/mach-zx/headsmp.S30
-rw-r--r--arch/arm/mach-zx/platsmp.c186
-rw-r--r--arch/arm/mach-zx/zx296702-pm-domain.c202
-rw-r--r--arch/arm/mach-zx/zx296702.c22
-rw-r--r--arch/arm/mm/Kconfig1
-rw-r--r--arch/arm/mm/dump.c9
-rw-r--r--arch/arm/mm/mmu.c1
-rw-r--r--arch/arm/net/bpf_jit_32.c7
-rw-r--r--arch/arm/oprofile/Makefile14
-rw-r--r--arch/arm/oprofile/common.c132
-rw-r--r--arch/arm/tools/Makefile2
-rw-r--r--arch/arm/tools/syscall.tbl1
-rw-r--r--arch/arm/vdso/Makefile1
-rw-r--r--arch/arm/xen/enlighten.c2
-rw-r--r--arch/arm/xen/p2m.c33
-rw-r--r--arch/arm64/Kconfig20
-rw-r--r--arch/arm64/Kconfig.platforms14
-rw-r--r--arch/arm64/Makefile10
-rw-r--r--arch/arm64/boot/dts/Makefile1
-rw-r--r--arch/arm64/boot/dts/allwinner/Makefile1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts6
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts11
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi18
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab-early-adopter.dts26
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi6
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi13
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-cpu-opp.dtsi20
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts15
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts7
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi26
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/Makefile4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-axg.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts133
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi16
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx.dtsi3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc-v2.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi21
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts7
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts429
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts96
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi442
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts10
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm4908/Makefile1
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-netgear-r8000p.dts52
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm4908/bcm4906.dtsi18
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts51
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi117
-rw-r--r--arch/arm64/boot/dts/broadcom/stingray/bcm958742-base.dtsi64
-rw-r--r--arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi278
-rw-r--r--arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi7
-rw-r--r--arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi7
-rw-r--r--arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi4
-rw-r--r--arch/arm64/boot/dts/exynos/exynos7-espresso.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/Makefile8
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts21
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts5
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts50
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi60
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-kbox-a-230-ls.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts62
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts5
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts8
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts16
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi184
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi98
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts3
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi100
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts6
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi122
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi17
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi17
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi118
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi3
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi12
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts10
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts24
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi147
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2162a-qds.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts393
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw700x.dtsi495
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx-0x.dts19
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi186
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x.dts20
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi311
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x.dts19
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi362
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm.dtsi23
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi307
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts19
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi466
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi75
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn.dtsi46
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk.dts21
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts161
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi293
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp.dtsi107
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts20
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts6
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-librem5-r4.dts35
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi113
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts93
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-zest.dts30
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi118
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi72
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi6
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660.dtsi6
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3670.dtsi77
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi8
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220.dtsi8
-rw-r--r--arch/arm64/boot/dts/hisilicon/hikey970-pinctrl.dtsi632
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip05.dtsi2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip06.dtsi6
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip07.dtsi9
-rw-r--r--arch/arm64/boot/dts/intel/Makefile1
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_agilex.dtsi4
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts53
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-37xx.dtsi2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap807.dtsi5
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap80x.dtsi3
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp11x.dtsi12
-rw-r--r--arch/arm64/boot/dts/marvell/cn9130-db.dts2
-rw-r--r--arch/arm64/boot/dts/mediatek/Makefile1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6779.dtsi17
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-evb.dts12
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-krane-sku0.dts23
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-krane-sku176.dts5
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi46
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183.dtsi26
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8192.dtsi57
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8516.dtsi30
-rw-r--r--arch/arm64/boot/dts/nvidia/Makefile1
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts742
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186.dtsi22
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi8
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts595
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p3509-0000+p3668-0000.dts339
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p3509-0000+p3668-0001.dts10
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p3509-0000.dtsi351
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi282
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p3668-0001.dtsi19
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi284
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194.dtsi48
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts299
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts175
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210.dtsi261
-rw-r--r--arch/arm64/boot/dts/qcom/Makefile15
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/apq8094-sony-xperia-kitakami-karin_windy.dts23
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts291
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-asus-z00l.dts195
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts267
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-pins.dtsi16
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi10
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916.dtsi20
-rw-r--r--arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts41
-rw-r--r--arch/arm64/boot/dts/qcom/msm8992-msft-lumia-octagon-talkman.dts15
-rw-r--r--arch/arm64/boot/dts/qcom/msm8992-msft-lumia-talkman.dts67
-rw-r--r--arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts92
-rw-r--r--arch/arm64/boot/dts/qcom/msm8992.dtsi743
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994-msft-lumia-cityman.dts73
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon-cityman.dts15
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi909
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-ivy.dts26
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-karin.dts45
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-satsuki.dts18
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-sumire.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-suzuran.dts20
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi457
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994.dtsi321
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi16
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi10
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998-pins.dtsi108
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998.dtsi393
-rw-r--r--arch/arm64/boot/dts/qcom/pm8150.dtsi12
-rw-r--r--arch/arm64/boot/dts/qcom/pm8150b.dtsi12
-rw-r--r--arch/arm64/boot/dts/qcom/pm8150l.dtsi12
-rw-r--r--arch/arm64/boot/dts/qcom/pm8994.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/pm8998.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/pmi8998.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/pms405.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/qrb5165-rb5.dts572
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi91
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180.dtsi51
-rw-r--r--arch/arm64/boot/dts/qcom/sdm630.dtsi12
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-db845c.dts8
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi623
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-oneplus-enchilada.dts19
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-oneplus-fajita.dts23
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts16
-rw-r--r--arch/arm64/boot/dts/qcom/sdm850.dtsi21
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150.dtsi158
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-mtp.dts269
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250.dtsi940
-rw-r--r--arch/arm64/boot/dts/qcom/sm8350-mtp.dts250
-rw-r--r--arch/arm64/boot/dts/qcom/sm8350.dtsi499
-rw-r--r--arch/arm64/boot/dts/renesas/Makefile2
-rw-r--r--arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi137
-rw-r--r--arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi47
-rw-r--r--arch/arm64/boot/dts/renesas/hihope-common.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts41
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774a1.dtsi32
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts66
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774b1.dtsi32
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0.dtsi17
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts71
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774e1.dtsi32
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77951.dtsi65
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77960.dtsi65
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77961.dtsi135
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77965.dtsi65
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77980-condor.dts2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77990.dtsi65
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77995-draak.dts2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77995.dtsi135
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi138
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts6
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779a0.dtsi837
-rw-r--r--arch/arm64/boot/dts/renesas/salvator-common.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/ulcb.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/Makefile2
-rw-r--r--arch/arm64/boot/dts/rockchip/px30.dtsi85
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3308.dtsi63
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts7
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts382
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock64.dts56
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi33
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi62
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-nanopi-m4b.dts52
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock960.dts53
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi29
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi112
-rw-r--r--arch/arm64/boot/dts/synaptics/as370.dtsi4
-rw-r--r--arch/arm64/boot/dts/synaptics/berlin4ct.dtsi12
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-main.dtsi4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65.dtsi2
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts38
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-main.dtsi161
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi42
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi80
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200.dtsi2
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-main.dtsi70
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e.dtsi2
-rw-r--r--arch/arm64/boot/dts/toshiba/tmpv7708-rm-mbrc.dts27
-rw-r--r--arch/arm64/boot/dts/toshiba/tmpv7708.dtsi48
-rw-r--r--arch/arm64/boot/dts/xilinx/Makefile1
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi22
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts33
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts94
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts40
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts293
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts89
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts70
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp.dtsi128
-rw-r--r--arch/arm64/boot/dts/zte/Makefile3
-rw-r--r--arch/arm64/boot/dts/zte/zx296718-evb.dts144
-rw-r--r--arch/arm64/boot/dts/zte/zx296718-pcbox.dts143
-rw-r--r--arch/arm64/boot/dts/zte/zx296718.dtsi627
-rw-r--r--arch/arm64/configs/defconfig42
-rw-r--r--arch/arm64/crypto/aes-glue.c71
-rw-r--r--arch/arm64/crypto/aes-modes.S217
-rw-r--r--arch/arm64/crypto/aes-neonbs-core.S8
-rw-r--r--arch/arm64/crypto/crct10dif-ce-core.S43
-rw-r--r--arch/arm64/crypto/crct10dif-ce-glue.c30
-rw-r--r--arch/arm64/crypto/sha1-ce-core.S47
-rw-r--r--arch/arm64/crypto/sha1-ce-glue.c23
-rw-r--r--arch/arm64/crypto/sha2-ce-core.S38
-rw-r--r--arch/arm64/crypto/sha2-ce-glue.c24
-rw-r--r--arch/arm64/crypto/sha3-ce-core.S81
-rw-r--r--arch/arm64/crypto/sha3-ce-glue.c18
-rw-r--r--arch/arm64/crypto/sha512-ce-core.S29
-rw-r--r--arch/arm64/crypto/sha512-ce-glue.c55
-rw-r--r--arch/arm64/include/asm/archrandom.h82
-rw-r--r--arch/arm64/include/asm/asm-uaccess.h4
-rw-r--r--arch/arm64/include/asm/assembler.h33
-rw-r--r--arch/arm64/include/asm/cache.h1
-rw-r--r--arch/arm64/include/asm/cacheflush.h5
-rw-r--r--arch/arm64/include/asm/cpufeature.h11
-rw-r--r--arch/arm64/include/asm/el2_setup.h60
-rw-r--r--arch/arm64/include/asm/hyp_image.h29
-rw-r--r--arch/arm64/include/asm/kasan.h1
-rw-r--r--arch/arm64/include/asm/kexec.h5
-rw-r--r--arch/arm64/include/asm/kfence.h22
-rw-r--r--arch/arm64/include/asm/kvm_asm.h26
-rw-r--r--arch/arm64/include/asm/kvm_host.h3
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h61
-rw-r--r--arch/arm64/include/asm/kvm_pgtable.h5
-rw-r--r--arch/arm64/include/asm/memory.h23
-rw-r--r--arch/arm64/include/asm/mmu_context.h7
-rw-r--r--arch/arm64/include/asm/module.lds.h6
-rw-r--r--arch/arm64/include/asm/mte-def.h2
-rw-r--r--arch/arm64/include/asm/mte-kasan.h77
-rw-r--r--arch/arm64/include/asm/mte.h2
-rw-r--r--arch/arm64/include/asm/numa.h48
-rw-r--r--arch/arm64/include/asm/pgtable.h12
-rw-r--r--arch/arm64/include/asm/pointer_auth.h10
-rw-r--r--arch/arm64/include/asm/sections.h3
-rw-r--r--arch/arm64/include/asm/setup.h11
-rw-r--r--arch/arm64/include/asm/sparsemem.h23
-rw-r--r--arch/arm64/include/asm/spinlock.h2
-rw-r--r--arch/arm64/include/asm/stackprotector.h1
-rw-r--r--arch/arm64/include/asm/sysreg.h23
-rw-r--r--arch/arm64/include/asm/trans_pgd.h39
-rw-r--r--arch/arm64/include/asm/uaccess.h2
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/include/asm/unistd32.h2
-rw-r--r--arch/arm64/include/asm/virt.h7
-rw-r--r--arch/arm64/kernel/Makefile7
-rw-r--r--arch/arm64/kernel/acpi_numa.c12
-rw-r--r--arch/arm64/kernel/alternative.c2
-rw-r--r--arch/arm64/kernel/asm-offsets.c3
-rw-r--r--arch/arm64/kernel/cpu_errata.c2
-rw-r--r--arch/arm64/kernel/cpufeature.c81
-rw-r--r--arch/arm64/kernel/entry-common.c54
-rw-r--r--arch/arm64/kernel/entry.S14
-rw-r--r--arch/arm64/kernel/head.S76
-rw-r--r--arch/arm64/kernel/hibernate.c271
-rw-r--r--arch/arm64/kernel/hyp-stub.S129
-rw-r--r--arch/arm64/kernel/idreg-override.c216
-rw-r--r--arch/arm64/kernel/image-vars.h1
-rw-r--r--arch/arm64/kernel/kaslr.c43
-rw-r--r--arch/arm64/kernel/machine_kexec.c57
-rw-r--r--arch/arm64/kernel/machine_kexec_file.c4
-rw-r--r--arch/arm64/kernel/module-plts.c2
-rw-r--r--arch/arm64/kernel/mte.c61
-rw-r--r--arch/arm64/kernel/perf_event.c15
-rw-r--r--arch/arm64/kernel/probes/uprobes.c2
-rw-r--r--arch/arm64/kernel/process.c6
-rw-r--r--arch/arm64/kernel/ptrace.c3
-rw-r--r--arch/arm64/kernel/relocate_kernel.S48
-rw-r--r--arch/arm64/kernel/setup.c15
-rw-r--r--arch/arm64/kernel/sleep.S1
-rw-r--r--arch/arm64/kernel/smp.c4
-rw-r--r--arch/arm64/kernel/stacktrace.c13
-rw-r--r--arch/arm64/kernel/suspend.c2
-rw-r--r--arch/arm64/kernel/syscall.c30
-rw-r--r--arch/arm64/kernel/topology.c115
-rw-r--r--arch/arm64/kernel/traps.c2
-rw-r--r--arch/arm64/kernel/vdso-wrap.S (renamed from arch/arm64/kernel/vdso/vdso.S)0
-rw-r--r--arch/arm64/kernel/vdso/Makefile4
-rwxr-xr-xarch/arm64/kernel/vdso/gen_vdso_offsets.sh2
-rw-r--r--arch/arm64/kernel/vdso32-wrap.S (renamed from arch/arm64/kernel/vdso32/vdso.S)0
-rw-r--r--arch/arm64/kernel/vdso32/Makefile1
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S26
-rw-r--r--arch/arm64/kvm/Makefile2
-rw-r--r--arch/arm64/kvm/arm.c13
-rw-r--r--arch/arm64/kvm/hyp/hyp-entry.S2
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/switch.h4
-rw-r--r--arch/arm64/kvm/hyp/nvhe/.gitignore2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/Makefile33
-rw-r--r--arch/arm64/kvm/hyp/nvhe/gen-hyprel.c438
-rw-r--r--arch/arm64/kvm/hyp/nvhe/host.S29
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-init.S41
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-main.c11
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-smp.c4
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp.lds.S9
-rw-r--r--arch/arm64/kvm/hyp/nvhe/psci-relay.c37
-rw-r--r--arch/arm64/kvm/hyp/pgtable.c83
-rw-r--r--arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c2
-rw-r--r--arch/arm64/kvm/hypercalls.c6
-rw-r--r--arch/arm64/kvm/mmu.c13
-rw-r--r--arch/arm64/kvm/pmu-emul.c24
-rw-r--r--arch/arm64/kvm/sys_regs.c178
-rw-r--r--arch/arm64/kvm/trng.c85
-rw-r--r--arch/arm64/kvm/va_layout.c34
-rw-r--r--arch/arm64/lib/mte.S16
-rw-r--r--arch/arm64/mm/Makefile2
-rw-r--r--arch/arm64/mm/fault.c58
-rw-r--r--arch/arm64/mm/init.c4
-rw-r--r--arch/arm64/mm/mmap.c15
-rw-r--r--arch/arm64/mm/mmu.c31
-rw-r--r--arch/arm64/mm/physaddr.c2
-rw-r--r--arch/arm64/mm/proc.S16
-rw-r--r--arch/arm64/mm/ptdump.c1
-rw-r--r--arch/arm64/mm/trans_pgd.c324
-rw-r--r--arch/arm64/net/bpf_jit_comp.c16
-rw-r--r--arch/c6x/Kconfig113
-rw-r--r--arch/c6x/Kconfig.debug10
-rw-r--r--arch/c6x/Makefile60
-rw-r--r--arch/c6x/boot/dts/Makefile16
-rw-r--r--arch/c6x/boot/dts/dsk6455.dts57
-rw-r--r--arch/c6x/boot/dts/evmc6457.dts43
-rw-r--r--arch/c6x/boot/dts/evmc6472.dts68
-rw-r--r--arch/c6x/boot/dts/evmc6474.dts53
-rw-r--r--arch/c6x/boot/dts/evmc6678.dts78
-rw-r--r--arch/c6x/boot/dts/tms320c6455.dtsi97
-rw-r--r--arch/c6x/boot/dts/tms320c6457.dtsi69
-rw-r--r--arch/c6x/boot/dts/tms320c6472.dtsi135
-rw-r--r--arch/c6x/boot/dts/tms320c6474.dtsi90
-rw-r--r--arch/c6x/boot/dts/tms320c6678.dtsi147
-rw-r--r--arch/c6x/configs/dsk6455_defconfig42
-rw-r--r--arch/c6x/configs/evmc6457_defconfig39
-rw-r--r--arch/c6x/configs/evmc6472_defconfig40
-rw-r--r--arch/c6x/configs/evmc6474_defconfig40
-rw-r--r--arch/c6x/configs/evmc6678_defconfig40
-rw-r--r--arch/c6x/include/asm/Kbuild5
-rw-r--r--arch/c6x/include/asm/asm-offsets.h1
-rw-r--r--arch/c6x/include/asm/bitops.h95
-rw-r--r--arch/c6x/include/asm/bug.h20
-rw-r--r--arch/c6x/include/asm/cache.h94
-rw-r--r--arch/c6x/include/asm/cacheflush.h45
-rw-r--r--arch/c6x/include/asm/checksum.h34
-rw-r--r--arch/c6x/include/asm/clock.h145
-rw-r--r--arch/c6x/include/asm/cmpxchg.h63
-rw-r--r--arch/c6x/include/asm/delay.h64
-rw-r--r--arch/c6x/include/asm/dscr.h30
-rw-r--r--arch/c6x/include/asm/elf.h117
-rw-r--r--arch/c6x/include/asm/flat.h19
-rw-r--r--arch/c6x/include/asm/ftrace.h6
-rw-r--r--arch/c6x/include/asm/hardirq.h17
-rw-r--r--arch/c6x/include/asm/irq.h50
-rw-r--r--arch/c6x/include/asm/irqflags.h68
-rw-r--r--arch/c6x/include/asm/linkage.h31
-rw-r--r--arch/c6x/include/asm/megamod-pic.h10
-rw-r--r--arch/c6x/include/asm/mmu_context.h6
-rw-r--r--arch/c6x/include/asm/module.h20
-rw-r--r--arch/c6x/include/asm/page.h9
-rw-r--r--arch/c6x/include/asm/pgtable.h66
-rw-r--r--arch/c6x/include/asm/processor.h114
-rw-r--r--arch/c6x/include/asm/procinfo.h24
-rw-r--r--arch/c6x/include/asm/ptrace.h32
-rw-r--r--arch/c6x/include/asm/sections.h12
-rw-r--r--arch/c6x/include/asm/setup.h31
-rw-r--r--arch/c6x/include/asm/soc.h35
-rw-r--r--arch/c6x/include/asm/special_insns.h60
-rw-r--r--arch/c6x/include/asm/string.h18
-rw-r--r--arch/c6x/include/asm/switch_to.h30
-rw-r--r--arch/c6x/include/asm/syscall.h75
-rw-r--r--arch/c6x/include/asm/syscalls.h46
-rw-r--r--arch/c6x/include/asm/thread_info.h94
-rw-r--r--arch/c6x/include/asm/timer64.h7
-rw-r--r--arch/c6x/include/asm/timex.h30
-rw-r--r--arch/c6x/include/asm/tlb.h7
-rw-r--r--arch/c6x/include/asm/traps.h33
-rw-r--r--arch/c6x/include/asm/uaccess.h97
-rw-r--r--arch/c6x/include/asm/unaligned.h104
-rw-r--r--arch/c6x/include/asm/vmalloc.h4
-rw-r--r--arch/c6x/include/uapi/asm/byteorder.h13
-rw-r--r--arch/c6x/include/uapi/asm/ptrace.h164
-rw-r--r--arch/c6x/include/uapi/asm/setup.h7
-rw-r--r--arch/c6x/include/uapi/asm/sigcontext.h81
-rw-r--r--arch/c6x/include/uapi/asm/swab.h55
-rw-r--r--arch/c6x/include/uapi/asm/unistd.h29
-rw-r--r--arch/c6x/kernel/Makefile13
-rw-r--r--arch/c6x/kernel/asm-offsets.c123
-rw-r--r--arch/c6x/kernel/c6x_ksyms.c62
-rw-r--r--arch/c6x/kernel/devicetree.c14
-rw-r--r--arch/c6x/kernel/entry.S736
-rw-r--r--arch/c6x/kernel/head.S81
-rw-r--r--arch/c6x/kernel/irq.c127
-rw-r--r--arch/c6x/kernel/module.c119
-rw-r--r--arch/c6x/kernel/process.c151
-rw-r--r--arch/c6x/kernel/ptrace.c139
-rw-r--r--arch/c6x/kernel/setup.c476
-rw-r--r--arch/c6x/kernel/signal.c322
-rw-r--r--arch/c6x/kernel/soc.c87
-rw-r--r--arch/c6x/kernel/switch_to.S71
-rw-r--r--arch/c6x/kernel/sys_c6x.c71
-rw-r--r--arch/c6x/kernel/time.c63
-rw-r--r--arch/c6x/kernel/traps.c409
-rw-r--r--arch/c6x/kernel/vectors.S78
-rw-r--r--arch/c6x/kernel/vmlinux.lds.S151
-rw-r--r--arch/c6x/lib/Makefile8
-rw-r--r--arch/c6x/lib/checksum.c11
-rw-r--r--arch/c6x/lib/csum_64plus.S414
-rw-r--r--arch/c6x/lib/divi.S41
-rw-r--r--arch/c6x/lib/divremi.S34
-rw-r--r--arch/c6x/lib/divremu.S75
-rw-r--r--arch/c6x/lib/divu.S86
-rw-r--r--arch/c6x/lib/llshl.S25
-rw-r--r--arch/c6x/lib/llshr.S26
-rw-r--r--arch/c6x/lib/llshru.S26
-rw-r--r--arch/c6x/lib/memcpy_64plus.S43
-rw-r--r--arch/c6x/lib/mpyll.S37
-rw-r--r--arch/c6x/lib/negll.S19
-rw-r--r--arch/c6x/lib/pop_rts.S20
-rw-r--r--arch/c6x/lib/push_rts.S19
-rw-r--r--arch/c6x/lib/remi.S52
-rw-r--r--arch/c6x/lib/remu.S70
-rw-r--r--arch/c6x/lib/strasgi.S77
-rw-r--r--arch/c6x/lib/strasgi_64plus.S27
-rw-r--r--arch/c6x/mm/Makefile6
-rw-r--r--arch/c6x/mm/dma-coherent.c173
-rw-r--r--arch/c6x/mm/init.c65
-rw-r--r--arch/c6x/platforms/Kconfig21
-rw-r--r--arch/c6x/platforms/Makefile13
-rw-r--r--arch/c6x/platforms/cache.c444
-rw-r--r--arch/c6x/platforms/dscr.c595
-rw-r--r--arch/c6x/platforms/emif.c84
-rw-r--r--arch/c6x/platforms/megamod-pic.c344
-rw-r--r--arch/c6x/platforms/pll.c440
-rw-r--r--arch/c6x/platforms/plldata.c467
-rw-r--r--arch/c6x/platforms/timer64.c241
-rw-r--r--arch/csky/Kconfig24
-rw-r--r--arch/csky/abiv1/inc/abi/cacheflush.h1
-rw-r--r--arch/csky/abiv1/inc/abi/ckmmu.h10
-rw-r--r--arch/csky/abiv1/inc/abi/entry.h1
-rw-r--r--arch/csky/abiv1/inc/abi/page.h1
-rw-r--r--arch/csky/abiv1/inc/abi/pgtable-bits.h40
-rw-r--r--arch/csky/abiv1/inc/abi/reg_ops.h1
-rw-r--r--arch/csky/abiv1/inc/abi/regdef.h6
-rw-r--r--arch/csky/abiv1/inc/abi/string.h1
-rw-r--r--arch/csky/abiv1/inc/abi/switch_context.h1
-rw-r--r--arch/csky/abiv1/inc/abi/vdso.h18
-rw-r--r--arch/csky/abiv2/cacheflush.c3
-rw-r--r--arch/csky/abiv2/inc/abi/ckmmu.h44
-rw-r--r--arch/csky/abiv2/inc/abi/entry.h20
-rw-r--r--arch/csky/abiv2/inc/abi/fpu.h1
-rw-r--r--arch/csky/abiv2/inc/abi/page.h1
-rw-r--r--arch/csky/abiv2/inc/abi/pgtable-bits.h37
-rw-r--r--arch/csky/abiv2/inc/abi/reg_ops.h1
-rw-r--r--arch/csky/abiv2/inc/abi/regdef.h6
-rw-r--r--arch/csky/abiv2/inc/abi/switch_context.h1
-rw-r--r--arch/csky/abiv2/inc/abi/vdso.h20
-rw-r--r--arch/csky/abiv2/sysdep.h1
-rw-r--r--arch/csky/include/asm/addrspace.h1
-rw-r--r--arch/csky/include/asm/atomic.h212
-rw-r--r--arch/csky/include/asm/barrier.h83
-rw-r--r--arch/csky/include/asm/bitops.h1
-rw-r--r--arch/csky/include/asm/bug.h3
-rw-r--r--arch/csky/include/asm/cacheflush.h1
-rw-r--r--arch/csky/include/asm/checksum.h1
-rw-r--r--arch/csky/include/asm/clocksource.h8
-rw-r--r--arch/csky/include/asm/cmpxchg.h27
-rw-r--r--arch/csky/include/asm/elf.h1
-rw-r--r--arch/csky/include/asm/fixmap.h1
-rw-r--r--arch/csky/include/asm/ftrace.h1
-rw-r--r--arch/csky/include/asm/futex.h121
-rw-r--r--arch/csky/include/asm/highmem.h1
-rw-r--r--arch/csky/include/asm/io.h1
-rw-r--r--arch/csky/include/asm/memory.h2
-rw-r--r--arch/csky/include/asm/mmu.h1
-rw-r--r--arch/csky/include/asm/mmu_context.h10
-rw-r--r--arch/csky/include/asm/page.h2
-rw-r--r--arch/csky/include/asm/perf_event.h1
-rw-r--r--arch/csky/include/asm/pgalloc.h3
-rw-r--r--arch/csky/include/asm/pgtable.h80
-rw-r--r--arch/csky/include/asm/processor.h3
-rw-r--r--arch/csky/include/asm/ptrace.h1
-rw-r--r--arch/csky/include/asm/segment.h3
-rw-r--r--arch/csky/include/asm/shmparam.h1
-rw-r--r--arch/csky/include/asm/spinlock.h167
-rw-r--r--arch/csky/include/asm/spinlock_types.h10
-rw-r--r--arch/csky/include/asm/string.h1
-rw-r--r--arch/csky/include/asm/switch_to.h1
-rw-r--r--arch/csky/include/asm/syscalls.h1
-rw-r--r--arch/csky/include/asm/thread_info.h2
-rw-r--r--arch/csky/include/asm/tlb.h1
-rw-r--r--arch/csky/include/asm/tlbflush.h1
-rw-r--r--arch/csky/include/asm/traps.h1
-rw-r--r--arch/csky/include/asm/uaccess.h1
-rw-r--r--arch/csky/include/asm/unistd.h1
-rw-r--r--arch/csky/include/asm/vdso.h21
-rw-r--r--arch/csky/include/asm/vdso/clocksource.h9
-rw-r--r--arch/csky/include/asm/vdso/gettimeofday.h114
-rw-r--r--arch/csky/include/asm/vdso/processor.h12
-rw-r--r--arch/csky/include/asm/vdso/vsyscall.h22
-rw-r--r--arch/csky/include/uapi/asm/byteorder.h1
-rw-r--r--arch/csky/include/uapi/asm/perf_regs.h1
-rw-r--r--arch/csky/include/uapi/asm/ptrace.h1
-rw-r--r--arch/csky/include/uapi/asm/sigcontext.h1
-rw-r--r--arch/csky/include/uapi/asm/unistd.h1
-rw-r--r--arch/csky/kernel/Makefile2
-rw-r--r--arch/csky/kernel/atomic.S24
-rw-r--r--arch/csky/kernel/entry.S106
-rw-r--r--arch/csky/kernel/head.S10
-rw-r--r--arch/csky/kernel/perf_event.c4
-rw-r--r--arch/csky/kernel/probes/simulate-insn.c22
-rw-r--r--arch/csky/kernel/process.c2
-rw-r--r--arch/csky/kernel/ptrace.c128
-rw-r--r--arch/csky/kernel/setup.c18
-rw-r--r--arch/csky/kernel/signal.c4
-rw-r--r--arch/csky/kernel/smp.c7
-rw-r--r--arch/csky/kernel/traps.c10
-rw-r--r--arch/csky/kernel/vdso.c127
-rw-r--r--arch/csky/kernel/vdso/.gitignore (renamed from arch/arm/mach-efm32/Makefile)4
-rw-r--r--arch/csky/kernel/vdso/Makefile72
-rw-r--r--arch/csky/kernel/vdso/note.S12
-rw-r--r--arch/csky/kernel/vdso/rt_sigreturn.S14
-rwxr-xr-xarch/csky/kernel/vdso/so2s.sh5
-rw-r--r--arch/csky/kernel/vdso/vdso.S16
-rw-r--r--arch/csky/kernel/vdso/vdso.lds.S58
-rw-r--r--arch/csky/kernel/vdso/vgettimeofday.c28
-rw-r--r--arch/csky/kernel/vmlinux.lds.S2
-rw-r--r--arch/csky/mm/fault.c388
-rw-r--r--arch/csky/mm/init.c56
-rw-r--r--arch/csky/mm/tlb.c42
-rw-r--r--arch/h8300/kernel/asm-offsets.c3
-rw-r--r--arch/h8300/kernel/process.c2
-rw-r--r--arch/hexagon/Kconfig1
-rw-r--r--arch/hexagon/configs/comet_defconfig1
-rw-r--r--arch/hexagon/kernel/process.c2
-rw-r--r--arch/ia64/Kconfig1
-rw-r--r--arch/ia64/Makefile10
-rw-r--r--arch/ia64/configs/bigsur_defconfig1
-rw-r--r--arch/ia64/include/asm/efi.h13
-rw-r--r--arch/ia64/include/asm/hw_irq.h1
-rw-r--r--arch/ia64/include/asm/irq.h4
-rw-r--r--arch/ia64/include/asm/mca.h11
-rw-r--r--arch/ia64/include/asm/pal.h4
-rw-r--r--arch/ia64/include/asm/perfmon.h111
-rw-r--r--arch/ia64/include/asm/pgtable.h6
-rw-r--r--arch/ia64/include/asm/sal.h2
-rw-r--r--arch/ia64/include/asm/tlb.h4
-rw-r--r--arch/ia64/include/uapi/asm/cmpxchg.h2
-rw-r--r--arch/ia64/include/uapi/asm/perfmon.h178
-rw-r--r--arch/ia64/include/uapi/asm/perfmon_default_smpl.h84
-rw-r--r--arch/ia64/kernel/Makefile5
-rw-r--r--arch/ia64/kernel/asm-offsets.c18
-rw-r--r--arch/ia64/kernel/crash.c3
-rw-r--r--arch/ia64/kernel/efi.c2
-rw-r--r--arch/ia64/kernel/machine_kexec.c1
-rw-r--r--arch/ia64/kernel/mca.c6
-rw-r--r--arch/ia64/kernel/mca_drv.c2
-rw-r--r--arch/ia64/kernel/nr-irqs.c22
-rw-r--r--arch/ia64/kernel/palinfo.c41
-rw-r--r--arch/ia64/kernel/perfmon_default_smpl.c297
-rw-r--r--arch/ia64/kernel/perfmon_generic.h46
-rw-r--r--arch/ia64/kernel/perfmon_itanium.h2
-rw-r--r--arch/ia64/kernel/perfmon_mckinley.h188
-rw-r--r--arch/ia64/kernel/perfmon_montecito.h270
-rw-r--r--arch/ia64/kernel/process.c2
-rw-r--r--arch/ia64/kernel/signal.c3
-rw-r--r--arch/ia64/kernel/smpboot.c1
-rw-r--r--arch/ia64/kernel/syscalls/Makefile13
-rw-r--r--arch/ia64/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/ia64/kernel/time.c32
-rw-r--r--arch/ia64/kernel/uncached.c4
-rw-r--r--arch/ia64/mm/contig.c1
-rw-r--r--arch/ia64/mm/discontig.c1
-rw-r--r--arch/ia64/mm/init.c15
-rw-r--r--arch/ia64/oprofile/Makefile10
-rw-r--r--arch/ia64/oprofile/backtrace.c131
-rw-r--r--arch/ia64/oprofile/init.c28
-rw-r--r--arch/ia64/scripts/unwcheck.py2
-rw-r--r--arch/m68k/coldfire/clk.c4
-rw-r--r--arch/m68k/configs/amiga_defconfig3
-rw-r--r--arch/m68k/configs/apollo_defconfig3
-rw-r--r--arch/m68k/configs/atari_defconfig3
-rw-r--r--arch/m68k/configs/bvme6000_defconfig3
-rw-r--r--arch/m68k/configs/hp300_defconfig3
-rw-r--r--arch/m68k/configs/mac_defconfig3
-rw-r--r--arch/m68k/configs/multi_defconfig3
-rw-r--r--arch/m68k/configs/mvme147_defconfig3
-rw-r--r--arch/m68k/configs/mvme16x_defconfig3
-rw-r--r--arch/m68k/configs/q40_defconfig3
-rw-r--r--arch/m68k/configs/sun3_defconfig3
-rw-r--r--arch/m68k/configs/sun3x_defconfig3
-rw-r--r--arch/m68k/emu/nfblock.c2
-rw-r--r--arch/m68k/include/asm/page.h2
-rw-r--r--arch/m68k/kernel/process.c2
-rw-r--r--arch/m68k/kernel/syscalls/Makefile13
-rw-r--r--arch/m68k/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/microblaze/Kconfig2
-rw-r--r--arch/microblaze/Makefile2
-rw-r--r--arch/microblaze/kernel/module.c26
-rw-r--r--arch/microblaze/kernel/process.c2
-rw-r--r--arch/microblaze/kernel/syscalls/Makefile13
-rw-r--r--arch/microblaze/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/microblaze/kernel/vmlinux.lds.S2
-rw-r--r--arch/microblaze/oprofile/Makefile14
-rw-r--r--arch/microblaze/oprofile/microblaze_oprofile.c22
-rw-r--r--arch/mips/Kbuild.platforms1
-rw-r--r--arch/mips/Kconfig80
-rw-r--r--arch/mips/Makefile21
-rw-r--r--arch/mips/alchemy/common/prom.c4
-rw-r--r--arch/mips/alchemy/common/setup.c34
-rw-r--r--arch/mips/ar7/memory.c5
-rw-r--r--arch/mips/ath25/prom.c4
-rw-r--r--arch/mips/ath79/prom.c5
-rw-r--r--arch/mips/ath79/setup.c13
-rw-r--r--arch/mips/bcm47xx/prom.c4
-rw-r--r--arch/mips/bcm63xx/prom.c4
-rw-r--r--arch/mips/bmips/setup.c13
-rw-r--r--arch/mips/boot/compressed/Makefile1
-rw-r--r--arch/mips/boot/compressed/head.S20
-rw-r--r--arch/mips/boot/dts/Makefile1
-rw-r--r--arch/mips/boot/dts/realtek/Makefile2
-rw-r--r--arch/mips/boot/dts/realtek/cisco_sg220-26.dts25
-rw-r--r--arch/mips/boot/dts/realtek/rtl838x.dtsi21
-rw-r--r--arch/mips/boot/dts/realtek/rtl83xx.dtsi59
-rw-r--r--arch/mips/cavium-octeon/setup.c9
-rw-r--r--arch/mips/cobalt/setup.c5
-rw-r--r--arch/mips/configs/fuloong2e_defconfig1
-rw-r--r--arch/mips/configs/ip32_defconfig1
-rw-r--r--arch/mips/configs/lemote2f_defconfig1
-rw-r--r--arch/mips/configs/loongson3_defconfig2
-rw-r--r--arch/mips/configs/mtx1_defconfig1
-rw-r--r--arch/mips/configs/nlm_xlp_defconfig1
-rw-r--r--arch/mips/configs/nlm_xlr_defconfig1
-rw-r--r--arch/mips/configs/rs90_defconfig1
-rw-r--r--arch/mips/fw/arc/memory.c2
-rw-r--r--arch/mips/fw/sni/sniprom.c4
-rw-r--r--arch/mips/generic/init.c9
-rw-r--r--arch/mips/include/asm/Kbuild4
-rw-r--r--arch/mips/include/asm/asm.h18
-rw-r--r--arch/mips/include/asm/atomic.h2
-rw-r--r--arch/mips/include/asm/bitops.h12
-rw-r--r--arch/mips/include/asm/bootinfo.h22
-rw-r--r--arch/mips/include/asm/checksum.h6
-rw-r--r--arch/mips/include/asm/cmpxchg.h6
-rw-r--r--arch/mips/include/asm/cpu-type.h5
-rw-r--r--arch/mips/include/asm/cpu.h2
-rw-r--r--arch/mips/include/asm/dma-coherence.h38
-rw-r--r--arch/mips/include/asm/elf.h56
-rw-r--r--arch/mips/include/asm/elfcore-compat.h29
-rw-r--r--arch/mips/include/asm/inst.h6
-rw-r--r--arch/mips/include/asm/irq.h1
-rw-r--r--arch/mips/include/asm/irq_cpu.h2
-rw-r--r--arch/mips/include/asm/kvm_host.h1
-rw-r--r--arch/mips/include/asm/mach-generic/irq.h6
-rw-r--r--arch/mips/include/asm/mach-loongson2ef/loongson.h9
-rw-r--r--arch/mips/include/asm/mach-loongson64/loongson.h2
-rw-r--r--arch/mips/include/asm/mach-n64/irq.h9
-rw-r--r--arch/mips/include/asm/mach-n64/kmalloc.h8
-rw-r--r--arch/mips/include/asm/mach-pistachio/irq.h15
-rw-r--r--arch/mips/include/asm/mipsregs.h4
-rw-r--r--arch/mips/include/asm/octeon/octeon.h1
-rw-r--r--arch/mips/include/asm/page.h17
-rw-r--r--arch/mips/include/asm/pgtable.h14
-rw-r--r--arch/mips/include/asm/ptrace.h2
-rw-r--r--arch/mips/include/asm/r4kcache.h67
-rw-r--r--arch/mips/include/asm/spinlock.h2
-rw-r--r--arch/mips/include/asm/spram.h2
-rw-r--r--arch/mips/include/asm/traps.h1
-rw-r--r--arch/mips/include/asm/vermagic.h2
-rw-r--r--arch/mips/include/asm/vpe.h3
-rw-r--r--arch/mips/include/uapi/asm/Kbuild3
-rw-r--r--arch/mips/include/uapi/asm/perf_regs.h40
-rw-r--r--arch/mips/jazz/Kconfig3
-rw-r--r--arch/mips/kernel/Makefile7
-rw-r--r--arch/mips/kernel/binfmt_elfn32.c113
-rw-r--r--arch/mips/kernel/binfmt_elfo32.c116
-rw-r--r--arch/mips/kernel/cacheinfo.c30
-rw-r--r--arch/mips/kernel/cevt-txx9.c2
-rw-r--r--arch/mips/kernel/cps-vec.S1
-rw-r--r--arch/mips/kernel/cpu-probe.c24
-rw-r--r--arch/mips/kernel/crash_dump.c41
-rw-r--r--arch/mips/kernel/ftrace.c4
-rw-r--r--arch/mips/kernel/genex.S4
-rw-r--r--arch/mips/kernel/head.S31
-rw-r--r--arch/mips/kernel/idle.c1
-rw-r--r--arch/mips/kernel/irq-rm7000.c45
-rw-r--r--arch/mips/kernel/kgdb.c23
-rw-r--r--arch/mips/kernel/module.c109
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c22
-rw-r--r--arch/mips/kernel/perf_regs.c68
-rw-r--r--arch/mips/kernel/process.c85
-rw-r--r--arch/mips/kernel/r4k-bugs64.c2
-rw-r--r--arch/mips/kernel/relocate.c72
-rw-r--r--arch/mips/kernel/scall64-n64.S2
-rw-r--r--arch/mips/kernel/setup.c71
-rw-r--r--arch/mips/kernel/smp-cps.c3
-rw-r--r--arch/mips/kernel/smp.c6
-rw-r--r--arch/mips/kernel/syscalls/Makefile43
-rw-r--r--arch/mips/kernel/syscalls/syscall_n32.tbl1
-rw-r--r--arch/mips/kernel/syscalls/syscall_n64.tbl1
-rw-r--r--arch/mips/kernel/syscalls/syscall_o32.tbl1
-rw-r--r--arch/mips/kernel/vmlinux.lds.S11
-rw-r--r--arch/mips/kernel/vpe-cmp.c4
-rw-r--r--arch/mips/kernel/vpe-mt.c4
-rw-r--r--arch/mips/kernel/vpe.c33
-rw-r--r--arch/mips/kvm/mips.c2
-rw-r--r--arch/mips/lantiq/irq.c10
-rw-r--r--arch/mips/lantiq/prom.c11
-rw-r--r--arch/mips/lib/iomap-pci.c2
-rw-r--r--arch/mips/loongson2ef/common/init.c5
-rw-r--r--arch/mips/loongson2ef/common/mem.c11
-rw-r--r--arch/mips/loongson2ef/fuloong-2e/irq.c2
-rw-r--r--arch/mips/loongson2ef/lemote-2f/irq.c1
-rw-r--r--arch/mips/loongson32/common/prom.c4
-rw-r--r--arch/mips/loongson64/Platform24
-rw-r--r--arch/mips/loongson64/init.c54
-rw-r--r--arch/mips/loongson64/numa.c52
-rw-r--r--arch/mips/loongson64/smp.c8
-rw-r--r--arch/mips/mm/c-r4k.c13
-rw-r--r--arch/mips/mm/cache.c39
-rw-r--r--arch/mips/mm/dma-noncoherent.c3
-rw-r--r--arch/mips/mm/fault.c5
-rw-r--r--arch/mips/mm/init.c5
-rw-r--r--arch/mips/mm/pgtable-32.c1
-rw-r--r--arch/mips/mm/pgtable-64.c1
-rw-r--r--arch/mips/mm/tlbex.c1
-rw-r--r--arch/mips/mti-malta/malta-init.c1
-rw-r--r--arch/mips/mti-malta/malta-memory.c4
-rw-r--r--arch/mips/mti-malta/malta-setup.c34
-rw-r--r--arch/mips/mti-malta/malta-time.c2
-rw-r--r--arch/mips/n64/Makefile6
-rw-r--r--arch/mips/n64/Platform7
-rw-r--r--arch/mips/n64/init.c164
-rw-r--r--arch/mips/n64/irq.c16
-rw-r--r--arch/mips/net/ebpf_jit.c11
-rw-r--r--arch/mips/netlogic/xlp/setup.c5
-rw-r--r--arch/mips/netlogic/xlr/setup.c5
-rw-r--r--arch/mips/oprofile/Makefile18
-rw-r--r--arch/mips/oprofile/backtrace.c177
-rw-r--r--arch/mips/oprofile/common.c147
-rw-r--r--arch/mips/oprofile/op_impl.h41
-rw-r--r--arch/mips/oprofile/op_model_loongson2.c161
-rw-r--r--arch/mips/oprofile/op_model_loongson3.c213
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c479
-rw-r--r--arch/mips/pci/pci-alchemy.c7
-rw-r--r--arch/mips/pci/pci-ar2315.c6
-rw-r--r--arch/mips/pic32/pic32mzda/init.c19
-rw-r--r--arch/mips/pistachio/Platform2
-rw-r--r--arch/mips/pistachio/init.c6
-rw-r--r--arch/mips/ralink/of.c11
-rw-r--r--arch/mips/ralink/prom.c4
-rw-r--r--arch/mips/ralink/reset.c4
-rw-r--r--arch/mips/rb532/prom.c5
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c5
-rw-r--r--arch/mips/sgi-ip32/ip32-irq.c2
-rw-r--r--arch/mips/sgi-ip32/ip32-memory.c5
-rw-r--r--arch/mips/sibyte/common/cfe.c5
-rw-r--r--arch/mips/txx9/generic/setup.c4
-rw-r--r--arch/mips/vdso/Kconfig2
-rw-r--r--arch/mips/vdso/Makefile5
-rw-r--r--arch/mips/vr41xx/common/init.c4
-rw-r--r--arch/nds32/configs/defconfig1
-rw-r--r--arch/nds32/kernel/process.c2
-rw-r--r--arch/nds32/kernel/setup.c2
-rw-r--r--arch/nds32/kernel/time.c2
-rw-r--r--arch/nds32/kernel/traps.c50
-rw-r--r--arch/nios2/kernel/entry.S3
-rw-r--r--arch/nios2/kernel/process.c2
-rw-r--r--arch/nios2/kernel/setup.c21
-rw-r--r--arch/nios2/kernel/sys_nios2.c11
-rw-r--r--arch/openrisc/Kbuild3
-rw-r--r--arch/openrisc/Makefile21
-rw-r--r--arch/openrisc/boot/.gitignore (renamed from arch/c6x/include/uapi/asm/Kbuild)2
-rw-r--r--arch/openrisc/boot/Makefile (renamed from arch/c6x/boot/Makefile)5
-rw-r--r--arch/openrisc/kernel/process.c15
-rw-r--r--arch/openrisc/kernel/smp.c23
-rw-r--r--arch/parisc/Kconfig20
-rw-r--r--arch/parisc/Makefile2
-rw-r--r--arch/parisc/configs/generic-32bit_defconfig1
-rw-r--r--arch/parisc/configs/generic-64bit_defconfig1
-rw-r--r--arch/parisc/include/asm/compat.h2
-rw-r--r--arch/parisc/include/asm/hardirq.h4
-rw-r--r--arch/parisc/include/asm/io.h2
-rw-r--r--arch/parisc/include/asm/irq.h3
-rw-r--r--arch/parisc/include/asm/mmu_context.h7
-rw-r--r--arch/parisc/include/asm/page.h2
-rw-r--r--arch/parisc/include/asm/pgalloc.h76
-rw-r--r--arch/parisc/include/asm/pgtable.h89
-rw-r--r--arch/parisc/kernel/asm-offsets.c1
-rw-r--r--arch/parisc/kernel/entry.S129
-rw-r--r--arch/parisc/kernel/hpmc.S10
-rw-r--r--arch/parisc/kernel/irq.c5
-rw-r--r--arch/parisc/kernel/process.c4
-rw-r--r--arch/parisc/kernel/syscalls/Makefile19
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/parisc/kernel/traps.c9
-rw-r--r--arch/parisc/mm/hugetlbpage.c13
-rw-r--r--arch/parisc/mm/init.c10
-rw-r--r--arch/parisc/oprofile/Makefile10
-rw-r--r--arch/parisc/oprofile/init.c23
-rw-r--r--arch/powerpc/Kconfig38
-rw-r--r--arch/powerpc/Kconfig.debug1
-rw-r--r--arch/powerpc/Makefile4
-rw-r--r--arch/powerpc/configs/44x/akebono_defconfig6
-rw-r--r--arch/powerpc/configs/44x/currituck_defconfig1
-rw-r--r--arch/powerpc/configs/44x/fsp2_defconfig1
-rw-r--r--arch/powerpc/configs/44x/iss476-smp_defconfig1
-rw-r--r--arch/powerpc/configs/cell_defconfig1
-rw-r--r--arch/powerpc/configs/g5_defconfig1
-rw-r--r--arch/powerpc/configs/maple_defconfig1
-rw-r--r--arch/powerpc/configs/pasemi_defconfig1
-rw-r--r--arch/powerpc/configs/pmac32_defconfig1
-rw-r--r--arch/powerpc/configs/powernv_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig1
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig2
-rw-r--r--arch/powerpc/configs/ps3_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig1
-rw-r--r--arch/powerpc/crypto/sha256-spe-glue.c2
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h29
-rw-r--r--arch/powerpc/include/asm/book3s/32/kup.h8
-rw-r--r--arch/powerpc/include/asm/book3s/32/mmu-hash.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/kup.h20
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu-hash.h5
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h23
-rw-r--r--arch/powerpc/include/asm/book3s/64/pkeys.h4
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush-radix.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush.h2
-rw-r--r--arch/powerpc/include/asm/bug.h9
-rw-r--r--arch/powerpc/include/asm/cacheflush.h6
-rw-r--r--arch/powerpc/include/asm/cputable.h20
-rw-r--r--arch/powerpc/include/asm/cputime.h14
-rw-r--r--arch/powerpc/include/asm/debug.h4
-rw-r--r--arch/powerpc/include/asm/firmware.h2
-rw-r--r--arch/powerpc/include/asm/hugetlb.h2
-rw-r--r--arch/powerpc/include/asm/hvcall.h25
-rw-r--r--arch/powerpc/include/asm/hw_irq.h96
-rw-r--r--arch/powerpc/include/asm/interrupt.h449
-rw-r--r--arch/powerpc/include/asm/irq.h2
-rw-r--r--arch/powerpc/include/asm/kexec.h1
-rw-r--r--arch/powerpc/include/asm/kup.h2
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h7
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h11
-rw-r--r--arch/powerpc/include/asm/kvm_host.h8
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h12
-rw-r--r--arch/powerpc/include/asm/machdep.h3
-rw-r--r--arch/powerpc/include/asm/mce.h20
-rw-r--r--arch/powerpc/include/asm/mmu_context.h3
-rw-r--r--arch/powerpc/include/asm/nmi.h1
-rw-r--r--arch/powerpc/include/asm/oprofile_impl.h135
-rw-r--r--arch/powerpc/include/asm/paca.h13
-rw-r--r--arch/powerpc/include/asm/paravirt.h1
-rw-r--r--arch/powerpc/include/asm/perf_event.h2
-rw-r--r--arch/powerpc/include/asm/perf_event_server.h5
-rw-r--r--arch/powerpc/include/asm/pgtable.h3
-rw-r--r--arch/powerpc/include/asm/pkeys.h6
-rw-r--r--arch/powerpc/include/asm/ppc-pci.h7
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h24
-rw-r--r--arch/powerpc/include/asm/ptrace.h8
-rw-r--r--arch/powerpc/include/asm/reg.h22
-rw-r--r--arch/powerpc/include/asm/reg_booke.h3
-rw-r--r--arch/powerpc/include/asm/rtas.h2
-rw-r--r--arch/powerpc/include/asm/setup.h6
-rw-r--r--arch/powerpc/include/asm/simple_spinlock.h4
-rw-r--r--arch/powerpc/include/asm/smp.h2
-rw-r--r--arch/powerpc/include/asm/spu.h33
-rw-r--r--arch/powerpc/include/asm/thread_info.h4
-rw-r--r--arch/powerpc/include/asm/time.h2
-rw-r--r--arch/powerpc/include/asm/uaccess.h99
-rw-r--r--arch/powerpc/include/asm/vdso/timebase.h6
-rw-r--r--arch/powerpc/include/asm/xmon.h4
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h2
-rw-r--r--arch/powerpc/include/uapi/asm/perf_regs.h28
-rw-r--r--arch/powerpc/kernel/Makefile8
-rw-r--r--arch/powerpc/kernel/asm-offsets.c12
-rw-r--r--arch/powerpc/kernel/cputable.c67
-rw-r--r--arch/powerpc/kernel/dbell.c9
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c2
-rw-r--r--arch/powerpc/kernel/eeh.c121
-rw-r--r--arch/powerpc/kernel/entry_32.S347
-rw-r--r--arch/powerpc/kernel/entry_64.S8
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S8
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S324
-rw-r--r--arch/powerpc/kernel/head_32.h98
-rw-r--r--arch/powerpc/kernel/head_40x.S11
-rw-r--r--arch/powerpc/kernel/head_44x.S4
-rw-r--r--arch/powerpc/kernel/head_8xx.S13
-rw-r--r--arch/powerpc/kernel/head_book3s_32.S32
-rw-r--r--arch/powerpc/kernel/head_booke.h57
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S6
-rw-r--r--arch/powerpc/kernel/idle_book3s.S138
-rw-r--r--arch/powerpc/kernel/interrupt.c (renamed from arch/powerpc/kernel/syscall_64.c)203
-rw-r--r--arch/powerpc/kernel/iommu.c46
-rw-r--r--arch/powerpc/kernel/irq.c63
-rw-r--r--arch/powerpc/kernel/mce.c96
-rw-r--r--arch/powerpc/kernel/optprobes.c21
-rw-r--r--arch/powerpc/kernel/pci-common.c10
-rw-r--r--arch/powerpc/kernel/pci_dn.c62
-rw-r--r--arch/powerpc/kernel/process.c15
-rw-r--r--arch/powerpc/kernel/prom.c2
-rw-r--r--arch/powerpc/kernel/prom_init.c12
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace.c4
-rw-r--r--arch/powerpc/kernel/setup-common.c13
-rw-r--r--arch/powerpc/kernel/setup.h12
-rw-r--r--arch/powerpc/kernel/setup_64.c7
-rw-r--r--arch/powerpc/kernel/signal.c4
-rw-r--r--arch/powerpc/kernel/signal_32.c3
-rw-r--r--arch/powerpc/kernel/smp.c3
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c49
-rw-r--r--arch/powerpc/kernel/syscalls/Makefile21
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl21
-rw-r--r--arch/powerpc/kernel/tau_6xx.c6
-rw-r--r--arch/powerpc/kernel/time.c9
-rw-r--r--arch/powerpc/kernel/traps.c274
-rw-r--r--arch/powerpc/kernel/vdso32/Makefile5
-rw-r--r--arch/powerpc/kernel/vdso32_wrapper.S (renamed from arch/powerpc/kernel/vdso32/vdso32_wrapper.S)0
-rw-r--r--arch/powerpc/kernel/vdso64/Makefile8
-rw-r--r--arch/powerpc/kernel/vdso64/sigtramp.S11
-rw-r--r--arch/powerpc/kernel/vdso64/vdso64.lds.S2
-rw-r--r--arch/powerpc/kernel/vdso64_wrapper.S (renamed from arch/powerpc/kernel/vdso64/vdso64_wrapper.S)0
-rw-r--r--arch/powerpc/kernel/watchdog.c16
-rw-r--r--arch/powerpc/kexec/elf_64.c2
-rw-r--r--arch/powerpc/kexec/file_load_64.c35
-rw-r--r--arch/powerpc/kvm/Kconfig1
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c3
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c2
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv.c156
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c109
-rw-r--r--arch/powerpc/kvm/book3s_hv_nested.c70
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S175
-rw-r--r--arch/powerpc/kvm/book3s_xive.c4
-rw-r--r--arch/powerpc/kvm/booke.c3
-rw-r--r--arch/powerpc/kvm/powerpc.c22
-rw-r--r--arch/powerpc/lib/Makefile2
-rw-r--r--arch/powerpc/lib/pmem.c1
-rw-r--r--arch/powerpc/lib/sstep.c115
-rw-r--r--arch/powerpc/mm/book3s32/Makefile4
-rw-r--r--arch/powerpc/mm/book3s32/mmu.c2
-rw-r--r--arch/powerpc/mm/book3s64/hash_hugetlbpage.c4
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c101
-rw-r--r--arch/powerpc/mm/book3s64/internal.h2
-rw-r--r--arch/powerpc/mm/book3s64/iommu_api.c10
-rw-r--r--arch/powerpc/mm/book3s64/pgtable.c13
-rw-r--r--arch/powerpc/mm/book3s64/pkeys.c1
-rw-r--r--arch/powerpc/mm/book3s64/radix_tlb.c299
-rw-r--r--arch/powerpc/mm/book3s64/slb.c40
-rw-r--r--arch/powerpc/mm/fault.c75
-rw-r--r--arch/powerpc/mm/hugetlbpage.c20
-rw-r--r--arch/powerpc/mm/mem.c54
-rw-r--r--arch/powerpc/mm/pgtable.c15
-rw-r--r--arch/powerpc/mm/ptdump/segment_regs.c2
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c25
-rw-r--r--arch/powerpc/oprofile/Makefile19
-rw-r--r--arch/powerpc/oprofile/backtrace.c120
-rw-r--r--arch/powerpc/oprofile/cell/pr_util.h110
-rw-r--r--arch/powerpc/oprofile/cell/spu_profiler.c248
-rw-r--r--arch/powerpc/oprofile/cell/spu_task_sync.c657
-rw-r--r--arch/powerpc/oprofile/cell/vma_map.c279
-rw-r--r--arch/powerpc/oprofile/common.c243
-rw-r--r--arch/powerpc/oprofile/op_model_7450.c207
-rw-r--r--arch/powerpc/oprofile/op_model_cell.c1709
-rw-r--r--arch/powerpc/oprofile/op_model_fsl_emb.c380
-rw-r--r--arch/powerpc/oprofile/op_model_pa6t.c227
-rw-r--r--arch/powerpc/oprofile/op_model_power4.c438
-rw-r--r--arch/powerpc/perf/core-book3s.c98
-rw-r--r--arch/powerpc/perf/core-fsl-emb.c25
-rw-r--r--arch/powerpc/perf/hv-24x7.c15
-rw-r--r--arch/powerpc/perf/isa207-common.c67
-rw-r--r--arch/powerpc/perf/isa207-common.h15
-rw-r--r--arch/powerpc/perf/mpc7450-pmu.c5
-rw-r--r--arch/powerpc/perf/perf_regs.c13
-rw-r--r--arch/powerpc/perf/power10-pmu.c4
-rw-r--r--arch/powerpc/perf/power5+-pmu.c5
-rw-r--r--arch/powerpc/perf/power5-pmu.c5
-rw-r--r--arch/powerpc/perf/power6-pmu.c5
-rw-r--r--arch/powerpc/perf/power7-pmu.c5
-rw-r--r--arch/powerpc/perf/ppc970-pmu.c5
-rw-r--r--arch/powerpc/platforms/44x/Kconfig7
-rw-r--r--arch/powerpc/platforms/512x/mpc5121_ads.c13
-rw-r--r--arch/powerpc/platforms/52xx/efika.c3
-rw-r--r--arch/powerpc/platforms/52xx/lite5200.c3
-rw-r--r--arch/powerpc/platforms/52xx/media5200.c3
-rw-r--r--arch/powerpc/platforms/52xx/mpc5200_simple.c3
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c8
-rw-r--r--arch/powerpc/platforms/82xx/mpc8272_ads.c2
-rw-r--r--arch/powerpc/platforms/82xx/pq2ads-pci-pic.c13
-rw-r--r--arch/powerpc/platforms/82xx/pq2fads.c3
-rw-r--r--arch/powerpc/platforms/83xx/asp834x.c1
-rw-r--r--arch/powerpc/platforms/83xx/km83xx.c1
-rw-r--r--arch/powerpc/platforms/83xx/misc.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc830x_rdb.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc831x_rdb.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_mds.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_rdb.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_itx.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_mds.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_mds.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_rdk.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc837x_mds.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc837x_rdb.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc83xx.h2
-rw-r--r--arch/powerpc/platforms/8xx/machine_check.c2
-rw-r--r--arch/powerpc/platforms/amigaone/setup.c10
-rw-r--r--arch/powerpc/platforms/cell/Kconfig5
-rw-r--r--arch/powerpc/platforms/cell/Makefile1
-rw-r--r--arch/powerpc/platforms/cell/pervasive.c1
-rw-r--r--arch/powerpc/platforms/cell/pervasive.h3
-rw-r--r--arch/powerpc/platforms/cell/ras.c6
-rw-r--r--arch/powerpc/platforms/cell/ras.h9
-rw-r--r--arch/powerpc/platforms/cell/spu_notify.c55
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c5
-rw-r--r--arch/powerpc/platforms/cell/spufs/run.c6
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c7
-rw-r--r--arch/powerpc/platforms/cell/spufs/spufs.h1
-rw-r--r--arch/powerpc/platforms/chrp/pci.c8
-rw-r--r--arch/powerpc/platforms/chrp/setup.c12
-rw-r--r--arch/powerpc/platforms/embedded6xx/holly.c10
-rw-r--r--arch/powerpc/platforms/embedded6xx/linkstation.c10
-rw-r--r--arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c14
-rw-r--r--arch/powerpc/platforms/embedded6xx/mvme5100.c13
-rw-r--r--arch/powerpc/platforms/embedded6xx/storcenter.c8
-rw-r--r--arch/powerpc/platforms/maple/pci.c3
-rw-r--r--arch/powerpc/platforms/maple/setup.c4
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c3
-rw-r--r--arch/powerpc/platforms/powermac/pci.c4
-rw-r--r--arch/powerpc/platforms/powermac/setup.c4
-rw-r--r--arch/powerpc/platforms/powernv/idle.c1
-rw-r--r--arch/powerpc/platforms/powernv/memtrace.c29
-rw-r--r--arch/powerpc/platforms/powernv/opal-core.c6
-rw-r--r--arch/powerpc/platforms/powernv/opal.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-cxl.c22
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda-tce.c4
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c27
-rw-r--r--arch/powerpc/platforms/powernv/pci.h1
-rw-r--r--arch/powerpc/platforms/powernv/setup.c4
-rw-r--r--arch/powerpc/platforms/powernv/subcore.h2
-rw-r--r--arch/powerpc/platforms/powernv/vas.c11
-rw-r--r--arch/powerpc/platforms/powernv/vas.h1
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c12
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c5
-rw-r--r--arch/powerpc/platforms/pseries/ibmebus.c4
-rw-r--r--arch/powerpc/platforms/pseries/pci.c15
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h2
-rw-r--r--arch/powerpc/platforms/pseries/ras.c36
-rw-r--r--arch/powerpc/platforms/pseries/setup.c15
-rw-r--r--arch/powerpc/xmon/xmon.c6
-rw-r--r--arch/riscv/Kconfig51
-rw-r--r--arch/riscv/Kconfig.socs33
-rw-r--r--arch/riscv/Makefile14
-rw-r--r--arch/riscv/boot/dts/Makefile2
-rw-r--r--arch/riscv/boot/dts/canaan/Makefile5
-rw-r--r--arch/riscv/boot/dts/canaan/canaan_kd233.dts152
-rw-r--r--arch/riscv/boot/dts/canaan/k210.dtsi459
-rw-r--r--arch/riscv/boot/dts/canaan/k210_generic.dts46
-rw-r--r--arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts209
-rw-r--r--arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts211
-rw-r--r--arch/riscv/boot/dts/canaan/sipeed_maix_go.dts219
-rw-r--r--arch/riscv/boot/dts/canaan/sipeed_maixduino.dts184
-rw-r--r--arch/riscv/boot/dts/kendryte/Makefile4
-rw-r--r--arch/riscv/boot/dts/kendryte/k210.dts23
-rw-r--r--arch/riscv/boot/dts/kendryte/k210.dtsi125
-rw-r--r--arch/riscv/boot/dts/sifive/Makefile3
-rw-r--r--arch/riscv/boot/dts/sifive/fu740-c000.dtsi293
-rw-r--r--arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts1
-rw-r--r--arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts253
-rw-r--r--arch/riscv/configs/defconfig1
-rw-r--r--arch/riscv/configs/nommu_k210_defconfig46
-rw-r--r--arch/riscv/configs/nommu_k210_sdcard_defconfig92
-rw-r--r--arch/riscv/configs/rv32_defconfig1
-rw-r--r--arch/riscv/include/asm/bug.h1
-rw-r--r--arch/riscv/include/asm/csr.h6
-rw-r--r--arch/riscv/include/asm/kasan.h22
-rw-r--r--arch/riscv/include/asm/kprobes.h40
-rw-r--r--arch/riscv/include/asm/mmu.h2
-rw-r--r--arch/riscv/include/asm/mmu_context.h10
-rw-r--r--arch/riscv/include/asm/mmzone.h13
-rw-r--r--arch/riscv/include/asm/numa.h8
-rw-r--r--arch/riscv/include/asm/page.h8
-rw-r--r--arch/riscv/include/asm/pci.h14
-rw-r--r--arch/riscv/include/asm/pgtable.h21
-rw-r--r--arch/riscv/include/asm/probes.h24
-rw-r--r--arch/riscv/include/asm/processor.h1
-rw-r--r--arch/riscv/include/asm/ptrace.h35
-rw-r--r--arch/riscv/include/asm/sbi.h18
-rw-r--r--arch/riscv/include/asm/set_memory.h8
-rw-r--r--arch/riscv/include/asm/soc.h38
-rw-r--r--arch/riscv/include/asm/stackprotector.h3
-rw-r--r--arch/riscv/include/asm/stacktrace.h2
-rw-r--r--arch/riscv/include/asm/thread_info.h4
-rw-r--r--arch/riscv/include/asm/uprobes.h40
-rw-r--r--arch/riscv/kernel/Makefile6
-rw-r--r--arch/riscv/kernel/asm-offsets.c3
-rw-r--r--arch/riscv/kernel/ftrace.c95
-rw-r--r--arch/riscv/kernel/head.S4
-rw-r--r--arch/riscv/kernel/image-vars.h2
-rw-r--r--arch/riscv/kernel/mcount-dyn.S342
-rw-r--r--arch/riscv/kernel/patch.c8
-rw-r--r--arch/riscv/kernel/probes/Makefile6
-rw-r--r--arch/riscv/kernel/probes/decode-insn.c48
-rw-r--r--arch/riscv/kernel/probes/decode-insn.h18
-rw-r--r--arch/riscv/kernel/probes/ftrace.c53
-rw-r--r--arch/riscv/kernel/probes/kprobes.c398
-rw-r--r--arch/riscv/kernel/probes/kprobes_trampoline.S93
-rw-r--r--arch/riscv/kernel/probes/simulate-insn.c85
-rw-r--r--arch/riscv/kernel/probes/simulate-insn.h47
-rw-r--r--arch/riscv/kernel/probes/uprobes.c186
-rw-r--r--arch/riscv/kernel/process.c20
-rw-r--r--arch/riscv/kernel/ptrace.c99
-rw-r--r--arch/riscv/kernel/sbi.c36
-rw-r--r--arch/riscv/kernel/setup.c27
-rw-r--r--arch/riscv/kernel/signal.c3
-rw-r--r--arch/riscv/kernel/smpboot.c12
-rw-r--r--arch/riscv/kernel/soc.c27
-rw-r--r--arch/riscv/kernel/stacktrace.c22
-rw-r--r--arch/riscv/kernel/traps.c22
-rw-r--r--arch/riscv/kernel/vdso/Makefile3
-rw-r--r--arch/riscv/lib/Makefile2
-rw-r--r--arch/riscv/lib/error-inject.c10
-rw-r--r--arch/riscv/mm/Makefile3
-rw-r--r--arch/riscv/mm/context.c265
-rw-r--r--arch/riscv/mm/fault.c38
-rw-r--r--arch/riscv/mm/init.c131
-rw-r--r--arch/riscv/mm/kasan_init.c176
-rw-r--r--arch/riscv/net/bpf_jit_comp32.c20
-rw-r--r--arch/riscv/net/bpf_jit_comp64.c16
-rw-r--r--arch/s390/Kconfig6
-rw-r--r--arch/s390/Kconfig.debug10
-rw-r--r--arch/s390/Makefile3
-rw-r--r--arch/s390/boot/uv.c2
-rw-r--r--arch/s390/configs/debug_defconfig14
-rw-r--r--arch/s390/configs/defconfig11
-rw-r--r--arch/s390/configs/zfcpdump_defconfig2
-rw-r--r--arch/s390/crypto/aes_s390.c2
-rw-r--r--arch/s390/crypto/paes_s390.c28
-rw-r--r--arch/s390/crypto/prng.c2
-rw-r--r--arch/s390/hypfs/hypfs_diag0c.c2
-rw-r--r--arch/s390/hypfs/hypfs_vm.c4
-rw-r--r--arch/s390/include/asm/alternative.h16
-rw-r--r--arch/s390/include/asm/ap.h2
-rw-r--r--arch/s390/include/asm/atomic.h20
-rw-r--r--arch/s390/include/asm/bitops.h36
-rw-r--r--arch/s390/include/asm/cputime.h2
-rw-r--r--arch/s390/include/asm/elf.h7
-rw-r--r--arch/s390/include/asm/entry-common.h60
-rw-r--r--arch/s390/include/asm/facility.h2
-rw-r--r--arch/s390/include/asm/fpu/api.h2
-rw-r--r--arch/s390/include/asm/hardirq.h1
-rw-r--r--arch/s390/include/asm/idle.h4
-rw-r--r--arch/s390/include/asm/irq_work.h12
-rw-r--r--arch/s390/include/asm/kvm_host.h1
-rw-r--r--arch/s390/include/asm/lowcore.h17
-rw-r--r--arch/s390/include/asm/nmi.h1
-rw-r--r--arch/s390/include/asm/pci.h4
-rw-r--r--arch/s390/include/asm/pgalloc.h2
-rw-r--r--arch/s390/include/asm/pgtable.h16
-rw-r--r--arch/s390/include/asm/preempt.h4
-rw-r--r--arch/s390/include/asm/processor.h50
-rw-r--r--arch/s390/include/asm/ptrace.h9
-rw-r--r--arch/s390/include/asm/qdio.h7
-rw-r--r--arch/s390/include/asm/scsw.h3
-rw-r--r--arch/s390/include/asm/syscall.h11
-rw-r--r--arch/s390/include/asm/syscall_wrapper.h128
-rw-r--r--arch/s390/include/asm/thread_info.h5
-rw-r--r--arch/s390/include/asm/timex.h48
-rw-r--r--arch/s390/include/asm/tlb.h5
-rw-r--r--arch/s390/include/asm/uaccess.h2
-rw-r--r--arch/s390/include/asm/uv.h4
-rw-r--r--arch/s390/include/asm/vdso.h7
-rw-r--r--arch/s390/include/asm/vdso/gettimeofday.h12
-rw-r--r--arch/s390/include/asm/vtime.h14
-rw-r--r--arch/s390/include/uapi/asm/perf_cpum_cf_diag.h51
-rw-r--r--arch/s390/include/uapi/asm/ptrace.h5
-rw-r--r--arch/s390/kernel/Makefile2
-rw-r--r--arch/s390/kernel/asm-offsets.c20
-rw-r--r--arch/s390/kernel/compat_signal.c1
-rw-r--r--arch/s390/kernel/crash_dump.c2
-rw-r--r--arch/s390/kernel/debug.c6
-rw-r--r--arch/s390/kernel/early.c12
-rw-r--r--arch/s390/kernel/entry.S872
-rw-r--r--arch/s390/kernel/entry.h12
-rw-r--r--arch/s390/kernel/fpu.c88
-rw-r--r--arch/s390/kernel/idle.c24
-rw-r--r--arch/s390/kernel/irq.c90
-rw-r--r--arch/s390/kernel/nmi.c19
-rw-r--r--arch/s390/kernel/perf_cpum_cf_diag.c552
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c2
-rw-r--r--arch/s390/kernel/process.c32
-rw-r--r--arch/s390/kernel/ptrace.c117
-rw-r--r--arch/s390/kernel/setup.c23
-rw-r--r--arch/s390/kernel/signal.c13
-rw-r--r--arch/s390/kernel/smp.c47
-rw-r--r--arch/s390/kernel/syscall.c (renamed from arch/s390/kernel/sys_s390.c)66
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/s390/kernel/time.c50
-rw-r--r--arch/s390/kernel/topology.c25
-rw-r--r--arch/s390/kernel/traps.c65
-rw-r--r--arch/s390/kernel/uprobes.c6
-rw-r--r--arch/s390/kernel/uv.c2
-rw-r--r--arch/s390/kernel/vdso.c290
-rw-r--r--arch/s390/kernel/vdso64/getcpu.c6
-rw-r--r--arch/s390/kernel/vdso64/vdso64.lds.S7
-rw-r--r--arch/s390/kernel/vtime.c18
-rw-r--r--arch/s390/kvm/kvm-s390.c27
-rw-r--r--arch/s390/kvm/vsie.c3
-rw-r--r--arch/s390/lib/uaccess.c12
-rw-r--r--arch/s390/mm/fault.c2
-rw-r--r--arch/s390/mm/init.c1
-rw-r--r--arch/s390/mm/pgalloc.c22
-rw-r--r--arch/s390/mm/vmem.c44
-rw-r--r--arch/s390/net/bpf_jit_comp.c27
-rw-r--r--arch/s390/oprofile/Makefile10
-rw-r--r--arch/s390/oprofile/init.c37
-rw-r--r--arch/s390/pci/pci.c57
-rw-r--r--arch/s390/pci/pci_clp.c40
-rw-r--r--arch/s390/pci/pci_event.c6
-rw-r--r--arch/s390/pci/pci_mmio.c4
-rw-r--r--arch/s390/tools/opcodes.txt2
-rw-r--r--arch/sh/Kconfig2
-rw-r--r--arch/sh/Makefile1
-rw-r--r--arch/sh/boards/mach-landisk/gio.c6
-rw-r--r--arch/sh/configs/edosk7760_defconfig1
-rw-r--r--arch/sh/configs/espt_defconfig1
-rw-r--r--arch/sh/configs/migor_defconfig1
-rw-r--r--arch/sh/configs/r7780mp_defconfig1
-rw-r--r--arch/sh/configs/r7785rp_defconfig1
-rw-r--r--arch/sh/configs/rsk7201_defconfig1
-rw-r--r--arch/sh/configs/rsk7203_defconfig1
-rw-r--r--arch/sh/configs/rts7751r2d1_defconfig1
-rw-r--r--arch/sh/configs/rts7751r2dplus_defconfig1
-rw-r--r--arch/sh/configs/sdk7780_defconfig1
-rw-r--r--arch/sh/configs/sdk7786_defconfig1
-rw-r--r--arch/sh/configs/se7206_defconfig1
-rw-r--r--arch/sh/configs/sh03_defconfig1
-rw-r--r--arch/sh/configs/sh7724_generic_defconfig1
-rw-r--r--arch/sh/configs/sh7763rdp_defconfig1
-rw-r--r--arch/sh/configs/sh7770_generic_defconfig1
-rw-r--r--arch/sh/configs/shx3_defconfig1
-rw-r--r--arch/sh/include/asm/irq.h1
-rw-r--r--arch/sh/kernel/irq.c1
-rw-r--r--arch/sh/kernel/process_32.c2
-rw-r--r--arch/sh/kernel/syscalls/Makefile13
-rw-r--r--arch/sh/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/sh/oprofile/Makefile16
-rw-r--r--arch/sh/oprofile/backtrace.c80
-rw-r--r--arch/sh/oprofile/common.c64
-rw-r--r--arch/sparc/Kconfig5
-rw-r--r--arch/sparc/Makefile1
-rw-r--r--arch/sparc/boot/piggyback.c4
-rw-r--r--arch/sparc/configs/sparc64_defconfig12
-rw-r--r--arch/sparc/include/asm/Kbuild2
-rw-r--r--arch/sparc/include/asm/backoff.h2
-rw-r--r--arch/sparc/include/asm/cmpxchg_32.h2
-rw-r--r--arch/sparc/include/asm/irq_64.h1
-rw-r--r--arch/sparc/include/asm/mman.h54
-rw-r--r--arch/sparc/include/asm/pgtsrmmu.h2
-rw-r--r--arch/sparc/include/asm/signal.h12
-rw-r--r--arch/sparc/include/asm/spinlock_64.h2
-rw-r--r--arch/sparc/include/asm/tlb_64.h1
-rw-r--r--arch/sparc/kernel/entry.S8
-rw-r--r--arch/sparc/kernel/irq_64.c1
-rw-r--r--arch/sparc/kernel/led.c2
-rw-r--r--arch/sparc/kernel/pci.c3
-rw-r--r--arch/sparc/kernel/process_32.c4
-rw-r--r--arch/sparc/kernel/process_64.c2
-rw-r--r--arch/sparc/kernel/rtrap_32.S2
-rw-r--r--arch/sparc/kernel/signal_32.c4
-rw-r--r--arch/sparc/kernel/syscalls/Makefile19
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/sparc/kernel/viohs.c6
-rw-r--r--arch/sparc/lib/memset.S1
-rw-r--r--arch/sparc/mm/init_32.c3
-rw-r--r--arch/sparc/mm/srmmu.c20
-rw-r--r--arch/sparc/net/bpf_jit_comp_64.c17
-rw-r--r--arch/sparc/oprofile/Makefile10
-rw-r--r--arch/sparc/oprofile/init.c87
-rw-r--r--arch/um/Kconfig1
-rw-r--r--arch/um/drivers/Kconfig13
-rw-r--r--arch/um/drivers/Makefile2
-rw-r--r--arch/um/drivers/rtc.h15
-rw-r--r--arch/um/drivers/rtc_kern.c211
-rw-r--r--arch/um/drivers/rtc_user.c80
-rw-r--r--arch/um/drivers/ubd_kern.c6
-rw-r--r--arch/um/drivers/virtio_uml.c148
-rw-r--r--arch/um/include/asm/Kbuild3
-rw-r--r--arch/um/include/asm/io.h3
-rw-r--r--arch/um/include/asm/irq.h1
-rw-r--r--arch/um/include/asm/mmu_context.h29
-rw-r--r--arch/um/include/asm/pgtable.h3
-rw-r--r--arch/um/include/asm/set_memory.h1
-rw-r--r--arch/um/include/linux/time-internal.h17
-rw-r--r--arch/um/include/shared/as-layout.h17
-rw-r--r--arch/um/include/shared/common-offsets.h6
-rw-r--r--arch/um/include/shared/irq_kern.h60
-rw-r--r--arch/um/include/shared/kern_util.h2
-rw-r--r--arch/um/include/shared/skas/mm_id.h1
-rw-r--r--arch/um/include/shared/skas/stub-data.h2
-rw-r--r--arch/um/kernel/exec.c4
-rw-r--r--arch/um/kernel/irq.c171
-rw-r--r--arch/um/kernel/kmsg_dump.c9
-rw-r--r--arch/um/kernel/process.c13
-rw-r--r--arch/um/kernel/skas/clone.c28
-rw-r--r--arch/um/kernel/skas/mmu.c87
-rw-r--r--arch/um/kernel/time.c67
-rw-r--r--arch/um/kernel/tlb.c68
-rw-r--r--arch/um/kernel/um_arch.c27
-rw-r--r--arch/um/os-Linux/helper.c4
-rw-r--r--arch/um/os-Linux/skas/mem.c2
-rw-r--r--arch/um/os-Linux/skas/process.c86
-rw-r--r--arch/um/os-Linux/time.c15
-rw-r--r--arch/x86/Kconfig35
-rw-r--r--arch/x86/Makefile39
-rw-r--r--arch/x86/configs/i386_defconfig1
-rw-r--r--arch/x86/configs/x86_64_defconfig1
-rw-r--r--arch/x86/crypto/Makefile2
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S482
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c753
-rw-r--r--arch/x86/crypto/blake2s-glue.c150
-rw-r--r--arch/x86/crypto/blowfish_glue.c107
-rw-r--r--arch/x86/crypto/camellia-aesni-avx-asm_64.S298
-rw-r--r--arch/x86/crypto/camellia-aesni-avx2-asm_64.S351
-rw-r--r--arch/x86/crypto/camellia.h (renamed from arch/x86/include/asm/crypto/camellia.h)24
-rw-r--r--arch/x86/crypto/camellia_aesni_avx2_glue.c198
-rw-r--r--arch/x86/crypto/camellia_aesni_avx_glue.c216
-rw-r--r--arch/x86/crypto/camellia_glue.c145
-rw-r--r--arch/x86/crypto/cast5_avx_glue.c287
-rw-r--r--arch/x86/crypto/cast6-avx-x86_64-asm_64.S84
-rw-r--r--arch/x86/crypto/cast6_avx_glue.c207
-rw-r--r--arch/x86/crypto/des3_ede_glue.c104
-rw-r--r--arch/x86/crypto/ecb_cbc_helpers.h76
-rw-r--r--arch/x86/crypto/glue_helper-asm-avx.S104
-rw-r--r--arch/x86/crypto/glue_helper-asm-avx2.S136
-rw-r--r--arch/x86/crypto/glue_helper.c381
-rw-r--r--arch/x86/crypto/serpent-avx-x86_64-asm_64.S68
-rw-r--r--arch/x86/crypto/serpent-avx.h21
-rw-r--r--arch/x86/crypto/serpent-avx2-asm_64.S87
-rw-r--r--arch/x86/crypto/serpent-sse2.h (renamed from arch/x86/include/asm/crypto/serpent-sse2.h)0
-rw-r--r--arch/x86/crypto/serpent_avx2_glue.c185
-rw-r--r--arch/x86/crypto/serpent_avx_glue.c215
-rw-r--r--arch/x86/crypto/serpent_sse2_glue.c150
-rw-r--r--arch/x86/crypto/twofish-avx-x86_64-asm_64.S80
-rw-r--r--arch/x86/crypto/twofish.h (renamed from arch/x86/include/asm/crypto/twofish.h)4
-rw-r--r--arch/x86/crypto/twofish_avx_glue.c211
-rw-r--r--arch/x86/crypto/twofish_glue_3way.c160
-rw-r--r--arch/x86/entry/common.c19
-rw-r--r--arch/x86/entry/entry_64.S67
-rw-r--r--arch/x86/entry/syscalls/Makefile29
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl1
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl1
-rw-r--r--arch/x86/entry/thunk_64.S15
-rw-r--r--arch/x86/entry/vdso/Makefile3
-rw-r--r--arch/x86/events/core.c28
-rw-r--r--arch/x86/events/intel/core.c547
-rw-r--r--arch/x86/events/intel/ds.c131
-rw-r--r--arch/x86/events/intel/uncore.c58
-rw-r--r--arch/x86/events/intel/uncore.h5
-rw-r--r--arch/x86/events/intel/uncore_snb.c2
-rw-r--r--arch/x86/events/intel/uncore_snbep.c114
-rw-r--r--arch/x86/events/perf_event.h21
-rw-r--r--arch/x86/events/probe.c7
-rw-r--r--arch/x86/events/probe.h7
-rw-r--r--arch/x86/events/rapl.c34
-rw-r--r--arch/x86/hyperv/Makefile4
-rw-r--r--arch/x86/hyperv/hv_init.c122
-rw-r--r--arch/x86/hyperv/hv_proc.c219
-rw-r--r--arch/x86/hyperv/irqdomain.c385
-rw-r--r--arch/x86/include/asm/acrn.h78
-rw-r--r--arch/x86/include/asm/apb_timer.h40
-rw-r--r--arch/x86/include/asm/apic.h10
-rw-r--r--arch/x86/include/asm/barrier.h18
-rw-r--r--arch/x86/include/asm/compat.h11
-rw-r--r--arch/x86/include/asm/cpufeature.h7
-rw-r--r--arch/x86/include/asm/cpufeatures.h19
-rw-r--r--arch/x86/include/asm/crypto/glue_helper.h118
-rw-r--r--arch/x86/include/asm/crypto/serpent-avx.h42
-rw-r--r--arch/x86/include/asm/disabled-features.h3
-rw-r--r--arch/x86/include/asm/efi.h46
-rw-r--r--arch/x86/include/asm/elf.h2
-rw-r--r--arch/x86/include/asm/elfcore-compat.h31
-rw-r--r--arch/x86/include/asm/entry-common.h2
-rw-r--r--arch/x86/include/asm/fixmap.h3
-rw-r--r--arch/x86/include/asm/fpu/api.h12
-rw-r--r--arch/x86/include/asm/hyperv-tlfs.h38
-rw-r--r--arch/x86/include/asm/idtentry.h18
-rw-r--r--arch/x86/include/asm/insn.h45
-rw-r--r--arch/x86/include/asm/intel-mid.h93
-rw-r--r--arch/x86/include/asm/intel_mid_vrtc.h10
-rw-r--r--arch/x86/include/asm/intel_scu_ipc.h2
-rw-r--r--arch/x86/include/asm/intel_scu_ipc_legacy.h91
-rw-r--r--arch/x86/include/asm/irq.h4
-rw-r--r--arch/x86/include/asm/irq_stack.h279
-rw-r--r--arch/x86/include/asm/irqflags.h46
-rw-r--r--arch/x86/include/asm/kfence.h64
-rw-r--r--arch/x86/include/asm/kprobes.h11
-rw-r--r--arch/x86/include/asm/kvm-x86-ops.h123
-rw-r--r--arch/x86/include/asm/kvm_host.h134
-rw-r--r--arch/x86/include/asm/mce.h22
-rw-r--r--arch/x86/include/asm/microcode.h2
-rw-r--r--arch/x86/include/asm/mshyperv.h19
-rw-r--r--arch/x86/include/asm/nmi.h1
-rw-r--r--arch/x86/include/asm/orc_types.h10
-rw-r--r--arch/x86/include/asm/page_64_types.h2
-rw-r--r--arch/x86/include/asm/paravirt.h30
-rw-r--r--arch/x86/include/asm/paravirt_types.h17
-rw-r--r--arch/x86/include/asm/perf_event.h24
-rw-r--r--arch/x86/include/asm/pgtable_types.h2
-rw-r--r--arch/x86/include/asm/platform_sst_audio.h2
-rw-r--r--arch/x86/include/asm/preempt.h48
-rw-r--r--arch/x86/include/asm/processor.h9
-rw-r--r--arch/x86/include/asm/required-features.h3
-rw-r--r--arch/x86/include/asm/resctrl.h11
-rw-r--r--arch/x86/include/asm/softirq_stack.h11
-rw-r--r--arch/x86/include/asm/special_insns.h6
-rw-r--r--arch/x86/include/asm/static_call.h7
-rw-r--r--arch/x86/include/asm/thermal.h13
-rw-r--r--arch/x86/include/asm/tlb.h1
-rw-r--r--arch/x86/include/asm/unwind_hints.h13
-rw-r--r--arch/x86/include/asm/virtext.h25
-rw-r--r--arch/x86/include/asm/vm86.h1
-rw-r--r--arch/x86/include/asm/vmx.h1
-rw-r--r--arch/x86/include/asm/vmxfeatures.h1
-rw-r--r--arch/x86/include/asm/xen/interface.h3
-rw-r--r--arch/x86/include/asm/xen/page.h12
-rw-r--r--arch/x86/include/uapi/asm/kvm.h1
-rw-r--r--arch/x86/include/uapi/asm/vm86.h4
-rw-r--r--arch/x86/include/uapi/asm/vmx.h4
-rw-r--r--arch/x86/kernel/Makefile1
-rw-r--r--arch/x86/kernel/acpi/Makefile1
-rw-r--r--arch/x86/kernel/acpi/wakeup_64.S4
-rw-r--r--arch/x86/kernel/apb_timer.c347
-rw-r--r--arch/x86/kernel/apic/apic.c36
-rw-r--r--arch/x86/kernel/apic/io_apic.c4
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c6
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c9
-rw-r--r--arch/x86/kernel/asm-offsets_64.c3
-rw-r--r--arch/x86/kernel/cpu/acrn.c16
-rw-r--r--arch/x86/kernel/cpu/common.c7
-rw-r--r--arch/x86/kernel/cpu/intel.c4
-rw-r--r--arch/x86/kernel/cpu/mce/Makefile2
-rw-r--r--arch/x86/kernel/cpu/mce/core.c16
-rw-r--r--arch/x86/kernel/cpu/mce/intel.c1
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c2
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c58
-rw-r--r--arch/x86/kernel/cpu/mtrr/cleanup.c4
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c1
-rw-r--r--arch/x86/kernel/cpu/mtrr/mtrr.c2
-rw-r--r--arch/x86/kernel/cpu/perfctr-watchdog.c11
-rw-r--r--arch/x86/kernel/cpu/resctrl/internal.h1
-rw-r--r--arch/x86/kernel/cpu/resctrl/rdtgroup.c24
-rw-r--r--arch/x86/kernel/cpu/scattered.c5
-rw-r--r--arch/x86/kernel/cpu/sgx/driver.c3
-rw-r--r--arch/x86/kernel/cpu/sgx/encl.c13
-rw-r--r--arch/x86/kernel/cpu/sgx/main.c14
-rw-r--r--arch/x86/kernel/dumpstack_64.c22
-rw-r--r--arch/x86/kernel/fpu/xstate.c4
-rw-r--r--arch/x86/kernel/ftrace_64.S8
-rw-r--r--arch/x86/kernel/hw_breakpoint.c61
-rw-r--r--arch/x86/kernel/irq.c23
-rw-r--r--arch/x86/kernel/irq_32.c1
-rw-r--r--arch/x86/kernel/irq_64.c12
-rw-r--r--arch/x86/kernel/irqflags.S11
-rw-r--r--arch/x86/kernel/kprobes/core.c168
-rw-r--r--arch/x86/kernel/ldt.c10
-rw-r--r--arch/x86/kernel/module.c1
-rw-r--r--arch/x86/kernel/msr.c7
-rw-r--r--arch/x86/kernel/paravirt.c7
-rw-r--r--arch/x86/kernel/paravirt_patch.c10
-rw-r--r--arch/x86/kernel/pci-iommu_table.c3
-rw-r--r--arch/x86/kernel/process.c2
-rw-r--r--arch/x86/kernel/process_64.c2
-rw-r--r--arch/x86/kernel/ptrace.c46
-rw-r--r--arch/x86/kernel/reboot.c39
-rw-r--r--arch/x86/kernel/setup.c22
-rw-r--r--arch/x86/kernel/smpboot.c1
-rw-r--r--arch/x86/kernel/static_call.c17
-rw-r--r--arch/x86/kernel/step.c10
-rw-r--r--arch/x86/kernel/sys_x86_64.c8
-rw-r--r--arch/x86/kernel/unwind_orc.c5
-rw-r--r--arch/x86/kernel/vm86_32.c62
-rw-r--r--arch/x86/kvm/Kconfig9
-rw-r--r--arch/x86/kvm/Makefile4
-rw-r--r--arch/x86/kvm/cpuid.c28
-rw-r--r--arch/x86/kvm/cpuid.h24
-rw-r--r--arch/x86/kvm/emulate.c16
-rw-r--r--arch/x86/kvm/hyperv.c345
-rw-r--r--arch/x86/kvm/hyperv.h54
-rw-r--r--arch/x86/kvm/irq.c10
-rw-r--r--arch/x86/kvm/kvm_cache_regs.h61
-rw-r--r--arch/x86/kvm/kvm_emulate.h2
-rw-r--r--arch/x86/kvm/lapic.c60
-rw-r--r--arch/x86/kvm/lapic.h20
-rw-r--r--arch/x86/kvm/mmu.h17
-rw-r--r--arch/x86/kvm/mmu/mmu.c555
-rw-r--r--arch/x86/kvm/mmu/mmu_audit.c8
-rw-r--r--arch/x86/kvm/mmu/mmu_internal.h22
-rw-r--r--arch/x86/kvm/mmu/page_track.c8
-rw-r--r--arch/x86/kvm/mmu/paging_tmpl.h22
-rw-r--r--arch/x86/kvm/mmu/spte.c2
-rw-r--r--arch/x86/kvm/mmu/spte.h33
-rw-r--r--arch/x86/kvm/mmu/tdp_iter.c46
-rw-r--r--arch/x86/kvm/mmu/tdp_iter.h21
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c610
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.h35
-rw-r--r--arch/x86/kvm/mtrr.c12
-rw-r--r--arch/x86/kvm/pmu.c10
-rw-r--r--arch/x86/kvm/pmu.h2
-rw-r--r--arch/x86/kvm/svm/avic.c35
-rw-r--r--arch/x86/kvm/svm/nested.c70
-rw-r--r--arch/x86/kvm/svm/sev.c136
-rw-r--r--arch/x86/kvm/svm/svm.c327
-rw-r--r--arch/x86/kvm/svm/svm.h32
-rw-r--r--arch/x86/kvm/svm/svm_ops.h69
-rw-r--r--arch/x86/kvm/trace.h40
-rw-r--r--arch/x86/kvm/vmx/capabilities.h28
-rw-r--r--arch/x86/kvm/vmx/nested.c187
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c300
-rw-r--r--arch/x86/kvm/vmx/posted_intr.c6
-rw-r--r--arch/x86/kvm/vmx/vmenter.S2
-rw-r--r--arch/x86/kvm/vmx/vmx.c413
-rw-r--r--arch/x86/kvm/vmx/vmx.h58
-rw-r--r--arch/x86/kvm/x86.c916
-rw-r--r--arch/x86/kvm/x86.h14
-rw-r--r--arch/x86/kvm/xen.c721
-rw-r--r--arch/x86/kvm/xen.h138
-rw-r--r--arch/x86/lib/insn.c119
-rw-r--r--arch/x86/lib/retpoline.S2
-rw-r--r--arch/x86/mm/fault.c407
-rw-r--r--arch/x86/mm/init.c19
-rw-r--r--arch/x86/mm/mem_encrypt.c6
-rw-r--r--arch/x86/mm/mmio-mod.c2
-rw-r--r--arch/x86/mm/pat/memtype.c4
-rw-r--r--arch/x86/net/bpf_jit_comp.c422
-rw-r--r--arch/x86/net/bpf_jit_comp32.c6
-rw-r--r--arch/x86/oprofile/Makefile12
-rw-r--r--arch/x86/oprofile/backtrace.c127
-rw-r--r--arch/x86/oprofile/init.c38
-rw-r--r--arch/x86/oprofile/nmi_int.c780
-rw-r--r--arch/x86/oprofile/op_counter.h30
-rw-r--r--arch/x86/oprofile/op_model_amd.c542
-rw-r--r--arch/x86/oprofile/op_model_p4.c723
-rw-r--r--arch/x86/oprofile/op_model_ppro.c245
-rw-r--r--arch/x86/oprofile/op_x86_model.h90
-rw-r--r--arch/x86/pci/init.c15
-rw-r--r--arch/x86/pci/intel_mid_pci.c18
-rw-r--r--arch/x86/pci/mmconfig-shared.c6
-rw-r--r--arch/x86/platform/Makefile2
-rw-r--r--arch/x86/platform/efi/efi_64.c52
-rw-r--r--arch/x86/platform/efi/efi_thunk_64.S6
-rw-r--r--arch/x86/platform/efi/quirks.c16
-rw-r--r--arch/x86/platform/geode/alix.c19
-rw-r--r--arch/x86/platform/geode/geos.c19
-rw-r--r--arch/x86/platform/geode/net5501.c13
-rw-r--r--arch/x86/platform/goldfish/Makefile2
-rw-r--r--arch/x86/platform/goldfish/goldfish.c54
-rw-r--r--arch/x86/platform/intel-mid/Makefile7
-rw-r--r--arch/x86/platform/intel-mid/device_libs/Makefile33
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c101
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_bma023.c16
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_bt.c101
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_emc1403.c39
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c81
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_lis331.c37
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_max7315.c77
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c32
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_pinctrl.c39
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c78
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_rtc.c44
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_sd.c43
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c50
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic.c83
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic.h15
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c42
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c32
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c43
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c44
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c31
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c32
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c95
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c42
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_tca6416.c53
-rw-r--r--arch/x86/platform/intel-mid/intel-mid.c106
-rw-r--r--arch/x86/platform/intel-mid/intel_mid_vrtc.c173
-rw-r--r--arch/x86/platform/intel-mid/sfi.c543
-rw-r--r--arch/x86/platform/pvh/head.S2
-rw-r--r--arch/x86/platform/sfi/sfi.c100
-rw-r--r--arch/x86/power/Makefile5
-rw-r--r--arch/x86/power/hibernate_asm_64.S103
-rw-r--r--arch/x86/tools/Makefile8
-rw-r--r--arch/x86/tools/insn_sanity.c4
-rw-r--r--arch/x86/tools/relocs.c16
-rw-r--r--arch/x86/um/os-Linux/task_size.c2
-rw-r--r--arch/x86/um/shared/sysdep/stub_32.h33
-rw-r--r--arch/x86/um/shared/sysdep/stub_64.h36
-rw-r--r--arch/x86/um/stub_32.S17
-rw-r--r--arch/x86/um/stub_64.S5
-rw-r--r--arch/x86/um/stub_segv.c5
-rw-r--r--arch/x86/xen/Makefile1
-rw-r--r--arch/x86/xen/enlighten_pv.c47
-rw-r--r--arch/x86/xen/irq.c23
-rw-r--r--arch/x86/xen/p2m.c69
-rw-r--r--arch/x86/xen/setup.c25
-rw-r--r--arch/x86/xen/xen-asm.S81
-rw-r--r--arch/x86/xen/xen-head.S5
-rw-r--r--arch/x86/xen/xen-ops.h3
-rw-r--r--arch/xtensa/Kconfig1
-rw-r--r--arch/xtensa/Makefile1
-rw-r--r--arch/xtensa/configs/audio_kc705_defconfig1
-rw-r--r--arch/xtensa/configs/generic_kc705_defconfig1
-rw-r--r--arch/xtensa/configs/smp_lx200_defconfig1
-rw-r--r--arch/xtensa/configs/xip_kc705_defconfig1
-rw-r--r--arch/xtensa/include/asm/spinlock.h2
-rw-r--r--arch/xtensa/kernel/process.c2
-rw-r--r--arch/xtensa/kernel/stacktrace.c2
-rw-r--r--arch/xtensa/kernel/syscalls/Makefile13
-rw-r--r--arch/xtensa/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/xtensa/oprofile/Makefile10
-rw-r--r--arch/xtensa/oprofile/backtrace.c27
-rw-r--r--arch/xtensa/oprofile/init.c26
-rw-r--r--arch/xtensa/platforms/iss/simdisk.c2
-rw-r--r--block/bfq-iosched.c459
-rw-r--r--block/bfq-iosched.h29
-rw-r--r--block/bfq-wf2q.c3
-rw-r--r--block/bio-integrity.c35
-rw-r--r--block/bio.c602
-rw-r--r--block/blk-cgroup.c40
-rw-r--r--block/blk-core.c100
-rw-r--r--block/blk-crypto-fallback.c18
-rw-r--r--block/blk-crypto.c3
-rw-r--r--block/blk-exec.c14
-rw-r--r--block/blk-flush.c17
-rw-r--r--block/blk-map.c4
-rw-r--r--block/blk-merge.c17
-rw-r--r--block/blk-mq-debugfs.c1
-rw-r--r--block/blk-mq-sched.c12
-rw-r--r--block/blk-mq-sched.h1
-rw-r--r--block/blk-mq.c178
-rw-r--r--block/blk-mq.h2
-rw-r--r--block/blk-pm.h38
-rw-r--r--block/blk-settings.c53
-rw-r--r--block/blk-sysfs.c15
-rw-r--r--block/blk-throttle.c2
-rw-r--r--block/blk-wbt.c4
-rw-r--r--block/blk-zoned.c17
-rw-r--r--block/blk.h12
-rw-r--r--block/bounce.c28
-rw-r--r--block/bsg.c6
-rw-r--r--block/genhd.c308
-rw-r--r--block/ioctl.c21
-rw-r--r--block/keyslot-manager.c175
-rw-r--r--block/kyber-iosched.c34
-rw-r--r--block/mq-deadline.c10
-rw-r--r--block/partitions/core.c38
-rw-r--r--block/scsi_ioctl.c6
-rw-r--r--certs/blacklist.c10
-rw-r--r--certs/system_keyring.c5
-rw-r--r--crypto/Kconfig96
-rw-r--r--crypto/Makefile4
-rw-r--r--crypto/adiantum.c2
-rw-r--r--crypto/ansi_cprng.c2
-rw-r--r--crypto/asymmetric_keys/asymmetric_type.c6
-rw-r--r--crypto/asymmetric_keys/pkcs7_parser.h5
-rw-r--r--crypto/asymmetric_keys/pkcs7_trust.c2
-rw-r--r--crypto/asymmetric_keys/pkcs7_verify.c9
-rw-r--r--crypto/asymmetric_keys/restrict.c7
-rw-r--r--crypto/blake2b_generic.c249
-rw-r--r--crypto/blake2s_generic.c158
-rw-r--r--crypto/blowfish_generic.c23
-rw-r--r--crypto/camellia_generic.c45
-rw-r--r--crypto/cast5_generic.c23
-rw-r--r--crypto/cast6_generic.c39
-rw-r--r--crypto/cbc.c1
-rw-r--r--crypto/ccm.c2
-rw-r--r--crypto/cfb.c2
-rw-r--r--crypto/cipher.c7
-rw-r--r--crypto/cmac.c2
-rw-r--r--crypto/ctr.c2
-rw-r--r--crypto/drbg.c2
-rw-r--r--crypto/ecb.c1
-rw-r--r--crypto/ecdh_helper.c3
-rw-r--r--crypto/essiv.c2
-rw-r--r--crypto/fcrypt.c1
-rw-r--r--crypto/keywrap.c2
-rw-r--r--crypto/michael_mic.c31
-rw-r--r--crypto/ofb.c2
-rw-r--r--crypto/pcbc.c2
-rw-r--r--crypto/ripemd.h14
-rw-r--r--crypto/rmd128.c323
-rw-r--r--crypto/rmd256.c342
-rw-r--r--crypto/rmd320.c391
-rw-r--r--crypto/salsa20_generic.c212
-rw-r--r--crypto/serpent_generic.c126
-rw-r--r--crypto/skcipher.c8
-rw-r--r--crypto/tcrypt.c113
-rw-r--r--crypto/testmgr.c57
-rw-r--r--crypto/testmgr.h1632
-rw-r--r--crypto/tgr192.c682
-rw-r--r--crypto/twofish_generic.c11
-rw-r--r--crypto/vmac.c2
-rw-r--r--crypto/xcbc.c2
-rw-r--r--crypto/xor.c2
-rw-r--r--crypto/xts.c2
-rw-r--r--drivers/Kconfig1
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/accessibility/speakup/serialio.c22
-rw-r--r--drivers/accessibility/speakup/speakup_acntpc.c4
-rw-r--r--drivers/accessibility/speakup/speakup_apollo.c4
-rw-r--r--drivers/accessibility/speakup/speakup_audptr.c8
-rw-r--r--drivers/accessibility/speakup/speakup_decext.c2
-rw-r--r--drivers/accessibility/speakup/speakup_decpc.c4
-rw-r--r--drivers/accessibility/speakup/speakup_dectlk.c13
-rw-r--r--drivers/accessibility/speakup/speakup_dtlk.c4
-rw-r--r--drivers/accessibility/speakup/speakup_keypc.c4
-rw-r--r--drivers/accessibility/speakup/speakup_ltlk.c2
-rw-r--r--drivers/accessibility/speakup/speakup_soft.c4
-rw-r--r--drivers/accessibility/speakup/speakup_spkout.c4
-rw-r--r--drivers/accessibility/speakup/spk_priv.h4
-rw-r--r--drivers/accessibility/speakup/spk_ttyio.c187
-rw-r--r--drivers/accessibility/speakup/spk_types.h17
-rw-r--r--drivers/accessibility/speakup/synth.c9
-rw-r--r--drivers/accessibility/speakup/varhandlers.c1
-rw-r--r--drivers/acpi/Kconfig11
-rw-r--r--drivers/acpi/Makefile2
-rw-r--r--drivers/acpi/ac.c23
-rw-r--r--drivers/acpi/acpi_configfs.c7
-rw-r--r--drivers/acpi/acpi_fpdt.c264
-rw-r--r--drivers/acpi/acpi_pad.c24
-rw-r--r--drivers/acpi/acpi_tad.c14
-rw-r--r--drivers/acpi/acpi_video.c99
-rw-r--r--drivers/acpi/acpica/acapps.h4
-rw-r--r--drivers/acpi/acpica/accommon.h2
-rw-r--r--drivers/acpi/acpica/acconvert.h2
-rw-r--r--drivers/acpi/acpica/acdebug.h2
-rw-r--r--drivers/acpi/acpica/acdispat.h2
-rw-r--r--drivers/acpi/acpica/acevents.h2
-rw-r--r--drivers/acpi/acpica/acglobal.h2
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/acpica/acinterp.h2
-rw-r--r--drivers/acpi/acpica/aclocal.h2
-rw-r--r--drivers/acpi/acpica/acmacros.h2
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/acobject.h3
-rw-r--r--drivers/acpi/acpica/acopcode.h2
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h2
-rw-r--r--drivers/acpi/acpica/acresrc.h2
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h2
-rw-r--r--drivers/acpi/acpica/amlcode.h2
-rw-r--r--drivers/acpi/acpica/amlresrc.h2
-rw-r--r--drivers/acpi/acpica/dbhistry.c2
-rw-r--r--drivers/acpi/acpica/dbinput.c4
-rw-r--r--drivers/acpi/acpica/dbobject.c2
-rw-r--r--drivers/acpi/acpica/dsargs.c2
-rw-r--r--drivers/acpi/acpica/dscontrol.c4
-rw-r--r--drivers/acpi/acpica/dsdebug.c4
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dspkginit.c2
-rw-r--r--drivers/acpi/acpica/dswexec.c5
-rw-r--r--drivers/acpi/acpica/dswload.c4
-rw-r--r--drivers/acpi/acpica/dswload2.c4
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evglock.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c2
-rw-r--r--drivers/acpi/acpica/evgpeblk.c2
-rw-r--r--drivers/acpi/acpica/evgpeinit.c2
-rw-r--r--drivers/acpi/acpica/evgpeutil.c2
-rw-r--r--drivers/acpi/acpica/evhandler.c9
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evregion.c71
-rw-r--r--drivers/acpi/acpica/evrgnini.c2
-rw-r--r--drivers/acpi/acpica/evxface.c2
-rw-r--r--drivers/acpi/acpica/evxfevnt.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c2
-rw-r--r--drivers/acpi/acpica/evxfregn.c4
-rw-r--r--drivers/acpi/acpica/exconcat.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c2
-rw-r--r--drivers/acpi/acpica/exconvrt.c2
-rw-r--r--drivers/acpi/acpica/excreate.c2
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c2
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c4
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c2
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c2
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c2
-rw-r--r--drivers/acpi/acpica/exregion.c2
-rw-r--r--drivers/acpi/acpica/exresnte.c2
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c6
-rw-r--r--drivers/acpi/acpica/exserial.c2
-rw-r--r--drivers/acpi/acpica/exstore.c6
-rw-r--r--drivers/acpi/acpica/exstoren.c2
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c2
-rw-r--r--drivers/acpi/acpica/extrace.c2
-rw-r--r--drivers/acpi/acpica/exutils.c2
-rw-r--r--drivers/acpi/acpica/hwacpi.c2
-rw-r--r--drivers/acpi/acpica/hwesleep.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c4
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c2
-rw-r--r--drivers/acpi/acpica/hwxface.c2
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c2
-rw-r--r--drivers/acpi/acpica/nsarguments.c2
-rw-r--r--drivers/acpi/acpica/nsconvert.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c2
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/acpica/nsprepkg.c2
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c19
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/nswalk.c2
-rw-r--r--drivers/acpi/acpica/nsxfname.c2
-rw-r--r--drivers/acpi/acpica/psargs.c2
-rw-r--r--drivers/acpi/acpica/psloop.c5
-rw-r--r--drivers/acpi/acpica/psobject.c2
-rw-r--r--drivers/acpi/acpica/psopcode.c2
-rw-r--r--drivers/acpi/acpica/psopinfo.c2
-rw-r--r--drivers/acpi/acpica/psparse.c4
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c2
-rw-r--r--drivers/acpi/acpica/psutils.c2
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c2
-rw-r--r--drivers/acpi/acpica/tbdata.c2
-rw-r--r--drivers/acpi/acpica/tbfadt.c2
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbprint.c2
-rw-r--r--drivers/acpi/acpica/tbutils.c2
-rw-r--r--drivers/acpi/acpica/tbxface.c2
-rw-r--r--drivers/acpi/acpica/tbxfload.c2
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utaddress.c2
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utascii.c2
-rw-r--r--drivers/acpi/acpica/utbuffer.c2
-rw-r--r--drivers/acpi/acpica/utcache.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c2
-rw-r--r--drivers/acpi/acpica/utdecode.c2
-rw-r--r--drivers/acpi/acpica/utdelete.c2
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c2
-rw-r--r--drivers/acpi/acpica/uthex.c2
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c2
-rw-r--r--drivers/acpi/acpica/utlock.c2
-rw-r--r--drivers/acpi/acpica/utobject.c2
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utpredef.c2
-rw-r--r--drivers/acpi/acpica/utprint.c2
-rw-r--r--drivers/acpi/acpica/utstrsuppt.c4
-rw-r--r--drivers/acpi/acpica/uttrack.c2
-rw-r--r--drivers/acpi/acpica/utuuid.c2
-rw-r--r--drivers/acpi/acpica/utxface.c2
-rw-r--r--drivers/acpi/acpica/utxfinit.c2
-rw-r--r--drivers/acpi/apei/erst.c2
-rw-r--r--drivers/acpi/apei/hest.c12
-rw-r--r--drivers/acpi/arm64/iort.c14
-rw-r--r--drivers/acpi/battery.c33
-rw-r--r--drivers/acpi/bgrt.c20
-rw-r--r--drivers/acpi/bus.c179
-rw-r--r--drivers/acpi/button.c15
-rw-r--r--drivers/acpi/cppc_acpi.c8
-rw-r--r--drivers/acpi/device_pm.c20
-rw-r--r--drivers/acpi/device_sysfs.c64
-rw-r--r--drivers/acpi/dock.c26
-rw-r--r--drivers/acpi/nfit/core.c75
-rw-r--r--drivers/acpi/osl.c75
-rw-r--r--drivers/acpi/pci_root.c40
-rw-r--r--drivers/acpi/platform_profile.c178
-rw-r--r--drivers/acpi/power.c53
-rw-r--r--drivers/acpi/property.c73
-rw-r--r--drivers/acpi/scan.c138
-rw-r--r--drivers/acpi/sysfs.c7
-rw-r--r--drivers/acpi/thermal.c182
-rw-r--r--drivers/acpi/utils.c86
-rw-r--r--drivers/amba/bus.c234
-rw-r--r--drivers/android/binderfs.c6
-rw-r--r--drivers/ata/ahci_brcm.c14
-rw-r--r--drivers/ata/pata_icside.c21
-rw-r--r--drivers/atm/idt77252.c11
-rw-r--r--drivers/atm/idt77252.h2
-rw-r--r--drivers/auxdisplay/Kconfig3
-rw-r--r--drivers/auxdisplay/cfag12864b.c4
-rw-r--r--drivers/auxdisplay/cfag12864bfb.c4
-rw-r--r--drivers/auxdisplay/ht16k33.c17
-rw-r--r--drivers/auxdisplay/ks0108.c4
-rw-r--r--drivers/base/Kconfig8
-rw-r--r--drivers/base/Makefile1
-rw-r--r--drivers/base/arch_numa.c (renamed from arch/arm64/mm/numa.c)40
-rw-r--r--drivers/base/auxiliary.c13
-rw-r--r--drivers/base/base.h5
-rw-r--r--drivers/base/bus.c19
-rw-r--r--drivers/base/core.c165
-rw-r--r--drivers/base/devtmpfs.c15
-rw-r--r--drivers/base/init.c1
-rw-r--r--drivers/base/isa.c2
-rw-r--r--drivers/base/memory.c35
-rw-r--r--drivers/base/node.c33
-rw-r--r--drivers/base/platform.c13
-rw-r--r--drivers/base/power/clock_ops.c223
-rw-r--r--drivers/base/power/domain.c89
-rw-r--r--drivers/base/power/domain_governor.c102
-rw-r--r--drivers/base/power/main.c9
-rw-r--r--drivers/base/power/runtime.c64
-rw-r--r--drivers/base/property.c15
-rw-r--r--drivers/base/regmap/regcache.c2
-rw-r--r--drivers/base/regmap/regmap-sdw-mbq.c10
-rw-r--r--drivers/base/regmap/regmap-sdw.c4
-rw-r--r--drivers/base/swnode.c294
-rw-r--r--drivers/base/test/Makefile1
-rw-r--r--drivers/block/Kconfig16
-rw-r--r--drivers/block/Makefile3
-rw-r--r--drivers/block/aoe/aoecmd.c2
-rw-r--r--drivers/block/brd.c8
-rw-r--r--drivers/block/drbd/drbd_actlog.c2
-rw-r--r--drivers/block/drbd/drbd_bitmap.c2
-rw-r--r--drivers/block/drbd/drbd_int.h8
-rw-r--r--drivers/block/drbd/drbd_main.c16
-rw-r--r--drivers/block/drbd/drbd_receiver.c6
-rw-r--r--drivers/block/drbd/drbd_req.c18
-rw-r--r--drivers/block/drbd/drbd_req.h12
-rw-r--r--drivers/block/drbd/drbd_worker.c5
-rw-r--r--drivers/block/floppy.c30
-rw-r--r--drivers/block/loop.c98
-rw-r--r--drivers/block/loop.h1
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c17
-rw-r--r--drivers/block/n64cart.c178
-rw-r--r--drivers/block/nbd.c68
-rw-r--r--drivers/block/null_blk/main.c2
-rw-r--r--drivers/block/null_blk/zoned.c24
-rw-r--r--drivers/block/paride/pd.c2
-rw-r--r--drivers/block/pktcdvd.c6
-rw-r--r--drivers/block/ps3vram.c2
-rw-r--r--drivers/block/rbd.c19
-rw-r--r--drivers/block/rsxx/core.c8
-rw-r--r--drivers/block/rsxx/dev.c2
-rw-r--r--drivers/block/rsxx/dma.c3
-rw-r--r--drivers/block/rsxx/rsxx_priv.h1
-rw-r--r--drivers/block/skd_main.c3670
-rw-r--r--drivers/block/skd_s1120.h322
-rw-r--r--drivers/block/sx8.c4
-rw-r--r--drivers/block/umem.c2
-rw-r--r--drivers/block/virtio_blk.c13
-rw-r--r--drivers/block/xen-blkback/blkback.c36
-rw-r--r--drivers/block/xen-blkback/xenbus.c2
-rw-r--r--drivers/block/xen-blkfront.c20
-rw-r--r--drivers/block/zram/zram_drv.c7
-rw-r--r--drivers/bluetooth/btintel.c21
-rw-r--r--drivers/bluetooth/btmtksdio.c16
-rw-r--r--drivers/bluetooth/btqca.c67
-rw-r--r--drivers/bluetooth/btqca.h1
-rw-r--r--drivers/bluetooth/btqcomsmd.c27
-rw-r--r--drivers/bluetooth/btrtl.c43
-rw-r--r--drivers/bluetooth/btusb.c313
-rw-r--r--drivers/bluetooth/hci_bcm.c1
-rw-r--r--drivers/bluetooth/hci_h5.c7
-rw-r--r--drivers/bluetooth/hci_ldisc.c41
-rw-r--r--drivers/bluetooth/hci_qca.c33
-rw-r--r--drivers/bluetooth/hci_serdev.c4
-rw-r--r--drivers/bus/Kconfig2
-rw-r--r--drivers/bus/arm-integrator-lm.c1
-rw-r--r--drivers/bus/fsl-mc/Kconfig7
-rw-r--r--drivers/bus/fsl-mc/Makefile3
-rw-r--r--drivers/bus/fsl-mc/dprc-driver.c33
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c113
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-private.h49
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-uapi.c597
-rw-r--r--drivers/bus/fsl-mc/mc-sys.c2
-rw-r--r--drivers/bus/mhi/core/init.c12
-rw-r--r--drivers/bus/mhi/core/main.c194
-rw-r--r--drivers/bus/mhi/pci_generic.c381
-rw-r--r--drivers/bus/mvebu-mbus.c2
-rw-r--r--drivers/bus/simple-pm-bus.c3
-rw-r--r--drivers/bus/sunxi-rsb.c215
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/char/hw_random/ingenic-trng.c6
-rw-r--r--drivers/char/hw_random/iproc-rng200.c38
-rw-r--r--drivers/char/hw_random/nomadik-rng.c3
-rw-r--r--drivers/char/hw_random/optee-rng.c3
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c5
-rw-r--r--drivers/char/ipmi/ipmb_dev_int.c24
-rw-r--r--drivers/char/mem.c93
-rw-r--r--drivers/char/pcmcia/synclink_cs.c2
-rw-r--r--drivers/char/random.c17
-rw-r--r--drivers/char/tpm/Kconfig10
-rw-r--r--drivers/char/tpm/Makefile2
-rw-r--r--drivers/char/tpm/eventlog/tpm1.c1
-rw-r--r--drivers/char/tpm/tpm-dev-common.c1
-rw-r--r--drivers/char/tpm/tpm-sysfs.c179
-rw-r--r--drivers/char/tpm/tpm.h4
-rw-r--r--drivers/char/tpm/tpm_ppi.c2
-rw-r--r--drivers/char/tpm/tpm_tis_core.c80
-rw-r--r--drivers/char/tpm/tpm_tis_i2c_cr50.c790
-rw-r--r--drivers/clk/Kconfig12
-rw-r--r--drivers/clk/Makefile8
-rw-r--r--drivers/clk/at91/at91rm9200.c3
-rw-r--r--drivers/clk/at91/at91sam9260.c16
-rw-r--r--drivers/clk/at91/at91sam9g45.c3
-rw-r--r--drivers/clk/at91/at91sam9n12.c3
-rw-r--r--drivers/clk/at91/at91sam9rl.c3
-rw-r--r--drivers/clk/at91/at91sam9x5.c20
-rw-r--r--drivers/clk/at91/sama5d2.c3
-rw-r--r--drivers/clk/at91/sama5d3.c2
-rw-r--r--drivers/clk/at91/sama5d4.c3
-rw-r--r--drivers/clk/bcm/clk-iproc-pll.c2
-rw-r--r--drivers/clk/clk-ast2600.c37
-rw-r--r--drivers/clk/clk-axi-clkgen.c15
-rw-r--r--drivers/clk/clk-bd718x7.c12
-rw-r--r--drivers/clk/clk-divider.c9
-rw-r--r--drivers/clk/clk-efm32gg.c84
-rw-r--r--drivers/clk/clk-fixed-factor.c39
-rw-r--r--drivers/clk/clk-fixed-mmio.c2
-rw-r--r--drivers/clk/clk-k210.c1007
-rw-r--r--drivers/clk/clk-npcm7xx.c108
-rw-r--r--drivers/clk/clk-qoriq.c62
-rw-r--r--drivers/clk/clk-si570.c16
-rw-r--r--drivers/clk/clk-tango4.c85
-rw-r--r--drivers/clk/clk-u300.c1199
-rw-r--r--drivers/clk/clk-versaclock5.c64
-rw-r--r--drivers/clk/clk-xgene.c5
-rw-r--r--drivers/clk/clk.c24
-rw-r--r--drivers/clk/imx/Kconfig2
-rw-r--r--drivers/clk/imx/clk-imx31.c10
-rw-r--r--drivers/clk/imx/clk-imx6q.c6
-rw-r--r--drivers/clk/imx/clk-imx6sl.c1
-rw-r--r--drivers/clk/imx/clk-imx8mm.c12
-rw-r--r--drivers/clk/imx/clk-imx8mn.c12
-rw-r--r--drivers/clk/imx/clk-imx8mq.c22
-rw-r--r--drivers/clk/imx/clk-imx8qxp.c26
-rw-r--r--drivers/clk/mediatek/Kconfig11
-rw-r--r--drivers/clk/mediatek/clk-mux.c89
-rw-r--r--drivers/clk/mediatek/clk-mux.h14
-rw-r--r--drivers/clk/meson/axg.c3
-rw-r--r--drivers/clk/meson/axg.h1
-rw-r--r--drivers/clk/meson/clk-pll.c10
-rw-r--r--drivers/clk/meson/meson8b.c45
-rw-r--r--drivers/clk/mmp/clk-audio.c6
-rw-r--r--drivers/clk/mstar/Kconfig9
-rw-r--r--drivers/clk/mstar/Makefile6
-rw-r--r--drivers/clk/mstar/clk-msc313-mpll.c155
-rw-r--r--drivers/clk/mvebu/ap-cpu-clk.c2
-rw-r--r--drivers/clk/qcom/Kconfig61
-rw-r--r--drivers/clk/qcom/Makefile7
-rw-r--r--drivers/clk/qcom/a7-pll.c100
-rw-r--r--drivers/clk/qcom/apcs-sdx55.c149
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c209
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h4
-rw-r--r--drivers/clk/qcom/clk-rcg.h9
-rw-r--r--drivers/clk/qcom/clk-rcg2.c57
-rw-r--r--drivers/clk/qcom/clk-regmap.c1
-rw-r--r--drivers/clk/qcom/clk-rpm.c63
-rw-r--r--drivers/clk/qcom/clk-rpmh.c49
-rw-r--r--drivers/clk/qcom/gcc-ipq4019.c7
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c143
-rw-r--r--drivers/clk/qcom/gcc-sc7180.c68
-rw-r--r--drivers/clk/qcom/gcc-sc7280.c3603
-rw-r--r--drivers/clk/qcom/gcc-sc8180x.c4629
-rw-r--r--drivers/clk/qcom/gcc-sdm660.c7
-rw-r--r--drivers/clk/qcom/gcc-sm8250.c4
-rw-r--r--drivers/clk/qcom/gcc-sm8350.c3890
-rw-r--r--drivers/clk/qcom/gdsc.c10
-rw-r--r--drivers/clk/qcom/gdsc.h3
-rw-r--r--drivers/clk/qcom/gpucc-msm8998.c18
-rw-r--r--drivers/clk/qcom/gpucc-sdm660.c349
-rw-r--r--drivers/clk/qcom/lpass-gfm-sm8250.c8
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c16
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c29
-rw-r--r--drivers/clk/qcom/mmcc-msm8998.c12
-rw-r--r--drivers/clk/qcom/mmcc-sdm660.c2864
-rw-r--r--drivers/clk/qcom/videocc-sm8250.c39
-rw-r--r--drivers/clk/renesas/Kconfig5
-rw-r--r--drivers/clk/renesas/Makefile1
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c5
-rw-r--r--drivers/clk/renesas/r8a77965-cpg-mssr.c5
-rw-r--r--drivers/clk/renesas/r8a77990-cpg-mssr.c5
-rw-r--r--drivers/clk/renesas/r8a77995-cpg-mssr.c5
-rw-r--r--drivers/clk/renesas/r8a779a0-cpg-mssr.c67
-rw-r--r--drivers/clk/renesas/rcar-cpg-lib.c270
-rw-r--r--drivers/clk/renesas/rcar-cpg-lib.h33
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c267
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c4
-rw-r--r--drivers/clk/rockchip/clk-cpu.c4
-rw-r--r--drivers/clk/rockchip/clk-half-divider.c2
-rw-r--r--drivers/clk/rockchip/clk-pll.c6
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c6
-rw-r--r--drivers/clk/rockchip/clk.c4
-rw-r--r--drivers/clk/sifive/fu540-prci.h5
-rw-r--r--drivers/clk/sifive/sifive-prci.c5
-rw-r--r--drivers/clk/sirf/Makefile6
-rw-r--r--drivers/clk/sirf/atlas6.h32
-rw-r--r--drivers/clk/sirf/clk-atlas6.c150
-rw-r--r--drivers/clk/sirf/clk-atlas7.c1682
-rw-r--r--drivers/clk/sirf/clk-common.c1037
-rw-r--r--drivers/clk/sirf/clk-prima2.c149
-rw-r--r--drivers/clk/sirf/prima2.h26
-rw-r--r--drivers/clk/socfpga/clk-agilex.c88
-rw-r--r--drivers/clk/socfpga/clk-periph-s10.c53
-rw-r--r--drivers/clk/socfpga/clk-pll-a10.c3
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c83
-rw-r--r--drivers/clk/socfpga/clk-pll.c3
-rw-r--r--drivers/clk/socfpga/stratix10-clk.h17
-rw-r--r--drivers/clk/spear/spear1310_clock.c1
-rw-r--r--drivers/clk/spear/spear1340_clock.c1
-rw-r--r--drivers/clk/st/clkgen-fsyn.c6
-rw-r--r--drivers/clk/st/clkgen-pll.c3
-rw-r--r--drivers/clk/sunxi-ng/Kconfig7
-rw-r--r--drivers/clk/sunxi-ng/Makefile1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c53
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c10
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h616.c1150
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h616.h56
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.c2
-rw-r--r--drivers/clk/sunxi/clk-a10-ve.c2
-rw-r--r--drivers/clk/sunxi/clk-mod0.c2
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c32
-rw-r--r--drivers/clk/tegra/Kconfig3
-rw-r--r--drivers/clk/tegra/Makefile2
-rw-r--r--drivers/clk/tegra/clk-tegra124-emc.c41
-rw-r--r--drivers/clk/tegra/clk-tegra124.c26
-rw-r--r--drivers/clk/tegra/clk-tegra30.c5
-rw-r--r--drivers/clk/tegra/clk.h18
-rw-r--r--drivers/clk/tegra/cvb.c1
-rw-r--r--drivers/clk/ti/clkt_dpll.c3
-rw-r--r--drivers/clk/ti/clockdomain.c2
-rw-r--r--drivers/clk/ti/dpll.c2
-rw-r--r--drivers/clk/ti/dpll3xxx.c20
-rw-r--r--drivers/clk/ti/dpll44xx.c6
-rw-r--r--drivers/clk/ti/gate.c2
-rw-r--r--drivers/clk/versatile/clk-icst.c7
-rw-r--r--drivers/clk/xilinx/Kconfig19
-rw-r--r--drivers/clk/xilinx/Makefile2
-rw-r--r--drivers/clk/xilinx/xlnx_vcu.c743
-rw-r--r--drivers/clk/zte/Makefile4
-rw-r--r--drivers/clk/zte/clk-zx296702.c741
-rw-r--r--drivers/clk/zte/clk-zx296718.c1074
-rw-r--r--drivers/clk/zte/clk.c446
-rw-r--r--drivers/clk/zte/clk.h174
-rw-r--r--drivers/clk/zynq/clkc.c73
-rw-r--r--drivers/clk/zynq/pll.c12
-rw-r--r--drivers/clk/zynqmp/divider.c1
-rw-r--r--drivers/clocksource/Kconfig37
-rw-r--r--drivers/clocksource/Makefile5
-rw-r--r--drivers/clocksource/hyperv_timer.c3
-rw-r--r--drivers/clocksource/mxs_timer.c5
-rw-r--r--drivers/clocksource/sh_cmt.c16
-rw-r--r--drivers/clocksource/timer-atlas7.c281
-rw-r--r--drivers/clocksource/timer-davinci.c5
-rw-r--r--drivers/clocksource/timer-efm32.c278
-rw-r--r--drivers/clocksource/timer-microchip-pit64b.c86
-rw-r--r--drivers/clocksource/timer-prima2.c242
-rw-r--r--drivers/clocksource/timer-tango-xtal.c57
-rw-r--r--drivers/clocksource/timer-u300.c457
-rw-r--r--drivers/cpufreq/Kconfig.arm5
-rw-r--r--drivers/cpufreq/Kconfig.x8610
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c75
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c24
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt.c2
-rw-r--r--drivers/cpufreq/cpufreq.c5
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c2
-rw-r--r--drivers/cpufreq/freq_table.c8
-rw-r--r--drivers/cpufreq/intel_pstate.c46
-rw-r--r--drivers/cpufreq/loongson1-cpufreq.c2
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c2
-rw-r--r--drivers/cpufreq/omap-cpufreq.c2
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c3
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c50
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c2
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c2
-rw-r--r--drivers/cpufreq/sa1100-cpufreq.c2
-rw-r--r--drivers/cpufreq/sa1110-cpufreq.c2
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c2
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c2
-rw-r--r--drivers/cpufreq/sfi-cpufreq.c127
-rw-r--r--drivers/cpufreq/spear-cpufreq.c2
-rw-r--r--drivers/cpufreq/tango-cpufreq.c38
-rw-r--r--drivers/cpufreq/tegra186-cpufreq.c2
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c3
-rw-r--r--drivers/cpufreq/tegra20-cpufreq.c45
-rw-r--r--drivers/cpufreq/vexpress-spc-cpufreq.c3
-rw-r--r--drivers/crypto/Kconfig35
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/allwinner/Kconfig9
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c196
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c52
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c6
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c6
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h8
-rw-r--r--drivers/crypto/bcm/cipher.c6
-rw-r--r--drivers/crypto/bcm/cipher.h4
-rw-r--r--drivers/crypto/bcm/spu.c20
-rw-r--r--drivers/crypto/bcm/spu2.c6
-rw-r--r--drivers/crypto/bcm/spu2.h8
-rw-r--r--drivers/crypto/bcm/spum.h22
-rw-r--r--drivers/crypto/bcm/util.c4
-rw-r--r--drivers/crypto/bcm/util.h26
-rw-r--r--drivers/crypto/caam/debugfs.c4
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_main.c8
-rw-r--r--drivers/crypto/ccp/sev-dev.c1
-rw-r--r--drivers/crypto/ccree/cc_cipher.c2
-rw-r--r--drivers/crypto/ccree/cc_driver.h1
-rw-r--r--drivers/crypto/geode-aes.c2
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre.h8
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c169
-rw-r--r--drivers/crypto/hisilicon/qm.c193
-rw-r--r--drivers/crypto/hisilicon/qm.h33
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c42
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c23
-rw-r--r--drivers/crypto/inside-secure/safexcel.c6
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c1
-rw-r--r--drivers/crypto/keembay/Kconfig31
-rw-r--r--drivers/crypto/keembay/Makefile3
-rw-r--r--drivers/crypto/keembay/keembay-ocs-hcu-core.c1264
-rw-r--r--drivers/crypto/keembay/ocs-aes.c10
-rw-r--r--drivers/crypto/keembay/ocs-hcu.c840
-rw-r--r--drivers/crypto/keembay/ocs-hcu.h106
-rw-r--r--drivers/crypto/marvell/Kconfig15
-rw-r--r--drivers/crypto/marvell/Makefile1
-rw-r--r--drivers/crypto/marvell/cesa/cesa.c10
-rw-r--r--drivers/crypto/marvell/cesa/cesa.h31
-rw-r--r--drivers/crypto/marvell/cesa/cipher.c34
-rw-r--r--drivers/crypto/marvell/cesa/hash.c59
-rw-r--r--drivers/crypto/marvell/cesa/tdma.c52
-rw-r--r--drivers/crypto/marvell/octeontx2/Makefile10
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_common.h137
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h464
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c202
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h197
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptlf.c428
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptlf.h353
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf.h61
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c713
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c356
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c1415
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h162
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf.h29
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c1758
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h178
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c410
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c167
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c541
-rw-r--r--drivers/crypto/mediatek/Makefile3
-rw-r--r--drivers/crypto/mediatek/mtk-aes.c1271
-rw-r--r--drivers/crypto/mediatek/mtk-platform.c586
-rw-r--r--drivers/crypto/mediatek/mtk-platform.h231
-rw-r--r--drivers/crypto/mediatek/mtk-regs.h190
-rw-r--r--drivers/crypto/mediatek/mtk-sha.c1353
-rw-r--r--drivers/crypto/picoxcell_crypto.c1807
-rw-r--r--drivers/crypto/picoxcell_crypto_regs.h115
-rw-r--r--drivers/crypto/qat/Kconfig2
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c14
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c17
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c27
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_devices.h3
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c1
-rw-r--r--drivers/crypto/qat/qat_common/adf_hw_arbiter.c8
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport.c2
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport_debug.c4
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c1
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c12
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c28
-rw-r--r--drivers/crypto/sahara.c7
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c2
-rw-r--r--drivers/crypto/talitos.c50
-rw-r--r--drivers/crypto/talitos.h1
-rw-r--r--drivers/crypto/vmx/aes.c1
-rw-r--r--drivers/crypto/vmx/aesp8-ppc.h6
-rw-r--r--drivers/crypto/vmx/vmx.c7
-rw-r--r--drivers/cxl/Kconfig53
-rw-r--r--drivers/cxl/Makefile7
-rw-r--r--drivers/cxl/bus.c29
-rw-r--r--drivers/cxl/cxl.h95
-rw-r--r--drivers/cxl/mem.c1552
-rw-r--r--drivers/cxl/pci.h31
-rw-r--r--drivers/dax/bus.c24
-rw-r--r--drivers/dax/bus.h2
-rw-r--r--drivers/dax/device.c8
-rw-r--r--drivers/dax/kmem.c7
-rw-r--r--drivers/dax/pmem/compat.c3
-rw-r--r--drivers/dax/super.c2
-rw-r--r--drivers/devfreq/devfreq.c11
-rw-r--r--drivers/devfreq/governor.h2
-rw-r--r--drivers/devfreq/governor_passive.c44
-rw-r--r--drivers/devfreq/rk3399_dmc.c2
-rw-r--r--drivers/devfreq/tegra30-devfreq.c4
-rw-r--r--drivers/dma-buf/Kconfig8
-rw-r--r--drivers/dma-buf/dma-buf.c120
-rw-r--r--drivers/dma-buf/dma-fence.c70
-rw-r--r--drivers/dma-buf/dma-heap.c14
-rw-r--r--drivers/dma-buf/heaps/cma_heap.c22
-rw-r--r--drivers/dma-buf/heaps/system_heap.c25
-rw-r--r--drivers/dma-buf/st-dma-fence.c7
-rw-r--r--drivers/dma/Kconfig30
-rw-r--r--drivers/dma/Makefile4
-rw-r--r--drivers/dma/at_hdmac.c19
-rw-r--r--drivers/dma/at_hdmac_regs.h28
-rw-r--r--drivers/dma/coh901318.c2808
-rw-r--r--drivers/dma/coh901318.h141
-rw-r--r--drivers/dma/coh901318_lli.c313
-rw-r--r--drivers/dma/dma-jz4780.c14
-rw-r--r--drivers/dma/dmaengine.c1
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c698
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac.h34
-rw-r--r--drivers/dma/dw/core.c6
-rw-r--r--drivers/dma/fsldma.c6
-rw-r--r--drivers/dma/hsu/pci.c21
-rw-r--r--drivers/dma/idxd/device.c23
-rw-r--r--drivers/dma/idxd/dma.c6
-rw-r--r--drivers/dma/idxd/idxd.h2
-rw-r--r--drivers/dma/idxd/init.c16
-rw-r--r--drivers/dma/idxd/irq.c122
-rw-r--r--drivers/dma/imx-sdma.c46
-rw-r--r--drivers/dma/lgm/Kconfig10
-rw-r--r--drivers/dma/lgm/Makefile2
-rw-r--r--drivers/dma/lgm/lgm-dma.c1739
-rw-r--r--drivers/dma/mmp_pdma.c14
-rw-r--r--drivers/dma/owl-dma.c4
-rw-r--r--drivers/dma/pl330.c3
-rw-r--r--drivers/dma/qcom/bam_dma.c29
-rw-r--r--drivers/dma/qcom/gpi.c4
-rw-r--r--drivers/dma/sh/rcar-dmac.c112
-rw-r--r--drivers/dma/sirf-dma.c1170
-rw-r--r--drivers/dma/ste_dma40.c2
-rw-r--r--drivers/dma/ti/k3-udma.c134
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c2
-rw-r--r--drivers/dma/zx_dma.c941
-rw-r--r--drivers/edac/Kconfig15
-rw-r--r--drivers/edac/Makefile7
-rw-r--r--drivers/edac/amd64_edac.c332
-rw-r--r--drivers/edac/amd64_edac.h11
-rw-r--r--drivers/edac/amd64_edac_dbg.c55
-rw-r--r--drivers/edac/amd64_edac_inj.c235
-rw-r--r--drivers/edac/ppc4xx_edac.c2
-rw-r--r--drivers/edac/xgene_edac.c2
-rw-r--r--drivers/firewire/core-device.c4
-rw-r--r--drivers/firmware/arm_scmi/driver.c4
-rw-r--r--drivers/firmware/arm_scmi/smc.c42
-rw-r--r--drivers/firmware/efi/apple-properties.c13
-rw-r--r--drivers/firmware/efi/libstub/Makefile2
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c4
-rw-r--r--drivers/firmware/efi/libstub/efistub.h11
-rw-r--r--drivers/firmware/google/coreboot_table.c5
-rw-r--r--drivers/firmware/google/coreboot_table.h2
-rw-r--r--drivers/firmware/google/framebuffer-coreboot.c4
-rw-r--r--drivers/firmware/google/memconsole-coreboot.c4
-rw-r--r--drivers/firmware/google/vpd.c4
-rw-r--r--drivers/firmware/imx/Kconfig1
-rw-r--r--drivers/firmware/qcom_scm.c16
-rw-r--r--drivers/firmware/smccc/smccc.c6
-rw-r--r--drivers/fpga/Kconfig11
-rw-r--r--drivers/fpga/Makefile2
-rw-r--r--drivers/fpga/dfl-fme-perf.c6
-rw-r--r--drivers/fpga/dfl-n3000-nios.c588
-rw-r--r--drivers/fpga/dfl-pci.c165
-rw-r--r--drivers/fpga/dfl.c4
-rw-r--r--drivers/fpga/dfl.h85
-rw-r--r--drivers/fpga/fpga-bridge.c4
-rw-r--r--drivers/gpio/Kconfig47
-rw-r--r--drivers/gpio/Makefile4
-rw-r--r--drivers/gpio/TODO2
-rw-r--r--drivers/gpio/gpio-aggregator.c40
-rw-r--r--drivers/gpio/gpio-bd70528.c59
-rw-r--r--drivers/gpio/gpio-bd71828.c39
-rw-r--r--drivers/gpio/gpio-bd9571mwv.c35
-rw-r--r--drivers/gpio/gpio-ep93xx.c232
-rw-r--r--drivers/gpio/gpio-intel-mid.c414
-rw-r--r--drivers/gpio/gpio-max77620.c2
-rw-r--r--drivers/gpio/gpio-merrifield.c5
-rw-r--r--drivers/gpio/gpio-msic.c314
-rw-r--r--drivers/gpio/gpio-mvebu.c148
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpio-pcf857x.c2
-rw-r--r--drivers/gpio/gpio-rcar.c85
-rw-r--r--drivers/gpio/gpio-sl28cpld.c4
-rw-r--r--drivers/gpio/gpio-tegra.c263
-rw-r--r--drivers/gpio/gpio-tegra186.c2
-rw-r--r--drivers/gpio/gpio-visconti.c218
-rw-r--r--drivers/gpio/gpio-vx855.c2
-rw-r--r--drivers/gpio/gpio-wcove.c65
-rw-r--r--drivers/gpio/gpio-xilinx.c369
-rw-r--r--drivers/gpio/gpio-zx.c289
-rw-r--r--drivers/gpio/gpiolib-cdev.c2
-rw-r--r--drivers/gpio/gpiolib-of.c11
-rw-r--r--drivers/gpio/gpiolib-of.h5
-rw-r--r--drivers/gpio/gpiolib.c68
-rw-r--r--drivers/gpu/drm/Kconfig8
-rw-r--r--drivers/gpu/drm/Makefile6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h138
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c150
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c195
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c176
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_1.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c73
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c137
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c212
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v10_1.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c478
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c260
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c126
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h154
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c483
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c703
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c36
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c54
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c223
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h41
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c14
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c22
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dc_common.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dc_common.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c119
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c61
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c132
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c55
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c46
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_edid_parser.c80
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_edid_parser.h44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c73
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c126
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c55
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c99
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c122
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/Makefile6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c263
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c147
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c107
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c188
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_cp_psp.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c124
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h35
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c11
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c9
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_table.c26
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c4
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h4
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c13
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c8
-rw-r--r--drivers/gpu/drm/amd/include/amd_pcie.h2
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h345
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h1300
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_offset.h35
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_sh_mask.h41
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h5
-rw-r--r--drivers/gpu/drm/amd/include/renoir_ip_offset.h2
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c259
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h584
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h1
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_types.h9
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v11_0.h22
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h6
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v12_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c137
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c11
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c11
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c222
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c32
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c32
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c371
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c137
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c1306
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h26
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c228
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c12
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c93
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h4
-rw-r--r--drivers/gpu/drm/arc/arcpgu_crtc.c9
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c1
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c1
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c1
-rw-r--r--drivers/gpu/drm/ast/ast_cursor.c55
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c4
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h2
-rw-r--r--drivers/gpu/drm/ast/ast_main.c25
-rw-r--r--drivers/gpu/drm/ast/ast_mm.c17
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c6
-rw-r--r--drivers/gpu/drm/ast/ast_post.c8
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c1
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c1
-rw-r--r--drivers/gpu/drm/bochs/bochs_hw.c4
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c6
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c46
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c57
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c6
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c2
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c67
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c79
-rw-r--r--drivers/gpu/drm/drm_blend.c6
-rw-r--r--drivers/gpu/drm/drm_bufs.c4
-rw-r--r--drivers/gpu/drm/drm_cache.c33
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c7
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c125
-rw-r--r--drivers/gpu/drm/drm_crtc.c130
-rw-r--r--drivers/gpu/drm/drm_crtc_helper_internal.h10
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c650
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c33
-rw-r--r--drivers/gpu/drm/drm_drv.c16
-rw-r--r--drivers/gpu/drm/drm_dsc.c30
-rw-r--r--drivers/gpu/drm/drm_dumb_buffers.c8
-rw-r--r--drivers/gpu/drm/drm_edid.c110
-rw-r--r--drivers/gpu/drm/drm_encoder.c113
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c46
-rw-r--r--drivers/gpu/drm/drm_file.c76
-rw-r--r--drivers/gpu/drm/drm_gem.c31
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c141
-rw-r--r--drivers/gpu/drm/drm_internal.h3
-rw-r--r--drivers/gpu/drm/drm_irq.c44
-rw-r--r--drivers/gpu/drm/drm_kms_helper_common.c25
-rw-r--r--drivers/gpu/drm/drm_legacy.h2
-rw-r--r--drivers/gpu/drm/drm_memory.c51
-rw-r--r--drivers/gpu/drm/drm_mode_config.c51
-rw-r--r--drivers/gpu/drm/drm_modes.c4
-rw-r--r--drivers/gpu/drm/drm_pci.c59
-rw-r--r--drivers/gpu/drm/drm_plane.c170
-rw-r--r--drivers/gpu/drm/drm_prime.c66
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c3
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c14
-rw-r--r--drivers/gpu/drm/drm_vblank.c11
-rw-r--r--drivers/gpu/drm/drm_vm.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c3
-rw-r--r--drivers/gpu/drm/exynos/Kconfig1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c48
-rw-r--r--drivers/gpu/drm/gma500/Kconfig17
-rw-r--r--drivers/gpu/drm/gma500/Makefile37
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c30
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c5
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c11
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c15
-rw-r--r--drivers/gpu/drm/gma500/gem.c6
-rw-r--r--drivers/gpu/drm/gma500/gem.h2
-rw-r--r--drivers/gpu/drm/gma500/gma_device.c4
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c12
-rw-r--r--drivers/gpu/drm/gma500/gtt.c20
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c8
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c4
-rw-r--r--drivers/gpu/drm/gma500/intel_i2c.c5
-rw-r--r--drivers/gpu/drm/gma500/mdfld_device.c562
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_dpi.c1017
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_dpi.h79
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c603
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.h377
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c679
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h80
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c966
-rw-r--r--drivers/gpu/drm/gma500/mdfld_output.c74
-rw-r--r--drivers/gpu/drm/gma500/mdfld_output.h76
-rw-r--r--drivers/gpu/drm/gma500/mdfld_tmd_vid.c197
-rw-r--r--drivers/gpu/drm/gma500/mdfld_tpo_vid.c83
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c9
-rw-r--r--drivers/gpu/drm/gma500/mmu.c36
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c6
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c8
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c22
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c5
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c10
-rw-r--r--drivers/gpu/drm/gma500/opregion.c3
-rw-r--r--drivers/gpu/drm/gma500/power.c21
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c34
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h75
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c7
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_modes.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_reg.h12
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c8
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c75
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_reg.h14
-rw-r--r--drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c805
-rw-r--r--drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h38
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Makefile2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c104
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h10
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c20
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c61
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug24
-rw-r--r--drivers/gpu/drm/i915/Makefile29
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c926
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.h24
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c80
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c324
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c806
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c290
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c4594
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h27
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c133
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c133
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h203
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c2780
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c692
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c404
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c178
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c74
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c1363
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h23
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c683
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c300
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c284
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c162
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c552
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c1406
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.h52
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c143
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c213
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c69
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c209
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.h33
-rw-r--r--drivers/gpu/drm/i915/dma_resv_utils.c17
-rw-r--r--drivers/gpu/drm/i915/dma_resv_utils.h13
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c132
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_create.c113
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c104
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c24
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c15
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.h8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c112
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h44
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_blt.c9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h19
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c51
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c76
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.c52
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c28
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c79
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c12
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c54
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c22
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c4
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c1
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c1
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c1
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt_pm.c27
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.c15
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderclear.c26
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.c635
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.h127
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c106
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.h17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_sseu.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h93
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c167
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c59
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_stats.h60
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c3896
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.h47
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c65
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c197
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c49
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c5415
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.h167
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc_reg.h41
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c (renamed from drivers/gpu/drm/i915/intel_region_lmem.c)4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.h (renamed from drivers/gpu/drm/i915/intel_region_lmem.h)2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c101
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c272
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c61
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c103
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.h25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c654
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c15
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_context.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_cs.c1
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c13
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_pm.c203
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c4741
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_gt_pm.c8
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c173
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c4701
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_mocs.c80
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c1
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c28
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c5
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c14
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c196
-rw-r--r--drivers/gpu/drm/i915/gt/shmem_utils.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c16
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c457
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c54
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c284
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.h6
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h11
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h41
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c22
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c12
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.h12
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c76
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c4
-rw-r--r--drivers/gpu/drm/i915/i915_active.c35
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c3
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c763
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c17
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h122
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c163
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h9
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c13
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c25
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c6
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c475
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h3
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c2
-rw-r--r--drivers/gpu/drm/i915/i915_params.c2
-rw-r--r--drivers/gpu/drm/i915/i915_params.h1
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c7
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c17
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c95
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h35
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h81
-rw-r--r--drivers/gpu/drm/i915/i915_request.c178
-rw-r--r--drivers/gpu/drm/i915/i915_request.h10
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c32
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.h7
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler_types.h10
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c33
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c25
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c2
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h7
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h15
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h3
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c159
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h4
-rw-r--r--drivers/gpu/drm/i915/intel_dram.c136
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c2
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h13
-rw-r--r--drivers/gpu/drm/i915/intel_pch.c39
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1108
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h7
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c4
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c4
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h6
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c28
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c3
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c9
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c110
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_region.c19
-rw-r--r--drivers/gpu/drm/imx/Kconfig3
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c95
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c109
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c109
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c131
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c69
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h3
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c93
-rw-r--r--drivers/gpu/drm/ingenic/Kconfig1
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c60
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.h14
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c4
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c3
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c2
-rw-r--r--drivers/gpu/drm/mediatek/Makefile5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ccorr.c223
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c89
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_drv.h92
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_gamma.c197
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c254
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c194
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c57
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c108
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp.h28
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c503
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h100
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c88
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c50
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c14
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c20
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_i2c.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mm.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx.xml.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c195
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c113
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h49
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c139
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c54
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c23
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c90
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c87
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c26
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c73
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.c7
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c24
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c21
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.h2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c24
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c3
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c21
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c5
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c2
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h14
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base507c.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base827c.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c238
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head907d.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head917d.c28
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/fifo.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push.h216
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h191
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/engine.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/enum.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/layout.h53
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h42
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h80
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h23
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h20
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h75
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h16
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h22
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h25
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/privring.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h27
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c55
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvif/fifo.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/engine.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/memory.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/subdev.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c4599
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/falcon.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c63
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c75
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c99
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c210
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c365
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c67
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c115
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/privring/Kbuild7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf100.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c)44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf117.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c)16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk104.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c)46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk20a.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c)30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c)10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/privring/gp10b.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c)18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/privring/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h8
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig120
-rw-r--r--drivers/gpu/drm/omapdrm/Makefile19
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig10
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Makefile2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c1385
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Kconfig135
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile20
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c87
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c202
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c60
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c1949
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.h456
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c28
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h72
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c229
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h347
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c57
-rw-r--r--drivers/gpu/drm/omapdrm/dss/pll.c6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c157
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.h28
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c153
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c73
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c59
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c34
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c41
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.c1
-rw-r--r--drivers/gpu/drm/panel/Kconfig20
-rw-r--r--drivers/gpu/drm/panel/Makefile2
-rw-r--r--drivers/gpu/drm/panel/panel-dsi-cm.c665
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c3
-rw-r--r--drivers/gpu/drm/panel/panel-khadas-ts050.c870
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c39
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c59
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c223
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c24
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c1
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_dev.h16
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h1
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c1
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c14
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c6
-rw-r--r--drivers/gpu/drm/radeon/r100.c27
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h36
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c89
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c55
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c1
-rw-r--r--drivers/gpu/drm/radeon/rs690.c2
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c7
-rw-r--r--drivers/gpu/drm/radeon/vce_v1_0.c1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_cmm.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c13
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c33
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h16
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c98
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c42
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c8
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c28
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_writeback.c2
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig2
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h11
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c123
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c9
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c9
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c9
-rw-r--r--drivers/gpu/drm/stm/ltdc.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c46
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h7
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.c109
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c10
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h1
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c26
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h6
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c2
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c2
-rw-r--r--drivers/gpu/drm/tegra/dc.c2
-rw-r--r--drivers/gpu/drm/tegra/drm.c2
-rw-r--r--drivers/gpu/drm/tegra/dsi.c2
-rw-r--r--drivers/gpu/drm/tegra/falcon.c9
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c9
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c2
-rw-r--r--drivers/gpu/drm/tegra/hub.c2
-rw-r--r--drivers/gpu/drm/tegra/sor.c2
-rw-r--r--drivers/gpu/drm/tegra/vic.c35
-rw-r--r--drivers/gpu/drm/tilcdc/Makefile2
-rw-r--r--drivers/gpu/drm/tiny/cirrus.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c114
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c16
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.h (renamed from include/drm/ttm/ttm_module.h)0
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c15
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c1
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c1
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c4
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c7
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c11
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_irq.c4
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c8
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c7
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c111
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c38
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c19
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h30
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c111
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c388
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h34
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_phy.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_regs.h13
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c26
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c248
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c36
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c11
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c3
-rw-r--r--drivers/gpu/drm/via/via_irq.c2
-rw-r--r--drivers/gpu/drm/virtio/Kconfig3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c24
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c81
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vram.c3
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c54
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h12
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile6
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c52
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c (renamed from drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c)126
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c22
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c40
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c240
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h94
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c26
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c76
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_marker.c155
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c36
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c47
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_thp.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c10
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c4
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c15
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c7
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c4
-rw-r--r--drivers/greybus/es2.c9
-rw-r--r--drivers/greybus/greybus_trace.h6
-rw-r--r--drivers/hid/Kconfig19
-rw-r--r--drivers/hid/Makefile3
-rw-r--r--drivers/hid/hid-chicony.c55
-rw-r--r--drivers/hid/hid-core.c9
-rw-r--r--drivers/hid/hid-google-hammer.c85
-rw-r--r--drivers/hid/hid-ids.h11
-rw-r--r--drivers/hid/hid-input.c12
-rw-r--r--drivers/hid/hid-ite.c12
-rw-r--r--drivers/hid/hid-lg-g15.c2
-rw-r--r--drivers/hid/hid-logitech-dj.c8
-rw-r--r--drivers/hid/hid-logitech-hidpp.c246
-rw-r--r--drivers/hid/hid-multitouch.c13
-rw-r--r--drivers/hid/hid-playstation.c1351
-rw-r--r--drivers/hid/hid-quirks.c26
-rw-r--r--drivers/hid/hid-roccat-arvo.c6
-rw-r--r--drivers/hid/hid-sensor-custom.c143
-rw-r--r--drivers/hid/hid-sony.c20
-rw-r--r--drivers/hid/hid-uclogic-core.c2
-rw-r--r--drivers/hid/hid-uclogic-params.c2
-rw-r--r--drivers/hid/i2c-hid/Kconfig47
-rw-r--r--drivers/hid/i2c-hid/Makefile6
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-acpi.c143
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c254
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of-goodix.c116
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of.c143
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.h22
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c27
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c55
-rw-r--r--drivers/hid/wacom_sys.c9
-rw-r--r--drivers/hid/wacom_wac.c7
-rw-r--r--drivers/hid/wacom_wac.h2
-rw-r--r--drivers/hsi/controllers/omap_ssi_core.c2
-rw-r--r--drivers/hv/channel.c4
-rw-r--r--drivers/hv/channel_mgmt.c77
-rw-r--r--drivers/hv/connection.c7
-rw-r--r--drivers/hv/hv_balloon.c2
-rw-r--r--drivers/hv/hv_fcopy.c36
-rw-r--r--drivers/hv/hv_kvp.c122
-rw-r--r--drivers/hv/hv_snapshot.c89
-rw-r--r--drivers/hv/hv_util.c222
-rw-r--r--drivers/hv/vmbus_drv.c64
-rw-r--r--drivers/hwmon/Kconfig34
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/ab8500.c224
-rw-r--r--drivers/hwmon/abx500.c487
-rw-r--r--drivers/hwmon/abx500.h69
-rw-r--r--drivers/hwmon/acpi_power_meter.c4
-rw-r--r--drivers/hwmon/aht10.c348
-rw-r--r--drivers/hwmon/amd_energy.c1
-rw-r--r--drivers/hwmon/applesmc.c2
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c4
-rw-r--r--drivers/hwmon/da9052-hwmon.c2
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c7
-rw-r--r--drivers/hwmon/gpio-fan.c2
-rw-r--r--drivers/hwmon/k10temp.c3
-rw-r--r--drivers/hwmon/lm70.c20
-rw-r--r--drivers/hwmon/max6650.c2
-rw-r--r--drivers/hwmon/nct6683.c3
-rw-r--r--drivers/hwmon/pc87360.c4
-rw-r--r--drivers/hwmon/pmbus/Kconfig4
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c2
-rw-r--r--drivers/hwmon/pmbus/lm25066.c5
-rw-r--r--drivers/hwmon/pmbus/max16601.c91
-rw-r--r--drivers/hwmon/pmbus/max31785.c13
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c12
-rw-r--r--drivers/hwmon/pwm-fan.c160
-rw-r--r--drivers/hwmon/smsc47m1.c2
-rw-r--r--drivers/hwmon/tps23861.c601
-rw-r--r--drivers/hwmon/w83627ehf.c2
-rw-r--r--drivers/hwspinlock/omap_hwspinlock.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c15
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c122
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-core.c22
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-platform.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c14
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c32
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c13
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c822
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c189
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h505
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c11
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c17
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c8
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c20
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c35
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c4
-rw-r--r--drivers/i2c/busses/Kconfig43
-rw-r--r--drivers/i2c/busses/Makefile4
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-pci.c55
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-plat.c3
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2.h6
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c254
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c2
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c5
-rw-r--r--drivers/i2c/busses/i2c-efm32.c469
-rw-r--r--drivers/i2c/busses/i2c-elektor.c7
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c8
-rw-r--r--drivers/i2c/busses/i2c-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c4
-rw-r--r--drivers/i2c/busses/i2c-i801.c17
-rw-r--r--drivers/i2c/busses/i2c-imx.c65
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c5
-rw-r--r--drivers/i2c/busses/i2c-mlxcpld.c97
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c19
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c122
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c4
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c4
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c59
-rw-r--r--drivers/i2c/busses/i2c-qup.c2
-rw-r--r--drivers/i2c/busses/i2c-rcar.c66
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/i2c/busses/i2c-sirf.c475
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c17
-rw-r--r--drivers/i2c/busses/i2c-stu300.c1008
-rw-r--r--drivers/i2c/busses/i2c-tegra.c9
-rw-r--r--drivers/i2c/busses/i2c-zx2967.c602
-rw-r--r--drivers/i2c/i2c-core-acpi.c6
-rw-r--r--drivers/i2c/i2c-core-smbus.c46
-rw-r--r--drivers/i2c/i2c-slave-testunit.c12
-rw-r--r--drivers/i2c/i2c-stub.c1
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c112
-rw-r--r--drivers/i2c/muxes/i2c-mux-mlxcpld.c163
-rw-r--r--drivers/i3c/device.c5
-rw-r--r--drivers/i3c/master.c8
-rw-r--r--drivers/i3c/master/Kconfig9
-rw-r--r--drivers/i3c/master/Makefile1
-rw-r--r--drivers/i3c/master/dw-i3c-master.c5
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c2
-rw-r--r--drivers/i3c/master/svc-i3c-master.c1478
-rw-r--r--drivers/ide/falconide.c3
-rw-r--r--drivers/ide/ide-atapi.c2
-rw-r--r--drivers/ide/ide-cd.c2
-rw-r--r--drivers/ide/ide-cd_ioctl.c2
-rw-r--r--drivers/ide/ide-devsets.c2
-rw-r--r--drivers/ide/ide-disk.c2
-rw-r--r--drivers/ide/ide-ioctls.c4
-rw-r--r--drivers/ide/ide-park.c2
-rw-r--r--drivers/ide/ide-pm.c4
-rw-r--r--drivers/ide/ide-tape.c2
-rw-r--r--drivers/ide/ide-taskfile.c2
-rw-r--r--drivers/idle/intel_idle.c2
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c6
-rw-r--r--drivers/iio/accel/kxcjk-1013.c32
-rw-r--r--drivers/iio/adc/Kconfig11
-rw-r--r--drivers/iio/adc/ab8500-gpadc.c30
-rw-r--r--drivers/iio/adc/ad7476.c6
-rw-r--r--drivers/iio/adc/qcom-pm8xxx-xoadc.c3
-rw-r--r--drivers/iio/adc/qcom-spmi-adc5.c95
-rw-r--r--drivers/iio/adc/qcom-spmi-vadc.c3
-rw-r--r--drivers/iio/adc/qcom-vadc-common.c279
-rw-r--r--drivers/iio/adc/sc27xx_adc.c2
-rw-r--r--drivers/iio/adc/stm32-adc-core.c29
-rw-r--r--drivers/iio/adc/stm32-adc.c14
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c3
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c364
-rw-r--r--drivers/iio/adc/xilinx-xadc-events.c9
-rw-r--r--drivers/iio/adc/xilinx-xadc.h6
-rw-r--r--drivers/iio/chemical/bme680_core.c2
-rw-r--r--drivers/iio/chemical/pms7003.c2
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c2
-rw-r--r--drivers/iio/common/ms_sensors/ms_sensors_i2c.c76
-rw-r--r--drivers/iio/common/ms_sensors/ms_sensors_i2c.h15
-rw-r--r--drivers/iio/dac/Kconfig10
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad5766.c643
-rw-r--r--drivers/iio/frequency/adf4350.c6
-rw-r--r--drivers/iio/gyro/bmg160_core.c25
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c40
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig8
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c9
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c5
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c5
-rw-r--r--drivers/iio/industrialio-core.c44
-rw-r--r--drivers/iio/inkern.c34
-rw-r--r--drivers/iio/light/apds9960.c8
-rw-r--r--drivers/iio/light/hid-sensor-als.c39
-rw-r--r--drivers/iio/light/tsl2583.c8
-rw-r--r--drivers/iio/light/vl6180.c2
-rw-r--r--drivers/iio/magnetometer/Kconfig15
-rw-r--r--drivers/iio/magnetometer/Makefile2
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c26
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c48
-rw-r--r--drivers/iio/magnetometer/yamaha-yas530.c1049
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c43
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c46
-rw-r--r--drivers/iio/position/Kconfig16
-rw-r--r--drivers/iio/position/Makefile1
-rw-r--r--drivers/iio/position/hid-sensor-custom-intel-hinge.c385
-rw-r--r--drivers/iio/pressure/ms5637.c77
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/Makefile2
-rw-r--r--drivers/infiniband/core/cache.c9
-rw-r--r--drivers/infiniband/core/cm.c13
-rw-r--r--drivers/infiniband/core/cma.c81
-rw-r--r--drivers/infiniband/core/cma_configfs.c12
-rw-r--r--drivers/infiniband/core/counters.c78
-rw-r--r--drivers/infiniband/core/device.c23
-rw-r--r--drivers/infiniband/core/iwpm_msg.c16
-rw-r--r--drivers/infiniband/core/iwpm_util.c6
-rw-r--r--drivers/infiniband/core/multicast.c1
-rw-r--r--drivers/infiniband/core/nldev.c4
-rw-r--r--drivers/infiniband/core/restrack.c4
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c2
-rw-r--r--drivers/infiniband/core/rw.c2
-rw-r--r--drivers/infiniband/core/sa_query.c26
-rw-r--r--drivers/infiniband/core/umem.c3
-rw-r--r--drivers/infiniband/core/umem_dmabuf.c174
-rw-r--r--drivers/infiniband/core/user_mad.c17
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c2
-rw-r--r--drivers/infiniband/core/uverbs_std_types_mr.c117
-rw-r--r--drivers/infiniband/core/verbs.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c49
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c29
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/restrack.c2
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h25
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_defs.h4
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c33
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c46
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.c8
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c2
-rw-r--r--drivers/infiniband/hw/hfi1/intr.c16
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.c4
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c4
-rw-r--r--drivers/infiniband/hw/hfi1/msix.c2
-rw-r--r--drivers/infiniband/hw/hfi1/netdev_rx.c2
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c4
-rw-r--r--drivers/infiniband/hw/hfi1/pio_copy.c1
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c14
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.c4
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c7
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c5
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c12
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c47
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c8
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c8
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c10
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h26
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c116
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h84
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c9
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c33
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.h43
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c791
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h141
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c30
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c458
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c49
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c331
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c21
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c18
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hmc.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c13
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_pble.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c13
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_uk.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c22
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c19
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_virtchnl.c19
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c4
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c231
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c14
-rw-r--r--drivers/infiniband/hw/mlx5/main.c153
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h60
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c137
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c327
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c160
-rw-r--r--drivers/infiniband/hw/mlx5/wr.c2
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h8
-rw-r--r--drivers/infiniband/hw/qedr/qedr_roce_cm.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_eeprom.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c18
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c16
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c14
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c16
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_twsi.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_tx.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c6
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c7
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h14
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c5
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/mad.c7
-rw-r--r--drivers/infiniband/sw/rdmavt/mcast.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c21
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c34
-rw-r--r--drivers/infiniband/sw/rdmavt/srq.c7
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c2
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c86
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hdr.h178
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mcast.c64
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c30
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c300
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h103
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c79
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c68
-rw-r--r--drivers/infiniband/sw/siw/siw.h2
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c4
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c271
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c26
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c4
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c15
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c53
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c3
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c10
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c11
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c127
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.h4
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h9
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c9
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c123
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c32
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c110
-rw-r--r--drivers/input/joydev.c7
-rw-r--r--drivers/input/joystick/Kconfig7
-rw-r--r--drivers/input/joystick/Makefile2
-rw-r--r--drivers/input/joystick/n64joy.c345
-rw-r--r--drivers/input/joystick/xpad.c18
-rw-r--r--drivers/input/keyboard/Kconfig6
-rw-r--r--drivers/input/keyboard/applespi.c23
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c79
-rw-r--r--drivers/input/keyboard/locomokbd.c4
-rw-r--r--drivers/input/keyboard/omap4-keypad.c302
-rw-r--r--drivers/input/misc/ariel-pwrbutton.c6
-rw-r--r--drivers/input/misc/da7280.c3
-rw-r--r--drivers/input/mouse/alps.c2
-rw-r--r--drivers/input/mouse/synaptics.c7
-rw-r--r--drivers/input/serio/Kconfig2
-rw-r--r--drivers/input/serio/ambakmi.c3
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h6
-rw-r--r--drivers/input/serio/sa1111ps2.c4
-rw-r--r--drivers/input/serio/serport.c4
-rw-r--r--drivers/input/tablet/aiptek.c80
-rw-r--r--drivers/input/touchscreen/Kconfig2
-rw-r--r--drivers/input/touchscreen/ads7846.c376
-rw-r--r--drivers/input/touchscreen/elants_i2c.c151
-rw-r--r--drivers/input/touchscreen/elo.c4
-rw-r--r--drivers/input/touchscreen/goodix.c2
-rw-r--r--drivers/input/touchscreen/htcpen.c4
-rw-r--r--drivers/input/touchscreen/ili210x.c26
-rw-r--r--drivers/input/touchscreen/iqs5xx.c209
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c8
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c3
-rw-r--r--drivers/input/touchscreen/st1232.c53
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c1
-rw-r--r--drivers/input/touchscreen/sur40.c1
-rw-r--r--drivers/input/touchscreen/surface3_spi.c2
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c3
-rw-r--r--drivers/input/touchscreen/zinitix.c4
-rw-r--r--drivers/interconnect/qcom/Kconfig18
-rw-r--r--drivers/interconnect/qcom/Makefile6
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c191
-rw-r--r--drivers/interconnect/qcom/icc-rpm.h73
-rw-r--r--drivers/interconnect/qcom/msm8916.c241
-rw-r--r--drivers/interconnect/qcom/msm8939.c355
-rw-r--r--drivers/interconnect/qcom/qcs404.c242
-rw-r--r--drivers/interconnect/qcom/sdx55.c356
-rw-r--r--drivers/interconnect/qcom/sdx55.h70
-rw-r--r--drivers/iommu/amd/Kconfig1
-rw-r--r--drivers/iommu/amd/Makefile2
-rw-r--r--drivers/iommu/amd/amd_iommu.h29
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h47
-rw-r--r--drivers/iommu/amd/init.c110
-rw-r--r--drivers/iommu/amd/io_pgtable.c560
-rw-r--r--drivers/iommu/amd/iommu.c672
-rw-r--r--drivers/iommu/amd/iommu_v2.c4
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c10
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c154
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h14
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c5
-rw-r--r--drivers/iommu/dma-iommu.c74
-rw-r--r--drivers/iommu/hyperv-iommu.c177
-rw-r--r--drivers/iommu/intel/Makefile2
-rw-r--r--drivers/iommu/intel/cap_audit.c205
-rw-r--r--drivers/iommu/intel/cap_audit.h130
-rw-r--r--drivers/iommu/intel/dmar.c13
-rw-r--r--drivers/iommu/intel/iommu.c311
-rw-r--r--drivers/iommu/intel/irq_remapping.c8
-rw-r--r--drivers/iommu/intel/pasid.c18
-rw-r--r--drivers/iommu/intel/pasid.h4
-rw-r--r--drivers/iommu/intel/svm.c73
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c65
-rw-r--r--drivers/iommu/io-pgtable.c3
-rw-r--r--drivers/iommu/iommu.c54
-rw-r--r--drivers/iommu/iova.c35
-rw-r--r--drivers/iommu/ipmmu-vmsa.c53
-rw-r--r--drivers/iommu/msm_iommu.c10
-rw-r--r--drivers/iommu/mtk_iommu.c410
-rw-r--r--drivers/iommu/mtk_iommu.h12
-rw-r--r--drivers/iommu/tegra-gart.c7
-rw-r--r--drivers/iommu/tegra-smmu.c72
-rw-r--r--drivers/ipack/ipack.c11
-rw-r--r--drivers/irqchip/Kconfig19
-rw-r--r--drivers/irqchip/Makefile4
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/irqchip/irq-ingenic-tcu.c1
-rw-r--r--drivers/irqchip/irq-ingenic.c1
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c2
-rw-r--r--drivers/irqchip/irq-ls-extirq.c2
-rw-r--r--drivers/irqchip/irq-realtek-rtl.c180
-rw-r--r--drivers/irqchip/irq-sirfsoc.c134
-rw-r--r--drivers/irqchip/irq-sun6i-r.c379
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c26
-rw-r--r--drivers/irqchip/irq-tango.c227
-rw-r--r--drivers/leds/Kconfig6
-rw-r--r--drivers/leds/Makefile6
-rw-r--r--drivers/leds/blink/Kconfig20
-rw-r--r--drivers/leds/blink/Makefile2
-rw-r--r--drivers/leds/blink/leds-lgm-sso.c888
-rw-r--r--drivers/leds/flash/Kconfig16
-rw-r--r--drivers/leds/flash/Makefile3
-rw-r--r--drivers/leds/flash/leds-rt8515.c397
-rw-r--r--drivers/leds/led-class.c3
-rw-r--r--drivers/leds/led-core.c20
-rw-r--r--drivers/leds/led-triggers.c10
-rw-r--r--drivers/leds/leds-apu.c11
-rw-r--r--drivers/leds/leds-ariel.c6
-rw-r--r--drivers/leds/leds-blinkm.c24
-rw-r--r--drivers/leds/leds-gpio.c3
-rw-r--r--drivers/leds/leds-lm3530.c10
-rw-r--r--drivers/leds/leds-lm3533.c4
-rw-r--r--drivers/leds/leds-lm355x.c8
-rw-r--r--drivers/leds/leds-lm3642.c16
-rw-r--r--drivers/leds/leds-lp50xx.c83
-rw-r--r--drivers/leds/leds-max8997.c12
-rw-r--r--drivers/leds/leds-netxbig.c12
-rw-r--r--drivers/leds/leds-ss4200.c18
-rw-r--r--drivers/leds/leds-wm831x-status.c12
-rw-r--r--drivers/leds/leds.h6
-rw-r--r--drivers/leds/trigger/Kconfig9
-rw-r--r--drivers/leds/trigger/Makefile1
-rw-r--r--drivers/leds/trigger/ledtrig-tty.c183
-rw-r--r--drivers/lightnvm/pblk-core.c5
-rw-r--r--drivers/lightnvm/pblk-gc.c3
-rw-r--r--drivers/lightnvm/pblk-init.c2
-rw-r--r--drivers/lightnvm/pblk-recovery.c3
-rw-r--r--drivers/macintosh/adb-iop.c6
-rw-r--r--drivers/mailbox/arm_mhuv2.c30
-rw-r--r--drivers/mailbox/omap-mailbox.c6
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c8
-rw-r--r--drivers/mailbox/sprd-mailbox.c2
-rw-r--r--drivers/mailbox/tegra-hsp.c15
-rw-r--r--drivers/md/Kconfig1
-rw-r--r--drivers/md/bcache/bcache.h7
-rw-r--r--drivers/md/bcache/bset.c12
-rw-r--r--drivers/md/bcache/btree.c21
-rw-r--r--drivers/md/bcache/debug.c2
-rw-r--r--drivers/md/bcache/features.h6
-rw-r--r--drivers/md/bcache/journal.c4
-rw-r--r--drivers/md/bcache/request.c39
-rw-r--r--drivers/md/bcache/super.c26
-rw-r--r--drivers/md/bcache/sysfs.c29
-rw-r--r--drivers/md/bcache/writeback.c42
-rw-r--r--drivers/md/bcache/writeback.h4
-rw-r--r--drivers/md/dm-bio-record.h9
-rw-r--r--drivers/md/dm-bufio.c4
-rw-r--r--drivers/md/dm-cache-metadata.c2
-rw-r--r--drivers/md/dm-clone-target.c14
-rw-r--r--drivers/md/dm-core.h9
-rw-r--r--drivers/md/dm-crypt.c39
-rw-r--r--drivers/md/dm-dust.c2
-rw-r--r--drivers/md/dm-era-target.c93
-rw-r--r--drivers/md/dm-flakey.c6
-rw-r--r--drivers/md/dm-integrity.c140
-rw-r--r--drivers/md/dm-io.c4
-rw-r--r--drivers/md/dm-linear.c8
-rw-r--r--drivers/md/dm-log-writes.c10
-rw-r--r--drivers/md/dm-raid1.c10
-rw-r--r--drivers/md/dm-table.c399
-rw-r--r--drivers/md/dm-thin-metadata.c2
-rw-r--r--drivers/md/dm-verity-fec.c23
-rw-r--r--drivers/md/dm-writecache.c80
-rw-r--r--drivers/md/dm-zoned-metadata.c6
-rw-r--r--drivers/md/dm.c110
-rw-r--r--drivers/md/dm.h2
-rw-r--r--drivers/md/md-linear.c2
-rw-r--r--drivers/md/md.c73
-rw-r--r--drivers/md/md.h8
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h2
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c2
-rw-r--r--drivers/md/raid1.c8
-rw-r--r--drivers/md/raid10.c18
-rw-r--r--drivers/md/raid5-ppl.c2
-rw-r--r--drivers/md/raid5.c112
-rw-r--r--drivers/media/cec/core/cec-adap.c4
-rw-r--r--drivers/media/cec/core/cec-api.c2
-rw-r--r--drivers/media/cec/platform/Makefile1
-rw-r--r--drivers/media/common/videobuf2/Kconfig1
-rw-r--r--drivers/media/common/videobuf2/Makefile1
-rw-r--r--drivers/media/common/videobuf2/frame_vector.c (renamed from mm/frame_vector.c)55
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c11
-rw-r--r--drivers/media/common/videobuf2/videobuf2-memops.c3
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c8
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c1
-rw-r--r--drivers/media/dvb-frontends/Kconfig11
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/af9033.c2
-rw-r--r--drivers/media/dvb-frontends/cx24120.c1
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c2
-rw-r--r--drivers/media/dvb-frontends/dib0090.c2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c1
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c1
-rw-r--r--drivers/media/dvb-frontends/mxl692.c1378
-rw-r--r--drivers/media/dvb-frontends/mxl692.h38
-rw-r--r--drivers/media/dvb-frontends/mxl692_defs.h548
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c1
-rw-r--r--drivers/media/i2c/Kconfig59
-rw-r--r--drivers/media/i2c/Makefile8
-rw-r--r--drivers/media/i2c/ccs-pll.c124
-rw-r--r--drivers/media/i2c/ccs-pll.h86
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c318
-rw-r--r--drivers/media/i2c/ccs/ccs-data.c27
-rw-r--r--drivers/media/i2c/ccs/ccs-data.h2
-rw-r--r--drivers/media/i2c/ccs/ccs-reg-access.c29
-rw-r--r--drivers/media/i2c/ccs/ccs.h8
-rw-r--r--drivers/media/i2c/ccs/smiapp-reg-defs.h2
-rw-r--r--drivers/media/i2c/imx219.c23
-rw-r--r--drivers/media/i2c/imx258.c82
-rw-r--r--drivers/media/i2c/imx334.c1132
-rw-r--r--drivers/media/i2c/max9271.c5
-rw-r--r--drivers/media/i2c/max9286.c74
-rw-r--r--drivers/media/i2c/mt9m111.c17
-rw-r--r--drivers/media/i2c/mt9v111.c6
-rw-r--r--drivers/media/i2c/ov02a10.c2
-rw-r--r--drivers/media/i2c/ov5647.c1259
-rw-r--r--drivers/media/i2c/ov5648.c2624
-rw-r--r--drivers/media/i2c/ov5670.c3
-rw-r--r--drivers/media/i2c/ov5675.c6
-rw-r--r--drivers/media/i2c/ov6650.c28
-rw-r--r--drivers/media/i2c/ov8856.c4
-rw-r--r--drivers/media/i2c/ov8865.c2972
-rw-r--r--drivers/media/i2c/ov9640.c15
-rw-r--r--drivers/media/i2c/ov9640.h2
-rw-r--r--drivers/media/i2c/rdacm20.c4
-rw-r--r--drivers/media/i2c/rdacm21.c623
-rw-r--r--drivers/media/i2c/st-mipid02.c21
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c4
-rw-r--r--drivers/media/pci/intel/ipu3/Kconfig21
-rw-r--r--drivers/media/pci/intel/ipu3/Makefile3
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.c314
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.h125
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2-main.c (renamed from drivers/media/pci/intel/ipu3/ipu3-cio2.c)56
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.h24
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c5
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c5
-rw-r--r--drivers/media/pci/saa7134/saa7134-tvaudio.c25
-rw-r--r--drivers/media/pci/saa7164/saa7164-buffer.c16
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164.h2
-rw-r--r--drivers/media/pci/smipcie/smipcie-ir.c46
-rw-r--r--drivers/media/pci/smipcie/smipcie-main.c26
-rw-r--r--drivers/media/platform/Kconfig18
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/allegro-dvt/Makefile6
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-core.c (renamed from drivers/staging/media/allegro-dvt/allegro-core.c)974
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-mail.c (renamed from drivers/staging/media/allegro-dvt/allegro-mail.c)21
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-mail.h (renamed from drivers/staging/media/allegro-dvt/allegro-mail.h)5
-rw-r--r--drivers/media/platform/allegro-dvt/nal-h264.c (renamed from drivers/staging/media/allegro-dvt/nal-h264.c)336
-rw-r--r--drivers/media/platform/allegro-dvt/nal-h264.h (renamed from drivers/staging/media/allegro-dvt/nal-h264.h)0
-rw-r--r--drivers/media/platform/allegro-dvt/nal-hevc.c824
-rw-r--r--drivers/media/platform/allegro-dvt/nal-hevc.h350
-rw-r--r--drivers/media/platform/allegro-dvt/nal-rbsp.c310
-rw-r--r--drivers/media/platform/allegro-dvt/nal-rbsp.h61
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c2
-rw-r--r--drivers/media/platform/aspeed-video.c6
-rw-r--r--drivers/media/platform/atmel/atmel-isc.h1
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c46
-rw-r--r--drivers/media/platform/atmel/atmel-sama5d2-isc.c44
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c17
-rw-r--r--drivers/media/platform/davinci/vpbe.c2
-rw-r--r--drivers/media/platform/davinci/vpif.c3
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c2
-rw-r--r--drivers/media/platform/davinci/vpif_display.c86
-rw-r--r--drivers/media/platform/davinci/vpif_display.h1
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c25
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.h2
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c14
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c12
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.h1
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c11
-rw-r--r--drivers/media/platform/meson/ge2d/ge2d.c1
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c3
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c6
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c10
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c12
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c3
-rw-r--r--drivers/media/platform/omap/Kconfig1
-rw-r--r--drivers/media/platform/omap3isp/isp.c74
-rw-r--r--drivers/media/platform/pxa_camera.c86
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c3
-rw-r--r--drivers/media/platform/qcom/camss/camss.c11
-rw-r--r--drivers/media/platform/qcom/venus/Makefile4
-rw-r--r--drivers/media/platform/qcom/venus/core.c49
-rw-r--r--drivers/media/platform/qcom/venus/core.h78
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c3
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c154
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h4
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c18
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c12
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h22
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c59
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.h7
-rw-r--r--drivers/media/platform/qcom/venus/hfi_plat_bufs.h38
-rw-r--r--drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c1317
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.c65
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.h67
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v4.c319
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v6.c326
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c80
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c48
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c129
-rw-r--r--drivers/media/platform/qcom/venus/venc.c202
-rw-r--r--drivers/media/platform/qcom/venus/venc_ctrls.c138
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c7
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c2
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c5
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c42
-rw-r--r--drivers/media/platform/rcar_drif.c2
-rw-r--r--drivers/media/platform/rcar_fdp1.c4
-rw-r--r--drivers/media/platform/rcar_jpu.c6
-rw-r--r--drivers/media/platform/renesas-ceu.c58
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c36
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c34
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-params.c5
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h1
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c11
-rw-r--r--drivers/media/platform/sh_vou.c2
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c1
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.c1
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c87
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c9
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h1
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c4
-rw-r--r--drivers/media/platform/ti-vpe/cal-camerarx.c373
-rw-r--r--drivers/media/platform/ti-vpe/cal-video.c394
-rw-r--r--drivers/media/platform/ti-vpe/cal.c399
-rw-r--r--drivers/media/platform/ti-vpe/cal.h105
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c2
-rw-r--r--drivers/media/platform/video-mux.c14
-rw-r--r--drivers/media/platform/vsp1/vsp1.h20
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c4
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c10
-rw-r--r--drivers/media/radio/radio-isa.c9
-rw-r--r--drivers/media/radio/radio-isa.h2
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c4
-rw-r--r--drivers/media/rc/Kconfig2
-rw-r--r--drivers/media/rc/img-ir/Kconfig1
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c2
-rw-r--r--drivers/media/rc/ir_toy.c1
-rw-r--r--drivers/media/rc/ite-cir.c2
-rw-r--r--drivers/media/rc/mceusb.c11
-rw-r--r--drivers/media/rc/rc-main.c8
-rw-r--r--drivers/media/rc/serial_ir.c2
-rw-r--r--drivers/media/rc/sunxi-cir.c169
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c5
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_bridge.c36
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_bridge.h7
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_psi.c8
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_ts.h1
-rw-r--r--drivers/media/test-drivers/vivid/vivid-ctrls.c38
-rw-r--r--drivers/media/tuners/it913x.c1
-rw-r--r--drivers/media/tuners/qm1d1c0042.c4
-rw-r--r--drivers/media/usb/cx231xx/Kconfig1
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c1
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c26
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c35
-rw-r--r--drivers/media/usb/em28xx/Kconfig2
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c46
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c10
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c26
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c6
-rw-r--r--drivers/media/usb/em28xx/em28xx.h1
-rw-r--r--drivers/media/usb/pwc/pwc-if.c22
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c4
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c179
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c552
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c11
-rw-r--r--drivers/media/usb/uvc/uvc_isight.c17
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c9
-rw-r--r--drivers/media/usb/uvc/uvc_status.c44
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c62
-rw-r--r--drivers/media/usb/uvc/uvc_video.c162
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h99
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c49
-rw-r--r--drivers/media/v4l2-core/Makefile2
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c180
-rw-r--r--drivers/media/v4l2-core/v4l2-clk.c321
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c19
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c20
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c17
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c17
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c42
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c15
-rw-r--r--drivers/memory/Kconfig17
-rw-r--r--drivers/memory/Makefile2
-rw-r--r--drivers/memory/dfl-emif.c207
-rw-r--r--drivers/memory/emif.c3
-rw-r--r--drivers/memory/mtk-smi.c44
-rw-r--r--drivers/memory/pl172.c4
-rw-r--r--drivers/memory/pl353-smc.c4
-rw-r--r--drivers/memory/samsung/exynos5422-dmc.c4
-rw-r--r--drivers/memory/tegra/Kconfig4
-rw-r--r--drivers/memory/tegra/mc.c7
-rw-r--r--drivers/memory/tegra/tegra124-emc.c368
-rw-r--r--drivers/memory/tegra/tegra124.c82
-rw-r--r--drivers/memory/tegra/tegra186-emc.c12
-rw-r--r--drivers/memory/tegra/tegra20-emc.c13
-rw-r--r--drivers/memory/tegra/tegra30-emc.c13
-rw-r--r--drivers/memory/ti-aemif.c8
-rw-r--r--drivers/memory/ti-emif-pm.c2
-rw-r--r--drivers/message/fusion/lsi/mpi_cnfg.h2
-rw-r--r--drivers/message/fusion/lsi/mpi_history.txt2
-rw-r--r--drivers/mfd/Kconfig22
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/ab8500-core.c43
-rw-r--r--drivers/mfd/acer-ec-a500.c202
-rw-r--r--drivers/mfd/altera-sysmgr.c3
-rw-r--r--drivers/mfd/arizona-core.c11
-rw-r--r--drivers/mfd/arizona-i2c.c11
-rw-r--r--drivers/mfd/arizona-spi.c138
-rw-r--r--drivers/mfd/arizona.h9
-rw-r--r--drivers/mfd/axp20x-i2c.c4
-rw-r--r--drivers/mfd/axp20x-rsb.c4
-rw-r--r--drivers/mfd/axp20x.c4
-rw-r--r--drivers/mfd/bd9571mwv.c178
-rw-r--r--drivers/mfd/db8500-prcmu.c6
-rw-r--r--drivers/mfd/gateworks-gsc.c2
-rw-r--r--drivers/mfd/intel-lpss-pci.c28
-rw-r--r--drivers/mfd/intel-m10-bmc.c43
-rw-r--r--drivers/mfd/intel_msic.c425
-rw-r--r--drivers/mfd/iqs62x.c144
-rw-r--r--drivers/mfd/max8997.c4
-rw-r--r--drivers/mfd/mcp-sa11x0.c3
-rw-r--r--drivers/mfd/mt6360-core.c12
-rw-r--r--drivers/mfd/wm831x-auxadc.c3
-rw-r--r--drivers/misc/Kconfig22
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/atmel_tclib.c200
-rw-r--r--drivers/misc/bcm-vk/Kconfig29
-rw-r--r--drivers/misc/bcm-vk/Makefile12
-rw-r--r--drivers/misc/bcm-vk/bcm_vk.h549
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_dev.c1652
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_msg.c1357
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_msg.h163
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_sg.c275
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_sg.h61
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_tty.c339
-rw-r--r--drivers/misc/cardreader/rts5227.c5
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c9
-rw-r--r--drivers/misc/cxl/cxllib.c4
-rw-r--r--drivers/misc/cxl/sysfs.c2
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c17
-rw-r--r--drivers/misc/fastrpc.c7
-rw-r--r--drivers/misc/habanalabs/Kconfig1
-rw-r--r--drivers/misc/habanalabs/common/Makefile10
-rw-r--r--drivers/misc/habanalabs/common/asid.c6
-rw-r--r--drivers/misc/habanalabs/common/command_buffer.c8
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c473
-rw-r--r--drivers/misc/habanalabs/common/context.c33
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c43
-rw-r--r--drivers/misc/habanalabs/common/device.c46
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c157
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h115
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_ioctl.c25
-rw-r--r--drivers/misc/habanalabs/common/hw_queue.c51
-rw-r--r--drivers/misc/habanalabs/common/memory.c673
-rw-r--r--drivers/misc/habanalabs/common/mmu/Makefile2
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu.c (renamed from drivers/misc/habanalabs/common/mmu.c)124
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu_v1.c (renamed from drivers/misc/habanalabs/common/mmu_v1.c)4
-rw-r--r--drivers/misc/habanalabs/common/pci/Makefile (renamed from arch/x86/platform/sfi/Makefile)2
-rw-r--r--drivers/misc/habanalabs/common/pci/pci.c (renamed from drivers/misc/habanalabs/common/pci.c)47
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c481
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h3
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_coresight.c18
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_security.c5
-rw-r--r--drivers/misc/habanalabs/goya/goya.c106
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h1
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c11
-rw-r--r--drivers/misc/habanalabs/goya/goya_security.c5
-rw-r--r--drivers/misc/habanalabs/include/common/cpucp_if.h14
-rw-r--r--drivers/misc/habanalabs/include/common/hl_boot_if.h19
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h4
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_masks.h5
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_packets.h27
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h5
-rw-r--r--drivers/misc/lkdtm/Makefile1
-rw-r--r--drivers/misc/mei/bus.c24
-rw-r--r--drivers/misc/mei/client.c291
-rw-r--r--drivers/misc/mei/client.h8
-rw-r--r--drivers/misc/mei/debugfs.c1
-rw-r--r--drivers/misc/mei/hbm.c165
-rw-r--r--drivers/misc/mei/hbm.h4
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c10
-rw-r--r--drivers/misc/mei/hw-me-regs.h5
-rw-r--r--drivers/misc/mei/hw.h61
-rw-r--r--drivers/misc/mei/init.c5
-rw-r--r--drivers/misc/mei/interrupt.c43
-rw-r--r--drivers/misc/mei/main.c2
-rw-r--r--drivers/misc/mei/mei_dev.h18
-rw-r--r--drivers/misc/mei/pci-me.c5
-rw-r--r--drivers/misc/ocxl/file.c3
-rw-r--r--drivers/misc/pci_endpoint_test.c1
-rw-r--r--drivers/misc/pti.c978
-rw-r--r--drivers/misc/pvpanic.c59
-rw-r--r--drivers/misc/sgi-xp/xpnet.c4
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c19
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.h2
-rw-r--r--drivers/mmc/core/Kconfig8
-rw-r--r--drivers/mmc/core/Makefile1
-rw-r--r--drivers/mmc/core/block.c13
-rw-r--r--drivers/mmc/core/core.c11
-rw-r--r--drivers/mmc/core/crypto.c48
-rw-r--r--drivers/mmc/core/crypto.h40
-rw-r--r--drivers/mmc/core/host.c45
-rw-r--r--drivers/mmc/core/mmc.c6
-rw-r--r--drivers/mmc/core/mmc_ops.c6
-rw-r--r--drivers/mmc/core/mmc_test.c31
-rw-r--r--drivers/mmc/core/queue.c6
-rw-r--r--drivers/mmc/core/queue.h1
-rw-r--r--drivers/mmc/core/sd.c4
-rw-r--r--drivers/mmc/core/sd.h2
-rw-r--r--drivers/mmc/core/sdio.c2
-rw-r--r--drivers/mmc/core/sdio_cis.c6
-rw-r--r--drivers/mmc/host/Kconfig43
-rw-r--r--drivers/mmc/host/Makefile5
-rw-r--r--drivers/mmc/host/android-goldfish.c545
-rw-r--r--drivers/mmc/host/atmel-mci.c52
-rw-r--r--drivers/mmc/host/au1xmmc.c14
-rw-r--r--drivers/mmc/host/cavium.c5
-rw-r--r--drivers/mmc/host/cb710-mmc.c12
-rw-r--r--drivers/mmc/host/cqhci-core.c (renamed from drivers/mmc/host/cqhci.c)69
-rw-r--r--drivers/mmc/host/cqhci-crypto.c242
-rw-r--r--drivers/mmc/host/cqhci-crypto.h47
-rw-r--r--drivers/mmc/host/cqhci.h84
-rw-r--r--drivers/mmc/host/dw_mmc-zx.c234
-rw-r--r--drivers/mmc/host/dw_mmc-zx.h32
-rw-r--r--drivers/mmc/host/dw_mmc.c6
-rw-r--r--drivers/mmc/host/jz4740_mmc.c1
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c37
-rw-r--r--drivers/mmc/host/mmci.c74
-rw-r--r--drivers/mmc/host/mtk-sd.c18
-rw-r--r--drivers/mmc/host/mxs-mmc.c2
-rw-r--r--drivers/mmc/host/omap.c7
-rw-r--r--drivers/mmc/host/omap_hsmmc.c18
-rw-r--r--drivers/mmc/host/owl-mmc.c9
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c16
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c91
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c5
-rw-r--r--drivers/mmc/host/s3cmci.c6
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c3
-rw-r--r--drivers/mmc/host/sdhci-iproc.c18
-rw-r--r--drivers/mmc/host/sdhci-msm.c322
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c65
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed-test.c105
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c289
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c1
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c23
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c20
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h7
-rw-r--r--drivers/mmc/host/sdhci-sirf.c235
-rw-r--r--drivers/mmc/host/sdhci-sprd.c6
-rw-r--r--drivers/mmc/host/sdhci-xenon.c1
-rw-r--r--drivers/mmc/host/sdhci.c9
-rw-r--r--drivers/mmc/host/sdhci_am654.c28
-rw-r--r--drivers/mmc/host/sunxi-mmc.c30
-rw-r--r--drivers/mmc/host/tifm_sd.c7
-rw-r--r--drivers/mmc/host/tmio_mmc.h2
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c8
-rw-r--r--drivers/mmc/host/uniphier-sd.c14
-rw-r--r--drivers/mmc/host/usdhi6rol0.c4
-rw-r--r--drivers/mmc/host/via-sdmmc.c9
-rw-r--r--drivers/mmc/host/wbsd.c35
-rw-r--r--drivers/most/core.c6
-rw-r--r--drivers/mtd/devices/phram.c6
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c2
-rw-r--r--drivers/mtd/maps/pci.c8
-rw-r--r--drivers/mtd/mtdswap.c1
-rw-r--r--drivers/mtd/nand/raw/Kconfig10
-rw-r--r--drivers/mtd/nand/raw/Makefile1
-rw-r--r--drivers/mtd/nand/raw/intel-nand-controller.c6
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c2
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c4
-rw-r--r--drivers/mtd/nand/raw/tango_nand.c727
-rw-r--r--drivers/mtd/parsers/Kconfig8
-rw-r--r--drivers/mtd/parsers/Makefile1
-rw-r--r--drivers/mtd/parsers/afs.c4
-rw-r--r--drivers/mtd/parsers/parser_imagetag.c4
-rw-r--r--drivers/mtd/parsers/qcomsmempart.c170
-rw-r--r--drivers/mtd/spi-nor/controllers/hisi-sfc.c4
-rw-r--r--drivers/mtd/spi-nor/controllers/intel-spi-pci.c1
-rw-r--r--drivers/mtd/spi-nor/core.c49
-rw-r--r--drivers/mtd/spi-nor/core.h2
-rw-r--r--drivers/mtd/spi-nor/sfdp.c5
-rw-r--r--drivers/mtd/spi-nor/sst.c52
-rw-r--r--drivers/mtd/ubi/eba.c1
-rw-r--r--drivers/mtd/ubi/io.c7
-rw-r--r--drivers/net/Kconfig4
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/arcnet/arc-rimi.c4
-rw-r--r--drivers/net/arcnet/arcdevice.h6
-rw-r--r--drivers/net/arcnet/arcnet.c73
-rw-r--r--drivers/net/arcnet/com20020-isa.c4
-rw-r--r--drivers/net/arcnet/com20020-pci.c2
-rw-r--r--drivers/net/arcnet/com20020_cs.c4
-rw-r--r--drivers/net/arcnet/com90io.c4
-rw-r--r--drivers/net/arcnet/com90xx.c4
-rw-r--r--drivers/net/bareudp.c19
-rw-r--r--drivers/net/bonding/bond_3ad.c26
-rw-r--r--drivers/net/bonding/bond_main.c174
-rw-r--r--drivers/net/bonding/bond_options.c55
-rw-r--r--drivers/net/caif/caif_serial.c3
-rw-r--r--drivers/net/caif/caif_virtio.c8
-rw-r--r--drivers/net/can/Makefile7
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/c_can/c_can.c4
-rw-r--r--drivers/net/can/cc770/cc770.c4
-rw-r--r--drivers/net/can/dev.c1338
-rw-r--r--drivers/net/can/dev/Makefile11
-rw-r--r--drivers/net/can/dev/bittiming.c261
-rw-r--r--drivers/net/can/dev/dev.c470
-rw-r--r--drivers/net/can/dev/length.c95
-rw-r--r--drivers/net/can/dev/netlink.c379
-rw-r--r--drivers/net/can/dev/rx-offload.c (renamed from drivers/net/can/rx-offload.c)5
-rw-r--r--drivers/net/can/dev/skb.c231
-rw-r--r--drivers/net/can/flexcan.c130
-rw-r--r--drivers/net/can/grcan.c4
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c4
-rw-r--r--drivers/net/can/kvaser_pciefd.c6
-rw-r--r--drivers/net/can/m_can/Makefile4
-rw-r--r--drivers/net/can/m_can/m_can.c8
-rw-r--r--drivers/net/can/m_can/tcan4x5x-core.c (renamed from drivers/net/can/m_can/tcan4x5x.c)122
-rw-r--r--drivers/net/can/m_can/tcan4x5x-regmap.c135
-rw-r--r--drivers/net/can/m_can/tcan4x5x.h57
-rw-r--r--drivers/net/can/mscan/mscan.c4
-rw-r--r--drivers/net/can/pch_can.c4
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c4
-rw-r--r--drivers/net/can/rcar/rcar_can.c4
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c4
-rw-r--r--drivers/net/can/sja1000/sja1000.c4
-rw-r--r--drivers/net/can/sja1000/tscan1.c4
-rw-r--r--drivers/net/can/slcan.c4
-rw-r--r--drivers/net/can/softing/softing_main.c4
-rw-r--r--drivers/net/can/spi/hi311x.c4
-rw-r--r--drivers/net/can/spi/mcp251x.c4
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c139
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h2
-rw-r--r--drivers/net/can/sun4i_can.c4
-rw-r--r--drivers/net/can/ti_hecc.c4
-rw-r--r--drivers/net/can/usb/ems_usb.c4
-rw-r--r--drivers/net/can/usb/esd_usb2.c4
-rw-r--r--drivers/net/can/usb/gs_usb.c4
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c2
-rw-r--r--drivers/net/can/usb/mcba_usb.c10
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c4
-rw-r--r--drivers/net/can/usb/ucan.c4
-rw-r--r--drivers/net/can/usb/usb_8dev.c4
-rw-r--r--drivers/net/can/vcan.c2
-rw-r--r--drivers/net/can/vxcan.c6
-rw-r--r--drivers/net/can/xilinx_can.c6
-rw-r--r--drivers/net/dsa/Kconfig2
-rw-r--r--drivers/net/dsa/Makefile1
-rw-r--r--drivers/net/dsa/b53/b53_common.c225
-rw-r--r--drivers/net/dsa/b53/b53_priv.h26
-rw-r--r--drivers/net/dsa/b53/b53_regs.h1
-rw-r--r--drivers/net/dsa/bcm_sf2.c64
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c12
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h1
-rw-r--r--drivers/net/dsa/dsa_loop.c74
-rw-r--r--drivers/net/dsa/hirschmann/Kconfig1
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c452
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.h23
-rw-r--r--drivers/net/dsa/lan9303-core.c12
-rw-r--r--drivers/net/dsa/lantiq_gswip.c105
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c108
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c98
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c29
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h8
-rw-r--r--drivers/net/dsa/mt7530.c169
-rw-r--r--drivers/net/dsa/mt7530.h20
-rw-r--r--drivers/net/dsa/mv88e6xxx/Kconfig13
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile6
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c633
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h11
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h4
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_vtu.c69
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c8
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h187
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c73
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h24
-rw-r--r--drivers/net/dsa/ocelot/Kconfig2
-rw-r--r--drivers/net/dsa/ocelot/felix.c1152
-rw-r--r--drivers/net/dsa/ocelot/felix.h18
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c67
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c49
-rw-r--r--drivers/net/dsa/qca/ar9331.c165
-rw-r--r--drivers/net/dsa/qca8k.c40
-rw-r--r--drivers/net/dsa/realtek-smi-core.h12
-rw-r--r--drivers/net/dsa/rtl8366.c156
-rw-r--r--drivers/net/dsa/rtl8366rb.c276
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h6
-rw-r--r--drivers/net/dsa/sja1105/sja1105_devlink.c9
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c363
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c6
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c2
-rw-r--r--drivers/net/dsa/xrs700x/Kconfig26
-rw-r--r--drivers/net/dsa/xrs700x/Makefile4
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.c743
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.h42
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_i2c.c147
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_mdio.c164
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_reg.h208
-rw-r--r--drivers/net/ethernet/3com/3c509.c3
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c16
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h14
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c39
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c4
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c4
-rw-r--r--drivers/net/ethernet/aurora/Kconfig23
-rw-r--r--drivers/net/ethernet/aurora/Makefile2
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c1520
-rw-r--r--drivers/net/ethernet/aurora/nb8800.h316
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig9
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c692
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.h96
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c190
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.h14
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c82
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h37
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c114
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h59
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c461
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h46
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h344
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c9
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h59
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c8
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c36
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h1
-rw-r--r--drivers/net/ethernet/broadcom/unimac.h68
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c3
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c13
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/common.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c54
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c53
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/subr.c64
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c49
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h6
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/Kconfig1
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c3
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c19
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c7
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h3
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c54
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c152
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h15
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c16
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c156
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h5
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h17
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.c93
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.h9
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_mdio.c61
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c64
-rw-r--r--drivers/net/ethernet/freescale/fec.h5
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c16
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c1
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c549
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c101
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c56
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c165
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c27
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c330
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c333
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h15
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c31
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c215
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h54
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c204
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c180
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h8
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c4
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c15
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c461
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h17
-rw-r--r--drivers/net/ethernet/intel/e100.c92
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c7
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c13
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c65
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c949
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h169
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c752
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c398
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c681
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h174
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c667
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c24
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c155
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c15
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c2
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile1
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h60
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h52
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c60
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c40
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c47
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c56
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c273
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c64
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c445
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h87
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c156
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c128
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c662
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c1059
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c184
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h158
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c107
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c78
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c48
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c14
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_dump.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c51
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c24
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c18
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c57
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c42
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c80
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h134
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c599
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c89
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c573
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h25
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h71
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h131
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c59
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h163
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c272
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h57
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c166
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h84
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c214
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c261
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c130
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c432
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c652
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h27
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c139
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c54
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h614
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c181
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c236
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h135
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c551
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c155
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c78
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c76
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c52
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.c93
-rw-r--r--drivers/net/ethernet/marvell/sky2.c5
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c43
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c216
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c984
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c307
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h175
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c499
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c1653
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c457
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c212
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c533
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c95
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c100
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c1127
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c517
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c678
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c54
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c431
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c275
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c102
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c556
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c233
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h100
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c189
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c588
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c1619
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h171
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c1640
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c1633
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h193
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h434
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c196
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c131
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c214
-rw-r--r--drivers/net/ethernet/micrel/Kconfig4
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c114
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c385
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h25
-rw-r--r--drivers/net/ethernet/mscc/Kconfig1
-rw-r--r--drivers/net/ethernet/mscc/Makefile4
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c728
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h24
-rw-r--r--drivers/net/ethernet/mscc/ocelot_devlink.c885
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c7
-rw-r--r--drivers/net/ethernet/mscc/ocelot_io.c8
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mrp.c175
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c602
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c19
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.h295
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c331
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c14
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c14
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c83
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c49
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c22
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h10
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c134
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c12
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c35
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c196
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c5
-rw-r--r--drivers/net/ethernet/realtek/r8169.h1
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c535
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c1
-rw-r--r--drivers/net/ethernet/renesas/ravb.h37
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c6
-rw-r--r--drivers/net/ethernet/rocker/rocker.h6
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c63
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c45
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c4
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c6
-rw-r--r--drivers/net/ethernet/sfc/rx.c10
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c10
-rw-r--r--drivers/net/ethernet/socionext/netsec.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c91
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c286
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c37
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac.h2
-rw-r--r--drivers/net/ethernet/ti/Kconfig10
-rw-r--r--drivers/net/ethernet/ti/Makefile1
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c607
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h28
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-switchdev.c538
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-switchdev.h34
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c22
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c7
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c22
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c12
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h2
-rw-r--r--drivers/net/ethernet/ti/cpsw_switchdev.c91
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c12
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c8
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c18
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h29
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c94
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c4
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c3
-rw-r--r--drivers/net/geneve.c19
-rw-r--r--drivers/net/gtp.c38
-rw-r--r--drivers/net/hyperv/hyperv_net.h93
-rw-r--r--drivers/net/hyperv/netvsc.c89
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c14
-rw-r--r--drivers/net/hyperv/netvsc_drv.c56
-rw-r--r--drivers/net/hyperv/rndis_filter.c248
-rw-r--r--drivers/net/ifb.c7
-rw-r--r--drivers/net/ipa/Kconfig10
-rw-r--r--drivers/net/ipa/gsi.c405
-rw-r--r--drivers/net/ipa/gsi.h6
-rw-r--r--drivers/net/ipa/gsi_reg.h31
-rw-r--r--drivers/net/ipa/gsi_trans.h1
-rw-r--r--drivers/net/ipa/ipa.h4
-rw-r--r--drivers/net/ipa/ipa_clock.c199
-rw-r--r--drivers/net/ipa/ipa_cmd.c77
-rw-r--r--drivers/net/ipa/ipa_cmd.h24
-rw-r--r--drivers/net/ipa/ipa_data-sc7180.c38
-rw-r--r--drivers/net/ipa/ipa_data-sdm845.c38
-rw-r--r--drivers/net/ipa/ipa_data.h26
-rw-r--r--drivers/net/ipa/ipa_endpoint.c127
-rw-r--r--drivers/net/ipa/ipa_main.c43
-rw-r--r--drivers/net/ipa/ipa_mem.c4
-rw-r--r--drivers/net/ipa/ipa_reg.h22
-rw-r--r--drivers/net/ipa/ipa_table.c16
-rw-r--r--drivers/net/ipa/ipa_table.h8
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c6
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/mdio/mdio-moxart.c4
-rw-r--r--drivers/net/mdio/of_mdio.c30
-rw-r--r--drivers/net/mhi/Makefile3
-rw-r--r--drivers/net/mhi/mhi.h40
-rw-r--r--drivers/net/mhi/net.c (renamed from drivers/net/mhi_net.c)199
-rw-r--r--drivers/net/mhi/proto_mbim.c293
-rw-r--r--drivers/net/netdevsim/dev.c40
-rw-r--r--drivers/net/netdevsim/fib.c678
-rw-r--r--drivers/net/netdevsim/netdev.c2
-rw-r--r--drivers/net/pcs/pcs-lynx.c36
-rw-r--r--drivers/net/phy/at803x.c85
-rw-r--r--drivers/net/phy/bcm7xxx.c2
-rw-r--r--drivers/net/phy/broadcom.c282
-rw-r--r--drivers/net/phy/dp83822.c3
-rw-r--r--drivers/net/phy/dp83869.c4
-rw-r--r--drivers/net/phy/icplus.c387
-rw-r--r--drivers/net/phy/lxt.c1
-rw-r--r--drivers/net/phy/marvell.c15
-rw-r--r--drivers/net/phy/marvell10g.c2
-rw-r--r--drivers/net/phy/mdio_bus.c10
-rw-r--r--drivers/net/phy/micrel.c18
-rw-r--r--drivers/net/phy/mscc/Makefile1
-rw-r--r--drivers/net/phy/mscc/mscc.h28
-rw-r--r--drivers/net/phy/mscc/mscc_main.c608
-rw-r--r--drivers/net/phy/mscc/mscc_serdes.c650
-rw-r--r--drivers/net/phy/mscc/mscc_serdes.h31
-rw-r--r--drivers/net/phy/national.c2
-rw-r--r--drivers/net/phy/phy.c6
-rw-r--r--drivers/net/phy/phy_device.c70
-rw-r--r--drivers/net/phy/phylink.c4
-rw-r--r--drivers/net/phy/realtek.c132
-rw-r--r--drivers/net/phy/sfp-bus.c38
-rw-r--r--drivers/net/phy/sfp.c208
-rw-r--r--drivers/net/ppp/ppp_async.c11
-rw-r--r--drivers/net/ppp/ppp_synctty.c11
-rw-r--r--drivers/net/ppp/pptp.c8
-rw-r--r--drivers/net/tap.c19
-rw-r--r--drivers/net/team/team.c6
-rw-r--r--drivers/net/tun.c25
-rw-r--r--drivers/net/usb/cdc_ether.c6
-rw-r--r--drivers/net/usb/cdc_ncm.c12
-rw-r--r--drivers/net/usb/hso.c13
-rw-r--r--drivers/net/usb/lan78xx.c6
-rw-r--r--drivers/net/usb/pegasus.c7
-rw-r--r--drivers/net/usb/qmi_wwan.c88
-rw-r--r--drivers/net/usb/r8152.c225
-rw-r--r--drivers/net/usb/rtl8150.c6
-rw-r--r--drivers/net/usb/usbnet.c21
-rw-r--r--drivers/net/veth.c108
-rw-r--r--drivers/net/virtio_net.c19
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c46
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h2
-rw-r--r--drivers/net/vxlan.c31
-rw-r--r--drivers/net/wan/farsync.c12
-rw-r--r--drivers/net/wan/hdlc_x25.c6
-rw-r--r--drivers/net/wan/ixp4xx_hss.c4
-rw-r--r--drivers/net/wan/lmc/lmc_main.c4
-rw-r--r--drivers/net/wan/sbni.c2
-rw-r--r--drivers/net/wireguard/device.c21
-rw-r--r--drivers/net/wireguard/device.h15
-rw-r--r--drivers/net/wireguard/peer.c28
-rw-r--r--drivers/net/wireguard/peer.h8
-rw-r--r--drivers/net/wireguard/queueing.c86
-rw-r--r--drivers/net/wireguard/queueing.h45
-rw-r--r--drivers/net/wireguard/receive.c16
-rw-r--r--drivers/net/wireguard/send.c31
-rw-r--r--drivers/net/wireguard/socket.c8
-rw-r--r--drivers/net/wireless/ath/ath.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c41
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h9
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c32
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c287
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c103
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h9
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c12
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h15
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c20
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c183
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c9
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c231
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h37
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig8
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c95
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h2
-rw-r--r--drivers/net/wireless/ath/carl9170/wlan.h20
-rw-r--r--drivers/net/wireless/ath/key.c41
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c5
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c40
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c17
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c11
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h3
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c11
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c94
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c24
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c32
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/d11.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c124
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c109
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c167
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c91
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c169
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c172
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c140
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c281
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rfi.c118
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c125
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c190
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c317
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c283
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c160
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c130
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c182
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c188
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.h1
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c74
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c45
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c2
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig5
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile4
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h75
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Kconfig3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c64
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c210
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c192
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c1605
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h683
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h132
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/testmode.c101
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h105
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c119
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c1842
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h979
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c102
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c48
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c129
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c46
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c552
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h63
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h69
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c177
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c528
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.h59
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Kconfig11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Makefile5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c250
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/dma.c356
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c100
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c282
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c1516
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.h333
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c1161
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c1308
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h434
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h342
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c292
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h419
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c124
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.h17
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c39
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c26
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c3
-rw-r--r--drivers/net/wireless/microchip/wilc1000/Kconfig2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/fw.h8
-rw-r--r--drivers/net/wireless/microchip/wilc1000/mon.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c15
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c13
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h17
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c154
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c62
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c116
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.h22
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c_table.c397
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c_table.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.c18623
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h6
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c3
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c36
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c17
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h3
-rw-r--r--drivers/net/wireless/wl3501.h2
-rw-r--r--drivers/net/xen-netback/common.h3
-rw-r--r--drivers/net/xen-netback/interface.c28
-rw-r--r--drivers/net/xen-netback/netback.c15
-rw-r--r--drivers/net/xen-netback/rx.c9
-rw-r--r--drivers/net/xen-netback/xenbus.c4
-rw-r--r--drivers/net/xen-netfront.c18
-rw-r--r--drivers/nfc/Kconfig11
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/fdp/i2c.c2
-rw-r--r--drivers/nfc/microread/mei.c4
-rw-r--r--drivers/nfc/pn533/pn533.c4
-rw-r--r--drivers/nfc/pn544/mei.c4
-rw-r--r--drivers/nfc/st-nci/se.c3
-rw-r--r--drivers/nfc/trf7970a.c2
-rw-r--r--drivers/nfc/virtual_ncidev.c215
-rw-r--r--drivers/ntb/hw/Kconfig1
-rw-r--r--drivers/ntb/hw/Makefile1
-rw-r--r--drivers/ntb/hw/epf/Kconfig6
-rw-r--r--drivers/ntb/hw/epf/Makefile1
-rw-r--r--drivers/ntb/hw/epf/ntb_hw_epf.c753
-rw-r--r--drivers/nvdimm/blk.c7
-rw-r--r--drivers/nvdimm/btt.c4
-rw-r--r--drivers/nvdimm/bus.c13
-rw-r--r--drivers/nvdimm/dimm.c7
-rw-r--r--drivers/nvdimm/dimm_devs.c18
-rw-r--r--drivers/nvdimm/namespace_devs.c10
-rw-r--r--drivers/nvdimm/pmem.c9
-rw-r--r--drivers/nvdimm/region.c4
-rw-r--r--drivers/nvme/host/core.c96
-rw-r--r--drivers/nvme/host/fabrics.c11
-rw-r--r--drivers/nvme/host/fc.c2
-rw-r--r--drivers/nvme/host/hwmon.c32
-rw-r--r--drivers/nvme/host/lightnvm.c7
-rw-r--r--drivers/nvme/host/multipath.c12
-rw-r--r--drivers/nvme/host/nvme.h17
-rw-r--r--drivers/nvme/host/pci.c36
-rw-r--r--drivers/nvme/host/rdma.c36
-rw-r--r--drivers/nvme/host/tcp.c55
-rw-r--r--drivers/nvme/host/trace.c53
-rw-r--r--drivers/nvme/host/zns.c11
-rw-r--r--drivers/nvme/target/admin-cmd.c150
-rw-r--r--drivers/nvme/target/configfs.c56
-rw-r--r--drivers/nvme/target/core.c39
-rw-r--r--drivers/nvme/target/fc.c83
-rw-r--r--drivers/nvme/target/fcloop.c2
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c23
-rw-r--r--drivers/nvme/target/io-cmd-file.c5
-rw-r--r--drivers/nvme/target/nvmet.h27
-rw-r--r--drivers/nvme/target/passthru.c12
-rw-r--r--drivers/nvme/target/tcp.c62
-rw-r--r--drivers/nvme/target/trace.h9
-rw-r--r--drivers/nvmem/Kconfig8
-rw-r--r--drivers/nvmem/Makefile2
-rw-r--r--drivers/nvmem/core.c5
-rw-r--r--drivers/nvmem/imx-iim.c7
-rw-r--r--drivers/nvmem/qcom-spmi-sdam.c7
-rw-r--r--drivers/nvmem/rmem.c97
-rw-r--r--drivers/of/base.c4
-rw-r--r--drivers/of/device.c31
-rw-r--r--drivers/of/fdt.c12
-rw-r--r--drivers/of/platform.c5
-rw-r--r--drivers/of/property.c59
-rw-r--r--drivers/of/unittest.c2
-rw-r--r--drivers/opp/core.c800
-rw-r--r--drivers/opp/of.c230
-rw-r--r--drivers/opp/opp.h19
-rw-r--r--drivers/oprofile/buffer_sync.c591
-rw-r--r--drivers/oprofile/buffer_sync.h22
-rw-r--r--drivers/oprofile/cpu_buffer.c465
-rw-r--r--drivers/oprofile/cpu_buffer.h121
-rw-r--r--drivers/oprofile/event_buffer.c209
-rw-r--r--drivers/oprofile/event_buffer.h40
-rw-r--r--drivers/oprofile/nmi_timer_int.c157
-rw-r--r--drivers/oprofile/oprof.c286
-rw-r--r--drivers/oprofile/oprof.h50
-rw-r--r--drivers/oprofile/oprofile_files.c201
-rw-r--r--drivers/oprofile/oprofile_perf.c328
-rw-r--r--drivers/oprofile/oprofile_stats.c84
-rw-r--r--drivers/oprofile/oprofile_stats.h33
-rw-r--r--drivers/oprofile/oprofilefs.c300
-rw-r--r--drivers/oprofile/timer_int.c122
-rw-r--r--drivers/parport/share.c2
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/controller/Kconfig35
-rw-r--r--drivers/pci/controller/Makefile2
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c3
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c60
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c86
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h11
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c7
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c5
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c8
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c53
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c70
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h4
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c22
-rw-r--r--drivers/pci/controller/pci-host-common.c4
-rw-r--r--drivers/pci/controller/pci-hyperv.c4
-rw-r--r--drivers/pci/controller/pci-xgene-msi.c10
-rw-r--r--drivers/pci/controller/pci-xgene.c13
-rw-r--r--drivers/pci/controller/pcie-altera-msi.c3
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c35
-rw-r--r--drivers/pci/controller/pcie-mediatek.c7
-rw-r--r--drivers/pci/controller/pcie-microchip-host.c1138
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c2
-rw-r--r--drivers/pci/controller/pcie-rockchip.c12
-rw-r--r--drivers/pci/controller/pcie-tango.c341
-rw-r--r--drivers/pci/controller/pcie-xilinx-cpm.c1
-rw-r--r--drivers/pci/endpoint/functions/Kconfig13
-rw-r--r--drivers/pci/endpoint/functions/Makefile1
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-ntb.c2128
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c13
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c176
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c130
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c105
-rw-r--r--drivers/pci/hotplug/acpiphp.h3
-rw-r--r--drivers/pci/pci-bridge-emul.c11
-rw-r--r--drivers/pci/pci-sysfs.c11
-rw-r--r--drivers/pci/pci.c23
-rw-r--r--drivers/pci/pci.h5
-rw-r--r--drivers/pci/pcie/Kconfig8
-rw-r--r--drivers/pci/pcie/Makefile1
-rw-r--r--drivers/pci/pcie/aer.c5
-rw-r--r--drivers/pci/pcie/aspm.c44
-rw-r--r--drivers/pci/pcie/bw_notification.c138
-rw-r--r--drivers/pci/pcie/err.c16
-rw-r--r--drivers/pci/pcie/portdrv.h6
-rw-r--r--drivers/pci/pcie/portdrv_pci.c4
-rw-r--r--drivers/pci/proc.c6
-rw-r--r--drivers/pci/search.c4
-rw-r--r--drivers/pci/setup-res.c6
-rw-r--r--drivers/pci/syscall.c10
-rw-r--r--drivers/pcmcia/cistpl.c4
-rw-r--r--drivers/pcmcia/sa1111_generic.c3
-rw-r--r--drivers/perf/Kconfig2
-rw-r--r--drivers/perf/arm-cci.c7
-rw-r--r--drivers/perf/arm-cmn.c19
-rw-r--r--drivers/perf/arm_dmc620_pmu.c5
-rw-r--r--drivers/perf/arm_pmu.c2
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c8
-rw-r--r--drivers/perf/arm_spe_pmu.c23
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c10
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c2
-rw-r--r--drivers/perf/qcom_l2_pmu.c6
-rw-r--r--drivers/perf/qcom_l3_pmu.c6
-rw-r--r--drivers/perf/xgene_pmu.c5
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/broadcom/Kconfig3
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c2
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c18
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c1
-rw-r--r--drivers/phy/ingenic/phy-ingenic-usb.c23
-rw-r--r--drivers/phy/lantiq/phy-lantiq-rcu-usb2.c10
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.c1
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi.c3
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c430
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h147
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c74
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c13
-rw-r--r--drivers/phy/rockchip/phy-rockchip-emmc.c12
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c222
-rw-r--r--drivers/phy/xilinx/phy-zynqmp.c11
-rw-r--r--drivers/pinctrl/Kconfig38
-rw-r--r--drivers/pinctrl/Makefile5
-rw-r--r--drivers/pinctrl/actions/Kconfig3
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c2
-rw-r--r--drivers/pinctrl/core.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c1
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c117
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h4
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c1
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c13
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c4
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c1
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c2
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c137
-rw-r--r--drivers/pinctrl/pinctrl-at91.c3
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c774
-rw-r--r--drivers/pinctrl/pinctrl-coh901.h6
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c98
-rw-r--r--drivers/pinctrl/pinctrl-k210.c985
-rw-r--r--drivers/pinctrl/pinctrl-single.c1
-rw-r--r--drivers/pinctrl/pinctrl-st.c1
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c1
-rw-r--r--drivers/pinctrl/pinctrl-u300.c1111
-rw-r--r--drivers/pinctrl/pinmux.c2
-rw-r--r--drivers/pinctrl/qcom/Kconfig18
-rw-r--r--drivers/pinctrl/qcom/Makefile2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8180x.c1624
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm845.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350.c1649
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c1
-rw-r--r--drivers/pinctrl/ralink/pinctrl-rt2880.c47
-rw-r--r--drivers/pinctrl/renesas/Kconfig5
-rw-r--r--drivers/pinctrl/renesas/Makefile1
-rw-r--r--drivers/pinctrl/renesas/core.c38
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77950.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77951.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7796.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77965.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779a0.c4460
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c16
-rw-r--r--drivers/pinctrl/renesas/sh_pfc.h28
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c16
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c4
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c4
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c22
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h2
-rw-r--r--drivers/pinctrl/sirf/Makefile7
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas6.c1137
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c6157
-rw-r--r--drivers/pinctrl/sirf/pinctrl-prima2.c1131
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c894
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.h116
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c2
-rw-r--r--drivers/pinctrl/sunxi/Kconfig10
-rw-r--r--drivers/pinctrl/sunxi/Makefile2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c56
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c548
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c7
-rw-r--r--drivers/pinctrl/visconti/pinctrl-common.c23
-rw-r--r--drivers/pinctrl/zte/Kconfig14
-rw-r--r--drivers/pinctrl/zte/Makefile3
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.c445
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.h102
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx296718.c1024
-rw-r--r--drivers/platform/chrome/cros_ec.c33
-rw-r--r--drivers/platform/chrome/cros_ec.h4
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c6
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c2
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c12
-rw-r--r--drivers/platform/chrome/cros_ec_rpmsg.c6
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c5
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c317
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c2
-rw-r--r--drivers/platform/chrome/wilco_ec/sysfs.c2
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c28
-rw-r--r--drivers/platform/olpc/olpc-ec.c37
-rw-r--r--drivers/platform/surface/Kconfig57
-rw-r--r--drivers/platform/surface/Makefile4
-rw-r--r--drivers/platform/surface/aggregator/Kconfig68
-rw-r--r--drivers/platform/surface/aggregator/Makefile17
-rw-r--r--drivers/platform/surface/aggregator/bus.c415
-rw-r--r--drivers/platform/surface/aggregator/bus.h27
-rw-r--r--drivers/platform/surface/aggregator/controller.c2579
-rw-r--r--drivers/platform/surface/aggregator/controller.h285
-rw-r--r--drivers/platform/surface/aggregator/core.c839
-rw-r--r--drivers/platform/surface/aggregator/ssh_msgb.h205
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.c2074
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.h190
-rw-r--r--drivers/platform/surface/aggregator/ssh_parser.c228
-rw-r--r--drivers/platform/surface/aggregator/ssh_parser.h154
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.c1263
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.h143
-rw-r--r--drivers/platform/surface/aggregator/trace.h632
-rw-r--r--drivers/platform/surface/surface3-wmi.c6
-rw-r--r--drivers/platform/surface/surface_acpi_notify.c886
-rw-r--r--drivers/platform/surface/surface_aggregator_cdev.c322
-rw-r--r--drivers/platform/surface/surface_hotplug.c282
-rw-r--r--drivers/platform/x86/Kconfig207
-rw-r--r--drivers/platform/x86/Makefile19
-rw-r--r--drivers/platform/x86/acer-wmi.c4
-rw-r--r--drivers/platform/x86/acerhdf.c3
-rw-r--r--drivers/platform/x86/amd-pmc.c14
-rw-r--r--drivers/platform/x86/asus-laptop.c6
-rw-r--r--drivers/platform/x86/dell/Kconfig207
-rw-r--r--drivers/platform/x86/dell/Makefile21
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.c (renamed from drivers/platform/x86/alienware-wmi.c)0
-rw-r--r--drivers/platform/x86/dell/dcdbas.c (renamed from drivers/platform/x86/dcdbas.c)0
-rw-r--r--drivers/platform/x86/dell/dcdbas.h (renamed from drivers/platform/x86/dcdbas.h)0
-rw-r--r--drivers/platform/x86/dell/dell-laptop.c (renamed from drivers/platform/x86/dell-laptop.c)0
-rw-r--r--drivers/platform/x86/dell/dell-rbtn.c (renamed from drivers/platform/x86/dell-rbtn.c)0
-rw-r--r--drivers/platform/x86/dell/dell-rbtn.h (renamed from drivers/platform/x86/dell-rbtn.h)0
-rw-r--r--drivers/platform/x86/dell/dell-smbios-base.c (renamed from drivers/platform/x86/dell-smbios-base.c)0
-rw-r--r--drivers/platform/x86/dell/dell-smbios-smm.c (renamed from drivers/platform/x86/dell-smbios-smm.c)0
-rw-r--r--drivers/platform/x86/dell/dell-smbios-wmi.c (renamed from drivers/platform/x86/dell-smbios-wmi.c)0
-rw-r--r--drivers/platform/x86/dell/dell-smbios.h (renamed from drivers/platform/x86/dell-smbios.h)0
-rw-r--r--drivers/platform/x86/dell/dell-smo8800.c (renamed from drivers/platform/x86/dell-smo8800.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-aio.c (renamed from drivers/platform/x86/dell-wmi-aio.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-descriptor.c (renamed from drivers/platform/x86/dell-wmi-descriptor.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-descriptor.h (renamed from drivers/platform/x86/dell-wmi-descriptor.h)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-led.c (renamed from drivers/platform/x86/dell-wmi-led.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/Makefile (renamed from drivers/platform/x86/dell-wmi-sysman/Makefile)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c (renamed from drivers/platform/x86/dell-wmi-sysman/biosattr-interface.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h (renamed from drivers/platform/x86/dell-wmi-sysman/dell-wmi-sysman.h)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c (renamed from drivers/platform/x86/dell-wmi-sysman/enum-attributes.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c (renamed from drivers/platform/x86/dell-wmi-sysman/int-attributes.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c (renamed from drivers/platform/x86/dell-wmi-sysman/passobj-attributes.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c (renamed from drivers/platform/x86/dell-wmi-sysman/passwordattr-interface.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c (renamed from drivers/platform/x86/dell-wmi-sysman/string-attributes.c)0
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/sysman.c (renamed from drivers/platform/x86/dell-wmi-sysman/sysman.c)6
-rw-r--r--drivers/platform/x86/dell/dell-wmi.c (renamed from drivers/platform/x86/dell-wmi.c)0
-rw-r--r--drivers/platform/x86/dell/dell_rbu.c (renamed from drivers/platform/x86/dell_rbu.c)0
-rw-r--r--drivers/platform/x86/hp-wmi.c14
-rw-r--r--drivers/platform/x86/ideapad-laptop.c1456
-rw-r--r--drivers/platform/x86/intel-uncore-frequency.c1
-rw-r--r--drivers/platform/x86/intel-vbtn.c138
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c233
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c560
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c2
-rw-r--r--drivers/platform/x86/intel_scu_pcidrv.c22
-rw-r--r--drivers/platform/x86/intel_scu_wdt.c (renamed from arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c)41
-rw-r--r--drivers/platform/x86/msi-wmi.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c464
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c27
-rw-r--r--drivers/pnp/interface.c1
-rw-r--r--drivers/pnp/pnpbios/bioscalls.c3
-rw-r--r--drivers/power/reset/Kconfig15
-rw-r--r--drivers/power/reset/Makefile2
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c74
-rw-r--r--drivers/power/reset/atc260x-poweroff.c262
-rw-r--r--drivers/power/reset/linkstation-poweroff.c1
-rw-r--r--drivers/power/reset/zx-reboot.c86
-rw-r--r--drivers/power/supply/Kconfig27
-rw-r--r--drivers/power/supply/Makefile3
-rw-r--r--drivers/power/supply/ab8500_fg.c2
-rw-r--r--drivers/power/supply/acer_a500_battery.c297
-rw-r--r--drivers/power/supply/axp20x_usb_power.c2
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c6
-rw-r--r--drivers/power/supply/bq24190_charger.c2
-rw-r--r--drivers/power/supply/bq256xx_charger.c1749
-rw-r--r--drivers/power/supply/bq25980_charger.c2
-rw-r--r--drivers/power/supply/bq27xxx_battery.c39
-rw-r--r--drivers/power/supply/charger-manager.c8
-rw-r--r--drivers/power/supply/cpcap-battery.c217
-rw-r--r--drivers/power/supply/cpcap-charger.c262
-rw-r--r--drivers/power/supply/ds2760_battery.c2
-rw-r--r--drivers/power/supply/ds2780_battery.c8
-rw-r--r--drivers/power/supply/ingenic-battery.c2
-rw-r--r--drivers/power/supply/ltc4162-l-charger.c931
-rw-r--r--drivers/power/supply/max14656_charger_detector.c2
-rw-r--r--drivers/power/supply/max8903_charger.c360
-rw-r--r--drivers/power/supply/max8997_charger.c96
-rw-r--r--drivers/power/supply/power_supply_hwmon.c2
-rw-r--r--drivers/power/supply/power_supply_sysfs.c2
-rw-r--r--drivers/power/supply/smb347-charger.c12
-rw-r--r--drivers/power/supply/wm97xx_battery.c45
-rw-r--r--drivers/power/supply/z2_battery.c46
-rw-r--r--drivers/powercap/Kconfig13
-rw-r--r--drivers/powercap/Makefile2
-rw-r--r--drivers/powercap/dtpm.c483
-rw-r--r--drivers/powercap/dtpm_cpu.c257
-rw-r--r--drivers/powercap/intel_rapl_common.c9
-rw-r--r--drivers/ptp/idt8a340_reg.h10
-rw-r--r--drivers/ptp/ptp_clockmatrix.c313
-rw-r--r--drivers/ptp/ptp_clockmatrix.h17
-rw-r--r--drivers/pwm/Kconfig10
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/pwm-iqs620a.c94
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c2
-rw-r--r--drivers/pwm/pwm-rockchip.c32
-rw-r--r--drivers/pwm/pwm-zx.c278
-rw-r--r--drivers/rapidio/rio.c2
-rw-r--r--drivers/regulator/Kconfig51
-rw-r--r--drivers/regulator/Makefile5
-rw-r--r--drivers/regulator/ab3100.c724
-rw-r--r--drivers/regulator/ab8500-ext.c422
-rw-r--r--drivers/regulator/ab8500.c116
-rw-r--r--drivers/regulator/atc260x-regulator.c539
-rw-r--r--drivers/regulator/axp20x-regulator.c7
-rw-r--r--drivers/regulator/bd70528-regulator.c11
-rw-r--r--drivers/regulator/bd71828-regulator.c13
-rw-r--r--drivers/regulator/bd718x7-regulator.c20
-rw-r--r--drivers/regulator/bd9571mwv-regulator.c59
-rw-r--r--drivers/regulator/core.c62
-rw-r--r--drivers/regulator/mcp16502.c2
-rw-r--r--drivers/regulator/mt6315-regulator.c299
-rw-r--r--drivers/regulator/mtk-dvfsrc-regulator.c215
-rw-r--r--drivers/regulator/pca9450-regulator.c22
-rw-r--r--drivers/regulator/pf8x00-regulator.c278
-rw-r--r--drivers/regulator/qcom-labibb-regulator.c728
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c34
-rw-r--r--drivers/regulator/rohm-regulator.c9
-rw-r--r--drivers/regulator/rt4831-regulator.c198
-rw-r--r--drivers/regulator/s5m8767.c15
-rw-r--r--drivers/remoteproc/Kconfig25
-rw-r--r--drivers/remoteproc/ingenic_rproc.c7
-rw-r--r--drivers/remoteproc/mtk_common.h7
-rw-r--r--drivers/remoteproc/mtk_scp.c82
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c63
-rw-r--r--drivers/remoteproc/qcom_wcnss.c2
-rw-r--r--drivers/remoteproc/qcom_wcnss_iris.c1
-rw-r--r--drivers/remoteproc/remoteproc_core.c2
-rw-r--r--drivers/remoteproc/stm32_rproc.c23
-rw-r--r--drivers/reset/Kconfig12
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c4
-rw-r--r--drivers/reset/hisilicon/reset-hi3660.c9
-rw-r--r--drivers/reset/reset-k210.c131
-rw-r--r--drivers/reset/reset-simple.c2
-rw-r--r--drivers/rpmsg/qcom_glink_ssr.c17
-rw-r--r--drivers/rtc/Kconfig50
-rw-r--r--drivers/rtc/Makefile5
-rw-r--r--drivers/rtc/class.c10
-rw-r--r--drivers/rtc/interface.c12
-rw-r--r--drivers/rtc/rtc-ab3100.c254
-rw-r--r--drivers/rtc/rtc-abx80x.c39
-rw-r--r--drivers/rtc/rtc-ac100.c4
-rw-r--r--drivers/rtc/rtc-armada38x.c21
-rw-r--r--drivers/rtc/rtc-asm9260.c6
-rw-r--r--drivers/rtc/rtc-bq32k.c2
-rw-r--r--drivers/rtc/rtc-brcmstb-waketimer.c2
-rw-r--r--drivers/rtc/rtc-cmos.c25
-rw-r--r--drivers/rtc/rtc-coh901331.c290
-rw-r--r--drivers/rtc/rtc-digicolor.c2
-rw-r--r--drivers/rtc/rtc-ds1305.c5
-rw-r--r--drivers/rtc/rtc-ds1307.c5
-rw-r--r--drivers/rtc/rtc-ds1672.c2
-rw-r--r--drivers/rtc/rtc-ds1685.c6
-rw-r--r--drivers/rtc/rtc-ds3232.c7
-rw-r--r--drivers/rtc/rtc-hym8563.c5
-rw-r--r--drivers/rtc/rtc-isl1208.c2
-rw-r--r--drivers/rtc/rtc-m41t80.c29
-rw-r--r--drivers/rtc/rtc-m48t59.c22
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c7
-rw-r--r--drivers/rtc/rtc-mcp795.c5
-rw-r--r--drivers/rtc/rtc-meson.c2
-rw-r--r--drivers/rtc/rtc-mrst.c521
-rw-r--r--drivers/rtc/rtc-mv.c14
-rw-r--r--drivers/rtc/rtc-mxc.c5
-rw-r--r--drivers/rtc/rtc-mxc_v2.c7
-rw-r--r--drivers/rtc/rtc-opal.c27
-rw-r--r--drivers/rtc/rtc-pcf2123.c5
-rw-r--r--drivers/rtc/rtc-pcf2127.c46
-rw-r--r--drivers/rtc/rtc-pcf85063.c49
-rw-r--r--drivers/rtc/rtc-pcf85363.c10
-rw-r--r--drivers/rtc/rtc-pcf8563.c2
-rw-r--r--drivers/rtc/rtc-pl030.c4
-rw-r--r--drivers/rtc/rtc-pl031.c12
-rw-r--r--drivers/rtc/rtc-pm8xxx.c18
-rw-r--r--drivers/rtc/rtc-r7301.c5
-rw-r--r--drivers/rtc/rtc-rs5c372.c2
-rw-r--r--drivers/rtc/rtc-rv3028.c23
-rw-r--r--drivers/rtc/rtc-rv3029c2.c22
-rw-r--r--drivers/rtc/rtc-rv3032.c13
-rw-r--r--drivers/rtc/rtc-rv8803.c13
-rw-r--r--drivers/rtc/rtc-rx6110.c4
-rw-r--r--drivers/rtc/rtc-rx8010.c21
-rw-r--r--drivers/rtc/rtc-rx8025.c5
-rw-r--r--drivers/rtc/rtc-rx8581.c2
-rw-r--r--drivers/rtc/rtc-s35390a.c2
-rw-r--r--drivers/rtc/rtc-s3c.c17
-rw-r--r--drivers/rtc/rtc-s5m.c33
-rw-r--r--drivers/rtc/rtc-sd3078.c2
-rw-r--r--drivers/rtc/rtc-sirfsoc.c446
-rw-r--r--drivers/rtc/rtc-stm32.c4
-rw-r--r--drivers/rtc/rtc-tegra.c6
-rw-r--r--drivers/rtc/rtc-tps65910.c19
-rw-r--r--drivers/rtc/rtc-tx4939.c303
-rw-r--r--drivers/s390/block/dasd.c26
-rw-r--r--drivers/s390/block/dasd_devmap.c20
-rw-r--r--drivers/s390/block/dasd_eckd.c3
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/block/dcssblk.c6
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/con3215.c1
-rw-r--r--drivers/s390/char/sclp_early_core.c4
-rw-r--r--drivers/s390/char/sclp_tty.c1
-rw-r--r--drivers/s390/char/sclp_vt220.c1
-rw-r--r--drivers/s390/char/tape_3590.c4
-rw-r--r--drivers/s390/char/tty3270.c2
-rw-r--r--drivers/s390/char/vmur.c2
-rw-r--r--drivers/s390/cio/css.c20
-rw-r--r--drivers/s390/cio/device.c39
-rw-r--r--drivers/s390/cio/qdio.h25
-rw-r--r--drivers/s390/cio/qdio_debug.c9
-rw-r--r--drivers/s390/cio/qdio_main.c209
-rw-r--r--drivers/s390/cio/qdio_setup.c19
-rw-r--r--drivers/s390/cio/qdio_thinint.c70
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c6
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c149
-rw-r--r--drivers/s390/crypto/vfio_ap_private.h12
-rw-r--r--drivers/s390/crypto/zcrypt_api.c14
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c15
-rw-r--r--drivers/s390/net/qeth_core.h44
-rw-r--r--drivers/s390/net/qeth_core_main.c110
-rw-r--r--drivers/s390/net/qeth_core_sys.c10
-rw-r--r--drivers/s390/net/qeth_l2_main.c6
-rw-r--r--drivers/s390/net/qeth_l3_main.c90
-rw-r--r--drivers/s390/scsi/zfcp_fc.h1
-rw-r--r--drivers/s390/virtio/virtio_ccw.c4
-rw-r--r--drivers/sbus/char/display7seg.c4
-rw-r--r--drivers/scsi/3w-9xxx.c56
-rw-r--r--drivers/scsi/3w-9xxx.h156
-rw-r--r--drivers/scsi/3w-sas.c52
-rw-r--r--drivers/scsi/3w-sas.h118
-rw-r--r--drivers/scsi/3w-xxxx.c251
-rw-r--r--drivers/scsi/3w-xxxx.h199
-rw-r--r--drivers/scsi/Kconfig16
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/aacraid/aachba.c173
-rw-r--r--drivers/scsi/advansys.c87
-rw-r--r--drivers/scsi/aha1542.c136
-rw-r--r--drivers/scsi/aha1542.h33
-rw-r--r--drivers/scsi/aic7xxx/aic79xx.h38
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c257
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c20
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.h37
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm_pci.c6
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_proc.c13
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.h2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_93cx6.c4
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c263
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c88
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.h39
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_proc.c15
-rw-r--r--drivers/scsi/aic7xxx/aiclib.h15
-rw-r--r--drivers/scsi/aic7xxx/scsi_message.h41
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c24
-rw-r--r--drivers/scsi/arm/acornscsi.c14
-rw-r--r--drivers/scsi/atp870u.c451
-rw-r--r--drivers/scsi/atp870u.h14
-rw-r--r--drivers/scsi/bfa/bfa_fc.h15
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c2
-rw-r--r--drivers/scsi/bfa/bfad_im.c2
-rw-r--r--drivers/scsi/bnx2fc/Kconfig1
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/dc395x.c28
-rw-r--r--drivers/scsi/dc395x.h38
-rw-r--r--drivers/scsi/dpt_i2o.c2
-rw-r--r--drivers/scsi/esp_scsi.c23
-rw-r--r--drivers/scsi/fdomain_isa.c3
-rw-r--r--drivers/scsi/g_NCR5380.c5
-rw-r--r--drivers/scsi/gdth.c4322
-rw-r--r--drivers/scsi/gdth.h981
-rw-r--r--drivers/scsi/gdth_ioctl.h251
-rw-r--r--drivers/scsi/gdth_proc.c586
-rw-r--r--drivers/scsi/gdth_proc.h18
-rw-r--r--drivers/scsi/hisi_sas/Kconfig6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h18
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c48
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c7
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c19
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c264
-rw-r--r--drivers/scsi/hpsa.c55
-rw-r--r--drivers/scsi/hpsa_cmd.h2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c1256
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h91
-rw-r--r--drivers/scsi/initio.c64
-rw-r--r--drivers/scsi/initio.h25
-rw-r--r--drivers/scsi/ips.c9
-rw-r--r--drivers/scsi/isci/port.c11
-rw-r--r--drivers/scsi/isci/request.c12
-rw-r--r--drivers/scsi/iscsi_tcp.c9
-rw-r--r--drivers/scsi/libiscsi.c496
-rw-r--r--drivers/scsi/libiscsi_tcp.c86
-rw-r--r--drivers/scsi/libsas/sas_event.c27
-rw-r--r--drivers/scsi/libsas/sas_init.c19
-rw-r--r--drivers/scsi/libsas/sas_internal.h6
-rw-r--r--drivers/scsi/lpfc/lpfc.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h15
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c49
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c36
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c241
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c21
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c48
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c33
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c59
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c141
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c2
-rw-r--r--drivers/scsi/mac53c94.c1
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c62
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h52
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c67
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h22
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c44
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c38
-rw-r--r--drivers/scsi/mvsas/mv_sas.c25
-rw-r--r--drivers/scsi/ncr53c8xx.c83
-rw-r--r--drivers/scsi/ncr53c8xx.h16
-rw-r--r--drivers/scsi/nsp32.c2
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c12
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.h11
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c69
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c20
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c21
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h2
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c280
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h17
-rw-r--r--drivers/scsi/pmcraid.h6
-rw-r--r--drivers/scsi/qedf/qedf_main.c2
-rw-r--r--drivers/scsi/qla1280.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c342
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h83
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c28
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h27
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h29
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c245
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c87
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c93
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c29
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h1
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/scsi_debug.c3
-rw-r--r--drivers/scsi/scsi_error.c25
-rw-r--r--drivers/scsi/scsi_lib.c3
-rw-r--r--drivers/scsi/scsi_transport_fc.c118
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c43
-rw-r--r--drivers/scsi/sd.c14
-rw-r--r--drivers/scsi/sd_zbc.c49
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/stex.c25
-rw-r--r--drivers/scsi/storvsc_drv.c60
-rw-r--r--drivers/scsi/ufs/Kconfig14
-rw-r--r--drivers/scsi/ufs/Makefile13
-rw-r--r--drivers/scsi/ufs/ufs-debugfs.c56
-rw-r--r--drivers/scsi/ufs/ufs-debugfs.h22
-rw-r--r--drivers/scsi/ufs/ufs-exynos.c9
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c1
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c18
-rw-r--r--drivers/scsi/ufs/ufs-sysfs.c175
-rw-r--r--drivers/scsi/ufs/ufs.h52
-rw-r--r--drivers/scsi/ufs/ufshcd-crypto.c13
-rw-r--r--drivers/scsi/ufs/ufshcd-crypto.h5
-rw-r--r--drivers/scsi/ufs/ufshcd.c575
-rw-r--r--drivers/scsi/ufs/ufshcd.h41
-rw-r--r--drivers/scsi/wd33c93.c6
-rw-r--r--drivers/sfi/Kconfig18
-rw-r--r--drivers/sfi/Makefile4
-rw-r--r--drivers/sfi/sfi_acpi.c214
-rw-r--r--drivers/sfi/sfi_core.c522
-rw-r--r--drivers/sfi/sfi_core.h81
-rw-r--r--drivers/soc/Kconfig3
-rw-r--r--drivers/soc/Makefile3
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-snoop.c30
-rw-r--r--drivers/soc/aspeed/aspeed-socinfo.c33
-rw-r--r--drivers/soc/atmel/soc.c240
-rw-r--r--drivers/soc/atmel/soc.h19
-rw-r--r--drivers/soc/bcm/Makefile2
-rw-r--r--drivers/soc/bcm/bcm63xx/Kconfig9
-rw-r--r--drivers/soc/bcm/bcm63xx/Makefile1
-rw-r--r--drivers/soc/bcm/bcm63xx/bcm-pmb.c333
-rw-r--r--drivers/soc/bcm/brcmstb/common.c17
-rw-r--r--drivers/soc/canaan/Kconfig12
-rw-r--r--drivers/soc/canaan/Makefile3
-rw-r--r--drivers/soc/canaan/k210-sysctl.c78
-rw-r--r--drivers/soc/fsl/qe/qe_common.c20
-rw-r--r--drivers/soc/imx/Kconfig2
-rw-r--r--drivers/soc/imx/soc-imx8m.c84
-rw-r--r--drivers/soc/kendryte/Kconfig14
-rw-r--r--drivers/soc/kendryte/Makefile3
-rw-r--r--drivers/soc/kendryte/k210-sysctl.c260
-rw-r--r--drivers/soc/litex/Kconfig15
-rw-r--r--drivers/soc/litex/litex_soc_ctrl.c116
-rw-r--r--drivers/soc/mediatek/Makefile1
-rw-r--r--drivers/soc/mediatek/mt8167-pm-domains.h86
-rw-r--r--drivers/soc/mediatek/mt8183-pm-domains.h1
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c32
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c (renamed from drivers/gpu/drm/mediatek/mtk_drm_ddp.c)328
-rw-r--r--drivers/soc/mediatek/mtk-pm-domains.c51
-rw-r--r--drivers/soc/mediatek/mtk-pm-domains.h2
-rw-r--r--drivers/soc/qcom/llcc-qcom.c50
-rw-r--r--drivers/soc/qcom/ocmem.c8
-rw-r--r--drivers/soc/qcom/qcom_aoss.c1
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c24
-rw-r--r--drivers/soc/qcom/rpmpd.c28
-rw-r--r--drivers/soc/qcom/smem.c4
-rw-r--r--drivers/soc/qcom/socinfo.c105
-rw-r--r--drivers/soc/renesas/rcar-sysc.c37
-rw-r--r--drivers/soc/samsung/Kconfig12
-rw-r--r--drivers/soc/samsung/Makefile3
-rw-r--r--drivers/soc/samsung/exynos-asv.c57
-rw-r--r--drivers/soc/samsung/exynos-asv.h2
-rw-r--r--drivers/soc/samsung/exynos-chipid.c71
-rw-r--r--drivers/soc/samsung/pm_domains.c97
-rw-r--r--drivers/soc/sifive/sifive_l2_cache.c27
-rw-r--r--drivers/soc/sunxi/sunxi_mbus.c5
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c31
-rw-r--r--drivers/soc/ti/k3-ringacc.c7
-rw-r--r--drivers/soc/ti/knav_dma.c1
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c3
-rw-r--r--drivers/soc/ti/omap_prm.c11
-rw-r--r--drivers/soc/ti/pm33xx.c5
-rw-r--r--drivers/soc/ti/pruss.c91
-rw-r--r--drivers/soc/xilinx/Kconfig17
-rw-r--r--drivers/soc/xilinx/Makefile1
-rw-r--r--drivers/soc/xilinx/xlnx_vcu.c628
-rw-r--r--drivers/soc/zte/Kconfig15
-rw-r--r--drivers/soc/zte/Makefile6
-rw-r--r--drivers/soc/zte/zx296718_pm_domains.c181
-rw-r--r--drivers/soc/zte/zx2967_pm_domains.c141
-rw-r--r--drivers/soc/zte/zx2967_pm_domains.h44
-rw-r--r--drivers/soundwire/bus.c179
-rw-r--r--drivers/soundwire/cadence_master.c31
-rw-r--r--drivers/soundwire/intel.c8
-rw-r--r--drivers/soundwire/intel.h2
-rw-r--r--drivers/soundwire/intel_init.c157
-rw-r--r--drivers/soundwire/slave.c10
-rw-r--r--drivers/soundwire/sysfs_slave.c2
-rw-r--r--drivers/spi/Kconfig34
-rw-r--r--drivers/spi/Makefile5
-rw-r--r--drivers/spi/atmel-quadspi.c1
-rw-r--r--drivers/spi/spi-altera.c3
-rw-r--r--drivers/spi/spi-atmel.c2
-rw-r--r--drivers/spi/spi-au1550.c53
-rw-r--r--drivers/spi/spi-bcm-qspi.c2
-rw-r--r--drivers/spi/spi-bcm2835.c8
-rw-r--r--drivers/spi/spi-bcm2835aux.c2
-rw-r--r--drivers/spi/spi-cadence-quadspi.c333
-rw-r--r--drivers/spi/spi-clps711x.c2
-rw-r--r--drivers/spi/spi-dw-bt1.c2
-rw-r--r--drivers/spi/spi-efm32.c462
-rw-r--r--drivers/spi/spi-fsl-spi.c2
-rw-r--r--drivers/spi/spi-hisi-sfc-v3xx.c33
-rw-r--r--drivers/spi/spi-imx.c2
-rw-r--r--drivers/spi/spi-mem.c23
-rw-r--r--drivers/spi/spi-mpc52xx.c16
-rw-r--r--drivers/spi/spi-mt65xx.c72
-rw-r--r--drivers/spi/spi-orion.c55
-rw-r--r--drivers/spi/spi-pl022.c5
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c29
-rw-r--r--drivers/spi/spi-pxa2xx.c4
-rw-r--r--drivers/spi/spi-qcom-qspi.c3
-rw-r--r--drivers/spi/spi-realtek-rtl.c209
-rw-r--r--drivers/spi/spi-rockchip.c2
-rw-r--r--drivers/spi/spi-rpc-if.c13
-rw-r--r--drivers/spi/spi-sh-msiof.c14
-rw-r--r--drivers/spi/spi-sirf.c1236
-rw-r--r--drivers/spi/spi-stm32.c150
-rw-r--r--drivers/spi/spi-synquacer.c4
-rw-r--r--drivers/spi/spi-tegra210-quad.c1410
-rw-r--r--drivers/spi/spi-txx9.c477
-rw-r--r--drivers/spi/spi.c61
-rw-r--r--drivers/spi/spidev.c1
-rw-r--r--drivers/spmi/spmi-pmic-arb.c5
-rw-r--r--drivers/staging/android/ashmem.c2
-rw-r--r--drivers/staging/board/Kconfig9
-rw-r--r--drivers/staging/clocking-wizard/TODO3
-rw-r--r--drivers/staging/comedi/comedi_fops.c4
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7x3x.c284
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c289
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c3
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.h2
-rw-r--r--drivers/staging/fbtft/fb_st7789v.c115
-rw-r--r--drivers/staging/fieldbus/anybuss/arcx-anybus.c4
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c6
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c149
-rw-r--r--drivers/staging/fwserial/fwserial.c2
-rw-r--r--drivers/staging/gasket/gasket_ioctl.c42
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c10
-rw-r--r--drivers/staging/greybus/audio_helper.c2
-rw-r--r--drivers/staging/greybus/audio_manager_sysfs.c4
-rw-r--r--drivers/staging/greybus/audio_module.c2
-rw-r--r--drivers/staging/greybus/audio_topology.c6
-rw-r--r--drivers/staging/greybus/hid.c6
-rw-r--r--drivers/staging/greybus/light.c3
-rw-r--r--drivers/staging/greybus/power_supply.c2
-rw-r--r--drivers/staging/greybus/spilib.c4
-rw-r--r--drivers/staging/hikey9xx/Kconfig2
-rw-r--r--drivers/staging/hikey9xx/hi6421-spmi-pmic.c331
-rw-r--r--drivers/staging/hikey9xx/hi6421v600-regulator.c533
-rw-r--r--drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml108
-rw-r--r--drivers/staging/hikey9xx/hisilicon,hisi-spmi-controller.yaml19
-rw-r--r--drivers/staging/hikey9xx/phy-hi3670-usb3.c81
-rw-r--r--drivers/staging/hikey9xx/phy-hi3670-usb3.yaml1
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/allegro-dvt/Kconfig16
-rw-r--r--drivers/staging/media/allegro-dvt/Makefile5
-rw-r--r--drivers/staging/media/allegro-dvt/TODO4
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c24
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm.c2
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_firmware.h1
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c1
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.c6
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c4
-rw-r--r--drivers/staging/media/imx/Kconfig9
-rw-r--r--drivers/staging/media/imx/Makefile2
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c10
-rw-r--r--drivers/staging/media/imx/imx-media-csc-scaler.c4
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c14
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c7
-rw-r--r--drivers/staging/media/imx/imx-media-of.c2
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c127
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c43
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c15
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c3
-rw-r--r--drivers/staging/media/omap4iss/iss.c1
-rw-r--r--drivers/staging/media/omap4iss/iss_video.h1
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c49
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h1
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c2
-rw-r--r--drivers/staging/media/tegra-video/csi.c35
-rw-r--r--drivers/staging/media/tegra-video/csi.h14
-rw-r--r--drivers/staging/media/tegra-video/tegra210.c340
-rw-r--r--drivers/staging/media/tegra-video/vi.c348
-rw-r--r--drivers/staging/media/tegra-video/vi.h23
-rw-r--r--drivers/staging/media/tegra-video/video.c18
-rw-r--r--drivers/staging/media/zoran/zoran_driver.c2
-rw-r--r--drivers/staging/most/net/net.c3
-rw-r--r--drivers/staging/most/sound/sound.c8
-rw-r--r--drivers/staging/most/video/video.c6
-rw-r--r--drivers/staging/mt7621-dma/Makefile2
-rw-r--r--drivers/staging/mt7621-dma/hsdma-mt7621.c (renamed from drivers/staging/mt7621-dma/mtk-hsdma.c)2
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi58
-rw-r--r--drivers/staging/nvec/nvec_power.c2
-rw-r--r--drivers/staging/nvec/nvec_ps2.c4
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c9
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c2
-rw-r--r--drivers/staging/qlge/Kconfig1
-rw-r--r--drivers/staging/qlge/Makefile2
-rw-r--r--drivers/staging/qlge/TODO10
-rw-r--r--drivers/staging/qlge/qlge.h244
-rw-r--r--drivers/staging/qlge/qlge_dbg.c1650
-rw-r--r--drivers/staging/qlge/qlge_devlink.c163
-rw-r--r--drivers/staging/qlge/qlge_devlink.h9
-rw-r--r--drivers/staging/qlge/qlge_ethtool.c239
-rw-r--r--drivers/staging/qlge/qlge_main.c1380
-rw-r--r--drivers/staging/qlge/qlge_mpi.c356
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c44
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h65
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c2
-rw-r--r--drivers/staging/rtl8192e/Kconfig1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c5
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c3
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c8
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c2
-rw-r--r--drivers/staging/rtl8192u/Kconfig1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c28
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_debug.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c2
-rw-r--r--drivers/staging/rtl8712/wifi.h14
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c24
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c225
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c2
-rw-r--r--drivers/staging/rtl8723bs/include/autoconf.h1
-rw-r--r--drivers/staging/rtl8723bs/include/hal_intf.h8
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h79
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_hal.h13
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_recv.h8
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h6
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_wifi_regd.h6
-rw-r--r--drivers/staging/rtl8723bs/include/wlan_bssdef.h2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c8
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/wifi_regd.c12
-rw-r--r--drivers/staging/sm750fb/sm750.c2
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c90
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/TODO2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c6
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c6
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.c6
-rw-r--r--drivers/staging/vc04_services/interface/TODO4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c22
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c3
-rw-r--r--drivers/staging/vme/devices/vme_user.c16
-rw-r--r--drivers/staging/vt6655/baseband.c4
-rw-r--r--drivers/staging/vt6655/rxtx.h8
-rw-r--r--drivers/staging/vt6656/rf.c2
-rw-r--r--drivers/staging/vt6656/rxtx.h6
-rw-r--r--drivers/staging/wfx/bh.c1
-rw-r--r--drivers/staging/wfx/bh.h4
-rw-r--r--drivers/staging/wfx/bus.h3
-rw-r--r--drivers/staging/wfx/bus_sdio.c6
-rw-r--r--drivers/staging/wfx/bus_spi.c7
-rw-r--r--drivers/staging/wfx/data_rx.c5
-rw-r--r--drivers/staging/wfx/data_tx.c15
-rw-r--r--drivers/staging/wfx/data_tx.h4
-rw-r--r--drivers/staging/wfx/debug.c6
-rw-r--r--drivers/staging/wfx/fwio.c2
-rw-r--r--drivers/staging/wfx/hif_api_cmd.h6
-rw-r--r--drivers/staging/wfx/hif_api_general.h9
-rw-r--r--drivers/staging/wfx/hif_tx.c4
-rw-r--r--drivers/staging/wfx/hif_tx_mib.c5
-rw-r--r--drivers/staging/wfx/hwio.c3
-rw-r--r--drivers/staging/wfx/hwio.h2
-rw-r--r--drivers/staging/wfx/key.c2
-rw-r--r--drivers/staging/wfx/key.h2
-rw-r--r--drivers/staging/wfx/main.c7
-rw-r--r--drivers/staging/wfx/main.h3
-rw-r--r--drivers/staging/wfx/queue.c4
-rw-r--r--drivers/staging/wfx/queue.h3
-rw-r--r--drivers/staging/wfx/scan.h2
-rw-r--r--drivers/staging/wfx/sta.c6
-rw-r--r--drivers/staging/wfx/sta.h2
-rw-r--r--drivers/staging/wfx/traces.h3
-rw-r--r--drivers/staging/wfx/wfx.h3
-rw-r--r--drivers/staging/wimax/i2400m/fw.c17
-rw-r--r--drivers/staging/wimax/i2400m/netdev.c6
-rw-r--r--drivers/staging/wimax/i2400m/rx.c7
-rw-r--r--drivers/staging/wimax/i2400m/tx.c8
-rw-r--r--drivers/staging/wimax/i2400m/usb.c4
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c28
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_target.c3
-rw-r--r--drivers/target/iscsi/iscsi_target.c20
-rw-r--r--drivers/target/iscsi/iscsi_target.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c11
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h2
-rw-r--r--drivers/target/sbp/sbp_target.c2
-rw-r--r--drivers/target/target_core_alua.c2
-rw-r--r--drivers/target/target_core_file.c22
-rw-r--r--drivers/target/target_core_iblock.c9
-rw-r--r--drivers/target/target_core_pr.c15
-rw-r--r--drivers/target/target_core_pscsi.c5
-rw-r--r--drivers/target/target_core_transport.c80
-rw-r--r--drivers/target/target_core_user.c189
-rw-r--r--drivers/tee/optee/call.c3
-rw-r--r--drivers/tee/optee/optee_msg.h158
-rw-r--r--drivers/tee/optee/optee_rpc_cmd.h103
-rw-r--r--drivers/tee/optee/optee_smc.h72
-rw-r--r--drivers/tee/optee/rpc.c70
-rw-r--r--drivers/thermal/Kconfig17
-rw-r--r--drivers/thermal/Makefile2
-rw-r--r--drivers/thermal/cpufreq_cooling.c71
-rw-r--r--drivers/thermal/da9062-thermal.c4
-rw-r--r--drivers/thermal/gov_power_allocator.c37
-rw-r--r--drivers/thermal/gov_step_wise.c14
-rw-r--r--drivers/thermal/intel/Kconfig4
-rw-r--r--drivers/thermal/intel/Makefile1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c6
-rw-r--r--drivers/thermal/intel/intel_pch_thermal.c6
-rw-r--r--drivers/thermal/intel/therm_throt.c (renamed from arch/x86/kernel/cpu/mce/therm_throt.c)41
-rw-r--r--drivers/thermal/intel/thermal_interrupt.h15
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c4
-rw-r--r--drivers/thermal/khadas_mcu_fan.c1
-rw-r--r--drivers/thermal/qcom/Kconfig11
-rw-r--r--drivers/thermal/qcom/Makefile1
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c623
-rw-r--r--drivers/thermal/tango_thermal.c126
-rw-r--r--drivers/thermal/thermal_core.c72
-rw-r--r--drivers/thermal/thermal_core.h7
-rw-r--r--drivers/thermal/thermal_helpers.c7
-rw-r--r--drivers/thermal/thermal_sysfs.c85
-rw-r--r--drivers/thermal/ti-soc-thermal/omap4-thermal-data.c7
-rw-r--r--drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h4
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c54
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.h2
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c6
-rw-r--r--drivers/thermal/zx2967_thermal.c256
-rw-r--r--drivers/thunderbolt/acpi.c67
-rw-r--r--drivers/thunderbolt/cap.c2
-rw-r--r--drivers/thunderbolt/ctl.c51
-rw-r--r--drivers/thunderbolt/dma_port.c2
-rw-r--r--drivers/thunderbolt/dma_test.c5
-rw-r--r--drivers/thunderbolt/domain.c48
-rw-r--r--drivers/thunderbolt/eeprom.c33
-rw-r--r--drivers/thunderbolt/icm.c10
-rw-r--r--drivers/thunderbolt/lc.c35
-rw-r--r--drivers/thunderbolt/nhi.c39
-rw-r--r--drivers/thunderbolt/path.c2
-rw-r--r--drivers/thunderbolt/switch.c82
-rw-r--r--drivers/thunderbolt/tb.c54
-rw-r--r--drivers/thunderbolt/tb.h22
-rw-r--r--drivers/thunderbolt/tb_regs.h1
-rw-r--r--drivers/thunderbolt/tunnel.c12
-rw-r--r--drivers/thunderbolt/usb4.c11
-rw-r--r--drivers/thunderbolt/xdomain.c15
-rw-r--r--drivers/tty/Makefile2
-rw-r--r--drivers/tty/amiserial.c3
-rw-r--r--drivers/tty/hvc/hvcs.c5
-rw-r--r--drivers/tty/ipwireless/tty.c1
-rw-r--r--drivers/tty/mxser.c1
-rw-r--r--drivers/tty/n_gsm.c3
-rw-r--r--drivers/tty/n_hdlc.c60
-rw-r--r--drivers/tty/n_null.c3
-rw-r--r--drivers/tty/n_r3964.c10
-rw-r--r--drivers/tty/n_tracerouter.c233
-rw-r--r--drivers/tty/n_tracesink.c228
-rw-r--r--drivers/tty/n_tracesink.h26
-rw-r--r--drivers/tty/n_tty.c153
-rw-r--r--drivers/tty/pty.c16
-rw-r--r--drivers/tty/serial/8250/8250_tegra.c11
-rw-r--r--drivers/tty/serial/Kconfig42
-rw-r--r--drivers/tty/serial/Makefile3
-rw-r--r--drivers/tty/serial/amba-pl010.c4
-rw-r--r--drivers/tty/serial/amba-pl011.c3
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c43
-rw-r--r--drivers/tty/serial/efm32-uart.c852
-rw-r--r--drivers/tty/serial/fsl_lpuart.c4
-rw-r--r--drivers/tty/serial/icom.c4
-rw-r--r--drivers/tty/serial/ifx6x60.c1390
-rw-r--r--drivers/tty/serial/ifx6x60.h118
-rw-r--r--drivers/tty/serial/imx.c2
-rw-r--r--drivers/tty/serial/lantiq.c2
-rw-r--r--drivers/tty/serial/max3100.c3
-rw-r--r--drivers/tty/serial/mxs-auart.c45
-rw-r--r--drivers/tty/serial/owl-uart.c38
-rw-r--r--drivers/tty/serial/serial_core.c11
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c1503
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h447
-rw-r--r--drivers/tty/serial/stm32-usart.c490
-rw-r--r--drivers/tty/serial/stm32-usart.h2
-rw-r--r--drivers/tty/synclink_gt.c1
-rw-r--r--drivers/tty/tty_io.c235
-rw-r--r--drivers/tty/vcc.c10
-rw-r--r--drivers/tty/vt/consolemap.c2
-rw-r--r--drivers/tty/vt/defkeymap.c_shipped82
-rw-r--r--drivers/tty/vt/keyboard.c18
-rw-r--r--drivers/tty/vt/vt.c42
-rw-r--r--drivers/tty/vt/vt_ioctl.c154
-rw-r--r--drivers/uio/uio_pci_generic.c2
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.h2
-rw-r--r--drivers/usb/c67x00/c67x00-sched.c12
-rw-r--r--drivers/usb/cdns3/Kconfig60
-rw-r--r--drivers/usb/cdns3/Makefile43
-rw-r--r--drivers/usb/cdns3/cdns3-debug.h (renamed from drivers/usb/cdns3/debug.h)0
-rw-r--r--drivers/usb/cdns3/cdns3-ep0.c (renamed from drivers/usb/cdns3/ep0.c)8
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c (renamed from drivers/usb/cdns3/gadget.c)34
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.h (renamed from drivers/usb/cdns3/gadget.h)0
-rw-r--r--drivers/usb/cdns3/cdns3-imx.c2
-rw-r--r--drivers/usb/cdns3/cdns3-plat.c315
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c1
-rw-r--r--drivers/usb/cdns3/cdns3-trace.c (renamed from drivers/usb/cdns3/trace.c)2
-rw-r--r--drivers/usb/cdns3/cdns3-trace.h (renamed from drivers/usb/cdns3/trace.h)6
-rw-r--r--drivers/usb/cdns3/cdnsp-debug.h583
-rw-r--r--drivers/usb/cdns3/cdnsp-ep0.c489
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c2009
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.h1601
-rw-r--r--drivers/usb/cdns3/cdnsp-mem.c1336
-rw-r--r--drivers/usb/cdns3/cdnsp-pci.c254
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c2438
-rw-r--r--drivers/usb/cdns3/cdnsp-trace.c12
-rw-r--r--drivers/usb/cdns3/cdnsp-trace.h830
-rw-r--r--drivers/usb/cdns3/core.c455
-rw-r--r--drivers/usb/cdns3/core.h65
-rw-r--r--drivers/usb/cdns3/drd.c224
-rw-r--r--drivers/usb/cdns3/drd.h94
-rw-r--r--drivers/usb/cdns3/gadget-export.h22
-rw-r--r--drivers/usb/cdns3/host-export.h18
-rw-r--r--drivers/usb/cdns3/host.c26
-rw-r--r--drivers/usb/chipidea/Kconfig3
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c344
-rw-r--r--drivers/usb/chipidea/core.c10
-rw-r--r--drivers/usb/chipidea/host.c104
-rw-r--r--drivers/usb/class/cdc-acm.c6
-rw-r--r--drivers/usb/class/usblp.c19
-rw-r--r--drivers/usb/class/usbtmc.c85
-rw-r--r--drivers/usb/common/common.c26
-rw-r--r--drivers/usb/core/hcd.c4
-rw-r--r--drivers/usb/core/quirks.c9
-rw-r--r--drivers/usb/dwc2/gadget.c8
-rw-r--r--drivers/usb/dwc2/hcd.c15
-rw-r--r--drivers/usb/dwc2/hcd_intr.c14
-rw-r--r--drivers/usb/dwc2/params.c8
-rw-r--r--drivers/usb/dwc2/pci.c18
-rw-r--r--drivers/usb/dwc3/Kconfig10
-rw-r--r--drivers/usb/dwc3/Makefile1
-rw-r--r--drivers/usb/dwc3/core.c85
-rw-r--r--drivers/usb/dwc3/core.h11
-rw-r--r--drivers/usb/dwc3/drd.c25
-rw-r--r--drivers/usb/dwc3/dwc3-haps.c8
-rw-r--r--drivers/usb/dwc3/dwc3-imx8mp.c363
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c9
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c69
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c71
-rw-r--r--drivers/usb/dwc3/dwc3-st.c2
-rw-r--r--drivers/usb/dwc3/gadget.c245
-rw-r--r--drivers/usb/dwc3/host.c2
-rw-r--r--drivers/usb/gadget/composite.c104
-rw-r--r--drivers/usb/gadget/configfs.c24
-rw-r--r--drivers/usb/gadget/function/f_midi.c12
-rw-r--r--drivers/usb/gadget/function/f_printer.c5
-rw-r--r--drivers/usb/gadget/function/u_audio.c135
-rw-r--r--drivers/usb/gadget/function/u_ether.c33
-rw-r--r--drivers/usb/gadget/function/u_ether.h12
-rw-r--r--drivers/usb/gadget/function/u_ether_configfs.h15
-rw-r--r--drivers/usb/gadget/function/u_serial.c8
-rw-r--r--drivers/usb/gadget/legacy/Kconfig13
-rw-r--r--drivers/usb/gadget/legacy/ether.c4
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c3
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/hub.c8
-rw-r--r--drivers/usb/gadget/udc/bdc/Kconfig11
-rw-r--r--drivers/usb/gadget/udc/bdc/Makefile2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc.h134
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_cmd.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_cmd.h21
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_dbg.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_dbg.h10
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c16
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.h10
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_pci.c128
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_udc.c8
-rw-r--r--drivers/usb/gadget/udc/core.c39
-rw-r--r--drivers/usb/gadget/udc/snps_udc_core.c30
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c10
-rw-r--r--drivers/usb/host/Kconfig10
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-tegra.c604
-rw-r--r--drivers/usb/host/ohci-sa1111.c4
-rw-r--r--drivers/usb/host/xhci-ext-caps.c3
-rw-r--r--drivers/usb/host/xhci-mem.c21
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c130
-rw-r--r--drivers/usb/host/xhci-mtk.c2
-rw-r--r--drivers/usb/host/xhci-mtk.h15
-rw-r--r--drivers/usb/host/xhci-mvebu.c42
-rw-r--r--drivers/usb/host/xhci-mvebu.h6
-rw-r--r--drivers/usb/host/xhci-plat.c20
-rw-r--r--drivers/usb/host/xhci-plat.h1
-rw-r--r--drivers/usb/host/xhci-ring.c1142
-rw-r--r--drivers/usb/host/xhci.c102
-rw-r--r--drivers/usb/host/xhci.h41
-rw-r--r--drivers/usb/misc/usb251xb.c12
-rw-r--r--drivers/usb/misc/usb3503.c9
-rw-r--r--drivers/usb/musb/jz4740.c18
-rw-r--r--drivers/usb/musb/musb_core.c31
-rw-r--r--drivers/usb/musb/musb_gadget.c2
-rw-r--r--drivers/usb/musb/musbhsdma.c4
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c7
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c103
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c1
-rw-r--r--drivers/usb/serial/Kconfig9
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/ark3116.c11
-rw-r--r--drivers/usb/serial/belkin_sa.c6
-rw-r--r--drivers/usb/serial/bus.c27
-rw-r--r--drivers/usb/serial/ch341.c4
-rw-r--r--drivers/usb/serial/cp210x.c219
-rw-r--r--drivers/usb/serial/cyberjack.c6
-rw-r--r--drivers/usb/serial/cypress_m8.c6
-rw-r--r--drivers/usb/serial/digi_acceleport.c6
-rw-r--r--drivers/usb/serial/f81232.c12
-rw-r--r--drivers/usb/serial/f81534.c7
-rw-r--r--drivers/usb/serial/ftdi_sio.c27
-rw-r--r--drivers/usb/serial/garmin_gps.c3
-rw-r--r--drivers/usb/serial/io_edgeport.c6
-rw-r--r--drivers/usb/serial/io_ti.c12
-rw-r--r--drivers/usb/serial/iuu_phoenix.c4
-rw-r--r--drivers/usb/serial/keyspan.c6
-rw-r--r--drivers/usb/serial/keyspan_pda.c4
-rw-r--r--drivers/usb/serial/kl5kusb105.c6
-rw-r--r--drivers/usb/serial/kobil_sct.c6
-rw-r--r--drivers/usb/serial/mct_u232.c6
-rw-r--r--drivers/usb/serial/metro-usb.c4
-rw-r--r--drivers/usb/serial/mos7720.c12
-rw-r--r--drivers/usb/serial/mos7840.c8
-rw-r--r--drivers/usb/serial/mxuport.c7
-rw-r--r--drivers/usb/serial/omninet.c6
-rw-r--r--drivers/usb/serial/opticon.c4
-rw-r--r--drivers/usb/serial/option.c9
-rw-r--r--drivers/usb/serial/oti6858.c6
-rw-r--r--drivers/usb/serial/pl2303.c12
-rw-r--r--drivers/usb/serial/quatech2.c4
-rw-r--r--drivers/usb/serial/sierra.c4
-rw-r--r--drivers/usb/serial/spcp8x5.c4
-rw-r--r--drivers/usb/serial/ssu100.c4
-rw-r--r--drivers/usb/serial/symbolserial.c4
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c6
-rw-r--r--drivers/usb/serial/upd78f0730.c9
-rw-r--r--drivers/usb/serial/usb-wwan.h2
-rw-r--r--drivers/usb/serial/usb_wwan.c4
-rw-r--r--drivers/usb/serial/whiteheat.c6
-rw-r--r--drivers/usb/serial/xr_serial.c611
-rw-r--r--drivers/usb/typec/altmodes/displayport.c17
-rw-r--r--drivers/usb/typec/class.c104
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c9
-rw-r--r--drivers/usb/typec/tcpm/tcpci.h6
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.c35
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c1173
-rw-r--r--drivers/usb/typec/ucsi/Kconfig1
-rw-r--r--drivers/usb/typec/ucsi/displayport.c32
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c56
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h3
-rw-r--r--drivers/usb/usbip/stub_main.c4
-rw-r--r--drivers/usb/usbip/usbip_common.h29
-rw-r--r--drivers/usb/usbip/vhci_rx.c2
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c1
-rw-r--r--drivers/vdpa/Kconfig1
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c2
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h1
-rw-r--r--drivers/vdpa/mlx5/core/mr.c28
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c22
-rw-r--r--drivers/vdpa/vdpa.c503
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c3
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.h2
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c98
-rw-r--r--drivers/vfio/pci/Kconfig12
-rw-r--r--drivers/vfio/pci/Makefile2
-rw-r--r--drivers/vfio/pci/vfio_pci.c12
-rw-r--r--drivers/vfio/pci/vfio_pci_igd.c10
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h2
-rw-r--r--drivers/vfio/pci/vfio_pci_zdev.c24
-rw-r--r--drivers/vfio/platform/vfio_amba.c15
-rw-r--r--drivers/vfio/vfio.c5
-rw-r--r--drivers/vfio/vfio_iommu_type1.c564
-rw-r--r--drivers/vhost/net.c30
-rw-r--r--drivers/vhost/scsi.c9
-rw-r--r--drivers/video/backlight/ktd253-backlight.c12
-rw-r--r--drivers/video/backlight/lms283gf05.c43
-rw-r--r--drivers/video/backlight/locomolcd.c3
-rw-r--r--drivers/video/backlight/qcom-wled.c2
-rw-r--r--drivers/video/backlight/sky81452-backlight.c2
-rw-r--r--drivers/video/console/vgacon.c19
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/acornfb.c34
-rw-r--r--drivers/video/fbdev/amba-clcd.c4
-rw-r--r--drivers/video/fbdev/amifb.c4
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c20
-rw-r--r--drivers/video/fbdev/aty/mach64_ct.c15
-rw-r--r--drivers/video/fbdev/aty/radeon_monitor.c4
-rw-r--r--drivers/video/fbdev/bw2.c2
-rw-r--r--drivers/video/fbdev/cg3.c2
-rw-r--r--drivers/video/fbdev/cg6.c2
-rw-r--r--drivers/video/fbdev/cirrusfb.c20
-rw-r--r--drivers/video/fbdev/controlfb.c4
-rw-r--r--drivers/video/fbdev/core/fb_notify.c7
-rw-r--r--drivers/video/fbdev/core/fbcon.c25
-rw-r--r--drivers/video/fbdev/core/fbmon.c2
-rw-r--r--drivers/video/fbdev/da8xx-fb.c4
-rw-r--r--drivers/video/fbdev/efifb.c3
-rw-r--r--drivers/video/fbdev/ffb.c2
-rw-r--r--drivers/video/fbdev/gbefb.c4
-rw-r--r--drivers/video/fbdev/goldfishfb.c2
-rw-r--r--drivers/video/fbdev/hgafb.c10
-rw-r--r--drivers/video/fbdev/imxfb.c2
-rw-r--r--drivers/video/fbdev/leo.c2
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_spi.c2
-rw-r--r--drivers/video/fbdev/mx3fb.c13
-rw-r--r--drivers/video/fbdev/neofb.c4
-rw-r--r--drivers/video/fbdev/nvidia/nv_setup.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/Kconfig1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dpi.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c18
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c4
-rw-r--r--drivers/video/fbdev/p9100.c2
-rw-r--r--drivers/video/fbdev/pm2fb.c8
-rw-r--r--drivers/video/fbdev/riva/fbdev.c9
-rw-r--r--drivers/video/fbdev/riva/riva_hw.c28
-rw-r--r--drivers/video/fbdev/s1d13xxxfb.c3
-rw-r--r--drivers/video/fbdev/s3c-fb.c11
-rw-r--r--drivers/video/fbdev/sis/init.c33
-rw-r--r--drivers/video/fbdev/sis/oem310.h2
-rw-r--r--drivers/video/fbdev/sis/sis.h1
-rw-r--r--drivers/video/fbdev/sis/sis_main.c9
-rw-r--r--drivers/video/fbdev/sstfb.c2
-rw-r--r--drivers/video/fbdev/tcx.c2
-rw-r--r--drivers/video/fbdev/tdfxfb.c4
-rw-r--r--drivers/video/fbdev/tgafb.c7
-rw-r--r--drivers/video/fbdev/udlfb.c1
-rw-r--r--drivers/video/fbdev/uvesafb.c6
-rw-r--r--drivers/video/fbdev/via/lcd.c4
-rw-r--r--drivers/video/fbdev/wmt_ge_rops.c1
-rw-r--r--drivers/video/fbdev/xilinxfb.c2
-rw-r--r--drivers/video/of_display_timing.c1
-rw-r--r--drivers/video/of_videomode.c6
-rw-r--r--drivers/virt/Kconfig2
-rw-r--r--drivers/virt/Makefile1
-rw-r--r--drivers/virt/acrn/Kconfig15
-rw-r--r--drivers/virt/acrn/Makefile3
-rw-r--r--drivers/virt/acrn/acrn_drv.h227
-rw-r--r--drivers/virt/acrn/hsm.c470
-rw-r--r--drivers/virt/acrn/hypercall.h254
-rw-r--r--drivers/virt/acrn/ioeventfd.c273
-rw-r--r--drivers/virt/acrn/ioreq.c657
-rw-r--r--drivers/virt/acrn/irqfd.c235
-rw-r--r--drivers/virt/acrn/mm.c306
-rw-r--r--drivers/virt/acrn/vm.c126
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c18
-rw-r--r--drivers/virtio/Kconfig9
-rw-r--r--drivers/virtio/Makefile1
-rw-r--r--drivers/virtio/virtio_input.c26
-rw-r--r--drivers/virtio/virtio_mem.c45
-rw-r--r--drivers/virtio/virtio_mmio.c2
-rw-r--r--drivers/virtio/virtio_pci_common.h22
-rw-r--r--drivers/virtio/virtio_pci_modern.c506
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c599
-rw-r--r--drivers/virtio/virtio_vdpa.c3
-rw-r--r--drivers/vme/vme.c4
-rw-r--r--drivers/w1/masters/ds2490.c25
-rw-r--r--drivers/w1/slaves/w1_therm.c22
-rw-r--r--drivers/w1/w1.c39
-rw-r--r--drivers/watchdog/Kconfig74
-rw-r--r--drivers/watchdog/Makefile7
-rw-r--r--drivers/watchdog/atlas7_wdt.c221
-rw-r--r--drivers/watchdog/coh901327_wdt.c408
-rw-r--r--drivers/watchdog/hpwdt.c6
-rw-r--r--drivers/watchdog/intel-mid_wdt.c8
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c533
-rw-r--r--drivers/watchdog/intel_scu_watchdog.h50
-rw-r--r--drivers/watchdog/it8712f_wdt.c1
-rw-r--r--drivers/watchdog/keembay_wdt.c286
-rw-r--r--drivers/watchdog/mei_wdt.c5
-rw-r--r--drivers/watchdog/mtk_wdt.c23
-rw-r--r--drivers/watchdog/pcwd.c7
-rw-r--r--drivers/watchdog/qcom-wdt.c13
-rw-r--r--drivers/watchdog/renesas_wdt.c33
-rw-r--r--drivers/watchdog/sirfsoc_wdt.c216
-rw-r--r--drivers/watchdog/sp805_wdt.c4
-rw-r--r--drivers/watchdog/tangox_wdt.c209
-rw-r--r--drivers/watchdog/watchdog_core.c2
-rw-r--r--drivers/watchdog/ziirave_wdt.c1
-rw-r--r--drivers/watchdog/zx2967_wdt.c279
-rw-r--r--drivers/xen/balloon.c2
-rw-r--r--drivers/xen/events/events_base.c68
-rw-r--r--drivers/xen/evtchn.c29
-rw-r--r--drivers/xen/gntdev.c37
-rw-r--r--drivers/xen/pvcalls-back.c4
-rw-r--r--drivers/xen/xen-acpi-processor.c3
-rw-r--r--drivers/xen/xen-front-pgdir-shbuf.c11
-rw-r--r--drivers/xen/xen-pciback/xenbus.c2
-rw-r--r--drivers/xen/xen-scsiback.c6
-rw-r--r--drivers/xen/xenbus/xenbus.h1
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c99
-rw-r--r--fs/9p/acl.c8
-rw-r--r--fs/9p/v9fs.h3
-rw-r--r--fs/9p/v9fs_vfs.h3
-rw-r--r--fs/9p/vfs_inode.c57
-rw-r--r--fs/9p/vfs_inode_dotl.c39
-rw-r--r--fs/9p/xattr.c1
-rw-r--r--fs/Kconfig6
-rw-r--r--fs/Kconfig.binfmt4
-rw-r--r--fs/Makefile1
-rw-r--r--fs/adfs/adfs.h3
-rw-r--r--fs/adfs/inode.c5
-rw-r--r--fs/affs/affs.h24
-rw-r--r--fs/affs/inode.c7
-rw-r--r--fs/affs/namei.c19
-rw-r--r--fs/afs/dir.c34
-rw-r--r--fs/afs/inode.c9
-rw-r--r--fs/afs/internal.h7
-rw-r--r--fs/afs/main.c6
-rw-r--r--fs/afs/security.c3
-rw-r--r--fs/afs/xattr.c2
-rw-r--r--fs/anon_inodes.c157
-rw-r--r--fs/attr.c126
-rw-r--r--fs/autofs/root.c17
-rw-r--r--fs/bad_inode.c36
-rw-r--r--fs/bfs/dir.c12
-rw-r--r--fs/binfmt_elf.c26
-rw-r--r--fs/binfmt_elf_fdpic.c27
-rw-r--r--fs/binfmt_misc.c4
-rw-r--r--fs/block_dev.c42
-rw-r--r--fs/btrfs/Makefile19
-rw-r--r--fs/btrfs/acl.c6
-rw-r--r--fs/btrfs/backref.c17
-rw-r--r--fs/btrfs/backref.h9
-rw-r--r--fs/btrfs/block-group.c221
-rw-r--r--fs/btrfs/block-group.h28
-rw-r--r--fs/btrfs/btrfs_inode.h3
-rw-r--r--fs/btrfs/check-integrity.c10
-rw-r--r--fs/btrfs/compression.c78
-rw-r--r--fs/btrfs/ctree.c9
-rw-r--r--fs/btrfs/ctree.h30
-rw-r--r--fs/btrfs/delalloc-space.c29
-rw-r--r--fs/btrfs/delayed-inode.c7
-rw-r--r--fs/btrfs/delayed-ref.c79
-rw-r--r--fs/btrfs/delayed-ref.h28
-rw-r--r--fs/btrfs/dev-replace.c186
-rw-r--r--fs/btrfs/dev-replace.h3
-rw-r--r--fs/btrfs/discard.c6
-rw-r--r--fs/btrfs/disk-io.c186
-rw-r--r--fs/btrfs/disk-io.h6
-rw-r--r--fs/btrfs/extent-tree.c422
-rw-r--r--fs/btrfs/extent_io.c812
-rw-r--r--fs/btrfs/extent_io.h17
-rw-r--r--fs/btrfs/extent_map.c18
-rw-r--r--fs/btrfs/file-item.c22
-rw-r--r--fs/btrfs/file.c72
-rw-r--r--fs/btrfs/free-space-cache.c142
-rw-r--r--fs/btrfs/free-space-cache.h2
-rw-r--r--fs/btrfs/free-space-tree.c10
-rw-r--r--fs/btrfs/inode.c428
-rw-r--r--fs/btrfs/ioctl.c75
-rw-r--r--fs/btrfs/lzo.c4
-rw-r--r--fs/btrfs/ordered-data.c224
-rw-r--r--fs/btrfs/ordered-data.h57
-rw-r--r--fs/btrfs/qgroup.c8
-rw-r--r--fs/btrfs/qgroup.h2
-rw-r--r--fs/btrfs/raid56.c41
-rw-r--r--fs/btrfs/ref-verify.c47
-rw-r--r--fs/btrfs/reflink.c29
-rw-r--r--fs/btrfs/relocation.c99
-rw-r--r--fs/btrfs/scrub.c154
-rw-r--r--fs/btrfs/send.c38
-rw-r--r--fs/btrfs/space-info.c365
-rw-r--r--fs/btrfs/space-info.h25
-rw-r--r--fs/btrfs/subpage.c278
-rw-r--r--fs/btrfs/subpage.h91
-rw-r--r--fs/btrfs/super.c12
-rw-r--r--fs/btrfs/sysfs.c2
-rw-r--r--fs/btrfs/tests/btrfs-tests.c2
-rw-r--r--fs/btrfs/tests/extent-map-tests.c2
-rw-r--r--fs/btrfs/transaction.c152
-rw-r--r--fs/btrfs/transaction.h5
-rw-r--r--fs/btrfs/tree-checker.c16
-rw-r--r--fs/btrfs/tree-log.c285
-rw-r--r--fs/btrfs/volumes.c368
-rw-r--r--fs/btrfs/volumes.h19
-rw-r--r--fs/btrfs/xattr.c33
-rw-r--r--fs/btrfs/zlib.c5
-rw-r--r--fs/btrfs/zoned.c877
-rw-r--r--fs/btrfs/zoned.h157
-rw-r--r--fs/btrfs/zstd.c6
-rw-r--r--fs/buffer.c7
-rw-r--r--fs/cachefiles/interface.c4
-rw-r--r--fs/cachefiles/namei.c21
-rw-r--r--fs/cachefiles/xattr.c29
-rw-r--r--fs/ceph/acl.c6
-rw-r--r--fs/ceph/addr.c2
-rw-r--r--fs/ceph/caps.c70
-rw-r--r--fs/ceph/dir.c23
-rw-r--r--fs/ceph/inode.c79
-rw-r--r--fs/ceph/snap.c10
-rw-r--r--fs/ceph/super.h52
-rw-r--r--fs/ceph/xattr.c1
-rw-r--r--fs/cifs/cifs_debug.c125
-rw-r--r--fs/cifs/cifs_dfs_ref.c12
-rw-r--r--fs/cifs/cifs_swn.c2
-rw-r--r--fs/cifs/cifsacl.c379
-rw-r--r--fs/cifs/cifsacl.h4
-rw-r--r--fs/cifs/cifsencrypt.c6
-rw-r--r--fs/cifs/cifsfs.c24
-rw-r--r--fs/cifs/cifsfs.h27
-rw-r--r--fs/cifs/cifsglob.h11
-rw-r--r--fs/cifs/cifsproto.h8
-rw-r--r--fs/cifs/cifssmb.c6
-rw-r--r--fs/cifs/connect.c332
-rw-r--r--fs/cifs/dfs_cache.c41
-rw-r--r--fs/cifs/dir.c30
-rw-r--r--fs/cifs/file.c2
-rw-r--r--fs/cifs/fs_context.c136
-rw-r--r--fs/cifs/fs_context.h7
-rw-r--r--fs/cifs/inode.c49
-rw-r--r--fs/cifs/link.c3
-rw-r--r--fs/cifs/sess.c2
-rw-r--r--fs/cifs/smb2ops.c109
-rw-r--r--fs/cifs/smb2pdu.c22
-rw-r--r--fs/cifs/smb2pdu.h2
-rw-r--r--fs/cifs/trace.h36
-rw-r--r--fs/cifs/transport.c81
-rw-r--r--fs/cifs/xattr.c1
-rw-r--r--fs/coda/coda_linux.h8
-rw-r--r--fs/coda/dir.c18
-rw-r--r--fs/coda/inode.c9
-rw-r--r--fs/coda/pioctl.c6
-rw-r--r--fs/compat_binfmt_elf.c7
-rw-r--r--fs/configfs/configfs_internal.h6
-rw-r--r--fs/configfs/dir.c3
-rw-r--r--fs/configfs/inode.c5
-rw-r--r--fs/configfs/symlink.c6
-rw-r--r--fs/coredump.c14
-rw-r--r--fs/cramfs/inode.c18
-rw-r--r--fs/crypto/policy.c2
-rw-r--r--fs/dax.c5
-rw-r--r--fs/dcache.c92
-rw-r--r--fs/dcookies.c356
-rw-r--r--fs/debugfs/inode.c14
-rw-r--r--fs/direct-io.c10
-rw-r--r--fs/ecryptfs/crypto.c4
-rw-r--r--fs/ecryptfs/inode.c92
-rw-r--r--fs/ecryptfs/main.c6
-rw-r--r--fs/ecryptfs/mmap.c4
-rw-r--r--fs/efivarfs/file.c2
-rw-r--r--fs/efivarfs/inode.c4
-rw-r--r--fs/erofs/data.c4
-rw-r--r--fs/erofs/inode.c7
-rw-r--r--fs/erofs/internal.h5
-rw-r--r--fs/erofs/namei.c4
-rw-r--r--fs/erofs/super.c4
-rw-r--r--fs/erofs/xattr.c10
-rw-r--r--fs/erofs/zmap.c10
-rw-r--r--fs/eventpoll.c4
-rw-r--r--fs/exec.c20
-rw-r--r--fs/exfat/balloc.c4
-rw-r--r--fs/exfat/exfat_fs.h10
-rw-r--r--fs/exfat/exfat_raw.h4
-rw-r--r--fs/exfat/fatent.c43
-rw-r--r--fs/exfat/file.c16
-rw-r--r--fs/exfat/namei.c14
-rw-r--r--fs/exfat/super.c31
-rw-r--r--fs/ext2/acl.c6
-rw-r--r--fs/ext2/acl.h3
-rw-r--r--fs/ext2/ext2.h5
-rw-r--r--fs/ext2/ialloc.c2
-rw-r--r--fs/ext2/inode.c15
-rw-r--r--fs/ext2/ioctl.c6
-rw-r--r--fs/ext2/namei.c22
-rw-r--r--fs/ext2/xattr_security.c1
-rw-r--r--fs/ext2/xattr_trusted.c1
-rw-r--r--fs/ext2/xattr_user.c1
-rw-r--r--fs/ext4/.kunitconfig3
-rw-r--r--fs/ext4/Kconfig3
-rw-r--r--fs/ext4/acl.c5
-rw-r--r--fs/ext4/acl.h3
-rw-r--r--fs/ext4/ext4.h22
-rw-r--r--fs/ext4/extents.c16
-rw-r--r--fs/ext4/fast_commit.c33
-rw-r--r--fs/ext4/file.c5
-rw-r--r--fs/ext4/fsync.c2
-rw-r--r--fs/ext4/ialloc.c9
-rw-r--r--fs/ext4/inode.c41
-rw-r--r--fs/ext4/ioctl.c27
-rw-r--r--fs/ext4/namei.c94
-rw-r--r--fs/ext4/readpage.c3
-rw-r--r--fs/ext4/super.c16
-rw-r--r--fs/ext4/xattr_hurd.c1
-rw-r--r--fs/ext4/xattr_security.c1
-rw-r--r--fs/ext4/xattr_trusted.c1
-rw-r--r--fs/ext4/xattr_user.c1
-rw-r--r--fs/f2fs/Kconfig20
-rw-r--r--fs/f2fs/Makefile1
-rw-r--r--fs/f2fs/acl.c26
-rw-r--r--fs/f2fs/acl.h3
-rw-r--r--fs/f2fs/checkpoint.c177
-rw-r--r--fs/f2fs/compress.c195
-rw-r--r--fs/f2fs/data.c443
-rw-r--r--fs/f2fs/debug.c12
-rw-r--r--fs/f2fs/f2fs.h113
-rw-r--r--fs/f2fs/file.c100
-rw-r--r--fs/f2fs/gc.c8
-rw-r--r--fs/f2fs/inline.c4
-rw-r--r--fs/f2fs/namei.c31
-rw-r--r--fs/f2fs/node.c6
-rw-r--r--fs/f2fs/segment.c19
-rw-r--r--fs/f2fs/segment.h4
-rw-r--r--fs/f2fs/super.c202
-rw-r--r--fs/f2fs/sysfs.c132
-rw-r--r--fs/f2fs/trace.c165
-rw-r--r--fs/f2fs/trace.h43
-rw-r--r--fs/f2fs/xattr.c27
-rw-r--r--fs/fat/fat.h6
-rw-r--r--fs/fat/file.c26
-rw-r--r--fs/fat/misc.c23
-rw-r--r--fs/fat/namei_msdos.c12
-rw-r--r--fs/fat/namei_vfat.c15
-rw-r--r--fs/fcntl.c22
-rw-r--r--fs/fhandle.c2
-rw-r--r--fs/file.c36
-rw-r--r--fs/fs-writeback.c116
-rw-r--r--fs/fuse/acl.c3
-rw-r--r--fs/fuse/dev.c6
-rw-r--r--fs/fuse/dir.c46
-rw-r--r--fs/fuse/fuse_i.h4
-rw-r--r--fs/fuse/xattr.c2
-rw-r--r--fs/gfs2/acl.c5
-rw-r--r--fs/gfs2/acl.h3
-rw-r--r--fs/gfs2/bmap.c10
-rw-r--r--fs/gfs2/file.c23
-rw-r--r--fs/gfs2/glock.c22
-rw-r--r--fs/gfs2/glock.h6
-rw-r--r--fs/gfs2/glops.c38
-rw-r--r--fs/gfs2/incore.h54
-rw-r--r--fs/gfs2/inode.c74
-rw-r--r--fs/gfs2/inode.h3
-rw-r--r--fs/gfs2/lock_dlm.c8
-rw-r--r--fs/gfs2/log.c525
-rw-r--r--fs/gfs2/log.h20
-rw-r--r--fs/gfs2/lops.c26
-rw-r--r--fs/gfs2/lops.h23
-rw-r--r--fs/gfs2/main.c4
-rw-r--r--fs/gfs2/ops_fstype.c71
-rw-r--r--fs/gfs2/recovery.c14
-rw-r--r--fs/gfs2/rgrp.c442
-rw-r--r--fs/gfs2/rgrp.h6
-rw-r--r--fs/gfs2/super.c75
-rw-r--r--fs/gfs2/super.h8
-rw-r--r--fs/gfs2/trace_gfs2.h37
-rw-r--r--fs/gfs2/trans.c102
-rw-r--r--fs/gfs2/trans.h5
-rw-r--r--fs/gfs2/util.c59
-rw-r--r--fs/gfs2/util.h3
-rw-r--r--fs/gfs2/xattr.c55
-rw-r--r--fs/hfs/attr.c1
-rw-r--r--fs/hfs/dir.c13
-rw-r--r--fs/hfs/hfs_fs.h3
-rw-r--r--fs/hfs/inode.c8
-rw-r--r--fs/hfsplus/dir.c22
-rw-r--r--fs/hfsplus/hfsplus_fs.h5
-rw-r--r--fs/hfsplus/inode.c18
-rw-r--r--fs/hfsplus/ioctl.c2
-rw-r--r--fs/hfsplus/super.c2
-rw-r--r--fs/hfsplus/xattr.c1
-rw-r--r--fs/hfsplus/xattr_security.c1
-rw-r--r--fs/hfsplus/xattr_trusted.c1
-rw-r--r--fs/hfsplus/xattr_user.c1
-rw-r--r--fs/hostfs/hostfs_kern.c39
-rw-r--r--fs/hpfs/hpfs_fn.h2
-rw-r--r--fs/hpfs/inode.c7
-rw-r--r--fs/hpfs/namei.c20
-rw-r--r--fs/hugetlbfs/inode.c108
-rw-r--r--fs/init.c24
-rw-r--r--fs/inode.c87
-rw-r--r--fs/internal.h12
-rw-r--r--fs/io-wq.c645
-rw-r--r--fs/io-wq.h48
-rw-r--r--fs/io_uring.c4351
-rw-r--r--fs/iomap/buffered-io.c11
-rw-r--r--fs/iomap/direct-io.c78
-rw-r--r--fs/iomap/seek.c125
-rw-r--r--fs/isofs/dir.c1
-rw-r--r--fs/isofs/inode.c9
-rw-r--r--fs/isofs/namei.c1
-rw-r--r--fs/jbd2/checkpoint.c2
-rw-r--r--fs/jbd2/commit.c4
-rw-r--r--fs/jbd2/recovery.c2
-rw-r--r--fs/jffs2/acl.c6
-rw-r--r--fs/jffs2/acl.h3
-rw-r--r--fs/jffs2/compr_rtime.c3
-rw-r--r--fs/jffs2/dir.c33
-rw-r--r--fs/jffs2/fs.c7
-rw-r--r--fs/jffs2/os-linux.h2
-rw-r--r--fs/jffs2/security.c1
-rw-r--r--fs/jffs2/summary.c3
-rw-r--r--fs/jffs2/xattr_trusted.c1
-rw-r--r--fs/jffs2/xattr_user.c1
-rw-r--r--fs/jfs/acl.c5
-rw-r--r--fs/jfs/file.c9
-rw-r--r--fs/jfs/ioctl.c2
-rw-r--r--fs/jfs/jfs_acl.h3
-rw-r--r--fs/jfs/jfs_dmap.c2
-rw-r--r--fs/jfs/jfs_filsys.h1
-rw-r--r--fs/jfs/jfs_inode.c2
-rw-r--r--fs/jfs/jfs_inode.h2
-rw-r--r--fs/jfs/jfs_mount.c10
-rw-r--r--fs/jfs/jfs_txnmgr.c35
-rw-r--r--fs/jfs/namei.c21
-rw-r--r--fs/jfs/super.c1
-rw-r--r--fs/jfs/xattr.c2
-rw-r--r--fs/kernfs/dir.c6
-rw-r--r--fs/kernfs/inode.c19
-rw-r--r--fs/kernfs/kernfs-internal.h9
-rw-r--r--fs/libfs.c43
-rw-r--r--fs/lockd/svc4proc.c24
-rw-r--r--fs/lockd/svcproc.c24
-rw-r--r--fs/minix/bitmap.c2
-rw-r--r--fs/minix/file.c7
-rw-r--r--fs/minix/inode.c6
-rw-r--r--fs/minix/minix.h3
-rw-r--r--fs/minix/namei.c24
-rw-r--r--fs/mount.h10
-rw-r--r--fs/mpage.c4
-rw-r--r--fs/namei.c600
-rw-r--r--fs/namespace.c537
-rw-r--r--fs/nfs/blocklayout/blocklayout.c11
-rw-r--r--fs/nfs/callback_xdr.c2
-rw-r--r--fs/nfs/dir.c25
-rw-r--r--fs/nfs/export.c18
-rw-r--r--fs/nfs/file.c27
-rw-r--r--fs/nfs/fs_context.c35
-rw-r--r--fs/nfs/fscache.c4
-rw-r--r--fs/nfs/inode.c120
-rw-r--r--fs/nfs/internal.h14
-rw-r--r--fs/nfs/namespace.c15
-rw-r--r--fs/nfs/nfs3_fs.h3
-rw-r--r--fs/nfs/nfs3acl.c4
-rw-r--r--fs/nfs/nfs4client.c1
-rw-r--r--fs/nfs/nfs4file.c4
-rw-r--r--fs/nfs/nfs4proc.c24
-rw-r--r--fs/nfs/nfs4state.c1
-rw-r--r--fs/nfs/pnfs.c71
-rw-r--r--fs/nfs/read.c204
-rw-r--r--fs/nfs/super.c19
-rw-r--r--fs/nfs/write.c37
-rw-r--r--fs/nfs_common/Makefile2
-rw-r--r--fs/nfs_common/nfs_ssc.c2
-rw-r--r--fs/nfs_common/nfsacl.c52
-rw-r--r--fs/nfsd/Kconfig1
-rw-r--r--fs/nfsd/blocklayout.c2
-rw-r--r--fs/nfsd/export.c80
-rw-r--r--fs/nfsd/export.h15
-rw-r--r--fs/nfsd/netns.h23
-rw-r--r--fs/nfsd/nfs2acl.c73
-rw-r--r--fs/nfsd/nfs3acl.c51
-rw-r--r--fs/nfsd/nfs3proc.c93
-rw-r--r--fs/nfsd/nfs3xdr.c582
-rw-r--r--fs/nfsd/nfs4acl.c5
-rw-r--r--fs/nfsd/nfs4proc.c12
-rw-r--r--fs/nfsd/nfs4recover.c6
-rw-r--r--fs/nfsd/nfs4state.c124
-rw-r--r--fs/nfsd/nfscache.c52
-rw-r--r--fs/nfsd/nfsctl.c22
-rw-r--r--fs/nfsd/nfsd.h2
-rw-r--r--fs/nfsd/nfsfh.c7
-rw-r--r--fs/nfsd/nfsfh.h5
-rw-r--r--fs/nfsd/nfsproc.c94
-rw-r--r--fs/nfsd/nfssvc.c34
-rw-r--r--fs/nfsd/nfsxdr.c350
-rw-r--r--fs/nfsd/state.h3
-rw-r--r--fs/nfsd/stats.c114
-rw-r--r--fs/nfsd/stats.h96
-rw-r--r--fs/nfsd/vfs.c54
-rw-r--r--fs/nfsd/xdr.h12
-rw-r--r--fs/nfsd/xdr3.h20
-rw-r--r--fs/nilfs2/file.c1
-rw-r--r--fs/nilfs2/inode.c14
-rw-r--r--fs/nilfs2/ioctl.c2
-rw-r--r--fs/nilfs2/namei.c19
-rw-r--r--fs/nilfs2/nilfs.h6
-rw-r--r--fs/nilfs2/segbuf.c4
-rw-r--r--fs/nilfs2/the_nilfs.h2
-rw-r--r--fs/notify/fanotify/fanotify_user.c4
-rw-r--r--fs/notify/group.c25
-rw-r--r--fs/notify/inotify/inotify_user.c6
-rw-r--r--fs/ntfs/inode.c12
-rw-r--r--fs/ntfs/inode.h3
-rw-r--r--fs/ntfs/layout.h4
-rw-r--r--fs/ocfs2/acl.c6
-rw-r--r--fs/ocfs2/acl.h3
-rw-r--r--fs/ocfs2/cluster/heartbeat.c8
-rw-r--r--fs/ocfs2/dlm/dlmast.c10
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h4
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c17
-rw-r--r--fs/ocfs2/file.c20
-rw-r--r--fs/ocfs2/file.h11
-rw-r--r--fs/ocfs2/ioctl.c2
-rw-r--r--fs/ocfs2/namei.c21
-rw-r--r--fs/ocfs2/refcounttree.c6
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/ocfs2/xattr.c3
-rw-r--r--fs/omfs/dir.c13
-rw-r--r--fs/omfs/file.c7
-rw-r--r--fs/omfs/inode.c2
-rw-r--r--fs/open.c41
-rw-r--r--fs/orangefs/acl.c6
-rw-r--r--fs/orangefs/file.c5
-rw-r--r--fs/orangefs/inode.c20
-rw-r--r--fs/orangefs/namei.c12
-rw-r--r--fs/orangefs/orangefs-kernel.h13
-rw-r--r--fs/orangefs/xattr.c1
-rw-r--r--fs/overlayfs/copy_up.c37
-rw-r--r--fs/overlayfs/dir.c33
-rw-r--r--fs/overlayfs/file.c11
-rw-r--r--fs/overlayfs/inode.c29
-rw-r--r--fs/overlayfs/overlayfs.h46
-rw-r--r--fs/overlayfs/ovl_entry.h2
-rw-r--r--fs/overlayfs/readdir.c28
-rw-r--r--fs/overlayfs/super.c59
-rw-r--r--fs/overlayfs/util.c31
-rw-r--r--fs/pipe.c2
-rw-r--r--fs/posix_acl.c103
-rw-r--r--fs/proc/base.c47
-rw-r--r--fs/proc/fd.c5
-rw-r--r--fs/proc/fd.h3
-rw-r--r--fs/proc/generic.c12
-rw-r--r--fs/proc/internal.h6
-rw-r--r--fs/proc/meminfo.c10
-rw-r--r--fs/proc/proc_net.c5
-rw-r--r--fs/proc/proc_sysctl.c19
-rw-r--r--fs/proc/root.c5
-rw-r--r--fs/proc/self.c7
-rw-r--r--fs/proc/task_mmu.c9
-rw-r--r--fs/proc/vmcore.c7
-rw-r--r--fs/proc_namespace.c3
-rw-r--r--fs/pstore/inode.c2
-rw-r--r--fs/pstore/platform.c4
-rw-r--r--fs/pstore/ram_core.c2
-rw-r--r--fs/pstore/zone.c2
-rw-r--r--fs/quota/quota_v2.c11
-rw-r--r--fs/ramfs/file-nommu.c9
-rw-r--r--fs/ramfs/inode.c31
-rw-r--r--fs/read_write.c19
-rw-r--r--fs/reiserfs/acl.h3
-rw-r--r--fs/reiserfs/file.c2
-rw-r--r--fs/reiserfs/inode.c7
-rw-r--r--fs/reiserfs/ioctl.c4
-rw-r--r--fs/reiserfs/namei.c21
-rw-r--r--fs/reiserfs/reiserfs.h3
-rw-r--r--fs/reiserfs/xattr.c13
-rw-r--r--fs/reiserfs/xattr.h3
-rw-r--r--fs/reiserfs/xattr_acl.c8
-rw-r--r--fs/reiserfs/xattr_security.c3
-rw-r--r--fs/reiserfs/xattr_trusted.c3
-rw-r--r--fs/reiserfs/xattr_user.c3
-rw-r--r--fs/remap_range.c7
-rw-r--r--fs/seq_file.c5
-rw-r--r--fs/splice.c53
-rw-r--r--fs/squashfs/block.c8
-rw-r--r--fs/squashfs/export.c41
-rw-r--r--fs/squashfs/id.c40
-rw-r--r--fs/squashfs/squashfs_fs_sb.h1
-rw-r--r--fs/squashfs/super.c6
-rw-r--r--fs/squashfs/xattr.h10
-rw-r--r--fs/squashfs/xattr_id.c66
-rw-r--r--fs/stat.c26
-rw-r--r--fs/statfs.c5
-rw-r--r--fs/super.c15
-rw-r--r--fs/sysfs/file.c11
-rw-r--r--fs/sysv/file.c7
-rw-r--r--fs/sysv/ialloc.c2
-rw-r--r--fs/sysv/itree.c6
-rw-r--r--fs/sysv/namei.c21
-rw-r--r--fs/sysv/sysv.h3
-rw-r--r--fs/tracefs/inode.c4
-rw-r--r--fs/ubifs/auth.c2
-rw-r--r--fs/ubifs/dir.c30
-rw-r--r--fs/ubifs/file.c5
-rw-r--r--fs/ubifs/ioctl.c2
-rw-r--r--fs/ubifs/journal.c2
-rw-r--r--fs/ubifs/replay.c4
-rw-r--r--fs/ubifs/super.c4
-rw-r--r--fs/ubifs/ubifs.h5
-rw-r--r--fs/ubifs/xattr.c3
-rw-r--r--fs/udf/file.c9
-rw-r--r--fs/udf/ialloc.c2
-rw-r--r--fs/udf/inode.c9
-rw-r--r--fs/udf/namei.c24
-rw-r--r--fs/udf/super.c9
-rw-r--r--fs/udf/symlink.c7
-rw-r--r--fs/ufs/ialloc.c2
-rw-r--r--fs/ufs/inode.c7
-rw-r--r--fs/ufs/namei.c19
-rw-r--r--fs/ufs/ufs.h3
-rw-r--r--fs/userfaultfd.c19
-rw-r--r--fs/utimes.c3
-rw-r--r--fs/vboxsf/dir.c12
-rw-r--r--fs/vboxsf/utils.c9
-rw-r--r--fs/vboxsf/vfsmod.h8
-rw-r--r--fs/verity/Makefile1
-rw-r--r--fs/verity/enable.c2
-rw-r--r--fs/verity/fsverity_private.h13
-rw-r--r--fs/verity/open.c133
-rw-r--r--fs/verity/read_metadata.c195
-rw-r--r--fs/verity/signature.c20
-rw-r--r--fs/xattr.c139
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c50
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h3
-rw-r--r--fs/xfs/libxfs/xfs_attr.c22
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c315
-rw-r--r--fs/xfs/libxfs/xfs_btree.c45
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h2
-rw-r--r--fs/xfs/libxfs/xfs_dir2_sf.c2
-rw-r--r--fs/xfs/libxfs/xfs_errortag.h6
-rw-r--r--fs/xfs/libxfs/xfs_fs.h1
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c27
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h63
-rw-r--r--fs/xfs/libxfs/xfs_sb.c2
-rw-r--r--fs/xfs/scrub/common.c4
-rw-r--r--fs/xfs/xfs_acl.c5
-rw-r--r--fs/xfs/xfs_acl.h3
-rw-r--r--fs/xfs/xfs_aops.c17
-rw-r--r--fs/xfs/xfs_bio_io.c2
-rw-r--r--fs/xfs/xfs_bmap_item.c10
-rw-r--r--fs/xfs/xfs_bmap_util.c81
-rw-r--r--fs/xfs/xfs_buf.c34
-rw-r--r--fs/xfs/xfs_buf.h11
-rw-r--r--fs/xfs/xfs_dquot.c47
-rw-r--r--fs/xfs/xfs_error.c6
-rw-r--r--fs/xfs/xfs_extent_busy.c14
-rw-r--r--fs/xfs/xfs_file.c443
-rw-r--r--fs/xfs/xfs_fsops.c32
-rw-r--r--fs/xfs/xfs_fsops.h4
-rw-r--r--fs/xfs/xfs_globals.c7
-rw-r--r--fs/xfs/xfs_icache.c438
-rw-r--r--fs/xfs/xfs_icache.h24
-rw-r--r--fs/xfs/xfs_inode.c156
-rw-r--r--fs/xfs/xfs_inode.h16
-rw-r--r--fs/xfs/xfs_ioctl.c104
-rw-r--r--fs/xfs/xfs_ioctl32.c13
-rw-r--r--fs/xfs/xfs_iomap.c82
-rw-r--r--fs/xfs/xfs_iops.c129
-rw-r--r--fs/xfs/xfs_iops.h3
-rw-r--r--fs/xfs/xfs_itable.c17
-rw-r--r--fs/xfs/xfs_itable.h1
-rw-r--r--fs/xfs/xfs_iwalk.c5
-rw-r--r--fs/xfs/xfs_linux.h3
-rw-r--r--fs/xfs/xfs_log.c142
-rw-r--r--fs/xfs/xfs_log.h4
-rw-r--r--fs/xfs/xfs_mount.c43
-rw-r--r--fs/xfs/xfs_mount.h10
-rw-r--r--fs/xfs/xfs_mru_cache.c2
-rw-r--r--fs/xfs/xfs_pwork.c25
-rw-r--r--fs/xfs/xfs_pwork.h4
-rw-r--r--fs/xfs/xfs_qm.c119
-rw-r--r--fs/xfs/xfs_quota.h49
-rw-r--r--fs/xfs/xfs_reflink.c103
-rw-r--r--fs/xfs/xfs_rtalloc.c5
-rw-r--r--fs/xfs/xfs_super.c86
-rw-r--r--fs/xfs/xfs_super.h6
-rw-r--r--fs/xfs/xfs_symlink.c20
-rw-r--r--fs/xfs/xfs_symlink.h5
-rw-r--r--fs/xfs/xfs_sysctl.c40
-rw-r--r--fs/xfs/xfs_sysctl.h3
-rw-r--r--fs/xfs/xfs_trace.c1
-rw-r--r--fs/xfs/xfs_trace.h72
-rw-r--r--fs/xfs/xfs_trans.c222
-rw-r--r--fs/xfs/xfs_trans.h43
-rw-r--r--fs/xfs/xfs_trans_dquot.c71
-rw-r--r--fs/xfs/xfs_xattr.c7
-rw-r--r--fs/zonefs/Makefile2
-rw-r--r--fs/zonefs/super.c36
-rw-r--r--fs/zonefs/trace.h104
-rw-r--r--include/acpi/acbuffer.h2
-rw-r--r--include/acpi/acconfig.h2
-rw-r--r--include/acpi/acexcep.h12
-rw-r--r--include/acpi/acnames.h2
-rw-r--r--include/acpi/acoutput.h2
-rw-r--r--include/acpi/acpi.h2
-rw-r--r--include/acpi/acpi_bus.h7
-rw-r--r--include/acpi/acpi_drivers.h7
-rw-r--r--include/acpi/acpi_numa.h4
-rw-r--r--include/acpi/acpiosxf.h2
-rw-r--r--include/acpi/acpixf.h4
-rw-r--r--include/acpi/acrestyp.h2
-rw-r--r--include/acpi/actbl.h2
-rw-r--r--include/acpi/actbl1.h13
-rw-r--r--include/acpi/actbl2.h26
-rw-r--r--include/acpi/actbl3.h25
-rw-r--r--include/acpi/actypes.h8
-rw-r--r--include/acpi/acuuid.h2
-rw-r--r--include/acpi/cppc_acpi.h2
-rw-r--r--include/acpi/platform/acenv.h2
-rw-r--r--include/acpi/platform/acenvex.h2
-rw-r--r--include/acpi/platform/acgcc.h17
-rw-r--r--include/acpi/platform/acgccex.h2
-rw-r--r--include/acpi/platform/acintel.h2
-rw-r--r--include/acpi/platform/aclinux.h2
-rw-r--r--include/acpi/platform/aclinuxex.h2
-rw-r--r--include/asm-generic/Kbuild2
-rw-r--r--include/asm-generic/export.h2
-rw-r--r--include/asm-generic/hyperv-tlfs.h255
-rw-r--r--include/asm-generic/mm-arch-hooks.h16
-rw-r--r--include/asm-generic/mshyperv.h5
-rw-r--r--include/asm-generic/numa.h52
-rw-r--r--include/asm-generic/page.h4
-rw-r--r--include/asm-generic/qrwlock.h25
-rw-r--r--include/asm-generic/softirq_stack.h14
-rw-r--r--include/asm-generic/tlb.h6
-rw-r--r--include/asm-generic/vmlinux.lds.h87
-rw-r--r--include/crypto/algapi.h39
-rw-r--r--include/crypto/blake2b.h67
-rw-r--r--include/crypto/blake2s.h63
-rw-r--r--include/crypto/hash.h8
-rw-r--r--include/crypto/internal/blake2b.h115
-rw-r--r--include/crypto/internal/blake2s.h109
-rw-r--r--include/crypto/internal/cipher.h218
-rw-r--r--include/crypto/internal/skcipher.h2
-rw-r--r--include/crypto/public_key.h1
-rw-r--r--include/drm/drm_agpsupport.h18
-rw-r--r--include/drm/drm_atomic.h20
-rw-r--r--include/drm/drm_atomic_helper.h4
-rw-r--r--include/drm/drm_connector.h49
-rw-r--r--include/drm/drm_crtc.h33
-rw-r--r--include/drm/drm_device.h23
-rw-r--r--include/drm/drm_dp_helper.h239
-rw-r--r--include/drm/drm_dp_mst_helper.h1
-rw-r--r--include/drm/drm_drv.h2
-rw-r--r--include/drm/drm_dsc.h1
-rw-r--r--include/drm/drm_edid.h30
-rw-r--r--include/drm/drm_encoder.h32
-rw-r--r--include/drm/drm_file.h3
-rw-r--r--include/drm/drm_gem.h3
-rw-r--r--include/drm/drm_gem_cma_helper.h14
-rw-r--r--include/drm/drm_hdcp.h8
-rw-r--r--include/drm/drm_irq.h2
-rw-r--r--include/drm/drm_legacy.h10
-rw-r--r--include/drm/drm_managed.h2
-rw-r--r--include/drm/drm_mipi_dbi.h2
-rw-r--r--include/drm/drm_modes.h10
-rw-r--r--include/drm/drm_modeset_helper_vtables.h29
-rw-r--r--include/drm/drm_plane.h42
-rw-r--r--include/drm/drm_prime.h7
-rw-r--r--include/drm/drm_property.h2
-rw-r--r--include/drm/drm_rect.h13
-rw-r--r--include/drm/drm_simple_kms_helper.h24
-rw-r--r--include/drm/gpu_scheduler.h14
-rw-r--r--include/drm/ttm/ttm_bo_api.h13
-rw-r--r--include/drm/ttm/ttm_bo_driver.h6
-rw-r--r--include/drm/ttm/ttm_resource.h5
-rw-r--r--include/drm/ttm/ttm_tt.h2
-rw-r--r--include/dt-bindings/clock/axg-clkc.h1
-rw-r--r--include/dt-bindings/clock/imx8-clock.h2
-rw-r--r--include/dt-bindings/clock/imx8mm-clock.h10
-rw-r--r--include/dt-bindings/clock/imx8mn-clock.h9
-rw-r--r--include/dt-bindings/clock/imx8mq-clock.h16
-rw-r--r--include/dt-bindings/clock/k210-clk.h1
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h2
-rw-r--r--include/dt-bindings/clock/mstar-msc313-mpll.h19
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8998.h2
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc7280.h226
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc8180x.h309
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm8350.h266
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sdm660.h28
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-sdm660.h162
-rw-r--r--include/dt-bindings/clock/qcom,videocc-sm8250.h2
-rw-r--r--include/dt-bindings/clock/rk3368-cru.h3
-rw-r--r--include/dt-bindings/clock/sun50i-h6-r-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun50i-h616-ccu.h115
-rw-r--r--include/dt-bindings/clock/tegra210-car.h2
-rw-r--r--include/dt-bindings/clock/xlnx-vcu.h15
-rw-r--r--include/dt-bindings/clock/zx296702-clock.h180
-rw-r--r--include/dt-bindings/input/cros-ec-keyboard.h103
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8939.h105
-rw-r--r--include/dt-bindings/interconnect/qcom,sdx55.h76
-rw-r--r--include/dt-bindings/memory/mt2701-larb-port.h4
-rw-r--r--include/dt-bindings/memory/mt2712-larb-port.h6
-rw-r--r--include/dt-bindings/memory/mt6779-larb-port.h6
-rw-r--r--include/dt-bindings/memory/mt8167-larb-port.h6
-rw-r--r--include/dt-bindings/memory/mt8173-larb-port.h6
-rw-r--r--include/dt-bindings/memory/mt8183-larb-port.h6
-rw-r--r--include/dt-bindings/memory/mt8192-larb-port.h243
-rw-r--r--include/dt-bindings/memory/mtk-memory-port.h15
-rw-r--r--include/dt-bindings/pinctrl/k210-fpioa.h276
-rw-r--r--include/dt-bindings/power/mt8167-power.h17
-rw-r--r--include/dt-bindings/power/qcom-rpmpd.h9
-rw-r--r--include/dt-bindings/reset-controller/mt8192-resets.h30
-rw-r--r--include/dt-bindings/reset/k210-rst.h42
-rw-r--r--include/dt-bindings/reset/sun50i-h6-r-ccu.h1
-rw-r--r--include/dt-bindings/reset/sun50i-h616-ccu.h70
-rw-r--r--include/dt-bindings/soc/bcm-pmb.h11
-rw-r--r--include/dt-bindings/sound/apq8016-lpass.h7
-rw-r--r--include/dt-bindings/sound/qcom,lpass.h15
-rw-r--r--include/dt-bindings/sound/sc7180-lpass.h6
-rw-r--r--include/dt-bindings/usb/pd.h311
-rw-r--r--include/keys/encrypted-type.h2
-rw-r--r--include/linux/acpi.h51
-rw-r--r--include/linux/amba/bus.h2
-rw-r--r--include/linux/anon_inodes.h5
-rw-r--r--include/linux/arm-smccc.h31
-rw-r--r--include/linux/atmdev.h2
-rw-r--r--include/linux/binfmts.h4
-rw-r--r--include/linux/bio.h64
-rw-r--r--include/linux/bitops.h2
-rw-r--r--include/linux/blk-mq.h20
-rw-r--r--include/linux/blk_types.h33
-rw-r--r--include/linux/blkdev.h58
-rw-r--r--include/linux/blktrace_api.h4
-rw-r--r--include/linux/bpf-cgroup.h101
-rw-r--r--include/linux/bpf.h92
-rw-r--r--include/linux/bpf_verifier.h8
-rw-r--r--include/linux/brcmphy.h25
-rw-r--r--include/linux/btf.h3
-rw-r--r--include/linux/buildid.h12
-rw-r--r--include/linux/can/bittiming.h44
-rw-r--r--include/linux/can/can-ml.h12
-rw-r--r--include/linux/can/dev.h136
-rw-r--r--include/linux/can/length.h174
-rw-r--r--include/linux/can/rx-offload.h3
-rw-r--r--include/linux/can/skb.h80
-rw-r--r--include/linux/capability.h14
-rw-r--r--include/linux/ceph/libceph.h7
-rw-r--r--include/linux/cfag12864b.h2
-rw-r--r--include/linux/cgroup.h4
-rw-r--r--include/linux/clk-provider.h4
-rw-r--r--include/linux/clk.h28
-rw-r--r--include/linux/clk/imx.h15
-rw-r--r--include/linux/clk/spear.h23
-rw-r--r--include/linux/clk/tegra.h8
-rw-r--r--include/linux/compiler-clang.h10
-rw-r--r--include/linux/compiler-gcc.h11
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/compiler_attributes.h6
-rw-r--r--include/linux/connector.h2
-rw-r--r--include/linux/coresight-pmu.h20
-rw-r--r--include/linux/coresight.h218
-rw-r--r--include/linux/cpu.h2
-rw-r--r--include/linux/cpufreq.h30
-rw-r--r--include/linux/cpuhotplug.h3
-rw-r--r--include/linux/cred.h2
-rw-r--r--include/linux/crypto.h172
-rw-r--r--include/linux/dcache.h2
-rw-r--r--include/linux/dcookies.h69
-rw-r--r--include/linux/devfreq.h2
-rw-r--r--include/linux/device-mapper.h32
-rw-r--r--include/linux/device.h3
-rw-r--r--include/linux/device/driver.h2
-rw-r--r--include/linux/dfl.h86
-rw-r--r--include/linux/dma-buf.h45
-rw-r--r--include/linux/dma-fence.h3
-rw-r--r--include/linux/dma-heap.h12
-rw-r--r--include/linux/dma-map-ops.h10
-rw-r--r--include/linux/dma-mapping.h33
-rw-r--r--include/linux/dma/k3-psil.h13
-rw-r--r--include/linux/dma/mmp-pdma.h16
-rw-r--r--include/linux/dmaengine.h2
-rw-r--r--include/linux/dmar.h2
-rw-r--r--include/linux/dsa/8021q.h14
-rw-r--r--include/linux/dsa/brcm.h16
-rw-r--r--include/linux/dsa/ocelot.h223
-rw-r--r--include/linux/dtpm.h77
-rw-r--r--include/linux/eeprom_93xx46.h2
-rw-r--r--include/linux/efi.h19
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/elfcore-compat.h15
-rw-r--r--include/linux/elfcore.h7
-rw-r--r--include/linux/entry-common.h5
-rw-r--r--include/linux/entry-kvm.h14
-rw-r--r--include/linux/ethtool.h5
-rw-r--r--include/linux/eventpoll.h2
-rw-r--r--include/linux/export.h9
-rw-r--r--include/linux/exportfs.h1
-rw-r--r--include/linux/f2fs_fs.h3
-rw-r--r--include/linux/fcntl.h2
-rw-r--r--include/linux/filter.h48
-rw-r--r--include/linux/firmware/intel/stratix10-svc-client.h10
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h339
-rw-r--r--include/linux/fixp-arith.h19
-rw-r--r--include/linux/fortify-string.h302
-rw-r--r--include/linux/fs.h228
-rw-r--r--include/linux/fsl/mc.h8
-rw-r--r--include/linux/fsnotify_backend.h1
-rw-r--r--include/linux/fsverity.h12
-rw-r--r--include/linux/ftrace.h2
-rw-r--r--include/linux/fwnode.h27
-rw-r--r--include/linux/genhd.h27
-rw-r--r--include/linux/gfp.h28
-rw-r--r--include/linux/gpio/machine.h4
-rw-r--r--include/linux/hid-sensor-hub.h9
-rw-r--r--include/linux/hid-sensor-ids.h14
-rw-r--r--include/linux/hid.h15
-rw-r--r--include/linux/highmem-internal.h5
-rw-r--r--include/linux/highmem.h56
-rw-r--r--include/linux/huge_mm.h15
-rw-r--r--include/linux/hugetlb.h96
-rw-r--r--include/linux/hyperv.h13
-rw-r--r--include/linux/i3c/device.h2
-rw-r--r--include/linux/icmpv6.h28
-rw-r--r--include/linux/if_hsr.h27
-rw-r--r--include/linux/iio/adc/qcom-vadc-common.h (renamed from drivers/iio/adc/qcom-vadc-common.h)31
-rw-r--r--include/linux/iio/consumer.h36
-rw-r--r--include/linux/ima.h28
-rw-r--r--include/linux/indirect_call_wrapper.h8
-rw-r--r--include/linux/init.h83
-rw-r--r--include/linux/initrd.h11
-rw-r--r--include/linux/intel-iommu.h43
-rw-r--r--include/linux/intel-pti.h35
-rw-r--r--include/linux/interrupt.h9
-rw-r--r--include/linux/io-pgtable.h19
-rw-r--r--include/linux/io_uring.h34
-rw-r--r--include/linux/iomap.h19
-rw-r--r--include/linux/iommu.h26
-rw-r--r--include/linux/ioport.h6
-rw-r--r--include/linux/iova.h12
-rw-r--r--include/linux/ipv6.h2
-rw-r--r--include/linux/irqdomain.h4
-rw-r--r--include/linux/irqflags.h12
-rw-r--r--include/linux/isa.h2
-rw-r--r--include/linux/jump_label.h12
-rw-r--r--include/linux/kallsyms.h17
-rw-r--r--include/linux/kasan-checks.h6
-rw-r--r--include/linux/kasan.h55
-rw-r--r--include/linux/kbd_kern.h10
-rw-r--r--include/linux/kconfig.h6
-rw-r--r--include/linux/kd.h8
-rw-r--r--include/linux/kernel.h23
-rw-r--r--include/linux/kexec.h7
-rw-r--r--include/linux/key.h5
-rw-r--r--include/linux/keyslot-manager.h14
-rw-r--r--include/linux/kfence.h222
-rw-r--r--include/linux/kgdb.h3
-rw-r--r--include/linux/khugepaged.h2
-rw-r--r--include/linux/kprobes.h2
-rw-r--r--include/linux/ks0108.h2
-rw-r--r--include/linux/kvm_host.h35
-rw-r--r--include/linux/led-class-flash.h42
-rw-r--r--include/linux/led-class-multicolor.h42
-rw-r--r--include/linux/leds.h12
-rw-r--r--include/linux/linkage.h5
-rw-r--r--include/linux/list.h2
-rw-r--r--include/linux/litex.h150
-rw-r--r--include/linux/local_lock_internal.h5
-rw-r--r--include/linux/lockdep.h15
-rw-r--r--include/linux/lockdep_types.h18
-rw-r--r--include/linux/lsm_hook_defs.h17
-rw-r--r--include/linux/lsm_hooks.h10
-rw-r--r--include/linux/mdev.h2
-rw-r--r--include/linux/mdio.h23
-rw-r--r--include/linux/mei_cl_bus.h2
-rw-r--r--include/linux/memblock.h6
-rw-r--r--include/linux/memcontrol.h43
-rw-r--r--include/linux/memory.h3
-rw-r--r--include/linux/memory_hotplug.h33
-rw-r--r--include/linux/memremap.h6
-rw-r--r--include/linux/mfd/abx500/ab8500.h3
-rw-r--r--include/linux/mfd/axp20x.h2
-rw-r--r--include/linux/mfd/bd9571mwv.h45
-rw-r--r--include/linux/mfd/core.h6
-rw-r--r--include/linux/mfd/hi6421-spmi-pmic.h29
-rw-r--r--include/linux/mfd/intel-m10-bmc.h9
-rw-r--r--include/linux/mfd/intel_msic.h453
-rw-r--r--include/linux/mfd/iqs62x.h11
-rw-r--r--include/linux/mfd/rohm-generic.h16
-rw-r--r--include/linux/mhi.h22
-rw-r--r--include/linux/migrate.h2
-rw-r--r--include/linux/mlx5/device.h14
-rw-r--r--include/linux/mlx5/driver.h74
-rw-r--r--include/linux/mlx5/eswitch.h29
-rw-r--r--include/linux/mlx5/mlx5_ifc.h118
-rw-r--r--include/linux/mm-arch-hooks.h22
-rw-r--r--include/linux/mm.h102
-rw-r--r--include/linux/mm_inline.h113
-rw-r--r--include/linux/mm_types.h7
-rw-r--r--include/linux/mmc/card.h1
-rw-r--r--include/linux/mmc/core.h6
-rw-r--r--include/linux/mmc/host.h25
-rw-r--r--include/linux/mmzone.h71
-rw-r--r--include/linux/mod_devicetable.h42
-rw-r--r--include/linux/module.h48
-rw-r--r--include/linux/mount.h8
-rw-r--r--include/linux/mtd/spi-nor.h1
-rw-r--r--include/linux/mutex.h25
-rw-r--r--include/linux/namei.h1
-rw-r--r--include/linux/nd.h2
-rw-r--r--include/linux/net.h3
-rw-r--r--include/linux/netdev_features.h13
-rw-r--r--include/linux/netdevice.h171
-rw-r--r--include/linux/netfilter.h2
-rw-r--r--include/linux/netlink.h6
-rw-r--r--include/linux/nfs_fs.h10
-rw-r--r--include/linux/nfs_fs_sb.h4
-rw-r--r--include/linux/nfsacl.h3
-rw-r--r--include/linux/nvme.h30
-rw-r--r--include/linux/objtool.h13
-rw-r--r--include/linux/of_device.h14
-rw-r--r--include/linux/of_irq.h9
-rw-r--r--include/linux/of_mdio.h10
-rw-r--r--include/linux/oprofile.h209
-rw-r--r--include/linux/page-flags.h10
-rw-r--r--include/linux/page_counter.h9
-rw-r--r--include/linux/pagemap.h11
-rw-r--r--include/linux/pagevec.h4
-rw-r--r--include/linux/parport.h31
-rw-r--r--include/linux/parser.h1
-rw-r--r--include/linux/pci-epc.h39
-rw-r--r--include/linux/pci-epf.h28
-rw-r--r--include/linux/pci.h36
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/perf_event.h4
-rw-r--r--include/linux/pgtable.h19
-rw-r--r--include/linux/phy.h38
-rw-r--r--include/linux/platform_data/clk-u300.h1
-rw-r--r--include/linux/platform_data/cros_ec_commands.h45
-rw-r--r--include/linux/platform_data/dma-atmel.h61
-rw-r--r--include/linux/platform_data/dma-coh901318.h72
-rw-r--r--include/linux/platform_data/dma-imx-sdma.h11
-rw-r--r--include/linux/platform_data/efm32-spi.h15
-rw-r--r--include/linux/platform_data/efm32-uart.h19
-rw-r--r--include/linux/platform_data/i2c-hid.h41
-rw-r--r--include/linux/platform_data/mlxcpld.h31
-rw-r--r--include/linux/platform_data/mmc-omap.h3
-rw-r--r--include/linux/platform_data/x86/mlxcpld.h52
-rw-r--r--include/linux/platform_profile.h41
-rw-r--r--include/linux/pm.h2
-rw-r--r--include/linux/pm_domain.h12
-rw-r--r--include/linux/pm_opp.h112
-rw-r--r--include/linux/pmbus.h9
-rw-r--r--include/linux/posix_acl.h21
-rw-r--r--include/linux/posix_acl_xattr.h12
-rw-r--r--include/linux/power/max8903_charger.h43
-rw-r--r--include/linux/property.h7
-rw-r--r--include/linux/psp-sev.h17
-rw-r--r--include/linux/ptrace.h2
-rw-r--r--include/linux/qed/qed_chain.h2
-rw-r--r--include/linux/rbtree.h206
-rw-r--r--include/linux/rcu_segcblist.h120
-rw-r--r--include/linux/rcupdate.h44
-rw-r--r--include/linux/regulator/ab8500.h166
-rw-r--r--include/linux/regulator/consumer.h30
-rw-r--r--include/linux/regulator/mt6315-regulator.h44
-rw-r--r--include/linux/regulator/pca9450.h7
-rw-r--r--include/linux/remoteproc/qcom_rproc.h4
-rw-r--r--include/linux/reset.h19
-rw-r--r--include/linux/rmap.h3
-rw-r--r--include/linux/rpmsg/qcom_glink.h8
-rw-r--r--include/linux/rtc.h2
-rw-r--r--include/linux/rtc/sirfsoc_rtciobrg.h21
-rw-r--r--include/linux/rwlock.h7
-rw-r--r--include/linux/sched.h66
-rw-r--r--include/linux/sched/prio.h18
-rw-r--r--include/linux/sched/task.h2
-rw-r--r--include/linux/security.h64
-rw-r--r--include/linux/sfi.h210
-rw-r--r--include/linux/sfi_acpi.h93
-rw-r--r--include/linux/sirfsoc_dma.h7
-rw-r--r--include/linux/skbuff.h177
-rw-r--r--include/linux/skmsg.h1
-rw-r--r--include/linux/slab.h2
-rw-r--r--include/linux/slab_def.h3
-rw-r--r--include/linux/slub_def.h3
-rw-r--r--include/linux/soc/brcmstb/brcmstb.h16
-rw-r--r--include/linux/soc/marvell/octeontx2/asm.h8
-rw-r--r--include/linux/soc/mediatek/infracfg.h8
-rw-r--r--include/linux/soc/mediatek/mtk-cmdq.h12
-rw-r--r--include/linux/soc/mediatek/mtk-mutex.h26
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h3
-rw-r--r--include/linux/soc/qcom/mdt_loader.h35
-rw-r--r--include/linux/sony-laptop.h2
-rw-r--r--include/linux/soundwire/sdw.h2
-rw-r--r--include/linux/soundwire/sdw_intel.h2
-rw-r--r--include/linux/spi/ifx_modem.h15
-rw-r--r--include/linux/spi/lms283gf05.h16
-rw-r--r--include/linux/spi/spi-mem.h9
-rw-r--r--include/linux/spi/spi.h44
-rw-r--r--include/linux/srcu.h3
-rw-r--r--include/linux/srcutiny.h7
-rw-r--r--include/linux/ssb/ssb_driver_gige.h14
-rw-r--r--include/linux/stackdepot.h9
-rw-r--r--include/linux/static_call.h77
-rw-r--r--include/linux/static_call_types.h50
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/string.h282
-rw-r--r--include/linux/sunrpc/msg_prot.h3
-rw-r--r--include/linux/sunrpc/svc.h1
-rw-r--r--include/linux/sunrpc/svc_rdma.h15
-rw-r--r--include/linux/sunrpc/svcsock.h2
-rw-r--r--include/linux/sunrpc/xdr.h16
-rw-r--r--include/linux/sunxi-rsb.h2
-rw-r--r--include/linux/surface_acpi_notify.h39
-rw-r--r--include/linux/surface_aggregator/controller.h824
-rw-r--r--include/linux/surface_aggregator/device.h423
-rw-r--r--include/linux/surface_aggregator/serial_hub.h672
-rw-r--r--include/linux/swap.h10
-rw-r--r--include/linux/swiotlb.h1
-rw-r--r--include/linux/syscalls.h12
-rw-r--r--include/linux/sysfs.h2
-rw-r--r--include/linux/tcp.h3
-rw-r--r--include/linux/tee_drv.h2
-rw-r--r--include/linux/thermal.h18
-rw-r--r--include/linux/thread_info.h2
-rw-r--r--include/linux/thunderbolt.h3
-rw-r--r--include/linux/timer.h2
-rw-r--r--include/linux/topology.h1
-rw-r--r--include/linux/torture.h27
-rw-r--r--include/linux/tpm.h14
-rw-r--r--include/linux/trace.h3
-rw-r--r--include/linux/trace_events.h86
-rw-r--r--include/linux/tracepoint.h64
-rw-r--r--include/linux/tty.h11
-rw-r--r--include/linux/tty_ldisc.h3
-rw-r--r--include/linux/types.h8
-rw-r--r--include/linux/uio.h8
-rw-r--r--include/linux/units.h4
-rw-r--r--include/linux/usb/cdc_ncm.h2
-rw-r--r--include/linux/usb/ch9.h20
-rw-r--r--include/linux/usb/chipidea.h6
-rw-r--r--include/linux/usb/composite.h6
-rw-r--r--include/linux/usb/gadget.h11
-rw-r--r--include/linux/usb/pd.h3
-rw-r--r--include/linux/usb/pd_vdo.h304
-rw-r--r--include/linux/usb/serial.h2
-rw-r--r--include/linux/usb/tcpm.h9
-rw-r--r--include/linux/usb/tegra_usb_phy.h2
-rw-r--r--include/linux/usb/typec.h23
-rw-r--r--include/linux/usb/typec_altmode.h10
-rw-r--r--include/linux/vdpa.h44
-rw-r--r--include/linux/verification.h2
-rw-r--r--include/linux/vfio.h7
-rw-r--r--include/linux/vgaarb.h6
-rw-r--r--include/linux/virtio_pci_modern.h111
-rw-r--r--include/linux/vmalloc.h15
-rw-r--r--include/linux/vme.h2
-rw-r--r--include/linux/vmstat.h6
-rw-r--r--include/linux/vmw_vmci_defs.h4
-rw-r--r--include/linux/vt_kern.h12
-rw-r--r--include/linux/w1.h2
-rw-r--r--include/linux/wm97xx.h1
-rw-r--r--include/linux/workqueue.h2
-rw-r--r--include/linux/xattr.h30
-rw-r--r--include/linux/z2_battery.h1
-rw-r--r--include/linux/zpool.h3
-rw-r--r--include/linux/zsmalloc.h2
-rw-r--r--include/linux/zstd.h8
-rw-r--r--include/media/davinci/vpif_types.h2
-rw-r--r--include/media/frame_vector.h47
-rw-r--r--include/media/v4l2-async.h146
-rw-r--r--include/media/v4l2-clk.h73
-rw-r--r--include/media/v4l2-common.h4
-rw-r--r--include/media/v4l2-event.h13
-rw-r--r--include/media/v4l2-fwnode.h28
-rw-r--r--include/media/videobuf2-core.h1
-rw-r--r--include/net/act_api.h6
-rw-r--r--include/net/bluetooth/hci.h8
-rw-r--r--include/net/bluetooth/hci_core.h37
-rw-r--r--include/net/bluetooth/l2cap.h1
-rw-r--r--include/net/bluetooth/mgmt.h16
-rw-r--r--include/net/bonding.h4
-rw-r--r--include/net/cfg80211.h150
-rw-r--r--include/net/devlink.h105
-rw-r--r--include/net/dsa.h221
-rw-r--r--include/net/dst.h25
-rw-r--r--include/net/flow_offload.h1
-rw-r--r--include/net/fq.h11
-rw-r--r--include/net/fq_impl.h171
-rw-r--r--include/net/genetlink.h1
-rw-r--r--include/net/gre.h19
-rw-r--r--include/net/gro.h12
-rw-r--r--include/net/icmp.h6
-rw-r--r--include/net/inet_common.h2
-rw-r--r--include/net/inet_connection_sock.h10
-rw-r--r--include/net/ip6_fib.h12
-rw-r--r--include/net/ip6_route.h3
-rw-r--r--include/net/ip_fib.h3
-rw-r--r--include/net/ip_vs.h11
-rw-r--r--include/net/iucv/af_iucv.h3
-rw-r--r--include/net/lapb.h2
-rw-r--r--include/net/mac80211.h26
-rw-r--r--include/net/net_namespace.h4
-rw-r--r--include/net/netfilter/nf_flow_table.h4
-rw-r--r--include/net/netfilter/nf_tables.h19
-rw-r--r--include/net/netfilter/nf_tables_core.h12
-rw-r--r--include/net/netfilter/nft_fib.h2
-rw-r--r--include/net/netfilter/nft_meta.h4
-rw-r--r--include/net/netns/ipv4.h2
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/nexthop.h14
-rw-r--r--include/net/pkt_cls.h36
-rw-r--r--include/net/sch_generic.h20
-rw-r--r--include/net/sock.h22
-rw-r--r--include/net/switchdev.h52
-rw-r--r--include/net/tcp.h25
-rw-r--r--include/net/udp.h6
-rw-r--r--include/net/udp_tunnel.h11
-rw-r--r--include/net/xdp.h25
-rw-r--r--include/rdma/ib_sa.h4
-rw-r--r--include/rdma/ib_umem.h48
-rw-r--r--include/rdma/ib_verbs.h9
-rw-r--r--include/rdma/rdma_counter.h3
-rw-r--r--include/scsi/libiscsi.h6
-rw-r--r--include/scsi/libsas.h9
-rw-r--r--include/scsi/scsi.h2
-rw-r--r--include/scsi/scsi_cmnd.h5
-rw-r--r--include/scsi/scsi_host.h6
-rw-r--r--include/scsi/scsi_transport_fc.h4
-rw-r--r--include/soc/brcmstb/common.h12
-rw-r--r--include/soc/canaan/k210-sysctl.h43
-rw-r--r--include/soc/fsl/qe/qe.h15
-rw-r--r--include/soc/fsl/qe/ucc_fast.h1
-rw-r--r--include/soc/mediatek/smi.h5
-rw-r--r--include/soc/mscc/ocelot.h183
-rw-r--r--include/soc/mscc/ocelot_qsys.h7
-rw-r--r--include/soc/mscc/ocelot_vcap.h297
-rw-r--r--include/soc/qcom/tcs.h9
-rw-r--r--include/soc/tegra/emc.h16
-rw-r--r--include/sound/core.h6
-rw-r--r--include/sound/dmaengine_pcm.h5
-rw-r--r--include/sound/graph_card.h6
-rw-r--r--include/sound/hdaudio.h14
-rw-r--r--include/sound/hdaudio_ext.h2
-rw-r--r--include/sound/hdmi-codec.h5
-rw-r--r--include/sound/intel-nhlt.h5
-rw-r--r--include/sound/jack.h1
-rw-r--r--include/sound/pcm.h2
-rw-r--r--include/sound/rt5645.h2
-rw-r--r--include/sound/soc-acpi.h2
-rw-r--r--include/sound/soc-component.h6
-rw-r--r--include/sound/soc-dai.h4
-rw-r--r--include/sound/soc.h4
-rw-r--r--include/sound/sof/ext_manifest.h6
-rw-r--r--include/target/target_core_backend.h1
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/trace/bpf_probe.h12
-rw-r--r--include/trace/events/bcache.h10
-rw-r--r--include/trace/events/block.h20
-rw-r--r--include/trace/events/btrfs.h111
-rw-r--r--include/trace/events/error_report.h74
-rw-r--r--include/trace/events/intel_iommu.h39
-rw-r--r--include/trace/events/kmem.h24
-rw-r--r--include/trace/events/netlink.h29
-rw-r--r--include/trace/events/pagemap.h11
-rw-r--r--include/trace/events/rcu.h26
-rw-r--r--include/trace/events/rpcrdma.h50
-rw-r--r--include/trace/events/sunrpc.h15
-rw-r--r--include/trace/events/tcp.h20
-rw-r--r--include/trace/events/ufs.h108
-rw-r--r--include/trace/events/workqueue.h6
-rw-r--r--include/trace/trace_events.h31
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/drm/drm.h97
-rw-r--r--include/uapi/drm/drm_fourcc.h23
-rw-r--r--include/uapi/drm/drm_mode.h13
-rw-r--r--include/uapi/drm/i915_drm.h3
-rw-r--r--include/uapi/linux/acrn.h580
-rw-r--r--include/uapi/linux/batadv_packet.h2
-rw-r--r--include/uapi/linux/batman_adv.h2
-rw-r--r--include/uapi/linux/binfmts.h4
-rw-r--r--include/uapi/linux/bpf.h123
-rw-r--r--include/uapi/linux/ccs.h18
-rw-r--r--include/uapi/linux/cxl_mem.h172
-rw-r--r--include/uapi/linux/devlink.h25
-rw-r--r--include/uapi/linux/dm-ioctl.h4
-rw-r--r--include/uapi/linux/ethtool_netlink.h1
-rw-r--r--include/uapi/linux/firewire-cdev.h2
-rw-r--r--include/uapi/linux/fsl_mc.h34
-rw-r--r--include/uapi/linux/fsverity.h14
-rw-r--r--include/uapi/linux/gfs2_ondisk.h5
-rw-r--r--include/uapi/linux/gpio.h4
-rw-r--r--include/uapi/linux/i2c-dev.h25
-rw-r--r--include/uapi/linux/i2c.h128
-rw-r--r--include/uapi/linux/if_bonding.h1
-rw-r--r--include/uapi/linux/if_link.h2
-rw-r--r--include/uapi/linux/input.h2
-rw-r--r--include/uapi/linux/io_uring.h11
-rw-r--r--include/uapi/linux/ipv6.h1
-rw-r--r--include/uapi/linux/kvm.h87
-rw-r--r--include/uapi/linux/map_to_7segment.h11
-rw-r--r--include/uapi/linux/media.h1
-rw-r--r--include/uapi/linux/mempolicy.h4
-rw-r--r--include/uapi/linux/misc/bcm_vk.h84
-rw-r--r--include/uapi/linux/mount.h16
-rw-r--r--include/uapi/linux/mptcp.h77
-rw-r--r--include/uapi/linux/mrp_bridge.h86
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h5
-rw-r--r--include/uapi/linux/nfs3.h6
-rw-r--r--include/uapi/linux/nl80211.h13
-rw-r--r--include/uapi/linux/openat2.h4
-rw-r--r--include/uapi/linux/perf_event.h96
-rw-r--r--include/uapi/linux/pkt_cls.h3
-rw-r--r--include/uapi/linux/pkt_sched.h1
-rw-r--r--include/uapi/linux/prctl.h3
-rw-r--r--include/uapi/linux/rkisp1-config.h86
-rw-r--r--include/uapi/linux/rpl.h6
-rw-r--r--include/uapi/linux/rtc.h5
-rw-r--r--include/uapi/linux/rtnetlink.h5
-rw-r--r--include/uapi/linux/serial_core.h3
-rw-r--r--include/uapi/linux/spi/spi.h41
-rw-r--r--include/uapi/linux/spi/spidev.h30
-rw-r--r--include/uapi/linux/surface_aggregator/cdev.h78
-rw-r--r--include/uapi/linux/sysctl.h1
-rw-r--r--include/uapi/linux/tcp.h23
-rw-r--r--include/uapi/linux/tee.h2
-rw-r--r--include/uapi/linux/termios.h15
-rw-r--r--include/uapi/linux/usb/ch9.h13
-rw-r--r--include/uapi/linux/usb/tmc.h3
-rw-r--r--include/uapi/linux/v4l2-controls.h22
-rw-r--r--include/uapi/linux/v4l2-subdev.h2
-rw-r--r--include/uapi/linux/vdpa.h40
-rw-r--r--include/uapi/linux/vfio.h27
-rw-r--r--include/uapi/misc/habanalabs.h74
-rw-r--r--include/uapi/rdma/ib_user_ioctl_cmds.h14
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h7
-rw-r--r--include/video/sstfb.h4
-rw-r--r--include/xen/events.h7
-rw-r--r--include/xen/grant_table.h1
-rw-r--r--include/xen/interface/xen.h4
-rw-r--r--include/xen/xenbus.h9
-rw-r--r--init/Kconfig82
-rw-r--r--init/init_task.c3
-rw-r--r--init/initramfs.c64
-rw-r--r--init/main.c16
-rw-r--r--init/version.c8
-rw-r--r--ipc/mqueue.c9
-rw-r--r--kernel/Kconfig.preempt19
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/audit.c4
-rw-r--r--kernel/audit_fsnotify.c2
-rw-r--r--kernel/auditsc.c21
-rw-r--r--kernel/bpf/bpf_inode_storage.c6
-rw-r--r--kernel/bpf/bpf_iter.c2
-rw-r--r--kernel/bpf/bpf_lru_list.c7
-rw-r--r--kernel/bpf/bpf_lsm.c12
-rw-r--r--kernel/bpf/btf.c107
-rw-r--r--kernel/bpf/cgroup.c127
-rw-r--r--kernel/bpf/core.c108
-rw-r--r--kernel/bpf/cpumap.c46
-rw-r--r--kernel/bpf/devmap.c4
-rw-r--r--kernel/bpf/disasm.c43
-rw-r--r--kernel/bpf/hashtab.c4
-rw-r--r--kernel/bpf/helpers.c12
-rw-r--r--kernel/bpf/inode.c13
-rw-r--r--kernel/bpf/preload/Makefile5
-rw-r--r--kernel/bpf/preload/iterators/iterators.c2
-rw-r--r--kernel/bpf/stackmap.c145
-rw-r--r--kernel/bpf/syscall.c16
-rw-r--r--kernel/bpf/task_iter.c267
-rw-r--r--kernel/bpf/trampoline.c77
-rw-r--r--kernel/bpf/verifier.c1124
-rw-r--r--kernel/capability.c14
-rw-r--r--kernel/cgroup/cgroup-v1.c3
-rw-r--r--kernel/cgroup/cgroup.c61
-rw-r--r--kernel/cgroup/cpuset.c6
-rw-r--r--kernel/cpu.c7
-rw-r--r--kernel/debug/debug_core.c39
-rw-r--r--kernel/debug/gdbstub.c4
-rw-r--r--kernel/debug/kdb/kdb_private.h12
-rw-r--r--kernel/debug/kdb/kdb_support.c53
-rw-r--r--kernel/dma/Kconfig3
-rw-r--r--kernel/dma/map_benchmark.c18
-rw-r--r--kernel/dma/mapping.c42
-rw-r--r--kernel/dma/swiotlb.c310
-rw-r--r--kernel/entry/common.c31
-rw-r--r--kernel/entry/syscall_user_dispatch.c4
-rw-r--r--kernel/events/core.c246
-rw-r--r--kernel/events/uprobes.c82
-rw-r--r--kernel/fork.c30
-rw-r--r--kernel/futex.c232
-rw-r--r--kernel/gcov/Kconfig2
-rw-r--r--kernel/groups.c7
-rw-r--r--kernel/irq/irqdomain.c11
-rw-r--r--kernel/irq/resend.c4
-rw-r--r--kernel/kallsyms.c8
-rw-r--r--kernel/kcsan/core.c26
-rw-r--r--kernel/kexec_core.c4
-rw-r--r--kernel/kexec_file.c5
-rw-r--r--kernel/kexec_internal.h2
-rw-r--r--kernel/kprobes.c78
-rw-r--r--kernel/livepatch/core.c7
-rw-r--r--kernel/locking/Makefile1
-rw-r--r--kernel/locking/irqflag-debug.c13
-rw-r--r--kernel/locking/lockdep.c186
-rw-r--r--kernel/locking/locktorture.c1
-rw-r--r--kernel/locking/mutex.c10
-rw-r--r--kernel/locking/qrwlock.c1
-rw-r--r--kernel/locking/rtmutex.c78
-rw-r--r--kernel/locking/rtmutex_common.h3
-rw-r--r--kernel/locking/rwsem.c2
-rw-r--r--kernel/locking/rwsem.h0
-rw-r--r--kernel/locking/semaphore.c2
-rw-r--r--kernel/module.c481
-rw-r--r--kernel/module_signature.c2
-rw-r--r--kernel/module_signing.c2
-rw-r--r--kernel/power/Kconfig12
-rw-r--r--kernel/power/main.c2
-rw-r--r--kernel/power/process.c2
-rw-r--r--kernel/power/swap.c2
-rw-r--r--kernel/printk/printk.c28
-rw-r--r--kernel/printk/printk_ringbuffer.h2
-rw-r--r--kernel/printk/printk_safe.c16
-rw-r--r--kernel/ptrace.c2
-rw-r--r--kernel/rcu/Kconfig5
-rw-r--r--kernel/rcu/rcu.h16
-rw-r--r--kernel/rcu/rcu_segcblist.c216
-rw-r--r--kernel/rcu/rcu_segcblist.h57
-rw-r--r--kernel/rcu/rcutorture.c395
-rw-r--r--kernel/rcu/refscale.c23
-rw-r--r--kernel/rcu/srcutiny.c77
-rw-r--r--kernel/rcu/srcutree.c147
-rw-r--r--kernel/rcu/tasks.h79
-rw-r--r--kernel/rcu/tree.c154
-rw-r--r--kernel/rcu/tree.h4
-rw-r--r--kernel/rcu/tree_exp.h2
-rw-r--r--kernel/rcu/tree_plugin.h398
-rw-r--r--kernel/rcu/tree_stall.h60
-rw-r--r--kernel/rcu/update.c4
-rw-r--r--kernel/resource.c98
-rw-r--r--kernel/scftorture.c6
-rw-r--r--kernel/sched/core.c406
-rw-r--r--kernel/sched/cpufreq_schedutil.c122
-rw-r--r--kernel/sched/deadline.c94
-rw-r--r--kernel/sched/debug.c2
-rw-r--r--kernel/sched/fair.c324
-rw-r--r--kernel/sched/features.h2
-rw-r--r--kernel/sched/idle.c1
-rw-r--r--kernel/sched/membarrier.c2
-rw-r--r--kernel/sched/rt.c2
-rw-r--r--kernel/sched/sched.h51
-rw-r--r--kernel/sched/topology.c99
-rw-r--r--kernel/seccomp.c4
-rw-r--r--kernel/signal.c4
-rw-r--r--kernel/smp.c4
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/static_call.c60
-rw-r--r--kernel/sys.c5
-rw-r--r--kernel/sysctl.c8
-rw-r--r--kernel/time/alarmtimer.c8
-rw-r--r--kernel/time/namespace.c6
-rw-r--r--kernel/time/ntp.c4
-rw-r--r--kernel/time/timer.c14
-rw-r--r--kernel/torture.c167
-rw-r--r--kernel/trace/Kconfig37
-rw-r--r--kernel/trace/Makefile1
-rw-r--r--kernel/trace/blktrace.c53
-rw-r--r--kernel/trace/bpf_trace.c9
-rw-r--r--kernel/trace/error_report-traces.c11
-rw-r--r--kernel/trace/fgraph.c2
-rw-r--r--kernel/trace/preemptirq_delay_test.c14
-rw-r--r--kernel/trace/ring_buffer.c62
-rw-r--r--kernel/trace/trace.c291
-rw-r--r--kernel/trace/trace.h64
-rw-r--r--kernel/trace/trace_branch.c6
-rw-r--r--kernel/trace/trace_dynevent.c35
-rw-r--r--kernel/trace/trace_dynevent.h4
-rw-r--r--kernel/trace/trace_event_perf.c5
-rw-r--r--kernel/trace/trace_events.c43
-rw-r--r--kernel/trace/trace_events_inject.c6
-rw-r--r--kernel/trace/trace_events_synth.c322
-rw-r--r--kernel/trace/trace_functions.c31
-rw-r--r--kernel/trace/trace_functions_graph.c32
-rw-r--r--kernel/trace/trace_hwlat.c7
-rw-r--r--kernel/trace/trace_irqsoff.c90
-rw-r--r--kernel/trace/trace_kprobe.c57
-rw-r--r--kernel/trace/trace_mmiotrace.c16
-rw-r--r--kernel/trace/trace_output.c12
-rw-r--r--kernel/trace/trace_probe.c17
-rw-r--r--kernel/trace/trace_probe.h1
-rw-r--r--kernel/trace/trace_sched_wakeup.c71
-rw-r--r--kernel/trace/trace_syscalls.c20
-rw-r--r--kernel/trace/trace_uprobe.c23
-rw-r--r--kernel/tracepoint.c91
-rw-r--r--kernel/watch_queue.c2
-rw-r--r--kernel/workqueue.c4
-rw-r--r--lib/Kconfig9
-rw-r--r--lib/Kconfig.debug58
-rw-r--r--lib/Kconfig.kasan6
-rw-r--r--lib/Kconfig.kfence82
-rw-r--r--lib/Kconfig.ubsan17
-rw-r--r--lib/Makefile8
-rw-r--r--lib/bug.c3
-rw-r--r--lib/buildid.c149
-rw-r--r--lib/cmdline.c28
-rw-r--r--lib/cmdline_kunit.c56
-rw-r--r--lib/cpumask.c16
-rw-r--r--lib/crc7.c2
-rw-r--r--lib/crypto/blake2s.c48
-rw-r--r--lib/crypto/chacha20poly1305.c5
-rw-r--r--lib/genalloc.c3
-rw-r--r--lib/iov_iter.c59
-rw-r--r--lib/kunit/Kconfig1
-rw-r--r--lib/kunit/assert.c39
-rw-r--r--lib/kunit/executor.c93
-rw-r--r--lib/linear_ranges.c8
-rw-r--r--lib/locking-selftest.c334
-rw-r--r--lib/logic_pio.c3
-rw-r--r--lib/parman.c1
-rw-r--r--lib/parser.c44
-rw-r--r--lib/percpu-refcount.c12
-rw-r--r--lib/stackdepot.c37
-rw-r--r--lib/test_bitmap.c3
-rw-r--r--lib/test_bpf.c21
-rw-r--r--lib/test_fpu.c6
-rw-r--r--lib/test_kasan.c535
-rw-r--r--lib/test_kasan_module.c5
-rw-r--r--lib/test_printf.c16
-rw-r--r--lib/test_ubsan.c49
-rw-r--r--lib/timerqueue.c28
-rw-r--r--lib/ubsan.c99
-rw-r--r--lib/ubsan.h6
-rw-r--r--lib/vsprintf.c36
-rw-r--r--mm/Kconfig3
-rw-r--r--mm/Makefile2
-rw-r--r--mm/backing-dev.c9
-rw-r--r--mm/cma.c64
-rw-r--r--mm/compaction.c76
-rw-r--r--mm/debug.c10
-rw-r--r--mm/debug_vm_pgtable.c86
-rw-r--r--mm/dmapool.c3
-rw-r--r--mm/early_ioremap.c12
-rw-r--r--mm/filemap.c1109
-rw-r--r--mm/gup.c5
-rw-r--r--mm/huge_memory.c71
-rw-r--r--mm/hugetlb.c377
-rw-r--r--mm/hugetlb_cgroup.c6
-rw-r--r--mm/internal.h4
-rw-r--r--mm/kasan/common.c235
-rw-r--r--mm/kasan/generic.c41
-rw-r--r--mm/kasan/hw_tags.c26
-rw-r--r--mm/kasan/kasan.h152
-rw-r--r--mm/kasan/quarantine.c22
-rw-r--r--mm/kasan/report.c23
-rw-r--r--mm/kasan/report_generic.c8
-rw-r--r--mm/kasan/report_hw_tags.c8
-rw-r--r--mm/kasan/report_sw_tags.c8
-rw-r--r--mm/kasan/shadow.c83
-rw-r--r--mm/kasan/sw_tags.c20
-rw-r--r--mm/kfence/Makefile6
-rw-r--r--mm/kfence/core.c841
-rw-r--r--mm/kfence/kfence.h106
-rw-r--r--mm/kfence/kfence_test.c858
-rw-r--r--mm/kfence/report.c262
-rw-r--r--mm/khugepaged.c65
-rw-r--r--mm/list_lru.c12
-rw-r--r--mm/madvise.c17
-rw-r--r--mm/memblock.c55
-rw-r--r--mm/memcontrol.c286
-rw-r--r--mm/memory-failure.c40
-rw-r--r--mm/memory.c342
-rw-r--r--mm/memory_hotplug.c171
-rw-r--r--mm/mempolicy.c18
-rw-r--r--mm/mempool.c2
-rw-r--r--mm/memremap.c23
-rw-r--r--mm/migrate.c16
-rw-r--r--mm/mincore.c5
-rw-r--r--mm/mlock.c5
-rw-r--r--mm/mmap.c12
-rw-r--r--mm/mmu_gather.c31
-rw-r--r--mm/mprotect.c7
-rw-r--r--mm/mremap.c16
-rw-r--r--mm/nommu.c3
-rw-r--r--mm/oom_kill.c11
-rw-r--r--mm/page_alloc.c157
-rw-r--r--mm/page_io.c64
-rw-r--r--mm/page_owner.c4
-rw-r--r--mm/page_reporting.c2
-rw-r--r--mm/percpu.c36
-rw-r--r--mm/pgtable-generic.c5
-rw-r--r--mm/rmap.c57
-rw-r--r--mm/shmem.c212
-rw-r--r--mm/slab.c79
-rw-r--r--mm/slab.h32
-rw-r--r--mm/slab_common.c138
-rw-r--r--mm/slob.c8
-rw-r--r--mm/slub.c294
-rw-r--r--mm/swap.c88
-rw-r--r--mm/swap_slots.c3
-rw-r--r--mm/swap_state.c38
-rw-r--r--mm/swapfile.c68
-rw-r--r--mm/truncate.c131
-rw-r--r--mm/util.c31
-rw-r--r--mm/vmalloc.c13
-rw-r--r--mm/vmscan.c98
-rw-r--r--mm/vmstat.c49
-rw-r--r--mm/workingset.c7
-rw-r--r--mm/z3fold.c12
-rw-r--r--mm/zbud.c1
-rw-r--r--mm/zpool.c13
-rw-r--r--mm/zsmalloc.c22
-rw-r--r--mm/zswap.c57
-rw-r--r--net/8021q/vlan_dev.c9
-rw-r--r--net/9p/Kconfig1
-rw-r--r--net/Kconfig4
-rw-r--r--net/Makefile23
-rw-r--r--net/appletalk/ddp.c33
-rw-r--r--net/atm/pppoatm.c15
-rw-r--r--net/batman-adv/Kconfig3
-rw-r--r--net/batman-adv/Makefile2
-rw-r--r--net/batman-adv/bat_algo.c2
-rw-r--r--net/batman-adv/bat_algo.h2
-rw-r--r--net/batman-adv/bat_iv_ogm.c2
-rw-r--r--net/batman-adv/bat_iv_ogm.h2
-rw-r--r--net/batman-adv/bat_v.c2
-rw-r--r--net/batman-adv/bat_v.h2
-rw-r--r--net/batman-adv/bat_v_elp.c2
-rw-r--r--net/batman-adv/bat_v_elp.h2
-rw-r--r--net/batman-adv/bat_v_ogm.c2
-rw-r--r--net/batman-adv/bat_v_ogm.h2
-rw-r--r--net/batman-adv/bitarray.c2
-rw-r--r--net/batman-adv/bitarray.h2
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c2
-rw-r--r--net/batman-adv/bridge_loop_avoidance.h2
-rw-r--r--net/batman-adv/distributed-arp-table.c6
-rw-r--r--net/batman-adv/distributed-arp-table.h2
-rw-r--r--net/batman-adv/fragmentation.c2
-rw-r--r--net/batman-adv/fragmentation.h2
-rw-r--r--net/batman-adv/gateway_client.c2
-rw-r--r--net/batman-adv/gateway_client.h2
-rw-r--r--net/batman-adv/gateway_common.c2
-rw-r--r--net/batman-adv/gateway_common.h2
-rw-r--r--net/batman-adv/hard-interface.c2
-rw-r--r--net/batman-adv/hard-interface.h2
-rw-r--r--net/batman-adv/hash.c2
-rw-r--r--net/batman-adv/hash.h2
-rw-r--r--net/batman-adv/log.c2
-rw-r--r--net/batman-adv/log.h2
-rw-r--r--net/batman-adv/main.c2
-rw-r--r--net/batman-adv/main.h4
-rw-r--r--net/batman-adv/multicast.c4
-rw-r--r--net/batman-adv/multicast.h2
-rw-r--r--net/batman-adv/netlink.c6
-rw-r--r--net/batman-adv/netlink.h2
-rw-r--r--net/batman-adv/network-coding.c2
-rw-r--r--net/batman-adv/network-coding.h2
-rw-r--r--net/batman-adv/originator.c2
-rw-r--r--net/batman-adv/originator.h2
-rw-r--r--net/batman-adv/routing.c2
-rw-r--r--net/batman-adv/routing.h2
-rw-r--r--net/batman-adv/send.c2
-rw-r--r--net/batman-adv/send.h2
-rw-r--r--net/batman-adv/soft-interface.c2
-rw-r--r--net/batman-adv/soft-interface.h2
-rw-r--r--net/batman-adv/tp_meter.c4
-rw-r--r--net/batman-adv/tp_meter.h2
-rw-r--r--net/batman-adv/trace.c2
-rw-r--r--net/batman-adv/trace.h2
-rw-r--r--net/batman-adv/translation-table.c2
-rw-r--r--net/batman-adv/translation-table.h2
-rw-r--r--net/batman-adv/tvlv.c2
-rw-r--r--net/batman-adv/tvlv.h2
-rw-r--r--net/batman-adv/types.h5
-rw-r--r--net/bluetooth/Kconfig2
-rw-r--r--net/bluetooth/a2mp.c3
-rw-r--r--net/bluetooth/af_bluetooth.c22
-rw-r--r--net/bluetooth/amp.c3
-rw-r--r--net/bluetooth/hci_conn.c37
-rw-r--r--net/bluetooth/hci_core.c201
-rw-r--r--net/bluetooth/hci_debugfs.c80
-rw-r--r--net/bluetooth/hci_request.c74
-rw-r--r--net/bluetooth/l2cap_core.c119
-rw-r--r--net/bluetooth/mgmt.c399
-rw-r--r--net/bluetooth/msft.c460
-rw-r--r--net/bluetooth/msft.h30
-rw-r--r--net/bluetooth/smp.c5
-rw-r--r--net/bpf/test_run.c11
-rw-r--r--net/bpfilter/Kconfig2
-rw-r--r--net/bridge/Makefile2
-rw-r--r--net/bridge/br.c2
-rw-r--r--net/bridge/br_fdb.c1
-rw-r--r--net/bridge/br_forward.c3
-rw-r--r--net/bridge/br_input.c2
-rw-r--r--net/bridge/br_mrp.c58
-rw-r--r--net/bridge/br_mrp_switchdev.c178
-rw-r--r--net/bridge/br_multicast.c265
-rw-r--r--net/bridge/br_multicast_eht.c878
-rw-r--r--net/bridge/br_netlink.c151
-rw-r--r--net/bridge/br_private.h32
-rw-r--r--net/bridge/br_private_mcast_eht.h93
-rw-r--r--net/bridge/br_private_mrp.h70
-rw-r--r--net/bridge/br_stp.c8
-rw-r--r--net/bridge/br_switchdev.c33
-rw-r--r--net/bridge/br_sysfs_br.c170
-rw-r--r--net/bridge/br_sysfs_if.c16
-rw-r--r--net/bridge/br_vlan.c31
-rw-r--r--net/bridge/netfilter/nft_meta_bridge.c5
-rw-r--r--net/caif/chnl_net.c5
-rw-r--r--net/can/Kconfig1
-rw-r--r--net/can/af_can.c34
-rw-r--r--net/can/gw.c2
-rw-r--r--net/can/j1939/main.c22
-rw-r--r--net/can/j1939/socket.c13
-rw-r--r--net/can/proc.c19
-rw-r--r--net/can/raw.c16
-rw-r--r--net/ceph/ceph_common.c17
-rw-r--r--net/core/datagram.c12
-rw-r--r--net/core/dev.c603
-rw-r--r--net/core/dev_ioctl.c20
-rw-r--r--net/core/devlink.c315
-rw-r--r--net/core/filter.c230
-rw-r--r--net/core/flow_dissector.c35
-rw-r--r--net/core/neighbour.c8
-rw-r--r--net/core/net-sysfs.c53
-rw-r--r--net/core/net_namespace.c19
-rw-r--r--net/core/netpoll.c22
-rw-r--r--net/core/page_pool.c14
-rw-r--r--net/core/pktgen.c2
-rw-r--r--net/core/rtnetlink.c4
-rw-r--r--net/core/skbuff.c524
-rw-r--r--net/core/skmsg.c3
-rw-r--r--net/core/sock.c212
-rw-r--r--net/core/sock_map.c2
-rw-r--r--net/core/sysctl_net_core.c2
-rw-r--r--net/core/xdp.c70
-rw-r--r--net/dcb/Makefile2
-rw-r--r--net/dccp/feat.c2
-rw-r--r--net/decnet/dn_route.c2
-rw-r--r--net/dns_resolver/Kconfig2
-rw-r--r--net/dsa/Kconfig28
-rw-r--r--net/dsa/Makefile2
-rw-r--r--net/dsa/dsa.c60
-rw-r--r--net/dsa/dsa2.c391
-rw-r--r--net/dsa/dsa_priv.h162
-rw-r--r--net/dsa/master.c39
-rw-r--r--net/dsa/port.c376
-rw-r--r--net/dsa/slave.c497
-rw-r--r--net/dsa/switch.c322
-rw-r--r--net/dsa/tag_8021q.c15
-rw-r--r--net/dsa/tag_brcm.c1
-rw-r--r--net/dsa/tag_dsa.c17
-rw-r--r--net/dsa/tag_ocelot.c252
-rw-r--r--net/dsa/tag_ocelot_8021q.c102
-rw-r--r--net/dsa/tag_rtl4_a.c43
-rw-r--r--net/dsa/tag_xrs700x.c66
-rw-r--r--net/ethtool/common.c152
-rw-r--r--net/ethtool/common.h7
-rw-r--r--net/ethtool/ioctl.c18
-rw-r--r--net/ethtool/linkmodes.c208
-rw-r--r--net/ethtool/netlink.h2
-rw-r--r--net/hsr/hsr_device.c53
-rw-r--r--net/hsr/hsr_device.h1
-rw-r--r--net/hsr/hsr_forward.c35
-rw-r--r--net/hsr/hsr_forward.h1
-rw-r--r--net/hsr/hsr_framereg.c11
-rw-r--r--net/hsr/hsr_framereg.h1
-rw-r--r--net/hsr/hsr_main.c11
-rw-r--r--net/hsr/hsr_main.h14
-rw-r--r--net/hsr/hsr_slave.c10
-rw-r--r--net/ife/Kconfig1
-rw-r--r--net/ipv4/af_inet.c24
-rw-r--r--net/ipv4/esp4_offload.c2
-rw-r--r--net/ipv4/fib_lookup.h6
-rw-r--r--net/ipv4/fib_semantics.c7
-rw-r--r--net/ipv4/fib_trie.c38
-rw-r--r--net/ipv4/gre_offload.c22
-rw-r--r--net/ipv4/icmp.c5
-rw-r--r--net/ipv4/inet_hashtables.c25
-rw-r--r--net/ipv4/ip_input.c1
-rw-r--r--net/ipv4/ip_output.c6
-rw-r--r--net/ipv4/ip_tunnel.c16
-rw-r--r--net/ipv4/ip_tunnel_core.c9
-rw-r--r--net/ipv4/ipconfig.c22
-rw-r--r--net/ipv4/netfilter/nft_dup_ipv4.c18
-rw-r--r--net/ipv4/nexthop.c347
-rw-r--r--net/ipv4/proc.c50
-rw-r--r--net/ipv4/route.c14
-rw-r--r--net/ipv4/sysctl_net_ipv4.c9
-rw-r--r--net/ipv4/tcp.c199
-rw-r--r--net/ipv4/tcp_cubic.c11
-rw-r--r--net/ipv4/tcp_input.c41
-rw-r--r--net/ipv4/tcp_ipv4.c6
-rw-r--r--net/ipv4/tcp_output.c4
-rw-r--r--net/ipv4/tcp_recovery.c5
-rw-r--r--net/ipv4/tcp_timer.c18
-rw-r--r--net/ipv4/udp.c13
-rw-r--r--net/ipv4/udp_offload.c76
-rw-r--r--net/ipv4/udp_tunnel_core.c24
-rw-r--r--net/ipv6/addrconf.c11
-rw-r--r--net/ipv6/af_inet6.c20
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/icmp.c18
-rw-r--r--net/ipv6/ip6_fib.c5
-rw-r--r--net/ipv6/ip6_icmp.c12
-rw-r--r--net/ipv6/ip6_input.c3
-rw-r--r--net/ipv6/ip6_offload.c1
-rw-r--r--net/ipv6/ip6_output.c6
-rw-r--r--net/ipv6/ndisc.c12
-rw-r--r--net/ipv6/netfilter/nft_dup_ipv6.c18
-rw-r--r--net/ipv6/route.c72
-rw-r--r--net/ipv6/seg6_local.c67
-rw-r--r--net/ipv6/sysctl_net_ipv6.c9
-rw-r--r--net/ipv6/tcp_ipv6.c6
-rw-r--r--net/ipv6/udp.c11
-rw-r--r--net/ipv6/udp_offload.c2
-rw-r--r--net/iucv/af_iucv.c122
-rw-r--r--net/kcm/kcmsock.c8
-rw-r--r--net/key/af_key.c6
-rw-r--r--net/l3mdev/Makefile2
-rw-r--r--net/lapb/lapb_iface.c70
-rw-r--r--net/lapb/lapb_out.c3
-rw-r--r--net/lapb/lapb_timer.c30
-rw-r--r--net/llc/Kconfig1
-rw-r--r--net/mac80211/Kconfig2
-rw-r--r--net/mac80211/Makefile2
-rw-r--r--net/mac80211/debugfs.c52
-rw-r--r--net/mac80211/debugfs_sta.c1
-rw-r--r--net/mac80211/driver-ops.c5
-rw-r--r--net/mac80211/driver-ops.h16
-rw-r--r--net/mac80211/he.c92
-rw-r--r--net/mac80211/ieee80211_i.h4
-rw-r--r--net/mac80211/iface.c46
-rw-r--r--net/mac80211/key.c4
-rw-r--r--net/mac80211/main.c5
-rw-r--r--net/mac80211/mesh_hwmp.c2
-rw-r--r--net/mac80211/mlme.c3
-rw-r--r--net/mac80211/pm.c6
-rw-r--r--net/mac80211/rate.c3
-rw-r--r--net/mac80211/rc80211_minstrel.c574
-rw-r--r--net/mac80211/rc80211_minstrel.h184
-rw-r--r--net/mac80211/rc80211_minstrel_debugfs.c172
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c1192
-rw-r--r--net/mac80211/rc80211_minstrel_ht.h137
-rw-r--r--net/mac80211/rc80211_minstrel_ht_debugfs.c79
-rw-r--r--net/mac80211/rx.c243
-rw-r--r--net/mac80211/spectmgmt.c10
-rw-r--r--net/mac80211/sta_info.h2
-rw-r--r--net/mac80211/status.c8
-rw-r--r--net/mac80211/tdls.c6
-rw-r--r--net/mac80211/trace.h18
-rw-r--r--net/mac80211/tx.c67
-rw-r--r--net/mac80211/util.c14
-rw-r--r--net/mac80211/vht.c9
-rw-r--r--net/mptcp/mib.c8
-rw-r--r--net/mptcp/mib.h8
-rw-r--r--net/mptcp/mptcp_diag.c8
-rw-r--r--net/mptcp/options.c94
-rw-r--r--net/mptcp/pm.c46
-rw-r--r--net/mptcp/pm_netlink.c826
-rw-r--r--net/mptcp/protocol.c376
-rw-r--r--net/mptcp/protocol.h155
-rw-r--r--net/mptcp/subflow.c268
-rw-r--r--net/netfilter/Kconfig2
-rw-r--r--net/netfilter/ipvs/Kconfig13
-rw-r--r--net/netfilter/ipvs/Makefile1
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_twos.c139
-rw-r--r--net/netfilter/nf_conntrack_core.c3
-rw-r--r--net/netfilter/nf_conntrack_netlink.c7
-rw-r--r--net/netfilter/nf_flow_table_core.c10
-rw-r--r--net/netfilter/nf_tables_api.c346
-rw-r--r--net/netfilter/nfnetlink_log.c8
-rw-r--r--net/netfilter/nfnetlink_queue.c10
-rw-r--r--net/netfilter/nft_bitwise.c23
-rw-r--r--net/netfilter/nft_byteorder.c14
-rw-r--r--net/netfilter/nft_cmp.c12
-rw-r--r--net/netfilter/nft_ct.c12
-rw-r--r--net/netfilter/nft_dup_netdev.c6
-rw-r--r--net/netfilter/nft_dynset.c53
-rw-r--r--net/netfilter/nft_exthdr.c14
-rw-r--r--net/netfilter/nft_fib.c5
-rw-r--r--net/netfilter/nft_fwd_netdev.c18
-rw-r--r--net/netfilter/nft_hash.c25
-rw-r--r--net/netfilter/nft_immediate.c6
-rw-r--r--net/netfilter/nft_lookup.c14
-rw-r--r--net/netfilter/nft_masq.c18
-rw-r--r--net/netfilter/nft_meta.c8
-rw-r--r--net/netfilter/nft_nat.c35
-rw-r--r--net/netfilter/nft_numgen.c15
-rw-r--r--net/netfilter/nft_objref.c6
-rw-r--r--net/netfilter/nft_osf.c8
-rw-r--r--net/netfilter/nft_payload.c10
-rw-r--r--net/netfilter/nft_queue.c12
-rw-r--r--net/netfilter/nft_range.c6
-rw-r--r--net/netfilter/nft_redir.c18
-rw-r--r--net/netfilter/nft_rt.c7
-rw-r--r--net/netfilter/nft_socket.c7
-rw-r--r--net/netfilter/nft_tproxy.c14
-rw-r--r--net/netfilter/nft_tunnel.c8
-rw-r--r--net/netfilter/nft_xfrm.c7
-rw-r--r--net/netfilter/xt_recent.c12
-rw-r--r--net/netlink/af_netlink.c8
-rw-r--r--net/netlink/genetlink.c32
-rw-r--r--net/nfc/Kconfig1
-rw-r--r--net/nfc/hci/llc_shdlc.c2
-rw-r--r--net/nfc/nci/core.c4
-rw-r--r--net/nfc/nci/uart.c3
-rw-r--r--net/nfc/netlink.c5
-rw-r--r--net/nfc/rawsock.c2
-rw-r--r--net/openvswitch/actions.c12
-rw-r--r--net/openvswitch/flow_netlink.c14
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/packet/internal.h2
-rw-r--r--net/psample/Kconfig1
-rw-r--r--net/psample/psample.c4
-rw-r--r--net/qrtr/tun.c18
-rw-r--r--net/rds/rdma.c3
-rw-r--r--net/rxrpc/Kconfig1
-rw-r--r--net/rxrpc/af_rxrpc.c6
-rw-r--r--net/rxrpc/call_accept.c1
-rw-r--r--net/rxrpc/call_object.c2
-rw-r--r--net/rxrpc/local_object.c74
-rw-r--r--net/sched/act_api.c106
-rw-r--r--net/sched/act_ct.c2
-rw-r--r--net/sched/cls_api.c12
-rw-r--r--net/sched/cls_flower.c64
-rw-r--r--net/sched/em_nbyte.c2
-rw-r--r--net/sched/sch_api.c7
-rw-r--r--net/sched/sch_atm.c3
-rw-r--r--net/sched/sch_cbq.c3
-rw-r--r--net/sched/sch_drr.c3
-rw-r--r--net/sched/sch_dsmark.c3
-rw-r--r--net/sched/sch_hfsc.c3
-rw-r--r--net/sched/sch_htb.c557
-rw-r--r--net/sched/sch_qfq.c3
-rw-r--r--net/sched/sch_sfb.c3
-rw-r--r--net/sched/sch_taprio.c6
-rw-r--r--net/sctp/offload.c2
-rw-r--r--net/sctp/proc.c16
-rw-r--r--net/socket.c19
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c30
-rw-r--r--net/sunrpc/auth_gss/auth_gss_internal.h45
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c31
-rw-r--r--net/sunrpc/rpc_pipe.c1
-rw-r--r--net/sunrpc/svc.c2
-rw-r--r--net/sunrpc/svcsock.c40
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c4
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c12
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c67
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma.c198
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_backchannel.c4
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c91
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_rw.c3
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c2
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c6
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h15
-rw-r--r--net/sunrpc/xprtsock.c17
-rw-r--r--net/switchdev/Makefile2
-rw-r--r--net/switchdev/switchdev.c131
-rw-r--r--net/tipc/monitor.c2
-rw-r--r--net/tipc/msg.c4
-rw-r--r--net/tls/Kconfig1
-rw-r--r--net/tls/tls_device.c4
-rw-r--r--net/tls/tls_device_fallback.c2
-rw-r--r--net/unix/af_unix.c5
-rw-r--r--net/vmw_vsock/af_vsock.c32
-rw-r--r--net/vmw_vsock/hyperv_transport.c4
-rw-r--r--net/vmw_vsock/virtio_transport_common.c4
-rw-r--r--net/wireless/chan.c5
-rw-r--r--net/wireless/core.c164
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/debugfs.c4
-rw-r--r--net/wireless/ibss.c3
-rw-r--r--net/wireless/mlme.c6
-rw-r--r--net/wireless/nl80211.c664
-rw-r--r--net/wireless/reg.c93
-rw-r--r--net/wireless/reg.h1
-rw-r--r--net/wireless/scan.c35
-rw-r--r--net/wireless/sme.c5
-rw-r--r--net/wireless/sysfs.c12
-rw-r--r--net/wireless/util.c39
-rw-r--r--net/wireless/wext-compat.c285
-rw-r--r--net/wireless/wext-core.c5
-rw-r--r--net/wireless/wext-sme.c4
-rw-r--r--net/xdp/xsk.c47
-rw-r--r--net/xdp/xsk_buff_pool.c12
-rw-r--r--net/xfrm/xfrm_input.c2
-rw-r--r--net/xfrm/xfrm_interface.c10
-rw-r--r--net/xfrm/xfrm_policy.c30
-rw-r--r--net/xfrm/xfrm_user.c2
-rw-r--r--samples/Kconfig2
-rw-r--r--samples/acrn/Makefile12
-rw-r--r--samples/acrn/guest.ld9
-rw-r--r--samples/acrn/payload.ld9
-rw-r--r--samples/acrn/vm-sample.c136
-rw-r--r--samples/auxdisplay/cfag12864b-example.c2
-rw-r--r--samples/bpf/Makefile10
-rw-r--r--samples/bpf/README.rst22
-rw-r--r--samples/bpf/bpf_insn.h28
-rw-r--r--samples/bpf/cookie_uid_helper_example.c10
-rw-r--r--samples/bpf/sock_example.c2
-rw-r--r--samples/bpf/test_cgrp2_attach.c5
-rw-r--r--samples/bpf/xdp_redirect_map_kern.c60
-rw-r--r--samples/bpf/xdp_redirect_map_user.c112
-rw-r--r--samples/kprobes/kprobe_example.c9
-rw-r--r--samples/watch_queue/watch_test.c2
-rw-r--r--scripts/Kbuild.include6
-rw-r--r--scripts/Kconfig.include13
-rw-r--r--scripts/Makefile9
-rw-r--r--scripts/Makefile.build68
-rw-r--r--scripts/Makefile.clean3
-rw-r--r--scripts/Makefile.dtbinst3
-rw-r--r--scripts/Makefile.lib57
-rw-r--r--scripts/Makefile.modfinal26
-rw-r--r--scripts/Makefile.modpost25
-rw-r--r--scripts/Makefile.ubsan2
-rwxr-xr-xscripts/adjust_autoksyms.sh3
-rwxr-xr-x  scripts/bloat-o-meter  2
-rwxr-xr-xscripts/cc-version.sh82
-rwxr-xr-xscripts/checkpatch.pl171
-rwxr-xr-xscripts/clang-tools/gen_compile_commands.py14
-rwxr-xr-xscripts/clang-tools/run-clang-tools.py2
-rwxr-xr-xscripts/clang-version.sh19
-rw-r--r--scripts/coccinelle/free/put_device.cocci1
-rwxr-xr-x  scripts/diffconfig  2
-rw-r--r--scripts/dtc/.gitignore1
-rw-r--r--scripts/dtc/Makefile9
-rw-r--r--scripts/dtc/data.c6
-rw-r--r--scripts/dtc/dtc.c4
-rw-r--r--scripts/dtc/dtc.h8
-rw-r--r--scripts/dtc/fdtdump.c163
-rw-r--r--scripts/dtc/fdtoverlay.c208
-rw-r--r--scripts/dtc/flattree.c8
l---------  scripts/dtc/include-prefixes/c6x  1
-rw-r--r--scripts/dtc/libfdt/fdt.c4
-rw-r--r--scripts/dtc/libfdt/fdt_ro.c20
-rw-r--r--scripts/dtc/libfdt/fdt_rw.c4
-rw-r--r--scripts/dtc/libfdt/fdt_sw.c2
-rw-r--r--scripts/dtc/libfdt/libfdt.h126
-rw-r--r--scripts/dtc/libfdt/libfdt_internal.h19
-rw-r--r--scripts/dtc/livetree.c2
-rw-r--r--scripts/dtc/srcpos.c2
-rwxr-xr-xscripts/dtc/update-dtc-source.sh3
-rw-r--r--scripts/dtc/version_gen.h2
-rw-r--r--scripts/dtc/yamltree.c6
-rwxr-xr-x  scripts/dummy-tools/gcc  10
-rw-r--r--scripts/gcc-plugins/latent_entropy_plugin.c2
-rw-r--r--scripts/gcc-plugins/structleak_plugin.c3
-rwxr-xr-xscripts/gcc-version.sh20
-rw-r--r--scripts/gdb/linux/Makefile4
-rw-r--r--scripts/gdb/linux/lists.py5
-rwxr-xr-xscripts/gen_autoksyms.sh38
-rwxr-xr-xscripts/generate_initcall_order.pl270
-rw-r--r--scripts/genksyms/genksyms.c2
-rw-r--r--scripts/genksyms/genksyms.h2
-rw-r--r--scripts/genksyms/lex.l56
-rwxr-xr-x  scripts/jobserver-exec  2
-rw-r--r--scripts/kallsyms.c6
-rw-r--r--scripts/kconfig/Makefile36
-rw-r--r--scripts/kconfig/conf.c78
-rwxr-xr-xscripts/kconfig/mconf-cfg.sh2
-rwxr-xr-x  scripts/kernel-doc  65
-rwxr-xr-xscripts/ld-version.sh82
-rwxr-xr-xscripts/link-vmlinux.sh98
-rwxr-xr-xscripts/lld-version.sh20
-rw-r--r--scripts/mod/Makefile1
-rw-r--r--scripts/mod/devicetable-offsets.c12
-rw-r--r--scripts/mod/file2alias.c36
-rw-r--r--scripts/mod/modpost.c66
-rw-r--r--scripts/mod/modpost.h12
-rw-r--r--scripts/mod/sumversion.c6
-rw-r--r--scripts/module.lds.S30
-rw-r--r--scripts/recordmcount.c2
-rwxr-xr-xscripts/recordmcount.pl6
-rwxr-xr-xscripts/spdxcheck.py2
-rw-r--r--scripts/spelling.txt30
-rwxr-xr-x  scripts/sphinx-pre-install  4
-rwxr-xr-xscripts/syscallhdr.sh98
-rwxr-xr-xscripts/syscalltbl.sh73
-rwxr-xr-xscripts/test_dwarf5_support.sh8
-rwxr-xr-x  scripts/ver_linux  12
-rw-r--r--security/apparmor/apparmorfs.c3
-rw-r--r--security/apparmor/domain.c13
-rw-r--r--security/apparmor/file.c4
-rw-r--r--security/apparmor/lsm.c21
-rw-r--r--security/commoncap.c187
-rw-r--r--security/integrity/digsig.c4
-rw-r--r--security/integrity/evm/evm_crypto.c18
-rw-r--r--security/integrity/evm/evm_main.c4
-rw-r--r--security/integrity/evm/evm_secfs.c2
-rw-r--r--security/integrity/ima/ima.h27
-rw-r--r--security/integrity/ima/ima_api.c16
-rw-r--r--security/integrity/ima/ima_appraise.c25
-rw-r--r--security/integrity/ima/ima_asymmetric_keys.c5
-rw-r--r--security/integrity/ima/ima_init.c5
-rw-r--r--security/integrity/ima/ima_kexec.c3
-rw-r--r--security/integrity/ima/ima_main.c91
-rw-r--r--security/integrity/ima/ima_mok.c5
-rw-r--r--security/integrity/ima/ima_policy.c133
-rw-r--r--security/integrity/ima/ima_queue_keys.c7
-rw-r--r--security/keys/Kconfig8
-rw-r--r--security/keys/big_key.c9
-rw-r--r--security/keys/key.c2
-rw-r--r--security/keys/keyctl.c2
-rw-r--r--security/keys/keyctl_pkey.c2
-rw-r--r--security/keys/keyring.c10
-rw-r--r--security/keys/process_keys.c1
-rw-r--r--security/keys/trusted-keys/trusted_tpm1.c22
-rw-r--r--security/keys/trusted-keys/trusted_tpm2.c22
-rw-r--r--security/lsm_audit.c5
-rw-r--r--security/security.c33
-rw-r--r--security/selinux/Makefile2
-rw-r--r--security/selinux/avc.c10
-rw-r--r--security/selinux/hooks.c164
-rw-r--r--security/selinux/ibpkey.c1
-rw-r--r--security/selinux/ima.c44
-rw-r--r--security/selinux/include/classmap.h2
-rw-r--r--security/selinux/include/ima.h24
-rw-r--r--security/selinux/include/security.h4
-rw-r--r--security/selinux/netif.c1
-rw-r--r--security/selinux/netlink.c2
-rw-r--r--security/selinux/netnode.c1
-rw-r--r--security/selinux/netport.c1
-rw-r--r--security/selinux/selinuxfs.c4
-rw-r--r--security/selinux/ss/avtab.c4
-rw-r--r--security/selinux/ss/ebitmap.c2
-rw-r--r--security/selinux/ss/hashtab.c2
-rw-r--r--security/selinux/ss/services.c74
-rw-r--r--security/selinux/xfrm.c2
-rw-r--r--security/smack/smack_lsm.c22
-rw-r--r--security/smack/smackfs.c21
-rw-r--r--security/tomoyo/file.c16
-rw-r--r--security/tomoyo/network.c10
-rw-r--r--security/tomoyo/util.c24
-rw-r--r--sound/ac97/bus.c2
-rw-r--r--sound/aoa/codecs/onyx.c2
-rw-r--r--sound/aoa/codecs/tas.c2
-rw-r--r--sound/aoa/codecs/toonie.c2
-rw-r--r--sound/aoa/core/alsa.c8
-rw-r--r--sound/aoa/fabrics/layout.c6
-rw-r--r--sound/aoa/soundbus/sysfs.c2
-rw-r--r--sound/arm/aaci.c10
-rw-r--r--sound/arm/pxa2xx-ac97.c2
-rw-r--r--sound/core/Kconfig9
-rw-r--r--sound/core/compress_offload.c2
-rw-r--r--sound/core/control.c20
-rw-r--r--sound/core/ctljack.c2
-rw-r--r--sound/core/hwdep.c6
-rw-r--r--sound/core/init.c23
-rw-r--r--sound/core/jack.c304
-rw-r--r--sound/core/oss/mixer_oss.c14
-rw-r--r--sound/core/oss/rate.c4
-rw-r--r--sound/core/pcm.c29
-rw-r--r--sound/core/pcm_dmaengine.c2
-rw-r--r--sound/core/pcm_local.h7
-rw-r--r--sound/core/pcm_memory.c12
-rw-r--r--sound/core/pcm_native.c64
-rw-r--r--sound/core/rawmidi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c4
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c6
-rw-r--r--sound/core/seq/seq_clientmgr.c2
-rw-r--r--sound/core/seq/seq_memory.c2
-rw-r--r--sound/core/seq/seq_ports.c6
-rw-r--r--sound/core/sound.c15
-rw-r--r--sound/core/timer.c10
-rw-r--r--sound/core/timer_compat.c4
-rw-r--r--sound/drivers/aloop.c2
-rw-r--r--sound/drivers/dummy.c2
-rw-r--r--sound/drivers/opl3/opl3_oss.c2
-rw-r--r--sound/drivers/opl3/opl3_synth.c2
-rw-r--r--sound/drivers/vx/vx_pcm.c3
-rw-r--r--sound/firewire/bebob/bebob_hwdep.c10
-rw-r--r--sound/firewire/dice/Makefile3
-rw-r--r--sound/firewire/dice/dice-harman.c26
-rw-r--r--sound/firewire/dice/dice-hwdep.c2
-rw-r--r--sound/firewire/dice/dice.c12
-rw-r--r--sound/firewire/dice/dice.h1
-rw-r--r--sound/firewire/digi00x/digi00x-hwdep.c2
-rw-r--r--sound/firewire/fireface/ff-hwdep.c12
-rw-r--r--sound/firewire/fireface/ff-protocol-latter.c118
-rw-r--r--sound/firewire/fireworks/fireworks_hwdep.c2
-rw-r--r--sound/firewire/motu/motu-hwdep.c2
-rw-r--r--sound/firewire/oxfw/oxfw-hwdep.c12
-rw-r--r--sound/firewire/tascam/tascam-hwdep.c2
-rw-r--r--sound/hda/Kconfig18
-rw-r--r--sound/hda/Makefile3
-rw-r--r--sound/hda/ext/hdac_ext_controller.c39
-rw-r--r--sound/hda/ext/hdac_ext_stream.c2
-rw-r--r--sound/hda/hdac_bus.c23
-rw-r--r--sound/hda/hdac_controller.c14
-rw-r--r--sound/hda/hdac_regmap.c2
-rw-r--r--sound/hda/hdac_stream.c5
-rw-r--r--sound/hda/hdac_sysfs.c2
-rw-r--r--sound/hda/intel-dsp-config.c47
-rw-r--r--sound/hda/intel-nhlt.c54
-rw-r--r--sound/hda/intel-sdw-acpi.c179
-rw-r--r--sound/i2c/i2c.c4
-rw-r--r--sound/isa/ad1848/ad1848.c7
-rw-r--r--sound/isa/adlib.c3
-rw-r--r--sound/isa/cmi8328.c3
-rw-r--r--sound/isa/cmi8330.c3
-rw-r--r--sound/isa/cs423x/cs4231.c7
-rw-r--r--sound/isa/cs423x/cs4236.c7
-rw-r--r--sound/isa/es1688/es1688.c7
-rw-r--r--sound/isa/es18xx.c5
-rw-r--r--sound/isa/galaxy/galaxy.c3
-rw-r--r--sound/isa/gus/gusclassic.c3
-rw-r--r--sound/isa/gus/gusextreme.c3
-rw-r--r--sound/isa/gus/gusmax.c3
-rw-r--r--sound/isa/gus/interwave.c3
-rw-r--r--sound/isa/msnd/msnd_pinnacle.c3
-rw-r--r--sound/isa/opl3sa2.c3
-rw-r--r--sound/isa/opti9xx/miro.c3
-rw-r--r--sound/isa/opti9xx/opti92x-ad1848.c5
-rw-r--r--sound/isa/sb/jazz16.c3
-rw-r--r--sound/isa/sb/sb16.c3
-rw-r--r--sound/isa/sb/sb16_csp.c2
-rw-r--r--sound/isa/sb/sb8.c3
-rw-r--r--sound/isa/sb/sb_mixer.c2
-rw-r--r--sound/isa/sc6000.c3
-rw-r--r--sound/isa/sscape.c3
-rw-r--r--sound/isa/wavefront/wavefront.c3
-rw-r--r--sound/mips/Kconfig7
-rw-r--r--sound/mips/Makefile1
-rw-r--r--sound/mips/snd-n64.c372
-rw-r--r--sound/oss/dmasound/dmasound_core.c4
-rw-r--r--sound/pci/ad1889.c3
-rw-r--r--sound/pci/ali5451/ali5451.c3
-rw-r--r--sound/pci/als300.c3
-rw-r--r--sound/pci/als4000.c3
-rw-r--r--sound/pci/asihpi/hpidebug.c2
-rw-r--r--sound/pci/au88x0/au88x0.c3
-rw-r--r--sound/pci/aw2/aw2-alsa.c3
-rw-r--r--sound/pci/azt3328.c5
-rw-r--r--sound/pci/bt87x.c3
-rw-r--r--sound/pci/ca0106/ca0106_main.c3
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c2
-rw-r--r--sound/pci/cs5535audio/cs5535audio.c3
-rw-r--r--sound/pci/cs5535audio/cs5535audio_olpc.c4
-rw-r--r--sound/pci/ctxfi/cthw20k1.c8
-rw-r--r--sound/pci/ctxfi/cthw20k2.c10
-rw-r--r--sound/pci/ctxfi/ctpcm.c2
-rw-r--r--sound/pci/ctxfi/ctresource.c2
-rw-r--r--sound/pci/emu10k1/emu10k1.c4
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c2
-rw-r--r--sound/pci/emu10k1/emufx.c6
-rw-r--r--sound/pci/emu10k1/memory.c2
-rw-r--r--sound/pci/ens1370.c4
-rw-r--r--sound/pci/es1938.c3
-rw-r--r--sound/pci/es1968.c5
-rw-r--r--sound/pci/fm801.c2
-rw-r--r--sound/pci/hda/hda_auto_parser.c2
-rw-r--r--sound/pci/hda/hda_codec.c8
-rw-r--r--sound/pci/hda/hda_controller.c2
-rw-r--r--sound/pci/hda/hda_eld.c2
-rw-r--r--sound/pci/hda/hda_generic.c6
-rw-r--r--sound/pci/hda/hda_intel.c20
-rw-r--r--sound/pci/hda/hda_jack.c6
-rw-r--r--sound/pci/hda/hda_tegra.c90
-rw-r--r--sound/pci/hda/patch_ca0132.c9
-rw-r--r--sound/pci/hda/patch_conexant.c2
-rw-r--r--sound/pci/hda/patch_hdmi.c19
-rw-r--r--sound/pci/hda/patch_realtek.c67
-rw-r--r--sound/pci/hda/patch_via.c2
-rw-r--r--sound/pci/ice1712/ice1712.c3
-rw-r--r--sound/pci/ice1712/juli.c2
-rw-r--r--sound/pci/ice1712/psc724.c4
-rw-r--r--sound/pci/ice1712/quartet.c2
-rw-r--r--sound/pci/ice1712/wm8776.c2
-rw-r--r--sound/pci/intel8x0m.c3
-rw-r--r--sound/pci/lola/lola.c2
-rw-r--r--sound/pci/lola/lola_clock.c2
-rw-r--r--sound/pci/lola/lola_pcm.c2
-rw-r--r--sound/pci/maestro3.c5
-rw-r--r--sound/pci/rme9652/hdsp.c74
-rw-r--r--sound/pci/rme9652/hdspm.c2
-rw-r--r--sound/pci/sis7019.c2
-rw-r--r--sound/pci/sonicvibes.c5
-rw-r--r--sound/pci/trident/trident_main.c7
-rw-r--r--sound/ppc/keywest.c2
-rw-r--r--sound/soc/Kconfig20
-rw-r--r--sound/soc/Makefile8
-rw-r--r--sound/soc/adi/axi-i2s.c2
-rw-r--r--sound/soc/amd/acp3x-rt5682-max9836.c4
-rw-r--r--sound/soc/amd/renoir/rn-pci-acp3x.c18
-rw-r--r--sound/soc/atmel/atmel-i2s.c2
-rw-r--r--sound/soc/atmel/atmel-pcm-pdc.c78
-rw-r--r--sound/soc/atmel/mchp-i2s-mcc.c4
-rw-r--r--sound/soc/au1x/i2sc.c2
-rw-r--r--sound/soc/bcm/bcm2835-i2s.c4
-rw-r--r--sound/soc/bcm/bcm63xx-i2s-whistler.c2
-rw-r--r--sound/soc/bcm/cygnus-pcm.c107
-rw-r--r--sound/soc/cirrus/ep93xx-i2s.c2
-rw-r--r--sound/soc/codecs/Kconfig31
-rw-r--r--sound/soc/codecs/Makefile10
-rw-r--r--sound/soc/codecs/ab8500-codec.c4
-rw-r--r--sound/soc/codecs/adau1372.c2
-rw-r--r--sound/soc/codecs/adau1373.c6
-rw-r--r--sound/soc/codecs/adau1701.c2
-rw-r--r--sound/soc/codecs/adau17x1.c3
-rw-r--r--sound/soc/codecs/ak4458.c22
-rw-r--r--sound/soc/codecs/ak4554.c2
-rw-r--r--sound/soc/codecs/ak4613.c2
-rw-r--r--sound/soc/codecs/ak4641.c4
-rw-r--r--sound/soc/codecs/ak4642.c2
-rw-r--r--sound/soc/codecs/alc5632.c2
-rw-r--r--sound/soc/codecs/cpcap.c139
-rw-r--r--sound/soc/codecs/cros_ec_codec.c12
-rw-r--r--sound/soc/codecs/cs35l32.c2
-rw-r--r--sound/soc/codecs/cs35l33.c2
-rw-r--r--sound/soc/codecs/cs35l34.c2
-rw-r--r--sound/soc/codecs/cs35l35.c2
-rw-r--r--sound/soc/codecs/cs35l36.c2
-rw-r--r--sound/soc/codecs/cs4234.c2
-rw-r--r--sound/soc/codecs/cs4271.c2
-rw-r--r--sound/soc/codecs/cs42l56.c3
-rw-r--r--sound/soc/codecs/cs42l73.c6
-rw-r--r--sound/soc/codecs/cs43130.c6
-rw-r--r--sound/soc/codecs/cs4341.c2
-rw-r--r--sound/soc/codecs/cs4349.c2
-rw-r--r--sound/soc/codecs/cs47l15.c12
-rw-r--r--sound/soc/codecs/cs47l24.c12
-rw-r--r--sound/soc/codecs/cs47l35.c12
-rw-r--r--sound/soc/codecs/cs47l85.c16
-rw-r--r--sound/soc/codecs/cs47l90.c16
-rw-r--r--sound/soc/codecs/cs47l92.c12
-rw-r--r--sound/soc/codecs/cs53l30.c2
-rw-r--r--sound/soc/codecs/cx2072x.c2
-rw-r--r--sound/soc/codecs/da7210.c2
-rw-r--r--sound/soc/codecs/da7213.c2
-rw-r--r--sound/soc/codecs/da7218.c8
-rw-r--r--sound/soc/codecs/da7219.c4
-rw-r--r--sound/soc/codecs/da9055.c2
-rw-r--r--sound/soc/codecs/es8316.c5
-rw-r--r--sound/soc/codecs/es8328.c5
-rw-r--r--sound/soc/codecs/hdmi-codec.c4
-rw-r--r--sound/soc/codecs/inno_rk3036.c2
-rw-r--r--sound/soc/codecs/jz4740.c2
-rw-r--r--sound/soc/codecs/jz4760.c889
-rw-r--r--sound/soc/codecs/lm49453.c2
-rw-r--r--sound/soc/codecs/lochnagar-sc.c12
-rw-r--r--sound/soc/codecs/lpass-rx-macro.c3599
-rw-r--r--sound/soc/codecs/lpass-tx-macro.c1862
-rw-r--r--sound/soc/codecs/lpass-wsa-macro.c43
-rw-r--r--sound/soc/codecs/max98373-sdw.c4
-rw-r--r--sound/soc/codecs/max98373.c2
-rw-r--r--sound/soc/codecs/max9860.c2
-rw-r--r--sound/soc/codecs/max9867.c2
-rw-r--r--sound/soc/codecs/mc13783.c2
-rw-r--r--sound/soc/codecs/ml26124.c2
-rw-r--r--sound/soc/codecs/mt6359.c18
-rw-r--r--sound/soc/codecs/mt6660.c4
-rw-r--r--sound/soc/codecs/nau8810.c2
-rw-r--r--sound/soc/codecs/nau8822.c2
-rw-r--r--sound/soc/codecs/rt1015.c122
-rw-r--r--sound/soc/codecs/rt1015.h5
-rw-r--r--sound/soc/codecs/rt1308-sdw.c2
-rw-r--r--sound/soc/codecs/rt274.c2
-rw-r--r--sound/soc/codecs/rt286.c4
-rw-r--r--sound/soc/codecs/rt298.c4
-rw-r--r--sound/soc/codecs/rt5645.c78
-rw-r--r--sound/soc/codecs/rt5670.c4
-rw-r--r--sound/soc/codecs/rt5682-i2c.c3
-rw-r--r--sound/soc/codecs/rt5682-sdw.c25
-rw-r--r--sound/soc/codecs/rt5682.c16
-rw-r--r--sound/soc/codecs/rt5682.h2
-rw-r--r--sound/soc/codecs/rt700-sdw.c6
-rw-r--r--sound/soc/codecs/rt711-sdw.c6
-rw-r--r--sound/soc/codecs/rt715-sdw.c2
-rw-r--r--sound/soc/codecs/sgtl5000.c2
-rw-r--r--sound/soc/codecs/sirf-audio-codec.c575
-rw-r--r--sound/soc/codecs/ssm2602.c4
-rw-r--r--sound/soc/codecs/tas2764.c2
-rw-r--r--sound/soc/codecs/tas2770.c2
-rw-r--r--sound/soc/codecs/tlv320adcx140.c2
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c4
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c2
-rw-r--r--sound/soc/codecs/tlv320aic3x.c2
-rw-r--r--sound/soc/codecs/tscs42xx.c4
-rw-r--r--sound/soc/codecs/tscs454.c12
-rw-r--r--sound/soc/codecs/wm5102.c12
-rw-r--r--sound/soc/codecs/wm5110.c12
-rw-r--r--sound/soc/codecs/wm8510.c2
-rw-r--r--sound/soc/codecs/wm8731.c2
-rw-r--r--sound/soc/codecs/wm8770.c2
-rw-r--r--sound/soc/codecs/wm8804.c2
-rw-r--r--sound/soc/codecs/wm8903.c2
-rw-r--r--sound/soc/codecs/wm8904.c2
-rw-r--r--sound/soc/codecs/wm8940.c2
-rw-r--r--sound/soc/codecs/wm8960.c2
-rw-r--r--sound/soc/codecs/wm8962.c2
-rw-r--r--sound/soc/codecs/wm8974.c2
-rw-r--r--sound/soc/codecs/wm8978.c2
-rw-r--r--sound/soc/codecs/wm8983.c2
-rw-r--r--sound/soc/codecs/wm8985.c2
-rw-r--r--sound/soc/codecs/wm8988.c2
-rw-r--r--sound/soc/codecs/wm8993.c2
-rw-r--r--sound/soc/codecs/wm8994.c4
-rw-r--r--sound/soc/codecs/wm8997.c8
-rw-r--r--sound/soc/codecs/wm8998.c12
-rw-r--r--sound/soc/codecs/wm9713.c2
-rw-r--r--sound/soc/codecs/wm_adsp.c111
-rw-r--r--sound/soc/codecs/wmfw.h6
-rw-r--r--sound/soc/codecs/wsa881x.c1
-rw-r--r--sound/soc/codecs/zl38060.c4
-rw-r--r--sound/soc/codecs/zx_aud96p22.c401
-rw-r--r--sound/soc/fsl/Kconfig1
-rw-r--r--sound/soc/fsl/fsl_asrc.c5
-rw-r--r--sound/soc/fsl/fsl_easrc.c2
-rw-r--r--sound/soc/fsl/fsl_esai.c13
-rw-r--r--sound/soc/fsl/fsl_micfil.c9
-rw-r--r--sound/soc/fsl/fsl_sai.c8
-rw-r--r--sound/soc/fsl/fsl_spdif.c14
-rw-r--r--sound/soc/fsl/fsl_ssi.c12
-rw-r--r--sound/soc/fsl/fsl_xcvr.c7
-rw-r--r--sound/soc/generic/audio-graph-card.c17
-rw-r--r--sound/soc/generic/simple-card-utils.c13
-rw-r--r--sound/soc/intel/Kconfig2
-rw-r--r--sound/soc/intel/boards/Kconfig12
-rw-r--r--sound/soc/intel/boards/Makefile2
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c3
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c89
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c13
-rw-r--r--sound/soc/intel/boards/bytcr_wm5102.c465
-rw-r--r--sound/soc/intel/boards/cht_bsw_nau8824.c15
-rw-r--r--sound/soc/intel/boards/sof_maxim_common.c5
-rw-r--r--sound/soc/intel/boards/sof_rt5682.c7
-rw-r--r--sound/soc/intel/boards/sof_sdw.c121
-rw-r--r--sound/soc/intel/catpt/pcm.c14
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-adl-match.c1
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-bxt-match.c3
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-byt-match.c19
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cfl-match.c3
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cht-match.c3
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cml-match.c3
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cnl-match.c3
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-ehl-match.c5
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-glk-match.c3
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c3
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-icl-match.c3
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-jsl-match.c3
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-kbl-match.c3
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-skl-match.c3
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-tgl-match.c32
-rw-r--r--sound/soc/intel/common/soc-intel-quirks.h25
-rw-r--r--sound/soc/intel/keembay/kmb_platform.c230
-rw-r--r--sound/soc/intel/keembay/kmb_platform.h10
-rw-r--r--sound/soc/intel/skylake/skl-topology.c15
-rw-r--r--sound/soc/intel/skylake/skl.c8
-rw-r--r--sound/soc/jz4740/jz4740-i2s.c2
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c79
-rw-r--r--sound/soc/mediatek/Kconfig2
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-pcm.c10
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-dai-pcm.c8
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-afe-pcm.c2
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c11
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-dai-pcm.c8
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c52
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-afe-pcm.c4
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-dai-pcm.c8
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-dai-tdm.c2
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c133
-rw-r--r--sound/soc/meson/aiu-fifo-i2s.c1
-rw-r--r--sound/soc/meson/aiu-fifo-spdif.c1
-rw-r--r--sound/soc/meson/aiu-fifo.c18
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c2
-rw-r--r--sound/soc/qcom/lpass-apq8016.c2
-rw-r--r--sound/soc/qcom/lpass-cpu.c69
-rw-r--r--sound/soc/qcom/lpass-ipq806x.c2
-rw-r--r--sound/soc/qcom/lpass-lpaif-reg.h5
-rw-r--r--sound/soc/qcom/lpass-platform.c12
-rw-r--r--sound/soc/qcom/lpass-sc7180.c9
-rw-r--r--sound/soc/qcom/lpass.h3
-rw-r--r--sound/soc/qcom/qdsp6/q6afe.c2
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c21
-rw-r--r--sound/soc/qcom/qdsp6/q6asm.c2
-rw-r--r--sound/soc/qcom/qdsp6/q6routing.c18
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c4
-rw-r--r--sound/soc/rockchip/rockchip_pdm.c2
-rw-r--r--sound/soc/samsung/i2s.c2
-rw-r--r--sound/soc/samsung/pcm.c2
-rw-r--r--sound/soc/sh/rcar/core.c6
-rw-r--r--sound/soc/sh/siu.h2
-rw-r--r--sound/soc/sh/siu_pcm.c2
-rw-r--r--sound/soc/sirf/Kconfig21
-rw-r--r--sound/soc/sirf/Makefile8
-rw-r--r--sound/soc/sirf/sirf-audio-port.c86
-rw-r--r--sound/soc/sirf/sirf-audio.c160
-rw-r--r--sound/soc/sirf/sirf-usp.c435
-rw-r--r--sound/soc/sirf/sirf-usp.h292
-rw-r--r--sound/soc/soc-component.c54
-rw-r--r--sound/soc/soc-dapm.c13
-rw-r--r--sound/soc/soc-pcm.c465
-rw-r--r--sound/soc/soc-topology-test.c843
-rw-r--r--sound/soc/soc-topology.c41
-rw-r--r--sound/soc/sof/Kconfig15
-rw-r--r--sound/soc/sof/Makefile4
-rw-r--r--sound/soc/sof/core.c19
-rw-r--r--sound/soc/sof/debug.c2
-rw-r--r--sound/soc/sof/intel/Kconfig253
-rw-r--r--sound/soc/sof/intel/Makefile20
-rw-r--r--sound/soc/sof/intel/bdw.c67
-rw-r--r--sound/soc/sof/intel/byt.c106
-rw-r--r--sound/soc/sof/intel/hda-bus.c33
-rw-r--r--sound/soc/sof/intel/hda-compress.c4
-rw-r--r--sound/soc/sof/intel/hda-dsp.c24
-rw-r--r--sound/soc/sof/intel/hda-loader.c17
-rw-r--r--sound/soc/sof/intel/hda-pcm.c18
-rw-r--r--sound/soc/sof/intel/hda-stream.c38
-rw-r--r--sound/soc/sof/intel/hda-trace.c8
-rw-r--r--sound/soc/sof/intel/hda.c295
-rw-r--r--sound/soc/sof/intel/hda.h13
-rw-r--r--sound/soc/sof/intel/pci-apl.c81
-rw-r--r--sound/soc/sof/intel/pci-cnl.c104
-rw-r--r--sound/soc/sof/intel/pci-icl.c84
-rw-r--r--sound/soc/sof/intel/pci-tgl.c121
-rw-r--r--sound/soc/sof/intel/pci-tng.c70
-rw-r--r--sound/soc/sof/intel/shim.h6
-rw-r--r--sound/soc/sof/intel/tgl.c3
-rw-r--r--sound/soc/sof/ipc.c4
-rw-r--r--sound/soc/sof/loader.c8
-rw-r--r--sound/soc/sof/ops.h43
-rw-r--r--sound/soc/sof/pcm.c7
-rw-r--r--sound/soc/sof/pm.c1
-rw-r--r--sound/soc/sof/sof-acpi-dev.c129
-rw-r--r--sound/soc/sof/sof-acpi-dev.h16
-rw-r--r--sound/soc/sof/sof-pci-dev.c344
-rw-r--r--sound/soc/sof/sof-pci-dev.h17
-rw-r--r--sound/soc/sof/sof-priv.h8
-rw-r--r--sound/soc/sof/topology.c14
-rw-r--r--sound/soc/sprd/sprd-mcdt.c10
-rw-r--r--sound/soc/stm/stm32_i2s.c310
-rw-r--r--sound/soc/sunxi/sun4i-i2s.c2
-rw-r--r--sound/soc/sunxi/sun8i-codec.c12
-rw-r--r--sound/soc/tegra/Kconfig42
-rw-r--r--sound/soc/tegra/Makefile2
-rw-r--r--sound/soc/tegra/tegra186_dspk.c2
-rw-r--r--sound/soc/tegra/tegra20_i2s.c2
-rw-r--r--sound/soc/tegra/tegra210_dmic.c2
-rw-r--r--sound/soc/tegra/tegra210_i2s.c2
-rw-r--r--sound/soc/tegra/tegra30_ahub.c64
-rw-r--r--sound/soc/tegra/tegra30_ahub.h5
-rw-r--r--sound/soc/tegra/tegra30_i2s.c2
-rw-r--r--sound/soc/tegra/tegra_audio_graph_card.c252
-rw-r--r--sound/soc/tegra/tegra_pcm.c6
-rw-r--r--sound/soc/ti/davinci-mcasp.c2
-rw-r--r--sound/soc/txx9/Kconfig30
-rw-r--r--sound/soc/txx9/Makefile12
-rw-r--r--sound/soc/txx9/txx9aclc-ac97.c230
-rw-r--r--sound/soc/txx9/txx9aclc-generic.c88
-rw-r--r--sound/soc/txx9/txx9aclc.c422
-rw-r--r--sound/soc/txx9/txx9aclc.h71
-rw-r--r--sound/soc/zte/Kconfig26
-rw-r--r--sound/soc/zte/Makefile4
-rw-r--r--sound/soc/zte/zx-i2s.c452
-rw-r--r--sound/soc/zte/zx-spdif.c363
-rw-r--r--sound/soc/zte/zx-tdm.c458
-rw-r--r--sound/usb/bcd2000/bcd2000.c2
-rw-r--r--sound/usb/caiaq/audio.c2
-rw-r--r--sound/usb/caiaq/device.c6
-rw-r--r--sound/usb/caiaq/midi.c2
-rw-r--r--sound/usb/card.c15
-rw-r--r--sound/usb/card.h2
-rw-r--r--sound/usb/clock.c14
-rw-r--r--sound/usb/endpoint.c87
-rw-r--r--sound/usb/format.c11
-rw-r--r--sound/usb/hiface/chip.c6
-rw-r--r--sound/usb/hiface/pcm.c2
-rw-r--r--sound/usb/implicit.c5
-rw-r--r--sound/usb/mixer.c41
-rw-r--r--sound/usb/mixer_maps.c10
-rw-r--r--sound/usb/mixer_quirks.c369
-rw-r--r--sound/usb/mixer_scarlett.c2
-rw-r--r--sound/usb/mixer_scarlett_gen2.c2
-rw-r--r--sound/usb/mixer_us16x08.c2
-rw-r--r--sound/usb/pcm.c19
-rw-r--r--sound/usb/quirks-table.h117
-rw-r--r--sound/usb/quirks.c20
-rw-r--r--sound/x86/intel_hdmi_audio.c5
-rw-r--r--sound/xen/xen_snd_front_cfg.c2
-rw-r--r--tools/Makefile14
-rw-r--r--tools/arch/powerpc/include/uapi/asm/kvm.h2
-rw-r--r--tools/arch/powerpc/include/uapi/asm/perf_regs.h28
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h3
-rw-r--r--tools/arch/x86/include/asm/insn.h45
-rw-r--r--tools/arch/x86/include/asm/orc_types.h10
-rw-r--r--tools/arch/x86/include/asm/required-features.h3
-rw-r--r--tools/arch/x86/lib/insn.c119
-rw-r--r--tools/bpf/bpf_dbg.c2
-rw-r--r--tools/bpf/bpftool/Makefile6
-rw-r--r--tools/bpf/bpftool/prog.c4
-rw-r--r--tools/bpf/resolve_btfids/.gitignore3
-rw-r--r--tools/bpf/resolve_btfids/Makefile44
-rw-r--r--tools/bpf/runqslower/Makefile3
-rw-r--r--tools/build/Makefile.feature4
-rw-r--r--tools/build/feature/Makefile4
-rw-r--r--tools/build/feature/test-libopencsd.c4
-rw-r--r--tools/gpio/gpio-utils.c89
-rw-r--r--tools/gpio/gpio-utils.h6
-rw-r--r--tools/include/linux/export.h3
-rw-r--r--tools/include/linux/filter.h24
-rw-r--r--tools/include/linux/objtool.h13
-rw-r--r--tools/include/linux/rbtree.h192
-rw-r--r--tools/include/linux/static_call_types.h50
-rw-r--r--tools/include/linux/types.h3
-rw-r--r--tools/include/nolibc/nolibc.h153
-rw-r--r--tools/include/uapi/asm-generic/unistd.h4
-rw-r--r--tools/include/uapi/linux/bpf.h123
-rw-r--r--tools/include/uapi/linux/bpf_perf_event.h1
-rw-r--r--tools/include/uapi/linux/kvm.h1
-rw-r--r--tools/include/uapi/linux/perf_event.h96
-rw-r--r--tools/include/uapi/linux/pkt_sched.h1
-rw-r--r--tools/include/uapi/linux/prctl.h3
-rw-r--r--tools/include/uapi/linux/tcp.h357
-rw-r--r--tools/lib/api/fs/cgroup.c95
-rw-r--r--tools/lib/bpf/.gitignore1
-rw-r--r--tools/lib/bpf/Makefile47
-rw-r--r--tools/lib/bpf/bpf_core_read.h169
-rw-r--r--tools/lib/bpf/bpf_helpers.h2
-rw-r--r--tools/lib/bpf/btf.c12
-rw-r--r--tools/lib/bpf/libbpf.c72
-rw-r--r--tools/lib/bpf/xsk.c83
-rw-r--r--tools/lib/perf/include/perf/event.h18
-rw-r--r--tools/memory-model/Documentation/glossary.txt12
-rw-r--r--tools/memory-model/README2
-rw-r--r--tools/memory-model/litmus-tests/CoRR+poonceonce+Once.litmus4
-rw-r--r--tools/memory-model/litmus-tests/CoRW+poonceonce+Once.litmus4
-rw-r--r--tools/memory-model/litmus-tests/CoWR+poonceonce+Once.litmus4
-rw-r--r--tools/memory-model/litmus-tests/CoWW+poonceonce.litmus4
-rw-r--r--tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus5
-rw-r--r--tools/memory-model/litmus-tests/IRIW+poonceonces+OnceOnce.litmus5
-rw-r--r--tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus7
-rw-r--r--tools/memory-model/litmus-tests/ISA2+poonceonces.litmus6
-rw-r--r--tools/memory-model/litmus-tests/ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus6
-rw-r--r--tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus5
-rw-r--r--tools/memory-model/litmus-tests/LB+poacquireonce+pooncerelease.litmus5
-rw-r--r--tools/memory-model/litmus-tests/LB+poonceonces.litmus5
-rw-r--r--tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus5
-rw-r--r--tools/memory-model/litmus-tests/MP+onceassign+derefonce.litmus4
-rw-r--r--tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus5
-rw-r--r--tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus5
-rw-r--r--tools/memory-model/litmus-tests/MP+polocks.litmus6
-rw-r--r--tools/memory-model/litmus-tests/MP+poonceonces.litmus5
-rw-r--r--tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus5
-rw-r--r--tools/memory-model/litmus-tests/MP+porevlocks.litmus6
-rw-r--r--tools/memory-model/litmus-tests/R+fencembonceonces.litmus5
-rw-r--r--tools/memory-model/litmus-tests/R+poonceonces.litmus5
-rw-r--r--tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus5
-rw-r--r--tools/memory-model/litmus-tests/S+poonceonces.litmus5
-rw-r--r--tools/memory-model/litmus-tests/SB+fencembonceonces.litmus5
-rw-r--r--tools/memory-model/litmus-tests/SB+poonceonces.litmus5
-rw-r--r--tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus5
-rw-r--r--tools/memory-model/litmus-tests/WRC+poonceonces+Once.litmus5
-rw-r--r--tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus5
-rw-r--r--tools/memory-model/litmus-tests/Z6.0+pooncelock+poonceLock+pombonce.litmus7
-rw-r--r--tools/memory-model/litmus-tests/Z6.0+pooncelock+pooncelock+pombonce.litmus7
-rw-r--r--tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus6
-rw-r--r--tools/objtool/.gitignore2
-rw-r--r--tools/objtool/Documentation/stack-validation.txt16
-rw-r--r--tools/objtool/Makefile5
-rw-r--r--tools/objtool/arch/x86/decode.c54
-rw-r--r--tools/objtool/arch/x86/include/arch/cfi_regs.h (renamed from tools/objtool/arch/x86/include/cfi_regs.h)0
-rw-r--r--tools/objtool/arch/x86/include/arch/elf.h (renamed from tools/objtool/arch/x86/include/arch_elf.h)0
-rw-r--r--tools/objtool/arch/x86/include/arch/endianness.h9
-rw-r--r--tools/objtool/arch/x86/include/arch/special.h (renamed from tools/objtool/arch/x86/include/arch_special.h)0
-rw-r--r--tools/objtool/arch/x86/special.c6
-rw-r--r--tools/objtool/builtin-check.c14
-rw-r--r--tools/objtool/builtin-orc.c10
-rw-r--r--tools/objtool/check.c532
-rw-r--r--tools/objtool/elf.c113
-rw-r--r--tools/objtool/include/objtool/arch.h (renamed from tools/objtool/arch.h)8
-rw-r--r--tools/objtool/include/objtool/builtin.h (renamed from tools/objtool/builtin.h)2
-rw-r--r--tools/objtool/include/objtool/cfi.h (renamed from tools/objtool/cfi.h)2
-rw-r--r--tools/objtool/include/objtool/check.h (renamed from tools/objtool/check.h)39
-rw-r--r--tools/objtool/include/objtool/elf.h (renamed from tools/objtool/elf.h)0
-rw-r--r--tools/objtool/include/objtool/endianness.h38
-rw-r--r--tools/objtool/include/objtool/objtool.h (renamed from tools/objtool/objtool.h)6
-rw-r--r--tools/objtool/include/objtool/special.h (renamed from tools/objtool/special.h)4
-rw-r--r--tools/objtool/include/objtool/warn.h (renamed from tools/objtool/warn.h)2
-rw-r--r--tools/objtool/objtool.c7
-rw-r--r--tools/objtool/orc_dump.c11
-rw-r--r--tools/objtool/orc_gen.c315
-rw-r--r--tools/objtool/special.c14
-rw-r--r--tools/objtool/weak.c9
-rw-r--r--tools/perf/Build1
-rw-r--r--tools/perf/Documentation/examples.txt2
-rw-r--r--tools/perf/Documentation/itrace.txt2
-rw-r--r--tools/perf/Documentation/perf-buildid-cache.txt6
-rw-r--r--tools/perf/Documentation/perf-config.txt24
-rw-r--r--tools/perf/Documentation/perf-daemon.txt208
-rw-r--r--tools/perf/Documentation/perf-intel-pt.txt89
-rw-r--r--tools/perf/Documentation/perf-mem.txt3
-rw-r--r--tools/perf/Documentation/perf-record.txt21
-rw-r--r--tools/perf/Documentation/perf-report.txt10
-rw-r--r--tools/perf/Documentation/perf-script.txt25
-rw-r--r--tools/perf/Documentation/perf-stat.txt32
-rw-r--r--tools/perf/Documentation/topdown.txt78
-rw-r--r--tools/perf/Makefile.config9
-rw-r--r--tools/perf/Makefile.perf50
-rw-r--r--tools/perf/arch/arm/include/perf_regs.h2
-rw-r--r--tools/perf/arch/arm64/include/perf_regs.h2
-rw-r--r--tools/perf/arch/arm64/util/machine.c3
-rw-r--r--tools/perf/arch/arm64/util/perf_regs.c94
-rw-r--r--tools/perf/arch/csky/include/perf_regs.h2
-rw-r--r--tools/perf/arch/powerpc/include/perf_regs.h8
-rw-r--r--tools/perf/arch/powerpc/util/Build1
-rw-r--r--tools/perf/arch/powerpc/util/machine.c25
-rw-r--r--tools/perf/arch/powerpc/util/perf_regs.c6
-rw-r--r--tools/perf/arch/riscv/include/perf_regs.h2
-rw-r--r--tools/perf/arch/s390/include/perf_regs.h2
-rw-r--r--tools/perf/arch/s390/util/machine.c3
-rw-r--r--tools/perf/arch/x86/include/perf_regs.h2
-rw-r--r--tools/perf/arch/x86/tests/insn-x86.c1
-rw-r--r--tools/perf/arch/x86/tests/intel-pt-pkt-decoder-test.c4
-rw-r--r--tools/perf/arch/x86/util/Build3
-rw-r--r--tools/perf/arch/x86/util/event.c25
-rw-r--r--tools/perf/arch/x86/util/evlist.c15
-rw-r--r--tools/perf/arch/x86/util/evsel.c8
-rw-r--r--tools/perf/arch/x86/util/mem-events.c44
-rw-r--r--tools/perf/bench/epoll-ctl.c1
-rw-r--r--tools/perf/bench/epoll-wait.c1
-rw-r--r--tools/perf/bench/futex-hash.c1
-rw-r--r--tools/perf/bench/futex-lock-pi.c1
-rw-r--r--tools/perf/bench/futex-requeue.c1
-rw-r--r--tools/perf/bench/futex-wake-parallel.c1
-rw-r--r--tools/perf/bench/futex-wake.c1
-rw-r--r--tools/perf/builtin-buildid-cache.c28
-rw-r--r--tools/perf/builtin-buildid-list.c3
-rw-r--r--tools/perf/builtin-c2c.c171
-rw-r--r--tools/perf/builtin-daemon.c1521
-rw-r--r--tools/perf/builtin-inject.c4
-rw-r--r--tools/perf/builtin-mem.c113
-rw-r--r--tools/perf/builtin-record.c39
-rw-r--r--tools/perf/builtin-script.c37
-rw-r--r--tools/perf/builtin-stat.c124
-rw-r--r--tools/perf/builtin.h1
-rw-r--r--tools/perf/command-list.txt1
-rw-r--r--tools/perf/perf.c1
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/branch.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json5
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json58
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json4
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json34
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json11
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/branch.json12
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/bus.json19
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/cache.json118
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/exception.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/instruction.json45
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/memory.json6
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/other.json4
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/pipeline.json12
-rw-r--r--tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json248
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx8mm/sys/metrics.json4
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx8mn/sys/ddrc.json37
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx8mn/sys/metrics.json18
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx8mp/sys/ddrc.json37
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx8mp/sys/metrics.json466
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx8mq/sys/ddrc.json37
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx8mq/sys/metrics.json18
-rw-r--r--tools/perf/tests/Build1
-rw-r--r--tools/perf/tests/builtin-test.c4
-rw-r--r--tools/perf/tests/code-reading.c10
-rw-r--r--tools/perf/tests/demangle-ocaml-test.c43
-rw-r--r--tools/perf/tests/openat-syscall-all-cpus.c1
-rw-r--r--tools/perf/tests/parse-metric.c24
-rw-r--r--tools/perf/tests/sample-parsing.c20
-rwxr-xr-xtools/perf/tests/shell/buildid.sh6
-rwxr-xr-xtools/perf/tests/shell/daemon.sh475
-rwxr-xr-xtools/perf/tests/shell/test_arm_coresight.sh45
-rw-r--r--tools/perf/tests/tests.h1
-rw-r--r--tools/perf/ui/browsers/annotate.c2
-rw-r--r--tools/perf/util/Build2
-rw-r--r--tools/perf/util/annotate.c8
-rw-r--r--tools/perf/util/annotate.h1
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-decoder.c10
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-decoder.h8
-rw-r--r--tools/perf/util/arm-spe.c133
-rw-r--r--tools/perf/util/auxtrace.c15
-rw-r--r--tools/perf/util/auxtrace.h6
-rw-r--r--tools/perf/util/bpf_counter.c314
-rw-r--r--tools/perf/util/bpf_counter.h72
-rw-r--r--tools/perf/util/bpf_skel/.gitignore (renamed from arch/arm/mach-picoxcell/Makefile)3
-rw-r--r--tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c93
-rw-r--r--tools/perf/util/build-id.c5
-rw-r--r--tools/perf/util/build-id.h4
-rw-r--r--tools/perf/util/cgroup.c8
-rw-r--r--tools/perf/util/config.c123
-rw-r--r--tools/perf/util/config.h7
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c15
-rw-r--r--tools/perf/util/data-convert-bt.c2
-rw-r--r--tools/perf/util/db-export.c2
-rw-r--r--tools/perf/util/debug.c34
-rw-r--r--tools/perf/util/debug.h1
-rw-r--r--tools/perf/util/demangle-ocaml.c80
-rw-r--r--tools/perf/util/demangle-ocaml.h7
-rw-r--r--tools/perf/util/event.c67
-rw-r--r--tools/perf/util/event.h18
-rw-r--r--tools/perf/util/evlist.c125
-rw-r--r--tools/perf/util/evlist.h12
-rw-r--r--tools/perf/util/evsel.c63
-rw-r--r--tools/perf/util/evsel.h9
-rw-r--r--tools/perf/util/evsel_fprintf.c2
-rw-r--r--tools/perf/util/header.c2
-rw-r--r--tools/perf/util/hist.c15
-rw-r--r--tools/perf/util/hist.h4
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c334
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.h7
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c15
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h1
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c12
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h2
-rw-r--r--tools/perf/util/intel-pt.c214
-rw-r--r--tools/perf/util/intlist.c27
-rw-r--r--tools/perf/util/intlist.h10
-rw-r--r--tools/perf/util/jit.h2
-rw-r--r--tools/perf/util/jitdump.c84
-rw-r--r--tools/perf/util/machine.c51
-rw-r--r--tools/perf/util/machine.h2
-rw-r--r--tools/perf/util/map.c8
-rw-r--r--tools/perf/util/map.h3
-rw-r--r--tools/perf/util/mem-events.c36
-rw-r--r--tools/perf/util/mem-events.h5
-rw-r--r--tools/perf/util/metricgroup.c2
-rw-r--r--tools/perf/util/namespaces.c23
-rw-r--r--tools/perf/util/namespaces.h3
-rw-r--r--tools/perf/util/parse-events.l1
-rw-r--r--tools/perf/util/perf_api_probe.c10
-rw-r--r--tools/perf/util/perf_api_probe.h1
-rw-r--r--tools/perf/util/perf_event_attr_fprintf.c5
-rw-r--r--tools/perf/util/perf_regs.h7
-rw-r--r--tools/perf/util/probe-event.c12
-rw-r--r--tools/perf/util/probe-file.c38
-rw-r--r--tools/perf/util/probe-finder.c8
-rw-r--r--tools/perf/util/python-ext-sources1
-rw-r--r--tools/perf/util/python.c21
-rw-r--r--tools/perf/util/record.c9
-rw-r--r--tools/perf/util/record.h2
-rw-r--r--tools/perf/util/session.c54
-rw-r--r--tools/perf/util/setup.py2
-rw-r--r--tools/perf/util/sort.c109
-rw-r--r--tools/perf/util/sort.h6
-rw-r--r--tools/perf/util/stat-display.c4
-rw-r--r--tools/perf/util/stat-shadow.c92
-rw-r--r--tools/perf/util/stat.c6
-rw-r--r--tools/perf/util/stat.h9
-rw-r--r--tools/perf/util/string.c9
-rw-r--r--tools/perf/util/string2.h2
-rw-r--r--tools/perf/util/symbol-elf.c25
-rw-r--r--tools/perf/util/symbol.c73
-rw-r--r--tools/perf/util/symbol_conf.h7
-rw-r--r--tools/perf/util/synthetic-events.c225
-rw-r--r--tools/perf/util/target.c34
-rw-r--r--tools/perf/util/target.h10
-rw-r--r--tools/perf/util/trace-event-info.c10
-rw-r--r--tools/perf/util/unwind-libdw.c11
-rw-r--r--tools/perf/util/xyarray.c33
-rw-r--r--tools/power/acpi/common/cmfsize.c2
-rw-r--r--tools/power/acpi/common/getopt.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/oslinuxtbl.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixdir.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixmap.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixxf.c2
-rw-r--r--tools/power/acpi/tools/acpidump/acpidump.h2
-rw-r--r--tools/power/acpi/tools/acpidump/apdump.c2
-rw-r--r--tools/power/acpi/tools/acpidump/apfiles.c2
-rw-r--r--tools/power/acpi/tools/acpidump/apmain.c2
-rw-r--r--tools/power/cpupower/Makefile8
-rw-r--r--tools/power/cpupower/bench/Makefile2
-rw-r--r--tools/power/cpupower/utils/cpufreq-info.c3
-rw-r--r--tools/power/cpupower/utils/helpers/amd.c65
-rw-r--r--tools/power/cpupower/utils/helpers/cpuid.c20
-rw-r--r--tools/power/cpupower/utils/helpers/helpers.h14
-rw-r--r--tools/power/cpupower/utils/helpers/misc.c9
-rw-r--r--tools/power/x86/intel-speed-select/isst-config.c115
-rw-r--r--tools/power/x86/intel-speed-select/isst-core.c11
-rw-r--r--tools/power/x86/intel-speed-select/isst-display.c18
-rw-r--r--tools/power/x86/intel-speed-select/isst.h2
-rw-r--r--tools/power/x86/turbostat/turbostat.c10
-rw-r--r--tools/scripts/Makefile.include8
-rwxr-xr-xtools/testing/kunit/kunit.py30
-rw-r--r--tools/testing/kunit/kunit_config.py13
-rw-r--r--tools/testing/kunit/kunit_kernel.py18
-rwxr-xr-xtools/testing/kunit/kunit_tool_test.py204
-rw-r--r--tools/testing/nvdimm/config_check.c3
-rw-r--r--tools/testing/nvdimm/test/Kbuild6
-rw-r--r--tools/testing/nvdimm/test/ndtest.c1129
-rw-r--r--tools/testing/nvdimm/test/ndtest.h109
-rw-r--r--tools/testing/scatterlist/main.c1
-rw-r--r--tools/testing/selftests/Makefile11
-rw-r--r--tools/testing/selftests/arm64/mte/check_buffer_fill.c2
-rw-r--r--tools/testing/selftests/bpf/.gitignore2
-rw-r--r--tools/testing/selftests/bpf/Makefile68
-rw-r--r--tools/testing/selftests/bpf/README.rst24
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_ringbufs.c2
-rw-r--r--tools/testing/selftests/bpf/bpf_sockopt_helpers.h21
-rw-r--r--tools/testing/selftests/bpf/bpf_tcp_helpers.h1
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h6
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c24
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/atomic_bounds.c17
-rw-r--r--tools/testing/selftests/bpf/prog_tests/atomics.c246
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bind_perm.c109
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter.c118
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf.c25
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c33
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/check_mtu.c216
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cls_redirect.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_read_macros.c64
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_stress.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/global_func_args.c60
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ksyms_module.c31
-rw-r--r--tools/testing/selftests/bpf/prog_tests/module_attach.c27
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c118
-rw-r--r--tools/testing/selftests/bpf/prog_tests/recursion.c41
-rw-r--r--tools/testing/selftests/bpf/prog_tests/socket_cookie.c76
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_basic.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_sk.c28
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stack_var_off.c35
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_global_funcs.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_ima.c23
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_local_storage.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_lsm.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trampoline_count.c4
-rw-r--r--tools/testing/selftests/bpf/progs/atomic_bounds.c24
-rw-r--r--tools/testing/selftests/bpf/progs/atomics.c154
-rw-r--r--tools/testing/selftests/bpf/progs/bind4_prog.c42
-rw-r--r--tools/testing/selftests/bpf/progs/bind6_prog.c42
-rw-r--r--tools/testing/selftests/bpf/progs/bind_perm.c45
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter.h8
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c58
-rw-r--r--tools/testing/selftests/bpf/progs/connect_force_port4.c8
-rw-r--r--tools/testing/selftests/bpf/progs/connect_force_port6.c8
-rw-r--r--tools/testing/selftests/bpf/progs/ima.c33
-rw-r--r--tools/testing/selftests/bpf/progs/lsm.c69
-rw-r--r--tools/testing/selftests/bpf/progs/recursion.c46
-rw-r--r--tools/testing/selftests/bpf/progs/recvmsg4_prog.c42
-rw-r--r--tools/testing/selftests/bpf/progs/recvmsg6_prog.c48
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg4_prog.c7
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg6_prog.c5
-rw-r--r--tools/testing/selftests/bpf/progs/socket_cookie_prog.c47
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_sk.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_check_mtu.c198
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect.c7
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_read_macros.c50
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func10.c29
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func11.c19
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func12.c21
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func13.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func14.c21
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func15.c22
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func16.c22
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func9.c132
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func_args.c91
-rw-r--r--tools/testing/selftests/bpf/progs/test_ksyms_module.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_module_attach.c10
-rw-r--r--tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c28
-rw-r--r--tools/testing/selftests/bpf/progs/test_stack_var_off.c51
-rw-r--r--tools/testing/selftests/bpf/test_cgroup_storage.c2
-rw-r--r--tools/testing/selftests/bpf/test_current_pid_tgid_new_ns.c160
-rw-r--r--tools/testing/selftests/bpf/test_flow_dissector.c2
-rw-r--r--tools/testing/selftests/bpf/test_progs.c13
-rw-r--r--tools/testing/selftests/bpf/test_progs.h2
-rw-r--r--tools/testing/selftests/bpf/test_sock_addr.c86
-rw-r--r--tools/testing/selftests/bpf/test_socket_cookie.c208
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c88
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_redirect.sh10
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_and.c77
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_bounds.c27
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c96
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_fetch_add.c106
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_or.c77
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_xchg.c46
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_xor.c77
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_stack.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c4
-rw-r--r--tools/testing/selftests/bpf/verifier/const_or.c4
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx.c7
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_packet_access.c4
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_access_var_len.c12
-rw-r--r--tools/testing/selftests/bpf/verifier/int_ptr.c6
-rw-r--r--tools/testing/selftests/bpf/verifier/jit.c24
-rw-r--r--tools/testing/selftests/bpf/verifier/leak_ptr.c10
-rw-r--r--tools/testing/selftests/bpf/verifier/meta_access.c4
-rw-r--r--tools/testing/selftests/bpf/verifier/raw_stack.c10
-rw-r--r--tools/testing/selftests/bpf/verifier/stack_ptr.c22
-rw-r--r--tools/testing/selftests/bpf/verifier/unpriv.c5
-rw-r--r--tools/testing/selftests/bpf/verifier/value_illegal_alu.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/var_off.c115
-rw-r--r--tools/testing/selftests/bpf/verifier/xadd.c18
-rwxr-xr-xtools/testing/selftests/bpf/vmtest.sh368
-rw-r--r--tools/testing/selftests/bpf/xdpxceiver.c225
-rw-r--r--tools/testing/selftests/bpf/xdpxceiver.h2
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test_arm64.c4
-rw-r--r--tools/testing/selftests/dma/dma_map_benchmark.c23
-rw-r--r--tools/testing/selftests/dmabuf-heaps/Makefile2
-rw-r--r--tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c149
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/ethtool_lanes.sh187
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/fib.sh14
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/port_scale.sh64
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum-2/port_scale.sh16
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh2
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/port_scale.sh16
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/fib.sh14
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/fib_notifications.sh430
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc4
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc35
-rw-r--r--tools/testing/selftests/gpio/Makefile26
-rw-r--r--tools/testing/selftests/gpio/config1
-rw-r--r--tools/testing/selftests/gpio/gpio-mockup-cdev.c198
-rw-r--r--tools/testing/selftests/gpio/gpio-mockup-chardev.c323
-rwxr-xr-xtools/testing/selftests/gpio/gpio-mockup-sysfs.sh168
-rwxr-xr-xtools/testing/selftests/gpio/gpio-mockup.sh497
-rw-r--r--tools/testing/selftests/ipc/msgque.c6
-rwxr-xr-xtools/testing/selftests/kselftest_deps.sh4
-rw-r--r--tools/testing/selftests/kselftest_harness.h26
-rw-r--r--tools/testing/selftests/kselftest_module.h18
-rw-r--r--tools/testing/selftests/kvm/.gitignore7
-rw-r--r--tools/testing/selftests/kvm/Makefile7
-rw-r--r--tools/testing/selftests/kvm/demand_paging_test.c43
-rw-r--r--tools/testing/selftests/kvm/dirty_log_perf_test.c92
-rw-r--r--tools/testing/selftests/kvm/hardware_disable_test.c165
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h6
-rw-r--r--tools/testing/selftests/kvm/include/numaif.h55
-rw-r--r--tools/testing/selftests/kvm/include/perf_test_util.h7
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h16
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h41
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c7
-rw-r--r--tools/testing/selftests/kvm/lib/perf_test_util.c31
-rw-r--r--tools/testing/selftests/kvm/lib/test_util.c31
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c145
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/svm.c8
-rw-r--r--tools/testing/selftests/kvm/memslot_modification_stress_test.c212
-rw-r--r--tools/testing/selftests/kvm/settings1
-rw-r--r--tools/testing/selftests/kvm/x86_64/evmcs_test.c3
-rw-r--r--tools/testing/selftests/kvm/x86_64/get_cpuid_test.c175
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c31
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c131
-rw-r--r--tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c544
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c320
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c149
-rw-r--r--tools/testing/selftests/mount_setattr/.gitignore1
-rw-r--r--tools/testing/selftests/mount_setattr/Makefile7
-rw-r--r--tools/testing/selftests/mount_setattr/config1
-rw-r--r--tools/testing/selftests/mount_setattr/mount_setattr_test.c1424
-rw-r--r--tools/testing/selftests/nci/Makefile6
-rw-r--r--tools/testing/selftests/nci/config3
-rw-r--r--tools/testing/selftests/nci/nci_dev.c599
-rw-r--r--tools/testing/selftests/net/Makefile1
-rwxr-xr-xtools/testing/selftests/net/fcnal-test.sh402
-rw-r--r--tools/testing/selftests/net/forwarding/config1
-rw-r--r--tools/testing/selftests/net/forwarding/ethtool_lib.sh34
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh69
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_mpath_nh.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_multipath.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_chains.sh4
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_flower.sh344
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh94
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh1052
-rwxr-xr-xtools/testing/selftests/net/mptcp/pm_netlink.sh41
-rw-r--r--tools/testing/selftests/net/mptcp/pm_nl_ctl.c111
-rw-r--r--tools/testing/selftests/net/mptcp/settings2
-rw-r--r--tools/testing/selftests/net/nettest.c585
-rw-r--r--tools/testing/selftests/net/so_txtime.c2
-rw-r--r--tools/testing/selftests/net/tls.c15
-rw-r--r--tools/testing/selftests/net/txtimestamp.c6
-rwxr-xr-xtools/testing/selftests/net/unicast_extensions.sh228
-rwxr-xr-xtools/testing/selftests/net/xfrm_policy.sh45
-rwxr-xr-xtools/testing/selftests/netfilter/nft_meta.sh2
-rwxr-xr-xtools/testing/selftests/powerpc/eeh/eeh-basic.sh41
-rw-r--r--[-rwxr-xr-x]tools/testing/selftests/powerpc/eeh/eeh-functions.sh168
-rwxr-xr-xtools/testing/selftests/powerpc/eeh/eeh-vf-aware.sh45
-rwxr-xr-xtools/testing/selftests/powerpc/eeh/eeh-vf-unaware.sh35
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/config2csv.sh67
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/console-badness.sh1
-rw-r--r--tools/testing/selftests/rcutorture/bin/functions.sh36
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-find-errors.sh9
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck.sh3
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh12
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh103
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/mkinitrd.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-build.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-console.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/torture.sh442
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/RUDE01.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS01.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot4
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c10
-rw-r--r--tools/testing/selftests/syscall_user_dispatch/sud_benchmark.c8
-rw-r--r--tools/testing/selftests/syscall_user_dispatch/sud_test.c14
-rw-r--r--tools/testing/selftests/tc-testing/Makefile3
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/u32.json46
-rw-r--r--tools/testing/selftests/timens/.gitignore1
-rw-r--r--tools/testing/selftests/vDSO/vdso_config.h4
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests.sh (renamed from tools/testing/selftests/vm/run_vmtests)0
-rwxr-xr-xtools/testing/selftests/wireguard/netns.sh15
-rw-r--r--tools/testing/selftests/x86/helpers.h24
-rw-r--r--tools/testing/selftests/x86/ldt_gdt.c2
-rw-r--r--tools/tracing/Makefile19
-rw-r--r--tools/tracing/latency/.gitignore2
-rw-r--r--tools/tracing/latency/Makefile24
-rw-r--r--tools/tracing/latency/latency-collector.c2108
-rw-r--r--usr/Kconfig1
-rw-r--r--usr/include/Makefile2
-rw-r--r--virt/kvm/dirty_ring.c8
-rw-r--r--virt/kvm/kvm_main.c84
-rw-r--r--virt/kvm/mmu_lock.h23
11444 files changed, 488784 insertions, 286202 deletions
diff --git a/.clang-format b/.clang-format
index 10dc5a9a61b3..c24b147cac01 100644
--- a/.clang-format
+++ b/.clang-format
@@ -109,6 +109,7 @@ ForEachMacros:
- 'css_for_each_child'
- 'css_for_each_descendant_post'
- 'css_for_each_descendant_pre'
+ - 'cxl_for_each_cmd'
- 'device_for_each_child_node'
- 'dma_fence_chain_for_each'
- 'do_for_each_ftrace_op'
@@ -122,6 +123,7 @@ ForEachMacros:
- 'drm_for_each_bridge_in_chain'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
+ - 'drm_for_each_crtc_reverse'
- 'drm_for_each_encoder'
- 'drm_for_each_encoder_mask'
- 'drm_for_each_fb'
@@ -203,14 +205,13 @@ ForEachMacros:
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
- 'for_each_member'
- - 'for_each_mem_region'
- - 'for_each_memblock_type'
- 'for_each_memcg_cache_index'
- 'for_each_mem_pfn_range'
- '__for_each_mem_range'
- 'for_each_mem_range'
- '__for_each_mem_range_rev'
- 'for_each_mem_range_rev'
+ - 'for_each_mem_region'
- 'for_each_migratetype_order'
- 'for_each_msi_entry'
- 'for_each_msi_entry_safe'
@@ -276,10 +277,8 @@ ForEachMacros:
- 'for_each_reserved_mem_range'
- 'for_each_reserved_mem_region'
- 'for_each_rtd_codec_dais'
- - 'for_each_rtd_codec_dais_rollback'
- 'for_each_rtd_components'
- 'for_each_rtd_cpu_dais'
- - 'for_each_rtd_cpu_dais_rollback'
- 'for_each_rtd_dais'
- 'for_each_set_bit'
- 'for_each_set_bit_from'
@@ -298,6 +297,7 @@ ForEachMacros:
- '__for_each_thread'
- 'for_each_thread'
- 'for_each_unicast_dest_pgid'
+ - 'for_each_vsi'
- 'for_each_wakeup_source'
- 'for_each_zone'
- 'for_each_zone_zonelist'
@@ -330,6 +330,7 @@ ForEachMacros:
- 'hlist_for_each_entry_rcu_bh'
- 'hlist_for_each_entry_rcu_notrace'
- 'hlist_for_each_entry_safe'
+ - 'hlist_for_each_entry_srcu'
- '__hlist_for_each_rcu'
- 'hlist_for_each_safe'
- 'hlist_nulls_for_each_entry'
@@ -378,6 +379,7 @@ ForEachMacros:
- 'list_for_each_entry_safe_continue'
- 'list_for_each_entry_safe_from'
- 'list_for_each_entry_safe_reverse'
+ - 'list_for_each_entry_srcu'
- 'list_for_each_prev'
- 'list_for_each_prev_safe'
- 'list_for_each_safe'
@@ -411,6 +413,8 @@ ForEachMacros:
- 'of_property_for_each_string'
- 'of_property_for_each_u32'
- 'pci_bus_for_each_resource'
+ - 'pcl_for_each_chunk'
+ - 'pcl_for_each_segment'
- 'pcm_for_each_format'
- 'ping_portaddr_for_each_entry'
- 'plist_for_each'
diff --git a/.gitignore b/.gitignore
index d01cda8e1177..3af66272d6f1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,6 +18,7 @@
*.c.[012]*.*
*.dt.yaml
*.dtb
+*.dtbo
*.dtb.S
*.dwo
*.elf
@@ -41,6 +42,7 @@
*.so.dbg
*.su
*.symtypes
+*.symversions
*.tab.[ch]
*.tar
*.xz
diff --git a/.mailmap b/.mailmap
index b1ab0129c7d6..85b93cdefc87 100644
--- a/.mailmap
+++ b/.mailmap
@@ -9,9 +9,6 @@
#
# Please keep this list dictionary sorted.
#
-# This comment is parsed by git-shortlog:
-# repo-abbrev: /pub/scm/linux/kernel/git/
-#
Aaron Durbin <adurbin@google.com>
Adam Oldham <oldhamca@gmail.com>
Adam Radford <aradford@gmail.com>
@@ -40,6 +37,7 @@ Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
Andrew Vasquez <andrew.vasquez@qlogic.com>
Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
Andy Adamson <andros@citi.umich.edu>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@free-electrons.com>
@@ -176,12 +174,13 @@ Juha Yrjola <at solidboot.com>
Juha Yrjola <juha.yrjola@nokia.com>
Juha Yrjola <juha.yrjola@solidboot.com>
Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
-Kamil Konieczny <k.konieczny@samsung.com> <k.konieczny@partner.samsung.com>
Kay Sievers <kay.sievers@vrfy.org>
Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>
Kees Cook <keescook@chromium.org> <keescook@google.com>
Kees Cook <keescook@chromium.org> <kees@outflux.net>
Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
+Keith Busch <kbusch@kernel.org> <keith.busch@intel.com>
+Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com>
Kenneth W Chen <kenneth.w.chen@intel.com>
Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
@@ -202,6 +201,9 @@ Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
+Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org>
+Manivannan Sadhasivam <mani@kernel.org> <manivannanece23@gmail.com>
+Manivannan Sadhasivam <mani@kernel.org> <manivannan.sadhasivam@linaro.org>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
Mark Brown <broonie@sirena.org.uk>
@@ -235,6 +237,7 @@ Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
Mayuresh Janorkar <mayur@ti.com>
Michael Buesch <m@bues.ch>
Michel Dänzer <michel@tungstengraphics.com>
+Miguel Ojeda <ojeda@kernel.org> <miguel.ojeda.sandonis@gmail.com>
Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il>
Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com>
Mike Rapoport <rppt@kernel.org> <rppt@linux.ibm.com>
@@ -247,6 +250,7 @@ Morten Welinder <welinder@anemone.rentec.com>
Morten Welinder <welinder@darter.rentec.com>
Morten Welinder <welinder@troll.com>
Mythri P K <mythripk@ti.com>
+Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
Nguyen Anh Quynh <aquynh@gmail.com>
Nicolas Ferre <nicolas.ferre@microchip.com> <nicolas.ferre@atmel.com>
Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
@@ -337,6 +341,8 @@ Vinod Koul <vkoul@kernel.org> <vkoul@infradead.org>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
+Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.org>
+Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.com>
Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
diff --git a/CREDITS b/CREDITS
index 9add7e6a4fa0..cef83b958cbe 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1244,10 +1244,10 @@ S: 80050-430 - Curitiba - Paraná
S: Brazil
N: Oded Gabbay
-E: oded.gabbay@gmail.com
-D: HabanaLabs and AMD KFD maintainer
-S: 12 Shraga Raphaeli
-S: Petah-Tikva, 4906418
+E: ogabbay@kernel.org
+D: HabanaLabs maintainer
+S: 29 Duchifat St.
+S: Ra'anana 4372029
S: Israel
N: Kumar Gala
@@ -2841,14 +2841,11 @@ S: Subiaco, 6008
S: Perth, Western Australia
S: Australia
-N: Miguel Ojeda Sandonis
-E: miguel.ojeda.sandonis@gmail.com
-W: http://miguelojeda.es
-W: http://jair.lab.fi.uva.es/~migojed/
+N: Miguel Ojeda
+E: ojeda@kernel.org
+W: https://ojeda.dev
D: Author of the ks0108, cfag12864b and cfag12864bfb auxiliary display drivers.
D: Maintainer of the auxiliary display drivers tree (drivers/auxdisplay/*)
-S: C/ Mieses 20, 9-B
-S: Valladolid 47009
S: Spain
N: Peter Oruba
diff --git a/Documentation/ABI/stable/sysfs-bus-fsl-mc b/Documentation/ABI/stable/sysfs-bus-fsl-mc
new file mode 100644
index 000000000000..58f06c7eeed7
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-bus-fsl-mc
@@ -0,0 +1,19 @@
+What: /sys/bus/fsl-mc/rescan
+Date: January 2021
+KernelVersion: 5.12
+Contact: Ioana Ciornei <ioana.ciornei@nxp.com>
+Description: Writing a non-zero value to this attribute will
+ force a rescan of fsl-mc bus in the system and
+ synchronize the objects under fsl-mc bus and the
+ Management Complex firmware.
+Users: Userspace drivers and management tools
+
+What: /sys/bus/fsl-mc/autorescan
+Date: January 2021
+KernelVersion: 5.12
+Contact: Ioana Ciornei <ioana.ciornei@nxp.com>
+Description: Writing a zero value to this attribute will
+ disable the DPRC IRQs on which automatic rescan
+ of the fsl-mc bus is performed. A non-zero value
+ will enable the DPRC IRQs.
+Users: Userspace drivers and management tools
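A minimal usage sketch for the two attributes documented above (hedged: assumes an fsl-mc bus is present and the paths are exactly as listed)::

	# force a one-off rescan, synchronizing bus objects with the MC firmware
	echo 1 > /sys/bus/fsl-mc/rescan
	# disable the DPRC-IRQ-driven automatic rescan, then re-enable it
	echo 0 > /sys/bus/fsl-mc/autorescan
	echo 1 > /sys/bus/fsl-mc/autorescan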
diff --git a/Documentation/ABI/stable/sysfs-bus-vmbus b/Documentation/ABI/stable/sysfs-bus-vmbus
index c27b7b89477c..42599d9fa161 100644
--- a/Documentation/ABI/stable/sysfs-bus-vmbus
+++ b/Documentation/ABI/stable/sysfs-bus-vmbus
@@ -1,3 +1,10 @@
+What: /sys/bus/vmbus/hibernation
+Date: Jan 2021
+KernelVersion: 5.12
+Contact: Dexuan Cui <decui@microsoft.com>
+Description: Whether the host supports hibernation for the VM.
+Users: Daemon that sets up swap partition/file for hibernation.
+
What: /sys/bus/vmbus/devices/<UUID>/id
Date: Jul 2009
KernelVersion: 2.6.31
diff --git a/Documentation/ABI/stable/sysfs-class-tpm b/Documentation/ABI/stable/sysfs-class-tpm
index 91ca63ec7581..d897ecb9615f 100644
--- a/Documentation/ABI/stable/sysfs-class-tpm
+++ b/Documentation/ABI/stable/sysfs-class-tpm
@@ -194,3 +194,17 @@ Description: The "tpm_version_major" property shows the TCG spec major version
Example output::
2
+
+What: /sys/class/tpm/tpmX/pcr-H/N
+Date: March 2021
+KernelVersion: 5.12
+Contact: linux-integrity@vger.kernel.org
+Description: produces output in compact hex representation for PCR
+ number N from hash bank H. N is the numeric value of
+ the PCR number and H is the crypto string
+ representation of the hash
+
+ Example output::
+
+ cat /sys/class/tpm/tpm0/pcr-sha256/7
+ 2ED93F199692DC6788EFA6A1FE74514AB9760B2A6CEEAEF6C808C13E4ABB0D42
diff --git a/Documentation/ABI/stable/sysfs-driver-speakup b/Documentation/ABI/stable/sysfs-driver-speakup
index 792f58ba327d..dc2a6ba1674b 100644
--- a/Documentation/ABI/stable/sysfs-driver-speakup
+++ b/Documentation/ABI/stable/sysfs-driver-speakup
@@ -273,7 +273,7 @@ Description: In `/sys/accessibility/speakup` is a directory corresponding to
Below is a description of values and parameters for soft
synthesizer, which is currently the most commonly used.
-What: /sys/accessibility/speakup/soft/caps_start
+What: /sys/accessibility/speakup/<synth-name>/caps_start
KernelVersion: 2.6
Contact: speakup@linux-speakup.org
Description: This is the string that is sent to the synthesizer to cause it
@@ -281,7 +281,7 @@ Description: This is the string that is sent to the synthesizer to cause it
and most others, this causes the pitch of the voice to rise
above the currently set pitch.
-What: /sys/accessibility/speakup/soft/caps_stop
+What: /sys/accessibility/speakup/<synth-name>/caps_stop
KernelVersion: 2.6
Contact: speakup@linux-speakup.org
Description: This is the string sent to the synthesizer to cause it to stop
@@ -290,12 +290,12 @@ Description: This is the string sent to the synthesizer to cause it to stop
down to the
currently set pitch.
-What: /sys/accessibility/speakup/soft/delay_time
+What: /sys/accessibility/speakup/<synth-name>/delay_time
KernelVersion: 2.6
Contact: speakup@linux-speakup.org
Description: TODO:
-What: /sys/accessibility/speakup/soft/direct
+What: /sys/accessibility/speakup/<synth-name>/direct
KernelVersion: 2.6
Contact: speakup@linux-speakup.org
Description: Controls if punctuation is spoken by speakup, or by the
@@ -306,36 +306,43 @@ Description: Controls if punctuation is spoken by speakup, or by the
than". Zero lets speakup speak the punctuation. One lets the
synthesizer itself speak punctuation.
-What: /sys/accessibility/speakup/soft/freq
+What: /sys/accessibility/speakup/<synth-name>/freq
KernelVersion: 2.6
Contact: speakup@linux-speakup.org
Description: Gets or sets the frequency of the speech synthesizer. Range is
0-9.
-What: /sys/accessibility/speakup/soft/full_time
+What: /sys/accessibility/speakup/<synth-name>/flush_time
+KernelVersion: 5.12
+Contact: speakup@linux-speakup.org
+Description: Gets or sets the timeout to wait for the synthesizer flush to
+ complete. This can be used when the cable gets faulty and flush
+ notifications are getting lost.
+
+What: /sys/accessibility/speakup/<synth-name>/full_time
KernelVersion: 2.6
Contact: speakup@linux-speakup.org
Description: TODO:
-What: /sys/accessibility/speakup/soft/jiffy_delta
+What: /sys/accessibility/speakup/<synth-name>/jiffy_delta
KernelVersion: 2.6
Contact: speakup@linux-speakup.org
Description: This controls how many jiffys the kernel gives to the
synthesizer. Setting this too high can make a system unstable,
or even crash it.
-What: /sys/accessibility/speakup/soft/pitch
+What: /sys/accessibility/speakup/<synth-name>/pitch
KernelVersion: 2.6
Contact: speakup@linux-speakup.org
Description: Gets or sets the pitch of the synthesizer. The range is 0-9.
-What: /sys/accessibility/speakup/soft/inflection
+What: /sys/accessibility/speakup/<synth-name>/inflection
KernelVersion: 5.8
Contact: speakup@linux-speakup.org
Description: Gets or sets the inflection of the synthesizer, i.e. the pitch
range. The range is 0-9.
-What: /sys/accessibility/speakup/soft/punct
+What: /sys/accessibility/speakup/<synth-name>/punct
KernelVersion: 2.6
Contact: speakup@linux-speakup.org
Description: Gets or sets the amount of punctuation spoken by the
@@ -343,13 +350,13 @@ Description: Gets or sets the amount of punctuation spoken by the
TODO: How is this related to speakup's punc_level, or
reading_punc.
-What: /sys/accessibility/speakup/soft/rate
+What: /sys/accessibility/speakup/<synth-name>/rate
KernelVersion: 2.6
Contact: speakup@linux-speakup.org
Description: Gets or sets the rate of the synthesizer. Range is from zero
slowest, to nine fastest.
-What: /sys/accessibility/speakup/soft/tone
+What: /sys/accessibility/speakup/<synth-name>/tone
KernelVersion: 2.6
Contact: speakup@linux-speakup.org
Description: Gets or sets the tone of the speech synthesizer. The range for
@@ -357,12 +364,12 @@ Description: Gets or sets the tone of the speech synthesizer. The range for
difference if using espeak and the espeakup connector.
TODO: does espeakup support different tonalities?
-What: /sys/accessibility/speakup/soft/trigger_time
+What: /sys/accessibility/speakup/<synth-name>/trigger_time
KernelVersion: 2.6
Contact: speakup@linux-speakup.org
Description: TODO:
-What: /sys/accessibility/speakup/soft/voice
+What: /sys/accessibility/speakup/<synth-name>/voice
KernelVersion: 2.6
Contact: speakup@linux-speakup.org
Description: Gets or sets the voice used by the synthesizer if the
@@ -371,7 +378,7 @@ Description: Gets or sets the voice used by the synthesizer if the
voices, this parameter will not set the voice when the espeakup
connector is used between speakup and espeak.
-What: /sys/accessibility/speakup/soft/vol
+What: /sys/accessibility/speakup/<synth-name>/vol
KernelVersion: 2.6
Contact: speakup@linux-speakup.org
Description: Gets or sets the volume of the speech synthesizer. Range is 0-9,
diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs
index c5d678d39144..d447a611c41b 100644
--- a/Documentation/ABI/testing/debugfs-driver-habanalabs
+++ b/Documentation/ABI/testing/debugfs-driver-habanalabs
@@ -1,7 +1,7 @@
What: /sys/kernel/debug/habanalabs/hl<n>/addr
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Sets the device address to be used for read or write through
PCI bar, or the device VA of a host mapped memory to be read or
written directly from the host. The latter option is allowed
@@ -11,7 +11,7 @@ Description: Sets the device address to be used for read or write through
What: /sys/kernel/debug/habanalabs/hl<n>/clk_gate
Date: May 2020
KernelVersion: 5.8
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Allow the root user to disable/enable in runtime the clock
gating mechanism in Gaudi. Due to how Gaudi is built, the
clock gating needs to be disabled in order to access the
@@ -34,28 +34,28 @@ Description: Allow the root user to disable/enable in runtime the clock
What: /sys/kernel/debug/habanalabs/hl<n>/command_buffers
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Displays a list with information about the currently allocated
command buffers
What: /sys/kernel/debug/habanalabs/hl<n>/command_submission
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Displays a list with information about the currently active
command submissions
What: /sys/kernel/debug/habanalabs/hl<n>/command_submission_jobs
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Displays a list with detailed information about each JOB (CB) of
each active command submission
What: /sys/kernel/debug/habanalabs/hl<n>/data32
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Allows the root user to read or write directly through the
device's PCI bar. Writing to this file generates a write
transaction while reading from the file generates a read
@@ -70,7 +70,7 @@ Description: Allows the root user to read or write directly through the
What: /sys/kernel/debug/habanalabs/hl<n>/data64
Date: Jan 2020
KernelVersion: 5.6
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Allows the root user to read or write 64 bit data directly
through the device's PCI bar. Writing to this file generates a
write transaction while reading from the file generates a read
@@ -85,7 +85,7 @@ Description: Allows the root user to read or write 64 bit data directly
What: /sys/kernel/debug/habanalabs/hl<n>/device
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Enables the root user to set the device to specific state.
Valid values are "disable", "enable", "suspend", "resume".
User can read this property to see the valid values
@@ -93,28 +93,28 @@ Description: Enables the root user to set the device to specific state.
What: /sys/kernel/debug/habanalabs/hl<n>/engines
Date: Jul 2019
KernelVersion: 5.3
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Displays the status registers values of the device engines and
their derived idle status
What: /sys/kernel/debug/habanalabs/hl<n>/i2c_addr
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Sets I2C device address for I2C transaction that is generated
by the device's CPU
What: /sys/kernel/debug/habanalabs/hl<n>/i2c_bus
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Sets I2C bus address for I2C transaction that is generated by
the device's CPU
What: /sys/kernel/debug/habanalabs/hl<n>/i2c_data
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Triggers an I2C transaction that is generated by the device's
CPU. Writing to this file generates a write transaction while
reading from the file generates a read transcation
@@ -122,32 +122,32 @@ Description: Triggers an I2C transaction that is generated by the device's
What: /sys/kernel/debug/habanalabs/hl<n>/i2c_reg
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Sets I2C register id for I2C transaction that is generated by
the device's CPU
What: /sys/kernel/debug/habanalabs/hl<n>/led0
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Sets the state of the first S/W led on the device
What: /sys/kernel/debug/habanalabs/hl<n>/led1
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Sets the state of the second S/W led on the device
What: /sys/kernel/debug/habanalabs/hl<n>/led2
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Sets the state of the third S/W led on the device
What: /sys/kernel/debug/habanalabs/hl<n>/mmu
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Displays the hop values and physical address for a given ASID
and virtual address. The user should write the ASID and VA into
the file and then read the file to get the result.
@@ -157,14 +157,14 @@ Description: Displays the hop values and physical address for a given ASID
What: /sys/kernel/debug/habanalabs/hl<n>/set_power_state
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Sets the PCI power state. Valid values are "1" for D0 and "2"
for D3Hot
What: /sys/kernel/debug/habanalabs/hl<n>/userptr
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Displays a list with information about the currently user
pointers (user virtual addresses) that are pinned and mapped
to DMA addresses
@@ -172,13 +172,21 @@ Description: Displays a list with information about the currently user
What: /sys/kernel/debug/habanalabs/hl<n>/vm
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Displays a list with information about all the active virtual
address mappings per ASID
What: /sys/kernel/debug/habanalabs/hl<n>/stop_on_err
Date: Mar 2020
KernelVersion: 5.6
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Sets the stop-on_error option for the device engines. Value of
"0" is for disable, otherwise enable.
+
+What: /sys/kernel/debug/habanalabs/hl<n>/dump_security_violations
+Date: Jan 2021
+KernelVersion: 5.12
+Contact: ogabbay@kernel.org
+Description: Dumps all security violations to dmesg. This will also ack
+		all security violations, meaning those violations will not be
+		dumped the next time the user calls this API
diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
index e35263f97fc1..070779e8d836 100644
--- a/Documentation/ABI/testing/ima_policy
+++ b/Documentation/ABI/testing/ima_policy
@@ -29,10 +29,10 @@ Description:
option: [[appraise_type=]] [template=] [permit_directio]
[appraise_flag=] [keyrings=]
base:
- func:= [BPRM_CHECK][MMAP_CHECK][CREDS_CHECK][FILE_CHECK]MODULE_CHECK]
+ func:= [BPRM_CHECK][MMAP_CHECK][CREDS_CHECK][FILE_CHECK][MODULE_CHECK]
[FIRMWARE_CHECK]
[KEXEC_KERNEL_CHECK] [KEXEC_INITRAMFS_CHECK]
- [KEXEC_CMDLINE] [KEY_CHECK]
+ [KEXEC_CMDLINE] [KEY_CHECK] [CRITICAL_DATA]
mask:= [[^]MAY_READ] [[^]MAY_WRITE] [[^]MAY_APPEND]
[[^]MAY_EXEC]
fsmagic:= hex value
@@ -52,6 +52,9 @@ Description:
template:= name of a defined IMA template type
(eg, ima-ng). Only valid when action is "measure".
pcr:= decimal value
+ label:= [selinux]|[kernel_info]|[data_label]
+ data_label:= a unique string used for grouping and limiting critical data.
+ For example, "selinux" to measure critical data for SELinux.
default policy:
# PROC_SUPER_MAGIC
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x
index 881f0cd99ce4..8e53a32f8150 100644
--- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x
@@ -371,6 +371,14 @@ Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (Read) Print the content of the Device ID Register
(0xFC8). The value is taken directly from the HW.
+What: /sys/bus/coresight/devices/etm<N>/mgmt/trcdevarch
+Date: January 2021
+KernelVersion: 5.12
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (Read) Print the content of the Device Architecture Register
+		(offset 0xFBC). The value is read directly
+ from the HW.
+
What: /sys/bus/coresight/devices/etm<N>/mgmt/trcdevtype
Date: April 2015
KernelVersion: 4.01
diff --git a/Documentation/ABI/testing/sysfs-bus-cxl b/Documentation/ABI/testing/sysfs-bus-cxl
new file mode 100644
index 000000000000..2fe7490ad6a8
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-cxl
@@ -0,0 +1,26 @@
+What: /sys/bus/cxl/devices/memX/firmware_version
+Date: December, 2020
+KernelVersion: v5.12
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RO) "FW Revision" string as reported by the Identify
+ Memory Device Output Payload in the CXL-2.0
+ specification.
+
+What: /sys/bus/cxl/devices/memX/ram/size
+Date: December, 2020
+KernelVersion: v5.12
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RO) "Volatile Only Capacity" as bytes. Represents the
+ identically named field in the Identify Memory Device Output
+ Payload in the CXL-2.0 specification.
+
+What: /sys/bus/cxl/devices/memX/pmem/size
+Date: December, 2020
+KernelVersion: v5.12
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RO) "Persistent Only Capacity" as bytes. Represents the
+ identically named field in the Identify Memory Device Output
+ Payload in the CXL-2.0 specification.
diff --git a/Documentation/ABI/testing/sysfs-bus-dfl-devices-emif b/Documentation/ABI/testing/sysfs-bus-dfl-devices-emif
new file mode 100644
index 000000000000..817d14126d4d
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-dfl-devices-emif
@@ -0,0 +1,25 @@
+What: /sys/bus/dfl/devices/dfl_dev.X/infX_cal_fail
+Date: Oct 2020
+KernelVersion: 5.12
+Contact: Xu Yilun <yilun.xu@intel.com>
+Description: Read-only. It indicates if the calibration failed on this
+ memory interface. "1" for calibration failure, "0" for OK.
+ Format: %u
+
+What: /sys/bus/dfl/devices/dfl_dev.X/infX_init_done
+Date: Oct 2020
+KernelVersion: 5.12
+Contact: Xu Yilun <yilun.xu@intel.com>
+Description: Read-only. It indicates if the initialization completed on
+ this memory interface. "1" for initialization complete, "0"
+ for not yet.
+ Format: %u
+
+What: /sys/bus/dfl/devices/dfl_dev.X/infX_clear
+Date: Oct 2020
+KernelVersion: 5.12
+Contact: Xu Yilun <yilun.xu@intel.com>
+Description: Write-only. Writing "1" to this file will zero out all memory
+ data in this memory interface. Writing of other values is
+ invalid.
+ Format: %u
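+
+		For illustration only ("dfl_dev.0" and "inf0" are placeholders
+		for a real DFL device and memory interface index)::
+
+		  # cat /sys/bus/dfl/devices/dfl_dev.0/inf0_cal_fail
+		  0
+		  # echo 1 > /sys/bus/dfl/devices/dfl_dev.0/inf0_clear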
diff --git a/Documentation/ABI/testing/sysfs-bus-dfl-devices-n3000-nios b/Documentation/ABI/testing/sysfs-bus-dfl-devices-n3000-nios
new file mode 100644
index 000000000000..5335d742bcaf
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-dfl-devices-n3000-nios
@@ -0,0 +1,47 @@
+What: /sys/bus/dfl/devices/dfl_dev.X/fec_mode
+Date: Oct 2020
+KernelVersion: 5.12
+Contact: Xu Yilun <yilun.xu@intel.com>
+Description: Read-only. Returns the FEC mode of the 25G links of the
+ ethernet retimers configured by Nios firmware. "rs" for Reed
+ Solomon FEC, "kr" for Fire Code FEC, "no" for NO FEC.
+		"not supported" if the FEC mode setting is not supported;
+		this happens when the Nios firmware version major < 3, or
+		when no link is configured to 25G.
+ Format: string
+
+What: /sys/bus/dfl/devices/dfl_dev.X/retimer_A_mode
+Date: Oct 2020
+KernelVersion: 5.12
+Contact: Xu Yilun <yilun.xu@intel.com>
+Description: Read-only. Returns the enumeration value of the working mode of
+ the retimer A configured by the Nios firmware. The value is
+ read out from shared registers filled by the Nios firmware. Now
+ the values could be:
+
+ - "0": Reset
+ - "1": 4x10G
+ - "2": 4x25G
+ - "3": 2x25G
+ - "4": 2x25G+2x10G
+ - "5": 1x25G
+
+		If the Nios firmware is updated in the future to support
+		more retimer modes, more enumeration values are expected.
+ Format: 0x%x
+
+What: /sys/bus/dfl/devices/dfl_dev.X/retimer_B_mode
+Date: Oct 2020
+KernelVersion: 5.12
+Contact: Xu Yilun <yilun.xu@intel.com>
+Description: Read-only. Returns the enumeration value of the working mode of
+ the retimer B configured by the Nios firmware. The value format
+ is the same as retimer_A_mode.
+
+What: /sys/bus/dfl/devices/dfl_dev.X/nios_fw_version
+Date: Oct 2020
+KernelVersion: 5.12
+Contact: Xu Yilun <yilun.xu@intel.com>
+Description: Read-only. Returns the version of the Nios firmware in the
+ FPGA. Its format is "major.minor.patch".
+ Format: %x.%x.%x
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 35289d47d6cb..d957f5da5c04 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -198,6 +198,7 @@ Description:
Units after application of scale and offset are m/s^2.
What: /sys/bus/iio/devices/iio:deviceX/in_angl_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_anglY_raw
KernelVersion: 4.17
Contact: linux-iio@vger.kernel.org
Description:
@@ -1812,3 +1813,13 @@ Contact: linux-iio@vger.kernel.org
Description:
Unscaled light intensity according to CIE 1931/DIN 5033 color space.
Units after application of scale are nano nanowatts per square meter.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_anglY_label
+KernelVersion: 5.12
+Contact: linux-iio@vger.kernel.org
+Description:
+ Optional symbolic label for channel Y.
+		For the Intel HID hinge sensor, the label values are:
+		hinge, keyboard, screen. The three channels correspond
+		respectively to the hinge angle, keyboard angle, and
+		screen angle.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-dac-ad5766 b/Documentation/ABI/testing/sysfs-bus-iio-dac-ad5766
new file mode 100644
index 000000000000..7fbcba15bf1e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-dac-ad5766
@@ -0,0 +1,31 @@
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_dither_enable
+KernelVersion: 5.12
+Contact: linux-iio@vger.kernel.org
+Description:
+ Dither enable. Write 1 to enable dither or 0 to disable it.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_dither_invert
+KernelVersion: 5.12
+Contact: linux-iio@vger.kernel.org
+Description:
+ Inverts the dither applied to the selected DAC channel. Dither is not
+ inverted by default. Write "1" to invert dither.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_dither_scale_available
+KernelVersion: 5.12
+Contact: linux-iio@vger.kernel.org
+Description:
+ Returns possible scalings available for the current channel.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_dither_scale
+KernelVersion: 5.12
+Contact: linux-iio@vger.kernel.org
+Description:
+ Scales the dither before it is applied to the selected channel.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_dither_source
+KernelVersion: 5.12
+Contact: linux-iio@vger.kernel.org
+Description:
+ Selects dither source applied to the selected channel. Write "0" to
+ select N0 source, write "1" to select N1 source.
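+
+		A hedged usage sketch (the device and channel numbers are
+		placeholders)::
+
+		  # echo 1 > /sys/bus/iio/devices/iio:device0/in_voltage0_dither_enable
+		  # cat /sys/bus/iio/devices/iio:device0/in_voltage0_dither_scale_available
+		  # echo 0 > /sys/bus/iio/devices/iio:device0/in_voltage0_dither_source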
diff --git a/Documentation/ABI/testing/sysfs-bus-pci-devices-pvpanic b/Documentation/ABI/testing/sysfs-bus-pci-devices-pvpanic
new file mode 100644
index 000000000000..1936f7324155
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-pci-devices-pvpanic
@@ -0,0 +1,24 @@
+What: /sys/devices/pci0000:00/*/QEMU0001:00/capability
+Date: Jan 2021
+Contact: zhenwei pi <pizhenwei@bytedance.com>
+Description:
+ Read-only attribute. Capabilities of pvpanic device which
+ are supported by QEMU.
+
+ Format: %x.
+
+ Detailed bit definition refers to section <Bit Definition>
+ from pvpanic device specification:
+ https://git.qemu.org/?p=qemu.git;a=blob_plain;f=docs/specs/pvpanic.txt
+
+What: /sys/devices/pci0000:00/*/QEMU0001:00/events
+Date: Jan 2021
+Contact: zhenwei pi <pizhenwei@bytedance.com>
+Description:
+		RW attribute. Set/get which features are in use. This
+		attribute is used to enable/disable feature(s) of the
+		pvpanic device.
+ Notice that this value should be a subset of capability.
+
+ Format: %x.
+
+ Also refer to pvpanic device specification.
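+
+		For illustration only (the capability value shown is
+		hypothetical; the glob assumes a single pvpanic device)::
+
+		  # cat /sys/devices/pci0000:00/*/QEMU0001:00/capability
+		  3
+		  # echo 1 > /sys/devices/pci0000:00/*/QEMU0001:00/events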
diff --git a/Documentation/ABI/testing/sysfs-bus-thunderbolt b/Documentation/ABI/testing/sysfs-bus-thunderbolt
index a91b4b24496e..d7f09d011b6d 100644
--- a/Documentation/ABI/testing/sysfs-bus-thunderbolt
+++ b/Documentation/ABI/testing/sysfs-bus-thunderbolt
@@ -49,6 +49,15 @@ Description: Holds a comma separated list of device unique_ids that
If a device is authorized automatically during boot its
boot attribute is set to 1.
+What: /sys/bus/thunderbolt/devices/.../domainX/deauthorization
+Date: May 2021
+KernelVersion: 5.12
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
+Description: This attribute tells whether the system supports
+		de-authorization of devices. Value of 1 means the user can
+		de-authorize a PCIe tunnel by writing 0 to the authorized
+		attribute under each device.
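+
+		For illustration only (the domain and device names are
+		placeholders)::
+
+		  # cat /sys/bus/thunderbolt/devices/domain0/deauthorization
+		  1
+		  # echo 0 > /sys/bus/thunderbolt/devices/0-1/authorized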
+
What: /sys/bus/thunderbolt/devices/.../domainX/iommu_dma_protection
Date: Mar 2019
KernelVersion: 4.21
@@ -76,6 +85,8 @@ Description: This attribute holds current Thunderbolt security level
usbonly Automatically tunnel USB controller of the
connected Thunderbolt dock (and Display Port). All
PCIe links downstream of the dock are removed.
+ nopcie USB4 system where PCIe tunneling is disabled from
+ the BIOS.
======= ==================================================
What: /sys/bus/thunderbolt/devices/.../authorized
@@ -84,22 +95,25 @@ KernelVersion: 4.13
Contact: thunderbolt-software@lists.01.org
Description: This attribute is used to authorize Thunderbolt devices
after they have been connected. If the device is not
- authorized, no devices such as PCIe and Display port are
- available to the system.
+ authorized, no PCIe devices are available to the system.
Contents of this attribute will be 0 when the device is not
yet authorized.
Possible values are supported:
- == ===========================================
+ == ===================================================
+ 0 The device will be de-authorized (only supported if
+ deauthorization attribute under domain contains 1)
1 The device will be authorized and connected
- == ===========================================
+ == ===================================================
When key attribute contains 32 byte hex string the possible
values are:
== ========================================================
+ 0 The device will be de-authorized (only supported if
+ deauthorization attribute under domain contains 1)
1 The 32 byte hex string is added to the device NVM and
the device is authorized.
2 Send a challenge based on the 32 byte hex string. If the
diff --git a/Documentation/ABI/testing/sysfs-class-led-trigger-tty b/Documentation/ABI/testing/sysfs-class-led-trigger-tty
new file mode 100644
index 000000000000..2bf6b24e781b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-led-trigger-tty
@@ -0,0 +1,6 @@
+What: /sys/class/leds/<led>/ttyname
+Date: Dec 2020
+KernelVersion: 5.10
+Contact: linux-leds@vger.kernel.org
+Description:
+ Specifies the tty device name of the triggering tty
diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net
index 1f2002df5ba2..1419103d11f9 100644
--- a/Documentation/ABI/testing/sysfs-class-net
+++ b/Documentation/ABI/testing/sysfs-class-net
@@ -337,3 +337,18 @@ Contact: netdev@vger.kernel.org
Description:
32-bit unsigned integer counting the number of times the link has
been down
+
+What: /sys/class/net/<iface>/threaded
+Date: Jan 2021
+KernelVersion: 5.12
+Contact: netdev@vger.kernel.org
+Description:
+		Boolean value to control the threaded mode per device. The
+		user can set this value to enable/disable threaded mode for
+		all NAPI instances belonging to this device, without the
+		need to bring the device up/down.
+
+ Possible values:
+ == ==================================
+ 0 threaded mode disabled for this dev
+ 1 threaded mode enabled for this dev
+ == ==================================
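+
+		For illustration only (the interface name "eth0" is a
+		placeholder)::
+
+		  # echo 1 > /sys/class/net/eth0/threaded
+		  # cat /sys/class/net/eth0/threaded
+		  1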
diff --git a/Documentation/ABI/testing/sysfs-class-net-dsa b/Documentation/ABI/testing/sysfs-class-net-dsa
index 985d84c585c6..e2da26b44dd0 100644
--- a/Documentation/ABI/testing/sysfs-class-net-dsa
+++ b/Documentation/ABI/testing/sysfs-class-net-dsa
@@ -3,5 +3,12 @@ Date: August 2018
KernelVersion: 4.20
Contact: netdev@vger.kernel.org
Description:
- String indicating the type of tagging protocol used by the
- DSA slave network device.
+ On read, this file returns a string indicating the type of
+ tagging protocol used by the DSA network devices that are
+ attached to this master interface.
+ On write, this file changes the tagging protocol of the
+ attached DSA switches, if this operation is supported by the
+ driver. Changing the tagging protocol must be done with the DSA
+ interfaces and the master interface all administratively down.
+ See the "name" field of each registered struct dsa_device_ops
+ for a list of valid values.
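+
+		A hedged example (interface names and the protocol string are
+		placeholders; the protocol must be one supported by the
+		switch driver)::
+
+		  # ip link set eth0 down
+		  # ip link set lan0 down
+		  # echo edsa > /sys/class/net/eth0/dsa/tagging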
diff --git a/Documentation/ABI/testing/sysfs-class-net-qmi b/Documentation/ABI/testing/sysfs-class-net-qmi
index c310db4ccbc2..ed79f5893421 100644
--- a/Documentation/ABI/testing/sysfs-class-net-qmi
+++ b/Documentation/ABI/testing/sysfs-class-net-qmi
@@ -48,3 +48,13 @@ Description:
Write a number ranging from 1 to 254 to delete a previously
created qmap mux based network device.
+
+What: /sys/class/net/<qmimux iface>/qmap/mux_id
+Date: January 2021
+KernelVersion: 5.12
+Contact: Daniele Palmas <dnlplm@gmail.com>
+Description:
+ Unsigned integer
+
+ Indicates the mux id associated to the qmimux network interface
+ during its creation.
diff --git a/Documentation/ABI/testing/sysfs-class-power-ltc4162l b/Documentation/ABI/testing/sysfs-class-power-ltc4162l
new file mode 100644
index 000000000000..ba30db93052b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-power-ltc4162l
@@ -0,0 +1,82 @@
+What: /sys/class/power_supply/ltc4162-l/charge_status
+Date:		January 2021
+KernelVersion: 5.11
+Description:
+ Detailed charge status information as reported by the chip.
+
+ Access: Read
+
+ Valid values:
+ ilim_reg_active
+ thermal_reg_active
+ vin_uvcl_active
+ iin_limit_active
+ constant_current
+ constant_voltage
+ charger_off
+
+What: /sys/class/power_supply/ltc4162-l/ibat
+Date:		January 2021
+KernelVersion: 5.11
+Description:
+ Battery input current as measured by the charger. Negative value
+ means that the battery is discharging.
+
+ Access: Read
+
+ Valid values: Signed value in microamps
+
+What: /sys/class/power_supply/ltc4162-l/vbat
+Date:		January 2021
+KernelVersion: 5.11
+Description:
+ Battery voltage as measured by the charger.
+
+ Access: Read
+
+ Valid values: In microvolts
+
+What: /sys/class/power_supply/ltc4162-l/vbat_avg
+Date:		January 2021
+KernelVersion: 5.11
+Description:
+ Battery voltage, averaged over time, as measured by the charger.
+
+ Access: Read
+
+ Valid values: In microvolts
+
+What: /sys/class/power_supply/ltc4162-l/force_telemetry
+Date:		January 2021
+KernelVersion: 5.11
+Description:
+ To save battery current, the measurement system is disabled if
+ the battery is the only source of power. This affects all
+ voltage, current and temperature measurements.
+ Write a "1" to this to keep performing telemetry once every few
+ seconds, even when running on battery (as reported by the online
+ property, which is "1" when external power is available and "0"
+ when the system runs on battery).
+
+ Access: Read, Write
+
+ Valid values: 0 (disabled) or 1 (enabled)
+
+What: /sys/class/power_supply/ltc4162-l/arm_ship_mode
+Date:		January 2021
+KernelVersion: 5.11
+Description:
+ The charger will normally drain the battery while inactive,
+ typically drawing about 54 microamps. Write a "1" to this
+ property to arm a special "ship" mode that extends shelf life
+ by reducing the leakage to about 2.8 microamps. The chip will
+ remain in this mode (and no longer respond to I2C commands)
+ until some external power-supply is attached raising the input
+		until an external power supply is attached, raising the
+		input voltage above 1V. It will then automatically revert
+		to "0".
+ The ship mode, when armed, activates once the input voltage
+ drops below 1V.
+
+ Access: Read, Write
+
+ Valid values: 0 (disable) or 1 (enable)
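+
+		For illustration only (the charge_status value shown is
+		hypothetical)::
+
+		  # echo 1 > /sys/class/power_supply/ltc4162-l/force_telemetry
+		  # cat /sys/class/power_supply/ltc4162-l/charge_status
+		  constant_current
+		  # echo 1 > /sys/class/power_supply/ltc4162-l/arm_ship_mode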
diff --git a/Documentation/ABI/testing/sysfs-class-typec b/Documentation/ABI/testing/sysfs-class-typec
index 8eab41e79ce6..40122d915ae1 100644
--- a/Documentation/ABI/testing/sysfs-class-typec
+++ b/Documentation/ABI/testing/sysfs-class-typec
@@ -105,7 +105,25 @@ Date: April 2017
Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
Description:
Revision number of the supported USB Power Delivery
- specification, or 0 when USB Power Delivery is not supported.
+ specification, or 0.0 when USB Power Delivery is not supported.
+
+ Example values:
+ - "2.0": USB Power Delivery Release 2.0
+ - "3.0": USB Power Delivery Release 3.0
+ - "3.1": USB Power Delivery Release 3.1
+
+What: /sys/class/typec/<port>-{partner|cable}/usb_power_delivery_revision
+Date: January 2021
+Contact: Benson Leung <bleung@chromium.org>
+Description:
+ Revision number of the supported USB Power Delivery
+ specification of the port partner or cable, or 0.0 when USB
+ Power Delivery is not supported.
+
+ Example values:
+ - "2.0": USB Power Delivery Release 2.0
+ - "3.0": USB Power Delivery Release 3.0
+ - "3.1": USB Power Delivery Release 3.1
What: /sys/class/typec/<port>/usb_typec_revision
Date: April 2017
diff --git a/Documentation/ABI/testing/sysfs-devices-memory b/Documentation/ABI/testing/sysfs-devices-memory
index 246a45b96d22..d8b0f80b9e33 100644
--- a/Documentation/ABI/testing/sysfs-devices-memory
+++ b/Documentation/ABI/testing/sysfs-devices-memory
@@ -13,21 +13,22 @@ What: /sys/devices/system/memory/memoryX/removable
Date: June 2008
Contact: Badari Pulavarty <pbadari@us.ibm.com>
Description:
- The file /sys/devices/system/memory/memoryX/removable
- indicates whether this memory block is removable or not.
- This is useful for a user-level agent to determine
- identify removable sections of the memory before attempting
- potentially expensive hot-remove memory operation
+ The file /sys/devices/system/memory/memoryX/removable is a
+		legacy interface used to indicate whether a memory block is
+		likely to be offlineable or not. Newer kernel versions return
+ "1" if and only if the kernel supports memory offlining.
Users: hotplug memory remove tools
http://www.ibm.com/developerworks/wikis/display/LinuxP/powerpc-utils
+ lsmem/chmem part of util-linux
What: /sys/devices/system/memory/memoryX/phys_device
Date: September 2008
Contact: Badari Pulavarty <pbadari@us.ibm.com>
Description:
The file /sys/devices/system/memory/memoryX/phys_device
- is read-only and is designed to show the name of physical
- memory device. Implementation is currently incomplete.
+ is read-only; it is a legacy interface only ever used on s390x
+ to expose the covered storage increment.
+Users: Legacy s390-tools lsmem/chmem
What: /sys/devices/system/memory/memoryX/phys_index
Date: September 2008
@@ -43,23 +44,25 @@ Date: September 2008
Contact: Badari Pulavarty <pbadari@us.ibm.com>
Description:
The file /sys/devices/system/memory/memoryX/state
- is read-write. When read, its contents show the
- online/offline state of the memory section. When written,
- root can toggle the the online/offline state of a removable
- memory section (see removable file description above)
- using the following commands::
+ is read-write. When read, it returns the online/offline
+ state of the memory block. When written, root can toggle
+ the online/offline state of a memory block using the following
+ commands::
# echo online > /sys/devices/system/memory/memoryX/state
# echo offline > /sys/devices/system/memory/memoryX/state
- For example, if /sys/devices/system/memory/memory22/removable
- contains a value of 1 and
- /sys/devices/system/memory/memory22/state contains the
- string "online" the following command can be executed by
- by root to offline that section::
-
- # echo offline > /sys/devices/system/memory/memory22/state
-
+ On newer kernel versions, advanced states can be specified
+ when onlining to select a target zone: "online_movable"
+ selects the movable zone. "online_kernel" selects the
+ applicable kernel zone (DMA, DMA32, or Normal). However,
+ after successfully setting one of the advanced states,
+ reading the file will return "online"; the zone information
+ can be obtained via "valid_zones" instead.
+
+ While onlining is unlikely to fail, there are no guarantees
+ that offlining will succeed. Offlining is more likely to
+ succeed if "valid_zones" indicates "Movable".
Users: hotplug memory remove tools
http://www.ibm.com/developerworks/wikis/display/LinuxP/powerpc-utils
@@ -69,8 +72,19 @@ Date: July 2014
Contact: Zhang Zhen <zhenzhang.zhang@huawei.com>
Description:
The file /sys/devices/system/memory/memoryX/valid_zones is
- read-only and is designed to show which zone this memory
- block can be onlined to.
+ read-only.
+
+ For online memory blocks, it returns in which zone memory
+ provided by a memory block is managed. If multiple zones
+ apply (not applicable for hotplugged memory), "None" is returned
+ and the memory block cannot be offlined.
+
+ For offline memory blocks, it returns by which zone memory
+ provided by a memory block can be managed when onlining.
+ The first returned zone ("default") will be used when setting
+ the state of an offline memory block to "online". Only one of
+ the kernel zones (DMA, DMA32, Normal) is applicable for a single
+ memory block.
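+
+		For illustration only (memory block 22 and the zone list shown
+		are hypothetical)::
+
+		  # cat /sys/devices/system/memory/memory22/valid_zones
+		  Movable Normal
+		  # echo online_movable > /sys/devices/system/memory/memory22/state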
What: /sys/devices/system/memoryX/nodeY
Date: October 2009
diff --git a/Documentation/ABI/testing/sysfs-devices-xenbus b/Documentation/ABI/testing/sysfs-devices-xenbus
new file mode 100644
index 000000000000..fd796cb4f315
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-xenbus
@@ -0,0 +1,41 @@
+What: /sys/devices/*/xenbus/event_channels
+Date: February 2021
+Contact: Xen Developers mailing list <xen-devel@lists.xenproject.org>
+Description:
+ Number of Xen event channels associated with a kernel based
+ paravirtualized device frontend or backend.
+
+What: /sys/devices/*/xenbus/events
+Date: February 2021
+Contact: Xen Developers mailing list <xen-devel@lists.xenproject.org>
+Description:
+ Total number of Xen events received for a Xen pv device
+ frontend or backend.
+
+What: /sys/devices/*/xenbus/jiffies_eoi_delayed
+Date: February 2021
+Contact: Xen Developers mailing list <xen-devel@lists.xenproject.org>
+Description:
+ Summed up time in jiffies the EOI of an interrupt for a Xen
+ pv device has been delayed in order to avoid stalls due to
+		event storms. A rising value is a first sign of a rogue
+		other end of the pv device.
+
+What: /sys/devices/*/xenbus/spurious_events
+Date: February 2021
+Contact: Xen Developers mailing list <xen-devel@lists.xenproject.org>
+Description:
+ Number of events received for a Xen pv device which did not
+ require any action. Too many spurious events in a row will
+ trigger delayed EOI processing.
+
+What: /sys/devices/*/xenbus/spurious_threshold
+Date: February 2021
+Contact: Xen Developers mailing list <xen-devel@lists.xenproject.org>
+Description:
+ Controls the tolerated number of subsequent spurious events
+ before delayed EOI processing is triggered for a Xen pv
+ device. Default is 1. This can be modified in case the other
+ end of the pv device is issuing spurious events on a regular
+		basis and is known not to be malicious. Raising the value
+		in such cases can improve pv device performance.
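+
+		For illustration only (the device path is globbed and the
+		values shown are hypothetical)::
+
+		  # cat /sys/devices/*/xenbus/spurious_threshold
+		  1
+		  # echo 5 > /sys/devices/*/xenbus/spurious_threshold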
diff --git a/Documentation/ABI/testing/sysfs-driver-habanalabs b/Documentation/ABI/testing/sysfs-driver-habanalabs
index 169ae4b2a180..1f127f71d2b4 100644
--- a/Documentation/ABI/testing/sysfs-driver-habanalabs
+++ b/Documentation/ABI/testing/sysfs-driver-habanalabs
@@ -1,7 +1,7 @@
What: /sys/class/habanalabs/hl<n>/armcp_kernel_ver
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Version of the Linux kernel running on the device's CPU.
Will be DEPRECATED in Linux kernel version 5.10, and be
replaced with cpucp_kernel_ver
@@ -9,7 +9,7 @@ Description: Version of the Linux kernel running on the device's CPU.
What: /sys/class/habanalabs/hl<n>/armcp_ver
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Version of the application running on the device's CPU
Will be DEPRECATED in Linux kernel version 5.10, and be
replaced with cpucp_ver
@@ -17,7 +17,7 @@ Description: Version of the application running on the device's CPU
What: /sys/class/habanalabs/hl<n>/clk_max_freq_mhz
Date: Jun 2019
KernelVersion: not yet upstreamed
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Allows the user to set the maximum clock frequency, in MHz.
The device clock might be set to lower value than the maximum.
The user should read the clk_cur_freq_mhz to see the actual
@@ -27,52 +27,52 @@ Description: Allows the user to set the maximum clock frequency, in MHz.
What: /sys/class/habanalabs/hl<n>/clk_cur_freq_mhz
Date: Jun 2019
KernelVersion: not yet upstreamed
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Displays the current frequency, in MHz, of the device clock.
This property is valid only for the Gaudi ASIC family
What: /sys/class/habanalabs/hl<n>/cpld_ver
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Version of the Device's CPLD F/W
What: /sys/class/habanalabs/hl<n>/cpucp_kernel_ver
Date: Oct 2020
KernelVersion: 5.10
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Version of the Linux kernel running on the device's CPU
What: /sys/class/habanalabs/hl<n>/cpucp_ver
Date: Oct 2020
KernelVersion: 5.10
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Version of the application running on the device's CPU
What: /sys/class/habanalabs/hl<n>/device_type
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Displays the code name of the device according to its type.
The supported values are: "GOYA"
What: /sys/class/habanalabs/hl<n>/eeprom
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: A binary file attribute that contains the contents of the
on-board EEPROM
What: /sys/class/habanalabs/hl<n>/fuse_ver
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Displays the device's version from the eFuse
What: /sys/class/habanalabs/hl<n>/hard_reset
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Interface to trigger a hard-reset operation for the device.
Hard-reset will reset ALL internal components of the device
except for the PCI interface and the internal PLLs
@@ -80,14 +80,14 @@ Description: Interface to trigger a hard-reset operation for the device.
What: /sys/class/habanalabs/hl<n>/hard_reset_cnt
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Displays how many times the device have undergone a hard-reset
operation since the driver was loaded
What: /sys/class/habanalabs/hl<n>/high_pll
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Allows the user to set the maximum clock frequency for MME, TPC
and IC when the power management profile is set to "automatic".
This property is valid only for the Goya ASIC family
@@ -95,7 +95,7 @@ Description: Allows the user to set the maximum clock frequency for MME, TPC
What: /sys/class/habanalabs/hl<n>/ic_clk
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Allows the user to set the maximum clock frequency, in Hz, of
the Interconnect fabric. Writes to this parameter affect the
device only when the power management profile is set to "manual"
@@ -107,27 +107,27 @@ Description: Allows the user to set the maximum clock frequency, in Hz, of
What: /sys/class/habanalabs/hl<n>/ic_clk_curr
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Displays the current clock frequency, in Hz, of the Interconnect
fabric. This property is valid only for the Goya ASIC family
What: /sys/class/habanalabs/hl<n>/infineon_ver
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Version of the Device's power supply F/W code
What: /sys/class/habanalabs/hl<n>/max_power
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Allows the user to set the maximum power consumption of the
device in milliwatts.
What: /sys/class/habanalabs/hl<n>/mme_clk
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Allows the user to set the maximum clock frequency, in Hz, of
the MME compute engine. Writes to this parameter affect the
device only when the power management profile is set to "manual"
@@ -139,21 +139,21 @@ Description: Allows the user to set the maximum clock frequency, in Hz, of
What: /sys/class/habanalabs/hl<n>/mme_clk_curr
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Displays the current clock frequency, in Hz, of the MME compute
engine. This property is valid only for the Goya ASIC family
What: /sys/class/habanalabs/hl<n>/pci_addr
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Displays the PCI address of the device. This is needed so the
user would be able to open a device based on its PCI address
What: /sys/class/habanalabs/hl<n>/pm_mng_profile
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Power management profile. Values are "auto", "manual". In "auto"
mode, the driver will set the maximum clock frequency to a high
value when a user-space process opens the device's file (unless
@@ -167,13 +167,13 @@ Description: Power management profile. Values are "auto", "manual". In "auto"
What: /sys/class/habanalabs/hl<n>/preboot_btl_ver
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Version of the device's preboot F/W code
What: /sys/class/habanalabs/hl<n>/soft_reset
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Interface to trigger a soft-reset operation for the device.
Soft-reset will reset only the compute and DMA engines of the
device
@@ -181,26 +181,26 @@ Description: Interface to trigger a soft-reset operation for the device.
What: /sys/class/habanalabs/hl<n>/soft_reset_cnt
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Displays how many times the device have undergone a soft-reset
operation since the driver was loaded
What: /sys/class/habanalabs/hl<n>/status
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Status of the card: "Operational", "Malfunction", "In reset".
What: /sys/class/habanalabs/hl<n>/thermal_ver
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Version of the Device's thermal daemon
What: /sys/class/habanalabs/hl<n>/tpc_clk
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Allows the user to set the maximum clock frequency, in Hz, of
the TPC compute engines. Writes to this parameter affect the
device only when the power management profile is set to "manual"
@@ -212,12 +212,12 @@ Description: Allows the user to set the maximum clock frequency, in Hz, of
What: /sys/class/habanalabs/hl<n>/tpc_clk_curr
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Displays the current clock frequency, in Hz, of the TPC compute
engines. This property is valid only for the Goya ASIC family
What: /sys/class/habanalabs/hl<n>/uboot_ver
Date: Jan 2019
KernelVersion: 5.1
-Contact: oded.gabbay@gmail.com
+Contact: ogabbay@kernel.org
Description: Version of the u-boot running on the device's CPU \ No newline at end of file
diff --git a/Documentation/ABI/testing/sysfs-driver-input-cros-ec-keyb b/Documentation/ABI/testing/sysfs-driver-input-cros-ec-keyb
new file mode 100644
index 000000000000..c7afc2328045
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-input-cros-ec-keyb
@@ -0,0 +1,6 @@
+What: /sys/class/input/input(x)/device/function_row_physmap
+Date: January 2021
+Contact: Philip Chen <philipchen@chromium.org>
+Description: A space separated list of scancodes for the top row keys,
+ ordered by the physical positions of the keys, from left
+ to right.
diff --git a/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc b/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc
index 979a2d62513f..9773925138af 100644
--- a/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc
+++ b/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc
@@ -13,3 +13,24 @@ Contact: Xu Yilun <yilun.xu@intel.com>
Description: Read only. Returns the firmware version of Intel MAX10
BMC chip.
Format: "0x%x".
+
+What: /sys/bus/spi/devices/.../mac_address
+Date: January 2021
+KernelVersion: 5.12
+Contact: Russ Weight <russell.h.weight@intel.com>
+Description: Read only. Returns the first MAC address in a block
+ of sequential MAC addresses assigned to the board
+ that is managed by the Intel MAX10 BMC. It is stored in
+ FLASH storage and is mirrored in the MAX10 BMC register
+ space.
+ Format: "%02x:%02x:%02x:%02x:%02x:%02x".
+
+What: /sys/bus/spi/devices/.../mac_count
+Date: January 2021
+KernelVersion: 5.12
+Contact: Russ Weight <russell.h.weight@intel.com>
+Description: Read only. Returns the number of sequential MAC
+ addresses assigned to the board managed by the Intel
+ MAX10 BMC. This value is stored in FLASH and is mirrored
+ in the MAX10 BMC register space.
+ Format: "%u".
diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index 75ccc5c62b3c..d1bc23cb6a9d 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -1161,3 +1161,14 @@ Description: This entry shows the configured size of WriteBooster buffer.
0400h corresponds to 4GB.
The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/wb_on
+Date: January 2021
+Contact: Bean Huo <beanhuo@micron.com>
+Description: This node is used to set or display whether UFS WriteBooster is
+ enabled. Echo 0 to this file to disable UFS WriteBooster or 1 to
+ enable it. The WriteBooster is enabled after power-on/reset,
+		however, it will be disabled/enabled while CLK scaling down/up
+ (if the platform supports UFSHCD_CAP_CLK_SCALING). For a
+ platform that doesn't support UFSHCD_CAP_CLK_SCALING, we can
+ disable/enable WriteBooster through this sysfs node.
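+
+		A minimal usage sketch (the platform device name is globbed;
+		it assumes a single UFS host)::
+
+		  # echo 1 > /sys/bus/platform/drivers/ufshcd/*/wb_on
+		  # cat /sys/bus/platform/drivers/ufshcd/*/wb_on
+		  1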
diff --git a/Documentation/ABI/testing/sysfs-firmware-acpi b/Documentation/ABI/testing/sysfs-firmware-acpi
index b16d30a71709..819939d858c9 100644
--- a/Documentation/ABI/testing/sysfs-firmware-acpi
+++ b/Documentation/ABI/testing/sysfs-firmware-acpi
@@ -1,3 +1,46 @@
+What: /sys/firmware/acpi/fpdt/
+Date: Jan 2021
+Contact: Zhang Rui <rui.zhang@intel.com>
+Description:
+ ACPI Firmware Performance Data Table (FPDT) provides
+ information for firmware performance data for system boot,
+ S3 suspend and S3 resume. This sysfs entry contains the
+ performance data retrieved from the FPDT.
+
+ boot:
+ firmware_start_ns: Timer value logged at the beginning
+ of firmware image execution. In nanoseconds.
+ bootloader_load_ns: Timer value logged just prior to
+ loading the OS boot loader into memory.
+ In nanoseconds.
+ bootloader_launch_ns: Timer value logged just prior to
+ launching the currently loaded OS boot loader
+ image. In nanoseconds.
+ exitbootservice_start_ns: Timer value logged at the
+ point when the OS loader calls the
+ ExitBootServices function for UEFI compatible
+ firmware. In nanoseconds.
+ exitbootservice_end_ns: Timer value logged at the point
+ just prior to the OS loader gaining control
+ back from the ExitBootServices function for
+ UEFI compatible firmware. In nanoseconds.
+ suspend:
+ suspend_start_ns: Timer value recorded at the previous
+ OS write to SLP_TYP upon entry to S3. In
+ nanoseconds.
+ suspend_end_ns: Timer value recorded at the previous
+ firmware write to SLP_TYP used to trigger
+ hardware entry to S3. In nanoseconds.
+ resume:
+ resume_count: A count of the number of S3 resume cycles
+ since the last full boot sequence.
+ resume_avg_ns: Average timer value of all resume cycles
+ logged since the last full boot sequence,
+ including the most recent resume. In nanoseconds.
+ resume_prev_ns: Timer recorded at the end of the previous
+ platform runtime firmware S3 resume, just prior to
+ handoff to the OS waking vector. In nanoseconds.
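+
+		A hedged read example (it is assumed here that the entries
+		are grouped into boot/, suspend/ and resume/ subdirectories;
+		the values shown are hypothetical)::
+
+		  # cat /sys/firmware/acpi/fpdt/boot/firmware_start_ns
+		  2115666849
+		  # cat /sys/firmware/acpi/fpdt/resume/resume_count
+		  3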
+
What: /sys/firmware/acpi/bgrt/
Date: January 2012
Contact: Matthew Garrett <mjg@redhat.com>
diff --git a/Documentation/ABI/testing/sysfs-firmware-sfi b/Documentation/ABI/testing/sysfs-firmware-sfi
deleted file mode 100644
index 5210e0f06ddb..000000000000
--- a/Documentation/ABI/testing/sysfs-firmware-sfi
+++ /dev/null
@@ -1,15 +0,0 @@
-What: /sys/firmware/sfi/tables/
-Date: May 2010
-Contact: Len Brown <lenb@kernel.org>
-Description:
- SFI defines a number of small static memory tables
- so the kernel can get platform information from firmware.
-
- The tables are defined in the latest SFI specification:
- http://simplefirmware.org/documentation
-
- While the tables are used by the kernel, user-space
- can observe them this way::
-
- # cd /sys/firmware/sfi/tables
- # cat $TABLENAME > $TABLENAME.bin
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 3dfee94e0618..cbeac1bebe2f 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -377,3 +377,35 @@ Description: This gives a control to limit the bio size in f2fs.
Default is zero, which will follow underlying block layer limit,
whereas, if it has a certain bytes value, f2fs won't submit a
bio larger than that size.
+
+What: /sys/fs/f2fs/<disk>/stat/sb_status
+Date: December 2020
+Contact: "Chao Yu" <yuchao0@huawei.com>
+Description: Show status of f2fs superblock in real time.
+
+ ====== ===================== =================================
+ value sb status macro description
+ 0x1 SBI_IS_DIRTY dirty flag for checkpoint
+ 0x2 SBI_IS_CLOSE specify unmounting
+ 0x4 SBI_NEED_FSCK need fsck.f2fs to fix
+		  0x8    SBI_POR_DOING         recovery is in progress
+ 0x10 SBI_NEED_SB_WRITE need to recover superblock
+ 0x20 SBI_NEED_CP need to checkpoint
+ 0x40 SBI_IS_SHUTDOWN shutdown by ioctl
+ 0x80 SBI_IS_RECOVERED recovered orphan/data
+ 0x100 SBI_CP_DISABLED CP was disabled last mount
+ 0x200 SBI_CP_DISABLED_QUICK CP was disabled quickly
+ 0x400 SBI_QUOTA_NEED_FLUSH need to flush quota info in CP
+ 0x800 SBI_QUOTA_SKIP_FLUSH skip flushing quota in current CP
+ 0x1000 SBI_QUOTA_NEED_REPAIR quota file may be corrupted
+ 0x2000 SBI_IS_RESIZEFS resizefs is in process
+ ====== ===================== =================================
+
+What: /sys/fs/f2fs/<disk>/ckpt_thread_ioprio
+Date: January 2021
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description:	Gives a way to change the checkpoint merge daemon's I/O
+		priority. Its default value is "be,3", which means "BE"
+		I/O class and I/O priority "3". The class can be selected
+		between "rt" and "be", and the I/O priority can be set
+		within the valid range of that class. The "," delimiter is
+		necessary between the I/O class and the priority number.
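+
+		For illustration only ("sda" is a placeholder for the block
+		device of the mounted f2fs filesystem)::
+
+		  # echo "rt,0" > /sys/fs/f2fs/sda/ckpt_thread_ioprio
+		  # cat /sys/fs/f2fs/sda/ckpt_thread_ioprio
+		  rt,0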
diff --git a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
index fd2ac02bc5bd..4989ab266682 100644
--- a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
+++ b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
@@ -1,11 +1,11 @@
-What: /sys/devices/platform/ideapad/camera_power
+What: /sys/bus/platform/devices/VPC2004:*/camera_power
Date: Dec 2010
KernelVersion: 2.6.37
Contact: "Ike Panhc <ike.pan@canonical.com>"
Description:
Control the power of camera module. 1 means on, 0 means off.
-What: /sys/devices/platform/ideapad/fan_mode
+What: /sys/bus/platform/devices/VPC2004:*/fan_mode
Date: June 2012
KernelVersion: 3.6
Contact: "Maxim Mikityanskiy <maxtram95@gmail.com>"
@@ -18,7 +18,7 @@ Description:
* 2 -> Dust Cleaning
* 4 -> Efficient Thermal Dissipation Mode
-What: /sys/devices/platform/ideapad/touchpad
+What: /sys/bus/platform/devices/VPC2004:*/touchpad
Date: May 2017
KernelVersion: 4.13
Contact: "Ritesh Raj Sarraf <rrs@debian.org>"
@@ -27,7 +27,16 @@ Description:
* 1 -> Switched On
* 0 -> Switched Off
-What: /sys/bus/pci/devices/<bdf>/<device>/VPC2004:00/fn_lock
+What: /sys/bus/platform/devices/VPC2004:*/conservation_mode
+Date: Aug 2017
+KernelVersion: 4.14
+Contact: platform-driver-x86@vger.kernel.org
+Description:
+ Controls whether the conservation mode is enabled or not.
+ This feature limits the maximum battery charge percentage to
+ around 50-60% in order to prolong the lifetime of the battery.
+
+What: /sys/bus/platform/devices/VPC2004:*/fn_lock
Date: May 2018
KernelVersion: 4.18
Contact: "Oleg Keri <ezhi99@gmail.com>"
@@ -41,3 +50,12 @@ Description:
# echo "0" > \
/sys/bus/pci/devices/0000:00:1f.0/PNP0C09:00/VPC2004:00/fn_lock
+
+What: /sys/bus/platform/devices/VPC2004:*/usb_charging
+Date: Feb 2021
+KernelVersion: 5.12
+Contact: platform-driver-x86@vger.kernel.org
+Description:
+ Controls whether the "always on USB charging" feature is
+ enabled or not. This feature enables charging USB devices
+ even if the computer is not turned on.
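+
+		For illustration only (the glob assumes a single VPC2004
+		device)::
+
+		  # echo 1 > /sys/bus/platform/devices/VPC2004:*/usb_charging
+		  # cat /sys/bus/platform/devices/VPC2004:*/usb_charging
+		  1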
diff --git a/Documentation/ABI/testing/sysfs-platform-kim b/Documentation/ABI/testing/sysfs-platform-kim
index a7f81de68046..6a52d6d2b601 100644
--- a/Documentation/ABI/testing/sysfs-platform-kim
+++ b/Documentation/ABI/testing/sysfs-platform-kim
@@ -7,7 +7,7 @@ Description:
is connected. example: "/dev/ttyS0".
The device name flows down to architecture specific board
- initialization file from the SFI/ATAGS bootloader
+ initialization file from the ATAGS bootloader
firmware. The name exposed is read from the user-space
dameon and opens the device when install is requested.
diff --git a/Documentation/ABI/testing/sysfs-platform_profile b/Documentation/ABI/testing/sysfs-platform_profile
new file mode 100644
index 000000000000..dae9c8941905
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform_profile
@@ -0,0 +1,28 @@
+What: /sys/firmware/acpi/platform_profile_choices
+Date: October 2020
+Contact: Hans de Goede <hdegoede@redhat.com>
+Description: This file contains a space-separated list of profiles supported for this device.
+
+ Drivers must use the following standard profile-names:
+
+ ==================== ========================================
+ low-power Low power consumption
+ cool Cooler operation
+ quiet Quieter operation
+ balanced Balance between low power consumption
+ and performance
+ balanced-performance Balance between performance and low
+ power consumption with a slight bias
+ towards performance
+ performance High performance operation
+ ==================== ========================================
+
+ Userspace may expect drivers to offer more than one of these
+ standard profile names.
+
+What: /sys/firmware/acpi/platform_profile
+Date: October 2020
+Contact: Hans de Goede <hdegoede@redhat.com>
+Description: Reading this file gives the current selected profile for this
+ device. Writing this file with one of the strings from
+ platform_profile_choices changes the profile to the new value.
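+
+		For illustration only (the set of profiles offered depends on
+		the driver; the list shown is hypothetical)::
+
+		  # cat /sys/firmware/acpi/platform_profile_choices
+		  low-power balanced performance
+		  # echo performance > /sys/firmware/acpi/platform_profile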
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 61a7310b49e0..9c42dde97671 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -75,7 +75,7 @@ quiet_cmd_sphinx = SPHINX $@ --> file://$(abspath $(BUILDDIR)/$3/$4)
cmd_sphinx = $(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/userspace-api/media $2 && \
PYTHONDONTWRITEBYTECODE=1 \
BUILDDIR=$(abspath $(BUILDDIR)) SPHINX_CONF=$(abspath $(srctree)/$(src)/$5/$(SPHINX_CONF)) \
- $(PYTHON) $(srctree)/scripts/jobserver-exec \
+ $(PYTHON3) $(srctree)/scripts/jobserver-exec \
$(SHELL) $(srctree)/Documentation/sphinx/parallel-wrapper.sh \
$(SPHINXBUILD) \
-b $2 \
diff --git a/Documentation/PCI/endpoint/function/binding/pci-ntb.rst b/Documentation/PCI/endpoint/function/binding/pci-ntb.rst
new file mode 100644
index 000000000000..40253d3d5163
--- /dev/null
+++ b/Documentation/PCI/endpoint/function/binding/pci-ntb.rst
@@ -0,0 +1,38 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================
+PCI NTB Endpoint Function
+==========================
+
+1) Create a subdirectory to pci_epf_ntb directory in configfs.
+
+Standard EPF Configurable Fields:
+
+================ ===========================================================
+vendorid should be 0x104c
+deviceid should be 0xb00d for TI's J721E SoC
+revid don't care
+progif_code don't care
+subclass_code should be 0x00
+baseclass_code should be 0x5
+cache_line_size don't care
+subsys_vendor_id don't care
+subsys_id don't care
+interrupt_pin don't care
+msi_interrupts don't care
+msix_interrupts don't care
+================ ===========================================================
+
+2) Create a subdirectory to directory created in 1
+
+NTB EPF specific configurable fields:
+
+================ ===========================================================
+db_count Number of doorbells; default = 4
+mw1 size of memory window1
+mw2 size of memory window2
+mw3 size of memory window3
+mw4 size of memory window4
+num_mws Number of memory windows; max = 4
+spad_count Number of scratchpad registers; default = 64
+================ ===========================================================
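+
+A hedged configuration sketch, assuming configfs is mounted at
+/sys/kernel/config; the function device name "func1" and the sub-directory
+name "pci_epf_ntb.0" are placeholders:
+
+.. code-block:: bash
+
+    # cd /sys/kernel/config/pci_ep/functions/pci_epf_ntb
+    # mkdir func1                        # step 1: standard EPF fields
+    # echo 0x104c > func1/vendorid
+    # echo 0xb00d > func1/deviceid
+    # mkdir func1/pci_epf_ntb.0          # step 2: NTB specific fields
+    # echo 4 > func1/pci_epf_ntb.0/db_count
+    # echo 0x100000 > func1/pci_epf_ntb.0/mw1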
diff --git a/Documentation/PCI/endpoint/index.rst b/Documentation/PCI/endpoint/index.rst
index 4ca7439fbfc9..38ea1f604b6d 100644
--- a/Documentation/PCI/endpoint/index.rst
+++ b/Documentation/PCI/endpoint/index.rst
@@ -11,5 +11,8 @@ PCI Endpoint Framework
pci-endpoint-cfs
pci-test-function
pci-test-howto
+ pci-ntb-function
+ pci-ntb-howto
function/binding/pci-test
+ function/binding/pci-ntb
diff --git a/Documentation/PCI/endpoint/pci-endpoint-cfs.rst b/Documentation/PCI/endpoint/pci-endpoint-cfs.rst
index 1bbd81ed06c8..696f8eeb4738 100644
--- a/Documentation/PCI/endpoint/pci-endpoint-cfs.rst
+++ b/Documentation/PCI/endpoint/pci-endpoint-cfs.rst
@@ -68,6 +68,16 @@ created)
... subsys_vendor_id
... subsys_id
... interrupt_pin
+ ... primary/
+ ... <Symlink EPC Device1>/
+ ... secondary/
+ ... <Symlink EPC Device2>/
+
+If an EPF device has to be associated with 2 EPCs (as in the case of a
+Non-Transparent Bridge), the symlink of the endpoint controller connected
+to the primary interface should be added in the 'primary' directory and
+the symlink of the endpoint controller connected to the secondary interface
+should be added in the 'secondary' directory.
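+
+A hedged sketch of this association (the controller names are placeholders
+for real EPC device names under controllers/):
+
+.. code-block:: bash
+
+    # cd /sys/kernel/config/pci_ep
+    # ln -s controllers/2900000.pcie-ep functions/pci_epf_ntb/func1/primary
+    # ln -s controllers/2910000.pcie-ep functions/pci_epf_ntb/func1/secondary
+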
EPC Device
==========
diff --git a/Documentation/PCI/endpoint/pci-ntb-function.rst b/Documentation/PCI/endpoint/pci-ntb-function.rst
new file mode 100644
index 000000000000..3b9d836a4924
--- /dev/null
+++ b/Documentation/PCI/endpoint/pci-ntb-function.rst
@@ -0,0 +1,348 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+PCI NTB Function
+=================
+
+:Author: Kishon Vijay Abraham I <kishon@ti.com>
+
+PCI Non-Transparent Bridges (NTB) allow two host systems to communicate
+with each other by exposing each host as a device to the other host.
+NTBs typically support the ability to generate interrupts on the remote
+machine, expose memory ranges as BARs, and perform DMA. They also support
+scratchpads, which are areas of memory within the NTB that are accessible
+from both machines.
+
+PCI NTB Function allows two different systems (or hosts) to communicate
+with each other by configuring the endpoint instances in such a way that
+transactions from one system are routed to the other system.
+
+In the below diagram, PCI NTB function configures the SoC with multiple
+PCI Endpoint (EP) instances in such a way that transactions from one EP
+controller are routed to the other EP controller. Once PCI NTB function
+configures the SoC with multiple EP instances, HOST1 and HOST2 can
+communicate with each other using SoC as a bridge.
+
+.. code-block:: text
+
+ +-------------+ +-------------+
+ | | | |
+ | HOST1 | | HOST2 |
+ | | | |
+ +------^------+ +------^------+
+ | |
+ | |
+ +---------|-------------------------------------------------|---------+
+ | +------v------+ +------v------+ |
+ | | | | | |
+ | | EP | | EP | |
+ | | CONTROLLER1 | | CONTROLLER2 | |
+ | | <-----------------------------------> | |
+ | | | | | |
+ | | | | | |
+ | | | SoC With Multiple EP Instances | | |
+ | | | (Configured using NTB Function) | | |
+ | +-------------+ +-------------+ |
+ +---------------------------------------------------------------------+
+
+Constructs used for Implementing NTB
+====================================
+
+ 1) Config Region
+ 2) Self Scratchpad Registers
+ 3) Peer Scratchpad Registers
+ 4) Doorbell (DB) Registers
+ 5) Memory Window (MW)
+
+
+Config Region:
+--------------
+
+Config Region is a construct that is specific to NTB implemented using NTB
+Endpoint Function Driver. The host and endpoint side NTB function driver will
+exchange information with each other using this region. Config Region has
+Control/Status Registers for configuring the Endpoint Controller. Host can
+write into this region for configuring the outbound Address Translation Unit
+(ATU) and to indicate the link status. Endpoint can indicate the status of
+commands issued by host in this region. Endpoint can also indicate the
+scratchpad offset and number of memory windows to the host using this region.
+
+The format of Config Region is given below. All the fields here are 32 bits.
+
+.. code-block:: text
+
+ +------------------------+
+ | COMMAND |
+ +------------------------+
+ | ARGUMENT |
+ +------------------------+
+ | STATUS |
+ +------------------------+
+ | TOPOLOGY |
+ +------------------------+
+ | ADDRESS (LOWER 32) |
+ +------------------------+
+ | ADDRESS (UPPER 32) |
+ +------------------------+
+ | SIZE |
+ +------------------------+
+ | NO OF MEMORY WINDOW |
+ +------------------------+
+ | MEMORY WINDOW1 OFFSET |
+ +------------------------+
+ | SPAD OFFSET |
+ +------------------------+
+ | SPAD COUNT |
+ +------------------------+
+ | DB ENTRY SIZE |
+ +------------------------+
+ | DB DATA |
+ +------------------------+
+ | : |
+ +------------------------+
+ | : |
+ +------------------------+
+ | DB DATA |
+ +------------------------+
+
+
+ COMMAND:
+
+ NTB function supports three commands:
+
+ CMD_CONFIGURE_DOORBELL (0x1): Command to configure doorbell. Before
+ invoking this command, the host should allocate and initialize
+ MSI/MSI-X vectors (i.e., initialize the MSI/MSI-X Capability in the
+ Endpoint). The endpoint on receiving this command will configure
+ the outbound ATU such that transactions to Doorbell BAR will be routed
+ to the MSI/MSI-X address programmed by the host. The ARGUMENT
+ register should be populated with number of DBs to configure (in the
+ lower 16 bits) and if MSI or MSI-X should be configured (BIT 16).
+
+ CMD_CONFIGURE_MW (0x2): Command to configure memory window (MW). The
+ host invokes this command after allocating a buffer that can be
+ accessed by remote host. The allocated address should be programmed
+ in the ADDRESS register (64 bit), the size should be programmed in
+ the SIZE register and the memory window index should be programmed
+ in the ARGUMENT register. The endpoint on receiving this command
+ will configure the outbound ATU such that transactions to MW BAR
+ are routed to the address provided by the host.
+
+ CMD_LINK_UP (0x3): Command to indicate an NTB application is
+ bound to the EP device on the host side. Once the endpoint
+ receives this command from both the hosts, the endpoint will
+ raise a LINK_UP event to both the hosts to indicate the host
+ NTB applications can start communicating with each other.
+
+ ARGUMENT:
+
+ The value of this register is based on the commands issued in
+ command register. See COMMAND section for more information.
+
+ TOPOLOGY:
+
+ Set to NTB_TOPO_B2B_USD for Primary interface
+ Set to NTB_TOPO_B2B_DSD for Secondary interface
+
+ ADDRESS/SIZE:
+
+ Address and Size to be used while configuring the memory window.
+ See "CMD_CONFIGURE_MW" for more info.
+
+ MEMORY WINDOW1 OFFSET:
+
+ Memory Window 1 and Doorbell registers are packed together in the
+ same BAR. The initial portion of the region will have doorbell
+ registers and the latter portion of the region is for memory window 1.
+ This register will specify the offset of the memory window 1.
+
+ NO OF MEMORY WINDOW:
+
+ Specifies the number of memory windows supported by the NTB device.
+
+ SPAD OFFSET:
+
+ Self scratchpad region and config region are packed together in the
+ same BAR. The initial portion of the region will have config region
+ and the latter portion of the region is for self scratchpad. This
+ register will specify the offset of the self scratchpad registers.
+
+ SPAD COUNT:
+
+ Specifies the number of scratchpad registers supported by the NTB
+ device.
+
+ DB ENTRY SIZE:
+
+ Used to determine the offset within the DB BAR that should be written
+ in order to raise doorbell. EPF NTB can use either MSI or MSI-X to
+ ring doorbell (MSI-X support will be added later). MSI uses same
+ address for all the interrupts and MSI-X can provide different
+ addresses for different interrupts. The MSI/MSI-X address is provided
+ by the host and the address it gives is based on the MSI/MSI-X
+ implementation supported by the host. For instance, ARM platform
+ using GIC ITS will have the same MSI-X address for all the interrupts.
+ In order to support all the combinations and use the same mechanism
+ for both MSI and MSI-X, EPF NTB allocates a separate region in the
+ Outbound Address Space for each of the interrupts. This region will
+ be mapped to the MSI/MSI-X address provided by the host. If a host
+ provides the same address for all the interrupts, all the regions
+ will be translated to the same address. If a host provides different
+ addresses, the regions will be translated to different addresses. This
+ will ensure there is no difference while raising the doorbell.
+
+ DB DATA:
+
+ EPF NTB supports 32 interrupts, so there are 32 DB DATA registers.
+ This holds the MSI/MSI-X data that has to be written to MSI address
+ for raising doorbell interrupt. This will be populated by EPF NTB
+ while invoking CMD_CONFIGURE_DOORBELL.
+
+Scratchpad Registers:
+---------------------
+
+ Each host has its own register space allocated in the memory of NTB endpoint
+ controller. They are both readable and writable from both sides of the bridge.
+ They are used by applications built over NTB and can be used to pass control
+ and status information between both sides of a device.
+
+  Scratchpad registers have 2 parts
+ 1) Self Scratchpad: Host's own register space
+ 2) Peer Scratchpad: Remote host's register space.
+
+Doorbell Registers:
+-------------------
+
+ Doorbell Registers are used by the hosts to interrupt each other.
+
+Memory Window:
+--------------
+
+ Actual transfer of data between the two hosts will happen using the
+ memory window.
+
+Modeling Constructs:
+====================
+
+There are 5 or more distinct regions (config, self scratchpad, peer
+scratchpad, doorbell, one or more memory windows) to be modeled to achieve
+NTB functionality. At least one memory window is required while more than
+one is permitted. All these regions should be mapped to BARs for hosts to
+access these regions.
+
+If one 32-bit BAR is allocated for each of these regions, the scheme would
+look like this:
+
+====== ===============
+BAR NO CONSTRUCTS USED
+====== ===============
+BAR0 Config Region
+BAR1 Self Scratchpad
+BAR2 Peer Scratchpad
+BAR3 Doorbell
+BAR4 Memory Window 1
+BAR5 Memory Window 2
+====== ===============
+
+However if we allocate a separate BAR for each of the regions, there would not
+be enough BARs for all the regions in a platform that supports only 64-bit
+BARs.
+
+In order to be supported by most of the platforms, the regions should be
+packed and mapped to BARs in a way that provides NTB functionality and
+also makes sure the host doesn't access any region that it is not supposed
+to.
+
+The following scheme is used in EPF NTB Function:
+
+====== ===============================
+BAR NO CONSTRUCTS USED
+====== ===============================
+BAR0 Config Region + Self Scratchpad
+BAR1 Peer Scratchpad
+BAR2 Doorbell + Memory Window 1
+BAR3 Memory Window 2
+BAR4 Memory Window 3
+BAR5 Memory Window 4
+====== ===============================
+
+With this scheme, for the basic NTB functionality 3 BARs should be sufficient.
+
+Modeling Config/Scratchpad Region:
+----------------------------------
+
+.. code-block:: text
+
+ +-----------------+------->+------------------+ +-----------------+
+ | BAR0 | | CONFIG REGION | | BAR0 |
+ +-----------------+----+ +------------------+<-------+-----------------+
+ | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ +-----------------+ +-->+------------------+<-------+-----------------+
+ | BAR2 | Local Memory | BAR2 |
+ +-----------------+ +-----------------+
+ | BAR3 | | BAR3 |
+ +-----------------+ +-----------------+
+ | BAR4 | | BAR4 |
+ +-----------------+ +-----------------+
+ | BAR5 | | BAR5 |
+ +-----------------+ +-----------------+
+ EP CONTROLLER 1 EP CONTROLLER 2
+
+The above diagram shows the Config region + Scratchpad region for HOST1
+(connected to EP controller 1) allocated in local memory. HOST1 can access
+the config region and the scratchpad region (self scratchpad) using BAR0
+of EP controller 1. The peer host (HOST2, connected to EP controller 2)
+can also access this scratchpad region (peer scratchpad) using BAR1 of EP
+controller 2. The diagram shows the case where the Config and Scratchpad
+regions are allocated for HOST1; the same applies for HOST2.
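+
+On the endpoint side, exposing such a region to a host amounts to assigning
+the memory backing it to a BAR of the corresponding endpoint controller.
+The sketch below is only an illustration, not the pci-epf-ntb
+implementation: pci_epc_set_bar() is the real endpoint-controller API (its
+prototype can differ between kernel versions), while the surrounding names
+are made up for the example.
+
+.. code-block:: c
+
+   /*
+    * Hypothetical sketch: expose a local buffer (for example the combined
+    * Config Region + Self Scratchpad) to a host through BAR0.
+    */
+   static int epf_ntb_expose_region(struct pci_epc *epc, u8 func_no,
+                                    struct pci_epf_bar *epf_bar,
+                                    phys_addr_t phys_addr, size_t size)
+   {
+           epf_bar->phys_addr = phys_addr; /* local memory backing the region */
+           epf_bar->size      = size;
+           epf_bar->barno     = BAR_0;
+           epf_bar->flags     = PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+           return pci_epc_set_bar(epc, func_no, epf_bar);
+   }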
+
+Modeling Doorbell/Memory Window 1:
+----------------------------------
+
+.. code-block:: text
+
+ +-----------------+ +----->+----------------+-----------+-----------------+
+ | BAR0 | | | Doorbell 1 +-----------> MSI-X ADDRESS 1 |
+ +-----------------+ | +----------------+ +-----------------+
+ | BAR1 | | | Doorbell 2 +---------+ | |
+ +-----------------+----+ +----------------+ | | |
+ | BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ +-----------------+----+ +----------------+ | +-> MSI-X ADDRESS 2 |
+ | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ +-----------------+ | |----------------+ | | | |
+ | BAR4 | | | | | | +-----------------+
+ +-----------------+ | | MW1 +---+ | +-->+ MSI-X ADDRESS 3||
+ | BAR5 | | | | | | +-----------------+
+ +-----------------+ +----->-----------------+ | | | |
+ EP CONTROLLER 1 | | | | +-----------------+
+ | | | +---->+ MSI-X ADDRESS 4 |
+ +----------------+ | +-----------------+
+ EP CONTROLLER 2 | | |
+ (OB SPACE) | | |
+ +-------> MW1 |
+ | |
+ | |
+ +-----------------+
+ | |
+ | |
+ | |
+ | |
+ | |
+ +-----------------+
+ PCI Address Space
+ (Managed by HOST2)
+
+The above diagram shows how the doorbell and memory window 1 (MW1) are
+mapped so that HOST1 can raise a doorbell interrupt on HOST2 and also
+access buffers exposed by HOST2 through MW1. Here the doorbell and memory
+window 1 regions are allocated in EP controller 2's outbound (OB) address
+space. Allocating and configuring BARs for the doorbell and memory window 1
+is done during the initialization phase of the NTB endpoint function
+driver. The mapping from EP controller 2's OB space to the PCI address
+space is done when HOST2 sends CMD_CONFIGURE_MW/CMD_CONFIGURE_DOORBELL.
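+
+The outbound translation itself is programmed through the endpoint
+controller API. The sketch below is illustrative only: pci_epc_map_addr()
+is the real API used to set up an outbound window (its prototype can
+differ between kernel versions), and the other names are made up for the
+example.
+
+.. code-block:: c
+
+   /*
+    * Hypothetical sketch of CMD_CONFIGURE_MW handling on the endpoint
+    * side: translate the outbound region reserved for a memory window to
+    * the buffer address advertised by the host.
+    */
+   static int epf_ntb_configure_mw(struct pci_epc *epc, u8 func_no,
+                                   phys_addr_t mw_phys_addr,
+                                   u64 host_pci_addr, size_t size)
+   {
+           return pci_epc_map_addr(epc, func_no, mw_phys_addr,
+                                   host_pci_addr, size);
+   }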
+
+Modeling Optional Memory Windows:
+---------------------------------
+
+These are modeled the same way as MW1, but each of the additional memory
+windows is mapped to a separate BAR.
diff --git a/Documentation/PCI/endpoint/pci-ntb-howto.rst b/Documentation/PCI/endpoint/pci-ntb-howto.rst
new file mode 100644
index 000000000000..1884bf29caba
--- /dev/null
+++ b/Documentation/PCI/endpoint/pci-ntb-howto.rst
@@ -0,0 +1,161 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================================================================
+PCI Non-Transparent Bridge (NTB) Endpoint Function (EPF) User Guide
+===================================================================
+
+:Author: Kishon Vijay Abraham I <kishon@ti.com>
+
+This document is a guide to help users use the pci-epf-ntb function driver
+and the ntb_hw_epf host driver for NTB functionality. The steps to be
+followed on the host side and on the EP side are given below. For the
+hardware configuration and internals of NTB using configurable endpoints, see
+Documentation/PCI/endpoint/pci-ntb-function.rst
+
+Endpoint Device
+===============
+
+Endpoint Controller Devices
+---------------------------
+
+To implement NTB functionality, at least two endpoint controller devices
+are required.
+
+To find the list of endpoint controller devices in the system::
+
+ # ls /sys/class/pci_epc/
+ 2900000.pcie-ep 2910000.pcie-ep
+
+If PCI_ENDPOINT_CONFIGFS is enabled::
+
+ # ls /sys/kernel/config/pci_ep/controllers
+ 2900000.pcie-ep 2910000.pcie-ep
+
+
+Endpoint Function Drivers
+-------------------------
+
+To find the list of endpoint function drivers in the system::
+
+ # ls /sys/bus/pci-epf/drivers
+ pci_epf_ntb pci_epf_ntb
+
+If PCI_ENDPOINT_CONFIGFS is enabled::
+
+ # ls /sys/kernel/config/pci_ep/functions
+ pci_epf_ntb pci_epf_ntb
+
+
+Creating pci-epf-ntb Device
+----------------------------
+
+A PCI endpoint function device can be created using configfs. To create the
+pci-epf-ntb device, the following commands can be used::
+
+ # mount -t configfs none /sys/kernel/config
+ # cd /sys/kernel/config/pci_ep/
+ # mkdir functions/pci_epf_ntb/func1
+
+The "mkdir func1" above creates the pci-epf-ntb function device that will
+be probed by pci_epf_ntb driver.
+
+The PCI endpoint framework populates the directory with the following
+configurable fields::
+
+ # ls functions/pci_epf_ntb/func1
+ baseclass_code deviceid msi_interrupts pci-epf-ntb.0
+ progif_code secondary subsys_id vendorid
+ cache_line_size interrupt_pin msix_interrupts primary
+ revid subclass_code subsys_vendor_id
+
+The PCI endpoint function driver populates these entries with default values
+when the device is bound to the driver. The pci-epf-ntb driver populates
+vendorid with 0xffff and interrupt_pin with 0x0001::
+
+ # cat functions/pci_epf_ntb/func1/vendorid
+ 0xffff
+ # cat functions/pci_epf_ntb/func1/interrupt_pin
+ 0x0001
+
+
+Configuring pci-epf-ntb Device
+-------------------------------
+
+The user can configure the pci-epf-ntb device using its configfs entry. To
+change the vendorid and the deviceid, the following commands can be used::
+
+ # echo 0x104c > functions/pci_epf_ntb/func1/vendorid
+ # echo 0xb00d > functions/pci_epf_ntb/func1/deviceid
+
+To configure NTB-specific attributes, a new sub-directory should be created
+under func1::
+
+ # mkdir functions/pci_epf_ntb/func1/pci_epf_ntb.0/
+
+The NTB function driver will populate this directory with various attributes
+that can be configured by the user::
+
+ # ls functions/pci_epf_ntb/func1/pci_epf_ntb.0/
+ db_count mw1 mw2 mw3 mw4 num_mws
+ spad_count
+
+A sample configuration for NTB function is given below::
+
+ # echo 4 > functions/pci_epf_ntb/func1/pci_epf_ntb.0/db_count
+ # echo 128 > functions/pci_epf_ntb/func1/pci_epf_ntb.0/spad_count
+ # echo 2 > functions/pci_epf_ntb/func1/pci_epf_ntb.0/num_mws
+ # echo 0x100000 > functions/pci_epf_ntb/func1/pci_epf_ntb.0/mw1
+ # echo 0x100000 > functions/pci_epf_ntb/func1/pci_epf_ntb.0/mw2
+
+Binding pci-epf-ntb Device to EP Controller
+--------------------------------------------
+
+The NTB function device should be attached to two PCI endpoint controllers,
+each connected to one of the two hosts. Use the 'primary' and 'secondary'
+entries inside the NTB function device to attach one PCI endpoint
+controller to the primary interface and the other PCI endpoint controller
+to the secondary interface::
+
+ # ln -s controllers/2900000.pcie-ep/ functions/pci_epf_ntb/func1/primary
+ # ln -s controllers/2910000.pcie-ep/ functions/pci_epf_ntb/func1/secondary
+
+Once the above step is completed, both the PCI endpoint controllers are ready to
+establish a link with the host.
+
+
+Start the Link
+--------------
+
+In order for the endpoint device to establish a link with the host, the
+'start' field should be populated with '1'. For NTB, both PCI endpoint
+controllers should establish a link with their respective host::
+
+ # echo 1 > controllers/2900000.pcie-ep/start
+ # echo 1 > controllers/2910000.pcie-ep/start
+
+
+RootComplex Device
+==================
+
+lspci Output
+------------
+
+Note that the devices listed here correspond to the values populated in the
+"Configuring pci-epf-ntb Device" section above::
+
+ # lspci
+ 0000:00:00.0 PCI bridge: Texas Instruments Device b00d
+ 0000:01:00.0 RAM memory: Texas Instruments Device b00d
+
+
+Using ntb_hw_epf Device
+-----------------------
+
+The host-side software follows the standard NTB software architecture in
+Linux. All the existing client-side NTB utilities, such as NTB Transport
+Client and NTB Netdev, NTB Ping Pong Test Client, and NTB Tool Test Client,
+can be used with the NTB function device.
+
+For more information on NTB see
+:doc:`Non-Transparent Bridge <../../driver-api/ntb>`
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.rst b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.rst
index 72f0f6fbd53c..6f89cf1e567d 100644
--- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.rst
+++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.rst
@@ -38,7 +38,7 @@ sections.
RCU-preempt Expedited Grace Periods
===================================
-``CONFIG_PREEMPT=y`` kernels implement RCU-preempt.
+``CONFIG_PREEMPTION=y`` kernels implement RCU-preempt.
The overall flow of the handling of a given CPU by an RCU-preempt
expedited grace period is shown in the following diagram:
@@ -112,7 +112,7 @@ things.
RCU-sched Expedited Grace Periods
---------------------------------
-``CONFIG_PREEMPT=n`` kernels implement RCU-sched. The overall flow of
+``CONFIG_PREEMPTION=n`` kernels implement RCU-sched. The overall flow of
the handling of a given CPU by an RCU-sched expedited grace period is
shown in the following diagram:
diff --git a/Documentation/RCU/Design/Requirements/Requirements.rst b/Documentation/RCU/Design/Requirements/Requirements.rst
index d4c9a016074b..38a39476fc24 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.rst
+++ b/Documentation/RCU/Design/Requirements/Requirements.rst
@@ -72,13 +72,13 @@ understanding of this guarantee.
RCU's grace-period guarantee allows updaters to wait for the completion
of all pre-existing RCU read-side critical sections. An RCU read-side
-critical section begins with the marker ``rcu_read_lock()`` and ends
-with the marker ``rcu_read_unlock()``. These markers may be nested, and
+critical section begins with the marker rcu_read_lock() and ends
+with the marker rcu_read_unlock(). These markers may be nested, and
RCU treats a nested set as one big RCU read-side critical section.
-Production-quality implementations of ``rcu_read_lock()`` and
-``rcu_read_unlock()`` are extremely lightweight, and in fact have
+Production-quality implementations of rcu_read_lock() and
+rcu_read_unlock() are extremely lightweight, and in fact have
exactly zero overhead in Linux kernels built for production use with
-``CONFIG_PREEMPT=n``.
+``CONFIG_PREEMPTION=n``.
This guarantee allows ordering to be enforced with extremely low
overhead to readers, for example:
@@ -102,12 +102,12 @@ overhead to readers, for example:
15 WRITE_ONCE(y, 1);
16 }
-Because the ``synchronize_rcu()`` on line 14 waits for all pre-existing
-readers, any instance of ``thread0()`` that loads a value of zero from
-``x`` must complete before ``thread1()`` stores to ``y``, so that
+Because the synchronize_rcu() on line 14 waits for all pre-existing
+readers, any instance of thread0() that loads a value of zero from
+``x`` must complete before thread1() stores to ``y``, so that
instance must also load a value of zero from ``y``. Similarly, any
-instance of ``thread0()`` that loads a value of one from ``y`` must have
-started after the ``synchronize_rcu()`` started, and must therefore also
+instance of thread0() that loads a value of one from ``y`` must have
+started after the synchronize_rcu() started, and must therefore also
load a value of one from ``x``. Therefore, the outcome:
::
@@ -121,14 +121,14 @@ cannot happen.
+-----------------------------------------------------------------------+
| Wait a minute! You said that updaters can make useful forward |
| progress concurrently with readers, but pre-existing readers will |
-| block ``synchronize_rcu()``!!! |
+| block synchronize_rcu()!!! |
| Just who are you trying to fool??? |
+-----------------------------------------------------------------------+
| **Answer**: |
+-----------------------------------------------------------------------+
| First, if updaters do not wish to be blocked by readers, they can use |
-| ``call_rcu()`` or ``kfree_rcu()``, which will be discussed later. |
-| Second, even when using ``synchronize_rcu()``, the other update-side |
+| call_rcu() or kfree_rcu(), which will be discussed later. |
+| Second, even when using synchronize_rcu(), the other update-side |
| code does run concurrently with readers, whether pre-existing or not. |
+-----------------------------------------------------------------------+
@@ -170,34 +170,34 @@ recovery from node failure, more or less as follows:
29 WRITE_ONCE(state, STATE_NORMAL);
30 }
-The RCU read-side critical section in ``do_something_dlm()`` works with
-the ``synchronize_rcu()`` in ``start_recovery()`` to guarantee that
-``do_something()`` never runs concurrently with ``recovery()``, but with
-little or no synchronization overhead in ``do_something_dlm()``.
+The RCU read-side critical section in do_something_dlm() works with
+the synchronize_rcu() in start_recovery() to guarantee that
+do_something() never runs concurrently with recovery(), but with
+little or no synchronization overhead in do_something_dlm().
+-----------------------------------------------------------------------+
| **Quick Quiz**: |
+-----------------------------------------------------------------------+
-| Why is the ``synchronize_rcu()`` on line 28 needed? |
+| Why is the synchronize_rcu() on line 28 needed? |
+-----------------------------------------------------------------------+
| **Answer**: |
+-----------------------------------------------------------------------+
| Without that extra grace period, memory reordering could result in |
-| ``do_something_dlm()`` executing ``do_something()`` concurrently with |
-| the last bits of ``recovery()``. |
+| do_something_dlm() executing do_something() concurrently with |
+| the last bits of recovery(). |
+-----------------------------------------------------------------------+
In order to avoid fatal problems such as deadlocks, an RCU read-side
-critical section must not contain calls to ``synchronize_rcu()``.
+critical section must not contain calls to synchronize_rcu().
Similarly, an RCU read-side critical section must not contain anything
that waits, directly or indirectly, on completion of an invocation of
-``synchronize_rcu()``.
+synchronize_rcu().
Although RCU's grace-period guarantee is useful in and of itself, with
`quite a few use cases <https://lwn.net/Articles/573497/>`__, it would
be good to be able to use RCU to coordinate read-side access to linked
data structures. For this, the grace-period guarantee is not sufficient,
-as can be seen in function ``add_gp_buggy()`` below. We will look at the
+as can be seen in function add_gp_buggy() below. We will look at the
reader's code later, but in the meantime, just think of the reader as
locklessly picking up the ``gp`` pointer, and, if the value loaded is
non-\ ``NULL``, locklessly accessing the ``->a`` and ``->b`` fields.
@@ -256,8 +256,8 @@ Publish/Subscribe Guarantee
RCU's publish-subscribe guarantee allows data to be inserted into a
linked data structure without disrupting RCU readers. The updater uses
-``rcu_assign_pointer()`` to insert the new data, and readers use
-``rcu_dereference()`` to access data, whether new or old. The following
+rcu_assign_pointer() to insert the new data, and readers use
+rcu_dereference() to access data, whether new or old. The following
shows an example of insertion:
::
@@ -279,7 +279,7 @@ shows an example of insertion:
15 return true;
16 }
-The ``rcu_assign_pointer()`` on line 13 is conceptually equivalent to a
+The rcu_assign_pointer() on line 13 is conceptually equivalent to a
simple assignment statement, but also guarantees that its assignment
will happen after the two assignments in lines 11 and 12, similar to the
C11 ``memory_order_release`` store operation. It also prevents any
@@ -289,7 +289,7 @@ number of “interesting” compiler optimizations, for example, the use of
+-----------------------------------------------------------------------+
| **Quick Quiz**: |
+-----------------------------------------------------------------------+
-| But ``rcu_assign_pointer()`` does nothing to prevent the two |
+| But rcu_assign_pointer() does nothing to prevent the two |
| assignments to ``p->a`` and ``p->b`` from being reordered. Can't that |
| also cause problems? |
+-----------------------------------------------------------------------+
@@ -303,7 +303,7 @@ number of “interesting” compiler optimizations, for example, the use of
It is tempting to assume that the reader need not do anything special to
control its accesses to the RCU-protected data, as shown in
-``do_something_gp_buggy()`` below:
+do_something_gp_buggy() below:
::
@@ -321,11 +321,10 @@ control its accesses to the RCU-protected data, as shown in
12 }
However, this temptation must be resisted because there are a
-surprisingly large number of ways that the compiler (to say nothing of
-`DEC Alpha CPUs <https://h71000.www7.hp.com/wizard/wiz_2637.html>`__)
-can trip this code up. For but one example, if the compiler were short
-of registers, it might choose to refetch from ``gp`` rather than keeping
-a separate copy in ``p`` as follows:
+surprisingly large number of ways that the compiler (or weak ordering
+CPUs like the DEC Alpha) can trip this code up. For but one example, if
+the compiler were short of registers, it might choose to refetch from
+``gp`` rather than keeping a separate copy in ``p`` as follows:
::
@@ -345,7 +344,7 @@ If this function ran concurrently with a series of updates that replaced
the current structure with a new one, the fetches of ``gp->a`` and
``gp->b`` might well come from two different structures, which could
cause serious confusion. To prevent this (and much else besides),
-``do_something_gp()`` uses ``rcu_dereference()`` to fetch from ``gp``:
+do_something_gp() uses rcu_dereference() to fetch from ``gp``:
::
@@ -362,21 +361,21 @@ cause serious confusion. To prevent this (and much else besides),
11 return false;
12 }
-The ``rcu_dereference()`` uses volatile casts and (for DEC Alpha) memory
+The rcu_dereference() uses volatile casts and (for DEC Alpha) memory
barriers in the Linux kernel. Should a `high-quality implementation of
C11 ``memory_order_consume``
[PDF] <http://www.rdrop.com/users/paulmck/RCU/consume.2015.07.13a.pdf>`__
-ever appear, then ``rcu_dereference()`` could be implemented as a
+ever appear, then rcu_dereference() could be implemented as a
``memory_order_consume`` load. Regardless of the exact implementation, a
-pointer fetched by ``rcu_dereference()`` may not be used outside of the
+pointer fetched by rcu_dereference() may not be used outside of the
outermost RCU read-side critical section containing that
-``rcu_dereference()``, unless protection of the corresponding data
+rcu_dereference(), unless protection of the corresponding data
element has been passed from RCU to some other synchronization
mechanism, most commonly locking or `reference
counting <https://www.kernel.org/doc/Documentation/RCU/rcuref.txt>`__.
-In short, updaters use ``rcu_assign_pointer()`` and readers use
-``rcu_dereference()``, and these two RCU API elements work together to
+In short, updaters use rcu_assign_pointer() and readers use
+rcu_dereference(), and these two RCU API elements work together to
ensure that readers have a consistent view of newly added data elements.
Of course, it is also necessary to remove elements from RCU-protected
@@ -388,9 +387,9 @@ data structures, for example, using the following process:
the newly removed data element).
#. At this point, only the updater has a reference to the newly removed
data element, so it can safely reclaim the data element, for example,
- by passing it to ``kfree()``.
+ by passing it to kfree().
-This process is implemented by ``remove_gp_synchronous()``:
+This process is implemented by remove_gp_synchronous():
::
@@ -413,16 +412,16 @@ This process is implemented by ``remove_gp_synchronous()``:
This function is straightforward, with line 13 waiting for a grace
period before line 14 frees the old data element. This waiting ensures
-that readers will reach line 7 of ``do_something_gp()`` before the data
-element referenced by ``p`` is freed. The ``rcu_access_pointer()`` on
-line 6 is similar to ``rcu_dereference()``, except that:
+that readers will reach line 7 of do_something_gp() before the data
+element referenced by ``p`` is freed. The rcu_access_pointer() on
+line 6 is similar to rcu_dereference(), except that:
-#. The value returned by ``rcu_access_pointer()`` cannot be
+#. The value returned by rcu_access_pointer() cannot be
dereferenced. If you want to access the value pointed to as well as
- the pointer itself, use ``rcu_dereference()`` instead of
- ``rcu_access_pointer()``.
-#. The call to ``rcu_access_pointer()`` need not be protected. In
- contrast, ``rcu_dereference()`` must either be within an RCU
+ the pointer itself, use rcu_dereference() instead of
+ rcu_access_pointer().
+#. The call to rcu_access_pointer() need not be protected. In
+ contrast, rcu_dereference() must either be within an RCU
read-side critical section or in a code segment where the pointer
cannot change, for example, in code protected by the corresponding
update-side lock.
@@ -430,13 +429,13 @@ line 6 is similar to ``rcu_dereference()``, except that:
+-----------------------------------------------------------------------+
| **Quick Quiz**: |
+-----------------------------------------------------------------------+
-| Without the ``rcu_dereference()`` or the ``rcu_access_pointer()``, |
+| Without the rcu_dereference() or the rcu_access_pointer(), |
| what destructive optimizations might the compiler make use of? |
+-----------------------------------------------------------------------+
| **Answer**: |
+-----------------------------------------------------------------------+
-| Let's start with what happens to ``do_something_gp()`` if it fails to |
-| use ``rcu_dereference()``. It could reuse a value formerly fetched |
+| Let's start with what happens to do_something_gp() if it fails to |
+| use rcu_dereference(). It could reuse a value formerly fetched |
| from this same pointer. It could also fetch the pointer from ``gp`` |
| in a byte-at-a-time manner, resulting in *load tearing*, in turn |
| resulting a bytewise mash-up of two distinct pointer values. It might |
@@ -445,15 +444,15 @@ line 6 is similar to ``rcu_dereference()``, except that:
| update has changed the pointer to match the wrong guess. Too bad |
| about any dereferences that returned pre-initialization garbage in |
| the meantime! |
-| For ``remove_gp_synchronous()``, as long as all modifications to |
+| For remove_gp_synchronous(), as long as all modifications to |
| ``gp`` are carried out while holding ``gp_lock``, the above |
| optimizations are harmless. However, ``sparse`` will complain if you |
| define ``gp`` with ``__rcu`` and then access it without using either |
-| ``rcu_access_pointer()`` or ``rcu_dereference()``. |
+| rcu_access_pointer() or rcu_dereference(). |
+-----------------------------------------------------------------------+
In short, RCU's publish-subscribe guarantee is provided by the
-combination of ``rcu_assign_pointer()`` and ``rcu_dereference()``. This
+combination of rcu_assign_pointer() and rcu_dereference(). This
guarantee allows data elements to be safely added to RCU-protected
linked data structures without disrupting RCU readers. This guarantee
can be used in combination with the grace-period guarantee to also allow
@@ -462,9 +461,9 @@ again without disrupting RCU readers.
This guarantee was only partially premeditated. DYNIX/ptx used an
explicit memory barrier for publication, but had nothing resembling
-``rcu_dereference()`` for subscription, nor did it have anything
+rcu_dereference() for subscription, nor did it have anything
resembling the dependency-ordering barrier that was later subsumed
-into ``rcu_dereference()`` and later still into ``READ_ONCE()``. The
+into rcu_dereference() and later still into READ_ONCE(). The
need for these operations made itself known quite suddenly at a
late-1990s meeting with the DEC Alpha architects, back in the days when
DEC was still a free-standing company. It took the Alpha architects a
@@ -474,7 +473,7 @@ documentation did not make this point clear. More recent work with the C
and C++ standards committees have provided much education on tricks and
traps from the compiler. In short, compilers were much less tricky in
the early 1990s, but in 2015, don't even think about omitting
-``rcu_dereference()``!
+rcu_dereference()!
Memory-Barrier Guarantees
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -484,31 +483,31 @@ demonstrates the need for RCU's stringent memory-ordering guarantees on
systems with more than one CPU:
#. Each CPU that has an RCU read-side critical section that begins
- before ``synchronize_rcu()`` starts is guaranteed to execute a full
+ before synchronize_rcu() starts is guaranteed to execute a full
memory barrier between the time that the RCU read-side critical
- section ends and the time that ``synchronize_rcu()`` returns. Without
+ section ends and the time that synchronize_rcu() returns. Without
this guarantee, a pre-existing RCU read-side critical section might
hold a reference to the newly removed ``struct foo`` after the
- ``kfree()`` on line 14 of ``remove_gp_synchronous()``.
+ kfree() on line 14 of remove_gp_synchronous().
#. Each CPU that has an RCU read-side critical section that ends after
- ``synchronize_rcu()`` returns is guaranteed to execute a full memory
- barrier between the time that ``synchronize_rcu()`` begins and the
+ synchronize_rcu() returns is guaranteed to execute a full memory
+ barrier between the time that synchronize_rcu() begins and the
time that the RCU read-side critical section begins. Without this
guarantee, a later RCU read-side critical section running after the
- ``kfree()`` on line 14 of ``remove_gp_synchronous()`` might later run
- ``do_something_gp()`` and find the newly deleted ``struct foo``.
-#. If the task invoking ``synchronize_rcu()`` remains on a given CPU,
+ kfree() on line 14 of remove_gp_synchronous() might later run
+ do_something_gp() and find the newly deleted ``struct foo``.
+#. If the task invoking synchronize_rcu() remains on a given CPU,
then that CPU is guaranteed to execute a full memory barrier sometime
- during the execution of ``synchronize_rcu()``. This guarantee ensures
- that the ``kfree()`` on line 14 of ``remove_gp_synchronous()`` really
+ during the execution of synchronize_rcu(). This guarantee ensures
+ that the kfree() on line 14 of remove_gp_synchronous() really
does execute after the removal on line 11.
-#. If the task invoking ``synchronize_rcu()`` migrates among a group of
+#. If the task invoking synchronize_rcu() migrates among a group of
CPUs during that invocation, then each of the CPUs in that group is
guaranteed to execute a full memory barrier sometime during the
- execution of ``synchronize_rcu()``. This guarantee also ensures that
- the ``kfree()`` on line 14 of ``remove_gp_synchronous()`` really does
+ execution of synchronize_rcu(). This guarantee also ensures that
+ the kfree() on line 14 of remove_gp_synchronous() really does
execute after the removal on line 11, but also in the case where the
- thread executing the ``synchronize_rcu()`` migrates in the meantime.
+ thread executing the synchronize_rcu() migrates in the meantime.
+-----------------------------------------------------------------------+
| **Quick Quiz**: |
@@ -516,19 +515,19 @@ systems with more than one CPU:
| Given that multiple CPUs can start RCU read-side critical sections at |
| any time without any ordering whatsoever, how can RCU possibly tell |
| whether or not a given RCU read-side critical section starts before a |
-| given instance of ``synchronize_rcu()``? |
+| given instance of synchronize_rcu()? |
+-----------------------------------------------------------------------+
| **Answer**: |
+-----------------------------------------------------------------------+
| If RCU cannot tell whether or not a given RCU read-side critical |
-| section starts before a given instance of ``synchronize_rcu()``, then |
+| section starts before a given instance of synchronize_rcu(), then |
| it must assume that the RCU read-side critical section started first. |
-| In other words, a given instance of ``synchronize_rcu()`` can avoid |
+| In other words, a given instance of synchronize_rcu() can avoid |
| waiting on a given RCU read-side critical section only if it can |
-| prove that ``synchronize_rcu()`` started first. |
-| A related question is “When ``rcu_read_lock()`` doesn't generate any |
+| prove that synchronize_rcu() started first. |
+| A related question is “When rcu_read_lock() doesn't generate any |
| code, why does it matter how it relates to a grace period?” The |
-| answer is that it is not the relationship of ``rcu_read_lock()`` |
+| answer is that it is not the relationship of rcu_read_lock() |
| itself that is important, but rather the relationship of the code |
| within the enclosed RCU read-side critical section to the code |
| preceding and following the grace period. If we take this viewpoint, |
@@ -556,14 +555,14 @@ systems with more than one CPU:
| Yes, they really are required. To see why the first guarantee is |
| required, consider the following sequence of events: |
| |
-| #. CPU 1: ``rcu_read_lock()`` |
+| #. CPU 1: rcu_read_lock() |
| #. CPU 1: ``q = rcu_dereference(gp); /* Very likely to return p. */`` |
| #. CPU 0: ``list_del_rcu(p);`` |
-| #. CPU 0: ``synchronize_rcu()`` starts. |
+| #. CPU 0: synchronize_rcu() starts. |
| #. CPU 1: ``do_something_with(q->a);`` |
| ``/* No smp_mb(), so might happen after kfree(). */`` |
-| #. CPU 1: ``rcu_read_unlock()`` |
-| #. CPU 0: ``synchronize_rcu()`` returns. |
+| #. CPU 1: rcu_read_unlock() |
+| #. CPU 0: synchronize_rcu() returns. |
| #. CPU 0: ``kfree(p);`` |
| |
| Therefore, there absolutely must be a full memory barrier between the |
@@ -574,14 +573,14 @@ systems with more than one CPU:
| is roughly similar: |
| |
| #. CPU 0: ``list_del_rcu(p);`` |
-| #. CPU 0: ``synchronize_rcu()`` starts. |
-| #. CPU 1: ``rcu_read_lock()`` |
+| #. CPU 0: synchronize_rcu() starts. |
+| #. CPU 1: rcu_read_lock() |
| #. CPU 1: ``q = rcu_dereference(gp);`` |
| ``/* Might return p if no memory barrier. */`` |
-| #. CPU 0: ``synchronize_rcu()`` returns. |
+| #. CPU 0: synchronize_rcu() returns. |
| #. CPU 0: ``kfree(p);`` |
| #. CPU 1: ``do_something_with(q->a); /* Boom!!! */`` |
-| #. CPU 1: ``rcu_read_unlock()`` |
+| #. CPU 1: rcu_read_unlock() |
| |
| And similarly, without a memory barrier between the beginning of the |
| grace period and the beginning of the RCU read-side critical section, |
@@ -597,7 +596,7 @@ systems with more than one CPU:
+-----------------------------------------------------------------------+
| **Quick Quiz**: |
+-----------------------------------------------------------------------+
-| You claim that ``rcu_read_lock()`` and ``rcu_read_unlock()`` generate |
+| You claim that rcu_read_lock() and rcu_read_unlock() generate |
| absolutely no code in some kernel builds. This means that the |
| compiler might arbitrarily rearrange consecutive RCU read-side |
| critical sections. Given such rearrangement, if a given RCU read-side |
@@ -607,11 +606,11 @@ systems with more than one CPU:
+-----------------------------------------------------------------------+
| **Answer**: |
+-----------------------------------------------------------------------+
-| In cases where ``rcu_read_lock()`` and ``rcu_read_unlock()`` generate |
+| In cases where rcu_read_lock() and rcu_read_unlock() generate |
| absolutely no code, RCU infers quiescent states only at special |
| locations, for example, within the scheduler. Because calls to |
-| ``schedule()`` had better prevent calling-code accesses to shared |
-| variables from being rearranged across the call to ``schedule()``, if |
+| schedule() had better prevent calling-code accesses to shared |
+| variables from being rearranged across the call to schedule(), if |
| RCU detects the end of a given RCU read-side critical section, it |
| will necessarily detect the end of all prior RCU read-side critical |
| sections, no matter how aggressively the compiler scrambles the code. |
@@ -655,8 +654,8 @@ read-side critical section might search for a given data element, and
then might acquire the update-side spinlock in order to update that
element, all while remaining in that RCU read-side critical section. Of
course, it is necessary to exit the RCU read-side critical section
-before invoking ``synchronize_rcu()``, however, this inconvenience can
-be avoided through use of the ``call_rcu()`` and ``kfree_rcu()`` API
+before invoking synchronize_rcu(), however, this inconvenience can
+be avoided through use of the call_rcu() and kfree_rcu() API
members described later in this document.
+-----------------------------------------------------------------------+
@@ -694,10 +693,10 @@ these non-guarantees were premeditated.
Readers Impose Minimal Ordering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Reader-side markers such as ``rcu_read_lock()`` and
-``rcu_read_unlock()`` provide absolutely no ordering guarantees except
+Reader-side markers such as rcu_read_lock() and
+rcu_read_unlock() provide absolutely no ordering guarantees except
through their interaction with the grace-period APIs such as
-``synchronize_rcu()``. To see this, consider the following pair of
+synchronize_rcu(). To see this, consider the following pair of
threads:
::
@@ -722,7 +721,7 @@ threads:
18 rcu_read_unlock();
19 }
-After ``thread0()`` and ``thread1()`` execute concurrently, it is quite
+After thread0() and thread1() execute concurrently, it is quite
possible to have
::
@@ -730,7 +729,7 @@ possible to have
(r1 == 1 && r2 == 0)
(that is, ``y`` appears to have been assigned before ``x``), which would
-not be possible if ``rcu_read_lock()`` and ``rcu_read_unlock()`` had
+not be possible if rcu_read_lock() and rcu_read_unlock() had
much in the way of ordering properties. But they do not, so the CPU is
within its rights to do significant reordering. This is by design: Any
significant ordering constraints would slow down these fast-path APIs.
@@ -742,14 +741,14 @@ significant ordering constraints would slow down these fast-path APIs.
+-----------------------------------------------------------------------+
| **Answer**: |
+-----------------------------------------------------------------------+
-| No, the volatile casts in ``READ_ONCE()`` and ``WRITE_ONCE()`` |
+| No, the volatile casts in READ_ONCE() and WRITE_ONCE() |
| prevent the compiler from reordering in this particular case. |
+-----------------------------------------------------------------------+
Readers Do Not Exclude Updaters
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Neither ``rcu_read_lock()`` nor ``rcu_read_unlock()`` exclude updates.
+Neither rcu_read_lock() nor rcu_read_unlock() exclude updates.
All they do is to prevent grace periods from ending. The following
example illustrates this:
@@ -775,19 +774,19 @@ example illustrates this:
18 spin_unlock(&my_lock);
19 }
-If the ``thread0()`` function's ``rcu_read_lock()`` excluded the
-``thread1()`` function's update, the ``WARN_ON()`` could never fire. But
-the fact is that ``rcu_read_lock()`` does not exclude much of anything
-aside from subsequent grace periods, of which ``thread1()`` has none, so
-the ``WARN_ON()`` can and does fire.
+If the thread0() function's rcu_read_lock() excluded the
+thread1() function's update, the WARN_ON() could never fire. But
+the fact is that rcu_read_lock() does not exclude much of anything
+aside from subsequent grace periods, of which thread1() has none, so
+the WARN_ON() can and does fire.
Updaters Only Wait For Old Readers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-It might be tempting to assume that after ``synchronize_rcu()``
+It might be tempting to assume that after synchronize_rcu()
completes, there are no readers executing. This temptation must be
avoided because new readers can start immediately after
-``synchronize_rcu()`` starts, and ``synchronize_rcu()`` is under no
+synchronize_rcu() starts, and synchronize_rcu() is under no
obligation to wait for these new readers.
+-----------------------------------------------------------------------+
@@ -799,10 +798,10 @@ obligation to wait for these new readers.
+-----------------------------------------------------------------------+
| **Answer**: |
+-----------------------------------------------------------------------+
-| For no time at all. Even if ``synchronize_rcu()`` were to wait until |
+| For no time at all. Even if synchronize_rcu() were to wait until |
| all readers had completed, a new reader might start immediately after |
-| ``synchronize_rcu()`` completed. Therefore, the code following |
-| ``synchronize_rcu()`` can *never* rely on there being no readers. |
+| synchronize_rcu() completed. Therefore, the code following |
+| synchronize_rcu() can *never* rely on there being no readers. |
+-----------------------------------------------------------------------+
Grace Periods Don't Partition Read-Side Critical Sections
@@ -892,12 +891,12 @@ period is known to end before the second grace period starts:
28 rcu_read_unlock();
29 }
-Here, if ``(r1 == 1)``, then ``thread0()``'s write to ``b`` must happen
-before the end of ``thread1()``'s grace period. If in addition
-``(r4 == 1)``, then ``thread3()``'s read from ``b`` must happen after
-the beginning of ``thread2()``'s grace period. If it is also the case
-that ``(r2 == 1)``, then the end of ``thread1()``'s grace period must
-precede the beginning of ``thread2()``'s grace period. This mean that
+Here, if ``(r1 == 1)``, then thread0()'s write to ``b`` must happen
+before the end of thread1()'s grace period. If in addition
+``(r4 == 1)``, then thread3()'s read from ``b`` must happen after
+the beginning of thread2()'s grace period. If it is also the case
+that ``(r2 == 1)``, then the end of thread1()'s grace period must
+precede the beginning of thread2()'s grace period. This means that
the two RCU read-side critical sections cannot overlap, guaranteeing
that ``(r3 == 1)``. As a result, the outcome:
@@ -1076,8 +1075,8 @@ is captured by the following list of situations:
b. Wait-free read-side primitives for real-time use.
This focus on read-mostly situations means that RCU must interoperate
-with other synchronization primitives. For example, the ``add_gp()`` and
-``remove_gp_synchronous()`` examples discussed earlier use RCU to
+with other synchronization primitives. For example, the add_gp() and
+remove_gp_synchronous() examples discussed earlier use RCU to
protect readers and locking to coordinate updaters. However, the need
extends much farther, requiring that a variety of synchronization
primitives be legal within RCU read-side critical sections, including
@@ -1104,11 +1103,11 @@ memory barriers.
| sections. |
| Note that it *is* legal for a normal RCU read-side critical section |
| to conditionally acquire a sleeping locks (as in |
-| ``mutex_trylock()``), but only as long as it does not loop |
+| mutex_trylock()), but only as long as it does not loop |
| indefinitely attempting to conditionally acquire that sleeping locks. |
-| The key point is that things like ``mutex_trylock()`` either return |
+| The key point is that things like mutex_trylock() either return |
| with the mutex held, or return an error indication if the mutex was |
-| not immediately available. Either way, ``mutex_trylock()`` returns |
+| not immediately available. Either way, mutex_trylock() returns |
| immediately without sleeping. |
+-----------------------------------------------------------------------+
@@ -1182,8 +1181,8 @@ and has become decreasingly so as memory sizes have expanded and memory
costs have plummeted. However, as I learned from Matt Mackall's
`bloatwatch <http://elinux.org/Linux_Tiny-FAQ>`__ efforts, memory
footprint is critically important on single-CPU systems with
-non-preemptible (``CONFIG_PREEMPT=n``) kernels, and thus `tiny
-RCU <https://lkml.kernel.org/g/20090113221724.GA15307@linux.vnet.ibm.com>`__
+non-preemptible (``CONFIG_PREEMPTION=n``) kernels, and thus `tiny
+RCU <https://lore.kernel.org/r/20090113221724.GA15307@linux.vnet.ibm.com>`__
was born. Josh Triplett has since taken over the small-memory banner
with his `Linux kernel tinification <https://tiny.wiki.kernel.org/>`__
project, which resulted in `SRCU <Sleepable RCU_>`__ becoming optional
@@ -1191,57 +1190,57 @@ for those kernels not needing it.
The remaining performance requirements are, for the most part,
unsurprising. For example, in keeping with RCU's read-side
-specialization, ``rcu_dereference()`` should have negligible overhead
+specialization, rcu_dereference() should have negligible overhead
(for example, suppression of a few minor compiler optimizations).
-Similarly, in non-preemptible environments, ``rcu_read_lock()`` and
-``rcu_read_unlock()`` should have exactly zero overhead.
+Similarly, in non-preemptible environments, rcu_read_lock() and
+rcu_read_unlock() should have exactly zero overhead.
In preemptible environments, in the case where the RCU read-side
critical section was not preempted (as will be the case for the
-highest-priority real-time process), ``rcu_read_lock()`` and
-``rcu_read_unlock()`` should have minimal overhead. In particular, they
+highest-priority real-time process), rcu_read_lock() and
+rcu_read_unlock() should have minimal overhead. In particular, they
should not contain atomic read-modify-write operations, memory-barrier
instructions, preemption disabling, interrupt disabling, or backwards
branches. However, in the case where the RCU read-side critical section
-was preempted, ``rcu_read_unlock()`` may acquire spinlocks and disable
+was preempted, rcu_read_unlock() may acquire spinlocks and disable
interrupts. This is why it is better to nest an RCU read-side critical
section within a preempt-disable region than vice versa, at least in
cases where that critical section is short enough to avoid unduly
degrading real-time latencies.
-The ``synchronize_rcu()`` grace-period-wait primitive is optimized for
+The synchronize_rcu() grace-period-wait primitive is optimized for
throughput. It may therefore incur several milliseconds of latency in
addition to the duration of the longest RCU read-side critical section.
On the other hand, multiple concurrent invocations of
-``synchronize_rcu()`` are required to use batching optimizations so that
+synchronize_rcu() are required to use batching optimizations so that
they can be satisfied by a single underlying grace-period-wait
operation. For example, in the Linux kernel, it is not unusual for a
single grace-period-wait operation to serve more than `1,000 separate
invocations <https://www.usenix.org/conference/2004-usenix-annual-technical-conference/making-rcu-safe-deep-sub-millisecond-response>`__
-of ``synchronize_rcu()``, thus amortizing the per-invocation overhead
+of synchronize_rcu(), thus amortizing the per-invocation overhead
down to nearly zero. However, the grace-period optimization is also
required to avoid measurable degradation of real-time scheduling and
interrupt latencies.
-In some cases, the multi-millisecond ``synchronize_rcu()`` latencies are
-unacceptable. In these cases, ``synchronize_rcu_expedited()`` may be
+In some cases, the multi-millisecond synchronize_rcu() latencies are
+unacceptable. In these cases, synchronize_rcu_expedited() may be
used instead, reducing the grace-period latency down to a few tens of
microseconds on small systems, at least in cases where the RCU read-side
critical sections are short. There are currently no special latency
-requirements for ``synchronize_rcu_expedited()`` on large systems, but,
+requirements for synchronize_rcu_expedited() on large systems, but,
consistent with the empirical nature of the RCU specification, that is
subject to change. However, there most definitely are scalability
-requirements: A storm of ``synchronize_rcu_expedited()`` invocations on
+requirements: A storm of synchronize_rcu_expedited() invocations on
4096 CPUs should at least make reasonable forward progress. In return
-for its shorter latencies, ``synchronize_rcu_expedited()`` is permitted
+for its shorter latencies, synchronize_rcu_expedited() is permitted
to impose modest degradation of real-time latency on non-idle online
CPUs. Here, “modest” means roughly the same latency degradation as a
scheduling-clock interrupt.
There are a number of situations where even
-``synchronize_rcu_expedited()``'s reduced grace-period latency is
-unacceptable. In these situations, the asynchronous ``call_rcu()`` can
-be used in place of ``synchronize_rcu()`` as follows:
+synchronize_rcu_expedited()'s reduced grace-period latency is
+unacceptable. In these situations, the asynchronous call_rcu() can
+be used in place of synchronize_rcu() as follows:
::
@@ -1275,19 +1274,19 @@ be used in place of ``synchronize_rcu()`` as follows:
28 }
A definition of ``struct foo`` is finally needed, and appears on
-lines 1-5. The function ``remove_gp_cb()`` is passed to ``call_rcu()``
+lines 1-5. The function remove_gp_cb() is passed to call_rcu()
on line 25, and will be invoked after the end of a subsequent grace
-period. This gets the same effect as ``remove_gp_synchronous()``, but
+period. This gets the same effect as remove_gp_synchronous(), but
without forcing the updater to wait for a grace period to elapse. The
-``call_rcu()`` function may be used in a number of situations where
-neither ``synchronize_rcu()`` nor ``synchronize_rcu_expedited()`` would
-be legal, including within preempt-disable code, ``local_bh_disable()``
+call_rcu() function may be used in a number of situations where
+neither synchronize_rcu() nor synchronize_rcu_expedited() would
+be legal, including within preempt-disable code, local_bh_disable()
code, interrupt-disable code, and interrupt handlers. However, even
-``call_rcu()`` is illegal within NMI handlers and from idle and offline
-CPUs. The callback function (``remove_gp_cb()`` in this case) will be
+call_rcu() is illegal within NMI handlers and from idle and offline
+CPUs. The callback function (remove_gp_cb() in this case) will be
executed within softirq (software interrupt) environment within the
Linux kernel, either within a real softirq handler or under the
-protection of ``local_bh_disable()``. In both the Linux kernel and in
+protection of local_bh_disable(). In both the Linux kernel and in
userspace, it is bad practice to write an RCU callback function that
takes too long. Long-running operations should be relegated to separate
threads or (in the Linux kernel) workqueues.
@@ -1295,23 +1294,23 @@ threads or (in the Linux kernel) workqueues.
+-----------------------------------------------------------------------+
| **Quick Quiz**: |
+-----------------------------------------------------------------------+
-| Why does line 19 use ``rcu_access_pointer()``? After all, |
-| ``call_rcu()`` on line 25 stores into the structure, which would |
+| Why does line 19 use rcu_access_pointer()? After all, |
+| call_rcu() on line 25 stores into the structure, which would |
| interact badly with concurrent insertions. Doesn't this mean that |
-| ``rcu_dereference()`` is required? |
+| rcu_dereference() is required? |
+-----------------------------------------------------------------------+
| **Answer**: |
+-----------------------------------------------------------------------+
| Presumably the ``->gp_lock`` acquired on line 18 excludes any |
-| changes, including any insertions that ``rcu_dereference()`` would |
+| changes, including any insertions that rcu_dereference() would |
| protect against. Therefore, any insertions will be delayed until |
| after ``->gp_lock`` is released on line 25, which in turn means that |
-| ``rcu_access_pointer()`` suffices. |
+| rcu_access_pointer() suffices. |
+-----------------------------------------------------------------------+
-However, all that ``remove_gp_cb()`` is doing is invoking ``kfree()`` on
+However, all that remove_gp_cb() is doing is invoking kfree() on
the data element. This is a common idiom, and is supported by
-``kfree_rcu()``, which allows “fire and forget” operation as shown
+kfree_rcu(), which allows “fire and forget” operation as shown
below:
::
@@ -1338,20 +1337,20 @@ below:
20 return true;
21 }
-Note that ``remove_gp_faf()`` simply invokes ``kfree_rcu()`` and
+Note that remove_gp_faf() simply invokes kfree_rcu() and
proceeds, without any need to pay any further attention to the
-subsequent grace period and ``kfree()``. It is permissible to invoke
-``kfree_rcu()`` from the same environments as for ``call_rcu()``.
-Interestingly enough, DYNIX/ptx had the equivalents of ``call_rcu()``
-and ``kfree_rcu()``, but not ``synchronize_rcu()``. This was due to the
+subsequent grace period and kfree(). It is permissible to invoke
+kfree_rcu() from the same environments as for call_rcu().
+Interestingly enough, DYNIX/ptx had the equivalents of call_rcu()
+and kfree_rcu(), but not synchronize_rcu(). This was due to the
fact that RCU was not heavily used within DYNIX/ptx, so the very few
-places that needed something like ``synchronize_rcu()`` simply
+places that needed something like synchronize_rcu() simply
open-coded it.
+-----------------------------------------------------------------------+
| **Quick Quiz**: |
+-----------------------------------------------------------------------+
-| Earlier it was claimed that ``call_rcu()`` and ``kfree_rcu()`` |
+| Earlier it was claimed that call_rcu() and kfree_rcu() |
| allowed updaters to avoid being blocked by readers. But how can that |
| be correct, given that the invocation of the callback and the freeing |
| of the memory (respectively) must still wait for a grace period to |
@@ -1363,16 +1362,16 @@ open-coded it.
| definition would say that updates in garbage-collected languages |
| cannot complete until the next time the garbage collector runs, which |
| does not seem at all reasonable. The key point is that in most cases, |
-| an updater using either ``call_rcu()`` or ``kfree_rcu()`` can proceed |
-| to the next update as soon as it has invoked ``call_rcu()`` or |
-| ``kfree_rcu()``, without having to wait for a subsequent grace |
+| an updater using either call_rcu() or kfree_rcu() can proceed |
+| to the next update as soon as it has invoked call_rcu() or |
+| kfree_rcu(), without having to wait for a subsequent grace |
| period. |
+-----------------------------------------------------------------------+
But what if the updater must wait for the completion of code to be
executed after the end of the grace period, but has other tasks that can
be carried out in the meantime? The polling-style
-``get_state_synchronize_rcu()`` and ``cond_synchronize_rcu()`` functions
+get_state_synchronize_rcu() and cond_synchronize_rcu() functions
may be used for this purpose, as shown below:
::
@@ -1397,11 +1396,11 @@ may be used for this purpose, as shown below:
18 return true;
19 }
-On line 14, ``get_state_synchronize_rcu()`` obtains a “cookie” from RCU,
+On line 14, get_state_synchronize_rcu() obtains a “cookie” from RCU,
then line 15 carries out other tasks, and finally, line 16 returns
immediately if a grace period has elapsed in the meantime, but otherwise
waits as required. The need for ``get_state_synchronize_rcu`` and
-``cond_synchronize_rcu()`` has appeared quite recently, so it is too
+cond_synchronize_rcu() has appeared quite recently, so it is too
early to tell whether they will stand the test of time.
RCU thus provides a range of tools to allow updaters to strike the
@@ -1421,8 +1420,8 @@ example, an infinite loop in an RCU read-side critical section must by
definition prevent later grace periods from ever completing. For a more
involved example, consider a 64-CPU system built with
``CONFIG_RCU_NOCB_CPU=y`` and booted with ``rcu_nocbs=1-63``, where
-CPUs 1 through 63 spin in tight loops that invoke ``call_rcu()``. Even
-if these tight loops also contain calls to ``cond_resched()`` (thus
+CPUs 1 through 63 spin in tight loops that invoke call_rcu(). Even
+if these tight loops also contain calls to cond_resched() (thus
allowing grace periods to complete), CPU 0 simply will not be able to
invoke callbacks as fast as the other 63 CPUs can register them, at
least not until the system runs out of memory. In both of these
@@ -1435,21 +1434,21 @@ RCU takes the following steps to encourage timely completion of grace
periods:
#. If a grace period fails to complete within 100 milliseconds, RCU
- causes future invocations of ``cond_resched()`` on the holdout CPUs
+ causes future invocations of cond_resched() on the holdout CPUs
to provide an RCU quiescent state. RCU also causes those CPUs'
- ``need_resched()`` invocations to return ``true``, but only after the
+ need_resched() invocations to return ``true``, but only after the
corresponding CPU's next scheduling-clock.
#. CPUs mentioned in the ``nohz_full`` kernel boot parameter can run
indefinitely in the kernel without scheduling-clock interrupts, which
- defeats the above ``need_resched()`` strategem. RCU will therefore
- invoke ``resched_cpu()`` on any ``nohz_full`` CPUs still holding out
+ defeats the above need_resched() stratagem. RCU will therefore
+ invoke resched_cpu() on any ``nohz_full`` CPUs still holding out
after 109 milliseconds.
#. In kernels built with ``CONFIG_RCU_BOOST=y``, if a given task that
has been preempted within an RCU read-side critical section is
holding out for more than 500 milliseconds, RCU will resort to
priority boosting.
#. If a CPU is still holding out 10 seconds into the grace period, RCU
- will invoke ``resched_cpu()`` on it regardless of its ``nohz_full``
+ will invoke resched_cpu() on it regardless of its ``nohz_full``
state.
The above values are defaults for systems running with ``HZ=1000``. They
@@ -1460,7 +1459,7 @@ caution when changing them. Note that these forward-progress measures
are provided only for RCU, not for `SRCU <Sleepable RCU_>`__ or `Tasks
RCU`_.
-RCU takes the following steps in ``call_rcu()`` to encourage timely
+RCU takes the following steps in call_rcu() to encourage timely
invocation of callbacks when any given non-\ ``rcu_nocbs`` CPU has
10,000 callbacks, or has 10,000 more callbacks than it had the last time
encouragement was provided:
@@ -1481,8 +1480,8 @@ RCU, not for `SRCU <Sleepable RCU_>`__ or `Tasks
RCU`_. Even for RCU, callback-invocation forward
progress for ``rcu_nocbs`` CPUs is much less well-developed, in part
because workloads benefiting from ``rcu_nocbs`` CPUs tend to invoke
-``call_rcu()`` relatively infrequently. If workloads emerge that need
-both ``rcu_nocbs`` CPUs and high ``call_rcu()`` invocation rates, then
+call_rcu() relatively infrequently. If workloads emerge that need
+both ``rcu_nocbs`` CPUs and high call_rcu() invocation rates, then
additional forward-progress work will be required.
Composability
@@ -1496,11 +1495,11 @@ in fact may be nested arbitrarily deeply. In practice, as with all
real-world implementations of composable constructs, there are
limitations.
-Implementations of RCU for which ``rcu_read_lock()`` and
-``rcu_read_unlock()`` generate no code, such as Linux-kernel RCU when
-``CONFIG_PREEMPT=n``, can be nested arbitrarily deeply. After all, there
+Implementations of RCU for which rcu_read_lock() and
+rcu_read_unlock() generate no code, such as Linux-kernel RCU when
+``CONFIG_PREEMPTION=n``, can be nested arbitrarily deeply. After all, there
is no overhead. Except that if all these instances of
-``rcu_read_lock()`` and ``rcu_read_unlock()`` are visible to the
+rcu_read_lock() and rcu_read_unlock() are visible to the
compiler, compilation will eventually fail due to exhausting memory,
mass storage, or user patience, whichever comes first. If the nesting is
not visible to the compiler, as is the case with mutually recursive
@@ -1558,11 +1557,11 @@ argue that such workloads should instead use something other than RCU,
the fact remains that RCU must handle such workloads gracefully. This
requirement is another factor driving batching of grace periods, but it
is also the driving force behind the checks for large numbers of queued
-RCU callbacks in the ``call_rcu()`` code path. Finally, high update
+RCU callbacks in the call_rcu() code path. Finally, high update
rates should not delay RCU read-side critical sections, although some
small read-side delays can occur when using
-``synchronize_rcu_expedited()``, courtesy of this function's use of
-``smp_call_function_single()``.
+synchronize_rcu_expedited(), courtesy of this function's use of
+smp_call_function_single().
Although all three of these corner cases were understood in the early
1990s, a simple user-level test consisting of ``close(open(path))`` in a
@@ -1583,48 +1582,48 @@ Software-Engineering Requirements
Between Murphy's Law and “To err is human”, it is necessary to guard
against mishaps and misuse:
-#. It is all too easy to forget to use ``rcu_read_lock()`` everywhere
+#. It is all too easy to forget to use rcu_read_lock() everywhere
that it is needed, so kernels built with ``CONFIG_PROVE_RCU=y`` will
- splat if ``rcu_dereference()`` is used outside of an RCU read-side
+ splat if rcu_dereference() is used outside of an RCU read-side
critical section. Update-side code can use
- ``rcu_dereference_protected()``, which takes a `lockdep
+ rcu_dereference_protected(), which takes a `lockdep
expression <https://lwn.net/Articles/371986/>`__ to indicate what is
providing the protection. If the indicated protection is not
provided, a lockdep splat is emitted.
Code shared between readers and updaters can use
- ``rcu_dereference_check()``, which also takes a lockdep expression,
- and emits a lockdep splat if neither ``rcu_read_lock()`` nor the
+ rcu_dereference_check(), which also takes a lockdep expression,
+ and emits a lockdep splat if neither rcu_read_lock() nor the
indicated protection is in place. In addition,
- ``rcu_dereference_raw()`` is used in those (hopefully rare) cases
+ rcu_dereference_raw() is used in those (hopefully rare) cases
where the required protection cannot be easily described. Finally,
- ``rcu_read_lock_held()`` is provided to allow a function to verify
+ rcu_read_lock_held() is provided to allow a function to verify
that it has been invoked within an RCU read-side critical section. I
was made aware of this set of requirements shortly after Thomas
Gleixner audited a number of RCU uses.
#. A given function might wish to check for RCU-related preconditions
upon entry, before using any other RCU API. The
- ``rcu_lockdep_assert()`` does this job, asserting the expression in
+ rcu_lockdep_assert() does this job, asserting the expression in
kernels having lockdep enabled and doing nothing otherwise.
-#. It is also easy to forget to use ``rcu_assign_pointer()`` and
- ``rcu_dereference()``, perhaps (incorrectly) substituting a simple
+#. It is also easy to forget to use rcu_assign_pointer() and
+ rcu_dereference(), perhaps (incorrectly) substituting a simple
assignment. To catch this sort of error, a given RCU-protected
pointer may be tagged with ``__rcu``, after which sparse will
complain about simple-assignment accesses to that pointer. Arnd
Bergmann made me aware of this requirement, and also supplied the
needed `patch series <https://lwn.net/Articles/376011/>`__.
#. Kernels built with ``CONFIG_DEBUG_OBJECTS_RCU_HEAD=y`` will splat if
- a data element is passed to ``call_rcu()`` twice in a row, without a
+ a data element is passed to call_rcu() twice in a row, without a
grace period in between. (This error is similar to a double free.)
The corresponding ``rcu_head`` structures that are dynamically
allocated are automatically tracked, but ``rcu_head`` structures
allocated on the stack must be initialized with
- ``init_rcu_head_on_stack()`` and cleaned up with
- ``destroy_rcu_head_on_stack()``. Similarly, statically allocated
+ init_rcu_head_on_stack() and cleaned up with
+ destroy_rcu_head_on_stack(). Similarly, statically allocated
non-stack ``rcu_head`` structures must be initialized with
- ``init_rcu_head()`` and cleaned up with ``destroy_rcu_head()``.
+ init_rcu_head() and cleaned up with destroy_rcu_head().
Mathieu Desnoyers made me aware of this requirement, and also
supplied the needed
- `patch <https://lkml.kernel.org/g/20100319013024.GA28456@Krystal>`__.
+ `patch <https://lore.kernel.org/r/20100319013024.GA28456@Krystal>`__.
#. An infinite loop in an RCU read-side critical section will eventually
trigger an RCU CPU stall warning splat, with the duration of
“eventually” being controlled by the ``RCU_CPU_STALL_TIMEOUT``
@@ -1638,9 +1637,9 @@ against mishaps and misuse:
``rcupdate.rcu_cpu_stall_suppress`` to suppress the splats. This
kernel parameter may also be set via ``sysfs``. Furthermore, RCU CPU
stall warnings are counter-productive during sysrq dumps and during
- panics. RCU therefore supplies the ``rcu_sysrq_start()`` and
- ``rcu_sysrq_end()`` API members to be called before and after long
- sysrq dumps. RCU also supplies the ``rcu_panic()`` notifier that is
+ panics. RCU therefore supplies the rcu_sysrq_start() and
+ rcu_sysrq_end() API members to be called before and after long
+ sysrq dumps. RCU also supplies the rcu_panic() notifier that is
automatically invoked at the beginning of a panic to suppress further
RCU CPU stall warnings.
@@ -1656,7 +1655,7 @@ against mishaps and misuse:
synchronization mechanism, for example, reference counting.
#. In kernels built with ``CONFIG_RCU_TRACE=y``, RCU-related information
is provided via event tracing.
-#. Open-coded use of ``rcu_assign_pointer()`` and ``rcu_dereference()``
+#. Open-coded use of rcu_assign_pointer() and rcu_dereference()
to create typical linked data structures can be surprisingly
error-prone. Therefore, RCU-protected `linked
lists <https://lwn.net/Articles/609973/#RCU%20List%20APIs>`__ and,
@@ -1665,12 +1664,11 @@ against mishaps and misuse:
other special-purpose RCU-protected data structures are available in
the Linux kernel and the userspace RCU library.
#. Some linked structures are created at compile time, but still require
- ``__rcu`` checking. The ``RCU_POINTER_INITIALIZER()`` macro serves
+ ``__rcu`` checking. The RCU_POINTER_INITIALIZER() macro serves
this purpose.
-#. It is not necessary to use ``rcu_assign_pointer()`` when creating
+#. It is not necessary to use rcu_assign_pointer() when creating
linked structures that are to be published via a single external
- pointer. The ``RCU_INIT_POINTER()`` macro is provided for this task
- and also for assigning ``NULL`` pointers at runtime.
+ pointer. The RCU_INIT_POINTER() macro is provided for this task.
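To make several of the facilities in the above list concrete, the
following sketch shows a sparse-checked ``__rcu`` pointer together with
lockdep-checked accessors. The names ``struct foo``, ``foo_lock``, and
``foo_gp`` are illustrative assumptions rather than kernel APIs:

::

   struct foo {
   	int a;
   	struct rcu_head rh;
   };

   static DEFINE_SPINLOCK(foo_lock);
   static struct foo __rcu *foo_gp;   /* sparse checks accesses to this */

   /* Reader: must run within an RCU read-side critical section. */
   static int foo_get_a(void)
   {
   	struct foo *p;
   	int a = -1;

   	rcu_read_lock();
   	p = rcu_dereference(foo_gp);
   	if (p)
   		a = p->a;
   	rcu_read_unlock();
   	return a;
   }

   /* Updater: lockdep splats unless foo_lock is really held. */
   static void foo_set(struct foo *newp)
   {
   	struct foo *oldp;

   	spin_lock(&foo_lock);
   	oldp = rcu_dereference_protected(foo_gp,
   					 lockdep_is_held(&foo_lock));
   	rcu_assign_pointer(foo_gp, newp);
   	spin_unlock(&foo_lock);
   	if (oldp)
   		kfree_rcu(oldp, rh);
   }

Building such a sketch with ``CONFIG_PROVE_RCU=y`` and running sparse
over it would then flag both missing locks and plain-assignment accesses
to ``foo_gp``.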
This is not a hard-and-fast list: RCU's diagnostic capabilities will
continue to be guided by the number and type of usage bugs found in
@@ -1716,7 +1714,7 @@ requires almost all of them be hidden behind a ``CONFIG_RCU_EXPERT``
This all should be quite obvious, but the fact remains that Linus
Torvalds recently had to
-`remind <https://lkml.kernel.org/g/CA+55aFy4wcCwaL4okTs8wXhGZ5h-ibecy_Meg9C4MNQrUnwMcg@mail.gmail.com>`__
+`remind <https://lore.kernel.org/r/CA+55aFy4wcCwaL4okTs8wXhGZ5h-ibecy_Meg9C4MNQrUnwMcg@mail.gmail.com>`__
me of this requirement.
Firmware Interface
@@ -1743,17 +1741,17 @@ Early Boot
~~~~~~~~~~
The Linux kernel's boot sequence is an interesting process, and RCU is
-used early, even before ``rcu_init()`` is invoked. In fact, a number of
+used early, even before rcu_init() is invoked. In fact, a number of
RCU's primitives can be used as soon as the initial task's
``task_struct`` is available and the boot CPU's per-CPU variables are
-set up. The read-side primitives (``rcu_read_lock()``,
-``rcu_read_unlock()``, ``rcu_dereference()``, and
-``rcu_access_pointer()``) will operate normally very early on, as will
-``rcu_assign_pointer()``.
+set up. The read-side primitives (rcu_read_lock(),
+rcu_read_unlock(), rcu_dereference(), and
+rcu_access_pointer()) will operate normally very early on, as will
+rcu_assign_pointer().
-Although ``call_rcu()`` may be invoked at any time during boot,
+Although call_rcu() may be invoked at any time during boot,
callbacks are not guaranteed to be invoked until after all of RCU's
-kthreads have been spawned, which occurs at ``early_initcall()`` time.
+kthreads have been spawned, which occurs at early_initcall() time.
This delay in callback invocation is due to the fact that RCU does not
invoke callbacks until it is fully initialized, and this full
initialization cannot occur until after the scheduler has initialized
@@ -1762,22 +1760,22 @@ it would be possible to invoke callbacks earlier, however, this is not a
panacea because there would be severe restrictions on what operations
those callbacks could invoke.
-Perhaps surprisingly, ``synchronize_rcu()`` and
-``synchronize_rcu_expedited()``, will operate normally during very early
+Perhaps surprisingly, synchronize_rcu() and
+synchronize_rcu_expedited() will operate normally during very early
boot, the reason being that there is only one CPU and preemption is
-disabled. This means that the call ``synchronize_rcu()`` (or friends)
+disabled. This means that the call synchronize_rcu() (or friends)
itself is a quiescent state and thus a grace period, so the early-boot
implementation can be a no-op.
However, once the scheduler has spawned its first kthread, this early
-boot trick fails for ``synchronize_rcu()`` (as well as for
-``synchronize_rcu_expedited()``) in ``CONFIG_PREEMPT=y`` kernels. The
+boot trick fails for synchronize_rcu() (as well as for
+synchronize_rcu_expedited()) in ``CONFIG_PREEMPTION=y`` kernels. The
reason is that an RCU read-side critical section might be preempted,
-which means that a subsequent ``synchronize_rcu()`` really does have to
+which means that a subsequent synchronize_rcu() really does have to
wait for something, as opposed to simply returning immediately.
-Unfortunately, ``synchronize_rcu()`` can't do this until all of its
+Unfortunately, synchronize_rcu() can't do this until all of its
kthreads are spawned, which doesn't happen until some time during
-``early_initcalls()`` time. But this is no excuse: RCU is nevertheless
+early_initcalls() time. But this is no excuse: RCU is nevertheless
required to correctly handle synchronous grace periods during this time
period. Once all of its kthreads are up and running, RCU starts running
normally.
@@ -1820,7 +1818,7 @@ Interrupts and NMIs
The Linux kernel has interrupts, and RCU read-side critical sections are
legal within interrupt handlers and within interrupt-disabled regions of
-code, as are invocations of ``call_rcu()``.
+code, as are invocations of call_rcu().
Some Linux-kernel architectures can enter an interrupt handler from
non-idle process context, and then just never leave it, instead
@@ -1832,22 +1830,22 @@ way during a rewrite of RCU's dyntick-idle code.
The Linux kernel has non-maskable interrupts (NMIs), and RCU read-side
critical sections are legal within NMI handlers. Thankfully, RCU
-update-side primitives, including ``call_rcu()``, are prohibited within
+update-side primitives, including call_rcu(), are prohibited within
NMI handlers.
The name notwithstanding, some Linux-kernel architectures can have
nested NMIs, which RCU must handle correctly. Andy Lutomirski `surprised
-me <https://lkml.kernel.org/r/CALCETrXLq1y7e_dKFPgou-FKHB6Pu-r8+t-6Ds+8=va7anBWDA@mail.gmail.com>`__
+me <https://lore.kernel.org/r/CALCETrXLq1y7e_dKFPgou-FKHB6Pu-r8+t-6Ds+8=va7anBWDA@mail.gmail.com>`__
with this requirement; he also kindly surprised me with `an
-algorithm <https://lkml.kernel.org/r/CALCETrXSY9JpW3uE6H8WYk81sg56qasA2aqmjMPsq5dOtzso=g@mail.gmail.com>`__
+algorithm <https://lore.kernel.org/r/CALCETrXSY9JpW3uE6H8WYk81sg56qasA2aqmjMPsq5dOtzso=g@mail.gmail.com>`__
that meets this requirement.
Furthermore, NMI handlers can be interrupted by what appear to RCU to be
normal interrupts. One way that this can happen is for code that
-directly invokes ``rcu_irq_enter()`` and ``rcu_irq_exit()`` to be called
+directly invokes rcu_irq_enter() and rcu_irq_exit() to be called
from an NMI handler. This astonishing fact of life prompted the current
-code structure, which has ``rcu_irq_enter()`` invoking
-``rcu_nmi_enter()`` and ``rcu_irq_exit()`` invoking ``rcu_nmi_exit()``.
+code structure, which has rcu_irq_enter() invoking
+rcu_nmi_enter() and rcu_irq_exit() invoking rcu_nmi_exit().
And yes, I also learned of this requirement the hard way.
Loadable Modules
@@ -1857,45 +1855,45 @@ The Linux kernel has loadable modules, and these modules can also be
unloaded. After a given module has been unloaded, any attempt to call
one of its functions results in a segmentation fault. The module-unload
functions must therefore cancel any delayed calls to loadable-module
-functions, for example, any outstanding ``mod_timer()`` must be dealt
-with via ``del_timer_sync()`` or similar.
+functions, for example, any outstanding mod_timer() must be dealt
+with via del_timer_sync() or similar.
Unfortunately, there is no way to cancel an RCU callback; once you
-invoke ``call_rcu()``, the callback function is eventually going to be
+invoke call_rcu(), the callback function is eventually going to be
invoked, unless the system goes down first. Because it is normally
considered socially irresponsible to crash the system in response to a
module unload request, we need some other way to deal with in-flight RCU
callbacks.
-RCU therefore provides ``rcu_barrier()``, which waits until all
+RCU therefore provides rcu_barrier(), which waits until all
in-flight RCU callbacks have been invoked. If a module uses
-``call_rcu()``, its exit function should therefore prevent any future
-invocation of ``call_rcu()``, then invoke ``rcu_barrier()``. In theory,
-the underlying module-unload code could invoke ``rcu_barrier()``
+call_rcu(), its exit function should therefore prevent any future
+invocation of call_rcu(), then invoke rcu_barrier(). In theory,
+the underlying module-unload code could invoke rcu_barrier()
unconditionally, but in practice this would incur unacceptable
latencies.
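For example, a module's exit handler might follow the pattern sketched
below, in which mymod_unregister_hooks() stands in for whatever
mechanism stops new call_rcu() invocations and is an assumption, not an
existing kernel interface:

::

   static void __exit mymod_exit(void)
   {
   	/* 1. Prevent any further call_rcu() invocations, for example
   	 *    by unregistering the hooks that post the callbacks. */
   	mymod_unregister_hooks();

   	/* 2. Wait for all already-posted callbacks to be invoked. */
   	rcu_barrier();

   	/* 3. The module's callback functions can no longer run, so it
   	 *    is now safe for the module text to be freed. */
   }
   module_exit(mymod_exit);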
Nikita Danilov noted this requirement for an analogous
filesystem-unmount situation, and Dipankar Sarma incorporated
-``rcu_barrier()`` into RCU. The need for ``rcu_barrier()`` for module
+rcu_barrier() into RCU. The need for rcu_barrier() for module
unloading became apparent later.
.. important::
- The ``rcu_barrier()`` function is not, repeat,
+ The rcu_barrier() function is not, repeat,
*not*, obligated to wait for a grace period. It is instead only required
to wait for RCU callbacks that have already been posted. Therefore, if
there are no RCU callbacks posted anywhere in the system,
- ``rcu_barrier()`` is within its rights to return immediately. Even if
- there are callbacks posted, ``rcu_barrier()`` does not necessarily need
+ rcu_barrier() is within its rights to return immediately. Even if
+ there are callbacks posted, rcu_barrier() does not necessarily need
to wait for a grace period.
+-----------------------------------------------------------------------+
| **Quick Quiz**: |
+-----------------------------------------------------------------------+
| Wait a minute! Each RCU callback must wait for a grace period to  |
-| complete, and ``rcu_barrier()`` must wait for each pre-existing |
-| callback to be invoked. Doesn't ``rcu_barrier()`` therefore need to |
+| complete, and rcu_barrier() must wait for each pre-existing |
+| callback to be invoked. Doesn't rcu_barrier() therefore need to |
| wait for a full grace period if there is even one callback posted |
| anywhere in the system? |
+-----------------------------------------------------------------------+
@@ -1904,14 +1902,14 @@ unloading became apparent later.
| Absolutely not!!! |
| Yes, each RCU callback must wait for a grace period to complete, but  |
| it might well be partly (or even completely) finished waiting by the |
-| time ``rcu_barrier()`` is invoked. In that case, ``rcu_barrier()`` |
+| time rcu_barrier() is invoked. In that case, rcu_barrier() |
| need only wait for the remaining portion of the grace period to |
| elapse. So even if there are quite a few callbacks posted, |
-| ``rcu_barrier()`` might well return quite quickly. |
+| rcu_barrier() might well return quite quickly. |
| |
| So if you need to wait for a grace period as well as for all |
| pre-existing callbacks, you will need to invoke both |
-| ``synchronize_rcu()`` and ``rcu_barrier()``. If latency is a concern, |
+| synchronize_rcu() and rcu_barrier(). If latency is a concern, |
| you can always use workqueues to invoke them concurrently. |
+-----------------------------------------------------------------------+
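One way to overlap the two waits mentioned in the answer above is to
push one of them onto a workqueue, as in the following sketch. The
names my_sync_fn() and ``my_sync_work`` are assumptions for
illustration:

::

   static void my_sync_fn(struct work_struct *unused)
   {
   	synchronize_rcu();
   }
   static DECLARE_WORK(my_sync_work, my_sync_fn);

   static void wait_for_gp_and_callbacks(void)
   {
   	schedule_work(&my_sync_work);	/* start the grace-period wait */
   	rcu_barrier();			/* wait for pre-existing callbacks */
   	flush_work(&my_sync_work);	/* and for the grace period */
   }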
@@ -1929,18 +1927,18 @@ The Linux-kernel CPU-hotplug implementation has notifiers that are used
to allow the various kernel subsystems (including RCU) to respond
appropriately to a given CPU-hotplug operation. Most RCU operations may
be invoked from CPU-hotplug notifiers, including even synchronous
-grace-period operations such as (``synchronize_rcu()`` and
-``synchronize_rcu_expedited()``). However, these synchronous operations
+grace-period operations such as synchronize_rcu() and
+synchronize_rcu_expedited(). However, these synchronous operations
do block and therefore cannot be invoked from notifiers that execute via
-``stop_machine()``, specifically those between the ``CPUHP_AP_OFFLINE``
+stop_machine(), specifically those between the ``CPUHP_AP_OFFLINE``
and ``CPUHP_AP_ONLINE`` states.
-In addition, all-callback-wait operations such as ``rcu_barrier()`` may
+In addition, all-callback-wait operations such as rcu_barrier() may
not be invoked from any CPU-hotplug notifier. This restriction is due
to the fact that there are phases of CPU-hotplug operations where the
outgoing CPU's callbacks will not be invoked until after the CPU-hotplug
operation ends, which could also result in deadlock. Furthermore,
-``rcu_barrier()`` blocks CPU-hotplug operations during its execution,
+rcu_barrier() blocks CPU-hotplug operations during its execution,
which results in another type of deadlock when invoked from a CPU-hotplug
notifier.
@@ -1955,12 +1953,12 @@ if offline CPUs block an RCU grace period for too long.
An offline CPU's quiescent state will be reported either:
-1. As the CPU goes offline using RCU's hotplug notifier (``rcu_report_dead()``).
-2. When grace period initialization (``rcu_gp_init()``) detects a
+1. As the CPU goes offline using RCU's hotplug notifier (rcu_report_dead()).
+2. When grace period initialization (rcu_gp_init()) detects a
race either with CPU offlining or with a task unblocking on a leaf
``rcu_node`` structure whose CPUs are all offline.
-The CPU-online path (``rcu_cpu_starting()``) should never need to report
+The CPU-online path (rcu_cpu_starting()) should never need to report
a quiescent state for an offline CPU. However, as a debugging measure,
it does emit a warning if a quiescent state was not already reported
for that CPU.
@@ -1984,11 +1982,11 @@ room for further improvement.
There is no longer any prohibition against holding any of
scheduler's runqueue or priority-inheritance spinlocks across an
-``rcu_read_unlock()``, even if interrupts and preemption were enabled
+rcu_read_unlock(), even if interrupts and preemption were enabled
somewhere within the corresponding RCU read-side critical section.
-Therefore, it is now perfectly legal to execute ``rcu_read_lock()``
+Therefore, it is now perfectly legal to execute rcu_read_lock()
with preemption enabled, acquire one of the scheduler locks, and hold
-that lock across the matching ``rcu_read_unlock()``.
+that lock across the matching rcu_read_unlock().
Similarly, the RCU flavor consolidation has removed the need for negative
nesting. The fact that interrupt-disabled regions of code act as RCU
@@ -1999,7 +1997,7 @@ Tracing and RCU
~~~~~~~~~~~~~~~
It is possible to use tracing on RCU code, but tracing itself uses RCU.
-For this reason, ``rcu_dereference_raw_check()`` is provided for use
+For this reason, rcu_dereference_raw_check() is provided for use
by tracing, which avoids the destructive recursion that could otherwise
ensue. This API is also used by virtualization in some architectures,
where RCU readers execute in environments in which tracing cannot be
@@ -2010,12 +2008,12 @@ Accesses to User Memory and RCU
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The kernel needs to access user-space memory, for example, to access data
-referenced by system-call parameters. The ``get_user()`` macro does this job.
+referenced by system-call parameters. The get_user() macro does this job.
However, user-space memory might well be paged out, which means that
-``get_user()`` might well page-fault and thus block while waiting for the
+get_user() might well page-fault and thus block while waiting for the
resulting I/O to complete. It would be a very bad thing for the compiler to
-reorder a ``get_user()`` invocation into an RCU read-side critical section.
+reorder a get_user() invocation into an RCU read-side critical section.
For example, suppose that the source code looked like this:
@@ -2040,23 +2038,23 @@ the following:
5 rcu_read_unlock();
6 do_something_with(v, user_v);
-If the compiler did make this transformation in a ``CONFIG_PREEMPT=n`` kernel
-build, and if ``get_user()`` did page fault, the result would be a quiescent
+If the compiler did make this transformation in a ``CONFIG_PREEMPTION=n`` kernel
+build, and if get_user() did page fault, the result would be a quiescent
state in the middle of an RCU read-side critical section. This misplaced
quiescent state could result in line 4 being a use-after-free access,
which could be bad for your kernel's actuarial statistics. Similar examples
-can be constructed with the call to ``get_user()`` preceding the
-``rcu_read_lock()``.
+can be constructed with the call to get_user() preceding the
+rcu_read_lock().
-Unfortunately, ``get_user()`` doesn't have any particular ordering properties,
+Unfortunately, get_user() doesn't have any particular ordering properties,
and in some architectures the underlying ``asm`` isn't even marked
``volatile``. And even if it was marked ``volatile``, the above access to
``p->value`` is not volatile, so the compiler would not have any reason to keep
those two accesses in order.
-Therefore, the Linux-kernel definitions of ``rcu_read_lock()`` and
-``rcu_read_unlock()`` must act as compiler barriers, at least for outermost
-instances of ``rcu_read_lock()`` and ``rcu_read_unlock()`` within a nested set
+Therefore, the Linux-kernel definitions of rcu_read_lock() and
+rcu_read_unlock() must act as compiler barriers, at least for outermost
+instances of rcu_read_lock() and rcu_read_unlock() within a nested set
of RCU read-side critical sections.
Energy Efficiency
@@ -2071,26 +2069,26 @@ call.
Because RCU avoids interrupting idle CPUs, it is illegal to execute an
RCU read-side critical section on an idle CPU. (Kernels built with
-``CONFIG_PROVE_RCU=y`` will splat if you try it.) The ``RCU_NONIDLE()``
+``CONFIG_PROVE_RCU=y`` will splat if you try it.) The RCU_NONIDLE()
macro and ``_rcuidle`` event tracing are provided to work around this
-restriction. In addition, ``rcu_is_watching()`` may be used to test
+restriction. In addition, rcu_is_watching() may be used to test
whether or not it is currently legal to run RCU read-side critical
sections on this CPU. I learned of the need for diagnostics on the one
-hand and ``RCU_NONIDLE()`` on the other while inspecting idle-loop code.
+hand and RCU_NONIDLE() on the other while inspecting idle-loop code.
Steven Rostedt supplied ``_rcuidle`` event tracing, which is used quite
heavily in the idle loop. However, there are some restrictions on the
-code placed within ``RCU_NONIDLE()``:
+code placed within RCU_NONIDLE():
#. Blocking is prohibited. In practice, this is not a serious
restriction given that idle tasks are prohibited from blocking to
begin with.
-#. Although nesting ``RCU_NONIDLE()`` is permitted, they cannot nest
+#. Although nesting RCU_NONIDLE() is permitted, they cannot nest
indefinitely deeply. However, given that they can be nested on the
order of a million deep, even on 32-bit systems, this should not be a
serious restriction. This nesting limit would probably be reached
long after the compiler OOMed or the stack overflowed.
-#. Any code path that enters ``RCU_NONIDLE()`` must sequence out of that
- same ``RCU_NONIDLE()``. For example, the following is grossly
+#. Any code path that enters RCU_NONIDLE() must sequence out of that
+ same RCU_NONIDLE(). For example, the following is grossly
illegal:
::
@@ -2103,7 +2101,7 @@ code placed within ``RCU_NONIDLE()``:
It is just as illegal to transfer control into the middle of
- ``RCU_NONIDLE()``'s argument. Yes, in theory, you could transfer in
+ RCU_NONIDLE()'s argument. Yes, in theory, you could transfer in
as long as you also transferred out, but in practice you could also
expect to get sharply worded review comments.
@@ -2195,9 +2193,9 @@ scheduling-clock interrupt be enabled when RCU needs it to be:
sections, and RCU believes this CPU to be idle, no problem. This
sort of thing is used by some architectures for light-weight
exception handlers, which can then avoid the overhead of
- ``rcu_irq_enter()`` and ``rcu_irq_exit()`` at exception entry and
+ rcu_irq_enter() and rcu_irq_exit() at exception entry and
exit, respectively. Some go further and avoid the entireties of
- ``irq_enter()`` and ``irq_exit()``.
+ irq_enter() and irq_exit().
Just make very sure you are running some of your tests with
``CONFIG_PROVE_RCU=y``, just in case one of your code paths was in
fact joking about not doing RCU read-side critical sections.
@@ -2221,7 +2219,7 @@ scheduling-clock interrupt be enabled when RCU needs it to be:
| **Quick Quiz**: |
+-----------------------------------------------------------------------+
| But what if my driver has a hardware interrupt handler that can run |
-| for many seconds? I cannot invoke ``schedule()`` from an hardware |
+| for many seconds? I cannot invoke schedule() from a hardware  |
| interrupt handler, after all! |
+-----------------------------------------------------------------------+
| **Answer**: |
@@ -2243,8 +2241,8 @@ Memory Efficiency
Although small-memory non-realtime systems can simply use Tiny RCU, code
size is only one aspect of memory efficiency. Another aspect is the size
-of the ``rcu_head`` structure used by ``call_rcu()`` and
-``kfree_rcu()``. Although this structure contains nothing more than a
+of the ``rcu_head`` structure used by call_rcu() and
+kfree_rcu(). Although this structure contains nothing more than a
pair of pointers, it does appear in many RCU-protected data structures,
including some that are size critical. The ``page`` structure is a case
in point, as evidenced by the many occurrences of the ``union`` keyword
@@ -2254,7 +2252,7 @@ This need for memory efficiency is one reason that RCU uses hand-crafted
singly linked lists to track the ``rcu_head`` structures that are
waiting for a grace period to elapse. It is also the reason why
``rcu_head`` structures do not contain debug information, such as fields
-tracking the file and line of the ``call_rcu()`` or ``kfree_rcu()`` that
+tracking the file and line of the call_rcu() or kfree_rcu() that
posted them. Although this information might appear in debug-only kernel
builds at some point, in the meantime, the ``->func`` field will often
provide the needed debug information.
@@ -2264,18 +2262,18 @@ more extreme measures. Returning to the ``page`` structure, the
``rcu_head`` field shares storage with a great many other structures
that are used at various points in the corresponding page's lifetime. In
order to correctly resolve certain `race
-conditions <https://lkml.kernel.org/g/1439976106-137226-1-git-send-email-kirill.shutemov@linux.intel.com>`__,
+conditions <https://lore.kernel.org/r/1439976106-137226-1-git-send-email-kirill.shutemov@linux.intel.com>`__,
the Linux kernel's memory-management subsystem needs a particular bit to
remain zero during all phases of grace-period processing, and that bit
happens to map to the bottom bit of the ``rcu_head`` structure's
-``->next`` field. RCU makes this guarantee as long as ``call_rcu()`` is
-used to post the callback, as opposed to ``kfree_rcu()`` or some future
-“lazy” variant of ``call_rcu()`` that might one day be created for
+``->next`` field. RCU makes this guarantee as long as call_rcu() is
+used to post the callback, as opposed to kfree_rcu() or some future
+“lazy” variant of call_rcu() that might one day be created for
energy-efficiency purposes.
That said, there are limits. RCU requires that the ``rcu_head``
structure be aligned to a two-byte boundary, and passing a misaligned
-``rcu_head`` structure to one of the ``call_rcu()`` family of functions
+``rcu_head`` structure to one of the call_rcu() family of functions
will result in a splat. It is therefore necessary to exercise caution
when packing structures containing fields of type ``rcu_head``. Why not
a four-byte or even eight-byte alignment requirement? Because the m68k
@@ -2299,7 +2297,7 @@ hot code paths in performance-critical portions of the Linux kernel's
networking, security, virtualization, and scheduling code paths. RCU
must therefore use efficient implementations, especially in its
read-side primitives. To that end, it would be good if preemptible RCU's
-implementation of ``rcu_read_lock()`` could be inlined, however, doing
+implementation of rcu_read_lock() could be inlined, however, doing
this requires resolving ``#include`` issues with the ``task_struct``
structure.
@@ -2312,23 +2310,23 @@ on the ``rcu_node`` structure. RCU is required to tolerate all CPUs
continuously invoking any combination of RCU's runtime primitives with
minimal per-operation overhead. In fact, in many cases, increasing load
must *decrease* the per-operation overhead, witness the batching
-optimizations for ``synchronize_rcu()``, ``call_rcu()``,
-``synchronize_rcu_expedited()``, and ``rcu_barrier()``. As a general
+optimizations for synchronize_rcu(), call_rcu(),
+synchronize_rcu_expedited(), and rcu_barrier(). As a general
rule, RCU must cheerfully accept whatever the rest of the Linux kernel
decides to throw at it.
The Linux kernel is used for real-time workloads, especially in
conjunction with the `-rt
-patchset <https://rt.wiki.kernel.org/index.php/Main_Page>`__. The
+patchset <https://wiki.linuxfoundation.org/realtime/>`__. The
real-time-latency response requirements are such that the traditional
approach of disabling preemption across RCU read-side critical sections
-is inappropriate. Kernels built with ``CONFIG_PREEMPT=y`` therefore use
+is inappropriate. Kernels built with ``CONFIG_PREEMPTION=y`` therefore use
an RCU implementation that allows RCU read-side critical sections to be
preempted. This requirement made its presence known after users made it
clear that an earlier `real-time
patch <https://lwn.net/Articles/107930/>`__ did not meet their needs, in
conjunction with some `RCU
-issues <https://lkml.kernel.org/g/20050318002026.GA2693@us.ibm.com>`__
+issues <https://lore.kernel.org/r/20050318002026.GA2693@us.ibm.com>`__
encountered by a very early version of the -rt patchset.
In addition, RCU must make do with a sub-100-microsecond real-time
@@ -2346,7 +2344,7 @@ number of race conditions.
RCU must avoid degrading real-time response for CPU-bound threads,
whether executing in usermode (which is one use case for
``CONFIG_NO_HZ_FULL=y``) or in the kernel. That said, CPU-bound loops in
-the kernel must execute ``cond_resched()`` at least once per few tens of
+the kernel must execute cond_resched() at least once per few tens of
milliseconds in order to avoid receiving an IPI from RCU.
Finally, RCU's status as a synchronization primitive means that any RCU
@@ -2412,7 +2410,7 @@ grace periods from ever ending. The result was an out-of-memory
condition and a system hang.
The solution was the creation of RCU-bh, which does
-``local_bh_disable()`` across its read-side critical sections, and which
+local_bh_disable() across its read-side critical sections, and which
uses the transition from one type of softirq processing to another as a
quiescent state in addition to context switch, idle, user mode, and
offline. This means that RCU-bh grace periods can complete even when
@@ -2420,31 +2418,31 @@ some of the CPUs execute in softirq indefinitely, thus allowing
algorithms based on RCU-bh to withstand network-based denial-of-service
attacks.
-Because ``rcu_read_lock_bh()`` and ``rcu_read_unlock_bh()`` disable and
+Because rcu_read_lock_bh() and rcu_read_unlock_bh() disable and
re-enable softirq handlers, any attempt to start a softirq handler
during the RCU-bh read-side critical section will be deferred. In this
-case, ``rcu_read_unlock_bh()`` will invoke softirq processing, which can
+case, rcu_read_unlock_bh() will invoke softirq processing, which can
take considerable time. One can of course argue that this softirq
overhead should be associated with the code following the RCU-bh
-read-side critical section rather than ``rcu_read_unlock_bh()``, but the
+read-side critical section rather than rcu_read_unlock_bh(), but the
fact is that most profiling tools cannot be expected to make this sort
of fine distinction. For example, suppose that a three-millisecond-long
RCU-bh read-side critical section executes during a time of heavy
networking load. There will very likely be an attempt to invoke at least
one softirq handler during that three milliseconds, but any such
invocation will be delayed until the time of the
-``rcu_read_unlock_bh()``. This can of course make it appear at first
-glance as if ``rcu_read_unlock_bh()`` was executing very slowly.
+rcu_read_unlock_bh(). This can of course make it appear at first
+glance as if rcu_read_unlock_bh() was executing very slowly.
The `RCU-bh
API <https://lwn.net/Articles/609973/#RCU%20Per-Flavor%20API%20Table>`__
-includes ``rcu_read_lock_bh()``, ``rcu_read_unlock_bh()``,
-``rcu_dereference_bh()``, ``rcu_dereference_bh_check()``,
-``synchronize_rcu_bh()``, ``synchronize_rcu_bh_expedited()``,
-``call_rcu_bh()``, ``rcu_barrier_bh()``, and
-``rcu_read_lock_bh_held()``. However, the update-side APIs are now
-simple wrappers for other RCU flavors, namely RCU-sched in
-CONFIG_PREEMPT=n kernels and RCU-preempt otherwise.
+includes rcu_read_lock_bh(), rcu_read_unlock_bh(), rcu_dereference_bh(),
+rcu_dereference_bh_check(), and rcu_read_lock_bh_held(). However, the
+old RCU-bh update-side APIs are now gone, replaced by synchronize_rcu(),
+synchronize_rcu_expedited(), call_rcu(), and rcu_barrier(). In addition,
+anything that disables bottom halves also marks an RCU-bh read-side
+critical section, including local_bh_disable() and local_bh_enable(),
+local_irq_save() and local_irq_restore(), and so on.
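A minimal RCU-bh reader therefore looks much like any other RCU reader,
as in this sketch, where ``gp`` is an assumed ``__rcu``-tagged pointer
and do_something_with() an assumed helper:

::

   rcu_read_lock_bh();
   p = rcu_dereference_bh(gp);
   if (p)
   	do_something_with(p);
   rcu_read_unlock_bh();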
Sched Flavor (Historical)
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -2462,32 +2460,32 @@ not have this property, given that any point in the code outside of an
RCU read-side critical section can be a quiescent state. Therefore,
*RCU-sched* was created, which follows “classic” RCU in that an
RCU-sched grace period waits for pre-existing interrupt and NMI
-handlers. In kernels built with ``CONFIG_PREEMPT=n``, the RCU and
+handlers. In kernels built with ``CONFIG_PREEMPTION=n``, the RCU and
RCU-sched APIs have identical implementations, while kernels built with
-``CONFIG_PREEMPT=y`` provide a separate implementation for each.
+``CONFIG_PREEMPTION=y`` provide a separate implementation for each.
-Note well that in ``CONFIG_PREEMPT=y`` kernels,
-``rcu_read_lock_sched()`` and ``rcu_read_unlock_sched()`` disable and
+Note well that in ``CONFIG_PREEMPTION=y`` kernels,
+rcu_read_lock_sched() and rcu_read_unlock_sched() disable and
re-enable preemption, respectively. This means that if there was a
preemption attempt during the RCU-sched read-side critical section,
-``rcu_read_unlock_sched()`` will enter the scheduler, with all the
-latency and overhead entailed. Just as with ``rcu_read_unlock_bh()``,
-this can make it look as if ``rcu_read_unlock_sched()`` was executing
+rcu_read_unlock_sched() will enter the scheduler, with all the
+latency and overhead entailed. Just as with rcu_read_unlock_bh(),
+this can make it look as if rcu_read_unlock_sched() was executing
very slowly. However, the highest-priority task won't be preempted, so
-that task will enjoy low-overhead ``rcu_read_unlock_sched()``
+that task will enjoy low-overhead rcu_read_unlock_sched()
invocations.
The `RCU-sched
API <https://lwn.net/Articles/609973/#RCU%20Per-Flavor%20API%20Table>`__
-includes ``rcu_read_lock_sched()``, ``rcu_read_unlock_sched()``,
-``rcu_read_lock_sched_notrace()``, ``rcu_read_unlock_sched_notrace()``,
-``rcu_dereference_sched()``, ``rcu_dereference_sched_check()``,
-``synchronize_sched()``, ``synchronize_rcu_sched_expedited()``,
-``call_rcu_sched()``, ``rcu_barrier_sched()``, and
-``rcu_read_lock_sched_held()``. However, anything that disables
-preemption also marks an RCU-sched read-side critical section, including
-``preempt_disable()`` and ``preempt_enable()``, ``local_irq_save()`` and
-``local_irq_restore()``, and so on.
+includes rcu_read_lock_sched(), rcu_read_unlock_sched(),
+rcu_read_lock_sched_notrace(), rcu_read_unlock_sched_notrace(),
+rcu_dereference_sched(), rcu_dereference_sched_check(), and
+rcu_read_lock_sched_held(). However, the old RCU-sched update-side APIs
+are now gone, replaced by synchronize_rcu(), synchronize_rcu_expedited(),
+call_rcu(), and rcu_barrier(). In addition, anything that disables
+preemption also marks an RCU-sched read-side critical section,
+including preempt_disable() and preempt_enable(), local_irq_save()
+and local_irq_restore(), and so on.
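Because disabling preemption marks an RCU-sched read-side critical
section, the two readers in the following sketch are equivalent; ``gp``
and do_something_with() are again assumed names rather than kernel
APIs:

::

   rcu_read_lock_sched();
   p = rcu_dereference_sched(gp);
   if (p)
   	do_something_with(p);
   rcu_read_unlock_sched();

   preempt_disable();
   p = rcu_dereference_sched(gp);
   if (p)
   	do_something_with(p);
   preempt_enable();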
Sleepable RCU
~~~~~~~~~~~~~
@@ -2509,7 +2507,7 @@ this structure must be passed in to each SRCU function, for example,
structure. The key benefit of these domains is that a slow SRCU reader
in one domain does not delay an SRCU grace period in some other domain.
That said, one consequence of these domains is that read-side code must
-pass a “cookie” from ``srcu_read_lock()`` to ``srcu_read_unlock()``, for
+pass a “cookie” from srcu_read_lock() to srcu_read_unlock(), for
example, as follows:
::
@@ -2539,24 +2537,24 @@ period to elapse. For example, this results in a self-deadlock:
6 srcu_read_unlock(&ss, idx);
However, if line 5 acquired a mutex that was held across a
-``synchronize_srcu()`` for domain ``ss``, deadlock would still be
+synchronize_srcu() for domain ``ss``, deadlock would still be
possible. Furthermore, if line 5 acquired a mutex that was held across a
-``synchronize_srcu()`` for some other domain ``ss1``, and if an
+synchronize_srcu() for some other domain ``ss1``, and if an
``ss1``-domain SRCU read-side critical section acquired another mutex
-that was held across as ``ss``-domain ``synchronize_srcu()``, deadlock
+that was held across an ``ss``-domain synchronize_srcu(), deadlock
would again be possible. Such a deadlock cycle could extend across an
arbitrarily large number of different SRCU domains. Again, with great
power comes great responsibility.
Unlike the other RCU flavors, SRCU read-side critical sections can run
on idle and even offline CPUs. This ability requires that
-``srcu_read_lock()`` and ``srcu_read_unlock()`` contain memory barriers,
+srcu_read_lock() and srcu_read_unlock() contain memory barriers,
which means that SRCU readers will run a bit slower than would RCU
-readers. It also motivates the ``smp_mb__after_srcu_read_unlock()`` API,
-which, in combination with ``srcu_read_unlock()``, guarantees a full
+readers. It also motivates the smp_mb__after_srcu_read_unlock() API,
+which, in combination with srcu_read_unlock(), guarantees a full
memory barrier.
-Also unlike other RCU flavors, ``synchronize_srcu()`` may **not** be
+Also unlike other RCU flavors, synchronize_srcu() may **not** be
invoked from CPU-hotplug notifiers, due to the fact that SRCU grace
periods make use of timers and the possibility of timers being
temporarily “stranded” on the outgoing CPU. This stranding of timers
@@ -2565,7 +2563,7 @@ the CPU-hotplug process. The problem is that if a notifier is waiting on
an SRCU grace period, that grace period is waiting on a timer, and that
timer is stranded on the outgoing CPU, then the notifier will never be
awakened, in other words, deadlock has occurred. This same situation of
-course also prohibits ``srcu_barrier()`` from being invoked from
+course also prohibits srcu_barrier() from being invoked from
CPU-hotplug notifiers.
SRCU also differs from other RCU flavors in that SRCU's expedited and
@@ -2576,12 +2574,12 @@ have not yet completed. (But please note that this is a property of the
current implementation, not necessarily of future implementations.) In
addition, if SRCU has been idle for longer than the interval specified
by the ``srcutree.exp_holdoff`` kernel boot parameter (25 microseconds
-by default), and if a ``synchronize_srcu()`` invocation ends this idle
+by default), and if a synchronize_srcu() invocation ends this idle
period, that invocation will be automatically expedited.
As of v4.12, SRCU's callbacks are maintained per-CPU, eliminating a
locking bottleneck present in prior kernel versions. Although this will
-allow users to put much heavier stress on ``call_srcu()``, it is
+allow users to put much heavier stress on call_srcu(), it is
important to note that SRCU does not yet take any special steps to deal
with callback flooding. So if you are posting (say) 10,000 SRCU
callbacks per second per CPU, you are probably totally OK, but if you
@@ -2592,14 +2590,32 @@ of your CPUs and the size of your memory.
The `SRCU
API <https://lwn.net/Articles/609973/#RCU%20Per-Flavor%20API%20Table>`__
-includes ``srcu_read_lock()``, ``srcu_read_unlock()``,
-``srcu_dereference()``, ``srcu_dereference_check()``,
-``synchronize_srcu()``, ``synchronize_srcu_expedited()``,
-``call_srcu()``, ``srcu_barrier()``, and ``srcu_read_lock_held()``. It
-also includes ``DEFINE_SRCU()``, ``DEFINE_STATIC_SRCU()``, and
-``init_srcu_struct()`` APIs for defining and initializing
+includes srcu_read_lock(), srcu_read_unlock(),
+srcu_dereference(), srcu_dereference_check(),
+synchronize_srcu(), synchronize_srcu_expedited(),
+call_srcu(), srcu_barrier(), and srcu_read_lock_held(). It
+also includes DEFINE_SRCU(), DEFINE_STATIC_SRCU(), and
+init_srcu_struct() APIs for defining and initializing
``srcu_struct`` structures.
+More recently, the SRCU API has added polling interfaces:
+
+#. start_poll_synchronize_srcu() returns a cookie identifying
+ the completion of a future SRCU grace period and ensures
+ that this grace period will be started.
+#. poll_state_synchronize_srcu() returns ``true`` iff the
+ specified cookie corresponds to an already-completed
+ SRCU grace period.
+#. get_state_synchronize_srcu() returns a cookie just like
+ start_poll_synchronize_srcu() does, but differs in that
+ it does nothing to ensure that any future SRCU grace period
+ will be started.
+
+These functions are used to avoid unnecessary SRCU grace periods in
+certain types of buffer-cache algorithms having multi-stage age-out
+mechanisms. The idea is that by the time the block has aged completely
+from the cache, an SRCU grace period will be very likely to have elapsed.
+
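For example, a cache might record a cookie when a block enters its
age-out pipeline and check that cookie at the final free, paying for a
full synchronize_srcu() only if the grace period has not yet elapsed.
In this sketch, ``my_srcu``, ``struct my_block``, and the two helper
functions are assumptions for illustration:

::

   DEFINE_STATIC_SRCU(my_srcu);

   struct my_block {
   	unsigned long gp_cookie;
   	/* ... */
   };

   static void my_block_start_ageout(struct my_block *b)
   {
   	b->gp_cookie = start_poll_synchronize_srcu(&my_srcu);
   }

   static void my_block_final_free(struct my_block *b)
   {
   	if (!poll_state_synchronize_srcu(&my_srcu, b->gp_cookie))
   		synchronize_srcu(&my_srcu); /* grace period still pending */
   	kfree(b);
   }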
Tasks RCU
~~~~~~~~~
@@ -2608,11 +2624,11 @@ required to install different types of probes. It would be good to be
able to free old trampolines, which sounds like a job for some form of
RCU. However, because it is necessary to be able to install a trace
anywhere in the code, it is not possible to use read-side markers such
-as ``rcu_read_lock()`` and ``rcu_read_unlock()``. In addition, it does
+as rcu_read_lock() and rcu_read_unlock(). In addition, it does
not work to have these markers in the trampoline itself, because there
-would need to be instructions following ``rcu_read_unlock()``. Although
-``synchronize_rcu()`` would guarantee that execution reached the
-``rcu_read_unlock()``, it would not be able to guarantee that execution
+would need to be instructions following rcu_read_unlock(). Although
+synchronize_rcu() would guarantee that execution reached the
+rcu_read_unlock(), it would not be able to guarantee that execution
had completely left the trampoline. Worse yet, in some situations
the trampoline's protection must extend a few instructions *prior* to
execution reaching the trampoline. For example, these few instructions
@@ -2623,16 +2639,16 @@ actually reached the trampoline itself.
The solution, in the form of `Tasks
RCU <https://lwn.net/Articles/607117/>`__, is to have implicit read-side
critical sections that are delimited by voluntary context switches, that
-is, calls to ``schedule()``, ``cond_resched()``, and
-``synchronize_rcu_tasks()``. In addition, transitions to and from
+is, calls to schedule(), cond_resched(), and
+synchronize_rcu_tasks(). In addition, transitions to and from
userspace execution also delimit tasks-RCU read-side critical sections.
The tasks-RCU API is quite compact, consisting only of
-``call_rcu_tasks()``, ``synchronize_rcu_tasks()``, and
-``rcu_barrier_tasks()``. In ``CONFIG_PREEMPT=n`` kernels, trampolines
-cannot be preempted, so these APIs map to ``call_rcu()``,
-``synchronize_rcu()``, and ``rcu_barrier()``, respectively. In
-``CONFIG_PREEMPT=y`` kernels, trampolines can be preempted, and these
+call_rcu_tasks(), synchronize_rcu_tasks(), and
+rcu_barrier_tasks(). In ``CONFIG_PREEMPTION=n`` kernels, trampolines
+cannot be preempted, so these APIs map to call_rcu(),
+synchronize_rcu(), and rcu_barrier(), respectively. In
+``CONFIG_PREEMPTION=y`` kernels, trampolines can be preempted, and these
three APIs are therefore implemented by separate functions that check
for voluntary context switches.
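A tracing subsystem might therefore retire a trampoline along the lines
of the following sketch, in which unlink_trampoline() and
free_trampoline() are purely illustrative helpers rather than kernel
APIs:

::

   static void remove_trampoline(struct my_trampoline *tp)
   {
   	unlink_trampoline(tp);		/* no new entries into the trampoline */
   	synchronize_rcu_tasks();	/* wait for all tasks to leave it */
   	free_trampoline(tp);		/* now safe to free the executable code */
   }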
@@ -2646,8 +2662,8 @@ grace-period state machine so as to avoid the need for the additional
latency.
RCU disables CPU hotplug in a few places, perhaps most notably in the
-``rcu_barrier()`` operations. If there is a strong reason to use
-``rcu_barrier()`` in CPU-hotplug notifiers, it will be necessary to
+rcu_barrier() operations. If there is a strong reason to use
+rcu_barrier() in CPU-hotplug notifiers, it will be necessary to
avoid disabling CPU hotplug. This would introduce some complexity, so
there had better be a *very* good reason.
@@ -2664,7 +2680,7 @@ However, this combining tree does not spread its memory across NUMA
nodes nor does it align the CPU groups with hardware features such as
sockets or cores. Such spreading and alignment is currently believed to
be unnecessary because the hotpath read-side primitives do not access
-the combining tree, nor does ``call_rcu()`` in the common case. If you
+the combining tree, nor does call_rcu() in the common case. If you
believe that your architecture needs such spreading and alignment, then
your architecture should also benefit from the
``rcutree.rcu_fanout_leaf`` boot parameter, which can be set to the
@@ -2685,7 +2701,7 @@ likely that adjustments will be required to more gracefully handle
extreme loads. It might also be necessary to be able to relate CPU
utilization by RCU's kthreads and softirq handlers to the code that
instigated this CPU utilization. For example, RCU callback overhead
-might be charged back to the originating ``call_rcu()`` instance, though
+might be charged back to the originating call_rcu() instance, though
probably not in production kernels.
Additional work may be required to provide reasonable forward-progress
diff --git a/Documentation/RCU/NMI-RCU.rst b/Documentation/RCU/NMI-RCU.rst
index 180958388ff9..2a92bc685ef1 100644
--- a/Documentation/RCU/NMI-RCU.rst
+++ b/Documentation/RCU/NMI-RCU.rst
@@ -8,8 +8,7 @@ Although RCU is usually used to protect read-mostly data structures,
it is possible to use RCU to provide dynamic non-maskable interrupt
handlers, as well as dynamic irq handlers. This document describes
how to do this, drawing loosely from Zwane Mwaikambo's NMI-timer
-work in "arch/x86/oprofile/nmi_timer_int.c" and in
-"arch/x86/kernel/traps.c".
+work in "arch/x86/kernel/traps.c".
The relevant pieces of code are listed below, each followed by a
brief explanation::
diff --git a/Documentation/RCU/RTFP.txt b/Documentation/RCU/RTFP.txt
index 9bccf16736f7..3b0876c77355 100644
--- a/Documentation/RCU/RTFP.txt
+++ b/Documentation/RCU/RTFP.txt
@@ -683,7 +683,7 @@ Orran Krieger and Rusty Russell and Dipankar Sarma and Maneesh Soni"
,month="October"
,year="2001"
,note="Available:
-\url{http://lkml.org/lkml/2001/10/13/105}
+\url{https://lore.kernel.org/r/Pine.LNX.4.33.0110131015410.8707-100000@penguin.transmeta.com}
[Viewed August 21, 2004]"
,annotation={
}
@@ -826,7 +826,7 @@ Symposium on Distributed Computing}
,month="October"
,year="2002"
,note="Available:
-\url{https://lkml.org/lkml/2002/10/24/262}
+\url{https://lore.kernel.org/r/3DB86B05.447E7410@us.ibm.com}
[Viewed February 15, 2014]"
,annotation={
Mingming Cao's patch to introduce RCU to SysV IPC.
@@ -839,7 +839,7 @@ Symposium on Distributed Computing}
,month="March"
,year="2003"
,note="Available:
-\url{http://lkml.org/lkml/2003/3/9/205}
+\url{https://lore.kernel.org/r/Pine.LNX.4.44.0303091831560.2129-100000@home.transmeta.com}
[Viewed March 13, 2006]"
,annotation={
Linus suggests replacing brlock with RCU and/or seqlocks:
@@ -1036,15 +1036,15 @@ Add per-cpu batch counter"
,annotation={
RCU runs reasonably on a 512-CPU SGI using Manfred Spraul's patches,
which may be found at:
- https://lkml.org/lkml/2004/5/20/49 (split vars into cachelines)
- https://lkml.org/lkml/2004/5/22/114 (cpu_quiet() patch)
- https://lkml.org/lkml/2004/5/25/24 (0/5)
- https://lkml.org/lkml/2004/5/25/23 (1/5)
- https://lkml.org/lkml/2004/5/25/265 (works for Jack)
- https://lkml.org/lkml/2004/5/25/20 (2/5)
- https://lkml.org/lkml/2004/5/25/22 (3/5)
- https://lkml.org/lkml/2004/5/25/19 (4/5)
- https://lkml.org/lkml/2004/5/25/21 (5/5)
+ https://lore.kernel.org/r/40AC9823.6020709@colorfullife.com (split vars into cachelines)
+ https://lore.kernel.org/r/Pine.LNX.4.44.0405222141260.11106-100000@dbl.q-ag.de (cpu_quiet() patch)
+ https://lore.kernel.org/r/200405250535.i4P5ZJo8017583@dbl.q-ag.de (0/5)
+ https://lore.kernel.org/r/200405250535.i4P5ZKAQ017591@dbl.q-ag.de (1/5)
+ https://lore.kernel.org/r/20040525203215.GB5127@sgi.com (works for Jack)
+ https://lore.kernel.org/r/200405250535.i4P5ZLiR017599@dbl.q-ag.de (2/5)
+ https://lore.kernel.org/r/200405250535.i4P5ZMFt017607@dbl.q-ag.de (3/5)
+ https://lore.kernel.org/r/200405250535.i4P5ZN6g017615@dbl.q-ag.de (4/5)
+ https://lore.kernel.org/r/200405250535.i4P5ZO7I017623@dbl.q-ag.de (5/5)
}
}
@@ -1106,7 +1106,7 @@ Oregon Health and Sciences University"
,month="August"
,year="2004"
,note="Available:
-\url{http://lkml.org/lkml/2004/8/6/237}
+\url{https://lore.kernel.org/r/20040807192424.GF3936@in.ibm.com}
[Viewed June 8, 2010]"
,annotation={
Introduce rcu_dereference().
@@ -1119,7 +1119,7 @@ Oregon Health and Sciences University"
,month="August"
,year="2004"
,note="Available:
-\url{http://lkml.org/lkml/2004/8/30/87}
+\url{https://lore.kernel.org/r/1093873222.984.12.camel@new.localdomain}
[Viewed February 17, 2005]"
,annotation={
Uses active code in rcu_read_lock() and rcu_read_unlock() to
@@ -1186,7 +1186,7 @@ Oregon Health and Sciences University"
,month="October"
,year="2004"
,note="Available:
-\url{http://lkml.org/lkml/2004/10/23/241}
+\url{https://lore.kernel.org/r/20041023202723.GA1930@us.ibm.com}
[Viewed June 8, 2010]"
,annotation={
Introduce rcu_assign_pointer().
@@ -1203,7 +1203,7 @@ Oregon Health and Sciences University"
,annotation={
James Morris posts Kaigai Kohei's patch to LKML.
[Viewed December 10, 2004]
- Kaigai's patch is at https://lkml.org/lkml/2004/9/27/52
+ Kaigai's patch is at https://lore.kernel.org/r/200409271057.i8RAvcA1007873@mailsv.bs1.fc.nec.co.jp
}
}
@@ -1241,7 +1241,7 @@ Oregon Health and Sciences University"
,year="2005"
,day="17"
,note="Available:
-\url{http://lkml.org/lkml/2005/3/17/199}
+\url{https://lore.kernel.org/r/20050318002026.GA2693@us.ibm.com}
[Viewed September 5, 2005]"
,annotation={
First posting showing how RCU can be safely adapted for
@@ -1256,7 +1256,7 @@ Oregon Health and Sciences University"
,year="2005"
,day="18"
,note="Available:
-\url{http://lkml.org/lkml/2005/3/18/122}
+\url{https://lore.kernel.org/r/Pine.OSF.4.05.10503181336310.2466-100000@da410.phys.au.dk}
[Viewed March 30, 2006]"
,annotation={
Esben Neilsen suggests read-side suppression of grace-period
@@ -1302,7 +1302,7 @@ Data Structures"
,month="May"
,year="2005"
,note="Available:
-\url{http://lkml.org/lkml/2005/5/9/185}
+\url{https://lore.kernel.org/r/20050510012444.GA3011@us.ibm.com}
[Viewed May 13, 2005]"
,annotation={
First publication of working lock-based deferred free patches
@@ -1385,7 +1385,7 @@ Data Structures"
,day="1"
,year="2005"
,note="Available:
-\url{http://lkml.org/lkml/2005/8/1/155}
+\url{https://lore.kernel.org/r/20050801171137.GA1754@us.ibm.com}
[Viewed March 14, 2006]"
,annotation={
First operating counter-based realtime RCU patch posted to LKML.
@@ -1399,7 +1399,7 @@ Data Structures"
,day="8"
,year="2005"
,note="Available:
-\url{http://lkml.org/lkml/2005/8/8/108}
+\url{https://lore.kernel.org/r/20050808144216.GA1307@us.ibm.com}
[Viewed March 14, 2006]"
,annotation={
First operating counter-based realtime RCU patch posted to LKML,
@@ -1415,7 +1415,7 @@ Data Structures"
,day="1"
,year="2005"
,note="Available:
-\url{http://lkml.org/lkml/2005/10/1/70}
+\url{https://lore.kernel.org/r/20051001182056.GA1613@us.ibm.com}
[Viewed March 14, 2006]"
,annotation={
First rcutorture patch.
@@ -1429,7 +1429,7 @@ Data Structures"
,day="6"
,year="2006"
,note="Available:
-\url{https://lkml.org/lkml/2006/1/7/22}
+\url{https://lore.kernel.org/r/20060106.231054.43576567.davem@davemloft.net}
[Viewed February 29, 2012]"
,annotation={
David Miller's view on hashed arrays of locks: used to really
@@ -1464,7 +1464,7 @@ Distributed Processing Symposium"
,day="20"
,year="2006"
,note="Available:
-\url{http://lkml.org/lkml/2006/6/20/238}
+\url{https://lore.kernel.org/r/20060408134707.22479.33814.sendpatchset@linux.site}
[Viewed March 25, 2008]"
,annotation={
RCU-protected radix tree.
@@ -1554,7 +1554,7 @@ Revised:
,day="28"
,year="2006"
,note="Available:
-\url{http://lkml.org/lkml/2006/9/28/160}
+\url{https://lore.kernel.org/r/20060928142616.GA20185@infradead.org}
[Viewed March 27, 2008]"
}
@@ -1593,7 +1593,7 @@ Revised:
,year="2006"
,day=26
,note="Available:
-\url{http://lkml.org/lkml/2006/10/26/73}
+\url{https://lore.kernel.org/r/20061026105731.GE11803@in.ibm.com}
[Viewed January 26, 2009]"
,annotation={
RCU-based reader-writer lock that allows readers to proceed with
@@ -1612,12 +1612,12 @@ Revised:
,year="2006"
,day=17
,note="Available:
-\url{http://lkml.org/lkml/2006/11/17/56}
+\url{https://lore.kernel.org/r/20061117092925.GT7164@kernel.dk}
[Viewed May 28, 2007]"
,annotation={
SRCU's grace periods are too slow for Jens, even after a
factor-of-three speedup.
- Sped-up version of SRCU at http://lkml.org/lkml/2006/11/17/359.
+ Sped-up version of SRCU at https://lore.kernel.org/r/20061118002845.GF2632@us.ibm.com.
}
}
@@ -1629,7 +1629,7 @@ Revised:
,year="2006"
,day=19
,note="Available:
-\url{http://lkml.org/lkml/2006/11/19/69}
+\url{https://lore.kernel.org/r/20061119190027.GA3676@oleg}
[Viewed May 28, 2007]"
,annotation={
First cut of QRCU. Expanded/corrected versions followed.
@@ -1644,7 +1644,7 @@ Revised:
,year="2006"
,day=30
,note="Available:
-\url{http://lkml.org/lkml/2006/11/29/330}
+\url{https://lore.kernel.org/r/20061130015714.GC1350@oleg}
[Viewed November 26, 2008]"
,annotation={
Expanded/corrected version of QRCU.
@@ -1709,7 +1709,7 @@ Revised:
,year="2007"
,day=3
,note="Available:
-\url{http://lkml.org/lkml/2007/1/3/112}
+\url{https://lore.kernel.org/r/20070103152738.GA16063@localdomain}
[Viewed May 28, 2007]"
,annotation={
Patch for list_splice_rcu().
@@ -1737,7 +1737,7 @@ Revised:
,year="2007"
,day=28
,note="Available:
-\url{http://lkml.org/lkml/2007/1/28/34}
+\url{https://lore.kernel.org/r/20070128120509.719287000@programming.kicks-ass.net}
[Viewed March 27, 2008]"
,annotation={
RCU-like implementation for frequent updaters and rare readers(!).
@@ -1767,7 +1767,7 @@ Revised:
,year="2007"
,day=24
,note="Available:
-\url{http://lkml.org/lkml/2007/2/25/18}
+\url{https://lore.kernel.org/r/20070225062349.GA17468@linux.vnet.ibm.com}
[Viewed March 27, 2008]"
,annotation={
Patch for QRCU supplying lock-free fast path.
@@ -1846,7 +1846,7 @@ Revised:
,annotation={
LWN article describing Promela and spin, and also using Oleg
Nesterov's QRCU as an example (with Paul McKenney's fastpath).
- Merged patch at: http://lkml.org/lkml/2007/2/25/18
+ Merged patch at: https://lore.kernel.org/r/20070225062349.GA17468@linux.vnet.ibm.com
}
}
@@ -1885,7 +1885,7 @@ Revised:
,day="10"
,year="2007"
,note="Available:
-\url{http://lkml.org/lkml/2007/9/10/213}
+\url{https://lore.kernel.org/r/20070910183004.GA3299@linux.vnet.ibm.com}
[Viewed October 25, 2007]"
,annotation={
Final patch for preemptable RCU to -rt. (Later patches were
@@ -1933,7 +1933,7 @@ Revised:
,day="20"
,year="2007"
,note="Available:
-\url{http://lkml.org/lkml/2007/12/20/244}
+\url{https://lore.kernel.org/r/20071220142540.GB22523@Krystal}
[Viewed March 27, 2008]"
,annotation={
Request for call_rcu_sched() and rcu_barrier_sched().
@@ -2013,7 +2013,7 @@ Revised:
,day="29"
,year="2008"
,note="Available:
-\url{http://lkml.org/lkml/2008/1/29/208}
+\url{https://lore.kernel.org/r/Pine.LNX.4.58.0801291113350.20371@gandalf.stny.rr.com}
[Viewed March 27, 2008]"
,annotation={
Patch that prevents preemptible RCU from unnecessarily waking
@@ -2028,7 +2028,7 @@ Revised:
,day="1"
,year="2008"
,note="Available:
-\url{http://lkml.org/lkml/2008/2/2/255}
+\url{https://lore.kernel.org/r/20080202214124.GA28612@linux.vnet.ibm.com}
[Viewed October 18, 2008]"
,annotation={
Explanation of compilers violating dependency ordering.
@@ -2088,7 +2088,7 @@ lot of {Linux} into your technology!!!"
,day="3"
,year="2008"
,note="Available:
-\url{http://lkml.org/lkml/2008/6/2/539}
+\url{https://lore.kernel.org/r/4844BE83.5010401@cn.fujitsu.com}
[Viewed December 10, 2008]"
,annotation={
Updated RCU classic algorithm. Introduced multi-tailed list
@@ -2122,7 +2122,7 @@ lot of {Linux} into your technology!!!"
,day="21"
,year="2008"
,note="Available:
-\url{http://lkml.org/lkml/2008/8/21/336}
+\url{https://lore.kernel.org/r/48AD8969.7060900@colorfullife.com}
[Viewed December 8, 2008]"
,annotation={
State-based RCU. One key thing that this patch does is to
@@ -2137,7 +2137,7 @@ lot of {Linux} into your technology!!!"
,day="6"
,year="2008"
,note="Available:
-\url{http://lkml.org/lkml/2008/9/6/86}
+\url{https://lore.kernel.org/r/48C2B1D2.5070801@colorfullife.com}
[Viewed December 8, 2008]"
,annotation={
Manfred notes a fix required to my attempt to separate irq
@@ -2183,7 +2183,7 @@ lot of {Linux} into your technology!!!"
,day="14"
,year="2009"
,note="Available:
-\url{http://lkml.org/lkml/2009/1/14/449}
+\url{https://lore.kernel.org/r/20090114202044.GJ6734@linux.vnet.ibm.com}
[Viewed January 15, 2009]"
,annotation={
Small-footprint implementation of RCU for uniprocessor
@@ -2218,7 +2218,7 @@ lot of {Linux} into your technology!!!"
git://lttng.org/userspace-rcu.git
http://lttng.org/cgi-bin/gitweb.cgi?p=userspace-rcu.git
http://lttng.org/urcu
- http://lkml.org/lkml/2009/2/5/572
+ https://lore.kernel.org/r/20090206030543.GB8560@Krystal
}
}
@@ -2258,7 +2258,7 @@ lot of {Linux} into your technology!!!"
,day="25"
,year="2009"
,note="Available:
-\url{http://lkml.org/lkml/2009/6/25/306}
+\url{https://lore.kernel.org/r/20090625160706.GA9467@linux.vnet.ibm.com}
[Viewed August 16, 2009]"
,annotation={
First posting of expedited RCU to be accepted into -tip.
@@ -2272,7 +2272,7 @@ lot of {Linux} into your technology!!!"
,day="23"
,year="2009"
,note="Available:
-\url{http://lkml.org/lkml/2009/7/23/294}
+\url{https://lore.kernel.org/r/20090724001429.GA17374@linux.vnet.ibm.com}
[Viewed August 15, 2009]"
,annotation={
First posting of simple and fast preemptable RCU.
@@ -2350,7 +2350,7 @@ lot of {Linux} into your technology!!!"
,month="December"
,year="2009"
,note="Available:
-\url{http://lkml.org/lkml/2009/10/18/129}
+\url{https://lore.kernel.org/r/20091018232918.GA7385@Krystal}
[Viewed December 29, 2009]"
,annotation={
Mathieu proposed defer_rcu() with fixed-size per-thread pool
@@ -2518,7 +2518,7 @@ lot of {Linux} into your technology!!!"
,month="January"
,year="2011"
,note="Available:
-\url{https://lkml.org/lkml/2011/1/18/322}
+\url{https://lore.kernel.org/r/AANLkTimajU0x1v6y3rH2+jr-bZ=tNLs1S_agXdGGAa3S@mail.gmail.com}
[Viewed March 4, 2011]"
,annotation={
"The RCU-based name lookup is at the other end of the spectrum - the
diff --git a/Documentation/RCU/checklist.rst b/Documentation/RCU/checklist.rst
index bb7128eb322e..1030119294d0 100644
--- a/Documentation/RCU/checklist.rst
+++ b/Documentation/RCU/checklist.rst
@@ -70,7 +70,7 @@ over a rather long period of time, but improvements are always welcome!
is less readable and prevents lockdep from detecting locking issues.
Letting RCU-protected pointers "leak" out of an RCU read-side
- critical section is every bid as bad as letting them leak out
+ critical section is every bit as bad as letting them leak out
from under a lock. Unless, of course, you have arranged some
other means of protection, such as a lock or a reference count
-before- letting them out of the RCU read-side critical section.
@@ -129,9 +129,7 @@ over a rather long period of time, but improvements are always welcome!
accesses. The rcu_dereference() primitive ensures that
the CPU picks up the pointer before it picks up the data
that the pointer points to. This really is necessary
- on Alpha CPUs. If you don't believe me, see:
-
- http://www.openvms.compaq.com/wizard/wiz_2637.html
+ on Alpha CPUs.
The rcu_dereference() primitive is also an excellent
documentation aid, letting the person reading the
@@ -214,9 +212,9 @@ over a rather long period of time, but improvements are always welcome!
the rest of the system.
7. As of v4.20, a given kernel implements only one RCU flavor,
- which is RCU-sched for PREEMPT=n and RCU-preempt for PREEMPT=y.
+ which is RCU-sched for PREEMPTION=n and RCU-preempt for PREEMPTION=y.
If the updater uses call_rcu() or synchronize_rcu(),
- then the corresponding readers my use rcu_read_lock() and
+ then the corresponding readers may use rcu_read_lock() and
rcu_read_unlock(), rcu_read_lock_bh() and rcu_read_unlock_bh(),
or any pair of primitives that disables and re-enables preemption,
for example, rcu_read_lock_sched() and rcu_read_unlock_sched().
diff --git a/Documentation/RCU/rcubarrier.rst b/Documentation/RCU/rcubarrier.rst
index f64f4413a47c..3b4a24877496 100644
--- a/Documentation/RCU/rcubarrier.rst
+++ b/Documentation/RCU/rcubarrier.rst
@@ -9,7 +9,7 @@ RCU (read-copy update) is a synchronization mechanism that can be thought
of as a replacement for read-writer locking (among other things), but with
very low-overhead readers that are immune to deadlock, priority inversion,
and unbounded latency. RCU read-side critical sections are delimited
-by rcu_read_lock() and rcu_read_unlock(), which, in non-CONFIG_PREEMPT
+by rcu_read_lock() and rcu_read_unlock(), which, in non-CONFIG_PREEMPTION
kernels, generate no code whatsoever.
This means that RCU writers are unaware of the presence of concurrent
@@ -329,10 +329,10 @@ Answer: This cannot happen. The reason is that on_each_cpu() has its last
to smp_call_function() and further to smp_call_function_on_cpu(),
causing this latter to spin until the cross-CPU invocation of
rcu_barrier_func() has completed. This by itself would prevent
- a grace period from completing on non-CONFIG_PREEMPT kernels,
+ a grace period from completing on non-CONFIG_PREEMPTION kernels,
since each CPU must undergo a context switch (or other quiescent
state) before the grace period can complete. However, this is
- of no use in CONFIG_PREEMPT kernels.
+ of no use in CONFIG_PREEMPTION kernels.
Therefore, on_each_cpu() disables preemption across its call
to smp_call_function() and also across the local call to
diff --git a/Documentation/RCU/stallwarn.rst b/Documentation/RCU/stallwarn.rst
index c9ab6af4d3be..7148e9be08c3 100644
--- a/Documentation/RCU/stallwarn.rst
+++ b/Documentation/RCU/stallwarn.rst
@@ -25,7 +25,7 @@ warnings:
- A CPU looping with bottom halves disabled.
-- For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
+- For !CONFIG_PREEMPTION kernels, a CPU looping anywhere in the kernel
without invoking schedule(). If the looping in the kernel is
really expected and desirable behavior, you might need to add
some calls to cond_resched().
@@ -44,7 +44,7 @@ warnings:
result in the ``rcu_.*kthread starved for`` console-log message,
which will include additional debugging information.
-- A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
+- A CPU-bound real-time task in a CONFIG_PREEMPTION kernel, which might
happen to preempt a low-priority task in the middle of an RCU
read-side critical section. This is especially damaging if
that low-priority task is not permitted to run on any other CPU,
@@ -92,7 +92,9 @@ warnings:
buggy timer hardware through bugs in the interrupt or exception
path (whether hardware, firmware, or software) through bugs
in Linux's timer subsystem through bugs in the scheduler, and,
- yes, even including bugs in RCU itself.
+ yes, even including bugs in RCU itself. It can also result in
+ the ``rcu_.*timer wakeup didn't happen for`` console-log message,
+ which will include additional debugging information.
- A bug in the RCU implementation.
@@ -292,6 +294,25 @@ kthread is waiting for a short timeout, the "state" precedes value of the
task_struct ->state field, and the "cpu" indicates that the grace-period
kthread last ran on CPU 5.
+If the relevant grace-period kthread does not wake from FQS wait in a
+reasonable time, then the following additional line is printed::
+
+ kthread timer wakeup didn't happen for 23804 jiffies! g7076 f0x0 RCU_GP_WAIT_FQS(5) ->state=0x402
+
+The "23804" indicates that kthread's timer expired more than 23 thousand
+jiffies ago. The rest of the line has meaning similar to the kthread
+starvation case.
+
+Additionally, the following line is printed::
+
+ Possible timer handling issue on cpu=4 timer-softirq=11142
+
+Here "cpu" indicates that the grace-period kthread last ran on CPU 4,
+where it queued the fqs timer. The number following the "timer-softirq"
+is the current ``TIMER_SOFTIRQ`` count on cpu 4. If this value does not
+change on successive RCU CPU stall warnings, there is further reason to
+suspect a timer problem.
+
Multiple Warnings From One Stall
================================
diff --git a/Documentation/RCU/whatisRCU.rst b/Documentation/RCU/whatisRCU.rst
index 1a4723f48bd9..17e95ab2a201 100644
--- a/Documentation/RCU/whatisRCU.rst
+++ b/Documentation/RCU/whatisRCU.rst
@@ -683,7 +683,7 @@ Quick Quiz #1:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This section presents a "toy" RCU implementation that is based on
"classic RCU". It is also short on performance (but only for updates) and
-on features such as hotplug CPU and the ability to run in CONFIG_PREEMPT
+on features such as hotplug CPU and the ability to run in CONFIG_PREEMPTION
kernels. The definitions of rcu_dereference() and rcu_assign_pointer()
are the same as those shown in the preceding section, so they are omitted.
::
@@ -739,7 +739,7 @@ Quick Quiz #2:
Quick Quiz #3:
If it is illegal to block in an RCU read-side
critical section, what the heck do you do in
- PREEMPT_RT, where normal spinlocks can block???
+ CONFIG_PREEMPT_RT, where normal spinlocks can block???
:ref:`Answers to Quick Quiz <8_whatisRCU>`
@@ -1093,7 +1093,7 @@ Quick Quiz #2:
overhead is **negative**.
Answer:
- Imagine a single-CPU system with a non-CONFIG_PREEMPT
+ Imagine a single-CPU system with a non-CONFIG_PREEMPTION
kernel where a routing table is used by process-context
code, but can be updated by irq-context code (for example,
by an "ICMP REDIRECT" packet). The usual way of handling
@@ -1120,10 +1120,10 @@ Answer:
Quick Quiz #3:
If it is illegal to block in an RCU read-side
critical section, what the heck do you do in
- PREEMPT_RT, where normal spinlocks can block???
+ CONFIG_PREEMPT_RT, where normal spinlocks can block???
Answer:
- Just as PREEMPT_RT permits preemption of spinlock
+ Just as CONFIG_PREEMPT_RT permits preemption of spinlock
critical sections, it permits preemption of RCU
read-side critical sections. It also permits
spinlocks blocking while in RCU read-side critical
diff --git a/Documentation/accounting/cgroupstats.rst b/Documentation/accounting/cgroupstats.rst
index b9afc48f4ea2..85186e7d4035 100644
--- a/Documentation/accounting/cgroupstats.rst
+++ b/Documentation/accounting/cgroupstats.rst
@@ -3,8 +3,8 @@ Control Groupstats
==================
Control Groupstats is inspired by the discussion at
-http://lkml.org/lkml/2007/4/11/187 and implements per cgroup statistics as
-suggested by Andrew Morton in http://lkml.org/lkml/2007/4/11/263.
+https://lore.kernel.org/r/461CF883.2030308@sw.ru and implements per cgroup statistics as
+suggested by Andrew Morton in https://lore.kernel.org/r/20070411114927.1277d7c9.akpm@linux-foundation.org.
Per cgroup statistics infrastructure re-uses code from the taskstats
interface. A new set of cgroup operations are registered with commands
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index 261b7b4cca1f..35314b63008c 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -226,10 +226,11 @@ Configuring the kernel
all module options to built in (=y) options. You can
also preserve modules by LMC_KEEP.
- "make kvmconfig" Enable additional options for kvm guest kernel support.
+ "make kvm_guest.config" Enable additional options for kvm guest kernel
+ support.
- "make xenconfig" Enable additional options for xen dom0 guest kernel
- support.
+ "make xen.config" Enable additional options for xen dom0 guest kernel
+ support.
"make tinyconfig" Configure the tiniest possible kernel.
diff --git a/Documentation/admin-guide/auxdisplay/cfag12864b.rst b/Documentation/admin-guide/auxdisplay/cfag12864b.rst
index 18c2865bd322..da385d851acc 100644
--- a/Documentation/admin-guide/auxdisplay/cfag12864b.rst
+++ b/Documentation/admin-guide/auxdisplay/cfag12864b.rst
@@ -3,7 +3,7 @@ cfag12864b LCD Driver Documentation
===================================
:License: GPLv2
-:Author & Maintainer: Miguel Ojeda Sandonis
+:Author & Maintainer: Miguel Ojeda <ojeda@kernel.org>
:Date: 2006-10-27
diff --git a/Documentation/admin-guide/auxdisplay/ks0108.rst b/Documentation/admin-guide/auxdisplay/ks0108.rst
index c0b7faf73136..a7d3fe509373 100644
--- a/Documentation/admin-guide/auxdisplay/ks0108.rst
+++ b/Documentation/admin-guide/auxdisplay/ks0108.rst
@@ -3,7 +3,7 @@ ks0108 LCD Controller Driver Documentation
==========================================
:License: GPLv2
-:Author & Maintainer: Miguel Ojeda Sandonis
+:Author & Maintainer: Miguel Ojeda <ojeda@kernel.org>
:Date: 2006-10-27
diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
index 52688ae34461..0936412e044e 100644
--- a/Documentation/admin-guide/cgroup-v1/memory.rst
+++ b/Documentation/admin-guide/cgroup-v1/memory.rst
@@ -963,21 +963,21 @@ References
2. Singh, Balbir. Memory Controller (RSS Control),
http://lwn.net/Articles/222762/
3. Emelianov, Pavel. Resource controllers based on process cgroups
- http://lkml.org/lkml/2007/3/6/198
+ https://lore.kernel.org/r/45ED7DEC.7010403@sw.ru
4. Emelianov, Pavel. RSS controller based on process cgroups (v2)
- http://lkml.org/lkml/2007/4/9/78
+ https://lore.kernel.org/r/461A3010.90403@sw.ru
5. Emelianov, Pavel. RSS controller based on process cgroups (v3)
- http://lkml.org/lkml/2007/5/30/244
+ https://lore.kernel.org/r/465D9739.8070209@openvz.org
6. Menage, Paul. Control Groups v10, http://lwn.net/Articles/236032/
7. Vaidyanathan, Srinivasan, Control Groups: Pagecache accounting and control
subsystem (v3), http://lwn.net/Articles/235534/
8. Singh, Balbir. RSS controller v2 test results (lmbench),
- http://lkml.org/lkml/2007/5/17/232
+ https://lore.kernel.org/r/464C95D4.7070806@linux.vnet.ibm.com
9. Singh, Balbir. RSS controller v2 AIM9 results
- http://lkml.org/lkml/2007/5/18/1
+ https://lore.kernel.org/r/464D267A.50107@linux.vnet.ibm.com
10. Singh, Balbir. Memory controller v6 test results,
- http://lkml.org/lkml/2007/8/19/36
+ https://lore.kernel.org/r/20070819094658.654.84837.sendpatchset@balbir-laptop
11. Singh, Balbir. Memory controller introduction (v6),
- http://lkml.org/lkml/2007/8/17/69
+ https://lore.kernel.org/r/20070817084228.26003.12568.sendpatchset@balbir-laptop
12. Corbet, Jonathan, Controlling memory use in cgroups,
http://lwn.net/Articles/243795/
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 63521cd36ce5..64c62b979f2f 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1,3 +1,5 @@
+.. _cgroup-v2:
+
================
Control Group v2
================
@@ -172,7 +174,6 @@ disabling controllers in v1 and make them always available in v2.
cgroup v2 currently supports the following mount options.
nsdelegate
-
Consider cgroup namespaces as delegation boundaries. This
option is system wide and can only be set on mount or modified
through remount from the init namespace. The mount option is
@@ -180,7 +181,6 @@ cgroup v2 currently supports the following mount options.
Delegation section for details.
memory_localevents
-
Only populate memory.events with data for the current cgroup,
and not any subtrees. This is legacy behaviour, the default
behaviour without this option is to include subtree counts.
@@ -189,7 +189,6 @@ cgroup v2 currently supports the following mount options.
option is ignored on non-init namespace mounts.
memory_recursiveprot
-
Recursively apply memory.min and memory.low protection to
entire subtrees, without requiring explicit downward
propagation into leaf cgroups. This allows protecting entire
@@ -786,7 +785,6 @@ Core Interface Files
All cgroup core files are prefixed with "cgroup."
cgroup.type
-
A read-write single value file which exists on non-root
cgroups.
@@ -954,6 +952,8 @@ All cgroup core files are prefixed with "cgroup."
Controllers
===========
+.. _cgroup-v2-cpu:
+
CPU
---
@@ -1029,7 +1029,7 @@ All time durations are in microseconds.
one number is written, $MAX is updated.
cpu.pressure
- A read-only nested-key file which exists on non-root cgroups.
+ A read-write nested-keyed file.
Shows pressure stall information for CPU. See
:ref:`Documentation/accounting/psi.rst <psi>` for details.
@@ -1259,9 +1259,9 @@ PAGE_SIZE multiple when read back.
can show up in the middle. Don't rely on items remaining in a
fixed position; use the keys to look up specific values!
- If the entry has no per-node counter(or not show in the
- mempry.numa_stat). We use 'npn'(non-per-node) as the tag
- to indicate that it will not show in the mempry.numa_stat.
+	  If the entry has no per-node counter (or is not shown in
+	  memory.numa_stat), we use 'npn' (non-per-node) as the tag
+	  to indicate that it will not show up in memory.numa_stat.
anon
Amount of memory used in anonymous mappings such as
@@ -1277,11 +1277,11 @@ PAGE_SIZE multiple when read back.
pagetables
Amount of memory allocated for page tables.
- percpu(npn)
+ percpu (npn)
Amount of memory used for storing per-cpu kernel
data structures.
- sock(npn)
+ sock (npn)
Amount of memory used in network transmission buffers
shmem
@@ -1299,6 +1299,10 @@ PAGE_SIZE multiple when read back.
Amount of cached filesystem data that was modified and
is currently being written back to disk
+ swapcached
+ Amount of swap cached in memory. The swapcache is accounted
+ against both memory and swap usage.
+
anon_thp
Amount of memory used in anonymous mappings backed by
transparent hugepages
@@ -1329,7 +1333,7 @@ PAGE_SIZE multiple when read back.
Part of "slab" that cannot be reclaimed on memory
pressure.
- slab(npn)
+ slab (npn)
Amount of memory used for storing in-kernel data
structures.
@@ -1357,39 +1361,39 @@ PAGE_SIZE multiple when read back.
workingset_nodereclaim
Number of times a shadow node has been reclaimed
- pgfault(npn)
+ pgfault (npn)
Total number of page faults incurred
- pgmajfault(npn)
+ pgmajfault (npn)
Number of major page faults incurred
- pgrefill(npn)
+ pgrefill (npn)
Amount of scanned pages (in an active LRU list)
- pgscan(npn)
+ pgscan (npn)
Amount of scanned pages (in an inactive LRU list)
- pgsteal(npn)
+ pgsteal (npn)
Amount of reclaimed pages
- pgactivate(npn)
+ pgactivate (npn)
Amount of pages moved to the active LRU list
- pgdeactivate(npn)
+ pgdeactivate (npn)
Amount of pages moved to the inactive LRU list
- pglazyfree(npn)
+ pglazyfree (npn)
Amount of pages postponed to be freed under memory pressure
- pglazyfreed(npn)
+ pglazyfreed (npn)
Amount of reclaimed lazyfree pages
- thp_fault_alloc(npn)
+ thp_fault_alloc (npn)
Number of transparent hugepages which were allocated to satisfy
a page fault. This counter is not present when CONFIG_TRANSPARENT_HUGEPAGE
is not set.
- thp_collapse_alloc(npn)
+ thp_collapse_alloc (npn)
Number of transparent hugepages which were allocated to allow
collapsing an existing range of pages. This counter is not
present when CONFIG_TRANSPARENT_HUGEPAGE is not set.
@@ -1475,7 +1479,7 @@ PAGE_SIZE multiple when read back.
reduces the impact on the workload and memory management.
memory.pressure
- A read-only nested-key file which exists on non-root cgroups.
+ A read-only nested-keyed file.
Shows pressure stall information for memory. See
:ref:`Documentation/accounting/psi.rst <psi>` for details.
@@ -1558,7 +1562,7 @@ IO Interface Files
8:0 rbytes=90430464 wbytes=299008000 rios=8950 wios=1252 dbytes=50331648 dios=3021
io.cost.qos
- A read-write nested-keyed file with exists only on the root
+ A read-write nested-keyed file which exists only on the root
cgroup.
This file configures the Quality of Service of the IO cost
@@ -1613,7 +1617,7 @@ IO Interface Files
automatic mode can be restored by setting "ctrl" to "auto".
io.cost.model
- A read-write nested-keyed file with exists only on the root
+ A read-write nested-keyed file which exists only on the root
cgroup.
This file configures the cost model of the IO cost model based
@@ -1714,7 +1718,7 @@ IO Interface Files
8:16 rbps=2097152 wbps=max riops=max wiops=max
io.pressure
- A read-only nested-key file which exists on non-root cgroups.
+ A read-only nested-keyed file.
Shows pressure stall information for IO. See
:ref:`Documentation/accounting/psi.rst <psi>` for details.
@@ -2000,10 +2004,12 @@ Cpuset Interface Files
cpuset-enabled cgroups. This flag is owned by the parent cgroup
and is not delegatable.
- It accepts only the following input values when written to.
+ It accepts only the following input values when written to.
- "root" - a partition root
- "member" - a non-root member of a partition
+ ======== ================================
+ "root" a partition root
+ "member" a non-root member of a partition
+ ======== ================================
When set to be a partition root, the current cgroup is the
root of a new partition or scheduling domain that comprises
@@ -2044,9 +2050,11 @@ Cpuset Interface Files
root to change. On read, the "cpuset.sched.partition" file
can show the following values.
- "member" Non-root member of a partition
- "root" Partition root
- "root invalid" Invalid partition root
+ ============== ==============================
+ "member" Non-root member of a partition
+ "root" Partition root
+ "root invalid" Invalid partition root
+ ============== ==============================
It is a partition root if the first 2 partition root conditions
above are true and at least one CPU from "cpuset.cpus" is
@@ -2090,7 +2098,7 @@ If the program returns 0, the attempt fails with -EPERM, otherwise
it succeeds.
An example of BPF_CGROUP_DEVICE program may be found in the kernel
-source tree in the tools/testing/selftests/bpf/dev_cgroup.c file.
+source tree in the tools/testing/selftests/bpf/progs/dev_cgroup.c file.
RDMA
@@ -2219,7 +2227,7 @@ Without cgroup namespace, the "/proc/$PID/cgroup" file shows the
complete path of the cgroup of a process. In a container setup where
a set of cgroups and namespaces are intended to isolate processes the
"/proc/$PID/cgroup" file may leak potential system level information
-to the isolated processes. For Example::
+to the isolated processes. For example::
# cat /proc/self/cgroup
0::/batchjobs/container_id1
diff --git a/Documentation/admin-guide/cifs/authors.rst b/Documentation/admin-guide/cifs/authors.rst
index b02d6dd6c070..5c1d2f0fa7d1 100644
--- a/Documentation/admin-guide/cifs/authors.rst
+++ b/Documentation/admin-guide/cifs/authors.rst
@@ -5,10 +5,10 @@ Authors
Original Author
---------------
-Steve French (sfrench@samba.org)
+Steve French (smfrench@gmail.com, sfrench@samba.org)
The author wishes to express his appreciation and thanks to:
-Andrew Tridgell (Samba team) for his early suggestions about smb/cifs VFS
+Andrew Tridgell (Samba team) for his early suggestions about SMB/CIFS VFS
improvements. Thanks to IBM for allowing me time and test resources to pursue
this project, to Jim McDonough from IBM (and the Samba Team) for his help, to
the IBM Linux JFS team for explaining many esoteric Linux filesystem features.
@@ -51,7 +51,7 @@ Patch Contributors
- Ronnie Sahlberg (for SMB3 xattr work, bug fixes, and lots of great work on compounding)
- Shirish Pargaonkar (for many ACL patches over the years)
- Sachin Prabhu (many bug fixes, including for reconnect, copy offload and security)
-- Paulo Alcantara
+- Paulo Alcantara (for some excellent work in DFS, and in booting from SMB3)
- Long Li (some great work on RDMA, SMB Direct)
diff --git a/Documentation/admin-guide/cifs/changes.rst b/Documentation/admin-guide/cifs/changes.rst
index 71f2ecb62299..3147bbae9c43 100644
--- a/Documentation/admin-guide/cifs/changes.rst
+++ b/Documentation/admin-guide/cifs/changes.rst
@@ -3,6 +3,7 @@ Changes
=======
See https://wiki.samba.org/index.php/LinuxCIFSKernel for summary
-information (that may be easier to read than parsing the output of
-"git log fs/cifs") about fixes/improvements to CIFS/SMB2/SMB3 support (changes
+information about fixes/improvements to CIFS/SMB2/SMB3 support (changes
to cifs.ko module) by kernel version (and cifs internal module version).
+This may be easier to read than parsing the output of "git log fs/cifs"
+by release.
diff --git a/Documentation/admin-guide/cifs/introduction.rst b/Documentation/admin-guide/cifs/introduction.rst
index cc2851d93d17..53ea62906aa5 100644
--- a/Documentation/admin-guide/cifs/introduction.rst
+++ b/Documentation/admin-guide/cifs/introduction.rst
@@ -7,19 +7,19 @@ Introduction
protocol which was the successor to the Server Message Block
(SMB) protocol, the native file sharing mechanism for most early
PC operating systems. New and improved versions of CIFS are now
- called SMB2 and SMB3. Use of SMB3 (and later, including SMB3.1.1)
- is strongly preferred over using older dialects like CIFS due to
- security reasons. All modern dialects, including the most recent,
- SMB3.1.1 are supported by the CIFS VFS module. The SMB3 protocol
- is implemented and supported by all major file servers
- such as all modern versions of Windows (including Windows 2016
- Server), as well as by Samba (which provides excellent
- CIFS/SMB2/SMB3 server support and tools for Linux and many other
- operating systems). Apple systems also support SMB3 well, as
- do most Network Attached Storage vendors, so this network
- filesystem client can mount to a wide variety of systems.
- It also supports mounting to the cloud (for example
- Microsoft Azure), including the necessary security features.
+ called SMB2 and SMB3. Use of SMB3 (and later, including SMB3.1.1,
+ the most current dialect) is strongly preferred over using older
+ dialects like CIFS due to security reasons. All modern dialects,
+ including the most recent, SMB3.1.1, are supported by the CIFS VFS
+ module. The SMB3 protocol is implemented and supported by all major
+ file servers such as Windows (including Windows 2019 Server), as
+ well as by Samba (which provides excellent CIFS/SMB2/SMB3 server
+ support and tools for Linux and many other operating systems).
+ Apple systems also support SMB3 well, as do most Network Attached
+ Storage vendors, so this network filesystem client can mount to a
+ wide variety of systems. It also supports mounting to the cloud
+ (for example Microsoft Azure), including the necessary security
+ features.
The intent of this module is to provide the most advanced network
file system function for SMB3 compliant servers, including advanced
@@ -27,8 +27,8 @@ Introduction
POSIX compliance, secure per-user session establishment, encryption,
high performance safe distributed caching (leases/oplocks), optional packet
signing, large files, Unicode support and other internationalization
- improvements. Since both Samba server and this filesystem client support
- the CIFS Unix extensions (and in the future SMB3 POSIX extensions),
+ improvements. Since both Samba server and this filesystem client support the
+ CIFS Unix extensions, and the Linux client also supports SMB3 POSIX extensions,
the combination can provide a reasonable alternative to other network and
cluster file systems for fileserving in some Linux to Linux environments,
not just in Linux to Windows (or Linux to Mac) environments.
diff --git a/Documentation/admin-guide/cifs/todo.rst b/Documentation/admin-guide/cifs/todo.rst
index 25f11576e7b9..2646ed2e2d3e 100644
--- a/Documentation/admin-guide/cifs/todo.rst
+++ b/Documentation/admin-guide/cifs/todo.rst
@@ -13,24 +13,26 @@ is a partial list of the known problems and missing features:
a) SMB3 (and SMB3.1.1) missing optional features:
- - multichannel (started), integration with RDMA
- - directory leases (improved metadata caching), started (root dir only)
+ - multichannel (partially integrated), integration of multichannel with RDMA
+ - directory leases (improved metadata caching). Currently only implemented for root dir
- T10 copy offload ie "ODX" (copy chunk, and "Duplicate Extents" ioctl
currently the only two server side copy mechanisms supported)
b) improved sparse file support (fiemap and SEEK_HOLE are implemented
- but additional features would be supportable by the protocol).
+ but additional features would be supportable by the protocol such
+ as FALLOC_FL_COLLAPSE_RANGE and FALLOC_FL_INSERT_RANGE)
c) Directory entry caching relies on a 1 second timer, rather than
using Directory Leases, currently only the root file handle is cached longer
+ by leveraging Directory Leases
-d) quota support (needs minor kernel change since quota calls
- to make it to network filesystems or deviceless filesystems)
+d) quota support (needs minor kernel change since quota calls otherwise
+ won't make it to network filesystems or deviceless filesystems).
e) Additional use cases can be optimized to use "compounding" (e.g.
open/query/close and open/setinfo/close) to reduce the number of
roundtrips to the server and improve performance. Various cases
- (stat, statfs, create, unlink, mkdir) already have been improved by
+ (stat, statfs, create, unlink, mkdir, xattrs) already have been improved by
using compounding but more can be done. In addition we could
significantly reduce redundant opens by using deferred close (with
handle caching leases) and better using reference counters on file
@@ -60,7 +62,9 @@ k) Add tools to take advantage of more smb3 specific ioctls and features
metadata attributes easier from tools (e.g. extending what was done
in smb-info tool).
-l) encrypted file support
+l) encrypted file support (currently the attribute showing the file is
+ encrypted on the server is reported, but changing the attribute is not
+ supported).
m) improved stats gathering tools (perhaps integration with nfsometer?)
to extend and make easier to use what is currently in /proc/fs/cifs/Stats
@@ -69,14 +73,13 @@ n) Add support for claims based ACLs ("DAC")
o) mount helper GUI (to simplify the various configuration options on mount)
-p) Add support for witness protocol (perhaps ioctl to cifs.ko from user space
- tool listening on witness protocol RPC) to allow for notification of share
- move, server failover, and server adapter changes. And also improve other
- failover scenarios, e.g. when client knows multiple DFS entries point to
- different servers, and the server we are connected to has gone down.
+p) Expand support for witness protocol to allow for notification of share
+ move, and server network adapter changes. Currently only notifications by
+ the witness protocol for server move are supported by the Linux client.
q) Allow mount.cifs to be more verbose in reporting errors with dialect
- or unsupported feature errors.
+ or unsupported feature errors. This would now be easier due to the
+ implementation of the new mount API.
r) updating cifs documentation, and user guide.
@@ -87,11 +90,10 @@ t) split cifs and smb3 support into separate modules so legacy (and less
secure) CIFS dialect can be disabled in environments that don't need it
and simplify the code.
-v) POSIX Extensions for SMB3.1.1 (started, create and mkdir support added
- so far).
+v) Additional testing of POSIX Extensions for SMB3.1.1
w) Add support for additional strong encryption types, and additional spnego
- authentication mechanisms (see MS-SMB2)
+ authentication mechanisms (see MS-SMB2). GCM-256 is now partially implemented.
x) Finish support for SMB3.1.1 compression
diff --git a/Documentation/admin-guide/cifs/usage.rst b/Documentation/admin-guide/cifs/usage.rst
index b6d9f02bc12b..13783dc68ab7 100644
--- a/Documentation/admin-guide/cifs/usage.rst
+++ b/Documentation/admin-guide/cifs/usage.rst
@@ -83,7 +83,7 @@ and encrypted shares and stronger signing and authentication algorithms.
There are additional mount options that may be helpful for SMB3 to get
improved POSIX behavior (NB: can use vers=3.0 to force only SMB3, never 2.1):
- ``mfsymlinks`` and ``cifsacl`` and ``idsfromsid``
+ ``mfsymlinks`` and either ``cifsacl`` or ``modefromsid`` (usually with ``idsfromsid``)
Allowing User Mounts
====================
diff --git a/Documentation/admin-guide/cpu-load.rst b/Documentation/admin-guide/cpu-load.rst
index f3ada90e9ca8..21a984337080 100644
--- a/Documentation/admin-guide/cpu-load.rst
+++ b/Documentation/admin-guide/cpu-load.rst
@@ -107,7 +107,7 @@ will lead to quite erratic information inside ``/proc/stat``::
References
----------
-- http://lkml.org/lkml/2007/2/12/6
+- https://lore.kernel.org/r/loom.20070212T063225-663@post.gmane.org
- Documentation/filesystems/proc.rst (1.8)
diff --git a/Documentation/admin-guide/device-mapper/dm-crypt.rst b/Documentation/admin-guide/device-mapper/dm-crypt.rst
index 1a6753b76dbb..aa2d04d95df6 100644
--- a/Documentation/admin-guide/device-mapper/dm-crypt.rst
+++ b/Documentation/admin-guide/device-mapper/dm-crypt.rst
@@ -67,7 +67,7 @@ Parameters::
the value passed in <key_size>.
<key_type>
- Either 'logon', 'user' or 'encrypted' kernel key type.
+ Either 'logon', 'user', 'encrypted' or 'trusted' kernel key type.
<key_description>
The kernel keyring key description crypt target should look for
diff --git a/Documentation/admin-guide/device-mapper/dm-integrity.rst b/Documentation/admin-guide/device-mapper/dm-integrity.rst
index 2cc5488acbd9..8db172efa272 100644
--- a/Documentation/admin-guide/device-mapper/dm-integrity.rst
+++ b/Documentation/admin-guide/device-mapper/dm-integrity.rst
@@ -143,8 +143,8 @@ recalculate
journal_crypt:algorithm(:key) (the key is optional)
Encrypt the journal using given algorithm to make sure that the
attacker can't read the journal. You can use a block cipher here
- (such as "cbc(aes)") or a stream cipher (for example "chacha20",
- "salsa20" or "ctr(aes)").
+ (such as "cbc(aes)") or a stream cipher (for example "chacha20"
+ or "ctr(aes)").
The journal contains history of last writes to the block device,
an attacker reading the journal could see the last sector numbers
@@ -186,6 +186,17 @@ fix_padding
space-efficient. If this option is not present, large padding is
used - that is for compatibility with older kernels.
+fix_hmac
+ Improve security of internal_hash and journal_mac:
+
+ - the section number is mixed into the mac, so that an attacker can't
+ copy sectors from one journal section to another journal section
+ - the superblock is protected by journal_mac
+ - a 16-byte salt stored in the superblock is mixed into the mac, so
+ that the attacker can't detect that two disks have the same hmac
+ key, and also to prevent the attacker from moving sectors from one
+ disk to another
+
legacy_recalculate
Allow recalculating of volumes with HMAC keys. This is disabled by
default for security reasons - an attacker could modify the volume,
diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst
index 682ab28b5c94..1132796a8d96 100644
--- a/Documentation/admin-guide/kernel-parameters.rst
+++ b/Documentation/admin-guide/kernel-parameters.rst
@@ -60,7 +60,7 @@ Note that for the special case of a range one can split the range into equal
sized groups and for each group use some amount from the beginning of that
group:
- <cpu number>-cpu number>:<used size>/<group size>
+ <cpu number>-<cpu number>:<used size>/<group size>
For example one can add to the command line following parameter:
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a10b545c2070..04545725f187 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -373,6 +373,12 @@
arcrimi= [HW,NET] ARCnet - "RIM I" (entirely mem-mapped) cards
Format: <io>,<irq>,<nodeID>
+ arm64.nobti [ARM64] Unconditionally disable Branch Target
+ Identification support
+
+ arm64.nopauth [ARM64] Unconditionally disable Pointer Authentication
+ support
+
ataflop= [HW,M68k]
atarimouse= [HW,MOUSE] Atari Mouse
@@ -600,7 +606,7 @@
kernel/dma/contiguous.c
cma_pernuma=nn[MG]
- [ARM64,KNL]
+ [ARM64,KNL,CMA]
Sets the size of kernel per-numa memory area for
contiguous memory allocations. A value of 0 disables
per-numa CMA altogether. And If this option is not
@@ -802,13 +808,14 @@
insecure, please do not use on production kernels.
debug_locks_verbose=
- [KNL] verbose self-tests
- Format=<0|1>
+ [KNL] verbose locking self-tests
+ Format: <int>
Print debugging info while doing the locking API
self-tests.
- We default to 0 (no extra messages), setting it to
- 1 will print _a lot_ more information - normally
- only useful to kernel developers.
+ Bitmask for the various LOCKTYPE_ tests. Defaults to 0
+ (no extra messages), setting it to -1 (all bits set)
+ will print _a_lot_ more information - normally only
+ useful to lockdep developers.
debug_objects [KNL] Enable object debugging
@@ -944,12 +951,6 @@
causing system reset or hang due to sending
INIT from AP to BSP.
- perf_v4_pmi= [X86,INTEL]
- Format: <bool>
- Disable Intel PMU counter freezing feature.
- The feature only exists starting from
- Arch Perfmon v4 (Skylake and newer).
-
disable_ddw [PPC/PSERIES]
Disable Dynamic DMA Window support. Use this
to workaround buggy firmware.
@@ -1433,6 +1434,11 @@
to enforce probe and suspend/resume ordering.
rpm -- Like "on", but also use to order runtime PM.
+ fw_devlink.strict=<bool>
+ [KNL] Treat all inferred dependencies as mandatory
+ dependencies. This only applies for fw_devlink=on|rpm.
+ Format: <bool>
+
gamecon.map[2|3]=
[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
support via parallel port (up to 5 devices per port)
@@ -1524,12 +1530,12 @@
hpet_mmap= [X86, HPET_MMAP] Allow userspace to mmap HPET
registers. Default set by CONFIG_HPET_MMAP_DEFAULT.
- hugetlb_cma= [HW] The size of a cma area used for allocation
+ hugetlb_cma= [HW,CMA] The size of a CMA area used for allocation
of gigantic hugepages.
Format: nn[KMGTPE]
- Reserve a cma area of given size and allocate gigantic
- hugepages using the cma allocator. If enabled, the
+ Reserve a CMA area of given size and allocate gigantic
+ hugepages using the CMA allocator. If enabled, the
boot-time allocation of gigantic hugepages is skipped.
hugepages= [HW] Number of HugeTLB pages to allocate at boot.
@@ -1673,6 +1679,12 @@
In such case C2/C3 won't be used again.
idle=nomwait: Disable mwait for CPU C-states
+ idxd.sva= [HW]
+ Format: <bool>
+ Allow force disabling of Shared Virtual Memory (SVA)
+ support for the idxd driver. By default it is set to
+ true (1).
+
ieee754= [MIPS] Select IEEE Std 754 conformance mode
Format: { strict | legacy | 2008 | relaxed }
Default: strict
@@ -1746,7 +1758,7 @@
ima_policy= [IMA]
The builtin policies to load during IMA setup.
Format: "tcb | appraise_tcb | secure_boot |
- fail_securely"
+ fail_securely | critical_data"
The "tcb" policy measures all programs exec'd, files
mmap'd for exec, and all files opened with the read
@@ -1765,6 +1777,9 @@
filesystems with the SB_I_UNVERIFIABLE_SIGNATURE
flag.
+ The "critical_data" policy measures kernel integrity
+ critical data.
+
ima_tcb [IMA] Deprecated. Use ima_policy= instead.
Load a policy which meets the needs of the Trusted
Computing Base. This means IMA will measure all
@@ -2257,6 +2272,9 @@
kvm-arm.mode=
[KVM,ARM] Select one of KVM/arm64's modes of operation.
+ nvhe: Standard nVHE-based mode, without support for
+ protected guests.
+
protected: nVHE-based mode with support for guests whose
state is kept private from the host.
Not valid if the kernel is running in EL2.
@@ -3266,9 +3284,14 @@
parameter, xsave area per process might occupy more
memory on xsaves enabled systems.
- nohlt [BUGS=ARM,SH] Tells the kernel that the sleep(SH) or
- wfi(ARM) instruction doesn't work correctly and not to
- use it. This is also useful when using JTAG debugger.
+ nohlt [ARM,ARM64,MICROBLAZE,SH] Forces the kernel to busy wait
+ in do_idle() and not use the arch_cpu_idle()
+ implementation; requires CONFIG_GENERIC_IDLE_POLL_SETUP
+ to be effective. This is useful on platforms where the
+ sleep(SH) or wfi(ARM,ARM64) instructions do not work
+ correctly or when doing power measurements to evaluate
+ the impact of the sleep instructions. This is also
+ useful when using a JTAG debugger.
no_file_caps Tells the kernel not to honor file capabilities. The
only way then for a file to be executed with privilege
@@ -3281,6 +3304,21 @@
in certain environments such as networked servers or
real-time systems.
+ no_hash_pointers
+ Force pointers printed to the console or buffers to be
+ unhashed. By default, when a pointer is printed via %p
+ format string, that pointer is "hashed", i.e. obscured
+ by hashing the pointer value. This is a security feature
+ that hides actual kernel addresses from unprivileged
+ users, but it also makes debugging the kernel more
+ difficult since unequal pointers can no longer be
+ compared. However, if this command-line option is
+ specified, then all normal pointers will have their true
+ value printed. Pointers printed via %pK may still be
+ hashed. This option should only be specified when
+ debugging the kernel. Please do not use on production
+ kernels.
+
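
A hypothetical out-of-tree test module (illustration only, not part of
this parameter description) makes the effect visible by printing the
same address with the hashed and raw specifiers::

  #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/printk.h>

  static int __init ptr_hash_demo_init(void)
  {
          static int object;

          /* %p is hashed unless no_hash_pointers was given on the command line. */
          pr_info("hashed:   %p\n", &object);
          /* %px always prints the raw address; use only while debugging. */
          pr_info("unhashed: %px\n", &object);
          return 0;
  }

  static void __exit ptr_hash_demo_exit(void)
  {
  }

  module_init(ptr_hash_demo_init);
  module_exit(ptr_hash_demo_exit);
  MODULE_LICENSE("GPL");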
nohibernate [HIBERNATION] Disable hibernation and resume.
nohz= [KNL] Boottime enable/disable dynamic ticks
@@ -3458,20 +3496,6 @@
For example, to override I2C bus2:
omap_mux=i2c2_scl.i2c2_scl=0x100,i2c2_sda.i2c2_sda=0x100
- oprofile.timer= [HW]
- Use timer interrupt instead of performance counters
-
- oprofile.cpu_type= Force an oprofile cpu type
- This might be useful if you have an older oprofile
- userland or if you want common events.
- Format: { arch_perfmon }
- arch_perfmon: [X86] Force use of architectural
- perfmon on Intel CPUs instead of the
- CPU specific event set.
- timer: [X86] Force use of architectural NMI
- timer mode (see also oprofile.timer
- for generic hr timer mode)
-
oops=panic Always panic on oopses. Default is to just kill the
process, but there is a small probability of
deadlocking the machine.
@@ -3916,6 +3940,13 @@
Format: {"off"}
Disable Hardware Transactional Memory
+ preempt= [KNL]
+ Select preemption mode if you have CONFIG_PREEMPT_DYNAMIC
+ none - Limited to cond_resched() calls
+ voluntary - Limited to cond_resched() and might_sleep() calls
+ full - Any section that isn't explicitly preempt disabled
+ can be preempted anytime.
+
print-fatal-signals=
[KNL] debug: print fatal signals
@@ -4092,6 +4123,10 @@
value, meaning that RCU_SOFTIRQ is used by default.
Specify rcutree.use_softirq=0 to use rcuc kthreads.
+ But note that CONFIG_PREEMPT_RT=y kernels disable
+ this kernel boot parameter, forcibly setting it
+ to zero.
+
rcutree.rcu_fanout_exact= [KNL]
Disable autobalancing of the rcu_node combining
tree. This is used by rcutorture, and might
@@ -4179,12 +4214,6 @@
Set wakeup interval for idle CPUs that have
RCU callbacks (RCU_FAST_NO_HZ=y).
- rcutree.rcu_idle_lazy_gp_delay= [KNL]
- Set wakeup interval for idle CPUs that have
- only "lazy" RCU callbacks (RCU_FAST_NO_HZ=y).
- Lazy RCU callbacks are those which RCU can
- prove do nothing more than free memory.
-
rcutree.rcu_kick_kthreads= [KNL]
Cause the grace-period kthread to get an extra
wake_up() if it sleeps three times longer than
@@ -4338,6 +4367,14 @@
stress RCU, they don't participate in the actual
test, hence the "fake".
+ rcutorture.nocbs_nthreads= [KNL]
+ Set number of RCU callback-offload togglers.
+ Zero (the default) disables toggling.
+
+ rcutorture.nocbs_toggle= [KNL]
+ Set the delay in milliseconds between successive
+ callback-offload toggling attempts.
+
rcutorture.nreaders= [KNL]
Set number of RCU readers. The value -1 selects
N-1, where N is the number of CPUs. A value
@@ -4470,6 +4507,13 @@
only normal grace-period primitives. No effect
on CONFIG_TINY_RCU kernels.
+ But note that CONFIG_PREEMPT_RT=y kernels enable
+ this kernel boot parameter, forcibly setting
+ it to the value one, that is, converting any
+ post-boot attempt at an expedited RCU grace
+ period to instead use normal non-expedited
+ grace-period processing.
+
rcupdate.rcu_task_ipi_delay= [KNL]
Set time in jiffies during which RCU tasks will
avoid sending IPIs, starting with the beginning
@@ -4557,6 +4601,12 @@
refscale.verbose= [KNL]
Enable additional printk() statements.
+ refscale.verbose_batched= [KNL]
+ Batch the additional printk() statements. If zero
+ (the default) or negative, print everything. Otherwise,
+ print every Nth verbose statement, where N is the value
+ specified.
+
relax_domain_level=
[KNL, SMP] Set scheduler's default relax_domain_level.
See Documentation/admin-guide/cgroup-v1/cpusets.rst.
@@ -4854,14 +4904,6 @@
last alloc / free. For more information see
Documentation/vm/slub.rst.
- slub_memcg_sysfs= [MM, SLUB]
- Determines whether to enable sysfs directories for
- memory cgroup sub-caches. 1 to enable, 0 to disable.
- The default is determined by CONFIG_SLUB_MEMCG_SYSFS_ON.
- Enabling this can lead to a very high number of debug
- directories and files being created under
- /sys/kernel/slub.
-
slub_max_order= [MM, SLUB]
Determines the maximum allowed order for slabs.
A high setting may cause OOMs due to memory
@@ -5140,6 +5182,12 @@
growing up) the main stack are reserved for no other
mapping. Default value is 256 pages.
+ stack_depot_disable= [KNL]
+ Setting this to true through the kernel command line will
+ disable the stack depot, thereby saving the static memory
+ consumed by the stack hash table. By default this is set
+ to false.
+
stacktrace [FTRACE]
Enabled the stack tracer on boot up.
@@ -5331,6 +5379,14 @@
are running concurrently, especially on systems
with rotating-rust storage.
+ torture.verbose_sleep_frequency= [KNL]
+ Specifies how many verbose printk()s should be
+ emitted between each sleep. The default of zero
+ disables verbose-printk() sleeping.
+
+ torture.verbose_sleep_duration= [KNL]
+ Duration of each verbose-printk() sleep in jiffies.
+
tp720= [HW,PS2]
tpm_suspend_pcr=[HW,TPM]
@@ -5932,12 +5988,6 @@
default x2apic cluster mode on platforms
supporting x2apic.
- x86_intel_mid_timer= [X86-32,APBT]
- Choose timer option for x86 Intel MID platform.
- Two valid options are apbt timer only and lapic timer
- plus one apbt timer for broadcast timer.
- x86_intel_mid_timer=apbt_only | lapic_and_apbt
-
xen_512gb_limit [KNL,X86-64,XEN]
Restricts the kernel running paravirtualized under Xen
to use only up to 512 GB of RAM. The reason to do so is
diff --git a/Documentation/admin-guide/kernel-per-CPU-kthreads.rst b/Documentation/admin-guide/kernel-per-CPU-kthreads.rst
index dc36aeb65d0a..531f689311f2 100644
--- a/Documentation/admin-guide/kernel-per-CPU-kthreads.rst
+++ b/Documentation/admin-guide/kernel-per-CPU-kthreads.rst
@@ -273,7 +273,7 @@ To reduce its OS jitter, do any of the following:
However, there is an RFC patch from Christoph Lameter
(based on an earlier one from Gilad Ben-Yossef) that
reduces or even eliminates vmstat overhead for some
- workloads at https://lkml.org/lkml/2013/9/4/379.
+ workloads at https://lore.kernel.org/r/00000140e9dfd6bd-40db3d4f-c1be-434f-8132-7820f81bb586-000000@email.amazonses.com.
e. If running on high-end powerpc servers, build with
CONFIG_PPC_RTAS_DAEMON=n. This prevents the RTAS
daemon from running on each CPU every second or so.
diff --git a/Documentation/admin-guide/laptops/thinkpad-acpi.rst b/Documentation/admin-guide/laptops/thinkpad-acpi.rst
index 5fe1ade88c17..91fd6846ce17 100644
--- a/Documentation/admin-guide/laptops/thinkpad-acpi.rst
+++ b/Documentation/admin-guide/laptops/thinkpad-acpi.rst
@@ -51,6 +51,7 @@ detailed description):
- UWB enable and disable
- LCD Shadow (PrivacyGuard) enable and disable
- Lap mode sensor
+ - Setting keyboard language
A compatibility table by model and feature is maintained on the web
site, http://ibm-acpi.sf.net/. I appreciate any success or failure
@@ -1466,6 +1467,30 @@ Sysfs notes
rfkill controller switch "tpacpi_uwb_sw": refer to
Documentation/driver-api/rfkill.rst for details.
+
+Setting keyboard language
+-------------------------
+
+sysfs: keyboard_lang
+
+This feature is used to set the keyboard language in the embedded controller
+firmware (ECFW) through the ASL interface.
+Some ThinkPad models, such as the T580, T590 and T15 Gen 1, have "=", "("
+and ")" keys on the numeric pad that are not reported correctly when the
+keyboard language is anything other than "english". This is because the default
+keyboard language in ECFW is set to "english". Using this sysfs attribute, the
+user can set the correct keyboard language in ECFW, after which these keys
+work correctly.
+
+An example command to set the keyboard language is shown below::
+
+ echo jp > /sys/devices/platform/thinkpad_acpi/keyboard_lang
+
+The keyboard layout values that can be written to this sysfs attribute are: be(Belgian),
+cz(Czech), da(Danish), de(German), en(English), es(Spain), et(Estonian),
+fr(French), fr-ch(French(Switzerland)), hu(Hungarian), it(Italy), jp(Japan),
+nl(Dutch), nn(Norway), pl(Polish), pt(Portuguese), sl(Slovenian), sv(Sweden),
+tr(Turkey)
+
+
Adaptive keyboard
-----------------
diff --git a/Documentation/admin-guide/media/rkisp1.rst b/Documentation/admin-guide/media/rkisp1.rst
index 2267e4fb475e..ccf418713623 100644
--- a/Documentation/admin-guide/media/rkisp1.rst
+++ b/Documentation/admin-guide/media/rkisp1.rst
@@ -13,6 +13,22 @@ This file documents the driver for the Rockchip ISP1 that is part of RK3288
and RK3399 SoCs. The driver is located under drivers/staging/media/rkisp1
and uses the Media-Controller API.
+Revisions
+=========
+
+There exist multiple smaller revisions to this ISP that got introduced in
+later SoCs. Revisions can be found in the enum :c:type:`rkisp1_cif_isp_version`
+in the UAPI and the revision of the ISP inside the running SoC can be read
+in the field hw_revision of struct media_device_info as returned by
+ioctl MEDIA_IOC_DEVICE_INFO.
+
+Versions in use are:
+
+- RKISP1_V10: used at least in rk3288 and rk3399
+- RKISP1_V11: declared in the original vendor code, but not used
+- RKISP1_V12: used at least in rk3326 and px30
+- RKISP1_V13: used at least in rk1808
+
Topology
========
.. _rkisp1_topology_graph:
diff --git a/Documentation/admin-guide/mm/memory-hotplug.rst b/Documentation/admin-guide/mm/memory-hotplug.rst
index 5c4432c96c4b..5307f90738aa 100644
--- a/Documentation/admin-guide/mm/memory-hotplug.rst
+++ b/Documentation/admin-guide/mm/memory-hotplug.rst
@@ -160,16 +160,16 @@ Under each memory block, you can see 5 files:
"online_movable", "online", "offline" command
which will be performed on all sections in the block.
-``phys_device`` read-only: designed to show the name of physical memory
- device. This is not well implemented now.
-``removable`` read-only: contains an integer value indicating
- whether the memory block is removable or not
- removable. A value of 1 indicates that the memory
- block is removable and a value of 0 indicates that
- it is not removable. A memory block is removable only if
- every section in the block is removable.
-``valid_zones`` read-only: designed to show which zones this memory block
- can be onlined to.
+``phys_device`` read-only: legacy interface only ever used on s390x to
+ expose the covered storage increment.
+``removable`` read-only: legacy interface that indicated whether a memory
+ block was likely to be offlineable or not. Newer kernel
+ versions return "1" if and only if the kernel supports
+ memory offlining.
+``valid_zones`` read-only: designed to show by which zone memory provided by
+ a memory block is managed, and to show by which zone memory
+ provided by an offline memory block could be managed when
+ onlining.
The first column shows it`s default zone.
diff --git a/Documentation/admin-guide/perf-security.rst b/Documentation/admin-guide/perf-security.rst
index 904e4eb37f99..34aa334320ca 100644
--- a/Documentation/admin-guide/perf-security.rst
+++ b/Documentation/admin-guide/perf-security.rst
@@ -72,7 +72,7 @@ monitoring and observability operations, thus, bypass *scope* permissions
checks in the kernel. CAP_PERFMON implements the principle of least
privilege [13]_ (POSIX 1003.1e: 2.2.2.39) for performance monitoring and
observability operations in the kernel and provides a secure approach to
-perfomance monitoring and observability in the system.
+performance monitoring and observability in the system.
For backward compatibility reasons the access to perf_events monitoring and
observability operations is also open for CAP_SYS_ADMIN privileged
diff --git a/Documentation/admin-guide/perf/arm-cmn.rst b/Documentation/admin-guide/perf/arm-cmn.rst
index 0e4809346014..796e25b7027b 100644
--- a/Documentation/admin-guide/perf/arm-cmn.rst
+++ b/Documentation/admin-guide/perf/arm-cmn.rst
@@ -17,7 +17,7 @@ PMU events
----------
The PMU driver registers a single PMU device for the whole interconnect,
-see /sys/bus/event_source/devices/arm_cmn. Multi-chip systems may link
+see /sys/bus/event_source/devices/arm_cmn_0. Multi-chip systems may link
more than one CMN together via external CCIX links - in this situation,
each mesh counts its own events entirely independently, and additional
PMU devices will be named arm_cmn_{1..n}.
diff --git a/Documentation/admin-guide/spkguide.txt b/Documentation/admin-guide/spkguide.txt
index 5ff6a0fe87d1..977ab3f5a0a8 100644
--- a/Documentation/admin-guide/spkguide.txt
+++ b/Documentation/admin-guide/spkguide.txt
@@ -1033,7 +1033,9 @@ speakup + keypad 3, you would hear:
The speakup key is depressed, so the name of the key state is speakup.
This part of the message comes from the states collection.
-14.2. Loading Your Own Messages
+14.2. Changing language
+
+14.2.1. Loading Your Own Messages
The files under the i18n subdirectory all follow the same format.
They consist of lines, with one message per line.
@@ -1066,8 +1068,50 @@ echo '1 azul' > /speakup/i18n/colors
The next time that Speakup says message 1 from the colors group, it will
say "azul", rather than "blue."
+14.2.2. Choose a language
+
In the future, translations into various languages will be made available,
-and most users will just load the files necessary for their language.
+and most users will just load the files necessary for their language. So far,
+only French is available in addition to the native Canadian English.
+
+French is only available after you are logged in.
+
+Canadian English is the default language. To switch to another language,
+download the source of Speakup and untar it in your home directory. The
+following command should let you do this:
+
+tar xvjf speakup-<version>.tar.bz2
+
+where <version> is the version number of the application.
+
+Next, change to the newly created directory, then into the tools/ directory, and
+run the script speakup_setlocale. You are asked which language you want to
+use. Type the value associated with your language (e.g. fr for French), then press
+Enter. The needed files are copied into the i18n directory.
+
+Note: speakupconf must be installed on your system so that settings are saved.
+Otherwise, you will get an error: your language will be loaded but you will
+have to run the script again every time Speakup restarts.
+See section 16.1. for information about speakupconf.
+
+You will have to repeat these steps for any change of locale, i.e. if you wish
+to change Speakup's language or charset (iso-8859-15 or UTF-8).
+
+If you wish to store the settings, note that at your next login, you will need
+to run:
+
+speakupconf load
+
+Alternatively, you can add the above line to your file
+~/.bashrc or ~/.bash_profile.
+
+If the system administrator ran the script himself, all users will be able
+to change from English to the language chosen by root and directly run
+speakupconf load (or add this to the ~/.bashrc or
+~/.bash_profile file). If there are several languages to handle, the
+administrator (or every user) will have to run the first steps up to speakupconf
+save, choosing the appropriate language, in every user's home directory. Each
+user will then be able to run speakupconf load, and Speakup will load their own settings.
14.3. No Support for Non-Western-European Languages
diff --git a/Documentation/admin-guide/syscall-user-dispatch.rst b/Documentation/admin-guide/syscall-user-dispatch.rst
index a380d6515774..60314953c728 100644
--- a/Documentation/admin-guide/syscall-user-dispatch.rst
+++ b/Documentation/admin-guide/syscall-user-dispatch.rst
@@ -70,8 +70,8 @@ trampoline code on the vDSO, that trampoline is never intercepted.
[selector] is a pointer to a char-sized region in the process memory
region, that provides a quick way to enable disable syscall redirection
thread-wide, without the need to invoke the kernel directly. selector
-can be set to PR_SYS_DISPATCH_ON or PR_SYS_DISPATCH_OFF. Any other
-value should terminate the program with a SIGSYS.
+can be set to SYSCALL_DISPATCH_FILTER_ALLOW or SYSCALL_DISPATCH_FILTER_BLOCK.
+Any other value should terminate the program with a SIGSYS.
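+
+The following is a minimal, hedged sketch of how a user program might use
+this interface (the fallback constant values are assumptions taken from the
+current uapi headers; prefer the definitions from <linux/prctl.h> where
+available)::
+
+  #include <sys/prctl.h>
+  #include <linux/prctl.h>
+
+  #ifndef PR_SET_SYSCALL_USER_DISPATCH
+  #define PR_SET_SYSCALL_USER_DISPATCH 59
+  #define PR_SYS_DISPATCH_OFF 0
+  #define PR_SYS_DISPATCH_ON 1
+  #endif
+
+  #ifndef SYSCALL_DISPATCH_FILTER_ALLOW
+  #define SYSCALL_DISPATCH_FILTER_ALLOW 0
+  #define SYSCALL_DISPATCH_FILTER_BLOCK 1
+  #endif
+
+  static volatile char selector = SYSCALL_DISPATCH_FILTER_ALLOW;
+
+  /* [start, start + len) is the region (e.g. the loaded libc) whose
+   * syscalls stay allowed regardless of the selector value. */
+  static int enable_dispatch(unsigned long start, unsigned long len)
+  {
+          return prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON,
+                       start, len, &selector);
+  }
+
+  /* Flip the selector to redirect (SIGSYS) or allow syscalls thread-wide. */
+  static void set_blocked(int blocked)
+  {
+          selector = blocked ? SYSCALL_DISPATCH_FILTER_BLOCK
+                             : SYSCALL_DISPATCH_FILTER_ALLOW;
+  }
+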
Security Notes
--------------
diff --git a/Documentation/admin-guide/sysctl/fs.rst b/Documentation/admin-guide/sysctl/fs.rst
index f48277a0a850..2a501c9ddc55 100644
--- a/Documentation/admin-guide/sysctl/fs.rst
+++ b/Documentation/admin-guide/sysctl/fs.rst
@@ -380,5 +380,5 @@ This configuration option sets the maximum number of "watches" that are
allowed for each user.
Each "watch" costs roughly 90 bytes on a 32bit kernel, and roughly 160 bytes
on a 64bit one.
-The current default value for max_user_watches is the 1/32 of the available
-low memory, divided for the "watch" cost in bytes.
+The current default value for max_user_watches is 1/25 (4%) of the
+available low memory, divided by the "watch" cost in bytes.
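+
+As a rough worked example (assuming a 64bit kernel and 1 GiB of available
+low memory), the default would be about 1 GiB / 25 / 160 bytes, i.e. roughly
+268,000 watches.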
diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
index e35a3f2fb006..586cd4b86428 100644
--- a/Documentation/admin-guide/sysctl/vm.rst
+++ b/Documentation/admin-guide/sysctl/vm.rst
@@ -983,11 +983,11 @@ that benefit from having their data cached, zone_reclaim_mode should be
left disabled as the caching effect is likely to be more important than
data locality.
-zone_reclaim may be enabled if it's known that the workload is partitioned
-such that each partition fits within a NUMA node and that accessing remote
-memory would cause a measurable performance reduction. The page allocator
-will then reclaim easily reusable pages (those page cache pages that are
-currently not used) before allocating off node pages.
+Consider enabling one or more zone_reclaim mode bits if it's known that the
+workload is partitioned such that each partition fits within a NUMA node
+and that accessing remote memory would cause a measurable performance
+reduction. The page allocator will take additional actions before
+allocating off node pages.
Allowing zone reclaim to write out pages stops processes that are
writing large amounts of data from dirtying pages on other nodes. Zone
diff --git a/Documentation/admin-guide/thunderbolt.rst b/Documentation/admin-guide/thunderbolt.rst
index 613cb24c76c7..f18e881373c4 100644
--- a/Documentation/admin-guide/thunderbolt.rst
+++ b/Documentation/admin-guide/thunderbolt.rst
@@ -47,6 +47,9 @@ be DMA masters and thus read contents of the host memory without CPU and OS
knowing about it. There are ways to prevent this by setting up an IOMMU but
it is not always available for various reasons.
+Some USB4 systems have a BIOS setting to disable PCIe tunneling. This is
+treated as another security level (nopcie).
+
The security levels are as follows:
none
@@ -77,6 +80,10 @@ The security levels are as follows:
Display Port in a dock. All PCIe links downstream of the dock are
removed.
+ nopcie
+ PCIe tunneling is disabled/forbidden from the BIOS. Available in some
+ USB4 systems.
+
The current security level can be read from
``/sys/bus/thunderbolt/devices/domainX/security`` where ``domainX`` is
the Thunderbolt domain the host controller manages. There is typically
@@ -153,6 +160,22 @@ If the user still wants to connect the device they can either approve
the device without a key or write a new key and write 1 to the
``authorized`` file to get the new key stored on the device NVM.
+De-authorizing devices
+----------------------
+It is possible to de-authorize devices by writing ``0`` to their
+``authorized`` attribute. This requires support from the connection
+manager implementation and can be checked by reading the domain's
+``deauthorization`` attribute. If it reads ``1``, then the feature is
+supported.
+
+When a device is de-authorized, the PCIe tunnel from the parent device's
+PCIe downstream (or root) port to the device's PCIe upstream port is torn
+down. This is essentially the same thing as a PCIe hot-remove, and the PCIe
+topology in question will not be accessible anymore until the device is
+authorized again. If storage such as NVMe is involved, there is a risk of
+data loss if the filesystem on that storage is not properly shut down. You
+have been warned!
+
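+A minimal sketch of de-authorizing a device from a C program (the sysfs
+path with the example device name ``0-1`` is an assumption; pick the entry
+that matches your device under ``/sys/bus/thunderbolt/devices``)::
+
+  #include <fcntl.h>
+  #include <unistd.h>
+
+  /* Returns 0 on success, -1 on error (errno set by open/write/close). */
+  static int tb_deauthorize(void)
+  {
+          const char *path = "/sys/bus/thunderbolt/devices/0-1/authorized";
+          int fd = open(path, O_WRONLY);
+          ssize_t n;
+
+          if (fd < 0)
+                  return -1;
+          n = write(fd, "0", 1);  /* "0" tears down the PCIe tunnel */
+          close(fd);
+          return n == 1 ? 0 : -1;
+  }
+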
DMA protection utilizing IOMMU
------------------------------
Recent systems from 2018 and forward with Thunderbolt ports may natively
diff --git a/Documentation/admin-guide/xfs.rst b/Documentation/admin-guide/xfs.rst
index 86de8a1ad91c..5422407a96d7 100644
--- a/Documentation/admin-guide/xfs.rst
+++ b/Documentation/admin-guide/xfs.rst
@@ -284,6 +284,9 @@ The following sysctls are available for the XFS filesystem:
removes unused preallocation from clean inodes and releases
the unused space back to the free pool.
+ fs.xfs.speculative_cow_prealloc_lifetime
+ This is an alias for speculative_prealloc_lifetime.
+
fs.xfs.error_level (Min: 0 Default: 3 Max: 11)
A volume knob for error reporting when internal errors occur.
This will generate detailed messages & backtraces for filesystem
@@ -356,12 +359,13 @@ The following sysctls are available for the XFS filesystem:
Deprecated Sysctls
==================
-=========================== ================
- Name Removal Schedule
-=========================== ================
-fs.xfs.irix_sgid_inherit September 2025
-fs.xfs.irix_symlink_mode September 2025
-=========================== ================
+=========================================== ================
+ Name Removal Schedule
+=========================================== ================
+fs.xfs.irix_sgid_inherit September 2025
+fs.xfs.irix_symlink_mode September 2025
+fs.xfs.speculative_cow_prealloc_lifetime September 2025
+=========================================== ================
Removed Sysctls
@@ -495,3 +499,45 @@ the class and error context. For example, the default values for
"metadata/ENODEV" are "0" rather than "-1" so that this error handler defaults
to "fail immediately" behaviour. This is done because ENODEV is a fatal,
unrecoverable error no matter how many times the metadata IO is retried.
+
+Workqueue Concurrency
+=====================
+
+XFS uses kernel workqueues to parallelize metadata update processes. This
+enables it to take advantage of storage hardware that can service many IO
+operations simultaneously. This interface exposes internal implementation
+details of XFS, and as such is explicitly not part of any userspace API/ABI
+guarantee the kernel may give userspace. These are undocumented features of
+the generic workqueue implementation XFS uses for concurrency, and they are
+provided here purely for diagnostic and tuning purposes and may change at any
+time in the future.
+
+The control knobs for a filesystem's workqueues are organized by task at hand
+and the short name of the data device. They can all be found in:
+
+ /sys/bus/workqueue/devices/${task}!${device}
+
+================ ===========
+ Task Description
+================ ===========
+ xfs_iwalk-$pid Inode scans of the entire filesystem. Currently limited to
+ mount time quotacheck.
+ xfs-blockgc      Background garbage collection of disk space that has been
+ speculatively allocated beyond EOF or for staging copy on
+ write operations.
+================ ===========
+
+For example, the knobs for the quotacheck workqueue for /dev/nvme0n1 would be
+found in /sys/bus/workqueue/devices/xfs_iwalk-1111!nvme0n1/.
+
+The interesting knobs for XFS workqueues are as follows:
+
+============ ===========
+ Knob Description
+============ ===========
+ max_active Maximum number of background threads that can be started to
+ run the work.
+ cpumask CPUs upon which the threads are allowed to run.
+ nice Relative priority of scheduling the threads. These are the
+ same nice levels that can be applied to userspace processes.
+============ ===========
diff --git a/Documentation/arm/booting.rst b/Documentation/arm/booting.rst
index a2263451dc2c..5974e37b3d20 100644
--- a/Documentation/arm/booting.rst
+++ b/Documentation/arm/booting.rst
@@ -128,7 +128,7 @@ it. The recommended placement is in the first 16KiB of RAM.
The boot loader must load a device tree image (dtb) into system ram
at a 64bit aligned address and initialize it with the boot data. The
-dtb format is documented in Documentation/devicetree/booting-without-of.rst.
+dtb format is documented at https://www.devicetree.org/specifications/.
The kernel will look for the dtb magic value of 0xd00dfeed at the dtb
physical address to determine if a dtb has been passed instead of a
tagged list.
diff --git a/Documentation/arm/index.rst b/Documentation/arm/index.rst
index a2e9e1bba7b9..b4bea32472b6 100644
--- a/Documentation/arm/index.rst
+++ b/Documentation/arm/index.rst
@@ -33,7 +33,7 @@ SoC-specific documents
ixp4xx
- marvel
+ marvell
microchip
netwinder
diff --git a/Documentation/arm/marvel.rst b/Documentation/arm/marvell.rst
index 16ab2eb085b8..94cd73383594 100644
--- a/Documentation/arm/marvel.rst
+++ b/Documentation/arm/marvell.rst
@@ -127,7 +127,7 @@ EBU Armada family
- 88F6828 Armada 388
- Product infos: http://www.marvell.com/embedded-processors/armada-38x/
- - Functional Spec: https://marvellcorp.wufoo.com/forms/marvell-armada-38x-functional-specifications/
+ - Functional Spec: http://www.marvell.com/content/dam/marvell/en/public-collateral/embedded-processors/marvell-embedded-processors-armada-38x-functional-specifications-2015-11.pdf
Core:
ARM Cortex-A9
@@ -183,7 +183,10 @@ EBU Armada family ARMv8
http://www.marvell.com/embedded-processors/armada-3700/
Product Brief:
- http://www.marvell.com/embedded-processors/assets/PB-88F3700-FNL.pdf
+ http://www.marvell.com/content/dam/marvell/en/public-collateral/embedded-processors/marvell-embedded-processors-armada-37xx-product-brief-2016-01.pdf
+
+ Hardware Spec:
+ http://www.marvell.com/content/dam/marvell/en/public-collateral/embedded-processors/marvell-embedded-processors-armada-37xx-hardware-specifications-2019-09.pdf
Device tree files:
arch/arm64/boot/dts/marvell/armada-37*
diff --git a/Documentation/asm-annotations.rst b/Documentation/asm-annotations.rst
index 32ea57483378..76424e0431f4 100644
--- a/Documentation/asm-annotations.rst
+++ b/Documentation/asm-annotations.rst
@@ -100,6 +100,11 @@ Instruction Macros
~~~~~~~~~~~~~~~~~~
This section covers ``SYM_FUNC_*`` and ``SYM_CODE_*`` enumerated above.
+``objtool`` requires that all code must be contained in an ELF symbol. Symbols
+whose names have a ``.L`` prefix are not emitted into the symbol table. ``.L``
+prefixed symbols can be used within a code region, but should be avoided for
+denoting a range of code via ``SYM_*_START/END`` annotations.
+
* ``SYM_FUNC_START`` and ``SYM_FUNC_START_LOCAL`` are supposed to be **the
most frequent markings**. They are used for functions with standard calling
conventions -- global and local. Like in C, they both align the functions to
diff --git a/Documentation/block/bfq-iosched.rst b/Documentation/block/bfq-iosched.rst
index 19d4d1570cee..66c5a4e54130 100644
--- a/Documentation/block/bfq-iosched.rst
+++ b/Documentation/block/bfq-iosched.rst
@@ -430,13 +430,13 @@ fifo_expire_async
-----------------
This parameter is used to set the timeout of asynchronous requests. Default
-value of this is 248ms.
+value of this is 250ms.
fifo_expire_sync
----------------
This parameter is used to set the timeout of synchronous requests. Default
-value of this is 124ms. In case to favor synchronous requests over asynchronous
+value of this is 125ms. In case to favor synchronous requests over asynchronous
one, this value should be decreased relative to fifo_expire_async.
low_latency
diff --git a/Documentation/block/biovecs.rst b/Documentation/block/biovecs.rst
index 36771a131b56..ddb867e0185b 100644
--- a/Documentation/block/biovecs.rst
+++ b/Documentation/block/biovecs.rst
@@ -40,6 +40,8 @@ normal code doesn't have to deal with bi_bvec_done.
There is a lower level advance function - bvec_iter_advance() - which takes
a pointer to a biovec, not a bio; this is used by the bio integrity code.
+As of 5.12 bvec segments with zero bv_len are not supported.
+
What's all this get us?
=======================
diff --git a/Documentation/block/inline-encryption.rst b/Documentation/block/inline-encryption.rst
index e75151e467d3..7f9b40d6b416 100644
--- a/Documentation/block/inline-encryption.rst
+++ b/Documentation/block/inline-encryption.rst
@@ -182,8 +182,9 @@ API presented to device drivers
A :c:type:``struct blk_keyslot_manager`` should be set up by device drivers in
the ``request_queue`` of the device. The device driver needs to call
-``blk_ksm_init`` on the ``blk_keyslot_manager``, which specifying the number of
-keyslots supported by the hardware.
+``blk_ksm_init`` (or its resource-managed variant ``devm_blk_ksm_init``) on the
+``blk_keyslot_manager``, while specifying the number of keyslots supported by
+the hardware.
The device driver also needs to tell the KSM how to actually manipulate the
IE hardware in the device to do things like programming the crypto key into
@@ -202,10 +203,9 @@ needs each and every of its keyslots to be reprogrammed with the key it
"should have" at the point in time when the function is called. This is useful
e.g. if a device loses all its keys on runtime power down/up.
-``blk_ksm_destroy`` should be called to free up all resources used by a keyslot
-manager upon ``blk_ksm_init``, once the ``blk_keyslot_manager`` is no longer
-needed.
-
+If the driver used ``blk_ksm_init`` instead of ``devm_blk_ksm_init``, then
+``blk_ksm_destroy`` should be called to free up all resources used by a
+``blk_keyslot_manager`` once it is no longer needed.
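+
+A minimal sketch of a driver's setup (``struct foo_host``, ``foo_probe`` and
+``NUM_KEYSLOTS`` are made-up names for illustration; only the init/destroy
+calls themselves are the real API)::
+
+  #include <linux/device.h>
+  #include <linux/keyslot-manager.h>
+
+  #define NUM_KEYSLOTS 32                 /* hypothetical hardware limit */
+
+  struct foo_host {
+          struct device *dev;
+          struct blk_keyslot_manager ksm;
+  };
+
+  /* Preferred: the devm_ variant needs no matching destroy call. */
+  static int foo_probe(struct foo_host *host)
+  {
+          return devm_blk_ksm_init(host->dev, &host->ksm, NUM_KEYSLOTS);
+  }
+
+  /* Non-devm alternative: pair blk_ksm_init() with blk_ksm_destroy(). */
+  static int foo_probe_manual(struct foo_host *host)
+  {
+          return blk_ksm_init(&host->ksm, NUM_KEYSLOTS);
+  }
+
+  static void foo_remove_manual(struct foo_host *host)
+  {
+          blk_ksm_destroy(&host->ksm);
+  }
+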
Layered Devices
===============
diff --git a/Documentation/block/queue-sysfs.rst b/Documentation/block/queue-sysfs.rst
index 2638d3446b79..4dc7f0d499a8 100644
--- a/Documentation/block/queue-sysfs.rst
+++ b/Documentation/block/queue-sysfs.rst
@@ -261,6 +261,12 @@ For block drivers that support REQ_OP_WRITE_ZEROES, the maximum number of
bytes that can be zeroed at once. The value 0 means that REQ_OP_WRITE_ZEROES
is not supported.
+zone_append_max_bytes (RO)
+--------------------------
+This is the maximum number of bytes that can be written to a sequential
+zone of a zoned block device using a zone append write operation
+(REQ_OP_ZONE_APPEND). This value is always 0 for regular block devices.
+
zoned (RO)
----------
This indicates if the device is a zoned block device and the zone model of the
@@ -273,4 +279,11 @@ devices are described in the ZBC (Zoned Block Commands) and ZAC
do not support zone commands, they will be treated as regular block devices
and zoned will report "none".
+zone_write_granularity (RO)
+---------------------------
+This indicates the alignment constraint, in bytes, for write operations in
+sequential zones of zoned block devices (devices with a zoned attribute
+that reports "host-managed" or "host-aware"). This value is always 0 for
+regular block devices.
+
Jens Axboe <jens.axboe@oracle.com>, February 2009
diff --git a/Documentation/bpf/bpf_design_QA.rst b/Documentation/bpf/bpf_design_QA.rst
index 2df7b067ab93..0e15f9b05c9d 100644
--- a/Documentation/bpf/bpf_design_QA.rst
+++ b/Documentation/bpf/bpf_design_QA.rst
@@ -208,6 +208,12 @@ data structures and compile with kernel internal headers. Both of these
kernel internals are subject to change and can break with newer kernels
such that the program needs to be adapted accordingly.
+Q: Are tracepoints part of the stable ABI?
+------------------------------------------
+A: NO. Tracepoints are tied to internal implementation details; hence, they are
+subject to change and can break with newer kernels. BPF programs need to change
+accordingly when this happens.
+
Q: How much stack space a BPF program uses?
-------------------------------------------
A: Currently all program types are limited to 512 bytes of stack
diff --git a/Documentation/bpf/bpf_devel_QA.rst b/Documentation/bpf/bpf_devel_QA.rst
index 5b613d2a5f1a..2ed89abbf9a4 100644
--- a/Documentation/bpf/bpf_devel_QA.rst
+++ b/Documentation/bpf/bpf_devel_QA.rst
@@ -501,16 +501,19 @@ All LLVM releases can be found at: http://releases.llvm.org/
Q: Got it, so how do I build LLVM manually anyway?
--------------------------------------------------
-A: You need cmake and gcc-c++ as build requisites for LLVM. Once you have
-that set up, proceed with building the latest LLVM and clang version
+A: We recommend that developers who want the fastest incremental builds
+use the Ninja build system; you can find it in your system's package
+manager, usually as the package ninja or ninja-build.
+
+You need ninja, cmake and gcc-c++ as build requisites for LLVM. Once you
+have that set up, proceed with building the latest LLVM and clang version
from the git repositories::
$ git clone https://github.com/llvm/llvm-project.git
- $ mkdir -p llvm-project/llvm/build/install
+ $ mkdir -p llvm-project/llvm/build
$ cd llvm-project/llvm/build
$ cmake .. -G "Ninja" -DLLVM_TARGETS_TO_BUILD="BPF;X86" \
-DLLVM_ENABLE_PROJECTS="clang" \
- -DBUILD_SHARED_LIBS=OFF \
-DCMAKE_BUILD_TYPE=Release \
-DLLVM_BUILD_RUNTIME=OFF
$ ninja
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 6a767294887e..fd65168c10f8 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -31,7 +31,7 @@ from load_config import loadConfig
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = '1.3'
+needs_sphinx = '1.7'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
@@ -49,8 +49,7 @@ extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include',
if major >= 3:
sys.stderr.write('''WARNING: The kernel documentation build process
support for Sphinx v3.0 and above is brand new. Be prepared for
- possible issues in the generated output.
- ''')
+ possible issues in the generated output.\n''')
if (major > 3) or (minor > 0 or patch >= 2):
# Sphinx c function parser is more pedantic with regards to type
# checking. Due to that, having macros at c:function cause problems.
@@ -112,19 +111,12 @@ if major >= 3:
else:
extensions.append('cdomain')
- if major == 1 and minor < 7:
- sys.stderr.write('WARNING: Sphinx 1.7 or greater will be required as of '
- 'the 5.12 release\n')
# Ensure that autosectionlabel will produce unique names
autosectionlabel_prefix_document = True
autosectionlabel_maxdepth = 2
-# The name of the math extension changed on Sphinx 1.4
-if (major == 1 and minor > 3) or (major > 1):
- extensions.append("sphinx.ext.imgmath")
-else:
- extensions.append("sphinx.ext.pngmath")
+extensions.append("sphinx.ext.imgmath")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -375,71 +367,9 @@ if cjk_cmd.find("Noto Sans CJK SC") >= 0:
'''
# Fix reference escape troubles with Sphinx 1.4.x
-if major == 1 and minor > 3:
+if major == 1:
latex_elements['preamble'] += '\\renewcommand*{\\DUrole}[2]{ #2 }\n'
-if major == 1 and minor <= 4:
- latex_elements['preamble'] += '\\usepackage[margin=0.5in, top=1in, bottom=1in]{geometry}'
-elif major == 1 and (minor > 5 or (minor == 5 and patch >= 3)):
- latex_elements['sphinxsetup'] = 'hmargin=0.5in, vmargin=1in'
- latex_elements['preamble'] += '\\fvset{fontsize=auto}\n'
-
-# Customize notice background colors on Sphinx < 1.6:
-if major == 1 and minor < 6:
- latex_elements['preamble'] += '''
- \\usepackage{ifthen}
-
- % Put notes in color and let them be inside a table
- \\definecolor{NoteColor}{RGB}{204,255,255}
- \\definecolor{WarningColor}{RGB}{255,204,204}
- \\definecolor{AttentionColor}{RGB}{255,255,204}
- \\definecolor{ImportantColor}{RGB}{192,255,204}
- \\definecolor{OtherColor}{RGB}{204,204,204}
- \\newlength{\\mynoticelength}
- \\makeatletter\\newenvironment{coloredbox}[1]{%
- \\setlength{\\fboxrule}{1pt}
- \\setlength{\\fboxsep}{7pt}
- \\setlength{\\mynoticelength}{\\linewidth}
- \\addtolength{\\mynoticelength}{-2\\fboxsep}
- \\addtolength{\\mynoticelength}{-2\\fboxrule}
- \\begin{lrbox}{\\@tempboxa}\\begin{minipage}{\\mynoticelength}}{\\end{minipage}\\end{lrbox}%
- \\ifthenelse%
- {\\equal{\\py@noticetype}{note}}%
- {\\colorbox{NoteColor}{\\usebox{\\@tempboxa}}}%
- {%
- \\ifthenelse%
- {\\equal{\\py@noticetype}{warning}}%
- {\\colorbox{WarningColor}{\\usebox{\\@tempboxa}}}%
- {%
- \\ifthenelse%
- {\\equal{\\py@noticetype}{attention}}%
- {\\colorbox{AttentionColor}{\\usebox{\\@tempboxa}}}%
- {%
- \\ifthenelse%
- {\\equal{\\py@noticetype}{important}}%
- {\\colorbox{ImportantColor}{\\usebox{\\@tempboxa}}}%
- {\\colorbox{OtherColor}{\\usebox{\\@tempboxa}}}%
- }%
- }%
- }%
- }\\makeatother
-
- \\makeatletter
- \\renewenvironment{notice}[2]{%
- \\def\\py@noticetype{#1}
- \\begin{coloredbox}{#1}
- \\bf\\it
- \\par\\strong{#2}
- \\csname py@noticestart@#1\\endcsname
- }
- {
- \\csname py@noticeend@\\py@noticetype\\endcsname
- \\end{coloredbox}
- }
- \\makeatother
-
- '''
-
# With Sphinx 1.6, it is possible to change the Bg color directly
# by using:
# \definecolor{sphinxnoteBgColor}{RGB}{204,255,255}
diff --git a/Documentation/core-api/dma-api.rst b/Documentation/core-api/dma-api.rst
index 75cb757bbff0..e6d23f117308 100644
--- a/Documentation/core-api/dma-api.rst
+++ b/Documentation/core-api/dma-api.rst
@@ -528,16 +528,14 @@ an I/O device, you should not be using this part of the API.
::
- void *
- dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, enum dma_data_direction dir,
- gfp_t gfp)
+ struct page *
+ dma_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ enum dma_data_direction dir, gfp_t gfp)
-This routine allocates a region of <size> bytes of consistent memory. It
-returns a pointer to the allocated region (in the processor's virtual address
-space) or NULL if the allocation failed. The returned memory may or may not
-be in the kernel direct mapping. Drivers must not call virt_to_page on
-the returned memory region.
+This routine allocates a region of <size> bytes of non-coherent memory. It
+returns a pointer to the first struct page for the region, or NULL if the
+allocation failed. The resulting struct page can be used for everything a
+struct page is suitable for.
It also returns a <dma_handle> which may be cast to an unsigned integer the
same width as the bus and given to the device as the DMA address base of
@@ -558,51 +556,33 @@ reused.
::
void
- dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_free_pages(struct device *dev, size_t size, struct page *page,
dma_addr_t dma_handle, enum dma_data_direction dir)
-Free a region of memory previously allocated using dma_alloc_noncoherent().
-dev, size and dma_handle and dir must all be the same as those passed into
-dma_alloc_noncoherent(). cpu_addr must be the virtual address returned by
-dma_alloc_noncoherent().
+Free a region of memory previously allocated using dma_alloc_pages().
+dev, size, dma_handle and dir must all be the same as those passed into
+dma_alloc_pages(). page must be the pointer returned by dma_alloc_pages().
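+
+A minimal usage sketch, assuming a device driver context where ``dev`` is the
+driver's ``struct device`` and error handling is abbreviated::
+
+        struct page *page;
+        dma_addr_t dma_handle;
+        size_t size = 4 * PAGE_SIZE;
+
+        page = dma_alloc_pages(dev, size, &dma_handle, DMA_BIDIRECTIONAL,
+                               GFP_KERNEL);
+        if (!page)
+                return -ENOMEM;
+
+        /* CPU prepares the buffer, then hands ownership to the device. */
+        dma_sync_single_for_device(dev, dma_handle, size, DMA_BIDIRECTIONAL);
+        /* ... device DMA runs here ... */
+        dma_sync_single_for_cpu(dev, dma_handle, size, DMA_BIDIRECTIONAL);
+
+        dma_free_pages(dev, size, page, dma_handle, DMA_BIDIRECTIONAL);
+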
::
- struct page *
- dma_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle,
- enum dma_data_direction dir, gfp_t gfp)
-
-This routine allocates a region of <size> bytes of non-coherent memory. It
-returns a pointer to first struct page for the region, or NULL if the
-allocation failed. The resulting struct page can be used for everything a
-struct page is suitable for.
-
-It also returns a <dma_handle> which may be cast to an unsigned integer the
-same width as the bus and given to the device as the DMA address base of
-the region.
-
-The dir parameter specified if data is read and/or written by the device,
-see dma_map_single() for details.
-
-The gfp parameter allows the caller to specify the ``GFP_`` flags (see
-kmalloc()) for the allocation, but rejects flags used to specify a memory
-zone such as GFP_DMA or GFP_HIGHMEM.
+ void *
+ dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir,
+ gfp_t gfp)
-Before giving the memory to the device, dma_sync_single_for_device() needs
-to be called, and before reading memory written by the device,
-dma_sync_single_for_cpu(), just like for streaming DMA mappings that are
-reused.
+This routine is a convenient wrapper around dma_alloc_pages that returns the
+kernel virtual address for the allocated memory instead of the page structure.
::
void
- dma_free_pages(struct device *dev, size_t size, struct page *page,
+ dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle, enum dma_data_direction dir)
-Free a region of memory previously allocated using dma_alloc_pages().
-dev, size and dma_handle and dir must all be the same as those passed into
-dma_alloc_noncoherent(). page must be the pointer returned by
-dma_alloc_pages().
+Free a region of memory previously allocated using dma_alloc_noncoherent().
+dev, size, dma_handle and dir must all be the same as those passed into
+dma_alloc_noncoherent(). cpu_addr must be the virtual address returned by
+dma_alloc_noncoherent().
::
diff --git a/Documentation/core-api/mm-api.rst b/Documentation/core-api/mm-api.rst
index 2adffb3f7914..201b5423303b 100644
--- a/Documentation/core-api/mm-api.rst
+++ b/Documentation/core-api/mm-api.rst
@@ -19,11 +19,8 @@ User Space Memory Access
Memory Allocation Controls
==========================
-Functions which need to allocate memory often use GFP flags to express
-how that memory should be allocated. The GFP acronym stands for "get
-free pages", the underlying memory allocation function. Not every GFP
-flag is allowed to every function which may allocate memory. Most
-users will want to use a plain ``GFP_KERNEL``.
+.. kernel-doc:: include/linux/gfp.h
+ :internal:
.. kernel-doc:: include/linux/gfp.h
:doc: Page mobility and placement hints
diff --git a/Documentation/crypto/api-skcipher.rst b/Documentation/crypto/api-skcipher.rst
index 1aaf8985894b..04d6cc5357c8 100644
--- a/Documentation/crypto/api-skcipher.rst
+++ b/Documentation/crypto/api-skcipher.rst
@@ -28,8 +28,8 @@ Symmetric Key Cipher Request Handle
Single Block Cipher API
-----------------------
-.. kernel-doc:: include/linux/crypto.h
+.. kernel-doc:: include/crypto/internal/cipher.h
:doc: Single Block Cipher API
-.. kernel-doc:: include/linux/crypto.h
+.. kernel-doc:: include/crypto/internal/cipher.h
:functions: crypto_alloc_cipher crypto_free_cipher crypto_has_cipher crypto_cipher_blocksize crypto_cipher_setkey crypto_cipher_encrypt_one crypto_cipher_decrypt_one
diff --git a/Documentation/dev-tools/index.rst b/Documentation/dev-tools/index.rst
index f7809c7b1ba9..1b1cf4f5c9d9 100644
--- a/Documentation/dev-tools/index.rst
+++ b/Documentation/dev-tools/index.rst
@@ -22,6 +22,7 @@ whole; patches welcome!
ubsan
kmemleak
kcsan
+ kfence
gdb-kernel-debugging
kgdb
kselftest
diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
index 1651d961f06a..ddf4239a5890 100644
--- a/Documentation/dev-tools/kasan.rst
+++ b/Documentation/dev-tools/kasan.rst
@@ -147,27 +147,26 @@ negative values to distinguish between different kinds of inaccessible memory
like redzones or freed memory (see mm/kasan/kasan.h).
In the report above the arrows point to the shadow byte 03, which means that
-the accessed address is partially accessible.
-
-For tag-based KASAN this last report section shows the memory tags around the
-accessed address (see `Implementation details`_ section).
+the accessed address is partially accessible. For tag-based KASAN modes this
+last report section shows the memory tags around the accessed address
+(see the `Implementation details`_ section).
Boot parameters
~~~~~~~~~~~~~~~
-Hardware tag-based KASAN mode (see the section about different mode below) is
-intended for use in production as a security mitigation. Therefore it supports
+Hardware tag-based KASAN mode (see the section about various modes below) is
+intended for use in production as a security mitigation. Therefore, it supports
boot parameters that allow to disable KASAN competely or otherwise control
particular KASAN features.
- ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
- ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
- traces collection (default: ``on`` for ``CONFIG_DEBUG_KERNEL=y``, otherwise
- ``off``).
+ traces collection (default: ``on``).
- ``kasan.fault=report`` or ``=panic`` controls whether to only print a KASAN
- report or also panic the kernel (default: ``report``).
+  report or also panic the kernel (default: ``report``). Note that tag
+ checking gets disabled after the first reported bug.
For developers
~~~~~~~~~~~~~~
@@ -290,6 +289,16 @@ reserved to tag freed memory regions.
Hardware tag-based KASAN currently only supports tagging of
kmem_cache_alloc/kmalloc and page_alloc memory.
+If the hardware doesn't support MTE (pre ARMv8.5), hardware tag-based KASAN
+won't be enabled. In this case, all boot parameters are ignored.
+
+Note that enabling CONFIG_KASAN_HW_TAGS always results in in-kernel TBI being
+enabled, even when kasan.mode=off is provided or when the hardware doesn't
+support MTE (but supports TBI).
+
+Hardware tag-based KASAN only reports the first bug found. After that, MTE tag
+checking gets disabled.
+
What memory accesses are sanitised by KASAN?
--------------------------------------------
@@ -353,17 +362,17 @@ unmapped. This will require changes in arch-specific code.
This allows ``VMAP_STACK`` support on x86, and can simplify support of
architectures that do not have a fixed module region.
-CONFIG_KASAN_KUNIT_TEST & CONFIG_TEST_KASAN_MODULE
---------------------------------------------------
+CONFIG_KASAN_KUNIT_TEST and CONFIG_KASAN_MODULE_TEST
+----------------------------------------------------
-KASAN tests consist on two parts:
+KASAN tests consist of two parts:
1. Tests that are integrated with the KUnit Test Framework. Enabled with
``CONFIG_KASAN_KUNIT_TEST``. These tests can be run and partially verified
automatically in a few different ways, see the instructions below.
2. Tests that are currently incompatible with KUnit. Enabled with
-``CONFIG_TEST_KASAN_MODULE`` and can only be run as a module. These tests can
+``CONFIG_KASAN_MODULE_TEST`` and can only be run as a module. These tests can
only be verified manually, by loading the kernel module and inspecting the
kernel log for KASAN reports.
diff --git a/Documentation/dev-tools/kfence.rst b/Documentation/dev-tools/kfence.rst
new file mode 100644
index 000000000000..fdf04e741ea5
--- /dev/null
+++ b/Documentation/dev-tools/kfence.rst
@@ -0,0 +1,298 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. Copyright (C) 2020, Google LLC.
+
+Kernel Electric-Fence (KFENCE)
+==============================
+
+Kernel Electric-Fence (KFENCE) is a low-overhead sampling-based memory safety
+error detector. KFENCE detects heap out-of-bounds access, use-after-free, and
+invalid-free errors.
+
+KFENCE is designed to be enabled in production kernels, and has near zero
+performance overhead. Compared to KASAN, KFENCE trades performance for
+precision. The main motivation behind KFENCE's design, is that with enough
+total uptime KFENCE will detect bugs in code paths not typically exercised by
+non-production test workloads. One way to quickly achieve a large enough total
+uptime is when the tool is deployed across a large fleet of machines.
+
+Usage
+-----
+
+To enable KFENCE, configure the kernel with::
+
+ CONFIG_KFENCE=y
+
+To build a kernel with KFENCE support, but disabled by default (to enable, set
+``kfence.sample_interval`` to a non-zero value), configure the kernel with::
+
+ CONFIG_KFENCE=y
+ CONFIG_KFENCE_SAMPLE_INTERVAL=0
+
+KFENCE provides several other configuration options to customize behaviour (see
+the respective help text in ``lib/Kconfig.kfence`` for more info).
+
+Tuning performance
+~~~~~~~~~~~~~~~~~~
+
+The most important parameter is KFENCE's sample interval, which can be set via
+the kernel boot parameter ``kfence.sample_interval`` in milliseconds. The
+sample interval determines the frequency with which heap allocations will be
+guarded by KFENCE. The default is configurable via the Kconfig option
+``CONFIG_KFENCE_SAMPLE_INTERVAL``. Setting ``kfence.sample_interval=0``
+disables KFENCE.
+
+The KFENCE memory pool is of fixed size, and if the pool is exhausted, no
+further KFENCE allocations occur. With ``CONFIG_KFENCE_NUM_OBJECTS`` (default
+255), the number of available guarded objects can be controlled. Each object
+requires 2 pages, one for the object itself and the other one used as a guard
+page; object pages are interleaved with guard pages, and every object page is
+therefore surrounded by two guard pages.
+
+The total memory dedicated to the KFENCE memory pool can be computed as::
+
+ ( #objects + 1 ) * 2 * PAGE_SIZE
+
+Using the default config, and assuming a page size of 4 KiB, results in
+dedicating 2 MiB to the KFENCE memory pool.
+
+Note: On architectures that support huge pages, KFENCE will ensure that the
+pool is using pages of size ``PAGE_SIZE``. This will result in additional page
+tables being allocated.
+
+Error reports
+~~~~~~~~~~~~~
+
+A typical out-of-bounds access looks like this::
+
+ ==================================================================
+ BUG: KFENCE: out-of-bounds read in test_out_of_bounds_read+0xa3/0x22b
+
+ Out-of-bounds read at 0xffffffffb672efff (1B left of kfence-#17):
+ test_out_of_bounds_read+0xa3/0x22b
+ kunit_try_run_case+0x51/0x85
+ kunit_generic_run_threadfn_adapter+0x16/0x30
+ kthread+0x137/0x160
+ ret_from_fork+0x22/0x30
+
+ kfence-#17 [0xffffffffb672f000-0xffffffffb672f01f, size=32, cache=kmalloc-32] allocated by task 507:
+ test_alloc+0xf3/0x25b
+ test_out_of_bounds_read+0x98/0x22b
+ kunit_try_run_case+0x51/0x85
+ kunit_generic_run_threadfn_adapter+0x16/0x30
+ kthread+0x137/0x160
+ ret_from_fork+0x22/0x30
+
+ CPU: 4 PID: 107 Comm: kunit_try_catch Not tainted 5.8.0-rc6+ #7
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1 04/01/2014
+ ==================================================================
+
+The header of the report provides a short summary of the function involved in
+the access. It is followed by more detailed information about the access and
+its origin. Note that real kernel addresses are only shown when using the
+kernel command line option ``no_hash_pointers``.
+
+Use-after-free accesses are reported as::
+
+ ==================================================================
+ BUG: KFENCE: use-after-free read in test_use_after_free_read+0xb3/0x143
+
+ Use-after-free read at 0xffffffffb673dfe0 (in kfence-#24):
+ test_use_after_free_read+0xb3/0x143
+ kunit_try_run_case+0x51/0x85
+ kunit_generic_run_threadfn_adapter+0x16/0x30
+ kthread+0x137/0x160
+ ret_from_fork+0x22/0x30
+
+ kfence-#24 [0xffffffffb673dfe0-0xffffffffb673dfff, size=32, cache=kmalloc-32] allocated by task 507:
+ test_alloc+0xf3/0x25b
+ test_use_after_free_read+0x76/0x143
+ kunit_try_run_case+0x51/0x85
+ kunit_generic_run_threadfn_adapter+0x16/0x30
+ kthread+0x137/0x160
+ ret_from_fork+0x22/0x30
+
+ freed by task 507:
+ test_use_after_free_read+0xa8/0x143
+ kunit_try_run_case+0x51/0x85
+ kunit_generic_run_threadfn_adapter+0x16/0x30
+ kthread+0x137/0x160
+ ret_from_fork+0x22/0x30
+
+ CPU: 4 PID: 109 Comm: kunit_try_catch Tainted: G W 5.8.0-rc6+ #7
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1 04/01/2014
+ ==================================================================
+
+KFENCE also reports on invalid frees, such as double-frees::
+
+ ==================================================================
+ BUG: KFENCE: invalid free in test_double_free+0xdc/0x171
+
+ Invalid free of 0xffffffffb6741000:
+ test_double_free+0xdc/0x171
+ kunit_try_run_case+0x51/0x85
+ kunit_generic_run_threadfn_adapter+0x16/0x30
+ kthread+0x137/0x160
+ ret_from_fork+0x22/0x30
+
+ kfence-#26 [0xffffffffb6741000-0xffffffffb674101f, size=32, cache=kmalloc-32] allocated by task 507:
+ test_alloc+0xf3/0x25b
+ test_double_free+0x76/0x171
+ kunit_try_run_case+0x51/0x85
+ kunit_generic_run_threadfn_adapter+0x16/0x30
+ kthread+0x137/0x160
+ ret_from_fork+0x22/0x30
+
+ freed by task 507:
+ test_double_free+0xa8/0x171
+ kunit_try_run_case+0x51/0x85
+ kunit_generic_run_threadfn_adapter+0x16/0x30
+ kthread+0x137/0x160
+ ret_from_fork+0x22/0x30
+
+ CPU: 4 PID: 111 Comm: kunit_try_catch Tainted: G W 5.8.0-rc6+ #7
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1 04/01/2014
+ ==================================================================
+
+KFENCE also uses pattern-based redzones on the other side of an object's guard
+page, to detect out-of-bounds writes on the unprotected side of the object.
+These are reported on frees::
+
+ ==================================================================
+ BUG: KFENCE: memory corruption in test_kmalloc_aligned_oob_write+0xef/0x184
+
+ Corrupted memory at 0xffffffffb6797ff9 [ 0xac . . . . . . ] (in kfence-#69):
+ test_kmalloc_aligned_oob_write+0xef/0x184
+ kunit_try_run_case+0x51/0x85
+ kunit_generic_run_threadfn_adapter+0x16/0x30
+ kthread+0x137/0x160
+ ret_from_fork+0x22/0x30
+
+ kfence-#69 [0xffffffffb6797fb0-0xffffffffb6797ff8, size=73, cache=kmalloc-96] allocated by task 507:
+ test_alloc+0xf3/0x25b
+ test_kmalloc_aligned_oob_write+0x57/0x184
+ kunit_try_run_case+0x51/0x85
+ kunit_generic_run_threadfn_adapter+0x16/0x30
+ kthread+0x137/0x160
+ ret_from_fork+0x22/0x30
+
+ CPU: 4 PID: 120 Comm: kunit_try_catch Tainted: G W 5.8.0-rc6+ #7
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1 04/01/2014
+ ==================================================================
+
+For such errors, the address where the corruption occurred as well as the
+invalidly written bytes (offset from the address) are shown; in this
+representation, '.' characters denote untouched bytes. In the example above,
+``0xac`` is the value written to the invalid address at offset 0, and the
+remaining '.' characters denote that no following bytes have been touched.
+Note that real values are only shown if the kernel was booted with
+``no_hash_pointers``; to avoid
+information disclosure otherwise, '!' is used instead to denote invalidly
+written bytes.
+
+And finally, KFENCE may also report on invalid accesses to any protected page
+where it was not possible to determine an associated object, e.g. if adjacent
+object pages had not yet been allocated::
+
+ ==================================================================
+ BUG: KFENCE: invalid read in test_invalid_access+0x26/0xe0
+
+ Invalid read at 0xffffffffb670b00a:
+ test_invalid_access+0x26/0xe0
+ kunit_try_run_case+0x51/0x85
+ kunit_generic_run_threadfn_adapter+0x16/0x30
+ kthread+0x137/0x160
+ ret_from_fork+0x22/0x30
+
+ CPU: 4 PID: 124 Comm: kunit_try_catch Tainted: G W 5.8.0-rc6+ #7
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1 04/01/2014
+ ==================================================================
+
+DebugFS interface
+~~~~~~~~~~~~~~~~~
+
+Some debugging information is exposed via debugfs:
+
+* The file ``/sys/kernel/debug/kfence/stats`` provides runtime statistics.
+
+* The file ``/sys/kernel/debug/kfence/objects`` provides a list of objects
+ allocated via KFENCE, including those already freed but protected.
+
+Implementation Details
+----------------------
+
+Guarded allocations are set up based on the sample interval. After expiration
+of the sample interval, the next allocation through the main allocator (SLAB or
+SLUB) returns a guarded allocation from the KFENCE object pool (allocation
+sizes up to PAGE_SIZE are supported). At this point, the timer is reset, and
+the next allocation is set up after the expiration of the interval. To "gate" a
+KFENCE allocation through the main allocator's fast-path without overhead,
+KFENCE relies on static branches via the static keys infrastructure. The static
+branch is toggled to redirect the allocation to KFENCE.
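+
+A rough sketch of that gate, as seen from a slab allocator's fast path (the
+surrounding allocator code here is illustrative only; ``kfence_alloc()`` is
+the real interface)::
+
+  static void *slab_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
+  {
+          /* Almost always a no-op behind a static branch; returns a
+           * guarded object only once per sample interval. */
+          void *obj = kfence_alloc(s, size, flags);
+
+          if (unlikely(obj))
+                  return obj;
+
+          /* ... fall through to the normal SLAB/SLUB allocation path ... */
+          return NULL;
+  }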
+
+KFENCE objects each reside on a dedicated page, at either the left or right
+page boundaries selected at random. The pages to the left and right of the
+object page are "guard pages", whose attributes are changed to a protected
+state, and cause page faults on any attempted access. Such page faults are then
+intercepted by KFENCE, which handles the fault gracefully by reporting an
+out-of-bounds access, and marking the page as accessible so that the faulting
+code can (wrongly) continue executing (set ``panic_on_warn`` to panic instead).
+
+To detect out-of-bounds writes to memory within the object's page itself,
+KFENCE also uses pattern-based redzones. For each object page, a redzone is set
+up for all non-object memory. For typical alignments, the redzone is only
+required on the unguarded side of an object. Because KFENCE must honor the
+cache's requested alignment, special alignments may result in unprotected gaps
+on either side of an object, all of which are redzoned.
+
+The following figure illustrates the page layout::
+
+ ---+-----------+-----------+-----------+-----------+-----------+---
+ | xxxxxxxxx | O : | xxxxxxxxx | : O | xxxxxxxxx |
+ | xxxxxxxxx | B : | xxxxxxxxx | : B | xxxxxxxxx |
+ | x GUARD x | J : RED- | x GUARD x | RED- : J | x GUARD x |
+ | xxxxxxxxx | E : ZONE | xxxxxxxxx | ZONE : E | xxxxxxxxx |
+ | xxxxxxxxx | C : | xxxxxxxxx | : C | xxxxxxxxx |
+ | xxxxxxxxx | T : | xxxxxxxxx | : T | xxxxxxxxx |
+ ---+-----------+-----------+-----------+-----------+-----------+---
+
+Upon deallocation of a KFENCE object, the object's page is again protected and
+the object is marked as freed. Any further access to the object causes a fault
+and KFENCE reports a use-after-free access. Freed objects are inserted at the
+tail of KFENCE's freelist, so that the least recently freed objects are reused
+first, and the chances of detecting use-after-frees of recently freed objects
+are increased.
+
+Interface
+---------
+
+The following describes the functions which are used by allocators as well as
+page handling code to set up and deal with KFENCE allocations.
+
+.. kernel-doc:: include/linux/kfence.h
+ :functions: is_kfence_address
+ kfence_shutdown_cache
+ kfence_alloc kfence_free __kfence_free
+ kfence_ksize kfence_object_start
+ kfence_handle_page_fault
+
+Related Tools
+-------------
+
+In userspace, a similar approach is taken by `GWP-ASan
+<http://llvm.org/docs/GwpAsan.html>`_. GWP-ASan also relies on guard pages and
+a sampling strategy to detect memory unsafety bugs at scale. KFENCE's design is
+directly influenced by GWP-ASan, and can be seen as its kernel sibling. Another
+similar but non-sampling approach, that also inspired the name "KFENCE", can be
+found in the userspace `Electric Fence Malloc Debugger
+<https://linux.die.net/man/3/efence>`_.
+
+In the kernel, several tools exist to debug memory access errors, and in
+particular KASAN can detect all bug classes that KFENCE can detect. While KASAN
+is more precise, relying on compiler instrumentation, this comes at a
+performance cost.
+
+It is worth highlighting that KASAN and KFENCE are complementary, with
+different target environments. For instance, KASAN is the better debugging aid
+where test cases or reproducers exist: due to the lower chance of detecting the
+error, debugging with KFENCE would require more effort. Deployments at scale
+that cannot afford to enable KASAN, however, would benefit from using KFENCE to
+discover bugs due to code paths not exercised by test cases or fuzzers.
diff --git a/Documentation/dev-tools/kunit/index.rst b/Documentation/dev-tools/kunit/index.rst
index c234a3ab3c34..848478838347 100644
--- a/Documentation/dev-tools/kunit/index.rst
+++ b/Documentation/dev-tools/kunit/index.rst
@@ -13,6 +13,7 @@ KUnit - Unit Testing for the Linux Kernel
api/index
style
faq
+ tips
What is KUnit?
==============
@@ -88,6 +89,7 @@ How do I use it?
================
* :doc:`start` - for new users of KUnit
+* :doc:`tips` - for short examples of best practices
* :doc:`usage` - for a more detailed explanation of KUnit features
* :doc:`api/index` - for the list of KUnit APIs used for testing
* :doc:`kunit-tool` - for more information on the kunit_tool helper script
diff --git a/Documentation/dev-tools/kunit/start.rst b/Documentation/dev-tools/kunit/start.rst
index 454f307813ea..0e65cabe08eb 100644
--- a/Documentation/dev-tools/kunit/start.rst
+++ b/Documentation/dev-tools/kunit/start.rst
@@ -196,8 +196,9 @@ Now add the following to ``drivers/misc/Kconfig``:
.. code-block:: kconfig
config MISC_EXAMPLE_TEST
- bool "Test for my example"
+ tristate "Test for my example" if !KUNIT_ALL_TESTS
depends on MISC_EXAMPLE && KUNIT=y
+ default KUNIT_ALL_TESTS
and the following to ``drivers/misc/Makefile``:
@@ -233,5 +234,7 @@ Congrats! You just wrote your first KUnit test!
Next Steps
==========
-* Check out the :doc:`usage` page for a more
+* Check out the :doc:`tips` page for tips on
+ writing idiomatic KUnit tests.
+* Optional: see the :doc:`usage` page for a more
in-depth explanation of KUnit.
diff --git a/Documentation/dev-tools/kunit/tips.rst b/Documentation/dev-tools/kunit/tips.rst
new file mode 100644
index 000000000000..a6ca0af14098
--- /dev/null
+++ b/Documentation/dev-tools/kunit/tips.rst
@@ -0,0 +1,115 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================
+Tips For Writing KUnit Tests
+============================
+
+Exiting early on failed expectations
+------------------------------------
+
+``KUNIT_EXPECT_EQ`` and friends will mark the test as failed and continue
+execution. In some cases, it's unsafe to continue and you can use the
+``KUNIT_ASSERT`` variant to exit on failure.
+
+.. code-block:: c
+
+ void example_test_user_alloc_function(struct kunit *test)
+ {
+ void *object = alloc_some_object_for_me();
+
+ /* Make sure we got a valid pointer back. */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, object);
+ do_something_with_object(object);
+ }
+
+Allocating memory
+-----------------
+
+Where you would use ``kzalloc``, you should prefer ``kunit_kzalloc`` instead.
+KUnit will ensure the memory is freed once the test completes.
+
+This is particularly useful since it lets you use the ``KUNIT_ASSERT_EQ``
+macros to exit early from a test without having to worry about remembering to
+call ``kfree``.
+
+Example:
+
+.. code-block:: c
+
+ void example_test_allocation(struct kunit *test)
+ {
+ char *buffer = kunit_kzalloc(test, 16, GFP_KERNEL);
+ /* Ensure allocation succeeded. */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);
+
+ KUNIT_ASSERT_STREQ(test, buffer, "");
+ }
+
+
+Testing static functions
+------------------------
+
+If you don't want to expose functions or variables just for testing, one option
+is to conditionally ``#include`` the test file at the end of your .c file, e.g.
+
+.. code-block:: c
+
+ /* In my_file.c */
+
+ static int do_interesting_thing();
+
+ #ifdef CONFIG_MY_KUNIT_TEST
+ #include "my_kunit_test.c"
+ #endif
+
+Injecting test-only code
+------------------------
+
+Similarly to the above, it can be useful to add test-specific logic.
+
+.. code-block:: c
+
+ /* In my_file.h */
+
+ #ifdef CONFIG_MY_KUNIT_TEST
+ /* Defined in my_kunit_test.c */
+ void test_only_hook(void);
+ #else
+ void test_only_hook(void) { }
+ #endif
+
+TODO(dlatypov@google.com): add an example of using ``current->kunit_test`` in
+such a hook when it's not only updated for ``CONFIG_KASAN=y``.
+
+Customizing error messages
+--------------------------
+
+Each of the ``KUNIT_EXPECT`` and ``KUNIT_ASSERT`` macros has a ``_MSG`` variant.
+These take a format string and arguments to provide additional context to the
+automatically generated error messages.
+
+.. code-block:: c
+
+ char some_str[41];
+ generate_sha1_hex_string(some_str);
+
+ /* Before. Not easy to tell why the test failed. */
+ KUNIT_EXPECT_EQ(test, strlen(some_str), 40);
+
+ /* After. Now we see the offending string. */
+ KUNIT_EXPECT_EQ_MSG(test, strlen(some_str), 40, "some_str='%s'", some_str);
+
+Alternatively, one can take full control over the error message by using ``KUNIT_FAIL()``, e.g.
+
+.. code-block:: c
+
+ /* Before */
+ KUNIT_EXPECT_EQ(test, some_setup_function(), 0);
+
+ /* After: full control over the failure message. */
+ if (some_setup_function())
+ KUNIT_FAIL(test, "Failed to setup thing for testing");
+
+Next Steps
+==========
+* Optional: see the :doc:`usage` page for a more
+ in-depth explanation of KUnit.
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
index 8f2b054bec5a..780e5618ec0a 100644
--- a/Documentation/devicetree/bindings/Makefile
+++ b/Documentation/devicetree/bindings/Makefile
@@ -10,7 +10,7 @@ DT_SCHEMA_MIN_VERSION = 2020.8.1
PHONY += check_dtschema_version
check_dtschema_version:
@{ echo $(DT_SCHEMA_MIN_VERSION); \
- $(DT_DOC_CHECKER) --version 2>/dev/null || echo 0; } | sort -VC || \
+ $(DT_DOC_CHECKER) --version 2>/dev/null || echo 0; } | sort -Vc >/dev/null || \
{ echo "ERROR: dtschema minimum version is v$(DT_SCHEMA_MIN_VERSION)" >&2; false; }
quiet_cmd_extract_ex = DTEX $@
@@ -78,10 +78,10 @@ $(obj)/processed-schema.json: $(DT_SCHEMA_FILES) check_dtschema_version FORCE
endif
-extra-$(CHECK_DT_BINDING) += processed-schema-examples.json
-extra-$(CHECK_DTBS) += processed-schema.json
-extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
-extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dt.yaml, $(DT_SCHEMA_FILES))
+always-$(CHECK_DT_BINDING) += processed-schema-examples.json
+always-$(CHECK_DTBS) += processed-schema.json
+always-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
+always-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dt.yaml, $(DT_SCHEMA_FILES))
# Hack: avoid 'Argument list too long' error for 'make clean'. Remove most of
# build artifacts here before they are processed by scripts/Makefile.clean
diff --git a/Documentation/devicetree/bindings/arm/amlogic.yaml b/Documentation/devicetree/bindings/arm/amlogic.yaml
index 3341788d1096..5f6769bf45bd 100644
--- a/Documentation/devicetree/bindings/arm/amlogic.yaml
+++ b/Documentation/devicetree/bindings/arm/amlogic.yaml
@@ -151,6 +151,7 @@ properties:
- description: Boards with the Amlogic Meson G12B S922X SoC
items:
- enum:
+ - azw,gsking-x
- azw,gtking
- azw,gtking-pro
- hardkernel,odroid-n2
@@ -163,9 +164,10 @@ properties:
- description: Boards with the Amlogic Meson SM1 S905X3/D3/Y3 SoC
items:
- enum:
- - seirobotics,sei610
- - khadas,vim3l
- hardkernel,odroid-c4
+ - hardkernel,odroid-hc4
+ - khadas,vim3l
+ - seirobotics,sei610
- const: amlogic,sm1
- description: Boards with the Amlogic Meson A1 A113L SoC
diff --git a/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-mx-secbus2.yaml b/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-mx-secbus2.yaml
new file mode 100644
index 000000000000..eee7cda9f91b
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-mx-secbus2.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/arm/amlogic/amlogic,meson-mx-secbus2.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Amlogic Meson8/Meson8b/Meson8m2 SECBUS2 register interface
+
+maintainers:
+ - Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+
+description: |
+ The Meson8/Meson8b/Meson8m2 SoCs have a register bank called SECBUS2 which
+ contains registers for various IP blocks such as pin-controller bits for
+ the BSD_EN and TEST_N GPIOs as well as some AO ARC core control bits.
+ The registers can be accessed directly when not running in "secure mode".
+ When "secure mode" is enabled then these registers have to be accessed
+ through secure monitor calls.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - amlogic,meson8-secbus2
+ - amlogic,meson8b-secbus2
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ secbus2: system-controller@4000 {
+ compatible = "amlogic,meson8-secbus2", "syscon";
+ reg = <0x4000 0x2000>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/arm,scmi.txt b/Documentation/devicetree/bindings/arm/arm,scmi.txt
index b5ce5b39bb9c..667d58e0a659 100644
--- a/Documentation/devicetree/bindings/arm/arm,scmi.txt
+++ b/Documentation/devicetree/bindings/arm/arm,scmi.txt
@@ -31,6 +31,14 @@ Optional properties:
- mbox-names: shall be "tx" or "rx" depending on mboxes entries.
+- interrupts : when using smc or hvc transports, this optional
+ property indicates that msg completion by the platform is indicated
+ by an interrupt rather than by the return of the smc call. This
+ should not be used except when the platform requires such behavior.
+
+- interrupt-names : if "interrupts" is present, interrupt-names must also
+ be present and have the value "a2p".
+
See Documentation/devicetree/bindings/mailbox/mailbox.txt for more details
about the generic mailbox controller and client driver bindings.
diff --git a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
index 62cd4e89817c..807264a78edc 100644
--- a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
@@ -1,7 +1,7 @@
Atmel system registers
Chipid required properties:
-- compatible: Should be "atmel,sama5d2-chipid"
+- compatible: Should be "atmel,sama5d2-chipid" or "microchip,sama7g5-chipid"
- reg : Should contain registers location and length
PIT Timer required properties:
@@ -91,7 +91,8 @@ SHDWC SAMA5D2-Compatible Shutdown Controller
1) shdwc node
required properties:
-- compatible: should be "atmel,sama5d2-shdwc" or "microchip,sam9x60-shdwc".
+- compatible: should be "atmel,sama5d2-shdwc", "microchip,sam9x60-shdwc" or
+ "microchip,sama7g5-shdwc"
- reg: should contain registers location and length
- clocks: phandle to input clock.
- #address-cells: should be one. The cell is the wake-up input index.
@@ -103,7 +104,7 @@ optional properties:
microseconds. It's usually a board-related property.
- atmel,wakeup-rtc-timer: boolean to enable Real-Time Clock wake-up.
-optional microchip,sam9x60-shdwc properties:
+optional microchip,sam9x60-shdwc or microchip,sama7g5-shdwc properties:
- atmel,wakeup-rtt-timer: boolean to enable Real-time Timer Wake-up.
The node contains child nodes for each wake-up input that the platform uses.
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4908.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4908.yaml
index 5fec063d9a13..e55731f43c84 100644
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4908.yaml
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4908.yaml
@@ -19,6 +19,8 @@ properties:
oneOf:
- description: BCM4906 based boards
items:
+ - enum:
+ - netgear,r8000p
- const: brcm,bcm4906
- const: brcm,bcm4908
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index d711676b4a51..7f9c1ca87487 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -34,9 +34,12 @@ its hardware characteristcs.
Program Flow Trace Macrocell:
"arm,coresight-etm3x", "arm,primecell";
- - Embedded Trace Macrocell (version 4.x):
+ - Embedded Trace Macrocell (version 4.x), with memory mapped access.
"arm,coresight-etm4x", "arm,primecell";
+ - Embedded Trace Macrocell (version 4.x), with system register access only.
+ "arm,coresight-etm4x-sysreg";
+
- Coresight programmable Replicator :
"arm,coresight-dynamic-replicator", "arm,primecell";
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index 14cd727d3c4b..26b886b20b27 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -169,6 +169,7 @@ properties:
- qcom,kryo385
- qcom,kryo468
- qcom,kryo485
+ - qcom,kryo685
- qcom,scorpion
enable-method:
@@ -232,7 +233,6 @@ properties:
by this cpu (see ./idle-states.yaml).
capacity-dmips-mhz:
- $ref: '/schemas/types.yaml#/definitions/uint32'
description:
u32 value representing CPU capacity (see ./cpu-capacity.txt) in
DMIPS/MHz, relative to highest capacity-dmips-mhz
diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml
index 34000f7fbe02..297c87f45db8 100644
--- a/Documentation/devicetree/bindings/arm/fsl.yaml
+++ b/Documentation/devicetree/bindings/arm/fsl.yaml
@@ -210,6 +210,7 @@ properties:
- kiebackpeter,imx6q-tpc # K+P i.MX6 Quad TPC Board
- kontron,imx6q-samx6i # Kontron i.MX6 Dual/Quad SMARC Module
- kosagi,imx6q-novena # Kosagi Novena Dual/Quad
+ - kvg,vicut1q # Kverneland UT1Q board
- logicpd,imx6q-logicpd
- lwn,display5 # Liebherr Display5 i.MX6 Quad Board
- lwn,mccmon6 # Liebherr Monitor6 i.MX6 Quad Board
@@ -331,6 +332,7 @@ properties:
- fsl,imx6qp-sabreauto # i.MX6 Quad Plus SABRE Automotive Board
- fsl,imx6qp-sabresd # i.MX6 Quad Plus SABRE Smart Device Board
- karo,imx6qp-tx6qp # Ka-Ro electronics TX6QP-8037 Module
+ - kvg,vicutp # Kverneland UT1P board
- prt,prtwd3 # Protonic WD3 board
- wand,imx6qp-wandboard # Wandboard i.MX6 QuadPlus Board
- zii,imx6qp-zii-rdu2 # ZII RDU2+ Board
@@ -364,7 +366,12 @@ properties:
- fsl,imx6dl-sabresd # i.MX6 DualLite SABRE Smart Device Board
- karo,imx6dl-tx6dl # Ka-Ro electronics TX6U Modules
- kontron,imx6dl-samx6i # Kontron i.MX6 Solo SMARC Module
+ - kvg,victgo # Kverneland TGO
+ - kvg,vicut1 # Kverneland UT1 board
+ - ply,plybas # Plymovent BAS board
+ - ply,plym2m # Plymovent M2M board
- poslab,imx6dl-savageboard # Poslab SavageBoard Dual
+ - prt,prtmvt # Protonic MVT board
- prt,prtrvt # Protonic RVT board
- prt,prtvt7 # Protonic VT7 board
- rex,imx6dl-rex-basic # Rex Basic i.MX6 Dual Lite Board
@@ -488,6 +495,7 @@ properties:
- karo,imx6ul-tx6ul # Ka-Ro electronics TXUL-0010 Module
- kontron,imx6ul-n6310-som # Kontron N6310 SOM
- kontron,imx6ul-n6311-som # Kontron N6311 SOM
+ - prt,prti6g # Protonic PRTI6G Board
- technexion,imx6ul-pico-dwarf # TechNexion i.MX6UL Pico-Dwarf
- technexion,imx6ul-pico-hobbit # TechNexion i.MX6UL Pico-Hobbit
- technexion,imx6ul-pico-pi # TechNexion i.MX6UL Pico-Pi
@@ -670,8 +678,12 @@ properties:
items:
- enum:
- beacon,imx8mm-beacon-kit # i.MX8MM Beacon Development Kit
+ - boundary,imx8mm-nitrogen8mm # i.MX8MM Nitrogen Board
- fsl,imx8mm-ddr4-evk # i.MX8MM DDR4 EVK Board
- fsl,imx8mm-evk # i.MX8MM EVK Board
+ - gw,imx8mm-gw71xx-0x # i.MX8MM Gateworks Development Kit
+ - gw,imx8mm-gw72xx-0x # i.MX8MM Gateworks Development Kit
+ - gw,imx8mm-gw73xx-0x # i.MX8MM Gateworks Development Kit
- kontron,imx8mm-n801x-som # i.MX8MM Kontron SL (N801X) SOM
- variscite,var-som-mx8mm # i.MX8MM Variscite VAR-SOM-MX8MM module
- const: fsl,imx8mm
@@ -691,6 +703,7 @@ properties:
- description: i.MX8MN based Boards
items:
- enum:
+ - beacon,imx8mn-beacon-kit # i.MX8MN Beacon Development Kit
- fsl,imx8mn-ddr4-evk # i.MX8MN DDR4 EVK Board
- fsl,imx8mn-evk # i.MX8MN LPDDR4 EVK Board
- const: fsl,imx8mn
@@ -707,6 +720,12 @@ properties:
- fsl,imx8mp-evk # i.MX8MP EVK Board
- const: fsl,imx8mp
+ - description: PHYTEC phyCORE-i.MX8MP SoM based boards
+ items:
+ - const: phytec,imx8mp-phyboard-pollux-rdk # phyBOARD-Pollux RDK
+ - const: phytec,imx8mp-phycore-som # phyCORE-i.MX8MP SoM
+ - const: fsl,imx8mp
+
- description: i.MX8MQ based Boards
items:
- enum:
@@ -724,6 +743,7 @@ properties:
- enum:
- purism,librem5r2 # Purism Librem5 phone "Chestnut"
- purism,librem5r3 # Purism Librem5 phone "Dogwood"
+ - purism,librem5r4 # Purism Librem5 phone "Evergreen"
- const: purism,librem5
- const: fsl,imx8mq
@@ -834,10 +854,12 @@ properties:
Kontron SMARC-sAL28 board on the SMARC Eval Carrier 2.0
items:
- enum:
+ - kontron,sl28-var1-ads2
- kontron,sl28-var2-ads2
- kontron,sl28-var3-ads2
- kontron,sl28-var4-ads2
- enum:
+ - kontron,sl28-var1
- kontron,sl28-var2
- kontron,sl28-var3
- kontron,sl28-var4
@@ -848,6 +870,7 @@ properties:
Kontron SMARC-sAL28 board (on a generic/undefined carrier)
items:
- enum:
+ - kontron,sl28-var1
- kontron,sl28-var2
- kontron,sl28-var3
- kontron,sl28-var4
diff --git a/Documentation/devicetree/bindings/arm/marvell/ap80x-system-controller.txt b/Documentation/devicetree/bindings/arm/marvell/ap80x-system-controller.txt
index e31511255d8e..052a967c1f28 100644
--- a/Documentation/devicetree/bindings/arm/marvell/ap80x-system-controller.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/ap80x-system-controller.txt
@@ -80,6 +80,11 @@ Required properties:
- offset: offset address inside the syscon block
+Optional properties:
+
+- marvell,pwm-offset: offset address of PWM duration control registers inside
+ the syscon block
+
Example:
ap_syscon: system-controller@6f4000 {
compatible = "syscon", "simple-mfd";
@@ -101,6 +106,9 @@ ap_syscon: system-controller@6f4000 {
gpio-controller;
#gpio-cells = <2>;
gpio-ranges = <&ap_pinctrl 0 0 19>;
+ marvell,pwm-offset = <0x10c0>;
+ #pwm-cells = <2>;
+ clocks = <&ap_clk 3>;
};
};
diff --git a/Documentation/devicetree/bindings/arm/mediatek.yaml b/Documentation/devicetree/bindings/arm/mediatek.yaml
index 53f0d4e3ea98..93b3bdf6eaeb 100644
--- a/Documentation/devicetree/bindings/arm/mediatek.yaml
+++ b/Documentation/devicetree/bindings/arm/mediatek.yaml
@@ -120,7 +120,9 @@ properties:
- const: mediatek,mt8183
- description: Google Krane (Lenovo IdeaPad Duet, 10e,...)
items:
- - const: google,krane-sku176
+ - enum:
+ - google,krane-sku0
+ - google,krane-sku176
- const: google,krane
- const: mediatek,mt8183
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.yaml b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.yaml
index 0a9889debc7c..c299dc907f6c 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.yaml
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.yaml
@@ -24,6 +24,7 @@ properties:
- qcom,sc7180-llcc
- qcom,sdm845-llcc
- qcom,sm8150-llcc
+ - qcom,sm8250-llcc
reg:
items:
diff --git a/Documentation/devicetree/bindings/arm/pmu.yaml b/Documentation/devicetree/bindings/arm/pmu.yaml
index 693ef3f185a8..e17ac049e890 100644
--- a/Documentation/devicetree/bindings/arm/pmu.yaml
+++ b/Documentation/devicetree/bindings/arm/pmu.yaml
@@ -43,6 +43,7 @@ properties:
- arm,cortex-a75-pmu
- arm,cortex-a76-pmu
- arm,cortex-a77-pmu
+ - arm,cortex-a78-pmu
- arm,neoverse-e1-pmu
- arm,neoverse-n1-pmu
- brcm,vulcan-pmu
diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml
index c97d4a580f47..174134f920e1 100644
--- a/Documentation/devicetree/bindings/arm/qcom.yaml
+++ b/Documentation/devicetree/bindings/arm/qcom.yaml
@@ -40,7 +40,9 @@ description: |
sdm630
sdm660
sdm845
+ sdx55
sm8250
+ sm8350
The 'board' element must be one of the following strings:
@@ -169,6 +171,11 @@ properties:
- items:
- enum:
+ - qcom,sdx55-mtp
+ - const: qcom,sdx55
+
+ - items:
+ - enum:
- qcom,ipq6018-cp01-c1
- const: qcom,ipq6018
@@ -178,6 +185,11 @@ properties:
- qcom,sm8250-mtp
- const: qcom,sm8250
+ - items:
+ - enum:
+ - qcom,sm8350-mtp
+ - const: qcom,sm8350
+
additionalProperties: true
...
diff --git a/Documentation/devicetree/bindings/arm/renesas.yaml b/Documentation/devicetree/bindings/arm/renesas.yaml
index fe11be65039a..5fd0696a9f91 100644
--- a/Documentation/devicetree/bindings/arm/renesas.yaml
+++ b/Documentation/devicetree/bindings/arm/renesas.yaml
@@ -130,6 +130,7 @@ properties:
- description: RZ/G2N (R8A774B1)
items:
- enum:
+ - beacon,beacon-rzg2n # Beacon EmbeddedWorks RZ/G2N Kit
- hoperun,hihope-rzg2n # HopeRun HiHope RZ/G2N platform
- const: renesas,r8a774b1
@@ -154,6 +155,7 @@ properties:
- description: RZ/G2H (R8A774E1)
items:
- enum:
+ - beacon,beacon-rzg2h # Beacon EmbeddedWorks RZ/G2H Kit
- hoperun,hihope-rzg2h # HopeRun HiHope RZ/G2H platform
- const: renesas,r8a774e1
diff --git a/Documentation/devicetree/bindings/arm/rockchip.yaml b/Documentation/devicetree/bindings/arm/rockchip.yaml
index ef4544ad6f82..c3036f95c7bc 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.yaml
+++ b/Documentation/devicetree/bindings/arm/rockchip.yaml
@@ -132,6 +132,7 @@ properties:
- enum:
- friendlyarm,nanopc-t4
- friendlyarm,nanopi-m4
+ - friendlyarm,nanopi-m4b
- friendlyarm,nanopi-neo4
- const: rockchip,rk3399
@@ -467,6 +468,11 @@ properties:
- const: radxa,rockpi4
- const: rockchip,rk3399
+ - description: Radxa ROCK Pi E
+ items:
+ - const: radxa,rockpi-e
+ - const: rockchip,rk3328
+
- description: Radxa ROCK Pi N8
items:
- const: radxa,rockpi-n8
diff --git a/Documentation/devicetree/bindings/arm/sirf.yaml b/Documentation/devicetree/bindings/arm/sirf.yaml
deleted file mode 100644
index b25eb35d1b66..000000000000
--- a/Documentation/devicetree/bindings/arm/sirf.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/arm/sirf.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: CSR SiRFprimaII and SiRFmarco device tree bindings.
-
-maintainers:
- - Binghua Duan <binghua.duan@csr.com>
- - Barry Song <Baohua.Song@csr.com>
-
-properties:
- $nodename:
- const: '/'
- compatible:
- oneOf:
- - items:
- - const: sirf,atlas6-cb
- - const: sirf,atlas6
- - items:
- - const: sirf,atlas7-cb
- - const: sirf,atlas7
- - items:
- - const: sirf,prima2-cb
- - const: sirf,prima2
-
-additionalProperties: true
-
-...
diff --git a/Documentation/devicetree/bindings/arm/socionext/socionext,uniphier-system-cache.yaml b/Documentation/devicetree/bindings/arm/socionext/socionext,uniphier-system-cache.yaml
index 2e765bb3e6f6..7ca5375f278f 100644
--- a/Documentation/devicetree/bindings/arm/socionext/socionext,uniphier-system-cache.yaml
+++ b/Documentation/devicetree/bindings/arm/socionext/socionext,uniphier-system-cache.yaml
@@ -30,8 +30,8 @@ properties:
Interrupts can be used to notify the completion of cache operations.
The number of interrupts should match to the number of CPU cores.
The specified interrupts correspond to CPU0, CPU1, ... in this order.
- minItems: 1
- maxItems: 4
+ minItems: 1
+ maxItems: 4
cache-unified: true
diff --git a/Documentation/devicetree/bindings/arm/ste-u300.txt b/Documentation/devicetree/bindings/arm/ste-u300.txt
deleted file mode 100644
index d11d80006a19..000000000000
--- a/Documentation/devicetree/bindings/arm/ste-u300.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-ST-Ericsson U300 Device Tree Bindings
-
-For various board the "board" node may contain specific properties
-that pertain to this particular board, such as board-specific GPIOs
-or board power regulator supplies.
-
-Required root node property:
-
-compatible="stericsson,u300";
-
-Required node: syscon
-This contains the system controller.
-- compatible: must be "stericsson,u300-syscon".
-- reg: the base address and size of the system controller.
-
-Boards with the U300 SoC include:
-
-S365 "Small Board U365":
-
-Required node: s365
-This contains the board-specific information.
-- compatible: must be "stericsson,s365".
-- vana15-supply: the regulator supplying the 1.5V to drive the
- board.
-- syscon: a pointer to the syscon node so we can access the
- syscon registers to set the board as self-powered.
-
-Example:
-
-/ {
- model = "ST-Ericsson U300";
- compatible = "stericsson,u300";
- #address-cells = <1>;
- #size-cells = <1>;
-
- s365 {
- compatible = "stericsson,s365";
- vana15-supply = <&ab3100_ldo_d_reg>;
- syscon = <&syscon>;
- };
-
- syscon: syscon@c0011000 {
- compatible = "stericsson,u300-syscon";
- reg = <0xc0011000 0x1000>;
- };
-};
diff --git a/Documentation/devicetree/bindings/arm/sunxi.yaml b/Documentation/devicetree/bindings/arm/sunxi.yaml
index 6db32fbf813f..08607c7ec1bf 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.yaml
+++ b/Documentation/devicetree/bindings/arm/sunxi.yaml
@@ -657,7 +657,8 @@ properties:
- description: Pine64 PineCube
items:
- const: pine64,pinecube
- - const: allwinner,sun8i-s3
+ - const: sochip,s3
+ - const: allwinner,sun8i-v3
- description: Pine64 PineH64 model A
items:
@@ -683,23 +684,31 @@ properties:
- description: Pine64 PinePhone Developer Batch (1.0)
items:
- const: pine64,pinephone-1.0
+ - const: pine64,pinephone
- const: allwinner,sun50i-a64
- description: Pine64 PinePhone Braveheart (1.1)
items:
- const: pine64,pinephone-1.1
+ - const: pine64,pinephone
- const: allwinner,sun50i-a64
- description: Pine64 PinePhone (1.2)
items:
- const: pine64,pinephone-1.2
+ - const: pine64,pinephone
- const: allwinner,sun50i-a64
- - description: Pine64 PineTab
+ - description: Pine64 PineTab, Development Sample
items:
- const: pine64,pinetab
- const: allwinner,sun50i-a64
+ - description: Pine64 PineTab, Early Adopter's batch (and maybe later ones)
+ items:
+ - const: pine64,pinetab-early-adopter
+ - const: allwinner,sun50i-a64
+
- description: Pine64 SoPine Baseboard
items:
- const: pine64,sopine-baseboard
@@ -777,6 +786,12 @@ properties:
- const: sinlinx,sina33
- const: allwinner,sun8i-a33
+ - description: SL631 Action Camera with IMX179
+ items:
+ - const: allwinner,sl631-imx179
+ - const: allwinner,sl631
+ - const: allwinner,sun8i-v3
+
- description: Tanix TX6
items:
- const: oranth,tanix-tx6
diff --git a/Documentation/devicetree/bindings/arm/tegra.yaml b/Documentation/devicetree/bindings/arm/tegra.yaml
index c5fbf869aa93..b9f75e20fef5 100644
--- a/Documentation/devicetree/bindings/arm/tegra.yaml
+++ b/Documentation/devicetree/bindings/arm/tegra.yaml
@@ -120,10 +120,18 @@ properties:
items:
- const: nvidia,p3668-0000
- const: nvidia,tegra194
+ - description: Jetson Xavier NX (eMMC)
+ items:
+ - const: nvidia,p3668-0001
+ - const: nvidia,tegra194
- description: Jetson Xavier NX Developer Kit
items:
- const: nvidia,p3509-0000+p3668-0000
- const: nvidia,tegra194
+ - description: Jetson Xavier NX Developer Kit (eMMC)
+ items:
+ - const: nvidia,p3509-0000+p3668-0001
+ - const: nvidia,tegra194
- items:
- enum:
- nvidia,tegra234-vdk
diff --git a/Documentation/devicetree/bindings/arm/xilinx.yaml b/Documentation/devicetree/bindings/arm/xilinx.yaml
index e0c6787f6e94..f52c7e8ce654 100644
--- a/Documentation/devicetree/bindings/arm/xilinx.yaml
+++ b/Documentation/devicetree/bindings/arm/xilinx.yaml
@@ -22,6 +22,9 @@ properties:
- adapteva,parallella
- digilent,zynq-zybo
- digilent,zynq-zybo-z7
+ - ebang,ebaz4205
+ - myir,zynq-zturn-v5
+ - myir,zynq-zturn
- xlnx,zynq-cc108
- xlnx,zynq-zc702
- xlnx,zynq-zc706
@@ -91,6 +94,7 @@ properties:
items:
- enum:
- xlnx,zynqmp-zcu104-revA
+ - xlnx,zynqmp-zcu104-revC
- xlnx,zynqmp-zcu104-rev1.0
- const: xlnx,zynqmp-zcu104
- const: xlnx,zynqmp
@@ -107,7 +111,7 @@ properties:
items:
- enum:
- xlnx,zynqmp-zcu111-revA
- - xlnx,zynqmp-zcu11-rev1.0
+ - xlnx,zynqmp-zcu111-rev1.0
- const: xlnx,zynqmp-zcu111
- const: xlnx,zynqmp
diff --git a/Documentation/devicetree/bindings/arm/zte,sysctrl.txt b/Documentation/devicetree/bindings/arm/zte,sysctrl.txt
deleted file mode 100644
index 7e66b7f7ba96..000000000000
--- a/Documentation/devicetree/bindings/arm/zte,sysctrl.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-ZTE sysctrl Registers
-
-Registers for 'zte,zx296702' SoC:
-
-System management required properties:
- - compatible = "zte,sysctrl"
-
-Low power management required properties:
- - compatible = "zte,zx296702-pcu"
-
-Bus matrix required properties:
- - compatible = "zte,zx-bus-matrix"
-
-
-Registers for 'zte,zx296718' SoC:
-
-System management required properties:
- - compatible = "zte,zx296718-aon-sysctrl"
- - compatible = "zte,zx296718-sysctrl"
-
-Example:
-aon_sysctrl: aon-sysctrl@116000 {
- compatible = "zte,zx296718-aon-sysctrl", "syscon";
- reg = <0x116000 0x1000>;
-};
-
-sysctrl: sysctrl@1463000 {
- compatible = "zte,zx296718-sysctrl", "syscon";
- reg = <0x1463000 0x1000>;
-};
diff --git a/Documentation/devicetree/bindings/arm/zte.yaml b/Documentation/devicetree/bindings/arm/zte.yaml
deleted file mode 100644
index 672f8129cd31..000000000000
--- a/Documentation/devicetree/bindings/arm/zte.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/arm/zte.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: ZTE platforms device tree bindings
-
-maintainers:
- - Jun Nie <jun.nie@linaro.org>
-
-properties:
- $nodename:
- const: '/'
- compatible:
- oneOf:
- - items:
- - enum:
- - zte,zx296702-ad1
- - const: zte,zx296702
- - items:
- - enum:
- - zte,zx296718-evb
- - const: zte,zx296718
-
-additionalProperties: true
-
-...
diff --git a/Documentation/devicetree/bindings/ata/sata_highbank.yaml b/Documentation/devicetree/bindings/ata/sata_highbank.yaml
index 5e2a2394e600..ce75d77e9289 100644
--- a/Documentation/devicetree/bindings/ata/sata_highbank.yaml
+++ b/Documentation/devicetree/bindings/ata/sata_highbank.yaml
@@ -61,6 +61,7 @@ properties:
maxItems: 8
calxeda,sgpio-gpio:
+ maxItems: 3
description: |
phandle-gpio bank, bit offset, and default on or off, which indicates
that the driver supports SGPIO indicator lights using the indicated
diff --git a/Documentation/devicetree/bindings/auxdisplay/holtek,ht16k33.yaml b/Documentation/devicetree/bindings/auxdisplay/holtek,ht16k33.yaml
new file mode 100644
index 000000000000..64ffff460026
--- /dev/null
+++ b/Documentation/devicetree/bindings/auxdisplay/holtek,ht16k33.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/auxdisplay/holtek,ht16k33.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Holtek HT16K33 RAM mapping 16*8 LED controller with keyscan
+
+maintainers:
+ - Robin van der Gracht <robin@protonic.nl>
+
+allOf:
+ - $ref: "/schemas/input/matrix-keymap.yaml#"
+
+properties:
+ compatible:
+ const: holtek,ht16k33
+
+ reg:
+ maxItems: 1
+
+ refresh-rate-hz:
+ maxItems: 1
+ description: Display update interval in Hertz
+
+ interrupts:
+ maxItems: 1
+
+ debounce-delay-ms:
+ maxItems: 1
+ description: Debouncing interval time in milliseconds
+
+ linux,keymap: true
+
+ linux,no-autorepeat:
+ description: Disable keyrepeat
+
+ default-brightness-level:
+ minimum: 1
+ maximum: 16
+ default: 16
+ description: Initial brightness level
+
+required:
+ - compatible
+ - reg
+ - refresh-rate-hz
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/input/input.h>
+ i2c1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ht16k33: ht16k33@70 {
+ compatible = "holtek,ht16k33";
+ reg = <0x70>;
+ refresh-rate-hz = <20>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <5 (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING)>;
+ debounce-delay-ms = <50>;
+ linux,keymap = <MATRIX_KEY(2, 0, KEY_F6)>,
+ <MATRIX_KEY(3, 0, KEY_F8)>,
+ <MATRIX_KEY(4, 0, KEY_F10)>,
+ <MATRIX_KEY(5, 0, KEY_F4)>,
+ <MATRIX_KEY(6, 0, KEY_F2)>,
+ <MATRIX_KEY(2, 1, KEY_F5)>,
+ <MATRIX_KEY(3, 1, KEY_F7)>,
+ <MATRIX_KEY(4, 1, KEY_F9)>,
+ <MATRIX_KEY(5, 1, KEY_F3)>,
+ <MATRIX_KEY(6, 1, KEY_F1)>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml b/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml
index 32d33b983d66..3d719f468a5b 100644
--- a/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml
+++ b/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml
@@ -21,7 +21,9 @@ properties:
oneOf:
- const: allwinner,sun8i-a23-rsb
- items:
- - const: allwinner,sun8i-a83t-rsb
+ - enum:
+ - allwinner,sun8i-a83t-rsb
+ - allwinner,sun50i-h616-rsb
- const: allwinner,sun8i-a23-rsb
reg:
diff --git a/Documentation/devicetree/bindings/c6x/clocks.txt b/Documentation/devicetree/bindings/c6x/clocks.txt
deleted file mode 100644
index a04f5fd30122..000000000000
--- a/Documentation/devicetree/bindings/c6x/clocks.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-C6X PLL Clock Controllers
--------------------------
-
-This is a first-cut support for the SoC clock controllers. This is still
-under development and will probably change as the common device tree
-clock support is added to the kernel.
-
-Required properties:
-
-- compatible: "ti,c64x+pll"
- May also have SoC-specific value to support SoC-specific initialization
- in the driver. One of:
- "ti,c6455-pll"
- "ti,c6457-pll"
- "ti,c6472-pll"
- "ti,c6474-pll"
-
-- reg: base address and size of register area
-- clock-frequency: input clock frequency in hz
-
-
-Optional properties:
-
-- ti,c64x+pll-bypass-delay: CPU cycles to delay when entering bypass mode
-
-- ti,c64x+pll-reset-delay: CPU cycles to delay after PLL reset
-
-- ti,c64x+pll-lock-delay: CPU cycles to delay after PLL frequency change
-
-Example:
-
- clock-controller@29a0000 {
- compatible = "ti,c6472-pll", "ti,c64x+pll";
- reg = <0x029a0000 0x200>;
- clock-frequency = <25000000>;
-
- ti,c64x+pll-bypass-delay = <200>;
- ti,c64x+pll-reset-delay = <12000>;
- ti,c64x+pll-lock-delay = <80000>;
- };
diff --git a/Documentation/devicetree/bindings/c6x/dscr.txt b/Documentation/devicetree/bindings/c6x/dscr.txt
deleted file mode 100644
index 92672235de57..000000000000
--- a/Documentation/devicetree/bindings/c6x/dscr.txt
+++ /dev/null
@@ -1,127 +0,0 @@
-Device State Configuration Registers
-------------------------------------
-
-TI C6X SoCs contain a region of miscellaneous registers which provide various
-function for SoC control or status. Details vary considerably among from SoC
-to SoC with no two being alike.
-
-In general, the Device State Configuration Registers (DSCR) will provide one or
-more configuration registers often protected by a lock register where one or
-more key values must be written to a lock register in order to unlock the
-configuration register for writes. These configuration register may be used to
-enable (and disable in some cases) SoC pin drivers, select peripheral clock
-sources (internal or pin), etc. In some cases, a configuration register is
-write once or the individual bits are write once. In addition to device config,
-the DSCR block may provide registers which are used to reset peripherals,
-provide device ID information, provide ethernet MAC addresses, as well as other
-miscellaneous functions.
-
-For device state control (enable/disable), each device control is assigned an
-id which is used by individual device drivers to control the state as needed.
-
-Required properties:
-
-- compatible: must be "ti,c64x+dscr"
-- reg: register area base and size
-
-Optional properties:
-
- NOTE: These are optional in that not all SoCs will have all properties. For
- SoCs which do support a given property, leaving the property out of the
- device tree will result in reduced functionality or possibly driver
- failure.
-
-- ti,dscr-devstat
- offset of the devstat register
-
-- ti,dscr-silicon-rev
- offset, start bit, and bitsize of silicon revision field
-
-- ti,dscr-rmii-resets
- offset and bitmask of RMII reset field. May have multiple tuples if more
- than one ethernet port is available.
-
-- ti,dscr-locked-regs
- possibly multiple tuples describing registers which are write protected by
- a lock register. Each tuple consists of the register offset, lock register
- offsset, and the key value used to unlock the register.
-
-- ti,dscr-kick-regs
- offset and key values of two "kick" registers used to write protect other
- registers in DSCR. On SoCs using kick registers, the first key must be
- written to the first kick register and the second key must be written to
- the second register before other registers in the area are write-enabled.
-
-- ti,dscr-mac-fuse-regs
- MAC addresses are contained in two registers. Each element of a MAC address
- is contained in a single byte. This property has two tuples. Each tuple has
- a register offset and four cells representing bytes in the register from
- most significant to least. The value of these four cells is the MAC byte
- index (1-6) of the byte within the register. A value of 0 means the byte
- is unused in the MAC address.
-
-- ti,dscr-devstate-ctl-regs
- This property describes the bitfields used to control the state of devices.
- Each tuple describes a range of identical bitfields used to control one or
- more devices (one bitfield per device). The layout of each tuple is:
-
- start_id num_ids reg enable disable start_bit nbits
-
- Where:
- start_id is device id for the first device control in the range
- num_ids is the number of device controls in the range
- reg is the offset of the register holding the control bits
- enable is the value to enable a device
- disable is the value to disable a device (0xffffffff if cannot disable)
- start_bit is the bit number of the first bit in the range
- nbits is the number of bits per device control
-
-- ti,dscr-devstate-stat-regs
- This property describes the bitfields used to provide device state status
- for device states controlled by the DSCR. Each tuple describes a range of
- identical bitfields used to provide status for one or more devices (one
- bitfield per device). The layout of each tuple is:
-
- start_id num_ids reg enable disable start_bit nbits
-
- Where:
- start_id is device id for the first device status in the range
- num_ids is the number of devices covered by the range
- reg is the offset of the register holding the status bits
- enable is the value indicating device is enabled
- disable is the value indicating device is disabled
- start_bit is the bit number of the first bit in the range
- nbits is the number of bits per device status
-
-- ti,dscr-privperm
- Offset and default value for register used to set access privilege for
- some SoC devices.
-
-
-Example:
-
- device-state-config-regs@2a80000 {
- compatible = "ti,c64x+dscr";
- reg = <0x02a80000 0x41000>;
-
- ti,dscr-devstat = <0>;
- ti,dscr-silicon-rev = <8 28 0xf>;
- ti,dscr-rmii-resets = <0x40020 0x00040000>;
-
- ti,dscr-locked-regs = <0x40008 0x40004 0x0f0a0b00>;
- ti,dscr-devstate-ctl-regs =
- <0 12 0x40008 1 0 0 2
- 12 1 0x40008 3 0 30 2
- 13 2 0x4002c 1 0xffffffff 0 1>;
- ti,dscr-devstate-stat-regs =
- <0 10 0x40014 1 0 0 3
- 10 2 0x40018 1 0 0 3>;
-
- ti,dscr-mac-fuse-regs = <0x700 1 2 3 4
- 0x704 5 6 0 0>;
-
- ti,dscr-privperm = <0x41c 0xaaaaaaaa>;
-
- ti,dscr-kick-regs = <0x38 0x83E70B13
- 0x3c 0x95A4F1E0>;
- };
diff --git a/Documentation/devicetree/bindings/c6x/emifa.txt b/Documentation/devicetree/bindings/c6x/emifa.txt
deleted file mode 100644
index 0ff6e9b9a13f..000000000000
--- a/Documentation/devicetree/bindings/c6x/emifa.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-External Memory Interface
--------------------------
-
-The emifa node describes a simple external bus controller found on some C6X
-SoCs. This interface provides external busses with a number of chip selects.
-
-Required properties:
-
-- compatible: must be "ti,c64x+emifa", "simple-bus"
-- reg: register area base and size
-- #address-cells: must be 2 (chip-select + offset)
-- #size-cells: must be 1
-- ranges: mapping from EMIFA space to parent space
-
-
-Optional properties:
-
-- ti,dscr-dev-enable: Device ID if EMIF is enabled/disabled from DSCR
-
-- ti,emifa-burst-priority:
- Number of memory transfers after which the EMIF will elevate the priority
- of the oldest command in the command FIFO. Setting this field to 255
- disables this feature, thereby allowing old commands to stay in the FIFO
- indefinitely.
-
-- ti,emifa-ce-config:
- Configuration values for each of the supported chip selects.
-
-Example:
-
- emifa@70000000 {
- compatible = "ti,c64x+emifa", "simple-bus";
- #address-cells = <2>;
- #size-cells = <1>;
- reg = <0x70000000 0x100>;
- ranges = <0x2 0x0 0xa0000000 0x00000008
- 0x3 0x0 0xb0000000 0x00400000
- 0x4 0x0 0xc0000000 0x10000000
- 0x5 0x0 0xD0000000 0x10000000>;
-
- ti,dscr-dev-enable = <13>;
- ti,emifa-burst-priority = <255>;
- ti,emifa-ce-config = <0x00240120
- 0x00240120
- 0x00240122
- 0x00240122>;
-
- flash@3,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "cfi-flash";
- reg = <0x3 0x0 0x400000>;
- bank-width = <1>;
- device-width = <1>;
- partition@0 {
- reg = <0x0 0x400000>;
- label = "NOR";
- };
- };
- };
-
-This shows a flash chip attached to chip select 3.
diff --git a/Documentation/devicetree/bindings/c6x/soc.txt b/Documentation/devicetree/bindings/c6x/soc.txt
deleted file mode 100644
index b1e4973b5769..000000000000
--- a/Documentation/devicetree/bindings/c6x/soc.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-C6X System-on-Chip
-------------------
-
-Required properties:
-
-- compatible: "simple-bus"
-- #address-cells: must be 1
-- #size-cells: must be 1
-- ranges
-
-Optional properties:
-
-- model: specific SoC model
-
-- nodes for IP blocks within SoC
-
-
-Example:
-
- soc {
- compatible = "simple-bus";
- model = "tms320c6455";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- ...
- };
diff --git a/Documentation/devicetree/bindings/clock/adi,axi-clkgen.yaml b/Documentation/devicetree/bindings/clock/adi,axi-clkgen.yaml
index 0d06387184d6..983033fe5b17 100644
--- a/Documentation/devicetree/bindings/clock/adi,axi-clkgen.yaml
+++ b/Documentation/devicetree/bindings/clock/adi,axi-clkgen.yaml
@@ -20,6 +20,7 @@ properties:
compatible:
enum:
- adi,axi-clkgen-2.00.a
+ - adi,zynqmp-axi-clkgen-2.00.a
clocks:
description:
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
index 3b45344ed758..a27025cd3909 100644
--- a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
@@ -41,6 +41,8 @@ properties:
- allwinner,sun50i-h5-ccu
- allwinner,sun50i-h6-ccu
- allwinner,sun50i-h6-r-ccu
+ - allwinner,sun50i-h616-ccu
+ - allwinner,sun50i-h616-r-ccu
- allwinner,suniv-f1c100s-ccu
- nextthing,gr8-ccu
@@ -82,6 +84,7 @@ if:
- allwinner,sun50i-a64-r-ccu
- allwinner,sun50i-a100-r-ccu
- allwinner,sun50i-h6-r-ccu
+ - allwinner,sun50i-h616-r-ccu
then:
properties:
@@ -100,6 +103,7 @@ else:
enum:
- allwinner,sun50i-a100-ccu
- allwinner,sun50i-h6-ccu
+ - allwinner,sun50i-h616-ccu
then:
properties:
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-usb-clocks.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-usb-clks.yaml
index fa0ee03a527f..6532fb6821bc 100644
--- a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-usb-clocks.yaml
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-usb-clks.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0+
%YAML 1.2
---
-$id: http://devicetree.org/schemas/clock/allwinner,sun9i-a80-usb-clocks.yaml#
+$id: http://devicetree.org/schemas/clock/allwinner,sun9i-a80-usb-clks.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Allwinner A80 USB Clock Controller Device Tree Bindings
@@ -18,7 +18,7 @@ properties:
const: 1
compatible:
- const: allwinner,sun9i-a80-usb-clocks
+ const: allwinner,sun9i-a80-usb-clks
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/clock/arm,syscon-icst.yaml b/Documentation/devicetree/bindings/clock/arm,syscon-icst.yaml
index eb241587efd1..118c5543e037 100644
--- a/Documentation/devicetree/bindings/clock/arm,syscon-icst.yaml
+++ b/Documentation/devicetree/bindings/clock/arm,syscon-icst.yaml
@@ -66,8 +66,8 @@ properties:
- arm,syscon-icst525-integratorcp-cm-mem
- arm,integrator-cm-auxosc
- arm,versatile-cm-auxosc
- - arm,impd-vco1
- - arm,impd-vco2
+ - arm,impd1-vco1
+ - arm,impd1-vco2
clocks:
description: Parent clock for the ICST VCO
diff --git a/Documentation/devicetree/bindings/clock/canaan,k210-clk.yaml b/Documentation/devicetree/bindings/clock/canaan,k210-clk.yaml
index 565ca468cb44..7f5cf4001f76 100644
--- a/Documentation/devicetree/bindings/clock/canaan,k210-clk.yaml
+++ b/Documentation/devicetree/bindings/clock/canaan,k210-clk.yaml
@@ -22,6 +22,7 @@ properties:
const: canaan,k210-clk
clocks:
+ maxItems: 1
description:
Phandle of the SoC 26MHz fixed-rate oscillator clock.
diff --git a/Documentation/devicetree/bindings/clock/csr,atlas7-car.txt b/Documentation/devicetree/bindings/clock/csr,atlas7-car.txt
deleted file mode 100644
index 54d6d1358339..000000000000
--- a/Documentation/devicetree/bindings/clock/csr,atlas7-car.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-* Clock and reset bindings for CSR atlas7
-
-Required properties:
-- compatible: Should be "sirf,atlas7-car"
-- reg: Address and length of the register set
-- #clock-cells: Should be <1>
-- #reset-cells: Should be <1>
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell.
-The ID list atlas7_clks defined in drivers/clk/sirf/clk-atlas7.c
-
-The reset consumer should specify the desired reset by having the reset
-ID in its "reset" phandle cell.
-The ID list atlas7_reset_unit defined in drivers/clk/sirf/clk-atlas7.c
-
-Examples: Clock and reset controller node:
-
-car: clock-controller@18620000 {
- compatible = "sirf,atlas7-car";
- reg = <0x18620000 0x1000>;
- #clock-cells = <1>;
- #reset-cells = <1>;
-};
-
-Examples: Consumers using clock or reset:
-
-timer@10dc0000 {
- compatible = "sirf,macro-tick";
- reg = <0x10dc0000 0x1000>;
- clocks = <&car 54>;
- interrupts = <0 0 0>,
- <0 1 0>,
- <0 2 0>,
- <0 49 0>,
- <0 50 0>,
- <0 51 0>;
-};
-
-uart1: uart@18020000 {
- cell-index = <1>;
- compatible = "sirf,macro-uart";
- reg = <0x18020000 0x1000>;
- clocks = <&clks 95>;
- interrupts = <0 18 0>;
- fifosize = <32>;
-};
-
-vpp@13110000 {
- compatible = "sirf,prima2-vpp";
- reg = <0x13110000 0x10000>;
- interrupts = <0 31 0>;
- clocks = <&car 85>;
- resets = <&car 29>;
-};
diff --git a/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml b/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
index 2ac1131fd922..c268debe5b8d 100644
--- a/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
+++ b/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
@@ -59,6 +59,12 @@ properties:
minItems: 1
maxItems: 2
+ idt,xtal-load-femtofarads:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 9000
+ maximum: 22760
+ description: Optional load capacitor for XTAL1 and XTAL2
+
patternProperties:
"^OUT[1-4]$":
type: object
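For illustration, the new crystal load-capacitance property would sit alongside the existing properties of the clock generator node, as in this sketch; the I2C address, reference clock and value are placeholders (the value within the documented 9000-22760 range):

    versaclock5: clock-generator@6a {
        compatible = "idt,5p49v5935";
        reg = <0x6a>;
        #clock-cells = <1>;
        clocks = <&x1_clk>;
        clock-names = "xin";
        idt,xtal-load-femtofarads = <9320>;
    };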
diff --git a/Documentation/devicetree/bindings/clock/imx27-clock.yaml b/Documentation/devicetree/bindings/clock/imx27-clock.yaml
index a75365453dbc..160268f24487 100644
--- a/Documentation/devicetree/bindings/clock/imx27-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/imx27-clock.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Clock bindings for Freescale i.MX27
maintainers:
- - Fabio Estevam <fabio.estevam@nxp.com>
+ - Fabio Estevam <festevam@gmail.com>
description: |
The clock consumer should specify the desired clock by having the clock
diff --git a/Documentation/devicetree/bindings/clock/imx31-clock.yaml b/Documentation/devicetree/bindings/clock/imx31-clock.yaml
index a25a374b3b2a..d2336261c922 100644
--- a/Documentation/devicetree/bindings/clock/imx31-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/imx31-clock.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Clock bindings for Freescale i.MX31
maintainers:
- - Fabio Estevam <fabio.estevam@nxp.com>
+ - Fabio Estevam <festevam@gmail.com>
description: |
The clock consumer should specify the desired clock by having the clock
diff --git a/Documentation/devicetree/bindings/clock/imx5-clock.yaml b/Documentation/devicetree/bindings/clock/imx5-clock.yaml
index 90775c2669b8..b1740d7abe68 100644
--- a/Documentation/devicetree/bindings/clock/imx5-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/imx5-clock.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Clock bindings for Freescale i.MX5
maintainers:
- - Fabio Estevam <fabio.estevam@nxp.com>
+ - Fabio Estevam <festevam@gmail.com>
description: |
The clock consumer should specify the desired clock by having the clock
diff --git a/Documentation/devicetree/bindings/clock/intel,easic-n5x.yaml b/Documentation/devicetree/bindings/clock/intel,easic-n5x.yaml
new file mode 100644
index 000000000000..8f45976e946e
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/intel,easic-n5x.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/intel,easic-n5x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel SoCFPGA eASIC N5X platform clock controller binding
+
+maintainers:
+ - Dinh Nguyen <dinguyen@kernel.org>
+
+description:
+ The Intel eASIC N5X clock controller is an integrated clock controller, which
+ generates and supplies clocks to all modules.
+
+properties:
+ compatible:
+ const: intel,easic-n5x-clkmgr
+
+ '#clock-cells':
+ const: 1
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ # Clock controller node:
+ - |
+ clkmgr: clock-controller@ffd10000 {
+ compatible = "intel,easic-n5x-clkmgr";
+ reg = <0xffd10000 0x1000>;
+ clocks = <&osc1>;
+ #clock-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/mstar,msc313-mpll.yaml b/Documentation/devicetree/bindings/clock/mstar,msc313-mpll.yaml
new file mode 100644
index 000000000000..0df5d75d4ebc
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/mstar,msc313-mpll.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/mstar,msc313-mpll.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MStar/Sigmastar MSC313 MPLL
+
+maintainers:
+ - Daniel Palmer <daniel@thingy.jp>
+
+description: |
+ The MStar/SigmaStar MSC313 and later ARMv7 chips have an MPLL block that
+ takes the external xtal input and multiplies it to create a high
+ frequency clock and divides that down into a number of clocks that
+ peripherals use.
+
+properties:
+ compatible:
+ const: mstar,msc313-mpll
+
+ "#clock-cells":
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - "#clock-cells"
+ - clocks
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ mpll@206000 {
+ compatible = "mstar,msc313-mpll";
+ reg = <0x206000 0x200>;
+ #clock-cells = <1>;
+ clocks = <&xtal>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/prima2-clock.txt b/Documentation/devicetree/bindings/clock/prima2-clock.txt
deleted file mode 100644
index 5016979c0f78..000000000000
--- a/Documentation/devicetree/bindings/clock/prima2-clock.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-* Clock bindings for CSR SiRFprimaII
-
-Required properties:
-- compatible: Should be "sirf,prima2-clkc"
-- reg: Address and length of the register set
-- interrupts: Should contain clock controller interrupt
-- #clock-cells: Should be <1>
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. The following is a full list of prima2
-clocks and IDs.
-
- Clock ID
- ---------------------------
- rtc 0
- osc 1
- pll1 2
- pll2 3
- pll3 4
- mem 5
- sys 6
- security 7
- dsp 8
- gps 9
- mf 10
- io 11
- cpu 12
- uart0 13
- uart1 14
- uart2 15
- tsc 16
- i2c0 17
- i2c1 18
- spi0 19
- spi1 20
- pwmc 21
- efuse 22
- pulse 23
- dmac0 24
- dmac1 25
- nand 26
- audio 27
- usp0 28
- usp1 29
- usp2 30
- vip 31
- gfx 32
- mm 33
- lcd 34
- vpp 35
- mmc01 36
- mmc23 37
- mmc45 38
- usbpll 39
- usb0 40
- usb1 41
-
-Examples:
-
-clks: clock-controller@88000000 {
- compatible = "sirf,prima2-clkc";
- reg = <0x88000000 0x1000>;
- interrupts = <3>;
- #clock-cells = <1>;
-};
-
-i2c0: i2c@b00e0000 {
- cell-index = <0>;
- compatible = "sirf,prima2-i2c";
- reg = <0xb00e0000 0x10000>;
- interrupts = <24>;
- clocks = <&clks 17>;
-};
diff --git a/Documentation/devicetree/bindings/clock/qcom,a7pll.yaml b/Documentation/devicetree/bindings/clock/qcom,a7pll.yaml
new file mode 100644
index 000000000000..8666e995725f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,a7pll.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,a7pll.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm A7 PLL Binding
+
+maintainers:
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+description:
+ The A7 PLL on Qualcomm platforms such as the SDX55 provides a high-frequency
+ clock to the CPU.
+
+properties:
+ compatible:
+ enum:
+ - qcom,sdx55-a7pll
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 0
+
+ clocks:
+ items:
+ - description: board XO clock
+
+ clock-names:
+ items:
+ - const: bi_tcxo
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ a7pll: clock@17808000 {
+ compatible = "qcom,sdx55-a7pll";
+ reg = <0x17808000 0x1000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "bi_tcxo";
+ #clock-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sc7280.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sc7280.yaml
new file mode 100644
index 000000000000..5693b8997570
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sc7280.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc-sc7280.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller Binding for SC7280
+
+maintainers:
+ - Taniya Das <tdas@codeaurora.org>
+
+description: |
+ Qualcomm global clock control module which supports the clocks, resets and
+ power domains on SC7280.
+
+ See also:
+ - dt-bindings/clock/qcom,gcc-sc7280.h
+
+properties:
+ compatible:
+ const: qcom,gcc-sc7280
+
+ clocks:
+ items:
+ - description: Board XO source
+ - description: Board active XO source
+ - description: Sleep clock source
+ - description: PCIE-0 pipe clock source
+ - description: PCIE-1 pipe clock source
+ - description: USF phy rx symbol 0 clock source
+ - description: USF phy rx symbol 1 clock source
+ - description: USF phy tx symbol 0 clock source
+ - description: USB30 phy wrapper pipe clock source
+
+ clock-names:
+ items:
+ - const: bi_tcxo
+ - const: bi_tcxo_ao
+ - const: sleep_clk
+ - const: pcie_0_pipe_clk
+ - const: pcie_1_pipe_clk
+ - const: ufs_phy_rx_symbol_0_clk
+ - const: ufs_phy_rx_symbol_1_clk
+ - const: ufs_phy_tx_symbol_0_clk
+ - const: usb3_phy_wrapper_gcc_usb30_pipe_clk
+
+ '#clock-cells':
+ const: 1
+
+ '#reset-cells':
+ const: 1
+
+ '#power-domain-cells':
+ const: 1
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+ - reg
+ - '#clock-cells'
+ - '#reset-cells'
+ - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ clock-controller@100000 {
+ compatible = "qcom,gcc-sc7280";
+ reg = <0x00100000 0x1f0000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>,
+ <&sleep_clk>,
+ <&pcie_0_pipe_clk>, <&pcie_1_pipe_clk>,
+ <&ufs_phy_rx_symbol_0_clk>, <&ufs_phy_rx_symbol_1_clk>,
+ <&ufs_phy_tx_symbol_0_clk>,
+ <&usb3_phy_wrapper_gcc_usb30_pipe_clk>;
+
+ clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk", "pcie_0_pipe_clk",
+ "pcie_1_pipe_clk", "ufs_phy_rx_symbol_0_clk",
+ "ufs_phy_rx_symbol_1_clk", "ufs_phy_tx_symbol_0_clk",
+ "usb3_phy_wrapper_gcc_usb30_pipe_clk";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sc8180x.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sc8180x.yaml
new file mode 100644
index 000000000000..f03ef96e57fa
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sc8180x.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc-sc8180x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller Binding for SC8180x
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description: |
+ Qualcomm global clock control module which supports the clocks, resets and
+ power domains on SC8180x.
+
+ See also:
+ - dt-bindings/clock/qcom,gcc-sc8180x.h
+
+properties:
+ compatible:
+ const: qcom,gcc-sc8180x
+
+ clocks:
+ items:
+ - description: Board XO source
+ - description: Board active XO source
+ - description: Sleep clock source
+
+ clock-names:
+ items:
+ - const: bi_tcxo
+ - const: bi_tcxo_ao
+ - const: sleep_clk
+
+ '#clock-cells':
+ const: 1
+
+ '#reset-cells':
+ const: 1
+
+ '#power-domain-cells':
+ const: 1
+
+ reg:
+ maxItems: 1
+
+ protected-clocks:
+ description:
+ Protected clock specifier list as per common clock binding.
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+ - reg
+ - '#clock-cells'
+ - '#reset-cells'
+ - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ clock-controller@100000 {
+ compatible = "qcom,gcc-sc8180x";
+ reg = <0x00100000 0x1f0000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>,
+ <&sleep_clk>;
+ clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8350.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8350.yaml
new file mode 100644
index 000000000000..78f35832aa41
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8350.yaml
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc-sm8350.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller Binding for SM8350
+
+maintainers:
+ - Vinod Koul <vkoul@kernel.org>
+
+description: |
+ Qualcomm global clock control module which supports the clocks, resets and
+ power domains on SM8350.
+
+ See also:
+ - dt-bindings/clock/qcom,gcc-sm8350.h
+
+properties:
+ compatible:
+ const: qcom,gcc-sm8350
+
+ clocks:
+ items:
+ - description: Board XO source
+ - description: Sleep clock source
+ - description: PLL test clock source (Optional clock)
+ - description: PCIE 0 Pipe clock source (Optional clock)
+ - description: PCIE 1 Pipe clock source (Optional clock)
+ - description: UFS card Rx symbol 0 clock source (Optional clock)
+ - description: UFS card Rx symbol 1 clock source (Optional clock)
+ - description: UFS card Tx symbol 0 clock source (Optional clock)
+ - description: UFS phy Rx symbol 0 clock source (Optional clock)
+ - description: UFS phy Rx symbol 1 clock source (Optional clock)
+ - description: UFS phy Tx symbol 0 clock source (Optional clock)
+ - description: USB3 phy wrapper pipe clock source (Optional clock)
+ - description: USB3 phy sec pipe clock source (Optional clock)
+ minItems: 2
+ maxItems: 13
+
+ clock-names:
+ items:
+ - const: bi_tcxo
+ - const: sleep_clk
+ - const: core_bi_pll_test_se # Optional clock
+ - const: pcie_0_pipe_clk # Optional clock
+ - const: pcie_1_pipe_clk # Optional clock
+ - const: ufs_card_rx_symbol_0_clk # Optional clock
+ - const: ufs_card_rx_symbol_1_clk # Optional clock
+ - const: ufs_card_tx_symbol_0_clk # Optional clock
+ - const: ufs_phy_rx_symbol_0_clk # Optional clock
+ - const: ufs_phy_rx_symbol_1_clk # Optional clock
+ - const: ufs_phy_tx_symbol_0_clk # Optional clock
+ - const: usb3_phy_wrapper_gcc_usb30_pipe_clk # Optional clock
+ - const: usb3_uni_phy_sec_gcc_usb30_pipe_clk # Optional clock
+ minItems: 2
+ maxItems: 13
+
+ '#clock-cells':
+ const: 1
+
+ '#reset-cells':
+ const: 1
+
+ '#power-domain-cells':
+ const: 1
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+ - reg
+ - '#clock-cells'
+ - '#reset-cells'
+ - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ clock-controller@100000 {
+ compatible = "qcom,gcc-sm8350";
+ reg = <0x00100000 0x1f0000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&sleep_clk>;
+ clock-names = "bi_tcxo", "sleep_clk";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc-sdm660.yaml b/Documentation/devicetree/bindings/clock/qcom,gpucc-sdm660.yaml
new file mode 100644
index 000000000000..3f70eb59aae3
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gpucc-sdm660.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gpucc-sdm660.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Graphics Clock & Reset Controller Binding for SDM630 and SDM660
+
+maintainers:
+ - AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org>
+
+description: |
+ Qualcomm graphics clock control module which supports the clocks, resets and
+ power domains on SDM630 and SDM660.
+
+ See also dt-bindings/clock/qcom,gpucc-sdm660.h.
+
+properties:
+ compatible:
+ enum:
+ - qcom,gpucc-sdm630
+ - qcom,gpucc-sdm660
+
+ clocks:
+ items:
+ - description: Board XO source
+ - description: GPLL0 main gpu branch
+ - description: GPLL0 divider gpu branch
+
+ clock-names:
+ items:
+ - const: xo
+ - const: gcc_gpu_gpll0_clk
+ - const: gcc_gpu_gpll0_div_clk
+
+ '#clock-cells':
+ const: 1
+
+ '#reset-cells':
+ const: 1
+
+ '#power-domain-cells':
+ const: 1
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#clock-cells'
+ - '#reset-cells'
+ - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-sdm660.h>
+ #include <dt-bindings/clock/qcom,rpmcc.h>
+
+ clock-controller@5065000 {
+ compatible = "qcom,gpucc-sdm660";
+ reg = <0x05065000 0x9038>;
+ clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
+ <&gcc GCC_GPU_GPLL0_CLK>,
+ <&gcc GCC_GPU_GPLL0_DIV_CLK>;
+ clock-names = "xo", "gcc_gpu_gpll0_clk",
+ "gcc_gpu_gpll0_div_clk";
+ #clock-cells = <1>;
+ #power-domain-cells = <1>;
+ #reset-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml b/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
index af32dee14fc6..8b0b1c56f354 100644
--- a/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
@@ -24,6 +24,8 @@ properties:
- qcom,mmcc-msm8974
- qcom,mmcc-msm8996
- qcom,mmcc-msm8998
+ - qcom,mmcc-sdm630
+ - qcom,mmcc-sdm660
clocks:
items:
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml b/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
index 12c9cbc0ebf9..9ea0b3f5a4f2 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
@@ -18,6 +18,8 @@ properties:
compatible:
enum:
- qcom,sc7180-rpmh-clk
+ - qcom,sc7280-rpmh-clk
+ - qcom,sc8180x-rpmh-clk
- qcom,sdm845-rpmh-clk
- qcom,sdx55-rpmh-clk
- qcom,sm8150-rpmh-clk
diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.yaml b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.yaml
index 5be1229b3d6e..6eaabb4d82ec 100644
--- a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.yaml
+++ b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.yaml
@@ -35,6 +35,9 @@ properties:
compatible:
items:
- enum:
+ - renesas,r8a774a1-rcar-usb2-clock-sel # RZ/G2M
+ - renesas,r8a774b1-rcar-usb2-clock-sel # RZ/G2N
+ - renesas,r8a774e1-rcar-usb2-clock-sel # RZ/G2H
- renesas,r8a7795-rcar-usb2-clock-sel # R-Car H3
- renesas,r8a7796-rcar-usb2-clock-sel # R-Car M3-W
- renesas,r8a77961-rcar-usb2-clock-sel # R-Car M3-W+
diff --git a/Documentation/devicetree/bindings/clock/silabs,si570.txt b/Documentation/devicetree/bindings/clock/silabs,si570.txt
index 901935e929d2..5dda17df1ac5 100644
--- a/Documentation/devicetree/bindings/clock/silabs,si570.txt
+++ b/Documentation/devicetree/bindings/clock/silabs,si570.txt
@@ -28,6 +28,8 @@ Optional properties:
- clock-frequency: Output frequency to generate. This defines the output
frequency set during boot. It can be reprogrammed during
runtime through the common clock framework.
+ - silabs,skip-recall: Do not perform NVM->RAM recall operation. It will rely
+ on hardware loading of RAM from NVM at power on.
Example:
si570: clock-generator@5d {
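For illustration, the new silabs,skip-recall flag would simply be added to an Si570 node such as the following sketch; the I2C address and frequencies are placeholders:

    si570: clock-generator@5d {
        #clock-cells = <0>;
        compatible = "silabs,si570";
        reg = <0x5d>;
        temperature-stability = <50>;
        factory-fout = <156250000>;
        clock-frequency = <148500000>;
        silabs,skip-recall;
    };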
diff --git a/Documentation/devicetree/bindings/clock/ste-u300-syscon-clock.txt b/Documentation/devicetree/bindings/clock/ste-u300-syscon-clock.txt
deleted file mode 100644
index 7cafcb98ead7..000000000000
--- a/Documentation/devicetree/bindings/clock/ste-u300-syscon-clock.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-Clock bindings for ST-Ericsson U300 System Controller Clocks
-
-Bindings for the gated system controller clocks:
-
-Required properties:
-- compatible: must be "stericsson,u300-syscon-clk"
-- #clock-cells: must be <0>
-- clock-type: specifies the type of clock:
- 0 = slow clock
- 1 = fast clock
- 2 = rest/remaining clock
-- clock-id: specifies the clock in the type range
-
-Optional properties:
-- clocks: parent clock(s)
-
-The available clocks per type are as follows:
-
-Type: ID: Clock:
--------------------
-0 0 Slow peripheral bridge clock
-0 1 UART0 clock
-0 4 GPIO clock
-0 6 RTC clock
-0 7 Application timer clock
-0 8 Access timer clock
-
-1 0 Fast peripheral bridge clock
-1 1 I2C bus 0 clock
-1 2 I2C bus 1 clock
-1 5 MMC interface peripheral (silicon) clock
-1 6 SPI clock
-
-2 3 CPU clock
-2 4 DMA controller clock
-2 5 External Memory Interface (EMIF) clock
-2 6 NAND flask interface clock
-2 8 XGAM graphics engine clock
-2 9 Shared External Memory Interface (SEMI) clock
-2 10 AHB Subsystem Bridge clock
-2 12 Interrupt controller clock
-
-Example:
-
-gpio_clk: gpio_clk@13M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <0>; /* Slow */
- clock-id = <4>;
- clocks = <&slow_clk>;
-};
-
-gpio: gpio@c0016000 {
- compatible = "stericsson,gpio-coh901";
- (...)
- clocks = <&gpio_clk>;
-};
-
-
-Bindings for the MMC/SD card clock:
-
-Required properties:
-- compatible: must be "stericsson,u300-syscon-mclk"
-- #clock-cells: must be <0>
-
-Optional properties:
-- clocks: parent clock(s)
-
-mmc_mclk: mmc_mclk {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-mclk";
- clocks = <&mmc_pclk>;
-};
-
-mmcsd: mmcsd@c0001000 {
- compatible = "arm,pl18x", "arm,primecell";
- clocks = <&mmc_pclk>, <&mmc_mclk>;
- clock-names = "apb_pclk", "mclk";
- (...)
-};
diff --git a/Documentation/devicetree/bindings/clock/tango4-clock.txt b/Documentation/devicetree/bindings/clock/tango4-clock.txt
deleted file mode 100644
index 19c580a7bda2..000000000000
--- a/Documentation/devicetree/bindings/clock/tango4-clock.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-* Sigma Designs Tango4 Clock Generator
-
-The Tango4 clock generator outputs cpu_clk and sys_clk (the latter is used
-for RAM and various peripheral devices). The clock binding described here
-is applicable to all Tango4 SoCs.
-
-Required Properties:
-
-- compatible: should be "sigma,tango4-clkgen".
-- reg: physical base address of the device and length of memory mapped region.
-- clocks: phandle of the input clock (crystal oscillator).
-- clock-output-names: should be "cpuclk" and "sysclk".
-- #clock-cells: should be set to 1.
-
-Example:
-
- clkgen: clkgen@10000 {
- compatible = "sigma,tango4-clkgen";
- reg = <0x10000 0x40>;
- clocks = <&xtal>;
- clock-output-names = "cpuclk", "sysclk";
- #clock-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/clock/zx296702-clk.txt b/Documentation/devicetree/bindings/clock/zx296702-clk.txt
deleted file mode 100644
index 5c91c9e4f1be..000000000000
--- a/Documentation/devicetree/bindings/clock/zx296702-clk.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-Device Tree Clock bindings for ZTE zx296702
-
-This binding uses the common clock binding[1].
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Required properties:
-- compatible : shall be one of the following:
- "zte,zx296702-topcrm-clk":
- zx296702 top clock selection, divider and gating
-
- "zte,zx296702-lsp0crpm-clk" and
- "zte,zx296702-lsp1crpm-clk":
- zx296702 device level clock selection and gating
-
-- reg: Address and length of the register set
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. See include/dt-bindings/clock/zx296702-clock.h
-for the full list of zx296702 clock IDs.
-
-
-topclk: topcrm@09800000 {
- compatible = "zte,zx296702-topcrm-clk";
- reg = <0x09800000 0x1000>;
- #clock-cells = <1>;
-};
-
-uart0: serial@09405000 {
- compatible = "zte,zx296702-uart";
- reg = <0x09405000 0x1000>;
- interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&lsp1clk ZX296702_UART0_PCLK>;
-};
diff --git a/Documentation/devicetree/bindings/clock/zx296718-clk.txt b/Documentation/devicetree/bindings/clock/zx296718-clk.txt
deleted file mode 100644
index 3a46bf0b2540..000000000000
--- a/Documentation/devicetree/bindings/clock/zx296718-clk.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-Device Tree Clock bindings for ZTE zx296718
-
-This binding uses the common clock binding[1].
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Required properties:
-- compatible : shall be one of the following:
- "zte,zx296718-topcrm":
- zx296718 top clock selection, divider and gating
-
- "zte,zx296718-lsp0crm" and
- "zte,zx296718-lsp1crm":
- zx296718 device level clock selection and gating
-
- "zte,zx296718-audiocrm":
- zx296718 audio clock selection, divider and gating
-
-- reg: Address and length of the register set
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. See include/dt-bindings/clock/zx296718-clock.h
-for the full list of zx296718 clock IDs.
-
-
-topclk: topcrm@1461000 {
- compatible = "zte,zx296718-topcrm-clk";
- reg = <0x01461000 0x1000>;
- #clock-cells = <1>;
-};
-
-usbphy0:usb-phy0 {
- compatible = "zte,zx296718-usb-phy";
- #phy-cells = <0>;
- clocks = <&topclk USB20_PHY_CLK>;
- clock-names = "phyclk";
-};
diff --git a/Documentation/devicetree/bindings/connector/usb-connector.yaml b/Documentation/devicetree/bindings/connector/usb-connector.yaml
index 4286ed767a0a..b6daedd62516 100644
--- a/Documentation/devicetree/bindings/connector/usb-connector.yaml
+++ b/Documentation/devicetree/bindings/connector/usb-connector.yaml
@@ -137,28 +137,41 @@ properties:
maxItems: 7
$ref: /schemas/types.yaml#/definitions/uint32-array
+ sink-vdos:
+ description: An array of u32 values, each entry being a Vendor Defined Message
+ Object (VDO) that provides additional information about the product. The detailed
+ bit definitions and the order of the VDOs can be found in
+ "USB Power Delivery Specification Revision 3.0, Version 2.0 + ECNs 2020-12-10"
+ chapter 6.4.4.3.1 Discover Identity. The VDO array can be specified via the
+ VDO_IDH/_CERT/_PRODUCT/_UFP/_DFP/_PCABLE/_ACABLE(1/2)/_VPD() macros defined in
+ dt-bindings/usb/pd.h.
+ minItems: 3
+ maxItems: 6
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+
op-sink-microwatt:
description: Sink required operating power in microwatt, if source can't
offer the power, Capability Mismatch is set. Required for power sink and
power dual role.
ports:
- description: OF graph bindings (specified in bindings/graph.txt) that model
- any data bus to the connector unless the bus is between parent node and
- the connector. Since a single connector can have multiple data buses every
- bus has an assigned OF graph port number as described below.
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
+ description: OF graph bindings modeling any data bus to the connector
+ unless the bus is between parent node and the connector. Since a single
+ connector can have multiple data buses every bus has an assigned OF graph
+ port number as described below.
+
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: High Speed (HS), present in all connectors.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Super Speed (SS), present in SS capable connectors.
port@2:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Sideband Use (SBU), present in USB-C. This describes the
alternate mode connection of which SBU is a part.
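
For illustration, a Type-C connector node carrying the new sink-vdos property might
look like the sketch below; the three VDO values are placeholders rather than data
for any real product, and could equally be built from the VDO_IDH/_CERT/_PRODUCT
macros in dt-bindings/usb/pd.h:

    connector {
            compatible = "usb-c-connector";
            power-role = "sink";
            (...)
            /* placeholder ID Header, Cert Stat and Product VDOs */
            sink-vdos = <0xd14008d2 0x00000000 0x40010000>;
    };
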
diff --git a/Documentation/devicetree/bindings/crypto/allwinner,sun8i-ce.yaml b/Documentation/devicetree/bindings/crypto/allwinner,sun8i-ce.yaml
index 7a60d84289cc..6ab07eba7778 100644
--- a/Documentation/devicetree/bindings/crypto/allwinner,sun8i-ce.yaml
+++ b/Documentation/devicetree/bindings/crypto/allwinner,sun8i-ce.yaml
@@ -46,8 +46,7 @@ properties:
if:
properties:
compatible:
- items:
- const: allwinner,sun50i-h6-crypto
+ const: allwinner,sun50i-h6-crypto
then:
properties:
clocks:
diff --git a/Documentation/devicetree/bindings/crypto/intel,keembay-ocs-hcu.yaml b/Documentation/devicetree/bindings/crypto/intel,keembay-ocs-hcu.yaml
new file mode 100644
index 000000000000..acb92706d280
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/intel,keembay-ocs-hcu.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/intel,keembay-ocs-hcu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel Keem Bay OCS HCU Device Tree Bindings
+
+maintainers:
+ - Declan Murphy <declan.murphy@intel.com>
+ - Daniele Alessandrelli <daniele.alessandrelli@intel.com>
+
+description:
+ The Intel Keem Bay Offload and Crypto Subsystem (OCS) Hash Control Unit (HCU)
+ provides hardware-accelerated hashing and HMAC.
+
+properties:
+ compatible:
+ const: intel,keembay-ocs-hcu
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ crypto@3000b000 {
+ compatible = "intel,keembay-ocs-hcu";
+ reg = <0x3000b000 0x1000>;
+ interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk 94>;
+ };
diff --git a/Documentation/devicetree/bindings/crypto/samsung-slimsss.yaml b/Documentation/devicetree/bindings/crypto/samsung-slimsss.yaml
index 7743eae049ab..676950bb7b37 100644
--- a/Documentation/devicetree/bindings/crypto/samsung-slimsss.yaml
+++ b/Documentation/devicetree/bindings/crypto/samsung-slimsss.yaml
@@ -8,7 +8,6 @@ title: Samsung Exynos SoC SlimSSS (Slim Security SubSystem) module
maintainers:
- Krzysztof Kozlowski <krzk@kernel.org>
- - Kamil Konieczny <k.konieczny@partner.samsung.com>
description: |+
The SlimSSS module in Exynos5433 SoC supports the following:
diff --git a/Documentation/devicetree/bindings/crypto/samsung-sss.yaml b/Documentation/devicetree/bindings/crypto/samsung-sss.yaml
index cf1c47a81d7f..6d62b0e42fc9 100644
--- a/Documentation/devicetree/bindings/crypto/samsung-sss.yaml
+++ b/Documentation/devicetree/bindings/crypto/samsung-sss.yaml
@@ -8,7 +8,6 @@ title: Samsung Exynos SoC SSS (Security SubSystem) module
maintainers:
- Krzysztof Kozlowski <krzk@kernel.org>
- - Kamil Konieczny <k.konieczny@partner.samsung.com>
description: |+
The SSS module in S5PV210 SoC supports the following:
diff --git a/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml b/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml
index 1465c9ebaf93..1d48ac712b23 100644
--- a/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml
+++ b/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml
@@ -66,7 +66,7 @@ examples:
#include <dt-bindings/soc/ti,sci_pm_domain.h>
main_crypto: crypto@4e00000 {
- compatible = "ti,j721-sa2ul";
+ compatible = "ti,j721e-sa2ul";
reg = <0x4e00000 0x1200>;
power-domains = <&k3_pds 264 TI_SCI_PD_EXCLUSIVE>;
dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>,
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml
index 86057d541065..12a7df0e38b2 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml
@@ -84,36 +84,23 @@ properties:
const: dma-mem
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Input endpoints of the controller.
port@1:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Output endpoints of the controller.
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml
index 3eb1c2bbf4e7..055157fbf3bf 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml
@@ -57,35 +57,22 @@ properties:
maxItems: 1
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoints of the controller.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoints of the controller.
required:
- - "#address-cells"
- - "#size-cells"
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml
index 75e6479397a5..7f11452539f4 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml
@@ -76,37 +76,24 @@ properties:
- const: audio-tx
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoints of the controller.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoints of the controller. Usually an HDMI
connector.
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
index 4c15a2644a7c..c13faf3e6581 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
@@ -115,31 +115,24 @@ properties:
- const: lvds
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoints of the controller.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description: |
Output endpoints of the controller.
patternProperties:
"^endpoint(@[0-9])$":
- type: object
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
properties:
allwinner,tcon-channel:
@@ -156,16 +149,10 @@ properties:
property is not present, the endpoint number will be
used as the channel number.
- unevaluatedProperties: true
-
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml
index 6009324be967..afc0ed799e0e 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml
@@ -24,11 +24,9 @@ properties:
maxItems: 1
port:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
- A port node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt. The
- first port should be the input endpoint, usually coming from the
+ The first port should be the input endpoint, usually coming from the
associated TCON.
required:
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml
index 0c1ce55940e1..71cce5687580 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml
@@ -46,36 +46,23 @@ properties:
maxItems: 1
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoints of the controller.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoints of the controller.
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
index 7aa330dabc44..a738d7c12a97 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
@@ -47,11 +47,9 @@ properties:
const: dphy
port:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
- A port node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt. That
- port should be the input endpoint, usually coming from the
+ The port should be the input endpoint, usually coming from the
associated TCON.
required:
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml
index c040eef56518..4f91eec26de9 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml
@@ -43,35 +43,22 @@ properties:
maxItems: 1
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoints of the controller.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoints of the controller.
required:
- - "#address-cells"
- - "#size-cells"
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml
index fa4769a0b26e..b3e9992525c2 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml
@@ -93,38 +93,25 @@ properties:
The VCC power supply of the controller
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoints of the controller. Usually the associated
TCON.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoints of the controller. Usually an HDMI
connector.
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml
index b98ca609824b..ec21e8bf2767 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml
@@ -80,141 +80,45 @@ properties:
maxItems: 1
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
- All ports should have only one endpoint connected to
- remote endpoint.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoint for Mixer 0 mux.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoint for Mixer 0 mux
- properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
- reg: true
-
- patternProperties:
- "^endpoint@[0-9]$":
- type: object
-
- properties:
- reg:
- description: |
- ID of the target TCON
-
- required:
- - reg
-
- required:
- - "#address-cells"
- - "#size-cells"
-
- additionalProperties: false
-
port@2:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoint for Mixer 1 mux.
port@3:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoint for Mixer 1 mux
- properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
- reg: true
-
- patternProperties:
- "^endpoint@[0-9]$":
- type: object
-
- properties:
- reg:
- description: |
- ID of the target TCON
-
- required:
- - reg
-
- required:
- - "#address-cells"
- - "#size-cells"
-
- additionalProperties: false
-
port@4:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoint for HDMI mux.
- properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
- reg: true
-
- patternProperties:
- "^endpoint@[0-9]$":
- type: object
-
- properties:
- reg:
- description: |
- ID of the target TCON
-
- required:
- - reg
-
- required:
- - "#address-cells"
- - "#size-cells"
-
- additionalProperties: false
-
port@5:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoint for HDMI mux
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
- port@4
- port@5
- additionalProperties: false
-
required:
- "#clock-cells"
- compatible
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml b/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml
index 96de41d32b3e..637372ec4614 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml
@@ -40,36 +40,23 @@ properties:
maxItems: 1
ports:
- type: object
- description: |
- A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Input endpoints of the controller.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output endpoints of the controller.
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
index 0da42ab8fd3a..cf5a208f2f10 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
@@ -81,12 +81,12 @@ properties:
description: phandle to an external 5V regulator to power the HDMI logic
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
A port node pointing to the VENC Input port node.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
A port node pointing to the TMDS Output port node.
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
index a8d202c9d004..851cb0781217 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
@@ -83,12 +83,12 @@ properties:
description: phandle to the associated power domain
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
A port node pointing to the CVBS VDAC port node.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
A port node pointing to the HDMI-TX port node.
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml
index 7ce06f9f9f8e..a1d5a32660e0 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml
@@ -53,6 +53,24 @@ properties:
- const: audio
- const: cec
+ interrupts:
+ items:
+ - description: CEC TX interrupt
+ - description: CEC RX interrupt
+ - description: CEC stuck at low interrupt
+ - description: Wake-up interrupt
+ - description: Hotplug connected interrupt
+ - description: Hotplug removed interrupt
+
+ interrupt-names:
+ items:
+ - const: cec-tx
+ - const: cec-rx
+ - const: cec-low
+ - const: wakeup
+ - const: hpd-connected
+ - const: hpd-removed
+
ddc:
allOf:
- $ref: /schemas/types.yaml#/definitions/phandle
@@ -60,6 +78,7 @@ properties:
Phandle of the I2C controller used for DDC EDID probing
hpd-gpios:
+ maxItems: 1
description: >
The GPIO pin for the HDMI hotplug detect (if it doesn't appear
as an interrupt/status bit in the HDMI controller itself)
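
For illustration, the six interrupts and their names might be wired up as in the
sketch below; the interrupt numbers and unit address are placeholders, not taken
from a real BCM2711 device tree:

    hdmi@7ef00700 {
            compatible = "brcm,bcm2711-hdmi0";
            (...)
            interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>,
                         <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
                         <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
                         <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
                         <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>,
                         <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
            interrupt-names = "cec-tx", "cec-rx", "cec-low", "wakeup",
                              "hpd-connected", "hpd-removed";
    };
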
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2835-dpi.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2835-dpi.yaml
index 5c1024bbc1b3..c9ad0ecc9b6d 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm2835-dpi.yaml
+++ b/Documentation/devicetree/bindings/display/brcm,bcm2835-dpi.yaml
@@ -27,10 +27,9 @@ properties:
- const: pixel
port:
- type: object
- description: >
- Port node with a single endpoint connecting to the panel, as
- defined in Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Port node with a single endpoint connecting to the panel.
required:
- compatible
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2835-dsi0.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2835-dsi0.yaml
index eb44e072b6e5..55c60919991f 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm2835-dsi0.yaml
+++ b/Documentation/devicetree/bindings/display/brcm,bcm2835-dsi0.yaml
@@ -18,6 +18,7 @@ properties:
compatible:
enum:
+ - brcm,bcm2711-dsi1
- brcm,bcm2835-dsi0
- brcm,bcm2835-dsi1
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2835-hdmi.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2835-hdmi.yaml
index f54b4e4808f0..031e35e76db2 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm2835-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/brcm,bcm2835-hdmi.yaml
@@ -37,6 +37,7 @@ properties:
Phandle of the I2C controller used for DDC EDID probing
hpd-gpios:
+ maxItems: 1
description: >
The GPIO pin for the HDMI hotplug detect (if it doesn't appear
as an interrupt/status bit in the HDMI controller itself)
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2835-hvs.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2835-hvs.yaml
index e826ab0adb75..2e8566f47e63 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm2835-hvs.yaml
+++ b/Documentation/devicetree/bindings/display/brcm,bcm2835-hvs.yaml
@@ -36,7 +36,7 @@ if:
properties:
compatible:
contains:
- const: brcm,bcm2711-hvs"
+ const: brcm,bcm2711-hvs
then:
required:
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml b/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml
index 9392b5502a32..c789784efe30 100644
--- a/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml
@@ -35,16 +35,16 @@ properties:
maxItems: 1
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Video port for MIPI DSI input.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Video port for panel or connector.
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml b/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
index 3ba477aefdd7..8e13f27b28ed 100644
--- a/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
@@ -42,31 +42,18 @@ properties:
description: Regulator for 1.0V digital core power.
ports:
- type: object
- description:
- A node containing input and output port nodes with endpoint
- definitions as documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
- Documentation/devicetree/bindings/graph.txt
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Video port for HDMI input.
- properties:
- reg:
- const: 0
-
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Video port for SlimPort, DisplayPort, eDP or MyDP output.
- properties:
- reg:
- const: 1
-
required:
- port@0
- port@1
diff --git a/Documentation/devicetree/bindings/display/bridge/anx6345.yaml b/Documentation/devicetree/bindings/display/bridge/anx6345.yaml
index fccd63521a8c..1c0406c38fe5 100644
--- a/Documentation/devicetree/bindings/display/bridge/anx6345.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/anx6345.yaml
@@ -32,31 +32,23 @@ properties:
description: Regulator for 2.5V digital core power.
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
port@0:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Video port for LVTTL input
port@1:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Video port for eDP output (panel or connector).
May be omitted if EDID works reliably.
required:
- port@0
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml b/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml
index 74d675fc6e7b..63427878715e 100644
--- a/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml
@@ -57,47 +57,37 @@ properties:
maxItems: 1
ports:
- type: object
- description:
- Ports as described in Documentation/devicetree/bindings/graph.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
First input port representing the DP bridge input.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Second input port representing the DP bridge input.
port@2:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Third input port representing the DP bridge input.
port@3:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Fourth input port representing the DP bridge input.
port@4:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Output port representing the DP bridge output.
required:
- port@0
- port@4
- - '#address-cells'
- - '#size-cells'
allOf:
- if:
diff --git a/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml b/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml
index 9f38f55fc990..bb6289c7d375 100644
--- a/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/chrontel,ch7033.yaml
@@ -19,16 +19,16 @@ properties:
description: I2C address of the device
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Video port for RGB input.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
DVI port, should be connected to a node compatible with the
dvi-connector binding.
diff --git a/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml
index 35c9dfd86650..dcb1336ee2a5 100644
--- a/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml
@@ -35,29 +35,21 @@ properties:
- const: clk_mipi_cfg
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: MIPI DSI input port.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: DSI output port.
required:
- port@0
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
index 02cfc0a3b550..833d11b2303a 100644
--- a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
@@ -53,7 +53,7 @@ properties:
description: extcon specifier for the Power Delivery
port:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: A port node pointing to DPI host port node
required:
diff --git a/Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml b/Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml
index 7a1c89b995e2..5b9d36f7af30 100644
--- a/Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml
@@ -38,82 +38,26 @@ properties:
description: Regulator for 3.3V IO power.
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Primary MIPI port-1 for MIPI input
- properties:
- reg:
- const: 0
-
- patternProperties:
- "^endpoint(@[0-9])$":
- type: object
- additionalProperties: false
-
- properties:
- remote-endpoint:
- $ref: /schemas/types.yaml#/definitions/phandle
-
- required:
- - reg
-
port@1:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Additional MIPI port-2 for MIPI input, used in combination
with primary MIPI port-1 to drive higher resolution displays
- properties:
- reg:
- const: 1
-
- patternProperties:
- "^endpoint(@[0-9])$":
- type: object
- additionalProperties: false
-
- properties:
- remote-endpoint:
- $ref: /schemas/types.yaml#/definitions/phandle
-
- required:
- - reg
-
port@2:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
HDMI port for HDMI output
- properties:
- reg:
- const: 2
-
- patternProperties:
- "^endpoint(@[0-9])$":
- type: object
- additionalProperties: false
-
- properties:
- remote-endpoint:
- $ref: /schemas/types.yaml#/definitions/phandle
-
- required:
- - reg
-
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@2
diff --git a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
index 66a14d60ce1d..304a1367faaa 100644
--- a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
@@ -45,25 +45,17 @@ properties:
- thine,thc63lvdm83d # For the THC63LVDM83D LVDS serializer
ports:
- type: object
- description: |
- This device has two video ports. Their connections are modeled using the
- OF graph bindings specified in Documentation/devicetree/bindings/graph.txt
- properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
For LVDS encoders, port 0 is the parallel input
For LVDS decoders, port 0 is the LVDS input
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
For LVDS encoders, port 1 is the LVDS output
For LVDS decoders, port 1 is the parallel output
@@ -72,8 +64,6 @@ properties:
- port@0
- port@1
- additionalProperties: false
-
powerdown-gpios:
description:
The GPIO used to control the power down line of this device.
diff --git a/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
index a125b2dd3a2f..350fb8f400f0 100644
--- a/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
@@ -84,40 +84,23 @@ properties:
- const: pclk
ports:
- type: object
- description:
- A node containing DSI input & output port nodes with endpoint
- definitions as documented in
- Documentation/devicetree/bindings/graph.txt.
+ $ref: /schemas/graph.yaml#/properties/ports
+
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
description:
Input port node to receive pixel data from the
display controller. Exactly one endpoint must be
specified.
properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
endpoint@0:
+ $ref: /schemas/graph.yaml#/properties/endpoint
description: sub-node describing the input from LCDIF
- type: object
endpoint@1:
+ $ref: /schemas/graph.yaml#/properties/endpoint
description: sub-node describing the input from DCSS
- type: object
-
- reg:
- const: 0
-
- required:
- - '#address-cells'
- - '#size-cells'
- - reg
oneOf:
- required:
@@ -125,28 +108,18 @@ properties:
- required:
- endpoint@1
- additionalProperties: false
+ unevaluatedProperties: false
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
DSI output port node to the panel or the next bridge
in the chain
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
required:
- - '#address-cells'
- - '#size-cells'
- port@0
- port@1
- additionalProperties: false
-
required:
- '#address-cells'
- '#size-cells'
diff --git a/Documentation/devicetree/bindings/display/bridge/ps8640.yaml b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
index 763c7909473e..fce82b605c8b 100644
--- a/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
@@ -41,34 +41,22 @@ properties:
description: Regulator for 3.3V digital core power.
ports:
- type: object
- description:
- A node containing DSI input & output port nodes with endpoint
- definitions as documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
- Documentation/devicetree/bindings/graph.txt
- properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
port@0:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Video port for DSI input
port@1:
- type: object
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Video port for eDP output (panel or connector).
required:
- port@0
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
index e5b163951b91..acfc327f70a7 100644
--- a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
@@ -49,33 +49,21 @@ properties:
maxItems: 1
ports:
- type: object
- description: |
- This device has two video ports. Their connections are modelled using the
- OF graph bindings specified in Documentation/devicetree/bindings/graph.txt.
- Each port shall have a single endpoint.
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Parallel RGB input port
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: LVDS output port
required:
- port@0
- port@1
- additionalProperties: false
-
power-domains:
maxItems: 1
@@ -83,9 +71,9 @@ properties:
$ref: /schemas/types.yaml#/definitions/phandle
description:
phandle to the companion LVDS encoder. This property is mandatory
- for the first LVDS encoder on D3 and E3 SoCs, and shall point to
- the second encoder to be used as a companion in dual-link mode. It
- shall not be set for any other LVDS encoder.
+ for the first LVDS encoder on R-Car D3 and E3, and RZ/G2E SoCs, and shall
+ point to the second encoder to be used as a companion in dual-link mode.
+ It shall not be set for any other LVDS encoder.
required:
- compatible
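
To illustrate the companion link described above, the first encoder on such an SoC
references the second one through the companion phandle property (renesas,companion
in this binding); node names, unit addresses and the compatible string below are
placeholders in this sketch:

    lvds0: lvds-encoder@feb90000 {
            compatible = "renesas,r8a77990-lvds";
            (...)
            renesas,companion = <&lvds1>;
    };

    lvds1: lvds-encoder@feb95000 {
            compatible = "renesas,r8a77990-lvds";
            (...)
            /* companion encoder: no renesas,companion property here */
    };
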
diff --git a/Documentation/devicetree/bindings/display/bridge/sii902x.txt b/Documentation/devicetree/bindings/display/bridge/sii902x.txt
index 02c21b584741..3bc760cc31cb 100644
--- a/Documentation/devicetree/bindings/display/bridge/sii902x.txt
+++ b/Documentation/devicetree/bindings/display/bridge/sii902x.txt
@@ -40,7 +40,7 @@ Optional properties:
documents on how to describe the way the sii902x device is
connected to the rest of the audio system:
Documentation/devicetree/bindings/sound/simple-card.yaml
- Documentation/devicetree/bindings/sound/audio-graph-card.txt
+ Documentation/devicetree/bindings/sound/audio-graph-card.yaml
Note: In case of the audio-graph-card binding the used port
index should be 3.
diff --git a/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml b/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml
index 64e8a1c24b40..6c7b577fd471 100644
--- a/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml
@@ -30,31 +30,21 @@ properties:
- ti,ths8135
ports:
- type: object
- description: |
- This device has two video ports. Their connections are modeled using the
- OF graph bindings specified in Documentation/devicetree/bindings/graph.txt.
- properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: The bridge input
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: The bridge output
required:
- port@0
- port@1
- additionalProperties: false
-
enable-gpios:
maxItems: 1
description: GPIO controlling bridge enable
diff --git a/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml
index e42cb610f545..3c3e51af154b 100644
--- a/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml
@@ -47,14 +47,15 @@ properties:
const: apb
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Input node to receive pixel data.
+
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: DSI output node to panel.
required:
diff --git a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml
index 3d5ce08a5792..8ae382429d2b 100644
--- a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.yaml
@@ -25,46 +25,41 @@ properties:
const: thine,thc63lvd1024
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
description: |
- This device has four video ports. Their connections are modeled using the
- OF graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+ The device can operate in single or dual input and output modes.
- The device can operate in single-link mode or dual-link mode. In
- single-link mode, all pixels are received on port@0, and port@1 shall not
- contain any endpoint. In dual-link mode, even-numbered pixels are
- received on port@0 and odd-numbered pixels on port@1, and both port@0 and
- port@1 shall contain endpoints.
+ When operating in single input mode, all pixels are received on port@0,
+ and port@1 shall not contain any endpoint. In dual input mode,
+ even-numbered pixels are received on port@0 and odd-numbered pixels on
+ port@1, and both port@0 and port@1 shall contain endpoints.
- properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
+ When operating in single output mode all pixels are output from the first
+ CMOS/TTL port and port@3 shall not contain any endpoint. In dual output
+ mode pixels are output from both CMOS/TTL ports and both port@2 and
+ port@3 shall contain endpoints.
+ properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: First LVDS input port
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Second LVDS input port
port@2:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: First digital CMOS/TTL parallel output
port@3:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Second digital CMOS/TTL parallel output
required:
- port@0
- port@2
- additionalProperties: false
-
oe-gpios:
maxItems: 1
description: Output enable GPIO signal, pin name "OE", active high.
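
As a sketch of the dual-input, single-output wiring described above (labels and the
remote endpoints are placeholders, and other required properties are omitted):

    lvds-decoder {
            compatible = "thine,thc63lvd1024";
            (...)
            ports {
                    #address-cells = <1>;
                    #size-cells = <0>;

                    port@0 {
                            reg = <0>;
                            thc63_in0: endpoint { remote-endpoint = <&lvds_out_even>; };
                    };

                    port@1 {
                            reg = <1>;
                            thc63_in1: endpoint { remote-endpoint = <&lvds_out_odd>; };
                    };

                    port@2 {
                            reg = <2>;
                            thc63_out: endpoint { remote-endpoint = <&panel_in>; };
                    };

                    /* port@3 has no endpoint: single output mode */
            };
    };
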
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
index f8622bd0f61e..26932d2e86ab 100644
--- a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
@@ -71,54 +71,26 @@ properties:
description: See ../../pwm/pwm.yaml for description of the cell formats.
ports:
- type: object
- additionalProperties: false
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
- additionalProperties: false
-
+ $ref: /schemas/graph.yaml#/properties/port
description:
Video port for MIPI DSI input
- properties:
- reg:
- const: 0
-
- endpoint:
- type: object
- additionalProperties: false
- properties:
- remote-endpoint: true
-
- required:
- - reg
-
port@1:
- type: object
- additionalProperties: false
-
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description:
Video port for eDP output (panel or connector).
properties:
- reg:
- const: 1
-
endpoint:
- type: object
- additionalProperties: false
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
properties:
- remote-endpoint: true
-
data-lanes:
oneOf:
- minItems: 1
@@ -171,12 +143,7 @@ properties:
dependencies:
lane-polarities: [data-lanes]
- required:
- - reg
-
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,tfp410.yaml b/Documentation/devicetree/bindings/display/bridge/ti,tfp410.yaml
index 605831c1e836..4c5dd8ec2951 100644
--- a/Documentation/devicetree/bindings/display/bridge/ti,tfp410.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ti,tfp410.yaml
@@ -31,23 +31,18 @@ properties:
maximum: 7
ports:
- description:
- A node containing input and output port nodes with endpoint
- definitions as documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description: DPI input port.
- type: object
properties:
- reg:
- const: 0
-
endpoint:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
properties:
pclk-sample:
@@ -67,15 +62,8 @@ properties:
default: 24
port@1:
+ $ref: /schemas/graph.yaml#/properties/port
description: DVI output port.
- type: object
-
- properties:
- reg:
- const: 1
-
- endpoint:
- type: object
required:
- port@0
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358762.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358762.yaml
index 195025e6803c..5216c27fc0ad 100644
--- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358762.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358762.yaml
@@ -25,62 +25,20 @@ properties:
description: Regulator for 1.2V internal core power.
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
- additionalProperties: false
-
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Video port for MIPI DSI input
- properties:
- reg:
- const: 0
-
- patternProperties:
- endpoint:
- type: object
- additionalProperties: false
-
- properties:
- remote-endpoint: true
-
- required:
- - reg
-
port@1:
- type: object
- additionalProperties: false
-
- description: |
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
Video port for MIPI DPI output (panel or connector).
- properties:
- reg:
- const: 1
-
- patternProperties:
- endpoint:
- type: object
- additionalProperties: false
-
- properties:
- remote-endpoint: true
-
- required:
- - reg
-
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml
index c036a75db8f7..eacfe7165083 100644
--- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml
@@ -42,65 +42,30 @@ properties:
const: refclk
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
- additionalProperties: false
-
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description: |
Video port for RGB input
properties:
- reg:
- const: 0
-
- patternProperties:
endpoint:
- type: object
- additionalProperties: false
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
properties:
data-lines:
enum: [ 16, 18, 24 ]
- remote-endpoint: true
-
- required:
- - reg
-
port@1:
- type: object
- additionalProperties: false
-
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Video port for DSI output (panel or connector).
- properties:
- reg:
- const: 1
-
- patternProperties:
- endpoint:
- type: object
- additionalProperties: false
-
- properties:
- remote-endpoint: true
-
- required:
- - reg
-
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
@@ -156,4 +121,3 @@ examples:
};
};
};
-
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml
index b5959cc78b8d..10471c6c1ff9 100644
--- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358775.yaml
@@ -42,31 +42,22 @@ properties:
description: Hardware reset, Low active
ports:
- type: object
- description:
- A node containing input and output port nodes with endpoint definitions
- as documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
- properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
DSI Input. The remote endpoint phandle should be a
reference to a valid mipi_dsi_host device node.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Video port for LVDS output (panel or connector).
port@2:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Video port for Dual link LVDS output (panel or connector).
diff --git a/Documentation/devicetree/bindings/display/connector/analog-tv-connector.yaml b/Documentation/devicetree/bindings/display/connector/analog-tv-connector.yaml
index eebe88fed999..a31ca2d52b86 100644
--- a/Documentation/devicetree/bindings/display/connector/analog-tv-connector.yaml
+++ b/Documentation/devicetree/bindings/display/connector/analog-tv-connector.yaml
@@ -25,6 +25,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32
port:
+ $ref: /schemas/graph.yaml#/properties/port
description: Connection to controller providing analog TV signals
required:
diff --git a/Documentation/devicetree/bindings/display/connector/dp-connector.yaml b/Documentation/devicetree/bindings/display/connector/dp-connector.yaml
new file mode 100644
index 000000000000..22792a79e7ce
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/connector/dp-connector.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/connector/dp-connector.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: DisplayPort Connector
+
+maintainers:
+ - Tomi Valkeinen <tomi.valkeinen@ti.com>
+
+properties:
+ compatible:
+ const: dp-connector
+
+ label: true
+
+ type:
+ enum:
+ - full-size
+ - mini
+
+ hpd-gpios:
+ description: A GPIO line connected to HPD
+ maxItems: 1
+
+ dp-pwr-supply:
+ description: Power supply for the DP_PWR pin
+
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Connection to controller providing DP signals
+
+required:
+ - compatible
+ - type
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ connector {
+ compatible = "dp-connector";
+ label = "dp0";
+ type = "full-size";
+
+ port {
+ dp_connector_in: endpoint {
+ remote-endpoint = <&dp_out>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/connector/dvi-connector.yaml b/Documentation/devicetree/bindings/display/connector/dvi-connector.yaml
index 71cb9220fa59..93eb14294e68 100644
--- a/Documentation/devicetree/bindings/display/connector/dvi-connector.yaml
+++ b/Documentation/devicetree/bindings/display/connector/dvi-connector.yaml
@@ -36,6 +36,7 @@ properties:
description: the connector has pins for DVI dual-link
port:
+ $ref: /schemas/graph.yaml#/properties/port
description: Connection to controller providing DVI signals
required:
diff --git a/Documentation/devicetree/bindings/display/connector/hdmi-connector.yaml b/Documentation/devicetree/bindings/display/connector/hdmi-connector.yaml
index 14d7128af592..83c0d008265b 100644
--- a/Documentation/devicetree/bindings/display/connector/hdmi-connector.yaml
+++ b/Documentation/devicetree/bindings/display/connector/hdmi-connector.yaml
@@ -37,6 +37,7 @@ properties:
maxItems: 1
port:
+ $ref: /schemas/graph.yaml#/properties/port
description: Connection to controller providing HDMI signals
required:
diff --git a/Documentation/devicetree/bindings/display/connector/vga-connector.yaml b/Documentation/devicetree/bindings/display/connector/vga-connector.yaml
index 5782c4bb3252..25f868002000 100644
--- a/Documentation/devicetree/bindings/display/connector/vga-connector.yaml
+++ b/Documentation/devicetree/bindings/display/connector/vga-connector.yaml
@@ -20,6 +20,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/phandle
port:
+ $ref: /schemas/graph.yaml#/properties/port
description: Connection to controller providing VGA signals
required:
diff --git a/Documentation/devicetree/bindings/display/ht16k33.txt b/Documentation/devicetree/bindings/display/ht16k33.txt
deleted file mode 100644
index d5a8b070b467..000000000000
--- a/Documentation/devicetree/bindings/display/ht16k33.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-Holtek ht16k33 RAM mapping 16*8 LED controller driver with keyscan
--------------------------------------------------------------------------------
-
-Required properties:
-- compatible: "holtek,ht16k33"
-- reg: I2C slave address of the chip.
-- interrupts: Interrupt specification for the key pressed interrupt.
-- refresh-rate-hz: Display update interval in HZ.
-- debounce-delay-ms: Debouncing interval time in milliseconds.
-- linux,keymap: The keymap for keys as described in the binding
- document (devicetree/bindings/input/matrix-keymap.txt).
-
-Optional properties:
-- linux,no-autorepeat: Disable keyrepeat.
-- default-brightness-level: Initial brightness level [0-15] (default: 15).
-
-Example:
-
-&i2c1 {
- ht16k33: ht16k33@70 {
- compatible = "holtek,ht16k33";
- reg = <0x70>;
- refresh-rate-hz = <20>;
- debounce-delay-ms = <50>;
- interrupt-parent = <&gpio4>;
- interrupts = <5 (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING)>;
- linux,keymap = <
- MATRIX_KEY(2, 0, KEY_F6)
- MATRIX_KEY(3, 0, KEY_F8)
- MATRIX_KEY(4, 0, KEY_F10)
- MATRIX_KEY(5, 0, KEY_F4)
- MATRIX_KEY(6, 0, KEY_F2)
- MATRIX_KEY(2, 1, KEY_F5)
- MATRIX_KEY(3, 1, KEY_F7)
- MATRIX_KEY(4, 1, KEY_F9)
- MATRIX_KEY(5, 1, KEY_F3)
- MATRIX_KEY(6, 1, KEY_F1)
- >;
- };
-};
diff --git a/Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml b/Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml
index f1f25aa794d9..0091df9dd73b 100644
--- a/Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml
+++ b/Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml
@@ -74,7 +74,7 @@ properties:
- description: Must be 400 MHz
port:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
A port node pointing to the input port of a HDMI/DP or MIPI display bridge.
diff --git a/Documentation/devicetree/bindings/display/ingenic,ipu.yaml b/Documentation/devicetree/bindings/display/ingenic,ipu.yaml
index 12064a8e7a92..e679f48a3886 100644
--- a/Documentation/devicetree/bindings/display/ingenic,ipu.yaml
+++ b/Documentation/devicetree/bindings/display/ingenic,ipu.yaml
@@ -31,9 +31,8 @@ properties:
clock-names:
const: ipu
-patternProperties:
- "^ports?$":
- description: OF graph bindings (specified in bindings/graph.txt).
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
required:
- compatible
diff --git a/Documentation/devicetree/bindings/display/ingenic,lcd.yaml b/Documentation/devicetree/bindings/display/ingenic,lcd.yaml
index 768050f30dba..50d2b0a50e8a 100644
--- a/Documentation/devicetree/bindings/display/ingenic,lcd.yaml
+++ b/Documentation/devicetree/bindings/display/ingenic,lcd.yaml
@@ -39,18 +39,18 @@ properties:
minItems: 1
port:
- description: OF graph bindings (specified in bindings/graph.txt).
+ $ref: /schemas/graph.yaml#/properties/port
ports:
- description: OF graph bindings (specified in bindings/graph.txt).
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
+
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: DPI output, to interface with TFT panels.
port@8:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Link to the Image Processing Unit (IPU).
(See ingenic,ipu.yaml).
diff --git a/Documentation/devicetree/bindings/display/intel,keembay-display.yaml b/Documentation/devicetree/bindings/display/intel,keembay-display.yaml
index 0a697d45c2ad..bc6622b010ca 100644
--- a/Documentation/devicetree/bindings/display/intel,keembay-display.yaml
+++ b/Documentation/devicetree/bindings/display/intel,keembay-display.yaml
@@ -36,7 +36,7 @@ properties:
maxItems: 1
port:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Display output node to DSI.
required:
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
index 33977e15bebd..93b160df3eec 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
@@ -23,7 +23,7 @@ connected to.
For a description of the display interface sink function blocks, see
Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt and
-Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt.
+Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml.
Required properties (all function blocks):
- compatible: "mediatek,<chip>-disp-<function>", one of
@@ -37,13 +37,14 @@ Required properties (all function blocks):
"mediatek,<chip>-disp-aal" - adaptive ambient light controller
"mediatek,<chip>-disp-gamma" - gamma correction
"mediatek,<chip>-disp-merge" - merge streams from two RDMA sources
+ "mediatek,<chip>-disp-postmask" - control round corner for display frame
"mediatek,<chip>-disp-split" - split stream to two encoders
"mediatek,<chip>-disp-ufoe" - data compression engine
"mediatek,<chip>-dsi" - DSI controller, see mediatek,dsi.txt
"mediatek,<chip>-dpi" - DPI controller, see mediatek,dpi.txt
"mediatek,<chip>-disp-mutex" - display mutex
"mediatek,<chip>-disp-od" - overdrive
- the supported chips are mt2701, mt7623, mt2712, mt8167 and mt8173.
+ the supported chips are mt2701, mt7623, mt2712, mt8167, mt8173, mt8183 and mt8192.
- reg: Physical base address and length of the function block register space
- interrupts: The interrupt signal from the function block (required, except for
merge and split function blocks).
@@ -61,11 +62,19 @@ Required properties (DMA function blocks):
"mediatek,<chip>-disp-wdma"
the supported chips are mt2701, mt8167 and mt8173.
- larb: Should contain a phandle pointing to the local arbiter device as defined
- in Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+ in Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
- iommus: Should point to the respective IOMMU block with master port as
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
for details.
+Optional properties (RDMA function blocks):
+- mediatek,rdma-fifo-size: the RDMA FIFO size may differ between RDMA instances,
+  even within the same SoC, so add this property to the corresponding RDMA node.
+  The value is the maximum size defined in the hardware data sheet, e.g.:
+  mediatek,rdma-fifo-size of mt8173-rdma0 is 8K
+  mediatek,rdma-fifo-size of mt8183-rdma0 is 5K
+  mediatek,rdma-fifo-size of mt8183-rdma1 is 2K
+
Examples:
mmsys: clock-controller@14000000 {
@@ -103,6 +112,7 @@ rdma0: rdma@1400e000 {
clocks = <&mmsys CLK_MM_DISP_RDMA0>;
iommus = <&iommu M4U_PORT_DISP_RDMA0>;
mediatek,larb = <&larb0>;
+	mediatek,rdma-fifo-size = <8192>;
};
rdma1: rdma@1400f000 {
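For reference, a hedged sketch of the new property on an mt8183 RDMA instance follows; the unit address, interrupt and clock specifiers are placeholders, and the 5K FIFO depth is assumed to be expressed in bytes, matching the <8192> value used above for the 8K mt8173 case:

rdma0: rdma@1400b000 {
	compatible = "mediatek,mt8183-disp-rdma";
	reg = <0x1400b000 0x1000>;			/* placeholder base address */
	interrupts = <GIC_SPI 230 IRQ_TYPE_LEVEL_LOW>;	/* placeholder */
	clocks = <&mmsys CLK_MM_DISP_RDMA0>;
	iommus = <&iommu M4U_PORT_DISP_RDMA0>;
	mediatek,larb = <&larb0>;
	mediatek,rdma-fifo-size = <5120>;		/* 5K FIFO on mt8183-rdma0 */
};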
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
index f06f24d405a5..8238a86686be 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
@@ -22,23 +22,7 @@ Required properties:
MIPI TX Configuration Module
============================
-The MIPI TX configuration module controls the MIPI D-PHY.
-
-Required properties:
-- compatible: "mediatek,<chip>-mipi-tx"
-- the supported chips are mt2701, 7623, mt8173 and mt8183.
-- reg: Physical base address and length of the controller's registers
-- clocks: PLL reference clock
-- clock-output-names: name of the output clock line to the DSI encoder
-- #clock-cells: must be <0>;
-- #phy-cells: must be <0>.
-
-Optional properties:
-- drive-strength-microamp: adjust driving current, should be 3000 ~ 6000. And
- the step is 200.
-- nvmem-cells: A phandle to the calibration data provided by a nvmem device. If
- unspecified default values shall be used.
-- nvmem-cell-names: Should be "calibration-data"
+See phy/mediatek,dsi-phy.yaml
Example:
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt
index 6b1c586403e4..b284ca51b913 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt
@@ -53,23 +53,7 @@ Required properties:
HDMI PHY
========
-
-The HDMI PHY serializes the HDMI encoder's three channel 10-bit parallel
-output and drives the HDMI pads.
-
-Required properties:
-- compatible: "mediatek,<chip>-hdmi-phy"
-- the supported chips are mt2701, mt7623 and mt8173
-- reg: Physical base address and length of the module's registers
-- clocks: PLL reference clock
-- clock-names: must contain "pll_ref"
-- clock-output-names: must be "hdmitx_dig_cts" on mt8173
-- #phy-cells: must be <0>
-- #clock-cells: must be <0>
-
-Optional properties:
-- mediatek,ibias: TX DRV bias current for <1.65Gbps, defaults to 0xa
-- mediatek,ibias_up: TX DRV bias current for >1.65Gbps, defaults to 0x1c
+See phy/mediatek,hdmi-phy.yaml
Example:
diff --git a/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml b/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
index 6b7fddc80c41..67682fe77f10 100644
--- a/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
+++ b/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
@@ -37,34 +37,33 @@ properties:
panel-timing: true
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
+
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description: The sink for odd pixels.
properties:
- reg:
- const: 0
-
dual-lvds-odd-pixels: true
required:
- - reg
- dual-lvds-odd-pixels
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description: The sink for even pixels.
properties:
- reg:
- const: 1
-
dual-lvds-even-pixels: true
required:
- - reg
- dual-lvds-even-pixels
+ required:
+ - port@0
+ - port@1
+
additionalProperties: false
required:
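A hedged sketch of a panel node using the two dual-LVDS ports described by this schema follows; the LVDS encoder endpoint phandles are placeholders and the timing/size properties required by the full binding are omitted for brevity:

panel {
	compatible = "advantech,idk-2121wr", "panel-lvds";

	ports {
		#address-cells = <1>;
		#size-cells = <0>;

		port@0 {
			reg = <0>;
			dual-lvds-odd-pixels;
			panel_in0: endpoint {
				remote-endpoint = <&lvds0_out>;	/* placeholder */
			};
		};

		port@1 {
			reg = <1>;
			dual-lvds-even-pixels;
			panel_in1: endpoint {
				remote-endpoint = <&lvds1_out>;	/* placeholder */
			};
		};
	};
};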
diff --git a/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml b/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml
index b8b9435e464c..4f92365e888a 100644
--- a/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml
+++ b/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml
@@ -30,6 +30,7 @@ properties:
power supply for LCM (1.8V)
dcdc-en-gpios:
+ maxItems: 1
description: |
phandle of the gpio for power ic line
Power IC supply enable, High active
diff --git a/Documentation/devicetree/bindings/display/panel/mantix,mlaf057we51-x.yaml b/Documentation/devicetree/bindings/display/panel/mantix,mlaf057we51-x.yaml
index 51f423297ec8..a4b8569ab81c 100644
--- a/Documentation/devicetree/bindings/display/panel/mantix,mlaf057we51-x.yaml
+++ b/Documentation/devicetree/bindings/display/panel/mantix,mlaf057we51-x.yaml
@@ -20,6 +20,7 @@ properties:
compatible:
enum:
- mantix,mlaf057we51-x
+ - ys,ys57pss36bh5gq
port: true
reg:
@@ -37,7 +38,8 @@ properties:
reset-gpios: true
- 'mantix,tp-rstn-gpios':
+ mantix,tp-rstn-gpios:
+ maxItems: 1
description: second reset line that triggers DSI config load
backlight: true
diff --git a/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml b/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
index 2f5df1d235ae..ef4c0a24512d 100644
--- a/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
+++ b/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
@@ -30,6 +30,7 @@ properties:
panel. The novatek,nt36672a compatible shall always be provided as a fallback.
reset-gpios:
+ maxItems: 1
description: phandle of gpio for reset line - This should be 8mA, gpio
can be configured using mux, pinctrl, pinctrl-names (active high)
diff --git a/Documentation/devicetree/bindings/display/panel/panel-common.yaml b/Documentation/devicetree/bindings/display/panel/panel-common.yaml
index cd6dc5461721..5b38dc89cb21 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-common.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-common.yaml
@@ -68,16 +68,7 @@ properties:
# Connectivity
port:
- type: object
-
- ports:
- type: object
- description:
- Panels receive video data through one or multiple connections. While
- the nature of those connections is specific to the panel type, the
- connectivity is expressed in a standard fashion using ports as specified
- in the device graph bindings defined in
- Documentation/devicetree/bindings/graph.txt.
+ $ref: /schemas/graph.yaml#/properties/port
ddc-i2c-bus:
$ref: /schemas/types.yaml#/definitions/phandle
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
index 72e4b6d4d5e1..fbd71669248f 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
@@ -35,6 +35,8 @@ properties:
- boe,tv080wum-nl0
# Innolux P079ZCA 7.85" 768x1024 TFT LCD panel
- innolux,p079zca
+ # Khadas TS050 5" 1080x1920 LCD panel
+ - khadas,ts050
# Kingdisplay KD097D04 9.7" 1536x2048 TFT LCD panel
- kingdisplay,kd097d04
# LG ACX467AKM-7 4.95" 1080×1920 LCD Panel
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index 27fffafe5b5c..62b0d54d87b7 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -76,6 +76,8 @@ properties:
# BOE OPTOELECTRONICS TECHNOLOGY 10.1" WXGA TFT LCD panel
- boe,nv101wxmn51
+      # BOE NV110WTM-N61 11.0" 2160x1440 TFT LCD Panel
+      - boe,nv110wtm-n61
 # BOE NV133FHM-N61 13.3" FHD (1920x1080) TFT LCD Panel
- boe,nv133fhm-n61
# BOE NV133FHM-N62 13.3" FHD (1920x1080) TFT LCD Panel
- boe,nv133fhm-n62
@@ -105,26 +107,27 @@ properties:
- dlc,dlc1010gig
# Emerging Display Technology Corp. 3.5" QVGA TFT LCD panel
- edt,et035012dm6
+ # Emerging Display Technology Corp. 5.7" VGA TFT LCD panel
+ - edt,et057090dhu
+ - edt,et070080dh6
# Emerging Display Technology Corp. 480x272 TFT Display with capacitive touch
- edt,etm043080dh6gp
# Emerging Display Technology Corp. 480x272 TFT Display
- edt,etm0430g0dh6
- # Emerging Display Technology Corp. 5.7" VGA TFT LCD panel
- - edt,et057090dhu
- # Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
- - edt,etm070080dh6
- # Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
- - edt,etm0700g0dh6
# Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
# Same as ETM0700G0DH6 but with inverted pixel clock.
- edt,etm070080bdh6
# Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
+ # Same timings as the ETM0700G0DH6, but with resistive touch.
+ - edt,etm070080dh6
+ # Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
# Same display as the ETM0700G0BDH6, but with changed hardware for the
# backlight and the touch interface.
- edt,etm070080edh6
+ - edt,etm0700g0bdh6
# Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
- # Same timings as the ETM0700G0DH6, but with resistive touch.
- - edt,etm070080dh6
+ - edt,etm0700g0dh6
+ - edt,etm0700g0edh6
# Evervision Electronics Co. Ltd. VGG804821 5.0" WVGA TFT LCD Panel
- evervision,vgg804821
# Foxlink Group 5" WVGA TFT LCD panel
@@ -173,6 +176,8 @@ properties:
- koe,tx26d202vm0bwa
# Kaohsiung Opto-Electronics. TX31D200VM0BAA 12.3" HSXGA LVDS panel
- koe,tx31d200vm0baa
+ # Kyocera Corporation 7" WVGA (800x480) transmissive color TFT
+ - kyo,tcg070wvlq
# Kyocera Corporation 12.1" XGA (1024x768) TFT LCD panel
- kyo,tcg121xglp
# LeMaker BL035-RGB-002 3.5" QVGA TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
index 1dab80ae1d0a..ea58df49263a 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
@@ -11,6 +11,7 @@ maintainers:
allOf:
- $ref: panel-common.yaml#
+ - $ref: /schemas/leds/backlight/common.yaml#
properties:
compatible:
@@ -19,6 +20,8 @@ properties:
reg: true
reset-gpios: true
port: true
+ default-brightness: true
+ max-brightness: true
vdd3-supply:
description: VDD regulator
@@ -31,7 +34,6 @@ required:
- reset-gpios
- vdd3-supply
- vci-supply
- - port
unevaluatedProperties: false
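With the common backlight schema now referenced, the panel node can carry the shared brightness properties and no longer has to provide a port. A hedged sketch with placeholder regulator and GPIO phandles and illustrative brightness values:

panel@0 {
	compatible = "samsung,s6e63m0";
	reg = <0>;
	reset-gpios = <&gpio3 8 GPIO_ACTIVE_LOW>;	/* placeholder */
	vdd3-supply = <&ldo7_reg>;			/* placeholder */
	vci-supply = <&ldo17_reg>;			/* placeholder */
	default-brightness = <80>;			/* illustrative */
	max-brightness = <255>;				/* illustrative */
};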
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
index 4110d003ce1f..008c144257cb 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
@@ -43,34 +43,24 @@ properties:
This soc uses GRF regs to switch the HDMI TX input between vop0 and vop1.
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Port node with two endpoints, numbered 0 and 1,
connected respectively to vop0 and vop1.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Port node with one endpoint connected to a hdmi-connector node.
required:
- - "#address-cells"
- - "#size-cells"
- port@0
- port@1
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
index ed8148e26e24..6f43d885c9b3 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
@@ -70,10 +70,7 @@ properties:
- const: dclk
port:
- type: object
- description:
- A port node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/port
assigned-clocks:
maxItems: 2
diff --git a/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml b/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
index 327a14d85df8..679daed4124e 100644
--- a/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
@@ -51,20 +51,16 @@ properties:
Phandle of the regulator that provides the supply voltage.
ports:
- type: object
- description:
- A node containing DSI input & output port nodes with endpoint
- definitions as documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
- Documentation/devicetree/bindings/graph.txt
+ $ref: /schemas/graph.yaml#/properties/ports
+
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
DSI input port node, connected to the ltdc rgb output port.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
DSI output port node, connected to a panel or a bridge input port"
diff --git a/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml b/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
index bf8ad916e9b0..d54f9ca207af 100644
--- a/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
+++ b/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
@@ -35,15 +35,13 @@ properties:
maxItems: 1
port:
- type: object
- description:
- "Video port for DPI RGB output.
+ $ref: /schemas/graph.yaml#/properties/port
+ description: |
+ Video port for DPI RGB output.
ltdc has one video port with up to 2 endpoints:
- for external dpi rgb panel or bridge, using gpios.
- for internal dpi input of the MIPI DSI host controller.
Note: These 2 endpoints cannot be activated simultaneously.
- Please refer to the bindings defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt."
required:
- compatible
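A hedged sketch of the single LTDC port with its two possible endpoints; only one of them would normally be wired up on a given board, and the remote phandles are placeholders:

port {
	#address-cells = <1>;
	#size-cells = <0>;

	ltdc_ep0_out: endpoint@0 {
		reg = <0>;
		remote-endpoint = <&panel_in_rgb>;	/* external DPI panel/bridge */
	};

	ltdc_ep1_out: endpoint@1 {
		reg = <1>;
		remote-endpoint = <&dsi_in>;		/* internal MIPI DSI host */
	};
};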
diff --git a/Documentation/devicetree/bindings/display/ste,mcde.txt b/Documentation/devicetree/bindings/display/ste,mcde.txt
deleted file mode 100644
index 4c33c692bd5f..000000000000
--- a/Documentation/devicetree/bindings/display/ste,mcde.txt
+++ /dev/null
@@ -1,104 +0,0 @@
-ST-Ericsson Multi Channel Display Engine MCDE
-
-The ST-Ericsson MCDE is a display controller with support for compositing
-and displaying several channels memory resident graphics data on DSI or
-LCD displays or bridges. It is used in the ST-Ericsson U8500 platform.
-
-Required properties:
-
-- compatible: must be:
- "ste,mcde"
-- reg: register base for the main MCDE control registers, should be
- 0x1000 in size
-- interrupts: the interrupt line for the MCDE
-- epod-supply: a phandle to the EPOD regulator
-- vana-supply: a phandle to the analog voltage regulator
-- clocks: an array of the MCDE clocks in this strict order:
- MCDECLK (main MCDE clock), LCDCLK (LCD clock), PLLDSI
- (HDMI clock), DSI0ESCLK (DSI0 energy save clock),
- DSI1ESCLK (DSI1 energy save clock), DSI2ESCLK (DSI2 energy
- save clock)
-- clock-names: must be the following array:
- "mcde", "lcd", "hdmi"
- to match the required clock inputs above.
-- #address-cells: should be <1> (for the DSI hosts that will be children)
-- #size-cells: should be <1> (for the DSI hosts that will be children)
-- ranges: this should always be stated
-
-Required subnodes:
-
-The devicetree must specify subnodes for the DSI host adapters.
-These must have the following characteristics:
-
-- compatible: must be:
- "ste,mcde-dsi"
-- reg: must specify the register range for the DSI host
-- vana-supply: phandle to the VANA voltage regulator
-- clocks: phandles to the high speed and low power (energy save) clocks
- the high speed clock is not present on the third (dsi2) block, so it
- should only have the "lp" clock
-- clock-names: "hs" for the high speed clock and "lp" for the low power
- (energy save) clock
-- #address-cells: should be <1>
-- #size-cells: should be <0>
-
-Display panels and bridges will appear as children on the DSI hosts, and
-the displays are connected to the DSI hosts using the common binding
-for video transmitter interfaces; see
-Documentation/devicetree/bindings/media/video-interfaces.txt
-
-If a DSI host is unused (not connected) it will have no children defined.
-
-Example:
-
-mcde@a0350000 {
- compatible = "ste,mcde";
- reg = <0xa0350000 0x1000>;
- interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
- epod-supply = <&db8500_b2r2_mcde_reg>;
- vana-supply = <&ab8500_ldo_ana_reg>;
- clocks = <&prcmu_clk PRCMU_MCDECLK>, /* Main MCDE clock */
- <&prcmu_clk PRCMU_LCDCLK>, /* LCD clock */
- <&prcmu_clk PRCMU_PLLDSI>; /* HDMI clock */
- clock-names = "mcde", "lcd", "hdmi";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- dsi0: dsi@a0351000 {
- compatible = "ste,mcde-dsi";
- reg = <0xa0351000 0x1000>;
- vana-supply = <&ab8500_ldo_ana_reg>;
- clocks = <&prcmu_clk PRCMU_DSI0CLK>, <&prcmu_clk PRCMU_DSI0ESCCLK>;
- clock-names = "hs", "lp";
- #address-cells = <1>;
- #size-cells = <0>;
-
- panel {
- compatible = "samsung,s6d16d0";
- reg = <0>;
- vdd1-supply = <&ab8500_ldo_aux1_reg>;
- reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
- };
-
- };
- dsi1: dsi@a0352000 {
- compatible = "ste,mcde-dsi";
- reg = <0xa0352000 0x1000>;
- vana-supply = <&ab8500_ldo_ana_reg>;
- clocks = <&prcmu_clk PRCMU_DSI1CLK>, <&prcmu_clk PRCMU_DSI1ESCCLK>;
- clock-names = "hs", "lp";
- #address-cells = <1>;
- #size-cells = <0>;
- };
- dsi2: dsi@a0353000 {
- compatible = "ste,mcde-dsi";
- reg = <0xa0353000 0x1000>;
- vana-supply = <&ab8500_ldo_ana_reg>;
- /* This DSI port only has the Low Power / Energy Save clock */
- clocks = <&prcmu_clk PRCMU_DSI2ESCCLK>;
- clock-names = "lp";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-};
diff --git a/Documentation/devicetree/bindings/display/ste,mcde.yaml b/Documentation/devicetree/bindings/display/ste,mcde.yaml
new file mode 100644
index 000000000000..de0c678b3c29
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ste,mcde.yaml
@@ -0,0 +1,168 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/ste,mcde.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ST-Ericsson Multi Channel Display Engine MCDE
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+properties:
+ compatible:
+ const: ste,mcde
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ description: an array of the MCDE clocks
+ items:
+ - description: MCDECLK (main MCDE clock)
+ - description: LCDCLK (LCD clock)
+ - description: PLLDSI (HDMI clock)
+
+ clock-names:
+ items:
+ - const: mcde
+ - const: lcd
+ - const: hdmi
+
+ resets:
+ maxItems: 1
+
+ epod-supply:
+ description: a phandle to the EPOD regulator
+
+ vana-supply:
+ description: a phandle to the analog voltage regulator
+
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ A DPI port node
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ ranges: true
+
+patternProperties:
+ "^dsi@[0-9a-f]+$":
+ description: subnodes for the three DSI host adapters
+ type: object
+ allOf:
+ - $ref: dsi-controller.yaml#
+ properties:
+ compatible:
+ const: ste,mcde-dsi
+
+ reg:
+ maxItems: 1
+
+ vana-supply:
+ description: a phandle to the analog voltage regulator
+
+ clocks:
+ description: phandles to the high speed and low power (energy save) clocks.
+ The high speed clock is not present on the third (dsi2) block, so that
+ block should only have the "lp" clock.
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ oneOf:
+ - items:
+ - const: hs
+ - const: lp
+ - items:
+ - const: lp
+
+ required:
+ - compatible
+ - reg
+ - vana-supply
+ - clocks
+ - clock-names
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - epod-supply
+ - vana-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/mfd/dbx500-prcmu.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ mcde@a0350000 {
+ compatible = "ste,mcde";
+ reg = <0xa0350000 0x1000>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ epod-supply = <&db8500_b2r2_mcde_reg>;
+ vana-supply = <&ab8500_ldo_ana_reg>;
+ clocks = <&prcmu_clk PRCMU_MCDECLK>,
+ <&prcmu_clk PRCMU_LCDCLK>,
+ <&prcmu_clk PRCMU_PLLDSI>;
+ clock-names = "mcde", "lcd", "hdmi";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ dsi0: dsi@a0351000 {
+ compatible = "ste,mcde-dsi";
+ reg = <0xa0351000 0x1000>;
+ vana-supply = <&ab8500_ldo_ana_reg>;
+ clocks = <&prcmu_clk PRCMU_DSI0CLK>, <&prcmu_clk PRCMU_DSI0ESCCLK>;
+ clock-names = "hs", "lp";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "samsung,s6d16d0";
+ reg = <0>;
+ vdd1-supply = <&ab8500_ldo_aux1_reg>;
+ reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ dsi1: dsi@a0352000 {
+ compatible = "ste,mcde-dsi";
+ reg = <0xa0352000 0x1000>;
+ vana-supply = <&ab8500_ldo_ana_reg>;
+ clocks = <&prcmu_clk PRCMU_DSI1CLK>, <&prcmu_clk PRCMU_DSI1ESCCLK>;
+ clock-names = "hs", "lp";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ dsi2: dsi@a0353000 {
+ compatible = "ste,mcde-dsi";
+ reg = <0xa0353000 0x1000>;
+ vana-supply = <&ab8500_ldo_ana_reg>;
+ /* This DSI port only has the Low Power / Energy Save clock */
+ clocks = <&prcmu_clk PRCMU_DSI2ESCCLK>;
+ clock-names = "lp";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
index 34d993338453..8a6d3e1ee306 100644
--- a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
+++ b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
@@ -111,8 +111,8 @@ of the following host1x client modules:
endpoint (required node)
Required properties:
- - data-lanes: an array of data lane from 1 to 4. Valid array
- lengths are 1/2/4.
+ - data-lanes: an array of data lane from 1 to 8. Valid array
+ lengths are 1/2/4/8.
- remote-endpoint: phandle to sensor 'endpoint' node.
port@1 (required node)
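As a hedged illustration of the widened range, a sensor-facing endpoint using all eight data lanes might look as follows; the remote phandle is a placeholder:

endpoint {
	data-lanes = <1 2 3 4 5 6 7 8>;
	remote-endpoint = <&imx390_out>;	/* placeholder sensor endpoint */
};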
diff --git a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
index 4dc30738ee57..781c1868b0b8 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
+++ b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
@@ -74,30 +74,19 @@ properties:
type: boolean
ports:
- type: object
- description:
- Ports as described in Documentation/devicetree/bindings/graph.txt
- properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
The DSS OLDI output port node form video port 1
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
The DSS DPI output port node from video port 2
- required:
- - "#address-cells"
- - "#size-cells"
-
ti,am65x-oldi-io-ctrl:
$ref: "/schemas/types.yaml#/definitions/phandle-array"
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
index c9a947d55fa4..2986f9acc9f0 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
+++ b/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
@@ -107,40 +107,29 @@ properties:
type: boolean
ports:
- type: object
- description:
- Ports as described in Documentation/devicetree/bindings/graph.txt
- properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
The output port node form video port 1
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
The output port node from video port 2
port@2:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
The output port node from video port 3
port@3:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
The output port node from video port 4
- required:
- - "#address-cells"
- - "#size-cells"
-
max-memory-bandwidth:
$ref: /schemas/types.yaml#/definitions/uint32
description:
diff --git a/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
index 8f87b82c6695..7ce7bbad5780 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
+++ b/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
@@ -54,9 +54,8 @@ properties:
description: phandle to the associated power domain
port:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
- Port as described in Documentation/devicetree/bindings/graph.txt.
The DSS DPI output port node
max-memory-bandwidth:
diff --git a/Documentation/devicetree/bindings/dma/ingenic,dma.yaml b/Documentation/devicetree/bindings/dma/ingenic,dma.yaml
index 6a2043721b95..ac4d59494fc8 100644
--- a/Documentation/devicetree/bindings/dma/ingenic,dma.yaml
+++ b/Documentation/devicetree/bindings/dma/ingenic,dma.yaml
@@ -17,6 +17,8 @@ properties:
enum:
- ingenic,jz4740-dma
- ingenic,jz4725b-dma
+ - ingenic,jz4760-dma
+ - ingenic,jz4760b-dma
- ingenic,jz4770-dma
- ingenic,jz4780-dma
- ingenic,x1000-dma
diff --git a/Documentation/devicetree/bindings/dma/intel,ldma.yaml b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
new file mode 100644
index 000000000000..a5c4be783593
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
@@ -0,0 +1,116 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/intel,ldma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Lightning Mountain centralized DMA controllers.
+
+maintainers:
+ - chuanhua.lei@intel.com
+ - mallikarjunax.reddy@intel.com
+
+allOf:
+ - $ref: "dma-controller.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - intel,lgm-cdma
+ - intel,lgm-dma2tx
+ - intel,lgm-dma1rx
+ - intel,lgm-dma1tx
+ - intel,lgm-dma0tx
+ - intel,lgm-dma3
+ - intel,lgm-toe-dma30
+ - intel,lgm-toe-dma31
+
+ reg:
+ maxItems: 1
+
+ "#dma-cells":
+ const: 3
+ description:
+ The first cell is the peripheral's DMA request line.
+ The second cell is the peripheral's (port) number corresponding to the channel.
+ The third cell is the burst length of the channel.
+
+ dma-channels:
+ minimum: 1
+ maximum: 16
+
+ dma-channel-mask:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ items:
+ - const: ctrl
+
+ interrupts:
+ maxItems: 1
+
+ intel,dma-poll-cnt:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ DMA descriptor polling counter, used to control the polling mechanism
+ for descriptor fetching for all channels.
+
+ intel,dma-byte-en:
+ type: boolean
+ description:
+ DMA byte enable is only valid for DMA write (RX).
+ Byte enable (1) means the DMA write will be based on the number of dwords
+ instead of the whole burst.
+
+ intel,dma-drb:
+ type: boolean
+ description:
+ DMA descriptor read back to make sure data and desc synchronization.
+
+ intel,dma-dburst-wr:
+ type: boolean
+ description:
+ Enable RX dynamic burst write. When it is enabled, the DMA does RX dynamic burst;
+ if it is disabled, the DMA RX will still support programmable fixed burst size of 2,4,8,16.
+ It only applies to RX DMA and memcopy DMA.
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ dma0: dma-controller@e0e00000 {
+ compatible = "intel,lgm-cdma";
+ reg = <0xe0e00000 0x1000>;
+ #dma-cells = <3>;
+ dma-channels = <16>;
+ dma-channel-mask = <0xFFFF>;
+ interrupt-parent = <&ioapic1>;
+ interrupts = <82 1>;
+ resets = <&rcu0 0x30 0>;
+ reset-names = "ctrl";
+ clocks = <&cgu0 80>;
+ intel,dma-poll-cnt = <4>;
+ intel,dma-byte-en;
+ intel,dma-drb;
+ };
+ - |
+ dma3: dma-controller@ec800000 {
+ compatible = "intel,lgm-dma3";
+ reg = <0xec800000 0x1000>;
+ clocks = <&cgu0 71>;
+ resets = <&rcu0 0x10 9>;
+ #dma-cells = <3>;
+ intel,dma-poll-cnt = <16>;
+ intel,dma-byte-en;
+ intel,dma-dburst-wr;
+ };
diff --git a/Documentation/devicetree/bindings/dma/owl-dma.yaml b/Documentation/devicetree/bindings/dma/owl-dma.yaml
index 256d62af2c64..93b4847554fb 100644
--- a/Documentation/devicetree/bindings/dma/owl-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/owl-dma.yaml
@@ -8,8 +8,8 @@ title: Actions Semi Owl SoCs DMA controller
description: |
The OWL DMA is a general-purpose direct memory access controller capable of
- supporting 10 and 12 independent DMA channels for S700 and S900 SoCs
- respectively.
+ supporting 10 independent DMA channels for the Actions Semi S700 SoC and 12
+ independent DMA channels for the S500 and S900 SoC variants.
maintainers:
- Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
@@ -20,8 +20,9 @@ allOf:
properties:
compatible:
enum:
- - actions,s900-dma
+ - actions,s500-dma
- actions,s700-dma
+ - actions,s900-dma
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
index c07eb6f2fc8d..7f2a54bc732d 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
@@ -14,34 +14,37 @@ allOf:
properties:
compatible:
- items:
- - enum:
- - renesas,dmac-r8a7742 # RZ/G1H
- - renesas,dmac-r8a7743 # RZ/G1M
- - renesas,dmac-r8a7744 # RZ/G1N
- - renesas,dmac-r8a7745 # RZ/G1E
- - renesas,dmac-r8a77470 # RZ/G1C
- - renesas,dmac-r8a774a1 # RZ/G2M
- - renesas,dmac-r8a774b1 # RZ/G2N
- - renesas,dmac-r8a774c0 # RZ/G2E
- - renesas,dmac-r8a774e1 # RZ/G2H
- - renesas,dmac-r8a7790 # R-Car H2
- - renesas,dmac-r8a7791 # R-Car M2-W
- - renesas,dmac-r8a7792 # R-Car V2H
- - renesas,dmac-r8a7793 # R-Car M2-N
- - renesas,dmac-r8a7794 # R-Car E2
- - renesas,dmac-r8a7795 # R-Car H3
- - renesas,dmac-r8a7796 # R-Car M3-W
- - renesas,dmac-r8a77961 # R-Car M3-W+
- - renesas,dmac-r8a77965 # R-Car M3-N
- - renesas,dmac-r8a77970 # R-Car V3M
- - renesas,dmac-r8a77980 # R-Car V3H
- - renesas,dmac-r8a77990 # R-Car E3
- - renesas,dmac-r8a77995 # R-Car D3
- - const: renesas,rcar-dmac
-
- reg:
- maxItems: 1
+ oneOf:
+ - items:
+ - enum:
+ - renesas,dmac-r8a7742 # RZ/G1H
+ - renesas,dmac-r8a7743 # RZ/G1M
+ - renesas,dmac-r8a7744 # RZ/G1N
+ - renesas,dmac-r8a7745 # RZ/G1E
+ - renesas,dmac-r8a77470 # RZ/G1C
+ - renesas,dmac-r8a774a1 # RZ/G2M
+ - renesas,dmac-r8a774b1 # RZ/G2N
+ - renesas,dmac-r8a774c0 # RZ/G2E
+ - renesas,dmac-r8a774e1 # RZ/G2H
+ - renesas,dmac-r8a7790 # R-Car H2
+ - renesas,dmac-r8a7791 # R-Car M2-W
+ - renesas,dmac-r8a7792 # R-Car V2H
+ - renesas,dmac-r8a7793 # R-Car M2-N
+ - renesas,dmac-r8a7794 # R-Car E2
+ - renesas,dmac-r8a7795 # R-Car H3
+ - renesas,dmac-r8a7796 # R-Car M3-W
+ - renesas,dmac-r8a77961 # R-Car M3-W+
+ - renesas,dmac-r8a77965 # R-Car M3-N
+ - renesas,dmac-r8a77970 # R-Car V3M
+ - renesas,dmac-r8a77980 # R-Car V3H
+ - renesas,dmac-r8a77990 # R-Car E3
+ - renesas,dmac-r8a77995 # R-Car D3
+ - const: renesas,rcar-dmac
+
+ - items:
+ - const: renesas,dmac-r8a779a0 # R-Car V3U
+
+ reg: true
interrupts:
minItems: 9
@@ -110,6 +113,23 @@ required:
- power-domains
- resets
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,dmac-r8a779a0
+then:
+ properties:
+ reg:
+ items:
+ - description: Base register block
+ - description: Channel register block
+else:
+ properties:
+ reg:
+ maxItems: 1
+
additionalProperties: false
examples:
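A hedged sketch of an R-Car V3U node with the split register layout required by the new if/then clause; the addresses are placeholders:

dmac1: dma-controller@e7350000 {
	compatible = "renesas,dmac-r8a779a0";
	reg = <0xe7350000 0x1000>,	/* base register block (placeholder) */
	      <0xe7300000 0x10000>;	/* channel register block (placeholder) */
	/* interrupts, clocks, power-domains and resets omitted for brevity */
};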
diff --git a/Documentation/devicetree/bindings/dma/sirfsoc-dma.txt b/Documentation/devicetree/bindings/dma/sirfsoc-dma.txt
deleted file mode 100644
index ccd52d6a231a..000000000000
--- a/Documentation/devicetree/bindings/dma/sirfsoc-dma.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-* CSR SiRFSoC DMA controller
-
-See dma.txt first
-
-Required properties:
-- compatible: Should be "sirf,prima2-dmac", "sirf,atlas7-dmac" or
- "sirf,atlas7-dmac-v2"
-- reg: Should contain DMA registers location and length.
-- interrupts: Should contain one interrupt shared by all channel
-- #dma-cells: must be <1>. used to represent the number of integer
- cells in the dmas property of client device.
-- clocks: clock required
-
-Example:
-
-Controller:
-dmac0: dma-controller@b00b0000 {
- compatible = "sirf,prima2-dmac";
- reg = <0xb00b0000 0x10000>;
- interrupts = <12>;
- clocks = <&clks 24>;
- #dma-cells = <1>;
-};
-
-
-Client:
-Fill the specific dma request line in dmas. In the below example, spi0 read
-channel request line is 9 of the 2nd dma controller, while write channel uses
-4 of the 2nd dma controller; spi1 read channel request line is 12 of the 1st
-dma controller, while write channel uses 13 of the 1st dma controller:
-
-spi0: spi@b00d0000 {
- compatible = "sirf,prima2-spi";
- dmas = <&dmac1 9>,
- <&dmac1 4>;
- dma-names = "rx", "tx";
-};
-
-spi1: spi@b0170000 {
- compatible = "sirf,prima2-spi";
- dmas = <&dmac0 12>,
- <&dmac0 13>;
- dma-names = "rx", "tx";
-};
diff --git a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt
deleted file mode 100644
index dbe160400adc..000000000000
--- a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-Synopsys DesignWare AXI DMA Controller
-
-Required properties:
-- compatible: "snps,axi-dma-1.01a"
-- reg: Address range of the DMAC registers. This should include
- all of the per-channel registers.
-- interrupt: Should contain the DMAC interrupt number.
-- dma-channels: Number of channels supported by hardware.
-- snps,dma-masters: Number of AXI masters supported by the hardware.
-- snps,data-width: Maximum AXI data width supported by hardware.
- (0 - 8bits, 1 - 16bits, 2 - 32bits, ..., 6 - 512bits)
-- snps,priority: Priority of channel. Array size is equal to the number of
- dma-channels. Priority value must be programmed within [0:dma-channels-1]
- range. (0 - minimum priority)
-- snps,block-size: Maximum block size supported by the controller channel.
- Array size is equal to the number of dma-channels.
-
-Optional properties:
-- snps,axi-max-burst-len: Restrict master AXI burst length by value specified
- in this property. If this property is missing the maximum AXI burst length
- supported by DMAC is used. [1:256]
-
-Example:
-
-dmac: dma-controller@80000 {
- compatible = "snps,axi-dma-1.01a";
- reg = <0x80000 0x400>;
- clocks = <&core_clk>, <&cfgr_clk>;
- clock-names = "core-clk", "cfgr-clk";
- interrupt-parent = <&intc>;
- interrupts = <27>;
-
- dma-channels = <4>;
- snps,dma-masters = <2>;
- snps,data-width = <3>;
- snps,block-size = <4096 4096 4096 4096>;
- snps,priority = <0 1 2 3>;
- snps,axi-max-burst-len = <16>;
-};
diff --git a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
new file mode 100644
index 000000000000..79e241498e25
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
@@ -0,0 +1,126 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/snps,dw-axi-dmac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys DesignWare AXI DMA Controller
+
+maintainers:
+ - Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+ - Jee Heng Sia <jee.heng.sia@intel.com>
+
+description:
+ Synopsys DesignWare AXI DMA Controller DT Binding
+
+allOf:
+ - $ref: "dma-controller.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - snps,axi-dma-1.01a
+ - intel,kmb-axi-dma
+
+ reg:
+ minItems: 1
+ items:
+ - description: Address range of the DMAC registers
+ - description: Address range of the DMAC APB registers
+
+ reg-names:
+ items:
+ - const: axidma_ctrl_regs
+ - const: axidma_apb_regs
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Bus Clock
+ - description: Module Clock
+
+ clock-names:
+ items:
+ - const: core-clk
+ - const: cfgr-clk
+
+ '#dma-cells':
+ const: 1
+
+ dma-channels:
+ minimum: 1
+ maximum: 8
+
+ snps,dma-masters:
+ description: |
+ Number of AXI masters supported by the hardware.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2]
+
+ snps,data-width:
+ description: |
+ AXI data width supported by hardware.
+ (0 - 8bits, 1 - 16bits, 2 - 32bits, ..., 6 - 512bits)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3, 4, 5, 6]
+
+ snps,priority:
+ description: |
+ Channel priority specifier associated with the DMA channels.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 8
+
+ snps,block-size:
+ description: |
+ Channel block size specifier associated with the DMA channels.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 8
+
+ snps,axi-max-burst-len:
+ description: |
+ Restrict master AXI burst length by value specified in this property.
+ If this property is missing the maximum AXI burst length supported by
+ DMAC is used.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 256
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - '#dma-cells'
+ - dma-channels
+ - snps,dma-masters
+ - snps,data-width
+ - snps,priority
+ - snps,block-size
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ /* example with snps,dw-axi-dmac */
+ dmac: dma-controller@80000 {
+ compatible = "snps,axi-dma-1.01a";
+ reg = <0x80000 0x400>;
+ clocks = <&core_clk>, <&cfgr_clk>;
+ clock-names = "core-clk", "cfgr-clk";
+ interrupt-parent = <&intc>;
+ interrupts = <27>;
+ #dma-cells = <1>;
+ dma-channels = <4>;
+ snps,dma-masters = <2>;
+ snps,data-width = <3>;
+ snps,block-size = <4096 4096 4096 4096>;
+ snps,priority = <0 1 2 3>;
+ snps,axi-max-burst-len = <16>;
+ };
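A hedged sketch of the intel,kmb-axi-dma variant, which uses the optional second register range together with reg-names; the addresses, interrupt and clock phandles are placeholders:

dma-controller@20250000 {
	compatible = "intel,kmb-axi-dma";
	reg = <0x20250000 0x400>,	/* axidma_ctrl_regs (placeholder) */
	      <0x20260000 0x100>;	/* axidma_apb_regs (placeholder) */
	reg-names = "axidma_ctrl_regs", "axidma_apb_regs";
	clocks = <&axi_clk>, <&apb_clk>;	/* placeholders */
	clock-names = "core-clk", "cfgr-clk";
	interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;	/* placeholder */
	#dma-cells = <1>;
	dma-channels = <8>;
	snps,dma-masters = <1>;
	snps,data-width = <2>;
	snps,block-size = <1024 1024 1024 1024 1024 1024 1024 1024>;
	snps,priority = <0 1 2 3 4 5 6 7>;
};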
diff --git a/Documentation/devicetree/bindings/dma/ste-coh901318.txt b/Documentation/devicetree/bindings/dma/ste-coh901318.txt
deleted file mode 100644
index 091ad057e9cf..000000000000
--- a/Documentation/devicetree/bindings/dma/ste-coh901318.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-ST-Ericsson COH 901 318 DMA Controller
-
-This is a DMA controller which has begun as a fork of the
-ARM PL08x PrimeCell VHDL code.
-
-Required properties:
-- compatible: should be "stericsson,coh901318"
-- reg: register locations and length
-- interrupts: the single DMA IRQ
-- #dma-cells: must be set to <1>, as the channels on the
- COH 901 318 are simple and identified by a single number
-- dma-channels: the number of DMA channels handled
-
-Example:
-
-dmac: dma-controller@c00020000 {
- compatible = "stericsson,coh901318";
- reg = <0xc0020000 0x1000>;
- interrupt-parent = <&vica>;
- interrupts = <2>;
- #dma-cells = <1>;
- dma-channels = <40>;
-};
-
-Consumers example:
-
-uart0: serial@c0013000 {
- compatible = "...";
- (...)
- dmas = <&dmac 17 &dmac 18>;
- dma-names = "tx", "rx";
-};
diff --git a/Documentation/devicetree/bindings/dma/zxdma.txt b/Documentation/devicetree/bindings/dma/zxdma.txt
deleted file mode 100644
index 0ab80f69e566..000000000000
--- a/Documentation/devicetree/bindings/dma/zxdma.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-* ZTE ZX296702 DMA controller
-
-Required properties:
-- compatible: Should be "zte,zx296702-dma"
-- reg: Should contain DMA registers location and length.
-- interrupts: Should contain one interrupt shared by all channel
-- #dma-cells: see dma.txt, should be 1, para number
-- dma-channels: physical channels supported
-- dma-requests: virtual channels supported, each virtual channel
- have specific request line
-- clocks: clock required
-
-Example:
-
-Controller:
- dma: dma-controller@09c00000{
- compatible = "zte,zx296702-dma";
- reg = <0x09c00000 0x1000>;
- clocks = <&topclk ZX296702_DMA_ACLK>;
- interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
- #dma-cells = <1>;
- dma-channels = <24>;
- dma-requests = <24>;
- };
-
-Client:
-Use specific request line passing from dmax
-For example, spdif0 tx channel request line is 4
- spdif0: spdif0@b004000 {
- #sound-dai-cells = <0>;
- compatible = "zte,zx296702-spdif";
- reg = <0x0b004000 0x1000>;
- clocks = <&lsp0clk ZX296702_SPDIF0_DIV>;
- clock-names = "tx";
- interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&dma 4>;
- dma-names = "tx";
- }
diff --git a/Documentation/devicetree/bindings/dsp/fsl,dsp.yaml b/Documentation/devicetree/bindings/dsp/fsl,dsp.yaml
index 4cc011230153..7afc9f2be13a 100644
--- a/Documentation/devicetree/bindings/dsp/fsl,dsp.yaml
+++ b/Documentation/devicetree/bindings/dsp/fsl,dsp.yaml
@@ -21,7 +21,7 @@ properties:
- fsl,imx8mp-dsp
reg:
- description: Should contain register location and length
+ maxItems: 1
clocks:
items:
diff --git a/Documentation/devicetree/bindings/eeprom/at24.yaml b/Documentation/devicetree/bindings/eeprom/at24.yaml
index d5117c638b75..021d8ae42da3 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.yaml
+++ b/Documentation/devicetree/bindings/eeprom/at24.yaml
@@ -96,9 +96,6 @@ properties:
# These are special cases that don't conform to the above pattern.
# Each requires a standard at24 model as fallback.
- items:
- - const: rohm,br24t01
- - const: atmel,24c01
- - items:
- const: nxp,se97b
- const: atmel,24c02
- items:
@@ -113,6 +110,12 @@ properties:
- items:
- const: renesas,r1ex24128
- const: atmel,24c128
+ - items:
+ - const: rohm,br24g01
+ - const: atmel,24c01
+ - items:
+ - const: rohm,br24t01
+ - const: atmel,24c01
label:
description: Descriptive name of the EEPROM.
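A hedged sketch of a node for the newly listed ROHM part, using the required atmel,24c01 fallback; the I2C address is a placeholder:

eeprom@50 {
	compatible = "rohm,br24g01", "atmel,24c01";
	reg = <0x50>;	/* placeholder I2C address */
};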
diff --git a/Documentation/devicetree/bindings/eeprom/at25.yaml b/Documentation/devicetree/bindings/eeprom/at25.yaml
index 121a601db22e..6a2dc8b3ed14 100644
--- a/Documentation/devicetree/bindings/eeprom/at25.yaml
+++ b/Documentation/devicetree/bindings/eeprom/at25.yaml
@@ -39,8 +39,7 @@ properties:
- const: atmel,at25
reg:
- description:
- Chip select number.
+ maxItems: 1
spi-max-frequency: true
diff --git a/Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml b/Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
index 4b0f414486d2..d5cfa32ea52d 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
+++ b/Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
@@ -19,6 +19,7 @@ properties:
const: nxp,ptn5150
int-gpios:
+ maxItems: 1
deprecated: true
description:
GPIO pin (input) connected to the PTN5150's INTB pin.
@@ -31,6 +32,7 @@ properties:
maxItems: 1
vbus-gpios:
+ maxItems: 1
description:
GPIO pin (output) used to control VBUS. If skipped, no such control
takes place.
diff --git a/Documentation/devicetree/bindings/extcon/wlf,arizona.yaml b/Documentation/devicetree/bindings/extcon/wlf,arizona.yaml
index 5fe784f487c5..efdf59abb2e1 100644
--- a/Documentation/devicetree/bindings/extcon/wlf,arizona.yaml
+++ b/Documentation/devicetree/bindings/extcon/wlf,arizona.yaml
@@ -85,7 +85,6 @@ properties:
wlf,micd-timeout-ms:
description:
Timeout for microphone detection, specified in milliseconds.
- $ref: "/schemas/types.yaml#/definitions/uint32"
wlf,micd-force-micbias:
description:
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
index 78456437df5f..a884955f861e 100644
--- a/Documentation/devicetree/bindings/firmware/qcom,scm.txt
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
@@ -22,6 +22,8 @@ Required properties:
* "qcom,scm-sc7180"
* "qcom,scm-sdm845"
* "qcom,scm-sm8150"
+ * "qcom,scm-sm8250"
+ * "qcom,scm-sm8350"
and:
* "qcom,scm"
- clocks: Specifies clocks needed by the SCM interface, if any:
diff --git a/Documentation/devicetree/bindings/gpio/gpio-atlas7.txt b/Documentation/devicetree/bindings/gpio/gpio-atlas7.txt
deleted file mode 100644
index d7e123fc90b5..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio-atlas7.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-CSR SiRFatlas7 GPIO controller bindings
-
-Required properties:
-- compatible : "sirf,atlas7-gpio"
-- reg : Address range of the pinctrl registers
-- interrupts : Interrupts used by every GPIO group
-- gpio-banks : How many gpio banks on this controller
-- gpio-controller : Indicates this device is a GPIO controller
-- interrupt-controller : Marks the device node as an interrupt controller
-
-The GPIO controller also acts as an interrupt controller. It uses the default
-two cells specifier as described in Documentation/devicetree/bindings/
-interrupt-controller/interrupts.txt.
-
-Example:
-
- gpio_0: gpio_mediam@17040000 {
- compatible = "sirf,atlas7-gpio";
- reg = <0x17040000 0x1000>;
- interrupts = <0 13 0>, <0 14 0>;
-
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
-
- gpio-controller;
- interrupt-controller;
-
- gpio-banks = <2>;
- gpio-ranges = <&pinctrl 0 0 0>,
- <&pinctrl 32 0 0>;
- gpio-ranges-group-names = "lvds_gpio_grp",
- "uart_nand_gpio_grp";
- };
-
- leds {
- compatible = "gpio-leds";
-
- led1 {
- gpios = <&gpio_1 15 0>;
- ...
- };
-
- led2 {
- gpios = <&gpio_2 34 0>;
- ...
- };
- };
-
-Please refer to gpio.txt in this directory for details of the common
-gpio properties used by devices.
diff --git a/Documentation/devicetree/bindings/gpio/gpio-davinci.txt b/Documentation/devicetree/bindings/gpio/gpio-davinci.txt
index cd91d61eac31..696ea46227d1 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-davinci.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-davinci.txt
@@ -7,6 +7,7 @@ Required Properties:
"ti,k2g-gpio", "ti,keystone-gpio": for 66AK2G
"ti,am654-gpio", "ti,keystone-gpio": for TI K3 AM654
"ti,j721e-gpio", "ti,keystone-gpio": for J721E SoCs
+ "ti,am64-gpio", "ti,keystone-gpio": for AM64 SoCs
- reg: Physical base address of the controller and the size of memory mapped
registers.
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml b/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml
index f5ee23c2df60..b6a6e742b66d 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml
+++ b/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml
@@ -32,6 +32,7 @@ properties:
- maxim,max7327
- nxp,pca6416
- nxp,pca9505
+ - nxp,pca9506
- nxp,pca9534
- nxp,pca9535
- nxp,pca9536
@@ -70,7 +71,7 @@ properties:
gpio-line-names:
minItems: 1
- maxItems: 32
+ maxItems: 40
interrupts:
maxItems: 1
@@ -81,6 +82,7 @@ properties:
const: 2
reset-gpios:
+ maxItems: 1
description:
GPIO specification for the RESET input. This is an active low signal to
the PCA953x. Not valid for Maxim MAX732x devices.
diff --git a/Documentation/devicetree/bindings/gpio/gpio-stericsson-coh901.txt b/Documentation/devicetree/bindings/gpio/gpio-stericsson-coh901.txt
deleted file mode 100644
index fd665b44d767..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio-stericsson-coh901.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-ST-Ericsson COH 901 571/3 GPIO controller
-
-Required properties:
-- compatible: Compatible property value should be "stericsson,gpio-coh901"
-- reg: Physical base address of the controller and length of memory mapped
- region.
-- interrupts: the 0...n interrupts assigned to the different GPIO ports/banks.
diff --git a/Documentation/devicetree/bindings/gpio/mrvl-gpio.yaml b/Documentation/devicetree/bindings/gpio/mrvl-gpio.yaml
index 4db3b8a3332c..9cf6137dd524 100644
--- a/Documentation/devicetree/bindings/gpio/mrvl-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/mrvl-gpio.yaml
@@ -82,8 +82,7 @@ properties:
'#gpio-cells':
const: 2
- gpio-ranges:
- maxItems: 1
+ gpio-ranges: true
interrupts: true
diff --git a/Documentation/devicetree/bindings/gpio/mstar,msc313-gpio.yaml b/Documentation/devicetree/bindings/gpio/mstar,msc313-gpio.yaml
index 1f2ef408bb43..fe1e1c63ffe3 100644
--- a/Documentation/devicetree/bindings/gpio/mstar,msc313-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/mstar,msc313-gpio.yaml
@@ -46,7 +46,7 @@ examples:
#include <dt-bindings/gpio/msc313-gpio.h>
gpio: gpio@207800 {
- compatible = "mstar,msc313e-gpio";
+ compatible = "mstar,msc313-gpio";
#gpio-cells = <2>;
reg = <0x207800 0x200>;
gpio-controller;
diff --git a/Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml b/Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml
index 5026662e4508..f2541739ee3b 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml
@@ -48,6 +48,9 @@ properties:
- renesas,gpio-r8a77995 # R-Car D3
- const: renesas,rcar-gen3-gpio # R-Car Gen3 or RZ/G2
+ - items:
+ - const: renesas,gpio-r8a779a0 # R-Car V3U
+
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml b/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
index a0efd8dc2538..c2902aac2514 100644
--- a/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
@@ -13,7 +13,10 @@ maintainers:
properties:
compatible:
items:
- - const: sifive,fu540-c000-gpio
+ - enum:
+ - sifive,fu540-c000-gpio
+ - sifive,fu740-c000-gpio
+ - canaan,k210-gpiohs
- const: sifive,gpio0
reg:
@@ -21,9 +24,9 @@ properties:
interrupts:
description:
- interrupt mapping one per GPIO. Maximum 16 GPIOs.
+ Interrupt mapping, one per GPIO. Maximum 32 GPIOs.
minItems: 1
- maxItems: 16
+ maxItems: 32
interrupt-controller: true
@@ -36,6 +39,14 @@ properties:
"#gpio-cells":
const: 2
+ ngpios:
+ description:
+ The number of GPIOs available on the controller implementation.
+ It is 16 for the SiFive SoCs and 32 for the Canaan K210.
+ minimum: 1
+ maximum: 32
+ default: 16
+
gpio-controller: true
required:
@@ -44,10 +55,20 @@ required:
- interrupts
- interrupt-controller
- "#interrupt-cells"
- - clocks
- "#gpio-cells"
- gpio-controller
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - sifive,fu540-c000-gpio
+ - sifive,fu740-c000-gpio
+then:
+ required:
+ - clocks
+
additionalProperties: false
examples:
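A hedged sketch of a Canaan K210 GPIOHS node, which per the new if/then clause does not need a clocks property; the base address and interrupt numbers are placeholders, and only the first few of the up-to-32 per-GPIO interrupts are shown:

gpio@38001000 {
	compatible = "canaan,k210-gpiohs", "sifive,gpio0";
	reg = <0x38001000 0x1000>;		/* placeholder */
	interrupt-parent = <&plic>;
	interrupts = <34>, <35>, <36>, <37>;	/* one per GPIO, placeholders */
	gpio-controller;
	#gpio-cells = <2>;
	interrupt-controller;
	#interrupt-cells = <2>;
	ngpios = <32>;
};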
diff --git a/Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml b/Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml
new file mode 100644
index 000000000000..9ad470e01953
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/toshiba,gpio-visconti.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Toshiba Visconti ARM SoCs GPIO controller
+
+maintainers:
+ - Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+
+properties:
+ compatible:
+ items:
+ - const: toshiba,gpio-tmpv7708
+
+ reg:
+ maxItems: 1
+
+ "#gpio-cells":
+ const: 2
+
+ gpio-ranges: true
+
+ gpio-controller: true
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+
+ interrupts:
+ description:
+ Interrupt mapping, one per GPIO.
+ minItems: 16
+ maxItems: 16
+
+required:
+ - compatible
+ - reg
+ - "#gpio-cells"
+ - gpio-ranges
+ - gpio-controller
+ - interrupt-controller
+ - "#interrupt-cells"
+ - interrupt-parent
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ gpio: gpio@28020000 {
+ compatible = "toshiba,gpio-tmpv7708";
+ reg = <0 0x28020000 0 0x1000>;
+ #gpio-cells = <0x2>;
+ gpio-ranges = <&pmux 0 0 32>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&gic>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/gpio/zx296702-gpio.txt b/Documentation/devicetree/bindings/gpio/zx296702-gpio.txt
deleted file mode 100644
index 0dab156fcf41..000000000000
--- a/Documentation/devicetree/bindings/gpio/zx296702-gpio.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-ZTE ZX296702 GPIO controller
-
-Required properties:
-- compatible : "zte,zx296702-gpio"
-- #gpio-cells : Should be two. The first cell is the pin number and the
- second cell is used to specify optional parameters:
- - bit 0 specifies polarity (0 for normal, 1 for inverted)
-- gpio-controller : Marks the device node as a GPIO controller.
-- interrupts : Interrupt mapping for GPIO IRQ.
-- gpio-ranges : Interaction with the PINCTRL subsystem.
-
-gpio1: gpio@b008040 {
- compatible = "zte,zx296702-gpio";
- reg = <0xb008040 0x40>;
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = < &pmx0 0 54 2 &pmx0 2 59 14>;
- interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-parent = <&intc>;
- interrupt-controller;
- #interrupt-cells = <2>;
- clock-names = "gpio_pclk";
- clocks = <&lsp0clk ZX296702_GPIO_CLK>;
-};
diff --git a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt
deleted file mode 100644
index b2df82b44625..000000000000
--- a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-Broadcom V3D GPU
-
-Only the Broadcom V3D 3.x and newer GPUs are covered by this binding.
-For V3D 2.x, see brcm,bcm-vc4.txt.
-
-Required properties:
-- compatible: Should be "brcm,7268-v3d" or "brcm,7278-v3d"
-- reg: Physical base addresses and lengths of the register areas
-- reg-names: Names for the register areas. The "hub" and "core0"
- register areas are always required. The "gca" register area
- is required if the GCA cache controller is present. The
- "bridge" register area is required if an external reset
- controller is not present.
-- interrupts: The interrupt numbers. The first interrupt is for the hub,
- while the following interrupts are separate interrupt lines
- for the cores (if they don't share the hub's interrupt).
- See bindings/interrupt-controller/interrupts.txt
-
-Optional properties:
-- clocks: The core clock the unit runs on
-- resets: The reset line for v3d, if not using a mapping of the bridge
- See bindings/reset/reset.txt
-
-v3d {
- compatible = "brcm,7268-v3d";
- reg = <0xf1204000 0x100>,
- <0xf1200000 0x4000>,
- <0xf1208000 0x4000>,
- <0xf1204100 0x100>;
- reg-names = "bridge", "hub", "core0", "gca";
- interrupts = <0 78 4>,
- <0 77 4>;
-};
diff --git a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
new file mode 100644
index 000000000000..9d72264fa90a
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpu/brcm,bcm-v3d.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom V3D GPU Bindings
+
+maintainers:
+ - Eric Anholt <eric@anholt.net>
+ - Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+
+properties:
+ $nodename:
+ pattern: '^gpu@[a-f0-9]+$'
+
+ compatible:
+ enum:
+ - brcm,7268-v3d
+ - brcm,7278-v3d
+
+ reg:
+ items:
+ - description: hub register (required)
+ - description: core0 register (required)
+ - description: GCA cache controller register (if GCA controller present)
+ - description: bridge register (if no external reset controller)
+ minItems: 2
+
+ reg-names:
+ items:
+ - const: hub
+ - const: core0
+ - enum: [ bridge, gca ]
+ - enum: [ bridge, gca ]
+ minItems: 2
+ maxItems: 4
+
+ interrupts:
+ items:
+ - description: hub interrupt (required)
+ - description: core0 interrupt (if it doesn't share the hub's interrupt)
+ minItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ gpu@f1200000 {
+ compatible = "brcm,7268-v3d";
+ reg = <0xf1200000 0x4000>,
+ <0xf1208000 0x4000>,
+ <0xf1204000 0x100>,
+ <0xf1204100 0x100>;
+ reg-names = "hub", "core0", "bridge", "gca";
+ interrupts = <0 78 4>,
+ <0 77 4>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/graph.txt b/Documentation/devicetree/bindings/graph.txt
index 0415e2c53ba0..14733b5cb61e 100644
--- a/Documentation/devicetree/bindings/graph.txt
+++ b/Documentation/devicetree/bindings/graph.txt
@@ -1,128 +1 @@
-Common bindings for device graphs
-
-General concept
----------------
-
-The hierarchical organisation of the device tree is well suited to describe
-control flow to devices, but there can be more complex connections between
-devices that work together to form a logical compound device, following an
-arbitrarily complex graph.
-There already is a simple directed graph between devices tree nodes using
-phandle properties pointing to other nodes to describe connections that
-can not be inferred from device tree parent-child relationships. The device
-tree graph bindings described herein abstract more complex devices that can
-have multiple specifiable ports, each of which can be linked to one or more
-ports of other devices.
-
-These common bindings do not contain any information about the direction or
-type of the connections, they just map their existence. Specific properties
-may be described by specialized bindings depending on the type of connection.
-
-To see how this binding applies to video pipelines, for example, see
-Documentation/devicetree/bindings/media/video-interfaces.txt.
-Here the ports describe data interfaces, and the links between them are
-the connecting data buses. A single port with multiple connections can
-correspond to multiple devices being connected to the same physical bus.
-
-Organisation of ports and endpoints
------------------------------------
-
-Ports are described by child 'port' nodes contained in the device node.
-Each port node contains an 'endpoint' subnode for each remote device port
-connected to this port. If a single port is connected to more than one
-remote device, an 'endpoint' child node must be provided for each link.
-If more than one port is present in a device node or there is more than one
-endpoint at a port, or a port node needs to be associated with a selected
-hardware interface, a common scheme using '#address-cells', '#size-cells'
-and 'reg' properties is used to number the nodes.
-
-device {
- ...
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
-
- endpoint@0 {
- reg = <0>;
- ...
- };
- endpoint@1 {
- reg = <1>;
- ...
- };
- };
-
- port@1 {
- reg = <1>;
-
- endpoint { ... };
- };
-};
-
-All 'port' nodes can be grouped under an optional 'ports' node, which
-allows to specify #address-cells, #size-cells properties for the 'port'
-nodes independently from any other child device nodes a device might
-have.
-
-device {
- ...
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- ...
- endpoint@0 { ... };
- endpoint@1 { ... };
- };
-
- port@1 { ... };
- };
-};
-
-Links between endpoints
------------------------
-
-Each endpoint should contain a 'remote-endpoint' phandle property that points
-to the corresponding endpoint in the port of the remote device. In turn, the
-remote endpoint should contain a 'remote-endpoint' property. If it has one, it
-must not point to anything other than the local endpoint. Two endpoints with
-their 'remote-endpoint' phandles pointing at each other form a link between the
-containing ports.
-
-device-1 {
- port {
- device_1_output: endpoint {
- remote-endpoint = <&device_2_input>;
- };
- };
-};
-
-device-2 {
- port {
- device_2_input: endpoint {
- remote-endpoint = <&device_1_output>;
- };
- };
-};
-
-Required properties
--------------------
-
-If there is more than one 'port' or more than one 'endpoint' node or 'reg'
-property present in the port and/or endpoint nodes then the following
-properties are required in a relevant parent node:
-
- - #address-cells : number of cells required to define port/endpoint
- identifier, should be 1.
- - #size-cells : should be zero.
-
-Optional endpoint properties
-----------------------------
-
-- remote-endpoint: phandle to an 'endpoint' subnode of a remote device node.
-
+This file has moved to graph.yaml in the dt-schema repository.
diff --git a/Documentation/devicetree/bindings/hwlock/ti,omap-hwspinlock.yaml b/Documentation/devicetree/bindings/hwlock/ti,omap-hwspinlock.yaml
index ac35491a6f65..ae1b37dbee75 100644
--- a/Documentation/devicetree/bindings/hwlock/ti,omap-hwspinlock.yaml
+++ b/Documentation/devicetree/bindings/hwlock/ti,omap-hwspinlock.yaml
@@ -13,6 +13,7 @@ properties:
compatible:
enum:
- ti,omap4-hwspinlock # for OMAP44xx, OMAP54xx, AM33xx, AM43xx, DRA7xx SoCs
+ - ti,am64-hwspinlock # for K3 AM64x SoCs
- ti,am654-hwspinlock # for K3 AM65x, J721E and J7200 SoCs
reg:
diff --git a/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml b/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
index eef614962b10..bf04151b63d2 100644
--- a/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
@@ -49,7 +49,6 @@ properties:
description:
This property controls the Accumulation Dead band which allows to set the
level of current below which no accumulation takes place.
- $ref: /schemas/types.yaml#/definitions/uint32
maximum: 255
default: 0
diff --git a/Documentation/devicetree/bindings/hwmon/baikal,bt1-pvt.yaml b/Documentation/devicetree/bindings/hwmon/baikal,bt1-pvt.yaml
index 00a6511354e6..5d3ce641fcde 100644
--- a/Documentation/devicetree/bindings/hwmon/baikal,bt1-pvt.yaml
+++ b/Documentation/devicetree/bindings/hwmon/baikal,bt1-pvt.yaml
@@ -73,11 +73,9 @@ properties:
description: |
Temperature sensor trimming factor. It can be used to manually adjust the
temperature measurements within 7.130 degrees Celsius.
- maxItems: 1
- items:
- default: 0
- minimum: 0
- maximum: 7130
+ default: 0
+ minimum: 0
+ maximum: 7130
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml b/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml
index 8020d739a078..1502b22c77cc 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml
@@ -52,7 +52,6 @@ properties:
ti,bus-range-microvolt:
description: |
This is the operating range of the bus voltage in microvolt
- $ref: /schemas/types.yaml#/definitions/uint32
enum: [16000000, 32000000]
default: 32000000
diff --git a/Documentation/devicetree/bindings/hwmon/ti,tps23861.yaml b/Documentation/devicetree/bindings/hwmon/ti,tps23861.yaml
new file mode 100644
index 000000000000..3bc8e73dfbf0
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/ti,tps23861.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+
+$id: http://devicetree.org/schemas/hwmon/ti,tps23861.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI TPS23861 PoE PSE
+
+maintainers:
+ - Robert Marko <robert.marko@sartura.hr>
+
+description: |
+ The TPS23861 is an IEEE 802.3at Quad Port Power-over-Ethernet PSE Controller.
+
+ Datasheets:
+ https://www.ti.com/lit/gpn/tps23861
+
+
+properties:
+ compatible:
+ enum:
+ - ti,tps23861
+
+ reg:
+ maxItems: 1
+
+ shunt-resistor-micro-ohms:
+ description: The value of the current sense resistor in microohms.
+ default: 255000
+ minimum: 250000
+ maximum: 255000
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tps23861@30 {
+ compatible = "ti,tps23861";
+ reg = <0x30>;
+ shunt-resistor-micro-ohms = <255000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-gpio.yaml b/Documentation/devicetree/bindings/i2c/i2c-gpio.yaml
index cc3aa2a5e70b..ff99344788ab 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-gpio.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-gpio.yaml
@@ -39,11 +39,9 @@ properties:
i2c-gpio,delay-us:
description: delay between GPIO operations (may depend on each platform)
- $ref: /schemas/types.yaml#/definitions/uint32
i2c-gpio,timeout-ms:
description: timeout to get data
- $ref: /schemas/types.yaml#/definitions/uint32
# Deprecated properties, do not use in new device tree sources:
gpios:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sirf.txt b/Documentation/devicetree/bindings/i2c/i2c-sirf.txt
deleted file mode 100644
index 2701eefb00f7..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-sirf.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-I2C for SiRFprimaII platforms
-
-Required properties :
-- compatible : Must be "sirf,prima2-i2c"
-- reg: physical base address of the controller and length of memory mapped
- region.
-- interrupts: interrupt number to the cpu.
-
-Optional properties:
-- clock-frequency : Constains desired I2C/HS-I2C bus clock frequency in Hz.
- The absence of the property indicates the default frequency 100 kHz.
-
-Examples :
-
-i2c0: i2c@b00e0000 {
- compatible = "sirf,prima2-i2c";
- reg = <0xb00e0000 0x10000>;
- interrupts = <24>;
-};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-stu300.txt b/Documentation/devicetree/bindings/i2c/i2c-stu300.txt
deleted file mode 100644
index bd81a482634f..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-stu300.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-ST Microelectronics DDC I2C
-
-Required properties :
-- compatible : Must be "st,ddci2c"
-- reg: physical base address of the controller and length of memory mapped
- region.
-- interrupts: interrupt number to the cpu.
-- #address-cells = <1>;
-- #size-cells = <0>;
-
-Optional properties:
-- Child nodes conforming to i2c bus binding
-
-Examples :
-
diff --git a/Documentation/devicetree/bindings/i2c/i2c-zx2967.txt b/Documentation/devicetree/bindings/i2c/i2c-zx2967.txt
deleted file mode 100644
index cb806d1ae4c9..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-zx2967.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-ZTE zx2967 I2C controller
-
-Required properties:
- - compatible: must be "zte,zx296718-i2c"
- - reg: physical address and length of the device registers
- - interrupts: a single interrupt specifier
- - clocks: clock for the device
- - #address-cells: should be <1>
- - #size-cells: should be <0>
- - clock-frequency: the desired I2C bus clock frequency.
-
-Examples:
-
- i2c@112000 {
- compatible = "zte,zx296718-i2c";
- reg = <0x00112000 0x1000>;
- interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&osc24m>;
- #address-cells = <1>
- #size-cells = <0>;
- clock-frequency = <1600000>;
- };
diff --git a/Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml b/Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
index 5b5ae402f97a..eb72dd571def 100644
--- a/Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
@@ -18,21 +18,14 @@ properties:
- const: allwinner,sun4i-a10-i2c
- const: allwinner,sun6i-a31-i2c
- items:
- - const: allwinner,sun8i-a23-i2c
+ - enum:
+ - allwinner,sun8i-a23-i2c
+ - allwinner,sun8i-a83t-i2c
+ - allwinner,sun50i-a64-i2c
+ - allwinner,sun50i-a100-i2c
+ - allwinner,sun50i-h6-i2c
+ - allwinner,sun50i-h616-i2c
- const: allwinner,sun6i-a31-i2c
- - items:
- - const: allwinner,sun8i-a83t-i2c
- - const: allwinner,sun6i-a31-i2c
- - items:
- - const: allwinner,sun50i-a64-i2c
- - const: allwinner,sun6i-a31-i2c
- - items:
- - const: allwinner,sun50i-a100-i2c
- - const: allwinner,sun6i-a31-i2c
- - items:
- - const: allwinner,sun50i-h6-i2c
- - const: allwinner,sun6i-a31-i2c
-
- const: marvell,mv64xxx-i2c
- const: marvell,mv78230-i2c
- const: marvell,mv78230-a0-i2c
diff --git a/Documentation/devicetree/bindings/i2c/nuvoton,npcm7xx-i2c.yaml b/Documentation/devicetree/bindings/i2c/nuvoton,npcm7xx-i2c.yaml
index e3ef2d36f372..128444942aec 100644
--- a/Documentation/devicetree/bindings/i2c/nuvoton,npcm7xx-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/nuvoton,npcm7xx-i2c.yaml
@@ -17,7 +17,7 @@ maintainers:
properties:
compatible:
- const: nuvoton,npcm7xx-i2c
+ const: nuvoton,npcm750-i2c
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/i2c/renesas,i2c.txt b/Documentation/devicetree/bindings/i2c/renesas,i2c.txt
index 96d869ac3839..5762d2d1ab9c 100644
--- a/Documentation/devicetree/bindings/i2c/renesas,i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/renesas,i2c.txt
@@ -26,6 +26,7 @@ Required properties:
"renesas,i2c-r8a77980" if the device is a part of a R8A77980 SoC.
"renesas,i2c-r8a77990" if the device is a part of a R8A77990 SoC.
"renesas,i2c-r8a77995" if the device is a part of a R8A77995 SoC.
+ "renesas,i2c-r8a779a0" if the device is a part of a R8A779A0 SoC.
"renesas,rcar-gen1-i2c" for a generic R-Car Gen1 compatible device.
"renesas,rcar-gen2-i2c" for a generic R-Car Gen2 or RZ/G1 compatible
device.
diff --git a/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml b/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml
index c22b66b6219e..d9293c57f573 100644
--- a/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml
@@ -66,21 +66,18 @@ properties:
default: 400000
i2c-sda-hold-time-ns:
- maxItems: 1
description: |
The property should contain the SDA hold time in nanoseconds. This option
is only supported in hardware blocks version 1.11a or newer or on
Microsemi SoCs.
i2c-scl-falling-time-ns:
- maxItems: 1
description: |
The property should contain the SCL falling time in nanoseconds.
This value is used to compute the tLOW period.
default: 300
i2c-sda-falling-time-ns:
- maxItems: 1
description: |
The property should contain the SDA falling time in nanoseconds.
This value is used to compute the tHIGH period.
diff --git a/Documentation/devicetree/bindings/i3c/i3c.txt b/Documentation/devicetree/bindings/i3c/i3c.txt
deleted file mode 100644
index 4ffe059f0fec..000000000000
--- a/Documentation/devicetree/bindings/i3c/i3c.txt
+++ /dev/null
@@ -1,140 +0,0 @@
-Generic device tree bindings for I3C busses
-===========================================
-
-This document describes generic bindings that should be used to describe I3C
-busses in a device tree.
-
-Required properties
--------------------
-
-- #address-cells - should be <3>. Read more about addresses below.
-- #size-cells - should be <0>.
-- compatible - name of the I3C master controller driving the I3C bus
-
-For other required properties e.g. to describe register sets,
-clocks, etc. check the binding documentation of the specific driver.
-The node describing an I3C bus should be named i3c-master.
-
-Optional properties
--------------------
-
-These properties may not be supported by all I3C master drivers. Each I3C
-master bindings should specify which of them are supported.
-
-- i3c-scl-hz: frequency of the SCL signal used for I3C transfers.
- When undefined the core sets it to 12.5MHz.
-
-- i2c-scl-hz: frequency of the SCL signal used for I2C transfers.
- When undefined, the core looks at LVR (Legacy Virtual Register)
- values of I2C devices described in the device tree to determine
- the maximum I2C frequency.
-
-I2C devices
-===========
-
-Each I2C device connected to the bus should be described in a subnode. All
-properties described in Documentation/devicetree/bindings/i2c/i2c.txt are
-valid here, but several new properties have been added.
-
-New constraint on existing properties:
---------------------------------------
-- reg: contains 3 cells
- + first cell : still encoding the I2C address. 10 bit addressing is not
- supported. Devices with 10 bit address can't be properly passed through
- DEFSLVS command.
-
- + second cell: shall be 0
-
- + third cell: shall encode the I3C LVR (Legacy Virtual Register)
- bit[31:8]: unused/ignored
- bit[7:5]: I2C device index. Possible values
- * 0: I2C device has a 50 ns spike filter
- * 1: I2C device does not have a 50 ns spike filter but supports high
- frequency on SCL
- * 2: I2C device does not have a 50 ns spike filter and is not tolerant
- to high frequencies
- * 3-7: reserved
-
- bit[4]: tell whether the device operates in FM (Fast Mode) or FM+ mode
- * 0: FM+ mode
- * 1: FM mode
-
- bit[3:0]: device type
- * 0-15: reserved
-
-The I2C node unit-address should always match the first cell of the reg
-property: <device-type>@<i2c-address>.
-
-I3C devices
-===========
-
-All I3C devices are supposed to support DAA (Dynamic Address Assignment), and
-are thus discoverable. So, by default, I3C devices do not have to be described
-in the device tree.
-This being said, one might want to attach extra resources to these devices,
-and those resources may have to be described in the device tree, which in turn
-means we have to describe I3C devices.
-
-Another use case for describing an I3C device in the device tree is when this
-I3C device has a static I2C address and we want to assign it a specific I3C
-dynamic address before the DAA takes place (so that other devices on the bus
-can't take this dynamic address).
-
-The I3C device should be names <device-type>@<static-i2c-address>,<i3c-pid>,
-where device-type is describing the type of device connected on the bus
-(gpio-controller, sensor, ...).
-
-Required properties
--------------------
-- reg: contains 3 cells
- + first cell : encodes the static I2C address. Should be 0 if the device does
- not have one (0 is not a valid I2C address).
-
- + second and third cells: should encode the ProvisionalID. The second cell
- contains the manufacturer ID left-shifted by 1.
- The third cell contains ORing of the part ID
- left-shifted by 16, the instance ID left-shifted
- by 12 and the extra information. This encoding is
- following the PID definition provided by the I3C
- specification.
-
-Optional properties
--------------------
-- assigned-address: dynamic address to be assigned to this device. This
- property is only valid if the I3C device has a static
- address (first cell of the reg property != 0).
-
-
-Example:
-
- i3c-master@d040000 {
- compatible = "cdns,i3c-master";
- clocks = <&coreclock>, <&i3csysclock>;
- clock-names = "pclk", "sysclk";
- interrupts = <3 0>;
- reg = <0x0d040000 0x1000>;
- #address-cells = <3>;
- #size-cells = <0>;
- i2c-scl-hz = <100000>;
-
- /* I2C device. */
- nunchuk: nunchuk@52 {
- compatible = "nintendo,nunchuk";
- reg = <0x52 0x0 0x10>;
- };
-
- /* I3C device with a static I2C address. */
- thermal_sensor: sensor@68,39200144004 {
- reg = <0x68 0x392 0x144004>;
- assigned-address = <0xa>;
- };
-
- /*
- * I3C device without a static I2C address but requiring
- * resources described in the DT.
- */
- sensor@0,39200154004 {
- reg = <0x0 0x392 0x154004>;
- clocks = <&clock_provider 0>;
- };
- };
diff --git a/Documentation/devicetree/bindings/i3c/i3c.yaml b/Documentation/devicetree/bindings/i3c/i3c.yaml
new file mode 100644
index 000000000000..52042aa44d19
--- /dev/null
+++ b/Documentation/devicetree/bindings/i3c/i3c.yaml
@@ -0,0 +1,179 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i3c/i3c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: I3C bus binding
+
+maintainers:
+ - Alexandre Belloni <alexandre.belloni@bootlin.com>
+ - Miquel Raynal <miquel.raynal@bootlin.com>
+
+description: |
+ I3C busses can be described with a node for the primary I3C controller device
+ and a set of child nodes for each I2C or I3C slave on the bus. Each of them
+ may, during the life of the bus, request mastership.
+
+properties:
+ $nodename:
+ pattern: "^i3c-master@[0-9a-f]+$"
+
+ "#address-cells":
+ const: 3
+ description: |
+ Each I2C device connected to the bus should be described in a subnode.
+
+ All I3C devices are supposed to support DAA (Dynamic Address Assignment),
+ and are thus discoverable. So, by default, I3C devices do not have to be
+ described in the device tree. This being said, one might want to attach
+ extra resources to these devices, and those resources may have to be
+ described in the device tree, which in turn means we have to describe
+ I3C devices.
+
+ Another use case for describing an I3C device in the device tree is when
+ this I3C device has a static I2C address and we want to assign it a
+ specific I3C dynamic address before the DAA takes place (so that other
+ devices on the bus can't take this dynamic address).
+
+ "#size-cells":
+ const: 0
+
+ i3c-scl-hz:
+ description: |
+ Frequency of the SCL signal used for I3C transfers. When undefined, the
+ default value should be 12.5MHz.
+
+ May not be supported by all controllers.
+
+ i2c-scl-hz:
+ description: |
+ Frequency of the SCL signal used for I2C transfers. When undefined, the
+ default should be to look at LVR (Legacy Virtual Register) values of
+ I2C devices described in the device tree to determine the maximum I2C
+ frequency.
+
+ May not be supported by all controllers.
+
+required:
+ - "#address-cells"
+ - "#size-cells"
+
+patternProperties:
+ "@[0-9a-f]+$":
+ type: object
+ description: |
+ I2C child, should be named: <device-type>@<i2c-address>
+
+ All properties described in Documentation/devicetree/bindings/i2c/i2c.txt
+ are valid here, except the reg property whose content is changed.
+
+ properties:
+ compatible:
+ description:
+ Compatible of the I2C device.
+
+ reg:
+ items:
+ - items:
+ - description: |
+ I2C address. 10 bit addressing is not supported. Devices with
+ 10-bit address can't be properly passed through DEFSLVS
+ command.
+ minimum: 0
+ maximum: 0x7f
+ - const: 0
+ - description: |
+ Shall encode the I3C LVR (Legacy Virtual Register):
+ bit[31:8]: unused/ignored
+ bit[7:5]: I2C device index. Possible values:
+ * 0: I2C device has a 50 ns spike filter
+ * 1: I2C device does not have a 50 ns spike filter but
+ supports high frequency on SCL
+ * 2: I2C device does not have a 50 ns spike filter and is
+ not tolerant to high frequencies
+ * 3-7: reserved
+ bit[4]: tell whether the device operates in FM (Fast Mode)
+ or FM+ mode:
+ * 0: FM+ mode
+ * 1: FM mode
+ bit[3:0]: device type
+ * 0-15: reserved
+
+ required:
+ - compatible
+ - reg
+
+ "@[0-9a-f]+,[0-9a-f]+$":
+ type: object
+ description: |
+ I3C child, should be named: <device-type>@<static-i2c-address>,<i3c-pid>
+
+ properties:
+ reg:
+ items:
+ - items:
+ - description: |
+ Encodes the static I2C address. Should be 0 if the device does
+ not have one (0 is not a valid I2C address).
+ minimum: 0
+ maximum: 0x7f
+ - description: |
+ First half of the Provisional ID (following the PID
+ definition provided by the I3C specification).
+
+ Contains the manufacturer ID left-shifted by 1.
+ - description: |
+ Second half of the Provisional ID (following the PID
+ definition provided by the I3C specification).
+
+ Contains the ORing of the part ID left-shifted by 16,
+ the instance ID left-shifted by 12 and extra information.
+
+ assigned-address:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0x1
+ maximum: 0xff
+ description: |
+ Dynamic address to be assigned to this device. This property is only
+ valid if the I3C device has a static address (first cell of the reg
+ property != 0).
+
+ required:
+ - reg
+
+additionalProperties: true
+
+examples:
+ - |
+ i3c-master@d040000 {
+ compatible = "cdns,i3c-master";
+ clocks = <&coreclock>, <&i3csysclock>;
+ clock-names = "pclk", "sysclk";
+ interrupts = <3 0>;
+ reg = <0x0d040000 0x1000>;
+ #address-cells = <3>;
+ #size-cells = <0>;
+ i2c-scl-hz = <100000>;
+
+ /* I2C device. */
+ nunchuk: nunchuk@52 {
+ compatible = "nintendo,nunchuk";
+ reg = <0x52 0x0 0x10>;
+ };
+
+ /* I3C device with a static I2C address. */
+ thermal_sensor: sensor@68,39200144004 {
+ reg = <0x68 0x392 0x144004>;
+ assigned-address = <0xa>;
+ };
+
+ /*
+ * I3C device without a static I2C address but requiring
+ * resources described in the DT.
+ */
+ sensor@0,39200154004 {
+ reg = <0x0 0x392 0x154004>;
+ clocks = <&clock_provider 0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i3c/mipi-i3c-hci.yaml b/Documentation/devicetree/bindings/i3c/mipi-i3c-hci.yaml
index 07a7b10163a3..04da001fc6ec 100644
--- a/Documentation/devicetree/bindings/i3c/mipi-i3c-hci.yaml
+++ b/Documentation/devicetree/bindings/i3c/mipi-i3c-hci.yaml
@@ -9,6 +9,9 @@ title: MIPI I3C HCI Device Tree Bindings
maintainers:
- Nicolas Pitre <npitre@baylibre.com>
+allOf:
+ - $ref: /schemas/i3c/i3c.yaml#
+
description: |
MIPI I3C Host Controller Interface
@@ -36,12 +39,14 @@ required:
- reg
- interrupts
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
- i3c@a0000000 {
+ i3c-master@a0000000 {
compatible = "mipi-i3c-hci";
reg = <0xa0000000 0x2000>;
interrupts = <89>;
+ #address-cells = <3>;
+ #size-cells = <0>;
};
diff --git a/Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml b/Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml
new file mode 100644
index 000000000000..adb5165505aa
--- /dev/null
+++ b/Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i3c/silvaco,i3c-master.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Silvaco I3C master
+
+maintainers:
+ - Conor Culhane <conor.culhane@silvaco.com>
+
+allOf:
+ - $ref: "i3c.yaml#"
+
+properties:
+ compatible:
+ const: silvaco,i3c-master-v1
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: system clock
+ - description: bus clock
+ - description: other (slower) events clock
+
+ clock-names:
+ items:
+ - const: pclk
+ - const: fast_clk
+ - const: slow_clk
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clock-names
+ - clocks
+
+additionalProperties: true
+
+examples:
+ - |
+ i3c-master@a0000000 {
+ compatible = "silvaco,i3c-master";
+ clocks = <&zynqmp_clk 71>, <&fclk>, <&sclk>;
+ clock-names = "pclk", "fast_clk", "slow_clk";
+ interrupt-parent = <&gic>;
+ interrupts = <0 89 4>;
+ reg = <0xa0000000 0x1000>;
+ #address-cells = <3>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml b/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml
index 5667d09dfe6a..fbb714431e3d 100644
--- a/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml
@@ -20,6 +20,9 @@ properties:
reg:
maxItems: 1
+ vdd-supply: true
+ vddio-supply: true
+
mount-matrix:
description: an optional 3x3 mounting rotation matrix.
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
index e0cc3b2e8957..22b7ed3723f6 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
@@ -80,7 +80,7 @@ properties:
type: boolean
bipolar:
- description: see Documentation/devicetree/bindings/iio/adc/adc.txt
+ description: see Documentation/devicetree/bindings/iio/adc/adc.yaml
type: boolean
required:
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.yaml
index 924477dfb833..a85a28145ef6 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.yaml
@@ -40,6 +40,7 @@ properties:
ADC reference voltage supply
adi,sync-in-gpios:
+ maxItems: 1
description:
Enables synchronization of multiple devices that require simultaneous
sampling. A pulse is always required if the configuration is changed
@@ -76,6 +77,7 @@ patternProperties:
properties:
reg:
+ maxItems: 1
description: |
The channel number.
diff --git a/Documentation/devicetree/bindings/iio/adc/aspeed,ast2400-adc.yaml b/Documentation/devicetree/bindings/iio/adc/aspeed,ast2400-adc.yaml
index 7f534a933e92..a726b6c2ab65 100644
--- a/Documentation/devicetree/bindings/iio/adc/aspeed,ast2400-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/aspeed,ast2400-adc.yaml
@@ -23,6 +23,7 @@ properties:
maxItems: 1
clocks:
+ maxItems: 1
description:
Input clock used to derive the sample clock. Expected to be the
SoC's APB clock.
diff --git a/Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml b/Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml
index 2716d4e95329..0bd2fc0356c8 100644
--- a/Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml
@@ -20,7 +20,7 @@ properties:
description: Power supply for the reference voltage
reg:
- description: spi chipselect number according to the usual spi bindings
+ maxItems: 1
spi-max-frequency:
description: maximal spi bus frequency supported
diff --git a/Documentation/devicetree/bindings/iio/adc/maxim,max9611.yaml b/Documentation/devicetree/bindings/iio/adc/maxim,max9611.yaml
index 9475a9e6e920..95774a55629d 100644
--- a/Documentation/devicetree/bindings/iio/adc/maxim,max9611.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/maxim,max9611.yaml
@@ -23,7 +23,6 @@ properties:
maxItems: 1
shunt-resistor-micro-ohms:
- $ref: /schemas/types.yaml#/definitions/uint32
description: |
Value in micro Ohms of the shunt resistor connected between the RS+ and
RS- inputs, across which the current is measured. Value needed to compute
diff --git a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml
index 95cc705b961b..74a4a9d95798 100644
--- a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml
@@ -68,6 +68,7 @@ patternProperties:
properties:
reg:
+ maxItems: 1
description: |
ADC channel number.
See include/dt-bindings/iio/qcom,spmi-vadc.h
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
index 28417b31b558..a58334c3bb76 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
@@ -41,6 +41,8 @@ properties:
maxItems: 2
clocks:
+ minItems: 1
+ maxItems: 2
description: |
Core can use up to two clocks, depending on part used:
- "adc" clock: for the analog circuitry, common to all ADCs.
@@ -246,7 +248,6 @@ patternProperties:
Resolution (bits) to use for conversions:
- can be 6, 8, 10 or 12 on stm32f4
- can be 8, 10, 12, 14 or 16 on stm32h7 and stm32mp1
- $ref: /schemas/types.yaml#/definitions/uint32
st,adc-channels:
description: |
diff --git a/Documentation/devicetree/bindings/iio/adc/ti,palmas-gpadc.yaml b/Documentation/devicetree/bindings/iio/adc/ti,palmas-gpadc.yaml
index 692dacd0fee5..7b895784e008 100644
--- a/Documentation/devicetree/bindings/iio/adc/ti,palmas-gpadc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/ti,palmas-gpadc.yaml
@@ -42,7 +42,6 @@ properties:
const: 1
ti,channel0-current-microamp:
- $ref: /schemas/types.yaml#/definitions/uint32
description: Channel 0 current in uA.
enum:
- 0
@@ -51,7 +50,6 @@ properties:
- 20
ti,channel3-current-microamp:
- $ref: /schemas/types.yaml#/definitions/uint32
description: Channel 3 current in uA.
enum:
- 0
diff --git a/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml b/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml
index 5ccbb1f81960..e759a5da708d 100644
--- a/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml
@@ -46,10 +46,14 @@ description: |
properties:
compatible:
- enum:
- - x-powers,axp209-adc
- - x-powers,axp221-adc
- - x-powers,axp813-adc
+ oneOf:
+ - const: x-powers,axp209-adc
+ - const: x-powers,axp221-adc
+ - const: x-powers,axp813-adc
+
+ - items:
+ - const: x-powers,axp803-adc
+ - const: x-powers,axp813-adc
"#io-channel-cells":
const: 1
diff --git a/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt b/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
index e0e0755cabd8..f42e18078376 100644
--- a/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
@@ -1,13 +1,22 @@
Xilinx XADC device driver
-This binding document describes the bindings for both of them since the
-bindings are very similar. The Xilinx XADC is a ADC that can be found in the
-series 7 FPGAs from Xilinx. The XADC has a DRP interface for communication.
-Currently two different frontends for the DRP interface exist. One that is only
-available on the ZYNQ family as a hardmacro in the SoC portion of the ZYNQ. The
-other one is available on all series 7 platforms and is a softmacro with a AXI
-interface. This binding document describes the bindings for both of them since
-the bindings are very similar.
+This binding document describes the bindings for the Xilinx 7 Series XADC as well
+as the UltraScale/UltraScale+ System Monitor.
+
+The Xilinx XADC is an ADC that can be found in the Series 7 FPGAs from Xilinx.
+The XADC has a DRP interface for communication. Currently two different
+frontends for the DRP interface exist: one that is only available on the ZYNQ
+family as a hardmacro in the SoC portion of the ZYNQ, and one that is available
+on all Series 7 platforms as a softmacro with an AXI interface. This binding
+document describes the bindings for both of them since the bindings are very
+similar.
+
+The Xilinx System Monitor is an ADC that is found in the UltraScale and
+UltraScale+ FPGAs from Xilinx. The System Monitor provides a DRP interface for
+communication. Xilinx provides a standard IP core that can be used to access the
+System Monitor through an AXI interface in the FPGA fabric. This IP core is
+called the Xilinx System Management Wizard. This document describes the bindings
+for this IP.
Required properties:
- compatible: Should be one of
@@ -15,11 +24,14 @@ Required properties:
configuration interface to interface to the XADC hardmacro.
* "xlnx,axi-xadc-1.00.a": When using the axi-xadc pcore to
interface to the XADC hardmacro.
+ * "xlnx,system-management-wiz-1.3": When using the
+ Xilinx System Management Wizard fabric IP core to access the
+ UltraScale and UltraScale+ System Monitor.
- reg: Address and length of the register set for the device
- interrupts: Interrupt for the XADC control interface.
- clocks: When using the ZYNQ this must be the ZYNQ PCAP clock,
- when using the AXI-XADC pcore this must be the clock that provides the
- clock to the AXI bus interface of the core.
+ when using the axi-xadc or the axi-system-management-wizard this must be
+ the clock that drives the AXI bus interface of the core.
Optional properties:
- xlnx,external-mux:
@@ -110,3 +122,20 @@ Examples:
};
};
};
+
+ adc@80000000 {
+ compatible = "xlnx,system-management-wiz-1.3";
+ reg = <0x80000000 0x1000>;
+ interrupts = <0 81 4>;
+ interrupt-parent = <&gic>;
+ clocks = <&fpga1_clk>;
+
+ xlnx,channels {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ channel@0 {
+ reg = <0>;
+ xlnx,bipolar;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad5686.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad5696.yaml
index 8065228e5df8..56b0cda0f30a 100644
--- a/Documentation/devicetree/bindings/iio/dac/adi,ad5686.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad5696.yaml
@@ -1,16 +1,16 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/iio/dac/adi,ad5686.yaml#
+$id: http://devicetree.org/schemas/iio/dac/adi,ad5696.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Analog Devices AD5686 and similar multi-channel DACs
+title: Analog Devices AD5696 and similar multi-channel DACs
maintainers:
- Michael Auchter <michael.auchter@ni.com>
description: |
- Binding for Analog Devices AD5686 and similar multi-channel DACs
+ Binding for Analog Devices AD5696 and similar multi-channel DACs
properties:
compatible:
@@ -48,8 +48,8 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
- ad5686: dac@0 {
- compatible = "adi,ad5686";
+ ad5696: dac@0 {
+ compatible = "adi,ad5696";
reg = <0>;
vcc-supply = <&dac_vref>;
};
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml
index 626ccb6fe21e..fd4edca34a28 100644
--- a/Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml
@@ -46,31 +46,42 @@ properties:
two properties must be present:
adi,range-microvolt:
- $ref: /schemas/types.yaml#/definitions/int32-array
description: |
Voltage output range specified as <minimum, maximum>
- enum:
- - [[0, 5000000]]
- - [[0, 10000000]]
- - [[-5000000, 5000000]]
- - [[-10000000, 10000000]]
+ oneOf:
+ - items:
+ - const: 0
+ - enum: [5000000, 10000000]
+ - items:
+ - const: -5000000
+ - const: 5000000
+ - items:
+ - const: -10000000
+ - const: 10000000
adi,range-microamp:
- $ref: /schemas/types.yaml#/definitions/int32-array
description: |
Current output range specified as <minimum, maximum>
- enum:
- - [[0, 20000]]
- - [[0, 24000]]
- - [[4, 24000]]
- - [[-20000, 20000]]
- - [[-24000, 24000]]
- - [[-1000, 22000]]
+ oneOf:
+ - items:
+ - const: 0
+ - enum: [20000, 24000]
+ - items:
+ - const: 4
+ - const: 24000
+ - items:
+ - const: -20000
+ - const: 20000
+ - items:
+ - const: -24000
+ - const: 24000
+ - items:
+ - const: -1000
+ - const: 22000
reset-gpios: true
adi,dc-dc-ilim-microamp:
- $ref: /schemas/types.yaml#/definitions/uint32
enum: [150000, 200000, 250000, 300000, 350000, 400000]
description: |
The dc-to-dc converter current limit.
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad5766.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad5766.yaml
new file mode 100644
index 000000000000..d5c54813ce87
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad5766.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2020 Analog Devices Inc.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/dac/adi,ad5766.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AD5766 DAC device driver
+
+maintainers:
+ - Cristian Pop <cristian.pop@analog.com>
+
+description: |
+ Bindings for the Analog Devices AD5766 current DAC device. Datasheet can be
+ found here:
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad5766-5767.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,ad5766
+ - adi,ad5767
+
+ output-range-microvolts:
+ description: Select converter output range.
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 1000000
+
+ spi-cpol: true
+
+ reset-gpios:
+ description: GPIO spec for the RESET pin. As the line is active low, it
+ should be marked GPIO_ACTIVE_LOW.
+ maxItems: 1
+
+required:
+ - compatible
+ - output-range-microvolts
+ - reg
+ - spi-max-frequency
+ - spi-cpol
+
+additionalProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ad5766@0 {
+ compatible = "adi,ad5766";
+ output-range-microvolts = <(-5000) 5000>;
+ reg = <0>;
+ spi-cpol;
+ spi-max-frequency = <1000000>;
+ reset-gpios = <&gpio 22 0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/dac/microchip,mcp4725.yaml b/Documentation/devicetree/bindings/iio/dac/microchip,mcp4725.yaml
index 271998610ceb..5f5b578316bc 100644
--- a/Documentation/devicetree/bindings/iio/dac/microchip,mcp4725.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/microchip,mcp4725.yaml
@@ -39,20 +39,39 @@ properties:
allOf:
- if:
- not:
- properties:
- compatible:
- contains:
- const: microchip,mcp4726
+ properties:
+ compatible:
+ contains:
+ const: microchip,mcp4725
then:
properties:
vref-supply: false
+ required:
+ - vdd-supply
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: microchip,mcp4726
+ then:
+ anyOf:
+ - required:
+ - vdd-supply
+ - required:
+ - vref-supply
+
+ - if:
+ not:
+ required:
+ - vref-supply
+ then:
+ properties:
microchip,vref-buffered: false
required:
- compatible
- reg
- - vdd-supply
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml b/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml
index 0466483be6bb..b6bbc312a7cf 100644
--- a/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml
+++ b/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml
@@ -19,6 +19,9 @@ properties:
reg:
maxItems: 1
+ vdd-supply: true
+ vddio-supply: true
+
interrupts:
minItems: 1
description:
diff --git a/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
deleted file mode 100644
index 233fe207aded..000000000000
--- a/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-Invensense MPU-3050 Gyroscope device tree bindings
-
-Required properties:
- - compatible : should be "invensense,mpu3050"
- - reg : the I2C address of the sensor
-
-Optional properties:
- - interrupts : interrupt mapping for the trigger interrupt from the
- internal oscillator. The following IRQ modes are supported:
- IRQ_TYPE_EDGE_RISING, IRQ_TYPE_EDGE_FALLING, IRQ_TYPE_LEVEL_HIGH and
- IRQ_TYPE_LEVEL_LOW. The driver should detect and configure the hardware
- for the desired interrupt type.
- - vdd-supply : supply regulator for the main power voltage.
- - vlogic-supply : supply regulator for the signal voltage.
- - mount-matrix : see iio/mount-matrix.txt
-
-Optional subnodes:
- - The MPU-3050 will pass through and forward the I2C signals from the
- incoming I2C bus, alternatively drive traffic to a slave device (usually
- an accelerometer) on its own initiative. Therefore is supports a subnode
- i2c gate node. For details see: i2c/i2c-gate.txt
-
-Example:
-
-mpu3050@68 {
- compatible = "invensense,mpu3050";
- reg = <0x68>;
- interrupt-parent = <&foo>;
- interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
- vdd-supply = <&bar>;
- vlogic-supply = <&baz>;
-
- /* External I2C interface */
- i2c-gate {
- #address-cells = <1>;
- #size-cells = <0>;
-
- fnord@18 {
- compatible = "fnord";
- reg = <0x18>;
- interrupt-parent = <&foo>;
- interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml
new file mode 100644
index 000000000000..7e2accc3d5ce
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/gyroscope/invensense,mpu3050.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Invensense MPU-3050 Gyroscope
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+properties:
+ compatible:
+ const: invensense,mpu3050
+
+ reg:
+ maxItems: 1
+
+ vdd-supply: true
+
+ vlogic-supply: true
+
+ interrupts:
+ minItems: 1
+ description:
+ Interrupt mapping for the trigger interrupt from the internal oscillator.
+
+ mount-matrix: true
+
+ i2c-gate:
+ $ref: /schemas/i2c/i2c-controller.yaml
+ unevaluatedProperties: false
+ description: |
+ The MPU-3050 will pass through and forward the I2C signals from the
+ incoming I2C bus, alternatively drive traffic to a slave device (usually
+ an accelerometer) on its own initiative. Therefore it supports an
+ i2c-gate subnode.
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gyroscope@68 {
+ compatible = "invensense,mpu3050";
+ reg = <0x68>;
+ interrupt-parent = <&foo>;
+ interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
+ vdd-supply = <&bar>;
+ vlogic-supply = <&baz>;
+
+ i2c-gate {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ magnetometer@c {
+ compatible = "ak,ak8975";
+ reg = <0x0c>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/health/maxim,max30100.yaml b/Documentation/devicetree/bindings/iio/health/maxim,max30100.yaml
index 64b862637039..967778fb0ce8 100644
--- a/Documentation/devicetree/bindings/iio/health/maxim,max30100.yaml
+++ b/Documentation/devicetree/bindings/iio/health/maxim,max30100.yaml
@@ -21,7 +21,6 @@ properties:
description: Connected to ADC_RDY pin.
maxim,led-current-microamp:
- $ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
maxItems: 2
description: |
diff --git a/Documentation/devicetree/bindings/iio/health/ti,afe4404.yaml b/Documentation/devicetree/bindings/iio/health/ti,afe4404.yaml
index 3b4d6c48b8bb..c0e815d9999e 100644
--- a/Documentation/devicetree/bindings/iio/health/ti,afe4404.yaml
+++ b/Documentation/devicetree/bindings/iio/health/ti,afe4404.yaml
@@ -11,7 +11,7 @@ maintainers:
properties:
compatible:
- const: ti,afe4403
+ const: ti,afe4404
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
deleted file mode 100644
index f2f64749e818..000000000000
--- a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-InvenSense MPU-6050 Six-Axis (Gyro + Accelerometer) MEMS MotionTracking Device
-
-http://www.invensense.com/mems/gyro/mpu6050.html
-
-Required properties:
- - compatible : should be one of
- "invensense,mpu6000"
- "invensense,mpu6050"
- "invensense,mpu6500"
- "invensense,mpu6515"
- "invensense,mpu9150"
- "invensense,mpu9250"
- "invensense,mpu9255"
- "invensense,icm20608"
- "invensense,icm20609"
- "invensense,icm20689"
- "invensense,icm20602"
- "invensense,icm20690"
- "invensense,iam20680"
- - reg : the I2C address of the sensor
- - interrupts: interrupt mapping for IRQ. It should be configured with flags
- IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or
- IRQ_TYPE_EDGE_FALLING.
-
- Refer to interrupt-controller/interrupts.txt for generic interrupt client node
- bindings.
-
-Optional properties:
- - vdd-supply: regulator phandle for VDD supply
- - vddio-supply: regulator phandle for VDDIO supply
- - mount-matrix: an optional 3x3 mounting rotation matrix
- - i2c-gate node. These devices also support an auxiliary i2c bus. This is
- simple enough to be described using the i2c-gate binding. See
- i2c/i2c-gate.txt for more details.
-
-Example:
- mpu6050@68 {
- compatible = "invensense,mpu6050";
- reg = <0x68>;
- interrupt-parent = <&gpio1>;
- interrupts = <18 IRQ_TYPE_EDGE_RISING>;
- mount-matrix = "-0.984807753012208", /* x0 */
- "0", /* y0 */
- "-0.173648177666930", /* z0 */
- "0", /* x1 */
- "-1", /* y1 */
- "0", /* z1 */
- "-0.173648177666930", /* x2 */
- "0", /* y2 */
- "0.984807753012208"; /* z2 */
- };
-
-
- mpu9250@68 {
- compatible = "invensense,mpu9250";
- reg = <0x68>;
- interrupt-parent = <&gpio3>;
- interrupts = <21 IRQ_TYPE_LEVEL_HIGH>;
- i2c-gate {
- #address-cells = <1>;
- #size-cells = <0>;
- ax8975@c {
- compatible = "ak,ak8975";
- reg = <0x0c>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml b/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
new file mode 100644
index 000000000000..edbc2921aabd
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/imu/invensense,mpu6050.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: InvenSense MPU-6050 Six-Axis (Gyro + Accelerometer) MEMS MotionTracking Device
+
+maintainers:
+ - Jean-Baptiste Maneyrol <jmaneyrol@invensense.com>
+
+description: |
+ These devices support both I2C and SPI bus interfaces.
+
+properties:
+ compatible:
+ enum:
+ - invensense,iam20680
+ - invensense,icm20608
+ - invensense,icm20609
+ - invensense,icm20689
+ - invensense,icm20602
+ - invensense,icm20690
+ - invensense,mpu6000
+ - invensense,mpu6050
+ - invensense,mpu6500
+ - invensense,mpu6515
+ - invensense,mpu6880
+ - invensense,mpu9150
+ - invensense,mpu9250
+ - invensense,mpu9255
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ spi-max-frequency: true
+
+ vdd-supply: true
+ vddio-supply: true
+
+ mount-matrix: true
+
+ i2c-gate:
+ $ref: /schemas/i2c/i2c-controller.yaml
+ unevaluatedProperties: false
+ description: |
+ These devices also support an auxiliary i2c bus via an i2c-gate.
+
+allOf:
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - invensense,mpu9150
+ - invensense,mpu9250
+ - invensense,mpu9255
+ then:
+ properties:
+ i2c-gate: false
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ imu@68 {
+ compatible = "invensense,mpu9250";
+ reg = <0x68>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <21 IRQ_TYPE_LEVEL_HIGH>;
+ mount-matrix = "-0.984807753012208", /* x0 */
+ "0", /* y0 */
+ "-0.173648177666930", /* z0 */
+ "0", /* x1 */
+ "-1", /* y1 */
+ "0", /* z1 */
+ "-0.173648177666930", /* x2 */
+ "0", /* y2 */
+ "0.984807753012208"; /* z2 */
+ i2c-gate {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ magnetometer@c {
+ compatible = "ak,ak8975";
+ reg = <0x0c>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml b/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml
index a25590a16ba7..a0a1ffe017df 100644
--- a/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml
+++ b/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml
@@ -47,6 +47,7 @@ properties:
description: an optional 3x3 mounting rotation matrix.
reset-gpios:
+ maxItems: 1
description: |
an optional pin needed for AK09911 to set the reset state. This should
be usually active low
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml b/Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml
index cdef7aeba708..2867ab6bf9b0 100644
--- a/Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml
+++ b/Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml
@@ -30,6 +30,9 @@ properties:
reg:
maxItems: 1
+ vdd-supply: true
+ vddio-supply: true
+
interrupts:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/yamaha,yas530.yaml b/Documentation/devicetree/bindings/iio/magnetometer/yamaha,yas530.yaml
new file mode 100644
index 000000000000..4b0ef1ef5445
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/magnetometer/yamaha,yas530.yaml
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/magnetometer/yamaha,yas530.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Yamaha YAS530 family of magnetometer sensors
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description:
+ The Yamaha YAS530 family is a line of 3-axis magnetometers, first
+ introduced by Yamaha in 2009 with the YAS530. They are successors of
+ Yamaha's first magnetometer, the YAS529. Over the years this magnetometer
+ has been miniaturized and has appeared in a number of different variants.
+
+properties:
+ $nodename:
+ pattern: '^magnetometer@[0-9a-f]+$'
+
+ compatible:
+ items:
+ - enum:
+ - yamaha,yas530
+ - yamaha,yas532
+ - yamaha,yas533
+ - yamaha,yas535
+ - yamaha,yas536
+ - yamaha,yas537
+ - yamaha,yas539
+
+ reg:
+ maxItems: 1
+
+ reset-gpios:
+ maxItems: 1
+ description: The YAS530 sensor has a RSTN pin used to reset
+ the logic inside the sensor. This GPIO line should connect
+ to that pin and be marked as GPIO_ACTIVE_LOW.
+
+ interrupts:
+ maxItems: 1
+ description: Interrupt for INT pin for interrupt generation.
+ The polarity, whether the interrupt is active on the rising
+ or the falling edge, is software-configurable in the hardware.
+
+ vdd-supply:
+ description: An optional regulator providing core power supply
+ on the VDD pin, typically 1.8 V or 3.0 V.
+
+ iovdd-supply:
+ description: An optional regulator providing I/O power supply
+ for the I2C interface on the IOVDD pin, typically 1.8 V.
+
+ mount-matrix:
+ description: An optional 3x3 mounting rotation matrix.
+
+allOf:
+ - if:
+ not:
+ properties:
+ compatible:
+ items:
+ const: yamaha,yas530
+ then:
+ properties:
+ reset-gpios: false
+
+ - if:
+ properties:
+ compatible:
+ items:
+ const: yamaha,yas539
+ then:
+ properties:
+ interrupts: false
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+ i2c-0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ magnetometer@2e {
+ compatible = "yamaha,yas530";
+ reg = <0x2e>;
+ vdd-supply = <&ldo1_reg>;
+ iovdd-supply = <&ldo2_reg>;
+ reset-gpios = <&gpio6 12 GPIO_ACTIVE_LOW>;
+ interrupts-extended = <&gpio6 13 IRQ_TYPE_EDGE_RISING>;
+ };
+ };
+
+ i2c-1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ magnetometer@2e {
+ compatible = "yamaha,yas539";
+ reg = <0x2e>;
+ vdd-supply = <&ldo1_reg>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/potentiometer/adi,ad5272.yaml b/Documentation/devicetree/bindings/iio/potentiometer/adi,ad5272.yaml
index 1aee9f9be951..0ebb6725a1af 100644
--- a/Documentation/devicetree/bindings/iio/potentiometer/adi,ad5272.yaml
+++ b/Documentation/devicetree/bindings/iio/potentiometer/adi,ad5272.yaml
@@ -25,6 +25,7 @@ properties:
maxItems: 1
reset-gpios:
+ maxItems: 1
description:
Active low signal to the AD5272 RESET input.
diff --git a/Documentation/devicetree/bindings/input/adc-keys.txt b/Documentation/devicetree/bindings/input/adc-keys.txt
index e551814629b4..6c8be6a9ace2 100644
--- a/Documentation/devicetree/bindings/input/adc-keys.txt
+++ b/Documentation/devicetree/bindings/input/adc-keys.txt
@@ -5,7 +5,8 @@ Required properties:
- compatible: "adc-keys"
- io-channels: Phandle to an ADC channel
- io-channel-names = "buttons";
- - keyup-threshold-microvolt: Voltage at which all the keys are considered up.
+ - keyup-threshold-microvolt: Voltage above or equal to which all the keys are
+ considered up.
Optional properties:
- poll-interval: Poll interval time in milliseconds
@@ -17,7 +18,12 @@ Each button (key) is represented as a sub-node of "adc-keys":
Required subnode-properties:
- label: Descriptive name of the key.
- linux,code: Keycode to emit.
- - press-threshold-microvolt: Voltage ADC input when this key is pressed.
+ - press-threshold-microvolt: voltage above or equal to which this key is
+ considered pressed.
+
+No two values of press-threshold-microvolt may be the same.
+All values of press-threshold-microvolt must be less than
+keyup-threshold-microvolt.
Example:
@@ -47,3 +53,15 @@ Example:
press-threshold-microvolt = <500000>;
};
};
+
++--------------------------------+------------------------+
+| 2,000,000 <= value | no key pressed |
++--------------------------------+------------------------+
+| 1,500,000 <= value < 2,000,000 | KEY_VOLUMEUP pressed |
++--------------------------------+------------------------+
+| 1,000,000 <= value < 1,500,000 | KEY_VOLUMEDOWN pressed |
++--------------------------------+------------------------+
+| 500,000 <= value < 1,000,000 | KEY_ENTER pressed |
++--------------------------------+------------------------+
+| value < 500,000 | no key pressed |
++--------------------------------+------------------------+
diff --git a/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml b/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml
new file mode 100644
index 000000000000..fe1c5016f7f3
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/goodix,gt7375p.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Goodix GT7375P touchscreen
+
+maintainers:
+ - Douglas Anderson <dianders@chromium.org>
+
+description:
+ Supports the Goodix GT7375P touchscreen.
+ This touchscreen uses the i2c-hid protocol but requires some non-standard
+ power sequencing.
+
+properties:
+ compatible:
+ items:
+ - const: goodix,gt7375p
+
+ reg:
+ enum:
+ - 0x5d
+ - 0x14
+
+ interrupts:
+ maxItems: 1
+
+ reset-gpios:
+ true
+
+ vdd-supply:
+ description: The 3.3V supply to the touchscreen.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - reset-gpios
+ - vdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ap_ts: touchscreen@5d {
+ compatible = "goodix,gt7375p";
+ reg = <0x5d>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+
+ reset-gpios = <&tlmm 8 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&pp3300_ts>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/google,cros-ec-keyb.yaml b/Documentation/devicetree/bindings/input/google,cros-ec-keyb.yaml
index 8e50c14a9d77..5377b232fa10 100644
--- a/Documentation/devicetree/bindings/input/google,cros-ec-keyb.yaml
+++ b/Documentation/devicetree/bindings/input/google,cros-ec-keyb.yaml
@@ -31,6 +31,17 @@ properties:
if the EC does not have its own logic or hardware for this.
type: boolean
+ function-row-physmap:
+ minItems: 1
+ maxItems: 15
+ description: |
+ An ordered u32 array describing the rows/columns (in the scan matrix)
+ of top row keys from physical left (KEY_F1) to right. Each entry
+ encodes the row/column as:
+ (((row) & 0xFF) << 24) | (((column) & 0xFF) << 16)
+ where the lower 16 bits are reserved. This property is specified only
+ when the keyboard has a custom design for the top row keys.
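+ As a worked example, the key at row 0, column 2 is encoded as
+ (((0x00) & 0xFF) << 24) | (((0x02) & 0xFF) << 16) = 0x00020000, which is
+ the value the MATRIX_KEY(0x00, 0x02, 0) entry in the example below is
+ expected to expand to.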
+
required:
- compatible
@@ -38,11 +49,24 @@ unevaluatedProperties: false
examples:
- |
+ #include <dt-bindings/input/input.h>
cros-ec-keyb {
compatible = "google,cros-ec-keyb";
keypad,num-rows = <8>;
keypad,num-columns = <13>;
google,needs-ghost-filter;
+ function-row-physmap = <
+ MATRIX_KEY(0x00, 0x02, 0) /* T1 */
+ MATRIX_KEY(0x03, 0x02, 0) /* T2 */
+ MATRIX_KEY(0x02, 0x02, 0) /* T3 */
+ MATRIX_KEY(0x01, 0x02, 0) /* T4 */
+ MATRIX_KEY(0x03, 0x04, 0) /* T5 */
+ MATRIX_KEY(0x02, 0x04, 0) /* T6 */
+ MATRIX_KEY(0x01, 0x04, 0) /* T7 */
+ MATRIX_KEY(0x02, 0x09, 0) /* T8 */
+ MATRIX_KEY(0x01, 0x09, 0) /* T9 */
+ MATRIX_KEY(0x00, 0x04, 0) /* T10 */
+ >;
/*
* Keymap entries take the form of 0xRRCCKKKK where
* RR=Row CC=Column KKKK=Key Code
diff --git a/Documentation/devicetree/bindings/input/touchscreen/elan,elants_i2c.yaml b/Documentation/devicetree/bindings/input/touchscreen/elan,elants_i2c.yaml
index a792d6377b1d..a9b53c2e6f0a 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/elan,elants_i2c.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/elan,elants_i2c.yaml
@@ -29,6 +29,7 @@ properties:
description: touchscreen can be used as a wakeup source.
reset-gpios:
+ maxItems: 1
description: reset gpio the chip is connected to.
vcc33-supply:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
index da5b0d87e16d..93f2ce3130ae 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
@@ -26,6 +26,7 @@ properties:
- goodix,gt927
- goodix,gt9271
- goodix,gt928
+ - goodix,gt9286
- goodix,gt967
reg:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml
index a771a15f053f..046ace461cc9 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml
@@ -70,11 +70,9 @@ properties:
touchscreen-x-mm:
description: horizontal length in mm of the touchscreen
- $ref: /schemas/types.yaml#/definitions/uint32
touchscreen-y-mm:
description: vertical length in mm of the touchscreen
- $ref: /schemas/types.yaml#/definitions/uint32
dependencies:
touchscreen-size-x: [ touchscreen-size-y ]
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,qcs404.yaml b/Documentation/devicetree/bindings/interconnect/qcom,qcs404.yaml
deleted file mode 100644
index 3fbb8785fbc9..000000000000
--- a/Documentation/devicetree/bindings/interconnect/qcom,qcs404.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/interconnect/qcom,qcs404.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Qualcomm QCS404 Network-On-Chip interconnect
-
-maintainers:
- - Georgi Djakov <georgi.djakov@linaro.org>
-
-description: |
- The Qualcomm QCS404 interconnect providers support adjusting the
- bandwidth requirements between the various NoC fabrics.
-
-properties:
- reg:
- maxItems: 1
-
- compatible:
- enum:
- - qcom,qcs404-bimc
- - qcom,qcs404-pcnoc
- - qcom,qcs404-snoc
-
- '#interconnect-cells':
- const: 1
-
- clock-names:
- items:
- - const: bus
- - const: bus_a
-
- clocks:
- items:
- - description: Bus Clock
- - description: Bus A Clock
-
-required:
- - compatible
- - reg
- - '#interconnect-cells'
- - clock-names
- - clocks
-
-additionalProperties: false
-
-examples:
- - |
- #include <dt-bindings/clock/qcom,rpmcc.h>
-
- bimc: interconnect@400000 {
- reg = <0x00400000 0x80000>;
- compatible = "qcom,qcs404-bimc";
- #interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
- <&rpmcc RPM_SMD_BIMC_A_CLK>;
- };
-
- pnoc: interconnect@500000 {
- reg = <0x00500000 0x15080>;
- compatible = "qcom,qcs404-pcnoc";
- #interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_PNOC_CLK>,
- <&rpmcc RPM_SMD_PNOC_A_CLK>;
- };
-
- snoc: interconnect@580000 {
- reg = <0x00580000 0x23080>;
- compatible = "qcom,qcs404-snoc";
- #interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
- <&rpmcc RPM_SMD_SNOC_A_CLK>;
- };
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8916.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
index e1009ae4e8f7..983d71fb5399 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,msm8916.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
@@ -1,27 +1,35 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/interconnect/qcom,msm8916.yaml#
+$id: http://devicetree.org/schemas/interconnect/qcom,rpm.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Qualcomm MSM8916 Network-On-Chip interconnect
+title: Qualcomm RPM Network-On-Chip Interconnect
maintainers:
- Georgi Djakov <georgi.djakov@linaro.org>
description: |
- The Qualcomm MSM8916 interconnect providers support adjusting the
- bandwidth requirements between the various NoC fabrics.
+ RPM interconnect providers support system bandwidth requirements through
+ the RPM processor. The provider is able to communicate with the RPM through
+ the RPM shared memory device.
properties:
+ reg:
+ maxItems: 1
+
compatible:
enum:
- qcom,msm8916-bimc
- qcom,msm8916-pcnoc
- qcom,msm8916-snoc
-
- reg:
- maxItems: 1
+ - qcom,msm8939-bimc
+ - qcom,msm8939-pcnoc
+ - qcom,msm8939-snoc
+ - qcom,msm8939-snoc-mm
+ - qcom,qcs404-bimc
+ - qcom,qcs404-pcnoc
+ - qcom,qcs404-snoc
'#interconnect-cells':
const: 1
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
index 30c2a092d2d3..799e73cdb90b 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
@@ -45,6 +45,10 @@ properties:
- qcom,sdm845-mem-noc
- qcom,sdm845-mmss-noc
- qcom,sdm845-system-noc
+ - qcom,sdx55-ipa-virt
+ - qcom,sdx55-mc-virt
+ - qcom,sdx55-mem-noc
+ - qcom,sdx55-system-noc
- qcom,sm8150-aggre1-noc
- qcom,sm8150-aggre2-noc
- qcom,sm8150-camnoc-noc
@@ -69,7 +73,7 @@ properties:
- qcom,sm8250-system-noc
'#interconnect-cells':
- const: 1
+ enum: [ 1, 2 ]
qcom,bcm-voters:
$ref: /schemas/types.yaml#/definitions/phandle-array
diff --git a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun6i-a31-r-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun6i-a31-r-intc.yaml
new file mode 100644
index 000000000000..4db24b8a9ffe
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun6i-a31-r-intc.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/allwinner,sun6i-a31-r-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A31 NMI/Wakeup Interrupt Controller Device Tree Bindings
+
+maintainers:
+ - Chen-Yu Tsai <wens@csie.org>
+ - Maxime Ripard <mripard@kernel.org>
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml#
+
+properties:
+ "#interrupt-cells":
+ const: 3
+ description:
+ The first cell is GIC_SPI (0), the second cell is the IRQ number, and
+ the third cell is the trigger type as defined in interrupt.txt in this
+ directory.
+
+ compatible:
+ oneOf:
+ - const: allwinner,sun6i-a31-r-intc
+ - items:
+ - enum:
+ - allwinner,sun8i-a83t-r-intc
+ - allwinner,sun8i-h3-r-intc
+ - allwinner,sun50i-a64-r-intc
+ - const: allwinner,sun6i-a31-r-intc
+ - const: allwinner,sun50i-h6-r-intc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+ description:
+ The GIC interrupt labeled as "External NMI".
+
+ interrupt-controller: true
+
+required:
+ - "#interrupt-cells"
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ r_intc: interrupt-controller@1f00c00 {
+ compatible = "allwinner,sun50i-a64-r-intc",
+ "allwinner,sun6i-a31-r-intc";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = <0x01f00c00 0x400>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ };
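+
+ /*
+ * Illustrative only: a consumer of r_intc would use a three-cell
+ * specifier of the same form, e.g. interrupt-parent = <&r_intc>;
+ * interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_LOW>;
+ */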
+
+...
diff --git a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
index 8acca0ae3129..7fc9ad5ef38c 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
@@ -22,23 +22,16 @@ properties:
compatible:
oneOf:
- - const: allwinner,sun6i-a31-r-intc
- const: allwinner,sun6i-a31-sc-nmi
deprecated: true
- const: allwinner,sun7i-a20-sc-nmi
- items:
- - const: allwinner,sun8i-a83t-r-intc
- - const: allwinner,sun6i-a31-r-intc
+ - const: allwinner,sun8i-v3s-nmi
+ - const: allwinner,sun9i-a80-nmi
- const: allwinner,sun9i-a80-nmi
- items:
- - const: allwinner,sun50i-a64-r-intc
- - const: allwinner,sun6i-a31-r-intc
- - items:
- const: allwinner,sun50i-a100-nmi
- const: allwinner,sun9i-a80-nmi
- - items:
- - const: allwinner,sun50i-h6-r-intc
- - const: allwinner,sun6i-a31-r-intc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,intmux.yaml b/Documentation/devicetree/bindings/interrupt-controller/fsl,intmux.yaml
index 43c6effbb5bd..1d6e0f64a807 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/fsl,intmux.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,intmux.yaml
@@ -31,7 +31,7 @@ properties:
The 1st cell is hw interrupt number, the 2nd cell is channel index.
clocks:
- description: ipg clock.
+ maxItems: 1
clock-names:
const: ipg
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.yaml
index 0a046be8d1cd..0358a7739c8e 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.yaml
@@ -23,6 +23,7 @@ properties:
- enum:
- ingenic,jz4775-intc
- ingenic,jz4770-intc
+ - ingenic,jz4760b-intc
- const: ingenic,jz4760-intc
- items:
- const: ingenic,x1000-intc
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
index 1df293953327..e9afb48182c7 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
@@ -20,6 +20,8 @@ Properties:
Definition: Should contain "qcom,<soc>-pdc" and "qcom,pdc"
- "qcom,sc7180-pdc": For SC7180
- "qcom,sdm845-pdc": For SDM845
+ - "qcom,sdm8250-pdc": For SM8250
+ - "qcom,sdm8350-pdc": For SM8350
- reg:
Usage: required
diff --git a/Documentation/devicetree/bindings/interrupt-controller/realtek,rtl-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/realtek,rtl-intc.yaml
new file mode 100644
index 000000000000..9e76fff20323
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/realtek,rtl-intc.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/realtek,rtl-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek RTL SoC interrupt controller devicetree bindings
+
+maintainers:
+ - Birger Koblitz <mail@birger-koblitz.de>
+ - Bert Vermeulen <bert@biot.com>
+ - John Crispin <john@phrozen.org>
+
+properties:
+ compatible:
+ const: realtek,rtl-intc
+
+ "#interrupt-cells":
+ const: 1
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ "#address-cells":
+ const: 0
+
+ interrupt-map:
+ description: Describes mapping from SoC interrupts to CPU interrupts
+
+required:
+ - compatible
+ - reg
+ - "#interrupt-cells"
+ - interrupt-controller
+ - "#address-cells"
+ - interrupt-map
+
+additionalProperties: false
+
+examples:
+ - |
+ intc: interrupt-controller@3000 {
+ compatible = "realtek,rtl-intc";
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ reg = <0x3000 0x20>;
+ #address-cells = <0>;
+ interrupt-map =
+ <31 &cpuintc 2>,
+ <30 &cpuintc 1>,
+ <29 &cpuintc 5>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
index b9a61c9f7530..08d5a57ce00f 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
@@ -8,10 +8,11 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: SiFive Platform-Level Interrupt Controller (PLIC)
description:
- SiFive SOCs include an implementation of the Platform-Level Interrupt Controller
- (PLIC) high-level specification in the RISC-V Privileged Architecture
- specification. The PLIC connects all external interrupts in the system to all
- hart contexts in the system, via the external interrupt source in each hart.
+ SiFive SoCs and other RISC-V SoCs include an implementation of the
+ Platform-Level Interrupt Controller (PLIC) high-level specification in
+ the RISC-V Privileged Architecture specification. The PLIC connects all
+ external interrupts in the system to all hart contexts in the system, via
+ the external interrupt source in each hart.
A hart context is a privilege mode in a hardware execution thread. For example,
in an 4 core system with 2-way SMT, you have 8 harts and probably at least two
@@ -42,7 +43,9 @@ maintainers:
properties:
compatible:
items:
- - const: sifive,fu540-c000-plic
+ - enum:
+ - sifive,fu540-c000-plic
+ - canaan,k210-plic
- const: sifive,plic-1.0.0
reg:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt
deleted file mode 100644
index 355c18a3a4d3..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Sigma Designs SMP86xx/SMP87xx secondary interrupt controller
-
-Required properties:
-- compatible: should be "sigma,smp8642-intc"
-- reg: physical address of MMIO region
-- ranges: address space mapping of child nodes
-- interrupt-controller: boolean
-- #address-cells: should be <1>
-- #size-cells: should be <1>
-
-One child node per control block with properties:
-- reg: address of registers for this control block
-- interrupt-controller: boolean
-- #interrupt-cells: should be <2>, interrupt index and flags per interrupts.txt
-- interrupts: interrupt spec of primary interrupt controller
-
-Example:
-
-interrupt-controller@6e000 {
- compatible = "sigma,smp8642-intc";
- reg = <0x6e000 0x400>;
- ranges = <0x0 0x6e000 0x400>;
- interrupt-parent = <&gic>;
- interrupt-controller;
- #address-cells = <1>;
- #size-cells = <1>;
-
- irq0: interrupt-controller@0 {
- reg = <0x000 0x100>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- irq1: interrupt-controller@100 {
- reg = <0x100 0x100>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- irq2: interrupt-controller@300 {
- reg = <0x300 0x100>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
- };
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml
index 2a5b29567926..6d3e68eb2e8b 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml
@@ -36,6 +36,8 @@ properties:
Reference to a phandle of a hardware spinlock provider node.
interrupts:
+ minItems: 1
+ maxItems: 96
description:
Interrupts references to primary interrupt controller
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,c64x+megamod-pic.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,c64x+megamod-pic.txt
deleted file mode 100644
index ee3f9c351501..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/ti,c64x+megamod-pic.txt
+++ /dev/null
@@ -1,103 +0,0 @@
-C6X Interrupt Chips
--------------------
-
-* C64X+ Core Interrupt Controller
-
- The core interrupt controller provides 16 prioritized interrupts to the
- C64X+ core. Priority 0 and 1 are used for reset and NMI respectively.
- Priority 2 and 3 are reserved. Priority 4-15 are used for interrupt
- sources coming from outside the core.
-
- Required properties:
- --------------------
- - compatible: Should be "ti,c64x+core-pic";
- - #interrupt-cells: <1>
-
- Interrupt Specifier Definition
- ------------------------------
- Single cell specifying the core interrupt priority level (4-15) where
- 4 is highest priority and 15 is lowest priority.
-
- Example
- -------
- core_pic: interrupt-controller@0 {
- interrupt-controller;
- #interrupt-cells = <1>;
- compatible = "ti,c64x+core-pic";
- };
-
-
-
-* C64x+ Megamodule Interrupt Controller
-
- The megamodule PIC consists of four interrupt mupliplexers each of which
- combine up to 32 interrupt inputs into a single interrupt output which
- may be cascaded into the core interrupt controller. The megamodule PIC
- has a total of 12 outputs cascading into the core interrupt controller.
- One for each core interrupt priority level. In addition to the combined
- interrupt sources, individual megamodule interrupts may be cascaded to
- the core interrupt controller. When an individual interrupt is cascaded,
- it is no longer handled through a megamodule interrupt combiner and is
- considered to have the core interrupt controller as the parent.
-
- Required properties:
- --------------------
- - compatible: "ti,c64x+megamod-pic"
- - interrupt-controller
- - #interrupt-cells: <1>
- - reg: base address and size of register area
- - interrupts: This should have four cells; one for each interrupt combiner.
- The cells contain the core priority interrupt to which the
- corresponding combiner output is wired.
-
- Optional properties:
- --------------------
- - ti,c64x+megamod-pic-mux: Array of 12 cells correspnding to the 12 core
- priority interrupts. The first cell corresponds to
- core priority 4 and the last cell corresponds to
- core priority 15. The value of each cell is the
- megamodule interrupt source which is MUXed to
- the core interrupt corresponding to the cell
- position. Allowed values are 4 - 127. Mapping for
- interrupts 0 - 3 (combined interrupt sources) are
- ignored.
-
- Interrupt Specifier Definition
- ------------------------------
- Single cell specifying the megamodule interrupt source (4-127). Note that
- interrupts mapped directly to the core with "ti,c64x+megamod-pic-mux" will
- use the core interrupt controller as their parent and the specifier will
- be the core priority level, not the megamodule interrupt number.
-
- Examples
- --------
- megamod_pic: interrupt-controller@1800000 {
- compatible = "ti,c64x+megamod-pic";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0x1800000 0x1000>;
- interrupt-parent = <&core_pic>;
- interrupts = < 12 13 14 15 >;
- };
-
- This is a minimal example where all individual interrupts go through a
- combiner. Combiner-0 is mapped to core interrupt 12, combiner-1 is mapped
- to interrupt 13, etc.
-
-
- megamod_pic: interrupt-controller@1800000 {
- compatible = "ti,c64x+megamod-pic";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0x1800000 0x1000>;
- interrupt-parent = <&core_pic>;
- interrupts = < 12 13 14 15 >;
- ti,c64x+megamod-pic-mux = < 0 0 0 0
- 32 0 0 0
- 0 0 0 0 >;
- };
-
- This the same as the first example except that megamodule interrupt 32 is
- mapped directly to core priority interrupt 8. The node using this interrupt
- must set the core controller as its interrupt parent and use 8 in the
- interrupt specifier value.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,pruss-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/ti,pruss-intc.yaml
index c2ce215501a5..9731dd4421a1 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/ti,pruss-intc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,pruss-intc.yaml
@@ -33,6 +33,9 @@ description: |
corresponding PRUSS node. The node should be named "interrupt-controller".
properties:
+ $nodename:
+ pattern: "^interrupt-controller@[0-9a-f]+$"
+
compatible:
enum:
- ti,pruss-intc
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
index 3b63f2ae24db..6ba161dea4d8 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
@@ -34,9 +34,11 @@ properties:
items:
- enum:
- qcom,sc7180-smmu-500
+ - qcom,sc8180x-smmu-500
- qcom,sdm845-smmu-500
- qcom,sm8150-smmu-500
- qcom,sm8250-smmu-500
+ - qcom,sm8350-smmu-500
- const: arm,mmu-500
- description: Qcom Adreno GPUs implementing "arm,smmu-v2"
items:
diff --git a/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt b/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
deleted file mode 100644
index ac949f7fe3d4..000000000000
--- a/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
+++ /dev/null
@@ -1,105 +0,0 @@
-* Mediatek IOMMU Architecture Implementation
-
- Some Mediatek SOCs contain a Multimedia Memory Management Unit (M4U), and
-this M4U have two generations of HW architecture. Generation one uses flat
-pagetable, and only supports 4K size page mapping. Generation two uses the
-ARM Short-Descriptor translation table format for address translation.
-
- About the M4U Hardware Block Diagram, please check below:
-
- EMI (External Memory Interface)
- |
- m4u (Multimedia Memory Management Unit)
- |
- +--------+
- | |
- gals0-rx gals1-rx (Global Async Local Sync rx)
- | |
- | |
- gals0-tx gals1-tx (Global Async Local Sync tx)
- | | Some SoCs may have GALS.
- +--------+
- |
- SMI Common(Smart Multimedia Interface Common)
- |
- +----------------+-------
- | |
- | gals-rx There may be GALS in some larbs.
- | |
- | |
- | gals-tx
- | |
- SMI larb0 SMI larb1 ... SoCs have several SMI local arbiter(larb).
- (display) (vdec)
- | |
- | |
- +-----+-----+ +----+----+
- | | | | | |
- | | |... | | | ... There are different ports in each larb.
- | | | | | |
-OVL0 RDMA0 WDMA0 MC PP VLD
-
- As above, The Multimedia HW will go through SMI and M4U while it
-access EMI. SMI is a bridge between m4u and the Multimedia HW. It contain
-smi local arbiter and smi common. It will control whether the Multimedia
-HW should go though the m4u for translation or bypass it and talk
-directly with EMI. And also SMI help control the power domain and clocks for
-each local arbiter.
- Normally we specify a local arbiter(larb) for each multimedia HW
-like display, video decode, and camera. And there are different ports
-in each larb. Take a example, There are many ports like MC, PP, VLD in the
-video decode local arbiter, all these ports are according to the video HW.
- In some SoCs, there may be a GALS(Global Async Local Sync) module between
-smi-common and m4u, and additional GALS module between smi-larb and
-smi-common. GALS can been seen as a "asynchronous fifo" which could help
-synchronize for the modules in different clock frequency.
-
-Required properties:
-- compatible : must be one of the following string:
- "mediatek,mt2701-m4u" for mt2701 which uses generation one m4u HW.
- "mediatek,mt2712-m4u" for mt2712 which uses generation two m4u HW.
- "mediatek,mt6779-m4u" for mt6779 which uses generation two m4u HW.
- "mediatek,mt7623-m4u", "mediatek,mt2701-m4u" for mt7623 which uses
- generation one m4u HW.
- "mediatek,mt8167-m4u" for mt8167 which uses generation two m4u HW.
- "mediatek,mt8173-m4u" for mt8173 which uses generation two m4u HW.
- "mediatek,mt8183-m4u" for mt8183 which uses generation two m4u HW.
-- reg : m4u register base and size.
-- interrupts : the interrupt of m4u.
-- clocks : must contain one entry for each clock-names.
-- clock-names : Only 1 optional clock:
- - "bclk": the block clock of m4u.
- Here is the list which require this "bclk":
- - mt2701, mt2712, mt7623 and mt8173.
- Note that m4u use the EMI clock which always has been enabled before kernel
- if there is no this "bclk".
-- mediatek,larbs : List of phandle to the local arbiters in the current Socs.
- Refer to bindings/memory-controllers/mediatek,smi-larb.txt. It must sort
- according to the local arbiter index, like larb0, larb1, larb2...
-- iommu-cells : must be 1. This is the mtk_m4u_id according to the HW.
- Specifies the mtk_m4u_id as defined in
- dt-binding/memory/mt2701-larb-port.h for mt2701, mt7623
- dt-binding/memory/mt2712-larb-port.h for mt2712,
- dt-binding/memory/mt6779-larb-port.h for mt6779,
- dt-binding/memory/mt8167-larb-port.h for mt8167,
- dt-binding/memory/mt8173-larb-port.h for mt8173, and
- dt-binding/memory/mt8183-larb-port.h for mt8183.
-
-Example:
- iommu: iommu@10205000 {
- compatible = "mediatek,mt8173-m4u";
- reg = <0 0x10205000 0 0x1000>;
- interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_LOW>;
- clocks = <&infracfg CLK_INFRA_M4U>;
- clock-names = "bclk";
- mediatek,larbs = <&larb0 &larb1 &larb2 &larb3 &larb4 &larb5>;
- #iommu-cells = <1>;
- };
-
-Example for a client device:
- display {
- compatible = "mediatek,mt8173-disp";
- iommus = <&iommu M4U_PORT_DISP_OVL0>,
- <&iommu M4U_PORT_DISP_RDMA0>;
- ...
- };
diff --git a/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml b/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
new file mode 100644
index 000000000000..0f26fe14c8e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
@@ -0,0 +1,183 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iommu/mediatek,iommu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek IOMMU Architecture Implementation
+
+maintainers:
+ - Yong Wu <yong.wu@mediatek.com>
+
+description: |+
+ Some MediaTek SoCs contain a Multimedia Memory Management Unit (M4U); this
+ M4U has two generations of HW architecture. Generation one uses a flat
+ pagetable and only supports 4K page mappings. Generation two uses the
+ ARM Short-Descriptor translation table format for address translation.
+
+ About the M4U Hardware Block Diagram, please check below:
+
+ EMI (External Memory Interface)
+ |
+ m4u (Multimedia Memory Management Unit)
+ |
+ +--------+
+ | |
+ gals0-rx gals1-rx (Global Async Local Sync rx)
+ | |
+ | |
+ gals0-tx gals1-tx (Global Async Local Sync tx)
+ | | Some SoCs may have GALS.
+ +--------+
+ |
+ SMI Common(Smart Multimedia Interface Common)
+ |
+ +----------------+-------
+ | |
+ | gals-rx There may be GALS in some larbs.
+ | |
+ | |
+ | gals-tx
+ | |
+ SMI larb0 SMI larb1 ... SoCs have several SMI local arbiter(larb).
+ (display) (vdec)
+ | |
+ | |
+ +-----+-----+ +----+----+
+ | | | | | |
+ | | |... | | | ... There are different ports in each larb.
+ | | | | | |
+ OVL0 RDMA0 WDMA0 MC PP VLD
+
+ As shown above, the multimedia HW goes through the SMI and the M4U when it
+ accesses EMI. SMI is a bridge between the M4U and the multimedia HW; it
+ contains the SMI local arbiters and the SMI common. SMI controls whether the
+ multimedia HW should go through the M4U for translation or bypass it and
+ talk directly to EMI. SMI also helps control the power domain and clocks
+ for each local arbiter.
+
+ Normally a local arbiter (larb) is specified for each multimedia HW block,
+ such as display, video decode, and camera, and there are different ports in
+ each larb. For example, the video decode local arbiter has ports such as
+ MC, PP and VLD, each corresponding to part of the video HW.
+
+ In some SoCs there may be a GALS (Global Async Local Sync) module between
+ the SMI common and the M4U, and an additional GALS module between an SMI
+ larb and the SMI common. GALS can be seen as an asynchronous FIFO which
+ helps synchronize modules running at different clock frequencies.
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - mediatek,mt2701-m4u # generation one
+ - mediatek,mt2712-m4u # generation two
+ - mediatek,mt6779-m4u # generation two
+ - mediatek,mt8167-m4u # generation two
+ - mediatek,mt8173-m4u # generation two
+ - mediatek,mt8183-m4u # generation two
+ - mediatek,mt8192-m4u # generation two
+
+ - description: mt7623 generation one
+ items:
+ - const: mediatek,mt7623-m4u
+ - const: mediatek,mt2701-m4u
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: bclk is the block clock.
+
+ clock-names:
+ items:
+ - const: bclk
+
+ mediatek,larbs:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ minItems: 1
+ maxItems: 32
+ description: |
+ List of phandles to the local arbiters in the current SoC.
+ Refer to bindings/memory-controllers/mediatek,smi-larb.yaml. The list must
+ be sorted by local arbiter index: larb0, larb1, larb2...
+
+ '#iommu-cells':
+ const: 1
+ description: |
+ This is the mtk_m4u_id according to the HW. Specifies the mtk_m4u_id as
+ defined in
+ dt-binding/memory/mt2701-larb-port.h for mt2701 and mt7623,
+ dt-binding/memory/mt2712-larb-port.h for mt2712,
+ dt-binding/memory/mt6779-larb-port.h for mt6779,
+ dt-binding/memory/mt8167-larb-port.h for mt8167,
+ dt-binding/memory/mt8173-larb-port.h for mt8173,
+ dt-binding/memory/mt8183-larb-port.h for mt8183,
+ dt-binding/memory/mt8192-larb-port.h for mt8192.
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - mediatek,larbs
+ - '#iommu-cells'
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - mediatek,mt2701-m4u
+ - mediatek,mt2712-m4u
+ - mediatek,mt8173-m4u
+ - mediatek,mt8192-m4u
+
+ then:
+ required:
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - mediatek,mt8192-m4u
+
+ then:
+ required:
+ - power-domains
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/mt8173-clk.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ iommu: iommu@10205000 {
+ compatible = "mediatek,mt8173-m4u";
+ reg = <0x10205000 0x1000>;
+ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infracfg CLK_INFRA_M4U>;
+ clock-names = "bclk";
+ mediatek,larbs = <&larb0 &larb1 &larb2
+ &larb3 &larb4 &larb5>;
+ #iommu-cells = <1>;
+ };
+
+ - |
+ #include <dt-bindings/memory/mt8173-larb-port.h>
+
+ /* Example for a client device */
+ display {
+ compatible = "mediatek,mt8173-disp";
+ iommus = <&iommu M4U_PORT_DISP_OVL0>,
+ <&iommu M4U_PORT_DISP_RDMA0>;
+ };
diff --git a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml
index cde1afa8dfd6..dda44976acc1 100644
--- a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml
+++ b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml
@@ -76,7 +76,6 @@ required:
- compatible
- reg
- '#iommu-cells'
- - power-domains
oneOf:
- required:
@@ -86,6 +85,17 @@ oneOf:
additionalProperties: false
+allOf:
+ - if:
+ properties:
+ compatible:
+ not:
+ contains:
+ const: renesas,ipmmu-vmsa
+ then:
+ required:
+ - power-domains
+
examples:
- |
#include <dt-bindings/clock/r8a7791-cpg-mssr.h>
@@ -93,7 +103,7 @@ examples:
#include <dt-bindings/power/r8a7791-sysc.h>
ipmmu_mx: iommu@fe951000 {
- compatible = "renasas,ipmmu-r8a7791", "renasas,ipmmu-vmsa";
+ compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa";
reg = <0xfe951000 0x1000>;
interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/leds/leds-lgm.yaml b/Documentation/devicetree/bindings/leds/leds-lgm.yaml
new file mode 100644
index 000000000000..32bbf146c01d
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-lgm.yaml
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/leds-lgm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel Lightning Mountain (LGM) SoC LED Serial Shift Output (SSO) Controller driver
+
+maintainers:
+ - Zhu, Yi Xin <Yixin.zhu@intel.com>
+ - Amireddy Mallikarjuna reddy <mallikarjunax.reddy@intel.com>
+
+properties:
+ compatible:
+ const: intel,lgm-ssoled
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ ngpios:
+ minimum: 0
+ maximum: 32
+ description:
+ Number of GPIOs this controller provides.
+
+ intel,sso-update-rate-hz:
+ description:
+ Blink frequency for SOUTs in Hz.
+
+ led-controller:
+ type: object
+ description:
+ This sub-node must contain a sub-node for each LED.
+
+ additionalProperties: false
+
+ patternProperties:
+ "^led@[0-23]$":
+ type: object
+
+ properties:
+ reg:
+ description: Index of the LED.
+ minimum: 0
+ maximum: 2
+
+ intel,sso-hw-trigger:
+ type: boolean
+ description: Indicates that the LED is driven/controlled by hardware.
+
+ intel,sso-hw-blink:
+ type: boolean
+ description: Indicates that LED blinking is handled by hardware.
+
+ intel,sso-blink-rate-hz:
+ description: LED HW blink frequency.
+
+ retain-state-suspended:
+ type: boolean
+ description: The suspend state of LED can be retained.
+
+ retain-state-shutdown:
+ type: boolean
+ description: Retain the state of the LED on shutdown.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - "#gpio-cells"
+ - gpio-controller
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/intel,lgm-clk.h>
+ #include <dt-bindings/leds/common.h>
+
+ ssogpio: ssogpio@e0d40000 {
+ compatible = "intel,sso-led";
+ reg = <0xE0D40000 0x2E4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <32>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ledc>;
+ clocks = <&cgu0 LGM_GCLK_LEDC0>, <&afeclk>;
+ clock-names = "sso", "fpid";
+ intel,sso-update-rate-hz = <250000>;
+
+ led-controller {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ function = "gphy";
+ color = <LED_COLOR_ID_GREEN>;
+ led-gpio = <&ssogpio 0 0>;
+ };
+
+ led@23 {
+ reg = <23>;
+ function = LED_FUNCTION_POWER;
+ color = <LED_COLOR_ID_GREEN>;
+ led-gpio = <&ssogpio 23 0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/leds/richtek,rt8515.yaml b/Documentation/devicetree/bindings/leds/richtek,rt8515.yaml
new file mode 100644
index 000000000000..68c328eec03b
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/richtek,rt8515.yaml
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/richtek,rt8515.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RT8515 1.5A dual channel LED driver
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+ The Richtek RT8515 is a dual channel (two mode) LED driver that
+ supports driving a white LED in flash or torch mode. The maximum
+ current for each mode is defined in hardware using two resistors
+ RFS and RTS.
+
+properties:
+ compatible:
+ const: richtek,rt8515
+
+ enf-gpios:
+ maxItems: 1
+ description: A connection to the 'ENF' (enable flash) pin.
+
+ ent-gpios:
+ maxItems: 1
+ description: A connection to the 'ENT' (enable torch) pin.
+
+ richtek,rfs-ohms:
+ minimum: 7680
+ maximum: 367000
+ description: The resistance value of the RFS resistor. This
+ resistor limits the maximum flash current. It must be set
+ for the property flash-max-microamp to work; the RFS resistor
+ defines the range of the dimmer setting (brightness) of the
+ flash LED.
+
+ richtek,rts-ohms:
+ minimum: 7680
+ maximum: 367000
+ description: The resistance value of the RTS resistor. This
+ resistor limits the maximum torch current. It must be set
+ for the property led-max-microamp to work; the RTS resistor
+ defines the range of the dimmer setting (brightness) of the
+ torch LED.
+
+ led:
+ type: object
+ $ref: common.yaml#
+ properties:
+ function: true
+ color: true
+ flash-max-timeout-us: true
+
+ flash-max-microamp:
+ maximum: 700000
+ description: The maximum current for flash mode
+ is hardwired to the component using the RFS resistor to
+ ground. The maximum hardware current setting is calculated
+ according to the formula Imax = 5500 / RFS. The lowest
+ allowed resistance value is 7.86 kOhm giving an absolute
+ maximum current of 700mA. By setting this attribute in
+ the device tree, you can further restrict the maximum
+ current below the hardware limit. This requires the RFS
+ to be defined as it defines the maximum range.
+
+ led-max-microamp:
+ maximum: 700000
+ description: The maximum current for torch mode
+ is hardwired to the component using the RTS resistor to
+ ground. The maximum hardware current setting is calculated
+ according to the formula Imax = 5500 / RTS. The lowest
+ allowed resistance value is 7.86 kOhm giving an absolute
+ maximum current of 700mA. By setting this attribute in
+ the device tree, you can further restrict the maximum
+ current below the hardware limit. This requires the RTS
+ to be defined as it defines the maximum range.
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - ent-gpios
+ - enf-gpios
+ - led
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/leds/common.h>
+
+ led-controller {
+ compatible = "richtek,rt8515";
+ enf-gpios = <&gpio4 12 GPIO_ACTIVE_HIGH>;
+ ent-gpios = <&gpio4 13 GPIO_ACTIVE_HIGH>;
+ richtek,rfs-ohms = <16000>;
+ richtek,rts-ohms = <100000>;
+
+ led {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ flash-max-timeout-us = <250000>;
+ flash-max-microamp = <150000>;
+ led-max-microamp = <25000>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/leds/ti,tca6507.yaml b/Documentation/devicetree/bindings/leds/ti,tca6507.yaml
index 94c307c98762..32c600387895 100644
--- a/Documentation/devicetree/bindings/leds/ti,tca6507.yaml
+++ b/Documentation/devicetree/bindings/leds/ti,tca6507.yaml
@@ -69,6 +69,7 @@ patternProperties:
if:
patternProperties:
"^gpio@[0-6]$":
+ type: object
properties:
compatible:
contains:
diff --git a/Documentation/devicetree/bindings/mailbox/omap-mailbox.txt b/Documentation/devicetree/bindings/mailbox/omap-mailbox.txt
index 5fe80c1c19fc..12371f5c6cd9 100644
--- a/Documentation/devicetree/bindings/mailbox/omap-mailbox.txt
+++ b/Documentation/devicetree/bindings/mailbox/omap-mailbox.txt
@@ -28,6 +28,9 @@ SoCs has each of these instances form a cluster and combine multiple clusters
into a single IP block present within the Main NavSS. The interrupt lines from
all these clusters are multiplexed and routed to different processor subsystems
over a limited number of common interrupt output lines of an Interrupt Router.
+The AM64x SoCS also uses a single IP block comprising of multiple clusters,
+but the number of clusters are smaller, and the interrupt output lines are
+connected directly to various processors.
Mailbox Device Node:
====================
@@ -42,6 +45,7 @@ Required properties:
"ti,omap4-mailbox" for OMAP44xx, OMAP54xx, AM33xx,
AM43xx and DRA7xx SoCs
"ti,am654-mailbox" for K3 AM65x and J721E SoCs
+ "ti,am64-mailbox" for K3 AM64x SoCs
- reg: Contains the mailbox register address range (base
address and length)
- interrupts: Contains the interrupt information for the mailbox
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
index ffd09b664ff5..5dc1173d03fd 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
+++ b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
@@ -24,6 +24,7 @@ properties:
- qcom,msm8998-apcs-hmss-global
- qcom,qcs404-apcs-apps-global
- qcom,sc7180-apss-shared
+ - qcom,sc8180x-apss-shared
- qcom,sdm660-apcs-hmss-global
- qcom,sdm845-apss-shared
- qcom,sm8150-apss-shared
@@ -33,9 +34,11 @@ properties:
clocks:
description: phandles to the parent clocks of the clock driver
+ minItems: 2
items:
- description: primary pll parent of the clock driver
- description: auxiliary parent
+ - description: reference clock
'#mbox-cells':
const: 1
@@ -44,9 +47,11 @@ properties:
const: 0
clock-names:
+ minItems: 2
items:
- const: pll
- const: aux
+ - const: ref
required:
- compatible
@@ -55,6 +60,35 @@ required:
additionalProperties: false
+allOf:
+ - if:
+ properties:
+ compatible:
+ enum:
+ - qcom,ipq6018-apcs-apps-global
+ - qcom,ipq8074-apcs-apps-global
+ - qcom,msm8916-apcs-kpss-global
+ - qcom,msm8994-apcs-kpss-global
+ - qcom,msm8996-apcs-hmss-global
+ - qcom,msm8998-apcs-hmss-global
+ - qcom,qcs404-apcs-apps-global
+ - qcom,sc7180-apss-shared
+ - qcom,sdm660-apcs-hmss-global
+ - qcom,sdm845-apss-shared
+ - qcom,sm8150-apss-shared
+ then:
+ properties:
+ clocks:
+ maxItems: 2
+ - if:
+ properties:
+ compatible:
+ enum:
+ - qcom,sdx55-apcs-gcc
+ then:
+ properties:
+ clocks:
+ maxItems: 3
examples:
# Example apcs with msm8996
diff --git a/Documentation/devicetree/bindings/media/allegro,al5e.yaml b/Documentation/devicetree/bindings/media/allegro,al5e.yaml
new file mode 100644
index 000000000000..135bea94b587
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/allegro,al5e.yaml
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/allegro,al5e.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allegro DVT Video IP Codecs Device Tree Bindings
+
+maintainers:
+ - Michael Tretter <m.tretter@pengutronix.de>
+
+description: |-
+ Allegro DVT video IP codecs present in the Xilinx ZynqMP SoC. The IP core may
+ either be an H.264/H.265 encoder or an H.264/H.265 decoder IP core.
+
+ Each actual codec engine is controlled by a microcontroller (MCU). Host
+ software uses a provided mailbox interface to communicate with the MCU. The
+ MCUs share an interrupt.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: allegro,al5e-1.1
+ - const: allegro,al5e
+ - items:
+ - const: allegro,al5d-1.1
+ - const: allegro,al5d
+
+ reg:
+ items:
+ - description: The registers
+ - description: The SRAM
+
+ reg-names:
+ items:
+ - const: regs
+ - const: sram
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Core clock
+ - description: MCU clock
+ - description: Core AXI master port clock
+ - description: MCU AXI master port clock
+ - description: AXI4-Lite slave port clock
+
+ clock-names:
+ items:
+ - const: core_clk
+ - const: mcu_clk
+ - const: m_axi_core_aclk
+ - const: m_axi_mcu_aclk
+ - const: s_axi_lite_aclk
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ fpga {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ al5e: video-codec@a0009000 {
+ compatible = "allegro,al5e-1.1", "allegro,al5e";
+ reg = <0 0xa0009000 0 0x1000>,
+ <0 0xa0000000 0 0x8000>;
+ reg-names = "regs", "sram";
+ interrupts = <0 96 4>;
+ clocks = <&xlnx_vcu 0>, <&xlnx_vcu 1>,
+ <&clkc 71>, <&clkc 71>, <&clkc 71>;
+ clock-names = "core_clk", "mcu_clk", "m_axi_core_aclk",
+ "m_axi_mcu_aclk", "s_axi_lite_aclk";
+ };
+ };
+ - |
+ fpga {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ al5d: video-codec@a0029000 {
+ compatible = "allegro,al5d-1.1", "allegro,al5d";
+ reg = <0 0xa0029000 0 0x1000>,
+ <0 0xa0020000 0 0x8000>;
+ reg-names = "regs", "sram";
+ interrupts = <0 96 4>;
+ clocks = <&xlnx_vcu 2>, <&xlnx_vcu 3>,
+ <&clkc 71>, <&clkc 71>, <&clkc 71>;
+ clock-names = "core_clk", "mcu_clk", "m_axi_core_aclk",
+ "m_axi_mcu_aclk", "s_axi_lite_aclk";
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/media/allegro.txt b/Documentation/devicetree/bindings/media/allegro.txt
deleted file mode 100644
index a92e2fbf26c9..000000000000
--- a/Documentation/devicetree/bindings/media/allegro.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-Device-tree bindings for the Allegro DVT video IP codecs present in the Xilinx
-ZynqMP SoC. The IP core may either be a H.264/H.265 encoder or H.264/H.265
-decoder ip core.
-
-Each actual codec engines is controlled by a microcontroller (MCU). Host
-software uses a provided mailbox interface to communicate with the MCU. The
-MCU share an interrupt.
-
-Required properties:
- - compatible: value should be one of the following
- "allegro,al5e-1.1", "allegro,al5e": encoder IP core
- "allegro,al5d-1.1", "allegro,al5d": decoder IP core
- - reg: base and length of the memory mapped register region and base and
- length of the memory mapped sram
- - reg-names: must include "regs" and "sram"
- - interrupts: shared interrupt from the MCUs to the processing system
- - clocks: must contain an entry for each entry in clock-names
- - clock-names: must include "core_clk", "mcu_clk", "m_axi_core_aclk",
- "m_axi_mcu_aclk", "s_axi_lite_aclk"
-
-Example:
- al5e: video-codec@a0009000 {
- compatible = "allegro,al5e-1.1", "allegro,al5e";
- reg = <0 0xa0009000 0 0x1000>,
- <0 0xa0000000 0 0x8000>;
- reg-names = "regs", "sram";
- interrupts = <0 96 4>;
- clocks = <&xlnx_vcu 0>, <&xlnx_vcu 1>,
- <&clkc 71>, <&clkc 71>, <&clkc 71>;
- clock-names = "core_clk", "mcu_clk", "m_axi_core_aclk",
- "m_axi_mcu_aclk", "s_axi_lite_aclk"
- };
- al5d: video-codec@a0029000 {
- compatible = "allegro,al5d-1.1", "allegro,al5d";
- reg = <0 0xa0029000 0 0x1000>,
- <0 0xa0020000 0 0x8000>;
- reg-names = "regs", "sram";
- interrupts = <0 96 4>;
- clocks = <&xlnx_vcu 2>, <&xlnx_vcu 3>,
- <&clkc 71>, <&clkc 71>, <&clkc 71>;
- clock-names = "core_clk", "mcu_clk", "m_axi_core_aclk",
- "m_axi_mcu_aclk", "s_axi_lite_aclk"
- };
diff --git a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
index 09318830db47..6ced94064215 100644
--- a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
+++ b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
@@ -67,14 +67,14 @@ properties:
interconnect-names:
const: dma-mem
- # See ./video-interfaces.txt for details
port:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
additionalProperties: false
properties:
endpoint:
- type: object
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
properties:
bus-width:
@@ -83,7 +83,6 @@ properties:
data-active: true
hsync-active: true
pclk-sample: true
- remote-endpoint: true
vsync-active: true
required:
@@ -91,12 +90,8 @@ properties:
- data-active
- hsync-active
- pclk-sample
- - remote-endpoint
- vsync-active
- required:
- - endpoint
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml
index 2f7058f7760c..c34303b87a5b 100644
--- a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml
+++ b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml
@@ -53,6 +53,7 @@ properties:
maxItems: 1
memory-region:
+ maxItems: 1
description:
CMA pool to use for buffers allocation instead of the default
CMA pool.
diff --git a/Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml b/Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml
index 1fd9b5532a21..8b568072a069 100644
--- a/Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml
+++ b/Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml
@@ -40,17 +40,15 @@ properties:
resets:
maxItems: 1
- # See ./video-interfaces.txt for details
port:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
properties:
endpoint:
- type: object
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
properties:
- remote-endpoint: true
-
bus-width:
enum: [ 8, 10, 12, 16 ]
@@ -60,10 +58,6 @@ properties:
required:
- bus-width
- - remote-endpoint
-
- required:
- - endpoint
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml b/Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml
index 6a56214c6cfd..b80980b1908e 100644
--- a/Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml
+++ b/Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml
@@ -20,6 +20,9 @@ properties:
oneOf:
- const: allwinner,sun8i-h3-deinterlace
- items:
+ - const: allwinner,sun8i-r40-deinterlace
+ - const: allwinner,sun8i-h3-deinterlace
+ - items:
- const: allwinner,sun50i-a64-deinterlace
- const: allwinner,sun8i-h3-deinterlace
diff --git a/Documentation/devicetree/bindings/media/i2c/adv7180.yaml b/Documentation/devicetree/bindings/media/i2c/adv7180.yaml
index d8c54f9d9506..bcfd93739b4f 100644
--- a/Documentation/devicetree/bindings/media/i2c/adv7180.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/adv7180.yaml
@@ -36,17 +36,9 @@ properties:
maxItems: 1
port:
- type: object
- description:
- A node containing a single endpoint as doucmented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
-
- ports:
- type: object
- description:
- A node containing input and output port nodes with endpoint definitions
- as documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
+ $ref: /schemas/graph.yaml#/properties/port
+
+ ports: true
additionalProperties: false
@@ -80,25 +72,20 @@ allOf:
then:
properties:
ports:
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- '#address-cells':
- const: 1
- '#size-cells':
- const: 0
port@3:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Output port
patternProperties:
"^port@[0-2]$":
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Input port
required:
- port@3
- additionalProperties: false
-
required:
- ports
@@ -110,25 +97,20 @@ allOf:
then:
properties:
ports:
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- '#address-cells':
- const: 1
- '#size-cells':
- const: 0
port@6:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Output port
patternProperties:
"^port@[0-5]$":
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Input port
required:
- port@6
- additionalProperties: false
-
required:
- ports
diff --git a/Documentation/devicetree/bindings/media/i2c/adv7604.yaml b/Documentation/devicetree/bindings/media/i2c/adv7604.yaml
index 407baddfaa1d..df634b0c1f8c 100644
--- a/Documentation/devicetree/bindings/media/i2c/adv7604.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/adv7604.yaml
@@ -64,16 +64,12 @@ properties:
description:
Select which input is selected after reset.
- ports:
- type: object
- description:
- A node containing input and output port nodes with endpoint definitions
- as documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
+ ports: true
required:
- compatible
- reg
+ - ports
additionalProperties: false
@@ -86,26 +82,19 @@ allOf:
then:
properties:
ports:
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- '#address-cells':
- const: 1
- '#size-cells':
- const: 0
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Input port
+
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Output port
required:
- port@1
- additionalProperties: false
-
- required:
- - ports
-
- if:
properties:
compatible:
@@ -114,28 +103,20 @@ allOf:
then:
properties:
ports:
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- '#address-cells':
- const: 1
- '#size-cells':
- const: 0
port@2:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Output port
patternProperties:
"^port@[0-1]$":
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Input port
required:
- port@2
- additionalProperties: false
-
- required:
- - ports
-
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.yaml b/Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.yaml
index ff9546e95d05..e53b8d65f381 100644
--- a/Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.yaml
@@ -41,9 +41,9 @@ properties:
maxItems: 1
port:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
- Output video port. See ../video-interfaces.txt.
+ Output video port.
required:
- compatible
diff --git a/Documentation/devicetree/bindings/media/i2c/imi,rdacm2x-gmsl.yaml b/Documentation/devicetree/bindings/media/i2c/imi,rdacm2x-gmsl.yaml
index 3dc06c628e64..e57575c44930 100644
--- a/Documentation/devicetree/bindings/media/i2c/imi,rdacm2x-gmsl.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/imi,rdacm2x-gmsl.yaml
@@ -86,33 +86,9 @@ properties:
maxItems: 3
port:
- type: object
- additionalProperties: false
- description: -|
- Connection to the remote GMSL endpoint are modelled using the OF graph
- bindings in accordance with the video interface bindings defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
-
- The device node contains a single "port" child node with a single
- "endpoint" sub-device.
-
- properties:
- endpoint:
- type: object
- additionalProperties: false
-
- properties:
- remote-endpoint:
- description: -|
- phandle to the remote GMSL endpoint sub-node in the remote node
- port.
- maxItems: 1
-
- required:
- - remote-endpoint
-
- required:
- - endpoint
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Connection to the remote GMSL endpoint.
required:
- compatible
diff --git a/Documentation/devicetree/bindings/media/i2c/imx219.yaml b/Documentation/devicetree/bindings/media/i2c/imx219.yaml
index dfc4d29a4f04..5fc96944b448 100644
--- a/Documentation/devicetree/bindings/media/i2c/imx219.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/imx219.yaml
@@ -40,16 +40,20 @@ properties:
Digital core voltage supply, 1.2 volts
reset-gpios:
+ maxItems: 1
description: |-
Reference to the GPIO connected to the xclr pin, if any.
Must be released (set high) after all supplies are applied.
- # See ../video-interfaces.txt for more details
port:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ additionalProperties: false
+
properties:
endpoint:
- type: object
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
properties:
data-lanes:
description: |-
@@ -60,16 +64,8 @@ properties:
- const: 1
- const: 2
- clock-noncontinuous:
- type: boolean
- description: |-
- MIPI CSI-2 clock is non-continuous if this property is present,
- otherwise it's continuous.
-
- link-frequencies:
- $ref: /schemas/types.yaml#/definitions/uint64-array
- description:
- Allowed data bus frequencies.
+ clock-noncontinuous: true
+ link-frequencies: true
required:
- link-frequencies
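
With the endpoint schema now pulled in from video-interfaces.yaml, clock-noncontinuous and link-frequencies keep their common definitions and only need to be flagged as permitted here. A minimal sketch of a node conforming to the converted binding; the I2C address, clock phandle, receiver endpoint label and frequency value are illustrative assumptions, and the other required supplies are elided:

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        camera@10 {
            compatible = "sony,imx219";
            reg = <0x10>;
            clocks = <&imx219_xclk>;
            /* VANA/VDIG/VDDL supplies and reset-gpios elided for brevity */

            port {
                endpoint {
                    remote-endpoint = <&csi1_ep>;
                    data-lanes = <1 2>;                       /* two-lane CSI-2 */
                    clock-noncontinuous;                      /* boolean from video-interfaces.yaml */
                    link-frequencies = /bits/ 64 <456000000>;
                };
            };
        };
    };
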
diff --git a/Documentation/devicetree/bindings/media/i2c/imx258.yaml b/Documentation/devicetree/bindings/media/i2c/imx258.yaml
new file mode 100644
index 000000000000..515317eff41a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/imx258.yaml
@@ -0,0 +1,134 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/imx258.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sony IMX258 13 Mpixel CMOS Digital Image Sensor
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+description: |-
+ IMX258 is a diagonal 5.867mm (Type 1/3.06) 13 Mega-pixel CMOS active pixel
+ type stacked image sensor with a square pixel array of size 4208 x 3120. It
+ is programmable through the I2C interface. Image data is sent through MIPI
+ CSI-2.
+
+properties:
+ compatible:
+ const: sony,imx258
+
+ assigned-clocks: true
+ assigned-clock-parents: true
+ assigned-clock-rates: true
+
+ clocks:
+ description:
+ Clock frequency from 6 to 27 MHz.
+ maxItems: 1
+
+ reg:
+ maxItems: 1
+
+ reset-gpios:
+ description: |-
+ Reference to the GPIO connected to the XCLR pin, if any.
+
+ vana-supply:
+ description:
+ Analog voltage (VANA) supply, 2.7 V
+
+ vdig-supply:
+ description:
+ Digital I/O voltage (VDIG) supply, 1.2 V
+
+ vif-supply:
+ description:
+ Interface voltage (VIF) supply, 1.8 V
+
+ # See ../video-interfaces.txt for more details
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ additionalProperties: false
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ oneOf:
+ - items:
+ - const: 1
+ - const: 2
+ - const: 3
+ - const: 4
+ - items:
+ - const: 1
+ - const: 2
+
+ link-frequencies: true
+
+ required:
+ - data-lanes
+ - link-frequencies
+
+required:
+ - compatible
+ - reg
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sensor@6c {
+ compatible = "sony,imx258";
+ reg = <0x6c>;
+ clocks = <&imx258_clk>;
+
+ port {
+ endpoint {
+ remote-endpoint = <&csi1_ep>;
+ data-lanes = <1 2 3 4>;
+ link-frequencies = /bits/ 64 <320000000>;
+ };
+ };
+ };
+ };
+
+ /* Oscillator on the camera board */
+ imx258_clk: clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ };
+
+ - |
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sensor@6c {
+ compatible = "sony,imx258";
+ reg = <0x6c>;
+ clocks = <&imx258_clk>;
+
+ assigned-clocks = <&imx258_clk>;
+ assigned-clock-rates = <19200000>;
+
+ port {
+ endpoint {
+ remote-endpoint = <&csi1_ep>;
+ data-lanes = <1 2 3 4>;
+ link-frequencies = /bits/ 64 <633600000>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/maxim,max9286.yaml b/Documentation/devicetree/bindings/media/i2c/maxim,max9286.yaml
index 68ee8c7d9e79..ee16102fdfe7 100644
--- a/Documentation/devicetree/bindings/media/i2c/maxim,max9286.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/maxim,max9286.yaml
@@ -50,82 +50,62 @@ properties:
'#gpio-cells':
const: 2
- ports:
- type: object
+ maxim,reverse-channel-microvolt:
+ minimum: 30000
+ maximum: 200000
+ default: 170000
description: |
- The connections to the MAX9286 GMSL and its endpoint nodes are modelled
- using the OF graph bindings in accordance with the video interface
- bindings defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
-
- The following table lists the port number corresponding to each device
- port.
-
- Port Description
- ----------------------------------------
- Port 0 GMSL Input 0
- Port 1 GMSL Input 1
- Port 2 GMSL Input 2
- Port 3 GMSL Input 3
- Port 4 CSI-2 Output
+ Initial amplitude of the reverse control channel, in micro volts.
- properties:
- '#address-cells':
- const: 1
+ The initial amplitude shall be adjusted to a value compatible with the
+ configuration of the connected remote serializer.
- '#size-cells':
- const: 0
+ Some camera modules (for example RDACM20) include an on-board MCU that
+ pre-programs the embedded serializer with power supply noise immunity
+ (high-threshold) enabled. A typical value of the deserializer's reverse
+ channel amplitude to communicate with pre-programmed serializers is
+ 170000 micro volts.
- port@[0-3]:
- type: object
- properties:
- reg:
- enum: [ 0, 1, 2, 3 ]
+ A typical value for the reverse channel amplitude to communicate with
+ a remote serializer whose high-threshold noise immunity is not enabled
+ is 100000 micro volts.
- endpoint:
- type: object
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
- properties:
- remote-endpoint:
- description: |
- phandle to the remote GMSL source endpoint subnode in the
- remote node port.
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: GMSL Input 0
- required:
- - remote-endpoint
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: GMSL Input 1
- required:
- - reg
- - endpoint
+ port@2:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: GMSL Input 2
- additionalProperties: false
+ port@3:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: GMSL Input 3
port@4:
- type: object
- properties:
- reg:
- const: 4
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: CSI-2 Output
+ properties:
endpoint:
- type: object
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
properties:
- remote-endpoint:
- description: phandle to the remote CSI-2 sink endpoint.
-
- data-lanes:
- description: array of physical CSI-2 data lane indexes.
+ data-lanes: true
required:
- - remote-endpoint
- data-lanes
- required:
- - reg
- - endpoint
-
- additionalProperties: false
-
required:
- port@4
@@ -183,25 +163,8 @@ properties:
requirements of the currently connected remote device.
port:
- type: object
-
- properties:
- endpoint:
- type: object
-
- properties:
- remote-endpoint:
- description: phandle to the MAX9286 sink endpoint.
-
- required:
- - remote-endpoint
-
- additionalProperties: false
-
- required:
- - endpoint
-
- additionalProperties: false
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Connection to the MAX9286 sink.
required:
- compatible
@@ -242,6 +205,8 @@ examples:
gpio-controller;
#gpio-cells = <2>;
+ maxim,reverse-channel-microvolt = <170000>;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
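
Besides the reverse-channel amplitude added to the example context above, the reworked ports description keeps the GMSL inputs on port@0..3 and the CSI-2 output on port@4, whose endpoint must carry data-lanes. A reduced sketch with a single GMSL input; the I2C address, node names and remote endpoint labels are assumptions:

    gmsl-deserializer@48 {
        compatible = "maxim,max9286";
        reg = <0x48>;
        maxim,reverse-channel-microvolt = <170000>;   /* pre-programmed (high-threshold) serializers */
        /* power supply, gpio-controller cells and i2c-mux sub-nodes elided */

        ports {
            #address-cells = <1>;
            #size-cells = <0>;

            port@0 {                                  /* GMSL Input 0 */
                reg = <0>;
                max9286_in0: endpoint {
                    remote-endpoint = <&rdacm20_out0>;
                };
            };

            port@4 {                                  /* CSI-2 Output */
                reg = <4>;
                max9286_csi_out: endpoint {
                    data-lanes = <1 2 3 4>;           /* required by the schema */
                    remote-endpoint = <&csi40_in>;
                };
            };
        };
    };
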
diff --git a/Documentation/devicetree/bindings/media/i2c/mipi-ccs.yaml b/Documentation/devicetree/bindings/media/i2c/mipi-ccs.yaml
index bb3528315f20..701f4e0d138f 100644
--- a/Documentation/devicetree/bindings/media/i2c/mipi-ccs.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/mipi-ccs.yaml
@@ -71,19 +71,18 @@ properties:
enum: [ 0, 180 ]
port:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ additionalProperties: false
+
properties:
endpoint:
- type: object
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
properties:
- link-frequencies:
- $ref: /schemas/types.yaml#/definitions/uint64-array
- description: List of allowed data link frequencies.
- data-lanes:
- minItems: 1
- maxItems: 8
+ link-frequencies: true
+ data-lanes: true
bus-type:
- description: The type of the data bus.
oneOf:
- const: 1 # CSI-2 C-PHY
- const: 3 # CCP2
diff --git a/Documentation/devicetree/bindings/media/i2c/ov8856.yaml b/Documentation/devicetree/bindings/media/i2c/ov8856.yaml
index cde85553fd01..baf92aaaf049 100644
--- a/Documentation/devicetree/bindings/media/i2c/ov8856.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ov8856.yaml
@@ -57,16 +57,13 @@ properties:
active low.
port:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
additionalProperties: false
- description:
- A node containing an output port node with an endpoint definition
- as documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
properties:
endpoint:
- type: object
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
properties:
data-lanes:
@@ -79,18 +76,14 @@ properties:
- const: 4
link-frequencies:
- $ref: /schemas/types.yaml#/definitions/uint64-array
- description:
- Allowed data bus frequencies. 360000000, 180000000 Hz or both
- are supported by the driver.
-
+ description: The frequencies listed are driver limitations, not hardware limitations.
+ maxItems: 2
+ items:
+ enum: [ 360000000, 180000000 ]
required:
- link-frequencies
- required:
- - endpoint
-
required:
- compatible
- reg
@@ -139,4 +132,3 @@ examples:
};
};
...
-
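
Under the tightened schema an OV8856 endpoint may list either or both of the two driver-supported link frequencies. A minimal illustrative port fragment, with the receiver endpoint label assumed:

    port {
        endpoint {
            remote-endpoint = <&csi_in>;
            data-lanes = <1 2 3 4>;
            /* one or both of the two values permitted by the enum */
            link-frequencies = /bits/ 64 <360000000 180000000>;
        };
    };
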
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml
index 1c3879ec4122..63a040944f3d 100644
--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml
@@ -17,6 +17,9 @@ description: |-
@ 1600x1200 (UXGA) resolution transferred over a 1-lane MIPI interface. The
sensor output is available via CSI-2 serial data output.
+allOf:
+ - $ref: /schemas/media/video-interface-devices.yaml#
+
properties:
compatible:
const: ovti,ov02a10
@@ -66,42 +69,34 @@ properties:
maxItems: 1
rotation:
- description:
- Definition of the sensor's placement.
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - enum:
- - 0 # Sensor Mounted Upright
- - 180 # Sensor Mounted Upside Down
- default: 0
-
- # See ../video-interfaces.txt for details
+ enum:
+ - 0 # Sensor Mounted Upright
+ - 180 # Sensor Mounted Upside Down
+ default: 0
+
port:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
additionalProperties: false
description:
Output port node, single endpoint describing the CSI-2 transmitter.
properties:
endpoint:
- type: object
- additionalProperties: false
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
properties:
link-frequencies: true
ovti,mipi-clock-voltage:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
+ $ref: "/schemas/types.yaml#/definitions/uint32"
description:
Definition of MIPI clock voltage unit. This entry corresponds to
the link speed defined by the 'link-frequencies' property.
If present, the value shall be in the range of 0-4.
default: 4
- remote-endpoint: true
required:
- link-frequencies
- - remote-endpoint
required:
- endpoint
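
With rotation inherited from video-interface-devices.yaml and ovti,mipi-clock-voltage kept as an endpoint property, an OV02A10 node could be sketched as below; the I2C address, clock phandle, receiver label and link frequency are assumptions, and the supply and GPIO properties are elided:

    camera@3d {
        compatible = "ovti,ov02a10";
        reg = <0x3d>;
        clocks = <&camera_clk>;
        rotation = <180>;                             /* sensor mounted upside down */
        /* dovdd/avdd/dvdd supplies, powerdown/reset GPIOs elided */

        port {
            endpoint {
                remote-endpoint = <&csi_in>;
                link-frequencies = /bits/ 64 <390000000>;
                ovti,mipi-clock-voltage = <3>;
            };
        };
    };
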
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
index 43bf749807e1..cf456f8d9ddc 100644
--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
@@ -50,11 +50,9 @@ properties:
Definition of the regulator used as digital power supply.
port:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
- A node containing an output port node with an endpoint definition
- as documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
+ A node containing an output port node.
required:
- compatible
diff --git a/Documentation/devicetree/bindings/media/i2c/ov5647.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov5647.yaml
index 280c62afae13..3e5d82df90a2 100644
--- a/Documentation/devicetree/bindings/media/i2c/ov5647.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov5647.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/media/i2c/ov5647.yaml#
+$id: http://devicetree.org/schemas/media/i2c/ovti,ov5647.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Omnivision OV5647 raw image sensor
@@ -31,29 +31,16 @@ properties:
maxItems: 1
port:
- type: object
- description: |-
- Should contain one endpoint sub-node used to model connection to the
- video receiver according to the specification defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/port
+ additionalProperties: false
properties:
endpoint:
- type: object
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
properties:
- remote-endpoint:
- description: |-
- phandle to the video receiver input port.
-
- clock-noncontinuous:
- type: boolean
- description: |-
- Set to true to allow MIPI CSI-2 non-continuous clock operations.
-
- additionalProperties: false
-
- additionalProperties: false
+ clock-noncontinuous: true
required:
- compatible
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov5648.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov5648.yaml
new file mode 100644
index 000000000000..9149f5685688
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov5648.yaml
@@ -0,0 +1,109 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/ovti,ov5648.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: OmniVision OV5648 Image Sensor Device Tree Bindings
+
+maintainers:
+ - Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+
+properties:
+ compatible:
+ const: ovti,ov5648
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: XVCLK Clock
+
+ assigned-clocks:
+ maxItems: 1
+
+ assigned-clock-rates:
+ maxItems: 1
+
+ dvdd-supply:
+ description: Digital Domain Power Supply
+
+ avdd-supply:
+ description: Analog Domain Power Supply (internal AVDD is used if missing)
+
+ dovdd-supply:
+ description: I/O Domain Power Supply
+
+ powerdown-gpios:
+ maxItems: 1
+ description: Power Down Pin GPIO Control (active low)
+
+ reset-gpios:
+ maxItems: 1
+ description: Reset Pin GPIO Control (active low)
+
+ port:
+ description: MIPI CSI-2 transmitter port
+ $ref: /schemas/graph.yaml#/properties/port
+ additionalProperties: false
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ link-frequencies: true
+
+ data-lanes:
+ minItems: 1
+ maxItems: 2
+
+ required:
+ - data-lanes
+ - link-frequencies
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - assigned-clocks
+ - assigned-clock-rates
+ - dvdd-supply
+ - dovdd-supply
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/sun8i-v3s-ccu.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ov5648: camera@36 {
+ compatible = "ovti,ov5648";
+ reg = <0x36>;
+
+ dvdd-supply = <&ov5648_dvdd>;
+ avdd-supply = <&ov5648_avdd>;
+ dovdd-supply = <&ov5648_dovdd>;
+ clocks = <&ov5648_xvclk 0>;
+ assigned-clocks = <&ov5648_xvclk 0>;
+ assigned-clock-rates = <24000000>;
+
+
+ ov5648_out: port {
+ ov5648_out_mipi_csi2: endpoint {
+ data-lanes = <1 2>;
+ link-frequencies = /bits/ 64 <210000000 168000000>;
+
+ remote-endpoint = <&mipi_csi2_in_ov5648>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov772x.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov772x.yaml
index 6866c2cdac50..44529425ce3a 100644
--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov772x.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov772x.yaml
@@ -37,13 +37,14 @@ properties:
maxItems: 1
port:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
description: |
- Video output port. See ../video-interfaces.txt.
+ Video output port.
properties:
endpoint:
- type: object
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
properties:
bus-type:
@@ -91,8 +92,6 @@ properties:
required:
- bus-type
- unevaluatedProperties: false
-
additionalProperties: false
required:
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov8865.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov8865.yaml
new file mode 100644
index 000000000000..0699c7e4fdeb
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov8865.yaml
@@ -0,0 +1,118 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/ovti,ov8865.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: OmniVision OV8865 Image Sensor Device Tree Bindings
+
+maintainers:
+ - Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+
+properties:
+ compatible:
+ const: ovti,ov8865
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: EXTCLK Clock
+
+ assigned-clocks:
+ maxItems: 1
+
+ assigned-clock-rates:
+ maxItems: 1
+
+ dvdd-supply:
+ description: Digital Domain Power Supply
+
+ avdd-supply:
+ description: Analog Domain Power Supply
+
+ dovdd-supply:
+ description: I/O Domain Power Supply
+
+ powerdown-gpios:
+ maxItems: 1
+ description: Power Down Pin GPIO Control (active low)
+
+ reset-gpios:
+ maxItems: 1
+ description: Reset Pin GPIO Control (active low)
+
+ port:
+ description: MIPI CSI-2 transmitter port
+ $ref: /schemas/graph.yaml#/properties/port
+ additionalProperties: false
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ link-frequencies: true
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - data-lanes
+ - link-frequencies
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - assigned-clocks
+ - assigned-clock-rates
+ - dvdd-supply
+ - avdd-supply
+ - dovdd-supply
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/sun8i-a83t-ccu.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ov8865: camera@36 {
+ compatible = "ovti,ov8865";
+ reg = <0x36>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&csi_mclk_pin>;
+
+ clocks = <&ccu CLK_CSI_MCLK>;
+ assigned-clocks = <&ccu CLK_CSI_MCLK>;
+ assigned-clock-rates = <24000000>;
+
+ avdd-supply = <&reg_ov8865_avdd>;
+ dovdd-supply = <&reg_ov8865_dovdd>;
+ dvdd-supply = <&reg_ov8865_dvdd>;
+
+ powerdown-gpios = <&pio 4 17 GPIO_ACTIVE_LOW>; /* PE17 */
+ reset-gpios = <&pio 4 16 GPIO_ACTIVE_LOW>; /* PE16 */
+
+ port {
+ ov8865_out_mipi_csi2: endpoint {
+ data-lanes = <1 2 3 4>;
+ link-frequencies = /bits/ 64 <360000000>;
+
+ remote-endpoint = <&mipi_csi2_in_ov8865>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml
index eb12526a462f..c9760f895b3e 100644
--- a/Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml
@@ -15,6 +15,9 @@ description: |
interface. Image data is sent through MIPI CSI-2, through 2 or 4 lanes at a
maximum throughput of 1.2Gbps/lane.
+allOf:
+ - $ref: ../video-interface-devices.yaml#
+
properties:
compatible:
const: sony,imx214
@@ -44,25 +47,21 @@ properties:
vddd-supply:
description: Chip digital core regulator (1.12V).
- flash-leds:
- description: See ../video-interfaces.txt
-
- lens-focus:
- description: See ../video-interfaces.txt
+ flash-leds: true
+ lens-focus: true
port:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
description: |
- Video output port. See ../video-interfaces.txt.
+ Video output port.
properties:
endpoint:
- type: object
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
properties:
data-lanes:
- $ref: /schemas/types.yaml#/definitions/uint32-array
- description: See ../video-interfaces.txt
anyOf:
- items:
- const: 1
@@ -73,16 +72,12 @@ properties:
- const: 3
- const: 4
- link-frequencies:
- $ref: /schemas/types.yaml#/definitions/uint64-array
- description: See ../video-interfaces.txt
+ link-frequencies: true
required:
- data-lanes
- link-frequencies
- unevaluatedProperties: false
-
additionalProperties: false
required:
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx274.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx274.yaml
index a66acb20d59b..4271fc3cc623 100644
--- a/Documentation/devicetree/bindings/media/i2c/sony,imx274.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx274.yaml
@@ -41,8 +41,7 @@ properties:
description: Sensor digital IO 1.2 V supply.
port:
- type: object
- description: Output video port. See ../video-interfaces.txt.
+ $ref: /schemas/graph.yaml#/properties/port
required:
- compatible
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml
new file mode 100644
index 000000000000..27cc5b7ff613
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml
@@ -0,0 +1,90 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2021 Intel Corporation
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/sony,imx334.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sony IMX334 Sensor
+
+maintainers:
+ - Paul J. Murphy <paul.j.murphy@intel.com>
+ - Daniele Alessandrelli <daniele.alessandrelli@intel.com>
+
+description:
+ IMX334 sensor is a Sony CMOS active pixel digital image sensor with an active
+ array size of 3864H x 2202V. It is programmable through the I2C interface. The
+ I2C client address is fixed to 0x1a as per sensor data sheet. Image data is
+ sent through MIPI CSI-2.
+
+properties:
+ compatible:
+ const: sony,imx334
+ reg:
+ description: I2C address
+ maxItems: 1
+
+ assigned-clocks: true
+ assigned-clock-parents: true
+ assigned-clock-rates: true
+
+ clocks:
+ description: Clock frequency from 6 to 27 MHz, 37.125MHz, 74.25MHz
+ maxItems: 1
+
+ reset-gpios:
+ description: Reference to the GPIO connected to the XCLR pin, if any.
+
+ port:
+ additionalProperties: false
+ $ref: /schemas/graph.yaml#/properties/port
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes: true
+ link-frequencies: true
+
+ required:
+ - data-lanes
+ - link-frequencies
+
+ required:
+ - endpoint
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ camera@1a {
+ compatible = "sony,imx334";
+ reg = <0x1a>;
+ clocks = <&imx334_clk>;
+
+ assigned-clocks = <&imx334_clk>;
+ assigned-clock-parents = <&imx334_clk_parent>;
+ assigned-clock-rates = <24000000>;
+
+ port {
+ imx334: endpoint {
+ remote-endpoint = <&cam>;
+ data-lanes = <1 2 3 4>;
+ link-frequencies = /bits/ 64 <891000000>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml b/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml
index 49bff738aca5..c14c7d827b00 100644
--- a/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml
+++ b/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml
@@ -23,30 +23,24 @@ properties:
interrupts:
maxItems: 1
+ power-domains:
+ maxItems: 1
+
port:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
additionalProperties: false
properties:
endpoint:
- type: object
- additionalProperties: false
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
- # Properties described in
- # Documentation/devicetree/bindings/media/video-interfaces.txt
properties:
- remote-endpoint: true
hsync-active: true
vsync-active: true
pclk-sample: true
bus-type: true
- required:
- - remote-endpoint
-
- required:
- - endpoint
-
clocks:
minItems: 1
maxItems: 3
@@ -75,6 +69,7 @@ additionalProperties: false
examples:
- |
#include <dt-bindings/clock/marvell,mmp2.h>
+ #include <dt-bindings/power/marvell,mmp2.h>
camera@d420a000 {
compatible = "marvell,mmp2-ccic";
@@ -84,6 +79,7 @@ examples:
clock-names = "axi";
#clock-cells = <0>;
clock-output-names = "mclk";
+ power-domains = <&soc_clocks MMP3_POWER_DOMAIN_CAMERA>;
port {
camera0_0: endpoint {
diff --git a/Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.txt b/Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.txt
index 044b11913c49..cf60c5acc0e4 100644
--- a/Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.txt
+++ b/Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.txt
@@ -16,7 +16,7 @@ Required properties:
- power-domains: a phandle to the power domain, see
Documentation/devicetree/bindings/power/power_domain.txt for details.
- mediatek,larb: must contain the local arbiters in the current Socs, see
- Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+ Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
for details.
- iommus: should point to the respective IOMMU block with master port as
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
diff --git a/Documentation/devicetree/bindings/media/mediatek-jpeg-encoder.txt b/Documentation/devicetree/bindings/media/mediatek-jpeg-encoder.txt
index 736be7cad385..acfb50375b8a 100644
--- a/Documentation/devicetree/bindings/media/mediatek-jpeg-encoder.txt
+++ b/Documentation/devicetree/bindings/media/mediatek-jpeg-encoder.txt
@@ -14,7 +14,7 @@ Required properties:
- power-domains: a phandle to the power domain, see
Documentation/devicetree/bindings/power/power_domain.txt for details.
- mediatek,larb: must contain the local arbiters in the current SoCs, see
- Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+ Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
for details.
- iommus: should point to the respective IOMMU block with master port as
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
diff --git a/Documentation/devicetree/bindings/media/mediatek-mdp.txt b/Documentation/devicetree/bindings/media/mediatek-mdp.txt
index 0d03e3ae2be2..f4798d04e925 100644
--- a/Documentation/devicetree/bindings/media/mediatek-mdp.txt
+++ b/Documentation/devicetree/bindings/media/mediatek-mdp.txt
@@ -28,7 +28,7 @@ Required properties (DMA function blocks, child node):
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
for details.
- mediatek,larb: must contain the local arbiters in the current Socs, see
- Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+ Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
for details.
Example:
diff --git a/Documentation/devicetree/bindings/media/nxp,imx7-csi.yaml b/Documentation/devicetree/bindings/media/nxp,imx7-csi.yaml
index 4e81a47e60ac..d91575b8ebb9 100644
--- a/Documentation/devicetree/bindings/media/nxp,imx7-csi.yaml
+++ b/Documentation/devicetree/bindings/media/nxp,imx7-csi.yaml
@@ -33,10 +33,7 @@ properties:
- const: mclk
port:
- type: object
- description:
- A node containing input port nodes with endpoint definitions as documented
- in Documentation/devicetree/bindings/media/video-interfaces.txt
+ $ref: /schemas/graph.yaml#/properties/port
required:
- compatible
diff --git a/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml b/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
index 0668332959e7..be47a7b62ca9 100644
--- a/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
+++ b/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
@@ -58,35 +58,22 @@ properties:
Differential receiver (HS-RX) settle time
ports:
- type: object
- description:
- A node containing input and output port nodes with endpoint definitions
- as documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description:
Input port node, single endpoint describing the CSI-2 transmitter.
properties:
- reg:
- const: 0
-
endpoint:
- type: object
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
properties:
data-lanes:
- $ref: /schemas/types.yaml#/definitions/uint32-array
- description: See ../video-interfaces.txt
oneOf:
- items:
- const: 1
@@ -94,18 +81,11 @@ properties:
- const: 1
- const: 2
- remote-endpoint: true
-
required:
- data-lanes
- - remote-endpoint
-
- additionalProperties: false
-
- additionalProperties: false
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Output port node
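
After the conversion, port@0 remains the CSI-2 input whose endpoint must give one or two data lanes, and port@1 the output towards the SoC capture path. A sketch of the ports layout; the unit address, labels and lane count follow a typical i.MX7 setup but are assumptions here:

    mipi-csi@30750000 {
        compatible = "fsl,imx7-mipi-csi2";
        /* reg, interrupts, clocks, power-domains elided */

        ports {
            #address-cells = <1>;
            #size-cells = <0>;

            port@0 {
                reg = <0>;
                mipi_from_sensor: endpoint {
                    remote-endpoint = <&ov2680_to_mipi>;
                    data-lanes = <1>;                 /* single-lane sensor */
                };
            };

            port@1 {
                reg = <1>;
                mipi_to_csi: endpoint {
                    remote-endpoint = <&csi_from_mipi>;
                };
            };
        };
    };
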
diff --git a/Documentation/devicetree/bindings/media/renesas,ceu.yaml b/Documentation/devicetree/bindings/media/renesas,ceu.yaml
index c7e1e4fe67e6..50e0740af15a 100644
--- a/Documentation/devicetree/bindings/media/renesas,ceu.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,ceu.yaml
@@ -34,18 +34,15 @@ properties:
maxItems: 1
port:
- type: object
- additionalProperties: false
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
properties:
endpoint:
- type: object
- additionalProperties: false
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
- # Properties described in
- # Documentation/devicetree/bindings/media/video-interfaces.txt
properties:
- remote-endpoint: true
hsync-active: true
vsync-active: true
field-even-active: false
@@ -53,12 +50,6 @@ properties:
enum: [8, 16]
default: 8
- required:
- - remote-endpoint
-
- required:
- - endpoint
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/media/renesas,csi2.yaml b/Documentation/devicetree/bindings/media/renesas,csi2.yaml
index 533c2f181db7..20396f1be999 100644
--- a/Documentation/devicetree/bindings/media/renesas,csi2.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,csi2.yaml
@@ -46,24 +46,19 @@ properties:
maxItems: 1
ports:
- type: object
- description:
- A node containing input and output port nodes with endpoint definitions
- as documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description:
Input port node, single endpoint describing the CSI-2 transmitter.
properties:
- reg:
- const: 0
-
endpoint:
- type: object
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
properties:
clock-lanes:
@@ -72,50 +67,19 @@ properties:
data-lanes:
maxItems: 1
- remote-endpoint: true
-
required:
- clock-lanes
- data-lanes
- - remote-endpoint
-
- additionalProperties: false
-
- additionalProperties: false
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Output port node, multiple endpoints describing all the R-Car VIN
modules connected the CSI-2 receiver.
- properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
- reg:
- const: 1
-
- patternProperties:
- "^endpoint@[0-9a-f]$":
- type: object
-
- properties:
- reg:
- maxItems: 1
-
- remote-endpoint: true
-
- required:
- - reg
- - remote-endpoint
-
- additionalProperties: false
-
- additionalProperties: false
+ required:
+ - port@0
+ - port@1
required:
- compatible
diff --git a/Documentation/devicetree/bindings/media/renesas,vin.yaml b/Documentation/devicetree/bindings/media/renesas,vin.yaml
index ad2fe660364b..fe7c4cbfe4ba 100644
--- a/Documentation/devicetree/bindings/media/renesas,vin.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,vin.yaml
@@ -69,15 +69,15 @@ properties:
#The per-board settings for Gen2 and RZ/G1 platforms:
port:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description:
- A node containing a parallel input with a single endpoint definitions as
- documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
+ A node containing a parallel input.
properties:
endpoint:
- type: object
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
properties:
hsync-active:
@@ -106,15 +106,6 @@ properties:
data-active: true
- remote-endpoint: true
-
- required:
- - remote-endpoint
-
- additionalProperties: false
-
- additionalProperties: false
-
#The per-board settings for Gen3 and RZ/G2 platforms:
renesas,id:
description: VIN channel number
@@ -123,23 +114,18 @@ properties:
maximum: 15
ports:
- type: object
- description:
- A node containing input nodes with endpoint definitions as documented in
- Documentation/devicetree/bindings/media/video-interfaces.txt
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Input port node, single endpoint describing a parallel input source.
properties:
- reg:
- const: 0
-
endpoint:
- type: object
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
properties:
hsync-active:
@@ -168,98 +154,29 @@ properties:
data-active: true
- remote-endpoint: true
-
- required:
- - remote-endpoint
-
- additionalProperties: false
-
- required:
- - endpoint
-
- additionalProperties: false
-
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description:
Input port node, multiple endpoints describing all the R-Car CSI-2
modules connected the VIN.
properties:
- '#address-cells':
- const: 1
-
- '#size-cells':
- const: 0
-
- reg:
- const: 1
-
endpoint@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/endpoint
description: Endpoint connected to CSI20.
- properties:
- reg:
- const: 0
-
- remote-endpoint: true
-
- required:
- - reg
- - remote-endpoint
-
- additionalProperties: false
-
endpoint@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/endpoint
description: Endpoint connected to CSI21.
- properties:
- reg:
- const: 1
-
- remote-endpoint: true
-
- required:
- - reg
- - remote-endpoint
-
- additionalProperties: false
-
endpoint@2:
- type: object
+ $ref: /schemas/graph.yaml#/properties/endpoint
description: Endpoint connected to CSI40.
- properties:
- reg:
- const: 2
-
- remote-endpoint: true
-
- required:
- - reg
- - remote-endpoint
-
- additionalProperties: false
-
endpoint@3:
- type: object
+ $ref: /schemas/graph.yaml#/properties/endpoint
description: Endpoint connected to CSI41.
- properties:
- reg:
- const: 3
-
- remote-endpoint: true
-
- required:
- - reg
- - remote-endpoint
-
- additionalProperties: false
-
anyOf:
- required:
- endpoint@0
@@ -270,8 +187,6 @@ properties:
- required:
- endpoint@3
- additionalProperties: false
-
required:
- compatible
- reg
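
For Gen3 and RZ/G2 the ports node now only describes port@0 (parallel input) and port@1, whose numbered endpoints select which R-Car CSI-2 receiver the VIN is wired to; at least one of endpoint@0..3 must be present. A sketch showing only the CSI40 connection, with the compatible, unit address and labels assumed:

    vin0: video@e6ef0000 {
        compatible = "renesas,vin-r8a7795";
        /* reg, clocks, power-domains, resets, renesas,id elided */

        ports {
            #address-cells = <1>;
            #size-cells = <0>;

            port@1 {
                #address-cells = <1>;
                #size-cells = <0>;
                reg = <1>;

                vin0csi40: endpoint@2 {               /* endpoint@2 = connection to CSI40 */
                    reg = <2>;
                    remote-endpoint = <&csi40vin0>;
                };
            };
        };
    };
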
diff --git a/Documentation/devicetree/bindings/media/rockchip-isp1.yaml b/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
index 2004c054ed1a..a6b1eff879ed 100644
--- a/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
+++ b/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
@@ -56,56 +56,26 @@ properties:
power-domains:
maxItems: 1
- # See ./video-interfaces.txt for details
ports:
- type: object
- additionalProperties: false
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description: connection point for sensors at MIPI-DPHY RX0
- additionalProperties: false
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
- reg:
- const: 0
-
- patternProperties:
endpoint:
- type: object
- additionalProperties: false
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
properties:
- reg:
- maxItems: 1
-
data-lanes:
minItems: 1
maxItems: 4
- remote-endpoint: true
-
- required:
- - reg
- - "#address-cells"
- - "#size-cells"
-
required:
- - "#address-cells"
- - "#size-cells"
- port@0
required:
diff --git a/Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml b/Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml
index c18574bb3e81..41e1d0cd80e5 100644
--- a/Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml
+++ b/Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml
@@ -37,16 +37,15 @@ properties:
maxItems: 1
port:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description:
- DCMI supports a single port node with parallel bus. It should contain
- one 'port' child node with child 'endpoint' node. Please refer to the
- bindings defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ DCMI supports a single port node with parallel bus.
properties:
endpoint:
- type: object
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
properties:
bus-type:
@@ -57,8 +56,6 @@ properties:
enum: [8, 10, 12, 14]
default: 8
- remote-endpoint: true
-
allOf:
- if:
properties:
@@ -73,14 +70,9 @@ properties:
enum: [8]
required:
- - remote-endpoint
- bus-type
- pclk-sample
- unevaluatedProperties: false
-
- additionalProperties: false
-
required:
- compatible
- reg
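
The DCMI endpoint still has to carry bus-type and pclk-sample, with bus-width restricted to 8 when BT.656 is selected. A minimal parallel-bus sketch; the unit address and the sensor endpoint label are assumptions:

    dcmi@4c006000 {
        compatible = "st,stm32-dcmi";
        /* reg, interrupts, clocks, resets, dmas elided */

        port {
            dcmi_in: endpoint {
                remote-endpoint = <&ov5640_out>;
                bus-type = <5>;                       /* parallel bus */
                bus-width = <8>;
                hsync-active = <0>;
                vsync-active = <0>;
                pclk-sample = <1>;
            };
        };
    };
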
diff --git a/Documentation/devicetree/bindings/media/ti,cal.yaml b/Documentation/devicetree/bindings/media/ti,cal.yaml
index 5e066629287d..65177cd69514 100644
--- a/Documentation/devicetree/bindings/media/ti,cal.yaml
+++ b/Documentation/devicetree/bindings/media/ti,cal.yaml
@@ -15,10 +15,7 @@ description: |-
processing capability to connect CSI2 image-sensor modules to the
DRA72x device.
- CAL supports 2 camera port nodes on MIPI bus. Each CSI2 camera port nodes
- should contain a 'port' child node with child 'endpoint' node. Please
- refer to the bindings defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
+ CAL supports 2 camera port nodes on MIPI bus.
properties:
compatible:
@@ -67,31 +64,19 @@ properties:
Documentation/devicetree/bindings/power/power_domain.txt
maxItems: 1
- # See ./video-interfaces.txt for details
ports:
- type: object
- additionalProperties: false
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
port@0:
- type: object
- additionalProperties: false
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: CSI2 Port #0
properties:
- reg:
- const: 0
- description: CSI2 Port #0
-
- patternProperties:
endpoint:
- type: object
- additionalProperties: false
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
properties:
clock-lanes:
@@ -101,24 +86,15 @@ properties:
minItems: 1
maxItems: 4
- remote-endpoint: true
-
- required:
- - reg
-
port@1:
- type: object
- additionalProperties: false
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: CSI2 Port #1
properties:
- reg:
- const: 1
- description: CSI2 Port #1
-
- patternProperties:
endpoint:
- type: object
- additionalProperties: false
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
properties:
clock-lanes:
@@ -128,14 +104,7 @@ properties:
minItems: 1
maxItems: 4
- remote-endpoint: true
-
- required:
- - reg
-
required:
- - "#address-cells"
- - "#size-cells"
- port@0
required:
diff --git a/Documentation/devicetree/bindings/media/video-interface-devices.yaml b/Documentation/devicetree/bindings/media/video-interface-devices.yaml
new file mode 100644
index 000000000000..4527f56a5a6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video-interface-devices.yaml
@@ -0,0 +1,406 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/video-interface-devices.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common bindings for video receiver and transmitter devices
+
+maintainers:
+ - Jacopo Mondi <jacopo@jmondi.org>
+ - Sakari Ailus <sakari.ailus@linux.intel.com>
+
+properties:
+ flash-leds:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ An array of phandles, each referring to a flash LED, a sub-node of the LED
+ driver device node.
+
+ lens-focus:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ A phandle to the node of the focus lens controller.
+
+ rotation:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 90, 180, 270 ]
+ description: |
+ The camera rotation is expressed as the angular difference in degrees
+ between two reference systems, one relative to the camera module, and one
+ defined on the external world scene to be captured when projected on the
+ image sensor pixel array.
+
+ A camera sensor has a 2-dimensional reference system 'Rc' defined by its
+ pixel array read-out order. The origin is set to the first pixel being
+ read out, the X-axis points along the column read-out direction towards
+ the last columns, and the Y-axis along the row read-out direction towards
+ the last row.
+
+ A typical example for a sensor with a 2592x1944 pixel array matrix
+ observed from the front is:
+
+ 2591 X-axis 0
+ <------------------------+ 0
+ .......... ... ..........!
+ .......... ... ..........! Y-axis
+ ... !
+ .......... ... ..........!
+ .......... ... ..........! 1943
+ V
+
+ The external world scene reference system 'Rs' is a 2-dimensional
+ reference system on the focal plane of the camera module. The origin is
+ placed on the top-left corner of the visible scene, the X-axis points
+ towards the right, and the Y-axis points towards the bottom of the scene.
+ The top, bottom, left and right directions are intentionally not defined
+ and depend on the environment in which the camera is used.
+
+ A typical example of a (very common) picture of a shark swimming from left
+ to right, as seen from the camera, is:
+
+ 0 X-axis
+ 0 +------------------------------------->
+ !
+ !
+ !
+ ! |\____)\___
+ ! ) _____ __`<
+ ! |/ )/
+ !
+ !
+ !
+ V
+ Y-axis
+
+ with the reference system 'Rs' placed on the camera focal plane:
+
+ ¸.·˙!
+ ¸.·˙ !
+ _ ¸.·˙ !
+ +-/ \-+¸.·˙ !
+ | (o) | ! Camera focal plane
+ +-----+˙·.¸ !
+ ˙·.¸ !
+ ˙·.¸ !
+ ˙·.¸!
+
+ When projected on the sensor's pixel array, the image and the associated
+ reference system 'Rs' are typically (but not always) inverted, due to the
+ camera module's lens optical inversion effect.
+
+ Assuming the above represented scene of the swimming shark, the lens
+ inversion projects the scene and its reference system onto the sensor
+ pixel array, seen from the front of the camera sensor, as follows:
+
+ Y-axis
+ ^
+ !
+ !
+ !
+ ! |\_____)\__
+ ! ) ____ ___.<
+ ! |/ )/
+ !
+ !
+ !
+ 0 +------------------------------------->
+ 0 X-axis
+
+ Note the shark being upside-down.
+
+ The resulting projected reference system is named 'Rp'.
+
+ The camera rotation property is then defined as the angular difference in
+ the counter-clockwise direction between the camera reference system 'Rc'
+ and the projected scene reference system 'Rp'. It is expressed in degrees
+ as a number in the range [0, 360[.
+
+ Examples
+
+ 0 degrees camera rotation:
+
+
+ Y-Rp
+ ^
+ Y-Rc !
+ ^ !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! 0 +------------------------------------->
+ ! 0 X-Rp
+ 0 +------------------------------------->
+ 0 X-Rc
+
+
+ X-Rc 0
+ <------------------------------------+ 0
+ X-Rp 0 !
+ <------------------------------------+ 0 !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! V
+ ! Y-Rc
+ V
+ Y-Rp
+
+ 90 degrees camera rotation:
+
+ 0 Y-Rc
+ 0 +-------------------->
+ ! Y-Rp
+ ! ^
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! 0 +------------------------------------->
+ ! 0 X-Rp
+ !
+ !
+ !
+ !
+ V
+ X-Rc
+
+ 180 degrees camera rotation:
+
+ 0
+ <------------------------------------+ 0
+ X-Rc !
+ Y-Rp !
+ ^ !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! V
+ ! Y-Rc
+ 0 +------------------------------------->
+ 0 X-Rp
+
+ 270 degrees camera rotation:
+
+ 0 Y-Rc
+ 0 +-------------------->
+ ! 0
+ ! <-----------------------------------+ 0
+ ! X-Rp !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! V
+ ! Y-Rp
+ !
+ !
+ !
+ !
+ V
+ X-Rc
+
+
+ Example one - Webcam
+
+ A camera module installed on the user facing part of a laptop screen
+ casing used for video calls. The captured images are meant to be displayed
+ in landscape mode (width > height) on the laptop screen.
+
+ The camera is typically mounted upside-down to compensate the lens optical
+ inversion effect:
+
+ Y-Rp
+ Y-Rc ^
+ ^ !
+ ! !
+ ! ! |\_____)\__
+ ! ! ) ____ ___.<
+ ! ! |/ )/
+ ! !
+ ! !
+ ! !
+ ! 0 +------------------------------------->
+ ! 0 X-Rp
+ 0 +------------------------------------->
+ 0 X-Rc
+
+ The two reference systems are aligned, the resulting camera rotation is
+ 0 degrees, no rotation correction needs to be applied to the resulting
+ image once captured to memory buffers to correctly display it to users:
+
+ +--------------------------------------+
+ ! !
+ ! !
+ ! !
+ ! |\____)\___ !
+ ! ) _____ __`< !
+ ! |/ )/ !
+ ! !
+ ! !
+ ! !
+ +--------------------------------------+
+
+ If the camera sensor is not mounted upside-down to compensate for the lens
+ optical inversion, the two reference systems will not be aligned, with
+ 'Rp' being rotated 180 degrees relatively to 'Rc':
+
+
+ X-Rc 0
+ <------------------------------------+ 0
+ !
+ Y-Rp !
+ ^ !
+ ! !
+ ! |\_____)\__ !
+ ! ) ____ ___.< !
+ ! |/ )/ !
+ ! !
+ ! !
+ ! V
+ ! Y-Rc
+ 0 +------------------------------------->
+ 0 X-Rp
+
+ The image once captured to memory will then be rotated by 180 degrees:
+
+ +--------------------------------------+
+ ! !
+ ! !
+ ! !
+ ! __/(_____/| !
+ ! >.___ ____ ( !
+ ! \( \| !
+ ! !
+ ! !
+ ! !
+ +--------------------------------------+
+
+ A software rotation correction of 180 degrees should be applied to
+ correctly display the image:
+
+ +--------------------------------------+
+ ! !
+ ! !
+ ! !
+ ! |\____)\___ !
+ ! ) _____ __`< !
+ ! |/ )/ !
+ ! !
+ ! !
+ ! !
+ +--------------------------------------+
+
+ Example two - Phone camera
+
+ A camera installed on the back side of a mobile device facing away from
+ the user. The captured images are meant to be displayed in portrait mode
+ (height > width) to match the device screen orientation and the device
+ usage orientation used when taking the picture.
+
+ The camera sensor is typically mounted with its pixel array longer side
+ aligned to the device longer side, upside-down mounted to compensate for
+ the lens optical inversion effect:
+
+ 0 Y-Rc
+ 0 +-------------------->
+ ! Y-Rp
+ ! ^
+ ! !
+ ! !
+ ! !
+ ! ! |\_____)\__
+ ! ! ) ____ ___.<
+ ! ! |/ )/
+ ! !
+ ! !
+ ! !
+ ! 0 +------------------------------------->
+ ! 0 X-Rp
+ !
+ !
+ !
+ !
+ V
+ X-Rc
+
+ The two reference systems are not aligned and the 'Rp' reference system is
+ rotated by 90 degrees in the counter-clockwise direction relatively to the
+ 'Rc' reference system.
+
+ The image once captured to memory will be rotated:
+
+ +-------------------------------------+
+ | _ _ |
+ | \ / |
+ | | | |
+ | | | |
+ | | > |
+ | < | |
+ | | | |
+ | . |
+ | V |
+ +-------------------------------------+
+
+ A correction of 90 degrees in counter-clockwise direction has to be
+ applied to correctly display the image in portrait mode on the device
+ screen:
+
+ +--------------------+
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |\____)\___ |
+ | ) _____ __`< |
+ | |/ )/ |
+ | |
+ | |
+ | |
+ | |
+ | |
+ +--------------------+
+
+ orientation:
+ description:
+ The orientation of a device (typically an image sensor or a flash LED)
+ describing its mounting position relative to the usage orientation of the
+ system in which the device is installed.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ # Front. The device is mounted on the front facing side of the system. For
+ # mobile devices such as smartphones, tablets and laptops the front side
+ # is the user facing side.
+ - 0
+ # Back. The device is mounted on the back side of the system, which is
+ # defined as the opposite side of the front facing one.
+ - 1
+ # External. The device is not attached directly to the system but is
+ # attached in a way that allows it to move freely.
+ - 2
+
+additionalProperties: true
+
+...
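
In device tree source the properties collected in this schema sit on the device node itself rather than inside an endpoint; whether a given sensor accepts each of them still depends on that sensor's own binding. A purely hypothetical node combining them (the compatible string and every phandle are placeholders):

    camera@36 {
        compatible = "vendor,example-sensor";         /* placeholder, not a real binding */
        reg = <0x36>;

        orientation = <1>;                            /* back-facing module */
        rotation = <90>;                              /* 90 degrees counter-clockwise between 'Rc' and 'Rp' */
        lens-focus = <&vcm>;                          /* focus coil controller node */
        flash-leds = <&led0 &led1>;                   /* LED sub-nodes of the flash driver */

        port {
            endpoint {
                remote-endpoint = <&csi_in>;
            };
        };
    };
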
diff --git a/Documentation/devicetree/bindings/media/video-interfaces.txt b/Documentation/devicetree/bindings/media/video-interfaces.txt
index 3920f25a9123..8fcf5f52bf5b 100644
--- a/Documentation/devicetree/bindings/media/video-interfaces.txt
+++ b/Documentation/devicetree/bindings/media/video-interfaces.txt
@@ -1,639 +1 @@
-Common bindings for video receiver and transmitter interfaces
-
-General concept
----------------
-
-Video data pipelines usually consist of external devices, e.g. camera sensors,
-controlled over an I2C, SPI or UART bus, and SoC internal IP blocks, including
-video DMA engines and video data processors.
-
-SoC internal blocks are described by DT nodes, placed similarly to other SoC
-blocks. External devices are represented as child nodes of their respective
-bus controller nodes, e.g. I2C.
-
-Data interfaces on all video devices are described by their child 'port' nodes.
-Configuration of a port depends on other devices participating in the data
-transfer and is described by 'endpoint' subnodes.
-
-device {
- ...
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- ...
- endpoint@0 { ... };
- endpoint@1 { ... };
- };
- port@1 { ... };
- };
-};
-
-If a port can be configured to work with more than one remote device on the same
-bus, an 'endpoint' child node must be provided for each of them. If more than
-one port is present in a device node or there is more than one endpoint at a
-port, or port node needs to be associated with a selected hardware interface,
-a common scheme using '#address-cells', '#size-cells' and 'reg' properties is
-used.
-
-All 'port' nodes can be grouped under optional 'ports' node, which allows to
-specify #address-cells, #size-cells properties independently for the 'port'
-and 'endpoint' nodes and any child device nodes a device might have.
-
-Two 'endpoint' nodes are linked with each other through their 'remote-endpoint'
-phandles. An endpoint subnode of a device contains all properties needed for
-configuration of this device for data exchange with other device. In most
-cases properties at the peer 'endpoint' nodes will be identical, however they
-might need to be different when there is any signal modifications on the bus
-between two devices, e.g. there are logic signal inverters on the lines.
-
-It is allowed for multiple endpoints at a port to be active simultaneously,
-where supported by a device. For example, in case where a data interface of
-a device is partitioned into multiple data busses, e.g. 16-bit input port
-divided into two separate ITU-R BT.656 8-bit busses. In such case bus-width
-and data-shift properties can be used to assign physical data lines to each
-endpoint node (logical bus).
-
-Documenting bindings for devices
---------------------------------
-
-All required and optional bindings the device supports shall be explicitly
-documented in device DT binding documentation. This also includes port and
-endpoint nodes for the device, including unit-addresses and reg properties where
-relevant.
-
-Please also see Documentation/devicetree/bindings/graph.txt .
-
-Required properties
--------------------
-
-If there is more than one 'port' or more than one 'endpoint' node or 'reg'
-property is present in port and/or endpoint nodes the following properties
-are required in a relevant parent node:
-
- - #address-cells : number of cells required to define port/endpoint
- identifier, should be 1.
- - #size-cells : should be zero.
-
-
-Optional properties
--------------------
-
-- flash-leds: An array of phandles, each referring to a flash LED, a sub-node
- of the LED driver device node.
-
-- lens-focus: A phandle to the node of the focus lens controller.
-
-- rotation: The camera rotation is expressed as the angular difference in
- degrees between two reference systems, one relative to the camera module, and
- one defined on the external world scene to be captured when projected on the
- image sensor pixel array.
-
- A camera sensor has a 2-dimensional reference system 'Rc' defined by
- its pixel array read-out order. The origin is set to the first pixel
- being read out, the X-axis points along the column read-out direction
- towards the last columns, and the Y-axis along the row read-out
- direction towards the last row.
-
- A typical example for a sensor with a 2592x1944 pixel array matrix
- observed from the front is:
-
- 2591 X-axis 0
- <------------------------+ 0
- .......... ... ..........!
- .......... ... ..........! Y-axis
- ... !
- .......... ... ..........!
- .......... ... ..........! 1943
- V
-
- The external world scene reference system 'Rs' is a 2-dimensional
- reference system on the focal plane of the camera module. The origin is
- placed on the top-left corner of the visible scene, the X-axis points
- towards the right, and the Y-axis points towards the bottom of the
- scene. The top, bottom, left and right directions are intentionally not
- defined and depend on the environment in which the camera is used.
-
- A typical example of a (very common) picture of a shark swimming from
- left to right, as seen from the camera, is:
-
- 0 X-axis
- 0 +------------------------------------->
- !
- !
- !
- ! |\____)\___
- ! ) _____ __`<
- ! |/ )/
- !
- !
- !
- V
- Y-axis
-
- with the reference system 'Rs' placed on the camera focal plane:
-
- ¸.·˙!
- ¸.·˙ !
- _ ¸.·˙ !
- +-/ \-+¸.·˙ !
- | (o) | ! Camera focal plane
- +-----+˙·.¸ !
- ˙·.¸ !
- ˙·.¸ !
- ˙·.¸!
-
- When projected on the sensor's pixel array, the image and the associated
- reference system 'Rs' are typically (but not always) inverted, due to
- the camera module's lens optical inversion effect.
-
- Assuming the above represented scene of the swimming shark, the lens
- inversion projects the scene and its reference system onto the sensor
- pixel array, seen from the front of the camera sensor, as follows:
-
- Y-axis
- ^
- !
- !
- !
- ! |\_____)\__
- ! ) ____ ___.<
- ! |/ )/
- !
- !
- !
- 0 +------------------------------------->
- 0 X-axis
-
- Note the shark being upside-down.
-
- The resulting projected reference system is named 'Rp'.
-
- The camera rotation property is then defined as the angular difference
- in the counter-clockwise direction between the camera reference system
- 'Rc' and the projected scene reference system 'Rp'. It is expressed in
- degrees as a number in the range [0, 360[.
-
- Examples
-
- 0 degrees camera rotation:
-
-
- Y-Rp
- ^
- Y-Rc !
- ^ !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! 0 +------------------------------------->
- ! 0 X-Rp
- 0 +------------------------------------->
- 0 X-Rc
-
-
- X-Rc 0
- <------------------------------------+ 0
- X-Rp 0 !
- <------------------------------------+ 0 !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! V
- ! Y-Rc
- V
- Y-Rp
-
- 90 degrees camera rotation:
-
- 0 Y-Rc
- 0 +-------------------->
- ! Y-Rp
- ! ^
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! 0 +------------------------------------->
- ! 0 X-Rp
- !
- !
- !
- !
- V
- X-Rc
-
- 180 degrees camera rotation:
-
- 0
- <------------------------------------+ 0
- X-Rc !
- Y-Rp !
- ^ !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! V
- ! Y-Rc
- 0 +------------------------------------->
- 0 X-Rp
-
- 270 degrees camera rotation:
-
- 0 Y-Rc
- 0 +-------------------->
- ! 0
- ! <-----------------------------------+ 0
- ! X-Rp !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! V
- ! Y-Rp
- !
- !
- !
- !
- V
- X-Rc
-
-
- Example one - Webcam
-
- A camera module installed on the user facing part of a laptop screen
- casing used for video calls. The captured images are meant to be
- displayed in landscape mode (width > height) on the laptop screen.
-
- The camera is typically mounted upside-down to compensate the lens
- optical inversion effect:
-
- Y-Rp
- Y-Rc ^
- ^ !
- ! !
- ! ! |\_____)\__
- ! ! ) ____ ___.<
- ! ! |/ )/
- ! !
- ! !
- ! !
- ! 0 +------------------------------------->
- ! 0 X-Rp
- 0 +------------------------------------->
- 0 X-Rc
-
- The two reference systems are aligned, the resulting camera rotation is
- 0 degrees, no rotation correction needs to be applied to the resulting
- image once captured to memory buffers to correctly display it to users:
-
- +--------------------------------------+
- ! !
- ! !
- ! !
- ! |\____)\___ !
- ! ) _____ __`< !
- ! |/ )/ !
- ! !
- ! !
- ! !
- +--------------------------------------+
-
- If the camera sensor is not mounted upside-down to compensate for the
- lens optical inversion, the two reference systems will not be aligned,
- with 'Rp' being rotated 180 degrees relatively to 'Rc':
-
-
- X-Rc 0
- <------------------------------------+ 0
- !
- Y-Rp !
- ^ !
- ! !
- ! |\_____)\__ !
- ! ) ____ ___.< !
- ! |/ )/ !
- ! !
- ! !
- ! V
- ! Y-Rc
- 0 +------------------------------------->
- 0 X-Rp
-
- The image once captured to memory will then be rotated by 180 degrees:
-
- +--------------------------------------+
- ! !
- ! !
- ! !
- ! __/(_____/| !
- ! >.___ ____ ( !
- ! \( \| !
- ! !
- ! !
- ! !
- +--------------------------------------+
-
- A software rotation correction of 180 degrees should be applied to
- correctly display the image:
-
- +--------------------------------------+
- ! !
- ! !
- ! !
- ! |\____)\___ !
- ! ) _____ __`< !
- ! |/ )/ !
- ! !
- ! !
- ! !
- +--------------------------------------+
-
- Example two - Phone camera
-
- A camera installed on the back side of a mobile device facing away from
- the user. The captured images are meant to be displayed in portrait mode
- (height > width) to match the device screen orientation and the device
- usage orientation used when taking the picture.
-
- The camera sensor is typically mounted with its pixel array longer side
- aligned to the device longer side, upside-down mounted to compensate for
- the lens optical inversion effect:
-
- 0 Y-Rc
- 0 +-------------------->
- ! Y-Rp
- ! ^
- ! !
- ! !
- ! !
- ! ! |\_____)\__
- ! ! ) ____ ___.<
- ! ! |/ )/
- ! !
- ! !
- ! !
- ! 0 +------------------------------------->
- ! 0 X-Rp
- !
- !
- !
- !
- V
- X-Rc
-
- The two reference systems are not aligned and the 'Rp' reference
- system is rotated by 90 degrees in the counter-clockwise direction
- relatively to the 'Rc' reference system.
-
- The image once captured to memory will be rotated:
-
- +-------------------------------------+
- | _ _ |
- | \ / |
- | | | |
- | | | |
- | | > |
- | < | |
- | | | |
- | . |
- | V |
- +-------------------------------------+
-
- A correction of 90 degrees in counter-clockwise direction has to be
- applied to correctly display the image in portrait mode on the device
- screen:
-
- +--------------------+
- | |
- | |
- | |
- | |
- | |
- | |
- | |\____)\___ |
- | ) _____ __`< |
- | |/ )/ |
- | |
- | |
- | |
- | |
- | |
- +--------------------+
-
-- orientation: The orientation of a device (typically an image sensor or a flash
- LED) describing its mounting position relative to the usage orientation of the
- system where the device is installed on.
- Possible values are:
- 0 - Front. The device is mounted on the front facing side of the system.
- For mobile devices such as smartphones, tablets and laptops the front side is
- the user facing side.
- 1 - Back. The device is mounted on the back side of the system, which is
- defined as the opposite side of the front facing one.
- 2 - External. The device is not attached directly to the system but is
- attached in a way that allows it to move freely.
-
-Optional endpoint properties
-----------------------------
-
-- remote-endpoint: phandle to an 'endpoint' subnode of a remote device node.
-- slave-mode: a boolean property indicating that the link is run in slave mode.
- The default when this property is not specified is master mode. In the slave
- mode horizontal and vertical synchronization signals are provided to the
- slave device (data source) by the master device (data sink). In the master
- mode the data source device is also the source of the synchronization signals.
-- bus-type: data bus type. Possible values are:
- 1 - MIPI CSI-2 C-PHY
- 2 - MIPI CSI1
- 3 - CCP2
- 4 - MIPI CSI-2 D-PHY
- 5 - Parallel
- 6 - Bt.656
-- bus-width: number of data lines actively used, valid for the parallel busses.
-- data-shift: on the parallel data busses, if bus-width is used to specify the
- number of data lines, data-shift can be used to specify which data lines are
- used, e.g. "bus-width=<8>; data-shift=<2>;" means, that lines 9:2 are used.
-- hsync-active: active state of the HSYNC signal, 0/1 for LOW/HIGH respectively.
-- vsync-active: active state of the VSYNC signal, 0/1 for LOW/HIGH respectively.
- Note, that if HSYNC and VSYNC polarities are not specified, embedded
- synchronization may be required, where supported.
-- data-active: similar to HSYNC and VSYNC, specifies data line polarity.
-- data-enable-active: similar to HSYNC and VSYNC, specifies the data enable
- signal polarity.
-- field-even-active: field signal level during the even field data transmission.
-- pclk-sample: sample data on rising (1) or falling (0) edge of the pixel clock
- signal.
-- sync-on-green-active: active state of Sync-on-green (SoG) signal, 0/1 for
- LOW/HIGH respectively.
-- data-lanes: an array of physical data lane indexes. Position of an entry
- determines the logical lane number, while the value of an entry indicates
- physical lane, e.g. for 2-lane MIPI CSI-2 bus we could have
- "data-lanes = <1 2>;", assuming the clock lane is on hardware lane 0.
- If the hardware does not support lane reordering, monotonically
- incremented values shall be used from 0 or 1 onwards, depending on
- whether or not there is also a clock lane. This property is valid for
- serial busses only (e.g. MIPI CSI-2).
-- clock-lanes: an array of physical clock lane indexes. Position of an entry
- determines the logical lane number, while the value of an entry indicates
- physical lane, e.g. for a MIPI CSI-2 bus we could have "clock-lanes = <0>;",
- which places the clock lane on hardware lane 0. This property is valid for
- serial busses only (e.g. MIPI CSI-2). Note that for the MIPI CSI-2 bus this
- array contains only one entry.
-- clock-noncontinuous: a boolean property to allow MIPI CSI-2 non-continuous
- clock mode.
-- link-frequencies: Allowed data bus frequencies. For MIPI CSI-2, for
- instance, this is the actual frequency of the bus, not bits per clock per
- lane value. An array of 64-bit unsigned integers.
-- lane-polarities: an array of polarities of the lanes starting from the clock
- lane and followed by the data lanes in the same order as in data-lanes.
- Valid values are 0 (normal) and 1 (inverted). The length of the array
- should be the combined length of data-lanes and clock-lanes properties.
- If the lane-polarities property is omitted, the value must be interpreted
- as 0 (normal). This property is valid for serial busses only.
-- strobe: Whether the clock signal is used as clock (0) or strobe (1). Used
- with CCP2, for instance.
-
-Example
--------
-
-The example snippet below describes two data pipelines. ov772x and imx074 are
-camera sensors with a parallel and serial (MIPI CSI-2) video bus respectively.
-Both sensors are on the I2C control bus corresponding to the i2c0 controller
-node. ov772x sensor is linked directly to the ceu0 video host interface.
-imx074 is linked to ceu0 through the MIPI CSI-2 receiver (csi2). ceu0 has a
-(single) DMA engine writing captured data to memory. ceu0 node has a single
-'port' node which may indicate that at any time only one of the following data
-pipelines can be active: ov772x -> ceu0 or imx074 -> csi2 -> ceu0.
-
- ceu0: ceu@fe910000 {
- compatible = "renesas,sh-mobile-ceu";
- reg = <0xfe910000 0xa0>;
- interrupts = <0x880>;
-
- mclk: master_clock {
- compatible = "renesas,ceu-clock";
- #clock-cells = <1>;
- clock-frequency = <50000000>; /* Max clock frequency */
- clock-output-names = "mclk";
- };
-
- port {
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* Parallel bus endpoint */
- ceu0_1: endpoint@1 {
- reg = <1>; /* Local endpoint # */
- remote = <&ov772x_1_1>; /* Remote phandle */
- bus-width = <8>; /* Used data lines */
- data-shift = <2>; /* Lines 9:2 are used */
-
- /* If hsync-active/vsync-active are missing,
- embedded BT.656 sync is used */
- hsync-active = <0>; /* Active low */
- vsync-active = <0>; /* Active low */
- data-active = <1>; /* Active high */
- pclk-sample = <1>; /* Rising */
- };
-
- /* MIPI CSI-2 bus endpoint */
- ceu0_0: endpoint@0 {
- reg = <0>;
- remote = <&csi2_2>;
- };
- };
- };
-
- i2c0: i2c@fff20000 {
- ...
- ov772x_1: camera@21 {
- compatible = "ovti,ov772x";
- reg = <0x21>;
- vddio-supply = <&regulator1>;
- vddcore-supply = <&regulator2>;
-
- clock-frequency = <20000000>;
- clocks = <&mclk 0>;
- clock-names = "xclk";
-
- port {
- /* With 1 endpoint per port no need for addresses. */
- ov772x_1_1: endpoint {
- bus-width = <8>;
- remote-endpoint = <&ceu0_1>;
- hsync-active = <1>;
- vsync-active = <0>; /* Who came up with an
- inverter here ?... */
- data-active = <1>;
- pclk-sample = <1>;
- };
- };
- };
-
- imx074: camera@1a {
- compatible = "sony,imx074";
- reg = <0x1a>;
- vddio-supply = <&regulator1>;
- vddcore-supply = <&regulator2>;
-
- clock-frequency = <30000000>; /* Shared clock with ov772x_1 */
- clocks = <&mclk 0>;
- clock-names = "sysclk"; /* Assuming this is the
- name in the datasheet */
- port {
- imx074_1: endpoint {
- clock-lanes = <0>;
- data-lanes = <1 2>;
- remote-endpoint = <&csi2_1>;
- };
- };
- };
- };
-
- csi2: csi2@ffc90000 {
- compatible = "renesas,sh-mobile-csi2";
- reg = <0xffc90000 0x1000>;
- interrupts = <0x17a0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@1 {
- compatible = "renesas,csi2c"; /* One of CSI2I and CSI2C. */
- reg = <1>; /* CSI-2 PHY #1 of 2: PHY_S,
- PHY_M has port address 0,
- is unused. */
- csi2_1: endpoint {
- clock-lanes = <0>;
- data-lanes = <2 1>;
- remote-endpoint = <&imx074_1>;
- };
- };
- port@2 {
- reg = <2>; /* port 2: link to the CEU */
-
- csi2_2: endpoint {
- remote-endpoint = <&ceu0_0>;
- };
- };
- };
+This file has moved to video-interfaces.yaml and video-interface-devices.yaml.
diff --git a/Documentation/devicetree/bindings/media/video-interfaces.yaml b/Documentation/devicetree/bindings/media/video-interfaces.yaml
new file mode 100644
index 000000000000..0a7a73fd59f2
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video-interfaces.yaml
@@ -0,0 +1,344 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/video-interfaces.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common bindings for video receiver and transmitter interface endpoints
+
+maintainers:
+ - Sakari Ailus <sakari.ailus@linux.intel.com>
+ - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+description: |
+ Video data pipelines usually consist of external devices, e.g. camera sensors,
+ controlled over an I2C, SPI or UART bus, and SoC internal IP blocks, including
+ video DMA engines and video data processors.
+
+ SoC internal blocks are described by DT nodes, placed similarly to other SoC
+ blocks. External devices are represented as child nodes of their respective
+ bus controller nodes, e.g. I2C.
+
+ Data interfaces on all video devices are described by their child 'port' nodes.
+ Configuration of a port depends on other devices participating in the data
+ transfer and is described by 'endpoint' subnodes.
+
+ device {
+ ...
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ ...
+ endpoint@0 { ... };
+ endpoint@1 { ... };
+ };
+ port@1 { ... };
+ };
+ };
+
+ If a port can be configured to work with more than one remote device on the same
+ bus, an 'endpoint' child node must be provided for each of them. If more than
+ one port is present in a device node or there is more than one endpoint at a
+ port, or a port node needs to be associated with a selected hardware interface,
+ a common scheme using '#address-cells', '#size-cells' and 'reg' properties is
+ used.
+
+ All 'port' nodes can be grouped under an optional 'ports' node, which allows
+ specifying the #address-cells and #size-cells properties independently for the
+ 'port' and 'endpoint' nodes and any child device nodes a device might have.
+
+ Two 'endpoint' nodes are linked with each other through their 'remote-endpoint'
+ phandles. An endpoint subnode of a device contains all properties needed for
+ configuration of this device for data exchange with the other device. In most
+ cases the properties at the peer 'endpoint' nodes will be identical; however,
+ they may need to differ when there are signal modifications on the bus between
+ the two devices, e.g. logic signal inverters on the lines.
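
   A minimal sketch of such a link, assuming two hypothetical endpoint labels
   'sensor_out' (data source) and 'bridge_in' (data sink):

       sensor_out: endpoint {
           remote-endpoint = <&bridge_in>;
       };

       bridge_in: endpoint {
           remote-endpoint = <&sensor_out>;
       };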
+
+ Multiple endpoints at a port may be active simultaneously, where supported by
+ a device, for example when a data interface of a device is partitioned into
+ multiple data busses, e.g. a 16-bit input port divided into two separate
+ ITU-R BT.656 8-bit busses. In such a case the bus-width and data-shift
+ properties can be used to assign physical data lines to each endpoint node
+ (logical bus).
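
   A sketch of that case, assuming a hypothetical receiver whose 16-bit input
   port is split into two BT.656 8-bit logical busses; the endpoint labels and
   remote phandles are illustrative only:

       port {
           #address-cells = <1>;
           #size-cells = <0>;

           /* Logical bus 0 on physical data lines 7:0 */
           endpoint@0 {
               reg = <0>;
               bus-width = <8>;
               data-shift = <0>;
               remote-endpoint = <&cam0>;
           };

           /* Logical bus 1 on physical data lines 15:8 */
           endpoint@1 {
               reg = <1>;
               bus-width = <8>;
               data-shift = <8>;
               remote-endpoint = <&cam1>;
           };
       };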
+
+ Documenting bindings for devices
+ --------------------------------
+
+ All required and optional bindings the device supports shall be explicitly
+ documented in device DT binding documentation. This also includes port and
+ endpoint nodes for the device, including unit-addresses and reg properties
+ where relevant.
+
+allOf:
+ - $ref: /schemas/graph.yaml#/$defs/endpoint-base
+
+properties:
+ slave-mode:
+ type: boolean
+ description:
+ Indicates that the link is run in slave mode. The default when this
+ property is not specified is master mode. In the slave mode horizontal and
+ vertical synchronization signals are provided to the slave device (data
+ source) by the master device (data sink). In the master mode the data
+ source device is also the source of the synchronization signals.
+
+ bus-type:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 1 # MIPI CSI-2 C-PHY
+ - 2 # MIPI CSI1
+ - 3 # CCP2
+ - 4 # MIPI CSI-2 D-PHY
+ - 5 # Parallel
+ - 6 # BT.656
+ description:
+ Data bus type.
+
+ bus-width:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 64
+ description:
+ Number of data lines actively used, valid for the parallel busses.
+
+ data-shift:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 64
+ description:
+ On the parallel data busses, if bus-width is used to specify the number of
+ data lines, data-shift can be used to specify which data lines are used,
+ e.g. "bus-width=<8>; data-shift=<2>;" means, that lines 9:2 are used.
+
+ hsync-active:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1 ]
+ description:
+ Active state of the HSYNC signal, 0/1 for LOW/HIGH respectively.
+
+ vsync-active:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1 ]
+ description:
+ Active state of the VSYNC signal, 0/1 for LOW/HIGH respectively. Note
+ that if HSYNC and VSYNC polarities are not specified, embedded
+ synchronization may be required, where supported.
+
+ data-active:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1 ]
+ description:
+ Similar to HSYNC and VSYNC, specifies data line polarity.
+
+ data-enable-active:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1 ]
+ description:
+ Similar to HSYNC and VSYNC, specifies the data enable signal polarity.
+
+ field-even-active:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1 ]
+ description:
+ Field signal level during the even field data transmission.
+
+ pclk-sample:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1 ]
+ description:
+ Sample data on rising (1) or falling (0) edge of the pixel clock signal.
+
+ sync-on-green-active:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1 ]
+ description:
+ Active state of Sync-on-green (SoG) signal, 0/1 for LOW/HIGH respectively.
+
+ data-lanes:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 8
+ items:
+ # Assume up to 9 physical lane indices
+ maximum: 8
+ description:
+ An array of physical data lane indexes. Position of an entry determines
+ the logical lane number, while the value of an entry indicates physical
+ lane, e.g. for 2-lane MIPI CSI-2 bus we could have "data-lanes = <1 2>;",
+ assuming the clock lane is on hardware lane 0. If the hardware does not
+ support lane reordering, monotonically incremented values shall be used
+ from 0 or 1 onwards, depending on whether or not there is also a clock
+ lane. This property is valid for serial busses only (e.g. MIPI CSI-2).
+
+ clock-lanes:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ # Assume up to 9 physical lane indices
+ maximum: 8
+ description:
+ Physical clock lane index. The value corresponds to the physical lane on
+ which the clock signal is transmitted, e.g. for a MIPI CSI-2 bus we could
+ have "clock-lanes = <0>;", which places the clock lane on hardware lane 0.
+ This property is valid for serial busses only (e.g. MIPI CSI-2).
+
+ clock-noncontinuous:
+ type: boolean
+ description:
+ Allow MIPI CSI-2 non-continuous clock mode.
+
+ link-frequencies:
+ $ref: /schemas/types.yaml#/definitions/uint64-array
+ description:
+ Allowed data bus frequencies. For MIPI CSI-2, for instance, this is the
+ actual frequency of the bus, not bits per clock per lane value. An array
+ of 64-bit unsigned integers.
+
+ lane-polarities:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 9
+ items:
+ enum: [ 0, 1 ]
+ description:
+ An array of polarities of the lanes starting from the clock lane and
+ followed by the data lanes in the same order as in data-lanes. Valid
+ values are 0 (normal) and 1 (inverted). The length of the array should be
+ the combined length of data-lanes and clock-lanes properties. If the
+ lane-polarities property is omitted, the value must be interpreted as 0
+ (normal). This property is valid for serial busses only.
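
   As a worked sketch of the ordering, assume a hypothetical 2-lane MIPI CSI-2
   endpoint whose clock lane is electrically inverted while both data lanes
   are wired normally:

       endpoint {
           clock-lanes = <0>;
           data-lanes = <1 2>;
           /* entry 0: clock lane (inverted); entries 1-2: data lanes (normal) */
           lane-polarities = <1 0 0>;
           remote-endpoint = <&csi2_in>;   /* illustrative phandle */
       };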
+
+ strobe:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1 ]
+ description:
+ Whether the clock signal is used as clock (0) or strobe (1). Used with
+ CCP2, for instance.
+
+additionalProperties: true
+
+examples:
+ # The example snippet below describes two data pipelines. ov772x and imx074
+ # are camera sensors with a parallel and serial (MIPI CSI-2) video bus
+ # respectively. Both sensors are on the I2C control bus corresponding to the
+ # i2c0 controller node. ov772x sensor is linked directly to the ceu0 video
+ # host interface. imx074 is linked to ceu0 through the MIPI CSI-2 receiver
+ # (csi2). ceu0 has a (single) DMA engine writing captured data to memory.
+ # ceu0 node has a single 'port' node which may indicate that at any time
+ # only one of the following data pipelines can be active:
+ # ov772x -> ceu0 or imx074 -> csi2 -> ceu0.
+ - |
+ ceu@fe910000 {
+ compatible = "renesas,sh-mobile-ceu";
+ reg = <0xfe910000 0xa0>;
+ interrupts = <0x880>;
+
+ mclk: master_clock {
+ compatible = "renesas,ceu-clock";
+ #clock-cells = <1>;
+ clock-frequency = <50000000>; /* Max clock frequency */
+ clock-output-names = "mclk";
+ };
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Parallel bus endpoint */
+ ceu0_1: endpoint@1 {
+ reg = <1>; /* Local endpoint # */
+ remote-endpoint = <&ov772x_1_1>; /* Remote phandle */
+ bus-width = <8>; /* Used data lines */
+ data-shift = <2>; /* Lines 9:2 are used */
+
+ /* If hsync-active/vsync-active are missing,
+ embedded BT.656 sync is used */
+ hsync-active = <0>; /* Active low */
+ vsync-active = <0>; /* Active low */
+ data-active = <1>; /* Active high */
+ pclk-sample = <1>; /* Rising */
+ };
+
+ /* MIPI CSI-2 bus endpoint */
+ ceu0_0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&csi2_2>;
+ };
+ };
+ };
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ camera@21 {
+ compatible = "ovti,ov772x";
+ reg = <0x21>;
+ vddio-supply = <&regulator1>;
+ vddcore-supply = <&regulator2>;
+
+ clock-frequency = <20000000>;
+ clocks = <&mclk 0>;
+ clock-names = "xclk";
+
+ port {
+ /* With 1 endpoint per port no need for addresses. */
+ ov772x_1_1: endpoint {
+ bus-width = <8>;
+ remote-endpoint = <&ceu0_1>;
+ hsync-active = <1>;
+ vsync-active = <0>; /* Who came up with an
+ inverter here ?... */
+ data-active = <1>;
+ pclk-sample = <1>;
+ };
+ };
+ };
+
+ camera@1a {
+ compatible = "sony,imx074";
+ reg = <0x1a>;
+ vddio-supply = <&regulator1>;
+ vddcore-supply = <&regulator2>;
+
+ clock-frequency = <30000000>; /* Shared clock with ov772x_1 */
+ clocks = <&mclk 0>;
+ clock-names = "sysclk"; /* Assuming this is the
+ name in the datasheet */
+ port {
+ imx074_1: endpoint {
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ remote-endpoint = <&csi2_1>;
+ };
+ };
+ };
+ };
+
+ csi2: csi2@ffc90000 {
+ compatible = "renesas,sh-mobile-csi2";
+ reg = <0xffc90000 0x1000>;
+ interrupts = <0x17a0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ compatible = "renesas,csi2c"; /* One of CSI2I and CSI2C. */
+ reg = <1>; /* CSI-2 PHY #1 of 2: PHY_S,
+ PHY_M has port address 0,
+ is unused. */
+ csi2_1: endpoint {
+ clock-lanes = <0>;
+ data-lanes = <2 1>;
+ remote-endpoint = <&imx074_1>;
+ };
+ };
+ port@2 {
+ reg = <2>; /* port 2: link to the CEU */
+
+ csi2_2: endpoint {
+ remote-endpoint = <&ceu0_0>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.yaml b/Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.yaml
index 2961a5b6872f..7d77823dbb7a 100644
--- a/Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.yaml
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.yaml
@@ -97,24 +97,21 @@ properties:
maxItems: 1
ports:
- type: object
+ $ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/port-base
description: |
Input / sink port node, single endpoint describing the
CSI-2 transmitter.
properties:
- reg:
- const: 0
-
endpoint:
- type: object
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
properties:
-
data-lanes:
description: |
This is required only in the sink port 0 endpoint which
@@ -130,41 +127,17 @@ properties:
- const: 3
- const: 4
- remote-endpoint: true
-
required:
- data-lanes
- - remote-endpoint
-
- additionalProperties: false
- additionalProperties: false
+ unevaluatedProperties: false
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: |
Output / source port node, endpoint describing modules
connected the CSI-2 receiver.
- properties:
-
- reg:
- const: 1
-
- endpoint:
- type: object
-
- properties:
-
- remote-endpoint: true
-
- required:
- - remote-endpoint
-
- additionalProperties: false
-
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/memory-controllers/exynos-srom.yaml b/Documentation/devicetree/bindings/memory-controllers/exynos-srom.yaml
index 637e24f0f73b..c6e44f47ce7c 100644
--- a/Documentation/devicetree/bindings/memory-controllers/exynos-srom.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/exynos-srom.yaml
@@ -28,6 +28,8 @@ properties:
const: 1
ranges:
+ minItems: 1
+ maxItems: 4
description: |
Reflects the memory layout with four integer values per bank. Format:
<bank-number> 0 <parent address of bank> <size>
diff --git a/Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml b/Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml
index 6d6ba608fd22..990489fdd2ac 100644
--- a/Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml
@@ -26,10 +26,14 @@ properties:
compatible:
items:
- enum:
+ - renesas,r8a774a1-rpc-if # RZ/G2M
+ - renesas,r8a774b1-rpc-if # RZ/G2N
+ - renesas,r8a774c0-rpc-if # RZ/G2E
+ - renesas,r8a774e1-rpc-if # RZ/G2H
- renesas,r8a77970-rpc-if # R-Car V3M
- renesas,r8a77980-rpc-if # R-Car V3H
- renesas,r8a77995-rpc-if # R-Car D3
- - const: renesas,rcar-gen3-rpc-if # a generic R-Car gen3 device
+ - const: renesas,rcar-gen3-rpc-if # a generic R-Car gen3 or RZ/G2 device
reg:
items:
diff --git a/Documentation/devicetree/bindings/mfd/bd9571mwv.txt b/Documentation/devicetree/bindings/mfd/bd9571mwv.txt
index 8c4678650d1a..1d6413e96c37 100644
--- a/Documentation/devicetree/bindings/mfd/bd9571mwv.txt
+++ b/Documentation/devicetree/bindings/mfd/bd9571mwv.txt
@@ -1,7 +1,7 @@
-* ROHM BD9571MWV Power Management Integrated Circuit (PMIC) bindings
+* ROHM BD9571MWV/BD9574MWF Power Management Integrated Circuit (PMIC) bindings
Required properties:
- - compatible : Should be "rohm,bd9571mwv".
+ - compatible : Should be "rohm,bd9571mwv" or "rohm,bd9574mwf".
- reg : I2C slave address.
- interrupts : The interrupt line the device is connected to.
- interrupt-controller : Marks the device node as an interrupt controller.
diff --git a/Documentation/devicetree/bindings/mfd/canaan,k210-sysctl.yaml b/Documentation/devicetree/bindings/mfd/canaan,k210-sysctl.yaml
new file mode 100644
index 000000000000..c24ad45cabb5
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/canaan,k210-sysctl.yaml
@@ -0,0 +1,109 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/canaan,k210-sysctl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Canaan Kendryte K210 System Controller Device Tree Bindings
+
+maintainers:
+ - Damien Le Moal <damien.lemoal@wdc.com>
+
+description:
+ The Canaan Inc. Kendryte K210 SoC system controller provides a
+ register map for controlling the clocks, reset signals and pin power
+ domains of the SoC.
+
+properties:
+ compatible:
+ items:
+ - const: canaan,k210-sysctl
+ - const: syscon
+ - const: simple-mfd
+
+ clocks:
+ maxItems: 1
+ description:
+ System controller Advanced Power Bus (APB) interface clock source.
+
+ clock-names:
+ items:
+ - const: pclk
+
+ reg:
+ maxItems: 1
+
+ clock-controller:
+ # Child node
+ type: object
+ $ref: "../clock/canaan,k210-clk.yaml"
+ description:
+ Clock controller for the SoC clocks. This child node definition
+ should follow the bindings specified in
+ Documentation/devicetree/bindings/clock/canaan,k210-clk.yaml.
+
+ reset-controller:
+ # Child node
+ type: object
+ $ref: "../reset/canaan,k210-rst.yaml"
+ description:
+ Reset controller for the SoC. This child node definition
+ should follow the bindings specified in
+ Documentation/devicetree/bindings/reset/canaan,k210-rst.yaml.
+
+ syscon-reboot:
+ # Child node
+ type: object
+ $ref: "../power/reset/syscon-reboot.yaml"
+ description:
+ Reboot method for the SoC. This child node definition
+ should follow the bindings specified in
+ Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml.
+
+required:
+ - compatible
+ - clocks
+ - reg
+ - clock-controller
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/k210-clk.h>
+ #include <dt-bindings/reset/k210-rst.h>
+
+ clocks {
+ in0: oscillator {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ };
+ };
+
+ sysctl: syscon@50440000 {
+ compatible = "canaan,k210-sysctl",
+ "syscon", "simple-mfd";
+ reg = <0x50440000 0x100>;
+ clocks = <&sysclk K210_CLK_APB1>;
+ clock-names = "pclk";
+
+ sysclk: clock-controller {
+ #clock-cells = <1>;
+ compatible = "canaan,k210-clk";
+ clocks = <&in0>;
+ };
+
+ sysrst: reset-controller {
+ compatible = "canaan,k210-rst";
+ #reset-cells = <1>;
+ };
+
+ reboot: syscon-reboot {
+ compatible = "syscon-reboot";
+ regmap = <&sysctl>;
+ offset = <48>;
+ mask = <1>;
+ value = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/ene-kb930.yaml b/Documentation/devicetree/bindings/mfd/ene-kb930.yaml
new file mode 100644
index 000000000000..06ed9ec8f4bb
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/ene-kb930.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/ene-kb930.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ENE KB930 Embedded Controller bindings
+
+description: |
+ This binding describes the ENE KB930 Embedded Controller attached to an
+ I2C bus.
+
+maintainers:
+ - Dmitry Osipenko <digetx@gmail.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - acer,a500-iconia-ec # Acer A500 Iconia tablet device
+ - const: ene,kb930
+ reg:
+ maxItems: 1
+
+ monitored-battery: true
+ power-supplies: true
+ system-power-controller: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ battery: battery-cell {
+ compatible = "simple-battery";
+ charge-full-design-microamp-hours = <3260000>;
+ energy-full-design-microwatt-hours = <24000000>;
+ operating-range-celsius = <0 40>;
+ };
+
+ mains: ac-adapter {
+ compatible = "gpio-charger";
+ charger-type = "mains";
+ gpios = <&gpio 125 0>;
+ };
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ embedded-controller@58 {
+ compatible = "acer,a500-iconia-ec", "ene,kb930";
+ reg = <0x58>;
+
+ system-power-controller;
+
+ monitored-battery = <&battery>;
+ power-supplies = <&mains>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/mfd/gateworks-gsc.yaml b/Documentation/devicetree/bindings/mfd/gateworks-gsc.yaml
index d08e8fe76446..5a1e8d21f7a0 100644
--- a/Documentation/devicetree/bindings/mfd/gateworks-gsc.yaml
+++ b/Documentation/devicetree/bindings/mfd/gateworks-gsc.yaml
@@ -83,8 +83,9 @@ properties:
2 - scaled voltage based on an optional resistor divider
and optional offset
3 - pre-scaled 16-bit voltage value
+ 4 - fan tach input to report RPM
$ref: /schemas/types.yaml#/definitions/uint32
- enum: [0, 1, 2, 3]
+ enum: [0, 1, 2, 3, 4]
gw,voltage-divider-ohms:
description: Values of resistors for divider on raw ADC input
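
A hedged sketch of an ADC channel child node using the new fan tachometer mode;
the adc node compatible, unit address and label are assumptions for
illustration only:

    adc {
        compatible = "gw,gsc-adc";
        #address-cells = <1>;
        #size-cells = <0>;

        channel@0 {
            gw,mode = <4>;      /* fan tach input, reported as RPM */
            reg = <0x0>;
            label = "fan_tach";
        };
    };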
diff --git a/Documentation/devicetree/bindings/mfd/iqs62x.yaml b/Documentation/devicetree/bindings/mfd/iqs62x.yaml
index 541b06d80e73..044cd7542c2b 100644
--- a/Documentation/devicetree/bindings/mfd/iqs62x.yaml
+++ b/Documentation/devicetree/bindings/mfd/iqs62x.yaml
@@ -93,7 +93,7 @@ examples:
pwmleds {
compatible = "pwm-leds";
- panel {
+ led-1 {
pwms = <&iqs620a_pwm 0 1000000>;
max-brightness = <255>;
};
diff --git a/Documentation/devicetree/bindings/mips/lantiq/lantiq,cgu.yaml b/Documentation/devicetree/bindings/mips/lantiq/lantiq,cgu.yaml
new file mode 100644
index 000000000000..d5805725befb
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/lantiq/lantiq,cgu.yaml
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mips/lantiq/lantiq,cgu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Lantiq Xway SoC series Clock Generation Unit (CGU)
+
+maintainers:
+ - John Crispin <john@phrozen.org>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - lantiq,cgu-xway
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ cgu@103000 {
+ compatible = "lantiq,cgu-xway";
+ reg = <0x103000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/mips/lantiq/lantiq,dma-xway.yaml b/Documentation/devicetree/bindings/mips/lantiq/lantiq,dma-xway.yaml
new file mode 100644
index 000000000000..40130fefa2b4
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/lantiq/lantiq,dma-xway.yaml
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mips/lantiq/lantiq,dma-xway.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Lantiq Xway SoCs DMA Controller DT bindings
+
+maintainers:
+ - John Crispin <john@phrozen.org>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - lantiq,dma-xway
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ dma@e104100 {
+ compatible = "lantiq,dma-xway";
+ reg = <0xe104100 0x800>;
+ };
diff --git a/Documentation/devicetree/bindings/mips/lantiq/lantiq,ebu.yaml b/Documentation/devicetree/bindings/mips/lantiq/lantiq,ebu.yaml
new file mode 100644
index 000000000000..0fada1f085a9
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/lantiq/lantiq,ebu.yaml
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mips/lantiq/lantiq,ebu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Lantiq Xway SoC series External Bus Unit (EBU)
+
+maintainers:
+ - John Crispin <john@phrozen.org>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - lantiq,ebu-xway
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ ebu@105300 {
+ compatible = "lantiq,ebu-xway";
+ reg = <0x105300 0x100>;
+ };
diff --git a/Documentation/devicetree/bindings/mips/lantiq/lantiq,pmu.yaml b/Documentation/devicetree/bindings/mips/lantiq/lantiq,pmu.yaml
new file mode 100644
index 000000000000..4982b458ac12
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/lantiq/lantiq,pmu.yaml
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mips/lantiq/lantiq,pmu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Lantiq Xway SoC series Power Management Unit (PMU)
+
+maintainers:
+ - John Crispin <john@phrozen.org>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - lantiq,pmu-xway
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ pmu@102000 {
+ compatible = "lantiq,pmu-xway";
+ reg = <0x102000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/mips/realtek-rtl.yaml b/Documentation/devicetree/bindings/mips/realtek-rtl.yaml
new file mode 100644
index 000000000000..aadff8ce0f49
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/realtek-rtl.yaml
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mips/realtek-rtl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek RTL83xx/93xx SoC series device tree bindings
+
+maintainers:
+ - Bert Vermeulen <bert@biot.com>
+ - Sander Vanheule <sander@svanheule.net>
+
+properties:
+ $nodename:
+ const: "/"
+ compatible:
+ oneOf:
+ # RTL8382-based boards
+ - items:
+ - enum:
+ - cisco,sg220-26
+ - const: realtek,rtl8382-soc
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/misc/eeprom-93xx46.txt b/Documentation/devicetree/bindings/misc/eeprom-93xx46.txt
index a8ebb4621f79..7b636b7a8311 100644
--- a/Documentation/devicetree/bindings/misc/eeprom-93xx46.txt
+++ b/Documentation/devicetree/bindings/misc/eeprom-93xx46.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible : shall be one of:
"atmel,at93c46d"
"eeprom-93xx46"
+ "microchip,93lc46b"
- data-size : number of data bits per word (either 8 or 16)
Optional properties:
diff --git a/Documentation/devicetree/bindings/misc/fsl,dpaa2-console.yaml b/Documentation/devicetree/bindings/misc/fsl,dpaa2-console.yaml
index 271a3eafc054..8cc951feb7df 100644
--- a/Documentation/devicetree/bindings/misc/fsl,dpaa2-console.yaml
+++ b/Documentation/devicetree/bindings/misc/fsl,dpaa2-console.yaml
@@ -15,6 +15,7 @@ properties:
const: "fsl,dpaa2-console"
reg:
+ maxItems: 1
description: A standard property. Specifies the region where the MCFBA
(MC firmware base address) register can be found.
diff --git a/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml b/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml
index e82c9a07b6fb..e75b3a8ba816 100644
--- a/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml
+++ b/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml
@@ -26,6 +26,8 @@ properties:
- const: allwinner,sun9i-a80-mmc
- const: allwinner,sun50i-a64-emmc
- const: allwinner,sun50i-a64-mmc
+ - const: allwinner,sun50i-a100-emmc
+ - const: allwinner,sun50i-a100-mmc
- items:
- const: allwinner,sun8i-a83t-mmc
- const: allwinner,sun7i-a20-mmc
@@ -47,6 +49,12 @@ properties:
- items:
- const: allwinner,sun50i-h6-mmc
- const: allwinner,sun50i-a64-mmc
+ - items:
+ - const: allwinner,sun50i-h616-emmc
+ - const: allwinner,sun50i-a100-emmc
+ - items:
+ - const: allwinner,sun50i-h616-mmc
+ - const: allwinner,sun50i-a100-mmc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/mmc/arm,pl18x.yaml b/Documentation/devicetree/bindings/mmc/arm,pl18x.yaml
new file mode 100644
index 000000000000..47595cb483be
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/arm,pl18x.yaml
@@ -0,0 +1,223 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/arm,pl18x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM PrimeCell MultiMedia Card Interface (MMCI) PL180 and PL181
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+ - Ulf Hansson <ulf.hansson@linaro.org>
+
+description:
+ The ARM PrimeCell MMCI PL180 and PL181 provide an interface for
+ reading and writing to MultiMedia and SD cards alike. Over the years
+ vendors have used the VHDL code from ARM to create derivative MMC/SD/SDIO
+ host controllers with very similar characteristics.
+
+allOf:
+ - $ref: /schemas/arm/primecell.yaml#
+ - $ref: mmc-controller.yaml#
+
+# We need a select here so we don't match all nodes with 'arm,primecell'
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - arm,pl180
+ - arm,pl181
+ - arm,pl18x
+ required:
+ - compatible
+
+properties:
+ compatible:
+ oneOf:
+ - description: The first version of the block, simply called
+ PL180 and found in the ARM Integrator IM/PD1 logic module.
+ items:
+ - const: arm,pl180
+ - const: arm,primecell
+ - description: The improved version of the block, found in the
+ ARM Versatile and later reference designs. Further revisions
+ exist but get detected at runtime by reading some magic numbers
+ in the PrimeCell ID registers.
+ items:
+ - const: arm,pl181
+ - const: arm,primecell
+ - description: Wildcard entry that will let the operating system
+ inspect the PrimeCell ID registers to determine which hardware
+ variant of PL180 or PL181 this is.
+ items:
+ - const: arm,pl18x
+ - const: arm,primecell
+
+ clocks:
+ description: One or two clocks, the "apb_pclk" and the "MCLK"
+ which is the core block clock. The names are not compulsory.
+ minItems: 1
+ maxItems: 2
+
+ power-domains: true
+
+ resets:
+ maxItems: 1
+
+ reg:
+ description: The MMIO memory window must be exactly 4KB (0x1000) and the
+ layout should provide the PrimeCell ID registers so that the device can
+ be discovered. On ST Micro variants, a second register window may be
+ defined if a delay block is present and used for tuning.
+
+ interrupts:
+ description: The first interrupt is the command interrupt and corresponds
+ to the event at the end of a command. The second interrupt is the
+ PIO (polled I/O) interrupt and occurs when the FIFO needs to be
+ emptied as part of a bulk read from the card. Some variants have these
+ two interrupts wired into the same line (logic OR) and in that case
+ only one interrupt may be provided.
+ minItems: 1
+ maxItems: 2
+
+ st,sig-dir-dat0:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: ST Micro-specific property, bus signal direction pins used for
+ DAT[0].
+
+ st,sig-dir-dat2:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: ST Micro-specific property, bus signal direction pins used for
+ DAT[2].
+
+ st,sig-dir-dat31:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: ST Micro-specific property, bus signal direction pins used for
+ DAT[3] and DAT[1].
+
+ st,sig-dir-dat74:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: ST Micro-specific property, bus signal direction pins used for
+ DAT[7] and DAT[4].
+
+ st,sig-dir-cmd:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: ST Micro-specific property, CMD signal direction used for
+ pin CMD.
+
+ st,sig-pin-fbclk:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: ST Micro-specific property, feedback clock FBCLK signal pin
+ in use.
+
+ st,sig-dir:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: ST Micro-specific property, signal direction polarity used for
+ pins CMD, DAT[0], DAT[1], DAT[2] and DAT[3].
+
+ st,neg-edge:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: ST Micro-specific property, data and command phase relation,
+ generated on the sd clock falling edge.
+
+ st,use-ckin:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: ST Micro-specific property, use CKIN pin from an external
+ driver to sample the receive data (for example with a voltage switch
+ transceiver).
+
+ st,cmd-gpios:
+ maxItems: 1
+ description:
+ The GPIO matching the CMD pin.
+
+ st,ck-gpios:
+ maxItems: 1
+ description:
+ The GPIO matching the CK pin.
+
+ st,ckin-gpios:
+ maxItems: 1
+ description:
+ The GPIO matching the CKIN pin.
+
+dependencies:
+ st,cmd-gpios: [ "st,use-ckin" ]
+ st,ck-gpios: [ "st,use-ckin" ]
+ st,ckin-gpios: [ "st,use-ckin" ]
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ mmc@5000 {
+ compatible = "arm,pl180", "arm,primecell";
+ reg = <0x5000 0x1000>;
+ interrupts-extended = <&vic 22 &sic 1>;
+ clocks = <&xtal24mhz>, <&pclk>;
+ clock-names = "mclk", "apb_pclk";
+ };
+
+ mmc@80126000 {
+ compatible = "arm,pl18x", "arm,primecell";
+ reg = <0x80126000 0x1000>;
+ interrupts = <0 60 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dma 29 0 0x2>, <&dma 29 0 0x0>;
+ dma-names = "rx", "tx";
+ clocks = <&prcc_kclk 1 5>, <&prcc_pclk 1 5>;
+ clock-names = "sdi", "apb_pclk";
+ max-frequency = <100000000>;
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cd-gpios = <&gpio2 31 0x4>;
+ st,sig-dir-dat0;
+ st,sig-dir-dat2;
+ st,sig-dir-cmd;
+ st,sig-pin-fbclk;
+ vmmc-supply = <&ab8500_ldo_aux3_reg>;
+ vqmmc-supply = <&vmmci>;
+ };
+
+ mmc@101f6000 {
+ compatible = "arm,pl18x", "arm,primecell";
+ reg = <0x101f6000 0x1000>;
+ clocks = <&sdiclk>, <&pclksdi>;
+ clock-names = "mclk", "apb_pclk";
+ interrupt-parent = <&vica>;
+ interrupts = <22>;
+ max-frequency = <400000>;
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ full-pwr-cycle;
+ st,sig-dir-dat0;
+ st,sig-dir-dat2;
+ st,sig-dir-dat31;
+ st,sig-dir-cmd;
+ st,sig-pin-fbclk;
+ vmmc-supply = <&vmmc_regulator>;
+ };
+
+ mmc@52007000 {
+ compatible = "arm,pl18x", "arm,primecell";
+ arm,primecell-periphid = <0x10153180>;
+ reg = <0x52007000 0x1000>;
+ interrupts = <49>;
+ interrupt-names = "cmd_irq";
+ clocks = <&rcc 0>;
+ clock-names = "apb_pclk";
+ resets = <&rcc 1>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ max-frequency = <120000000>;
+ };
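
 The examples above do not exercise the CKIN feedback path; a hedged sketch,
 with made-up addresses, phandles and GPIO numbers, of how the st,use-ckin
 dependency of the st,*-gpios properties might be satisfied:

     mmc@58005000 {
         compatible = "arm,pl18x", "arm,primecell";
         reg = <0x58005000 0x1000>;
         interrupts = <50>;
         clocks = <&rcc 2>;
         clock-names = "apb_pclk";
         bus-width = <4>;
         st,neg-edge;
         /* Sample receive data on the clock fed back through the CKIN pin */
         st,use-ckin;
         st,cmd-gpios = <&gpiod 2 0>;
         st,ck-gpios = <&gpiod 12 0>;
         st,ckin-gpios = <&gpioe 4 0>;
     };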
diff --git a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
index ed1456f5c94d..c51a62d751dc 100644
--- a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
@@ -12,6 +12,7 @@ Required Properties:
- "marvell,armada-3700-sdhci": For controllers on Armada-3700 SoC.
Must provide a second register area and marvell,pad-type.
- "marvell,armada-ap806-sdhci": For controllers on Armada AP806.
+ - "marvell,armada-ap807-sdhci": For controllers on Armada AP807.
- "marvell,armada-cp110-sdhci": For controllers on Armada CP110.
- clocks:
diff --git a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
index 186f04ba9357..e141330c1114 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
+++ b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
@@ -40,6 +40,7 @@ properties:
There is no card detection available; polling must be used.
cd-gpios:
+ maxItems: 1
description:
The card detection will be done using the GPIO provided.
@@ -104,6 +105,7 @@ properties:
line. Not used in combination with eMMC or SDIO.
wp-gpios:
+ maxItems: 1
description:
GPIO to use for the write-protect detection.
@@ -259,7 +261,6 @@ properties:
waiting for I/O signalling and card power supply to be stable,
regardless of whether pwrseq-simple is used. Default to 10ms if
no available.
- $ref: /schemas/types.yaml#/definitions/uint32
default: 10
supports-cqe:
diff --git a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml
index 6cd57863c1db..226fb191913d 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml
+++ b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml
@@ -41,13 +41,11 @@ properties:
description:
Delay in ms after powering the card and de-asserting the
reset-gpios (if any).
- $ref: /schemas/types.yaml#/definitions/uint32
power-off-delay-us:
description:
Delay in us after asserting the reset-gpios (if any)
during power off of the card.
- $ref: /schemas/types.yaml#/definitions/uint32
required:
- compatible
diff --git a/Documentation/devicetree/bindings/mmc/mmci.txt b/Documentation/devicetree/bindings/mmc/mmci.txt
deleted file mode 100644
index 4ec921e4bf34..000000000000
--- a/Documentation/devicetree/bindings/mmc/mmci.txt
+++ /dev/null
@@ -1,74 +0,0 @@
-* ARM PrimeCell MultiMedia Card Interface (MMCI) PL180/1
-
-The ARM PrimeCell MMCI PL180 and PL181 provides an interface for
-reading and writing to MultiMedia and SD cards alike.
-
-This file documents differences between the core properties described
-by mmc.txt and the properties used by the mmci driver. Using "st" as
-the prefix for a property, indicates support by the ST Micro variant.
-
-Required properties:
-- compatible : contains "arm,pl18x", "arm,primecell".
-- vmmc-supply : phandle to the regulator device tree node, mentioned
- as the VCC/VDD supply in the eMMC/SD specs.
-
-Optional properties:
-- arm,primecell-periphid : contains the PrimeCell Peripheral ID, it overrides
- the ID provided by the HW
-- resets : phandle to internal reset line.
- Should be defined for sdmmc variant.
-- vqmmc-supply : phandle to the regulator device tree node, mentioned
- as the VCCQ/VDD_IO supply in the eMMC/SD specs.
-specific for ux500 variant:
-- st,sig-dir-dat0 : bus signal direction pin used for DAT[0].
-- st,sig-dir-dat2 : bus signal direction pin used for DAT[2].
-- st,sig-dir-dat31 : bus signal direction pin used for DAT[3] and DAT[1].
-- st,sig-dir-dat74 : bus signal direction pin used for DAT[4] to DAT[7].
-- st,sig-dir-cmd : cmd signal direction pin used for CMD.
-- st,sig-pin-fbclk : feedback clock signal pin used.
-
-specific for sdmmc variant:
-- reg : a second base register may be defined if a delay
- block is present and used for tuning.
-- st,sig-dir : signal direction polarity used for cmd, dat0 dat123.
-- st,neg-edge : data & command phase relation, generated on
- sd clock falling edge.
-- st,use-ckin : use ckin pin from an external driver to sample
- the receive data (example: with voltage
- switch transceiver).
-
-Deprecated properties:
-- mmc-cap-mmc-highspeed : indicates whether MMC is high speed capable.
-- mmc-cap-sd-highspeed : indicates whether SD is high speed capable.
-
-Example:
-
-sdi0_per1@80126000 {
- compatible = "arm,pl18x", "arm,primecell";
- reg = <0x80126000 0x1000>;
- interrupts = <0 60 IRQ_TYPE_LEVEL_HIGH>;
-
- dmas = <&dma 29 0 0x2>, /* Logical - DevToMem */
- <&dma 29 0 0x0>; /* Logical - MemToDev */
- dma-names = "rx", "tx";
-
- clocks = <&prcc_kclk 1 5>, <&prcc_pclk 1 5>;
- clock-names = "sdi", "apb_pclk";
-
- max-frequency = <100000000>;
- bus-width = <4>;
- cap-sd-highspeed;
- cap-mmc-highspeed;
- cd-gpios = <&gpio2 31 0x4>; // 95
- st,sig-dir-dat0;
- st,sig-dir-dat2;
- st,sig-dir-cmd;
- st,sig-pin-fbclk;
-
- vmmc-supply = <&ab8500_ldo_aux3_reg>;
- vqmmc-supply = <&vmmci>;
-
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&sdi0_default_mode>;
- pinctrl-1 = <&sdi0_sleep_mode>;
-};
diff --git a/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml b/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
index 6bbf29b5c239..1118b6fa93c9 100644
--- a/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
+++ b/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
@@ -59,6 +59,7 @@ properties:
- renesas,sdhi-r8a77980 # R-Car V3H
- renesas,sdhi-r8a77990 # R-Car E3
- renesas,sdhi-r8a77995 # R-Car D3
+ - renesas,sdhi-r8a779a0 # R-Car V3U
- const: renesas,rcar-gen3-sdhi # R-Car Gen3 or RZ/G2
reg:
@@ -123,7 +124,7 @@ required:
if:
properties:
compatible:
- items:
+ contains:
enum:
- renesas,sdhi-r7s72100
- renesas,sdhi-r7s9210
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-am654.yaml b/Documentation/devicetree/bindings/mmc/sdhci-am654.yaml
index 1ae945434c53..3a79e39253d2 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-am654.yaml
+++ b/Documentation/devicetree/bindings/mmc/sdhci-am654.yaml
@@ -15,12 +15,19 @@ allOf:
properties:
compatible:
- enum:
- - ti,am654-sdhci-5.1
- - ti,j721e-sdhci-8bit
- - ti,j721e-sdhci-4bit
- - ti,j7200-sdhci-8bit
- - ti,j721e-sdhci-4bit
+ oneOf:
+ - const: ti,am654-sdhci-5.1
+ - const: ti,j721e-sdhci-8bit
+ - const: ti,j721e-sdhci-4bit
+ - const: ti,am64-sdhci-8bit
+ - const: ti,am64-sdhci-4bit
+ - items:
+ - const: ti,j7200-sdhci-8bit
+ - const: ti,j721e-sdhci-8bit
+ - items:
+ - const: ti,j7200-sdhci-4bit
+ - const: ti,j721e-sdhci-4bit
reg:
maxItems: 2
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 3b602fd6180b..4c7fa6a4ed15 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -17,10 +17,11 @@ Required properties:
"qcom,msm8916-sdhci", "qcom,sdhci-msm-v4"
"qcom,msm8992-sdhci", "qcom,sdhci-msm-v4"
"qcom,msm8996-sdhci", "qcom,sdhci-msm-v4"
- "qcom,sm8250-sdhci", "qcom,sdhci-msm-v5"
- "qcom,sdm845-sdhci", "qcom,sdhci-msm-v5"
"qcom,qcs404-sdhci", "qcom,sdhci-msm-v5"
"qcom,sc7180-sdhci", "qcom,sdhci-msm-v5";
+ "qcom,sdm845-sdhci", "qcom,sdhci-msm-v5"
+ "qcom,sdx55-sdhci", "qcom,sdhci-msm-v5";
+ "qcom,sm8250-sdhci", "qcom,sdhci-msm-v5"
NOTE that some old device tree files may be floating around that only
have the string "qcom,sdhci-msm-v4" without the SoC compatible string
but doing that should be considered a deprecated practice.
@@ -30,10 +31,12 @@ Required properties:
- SD Core register map (required for controllers earlier than msm-v5)
- CQE register map (Optional, CQE support is present on SDHC instance meant
for eMMC and version v4.2 and above)
+ - Inline Crypto Engine register map (optional)
- reg-names: When CQE register map is supplied, below reg-names are required
- "hc" for Host controller register map
- "core" for SD core register map
- "cqhci" for CQE register map
+ - "ice" for Inline Crypto Engine register map (optional)
- interrupts: Should contain an interrupt-specifiers for the interrupts:
- Host controller interrupt (required)
- pinctrl-names: Should contain only one value - "default".
@@ -46,6 +49,7 @@ Required properties:
"xo" - TCXO clock (optional)
"cal" - reference clock for RCLK delay calibration (optional)
"sleep" - sleep clock for RCLK delay calibration (optional)
+ "ice" - clock for Inline Crypto Engine (optional)
- qcom,ddr-config: Certain chipsets and platforms require particular settings
for the DDR_CONFIG register. Use this field to specify the register
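
A hedged sketch of an SDHCI node carrying the new optional Inline Crypto Engine
register region and clock; the unit address, register ranges, interrupt and
clock specifiers are placeholders, not taken from a real board file:

    sdhci@8804000 {
        compatible = "qcom,sm8250-sdhci", "qcom,sdhci-msm-v5";
        /* Host controller and Inline Crypto Engine register windows */
        reg = <0x08804000 0x1000>, <0x08808000 0x2000>;
        reg-names = "hc", "ice";
        interrupts = <0 204 4>;             /* placeholder specifier */
        /* placeholder clock specifiers; the third one feeds the ICE block */
        clocks = <&gcc 123>, <&gcc 124>, <&gcc 125>;
        clock-names = "iface", "core", "ice";
        bus-width = <4>;
    };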
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-sirf.txt b/Documentation/devicetree/bindings/mmc/sdhci-sirf.txt
deleted file mode 100644
index dd6ed464bcb8..000000000000
--- a/Documentation/devicetree/bindings/mmc/sdhci-sirf.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-* SiRFprimII/marco/atlas6 SDHCI Controller
-
-This file documents differences between the core properties in mmc.txt
-and the properties used by the sdhci-sirf driver.
-
-Required properties:
-- compatible: sirf,prima2-sdhc
-
-Optional properties:
-- cd-gpios: card detect gpio, with zero flags.
-
-Example:
-
- sd0: sdhci@56000000 {
- compatible = "sirf,prima2-sdhc";
- reg = <0xcd000000 0x100000>;
- cd-gpios = <&gpio 6 0>;
- };
diff --git a/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt
deleted file mode 100644
index 0f59bd5361f5..000000000000
--- a/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-* ZTE specific extensions to the Synopsys Designware Mobile Storage
- Host Controller
-
-The Synopsys designware mobile storage host controller is used to interface
-a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsys dw mshc controller properties described
-by synopsys-dw-mshc.txt and the properties used by the ZTE specific
-extensions to the Synopsys Designware Mobile Storage Host Controller.
-
-Required Properties:
-
-* compatible: should be
- - "zte,zx296718-dw-mshc": for ZX SoCs
-
-Example:
-
- mmc1: mmc@1110000 {
- compatible = "zte,zx296718-dw-mshc";
- reg = <0x01110000 0x1000>;
- interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
- fifo-depth = <32>;
- data-addr = <0x200>;
- fifo-watermark-aligned;
- bus-width = <4>;
- clock-frequency = <50000000>;
- clocks = <&topcrm SD0_AHB>, <&topcrm SD0_WCLK>;
- clock-names = "biu", "ciu";
- max-frequency = <50000000>;
- cap-sdio-irq;
- cap-sd-highspeed;
- };
diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
deleted file mode 100644
index f03be904d3c2..000000000000
--- a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
+++ /dev/null
@@ -1,91 +0,0 @@
-* SPI NOR flash: ST M25Pxx (and similar) serial flash chips
-
-Required properties:
-- #address-cells, #size-cells : Must be present if the device has sub-nodes
- representing partitions.
-- compatible : May include a device-specific string consisting of the
- manufacturer and name of the chip. A list of supported chip
- names follows.
- Must also include "jedec,spi-nor" for any SPI NOR flash that can
- be identified by the JEDEC READ ID opcode (0x9F).
-
- Supported chip names:
- at25df321a
- at25df641
- at26df081a
- mr25h128
- mr25h256
- mr25h10
- mr25h40
- mx25l4005a
- mx25l1606e
- mx25l6405d
- mx25l12805d
- mx25l25635e
- n25q064
- n25q128a11
- n25q128a13
- n25q512a
- s25fl256s1
- s25fl512s
- s25sl12801
- s25fl008k
- s25fl064k
- sst25vf040b
- m25p40
- m25p80
- m25p16
- m25p32
- m25p64
- m25p128
- w25x80
- w25x32
- w25q32
- w25q64
- w25q32dw
- w25q80bl
- w25q128
- w25q256
-
- The following chip names have been used historically to
- designate quirky versions of flash chips that do not support the
- JEDEC READ ID opcode (0x9F):
- m25p05-nonjedec
- m25p10-nonjedec
- m25p20-nonjedec
- m25p40-nonjedec
- m25p80-nonjedec
- m25p16-nonjedec
- m25p32-nonjedec
- m25p64-nonjedec
- m25p128-nonjedec
-
-- reg : Chip-Select number
-- spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at
-
-Optional properties:
-- m25p,fast-read : Use the "fast read" opcode to read data from the chip instead
- of the usual "read" opcode. This opcode is not supported by
- all chips and support for it can not be detected at runtime.
- Refer to your chips' datasheet to check if this is supported
- by your chip.
-- broken-flash-reset : Some flash devices utilize stateful addressing modes
- (e.g., for 32-bit addressing) which need to be managed
- carefully by a system. Because these sorts of flash don't
- have a standardized software reset command, and because some
- systems don't toggle the flash RESET# pin upon system reset
- (if the pin even exists at all), there are systems which
- cannot reboot properly if the flash is left in the "wrong"
- state. This boolean flag can be used on such systems, to
- denote the absence of a reliable reset mechanism.
-
-Example:
-
- flash: m25p80@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "spansion,m25p80", "jedec,spi-nor";
- reg = <0>;
- spi-max-frequency = <40000000>;
- m25p,fast-read;
- };
diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
new file mode 100644
index 000000000000..5e7e5349f9a1
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/jedec,spi-nor.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SPI NOR flash ST M25Pxx (and similar) serial flash chips
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - pattern: "^((((micron|spansion|st),)?\
+ (m25p(40|80|16|32|64|128)|\
+ n25q(32b|064|128a11|128a13|256a|512a|164k)))|\
+ atmel,at25df(321a|641|081a)|\
+ everspin,mr25h(10|40|128|256)|\
+ (mxicy|macronix),mx25l(4005a|1606e|6405d|8005|12805d|25635e)|\
+ (mxicy|macronix),mx25u(4033|4035)|\
+ (spansion,)?s25fl(128s|256s1|512s|008k|064k|164k)|\
+ (sst|microchip),sst25vf(016b|032b|040b)|\
+ (sst,)?sst26wf016b|\
+ (sst,)?sst25wf(040b|080)|\
+ winbond,w25x(80|32)|\
+ (winbond,)?w25q(16|32(w|dw)?|64(dw)?|80bl|128(fw)?|256))$"
+ - const: jedec,spi-nor
+ - items:
+ - enum:
+ - issi,is25lp016d
+ - micron,mt25qu02g
+ - mxicy,mx25r1635f
+ - mxicy,mx25u6435f
+ - mxicy,mx25v8035f
+ - spansion,s25sl12801
+ - spansion,s25fs512s
+ - const: jedec,spi-nor
+ - const: jedec,spi-nor
+ description:
+ Must also include "jedec,spi-nor" for any SPI NOR flash that can be
+ identified by the JEDEC READ ID opcode (0x9F).
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency: true
+ spi-rx-bus-width: true
+ spi-tx-bus-width: true
+
+ m25p,fast-read:
+ type: boolean
+ description:
+ Use the "fast read" opcode to read data from the chip instead of the usual
+ "read" opcode. This opcode is not supported by all chips and support for
+ it can not be detected at runtime. Refer to your chips' datasheet to check
+ if this is supported by your chip.
+
+ broken-flash-reset:
+ type: boolean
+ description:
+ Some flash devices utilize stateful addressing modes (e.g., for 32-bit
+ addressing) which need to be managed carefully by a system. Because these
+ sorts of flash don't have a standardized software reset command, and
+ because some systems don't toggle the flash RESET# pin upon system reset
+ (if the pin even exists at all), there are systems which cannot reboot
+ properly if the flash is left in the "wrong" state. This boolean flag can
+ be used on such systems, to denote the absence of a reliable reset
+ mechanism.
+
+ label: true
+
+ partitions:
+ type: object
+
+ '#address-cells': true
+ '#size-cells': true
+
+patternProperties:
+ # Note: use 'partitions' node for new users
+ '^partition@':
+ type: object
+
+additionalProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,m25p80", "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <40000000>;
+ m25p,fast-read;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/mtd/partitions/brcm,bcm4908-partitions.yaml b/Documentation/devicetree/bindings/mtd/partitions/brcm,bcm4908-partitions.yaml
new file mode 100644
index 000000000000..7b113e5e3421
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partitions/brcm,bcm4908-partitions.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/partitions/brcm,bcm4908-partitions.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM4908 partitioning
+
+description: |
+ The Broadcom BCM4908 CFE bootloader supports two firmware partitions: one is
+ used for regular booting, the other is treated as a fallback.
+
+ This binding allows defining all fixed partitions and marking those containing
+ firmware. The system can use that information e.g. for booting or flashing
+ purposes.
+
+maintainers:
+ - Rafał Miłecki <rafal@milecki.pl>
+
+properties:
+ compatible:
+ const: brcm,bcm4908-partitions
+
+ "#address-cells":
+ enum: [ 1, 2 ]
+
+ "#size-cells":
+ enum: [ 1, 2 ]
+
+patternProperties:
+ "^partition@[0-9a-f]+$":
+ $ref: "partition.yaml#"
+ properties:
+ compatible:
+ const: brcm,bcm4908-firmware
+ unevaluatedProperties: false
+
+required:
+ - "#address-cells"
+ - "#size-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ partitions {
+ compatible = "brcm,bcm4908-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "cferom";
+ reg = <0x0 0x100000>;
+ };
+
+ partition@100000 {
+ compatible = "brcm,bcm4908-firmware";
+ reg = <0x100000 0xf00000>;
+ };
+
+ partition@1000000 {
+ compatible = "brcm,bcm4908-firmware";
+ reg = <0x1000000 0xf00000>;
+ };
+
+ partition@1f00000 {
+ label = "calibration";
+ reg = <0x1f00000 0x100000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml b/Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml
index 6d4a3450e064..ea4cace6a955 100644
--- a/Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml
+++ b/Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml
@@ -27,38 +27,7 @@ properties:
patternProperties:
"@[0-9a-f]+$":
- description: node describing a single flash partition
- type: object
-
- properties:
- reg:
- description: partition's offset and size within the flash
- maxItems: 1
-
- label:
- description: The label / name for this partition. If omitted, the label
- is taken from the node name (excluding the unit address).
-
- read-only:
- description: This parameter, if present, is a hint that this partition
- should only be mounted read-only. This is usually used for flash
- partitions containing early-boot firmware images or data which should
- not be clobbered.
- type: boolean
-
- lock:
- description: Do not unlock the partition at initialization time (not
- supported on all devices)
- type: boolean
-
- slc-mode:
- description: This parameter, if present, allows one to emulate SLC mode
- on a partition attached to an MLC NAND thus making this partition
- immune to paired-pages corruptions
- type: boolean
-
- required:
- - reg
+ $ref: "partition.yaml#"
required:
- "#address-cells"
diff --git a/Documentation/devicetree/bindings/mtd/partitions/partition.yaml b/Documentation/devicetree/bindings/mtd/partitions/partition.yaml
new file mode 100644
index 000000000000..e1ac08064425
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partitions/partition.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/partitions/partition.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Partition
+
+description: |
+ This binding describes a single flash partition. Each partition must have its
+ relative offset and size specified. Depending on the partition's function,
+ extra properties can be used.
+
+maintainers:
+ - Rafał Miłecki <rafal@milecki.pl>
+
+properties:
+ reg:
+ description: partition's offset and size within the flash
+ maxItems: 1
+
+ label:
+ description: The label / name for this partition. If omitted, the label
+ is taken from the node name (excluding the unit address).
+
+ read-only:
+ description: This parameter, if present, is a hint that this partition
+ should only be mounted read-only. This is usually used for flash
+ partitions containing early-boot firmware images or data which should
+ not be clobbered.
+ type: boolean
+
+ lock:
+ description: Do not unlock the partition at initialization time (not
+ supported on all devices)
+ type: boolean
+
+ slc-mode:
+ description: This parameter, if present, allows one to emulate SLC mode
+ on a partition attached to an MLC NAND thus making this partition
+ immune to paired-pages corruptions
+ type: boolean
+
+required:
+ - reg
+
+additionalProperties: true
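
For illustration, a minimal sketch of how these partition properties are typically used inside a parent partitions node; the label, offsets and sizes below are placeholders:

	partitions {
		compatible = "fixed-partitions";
		#address-cells = <1>;
		#size-cells = <1>;

		partition@0 {
			label = "bootloader";
			reg = <0x0 0x100000>;
			read-only;
		};

		partition@100000 {
			label = "firmware";
			reg = <0x100000 0xf00000>;
		};
	};
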
diff --git a/Documentation/devicetree/bindings/mtd/partitions/qcom,smem-part.yaml b/Documentation/devicetree/bindings/mtd/partitions/qcom,smem-part.yaml
new file mode 100644
index 000000000000..cf3f8c1e035d
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partitions/qcom,smem-part.yaml
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/partitions/qcom,smem-part.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SMEM NAND flash partition parser binding
+
+maintainers:
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+description: |
+ Qualcomm SoCs supporting the NAND controller interface feature a Shared
+ Memory (SMEM) based partition table scheme. The maximum number of partitions
+ supported varies between partition table revisions: V3 supports up to 16
+ partitions and V4 up to 48 partitions.
+
+properties:
+ compatible:
+ const: qcom,smem-part
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ flash {
+ partitions {
+ compatible = "qcom,smem-part";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
index 1f133f4a2924..0467441d7037 100644
--- a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
@@ -74,17 +74,60 @@ allOf:
Any configuration is ignored when the phy-mode is set to "rmii".
amlogic,rx-delay-ns:
+ deprecated: true
enum:
- 0
- 2
default: 0
description:
- The internal RGMII RX clock delay (provided by this IP block) in
- nanoseconds. When phy-mode is set to "rgmii" then the RX delay
- should be explicitly configured. When the phy-mode is set to
- either "rgmii-id" or "rgmii-rxid" the RX clock delay is already
- provided by the PHY. Any configuration is ignored when the
- phy-mode is set to "rmii".
+ The internal RGMII RX clock delay in nanoseconds. Deprecated, use
+ rx-internal-delay-ps instead.
+
+ rx-internal-delay-ps:
+ default: 0
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - amlogic,meson8b-dwmac
+ - amlogic,meson8m2-dwmac
+ - amlogic,meson-gxbb-dwmac
+ - amlogic,meson-axg-dwmac
+ then:
+ properties:
+ rx-internal-delay-ps:
+ enum:
+ - 0
+ - 2000
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - amlogic,meson-g12a-dwmac
+ then:
+ properties:
+ rx-internal-delay-ps:
+ enum:
+ - 0
+ - 200
+ - 400
+ - 600
+ - 800
+ - 1000
+ - 1200
+ - 1400
+ - 1600
+ - 1800
+ - 2000
+ - 2200
+ - 2400
+ - 2600
+ - 2800
+ - 3000
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml b/Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
new file mode 100644
index 000000000000..79c38ea14237
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/brcm,bcm4908-enet.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM4908 Ethernet controller
+
+description: Broadcom's Ethernet controller integrated into BCM4908 family SoCs
+
+maintainers:
+ - Rafał Miłecki <rafal@milecki.pl>
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+
+properties:
+ compatible:
+ const: brcm,bcm4908-enet
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: RX interrupt
+
+ interrupt-names:
+ const: rx
+
+required:
+ - reg
+ - interrupts
+ - interrupt-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ ethernet@80002000 {
+ compatible = "brcm,bcm4908-enet";
+ reg = <0x80002000 0x1000>;
+
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "rx";
+ };
diff --git a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
index 97ca62b0e14d..d0935d2afef8 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
@@ -1,108 +1,13 @@
* Broadcom Starfighter 2 integrated switch
-Required properties:
+See dsa/brcm,sf2.yaml for the documentation.
-- compatible: should be one of
- "brcm,bcm7445-switch-v4.0"
- "brcm,bcm7278-switch-v4.0"
- "brcm,bcm7278-switch-v4.8"
-- reg: addresses and length of the register sets for the device, must be 6
- pairs of register addresses and lengths
-- interrupts: interrupts for the devices, must be two interrupts
-- #address-cells: must be 1, see dsa/dsa.txt
-- #size-cells: must be 0, see dsa/dsa.txt
-
-Deprecated binding required properties:
+*Deprecated* binding required properties:
- dsa,mii-bus: phandle to the MDIO bus controller, see dsa/dsa.txt
- dsa,ethernet: phandle to the CPU network interface controller, see dsa/dsa.txt
- #address-cells: must be 2, see dsa/dsa.txt
-Subnodes:
-
-The integrated switch subnode should be specified according to the binding
-described in dsa/dsa.txt.
-
-Optional properties:
-
-- reg-names: litteral names for the device base register addresses, when present
- must be: "core", "reg", "intrl2_0", "intrl2_1", "fcb", "acb"
-
-- interrupt-names: litternal names for the device interrupt lines, when present
- must be: "switch_0" and "switch_1"
-
-- brcm,num-gphy: specify the maximum number of integrated gigabit PHYs in the
- switch
-
-- brcm,num-rgmii-ports: specify the maximum number of RGMII interfaces supported
- by the switch
-
-- brcm,fcb-pause-override: boolean property, if present indicates that the switch
- supports Failover Control Block pause override capability
-
-- brcm,acb-packets-inflight: boolean property, if present indicates that the switch
- Admission Control Block supports reporting the number of packets in-flight in a
- switch queue
-
-- resets: a single phandle and reset identifier pair. See
- Documentation/devicetree/bindings/reset/reset.txt for details.
-
-- reset-names: If the "reset" property is specified, this property should have
- the value "switch" to denote the switch reset line.
-
-- clocks: when provided, the first phandle is to the switch's main clock and
- is valid for both BCM7445 and BCM7278. The second phandle is only applicable
- to BCM7445 and is to support dividing the switch core clock.
-
-- clock-names: when provided, the first phandle must be "sw_switch", and the
- second must be named "sw_switch_mdiv".
-
-Port subnodes:
-
-Optional properties:
-
-- brcm,use-bcm-hdr: boolean property, if present, indicates that the switch
- port has Broadcom tags enabled (per-packet metadata)
-
-Example:
-
-switch_top@f0b00000 {
- compatible = "simple-bus";
- #size-cells = <1>;
- #address-cells = <1>;
- ranges = <0 0xf0b00000 0x40804>;
-
- ethernet_switch@0 {
- compatible = "brcm,bcm7445-switch-v4.0";
- #size-cells = <0>;
- #address-cells = <1>;
- reg = <0x0 0x40000
- 0x40000 0x110
- 0x40340 0x30
- 0x40380 0x30
- 0x40400 0x34
- 0x40600 0x208>;
- reg-names = "core", "reg", intrl2_0", "intrl2_1",
- "fcb, "acb";
- interrupts = <0 0x18 0
- 0 0x19 0>;
- brcm,num-gphy = <1>;
- brcm,num-rgmii-ports = <2>;
- brcm,fcb-pause-override;
- brcm,acb-packets-inflight;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- label = "gphy";
- reg = <0>;
- };
- };
- };
-};
-
Example using the old DSA DeviceTree binding:
switch_top@f0b00000 {
@@ -132,7 +37,7 @@ switch_top@f0b00000 {
switch@0 {
reg = <0 0>;
#size-cells = <0>;
- #address-cells <1>;
+ #address-cells = <1>;
port@0 {
label = "gphy";
diff --git a/Documentation/devicetree/bindings/net/btusb.txt b/Documentation/devicetree/bindings/net/btusb.txt
index b1ad6ee68e90..f546b1f7dd6d 100644
--- a/Documentation/devicetree/bindings/net/btusb.txt
+++ b/Documentation/devicetree/bindings/net/btusb.txt
@@ -4,7 +4,7 @@ Generic Bluetooth controller over USB (btusb driver)
Required properties:
- compatible : should comply with the format "usbVID,PID" specified in
- Documentation/devicetree/bindings/usb/usb-device.txt
+ Documentation/devicetree/bindings/usb/usb-device.yaml
At the time of writing, the only OF supported devices
(more may be added later) are:
@@ -38,7 +38,7 @@ Following example uses irq pin number 3 of gpio0 for out of band wake-on-bt:
compatible = "usb1286,204e";
reg = <1>;
interrupt-parent = <&gpio0>;
- interrupt-name = "wakeup";
+ interrupt-names = "wakeup";
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml b/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml
index 0d2df30f19db..fe6a949a2eab 100644
--- a/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml
+++ b/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml
@@ -110,6 +110,16 @@ properties:
description:
Enable CAN remote wakeup.
+ fsl,scu-index:
+ description: |
+ The SCU index of the CAN instance.
+ On SoCs with SCU support, stop mode has to be set up via the SCU firmware, so
+ this property indicates which SCU resource the instance uses. Up to 3 CAN
+ instances are currently supported.
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 0
+ maximum: 2
+
required:
- compatible
- reg
@@ -137,4 +147,5 @@ examples:
clocks = <&clks 1>, <&clks 2>;
clock-names = "ipg", "per";
fsl,stop-mode = <&gpr 0x34 28>;
+ fsl,scu-index = /bits/ 8 <1>;
};
diff --git a/Documentation/devicetree/bindings/net/can/rcar_canfd.txt b/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
index 22cf2a889b2c..248c4ed97a0a 100644
--- a/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
+++ b/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
@@ -97,7 +97,7 @@ E.g. below enables Channel 0 alone in the board using External clock
as fCAN clock.
&canfd {
- pinctrl-0 = <&canfd0_pins &can_clk_pins>;
+ pinctrl-0 = <&canfd0_pins>, <&can_clk_pins>;
pinctrl-names = "default";
status = "okay";
diff --git a/Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml b/Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml
new file mode 100644
index 000000000000..3f01b65f3b22
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/dsa/arrow,xrs700x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Arrow SpeedChips XRS7000 Series Switch Device Tree Bindings
+
+allOf:
+ - $ref: dsa.yaml#
+
+maintainers:
+ - George McCollister <george.mccollister@gmail.com>
+
+description:
+ The Arrow SpeedChips XRS7000 series of single-chip gigabit Ethernet switches
+ is designed for critical networking applications. The switches have up to
+ three RGMII ports and one RMII port and are managed via I2C or MDIO.
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - arrow,xrs7003e
+ - arrow,xrs7003f
+ - arrow,xrs7004e
+ - arrow,xrs7004f
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ switch@8 {
+ compatible = "arrow,xrs7004e";
+ reg = <0x8>;
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ethernet-port@1 {
+ reg = <1>;
+ label = "lan0";
+ phy-handle = <&swphy0>;
+ phy-mode = "rgmii-id";
+ };
+ ethernet-port@2 {
+ reg = <2>;
+ label = "lan1";
+ phy-handle = <&swphy1>;
+ phy-mode = "rgmii-id";
+ };
+ ethernet-port@3 {
+ reg = <3>;
+ label = "cpu";
+ ethernet = <&fec1>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/dsa/brcm,sf2.yaml b/Documentation/devicetree/bindings/net/dsa/brcm,sf2.yaml
new file mode 100644
index 000000000000..d730fe5a4355
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/brcm,sf2.yaml
@@ -0,0 +1,173 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/dsa/brcm,sf2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Starfighter 2 integrated switch
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - brcm,bcm4908-switch
+ - brcm,bcm7278-switch-v4.0
+ - brcm,bcm7278-switch-v4.8
+ - brcm,bcm7445-switch-v4.0
+
+ reg:
+ minItems: 6
+ maxItems: 6
+
+ reg-names:
+ items:
+ - const: core
+ - const: reg
+ - const: intrl2_0
+ - const: intrl2_1
+ - const: fcb
+ - const: acb
+
+ interrupts:
+ minItems: 2
+ maxItems: 2
+
+ interrupt-names:
+ items:
+ - const: switch_0
+ - const: switch_1
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: switch
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+ items:
+ - description: switch's main clock
+ - description: dividing of the switch core clock
+
+ clock-names:
+ minItems: 1
+ maxItems: 2
+ items:
+ - const: sw_switch
+ - const: sw_switch_mdiv
+
+ brcm,num-gphy:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: maximum number of integrated gigabit PHYs in the switch
+
+ brcm,num-rgmii-ports:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: maximum number of RGMII interfaces supported by the switch
+
+ brcm,fcb-pause-override:
+ description: if present indicates that the switch supports Failover Control
+ Block pause override capability
+ type: boolean
+
+ brcm,acb-packets-inflight:
+ description: if present indicates that the switch Admission Control Block
+ supports reporting the number of packets in-flight in a switch queue
+ type: boolean
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ ports:
+ type: object
+
+ properties:
+ brcm,use-bcm-hdr:
+ description: if present, indicates that the switch port has Broadcom
+ tags enabled (per-packet metadata)
+ type: boolean
+
+required:
+ - reg
+ - interrupts
+ - "#address-cells"
+ - "#size-cells"
+
+allOf:
+ - $ref: "dsa.yaml#"
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - brcm,bcm7278-switch-v4.0
+ - brcm,bcm7278-switch-v4.8
+ then:
+ properties:
+ clocks:
+ minItems: 1
+ maxItems: 1
+ clock-names:
+ minItems: 1
+ maxItems: 1
+ required:
+ - clocks
+ - clock-names
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,bcm7445-switch-v4.0
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ minItems: 2
+ maxItems: 2
+ required:
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ switch@f0b00000 {
+ compatible = "brcm,bcm7445-switch-v4.0";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xf0b00000 0x40000>,
+ <0xf0b40000 0x110>,
+ <0xf0b40340 0x30>,
+ <0xf0b40380 0x30>,
+ <0xf0b40400 0x34>,
+ <0xf0b40600 0x208>;
+ reg-names = "core", "reg", "intrl2_0", "intrl2_1",
+ "fcb", "acb";
+ interrupts = <0 0x18 0>,
+ <0 0x19 0>;
+ clocks = <&sw_switch>, <&sw_switch_mdiv>;
+ clock-names = "sw_switch", "sw_switch_mdiv";
+ brcm,num-gphy = <1>;
+ brcm,num-rgmii-ports = <2>;
+ brcm,fcb-pause-override;
+ brcm,acb-packets-inflight;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ label = "gphy";
+ reg = <0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/dsa/mt7530.txt b/Documentation/devicetree/bindings/net/dsa/mt7530.txt
index 560369efad6c..de04626a8e9d 100644
--- a/Documentation/devicetree/bindings/net/dsa/mt7530.txt
+++ b/Documentation/devicetree/bindings/net/dsa/mt7530.txt
@@ -76,6 +76,12 @@ phy-mode must be set, see also example 2 below!
* mt7621: phy-mode = "rgmii-txid";
* mt7623: phy-mode = "rgmii";
+Optional properties:
+
+- gpio-controller: Boolean; if defined, MT7530's LED controller will run in
+ GPIO mode.
+- #gpio-cells: Must be 2 if gpio-controller is defined.
+
See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
required, optional properties and how the integrated switch subnodes must
be specified.
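
For illustration, a sketch of a switch node using the new GPIO properties; only the lines relevant to the LED/GPIO controller are shown, and the bus address is a placeholder:

	switch@0 {
		compatible = "mediatek,mt7530";
		reg = <0>;
		gpio-controller;
		#gpio-cells = <2>;
		/* remaining required switch properties as described above */
	};
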
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
index 0965f6515f9e..4b7d1e5d003c 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -89,6 +89,7 @@ properties:
- trgmii
- 1000base-x
- 2500base-x
+ - 5gbase-r
- rxaui
- xaui
@@ -122,7 +123,6 @@ properties:
such as flow control thresholds.
rx-internal-delay-ps:
- $ref: /schemas/types.yaml#/definitions/uint32
description: |
RGMII Receive Clock Delay defined in pico seconds.
This is used for controllers that have configurable RX internal delays.
@@ -140,7 +140,6 @@ properties:
is used for components that can have configurable fifo sizes.
tx-internal-delay-ps:
- $ref: /schemas/types.yaml#/definitions/uint32
description: |
RGMII Transmit Clock Delay defined in pico seconds.
This is used for controllers that have configurable TX internal delays.
@@ -207,6 +206,11 @@ properties:
Indicates that full-duplex is used. When absent, half
duplex is assumed.
+ pause:
+ $ref: /schemas/types.yaml#definitions/flag
+ description:
+ Indicates that pause should be enabled.
+
asym-pause:
$ref: /schemas/types.yaml#/definitions/flag
description:
diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt
index b78397669320..ce15c173f43f 100644
--- a/Documentation/devicetree/bindings/net/marvell-pp2.txt
+++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt
@@ -1,5 +1,6 @@
* Marvell Armada 375 Ethernet Controller (PPv2.1)
Marvell Armada 7K/8K Ethernet Controller (PPv2.2)
+ Marvell CN913X Ethernet Controller (PPv2.3)
Required properties:
@@ -12,10 +13,11 @@ Required properties:
- common controller registers
- LMS registers
- one register area per Ethernet port
- For "marvell,armada-7k-pp2", must contain the following register
+ For "marvell,armada-7k-pp2" used by 7K/8K and CN913X, must contain the following register
sets:
- packet processor registers
- networking interfaces registers
+ - CM3 address space used for TX Flow Control
- clocks: pointers to the reference clocks for this device, consequently:
- main controller clock (for both armada-375-pp2 and armada-7k-pp2)
@@ -81,7 +83,7 @@ Example for marvell,armada-7k-pp2:
cpm_ethernet: ethernet@0 {
compatible = "marvell,armada-7k-pp22";
- reg = <0x0 0x100000>, <0x129000 0xb000>;
+ reg = <0x0 0x100000>, <0x129000 0xb000>, <0x220000 0x800>;
clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>,
<&cpm_syscon0 1 5>, <&cpm_syscon0 1 6>, <&cpm_syscon0 1 18>;
clock-names = "pp_clk", "gop_clk", "mg_clk", "mg_core_clk", "axi_clk";
diff --git a/Documentation/devicetree/bindings/net/qca,ar803x.yaml b/Documentation/devicetree/bindings/net/qca,ar803x.yaml
index 64b3357ade8a..b3d4013b7ca6 100644
--- a/Documentation/devicetree/bindings/net/qca,ar803x.yaml
+++ b/Documentation/devicetree/bindings/net/qca,ar803x.yaml
@@ -28,6 +28,10 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1, 2]
+ qca,disable-smarteee:
+ description: Disable Atheros SmartEEE feature.
+ type: boolean
+
qca,keep-pll-enabled:
description: |
If set, keep the PLL enabled even if there is no link. Useful if you
@@ -36,6 +40,18 @@ properties:
Only supported on the AR8031.
type: boolean
+ qca,smarteee-tw-us-100m:
+ description: EEE Tw parameter for 100M links.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 255
+
+ qca,smarteee-tw-us-1g:
+ description: EEE Tw parameter for gigabit links.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 255
+
vddio-supply:
description: |
RGMII I/O voltage regulator (see regulator/regulator.yaml).
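
For illustration, a PHY node using the new SmartEEE properties might look like the sketch below; the PHY addresses and Tw values are placeholders:

	ethernet-phy@0 {
		reg = <0>;
		qca,smarteee-tw-us-1g = <24>;
		qca,smarteee-tw-us-100m = <30>;
	};

	/* or, to turn the feature off entirely: */
	ethernet-phy@1 {
		reg = <1>;
		qca,disable-smarteee;
	};
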
diff --git a/Documentation/devicetree/bindings/net/qcom,ipa.yaml b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
index 8a2d12644675..8f86084bf12e 100644
--- a/Documentation/devicetree/bindings/net/qcom,ipa.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
@@ -113,13 +113,6 @@ properties:
performing early IPA initialization, including loading and
 validating firmware used by the GSI.
- modem-remoteproc:
- $ref: /schemas/types.yaml#/definitions/phandle
- description:
- This defines the phandle to the remoteproc node representing
- the modem subsystem. This is requied so the IPA driver can
- receive and act on notifications of modem up/down events.
-
memory-region:
maxItems: 1
description:
@@ -135,7 +128,6 @@ required:
- interrupts
- interconnects
- qcom,smem-states
- - modem-remoteproc
oneOf:
- required:
@@ -147,7 +139,7 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/interconnect/qcom,sdm845.h>
@@ -168,7 +160,6 @@ examples:
compatible = "qcom,sdm845-ipa";
modem-init;
- modem-remoteproc = <&mss_pil>;
iommus = <&apps_smmu 0x720 0x3>;
reg = <0x1e40000 0x7000>,
@@ -178,8 +169,8 @@ examples:
"ipa-shared",
"gsi";
- interrupts-extended = <&intc 0 311 IRQ_TYPE_EDGE_RISING>,
- <&intc 0 432 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 311 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 432 IRQ_TYPE_LEVEL_HIGH>,
<&ipa_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
<&ipa_smp2p_in 1 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "ipa",
diff --git a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
index de9dd574a2f9..91ba96d43c6c 100644
--- a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
@@ -40,6 +40,7 @@ properties:
- renesas,etheravb-r8a77980 # R-Car V3H
- renesas,etheravb-r8a77990 # R-Car E3
- renesas,etheravb-r8a77995 # R-Car D3
+ - renesas,etheravb-r8a779a0 # R-Car V3U
- const: renesas,etheravb-rcar-gen3 # R-Car Gen3 and RZ/G2
reg: true
@@ -170,6 +171,7 @@ allOf:
- renesas,etheravb-r8a77965
- renesas,etheravb-r8a77970
- renesas,etheravb-r8a77980
+ - renesas,etheravb-r8a779a0
then:
required:
- tx-internal-delay-ps
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index dfbf5fe4547a..0642b0f59491 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -212,7 +212,6 @@ properties:
Triplet of delays. The 1st cell is reset pre-delay in micro
seconds. The 2nd cell is reset pulse in micro seconds. The 3rd
cell is reset post-delay in micro seconds.
- $ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 3
maxItems: 3
diff --git a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
index c47b58f3e3f6..783b9e32cf66 100644
--- a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
+++ b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
@@ -4,7 +4,7 @@
$id: http://devicetree.org/schemas/net/ti,k3-am654-cpsw-nuss.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: The TI AM654x/J721E SoC Gigabit Ethernet MAC (Media Access Controller) Device Tree Bindings
+title: The TI AM654x/J721E/AM642x SoC Gigabit Ethernet MAC (Media Access Controller) Device Tree Bindings
maintainers:
- Grygorii Strashko <grygorii.strashko@ti.com>
@@ -13,19 +13,16 @@ maintainers:
description:
The TI AM654x/J721E SoC Gigabit Ethernet MAC (CPSW2G NUSS) has two ports
(one external) and provides Ethernet packet communication for the device.
- CPSW2G NUSS features - the Reduced Gigabit Media Independent Interface (RGMII),
- Reduced Media Independent Interface (RMII), the Management Data
- Input/Output (MDIO) interface for physical layer device (PHY) management,
- new version of Common Platform Time Sync (CPTS), updated Address Lookup
- Engine (ALE).
- One external Ethernet port (port 1) with selectable RGMII/RMII interfaces and
- an internal Communications Port Programming Interface (CPPI5) (Host port 0).
+ The TI AM642x SoC Gigabit Ethernet MAC (CPSW3G NUSS) has three ports
+ (two external) and provides Ethernet packet communication and switching.
+
+ Host port 0 is an internal Communications Port Programming Interface (CPPI5) port.
 Host Port 0's CPPI Packet Streaming Interface supports 8 TX channels
- and one RX channels and operating by TI AM654x/J721E NAVSS Unified DMA
- Peripheral Root Complex (UDMA-P) controller.
- The CPSW2G NUSS is integrated into device MCU domain named MCU_CPSW0.
+ and one RX channel, operated by the NAVSS Unified DMA Peripheral Root
+ Complex (UDMA-P) controller.
- Additional features
+ CPSWxG features
+ updated Address Lookup Engine (ALE).
priority level Quality Of Service (QOS) support (802.1p)
Support for Audio/Video Bridging (P802.1Qav/D6.0)
Support for IEEE 1588 Clock Synchronization (2008 Annex D, Annex E and Annex F)
@@ -38,10 +35,18 @@ description:
VLAN support, 802.1Q compliant, Auto add port VLAN for untagged frames on
ingress, Auto VLAN removal on egress and auto pad to minimum frame size.
RX/TX csum offload
+ Management Data Input/Output (MDIO) interface for PHY management
+ RMII/RGMII Interfaces support
+ new version of Common Platform Time Sync (CPTS)
+
+ The CPSWxG NUSS is integrated into
+ the device MCU domain, named MCU_CPSW0, on the AM654x/J721E SoCs and into
+ the device MAIN domain, named CPSW0, on the AM642x SoC.
Specifications can be found at
- http://www.ti.com/lit/ug/spruid7e/spruid7e.pdf
- http://www.ti.com/lit/ug/spruil1a/spruil1a.pdf
+ https://www.ti.com/lit/pdf/spruid7
+ https://www.ti.com/lit/zip/spruil1
+ https://www.ti.com/lit/pdf/spruim2
properties:
"#address-cells": true
@@ -51,11 +56,12 @@ properties:
oneOf:
- const: ti,am654-cpsw-nuss
- const: ti,j721e-cpsw-nuss
+ - const: ti,am642-cpsw-nuss
reg:
maxItems: 1
description:
- The physical base address and size of full the CPSW2G NUSS IO range
+ The physical base address and size of the full CPSWxG NUSS IO range
reg-names:
items:
@@ -66,12 +72,17 @@ properties:
dma-coherent: true
clocks:
- description: CPSW2G NUSS functional clock
+ maxItems: 1
+ description: CPSWxG NUSS functional clock
clock-names:
items:
- const: fck
+ assigned-clock-parents: true
+
+ assigned-clocks: true
+
power-domains:
maxItems: 1
@@ -99,16 +110,16 @@ properties:
const: 0
patternProperties:
- port@1:
+ port@[1-2]:
type: object
- description: CPSW2G NUSS external ports
+ description: CPSWxG NUSS external ports
$ref: ethernet-controller.yaml#
properties:
reg:
- items:
- - const: 1
+ minimum: 1
+ maximum: 2
description: CPSW port number
phys:
diff --git a/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml b/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
index 9b7117920d90..4317eba503ca 100644
--- a/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
+++ b/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
@@ -59,6 +59,7 @@ properties:
- const: cpts
clocks:
+ maxItems: 1
description: CPTS reference clock
clock-names:
@@ -73,6 +74,13 @@ properties:
items:
- const: cpts
+ assigned-clock-parents: true
+
+ assigned-clocks: true
+
+ power-domains:
+ maxItems: 1
+
ti,cpts-ext-ts-inputs:
$ref: /schemas/types.yaml#/definitions/uint32
maximum: 8
diff --git a/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml b/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml
new file mode 100644
index 000000000000..59724d18e6f3
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/net/toshiba,visconti-dwmac.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Toshiba Visconti DWMAC Ethernet controller
+
+maintainers:
+ - Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - toshiba,visconti-dwmac
+ required:
+ - compatible
+
+allOf:
+ - $ref: "snps,dwmac.yaml#"
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - toshiba,visconti-dwmac
+ - const: snps,dwmac-4.20a
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: main clock
+ - description: PHY reference clock
+
+ clock-names:
+ items:
+ - const: stmmaceth
+ - const: phy_ref_clk
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ piether: ethernet@28000000 {
+ compatible = "toshiba,visconti-dwmac", "snps,dwmac-4.20a";
+ reg = <0 0x28000000 0 0x10000>;
+ interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clocks = <&clk300mhz>, <&clk125mhz>;
+ clock-names = "stmmaceth", "phy_ref_clk";
+ snps,txpbl = <4>;
+ snps,rxpbl = <4>;
+ snps,tso;
+ phy-mode = "rgmii-id";
+ phy-handle = <&phy0>;
+
+ mdio0 {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ compatible = "snps,dwmac-mdio";
+
+ phy0: ethernet-phy@1 {
+ device_type = "ethernet-phy";
+ reg = <0x1>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/xilinx_axienet.txt b/Documentation/devicetree/bindings/net/xilinx_axienet.txt
index 7360617cdedb..2cd452419ed0 100644
--- a/Documentation/devicetree/bindings/net/xilinx_axienet.txt
+++ b/Documentation/devicetree/bindings/net/xilinx_axienet.txt
@@ -38,6 +38,10 @@ Optional properties:
1 to enable partial TX checksum offload,
2 to enable full TX checksum offload
- xlnx,rxcsum : Same values as xlnx,txcsum but for RX checksum offload
+- xlnx,switch-x-sgmii : Boolean to indicate the Ethernet core is configured to
+ support both 1000BaseX and SGMII modes. If set, the phy-mode
+ should be set to match the mode selected on core reset (i.e.
+ by the basex_or_sgmii core input line).
- clocks : AXI bus clock for the device. Refer to common clock bindings.
Used to calculate MDIO clock divisor. If not specified, it is
auto-detected from the CPU clock (but only on platforms where
diff --git a/Documentation/devicetree/bindings/nvmem/rmem.yaml b/Documentation/devicetree/bindings/nvmem/rmem.yaml
new file mode 100644
index 000000000000..1d85a0a30846
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/rmem.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/rmem.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Reserved Memory Based nvmem Device
+
+maintainers:
+ - Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+
+allOf:
+ - $ref: "nvmem.yaml#"
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - raspberrypi,bootloader-config
+ - const: nvmem-rmem
+
+ no-map:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Avoid creating a virtual mapping of the region as part of the OS'
+ standard mapping of system memory.
+
+required:
+ - compatible
+ - no-map
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ blconfig: nvram@10000000 {
+ compatible = "raspberrypi,bootloader-config", "nvmem-rmem";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x10000000 0x1000>;
+ no-map;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
index 807694b4f41f..f90557f6deb8 100644
--- a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
@@ -14,6 +14,7 @@ properties:
items:
- enum:
- brcm,bcm2711-pcie # The Raspberry Pi 4
+ - brcm,bcm4908-pcie
- brcm,bcm7211-pcie # Broadcom STB version of RPi4
- brcm,bcm7278-pcie # Broadcom 7278 Arm
- brcm,bcm7216-pcie # Broadcom 7216 Arm
@@ -63,15 +64,6 @@ properties:
aspm-no-l0s: true
- resets:
- description: for "brcm,bcm7216-pcie", must be a valid reset
- phandle pointing to the RESCAL reset controller provider node.
- $ref: "/schemas/types.yaml#/definitions/phandle"
-
- reset-names:
- items:
- - const: rescal
-
brcm,scb-sizes:
description: u64 giving the 64bit PCIe memory
viewport size of a memory controller. There may be up to
@@ -102,8 +94,35 @@ allOf:
properties:
compatible:
contains:
+ const: brcm,bcm4908-pcie
+ then:
+ properties:
+ resets:
+ items:
+ - description: reset controller handling the PERST# signal
+
+ reset-names:
+ items:
+ - const: perst
+
+ required:
+ - resets
+ - reset-names
+ - if:
+ properties:
+ compatible:
+ contains:
const: brcm,bcm7216-pcie
then:
+ properties:
+ resets:
+ items:
+ - description: phandle pointing to the RESCAL reset controller
+
+ reset-names:
+ items:
+ - const: rescal
+
required:
- resets
- reset-names
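
For illustration, the per-SoC reset wiring described above might look like the following sketches; node names, unit addresses and reset phandles are placeholders:

	/* BCM4908: reset controller handling the PERST# signal */
	pcie@80040000 {
		compatible = "brcm,bcm4908-pcie";
		resets = <&perst_reset>;
		reset-names = "perst";
		/* other required PCIe host properties omitted */
	};

	/* BCM7216: RESCAL reset controller */
	pcie@8b10000 {
		compatible = "brcm,bcm7216-pcie";
		resets = <&rescal>;
		reset-names = "rescal";
		/* other required PCIe host properties omitted */
	};
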
diff --git a/Documentation/devicetree/bindings/pci/layerscape-pci.txt b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
index daa99f7d4c3f..6d898dd4a8e2 100644
--- a/Documentation/devicetree/bindings/pci/layerscape-pci.txt
+++ b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
@@ -26,6 +26,7 @@ Required properties:
"fsl,ls1046a-pcie-ep", "fsl,ls-pcie-ep"
"fsl,ls1088a-pcie-ep", "fsl,ls-pcie-ep"
"fsl,ls2088a-pcie-ep", "fsl,ls-pcie-ep"
+ "fsl,lx2160ar2-pcie-ep", "fsl,ls-pcie-ep"
- reg: base addresses and lengths of the PCIe controller register blocks.
- interrupts: A list of interrupt outputs of the controller. Must contain an
entry for each entry in the interrupt-names property.
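
For illustration, a minimal endpoint node using the newly added compatible might look like this sketch; the unit address and register range are placeholders:

	pcie_ep@3400000 {
		compatible = "fsl,lx2160ar2-pcie-ep", "fsl,ls-pcie-ep";
		reg = <0x00 0x03400000 0x0 0x00100000>;
		/* interrupts and interrupt-names as required by the SoC */
	};
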
diff --git a/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml b/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml
new file mode 100644
index 000000000000..04251d71f56b
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/microchip,pcie-host.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip PCIe Root Port Bridge Controller Device Tree Bindings
+
+maintainers:
+ - Daire McNamara <daire.mcnamara@microchip.com>
+
+allOf:
+ - $ref: /schemas/pci/pci-bus.yaml#
+
+properties:
+ compatible:
+ const: microchip,pcie-host-1.0 # PolarFire
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: cfg
+ - const: apb
+
+ interrupts:
+ minItems: 1
+ maxItems: 2
+ items:
+ - description: PCIe host controller
+ - description: builtin MSI controller
+
+ interrupt-names:
+ minItems: 1
+ maxItems: 2
+ items:
+ - const: pcie
+ - const: msi
+
+ ranges:
+ maxItems: 1
+
+ msi-controller:
+ description: Identifies the node as an MSI controller.
+
+ msi-parent:
+ description: MSI controller the device is capable of using.
+
+required:
+ - reg
+ - reg-names
+ - "#interrupt-cells"
+ - interrupts
+ - interrupt-map-mask
+ - interrupt-map
+ - msi-controller
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ pcie0: pcie@2030000000 {
+ compatible = "microchip,pcie-host-1.0";
+ reg = <0x0 0x70000000 0x0 0x08000000>,
+ <0x0 0x43000000 0x0 0x00010000>;
+ reg-names = "cfg", "apb";
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ interrupts = <119>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0 0 0 1 &pcie_intc0 0>,
+ <0 0 0 2 &pcie_intc0 1>,
+ <0 0 0 3 &pcie_intc0 2>,
+ <0 0 0 4 &pcie_intc0 3>;
+ interrupt-parent = <&plic0>;
+ msi-parent = <&pcie0>;
+ msi-controller;
+ bus-range = <0x00 0x7f>;
+ ranges = <0x03000000 0x0 0x78000000 0x0 0x78000000 0x0 0x04000000>;
+ pcie_intc0: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.txt b/Documentation/devicetree/bindings/pci/qcom,pcie.txt
index 3b55310390a0..0da458a051b6 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie.txt
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie.txt
@@ -132,8 +132,20 @@
- "master_bus" AXI Master clock
- "slave_bus" AXI Slave clock
--clock-names:
- Usage: required for sdm845 and sm8250
+- clock-names:
+ Usage: required for sdm845
+ Value type: <stringlist>
+ Definition: Should contain the following entries
+ - "aux" Auxiliary clock
+ - "cfg" Configuration clock
+ - "bus_master" Master AXI clock
+ - "bus_slave" Slave AXI clock
+ - "slave_q2a" Slave Q2A clock
+ - "tbu" PCIe TBU clock
+ - "pipe" PIPE clock
+
+- clock-names:
+ Usage: required for sm8250
Value type: <stringlist>
Definition: Should contain the following entries
- "aux" Auxiliary clock
@@ -142,6 +154,7 @@
- "bus_slave" Slave AXI clock
- "slave_q2a" Slave Q2A clock
- "tbu" PCIe TBU clock
+ - "ddrss_sf_tbu" PCIe SF TBU clock
- "pipe" PIPE clock
- resets:
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml
index 94ac23687b7e..77606c899fe2 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml
@@ -51,9 +51,11 @@ properties:
- const: usb2_reset
usb0_id_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG ID pin
usb0_vbus_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG VBUS detect pin
usb0_vbus_power-supply:
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun50i-a64-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun50i-a64-usb-phy.yaml
index fd6e126fcf18..078af52b16ed 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun50i-a64-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun50i-a64-usb-phy.yaml
@@ -50,9 +50,11 @@ properties:
- const: usb1_reset
usb0_id_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG ID pin
usb0_vbus_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG VBUS detect pin
usb0_vbus_power-supply:
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb-phy.yaml
index 7670411002c9..e632140722a2 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb-phy.yaml
@@ -50,9 +50,11 @@ properties:
- const: usb3_reset
usb0_id_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG ID pin
usb0_vbus_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG VBUS detect pin
usb0_vbus_power-supply:
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun5i-a13-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun5i-a13-usb-phy.yaml
index 9b319381d1ad..5bad9b06e2e7 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun5i-a13-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun5i-a13-usb-phy.yaml
@@ -45,9 +45,11 @@ properties:
- const: usb1_reset
usb0_id_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG ID pin
usb0_vbus_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG VBUS detect pin
usb0_vbus_power-supply:
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-usb-phy.yaml
index b0ed01bbf3db..922b4665e00d 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-usb-phy.yaml
@@ -54,9 +54,11 @@ properties:
- const: usb2_reset
usb0_id_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG ID pin
usb0_vbus_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG VBUS detect pin
usb0_vbus_power-supply:
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun8i-a23-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun8i-a23-usb-phy.yaml
index b0674406f8aa..a94019efc2f3 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun8i-a23-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun8i-a23-usb-phy.yaml
@@ -50,9 +50,11 @@ properties:
- const: usb1_reset
usb0_id_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG ID pin
usb0_vbus_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG VBUS detect pin
usb0_vbus_power-supply:
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun8i-a83t-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun8i-a83t-usb-phy.yaml
index 48dc9c834a9b..33f3ddc0492d 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun8i-a83t-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun8i-a83t-usb-phy.yaml
@@ -56,9 +56,11 @@ properties:
- const: usb2_reset
usb0_id_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG ID pin
usb0_vbus_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG VBUS detect pin
usb0_vbus_power-supply:
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun8i-h3-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun8i-h3-usb-phy.yaml
index 60c344585276..f80431060803 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun8i-h3-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun8i-h3-usb-phy.yaml
@@ -62,9 +62,11 @@ properties:
- const: usb3_reset
usb0_id_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG ID pin
usb0_vbus_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG VBUS detect pin
usb0_vbus_power-supply:
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun8i-r40-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun8i-r40-usb-phy.yaml
index a2bb36790fbd..d947e50a49d2 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun8i-r40-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun8i-r40-usb-phy.yaml
@@ -56,9 +56,11 @@ properties:
- const: usb2_reset
usb0_id_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG ID pin
usb0_vbus_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG VBUS detect pin
usb0_vbus_power-supply:
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun8i-v3s-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun8i-v3s-usb-phy.yaml
index eadfd0c9493c..a2836c296cc4 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun8i-v3s-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun8i-v3s-usb-phy.yaml
@@ -42,9 +42,11 @@ properties:
const: usb0_reset
usb0_id_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG ID pin
usb0_vbus_det-gpios:
+ maxItems: 1
description: GPIO to the USB OTG VBUS detect pin
usb0_vbus_power-supply:
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun9i-a80-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun9i-a80-usb-phy.yaml
index ded7d6f0a119..2eb493fa64fd 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun9i-a80-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun9i-a80-usb-phy.yaml
@@ -22,7 +22,8 @@ properties:
clocks:
anyOf:
- - description: Main PHY Clock
+ - maxItems: 1
+ description: Main PHY Clock
- items:
- description: Main PHY clock
@@ -39,20 +40,16 @@ properties:
- const: hsic_480M
resets:
- anyOf:
+ minItems: 1
+ items:
- description: Normal USB PHY reset
-
- - items:
- - description: Normal USB PHY reset
- - description: HSIC Reset
+ - description: HSIC Reset
reset-names:
- oneOf:
+ minItems: 1
+ items:
- const: phy
-
- - items:
- - const: phy
- - const: hsic
+ - const: hsic
phy_type:
const: hsic
diff --git a/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.txt b/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.txt
deleted file mode 100644
index 698aacbdcfc4..000000000000
--- a/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.txt
+++ /dev/null
@@ -1,86 +0,0 @@
-Broadcom STB USB PHY
-
-Required properties:
-- compatible: should be one of
- "brcm,brcmstb-usb-phy"
- "brcm,bcm7216-usb-phy"
- "brcm,bcm7211-usb-phy"
-
-- reg and reg-names properties requirements are specific to the
- compatible string.
- "brcm,brcmstb-usb-phy":
- - reg: 1 or 2 offset and length pairs. One for the base CTRL registers
- and an optional pair for systems with USB 3.x support
- - reg-names: not specified
- "brcm,bcm7216-usb-phy":
- - reg: 3 offset and length pairs for CTRL, XHCI_EC and XHCI_GBL
- registers
- - reg-names: "ctrl", "xhci_ec", "xhci_gbl"
- "brcm,bcm7211-usb-phy":
- - reg: 5 offset and length pairs for CTRL, XHCI_EC, XHCI_GBL,
- USB_PHY and USB_MDIO registers and an optional pair
- for the BDC registers
- - reg-names: "ctrl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec"
-
-- #phy-cells: Shall be 1 as it expects one argument for setting
- the type of the PHY. Possible values are:
- - PHY_TYPE_USB2 for USB1.1/2.0 PHY
- - PHY_TYPE_USB3 for USB3.x PHY
-
-Optional Properties:
-- clocks : clock phandles.
-- clock-names: String, clock name.
-- interrupts: wakeup interrupt
-- interrupt-names: "wakeup"
-- brcm,ipp: Boolean, Invert Port Power.
- Possible values are: 0 (Don't invert), 1 (Invert)
-- brcm,ioc: Boolean, Invert Over Current detection.
- Possible values are: 0 (Don't invert), 1 (Invert)
-- dr_mode: String, PHY Device mode.
- Possible values are: "host", "peripheral ", "drd" or "typec-pd"
- If this property is not defined, the phy will default to "host" mode.
-- brcm,syscon-piarbctl: phandle to syscon for handling config registers
-NOTE: one or both of the following two properties must be set
-- brcm,has-xhci: Boolean indicating the phy has an XHCI phy.
-- brcm,has-eohci: Boolean indicating the phy has an EHCI/OHCI phy.
-
-
-Example:
-
-usbphy_0: usb-phy@f0470200 {
- reg = <0xf0470200 0xb8>,
- <0xf0471940 0x6c0>;
- compatible = "brcm,brcmstb-usb-phy";
- #phy-cells = <1>;
- dr_mode = "host"
- brcm,ioc = <1>;
- brcm,ipp = <1>;
- brcm,has-xhci;
- brcm,has-eohci;
- clocks = <&usb20>, <&usb30>;
- clock-names = "sw_usb", "sw_usb3";
-};
-
-usb-phy@29f0200 {
- reg = <0x29f0200 0x200>,
- <0x29c0880 0x30>,
- <0x29cc100 0x534>,
- <0x2808000 0x24>,
- <0x2980080 0x8>;
- reg-names = "ctrl",
- "xhci_ec",
- "xhci_gbl",
- "usb_phy",
- "usb_mdio";
- brcm,ioc = <0x0>;
- brcm,ipp = <0x0>;
- compatible = "brcm,bcm7211-usb-phy";
- interrupts = <0x30>;
- interrupt-parent = <&vpu_intr1_nosec_intc>;
- interrupt-names = "wake";
- #phy-cells = <0x1>;
- brcm,has-xhci;
- syscon-piarbctl = <&syscon_piarbctl>;
- clocks = <&scmi_clk 256>;
- clock-names = "sw_usb";
-};
diff --git a/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.yaml b/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.yaml
new file mode 100644
index 000000000000..0497368d1fca
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.yaml
@@ -0,0 +1,196 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/brcm,brcmstb-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom STB USB PHY
+
+description: Broadcom's PHY that handles EHCI/OHCI and/or XHCI
+
+maintainers:
+ - Al Cooper <alcooperx@gmail.com>
+ - Rafał Miłecki <rafal@milecki.pl>
+
+properties:
+ compatible:
+ enum:
+ - brcm,bcm4908-usb-phy
+ - brcm,bcm7211-usb-phy
+ - brcm,bcm7216-usb-phy
+ - brcm,brcmstb-usb-phy
+
+ reg:
+ minItems: 1
+ maxItems: 6
+ items:
+ - description: the base CTRL register
+ - description: XHCI EC register
+ - description: XHCI GBL register
+ - description: USB PHY register
+ - description: USB MDIO register
+ - description: BDC register
+
+ reg-names:
+ minItems: 1
+ maxItems: 6
+ items:
+ - const: ctrl
+ - const: xhci_ec
+ - const: xhci_gbl
+ - const: usb_phy
+ - const: usb_mdio
+ - const: bdc_ec
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ minItems: 1
+ maxItems: 2
+ items:
+ - const: sw_usb
+ - const: sw_usb3
+
+ interrupts:
+ description: wakeup interrupt
+
+ interrupt-names:
+ const: wake
+
+ brcm,ipp:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Invert Port Power
+ minimum: 0
+ maximum: 1
+
+ brcm,ioc:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Invert Over Current detection
+ minimum: 0
+ maximum: 1
+
+ dr_mode:
+ description: PHY Device mode. If this property is not defined, the PHY will
+ default to "host" mode.
+ enum:
+ - host
+ - peripheral
+ - drd
+ - typec-pd
+
+ brcm,syscon-piarbctl:
+ description: phandle to syscon for handling config registers
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ brcm,has-xhci:
+ description: Indicates the PHY has an XHCI PHY.
+ type: boolean
+
+ brcm,has-eohci:
+ description: Indicates the PHY has an EHCI/OHCI PHY.
+ type: boolean
+
+ "#phy-cells":
+ description: |
+ Cell allows setting the type of the PHY. Possible values are:
+ - PHY_TYPE_USB2 for USB1.1/2.0 PHY
+ - PHY_TYPE_USB3 for USB3.x PHY
+ const: 1
+
+required:
+ - reg
+ - "#phy-cells"
+
+anyOf:
+ - required:
+ - brcm,has-xhci
+ - required:
+ - brcm,has-eohci
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - brcm,bcm4908-usb-phy
+ - brcm,brcmstb-usb-phy
+ then:
+ properties:
+ reg:
+ minItems: 1
+ maxItems: 2
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,bcm7211-usb-phy
+ then:
+ properties:
+ reg:
+ minItems: 5
+ maxItems: 6
+ reg-names:
+ minItems: 5
+ maxItems: 6
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,bcm7216-usb-phy
+ then:
+ properties:
+ reg:
+ minItems: 3
+ maxItems: 3
+ reg-names:
+ minItems: 3
+ maxItems: 3
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/phy/phy.h>
+
+ usb-phy@f0470200 {
+ compatible = "brcm,brcmstb-usb-phy";
+ reg = <0xf0470200 0xb8>,
+ <0xf0471940 0x6c0>;
+ #phy-cells = <1>;
+ dr_mode = "host";
+ brcm,ioc = <1>;
+ brcm,ipp = <1>;
+ brcm,has-xhci;
+ brcm,has-eohci;
+ clocks = <&usb20>, <&usb30>;
+ clock-names = "sw_usb", "sw_usb3";
+ };
+ - |
+ #include <dt-bindings/phy/phy.h>
+
+ usb-phy@29f0200 {
+ compatible = "brcm,bcm7211-usb-phy";
+ reg = <0x29f0200 0x200>,
+ <0x29c0880 0x30>,
+ <0x29cc100 0x534>,
+ <0x2808000 0x24>,
+ <0x2980080 0x8>;
+ reg-names = "ctrl",
+ "xhci_ec",
+ "xhci_gbl",
+ "usb_phy",
+ "usb_mdio";
+ brcm,ioc = <0x0>;
+ brcm,ipp = <0x0>;
+ interrupts = <0x30>;
+ interrupt-parent = <&vpu_intr1_nosec_intc>;
+ interrupt-names = "wake";
+ #phy-cells = <0x1>;
+ brcm,has-xhci;
+ brcm,syscon-piarbctl = <&syscon_piarbctl>;
+ clocks = <&scmi_clk 256>;
+ clock-names = "sw_usb";
+ };
diff --git a/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml b/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml
index 58c3ef8004ad..04edda504ab6 100644
--- a/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml
@@ -99,8 +99,7 @@ patternProperties:
if:
properties:
compatible:
- items:
- const: brcm,iproc-ns2-sata-phy
+ const: brcm,iproc-ns2-sata-phy
then:
properties:
reg:
diff --git a/Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml b/Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml
new file mode 100644
index 000000000000..71d4acea1f66
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (c) 2020 MediaTek
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/mediatek,dsi-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MIPI Display Serial Interface (DSI) PHY binding
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+ - Chunfeng Yun <chunfeng.yun@mediatek.com>
+
+description: The MIPI DSI PHY supports up to 4-lane output.
+
+properties:
+ $nodename:
+ pattern: "^dsi-phy@[0-9a-f]+$"
+
+ compatible:
+ enum:
+ - mediatek,mt2701-mipi-tx
+ - mediatek,mt7623-mipi-tx
+ - mediatek,mt8173-mipi-tx
+ - mediatek,mt8183-mipi-tx
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: PLL reference clock
+
+ clock-output-names:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+ "#clock-cells":
+ const: 0
+
+ nvmem-cells:
+ maxItems: 1
+ description: A phandle to the calibration data provided by an nvmem device;
+ if unspecified, default values shall be used.
+
+ nvmem-cell-names:
+ items:
+ - const: calibration-data
+
+ drive-strength-microamp:
+ description: adjust driving current
+ multipleOf: 200
+ minimum: 2000
+ maximum: 6000
+ default: 4600
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-output-names
+ - "#phy-cells"
+ - "#clock-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/mt8173-clk.h>
+ dsi-phy@10215000 {
+ compatible = "mediatek,mt8173-mipi-tx";
+ reg = <0x10215000 0x1000>;
+ clocks = <&clk26m>;
+ clock-output-names = "mipi_tx0_pll";
+ drive-strength-microamp = <4000>;
+ nvmem-cells = <&mipi_tx_calibration>;
+ nvmem-cell-names = "calibration-data";
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/phy/mediatek,hdmi-phy.yaml b/Documentation/devicetree/bindings/phy/mediatek,hdmi-phy.yaml
new file mode 100644
index 000000000000..4752517a1446
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/mediatek,hdmi-phy.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (c) 2020 MediaTek
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/mediatek,hdmi-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek High Definition Multimedia Interface (HDMI) PHY binding
+
+maintainers:
+ - Chun-Kuang Hu <chunkuang.hu@kernel.org>
+ - Philipp Zabel <p.zabel@pengutronix.de>
+ - Chunfeng Yun <chunfeng.yun@mediatek.com>
+
+description: |
+ The HDMI PHY serializes the HDMI encoder's three channel 10-bit parallel
+ output and drives the HDMI pads.
+
+properties:
+ $nodename:
+ pattern: "^hdmi-phy@[0-9a-f]+$"
+
+ compatible:
+ enum:
+ - mediatek,mt2701-hdmi-phy
+ - mediatek,mt7623-hdmi-phy
+ - mediatek,mt8173-hdmi-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: PLL reference clock
+
+ clock-names:
+ items:
+ - const: pll_ref
+
+ clock-output-names:
+ items:
+ - const: hdmitx_dig_cts
+
+ "#phy-cells":
+ const: 0
+
+ "#clock-cells":
+ const: 0
+
+ mediatek,ibias:
+ description:
+ TX DRV bias current for < 1.65Gbps
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 63
+ default: 0xa
+
+ mediatek,ibias_up:
+ description:
+ TX DRV bias current for >= 1.65Gbps
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 63
+ default: 0x1c
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - clock-output-names
+ - "#phy-cells"
+ - "#clock-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/mt8173-clk.h>
+ hdmi_phy: hdmi-phy@10209100 {
+ compatible = "mediatek,mt8173-hdmi-phy";
+ reg = <0x10209100 0x24>;
+ clocks = <&apmixedsys CLK_APMIXED_HDMI_REF>;
+ clock-names = "pll_ref";
+ clock-output-names = "hdmitx_dig_cts";
+ mediatek,ibias = <0xa>;
+ mediatek,ibias_up = <0x1c>;
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml b/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml
new file mode 100644
index 000000000000..602e6ff45785
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml
@@ -0,0 +1,260 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (c) 2020 MediaTek
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/mediatek,tphy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek T-PHY Controller Device Tree Bindings
+
+maintainers:
+ - Chunfeng Yun <chunfeng.yun@mediatek.com>
+
+description: |
+ The T-PHY controller supports physical layer functionality for a number of
+ controllers on MediaTek SoCs, including USB 2.0, USB 3.0, PCIe and SATA.
+
+ Layout differences of banks between T-PHY V1 (mt8173/mt2701) and
+ T-PHY V2 (mt2712) when working in USB mode:
+ -----------------------------------
+ Version 1:
+ port offset bank
+ shared 0x0000 SPLLC
+ 0x0100 FMREG
+ u2 port0 0x0800 U2PHY_COM
+ u3 port0 0x0900 U3PHYD
+ 0x0a00 U3PHYD_BANK2
+ 0x0b00 U3PHYA
+ 0x0c00 U3PHYA_DA
+ u2 port1 0x1000 U2PHY_COM
+ u3 port1 0x1100 U3PHYD
+ 0x1200 U3PHYD_BANK2
+ 0x1300 U3PHYA
+ 0x1400 U3PHYA_DA
+ u2 port2 0x1800 U2PHY_COM
+ ...
+
+ Version 2:
+ port offset bank
+ u2 port0 0x0000 MISC
+ 0x0100 FMREG
+ 0x0300 U2PHY_COM
+ u3 port0 0x0700 SPLLC
+ 0x0800 CHIP
+ 0x0900 U3PHYD
+ 0x0a00 U3PHYD_BANK2
+ 0x0b00 U3PHYA
+ 0x0c00 U3PHYA_DA
+ u2 port1 0x1000 MISC
+ 0x1100 FMREG
+ 0x1300 U2PHY_COM
+ u3 port1 0x1700 SPLLC
+ 0x1800 CHIP
+ 0x1900 U3PHYD
+ 0x1a00 U3PHYD_BANK2
+ 0x1b00 U3PHYA
+ 0x1c00 U3PHYA_DA
+ u2 port2 0x2000 MISC
+ ...
+
+ On V2, the SPLLC bank shared by u3 ports and the FMREG bank shared by u2
+ ports on V1 are moved into each port, and two new banks are added: MISC
+ for u2 ports and CHIP for u3 ports.
+
+properties:
+ $nodename:
+ pattern: "^t-phy@[0-9a-f]+$"
+
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - mediatek,mt2701-tphy
+ - mediatek,mt7623-tphy
+ - mediatek,mt7622-tphy
+ - mediatek,mt8516-tphy
+ - const: mediatek,generic-tphy-v1
+ - items:
+ - enum:
+ - mediatek,mt2712-tphy
+ - mediatek,mt7629-tphy
+ - mediatek,mt8183-tphy
+ - const: mediatek,generic-tphy-v2
+ - const: mediatek,mt2701-u3phy
+ deprecated: true
+ - const: mediatek,mt2712-u3phy
+ deprecated: true
+ - const: mediatek,mt8173-u3phy
+
+ reg:
+ description:
+ Register region shared by multiple ports, excluding each port's private registers.
+ It is needed for T-PHY V1, such as mt2701 and mt8173, but not for
+ T-PHY V2, such as mt2712.
+ maxItems: 1
+
+ "#address-cells":
+ enum: [1, 2]
+
+ "#size-cells":
+ enum: [1, 2]
+
+ # Used with non-empty value if optional 'reg' is not provided.
+ # The format of the value is an arbitrary number of triplets of
+ # (child-bus-address, parent-bus-address, length).
+ ranges: true
+
+ mediatek,src-ref-clk-mhz:
+ description:
+ Frequency of the reference clock for slew rate calibration
+ default: 26
+
+ mediatek,src-coef:
+ description:
+ Coefficient for slew rate calibration; depends on the SoC process
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 28
+
+# Required child node:
+patternProperties:
+ "^usb-phy@[0-9a-f]+$":
+ type: object
+ description:
+ A sub-node is required for each port the controller provides.
+ Address range information including the usual 'reg' property
+ is used inside these nodes to describe the controller's topology.
+
+ properties:
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+ items:
+ - description: Reference clock (48 MHz for HS; 24~27 MHz for SS/P)
+ - description: Reference clock of analog phy
+ description:
+ Use both clocks if the clocks of the analog and digital phys are
+ separated; otherwise use only the "ref" clock if needed.
+
+ clock-names:
+ minItems: 1
+ maxItems: 2
+ items:
+ - const: ref
+ - const: da_ref
+
+ "#phy-cells":
+ const: 1
+ description: |
+ The cells contain the following arguments.
+
+ - description: The PHY type
+ enum:
+ - PHY_TYPE_USB2
+ - PHY_TYPE_USB3
+ - PHY_TYPE_PCIE
+ - PHY_TYPE_SATA
+
+ # The following optional vendor properties are only for debug or HQA test
+ mediatek,eye-src:
+ description:
+ The value of slew rate calibration (U2 phy)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 7
+
+ mediatek,eye-vrt:
+ description:
+ The selection of VRT reference voltage (U2 phy)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 7
+
+ mediatek,eye-term:
+ description:
+ The selection of HS_TX TERM reference voltage (U2 phy)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 7
+
+ mediatek,intr:
+ description:
+ The selection of internal resistor (U2 phy)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 31
+
+ mediatek,discth:
+ description:
+ The selection of disconnect threshold (U2 phy)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 15
+
+ mediatek,bc12:
+ description:
+ Enable BC1.2 charger detection if the hardware supports it
+ type: boolean
+
+ required:
+ - reg
+ - "#phy-cells"
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/mt8173-clk.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/phy/phy.h>
+ usb@11271000 {
+ compatible = "mediatek,mt8173-mtu3", "mediatek,mtu3";
+ reg = <0x11271000 0x3000>, <0x11280700 0x0100>;
+ reg-names = "mac", "ippc";
+ phys = <&u2port0 PHY_TYPE_USB2>,
+ <&u3port0 PHY_TYPE_USB3>,
+ <&u2port1 PHY_TYPE_USB2>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>;
+ clock-names = "sys_ck";
+ };
+
+ t-phy@11290000 {
+ compatible = "mediatek,mt8173-u3phy";
+ reg = <0x11290000 0x800>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ u2port0: usb-phy@11290800 {
+ reg = <0x11290800 0x100>;
+ clocks = <&apmixedsys CLK_APMIXED_REF2USB_TX>, <&clk48m>;
+ clock-names = "ref", "da_ref";
+ #phy-cells = <1>;
+ };
+
+ u3port0: usb-phy@11290900 {
+ reg = <0x11290900 0x700>;
+ clocks = <&clk26m>;
+ clock-names = "ref";
+ #phy-cells = <1>;
+ };
+
+ u2port1: usb-phy@11291000 {
+ reg = <0x11291000 0x100>;
+ #phy-cells = <1>;
+ };
+ };
+
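+  # Illustrative T-PHY V2 instance (addresses and offsets are hypothetical,
+  # derived from the bank layout above): V2 needs no shared parent 'reg',
+  # and 'ranges' carries the (child, parent, length) triplet instead.
+  - |
+    t-phy@11c40000 {
+        compatible = "mediatek,mt2712-tphy", "mediatek,generic-tphy-v2";
+        #address-cells = <1>;
+        #size-cells = <1>;
+        ranges = <0x0 0x11c40000 0x1000>;
+
+        usb-phy@0 {
+            reg = <0x0 0x700>;
+            clocks = <&clk26m>;
+            clock-names = "ref";
+            #phy-cells = <1>;
+        };
+
+        usb-phy@700 {
+            reg = <0x700 0x900>;
+            #phy-cells = <1>;
+        };
+    };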
+...
diff --git a/Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml b/Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml
new file mode 100644
index 000000000000..3a9be82e7f13
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/mediatek,ufs-phy.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (c) 2020 MediaTek
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/mediatek,ufs-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek Universal Flash Storage (UFS) M-PHY binding
+
+maintainers:
+ - Stanley Chu <stanley.chu@mediatek.com>
+ - Chunfeng Yun <chunfeng.yun@mediatek.com>
+
+description: |
+ UFS M-PHY nodes describe the on-chip UFS M-PHY hardware macro.
+ Each UFS M-PHY should have its own node.
+ To bind the UFS M-PHY to the UFS host controller, the controller node
+ should contain a phandle reference to the UFS M-PHY node, as sketched in
+ the second example below.
+
+properties:
+ $nodename:
+ pattern: "^ufs-phy@[0-9a-f]+$"
+
+ compatible:
+ const: mediatek,mt8183-ufsphy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Unipro core control clock.
+ - description: M-PHY core control clock.
+
+ clock-names:
+ items:
+ - const: unipro
+ - const: mp
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - "#phy-cells"
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/mt8183-clk.h>
+ ufsphy: ufs-phy@11fa0000 {
+ compatible = "mediatek,mt8183-ufsphy";
+ reg = <0x11fa0000 0xc000>;
+ clocks = <&infracfg CLK_INFRA_UNIPRO_SCK>,
+ <&infracfg CLK_INFRA_UFS_MP_SAP_BCLK>;
+ clock-names = "unipro", "mp";
+ #phy-cells = <0>;
+ };
+
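+  # A minimal sketch of the host controller reference described above. The
+  # UFS host controller binding defines further required properties (clocks,
+  # interrupts, regulators) that are omitted here for brevity.
+  - |
+    #include <dt-bindings/clock/mt8183-clk.h>
+    ufsphy1: ufs-phy@11fa0000 {
+        compatible = "mediatek,mt8183-ufsphy";
+        reg = <0x11fa0000 0xc000>;
+        clocks = <&infracfg CLK_INFRA_UNIPRO_SCK>,
+                 <&infracfg CLK_INFRA_UFS_MP_SAP_BCLK>;
+        clock-names = "unipro", "mp";
+        #phy-cells = <0>;
+    };
+
+    ufshci@11270000 {
+        compatible = "mediatek,mt8183-ufshci";
+        reg = <0x11270000 0x2300>;
+        phys = <&ufsphy1>;
+    };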
+...
diff --git a/Documentation/devicetree/bindings/phy/mediatek,xsphy.yaml b/Documentation/devicetree/bindings/phy/mediatek,xsphy.yaml
new file mode 100644
index 000000000000..598fd2b95c29
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/mediatek,xsphy.yaml
@@ -0,0 +1,199 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (c) 2020 MediaTek
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/mediatek,xsphy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek XS-PHY Controller Device Tree Bindings
+
+maintainers:
+ - Chunfeng Yun <chunfeng.yun@mediatek.com>
+
+description: |
+ The XS-PHY controller supports physical layer functionality for the USB 3.1
+ GEN2 controller on MediaTek SoCs.
+
+ Banks layout of xsphy
+ ----------------------------------
+ port offset bank
+ u2 port0 0x0000 MISC
+ 0x0100 FMREG
+ 0x0300 U2PHY_COM
+ u2 port1 0x1000 MISC
+ 0x1100 FMREG
+ 0x1300 U2PHY_COM
+ u2 port2 0x2000 MISC
+ ...
+ u31 common 0x3000 DIG_GLB
+ 0x3100 PHYA_GLB
+ u31 port0 0x3400 DIG_LN_TOP
+ 0x3500 DIG_LN_TX0
+ 0x3600 DIG_LN_RX0
+ 0x3700 DIG_LN_DAIF
+ 0x3800 PHYA_LN
+ u31 port1 0x3a00 DIG_LN_TOP
+ 0x3b00 DIG_LN_TX0
+ 0x3c00 DIG_LN_RX0
+ 0x3d00 DIG_LN_DAIF
+ 0x3e00 PHYA_LN
+ ...
+ DIG_GLB & PHYA_GLB are shared by U31 ports.
+
+properties:
+ $nodename:
+ pattern: "^xs-phy@[0-9a-f]+$"
+
+ compatible:
+ items:
+ - enum:
+ - mediatek,mt3611-xsphy
+ - mediatek,mt3612-xsphy
+ - const: mediatek,xsphy
+
+ reg:
+ description:
+ Register region shared by multiple U3 ports, excluding each port's private
+ registers. If only U2 ports are provided, this property should not be used.
+ maxItems: 1
+
+ "#address-cells":
+ enum: [1, 2]
+
+ "#size-cells":
+ enum: [1, 2]
+
+ ranges: true
+
+ mediatek,src-ref-clk-mhz:
+ description:
+ Frequency of the reference clock for slew rate calibration
+ default: 26
+
+ mediatek,src-coef:
+ description:
+ Coefficient for slew rate calibration; depends on the SoC process
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 17
+
+# Required child node:
+patternProperties:
+ "^usb-phy@[0-9a-f]+$":
+ type: object
+ description:
+ A sub-node is required for each port the controller provides.
+ Address range information including the usual 'reg' property
+ is used inside these nodes to describe the controller's topology.
+
+ properties:
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Reference clock (48 MHz for HS; 24~27 MHz for SS/P)
+
+ clock-names:
+ items:
+ - const: ref
+
+ "#phy-cells":
+ const: 1
+ description: |
+ The cells contain the following arguments.
+
+ - description: The PHY type
+ enum:
+ - PHY_TYPE_USB2
+ - PHY_TYPE_USB3
+
+ # The following optional vendor properties are only for debug or HQA test
+ mediatek,eye-src:
+ description:
+ The value of slew rate calibration (U2 phy)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 7
+
+ mediatek,eye-vrt:
+ description:
+ The selection of VRT reference voltage (U2 phy)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 7
+
+ mediatek,eye-term:
+ description:
+ The selection of HS_TX TERM reference voltage (U2 phy)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 7
+
+ mediatek,efuse-intr:
+ description:
+ The selection of Internal Resistor (U2/U3 phy)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 63
+
+ mediatek,efuse-tx-imp:
+ description:
+ The selection of TX Impedance (U3 phy)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 31
+
+ mediatek,efuse-rx-imp:
+ description:
+ The selection of RX Impedance (U3 phy)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 31
+
+ required:
+ - reg
+ - clocks
+ - clock-names
+ - "#phy-cells"
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/phy/phy.h>
+
+ u3phy: xs-phy@11c40000 {
+ compatible = "mediatek,mt3611-xsphy", "mediatek,xsphy";
+ reg = <0x11c43000 0x0200>;
+ mediatek,src-ref-clk-mhz = <26>;
+ mediatek,src-coef = <17>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ u2port0: usb-phy@11c40000 {
+ reg = <0x11c40000 0x0400>;
+ clocks = <&clk48m>;
+ clock-names = "ref";
+ mediatek,eye-src = <4>;
+ #phy-cells = <1>;
+ };
+
+ u3port0: usb-phy@11c43000 {
+ reg = <0x11c43400 0x0500>;
+ clocks = <&clk26m>;
+ clock-names = "ref";
+ mediatek,efuse-intr = <28>;
+ #phy-cells = <1>;
+ };
+ };
+
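+  # Illustrative U2-only instance (hypothetical addresses): with no U3 ports
+  # present, the shared parent 'reg' region is omitted, as described above.
+  - |
+    xs-phy@11c40000 {
+        compatible = "mediatek,mt3611-xsphy", "mediatek,xsphy";
+        #address-cells = <1>;
+        #size-cells = <1>;
+        ranges;
+
+        usb-phy@11c40000 {
+            reg = <0x11c40000 0x400>;
+            clocks = <&clk48m>;
+            clock-names = "ref";
+            #phy-cells = <1>;
+        };
+    };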
+...
diff --git a/Documentation/devicetree/bindings/phy/phy-mtk-tphy.txt b/Documentation/devicetree/bindings/phy/phy-mtk-tphy.txt
deleted file mode 100644
index dd75b676b71d..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-mtk-tphy.txt
+++ /dev/null
@@ -1,162 +0,0 @@
-MediaTek T-PHY binding
---------------------------
-
-T-phy controller supports physical layer functionality for a number of
-controllers on MediaTek SoCs, such as, USB2.0, USB3.0, PCIe, and SATA.
-
-Required properties (controller (parent) node):
- - compatible : should be one of
- "mediatek,generic-tphy-v1"
- "mediatek,generic-tphy-v2"
- "mediatek,mt2701-u3phy" (deprecated)
- "mediatek,mt2712-u3phy" (deprecated)
- "mediatek,mt8173-u3phy";
- make use of "mediatek,generic-tphy-v1" on mt2701 instead and
- "mediatek,generic-tphy-v2" on mt2712 instead.
-
-- #address-cells: the number of cells used to represent physical
- base addresses.
-- #size-cells: the number of cells used to represent the size of an address.
-- ranges: the address mapping relationship to the parent, defined with
- - empty value: if optional 'reg' is used.
- - non-empty value: if optional 'reg' is not used. should set
- the child's base address to 0, the physical address
- within parent's address space, and the length of
- the address map.
-
-Required nodes : a sub-node is required for each port the controller
- provides. Address range information including the usual
- 'reg' property is used inside these nodes to describe
- the controller's topology.
-
-Optional properties (controller (parent) node):
- - reg : offset and length of register shared by multiple ports,
- exclude port's private register. It is needed on mt2701
- and mt8173, but not on mt2712.
- - mediatek,src-ref-clk-mhz : frequency of reference clock for slew rate
- calibrate
- - mediatek,src-coef : coefficient for slew rate calibrate, depends on
- SoC process
-
-Required properties (port (child) node):
-- reg : address and length of the register set for the port.
-- #phy-cells : should be 1 (See second example)
- cell after port phandle is phy type from:
- - PHY_TYPE_USB2
- - PHY_TYPE_USB3
- - PHY_TYPE_PCIE
- - PHY_TYPE_SATA
-
-Optional properties (PHY_TYPE_USB2 port (child) node):
-- clocks : a list of phandle + clock-specifier pairs, one for each
- entry in clock-names
-- clock-names : may contain
- "ref": 48M reference clock for HighSpeed (digital) phy; and 26M
- reference clock for SuperSpeed (digital) phy, sometimes is
- 24M, 25M or 27M, depended on platform.
- "da_ref": the reference clock of analog phy, used if the clocks
- of analog and digital phys are separated, otherwise uses
- "ref" clock only if needed.
-
-- mediatek,eye-src : u32, the value of slew rate calibrate
-- mediatek,eye-vrt : u32, the selection of VRT reference voltage
-- mediatek,eye-term : u32, the selection of HS_TX TERM reference voltage
-- mediatek,bc12 : bool, enable BC12 of u2phy if support it
-- mediatek,discth : u32, the selection of disconnect threshold
-- mediatek,intr : u32, the selection of internal R (resistance)
-
-Example:
-
-u3phy: usb-phy@11290000 {
- compatible = "mediatek,mt8173-u3phy";
- reg = <0 0x11290000 0 0x800>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- u2port0: usb-phy@11290800 {
- reg = <0 0x11290800 0 0x100>;
- clocks = <&apmixedsys CLK_APMIXED_REF2USB_TX>;
- clock-names = "ref";
- #phy-cells = <1>;
- };
-
- u3port0: usb-phy@11290900 {
- reg = <0 0x11290800 0 0x700>;
- clocks = <&clk26m>;
- clock-names = "ref";
- #phy-cells = <1>;
- };
-
- u2port1: usb-phy@11291000 {
- reg = <0 0x11291000 0 0x100>;
- clocks = <&apmixedsys CLK_APMIXED_REF2USB_TX>;
- clock-names = "ref";
- #phy-cells = <1>;
- };
-};
-
-Specifying phy control of devices
----------------------------------
-
-Device nodes should specify the configuration required in their "phys"
-property, containing a phandle to the phy port node and a device type;
-phy-names for each port are optional.
-
-Example:
-
-#include <dt-bindings/phy/phy.h>
-
-usb30: usb@11270000 {
- ...
- phys = <&u2port0 PHY_TYPE_USB2>, <&u3port0 PHY_TYPE_USB3>;
- phy-names = "usb2-0", "usb3-0";
- ...
-};
-
-
-Layout differences of banks between mt8173/mt2701 and mt2712
--------------------------------------------------------------
-mt8173 and mt2701:
-port offset bank
-shared 0x0000 SPLLC
- 0x0100 FMREG
-u2 port0 0x0800 U2PHY_COM
-u3 port0 0x0900 U3PHYD
- 0x0a00 U3PHYD_BANK2
- 0x0b00 U3PHYA
- 0x0c00 U3PHYA_DA
-u2 port1 0x1000 U2PHY_COM
-u3 port1 0x1100 U3PHYD
- 0x1200 U3PHYD_BANK2
- 0x1300 U3PHYA
- 0x1400 U3PHYA_DA
-u2 port2 0x1800 U2PHY_COM
- ...
-
-mt2712:
-port offset bank
-u2 port0 0x0000 MISC
- 0x0100 FMREG
- 0x0300 U2PHY_COM
-u3 port0 0x0700 SPLLC
- 0x0800 CHIP
- 0x0900 U3PHYD
- 0x0a00 U3PHYD_BANK2
- 0x0b00 U3PHYA
- 0x0c00 U3PHYA_DA
-u2 port1 0x1000 MISC
- 0x1100 FMREG
- 0x1300 U2PHY_COM
-u3 port1 0x1700 SPLLC
- 0x1800 CHIP
- 0x1900 U3PHYD
- 0x1a00 U3PHYD_BANK2
- 0x1b00 U3PHYA
- 0x1c00 U3PHYA_DA
-u2 port2 0x2000 MISC
- ...
-
- SPLLC shared by u3 ports and FMREG shared by u2 ports on
-mt8173/mt2701 are put back into each port; a new bank MISC for
-u2 ports and CHIP for u3 ports are added on mt2712.
diff --git a/Documentation/devicetree/bindings/phy/phy-mtk-ufs.txt b/Documentation/devicetree/bindings/phy/phy-mtk-ufs.txt
deleted file mode 100644
index 5789029a1d42..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-mtk-ufs.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-MediaTek Universal Flash Storage (UFS) M-PHY binding
---------------------------------------------------------
-
-UFS M-PHY nodes are defined to describe on-chip UFS M-PHY hardware macro.
-Each UFS M-PHY node should have its own node.
-
-To bind UFS M-PHY with UFS host controller, the controller node should
-contain a phandle reference to UFS M-PHY node.
-
-Required properties for UFS M-PHY nodes:
-- compatible : Compatible list, contains the following controller:
- "mediatek,mt8183-ufsphy" for ufs phy
- persent on MT81xx chipsets.
-- reg : Address and length of the UFS M-PHY register set.
-- #phy-cells : This property shall be set to 0.
-- clocks : List of phandle and clock specifier pairs.
-- clock-names : List of clock input name strings sorted in the same
- order as the clocks property. Following clocks are
- mandatory.
- "unipro": Unipro core control clock.
- "mp": M-PHY core control clock.
-
-Example:
-
- ufsphy: phy@11fa0000 {
- compatible = "mediatek,mt8183-ufsphy";
- reg = <0 0x11fa0000 0 0xc000>;
- #phy-cells = <0>;
-
- clocks = <&infracfg_ao INFRACFG_AO_UNIPRO_SCK_CG>,
- <&infracfg_ao INFRACFG_AO_UFS_MP_SAP_BCLK_CG>;
- clock-names = "unipro", "mp";
- };
-
- ufshci@11270000 {
- ...
- phys = <&ufsphy>;
- };
diff --git a/Documentation/devicetree/bindings/phy/phy-mtk-xsphy.txt b/Documentation/devicetree/bindings/phy/phy-mtk-xsphy.txt
deleted file mode 100644
index e7caefa0b9c2..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-mtk-xsphy.txt
+++ /dev/null
@@ -1,109 +0,0 @@
-MediaTek XS-PHY binding
---------------------------
-
-The XS-PHY controller supports physical layer functionality for USB3.1
-GEN2 controller on MediaTek SoCs.
-
-Required properties (controller (parent) node):
- - compatible : should be "mediatek,<soc-model>-xsphy", "mediatek,xsphy",
- soc-model is the name of SoC, such as mt3611 etc;
- when using "mediatek,xsphy" compatible string, you need SoC specific
- ones in addition, one of:
- - "mediatek,mt3611-xsphy"
-
- - #address-cells, #size-cells : should use the same values as the root node
- - ranges: must be present
-
-Optional properties (controller (parent) node):
- - reg : offset and length of register shared by multiple U3 ports,
- exclude port's private register, if only U2 ports provided,
- shouldn't use the property.
- - mediatek,src-ref-clk-mhz : u32, frequency of reference clock for slew rate
- calibrate
- - mediatek,src-coef : u32, coefficient for slew rate calibrate, depends on
- SoC process
-
-Required nodes : a sub-node is required for each port the controller
- provides. Address range information including the usual
- 'reg' property is used inside these nodes to describe
- the controller's topology.
-
-Required properties (port (child) node):
-- reg : address and length of the register set for the port.
-- clocks : a list of phandle + clock-specifier pairs, one for each
- entry in clock-names
-- clock-names : must contain
- "ref": 48M reference clock for HighSpeed analog phy; and 26M
- reference clock for SuperSpeedPlus analog phy, sometimes is
- 24M, 25M or 27M, depended on platform.
-- #phy-cells : should be 1
- cell after port phandle is phy type from:
- - PHY_TYPE_USB2
- - PHY_TYPE_USB3
-
-The following optional properties are only for debug or HQA test
-Optional properties (PHY_TYPE_USB2 port (child) node):
-- mediatek,eye-src : u32, the value of slew rate calibrate
-- mediatek,eye-vrt : u32, the selection of VRT reference voltage
-- mediatek,eye-term : u32, the selection of HS_TX TERM reference voltage
-- mediatek,efuse-intr : u32, the selection of Internal Resistor
-
-Optional properties (PHY_TYPE_USB3 port (child) node):
-- mediatek,efuse-intr : u32, the selection of Internal Resistor
-- mediatek,efuse-tx-imp : u32, the selection of TX Impedance
-- mediatek,efuse-rx-imp : u32, the selection of RX Impedance
-
-Banks layout of xsphy
--------------------------------------------------------------
-port offset bank
-u2 port0 0x0000 MISC
- 0x0100 FMREG
- 0x0300 U2PHY_COM
-u2 port1 0x1000 MISC
- 0x1100 FMREG
- 0x1300 U2PHY_COM
-u2 port2 0x2000 MISC
- ...
-u31 common 0x3000 DIG_GLB
- 0x3100 PHYA_GLB
-u31 port0 0x3400 DIG_LN_TOP
- 0x3500 DIG_LN_TX0
- 0x3600 DIG_LN_RX0
- 0x3700 DIG_LN_DAIF
- 0x3800 PHYA_LN
-u31 port1 0x3a00 DIG_LN_TOP
- 0x3b00 DIG_LN_TX0
- 0x3c00 DIG_LN_RX0
- 0x3d00 DIG_LN_DAIF
- 0x3e00 PHYA_LN
- ...
-
-DIG_GLB & PHYA_GLB are shared by U31 ports.
-
-Example:
-
-u3phy: usb-phy@11c40000 {
- compatible = "mediatek,mt3611-xsphy", "mediatek,xsphy";
- reg = <0 0x11c43000 0 0x0200>;
- mediatek,src-ref-clk-mhz = <26>;
- mediatek,src-coef = <17>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- u2port0: usb-phy@11c40000 {
- reg = <0 0x11c40000 0 0x0400>;
- clocks = <&clk48m>;
- clock-names = "ref";
- mediatek,eye-src = <4>;
- #phy-cells = <1>;
- };
-
- u3port0: usb-phy@11c43000 {
- reg = <0 0x11c43400 0 0x0500>;
- clocks = <&clk26m>;
- clock-names = "ref";
- mediatek,efuse-intr = <28>;
- #phy-cells = <1>;
- };
-};
diff --git a/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml b/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml
index 0ba61979b970..46df6786727a 100644
--- a/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml
+++ b/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml
@@ -45,6 +45,12 @@ properties:
"#size-cells":
const: 0
+ vdda1v1-supply:
+ description: regulator providing 1V1 power supply to the PLL block
+
+ vdda1v8-supply:
+ description: regulator providing 1V8 power supply to the PLL block
+
#Required child nodes:
patternProperties:
@@ -61,12 +67,6 @@ patternProperties:
phy-supply:
description: regulator providing 3V3 power supply to the PHY.
- vdda1v1-supply:
- description: regulator providing 1V1 power supply to the PLL block
-
- vdda1v8-supply:
- description: regulator providing 1V8 power supply to the PLL block
-
"#phy-cells":
enum: [ 0x0, 0x1 ]
@@ -90,8 +90,6 @@ patternProperties:
required:
- reg
- phy-supply
- - vdda1v1-supply
- - vdda1v8-supply
- "#phy-cells"
additionalProperties: false
@@ -102,6 +100,8 @@ required:
- clocks
- "#address-cells"
- "#size-cells"
+ - vdda1v1-supply
+ - vdda1v8-supply
- usb-phy@0
- usb-phy@1
@@ -116,22 +116,20 @@ examples:
reg = <0x5a006000 0x1000>;
clocks = <&rcc USBPHY_K>;
resets = <&rcc USBPHY_R>;
+ vdda1v1-supply = <&reg11>;
+ vdda1v8-supply = <&reg18>;
#address-cells = <1>;
#size-cells = <0>;
usbphyc_port0: usb-phy@0 {
reg = <0>;
phy-supply = <&vdd_usb>;
- vdda1v1-supply = <&reg11>;
- vdda1v8-supply = <&reg18>;
#phy-cells = <0>;
};
usbphyc_port1: usb-phy@1 {
reg = <1>;
phy-supply = <&vdd_usb>;
- vdda1v1-supply = <&reg11>;
- vdda1v8-supply = <&reg18>;
#phy-cells = <1>;
};
};
diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
index ec05db374645..626447fee092 100644
--- a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
@@ -25,19 +25,32 @@ properties:
- qcom,msm8998-qmp-pcie-phy
- qcom,msm8998-qmp-ufs-phy
- qcom,msm8998-qmp-usb3-phy
+ - qcom,sc8180x-qmp-ufs-phy
+ - qcom,sc8180x-qmp-usb3-phy
- qcom,sdm845-qhp-pcie-phy
- qcom,sdm845-qmp-pcie-phy
- qcom,sdm845-qmp-ufs-phy
- qcom,sdm845-qmp-usb3-uni-phy
- qcom,sm8150-qmp-ufs-phy
+ - qcom,sm8150-qmp-usb3-phy
+ - qcom,sm8150-qmp-usb3-uni-phy
- qcom,sm8250-qmp-ufs-phy
- qcom,sm8250-qmp-gen3x1-pcie-phy
- qcom,sm8250-qmp-gen3x2-pcie-phy
- qcom,sm8250-qmp-modem-pcie-phy
+ - qcom,sm8250-qmp-usb3-phy
+ - qcom,sm8250-qmp-usb3-uni-phy
+ - qcom,sm8350-qmp-ufs-phy
+ - qcom,sm8350-qmp-usb3-phy
+ - qcom,sm8350-qmp-usb3-uni-phy
+ - qcom,sdx55-qmp-usb3-uni-phy
reg:
+ minItems: 1
+ maxItems: 2
items:
- description: Address and length of PHY's common serdes block.
+ - description: Address and length of PHY's DP_COM control block.
"#clock-cells":
enum: [ 1, 2 ]
@@ -136,6 +149,32 @@ allOf:
compatible:
contains:
enum:
+ - qcom,sdx55-qmp-usb3-uni-phy
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Phy aux clock.
+ - description: Phy config clock.
+ - description: 19.2 MHz ref clk.
+ clock-names:
+ items:
+ - const: aux
+ - const: cfg_ahb
+ - const: ref
+ resets:
+ items:
+ - description: reset of phy block.
+ - description: phy common block reset.
+ reset-names:
+ items:
+ - const: phy
+ - const: common
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,msm8996-qmp-pcie-phy
then:
properties:
@@ -285,6 +324,64 @@ allOf:
reset-names:
items:
- const: phy
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8150-qmp-usb3-phy
+ - qcom,sm8150-qmp-usb3-uni-phy
+ - qcom,sm8250-qmp-usb3-uni-phy
+ - qcom,sm8350-qmp-usb3-uni-phy
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Phy aux clock.
+ - description: 19.2 MHz ref clk source.
+ - description: 19.2 MHz ref clk.
+ - description: Phy common block aux clock.
+ clock-names:
+ items:
+ - const: aux
+ - const: ref_clk_src
+ - const: ref
+ - const: com_aux
+ resets:
+ items:
+ - description: reset of phy block.
+ - description: phy common block reset.
+ reset-names:
+ items:
+ - const: phy
+ - const: common
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8250-qmp-usb3-phy
+ - qcom,sm8350-qmp-usb3-phy
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Phy aux clock.
+ - description: 19.2 MHz ref clk.
+ - description: Phy common block aux clock.
+ clock-names:
+ items:
+ - const: aux
+ - const: ref_clk_src
+ - const: com_aux
+ resets:
+ items:
+ - description: reset of phy block.
+ - description: phy common block reset.
+ reset-names:
+ items:
+ - const: phy
+ - const: common
examples:
- |
diff --git a/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
index d457fb6a4779..9f9cf07b7d45 100644
--- a/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
@@ -21,6 +21,8 @@ properties:
- qcom,ipq8074-qusb2-phy
- qcom,msm8996-qusb2-phy
- qcom,msm8998-qusb2-phy
+ - qcom,sdm660-qusb2-phy
+ - qcom,ipq6018-qusb2-phy
- items:
- enum:
- qcom,sc7180-qusb2-phy
diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-hs-28nm.yaml b/Documentation/devicetree/bindings/phy/qcom,usb-hs-28nm.yaml
index ca6a0836b53c..abcc4373f39e 100644
--- a/Documentation/devicetree/bindings/phy/qcom,usb-hs-28nm.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,usb-hs-28nm.yaml
@@ -16,6 +16,7 @@ properties:
compatible:
enum:
- qcom,usb-hs-28nm-femtophy
+ - qcom,usb-hs-28nm-mdm9607
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
index 4949a2851532..ee77c6458326 100644
--- a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
@@ -17,6 +17,8 @@ properties:
enum:
- qcom,usb-snps-hs-7nm-phy
- qcom,sm8150-usb-hs-phy
+ - qcom,sm8250-usb-hs-phy
+ - qcom,sm8350-usb-hs-phy
- qcom,usb-snps-femto-v2-phy
reg:
diff --git a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
index 829e8c7e467a..0f358d5b84ef 100644
--- a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
@@ -81,9 +81,8 @@ properties:
if:
properties:
compatible:
- items:
- enum:
- - renesas,usb2-phy-r7s9210
+ contains:
+ const: renesas,usb2-phy-r7s9210
then:
required:
- clock-names
diff --git a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
index 00aa2d349e55..57d28c0d5696 100644
--- a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
@@ -16,11 +16,11 @@ Optional properties:
- drive-impedance-ohm: Specifies the drive impedance in Ohm.
Possible values are 33, 40, 50, 66 and 100.
If not set, the default value of 50 will be applied.
- - enable-strobe-pulldown: Enable internal pull-down for the strobe line.
- If not set, pull-down is not used.
- - output-tapdelay-select: Specifies the phyctrl_otapdlysec register.
- If not set, the register defaults to 0x4.
- Maximum value 0xf.
+ - rockchip,enable-strobe-pulldown: Enable internal pull-down for the strobe
+ line. If not set, pull-down is not used.
+ - rockchip,output-tapdelay-select: Specifies the phyctrl_otapdlysec register.
+ If not set, the register defaults to 0x4.
+ Maximum value 0xf.
Example:
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml
index 34756347a14e..745c525ce6b9 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml
@@ -20,7 +20,7 @@ properties:
- socionext,uniphier-pxs3-ahci-phy
reg:
- description: PHY register region (offset and length)
+ maxItems: 1
"#phy-cells":
const: 0
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
index a06831fd64b9..3e0566899041 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
@@ -21,7 +21,7 @@ properties:
- socionext,uniphier-pxs3-pcie-phy
reg:
- description: PHY register region (offset and length)
+ maxItems: 1
"#phy-cells":
const: 0
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
index 6fa5caab1487..a681cbc3b4ef 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
@@ -24,7 +24,7 @@ properties:
- socionext,uniphier-pxs3-usb3-hsphy
reg:
- description: PHY register region (offset and length)
+ maxItems: 1
"#phy-cells":
const: 0
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
index 9d46715ed036..41c0dd68ee25 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
@@ -25,7 +25,7 @@ properties:
- socionext,uniphier-pxs3-usb3-ssphy
reg:
- description: PHY register region (offset and length)
+ maxItems: 1
"#phy-cells":
const: 0
diff --git a/Documentation/devicetree/bindings/phy/ti,phy-gmii-sel.yaml b/Documentation/devicetree/bindings/phy/ti,phy-gmii-sel.yaml
index bcec422d7734..ff8a6d9eb153 100644
--- a/Documentation/devicetree/bindings/phy/ti,phy-gmii-sel.yaml
+++ b/Documentation/devicetree/bindings/phy/ti,phy-gmii-sel.yaml
@@ -55,7 +55,7 @@ properties:
- ti,am654-phy-gmii-sel
reg:
- description: Address and length of the register set for the device
+ maxItems: 1
'#phy-cells': true
diff --git a/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
index c33e9bc79521..bbbd85501ada 100644
--- a/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
+++ b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
@@ -151,7 +151,7 @@ patternProperties:
WIZ node should have '1' subnode for the SERDES. It could be either
Sierra SERDES or Torrent SERDES. Sierra SERDES should follow the
bindings specified in
- Documentation/devicetree/bindings/phy/phy-cadence-sierra.txt
+ Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml
Torrent SERDES should follow the bindings specified in
Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
index 5240487dfe50..cce63c3cc463 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
@@ -53,6 +53,8 @@ properties:
- allwinner,sun50i-h5-pinctrl
- allwinner,sun50i-h6-pinctrl
- allwinner,sun50i-h6-r-pinctrl
+ - allwinner,sun50i-h616-pinctrl
+ - allwinner,sun50i-h616-r-pinctrl
- allwinner,suniv-f1c100s-pinctrl
- nextthing,gr8-pinctrl
@@ -61,7 +63,7 @@ properties:
interrupts:
minItems: 1
- maxItems: 7
+ maxItems: 8
description:
One interrupt per external interrupt bank supported on the
controller, sorted by bank number ascending order.
@@ -91,7 +93,7 @@ properties:
bank found in the controller
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
- maxItems: 5
+ maxItems: 8
patternProperties:
# It's pretty scary, but the basic idea is that:
@@ -149,6 +151,17 @@ allOf:
properties:
compatible:
enum:
+ - allwinner,sun50i-h616-pinctrl
+
+ then:
+ properties:
+ interrupts:
+ minItems: 8
+
+ - if:
+ properties:
+ compatible:
+ enum:
- allwinner,sun50i-a100-pinctrl
then:
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
index 54631dc1adb0..100bb6dea3ec 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
@@ -23,8 +23,7 @@ properties:
compatible:
const: aspeed,ast2400-pinctrl
reg:
- description: |
- A hint for the memory regions associated with the pin-controller
+ maxItems: 2
patternProperties:
'^.*$':
@@ -63,7 +62,7 @@ examples:
reg = <0x1e6e2000 0x1a8>;
pinctrl: pinctrl {
- compatible = "aspeed,g4-pinctrl";
+ compatible = "aspeed,ast2400-pinctrl";
pinctrl_i2c3_default: i2c3_default {
function = "I2C3";
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
index a90c0fe0495f..904697bc9415 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
@@ -24,8 +24,8 @@ properties:
compatible:
const: aspeed,ast2500-pinctrl
reg:
- description: |
- A hint for the memory regions associated with the pin-controller
+ maxItems: 2
+
aspeed,external-nodes:
minItems: 2
maxItems: 2
@@ -81,7 +81,7 @@ examples:
reg = <0x1e6e2000 0x1a8>;
pinctrl: pinctrl {
- compatible = "aspeed,g5-pinctrl";
+ compatible = "aspeed,ast2500-pinctrl";
aspeed,external-nodes = <&gfx>, <&lhc>;
pinctrl_i2c3_default: i2c3_default {
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
index c78ab7e2eee7..ad91c0bc54da 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
@@ -95,7 +95,7 @@ examples:
reg = <0x1e6e2000 0xf6c>;
pinctrl: pinctrl {
- compatible = "aspeed,g6-pinctrl";
+ compatible = "aspeed,ast2600-pinctrl";
pinctrl_pwm10g1_default: pwm10g1_default {
function = "PWM10";
diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt
index 265015bc0603..e2b861ce16d8 100644
--- a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pio4-pinctrl.txt
@@ -35,9 +35,11 @@ ioset settings. Use the macros from boot/dts/<soc>-pinfunc.h file to get the
right representation of the pin.
Optional properties:
-- GENERIC_PINCONFIG: generic pinconfig options to use, bias-disable,
-bias-pull-down, bias-pull-up, drive-open-drain, input-schmitt-enable,
-input-debounce, output-low, output-high.
+- GENERIC_PINCONFIG: generic pinconfig options to use:
+ - bias-disable, bias-pull-down, bias-pull-up, drive-open-drain,
+ input-schmitt-enable, input-debounce, output-low, output-high.
+ - for microchip,sama7g5-pinctrl only:
+ - slew-rate: 0 - disabled, 1 - enabled (default)
- atmel,drive-strength: 0 or 1 for low drive, 2 for medium drive and 3 for
high drive. The default value is low drive.
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,ns2-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/brcm,ns2-pinmux.txt
index e295dda4bbba..40e0a9a19525 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,ns2-pinmux.txt
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,ns2-pinmux.txt
@@ -39,7 +39,7 @@ For example:
<0x660009b0 0x40>;
pinctrl-names = "default";
- pinctrl-0 = <&nand_sel &uart3_rx &sdio0_d4>;
+ pinctrl-0 = <&nand_sel>, <&uart3_rx>, <&sdio0_d4>;
/* Select nand function */
nand_sel: nand_sel {
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,nsp-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/brcm,nsp-pinmux.txt
index 603564e5fe6f..dede11e4ef78 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,nsp-pinmux.txt
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,nsp-pinmux.txt
@@ -30,7 +30,7 @@ For example:
<0x1803f408 0x04>;
pinctrl-names = "default";
- pinctrl-0 = <&pwm &gpio_b &nand_sel>;
+ pinctrl-0 = <&pwm>, <&gpio_b>, <&nand_sel>;
pwm: pwm {
function = "pwm";
diff --git a/Documentation/devicetree/bindings/pinctrl/canaan,k210-fpioa.yaml b/Documentation/devicetree/bindings/pinctrl/canaan,k210-fpioa.yaml
new file mode 100644
index 000000000000..46fbc73ab26b
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/canaan,k210-fpioa.yaml
@@ -0,0 +1,171 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/canaan,k210-fpioa.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Canaan Kendryte K210 FPIOA Device Tree Bindings
+
+maintainers:
+ - Damien Le Moal <damien.lemoal@wdc.com>
+
+description:
+ The Canaan Kendryte K210 SoC Fully Programmable IO Array (FPIOA)
+ controller allows assigning any of 256 possible functions to any of
+ 48 IO pins of the SoC. Pin function configuration is performed on
+ a per-pin basis.
+
+properties:
+ compatible:
+ const: canaan,k210-fpioa
+
+ reg:
+ maxItems: 1
+ description:
+ Address and length of the register set for the FPIOA controller.
+
+ clocks:
+ items:
+ - description: Controller reference clock source
+ - description: APB interface clock source
+
+ clock-names:
+ items:
+ - const: ref
+ - const: pclk
+
+ resets:
+ maxItems: 1
+
+ canaan,k210-sysctl-power:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+ phandle of the K210 system controller node and offset of its
+ power domain control register.
+
+patternProperties:
+ '-pinmux$':
+ type: object
+ $ref: /schemas/pinctrl/pinmux-node.yaml
+ description:
+ FPIOA client devices use sub-nodes to define the desired pin
+ configuration. Client device sub-nodes use the pinmux property
+ below.
+
+ properties:
+ pinmux:
+ description:
+ List of IO pin alternate functions. The value for each IO
+ pin is a combination of an IO pin number (0 to 47) and the
+ desired function for that pin. Functions are defined as
+ macros in include/dt-bindings/pinctrl/k210-fpioa.h.
+ The K210_FPIOA(IO pin, function) macro is provided to
+ facilitate the combination of IO pin numbers and functions.
+
+ required:
+ - pinmux
+
+ additionalProperties: false
+
+ '-pins$':
+ type: object
+ $ref: /schemas/pinctrl/pincfg-node.yaml
+ description:
+ FPIOA client devices use sub-nodes to define the desired
+ configuration of pins. Client device sub-nodes use the
+ properties below.
+
+ properties:
+ pins:
+ description:
+ List of IO pins affected by the properties specified in this
+ subnode. IO pins are identified using the pin names "IO_xx".
+ Pin configuration nodes can also define the power domain to
+ be used for the SoC pin groups A0 (IO pins 0-5),
+ A1 (IO pins 6-11), A2 (IO pins 12-17), B3 (IO pins 18-23),
+ B4 (IO pins 24-29), B5 (IO pins 30-35),
+ C6 (IO pins 36-41) and C7 (IO pins 42-47) using the
+ power-source property.
+ items:
+ anyOf:
+ - pattern: "^(IO_([0-9]*))|(A[0-2])|(B[3-5])|(C[6-7])$"
+ - enum: [ IO_0, IO_1, IO_2, IO_3, IO_4, IO_5, IO_6, IO_7,
+ IO_8, IO_9, IO_10, IO_11, IO_12, IO_13, IO_14,
+ IO_15, IO_16, IO_17, IO_18, IO_19, IO_20, IO_21,
+ IO_22, IO_23, IO_24, IO_25, IO_26, IO_27, IO_28,
+ IO_29, IO_30, IO_31, IO_32, IO_33, IO_34, IO_35,
+ IO_36, IO_37, IO_38, IO_39, IO_40, IO_41, IO_42,
+ IO_43, IO_44, IO_45, IO_46, IO_47,
+ A0, A1, A2, B3, B4, B5, C6, C7 ]
+ bias-disable: true
+
+ bias-pull-down: true
+
+ bias-pull-up: true
+
+ drive-strength: true
+
+ drive-strength-microamp: true
+
+ input-enable: true
+
+ input-disable: true
+
+ input-schmitt-enable: true
+
+ input-schmitt-disable: true
+
+ input-polarity-invert:
+ description:
+ Enable or disable pin input polarity inversion.
+
+ output-enable: true
+
+ output-disable: true
+
+ output-high: true
+
+ output-low: true
+
+ output-polarity-invert:
+ description:
+ Enable or disable pin output polarity inversion.
+
+ slew-rate: true
+
+ power-source: true
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - canaan,k210-sysctl-power
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/pinctrl/k210-fpioa.h>
+ #include <dt-bindings/clock/k210-clk.h>
+ #include <dt-bindings/reset/k210-rst.h>
+
+ fpioa: pinmux@502B0000 {
+ compatible = "canaan,k210-fpioa";
+ reg = <0x502B0000 0x100>;
+ clocks = <&sysclk K210_CLK_FPIOA>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "ref", "pclk";
+ resets = <&sysrst K210_RST_FPIOA>;
+ canaan,k210-sysctl-power = <&sysctl 108>;
+ pinctrl-0 = <&jtag_pinctrl>;
+ pinctrl-names = "default";
+
+ jtag_pinctrl: jtag-pinmux {
+ pinmux = <K210_FPIOA(0, K210_PCF_JTAG_TCLK)>,
+ <K210_FPIOA(1, K210_PCF_JTAG_TDI)>,
+ <K210_FPIOA(2, K210_PCF_JTAG_TMS)>,
+ <K210_FPIOA(3, K210_PCF_JTAG_TDO)>;
+ };
+ };
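+  # Illustrative '-pins' configuration sub-node (a sketch; the pin names and
+  # the chosen generic pin configuration properties are examples only):
+  - |
+    #include <dt-bindings/clock/k210-clk.h>
+
+    pinmux@502b0000 {
+        compatible = "canaan,k210-fpioa";
+        reg = <0x502b0000 0x100>;
+        clocks = <&sysclk K210_CLK_FPIOA>, <&sysclk K210_CLK_APB0>;
+        clock-names = "ref", "pclk";
+        canaan,k210-sysctl-power = <&sysctl 108>;
+
+        sdcard_pins: sdcard-pins {
+            pins = "IO_27", "IO_28";
+            bias-pull-up;
+            input-schmitt-enable;
+        };
+    };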
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt
index 8ac1d0851a0f..bfab5ca49fd1 100644
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt
@@ -60,7 +60,7 @@ iomuxc-lpsr controller and SDA pad from iomuxc controller as:
i2c1: i2c@30a20000 {
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2c1_1 &pinctrl_i2c1_2>;
+ pinctrl-0 = <&pinctrl_i2c1_1>, <&pinctrl_i2c1_2>;
};
iomuxc-lpsr@302c0000 {
diff --git a/Documentation/devicetree/bindings/pinctrl/microchip,sparx5-sgpio.yaml b/Documentation/devicetree/bindings/pinctrl/microchip,sparx5-sgpio.yaml
index df0c83cb1c6e..4fe35e650909 100644
--- a/Documentation/devicetree/bindings/pinctrl/microchip,sparx5-sgpio.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/microchip,sparx5-sgpio.yaml
@@ -99,8 +99,8 @@ patternProperties:
'#interrupt-cells':
description:
- Specifies the pin (port and bit) and flags, as defined in
- defined in include/dt-bindings/interrupt-controller/irq.h
+ Specifies the pin (port and bit) and flags, as defined in
+ include/dt-bindings/interrupt-controller/irq.h
const: 3
ngpios:
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-atlas7.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-atlas7.txt
deleted file mode 100644
index fbdd1a716a1e..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-atlas7.txt
+++ /dev/null
@@ -1,109 +0,0 @@
-CSR SiRFatlas7 pinmux controller
-
-Required properties:
-- compatible : "sirf,atlas7-ioc"
-- reg : Address range of the pinctrl registers
-
-For example, pinctrl might have properties like the following:
- pinctrl: ioc@18880000 {
- compatible = "sirf,atlas7-ioc";
- reg = <0x18880000 0x1000>;
-
- a_ac97_pmx: ac97@0 {
- ac97 {
- groups = "audio_ac97_grp";
- function = "audio_ac97";
- };
- };
-
- ...
-
- sd2_pmx: sd2@0 {
- sd2 {
- groups = "sd2_grp0";
- function = "sd2";
- };
- };
-
- ...
-
-
- sample0_cfg: sample0@0 {
- sample0 {
- pins = "ldd_0", "ldd_1";
- bias-pull-up;
- };
- };
-
- sample1_cfg: sample1@0 {
- sample1 {
- pins = "ldd_2", "ldd_3";
- input-schmitt-enable;
- };
- };
-
- sample2_cfg: sample2@0 {
- sample2 {
- groups = "uart4_nopause_grp";
- bias-pull-down;
- };
- };
-
- sample3_cfg: sample3@0 {
- sample3 {
- pins = "ldd_4", "ldd_5";
- drive-strength = <2>;
- };
- };
- };
-
-Please refer to pinctrl-bindings.txt in this directory for details of the common
-pinctrl bindings used by client devices.
-
-SiRFatlas7's pinmux nodes act as a container for an arbitrary number of subnodes.
-Each of these subnodes represents some desired configuration for a group of pins.
-
-Required subnode-properties:
-- groups : An array of strings. Each string contains the name of a group.
-- function: A string containing the name of the function to mux to the
- group.
-
- Valid values for group and function names can be found from looking at the
- group and function arrays in driver files:
- drivers/pinctrl/pinctrl-sirf.c
-
-For example, pinctrl might have subnodes like the following:
- sd0_pmx: sd0@0 {
- sd0 {
- groups = "sd0_grp";
- function = "sd0";
- };
- };
-
- sd1_pmx0: sd1@0 {
- sd1 {
- groups = "sd1_grp0";
- function = "sd1_m0";
- };
- };
-
- sd1_pmx1: sd1@1 {
- sd1 {
- groups = "sd1_grp1";
- function = "sd1_m1";
- };
- };
-
-For a specific board, if it wants to use sd1,
-it can add the following to its board-specific .dts file.
-sd1: sd@12340000 {
- pinctrl-names = "default";
- pinctrl-0 = <&sd1_pmx0>;
-}
-
-or
-
-sd1: sd@12340000 {
- pinctrl-names = "default";
- pinctrl-0 = <&sd1_pmx1>;
-}
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
index 4613bb17ace3..9dae60acf950 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -77,13 +77,13 @@ For example:
device {
pinctrl-names = "active", "idle";
pinctrl-0 = <&state_0_node_a>;
- pinctrl-1 = <&state_1_node_a &state_1_node_b>;
+ pinctrl-1 = <&state_1_node_a>, <&state_1_node_b>;
};
/* For the same device if using state IDs */
device {
pinctrl-0 = <&state_0_node_a>;
- pinctrl-1 = <&state_1_node_a &state_1_node_b>;
+ pinctrl-1 = <&state_1_node_a>, <&state_1_node_b>;
};
/*
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt
index 8b94aa8f5971..6ec3c8d79f49 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt
@@ -134,7 +134,7 @@ gpio21: gpio@21 {
#interrupt-cells = <0x2>;
microchip,irq-mirror;
pinctrl-names = "default";
- pinctrl-0 = <&i2cgpio0irq &gpio21pullups>;
+ pinctrl-0 = <&i2cgpio0irq>, <&gpio21pullups>;
gpio21pullups: pinmux {
pins = "gpio0", "gpio1", "gpio2", "gpio3",
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
index 931a18cd1e23..360e59c9301a 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
@@ -91,7 +91,7 @@ Examples:
pinctrl@1c20800 {
compatible = "mediatek,mt8135-pinctrl";
reg = <0 0x1000B000 0 0x1000>;
- mediatek,pctl-regmap = <&syscfg_pctl_a &syscfg_pctl_b>;
+ mediatek,pctl-regmap = <&syscfg_pctl_a>, <&syscfg_pctl_b>;
pins-are-numbered;
gpio-controller;
#gpio-cells = <2>;
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt
index f903eb4471f8..bfd222b05495 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt
@@ -8,7 +8,7 @@ Required properties:
- reg : offset and length of the register set for the mux registers
- #pinctrl-cells : number of cells in addition to the index, set to 1
- for pinctrl-single,pins and 2 for pinctrl-single,bits
+ or 2 for pinctrl-single,pins and set to 2 for pinctrl-single,bits
- pinctrl-single,register-width : pinmux register access width in bits
@@ -80,7 +80,7 @@ Optional properties:
property.
/* pin base, nr pins & gpio function */
- pinctrl-single,gpio-range = <&range 0 3 0 &range 3 9 1>;
+ pinctrl-single,gpio-range = <&range 0 3 0>, <&range 3 9 1>;
- interrupt-controller : standard interrupt controller binding if using
interrupts for wake-up events for example. In this case pinctrl-single
@@ -185,10 +185,10 @@ pmx_gpio: pinmux@d401e000 {
pinctrl-single,function-mask = <7>;
/* sparse GPIO range could be supported */
- pinctrl-single,gpio-range = <&range 0 3 0 &range 3 9 1
- &range 12 1 0 &range 13 29 1
- &range 43 1 0 &range 44 49 1
- &range 94 1 1 &range 96 2 1>;
+ pinctrl-single,gpio-range = <&range 0 3 0>, <&range 3 9 1>,
+ <&range 12 1 0>, <&range 13 29 1>,
+ <&range 43 1 0>, <&range 44 49 1>,
+ <&range 94 1 1>, <&range 96 2 1>;
range: gpio-range {
#pinctrl-single,gpio-range-cells = <3>;
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-zx.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-zx.txt
deleted file mode 100644
index 39170f372599..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-zx.txt
+++ /dev/null
@@ -1,84 +0,0 @@
-* ZTE ZX Pin Controller
-
-The pin controller on ZTE ZX platforms is kinda of hybrid. It consists of
-a main controller and an auxiliary one. For example, on ZX296718 SoC, the
-main controller is TOP_PMM and the auxiliary one is AON_IOCFG. Both
-controllers work together to control pin multiplexing and configuration in
-the way illustrated as below.
-
-
- GMII_RXD3 ---+
- |
- DVI1_HS ---+----------------------------- GMII_RXD3 (TOP pin)
- |
- BGPIO16 ---+ ^
- | pinconf
- ^ |
- | pinmux |
- | |
-
- TOP_PMM (main) AON_IOCFG (aux)
-
- | | |
- | pinmux | |
- | pinmux v |
- v | pinconf
- KEY_ROW2 ---+ v
- PORT1_LCD_TE ---+ |
- | AGPIO10 ---+------ KEY_ROW2 (AON pin)
- I2S0_DOUT3 ---+ |
- |-----------------------+
- PWM_OUT3 ---+
- |
- VGA_VS1 ---+
-
-
-For most of pins like GMII_RXD3 in the figure, the pinmux function is
-controlled by TOP_PMM block only, and this type of pins are meant by term
-'TOP pins'. For pins like KEY_ROW2, the pinmux is controlled by both
-TOP_PMM and AON_IOCFG blocks, as the available multiplexing functions for
-the pin spread in both controllers. This type of pins are called 'AON pins'.
-Though pinmux implementation is quite different, pinconf is same for both
-types of pins. Both are controlled by auxiliary controller, i.e. AON_IOCFG
-on ZX296718.
-
-Required properties:
-- compatible: should be "zte,zx296718-pmm".
-- reg: the register physical address and length.
-- zte,auxiliary-controller: phandle to the auxiliary pin controller which
- implements pinmux for AON pins and pinconf for all pins.
-
-The following pin configuration are supported. Please refer to
-pinctrl-bindings.txt in this directory for more details of the common
-pinctrl bindings used by client devices.
-
-- bias-pull-up
-- bias-pull-down
-- drive-strength
-- input-enable
-- slew-rate
-
-Examples:
-
-iocfg: pin-controller@119000 {
- compatible = "zte,zx296718-iocfg";
- reg = <0x119000 0x1000>;
-};
-
-pmm: pin-controller@1462000 {
- compatible = "zte,zx296718-pmm";
- reg = <0x1462000 0x1000>;
- zte,auxiliary-controller = <&iocfg>;
-};
-
-&pmm {
- vga_pins: vga {
- pins = "KEY_COL1", "KEY_COL2", "KEY_ROW1", "KEY_ROW2";
- function = "VGA";
- };
-};
-
-&vga {
- pinctrl-names = "default";
- pinctrl-0 = <&vga_pins>;
-};
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
index 448d36a85730..0ba07bc96c55 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
@@ -8,6 +8,7 @@ of PMIC's from Qualcomm.
Value type: <string>
Definition: Should contain one of:
"qcom,pm8018-mpp",
+ "qcom,pm8019-mpp",
"qcom,pm8038-mpp",
"qcom,pm8058-mpp",
"qcom,pm8821-mpp",
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sc8180x-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sc8180x-pinctrl.yaml
new file mode 100644
index 000000000000..a82dab898395
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sc8180x-pinctrl.yaml
@@ -0,0 +1,152 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,sc8180x-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. SC8180X TLMM block
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description: |
+ This binding describes the Top Level Mode Multiplexer block found in the
+ SC8180X platform.
+
+allOf:
+ - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,sc8180x-tlmm
+
+ reg:
+ maxItems: 3
+
+ reg-names:
+ items:
+ - const: "west"
+ - const: "east"
+ - const: "south"
+
+ interrupts: true
+ interrupt-controller: true
+ '#interrupt-cells': true
+ gpio-controller: true
+ gpio-reserved-ranges: true
+ '#gpio-cells': true
+ gpio-ranges: true
+ wakeup-parent: true
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+additionalProperties: false
+
+patternProperties:
+ '-state$':
+ oneOf:
+ - $ref: "#/$defs/qcom-sc8180x-tlmm-state"
+ - patternProperties:
+ ".*":
+ $ref: "#/$defs/qcom-sc8180x-tlmm-state"
+
+'$defs':
+ qcom-sc8180x-tlmm-state:
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use the standard properties listed below.
+ $ref: "qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state"
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ oneOf:
+ - pattern: "^gpio([0-9]|[1-9][0-9]|1[0-8][0-9])$"
+ - enum: [ sdc2_clk, sdc2_cmd, sdc2_data, ufs_reset ]
+ minItems: 1
+ maxItems: 16
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+
+ enum: [ adsp_ext, agera_pll, aoss_cti, atest_char, atest_tsens,
+ atest_tsens2, atest_usb0, atest_usb1, atest_usb2, atest_usb3,
+ atest_usb4, audio_ref, btfm_slimbus, cam_mclk, cci_async,
+ cci_i2c, cci_timer0, cci_timer1, cci_timer2, cci_timer3,
+ cci_timer4, cci_timer5, cci_timer6, cci_timer7, cci_timer8,
+ cci_timer9, cri_trng, dbg_out, ddr_bist, ddr_pxi, debug_hot,
+ dp_hot, edp_hot, edp_lcd, emac_phy, emac_pps, gcc_gp1, gcc_gp2,
+ gcc_gp3, gcc_gp4, gcc_gp5, gpio, gps, grfc, hs1_mi2s, hs2_mi2s,
+ hs3_mi2s, jitter_bist, lpass_slimbus, m_voc, mdp_vsync,
+ mdp_vsync0, mdp_vsync1, mdp_vsync2, mdp_vsync3, mdp_vsync4,
+ mdp_vsync5, mss_lte, nav_pps, pa_indicator, pci_e0, pci_e1,
+ pci_e2, pci_e3, phase_flag, pll_bist, pll_bypassnl, pll_reset,
+ pri_mi2s, pri_mi2s_ws, prng_rosc, qdss_cti, qdss_gpio, qlink,
+ qspi0, qspi0_clk, qspi0_cs, qspi1, qspi1_clk, qspi1_cs,
+ qua_mi2s, qup0, qup1, qup2, qup3, qup4, qup5, qup6, qup7, qup8,
+ qup9, qup10, qup11, qup12, qup13, qup14, qup15, qup16, qup17,
+ qup18, qup19, qup_l4, qup_l5, qup_l6, rgmii, sd_write, sdc4,
+ sdc4_clk, sdc4_cmd, sec_mi2s, sp_cmu, spkr_i2s, ter_mi2s, tgu,
+ tsense_pwm1, tsense_pwm2, tsif1, tsif2, uim1, uim2, uim_batt,
+ usb0_phy, usb1_phy, usb2phy_ac, vfr_1, vsense_trigger,
+ wlan1_adc, wlan2_adc, wmss_reset ]
+
+ bias-disable: true
+ bias-pull-down: true
+ bias-pull-up: true
+ drive-strength: true
+ input-enable: true
+ output-high: true
+ output-low: true
+
+ required:
+ - pins
+ - function
+
+ additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ pinctrl@3100000 {
+ compatible = "qcom,sc8180x-tlmm";
+ reg = <0x03100000 0x300000>,
+ <0x03500000 0x700000>,
+ <0x03d00000 0x300000>;
+ reg-names = "west", "east", "south";
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 190>;
+
+ gpio-wo-subnode-state {
+ pins = "gpio1";
+ function = "gpio";
+ };
+
+ uart-w-subnodes-state {
+ rx {
+ pins = "gpio4";
+ function = "qup6";
+ bias-pull-up;
+ };
+
+ tx {
+ pins = "gpio5";
+ function = "qup6";
+ bias-disable;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sm8350-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm8350-pinctrl.yaml
new file mode 100644
index 000000000000..4f2667ea2805
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sm8350-pinctrl.yaml
@@ -0,0 +1,145 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,sm8350-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. SM8350 TLMM block
+
+maintainers:
+ - Vinod Koul <vkoul@kernel.org>
+
+description: |
+ This binding describes the Top Level Mode Multiplexer (TLMM) block found
+ in the SM8350 platform.
+
+allOf:
+ - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,sm8350-tlmm
+
+ reg:
+ maxItems: 1
+
+ interrupts: true
+ interrupt-controller: true
+ '#interrupt-cells': true
+ gpio-controller: true
+ gpio-reserved-ranges: true
+ '#gpio-cells': true
+ gpio-ranges: true
+ wakeup-parent: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+patternProperties:
+ '-state$':
+ oneOf:
+ - $ref: "#/$defs/qcom-sm8350-tlmm-state"
+ - patternProperties:
+ ".*":
+ $ref: "#/$defs/qcom-sm8350-tlmm-state"
+
+$defs:
+ qcom-sm8350-tlmm-state:
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use the standard properties listed below.
+ $ref: "qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state"
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ oneOf:
+ - pattern: "^gpio([0-9]|[1-9][0-9]|1[0-9][0-9]|20[0-3])$"
+ - enum: [ sdc1_clk, sdc1_cmd, sdc1_data, sdc2_clk, sdc2_cmd, sdc2_data ]
+ minItems: 1
+ maxItems: 36
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+
+ enum: [ atest_char, atest_usb, audio_ref, cam_mclk, cci_async,
+ cci_i2c, cci_timer, cmu_rng, coex_uart1, coex_uart2, cri_trng,
+ cri_trng0, cri_trng1, dbg_out, ddr_bist, ddr_pxi0, ddr_pxi1,
+ ddr_pxi2, ddr_pxi3, dp_hot, dp_lcd, gcc_gp1, gcc_gp2, gcc_gp3,
+ gpio, ibi_i3c, jitter_bist, lpass_slimbus, mdp_vsync, mdp_vsync0,
+ mdp_vsync1, mdp_vsync2, mdp_vsync3, mi2s0_data0, mi2s0_data1,
+ mi2s0_sck, mi2s0_ws, mi2s1_data0, mi2s1_data1, mi2s1_sck,
+ mi2s1_ws, mi2s2_data0, mi2s2_data1, mi2s2_sck, mi2s2_ws,
+ mss_grfc0, mss_grfc1, mss_grfc10, mss_grfc11, mss_grfc12,
+ mss_grfc2, mss_grfc3, mss_grfc4, mss_grfc5, mss_grfc6,
+ mss_grfc7, mss_grfc8, mss_grfc9, nav_gpio, pa_indicator,
+ pcie0_clkreqn, pcie1_clkreqn, phase_flag, pll_bist, pll_clk,
+ pri_mi2s, prng_rosc, qdss_cti, qdss_gpio, qlink0_enable,
+ qlink0_request, qlink0_wmss, qlink1_enable, qlink1_request,
+ qlink1_wmss, qlink2_enable, qlink2_request, qlink2_wmss, qspi0,
+ qspi1, qspi2, qspi3, qspi_clk, qspi_cs, qup0, qup1, qup10,
+ qup11, qup12, qup13, qup14, qup15, qup16, qup17, qup18, qup19,
+ qup2, qup3, qup4, qup5, qup6, qup7, qup8, qup9, qup_l4, qup_l5,
+ qup_l6, sd_write, sdc40, sdc41, sdc42, sdc43, sdc4_clk,
+ sdc4_cmd, sec_mi2s, tb_trig, tgu_ch0, tgu_ch1, tgu_ch2,
+ tgu_ch3, tsense_pwm1, tsense_pwm2, uim0_clk, uim0_data,
+ uim0_present, uim0_reset, uim1_clk, uim1_data, uim1_present,
+ uim1_reset, usb2phy_ac, usb_phy, vfr_0, vfr_1, vsense_trigger ]
+
+
+ bias-disable: true
+ bias-pull-down: true
+ bias-pull-up: true
+ drive-strength: true
+ input-enable: true
+ output-high: true
+ output-low: true
+
+ required:
+ - pins
+ - function
+
+ additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ pinctrl@f100000 {
+ compatible = "qcom,sm8350-tlmm";
+ reg = <0x0f100000 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 203>;
+
+ gpio-wo-subnode-state {
+ pins = "gpio1";
+ function = "gpio";
+ };
+
+ uart-w-subnodes-state {
+ rx {
+ pins = "gpio18";
+ function = "qup3";
+ bias-pull-up;
+ };
+
+ tx {
+ pins = "gpio19";
+ function = "qup3";
+ bias-disable;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,tlmm-common.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,tlmm-common.yaml
new file mode 100644
index 000000000000..3b37cf102d41
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,tlmm-common.yaml
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,tlmm-common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. Top Level Mode Multiplexer (TLMM) definitions
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description:
+ This defines the common properties used to describe all Qualcomm Top Level
+ Mode Multiplexer bindings and pinconf/pinmux states for these.
+
+properties:
+ interrupts:
+ description:
+ Specifies the TLMM summary IRQ
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ description:
+ Specifies the PIN numbers and Flags, as defined in
+ include/dt-bindings/interrupt-controller/irq.h
+ const: 2
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ description:
+ Specifying the pin number and flags, as defined in
+ include/dt-bindings/gpio/gpio.h
+ const: 2
+
+ gpio-ranges:
+ maxItems: 1
+
+ wakeup-parent:
+ description:
+ Specifying the interrupt-controller used to wake up the system when the
+ TLMM block has been powered down.
+ maxItems: 1
+
+ gpio-reserved-ranges:
+ description:
+ Pins can be reserved for trusted applications and thereby made inaccessible
+ to the OS. This property can be used to mark the pins whose resources
+ should not be accessed by the OS. Please see ../gpio/gpio.txt for more
+ information.
+
+required:
+ - interrupts
+ - interrupt-controller
+ - '#interrupt-cells'
+ - gpio-controller
+ - '#gpio-cells'
+ - gpio-ranges
+
+additionalProperties: true
+
+$defs:
+ qcom-tlmm-state:
+ allOf:
+ - $ref: pincfg-node.yaml#
+ - $ref: pinmux-node.yaml#
+
+ properties:
+ drive-strength:
+ enum: [2, 4, 6, 8, 10, 12, 14, 16]
+ default: 2
+ description:
+ Selects the drive strength for the specified pins, in mA.
+
+ bias-pull-down: true
+ bias-pull-up: true
+ bias-disable: true
+ input-enable: true
+ output-high: true
+ output-low: true
+
+ additionalProperties: true
+...
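Since qcom,tlmm-common.yaml itself carries no example, a client pin state using these common properties could look roughly as follows; the controller label, reserved range and GPIO number are illustrative only, and the function name comes from the per-SoC schema:

    &tlmm {
            /* e.g. hide four pins claimed by the secure world */
            gpio-reserved-ranges = <0 4>;

            uart-sleep-state {
                    pins = "gpio5";            /* illustrative pin */
                    function = "gpio";
                    drive-strength = <2>;      /* default per the schema above */
                    bias-pull-down;
            };
    };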
diff --git a/Documentation/devicetree/bindings/pinctrl/ralink,rt2880-pinmux.yaml b/Documentation/devicetree/bindings/pinctrl/ralink,rt2880-pinmux.yaml
index 7dea3e26d99e..b32f2676cab5 100644
--- a/Documentation/devicetree/bindings/pinctrl/ralink,rt2880-pinmux.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/ralink,rt2880-pinmux.yaml
@@ -15,39 +15,38 @@ description:
properties:
compatible:
- enum:
- - ralink,rt2880-pinmux
+ const: ralink,rt2880-pinmux
- pinctrl-0:
- description:
- A phandle to the node containing the subnodes containing default
- configurations. This is for pinctrl hogs.
+patternProperties:
+ '-pins$':
+ type: object
+ patternProperties:
+ '^(.*-)?pinmux$':
+ type: object
+ description: node for pinctrl.
+ $ref: pinmux-node.yaml#
+
+ properties:
+ groups:
+ description: Name of the pin group to use for the functions.
+ enum: [i2c, spi, uart1, uart2, uart3, rgmii1, rgmii2, mdio,
+ pcie, sdhci]
+ function:
+ description: The mux function to select
+ enum: [gpio, i2c, spi, uart1, uart2, uart3, rgmii1, rgmii2,
+ mdio, nand1, nand2, sdhci]
+
+ required:
+ - groups
+ - function
+
+ additionalProperties: false
- pinctrl-names:
- description:
- A pinctrl state named "default" can be defined.
- const: default
+ additionalProperties: false
required:
- compatible
-patternProperties:
- '[a-z0-9_-]+':
- if:
- type: object
- description: node for pinctrl.
- $ref: "pinmux-node.yaml"
- then:
- properties:
- groups:
- description: Name of the pin group to use for the functions.
- enum: [i2c, spi, uart1, uart2, uart3, rgmii1, rgmii2, mdio,
- pcie, sdhci]
- function:
- description: The mux function to select
- enum: [gpio, i2c, spi, uart1, uart2, uart3, rgmii1, rgmii2,
- mdio, nand1, nand2, sdhci]
-
additionalProperties: false
examples:
@@ -55,14 +54,9 @@ examples:
- |
pinctrl {
compatible = "ralink,rt2880-pinmux";
- pinctrl-names = "default";
- pinctrl-0 = <&state_default>;
-
- state_default: pinctrl0 {
- };
- i2c_pins: i2c0 {
- i2c0 {
+ i2c_pins: i2c0-pins {
+ pinmux {
groups = "i2c";
function = "i2c";
};
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml
index 5b5b1b9d2ec7..ac4e068aa03f 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml
@@ -43,11 +43,12 @@ properties:
- renesas,pfc-r8a77980 # R-Car V3H
- renesas,pfc-r8a77990 # R-Car E3
- renesas,pfc-r8a77995 # R-Car D3
+ - renesas,pfc-r8a779a0 # R-Car V3U
- renesas,pfc-sh73a0 # SH-Mobile AG5
reg:
minItems: 1
- maxItems: 2
+ maxItems: 10
gpio-controller: true
@@ -76,11 +77,10 @@ required:
if:
properties:
compatible:
- items:
- enum:
- - renesas,pfc-r8a73a4
- - renesas,pfc-r8a7740
- - renesas,pfc-sh73a0
+ enum:
+ - renesas,pfc-r8a73a4
+ - renesas,pfc-r8a7740
+ - renesas,pfc-sh73a0
then:
required:
- interrupts-extended
diff --git a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
index 7734ab6fec44..38a1416fd2cd 100644
--- a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
@@ -336,7 +336,7 @@ Example 3: A uart client node that supports 'default' and 'flow-control' states.
interrupts = <0 52 0>;
pinctrl-names = "default", "flow-control;
pinctrl-0 = <&uart0_data>;
- pinctrl-1 = <&uart0_data &uart0_fctl>;
+ pinctrl-1 = <&uart0_data>, <&uart0_fctl>;
};
Example 4: Set up the default pin state for uart controller.
diff --git a/Documentation/devicetree/bindings/power/brcm,bcm-pmb.yaml b/Documentation/devicetree/bindings/power/brcm,bcm-pmb.yaml
new file mode 100644
index 000000000000..40b08d83c80b
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/brcm,bcm-pmb.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/brcm,bcm-pmb.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom PMB (Power Management Bus) controller
+
+description: This document describes Broadcom's PMB controller. It supports
+ powering various types of connected devices (e.g. PCIe, USB, SATA).
+
+maintainers:
+ - Rafał Miłecki <rafal@milecki.pl>
+
+properties:
+ compatible:
+ enum:
+ - brcm,bcm4908-pmb
+
+ reg:
+ description: register space of one or more buses
+ maxItems: 1
+
+ big-endian:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: Set this flag if the block operates in big-endian mode.
+
+ "#power-domain-cells":
+ description: cell specifies device ID (see bcm-pmb.h)
+ const: 1
+
+required:
+ - reg
+ - "#power-domain-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/soc/bcm-pmb.h>
+
+ pmb: power-controller@802800e0 {
+ compatible = "brcm,bcm4908-pmb";
+ reg = <0x802800e0 0x40>;
+ #power-domain-cells = <1>;
+ };
+
+ foo {
+ power-domains = <&pmb BCM_PMB_PCIE0>;
+ };
diff --git a/Documentation/devicetree/bindings/power/mediatek,power-controller.yaml b/Documentation/devicetree/bindings/power/mediatek,power-controller.yaml
index d14cb9bac849..f234a756c193 100644
--- a/Documentation/devicetree/bindings/power/mediatek,power-controller.yaml
+++ b/Documentation/devicetree/bindings/power/mediatek,power-controller.yaml
@@ -23,6 +23,7 @@ properties:
compatible:
enum:
+ - mediatek,mt8167-power-controller
- mediatek,mt8173-power-controller
- mediatek,mt8183-power-controller
- mediatek,mt8192-power-controller
@@ -59,6 +60,7 @@ patternProperties:
reg:
description: |
Power domain index. Valid values are defined in:
+ "include/dt-bindings/power/mt8167-power.h" - for MT8167 type power domain.
"include/dt-bindings/power/mt8173-power.h" - for MT8173 type power domain.
"include/dt-bindings/power/mt8183-power.h" - for MT8183 type power domain.
"include/dt-bindings/power/mt8192-power.h" - for MT8192 type power domain.
@@ -82,6 +84,9 @@ patternProperties:
be specified in order, adding first the BASIC clocks followed by the
SUBSYS clocks.
+ domain-supply:
+ description: domain regulator supply.
+
mediatek,infracfg:
$ref: /schemas/types.yaml#/definitions/phandle
description: phandle to the device containing the INFRACFG register range.
@@ -130,6 +135,9 @@ patternProperties:
be specified in order, adding first the BASIC clocks followed by the
SUBSYS clocks.
+ domain-supply:
+ description: domain regulator supply.
+
mediatek,infracfg:
$ref: /schemas/types.yaml#/definitions/phandle
description: phandle to the device containing the INFRACFG register range.
@@ -178,6 +186,9 @@ patternProperties:
be specified in order, adding first the BASIC clocks followed by the
SUBSYS clocks.
+ domain-supply:
+ description: domain regulator supply.
+
mediatek,infracfg:
$ref: /schemas/types.yaml#/definitions/phandle
description: phandle to the device containing the INFRACFG register range.
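A power-domain sub-node making use of the new domain-supply property might look like the following sketch; the domain index and the regulator label are placeholders rather than values taken from a real board file:

    power-domain@7 {
            reg = <7>;                          /* index from the per-SoC power header */
            domain-supply = <&mt6358_vgpu_reg>; /* hypothetical board regulator */
            #power-domain-cells = <0>;
    };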
diff --git a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
index 64825128ee97..1ea21acbbd55 100644
--- a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
+++ b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
@@ -19,6 +19,7 @@ properties:
- qcom,msm8916-rpmpd
- qcom,msm8939-rpmpd
- qcom,msm8976-rpmpd
+ - qcom,msm8994-rpmpd
- qcom,msm8996-rpmpd
- qcom,msm8998-rpmpd
- qcom,qcs404-rpmpd
diff --git a/Documentation/devicetree/bindings/power/renesas,apmu.yaml b/Documentation/devicetree/bindings/power/renesas,apmu.yaml
index 60a23b3beb40..391897d897f2 100644
--- a/Documentation/devicetree/bindings/power/renesas,apmu.yaml
+++ b/Documentation/devicetree/bindings/power/renesas,apmu.yaml
@@ -52,5 +52,5 @@ examples:
apmu@e6152000 {
compatible = "renesas,r8a7791-apmu", "renesas,apmu";
reg = <0xe6152000 0x188>;
- cpus = <&cpu0 &cpu1>;
+ cpus = <&cpu0>, <&cpu1>;
};
diff --git a/Documentation/devicetree/bindings/power/supply/battery.yaml b/Documentation/devicetree/bindings/power/supply/battery.yaml
index 0c7e2e44793b..c3b4b7543591 100644
--- a/Documentation/devicetree/bindings/power/supply/battery.yaml
+++ b/Documentation/devicetree/bindings/power/supply/battery.yaml
@@ -83,21 +83,18 @@ properties:
for each of the battery capacity lookup table.
operating-range-celsius:
- $ref: /schemas/types.yaml#/definitions/uint32-array
description: operating temperature range of a battery
items:
- description: minimum temperature at which battery can operate
- description: maximum temperature at which battery can operate
ambient-celsius:
- $ref: /schemas/types.yaml#/definitions/uint32-array
description: safe range of ambient temperature
items:
- description: alert when ambient temperature is lower than this value
- description: alert when ambient temperature is higher than this value
alert-celsius:
- $ref: /schemas/types.yaml#/definitions/uint32-array
description: safe range of battery temperature
items:
- description: alert when battery temperature is lower than this value
diff --git a/Documentation/devicetree/bindings/power/supply/bq2515x.yaml b/Documentation/devicetree/bindings/power/supply/bq2515x.yaml
index 75a56773be4a..813d6afde606 100644
--- a/Documentation/devicetree/bindings/power/supply/bq2515x.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq2515x.yaml
@@ -50,7 +50,6 @@ properties:
maxItems: 1
input-current-limit-microamp:
- $ref: /schemas/types.yaml#/definitions/uint32
description: Maximum input current in micro Amps.
minimum: 50000
maximum: 500000
diff --git a/Documentation/devicetree/bindings/power/supply/bq256xx.yaml b/Documentation/devicetree/bindings/power/supply/bq256xx.yaml
new file mode 100644
index 000000000000..18b54783e11a
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/bq256xx.yaml
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/power/supply/bq256xx.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: TI bq256xx Switch Mode Buck Charger
+
+maintainers:
+ - Ricardo Rivera-Matos <r-rivera-matos@ti.com>
+
+description: |
+ The bq256xx devices are a family of highly-integrated battery charge
+ management and system power management ICs for single cell Li-ion and Li-
+ polymer batteries.
+
+ Datasheets:
+ - https://www.ti.com/lit/ds/symlink/bq25600.pdf
+ - https://www.ti.com/lit/ds/symlink/bq25601.pdf
+ - https://www.ti.com/lit/ds/symlink/bq25600d.pdf
+ - https://www.ti.com/lit/ds/symlink/bq25601d.pdf
+ - https://www.ti.com/lit/ds/symlink/bq25611d.pdf
+ - https://www.ti.com/lit/ds/symlink/bq25618.pdf
+ - https://www.ti.com/lit/ds/symlink/bq25619.pdf
+
+properties:
+ compatible:
+ enum:
+ - ti,bq25600
+ - ti,bq25601
+ - ti,bq25600d
+ - ti,bq25601d
+ - ti,bq25611d
+ - ti,bq25618
+ - ti,bq25619
+
+ reg:
+ maxItems: 1
+
+ ti,watchdog-timeout-ms:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0
+ description: |
+ Watchdog timer in ms. 0 (default) disables the watchdog
+ minimum: 0
+ maximum: 160000
+ enum: [ 0, 40000, 80000, 160000]
+
+ input-voltage-limit-microvolt:
+ description: |
+ Minimum input voltage limit in µV with a 100000 µV step
+ minimum: 3900000
+ maximum: 5400000
+
+ input-current-limit-microamp:
+ description: |
+ Maximum input current limit in µA with a 100000 µA step
+ minimum: 100000
+ maximum: 3200000
+
+ monitored-battery:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: phandle to the battery node being monitored
+
+ interrupts:
+ maxItems: 1
+ description: |
+ The interrupt line sends an active-low, 256 μs pulse to the host to report
+ the charger device status and faults.
+
+required:
+ - compatible
+ - reg
+ - monitored-battery
+
+additionalProperties: false
+
+examples:
+ - |
+ bat: battery {
+ compatible = "simple-battery";
+ constant-charge-current-max-microamp = <2040000>;
+ constant-charge-voltage-max-microvolt = <4352000>;
+ precharge-current-microamp = <180000>;
+ charge-term-current-microamp = <180000>;
+ };
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+
+ clock-frequency = <400000>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ charger@6b {
+ compatible = "ti,bq25601";
+ reg = <0x6b>;
+ monitored-battery = <&bat>;
+
+ interrupt-parent = <&gpio1>;
+ interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
+ ti,watchdog-timeout-ms = <40000>;
+
+ input-voltage-limit-microvolt = <4500000>;
+ input-current-limit-microamp = <2400000>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/power/supply/bq25980.yaml b/Documentation/devicetree/bindings/power/supply/bq25980.yaml
index f6b3dd4093ca..06eca6667f67 100644
--- a/Documentation/devicetree/bindings/power/supply/bq25980.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq25980.yaml
@@ -70,6 +70,7 @@ properties:
description: Enables bypass mode at boot time
interrupts:
+ maxItems: 1
description: |
Indicates that the device state has changed.
diff --git a/Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml b/Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml
new file mode 100644
index 000000000000..1f88c9e013f4
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright (C) 2020 Topic Embedded Products
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/power/supply/ltc4162-l.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Linear Technology (Analog Devices) LTC4162-L Charger
+
+maintainers:
+ - Mike Looijmans <mike.looijmans@topic.nl>
+
+description: |
+ The LTC®4162-L is an advanced monolithic synchronous step-down switching
+ battery charger and PowerPath (TM) manager that seamlessly manages power
+ distribution between input sources such as wall adapters, backplanes, solar
+ panels, etc., and a rechargeable Lithium-Ion/Polymer battery.
+
+ Specifications about the charger can be found at:
+ https://www.analog.com/en/products/ltc4162-s.html
+
+properties:
+ compatible:
+ enum:
+ - lltc,ltc4162-l
+
+ reg:
+ maxItems: 1
+ description: I2C address of the charger.
+
+ lltc,rsnsb-micro-ohms:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Battery sense resistor in microohm.
+ minimum: 1000
+
+ lltc,rsnsi-micro-ohms:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Input current sense resistor in microohm.
+ minimum: 1000
+
+ lltc,cell-count:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Number of battery cells. If not provided, will be obtained from the chip
+ once the external power is applied. Omit this when the number of cells
+ is somewhat dynamic. Without it, several measurements will return 0 until
+ the charger is connected to an external supply.
+
+required:
+ - compatible
+ - reg
+ - lltc,rsnsb-micro-ohms
+ - lltc,rsnsi-micro-ohms
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ charger: battery-charger@68 {
+ compatible = "lltc,ltc4162-l";
+ reg = <0x68>;
+ lltc,rsnsb-micro-ohms = <10000>;
+ lltc,rsnsi-micro-ohms = <16000>;
+ lltc,cell-count = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/ptp/ptp-idtcm.yaml b/Documentation/devicetree/bindings/ptp/ptp-idtcm.yaml
index 239b49fad805..658cec67743e 100644
--- a/Documentation/devicetree/bindings/ptp/ptp-idtcm.yaml
+++ b/Documentation/devicetree/bindings/ptp/ptp-idtcm.yaml
@@ -59,9 +59,7 @@ additionalProperties: false
examples:
- |
- i2c@1 {
- compatible = "abc,acme-1234";
- reg = <0x01 0x400>;
+ i2c {
#address-cells = <1>;
#size-cells = <0>;
phc@5b {
diff --git a/Documentation/devicetree/bindings/pwm/pwm-sifive.yaml b/Documentation/devicetree/bindings/pwm/pwm-sifive.yaml
index 5ac25275d8bf..84e66913d042 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-sifive.yaml
+++ b/Documentation/devicetree/bindings/pwm/pwm-sifive.yaml
@@ -25,12 +25,15 @@ description:
properties:
compatible:
items:
- - const: sifive,fu540-c000-pwm
+ - enum:
+ - sifive,fu540-c000-pwm
+ - sifive,fu740-c000-pwm
- const: sifive,pwm0
description:
Should be "sifive,<chip>-pwm" and "sifive,pwm<version>". Supported
- compatible strings are "sifive,fu540-c000-pwm" for the SiFive PWM v0
- as integrated onto the SiFive FU540 chip, and "sifive,pwm0" for the
+ compatible strings are "sifive,fu540-c000-pwm" and
+ "sifive,fu740-c000-pwm" for the SiFive PWM v0 as integrated onto the
+ SiFive FU540 and FU740 chip respectively, and "sifive,pwm0" for the
SiFive PWM v0 IP block with no chip integration tweaks.
Please refer to sifive-blocks-ip-versioning.txt for details.
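For the newly allowed FU740 integration, a node would then carry both compatibles, along the lines of the sketch below; the register address, clock specifier and interrupt numbers are illustrative and not copied from the FU740 device tree:

    pwm0: pwm@10020000 {
            compatible = "sifive,fu740-c000-pwm", "sifive,pwm0";
            reg = <0x10020000 0x1000>;
            clocks = <&prci 30>;                 /* hypothetical clock specifier */
            interrupts = <42>, <43>, <44>, <45>; /* illustrative numbers */
            #pwm-cells = <3>;
    };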
diff --git a/Documentation/devicetree/bindings/pwm/pwm-zx.txt b/Documentation/devicetree/bindings/pwm/pwm-zx.txt
deleted file mode 100644
index 3c8fe7aa8269..000000000000
--- a/Documentation/devicetree/bindings/pwm/pwm-zx.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-ZTE ZX PWM controller
-
-Required properties:
- - compatible: Should be "zte,zx296718-pwm".
- - reg: Physical base address and length of the controller's registers.
- - clocks : The phandle and specifier referencing the controller's clocks.
- - clock-names: "pclk" for PCLK, "wclk" for WCLK to the PWM controller. The
- PCLK is for register access, while WCLK is the reference clock for
- calculating period and duty cycles.
- - #pwm-cells: Should be 3. See pwm.yaml in this directory for a description of
- the cells format.
-
-Example:
-
- pwm: pwm@1439000 {
- compatible = "zte,zx296718-pwm";
- reg = <0x1439000 0x1000>;
- clocks = <&lsp1crm LSP1_PWM_PCLK>,
- <&lsp1crm LSP1_PWM_WCLK>;
- clock-names = "pclk", "wclk";
- #pwm-cells = <3>;
- };
diff --git a/Documentation/devicetree/bindings/regulator/dlg,da9121.yaml b/Documentation/devicetree/bindings/regulator/dlg,da9121.yaml
index 6f2164f7bc57..228018c87bea 100644
--- a/Documentation/devicetree/bindings/regulator/dlg,da9121.yaml
+++ b/Documentation/devicetree/bindings/regulator/dlg,da9121.yaml
@@ -62,7 +62,6 @@ properties:
description: IRQ line information.
dlg,irq-polling-delay-passive-ms:
- $ref: "/schemas/types.yaml#/definitions/uint32"
minimum: 1000
maximum: 10000
description: |
diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
index d3d0dc13dd8b..8850c01bd470 100644
--- a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
@@ -72,11 +72,9 @@ properties:
startup-delay-us:
description: startup time in microseconds
- $ref: /schemas/types.yaml#/definitions/uint32
off-on-delay-us:
description: off delay time in microseconds
- $ref: /schemas/types.yaml#/definitions/uint32
enable-active-high:
description:
diff --git a/Documentation/devicetree/bindings/regulator/max8997-regulator.txt b/Documentation/devicetree/bindings/regulator/max8997-regulator.txt
index 6fe825b8ac1b..b53c5e2b335f 100644
--- a/Documentation/devicetree/bindings/regulator/max8997-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/max8997-regulator.txt
@@ -35,6 +35,7 @@ Optional properties:
- interrupts: Interrupt specifiers for two interrupt sources.
- First interrupt specifier is for 'irq1' interrupt.
- Second interrupt specifier is for 'alert' interrupt.
+- charger-supply: regulator node for charging current.
- max8997,pmic-buck1-uses-gpio-dvs: 'buck1' can be controlled by gpio dvs.
- max8997,pmic-buck2-uses-gpio-dvs: 'buck2' can be controlled by gpio dvs.
- max8997,pmic-buck5-uses-gpio-dvs: 'buck5' can be controlled by gpio dvs.
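On a board the new charger-supply property simply points at the regulator feeding the charging current, for example (the I2C address and regulator label are placeholders):

    max8997-pmic@66 {
            compatible = "maxim,max8997-pmic";
            reg = <0x66>;
            charger-supply = <&charger_reg>;
            /* interrupts and buck/ldo regulator sub-nodes omitted */
    };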
diff --git a/Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt b/Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt
index d86584ed4d93..451cc4e86b01 100644
--- a/Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt
@@ -4,7 +4,8 @@ Required properties:
- compatible: "microchip,mcp16502"
- reg: I2C slave address
- lpm-gpios: GPIO for LPM pin. Note that this GPIO *must* remain high during
- suspend-to-ram, keeping the PMIC into HIBERNATE mode.
+ suspend-to-ram, keeping the PMIC in HIBERNATE mode; this
+ property is optional.
- regulators: A node that houses a sub-node for each regulator within
the device. Each sub-node is identified using the node's
name. The content of each sub-node is defined by the
diff --git a/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml b/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml
new file mode 100644
index 000000000000..61dd5af80db6
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/mt6315-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek MT6315 Regulator
+
+maintainers:
+ - Hsin-Hsiung Wang <hsin-hsiung.wang@mediatek.com>
+
+description: |
+ The MT6315 is a power management IC (PMIC) configurable over SPMI
+ that contains 4 BUCK outputs which can be combined with each other
+ by different efuse settings.
+
+properties:
+ compatible:
+ const: mediatek,mt6315-regulator
+
+ reg:
+ maxItems: 1
+
+ regulators:
+ type: object
+ description: List of regulators and their properties
+
+ patternProperties:
+ "^vbuck[1-4]$":
+ type: object
+ $ref: "regulator.yaml#"
+
+ properties:
+ regulator-name:
+ pattern: "^vbuck[1-4]$"
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - regulators
+
+additionalProperties: false
+
+examples:
+ - |
+ pmic@6 {
+ compatible = "mediatek,mt6315-regulator";
+ reg = <0x6 0>;
+
+ regulators {
+ vbuck1 {
+ regulator-compatible = "vbuck1";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1193750>;
+ regulator-enable-ramp-delay = <256>;
+ regulator-allowed-modes = <0 1 2 4>;
+ };
+
+ vbuck3 {
+ regulator-compatible = "vbuck3";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1193750>;
+ regulator-enable-ramp-delay = <256>;
+ regulator-allowed-modes = <0 1 2 4>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml b/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
index c2b0a8b6da1e..f70f2e758a00 100644
--- a/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
@@ -87,6 +87,11 @@ properties:
additionalProperties: false
+ sd-vsel-gpios:
+ description: GPIO that is used to switch LDO5 between being configured by
+ LDO5CTRL_L or LDO5CTRL_H register. Use this if the SD_VSEL signal is
+ connected to a host GPIO.
+
required:
- compatible
- reg
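When SD_VSEL is routed to a host GPIO as the new property describes, the PMIC node could gain a line such as the following; the GPIO controller, line number and flag are placeholders and assume dt-bindings/gpio/gpio.h is included:

    pmic@25 {
            compatible = "nxp,pca9450a";
            reg = <0x25>;
            sd-vsel-gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
            /* regulators { ... }; omitted */
    };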
diff --git a/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml b/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
index 956156fe52a3..8761437ed8ad 100644
--- a/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
@@ -62,8 +62,11 @@ properties:
$ref: "/schemas/types.yaml#/definitions/uint32"
minimum: 2100
maximum: 4500
+ deprecated: true
description:
BUCK regulators current limit in mA.
+ This property is deprecated, please use
+ "regulator-max-microamp" instead.
Listed current limits in mA are,
2100 (default)
@@ -73,21 +76,11 @@ properties:
nxp,phase-shift:
$ref: "/schemas/types.yaml#/definitions/uint32"
- minimum: 45
- maximum: 0
+ default: 0
+ enum: [ 0, 45, 90, 135, 180, 225, 270, 315 ]
description:
BUCK regulators phase shift control in degrees.
- Listed phase shift control values in degrees are,
- 45
- 90
- 135
- 180
- 225
- 270
- 315
- 0 (default)
-
unevaluatedProperties: false
"^vsnvs$":
diff --git a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
index 7d462b899473..ce1e04354006 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
@@ -50,6 +50,8 @@ First Level Nodes - PMIC
"qcom,pm8350-rpmh-regulators"
"qcom,pm8350c-rpmh-regulators"
"qcom,pm8998-rpmh-regulators"
+ "qcom,pmc8180-rpmh-regulators"
+ "qcom,pmc8180c-rpmh-regulators"
"qcom,pmi8998-rpmh-regulators"
"qcom,pm6150-rpmh-regulators"
"qcom,pm6150l-rpmh-regulators"
diff --git a/Documentation/devicetree/bindings/regulator/qcom-labibb-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom-labibb-regulator.yaml
index 53853ec20fe2..cf784bd1f5e5 100644
--- a/Documentation/devicetree/bindings/regulator/qcom-labibb-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom-labibb-regulator.yaml
@@ -22,11 +22,17 @@ properties:
type: object
properties:
+ qcom,soft-start-us:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Regulator soft start time in microseconds.
+ enum: [200, 400, 600, 800]
+ default: 200
interrupts:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
description:
- Short-circuit interrupt for lab.
+ Short-circuit and over-current interrupts for lab.
required:
- interrupts
@@ -35,11 +41,17 @@ properties:
type: object
properties:
+ qcom,discharge-resistor-kohms:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Discharge resistor value in kilohms.
+ enum: [300, 64, 32, 16]
+ default: 300
interrupts:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
description:
- Short-circuit interrupt for lab.
+ Short-circuit and over-current interrupts for ibb.
required:
- interrupts
@@ -57,13 +69,15 @@ examples:
compatible = "qcom,pmi8998-lab-ibb";
lab {
- interrupts = <0x3 0x0 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "sc-err";
+ interrupts = <0x3 0xde 0x1 IRQ_TYPE_EDGE_RISING>,
+ <0x3 0xde 0x0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "sc-err", "ocp";
};
ibb {
- interrupts = <0x3 0x2 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "sc-err";
+ interrupts = <0x3 0xdc 0x2 IRQ_TYPE_EDGE_RISING>,
+ <0x3 0xdc 0x0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "sc-err", "ocp";
};
};
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rt4831-regulator.yaml b/Documentation/devicetree/bindings/regulator/richtek,rt4831-regulator.yaml
new file mode 100644
index 000000000000..d9c23333e157
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/richtek,rt4831-regulator.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/richtek,rt4831-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RT4831 Display Bias Voltage Regulator
+
+maintainers:
+ - ChiYuan Huang <cy_huang@richtek.com>
+
+description: |
+ RT4831 is a multifunctional device that can provide power to the LCD display
+ and LCD backlight.
+
+ For Display Bias Voltage DSVP and DSVN, the output range is about 4V to 6.5V.
+ It is sufficient to meet the current LCD power requirement.
+
+ DSVLCM is a boost regulator internal to the IC that supplies the input
+ power for DSVP and DSVN. Its voltage should be configured 0.15V to 0.2V
+ higher than the voltage needed for DSVP and DSVN. A larger voltage gap
+ helps compensate the voltage drop under heavy loading, but it also makes
+ the power efficiency worse; it is a trade-off.
+
+ Datasheet is available at
+ https://www.richtek.com/assets/product_file/RT4831A/DS4831A-05.pdf
+
+patternProperties:
+ "^DSV(LCM|P|N)$":
+ type: object
+ $ref: regulator.yaml#
+ description:
+ Properties for single Display Bias Voltage regulator.
+
+additionalProperties: false
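The schema above ships without an example of its own; inside the parent RT4831 node the three regulators could be described roughly as follows, with the DSVP/DSVN limits matching the 4V to 6.5V range mentioned in the description and DSVLCM left unconstrained here (per the description it should end up 0.15V to 0.2V above them):

    DSVLCM {
            /* boost stage feeding DSVP/DSVN */
    };

    DSVP {
            regulator-name = "rt4831-dsvp";
            regulator-min-microvolt = <4000000>;
            regulator-max-microvolt = <6500000>;
    };

    DSVN {
            regulator-name = "rt4831-dsvn";
            regulator-min-microvolt = <4000000>;
            regulator-max-microvolt = <6500000>;
    };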
diff --git a/Documentation/devicetree/bindings/remoteproc/ingenic,vpu.yaml b/Documentation/devicetree/bindings/remoteproc/ingenic,vpu.yaml
index c019f9fbe916..d0aa91bbf5e5 100644
--- a/Documentation/devicetree/bindings/remoteproc/ingenic,vpu.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/ingenic,vpu.yaml
@@ -44,7 +44,7 @@ properties:
- const: vpu
interrupts:
- description: VPU hardware interrupt
+ maxItems: 1
required:
- compatible
diff --git a/Documentation/devicetree/bindings/remoteproc/mtk,scp.txt b/Documentation/devicetree/bindings/remoteproc/mtk,scp.txt
index 3ba668bab14b..3f5f78764b60 100644
--- a/Documentation/devicetree/bindings/remoteproc/mtk,scp.txt
+++ b/Documentation/devicetree/bindings/remoteproc/mtk,scp.txt
@@ -6,10 +6,10 @@ Mediatek SoCs.
Required properties:
- compatible Should be "mediatek,mt8183-scp"
-- reg Should contain the address ranges for the two memory
- regions, SRAM and CFG.
-- reg-names Contains the corresponding names for the two memory
- regions. These should be named "sram" & "cfg".
+- reg Should contain the address ranges for memory regions:
+ SRAM, CFG, and L1TCM.
+- reg-names Contains the corresponding names for the memory regions:
+ "sram", "cfg", and "l1tcm".
- clocks Clock for co-processor (See: ../clock/clock-bindings.txt)
- clock-names Contains the corresponding name for the clock. This
should be named "main".
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
index 54737024da20..1c330a8941f9 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
@@ -25,6 +25,10 @@ on the Qualcomm ADSP Hexagon core.
"qcom,sm8250-adsp-pas"
"qcom,sm8250-cdsp-pas"
"qcom,sm8250-slpi-pas"
+ "qcom,sm8350-adsp-pas"
+ "qcom,sm8350-cdsp-pas"
+ "qcom,sm8350-slpi-pas"
+ "qcom,sm8350-mpss-pas"
- interrupts-extended:
Usage: required
@@ -51,10 +55,14 @@ on the Qualcomm ADSP Hexagon core.
qcom,sm8250-adsp-pas:
qcom,sm8250-cdsp-pas:
qcom,sm8250-slpi-pas:
+ qcom,sm8350-adsp-pas:
+ qcom,sm8350-cdsp-pas:
+ qcom,sm8350-slpi-pas:
must be "wdog", "fatal", "ready", "handover", "stop-ack"
qcom,qcs404-wcss-pas:
qcom,sc7180-mpss-pas:
qcom,sm8150-mpss-pas:
+ qcom,sm8350-mpss-pas:
must be "wdog", "fatal", "ready", "handover", "stop-ack",
"shutdown-ack"
@@ -114,13 +122,17 @@ on the Qualcomm ADSP Hexagon core.
qcom,sm8150-adsp-pas:
qcom,sm8150-cdsp-pas:
qcom,sm8250-cdsp-pas:
+ qcom,sm8350-cdsp-pas:
must be "cx", "load_state"
qcom,sc7180-mpss-pas:
qcom,sm8150-mpss-pas:
+ qcom,sm8350-mpss-pas:
must be "cx", "load_state", "mss"
qcom,sm8250-adsp-pas:
+ qcom,sm8350-adsp-pas:
qcom,sm8150-slpi-pas:
qcom,sm8250-slpi-pas:
+ qcom,sm8350-slpi-pas:
must be "lcx", "lmx", "load_state"
- memory-region:
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt b/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt
index cc0b7fc1c29b..da09c0d79ac0 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt
@@ -80,6 +80,7 @@ and its resource dependencies. It is described by the following properties:
Definition: must be one of:
"qcom,wcn3620",
"qcom,wcn3660",
+ "qcom,wcn3660b",
"qcom,wcn3680"
- clocks:
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,omap-remoteproc.yaml b/Documentation/devicetree/bindings/remoteproc/ti,omap-remoteproc.yaml
index 084960a8f17a..1a1159097a2a 100644
--- a/Documentation/devicetree/bindings/remoteproc/ti,omap-remoteproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/ti,omap-remoteproc.yaml
@@ -70,10 +70,13 @@ properties:
the firmware image.
clocks:
+ maxItems: 1
description: |
Main functional clock for the remote processor
resets:
+ minItems: 1
+ maxItems: 2
description: |
Reset handles for the remote processor
diff --git a/Documentation/devicetree/bindings/reset/brcm,bcm4908-misc-pcie-reset.yaml b/Documentation/devicetree/bindings/reset/brcm,bcm4908-misc-pcie-reset.yaml
new file mode 100644
index 000000000000..88aebb370838
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/brcm,bcm4908-misc-pcie-reset.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/brcm,bcm4908-misc-pcie-reset.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom MISC block PCIe reset controller
+
+description: This document describes the reset controller handling PCIe PERST#
+ signals. On BCM4908 it is part of the MISC block.
+
+maintainers:
+ - Rafał Miłecki <rafal@milecki.pl>
+
+properties:
+ compatible:
+ const: brcm,bcm4908-misc-pcie-reset
+
+ reg:
+ maxItems: 1
+
+ "#reset-cells":
+ description: PCIe core id
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ reset-controller@ff802644 {
+ compatible = "brcm,bcm4908-misc-pcie-reset";
+ reg = <0xff802644 0x04>;
+ #reset-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/reset/canaan,k210-rst.yaml b/Documentation/devicetree/bindings/reset/canaan,k210-rst.yaml
new file mode 100644
index 000000000000..53e4ede9c0bd
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/canaan,k210-rst.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/canaan,k210-rst.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Canaan Kendryte K210 Reset Controller Device Tree Bindings
+
+maintainers:
+ - Damien Le Moal <damien.lemoal@wdc.com>
+
+description: |
+ Canaan Kendryte K210 reset controller, which handles the reset registers
+ supplied by the SoC system controller for the various peripherals
+ of the SoC. The K210 reset controller node must be defined as a child
+ node of the K210 system controller node.
+
+ See also:
+ - dt-bindings/reset/k210-rst.h
+
+properties:
+ compatible:
+ const: canaan,k210-rst
+
+ '#reset-cells':
+ const: 1
+
+required:
+ - '#reset-cells'
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/reset/k210-rst.h>
+ sysrst: reset-controller {
+ compatible = "canaan,k210-rst";
+ #reset-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.txt b/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.txt
deleted file mode 100644
index 2df4bddeb688..000000000000
--- a/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-Hisilicon System Reset Controller
-======================================
-
-Please also refer to reset.txt in this directory for common reset
-controller binding usage.
-
-The reset controller registers are part of the system-ctl block on
-hi3660 and hi3670 SoCs.
-
-Required properties:
-- compatible: should be one of the following:
- "hisilicon,hi3660-reset" for HI3660
- "hisilicon,hi3670-reset", "hisilicon,hi3660-reset" for HI3670
-- hisi,rst-syscon: phandle of the reset's syscon.
-- #reset-cells : Specifies the number of cells needed to encode a
- reset source. The type shall be a <u32> and the value shall be 2.
-
- Cell #1 : offset of the reset assert control
- register from the syscon register base
- offset + 4: deassert control register
- offset + 8: status control register
- Cell #2 : bit position of the reset in the reset control register
-
-Example:
- iomcu: iomcu@ffd7e000 {
- compatible = "hisilicon,hi3660-iomcu", "syscon";
- reg = <0x0 0xffd7e000 0x0 0x1000>;
- };
-
- iomcu_rst: iomcu_rst_controller {
- compatible = "hisilicon,hi3660-reset";
- hisi,rst-syscon = <&iomcu>;
- #reset-cells = <2>;
- };
-
-Specifying reset lines connected to IP modules
-==============================================
-example:
-
- i2c0: i2c@..... {
- ...
- resets = <&iomcu_rst 0x20 3>; /* offset: 0x20; bit: 3 */
- ...
- };
diff --git a/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.yaml b/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.yaml
new file mode 100644
index 000000000000..9bf40952e5b7
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/hisilicon,hi3660-reset.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Hisilicon System Reset Controller
+
+maintainers:
+ - Wei Xu <xuwei5@hisilicon.com>
+
+description: |
+ Please also refer to reset.txt in this directory for common reset
+ controller binding usage.
+ The reset controller registers are part of the system-ctl block on
+ hi3660 and hi3670 SoCs.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: hisilicon,hi3660-reset
+ - items:
+ - const: hisilicon,hi3670-reset
+ - const: hisilicon,hi3660-reset
+
+ hisilicon,rst-syscon:
+ description: phandle of the reset's syscon.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ '#reset-cells':
+ description: |
+ Specifies the number of cells needed to encode a reset source.
+ Cell #1 : offset of the reset assert control register from the syscon
+ register base
+ offset + 4: deassert control register
+ offset + 8: status control register
+ Cell #2 : bit position of the reset in the reset control register
+ const: 2
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/hi3660-clock.h>
+
+ iomcu: iomcu@ffd7e000 {
+ compatible = "hisilicon,hi3660-iomcu", "syscon";
+ reg = <0xffd7e000 0x1000>;
+ };
+
+ iomcu_rst: iomcu_rst_controller {
+ compatible = "hisilicon,hi3660-reset";
+ hisilicon,rst-syscon = <&iomcu>;
+ #reset-cells = <2>;
+ };
+
+ /* Specifying reset lines connected to IP modules */
+ i2c@ffd71000 {
+ compatible = "snps,designware-i2c";
+ reg = <0xffd71000 0x1000>;
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <400000>;
+ clocks = <&crg_ctrl HI3660_CLK_GATE_I2C0>;
+ resets = <&iomcu_rst 0x20 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pmx_func &i2c0_cfg_func>;
+ status = "disabled";
+ };
+...
diff --git a/Documentation/devicetree/bindings/reset/sirf,rstc.txt b/Documentation/devicetree/bindings/reset/sirf,rstc.txt
deleted file mode 100644
index 0505de742d30..000000000000
--- a/Documentation/devicetree/bindings/reset/sirf,rstc.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-CSR SiRFSoC Reset Controller
-======================================
-
-Please also refer to reset.txt in this directory for common reset
-controller binding usage.
-
-Required properties:
-- compatible: Should be "sirf,prima2-rstc" or "sirf,marco-rstc"
-- reg: should be register base and length as documented in the
- datasheet
-- #reset-cells: 1, see below
-
-example:
-
-rstc: reset-controller@88010000 {
- compatible = "sirf,prima2-rstc";
- reg = <0x88010000 0x1000>;
- #reset-cells = <1>;
-};
-
-Specifying reset lines connected to IP modules
-==============================================
-
-The reset controller(rstc) manages various reset sources. This module provides
-reset signals for most blocks in system. Those device nodes should specify the
-reset line on the rstc in their resets property, containing a phandle to the
-rstc device node and a RESET_INDEX specifying which module to reset, as described
-in reset.txt.
-
-For SiRFSoC, RESET_INDEX is just reset_bit defined in SW_RST0 and SW_RST1 registers.
-For modules whose rest_bit is in SW_RST0, its RESET_INDEX is 0~31. For modules whose
-rest_bit is in SW_RST1, its RESET_INDEX is 32~63.
-
-example:
-
-vpp@90020000 {
- compatible = "sirf,prima2-vpp";
- reg = <0x90020000 0x10000>;
- interrupts = <31>;
- clocks = <&clks 35>;
- resets = <&rstc 6>;
-};
diff --git a/Documentation/devicetree/bindings/reset/zte,zx2967-reset.txt b/Documentation/devicetree/bindings/reset/zte,zx2967-reset.txt
deleted file mode 100644
index b015508f9780..000000000000
--- a/Documentation/devicetree/bindings/reset/zte,zx2967-reset.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-ZTE zx2967 SoCs Reset Controller
-=======================================
-
-Please also refer to reset.txt in this directory for common reset
-controller binding usage.
-
-Required properties:
-- compatible: should be one of the following.
- * zte,zx296718-reset
-- reg: physical base address of the controller and length of memory mapped
- region.
-- #reset-cells: must be 1.
-
-example:
-
- reset: reset-controller@1461060 {
- compatible = "zte,zx296718-reset";
- reg = <0x01461060 0x8>;
- #reset-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/riscv/canaan.yaml b/Documentation/devicetree/bindings/riscv/canaan.yaml
new file mode 100644
index 000000000000..f8f3f286bd55
--- /dev/null
+++ b/Documentation/devicetree/bindings/riscv/canaan.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/riscv/canaan.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Canaan SoC-based boards
+
+maintainers:
+ - Damien Le Moal <damien.lemoal@wdc.com>
+
+description:
+ Canaan Kendryte K210 SoC-based boards
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+ - items:
+ - const: sipeed,maix-bit
+ - const: sipeed,maix-bitm
+ - const: canaan,kendryte-k210
+
+ - items:
+ - const: sipeed,maix-go
+ - const: canaan,kendryte-k210
+
+ - items:
+ - const: sipeed,maix-dock-m1
+ - const: sipeed,maix-dock-m1w
+ - const: canaan,kendryte-k210
+
+ - items:
+ - const: sipeed,maixduino
+ - const: canaan,kendryte-k210
+
+ - items:
+ - const: canaan,kendryte-kd233
+ - const: canaan,kendryte-k210
+
+ - items:
+ - const: canaan,kendryte-k210
+
+additionalProperties: true
+
+...
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
index c6925e0b16e4..e534f6a7cfa1 100644
--- a/Documentation/devicetree/bindings/riscv/cpus.yaml
+++ b/Documentation/devicetree/bindings/riscv/cpus.yaml
@@ -28,11 +28,18 @@ properties:
- items:
- enum:
- sifive,rocket0
+ - sifive,bullet0
- sifive,e5
+ - sifive,e7
- sifive,e51
+ - sifive,e71
- sifive,u54-mc
+ - sifive,u74-mc
- sifive,u54
+ - sifive,u74
- sifive,u5
+ - sifive,u7
+ - canaan,k210
- const: riscv
- const: riscv # Simulator only
description:
@@ -50,6 +57,7 @@ properties:
- riscv,sv32
- riscv,sv39
- riscv,sv48
+ - riscv,none
riscv,isa:
description:
diff --git a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
index efc0198eeb74..23b227614366 100644
--- a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
+++ b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
@@ -27,6 +27,7 @@ select:
items:
- enum:
- sifive,fu540-c000-ccache
+ - sifive,fu740-c000-ccache
required:
- compatible
@@ -34,7 +35,9 @@ select:
properties:
compatible:
items:
- - const: sifive,fu540-c000-ccache
+ - enum:
+ - sifive,fu540-c000-ccache
+ - sifive,fu740-c000-ccache
- const: cache
cache-block-size:
@@ -52,10 +55,13 @@ properties:
cache-unified: true
interrupts:
- description: |
- Must contain entries for DirError, DataError and DataFail signals.
minItems: 3
- maxItems: 3
+ maxItems: 4
+ items:
+ - description: DirError interrupt
+ - description: DataError interrupt
+ - description: DataFail interrupt
+ - description: DirFail interrupt
reg:
maxItems: 1
@@ -63,10 +69,31 @@ properties:
next-level-cache: true
memory-region:
+ maxItems: 1
description: |
The reference to the reserved-memory for the L2 Loosely Integrated Memory region.
The reserved memory node should be defined as per the bindings in reserved-memory.txt.
+if:
+ properties:
+ compatible:
+ contains:
+ const: sifive,fu540-c000-ccache
+
+then:
+ properties:
+ interrupts:
+ description: |
+ Must contain entries for DirError, DataError and DataFail signals.
+ maxItems: 3
+
+else:
+ properties:
+ interrupts:
+ description: |
+ Must contain entries for DirError, DataError, DataFail, DirFail signals.
+ minItems: 4
+
additionalProperties: false
required:
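Under the FU740 branch of the schema a cache controller node would therefore carry four interrupt entries; a rough sketch, with register address, interrupt numbers and cache geometry chosen for illustration rather than copied from the FU740 manual:

    ccache: cache-controller@2010000 {
            compatible = "sifive,fu740-c000-ccache", "cache";
            reg = <0x2010000 0x1000>;
            interrupt-parent = <&plic>;
            /* DirError, DataError, DataFail, DirFail */
            interrupts = <19>, <21>, <22>, <20>;
            cache-block-size = <64>;
            cache-level = <2>;
            cache-sets = <2048>;
            cache-size = <2097152>;
            cache-unified;
    };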
diff --git a/Documentation/devicetree/bindings/riscv/sifive.yaml b/Documentation/devicetree/bindings/riscv/sifive.yaml
index 3a8647d1da4c..ee0a239af4c2 100644
--- a/Documentation/devicetree/bindings/riscv/sifive.yaml
+++ b/Documentation/devicetree/bindings/riscv/sifive.yaml
@@ -17,11 +17,18 @@ properties:
$nodename:
const: '/'
compatible:
- items:
- - enum:
- - sifive,hifive-unleashed-a00
- - const: sifive,fu540-c000
- - const: sifive,fu540
+ oneOf:
+ - items:
+ - enum:
+ - sifive,hifive-unleashed-a00
+ - const: sifive,fu540-c000
+ - const: sifive,fu540
+
+ - items:
+ - enum:
+ - sifive,hifive-unmatched-a00
+ - const: sifive,fu740-c000
+ - const: sifive,fu740
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml b/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
index 37c2a601c3fa..b1b0ee769b71 100644
--- a/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
@@ -128,7 +128,6 @@ required:
- compatible
- reg
- interrupts
- - clocks
- clock-output-names
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.yaml b/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.yaml
index 02bbfe726c62..994de43d17fa 100644
--- a/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.yaml
@@ -20,6 +20,7 @@ properties:
- atmel,sama5d4-rtc
- atmel,sama5d2-rtc
- microchip,sam9x60-rtc
+ - microchip,sama7g5-rtc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml b/Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml
new file mode 100644
index 000000000000..cde7b1675ead
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/nxp,pcf2127.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP PCF2127 Real Time Clock
+
+allOf:
+ - $ref: "rtc.yaml#"
+
+maintainers:
+ - Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+properties:
+ compatible:
+ const: nxp,pcf2127
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ start-year: true
+
+ reset-source: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@51 {
+ compatible = "nxp,pcf2127";
+ reg = <0x51>;
+ pinctrl-0 = <&rtc_nint_pins>;
+ interrupts-extended = <&gpio1 16 IRQ_TYPE_LEVEL_HIGH>;
+ reset-source;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/rtc/pcf8563.txt b/Documentation/devicetree/bindings/rtc/pcf8563.txt
index 6076fe76dbfa..0a900f7c8977 100644
--- a/Documentation/devicetree/bindings/rtc/pcf8563.txt
+++ b/Documentation/devicetree/bindings/rtc/pcf8563.txt
@@ -5,7 +5,8 @@ Philips PCF8563/Epson RTC8564 Real Time Clock
Required properties:
- compatible: Should contain "nxp,pcf8563",
"epson,rtc8564" or
- "microcrystal,rv8564"
+ "microcrystal,rv8564" or
+ "nxp,pca8565"
- reg: I2C address for chip.
Optional property:
diff --git a/Documentation/devicetree/bindings/rtc/rtc.yaml b/Documentation/devicetree/bindings/rtc/rtc.yaml
index d30dc045aac6..0ec3551f12dd 100644
--- a/Documentation/devicetree/bindings/rtc/rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/rtc.yaml
@@ -27,7 +27,6 @@ properties:
1: chargeable
quartz-load-femtofarads:
- $ref: /schemas/types.yaml#/definitions/uint32
description:
The capacitive load of the quartz(x-tal), expressed in femto
Farad (fF). The default value shall be listed (if optional),
@@ -47,7 +46,6 @@ properties:
deprecated: true
trickle-resistor-ohms:
- $ref: /schemas/types.yaml#/definitions/uint32
description:
Selected resistor for trickle charger. Should be given
if trickle charger should be enabled.
diff --git a/Documentation/devicetree/bindings/rtc/sirf,prima2-sysrtc.txt b/Documentation/devicetree/bindings/rtc/sirf,prima2-sysrtc.txt
deleted file mode 100644
index 58885b55da21..000000000000
--- a/Documentation/devicetree/bindings/rtc/sirf,prima2-sysrtc.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-SiRFSoC Real Time Clock
-
-Required properties:
-- compatible: must be "sirf,prima2-sysrtc"
-- reg: address range of rtc register set.
-- interrupts: rtc alarm interrupts.
-
-Example:
- rtc@2000 {
- compatible = "sirf,prima2-sysrtc";
- reg = <0x2000 0x1000>;
- interrupts = <52 53 54>;
- };
diff --git a/Documentation/devicetree/bindings/rtc/stericsson,coh901331.txt b/Documentation/devicetree/bindings/rtc/stericsson,coh901331.txt
deleted file mode 100644
index e615a897b20e..000000000000
--- a/Documentation/devicetree/bindings/rtc/stericsson,coh901331.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-ST-Ericsson COH 901 331 Real Time Clock
-
-Required properties:
-- compatible: must be "stericsson,coh901331"
-- reg: address range of rtc register set.
-- interrupts: rtc alarm interrupt.
-- clocks: phandle to the rtc clock source
-
-Example:
- rtc: rtc@c0017000 {
- compatible = "stericsson,coh901331";
- reg = <0xc0017000 0x1000>;
- interrupt-parent = <&vicb>;
- interrupts = <10>;
- clocks = <&rtc_clk>;
- };
diff --git a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
index c7d14de214c4..7548d8714871 100644
--- a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
@@ -48,12 +48,8 @@ properties:
- microcrystal,rv3029
# Real Time Clock
- microcrystal,rv8523
- # Real-time clock
- - nxp,pcf2127
- # Real-time clock
- - nxp,pcf2129
- # Real-time clock
- nxp,pca2129
+ - nxp,pcf2129
# Real-time Clock Module
- pericom,pt7c4338
# I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
diff --git a/Documentation/devicetree/bindings/serial/fsl-imx-uart.yaml b/Documentation/devicetree/bindings/serial/fsl-imx-uart.yaml
index 9702c07a6b6c..2b06c6ce4a75 100644
--- a/Documentation/devicetree/bindings/serial/fsl-imx-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/fsl-imx-uart.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale i.MX Universal Asynchronous Receiver/Transmitter (UART)
maintainers:
- - Fabio Estevam <fabio.estevam@nxp.com>
+ - Fabio Estevam <festevam@gmail.com>
allOf:
- $ref: "serial.yaml"
diff --git a/Documentation/devicetree/bindings/serial/fsl-mxs-auart.yaml b/Documentation/devicetree/bindings/serial/fsl-mxs-auart.yaml
index ce1d89496342..14c7594c88c6 100644
--- a/Documentation/devicetree/bindings/serial/fsl-mxs-auart.yaml
+++ b/Documentation/devicetree/bindings/serial/fsl-mxs-auart.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale MXS Application UART (AUART)
maintainers:
- - Fabio Estevam <fabio.estevam@nxp.com>
+ - Fabio Estevam <festevam@gmail.com>
allOf:
- $ref: "serial.yaml"
diff --git a/Documentation/devicetree/bindings/serial/pl011.yaml b/Documentation/devicetree/bindings/serial/pl011.yaml
index c23c93b400f0..1f8e9f2644b6 100644
--- a/Documentation/devicetree/bindings/serial/pl011.yaml
+++ b/Documentation/devicetree/bindings/serial/pl011.yaml
@@ -19,7 +19,6 @@ select:
contains:
enum:
- arm,pl011
- - zte,zx296702-uart
required:
- compatible
@@ -30,7 +29,6 @@ properties:
- const: arm,pl011
- const: arm,primecell
- items:
- - const: zte,zx296702-uart
- const: arm,primecell
reg:
@@ -88,14 +86,12 @@ properties:
description:
Rate at which poll occurs when auto-poll is set.
default 100ms.
- $ref: /schemas/types.yaml#/definitions/uint32
default: 100
poll-timeout-ms:
description:
Poll timeout when auto-poll is set, default
3000ms.
- $ref: /schemas/types.yaml#/definitions/uint32
default: 3000
required:
diff --git a/Documentation/devicetree/bindings/serial/renesas,hscif.yaml b/Documentation/devicetree/bindings/serial/renesas,hscif.yaml
index c139c5edb93e..ee9804cd49bb 100644
--- a/Documentation/devicetree/bindings/serial/renesas,hscif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,hscif.yaml
@@ -51,6 +51,7 @@ properties:
- renesas,hscif-r8a77980 # R-Car V3H
- renesas,hscif-r8a77990 # R-Car E3
- renesas,hscif-r8a77995 # R-Car D3
+ - renesas,hscif-r8a779a0 # R-Car V3U
- const: renesas,rcar-gen3-hscif # R-Car Gen3 and RZ/G2
- const: renesas,hscif # generic HSCIF compatible UART
@@ -81,6 +82,8 @@ properties:
maxItems: 1
dmas:
+ minItems: 2
+ maxItems: 4
description:
Must contain a list of pairs of references to DMA specifiers, one for
transmission, and one for reception.
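The new minItems/maxItems bounds permit either one TX/RX pair or two, for instance when two DMA controllers can serve the channel. An illustrative fragment with placeholder DMAC phandles and request numbers:

    dmas = <&dmac1 0x31>, <&dmac1 0x32>,
           <&dmac2 0x31>, <&dmac2 0x32>;
    dma-names = "tx", "rx", "tx", "rx";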
diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
index 672158906c33..22d76829f7ae 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
@@ -120,6 +120,8 @@ properties:
maxItems: 1
dmas:
+ minItems: 2
+ maxItems: 4
description:
Must contain a list of pairs of references to DMA specifiers, one for
transmission, and one for reception.
diff --git a/Documentation/devicetree/bindings/serial/renesas,scifa.yaml b/Documentation/devicetree/bindings/serial/renesas,scifa.yaml
index dbffb9534835..3c67d3202e1b 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scifa.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scifa.yaml
@@ -55,6 +55,8 @@ properties:
maxItems: 1
dmas:
+ minItems: 2
+ maxItems: 4
description:
Must contain a list of pairs of references to DMA specifiers, one for
transmission, and one for reception.
diff --git a/Documentation/devicetree/bindings/serial/renesas,scifb.yaml b/Documentation/devicetree/bindings/serial/renesas,scifb.yaml
index 147f8a37e02a..d5571c7a4424 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scifb.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scifb.yaml
@@ -55,6 +55,8 @@ properties:
maxItems: 1
dmas:
+ minItems: 2
+ maxItems: 4
description:
Must contain a list of pairs of references to DMA specifiers, one for
transmission, and one for reception.
diff --git a/Documentation/devicetree/bindings/serial/sifive-serial.yaml b/Documentation/devicetree/bindings/serial/sifive-serial.yaml
index 3ac5c7ff2758..5fa94dacbba9 100644
--- a/Documentation/devicetree/bindings/serial/sifive-serial.yaml
+++ b/Documentation/devicetree/bindings/serial/sifive-serial.yaml
@@ -20,6 +20,7 @@ properties:
- enum:
- sifive,fu540-c000-uart
- sifive,fu740-c000-uart
+ - canaan,k210-uarths
- const: sifive,uart0
description:
diff --git a/Documentation/devicetree/bindings/serial/sirf-uart.txt b/Documentation/devicetree/bindings/serial/sirf-uart.txt
deleted file mode 100644
index 1e48bbbeecc6..000000000000
--- a/Documentation/devicetree/bindings/serial/sirf-uart.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-* CSR SiRFprimaII/atlasVI Universal Synchronous Asynchronous Receiver/Transmitter *
-
-Required properties:
-- compatible : Should be "sirf,prima2-uart", "sirf, prima2-usp-uart",
- "sirf,atlas7-uart" or "sirf,atlas7-usp-uart".
-- reg : Offset and length of the register set for the device
-- interrupts : Should contain uart interrupt
-- fifosize : Should define hardware rx/tx fifo size
-- clocks : Should contain uart clock number
-
-Optional properties:
-- uart-has-rtscts: we have hardware flow controller pins in hardware
-- rts-gpios: RTS pin for USP-based UART if uart-has-rtscts is true
-- cts-gpios: CTS pin for USP-based UART if uart-has-rtscts is true
-
-Example:
-
-uart0: uart@b0050000 {
- cell-index = <0>;
- compatible = "sirf,prima2-uart";
- reg = <0xb0050000 0x1000>;
- interrupts = <17>;
- fifosize = <128>;
- clocks = <&clks 13>;
-};
-
-On the board-specific dts, we can put rts-gpios and cts-gpios like
-
-usp@b0090000 {
- compatible = "sirf,prima2-usp-uart";
- uart-has-rtscts;
- rts-gpios = <&gpio 15 0>;
- cts-gpios = <&gpio 46 0>;
-};
diff --git a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
index 06d5f251ec88..8631678283f9 100644
--- a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
@@ -50,11 +50,14 @@ properties:
minItems: 1
maxItems: 2
- cts-gpios:
- maxItems: 1
-
- rts-gpios:
- maxItems: 1
+# The cts-gpios and rts-gpios properties can be used instead of 'uart-has-rtscts'
+# or 'st,hw-flow-ctrl' (deprecated) to use arbitrary GPIO pins for flow control
+# rather than the dedicated pins.
+#
+# Note that cts-gpios/rts-gpios cannot coexist with 'uart-has-rtscts' or
+# 'st,hw-flow-ctrl' (deprecated) in the same design.
+ cts-gpios: true
+ rts-gpios: true
wakeup-source: true
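With the relaxed schema above, GPIO-based flow control is expressed with cts-gpios/rts-gpios and without 'uart-has-rtscts' or 'st,hw-flow-ctrl'. An illustrative fragment; the node label, GPIO bank and line numbers are assumptions:

    #include <dt-bindings/gpio/gpio.h>

    &usart2 {
        rts-gpios = <&gpioa 1 GPIO_ACTIVE_LOW>;
        cts-gpios = <&gpioa 0 GPIO_ACTIVE_LOW>;
        status = "okay";
    };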
diff --git a/Documentation/devicetree/bindings/soc/imx/imx8m-soc.yaml b/Documentation/devicetree/bindings/soc/imx/imx8m-soc.yaml
new file mode 100644
index 000000000000..effcc72f9425
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/imx/imx8m-soc.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/imx/imx8m-soc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP i.MX8M Series SoC
+
+maintainers:
+ - Alice Guo <alice.guo@nxp.com>
+
+description: |
+ NXP i.MX8M series SoCs contain fuse entries from which the SoC Unique ID can be
+ obtained.
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx8mm
+ - fsl,imx8mn
+ - fsl,imx8mp
+ - fsl,imx8mq
+ required:
+ - compatible
+
+patternProperties:
+ "^soc@[0-9a-f]+$":
+ type: object
+ properties:
+ compatible:
+ items:
+ - enum:
+ - fsl,imx8mm-soc
+ - fsl,imx8mn-soc
+ - fsl,imx8mp-soc
+ - fsl,imx8mq-soc
+ - const: simple-bus
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ ranges: true
+
+ dma-ranges: true
+
+ nvmem-cells:
+ maxItems: 1
+ description: Phandle to the SoC Unique ID provided by an nvmem node
+
+ nvmem-cell-names:
+ const: soc_unique_id
+
+ required:
+ - compatible
+ - nvmem-cells
+ - nvmem-cell-names
+
+ additionalProperties:
+ type: object
+
+additionalProperties: true
+
+examples:
+ - |
+ / {
+ model = "FSL i.MX8MM EVK board";
+ compatible = "fsl,imx8mm-evk", "fsl,imx8mm";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ soc@0 {
+ compatible = "fsl,imx8mm-soc", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x0 0x3e000000>;
+ nvmem-cells = <&imx8mm_uid>;
+ nvmem-cell-names = "soc_unique_id";
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt
index 953add19e937..19c059e44681 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt
@@ -20,6 +20,7 @@ power-domains.
"qcom,sdm845-aoss-qmp"
"qcom,sm8150-aoss-qmp"
"qcom,sm8250-aoss-qmp"
+ "qcom,sm8350-aoss-qmp"
- reg:
Usage: required
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smem.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smem.txt
deleted file mode 100644
index 9326cdf6e1b1..000000000000
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smem.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-Qualcomm Shared Memory Manager binding
-
-This binding describes the Qualcomm Shared Memory Manager, used to share data
-between various subsystems and OSes in Qualcomm platforms.
-
-- compatible:
- Usage: required
- Value type: <stringlist>
- Definition: must be:
- "qcom,smem"
-
-- memory-region:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: handle to memory reservation for main SMEM memory region.
-
-- qcom,rpm-msg-ram:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: handle to RPM message memory resource
-
-- hwlocks:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: reference to a hwspinlock used to protect allocations from
- the shared memory
-
-= EXAMPLE
-The following example shows the SMEM setup for MSM8974, with a main SMEM region
-at 0xfa00000 and the RPM message ram at 0xfc428000:
-
- reserved-memory {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- smem_region: smem@fa00000 {
- reg = <0xfa00000 0x200000>;
- no-map;
- };
- };
-
- smem@fa00000 {
- compatible = "qcom,smem";
-
- memory-region = <&smem_region>;
- qcom,rpm-msg-ram = <&rpm_msg_ram>;
-
- hwlocks = <&tcsr_mutex 3>;
- };
-
- soc {
- rpm_msg_ram: memory@fc428000 {
- compatible = "qcom,rpm-msg-ram";
- reg = <0xfc428000 0x4000>;
- };
- };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smem.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smem.yaml
new file mode 100644
index 000000000000..f7e17713b3d8
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smem.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/soc/qcom/qcom,smem.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Qualcomm Shared Memory Manager binding
+
+maintainers:
+ - Andy Gross <agross@kernel.org>
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description: |
+ This binding describes the Qualcomm Shared Memory Manager, used to share data
+ between various subsystems and OSes in Qualcomm platforms.
+
+properties:
+ compatible:
+ const: qcom,smem
+
+ memory-region:
+ maxItems: 1
+ description: handle to memory reservation for main SMEM memory region.
+
+ hwlocks:
+ maxItems: 1
+
+ qcom,rpm-msg-ram:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: handle to RPM message memory resource
+
+required:
+ - compatible
+ - memory-region
+ - hwlocks
+
+additionalProperties: false
+
+examples:
+ - |
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ smem_region: smem@fa00000 {
+ reg = <0xfa00000 0x200000>;
+ no-map;
+ };
+ };
+
+ smem {
+ compatible = "qcom,smem";
+
+ memory-region = <&smem_region>;
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ rpm_msg_ram: sram@fc428000 {
+ compatible = "qcom,rpm-msg-ram";
+ reg = <0xfc428000 0x4000>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml b/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
index 037c51b2f972..dbc62821c60b 100644
--- a/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
+++ b/Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
@@ -81,6 +81,9 @@ properties:
ranges:
maxItems: 1
+ dma-ranges:
+ maxItems: 1
+
power-domains:
description: |
This property is as per sci-pm-domain.txt.
@@ -278,6 +281,9 @@ patternProperties:
that is common to all the PRU cores. This should be represented as an
interrupt-controller node.
+ allOf:
+ - $ref: /schemas/interrupt-controller/ti,pruss-intc.yaml#
+
type: object
mdio@[a-f0-9]+$:
@@ -299,6 +305,9 @@ patternProperties:
present on K3 SoCs have additional auxiliary PRU cores with slightly
different IP integration.
+ allOf:
+ - $ref: /schemas/remoteproc/ti,pru-rproc.yaml#
+
type: object
required:
@@ -371,6 +380,36 @@ examples:
reg = <0x32000 0x58>;
};
+ pruss_intc: interrupt-controller@20000 {
+ compatible = "ti,pruss-intc";
+ reg = <0x20000 0x2000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupts = <20 21 22 23 24 25 26 27>;
+ interrupt-names = "host_intr0", "host_intr1",
+ "host_intr2", "host_intr3",
+ "host_intr4", "host_intr5",
+ "host_intr6", "host_intr7";
+ };
+
+ pru0: pru@34000 {
+ compatible = "ti,am3356-pru";
+ reg = <0x34000 0x2000>,
+ <0x22000 0x400>,
+ <0x22400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am335x-pru0-fw";
+ };
+
+ pru1: pru@38000 {
+ compatible = "ti,am3356-pru";
+ reg = <0x38000 0x2000>,
+ <0x24000 0x400>,
+ <0x24400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am335x-pru1-fw";
+ };
+
pruss_mdio: mdio@32400 {
compatible = "ti,davinci_mdio";
reg = <0x32400 0x90>;
@@ -425,6 +464,43 @@ examples:
reg = <0x32000 0x58>;
};
+ pruss1_intc: interrupt-controller@20000 {
+ compatible = "ti,pruss-intc";
+ reg = <0x20000 0x2000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host_intr0", "host_intr1",
+ "host_intr2", "host_intr3",
+ "host_intr4",
+ "host_intr6", "host_intr7";
+ ti,irqs-reserved = /bits/ 8 <0x20>; /* BIT(5) */
+ };
+
+ pru1_0: pru@34000 {
+ compatible = "ti,am4376-pru";
+ reg = <0x34000 0x3000>,
+ <0x22000 0x400>,
+ <0x22400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am437x-pru1_0-fw";
+ };
+
+ pru1_1: pru@38000 {
+ compatible = "ti,am4376-pru";
+ reg = <0x38000 0x3000>,
+ <0x24000 0x400>,
+ <0x24400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am437x-pru1_1-fw";
+ };
+
pruss1_mdio: mdio@32400 {
compatible = "ti,davinci_mdio";
reg = <0x32400 0x90>;
diff --git a/Documentation/devicetree/bindings/soc/zte/pd-2967xx.txt b/Documentation/devicetree/bindings/soc/zte/pd-2967xx.txt
deleted file mode 100644
index 7629de1c2c72..000000000000
--- a/Documentation/devicetree/bindings/soc/zte/pd-2967xx.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-* ZTE zx2967 family Power Domains
-
-zx2967 family includes support for multiple power domains which are used
-to gate power to one or more peripherals on the processor.
-
-Required Properties:
- - compatible: should be one of the following.
- * zte,zx296718-pcu - for zx296718 power domain.
- - reg: physical base address of the controller and length of memory mapped
- region.
- - #power-domain-cells: Must be 1.
-
-Example:
-
- pcu_domain: pcu@117000 {
- compatible = "zte,zx296718-pcu";
- reg = <0x00117000 0x1000>;
- #power-domain-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
index dd47fef9854d..559aff13ae23 100644
--- a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
@@ -88,6 +88,7 @@ properties:
description: Phandle to the codec analog controls in the PRCM
allwinner,pa-gpios:
+ maxItems: 1
description: GPIO to enable the external amplifier
required:
diff --git a/Documentation/devicetree/bindings/sound/audio-graph-port.yaml b/Documentation/devicetree/bindings/sound/audio-graph-port.yaml
index 2005014161be..766e9109b2f7 100644
--- a/Documentation/devicetree/bindings/sound/audio-graph-port.yaml
+++ b/Documentation/devicetree/bindings/sound/audio-graph-port.yaml
@@ -71,9 +71,6 @@ properties:
description: CPU to Codec rate channels.
$ref: /schemas/types.yaml#/definitions/uint32
- required:
- - remote-endpoint
-
ports:
description: multi OF-Graph subnode
type: object
diff --git a/Documentation/devicetree/bindings/sound/google,sc7180-trogdor.yaml b/Documentation/devicetree/bindings/sound/google,sc7180-trogdor.yaml
index 5095b780e2c7..837e3faa63a9 100644
--- a/Documentation/devicetree/bindings/sound/google,sc7180-trogdor.yaml
+++ b/Documentation/devicetree/bindings/sound/google,sc7180-trogdor.yaml
@@ -55,6 +55,7 @@ patternProperties:
maxItems: 1
reg:
+ maxItems: 1
description: dai link address.
cpu:
diff --git a/Documentation/devicetree/bindings/sound/ingenic,codec.yaml b/Documentation/devicetree/bindings/sound/ingenic,codec.yaml
index eb4be86464bb..97d5f3819b27 100644
--- a/Documentation/devicetree/bindings/sound/ingenic,codec.yaml
+++ b/Documentation/devicetree/bindings/sound/ingenic,codec.yaml
@@ -15,9 +15,14 @@ properties:
compatible:
oneOf:
- - const: ingenic,jz4770-codec
- - const: ingenic,jz4725b-codec
- - const: ingenic,jz4740-codec
+ - enum:
+ - ingenic,jz4770-codec
+ - ingenic,jz4760-codec
+ - ingenic,jz4725b-codec
+ - ingenic,jz4740-codec
+ - items:
+ - const: ingenic,jz4760b-codec
+ - const: ingenic,jz4760-codec
reg:
maxItems: 1
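Since the JZ4760B entry falls back to the JZ4760 string, a JZ4760B device tree lists both compatibles. A minimal sketch with the register address and size as placeholders:

    codec: audio-codec@10020080 {
        compatible = "ingenic,jz4760b-codec", "ingenic,jz4760-codec";
        reg = <0x10020080 0x30>;             /* address/size are placeholders */
        #sound-dai-cells = <0>;
    };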
diff --git a/Documentation/devicetree/bindings/sound/intel,keembay-i2s.yaml b/Documentation/devicetree/bindings/sound/intel,keembay-i2s.yaml
index d346e61ab708..6f71294909a5 100644
--- a/Documentation/devicetree/bindings/sound/intel,keembay-i2s.yaml
+++ b/Documentation/devicetree/bindings/sound/intel,keembay-i2s.yaml
@@ -18,6 +18,7 @@ properties:
enum:
- intel,keembay-i2s
- intel,keembay-tdm
+ - intel,keembay-hdmi-i2s
"#sound-dai-cells":
const: 0
@@ -45,6 +46,16 @@ properties:
- const: osc
- const: apb_clk
+ dmas:
+ items:
+ - description: DMA TX channel
+ - description: DMA RX channel
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+
required:
- compatible
- "#sound-dai-cells"
@@ -70,4 +81,6 @@ examples:
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "osc", "apb_clk";
clocks = <&scmi_clk KEEM_BAY_PSS_AUX_I2S3>, <&scmi_clk KEEM_BAY_PSS_I2S3>;
+ dmas = <&axi_dma0 29 &axi_dma0 33>;
+ dma-names = "tx", "rx";
};
diff --git a/Documentation/devicetree/bindings/sound/mt8192-mt6359-rt1015-rt5682.yaml b/Documentation/devicetree/bindings/sound/mt8192-mt6359-rt1015-rt5682.yaml
index bf8c8ba25009..5a5b765b859a 100644
--- a/Documentation/devicetree/bindings/sound/mt8192-mt6359-rt1015-rt5682.yaml
+++ b/Documentation/devicetree/bindings/sound/mt8192-mt6359-rt1015-rt5682.yaml
@@ -7,8 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Mediatek MT8192 with MT6359, RT1015 and RT5682 ASoC sound card driver
maintainers:
- - Jiaxin Yu <jiaxin.yu@mediatek.com>
- - Shane Chien <shane.chien@mediatek.com>
+ - Jiaxin Yu <jiaxin.yu@mediatek.com>
+ - Shane Chien <shane.chien@mediatek.com>
description:
This binding describes the MT8192 sound card.
@@ -23,6 +23,10 @@ properties:
$ref: "/schemas/types.yaml#/definitions/phandle"
description: The phandle of MT8192 ASoC platform.
+ mediatek,hdmi-codec:
+ $ref: "/schemas/types.yaml#/definitions/phandle"
+ description: The phandle of HDMI codec.
+
additionalProperties: false
required:
@@ -35,6 +39,7 @@ examples:
sound: mt8192-sound {
compatible = "mediatek,mt8192_mt6359_rt1015_rt5682";
mediatek,platform = <&afe>;
+ mediatek,hdmi-codec = <&anx_bridge_dp>;
pinctrl-names = "aud_clk_mosi_off",
"aud_clk_mosi_on";
pinctrl-0 = <&aud_clk_mosi_off>;
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml
new file mode 100644
index 000000000000..249970952202
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml
@@ -0,0 +1,190 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra-audio-graph-card.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Audio Graph based Tegra sound card driver
+
+description: |
+ This is based on generic audio graph card driver along with additional
+ customizations for Tegra platforms. It uses the same bindings with
+ additional standard clock DT bindings required for Tegra.
+
+maintainers:
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Sameer Pujar <spujar@nvidia.com>
+
+allOf:
+ - $ref: audio-graph.yaml#
+
+properties:
+ compatible:
+ enum:
+ - nvidia,tegra210-audio-graph-card
+ - nvidia,tegra186-audio-graph-card
+
+ clocks:
+ minItems: 2
+
+ clock-names:
+ minItems: 2
+ items:
+ - const: pll_a
+ - const: plla_out0
+
+ assigned-clocks:
+ minItems: 1
+ maxItems: 3
+
+ assigned-clock-parents:
+ minItems: 1
+ maxItems: 3
+
+ assigned-clock-rates:
+ minItems: 1
+ maxItems: 3
+
+ iommus:
+ maxItems: 1
+
+required:
+ - clocks
+ - clock-names
+ - assigned-clocks
+ - assigned-clock-parents
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/tegra210-car.h>
+
+ tegra_sound {
+ compatible = "nvidia,tegra210-audio-graph-card";
+
+ clocks = <&tegra_car TEGRA210_CLK_PLL_A>,
+ <&tegra_car TEGRA210_CLK_PLL_A_OUT0>;
+ clock-names = "pll_a", "plla_out0";
+
+ assigned-clocks = <&tegra_car TEGRA210_CLK_PLL_A>,
+ <&tegra_car TEGRA210_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA210_CLK_EXTERN1>;
+ assigned-clock-parents = <0>, <0>, <&tegra_car TEGRA210_CLK_PLL_A_OUT0>;
+ assigned-clock-rates = <368640000>, <49152000>, <12288000>;
+
+ dais = /* FE */
+ <&admaif1_port>,
+ /* Router */
+ <&xbar_i2s1_port>,
+ /* I/O DAP Ports */
+ <&i2s1_port>;
+
+ label = "jetson-tx1-ape";
+ };
+
+ // The ports are defined for AHUB and its child devices.
+ ahub@702d0800 {
+ compatible = "nvidia,tegra210-ahub";
+ reg = <0x702d0800 0x800>;
+ clocks = <&tegra_car TEGRA210_CLK_D_AUDIO>;
+ clock-names = "ahub";
+ assigned-clocks = <&tegra_car TEGRA210_CLK_D_AUDIO>;
+ assigned-clock-parents = <&tegra_car TEGRA210_CLK_PLL_A_OUT0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x702d0000 0x702d0000 0x0000e400>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0x0>;
+ xbar_admaif1_ep: endpoint {
+ remote-endpoint = <&admaif1_ep>;
+ };
+ };
+
+ // ...
+
+ xbar_i2s1_port: port@a {
+ reg = <0xa>;
+ xbar_i2s1_ep: endpoint {
+ remote-endpoint = <&i2s1_cif_ep>;
+ };
+ };
+ };
+
+ admaif@702d0000 {
+ compatible = "nvidia,tegra210-admaif";
+ reg = <0x702d0000 0x800>;
+ dmas = <&adma 1>, <&adma 1>,
+ <&adma 2>, <&adma 2>,
+ <&adma 3>, <&adma 3>,
+ <&adma 4>, <&adma 4>,
+ <&adma 5>, <&adma 5>,
+ <&adma 6>, <&adma 6>,
+ <&adma 7>, <&adma 7>,
+ <&adma 8>, <&adma 8>,
+ <&adma 9>, <&adma 9>,
+ <&adma 10>, <&adma 10>;
+ dma-names = "rx1", "tx1",
+ "rx2", "tx2",
+ "rx3", "tx3",
+ "rx4", "tx4",
+ "rx5", "tx5",
+ "rx6", "tx6",
+ "rx7", "tx7",
+ "rx8", "tx8",
+ "rx9", "tx9",
+ "rx10", "tx10";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ admaif1_port: port@0 {
+ reg = <0x0>;
+ admaif1_ep: endpoint {
+ remote-endpoint = <&xbar_admaif1_ep>;
+ };
+ };
+
+ // More ADMAIF ports to follow
+ };
+ };
+
+ i2s@702d1000 {
+ compatible = "nvidia,tegra210-i2s";
+ clocks = <&tegra_car TEGRA210_CLK_I2S0>;
+ clock-names = "i2s";
+ assigned-clocks = <&tegra_car TEGRA210_CLK_I2S0>;
+ assigned-clock-parents = <&tegra_car TEGRA210_CLK_PLL_A_OUT0>;
+ assigned-clock-rates = <1536000>;
+ reg = <0x702d1000 0x100>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0x0>;
+
+ i2s1_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s1_ep>;
+ };
+ };
+
+ i2s1_port: port@1 {
+ reg = <0x1>;
+
+ i2s1_dap: endpoint {
+ dai-format = "i2s";
+ };
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra186-dspk.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra186-dspk.yaml
index ed2fb32fcdd4..b8645d9c38ac 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra186-dspk.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra186-dspk.yaml
@@ -17,6 +17,9 @@ maintainers:
- Jon Hunter <jonathanh@nvidia.com>
- Sameer Pujar <spujar@nvidia.com>
+allOf:
+ - $ref: audio-graph-port.yaml#
+
properties:
$nodename:
pattern: "^dspk@[0-9a-f]*$"
@@ -55,6 +58,19 @@ properties:
The name can be "DSPK1" or "DSPKx", where x depends on the maximum
available instances on a Tegra SoC.
+ ports:
+ type: object
+ properties:
+ port@0:
+ description: |
+ DSPK ACIF (Audio Client Interface) port connected to the
+ corresponding AHUB (Audio Hub) ACIF port.
+
+ port@1:
+ description: |
+ DSPK DAP (Digital Audio Port) interface, which can be connected
+ to an external audio codec for playback.
+
required:
- compatible
- reg
@@ -64,7 +80,7 @@ required:
- assigned-clock-parents
- sound-name-prefix
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-admaif.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-admaif.yaml
index c028b259e822..7cee7722df41 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-admaif.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-admaif.yaml
@@ -17,6 +17,9 @@ maintainers:
- Jon Hunter <jonathanh@nvidia.com>
- Sameer Pujar <spujar@nvidia.com>
+allOf:
+ - $ref: audio-graph-port.yaml#
+
properties:
$nodename:
pattern: "^admaif@[0-9a-f]*$"
@@ -37,6 +40,14 @@ properties:
dma-names: true
+ ports:
+ description: |
+ Contains a list of ACIF (Audio CIF) port nodes for the ADMAIF channels.
+ The number of port nodes depends on the number of ADMAIF channels
+ the SoC provides. These are interfaced with the respective ACIF ports
+ in the AHUB (Audio Hub). Each port is capable of data transfers in
+ both directions.
+
if:
properties:
compatible:
@@ -81,7 +92,7 @@ required:
- dmas
- dma-names
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-ahub.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-ahub.yaml
index d77219727768..31f3e51974bb 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-ahub.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-ahub.yaml
@@ -17,6 +17,9 @@ maintainers:
- Jon Hunter <jonathanh@nvidia.com>
- Sameer Pujar <spujar@nvidia.com>
+allOf:
+ - $ref: audio-graph-port.yaml#
+
properties:
$nodename:
pattern: "^ahub@[0-9a-f]*$"
@@ -56,6 +59,13 @@ properties:
ranges: true
+ ports:
+ description: |
+ Contains a list of ACIF (Audio CIF) port nodes for the AHUB (Audio Hub).
+ These are connected to the ACIF interfaces of the AHUB clients, so the
+ number of port nodes depends on the number of clients the AHUB has
+ on a given SoC revision.
+
required:
- compatible
- reg
@@ -67,8 +77,7 @@ required:
- "#size-cells"
- ranges
-additionalProperties:
- type: object
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-dmic.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-dmic.yaml
index 2a3207b550e7..89f4f471be24 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-dmic.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-dmic.yaml
@@ -16,6 +16,9 @@ maintainers:
- Jon Hunter <jonathanh@nvidia.com>
- Sameer Pujar <spujar@nvidia.com>
+allOf:
+ - $ref: audio-graph-port.yaml#
+
properties:
$nodename:
pattern: "^dmic@[0-9a-f]*$"
@@ -56,6 +59,19 @@ properties:
The name can be "DMIC1" or "DMIC2" ... "DMICx", where x depends
on the maximum available instances on a Tegra SoC.
+ ports:
+ type: object
+ properties:
+ port@0:
+ description: |
+ DMIC ACIF (Audio Client Interface) port connected to the
+ corresponding AHUB (Audio Hub) ACIF port.
+
+ port@1:
+ description: |
+ DMIC DAP (Digital Audio Port) interface, which can be connected
+ to an external audio codec for capture.
+
required:
- compatible
- reg
@@ -64,7 +80,7 @@ required:
- assigned-clocks
- assigned-clock-parents
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-i2s.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-i2s.yaml
index dfc1bf7b7722..556460332ffb 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-i2s.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-i2s.yaml
@@ -16,6 +16,9 @@ maintainers:
- Jon Hunter <jonathanh@nvidia.com>
- Sameer Pujar <spujar@nvidia.com>
+allOf:
+ - $ref: audio-graph-port.yaml#
+
properties:
$nodename:
pattern: "^i2s@[0-9a-f]*$"
@@ -74,6 +77,19 @@ properties:
The name can be "I2S1" or "I2S2" ... "I2Sx", where x depends
on the maximum available instances on a Tegra SoC.
+ ports:
+ type: object
+ properties:
+ port@0:
+ description: |
+ I2S ACIF (Audio Client Interface) port connected to the
+ corresponding AHUB (Audio Hub) ACIF port.
+
+ port@1:
+ description: |
+ I2S DAP (Digital Audio Port) interface, which can be connected
+ to an external audio codec for playback or capture.
+
required:
- compatible
- reg
@@ -82,7 +98,7 @@ required:
- assigned-clocks
- assigned-clock-parents
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml b/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml
new file mode 100644
index 000000000000..443d556caa69
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/qcom,lpass-rx-macro.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: LPASS(Low Power Audio Subsystem) RX Macro audio codec DT bindings
+
+maintainers:
+ - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+properties:
+ compatible:
+ const: qcom,sm8250-lpass-rx-macro
+
+ reg:
+ maxItems: 1
+
+ "#sound-dai-cells":
+ const: 1
+
+ '#clock-cells':
+ const: 0
+
+ clocks:
+ maxItems: 5
+
+ clock-names:
+ items:
+ - const: mclk
+ - const: npl
+ - const: macro
+ - const: dcodec
+ - const: fsgen
+
+ clock-output-names:
+ items:
+ - const: mclk
+
+required:
+ - compatible
+ - reg
+ - "#sound-dai-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/sound/qcom,q6afe.h>
+ codec@3200000 {
+ compatible = "qcom,sm8250-lpass-rx-macro";
+ reg = <0x3200000 0x1000>;
+ #sound-dai-cells = <1>;
+ #clock-cells = <0>;
+ clocks = <&audiocc 0>,
+ <&audiocc 1>,
+ <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&vamacro>;
+ clock-names = "mclk", "npl", "macro", "dcodec", "fsgen";
+ clock-output-names = "mclk";
+ };
diff --git a/Documentation/devicetree/bindings/sound/qcom,lpass-tx-macro.yaml b/Documentation/devicetree/bindings/sound/qcom,lpass-tx-macro.yaml
new file mode 100644
index 000000000000..6b5ca02ccce4
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom,lpass-tx-macro.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/qcom,lpass-tx-macro.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: LPASS(Low Power Audio Subsystem) TX Macro audio codec DT bindings
+
+maintainers:
+ - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+properties:
+ compatible:
+ const: qcom,sm8250-lpass-tx-macro
+
+ reg:
+ maxItems: 1
+
+ "#sound-dai-cells":
+ const: 1
+
+ '#clock-cells':
+ const: 0
+
+ clocks:
+ maxItems: 5
+
+ clock-names:
+ items:
+ - const: mclk
+ - const: npl
+ - const: macro
+ - const: dcodec
+ - const: fsgen
+
+ clock-output-names:
+ items:
+ - const: mclk
+
+ qcom,dmic-sample-rate:
+ description: dmic sample rate
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+required:
+ - compatible
+ - reg
+ - "#sound-dai-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/sound/qcom,q6afe.h>
+ codec@3220000 {
+ compatible = "qcom,sm8250-lpass-tx-macro";
+ reg = <0x3220000 0x1000>;
+ #sound-dai-cells = <1>;
+ #clock-cells = <0>;
+ clocks = <&aoncc 0>,
+ <&aoncc 1>,
+ <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&vamacro>;
+ clock-names = "mclk", "npl", "macro", "dcodec", "fsgen";
+ clock-output-names = "mclk";
+ qcom,dmic-sample-rate = <600000>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
index 0fd37aa84947..2e1046513603 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
@@ -404,7 +404,7 @@ examples:
/* DAI base */
rcar_sound,dai {
dai0 {
- playback = <&ssi5 &src5>;
+ playback = <&ssi5>, <&src5>;
capture = <&ssi6>;
};
dai1 {
@@ -430,8 +430,8 @@ examples:
bitclock-master = <&rsnd_endpoint0>;
frame-master = <&rsnd_endpoint0>;
- playback = <&ssi0 &src0 &dvc0>;
- capture = <&ssi1 &src1 &dvc1>;
+ playback = <&ssi0>, <&src0>, <&dvc0>;
+ capture = <&ssi1>, <&src1>, <&dvc1>;
};
};
};
diff --git a/Documentation/devicetree/bindings/sound/rt5659.txt b/Documentation/devicetree/bindings/sound/rt5659.txt
index 56788f50b6cf..c473df5c878c 100644
--- a/Documentation/devicetree/bindings/sound/rt5659.txt
+++ b/Documentation/devicetree/bindings/sound/rt5659.txt
@@ -37,10 +37,21 @@ Optional properties:
- realtek,jd-src
0: No JD is used
1: using JD3 as JD source
+ 2: JD source for Intel HDA header
- realtek,ldo1-en-gpios : The GPIO that controls the CODEC's LDO1_EN pin.
- realtek,reset-gpios : The GPIO that controls the CODEC's RESET pin.
+- sound-name-prefix: Please refer to name-prefix.txt
+
+- ports: A codec may have one or more I2S interfaces, which are described
+  on the codec side under 'ports' or 'port'. When the SoC or host device
+  is connected to multiple interfaces of the codec, describe the
+  connectivity with the 'ports' property; when a single interface is
+  used, 'port' is sufficient. The choice depends on the platform or
+  board design.
+  Please refer to Documentation/devicetree/bindings/graph.txt
+
Pins on the device (for linking into audio routes) for RT5659/RT5658:
* DMIC L1
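When the codec's I2S interface is linked to a SoC DAI through the OF graph, the 'port'/'ports' description above maps to an endpoint such as the following sketch; the node labels and the remote endpoint are assumptions:

    rt5659: codec@1b {
        compatible = "realtek,rt5659";
        reg = <0x1b>;

        port {
            rt5659_ep: endpoint {
                remote-endpoint = <&cpu_i2s_ep>;
            };
        };
    };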
diff --git a/Documentation/devicetree/bindings/sound/samsung,aries-wm8994.yaml b/Documentation/devicetree/bindings/sound/samsung,aries-wm8994.yaml
index 1c6947294825..5fff586dc802 100644
--- a/Documentation/devicetree/bindings/sound/samsung,aries-wm8994.yaml
+++ b/Documentation/devicetree/bindings/sound/samsung,aries-wm8994.yaml
@@ -62,12 +62,15 @@ properties:
description: Supply for the micbias on the headset mic
earpath-sel-gpios:
+ maxItems: 1
description: GPIO for switching between tv-out and mic paths
headset-detect-gpios:
+ maxItems: 1
description: GPIO for detection of headset insertion
headset-key-gpios:
+ maxItems: 1
description: GPIO for detection of headset key press
io-channels:
diff --git a/Documentation/devicetree/bindings/sound/samsung,midas-audio.yaml b/Documentation/devicetree/bindings/sound/samsung,midas-audio.yaml
index 578928e67e5c..095775c598fa 100644
--- a/Documentation/devicetree/bindings/sound/samsung,midas-audio.yaml
+++ b/Documentation/devicetree/bindings/sound/samsung,midas-audio.yaml
@@ -53,9 +53,11 @@ properties:
description: Supply for the micbias on the Sub microphone
fm-sel-gpios:
+ maxItems: 1
description: GPIO pin for FM selection
lineout-sel-gpios:
+ maxItems: 1
description: GPIO pin for line out selection
required:
diff --git a/Documentation/devicetree/bindings/sound/sgtl5000.yaml b/Documentation/devicetree/bindings/sound/sgtl5000.yaml
index d116c174b545..70b4a8831073 100644
--- a/Documentation/devicetree/bindings/sound/sgtl5000.yaml
+++ b/Documentation/devicetree/bindings/sound/sgtl5000.yaml
@@ -41,14 +41,12 @@ properties:
values of 2k, 4k or 8k. If set to 0 it will be off. If this node is not
mentioned or if the value is unknown, then micbias resistor is set to
4k.
- $ref: "/schemas/types.yaml#/definitions/uint32"
enum: [ 0, 2, 4, 8 ]
micbias-voltage-m-volts:
description: The bias voltage to be used in mVolts. The voltage can take
values from 1.25V to 3V by 250mV steps. If this node is not mentioned
or the value is unknown, then the value is set to 1.25V.
- $ref: "/schemas/types.yaml#/definitions/uint32"
enum: [ 1250, 1500, 1750, 2000, 2250, 2500, 2750, 3000 ]
lrclk-strength:
diff --git a/Documentation/devicetree/bindings/sound/sirf-audio-codec.txt b/Documentation/devicetree/bindings/sound/sirf-audio-codec.txt
deleted file mode 100644
index 062f5ec36f9b..000000000000
--- a/Documentation/devicetree/bindings/sound/sirf-audio-codec.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-SiRF internal audio CODEC
-
-Required properties:
-
- - compatible : "sirf,atlas6-audio-codec" or "sirf,prima2-audio-codec"
-
- - reg : the register address of the device.
-
- - clocks: the clock of SiRF internal audio codec
-
-Example:
-
-audiocodec: audiocodec@b0040000 {
- compatible = "sirf,atlas6-audio-codec";
- reg = <0xb0040000 0x10000>;
- clocks = <&clks 27>;
-};
diff --git a/Documentation/devicetree/bindings/sound/sirf-usp.txt b/Documentation/devicetree/bindings/sound/sirf-usp.txt
deleted file mode 100644
index 02f85b32d359..000000000000
--- a/Documentation/devicetree/bindings/sound/sirf-usp.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-* SiRF SoC USP module
-
-Required properties:
-- compatible: "sirf,prima2-usp-pcm"
-- reg: Base address and size entries:
-- dmas: List of DMA controller phandle and DMA request line ordered pairs.
-- dma-names: Identifier string for each DMA request line in the dmas property.
- These strings correspond 1:1 with the ordered pairs in dmas.
-
- One of the DMA channels will be responsible for transmission (should be
- named "tx") and one for reception (should be named "rx").
-
-- clocks: USP controller clock source
-- pinctrl-names: Must contain a "default" entry.
-- pinctrl-NNN: One property must exist for each entry in pinctrl-names.
-
-Example:
-usp0: usp@b0080000 {
- compatible = "sirf,prima2-usp-pcm";
- reg = <0xb0080000 0x10000>;
- clocks = <&clks 28>;
- dmas = <&dmac1 1>, <&dmac1 2>;
- dma-names = "rx", "tx";
- pinctrl-names = "default";
- pinctrl-0 = <&usp0_only_utfs_pins_a>;
-};
-
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-i2s.yaml b/Documentation/devicetree/bindings/sound/st,stm32-i2s.yaml
index f32410890589..6feb5a09c184 100644
--- a/Documentation/devicetree/bindings/sound/st,stm32-i2s.yaml
+++ b/Documentation/devicetree/bindings/sound/st,stm32-i2s.yaml
@@ -54,6 +54,10 @@ properties:
resets:
maxItems: 1
+ "#clock-cells":
+ description: Configure the I2S device as MCLK clock provider.
+ const: 0
+
required:
- compatible
- "#sound-dai-cells"
diff --git a/Documentation/devicetree/bindings/sound/tas2562.yaml b/Documentation/devicetree/bindings/sound/tas2562.yaml
index 27f7132ba2ef..acd4bbe69731 100644
--- a/Documentation/devicetree/bindings/sound/tas2562.yaml
+++ b/Documentation/devicetree/bindings/sound/tas2562.yaml
@@ -36,10 +36,12 @@ properties:
I2C address of the device can be one of these 0x4c, 0x4d, 0x4e or 0x4f
shut-down-gpios:
+ maxItems: 1
description: GPIO used to control the state of the device.
deprecated: true
shutdown-gpios:
+ maxItems: 1
description: GPIO used to control the state of the device.
interrupts:
diff --git a/Documentation/devicetree/bindings/sound/tas2770.yaml b/Documentation/devicetree/bindings/sound/tas2770.yaml
index 07e7f9951d2e..027bebf4e8cf 100644
--- a/Documentation/devicetree/bindings/sound/tas2770.yaml
+++ b/Documentation/devicetree/bindings/sound/tas2770.yaml
@@ -27,9 +27,11 @@ properties:
I2C address of the device can be between 0x41 and 0x48.
reset-gpio:
+ maxItems: 1
description: GPIO used to reset the device.
shutdown-gpios:
+ maxItems: 1
description: GPIO used to control the state of the device.
interrupts:
diff --git a/Documentation/devicetree/bindings/sound/tlv320adcx140.yaml b/Documentation/devicetree/bindings/sound/tlv320adcx140.yaml
index df18be9d7b15..54d64785aad2 100644
--- a/Documentation/devicetree/bindings/sound/tlv320adcx140.yaml
+++ b/Documentation/devicetree/bindings/sound/tlv320adcx140.yaml
@@ -35,6 +35,7 @@ properties:
I2C address of the device can be one of these: 0x4c, 0x4d, 0x4e or 0x4f
reset-gpios:
+ maxItems: 1
description: |
GPIO used for hardware reset.
diff --git a/Documentation/devicetree/bindings/sound/wm8962.txt b/Documentation/devicetree/bindings/sound/wm8962.txt
index dcfa9a3369fd..c36c649ddfd0 100644
--- a/Documentation/devicetree/bindings/sound/wm8962.txt
+++ b/Documentation/devicetree/bindings/sound/wm8962.txt
@@ -9,6 +9,9 @@ Required properties:
- reg : the I2C address of the device.
Optional properties:
+
+ - clocks : The clock source providing MCLK
+
- spk-mono: This is a boolean property. If present, the SPK_MONO bit
of R51 (Class D Control 2) gets set, indicating that the speaker is
in mono mode.
@@ -27,6 +30,7 @@ Example:
wm8962: codec@1a {
compatible = "wlf,wm8962";
reg = <0x1a>;
+ clocks = <&clks IMX6QDL_CLK_CKO>;
gpio-cfg = <
0x0000 /* 0:Default */
diff --git a/Documentation/devicetree/bindings/sound/zte,tdm.txt b/Documentation/devicetree/bindings/sound/zte,tdm.txt
deleted file mode 100644
index 2a07ca655264..000000000000
--- a/Documentation/devicetree/bindings/sound/zte,tdm.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-ZTE TDM DAI driver
-
-Required properties:
-
-- compatible : should be one of the following.
- * zte,zx296718-tdm
-- reg : physical base address of the controller and length of memory mapped
- region.
-- clocks : Pairs of phandle and specifier referencing the controller's clocks.
-- clock-names: "wclk" for the wclk.
- "pclk" for the pclk.
--#clock-cells: should be 1.
-- zte,tdm-dma-sysctrl : Reference to the sysctrl controller controlling
- the dma. includes:
- phandle of sysctrl.
- register offset in sysctrl for control dma.
- mask of the register that be written to sysctrl.
-
-Example:
-
- tdm: tdm@1487000 {
- compatible = "zte,zx296718-tdm";
- reg = <0x01487000 0x1000>;
- clocks = <&audiocrm AUDIO_TDM_WCLK>, <&audiocrm AUDIO_TDM_PCLK>;
- clock-names = "wclk", "pclk";
- #clock-cells = <1>;
- pinctrl-names = "default";
- pinctrl-0 = <&tdm_global_pin>;
- zte,tdm-dma-sysctrl = <&sysctrl 0x10c 4>;
- };
diff --git a/Documentation/devicetree/bindings/sound/zte,zx-aud96p22.txt b/Documentation/devicetree/bindings/sound/zte,zx-aud96p22.txt
deleted file mode 100644
index 41bb1040eb71..000000000000
--- a/Documentation/devicetree/bindings/sound/zte,zx-aud96p22.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-ZTE ZX AUD96P22 Audio Codec
-
-Required properties:
- - compatible: Must be "zte,zx-aud96p22"
- - #sound-dai-cells: Should be 0
- - reg: I2C bus slave address of AUD96P22
-
-Example:
-
- i2c0: i2c@1486000 {
- compatible = "zte,zx296718-i2c";
- reg = <0x01486000 0x1000>;
- interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&audiocrm AUDIO_I2C0_WCLK>;
- clock-frequency = <1600000>;
-
- aud96p22: codec@22 {
- compatible = "zte,zx-aud96p22";
- #sound-dai-cells = <0>;
- reg = <0x22>;
- };
- };
diff --git a/Documentation/devicetree/bindings/sound/zte,zx-i2s.txt b/Documentation/devicetree/bindings/sound/zte,zx-i2s.txt
deleted file mode 100644
index 3927251464f0..000000000000
--- a/Documentation/devicetree/bindings/sound/zte,zx-i2s.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-ZTE ZX296702 I2S controller
-
-Required properties:
- - compatible : Must be one of:
- "zte,zx296718-i2s", "zte,zx296702-i2s"
- "zte,zx296702-i2s"
- - reg : Must contain I2S core's registers location and length
- - clocks : Pairs of phandle and specifier referencing the controller's clocks.
- - clock-names: "wclk" for the wclk, "pclk" for the pclk to the I2S interface.
- - dmas: Pairs of phandle and specifier for the DMA channel that is used by
- the core. The core expects two dma channels for transmit.
- - dma-names : Must be "tx" and "rx"
-
-For more details on the 'dma', 'dma-names', 'clock' and 'clock-names' properties
-please check:
- * resource-names.txt
- * clock/clock-bindings.txt
- * dma/dma.txt
-
-Example:
- i2s0: i2s@b005000 {
- #sound-dai-cells = <0>;
- compatible = "zte,zx296718-i2s", "zte,zx296702-i2s";
- reg = <0x0b005000 0x1000>;
- clocks = <&audiocrm AUDIO_I2S0_WCLK>, <&audiocrm AUDIO_I2S0_PCLK>;
- clock-names = "wclk", "pclk";
- interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&dma 5>, <&dma 6>;
- dma-names = "tx", "rx";
- };
-
- sound {
- compatible = "simple-audio-card";
- simple-audio-card,name = "zx296702_snd";
- simple-audio-card,format = "left_j";
- simple-audio-card,bitclock-master = <&sndcodec>;
- simple-audio-card,frame-master = <&sndcodec>;
- sndcpu: simple-audio-card,cpu {
- sound-dai = <&i2s0>;
- };
-
- sndcodec: simple-audio-card,codec {
- sound-dai = <&acodec>;
- };
- };
diff --git a/Documentation/devicetree/bindings/sound/zte,zx-spdif.txt b/Documentation/devicetree/bindings/sound/zte,zx-spdif.txt
deleted file mode 100644
index 09231d7586b2..000000000000
--- a/Documentation/devicetree/bindings/sound/zte,zx-spdif.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-ZTE ZX296702 SPDIF controller
-
-Required properties:
- - compatible : Must be "zte,zx296702-spdif"
- - reg : Must contain SPDIF core's registers location and length
- - clocks : Pairs of phandle and specifier referencing the controller's clocks.
- - clock-names: "tx" for the clock to the SPDIF interface.
- - dmas: Pairs of phandle and specifier for the DMA channel that is used by
- the core. The core expects one dma channel for transmit.
- - dma-names : Must be "tx"
-
-For more details on the 'dma', 'dma-names', 'clock' and 'clock-names' properties
-please check:
- * resource-names.txt
- * clock/clock-bindings.txt
- * dma/dma.txt
-
-Example:
- spdif0: spdif0@b004000 {
- compatible = "zte,zx296702-spdif";
- reg = <0x0b004000 0x1000>;
- clocks = <&lsp0clk ZX296702_SPDIF0_DIV>;
- clock-names = "tx";
- interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&dma 4>;
- dma-names = "tx";
- };
diff --git a/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml b/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml
index 7866a655d81c..908248260afa 100644
--- a/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml
+++ b/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml
@@ -25,6 +25,7 @@ properties:
- enum:
- allwinner,sun8i-r40-spi
- allwinner,sun50i-h6-spi
+ - allwinner,sun50i-h616-spi
- const: allwinner,sun8i-h3-spi
reg:
diff --git a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt b/Documentation/devicetree/bindings/spi/cadence-quadspi.txt
index 945be7d5b236..8ace832a2d80 100644
--- a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
+++ b/Documentation/devicetree/bindings/spi/cadence-quadspi.txt
@@ -5,6 +5,7 @@ Required properties:
Generic default - "cdns,qspi-nor".
For TI 66AK2G SoC - "ti,k2g-qspi", "cdns,qspi-nor".
For TI AM654 SoC - "ti,am654-ospi", "cdns,qspi-nor".
+ For Intel LGM SoC - "intel,lgm-qspi", "cdns,qspi-nor".
- reg : Contains two entries, each of which is a tuple consisting of a
physical address and length. The first entry is the address and
length of the controller register set. The second entry is the
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml b/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml
new file mode 100644
index 000000000000..35a8045b2c70
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml
@@ -0,0 +1,117 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/nvidia,tegra210-quad.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Tegra Quad SPI Controller
+
+maintainers:
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Jonathan Hunter <jonathanh@nvidia.com>
+
+allOf:
+ - $ref: "spi-controller.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - nvidia,tegra210-qspi
+ - nvidia,tegra186-qspi
+ - nvidia,tegra194-qspi
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: qspi
+ - const: qspi_out
+
+ clocks:
+ maxItems: 2
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ maxItems: 2
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+patternProperties:
+ "@[0-9a-f]+":
+ type: object
+
+ properties:
+ spi-rx-bus-width:
+ enum: [1, 2, 4]
+
+ spi-tx-bus-width:
+ enum: [1, 2, 4]
+
+ nvidia,tx-clk-tap-delay:
+ description:
+ Delays the clock going out to the device by this tap value.
+ The tap value varies with the platform's trace lengths from the Tegra
+ QSPI controller to the corresponding slave device.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 31
+
+ nvidia,rx-clk-tap-delay:
+ description:
+ Delays the clock coming in from the device by this tap value.
+ The tap value varies with the platform's trace lengths from the Tegra
+ QSPI controller to the corresponding slave device.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+
+ required:
+ - reg
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clock-names
+ - clocks
+ - resets
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/tegra210-car.h>
+ #include <dt-bindings/reset/tegra210-car.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ spi@70410000 {
+ compatible = "nvidia,tegra210-qspi";
+ reg = <0x70410000 0x1000>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&tegra_car TEGRA210_CLK_QSPI>,
+ <&tegra_car TEGRA210_CLK_QSPI_PM>;
+ clock-names = "qspi", "qspi_out";
+ resets = <&tegra_car 211>;
+ dmas = <&apbdma 5>, <&apbdma 5>;
+ dma-names = "rx", "tx";
+
+ flash@0 {
+ compatible = "spi-nor";
+ reg = <0>;
+ spi-max-frequency = <104000000>;
+ spi-tx-bus-width = <2>;
+ spi-rx-bus-width = <2>;
+ nvidia,tx-clk-tap-delay = <0>;
+ nvidia,rx-clk-tap-delay = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/spi/realtek,rtl-spi.yaml b/Documentation/devicetree/bindings/spi/realtek,rtl-spi.yaml
new file mode 100644
index 000000000000..30a62a211984
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/realtek,rtl-spi.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/realtek,rtl-spi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek RTL838x/RTL839x SPI controller
+
+maintainers:
+ - Bert Vermeulen <bert@biot.com>
+ - Birger Koblitz <mail@birger-koblitz.de>
+
+allOf:
+ - $ref: "spi-controller.yaml#"
+
+properties:
+ compatible:
+ oneOf:
+ - const: realtek,rtl8380-spi
+ - const: realtek,rtl8382-spi
+ - const: realtek,rtl8391-spi
+ - const: realtek,rtl8392-spi
+ - const: realtek,rtl8393-spi
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ spi: spi@1200 {
+ compatible = "realtek,rtl8382-spi";
+ reg = <0x1200 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/spi/renesas,rspi.yaml b/Documentation/devicetree/bindings/spi/renesas,rspi.yaml
index 10e83cb17e8d..8397f60d80a2 100644
--- a/Documentation/devicetree/bindings/spi/renesas,rspi.yaml
+++ b/Documentation/devicetree/bindings/spi/renesas,rspi.yaml
@@ -68,6 +68,8 @@ properties:
maxItems: 1
dmas:
+ minItems: 2
+ maxItems: 4
description:
Must contain a list of pairs of references to DMA specifiers, one for
transmission, and one for reception.
diff --git a/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml b/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
index 44c7ddb4b109..b104899205f6 100644
--- a/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
+++ b/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
@@ -47,6 +47,7 @@ properties:
- renesas,msiof-r8a77980 # R-Car V3H
- renesas,msiof-r8a77990 # R-Car E3
- renesas,msiof-r8a77995 # R-Car D3
+ - renesas,msiof-r8a779a0 # R-Car V3U
- const: renesas,rcar-gen3-msiof # generic R-Car Gen3 and RZ/G2
# compatible device
- items:
diff --git a/Documentation/devicetree/bindings/spi/spi-controller.yaml b/Documentation/devicetree/bindings/spi/spi-controller.yaml
index 5f505810104d..06786f1b43d2 100644
--- a/Documentation/devicetree/bindings/spi/spi-controller.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-controller.yaml
@@ -152,8 +152,9 @@ patternProperties:
spi-rx-bus-width:
description:
Bus width to the SPI bus used for read transfers.
+ If 0 is provided, then no RX will be possible on this device.
$ref: /schemas/types.yaml#/definitions/uint32
- enum: [1, 2, 4, 8]
+ enum: [0, 1, 2, 4, 8]
default: 1
spi-rx-delay-us:
@@ -163,8 +164,9 @@ patternProperties:
spi-tx-bus-width:
description:
Bus width to the SPI bus used for write transfers.
+ If 0 is provided, then no TX will be possible on this device.
$ref: /schemas/types.yaml#/definitions/uint32
- enum: [1, 2, 4, 8]
+ enum: [0, 1, 2, 4, 8]
default: 1
spi-tx-delay-us:
diff --git a/Documentation/devicetree/bindings/spi/spi-sirf.txt b/Documentation/devicetree/bindings/spi/spi-sirf.txt
deleted file mode 100644
index ddd78ff68fae..000000000000
--- a/Documentation/devicetree/bindings/spi/spi-sirf.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-* CSR SiRFprimaII Serial Peripheral Interface
-
-Required properties:
-- compatible : Should be "sirf,prima2-spi", "sirf,prima2-usp"
- or "sirf,atlas7-usp"
-- reg : Offset and length of the register set for the device
-- interrupts : Should contain SPI interrupt
-- resets: phandle to the reset controller asserting this device in
- reset
- See ../reset/reset.txt for details.
-- dmas : Must contain an entry for each entry in clock-names.
- See ../dma/dma.txt for details.
-- dma-names : Must include the following entries:
- - rx
- - tx
-- clocks : Must contain an entry for each entry in clock-names.
- See ../clocks/clock-bindings.txt for details.
-
-- #address-cells: Number of cells required to define a chip select
- address on the SPI bus. Should be set to 1.
-- #size-cells: Should be zero.
-
-Optional properties:
-- spi-max-frequency: Specifies maximum SPI clock frequency,
- Units - Hz. Definition as per
- Documentation/devicetree/bindings/spi/spi-bus.txt
-- cs-gpios: should specify GPIOs used for chipselects.
-
-Example:
-
-spi0: spi@b00d0000 {
- compatible = "sirf,prima2-spi";
- reg = <0xb00d0000 0x10000>;
- interrupts = <15>;
- dmas = <&dmac1 9>,
- <&dmac1 4>;
- dma-names = "rx", "tx";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clks 19>;
- resets = <&rstc 26>;
-};
diff --git a/Documentation/devicetree/bindings/spi/spi-zynq-qspi.txt b/Documentation/devicetree/bindings/spi/spi-zynq-qspi.txt
deleted file mode 100644
index 16b734ad3102..000000000000
--- a/Documentation/devicetree/bindings/spi/spi-zynq-qspi.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Xilinx Zynq QSPI controller Device Tree Bindings
--------------------------------------------------------------------
-
-Required properties:
-- compatible : Should be "xlnx,zynq-qspi-1.0".
-- reg : Physical base address and size of QSPI registers map.
-- interrupts : Property with a value describing the interrupt
- number.
-- clock-names : List of input clock names - "ref_clk", "pclk"
- (See clock bindings for details).
-- clocks : Clock phandles (see clock bindings for details).
-
-Optional properties:
-- num-cs : Number of chip selects used.
-
-Example:
- qspi: spi@e000d000 {
- compatible = "xlnx,zynq-qspi-1.0";
- reg = <0xe000d000 0x1000>;
- interrupt-parent = <&intc>;
- interrupts = <0 19 4>;
- clock-names = "ref_clk", "pclk";
- clocks = <&clkc 10>, <&clkc 43>;
- num-cs = <1>;
- };
diff --git a/Documentation/devicetree/bindings/spi/xlnx,zynq-qspi.yaml b/Documentation/devicetree/bindings/spi/xlnx,zynq-qspi.yaml
new file mode 100644
index 000000000000..1f1c40a9f320
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/xlnx,zynq-qspi.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/xlnx,zynq-qspi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx Zynq QSPI controller
+
+description:
+ The Xilinx Zynq QSPI controller is used to access multi-bit serial flash
+ memory devices.
+
+allOf:
+ - $ref: "spi-controller.yaml#"
+
+maintainers:
+ - Michal Simek <michal.simek@xilinx.com>
+
+# Everything else is described in the common file
+properties:
+ compatible:
+ const: xlnx,zynq-qspi-1.0
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: reference clock
+ - description: peripheral clock
+
+ clock-names:
+ items:
+ - const: ref_clk
+ - const: pclk
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ spi@e000d000 {
+ compatible = "xlnx,zynq-qspi-1.0";
+ reg = <0xe000d000 0x1000>;
+ interrupt-parent = <&intc>;
+ interrupts = <0 19 4>;
+ clock-names = "ref_clk", "pclk";
+ clocks = <&clkc 10>, <&clkc 43>;
+ num-cs = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
index b66a07e21d1e..1c426c211e36 100644
--- a/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
+++ b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
@@ -49,6 +49,7 @@ properties:
- items:
- const: allwinner,suniv-f1c100s-system-control
- const: allwinner,sun4i-a10-system-control
+ - const: allwinner,sun50i-h616-system-control
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/sram/sram.yaml b/Documentation/devicetree/bindings/sram/sram.yaml
index 19d116ff9ddc..c1a5afa73cfe 100644
--- a/Documentation/devicetree/bindings/sram/sram.yaml
+++ b/Documentation/devicetree/bindings/sram/sram.yaml
@@ -35,6 +35,7 @@ properties:
maxItems: 1
clocks:
+ maxItems: 1
description:
A list of phandle and clock specifier pair that controls the single
SRAM clock.
@@ -46,6 +47,7 @@ properties:
const: 1
ranges:
+ maxItems: 1
description:
Should translate from local addresses within the sram to bus addresses.
@@ -72,6 +74,8 @@ patternProperties:
- allwinner,sun4i-a10-sram-d
- allwinner,sun9i-a80-smp-sram
- allwinner,sun50i-a64-sram-c
+ - amlogic,meson8-ao-arc-sram
+ - amlogic,meson8b-ao-arc-sram
- amlogic,meson8-smp-sram
- amlogic,meson8b-smp-sram
- amlogic,meson-gxbb-scp-shmem
diff --git a/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml b/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml
index 31edd051295a..bf97d1fb33e7 100644
--- a/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml
+++ b/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml
@@ -103,12 +103,12 @@ allOf:
compatible:
contains:
enum:
- - const: allwinner,sun8i-h3-ths
- - const: allwinner,sun8i-r40-ths
- - const: allwinner,sun50i-a64-ths
- - const: allwinner,sun50i-a100-ths
- - const: allwinner,sun50i-h5-ths
- - const: allwinner,sun50i-h6-ths
+ - allwinner,sun8i-h3-ths
+ - allwinner,sun8i-r40-ths
+ - allwinner,sun50i-a64-ths
+ - allwinner,sun50i-a100-ths
+ - allwinner,sun50i-h5-ths
+ - allwinner,sun50i-h6-ths
then:
required:
diff --git a/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm5.yaml b/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm5.yaml
new file mode 100644
index 000000000000..7cd364430573
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/qcom-spmi-adc-tm5.yaml
@@ -0,0 +1,153 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/thermal/qcom-spmi-adc-tm5.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm's SPMI PMIC ADC Thermal Monitoring
+maintainers:
+ - Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+
+properties:
+ compatible:
+ const: qcom,spmi-adc-tm5
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ "#thermal-sensor-cells":
+ const: 1
+ description:
+ Number of cells required to uniquely identify the thermal sensors. Since
+      we have multiple sensors, this is set to 1.
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ qcom,avg-samples:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Number of samples to be used for measurement.
+ enum:
+ - 1
+ - 2
+ - 4
+ - 8
+ - 16
+ default: 1
+
+ qcom,decimation:
+ $ref: /schemas/types.yaml#/definitions/uint32
+    description: This parameter is used to decrease the ADC sampling rate.
+      Quicker measurements can be made by reducing the decimation ratio.
+ enum:
+ - 250
+ - 420
+ - 840
+ default: 840
+
+patternProperties:
+ "^([-a-z0-9]*)@[0-7]$":
+ type: object
+ description:
+ Represent one thermal sensor.
+
+ properties:
+ reg:
+ $ref: /schemas/types.yaml#/definitions/uint32
+        description: Specify the sensor channel. There are 8 channels in PMIC5's ADC TM.
+ minimum: 0
+ maximum: 7
+
+ io-channels:
+ description:
+          From the common IIO binding. Used to pipe a PMIC ADC channel to the thermal monitor.
+
+ qcom,ratiometric:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Channel calibration type.
+          If this property is specified, the VADC will use the VDD reference
+          (1.875V) and GND for channel calibration. If the property is not found,
+          the channel will be calibrated with the 0V and 1.25V reference channels,
+          also known as absolute calibration.
+
+ qcom,hw-settle-time-us:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Time between AMUX getting configured and the ADC starting conversion.
+ enum: [15, 100, 200, 300, 400, 500, 600, 700, 1000, 2000, 4000, 8000, 16000, 32000, 64000, 128000]
+
+ qcom,pre-scaling:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+        description: Used for scaling the channel input signal before the
+          signal is fed to the VADC. The configuration for this node is to know the
+          pre-determined ratio and use it for post scaling. It is a pair of
+          integers, denoting the numerator and denominator of the fraction by
+          which the input signal is multiplied. For example, <1 3> indicates the
+          signal is scaled down to 1/3 of its value before ADC measurement. If the
+          property is not found, a default value depending on the chip will be used.
+ items:
+ - const: 1
+ - enum: [ 1, 3, 4, 6, 20, 8, 10 ]
+
+ required:
+ - reg
+ - io-channels
+
+ additionalProperties:
+ false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#address-cells"
+ - "#size-cells"
+ - "#thermal-sensor-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/iio/qcom,spmi-vadc.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ spmi_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pm8150b_adc: adc@3100 {
+ reg = <0x3100>;
+ compatible = "qcom,spmi-adc5";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #io-channel-cells = <1>;
+
+          /* Other properties are omitted */
+ conn-therm@4f {
+ reg = <ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+ };
+
+ pm8150b_adc_tm: adc-tm@3500 {
+ compatible = "qcom,spmi-adc-tm5";
+ reg = <0x3500>;
+ interrupts = <0x2 0x35 0x0 IRQ_TYPE_EDGE_RISING>;
+ #thermal-sensor-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ conn-therm@0 {
+ reg = <0>;
+ io-channels = <&pm8150b_adc ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/thermal/tango-thermal.txt b/Documentation/devicetree/bindings/thermal/tango-thermal.txt
deleted file mode 100644
index 2c918d742867..000000000000
--- a/Documentation/devicetree/bindings/thermal/tango-thermal.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-* Tango Thermal
-
-The SMP8758 SoC includes 3 instances of this temperature sensor
-(in the CPU, video decoder, and PCIe controller).
-
-Required properties:
-- #thermal-sensor-cells: Should be 0 (see Documentation/devicetree/bindings/thermal/thermal-sensor.yaml)
-- compatible: "sigma,smp8758-thermal"
-- reg: Address range of the thermal registers
-
-Example:
-
- cpu_temp: thermal@920100 {
- #thermal-sensor-cells = <0>;
- compatible = "sigma,smp8758-thermal";
- reg = <0x920100 12>;
- };
diff --git a/Documentation/devicetree/bindings/thermal/zx2967-thermal.txt b/Documentation/devicetree/bindings/thermal/zx2967-thermal.txt
deleted file mode 100644
index 3dc1c6bf0478..000000000000
--- a/Documentation/devicetree/bindings/thermal/zx2967-thermal.txt
+++ /dev/null
@@ -1,116 +0,0 @@
-* ZTE zx2967 family Thermal
-
-Required Properties:
-- compatible: should be one of the following.
- * zte,zx296718-thermal
-- reg: physical base address of the controller and length of memory mapped
- region.
-- clocks : Pairs of phandle and specifier referencing the controller's clocks.
-- clock-names: "topcrm" for the topcrm clock.
- "apb" for the apb clock.
-- #thermal-sensor-cells: must be 0.
-
-Please note: slope coefficient defined in thermal-zones section need to be
-multiplied by 1000.
-
-Example for tempsensor:
-
- tempsensor: tempsensor@148a000 {
- compatible = "zte,zx296718-thermal";
- reg = <0x0148a000 0x20>;
- clocks = <&topcrm TEMPSENSOR_GATE>, <&audiocrm AUDIO_TS_PCLK>;
- clock-names = "topcrm", "apb";
- #thermal-sensor-cells = <0>;
- };
-
-Example for cooling device:
-
- cooling_dev: cooling_dev {
- cluster0_cooling_dev: cluster0-cooling-dev {
- #cooling-cells = <2>;
- cpumask = <0xf>;
- capacitance = <1500>;
- };
-
- cluster1_cooling_dev: cluster1-cooling-dev {
- #cooling-cells = <2>;
- cpumask = <0x30>;
- capacitance = <2000>;
- };
- };
-
-Example for thermal zones:
-
- thermal-zones {
- zx296718_thermal: zx296718_thermal {
- polling-delay-passive = <500>;
- polling-delay = <1000>;
- sustainable-power = <6500>;
-
- thermal-sensors = <&tempsensor 0>;
- /*
- * slope need to be multiplied by 1000.
- */
- coefficients = <1951 (-922)>;
-
- trips {
- trip0: switch_on_temperature {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- trip1: desired_temperature {
- temperature = <100000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- crit: critical_temperature {
- temperature = <110000>;
- hysteresis = <2000>;
- type = "critical";
- };
- };
-
- cooling-maps {
- map0 {
- trip = <&trip0>;
- cooling-device = <&gpu 2 5>;
- };
-
- map1 {
- trip = <&trip0>;
- cooling-device = <&cluster0_cooling_dev 1 2>;
- };
-
- map2 {
- trip = <&trip1>;
- cooling-device = <&cluster0_cooling_dev 1 2>;
- };
-
- map3 {
- trip = <&crit>;
- cooling-device = <&cluster0_cooling_dev 1 2>;
- };
-
- map4 {
- trip = <&trip0>;
- cooling-device = <&cluster1_cooling_dev 1 2>;
- contribution = <9000>;
- };
-
- map5 {
- trip = <&trip1>;
- cooling-device = <&cluster1_cooling_dev 1 2>;
- contribution = <4096>;
- };
-
- map6 {
- trip = <&crit>;
- cooling-device = <&cluster1_cooling_dev 1 2>;
- contribution = <4096>;
- };
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml b/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml
index d918cee100ac..1c7cf32e7ac2 100644
--- a/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml
@@ -22,6 +22,8 @@ properties:
maxItems: 1
interrupts:
+ minItems: 2
+ maxItems: 6
description:
List of timers interrupts
diff --git a/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.yaml b/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.yaml
index 40fc4bcb3145..b6a6d03a08b2 100644
--- a/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.yaml
+++ b/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.yaml
@@ -46,8 +46,7 @@ required:
if:
properties:
compatible:
- items:
- const: allwinner,sun5i-a13-hstimer
+ const: allwinner,sun5i-a13-hstimer
then:
properties:
diff --git a/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml b/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
index 1a721d8af67a..a8de99b0c0f9 100644
--- a/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
@@ -18,7 +18,7 @@ properties:
- const: intel,ixp4xx-timer
reg:
- description: Should contain registers location and length
+ maxItems: 1
interrupts:
minItems: 1
diff --git a/Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt b/Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt
index ea22dfe485be..97258f1a1505 100644
--- a/Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt
+++ b/Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt
@@ -6,8 +6,7 @@ timer counters.
Required properties:
- compatible : "nuvoton,npcm750-timer" for Poleg NPCM750.
- reg : Offset and length of the register set for the device.
-- interrupts : Contain the timer interrupt with flags for
- falling edge.
+- interrupts : Contain the timer interrupt of timer 0.
- clocks : phandle of timer reference clock (usually a 25 MHz clock).
Example:
diff --git a/Documentation/devicetree/bindings/timer/sifive,clint.yaml b/Documentation/devicetree/bindings/timer/sifive,clint.yaml
index 2a0e9cd9fbcf..a35952f48742 100644
--- a/Documentation/devicetree/bindings/timer/sifive,clint.yaml
+++ b/Documentation/devicetree/bindings/timer/sifive,clint.yaml
@@ -23,15 +23,19 @@ description:
properties:
compatible:
items:
- - const: sifive,fu540-c000-clint
+ - enum:
+ - sifive,fu540-c000-clint
+ - canaan,k210-clint
- const: sifive,clint0
description:
- Should be "sifive,<chip>-clint" and "sifive,clint<version>".
+ Should be "<vendor>,<chip>-clint" and "sifive,clint<version>".
Supported compatible strings are -
"sifive,fu540-c000-clint" for the SiFive CLINT v0 as integrated
- onto the SiFive FU540 chip, and "sifive,clint0" for the SiFive
- CLINT v0 IP block with no chip integration tweaks.
+ onto the SiFive FU540 chip, "canaan,k210-clint" for the SiFive
+ CLINT v0 as integrated onto the Canaan Kendryte K210 chip, and
+ "sifive,clint0" for the SiFive CLINT v0 IP block with no chip
+ integration tweaks.
Please refer to sifive-blocks-ip-versioning.txt for details
reg:
diff --git a/Documentation/devicetree/bindings/timer/snps,dw-apb-timer.yaml b/Documentation/devicetree/bindings/timer/snps,dw-apb-timer.yaml
index d65faf289a83..d33c9205a909 100644
--- a/Documentation/devicetree/bindings/timer/snps,dw-apb-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/snps,dw-apb-timer.yaml
@@ -24,6 +24,9 @@ properties:
interrupts:
maxItems: 1
+ resets:
+ maxItems: 1
+
clocks:
minItems: 1
items:
diff --git a/Documentation/devicetree/bindings/timer/stericsson-u300-apptimer.txt b/Documentation/devicetree/bindings/timer/stericsson-u300-apptimer.txt
deleted file mode 100644
index 9499bc8ee9e3..000000000000
--- a/Documentation/devicetree/bindings/timer/stericsson-u300-apptimer.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-ST-Ericsson U300 apptimer
-
-Required properties:
-
-- compatible : should be "stericsson,u300-apptimer"
-- reg : Specifies base physical address and size of the registers.
-- interrupts : A list of 4 interrupts; one for each subtimer. These
- are, in order: OS (operating system), DD (device driver) both
- adopted for EPOC/Symbian with two specific IRQs for these tasks,
- then GP1 and GP2, which are general-purpose timers.
-
-Example:
-
-timer {
- compatible = "stericsson,u300-apptimer";
- reg = <0xc0014000 0x1000>;
- interrupts = <24 25 26 27>;
-};
diff --git a/Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt b/Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt
deleted file mode 100644
index d96c1e283e73..000000000000
--- a/Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Timer64
--------
-
-The timer64 node describes C6X event timers.
-
-Required properties:
-
-- compatible: must be "ti,c64x+timer64"
-- reg: base address and size of register region
-- interrupts: interrupt id
-
-Optional properties:
-
-- ti,dscr-dev-enable: Device ID used to enable timer IP through DSCR interface.
-
-- ti,core-mask: on multi-core SoCs, bitmask of cores allowed to use this timer.
-
-Example:
- timer0: timer@25e0000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x01 >;
- reg = <0x25e0000 0x40>;
- interrupt-parent = <&megamod_pic>;
- interrupts = < 16 >;
- };
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index bdc2dc318178..a327130d1faa 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -148,15 +148,13 @@ properties:
- maxim,max31730
# mCube 3-axis 8-bit digital accelerometer
- mcube,mc3230
- # MEMSIC magnetometer
- - memsic,mmc35240
- # MEMSIC 2-axis 8-bit digital accelerometer
- - memsic,mxc6225
# Measurement Specialities I2C temperature and humidity sensor
- meas,htu21
# Measurement Specialities I2C pressure and temperature sensor
- meas,ms5637
# Measurement Specialities I2C pressure and temperature sensor
+ - meas,ms5803
+ # Measurement Specialities I2C pressure and temperature sensor
- meas,ms5805
# Measurement Specialities I2C pressure and temperature sensor
- meas,ms5837
@@ -166,6 +164,10 @@ properties:
- meas,ms8607-temppressure
# Measurement Specialties temperature sensor
- meas,tsys01
+ # MEMSIC magnetometer
+ - memsic,mmc35240
+ # MEMSIC 2-axis 8-bit digital accelerometer
+ - memsic,mxc6225
# Microchip differential I2C ADC, 1 Channel, 18 bit
- microchip,mcp3421
# Microchip differential I2C ADC, 2 Channel, 18 bit
diff --git a/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml b/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml
index d9207bf9d894..0f520f17735e 100644
--- a/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml
+++ b/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml
@@ -39,7 +39,7 @@ properties:
maxItems: 1
phys:
- description: PHY specifier for the OTG PHY
+ maxItems: 1
phy-names:
const: usb
diff --git a/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml b/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
index c0058332b967..e349fa5de606 100644
--- a/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
+++ b/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
@@ -79,7 +79,9 @@ properties:
patternProperties:
"^usb@[0-9a-f]+$":
- type: object
+ oneOf:
+ - $ref: dwc2.yaml#
+ - $ref: snps,dwc3.yaml#
additionalProperties: false
@@ -229,6 +231,6 @@ examples:
interrupts = <30>;
dr_mode = "host";
snps,dis_u2_susphy_quirk;
- snps,quirk-frame-length-adjustment;
+ snps,quirk-frame-length-adjustment = <0x20>;
};
};
diff --git a/Documentation/devicetree/bindings/usb/brcm,usb-pinmap.yaml b/Documentation/devicetree/bindings/usb/brcm,usb-pinmap.yaml
index ffa148b9eaa8..d4618d15ecc1 100644
--- a/Documentation/devicetree/bindings/usb/brcm,usb-pinmap.yaml
+++ b/Documentation/devicetree/bindings/usb/brcm,usb-pinmap.yaml
@@ -22,6 +22,8 @@ properties:
description: Interrupt for signals mirrored to out-gpios.
in-gpios:
+ minItems: 1
+ maxItems: 2
description: Array of one or two GPIO pins used for input signals.
brcm,in-functions:
@@ -33,6 +35,7 @@ properties:
description: Array of enable and mask pairs, one per gpio in-gpios.
out-gpios:
+ maxItems: 1
description: Array of one GPIO pin used for output signals.
brcm,out-functions:
diff --git a/Documentation/devicetree/bindings/usb/dwc3-st.txt b/Documentation/devicetree/bindings/usb/dwc3-st.txt
index df0e02e1ee43..bf73de0d5b4a 100644
--- a/Documentation/devicetree/bindings/usb/dwc3-st.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3-st.txt
@@ -31,13 +31,13 @@ See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
Sub-nodes:
The dwc3 core should be added as subnode to ST DWC3 glue as shown in the
example below. The DT binding details of dwc3 can be found in:
-Documentation/devicetree/bindings/usb/dwc3.txt
+Documentation/devicetree/bindings/usb/snps,dwc3.yaml
NB: The dr_mode property described in [1] is NOT optional for this driver, as the default value
is "otg", which isn't supported by this SoC. Valid dr_mode values for dwc3-st are either "host"
or "device".
-[1] Documentation/devicetree/bindings/usb/generic.txt
+[1] Documentation/devicetree/bindings/usb/usb-drd.yaml
Example:
diff --git a/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt b/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt
index 4aae5b2cef56..a668f43bedf5 100644
--- a/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt
@@ -19,7 +19,7 @@ Example device node:
#address-cells = <0x2>;
#size-cells = <0x1>;
compatible = "xlnx,zynqmp-dwc3";
- clock-names = "bus_clk" "ref_clk";
+ clock-names = "bus_clk", "ref_clk";
clocks = <&clk125>, <&clk125>;
ranges;
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
deleted file mode 100644
index 1aae2b6160c1..000000000000
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ /dev/null
@@ -1,128 +0,0 @@
-synopsys DWC3 CORE
-
-DWC3- USB3 CONTROLLER. Complies to the generic USB binding properties
- as described in 'usb/generic.txt'
-
-Required properties:
- - compatible: must be "snps,dwc3"
- - reg : Address and length of the register set for the device
- - interrupts: Interrupts used by the dwc3 controller.
- - clock-names: list of clock names. Ideally should be "ref",
- "bus_early", "suspend" but may be less or more.
- - clocks: list of phandle and clock specifier pairs corresponding to
- entries in the clock-names property.
-
-Exception for clocks:
- clocks are optional if the parent node (i.e. glue-layer) is compatible to
- one of the following:
- "cavium,octeon-7130-usb-uctl"
- "qcom,dwc3"
- "samsung,exynos5250-dwusb3"
- "samsung,exynos5433-dwusb3"
- "samsung,exynos7-dwusb3"
- "sprd,sc9860-dwc3"
- "st,stih407-dwc3"
- "ti,am437x-dwc3"
- "ti,dwc3"
- "ti,keystone-dwc3"
- "rockchip,rk3399-dwc3"
- "xlnx,zynqmp-dwc3"
-
-Optional properties:
- - usb-phy : array of phandle for the PHY device. The first element
- in the array is expected to be a handle to the USB2/HS PHY and
- the second element is expected to be a handle to the USB3/SS PHY
- - phys: from the *Generic PHY* bindings
- - phy-names: from the *Generic PHY* bindings; supported names are "usb2-phy"
- or "usb3-phy".
- - resets: set of phandle and reset specifier pairs
- - snps,usb2-lpm-disable: indicate if we don't want to enable USB2 HW LPM
- - snps,usb3_lpm_capable: determines if platform is USB3 LPM capable
- - snps,dis-start-transfer-quirk: when set, disable isoc START TRANSFER command
- failure SW work-around for DWC_usb31 version 1.70a-ea06
- and prior.
- - snps,disable_scramble_quirk: true when SW should disable data scrambling.
- Only really useful for FPGA builds.
- - snps,has-lpm-erratum: true when DWC3 was configured with LPM Erratum enabled
- - snps,lpm-nyet-threshold: LPM NYET threshold
- - snps,u2exit_lfps_quirk: set if we want to enable u2exit lfps quirk
- - snps,u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk
- - snps,req_p1p2p3_quirk: when set, the core will always request for
- P1/P2/P3 transition sequence.
- - snps,del_p1p2p3_quirk: when set core will delay P1/P2/P3 until a certain
- amount of 8B10B errors occur.
- - snps,del_phy_power_chg_quirk: when set core will delay PHY power change
- from P0 to P1/P2/P3.
- - snps,lfps_filter_quirk: when set core will filter LFPS reception.
- - snps,rx_detect_poll_quirk: when set core will disable a 400us delay to start
- Polling LFPS after RX.Detect.
- - snps,tx_de_emphasis_quirk: when set core will set Tx de-emphasis value.
- - snps,tx_de_emphasis: the value driven to the PHY is controlled by the
- LTSSM during USB3 Compliance mode.
- - snps,dis_u3_susphy_quirk: when set core will disable USB3 suspend phy.
- - snps,dis_u2_susphy_quirk: when set core will disable USB2 suspend phy.
- - snps,dis_enblslpm_quirk: when set clears the enblslpm in GUSB2PHYCFG,
- disabling the suspend signal to the PHY.
- - snps,dis-u1-entry-quirk: set if link entering into U1 needs to be disabled.
- - snps,dis-u2-entry-quirk: set if link entering into U2 needs to be disabled.
- - snps,dis_rxdet_inp3_quirk: when set core will disable receiver detection
- in PHY P3 power state.
- - snps,dis-u2-freeclk-exists-quirk: when set, clear the u2_freeclk_exists
- in GUSB2PHYCFG, specify that USB2 PHY doesn't provide
- a free-running PHY clock.
- - snps,dis-del-phy-power-chg-quirk: when set core will change PHY power
- from P0 to P1/P2/P3 without delay.
- - snps,dis-tx-ipgap-linecheck-quirk: when set, disable u2mac linestate check
- during HS transmit.
- - snps,parkmode-disable-ss-quirk: when set, all SuperSpeed bus instances in
- park mode are disabled.
- - snps,dis_metastability_quirk: when set, disable metastability workaround.
- CAUTION: use only if you are absolutely sure of it.
- - snps,dis-split-quirk: when set, change the way URBs are handled by the
- driver. Needed to avoid -EPROTO errors with usbhid
- on some devices (Hikey 970).
- - snps,is-utmi-l1-suspend: true when DWC3 asserts output signal
- utmi_l1_suspend_n, false when asserts utmi_sleep_n
- - snps,hird-threshold: HIRD threshold
- - snps,hsphy_interface: High-Speed PHY interface selection between "utmi" for
- UTMI+ and "ulpi" for ULPI when the DWC_USB3_HSPHY_INTERFACE has value 3.
- - snps,quirk-frame-length-adjustment: Value for GFLADJ_30MHZ field of GFLADJ
- register for post-silicon frame length adjustment when the
- fladj_30mhz_sdbnd signal is invalid or incorrect.
- - snps,rx-thr-num-pkt-prd: periodic ESS RX packet threshold count - host mode
- only. Set this and rx-max-burst-prd to a valid,
- non-zero value 1-16 (DWC_usb31 programming guide
- section 1.2.4) to enable periodic ESS RX threshold.
- - snps,rx-max-burst-prd: max periodic ESS RX burst size - host mode only. Set
- this and rx-thr-num-pkt-prd to a valid, non-zero value
- 1-16 (DWC_usb31 programming guide section 1.2.4) to
- enable periodic ESS RX threshold.
- - snps,tx-thr-num-pkt-prd: periodic ESS TX packet threshold count - host mode
- only. Set this and tx-max-burst-prd to a valid,
- non-zero value 1-16 (DWC_usb31 programming guide
- section 1.2.3) to enable periodic ESS TX threshold.
- - snps,tx-max-burst-prd: max periodic ESS TX burst size - host mode only. Set
- this and tx-thr-num-pkt-prd to a valid, non-zero value
- 1-16 (DWC_usb31 programming guide section 1.2.3) to
- enable periodic ESS TX threshold.
-
- - <DEPRECATED> tx-fifo-resize: determines if the FIFO *has* to be reallocated.
- - snps,incr-burst-type-adjustment: Value for INCR burst type of GSBUSCFG0
- register, undefined length INCR burst type enable and INCRx type.
- When just one value, which means INCRX burst mode enabled. When
- more than one value, which means undefined length INCR burst type
- enabled. The values can be 1, 4, 8, 16, 32, 64, 128 and 256.
-
- - in addition all properties from usb-xhci.txt from the current directory are
- supported as well
-
-
-This is usually a subnode to DWC3 glue to which it is connected.
-
-dwc3@4a030000 {
- compatible = "snps,dwc3";
- reg = <0x4a030000 0xcfff>;
- interrupts = <0 92 4>
- usb-phy = <&usb2_phy>, <&usb3,phy>;
- snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-};
diff --git a/Documentation/devicetree/bindings/usb/exynos-usb.txt b/Documentation/devicetree/bindings/usb/exynos-usb.txt
index 6aae1544f240..f7ae79825d7d 100644
--- a/Documentation/devicetree/bindings/usb/exynos-usb.txt
+++ b/Documentation/devicetree/bindings/usb/exynos-usb.txt
@@ -93,7 +93,7 @@ Sub-nodes:
The dwc3 core should be added as subnode to Exynos dwc3 glue.
- dwc3 :
The binding details of dwc3 can be found in:
- Documentation/devicetree/bindings/usb/dwc3.txt
+ Documentation/devicetree/bindings/usb/snps,dwc3.yaml
Example:
usb@12000000 {
diff --git a/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml b/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml
new file mode 100644
index 000000000000..cb4c6f6d3a33
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (c) 2020 NXP
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/fsl,imx8mp-dwc3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP iMX8MP SoC USB Controller
+
+maintainers:
+ - Li Jun <jun.li@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx8mp-dwc3
+
+ reg:
+ maxItems: 1
+    description: Address and length of the register set for the wrapper of
+      the dwc3 core on the SoC.
+
+ "#address-cells":
+ enum: [ 1, 2 ]
+
+ "#size-cells":
+ enum: [ 1, 2 ]
+
+ dma-ranges:
+ description:
+ See section 2.3.9 of the DeviceTree Specification.
+
+ ranges: true
+
+ interrupts:
+ maxItems: 1
+ description: The interrupt that is asserted when a wakeup event is
+ received.
+
+ clocks:
+ description:
+ A list of phandle and clock-specifier pairs for the clocks
+ listed in clock-names.
+ items:
+ - description: system hsio root clock.
+ - description: suspend clock, used for usb wakeup logic.
+
+ clock-names:
+ items:
+ - const: hsio
+ - const: suspend
+
+# Required child node:
+
+patternProperties:
+ "^dwc3@[0-9a-f]+$":
+ type: object
+ description:
+      A child node must exist to represent the core DWC3 IP block.
+      The content of the node is defined in dwc3.txt.
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+ - dma-ranges
+ - ranges
+ - clocks
+ - clock-names
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8mp-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ usb3_0: usb@32f10100 {
+ compatible = "fsl,imx8mp-dwc3";
+ reg = <0x32f10100 0x8>;
+ clocks = <&clk IMX8MP_CLK_HSIO_ROOT>,
+ <&clk IMX8MP_CLK_USB_ROOT>;
+ clock-names = "hsio", "suspend";
+ interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ dma-ranges = <0x40000000 0x40000000 0xc0000000>;
+ ranges;
+
+ dwc3@38100000 {
+ compatible = "snps,dwc3";
+ reg = <0x38100000 0x10000>;
+ clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
+ <&clk IMX8MP_CLK_USB_CORE_REF>,
+ <&clk IMX8MP_CLK_USB_ROOT>;
+ clock-names = "bus_early", "ref", "suspend";
+ assigned-clocks = <&clk IMX8MP_CLK_HSIO_AXI>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_500M>;
+ assigned-clock-rates = <500000000>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&usb3_phy0>, <&usb3_phy0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ snps,dis-u2-freeclk-exists-quirk;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/generic-ehci.yaml b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
index 247ef00381ea..cf83f2d9afac 100644
--- a/Documentation/devicetree/bindings/usb/generic-ehci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
@@ -24,8 +24,53 @@ allOf:
properties:
compatible:
- contains:
- const: generic-ehci
+ oneOf:
+ - items:
+ - enum:
+ - allwinner,sun4i-a10-ehci
+ - allwinner,sun50i-a64-ehci
+ - allwinner,sun50i-h6-ehci
+ - allwinner,sun5i-a13-ehci
+ - allwinner,sun6i-a31-ehci
+ - allwinner,sun7i-a20-ehci
+ - allwinner,sun8i-a23-ehci
+ - allwinner,sun8i-h3-ehci
+ - allwinner,sun8i-r40-ehci
+ - allwinner,sun9i-a80-ehci
+ - aspeed,ast2400-ehci
+ - aspeed,ast2500-ehci
+ - aspeed,ast2600-ehci
+ - brcm,bcm3384-ehci
+ - brcm,bcm63268-ehci
+ - brcm,bcm6328-ehci
+ - brcm,bcm6358-ehci
+ - brcm,bcm6362-ehci
+ - brcm,bcm6368-ehci
+ - brcm,bcm7125-ehci
+ - brcm,bcm7346-ehci
+ - brcm,bcm7358-ehci
+ - brcm,bcm7360-ehci
+ - brcm,bcm7362-ehci
+ - brcm,bcm7420-ehci
+ - brcm,bcm7425-ehci
+ - brcm,bcm7435-ehci
+ - ibm,476gtr-ehci
+ - nxp,lpc1850-ehci
+ - qca,ar7100-ehci
+ - snps,hsdk-v1.0-ehci
+ - socionext,uniphier-ehci
+ - const: generic-ehci
+ - items:
+ - enum:
+ - cavium,octeon-6335-ehci
+ - ibm,usb-ehci-440epx
+ - ibm,usb-ehci-460ex
+ - nintendo,hollywood-usb-ehci
+ - st,spear600-ehci
+ - const: usb-ehci
+ - enum:
+ - generic-ehci
+ - usb-ehci
reg:
minItems: 1
@@ -83,7 +128,7 @@ properties:
Phandle of a companion.
phys:
- description: PHY specifier for the USB PHY
+ maxItems: 1
phy-names:
const: usb
@@ -101,7 +146,7 @@ additionalProperties: false
examples:
- |
usb@e0000300 {
- compatible = "ibm,usb-ehci-440epx", "generic-ehci";
+ compatible = "ibm,usb-ehci-440epx", "usb-ehci";
interrupt-parent = <&UIC0>;
interrupts = <0x1a 4>;
reg = <0xe0000300 90>, <0xe0000390 70>;
diff --git a/Documentation/devicetree/bindings/usb/generic-ohci.yaml b/Documentation/devicetree/bindings/usb/generic-ohci.yaml
index 2178bcc401bc..0f5f6ea702d0 100644
--- a/Documentation/devicetree/bindings/usb/generic-ohci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-ohci.yaml
@@ -14,8 +14,38 @@ maintainers:
properties:
compatible:
- contains:
- const: generic-ohci
+ oneOf:
+ - items:
+ - enum:
+ - allwinner,sun4i-a10-ohci
+ - allwinner,sun50i-a64-ohci
+ - allwinner,sun50i-h6-ohci
+ - allwinner,sun5i-a13-ohci
+ - allwinner,sun6i-a31-ohci
+ - allwinner,sun7i-a20-ohci
+ - allwinner,sun8i-a23-ohci
+ - allwinner,sun8i-h3-ohci
+ - allwinner,sun8i-r40-ohci
+ - allwinner,sun9i-a80-ohci
+ - brcm,bcm3384-ohci
+ - brcm,bcm63268-ohci
+ - brcm,bcm6328-ohci
+ - brcm,bcm6358-ohci
+ - brcm,bcm6362-ohci
+ - brcm,bcm6368-ohci
+ - brcm,bcm7125-ohci
+ - brcm,bcm7346-ohci
+ - brcm,bcm7358-ohci
+ - brcm,bcm7360-ohci
+ - brcm,bcm7362-ohci
+ - brcm,bcm7420-ohci
+ - brcm,bcm7425-ohci
+ - brcm,bcm7435-ohci
+ - ibm,476gtr-ohci
+ - ingenic,jz4740-ohci
+ - snps,hsdk-v1.0-ohci
+ - const: generic-ohci
+ - const: generic-ohci
reg:
maxItems: 1
@@ -71,7 +101,7 @@ properties:
Overrides the detected port count
phys:
- description: PHY specifier for the USB PHY
+ maxItems: 1
phy-names:
const: usb
diff --git a/Documentation/devicetree/bindings/usb/generic-xhci.yaml b/Documentation/devicetree/bindings/usb/generic-xhci.yaml
new file mode 100644
index 000000000000..23d73df96ea3
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/generic-xhci.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/generic-xhci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: USB xHCI Controller Device Tree Bindings
+
+maintainers:
+ - Mathias Nyman <mathias.nyman@intel.com>
+
+allOf:
+ - $ref: "usb-xhci.yaml#"
+
+properties:
+ compatible:
+ oneOf:
+ - description: Generic xHCI device
+ const: generic-xhci
+ - description: Armada 37xx/375/38x/8k SoCs
+ items:
+ - enum:
+ - marvell,armada3700-xhci
+ - marvell,armada-375-xhci
+ - marvell,armada-380-xhci
+ - marvell,armada-8k-xhci
+ - const: generic-xhci
+ - description: Broadcom STB SoCs with xHCI
+ enum:
+ - brcm,xhci-brcm-v2
+ - brcm,bcm7445-xhci
+ - description: Generic xHCI device
+ const: xhci-platform
+ deprecated: true
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: core
+ - const: reg
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+examples:
+ - |
+ usb@f0931000 {
+ compatible = "generic-xhci";
+ reg = <0xf0931000 0x8c8>;
+ interrupts = <0x0 0x4e 0x0>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/generic.txt b/Documentation/devicetree/bindings/usb/generic.txt
deleted file mode 100644
index ba472e7aefc9..000000000000
--- a/Documentation/devicetree/bindings/usb/generic.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-Generic USB Properties
-
-Optional properties:
- - maximum-speed: tells USB controllers we want to work up to a certain
- speed. Valid arguments are "super-speed-plus",
- "super-speed", "high-speed", "full-speed" and
- "low-speed". In case this isn't passed via DT, USB
- controllers should default to their maximum HW
- capability.
- - dr_mode: tells Dual-Role USB controllers that we want to work on a
- particular mode. Valid arguments are "host",
- "peripheral" and "otg". In case this attribute isn't
- passed via DT, USB DRD controllers should default to
- OTG.
- - phy_type: tells USB controllers that we want to configure the core to support
- a UTMI+ PHY with an 8- or 16-bit interface if UTMI+ is
- selected. Valid arguments are "utmi" and "utmi_wide".
- In case this isn't passed via DT, USB controllers should
- default to HW capability.
- - otg-rev: tells usb driver the release number of the OTG and EH supplement
- with which the device and its descriptors are compliant,
- in binary-coded decimal (i.e. 2.0 is 0200H). This
- property is used if any real OTG features(HNP/SRP/ADP)
- is enabled, if ADP is required, otg-rev should be
- 0x0200 or above.
- - companion: phandle of a companion
- - hnp-disable: tells OTG controllers we want to disable OTG HNP, normally HNP
- is the basic function of real OTG except you want it
- to be a srp-capable only B device.
- - srp-disable: tells OTG controllers we want to disable OTG SRP, SRP is
- optional for OTG device.
- - adp-disable: tells OTG controllers we want to disable OTG ADP, ADP is
- optional for OTG device.
- - usb-role-switch: boolean, indicates that the device is capable of assigning
- the USB data role (USB host or USB device) for a given
- USB connector, such as Type-C, Type-B(micro).
- see connector/usb-connector.yaml.
- - role-switch-default-mode: indicating if usb-role-switch is enabled, the
- device default operation mode of controller while usb
- role is USB_ROLE_NONE. Valid arguments are "host" and
- "peripheral". Defaults to "peripheral" if not
- specified.
-
-
-This is an attribute to a USB controller such as:
-
-dwc3@4a030000 {
- compatible = "synopsys,dwc3";
- reg = <0x4a030000 0xcfff>;
- interrupts = <0 92 4>
- usb-phy = <&usb2_phy>, <&usb3,phy>;
- maximum-speed = "super-speed";
- dr_mode = "otg";
- phy_type = "utmi_wide";
- otg-rev = <0x0200>;
- adp-disable;
-};
diff --git a/Documentation/devicetree/bindings/usb/ingenic,musb.yaml b/Documentation/devicetree/bindings/usb/ingenic,musb.yaml
index 678396eeeb78..f506225a4d57 100644
--- a/Documentation/devicetree/bindings/usb/ingenic,musb.yaml
+++ b/Documentation/devicetree/bindings/usb/ingenic,musb.yaml
@@ -40,7 +40,7 @@ properties:
- const: mc
phys:
- description: PHY specifier for the USB PHY
+ maxItems: 1
usb-role-switch:
type: boolean
diff --git a/Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml b/Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml
index dd32c10ce6c7..43b91ab62004 100644
--- a/Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml
@@ -34,11 +34,8 @@ properties:
# Required child node:
patternProperties:
- "^dwc3@[0-9a-f]+$":
- type: object
- description:
- A child node must exist to represent the core DWC3 IP block.
- The content of the node is defined in dwc3.txt.
+ "^usb@[0-9a-f]+$":
+ $ref: snps,dwc3.yaml#
required:
- compatible
@@ -68,7 +65,7 @@ examples:
#address-cells = <1>;
#size-cells = <1>;
- dwc3@34000000 {
+ usb@34000000 {
compatible = "snps,dwc3";
reg = <0x34000000 0x10000>;
interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
deleted file mode 100644
index 42d8814f903a..000000000000
--- a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
+++ /dev/null
@@ -1,121 +0,0 @@
-MT8173 xHCI
-
-The device node for Mediatek SOC USB3.0 host controller
-
-There are two scenarios: the first one only supports xHCI driver;
-the second one supports dual-role mode, and the host is based on xHCI
-driver. Take account of backward compatibility, we divide bindings
-into two parts.
-
-1st: only supports xHCI driver
-------------------------------------------------------------------------
-
-Required properties:
- - compatible : should be "mediatek,<soc-model>-xhci", "mediatek,mtk-xhci",
- soc-model is the name of SoC, such as mt8173, mt2712 etc, when using
- "mediatek,mtk-xhci" compatible string, you need SoC specific ones in
- addition, one of:
- - "mediatek,mt8173-xhci"
- - reg : specifies physical base address and size of the registers
- - reg-names: should be "mac" for xHCI MAC and "ippc" for IP port control
- - interrupts : interrupt used by the controller
- - power-domains : a phandle to USB power domain node to control USB's
- mtcmos
- - vusb33-supply : regulator of USB avdd3.3v
-
- - clocks : a list of phandle + clock-specifier pairs, one for each
- entry in clock-names
- - clock-names : must contain
- "sys_ck": controller clock used by normal mode,
- the following ones are optional:
- "ref_ck": reference clock used by low power mode etc,
- "mcu_ck": mcu_bus clock for register access,
- "dma_ck": dma_bus clock for data transfer by DMA,
- "xhci_ck": controller clock
-
- - phys : see usb-hcd.yaml in the current directory
-
-Optional properties:
- - wakeup-source : enable USB remote wakeup;
- - mediatek,syscon-wakeup : phandle to syscon used to access the register
- of the USB wakeup glue layer between xHCI and SPM; it depends on
- "wakeup-source", and has two arguments:
- - the first one : register base address of the glue layer in syscon;
- - the second one : hardware version of the glue layer
- - 1 : used by mt8173 etc
- - 2 : used by mt2712 etc
- - mediatek,u3p-dis-msk : mask to disable u3ports, bit0 for u3port0,
- bit1 for u3port1, ... etc;
- - vbus-supply : reference to the VBUS regulator;
- - usb3-lpm-capable : supports USB3.0 LPM
- - pinctrl-names : a pinctrl state named "default" must be defined
- - pinctrl-0 : pin control group
- See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
- - imod-interval-ns: default interrupt moderation interval is 5000ns
-
-additionally the properties from usb-hcd.yaml (in the current directory) are
-supported.
-
-Example:
-usb30: usb@11270000 {
- compatible = "mediatek,mt8173-xhci";
- reg = <0 0x11270000 0 0x1000>,
- <0 0x11280700 0 0x0100>;
- reg-names = "mac", "ippc";
- interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
- clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>,
- <&pericfg CLK_PERI_USB0>,
- <&pericfg CLK_PERI_USB1>;
- clock-names = "sys_ck", "ref_ck";
- phys = <&phy_port0 PHY_TYPE_USB3>,
- <&phy_port1 PHY_TYPE_USB2>;
- vusb33-supply = <&mt6397_vusb_reg>;
- vbus-supply = <&usb_p1_vbus>;
- usb3-lpm-capable;
- mediatek,syscon-wakeup = <&pericfg 0x400 1>;
- wakeup-source;
- imod-interval-ns = <10000>;
-};
-
-2nd: dual-role mode with xHCI driver
-------------------------------------------------------------------------
-
-In the case, xhci is added as subnode to mtu3. An example and the DT binding
-details of mtu3 can be found in:
-Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
-
-Required properties:
- - compatible : should be "mediatek,<soc-model>-xhci", "mediatek,mtk-xhci",
- soc-model is the name of SoC, such as mt8173, mt2712 etc, when using
- "mediatek,mtk-xhci" compatible string, you need SoC specific ones in
- addition, one of:
- - "mediatek,mt8173-xhci"
- - reg : specifies physical base address and size of the registers
- - reg-names: should be "mac" for xHCI MAC
- - interrupts : interrupt used by the host controller
- - power-domains : a phandle to USB power domain node to control USB's
- mtcmos
- - vusb33-supply : regulator of USB avdd3.3v
-
- - clocks : a list of phandle + clock-specifier pairs, one for each
- entry in clock-names
- - clock-names : must contain "sys_ck", and the following ones are optional:
- "ref_ck", "mcu_ck" and "dma_ck", "xhci_ck"
-
-Optional properties:
- - vbus-supply : reference to the VBUS regulator;
- - usb3-lpm-capable : supports USB3.0 LPM
-
-Example:
-usb30: usb@11270000 {
- compatible = "mediatek,mt8173-xhci";
- reg = <0 0x11270000 0 0x1000>;
- reg-names = "mac";
- interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
- clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>;
- clock-names = "sys_ck", "ref_ck";
- vusb33-supply = <&mt6397_vusb_reg>;
- usb3-lpm-capable;
-};
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml
new file mode 100644
index 000000000000..14f40efb3b22
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml
@@ -0,0 +1,188 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (c) 2020 MediaTek
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/mediatek,mtk-xhci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek USB3 xHCI Device Tree Bindings
+
+maintainers:
+ - Chunfeng Yun <chunfeng.yun@mediatek.com>
+
+allOf:
+ - $ref: "usb-xhci.yaml"
+
+description: |
+ There are two scenarios:
+ case 1: only supports xHCI driver;
+ case 2: supports dual-role mode, and the host is based on xHCI driver.
+
+properties:
+ # common properties for both case 1 and case 2
+ compatible:
+ items:
+ - enum:
+ - mediatek,mt2701-xhci
+ - mediatek,mt2712-xhci
+ - mediatek,mt7622-xhci
+ - mediatek,mt7623-xhci
+ - mediatek,mt7629-xhci
+ - mediatek,mt8173-xhci
+ - mediatek,mt8183-xhci
+ - const: mediatek,mtk-xhci
+
+ reg:
+ minItems: 1
+ items:
+ - description: the registers of xHCI MAC
+ - description: the registers of IP Port Control
+
+ reg-names:
+ minItems: 1
+ items:
+ - const: mac
+ - const: ippc # optional, only needed for case 1.
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ description: A phandle to USB power domain node to control USB's MTCMOS
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ items:
+ - description: Controller clock used by normal mode
+ - description: Reference clock used by low power mode etc
+ - description: Mcu bus clock for register access
+ - description: DMA bus clock for data transfer
+ - description: controller clock
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: sys_ck # required, the following ones are optional
+ - const: ref_ck
+ - const: mcu_ck
+ - const: dma_ck
+ - const: xhci_ck
+
+ assigned-clocks:
+ minItems: 1
+ maxItems: 5
+
+ assigned-clock-parents:
+ minItems: 1
+ maxItems: 5
+
+ phys:
+ description:
+      List of all PHYs used on this HCD; it's better to keep the PHYs in the
+      same order as the hardware layout.
+ minItems: 1
+ items:
+ - description: USB2/HS PHY # required, others are optional
+ - description: USB3/SS(P) PHY
+ - description: USB2/HS PHY
+ - description: USB3/SS(P) PHY
+ - description: USB2/HS PHY
+ - description: USB3/SS(P) PHY
+ - description: USB2/HS PHY
+ - description: USB3/SS(P) PHY
+ - description: USB2/HS PHY
+
+ vusb33-supply:
+ description: Regulator of USB AVDD3.3v
+
+ vbus-supply:
+ description: Regulator of USB VBUS5v
+
+ usb3-lpm-capable:
+ description: supports USB3.0 LPM
+ type: boolean
+
+ imod-interval-ns:
+ description:
+      Interrupt moderation interval value; on MTK's controller it is 8 times
+      the value defined in the xHCI spec.
+ default: 5000
+
+ # the following properties are only used for case 1
+ wakeup-source:
+ description: enable USB remote wakeup, see power/wakeup-source.txt
+ type: boolean
+
+ mediatek,syscon-wakeup:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+ description:
+ A phandle to syscon used to access the register of the USB wakeup glue
+ layer between xHCI and SPM, the field should always be 3 cells long.
+ items:
+ items:
+ - description:
+ The first cell represents a phandle to syscon
+ - description:
+ The second cell represents the register base address of the glue
+ layer in syscon
+ - description:
+ The third cell represents the hardware version of the glue layer,
+ 1 is used by mt8173 etc, 2 is used by mt2712 etc
+ enum: [1, 2]
+
+ mediatek,u3p-dis-msk:
+ $ref: /schemas/types.yaml#/definitions/uint32
+    description: The mask to disable u3ports, bit0 for u3port0,
+      bit1 for u3port1, and so on.
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "@[0-9a-f]{1}$":
+ type: object
+    description: The hard-wired USB devices.
+
+dependencies:
+ wakeup-source: [ 'mediatek,syscon-wakeup' ]
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/mt8173-clk.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/phy/phy.h>
+ #include <dt-bindings/power/mt8173-power.h>
+
+ usb@11270000 {
+ compatible = "mediatek,mt8173-xhci", "mediatek,mtk-xhci";
+ reg = <0x11270000 0x1000>, <0x11280700 0x0100>;
+ reg-names = "mac", "ippc";
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>;
+ clock-names = "sys_ck", "ref_ck";
+ phys = <&u3port0 PHY_TYPE_USB3>, <&u2port1 PHY_TYPE_USB2>;
+ vusb33-supply = <&mt6397_vusb_reg>;
+ vbus-supply = <&usb_p1_vbus>;
+ imod-interval-ns = <10000>;
+ mediatek,syscon-wakeup = <&pericfg 0x400 1>;
+ wakeup-source;
+ usb3-lpm-capable;
+ };
+...
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt b/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
deleted file mode 100644
index a82ca438aec1..000000000000
--- a/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
+++ /dev/null
@@ -1,108 +0,0 @@
-The device node for Mediatek USB3.0 DRD controller
-
-Required properties:
- - compatible : should be "mediatek,<soc-model>-mtu3", "mediatek,mtu3",
- soc-model is the name of SoC, such as mt8173, mt2712 etc,
- when using "mediatek,mtu3" compatible string, you need SoC specific
- ones in addition, one of:
- - "mediatek,mt8173-mtu3"
- - reg : specifies physical base address and size of the registers
- - reg-names: should be "mac" for device IP and "ippc" for IP port control
- - interrupts : interrupt used by the device IP
- - power-domains : a phandle to USB power domain node to control USB's
- mtcmos
- - vusb33-supply : regulator of USB avdd3.3v
- - clocks : a list of phandle + clock-specifier pairs, one for each
- entry in clock-names
- - clock-names : must contain "sys_ck" for clock of controller,
- the following clocks are optional:
- "ref_ck", "mcu_ck" and "dma_ck";
- - phys : see usb-hcd.yaml in the current directory
- - dr_mode : should be one of "host", "peripheral" or "otg",
- refer to usb/generic.txt
-
-Optional properties:
- - #address-cells, #size-cells : should be '2' if the device has sub-nodes
- with 'reg' property
- - ranges : allows valid 1:1 translation between child's address space and
- parent's address space
- - extcon : external connector for vbus and idpin changes detection, needed
- when supports dual-role mode.
- it's considered valid for compatibility reasons, not allowed for
- new bindings, and use "usb-role-switch" property instead.
- - vbus-supply : reference to the VBUS regulator, needed when supports
- dual-role mode.
- it's considered valid for compatibility reasons, not allowed for
- new bindings, and put into a usb-connector node.
- see connector/usb-connector.yaml.
- - pinctrl-names : a pinctrl state named "default" is optional, and need be
- defined if auto drd switch is enabled, that means the property dr_mode
- is set as "otg", and meanwhile the property "mediatek,enable-manual-drd"
- is not set.
- - pinctrl-0 : pin control group
- See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
-
- - maximum-speed : valid arguments are "super-speed", "high-speed" and
- "full-speed"; refer to usb/generic.txt
- - usb-role-switch : use USB Role Switch to support dual-role switch, but
- not extcon; see usb/generic.txt.
- - enable-manual-drd : supports manual dual-role switch via debugfs; usually
- used when receptacle is TYPE-A and also wants to support dual-role
- mode.
- - wakeup-source: enable USB remote wakeup of host mode.
- - mediatek,syscon-wakeup : phandle to syscon used to access the register
- of the USB wakeup glue layer between SSUSB and SPM; it depends on
- "wakeup-source", and has two arguments:
- - the first one : register base address of the glue layer in syscon;
- - the second one : hardware version of the glue layer
- - 1 : used by mt8173 etc
- - 2 : used by mt2712 etc
- - mediatek,u3p-dis-msk : mask to disable u3ports, bit0 for u3port0,
- bit1 for u3port1, ... etc;
-
-additionally the properties from usb-hcd.yaml (in the current directory) are
-supported.
-
-Sub-nodes:
-The xhci should be added as subnode to mtu3 as shown in the following example
-if host mode is enabled. The DT binding details of xhci can be found in:
-Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
-
-The port would be added as subnode if use "usb-role-switch" property.
- see graph.txt
-
-Example:
-ssusb: usb@11271000 {
- compatible = "mediatek,mt8173-mtu3";
- reg = <0 0x11271000 0 0x3000>,
- <0 0x11280700 0 0x0100>;
- reg-names = "mac", "ippc";
- interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_LOW>;
- phys = <&phy_port0 PHY_TYPE_USB3>,
- <&phy_port1 PHY_TYPE_USB2>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
- clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>,
- <&pericfg CLK_PERI_USB0>,
- <&pericfg CLK_PERI_USB1>;
- clock-names = "sys_ck", "ref_ck";
- vusb33-supply = <&mt6397_vusb_reg>;
- vbus-supply = <&usb_p0_vbus>;
- extcon = <&extcon_usb>;
- dr_mode = "otg";
- wakeup-source;
- mediatek,syscon-wakeup = <&pericfg 0x400 1>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- usb_host: xhci@11270000 {
- compatible = "mediatek,mt8173-xhci";
- reg = <0 0x11270000 0 0x1000>;
- reg-names = "mac";
- interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
- power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
- clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>;
- clock-names = "sys_ck", "ref_ck";
- vusb33-supply = <&mt6397_vusb_reg>;
- };
-};
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml b/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml
new file mode 100644
index 000000000000..f5c04b9d2de9
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml
@@ -0,0 +1,287 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (c) 2020 MediaTek
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/mediatek,mtu3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek USB3 DRD Controller Device Tree Bindings
+
+maintainers:
+ - Chunfeng Yun <chunfeng.yun@mediatek.com>
+
+allOf:
+ - $ref: "usb-drd.yaml"
+
+description: |
+ The DRD controller has a glue layer IPPC (IP Port Control), and its host is
+ based on xHCI.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - mediatek,mt2712-mtu3
+ - mediatek,mt8173-mtu3
+ - mediatek,mt8183-mtu3
+ - const: mediatek,mtu3
+
+ reg:
+ items:
+ - description: the registers of device MAC
+ - description: the registers of IP Port Control
+
+ reg-names:
+ items:
+ - const: mac
+ - const: ippc
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ description: A phandle to USB power domain node to control USB's MTCMOS
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ items:
+ - description: Controller clock used by normal mode
+ - description: Reference clock used by low power mode etc
+ - description: MCU bus clock for register access
+ - description: DMA bus clock for data transfer
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: sys_ck # required, others are optional
+ - const: ref_ck
+ - const: mcu_ck
+ - const: dma_ck
+
+ phys:
+ description:
+ List of all the USB PHYs used; it is best to keep the sequence
+ matching the hardware layout.
+ minItems: 1
+ items:
+ - description: USB2/HS PHY # required, others are optional
+ - description: USB3/SS(P) PHY
+ - description: USB2/HS PHY # the following are for backward compatibility
+ - description: USB3/SS(P) PHY
+ - description: USB2/HS PHY
+ - description: USB3/SS(P) PHY
+ - description: USB2/HS PHY
+ - description: USB3/SS(P) PHY
+ - description: USB2/HS PHY
+
+ vusb33-supply:
+ description: Regulator of USB AVDD3.3v
+
+ vbus-supply:
+ deprecated: true
+ description: |
+ Regulator of USB VBUS 5V, needed when dual-role mode is supported.
+ In particular, if an output GPIO is used to control a VBUS regulator, it
+ should be modelled as a regulator. See bindings/regulator/fixed-regulator.yaml
+ It's considered valid for compatibility reasons, not allowed for
+ new bindings; the supply should be put into a usb-connector node instead.
+
+ dr_mode:
+ enum: [host, peripheral, otg]
+ default: otg
+
+ maximum-speed:
+ enum: [super-speed-plus, super-speed, high-speed, full-speed]
+
+ "#address-cells":
+ enum: [1, 2]
+
+ "#size-cells":
+ enum: [1, 2]
+
+ ranges: true
+
+ extcon:
+ deprecated: true
+ description: |
+ Phandle to the extcon device detecting the IDDIG/VBUS state, needed
+ when dual-role mode is supported.
+ It's considered valid for compatibility reasons, not allowed for
+ new bindings; use the "usb-role-switch" property instead.
+
+ usb-role-switch:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: Support role switch.
+ type: boolean
+
+ connector:
+ $ref: /connector/usb-connector.yaml#
+ description:
+ Connector for dual role switch, especially for "gpio-usb-b-connector"
+ type: object
+
+ port:
+ description:
+ Any connector to the data bus of this controller should be modelled
+ using the OF graph bindings specified, if the "usb-role-switch"
+ property is used. See graph.txt
+ type: object
+
+ enable-manual-drd:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Supports manual dual-role switch via debugfs; usually used when the
+ receptacle is Type-A and dual-role mode is also wanted.
+ type: boolean
+
+ wakeup-source:
+ description: enable USB remote wakeup, see power/wakeup-source.txt
+ type: boolean
+
+ mediatek,syscon-wakeup:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+ description:
+ A phandle to syscon used to access the register of the USB wakeup glue
+ layer between xHCI and SPM; the field should always be 3 cells long.
+ items:
+ items:
+ - description:
+ The first cell represents a phandle to syscon
+ - description:
+ The second cell represents the register base address of the glue
+ layer in syscon
+ - description:
+ The third cell represents the hardware version of the glue layer,
+ 1 is used by mt8173 etc, 2 is used by mt2712 etc
+ enum: [1, 2]
+
+ mediatek,u3p-dis-msk:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: The mask to disable u3ports, bit0 for u3port0,
+ bit1 for u3port1, ... etc
+
+# Required child node when support dual-role
+patternProperties:
+ "^usb@[0-9a-f]+$":
+ type: object
+ $ref: /usb/mediatek,mtk-xhci.yaml#
+ description:
+ The xhci should be added as subnode to mtu3 as shown in the following
+ example if the host mode is enabled.
+
+dependencies:
+ connector: [ 'usb-role-switch' ]
+ port: [ 'usb-role-switch' ]
+ wakeup-source: [ 'mediatek,syscon-wakeup' ]
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ # Dual role switch by extcon
+ - |
+ #include <dt-bindings/clock/mt8173-clk.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/phy/phy.h>
+ #include <dt-bindings/power/mt8173-power.h>
+
+ usb@11271000 {
+ compatible = "mediatek,mt8173-mtu3", "mediatek,mtu3";
+ reg = <0x11271000 0x3000>, <0x11280700 0x0100>;
+ reg-names = "mac", "ippc";
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_LOW>;
+ phys = <&phy_port0 PHY_TYPE_USB3>, <&phy_port1 PHY_TYPE_USB2>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>;
+ clock-names = "sys_ck";
+ vusb33-supply = <&mt6397_vusb_reg>;
+ vbus-supply = <&usb_p0_vbus>;
+ extcon = <&extcon_usb>;
+ dr_mode = "otg";
+ wakeup-source;
+ mediatek,syscon-wakeup = <&pericfg 0x400 1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ xhci: usb@11270000 {
+ compatible = "mediatek,mt8173-xhci", "mediatek,mtk-xhci";
+ reg = <0x11270000 0x1000>;
+ reg-names = "mac";
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>;
+ clock-names = "sys_ck", "ref_ck";
+ vusb33-supply = <&mt6397_vusb_reg>;
+ };
+ };
+
+ # Enable/disable device by an input gpio for VBUS pin
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/power/mt2712-power.h>
+
+ usb@112c1000 {
+ compatible = "mediatek,mt2712-mtu3", "mediatek,mtu3";
+ reg = <0x112c1000 0x3000>, <0x112d0700 0x0100>;
+ reg-names = "mac", "ippc";
+ interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_LOW>;
+ phys = <&u2port2 PHY_TYPE_USB2>;
+ power-domains = <&scpsys MT2712_POWER_DOMAIN_USB2>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>;
+ clock-names = "sys_ck";
+ dr_mode = "peripheral";
+ usb-role-switch;
+
+ connector {
+ compatible = "gpio-usb-b-connector", "usb-b-connector";
+ type = "micro";
+ vbus-gpios = <&pio 13 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ # Dual role switch with type-c
+ - |
+ usb@11201000 {
+ compatible ="mediatek,mt8183-mtu3", "mediatek,mtu3";
+ reg = <0x11201000 0x2e00>, <0x11203e00 0x0100>;
+ reg-names = "mac", "ippc";
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_LOW>;
+ phys = <&u2port0 PHY_TYPE_USB2>;
+ clocks = <&clk26m>;
+ clock-names = "sys_ck";
+ mediatek,syscon-wakeup = <&pericfg 0x400 1>;
+ wakeup-source;
+ dr_mode = "otg";
+ usb-role-switch;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ host: usb@11200000 {
+ compatible = "mediatek,mt8183-xhci", "mediatek,mtk-xhci";
+ reg = <0x11200000 0x1000>;
+ reg-names = "mac";
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&clk26m>;
+ clock-names = "sys_ck";
+ };
+
+ port {
+ usb_role_sw: endpoint {
+ remote-endpoint = <&hs_ep>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/usb/mediatek,musb.txt b/Documentation/devicetree/bindings/usb/mediatek,musb.txt
deleted file mode 100644
index 5eedb0296562..000000000000
--- a/Documentation/devicetree/bindings/usb/mediatek,musb.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-MediaTek musb DRD/OTG controller
--------------------------------------------
-
-Required properties:
- - compatible : should be one of:
- "mediatek,mt2701-musb"
- ...
- followed by "mediatek,mtk-musb"
- - reg : specifies physical base address and size of
- the registers
- - interrupts : interrupt used by musb controller
- - interrupt-names : must be "mc"
- - phys : PHY specifier for the OTG phy
- - dr_mode : should be one of "host", "peripheral" or "otg",
- refer to usb/generic.txt
- - clocks : a list of phandle + clock-specifier pairs, one for
- each entry in clock-names
- - clock-names : must contain "main", "mcu", "univpll"
- for clocks of controller
-
-Optional properties:
- - power-domains : a phandle to USB power domain node to control USB's
- MTCMOS
-
-Required child nodes:
- usb connector node as defined in bindings/connector/usb-connector.yaml
-Optional properties:
- - id-gpios : input GPIO for USB ID pin.
- - vbus-gpios : input GPIO for USB VBUS pin.
- - vbus-supply : reference to the VBUS regulator, needed when supports
- dual-role mode
- - usb-role-switch : use USB Role Switch to support dual-role switch, see
- usb/generic.txt.
-
-Example:
-
-usb2: usb@11200000 {
- compatible = "mediatek,mt2701-musb",
- "mediatek,mtk-musb";
- reg = <0 0x11200000 0 0x1000>;
- interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
- interrupt-names = "mc";
- phys = <&u2port2 PHY_TYPE_USB2>;
- dr_mode = "otg";
- clocks = <&pericfg CLK_PERI_USB0>,
- <&pericfg CLK_PERI_USB0_MCU>,
- <&pericfg CLK_PERI_USB_SLV>;
- clock-names = "main","mcu","univpll";
- power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
- usb-role-switch;
- connector{
- compatible = "gpio-usb-b-connector", "usb-b-connector";
- type = "micro";
- id-gpios = <&pio 44 GPIO_ACTIVE_HIGH>;
- vbus-supply = <&usb_vbus>;
- };
-};
diff --git a/Documentation/devicetree/bindings/usb/mediatek,musb.yaml b/Documentation/devicetree/bindings/usb/mediatek,musb.yaml
new file mode 100644
index 000000000000..84ddacfdbe9b
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/mediatek,musb.yaml
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (c) 2020 MediaTek
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/mediatek,musb.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek MUSB DRD/OTG Controller Device Tree Bindings
+
+maintainers:
+ - Min Guo <min.guo@mediatek.com>
+
+properties:
+ $nodename:
+ pattern: '^usb@[0-9a-f]+$'
+
+ compatible:
+ items:
+ - enum:
+ - mediatek,mt8516-musb
+ - mediatek,mt2701-musb
+ - const: mediatek,mtk-musb
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ items:
+ - const: mc
+
+ clocks:
+ items:
+ - description: The main/core clock
+ - description: The system bus clock
+ - description: The 48 MHz clock
+
+ clock-names:
+ items:
+ - const: main
+ - const: mcu
+ - const: univpll
+
+ phys:
+ maxItems: 1
+
+ usb-role-switch:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: Support role switch. See usb/generic.txt
+ type: boolean
+
+ dr_mode:
+ enum:
+ - host
+ - otg
+ - peripheral
+
+ power-domains:
+ description: A phandle to USB power domain node to control USB's MTCMOS
+ maxItems: 1
+
+ connector:
+ $ref: /connector/usb-connector.yaml#
+ description: Connector for dual role switch
+ type: object
+
+dependencies:
+ usb-role-switch: [ 'connector' ]
+ connector: [ 'usb-role-switch' ]
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - phys
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/mt2701-clk.h>
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/phy/phy.h>
+ #include <dt-bindings/power/mt2701-power.h>
+
+ usb@11200000 {
+ compatible = "mediatek,mt2701-musb", "mediatek,mtk-musb";
+ reg = <0x11200000 0x1000>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "mc";
+ phys = <&u2port2 PHY_TYPE_USB2>;
+ dr_mode = "otg";
+ clocks = <&pericfg CLK_PERI_USB0>,
+ <&pericfg CLK_PERI_USB0_MCU>,
+ <&pericfg CLK_PERI_USB_SLV>;
+ clock-names = "main","mcu","univpll";
+ power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
+ usb-role-switch;
+
+ connector {
+ compatible = "gpio-usb-b-connector", "usb-b-connector";
+ type = "micro";
+ id-gpios = <&pio 44 GPIO_ACTIVE_HIGH>;
+ vbus-supply = <&usb_vbus>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/usb/omap-usb.txt b/Documentation/devicetree/bindings/usb/omap-usb.txt
index 38d9bb8507cf..f0dbc5ae45ae 100644
--- a/Documentation/devicetree/bindings/usb/omap-usb.txt
+++ b/Documentation/devicetree/bindings/usb/omap-usb.txt
@@ -65,7 +65,7 @@ Sub-nodes:
The dwc3 core should be added as subnode to omap dwc3 glue.
- dwc3 :
The binding details of dwc3 can be found in:
- Documentation/devicetree/bindings/usb/dwc3.txt
+ Documentation/devicetree/bindings/usb/snps,dwc3.yaml
omap_dwc3 {
compatible = "ti,dwc3";
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
index 2cf525d21e05..c3cbd1fa9944 100644
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
@@ -17,6 +17,10 @@ properties:
- qcom,msm8998-dwc3
- qcom,sc7180-dwc3
- qcom,sdm845-dwc3
+ - qcom,sdx55-dwc3
+ - qcom,sm8150-dwc3
+ - qcom,sm8250-dwc3
+ - qcom,sm8350-dwc3
- const: qcom,dwc3
reg:
@@ -103,11 +107,8 @@ properties:
# Required child node:
patternProperties:
- "^dwc3@[0-9a-f]+$":
- type: object
- description:
- A child node must exist to represent the core DWC3 IP block
- The content of the node is defined in dwc3.txt.
+ "^usb@[0-9a-f]+$":
+ $ref: snps,dwc3.yaml#
required:
- compatible
@@ -162,7 +163,7 @@ examples:
resets = <&gcc GCC_USB30_PRIM_BCR>;
- dwc3@a600000 {
+ usb@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xcd00>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/usb/renesas,usb-xhci.yaml b/Documentation/devicetree/bindings/usb/renesas,usb-xhci.yaml
index 22603256ddf8..4c5efaf02308 100644
--- a/Documentation/devicetree/bindings/usb/renesas,usb-xhci.yaml
+++ b/Documentation/devicetree/bindings/usb/renesas,usb-xhci.yaml
@@ -11,7 +11,7 @@ maintainers:
- Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
allOf:
- - $ref: "usb-hcd.yaml"
+ - $ref: "usb-xhci.yaml"
properties:
compatible:
@@ -68,7 +68,7 @@ required:
- power-domains
- resets
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml b/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml
index 929a3f413b44..9fcf54b10b07 100644
--- a/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml
+++ b/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml
@@ -54,18 +54,19 @@ properties:
description: phandle of a companion.
ports:
+ $ref: /schemas/graph.yaml#/properties/ports
description: |
any connector to the data bus of this controller should be modelled
using the OF graph bindings specified, if the "usb-role-switch"
property is used.
- type: object
+
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: High Speed (HS) data bus.
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Super Speed (SS) data bus.
required:
diff --git a/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
index 54c361d4a7af..e67223d90bb7 100644
--- a/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
+++ b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
@@ -68,6 +68,7 @@ properties:
Integer to use BUSWAIT register.
renesas,enable-gpio:
+ maxItems: 1
description: |
gpio specifier to check GPIO determining if USB function should be
enabled.
diff --git a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
deleted file mode 100644
index 94520493233b..000000000000
--- a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-Rockchip SuperSpeed DWC3 USB SoC controller
-
-Required properties:
-- compatible: should contain "rockchip,rk3399-dwc3" for rk3399 SoC
-- clocks: A list of phandle + clock-specifier pairs for the
- clocks listed in clock-names
-- clock-names: Should contain the following:
- "ref_clk" Controller reference clk, have to be 24 MHz
- "suspend_clk" Controller suspend clk, have to be 24 MHz or 32 KHz
- "bus_clk" Master/Core clock, have to be >= 62.5 MHz for SS
- operation and >= 30MHz for HS operation
- "grf_clk" Controller grf clk
-
-Required child node:
-A child node must exist to represent the core DWC3 IP block. The name of
-the node is not important. The content of the node is defined in dwc3.txt.
-
-Phy documentation is provided in the following places:
-Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml - USB2.0 PHY
-Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt - Type-C PHY
-
-Example device nodes:
-
- usbdrd3_0: usb@fe800000 {
- compatible = "rockchip,rk3399-dwc3";
- clocks = <&cru SCLK_USB3OTG0_REF>, <&cru SCLK_USB3OTG0_SUSPEND>,
- <&cru ACLK_USB3OTG0>, <&cru ACLK_USB3_GRF>;
- clock-names = "ref_clk", "suspend_clk",
- "bus_clk", "grf_clk";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
- usbdrd_dwc3_0: dwc3@fe800000 {
- compatible = "snps,dwc3";
- reg = <0x0 0xfe800000 0x0 0x100000>;
- interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
- dr_mode = "otg";
- };
- };
-
- usbdrd3_1: usb@fe900000 {
- compatible = "rockchip,rk3399-dwc3";
- clocks = <&cru SCLK_USB3OTG1_REF>, <&cru SCLK_USB3OTG1_SUSPEND>,
- <&cru ACLK_USB3OTG1>, <&cru ACLK_USB3_GRF>;
- clock-names = "ref_clk", "suspend_clk",
- "bus_clk", "grf_clk";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
- usbdrd_dwc3_1: dwc3@fe900000 {
- compatible = "snps,dwc3";
- reg = <0x0 0xfe900000 0x0 0x100000>;
- interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
- dr_mode = "otg";
- };
- };
diff --git a/Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml b/Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml
new file mode 100644
index 000000000000..04077f2d7faf
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml
@@ -0,0 +1,108 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/rockchip,dwc3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip SuperSpeed DWC3 USB SoC controller
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+description:
+ The common content of the node is defined in snps,dwc3.yaml.
+
+ Phy documentation is provided in the following places.
+
+ USB2.0 PHY
+ Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml
+
+ Type-C PHY
+ Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt
+
+allOf:
+ - $ref: snps,dwc3.yaml#
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,rk3328-dwc3
+ - rockchip,rk3399-dwc3
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - rockchip,rk3328-dwc3
+ - rockchip,rk3399-dwc3
+ - const: snps,dwc3
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 3
+ items:
+ - description:
+ Controller reference clock, must be 24 MHz
+ - description:
+ Controller suspend clock, must be 24 MHz or 32 kHz
+ - description:
+ Master/Core clock, must be >= 62.5 MHz for SS
+ operation and >= 30 MHz for HS operation
+ - description:
+ Controller grf clock
+
+ clock-names:
+ minItems: 3
+ items:
+ - const: ref_clk
+ - const: suspend_clk
+ - const: bus_clk
+ - const: grf_clk
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: usb3-otg
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3399-cru.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ usbdrd3_0: usb@fe800000 {
+ compatible = "rockchip,rk3399-dwc3", "snps,dwc3";
+ reg = <0x0 0xfe800000 0x0 0x100000>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_USB3OTG0_REF>, <&cru SCLK_USB3OTG0_SUSPEND>,
+ <&cru ACLK_USB3OTG0>, <&cru ACLK_USB3_GRF>;
+ clock-names = "ref_clk", "suspend_clk",
+ "bus_clk", "grf_clk";
+ dr_mode = "otg";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
new file mode 100644
index 000000000000..2247da77eac1
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
@@ -0,0 +1,332 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/snps,dwc3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys DesignWare USB3 Controller
+
+maintainers:
+ - Felipe Balbi <balbi@kernel.org>
+
+description:
+ This is usually a subnode to DWC3 glue to which it is connected, but can also
+ be presented as a standalone DT node with an optional vendor-specific
+ compatible string.
+
+allOf:
+ - $ref: usb-drd.yaml#
+ - if:
+ properties:
+ dr_mode:
+ const: peripheral
+
+ required:
+ - dr_mode
+ then:
+ $ref: usb.yaml#
+ else:
+ $ref: usb-xhci.yaml#
+
+properties:
+ compatible:
+ contains:
+ oneOf:
+ - const: snps,dwc3
+ - const: synopsys,dwc3
+ deprecated: true
+
+ interrupts:
+ description:
+ It's either a single common DWC3 interrupt (dwc_usb3) or individual
+ interrupts for the host, gadget and DRD modes.
+ minItems: 1
+ maxItems: 3
+
+ interrupt-names:
+ minItems: 1
+ maxItems: 3
+ oneOf:
+ - const: dwc_usb3
+ - items:
+ enum: [host, peripheral, otg]
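+ # For illustration only: a platform with one combined interrupt would use
+ #   interrupt-names = "dwc_usb3";
+ # while a platform with separate lines would list, e.g., "host",
+ # "peripheral", "otg" to match its interrupts entries.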
+
+ clocks:
+ description:
+ In general the core supports three types of clocks. bus_early is a
+ SoC Bus Clock (AHB/AXI/Native). ref generates ITP when the UTMI/ULPI
+ PHY is suspended. suspend clocks a small part of the USB3 core when
+ the SS PHY is in P3. Particular cases may differ from that, having
+ fewer or more clock sources with other names.
+
+ clock-names:
+ contains:
+ anyOf:
+ - enum: [bus_early, ref, suspend]
+ - true
+
+ usb-phy:
+ minItems: 1
+ items:
+ - description: USB2/HS PHY
+ - description: USB3/SS PHY
+
+ phys:
+ minItems: 1
+ items:
+ - description: USB2/HS PHY
+ - description: USB3/SS PHY
+
+ phy-names:
+ minItems: 1
+ items:
+ - const: usb2-phy
+ - const: usb3-phy
+
+ resets:
+ minItems: 1
+
+ snps,usb2-lpm-disable:
+ description: Indicates if we don't want to enable USB2 HW LPM
+ type: boolean
+
+ snps,usb3_lpm_capable:
+ description: Determines if platform is USB3 LPM capable
+ type: boolean
+
+ snps,dis-start-transfer-quirk:
+ description:
+ When set, disable isoc START TRANSFER command failure SW work-around
+ for DWC_usb31 version 1.70a-ea06 and prior.
+ type: boolean
+
+ snps,disable_scramble_quirk:
+ description:
+ True when SW should disable data scrambling. Only really useful for FPGA
+ builds.
+ type: boolean
+
+ snps,has-lpm-erratum:
+ description: True when DWC3 was configured with LPM Erratum enabled
+ type: boolean
+
+ snps,lpm-nyet-threshold:
+ description: LPM NYET threshold
+ $ref: /schemas/types.yaml#/definitions/uint8
+
+ snps,u2exit_lfps_quirk:
+ description: Set if we want to enable u2exit lfps quirk
+ type: boolean
+
+ snps,u2ss_inp3_quirk:
+ description: Set if we enable P3 OK for U2/SS Inactive quirk
+ type: boolean
+
+ snps,req_p1p2p3_quirk:
+ description:
+ When set, the core will always request for P1/P2/P3 transition sequence.
+ type: boolean
+
+ snps,del_p1p2p3_quirk:
+ description:
+ When set core will delay P1/P2/P3 until a certain amount of 8B10B errors
+ occur.
+ type: boolean
+
+ snps,del_phy_power_chg_quirk:
+ description: When set core will delay PHY power change from P0 to P1/P2/P3.
+ type: boolean
+
+ snps,lfps_filter_quirk:
+ description: When set core will filter LFPS reception.
+ type: boolean
+
+ snps,rx_detect_poll_quirk:
+ description:
+ When set core will disable a 400us delay to start Polling LFPS after
+ RX.Detect.
+ type: boolean
+
+ snps,tx_de_emphasis_quirk:
+ description: When set core will set Tx de-emphasis value
+ type: boolean
+
+ snps,tx_de_emphasis:
+ description:
+ The value driven to the PHY is controlled by the LTSSM during USB3
+ Compliance mode.
+ $ref: /schemas/types.yaml#/definitions/uint8
+ enum:
+ - 0 # -6dB de-emphasis
+ - 1 # -3.5dB de-emphasis
+ - 2 # No de-emphasis
+
+ snps,dis_u3_susphy_quirk:
+ description: When set core will disable USB3 suspend phy
+ type: boolean
+
+ snps,dis_u2_susphy_quirk:
+ description: When set core will disable USB2 suspend phy
+ type: boolean
+
+ snps,dis_enblslpm_quirk:
+ description:
+ When set clears the enblslpm in GUSB2PHYCFG, disabling the suspend signal
+ to the PHY.
+ type: boolean
+
+ snps,dis-u1-entry-quirk:
+ description: Set if link entering into U1 needs to be disabled
+ type: boolean
+
+ snps,dis-u2-entry-quirk:
+ description: Set if link entering into U2 needs to be disabled
+ type: boolean
+
+ snps,dis_rxdet_inp3_quirk:
+ description:
+ When set core will disable receiver detection in PHY P3 power state.
+ type: boolean
+
+ snps,dis-u2-freeclk-exists-quirk:
+ description:
+ When set, clears u2_freeclk_exists in GUSB2PHYCFG, specifying that the
+ USB2 PHY doesn't provide a free-running PHY clock.
+ type: boolean
+
+ snps,dis-del-phy-power-chg-quirk:
+ description:
+ When set core will change PHY power from P0 to P1/P2/P3 without delay.
+ type: boolean
+
+ snps,dis-tx-ipgap-linecheck-quirk:
+ description: When set, disable u2mac linestate check during HS transmit
+ type: boolean
+
+ snps,parkmode-disable-ss-quirk:
+ description:
+ When set, all SuperSpeed bus instances in park mode are disabled.
+ type: boolean
+
+ snps,dis_metastability_quirk:
+ description:
+ When set, disable metastability workaround. CAUTION! Use only if you are
+ absolutely sure of it.
+ type: boolean
+
+ snps,dis-split-quirk:
+ description:
+ When set, change the way URBs are handled by the driver. Needed to
+ avoid -EPROTO errors with usbhid on some devices (Hikey 970).
+ type: boolean
+
+ snps,is-utmi-l1-suspend:
+ description:
+ True when DWC3 asserts output signal utmi_l1_suspend_n, false when
+ it asserts utmi_sleep_n.
+ type: boolean
+
+ snps,hird-threshold:
+ description: HIRD threshold
+ $ref: /schemas/types.yaml#/definitions/uint8
+
+ snps,hsphy_interface:
+ description:
+ High-Speed PHY interface selection between UTMI+ and ULPI when the
+ DWC_USB3_HSPHY_INTERFACE has value 3.
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [utmi, ulpi]
+
+ snps,quirk-frame-length-adjustment:
+ description:
+ Value for GFLADJ_30MHZ field of GFLADJ register for post-silicon frame
+ length adjustment when the fladj_30mhz_sdbnd signal is invalid or
+ incorrect.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 0x3f
+
+ snps,rx-thr-num-pkt-prd:
+ description:
+ Periodic ESS RX packet threshold count (host mode only). Set this and
+ snps,rx-max-burst-prd to a valid, non-zero value 1-16 (DWC_usb31
+ programming guide section 1.2.4) to enable periodic ESS RX threshold.
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ maximum: 16
+
+ snps,rx-max-burst-prd:
+ description:
+ Max periodic ESS RX burst size (host mode only). Set this and
+ snps,rx-thr-num-pkt-prd to a valid, non-zero value 1-16 (DWC_usb31
+ programming guide section 1.2.4) to enable periodic ESS RX threshold.
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ maximum: 16
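+ # Illustrative pairing only; both values are placeholders chosen from the
+ # allowed 1-16 range and must come from the platform's tuning:
+ #   snps,rx-thr-num-pkt-prd = <2>;
+ #   snps,rx-max-burst-prd = <8>;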
+
+ snps,tx-thr-num-pkt-prd:
+ description:
+ Periodic ESS TX packet threshold count (host mode only). Set this and
+ snps,tx-max-burst-prd to a valid, non-zero value 1-16 (DWC_usb31
+ programming guide section 1.2.3) to enable periodic ESS TX threshold.
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ maximum: 16
+
+ snps,tx-max-burst-prd:
+ description:
+ Max periodic ESS TX burst size (host mode only). Set this and
+ snps,tx-thr-num-pkt-prd to a valid, non-zero value 1-16 (DWC_usb31
+ programming guide section 1.2.3) to enable periodic ESS TX threshold.
+ $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ maximum: 16
+
+ tx-fifo-resize:
+ description: Determines if the FIFO *has* to be reallocated
+ deprecated: true
+ type: boolean
+
+ snps,incr-burst-type-adjustment:
+ description:
+ Value for INCR burst type of GSBUSCFG0 register, undefined length INCR
+ burst type enable and INCRx type. A single value means INCRX burst mode
+ enabled. If more than one value is specified, undefined length INCR burst
+ type will be enabled with burst lengths utilized up to the maximum
+ of the values passed in this property.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 8
+ uniqueItems: true
+ items:
+ enum: [1, 4, 8, 16, 32, 64, 128, 256]
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+examples:
+ - |
+ usb@4a030000 {
+ compatible = "snps,dwc3";
+ reg = <0x4a030000 0xcfff>;
+ interrupts = <0 92 4>;
+ usb-phy = <&usb2_phy>, <&usb3_phy>;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
+ };
+ - |
+ usb@4a000000 {
+ compatible = "snps,dwc3";
+ reg = <0x4a000000 0xcfff>;
+ interrupts = <0 92 4>;
+ clocks = <&clk 1>, <&clk 2>, <&clk 3>;
+ clock-names = "bus_early", "ref", "suspend";
+ phys = <&usb2_phy>, <&usb3_phy>;
+ phy-names = "usb2-phy", "usb3-phy";
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+ };
+...
diff --git a/Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml b/Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml
index 52ceb07294a3..b86bf6bc9cd6 100644
--- a/Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml
@@ -26,17 +26,17 @@ properties:
maxItems: 1
ports:
+ $ref: /schemas/graph.yaml#/properties/ports
description: OF graph bindings (specified in bindings/graph.txt) that model
SS data bus to the SS capable connector.
- type: object
+
properties:
port@0:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Super Speed (SS) MUX inputs connected to SS capable connector.
- $ref: /connector/usb-connector.yaml#/properties/ports/properties/port@1
port@1:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
description: Output of 2:1 MUX connected to Super Speed (SS) data bus.
required:
diff --git a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
index 148b3fb4ceaf..7ec87a783c5c 100644
--- a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
@@ -19,13 +19,16 @@ properties:
- const: ti,am64-usb
reg:
- description: module registers
+ maxItems: 1
+
+ ranges: true
power-domains:
description:
PM domain provider node and an args specifier containing
the USB device id value. See,
Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+ maxItems: 1
clocks:
description: Clock phandles to usb2_refclk and lpm_clk
@@ -62,6 +65,8 @@ properties:
'#size-cells':
const: 2
+ dma-coherent: true
+
patternProperties:
"^usb@":
type: object
diff --git a/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml b/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml
index c1b19fc5d0a2..9a068d3bc73b 100644
--- a/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml
@@ -43,12 +43,14 @@ properties:
maxItems: 2
power-domains:
+ maxItems: 1
description: Should contain a phandle to a PM domain provider node
and an args specifier containing the USB device id
value. This property is as per the binding,
Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
phys:
+ maxItems: 1
description:
PHY specifier for the USB3.0 PHY. Some SoCs need the USB3.0 PHY
to be turned on before the controller.
@@ -64,9 +66,7 @@ properties:
patternProperties:
"usb@[a-f0-9]+$":
- type: object
- description: This is the node representing the DWC3 controller instance
- Documentation/devicetree/bindings/usb/dwc3.txt
+ $ref: snps,dwc3.yaml#
required:
- compatible
diff --git a/Documentation/devicetree/bindings/usb/usb-device.txt b/Documentation/devicetree/bindings/usb/usb-device.txt
deleted file mode 100644
index 036be172b1ae..000000000000
--- a/Documentation/devicetree/bindings/usb/usb-device.txt
+++ /dev/null
@@ -1,102 +0,0 @@
-Generic USB Device Properties
-
-Usually, we only use device tree for hard wired USB device.
-The reference binding doc is from:
-http://www.devicetree.org/open-firmware/bindings/usb/usb-1_0.ps
-
-Four types of device-tree nodes are defined: "host-controller nodes"
-representing USB host controllers, "device nodes" representing USB devices,
-"interface nodes" representing USB interfaces and "combined nodes"
-representing simple USB devices.
-
-A combined node shall be used instead of a device node and an interface node
-for devices of class 0 or 9 (hub) with a single configuration and a single
-interface.
-
-A "hub node" is a combined node or an interface node that represents a USB
-hub.
-
-
-Required properties for device nodes:
-- compatible: "usbVID,PID", where VID is the vendor id and PID the product id.
- The textual representation of VID and PID shall be in lower case hexadecimal
- with leading zeroes suppressed. The other compatible strings from the above
- standard binding could also be used, but a device adhering to this binding
- may leave out all except for "usbVID,PID".
-- reg: the number of the USB hub port or the USB host-controller port to which
- this device is attached. The range is 1-255.
-
-
-Required properties for device nodes with interface nodes:
-- #address-cells: shall be 2
-- #size-cells: shall be 0
-
-
-Required properties for interface nodes:
-- compatible: "usbifVID,PID.configCN.IN", where VID is the vendor id, PID is
- the product id, CN is the configuration value and IN is the interface
- number. The textual representation of VID, PID, CN and IN shall be in lower
- case hexadecimal with leading zeroes suppressed. The other compatible
- strings from the above standard binding could also be used, but a device
- adhering to this binding may leave out all except for
- "usbifVID,PID.configCN.IN".
-- reg: the interface number and configuration value
-
-The configuration component is not included in the textual representation of
-an interface-node unit address for configuration 1.
-
-
-Required properties for combined nodes:
-- compatible: "usbVID,PID", where VID is the vendor id and PID the product id.
- The textual representation of VID and PID shall be in lower case hexadecimal
- with leading zeroes suppressed. The other compatible strings from the above
- standard binding could also be used, but a device adhering to this binding
- may leave out all except for "usbVID,PID".
-- reg: the number of the USB hub port or the USB host-controller port to which
- this device is attached. The range is 1-255.
-
-
-Required properties for hub nodes with device nodes:
-- #address-cells: shall be 1
-- #size-cells: shall be 0
-
-
-Required properties for host-controller nodes with device nodes:
-- #address-cells: shall be 1
-- #size-cells: shall be 0
-
-
-Example:
-
-&usb1 { /* host controller */
- #address-cells = <1>;
- #size-cells = <0>;
-
- hub@1 { /* hub connected to port 1 */
- compatible = "usb5e3,608";
- reg = <1>;
- };
-
- device@2 { /* device connected to port 2 */
- compatible = "usb123,4567";
- reg = <2>;
- };
-
- device@3 { /* device connected to port 3 */
- compatible = "usb123,abcd";
- reg = <3>;
-
- #address-cells = <2>;
- #size-cells = <0>;
-
- interface@0 { /* interface 0 of configuration 1 */
- compatible = "usbif123,abcd.config1.0";
- reg = <0 1>;
- };
-
- interface@0,2 { /* interface 0 of configuration 2 */
- compatible = "usbif123,abcd.config2.0";
- reg = <0 2>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/usb/usb-device.yaml b/Documentation/devicetree/bindings/usb/usb-device.yaml
new file mode 100644
index 000000000000..d4c99809ee9a
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/usb-device.yaml
@@ -0,0 +1,124 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/usb-device.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: The device tree bindings for the Generic USB Device
+
+maintainers:
+ - Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+description: |
+ Usually, we only use device tree for hard wired USB devices.
+ The reference binding doc is from:
+ http://www.devicetree.org/open-firmware/bindings/usb/usb-1_0.ps
+
+ Four types of device-tree nodes are defined: "host-controller nodes"
+ representing USB host controllers, "device nodes" representing USB devices,
+ "interface nodes" representing USB interfaces and "combined nodes"
+ representing simple USB devices.
+
+ A combined node shall be used instead of a device node and an interface node
+ for devices of class 0 or 9 (hub) with a single configuration and a single
+ interface.
+
+ A "hub node" is a combined node or an interface node that represents a USB
+ hub.
+
+properties:
+ compatible:
+ pattern: "^usb[0-9a-f]{1,4},[0-9a-f]{1,4}$"
+ description: Device nodes or combined nodes.
+ "usbVID,PID", where VID is the vendor id and PID the product id.
+ The textual representation of VID and PID shall be in lower case
+ hexadecimal with leading zeroes suppressed. The other compatible
+ strings from the above standard binding could also be used,
+ but a device adhering to this binding may leave out all except
+ for "usbVID,PID".
+
+ reg:
+ description: the number of the USB hub port or the USB host-controller
+ port to which this device is attached. The range is 1-255.
+ maxItems: 1
+
+ "#address-cells":
+ description: should be 1 for hub nodes with device nodes,
+ should be 2 for device nodes with interface nodes.
+ enum: [1, 2]
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^interface@[0-9a-f]{1,2}(,[0-9a-f]{1,2})$":
+ type: object
+ description: USB interface nodes.
+ The configuration component is not included in the textual
+ representation of an interface-node unit address for configuration 1.
+
+ properties:
+ compatible:
+ pattern: "^usbif[0-9a-f]{1,4},[0-9a-f]{1,4}.config[0-9a-f]{1,2}.[0-9a-f]{1,2}$"
+ description: Interface nodes.
+ "usbifVID,PID.configCN.IN", where VID is the vendor id, PID is
+ the product id, CN is the configuration value and IN is the interface
+ number. The textual representation of VID, PID, CN and IN shall be
+ in lower case hexadecimal with leading zeroes suppressed.
+ The other compatible strings from the above standard binding could
+ also be used, but a device adhering to this binding may leave out
+ all except for "usbifVID,PID.configCN.IN".
+
+ reg:
+ description: should be 2 cells long, the first cell represents
+ the interface number and the second cell represents the
+ configuration value.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: true
+
+examples:
+ #hub connected to port 1
+ #device connected to port 2
+ #device connected to port 3
+ # interface 0 of configuration 1
+ # interface 0 of configuration 2
+ - |
+ usb@11270000 {
+ reg = <0x11270000 0x1000>;
+ interrupts = <0x0 0x4e 0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hub@1 {
+ compatible = "usb5e3,608";
+ reg = <1>;
+ };
+
+ device@2 {
+ compatible = "usb123,4567";
+ reg = <2>;
+ };
+
+ device@3 {
+ compatible = "usb123,abcd";
+ reg = <3>;
+
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ interface@0 {
+ compatible = "usbif123,abcd.config1.0";
+ reg = <0 1>;
+ };
+
+ interface@0,2 {
+ compatible = "usbif123,abcd.config2.0";
+ reg = <0 2>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/usb-drd.yaml b/Documentation/devicetree/bindings/usb/usb-drd.yaml
new file mode 100644
index 000000000000..f229fc8068d9
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/usb-drd.yaml
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/usb-drd.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic USB OTG Controller Device Tree Bindings
+
+maintainers:
+ - Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+properties:
+ otg-rev:
+ description:
+ Tells usb driver the release number of the OTG and EH supplement with
+ which the device and its descriptors are compliant, in binary-coded
+ decimal (i.e. 2.0 is 0200H). This property is used if any real OTG
+ features (HNP/SRP/ADP) is enabled. If ADP is required, otg-rev should be
+ 0x0200 or above.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0x0100, 0x0120, 0x0130, 0x0200]
+
+ dr_mode:
+ description:
+ Tells Dual-Role USB controllers that we want to work on a particular
+ mode. In case this attribute isn't passed via DT, USB DRD controllers
+ should default to OTG.
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [host, peripheral, otg]
+
+ hnp-disable:
+ description:
+ Tells OTG controllers we want to disable OTG HNP. Normally HNP is the
+ basic function of real OTG, unless you want it to be an SRP-capable-only B
+ device.
+ type: boolean
+
+ srp-disable:
+ description:
+ Tells OTG controllers we want to disable OTG SRP. SRP is optional for OTG
+ device.
+ type: boolean
+
+ adp-disable:
+ description:
+ Tells OTG controllers we want to disable OTG ADP. ADP is optional for OTG
+ device.
+ type: boolean
+
+ usb-role-switch:
+ description:
+ Indicates that the device is capable of assigning the USB data role
+ (USB host or USB device) for a given USB connector, such as Type-C,
+ Type-B(micro). See connector/usb-connector.yaml.
+
+ role-switch-default-mode:
+ description:
+ If usb-role-switch is enabled, indicates the default operation mode of
+ the controller while the USB role is USB_ROLE_NONE.
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [host, peripheral]
+ default: peripheral
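+ # For example (illustrative), a design that should stay in host mode while
+ # the USB role is USB_ROLE_NONE would set:
+ #   role-switch-default-mode = "host";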
+
+additionalProperties: true
+
+examples:
+ - |
+ usb@4a030000 {
+ compatible = "snps,dwc3";
+ reg = <0x4a030000 0xcfff>;
+ interrupts = <0 92 4>;
+ usb-phy = <&usb2_phy>, <&usb3_phy>;
+ maximum-speed = "super-speed";
+ dr_mode = "otg";
+ phy_type = "utmi_wide";
+ otg-rev = <0x0200>;
+ adp-disable;
+ };
diff --git a/Documentation/devicetree/bindings/usb/usb-hcd.yaml b/Documentation/devicetree/bindings/usb/usb-hcd.yaml
index b545b087b342..56853c17af66 100644
--- a/Documentation/devicetree/bindings/usb/usb-hcd.yaml
+++ b/Documentation/devicetree/bindings/usb/usb-hcd.yaml
@@ -9,18 +9,31 @@ title: Generic USB Host Controller Device Tree Bindings
maintainers:
- Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+allOf:
+ - $ref: usb.yaml#
+
properties:
- $nodename:
- pattern: "^usb(@.*)?"
+ companion:
+ description: Phandle of a companion device
+ $ref: /schemas/types.yaml#/definitions/phandle
- phys:
- $ref: /schemas/types.yaml#/definitions/phandle-array
+ tpl-support:
description:
- List of all the USB PHYs on this HCD
+ Indicates if the Targeted Peripheral List is supported for given
+ targeted hosts (non-PC hosts).
+ type: boolean
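+ # For illustration: a targeted (non-PC) host that ships a Targeted
+ # Peripheral List simply adds the flag:
+ #   tpl-support;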
- phy-names:
- description:
- Name specifier for the USB PHY
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^.*@[0-9a-f]{1,2}$":
+ description: The hard wired USB devices
+ type: object
+ $ref: /usb/usb-device.yaml
additionalProperties: true
@@ -29,4 +42,11 @@ examples:
usb {
phys = <&usb2_phy1>, <&usb3_phy1>;
phy-names = "usb";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hub@1 {
+ compatible = "usb5e3,610";
+ reg = <1>;
+ };
};
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
deleted file mode 100644
index 0c5cff84a969..000000000000
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-USB xHCI controllers
-
-Required properties:
- - compatible: should be one or more of
-
- - "generic-xhci" for generic XHCI device
- - "marvell,armada3700-xhci" for Armada 37xx SoCs
- - "marvell,armada-375-xhci" for Armada 375 SoCs
- - "marvell,armada-380-xhci" for Armada 38x SoCs
- - "brcm,bcm7445-xhci" for Broadcom STB SoCs with XHCI
- - "xhci-platform" (deprecated)
-
- When compatible with the generic version, nodes must list the
- SoC-specific version corresponding to the platform first
- followed by the generic version.
-
- - reg: should contain address and length of the standard XHCI
- register set for the device.
- - interrupts: one XHCI interrupt should be described here.
-
-Optional properties:
- - clocks: reference to the clocks
- - clock-names: mandatory if there is a second clock, in this case
- the name must be "core" for the first clock and "reg" for the
- second one
- - usb2-lpm-disable: indicate if we don't want to enable USB2 HW LPM
- - usb3-lpm-capable: determines if platform is USB3 LPM capable
- - quirk-broken-port-ped: set if the controller has broken port disable mechanism
- - imod-interval-ns: default interrupt moderation interval is 5000ns
- - phys : see usb-hcd.yaml in the current directory
-
-additionally the properties from usb-hcd.yaml (in the current directory) are
-supported.
-
-
-Example:
- usb@f0931000 {
- compatible = "generic-xhci";
- reg = <0xf0931000 0x8c8>;
- interrupts = <0x0 0x4e 0x0>;
- };
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.yaml b/Documentation/devicetree/bindings/usb/usb-xhci.yaml
new file mode 100644
index 000000000000..965f87fef702
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/usb-xhci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic USB xHCI Controller Device Tree Bindings
+
+maintainers:
+ - Mathias Nyman <mathias.nyman@intel.com>
+
+allOf:
+ - $ref: "usb-hcd.yaml#"
+
+properties:
+ usb2-lpm-disable:
+ description: Indicates if we don't want to enable USB2 HW LPM
+ type: boolean
+
+ usb3-lpm-capable:
+ description: Determines if platform is USB3 LPM capable
+ type: boolean
+
+ quirk-broken-port-ped:
+ description: Set if the controller has broken port disable mechanism
+ type: boolean
+
+ imod-interval-ns:
+ description: Interrupt moderation interval
+ default: 5000
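+ # For example, a board wanting twice the default 5000 ns moderation
+ # interval would set:
+ #   imod-interval-ns = <10000>;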
+
+additionalProperties: true
+
+examples:
+ - |
+ usb@f0930000 {
+ compatible = "generic-xhci";
+ reg = <0xf0930000 0x8c8>;
+ interrupts = <0x0 0x4e 0x0>;
+ usb2-lpm-disable;
+ usb3-lpm-capable;
+ };
diff --git a/Documentation/devicetree/bindings/usb/usb.yaml b/Documentation/devicetree/bindings/usb/usb.yaml
new file mode 100644
index 000000000000..78491e66ed24
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/usb.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/usb.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic USB Controller Device Tree Bindings
+
+maintainers:
+ - Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+select: false
+
+properties:
+ $nodename:
+ pattern: "^usb(@.*)?"
+
+ phys:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ List of all the USB PHYs on this HCD
+
+ phy-names:
+ description:
+ Name specifier for the USB PHY
+
+ usb-phy:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ List of all the USB PHYs on this HCD to be accepted by the legacy USB
+ Physical Layer subsystem.
+ deprecated: true
+
+ phy_type:
+ description:
+ Tells USB controllers that we want to configure the core to support a
+ UTMI+ PHY with an 8- or 16-bit interface if UTMI+ is selected, UTMI+ low
+ pin interface if ULPI is specified, Serial core/PHY interconnect if
+ serial is specified and High-Speed Inter-Chip feature if HSIC is
+ selected. In case this isn't passed via DT, USB controllers should
+ default to HW capability.
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [utmi, utmi_wide, ulpi, serial, hsic]
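+ # For illustration (assuming "utmi_wide" selects the 16-bit UTMI+ variant):
+ #   phy_type = "utmi_wide";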
+
+ maximum-speed:
+ description:
+ Tells USB controllers we want to work up to a certain speed. In case this
+ isn't passed via DT, USB controllers should default to their maximum HW
+ capability.
+ $ref: /schemas/types.yaml#/definitions/string
+ enum:
+ - low-speed
+ - full-speed
+ - high-speed
+ - super-speed
+ - super-speed-plus
+ - super-speed-plus-gen2x1
+ - super-speed-plus-gen1x2
+ - super-speed-plus-gen2x2
+
+additionalProperties: true
+
+...
diff --git a/Documentation/devicetree/bindings/usb/usbmisc-imx.txt b/Documentation/devicetree/bindings/usb/usbmisc-imx.txt
index b353b9816487..b796836d2ce7 100644
--- a/Documentation/devicetree/bindings/usb/usbmisc-imx.txt
+++ b/Documentation/devicetree/bindings/usb/usbmisc-imx.txt
@@ -1,7 +1,7 @@
* Freescale i.MX non-core registers
Required properties:
-- #index-cells: Cells used to descibe usb controller index. Should be <1>
+- #index-cells: Cells used to describe usb controller index. Should be <1>
- compatible: Should be one of below:
"fsl,imx6q-usbmisc" for imx6q
"fsl,vf610-usbmisc" for Vybrid vf610
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 041ae90b0d8f..f6064d84a424 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -59,6 +59,8 @@ patternProperties:
description: Aeroflex Gaisler AB
"^al,.*":
description: Annapurna Labs
+ "^alcatel,.*":
+ description: Alcatel
"^allegro,.*":
description: Allegro DVT
"^allo,.*":
@@ -229,6 +231,8 @@ patternProperties:
description: Computadora Industrial Abierta Argentina
"^cirrus,.*":
description: Cirrus Logic, Inc.
+ "^cisco,.*":
+ description: Cisco Systems, Inc.
"^cloudengines,.*":
description: Cloud Engines, Inc.
"^cnm,.*":
@@ -311,6 +315,8 @@ patternProperties:
description: Dyna-Image
"^ea,.*":
description: Embedded Artists AB
+ "^ebang,.*":
+ description: Zhejiang Ebang Communication Co., Ltd
"^ebs-systart,.*":
description: EBS-SYSTART GmbH
"^ebv,.*":
@@ -467,10 +473,10 @@ patternProperties:
description: Hitex Development Tools
"^holt,.*":
description: Holt Integrated Circuits, Inc.
- "^honeywell,.*":
- description: Honeywell
"^honestar,.*":
description: Honestar Technologies Co., Ltd.
+ "^honeywell,.*":
+ description: Honeywell
"^hoperun,.*":
description: Jiangsu HopeRun Software Co., Ltd.
"^hp,.*":
@@ -581,6 +587,8 @@ patternProperties:
description: Kontron S&T AG
"^kosagi,.*":
description: Sutajio Ko-Usagi PTE Ltd.
+ "^kvg,.*":
+ description: Kverneland Group
"^kyo,.*":
description: Kyocera Corporation
"^lacie,.*":
@@ -866,6 +874,8 @@ patternProperties:
description: PLDA
"^plx,.*":
description: Broadcom Corporation (formerly PLX Technology)
+ "^ply,.*":
+ description: Plymovent Group BV
"^pni,.*":
description: PNI Sensor Corporation
"^pocketbook,.*":
@@ -1075,6 +1085,8 @@ patternProperties:
description: Shenzhen Sunchip Technology Co., Ltd
"^SUNW,.*":
description: Sun Microsystems, Inc
+ "^silvaco,.*":
+ description: Silvaco, Inc.
"^swir,.*":
description: Sierra Wireless
"^syna,.*":
@@ -1252,6 +1264,8 @@ patternProperties:
description: Shenzhen Xunlong Software CO.,Limited
"^xylon,.*":
description: Xylon
+ "^yamaha,.*":
+ description: Yamaha Corporation
"^yes-optoelectronics,.*":
description: Yes Optoelectronics Co.,Ltd.
"^ylm,.*":
@@ -1260,6 +1274,8 @@ patternProperties:
description: YSH & ATIL
"^yones-toptech,.*":
description: Yones Toptech Co., Ltd.
+ "^ys,.*":
+ description: Shenzhen Yashi Changhua Intelligent Technology Co., Ltd.
"^ysoft,.*":
description: Y Soft Corporation a.s.
"^zealz,.*":
diff --git a/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml b/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml
index 5ac607de8be4..9aa3c313c49f 100644
--- a/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml
@@ -19,13 +19,11 @@ properties:
- const: allwinner,sun4i-a10-wdt
- const: allwinner,sun6i-a31-wdt
- items:
- - const: allwinner,sun50i-a64-wdt
- - const: allwinner,sun6i-a31-wdt
- - items:
- - const: allwinner,sun50i-a100-wdt
- - const: allwinner,sun6i-a31-wdt
- - items:
- - const: allwinner,sun50i-h6-wdt
+ - enum:
+ - allwinner,sun50i-a64-wdt
+ - allwinner,sun50i-a100-wdt
+ - allwinner,sun50i-h6-wdt
+ - allwinner,sun50i-h616-wdt
- const: allwinner,sun6i-a31-wdt
- items:
- const: allwinner,suniv-f1c100s-wdt
diff --git a/Documentation/devicetree/bindings/watchdog/intel,keembay-wdt.yaml b/Documentation/devicetree/bindings/watchdog/intel,keembay-wdt.yaml
new file mode 100644
index 000000000000..1437ff8a122f
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/intel,keembay-wdt.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/intel,keembay-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel Keem Bay SoC non-secure Watchdog Timer
+
+maintainers:
+ - Wan Ahmad Zainie <wan.ahmad.zainie.wan.mohamad@intel.com>
+
+properties:
+ compatible:
+ enum:
+ - intel,keembay-wdt
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: interrupt specifier for threshold interrupt line
+ - description: interrupt specifier for timeout interrupt line
+
+ interrupt-names:
+ items:
+ - const: threshold
+ - const: timeout
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #define KEEM_BAY_A53_TIM
+
+ watchdog: watchdog@2033009c {
+ compatible = "intel,keembay-wdt";
+ reg = <0x2033009c 0x10>;
+ interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "threshold", "timeout";
+ clocks = <&scmi_clk KEEM_BAY_A53_TIM>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
index 4dd36bd3f1ad..e36ba60de829 100644
--- a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
@@ -4,14 +4,15 @@ Required properties:
- compatible should contain:
"mediatek,mt2701-wdt", "mediatek,mt6589-wdt": for MT2701
- "mediatek,mt2712-wdt", "mediatek,mt6589-wdt": for MT2712
+ "mediatek,mt2712-wdt": for MT2712
"mediatek,mt6589-wdt": for MT6589
"mediatek,mt6797-wdt", "mediatek,mt6589-wdt": for MT6797
"mediatek,mt7622-wdt", "mediatek,mt6589-wdt": for MT7622
"mediatek,mt7623-wdt", "mediatek,mt6589-wdt": for MT7623
"mediatek,mt7629-wdt", "mediatek,mt6589-wdt": for MT7629
- "mediatek,mt8183-wdt", "mediatek,mt6589-wdt": for MT8183
+ "mediatek,mt8183-wdt": for MT8183
"mediatek,mt8516-wdt", "mediatek,mt6589-wdt": for MT8516
+ "mediatek,mt8192-wdt": for MT8192
- reg : Specifies base physical address and size of the registers.
diff --git a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
index 8e3760a3822b..b8e4118945a0 100644
--- a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
@@ -18,6 +18,7 @@ properties:
- qcom,apss-wdt-qcs404
- qcom,apss-wdt-sc7180
- qcom,apss-wdt-sdm845
+ - qcom,apss-wdt-sdx55
- qcom,apss-wdt-sm8150
- qcom,kpss-timer
- qcom,kpss-wdt
diff --git a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
index 6933005b52bd..ab66d3f0c476 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
@@ -50,6 +50,7 @@ properties:
- renesas,r8a77980-wdt # R-Car V3H
- renesas,r8a77990-wdt # R-Car E3
- renesas,r8a77995-wdt # R-Car D3
+ - renesas,r8a779a0-wdt # R-Car V3U
- const: renesas,rcar-gen3-wdt # R-Car Gen3 and RZ/G2
reg:
diff --git a/Documentation/devicetree/bindings/watchdog/sigma,smp8642-wdt.txt b/Documentation/devicetree/bindings/watchdog/sigma,smp8642-wdt.txt
deleted file mode 100644
index 5b7ec2c707d8..000000000000
--- a/Documentation/devicetree/bindings/watchdog/sigma,smp8642-wdt.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-Sigma Designs SMP86xx/SMP87xx watchdog
-
-Required properties:
-- compatible: Should be "sigma,smp8642-wdt"
-- reg: Specifies the physical address region
-- clocks: Should be a phandle to the clock
-
-Optional properties:
-- timeout-sec: watchdog timeout in seconds
-
-Example:
-
-watchdog@1fd00 {
- compatible = "sigma,smp8642-wdt";
- reg = <0x1fd00 8>;
- clocks = <&xtal_in_clk>;
- timeout-sec = <30>;
-};
diff --git a/Documentation/devicetree/bindings/watchdog/sirfsoc_wdt.txt b/Documentation/devicetree/bindings/watchdog/sirfsoc_wdt.txt
deleted file mode 100644
index 0dce5e3100b4..000000000000
--- a/Documentation/devicetree/bindings/watchdog/sirfsoc_wdt.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-SiRFSoC Timer and Watchdog Timer(WDT) Controller
-
-Required properties:
-- compatible: "sirf,prima2-tick"
-- reg: Address range of tick timer/WDT register set
-- interrupts: interrupt number to the cpu
-
-Optional properties:
-- timeout-sec : Contains the watchdog timeout in seconds
-
-Example:
-
-timer@b0020000 {
- compatible = "sirf,prima2-tick";
- reg = <0xb0020000 0x1000>;
- interrupts = <0>;
- timeout-sec = <30>;
-};
diff --git a/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml b/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml
index f7ee9229c29f..b58596b1831d 100644
--- a/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml
@@ -18,10 +18,16 @@ properties:
- const: snps,dw-wdt
- items:
- enum:
+ - rockchip,px30-wdt
- rockchip,rk3066-wdt
- rockchip,rk3188-wdt
+ - rockchip,rk3228-wdt
- rockchip,rk3288-wdt
+ - rockchip,rk3308-wdt
+ - rockchip,rk3328-wdt
- rockchip,rk3368-wdt
+ - rockchip,rk3399-wdt
+ - rockchip,rv1108-wdt
- const: snps,dw-wdt
reg:
diff --git a/Documentation/devicetree/bindings/watchdog/stericsson-coh901327.txt b/Documentation/devicetree/bindings/watchdog/stericsson-coh901327.txt
deleted file mode 100644
index 8ffb88e39e76..000000000000
--- a/Documentation/devicetree/bindings/watchdog/stericsson-coh901327.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-ST-Ericsson COH 901 327 Watchdog timer
-
-Required properties:
-- compatible: must be "stericsson,coh901327".
-- reg: physical base address of the controller and length of memory mapped
- region.
-- interrupts: the interrupt used for the watchdog timeout warning.
-
-Optional properties:
-- timeout-sec: contains the watchdog timeout in seconds.
-
-Example:
-
-watchdog: watchdog@c0012000 {
- compatible = "stericsson,coh901327";
- reg = <0xc0012000 0x1000>;
- interrupts = <3>;
- timeout-sec = <60>;
-};
diff --git a/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml b/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml
index c1348db59374..054584d7543a 100644
--- a/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml
@@ -57,8 +57,8 @@ examples:
*/
#include <dt-bindings/soc/ti,sci_pm_domain.h>
- watchdog0: rti@2200000 {
- compatible = "ti,rti-wdt";
+ watchdog@2200000 {
+ compatible = "ti,j7-rti-wdt";
reg = <0x2200000 0x100>;
clocks = <&k3_clks 252 1>;
power-domains = <&k3_pds 252 TI_SCI_PD_EXCLUSIVE>;
diff --git a/Documentation/devicetree/bindings/watchdog/watchdog.yaml b/Documentation/devicetree/bindings/watchdog/watchdog.yaml
index 4e2c26cd981d..e3dfb02f0ca5 100644
--- a/Documentation/devicetree/bindings/watchdog/watchdog.yaml
+++ b/Documentation/devicetree/bindings/watchdog/watchdog.yaml
@@ -19,7 +19,6 @@ properties:
pattern: "^watchdog(@.*|-[0-9a-f])?$"
timeout-sec:
- $ref: /schemas/types.yaml#/definitions/uint32
description:
Contains the watchdog timeout in seconds.
diff --git a/Documentation/devicetree/bindings/watchdog/zte,zx2967-wdt.txt b/Documentation/devicetree/bindings/watchdog/zte,zx2967-wdt.txt
deleted file mode 100644
index 06ce67766756..000000000000
--- a/Documentation/devicetree/bindings/watchdog/zte,zx2967-wdt.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-ZTE zx2967 Watchdog timer
-
-Required properties:
-
-- compatible : should be one of the following.
- * zte,zx296718-wdt
-- reg : Specifies base physical address and size of the registers.
-- clocks : Pairs of phandle and specifier referencing the controller's clocks.
-- resets : Reference to the reset controller controlling the watchdog
- controller.
-
-Optional properties:
-
-- timeout-sec : Contains the watchdog timeout in seconds.
-- zte,wdt-reset-sysctrl : Directs how to reset system by the watchdog.
- if we don't want to restart system when watchdog been triggered,
- it's not required, vice versa.
- It should include following fields.
- * phandle of aon-sysctrl.
- * offset of register that be written, should be 0xb0.
- * configure value that be written to aon-sysctrl.
- * bit mask, corresponding bits will be affected.
-
-Example:
-
-wdt: watchdog@1465000 {
- compatible = "zte,zx296718-wdt";
- reg = <0x1465000 0x1000>;
- clocks = <&topcrm WDT_WCLK>;
- resets = <&toprst 35>;
- zte,wdt-reset-sysctrl = <&aon_sysctrl 0xb0 1 0x115>;
-};
diff --git a/Documentation/devicetree/usage-model.rst b/Documentation/devicetree/usage-model.rst
index e1b42dc63f01..1eb83496ca1e 100644
--- a/Documentation/devicetree/usage-model.rst
+++ b/Documentation/devicetree/usage-model.rst
@@ -12,7 +12,7 @@ This article describes how Linux uses the device tree. An overview of
the device tree data format can be found on the device tree usage page
at devicetree.org\ [1]_.
-.. [1] https://elinux.org/Device_Tree_Usage
+.. [1] https://www.devicetree.org/specifications/
The "Open Firmware Device Tree", or simply Device Tree (DT), is a data
structure and language for describing hardware. More specifically, it
diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst
index 36ac2166ad67..ec3e71f56009 100644
--- a/Documentation/doc-guide/sphinx.rst
+++ b/Documentation/doc-guide/sphinx.rst
@@ -340,16 +340,26 @@ Rendered as:
Cross-referencing
-----------------
-Cross-referencing from one documentation page to another can be done by passing
-the path to the file starting from the Documentation folder.
-For example, to cross-reference to this page (the .rst extension is optional)::
-
- See Documentation/doc-guide/sphinx.rst.
-
-If you want to use a relative path, you need to use Sphinx's ``doc`` directive.
-For example, referencing this page from the same directory would be done as::
-
- See :doc:`sphinx`.
+Cross-referencing from one documentation page to another can be done simply by
+writing the path to the document file, no special syntax required. The path can
+be either absolute or relative. For absolute paths, start it with
+"Documentation/". For example, to cross-reference to this page, all the
+following are valid options, depending on the current document's directory (note
+that the ``.rst`` extension is required)::
+
+ See Documentation/doc-guide/sphinx.rst. This always works.
+ Take a look at sphinx.rst, which is at this same directory.
+ Read ../sphinx.rst, which is one directory above.
+
+If you want the link to show a rendered text other than the document's
+title, you need to use Sphinx's ``doc`` role. For example::
+
+ See :doc:`my custom link text for document sphinx <sphinx>`.
+
+For most use cases, the former is preferred, as it is cleaner and more suited
+for people reading the source files. If you come across a ``:doc:`` usage that
+isn't adding any value, please feel free to convert it to just the document
+path.
For information on cross-referencing to kernel-doc functions or types, see
Documentation/doc-guide/kernel-doc.rst.
diff --git a/Documentation/driver-api/auxiliary_bus.rst b/Documentation/driver-api/auxiliary_bus.rst
index 2312506b0674..fff96c7ba7a8 100644
--- a/Documentation/driver-api/auxiliary_bus.rst
+++ b/Documentation/driver-api/auxiliary_bus.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0-only
+.. _auxiliary_bus:
+
=============
Auxiliary Bus
=============
diff --git a/Documentation/driver-api/cxl/index.rst b/Documentation/driver-api/cxl/index.rst
new file mode 100644
index 000000000000..036e49553542
--- /dev/null
+++ b/Documentation/driver-api/cxl/index.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
+Compute Express Link
+====================
+
+.. toctree::
+ :maxdepth: 1
+
+ memory-devices
+
+.. only:: subproject and html
diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/memory-devices.rst
new file mode 100644
index 000000000000..1bad466f9167
--- /dev/null
+++ b/Documentation/driver-api/cxl/memory-devices.rst
@@ -0,0 +1,46 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+===================================
+Compute Express Link Memory Devices
+===================================
+
+A Compute Express Link Memory Device is a CXL component that implements the
+CXL.mem protocol. It contains some amount of volatile memory, persistent memory,
+or both. It is enumerated as a PCI device for configuration and passing
+messages over an MMIO mailbox. Its contribution to the System Physical
+Address space is handled via HDM (Host Managed Device Memory) decoders
+that optionally define a device's contribution to an interleaved address
+range across multiple devices underneath a host-bridge or interleaved
+across host-bridges.
+
+Driver Infrastructure
+=====================
+
+This section covers the driver infrastructure for a CXL memory device.
+
+CXL Memory Device
+-----------------
+
+.. kernel-doc:: drivers/cxl/mem.c
+ :doc: cxl mem
+
+.. kernel-doc:: drivers/cxl/mem.c
+ :internal:
+
+CXL Bus
+-------
+.. kernel-doc:: drivers/cxl/bus.c
+ :doc: cxl bus
+
+External Interfaces
+===================
+
+CXL IOCTL Interface
+-------------------
+
+.. kernel-doc:: include/uapi/linux/cxl_mem.h
+ :doc: UAPI
+
+.. kernel-doc:: include/uapi/linux/cxl_mem.h
+ :internal:
diff --git a/Documentation/driver-api/gpio/consumer.rst b/Documentation/driver-api/gpio/consumer.rst
index 173e4c7b037d..22271c342d92 100644
--- a/Documentation/driver-api/gpio/consumer.rst
+++ b/Documentation/driver-api/gpio/consumer.rst
@@ -361,12 +361,13 @@ corresponding chip driver. In that case a significantly improved performance
can be expected. If simultaneous access is not possible the GPIOs will be
accessed sequentially.
-The functions take three arguments:
+The functions take four arguments:
+
* array_size - the number of array elements
* desc_array - an array of GPIO descriptors
* array_info - optional information obtained from gpiod_get_array()
* value_bitmap - a bitmap to store the GPIOs' values (get) or
- a bitmap of values to assign to the GPIOs (set)
+ a bitmap of values to assign to the GPIOs (set)
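+
+As an illustration, a minimal sketch of a multi-line read using
+gpiod_get_array() and gpiod_get_array_value() could look as follows. The
+"example" consumer name, the EXAMPLE_NLINES bound and the ``dev`` consumer
+device are placeholders::
+
+    struct gpio_descs *descs;
+    DECLARE_BITMAP(values, EXAMPLE_NLINES);
+    int err;
+
+    descs = gpiod_get_array(dev, "example", GPIOD_IN);
+    if (IS_ERR(descs))
+        return PTR_ERR(descs);
+
+    err = gpiod_get_array_value(descs->ndescs, descs->desc,
+                                descs->info, values);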
The descriptor array can be obtained using the gpiod_get_array() function
or one of its variants. If the group of descriptors returned by that function
diff --git a/Documentation/driver-api/gpio/driver.rst b/Documentation/driver-api/gpio/driver.rst
index 0fb57e298b41..d6b0d779859b 100644
--- a/Documentation/driver-api/gpio/driver.rst
+++ b/Documentation/driver-api/gpio/driver.rst
@@ -640,8 +640,8 @@ compliance:
level and edge IRQs
* [1] http://www.spinics.net/lists/linux-omap/msg120425.html
-* [2] https://lkml.org/lkml/2015/9/25/494
-* [3] https://lkml.org/lkml/2015/9/25/495
+* [2] https://lore.kernel.org/r/1443209283-20781-2-git-send-email-grygorii.strashko@ti.com
+* [3] https://lore.kernel.org/r/1443209283-20781-3-git-send-email-grygorii.strashko@ti.com
Requesting self-owned GPIO pins
diff --git a/Documentation/driver-api/gpio/intro.rst b/Documentation/driver-api/gpio/intro.rst
index 74591489d0b5..94dd7185e76e 100644
--- a/Documentation/driver-api/gpio/intro.rst
+++ b/Documentation/driver-api/gpio/intro.rst
@@ -106,11 +106,11 @@ don't. When you need open drain signaling but your hardware doesn't directly
support it, there's a common idiom you can use to emulate it with any GPIO pin
that can be used as either an input or an output:
- LOW: gpiod_direction_output(gpio, 0) ... this drives the signal and overrides
- the pullup.
+ **LOW**: ``gpiod_direction_output(gpio, 0)`` ... this drives the signal and
+ overrides the pullup.
- HIGH: gpiod_direction_input(gpio) ... this turns off the output, so the pullup
- (or some other device) controls the signal.
+ **HIGH**: ``gpiod_direction_input(gpio)`` ... this turns off the output, so
+ the pullup (or some other device) controls the signal.
The same logic can be applied to emulate open source signaling, by driving the
high signal and configuring the GPIO as input for low. This open drain/open
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 2456d0a97ed8..b0ab367896ab 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -35,6 +35,7 @@ available subsections can be seen below.
usb/index
firewire
pci/index
+ cxl/index
spi
i2c
ipmb
@@ -93,12 +94,12 @@ available subsections can be seen below.
pps
ptp
phy/index
- pti_intel_mid
pwm
pldmfw/index
rfkill
serial/index
sm501
+ surface_aggregator/index
switchtec
sync_file
vfio-mediated-device
diff --git a/Documentation/driver-api/media/camera-sensor.rst b/Documentation/driver-api/media/camera-sensor.rst
index ffb0cad8137a..3fc378b3b269 100644
--- a/Documentation/driver-api/media/camera-sensor.rst
+++ b/Documentation/driver-api/media/camera-sensor.rst
@@ -15,7 +15,7 @@ Camera sensors have an internal clock tree including a PLL and a number of
divisors. The clock tree is generally configured by the driver based on a few
input parameters that are specific to the hardware: the external clock frequency
and the link frequency. The two parameters generally are obtained from system
-firmware. No other frequencies should be used in any circumstances.
+firmware. **No other frequencies should be used in any circumstances.**
The reason why the clock frequencies are so important is that the clock signals
come out of the SoC, and in many cases a specific frequency is designed to be
@@ -23,6 +23,24 @@ used in the system. Using another frequency may cause harmful effects
elsewhere. Therefore only the pre-determined frequencies are configurable by the
user.
+ACPI
+~~~~
+
+Read the "clock-frequency" _DSD property to denote the frequency. The driver can
+rely on this frequency being used.
+
+Devicetree
+~~~~~~~~~~
+
+The currently preferred way to achieve this is using "assigned-clock-rates"
+property. See Documentation/devicetree/bindings/clock/clock-bindings.txt for
+more information. The driver then gets the frequency using clk_get_rate().
+
+This approach has the drawback that there's no guarantee that the frequency
+hasn't been modified directly or indirectly by another driver, or that it is
+even supported by the board's clock tree to begin with. Changes to the Common
+Clock Framework API are required to ensure reliability.
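+
+A hypothetical sensor node using this property could look as follows (the
+compatible string, clock phandle and rate are placeholders)::
+
+    camera-sensor@10 {
+        compatible = "vendor,sensor";
+        reg = <0x10>;
+        clocks = <&cam_xclk>;
+        assigned-clocks = <&cam_xclk>;
+        assigned-clock-rates = <19200000>;
+    };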
+
Frame size
----------
diff --git a/Documentation/driver-api/media/cec-core.rst b/Documentation/driver-api/media/cec-core.rst
index a26dc87eee8f..56345eae9a26 100644
--- a/Documentation/driver-api/media/cec-core.rst
+++ b/Documentation/driver-api/media/cec-core.rst
@@ -26,7 +26,7 @@ It is documented in the HDMI 1.4 specification with the new 2.0 bits documented
in the HDMI 2.0 specification. But for most of the features the freely available
HDMI 1.3a specification is sufficient:
-http://www.microprocessor.org/HDMISpecification13a.pdf
+https://www.hdmi.org/spec/index
CEC Adapter Interface
diff --git a/Documentation/driver-api/media/csi2.rst b/Documentation/driver-api/media/csi2.rst
index e3bbc6baf0f0..11c52b0be8b8 100644
--- a/Documentation/driver-api/media/csi2.rst
+++ b/Documentation/driver-api/media/csi2.rst
@@ -35,7 +35,7 @@ ability to start and stop the stream.
The value of the V4L2_CID_PIXEL_RATE is calculated as follows::
- pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample
+ pixel_rate = link_freq * 2 * nr_of_lanes * 16 / k / bits_per_sample
where
@@ -53,6 +53,8 @@ where
- Two bits are transferred per clock cycle per lane.
* - bits_per_sample
- Number of bits per sample.
+ * - k
+ - 16 for D-PHY and 7 for C-PHY
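+
+For example, a hypothetical D-PHY bus (k = 16) with a link frequency of
+300 MHz, four lanes and 12 bits per sample would yield::
+
+    pixel_rate = 300000000 * 2 * 4 * 16 / 16 / 12 = 200000000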
The transmitter drivers must, if possible, configure the CSI-2
transmitter to *LP-11 mode* whenever the transmitter is powered on but
diff --git a/Documentation/driver-api/media/drivers/ccs/ccs.rst b/Documentation/driver-api/media/drivers/ccs/ccs.rst
index f49e971f2d92..b461c8aa2a16 100644
--- a/Documentation/driver-api/media/drivers/ccs/ccs.rst
+++ b/Documentation/driver-api/media/drivers/ccs/ccs.rst
@@ -79,4 +79,17 @@ definitions:
-l drivers/media/i2c/ccs/ccs-limits.c \
-c Documentation/driver-api/media/drivers/ccs/ccs-regs.asc
+CCS PLL calculator
+==================
+
+The CCS PLL calculator is used to compute the PLL configuration, given the
+sensor's capabilities as well as the board configuration and user-specified
+configuration. As the configuration space that encompasses all these
+configurations is vast, the PLL calculator isn't entirely trivial. Yet it is
+relatively simple to use for a driver.
+
+The PLL model implemented by the PLL calculator corresponds to MIPI CCS 1.1.
+
+.. kernel-doc:: drivers/media/i2c/ccs-pll.h
+
**Copyright** |copy| 2020 Intel Corporation
diff --git a/Documentation/driver-api/media/v4l2-clocks.rst b/Documentation/driver-api/media/v4l2-clocks.rst
deleted file mode 100644
index 5c22eecab7ba..000000000000
--- a/Documentation/driver-api/media/v4l2-clocks.rst
+++ /dev/null
@@ -1,31 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-V4L2 clocks
------------
-
-.. attention::
-
- This is a temporary API and it shall be replaced by the generic
- clock API, when the latter becomes widely available.
-
-Many subdevices, like camera sensors, TV decoders and encoders, need a clock
-signal to be supplied by the system. Often this clock is supplied by the
-respective bridge device. The Linux kernel provides a Common Clock Framework for
-this purpose. However, it is not (yet) available on all architectures. Besides,
-the nature of the multi-functional (clock, data + synchronisation, I2C control)
-connection of subdevices to the system might impose special requirements on the
-clock API usage. E.g. V4L2 has to support clock provider driver unregistration
-while a subdevice driver is holding a reference to the clock. For these reasons
-a V4L2 clock helper API has been developed and is provided to bridge and
-subdevice drivers.
-
-The API consists of two parts: two functions to register and unregister a V4L2
-clock source: v4l2_clk_register() and v4l2_clk_unregister() and calls to control
-a clock object, similar to the respective generic clock API calls:
-v4l2_clk_get(), v4l2_clk_put(), v4l2_clk_enable(), v4l2_clk_disable(),
-v4l2_clk_get_rate(), and v4l2_clk_set_rate(). Clock suppliers have to provide
-clock operations that will be called when clock users invoke respective API
-methods.
-
-It is expected that once the CCF becomes available on all relevant
-architectures this API will be removed.
diff --git a/Documentation/driver-api/media/v4l2-core.rst b/Documentation/driver-api/media/v4l2-core.rst
index 0dcad7a23141..1a8c4a5f256b 100644
--- a/Documentation/driver-api/media/v4l2-core.rst
+++ b/Documentation/driver-api/media/v4l2-core.rst
@@ -15,7 +15,6 @@ Video4Linux devices
v4l2-controls
v4l2-videobuf
v4l2-videobuf2
- v4l2-clocks
v4l2-dv-timings
v4l2-flash-led-class
v4l2-mc
diff --git a/Documentation/driver-api/media/v4l2-subdev.rst b/Documentation/driver-api/media/v4l2-subdev.rst
index bb5b1a7cdfd9..8b53da2f9c74 100644
--- a/Documentation/driver-api/media/v4l2-subdev.rst
+++ b/Documentation/driver-api/media/v4l2-subdev.rst
@@ -122,15 +122,12 @@ Don't forget to cleanup the media entity before the sub-device is destroyed:
media_entity_cleanup(&sd->entity);
-If the subdev driver intends to process video and integrate with the media
-framework, it must implement format related functionality using
-:c:type:`v4l2_subdev_pad_ops` instead of :c:type:`v4l2_subdev_video_ops`.
-
-In that case, the subdev driver may set the link_validate field to provide
-its own link validation function. The link validation function is called for
-every link in the pipeline where both of the ends of the links are V4L2
-sub-devices. The driver is still responsible for validating the correctness
-of the format configuration between sub-devices and video nodes.
+If a sub-device driver implements sink pads, the subdev driver may set the
+link_validate field in :c:type:`v4l2_subdev_pad_ops` to provide its own link
+validation function. For every link in the pipeline, the link_validate pad
+operation of the sink end of the link is called. In both cases the driver is
+still responsible for validating the correctness of the format configuration
+between sub-devices and video nodes.
If link_validate op is not set, the default function
:c:func:`v4l2_subdev_link_validate_default` is used instead. This function
@@ -200,15 +197,45 @@ unregister the notifier the driver has to call
takes two arguments: a pointer to struct :c:type:`v4l2_device` and a
pointer to struct :c:type:`v4l2_async_notifier`.
-Before registering the notifier, bridge drivers must do two things:
-first, the notifier must be initialized using the
-:c:func:`v4l2_async_notifier_init`. Second, bridge drivers can then
-begin to form a list of subdevice descriptors that the bridge device
-needs for its operation. Subdevice descriptors are added to the notifier
-using the :c:func:`v4l2_async_notifier_add_subdev` call. This function
-takes two arguments: a pointer to struct :c:type:`v4l2_async_notifier`,
-and a pointer to the subdevice descripter, which is of type struct
-:c:type:`v4l2_async_subdev`.
+Before registering the notifier, bridge drivers must do two things: first, the
+notifier must be initialized using the :c:func:`v4l2_async_notifier_init`.
+Second, bridge drivers can then begin to form a list of subdevice descriptors
+that the bridge device needs for its operation. Several functions are available
+to add subdevice descriptors to a notifier, depending on the type of device and
+the needs of the driver.
+
+:c:func:`v4l2_async_notifier_add_fwnode_remote_subdev` and
+:c:func:`v4l2_async_notifier_add_i2c_subdev` are for bridge and ISP drivers for
+registering their async sub-devices with the notifier.
+
+:c:func:`v4l2_async_register_subdev_sensor_common` is a helper function for
+sensor drivers registering their own async sub-device, but it also registers a
+notifier and further registers async sub-devices for lens and flash devices
+found in firmware. The notifier for the sub-device is unregistered with the
+async sub-device.
+
+These functions allocate an async sub-device descriptor which is of type struct
+:c:type:`v4l2_async_subdev` embedded in a driver-specific struct. The struct
+:c:type:`v4l2_async_subdev` shall be the first member of this struct:
+
+.. code-block:: c
+
+ struct my_async_subdev {
+ struct v4l2_async_subdev asd;
+ ...
+ };
+
+ struct my_async_subdev *my_asd;
+ struct fwnode_handle *ep;
+
+ ...
+
+ my_asd = v4l2_async_notifier_add_fwnode_remote_subdev(&notifier, ep,
+ struct my_async_subdev);
+ fwnode_handle_put(ep);
+
+ if (IS_ERR(my_asd))
+ return PTR_ERR(my_asd);
The V4L2 core will then use these descriptors to match asynchronously
registered subdevices to them. If a match is detected the ``.bound()``
diff --git a/Documentation/driver-api/men-chameleon-bus.rst b/Documentation/driver-api/men-chameleon-bus.rst
index 1b1f048aa748..6f0b9ee47595 100644
--- a/Documentation/driver-api/men-chameleon-bus.rst
+++ b/Documentation/driver-api/men-chameleon-bus.rst
@@ -18,6 +18,7 @@ MEN Chameleon Bus
4.1 The driver structure
4.2 Probing and attaching
4.3 Initializing the driver
+ 4.4 Using DMA
Introduction
@@ -173,3 +174,14 @@ module at the MCB core::
The module_mcb_driver() macro can be used to reduce the above code::
module_mcb_driver(foo_driver);
+
+Using DMA
+---------
+
+To make use of the kernel's DMA-API's function, you will need to use the
+carrier device's 'struct device'. Fortunately 'struct mcb_device' embeds a
+pointer (->dma_dev) to the carrier's device for DMA purposes::
+
+ ret = dma_set_mask_and_coherent(mdev->dma_dev, DMA_BIT_MASK(dma_bits));
+ if (ret)
+ /* Handle errors */
diff --git a/Documentation/driver-api/pti_intel_mid.rst b/Documentation/driver-api/pti_intel_mid.rst
deleted file mode 100644
index bacc2a4ee89f..000000000000
--- a/Documentation/driver-api/pti_intel_mid.rst
+++ /dev/null
@@ -1,108 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-=============
-Intel MID PTI
-=============
-
-The Intel MID PTI project is HW implemented in Intel Atom
-system-on-a-chip designs based on the Parallel Trace
-Interface for MIPI P1149.7 cJTAG standard. The kernel solution
-for this platform involves the following files::
-
- ./include/linux/pti.h
- ./drivers/.../n_tracesink.h
- ./drivers/.../n_tracerouter.c
- ./drivers/.../n_tracesink.c
- ./drivers/.../pti.c
-
-pti.c is the driver that enables various debugging features
-popular on platforms from certain mobile manufacturers.
-n_tracerouter.c and n_tracesink.c allow extra system information to
-be collected and routed to the pti driver, such as trace
-debugging data from a modem. Although n_tracerouter
-and n_tracesink are a part of the complete PTI solution,
-these two line disciplines can work separately from
-pti.c and route any data stream from one /dev/tty node
-to another /dev/tty node via kernel-space. This provides
-a stable, reliable connection that will not break unless
-the user-space application shuts down (plus avoids
-kernel->user->kernel context switch overheads of routing
-data).
-
-An example debugging usage for this driver system:
-
- * Hook /dev/ttyPTI0 to syslogd. Opening this port will also start
- a console device to further capture debugging messages to PTI.
- * Hook /dev/ttyPTI1 to modem debugging data to write to PTI HW.
- This is where n_tracerouter and n_tracesink are used.
- * Hook /dev/pti to a user-level debugging application for writing
- to PTI HW.
- * `Use mipi_` Kernel Driver API in other device drivers for
- debugging to PTI by first requesting a PTI write address via
- mipi_request_masterchannel(1).
-
-Below is example pseudo-code on how a 'privileged' application
-can hook up n_tracerouter and n_tracesink to any tty on
-a system. 'Privileged' means the application has enough
-privileges to successfully manipulate the ldisc drivers
-but is not just blindly executing as 'root'. Keep in mind
-the use of ioctl(,TIOCSETD,) is not specific to the n_tracerouter
-and n_tracesink line discpline drivers but is a generic
-operation for a program to use a line discpline driver
-on a tty port other than the default n_tty:
-
-.. code-block:: c
-
- /////////// To hook up n_tracerouter and n_tracesink /////////
-
- // Note that n_tracerouter depends on n_tracesink.
- #include <errno.h>
- #define ONE_TTY "/dev/ttyOne"
- #define TWO_TTY "/dev/ttyTwo"
-
- // needed global to hand onto ldisc connection
- static int g_fd_source = -1;
- static int g_fd_sink = -1;
-
- // these two vars used to grab LDISC values from loaded ldisc drivers
- // in OS. Look at /proc/tty/ldiscs to get the right numbers from
- // the ldiscs loaded in the system.
- int source_ldisc_num, sink_ldisc_num = -1;
- int retval;
-
- g_fd_source = open(ONE_TTY, O_RDWR); // must be R/W
- g_fd_sink = open(TWO_TTY, O_RDWR); // must be R/W
-
- if (g_fd_source <= 0) || (g_fd_sink <= 0) {
- // doubt you'll want to use these exact error lines of code
- printf("Error on open(). errno: %d\n",errno);
- return errno;
- }
-
- retval = ioctl(g_fd_sink, TIOCSETD, &sink_ldisc_num);
- if (retval < 0) {
- printf("Error on ioctl(). errno: %d\n", errno);
- return errno;
- }
-
- retval = ioctl(g_fd_source, TIOCSETD, &source_ldisc_num);
- if (retval < 0) {
- printf("Error on ioctl(). errno: %d\n", errno);
- return errno;
- }
-
- /////////// To disconnect n_tracerouter and n_tracesink ////////
-
- // First make sure data through the ldiscs has stopped.
-
- // Second, disconnect ldiscs. This provides a
- // little cleaner shutdown on tty stack.
- sink_ldisc_num = 0;
- source_ldisc_num = 0;
- ioctl(g_fd_uart, TIOCSETD, &sink_ldisc_num);
- ioctl(g_fd_gadget, TIOCSETD, &source_ldisc_num);
-
- // Three, program closes connection, and cleanup:
- close(g_fd_uart);
- close(g_fd_gadget);
- g_fd_uart = g_fd_gadget = NULL;
diff --git a/Documentation/driver-api/surface_aggregator/client-api.rst b/Documentation/driver-api/surface_aggregator/client-api.rst
new file mode 100644
index 000000000000..8e0b000d0e64
--- /dev/null
+++ b/Documentation/driver-api/surface_aggregator/client-api.rst
@@ -0,0 +1,38 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+===============================
+Client Driver API Documentation
+===============================
+
+.. contents::
+ :depth: 2
+
+
+Serial Hub Communication
+========================
+
+.. kernel-doc:: include/linux/surface_aggregator/serial_hub.h
+
+.. kernel-doc:: drivers/platform/surface/aggregator/ssh_packet_layer.c
+ :export:
+
+
+Controller and Core Interface
+=============================
+
+.. kernel-doc:: include/linux/surface_aggregator/controller.h
+
+.. kernel-doc:: drivers/platform/surface/aggregator/controller.c
+ :export:
+
+.. kernel-doc:: drivers/platform/surface/aggregator/core.c
+ :export:
+
+
+Client Bus and Client Device API
+================================
+
+.. kernel-doc:: include/linux/surface_aggregator/device.h
+
+.. kernel-doc:: drivers/platform/surface/aggregator/bus.c
+ :export:
diff --git a/Documentation/driver-api/surface_aggregator/client.rst b/Documentation/driver-api/surface_aggregator/client.rst
new file mode 100644
index 000000000000..26d13085a117
--- /dev/null
+++ b/Documentation/driver-api/surface_aggregator/client.rst
@@ -0,0 +1,393 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+.. |ssam_controller| replace:: :c:type:`struct ssam_controller <ssam_controller>`
+.. |ssam_device| replace:: :c:type:`struct ssam_device <ssam_device>`
+.. |ssam_device_driver| replace:: :c:type:`struct ssam_device_driver <ssam_device_driver>`
+.. |ssam_client_bind| replace:: :c:func:`ssam_client_bind`
+.. |ssam_client_link| replace:: :c:func:`ssam_client_link`
+.. |ssam_get_controller| replace:: :c:func:`ssam_get_controller`
+.. |ssam_controller_get| replace:: :c:func:`ssam_controller_get`
+.. |ssam_controller_put| replace:: :c:func:`ssam_controller_put`
+.. |ssam_device_alloc| replace:: :c:func:`ssam_device_alloc`
+.. |ssam_device_add| replace:: :c:func:`ssam_device_add`
+.. |ssam_device_remove| replace:: :c:func:`ssam_device_remove`
+.. |ssam_device_driver_register| replace:: :c:func:`ssam_device_driver_register`
+.. |ssam_device_driver_unregister| replace:: :c:func:`ssam_device_driver_unregister`
+.. |module_ssam_device_driver| replace:: :c:func:`module_ssam_device_driver`
+.. |SSAM_DEVICE| replace:: :c:func:`SSAM_DEVICE`
+.. |ssam_notifier_register| replace:: :c:func:`ssam_notifier_register`
+.. |ssam_notifier_unregister| replace:: :c:func:`ssam_notifier_unregister`
+.. |ssam_request_sync| replace:: :c:func:`ssam_request_sync`
+.. |ssam_event_mask| replace:: :c:type:`enum ssam_event_mask <ssam_event_mask>`
+
+
+======================
+Writing Client Drivers
+======================
+
+For the API documentation, refer to:
+
+.. toctree::
+ :maxdepth: 2
+
+ client-api
+
+
+Overview
+========
+
+Client drivers can be set up in two main ways, depending on how the
+corresponding device is made available to the system. We specifically
+differentiate between devices that are presented to the system via one of
+the conventional ways, e.g. as platform devices via ACPI, and devices that
+are non-discoverable and instead need to be explicitly provided by some
+other mechanism, as discussed further below.
+
+
+Non-SSAM Client Drivers
+=======================
+
+All communication with the SAM EC is handled via the |ssam_controller|
+representing that EC to the kernel. Drivers targeting a non-SSAM device (and
+thus not being a |ssam_device_driver|) need to explicitly establish a
+connection/relation to that controller. This can be done via the
+|ssam_client_bind| function. Said function returns a reference to the SSAM
+controller, but, more importantly, also establishes a device link between
+client device and controller (this can also be done separate via
+|ssam_client_link|). It is important to do this, as it, first, guarantees
+that the returned controller is valid for use in the client driver for as
+long as this driver is bound to its device, i.e. that the driver gets
+unbound before the controller ever becomes invalid, and, second, as it
+ensures correct suspend/resume ordering. This setup should be done in the
+driver's probe function, and may be used to defer probing in case the SSAM
+subsystem is not ready yet, for example:
+
+.. code-block:: c
+
+ static int client_driver_probe(struct platform_device *pdev)
+ {
+ struct ssam_controller *ctrl;
+
+ ctrl = ssam_client_bind(&pdev->dev);
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
+
+ // ...
+
+ return 0;
+ }
+
+The controller may be separately obtained via |ssam_get_controller| and its
+lifetime be guaranteed via |ssam_controller_get| and |ssam_controller_put|.
+Note that none of these functions, however, guarantee that the controller
+will not be shut down or suspended. These functions essentially only operate
+on the reference, i.e. only guarantee a bare minimum of accessibility
+without any guarantees at all on practical operability.
+
+
+Adding SSAM Devices
+===================
+
+If a device does not already exist/is not already provided via conventional
+means, it should be provided as |ssam_device| via the SSAM client device
+hub. New devices can be added to this hub by entering their UID into the
+corresponding registry. SSAM devices can also be manually allocated via
+|ssam_device_alloc|, subsequently to which they have to be added via
+|ssam_device_add| and eventually removed via |ssam_device_remove|. By
+default, the parent of the device is set to the controller device provided
+for allocation; however, this may be changed before the device is added. Note
+that, when changing the parent device, care must be taken to ensure that the
+controller lifetime and suspend/resume ordering guarantees, in the default
+setup provided through the parent-child relation, are preserved. If
+necessary, this can be ensured by use of |ssam_client_link|, as is done for
+non-SSAM client drivers and described in more detail above.
+
+A client device must always be removed by the party which added the
+respective device before the controller shuts down. Such removal can be
+guaranteed by linking the driver providing the SSAM device to the controller
+via |ssam_client_link|, causing it to unbind before the controller driver
+unbinds. Client devices registered with the controller as parent are
+automatically removed when the controller shuts down, but this should not be
+relied upon, especially as this does not extend to client devices with a
+different parent.
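+
+A rough sketch of manually allocating, adding and removing such a device is
+given below. The UID values are placeholders, and it is assumed here that
+|ssam_device_alloc| takes a controller ``ctrl`` and a UID struct with the
+member names used below, and that |ssam_device_add| returns an error code:
+
+.. code-block:: c
+
+    struct ssam_device_uid uid = {
+        .domain = SSAM_DOMAIN_VIRTUAL,  /* placeholder UID */
+        .category = 0x01,
+        .target = 0x01,
+        .instance = 0x00,
+        .function = 0x00,
+    };
+    struct ssam_device *sdev;
+    int status;
+
+    sdev = ssam_device_alloc(ctrl, uid);
+    if (!sdev)
+        return -ENOMEM;
+
+    status = ssam_device_add(sdev);
+    if (status)
+        return status;  /* reference release omitted for brevity */
+
+    /* ... later, removed by the same party that added it: */
+    ssam_device_remove(sdev);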
+
+
+SSAM Client Drivers
+===================
+
+SSAM client device drivers are, in essence, no different than other device
+driver types. They are represented via |ssam_device_driver| and bind to a
+|ssam_device| via its UID (:c:type:`struct ssam_device.uid <ssam_device>`)
+member and the match table
+(:c:type:`struct ssam_device_driver.match_table <ssam_device_driver>`),
+which should be set when declaring the driver struct instance. Refer to the
+|SSAM_DEVICE| macro documentation for more details on how to define members
+of the driver's match table.
+
+The UID for SSAM client devices consists of a ``domain``, a ``category``,
+a ``target``, an ``instance``, and a ``function``. The ``domain`` is used to
+differentiate between physical SAM devices
+(:c:type:`SSAM_DOMAIN_SERIALHUB <ssam_device_domain>`), i.e. devices that can
+be accessed via the Surface Serial Hub, and virtual ones
+(:c:type:`SSAM_DOMAIN_VIRTUAL <ssam_device_domain>`), such as client-device
+hubs, that have no real representation on the SAM EC and are solely used on
+the kernel/driver-side. For physical devices, ``category`` represents the
+target category, ``target`` the target ID, and ``instance`` the instance ID
+used to access the physical SAM device. In addition, ``function`` references
+a specific device functionality, but has no meaning to the SAM EC. The
+(default) name of a client device is generated based on its UID.
+
+A driver instance can be registered via |ssam_device_driver_register| and
+unregistered via |ssam_device_driver_unregister|. For convenience, the
+|module_ssam_device_driver| macro may be used to define module init- and
+exit-functions registering the driver.
+
+The controller associated with a SSAM client device can be found in its
+:c:type:`struct ssam_device.ctrl <ssam_device>` member. This reference is
+guaranteed to be valid for at least as long as the client driver is bound,
+but should also be valid for as long as the client device exists. Note,
+however, that access outside of the bound client driver must ensure that the
+controller device is not suspended while making any requests or
+(un-)registering event notifiers (and thus should generally be avoided). This
+is guaranteed when the controller is accessed from inside the bound client
+driver.
+
+
+Making Synchronous Requests
+===========================
+
+Synchronous requests are (currently) the main form of host-initiated
+communication with the EC. There are a couple of ways to define and execute
+such requests, however, most of them boil down to something similar as shown
+in the example below. This example defines a write-read request, meaning
+that the caller provides an argument to the SAM EC and receives a response.
+The caller needs to know the (maximum) length of the response payload and
+provide a buffer for it.
+
+Care must be taken to ensure that any command payload data passed to the SAM
+EC is provided in little-endian format and, similarly, any response payload
+data received from it is converted from little-endian to host endianness.
+
+.. code-block:: c
+
+ int perform_request(struct ssam_controller *ctrl, u32 arg, u32 *ret)
+ {
+ struct ssam_request rqst;
+ struct ssam_response resp;
+ int status;
+
+ /* Convert request argument to little-endian. */
+ __le32 arg_le = cpu_to_le32(arg);
+ __le32 ret_le = cpu_to_le32(0);
+
+ /*
+ * Initialize request specification. Replace this with your values.
+ * The rqst.payload field may be NULL if rqst.length is zero,
+ * indicating that the request does not have any argument.
+ *
+ * Note: The request parameters used here are not valid, i.e.
+ * they do not correspond to an actual SAM/EC request.
+ */
+ rqst.target_category = SSAM_SSH_TC_SAM;
+ rqst.target_id = 0x01;
+ rqst.command_id = 0x02;
+ rqst.instance_id = 0x03;
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+ rqst.length = sizeof(arg_le);
+ rqst.payload = (u8 *)&arg_le;
+
+ /* Initialize request response. */
+ resp.capacity = sizeof(ret_le);
+ resp.length = 0;
+ resp.pointer = (u8 *)&ret_le;
+
+ /*
+ * Perform actual request. The response pointer may be null in case
+ * the request does not have any response. This must be consistent
+ * with the SSAM_REQUEST_HAS_RESPONSE flag set in the specification
+ * above.
+ */
+ status = ssam_request_sync(ctrl, &rqst, &resp);
+
+ /*
+ * Alternatively use
+ *
+ * ssam_request_sync_onstack(ctrl, &rqst, &resp, sizeof(arg_le));
+ *
+ * to perform the request, allocating the message buffer directly
+ * on the stack as opposed to allocation via kzalloc().
+ */
+
+ /*
+ * Convert request response back to native format. Note that in the
+ * error case, this value is not touched by the SSAM core, i.e.
+ * 'ret_le' will be zero as specified in its initialization.
+ */
+ *ret = le32_to_cpu(ret_le);
+
+ return status;
+ }
+
+Note that |ssam_request_sync| in its essence is a wrapper over lower-level
+request primitives, which may also be used to perform requests. Refer to its
+implementation and documentation for more details.
+
+An arguably more user-friendly way of defining such functions is by using
+one of the generator macros, for example via:
+
+.. code-block:: c
+
+ SSAM_DEFINE_SYNC_REQUEST_W(__ssam_tmp_perf_mode_set, __le32, {
+ .target_category = SSAM_SSH_TC_TMP,
+ .target_id = 0x01,
+ .command_id = 0x03,
+ .instance_id = 0x00,
+ });
+
+This example defines a function
+
+.. code-block:: c
+
+ int __ssam_tmp_perf_mode_set(struct ssam_controller *ctrl, const __le32 *arg);
+
+executing the specified request, with the controller passed in when calling
+said function. In this example, the argument is provided via the ``arg``
+pointer. Note that the generated function allocates the message buffer on
+the stack. Thus, if the argument provided via the request is large, these
+kinds of macros should be avoided. Also note that, in contrast to the
+previous non-macro example, this function does not do any endianness
+conversion, which has to be handled by the caller. Apart from those
+differences the function generated by the macro is similar to the one
+provided in the non-macro example above.
+
+The full list of such function-generating macros is
+
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_N` for requests without return value and
+ without argument.
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_R` for requests with return value but no
+ argument.
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_W` for requests without return value but
+ with argument.
+
+Refer to their respective documentation for more details. For each one of
+these macros, a special variant is provided, which targets request types
+applicable to multiple instances of the same device type:
+
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_MD_N`
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_MD_R`
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_MD_W`
+
+These macros differ from the previously mentioned versions in that the device
+target and instance IDs are not fixed for the generated function, but instead
+have to be provided by the caller of said function.
+
+Additionally, variants for direct use with client devices, i.e.
+|ssam_device|, are also provided. These can, for example, be used as
+follows:
+
+.. code-block:: c
+
+ SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_sta, __le32, {
+ .target_category = SSAM_SSH_TC_BAT,
+ .command_id = 0x01,
+ });
+
+This invocation of the macro defines a function
+
+.. code-block:: c
+
+ int ssam_bat_get_sta(struct ssam_device *sdev, __le32 *ret);
+
+executing the specified request, using the device IDs and controller given
+in the client device. The full list of such macros for client devices is:
+
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_CL_N`
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_CL_R`
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_CL_W`
+
+
+Handling Events
+===============
+
+To receive events from the SAM EC, an event notifier must be registered for
+the desired event via |ssam_notifier_register|. The notifier must be
+unregistered via |ssam_notifier_unregister| once it is not required any
+more.
+
+Event notifiers are registered by providing (at minimum) a callback to call
+in case an event has been received, the registry specifying how the event
+should be enabled, an event ID specifying for which target category and,
+optionally and depending on the registry used, for which instance ID events
+should be enabled, and finally, flags describing how the EC will send these
+events. If the specific registry does not enable events by instance ID, the
+instance ID must be set to zero. Additionally, a priority for the respective
+notifier may be specified, which determines its order in relation to any
+other notifier registered for the same target category.
+
+By default, event notifiers will receive all events for the specific target
+category, regardless of the instance ID specified when registering the
+notifier. The core may be instructed to only call a notifier if the target
+ID or instance ID (or both) of the event match the ones implied by the
+notifier IDs (in case of target ID, the target ID of the registry), by
+providing an event mask (see |ssam_event_mask|).
+
+In general, the target ID of the registry is also the target ID of the
+enabled event (with the notable exception being keyboard input events on the
+Surface Laptop 1 and 2, which are enabled via a registry with target ID 1,
+but provide events with target ID 2).
+
+A full example for registering an event notifier and handling received
+events is provided below:
+
+.. code-block:: c
+
+ u32 notifier_callback(struct ssam_event_notifier *nf,
+ const struct ssam_event *event)
+ {
+ int status = ...
+
+ /* Handle the event here ... */
+
+ /* Convert return value and indicate that we handled the event. */
+ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
+ }
+
+ int setup_notifier(struct ssam_device *sdev,
+ struct ssam_event_notifier *nf)
+ {
+ /* Set priority wrt. other handlers of same target category. */
+ nf->base.priority = 1;
+
+ /* Set event/notifier callback. */
+ nf->base.fn = notifier_callback;
+
+ /* Specify event registry, i.e. how events get enabled/disabled. */
+ nf->event.reg = SSAM_EVENT_REGISTRY_KIP;
+
+ /* Specify which event to enable/disable */
+ nf->event.id.target_category = sdev->uid.category;
+ nf->event.id.instance = sdev->uid.instance;
+
+ /*
+ * Specify for which events the notifier callback gets executed.
+ * This essentially tells the core if it can skip notifiers that
+ * don't have target or instance IDs matching those of the event.
+ */
+ nf->event.mask = SSAM_EVENT_MASK_STRICT;
+
+ /* Specify event flags. */
+ nf->event.flags = SSAM_EVENT_SEQUENCED;
+
+ return ssam_notifier_register(sdev->ctrl, nf);
+ }
+
+Multiple event notifiers can be registered for the same event. The event
+handler core takes care of enabling and disabling events when notifiers are
+registered and unregistered, by keeping track of how many notifiers for a
+specific event (combination of registry, event target category, and event
+instance ID) are currently registered. This means that a specific event will
+be enabled when the first notifier for it is being registered and disabled
+when the last notifier for it is being unregistered. Note that the event
+flags are therefore only used for the first registered notifier. One should
+thus take care that notifiers for a specific event are always registered
+with the same flags, as it is considered a bug to do otherwise.
diff --git a/Documentation/driver-api/surface_aggregator/clients/cdev.rst b/Documentation/driver-api/surface_aggregator/clients/cdev.rst
new file mode 100644
index 000000000000..248c1372d879
--- /dev/null
+++ b/Documentation/driver-api/surface_aggregator/clients/cdev.rst
@@ -0,0 +1,87 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+.. |u8| replace:: :c:type:`u8 <u8>`
+.. |u16| replace:: :c:type:`u16 <u16>`
+.. |ssam_cdev_request| replace:: :c:type:`struct ssam_cdev_request <ssam_cdev_request>`
+.. |ssam_cdev_request_flags| replace:: :c:type:`enum ssam_cdev_request_flags <ssam_cdev_request_flags>`
+
+==============================
+User-Space EC Interface (cdev)
+==============================
+
+The ``surface_aggregator_cdev`` module provides a misc-device for the SSAM
+controller to allow for a (more or less) direct connection from user-space to
+the SAM EC. It is intended to be used for development and debugging, and
+therefore should not be used or relied upon in any other way. Note that this
+module is not loaded automatically, but instead must be loaded manually.
+
+The provided interface is accessible through the ``/dev/surface/aggregator``
+device-file. All functionality of this interface is provided via IOCTLs.
+These IOCTLs and their respective input/output parameter structs are defined in
+``include/uapi/linux/surface_aggregator/cdev.h``.
+
+A small python library and scripts for accessing this interface can be found
+at https://github.com/linux-surface/surface-aggregator-module/tree/master/scripts/ssam.
+
+
+Controller IOCTLs
+=================
+
+The following IOCTLs are provided:
+
+.. flat-table:: Controller IOCTLs
+ :widths: 1 1 1 1 4
+ :header-rows: 1
+
+ * - Type
+ - Number
+ - Direction
+ - Name
+ - Description
+
+ * - ``0xA5``
+ - ``1``
+ - ``WR``
+ - ``REQUEST``
+ - Perform synchronous SAM request.
+
+
+``REQUEST``
+-----------
+
+Defined as ``_IOWR(0xA5, 1, struct ssam_cdev_request)``.
+
+Executes a synchronous SAM request. The request specification is passed in
+as argument of type |ssam_cdev_request|, which is then written to/modified
+by the IOCTL to return status and result of the request.
+
+Request payload data must be allocated separately and is passed in via the
+``payload.data`` and ``payload.length`` members. If a response is required,
+the response buffer must be allocated by the caller and passed in via the
+``response.data`` member. The ``response.length`` member must be set to the
+capacity of this buffer, or if no response is required, zero. Upon
+completion of the request, the call will write the response to the response
+buffer (if its capacity allows it) and overwrite the length field with the
+actual size of the response, in bytes.
+
+Additionally, if the request has a response, this must be indicated via the
+request flags, as is done with in-kernel requests. Request flags can be set
+via the ``flags`` member and the values correspond to the values found in
+|ssam_cdev_request_flags|.
+
+Finally, the status of the request itself is returned in the ``status``
+member (a negative errno value indicating failure). Note that failure
+indication of the IOCTL is separated from failure indication of the request:
+The IOCTL returns a negative status code if anything failed during setup of
+the request (``-EFAULT``) or if the provided argument or any of its fields
+are invalid (``-EINVAL``). In this case, the status value of the request
+argument may be set, providing more detail on what went wrong (e.g.
+``-ENOMEM`` for out-of-memory), but this value may also be zero. The IOCTL
+will return with a zero status code in case the request has been set up,
+submitted, and completed (i.e. handed back to user-space) successfully from
+inside the IOCTL, but the request ``status`` member may still be negative in
+case the actual execution of the request failed after it has been submitted.
+
+A full definition of the argument struct is provided below:
+
+.. kernel-doc:: include/uapi/linux/surface_aggregator/cdev.h
diff --git a/Documentation/driver-api/surface_aggregator/clients/index.rst b/Documentation/driver-api/surface_aggregator/clients/index.rst
new file mode 100644
index 000000000000..3ccabce23271
--- /dev/null
+++ b/Documentation/driver-api/surface_aggregator/clients/index.rst
@@ -0,0 +1,21 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+===========================
+Client Driver Documentation
+===========================
+
+This is the documentation for client drivers themselves. Refer to
+:doc:`../client` for documentation on how to write client drivers.
+
+.. toctree::
+ :maxdepth: 1
+
+ cdev
+ san
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/driver-api/surface_aggregator/clients/san.rst b/Documentation/driver-api/surface_aggregator/clients/san.rst
new file mode 100644
index 000000000000..38c2580e7758
--- /dev/null
+++ b/Documentation/driver-api/surface_aggregator/clients/san.rst
@@ -0,0 +1,44 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+.. |san_client_link| replace:: :c:func:`san_client_link`
+.. |san_dgpu_notifier_register| replace:: :c:func:`san_dgpu_notifier_register`
+.. |san_dgpu_notifier_unregister| replace:: :c:func:`san_dgpu_notifier_unregister`
+
+===================
+Surface ACPI Notify
+===================
+
+The Surface ACPI Notify (SAN) device provides the bridge between ACPI and
+SAM controller. Specifically, ACPI code can execute requests and handle
+battery and thermal events via this interface. In addition to this, events
+relating to the discrete GPU (dGPU) of the Surface Book 2 can be sent from
+ACPI code (note: the Surface Book 3 uses a different method for this). The
+only currently known event sent via this interface is a dGPU power-on
+notification. While this driver handles the former part internally, it only
+relays the dGPU events to any other driver interested via its public API and
+does not handle them.
+
+The public interface of this driver is split into two parts: Client
+registration and notifier-block registration.
+
+A client to the SAN interface can be linked as consumer to the SAN device
+via |san_client_link|. This can be used to ensure that a client receiving
+dGPU events does not miss any events due to the SAN interface not being set
+up, as this forces the client driver to unbind once the SAN driver is
+unbound.
+
+Notifier-blocks can be registered by any device for as long as the module is
+loaded, regardless of being linked as client or not. Registration is done
+with |san_dgpu_notifier_register|. If the notifier is not needed any more, it
+should be unregistered via |san_dgpu_notifier_unregister|.
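+
+A minimal consumer sketch using both parts of this interface is given below.
+It assumes that |san_client_link| takes the client's struct device and that
+the dGPU notifier is a standard notifier_block; all names are placeholders:
+
+.. code-block:: c
+
+    static int my_dgpu_notify(struct notifier_block *nb, unsigned long action,
+                              void *data)
+    {
+        /* Handle the dGPU power-on notification here. */
+        return NOTIFY_OK;
+    }
+
+    static struct notifier_block my_dgpu_nb = {
+        .notifier_call = my_dgpu_notify,
+    };
+
+    static int my_probe(struct platform_device *pdev)
+    {
+        int status;
+
+        status = san_client_link(&pdev->dev);
+        if (status)
+            return status;
+
+        return san_dgpu_notifier_register(&my_dgpu_nb);
+    }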
+
+Consult the API documentation below for more details.
+
+
+API Documentation
+=================
+
+.. kernel-doc:: include/linux/surface_acpi_notify.h
+
+.. kernel-doc:: drivers/platform/surface/surface_acpi_notify.c
+ :export:
diff --git a/Documentation/driver-api/surface_aggregator/index.rst b/Documentation/driver-api/surface_aggregator/index.rst
new file mode 100644
index 000000000000..6f3e1094904d
--- /dev/null
+++ b/Documentation/driver-api/surface_aggregator/index.rst
@@ -0,0 +1,21 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+=======================================
+Surface System Aggregator Module (SSAM)
+=======================================
+
+.. toctree::
+ :maxdepth: 2
+
+ overview
+ client
+ clients/index
+ ssh
+ internal
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/driver-api/surface_aggregator/internal-api.rst b/Documentation/driver-api/surface_aggregator/internal-api.rst
new file mode 100644
index 000000000000..639a67b5a392
--- /dev/null
+++ b/Documentation/driver-api/surface_aggregator/internal-api.rst
@@ -0,0 +1,67 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+==========================
+Internal API Documentation
+==========================
+
+.. contents::
+ :depth: 2
+
+
+Packet Transport Layer
+======================
+
+.. kernel-doc:: drivers/platform/surface/aggregator/ssh_parser.h
+ :internal:
+
+.. kernel-doc:: drivers/platform/surface/aggregator/ssh_parser.c
+ :internal:
+
+.. kernel-doc:: drivers/platform/surface/aggregator/ssh_msgb.h
+ :internal:
+
+.. kernel-doc:: drivers/platform/surface/aggregator/ssh_packet_layer.h
+ :internal:
+
+.. kernel-doc:: drivers/platform/surface/aggregator/ssh_packet_layer.c
+ :internal:
+
+
+Request Transport Layer
+=======================
+
+.. kernel-doc:: drivers/platform/surface/aggregator/ssh_request_layer.h
+ :internal:
+
+.. kernel-doc:: drivers/platform/surface/aggregator/ssh_request_layer.c
+ :internal:
+
+
+Controller
+==========
+
+.. kernel-doc:: drivers/platform/surface/aggregator/controller.h
+ :internal:
+
+.. kernel-doc:: drivers/platform/surface/aggregator/controller.c
+ :internal:
+
+
+Client Device Bus
+=================
+
+.. kernel-doc:: drivers/platform/surface/aggregator/bus.c
+ :internal:
+
+
+Core
+====
+
+.. kernel-doc:: drivers/platform/surface/aggregator/core.c
+ :internal:
+
+
+Trace Helpers
+=============
+
+.. kernel-doc:: drivers/platform/surface/aggregator/trace.h
diff --git a/Documentation/driver-api/surface_aggregator/internal.rst b/Documentation/driver-api/surface_aggregator/internal.rst
new file mode 100644
index 000000000000..72704734982a
--- /dev/null
+++ b/Documentation/driver-api/surface_aggregator/internal.rst
@@ -0,0 +1,577 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+.. |ssh_ptl| replace:: :c:type:`struct ssh_ptl <ssh_ptl>`
+.. |ssh_ptl_submit| replace:: :c:func:`ssh_ptl_submit`
+.. |ssh_ptl_cancel| replace:: :c:func:`ssh_ptl_cancel`
+.. |ssh_ptl_shutdown| replace:: :c:func:`ssh_ptl_shutdown`
+.. |ssh_ptl_rx_rcvbuf| replace:: :c:func:`ssh_ptl_rx_rcvbuf`
+.. |ssh_rtl| replace:: :c:type:`struct ssh_rtl <ssh_rtl>`
+.. |ssh_rtl_submit| replace:: :c:func:`ssh_rtl_submit`
+.. |ssh_rtl_cancel| replace:: :c:func:`ssh_rtl_cancel`
+.. |ssh_rtl_shutdown| replace:: :c:func:`ssh_rtl_shutdown`
+.. |ssh_packet| replace:: :c:type:`struct ssh_packet <ssh_packet>`
+.. |ssh_packet_get| replace:: :c:func:`ssh_packet_get`
+.. |ssh_packet_put| replace:: :c:func:`ssh_packet_put`
+.. |ssh_packet_ops| replace:: :c:type:`struct ssh_packet_ops <ssh_packet_ops>`
+.. |ssh_packet_base_priority| replace:: :c:type:`enum ssh_packet_base_priority <ssh_packet_base_priority>`
+.. |ssh_packet_flags| replace:: :c:type:`enum ssh_packet_flags <ssh_packet_flags>`
+.. |SSH_PACKET_PRIORITY| replace:: :c:func:`SSH_PACKET_PRIORITY`
+.. |ssh_frame| replace:: :c:type:`struct ssh_frame <ssh_frame>`
+.. |ssh_command| replace:: :c:type:`struct ssh_command <ssh_command>`
+.. |ssh_request| replace:: :c:type:`struct ssh_request <ssh_request>`
+.. |ssh_request_get| replace:: :c:func:`ssh_request_get`
+.. |ssh_request_put| replace:: :c:func:`ssh_request_put`
+.. |ssh_request_ops| replace:: :c:type:`struct ssh_request_ops <ssh_request_ops>`
+.. |ssh_request_init| replace:: :c:func:`ssh_request_init`
+.. |ssh_request_flags| replace:: :c:type:`enum ssh_request_flags <ssh_request_flags>`
+.. |ssam_controller| replace:: :c:type:`struct ssam_controller <ssam_controller>`
+.. |ssam_device| replace:: :c:type:`struct ssam_device <ssam_device>`
+.. |ssam_device_driver| replace:: :c:type:`struct ssam_device_driver <ssam_device_driver>`
+.. |ssam_client_bind| replace:: :c:func:`ssam_client_bind`
+.. |ssam_client_link| replace:: :c:func:`ssam_client_link`
+.. |ssam_request_sync| replace:: :c:type:`struct ssam_request_sync <ssam_request_sync>`
+.. |ssam_event_registry| replace:: :c:type:`struct ssam_event_registry <ssam_event_registry>`
+.. |ssam_event_id| replace:: :c:type:`struct ssam_event_id <ssam_event_id>`
+.. |ssam_nf| replace:: :c:type:`struct ssam_nf <ssam_nf>`
+.. |ssam_nf_refcount_inc| replace:: :c:func:`ssam_nf_refcount_inc`
+.. |ssam_nf_refcount_dec| replace:: :c:func:`ssam_nf_refcount_dec`
+.. |ssam_notifier_register| replace:: :c:func:`ssam_notifier_register`
+.. |ssam_notifier_unregister| replace:: :c:func:`ssam_notifier_unregister`
+.. |ssam_cplt| replace:: :c:type:`struct ssam_cplt <ssam_cplt>`
+.. |ssam_event_queue| replace:: :c:type:`struct ssam_event_queue <ssam_event_queue>`
+.. |ssam_request_sync_submit| replace:: :c:func:`ssam_request_sync_submit`
+
+=====================
+Core Driver Internals
+=====================
+
+Architectural overview of the Surface System Aggregator Module (SSAM) core
+and Surface Serial Hub (SSH) driver. For the API documentation, refer to:
+
+.. toctree::
+ :maxdepth: 2
+
+ internal-api
+
+
+Overview
+========
+
+The SSAM core implementation is structured in layers, somewhat following the
+SSH protocol structure:
+
+Lower-level packet transport is implemented in the *packet transport layer
+(PTL)*, directly building on top of the serial device (serdev)
+infrastructure of the kernel. As the name indicates, this layer deals with
+the packet transport logic and handles things like packet validation, packet
+acknowledgment (ACKing), packet (retransmission) timeouts, and relaying
+packet payloads to higher-level layers.
+
+Above this sits the *request transport layer (RTL)*. This layer is centered
+around command-type packet payloads, i.e. requests (sent from host to EC),
+responses of the EC to those requests, and events (sent from EC to host).
+It, specifically, distinguishes events from request responses, matches
+responses to their corresponding requests, and implements request timeouts.
+
+The *controller* layer builds on top of this and essentially decides
+how request responses and, especially, events are dealt with. It provides an
+event notifier system, handles event activation/deactivation, provides a
+workqueue for event and asynchronous request completion, and also manages
+the message counters required for building command messages (``SEQ``,
+``RQID``). This layer basically provides a fundamental interface to the SAM
+EC for use in other kernel drivers.
+
+While the controller layer already provides an interface for other kernel
+drivers, the client *bus* extends this interface to provide support for
+native SSAM devices, i.e. devices that are not defined in ACPI and not
+implemented as platform devices, via |ssam_device| and |ssam_device_driver|,
+which simplify management of client devices and client drivers.
+
+Refer to :doc:`client` for documentation regarding the client device/driver
+API and interface options for other kernel drivers. It is recommended to
+familiarize oneself with that chapter and the :doc:`ssh` before continuing
+with the architectural overview below.
+
+
+Packet Transport Layer
+======================
+
+The packet transport layer is represented via |ssh_ptl| and is structured
+around the following key concepts:
+
+Packets
+-------
+
+Packets are the fundamental transmission unit of the SSH protocol. They are
+managed by the packet transport layer, which is essentially the lowest layer
+of the driver and is built upon by other components of the SSAM core.
+Packets to be transmitted by the SSAM core are represented via |ssh_packet|
+(in contrast, packets received by the core do not have any specific
+structure and are managed entirely via the raw |ssh_frame|).
+
+This structure contains the required fields to manage the packet inside the
+transport layer, as well as a reference to the buffer containing the data to
+be transmitted (i.e. the message wrapped in |ssh_frame|). Most notably, it
+contains an internal reference count, which is used for managing its
+lifetime (accessible via |ssh_packet_get| and |ssh_packet_put|). When this
+counter reaches zero, the ``release()`` callback provided to the packet via
+its |ssh_packet_ops| reference is executed, which may then deallocate the
+packet or its enclosing structure (e.g. |ssh_request|).
+
+In addition to the ``release`` callback, the |ssh_packet_ops| reference also
+provides a ``complete()`` callback, which is run once the packet has been
+completed and provides the status of this completion, i.e. zero on success
+or a negative errno value in case of an error. Once the packet has been
+submitted to the packet transport layer, the ``complete()`` callback is
+always guaranteed to be executed before the ``release()`` callback, i.e. the
+packet will always be completed, either successfully, with an error, or due
+to cancellation, before it will be released.
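+
+To illustrate the lifetime management described above, a user of the packet
+layer might provide its callbacks roughly along the following lines (a
+sketch only; it assumes a hypothetical wrapper struct embedding |ssh_packet|
+as member ``base`` and the callback signatures documented in the API
+reference)::
+
+    struct my_packet {
+            struct ssh_packet base;
+            /* ... additional per-packet data ... */
+    };
+
+    static void my_packet_complete(struct ssh_packet *p, int status)
+    {
+            struct my_packet *mp = container_of(p, struct my_packet, base);
+
+            /* Runs exactly once per packet, before release(). */
+            pr_debug("packet %p completed with status %d\n", mp, status);
+    }
+
+    static void my_packet_release(struct ssh_packet *p)
+    {
+            /* Runs once the reference count has dropped to zero. */
+            kfree(container_of(p, struct my_packet, base));
+    }
+
+    static const struct ssh_packet_ops my_packet_ops = {
+            .complete = my_packet_complete,
+            .release  = my_packet_release,
+    };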
+
+The state of a packet is managed via its ``state`` flags
+(|ssh_packet_flags|), which also contains the packet type. In particular,
+the following bits are noteworthy:
+
+* ``SSH_PACKET_SF_LOCKED_BIT``: This bit is set when completion, either
+ through error or success, is imminent. It indicates that no further
+ references of the packet should be taken and any existing references
+ should be dropped as soon as possible. The process setting this bit is
+ responsible for removing any references to this packet from the packet
+ queue and pending set.
+
+* ``SSH_PACKET_SF_COMPLETED_BIT``: This bit is set by the process running the
+ ``complete()`` callback and is used to ensure that this callback only runs
+ once.
+
+* ``SSH_PACKET_SF_QUEUED_BIT``: This bit is set when the packet is queued on
+ the packet queue and cleared when it is dequeued.
+
+* ``SSH_PACKET_SF_PENDING_BIT``: This bit is set when the packet is added to
+ the pending set and cleared when it is removed from it.
+
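+As an illustration of how these state bits are used (a sketch only; the
+actual handling lives in ``ssh_packet_layer.c``), guaranteeing that the
+``complete()`` callback runs at most once boils down to an atomic
+test-and-set on the completed bit::
+
+    if (test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &packet->state))
+            return;         /* already completed elsewhere */
+
+    if (packet->ops->complete)
+            packet->ops->complete(packet, status);
+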
+Packet Queue
+------------
+
+The packet queue is the first of the two fundamental collections in the
+packet transport layer. It is a priority queue, with priority of the
+respective packets based on the packet type (major) and number of tries
+(minor). See |SSH_PACKET_PRIORITY| for more details on the priority value.
+
+All packets to be transmitted by the transport layer must be submitted to
+this queue via |ssh_ptl_submit|. Note that this includes control packets
+sent by the transport layer itself. Internally, data packets can be
+re-submitted to this queue due to timeouts or NAK packets sent by the EC.
+
+Pending Set
+-----------
+
+The pending set is the second of the two fundamental collections in the
+packet transport layer. It stores references to packets that have already
+been transmitted, but wait for acknowledgment (e.g. the corresponding ACK
+packet) by the EC.
+
+Note that a packet may both be pending and queued if it has been
+re-submitted due to a packet acknowledgment timeout or NAK. On such a
+re-submission, packets are not removed from the pending set.
+
+Transmitter Thread
+------------------
+
+The transmitter thread is responsible for most of the actual work regarding
+packet transmission. In each iteration, it (waits for and) checks if the
+next packet on the queue (if any) can be transmitted and, if so, removes it
+from the queue and increments its counter for the number of transmission
+attempts, i.e. tries. If the packet is sequenced, i.e. requires an ACK by
+the EC, the packet is added to the pending set. Next, the packet's data is
+submitted to the serdev subsystem. In case of an error or timeout during
+this submission, the packet is completed by the transmitter thread with the
+status value of the callback set accordingly. In case the packet is
+unsequenced, i.e. does not require an ACK by the EC, the packet is completed
+with success on the transmitter thread.
+
+Transmission of sequenced packets is limited by the number of concurrently
+pending packets, i.e. a limit on how many packets may be waiting for an ACK
+from the EC in parallel. This limit is currently set to one (see :doc:`ssh`
+for the reasoning behind this). Control packets (i.e. ACK and NAK) can
+always be transmitted.
+
+Receiver Thread
+---------------
+
+Any data received from the EC is put into a FIFO buffer for further
+processing. This processing happens on the receiver thread. The receiver
+thread parses and validates the received message into its |ssh_frame| and
+corresponding payload. It prepares and submits the necessary ACK (or, on
+validation error or invalid data, NAK) packets for the received messages.
+
+This thread also handles further processing, such as matching ACK messages
+to the corresponding pending packet (via sequence ID) and completing it, as
+well as initiating re-submission of all currently pending packets on
+receival of a NAK message (re-submission in case of a NAK is similar to
+re-submission due to timeout, see below for more details on that). Note that
+the successful completion of a sequenced packet will always run on the
+receiver thread (whereas any failure-indicating completion will run on the
+process where the failure occurred).
+
+Any payload data is forwarded via a callback to the next upper layer, i.e.
+the request transport layer.
+
+Timeout Reaper
+--------------
+
+The packet acknowledgment timeout is a per-packet timeout for sequenced
+packets, started when the respective packet begins (re-)transmission (i.e.
+this timeout is armed once per transmission attempt on the transmitter
+thread). It is used to trigger re-submission or, when the number of tries
+has been exceeded, cancellation of the packet in question.
+
+This timeout is handled via a dedicated reaper task, which is essentially a
+work item (re-)scheduled to run when the next packet is set to time out. The
+work item then checks the set of pending packets for any packets that have
+exceeded the timeout and, if there are any remaining packets, re-schedules
+itself to the next appropriate point in time.
+
+If a timeout has been detected by the reaper, the packet will either be
+re-submitted if it still has some remaining tries left, or completed with
+``-ETIMEDOUT`` as status if not. Note that re-submission, both in this case
+and when triggered by receival of a NAK, means that the packet is added to
+the queue with a now incremented number of tries, yielding a higher
+priority. The
+timeout for the packet will be disabled until the next transmission attempt
+and the packet remains on the pending set.
+
+Note that due to transmission and packet acknowledgment timeouts, the packet
+transport layer is always guaranteed to make progress, if only through
+timing out packets, and will never fully block.
+
+Concurrency and Locking
+-----------------------
+
+There are two main locks in the packet transport layer: One guarding access
+to the packet queue and one guarding access to the pending set. These
+collections may only be accessed and modified under the respective lock. If
+access to both collections is needed, the pending lock must be acquired
+before the queue lock to avoid deadlocks.
+
+In addition to guarding the collections, after initial packet submission
+certain packet fields may only be accessed under one of the locks.
+Specifically, the packet priority must only be accessed while holding the
+queue lock and the packet timestamp must only be accessed while holding the
+pending lock.
+
+Other parts of the packet transport layer are guarded independently. State
+flags are managed by atomic bit operations and, if necessary, memory
+barriers. Modifications to the timeout reaper work item and expiration date
+are guarded by their own lock.
+
+The reference of the packet to the packet transport layer (``ptl``) is
+somewhat special. It is either set when the upper layer request is submitted
+or, if there is none, when the packet is first submitted. After it is set,
+it will not change its value. Functions that may run concurrently with
+submission, i.e. cancellation, cannot rely on the ``ptl`` reference to be
+set. Access to it in these functions is guarded by ``READ_ONCE()``, whereas
+setting ``ptl`` is equally guarded with ``WRITE_ONCE()`` for symmetry.
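+
+In code, this pattern looks roughly as follows (a sketch)::
+
+    /* On submission (runs at most once per packet): */
+    WRITE_ONCE(packet->ptl, ptl);
+
+    /* In functions that may race with submission, e.g. cancellation: */
+    struct ssh_ptl *ptl = READ_ONCE(packet->ptl);
+
+    if (!ptl)
+            return;         /* not submitted yet, nothing to do */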
+
+Some packet fields may be read outside of the respective locks guarding
+them, specifically priority and state for tracing. In those cases, proper
+access is ensured by employing ``WRITE_ONCE()`` and ``READ_ONCE()``. Such
+read-only access is only allowed when stale values are not critical.
+
+With respect to the interface for higher layers, packet submission
+(|ssh_ptl_submit|), packet cancellation (|ssh_ptl_cancel|), data receival
+(|ssh_ptl_rx_rcvbuf|), and layer shutdown (|ssh_ptl_shutdown|) may always be
+executed concurrently with respect to each other. Note that packet
+submission may not run concurrently with itself for the same packet.
+Equally, shutdown and data receival may also not run concurrently with
+themselves (but may run concurrently with each other).
+
+
+Request Transport Layer
+=======================
+
+The request transport layer is represented via |ssh_rtl| and builds on top
+of the packet transport layer. It deals with requests, i.e. SSH packets sent
+by the host containing a |ssh_command| as frame payload. This layer
+separates responses to requests from events, which are also sent by the EC
+via a |ssh_command| payload. While responses are handled in this layer,
+events are relayed to the next upper layer, i.e. the controller layer, via
+the corresponding callback. The request transport layer is structured around
+the following key concepts:
+
+Request
+-------
+
+Requests are packets with a command-type payload, sent from host to EC to
+query data from or trigger an action on it (or both simultaneously). They
+are represented by |ssh_request|, wrapping the underlying |ssh_packet|
+storing its message data (i.e. SSH frame with command payload). Note that
+all top-level representations, e.g. |ssam_request_sync|, are built upon this
+struct.
+
+As |ssh_request| extends |ssh_packet|, its lifetime is also managed by the
+reference counter inside the packet struct (which can be accessed via
+|ssh_request_get| and |ssh_request_put|). Once the counter reaches zero, the
+``release()`` callback of the |ssh_request_ops| reference of the request is
+called.
+
+Requests can have an optional response that is equally sent via an SSH
+message with command-type payload (from EC to host). The party constructing
+the request must know if a response is expected and mark this in the request
+flags provided to |ssh_request_init|, so that the request transport layer
+can wait for this response.
+
+Similar to |ssh_packet|, |ssh_request| also has a ``complete()`` callback
+provided via its request ops reference and is guaranteed to be completed
+before it is released once it has been submitted to the request transport
+layer via |ssh_rtl_submit|. For a request without a response, successful
+completion will occur once the underlying packet has been successfully
+transmitted by the packet transport layer (i.e. from within the packet
+completion callback). For a request with response, successful completion
+will occur once the response has been received and matched to the request
+via its request ID (which happens on the packet layer's data-received
+callback running on the receiver thread). If the request is completed with
+an error, the status value will be set to the corresponding (negative) errno
+value.
+
+The state of a request is again managed via its ``state`` flags
+(|ssh_request_flags|), which also encode the request type. In particular,
+the following bits are noteworthy:
+
+* ``SSH_REQUEST_SF_LOCKED_BIT``: This bit is set when completion, either
+ through error or success, is imminent. It indicates that no further
+ references of the request should be taken and any existing references
+ should be dropped as soon as possible. The process setting this bit is
+ responsible for removing any references to this request from the request
+ queue and pending set.
+
+* ``SSH_REQUEST_SF_COMPLETED_BIT``: This bit is set by the process running the
+ ``complete()`` callback and is used to ensure that this callback only runs
+ once.
+
+* ``SSH_REQUEST_SF_QUEUED_BIT``: This bit is set when the request is queued on
+ the request queue and cleared when it is dequeued.
+
+* ``SSH_REQUEST_SF_PENDING_BIT``: This bit is set when the request is added to
+ the pending set and cleared when it is removed from it.
+
+Request Queue
+-------------
+
+The request queue is the first of the two fundamental collections in the
+request transport layer. In contrast to the packet queue of the packet
+transport layer, it is not a priority queue; the simple first-come,
+first-served principle applies.
+
+All requests to be transmitted by the request transport layer must be
+submitted to this queue via |ssh_rtl_submit|. Once submitted, requests may
+not be re-submitted, and will not be re-submitted automatically on timeout.
+Instead, the request is completed with a timeout error. If desired, the
+caller can create and submit a new request for another try, but it must not
+submit the same request again.
+
+Pending Set
+-----------
+
+The pending set is the second of the two fundamental collections in the
+request transport layer. This collection stores references to all pending
+requests, i.e. requests awaiting a response from the EC (similar to what the
+pending set of the packet transport layer does for packets).
+
+Transmitter Task
+----------------
+
+The transmitter task is scheduled when a new request is available for
+transmission. It checks if the next request on the request queue can be
+transmitted and, if so, submits its underlying packet to the packet
+transport layer. This check ensures that only a limited number of
+requests can be pending, i.e. waiting for a response, at the same time. If
+the request requires a response, the request is added to the pending set
+before its packet is submitted.
+
+Packet Completion Callback
+--------------------------
+
+The packet completion callback is executed once the underlying packet of a
+request has been completed. In case of an error completion, the
+corresponding request is completed with the error value provided in this
+callback.
+
+On successful packet completion, further processing depends on the request.
+If the request expects a response, it is marked as transmitted and the
+request timeout is started. If the request does not expect a response, it is
+completed with success.
+
+Data-Received Callback
+----------------------
+
+The data-received callback notifies the request transport layer of data
+being received by the underlying packet transport layer via a data-type
+frame. In general, this is expected to be a command-type payload.
+
+If the request ID of the command is one of the request IDs reserved for
+events (one to ``SSH_NUM_EVENTS``, inclusive), it is forwarded to the
+event callback registered in the request transport layer. If the request ID
+indicates a response to a request, the respective request is looked up in
+the pending set and, if found and marked as transmitted, completed with
+success.
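+
+As a sketch (``SSH_NUM_EVENTS`` denotes the number of request IDs reserved
+for events, as described above; the helper name is chosen for this example
+only), this classification boils down to::
+
+    static bool rqid_is_event(u16 rqid)
+    {
+            return rqid >= 1 && rqid <= SSH_NUM_EVENTS;
+    }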
+
+Timeout Reaper
+--------------
+
+The request-response-timeout is a per-request timeout for requests expecting
+a response. It is used to ensure that a request does not wait indefinitely
+on a response from the EC and is started after the underlying packet has
+been successfully completed.
+
+This timeout is, similar to the packet acknowledgment timeout on the packet
+transport layer, handled via a dedicated reaper task. This task is
+essentially a work-item (re-)scheduled to run when the next request is set
+to time out. The work item then scans the set of pending requests for any
+requests that have timed out and completes them with ``-ETIMEDOUT`` as
+status. Requests will not be re-submitted automatically. Instead, the issuer
+of the request must construct and submit a new request, if so desired.
+
+Note that this timeout, in combination with packet transmission and
+acknowledgment timeouts, guarantees that the request layer will always make
+progress, even if only through timing out packets, and never fully block.
+
+Concurrency and Locking
+-----------------------
+
+Similar to the packet transport layer, there are two main locks in the
+request transport layer: One guarding access to the request queue and one
+guarding access to the pending set. These collections may only be accessed
+and modified under the respective lock.
+
+Other parts of the request transport layer are guarded independently. State
+flags are (again) managed by atomic bit operations and, if necessary, memory
+barriers. Modifications to the timeout reaper work item and expiration date
+are guarded by their own lock.
+
+Some request fields may be read outside of the respective locks guarding
+them, specifically the state for tracing. In those cases, proper access is
+ensured by employing ``WRITE_ONCE()`` and ``READ_ONCE()``. Such read-only
+access is only allowed when stale values are not critical.
+
+With respect to the interface for higher layers, request submission
+(|ssh_rtl_submit|), request cancellation (|ssh_rtl_cancel|), and layer
+shutdown (|ssh_rtl_shutdown|) may always be executed concurrently with
+respect to each other. Note that request submission may not run concurrently
+with itself for the same request (and also may only be called once per
+request). Equally, shutdown may also not run concurrently with itself.
+
+
+Controller Layer
+================
+
+The controller layer builds on the request transport layer to provide an
+easy-to-use interface for client drivers. It is represented by
+|ssam_controller| and the SSH driver. While the lower level transport layers
+take care of transmitting and handling packets and requests, the controller
+layer takes on more of a management role. Specifically, it handles device
+initialization, power management, and event handling, including event
+delivery and registration via the (event) completion system (|ssam_cplt|).
+
+Event Registration
+------------------
+
+In general, an event (or rather a class of events) has to be explicitly
+requested by the host before the EC will send it (HID input events seem to
+be the exception). This is done via an event-enable request (similarly,
+events should be disabled via an event-disable request once no longer
+desired).
+
+The specific request used to enable (or disable) an event is given via an
+event registry, i.e. the governing authority of this event (so to speak),
+represented by |ssam_event_registry|. As parameters to this request, the
+target category and, depending on the event registry, instance ID of the
+event to be enabled must be provided. This (optional) instance ID must be
+zero if the registry does not use it. Together, target category and instance
+ID form the event ID, represented by |ssam_event_id|. In short, both event
+registry and event ID are required to uniquely identify a respective class
+of events.
+
+Note that a further *request ID* parameter must be provided for the
+enable-event request. This parameter does not influence the class of events
+being enabled, but instead is set as the request ID (RQID) on each event of
+this class sent by the EC. It is used to identify events (as a limited
+number of request IDs is reserved for use in events only, specifically one
+to ``SSH_NUM_EVENTS`` inclusive) and also map events to their specific
+class. Currently, the controller always sets this parameter to the target
+category specified in |ssam_event_id|.
+
+As multiple client drivers may rely on the same (or overlapping) classes of
+events and enable/disable calls are strictly binary (i.e. on/off), the
+controller has to manage access to these events. It does so via reference
+counting, storing the counter inside an RB-tree based mapping with event
+registry and ID as key (there is no known list of valid event registry and
+event ID combinations). See |ssam_nf|, |ssam_nf_refcount_inc|, and
+|ssam_nf_refcount_dec| for details.
+
+This management is done together with notifier registration (described in
+the next section) via the top-level |ssam_notifier_register| and
+|ssam_notifier_unregister| functions.
+
+Event Delivery
+--------------
+
+To receive events, a client driver has to register an event notifier via
+|ssam_notifier_register|. This increments the reference counter for that
+specific class of events (as detailed in the previous section), enables the
+class on the EC (if it has not been enabled already), and installs the
+provided notifier callback.
+
+Notifier callbacks are stored in lists, with one (RCU) list per target
+category (provided via the event ID; NB: there is a fixed known number of
+target categories). There is no known association from the combination of
+event registry and event ID to the command data (target ID, target category,
+command ID, and instance ID) that can be provided by an event class, apart
+from target category and instance ID given via the event ID.
+
+Note that due to the way notifiers are (or rather have to be) stored, client
+drivers may receive events that they have not requested and need to account
+for them. Specifically, they will, by default, receive all events from the
+same target category. To simplify dealing with this, filtering of events by
+target ID (provided via the event registry) and instance ID (provided via
+the event ID) can be requested when registering a notifier. This filtering
+is applied when iterating over the notifiers at the time they are executed.
+
+All notifier callbacks are executed on a dedicated workqueue, the so-called
+completion workqueue. After an event has been received via the callback
+installed in the request layer (running on the receiver thread of the packet
+transport layer), it will be put on its respective event queue
+(|ssam_event_queue|). From this event queue the completion work item of that
+queue (running on the completion workqueue) will pick up the event and
+execute the notifier callback. This is done to avoid blocking on the
+receiver thread.
+
+There is one event queue per combination of target ID and target category.
+This is done to ensure that notifier callbacks are executed in sequence for
+events of the same target ID and target category. Callbacks can be executed
+in parallel for events with a different combination of target ID and target
+category.
+
+Concurrency and Locking
+-----------------------
+
+Most of the concurrency related safety guarantees of the controller are
+provided by the lower-level request transport layer. In addition to this,
+event (un-)registration is guarded by its own lock.
+
+Access to the controller state is guarded by the state lock. This lock is a
+read/write semaphore. The reader part can be used to ensure that the state
+does not change while functions that depend on the state staying the same
+(e.g. |ssam_notifier_register|, |ssam_notifier_unregister|,
+|ssam_request_sync_submit|, and derivatives) are executed and this guarantee
+is not already provided otherwise (e.g. through |ssam_client_bind| or
+|ssam_client_link|). The writer part guards any transitions that will change
+the state, i.e. initialization, destruction, suspension, and resumption.
+
+The controller state may be accessed (read-only) outside the state lock for
+smoke-testing against invalid API usage (e.g. in |ssam_request_sync_submit|).
+Note that such checks are not supposed to (and will not) protect against all
+invalid usages, but rather aim to help catch them. In those cases, proper
+variable access is ensured by employing ``WRITE_ONCE()`` and ``READ_ONCE()``.
+
+Assuming any preconditions on the state not changing have been satisfied,
+all non-initialization and non-shutdown functions may run concurrently with
+each other. This includes |ssam_notifier_register|, |ssam_notifier_unregister|,
+|ssam_request_sync_submit|, as well as all functions building on top of those.
diff --git a/Documentation/driver-api/surface_aggregator/overview.rst b/Documentation/driver-api/surface_aggregator/overview.rst
new file mode 100644
index 000000000000..1e9d57e50063
--- /dev/null
+++ b/Documentation/driver-api/surface_aggregator/overview.rst
@@ -0,0 +1,77 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+========
+Overview
+========
+
+The Surface/System Aggregator Module (SAM, SSAM) is an (arguably *the*)
+embedded controller (EC) on Microsoft Surface devices. It was originally
+introduced on 4th generation devices (Surface Pro 4, Surface Book 1), but
+its responsibilities and feature-set have since been expanded significantly
+with the following generations.
+
+
+Features and Integration
+========================
+
+Not much is currently known about SAM on 4th generation devices (Surface Pro
+4, Surface Book 1), due to the use of a different communication interface
+between host and EC (as detailed below). On 5th (Surface Pro 2017, Surface
+Book 2, Surface Laptop 1) and later generation devices, SAM is responsible
+for providing battery information (both current status and static values,
+such as maximum capacity etc.), as well as an assortment of temperature
+sensors (e.g. skin temperature) and cooling/performance-mode setting to the
+host. On the Surface Book 2, specifically, it additionally provides an
+interface for properly handling clipboard detachment (i.e. separating the
+display part from the keyboard part of the device); on the Surface Laptop 1
+and 2, it is required for keyboard HID input. This HID subsystem has been
+restructured for 7th generation devices and on those, specifically Surface
+Laptop 3 and Surface Book 3, is responsible for all major HID input (i.e.
+keyboard and touchpad).
+
+While features have not changed much on a coarse level since the 5th
+generation, internal interfaces have undergone some rather large changes. On
+5th and 6th generation devices, both battery and temperature information is
+exposed to ACPI via a shim driver (referred to as Surface ACPI Notify, or
+SAN), translating ACPI generic serial bus write-/read-accesses to SAM
+requests. On 7th generation devices, this additional layer is gone and these
+devices require a driver hooking directly into the SAM interface. Equally,
+on newer generations, fewer devices are declared in ACPI, making them a bit
+harder to discover and requiring us to hard-code a sort of device registry.
+Due to this, an SSAM bus and subsystem with client devices
+(:c:type:`struct ssam_device <ssam_device>`) has been implemented.
+
+
+Communication
+=============
+
+The type of communication interface between host and EC depends on the
+generation of the Surface device. On 4th generation devices, host and EC
+communicate via HID, specifically using a HID-over-I2C device, whereas on
+5th and later generations, communication takes place via a USART serial
+device. In accordance with the drivers found on other operating systems, we
+refer to the serial device and its driver as Surface Serial Hub (SSH). When
+needed, we differentiate between both types of SAM by referring to them as
+SAM-over-SSH and SAM-over-HID.
+
+Currently, this subsystem only supports SAM-over-SSH. The SSH communication
+interface is described in more detail below. The HID interface has not been
+reverse engineered yet and it is, at the moment, unclear how many (and
+which) concepts of the SSH interface detailed below can be transferred to
+it.
+
+Surface Serial Hub
+------------------
+
+As already elaborated above, the Surface Serial Hub (SSH) is the
+communication interface for SAM on 5th- and all later-generation Surface
+devices. On the highest level, communication can be separated into two main
+types: Requests, messages sent from host to EC that may trigger a direct
+response from the EC (explicitly associated with the request), and events
+(sometimes also referred to as notifications), sent from EC to host without
+being a direct response to a previous request. We may also refer to requests
+without response as commands. In general, events need to be enabled via one
+of multiple dedicated requests before they are sent by the EC.
+
+See :doc:`ssh` for a more technical protocol documentation and
+:doc:`internal` for an overview of the internal driver architecture.
diff --git a/Documentation/driver-api/surface_aggregator/ssh.rst b/Documentation/driver-api/surface_aggregator/ssh.rst
new file mode 100644
index 000000000000..bf007d6c9873
--- /dev/null
+++ b/Documentation/driver-api/surface_aggregator/ssh.rst
@@ -0,0 +1,344 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+.. |u8| replace:: :c:type:`u8 <u8>`
+.. |u16| replace:: :c:type:`u16 <u16>`
+.. |TYPE| replace:: ``TYPE``
+.. |LEN| replace:: ``LEN``
+.. |SEQ| replace:: ``SEQ``
+.. |SYN| replace:: ``SYN``
+.. |NAK| replace:: ``NAK``
+.. |ACK| replace:: ``ACK``
+.. |DATA| replace:: ``DATA``
+.. |DATA_SEQ| replace:: ``DATA_SEQ``
+.. |DATA_NSQ| replace:: ``DATA_NSQ``
+.. |TC| replace:: ``TC``
+.. |TID| replace:: ``TID``
+.. |IID| replace:: ``IID``
+.. |RQID| replace:: ``RQID``
+.. |CID| replace:: ``CID``
+
+===========================
+Surface Serial Hub Protocol
+===========================
+
+The Surface Serial Hub (SSH) is the central communication interface for the
+embedded Surface Aggregator Module controller (SAM or EC), found on newer
+Surface generations. We will refer to this protocol and interface as
+SAM-over-SSH, as opposed to SAM-over-HID for the older generations.
+
+On Surface devices with SAM-over-SSH, SAM is connected to the host via UART
+and defined in ACPI as device with ID ``MSHW0084``. On these devices,
+significant functionality is provided via SAM, including access to battery
+and power information and events, thermal read-outs and events, and many
+more. For Surface Laptops, keyboard input is handled via HID directed
+through SAM, on the Surface Laptop 3 and Surface Book 3 this also includes
+touchpad input.
+
+Note that the standard disclaimer for this subsystem also applies to this
+document: All of this has been reverse-engineered and may thus be erroneous
+and/or incomplete.
+
+All CRCs used in the following are two-byte ``crc_ccitt_false(0xffff, ...)``.
+All multi-byte values are little-endian, there is no implicit padding between
+values.
+
+
+SSH Packet Protocol: Definitions
+================================
+
+The fundamental communication unit of the SSH protocol is a frame
+(:c:type:`struct ssh_frame <ssh_frame>`). A frame consists of the following
+fields, packed together and in order:
+
+.. flat-table:: SSH Frame
+ :widths: 1 1 4
+ :header-rows: 1
+
+ * - Field
+ - Type
+ - Description
+
+ * - |TYPE|
+ - |u8|
+ - Type identifier of the frame.
+
+ * - |LEN|
+ - |u16|
+ - Length of the payload associated with the frame.
+
+ * - |SEQ|
+ - |u8|
+ - Sequence ID (see explanation below).
+
+Each frame structure is followed by a CRC over this structure. The CRC over
+the frame structure (|TYPE|, |LEN|, and |SEQ| fields) is placed directly
+after the frame structure and before the payload. The payload is followed by
+its own CRC (over all payload bytes). If the payload is not present (i.e.
+the frame has ``LEN=0``), the CRC of the payload is still present and will
+evaluate to ``0xffff``. The |LEN| field does not include any of the CRCs; it
+equals the number of bytes between the CRC of the frame and the CRC of the
+payload.
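+
+For reference, these CRCs can be computed with the kernel's CRC-CCITT
+helper; a minimal sketch (an empty payload yields ``0xffff``, matching the
+note above)::
+
+    #include <linux/crc-ccitt.h>
+
+    static u16 ssh_crc_sketch(const u8 *buf, size_t len)
+    {
+            return crc_ccitt_false(0xffff, buf, len);
+    }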
+
+Additionally, the following fixed two-byte sequences are used:
+
+.. flat-table:: SSH Byte Sequences
+ :widths: 1 1 4
+ :header-rows: 1
+
+ * - Name
+ - Value
+ - Description
+
+ * - |SYN|
+ - ``[0xAA, 0x55]``
+ - Synchronization bytes.
+
+A message consists of |SYN|, followed by the frame (|TYPE|, |LEN|, |SEQ| and
+CRC) and, if specified in the frame (i.e. ``LEN > 0``), payload bytes,
+followed finally, regardless of whether the payload is present, by the
+payload CRC. The
+messages corresponding to an exchange are, in part, identified by having the
+same sequence ID (|SEQ|), stored inside the frame (more on this in the next
+section). The sequence ID is a wrapping counter.
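+
+As an illustration, a sequenced data message (a |DATA_SEQ|-type frame, see
+the table below) with ``SEQ = 0x03`` and a two-byte payload ``P0 P1`` would
+be laid out as follows (CRC bytes are shown symbolically, as they depend on
+the actual content)::
+
+    0xAA 0x55                  SYN
+    0x80 0x02 0x00 0x03        frame: TYPE, LEN = 2 (little-endian), SEQ
+    CRC_F0 CRC_F1              CRC over the four frame bytes
+    P0 P1                      payload (LEN bytes)
+    CRC_P0 CRC_P1              CRC over the payload bytes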
+
+A frame can have the following types
+(:c:type:`enum ssh_frame_type <ssh_frame_type>`):
+
+.. flat-table:: SSH Frame Types
+ :widths: 1 1 4
+ :header-rows: 1
+
+ * - Name
+ - Value
+ - Short Description
+
+ * - |NAK|
+ - ``0x04``
+ - Sent on error in previously received message.
+
+ * - |ACK|
+ - ``0x40``
+ - Sent to acknowledge receival of |DATA| frame.
+
+ * - |DATA_SEQ|
+ - ``0x80``
+ - Sent to transfer data. Sequenced.
+
+ * - |DATA_NSQ|
+ - ``0x00``
+ - Same as |DATA_SEQ|, but does not need to be ACKed.
+
+Both |NAK|- and |ACK|-type frames are used to control flow of messages and
+thus do not carry a payload. |DATA_SEQ|- and |DATA_NSQ|-type frames on the
+other hand must carry a payload. The flow sequence and interaction of
+different frame types will be described in more depth in the next section.
+
+
+SSH Packet Protocol: Flow Sequence
+==================================
+
+Each exchange begins with |SYN|, followed by a |DATA_SEQ|- or
+|DATA_NSQ|-type frame, followed by its CRC, payload, and payload CRC. In
+case of a |DATA_NSQ|-type frame, the exchange is then finished. In case of a
+|DATA_SEQ|-type frame, the receiving party has to acknowledge receival of
+the frame by responding with a message containing an |ACK|-type frame with
+the same sequence ID of the |DATA| frame. In other words, the sequence ID of
+the |ACK| frame specifies the |DATA| frame to be acknowledged. In case of an
+error, e.g. an invalid CRC, the receiving party responds with a message
+containing an |NAK|-type frame. As the sequence ID of the previous data
+frame, for which an error is indicated via the |NAK| frame, cannot be relied
+upon, the sequence ID of the |NAK| frame should not be used and is set to
+zero. After receival of an |NAK| frame, the sending party should re-send all
+outstanding (non-ACKed) messages.
+
+Sequence IDs are not synchronized between the two parties, meaning that they
+are managed independently for each party. Identifying the messages
+corresponding to a single exchange thus relies on the sequence ID as well as
+the type of the message, and the context. Specifically, the sequence ID is
+used to associate an ``ACK`` with its ``DATA_SEQ``-type frame, but not
+``DATA_SEQ``- or ``DATA_NSQ``-type frames with other ``DATA``-type frames.
+
+An example exchange might look like this:
+
+::
+
+ tx: -- SYN FRAME(D) CRC(F) PAYLOAD CRC(P) -----------------------------
+ rx: ------------------------------------- SYN FRAME(A) CRC(F) CRC(P) --
+
+where both frames have the same sequence ID (``SEQ``). Here, ``FRAME(D)``
+indicates a |DATA_SEQ|-type frame, ``FRAME(A)`` an ``ACK``-type frame,
+``CRC(F)`` the CRC over the previous frame, ``CRC(P)`` the CRC over the
+previous payload. In case of an error, the exchange would look like this:
+
+::
+
+ tx: -- SYN FRAME(D) CRC(F) PAYLOAD CRC(P) -----------------------------
+ rx: ------------------------------------- SYN FRAME(N) CRC(F) CRC(P) --
+
+upon which the sender should re-send the message. ``FRAME(N)`` indicates an
+|NAK|-type frame. Note that the sequence ID of the |NAK|-type frame is fixed
+to zero. For |DATA_NSQ|-type frames, both exchanges are the same:
+
+::
+
+ tx: -- SYN FRAME(DATA_NSQ) CRC(F) PAYLOAD CRC(P) ----------------------
+ rx: -------------------------------------------------------------------
+
+Here, an error can be detected, but not corrected or indicated to the
+sending party. These exchanges are symmetric, i.e. switching ``rx`` and
+``tx`` results again in a valid exchange. Currently, no exchanges longer
+than these are known.
+
+
+Commands: Requests, Responses, and Events
+=========================================
+
+Commands are sent as payload inside a data frame. Currently, this is the
+only known payload type of |DATA| frames, with a payload-type value of
+``0x80`` (:c:type:`SSH_PLD_TYPE_CMD <ssh_payload_type>`).
+
+The command-type payload (:c:type:`struct ssh_command <ssh_command>`)
+consists of an eight-byte command structure, followed by optional and
+variable length command data. The length of this optional data is derived
+from the frame payload length given in the corresponding frame, i.e. it is
+``frame.len - sizeof(struct ssh_command)``. The command struct contains the
+following fields, packed together and in order:
+
+.. flat-table:: SSH Command
+ :widths: 1 1 4
+ :header-rows: 1
+
+ * - Field
+ - Type
+ - Description
+
+ * - |TYPE|
+ - |u8|
+ - Type of the payload. For commands always ``0x80``.
+
+ * - |TC|
+ - |u8|
+ - Target category.
+
+ * - |TID| (out)
+ - |u8|
+ - Target ID for outgoing (host to EC) commands.
+
+ * - |TID| (in)
+ - |u8|
+ - Target ID for incoming (EC to host) commands.
+
+ * - |IID|
+ - |u8|
+ - Instance ID.
+
+ * - |RQID|
+ - |u16|
+ - Request ID.
+
+ * - |CID|
+ - |u8|
+ - Command ID.
+
+The command struct and data, in general, do not contain any failure
+detection mechanism (e.g. CRCs); this is done solely on the frame level.
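+
+For reference, a C struct mirroring this layout might look like the
+following sketch (field names are chosen here for illustration and may
+differ from the in-tree definition)::
+
+    struct ssh_command_sketch {
+            u8     type;     /* TYPE: always 0x80 for commands.     */
+            u8     tc;       /* TC:   target category.              */
+            u8     tid_out;  /* TID (out): target ID, host to EC.   */
+            u8     tid_in;   /* TID (in):  target ID, EC to host.   */
+            u8     iid;      /* IID:  instance ID.                  */
+            __le16 rqid;     /* RQID: request ID, little-endian.    */
+            u8     cid;      /* CID:  command ID.                   */
+    } __packed;              /* Eight bytes, no implicit padding.   */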
+
+Command-type payloads are used by the host to send commands and requests to
+the EC as well as by the EC to send responses and events back to the host.
+We differentiate between requests (sent by the host), responses (sent by the
+EC in response to a request), and events (sent by the EC without a preceding
+request).
+
+Commands and events are uniquely identified by their target category
+(``TC``) and command ID (``CID``). The target category specifies a general
+category for the command (e.g. system in general, vs. battery and AC, vs.
+temperature, and so on), while the command ID specifies the command inside
+that category. Only the combination of |TC| + |CID| is unique. Additionally,
+commands have an instance ID (``IID``), which is used to differentiate
+between different sub-devices. For example, ``TC=3``, ``CID=1`` is a
+request to get the temperature on a thermal sensor, where |IID| specifies
+the respective sensor. If the instance ID is not used, it should be set to
+zero. If instance IDs are used, they, in general, start with a value of one,
+whereas zero may be used for instance independent queries, if applicable. A
+response to a request should have the same target category, command ID, and
+instance ID as the corresponding request.
+
+Responses are matched to their corresponding request via the request ID
+(``RQID``) field. This is a 16-bit wrapping counter similar to the sequence
+ID on the frames. Note that the sequence ID of the frames for a
+request-response pair does not match. Only the request ID has to match.
+Frame-protocol-wise, these are two separate exchanges, and may even be
+separated, e.g. by an event being sent after the request but before the
+response. Not all commands produce a response, and this is not detectable by
+|TC| + |CID|. It is the responsibility of the issuing party to wait for a
+response (or signal this to the communication framework, as is done in
+SAN/ACPI via the ``SNC`` flag).
+
+Events are identified by unique and reserved request IDs. These IDs should
+not be used by the host when sending a new request. They are used on the
+host to, first, detect events and, second, match them with a registered
+event handler. Request IDs for events are chosen by the host and directed to
+the EC when setting up and enabling an event source (via the
+enable-event-source request). The EC then uses the specified request ID for
+events sent from the respective source. Note that an event should still be
+identified by its target category, command ID, and, if applicable, instance
+ID, as a single event source can send multiple different event types. In
+general, however, a single target category should map to a single reserved
+event request ID.
+
+Furthermore, requests, responses, and events have an associated target ID
+(``TID``). This target ID is split into output (host to EC) and input (EC to
+host) fields, with the respective other field (e.g. output field on incoming
+messages) set to zero. Two ``TID`` values are known: Primary (``0x01``) and
+secondary (``0x02``). In general, the response to a request should have the
+same ``TID`` value; however, the field (output vs. input) should be used in
+accordance with the direction in which the response is sent (i.e. on the input
+field, as responses are generally sent from the EC to the host).
+
+Note that, even though requests and events should be uniquely identifiable
+by target category and command ID alone, the EC may require specific
+target ID and instance ID values to accept a command. A command that is
+accepted for ``TID=1``, for example, may not be accepted for ``TID=2``
+and vice versa.
+
+
+Limitations and Observations
+============================
+
+The protocol can, in theory, handle up to ``U8_MAX`` frames in parallel,
+with up to ``U16_MAX`` pending requests (neglecting request IDs reserved for
+events). In practice, however, this is more limited. From our testing
+(although via a Python, and thus user-space, program), it seems that the EC
+can handle up to four requests (mostly) reliably in parallel at a time. With
+five or more requests in parallel, consistent discarding of
+commands (ACKed frame but no command response) has been observed. For five
+simultaneous commands, this reproducibly resulted in one command being
+dropped and four commands being handled.
+
+However, it has also been noted that, even with three requests in parallel,
+occasional frame drops happen. Apart from this, with a limit of three
+pending requests, no dropped commands (i.e. command being dropped but frame
+carrying command being ACKed) have been observed. In any case, frames (and
+possibly also commands) should be re-sent by the host if a certain timeout
+is exceeded. The EC does this for frames with a timeout of one second and
+up to two retries (i.e. three transmissions in total). The limit of
+retries also applies to received NAKs, and, in a worst-case scenario, can
+lead to entire messages being dropped.
+
+While this also seems to work fine for pending data frames as long as no
+transmission failures occur, implementation and handling of these seems to
+depend on the assumption that there is only one non-acknowledged data frame.
+In particular, the detection of repeated frames relies on the last sequence
+number. This means that, if a frame that has been successfully received by
+the EC is sent again, e.g. due to the host not receiving an |ACK|, the EC
+will only detect this if it has the sequence ID of the last frame received
+by the EC. As an example: Sending two frames with ``SEQ=0`` and ``SEQ=1``
+followed by a repetition of ``SEQ=0`` will not detect the second ``SEQ=0``
+frame as such, and thus execute the command in this frame each time it has
+been received, i.e. twice in this example. Sending ``SEQ=0``, ``SEQ=1`` and
+then repeating ``SEQ=1`` will detect the second ``SEQ=1`` as repetition of
+the first one and ignore it, thus executing the contained command only once.
+
+In conclusion, this suggests a limit of at most one pending un-ACKed frame
+(per party, effectively leading to synchronous communication regarding
+frames) and at most three pending commands. The limit to synchronous frame
+transfers seems to be consistent with behavior observed on Windows.
diff --git a/Documentation/driver-api/thermal/sysfs-api.rst b/Documentation/driver-api/thermal/sysfs-api.rst
index e7520cb439ac..29fdd817ddb0 100644
--- a/Documentation/driver-api/thermal/sysfs-api.rst
+++ b/Documentation/driver-api/thermal/sysfs-api.rst
@@ -54,7 +54,7 @@ temperature) and throttle appropriate devices.
trips:
the total number of trip points this thermal zone supports.
mask:
- Bit string: If 'n'th bit is set, then trip point 'n' is writeable.
+ Bit string: If 'n'th bit is set, then trip point 'n' is writable.
devdata:
device private data
ops:
@@ -406,7 +406,7 @@ Thermal cooling device sys I/F, created once it's registered::
|---stats/reset: Writing any value resets the statistics
|---stats/time_in_state_ms: Time (msec) spent in various cooling states
|---stats/total_trans: Total number of times cooling state is changed
- |---stats/trans_table: Cooing state transition table
+ |---stats/trans_table: Cooling state transition table
Then next two dynamic attributes are created/removed in pairs. They represent
@@ -520,19 +520,6 @@ available_policies
RW, Optional
-passive
- Attribute is only present for zones in which the passive cooling
- policy is not supported by native thermal driver. Default is zero
- and can be set to a temperature (in millidegrees) to enable a
- passive trip point for the zone. Activation is done by polling with
- an interval of 1 second.
-
- Unit: millidegrees Celsius
-
- Valid values: 0 (disabled) or greater than 1000
-
- RW, Optional
-
emul_temp
Interface to set the emulated temperature method in thermal zone
(sensor). After setting this temperature, the thermal zone may pass
@@ -779,5 +766,5 @@ emergency poweroff kicks in after the delay has elapsed and shuts down
the system.
If set to 0 emergency poweroff will not be supported. So a carefully
-profiled non-zero positive value is a must for emergerncy poweroff to be
+profiled non-zero positive value is a must for emergency poweroff to be
triggered.
diff --git a/Documentation/features/core/cBPF-JIT/arch-support.txt b/Documentation/features/core/cBPF-JIT/arch-support.txt
index 399935616813..e59b5215402d 100644
--- a/Documentation/features/core/cBPF-JIT/arch-support.txt
+++ b/Documentation/features/core/cBPF-JIT/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | TODO |
| arm64: | TODO |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/core/eBPF-JIT/arch-support.txt b/Documentation/features/core/eBPF-JIT/arch-support.txt
index 79409bfe0263..dcbd8679f514 100644
--- a/Documentation/features/core/eBPF-JIT/arch-support.txt
+++ b/Documentation/features/core/eBPF-JIT/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/core/generic-idle-thread/arch-support.txt b/Documentation/features/core/generic-idle-thread/arch-support.txt
index 9ea60e416efd..4efcba7b5239 100644
--- a/Documentation/features/core/generic-idle-thread/arch-support.txt
+++ b/Documentation/features/core/generic-idle-thread/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | ok |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | ok |
| h8300: | TODO |
| hexagon: | ok |
diff --git a/Documentation/features/core/jump-labels/arch-support.txt b/Documentation/features/core/jump-labels/arch-support.txt
index 894d9693b380..0c801d1bd2da 100644
--- a/Documentation/features/core/jump-labels/arch-support.txt
+++ b/Documentation/features/core/jump-labels/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | ok |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/core/tracehook/arch-support.txt b/Documentation/features/core/tracehook/arch-support.txt
index cd3510e2eedb..af34308fce7f 100644
--- a/Documentation/features/core/tracehook/arch-support.txt
+++ b/Documentation/features/core/tracehook/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | ok |
| arm: | ok |
| arm64: | ok |
- | c6x: | ok |
| csky: | ok |
| h8300: | TODO |
| hexagon: | ok |
diff --git a/Documentation/features/debug/KASAN/arch-support.txt b/Documentation/features/debug/KASAN/arch-support.txt
index b2288dc14b72..c244ac7eee26 100644
--- a/Documentation/features/debug/KASAN/arch-support.txt
+++ b/Documentation/features/debug/KASAN/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/debug/debug-vm-pgtable/arch-support.txt b/Documentation/features/debug/debug-vm-pgtable/arch-support.txt
index 1c49723e7534..7aff505af706 100644
--- a/Documentation/features/debug/debug-vm-pgtable/arch-support.txt
+++ b/Documentation/features/debug/debug-vm-pgtable/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | ok |
| arm: | TODO |
| arm64: | ok |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/debug/gcov-profile-all/arch-support.txt b/Documentation/features/debug/gcov-profile-all/arch-support.txt
index 7563a494ddb8..b39c1a5de3f3 100644
--- a/Documentation/features/debug/gcov-profile-all/arch-support.txt
+++ b/Documentation/features/debug/gcov-profile-all/arch-support.txt
@@ -10,14 +10,13 @@
| arc: | TODO |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| m68k: | TODO |
| microblaze: | ok |
- | mips: | TODO |
+ | mips: | ok |
| nds32: | TODO |
| nios2: | TODO |
| openrisc: | TODO |
diff --git a/Documentation/features/debug/kcov/arch-support.txt b/Documentation/features/debug/kcov/arch-support.txt
index ab0ee1c933c2..7e44013cc320 100644
--- a/Documentation/features/debug/kcov/arch-support.txt
+++ b/Documentation/features/debug/kcov/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/debug/kgdb/arch-support.txt b/Documentation/features/debug/kgdb/arch-support.txt
index bc45bac20442..2cb0576f9180 100644
--- a/Documentation/features/debug/kgdb/arch-support.txt
+++ b/Documentation/features/debug/kgdb/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | ok |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | ok |
| hexagon: | ok |
diff --git a/Documentation/features/debug/kmemleak/arch-support.txt b/Documentation/features/debug/kmemleak/arch-support.txt
index 2db76807ec6f..e9ac415f8aec 100644
--- a/Documentation/features/debug/kmemleak/arch-support.txt
+++ b/Documentation/features/debug/kmemleak/arch-support.txt
@@ -10,8 +10,7 @@
| arc: | ok |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
- | csky: | TODO |
+ | csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index 6225cfe0c5bf..96156e8802a7 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | TODO |
| arm64: | TODO |
- | c6x: | TODO |
| csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
@@ -23,7 +22,7 @@
| openrisc: | TODO |
| parisc: | ok |
| powerpc: | ok |
- | riscv: | TODO |
+ | riscv: | ok |
| s390: | ok |
| sh: | TODO |
| sparc: | TODO |
diff --git a/Documentation/features/debug/kprobes/arch-support.txt b/Documentation/features/debug/kprobes/arch-support.txt
index 371f0ac488f5..ee95ed61909a 100644
--- a/Documentation/features/debug/kprobes/arch-support.txt
+++ b/Documentation/features/debug/kprobes/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | ok |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
@@ -23,7 +22,7 @@
| openrisc: | TODO |
| parisc: | ok |
| powerpc: | ok |
- | riscv: | TODO |
+ | riscv: | ok |
| s390: | ok |
| sh: | ok |
| sparc: | ok |
diff --git a/Documentation/features/debug/kretprobes/arch-support.txt b/Documentation/features/debug/kretprobes/arch-support.txt
index 38e95251deed..612cb97d47b8 100644
--- a/Documentation/features/debug/kretprobes/arch-support.txt
+++ b/Documentation/features/debug/kretprobes/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | ok |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
@@ -23,7 +22,7 @@
| openrisc: | TODO |
| parisc: | ok |
| powerpc: | ok |
- | riscv: | TODO |
+ | riscv: | ok |
| s390: | ok |
| sh: | ok |
| sparc: | ok |
diff --git a/Documentation/features/debug/optprobes/arch-support.txt b/Documentation/features/debug/optprobes/arch-support.txt
index 7f4a20e6a12b..d6ff141a6122 100644
--- a/Documentation/features/debug/optprobes/arch-support.txt
+++ b/Documentation/features/debug/optprobes/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | ok |
| arm64: | TODO |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/debug/stackprotector/arch-support.txt b/Documentation/features/debug/stackprotector/arch-support.txt
index 3329559c8207..ad4de22a71ab 100644
--- a/Documentation/features/debug/stackprotector/arch-support.txt
+++ b/Documentation/features/debug/stackprotector/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/debug/uprobes/arch-support.txt b/Documentation/features/debug/uprobes/arch-support.txt
index 43cac6ee0c68..8bd5548a4485 100644
--- a/Documentation/features/debug/uprobes/arch-support.txt
+++ b/Documentation/features/debug/uprobes/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
@@ -23,7 +22,7 @@
| openrisc: | TODO |
| parisc: | TODO |
| powerpc: | ok |
- | riscv: | TODO |
+ | riscv: | ok |
| s390: | ok |
| sh: | TODO |
| sparc: | ok |
diff --git a/Documentation/features/debug/user-ret-profiler/arch-support.txt b/Documentation/features/debug/user-ret-profiler/arch-support.txt
index d636ed0e679f..2a3fe812a5fa 100644
--- a/Documentation/features/debug/user-ret-profiler/arch-support.txt
+++ b/Documentation/features/debug/user-ret-profiler/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | TODO |
| arm64: | TODO |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/io/dma-contiguous/arch-support.txt b/Documentation/features/io/dma-contiguous/arch-support.txt
index dfc93d074e3d..bece89586efa 100644
--- a/Documentation/features/io/dma-contiguous/arch-support.txt
+++ b/Documentation/features/io/dma-contiguous/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/locking/cmpxchg-local/arch-support.txt b/Documentation/features/locking/cmpxchg-local/arch-support.txt
index 1815c7fed06d..52bdda004f5c 100644
--- a/Documentation/features/locking/cmpxchg-local/arch-support.txt
+++ b/Documentation/features/locking/cmpxchg-local/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | TODO |
| arm64: | ok |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/locking/lockdep/arch-support.txt b/Documentation/features/locking/lockdep/arch-support.txt
index 940b0bd02957..a8cd163c8b7e 100644
--- a/Documentation/features/locking/lockdep/arch-support.txt
+++ b/Documentation/features/locking/lockdep/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | ok |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | ok |
| h8300: | TODO |
| hexagon: | ok |
diff --git a/Documentation/features/locking/queued-rwlocks/arch-support.txt b/Documentation/features/locking/queued-rwlocks/arch-support.txt
index 4dd5e554873f..8c85949752b3 100644
--- a/Documentation/features/locking/queued-rwlocks/arch-support.txt
+++ b/Documentation/features/locking/queued-rwlocks/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | TODO |
| arm64: | ok |
- | c6x: | TODO |
| csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt
index b16d4f71e5ce..5f4e1b3841af 100644
--- a/Documentation/features/locking/queued-spinlocks/arch-support.txt
+++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | TODO |
| arm64: | ok |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/perf/kprobes-event/arch-support.txt b/Documentation/features/perf/kprobes-event/arch-support.txt
index 04c17c2106a4..78f3fe080f0e 100644
--- a/Documentation/features/perf/kprobes-event/arch-support.txt
+++ b/Documentation/features/perf/kprobes-event/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | ok |
| h8300: | TODO |
| hexagon: | ok |
@@ -23,7 +22,7 @@
| openrisc: | TODO |
| parisc: | ok |
| powerpc: | ok |
- | riscv: | TODO |
+ | riscv: | ok |
| s390: | ok |
| sh: | ok |
| sparc: | ok |
diff --git a/Documentation/features/perf/perf-regs/arch-support.txt b/Documentation/features/perf/perf-regs/arch-support.txt
index e7450fbb8253..5bf3b1854a1f 100644
--- a/Documentation/features/perf/perf-regs/arch-support.txt
+++ b/Documentation/features/perf/perf-regs/arch-support.txt
@@ -10,14 +10,13 @@
| arc: | TODO |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| m68k: | TODO |
| microblaze: | TODO |
- | mips: | TODO |
+ | mips: | ok |
| nds32: | TODO |
| nios2: | TODO |
| openrisc: | TODO |
diff --git a/Documentation/features/perf/perf-stackdump/arch-support.txt b/Documentation/features/perf/perf-stackdump/arch-support.txt
index 98e79d128d9b..d88659bb4fc1 100644
--- a/Documentation/features/perf/perf-stackdump/arch-support.txt
+++ b/Documentation/features/perf/perf-stackdump/arch-support.txt
@@ -10,14 +10,13 @@
| arc: | TODO |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| m68k: | TODO |
| microblaze: | TODO |
- | mips: | TODO |
+ | mips: | ok |
| nds32: | TODO |
| nios2: | TODO |
| openrisc: | TODO |
diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
index 47e6903f47a5..883d33b265d6 100644
--- a/Documentation/features/sched/membarrier-sync-core/arch-support.txt
+++ b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
@@ -33,7 +33,6 @@
| arc: | TODO |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/sched/numa-balancing/arch-support.txt b/Documentation/features/sched/numa-balancing/arch-support.txt
index 964457ad26c1..9affb7c2c500 100644
--- a/Documentation/features/sched/numa-balancing/arch-support.txt
+++ b/Documentation/features/sched/numa-balancing/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | .. |
| arm: | .. |
| arm64: | ok |
- | c6x: | .. |
| csky: | .. |
| h8300: | .. |
| hexagon: | .. |
@@ -23,7 +22,7 @@
| openrisc: | .. |
| parisc: | .. |
| powerpc: | ok |
- | riscv: | TODO |
+ | riscv: | ok |
| s390: | ok |
| sh: | .. |
| sparc: | TODO |
diff --git a/Documentation/features/seccomp/seccomp-filter/arch-support.txt b/Documentation/features/seccomp/seccomp-filter/arch-support.txt
index eb3d74092c61..26eec58ab819 100644
--- a/Documentation/features/seccomp/seccomp-filter/arch-support.txt
+++ b/Documentation/features/seccomp/seccomp-filter/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/time/arch-tick-broadcast/arch-support.txt b/Documentation/features/time/arch-tick-broadcast/arch-support.txt
index 4d11cbb3c09b..8639fe8315f5 100644
--- a/Documentation/features/time/arch-tick-broadcast/arch-support.txt
+++ b/Documentation/features/time/arch-tick-broadcast/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/time/clockevents/arch-support.txt b/Documentation/features/time/clockevents/arch-support.txt
index 6863a3fbddad..9a81cb03b1fd 100644
--- a/Documentation/features/time/clockevents/arch-support.txt
+++ b/Documentation/features/time/clockevents/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | ok |
| arm: | TODO |
| arm64: | ok |
- | c6x: | ok |
| csky: | ok |
| h8300: | ok |
| hexagon: | ok |
diff --git a/Documentation/features/time/context-tracking/arch-support.txt b/Documentation/features/time/context-tracking/arch-support.txt
index 52aea275aab7..4ed116c2ec39 100644
--- a/Documentation/features/time/context-tracking/arch-support.txt
+++ b/Documentation/features/time/context-tracking/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/time/irq-time-acct/arch-support.txt b/Documentation/features/time/irq-time-acct/arch-support.txt
index 6fc03deb1c38..bc30c15557c7 100644
--- a/Documentation/features/time/irq-time-acct/arch-support.txt
+++ b/Documentation/features/time/irq-time-acct/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/time/virt-cpuacct/arch-support.txt b/Documentation/features/time/virt-cpuacct/arch-support.txt
index e51f3af38e31..050de43bbbb9 100644
--- a/Documentation/features/time/virt-cpuacct/arch-support.txt
+++ b/Documentation/features/time/virt-cpuacct/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/vm/ELF-ASLR/arch-support.txt b/Documentation/features/vm/ELF-ASLR/arch-support.txt
index eccda0732474..99cb6d7f5005 100644
--- a/Documentation/features/vm/ELF-ASLR/arch-support.txt
+++ b/Documentation/features/vm/ELF-ASLR/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/vm/PG_uncached/arch-support.txt b/Documentation/features/vm/PG_uncached/arch-support.txt
index c74e3f8040e1..6cde38458596 100644
--- a/Documentation/features/vm/PG_uncached/arch-support.txt
+++ b/Documentation/features/vm/PG_uncached/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | TODO |
| arm64: | TODO |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/vm/THP/arch-support.txt b/Documentation/features/vm/THP/arch-support.txt
index 1c0b95f2b40d..e8238cb2a4da 100644
--- a/Documentation/features/vm/THP/arch-support.txt
+++ b/Documentation/features/vm/THP/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | ok |
| arm: | ok |
| arm64: | ok |
- | c6x: | .. |
| csky: | .. |
| h8300: | .. |
| hexagon: | .. |
diff --git a/Documentation/features/vm/TLB/arch-support.txt b/Documentation/features/vm/TLB/arch-support.txt
index 30f75a79ce01..48a5ca548399 100644
--- a/Documentation/features/vm/TLB/arch-support.txt
+++ b/Documentation/features/vm/TLB/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | TODO |
| arm64: | TODO |
- | c6x: | .. |
| csky: | TODO |
| h8300: | .. |
| hexagon: | TODO |
diff --git a/Documentation/features/vm/huge-vmap/arch-support.txt b/Documentation/features/vm/huge-vmap/arch-support.txt
index c5ff3a427722..439fd9069b8b 100644
--- a/Documentation/features/vm/huge-vmap/arch-support.txt
+++ b/Documentation/features/vm/huge-vmap/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | TODO |
| arm: | TODO |
| arm64: | ok |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/vm/ioremap_prot/arch-support.txt b/Documentation/features/vm/ioremap_prot/arch-support.txt
index b5fb37c28cc6..9a0c8783b84d 100644
--- a/Documentation/features/vm/ioremap_prot/arch-support.txt
+++ b/Documentation/features/vm/ioremap_prot/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | ok |
| arm: | TODO |
| arm64: | TODO |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/features/vm/pte_special/arch-support.txt b/Documentation/features/vm/pte_special/arch-support.txt
index 13d0e1e17001..40b969f3a6bb 100644
--- a/Documentation/features/vm/pte_special/arch-support.txt
+++ b/Documentation/features/vm/pte_special/arch-support.txt
@@ -10,7 +10,6 @@
| arc: | ok |
| arm: | ok |
| arm64: | ok |
- | c6x: | TODO |
| csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
diff --git a/Documentation/filesystems/afs.rst b/Documentation/filesystems/afs.rst
index 0abb155ac666..ca062a7f8ee2 100644
--- a/Documentation/filesystems/afs.rst
+++ b/Documentation/filesystems/afs.rst
@@ -109,7 +109,7 @@ Mountpoints
AFS has a concept of mountpoints. In AFS terms, these are specially formatted
symbolic links (of the same form as the "device name" passed to mount). kAFS
presents these to the user as directories that have a follow-link capability
-(ie: symbolic link semantics). If anyone attempts to access them, they will
+(i.e.: symbolic link semantics). If anyone attempts to access them, they will
automatically cause the target volume to be mounted (if possible) on that site.
Automatically mounted filesystems will be automatically unmounted approximately
@@ -144,7 +144,7 @@ looks up a cell of the same name, for example::
Proc Filesystem
===============
-The AFS modules creates a "/proc/fs/afs/" directory and populates it:
+The AFS module creates a "/proc/fs/afs/" directory and populates it:
(*) A "cells" file that lists cells currently known to the afs module and
their usage counts::
@@ -201,7 +201,7 @@ And then run as::
./klog
Assuming it's successful, this adds a key of type RxRPC, named for the service
-and cell, eg: "afs@<cellname>". This can be viewed with the keyctl program or
+and cell, e.g.: "afs@<cellname>". This can be viewed with the keyctl program or
by cat'ing /proc/keys::
[root@andromeda ~]# keyctl show
@@ -211,7 +211,7 @@ by cat'ing /proc/keys::
111416553 --als--v 0 0 \_ rxrpc: afs@CAMBRIDGE.REDHAT.COM
Currently the username, realm, password and proposed ticket lifetime are
-compiled in to the program.
+compiled into the program.
It is not required to acquire a key before using AFS facilities, but if one is
not acquired then all operations will be governed by the anonymous user parts
diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt
index 8fdb78f3c6c9..e03c20564f3a 100644
--- a/Documentation/filesystems/dax.txt
+++ b/Documentation/filesystems/dax.txt
@@ -83,20 +83,9 @@ Summary
directories. This has runtime constraints and limitations that are
described in 6) below.
- 6. When changing the S_DAX policy via toggling the persistent FS_XFLAG_DAX flag,
- the change in behaviour for existing regular files may not occur
- immediately. If the change must take effect immediately, the administrator
- needs to:
-
- a) stop the application so there are no active references to the data set
- the policy change will affect
-
- b) evict the data set from kernel caches so it will be re-instantiated when
- the application is restarted. This can be achieved by:
-
- i. drop-caches
- ii. a filesystem unmount and mount cycle
- iii. a system reboot
+ 6. When changing the S_DAX policy via toggling the persistent FS_XFLAG_DAX
+ flag, the change to existing regular files won't take effect until the
+ files are closed by all processes.
Details
diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index dae15c96e659..35ed01a5fbc9 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -179,7 +179,6 @@ fault_type=%d Support configuring fault injection type, should be
FAULT_KVMALLOC 0x000000002
FAULT_PAGE_ALLOC 0x000000004
FAULT_PAGE_GET 0x000000008
- FAULT_ALLOC_BIO 0x000000010
FAULT_ALLOC_NID 0x000000020
FAULT_ORPHAN 0x000000040
FAULT_BLOCK 0x000000080
@@ -247,8 +246,24 @@ checkpoint=%s[:%u[%]] Set to "disable" to turn off checkpointing. Set to "enabl
hide up to all remaining free space. The actual space that
would be unusable can be viewed at /sys/fs/f2fs/<disk>/unusable
This space is reclaimed once checkpoint=enable.
+checkpoint_merge When checkpoint is enabled, this can be used to create a kernel
+			 daemon and make it merge concurrent checkpoint requests as
+ much as possible to eliminate redundant checkpoint issues. Plus,
+ we can eliminate the sluggish issue caused by slow checkpoint
+ operation when the checkpoint is done in a process context in
+ a cgroup having low i/o budget and cpu shares. To make this
+ do better, we set the default i/o priority of the kernel daemon
+ to "3", to give one higher priority than other kernel threads.
+			 This is the same way an I/O priority is given to the jbd2
+			 journaling thread of the ext4 filesystem.
+nocheckpoint_merge Disable checkpoint merge feature.
compress_algorithm=%s Control compress algorithm, currently f2fs supports "lzo",
"lz4", "zstd" and "lzo-rle" algorithm.
+compress_algorithm=%s:%d Control compress algorithm and its compress level, now, only
+ "lz4" and "zstd" support compress level config.
+ algorithm level range
+ lz4 3 - 16
+ zstd 1 - 22
compress_log_size=%u Support configuring compress cluster size, the size will
be 4KB * (1 << %u), 16KB is minimum size, also it's
default size.
@@ -831,7 +846,7 @@ This is the default option. f2fs does automatic compression in the writeback of
compression enabled files.
2) compress_mode=user
-This disables the automaic compression and gives the user discretion of choosing the
+This disables the automatic compression and gives the user discretion of choosing the
target file and the timing. The user can do manual compression/decompression on the
compression enabled files using F2FS_IOC_DECOMPRESS_FILE and F2FS_IOC_COMPRESS_FILE
ioctls like the below.
diff --git a/Documentation/filesystems/fsverity.rst b/Documentation/filesystems/fsverity.rst
index e0204a23e997..1d831e3cbcb3 100644
--- a/Documentation/filesystems/fsverity.rst
+++ b/Documentation/filesystems/fsverity.rst
@@ -217,6 +217,82 @@ FS_IOC_MEASURE_VERITY can fail with the following errors:
- ``EOVERFLOW``: the digest is longer than the specified
``digest_size`` bytes. Try providing a larger buffer.
+FS_IOC_READ_VERITY_METADATA
+---------------------------
+
+The FS_IOC_READ_VERITY_METADATA ioctl reads verity metadata from a
+verity file. This ioctl is available since Linux v5.12.
+
+This ioctl allows writing a server program that takes a verity file
+and serves it to a client program, such that the client can do its own
+fs-verity compatible verification of the file. This only makes sense
+if the client doesn't trust the server and if the server needs to
+provide the storage for the client.
+
+This is a fairly specialized use case, and most fs-verity users won't
+need this ioctl.
+
+This ioctl takes in a pointer to the following structure::
+
+ #define FS_VERITY_METADATA_TYPE_MERKLE_TREE 1
+ #define FS_VERITY_METADATA_TYPE_DESCRIPTOR 2
+ #define FS_VERITY_METADATA_TYPE_SIGNATURE 3
+
+ struct fsverity_read_metadata_arg {
+ __u64 metadata_type;
+ __u64 offset;
+ __u64 length;
+ __u64 buf_ptr;
+ __u64 __reserved;
+ };
+
+``metadata_type`` specifies the type of metadata to read:
+
+- ``FS_VERITY_METADATA_TYPE_MERKLE_TREE`` reads the blocks of the
+ Merkle tree. The blocks are returned in order from the root level
+ to the leaf level. Within each level, the blocks are returned in
+ the same order that their hashes are themselves hashed.
+ See `Merkle tree`_ for more information.
+
+- ``FS_VERITY_METADATA_TYPE_DESCRIPTOR`` reads the fs-verity
+ descriptor. See `fs-verity descriptor`_.
+
+- ``FS_VERITY_METADATA_TYPE_SIGNATURE`` reads the signature which was
+ passed to FS_IOC_ENABLE_VERITY, if any. See `Built-in signature
+ verification`_.
+
+The semantics are similar to those of ``pread()``. ``offset``
+specifies the offset in bytes into the metadata item to read from, and
+``length`` specifies the maximum number of bytes to read from the
+metadata item. ``buf_ptr`` is the pointer to the buffer to read into,
+cast to a 64-bit integer. ``__reserved`` must be 0. On success, the
+number of bytes read is returned. 0 is returned at the end of the
+metadata item. The returned length may be less than ``length``, for
+example if the ioctl is interrupted.
+
+The metadata returned by FS_IOC_READ_VERITY_METADATA isn't guaranteed
+to be authenticated against the file digest that would be returned by
+`FS_IOC_MEASURE_VERITY`_, as the metadata is expected to be used to
+implement fs-verity compatible verification anyway (though absent a
+malicious disk, the metadata will indeed match). E.g. to implement
+this ioctl, the filesystem is allowed to just read the Merkle tree
+blocks from disk without actually verifying the path to the root node.
+
+FS_IOC_READ_VERITY_METADATA can fail with the following errors:
+
+- ``EFAULT``: the caller provided inaccessible memory
+- ``EINTR``: the ioctl was interrupted before any data was read
+- ``EINVAL``: reserved fields were set, or ``offset + length``
+ overflowed
+- ``ENODATA``: the file is not a verity file, or
+ FS_VERITY_METADATA_TYPE_SIGNATURE was requested but the file doesn't
+ have a built-in signature
+- ``ENOTTY``: this type of filesystem does not implement fs-verity, or
+ this ioctl is not yet implemented on it
+- ``EOPNOTSUPP``: the kernel was not configured with fs-verity
+ support, or the filesystem superblock has not had the 'verity'
+ feature enabled on it. (See `Filesystem support`_.)
+
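As a rough userspace sketch of the read loop these semantics imply (this
assumes the Linux UAPI headers from v5.12 or later; the dump_merkle_tree()
name and the 4096-byte chunk size are illustrative choices, not part of the
kernel API)::

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/fsverity.h>

    /* Dump a verity file's Merkle tree to stdout, one chunk at a time. */
    static int dump_merkle_tree(int fd)
    {
            char buf[4096];
            uint64_t off = 0;

            for (;;) {
                    struct fsverity_read_metadata_arg arg = {
                            .metadata_type = FS_VERITY_METADATA_TYPE_MERKLE_TREE,
                            .offset = off,
                            .length = sizeof(buf),
                            .buf_ptr = (uintptr_t)buf,
                            /* .__reserved stays zero, as required */
                    };
                    int n = ioctl(fd, FS_IOC_READ_VERITY_METADATA, &arg);

                    if (n < 0)
                            return -1;      /* e.g. ENODATA: not a verity file */
                    if (n == 0)
                            return 0;       /* end of the Merkle tree */
                    fwrite(buf, 1, (size_t)n, stdout);
                    off += (uint64_t)n;
            }
    }

Because the blocks come back in root-to-leaf order, a client can feed them to
its own verifier incrementally and check the result against the digest
obtained via FS_IOC_MEASURE_VERITY.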
FS_IOC_GETFLAGS
---------------
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
index 7be9b46d85d9..1f76b1cb3348 100644
--- a/Documentation/filesystems/index.rst
+++ b/Documentation/filesystems/index.rst
@@ -83,6 +83,7 @@ Documentation for filesystem implementations.
erofs
ext2
ext3
+ ext4/index
f2fs
gfs2
gfs2-uevents
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index c0f2c7586531..b7dcc86c92a4 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -126,9 +126,10 @@ prototypes::
int (*get)(const struct xattr_handler *handler, struct dentry *dentry,
struct inode *inode, const char *name, void *buffer,
size_t size);
- int (*set)(const struct xattr_handler *handler, struct dentry *dentry,
- struct inode *inode, const char *name, const void *buffer,
- size_t size, int flags);
+ int (*set)(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct inode *inode, const char *name,
+ const void *buffer, size_t size, int flags);
locking rules:
all may block
diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst
index 587a93973929..78240e29b0bb 100644
--- a/Documentation/filesystems/overlayfs.rst
+++ b/Documentation/filesystems/overlayfs.rst
@@ -586,6 +586,14 @@ without significant effort.
The advantage of mounting with the "volatile" option is that all forms of
sync calls to the upper filesystem are omitted.
+In order to avoid giving a false sense of safety, the syncfs (and fsync)
+semantics of volatile mounts are slightly different from those of the rest of
+the VFS. If any writeback error occurs on the upperdir's filesystem after a
+volatile mount takes place, all sync functions will return an error. Once this
+condition is reached, the filesystem will not recover, and every subsequent sync
+call will return an error, even if the upperdir has not experienced a new error
+since the last sync call.
+
When overlay is mounted with "volatile" option, the directory
"$workdir/work/incompat/volatile" is created. During next mount, overlay
checks for this directory and refuses to mount if present. This is a strong
diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
index 867036aa90b8..0302035781be 100644
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -717,6 +717,8 @@ be removed. Switch while you still can; the old one won't stay.
**mandatory**
->setxattr() and xattr_handler.set() get dentry and inode passed separately.
+The xattr_handler.set() gets passed the user namespace of the mount the inode
+is seen from so filesystems can idmap the i_uid and i_gid accordingly.
dentry might be yet to be attached to inode, so do _not_ use its ->d_inode
in the instances. Rationale: !@#!@# security_d_instantiate() needs to be
called before we attach dentry to inode and !@#!@##!@$!$#!@#$!@$!@$ smack
@@ -865,3 +867,26 @@ no matter what. Everything is handled by the caller.
clone_private_mount() returns a longterm mount now, so the proper destructor of
its result is kern_unmount() or kern_unmount_array().
+
+---
+
+**mandatory**
+
+zero-length bvec segments are disallowed; they must be filtered out before
+being passed on to an iterator.
+
+---
+
+**mandatory**
+
+For bvec based iterators bio_iov_iter_get_pages() now doesn't copy bvecs but
+uses the one provided. Anyone issuing kiocb-I/O should ensure that the bvec and
+page references stay until I/O has completed, i.e. until ->ki_complete() has
+been called or returned with non -EIOCBQUEUED code.
+
+---
+
+**mandatory**
+
+mnt_want_write_file() can now only be paired with mnt_drop_write_file(),
+whereas previously it could be paired with mnt_drop_write() as well.
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index 2fa69f710e2a..48fbfc336ebf 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -687,7 +687,14 @@ files are there, and which are missing.
kcore Kernel core image (can be ELF or A.OUT(deprecated in 2.4))
kmsg Kernel messages
ksyms Kernel symbol table
- loadavg Load average of last 1, 5 & 15 minutes
+ loadavg Load average of last 1, 5 & 15 minutes;
+ number of processes currently runnable (running or on ready queue);
+ total number of processes in system;
+ last pid created.
+ All fields are separated by one space except "number of
+ processes currently runnable" and "total number of processes
+ in system", which are separated by a slash ('/'). Example:
+ 0.61 0.61 0.55 3/828 22084
locks Kernel locks
meminfo Memory info
misc Miscellaneous
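As an illustration of the loadavg layout described above, a minimal userspace
parser might look like the following sketch (field names are arbitrary)::

    #include <stdio.h>

    int main(void)
    {
            double avg1, avg5, avg15;
            long runnable, total, last_pid;
            FILE *f = fopen("/proc/loadavg", "r");

            if (!f)
                    return 1;
            /* e.g. "0.61 0.61 0.55 3/828 22084" */
            if (fscanf(f, "%lf %lf %lf %ld/%ld %ld",
                       &avg1, &avg5, &avg15, &runnable, &total, &last_pid) == 6)
                    printf("load1=%.2f runnable=%ld/%ld last_pid=%ld\n",
                           avg1, runnable, total, last_pid);
            fclose(f);
            return 0;
    }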
diff --git a/Documentation/filesystems/seq_file.rst b/Documentation/filesystems/seq_file.rst
index 56856481dc8d..a6726082a7c2 100644
--- a/Documentation/filesystems/seq_file.rst
+++ b/Documentation/filesystems/seq_file.rst
@@ -217,6 +217,12 @@ between the calls to start() and stop(), so holding a lock during that time
is a reasonable thing to do. The seq_file code will also avoid taking any
other locks while the iterator is active.
+The iterator value returned by start() or next() is guaranteed to be
+passed to a subsequent next() or stop() call. This allows resources
+such as locks that were taken to be reliably released. There is *no*
+guarantee that the iterator will be passed to show(), though in practice
+it often will be.
+
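A minimal sketch of an iterator that relies on this guarantee is shown below;
it takes a mutex in start() and depends on stop() always being reached to
release it. The items array and all names here are invented for the example::

    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/seq_file.h>

    static DEFINE_MUTEX(items_lock);
    static int items[] = { 1, 2, 3 };

    static void *items_start(struct seq_file *m, loff_t *pos)
    {
            mutex_lock(&items_lock);        /* held for the whole traversal */
            return (size_t)*pos < ARRAY_SIZE(items) ? &items[*pos] : NULL;
    }

    static void *items_next(struct seq_file *m, void *v, loff_t *pos)
    {
            ++*pos;
            return (size_t)*pos < ARRAY_SIZE(items) ? &items[*pos] : NULL;
    }

    static void items_stop(struct seq_file *m, void *v)
    {
            mutex_unlock(&items_lock);      /* reached even when v is NULL */
    }

    static int items_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "%d\n", *(int *)v);
            return 0;
    }

    static const struct seq_operations items_seq_ops = {
            .start = items_start,
            .next  = items_next,
            .stop  = items_stop,
            .show  = items_show,
    };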
Formatted output
================
diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
index ca52c82e5bb5..2049bbf5e388 100644
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -112,7 +112,7 @@ members are defined:
.. code-block:: c
- struct file_system_operations {
+ struct file_system_type {
const char *name;
int fs_flags;
struct dentry *(*mount) (struct file_system_type *, int,
@@ -270,7 +270,10 @@ or bottom half).
->alloc_inode.
``dirty_inode``
- this method is called by the VFS to mark an inode dirty.
+ this method is called by the VFS when an inode is marked dirty.
+ This is specifically for the inode itself being marked dirty,
+ not its data. If the update needs to be persisted by fdatasync(),
+ then I_DIRTY_DATASYNC will be set in the flags argument.
``write_inode``
this method is called when the VFS needs to write an inode to
@@ -415,28 +418,29 @@ As of kernel 2.6.22, the following members are defined:
.. code-block:: c
struct inode_operations {
- int (*create) (struct inode *,struct dentry *, umode_t, bool);
+ int (*create) (struct user_namespace *, struct inode *,struct dentry *, umode_t, bool);
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
- int (*symlink) (struct inode *,struct dentry *,const char *);
- int (*mkdir) (struct inode *,struct dentry *,umode_t);
+ int (*symlink) (struct user_namespace *, struct inode *,struct dentry *,const char *);
+ int (*mkdir) (struct user_namespace *, struct inode *,struct dentry *,umode_t);
int (*rmdir) (struct inode *,struct dentry *);
- int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
- int (*rename) (struct inode *, struct dentry *,
+ int (*mknod) (struct user_namespace *, struct inode *,struct dentry *,umode_t,dev_t);
+ int (*rename) (struct user_namespace *, struct inode *, struct dentry *,
struct inode *, struct dentry *, unsigned int);
int (*readlink) (struct dentry *, char __user *,int);
const char *(*get_link) (struct dentry *, struct inode *,
struct delayed_call *);
- int (*permission) (struct inode *, int);
+ int (*permission) (struct user_namespace *, struct inode *, int);
int (*get_acl)(struct inode *, int);
- int (*setattr) (struct dentry *, struct iattr *);
- int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
+ int (*setattr) (struct user_namespace *, struct dentry *, struct iattr *);
+ int (*getattr) (struct user_namespace *, const struct path *, struct kstat *, u32, unsigned int);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
void (*update_time)(struct inode *, struct timespec *, int);
int (*atomic_open)(struct inode *, struct dentry *, struct file *,
unsigned open_flag, umode_t create_mode);
- int (*tmpfile) (struct inode *, struct dentry *, umode_t);
+ int (*tmpfile) (struct user_namespace *, struct inode *, struct dentry *, umode_t);
+ int (*set_acl)(struct user_namespace *, struct inode *, struct posix_acl *, int);
};
Again, all methods are called without any locks being held, unless
diff --git a/Documentation/firmware-guide/acpi/debug.rst b/Documentation/firmware-guide/acpi/debug.rst
index 1a152dd1d765..03cd4e25fc45 100644
--- a/Documentation/firmware-guide/acpi/debug.rst
+++ b/Documentation/firmware-guide/acpi/debug.rst
@@ -52,19 +52,12 @@ shows the supported mask values, currently these::
ACPI_CA_DISASSEMBLER 0x00000800
ACPI_COMPILER 0x00001000
ACPI_TOOLS 0x00002000
- ACPI_BUS_COMPONENT 0x00010000
- ACPI_AC_COMPONENT 0x00020000
- ACPI_BATTERY_COMPONENT 0x00040000
- ACPI_BUTTON_COMPONENT 0x00080000
ACPI_SBS_COMPONENT 0x00100000
ACPI_FAN_COMPONENT 0x00200000
ACPI_PCI_COMPONENT 0x00400000
- ACPI_POWER_COMPONENT 0x00800000
ACPI_CONTAINER_COMPONENT 0x01000000
ACPI_SYSTEM_COMPONENT 0x02000000
- ACPI_THERMAL_COMPONENT 0x04000000
ACPI_MEMORY_DEVICE_COMPONENT 0x08000000
- ACPI_VIDEO_COMPONENT 0x10000000
ACPI_PROCESSOR_COMPONENT 0x20000000
debug_level
@@ -118,15 +111,15 @@ currently these::
Examples
========
-For example, drivers/acpi/bus.c contains this::
+For example, drivers/acpi/acpica/evxfevnt.c contains this::
- #define _COMPONENT ACPI_BUS_COMPONENT
+ #define _COMPONENT ACPI_EVENTS
...
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device insertion detected\n"));
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI mode disabled\n"));
-To turn on this message, set the ACPI_BUS_COMPONENT bit in acpi.debug_layer
-and the ACPI_LV_INFO bit in acpi.debug_level. (The ACPI_DEBUG_PRINT
-statement uses ACPI_DB_INFO, which is macro based on the ACPI_LV_INFO
+To turn on this message, set the ACPI_EVENTS bit in acpi.debug_layer
+and the ACPI_LV_INIT bit in acpi.debug_level. (The ACPI_DEBUG_PRINT
+statement uses ACPI_DB_INIT, which is a macro based on the ACPI_LV_INIT
definition.)
Enable all AML "Debug" output (stores to the Debug object while interpreting
diff --git a/Documentation/firmware-guide/acpi/gpio-properties.rst b/Documentation/firmware-guide/acpi/gpio-properties.rst
index b36aa3e743d8..4e264c16ddff 100644
--- a/Documentation/firmware-guide/acpi/gpio-properties.rst
+++ b/Documentation/firmware-guide/acpi/gpio-properties.rst
@@ -146,6 +146,7 @@ following rules (see also the examples):
other words, it is not mandatory to fill all the GPIO lines
- empty names are allowed (two quotation marks ``""`` correspond to an empty
name)
+ - names inside one GPIO controller/expander must be unique
Example of a GPIO controller of 16 lines, with an incomplete list with two
empty names::
diff --git a/Documentation/fpga/dfl.rst b/Documentation/fpga/dfl.rst
index 0404fe6ffc74..c41ac76ffaae 100644
--- a/Documentation/fpga/dfl.rst
+++ b/Documentation/fpga/dfl.rst
@@ -501,6 +501,34 @@ Developer only needs to provide a sub feature driver with matched feature id.
FME Partial Reconfiguration Sub Feature driver (see drivers/fpga/dfl-fme-pr.c)
could be a reference.
+Location of DFLs on a PCI Device
+================================
+The original method for finding a DFL on a PCI device assumed the start of the
+first DFL to offset 0 of bar 0. If the first node of the DFL is an FME,
+then further DFLs in the port(s) are specified in FME header registers.
+Alternatively, a PCIe vendor specific capability structure can be used to
+specify the location of all the DFLs on the device, providing flexibility
+for the type of starting node in the DFL. Intel has reserved the
+VSEC ID of 0x43 for this purpose. The vendor specific
+data begins with a 4 byte vendor specific register for the number of DFLs,
+followed by 4 byte Offset/BIR vendor specific registers for each DFL. Bits 2:0
+of the Offset/BIR register indicate the BAR, and bits 31:3 form the 8 byte
+aligned offset where bits 2:0 are zero.
+::
+
+ +----------------------------+
+ |31 Number of DFLS 0|
+ +----------------------------+
+ |31 Offset 3|2 BIR 0|
+ +----------------------------+
+ . . .
+ +----------------------------+
+ |31 Offset 3|2 BIR 0|
+ +----------------------------+
+
+Being able to specify more than one DFL per BAR has been considered, but it
+was determined the use case did not provide value. Specifying a single DFL
+per BAR simplifies the implementation and allows for extra error checking.
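For illustration, decoding a single 4 byte Offset/BIR register laid out as
above could look like the sketch below (the struct and function names are
invented for this example)::

    #include <stdint.h>

    struct dfl_location {
            uint32_t bar;           /* BAR index, from bits 2:0 (BIR) */
            uint64_t offset;        /* 8 byte aligned offset, from bits 31:3 */
    };

    static struct dfl_location dfl_decode_offset_bir(uint32_t reg)
    {
            struct dfl_location loc = {
                    .bar    = reg & 0x7,
                    .offset = reg & ~UINT32_C(7),   /* low three bits are zero */
            };
            return loc;
    }

For example, a register value of 0x100003 selects BAR 3 at offset 0x100000.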
Open discussion
===============
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 3c5ae4f6dfd2..87e5023e3f55 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -319,6 +319,15 @@ CRTC Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
:export:
+Color Management Functions Reference
+------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
+ :export:
+
+.. kernel-doc:: include/drm/drm_color_mgmt.h
+ :internal:
+
Frame Buffer Abstraction
========================
@@ -370,6 +379,21 @@ Plane Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_plane.c
:export:
+Plane Composition Functions Reference
+-------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_blend.c
+ :export:
+
+Plane Damage Tracking Functions Reference
+-----------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_damage_helper.c
+ :export:
+
+.. kernel-doc:: include/drm/drm_damage_helper.h
+ :internal:
+
Display Modes Function Reference
================================
@@ -436,6 +460,9 @@ KMS Locking
KMS Properties
==============
+This section of the documentation is primarily aimed at user-space developers.
+For the driver APIs, see the other sections.
+
Property Types and Blob Property Support
----------------------------------------
@@ -466,39 +493,30 @@ Standard CRTC Properties
.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
:doc: standard CRTC properties
+Standard Plane Properties
+-------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_plane.c
+ :doc: standard plane properties
+
Plane Composition Properties
----------------------------
.. kernel-doc:: drivers/gpu/drm/drm_blend.c
:doc: overview
-.. kernel-doc:: drivers/gpu/drm/drm_blend.c
- :export:
-
-FB_DAMAGE_CLIPS
-~~~~~~~~~~~~~~~
+Damage Tracking Properties
+--------------------------
.. kernel-doc:: drivers/gpu/drm/drm_damage_helper.c
:doc: overview
-.. kernel-doc:: drivers/gpu/drm/drm_damage_helper.c
- :export:
-
-.. kernel-doc:: include/drm/drm_damage_helper.h
- :internal:
-
Color Management Properties
---------------------------
.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
:doc: overview
-.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
- :export:
-
-.. kernel-doc:: include/drm/drm_color_mgmt.h
- :internal:
-
Tile Group Property
-------------------
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index 7dce175f6d75..04bdc7a91d53 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -457,5 +457,8 @@ Userspace API Structures
.. kernel-doc:: include/uapi/drm/drm_mode.h
:doc: overview
+.. kernel-doc:: include/uapi/drm/drm.h
+ :internal:
+
.. kernel-doc:: include/uapi/drm/drm_mode.h
:internal:
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 20868f3d0123..486c720f3890 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -428,7 +428,7 @@ User Batchbuffer Execution
Logical Rings, Logical Ring Contexts and Execlists
--------------------------------------------------
-.. kernel-doc:: drivers/gpu/drm/i915/gt/intel_lrc.c
+.. kernel-doc:: drivers/gpu/drm/i915/gt/intel_execlists_submission.c
:doc: Logical Rings, Logical Ring Contexts and Execlists
Global GTT views
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 009d8e6c7e3c..40ccac61137e 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -23,6 +23,9 @@ Advanced: Tricky tasks that need fairly good understanding of the DRM subsystem
and graphics topics. Generally need the relevant hardware for development and
testing.
+Expert: Only attempt these if you've successfully completed some tricky
+refactorings already and are an expert in the specific area.
+
Subsystem-wide refactorings
===========================
@@ -168,6 +171,22 @@ Contact: Daniel Vetter, respective driver maintainers
Level: Advanced
+Move Buffer Object Locking to dma_resv_lock()
+---------------------------------------------
+
+Many drivers have their own per-object locking scheme, usually using
+mutex_lock(). This causes all kinds of trouble for buffer sharing, since
+depending which driver is the exporter and importer, the locking hierarchy is
+reversed.
+
+To solve this we need one standard per-object locking mechanism, which is
+dma_resv_lock(). This lock needs to be called as the outermost lock, with all
+other driver specific per-object locks removed. The problem is that rolling out
+the actual change to the locking contract is a flag day, due to struct dma_buf
+buffer sharing.
+
+Level: Expert
+
Convert logging to drm_* functions with drm_device paramater
------------------------------------------------------------
@@ -669,7 +688,7 @@ for fbdev.
https://patchwork.freedesktop.org/patch/306579/
- [RFC PATCH v2 00/13] Kernel based bootsplash
- https://lkml.org/lkml/2017/12/13/764
+ https://lore.kernel.org/r/20171213194755.3409-1-mstaudt@suse.de
Contact: Sam Ravnborg
diff --git a/Documentation/gpu/vkms.rst b/Documentation/gpu/vkms.rst
index 13bab1d93bb3..2c9b376da5ca 100644
--- a/Documentation/gpu/vkms.rst
+++ b/Documentation/gpu/vkms.rst
@@ -7,6 +7,88 @@
.. kernel-doc:: drivers/gpu/drm/vkms/vkms_drv.c
:doc: vkms (Virtual Kernel Modesetting)
+Setup
+=====
+
+The VKMS driver can be setup with the following steps:
+
+To check if VKMS is loaded, run::
+
+ lsmod | grep vkms
+
+This should list the VKMS driver. If no output is obtained, then
+you need to enable and/or load the VKMS driver.
+Ensure that the VKMS driver has been set as a loadable module in your
+kernel config file. Do::
+
+ make nconfig
+
+ Go to `Device Drivers> Graphics support`
+
+ Enable `Virtual KMS (EXPERIMENTAL)`
+
+Compile and build the kernel for the changes to take effect.
+Now, to load the driver, use::
+
+ sudo modprobe vkms
+
+On running the lsmod command now, the VKMS driver will appear listed.
+You can also observe the driver being loaded in the dmesg logs.
+
+The VKMS driver has optional features to simulate different kinds of hardware,
+which are exposed as module options. You can use the `modinfo` command
+to see the module options for vkms::
+
+ modinfo vkms
+
+Module options are helpful when testing, and enabling modules
+can be done while loading vkms. For example, to load vkms with cursor enabled,
+use::
+
+ sudo modprobe vkms enable_cursor=1
+
+To disable the driver, use ::
+
+ sudo modprobe -r vkms
+
+Testing With IGT
+================
+
+The IGT GPU Tools is a test suite used specifically for debugging and
+development of the DRM drivers.
+The IGT Tools can be installed from
+`here <https://gitlab.freedesktop.org/drm/igt-gpu-tools>`_ .
+
+The tests need to be run without a compositor, so you need to switch to text
+only mode. You can do this by::
+
+ sudo systemctl isolate multi-user.target
+
+To return to graphical mode, do::
+
+ sudo systemctl isolate graphical.target
+
+Once you are in text only mode, you can run tests using the --device switch
+or IGT_DEVICE variable to specify the device filter for the driver we want
+to test. IGT_DEVICE can also be used with the run-test.sh script to run the
+tests for a specific driver::
+
+ sudo ./build/tests/<name of test> --device "sys:/sys/devices/platform/vkms"
+ sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./build/tests/<name of test>
+ sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./scripts/run-tests.sh -t <name of test>
+
+For example, to test the functionality of the writeback library,
+we can run the kms_writeback test::
+
+ sudo ./build/tests/kms_writeback --device "sys:/sys/devices/platform/vkms"
+ sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./build/tests/kms_writeback
+ sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./scripts/run-tests.sh -t kms_writeback
+
+You can also run subtests if you do not want to run the entire test::
+
+ sudo ./build/tests/kms_flip --run-subtest basic-plain-flip --device "sys:/sys/devices/platform/vkms"
+ sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./build/tests/kms_flip --run-subtest basic-plain-flip
+
TODO
====
diff --git a/Documentation/hid/amd-sfh-hid.rst b/Documentation/hid/amd-sfh-hid.rst
index 1f2fe29ccd4f..19ae94cde3b4 100644
--- a/Documentation/hid/amd-sfh-hid.rst
+++ b/Documentation/hid/amd-sfh-hid.rst
@@ -3,13 +3,13 @@
AMD Sensor Fusion Hub
=====================
-AMD Sensor Fusion Hub (SFH) is part of an SOC starting from Ryzen based platforms.
+AMD Sensor Fusion Hub (SFH) is part of an SOC starting from Ryzen-based platforms.
The solution is working well on several OEM products. AMD SFH uses HID over PCIe bus.
In terms of architecture it resembles ISH, however the major difference is all
the HID reports are generated as part of the kernel driver.
-1. Block Diagram
-================
+Block Diagram
+-------------
::
@@ -45,20 +45,20 @@ the HID reports are generated as part of the kernel driver.
AMD HID Transport Layer
-----------------------
AMD SFH transport is also implemented as a bus. Each client application executing in the AMD MP2 is
-registered as a device on this bus. Here: MP2 which is an ARM core connected to x86 for processing
+registered as a device on this bus. Here, MP2 is an ARM core connected to x86 for processing
sensor data. The layer, which binds each device (AMD SFH HID driver) identifies the device type and
-registers with the hid core. Transport layer attach a constant "struct hid_ll_driver" object with
+registers with the HID core. Transport layer attaches a constant "struct hid_ll_driver" object with
each device. Once a device is registered with HID core, the callbacks provided via this struct are
used by HID core to communicate with the device. AMD HID Transport layer implements the synchronous calls.
AMD HID Client Layer
--------------------
-This layer is responsible to implement HID request and descriptors. As firmware is OS agnostic, HID
+This layer is responsible for implementing HID requests and descriptors. As firmware is OS agnostic, HID
client layer fills the HID request structure and descriptors. HID client layer is complex as it is
-interface between MP2 PCIe layer and HID. HID client layer initialized the MP2 PCIe layer and holds
-the instance of MP2 layer. It identifies the number of sensors connected using MP2-PCIe layer. Base
-on that allocates the DRAM address for each and every sensor and pass it to MP2-PCIe driver.On
-enumeration of each the sensor, client layer fills the HID Descriptor structure and HID input repor
+the interface between the MP2 PCIe layer and HID. HID client layer initializes the MP2 PCIe layer and holds
+the instance of MP2 layer. It identifies the number of sensors connected using MP2-PCIe layer. Based
+on that, it allocates the DRAM address for each and every sensor and passes it to the MP2-PCIe driver. On
+enumeration of each sensor, client layer fills the HID Descriptor structure and HID input report
structure. HID Feature report structure is optional. The report descriptor structure varies from
sensor to sensor.
@@ -72,7 +72,7 @@ The communication between X86 and MP2 is split into three parts.
2. Data transfer via DRAM.
3. Supported sensor info via P2C registers.
-Commands are sent to MP2 using C2P Mailbox registers. Writing into C2P Message registers generate
+Commands are sent to MP2 using C2P Mailbox registers. Writing into C2P Message registers generates
interrupt to MP2. The client layer allocates the physical memory and the same is sent to MP2 via
the PCI layer. MP2 firmware writes the command output to the access DRAM memory which the client
layer has allocated. Firmware always writes minimum of 32 bytes into DRAM. So as a protocol driver
diff --git a/Documentation/hid/hid-alps.rst b/Documentation/hid/hid-alps.rst
index e2f4c4c11e3f..767c96bcbb7c 100644
--- a/Documentation/hid/hid-alps.rst
+++ b/Documentation/hid/hid-alps.rst
@@ -64,7 +64,7 @@ Case2 ReportID_3 TP Absolute
Command Read/Write
------------------
-To read/write to RAM, need to send a commands to the device.
+To read/write to RAM, need to send a command to the device.
The command format is as below.
@@ -80,7 +80,7 @@ Byte6 Value Byte
Byte7 Checksum
===== ======================
-Command Byte is read=0xD1/write=0xD2 .
+Command Byte is read=0xD1/write=0xD2.
Address is read/write RAM address.
diff --git a/Documentation/hid/hid-sensor.rst b/Documentation/hid/hid-sensor.rst
index 758972e34971..c1c9b8d8dca6 100644
--- a/Documentation/hid/hid-sensor.rst
+++ b/Documentation/hid/hid-sensor.rst
@@ -48,12 +48,12 @@ for different sensors. For example an accelerometer can send X,Y and Z data, whe
an ambient light sensor can send illumination data.
So the implementation has two parts:
-- Core hid driver
+- Core HID driver
- Individual sensor processing part (sensor drivers)
Core driver
-----------
-The core driver registers (hid-sensor-hub) registers as a HID driver. It parses
+The core driver (hid-sensor-hub) registers as a HID driver. It parses
report descriptors and identifies all the sensors present. It adds an MFD device
with name HID-SENSOR-xxxx (where xxxx is usage id from the specification).
@@ -95,14 +95,14 @@ Registration functions::
u32 usage_id,
struct hid_sensor_hub_callbacks *usage_callback):
-Registers callbacks for an usage id. The callback functions are not allowed
+Registers callbacks for a usage id. The callback functions are not allowed
to sleep::
int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
u32 usage_id):
-Removes callbacks for an usage id.
+Removes callbacks for a usage id.
Parsing function::
@@ -166,7 +166,7 @@ This allows some differentiating use cases, where vendor can provide application
Some common use cases are debug other sensors or to provide some events like
keyboard attached/detached or lid open/close.
-To allow application to utilize these sensors, here they are exported uses sysfs
+To allow applications to utilize these sensors, they are exported using sysfs
attribute groups, attributes and misc device interface.
An example of this representation on sysfs::
@@ -207,9 +207,9 @@ An example of this representation on sysfs::
│   │   │   ├── input-1-200202-units
│   │   │   ├── input-1-200202-value
-Here there is a custom sensors with four fields, two feature and two inputs.
+Here there is a custom sensor with four fields: two feature and two inputs.
Each field is represented by a set of attributes. All fields except the "value"
-are read only. The value field is a RW field.
+are read only. The value field is a read-write field.
Example::
@@ -237,6 +237,6 @@ These reports are pushed using misc device interface in a FIFO order::
│   │   │   ├── 10:53 -> ../HID-SENSOR-2000e1.6.auto
│   ├── HID-SENSOR-2000e1.6.auto
-Each reports can be of variable length preceded by a header. This header
-consist of a 32 bit usage id, 64 bit time stamp and 32 bit length field of raw
+Each report can be of variable length preceded by a header. This header
+consists of a 32-bit usage id, 64-bit time stamp and 32-bit length field of raw
data.
diff --git a/Documentation/hid/hid-transport.rst b/Documentation/hid/hid-transport.rst
index 0fe526f36db6..6f1692da296c 100644
--- a/Documentation/hid/hid-transport.rst
+++ b/Documentation/hid/hid-transport.rst
@@ -12,8 +12,8 @@ Bluetooth, I2C and user-space I/O drivers.
The HID subsystem is designed as a bus. Any I/O subsystem may provide HID
devices and register them with the HID bus. HID core then loads generic device
-drivers on top of it. The transport drivers are responsible of raw data
-transport and device setup/management. HID core is responsible of
+drivers on top of it. The transport drivers are responsible for raw data
+transport and device setup/management. HID core is responsible for
report-parsing, report interpretation and the user-space API. Device specifics
and quirks are handled by all layers depending on the quirk.
@@ -67,7 +67,7 @@ Transport drivers attach a constant "struct hid_ll_driver" object with each
device. Once a device is registered with HID core, the callbacks provided via
this struct are used by HID core to communicate with the device.
-Transport drivers are responsible of detecting device failures and unplugging.
+Transport drivers are responsible for detecting device failures and unplugging.
HID core will operate a device as long as it is registered regardless of any
device failures. Once transport drivers detect unplug or failure events, they
must unregister the device from HID core and HID core will stop using the
@@ -101,7 +101,7 @@ properties in common.
channel. Any unrequested incoming or outgoing data report must be sent on
this channel and is never acknowledged by the remote side. Devices usually
send their input events on this channel. Outgoing events are normally
- not send via intr, except if high throughput is required.
+ not sent via intr, except if high throughput is required.
- Control Channel (ctrl): The ctrl channel is used for synchronous requests and
device management. Unrequested data input events must not be sent on this
channel and are normally ignored. Instead, devices only send management
@@ -161,7 +161,7 @@ allowed on the intr channel and are the only means of data there.
payload may be blocked by the underlying transport driver if the
specification does not allow them.
- SET_REPORT: A SET_REPORT request has a report ID plus data as payload. It is
- sent from host to device and a device must update it's current report state
+ sent from host to device and a device must update its current report state
according to the given data. Any of the 3 report types can be used. However,
INPUT reports as payload might be blocked by the underlying transport driver
if the specification does not allow them.
@@ -294,7 +294,7 @@ The available HID callbacks are:
void (*request) (struct hid_device *hdev, struct hid_report *report,
int reqtype)
- Send an HID request on the ctrl channel. "report" contains the report that
+ Send a HID request on the ctrl channel. "report" contains the report that
should be sent and "reqtype" the request type. Request-type can be
HID_REQ_SET_REPORT or HID_REQ_GET_REPORT.
diff --git a/Documentation/hid/hiddev.rst b/Documentation/hid/hiddev.rst
index 9b28a97c03e6..caebc6266603 100644
--- a/Documentation/hid/hiddev.rst
+++ b/Documentation/hid/hiddev.rst
@@ -27,7 +27,7 @@ the following::
--> hiddev.c ----> POWER / MONITOR CONTROL
In addition, other subsystems (apart from USB) can potentially feed
-events into the input subsystem, but these have no effect on the hid
+events into the input subsystem, but these have no effect on the HID
device interface.
Using the HID Device Interface
@@ -73,7 +73,7 @@ The hiddev API uses a read() interface, and a set of ioctl() calls.
HID devices exchange data with the host computer using data
bundles called "reports". Each report is divided into "fields",
each of which can have one or more "usages". In the hid-core,
-each one of these usages has a single signed 32 bit value.
+each one of these usages has a single signed 32-bit value.
read():
-------
@@ -113,7 +113,7 @@ HIDIOCAPPLICATION
- (none)
This ioctl call returns the HID application usage associated with the
-hid device. The third argument to ioctl() specifies which application
+HID device. The third argument to ioctl() specifies which application
index to get. This is useful when the device has more than one
application collection. If the index is invalid (greater or equal to
the number of application collections this device has) the ioctl
@@ -181,7 +181,7 @@ looked up by type (input, output or feature) and id, so these fields
must be filled in by the user. The ID can be absolute -- the actual
report id as reported by the device -- or relative --
HID_REPORT_ID_FIRST for the first report, and (HID_REPORT_ID_NEXT |
-report_id) for the next report after report_id. Without a-priori
+report_id) for the next report after report_id. Without a priori
information about report ids, the right way to use this ioctl is to
use the relative IDs above to enumerate the valid IDs. The ioctl
returns non-zero when there is no more next ID. The real report ID is
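Assuming this refers to the HIDIOCGREPORTINFO ioctl and struct
hiddev_report_info from <linux/hiddev.h>, a rough sketch of that enumeration
loop is::

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/hiddev.h>

    /* Walk the input report IDs using the relative-ID scheme described above. */
    static void list_input_reports(int fd)
    {
            struct hiddev_report_info rinfo = {
                    .report_type = HID_REPORT_TYPE_INPUT,
                    .report_id   = HID_REPORT_ID_FIRST,
            };

            while (ioctl(fd, HIDIOCGREPORTINFO, &rinfo) == 0) {
                    /* rinfo.report_id now holds the real report ID */
                    printf("input report %u: %u field(s)\n",
                           rinfo.report_id, rinfo.num_fields);
                    rinfo.report_id |= HID_REPORT_ID_NEXT;
            }
    }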
@@ -200,7 +200,7 @@ HIDIOCGUCODE
- struct hiddev_usage_ref (read/write)
Returns the usage_code in a hiddev_usage_ref structure, given that
-given its report type, report id, field index, and index within the
+its report type, report id, field index, and index within the
field have already been filled into the structure.
HIDIOCGUSAGE
diff --git a/Documentation/hid/hidraw.rst b/Documentation/hid/hidraw.rst
index f41c1f0f6252..b717ee5cdaef 100644
--- a/Documentation/hid/hidraw.rst
+++ b/Documentation/hid/hidraw.rst
@@ -21,7 +21,7 @@ Hidraw is the only alternative, short of writing a custom kernel driver, for
these non-conformant devices.
A benefit of hidraw is that its use by userspace applications is independent
-of the underlying hardware type. Currently, Hidraw is implemented for USB
+of the underlying hardware type. Currently, hidraw is implemented for USB
and Bluetooth. In the future, as new hardware bus types are developed which
use the HID specification, hidraw will be expanded to add support for these
new bus types.
@@ -31,9 +31,10 @@ create hidraw device nodes. Udev will typically create the device nodes
directly under /dev (eg: /dev/hidraw0). As this location is distribution-
and udev rule-dependent, applications should use libudev to locate hidraw
devices attached to the system. There is a tutorial on libudev with a
-working example at:
+working example at::
http://www.signal11.us/oss/udev/
+ https://web.archive.org/web/2019*/www.signal11.us
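A minimal libudev sketch along these lines can enumerate the hidraw nodes present on a system (link with -ludev; error handling omitted for brevity)::

    #include <stdio.h>
    #include <libudev.h>

    int main(void)
    {
            struct udev *udev = udev_new();
            struct udev_enumerate *en = udev_enumerate_new(udev);
            struct udev_list_entry *entry;

            /* Ask udev for every device in the "hidraw" subsystem. */
            udev_enumerate_add_match_subsystem(en, "hidraw");
            udev_enumerate_scan_devices(en);

            udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(en)) {
                    struct udev_device *dev = udev_device_new_from_syspath(udev,
                                    udev_list_entry_get_name(entry));
                    printf("%s\n", udev_device_get_devnode(dev));
                    udev_device_unref(dev);
            }

            udev_enumerate_unref(en);
            udev_unref(udev);
            return 0;
    }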
The HIDRAW API
---------------
diff --git a/Documentation/hid/intel-ish-hid.rst b/Documentation/hid/intel-ish-hid.rst
index d4785cf6eefd..f6ce44ff611d 100644
--- a/Documentation/hid/intel-ish-hid.rst
+++ b/Documentation/hid/intel-ish-hid.rst
@@ -4,19 +4,19 @@ Intel Integrated Sensor Hub (ISH)
A sensor hub enables the ability to offload sensor polling and algorithm
processing to a dedicated low power co-processor. This allows the core
-processor to go into low power modes more often, resulting in the increased
+processor to go into low power modes more often, resulting in increased
battery life.
-There are many vendors providing external sensor hubs confirming to HID
-Sensor usage tables, and used in several tablets, 2 in 1 convertible laptops
-and embedded products. Linux had this support since Linux 3.9.
+There are many vendors providing external sensor hubs conforming to HID
+Sensor usage tables. These may be found in tablets, 2-in-1 convertible laptops
+and embedded products. Linux has had this support since Linux 3.9.
Intel® introduced integrated sensor hubs as a part of the SoC starting from
Cherry Trail and now supported on multiple generations of CPU packages. There
are many commercial devices already shipped with Integrated Sensor Hubs (ISH).
-These ISH also comply to HID sensor specification, but the difference is the
+These ISH also comply to HID sensor specification, but the difference is the
transport protocol used for communication. The current external sensor hubs
-mainly use HID over i2C or USB. But ISH doesn't use either i2c or USB.
+mainly use HID over I2C or USB. But ISH doesn't use either I2C or USB.
1. Overview
===========
@@ -35,7 +35,7 @@ for a very high speed communication::
----------------- ----------------------
PCI PCI
----------------- ----------------------
- |Host controller| --> | ISH processor |
+ |Host controller| --> | ISH processor |
----------------- ----------------------
USB Link
----------------- ----------------------
@@ -50,13 +50,13 @@ applications implemented in the firmware.
The ISH allows multiple sensor management applications executing in the
firmware. Like USB endpoints the messaging can be to/from a client. As part of
enumeration process, these clients are identified. These clients can be simple
-HID sensor applications, sensor calibration application or senor firmware
-update application.
+HID sensor applications, sensor calibration applications or sensor firmware
+update applications.
The implementation model is similar, like USB bus, ISH transport is also
implemented as a bus. Each client application executing in the ISH processor
is registered as a device on this bus. The driver, which binds each device
-(ISH HID driver) identifies the device type and registers with the hid core.
+(ISH HID driver) identifies the device type and registers with the HID core.
2. ISH Implementation: Block Diagram
====================================
@@ -104,7 +104,7 @@ is registered as a device on this bus. The driver, which binds each device
The ISH is exposed as "Non-VGA unclassified PCI device" to the host. The PCI
product and vendor IDs are changed from different generations of processors. So
-the source code which enumerate drivers needs to update from generation to
+the source code which enumerates drivers needs to update from generation to
generation.
3.2 Inter Processor Communication (IPC) driver
@@ -112,41 +112,42 @@ generation.
Location: drivers/hid/intel-ish-hid/ipc
-The IPC message used memory mapped I/O. The registers are defined in
+The IPC message uses memory mapped I/O. The registers are defined in
hw-ish-regs.h.
3.2.1 IPC/FW message types
^^^^^^^^^^^^^^^^^^^^^^^^^^
-There are two types of messages, one for management of link and other messages
-are to and from transport layers.
+There are two types of messages, one for management of link and another for
+messages to and from transport layers.
TX and RX of Transport messages
...............................
-A set of memory mapped register offers support of multi byte messages TX and
-RX (E.g.IPC_REG_ISH2HOST_MSG, IPC_REG_HOST2ISH_MSG). The IPC layer maintains
-internal queues to sequence messages and send them in order to the FW.
+A set of memory mapped registers offers support for multi-byte message TX and
+RX (e.g. IPC_REG_ISH2HOST_MSG, IPC_REG_HOST2ISH_MSG). The IPC layer maintains
+internal queues to sequence messages and send them in order to the firmware.
Optionally the caller can register handler to get notification of completion.
-A door bell mechanism is used in messaging to trigger processing in host and
+A doorbell mechanism is used in messaging to trigger processing in host and
client firmware side. When ISH interrupt handler is called, the ISH2HOST
doorbell register is used by host drivers to determine that the interrupt
is for ISH.
Each side has 32 32-bit message registers and a 32-bit doorbell. Doorbell
-register has the following format:
-Bits 0..6: fragment length (7 bits are used)
-Bits 10..13: encapsulated protocol
-Bits 16..19: management command (for IPC management protocol)
-Bit 31: doorbell trigger (signal H/W interrupt to the other side)
-Other bits are reserved, should be 0.
+register has the following format::
+
+ Bits 0..6: fragment length (7 bits are used)
+ Bits 10..13: encapsulated protocol
+ Bits 16..19: management command (for IPC management protocol)
+ Bit 31: doorbell trigger (signal H/W interrupt to the other side)
+ Other bits are reserved, should be 0.
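Schematically, packing a doorbell value could look like the following C fragment; the macro and helper names here are illustrative only, the real masks are defined in hw-ish-regs.h and may differ::

    #include <stdint.h>

    #define DB_LENGTH(v)    ((v) & 0x7f)          /* bits 0..6   */
    #define DB_PROTOCOL(v)  (((v) >> 10) & 0xf)   /* bits 10..13 */
    #define DB_MGMT_CMD(v)  (((v) >> 16) & 0xf)   /* bits 16..19 */
    #define DB_TRIGGER      (1u << 31)            /* bit 31      */

    static uint32_t doorbell_pack(uint32_t len, uint32_t proto, uint32_t cmd)
    {
            /* Reserved bits stay zero; bit 31 signals the other side. */
            return (len & 0x7f) | ((proto & 0xf) << 10) |
                   ((cmd & 0xf) << 16) | DB_TRIGGER;
    }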
3.2.2 Transport layer interface
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-To abstract HW level IPC communication, a set of callbacks are registered.
+To abstract HW level IPC communication, a set of callbacks is registered.
The transport layer uses them to send and receive messages.
-Refer to struct ishtp_hw_ops for callbacks.
+Refer to struct ishtp_hw_ops for callbacks.
3.3 ISH Transport layer
-----------------------
@@ -158,7 +159,7 @@ Location: drivers/hid/intel-ish-hid/ishtp/
The transport layer is a bi-directional protocol, which defines:
- Set of commands to start, stop, connect, disconnect and flow control
-(ishtp/hbm.h) for details
+(see ishtp/hbm.h for details)
- A flow control mechanism to avoid buffer overflows
This protocol resembles bus messages described in the following document:
@@ -168,14 +169,14 @@ specifications/dcmi-hi-1-0-spec.pdf "Chapter 7: Bus Message Layer"
3.3.2 Connection and Flow Control Mechanism
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Each FW client and a protocol is identified by an UUID. In order to communicate
+Each FW client and a protocol is identified by a UUID. In order to communicate
to a FW client, a connection must be established using connect request and
response bus messages. If successful, a pair (host_client_id and fw_client_id)
will identify the connection.
Once connection is established, peers send each other flow control bus messages
independently. Every peer may send a message only if it has received a
-flow-control credit before. Once it sent a message, it may not send another one
+flow-control credit before. Once it has sent a message, it may not send another one
before receiving the next flow control credit.
Either side can send disconnect request bus message to end communication. Also
the link will be dropped if major FW reset occurs.
@@ -209,7 +210,7 @@ and DMA_XFER_ACK act as ownership indicators.
At initial state all outgoing memory belongs to the sender (TX to host, RX to
FW), DMA_XFER transfers ownership on the region that contains ISHTP message to
the receiving side, DMA_XFER_ACK returns ownership to the sender. A sender
-needs not wait for previous DMA_XFER to be ack'ed, and may send another message
+need not wait for previous DMA_XFER to be ack'ed, and may send another message
as long as remaining continuous memory in its ownership is enough.
In principle, multiple DMA_XFER and DMA_XFER_ACK messages may be sent at once
(up to IPC MTU), thus allowing for interrupt throttling.
@@ -219,8 +220,8 @@ fragments and via IPC otherwise.
3.3.4 Ring Buffers
^^^^^^^^^^^^^^^^^^
-When a client initiate a connection, a ring or RX and TX buffers are allocated.
-The size of ring can be specified by the client. HID client set 16 and 32 for
+When a client initiates a connection, a ring of RX and TX buffers is allocated.
+The size of the ring can be specified by the client. The HID client sets 16 and 32 for
TX and RX buffers respectively. On send request from client, the data to be
sent is copied to one of the send ring buffer and scheduled to be sent using
bus message protocol. These buffers are required because the FW may have not
@@ -230,10 +231,10 @@ to send. Same thing holds true on receive side and flow control is required.
3.3.5 Host Enumeration
^^^^^^^^^^^^^^^^^^^^^^
-The host enumeration bus command allow discovery of clients present in the FW.
+The host enumeration bus command allows discovery of clients present in the FW.
There can be multiple sensor clients and clients for calibration function.
-To ease in implantation and allow independent driver handle each client
+To ease implementation and allow independent drivers to handle each client,
this transport layer takes advantage of Linux Bus driver model. Each
client is registered as device on the transport bus (ishtp bus).
@@ -270,7 +271,7 @@ The ISHTP client driver is responsible for:
The functionality in these drivers is the same as an external sensor hub.
Refer to
Documentation/hid/hid-sensor.rst for HID sensor
-Documentation/ABI/testing/sysfs-bus-iio for IIO ABIs to user space
+Documentation/ABI/testing/sysfs-bus-iio for IIO ABIs to user space.
3.6 End to End HID transport Sequence Diagram
---------------------------------------------
@@ -341,9 +342,10 @@ Documentation/ABI/testing/sysfs-bus-iio for IIO ABIs to user space
3.7 ISH Debugging
-----------------
-To debug ISH, event tracing mechanism is used. To enable debug logs
-echo 1 > /sys/kernel/debug/tracing/events/intel_ish/enable
-cat sys/kernel/debug/tracing/trace
+To debug ISH, event tracing mechanism is used. To enable debug logs::
+
+ echo 1 > /sys/kernel/debug/tracing/events/intel_ish/enable
+ cat sys/kernel/debug/tracing/trace
3.8 ISH IIO sysfs Example on Lenovo thinkpad Yoga 260
-----------------------------------------------------
diff --git a/Documentation/hid/uhid.rst b/Documentation/hid/uhid.rst
index b18cb96c885f..2243a6b75914 100644
--- a/Documentation/hid/uhid.rst
+++ b/Documentation/hid/uhid.rst
@@ -3,7 +3,7 @@ UHID - User-space I/O driver support for HID subsystem
======================================================
UHID allows user-space to implement HID transport drivers. Please see
-hid-transport.txt for an introduction into HID transport drivers. This document
+hid-transport.rst for an introduction into HID transport drivers. This document
relies heavily on the definitions declared there.
With UHID, a user-space transport driver can create kernel hid-devices for each
@@ -15,7 +15,7 @@ There is an example user-space application in ./samples/uhid/uhid-example.c
The UHID API
------------
-UHID is accessed through a character misc-device. The minor-number is allocated
+UHID is accessed through a character misc-device. The minor number is allocated
dynamically so you need to rely on udev (or similar) to create the device node.
This is /dev/uhid by default.
@@ -45,23 +45,23 @@ The "type" field defines the payload. For each type, there is a
payload-structure available in the union "u" (except for empty payloads). This
payload contains management and/or device data.
-The first thing you should do is sending an UHID_CREATE2 event. This will
-register the device. UHID will respond with an UHID_START event. You can now
+The first thing you should do is send a UHID_CREATE2 event. This will
+register the device. UHID will respond with a UHID_START event. You can now
start sending data to and reading data from UHID. However, unless UHID sends the
UHID_OPEN event, the internally attached HID Device Driver has no user attached.
That is, you might put your device asleep unless you receive the UHID_OPEN
event. If you receive the UHID_OPEN event, you should start I/O. If the last
-user closes the HID device, you will receive an UHID_CLOSE event. This may be
-followed by an UHID_OPEN event again and so on. There is no need to perform
+user closes the HID device, you will receive a UHID_CLOSE event. This may be
+followed by a UHID_OPEN event again and so on. There is no need to perform
reference-counting in user-space. That is, you will never receive multiple
-UHID_OPEN events without an UHID_CLOSE event. The HID subsystem performs
+UHID_OPEN events without a UHID_CLOSE event. The HID subsystem performs
ref-counting for you.
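A minimal sketch of the UHID_CREATE2 step might look as follows; the report descriptor bytes, device name and the vendor/product IDs are placeholders only::

    #include <string.h>
    #include <unistd.h>
    #include <linux/uhid.h>

    static int uhid_create(int fd)
    {
            /* Placeholder descriptor fragment; a real driver supplies the
             * full report descriptor of the device it is bridging. */
            static const unsigned char rdesc[] = { 0x05, 0x01, 0x09, 0x02 };
            struct uhid_event ev;

            memset(&ev, 0, sizeof(ev));
            ev.type = UHID_CREATE2;
            strcpy((char *)ev.u.create2.name, "example-uhid-device");
            memcpy(ev.u.create2.rd_data, rdesc, sizeof(rdesc));
            ev.u.create2.rd_size = sizeof(rdesc);
            ev.u.create2.bus = BUS_USB;
            ev.u.create2.vendor = 0x15d9;   /* placeholder */
            ev.u.create2.product = 0x0a37;  /* placeholder */

            return write(fd, &ev, sizeof(ev)) < 0 ? -1 : 0;
    }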
You may decide to ignore UHID_OPEN/UHID_CLOSE, though. I/O is allowed even
though the device may have no users.
If you want to send data on the interrupt channel to the HID subsystem, you send
-an HID_INPUT2 event with your raw data payload. If the kernel wants to send data
-on the interrupt channel to the device, you will read an UHID_OUTPUT event.
+a HID_INPUT2 event with your raw data payload. If the kernel wants to send data
+on the interrupt channel to the device, you will read a UHID_OUTPUT event.
Data requests on the control channel are currently limited to GET_REPORT and
SET_REPORT (no other data reports on the control channel are defined so far).
Those requests are always synchronous. That means, the kernel sends
@@ -71,7 +71,7 @@ the response via UHID_GET_REPORT_REPLY and UHID_SET_REPORT_REPLY to the kernel.
The kernel blocks internal driver-execution during such round-trips (times out
after a hard-coded period).
-If your device disconnects, you should send an UHID_DESTROY event. This will
+If your device disconnects, you should send a UHID_DESTROY event. This will
unregister the device. You can now send UHID_CREATE2 again to register a new
device.
If you close() the fd, the device is automatically unregistered and destroyed
@@ -125,7 +125,7 @@ UHID_START:
This is sent when the HID device is started. Consider this as an answer to
UHID_CREATE2. This is always the first event that is sent. Note that this
event might not be available immediately after write(UHID_CREATE2) returns.
- Device drivers might required delayed setups.
+ Device drivers might require delayed setups.
This event contains a payload of type uhid_start_req. The "dev_flags" field
describes special behaviors of a device. The following flags are defined:
@@ -149,7 +149,7 @@ UHID_STOP:
reloaded/changed the device driver loaded on your HID device (or some other
maintenance actions happened).
- You can usually ignored any UHID_STOP events safely.
+ You can usually ignore any UHID_STOP events safely.
UHID_OPEN:
This is sent when the HID device is opened. That is, the data that the HID
@@ -166,17 +166,17 @@ UHID_OUTPUT:
This is sent if the HID device driver wants to send raw data to the I/O
device on the interrupt channel. You should read the payload and forward it to
the device. The payload is of type "struct uhid_output_req".
- This may be received even though you haven't received UHID_OPEN, yet.
+ This may be received even though you haven't received UHID_OPEN yet.
UHID_GET_REPORT:
This event is sent if the kernel driver wants to perform a GET_REPORT request
- on the control channeld as described in the HID specs. The report-type and
+ on the control channel as described in the HID specs. The report-type and
report-number are available in the payload.
The kernel serializes GET_REPORT requests so there will never be two in
parallel. However, if you fail to respond with a UHID_GET_REPORT_REPLY, the
request might silently time out.
- Once you read a GET_REPORT request, you shall forward it to the hid device and
- remember the "id" field in the payload. Once your hid device responds to the
+ Once you read a GET_REPORT request, you shall forward it to the HID device and
+ remember the "id" field in the payload. Once your HID device responds to the
GET_REPORT (or if it fails), you must send a UHID_GET_REPORT_REPLY to the
kernel with the exact same "id" as in the request. If the request already
timed out, the kernel will ignore the response silently. The "id" field is
@@ -184,7 +184,7 @@ UHID_GET_REPORT:
UHID_SET_REPORT:
This is the SET_REPORT equivalent of UHID_GET_REPORT. On receipt, you shall
- send a SET_REPORT request to your hid device. Once it replies, you must tell
+ send a SET_REPORT request to your HID device. Once it replies, you must tell
the kernel about it via UHID_SET_REPORT_REPLY.
The same restrictions as for UHID_GET_REPORT apply.
diff --git a/Documentation/hwmon/ab8500.rst b/Documentation/hwmon/ab8500.rst
deleted file mode 100644
index 33f93a9cec04..000000000000
--- a/Documentation/hwmon/ab8500.rst
+++ /dev/null
@@ -1,26 +0,0 @@
-Kernel driver ab8500
-====================
-
-Supported chips:
-
- * ST-Ericsson AB8500
-
- Prefix: 'ab8500'
-
- Addresses scanned: -
-
- Datasheet: http://www.stericsson.com/developers/documentation.jsp
-
-Authors:
- - Martin Persson <martin.persson@stericsson.com>
- - Hongbo Zhang <hongbo.zhang@linaro.org>
-
-Description
------------
-
-See also Documentation/hwmon/abx500.rst. This is the ST-Ericsson AB8500 specific
-driver.
-
-Currently only the AB8500 internal sensor and one external sensor for battery
-temperature are monitored. Other GPADC channels can also be monitored if needed
-in future.
diff --git a/Documentation/hwmon/abx500.rst b/Documentation/hwmon/abx500.rst
deleted file mode 100644
index 3d88b2ce0f00..000000000000
--- a/Documentation/hwmon/abx500.rst
+++ /dev/null
@@ -1,32 +0,0 @@
-Kernel driver abx500
-====================
-
-Supported chips:
-
- * ST-Ericsson ABx500 series
-
- Prefix: 'abx500'
-
- Addresses scanned: -
-
- Datasheet: http://www.stericsson.com/developers/documentation.jsp
-
-Authors:
- Martin Persson <martin.persson@stericsson.com>
- Hongbo Zhang <hongbo.zhang@linaro.org>
-
-Description
------------
-
-Every ST-Ericsson Ux500 SOC consists of both ABx500 and DBx500 physically,
-this is kernel hwmon driver for ABx500.
-
-There are some GPADCs inside ABx500 which are designed for connecting to
-thermal sensors, and there is also a thermal sensor inside ABx500 too, which
-raises interrupt when critical temperature reached.
-
-This abx500 is a common layer which can monitor all of the sensors, every
-specific abx500 chip has its special configurations in its own file, e.g. some
-sensors can be configured invisible if they are not available on that chip, and
-the corresponding gpadc_addr should be set to 0, thus this sensor won't be
-polled.
diff --git a/Documentation/hwmon/aht10.rst b/Documentation/hwmon/aht10.rst
new file mode 100644
index 000000000000..482262ca117c
--- /dev/null
+++ b/Documentation/hwmon/aht10.rst
@@ -0,0 +1,46 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver aht10
+=====================
+
+Supported chips:
+
+ * Aosong AHT10
+
+ Prefix: 'aht10'
+
+ Addresses scanned: None
+
+ Datasheet:
+
+ Chinese: http://www.aosong.com/userfiles/files/media/AHT10%E4%BA%A7%E5%93%81%E6%89%8B%E5%86%8C%20A3%2020201210.pdf
+ English: https://server4.eca.ir/eshop/AHT10/Aosong_AHT10_en_draft_0c.pdf
+
+Author: Johannes Cornelis Draaijer <jcdra1@gmail.com>
+
+
+Description
+-----------
+
+The AHT10 is a temperature and humidity sensor.
+
+The address of this i2c device may only be 0x38.
+
+Usage Notes
+-----------
+
+This driver does not probe for AHT10 devices, as there is no reliable
+way to determine if an i2c chip is or isn't an AHT10. The device has
+to be instantiated explicitly with the address 0x38. See
+Documentation/i2c/instantiating-devices.rst for details.
+
+Sysfs entries
+-------------
+
+=============== ============================================
+temp1_input     Measured temperature in millidegrees Celsius
+humidity1_input Measured humidity in %H
+update_interval The minimum interval for polling the sensor,
+ in milliseconds. Writable. Must be at
+ least 2000.
+=============== ============================================
diff --git a/Documentation/hwmon/ina2xx.rst b/Documentation/hwmon/ina2xx.rst
index f78a5cd44c4c..27d2e39bc8ac 100644
--- a/Documentation/hwmon/ina2xx.rst
+++ b/Documentation/hwmon/ina2xx.rst
@@ -74,7 +74,7 @@ bus supply voltage.
The shunt value in micro-ohms can be set via platform data or device tree at
compile-time or via the shunt_resistor attribute in sysfs at run-time. Please
-refer to the Documentation/devicetree/bindings/hwmon/ina2xx.txt for bindings
+refer to the Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml for bindings
if the device tree is used.
Additionally ina226 supports update_interval attribute as described in
diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst
index fcb870ce6286..8d5a2df1ecb6 100644
--- a/Documentation/hwmon/index.rst
+++ b/Documentation/hwmon/index.rst
@@ -18,10 +18,8 @@ Hardware Monitoring Kernel Drivers
.. toctree::
:maxdepth: 1
- ab8500
abituguru
abituguru3
- abx500
acpi_power_meter
ad7314
adc128d818
@@ -39,6 +37,7 @@ Hardware Monitoring Kernel Drivers
adt7462
adt7470
adt7475
+ aht10
amc6821
amd_energy
asb100
@@ -178,6 +177,7 @@ Hardware Monitoring Kernel Drivers
tmp401
tmp421
tmp513
+ tps23861
tps40422
tps53679
twl4030-madc-hwmon
diff --git a/Documentation/hwmon/max16601.rst b/Documentation/hwmon/max16601.rst
index 346e74674c51..92c0a7d7808c 100644
--- a/Documentation/hwmon/max16601.rst
+++ b/Documentation/hwmon/max16601.rst
@@ -5,6 +5,14 @@ Kernel driver max16601
Supported chips:
+ * Maxim MAX16508
+
+ Prefix: 'max16508'
+
+ Addresses scanned: -
+
+ Datasheet: Not published
+
* Maxim MAX16601
Prefix: 'max16601'
@@ -19,8 +27,8 @@ Author: Guenter Roeck <linux@roeck-us.net>
Description
-----------
-This driver supports the MAX16601 VR13.HC Dual-Output Voltage Regulator
-Chipset.
+This driver supports the MAX16508 VR13 Dual-Output Voltage Regulator
+as well as the MAX16601 VR13.HC Dual-Output Voltage Regulator chipsets.
The driver is a client driver to the core PMBus driver.
Please see Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
@@ -45,115 +53,76 @@ Sysfs entries
The following attributes are supported.
-======================= =======================================================
-in1_label "vin1"
-in1_input VCORE input voltage.
-in1_alarm Input voltage alarm.
-
-in2_label "vout1"
-in2_input VCORE output voltage.
-in2_alarm Output voltage alarm.
-
-curr1_label "iin1"
-curr1_input VCORE input current, derived from duty cycle and output
- current.
-curr1_max Maximum input current.
-curr1_max_alarm Current high alarm.
-
-curr2_label "iin1.0"
-curr2_input VCORE phase 0 input current.
-
-curr3_label "iin1.1"
-curr3_input VCORE phase 1 input current.
-
-curr4_label "iin1.2"
-curr4_input VCORE phase 2 input current.
-
-curr5_label "iin1.3"
-curr5_input VCORE phase 3 input current.
-
-curr6_label "iin1.4"
-curr6_input VCORE phase 4 input current.
-
-curr7_label "iin1.5"
-curr7_input VCORE phase 5 input current.
-
-curr8_label "iin1.6"
-curr8_input VCORE phase 6 input current.
-
-curr9_label "iin1.7"
-curr9_input VCORE phase 7 input current.
-
-curr10_label "iin2"
-curr10_input VCORE input current, derived from sensor element.
-
-curr11_label "iin3"
-curr11_input VSA input current.
-
-curr12_label "iout1"
-curr12_input VCORE output current.
-curr12_crit Critical output current.
-curr12_crit_alarm Output current critical alarm.
-curr12_max Maximum output current.
-curr12_max_alarm Output current high alarm.
-
-curr13_label "iout1.0"
-curr13_input VCORE phase 0 output current.
-
-curr14_label "iout1.1"
-curr14_input VCORE phase 1 output current.
-
-curr15_label "iout1.2"
-curr15_input VCORE phase 2 output current.
-
-curr16_label "iout1.3"
-curr16_input VCORE phase 3 output current.
-
-curr17_label "iout1.4"
-curr17_input VCORE phase 4 output current.
-
-curr18_label "iout1.5"
-curr18_input VCORE phase 5 output current.
-
-curr19_label "iout1.6"
-curr19_input VCORE phase 6 output current.
-
-curr20_label "iout1.7"
-curr20_input VCORE phase 7 output current.
-
-curr21_label "iout3"
-curr21_input VSA output current.
-curr21_highest Historical maximum VSA output current.
-curr21_reset_history Write any value to reset curr21_highest.
-curr21_crit Critical output current.
-curr21_crit_alarm Output current critical alarm.
-curr21_max Maximum output current.
-curr21_max_alarm Output current high alarm.
-
-power1_label "pin1"
-power1_input Input power, derived from duty cycle and output current.
-power1_alarm Input power alarm.
-
-power2_label "pin2"
-power2_input Input power, derived from input current sensor.
-
-power3_label "pout"
-power3_input Output power.
-
-temp1_input VCORE temperature.
-temp1_crit Critical high temperature.
-temp1_crit_alarm Chip temperature critical high alarm.
-temp1_max Maximum temperature.
-temp1_max_alarm Chip temperature high alarm.
-
-temp2_input TSENSE_0 temperature
-temp3_input TSENSE_1 temperature
-temp4_input TSENSE_2 temperature
-temp5_input TSENSE_3 temperature
-
-temp6_input VSA temperature.
-temp6_crit Critical high temperature.
-temp6_crit_alarm Chip temperature critical high alarm.
-temp6_max Maximum temperature.
-temp6_max_alarm Chip temperature high alarm.
-======================= =======================================================
+=============================== ===============================================
+in1_label "vin1"
+in1_input VCORE input voltage.
+in1_alarm Input voltage alarm.
+
+in2_label "vout1"
+in2_input VCORE output voltage.
+in2_alarm Output voltage alarm.
+
+curr1_label "iin1"
+curr1_input VCORE input current, derived from duty cycle
+ and output current.
+curr1_max Maximum input current.
+curr1_max_alarm Current high alarm.
+
+curr[P+2]_label "iin1.P"
+curr[P+2]_input VCORE phase P input current.
+
+curr[N+2]_label "iin2"
+curr[N+2]_input VCORE input current, derived from sensor
+ element.
+ 'N' is the number of enabled/populated phases.
+
+curr[N+3]_label "iin3"
+curr[N+3]_input VSA input current.
+
+curr[N+4]_label "iout1"
+curr[N+4]_input VCORE output current.
+curr[N+4]_crit Critical output current.
+curr[N+4]_crit_alarm Output current critical alarm.
+curr[N+4]_max Maximum output current.
+curr[N+4]_max_alarm Output current high alarm.
+
+curr[N+P+5]_label "iout1.P"
+curr[N+P+5]_input VCORE phase P output current.
+
+curr[2*N+5]_label "iout3"
+curr[2*N+5]_input VSA output current.
+curr[2*N+5]_highest Historical maximum VSA output current.
+curr[2*N+5]_reset_history       Write any value to reset curr[2*N+5]_highest.
+curr[2*N+5]_crit Critical output current.
+curr[2*N+5]_crit_alarm Output current critical alarm.
+curr[2*N+5]_max Maximum output current.
+curr[2*N+5]_max_alarm Output current high alarm.
+
+power1_label "pin1"
+power1_input Input power, derived from duty cycle and output
+ current.
+power1_alarm Input power alarm.
+
+power2_label "pin2"
+power2_input Input power, derived from input current sensor.
+
+power3_label "pout"
+power3_input Output power.
+
+temp1_input VCORE temperature.
+temp1_crit Critical high temperature.
+temp1_crit_alarm Chip temperature critical high alarm.
+temp1_max Maximum temperature.
+temp1_max_alarm Chip temperature high alarm.
+
+temp2_input TSENSE_0 temperature
+temp3_input TSENSE_1 temperature
+temp4_input TSENSE_2 temperature
+temp5_input TSENSE_3 temperature
+
+temp6_input VSA temperature.
+temp6_crit Critical high temperature.
+temp6_crit_alarm Chip temperature critical high alarm.
+temp6_max Maximum temperature.
+temp6_max_alarm Chip temperature high alarm.
+=============================== ===============================================
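As a worked example of the parameterized names above: with all eight VCORE phases populated (N = 8), "iin1.0".."iin1.7" appear as curr2..curr9, "iin2" as curr10, "iin3" as curr11, "iout1" as curr12, "iout1.0".."iout1.7" as curr13..curr20, and "iout3" as curr21, matching the fixed attribute names used previously.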
diff --git a/Documentation/hwmon/nct6683.rst b/Documentation/hwmon/nct6683.rst
index 8646ad519fcd..2e1408d174bd 100644
--- a/Documentation/hwmon/nct6683.rst
+++ b/Documentation/hwmon/nct6683.rst
@@ -61,5 +61,6 @@ Board Firmware version
Intel DH87RL NCT6683D EC firmware version 1.0 build 04/03/13
Intel DH87MC NCT6683D EC firmware version 1.0 build 04/03/13
Intel DB85FL NCT6683D EC firmware version 1.0 build 04/03/13
+ASRock X570 NCT6683D EC firmware version 1.0 build 06/28/19
MSI B550 NCT6687D EC firmware version 1.0 build 05/07/20
=============== ===============================================
diff --git a/Documentation/hwmon/tps23861.rst b/Documentation/hwmon/tps23861.rst
new file mode 100644
index 000000000000..46d121ff3f31
--- /dev/null
+++ b/Documentation/hwmon/tps23861.rst
@@ -0,0 +1,41 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+Kernel driver tps23861
+======================
+
+Supported chips:
+ * Texas Instruments TPS23861
+
+ Prefix: 'tps23861'
+
+ Datasheet: https://www.ti.com/lit/gpn/tps23861
+
+Author: Robert Marko <robert.marko@sartura.hr>
+
+Description
+-----------
+
+This driver supports hardware monitoring for Texas Instruments TPS23861 PoE PSE.
+
+TPS23861 is a quad port IEEE802.3at PSE controller with optional I2C control
+and monitoring capabilities.
+
+TPS23861 offers three modes of operation: Auto, Semi-Auto and Manual.
+
+This driver only supports the Auto mode of operation providing monitoring
+as well as enabling/disabling the four ports.
+
+Sysfs entries
+-------------
+
+======================= =====================================================================
+in[0-3]_input Voltage on ports [1-4]
+in[0-3]_label "Port[1-4]"
+in4_input IC input voltage
+in4_label "Input"
+temp1_input IC die temperature
+temp1_label "Die"
+curr[1-4]_input Current on ports [1-4]
+curr[1-4]_label         "Port[1-4]"
+in[0-3]_enable Enable/disable ports [1-4]
+======================= =====================================================================
diff --git a/Documentation/i2c/slave-testunit-backend.rst b/Documentation/i2c/slave-testunit-backend.rst
index 2c38e64f0bac..ecfc2abec32d 100644
--- a/Documentation/i2c/slave-testunit-backend.rst
+++ b/Documentation/i2c/slave-testunit-backend.rst
@@ -22,8 +22,9 @@ Instantiating the device is regular. Example for bus 0, address 0x30:
After that, you will have a write-only device listening. Reads will just return
an 8-bit version number of the testunit. When writing, the device consists of 4
-8-bit registers and all must be written to start a testcase, i.e. you must
-always write 4 bytes to the device. The registers are:
+8-bit registers and, except for some "partial" commands, all registers must be
+written to start a testcase, i.e. you usually write 4 bytes to the device. The
+registers are:
0x00 CMD - which test to trigger
0x01 DATAL - configuration byte 1 for the test
@@ -67,3 +68,21 @@ status word is currently ignored in the Linux Kernel. Example to send a
notification after 10ms:
# i2cset -y 0 0x30 0x02 0x42 0x64 0x01 i
+
+0x03 SMBUS_BLOCK_PROC_CALL (partial command)
+ DATAL - must be '1', i.e. one further byte will be written
+ DATAH - number of bytes to be sent back
+ DELAY - not applicable, partial command!
+
+This test will respond to a block process call as defined by the SMBus
+specification. The one data byte written specifies how many bytes will be sent
+back in the following read transfer. Note that in this read transfer, the
+testunit will prefix the length of the bytes to follow. So, if your host bus
+driver emulates SMBus calls like the majority does, it needs to support the
+I2C_M_RECV_LEN flag of an i2c_msg. This is a good testcase for it. The returned
+data consists of the length first, and then of an array of bytes from length-1
+to 0. Here is an example which emulates i2c_smbus_block_process_call() using
+i2ctransfer (you need i2c-tools v4.2 or later):
+
+# i2ctransfer -y 0 w3@0x30 0x03 0x01 0x10 r?
+0x10 0x0f 0x0e 0x0d 0x0c 0x0b 0x0a 0x09 0x08 0x07 0x06 0x05 0x04 0x03 0x02 0x01 0x00
diff --git a/Documentation/iio/ep93xx_adc.rst b/Documentation/iio/ep93xx_adc.rst
index 4fd8dea3f6b8..0af0e9040457 100644
--- a/Documentation/iio/ep93xx_adc.rst
+++ b/Documentation/iio/ep93xx_adc.rst
@@ -13,7 +13,7 @@ touchscreen/ADC module.
====================
Numbering scheme for channels 0..4 is defined in EP9301 and EP9302 datasheets.
-EP9307, EP9312 and EP9312 have 3 channels more (total 8), but the numbering is
+EP9307, EP9312 and EP9315 have 3 more channels (total 8), but the numbering is
not defined. So the last three are numbered randomly, let's say.
Assuming ep93xx_adc is IIO device0, you'd find the following entries under
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 5888e8a7272f..31f2adc8542d 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -171,17 +171,6 @@ implementation.
x86/index
xtensa/index
-Filesystem Documentation
-------------------------
-
-The documentation in this section are provided by specific filesystem
-subprojects.
-
-.. toctree::
- :maxdepth: 2
-
- filesystems/ext4/index
-
Other documentation
-------------------
diff --git a/Documentation/input/event-codes.rst b/Documentation/input/event-codes.rst
index b24b5343f5eb..3118fc1c1e26 100644
--- a/Documentation/input/event-codes.rst
+++ b/Documentation/input/event-codes.rst
@@ -236,6 +236,21 @@ A few EV_ABS codes have special meanings:
- Used to describe multitouch input events. Please see
multi-touch-protocol.txt for details.
+* ABS_PRESSURE/ABS_MT_PRESSURE:
+
+ - For touch devices, many devices convert contact size into pressure.
+ A finger flattens with pressure, causing a larger contact area and thus
+ pressure and contact size are directly related. This is not the case
+ for other devices, for example digitizers and touchpads with a true
+ pressure sensor ("pressure pads").
+
+ A device should set the resolution of the axis to indicate whether the
+ pressure is in measurable units. If the resolution is zero, the
+ pressure data is in arbitrary units. If the resolution is nonzero, the
+ pressure data is in units/gram. For example, a value of 10 with a
+ resolution of 1 represents 10 grams, and a value of 10 with a resolution of
+ 1000 represents 10 milligrams.
+
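A small userspace sketch of the resulting conversion, assuming an open evdev fd and using the EVIOCGABS ioctl (illustrative only)::

    #include <linux/input.h>
    #include <sys/ioctl.h>

    static double pressure_in_grams(int fd, int value)
    {
            struct input_absinfo abs;

            /* Zero resolution means the values are in arbitrary units. */
            if (ioctl(fd, EVIOCGABS(ABS_PRESSURE), &abs) < 0 || abs.resolution == 0)
                    return -1.0;

            return (double)value / abs.resolution;  /* resolution is units per gram */
    }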
EV_SW
-----
diff --git a/Documentation/input/multi-touch-protocol.rst b/Documentation/input/multi-touch-protocol.rst
index 307fe22d9668..21c1e6a22888 100644
--- a/Documentation/input/multi-touch-protocol.rst
+++ b/Documentation/input/multi-touch-protocol.rst
@@ -260,6 +260,10 @@ ABS_MT_PRESSURE
of TOUCH and WIDTH for pressure-based devices or any device with a spatial
signal intensity distribution.
+ If the resolution is zero, the pressure data is in arbitrary units.
+ If the resolution is nonzero, the pressure data is in units/gram. See
+ :ref:`input-event-codes` for details.
+
ABS_MT_DISTANCE
The distance, in surface units, between the contact and the surface. Zero
distance means the contact is touching the surface. A positive number means
diff --git a/Documentation/kbuild/gcc-plugins.rst b/Documentation/kbuild/gcc-plugins.rst
index 4b1c10f88e30..3349966f213d 100644
--- a/Documentation/kbuild/gcc-plugins.rst
+++ b/Documentation/kbuild/gcc-plugins.rst
@@ -11,16 +11,13 @@ compiler [1]_. They are useful for runtime instrumentation and static analysis.
We can analyse, change and add further code during compilation via
callbacks [2]_, GIMPLE [3]_, IPA [4]_ and RTL passes [5]_.
-The GCC plugin infrastructure of the kernel supports all gcc versions from
-4.5 to 6.0, building out-of-tree modules, cross-compilation and building in a
-separate directory.
-Plugin source files have to be compilable by both a C and a C++ compiler as well
-because gcc versions 4.5 and 4.6 are compiled by a C compiler,
-gcc-4.7 can be compiled by a C or a C++ compiler,
-and versions 4.8+ can only be compiled by a C++ compiler.
+The GCC plugin infrastructure of the kernel supports building out-of-tree
+modules, cross-compilation and building in a separate directory.
+Plugin source files have to be compilable by a C++ compiler.
-Currently the GCC plugin infrastructure supports only the x86, arm, arm64 and
-powerpc architectures.
+Currently the GCC plugin infrastructure supports only some architectures.
+Grep "select HAVE_GCC_PLUGINS" to find out which architectures support
+GCC plugins.
This infrastructure was ported from grsecurity [6]_ and PaX [7]_.
@@ -47,20 +44,13 @@ Files
This is a compatibility header for GCC plugins.
It should be always included instead of individual gcc headers.
-**$(src)/scripts/gcc-plugin.sh**
-
- This script checks the availability of the included headers in
- gcc-common.h and chooses the proper host compiler to build the plugins
- (gcc-4.7 can be built by either gcc or g++).
-
**$(src)/scripts/gcc-plugins/gcc-generate-gimple-pass.h,
$(src)/scripts/gcc-plugins/gcc-generate-ipa-pass.h,
$(src)/scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h,
$(src)/scripts/gcc-plugins/gcc-generate-rtl-pass.h**
These headers automatically generate the registration structures for
- GIMPLE, SIMPLE_IPA, IPA and RTL passes. They support all gcc versions
- from 4.5 to 6.0.
+ GIMPLE, SIMPLE_IPA, IPA and RTL passes.
They should be preferred to creating the structures by hand.
@@ -68,21 +58,25 @@ Usage
=====
You must install the gcc plugin headers for your gcc version,
-e.g., on Ubuntu for gcc-4.9::
+e.g., on Ubuntu for gcc-10::
- apt-get install gcc-4.9-plugin-dev
+ apt-get install gcc-10-plugin-dev
Or on Fedora::
dnf install gcc-plugin-devel
-Enable a GCC plugin based feature in the kernel config::
+Enable the GCC plugin infrastructure and some plugin(s) you want to use
+in the kernel config::
- CONFIG_GCC_PLUGIN_CYC_COMPLEXITY = y
+ CONFIG_GCC_PLUGINS=y
+ CONFIG_GCC_PLUGIN_CYC_COMPLEXITY=y
+ CONFIG_GCC_PLUGIN_LATENT_ENTROPY=y
+ ...
-To compile only the plugin(s)::
+To compile the minimum tool set including the plugin(s)::
- make gcc-plugins
+ make scripts
or just run the kernel make and compile the whole kernel with
the cyclomatic complexity GCC plugin.
@@ -91,7 +85,8 @@ the cyclomatic complexity GCC plugin.
4. How to add a new GCC plugin
==============================
-The GCC plugins are in $(src)/scripts/gcc-plugins/. You can use a file or a directory
-here. It must be added to $(src)/scripts/gcc-plugins/Makefile,
-$(src)/scripts/Makefile.gcc-plugins and $(src)/arch/Kconfig.
+The GCC plugins are in scripts/gcc-plugins/. You need to put plugin source files
+right under scripts/gcc-plugins/. Creating subdirectories is not supported.
+Each new plugin must be added to scripts/gcc-plugins/Makefile, scripts/Makefile.gcc-plugins
+and a relevant Kconfig file.
See the cyc_complexity_plugin.c (CONFIG_GCC_PLUGIN_CYC_COMPLEXITY) GCC plugin.
diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst
index 21c847890d03..b18401d2ba82 100644
--- a/Documentation/kbuild/llvm.rst
+++ b/Documentation/kbuild/llvm.rst
@@ -63,6 +63,50 @@ They can be enabled individually. The full list of the parameters: ::
Currently, the integrated assembler is disabled by default. You can pass
``LLVM_IAS=1`` to enable it.
+Supported Architectures
+-----------------------
+
+LLVM does not target all of the architectures that Linux supports and
+just because a target is supported in LLVM does not mean that the kernel
+will build or work without any issues. Below is a general summary of
+architectures that currently work with ``CC=clang`` or ``LLVM=1``. Level
+of support corresponds to "S" values in the MAINTAINERS files. If an
+architecture is not present, it either means that LLVM does not target
+it or there are known issues. Using the latest stable version of LLVM or
+even the development tree will generally yield the best results.
+An architecture's ``defconfig`` is generally expected to work well, though
+certain configurations may have problems that have not been uncovered
+yet. Bug reports are always welcome at the issue tracker below!
+
+.. list-table::
+ :widths: 10 10 10
+ :header-rows: 1
+
+ * - Architecture
+ - Level of support
+ - ``make`` command
+ * - arm
+ - Supported
+ - ``LLVM=1``
+ * - arm64
+ - Supported
+ - ``LLVM=1``
+ * - mips
+ - Maintained
+ - ``CC=clang``
+ * - powerpc
+ - Maintained
+ - ``CC=clang``
+ * - riscv
+ - Maintained
+ - ``CC=clang``
+ * - s390
+ - Maintained
+ - ``CC=clang``
+ * - x86
+ - Supported
+ - ``LLVM=1``
+
Getting Help
------------
diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
index 9f6a11881951..db3af0b45baf 100644
--- a/Documentation/kbuild/makefiles.rst
+++ b/Documentation/kbuild/makefiles.rst
@@ -12,7 +12,7 @@ This document describes the Linux kernel Makefiles.
--- 3.1 Goal definitions
--- 3.2 Built-in object goals - obj-y
--- 3.3 Loadable module goals - obj-m
- --- 3.4 Objects which export symbols
+ --- 3.4 <deleted>
--- 3.5 Library file goals - lib-y
--- 3.6 Descending down in directories
--- 3.7 Non-builtin vmlinux targets - extra-y
@@ -247,12 +247,6 @@ more details, with real examples.
kbuild will build an ext2.o file for you out of the individual
parts and then link this into built-in.a, as you would expect.
-3.4 Objects which export symbols
---------------------------------
-
- No special notation is required in the makefiles for
- modules exporting symbols.
-
3.5 Library file goals - lib-y
------------------------------
@@ -461,10 +455,8 @@ more details, with real examples.
# drivers/scsi/Makefile
CFLAGS_aha152x.o = -DAHA152X_STAT -DAUTOCONF
- CFLAGS_gdth.o = # -DDEBUG_GDTH=2 -D__SERIAL__ -D__COM2__ \
- -DGDTH_STATISTICS
- These two lines specify compilation flags for aha152x.o and gdth.o.
+ This line specifies compilation flags for aha152x.o.
$(AFLAGS_$@) is a similar feature for source files in assembly
languages.
@@ -755,7 +747,7 @@ more details, with real examples.
bits on the scripts nonetheless.
Kbuild provides variables $(CONFIG_SHELL), $(AWK), $(PERL),
- $(PYTHON) and $(PYTHON3) to refer to interpreters for the respective
+ and $(PYTHON3) to refer to interpreters for the respective
scripts.
Example::
@@ -1317,7 +1309,6 @@ When kbuild executes, the following steps are followed (roughly):
libs-y += arch/sparc/lib/
drivers-$(CONFIG_PM) += arch/sparc/power/
- drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
7.5 Architecture-specific boot images
-------------------------------------
diff --git a/Documentation/kernel-hacking/hacking.rst b/Documentation/kernel-hacking/hacking.rst
index eed2136d847f..451523424942 100644
--- a/Documentation/kernel-hacking/hacking.rst
+++ b/Documentation/kernel-hacking/hacking.rst
@@ -346,8 +346,8 @@ routine.
Before inventing your own cache of often-used objects consider using a
slab cache in ``include/linux/slab.h``
-:c:func:`current()`
--------------------
+:c:macro:`current`
+------------------
Defined in ``include/asm/current.h``
diff --git a/Documentation/kernel-hacking/locking.rst b/Documentation/kernel-hacking/locking.rst
index c3448929a824..ed1284c6f078 100644
--- a/Documentation/kernel-hacking/locking.rst
+++ b/Documentation/kernel-hacking/locking.rst
@@ -958,7 +958,7 @@ grabs a read lock, searches a list, fails to find what it wants, drops
the read lock, grabs a write lock and inserts the object has a race
condition.
-If you don't see why, please stay the fuck away from my code.
+If you don't see why, please stay away from my code.
Racing Timers: A Kernel Pastime
-------------------------------
diff --git a/Documentation/livepatch/index.rst b/Documentation/livepatch/index.rst
index 525944063be7..43cce5fad705 100644
--- a/Documentation/livepatch/index.rst
+++ b/Documentation/livepatch/index.rst
@@ -13,6 +13,7 @@ Kernel Livepatching
module-elf-format
shadow-vars
system-state
+ reliable-stacktrace
.. only:: subproject and html
diff --git a/Documentation/livepatch/livepatch.rst b/Documentation/livepatch/livepatch.rst
index c2c598c4ead8..68e3651e8af9 100644
--- a/Documentation/livepatch/livepatch.rst
+++ b/Documentation/livepatch/livepatch.rst
@@ -6,20 +6,7 @@ This document outlines basic information about kernel livepatching.
.. Table of Contents:
- 1. Motivation
- 2. Kprobes, Ftrace, Livepatching
- 3. Consistency model
- 4. Livepatch module
- 4.1. New functions
- 4.2. Metadata
- 5. Livepatch life-cycle
- 5.1. Loading
- 5.2. Enabling
- 5.3. Replacing
- 5.4. Disabling
- 5.5. Removing
- 6. Sysfs
- 7. Limitations
+.. contents:: :local:
1. Motivation
diff --git a/Documentation/livepatch/module-elf-format.rst b/Documentation/livepatch/module-elf-format.rst
index 8c6b894c4661..dbe9b400e39f 100644
--- a/Documentation/livepatch/module-elf-format.rst
+++ b/Documentation/livepatch/module-elf-format.rst
@@ -7,14 +7,8 @@ This document outlines the Elf format requirements that livepatch modules must f
.. Table of Contents
- 1. Background and motivation
- 2. Livepatch modinfo field
- 3. Livepatch relocation sections
- 3.1 Livepatch relocation section format
- 4. Livepatch symbols
- 4.1 A livepatch module's symbol table
- 4.2 Livepatch symbol format
- 5. Symbol table and Elf section access
+.. contents:: :local:
+
1. Background and motivation
============================
diff --git a/Documentation/livepatch/reliable-stacktrace.rst b/Documentation/livepatch/reliable-stacktrace.rst
new file mode 100644
index 000000000000..67459d2ca2af
--- /dev/null
+++ b/Documentation/livepatch/reliable-stacktrace.rst
@@ -0,0 +1,309 @@
+===================
+Reliable Stacktrace
+===================
+
+This document outlines basic information about reliable stacktracing.
+
+.. Table of Contents:
+
+.. contents:: :local:
+
+1. Introduction
+===============
+
+The kernel livepatch consistency model relies on accurately identifying which
+functions may have live state and therefore may not be safe to patch. One way
+to identify which functions are live is to use a stacktrace.
+
+Existing stacktrace code may not always give an accurate picture of all
+functions with live state, and best-effort approaches which can be helpful for
+debugging are unsound for livepatching. Livepatching depends on architectures
+to provide a *reliable* stacktrace which ensures it never omits any live
+functions from a trace.
+
+
+2. Requirements
+===============
+
+Architectures must implement one of the reliable stacktrace functions.
+Architectures using CONFIG_ARCH_STACKWALK must implement
+'arch_stack_walk_reliable', and other architectures must implement
+'save_stack_trace_tsk_reliable'.
+
+Principally, the reliable stacktrace function must ensure that either:
+
+* The trace includes all functions that the task may be returned to, and the
+ return code is zero to indicate that the trace is reliable.
+
+* The return code is non-zero to indicate that the trace is not reliable.
+
+.. note::
+ In some cases it is legitimate to omit specific functions from the trace,
+ but all other functions must be reported. These cases are described in
+ further detail below.
+
+Secondly, the reliable stacktrace function must be robust to cases where
+the stack or other unwind state is corrupt or otherwise unreliable. The
+function should attempt to detect such cases and return a non-zero error
+code, and should not get stuck in an infinite loop or access memory in
+an unsafe way. Specific cases are described in further detail below.
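Schematically, and only as a sketch, the contract looks like the skeleton below; unwind_start(), unwind_next(), unwind_done(), frame_is_reliable() and unwind_reached_entry() are hypothetical stand-ins for an architecture's own unwinder helpers::

    /* Schematic skeleton only, not a real implementation. */
    int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
                                 void *cookie, struct task_struct *task)
    {
            struct unwind_state state;

            for (unwind_start(&state, task); !unwind_done(&state);
                 unwind_next(&state)) {
                    /* Any doubt about a frame must make the whole trace fail. */
                    if (!frame_is_reliable(&state))
                            return -EINVAL;
                    if (!consume_entry(cookie, state.pc))
                            return -EINVAL;
            }

            /* Report success only if the trace ended at an expected location. */
            return unwind_reached_entry(&state) ? 0 : -EINVAL;
    }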
+
+
+3. Compile-time analysis
+========================
+
+To ensure that kernel code can be correctly unwound in all cases,
+architectures may need to verify that code has been compiled in a manner
+expected by the unwinder. For example, an unwinder may expect that
+functions manipulate the stack pointer in a limited way, or that all
+functions use specific prologue and epilogue sequences. Architectures
+with such requirements should verify the kernel compilation using
+objtool.
+
+In some cases, an unwinder may require metadata to correctly unwind.
+Where necessary, this metadata should be generated at build time using
+objtool.
+
+
+4. Considerations
+=================
+
+The unwinding process varies across architectures, their respective procedure
+call standards, and kernel configurations. This section describes common
+details that architectures should consider.
+
+4.1 Identifying successful termination
+--------------------------------------
+
+Unwinding may terminate early for a number of reasons, including:
+
+* Stack or frame pointer corruption.
+
+* Missing unwind support for an uncommon scenario, or a bug in the unwinder.
+
+* Dynamically generated code (e.g. eBPF) or foreign code (e.g. EFI runtime
+ services) not following the conventions expected by the unwinder.
+
+To ensure that this does not result in functions being omitted from the trace,
+even if not caught by other checks, it is strongly recommended that
+architectures verify that a stacktrace ends at an expected location, e.g.
+
+* Within a specific function that is an entry point to the kernel.
+
+* At a specific location on a stack expected for a kernel entry point.
+
+* On a specific stack expected for a kernel entry point (e.g. if the
+ architecture has separate task and IRQ stacks).
+
+4.2 Identifying unwindable code
+-------------------------------
+
+Unwinding typically relies on code following specific conventions (e.g.
+manipulating a frame pointer), but there can be code which may not follow these
+conventions and may require special handling in the unwinder, e.g.
+
+* Exception vectors and entry assembly.
+
+* Procedure Linkage Table (PLT) entries and veneer functions.
+
+* Trampoline assembly (e.g. ftrace, kprobes).
+
+* Dynamically generated code (e.g. eBPF, optprobe trampolines).
+
+* Foreign code (e.g. EFI runtime services).
+
+To ensure that such cases do not result in functions being omitted from a
+trace, it is strongly recommended that architectures positively identify code
+which is known to be reliable to unwind from, and reject unwinding from all
+other code.
+
+Kernel code including modules and eBPF can be distinguished from foreign code
+using '__kernel_text_address()'. Checking for this also helps to detect stack
+corruption.
+
+There are several ways an architecture may identify kernel code which is deemed
+unreliable to unwind from, e.g.
+
+* Placing such code into special linker sections, and rejecting unwinding from
+ any code in these sections.
+
+* Identifying specific portions of code using bounds information.
+
+4.3 Unwinding across interrupts and exceptions
+----------------------------------------------
+
+At function call boundaries the stack and other unwind state is expected to be
+in a consistent state suitable for reliable unwinding, but this may not be the
+case part-way through a function. For example, during a function prologue or
+epilogue a frame pointer may be transiently invalid, or during the function
+body the return address may be held in an arbitrary general purpose register.
+For some architectures this may change at runtime as a result of dynamic
+instrumentation.
+
+If an interrupt or other exception is taken while the stack or other unwind
+state is in an inconsistent state, it may not be possible to reliably unwind,
+and it may not be possible to identify whether such unwinding will be reliable.
+See below for examples.
+
+Architectures which cannot identify when it is reliable to unwind such cases
+(or where it is never reliable) must reject unwinding across exception
+boundaries. Note that it may be reliable to unwind across certain
+exceptions (e.g. IRQ) but unreliable to unwind across other exceptions
+(e.g. NMI).
+
+Architectures which can identify when it is reliable to unwind such cases (or
+have no such cases) should attempt to unwind across exception boundaries, as
+doing so can prevent unnecessarily stalling livepatch consistency checks and
+permits livepatch transitions to complete more quickly.
+
+4.4 Rewriting of return addresses
+---------------------------------
+
+Some trampolines temporarily modify the return address of a function in order
+to intercept when that function returns with a return trampoline, e.g.
+
+* An ftrace trampoline may modify the return address so that function graph
+ tracing can intercept returns.
+
+* A kprobes (or optprobes) trampoline may modify the return address so that
+ kretprobes can intercept returns.
+
+When this happens, the original return address will not be in its usual
+location. For trampolines which are not subject to live patching, where an
+unwinder can reliably determine the original return address and no unwind state
+is altered by the trampoline, the unwinder may report the original return
+address in place of the trampoline and report this as reliable. Otherwise, an
+unwinder must report these cases as unreliable.
+
+Special care is required when identifying the original return address, as this
+information is not in a consistent location for the duration of the entry
+trampoline or return trampoline. For example, considering the x86_64
+'return_to_handler' return trampoline:
+
+.. code-block:: none
+
+ SYM_CODE_START(return_to_handler)
+ UNWIND_HINT_EMPTY
+ subq $24, %rsp
+
+ /* Save the return values */
+ movq %rax, (%rsp)
+ movq %rdx, 8(%rsp)
+ movq %rbp, %rdi
+
+ call ftrace_return_to_handler
+
+ movq %rax, %rdi
+ movq 8(%rsp), %rdx
+ movq (%rsp), %rax
+ addq $24, %rsp
+ JMP_NOSPEC rdi
+ SYM_CODE_END(return_to_handler)
+
+While the traced function runs, its return address on the stack points to
+the start of return_to_handler, and the original return address is stored in
+the task's cur_ret_stack. During this time the unwinder can find the return
+address using ftrace_graph_ret_addr().
+
+When the traced function returns to return_to_handler, there is no longer a
+return address on the stack, though the original return address is still stored
+in the task's cur_ret_stack. Within ftrace_return_to_handler(), the original
+return address is removed from cur_ret_stack and is transiently moved
+arbitrarily by the compiler before being returned in rax. The return_to_handler
+trampoline moves this into rdi before jumping to it.
+
+Architectures might not always be able to unwind such sequences, such as when
+ftrace_return_to_handler() has removed the address from cur_ret_stack, and the
+location of the return address cannot be reliably determined.
+
+It is recommended that architectures unwind cases where return_to_handler has
+not yet been returned to, but architectures are not required to unwind from the
+middle of return_to_handler and can report this as unreliable. Architectures
+are not required to unwind from other trampolines which modify the return
+address.
+
+4.5 Obscuring of return addresses
+---------------------------------
+
+Some trampolines do not rewrite the return address in order to intercept
+returns, but do transiently clobber the return address or other unwind state.
+
+For example, the x86_64 implementation of optprobes patches the probed function
+with a JMP instruction which targets the associated optprobe trampoline. When
+the probe is hit, the CPU will branch to the optprobe trampoline, and the
+address of the probed function is not held in any register or on the stack.
+
+Similarly, the arm64 implementation of DYNAMIC_FTRACE_WITH_REGS patches traced
+functions with the following:
+
+.. code-block:: none
+
+ MOV X9, X30
+ BL <trampoline>
+
+The MOV saves the link register (X30) into X9 to preserve the return address
+before the BL clobbers the link register and branches to the trampoline. At the
+start of the trampoline, the address of the traced function is in X9 rather
+than the link register as would usually be the case.
+
+Architectures must ensure that unwinders either reliably unwind
+such cases, or report the unwinding as unreliable.
+
+4.6 Link register unreliability
+-------------------------------
+
+On some other architectures, 'call' instructions place the return address into a
+link register, and 'return' instructions consume the return address from the
+link register without modifying the register. On these architectures software
+must save the return address to the stack prior to making a function call. Over
+the duration of a function call, the return address may be held in the link
+register alone, on the stack alone, or in both locations.
+
+Unwinders typically assume the link register is always live, but this
+assumption can lead to unreliable stack traces. For example, consider the
+following arm64 assembly for a simple function:
+
+.. code-block:: none
+
+ function:
+ STP X29, X30, [SP, -16]!
+ MOV X29, SP
+ BL <other_function>
+ LDP X29, X30, [SP], #16
+ RET
+
+At entry to the function, the link register (x30) points to the caller, and the
+frame pointer (X29) points to the caller's frame including the caller's return
+address. The first two instructions create a new stackframe and update the
+frame pointer, and at this point the link register and the frame pointer both
+describe this function's return address. A trace at this point may describe
+this function twice, and if the function return is being traced, the unwinder
+may consume two entries from the fgraph return stack rather than one entry.
+
+The BL invokes 'other_function' with the link register pointing to this
+function's LDP and the frame pointer pointing to this function's stackframe.
+When 'other_function' returns, the link register is left pointing at the BL,
+and so a trace at this point could result in 'function' appearing twice in the
+backtrace.
+
+Similarly, a function may deliberately clobber the LR, e.g.
+
+.. code-block:: none
+
+ caller:
+ STP X29, X30, [SP, -16]!
+ MOV X29, SP
+ ADR LR, <callee>
+ BLR LR
+ LDP X29, X30, [SP], #16
+ RET
+
+The ADR places the address of 'callee' into the LR, before the BLR branches to
+this address. If a trace is made immediately after the ADR, 'callee' will
+appear to be the parent of 'caller', rather than the child.
+
+Due to cases such as the above, it may only be possible to reliably consume a
+link register value at a function call boundary. Architectures where this is
+the case must reject unwinding across exception boundaries unless they can
+reliably identify when the LR or stack value should be used (e.g. using
+metadata generated by objtool).
diff --git a/Documentation/networking/bonding.rst b/Documentation/networking/bonding.rst
index adc314639085..5f690f0ad0e4 100644
--- a/Documentation/networking/bonding.rst
+++ b/Documentation/networking/bonding.rst
@@ -951,6 +951,19 @@ xmit_hash_policy
packets will be distributed according to the encapsulated
flows.
+ vlan+srcmac
+
+ This policy uses a very rudimentary VLAN ID and source MAC
+ hash to load-balance traffic per-VLAN, with failover
+ should one leg fail. The intended use case is for a bond
+ shared by multiple virtual machines, all configured to
+ use their own VLAN, to give LACP-like functionality
+ without requiring LACP-capable switching hardware.
+
+ The formula for the hash is simply
+
+ hash = (VLAN ID) XOR (source MAC vendor) XOR (source MAC dev)
+
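+ For example, to select this policy at runtime on an existing bond (a
+ sketch assuming a bond named bond0 running in a mode that honours
+ xmit_hash_policy, such as balance-xor or 802.3ad)::
+
+   # echo vlan+srcmac > /sys/class/net/bond0/bonding/xmit_hash_policy
+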
The default value is layer2. This option was added in bonding
version 2.6.3. In earlier versions of bonding, this parameter
does not exist, and the layer2 policy is the only policy. The
diff --git a/Documentation/networking/caif/caif.rst b/Documentation/networking/caif/caif.rst
index a07213030ccf..81a14373d780 100644
--- a/Documentation/networking/caif/caif.rst
+++ b/Documentation/networking/caif/caif.rst
@@ -68,7 +68,6 @@ There are debugfs parameters provided for serial communication.
* tty_status: Prints the bit-mask tty status information
- 0x01 - tty->warned is on.
- - 0x02 - tty->low_latency is on.
- 0x04 - tty->packed is on.
- 0x08 - tty->flow_stopped is on.
- 0x10 - tty->hw_stopped is on.
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst
index cbb75a1818c0..6b5dc203da2b 100644
--- a/Documentation/networking/device_drivers/ethernet/index.rst
+++ b/Documentation/networking/device_drivers/ethernet/index.rst
@@ -49,6 +49,7 @@ Contents:
stmicro/stmmac
ti/cpsw
ti/cpsw_switchdev
+ ti/am65_nuss_cpsw_switchdev
ti/tlan
toshiba/spider_net
diff --git a/Documentation/networking/device_drivers/ethernet/intel/ice.rst b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
index ee43ea57d443..e7d9cbff771b 100644
--- a/Documentation/networking/device_drivers/ethernet/intel/ice.rst
+++ b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
@@ -1,46 +1,1031 @@
.. SPDX-License-Identifier: GPL-2.0+
-==================================================================
-Linux Base Driver for the Intel(R) Ethernet Connection E800 Series
-==================================================================
+=================================================================
+Linux Base Driver for the Intel(R) Ethernet Controller 800 Series
+=================================================================
Intel ice Linux driver.
-Copyright(c) 2018 Intel Corporation.
+Copyright(c) 2018-2021 Intel Corporation.
Contents
========
-- Enabling the driver
-- Support
+- Overview
+- Identifying Your Adapter
+- Important Notes
+- Additional Features & Configurations
+- Performance Optimization
-The driver in this release supports Intel's E800 Series of products. For
-more information, visit Intel's support page at https://support.intel.com.
-Enabling the driver
-===================
-The driver is enabled via the standard kernel configuration system,
-using the make command::
+The associated Virtual Function (VF) driver for this driver is iavf.
- make oldconfig/menuconfig/etc.
+Driver information can be obtained using ethtool and lspci.
-The driver is located in the menu structure at:
+For questions related to hardware requirements, refer to the documentation
+supplied with your Intel adapter. All hardware requirements listed apply to use
+with Linux.
+
+This driver supports XDP (Express Data Path) and AF_XDP zero-copy. Note that
+XDP is blocked for frame sizes larger than 3KB.
+
+
+Identifying Your Adapter
+========================
+For information on how to identify your adapter, and for the latest Intel
+network drivers, refer to the Intel Support website:
+https://www.intel.com/support
+
+
+Important Notes
+===============
+
+Packet drops may occur under receive stress
+-------------------------------------------
+Devices based on the Intel(R) Ethernet Controller 800 Series are designed to
+tolerate a limited amount of system latency during PCIe and DMA transactions.
+If these transactions take longer than the tolerated latency, it can impact the
+length of time the packets are buffered in the device and associated memory,
+which may result in dropped packets. These packet drops typically do not have
+a noticeable impact on throughput and performance under standard workloads.
+
+If these packet drops appear to affect your workload, the following may improve
+the situation:
+
+1) Make sure that your system's physical memory is in a high-performance
+ configuration, as recommended by the platform vendor. A common
+ recommendation is for all channels to be populated with a single DIMM
+ module.
+2) In your system's BIOS/UEFI settings, select the "Performance" profile.
+3) Your distribution may provide tools like "tuned," which can help tweak
+   kernel settings to better suit different workloads.
+
+
+Configuring SR-IOV for improved network security
+------------------------------------------------
+In a virtualized environment, on Intel(R) Ethernet Network Adapters that
+support SR-IOV, the virtual function (VF) may be subject to malicious behavior.
+Software-generated layer two frames, like IEEE 802.3x (link flow control), IEEE
+802.1Qbb (priority based flow-control), and others of this type, are not
+expected and can throttle traffic between the host and the virtual switch,
+reducing performance. To resolve this issue, and to ensure isolation from
+unintended traffic streams, configure all SR-IOV enabled ports for VLAN tagging
+from the administrative interface on the PF. This configuration allows
+unexpected, and potentially malicious, frames to be dropped.
+
+See "Configuring VLAN Tagging on SR-IOV Enabled Adapter Ports" later in this
+README for configuration instructions.
+
+
+Do not unload port driver if VF with active VM is bound to it
+-------------------------------------------------------------
+Do not unload a port's driver if a Virtual Function (VF) with an active Virtual
+Machine (VM) is bound to it. Doing so will cause the port to appear to hang.
+Once the VM shuts down, or otherwise releases the VF, the command will
+complete.
+
+
+Important notes for SR-IOV and Link Aggregation
+-----------------------------------------------
+Link Aggregation is mutually exclusive with SR-IOV.
+
+- If Link Aggregation is active, SR-IOV VFs cannot be created on the PF.
+- If SR-IOV is active, you cannot set up Link Aggregation on the interface.
+
+Bridging and MACVLAN are also affected by this. If you wish to use bridging or
+MACVLAN with SR-IOV, you must set up bridging or MACVLAN before enabling
+SR-IOV. If you are using bridging or MACVLAN in conjunction with SR-IOV, and
+you want to remove the interface from the bridge or MACVLAN, you must follow
+these steps:
+
+1. Destroy SR-IOV VFs if they exist
+2. Remove the interface from the bridge or MACVLAN
+3. Recreate SR-IOV VFs as needed (see the example below)
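+
+For example, a minimal sketch of this sequence, assuming PF eth0 currently has
+4 VFs and is a member of bridge br0::
+
+   # echo 0 > /sys/class/net/eth0/device/sriov_numvfs
+   # ip link set dev eth0 nomaster
+   # echo 4 > /sys/class/net/eth0/device/sriov_numvfs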
+
+
+Additional Features and Configurations
+======================================
+
+ethtool
+-------
+The driver utilizes the ethtool interface for driver configuration and
+diagnostics, as well as displaying statistical information. The latest ethtool
+version is required for this functionality. Download it at:
+https://kernel.org/pub/software/network/ethtool/
+
+NOTE: The rx_bytes value of ethtool does not match the rx_bytes value of
+Netdev, due to the 4-byte CRC being stripped by the device. The difference
+between the two rx_bytes values will be 4 x the number of Rx packets. For
+example, if Rx packets are 10 and Netdev (software statistics) displays
+rx_bytes as "X", then ethtool (hardware statistics) will display rx_bytes as
+"X+40" (4 bytes CRC x 10 packets).
+
+
+Viewing Link Messages
+---------------------
+Link messages will not be displayed to the console if the distribution is
+restricting system messages. In order to see network driver link messages on
+your console, set dmesg to eight by entering the following::
+
+ # dmesg -n 8
+
+NOTE: This setting is not saved across reboots.
+
+
+Dynamic Device Personalization
+------------------------------
+Dynamic Device Personalization (DDP) allows you to change the packet processing
+pipeline of a device by applying a profile package to the device at runtime.
+Profiles can be used to, for example, add support for new protocols, change
+existing protocols, or change default settings. DDP profiles can also be rolled
+back without rebooting the system.
+
+The DDP package loads during device initialization. The driver looks for
+``intel/ice/ddp/ice.pkg`` in your firmware root (typically ``/lib/firmware/``
+or ``/lib/firmware/updates/``) and checks that it contains a valid DDP package
+file.
+
+NOTE: Your distribution should likely have provided the latest DDP file, but if
+ice.pkg is missing, you can find it in the linux-firmware repository or from
+intel.com.
+
+If the driver is unable to load the DDP package, the device will enter Safe
+Mode. Safe Mode disables advanced and performance features and supports only
+basic traffic and minimal functionality, such as updating the NVM or
+downloading a new driver or DDP package. Safe Mode only applies to the affected
+physical function and does not impact any other PFs. See the "Intel(R) Ethernet
+Adapters and Devices User Guide" for more details on DDP and Safe Mode.
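+
+To check whether the DDP package loaded successfully or the device fell back to
+Safe Mode, you can search the kernel log; this is only a sketch and the exact
+message wording may vary between driver versions::
+
+   # dmesg | grep -iE 'ddp|safe mode'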
+
+NOTES:
+
+- If you encounter issues with the DDP package file, you may need to download
+ an updated driver or DDP package file. See the log messages for more
+ information.
+
+- The ice.pkg file is a symbolic link to the default DDP package file.
+
+- You cannot update the DDP package if any PF drivers are already loaded. To
+ overwrite a package, unload all PFs and then reload the driver with the new
+ package.
+
+- Only the first loaded PF per device can download a package for that device.
+
+You can install specific DDP package files for different physical devices in
+the same system. To install a specific DDP package file:
+
+1. Download the DDP package file you want for your device.
+
+2. Rename the file ice-xxxxxxxxxxxxxxxx.pkg, where 'xxxxxxxxxxxxxxxx' is the
+ unique 64-bit PCI Express device serial number (in hex) of the device you
+ want the package downloaded on. The filename must include the complete
+ serial number (including leading zeros) and be all lowercase. For example,
+ if the 64-bit serial number is b887a3ffffca0568, then the file name would be
+ ice-b887a3ffffca0568.pkg.
+
+ To find the serial number from the PCI bus address, you can use the
+ following command::
+
+ # lspci -vv -s af:00.0 | grep -i Serial
+ Capabilities: [150 v1] Device Serial Number b8-87-a3-ff-ff-ca-05-68
+
+ You can use the following command to format the serial number without the
+ dashes::
+
+ # lspci -vv -s af:00.0 | grep -i Serial | awk '{print $7}' | sed s/-//g
+ b887a3ffffca0568
+
+3. Copy the renamed DDP package file to
+ ``/lib/firmware/updates/intel/ice/ddp/``. If the directory does not yet
+ exist, create it before copying the file.
+
+4. Unload all of the PFs on the device.
+
+5. Reload the driver with the new package.
+
+NOTE: The presence of a device-specific DDP package file overrides the loading
+of the default DDP package file (ice.pkg).
+
+
+Intel(R) Ethernet Flow Director
+-------------------------------
+The Intel Ethernet Flow Director performs the following tasks:
+
+- Directs receive packets according to their flows to different queues
+- Enables tight control on routing a flow in the platform
+- Matches flows and CPU cores for flow affinity
+
+NOTE: This driver supports the following flow types:
+
+- IPv4
+- TCPv4
+- UDPv4
+- SCTPv4
+- IPv6
+- TCPv6
+- UDPv6
+- SCTPv6
+
+Each flow type supports valid combinations of IP addresses (source or
+destination) and UDP/TCP/SCTP ports (source and destination). You can supply
+only a source IP address, a source IP address and a destination port, or any
+combination of one or more of these four parameters.
+
+NOTE: This driver allows you to filter traffic based on a user-defined flexible
+two-byte pattern and offset by using the ethtool user-def and mask fields. Only
+L3 and L4 flow types are supported for user-defined flexible filters. For a
+given flow type, you must clear all Intel Ethernet Flow Director filters before
+changing the input set (for that flow type).
+
+
+Flow Director Filters
+---------------------
+Flow Director filters are used to direct traffic that matches specified
+characteristics. They are enabled through ethtool's ntuple interface. To enable
+or disable the Intel Ethernet Flow Director and these filters::
+
+ # ethtool -K <ethX> ntuple <off|on>
+
+NOTE: When you disable ntuple filters, all the user programmed filters are
+flushed from the driver cache and hardware. All needed filters must be re-added
+when ntuple is re-enabled.
+
+To display all of the active filters::
+
+ # ethtool -u <ethX>
+
+To add a new filter::
+
+ # ethtool -U <ethX> flow-type <type> src-ip <ip> [m <ip_mask>] dst-ip <ip>
+ [m <ip_mask>] src-port <port> [m <port_mask>] dst-port <port> [m <port_mask>]
+ action <queue>
+
+ Where:
+ <ethX> - the Ethernet device to program
+ <type> - can be ip4, tcp4, udp4, sctp4, ip6, tcp6, udp6, sctp6
+ <ip> - the IP address to match on
+ <ip_mask> - the IPv4 address to mask on
+ NOTE: These filters use inverted masks.
+ <port> - the port number to match on
+ <port_mask> - the 16-bit integer for masking
+ NOTE: These filters use inverted masks.
+ <queue> - the queue to direct traffic toward (-1 discards the
+ matched traffic)
+
+To delete a filter::
+
+ # ethtool -U <ethX> delete <N>
+
+ Where <N> is the filter ID displayed when printing all the active filters,
+ and may also have been specified using "loc <N>" when adding the filter.
+
+EXAMPLES:
+
+To add a filter that directs packets to queue 2::
+
+ # ethtool -U <ethX> flow-type tcp4 src-ip 192.168.10.1 dst-ip \
+ 192.168.10.2 src-port 2000 dst-port 2001 action 2 [loc 1]
+
+To set a filter using only the source and destination IP address::
+
+ # ethtool -U <ethX> flow-type tcp4 src-ip 192.168.10.1 dst-ip \
+ 192.168.10.2 action 2 [loc 1]
+
+To set a filter based on a user-defined pattern and offset::
+
+ # ethtool -U <ethX> flow-type tcp4 src-ip 192.168.10.1 dst-ip \
+ 192.168.10.2 user-def 0x4FFFF action 2 [loc 1]
+
+ where the value of the user-def field contains the offset (4 bytes) and
+ the pattern (0xffff).
+
+To match TCP traffic sent from 192.168.0.1, port 5300, directed to 192.168.0.5,
+port 80, and then send it to queue 7::
+
+ # ethtool -U enp130s0 flow-type tcp4 src-ip 192.168.0.1 dst-ip 192.168.0.5
+ src-port 5300 dst-port 80 action 7
+
+To add a TCPv4 filter with a partial mask for a source IP subnet::
+
+ # ethtool -U <ethX> flow-type tcp4 src-ip 192.168.0.0 m 0.255.255.255 dst-ip
+ 192.168.5.12 src-port 12600 dst-port 31 action 12
+
+NOTES:
+
+For each flow-type, the programmed filters must all have the same matching
+input set. For example, issuing the following two commands is acceptable::
+
+ # ethtool -U enp130s0 flow-type ip4 src-ip 192.168.0.1 src-port 5300 action 7
+ # ethtool -U enp130s0 flow-type ip4 src-ip 192.168.0.5 src-port 55 action 10
+
+Issuing the next two commands, however, is not acceptable, since the first
+specifies src-ip and the second specifies dst-ip::
+
+ # ethtool -U enp130s0 flow-type ip4 src-ip 192.168.0.1 src-port 5300 action 7
+ # ethtool -U enp130s0 flow-type ip4 dst-ip 192.168.0.5 src-port 55 action 10
+
+The second command will fail with an error. You may program multiple filters
+with the same fields, using different values, but, on one device, you may not
+program two tcp4 filters with different matching fields.
+
+The ice driver does not support matching on a subportion of a field, thus
+partial mask fields are not supported.
+
+
+Flex Byte Flow Director Filters
+-------------------------------
+The driver also supports matching user-defined data within the packet payload.
+This flexible data is specified using the "user-def" field of the ethtool
+command in the following way:
+
+.. table::
+
+ ============================== ============================
+ ``31 28 24 20 16`` ``15 12 8 4 0``
+ ``offset into packet payload`` ``2 bytes of flexible data``
+ ============================== ============================
+
+For example,
+
+::
+
+ ... user-def 0x4FFFF ...
+
+tells the filter to look 4 bytes into the payload and match that value against
+0xFFFF. The offset is based on the beginning of the payload, and not the
+beginning of the packet. Thus
+
+::
+
+ flow-type tcp4 ... user-def 0x8BEAF ...
+
+would match TCP/IPv4 packets which have the value 0xBEAF 8 bytes into the
+TCP/IPv4 payload.
+
+Note that ICMP headers are parsed as 4 bytes of header and 4 bytes of payload.
+Thus to match the first byte of the payload, you must actually add 4 bytes to
+the offset. Also note that ip4 filters match both ICMP frames as well as raw
+(unknown) ip4 frames, where the payload will be the L3 payload of the IP4
+frame.
+
+The maximum offset is 64. The hardware will only read up to 64 bytes of data
+from the payload. The offset must be even because the flexible data is 2 bytes
+long and must be aligned to byte 0 of the packet payload.
+
+The user-defined flexible offset is also considered part of the input set and
+cannot be programmed separately for multiple filters of the same type. However,
+the flexible data is not part of the input set and multiple filters may use the
+same offset but match against different data.
+
+
+RSS Hash Flow
+-------------
+Allows you to set the hash bytes per flow type and any combination of one or
+more options for Receive Side Scaling (RSS) hash byte configuration.
+
+::
+
+ # ethtool -N <ethX> rx-flow-hash <type> <option>
+
+ Where <type> is:
+ tcp4 signifying TCP over IPv4
+ udp4 signifying UDP over IPv4
+ tcp6 signifying TCP over IPv6
+ udp6 signifying UDP over IPv6
+ And <option> is one or more of:
+ s Hash on the IP source address of the Rx packet.
+ d Hash on the IP destination address of the Rx packet.
+ f Hash on bytes 0 and 1 of the Layer 4 header of the Rx packet.
+ n Hash on bytes 2 and 3 of the Layer 4 header of the Rx packet.
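+
+For example, to hash TCP-over-IPv4 traffic on both IP addresses and both L4
+ports (a sketch assuming the interface is named eth0)::
+
+   # ethtool -N eth0 rx-flow-hash tcp4 sdfn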
+
+
+Accelerated Receive Flow Steering (aRFS)
+----------------------------------------
+Devices based on the Intel(R) Ethernet Controller 800 Series support
+Accelerated Receive Flow Steering (aRFS) on the PF. aRFS is a load-balancing
+mechanism that allows you to direct packets to the same CPU where an
+application is running or consuming the packets in that flow.
+
+NOTES:
+
+- aRFS requires that ntuple filtering is enabled via ethtool.
+- aRFS support is limited to the following packet types:
+
+ - TCP over IPv4 and IPv6
+ - UDP over IPv4 and IPv6
+ - Nonfragmented packets
+
+- aRFS only supports Flow Director filters, which consist of the
+ source/destination IP addresses and source/destination ports.
+- aRFS and ethtool's ntuple interface both use the device's Flow Director. aRFS
+ and ntuple features can coexist, but you may encounter unexpected results if
+ there's a conflict between aRFS and ntuple requests. See "Intel(R) Ethernet
+ Flow Director" for additional information.
+
+To set up aRFS:
+
+1. Enable the Intel Ethernet Flow Director and ntuple filters using ethtool.
+
+::
+
+ # ethtool -K <ethX> ntuple on
+
+2. Set up the number of entries in the global flow table. For example:
+
+::
+
+ # NUM_RPS_ENTRIES=16384
+ # echo $NUM_RPS_ENTRIES > /proc/sys/net/core/rps_sock_flow_entries
+
+3. Set up the number of entries in the per-queue flow table. For example:
+
+::
+
+ # NUM_RX_QUEUES=64
+ # for file in /sys/class/net/$IFACE/queues/rx-*/rps_flow_cnt; do
+ # echo $(($NUM_RPS_ENTRIES/$NUM_RX_QUEUES)) > $file;
+ # done
+
+4. Disable the IRQ balance daemon (this is only a temporary stop of the service
+ until the next reboot).
+
+::
+
+ # systemctl stop irqbalance
+
+5. Configure the interrupt affinity.
+
+ See ``/Documentation/core-api/irq/irq-affinity.rst``
+
+
+To disable aRFS using ethtool::
+
+ # ethtool -K <ethX> ntuple off
+
+NOTE: This command will disable ntuple filters and clear any aRFS filters in
+software and hardware.
+
+Example Use Case:
+
+1. Set the server application on the desired CPU (e.g., CPU 4).
+
+::
+
+ # taskset -c 4 netserver
+
+2. Use netperf to route traffic from the client to CPU 4 on the server with
+ aRFS configured. This example uses TCP over IPv4.
+
+::
+
+ # netperf -H <Host IPv4 Address> -t TCP_STREAM
+
+
+Enabling Virtual Functions (VFs)
+--------------------------------
+Use sysfs to enable virtual functions (VF).
+
+For example, you can create 4 VFs as follows::
+
+ # echo 4 > /sys/class/net/<ethX>/device/sriov_numvfs
+
+To disable VFs, write 0 to the same file::
+
+ # echo 0 > /sys/class/net/<ethX>/device/sriov_numvfs
+
+The maximum number of VFs for the ice driver is 256 total (all ports). To check
+how many VFs each PF supports, use the following command::
+
+ # cat /sys/class/net/<ethX>/device/sriov_totalvfs
+
+Note: You cannot use SR-IOV when link aggregation (LAG)/bonding is active, and
+vice versa. To enforce this, the driver checks for this mutual exclusion.
+
+
+Displaying VF Statistics on the PF
+----------------------------------
+Use the following command to display the statistics for the PF and its VFs::
+
+ # ip -s link show dev <ethX>
+
+NOTE: The output of this command can be very large due to the maximum number of
+possible VFs.
+
+The PF driver will display a subset of the statistics for the PF and for all
+VFs that are configured. The PF will always print a statistics block for each
+of the possible VFs, and it will show zero for all unconfigured VFs.
+
+
+Configuring VLAN Tagging on SR-IOV Enabled Adapter Ports
+--------------------------------------------------------
+To configure VLAN tagging for the ports on an SR-IOV enabled adapter, use the
+following command. The VLAN configuration should be done before the VF driver
+is loaded or the VM is booted. The VF is not aware of the VLAN tag being
+inserted on transmit and removed on received frames (sometimes called "port
+VLAN" mode).
+
+::
+
+ # ip link set dev <ethX> vf <id> vlan <vlan id>
+
+For example, the following will configure PF eth0 and the first VF on VLAN 10::
+
+ # ip link set dev eth0 vf 0 vlan 10
+
+
+Enabling a VF link if the port is disconnected
+----------------------------------------------
+If the physical function (PF) link is down, you can force link up (from the
+host PF) on any virtual functions (VF) bound to the PF.
+
+For example, to force link up on VF 0 bound to PF eth0::
+
+ # ip link set eth0 vf 0 state enable
+
+Note: If the command does not work, it may not be supported by your system.
+
+
+Setting the MAC Address for a VF
+--------------------------------
+To change the MAC address for the specified VF::
+
+ # ip link set <ethX> vf 0 mac <address>
+
+For example::
+
+ # ip link set <ethX> vf 0 mac 00:01:02:03:04:05
+
+This setting lasts until the PF is reloaded.
+
+NOTE: Assigning a MAC address for a VF from the host will disable any
+subsequent requests to change the MAC address from within the VM. This is a
+security feature. The VM is not aware of this restriction, so if this is
+attempted in the VM, it will trigger MDD events.
+
+
+Trusted VFs and VF Promiscuous Mode
+-----------------------------------
+This feature allows you to designate a particular VF as trusted and allows that
+trusted VF to request selective promiscuous mode on the Physical Function (PF).
+
+To set a VF as trusted or untrusted, enter the following command in the
+Hypervisor::
+
+ # ip link set dev <ethX> vf 1 trust [on|off]
+
+NOTE: It's important to set the VF to trusted before setting promiscuous mode.
+If the VM is not trusted, the PF will ignore promiscuous mode requests from the
+VF. If the VM becomes trusted after the VF driver is loaded, you must make a
+new request to set the VF to promiscuous.
+
+Once the VF is designated as trusted, use the following commands in the VM to
+set the VF to promiscuous mode.
+
+For promiscuous all::
+
+ # ip link set <ethX> promisc on
+ Where <ethX> is a VF interface in the VM
+
+For promiscuous Multicast::
+
+ # ip link set <ethX> allmulticast on
+ Where <ethX> is a VF interface in the VM
+
+NOTE: By default, the ethtool private flag vf-true-promisc-support is set to
+"off," meaning that promiscuous mode for the VF will be limited. To set the
+promiscuous mode for the VF to true promiscuous and allow the VF to see all
+ingress traffic, use the following command::
+
+ # ethtool --set-priv-flags <ethX> vf-true-promisc-support on
+
+The vf-true-promisc-support private flag does not enable promiscuous mode;
+rather, it designates which type of promiscuous mode (limited or true) you will
+get when you enable promiscuous mode using the ip link commands above. Note
+that this is a global setting that affects the entire device. However, the
+vf-true-promisc-support private flag is only exposed to the first PF of the
+device. The PF remains in limited promiscuous mode regardless of the
+vf-true-promisc-support setting.
+
+Next, add a VLAN interface on the VF interface. For example::
+
+ # ip link add link eth2 name eth2.100 type vlan id 100
+
+Note that the order in which you set the VF to promiscuous mode and add the
+VLAN interface does not matter (you can do either first). The result in this
+example is that the VF will get all traffic that is tagged with VLAN 100.
+
+
+Malicious Driver Detection (MDD) for VFs
+----------------------------------------
+Some Intel Ethernet devices use Malicious Driver Detection (MDD) to detect
+malicious traffic from the VF and disable Tx/Rx queues or drop the offending
+packet until a VF driver reset occurs. You can view MDD messages in the PF's
+system log using the dmesg command.
+
+- If the PF driver logs MDD events from the VF, confirm that the correct VF
+ driver is installed.
+- To restore functionality, you can manually reload the VF or VM or enable
+ automatic VF resets.
+- When automatic VF resets are enabled, the PF driver will immediately reset
+ the VF and reenable queues when it detects MDD events on the receive path.
+- If automatic VF resets are disabled, the PF will not automatically reset the
+ VF when it detects MDD events.
+
+To enable or disable automatic VF resets, use the following command::
+
+ # ethtool --set-priv-flags <ethX> mdd-auto-reset-vf on|off
+
+
+MAC and VLAN Anti-Spoofing Feature for VFs
+------------------------------------------
+When a malicious driver on a Virtual Function (VF) interface attempts to send a
+spoofed packet, it is dropped by the hardware and not transmitted.
+
+NOTE: This feature can be disabled for a specific VF::
+
+ # ip link set <ethX> vf <vf id> spoofchk {off|on}
+
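+For example, a sketch that disables anti-spoof checking for VF 0 on an assumed
+PF named eth0::
+
+   # ip link set eth0 vf 0 spoofchk off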
+
+Jumbo Frames
+------------
+Jumbo Frames support is enabled by changing the Maximum Transmission Unit (MTU)
+to a value larger than the default value of 1500.
+
+Use the ifconfig command to increase the MTU size. For example, enter the
+following where <ethX> is the interface number::
+
+ # ifconfig <ethX> mtu 9000 up
+
+Alternatively, you can use the ip command as follows::
+
+ # ip link set mtu 9000 dev <ethX>
+ # ip link set up dev <ethX>
+
+This setting is not saved across reboots.
+
+
+NOTE: The maximum MTU setting for jumbo frames is 9702. This corresponds to the
+maximum jumbo frame size of 9728 bytes.
+
+NOTE: This driver will attempt to use multiple page sized buffers to receive
+each jumbo packet. This should help to avoid buffer starvation issues when
+allocating receive packets.
+
+NOTE: Packet loss may have a greater impact on throughput when you use jumbo
+frames. If you observe a drop in performance after enabling jumbo frames,
+enabling flow control may mitigate the issue.
+
+
+Speed and Duplex Configuration
+------------------------------
+In addressing speed and duplex configuration issues, you need to distinguish
+between copper-based adapters and fiber-based adapters.
+
+In the default mode, an Intel(R) Ethernet Network Adapter using copper
+connections will attempt to auto-negotiate with its link partner to determine
+the best setting. If the adapter cannot establish link with the link partner
+using auto-negotiation, you may need to manually configure the adapter and link
+partner to identical settings to establish link and pass packets. This should
+only be needed when attempting to link with an older switch that does not
+support auto-negotiation or one that has been forced to a specific speed or
+duplex mode. Your link partner must match the setting you choose. 1 Gbps speeds
+and higher cannot be forced. Use the autonegotiation advertising setting to
+manually set devices for 1 Gbps and higher.
+
+Speed, duplex, and autonegotiation advertising are configured through the
+ethtool utility. For the latest version, download and install ethtool from the
+following website:
+
+ https://kernel.org/pub/software/network/ethtool/
+
+To see the speed configurations your device supports, run the following::
+
+ # ethtool <ethX>
+
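+As a sketch, to restrict autonegotiation advertising to 1000baseT/Full only
+(0x020 is the standard ethtool advertise bit for that mode; eth0 is an assumed
+interface name and the device must support the mode)::
+
+   # ethtool -s eth0 advertise 0x020
+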
+Caution: Only experienced network administrators should force speed and duplex
+or change autonegotiation advertising manually. The settings at the switch must
+always match the adapter settings. Adapter performance may suffer or your
+adapter may not operate if you configure the adapter differently from your
+switch.
+
+
+Data Center Bridging (DCB)
+--------------------------
+NOTE: The kernel assumes that TC0 is available, and will disable Priority Flow
+Control (PFC) on the device if TC0 is not available. To fix this, ensure TC0 is
+enabled when setting up DCB on your switch.
+
+DCB is a configuration Quality of Service implementation in hardware. It uses
+the VLAN priority tag (802.1p) to filter traffic. That means that there are 8
+different priorities that traffic can be filtered into. It also enables
+priority flow control (802.1Qbb) which can limit or eliminate the number of
+dropped packets during network stress. Bandwidth can be allocated to each of
+these priorities, which is enforced at the hardware level (802.1Qaz).
+
+DCB is normally configured on the network using the DCBX protocol (802.1Qaz), a
+specialization of LLDP (802.1AB). The ice driver supports the following
+mutually exclusive variants of DCBX support:
+
+1) Firmware-based LLDP Agent
+2) Software-based LLDP Agent
+
+In firmware-based mode, firmware intercepts all LLDP traffic and handles DCBX
+negotiation transparently for the user. In this mode, the adapter operates in
+"willing" DCBX mode, receiving DCB settings from the link partner (typically a
+switch). The local user can only query the negotiated DCB configuration. For
+information on configuring DCBX parameters on a switch, please consult the
+switch manufacturer's documentation.
+
+In software-based mode, LLDP traffic is forwarded to the network stack and user
+space, where a software agent can handle it. In this mode, the adapter can
+operate in either "willing" or "nonwilling" DCBX mode and DCB configuration can
+be both queried and set locally. This mode requires the FW-based LLDP Agent to
+be disabled.
+
+NOTE:
+
+- You can enable and disable the firmware-based LLDP Agent using an ethtool
+ private flag. Refer to the "FW-LLDP (Firmware Link Layer Discovery Protocol)"
+ section in this README for more information.
+- In software-based DCBX mode, you can configure DCB parameters using software
+ LLDP/DCBX agents that interface with the Linux kernel's DCB Netlink API. We
+ recommend using OpenLLDP as the DCBX agent when running in software mode. For
+ more information, see the OpenLLDP man pages and
+ https://github.com/intel/openlldp.
+- The driver implements the DCB netlink interface layer to allow the user space
+ to communicate with the driver and query DCB configuration for the port.
+- iSCSI with DCB is not supported.
+
+
+FW-LLDP (Firmware Link Layer Discovery Protocol)
+------------------------------------------------
+Use ethtool to change FW-LLDP settings. The FW-LLDP setting is per port and
+persists across boots.
+
+To enable LLDP::
+
+ # ethtool --set-priv-flags <ethX> fw-lldp-agent on
+
+To disable LLDP::
+
+ # ethtool --set-priv-flags <ethX> fw-lldp-agent off
+
+To check the current LLDP setting::
+
+ # ethtool --show-priv-flags <ethX>
+
+NOTE: You must enable the UEFI HII "LLDP Agent" attribute for this setting to
+take effect. If "LLDP AGENT" is set to disabled, you cannot enable it from the
+OS.
+
+
+Flow Control
+------------
+Ethernet Flow Control (IEEE 802.3x) can be configured with ethtool to enable
+receiving and transmitting pause frames for ice. When transmit is enabled,
+pause frames are generated when the receive packet buffer crosses a predefined
+threshold. When receive is enabled, the transmit unit will halt for the time
+delay specified when a pause frame is received.
+
+NOTE: You must have a flow control capable link partner.
+
+Flow Control is disabled by default.
+
+Use ethtool to change the flow control settings.
+
+To enable or disable Rx or Tx Flow Control::
+
+ # ethtool -A <ethX> rx <on|off> tx <on|off>
+
+Note: This command only enables or disables Flow Control if auto-negotiation is
+disabled. If auto-negotiation is enabled, this command changes the parameters
+used for auto-negotiation with the link partner.
+
+Note: Flow Control auto-negotiation is part of link auto-negotiation. Depending
+on your device, you may not be able to change the auto-negotiation setting.
+
+NOTE:
+
+- The ice driver requires flow control on both the port and link partner. If
+ flow control is disabled on one of the sides, the port may appear to hang on
+ heavy traffic.
+- You may encounter issues with link-level flow control (LFC) after disabling
+ DCB. The LFC status may show as enabled but traffic is not paused. To resolve
+ this issue, disable and reenable LFC using ethtool::
+
+ # ethtool -A <ethX> rx off tx off
+ # ethtool -A <ethX> rx on tx on
+
+
+NAPI
+----
+This driver supports NAPI (Rx polling mode).
+For more information on NAPI, see
+https://www.linuxfoundation.org/collaborate/workgroups/networking/napi
+
+
+MACVLAN
+-------
+This driver supports MACVLAN. Kernel support for MACVLAN can be tested by
+checking if the MACVLAN driver is loaded. You can run 'lsmod | grep macvlan' to
+see if the MACVLAN driver is loaded or run 'modprobe macvlan' to try to load
+the MACVLAN driver.
+
+NOTE:
+
+- In passthru mode, you can only set up one MACVLAN device. It will inherit the
+ MAC address of the underlying PF (Physical Function) device.
+
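+For example, a sketch of creating a passthru-mode MACVLAN device on top of an
+assumed PF named eth0::
+
+   # ip link add link eth0 name macvlan0 type macvlan mode passthru
+   # ip link set macvlan0 up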
+
+IEEE 802.1ad (QinQ) Support
+---------------------------
+The IEEE 802.1ad standard, informally known as QinQ, allows for multiple VLAN
+IDs within a single Ethernet frame. VLAN IDs are sometimes referred to as
+"tags," and multiple VLAN IDs are thus referred to as a "tag stack." Tag stacks
+allow L2 tunneling and the ability to segregate traffic within a particular
+VLAN ID, among other uses.
+
+NOTES:
+
+- Receive checksum offloads and VLAN acceleration are not supported for 802.1ad
+ (QinQ) packets.
+
+- 0x88A8 traffic will not be received unless VLAN stripping is disabled with
+ the following command::
+
+ # ethtool -K <ethX> rxvlan off
+
+- 0x88A8/0x8100 double VLANs cannot be used with 0x8100 or 0x8100/0x8100 VLANS
+ configured on the same port. 0x88a8/0x8100 traffic will not be received if
+ 0x8100 VLANs are configured.
+
+- The VF can only transmit 0x88A8/0x8100 (i.e., 802.1ad/802.1Q) traffic if:
+
+ 1) The VF is not assigned a port VLAN.
+ 2) spoofchk is disabled from the PF. If you enable spoofchk, the VF will
+ not transmit 0x88A8/0x8100 traffic.
+
+- The VF may not receive all network traffic based on the Inner VLAN header
+ when VF true promiscuous mode (vf-true-promisc-support) and double VLANs are
+ enabled in SR-IOV mode.
+
+The following are examples of how to configure 802.1ad (QinQ)::
+
+ # ip link add link eth0 eth0.24 type vlan proto 802.1ad id 24
+ # ip link add link eth0.24 eth0.24.371 type vlan proto 802.1Q id 371
+
+ Where "24" and "371" are example VLAN IDs.
+
+
+Tunnel/Overlay Stateless Offloads
+---------------------------------
+Supported tunnels and overlays include VXLAN, GENEVE, and others depending on
+hardware and software configuration. Stateless offloads are enabled by default.
+
+To view the current state of all offloads::
+
+ # ethtool -k <ethX>
+
+
+UDP Segmentation Offload
+------------------------
+Allows the adapter to offload transmit segmentation of UDP packets with
+payloads up to 64K into valid Ethernet frames. Because the adapter hardware is
+able to complete data segmentation much faster than operating system software,
+this feature may improve transmission performance.
+In addition, the adapter may use fewer CPU resources.
+
+NOTE:
+
+- The application sending UDP packets must support UDP segmentation offload.
+
+To enable/disable UDP Segmentation Offload, issue the following command::
+
+ # ethtool -K <ethX> tx-udp-segmentation [off|on]
+
+
+Performance Optimization
+========================
+Driver defaults are meant to fit a wide variety of workloads, but if further
+optimization is required, we recommend experimenting with the following
+settings.
+
+
+Rx Descriptor Ring Size
+-----------------------
+To reduce the number of Rx packet discards, increase the number of Rx
+descriptors for each Rx ring using ethtool.
+
+ Check if the interface is dropping Rx packets due to buffers being full
+ (rx_dropped.nic can mean that there is no PCIe bandwidth)::
+
+ # ethtool -S <ethX> | grep "rx_dropped"
+
+ If the previous command shows drops on queues, it may help to increase
+ the number of descriptors using 'ethtool -G'::
+
+ # ethtool -G <ethX> rx <N>
+ Where <N> is the desired number of ring entries/descriptors
+
+ This can provide temporary buffering for issues that create latency while
+ the CPUs process descriptors.
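+
+ For example (a sketch assuming the interface is eth0), first check the
+ preset maximums, then increase the Rx ring size within that limit::
+
+   # ethtool -g eth0
+   # ethtool -G eth0 rx 4096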
+
+
+Interrupt Rate Limiting
+-----------------------
+This driver supports an adaptive interrupt throttle rate (ITR) mechanism that
+is tuned for general workloads. The user can customize the interrupt rate
+control for specific workloads, via ethtool, adjusting the number of
+microseconds between interrupts.
+
+To set the interrupt rate manually, you must disable adaptive mode::
+
+ # ethtool -C <ethX> adaptive-rx off adaptive-tx off
+
+For lower CPU utilization:
+
+ Disable adaptive ITR and lower Rx and Tx interrupts. The examples below
+ affect every queue of the specified interface.
+
+ Setting rx-usecs and tx-usecs to 80 will limit interrupts to about
+ 12,500 interrupts per second per queue::
+
+ # ethtool -C <ethX> adaptive-rx off adaptive-tx off rx-usecs 80 tx-usecs 80
+
+For reduced latency:
+
+ Disable adaptive ITR and ITR by setting rx-usecs and tx-usecs to 0
+ using ethtool::
+
+ # ethtool -C <ethX> adaptive-rx off adaptive-tx off rx-usecs 0 tx-usecs 0
+
+Per-queue interrupt rate settings:
+
+ The following examples are for queues 1 and 3, but you can adjust other
+ queues.
+
+ To disable Rx adaptive ITR and set static Rx ITR to 10 microseconds or
+ about 100,000 interrupts/second, for queues 1 and 3::
+
+ # ethtool --per-queue <ethX> queue_mask 0xa --coalesce adaptive-rx off
+ rx-usecs 10
+
+ To show the current coalesce settings for queues 1 and 3::
+
+ # ethtool --per-queue <ethX> queue_mask 0xa --show-coalesce
+
+Bounding interrupt rates using rx-usecs-high:
+
+ :Valid Range: 0-236 (0=no limit)
+
+ The range of 0-236 microseconds provides an effective range of 4,237 to
+ 250,000 interrupts per second. The value of rx-usecs-high can be set
+ independently of rx-usecs and tx-usecs in the same ethtool command, and is
+ also independent of the adaptive interrupt moderation algorithm. The
+ underlying hardware supports granularity in 4-microsecond intervals, so
+ adjacent values may result in the same interrupt rate.
+
+ The following command would disable adaptive interrupt moderation, and allow
+ a maximum of 5 microseconds before indicating a receive or transmit was
+ complete. However, instead of resulting in as many as 200,000 interrupts per
+ second, it limits total interrupts per second to 50,000 via the rx-usecs-high
+ parameter.
+
+ ::
+
+ # ethtool -C <ethX> adaptive-rx off adaptive-tx off rx-usecs-high 20
+ rx-usecs 5 tx-usecs 5
+
+
+Virtualized Environments
+------------------------
+In addition to the other suggestions in this section, the following may be
+helpful to optimize performance in VMs.
+
+ Using the appropriate mechanism (vcpupin) in the VM, pin the CPUs to
+ individual LCPUs, making sure to use a set of CPUs included in the
+ device's local_cpulist: ``/sys/class/net/<ethX>/device/local_cpulist``.
+
+ Configure as many Rx/Tx queues in the VM as available. (See the iavf driver
+ documentation for the number of queues supported.) For example::
+
+ # ethtool -L <virt_interface> rx <max> tx <max>
- -> Device Drivers
- -> Network device support (NETDEVICES [=y])
- -> Ethernet driver support
- -> Intel devices
- -> Intel(R) Ethernet Connection E800 Series Support
Support
=======
For general information, go to the Intel support website at:
-
https://www.intel.com/support/
or the Intel Wired Networking project hosted by Sourceforge at:
-
https://sourceforge.net/projects/e1000
If an issue is identified with the released source code on a supported kernel
with a supported adapter, email the specific information related to the issue
to e1000-devel@lists.sf.net.
+
+
+Trademarks
+==========
+Intel is a trademark or registered trademark of Intel Corporation or its
+subsidiaries in the United States and/or other countries.
+
+* Other names and brands may be claimed as the property of others.
diff --git a/Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst b/Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
index 61e850460e18..dd5cd69467be 100644
--- a/Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
+++ b/Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
@@ -217,3 +217,73 @@ For example::
NPA_AF_ERR:
NPA Error Interrupt Reg : 4096
AQ Doorbell Error
+
+
+NIX Reporters
+-------------
+The NIX reporters are responsible for reporting and recovering the following group of errors:
+
+1. GENERAL events
+
+ - Receive mirror/multicast packet drop due to insufficient buffer.
+ - SMQ Flush operation.
+
+2. ERROR events
+
+ - Memory Fault due to WQE read/write from multicast/mirror buffer.
+ - Receive multicast/mirror replication list error.
+ - Receive packet on an unmapped PF.
+ - Fault due to NIX_AQ_INST_S read or NIX_AQ_RES_S write.
+ - AQ Doorbell Error.
+
+3. RAS events
+
+ - RAS Error Reporting for NIX Receive Multicast/Mirror Entry Structure.
+ - RAS Error Reporting for WQE/Packet Data read from Multicast/Mirror Buffer.
+ - RAS Error Reporting for NIX_AQ_INST_S/NIX_AQ_RES_S.
+
+4. RVU events
+
+ - Error due to unmapped slot.
+
+Sample Output::
+
+ ~# ./devlink health
+ pci/0002:01:00.0:
+ reporter hw_npa_intr
+ state healthy error 0 recover 0 grace_period 0 auto_recover true auto_dump true
+ reporter hw_npa_gen
+ state healthy error 0 recover 0 grace_period 0 auto_recover true auto_dump true
+ reporter hw_npa_err
+ state healthy error 0 recover 0 grace_period 0 auto_recover true auto_dump true
+ reporter hw_npa_ras
+ state healthy error 0 recover 0 grace_period 0 auto_recover true auto_dump true
+ reporter hw_nix_intr
+ state healthy error 1121 recover 1121 last_dump_date 2021-01-19 last_dump_time 05:42:26 grace_period 0 auto_recover true auto_dump true
+ reporter hw_nix_gen
+ state healthy error 949 recover 949 last_dump_date 2021-01-19 last_dump_time 05:42:43 grace_period 0 auto_recover true auto_dump true
+ reporter hw_nix_err
+ state healthy error 1147 recover 1147 last_dump_date 2021-01-19 last_dump_time 05:42:59 grace_period 0 auto_recover true auto_dump true
+ reporter hw_nix_ras
+ state healthy error 409 recover 409 last_dump_date 2021-01-19 last_dump_time 05:43:16 grace_period 0 auto_recover true auto_dump true
+
+Each reporter dumps the
+
+ - Error Type
+ - Error Register value
+ - Reason in words
+
+For example::
+
+ ~# devlink health dump show pci/0002:01:00.0 reporter hw_nix_intr
+ NIX_AF_RVU:
+ NIX RVU Interrupt Reg : 1
+ Unmap Slot Error
+ ~# devlink health dump show pci/0002:01:00.0 reporter hw_nix_gen
+ NIX_AF_GENERAL:
+ NIX General Interrupt Reg : 1
+ Rx multicast pkt drop
+ ~# devlink health dump show pci/0002:01:00.0 reporter hw_nix_err
+ NIX_AF_ERR:
+ NIX Error Interrupt Reg : 64
+ Rx on unmapped PF_FUNC
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst
index e9b65035cd47..1b7e32d8a61b 100644
--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst
@@ -12,11 +12,13 @@ Contents
- `Enabling the driver and kconfig options`_
- `Devlink info`_
- `Devlink parameters`_
+- `mlx5 subfunction`_
+- `mlx5 function attributes`_
- `Devlink health reporters`_
- `mlx5 tracepoints`_
Enabling the driver and kconfig options
-================================================
+=======================================
| mlx5 core is modular and most of the major mlx5 core driver features can be selected (compiled in/out)
| at build time via kernel Kconfig flags.
@@ -97,6 +99,11 @@ Enabling the driver and kconfig options
| Provides low-level InfiniBand/RDMA and `RoCE <https://community.mellanox.com/s/article/recommended-network-configuration-examples-for-roce-deployment>`_ support.
+**CONFIG_MLX5_SF=(y/n)**
+
+| Build support for subfunction.
+| Subfunctions are more lightweight than PCI SR-IOV VFs. Choosing this option
+| will enable support for creating subfunction devices.
**External options** ( Choose if the corresponding mlx5 feature is required )
@@ -176,6 +183,214 @@ User command examples:
values:
cmode driverinit value true
+mlx5 subfunction
+================
+mlx5 supports subfunction management using devlink port (see :ref:`Documentation/networking/devlink/devlink-port.rst <devlink_port>`) interface.
+
+A Subfunction has its own function capabilities and its own resources. This
+means a subfunction has its own dedicated queues (txq, rxq, cq, eq). These
+queues are neither shared nor stolen from the parent PCI function.
+
+When a subfunction is RDMA capable, it has its own QP1, GID table and rdma
+resources neither shared nor stolen from the parent PCI function.
+
+A subfunction has a dedicated window in PCI BAR space that is not shared
+with the other subfunctions or the parent PCI function. This ensures that all
+devices (netdev, rdma, vdpa, etc.) of the subfunction access only their
+assigned PCI BAR space.
+
+A Subfunction supports eswitch representation through which it supports tc
+offloads. The user configures eswitch to send/receive packets from/to
+the subfunction port.
+
+Subfunctions share PCI level resources such as PCI MSI-X IRQs with
+other subfunctions and/or with its parent PCI function.
+
+Example mlx5 software, system and device view::
+
+ _______
+ | admin |
+ | user |----------
+ |_______| |
+ | |
+ ____|____ __|______ _________________
+ | | | | | |
+ | devlink | | tc tool | | user |
+ | tool | |_________| | applications |
+ |_________| | |_________________|
+ | | | |
+ | | | | Userspace
+ +---------|-------------|-------------------|----------|--------------------+
+ | | +----------+ +----------+ Kernel
+ | | | netdev | | rdma dev |
+ | | +----------+ +----------+
+ (devlink port add/del | ^ ^
+ port function set) | | |
+ | | +---------------|
+ _____|___ | | _______|_______
+ | | | | | mlx5 class |
+ | devlink | +------------+ | | drivers |
+ | kernel | | rep netdev | | |(mlx5_core,ib) |
+ |_________| +------------+ | |_______________|
+ | | | ^
+ (devlink ops) | | (probe/remove)
+ _________|________ | | ____|________
+ | subfunction | | +---------------+ | subfunction |
+ | management driver|----- | subfunction |---| driver |
+ | (mlx5_core) | | auxiliary dev | | (mlx5_core) |
+ |__________________| +---------------+ |_____________|
+ | ^
+ (sf add/del, vhca events) |
+ | (device add/del)
+ _____|____ ____|________
+ | | | subfunction |
+ | PCI NIC |---- activate/deactivate events-->| host driver |
+ |__________| | (mlx5_core) |
+ |_____________|
+
+Subfunction is created using devlink port interface.
+
+- Change device to switchdev mode::
+
+ $ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
+
+- Add a devlink port of subfunction flavour::
+
+ $ devlink port add pci/0000:06:00.0 flavour pcisf pfnum 0 sfnum 88
+ pci/0000:06:00.0/32768: type eth netdev eth6 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
+ function:
+ hw_addr 00:00:00:00:00:00 state inactive opstate detached
+
+- Show a devlink port of the subfunction::
+
+ $ devlink port show pci/0000:06:00.0/32768
+ pci/0000:06:00.0/32768: type eth netdev enp6s0pf0sf88 flavour pcisf pfnum 0 sfnum 88
+ function:
+ hw_addr 00:00:00:00:00:00 state inactive opstate detached
+
+- Delete a devlink port of subfunction after use::
+
+ $ devlink port del pci/0000:06:00.0/32768
+
+mlx5 function attributes
+========================
+The mlx5 driver provides a mechanism to setup PCI VF/SF function attributes in
+a unified way for SmartNIC and non-SmartNIC.
+
+This is supported only when the eswitch mode is set to switchdev. Port function
+configuration of the PCI VF/SF is supported through devlink eswitch port.
+
+Port function attributes should be set before PCI VF/SF is enumerated by the
+driver.
+
+MAC address setup
+-----------------
+The mlx5 driver provides a mechanism to set up the MAC address of the PCI
+VF/SF.
+
+The configured MAC address of the PCI VF/SF will be used by netdevice and rdma
+device created for the PCI VF/SF.
+
+- Get the MAC address of the VF identified by its unique devlink port index::
+
+ $ devlink port show pci/0000:06:00.0/2
+ pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1
+ function:
+ hw_addr 00:00:00:00:00:00
+
+- Set the MAC address of the VF identified by its unique devlink port index::
+
+ $ devlink port function set pci/0000:06:00.0/2 hw_addr 00:11:22:33:44:55
+
+ $ devlink port show pci/0000:06:00.0/2
+ pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1
+ function:
+ hw_addr 00:11:22:33:44:55
+
+- Get the MAC address of the SF identified by its unique devlink port index::
+
+ $ devlink port show pci/0000:06:00.0/32768
+ pci/0000:06:00.0/32768: type eth netdev enp6s0pf0sf88 flavour pcisf pfnum 0 sfnum 88
+ function:
+ hw_addr 00:00:00:00:00:00
+
+- Set the MAC address of the SF identified by its unique devlink port index::
+
+ $ devlink port function set pci/0000:06:00.0/32768 hw_addr 00:00:00:00:88:88
+
+ $ devlink port show pci/0000:06:00.0/32768
+ pci/0000:06:00.0/32768: type eth netdev enp6s0pf0sf88 flavour pcisf pfnum 0 sfnum 88
+ function:
+ hw_addr 00:00:00:00:88:88
+
+SF state setup
+--------------
+To use the SF, the user must activate the SF using the SF function state
+attribute.
+
+- Get the state of the SF identified by its unique devlink port index::
+
+ $ devlink port show ens2f0npf0sf88
+ pci/0000:06:00.0/32768: type eth netdev ens2f0npf0sf88 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
+ function:
+ hw_addr 00:00:00:00:88:88 state inactive opstate detached
+
+- Activate the function and verify its state is active::
+
+ $ devlink port function set ens2f0npf0sf88 state active
+
+ $ devlink port show ens2f0npf0sf88
+ pci/0000:06:00.0/32768: type eth netdev ens2f0npf0sf88 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
+ function:
+ hw_addr 00:00:00:00:88:88 state active opstate detached
+
+Upon function activation, the PF driver instance gets an event from the device
+that a particular SF was activated. This is the cue to put the device on the
+bus, probe it, and instantiate the devlink instance and class-specific
+auxiliary devices for it.
+
+- Show the auxiliary device and port of the subfunction::
+
+ $ devlink dev show
+ auxiliary/mlx5_core.sf.4
+
+ $ devlink port show auxiliary/mlx5_core.sf.4/1
+ auxiliary/mlx5_core.sf.4/1: type eth netdev p0sf88 flavour virtual port 0 splittable false
+
+ $ rdma link show mlx5_0/1
+ link mlx5_0/1 state ACTIVE physical_state LINK_UP netdev p0sf88
+
+ $ rdma dev show
+ 8: rocep6s0f1: node_type ca fw 16.29.0550 node_guid 248a:0703:00b3:d113 sys_image_guid 248a:0703:00b3:d112
+ 13: mlx5_0: node_type ca fw 16.29.0550 node_guid 0000:00ff:fe00:8888 sys_image_guid 248a:0703:00b3:d112
+
+- Subfunction auxiliary device and class device hierarchy::
+
+ mlx5_core.sf.4
+ (subfunction auxiliary device)
+ /\
+ / \
+ / \
+ / \
+ / \
+ mlx5_core.eth.4 mlx5_core.rdma.4
+ (sf eth aux dev) (sf rdma aux dev)
+ | |
+ | |
+ p0sf88 mlx5_0
+ (sf netdev) (sf rdma device)
+
+Additionally, the SF port also gets an event when the driver attaches to the
+auxiliary device of the subfunction. This changes the operational state of the
+function, which gives the user the visibility to decide when it is safe to
+delete the SF port for graceful termination of the subfunction.
+
+- Show the SF port operational state::
+
+ $ devlink port show ens2f0npf0sf88
+ pci/0000:06:00.0/32768: type eth netdev ens2f0npf0sf88 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false
+ function:
+ hw_addr 00:00:00:00:88:88 state active opstate attached
+
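+- Deactivate the SF and delete its port when it is no longer needed; a sketch,
+  reusing the assumed port name and index from the examples above::
+
+    $ devlink port function set ens2f0npf0sf88 state inactive
+    $ devlink port del pci/0000:06:00.0/32768
+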
Devlink health reporters
========================
diff --git a/Documentation/networking/device_drivers/ethernet/ti/am65_nuss_cpsw_switchdev.rst b/Documentation/networking/device_drivers/ethernet/ti/am65_nuss_cpsw_switchdev.rst
new file mode 100644
index 000000000000..f24adfab6a1b
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/ti/am65_nuss_cpsw_switchdev.rst
@@ -0,0 +1,143 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================================================================
+Texas Instruments K3 AM65 CPSW NUSS switchdev based ethernet driver
+===================================================================
+
+:Version: 1.0
+
+Port renaming
+=============
+
+In order to rename via udev::
+
+ ip -d link show dev sw0p1 | grep switchid
+
+ SUBSYSTEM=="net", ACTION=="add", ATTR{phys_switch_id}==<switchid>, \
+ ATTR{phys_port_name}!="", NAME="sw0$attr{phys_port_name}"
+
+
+Multi mac mode
+==============
+
+- The driver is operating in multi-mac mode by default, thus
+ working as N individual network interfaces.
+
+Devlink configuration parameters
+================================
+
+See Documentation/networking/devlink/am65-nuss-cpsw-switch.rst
+
+Enabling "switch"
+=================
+
+The Switch mode can be enabled by configuring devlink driver parameter
+"switch_mode" to 1/true::
+
+ devlink dev param set platform/c000000.ethernet \
+ name switch_mode value true cmode runtime
+
+This can be done regardless of the state of the port netdev devices (UP/DOWN),
+but the port netdev devices have to be UP before joining the bridge, to avoid
+overwriting the bridge configuration, as the CPSW switch driver completely
+reloads its configuration when the first port changes its state to UP.
+
+When both interfaces have joined the bridge, the CPSW switch driver will enable
+marking packets with the offload_fwd_mark flag.
+
+All configuration is implemented via switchdev API.
+
+Bridge setup
+============
+
+::
+
+ devlink dev param set platform/c000000.ethernet \
+ name switch_mode value true cmode runtime
+
+ ip link add name br0 type bridge
+ ip link set dev br0 type bridge ageing_time 1000
+ ip link set dev sw0p1 up
+ ip link set dev sw0p2 up
+ ip link set dev sw0p1 master br0
+ ip link set dev sw0p2 master br0
+
+ [*] bridge vlan add dev br0 vid 1 pvid untagged self
+
+ [*] if vlan_filtering=1. where default_pvid=1
+
+ Note. Steps [*] are mandatory.
+
+
+On/off STP
+==========
+
+::
+
+ ip link set dev BRDEV type bridge stp_state 1/0
+
+VLAN configuration
+==================
+
+::
+
+ bridge vlan add dev br0 vid 1 pvid untagged self <---- add cpu port to VLAN 1
+
+Note. This step is mandatory for bridge/default_pvid.
+
+Add extra VLANs
+===============
+
+ 1. untagged::
+
+ bridge vlan add dev sw0p1 vid 100 pvid untagged master
+ bridge vlan add dev sw0p2 vid 100 pvid untagged master
+ bridge vlan add dev br0 vid 100 pvid untagged self <---- Add cpu port to VLAN100
+
+ 2. tagged::
+
+ bridge vlan add dev sw0p1 vid 100 master
+ bridge vlan add dev sw0p2 vid 100 master
+ bridge vlan add dev br0 vid 100 pvid tagged self <---- Add cpu port to VLAN100
+
+FDBs
+----
+
+FDBs are automatically added on the appropriate switch port upon detection
+
+Manually adding FDBs::
+
+ bridge fdb add aa:bb:cc:dd:ee:ff dev sw0p1 master vlan 100
+ bridge fdb add aa:bb:cc:dd:ee:fe dev sw0p2 master <---- Add on all VLANs
+
+MDBs
+----
+
+MDBs are automatically added on the appropriate switch port upon detection
+
+Manually adding MDBs::
+
+ bridge mdb add dev br0 port sw0p1 grp 239.1.1.1 permanent vid 100
+ bridge mdb add dev br0 port sw0p1 grp 239.1.1.1 permanent <---- Add on all VLANs
+
+Multicast flooding
+==================
+
+CPU port mcast_flooding is always on.
+
+Turning flooding on/off on switch ports::
+
+ bridge link set dev sw0p1 mcast_flood on/off
+
+Access and Trunk port
+=====================
+
+::
+
+ bridge vlan add dev sw0p1 vid 100 pvid untagged master
+ bridge vlan add dev sw0p2 vid 100 master
+
+
+ bridge vlan add dev br0 vid 100 self
+ ip link add link br0 name br0.100 type vlan id 100
+
+Note. Setting the PVID on the bridge device itself works only for the
+default VLAN (default_pvid).
diff --git a/Documentation/networking/device_drivers/index.rst b/Documentation/networking/device_drivers/index.rst
index a3113ffd7a16..d8279de7bf25 100644
--- a/Documentation/networking/device_drivers/index.rst
+++ b/Documentation/networking/device_drivers/index.rst
@@ -15,6 +15,7 @@ Contents:
ethernet/index
fddi/index
hamradio/index
+ qlogic/index
wan/index
wifi/index
diff --git a/Documentation/networking/device_drivers/qlogic/index.rst b/Documentation/networking/device_drivers/qlogic/index.rst
new file mode 100644
index 000000000000..ad05b04286e4
--- /dev/null
+++ b/Documentation/networking/device_drivers/qlogic/index.rst
@@ -0,0 +1,18 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+QLogic QLGE Device Drivers
+===============================================
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+ qlge
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/networking/device_drivers/qlogic/qlge.rst b/Documentation/networking/device_drivers/qlogic/qlge.rst
new file mode 100644
index 000000000000..0b888253d152
--- /dev/null
+++ b/Documentation/networking/device_drivers/qlogic/qlge.rst
@@ -0,0 +1,118 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================================
+QLogic QLGE 10Gb Ethernet device driver
+=======================================
+
+This driver uses drgn and devlink for debugging.
+
+Dump kernel data structures in drgn
+-----------------------------------
+
+To dump kernel data structures, the following Python script can be used
+in drgn:
+
+.. code-block:: python
+
+    def align(x, a):
+        """the alignment a should be a power of 2
+        """
+        mask = a - 1
+        return (x + mask) & ~mask
+
+    def struct_size(struct_type):
+        struct_str = "struct {}".format(struct_type)
+        return sizeof(Object(prog, struct_str, address=0x0))
+
+    def netdev_priv(netdevice):
+        NETDEV_ALIGN = 32
+        return netdevice.value_() + align(struct_size("net_device"), NETDEV_ALIGN)
+
+    name = 'xxx'
+    qlge_device = None
+    netdevices = prog['init_net'].dev_base_head.address_of_()
+    for netdevice in list_for_each_entry("struct net_device", netdevices, "dev_list"):
+        if netdevice.name.string_().decode('ascii') == name:
+            print(netdevice.name)
+            qlge_device = netdevice
+
+    ql_adapter = Object(prog, "struct ql_adapter", address=netdev_priv(qlge_device))
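+
+The script above is meant to be pasted into an interactive drgn session
+attached to the running kernel, started for example with (a sketch; this
+assumes root privileges and debug info for the running kernel)::
+
+ $ sudo drgn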
+
+The struct ql_adapter will be printed in drgn as follows::
+
+ >>> ql_adapter
+ (struct ql_adapter){
+ .ricb = (struct ricb){
+ .base_cq = (u8)0,
+ .flags = (u8)120,
+ .mask = (__le16)26637,
+ .hash_cq_id = (u8 [1024]){ 172, 142, 255, 255 },
+ .ipv6_hash_key = (__le32 [10]){},
+ .ipv4_hash_key = (__le32 [4]){},
+ },
+ .flags = (unsigned long)0,
+ .wol = (u32)0,
+ .nic_stats = (struct nic_stats){
+ .tx_pkts = (u64)0,
+ .tx_bytes = (u64)0,
+ .tx_mcast_pkts = (u64)0,
+ .tx_bcast_pkts = (u64)0,
+ .tx_ucast_pkts = (u64)0,
+ .tx_ctl_pkts = (u64)0,
+ .tx_pause_pkts = (u64)0,
+ ...
+ },
+ .active_vlans = (unsigned long [64]){
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 52780853100545, 18446744073709551615,
+ 18446619461681283072, 0, 42949673024, 2147483647,
+ },
+ .rx_ring = (struct rx_ring [17]){
+ {
+ .cqicb = (struct cqicb){
+ .msix_vect = (u8)0,
+ .reserved1 = (u8)0,
+ .reserved2 = (u8)0,
+ .flags = (u8)0,
+ .len = (__le16)0,
+ .rid = (__le16)0,
+ ...
+ },
+ .cq_base = (void *)0x0,
+ .cq_base_dma = (dma_addr_t)0,
+ }
+ ...
+ }
+ }
+
+Coredump via devlink
+--------------------
+
+The coredump obtained via devlink in JSON format looks like this:
+
+.. code:: shell
+
+ $ devlink health dump show DEVICE reporter coredump -p -j
+ {
+ "Core Registers": {
+ "segment": 1,
+ "values": [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 ]
+ },
+ "Test Logic Regs": {
+ "segment": 2,
+ "values": [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 ]
+ },
+ "RMII Registers": {
+ "segment": 3,
+ "values": [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 ]
+ },
+ ...
+ "Sem Registers": {
+ "segment": 50,
+ "values": [ 0,0,0,0 ]
+ }
+ }
+
+When the module parameter qlge_force_coredump is set to true, the MPI
+RISC is reset before the coredump is taken. Coredumping will therefore take
+much longer, since the devlink tool has to wait 5 seconds for the reset to
+finish.
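+
+For example, the parameter can be set when loading the module (a sketch)::
+
+ # modprobe qlge qlge_force_coredump=1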
diff --git a/Documentation/networking/devlink/am65-nuss-cpsw-switch.rst b/Documentation/networking/devlink/am65-nuss-cpsw-switch.rst
new file mode 100644
index 000000000000..1e589c26abff
--- /dev/null
+++ b/Documentation/networking/devlink/am65-nuss-cpsw-switch.rst
@@ -0,0 +1,26 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================
+am65-cpsw-nuss devlink support
+==============================
+
+This document describes the devlink features implemented by the ``am65-cpsw-nuss``
+device driver.
+
+Parameters
+==========
+
+The ``am65-cpsw-nuss`` driver implements the following driver-specific
+parameters.
+
+.. list-table:: Driver-specific parameters implemented
+ :widths: 5 5 5 85
+
+ * - Name
+ - Type
+ - Mode
+ - Description
+ * - ``switch_mode``
+ - Boolean
+ - runtime
+ - Enable switch mode
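+
+The parameter can be changed at runtime, for example (the devlink instance
+name below is taken from the switchdev driver documentation and may differ
+per board)::
+
+ devlink dev param set platform/c000000.ethernet \
+ name switch_mode value true cmode runtime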
diff --git a/Documentation/networking/devlink/devlink-port.rst b/Documentation/networking/devlink/devlink-port.rst
new file mode 100644
index 000000000000..e99b41599465
--- /dev/null
+++ b/Documentation/networking/devlink/devlink-port.rst
@@ -0,0 +1,199 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _devlink_port:
+
+============
+Devlink Port
+============
+
+``devlink-port`` is a port that exists on the device. It is a logically
+separate ingress/egress point of the device. A devlink port can be any one
+of many flavours. A devlink port flavour, along with the port attributes,
+describes what a port represents.
+
+A device driver that intends to publish a devlink port sets the
+devlink port attributes and registers the devlink port.
+
+Devlink port flavours are described below.
+
+.. list-table:: List of devlink port flavours
+ :widths: 33 90
+
+ * - Flavour
+ - Description
+ * - ``DEVLINK_PORT_FLAVOUR_PHYSICAL``
+ - Any kind of physical port. This can be an eswitch physical port or any
+ other physical port on the device.
+ * - ``DEVLINK_PORT_FLAVOUR_DSA``
+ - This indicates a DSA interconnect port.
+ * - ``DEVLINK_PORT_FLAVOUR_CPU``
+ - This indicates a CPU port applicable only to DSA.
+ * - ``DEVLINK_PORT_FLAVOUR_PCI_PF``
+ - This indicates an eswitch port representing a port of PCI
+ physical function (PF).
+ * - ``DEVLINK_PORT_FLAVOUR_PCI_VF``
+ - This indicates an eswitch port representing a port of PCI
+ virtual function (VF).
+ * - ``DEVLINK_PORT_FLAVOUR_PCI_SF``
+ - This indicates an eswitch port representing a port of PCI
+ subfunction (SF).
+ * - ``DEVLINK_PORT_FLAVOUR_VIRTUAL``
+ - This indicates a virtual port for the PCI virtual function.
+
+A devlink port can have a different type based on the link layer, as described below.
+
+.. list-table:: List of devlink port types
+ :widths: 23 90
+
+ * - Type
+ - Description
+ * - ``DEVLINK_PORT_TYPE_ETH``
+ - Driver should set this port type when a link layer of the port is
+ Ethernet.
+ * - ``DEVLINK_PORT_TYPE_IB``
+ - Driver should set this port type when a link layer of the port is
+ InfiniBand.
+ * - ``DEVLINK_PORT_TYPE_AUTO``
+ - This type is indicated by the user when driver should detect the port
+ type automatically.
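+
+For example, the type can be requested explicitly or left to the driver to
+detect (a sketch; the devlink port index used here is hypothetical)::
+
+ $ devlink port set pci/0000:06:00.0/1 type eth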
+
+PCI controllers
+---------------
+In most cases a PCI device has only one controller. A controller consists of
+potentially multiple physical functions, virtual functions and subfunctions. A
+function consists of one or more ports. Each such port is represented by a
+devlink eswitch port.
+
+A PCI device connected to multiple CPUs, multiple PCI root complexes or a
+SmartNIC, however, may have multiple controllers. For a device with multiple
+controllers, each controller is distinguished by a unique controller number.
+An eswitch on the PCI device supports the ports of multiple controllers.
+
+An example view of a system with two controllers::
+
+ ---------------------------------------------------------
+ | |
+ | --------- --------- ------- ------- |
+ ----------- | | vf(s) | | sf(s) | |vf(s)| |sf(s)| |
+ | server | | ------- ----/---- ---/----- ------- ---/--- ---/--- |
+ | pci rc |=== | pf0 |______/________/ | pf1 |___/_______/ |
+ | connect | | ------- ------- |
+ ----------- | | controller_num=1 (no eswitch) |
+ ------|--------------------------------------------------
+ (internal wire)
+ |
+ ---------------------------------------------------------
+ | devlink eswitch ports and reps |
+ | ----------------------------------------------------- |
+ | |ctrl-0 | ctrl-0 | ctrl-0 | ctrl-0 | ctrl-0 |ctrl-0 | |
+ | |pf0 | pf0vfN | pf0sfN | pf1 | pf1vfN |pf1sfN | |
+ | ----------------------------------------------------- |
+ | |ctrl-1 | ctrl-1 | ctrl-1 | ctrl-1 | ctrl-1 |ctrl-1 | |
+ | |pf0 | pf0vfN | pf0sfN | pf1 | pf1vfN |pf1sfN | |
+ | ----------------------------------------------------- |
+ | |
+ | |
+ ----------- | --------- --------- ------- ------- |
+ | smartNIC| | | vf(s) | | sf(s) | |vf(s)| |sf(s)| |
+ | pci rc |==| ------- ----/---- ---/----- ------- ---/--- ---/--- |
+ | connect | | | pf0 |______/________/ | pf1 |___/_______/ |
+ ----------- | ------- ------- |
+ | |
+ | local controller_num=0 (eswitch) |
+ ---------------------------------------------------------
+
+In the above example, the external controller (identified by controller number = 1)
+doesn't have an eswitch. The local controller (identified by controller number = 0)
+has the eswitch. The devlink instance on the local controller has eswitch
+devlink ports for both controllers.
+
+Function configuration
+======================
+
+A user can configure the function attribute before enumerating the PCI
+function. Usually this means the user should configure the function attribute
+before a bus specific device for the function is created. However, when
+SRIOV is enabled, virtual function devices are created on the PCI bus.
+Hence, the function attribute should be configured before binding the virtual
+function device to the driver. For subfunctions, this means the user should
+configure the port function attribute before activating the port function.
+
+A user may set the hardware address of the function using the
+'devlink port function set hw_addr' command. For an Ethernet port function
+this means a MAC address.
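+
+For example (a sketch; the devlink port index and MAC address are
+hypothetical)::
+
+ $ devlink port function set pci/0000:06:00.0/1 hw_addr 00:11:22:33:44:55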
+
+Subfunction
+============
+
+A subfunction is a lightweight function that has a parent PCI function on which
+it is deployed. Subfunctions are created and deployed in units of 1. Unlike
+SRIOV VFs, a subfunction doesn't require its own PCI virtual function.
+A subfunction communicates with the hardware through the parent PCI function.
+
+To use a subfunction, a three-step setup sequence is followed:
+
+(1) create - create a subfunction;
+(2) configure - configure subfunction attributes;
+(3) deploy - deploy the subfunction.
+
+Subfunction management is done using devlink port user interface.
+User performs setup on the subfunction management device.
+
+(1) Create
+----------
+A subfunction is created using a devlink port interface. A user adds the
+subfunction by adding a devlink port of subfunction flavour. The devlink
+kernel code calls down to the subfunction management driver (devlink ops) and
+asks it to create a subfunction devlink port. The driver then instantiates the
+subfunction port and any associated objects such as health reporters and a
+representor netdevice.
+
+(2) Configure
+-------------
+A subfunction devlink port is created but it is not active yet. That means the
+entities are created on the devlink side and the e-switch port representor is
+created, but the subfunction device itself is not created. A user might use the
+e-switch port representor to apply settings, put it into a bridge, add TC rules,
+etc. A user might as well configure the hardware address (such as the MAC
+address) of the subfunction while the subfunction is inactive.
+
+(3) Deploy
+----------
+Once a subfunction is configured, the user must activate it to use it. Upon
+activation, the subfunction management driver asks the subfunction management
+device to instantiate the subfunction device on a particular PCI function.
+A subfunction device is created on the :ref:`Documentation/driver-api/auxiliary_bus.rst <auxiliary_bus>`.
+At this point a matching subfunction driver binds to the subfunction's auxiliary device.
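+
+A minimal command sketch of the three steps above, assuming the subfunction
+management device is ``pci/0000:06:00.0``, subfunction number 88 is chosen,
+and the kernel assigns port index 32768 (all example values)::
+
+ $ devlink port add pci/0000:06:00.0 flavour pcisf pfnum 0 sfnum 88
+ $ devlink port function set pci/0000:06:00.0/32768 hw_addr 00:00:00:00:88:88
+ $ devlink port function set pci/0000:06:00.0/32768 state active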
+
+Terms and Definitions
+=====================
+
+.. list-table:: Terms and Definitions
+ :widths: 22 90
+
+ * - Term
+ - Definitions
+ * - ``PCI device``
+ - A physical PCI device having one or more PCI buses and consisting of
+ one or more PCI controllers.
+ * - ``PCI controller``
+ - A controller consists of potentially multiple physical functions,
+ virtual functions and subfunctions.
+ * - ``Port function``
+ - An object to manage the function of a port.
+ * - ``Subfunction``
+ - A lightweight function that has a parent PCI function on which it is
+ deployed.
+ * - ``Subfunction device``
+ - A bus device of the subfunction, usually on an auxiliary bus.
+ * - ``Subfunction driver``
+ - A device driver for the subfunction auxiliary device.
+ * - ``Subfunction management device``
+ - A PCI physical function that supports subfunction management.
+ * - ``Subfunction management driver``
+ - A device driver for a PCI physical function that supports
+ subfunction management using the devlink port interface.
+ * - ``Subfunction host driver``
+ - A device driver for a PCI physical function that hosts subfunction
+ devices. In most cases it is the same as the subfunction management
+ driver. When a subfunction is used on an external controller, the
+ subfunction management and host drivers are different.
diff --git a/Documentation/networking/devlink/devlink-resource.rst b/Documentation/networking/devlink/devlink-resource.rst
index 93e92d2f0752..3d5ae51e65a2 100644
--- a/Documentation/networking/devlink/devlink-resource.rst
+++ b/Documentation/networking/devlink/devlink-resource.rst
@@ -23,6 +23,20 @@ current size and related sub resources. To access a sub resource, you
specify the path of the resource. For example ``/IPv4/fib`` is the id for
the ``fib`` sub-resource under the ``IPv4`` resource.
+Generic Resources
+=================
+
+Generic resources are used to describe resources that can be shared by multiple
+device drivers and their description must be added to the following table:
+
+.. list-table:: List of Generic Resources
+ :widths: 10 90
+
+ * - Name
+ - Description
+ * - ``physical_ports``
+ - A limited capacity of physical ports that the switch ASIC can support
+
example usage
-------------
diff --git a/Documentation/networking/devlink/devlink-trap.rst b/Documentation/networking/devlink/devlink-trap.rst
index d875f3e1e9cf..935b6397e8cf 100644
--- a/Documentation/networking/devlink/devlink-trap.rst
+++ b/Documentation/networking/devlink/devlink-trap.rst
@@ -480,6 +480,11 @@ be added to the following table:
- ``drop``
- Traps packets that the device decided to drop in case they hit a
blackhole nexthop
+ * - ``dmac_filter``
+ - ``drop``
+ - Traps incoming packets that the device decided to drop because
+ the destination MAC is not configured in the MAC table and
+ the interface is not in promiscuous mode
Driver-specific Packet Traps
============================
diff --git a/Documentation/networking/devlink/index.rst b/Documentation/networking/devlink/index.rst
index d82874760ae2..8428a1220723 100644
--- a/Documentation/networking/devlink/index.rst
+++ b/Documentation/networking/devlink/index.rst
@@ -18,6 +18,7 @@ general.
devlink-info
devlink-flash
devlink-params
+ devlink-port
devlink-region
devlink-resource
devlink-reload
@@ -44,3 +45,4 @@ parameters, info versions, and other features it supports.
sja1105
qed
ti-cpsw-switch
+ am65-nuss-cpsw-switch
diff --git a/Documentation/networking/dsa/dsa.rst b/Documentation/networking/dsa/dsa.rst
index a8d15dd2b42b..e9517af5fe02 100644
--- a/Documentation/networking/dsa/dsa.rst
+++ b/Documentation/networking/dsa/dsa.rst
@@ -273,10 +273,6 @@ will not make us go through the switch tagging protocol transmit function, so
the Ethernet switch on the other end, expecting a tag will typically drop this
frame.
-Slave network devices check that the master network device is UP before allowing
-you to administratively bring UP these slave network devices. A common
-configuration mistake is forgetting to bring UP the master network device first.
-
Interactions with other subsystems
==================================
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index 30b98245979f..05073482db05 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -431,16 +431,17 @@ Request contents:
``ETHTOOL_A_LINKMODES_SPEED`` u32 link speed (Mb/s)
``ETHTOOL_A_LINKMODES_DUPLEX`` u8 duplex mode
``ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG`` u8 Master/slave port mode
+ ``ETHTOOL_A_LINKMODES_LANES`` u32 lanes
========================================== ====== ==========================
``ETHTOOL_A_LINKMODES_OURS`` bit set allows setting advertised link modes. If
autonegotiation is on (either set now or kept from before), advertised modes
are not changed (no ``ETHTOOL_A_LINKMODES_OURS`` attribute) and at least one
-of speed and duplex is specified, kernel adjusts advertised modes to all
-supported modes matching speed, duplex or both (whatever is specified). This
-autoselection is done on ethtool side with ioctl interface, netlink interface
-is supposed to allow requesting changes without knowing what exactly kernel
-supports.
+of speed, duplex and lanes is specified, kernel adjusts advertised modes to all
+supported modes matching speed, duplex, lanes or all (whatever is specified).
+This autoselection is done on ethtool side with ioctl interface, netlink
+interface is supposed to allow requesting changes without knowing what exactly
+kernel supports.
LINKSTATE_GET
diff --git a/Documentation/networking/filter.rst b/Documentation/networking/filter.rst
index debb59e374de..251c6bd73d15 100644
--- a/Documentation/networking/filter.rst
+++ b/Documentation/networking/filter.rst
@@ -1006,13 +1006,13 @@ Size modifier is one of ...
Mode modifier is one of::
- BPF_IMM 0x00 /* used for 32-bit mov in classic BPF and 64-bit in eBPF */
- BPF_ABS 0x20
- BPF_IND 0x40
- BPF_MEM 0x60
- BPF_LEN 0x80 /* classic BPF only, reserved in eBPF */
- BPF_MSH 0xa0 /* classic BPF only, reserved in eBPF */
- BPF_XADD 0xc0 /* eBPF only, exclusive add */
+ BPF_IMM 0x00 /* used for 32-bit mov in classic BPF and 64-bit in eBPF */
+ BPF_ABS 0x20
+ BPF_IND 0x40
+ BPF_MEM 0x60
+ BPF_LEN 0x80 /* classic BPF only, reserved in eBPF */
+ BPF_MSH 0xa0 /* classic BPF only, reserved in eBPF */
+ BPF_ATOMIC 0xc0 /* eBPF only, atomic operations */
eBPF has two non-generic instructions: (BPF_ABS | <size> | BPF_LD) and
(BPF_IND | <size> | BPF_LD) which are used to access packet data.
@@ -1044,16 +1044,57 @@ Unlike classic BPF instruction set, eBPF has generic load/store operations::
BPF_MEM | <size> | BPF_STX: *(size *) (dst_reg + off) = src_reg
BPF_MEM | <size> | BPF_ST: *(size *) (dst_reg + off) = imm32
BPF_MEM | <size> | BPF_LDX: dst_reg = *(size *) (src_reg + off)
- BPF_XADD | BPF_W | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg
- BPF_XADD | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg
-Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW. Note that 1 and
-2 byte atomic increments are not supported.
+Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW.
-eBPF has one 16-byte instruction: BPF_LD | BPF_DW | BPF_IMM which consists
+It also includes atomic operations, which use the immediate field for extra
+encoding::
+
+ .imm = BPF_ADD, .code = BPF_ATOMIC | BPF_W | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg
+ .imm = BPF_ADD, .code = BPF_ATOMIC | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg
+
+The basic atomic operations supported are::
+
+ BPF_ADD
+ BPF_AND
+ BPF_OR
+ BPF_XOR
+
+Each has equivalent semantics to the ``BPF_ADD`` example, that is: the
+memory location addressed by ``dst_reg + off`` is atomically modified, with
+``src_reg`` as the other operand. If the ``BPF_FETCH`` flag is set in the
+immediate, then these operations also overwrite ``src_reg`` with the
+value that was in memory before it was modified.
+
+The more special operations are::
+
+ BPF_XCHG
+
+This atomically exchanges ``src_reg`` with the value addressed by ``dst_reg +
+off``. ::
+
+ BPF_CMPXCHG
+
+This atomically compares the value addressed by ``dst_reg + off`` with
+``R0``. If they match it is replaced with ``src_reg``. In either case, the
+value that was there before is zero-extended and loaded back to ``R0``.
+
+Note that 1 and 2 byte atomic operations are not supported.
+
+Clang can generate atomic instructions by default when ``-mcpu=v3`` is
+enabled. If a lower version for ``-mcpu`` is set, the only atomic instruction
+Clang can generate is ``BPF_ADD`` *without* ``BPF_FETCH``. If you need to enable
+the atomics features, while keeping a lower ``-mcpu`` version, you can use
+``-Xclang -target-feature -Xclang +alu32``.
+
+You may encounter ``BPF_XADD`` - this is a legacy name for ``BPF_ATOMIC``,
+referring to the exclusive-add operation encoded when the immediate field is
+zero.
+
+eBPF has one 16-byte instruction: ``BPF_LD | BPF_DW | BPF_IMM`` which consists
of two consecutive ``struct bpf_insn`` 8-byte blocks and interpreted as single
instruction that loads 64-bit immediate value into a dst_reg.
-Classic BPF has similar instruction: BPF_LD | BPF_W | BPF_IMM which loads
+Classic BPF has similar instruction: ``BPF_LD | BPF_W | BPF_IMM`` which loads
32-bit immediate value into a register.
eBPF verifier
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index dd2b12a32b73..c7952ac5bd2f 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -178,6 +178,27 @@ min_adv_mss - INTEGER
The advertised MSS depends on the first hop route MTU, but will
never be lower than this setting.
+fib_notify_on_flag_change - INTEGER
+ Whether to emit RTM_NEWROUTE notifications whenever RTM_F_OFFLOAD/
+ RTM_F_TRAP/RTM_F_OFFLOAD_FAILED flags are changed.
+
+ After installing a route to the kernel, user space receives an
+ acknowledgment, which means the route was installed in the kernel,
+ but not necessarily in hardware.
+ It is also possible for a route already installed in hardware to change
+ its action and therefore its flags. For example, a host route that is
+ trapping packets can be "promoted" to perform decapsulation following
+ the installation of an IPinIP/VXLAN tunnel.
+ The notifications will indicate to user-space the state of the route.
+
+ Default: 0 (Do not emit notifications.)
+
+ Possible values:
+
+ - 0 - Do not emit notifications.
+ - 1 - Emit notifications.
+ - 2 - Emit notifications only for RTM_F_OFFLOAD_FAILED flag change.
+
IP Fragmentation:
ipfrag_high_thresh - LONG INTEGER
@@ -630,16 +651,15 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
default: initial size of receive buffer used by TCP sockets.
This value overrides net.core.rmem_default used by other protocols.
- Default: 87380 bytes. This value results in window of 65535 with
- default setting of tcp_adv_win_scale and tcp_app_win:0 and a bit
- less for default tcp_app_win. See below about these variables.
+ Default: 131072 bytes.
+ This value results in initial window of 65535.
max: maximal size of receive buffer allowed for automatically
selected receiver buffers for TCP socket. This value does not override
net.core.rmem_max. Calling setsockopt() with SO_RCVBUF disables
automatic tuning of that socket's receive buffer size, in which
case this value is ignored.
- Default: between 87380B and 6MB, depending on RAM size.
+ Default: between 131072 and 6MB, depending on RAM size.
tcp_sack - BOOLEAN
Enable select acknowledgments (SACKS).
@@ -1196,7 +1216,7 @@ icmp_errors_use_inbound_ifaddr - BOOLEAN
If non-zero, the message will be sent with the primary address of
the interface that received the packet that caused the icmp error.
- This is the behaviour network many administrators will expect from
+ This is the behaviour many network administrators will expect from
a router. And it can make debugging complicated network layouts
much easier.
@@ -1425,6 +1445,25 @@ rp_filter - INTEGER
Default value is 0. Note that some distributions enable it
in startup scripts.
+src_valid_mark - BOOLEAN
+ - 0 - The fwmark of the packet is not included in reverse path
+ route lookup. This allows for asymmetric routing configurations
+ utilizing the fwmark in only one direction, e.g., transparent
+ proxying.
+
+ - 1 - The fwmark of the packet is included in reverse path route
+ lookup. This permits rp_filter to function when the fwmark is
+ used for routing traffic in both directions.
+
+ This setting also affects the utilization of fwmark when
+ performing source address selection for ICMP replies, or
+ determining addresses stored for the IPOPT_TS_TSANDADDR and
+ IPOPT_RR IP options.
+
+ The max value from conf/{all,interface}/src_valid_mark is used.
+
+ Default value is 0.
+
arp_filter - BOOLEAN
- 1 - Allows you to have multiple network interfaces on the same
subnet, and have the ARPs for each interface be answered
@@ -1775,6 +1814,27 @@ nexthop_compat_mode - BOOLEAN
and extraneous notifications.
Default: true (backward compat mode)
+fib_notify_on_flag_change - INTEGER
+ Whether to emit RTM_NEWROUTE notifications whenever RTM_F_OFFLOAD/
+ RTM_F_TRAP/RTM_F_OFFLOAD_FAILED flags are changed.
+
+ After installing a route to the kernel, user space receives an
+ acknowledgment, which means the route was installed in the kernel,
+ but not necessarily in hardware.
+ It is also possible for a route already installed in hardware to change
+ its action and therefore its flags. For example, a host route that is
+ trapping packets can be "promoted" to perform decapsulation following
+ the installation of an IPinIP/VXLAN tunnel.
+ The notifications will indicate to user-space the state of the route.
+
+ Default: 0 (Do not emit notifications.)
+
+ Possible values:
+
+ - 0 - Do not emit notifications.
+ - 1 - Emit notifications.
+ - 2 - Emit notifications only for RTM_F_OFFLOAD_FAILED flag change.
+
IPv6 Fragmentation:
ip6frag_high_thresh - INTEGER
@@ -1807,12 +1867,24 @@ seg6_flowlabel - INTEGER
``conf/default/*``:
Change the interface-specific default settings.
+ These settings are used when creating new interfaces.
+
``conf/all/*``:
Change all the interface-specific settings.
[XXX: Other special features than forwarding?]
+conf/all/disable_ipv6 - BOOLEAN
+ Changing this value is the same as changing the ``conf/default/disable_ipv6``
+ setting and also all per-interface ``disable_ipv6`` settings to the same
+ value.
+
+ Reading this value does not have any particular meaning. It does not say
+ whether IPv6 support is enabled or disabled. The returned value can be 1
+ even when some interface has ``disable_ipv6`` set to 0 and has
+ configured IPv6 addresses.
+
conf/all/forwarding - BOOLEAN
Enable global IPv6 forwarding between all interfaces.
@@ -1871,6 +1943,16 @@ accept_ra_defrtr - BOOLEAN
- enabled if accept_ra is enabled.
- disabled if accept_ra is disabled.
+ra_defrtr_metric - UNSIGNED INTEGER
+ Route metric for default route learned in Router Advertisement. This value
+ will be assigned as metric for the default route learned via IPv6 Router
+ Advertisement. Takes effect only if accept_ra_defrtr is enabled.
+
+ Possible values:
+ 1 to 0xFFFFFFFF
+
+ Default: IP6_RT_PRIO_USER i.e. 1024.
+
accept_ra_from_local - BOOLEAN
Accept RA with source-address that is found on local machine
if the RA is otherwise proper and able to be accepted.
diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst
index ae2ae37cd921..a64c01b52b4c 100644
--- a/Documentation/networking/netdev-FAQ.rst
+++ b/Documentation/networking/netdev-FAQ.rst
@@ -272,6 +272,22 @@ to the mailing list, e.g.::
Posting as one thread is discouraged because it confuses patchwork
(as of patchwork 2.2.2).
+Can I reproduce the checks from patchwork on my local machine?
+--------------------------------------------------------------
+
+Checks in patchwork are mostly simple wrappers around existing kernel
+scripts; the sources are available at:
+
+https://github.com/kuba-moo/nipa/tree/master/tests
+
+Running all the builds and checks locally is a pain, can I post my patches and have the patchwork bot validate them?
+--------------------------------------------------------------------------------------------------------------------
+
+No, you must ensure that your patches are ready by testing them locally
+before posting to the mailing list. The patchwork build bot instance
+gets overloaded very easily and netdev@vger really doesn't need more
+traffic if we can help it.
+
Any other tips to help ensure my net/net-next patch gets OK'd?
--------------------------------------------------------------
Attention to detail. Re-read your own work as if you were the
diff --git a/Documentation/networking/netdev-features.rst b/Documentation/networking/netdev-features.rst
index a2d7d7160e39..d7b15bb64deb 100644
--- a/Documentation/networking/netdev-features.rst
+++ b/Documentation/networking/netdev-features.rst
@@ -182,3 +182,24 @@ stricter than Hardware LRO. A packet stream merged by Hardware GRO must
be re-segmentable by GSO or TSO back to the exact original packet stream.
Hardware GRO is dependent on RXCSUM since every packet successfully merged
by hardware must also have the checksum verified by hardware.
+
+* hsr-tag-ins-offload
+
+This should be set for devices which insert an HSR (High-availability Seamless
+Redundancy) or PRP (Parallel Redundancy Protocol) tag automatically.
+
+* hsr-tag-rm-offload
+
+This should be set for devices which remove HSR (High-availability Seamless
+Redundancy) or PRP (Parallel Redundancy Protocol) tags automatically.
+
+* hsr-fwd-offload
+
+This should be set for devices which forward HSR (High-availability Seamless
+Redundancy) frames from one port to another in hardware.
+
+* hsr-dup-offload
+
+This should be set for devices which automatically duplicate outgoing HSR
+(High-availability Seamless Redundancy) or PRP (Parallel Redundancy Protocol)
+frames in hardware.
diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst
index b2f7ec794bc8..06adfc2afcf0 100644
--- a/Documentation/networking/phy.rst
+++ b/Documentation/networking/phy.rst
@@ -216,7 +216,7 @@ put into an unsupported state.
Lastly, once the controller is ready to handle network traffic, you call
phy_start(phydev). This tells the PAL that you are ready, and configures the
PHY to connect to the network. If the MAC interrupt of your network driver
-also handles PHY status changes, just set phydev->irq to PHY_IGNORE_INTERRUPT
+also handles PHY status changes, just set phydev->irq to PHY_MAC_INTERRUPT
before you call phy_start and use phy_mac_interrupt() from the network
driver. If you don't want to use interrupts, set phydev->irq to PHY_POLL.
phy_start() enables the PHY interrupts (if applicable) and starts the
@@ -267,6 +267,12 @@ Some of the interface modes are described below:
duplex, pause or other settings. This is dependent on the MAC and/or
PHY behaviour.
+``PHY_INTERFACE_MODE_5GBASER``
+ This is the IEEE 802.3 Clause 129 defined 5GBASE-R protocol. It is
+ identical to the 10GBASE-R protocol defined in Clause 49, with the
+ exception that it operates at half the frequency. Please refer to the
+ IEEE standard for the definition.
+
``PHY_INTERFACE_MODE_10GBASER``
This is the IEEE 802.3 Clause 49 defined 10GBASE-R protocol used with
various different mediums. Please refer to the IEEE standard for a
@@ -286,6 +292,11 @@ Some of the interface modes are described below:
Note: due to legacy usage, some 10GBASE-R usage incorrectly makes
use of this definition.
+``PHY_INTERFACE_MODE_100BASEX``
+ This defines IEEE 802.3 Clause 24. The link operates at a fixed data
+ rate of 125Mbps using a 4B/5B encoding scheme, resulting in an underlying
+ data rate of 100Mbps.
+
Pause frames / flow control
===========================
diff --git a/Documentation/networking/sfp-phylink.rst b/Documentation/networking/sfp-phylink.rst
index 5aec7c8857d0..328f8d39eeb3 100644
--- a/Documentation/networking/sfp-phylink.rst
+++ b/Documentation/networking/sfp-phylink.rst
@@ -163,7 +163,7 @@ this documentation.
err = phylink_of_phy_connect(priv->phylink, node, flags);
For the most part, ``flags`` can be zero; these flags are passed to
- the of_phy_attach() inside this function call if a PHY is specified
+ the phy_attach_direct() inside this function call if a PHY is specified
in the DT node ``node``.
``node`` should be the DT node which contains the network phy property,
diff --git a/Documentation/networking/snmp_counter.rst b/Documentation/networking/snmp_counter.rst
index 4edd0d38779e..423d138b5ff3 100644
--- a/Documentation/networking/snmp_counter.rst
+++ b/Documentation/networking/snmp_counter.rst
@@ -314,7 +314,7 @@ https://lwn.net/Articles/576263/
* TcpExtTCPOrigDataSent
This counter is explained by `kernel commit f19c29e3e391`_, I pasted the
-explaination below::
+explanation below::
TCPOrigDataSent: number of outgoing packets with original data (excluding
retransmission but including data-in-SYN). This counter is different from
@@ -324,7 +324,7 @@ explaination below::
* TCPSynRetrans
This counter is explained by `kernel commit f19c29e3e391`_, I pasted the
-explaination below::
+explanation below::
TCPSynRetrans: number of SYN and SYN/ACK retransmits to break down
retransmissions into SYN, fast-retransmits, timeout retransmits, etc.
@@ -332,7 +332,7 @@ explaination below::
* TCPFastOpenActiveFail
This counter is explained by `kernel commit f19c29e3e391`_, I pasted the
-explaination below::
+explanation below::
TCPFastOpenActiveFail: Fast Open attempts (SYN/data) failed because
the remote does not accept it or the attempts timed out.
@@ -382,7 +382,7 @@ Defined in `RFC1213 tcpAttemptFails`_.
Defined in `RFC1213 tcpOutRsts`_. The RFC says this counter indicates
the 'segments sent containing the RST flag', but in linux kernel, this
-couner indicates the segments kerenl tried to send. The sending
+counter indicates the segments kernel tried to send. The sending
process might be failed due to some errors (e.g. memory alloc failed).
.. _RFC1213 tcpOutRsts: https://tools.ietf.org/html/rfc1213#page-52
@@ -700,7 +700,7 @@ SACK option could have up to 4 blocks, they are checked
individually. E.g., if 3 blocks of a SACk is invalid, the
corresponding counter would be updated 3 times. The comment of the
`Add counters for discarded SACK blocks`_ patch has additional
-explaination:
+explanation:
.. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32
@@ -829,7 +829,7 @@ PAWS check fails or the received sequence number is out of window.
* TcpExtTCPACKSkippedTimeWait
-Tha ACK is skipped in Time-Wait status, the reason would be either
+The ACK is skipped in Time-Wait status, the reason would be either
PAWS check failed or the received sequence number is out of window.
* TcpExtTCPACKSkippedChallenge
@@ -984,7 +984,7 @@ TcpExtSyncookiesRecv counter wont be updated.
Challenge ACK
=============
-For details of challenge ACK, please refer the explaination of
+For details of challenge ACK, please refer the explanation of
TcpExtTCPACKSkippedChallenge.
* TcpExtTCPChallengeACK
@@ -1002,7 +1002,7 @@ prune
=====
When a socket is under memory pressure, the TCP stack will try to
reclaim memory from the receiving queue and out of order queue. One of
-the reclaiming method is 'collapse', which means allocate a big sbk,
+the reclaiming method is 'collapse', which means allocate a big skb,
copy the contiguous skbs to the single big skb, and free these
contiguous skbs.
@@ -1163,7 +1163,7 @@ The server side nstat output::
IpExtOutOctets 52 0.0
IpExtInNoECTPkts 1 0.0
-Input a string in nc client side again ('world' in our exmaple)::
+Input a string in nc client side again ('world' in our example)::
nstatuser@nstat-a:~$ nc -v nstat-b 9000
Connection to nstat-b 9000 port [tcp/*] succeeded!
@@ -1211,7 +1211,7 @@ replied an ACK. But kernel handled them in different ways. When the
TCP window scale option is not used, kernel will try to enable fast
path immediately when the connection comes into the established state,
but if the TCP window scale option is used, kernel will disable the
-fast path at first, and try to enable it after kerenl receives
+fast path at first, and try to enable it after kernel receives
packets. We could use the 'ss' command to verify whether the window
scale option is used. e.g. run below command on either server or
client::
@@ -1343,7 +1343,7 @@ Check TcpExtTCPAbortOnMemory on client::
nstatuser@nstat-a:~$ nstat | grep -i abort
TcpExtTCPAbortOnMemory 54 0.0
-Check orphane socket count on client::
+Check orphaned socket count on client::
nstatuser@nstat-a:~$ ss -s
Total: 131 (kernel 0)
@@ -1685,7 +1685,7 @@ Send 3 SYN repeatly to nstat-b::
nstatuser@nstat-a:~$ for i in {1..3}; do sudo tcpreplay -i ens3 /tmp/syn_fixcsum.pcap; done
-Check snmp cunter on nstat-b::
+Check snmp counter on nstat-b::
nstatuser@nstat-b:~$ nstat | grep -i skip
TcpExtTCPACKSkippedSynRecv 1 0.0
@@ -1770,7 +1770,7 @@ string 'foo' in our example::
Connection from nstat-a 42132 received!
foo
-On nstat-a, the tcpdump should have caputred the ACK. We should check
+On nstat-a, the tcpdump should have captured the ACK. We should check
the source port numbers of the two nc clients::
nstatuser@nstat-a:~$ ss -ta '( dport = :9000 || dport = :9001 )' | tee
@@ -1778,7 +1778,7 @@ the source port numbers of the two nc clients::
ESTAB 0 0 192.168.122.250:50208 192.168.122.251:9000
ESTAB 0 0 192.168.122.250:42132 192.168.122.251:9001
-Run tcprewrite, change port 9001 to port 9000, chagne port 42132 to
+Run tcprewrite, change port 9001 to port 9000, change port 42132 to
port 50208::
nstatuser@nstat-a:~$ tcprewrite --infile /tmp/seq_pre.pcap --outfile /tmp/seq.pcap -r 9001:9000 -r 42132:50208 --fixcsum
diff --git a/Documentation/networking/timestamping.rst b/Documentation/networking/timestamping.rst
index 03f7beade470..f682e88fa87e 100644
--- a/Documentation/networking/timestamping.rst
+++ b/Documentation/networking/timestamping.rst
@@ -55,7 +55,8 @@ struct __kernel_sock_timeval format.
SO_TIMESTAMP_OLD returns incorrect timestamps after the year 2038
on 32 bit machines.
-1.2 SO_TIMESTAMPNS (also SO_TIMESTAMPNS_OLD and SO_TIMESTAMPNS_NEW):
+1.2 SO_TIMESTAMPNS (also SO_TIMESTAMPNS_OLD and SO_TIMESTAMPNS_NEW)
+-------------------------------------------------------------------
This option is identical to SO_TIMESTAMP except for the returned data type.
Its struct timespec allows for higher resolution (ns) timestamps than the
diff --git a/Documentation/power/freezing-of-tasks.rst b/Documentation/power/freezing-of-tasks.rst
index 8bd693399834..53b6a56c4635 100644
--- a/Documentation/power/freezing-of-tasks.rst
+++ b/Documentation/power/freezing-of-tasks.rst
@@ -134,7 +134,7 @@ Generally speaking, there is a couple of reasons to use the freezing of tasks:
safeguards against race conditions that might occur in such a case.
Although Linus Torvalds doesn't like the freezing of tasks, he said this in one
-of the discussions on LKML (http://lkml.org/lkml/2007/4/27/608):
+of the discussions on LKML (https://lore.kernel.org/r/alpine.LFD.0.98.0704271801020.9964@woody.linux-foundation.org):
"RJW:> Why we freeze tasks at all or why we freeze kernel threads?
diff --git a/Documentation/power/index.rst b/Documentation/power/index.rst
index ced8a8007434..a0f5244fb427 100644
--- a/Documentation/power/index.rst
+++ b/Documentation/power/index.rst
@@ -30,6 +30,7 @@ Power Management
userland-swsusp
powercap/powercap
+ powercap/dtpm
regulator/consumer
regulator/design
diff --git a/Documentation/power/powercap/dtpm.rst b/Documentation/power/powercap/dtpm.rst
new file mode 100644
index 000000000000..a38dee3d815b
--- /dev/null
+++ b/Documentation/power/powercap/dtpm.rst
@@ -0,0 +1,212 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================================
+Dynamic Thermal Power Management framework
+==========================================
+
+In the embedded world, the complexity of the SoC leads to an
+increasing number of hotspots which need to be monitored and mitigated
+as a whole in order to prevent the temperature from going above the
+normative and legally stated 'skin temperature'.
+
+Another aspect is sustaining the performance for a given power budget,
+for example in virtual reality, where the user can feel dizziness if the
+performance is capped while a big CPU is processing something else. Or
+reducing the battery charging rate because the dissipated power is too high
+compared with the power consumed by other devices.
+
+User space is the most adequate place to dynamically act on the
+different devices by limiting their power according to an application
+profile: it has the knowledge of the platform.
+
+The Dynamic Thermal Power Management (DTPM) is a technique acting on
+the device power by limiting and/or balancing a power budget among
+different devices.
+
+The DTPM framework provides a unified interface to act on the
+device power.
+
+Overview
+========
+
+The DTPM framework relies on the powercap framework to create the
+powercap entries in the sysfs directory and implement the backend
+driver to do the connection with the power manageable device.
+
+The DTPM is a tree representation describing the power constraints
+shared between devices, not their physical positions.
+
+The nodes of the tree are a virtual description aggregating the power
+characteristics of the child nodes and their power limitations.
+
+The leaves of the tree are the real power manageable devices.
+
+For instance::
+
+ SoC
+ |
+ `-- pkg
+ |
+ |-- pd0 (cpu0-3)
+ |
+ `-- pd1 (cpu4-5)
+
+The pkg power will be the sum of pd0 and pd1 power numbers::
+
+ SoC (400mW - 3100mW)
+ |
+ `-- pkg (400mW - 3100mW)
+ |
+ |-- pd0 (100mW - 700mW)
+ |
+ `-- pd1 (300mW - 2400mW)
+
+When the nodes are inserted in the tree, their power characteristics are propagated to the parents::
+
+ SoC (600mW - 5900mW)
+ |
+ |-- pkg (400mW - 3100mW)
+ | |
+ | |-- pd0 (100mW - 700mW)
+ | |
+ | `-- pd1 (300mW - 2400mW)
+ |
+ `-- pd2 (200mW - 2800mW)
+
+Each node has a weight on a 2^10 basis reflecting the percentage of power consumption among its siblings::
+
+ SoC (w=1024)
+ |
+ |-- pkg (w=538)
+ | |
+ | |-- pd0 (w=231)
+ | |
+ | `-- pd1 (w=794)
+ |
+ `-- pd2 (w=486)
+
+ Note that the sum of the weights at the same level is equal to 1024.
+
+When a power limitation is applied to a node, it is distributed among the children according to their weights. For example, if we set a power limitation of 3200mW at the 'SoC' root node, the resulting tree will be::
+
+ SoC (w=1024) <--- power_limit = 3200mW
+ |
+ |-- pkg (w=538) --> power_limit = 1681mW
+ | |
+ | |-- pd0 (w=231) --> power_limit = 378mW
+ | |
+ | `-- pd1 (w=794) --> power_limit = 1303mW
+ |
+ `-- pd2 (w=486) --> power_limit = 1519mW
+
+
+Flat description
+----------------
+
+A root node is created and it is the parent of all the nodes. This
+description is the simplest one and it is supposed to give user
+space a flat representation of all the devices supporting the power
+limitation, without any power limitation distribution.
+
+Hierarchical description
+------------------------
+
+The different devices supporting the power limitation are represented
+hierarchically. There is one root node, and all intermediate nodes
+group child nodes, which can themselves be intermediate nodes or real
+devices.
+
+The intermediate nodes aggregate the power information and allow
+setting the power limit given the weight of the nodes.
+
+User space API
+==============
+
+As stated in the overview, the DTPM framework is built on top of the
+powercap framework. Thus the sysfs interface is the same, please refer
+to the powercap documentation for further details.
+
+ * power_uw: Instantaneous power consumption. If the node is an
+ intermediate node, then the power consumption will be the sum of all
+ the children's power consumption.
+
+ * max_power_range_uw: The power range resulting from the maximum power
+ minus the minimum power.
+
+ * name: The name of the node. This is implementation dependent. Even
+ though it is not recommended for user space, several nodes can have
+ the same name.
+
+ * constraint_X_name: The name of the constraint.
+
+ * constraint_X_max_power_uw: The maximum power limit to be applicable
+ to the node.
+
+ * constraint_X_power_limit_uw: The power limit to be applied to the
+ node. If the value contained in constraint_X_max_power_uw is set,
+ the constraint will be removed.
+
+ * constraint_X_time_window_us: The meaning of this file will depend
+ on the constraint number.
+
+Constraints
+-----------
+
+ * Constraint 0: The power limitation is immediately applied, without
+ limitation in time.
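+
+As a usage sketch, assuming the platform exposes the DTPM tree under the
+hypothetical powercap path ``/sys/class/powercap/dtpm/dtpm:0``::
+
+ # read the instantaneous power consumption of the node, in microwatts
+ cat /sys/class/powercap/dtpm/dtpm:0/power_uw
+ # apply a 3.2W power limit through constraint 0
+ echo 3200000 > /sys/class/powercap/dtpm/dtpm:0/constraint_0_power_limit_uw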
+
+Kernel API
+==========
+
+Overview
+--------
+
+The DTPM framework has no power limiting backend support. It is
+generic and provides a set of APIs to let the different drivers
+implement the backend part for the power limitation and create the
+power constraints tree.
+
+It is up to the platform to provide the initialization function to
+allocate and link the different nodes of the tree.
+
+A special macro has the role of declaring a node and the corresponding
+initialization function via a description structure. This structure contains
+an optional parent field allowing different devices to be hooked into an
+already existing tree at boot time.
+
+For instance::
+
+ struct dtpm_descr my_descr = {
+ .name = "my_name",
+ .init = my_init_func,
+ };
+
+ DTPM_DECLARE(my_descr);
+
+The nodes of the DTPM tree are described with the dtpm structure.
+Adding a new power limitable device is done in three steps:
+
+ * Allocate the dtpm node
+ * Set the power number of the dtpm node
+ * Register the dtpm node
+
+The registration of the dtpm node is done with the powercap
+ops. Basically, it must implement the callbacks to get and set the
+power and the limit.
+
+Alternatively, if the node to be inserted is an intermediate one, then
+a simple function to insert it as a future parent is available.
+
+If a device's power characteristics change, then the tree must
+be updated with the new power numbers and weights.
+
+Nomenclature
+------------
+
+ * dtpm_alloc() : Allocate and initialize a dtpm structure
+
+ * dtpm_register() : Add the dtpm node to the tree
+
+ * dtpm_unregister() : Remove the dtpm node from the tree
+
+ * dtpm_update_power() : Update the power characteristics of the dtpm node
diff --git a/Documentation/power/runtime_pm.rst b/Documentation/power/runtime_pm.rst
index 0553008b6279..d9c777b18f7a 100644
--- a/Documentation/power/runtime_pm.rst
+++ b/Documentation/power/runtime_pm.rst
@@ -579,7 +579,7 @@ should be used. Of course, for this purpose the device's runtime PM has to be
enabled earlier by calling pm_runtime_enable().
Note, if the device may execute pm_runtime calls during the probe (such as
-if it is registers with a subsystem that may call back in) then the
+if it is registered with a subsystem that may call back in) then the
pm_runtime_get_sync() call paired with a pm_runtime_put() call will be
appropriate to ensure that the device is not put back to sleep during the
probe. This can happen with systems such as the network device layer.
@@ -587,11 +587,11 @@ probe. This can happen with systems such as the network device layer.
It may be desirable to suspend the device once ->probe() has finished.
Therefore the driver core uses the asynchronous pm_request_idle() to submit a
request to execute the subsystem-level idle callback for the device at that
-time. A driver that makes use of the runtime autosuspend feature, may want to
+time. A driver that makes use of the runtime autosuspend feature may want to
update the last busy mark before returning from ->probe().
Moreover, the driver core prevents runtime PM callbacks from racing with the bus
-notifier callback in __device_release_driver(), which is necessary, because the
+notifier callback in __device_release_driver(), which is necessary because the
notifier is used by some subsystems to carry out operations affecting the
runtime PM functionality. It does so by calling pm_runtime_get_sync() before
driver_sysfs_remove() and the BUS_NOTIFY_UNBIND_DRIVER notifications. This
@@ -603,7 +603,7 @@ calling pm_runtime_suspend() from their ->remove() routines, the driver core
executes pm_runtime_put_sync() after running the BUS_NOTIFY_UNBIND_DRIVER
notifications in __device_release_driver(). This requires bus types and
drivers to make their ->remove() callbacks avoid races with runtime PM directly,
-but also it allows of more flexibility in the handling of devices during the
+but it also allows more flexibility in the handling of devices during the
removal of their drivers.
Drivers in ->remove() callback should undo the runtime PM changes done
@@ -693,7 +693,7 @@ that the device appears to be runtime-suspended and its state is fine, so it
may be left in runtime suspend provided that all of its descendants are also
left in runtime suspend. If that happens, the PM core will not execute any
system suspend and resume callbacks for all of those devices, except for the
-complete callback, which is then entirely responsible for handling the device
+.complete() callback, which is then entirely responsible for handling the device
as appropriate. This only applies to system suspend transitions that are not
related to hibernation (see Documentation/driver-api/pm/devices.rst for more
information).
@@ -706,7 +706,7 @@ out the following operations:
right before executing the subsystem-level .prepare() callback for it and
pm_runtime_barrier() is called for every device right before executing the
subsystem-level .suspend() callback for it. In addition to that the PM core
- calls __pm_runtime_disable() with 'false' as the second argument for every
+ calls __pm_runtime_disable() with 'false' as the second argument for every
device right before executing the subsystem-level .suspend_late() callback
for it.
@@ -783,7 +783,7 @@ driver/base/power/generic_ops.c:
`int pm_generic_restore_noirq(struct device *dev);`
- invoke the ->restore_noirq() callback provided by the device's driver
-These functions are the defaults used by the PM core, if a subsystem doesn't
+These functions are the defaults used by the PM core if a subsystem doesn't
provide its own callbacks for ->runtime_idle(), ->runtime_suspend(),
->runtime_resume(), ->suspend(), ->suspend_noirq(), ->resume(),
->resume_noirq(), ->freeze(), ->freeze_noirq(), ->thaw(), ->thaw_noirq(),
diff --git a/Documentation/powerpc/syscall64-abi.rst b/Documentation/powerpc/syscall64-abi.rst
index cf9b2857c72a..dabee3729e5a 100644
--- a/Documentation/powerpc/syscall64-abi.rst
+++ b/Documentation/powerpc/syscall64-abi.rst
@@ -46,25 +46,38 @@ stack frame LR and CR save fields are not used.
Register preservation rules
---------------------------
-Register preservation rules match the ELF ABI calling sequence with the
-following differences:
-
-+------------------------------------------------------------------------+
-| For the sc instruction, differences with the ELF ABI |
-+--------------+--------------+------------------------------------------+
-| r0 | Volatile | (System call number.) |
-| rr3 | Volatile | (Parameter 1, and return value.) |
-| rr4-r8 | Volatile | (Parameters 2-6.) |
-| rcr0 | Volatile | (cr0.SO is the return error condition.) |
-| rcr1, cr5-7 | Nonvolatile | |
-| rlr | Nonvolatile | |
-+--------------+--------------+------------------------------------------+
-| For the scv 0 instruction, differences with the ELF ABI |
-+--------------+--------------+------------------------------------------+
-| r0 | Volatile | (System call number.) |
-| r3 | Volatile | (Parameter 1, and return value.) |
-| r4-r8 | Volatile | (Parameters 2-6.) |
-+--------------+--------------+------------------------------------------+
+Register preservation rules match the ELF ABI calling sequence with some
+differences.
+
+For the sc instruction, the differences from the ELF ABI are as follows:
+
++--------------+--------------------+-----------------------------------------+
+| Register | Preservation Rules | Purpose |
++==============+====================+=========================================+
+| r0 | Volatile | (System call number.) |
++--------------+--------------------+-----------------------------------------+
+| r3 | Volatile | (Parameter 1, and return value.) |
++--------------+--------------------+-----------------------------------------+
+| r4-r8 | Volatile | (Parameters 2-6.) |
++--------------+--------------------+-----------------------------------------+
+| cr0 | Volatile | (cr0.SO is the return error condition.) |
++--------------+--------------------+-----------------------------------------+
+| cr1, cr5-7 | Nonvolatile | |
++--------------+--------------------+-----------------------------------------+
+| lr | Nonvolatile | |
++--------------+--------------------+-----------------------------------------+
+
+For the scv 0 instruction, the differences from the ELF ABI are as follows:
+
++--------------+--------------------+-----------------------------------------+
+| Register | Preservation Rules | Purpose |
++==============+====================+=========================================+
+| r0 | Volatile | (System call number.) |
++--------------+--------------------+-----------------------------------------+
+| r3 | Volatile | (Parameter 1, and return value.) |
++--------------+--------------------+-----------------------------------------+
+| r4-r8 | Volatile | (Parameters 2-6.) |
++--------------+--------------------+-----------------------------------------+
All floating point and vector data registers as well as control and status
registers are nonvolatile.
diff --git a/Documentation/process/4.Coding.rst b/Documentation/process/4.Coding.rst
index 0825dc496f22..1f0d81f44e14 100644
--- a/Documentation/process/4.Coding.rst
+++ b/Documentation/process/4.Coding.rst
@@ -242,7 +242,7 @@ and try to avoid "fixes" which make the warning go away without addressing
its cause.
Note that not all compiler warnings are enabled by default. Build the
-kernel with "make EXTRA_CFLAGS=-W" to get the full set.
+kernel with "make KCFLAGS=-W" to get the full set.
The kernel provides several configuration options which turn on debugging
features; most of these are found in the "kernel hacking" submenu. Several
diff --git a/Documentation/process/adding-syscalls.rst b/Documentation/process/adding-syscalls.rst
index a3ecb236576c..906c47f1a9e5 100644
--- a/Documentation/process/adding-syscalls.rst
+++ b/Documentation/process/adding-syscalls.rst
@@ -501,7 +501,7 @@ table, but not from elsewhere in the kernel. If the syscall functionality is
useful to be used within the kernel, needs to be shared between an old and a
new syscall, or needs to be shared between a syscall and its compatibility
variant, it should be implemented by means of a "helper" function (such as
-``kern_xyzzy()``). This kernel function may then be called within the
+``ksys_xyzzy()``). This kernel function may then be called within the
syscall stub (``sys_xyzzy()``), the compatibility syscall stub
(``compat_sys_xyzzy()``), and/or other kernel code.
@@ -548,18 +548,18 @@ References and Sources
https://lwn.net/Articles/486306/
- Recommendation from Andrew Morton that all related information for a new
system call should come in the same email thread:
- https://lkml.org/lkml/2014/7/24/641
+ https://lore.kernel.org/r/20140724144747.3041b208832bbdf9fbce5d96@linux-foundation.org
- Recommendation from Michael Kerrisk that a new system call should come with
- a man page: https://lkml.org/lkml/2014/6/13/309
+ a man page: https://lore.kernel.org/r/CAKgNAkgMA39AfoSoA5Pe1r9N+ZzfYQNvNPvcRN7tOvRb8+v06Q@mail.gmail.com
- Suggestion from Thomas Gleixner that x86 wire-up should be in a separate
- commit: https://lkml.org/lkml/2014/11/19/254
+ commit: https://lore.kernel.org/r/alpine.DEB.2.11.1411191249560.3909@nanos
- Suggestion from Greg Kroah-Hartman that it's good for new system calls to
- come with a man-page & selftest: https://lkml.org/lkml/2014/3/19/710
+ come with a man-page & selftest: https://lore.kernel.org/r/20140320025530.GA25469@kroah.com
- Discussion from Michael Kerrisk of new system call vs. :manpage:`prctl(2)` extension:
- https://lkml.org/lkml/2014/6/3/411
+ https://lore.kernel.org/r/CAHO5Pa3F2MjfTtfNxa8LbnkeeU8=YJ+9tDqxZpw7Gz59E-4AUg@mail.gmail.com
- Suggestion from Ingo Molnar that system calls that involve multiple
arguments should encapsulate those arguments in a struct, which includes a
- size field for future extensibility: https://lkml.org/lkml/2015/7/30/117
+ size field for future extensibility: https://lore.kernel.org/r/20150730083831.GA22182@gmail.com
- Numbering oddities arising from (re-)use of O_* numbering space flags:
- commit 75069f2b5bfb ("vfs: renumber FMODE_NONOTIFY and add to uniqueness
@@ -569,9 +569,9 @@ References and Sources
- commit bb458c644a59 ("Safer ABI for O_TMPFILE")
- Discussion from Matthew Wilcox about restrictions on 64-bit arguments:
- https://lkml.org/lkml/2008/12/12/187
+ https://lore.kernel.org/r/20081212152929.GM26095@parisc-linux.org
- Recommendation from Greg Kroah-Hartman that unknown flags should be
- policed: https://lkml.org/lkml/2014/7/17/577
+ policed: https://lore.kernel.org/r/20140717193330.GB4703@kroah.com
- Recommendation from Linus Torvalds that x32 system calls should prefer
compatibility with 64-bit versions rather than 32-bit versions:
- https://lkml.org/lkml/2011/8/31/244
+ https://lore.kernel.org/r/CA+55aFxfmwfB7jbbrXxa=K7VBYPfAvmu3XOkGrLbB1UFjX1+Ew@mail.gmail.com
diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst
index 98227226c4e5..42969ab37b34 100644
--- a/Documentation/process/coding-style.rst
+++ b/Documentation/process/coding-style.rst
@@ -69,9 +69,26 @@ something to hide:
if (condition) do_this;
do_something_everytime;
+Don't use commas to avoid using braces:
+
+.. code-block:: c
+
+ if (condition)
+ do_this(), do_that();
+
+Always use braces for multiple statements:
+
+.. code-block:: c
+
+ if (condition) {
+ do_this();
+ do_that();
+ }
+
Don't put multiple assignments on a single line either. Kernel coding style
is super simple. Avoid tricky expressions.
+
Outside of comments, documentation and except in Kconfig, spaces are never
used for indentation, and the above example is deliberately broken.
@@ -306,8 +323,7 @@ that counts the number of active users, you should call that
Encoding the type of a function into the name (so-called Hungarian
notation) is asinine - the compiler knows the types anyway and can check
-those, and it only confuses the programmer. No wonder Microsoft makes buggy
-programs.
+those, and it only confuses the programmer.
LOCAL variable names should be short, and to the point. If you have
some random integer loop counter, it should probably be called ``i``.
diff --git a/Documentation/process/howto.rst b/Documentation/process/howto.rst
index 7a5c105e34d4..e4beeca57e5f 100644
--- a/Documentation/process/howto.rst
+++ b/Documentation/process/howto.rst
@@ -342,16 +342,10 @@ Adventurous testers are very welcome to runtime-test the linux-next.
Bug Reporting
-------------
-https://bugzilla.kernel.org is where the Linux kernel developers track kernel
-bugs. Users are encouraged to report all bugs that they find in this
-tool. For details on how to use the kernel bugzilla, please see:
-
- https://bugzilla.kernel.org/page.cgi?id=faq.html
-
The file 'Documentation/admin-guide/reporting-issues.rst' in the main kernel
-source directory has a good template for how to report a possible kernel bug,
-and details what kind of information is needed by the kernel developers to help
-track down the problem.
+source directory describes how to report a possible kernel bug, and details
+what kind of information is needed by the kernel developers to help track
+down the problem.
Managing bug reports
@@ -364,7 +358,13 @@ improve your skills, and other developers will be aware of your presence.
Fixing bugs is one of the best ways to get merits among other developers,
because not many people like wasting time fixing other people's bugs.
-To work in the already reported bug reports, go to https://bugzilla.kernel.org.
+To work on already reported bugs, find a subsystem you are interested in.
+Check in the MAINTAINERS file where bugs for that subsystem get reported; often
+it will be a mailing list, rarely a bugtracker. Search the archives of said
+place for recent reports and help where you see fit. You may also want to check
+https://bugzilla.kernel.org for bug reports; only a handful of kernel subsystems
+use it actively for reporting or tracking; nevertheless, bugs for the whole
+kernel get filed there.
Mailing lists
diff --git a/Documentation/process/magic-number.rst b/Documentation/process/magic-number.rst
index e02ff5ffb653..fa5a62f4150c 100644
--- a/Documentation/process/magic-number.rst
+++ b/Documentation/process/magic-number.rst
@@ -99,7 +99,6 @@ USB_SERIAL_PORT_MAGIC 0x7301 usb_serial_port ``drivers/usb/se
CG_MAGIC 0x00090255 ufs_cylinder_group ``include/linux/ufs_fs.h``
RPORT_MAGIC 0x00525001 r_port ``drivers/char/rocket_int.h``
LSEMAGIC 0x05091998 lse ``drivers/fc4/fc.c``
-GDTIOCTL_MAGIC 0x06030f07 gdth_iowr_str ``drivers/scsi/gdth_ioctl.h``
RIEBL_MAGIC 0x09051990 ``drivers/net/atarilance.c``
NBD_REQUEST_MAGIC 0x12560953 nbd_request ``include/linux/nbd.h``
RED_MAGIC2 0x170fc2a5 (any) ``mm/slab.c``
@@ -135,7 +134,6 @@ FW_HEADER_MAGIC 0x65726F66 fw_header ``drivers/atm/fo
SLOT_MAGIC 0x67267321 slot ``drivers/hotplug/cpqphp.h``
SLOT_MAGIC 0x67267322 slot ``drivers/hotplug/acpiphp.h``
LO_MAGIC 0x68797548 nbd_device ``include/linux/nbd.h``
-OPROFILE_MAGIC 0x6f70726f super_block ``drivers/oprofile/oprofilefs.h``
M3_STATE_MAGIC 0x734d724d m3_state ``sound/oss/maestro3.c``
VMALLOC_MAGIC 0x87654320 snd_alloc_track ``sound/core/memory.c``
KMALLOC_MAGIC 0x87654321 snd_alloc_track ``sound/core/memory.c``
@@ -143,7 +141,6 @@ PWC_MAGIC 0x89DC10AB pwc_device ``drivers/usb/me
NBD_REPLY_MAGIC 0x96744668 nbd_reply ``include/linux/nbd.h``
ENI155_MAGIC 0xa54b872d midway_eprom ``drivers/atm/eni.h``
CODA_MAGIC 0xC0DAC0DA coda_file_info ``fs/coda/coda_fs_i.h``
-DPMEM_MAGIC 0xc0ffee11 gdt_pci_sram ``drivers/scsi/gdth.h``
YAM_MAGIC 0xF10A7654 yam_port ``drivers/net/hamradio/yam.c``
CCB_MAGIC 0xf2691ad2 ccb ``drivers/scsi/ncr53c8xx.c``
QUEUE_MAGIC_FREE 0xf7e1c9a3 queue_entry ``drivers/scsi/arm/queue.c``
diff --git a/Documentation/process/submit-checklist.rst b/Documentation/process/submit-checklist.rst
index 230ee42f872f..b1bc2d37bd0a 100644
--- a/Documentation/process/submit-checklist.rst
+++ b/Documentation/process/submit-checklist.rst
@@ -89,30 +89,28 @@ and elsewhere regarding submitting Linux kernel patches.
Patches that change userspace interfaces should be CCed to
linux-api@vger.kernel.org.
-19) Check that it all passes ``make headers_check``.
-
-20) Has been checked with injection of at least slab and page-allocation
+19) Has been checked with injection of at least slab and page-allocation
failures. See ``Documentation/fault-injection/``.
If the new code is substantial, addition of subsystem-specific fault
injection might be appropriate.
-21) Newly-added code has been compiled with ``gcc -W`` (use
- ``make EXTRA_CFLAGS=-W``). This will generate lots of noise, but is good
+20) Newly-added code has been compiled with ``gcc -W`` (use
+ ``make KCFLAGS=-W``). This will generate lots of noise, but is good
for finding bugs like "warning: comparison between signed and unsigned".
-22) Tested after it has been merged into the -mm patchset to make sure
+21) Tested after it has been merged into the -mm patchset to make sure
that it still works with all of the other queued patches and various
changes in the VM, VFS, and other subsystems.
-23) All memory barriers {e.g., ``barrier()``, ``rmb()``, ``wmb()``} need a
+22) All memory barriers {e.g., ``barrier()``, ``rmb()``, ``wmb()``} need a
comment in the source code that explains the logic of what they are doing
and why.
-24) If any ioctl's are added by the patch, then also update
+23) If any ioctl's are added by the patch, then also update
``Documentation/userspace-api/ioctl/ioctl-number.rst``.
-25) If your modified source code depends on or uses any of the kernel
+24) If your modified source code depends on or uses any of the kernel
APIs or features that are related to the following ``Kconfig`` symbols,
then test multiple builds with the related ``Kconfig`` symbols disabled
and/or ``=m`` (if that option is available) [not all of these at the
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
index 5ba54120bef7..8c991c863628 100644
--- a/Documentation/process/submitting-patches.rst
+++ b/Documentation/process/submitting-patches.rst
@@ -556,6 +556,11 @@ which stable kernel versions should receive your fix. This is the preferred
method for indicating a bug fixed by the patch. See :ref:`describe_changes`
for more details.
+Note: Attaching a Fixes: tag does not subvert the stable kernel rules
+process nor the requirement to Cc: stable@vger.kernel.org on all stable
+patch candidates. For more information, please read
+:ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
+
.. _the_canonical_patch_format:
The canonical patch format
@@ -679,6 +684,26 @@ generates appropriate diffstats by default.)
See more details on the proper patch format in the following
references.
+Backtraces in commit messages
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Backtraces help document the call chain leading to a problem. However,
+not all backtraces are helpful. For example, early boot call chains are
+unique and obvious. Copying the full dmesg output verbatim, on the other
+hand, adds distracting information like timestamps, module lists, register and
+stack dumps.
+
+Therefore, the most useful backtraces should distill the relevant
+information from the dump, which makes it easier to focus on the real
+issue. Here is an example of a well-trimmed backtrace::
+
+ unchecked MSR access error: WRMSR to 0xd51 (tried to write 0x0000000000000064)
+ at rIP: 0xffffffffae059994 (native_write_msr+0x4/0x20)
+ Call Trace:
+ mba_wrmsr
+ update_domains
+ rdtgroup_mkdir
+
.. _explicit_in_reply_to:
Explicit In-Reply-To headers
@@ -769,13 +794,13 @@ Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer".
<http://www.kroah.com/log/linux/maintainer-06.html>
NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!
- <https://lkml.org/lkml/2005/7/11/336>
+ <https://lore.kernel.org/r/20050711.125305.08322243.davem@davemloft.net>
Kernel Documentation/process/coding-style.rst:
:ref:`Documentation/process/coding-style.rst <codingstyle>`
Linus Torvalds's mail on the canonical patch format:
- <http://lkml.org/lkml/2005/4/7/183>
+ <https://lore.kernel.org/r/Pine.LNX.4.58.0504071023190.28951@ppc970.osdl.org>
Andi Kleen, "On submitting kernel patches"
Some strategies to get difficult or controversial changes in.
diff --git a/Documentation/scheduler/sched-bwc.rst b/Documentation/scheduler/sched-bwc.rst
index 9801d6b284b1..845eee659199 100644
--- a/Documentation/scheduler/sched-bwc.rst
+++ b/Documentation/scheduler/sched-bwc.rst
@@ -2,8 +2,9 @@
CFS Bandwidth Control
=====================
-[ This document only discusses CPU bandwidth control for SCHED_NORMAL.
- The SCHED_RT case is covered in Documentation/scheduler/sched-rt-group.rst ]
+.. note::
+ This document only discusses CPU bandwidth control for SCHED_NORMAL.
+ The SCHED_RT case is covered in Documentation/scheduler/sched-rt-group.rst
CFS bandwidth control is a CONFIG_FAIR_GROUP_SCHED extension which allows the
specification of the maximum CPU bandwidth available to a group or hierarchy.
@@ -25,9 +26,15 @@ Management
----------
Quota and period are managed within the cpu subsystem via cgroupfs.
-cpu.cfs_quota_us: the total available run-time within a period (in microseconds)
-cpu.cfs_period_us: the length of a period (in microseconds)
-cpu.stat: exports throttling statistics [explained further below]
+.. note::
+ The cgroupfs files described in this section are only applicable
+ to cgroup v1. For cgroup v2, see
+ :ref:`Documentation/admin-guide/cgroup-v2.rst <cgroup-v2-cpu>`.
+
+- cpu.cfs_quota_us: the total available run-time within a period (in
+ microseconds)
+- cpu.cfs_period_us: the length of a period (in microseconds)
+- cpu.stat: exports throttling statistics [explained further below]
The default values are::
diff --git a/Documentation/scheduler/sched-deadline.rst b/Documentation/scheduler/sched-deadline.rst
index 14a2f7bf63fe..9d9be52f221a 100644
--- a/Documentation/scheduler/sched-deadline.rst
+++ b/Documentation/scheduler/sched-deadline.rst
@@ -707,7 +707,7 @@ Deadline Task Scheduling
and how to prevent non-root users "cheat" the system?
As already discussed, we are planning also to merge this work with the EDF
- throttling patches [https://lkml.org/lkml/2010/2/23/239] but we still are in
+ throttling patches [https://lore.kernel.org/r/cover.1266931410.git.fabio@helm.retis] but we still are in
the preliminary phases of the merge and we really seek feedback that would
help us decide on the direction it should take.
diff --git a/Documentation/scheduler/sched-design-CFS.rst b/Documentation/scheduler/sched-design-CFS.rst
index a96c72651877..59b2d1fb4dc4 100644
--- a/Documentation/scheduler/sched-design-CFS.rst
+++ b/Documentation/scheduler/sched-design-CFS.rst
@@ -34,9 +34,9 @@ In CFS the virtual runtime is expressed and tracked via the per-task
p->se.vruntime (nanosec-unit) value. This way, it's possible to accurately
timestamp and measure the "expected CPU time" a task should have gotten.
-[ small detail: on "ideal" hardware, at any time all tasks would have the same
- p->se.vruntime value --- i.e., tasks would execute simultaneously and no task
- would ever get "out of balance" from the "ideal" share of CPU time. ]
+ Small detail: on "ideal" hardware, at any time all tasks would have the same
+ p->se.vruntime value --- i.e., tasks would execute simultaneously and no task
+ would ever get "out of balance" from the "ideal" share of CPU time.
CFS's task picking logic is based on this p->se.vruntime value and it is thus
very simple: it always tries to run the task with the smallest p->se.vruntime
diff --git a/Documentation/scheduler/schedutil.txt b/Documentation/scheduler/schedutil.txt
new file mode 100644
index 000000000000..78f6b91e2291
--- /dev/null
+++ b/Documentation/scheduler/schedutil.txt
@@ -0,0 +1,169 @@
+
+
+NOTE: all this assumes a linear relation between frequency and work capacity;
+we know this is flawed, but it is the best workable approximation.
+
+
+PELT (Per Entity Load Tracking)
+-------------------------------
+
+With PELT we track some metrics across the various scheduler entities, from
+individual tasks to task-group slices to CPU runqueues. As the basis for this
+we use an Exponentially Weighted Moving Average (EWMA); each period (1024us)
+is decayed such that y^32 = 0.5. That is, the most recent 32ms contribute
+half, while the rest of history contributes the other half.
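+
+As a rough worked example (derived directly from the y^32 = 0.5 definition
+above), the per-period decay factor is:
+
+ y = 0.5^(1/32) ~= 0.97857
+
+so a contribution from ~32ms ago is weighted by y^32 = 0.5, one from ~64ms
+ago by y^64 = 0.25, and so on.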
+
+Specifically:
+
+ ewma_sum(u) := u_0 + u_1*y + u_2*y^2 + ...
+
+ ewma(u) = ewma_sum(u) / ewma_sum(1)
+
+Since this is essentially a progression of an infinite geometric series, the
+results are composable, that is ewma(A) + ewma(B) = ewma(A+B). This property
+is key, since it gives the ability to recompose the averages when tasks move
+around.
+
+Note that blocked tasks still contribute to the aggregates (task-group slices
+and CPU runqueues), which reflects their expected contribution when they
+resume running.
+
+Using this we track 2 key metrics: 'running' and 'runnable'. 'Running'
+reflects the time an entity spends on the CPU, while 'runnable' reflects the
+time an entity spends on the runqueue. When there is only a single task these
+two metrics are the same, but once there is contention for the CPU 'running'
+will decrease to reflect the fraction of time each task spends on the CPU
+while 'runnable' will increase to reflect the amount of contention.
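+
+As an illustrative example: two always-runnable tasks sharing one CPU will
+each settle at a 'running' of about 0.5 (each gets half the CPU time), while
+each task's 'runnable' approaches 1 (each spends all of its time on the
+runqueue, either running or waiting).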
+
+For more detail see: kernel/sched/pelt.c
+
+
+Frequency- / CPU Invariance
+---------------------------
+
+Because consuming the CPU for 50% at 1GHz is not the same as consuming the CPU
+for 50% at 2GHz, nor is running 50% on a LITTLE CPU the same as running 50% on
+a big CPU, we allow architectures to scale the time delta with two ratios, one
+Dynamic Voltage and Frequency Scaling (DVFS) ratio and one microarch ratio.
+
+For simple DVFS architectures (where software is in full control) we trivially
+compute the ratio as:
+
+ f_cur
+ r_dvfs := -----
+ f_max
+
+For more dynamic systems where the hardware is in control of DVFS we use
+hardware counters (Intel APERF/MPERF, ARMv8.4-AMU) to provide us this ratio.
+For Intel specifically, we use:
+
+ APERF
+ f_cur := ----- * P0
+ MPERF
+
+ 4C-turbo; if available and turbo enabled
+ f_max := { 1C-turbo; if turbo enabled
+ P0; otherwise
+
+ f_cur
+ r_dvfs := min( 1, ----- )
+ f_max
+
+We pick 4C turbo over 1C turbo to make it slightly more sustainable.
+
+r_cpu is determined as the ratio of highest performance level of the current
+CPU vs the highest performance level of any other CPU in the system.
+
+ r_tot = r_dvfs * r_cpu
+
+The result is that the above 'running' and 'runnable' metrics become invariant
+of DVFS and CPU type. IOW, we can transfer and compare them between CPUs.
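+
+As an illustrative example (numbers picked arbitrarily): a task running for
+10ms of wall-clock time on a CPU at f_cur = 1GHz with f_max = 2GHz (r_dvfs =
+0.5), on a core with r_cpu = 0.8, accrues:
+
+ 10ms * r_dvfs * r_cpu = 10ms * 0.5 * 0.8 = 4ms
+
+of invariant 'running' time, the same as it would accrue by running flat
+out for 4ms on the most capable CPU at its maximum frequency.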
+
+For more detail see:
+
+ - kernel/sched/pelt.h:update_rq_clock_pelt()
+ - arch/x86/kernel/smpboot.c:"APERF/MPERF frequency ratio computation."
+ - Documentation/scheduler/sched-capacity.rst:"1. CPU Capacity + 2. Task utilization"
+
+
+UTIL_EST / UTIL_EST_FASTUP
+--------------------------
+
+Because periodic tasks have their averages decayed while they sleep, even
+though their expected utilization when running is the same, they suffer a
+(DVFS) ramp-up when they start running again.
+
+To alleviate this, UTIL_EST (a default-enabled option) drives an Infinite
+Impulse Response (IIR) EWMA with the 'running' value on dequeue -- when it is
+highest. A further default-enabled option, UTIL_EST_FASTUP, modifies the IIR
+filter to instantly increase and only decay on decrease.
+
+A further runqueue wide sum (of runnable tasks) is maintained of:
+
+ util_est := \Sum_t max( t_running, t_util_est_ewma )
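+
+As an illustrative example: a periodic task whose util_est EWMA is 0.30 but
+whose decayed 'running' is only 0.10 right after wakeup contributes
+max(0.10, 0.30) = 0.30 to the sum, so frequency selection does not have to
+wait for 'running' to ramp back up.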
+
+For more detail see: kernel/sched/fair.c:util_est_dequeue()
+
+
+UCLAMP
+------
+
+It is possible to set effective u_min and u_max clamps on each CFS or RT task;
+the runqueue keeps a max aggregate of these clamps for all running tasks.
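+
+As an illustrative example: with two running tasks, one with u_min = 0.2 and
+one with u_min = 0.5, the effective runqueue-wide clamp is
+
+ max(0.2, 0.5) = 0.5
+
+so the effective utilization seen by frequency selection will not drop below
+0.5 while a task with that clamp is running.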
+
+For more detail see: include/uapi/linux/sched/types.h
+
+
+Schedutil / DVFS
+----------------
+
+Every time the scheduler load tracking is updated (task wakeup, task
+migration, time progression) we call out to schedutil to update the hardware
+DVFS state.
+
+The basis is the CPU runqueue's 'running' metric, which per the above is
+the frequency invariant utilization estimate of the CPU. From this we compute
+a desired frequency like:
+
+ max( running, util_est ); if UTIL_EST
+ u_cfs := { running; otherwise
+
+ clamp( u_cfs + u_rt , u_min, u_max ); if UCLAMP_TASK
+ u_clamp := { u_cfs + u_rt; otherwise
+
+ u := u_clamp + u_irq + u_dl; [approx. see source for more detail]
+
+ f_des := min( f_max, 1.25 u * f_max )
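+
+As an illustrative example (values picked arbitrarily): with no clamps and
+u = 0.4 on a CPU with f_max = 2GHz,
+
+ f_des = min( 2GHz, 1.25 * 0.4 * 2GHz ) = 1GHz
+
+i.e. the 1.25 factor leaves roughly 20% idle headroom at the selected
+frequency.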
+
+XXX IO-wait; when the update is due to a task wakeup from IO-completion we
+boost 'u' above.
+
+This frequency is then used to select a P-state/OPP or directly munged into a
+CPPC style request to the hardware.
+
+XXX: deadline tasks (Sporadic Task Model) allow us to calculate a hard f_min
+required to satisfy the workload.
+
+Because these callbacks are directly from the scheduler, the DVFS hardware
+interaction should be 'fast' and non-blocking. Schedutil supports
+rate-limiting DVFS requests for cases where hardware interaction is slow and
+expensive; this reduces effectiveness.
+
+For more information see: kernel/sched/cpufreq_schedutil.c
+
+
+NOTES
+-----
+
+ - On low-load scenarios, where DVFS is most relevant, the 'running' numbers
+ will closely reflect utilization.
+
+ - In saturated scenarios task movement will cause some transient dips;
+ suppose we have a CPU saturated with 4 tasks; when we migrate one task
+ to an idle CPU, the old CPU will have a 'running' value of 0.75 while the
+ new CPU will gain 0.25. This is inevitable and time progression will
+ correct this. XXX do we still guarantee f_max due to no idle-time?
+
+ - Much of the above is about avoiding DVFS dips, and independent DVFS domains
+ having to re-learn / ramp-up when load shifts.
+
diff --git a/Documentation/scsi/libsas.rst b/Documentation/scsi/libsas.rst
index 7216b5d25800..6589dfefbc02 100644
--- a/Documentation/scsi/libsas.rst
+++ b/Documentation/scsi/libsas.rst
@@ -189,13 +189,8 @@ num_phys
The event interface::
/* LLDD calls these to notify the class of an event. */
- void (*notify_ha_event)(struct sas_ha_struct *, enum ha_event);
- void (*notify_port_event)(struct sas_phy *, enum port_event);
- void (*notify_phy_event)(struct sas_phy *, enum phy_event);
-
-When sas_register_ha() returns, those are set and can be
-called by the LLDD to notify the SAS layer of such events
-the SAS layer.
+ void sas_notify_port_event(struct sas_phy *, enum port_event, gfp_t);
+ void sas_notify_phy_event(struct sas_phy *, enum phy_event, gfp_t);
The port notification::
diff --git a/Documentation/scsi/scsi-parameters.rst b/Documentation/scsi/scsi-parameters.rst
index e5f68b431f5c..c42c55e1e25e 100644
--- a/Documentation/scsi/scsi-parameters.rst
+++ b/Documentation/scsi/scsi-parameters.rst
@@ -38,9 +38,6 @@ parameters may be changed at runtime by the command
See drivers/scsi/BusLogic.c, comment before function
BusLogic_ParseDriverOptions().
- gdth= [HW,SCSI]
- See header of drivers/scsi/gdth.c.
-
gvp11= [HW,SCSI]
ips= [HW,SCSI] Adaptec / IBM ServeRAID controller
@@ -94,7 +91,7 @@ parameters may be changed at runtime by the command
(/proc/sys/dev/scsi/logging_level).
There is also a nice 'scsi_logging_level' script in the
S390-tools package, available for download at
- https://github.com/ibm-s390-tools/s390-tools/blob/master/scripts/scsi_logging_level
+ https://github.com/ibm-s390-linux/s390-tools/blob/master/scripts/scsi_logging_level
scsi_mod.scan= [SCSI] sync (default) scans SCSI busses as they are
discovered. async scans them in kernel threads,
diff --git a/Documentation/security/keys/core.rst b/Documentation/security/keys/core.rst
index aa0081685ee1..b3ed5c581034 100644
--- a/Documentation/security/keys/core.rst
+++ b/Documentation/security/keys/core.rst
@@ -1040,8 +1040,8 @@ The keyctl syscall functions are:
"key" is the ID of the key to be watched.
- "queue_fd" is a file descriptor referring to an open "/dev/watch_queue"
- which manages the buffer into which notifications will be delivered.
+ "queue_fd" is a file descriptor referring to an open pipe which
+ manages the buffer into which notifications will be delivered.
"filter" is either NULL to remove a watch or a filter specification to
indicate what events are required from the key.
diff --git a/Documentation/security/lsm-development.rst b/Documentation/security/lsm-development.rst
index 31d92bc5fdd2..ac53e5065f79 100644
--- a/Documentation/security/lsm-development.rst
+++ b/Documentation/security/lsm-development.rst
@@ -2,7 +2,7 @@
Linux Security Module Development
=================================
-Based on https://lkml.org/lkml/2007/10/26/215,
+Based on https://lore.kernel.org/r/20071026073721.618b4778@laptopd505.fenrus.org,
a new LSM is accepted into the kernel when its intent (a description of
what it tries to protect against and in what cases one would expect to
use it) has been appropriately documented in ``Documentation/admin-guide/LSM/``.
diff --git a/Documentation/sound/designs/index.rst b/Documentation/sound/designs/index.rst
index f0749943ccb2..1eb08e7bae52 100644
--- a/Documentation/sound/designs/index.rst
+++ b/Documentation/sound/designs/index.rst
@@ -14,3 +14,4 @@ Designs and Implementations
powersave
oss-emulation
seq-oss
+ jack-injection
diff --git a/Documentation/sound/designs/jack-injection.rst b/Documentation/sound/designs/jack-injection.rst
new file mode 100644
index 000000000000..f9790521523e
--- /dev/null
+++ b/Documentation/sound/designs/jack-injection.rst
@@ -0,0 +1,166 @@
+============================
+ALSA Jack Software Injection
+============================
+
+Simple Introduction On Jack Injection
+=====================================
+
+Here, jack injection means users can inject plugin or plugout events
+into the audio jacks through the debugfs interface; this is helpful for
+validating ALSA userspace changes. For example, suppose we change the
+audio profile switching code in pulseaudio and want to verify that the
+change works as expected and does not introduce a regression. In this
+case, we can inject plugin or plugout events into one or more audio
+jacks without needing physical access to the machine to plug or unplug
+devices in the audio jack.
+
+In this design, an audio jack does not map one-to-one to a physical
+audio jack. A physical audio jack sometimes carries multiple functions,
+and the ALSA driver creates multiple ``jack_kctl`` objects for one
+``snd_jack``: the ``snd_jack`` represents the physical audio jack and
+each ``jack_kctl`` represents one function. For example, if a physical
+jack has two functions, headphone and mic_in, the ALSA ASoC driver will
+build two ``jack_kctl`` objects for this jack. Jack injection is
+implemented on top of ``jack_kctl`` instead of ``snd_jack``.
+
+To inject events into audio jacks, first enable jack injection via
+``sw_inject_enable``. Once it is enabled, the jack no longer changes
+state in response to hardware events; we can then inject plugin or
+plugout events via ``jackin_inject`` and check the jack state via
+``status``. After finishing the test, disable jack injection via
+``sw_inject_enable`` again. Once it is disabled, the jack state is
+restored according to the last reported hardware events and will change
+with future hardware events.
+
+The Layout of Jack Injection Interface
+======================================
+
+If users enable the SND_JACK_INJECTION_DEBUG in the kernel, the audio
+jack injection interface will be created as below:
+::
+
+ $debugfs_mount_dir/sound
+ |-- card0
+ |-- |-- HDMI_DP_pcm_10_Jack
+ |-- |-- |-- jackin_inject
+ |-- |-- |-- kctl_id
+ |-- |-- |-- mask_bits
+ |-- |-- |-- status
+ |-- |-- |-- sw_inject_enable
+ |-- |-- |-- type
+ ...
+ |-- |-- HDMI_DP_pcm_9_Jack
+ |-- |-- jackin_inject
+ |-- |-- kctl_id
+ |-- |-- mask_bits
+ |-- |-- status
+ |-- |-- sw_inject_enable
+ |-- |-- type
+ |-- card1
+ |-- HDMI_DP_pcm_5_Jack
+ |-- |-- jackin_inject
+ |-- |-- kctl_id
+ |-- |-- mask_bits
+ |-- |-- status
+ |-- |-- sw_inject_enable
+ |-- |-- type
+ ...
+ |-- Headphone_Jack
+ |-- |-- jackin_inject
+ |-- |-- kctl_id
+ |-- |-- mask_bits
+ |-- |-- status
+ |-- |-- sw_inject_enable
+ |-- |-- type
+ |-- Headset_Mic_Jack
+ |-- jackin_inject
+ |-- kctl_id
+ |-- mask_bits
+ |-- status
+ |-- sw_inject_enable
+ |-- type
+
+The Explanation Of The Nodes
+======================================
+
+kctl_id
+ read-only, get jack_kctl->kctl's id
+ ::
+
+ sound/card1/Headphone_Jack# cat kctl_id
+ Headphone Jack
+
+mask_bits
+ read-only, get jack_kctl's supported events mask_bits
+ ::
+
+ sound/card1/Headphone_Jack# cat mask_bits
+ 0x0001 HEADPHONE(0x0001)
+
+status
+ read-only, get jack_kctl's current status
+
+- headphone unplugged:
+
+ ::
+
+ sound/card1/Headphone_Jack# cat status
+ Unplugged
+
+- headphone plugged:
+
+ ::
+
+ sound/card1/Headphone_Jack# cat status
+ Plugged
+
+type
+ read-only, get snd_jack's supported events from type (all supported events on the physical audio jack)
+ ::
+
+ sound/card1/Headphone_Jack# cat type
+ 0x7803 HEADPHONE(0x0001) MICROPHONE(0x0002) BTN_3(0x0800) BTN_2(0x1000) BTN_1(0x2000) BTN_0(0x4000)
+
+sw_inject_enable
+ read-write, enable or disable injection
+
+- injection disabled:
+
+ ::
+
+ sound/card1/Headphone_Jack# cat sw_inject_enable
+ Jack: Headphone Jack Inject Enabled: 0
+
+- injection enabled:
+
+ ::
+
+ sound/card1/Headphone_Jack# cat sw_inject_enable
+ Jack: Headphone Jack Inject Enabled: 1
+
+- to enable jack injection:
+
+ ::
+
+ sound/card1/Headphone_Jack# echo 1 > sw_inject_enable
+
+- to disable jack injection:
+
+ ::
+
+ sound/card1/Headphone_Jack# echo 0 > sw_inject_enable
+
+jackin_inject
+ write-only, inject plugin or plugout
+
+- to inject plugin:
+
+ ::
+
+ sound/card1/Headphone_Jack# echo 1 > jackin_inject
+
+- to inject plugout:
+
+ ::
+
+ sound/card1/Headphone_Jack# echo 0 > jackin_inject
diff --git a/Documentation/sphinx/automarkup.py b/Documentation/sphinx/automarkup.py
index 953b24b6e2b4..acf5473002f3 100644
--- a/Documentation/sphinx/automarkup.py
+++ b/Documentation/sphinx/automarkup.py
@@ -51,7 +51,7 @@ RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
# Detects a reference to a documentation page of the form Documentation/... with
# an optional extension
#
-RE_doc = re.compile(r'\bDocumentation(/[\w\-_/]+)(\.\w+)*')
+RE_doc = re.compile(r'(\bDocumentation/)?((\.\./)*[\w\-/]+)\.(rst|txt)')
RE_namespace = re.compile(r'^\s*..\s*c:namespace::\s*(\S+)\s*$')
@@ -234,7 +234,10 @@ def markup_doc_ref(docname, app, match):
#
# Go through the dance of getting an xref out of the std domain
#
- target = match.group(1)
+ absolute = match.group(1)
+ target = match.group(2)
+ if absolute:
+ target = "/" + target
xref = None
pxref = addnodes.pending_xref('', refdomain = 'std', reftype = 'doc',
reftarget = target, modname = None,
diff --git a/Documentation/sphinx/cdomain.py b/Documentation/sphinx/cdomain.py
index 014a5229e57a..ca8ac9e59ded 100644
--- a/Documentation/sphinx/cdomain.py
+++ b/Documentation/sphinx/cdomain.py
@@ -236,13 +236,7 @@ class CObject(Base_CObject):
indextext = self.get_index_text(name)
if indextext:
- if major == 1 and minor < 4:
- # indexnode's tuple changed in 1.4
- # https://github.com/sphinx-doc/sphinx/commit/e6a5a3a92e938fcd75866b4227db9e0524d58f7c
- self.indexnode['entries'].append(
- ('single', indextext, targetname, ''))
- else:
- self.indexnode['entries'].append(
+ self.indexnode['entries'].append(
('single', indextext, targetname, '', None))
class CDomain(Base_CDomain):
diff --git a/Documentation/sphinx/kernel_abi.py b/Documentation/sphinx/kernel_abi.py
index f3da859c9878..efe760e410c4 100644
--- a/Documentation/sphinx/kernel_abi.py
+++ b/Documentation/sphinx/kernel_abi.py
@@ -45,17 +45,7 @@ from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
from docutils.utils.error_reporting import ErrorString
-
-#
-# AutodocReporter is only good up to Sphinx 1.7
-#
-import sphinx
-
-Use_SSI = sphinx.__version__[:3] >= '1.7'
-if Use_SSI:
- from sphinx.util.docutils import switch_source_input
-else:
- from sphinx.ext.autodoc import AutodocReporter
+from sphinx.util.docutils import switch_source_input
__version__ = '1.0'
@@ -179,16 +169,5 @@ class KernelCmd(Directive):
return node.children
def do_parse(self, content, node):
- if Use_SSI:
- with switch_source_input(self.state, content):
- self.state.nested_parse(content, 0, node, match_titles=1)
- else:
- buf = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter
-
- self.state.memo.title_styles = []
- self.state.memo.section_level = 0
- self.state.memo.reporter = AutodocReporter(content, self.state.memo.reporter)
- try:
- self.state.nested_parse(content, 0, node, match_titles=1)
- finally:
- self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = buf
+ with switch_source_input(self.state, content):
+ self.state.nested_parse(content, 0, node, match_titles=1)
diff --git a/Documentation/sphinx/kernel_feat.py b/Documentation/sphinx/kernel_feat.py
index 2fee04f1dedd..c91ea2b27697 100644
--- a/Documentation/sphinx/kernel_feat.py
+++ b/Documentation/sphinx/kernel_feat.py
@@ -42,17 +42,7 @@ from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
from docutils.utils.error_reporting import ErrorString
-
-#
-# AutodocReporter is only good up to Sphinx 1.7
-#
-import sphinx
-
-Use_SSI = sphinx.__version__[:3] >= '1.7'
-if Use_SSI:
- from sphinx.util.docutils import switch_source_input
-else:
- from sphinx.ext.autodoc import AutodocReporter
+from sphinx.util.docutils import switch_source_input
__version__ = '1.0'
@@ -154,16 +144,7 @@ class KernelFeat(Directive):
buf = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter
- if Use_SSI:
- with switch_source_input(self.state, content):
- self.state.nested_parse(content, 0, node, match_titles=1)
- else:
- self.state.memo.title_styles = []
- self.state.memo.section_level = 0
- self.state.memo.reporter = AutodocReporter(content, self.state.memo.reporter)
- try:
- self.state.nested_parse(content, 0, node, match_titles=1)
- finally:
- self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = buf
+ with switch_source_input(self.state, content):
+ self.state.nested_parse(content, 0, node, match_titles=1)
return node.children
diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
index e9857ab904f1..8189c33b9dda 100644
--- a/Documentation/sphinx/kerneldoc.py
+++ b/Documentation/sphinx/kerneldoc.py
@@ -37,18 +37,8 @@ import glob
from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
-
-#
-# AutodocReporter is only good up to Sphinx 1.7
-#
import sphinx
-
-Use_SSI = sphinx.__version__[:3] >= '1.7'
-if Use_SSI:
- from sphinx.util.docutils import switch_source_input
-else:
- from sphinx.ext.autodoc import AutodocReporter
-
+from sphinx.util.docutils import switch_source_input
import kernellog
__version__ = '1.0'
@@ -163,18 +153,8 @@ class KernelDocDirective(Directive):
return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
def do_parse(self, result, node):
- if Use_SSI:
- with switch_source_input(self.state, result):
- self.state.nested_parse(result, 0, node, match_titles=1)
- else:
- save = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter
- self.state.memo.reporter = AutodocReporter(result, self.state.memo.reporter)
- self.state.memo.title_styles, self.state.memo.section_level = [], 0
- try:
- self.state.nested_parse(result, 0, node, match_titles=1)
- finally:
- self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = save
-
+ with switch_source_input(self.state, result):
+ self.state.nested_parse(result, 0, node, match_titles=1)
def setup(app):
app.add_config_value('kerneldoc_bin', None, 'env')
diff --git a/Documentation/sphinx/kernellog.py b/Documentation/sphinx/kernellog.py
index 8ac7d274f542..0bc00c138cad 100644
--- a/Documentation/sphinx/kernellog.py
+++ b/Documentation/sphinx/kernellog.py
@@ -4,29 +4,19 @@
# only goes back to 1.6. So here's a wrapper layer to keep around for
# as long as we support 1.4.
#
+# We don't support 1.4 anymore, but we'll keep the wrappers around until
+# we change all the code to not use them anymore :)
+#
import sphinx
+from sphinx.util import logging
-if sphinx.__version__[:3] >= '1.6':
- UseLogging = True
- from sphinx.util import logging
- logger = logging.getLogger('kerneldoc')
-else:
- UseLogging = False
+logger = logging.getLogger('kerneldoc')
def warn(app, message):
- if UseLogging:
- logger.warning(message)
- else:
- app.warn(message)
+ logger.warning(message)
def verbose(app, message):
- if UseLogging:
- logger.verbose(message)
- else:
- app.verbose(message)
+ logger.verbose(message)
def info(app, message):
- if UseLogging:
- logger.info(message)
- else:
- app.info(message)
+ logger.info(message)
diff --git a/Documentation/sphinx/kfigure.py b/Documentation/sphinx/kfigure.py
index 788704886eec..3c78828330be 100644
--- a/Documentation/sphinx/kfigure.py
+++ b/Documentation/sphinx/kfigure.py
@@ -49,26 +49,14 @@ import os
from os import path
import subprocess
from hashlib import sha1
-import sys
-
from docutils import nodes
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives import images
import sphinx
-
from sphinx.util.nodes import clean_astext
-from six import iteritems
-
import kernellog
-PY3 = sys.version_info[0] == 3
-
-if PY3:
- _unicode = str
-else:
- _unicode = unicode
-
# Get Sphinx version
major, minor, patch = sphinx.version_info[:3]
if major == 1 and minor > 3:
@@ -540,7 +528,7 @@ def add_kernel_figure_to_std_domain(app, doctree):
docname = app.env.docname
labels = std.data["labels"]
- for name, explicit in iteritems(doctree.nametypes):
+ for name, explicit in doctree.nametypes.items():
if not explicit:
continue
labelid = doctree.nameids[name]
diff --git a/Documentation/sphinx/maintainers_include.py b/Documentation/sphinx/maintainers_include.py
index dc8fed48d3c2..328b3631a585 100755
--- a/Documentation/sphinx/maintainers_include.py
+++ b/Documentation/sphinx/maintainers_include.py
@@ -61,8 +61,6 @@ class MaintainersInclude(Include):
field_content = ""
for line in open(path):
- if sys.version_info.major == 2:
- line = unicode(line, 'utf-8')
# Have we reached the end of the preformatted Descriptions text?
if descriptions and line.startswith('Maintainers'):
descriptions = False
diff --git a/Documentation/sphinx/requirements.txt b/Documentation/sphinx/requirements.txt
index 5030d346d23b..489f6626de67 100644
--- a/Documentation/sphinx/requirements.txt
+++ b/Documentation/sphinx/requirements.txt
@@ -1,4 +1,3 @@
docutils
Sphinx==2.4.4
sphinx_rtd_theme
-six
diff --git a/Documentation/sphinx/rstFlatTable.py b/Documentation/sphinx/rstFlatTable.py
index 2019a55f6b18..a3eea0bbe6ba 100755
--- a/Documentation/sphinx/rstFlatTable.py
+++ b/Documentation/sphinx/rstFlatTable.py
@@ -42,8 +42,6 @@ u"""
# imports
# ==============================================================================
-import sys
-
from docutils import nodes
from docutils.parsers.rst import directives, roles
from docutils.parsers.rst.directives.tables import Table
@@ -55,14 +53,6 @@ from docutils.utils import SystemMessagePropagation
__version__ = '1.0'
-PY3 = sys.version_info[0] == 3
-PY2 = sys.version_info[0] == 2
-
-if PY3:
- # pylint: disable=C0103, W0622
- unicode = str
- basestring = str
-
# ==============================================================================
def setup(app):
# ==============================================================================
diff --git a/Documentation/timers/timers-howto.rst b/Documentation/timers/timers-howto.rst
index afb0a43b8cdf..5c169e3d29a8 100644
--- a/Documentation/timers/timers-howto.rst
+++ b/Documentation/timers/timers-howto.rst
@@ -75,7 +75,7 @@ NON-ATOMIC CONTEXT:
- Why not msleep for (1ms - 20ms)?
Explained originally here:
- http://lkml.org/lkml/2007/8/3/250
+ https://lore.kernel.org/r/15327.1186166232@lwn.net
msleep(1~20) may not do what the caller intends, and
will often sleep longer (~20 ms actual sleep for any
diff --git a/Documentation/trace/coresight/coresight.rst b/Documentation/trace/coresight/coresight.rst
index 0b73acb44efa..169749efd8d1 100644
--- a/Documentation/trace/coresight/coresight.rst
+++ b/Documentation/trace/coresight/coresight.rst
@@ -512,6 +512,38 @@ The --itrace option controls the type and frequency of synthesized events
Note that only 64-bit programs are currently supported - further work is
required to support instruction decode of 32-bit Arm programs.
+2.2) Tracing PID
+
+The kernel can be built to write the PID value into the PE ContextID registers.
+For a kernel running at EL1, the PID is stored in CONTEXTIDR_EL1. A PE may
+implement the Arm Virtualization Host Extensions (VHE), with which the kernel
+can run at EL2 as a virtualisation host; in this case, the PID value is stored
+in CONTEXTIDR_EL2.
+
+perf provides PMU formats that program the ETM to insert these values into the
+trace data; the PMU formats are defined as below:
+
+ "contextid1": Available on both EL1 kernel and EL2 kernel. When the
+ kernel is running at EL1, "contextid1" enables the PID
+ tracing; when the kernel is running at EL2, this enables
+ tracing the PID of guest applications.
+
+ "contextid2": Only usable when the kernel is running at EL2. When
+ selected, enables PID tracing on EL2 kernel.
+
+ "contextid": Will be an alias for the option that enables PID
+ tracing. I.e,
+ contextid == contextid1, on EL1 kernel.
+ contextid == contextid2, on EL2 kernel.
+
+perf will always enable PID tracing at the relevant EL; this is accomplished by
+automatically enabling the "contextid" config - but for EL2 it is possible to
+make specific adjustments using the configs "contextid1" and "contextid2". E.g.
+if a user wants to trace PIDs for both host and guest, the two configs
+"contextid1" and "contextid2" can be set at the same time:
+
+ perf record -e cs_etm/contextid1,contextid2/u -- vm
+
Generating coverage files for Feedback Directed Optimization: AutoFDO
---------------------------------------------------------------------
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index 87cf5c010d5d..62c98e9bbdd9 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -1159,6 +1159,12 @@ Here are the available options:
This simulates the original behavior of the trace file.
When the file is closed, tracing will be enabled again.
+ hash-ptr
+ When set, "%p" in the event printk format displays the
+ hashed pointer value instead of real address.
+ This will be useful if you want to find out which hashed
+ value is corresponding to the real value in trace log.
+
record-cmd
When any event or tracer is enabled, a hook is enabled
in the sched_switch trace point to fill comm cache
diff --git a/Documentation/translations/it_IT/process/4.Coding.rst b/Documentation/translations/it_IT/process/4.Coding.rst
index a5e36aa60448..8012fe9497ae 100644
--- a/Documentation/translations/it_IT/process/4.Coding.rst
+++ b/Documentation/translations/it_IT/process/4.Coding.rst
@@ -256,7 +256,7 @@ e cercate di evitare le "riparazioni" che fan sparire l'avvertimento senza
però averne trovato la causa.
Tenete a mente che non tutti gli avvertimenti sono disabilitati di default.
-Costruite il kernel con "make EXTRA_CFLAGS=-W" per ottenerli tutti.
+Costruite il kernel con "make KCFLAGS=-W" per ottenerli tutti.
Il kernel fornisce differenti opzioni che abilitano funzionalità di debugging;
molti di queste sono trovano all'interno del sotto menu "kernel hacking".
diff --git a/Documentation/translations/it_IT/process/adding-syscalls.rst b/Documentation/translations/it_IT/process/adding-syscalls.rst
index bff0a82bf127..c478b6e8c292 100644
--- a/Documentation/translations/it_IT/process/adding-syscalls.rst
+++ b/Documentation/translations/it_IT/process/adding-syscalls.rst
@@ -611,21 +611,21 @@ Riferimenti e fonti
https://lwn.net/Articles/486306/
- Raccomandazioni da Andrew Morton circa il fatto che tutte le informazioni
su una nuova chiamata di sistema dovrebbero essere contenute nello stesso
- filone di discussione di email: https://lkml.org/lkml/2014/7/24/641
+ filone di discussione di email: https://lore.kernel.org/r/20140724144747.3041b208832bbdf9fbce5d96@linux-foundation.org
- Raccomandazioni da Michael Kerrisk circa il fatto che le nuove chiamate di
- sistema dovrebbero avere una pagina man: https://lkml.org/lkml/2014/6/13/309
+ sistema dovrebbero avere una pagina man: https://lore.kernel.org/r/CAKgNAkgMA39AfoSoA5Pe1r9N+ZzfYQNvNPvcRN7tOvRb8+v06Q@mail.gmail.com
- Consigli da Thomas Gleixner sul fatto che il collegamento all'architettura
x86 dovrebbe avvenire in un *commit* differente:
- https://lkml.org/lkml/2014/11/19/254
+ https://lore.kernel.org/r/alpine.DEB.2.11.1411191249560.3909@nanos
- Consigli da Greg Kroah-Hartman circa la bontà d'avere una pagina man e un
programma di auto-verifica per le nuove chiamate di sistema:
- https://lkml.org/lkml/2014/3/19/710
+ https://lore.kernel.org/r/20140320025530.GA25469@kroah.com
- Discussione di Michael Kerrisk sulle nuove chiamate di sistema contro
- le estensioni :manpage:`prctl(2)`: https://lkml.org/lkml/2014/6/3/411
+ le estensioni :manpage:`prctl(2)`: https://lore.kernel.org/r/CAHO5Pa3F2MjfTtfNxa8LbnkeeU8=YJ+9tDqxZpw7Gz59E-4AUg@mail.gmail.com
- Consigli da Ingo Molnar che le chiamate di sistema con più argomenti
dovrebbero incapsularli in una struttura che includa un argomento
*size* per garantire l'estensibilità futura:
- https://lkml.org/lkml/2015/7/30/117
+ https://lore.kernel.org/r/20150730083831.GA22182@gmail.com
- Un certo numero di casi strani emersi dall'uso (riuso) dei flag O_*:
- commit 75069f2b5bfb ("vfs: renumber FMODE_NONOTIFY and add to uniqueness
@@ -635,9 +635,9 @@ Riferimenti e fonti
- commit bb458c644a59 ("Safer ABI for O_TMPFILE")
- Discussion from Matthew Wilcox about restrictions on 64-bit arguments:
- https://lkml.org/lkml/2008/12/12/187
+ https://lore.kernel.org/r/20081212152929.GM26095@parisc-linux.org
- Raccomandazioni da Greg Kroah-Hartman sul fatto che i flag sconosciuti dovrebbero
- essere controllati: https://lkml.org/lkml/2014/7/17/577
+ essere controllati: https://lore.kernel.org/r/20140717193330.GB4703@kroah.com
- Raccomandazioni da Linus Torvalds che le chiamate di sistema x32 dovrebbero
favorire la compatibilità con le versioni a 64-bit piuttosto che quelle a 32-bit:
- https://lkml.org/lkml/2011/8/31/244
+ https://lore.kernel.org/r/CA+55aFxfmwfB7jbbrXxa=K7VBYPfAvmu3XOkGrLbB1UFjX1+Ew@mail.gmail.com
diff --git a/Documentation/translations/it_IT/process/magic-number.rst b/Documentation/translations/it_IT/process/magic-number.rst
index 0243d32a0b59..1af30f4228f2 100644
--- a/Documentation/translations/it_IT/process/magic-number.rst
+++ b/Documentation/translations/it_IT/process/magic-number.rst
@@ -141,7 +141,6 @@ FW_HEADER_MAGIC 0x65726F66 fw_header ``drivers/atm/fo
SLOT_MAGIC 0x67267321 slot ``drivers/hotplug/cpqphp.h``
SLOT_MAGIC 0x67267322 slot ``drivers/hotplug/acpiphp.h``
LO_MAGIC 0x68797548 nbd_device ``include/linux/nbd.h``
-OPROFILE_MAGIC 0x6f70726f super_block ``drivers/oprofile/oprofilefs.h``
M3_STATE_MAGIC 0x734d724d m3_state ``sound/oss/maestro3.c``
VMALLOC_MAGIC 0x87654320 snd_alloc_track ``sound/core/memory.c``
KMALLOC_MAGIC 0x87654321 snd_alloc_track ``sound/core/memory.c``
diff --git a/Documentation/translations/it_IT/process/submit-checklist.rst b/Documentation/translations/it_IT/process/submit-checklist.rst
index 3e575502690f..614fc17d9086 100644
--- a/Documentation/translations/it_IT/process/submit-checklist.rst
+++ b/Documentation/translations/it_IT/process/submit-checklist.rst
@@ -104,7 +104,7 @@ sottomissione delle patch, in particolare
l'iniezione di fallimenti specifici per il sottosistema.
22) Il nuovo codice è stato compilato con ``gcc -W`` (usate
- ``make EXTRA_CFLAGS=-W``). Questo genererà molti avvisi, ma è ottimo
+ ``make KCFLAGS=-W``). Questo genererà molti avvisi, ma è ottimo
per scovare bachi come "warning: comparison between signed and unsigned".
23) La patch è stata verificata dopo essere stata inclusa nella serie di patch
diff --git a/Documentation/translations/it_IT/process/submitting-patches.rst b/Documentation/translations/it_IT/process/submitting-patches.rst
index 966cd3242a60..ae00352346ed 100644
--- a/Documentation/translations/it_IT/process/submitting-patches.rst
+++ b/Documentation/translations/it_IT/process/submitting-patches.rst
@@ -731,13 +731,13 @@ Greg Kroah-Hartman, "Come scocciare un manutentore di un sottosistema"
<http://www.kroah.com/log/linux/maintainer-06.html>
No!!!! Basta gigantesche bombe patch alle persone sulla lista linux-kernel@vger.kernel.org!
- <https://lkml.org/lkml/2005/7/11/336>
+ <https://lore.kernel.org/r/20050711.125305.08322243.davem@davemloft.net>
Kernel Documentation/translations/it_IT/process/coding-style.rst:
:ref:`Documentation/translations/it_IT/process/coding-style.rst <it_codingstyle>`
E-mail di Linus Torvalds sul formato canonico di una patch:
- <http://lkml.org/lkml/2005/4/7/183>
+ <https://lore.kernel.org/r/Pine.LNX.4.58.0504071023190.28951@ppc970.osdl.org>
Andi Kleen, "Su come sottomettere patch del kernel"
Alcune strategie su come sottomettere modifiche toste o controverse.
diff --git a/Documentation/translations/ja_JP/SubmittingPatches b/Documentation/translations/ja_JP/SubmittingPatches
index dd0c3280ba5a..6854f5add72e 100644
--- a/Documentation/translations/ja_JP/SubmittingPatches
+++ b/Documentation/translations/ja_JP/SubmittingPatches
@@ -702,13 +702,13 @@ Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer".
<http://www.kroah.com/log/2006/01/11/>
NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!
- <https://lkml.org/lkml/2005/7/11/336>
+ <https://lore.kernel.org/r/20050711.125305.08322243.davem@davemloft.net>
Kernel Documentation/process/coding-style.rst:
<http://users.sosdg.org/~qiyong/lxr/source/Documentation/process/coding-style.rst>
Linus Torvalds's mail on the canonical patch format:
- <http://lkml.org/lkml/2005/4/7/183>
+ <https://lore.kernel.org/r/Pine.LNX.4.58.0504071023190.28951@ppc970.osdl.org>
Andi Kleen, "On submitting kernel patches"
Some strategies to get difficult or controversial changes in.
diff --git a/Documentation/translations/ko_KR/howto.rst b/Documentation/translations/ko_KR/howto.rst
index 240d29be38f2..787f1e85f8a0 100644
--- a/Documentation/translations/ko_KR/howto.rst
+++ b/Documentation/translations/ko_KR/howto.rst
@@ -345,7 +345,7 @@ https://bugzilla.kernel.org 는 리눅스 ì»¤ë„ ê°œë°œìžë“¤ì´ 커ë„ì˜ ë²„ê·
https://bugzilla.kernel.org/page.cgi?id=faq.html
-ë©”ì¸ ì»¤ë„ ì†ŒìŠ¤ ë””ë ‰í† ë¦¬ì— ìžˆëŠ” :ref:`admin-guide/reporting-bugs.rst <reportingbugs>`
+ë©”ì¸ ì»¤ë„ ì†ŒìŠ¤ ë””ë ‰í† ë¦¬ì— ìžˆëŠ” 'Documentation/admin-guide/reporting-issues.rst'
파ì¼ì€ ì»¤ë„ ë²„ê·¸ë¼ê³  ìƒê°ë˜ëŠ” ê²ƒì„ ë³´ê³ í•˜ëŠ” ë°©ë²•ì— ê´€í•œ ì¢‹ì€ í…œí”Œë¦¿ì´ë©° 문제를
ì¶”ì í•˜ê¸° 위해서 ì»¤ë„ ê°œë°œìžë“¤ì´ 필요로 하는 ì •ë³´ê°€ 무엇들ì¸ì§€ë¥¼ ìƒì„¸ížˆ 설명하고
있다.
@@ -583,7 +583,7 @@ Patì´ë¼ëŠ” ì´ë¦„ì„ ê°€ì§„ ì—¬ìžê°€ ìžˆì„ ìˆ˜ë„ ìžˆëŠ” 것ì´ë‹¤. 리눅ìŠ
"The Perfect Patch"
- http://www.ozlabs.org/~akpm/stuff/tpp.txt
+ https://www.ozlabs.org/~akpm/stuff/tpp.txt
ì´ ëª¨ë“  ê²ƒì„ í•˜ëŠ” ê²ƒì€ ë§¤ìš° 어려운 ì¼ì´ë‹¤. 완벽히 소화하는 ë°ëŠ” ì ì–´ë„ 몇년ì´
diff --git a/Documentation/translations/ko_KR/index.rst b/Documentation/translations/ko_KR/index.rst
index 27995c4233de..b9e27d20b039 100644
--- a/Documentation/translations/ko_KR/index.rst
+++ b/Documentation/translations/ko_KR/index.rst
@@ -10,3 +10,18 @@
:maxdepth: 1
howto
+
+
+리눅스 ì»¤ë„ ë©”ëª¨ë¦¬ 배리어
+-------------------------
+
+.. raw:: latex
+
+ \footnotesize
+
+.. include:: ./memory-barriers.txt
+ :literal:
+
+.. raw:: latex
+
+ \normalsize
diff --git a/Documentation/translations/zh_CN/admin-guide/cpu-load.rst b/Documentation/translations/zh_CN/admin-guide/cpu-load.rst
index c972731c0e57..a73400a054ff 100644
--- a/Documentation/translations/zh_CN/admin-guide/cpu-load.rst
+++ b/Documentation/translations/zh_CN/admin-guide/cpu-load.rst
@@ -95,7 +95,7 @@ Linux通过``/proc/stat``å’Œ``/proc/uptime``导出å„ç§ä¿¡æ¯ï¼Œç”¨æˆ·ç©ºé—´å·¥
å‚考
---
-- http://lkml.org/lkml/2007/2/12/6
+- https://lore.kernel.org/r/loom.20070212T063225-663@post.gmane.org
- Documentation/filesystems/proc.rst (1.8)
diff --git a/Documentation/translations/zh_CN/arm/Booting b/Documentation/translations/zh_CN/arm/Booting
index c3d26ce5f6de..5ecea0767893 100644
--- a/Documentation/translations/zh_CN/arm/Booting
+++ b/Documentation/translations/zh_CN/arm/Booting
@@ -124,7 +124,7 @@ bootloader 必须传递一个系统内存的ä½ç½®å’Œæœ€å°å€¼ï¼Œä»¥åŠæ ¹æ–‡ä»¶
bootloader 必须以 64bit 地å€å¯¹é½çš„å½¢å¼åŠ è½½ä¸€ä¸ªè®¾å¤‡æ ‘æ˜ åƒ(dtb)到系统
RAM 中,并用å¯åŠ¨æ•°æ®åˆå§‹åŒ–它。dtb æ ¼å¼åœ¨æ–‡æ¡£
-Documentation/devicetree/booting-without-of.rst 中。内核将会在
+https://www.devicetree.org/specifications/ 中。内核将会在
dtb 物ç†åœ°å€å¤„查找 dtb 魔数值(0xd00dfeed),以确定 dtb 是å¦å·²ç»ä»£æ›¿
标签列表被传递进æ¥ã€‚
diff --git a/Documentation/translations/zh_CN/iio/ep93xx_adc.rst b/Documentation/translations/zh_CN/iio/ep93xx_adc.rst
new file mode 100644
index 000000000000..7e91d2197867
--- /dev/null
+++ b/Documentation/translations/zh_CN/iio/ep93xx_adc.rst
@@ -0,0 +1,46 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :doc:`../../../iio/ep93xx_adc`
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_iio_ep93xx_adc:
+
+
+==================================
+æ€ç¿é€»è¾‘ EP93xx 模拟数字转æ¢å™¨é©±åЍ
+==================================
+
+1. 概述
+=======
+
+è¯¥é©±åŠ¨åŒæ—¶é€‚用于具有5é€šé“æ¨¡æ‹Ÿæ•°å­—转æ¢å™¨çš„低端 (EP9301, Ep9302) 设备和10通é“
+触摸å±/模拟数字转æ¢å™¨çš„高端设备(EP9307, EP9312, EP9315)。
+
+2. 通é“ç¼–å·
+===========
+
+EP9301å’ŒEP9302æ•°æ®è¡¨å®šä¹‰äº†é€šé“0..4çš„ç¼–å·æ–¹æ¡ˆã€‚虽然EP9307, EP9312å’ŒEP9315多
+了3个通é“(一共8个),但是编å·å¹¶æ²¡æœ‰å®šä¹‰ã€‚所以说最åŽä¸‰ä¸ªé€šé“æ˜¯éšæœºç¼–å·çš„。
+
+如果ep93xx_adc是IIO设备0,您将在以下ä½ç½®æ‰¾åˆ°æ¡ç›®
+/sys/bus/iio/devices/iio:device0/:
+
+ +-----------------+---------------+
+ | sysfs å…¥å£ | ball/pin åç§° |
+ +=================+===============+
+ | in_voltage0_raw | YM |
+ +-----------------+---------------+
+ | in_voltage1_raw | SXP |
+ +-----------------+---------------+
+ | in_voltage2_raw | SXM |
+ +-----------------+---------------+
+ | in_voltage3_raw | SYP |
+ +-----------------+---------------+
+ | in_voltage4_raw | SYM |
+ +-----------------+---------------+
+ | in_voltage5_raw | XP |
+ +-----------------+---------------+
+ | in_voltage6_raw | XM |
+ +-----------------+---------------+
+ | in_voltage7_raw | YP |
+ +-----------------+---------------+
diff --git a/Documentation/translations/zh_CN/iio/iio_configfs.rst b/Documentation/translations/zh_CN/iio/iio_configfs.rst
new file mode 100644
index 000000000000..274488e8dce4
--- /dev/null
+++ b/Documentation/translations/zh_CN/iio/iio_configfs.rst
@@ -0,0 +1,102 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :doc:`../../../iio/iio_configfs`
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_iio_configfs:
+
+
+===============================
+Industrial IIO configfs support
+===============================
+
+1. Overview
+===========
+
+Configfs is a filesystem-based manager of kernel objects. IIO uses some
+objects that can easily be configured via configfs (e.g. devices and
+triggers).
+
+See Documentation/filesystems/configfs.rst for more information about how
+configfs works.
+
+2. Usage
+========
+
+In order for configfs support to be available for IIO, the
+CONFIG_IIO_CONFIGFS config option needs to be selected at compile time.
+
+Then, mount the configfs filesystem (usually under the /config directory)::
+
+  $ mkdir /config
+  $ mount -t configfs none /config
+
+At this point, all default IIO groups will be created and can be accessed
+under /config/iio. The next chapters describe the available IIO configuration
+objects.
+
+3. Software triggers
+====================
+
+One of the default IIO configfs groups is the "triggers" group. It is
+automatically accessible after mounting configfs and can be found under
+/config/iio/triggers.
+
+IIO software triggers provide support for creating multiple trigger types.
+A new trigger type is usually implemented as a separate kernel module
+following the interface in include/linux/iio/sw_trigger.h:
+::
+
+  /*
+   * drivers/iio/trigger/iio-trig-sample.c
+   * sample kernel module implementing a new trigger type
+   */
+  #include <linux/iio/sw_trigger.h>
+
+
+  static struct iio_sw_trigger *iio_trig_sample_probe(const char *name)
+  {
+      /*
+       * This allocates and registers an IIO trigger plus other
+       * trigger-type-specific initialization.
+       */
+  }
+
+  static int iio_trig_sample_remove(struct iio_sw_trigger *swt)
+  {
+      /*
+       * This undoes what iio_trig_sample_probe did.
+       */
+  }
+
+  static const struct iio_sw_trigger_ops iio_trig_sample_ops = {
+      .probe = iio_trig_sample_probe,
+      .remove = iio_trig_sample_remove,
+  };
+
+  static struct iio_sw_trigger_type iio_trig_sample = {
+      .name = "trig-sample",
+      .owner = THIS_MODULE,
+      .ops = &iio_trig_sample_ops,
+  };
+
+  module_iio_sw_trigger_driver(iio_trig_sample);
+
+Each trigger type has its own directory under /config/iio/triggers. Loading
+the iio-trig-sample module will create a "trig-sample" trigger type directory
+/config/iio/triggers/trig-sample.
+
+We support the following interrupt sources (trigger types):
+
+  * hrtimer, uses a high resolution timer as the interrupt source
+
+3.1 Hrtimer trigger creation and destruction
+--------------------------------------------
+
+Loading the iio-trig-hrtimer module will register the hrtimer trigger type,
+allowing users to create hrtimer triggers under /config/iio/triggers/hrtimer.
+
+For example::
+
+  $ mkdir /config/iio/triggers/hrtimer/instance1
+  $ rmdir /config/iio/triggers/hrtimer/instance1
+
+Each trigger can have one or more attributes specific to the trigger type.
+
+3.2 "hrtimer" trigger type attributes
+-------------------------------------
+
+The "hrtimer" trigger type has no configurable attributes from the /config dir.
diff --git a/Documentation/translations/zh_CN/iio/index.rst b/Documentation/translations/zh_CN/iio/index.rst
new file mode 100644
index 000000000000..7087076a10f6
--- /dev/null
+++ b/Documentation/translations/zh_CN/iio/index.rst
@@ -0,0 +1,20 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :doc:`../../../iio/index`
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_iio_index:
+
+
+==============
+Industrial I/O
+==============
+
+.. toctree::
+ :maxdepth: 1
+
+ iio_configfs
+
+ ep93xx_adc
diff --git a/Documentation/translations/zh_CN/mips/booting.rst b/Documentation/translations/zh_CN/mips/booting.rst
new file mode 100644
index 000000000000..96453e1b962e
--- /dev/null
+++ b/Documentation/translations/zh_CN/mips/booting.rst
@@ -0,0 +1,31 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :doc:`../../../mips/booting`
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_booting:
+
+BMIPS device tree booting
+-------------------------
+
+  Some bootloaders only support a single entry point, at the start of the
+  kernel image. Other bootloaders will jump to the ELF start address.
+  Both schemes are supported; because CONFIG_BOOT_RAW=y and
+  CONFIG_NO_EXCEPT_FILL=y, the first instruction immediately jumps to
+  kernel_entry().
+
+  Similar to the arch/arm case (b), a DT-aware bootloader is expected to
+  set up the following registers:
+
+         a0 : 0
+
+         a1 : 0xffffffff
+
+         a2 : physical pointer to the device tree block in RAM (defined in
+         chapter II). The device tree can be located anywhere in the first
+         512MB of the physical address space (0x00000000 - 0x1fffffff),
+         aligned on a 64-bit boundary.
+
+  Legacy bootloaders do not use this convention and do not pass in a DT
+  block. In this case, Linux will look for a built-in DTB selected via
+  CONFIG_DT_*.
+
+  The above convention is only defined for 32-bit systems, as there are
+  currently no 64-bit BMIPS implementations.
diff --git a/Documentation/translations/zh_CN/mips/features.rst b/Documentation/translations/zh_CN/mips/features.rst
new file mode 100644
index 000000000000..93d93d06b1b3
--- /dev/null
+++ b/Documentation/translations/zh_CN/mips/features.rst
@@ -0,0 +1,10 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :doc:`../../../mips/features`
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_features:
+
+.. kernel-feat:: $srctree/Documentation/features mips
diff --git a/Documentation/translations/zh_CN/mips/index.rst b/Documentation/translations/zh_CN/mips/index.rst
new file mode 100644
index 000000000000..b85033f9d67c
--- /dev/null
+++ b/Documentation/translations/zh_CN/mips/index.rst
@@ -0,0 +1,26 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :doc:`../../../mips/index`
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+===========================
+MIPS Feature Documentation
+===========================
+
+.. toctree::
+ :maxdepth: 2
+ :numbered:
+
+ booting
+ ingenic-tcu
+
+ features
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/translations/zh_CN/mips/ingenic-tcu.rst b/Documentation/translations/zh_CN/mips/ingenic-tcu.rst
new file mode 100644
index 000000000000..f04ba407384a
--- /dev/null
+++ b/Documentation/translations/zh_CN/mips/ingenic-tcu.rst
@@ -0,0 +1,69 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :doc:`../../../mips/ingenic-tcu`
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+.. _cn_ingenic-tcu:
+
+===============================================
+Ingenic JZ47xx SoC timer/counter unit hardware
+===============================================
+
+The timer/counter unit (TCU) in Ingenic JZ47xx SoCs is a multi-function
+hardware block. It has up to 8 channels that can be used as counters,
+timers, or pulse-width modulators.
+
+- The JZ4725B, JZ4750 and JZ4755 only have 6 TCU channels. The other SoCs all
+  have 8 channels.
+
+- The JZ4725B introduced a separate channel, called the Operating System
+  Timer (OST). It is a 32-bit programmable timer. On the JZ4760B and above,
+  it is 64-bit.
+
+- Each TCU channel has its own clock source; the channel's parent clock
+  (pclk, ext, rtc), gating and divider can be set through the TCSR register.
+
+  - The watchdog and OST hardware blocks also feature a TCSR register of the
+    same form in their register space.
+  - The TCU registers used to gate/ungate can also gate/ungate the watchdog
+    and OST clocks.
+
+- Each TCU channel runs in one of two modes:
+
+  - mode TCU1: the channel cannot run in sleep mode, but is easier to
+    operate.
+  - mode TCU2: the channel can run in sleep mode, but operation is a bit
+    more complicated than with TCU1 channels.
+
+- The mode of each TCU channel depends on the SoC used:
+
+  - On the oldest SoCs (up to the JZ4740), all eight channels operate in
+    TCU1 mode.
+  - On the JZ4725B, channel 5 operates in TCU2 mode, the others in TCU1 mode.
+  - On the newest SoCs (JZ4750 and onwards), channels 1-2 operate in TCU2
+    mode, the others in TCU1 mode.
+
+- Each channel can generate an interrupt. Some channels share an interrupt
+  line while others do not; this varies between SoC models:
+
+  - On the oldest SoCs (JZ4740 and below), channel 0 and channel 1 have their
+    own interrupt line; channels 2-7 share the last interrupt line.
+  - On the JZ4725B, channel 0 has its own interrupt line; channels 1-5 share
+    one interrupt line; the OST uses the last interrupt line.
+  - On newer SoCs (JZ4750 and onwards), channel 5 has its own interrupt line;
+    channels 0-4 and (if there are 8 channels) 6-7 all share one interrupt
+    line; the OST uses the last interrupt line.
+
+Implementation
+==============
+
+The functionality of the TCU hardware is spread across multiple drivers:
+
+=============== ===================================
+clocks          drivers/clk/ingenic/tcu.c
+interrupts      drivers/irqchip/irq-ingenic-tcu.c
+timers          drivers/clocksource/ingenic-timer.c
+OST             drivers/clocksource/ingenic-ost.c
+PWM             drivers/pwm/pwm-jz4740.c
+watchdog        drivers/watchdog/jz4740_wdt.c
+=============== ===================================
+
+Because various functionalities of the TCU that belong to different drivers
+and frameworks can be controlled from the same registers, all of these
+drivers access their registers through the same common control-bus interface.
+
+For more information regarding the devicetree bindings of the TCU drivers,
+have a look at:
+Documentation/devicetree/bindings/timer/ingenic,tcu.yaml.
diff --git a/Documentation/translations/zh_CN/process/4.Coding.rst b/Documentation/translations/zh_CN/process/4.Coding.rst
index 959a06ba025c..66cd8ee07606 100644
--- a/Documentation/translations/zh_CN/process/4.Coding.rst
+++ b/Documentation/translations/zh_CN/process/4.Coding.rst
@@ -165,7 +165,7 @@ Linus gave the best answer to this question:
 Usually, these warnings point to real problems. Code submitted for review should generally not produce any compiler warnings.
 When silencing a warning, take care to understand the real cause and try to avoid "fixes" that make the warning disappear without addressing it.
-Note that not all compiler warnings are enabled by default. Build the kernel with "make EXTRA_CFLAGS=-W" to
+Note that not all compiler warnings are enabled by default. Build the kernel with "make KCFLAGS=-W" to
 get the full set.
 The kernel provides several configuration options that turn on debugging features; most of these are found in the "kernel hacking"
diff --git a/Documentation/translations/zh_CN/process/magic-number.rst b/Documentation/translations/zh_CN/process/magic-number.rst
index de182bf4191c..7bb9d4165ed3 100644
--- a/Documentation/translations/zh_CN/process/magic-number.rst
+++ b/Documentation/translations/zh_CN/process/magic-number.rst
@@ -124,7 +124,6 @@ FW_HEADER_MAGIC 0x65726F66 fw_header ``drivers/atm/fo
SLOT_MAGIC 0x67267321 slot ``drivers/hotplug/cpqphp.h``
SLOT_MAGIC 0x67267322 slot ``drivers/hotplug/acpiphp.h``
LO_MAGIC 0x68797548 nbd_device ``include/linux/nbd.h``
-OPROFILE_MAGIC 0x6f70726f super_block ``drivers/oprofile/oprofilefs.h``
M3_STATE_MAGIC 0x734d724d m3_state ``sound/oss/maestro3.c``
VMALLOC_MAGIC 0x87654320 snd_alloc_track ``sound/core/memory.c``
KMALLOC_MAGIC 0x87654321 snd_alloc_track ``sound/core/memory.c``
diff --git a/Documentation/translations/zh_CN/process/submitting-patches.rst b/Documentation/translations/zh_CN/process/submitting-patches.rst
index 2e7dbaad4028..4fc6d16f5196 100644
--- a/Documentation/translations/zh_CN/process/submitting-patches.rst
+++ b/Documentation/translations/zh_CN/process/submitting-patches.rst
@@ -668,13 +668,13 @@ Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer".
<http://www.kroah.com/log/linux/maintainer-06.html>
NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!
- <https://lkml.org/lkml/2005/7/11/336>
+ <https://lore.kernel.org/r/20050711.125305.08322243.davem@davemloft.net>
Kernel Documentation/process/coding-style.rst:
:ref:`Documentation/translations/zh_CN/process/coding-style.rst <cn_codingstyle>`
Linus Torvalds's mail on the canonical patch format:
- <http://lkml.org/lkml/2005/4/7/183>
+ <https://lore.kernel.org/r/Pine.LNX.4.58.0504071023190.28951@ppc970.osdl.org>
Andi Kleen, "On submitting kernel patches"
Some strategies to get difficult or controversial changes in.
diff --git a/Documentation/usb/gadget-testing.rst b/Documentation/usb/gadget-testing.rst
index 2eeb3e9299e4..2085e7b24eeb 100644
--- a/Documentation/usb/gadget-testing.rst
+++ b/Documentation/usb/gadget-testing.rst
@@ -91,9 +91,9 @@ The ECM function provides these attributes in its function directory:
and after creating the functions/ecm.<instance name> they contain default
values: qmult is 5, dev_addr and host_addr are randomly selected.
-Except for ifname they can be written to until the function is linked to a
-configuration. The ifname is read-only and contains the name of the interface
-which was assigned by the net core, e. g. usb0.
+The ifname can be written to if the function is not bound. A write must be an
+interface pattern such as "usb%d", which will cause the net core to choose the
+next free usbX interface. By default, it is set to "usb%d".
Testing the ECM function
------------------------
@@ -131,9 +131,9 @@ The ECM subset function provides these attributes in its function directory:
and after creating the functions/ecm.<instance name> they contain default
values: qmult is 5, dev_addr and host_addr are randomly selected.
-Except for ifname they can be written to until the function is linked to a
-configuration. The ifname is read-only and contains the name of the interface
-which was assigned by the net core, e. g. usb0.
+The ifname can be written to if the function is not bound. A write must be an
+interface pattern such as "usb%d", which will cause the net core to choose the
+next free usbX interface. By default, it is set to "usb%d".
Testing the ECM subset function
-------------------------------
@@ -171,9 +171,9 @@ The EEM function provides these attributes in its function directory:
and after creating the functions/eem.<instance name> they contain default
values: qmult is 5, dev_addr and host_addr are randomly selected.
-Except for ifname they can be written to until the function is linked to a
-configuration. The ifname is read-only and contains the name of the interface
-which was assigned by the net core, e. g. usb0.
+The ifname can be written to if the function is not bound. A write must be an
+interface pattern such as "usb%d", which will cause the net core to choose the
+next free usbX interface. By default, it is set to "usb%d".
Testing the EEM function
------------------------
@@ -453,9 +453,9 @@ The NCM function provides these attributes in its function directory:
and after creating the functions/ncm.<instance name> they contain default
values: qmult is 5, dev_addr and host_addr are randomly selected.
-Except for ifname they can be written to until the function is linked to a
-configuration. The ifname is read-only and contains the name of the interface
-which was assigned by the net core, e. g. usb0.
+The ifname can be written to if the function is not bound. A write must be an
+interface pattern such as "usb%d", which will cause the net core to choose the
+next free usbX interface. By default, it is set to "usb%d".
Testing the NCM function
------------------------
@@ -591,9 +591,9 @@ The RNDIS function provides these attributes in its function directory:
and after creating the functions/rndis.<instance name> they contain default
values: qmult is 5, dev_addr and host_addr are randomly selected.
-Except for ifname they can be written to until the function is linked to a
-configuration. The ifname is read-only and contains the name of the interface
-which was assigned by the net core, e. g. usb0.
+The ifname can be written to if the function is not bound. A write must be an
+interface pattern such as "usb%d", which will cause the net core to choose the
+next free usbX interface. By default, it is set to "usb%d".
Testing the RNDIS function
--------------------------
diff --git a/Documentation/usb/raw-gadget.rst b/Documentation/usb/raw-gadget.rst
index 68d879a8009e..818a1648b387 100644
--- a/Documentation/usb/raw-gadget.rst
+++ b/Documentation/usb/raw-gadget.rst
@@ -2,83 +2,93 @@
USB Raw Gadget
==============
-USB Raw Gadget is a kernel module that provides a userspace interface for
-the USB Gadget subsystem. Essentially it allows to emulate USB devices
-from userspace. Enabled with CONFIG_USB_RAW_GADGET. Raw Gadget is
-currently a strictly debugging feature and shouldn't be used in
-production, use GadgetFS instead.
+USB Raw Gadget is a gadget driver that gives userspace low-level control over
+the gadget's communication process.
+
+Like any other gadget driver, Raw Gadget implements USB devices via the
+USB gadget API. Unlike most gadget drivers, Raw Gadget does not implement
+any concrete USB functions itself but requires userspace to do that.
+
+Raw Gadget is currently a strictly debugging feature and should not be used
+in production. Use GadgetFS instead.
+
+Enabled with CONFIG_USB_RAW_GADGET.
Comparison to GadgetFS
~~~~~~~~~~~~~~~~~~~~~~
-Raw Gadget is similar to GadgetFS, but provides a more low-level and
-direct access to the USB Gadget layer for the userspace. The key
-differences are:
+Raw Gadget is similar to GadgetFS but provides more direct access to the
+USB gadget layer for userspace. The key differences are:
-1. Every USB request is passed to the userspace to get a response, while
+1. Raw Gadget passes every USB request to userspace to get a response, while
GadgetFS responds to some USB requests internally based on the provided
- descriptors. However note, that the UDC driver might respond to some
- requests on its own and never forward them to the Gadget layer.
+ descriptors. Note that the UDC driver might respond to some requests on
+ its own and never forward them to the gadget layer.
-2. GadgetFS performs some sanity checks on the provided USB descriptors,
- while Raw Gadget allows you to provide arbitrary data as responses to
- USB requests.
+2. Raw Gadget allows providing arbitrary data as responses to USB requests,
+ while GadgetFS performs sanity checks on the provided USB descriptors.
+ This makes Raw Gadget suitable for fuzzing by providing malformed data as
+ responses to USB requests.
3. Raw Gadget provides a way to select a UDC device/driver to bind to,
- while GadgetFS currently binds to the first available UDC.
+ while GadgetFS currently binds to the first available UDC. This allows
+ having multiple Raw Gadget instances bound to different UDCs.
4. Raw Gadget explicitly exposes information about endpoints addresses and
- capabilities allowing a user to write UDC-agnostic gadgets.
+ capabilities. This allows the user to write UDC-agnostic gadgets.
-5. Raw Gadget has ioctl-based interface instead of a filesystem-based one.
+5. Raw Gadget has an ioctl-based interface instead of a filesystem-based
+ one.
Userspace interface
~~~~~~~~~~~~~~~~~~~
-To create a Raw Gadget instance open /dev/raw-gadget. Multiple raw-gadget
-instances (bound to different UDCs) can be used at the same time. The
-interaction with the opened file happens through the ioctl() calls, see
-comments in include/uapi/linux/usb/raw_gadget.h for details.
+The user can interact with Raw Gadget by opening ``/dev/raw-gadget`` and
+issuing ioctl calls; see the comments in include/uapi/linux/usb/raw_gadget.h
+for details. Multiple Raw Gadget instances (bound to different UDCs) can be
+used at the same time.
-The typical usage of Raw Gadget looks like:
+A typical usage scenario of Raw Gadget:
-1. Open Raw Gadget instance via /dev/raw-gadget.
-2. Initialize the instance via USB_RAW_IOCTL_INIT.
-3. Launch the instance with USB_RAW_IOCTL_RUN.
-4. In a loop issue USB_RAW_IOCTL_EVENT_FETCH calls to receive events from
- Raw Gadget and react to those depending on what kind of USB device
- needs to be emulated.
+1. Create a Raw Gadget instance by opening ``/dev/raw-gadget``.
+2. Initialize the instance via ``USB_RAW_IOCTL_INIT``.
+3. Launch the instance with ``USB_RAW_IOCTL_RUN``.
+4. In a loop issue ``USB_RAW_IOCTL_EVENT_FETCH`` to receive events from
+ Raw Gadget and react to those depending on what kind of USB gadget must
+ be implemented.
-Note, that some UDC drivers have fixed addresses assigned to endpoints, and
-therefore arbitrary endpoint addresses can't be used in the descriptors.
-Nevertheles, Raw Gadget provides a UDC-agnostic way to write USB gadgets.
-Once a USB_RAW_EVENT_CONNECT event is received via USB_RAW_IOCTL_EVENT_FETCH,
-the USB_RAW_IOCTL_EPS_INFO ioctl can be used to find out information about
-endpoints that the UDC driver has. Based on that information, the user must
-chose UDC endpoints that will be used for the gadget being emulated, and
-properly assign addresses in endpoint descriptors.
+Note that some UDC drivers have fixed addresses assigned to endpoints, and
+therefore arbitrary endpoint addresses cannot be used in the descriptors.
+Nevertheless, Raw Gadget provides a UDC-agnostic way to write USB gadgets.
+Once ``USB_RAW_EVENT_CONNECT`` is received via ``USB_RAW_IOCTL_EVENT_FETCH``,
+``USB_RAW_IOCTL_EPS_INFO`` can be used to find out information about the
+endpoints that the UDC driver has. Based on that, userspace must choose UDC
+endpoints for the gadget and assign addresses in the endpoint descriptors
+correspondingly.
-You can find usage examples (along with a test suite) here:
+Raw Gadget usage examples and a test suite:
https://github.com/xairy/raw-gadget
Internal details
~~~~~~~~~~~~~~~~
-Currently every endpoint read/write ioctl submits a USB request and waits until
-its completion. This is the desired mode for coverage-guided fuzzing (as we'd
-like all USB request processing happen during the lifetime of a syscall),
-and must be kept in the implementation. (This might be slow for real world
-applications, thus the O_NONBLOCK improvement suggestion below.)
+Every Raw Gadget endpoint read/write ioctl submits a USB request and waits
+until its completion. This is done deliberately to assist with coverage-guided
+fuzzing by having a single syscall fully process a single USB request. This
+feature must be kept in the implementation.
Potential future improvements
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- Report more events (suspend, resume, etc.) through USB_RAW_IOCTL_EVENT_FETCH.
+- Report more events (suspend, resume, etc.) through
+ ``USB_RAW_IOCTL_EVENT_FETCH``.
-- Support O_NONBLOCK I/O.
+- Support ``O_NONBLOCK`` I/O. This would be another mode of operation, where
+ Raw Gadget would not wait until the completion of each USB request.
- Support USB 3 features (accept SS endpoint companion descriptor when
- enabling endpoints; allow providing stream_id for bulk transfers).
+ enabling endpoints; allow providing ``stream_id`` for bulk transfers).
-- Support ISO transfer features (expose frame_number for completed requests).
+- Support ISO transfer features (expose ``frame_number`` for completed
+ requests).
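As an editorial illustration (not part of the patch), here is a hedged C sketch of the typical usage scenario described above. The structure and ioctl names follow include/uapi/linux/usb/raw_gadget.h; the UDC driver/device names assume the dummy_hcd test controller and are only an example::

  /*
   * Hedged sketch of the Raw Gadget usage scenario: open the device,
   * initialize, run, then fetch events in a loop.
   */
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/usb/raw_gadget.h>

  int main(void)
  {
      struct usb_raw_init init;
      int fd = open("/dev/raw-gadget", O_RDWR);

      if (fd < 0) {
          perror("open /dev/raw-gadget");
          return 1;
      }

      memset(&init, 0, sizeof(init));
      strcpy((char *)init.driver_name, "dummy_udc");    /* UDC driver */
      strcpy((char *)init.device_name, "dummy_udc.0");  /* UDC device */
      init.speed = USB_SPEED_HIGH;

      if (ioctl(fd, USB_RAW_IOCTL_INIT, &init) < 0 ||
          ioctl(fd, USB_RAW_IOCTL_RUN, 0) < 0) {
          perror("raw-gadget init/run");
          return 1;
      }

      for (;;) {
          struct {
              struct usb_raw_event ev;
              char data[4096];            /* room for fetched event data */
          } buf;

          buf.ev.type = 0;
          buf.ev.length = sizeof(buf.data);
          if (ioctl(fd, USB_RAW_IOCTL_EVENT_FETCH, &buf.ev) < 0)
              break;
          /*
           * React here depending on the gadget being implemented:
           * USB_RAW_EVENT_CONNECT, control requests, and so on.
           */
      }
      close(fd);
      return 0;
  }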
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
index acd2cc2a538d..d29b020e5622 100644
--- a/Documentation/userspace-api/index.rst
+++ b/Documentation/userspace-api/index.rst
@@ -24,6 +24,7 @@ place where this information is gathered.
ioctl/index
iommu
media/index
+ sysfs-platform_profile
.. only:: subproject and html
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index a4c75a28c839..599bd4493944 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -157,7 +157,6 @@ Code Seq# Include File Comments
'I' all linux/isdn.h conflict!
'I' 00-0F drivers/isdn/divert/isdn_divert.h conflict!
'I' 40-4F linux/mISDNif.h conflict!
-'J' 00-1F drivers/scsi/gdth_ioctl.h
'K' all linux/kd.h
'L' 00-1F linux/loop.h conflict!
'L' 10-1F drivers/scsi/mpt3sas/mpt3sas_ctl.h conflict!
@@ -180,6 +179,7 @@ Code Seq# Include File Comments
'R' 00-1F linux/random.h conflict!
'R' 01 linux/rfkill.h conflict!
'R' C0-DF net/bluetooth/rfcomm.h
+'R' E0 uapi/linux/fsl_mc.h
'S' all linux/cdrom.h conflict!
'S' 80-81 scsi/scsi_ioctl.h conflict!
'S' 82-FF scsi/scsi.h conflict!
@@ -319,11 +319,14 @@ Code Seq# Include File Comments
0xA0 all linux/sdp/sdp.h Industrial Device Project
<mailto:kenji@bitgate.com>
0xA1 0 linux/vtpm_proxy.h TPM Emulator Proxy Driver
+0xA2 all uapi/linux/acrn.h ACRN hypervisor
0xA3 80-8F Port ACL in development:
<mailto:tlewis@mindspring.com>
0xA3 90-9F linux/dtlk.h
0xA4 00-1F uapi/linux/tee.h Generic TEE subsystem
0xA4 00-1F uapi/asm/sgx.h <mailto:linux-sgx@vger.kernel.org>
+0xA5 01 linux/surface_aggregator/cdev.h Microsoft Surface Platform System Aggregator
+ <mailto:luzmaximilian@gmail.com>
0xAA 00-3F linux/uapi/linux/userfaultfd.h
0xAB 00-1F linux/nbd.h
0xAC 00-1F linux/raw.h
@@ -352,6 +355,7 @@ Code Seq# Include File Comments
<mailto:michael.klein@puffin.lb.shuttle.de>
0xCC 00-0F drivers/misc/ibmvmc.h pseries VMC driver
0xCD 01 linux/reiserfs_fs.h
+0xCE 01-02 uapi/linux/cxl_mem.h Compute Express Link Memory Devices
0xCF 02 fs/cifs/ioctl.c
0xDB 00-0F drivers/char/mwave/mwavepub.h
0xDD 00-3F ZFCP device driver see drivers/s390/scsi/
diff --git a/Documentation/userspace-api/media/drivers/ccs.rst b/Documentation/userspace-api/media/drivers/ccs.rst
new file mode 100644
index 000000000000..161cb65f4d98
--- /dev/null
+++ b/Documentation/userspace-api/media/drivers/ccs.rst
@@ -0,0 +1,110 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+.. include:: <isonum.txt>
+
+MIPI CCS camera sensor driver
+=============================
+
+The MIPI CCS camera sensor driver is a generic driver for `MIPI CCS
+<https://www.mipi.org/specifications/camera-command-set>`_ compliant
+camera sensors. It exposes three sub-devices representing the pixel array,
+the binner and the scaler.
+
+As the capabilities of individual devices vary, the driver exposes
+interfaces based on the capabilities that exist in hardware.
+
+Pixel Array sub-device
+----------------------
+
+The pixel array sub-device represents the camera sensor's pixel matrix, as well
+as analogue crop functionality present in many compliant devices. The analogue
+crop is configured using the ``V4L2_SEL_TGT_CROP`` on the source pad (0) of the
+entity. The size of the pixel matrix can be obtained by getting the
+``V4L2_SEL_TGT_NATIVE_SIZE`` target.
+
+Binner
+------
+
+The binner sub-device represents the binning functionality on the sensor. For
+that purpose, selection target ``V4L2_SEL_TGT_COMPOSE`` is supported on the
+sink pad (0).
+
+Additionally, if a device has no scaler or digital crop functionality, the
+source pad (1) exposes another digital crop selection rectangle that can only
+crop at the end of the lines and frames.
+
+Scaler
+------
+
+The scaler sub-device represents the digital crop and scaling functionality of
+the sensor. The V4L2 selection target ``V4L2_SEL_TGT_CROP`` is used to
+configure the digital crop on the sink pad (0) when digital crop is supported.
+Scaling is configured using selection target ``V4L2_SEL_TGT_COMPOSE`` on the
+sink pad (0) as well.
+
+Additionally, if the scaler sub-device exists, its source pad (1) exposes
+another digital crop selection rectangle that can only crop at the end of the
+lines and frames.
+
+Digital and analogue crop
+-------------------------
+
+Digital crop refers to cropping that works by simply dropping some of the
+data on the floor. Analogue crop, on the other hand, means that the cropped
+information is never retrieved: in the case of camera sensors, the parts of
+the pixel matrix outside the configured crop selection rectangle are never
+read out. The difference has an effect on device timing and likely also on
+power consumption.
+
+Private controls
+----------------
+
+The MIPI CCS driver implements a number of private controls under
+``V4L2_CID_USER_BASE_CCS`` to control the MIPI CCS compliant camera sensors.
+
+Analogue gain model
+~~~~~~~~~~~~~~~~~~~
+
+The CCS defines an analogue gain model where the gain can be calculated using
+the following formula:
+
+ gain = (m0 * x + c0) / (m1 * x + c1)
+
+Either m0 or c0 will be zero. The constants, which are device specific, can be
+obtained from the following controls:
+
+ V4L2_CID_CCS_ANALOGUE_GAIN_M0
+ V4L2_CID_CCS_ANALOGUE_GAIN_M1
+ V4L2_CID_CCS_ANALOGUE_GAIN_C0
+ V4L2_CID_CCS_ANALOGUE_GAIN_C1
+
+The analogue gain (``x`` in the formula) is controlled through
+``V4L2_CID_ANALOGUE_GAIN`` in this case.
+
+Alternate analogue gain model
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The CCS defines another analogue gain model called alternate analogue gain. In
+this case, the formula to calculate actual gain consists of linear and
+exponential parts:
+
+ gain = linear * 2 ^ exponent
+
+The ``linear`` and ``exponent`` factors can be set using the
+``V4L2_CID_CCS_ANALOGUE_LINEAR_GAIN`` and
+``V4L2_CID_CCS_ANALOGUE_EXPONENTIAL_GAIN`` controls, respectively.
+
+Shading correction
+~~~~~~~~~~~~~~~~~~
+
+The CCS standard supports lens shading correction. The feature can be controlled
+using ``V4L2_CID_CCS_SHADING_CORRECTION``. Additionally, the luminance
+correction level may be changed using
+``V4L2_CID_CCS_LUMINANCE_CORRECTION_LEVEL``, where value 0 indicates no
+correction and 128 indicates correcting the luminance in corners to 10 % less
+than in the centre.
+
+Shading correction needs to be enabled for the luminance correction level to
+have an effect.
+
+**Copyright** |copy| 2020 Intel Corporation
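As a small worked illustration (not part of the patch), the sketch below evaluates both gain models described above. The constants would normally be read from the listed V4L2 controls; the numeric values in main() are made up purely for illustration::

  /*
   * Evaluate the two CCS analogue gain models. The constants would normally
   * be read from the V4L2_CID_CCS_ANALOGUE_GAIN_M0/M1/C0/C1 controls.
   */
  #include <math.h>
  #include <stdio.h>

  static double ccs_analogue_gain(double m0, double c0, double m1, double c1,
                                  double x)
  {
      return (m0 * x + c0) / (m1 * x + c1);
  }

  static double ccs_alternate_gain(double linear, double exponent)
  {
      return linear * pow(2.0, exponent);
  }

  int main(void)
  {
      /* Hypothetical constants: m0 = 0, c0 = 512, m1 = -1, c1 = 512 */
      printf("gain at code 256: %f\n", ccs_analogue_gain(0, 512, -1, 512, 256));
      printf("alternate gain:   %f\n", ccs_alternate_gain(3, 2));
      return 0;
  }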
diff --git a/Documentation/userspace-api/media/drivers/index.rst b/Documentation/userspace-api/media/drivers/index.rst
index 05a82f8c0c99..1a9038f5f9fa 100644
--- a/Documentation/userspace-api/media/drivers/index.rst
+++ b/Documentation/userspace-api/media/drivers/index.rst
@@ -31,6 +31,7 @@ For more details see the file COPYING in the source distribution of Linux.
:maxdepth: 5
:numbered:
+ ccs
cx2341x-uapi
imx-uapi
max2175
diff --git a/Documentation/userspace-api/media/dvb/dvbstb.svg b/Documentation/userspace-api/media/dvb/dvbstb.svg
index 87e68baa056b..6f0ba98f9bf9 100644
--- a/Documentation/userspace-api/media/dvb/dvbstb.svg
+++ b/Documentation/userspace-api/media/dvb/dvbstb.svg
@@ -2,7 +2,7 @@
<!-- SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-no-invariants-or-later -->
<svg id="svg2" width="15.847cm" height="8.4187cm" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" preserveAspectRatio="xMidYMid" version="1.2" viewBox="0 0 23770.123 12628.122" xml:space="preserve" xmlns="http://www.w3.org/2000/svg" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"><defs id="defs142"><marker id="Arrow1Lend" overflow="visible" orient="auto"><path id="path954" transform="matrix(-.8 0 0 -.8 -10 0)" d="m0 0 5-5-17.5 5 17.5 5z" fill-rule="evenodd" stroke="#000" stroke-width="1pt"/></marker><marker id="marker1243" overflow="visible" orient="auto"><path id="path1241" transform="matrix(-.8 0 0 -.8 -10 0)" d="m0 0 5-5-17.5 5 17.5 5z" fill-rule="evenodd" stroke="#000" stroke-width="1pt"/></marker></defs><metadata id="metadata519"><rdf:RDF><cc:Work
rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/><dc:title/></cc:Work></rdf:RDF></metadata><rect id="rect197" class="BoundingBox" x="5355.1" y="13.122" width="18403" height="9603" fill="none"/><path id="path199" d="m14556 9614.1h-9200v-9600h18400v9600z" fill="#fff"/><path id="path201" d="m14556 9614.1h-9200v-9600h18400v9600z" fill="none" stroke="#000"/><rect id="rect206" class="BoundingBox" x="13.122" y="4013.1" width="4544" height="2403" fill="none"/><path id="path208" d="m2285.1 6414.1h-2271v-2400h4541v2400z" fill="#fff"/><path id="path210" d="m2285.1 6414.1h-2271v-2400h4541v2400z" fill="none" stroke="#000"/><text id="text212" class="TextShape" x="-2443.8779" y="-4585.8779"><tspan id="tspan214" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan216" class="TextPosition"
-x="1281.1219" y="5435.1221"><tspan id="tspan218" fill="#000000">Antena</tspan></tspan></tspan></text>
+x="1013.1317" y="5435.1221"><tspan id="tspan218" fill="#000000">Antenna</tspan></tspan></tspan></text>
<rect id="rect223" class="BoundingBox" x="6213.1" y="1813.1" width="4544" height="2403" fill="none"/><path id="path225" d="m8485.1 4214.1h-2271v-2400h4541v2400z" fill="#fff"/><path id="path227" d="m8485.1 4214.1h-2271v-2400h4541v2400z" fill="none" stroke="#000"/><text id="text229" class="TextShape" x="-2443.8779" y="-4585.8779"><tspan id="tspan231" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan233" class="TextPosition" x="7217.1221" y="3235.1221"><tspan id="tspan235" fill="#000000">Frontend</tspan></tspan></tspan></text>
<rect id="rect240" class="BoundingBox" x="12113" y="1813.1" width="4544" height="2403" fill="none"/><path id="path242" d="m14385 4214.1h-2271v-2400h4541v2400z" fill="#fff"/><path id="path244" d="m14385 4214.1h-2271v-2400h4541v2400z" fill="none" stroke="#000"/><text id="text246" class="TextShape" x="-2443.8779" y="-4585.8779"><tspan id="tspan248" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan250" class="TextPosition" x="13944.122" y="3235.1221"><tspan id="tspan252" fill="#000000">CA</tspan></tspan></tspan></text>
<rect id="rect257" class="BoundingBox" x="18113" y="1813.1" width="4544" height="2403" fill="none"/><path id="path259" d="m20385 4214.1h-2271v-2400h4541v2400z" fill="#fff"/><path id="path261" d="m20385 4214.1h-2271v-2400h4541v2400z" fill="none" stroke="#000"/><text id="text263" class="TextShape" x="-2443.8779" y="-4585.8779"><tspan id="tspan265" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan267" class="TextPosition" x="19384.123" y="3235.1221"><tspan id="tspan269" fill="#000000">Demux</tspan></tspan></tspan></text>
diff --git a/Documentation/userspace-api/media/mediactl/media-types.rst b/Documentation/userspace-api/media/mediactl/media-types.rst
index 7b24a213cae7..e1e4043b3b1c 100644
--- a/Documentation/userspace-api/media/mediactl/media-types.rst
+++ b/Documentation/userspace-api/media/mediactl/media-types.rst
@@ -39,6 +39,7 @@ Types and flags used to represent the media graph elements
.. _MEDIA-ENT-F-PROC-VIDEO-STATISTICS:
.. _MEDIA-ENT-F-PROC-VIDEO-ENCODER:
.. _MEDIA-ENT-F-PROC-VIDEO-DECODER:
+.. _MEDIA-ENT-F-PROC-VIDEO-ISP:
.. _MEDIA-ENT-F-VID-MUX:
.. _MEDIA-ENT-F-VID-IF-BRIDGE:
.. _MEDIA-ENT-F-DV-DECODER:
@@ -201,6 +202,12 @@ Types and flags used to represent the media graph elements
decompressing a compressed video stream into uncompressed video
frames. Must have one sink pad and at least one source pad.
+ * - ``MEDIA_ENT_F_PROC_VIDEO_ISP``
+ - An Image Signal Processor (ISP) device. ISPs are generally one-of-a-kind
+ devices that have their own specific control interfaces, using a
+ combination of custom V4L2 controls and ioctls, with parameters
+ supplied in a metadata buffer.
+
* - ``MEDIA_ENT_F_VID_MUX``
- Video multiplexer. An entity capable of multiplexing must have at
least two sink pads and one source pad, and must pass the video
diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst
index 454ecd9a0f83..00944e97d638 100644
--- a/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst
+++ b/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst
@@ -1182,6 +1182,18 @@ enum v4l2_mpeg_video_h264_entropy_mode -
V4L2_CID_MPEG_VIDEO_H264_MAX_QP is also set, the quantization parameter
should be chosen to meet both requirements.
+``V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP (integer)``
+ Minimum quantization parameter for the H264 B frame to limit B frame
+ quality to a range. Valid range: from 0 to 51. If
+ V4L2_CID_MPEG_VIDEO_H264_MIN_QP is also set, the quantization parameter
+ should be chosen to meet both requirements.
+
+``V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP (integer)``
+ Maximum quantization parameter for the H264 B frame to limit B frame
+ quality to a range. Valid range: from 0 to 51. If
+ V4L2_CID_MPEG_VIDEO_H264_MAX_QP is also set, the quantization parameter
+ should be chosen to meet both requirements.
+
``V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (integer)``
Quantization parameter for an I frame for MPEG4. Valid range: from 1
to 31.
@@ -1501,6 +1513,26 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
* - Bit 16:32
- Layer number
+``V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR (integer)``
+ Indicates bit rate (bps) for hierarchical coding layer 0 for H264 encoder.
+
+``V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR (integer)``
+ Indicates bit rate (bps) for hierarchical coding layer 1 for H264 encoder.
+
+``V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR (integer)``
+ Indicates bit rate (bps) for hierarchical coding layer 2 for H264 encoder.
+
+``V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR (integer)``
+ Indicates bit rate (bps) for hierarchical coding layer 3 for H264 encoder.
+
+``V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR (integer)``
+ Indicates bit rate (bps) for hierarchical coding layer 4 for H264 encoder.
+
+``V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR (integer)``
+ Indicates bit rate (bps) for hierarchical coding layer 5 for H264 encoder.
+
+``V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR (integer)``
+ Indicates bit rate (bps) for hierarchical coding layer 6 for H264 encoder.
.. _v4l2-mpeg-mpeg2:
@@ -2628,11 +2660,11 @@ HEVC/H.265 Control IDs
``V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP (integer)``
Minimum quantization parameter for HEVC.
- Valid range: from 0 to 51.
+ Valid range: from 0 to 51 for 8 bit and from 0 to 63 for 10 bit.
``V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP (integer)``
Maximum quantization parameter for HEVC.
- Valid range: from 0 to 51.
+ Valid range: from 0 to 51 for 8 bit and from 0 to 63 for 10 bit.
``V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP (integer)``
Quantization parameter for an I frame for HEVC.
@@ -2649,6 +2681,42 @@ HEVC/H.265 Control IDs
Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].
+``V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP (integer)``
+ Minimum quantization parameter for the HEVC I frame to limit I frame
+ quality to a range. Valid range: from 0 to 51 for 8 bit and from 0 to 63 for 10 bit.
+ If V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP is also set, the quantization parameter
+ should be chosen to meet both requirements.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP (integer)``
+ Maximum quantization parameter for the HEVC I frame to limit I frame
+ quality to a range. Valid range: from 0 to 51 for 8 bit and from 0 to 63 for 10 bit.
+ If V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP is also set, the quantization parameter
+ should be chosen to meet both requirements.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP (integer)``
+ Minimum quantization parameter for the HEVC P frame to limit P frame
+ quality to a range. Valid range: from 0 to 51 for 8 bit and from 0 to 63 for 10 bit.
+ If V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP is also set, the quantization parameter
+ should be chosen to meet both requirements.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP (integer)``
+ Maximum quantization parameter for the HEVC P frame to limit P frame
+ quality to a range. Valid range: from 0 to 51 for 8 bit and from 0 to 63 for 10 bit.
+ If V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP is also set, the quantization parameter
+ should be chosen to meet both requirements.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP (integer)``
+ Minimum quantization parameter for the HEVC B frame to limit B frame
+ quality to a range. Valid range: from 0 to 51 for 8 bit and from 0 to 63 for 10 bit.
+ If V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP is also set, the quantization parameter
+ should be chosen to meet both requirements.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP (integer)``
+ Maximum quantization parameter for the HEVC B frame to limit B frame
+ quality to a range. Valid range: from 0 to 51 for 8 bit and from 0 to 63 for 10 bit.
+ If V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP is also set, the quantization parameter
+ should be chosen to meet both requirements.
+
``V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP (boolean)``
HIERARCHICAL_QP allows the host to specify the quantization parameter
values for each temporal layer through HIERARCHICAL_QP_LAYER. This is
@@ -3569,3 +3637,12 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
- Selecting this value specifies that HEVC slices are expected
to be prefixed by Annex B start codes. According to :ref:`hevc`
valid start codes can be 3-bytes 0x000001 or 4-bytes 0x00000001.
+
+``V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID (integer)``
+ Specifies a priority identifier for the NAL unit, which will be applied to
+ the base layer. By default this value is set to 0 for the base layer,
+ and the next layer will have the priority ID assigned as 1, 2, 3 and so on.
+ The video encoder cannot decide the priority id to be applied to a layer,
+ so this has to come from the client.
+ This is applicable to H264, and the valid range is from 0 to 63.
+ Source Rec. ITU-T H.264 (06/2019); G.7.4.1.1, G.8.8.1.
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst b/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
index 7d4d39201a3f..1e0db602cc1b 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
@@ -396,9 +396,9 @@ number of lines as the luma plane.
NV24 and NV42
-------------
-Semi-planar YUV 4:4:4 formats. The chroma plane is subsampled by 2 in the
-horizontal direction. Chroma lines contain half the number of pixels and the
-same number of bytes as luma lines, and the chroma plane contains the same
+Semi-planar YUV 4:4:4 formats. The chroma plane is not subsampled.
+Chroma lines contain the same number of pixels and twice the
+number of bytes as luma lines, and the chroma plane contains the same
number of lines as the luma plane.
.. flat-table:: Sample 4x4 NV24 Image
diff --git a/Documentation/userspace-api/sysfs-platform_profile.rst b/Documentation/userspace-api/sysfs-platform_profile.rst
new file mode 100644
index 000000000000..c33a71263d9e
--- /dev/null
+++ b/Documentation/userspace-api/sysfs-platform_profile.rst
@@ -0,0 +1,42 @@
+=====================================================================
+Platform Profile Selection (e.g. /sys/firmware/acpi/platform_profile)
+=====================================================================
+
+On modern systems the platform performance, temperature, fan and other
+hardware related characteristics are often dynamically configurable. The
+platform configuration is often automatically adjusted to the current
+conditions by some automatic mechanism (which may very well live outside
+the kernel).
+
+These auto platform adjustment mechanisms often can be configured with
+one of several platform profiles, with either a bias towards low power
+operation or towards performance.
+
+The purpose of the platform_profile attribute is to offer a generic sysfs
+API for selecting the platform profile of these automatic mechanisms.
+
+Note that this API is only for selecting the platform profile, it is
+NOT a goal of this API to allow monitoring the resulting performance
+characteristics. Monitoring performance is best done with device/vendor
+specific tools such as e.g. turbostat.
+
+Specifically when selecting a high performance profile the actual achieved
+performance may be limited by various factors such as: the heat generated
+by other components, room temperature, free air flow at the bottom of a
+laptop, etc. It is explicitly NOT a goal of this API to let userspace know
+about any sub-optimal conditions which are impeding reaching the requested
+performance level.
+
+Since numbers on their own cannot represent the multiple variables that a
+profile will adjust (power consumption, heat generation, etc.), this API
+uses strings to describe the various profiles. To make sure that userspace
+gets a consistent experience the sysfs-platform_profile ABI document defines
+a fixed set of profile names. Drivers *must* map their internal profile
+representation onto this fixed set.
+
+If there is no good match when mapping then a new profile name may be
+added. Drivers which wish to introduce new profile names must:
+
+ 1. Explain why the existing profile names cannot be used.
+ 2. Add the new profile name, along with a clear description of the
+ expected behaviour, to the sysfs-platform_profile ABI documentation.
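As a usage sketch (not part of the patch), the snippet below lists the available profiles and requests one. The "low-power" name is only an example; the set of valid names depends on the platform driver and the sysfs-platform_profile ABI document::

  /*
   * Usage sketch: list the available platform profiles, then request one.
   */
  #include <stdio.h>

  int main(void)
  {
      char buf[256];
      FILE *f = fopen("/sys/firmware/acpi/platform_profile_choices", "r");

      if (f) {
          if (fgets(buf, sizeof(buf), f))
              printf("available profiles: %s", buf);
          fclose(f);
      }

      f = fopen("/sys/firmware/acpi/platform_profile", "w");
      if (!f || fputs("low-power", f) == EOF || fclose(f) != 0) {
          perror("set platform_profile");
          return 1;
      }
      return 0;
  }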
diff --git a/Documentation/virt/acrn/cpuid.rst b/Documentation/virt/acrn/cpuid.rst
new file mode 100644
index 000000000000..65fa4b9c1798
--- /dev/null
+++ b/Documentation/virt/acrn/cpuid.rst
@@ -0,0 +1,46 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+ACRN CPUID bits
+===============
+
+A guest VM running on an ACRN hypervisor can check some of its features using
+CPUID.
+
+ACRN cpuid functions are:
+
+function: 0x40000000
+
+returns::
+
+ eax = 0x40000010
+ ebx = 0x4e524341
+ ecx = 0x4e524341
+ edx = 0x4e524341
+
+Note that the values in ebx, ecx and edx correspond to the string
+"ACRNACRNACRN". The value in eax corresponds to the maximum cpuid function
+present in this leaf, and will be updated if more functions are added in the
+future.
+
+function: define ACRN_CPUID_FEATURES (0x40000001)
+
+returns::
+
+ ebx, ecx, edx
+ eax = an OR'ed group of (1 << flag)
+
+where ``flag`` is defined as below:
+
+================================= =========== ================================
+flag value meaning
+================================= =========== ================================
+ACRN_FEATURE_PRIVILEGED_VM 0 guest VM is a privileged VM
+================================= =========== ================================
+
+function: 0x40000010
+
+returns::
+
+ ebx, ecx, edx
+ eax = (Virtual) TSC frequency in kHz.
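A guest can query these leaves directly. Below is a hedged sketch (not part of the patch) using the __cpuid() macro from GCC/clang's <cpuid.h>; on bare metal, leaf 0x40000000 returns no meaningful hypervisor signature::

  /*
   * Sketch: query the ACRN CPUID leaves described above from inside a guest.
   */
  #include <cpuid.h>
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
      unsigned int eax, ebx, ecx, edx;
      char sig[13];

      __cpuid(0x40000000, eax, ebx, ecx, edx);
      memcpy(sig, &ebx, 4);
      memcpy(sig + 4, &ecx, 4);
      memcpy(sig + 8, &edx, 4);
      sig[12] = '\0';
      printf("hypervisor signature: %s, max leaf 0x%x\n", sig, eax);

      if (strcmp(sig, "ACRNACRNACRN") == 0) {
          __cpuid(0x40000001, eax, ebx, ecx, edx);
          printf("privileged VM: %s\n", (eax & 1) ? "yes" : "no");

          __cpuid(0x40000010, eax, ebx, ecx, edx);
          printf("virtual TSC frequency: %u kHz\n", eax);
      }
      return 0;
  }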
diff --git a/Documentation/virt/acrn/index.rst b/Documentation/virt/acrn/index.rst
new file mode 100644
index 000000000000..b5f793e73df5
--- /dev/null
+++ b/Documentation/virt/acrn/index.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+ACRN Hypervisor
+===============
+
+.. toctree::
+ :maxdepth: 1
+
+ introduction
+ io-request
+ cpuid
diff --git a/Documentation/virt/acrn/introduction.rst b/Documentation/virt/acrn/introduction.rst
new file mode 100644
index 000000000000..f8d081bc084d
--- /dev/null
+++ b/Documentation/virt/acrn/introduction.rst
@@ -0,0 +1,43 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+ACRN Hypervisor Introduction
+============================
+
+The ACRN Hypervisor is a Type 1 hypervisor, running directly on bare-metal
+hardware. It has a privileged management VM, called Service VM, to manage User
+VMs and do I/O emulation.
+
+ACRN userspace is an application running in the Service VM that emulates
+devices for a User VM based on command line configurations. ACRN Hypervisor
+Service Module (HSM) is a kernel module in the Service VM which provides
+hypervisor services to the ACRN userspace.
+
+The figure below shows the architecture.
+
+::
+
+ Service VM User VM
+ +----------------------------+ | +------------------+
+ | +--------------+ | | | |
+ | |ACRN userspace| | | | |
+ | +--------------+ | | | |
+ |-----------------ioctl------| | | | ...
+ |kernel space +----------+ | | | |
+ | | HSM | | | | Drivers |
+ | +----------+ | | | |
+ +--------------------|-------+ | +------------------+
+ +---------------------hypercall----------------------------------------+
+ | ACRN Hypervisor |
+ +----------------------------------------------------------------------+
+ | Hardware |
+ +----------------------------------------------------------------------+
+
+ACRN userspace allocates memory for the User VM, configures and initializes the
+devices used by the User VM, loads the virtual bootloader, initializes the
+virtual CPU state and handles I/O request accesses from the User VM. It uses
+ioctls to communicate with the HSM. HSM implements hypervisor services by
+interacting with the ACRN Hypervisor via hypercalls. HSM exports a char device
+interface (/dev/acrn_hsm) to userspace.
+
+The ACRN hypervisor is open for contribution from anyone. The source repo is
+available at https://github.com/projectacrn/acrn-hypervisor.
diff --git a/Documentation/virt/acrn/io-request.rst b/Documentation/virt/acrn/io-request.rst
new file mode 100644
index 000000000000..6cc3ea0fa1f5
--- /dev/null
+++ b/Documentation/virt/acrn/io-request.rst
@@ -0,0 +1,97 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+I/O request handling
+====================
+
+An I/O request of a User VM, which is constructed by the hypervisor, is
+distributed by the ACRN Hypervisor Service Module to an I/O client
+corresponding to the address range of the I/O request. Details of I/O request
+handling are described in the following sections.
+
+1. I/O request
+--------------
+
+For each User VM, there is a shared 4-KByte memory region used for I/O request
+communication between the hypervisor and the Service VM. An I/O request is a
+256-byte buffer, described by 'struct acrn_io_request', that is filled by an
+I/O handler of the hypervisor when a trapped I/O access happens in a User VM.
+ACRN userspace in the Service VM first allocates a 4-KByte page and passes the
+GPA (Guest Physical Address) of the buffer to the hypervisor. The buffer is
+used as an array of 16 I/O request slots, each slot being 256 bytes. This
+array is indexed by vCPU ID.
+
+2. I/O clients
+--------------
+
+An I/O client is responsible for handling User VM I/O requests whose accessed
+GPA falls in a certain range. Multiple I/O clients can be associated with each
+User VM. There is a special client associated with each User VM, called the
+default client, that handles all I/O requests that do not fit into the range of
+any other clients. The ACRN userspace acts as the default client for each User
+VM.
+
+The illustration below shows the relationship between the shared I/O request
+buffer, I/O requests and I/O clients.
+
+::
+
+ +------------------------------------------------------+
+ | Service VM |
+ |+--------------------------------------------------+ |
+ || +----------------------------------------+ | |
+ || | shared page ACRN userspace | | |
+ || | +-----------------+ +------------+ | | |
+ || +----+->| acrn_io_request |<-+ default | | | |
+ || | | | +-----------------+ | I/O client | | | |
+ || | | | | ... | +------------+ | | |
+ || | | | +-----------------+ | | |
+ || | +-|--------------------------------------+ | |
+ ||---|----|-----------------------------------------| |
+ || | | kernel | |
+ || | | +----------------------+ | |
+ || | | | +-------------+ HSM | | |
+ || | +--------------+ | | | |
+ || | | | I/O clients | | | |
+ || | | | | | | |
+ || | | +-------------+ | | |
+ || | +----------------------+ | |
+ |+---|----------------------------------------------+ |
+ +----|-------------------------------------------------+
+ |
+ +----|-------------------------------------------------+
+ | +-+-----------+ |
+ | | I/O handler | ACRN Hypervisor |
+ | +-------------+ |
+ +------------------------------------------------------+
+
+3. I/O request state transition
+-------------------------------
+
+The state transitions of an ACRN I/O request are as follows.
+
+::
+
+ FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE -> ...
+
+- FREE: this I/O request slot is empty
+- PENDING: a valid I/O request is pending in this slot
+- PROCESSING: the I/O request is being processed
+- COMPLETE: the I/O request has been processed
+
+An I/O request in COMPLETE or FREE state is owned by the hypervisor. HSM and
+ACRN userspace are in charge of processing the others.
+
+4. Processing flow of I/O requests
+----------------------------------
+
+a. The I/O handler of the hypervisor will fill an I/O request with PENDING
+ state when a trapped I/O access happens in a User VM.
+b. The hypervisor makes an upcall, which is a notification interrupt, to
+ the Service VM.
+c. The upcall handler schedules a worker to dispatch I/O requests.
+d. The worker looks for the PENDING I/O requests, assigns them to different
+ registered clients based on the address of the I/O accesses, updates
+ their state to PROCESSING, and notifies the corresponding client to handle.
+e. The notified client handles the assigned I/O requests.
+f. The HSM updates I/O requests states to COMPLETE and notifies the hypervisor
+ of the completion via hypercalls.
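The dispatch step (d) above can be pictured with a short editorial sketch (not part of the patch). The slot layout, state values and helper below are hypothetical placeholders; the authoritative definitions live in include/uapi/linux/acrn.h and the HSM sources, and this only illustrates the scan, mark and notify flow::

  /*
   * Illustrative sketch: scan the 16 per-vCPU slots in the shared page for
   * PENDING requests, mark them PROCESSING and hand them to a client.
   * All names here are hypothetical placeholders.
   */
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  enum { IOREQ_FREE, IOREQ_PENDING, IOREQ_PROCESSING, IOREQ_COMPLETE };

  struct ioreq_slot {                 /* hypothetical 256-byte request slot */
      uint32_t state;
      uint64_t addr;                  /* GPA or port of the trapped access */
      uint8_t  pad[240];
  };

  static void handle_with_client(int vcpu, struct ioreq_slot *slot)
  {
      /*
       * A real dispatcher would pick the registered client whose range
       * covers slot->addr and notify it; here we just log the request.
       */
      printf("vCPU %d: request at 0x%llx assigned to a client\n",
             vcpu, (unsigned long long)slot->addr);
  }

  static void dispatch_pending(struct ioreq_slot *shared_page)
  {
      for (int vcpu = 0; vcpu < 16; vcpu++) {
          struct ioreq_slot *slot = &shared_page[vcpu];

          if (slot->state != IOREQ_PENDING)
              continue;
          slot->state = IOREQ_PROCESSING;
          handle_with_client(vcpu, slot);
      }
  }

  int main(void)
  {
      struct ioreq_slot page[16];

      memset(page, 0, sizeof(page));
      page[3].state = IOREQ_PENDING;  /* pretend vCPU 3 trapped an access */
      page[3].addr = 0x3f8;
      dispatch_pending(page);
      return 0;
  }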
diff --git a/Documentation/virt/index.rst b/Documentation/virt/index.rst
index 350f5c869b56..edea7fea95a8 100644
--- a/Documentation/virt/index.rst
+++ b/Documentation/virt/index.rst
@@ -12,6 +12,7 @@ Linux Virtualization Support
paravirt_ops
guest-halt-polling
ne_overview
+ acrn/index
.. only:: html and subproject
diff --git a/Documentation/virt/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst
index 09a8f2a34e39..469a6308765b 100644
--- a/Documentation/virt/kvm/amd-memory-encryption.rst
+++ b/Documentation/virt/kvm/amd-memory-encryption.rst
@@ -263,6 +263,27 @@ Returns: 0 on success, -negative on error
__u32 trans_len;
};
+10. KVM_SEV_GET_ATTESTATION_REPORT
+----------------------------------
+
+The KVM_SEV_GET_ATTESTATION_REPORT command can be used by the hypervisor to query the attestation
+report containing the SHA-256 digest of the guest memory and VMSA passed through the KVM_SEV_LAUNCH
+commands and signed with the PEK. The digest returned by the command should match the digest
+obtained by the guest owner with KVM_SEV_LAUNCH_MEASURE.
+
+Parameters (in): struct kvm_sev_attestation
+
+Returns: 0 on success, -negative on error
+
+::
+
+ struct kvm_sev_attestation_report {
+ __u8 mnonce[16]; /* A random mnonce that will be placed in the report */
+
+ __u64 uaddr; /* userspace address where the report should be copied */
+ __u32 len;
+ };
+
References
==========
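As a hedged usage sketch for the KVM_SEV_GET_ATTESTATION_REPORT section added above (not part of the patch): it assumes kernel headers that already define the command, an existing SEV VM file descriptor and an open /dev/sev file descriptor (obtaining those is not shown), and it simplifies report sizing to a fixed 4 KiB buffer::

  /*
   * Hedged sketch of issuing KVM_SEV_GET_ATTESTATION_REPORT via the
   * KVM_MEMORY_ENCRYPT_OP vm ioctl.
   */
  #include <linux/kvm.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ioctl.h>

  int get_attestation_report(int vm_fd, int sev_fd)
  {
      static uint8_t report[4096];
      struct kvm_sev_attestation_report rep = {
          .uaddr = (uint64_t)(uintptr_t)report,
          .len = sizeof(report),
      };
      struct kvm_sev_cmd cmd = {
          .id = KVM_SEV_GET_ATTESTATION_REPORT,
          .data = (uint64_t)(uintptr_t)&rep,
          .sev_fd = sev_fd,
      };

      /* The mnonce is echoed back inside the signed report. */
      memcpy(rep.mnonce, "0123456789abcdef", sizeof(rep.mnonce));

      if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd) < 0) {
          fprintf(stderr, "attestation failed, fw error %u\n", cmd.error);
          return -1;
      }
      printf("got %u bytes of attestation report\n", rep.len);
      return 0;
  }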
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index c136e254b496..1a2b5210cdbf 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -360,10 +360,9 @@ since the last call to this ioctl. Bit 0 is the first page in the
memory slot. Ensure the entire structure is cleared to avoid padding
issues.
-If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specifies
-the address space for which you want to return the dirty bitmap.
-They must be less than the value that KVM_CHECK_EXTENSION returns for
-the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of the slot field
+specify the address space for which you want to return the dirty bitmap. See
+KVM_SET_USER_MEMORY_REGION for details on the usage of the slot field.
The bits in the dirty bitmap are cleared before the ioctl returns, unless
KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled. For more information,
@@ -961,6 +960,14 @@ memory.
__u8 pad2[30];
};
+If the KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL flag is returned from the
+KVM_CAP_XEN_HVM check, it may be set in the flags field of this ioctl.
+This requests KVM to generate the contents of the hypercall page
+automatically; hypercalls will be intercepted and passed to userspace
+through KVM_EXIT_XEN. In this case, all of the blob size and address
+fields must be zero.
+
+No other flags are currently valid in the struct kvm_xen_hvm_config.
4.29 KVM_GET_CLOCK
------------------
@@ -1281,6 +1288,9 @@ field userspace_addr, which must point at user addressable memory for
the entire memory slot size. Any object may back this memory, including
anonymous memory, ordinary files, and hugetlbfs.
+On architectures that support a form of address tagging, userspace_addr must
+be an untagged address.
+
It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
be identical. This allows large pages in the guest to be backed by large
pages in the host.
@@ -1333,7 +1343,7 @@ documentation when it pops into existence).
:Capability: KVM_CAP_ENABLE_CAP_VM
:Architectures: all
-:Type: vcpu ioctl
+:Type: vm ioctl
:Parameters: struct kvm_enable_cap (in)
:Returns: 0 on success; -1 on error
@@ -2266,6 +2276,8 @@ registers, find a list below:
PPC KVM_REG_PPC_PSSCR 64
PPC KVM_REG_PPC_DEC_EXPIRY 64
PPC KVM_REG_PPC_PTCR 64
+ PPC KVM_REG_PPC_DAWR1 64
+ PPC KVM_REG_PPC_DAWRX1 64
PPC KVM_REG_PPC_TM_GPR0 64
...
PPC KVM_REG_PPC_TM_GPR31 64
@@ -3844,49 +3856,20 @@ base 2 of the page size in the bottom 6 bits.
-EFAULT if struct kvm_reinject_control cannot be read,
-EINVAL if the supplied shift or flags are invalid,
-ENOMEM if unable to allocate the new HPT,
- -ENOSPC if there was a hash collision
-
-::
-
- struct kvm_ppc_rmmu_info {
- struct kvm_ppc_radix_geom {
- __u8 page_shift;
- __u8 level_bits[4];
- __u8 pad[3];
- } geometries[8];
- __u32 ap_encodings[8];
- };
-
-The geometries[] field gives up to 8 supported geometries for the
-radix page table, in terms of the log base 2 of the smallest page
-size, and the number of bits indexed at each level of the tree, from
-the PTE level up to the PGD level in that order. Any unused entries
-will have 0 in the page_shift field.
-
-The ap_encodings gives the supported page sizes and their AP field
-encodings, encoded with the AP value in the top 3 bits and the log
-base 2 of the page size in the bottom 6 bits.
-
-4.102 KVM_PPC_RESIZE_HPT_PREPARE
---------------------------------
-
-:Capability: KVM_CAP_SPAPR_RESIZE_HPT
-:Architectures: powerpc
-:Type: vm ioctl
-:Parameters: struct kvm_ppc_resize_hpt (in)
-:Returns: 0 on successful completion,
- >0 if a new HPT is being prepared, the value is an estimated
- number of milliseconds until preparation is complete,
- -EFAULT if struct kvm_reinject_control cannot be read,
- -EINVAL if the supplied shift or flags are invalid,when moving existing
- HPT entries to the new HPT,
- -EIO on other error conditions
Used to implement the PAPR extension for runtime resizing of a guest's
Hashed Page Table (HPT). Specifically this starts, stops or monitors
the preparation of a new potential HPT for the guest, essentially
implementing the H_RESIZE_HPT_PREPARE hypercall.
+::
+
+ struct kvm_ppc_resize_hpt {
+ __u64 flags;
+ __u32 shift;
+ __u32 pad;
+ };
+
If called with shift > 0 when there is no pending HPT for the guest,
this begins preparation of a new pending HPT of size 2^(shift) bytes.
It then returns a positive integer with the estimated number of
@@ -3914,14 +3897,6 @@ Normally this will be called repeatedly with the same parameters until
it returns <= 0. The first call will initiate preparation, subsequent
ones will monitor preparation until it completes or fails.
-::
-
- struct kvm_ppc_resize_hpt {
- __u64 flags;
- __u32 shift;
- __u32 pad;
- };
-
4.103 KVM_PPC_RESIZE_HPT_COMMIT
-------------------------------
@@ -3944,6 +3919,14 @@ Hashed Page Table (HPT). Specifically this requests that the guest be
transferred to working with the new HPT, essentially implementing the
H_RESIZE_HPT_COMMIT hypercall.
+::
+
+ struct kvm_ppc_resize_hpt {
+ __u64 flags;
+ __u32 shift;
+ __u32 pad;
+ };
+
This should only be called after KVM_PPC_RESIZE_HPT_PREPARE has
returned 0 with the same parameters. In other cases
KVM_PPC_RESIZE_HPT_COMMIT will return an error (usually -ENXIO or
@@ -3959,14 +3942,6 @@ HPT and the previous HPT will be discarded.
On failure, the guest will still be operating on its previous HPT.
-::
-
- struct kvm_ppc_resize_hpt {
- __u64 flags;
- __u32 shift;
- __u32 pad;
- };
-
4.104 KVM_X86_GET_MCE_CAP_SUPPORTED
-----------------------------------
@@ -4432,7 +4407,7 @@ to I/O ports.
:Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
:Architectures: x86, arm, arm64, mips
:Type: vm ioctl
-:Parameters: struct kvm_dirty_log (in)
+:Parameters: struct kvm_clear_dirty_log (in)
:Returns: 0 on success, -1 on error
::
@@ -4459,10 +4434,9 @@ in KVM's dirty bitmap, and dirty tracking is re-enabled for that page
(for example via write-protection, or by clearing the dirty bit in
a page table entry).
-If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specifies
-the address space for which you want to return the dirty bitmap.
-They must be less than the value that KVM_CHECK_EXTENSION returns for
-the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of the slot field
+specify the address space for which you want to clear the dirty status. See
+KVM_SET_USER_MEMORY_REGION for details on the usage of the slot field.
This ioctl is mostly useful when KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
is enabled; for more information, see the description of the capability.
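+
+For illustration, a hedged sketch of clearing dirty state for pages in a slot,
+assuming a hypothetical ``vm_fd``, memslot number ``slot_id``, address space id
+``as_id`` and a userspace bitmap ``bitmap`` with one bit per page to clear::
+
+    struct kvm_clear_dirty_log log = {
+        .slot         = (as_id << 16) | slot_id, /* bits 16-31: address space */
+        .first_page   = 0,
+        .num_pages    = 512,
+        .dirty_bitmap = bitmap,
+    };
+
+    if (ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &log) < 0)
+        perror("KVM_CLEAR_DIRTY_LOG");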
@@ -4508,6 +4482,7 @@ KVM_GET_SUPPORTED_CPUID ioctl because some of them intersect with KVM feature
leaves (0x40000000, 0x40000001).
Currently, the following list of CPUID leaves are returned:
+
- HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS
- HYPERV_CPUID_INTERFACE
- HYPERV_CPUID_VERSION
@@ -4532,6 +4507,7 @@ userspace should not expect to get any particular value there.
Note, vcpu version of KVM_GET_SUPPORTED_HV_CPUID is currently deprecated. Unlike
system ioctl which exposes all supported feature bits unconditionally, vcpu
version has the following quirks:
+
- HYPERV_CPUID_NESTED_FEATURES leaf and HV_X64_ENLIGHTENED_VMCS_RECOMMENDED
feature bit are only exposed when Enlightened VMCS was previously enabled
on the corresponding vCPU (KVM_CAP_HYPERV_ENLIGHTENED_VMCS).
@@ -4830,6 +4806,137 @@ into user space.
If a vCPU is in running state while this ioctl is invoked, the vCPU may
experience inconsistent filtering behavior on MSR accesses.
+4.127 KVM_XEN_HVM_SET_ATTR
+--------------------------
+
+:Capability: KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_xen_hvm_attr
+:Returns: 0 on success, < 0 on error
+
+::
+
+  struct kvm_xen_hvm_attr {
+      __u16 type;
+      __u16 pad[3];
+      union {
+          __u8 long_mode;
+          __u8 vector;
+          struct {
+              __u64 gfn;
+          } shared_info;
+          __u64 pad[4];
+      } u;
+  };
+
+type values:
+
+KVM_XEN_ATTR_TYPE_LONG_MODE
+ Sets the ABI mode of the VM to 32-bit or 64-bit (long mode). This
+ determines the layout of the shared info pages exposed to the VM.
+
+KVM_XEN_ATTR_TYPE_SHARED_INFO
+ Sets the guest physical frame number at which the Xen "shared info"
+ page resides. Note that although Xen places vcpu_info for the first
+ 32 vCPUs in the shared_info page, KVM does not automatically do so
+ and instead requires that KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO be used
+ explicitly even when the vcpu_info for a given vCPU resides at the
+ "default" location in the shared_info page. This is because KVM is
+ not aware of the Xen CPU id which is used as the index into the
+ vcpu_info[] array, so it cannot know the correct default location.
+
+KVM_XEN_ATTR_TYPE_UPCALL_VECTOR
+ Sets the exception vector used to deliver Xen event channel upcalls.
+
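+For illustration only, a minimal sketch of configuring these attributes,
+assuming a hypothetical ``vm_fd``, a 64-bit guest, and a guest frame number
+``shared_info_gfn`` chosen by the VMM::
+
+    struct kvm_xen_hvm_attr lm = {
+        .type = KVM_XEN_ATTR_TYPE_LONG_MODE,
+        .u.long_mode = 1,
+    };
+    struct kvm_xen_hvm_attr si = {
+        .type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
+        .u.shared_info.gfn = shared_info_gfn,
+    };
+
+    ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &lm);
+    ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &si);
+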
+4.128 KVM_XEN_HVM_GET_ATTR
+--------------------------
+
+:Capability: KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_xen_hvm_attr
+:Returns: 0 on success, < 0 on error
+
+Allows Xen VM attributes to be read. For the structure and types,
+see KVM_XEN_HVM_SET_ATTR above.
+
+4.129 KVM_XEN_VCPU_SET_ATTR
+---------------------------
+
+:Capability: KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_xen_vcpu_attr
+:Returns: 0 on success, < 0 on error
+
+::
+
+  struct kvm_xen_vcpu_attr {
+      __u16 type;
+      __u16 pad[3];
+      union {
+          __u64 gpa;
+          __u64 pad[4];
+          struct {
+              __u64 state;
+              __u64 state_entry_time;
+              __u64 time_running;
+              __u64 time_runnable;
+              __u64 time_blocked;
+              __u64 time_offline;
+          } runstate;
+      } u;
+  };
+
+type values:
+
+KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO
+ Sets the guest physical address of the vcpu_info for a given vCPU.
+
+KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO
+ Sets the guest physical address of an additional pvclock structure
+ for a given vCPU. This is typically used for guest vsyscall support.
+
+KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR
+ Sets the guest physical address of the vcpu_runstate_info for a given
+ vCPU. This is how a Xen guest tracks CPU state such as steal time.
+
+KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT
+ Sets the runstate (RUNSTATE_running/_runnable/_blocked/_offline) of
+ the given vCPU from the .u.runstate.state member of the structure.
+ KVM automatically accounts running and runnable time but blocked
+ and offline states are only entered explicitly.
+
+KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA
+ Sets all fields of the vCPU runstate data from the .u.runstate member
+ of the structure, including the current runstate. The state_entry_time
+ must equal the sum of the other four times.
+
+KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST
+ This *adds* the contents of the .u.runstate members of the structure
+ to the corresponding members of the given vCPU's runstate data, thus
+ permitting atomic adjustments to the runstate times. The adjustment
+ to the state_entry_time must equal the sum of the adjustments to the
+ other four times. The state field must be set to -1, or to a valid
+ runstate value (RUNSTATE_running, RUNSTATE_runnable, RUNSTATE_blocked
+ or RUNSTATE_offline) to set the current accounted state as of the
+ adjusted state_entry_time.
+
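+For illustration only, a minimal sketch of pointing KVM at a vCPU's vcpu_info
+and runstate area, assuming a hypothetical ``vcpu_fd`` and guest physical
+addresses chosen by the VMM::
+
+    struct kvm_xen_vcpu_attr vi = {
+        .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
+        .u.gpa = vcpu_info_gpa,
+    };
+    struct kvm_xen_vcpu_attr rs = {
+        .type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
+        .u.gpa = runstate_gpa,
+    };
+
+    ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &vi);
+    ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &rs);
+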
+4.130 KVM_XEN_VCPU_GET_ATTR
+---------------------------
+
+:Capability: KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_xen_vcpu_attr
+:Returns: 0 on success, < 0 on error
+
+Allows Xen vCPU attributes to be read. For the structure and types,
+see KVM_XEN_VCPU_SET_ATTR above.
+
+The KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST type may not be used
+with the KVM_XEN_VCPU_GET_ATTR ioctl.
5. The kvm_run structure
========================
@@ -4892,9 +4999,12 @@ local APIC is not used.
__u16 flags;
More architecture-specific flags detailing state of the VCPU that may
-affect the device's behavior. The only currently defined flag is
-KVM_RUN_X86_SMM, which is valid on x86 machines and is set if the
-VCPU is in system management mode.
+affect the device's behavior. Currently defined flags::
+
+ /* x86, set if the VCPU is in system management mode */
+ #define KVM_RUN_X86_SMM (1 << 0)
+ /* x86, set if bus lock detected in VM */
+ #define KVM_RUN_BUS_LOCK (1 << 1)
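+
+For illustration, userspace might check these bits after KVM_RUN returns; a
+hedged sketch, where ``run`` is the vCPU's mmap'd struct kvm_run and the two
+handlers are hypothetical VMM helpers::
+
+    if (run->flags & KVM_RUN_X86_SMM)
+        handle_smm_state();      /* hypothetical helper */
+    if (run->flags & KVM_RUN_BUS_LOCK)
+        throttle_bus_lock();     /* hypothetical helper */
+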
::
@@ -4995,13 +5105,18 @@ to the byte array.
.. note::
- For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_PAPR,
+ For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_PAPR, KVM_EXIT_XEN,
KVM_EXIT_EPR, KVM_EXIT_X86_RDMSR and KVM_EXIT_X86_WRMSR the corresponding
operations are complete (and guest state is consistent) only after userspace
has re-entered the kernel with KVM_RUN. The kernel side will first finish
- incomplete operations and then check for pending signals. Userspace
- can re-enter the guest with an unmasked signal pending to complete
- pending operations.
+ incomplete operations and then check for pending signals.
+
+ The pending state of the operation is not preserved in state which is
+ visible to userspace, thus userspace should ensure that the operation is
+ completed before performing a live migration. Userspace can re-enter the
+ guest with an unmasked signal pending or with the immediate_exit field set
+ to complete pending operations without allowing any further instructions
+ to be executed.
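+
+  For illustration, a hedged sketch of the second approach, assuming a
+  hypothetical ``vcpu_fd`` and its mmap'd ``run`` structure::
+
+      run->immediate_exit = 1;
+      ioctl(vcpu_fd, KVM_RUN, 0);  /* completes the pending operation, then
+                                      returns without entering the guest */
+      run->immediate_exit = 0;
+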
::
@@ -5328,6 +5443,34 @@ vCPU execution. If the MSR write was unsuccessful, user space also sets the
::
+
+  struct kvm_xen_exit {
+  #define KVM_EXIT_XEN_HCALL 1
+      __u32 type;
+      union {
+          struct {
+              __u32 longmode;
+              __u32 cpl;
+              __u64 input;
+              __u64 result;
+              __u64 params[6];
+          } hcall;
+      } u;
+  };
+  /* KVM_EXIT_XEN */
+  struct kvm_xen_exit xen;
+
+Indicates that the VCPU exits into userspace to process some tasks
+related to Xen emulation.
+
+Valid values for 'type' are:
+
+ - KVM_EXIT_XEN_HCALL -- synchronously notify user-space about a Xen hypercall.
+ Userspace is expected to place the hypercall result into the appropriate
+ field before invoking KVM_RUN again.
+
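+For illustration only, a hedged sketch of handling this exit in a VMM's run
+loop, assuming ``run`` is the vCPU's mmap'd struct kvm_run::
+
+    switch (run->exit_reason) {
+    case KVM_EXIT_XEN:
+        if (run->xen.type == KVM_EXIT_XEN_HCALL) {
+            /* decode run->xen.u.hcall.input and params[] here ... */
+            run->xen.u.hcall.result = -ENOSYS;  /* e.g. hypercall not handled */
+        }
+        break;
+    }
+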
+::
+
/* Fix the size of the union. */
char padding[256];
};
@@ -6037,6 +6180,53 @@ KVM_EXIT_X86_RDMSR and KVM_EXIT_X86_WRMSR exit notifications which user space
can then handle to implement model specific MSR handling and/or user notifications
to inform a user that an MSR was not handled.
+7.22 KVM_CAP_X86_BUS_LOCK_EXIT
+-------------------------------
+
+:Architectures: x86
+:Target: VM
+:Parameters: args[0] defines the policy used when bus locks detected in guest
+:Returns: 0 on success, -EINVAL when args[0] contains invalid bits
+
+Valid bits in args[0] are::
+
+ #define KVM_BUS_LOCK_DETECTION_OFF (1 << 0)
+ #define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1)
+
+Enabling this capability on a VM provides userspace with a way to select a
+policy for handling the bus locks detected in the guest. Userspace can obtain
+the supported modes from the result of KVM_CHECK_EXTENSION and set the chosen
+mode through KVM_ENABLE_CAP.
+
+KVM_BUS_LOCK_DETECTION_OFF and KVM_BUS_LOCK_DETECTION_EXIT are currently
+supported and are mutually exclusive with each other. More bits can be added
+in the future.
+
+With KVM_BUS_LOCK_DETECTION_OFF set, bus locks in the guest will not cause VM
+exits, so no additional action is needed. This is the default mode.
+
+With KVM_BUS_LOCK_DETECTION_EXIT set, a VM exit happens whenever a bus lock is
+detected in the guest. KVM simply exits to userspace when handling them, and
+userspace can enforce its own throttling or other policy-based mitigations.
+
+This capability is aimed at addressing the threat that a malicious guest can
+exploit bus locks to degrade the performance of the whole system. Once
+userspace enables this capability and selects the KVM_BUS_LOCK_DETECTION_EXIT
+mode, KVM will set the KVM_RUN_BUS_LOCK flag in the kvm_run.flags field and
+exit to userspace. Because the bus lock VM exit can be preempted by a higher
+priority VM exit, the exit notification to userspace can be KVM_EXIT_BUS_LOCK
+or another reason; the KVM_RUN_BUS_LOCK flag is used to distinguish between
+them.
+
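+As an illustration (not the only possible sequence), userspace could enable
+the exit policy with a sketch like the following, assuming a hypothetical
+``vm_fd``::
+
+    int modes = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_X86_BUS_LOCK_EXIT);
+
+    if (modes > 0 && (modes & KVM_BUS_LOCK_DETECTION_EXIT)) {
+        struct kvm_enable_cap cap = {
+            .cap = KVM_CAP_X86_BUS_LOCK_EXIT,
+            .args[0] = KVM_BUS_LOCK_DETECTION_EXIT,
+        };
+        ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
+    }
+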
+7.23 KVM_CAP_PPC_DAWR1
+----------------------
+
+:Architectures: ppc
+:Parameters: none
+:Returns: 0 on success, -EINVAL when CPU doesn't support 2nd DAWR
+
+This capability can be used to check for / enable the second DAWR feature
+provided by the POWER10 processor.
+
8. Other capabilities.
======================
@@ -6414,7 +6604,6 @@ guest according to the bits in the KVM_CPUID_FEATURES CPUID leaf
(0x40000001). Otherwise, a guest may use the paravirtual features
regardless of what has actually been exposed through the CPUID leaf.
-
8.29 KVM_CAP_DIRTY_LOG_RING
---------------------------
@@ -6501,3 +6690,34 @@ KVM_GET_DIRTY_LOG and KVM_CLEAR_DIRTY_LOG. After enabling
KVM_CAP_DIRTY_LOG_RING with an acceptable dirty ring size, the virtual
machine will switch to ring-buffer dirty page tracking and further
KVM_GET_DIRTY_LOG or KVM_CLEAR_DIRTY_LOG ioctls will fail.
+
+8.30 KVM_CAP_XEN_HVM
+--------------------
+
+:Architectures: x86
+
+This capability indicates the features that KVM supports for hosting Xen
+PVHVM guests. Valid flags are::
+
+ #define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0)
+ #define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1)
+ #define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
+ #define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3)
+
+The KVM_XEN_HVM_CONFIG_HYPERCALL_MSR flag indicates that the KVM_XEN_HVM_CONFIG
+ioctl is available, for the guest to set its hypercall page.
+
+If KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL is also set, the same flag may also be
+provided in the flags to KVM_XEN_HVM_CONFIG, without providing hypercall page
+contents, to request that KVM generate hypercall page content automatically
+and also enable interception of guest hypercalls with KVM_EXIT_XEN.
+
+The KVM_XEN_HVM_CONFIG_SHARED_INFO flag indicates the availability of the
+KVM_XEN_HVM_SET_ATTR, KVM_XEN_HVM_GET_ATTR, KVM_XEN_VCPU_SET_ATTR and
+KVM_XEN_VCPU_GET_ATTR ioctls, as well as the delivery of exception vectors
+for event channel upcalls when the evtchn_upcall_pending field of a vcpu's
+vcpu_info is set.
+
+The KVM_XEN_HVM_CONFIG_RUNSTATE flag indicates that the runstate-related
+features KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR/_CURRENT/_DATA/_ADJUST are
+supported by the KVM_XEN_VCPU_SET_ATTR/KVM_XEN_VCPU_GET_ATTR ioctls.
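+
+For illustration, a hedged sketch of probing these flags (on a hypothetical
+KVM device file descriptor ``kvm_fd``) before configuring a Xen guest::
+
+    int xen_caps = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XEN_HVM);
+
+    if (xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO) {
+        /* KVM_XEN_HVM_SET_ATTR / KVM_XEN_VCPU_SET_ATTR etc. are available */
+    }
+    if (xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE) {
+        /* the runstate attribute types can be used */
+    }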
diff --git a/Documentation/virt/kvm/arm/hyp-abi.rst b/Documentation/virt/kvm/arm/hyp-abi.rst
index 83cadd8186fa..4d43fbc25195 100644
--- a/Documentation/virt/kvm/arm/hyp-abi.rst
+++ b/Documentation/virt/kvm/arm/hyp-abi.rst
@@ -58,6 +58,15 @@ these functions (see arch/arm{,64}/include/asm/virt.h):
into place (arm64 only), and jump to the restart address while at HYP/EL2.
This hypercall is not expected to return to its caller.
+* ::
+
+ x0 = HVC_VHE_RESTART (arm64 only)
+
+ Attempt to upgrade the kernel's exception level from EL1 to EL2 by enabling
+ the VHE mode. This is conditioned by the CPU supporting VHE, the EL2 MMU
+ being off, and VHE not being disabled by any other means (command line
+ option, for example).
+
Any other value of r0/x0 triggers a hypervisor-specific handling,
which is not documented here.
diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst
index b21a34c34a21..0aa4817b466d 100644
--- a/Documentation/virt/kvm/locking.rst
+++ b/Documentation/virt/kvm/locking.rst
@@ -16,7 +16,14 @@ The acquisition orders for mutexes are as follows:
- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
them together is quite rare.
-On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock.
+On x86:
+
+- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock
+
+- kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock is
+ taken inside kvm->arch.mmu_lock, and cannot be taken without already
+ holding kvm->arch.mmu_lock (typically with ``read_lock``, otherwise
+ there's no need to take kvm->arch.tdp_mmu_pages_lock at all).
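+
+  For illustration, the documented ordering corresponds to a nesting pattern
+  like the following sketch (not lifted from the source, shown only to make
+  the lock order explicit)::
+
+      read_lock(&kvm->arch.mmu_lock);
+      spin_lock(&kvm->arch.tdp_mmu_pages_lock);
+      /* ... update the TDP MMU page lists ... */
+      spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
+      read_unlock(&kvm->arch.mmu_lock);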
Everything else is a leaf: no other lock is taken inside the critical
sections.
diff --git a/Documentation/virt/kvm/nested-vmx.rst b/Documentation/virt/kvm/nested-vmx.rst
index 6ab4e35cee23..ac2095d41f02 100644
--- a/Documentation/virt/kvm/nested-vmx.rst
+++ b/Documentation/virt/kvm/nested-vmx.rst
@@ -37,8 +37,10 @@ call L2.
Running nested VMX
------------------
-The nested VMX feature is disabled by default. It can be enabled by giving
-the "nested=1" option to the kvm-intel module.
+The nested VMX feature is enabled by default since Linux kernel v4.20. For
+older Linux kernels, it can be enabled by giving the "nested=1" option to the
+kvm-intel module.
+
No modifications are required to user space (qemu). However, qemu's default
emulated CPU type (qemu64) does not list the "VMX" CPU feature, so it must be
diff --git a/Documentation/virt/kvm/running-nested-guests.rst b/Documentation/virt/kvm/running-nested-guests.rst
index d0a1fc754c84..bd70c69468ae 100644
--- a/Documentation/virt/kvm/running-nested-guests.rst
+++ b/Documentation/virt/kvm/running-nested-guests.rst
@@ -74,7 +74,7 @@ few:
Enabling "nested" (x86)
-----------------------
-From Linux kernel v4.19 onwards, the ``nested`` KVM parameter is enabled
+From Linux kernel v4.20 onwards, the ``nested`` KVM parameter is enabled
by default for Intel and AMD. (Though your Linux distribution might
override this default.)
diff --git a/Documentation/virt/kvm/s390-pv-boot.rst b/Documentation/virt/kvm/s390-pv-boot.rst
index 8b8fa0390409..ad1f7866c001 100644
--- a/Documentation/virt/kvm/s390-pv-boot.rst
+++ b/Documentation/virt/kvm/s390-pv-boot.rst
@@ -80,5 +80,5 @@ Keys
----
Every CEC will have a unique public key to enable tooling to build
encrypted images.
-See `s390-tools <https://github.com/ibm-s390-tools/s390-tools/>`_
+See `s390-tools <https://github.com/ibm-s390-linux/s390-tools/>`_
for the tooling.
diff --git a/Documentation/vm/arch_pgtable_helpers.rst b/Documentation/vm/arch_pgtable_helpers.rst
index f3591ee3aaa8..552567d863b8 100644
--- a/Documentation/vm/arch_pgtable_helpers.rst
+++ b/Documentation/vm/arch_pgtable_helpers.rst
@@ -50,7 +50,7 @@ PTE Page Table Helpers
+---------------------------+--------------------------------------------------+
| pte_mkwrite | Creates a writable PTE |
+---------------------------+--------------------------------------------------+
-| pte_mkwrprotect | Creates a write protected PTE |
+| pte_wrprotect | Creates a write protected PTE |
+---------------------------+--------------------------------------------------+
| pte_mkspecial | Creates a special PTE |
+---------------------------+--------------------------------------------------+
@@ -120,7 +120,7 @@ PMD Page Table Helpers
+---------------------------+--------------------------------------------------+
| pmd_mkwrite | Creates a writable PMD |
+---------------------------+--------------------------------------------------+
-| pmd_mkwrprotect | Creates a write protected PMD |
+| pmd_wrprotect | Creates a write protected PMD |
+---------------------------+--------------------------------------------------+
| pmd_mkspecial | Creates a special PMD |
+---------------------------+--------------------------------------------------+
@@ -186,7 +186,7 @@ PUD Page Table Helpers
+---------------------------+--------------------------------------------------+
| pud_mkwrite | Creates a writable PUD |
+---------------------------+--------------------------------------------------+
-| pud_mkwrprotect | Creates a write protected PUD |
+| pud_wrprotect | Creates a write protected PUD |
+---------------------------+--------------------------------------------------+
| pud_mkdevmap | Creates a ZONE_DEVICE mapped PUD |
+---------------------------+--------------------------------------------------+
@@ -224,7 +224,7 @@ HugeTLB Page Table Helpers
+---------------------------+--------------------------------------------------+
| huge_pte_mkwrite | Creates a writable HugeTLB |
+---------------------------+--------------------------------------------------+
-| huge_pte_mkwrprotect | Creates a write protected HugeTLB |
+| huge_pte_wrprotect | Creates a write protected HugeTLB |
+---------------------------+--------------------------------------------------+
| huge_ptep_get_and_clear | Clears a HugeTLB |
+---------------------------+--------------------------------------------------+
diff --git a/Documentation/vm/split_page_table_lock.rst b/Documentation/vm/split_page_table_lock.rst
index ff51f4a5494d..c08919662704 100644
--- a/Documentation/vm/split_page_table_lock.rst
+++ b/Documentation/vm/split_page_table_lock.rst
@@ -32,7 +32,7 @@ There are helpers to lock/unlock a table and other accessor functions:
Split page table lock for PTE tables is enabled compile-time if
CONFIG_SPLIT_PTLOCK_CPUS (usually 4) is less or equal to NR_CPUS.
-If split lock is disabled, all tables guaded by mm->page_table_lock.
+If split lock is disabled, all tables are guarded by mm->page_table_lock.
Split page table lock for PMD tables is enabled, if it's enabled for PTE
tables and the architecture supports it (see below).
diff --git a/Documentation/x86/boot.rst b/Documentation/x86/boot.rst
index abb9fc164657..fc844913dece 100644
--- a/Documentation/x86/boot.rst
+++ b/Documentation/x86/boot.rst
@@ -851,7 +851,7 @@ Protocol: 2.09+
struct setup_data {
__u64 next = 0 or <addr_of_next_setup_data_struct>;
__u32 type = SETUP_INDIRECT;
- __u32 len = sizeof(setup_data);
+ __u32 len = sizeof(setup_indirect);
__u8 data[sizeof(setup_indirect)] = struct setup_indirect {
__u32 type = SETUP_INDIRECT | SETUP_E820_EXT;
__u32 reserved = 0;
diff --git a/MAINTAINERS b/MAINTAINERS
index 992fe3b0900a..d92f85ca831d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -261,6 +261,8 @@ ABI/API
L: linux-api@vger.kernel.org
F: include/linux/syscalls.h
F: kernel/sys_ni.c
+F: include/uapi/
+F: arch/*/include/uapi/
ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
M: Hans de Goede <hdegoede@redhat.com>
@@ -436,6 +438,15 @@ S: Orphan
F: drivers/platform/x86/wmi.c
F: include/uapi/linux/wmi.h
+ACRN HYPERVISOR SERVICE MODULE
+M: Shuo Liu <shuo.a.liu@intel.com>
+L: acrn-dev@lists.projectacrn.org (subscribers-only)
+S: Supported
+W: https://projectacrn.org
+F: Documentation/virt/acrn/
+F: drivers/virt/acrn/
+F: include/uapi/linux/acrn.h
+
AD1889 ALSA SOUND DRIVER
L: linux-parisc@vger.kernel.org
S: Maintained
@@ -699,7 +710,8 @@ M: Michael Tretter <m.tretter@pengutronix.de>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
L: linux-media@vger.kernel.org
S: Maintained
-F: drivers/staging/media/allegro-dvt/
+F: Documentation/devicetree/bindings/media/allegro,al5e.yaml
+F: drivers/media/platform/allegro-dvt/
ALLWINNER A10 CSI DRIVER
M: Maxime Ripard <mripard@kernel.org>
@@ -1016,7 +1028,7 @@ F: Documentation/devicetree/bindings/mux/adi,adgs1408.txt
F: drivers/mux/adgs1408.c
ANALOG DEVICES INC ADIN DRIVER
-M: Alexandru Ardelean <alexaundru.ardelean@analog.com>
+M: Michael Hennerich <michael.hennerich@analog.com>
L: netdev@vger.kernel.org
S: Supported
W: http://ez.analog.com/community/linux-device-drivers
@@ -1024,7 +1036,7 @@ F: Documentation/devicetree/bindings/net/adi,adin.yaml
F: drivers/net/phy/adin.c
ANALOG DEVICES INC ADIS DRIVER LIBRARY
-M: Alexandru Ardelean <alexandru.ardelean@analog.com>
+M: Nuno Sa <nuno.sa@analog.com>
L: linux-iio@vger.kernel.org
S: Supported
F: drivers/iio/imu/adis.c
@@ -1413,7 +1425,6 @@ F: arch/arm*/include/asm/hw_breakpoint.h
F: arch/arm*/include/asm/perf_event.h
F: arch/arm*/kernel/hw_breakpoint.c
F: arch/arm*/kernel/perf_*
-F: arch/arm/oprofile/common.c
F: drivers/perf/
F: include/linux/perf/arm_pmu.h
@@ -1511,6 +1522,7 @@ ARM/ACTIONS SEMI ARCHITECTURE
M: Andreas Färber <afaerber@suse.de>
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-actions@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/arm/actions.yaml
F: Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
@@ -1778,19 +1790,6 @@ F: drivers/net/ethernet/cortina/
F: drivers/pinctrl/pinctrl-gemini.c
F: drivers/rtc/rtc-ftrtc010.c
-ARM/CSR SIRFPRIMA2 MACHINE SUPPORT
-M: Barry Song <baohua@kernel.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/baohua/linux.git
-F: arch/arm/boot/dts/prima2*
-F: arch/arm/mach-prima2/
-F: drivers/clk/sirf/
-F: drivers/clocksource/timer-atlas7.c
-F: drivers/clocksource/timer-prima2.c
-X: drivers/gnss
-N: [^a-z]sirf
-
ARM/CZ.NIC TURRIS MOX SUPPORT
M: Marek Behun <marek.behun@nic.cz>
S: Maintained
@@ -1806,13 +1805,6 @@ F: drivers/firmware/turris-mox-rwtm.c
F: drivers/gpio/gpio-moxtet.c
F: include/linux/moxtet.h
-ARM/ENERGY MICRO (SILICON LABS) EFM32 SUPPORT
-M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
-R: Pengutronix Kernel Team <kernel@pengutronix.de>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-N: efm32
-
ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6)
M: Robert Jarzmik <robert.jarzmik@free.fr>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2090,7 +2082,7 @@ M: Chunfeng Yun <chunfeng.yun@mediatek.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: Documentation/devicetree/bindings/phy/phy-mtk-*
+F: Documentation/devicetree/bindings/phy/mediatek,*
F: drivers/phy/mediatek/
ARM/Microchip (AT91) SoC support
@@ -2145,17 +2137,20 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
W: http://linux-chenxing.org/
F: Documentation/devicetree/bindings/arm/mstar/*
+F: Documentation/devicetree/bindings/clock/mstar,msc313-mpll.yaml
F: Documentation/devicetree/bindings/gpio/mstar,msc313-gpio.yaml
F: arch/arm/boot/dts/mstar-*
F: arch/arm/mach-mstar/
+F: drivers/clk/mstar/
F: drivers/gpio/gpio-msc313.c
+F: include/dt-bindings/clock/mstar-*
F: include/dt-bindings/gpio/msc313-gpio.h
ARM/NEC MOBILEPRO 900/c MACHINE SUPPORT
M: Michael Petchkovsky <mkpetch@internode.on.net>
S: Maintained
-ARM/NOMADIK/U300/Ux500 ARCHITECTURES
+ARM/NOMADIK/Ux500 ARCHITECTURES
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -2164,35 +2159,23 @@ F: Documentation/devicetree/bindings/arm/ste-*
F: Documentation/devicetree/bindings/arm/ux500.yaml
F: Documentation/devicetree/bindings/arm/ux500/
F: Documentation/devicetree/bindings/i2c/i2c-nomadik.txt
-F: Documentation/devicetree/bindings/i2c/i2c-stu300.txt
F: arch/arm/boot/dts/ste-*
F: arch/arm/mach-nomadik/
-F: arch/arm/mach-u300/
F: arch/arm/mach-ux500/
F: drivers/clk/clk-nomadik.c
-F: drivers/clk/clk-u300.c
F: drivers/clocksource/clksrc-dbx500-prcmu.c
-F: drivers/clocksource/timer-u300.c
-F: drivers/dma/coh901318*
F: drivers/dma/ste_dma40*
F: drivers/hwspinlock/u8500_hsem.c
F: drivers/i2c/busses/i2c-nomadik.c
-F: drivers/i2c/busses/i2c-stu300.c
F: drivers/iio/adc/ab8500-gpadc.c
-F: drivers/mfd/ab3100*
F: drivers/mfd/ab8500*
F: drivers/mfd/abx500*
F: drivers/mfd/db8500*
F: drivers/mfd/dbx500*
F: drivers/pinctrl/nomadik/
-F: drivers/pinctrl/pinctrl-coh901*
-F: drivers/pinctrl/pinctrl-u300.c
-F: drivers/rtc/rtc-ab3100.c
F: drivers/rtc/rtc-ab8500.c
-F: drivers/rtc/rtc-coh901331.c
F: drivers/rtc/rtc-pl031.c
F: drivers/soc/ux500/
-F: drivers/watchdog/coh901327_wdt.c
ARM/NUVOTON NPCM ARCHITECTURE
M: Avi Fishman <avifishman70@gmail.com>
@@ -2414,6 +2397,8 @@ F: drivers/*/*s5pv210*
F: drivers/memory/samsung/
F: drivers/soc/samsung/
F: drivers/tty/serial/samsung*
+F: include/linux/platform_data/*s3c*
+F: include/linux/serial_s3c.h
F: include/linux/soc/samsung/
N: exynos
N: s3c2410
@@ -2557,13 +2542,6 @@ F: arch/arm/boot/dts/berlin*
F: arch/arm/mach-berlin/
F: arch/arm64/boot/dts/synaptics/
-ARM/TANGO ARCHITECTURE
-M: Marc Gonzalez <marc.w.gonzalez@free.fr>
-M: Mans Rullgard <mans@mansr.com>
-L: linux-arm-kernel@lists.infradead.org
-S: Odd Fixes
-N: tango
-
ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2602,7 +2580,7 @@ L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/clk/keystone/
-ARM/TEXAS INSTRUMENT KEYSTONE ClOCKSOURCE
+ARM/TEXAS INSTRUMENT KEYSTONE CLOCKSOURCE
M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-kernel@vger.kernel.org
@@ -2616,8 +2594,8 @@ S: Maintained
F: drivers/power/reset/keystone-reset.c
ARM/TEXAS INSTRUMENTS K3 ARCHITECTURE
-M: Tero Kristo <t-kristo@ti.com>
M: Nishanth Menon <nm@ti.com>
+M: Tero Kristo <kristo@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: Documentation/devicetree/bindings/arm/ti/k3.yaml
@@ -2641,9 +2619,15 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwamatsu/linux-visconti.git
F: Documentation/devicetree/bindings/arm/toshiba.yaml
+F: Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml
+F: Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml
F: Documentation/devicetree/bindings/pinctrl/toshiba,tmpv7700-pinctrl.yaml
+F: Documentation/devicetree/bindings/watchdog/toshiba,visconti-wdt.yaml
F: arch/arm64/boot/dts/toshiba/
+F: drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+F: drivers/gpio/gpio-visconti.c
F: drivers/pinctrl/visconti/
+F: drivers/watchdog/visconti_wdt.c
N: visconti
ARM/UNIPHIER ARCHITECTURE
@@ -2723,40 +2707,6 @@ S: Maintained
F: arch/arm/mach-pxa/include/mach/z2.h
F: arch/arm/mach-pxa/z2.c
-ARM/ZTE ARCHITECTURE
-M: Jun Nie <jun.nie@linaro.org>
-M: Shawn Guo <shawnguo@kernel.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-F: Documentation/devicetree/bindings/arm/zte.yaml
-F: Documentation/devicetree/bindings/clock/zx2967*.txt
-F: Documentation/devicetree/bindings/dma/zxdma.txt
-F: Documentation/devicetree/bindings/gpio/zx296702-gpio.txt
-F: Documentation/devicetree/bindings/i2c/i2c-zx2967.txt
-F: Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt
-F: Documentation/devicetree/bindings/pinctrl/pinctrl-zx.txt
-F: Documentation/devicetree/bindings/reset/zte,zx2967-reset.txt
-F: Documentation/devicetree/bindings/soc/zte/
-F: Documentation/devicetree/bindings/sound/zte,*.txt
-F: Documentation/devicetree/bindings/thermal/zx2967-thermal.txt
-F: Documentation/devicetree/bindings/watchdog/zte,zx2967-wdt.txt
-F: arch/arm/boot/dts/zx2967*
-F: arch/arm/mach-zx/
-F: arch/arm64/boot/dts/zte/
-F: drivers/clk/zte/
-F: drivers/dma/zx_dma.c
-F: drivers/gpio/gpio-zx.c
-F: drivers/i2c/busses/i2c-zx2967.c
-F: drivers/mmc/host/dw_mmc-zx.*
-F: drivers/pinctrl/zte/
-F: drivers/soc/zte/
-F: drivers/thermal/zx2967_thermal.c
-F: drivers/watchdog/zx2967_wdt.c
-F: include/dt-bindings/clock/zx2967*.h
-F: include/dt-bindings/soc/zte,*.h
-F: sound/soc/codecs/zx_aud96p22.c
-F: sound/soc/zte/
-
ARM/ZYNQ ARCHITECTURE
M: Michal Simek <michal.simek@xilinx.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2765,6 +2715,7 @@ W: http://wiki.xilinx.com
T: git https://github.com/Xilinx/linux-xlnx.git
F: Documentation/devicetree/bindings/i2c/cdns,i2c-r1p10.yaml
F: Documentation/devicetree/bindings/i2c/xlnx,xps-iic-2.00.a.yaml
+F: Documentation/devicetree/bindings/spi/xlnx,zynq-qspi.yaml
F: arch/arm/mach-zynq/
F: drivers/block/xsysace.c
F: drivers/clocksource/timer-cadence-ttc.c
@@ -2787,6 +2738,14 @@ F: arch/arm64/
F: tools/testing/selftests/arm64/
X: arch/arm64/boot/dts/
+ARROW SPEEDCHIPS XRS7000 SERIES ETHERNET SWITCH DRIVER
+M: George McCollister <george.mccollister@gmail.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml
+F: drivers/net/dsa/xrs700x/*
+F: net/dsa/tag_xrs700x.c
+
AS3645A LED FLASH CONTROLLER DRIVER
M: Sakari Ailus <sakari.ailus@iki.fi>
L: linux-leds@vger.kernel.org
@@ -2832,6 +2791,15 @@ F: Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2xxx-scu-ic.
F: drivers/irqchip/irq-aspeed-scu-ic.c
F: include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
+ASPEED SD/MMC DRIVER
+M: Andrew Jeffery <andrew@aj.id.au>
+L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
+L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
+L: linux-mmc@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/mmc/aspeed,sdhci.yaml
+F: drivers/mmc/host/sdhci-of-aspeed*
+
ASPEED VIDEO ENGINE DRIVER
M: Eddie James <eajames@linux.ibm.com>
L: linux-media@vger.kernel.org
@@ -2871,9 +2839,7 @@ S: Odd fixes
W: http://sourceforge.net/projects/xscaleiop
F: Documentation/crypto/async-tx-api.rst
F: crypto/async_tx/
-F: drivers/dma/
F: include/linux/async_tx.h
-F: include/linux/dmaengine.h
AT24 EEPROM DRIVER
M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
@@ -3018,7 +2984,7 @@ F: include/uapi/linux/audit.h
F: kernel/audit*
AUXILIARY DISPLAY DRIVERS
-M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
+M: Miguel Ojeda <ojeda@kernel.org>
S: Maintained
F: drivers/auxdisplay/
F: include/linux/cfag12864b.h
@@ -3239,6 +3205,7 @@ L: netdev@vger.kernel.org
S: Supported
W: http://sourceforge.net/projects/bonding/
F: drivers/net/bonding/
+F: include/net/bonding.h
F: include/uapi/linux/if_bonding.h
BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER
@@ -3398,6 +3365,7 @@ L: openwrt-devel@lists.openwrt.org (subscribers-only)
S: Supported
F: Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
F: drivers/net/dsa/b53/*
+F: include/linux/dsa/brcm.h
F: include/linux/platform_data/b53.h
BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE
@@ -3411,7 +3379,7 @@ F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
F: drivers/pci/controller/pcie-brcmstb.c
F: drivers/staging/vc04_services
N: bcm2711
-N: bcm2835
+N: bcm283*
BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
M: Florian Fainelli <f.fainelli@gmail.com>
@@ -3435,6 +3403,15 @@ F: Documentation/devicetree/bindings/mips/brcm/
F: arch/mips/bcm47xx/*
F: arch/mips/include/asm/mach-bcm47xx/*
+BROADCOM BCM4908 ETHERNET DRIVER
+M: Rafał Miłecki <rafal@milecki.pl>
+M: bcm-kernel-feedback-list@broadcom.com
+L: netdev@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
+F: drivers/net/ethernet/broadcom/bcm4908_enet.*
+F: drivers/net/ethernet/broadcom/unimac.h
+
BROADCOM BCM5301X ARM ARCHITECTURE
M: Hauke Mehrtens <hauke@hauke-m.de>
M: Rafał Miłecki <zajec5@gmail.com>
@@ -3623,6 +3600,7 @@ S: Supported
F: Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
F: Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
F: drivers/net/ethernet/broadcom/genet/
+F: drivers/net/ethernet/broadcom/unimac.h
F: drivers/net/mdio/mdio-bcm-unimac.c
F: include/linux/platform_data/bcmgenet.h
F: include/linux/platform_data/mdio-bcm-unimac.h
@@ -3656,6 +3634,15 @@ N: bcm88312
N: hr2
N: stingray
+BROADCOM IPROC GBIT ETHERNET DRIVER
+M: Rafał Miłecki <rafal@milecki.pl>
+M: bcm-kernel-feedback-list@broadcom.com
+L: netdev@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/net/brcm,amac.txt
+F: drivers/net/ethernet/broadcom/bgmac*
+F: drivers/net/ethernet/broadcom/unimac.h
+
BROADCOM KONA GPIO DRIVER
M: Ray Jui <rjui@broadcom.com>
L: bcm-kernel-feedback-list@broadcom.com
@@ -3681,6 +3668,16 @@ L: linux-mips@vger.kernel.org
S: Maintained
F: drivers/firmware/broadcom/*
+BROADCOM PMB (POWER MANAGEMENT BUS) DRIVER
+M: Rafał Miłecki <rafal@milecki.pl>
+M: Florian Fainelli <f.fainelli@gmail.com>
+M: bcm-kernel-feedback-list@broadcom.com
+L: linux-pm@vger.kernel.org
+S: Maintained
+T: git git://github.com/broadcom/stblinux.git
+F: drivers/soc/bcm/bcm-pmb.c
+F: include/dt-bindings/soc/bcm-pmb.h
+
BROADCOM SPECIFIC AMBA DRIVER (BCMA)
M: Rafał Miłecki <zajec5@gmail.com>
L: linux-wireless@vger.kernel.org
@@ -3735,6 +3732,7 @@ L: bcm-kernel-feedback-list@broadcom.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bcmsysport.*
+F: drivers/net/ethernet/broadcom/unimac.h
BROADCOM TG3 GIGABIT ETHERNET DRIVER
M: Siva Reddy Kallam <siva.kallam@broadcom.com>
@@ -3744,6 +3742,13 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/tg3.*
+BROADCOM VK DRIVER
+M: Scott Branden <scott.branden@broadcom.com>
+L: bcm-kernel-feedback-list@broadcom.com
+S: Supported
+F: drivers/misc/bcm-vk/
+F: include/uapi/linux/misc/bcm_vk.h
+
BROCADE BFA FC SCSI DRIVER
M: Anil Gurumurthy <anil.gurumurthy@qlogic.com>
M: Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
@@ -3842,14 +3847,6 @@ F: drivers/irqchip/irq-csky-*
N: csky
K: csky
-C6X ARCHITECTURE
-M: Mark Salter <msalter@redhat.com>
-M: Aurelien Jacquiot <jacquiot.aurelien@gmail.com>
-L: linux-c6x-dev@linux-c6x.org
-S: Maintained
-W: http://www.linux-c6x.org/wiki/index.php/Main_Page
-F: arch/c6x/
-
CA8210 IEEE-802.15.4 RADIO DRIVER
M: Harry Morris <h.morris@cascoda.com>
L: linux-wpan@vger.kernel.org
@@ -3858,6 +3855,29 @@ W: https://github.com/Cascoda/ca8210-linux.git
F: Documentation/devicetree/bindings/net/ieee802154/ca8210.txt
F: drivers/net/ieee802154/ca8210.c
+CANAAN/KENDRYTE K210 SOC FPIOA DRIVER
+M: Damien Le Moal <damien.lemoal@wdc.com>
+L: linux-riscv@lists.infradead.org
+L: linux-gpio@vger.kernel.org (pinctrl driver)
+F: Documentation/devicetree/bindings/pinctrl/canaan,k210-fpioa.yaml
+F: drivers/pinctrl/pinctrl-k210.c
+
+CANAAN/KENDRYTE K210 SOC RESET CONTROLLER DRIVER
+M: Damien Le Moal <damien.lemoal@wdc.com>
+L: linux-kernel@vger.kernel.org
+L: linux-riscv@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/reset/canaan,k210-rst.yaml
+F: drivers/reset/reset-k210.c
+
+CANAAN/KENDRYTE K210 SOC SYSTEM CONTROLLER DRIVER
+M: Damien Le Moal <damien.lemoal@wdc.com>
+L: linux-riscv@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/mfd/canaan,k210-sysctl.yaml
+F: drivers/soc/canaan/
+F: include/soc/canaan/
+
CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
M: David Howells <dhowells@redhat.com>
L: linux-cachefs@redhat.com (moderated for non-subscribers)
@@ -3888,6 +3908,15 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
F: Documentation/devicetree/bindings/usb/cdns,usb3.yaml
F: drivers/usb/cdns3/
+X: drivers/usb/cdns3/cdnsp*
+
+CADENCE USBSSP DRD IP DRIVER
+M: Pawel Laszczak <pawell@cadence.com>
+L: linux-usb@vger.kernel.org
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
+F: drivers/usb/cdns3/
+X: drivers/usb/cdns3/cdns3*
CADET FM/AM RADIO RECEIVER DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
@@ -3929,8 +3958,10 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
F: Documentation/devicetree/bindings/net/can/
F: drivers/net/can/
+F: include/linux/can/bittiming.h
F: include/linux/can/dev.h
F: include/linux/can/led.h
+F: include/linux/can/length.h
F: include/linux/can/platform/
F: include/linux/can/rx-offload.h
F: include/uapi/linux/can/error.h
@@ -3946,6 +3977,7 @@ W: https://github.com/linux-can
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
F: Documentation/networking/can.rst
+F: include/linux/can/can-ml.h
F: include/linux/can/core.h
F: include/linux/can/skb.h
F: include/net/netns/can.h
@@ -4081,7 +4113,6 @@ W: http://www.ibm.com/developerworks/power/cell/
F: arch/powerpc/include/asm/cell*.h
F: arch/powerpc/include/asm/spu*.h
F: arch/powerpc/include/uapi/asm/spu*.h
-F: arch/powerpc/oprofile/*cell*
F: arch/powerpc/platforms/cell/
CELLWISE CW2015 BATTERY DRIVER
@@ -4122,13 +4153,13 @@ F: scripts/extract-cert.c
F: scripts/sign-file.c
CFAG12864B LCD DRIVER
-M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
+M: Miguel Ojeda <ojeda@kernel.org>
S: Maintained
F: drivers/auxdisplay/cfag12864b.c
F: include/linux/cfag12864b.h
CFAG12864BFB LCD FRAMEBUFFER DRIVER
-M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
+M: Miguel Ojeda <ojeda@kernel.org>
S: Maintained
F: drivers/auxdisplay/cfag12864bfb.c
F: include/linux/cfag12864b.h
@@ -4298,12 +4329,12 @@ S: Supported
F: drivers/infiniband/hw/usnic/
CLANG-FORMAT FILE
-M: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
+M: Miguel Ojeda <ojeda@kernel.org>
S: Maintained
F: .clang-format
CLANG/LLVM BUILD SUPPORT
-M: Nathan Chancellor <natechancellor@gmail.com>
+M: Nathan Chancellor <nathan@kernel.org>
M: Nick Desaulniers <ndesaulniers@google.com>
L: clang-built-linux@googlegroups.com
S: Supported
@@ -4313,8 +4344,6 @@ C: irc://chat.freenode.net/clangbuiltlinux
F: Documentation/kbuild/llvm.rst
F: include/linux/compiler-clang.h
F: scripts/clang-tools/
-F: scripts/clang-version.sh
-F: scripts/lld-version.sh
K: \b(?i:clang|llvm)\b
CLEANCACHE API
@@ -4440,10 +4469,21 @@ S: Maintained
F: drivers/platform/x86/compal-laptop.c
COMPILER ATTRIBUTES
-M: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
+M: Miguel Ojeda <ojeda@kernel.org>
S: Maintained
F: include/linux/compiler_attributes.h
+COMPUTE EXPRESS LINK (CXL)
+M: Alison Schofield <alison.schofield@intel.com>
+M: Vishal Verma <vishal.l.verma@intel.com>
+M: Ira Weiny <ira.weiny@intel.com>
+M: Ben Widawsky <ben.widawsky@intel.com>
+M: Dan Williams <dan.j.williams@intel.com>
+L: linux-cxl@vger.kernel.org
+S: Maintained
+F: drivers/cxl/
+F: include/uapi/linux/cxl_mem.h
+
CONEXANT ACCESSRUNNER USB DRIVER
L: accessrunner-general@lists.sourceforge.net
S: Orphan
@@ -4467,7 +4507,7 @@ F: include/linux/console*
CONTROL GROUP (CGROUP)
M: Tejun Heo <tj@kernel.org>
-M: Li Zefan <lizefan@huawei.com>
+M: Zefan Li <lizefan.x@bytedance.com>
M: Johannes Weiner <hannes@cmpxchg.org>
L: cgroups@vger.kernel.org
S: Maintained
@@ -4491,11 +4531,9 @@ F: block/blk-throttle.c
F: include/linux/blk-cgroup.h
CONTROL GROUP - CPUSET
-M: Li Zefan <lizefan@huawei.com>
+M: Zefan Li <lizefan.x@bytedance.com>
L: cgroups@vger.kernel.org
S: Maintained
-W: http://www.bullopensource.org/cpuset/
-W: http://oss.sgi.com/projects/cpusets/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
F: Documentation/admin-guide/cgroup-v1/cpusets.rst
F: include/linux/cpuset.h
@@ -4623,6 +4661,7 @@ L: linux-samsung-soc@vger.kernel.org
S: Supported
F: arch/arm/mach-exynos/pm.c
F: drivers/cpuidle/cpuidle-exynos.c
+F: include/linux/platform_data/cpuidle-exynos.h
CPUIDLE DRIVER - ARM PSCI
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
@@ -4938,7 +4977,7 @@ F: Documentation/networking/decnet.rst
F: net/decnet/
DECSTATION PLATFORM SUPPORT
-M: "Maciej W. Rozycki" <macro@linux-mips.org>
+M: "Maciej W. Rozycki" <macro@orcam.me.uk>
L: linux-mips@vger.kernel.org
S: Maintained
W: http://www.linux-mips.org/wiki/DECstation
@@ -4947,12 +4986,12 @@ F: arch/mips/include/asm/dec/
F: arch/mips/include/asm/mach-dec/
DEFXX FDDI NETWORK DRIVER
-M: "Maciej W. Rozycki" <macro@linux-mips.org>
+M: "Maciej W. Rozycki" <macro@orcam.me.uk>
S: Maintained
F: drivers/net/fddi/defxx.*
DEFZA FDDI NETWORK DRIVER
-M: "Maciej W. Rozycki" <macro@linux-mips.org>
+M: "Maciej W. Rozycki" <macro@orcam.me.uk>
S: Maintained
F: drivers/net/fddi/defza.*
@@ -4969,17 +5008,17 @@ M: Matthew Garrett <mjg59@srcf.ucam.org>
M: Pali Rohár <pali@kernel.org>
L: platform-driver-x86@vger.kernel.org
S: Maintained
-F: drivers/platform/x86/dell-laptop.c
+F: drivers/platform/x86/dell/dell-laptop.c
DELL LAPTOP FREEFALL DRIVER
M: Pali Rohár <pali@kernel.org>
S: Maintained
-F: drivers/platform/x86/dell-smo8800.c
+F: drivers/platform/x86/dell/dell-smo8800.c
DELL LAPTOP RBTN DRIVER
M: Pali Rohár <pali@kernel.org>
S: Maintained
-F: drivers/platform/x86/dell-rbtn.*
+F: drivers/platform/x86/dell/dell-rbtn.*
DELL LAPTOP SMM DRIVER
M: Pali Rohár <pali@kernel.org>
@@ -4991,26 +5030,26 @@ DELL REMOTE BIOS UPDATE DRIVER
M: Stuart Hayes <stuart.w.hayes@gmail.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
-F: drivers/platform/x86/dell_rbu.c
+F: drivers/platform/x86/dell/dell_rbu.c
DELL SMBIOS DRIVER
M: Pali Rohár <pali@kernel.org>
M: Mario Limonciello <mario.limonciello@dell.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
-F: drivers/platform/x86/dell-smbios.*
+F: drivers/platform/x86/dell/dell-smbios.*
DELL SMBIOS SMM DRIVER
M: Mario Limonciello <mario.limonciello@dell.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
-F: drivers/platform/x86/dell-smbios-smm.c
+F: drivers/platform/x86/dell/dell-smbios-smm.c
DELL SMBIOS WMI DRIVER
M: Mario Limonciello <mario.limonciello@dell.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
-F: drivers/platform/x86/dell-smbios-wmi.c
+F: drivers/platform/x86/dell/dell-smbios-wmi.c
F: tools/wmi/dell-smbios-example.c
DELL SYSTEMS MANAGEMENT BASE DRIVER (dcdbas)
@@ -5018,12 +5057,12 @@ M: Stuart Hayes <stuart.w.hayes@gmail.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: Documentation/driver-api/dcdbas.rst
-F: drivers/platform/x86/dcdbas.*
+F: drivers/platform/x86/dell/dcdbas.*
DELL WMI DESCRIPTOR DRIVER
M: Mario Limonciello <mario.limonciello@dell.com>
S: Maintained
-F: drivers/platform/x86/dell-wmi-descriptor.c
+F: drivers/platform/x86/dell/dell-wmi-descriptor.c
DELL WMI SYSMAN DRIVER
M: Divya Bharathi <divya.bharathi@dell.com>
@@ -5032,13 +5071,13 @@ M: Prasanth Ksr <prasanth.ksr@dell.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/sysfs-class-firmware-attributes
-F: drivers/platform/x86/dell-wmi-sysman/
+F: drivers/platform/x86/dell/dell-wmi-sysman/
DELL WMI NOTIFICATIONS DRIVER
M: Matthew Garrett <mjg59@srcf.ucam.org>
M: Pali Rohár <pali@kernel.org>
S: Maintained
-F: drivers/platform/x86/dell-wmi.c
+F: drivers/platform/x86/dell/dell-wmi.c
DELTA ST MEDIA DRIVER
M: Hugues Fruchet <hugues.fruchet@st.com>
@@ -5280,6 +5319,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git
F: Documentation/devicetree/bindings/dma/
F: Documentation/driver-api/dmaengine/
F: drivers/dma/
+F: include/linux/dma/
F: include/linux/dmaengine.h
F: include/linux/of_dma.h
@@ -5783,6 +5823,7 @@ F: drivers/gpu/drm/vboxvideo/
DRM DRIVER FOR VMWARE VIRTUAL GPU
M: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
M: Roland Scheidegger <sroland@vmware.com>
+M: Zack Rusin <zackr@vmware.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://people.freedesktop.org/~sroland/linux
@@ -5984,8 +6025,8 @@ F: Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
F: drivers/gpu/drm/stm
DRM DRIVERS FOR TI KEYSTONE
-M: Jyri Sarha <jsarha@ti.com>
-M: Tomi Valkeinen <tomi.valkeinen@ti.com>
+M: Jyri Sarha <jyri.sarha@iki.fi>
+M: Tomi Valkeinen <tomba@kernel.org>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -5995,15 +6036,15 @@ F: Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
F: drivers/gpu/drm/tidss/
DRM DRIVERS FOR TI LCDC
-M: Jyri Sarha <jsarha@ti.com>
-R: Tomi Valkeinen <tomi.valkeinen@ti.com>
+M: Jyri Sarha <jyri.sarha@iki.fi>
+R: Tomi Valkeinen <tomba@kernel.org>
L: dri-devel@lists.freedesktop.org
S: Maintained
F: Documentation/devicetree/bindings/display/tilcdc/
F: drivers/gpu/drm/tilcdc/
DRM DRIVERS FOR TI OMAP
-M: Tomi Valkeinen <tomi.valkeinen@ti.com>
+M: Tomi Valkeinen <tomba@kernel.org>
L: dri-devel@lists.freedesktop.org
S: Maintained
F: Documentation/devicetree/bindings/display/ti/
@@ -6056,14 +6097,6 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/xlnx/
F: drivers/gpu/drm/xlnx/
-DRM DRIVERS FOR ZTE ZX
-M: Shawn Guo <shawnguo@kernel.org>
-L: dri-devel@lists.freedesktop.org
-S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
-F: Documentation/devicetree/bindings/display/zte,vou.txt
-F: drivers/gpu/drm/zte/
-
DRM PANEL DRIVERS
M: Thierry Reding <thierry.reding@gmail.com>
R: Sam Ravnborg <sam@ravnborg.org>
@@ -6223,7 +6256,7 @@ F: include/linux/dim.h
F: lib/dim/
DZ DECSTATION DZ11 SERIAL DRIVER
-M: "Maciej W. Rozycki" <macro@linux-mips.org>
+M: "Maciej W. Rozycki" <macro@orcam.me.uk>
S: Maintained
F: drivers/tty/serial/dz.*
@@ -6473,9 +6506,9 @@ S: Maintained
F: drivers/edac/skx_*.[ch]
EDAC-TI
-M: Tero Kristo <t-kristo@ti.com>
+M: Tero Kristo <kristo@kernel.org>
L: linux-edac@vger.kernel.org
-S: Maintained
+S: Odd Fixes
F: drivers/edac/ti_edac.c
EDIROL UA-101/UA-1000 DRIVER
@@ -6862,6 +6895,9 @@ F: include/linux/fs.h
F: include/linux/fs_types.h
F: include/uapi/linux/fs.h
F: include/uapi/linux/openat2.h
+X: fs/io-wq.c
+X: fs/io-wq.h
+X: fs/io_uring.c
FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
M: Riku Voipio <riku.voipio@iki.fi>
@@ -6953,9 +6989,10 @@ M: Wu Hao <hao.wu@intel.com>
R: Tom Rix <trix@redhat.com>
L: linux-fpga@vger.kernel.org
S: Maintained
-F: Documentation/ABI/testing/sysfs-bus-dfl
+F: Documentation/ABI/testing/sysfs-bus-dfl*
F: Documentation/fpga/dfl.rst
F: drivers/fpga/dfl*
+F: include/linux/dfl.h
F: include/uapi/linux/fpga-dfl.h
FPGA MANAGER FRAMEWORK
@@ -7376,13 +7413,6 @@ M: Kieran Bingham <kbingham@kernel.org>
S: Supported
F: scripts/gdb/
-GDT SCSI DISK ARRAY CONTROLLER DRIVER
-M: Achim Leubner <achim_leubner@adaptec.com>
-L: linux-scsi@vger.kernel.org
-S: Supported
-W: http://www.icp-vortex.com/
-F: drivers/scsi/gdt*
-
GEMTEK FM RADIO RECEIVER DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -7928,6 +7958,12 @@ F: drivers/hid/
F: include/linux/hid*
F: include/uapi/linux/hid*
+HID PLAYSTATION DRIVER
+M: Roderick Colenbrander <roderick.colenbrander@sony.com>
+L: linux-input@vger.kernel.org
+S: Supported
+F: drivers/hid/hid-playstation.c
+
HID SENSOR HUB DRIVERS
M: Jiri Kosina <jikos@kernel.org>
M: Jonathan Cameron <jic23@kernel.org>
@@ -8159,7 +8195,7 @@ F: net/hsr/
HT16K33 LED CONTROLLER DRIVER
M: Robin van der Gracht <robin@protonic.nl>
S: Maintained
-F: Documentation/devicetree/bindings/display/ht16k33.txt
+F: Documentation/devicetree/bindings/auxdisplay/holtek,ht16k33.yaml
F: drivers/auxdisplay/ht16k33.c
HTCPEN TOUCHSCREEN DRIVER
@@ -8433,11 +8469,8 @@ F: drivers/i3c/
F: include/linux/i3c/
IA64 (Itanium) PLATFORM
-M: Tony Luck <tony.luck@intel.com>
-M: Fenghua Yu <fenghua.yu@intel.com>
L: linux-ia64@vger.kernel.org
-S: Odd Fixes
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
+S: Orphan
F: Documentation/ia64/
F: arch/ia64/
@@ -8880,7 +8913,6 @@ F: drivers/mfd/intel_pmc_bxt.c
F: include/linux/mfd/intel_pmc_bxt.h
INTEL C600 SERIES SAS CONTROLLER DRIVER
-M: Intel SCU Linux support <intel-linux-scu@intel.com>
M: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
L: linux-scsi@vger.kernel.org
S: Supported
@@ -8938,7 +8970,6 @@ L: linux-gpio@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
F: drivers/gpio/gpio-ich.c
-F: drivers/gpio/gpio-intel-mid.c
F: drivers/gpio/gpio-merrifield.c
F: drivers/gpio/gpio-ml-ioh.c
F: drivers/gpio/gpio-pch.c
@@ -9011,9 +9042,11 @@ INTEL IPU3 CSI-2 CIO2 DRIVER
M: Yong Zhi <yong.zhi@intel.com>
M: Sakari Ailus <sakari.ailus@linux.intel.com>
M: Bingbu Cao <bingbu.cao@intel.com>
+M: Dan Scally <djrscally@gmail.com>
R: Tianshu Qiu <tian.shu.qiu@intel.com>
L: linux-media@vger.kernel.org
S: Maintained
+T: git git://linuxtv.org/media_tree.git
F: Documentation/userspace-api/media/v4l/pixfmt-srggb10-ipu3.rst
F: drivers/media/pci/intel/ipu3/
@@ -9060,6 +9093,17 @@ F: drivers/crypto/keembay/keembay-ocs-aes-core.c
F: drivers/crypto/keembay/ocs-aes.c
F: drivers/crypto/keembay/ocs-aes.h
+INTEL KEEM BAY OCS HCU CRYPTO DRIVER
+M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
+M: Declan Murphy <declan.murphy@intel.com>
+S: Maintained
+F: Documentation/devicetree/bindings/crypto/intel,keembay-ocs-hcu.yaml
+F: drivers/crypto/keembay/Kconfig
+F: drivers/crypto/keembay/Makefile
+F: drivers/crypto/keembay/keembay-ocs-hcu-core.c
+F: drivers/crypto/keembay/ocs-hcu.c
+F: drivers/crypto/keembay/ocs-hcu.h
+
INTEL MANAGEMENT ENGINE (mei)
M: Tomas Winkler <tomas.winkler@intel.com>
L: linux-kernel@vger.kernel.org
@@ -9097,14 +9141,11 @@ M: Andy Shevchenko <andy@kernel.org>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
F: drivers/gpio/gpio-*cove.c
-F: drivers/gpio/gpio-msic.c
INTEL PMIC MULTIFUNCTION DEVICE DRIVERS
M: Andy Shevchenko <andy@kernel.org>
S: Maintained
-F: drivers/mfd/intel_msic.c
F: drivers/mfd/intel_soc_pmic*
-F: include/linux/mfd/intel_msic.h
F: include/linux/mfd/intel_soc_pmic*
INTEL PMT DRIVER
@@ -9226,10 +9267,11 @@ F: include/linux/tboot.h
INTEL SGX
M: Jarkko Sakkinen <jarkko@kernel.org>
+R: Dave Hansen <dave.hansen@linux.intel.com>
L: linux-sgx@vger.kernel.org
S: Supported
Q: https://patchwork.kernel.org/project/intel-sgx/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-sgx.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/sgx
F: Documentation/x86/sgx.rst
F: arch/x86/entry/vdso/vsgx.S
F: arch/x86/include/uapi/asm/sgx.h
@@ -9297,6 +9339,7 @@ F: include/uapi/linux/iommu.h
IO_URING
M: Jens Axboe <axboe@kernel.dk>
+R: Pavel Begunkov <asml.silence@gmail.com>
L: io-uring@vger.kernel.org
S: Maintained
T: git git://git.kernel.dk/linux-block
@@ -9304,6 +9347,7 @@ T: git git://git.kernel.dk/liburing
F: fs/io-wq.c
F: fs/io-wq.h
F: fs/io_uring.c
+F: include/linux/io_uring.h
F: include/uapi/linux/io_uring.h
IPMI SUBSYSTEM
@@ -9561,16 +9605,18 @@ F: Documentation/hwmon/k8temp.rst
F: drivers/hwmon/k8temp.c
KASAN
-M: Andrey Ryabinin <aryabinin@virtuozzo.com>
+M: Andrey Ryabinin <ryabinin.a.a@gmail.com>
R: Alexander Potapenko <glider@google.com>
+R: Andrey Konovalov <andreyknvl@gmail.com>
R: Dmitry Vyukov <dvyukov@google.com>
L: kasan-dev@googlegroups.com
S: Maintained
F: Documentation/dev-tools/kasan.rst
-F: arch/*/include/asm/kasan.h
+F: arch/*/include/asm/*kasan.h
F: arch/*/mm/kasan_init*
F: include/linux/kasan*.h
-F: lib/test_kasan.c
+F: lib/Kconfig.kasan
+F: lib/test_kasan*.c
F: mm/kasan/
F: scripts/Makefile.kasan
@@ -9585,7 +9631,7 @@ F: scripts/kconfig/
KCOV
R: Dmitry Vyukov <dvyukov@google.com>
-R: Andrey Konovalov <andreyknvl@google.com>
+R: Andrey Konovalov <andreyknvl@gmail.com>
L: kasan-dev@googlegroups.com
S: Maintained
F: Documentation/dev-tools/kcov.rst
@@ -9736,6 +9782,7 @@ M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>
L: linux-mips@vger.kernel.org
L: kvm@vger.kernel.org
S: Maintained
+T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
F: arch/mips/include/asm/kvm*
F: arch/mips/include/uapi/asm/kvm*
F: arch/mips/kvm/
@@ -9845,6 +9892,18 @@ F: include/linux/keyctl.h
F: include/uapi/linux/keyctl.h
F: security/keys/
+KFENCE
+M: Alexander Potapenko <glider@google.com>
+M: Marco Elver <elver@google.com>
+R: Dmitry Vyukov <dvyukov@google.com>
+L: kasan-dev@googlegroups.com
+S: Maintained
+F: Documentation/dev-tools/kfence.rst
+F: arch/*/include/asm/kfence.h
+F: include/linux/kfence.h
+F: lib/Kconfig.kfence
+F: mm/kfence/
+
KFIFO
M: Stefani Seibold <stefani@seibold.net>
S: Maintained
@@ -9905,7 +9964,7 @@ F: include/linux/kprobes.h
F: kernel/kprobes.c
KS0108 LCD CONTROLLER DRIVER
-M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
+M: Miguel Ojeda <ojeda@kernel.org>
S: Maintained
F: Documentation/admin-guide/auxdisplay/ks0108.rst
F: drivers/auxdisplay/ks0108.c
@@ -10369,6 +10428,8 @@ LOCKING PRIMITIVES
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
M: Will Deacon <will@kernel.org>
+R: Waiman Long <longman@redhat.com>
+R: Boqun Feng <boqun.feng@gmail.com> (LOCKDEP)
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
@@ -10689,6 +10750,8 @@ M: Sunil Goutham <sgoutham@marvell.com>
M: Linu Cherian <lcherian@marvell.com>
M: Geetha sowjanya <gakula@marvell.com>
M: Jerin Jacob <jerinj@marvell.com>
+M: hariprasad <hkelam@marvell.com>
+M: Subbaraya Sundeep <sbhatta@marvell.com>
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
@@ -11178,6 +11241,15 @@ S: Maintained
F: Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
F: drivers/i2c/busses/i2c-mt65xx.c
+MEDIATEK IOMMU DRIVER
+M: Yong Wu <yong.wu@mediatek.com>
+L: iommu@lists.linux-foundation.org
+L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
+S: Supported
+F: Documentation/devicetree/bindings/iommu/mediatek*
+F: drivers/iommu/mtk_iommu*
+F: include/dt-bindings/memory/mt*-port.h
+
MEDIATEK JPEG DRIVER
M: Rick Chang <rick.chang@mediatek.com>
M: Bin Liu <bin.liu@mediatek.com>
@@ -11267,6 +11339,8 @@ L: linux-usb@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+F: Documentation/devicetree/bindings/usb/mediatek,*
+F: drivers/usb/host/xhci-mtk*
F: drivers/usb/mtu3/
MEGACHIPS STDPXXXX-GE-B850V3-FW LVDS/DP++ BRIDGES
@@ -11548,7 +11622,7 @@ L: linux-amlogic@lists.infradead.org
S: Supported
T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/amlogic,axg-ge2d.yaml
-F: drivers/media/meson/ge2d/
+F: drivers/media/platform/meson/ge2d/
MESON NAND CONTROLLER DRIVER FOR AMLOGIC SOCS
M: Liang Yang <liang.yang@amlogic.com>
@@ -11600,7 +11674,6 @@ F: drivers/dma/at_hdmac.c
F: drivers/dma/at_hdmac_regs.h
F: drivers/dma/at_xdmac.c
F: include/dt-bindings/dma/at91.h
-F: include/linux/platform_data/dma-atmel.h
MICROCHIP AT91 SERIAL DRIVER
M: Richard Genoud <richard.genoud@gmail.com>
@@ -11686,9 +11759,9 @@ F: drivers/video/fbdev/atmel_lcdfb.c
F: include/video/atmel_lcdc.h
MICROCHIP MCP16502 PMIC DRIVER
-M: Andrei Stefanescu <andrei.stefanescu@microchip.com>
+M: Claudiu Beznea <claudiu.beznea@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
+S: Supported
F: Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt
F: drivers/regulator/mcp16502.c
@@ -11803,12 +11876,31 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pdx86/platform-drivers-x86.git
F: drivers/platform/surface/
+MICROSOFT SURFACE HOT-PLUG DRIVER
+M: Maximilian Luz <luzmaximilian@gmail.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/surface/surface_hotplug.c
+
MICROSOFT SURFACE PRO 3 BUTTON DRIVER
M: Chen Yu <yu.c.chen@intel.com>
L: platform-driver-x86@vger.kernel.org
S: Supported
F: drivers/platform/surface/surfacepro3_button.c
+MICROSOFT SURFACE SYSTEM AGGREGATOR SUBSYSTEM
+M: Maximilian Luz <luzmaximilian@gmail.com>
+S: Maintained
+W: https://github.com/linux-surface/surface-aggregator-module
+C: irc://chat.freenode.net/##linux-surface
+F: Documentation/driver-api/surface_aggregator/
+F: drivers/platform/surface/aggregator/
+F: drivers/platform/surface/surface_acpi_notify.c
+F: drivers/platform/surface/surface_aggregator_cdev.c
+F: include/linux/surface_acpi_notify.h
+F: include/linux/surface_aggregator/
+F: include/uapi/linux/surface_aggregator/
+
MICROTEK X6 SCANNER
M: Oliver Neukum <oliver@neukum.org>
S: Maintained
@@ -11831,9 +11923,11 @@ L: linux-media@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/media/i2c/mipi-ccs.yaml
F: Documentation/driver-api/media/drivers/ccs/
+F: Documentation/userspace-api/media/drivers/ccs.rst
F: drivers/media/i2c/ccs-pll.c
F: drivers/media/i2c/ccs-pll.h
F: drivers/media/i2c/ccs/
+F: include/uapi/linux/ccs.h
F: include/uapi/linux/smiapp.h
MIPS
@@ -11899,8 +11993,7 @@ L: linux-mips@vger.kernel.org
S: Maintained
F: arch/mips/include/asm/mach-loongson2ef/
F: arch/mips/loongson2ef/
-F: drivers/*/*/*loongson2*
-F: drivers/*/*loongson2*
+F: drivers/cpufreq/loongson2_cpufreq.c
MIPS/LOONGSON64 ARCHITECTURE
M: Huacai Chen <chenhuacai@kernel.org>
@@ -11909,8 +12002,6 @@ L: linux-mips@vger.kernel.org
S: Maintained
F: arch/mips/include/asm/mach-loongson64/
F: arch/mips/loongson64/
-F: drivers/*/*/*loongson3*
-F: drivers/*/*loongson3*
F: drivers/irqchip/irq-loongson*
F: drivers/platform/mips/cpu_hwmon.c
@@ -12415,6 +12506,7 @@ F: tools/testing/selftests/net/ipsec.c
NETWORKING [IPv4/IPv6]
M: "David S. Miller" <davem@davemloft.net>
M: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
+M: David Ahern <dsahern@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
@@ -12518,6 +12610,14 @@ F: include/net/nfc/
F: include/uapi/linux/nfc.h
F: net/nfc/
+NFC VIRTUAL NCI DEVICE DRIVER
+M: Bongsu Jeon <bongsu.jeon@samsung.com>
+L: netdev@vger.kernel.org
+L: linux-nfc@lists.01.org (moderated for non-subscribers)
+S: Supported
+F: drivers/nfc/virtual_ncidev.c
+F: tools/testing/selftests/nci/
+
NFS, SUNRPC, AND LOCKD CLIENTS
M: Trond Myklebust <trond.myklebust@hammerspace.com>
M: Anna Schumaker <anna.schumaker@netapp.com>
@@ -12745,6 +12845,13 @@ F: drivers/iio/gyro/fxas21002c_core.c
F: drivers/iio/gyro/fxas21002c_i2c.c
F: drivers/iio/gyro/fxas21002c_spi.c
+NXP i.MX CLOCK DRIVERS
+M: Abel Vesa <abel.vesa@nxp.com>
+L: linux-clk@vger.kernel.org
+L: linux-imx@nxp.com
+S: Maintained
+F: drivers/clk/imx/
+
NXP i.MX 8MQ DCSS DRIVER
M: Laurentiu Palcu <laurentiu.palcu@oss.nxp.com>
R: Lucas Stach <l.stach@pengutronix.de>
@@ -12829,6 +12936,7 @@ F: drivers/net/dsa/ocelot/*
F: drivers/net/ethernet/mscc/
F: include/soc/mscc/ocelot*
F: net/dsa/tag_ocelot.c
+F: net/dsa/tag_ocelot_8021q.c
F: tools/testing/selftests/drivers/net/ocelot/*
OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER
@@ -12888,7 +12996,7 @@ S: Orphan
F: drivers/video/fbdev/omap/
OMAP GENERAL PURPOSE MEMORY CONTROLLER SUPPORT
-M: Roger Quadros <rogerq@ti.com>
+M: Roger Quadros <rogerq@kernel.org>
M: Tony Lindgren <tony@atomide.com>
L: linux-omap@vger.kernel.org
S: Maintained
@@ -13108,7 +13216,7 @@ M: Jacopo Mondi <jacopo@jmondi.org>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
-F: Documentation/devicetree/bindings/media/i2c/ov5647.yaml
+F: Documentation/devicetree/bindings/media/i2c/ovti,ov5647.yaml
F: drivers/media/i2c/ov5647.c
OMNIVISION OV5670 SENSOR DRIVER
@@ -13307,15 +13415,6 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
F: sound/drivers/opl4/
-OPROFILE
-M: Robert Richter <rric@kernel.org>
-L: oprofile-list@lists.sf.net
-S: Maintained
-F: arch/*/include/asm/oprofile*.h
-F: arch/*/oprofile/
-F: drivers/oprofile/
-F: include/linux/oprofile.h
-
ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
M: Mark Fasheh <mark@fasheh.com>
M: Joel Becker <jlbec@evilplan.org>
@@ -13826,6 +13925,13 @@ S: Supported
F: Documentation/devicetree/bindings/pci/mediatek*
F: drivers/pci/controller/*mediatek*
+PCIE DRIVER FOR MICROCHIP
+M: Daire McNamara <daire.mcnamara@microchip.com>
+L: linux-pci@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/pci/microchip*
+F: drivers/pci/controller/*microchip*
+
PCIE DRIVER FOR QUALCOMM MSM
M: Stanimir Varbanov <svarbanov@mm-sol.com>
L: linux-pci@vger.kernel.org
@@ -13978,15 +14084,6 @@ L: linux-input@vger.kernel.org
S: Maintained
F: drivers/hid/hid-picolcd*
-PICOXCELL SUPPORT
-M: Jamie Iles <jamie@jamieiles.com>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Supported
-T: git git://github.com/jamieiles/linux-2.6-ji.git
-F: arch/arm/boot/dts/picoxcell*
-F: arch/arm/mach-picoxcell/
-F: drivers/crypto/picoxcell*
-
PIDFD API
M: Christian Brauner <christian@brauner.io>
L: linux-kernel@vger.kernel.org
@@ -14093,7 +14190,6 @@ L: linux-mips@vger.kernel.org
S: Odd Fixes
F: arch/mips/boot/dts/img/pistachio*
F: arch/mips/configs/pistachio*_defconfig
-F: arch/mips/include/asm/mach-pistachio/
F: arch/mips/pistachio/
PKTCDVD DRIVER
@@ -14616,6 +14712,12 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/staging/qlge/
+QLOGIC QLGE 10Gb ETHERNET DRIVER
+M: Coiby Xu <coiby.xu@gmail.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: Documentation/networking/device_drivers/qlogic/qlge.rst
+
QM1D1B0004 MEDIA DRIVER
M: Akihiro Tsukada <tskd08@gmail.com>
L: linux-media@vger.kernel.org
@@ -14641,9 +14743,11 @@ M: Stuart Yoder <stuyoder@gmail.com>
M: Laurentiu Tudor <laurentiu.tudor@nxp.com>
L: linux-kernel@vger.kernel.org
S: Maintained
+F: Documentation/ABI/stable/sysfs-bus-fsl-mc
F: Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
F: Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst
F: drivers/bus/fsl-mc/
+F: include/uapi/linux/fsl_mc.h
QT1010 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
@@ -14967,6 +15071,18 @@ F: drivers/media/i2c/max9271.c
F: drivers/media/i2c/max9271.h
F: drivers/media/i2c/rdacm20.c
+RDACM21 Camera Sensor
+M: Jacopo Mondi <jacopo+renesas@jmondi.org>
+M: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
+M: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+M: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/media/i2c/rdacm2x-gmsl.yaml
+F: drivers/media/i2c/max9271.c
+F: drivers/media/i2c/max9271.h
+F: drivers/media/i2c/rdacm21.c
+
RDC R-321X SoC
M: Florian Fainelli <florian@openwrt.org>
S: Maintained
@@ -15691,7 +15807,6 @@ F: drivers/media/i2c/s5k5baf.c
SAMSUNG S5P Security SubSystem (SSS) DRIVER
M: Krzysztof Kozlowski <krzk@kernel.org>
M: Vladimir Zapolskiy <vz@mleia.com>
-M: Kamil Konieczny <k.konieczny@samsung.com>
L: linux-crypto@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
@@ -16239,12 +16354,13 @@ S: Maintained
F: Documentation/fb/sm712fb.rst
F: drivers/video/fbdev/sm712*
-SIMPLE FIRMWARE INTERFACE (SFI)
-S: Obsolete
-W: http://simplefirmware.org/
-F: arch/x86/platform/sfi/
-F: drivers/sfi/
-F: include/linux/sfi*.h
+SILVACO I3C DUAL-ROLE MASTER
+M: Miquel Raynal <miquel.raynal@bootlin.com>
+M: Conor Culhane <conor.culhane@silvaco.com>
+L: linux-i3c@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml
+F: drivers/i3c/master/svc-i3c-master.c
SIMPLEFB FB DRIVER
M: Hans de Goede <hdegoede@redhat.com>
@@ -16543,6 +16659,7 @@ M: Sakari Ailus <sakari.ailus@linux.intel.com>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
+F: Documentation/devicetree/bindings/media/i2c/imx258.yaml
F: drivers/media/i2c/imx258.c
SONY IMX274 SENSOR DRIVER
@@ -16568,6 +16685,15 @@ S: Maintained
T: git git://linuxtv.org/media_tree.git
F: drivers/media/i2c/imx319.c
+SONY IMX334 SENSOR DRIVER
+M: Paul J. Murphy <paul.j.murphy@intel.com>
+M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+T: git git://linuxtv.org/media_tree.git
+F: Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml
+F: drivers/media/i2c/imx334.c
+
SONY IMX355 SENSOR DRIVER
M: Tianshu Qiu <tian.shu.qiu@intel.com>
L: linux-media@vger.kernel.org
@@ -16657,6 +16783,7 @@ R: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
R: Sanyog Kale <sanyog.r.kale@intel.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/soundwire.git
F: Documentation/driver-api/soundwire/
F: drivers/soundwire/
F: include/linux/soundwire/
@@ -16938,12 +17065,6 @@ F: include/linux/static_call*.h
F: kernel/jump_label.c
F: kernel/static_call.c
-STEC S1220 SKD DRIVER
-M: Damien Le Moal <Damien.LeMoal@wdc.com>
-L: linux-block@vger.kernel.org
-S: Maintained
-F: drivers/block/skd*[ch]
-
STI AUDIO (ASoC) DRIVERS
M: Arnaud Pouliquen <arnaud.pouliquen@st.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -17213,6 +17334,7 @@ F: drivers/mfd/syscon.c
SYSTEM CONTROL & POWER/MANAGEMENT INTERFACE (SCPI/SCMI) Message Protocol drivers
M: Sudeep Holla <sudeep.holla@arm.com>
+R: Cristian Marussi <cristian.marussi@arm.com>
L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: Documentation/devicetree/bindings/arm/arm,sc[mp]i.txt
@@ -17220,6 +17342,7 @@ F: drivers/clk/clk-sc[mp]i.c
F: drivers/cpufreq/sc[mp]i-cpufreq.c
F: drivers/firmware/arm_scmi/
F: drivers/firmware/arm_scpi.c
+F: drivers/regulator/scmi-regulator.c
F: drivers/reset/reset-scmi.c
F: include/linux/sc[mp]i_protocol.h
F: include/trace/events/scmi.h
@@ -17503,6 +17626,14 @@ M: Laxman Dewangan <ldewangan@nvidia.com>
S: Supported
F: drivers/spi/spi-tegra*
+TEGRA QUAD SPI DRIVER
+M: Thierry Reding <thierry.reding@gmail.com>
+M: Jonathan Hunter <jonathanh@nvidia.com>
+M: Sowjanya Komatineni <skomatineni@nvidia.com>
+L: linux-tegra@vger.kernel.org
+S: Maintained
+F: drivers/spi/spi-tegra210-quad.c
+
TEGRA VIDEO DRIVER
M: Thierry Reding <thierry.reding@gmail.com>
M: Jonathan Hunter <jonathanh@nvidia.com>
@@ -17573,7 +17704,7 @@ F: include/linux/dma/k3-psil.h
TEXAS INSTRUMENTS' SYSTEM CONTROL INTERFACE (TISCI) PROTOCOL DRIVER
M: Nishanth Menon <nm@ti.com>
-M: Tero Kristo <t-kristo@ti.com>
+M: Tero Kristo <kristo@kernel.org>
M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-arm-kernel@lists.infradead.org
S: Maintained
@@ -17595,6 +17726,15 @@ F: include/dt-bindings/soc/ti,sci_pm_domain.h
F: include/linux/soc/ti/ti_sci_inta_msi.h
F: include/linux/soc/ti/ti_sci_protocol.h
+TEXAS INSTRUMENTS TPS23861 PoE PSE DRIVER
+M: Robert Marko <robert.marko@sartura.hr>
+M: Luka Perkov <luka.perkov@sartura.hr>
+L: linux-hwmon@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/hwmon/ti,tps23861.yaml
+F: Documentation/hwmon/tps23861.rst
+F: drivers/hwmon/tps23861.c
+
THANKO'S RAREMONO AM/FM/SW RADIO RECEIVER USB DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -17648,7 +17788,7 @@ F: drivers/thermal/gov_power_allocator.c
F: include/trace/events/thermal_power_allocator.h
THINKPAD ACPI EXTRAS DRIVER
-M: Henrique de Moraes Holschuh <ibm-acpi@hmh.eng.br>
+M: Henrique de Moraes Holschuh <hmh@hmh.eng.br>
L: ibm-acpi-devel@lists.sourceforge.net
L: platform-driver-x86@vger.kernel.org
S: Maintained
@@ -17717,9 +17857,9 @@ S: Maintained
F: drivers/clk/clk-cdce706.c
TI CLOCK DRIVER
-M: Tero Kristo <t-kristo@ti.com>
+M: Tero Kristo <kristo@kernel.org>
L: linux-omap@vger.kernel.org
-S: Maintained
+S: Odd Fixes
F: drivers/clk/ti/
F: include/linux/clk/ti.h
@@ -17845,7 +17985,7 @@ M: Dan Murphy <dmurphy@ti.com>
L: linux-can@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/can/tcan4x5x.txt
-F: drivers/net/can/m_can/tcan4x5x.c
+F: drivers/net/can/m_can/tcan4x5x*
TI TRF7970A NFC DRIVER
M: Mark Greer <mgreer@animalcreek.com>
@@ -18109,7 +18249,7 @@ F: Documentation/networking/tuntap.rst
F: arch/um/os-Linux/drivers/
TURBOCHANNEL SUBSYSTEM
-M: "Maciej W. Rozycki" <macro@linux-mips.org>
+M: "Maciej W. Rozycki" <macro@orcam.me.uk>
M: Ralf Baechle <ralf@linux-mips.org>
L: linux-mips@vger.kernel.org
S: Maintained
@@ -19742,7 +19882,7 @@ F: Documentation/admin-guide/blockdev/zram.rst
F: drivers/block/zram/
ZS DECSTATION Z85C30 SERIAL DRIVER
-M: "Maciej W. Rozycki" <macro@linux-mips.org>
+M: "Maciej W. Rozycki" <macro@orcam.me.uk>
S: Maintained
F: drivers/tty/serial/zs.*
diff --git a/Makefile b/Makefile
index e0af7a4a5598..31dcdb3d61fa 100644
--- a/Makefile
+++ b/Makefile
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
-PATCHLEVEL = 11
+PATCHLEVEL = 12
SUBLEVEL = 0
-EXTRAVERSION = -rc5
-NAME = Kleptomaniac Octopus
+EXTRAVERSION = -rc2
+NAME = Frozen Wasteland
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
@@ -96,10 +96,41 @@ endif
ifneq ($(findstring s,$(filter-out --%,$(MAKEFLAGS))),)
quiet=silent_
+ KBUILD_VERBOSE = 0
endif
export quiet Q KBUILD_VERBOSE
+# Call a source code checker (by default, "sparse") as part of the
+# C compilation.
+#
+# Use 'make C=1' to enable checking of only re-compiled files.
+# Use 'make C=2' to enable checking of *all* source files, regardless
+# of whether they are re-compiled or not.
+#
+# See the file "Documentation/dev-tools/sparse.rst" for more details,
+# including where to get the "sparse" utility.
+
+ifeq ("$(origin C)", "command line")
+ KBUILD_CHECKSRC = $(C)
+endif
+ifndef KBUILD_CHECKSRC
+ KBUILD_CHECKSRC = 0
+endif
+
+export KBUILD_CHECKSRC
+
+# Use make M=dir or set the environment variable KBUILD_EXTMOD to specify the
+# directory of external module to build. Setting M= takes precedence.
+ifeq ("$(origin M)", "command line")
+ KBUILD_EXTMOD := $(M)
+endif
+
+$(if $(word 2, $(KBUILD_EXTMOD)), \
+ $(error building multiple external modules is not supported))
+
+export KBUILD_EXTMOD
+
# Kbuild will save output files in the current working directory.
# This does not need to match to the root of the kernel source tree.
#
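For reference, the block moved above keeps the long-standing C= and M= semantics; a couple of illustrative invocations (the external-module path is a placeholder, not taken from this patch):

    make C=1                      # run sparse only on the files being re-compiled
    make C=2 fs/                  # run sparse on every source file under fs/
    make C=1 M=/path/to/module    # check a single external module tree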
@@ -145,7 +176,8 @@ else
need-sub-make := 1
endif
-abs_srctree := $(realpath $(dir $(lastword $(MAKEFILE_LIST))))
+this-makefile := $(lastword $(MAKEFILE_LIST))
+abs_srctree := $(realpath $(dir $(this-makefile)))
ifneq ($(words $(subst :, ,$(abs_srctree))), 1)
$(error source directory cannot contain spaces or colons)
@@ -160,8 +192,6 @@ MAKEFLAGS += --include-dir=$(abs_srctree)
need-sub-make := 1
endif
-this-makefile := $(lastword $(MAKEFILE_LIST))
-
ifneq ($(filter 3.%,$(MAKE_VERSION)),)
# 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x
# We need to invoke sub-make to avoid implicit rules in the top Makefile.
@@ -195,36 +225,6 @@ ifeq ($(need-sub-make),)
# so that IDEs/editors are able to understand relative filenames.
MAKEFLAGS += --no-print-directory
-# Call a source code checker (by default, "sparse") as part of the
-# C compilation.
-#
-# Use 'make C=1' to enable checking of only re-compiled files.
-# Use 'make C=2' to enable checking of *all* source files, regardless
-# of whether they are re-compiled or not.
-#
-# See the file "Documentation/dev-tools/sparse.rst" for more details,
-# including where to get the "sparse" utility.
-
-ifeq ("$(origin C)", "command line")
- KBUILD_CHECKSRC = $(C)
-endif
-ifndef KBUILD_CHECKSRC
- KBUILD_CHECKSRC = 0
-endif
-
-# Use make M=dir or set the environment variable KBUILD_EXTMOD to specify the
-# directory of external module to build. Setting M= takes precedence.
-ifeq ("$(origin M)", "command line")
- KBUILD_EXTMOD := $(M)
-endif
-
-$(if $(word 2, $(KBUILD_EXTMOD)), \
- $(error building multiple external modules is not supported))
-
-export KBUILD_CHECKSRC KBUILD_EXTMOD
-
-extmod-prefix = $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/)
-
ifeq ($(abs_srctree),$(abs_objtree))
# building in the source tree
srctree := .
@@ -257,7 +257,6 @@ export building_out_of_srctree srctree objtree VPATH
# of make so .config is not included in this case either (for *config).
version_h := include/generated/uapi/linux/version.h
-old_version_h := include/linux/version.h
clean-targets := %clean mrproper cleandocs
no-dot-config-targets := $(clean-targets) \
@@ -452,7 +451,6 @@ AWK = awk
INSTALLKERNEL := installkernel
DEPMOD = depmod
PERL = perl
-PYTHON = python
PYTHON3 = python3
CHECK = sparse
BASH = bash
@@ -508,7 +506,7 @@ CLANG_FLAGS :=
export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
-export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
+export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
@@ -559,7 +557,13 @@ ifdef building_out_of_srctree
{ echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
endif
-ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
+# The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
+# Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
+# CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
+# and from include/config/auto.conf.cmd to detect the compiler upgrade.
+CC_VERSION_TEXT = $(shell $(CC) --version 2>/dev/null | head -n 1 | sed 's/\#//g')
+
+ifneq ($(findstring clang,$(CC_VERSION_TEXT)),)
ifneq ($(CROSS_COMPILE),)
CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
@@ -578,12 +582,6 @@ KBUILD_AFLAGS += $(CLANG_FLAGS)
export CLANG_FLAGS
endif
-# The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
-# Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
-# CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
-# and from include/config/auto.conf.cmd to detect the compiler upgrade.
-CC_VERSION_TEXT = $(shell $(CC) --version 2>/dev/null | head -n 1)
-
ifdef config-build
# ===========================================================================
# *config targets only - make sure prerequisites are updated, and descend
@@ -649,7 +647,8 @@ ifeq ($(KBUILD_EXTMOD),)
core-y := init/ usr/
drivers-y := drivers/ sound/
drivers-$(CONFIG_SAMPLES) += samples/
-drivers-y += net/ virt/
+drivers-$(CONFIG_NET) += net/
+drivers-y += virt/
libs-y := lib/
endif # KBUILD_EXTMOD
@@ -812,10 +811,12 @@ KBUILD_CFLAGS += -ftrivial-auto-var-init=zero
KBUILD_CFLAGS += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
endif
+DEBUG_CFLAGS :=
+
# Workaround for GCC versions < 5.0
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61801
ifdef CONFIG_CC_IS_GCC
-DEBUG_CFLAGS := $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
+DEBUG_CFLAGS += $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
endif
ifdef CONFIG_DEBUG_INFO
@@ -830,8 +831,10 @@ ifneq ($(LLVM_IAS),1)
KBUILD_AFLAGS += -Wa,-gdwarf-2
endif
-ifdef CONFIG_DEBUG_INFO_DWARF4
-DEBUG_CFLAGS += -gdwarf-4
+ifndef CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
+dwarf-version-$(CONFIG_DEBUG_INFO_DWARF4) := 4
+dwarf-version-$(CONFIG_DEBUG_INFO_DWARF5) := 5
+DEBUG_CFLAGS += -gdwarf-$(dwarf-version-y)
endif
ifdef CONFIG_DEBUG_INFO_REDUCED
@@ -851,12 +854,8 @@ KBUILD_CFLAGS += $(DEBUG_CFLAGS)
export DEBUG_CFLAGS
ifdef CONFIG_FUNCTION_TRACER
-ifdef CONFIG_FTRACE_MCOUNT_RECORD
- # gcc 5 supports generating the mcount tables directly
- ifeq ($(call cc-option-yn,-mrecord-mcount),y)
- CC_FLAGS_FTRACE += -mrecord-mcount
- export CC_USING_RECORD_MCOUNT := 1
- endif
+ifdef CONFIG_FTRACE_MCOUNT_USE_CC
+ CC_FLAGS_FTRACE += -mrecord-mcount
ifdef CONFIG_HAVE_NOP_MCOUNT
ifeq ($(call cc-option-yn, -mnop-mcount),y)
CC_FLAGS_FTRACE += -mnop-mcount
@@ -864,6 +863,15 @@ ifdef CONFIG_FTRACE_MCOUNT_RECORD
endif
endif
endif
+ifdef CONFIG_FTRACE_MCOUNT_USE_OBJTOOL
+ CC_FLAGS_USING += -DCC_USING_NOP_MCOUNT
+endif
+ifdef CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT
+ ifdef CONFIG_HAVE_C_RECORDMCOUNT
+ BUILD_C_RECORDMCOUNT := y
+ export BUILD_C_RECORDMCOUNT
+ endif
+endif
ifdef CONFIG_HAVE_FENTRY
ifeq ($(call cc-option-yn, -mfentry),y)
CC_FLAGS_FTRACE += -mfentry
@@ -873,12 +881,6 @@ endif
export CC_FLAGS_FTRACE
KBUILD_CFLAGS += $(CC_FLAGS_FTRACE) $(CC_FLAGS_USING)
KBUILD_AFLAGS += $(CC_FLAGS_USING)
-ifdef CONFIG_DYNAMIC_FTRACE
- ifdef CONFIG_HAVE_C_RECORDMCOUNT
- BUILD_C_RECORDMCOUNT := y
- export BUILD_C_RECORDMCOUNT
- endif
-endif
endif
# We trigger additional mismatches with less inlining
@@ -897,6 +899,25 @@ KBUILD_CFLAGS += $(CC_FLAGS_SCS)
export CC_FLAGS_SCS
endif
+ifdef CONFIG_LTO_CLANG
+ifdef CONFIG_LTO_CLANG_THIN
+CC_FLAGS_LTO := -flto=thin -fsplit-lto-unit
+KBUILD_LDFLAGS += --thinlto-cache-dir=$(extmod-prefix).thinlto-cache
+else
+CC_FLAGS_LTO := -flto
+endif
+CC_FLAGS_LTO += -fvisibility=hidden
+
+# Limit inlining across translation units to reduce binary size
+KBUILD_LDFLAGS += -mllvm -import-instr-limit=5
+endif
+
+ifdef CONFIG_LTO
+KBUILD_CFLAGS += -fno-lto $(CC_FLAGS_LTO)
+KBUILD_AFLAGS += -fno-lto
+export CC_FLAGS_LTO
+endif
+
ifdef CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_32B
KBUILD_CFLAGS += -falign-functions=32
endif
@@ -948,12 +969,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
# change __FILE__ to the relative path from the srctree
KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
-# ensure -fcf-protection is disabled when using retpoline as it is
-# incompatible with -mindirect-branch=thunk-extern
-ifdef CONFIG_RETPOLINE
-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
-endif
-
# include additional Makefiles when needed
include-y := scripts/Makefile.extrawarn
include-$(CONFIG_KASAN) += scripts/Makefile.kasan
@@ -1058,7 +1073,7 @@ ifdef CONFIG_MODULE_COMPRESS
mod_compress_cmd = $(KGZIP) -n -f
endif # CONFIG_MODULE_COMPRESS_GZIP
ifdef CONFIG_MODULE_COMPRESS_XZ
- mod_compress_cmd = $(XZ) -f
+ mod_compress_cmd = $(XZ) --lzma2=dict=2MiB -f
endif # CONFIG_MODULE_COMPRESS_XZ
endif # CONFIG_MODULE_COMPRESS
export mod_compress_cmd
@@ -1086,6 +1101,17 @@ ifdef CONFIG_STACK_VALIDATION
endif
endif
+PHONY += resolve_btfids_clean
+
+resolve_btfids_O = $(abspath $(objtree))/tools/bpf/resolve_btfids
+
+# tools/bpf/resolve_btfids directory might not exist
+# in output directory, skip its clean in that case
+resolve_btfids_clean:
+ifneq ($(wildcard $(resolve_btfids_O)),)
+ $(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean
+endif
+
ifdef CONFIG_BPF
ifdef CONFIG_DEBUG_INFO_BTF
ifeq ($(has_libelf),1)
@@ -1098,6 +1124,7 @@ endif # CONFIG_BPF
PHONY += prepare0
+extmod-prefix = $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/)
export MODORDER := $(extmod-prefix)modules.order
export MODULES_NSDEPS := $(extmod-prefix)modules.nsdeps
@@ -1222,6 +1249,10 @@ uapi-asm-generic:
PHONY += prepare-objtool prepare-resolve_btfids
prepare-objtool: $(objtool_target)
ifeq ($(SKIP_STACK_VALIDATION),1)
+ifdef CONFIG_FTRACE_MCOUNT_USE_OBJTOOL
+ @echo "error: Cannot generate __mcount_loc for CONFIG_DYNAMIC_FTRACE=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
+ @false
+endif
ifdef CONFIG_UNWINDER_ORC
@echo "error: Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
@false
@@ -1251,14 +1282,24 @@ define filechk_utsrelease.h
endef
define filechk_version.h
- echo \#define LINUX_VERSION_CODE $(shell \
- expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \
- echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))'
+ if [ $(SUBLEVEL) -gt 255 ]; then \
+ echo \#define LINUX_VERSION_CODE $(shell \
+ expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + 255); \
+ else \
+ echo \#define LINUX_VERSION_CODE $(shell \
+ expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + $(SUBLEVEL)); \
+ fi; \
+ echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + \
+ ((c) > 255 ? 255 : (c)))'; \
+ echo \#define LINUX_VERSION_MAJOR $(VERSION); \
+ echo \#define LINUX_VERSION_PATCHLEVEL $(PATCHLEVEL); \
+ echo \#define LINUX_VERSION_SUBLEVEL $(SUBLEVEL)
endef
+$(version_h): PATCHLEVEL := $(if $(PATCHLEVEL), $(PATCHLEVEL), 0)
+$(version_h): SUBLEVEL := $(if $(SUBLEVEL), $(SUBLEVEL), 0)
$(version_h): FORCE
$(call filechk,version.h)
- $(Q)rm -f $(old_version_h)
include/generated/utsrelease.h: include/config/kernel.release FORCE
$(call filechk,utsrelease.h)
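As a worked example of the new filechk_version.h recipe above (not part of the patch): with VERSION=5, PATCHLEVEL=12 and SUBLEVEL=0, the generated include/generated/uapi/linux/version.h would contain

    #define LINUX_VERSION_CODE 330752    /* 5*65536 + 12*256 + 0 */
    #define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))
    #define LINUX_VERSION_MAJOR 5
    #define LINUX_VERSION_PATCHLEVEL 12
    #define LINUX_VERSION_SUBLEVEL 0

and a SUBLEVEL above 255 would be clamped to 255 in LINUX_VERSION_CODE while LINUX_VERSION_SUBLEVEL still reports the exact value.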
@@ -1337,6 +1378,9 @@ ifneq ($(dtstree),)
%.dtb: include/config/kernel.release scripts_dtc
$(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
+%.dtbo: include/config/kernel.release scripts_dtc
+ $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
+
PHONY += dtbs dtbs_install dtbs_check
dtbs: include/config/kernel.release scripts_dtc
$(Q)$(MAKE) $(build)=$(dtstree)
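With the %.dtbo rule added above, a single device tree overlay can now be built the same way an individual .dtb already could; a hypothetical invocation (the overlay path is a placeholder):

    make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- vendor/board-overlay.dtbo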
@@ -1469,7 +1513,7 @@ endif # CONFIG_MODULES
# Directories & files removed with 'make clean'
CLEAN_FILES += include/ksym vmlinux.symvers \
modules.builtin modules.builtin.modinfo modules.nsdeps \
- compile_commands.json
+ compile_commands.json .thinlto-cache
# Directories & files removed with 'make mrproper'
MRPROPER_FILES += include/config include/generated \
@@ -1495,7 +1539,7 @@ vmlinuxclean:
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
$(Q)$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) clean)
-clean: archclean vmlinuxclean
+clean: archclean vmlinuxclean resolve_btfids_clean
# mrproper - Delete all generated files, including .config
#
@@ -1729,7 +1773,7 @@ PHONY += compile_commands.json
clean-dirs := $(KBUILD_EXTMOD)
clean: rm-files := $(KBUILD_EXTMOD)/Module.symvers $(KBUILD_EXTMOD)/modules.nsdeps \
- $(KBUILD_EXTMOD)/compile_commands.json
+ $(KBUILD_EXTMOD)/compile_commands.json $(KBUILD_EXTMOD)/.thinlto-cache
PHONY += help
help:
@@ -1816,7 +1860,7 @@ clean: $(clean-dirs)
@find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
\( -name '*.[aios]' -o -name '*.ko' -o -name '.*.cmd' \
-o -name '*.ko.*' \
- -o -name '*.dtb' -o -name '*.dtb.S' -o -name '*.dt.yaml' \
+ -o -name '*.dtb' -o -name '*.dtbo' -o -name '*.dtb.S' -o -name '*.dt.yaml' \
-o -name '*.dwo' -o -name '*.lst' \
-o -name '*.su' -o -name '*.mod' \
-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
@@ -1826,7 +1870,8 @@ clean: $(clean-dirs)
-o -name '.tmp_*.o.*' \
-o -name '*.c.[012]*.*' \
-o -name '*.ll' \
- -o -name '*.gcno' \) -type f -print | xargs rm -f
+ -o -name '*.gcno' \
+ -o -name '*.*.symversions' \) -type f -print | xargs rm -f
# Generate tags for editors
# ---------------------------------------------------------------------------
diff --git a/arch/Kconfig b/arch/Kconfig
index 24862d15f3a3..2bb30673d8e6 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -33,38 +33,6 @@ config HOTPLUG_SMT
config GENERIC_ENTRY
bool
-config OPROFILE
- tristate "OProfile system profiling"
- depends on PROFILING
- depends on HAVE_OPROFILE
- select RING_BUFFER
- select RING_BUFFER_ALLOW_SWAP
- help
- OProfile is a profiling system capable of profiling the
- whole system, include the kernel, kernel modules, libraries,
- and applications.
-
- If unsure, say N.
-
-config OPROFILE_EVENT_MULTIPLEX
- bool "OProfile multiplexing support (EXPERIMENTAL)"
- default n
- depends on OPROFILE && X86
- help
- The number of hardware counters is limited. The multiplexing
- feature enables OProfile to gather more events than counters
- are provided by the hardware. This is realized by switching
- between events at a user specified time interval.
-
- If unsure, say N.
-
-config HAVE_OPROFILE
- bool
-
-config OPROFILE_NMI_TIMER
- def_bool y
- depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !PPC64
-
config KPROBES
bool "Kprobes"
depends on MODULES
@@ -156,8 +124,8 @@ config HAVE_64BIT_ALIGNED_ACCESS
accesses are required to be 64 bit aligned in this way even
though it is not a 64 bit architecture.
- See Documentation/unaligned-memory-access.txt for more
- information on the topic of unaligned memory accesses.
+ See Documentation/core-api/unaligned-memory-access.rst for
+ more information on the topic of unaligned memory accesses.
config HAVE_EFFICIENT_UNALIGNED_ACCESS
bool
@@ -327,6 +295,10 @@ config ARCH_32BIT_OFF_T
still support 32-bit off_t. This option is enabled for all such
architectures explicitly.
+# Selected by 64 bit architectures which have a 32 bit f_tinode in struct ustat
+config ARCH_32BIT_USTAT_F_TINODE
+ bool
+
config HAVE_ASM_MODVERSIONS
bool
help
@@ -631,6 +603,96 @@ config SHADOW_CALL_STACK
reading and writing arbitrary memory may be able to locate them
and hijack control flow by modifying the stacks.
+config LTO
+ bool
+ help
+ Selected if the kernel will be built using the compiler's LTO feature.
+
+config LTO_CLANG
+ bool
+ select LTO
+ help
+ Selected if the kernel will be built using Clang's LTO feature.
+
+config ARCH_SUPPORTS_LTO_CLANG
+ bool
+ help
+ An architecture should select this option if it supports:
+ - compiling with Clang,
+ - compiling inline assembly with Clang's integrated assembler,
+ - and linking with LLD.
+
+config ARCH_SUPPORTS_LTO_CLANG_THIN
+ bool
+ help
+ An architecture should select this option if it can support Clang's
+ ThinLTO mode.
+
+config HAS_LTO_CLANG
+ def_bool y
+ # Clang >= 11: https://github.com/ClangBuiltLinux/linux/issues/510
+ depends on CC_IS_CLANG && CLANG_VERSION >= 110000 && LD_IS_LLD
+ depends on $(success,test $(LLVM) -eq 1)
+ depends on $(success,test $(LLVM_IAS) -eq 1)
+ depends on $(success,$(NM) --help | head -n 1 | grep -qi llvm)
+ depends on $(success,$(AR) --help | head -n 1 | grep -qi llvm)
+ depends on ARCH_SUPPORTS_LTO_CLANG
+ depends on !FTRACE_MCOUNT_USE_RECORDMCOUNT
+ depends on !KASAN
+ depends on !GCOV_KERNEL
+ help
+ The compiler and Kconfig options support building with Clang's
+ LTO.
+
+choice
+ prompt "Link Time Optimization (LTO)"
+ default LTO_NONE
+ help
+ This option enables Link Time Optimization (LTO), which allows the
+ compiler to optimize binaries globally.
+
+ If unsure, select LTO_NONE. Note that LTO is very resource-intensive
+ so it's disabled by default.
+
+config LTO_NONE
+ bool "None"
+ help
+ Build the kernel normally, without Link Time Optimization (LTO).
+
+config LTO_CLANG_FULL
+ bool "Clang Full LTO (EXPERIMENTAL)"
+ depends on HAS_LTO_CLANG
+ depends on !COMPILE_TEST
+ select LTO_CLANG
+ help
+ This option enables Clang's full Link Time Optimization (LTO), which
+ allows the compiler to optimize the kernel globally. If you enable
+ this option, the compiler generates LLVM bitcode instead of ELF
+ object files, and the actual compilation from bitcode happens at
+ the LTO link step, which may take several minutes depending on the
+ kernel configuration. More information can be found from LLVM's
+ documentation:
+
+ https://llvm.org/docs/LinkTimeOptimization.html
+
+ During link time, this option can use a large amount of RAM, and
+ may take much longer than the ThinLTO option.
+
+config LTO_CLANG_THIN
+ bool "Clang ThinLTO (EXPERIMENTAL)"
+ depends on HAS_LTO_CLANG && ARCH_SUPPORTS_LTO_CLANG_THIN
+ select LTO_CLANG
+ help
+ This option enables Clang's ThinLTO, which allows for parallel
+ optimization and faster incremental compiles compared to the
+ CONFIG_LTO_CLANG_FULL option. More information can be found
+ from Clang's documentation:
+
+ https://clang.llvm.org/docs/ThinLTO.html
+
+ If unsure, say Y.
+endchoice
+
config HAVE_ARCH_WITHIN_STACK_FRAMES
bool
help
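A possible way to exercise the new LTO_NONE/LTO_CLANG_FULL/LTO_CLANG_THIN choice above, sketched rather than taken from the patch (per HAS_LTO_CLANG it assumes an LLVM toolchain with LLD and the integrated assembler):

    make LLVM=1 LLVM_IAS=1 defconfig
    scripts/config -e LTO_CLANG_THIN
    make LLVM=1 LLVM_IAS=1 olddefconfig
    make LLVM=1 LLVM_IAS=1 -j$(nproc)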
@@ -759,6 +821,12 @@ config HAVE_IRQ_EXIT_ON_IRQ_STACK
This spares a stack switch and improves cache usage on softirq
processing.
+config HAVE_SOFTIRQ_ON_OWN_STACK
+ bool
+ help
+ Architecture provides a function to run __do_softirq() on a
+ separate stack.
+
config PGTABLE_LEVELS
int
default 2
@@ -1090,6 +1158,15 @@ config HAVE_STATIC_CALL_INLINE
bool
depends on HAVE_STATIC_CALL
+config HAVE_PREEMPT_DYNAMIC
+ bool
+ depends on HAVE_STATIC_CALL
+ depends on GENERIC_ENTRY
+ help
+ Select this if the architecture supports boot time preempt setting
+ on top of static calls. It is strongly advised to support inline
+ static calls to avoid any overhead.
+
config ARCH_WANT_LD_ORPHAN_WARN
bool
help
@@ -1111,6 +1188,9 @@ config ARCH_SPLIT_ARG64
If a 32-bit architecture requires 64-bit arguments to be split into
pairs of 32-bit arguments, select this option.
+config ARCH_HAS_ELFCORE_COMPAT
+ bool
+
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 1f51437d5765..5998106faa60 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -2,6 +2,7 @@
config ALPHA
bool
default y
+ select ARCH_32BIT_USTAT_F_TINODE
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_NO_PREEMPT
@@ -14,7 +15,6 @@ config ALPHA
select HAVE_AOUT
select HAVE_ASM_MODVERSIONS
select HAVE_IDE
- select HAVE_OPROFILE
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
select NEED_DMA_MAP_STATE
diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile
index 12dee59b011c..c2946431d88d 100644
--- a/arch/alpha/Makefile
+++ b/arch/alpha/Makefile
@@ -40,7 +40,6 @@ head-y := arch/alpha/kernel/head.o
core-y += arch/alpha/kernel/ arch/alpha/mm/
core-$(CONFIG_MATHEMU) += arch/alpha/math-emu/
-drivers-$(CONFIG_OPROFILE) += arch/alpha/oprofile/
libs-y += arch/alpha/lib/
# export what is needed by arch/alpha/boot/Makefile
diff --git a/arch/alpha/configs/defconfig b/arch/alpha/configs/defconfig
index 6293675db164..724c4075df40 100644
--- a/arch/alpha/configs/defconfig
+++ b/arch/alpha/configs/defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 6c71554206cc..5112ab996394 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -249,7 +249,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childti->pcb.ksp = (unsigned long) childstack;
childti->pcb.flags = 1; /* set FEN, clear everything else */
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
/* kernel thread */
memset(childstack, 0,
sizeof(struct switch_stack) + sizeof(struct pt_regs));
diff --git a/arch/alpha/kernel/syscalls/Makefile b/arch/alpha/kernel/syscalls/Makefile
index 659faefdcb1d..285aaba832d9 100644
--- a/arch/alpha/kernel/syscalls/Makefile
+++ b/arch/alpha/kernel/syscalls/Makefile
@@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm
_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
-syscall := $(srctree)/$(src)/syscall.tbl
+syscall := $(src)/syscall.tbl
syshdr := $(srctree)/$(src)/syscallhdr.sh
systbl := $(srctree)/$(src)/syscalltbl.sh
@@ -21,18 +21,19 @@ quiet_cmd_systbl = SYSTBL $@
'$(systbl_abi_$(basetarget))' \
'$(systbl_offset_$(basetarget))'
-$(uapi)/unistd_32.h: $(syscall) $(syshdr)
+$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-$(kapi)/syscall_table.h: $(syscall) $(systbl)
+$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
uapisyshdr-y += unistd_32.h
kapisyshdr-y += syscall_table.h
-targets += $(uapisyshdr-y) $(kapisyshdr-y)
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
PHONY += all
-all: $(addprefix $(uapi)/,$(uapisyshdr-y))
-all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+all: $(uapisyshdr-y) $(kapisyshdr-y)
@:
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index a6617067dbe6..02f0244e005c 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -481,3 +481,4 @@
549 common faccessat2 sys_faccessat2
550 common process_madvise sys_process_madvise
551 common epoll_pwait2 sys_epoll_pwait2
+552 common mount_setattr sys_mount_setattr
diff --git a/arch/alpha/oprofile/Makefile b/arch/alpha/oprofile/Makefile
deleted file mode 100644
index 79f32820a42f..000000000000
--- a/arch/alpha/oprofile/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -Werror -Wno-sign-compare
-
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) common.o
-oprofile-$(CONFIG_ALPHA_GENERIC) += op_model_ev4.o \
- op_model_ev5.o \
- op_model_ev6.o \
- op_model_ev67.o
-oprofile-$(CONFIG_ALPHA_EV4) += op_model_ev4.o
-oprofile-$(CONFIG_ALPHA_EV5) += op_model_ev5.o
-oprofile-$(CONFIG_ALPHA_EV6) += op_model_ev6.o \
- op_model_ev67.o
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
deleted file mode 100644
index 1b1259c7d7d1..000000000000
--- a/arch/alpha/oprofile/common.c
+++ /dev/null
@@ -1,189 +0,0 @@
-/**
- * @file arch/alpha/oprofile/common.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Richard Henderson <rth@twiddle.net>
- */
-
-#include <linux/oprofile.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <asm/ptrace.h>
-#include <asm/special_insns.h>
-
-#include "op_impl.h"
-
-extern struct op_axp_model op_model_ev4 __attribute__((weak));
-extern struct op_axp_model op_model_ev5 __attribute__((weak));
-extern struct op_axp_model op_model_pca56 __attribute__((weak));
-extern struct op_axp_model op_model_ev6 __attribute__((weak));
-extern struct op_axp_model op_model_ev67 __attribute__((weak));
-
-static struct op_axp_model *model;
-
-extern void (*perf_irq)(unsigned long, struct pt_regs *);
-static void (*save_perf_irq)(unsigned long, struct pt_regs *);
-
-static struct op_counter_config ctr[20];
-static struct op_system_config sys;
-static struct op_register_config reg;
-
-/* Called from do_entInt to handle the performance monitor interrupt. */
-
-static void
-op_handle_interrupt(unsigned long which, struct pt_regs *regs)
-{
- model->handle_interrupt(which, regs, ctr);
-
- /* If the user has selected an interrupt frequency that is
- not exactly the width of the counter, write a new value
- into the counter such that it'll overflow after N more
- events. */
- if ((reg.need_reset >> which) & 1)
- model->reset_ctr(&reg, which);
-}
-
-static int
-op_axp_setup(void)
-{
- unsigned long i, e;
-
- /* Install our interrupt handler into the existing hook. */
- save_perf_irq = perf_irq;
- perf_irq = op_handle_interrupt;
-
- /* Compute the mask of enabled counters. */
- for (i = e = 0; i < model->num_counters; ++i)
- if (ctr[i].enabled)
- e |= 1 << i;
- reg.enable = e;
-
- /* Pre-compute the values to stuff in the hardware registers. */
- model->reg_setup(&reg, ctr, &sys);
-
- /* Configure the registers on all cpus. */
- smp_call_function(model->cpu_setup, &reg, 1);
- model->cpu_setup(&reg);
- return 0;
-}
-
-static void
-op_axp_shutdown(void)
-{
- /* Remove our interrupt handler. We may be removing this module. */
- perf_irq = save_perf_irq;
-}
-
-static void
-op_axp_cpu_start(void *dummy)
-{
- wrperfmon(1, reg.enable);
-}
-
-static int
-op_axp_start(void)
-{
- smp_call_function(op_axp_cpu_start, NULL, 1);
- op_axp_cpu_start(NULL);
- return 0;
-}
-
-static inline void
-op_axp_cpu_stop(void *dummy)
-{
- /* Disable performance monitoring for all counters. */
- wrperfmon(0, -1);
-}
-
-static void
-op_axp_stop(void)
-{
- smp_call_function(op_axp_cpu_stop, NULL, 1);
- op_axp_cpu_stop(NULL);
-}
-
-static int
-op_axp_create_files(struct dentry *root)
-{
- int i;
-
- for (i = 0; i < model->num_counters; ++i) {
- struct dentry *dir;
- char buf[4];
-
- snprintf(buf, sizeof buf, "%d", i);
- dir = oprofilefs_mkdir(root, buf);
-
- oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
- oprofilefs_create_ulong(dir, "event", &ctr[i].event);
- oprofilefs_create_ulong(dir, "count", &ctr[i].count);
- /* Dummies. */
- oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
- oprofilefs_create_ulong(dir, "user", &ctr[i].user);
- oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
- }
-
- if (model->can_set_proc_mode) {
- oprofilefs_create_ulong(root, "enable_pal",
- &sys.enable_pal);
- oprofilefs_create_ulong(root, "enable_kernel",
- &sys.enable_kernel);
- oprofilefs_create_ulong(root, "enable_user",
- &sys.enable_user);
- }
-
- return 0;
-}
-
-int __init
-oprofile_arch_init(struct oprofile_operations *ops)
-{
- struct op_axp_model *lmodel = NULL;
-
- switch (implver()) {
- case IMPLVER_EV4:
- lmodel = &op_model_ev4;
- break;
- case IMPLVER_EV5:
- /* 21164PC has a slightly different set of events.
- Recognize the chip by the presence of the MAX insns. */
- if (!amask(AMASK_MAX))
- lmodel = &op_model_pca56;
- else
- lmodel = &op_model_ev5;
- break;
- case IMPLVER_EV6:
- /* 21264A supports ProfileMe.
- Recognize the chip by the presence of the CIX insns. */
- if (!amask(AMASK_CIX))
- lmodel = &op_model_ev67;
- else
- lmodel = &op_model_ev6;
- break;
- }
-
- if (!lmodel)
- return -ENODEV;
- model = lmodel;
-
- ops->create_files = op_axp_create_files;
- ops->setup = op_axp_setup;
- ops->shutdown = op_axp_shutdown;
- ops->start = op_axp_start;
- ops->stop = op_axp_stop;
- ops->cpu_type = lmodel->cpu_type;
-
- printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
- lmodel->cpu_type);
-
- return 0;
-}
-
-
-void
-oprofile_arch_exit(void)
-{
-}
diff --git a/arch/alpha/oprofile/op_impl.h b/arch/alpha/oprofile/op_impl.h
deleted file mode 100644
index b2b87ae9a353..000000000000
--- a/arch/alpha/oprofile/op_impl.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * @file arch/alpha/oprofile/op_impl.h
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Richard Henderson <rth@twiddle.net>
- */
-
-#ifndef OP_IMPL_H
-#define OP_IMPL_H 1
-
-/* Per-counter configuration as set via oprofilefs. */
-struct op_counter_config {
- unsigned long enabled;
- unsigned long event;
- unsigned long count;
- /* Dummies because I am too lazy to hack the userspace tools. */
- unsigned long kernel;
- unsigned long user;
- unsigned long unit_mask;
-};
-
-/* System-wide configuration as set via oprofilefs. */
-struct op_system_config {
- unsigned long enable_pal;
- unsigned long enable_kernel;
- unsigned long enable_user;
-};
-
-/* Cached values for the various performance monitoring registers. */
-struct op_register_config {
- unsigned long enable;
- unsigned long mux_select;
- unsigned long proc_mode;
- unsigned long freq;
- unsigned long reset_values;
- unsigned long need_reset;
-};
-
-/* Per-architecture configuration and hooks. */
-struct op_axp_model {
- void (*reg_setup) (struct op_register_config *,
- struct op_counter_config *,
- struct op_system_config *);
- void (*cpu_setup) (void *);
- void (*reset_ctr) (struct op_register_config *, unsigned long);
- void (*handle_interrupt) (unsigned long, struct pt_regs *,
- struct op_counter_config *);
- char *cpu_type;
- unsigned char num_counters;
- unsigned char can_set_proc_mode;
-};
-
-#endif
diff --git a/arch/alpha/oprofile/op_model_ev4.c b/arch/alpha/oprofile/op_model_ev4.c
deleted file mode 100644
index 086a0d5445c5..000000000000
--- a/arch/alpha/oprofile/op_model_ev4.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * @file arch/alpha/oprofile/op_model_ev4.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Richard Henderson <rth@twiddle.net>
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/ptrace.h>
-
-#include "op_impl.h"
-
-
-/* Compute all of the registers in preparation for enabling profiling. */
-
-static void
-ev4_reg_setup(struct op_register_config *reg,
- struct op_counter_config *ctr,
- struct op_system_config *sys)
-{
- unsigned long ctl = 0, count, hilo;
-
- /* Select desired events. We've mapped the event numbers
- such that they fit directly into the event selection fields.
-
- Note that there is no "off" setting. In both cases we select
- the EXTERNAL event source, hoping that it'll be the lowest
- frequency, and set the frequency counter to LOW. The interrupts
- for these "disabled" counter overflows are ignored by the
- interrupt handler.
-
- This is most irritating, because the hardware *can* enable and
- disable the interrupts for these counters independently, but the
- wrperfmon interface doesn't allow it. */
-
- ctl |= (ctr[0].enabled ? ctr[0].event << 8 : 14 << 8);
- ctl |= (ctr[1].enabled ? (ctr[1].event - 16) << 32 : 7ul << 32);
-
- /* EV4 can not read or write its counter registers. The only
- thing one can do at all is see if you overflow and get an
- interrupt. We can set the width of the counters, to some
- extent. Take the interrupt count selected by the user,
- map it onto one of the possible values, and write it back. */
-
- count = ctr[0].count;
- if (count <= 4096)
- count = 4096, hilo = 1;
- else
- count = 65536, hilo = 0;
- ctr[0].count = count;
- ctl |= (ctr[0].enabled && hilo) << 3;
-
- count = ctr[1].count;
- if (count <= 256)
- count = 256, hilo = 1;
- else
- count = 4096, hilo = 0;
- ctr[1].count = count;
- ctl |= (ctr[1].enabled && hilo);
-
- reg->mux_select = ctl;
-
- /* Select performance monitoring options. */
- /* ??? Need to come up with some mechanism to trace only
- selected processes. EV4 does not have a mechanism to
- select kernel or user mode only. For now, enable always. */
- reg->proc_mode = 0;
-
- /* Frequency is folded into mux_select for EV4. */
- reg->freq = 0;
-
- /* See above regarding no writes. */
- reg->reset_values = 0;
- reg->need_reset = 0;
-
-}
-
-/* Program all of the registers in preparation for enabling profiling. */
-
-static void
-ev4_cpu_setup(void *x)
-{
- struct op_register_config *reg = x;
-
- wrperfmon(2, reg->mux_select);
- wrperfmon(3, reg->proc_mode);
-}
-
-static void
-ev4_handle_interrupt(unsigned long which, struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- /* EV4 can't properly disable counters individually.
- Discard "disabled" events now. */
- if (!ctr[which].enabled)
- return;
-
- /* Record the sample. */
- oprofile_add_sample(regs, which);
-}
-
-
-struct op_axp_model op_model_ev4 = {
- .reg_setup = ev4_reg_setup,
- .cpu_setup = ev4_cpu_setup,
- .reset_ctr = NULL,
- .handle_interrupt = ev4_handle_interrupt,
- .cpu_type = "alpha/ev4",
- .num_counters = 2,
- .can_set_proc_mode = 0,
-};
diff --git a/arch/alpha/oprofile/op_model_ev5.c b/arch/alpha/oprofile/op_model_ev5.c
deleted file mode 100644
index c300f5ef3482..000000000000
--- a/arch/alpha/oprofile/op_model_ev5.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * @file arch/alpha/oprofile/op_model_ev5.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Richard Henderson <rth@twiddle.net>
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/ptrace.h>
-
-#include "op_impl.h"
-
-
-/* Compute all of the registers in preparation for enabling profiling.
-
- The 21164 (EV5) and 21164PC (PCA65) vary in the bit placement and
- meaning of the "CBOX" events. Given that we don't care about meaning
- at this point, arrange for the difference in bit placement to be
- handled by common code. */
-
-static void
-common_reg_setup(struct op_register_config *reg,
- struct op_counter_config *ctr,
- struct op_system_config *sys,
- int cbox1_ofs, int cbox2_ofs)
-{
- int i, ctl, reset, need_reset;
-
- /* Select desired events. The event numbers are selected such
- that they map directly into the event selection fields:
-
- PCSEL0: 0, 1
- PCSEL1: 24-39
- CBOX1: 40-47
- PCSEL2: 48-63
- CBOX2: 64-71
-
- There are two special cases, in that CYCLES can be measured
- on PCSEL[02], and SCACHE_WRITE can be measured on CBOX[12].
- These event numbers are canonicalizes to their first appearance. */
-
- ctl = 0;
- for (i = 0; i < 3; ++i) {
- unsigned long event = ctr[i].event;
- if (!ctr[i].enabled)
- continue;
-
- /* Remap the duplicate events, as described above. */
- if (i == 2) {
- if (event == 0)
- event = 12+48;
- else if (event == 2+41)
- event = 4+65;
- }
-
- /* Convert the event numbers onto mux_select bit mask. */
- if (event < 2)
- ctl |= event << 31;
- else if (event < 24)
- /* error */;
- else if (event < 40)
- ctl |= (event - 24) << 4;
- else if (event < 48)
- ctl |= (event - 40) << cbox1_ofs | 15 << 4;
- else if (event < 64)
- ctl |= event - 48;
- else if (event < 72)
- ctl |= (event - 64) << cbox2_ofs | 15;
- }
- reg->mux_select = ctl;
-
- /* Select processor mode. */
- /* ??? Need to come up with some mechanism to trace only selected
- processes. For now select from pal, kernel and user mode. */
- ctl = 0;
- ctl |= !sys->enable_pal << 9;
- ctl |= !sys->enable_kernel << 8;
- ctl |= !sys->enable_user << 30;
- reg->proc_mode = ctl;
-
- /* Select interrupt frequencies. Take the interrupt count selected
- by the user, and map it onto one of the possible counter widths.
- If the user value is in between, compute a value to which the
- counter is reset at each interrupt. */
-
- ctl = reset = need_reset = 0;
- for (i = 0; i < 3; ++i) {
- unsigned long max, hilo, count = ctr[i].count;
- if (!ctr[i].enabled)
- continue;
-
- if (count <= 256)
- count = 256, hilo = 3, max = 256;
- else {
- max = (i == 2 ? 16384 : 65536);
- hilo = 2;
- if (count > max)
- count = max;
- }
- ctr[i].count = count;
-
- ctl |= hilo << (8 - i*2);
- reset |= (max - count) << (48 - 16*i);
- if (count != max)
- need_reset |= 1 << i;
- }
- reg->freq = ctl;
- reg->reset_values = reset;
- reg->need_reset = need_reset;
-}
-
-static void
-ev5_reg_setup(struct op_register_config *reg,
- struct op_counter_config *ctr,
- struct op_system_config *sys)
-{
- common_reg_setup(reg, ctr, sys, 19, 22);
-}
-
-static void
-pca56_reg_setup(struct op_register_config *reg,
- struct op_counter_config *ctr,
- struct op_system_config *sys)
-{
- common_reg_setup(reg, ctr, sys, 8, 11);
-}
-
-/* Program all of the registers in preparation for enabling profiling. */
-
-static void
-ev5_cpu_setup (void *x)
-{
- struct op_register_config *reg = x;
-
- wrperfmon(2, reg->mux_select);
- wrperfmon(3, reg->proc_mode);
- wrperfmon(4, reg->freq);
- wrperfmon(6, reg->reset_values);
-}
-
-/* CTR is a counter for which the user has requested an interrupt count
- in between one of the widths selectable in hardware. Reset the count
- for CTR to the value stored in REG->RESET_VALUES.
-
- For EV5, this means disabling profiling, reading the current values,
- masking in the value for the desired register, writing, then turning
- profiling back on.
-
- This can be streamlined if profiling is only enabled for user mode.
- In that case we know that the counters are not currently incrementing
- (due to being in kernel mode). */
-
-static void
-ev5_reset_ctr(struct op_register_config *reg, unsigned long ctr)
-{
- unsigned long values, mask, not_pk, reset_values;
-
- mask = (ctr == 0 ? 0xfffful << 48
- : ctr == 1 ? 0xfffful << 32
- : 0x3fff << 16);
-
- not_pk = 1 << 9 | 1 << 8;
-
- reset_values = reg->reset_values;
-
- if ((reg->proc_mode & not_pk) == not_pk) {
- values = wrperfmon(5, 0);
- values = (reset_values & mask) | (values & ~mask & -2);
- wrperfmon(6, values);
- } else {
- wrperfmon(0, -1);
- values = wrperfmon(5, 0);
- values = (reset_values & mask) | (values & ~mask & -2);
- wrperfmon(6, values);
- wrperfmon(1, reg->enable);
- }
-}
-
-static void
-ev5_handle_interrupt(unsigned long which, struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- /* Record the sample. */
- oprofile_add_sample(regs, which);
-}
-
-
-struct op_axp_model op_model_ev5 = {
- .reg_setup = ev5_reg_setup,
- .cpu_setup = ev5_cpu_setup,
- .reset_ctr = ev5_reset_ctr,
- .handle_interrupt = ev5_handle_interrupt,
- .cpu_type = "alpha/ev5",
- .num_counters = 3,
- .can_set_proc_mode = 1,
-};
-
-struct op_axp_model op_model_pca56 = {
- .reg_setup = pca56_reg_setup,
- .cpu_setup = ev5_cpu_setup,
- .reset_ctr = ev5_reset_ctr,
- .handle_interrupt = ev5_handle_interrupt,
- .cpu_type = "alpha/pca56",
- .num_counters = 3,
- .can_set_proc_mode = 1,
-};
diff --git a/arch/alpha/oprofile/op_model_ev6.c b/arch/alpha/oprofile/op_model_ev6.c
deleted file mode 100644
index 02edf5971614..000000000000
--- a/arch/alpha/oprofile/op_model_ev6.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * @file arch/alpha/oprofile/op_model_ev6.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Richard Henderson <rth@twiddle.net>
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/ptrace.h>
-
-#include "op_impl.h"
-
-
-/* Compute all of the registers in preparation for enabling profiling. */
-
-static void
-ev6_reg_setup(struct op_register_config *reg,
- struct op_counter_config *ctr,
- struct op_system_config *sys)
-{
- unsigned long ctl, reset, need_reset, i;
-
- /* Select desired events. We've mapped the event numbers
- such that they fit directly into the event selection fields. */
- ctl = 0;
- if (ctr[0].enabled && ctr[0].event)
- ctl |= (ctr[0].event & 1) << 4;
- if (ctr[1].enabled)
- ctl |= (ctr[1].event - 2) & 15;
- reg->mux_select = ctl;
-
- /* Select logging options. */
- /* ??? Need to come up with some mechanism to trace only
- selected processes. EV6 does not have a mechanism to
- select kernel or user mode only. For now, enable always. */
- reg->proc_mode = 0;
-
- /* EV6 cannot change the width of the counters as with the
- other implementations. But fortunately, we can write to
- the counters and set the value such that it will overflow
- at the right time. */
- reset = need_reset = 0;
- for (i = 0; i < 2; ++i) {
- unsigned long count = ctr[i].count;
- if (!ctr[i].enabled)
- continue;
-
- if (count > 0x100000)
- count = 0x100000;
- ctr[i].count = count;
- reset |= (0x100000 - count) << (i ? 6 : 28);
- if (count != 0x100000)
- need_reset |= 1 << i;
- }
- reg->reset_values = reset;
- reg->need_reset = need_reset;
-}
-
-/* Program all of the registers in preparation for enabling profiling. */
-
-static void
-ev6_cpu_setup (void *x)
-{
- struct op_register_config *reg = x;
-
- wrperfmon(2, reg->mux_select);
- wrperfmon(3, reg->proc_mode);
- wrperfmon(6, reg->reset_values | 3);
-}
-
-/* CTR is a counter for which the user has requested an interrupt count
- in between one of the widths selectable in hardware. Reset the count
- for CTR to the value stored in REG->RESET_VALUES. */
-
-static void
-ev6_reset_ctr(struct op_register_config *reg, unsigned long ctr)
-{
- wrperfmon(6, reg->reset_values | (1 << ctr));
-}
-
-static void
-ev6_handle_interrupt(unsigned long which, struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- /* Record the sample. */
- oprofile_add_sample(regs, which);
-}
-
-
-struct op_axp_model op_model_ev6 = {
- .reg_setup = ev6_reg_setup,
- .cpu_setup = ev6_cpu_setup,
- .reset_ctr = ev6_reset_ctr,
- .handle_interrupt = ev6_handle_interrupt,
- .cpu_type = "alpha/ev6",
- .num_counters = 2,
- .can_set_proc_mode = 0,
-};
diff --git a/arch/alpha/oprofile/op_model_ev67.c b/arch/alpha/oprofile/op_model_ev67.c
deleted file mode 100644
index adb1744d20f3..000000000000
--- a/arch/alpha/oprofile/op_model_ev67.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/**
- * @file arch/alpha/oprofile/op_model_ev67.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Richard Henderson <rth@twiddle.net>
- * @author Falk Hueffner <falk@debian.org>
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/ptrace.h>
-
-#include "op_impl.h"
-
-
-/* Compute all of the registers in preparation for enabling profiling. */
-
-static void
-ev67_reg_setup(struct op_register_config *reg,
- struct op_counter_config *ctr,
- struct op_system_config *sys)
-{
- unsigned long ctl, reset, need_reset, i;
-
- /* Select desired events. */
- ctl = 1UL << 4; /* Enable ProfileMe mode. */
-
- /* The event numbers are chosen so we can use them directly if
- PCTR1 is enabled. */
- if (ctr[1].enabled) {
- ctl |= (ctr[1].event & 3) << 2;
- } else {
- if (ctr[0].event == 0) /* cycles */
- ctl |= 1UL << 2;
- }
- reg->mux_select = ctl;
-
- /* Select logging options. */
- /* ??? Need to come up with some mechanism to trace only
- selected processes. EV67 does not have a mechanism to
- select kernel or user mode only. For now, enable always. */
- reg->proc_mode = 0;
-
- /* EV67 cannot change the width of the counters as with the
- other implementations. But fortunately, we can write to
- the counters and set the value such that it will overflow
- at the right time. */
- reset = need_reset = 0;
- for (i = 0; i < 2; ++i) {
- unsigned long count = ctr[i].count;
- if (!ctr[i].enabled)
- continue;
-
- if (count > 0x100000)
- count = 0x100000;
- ctr[i].count = count;
- reset |= (0x100000 - count) << (i ? 6 : 28);
- if (count != 0x100000)
- need_reset |= 1 << i;
- }
- reg->reset_values = reset;
- reg->need_reset = need_reset;
-}
-
-/* Program all of the registers in preparation for enabling profiling. */
-
-static void
-ev67_cpu_setup (void *x)
-{
- struct op_register_config *reg = x;
-
- wrperfmon(2, reg->mux_select);
- wrperfmon(3, reg->proc_mode);
- wrperfmon(6, reg->reset_values | 3);
-}
-
-/* CTR is a counter for which the user has requested an interrupt count
- in between one of the widths selectable in hardware. Reset the count
- for CTR to the value stored in REG->RESET_VALUES. */
-
-static void
-ev67_reset_ctr(struct op_register_config *reg, unsigned long ctr)
-{
- wrperfmon(6, reg->reset_values | (1 << ctr));
-}
-
-/* ProfileMe conditions which will show up as counters. We can also
- detect the following, but it seems unlikely that anybody is
- interested in counting them:
- * Reset
- * MT_FPCR (write to floating point control register)
- * Arithmetic trap
- * Dstream Fault
- * Machine Check (ECC fault, etc.)
- * OPCDEC (illegal opcode)
- * Floating point disabled
- * Differentiate between DTB single/double misses and 3 or 4 level
- page tables
- * Istream access violation
- * Interrupt
- * Icache Parity Error.
- * Instruction killed (nop, trapb)
-
- Unfortunately, there seems to be no way to detect Dcache and Bcache
- misses; the latter could be approximated by making the counter
- count Bcache misses, but that is not precise.
-
- We model this as 20 counters:
- * PCTR0
- * PCTR1
- * 9 ProfileMe events, induced by PCTR0
- * 9 ProfileMe events, induced by PCTR1
-*/
-
-enum profileme_counters {
- PM_STALLED, /* Stalled for at least one cycle
- between the fetch and map stages */
- PM_TAKEN, /* Conditional branch taken */
- PM_MISPREDICT, /* Branch caused mispredict trap */
- PM_ITB_MISS, /* ITB miss */
- PM_DTB_MISS, /* DTB miss */
- PM_REPLAY, /* Replay trap */
- PM_LOAD_STORE, /* Load-store order trap */
- PM_ICACHE_MISS, /* Icache miss */
- PM_UNALIGNED, /* Unaligned Load/Store */
- PM_NUM_COUNTERS
-};
-
-static inline void
-op_add_pm(unsigned long pc, int kern, unsigned long counter,
- struct op_counter_config *ctr, unsigned long event)
-{
- unsigned long fake_counter = 2 + event;
- if (counter == 1)
- fake_counter += PM_NUM_COUNTERS;
- if (ctr[fake_counter].enabled)
- oprofile_add_pc(pc, kern, fake_counter);
-}
-
-static void
-ev67_handle_interrupt(unsigned long which, struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- unsigned long pmpc, pctr_ctl;
- int kern = !user_mode(regs);
- int mispredict = 0;
- union {
- unsigned long v;
- struct {
- unsigned reserved: 30; /* 0-29 */
- unsigned overcount: 3; /* 30-32 */
- unsigned icache_miss: 1; /* 33 */
- unsigned trap_type: 4; /* 34-37 */
- unsigned load_store: 1; /* 38 */
- unsigned trap: 1; /* 39 */
- unsigned mispredict: 1; /* 40 */
- } fields;
- } i_stat;
-
- enum trap_types {
- TRAP_REPLAY,
- TRAP_INVALID0,
- TRAP_DTB_DOUBLE_MISS_3,
- TRAP_DTB_DOUBLE_MISS_4,
- TRAP_FP_DISABLED,
- TRAP_UNALIGNED,
- TRAP_DTB_SINGLE_MISS,
- TRAP_DSTREAM_FAULT,
- TRAP_OPCDEC,
- TRAP_INVALID1,
- TRAP_MACHINE_CHECK,
- TRAP_INVALID2,
- TRAP_ARITHMETIC,
- TRAP_INVALID3,
- TRAP_MT_FPCR,
- TRAP_RESET
- };
-
- pmpc = wrperfmon(9, 0);
- /* ??? Don't know how to handle physical-mode PALcode address. */
- if (pmpc & 1)
- return;
- pmpc &= ~2; /* clear reserved bit */
-
- i_stat.v = wrperfmon(8, 0);
- if (i_stat.fields.trap) {
- switch (i_stat.fields.trap_type) {
- case TRAP_INVALID1:
- case TRAP_INVALID2:
- case TRAP_INVALID3:
- /* Pipeline redirection occurred. PMPC points
- to PALcode. Recognize ITB miss by PALcode
- offset address, and get actual PC from
- EXC_ADDR. */
- oprofile_add_pc(regs->pc, kern, which);
- if ((pmpc & ((1 << 15) - 1)) == 581)
- op_add_pm(regs->pc, kern, which,
- ctr, PM_ITB_MISS);
- /* Most other bit and counter values will be
- those for the first instruction in the
- fault handler, so we're done. */
- return;
- case TRAP_REPLAY:
- op_add_pm(pmpc, kern, which, ctr,
- (i_stat.fields.load_store
- ? PM_LOAD_STORE : PM_REPLAY));
- break;
- case TRAP_DTB_DOUBLE_MISS_3:
- case TRAP_DTB_DOUBLE_MISS_4:
- case TRAP_DTB_SINGLE_MISS:
- op_add_pm(pmpc, kern, which, ctr, PM_DTB_MISS);
- break;
- case TRAP_UNALIGNED:
- op_add_pm(pmpc, kern, which, ctr, PM_UNALIGNED);
- break;
- case TRAP_INVALID0:
- case TRAP_FP_DISABLED:
- case TRAP_DSTREAM_FAULT:
- case TRAP_OPCDEC:
- case TRAP_MACHINE_CHECK:
- case TRAP_ARITHMETIC:
- case TRAP_MT_FPCR:
- case TRAP_RESET:
- break;
- }
-
- /* ??? JSR/JMP/RET/COR or HW_JSR/HW_JMP/HW_RET/HW_COR
- mispredicts do not set this bit but can be
- recognized by the presence of one of these
- instructions at the PMPC location with bit 39
- set. */
- if (i_stat.fields.mispredict) {
- mispredict = 1;
- op_add_pm(pmpc, kern, which, ctr, PM_MISPREDICT);
- }
- }
-
- oprofile_add_pc(pmpc, kern, which);
-
- pctr_ctl = wrperfmon(5, 0);
- if (pctr_ctl & (1UL << 27))
- op_add_pm(pmpc, kern, which, ctr, PM_STALLED);
-
- /* Unfortunately, TAK is undefined on mispredicted branches.
- ??? It is also undefined for non-cbranch insns, should
- check that. */
- if (!mispredict && pctr_ctl & (1UL << 0))
- op_add_pm(pmpc, kern, which, ctr, PM_TAKEN);
-}
-
-struct op_axp_model op_model_ev67 = {
- .reg_setup = ev67_reg_setup,
- .cpu_setup = ev67_cpu_setup,
- .reset_ctr = ev67_reset_ctr,
- .handle_interrupt = ev67_handle_interrupt,
- .cpu_type = "alpha/ev67",
- .num_counters = 20,
- .can_set_proc_mode = 0,
-};
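
Reviewer note (not part of the patch): the overflow arithmetic being removed in ev67_reg_setup() above is easy to check in isolation. Each EV67 counter is 20 bits wide and cannot change width, so the driver preloads it with 0x100000 - count so that it overflows after `count` events, with counter 0's preload at bit 28 and counter 1's at bit 6; when a counter that needs re-arming overflows, both the EV6 and EV67 models re-arm it with wrperfmon(6, reg->reset_values | (1 << ctr)), as in the reset_ctr handlers above, and op_add_pm() maps ProfileMe event e raised under PCTRc to fake counter 2 + e (+ 9 when c is 1). The host-side sketch below uses hypothetical counts (0x80000 and 0x100000) and a 64-bit type to mirror Alpha's unsigned long.

/* Standalone sketch (plain host C, not kernel code): recompute the EV67
 * reset_values / need_reset pair for two hypothetical counter requests. */
#include <stdio.h>

int main(void)
{
	unsigned long long counts[2] = { 0x80000, 0x100000 };  /* example requests */
	unsigned long long reset = 0, need_reset = 0;
	int i;

	for (i = 0; i < 2; i++) {
		unsigned long long count = counts[i];

		if (count > 0x100000)           /* counters are 20 bits wide */
			count = 0x100000;
		/* preload so the counter overflows after 'count' events;
		 * counter 0 sits at bit 28, counter 1 at bit 6 */
		reset |= (0x100000 - count) << (i ? 6 : 28);
		if (count != 0x100000)          /* a full-width count needs no re-arm */
			need_reset |= 1ULL << i;
	}
	printf("reset_values = %#llx, need_reset = %#llx\n", reset, need_reset);
	return 0;
}

For these example counts the sketch prints reset_values = 0x800000000000 and need_reset = 0x1, i.e. only counter 0 has to be re-armed on overflow.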
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index b55ca77f619b..bc8d6aecfbbd 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -37,7 +37,6 @@ config ARC
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_MOD_ARCH_SPECIFIC
- select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select HANDLE_DOMAIN_IRQ
select IRQ_DOMAIN
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 578bdbbb0fa7..4392c9c189c4 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -96,8 +96,6 @@ core-$(CONFIG_ARC_PLAT_TB10X) += arch/arc/plat-tb10x/
core-$(CONFIG_ARC_PLAT_AXS10X) += arch/arc/plat-axs10x/
core-$(CONFIG_ARC_SOC_HSDK) += arch/arc/plat-hsdk/
-drivers-$(CONFIG_OPROFILE) += arch/arc/oprofile/
-
libs-y += arch/arc/lib/ $(LIBGCC)
boot := arch/arc/boot
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 37f724ad5e39..d838d0d57696 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -191,7 +191,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childksp[0] = 0; /* fp */
childksp[1] = (unsigned long)ret_from_fork; /* blink */
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
memset(c_regs, 0, sizeof(struct pt_regs));
c_callee->r13 = kthread_arg;
diff --git a/arch/arc/oprofile/Makefile b/arch/arc/oprofile/Makefile
deleted file mode 100644
index 698367bb41d0..000000000000
--- a/arch/arc/oprofile/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) common.o
diff --git a/arch/arc/oprofile/common.c b/arch/arc/oprofile/common.c
deleted file mode 100644
index 86bf5899533b..000000000000
--- a/arch/arc/oprofile/common.c
+++ /dev/null
@@ -1,23 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * Based on orig code from @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/oprofile.h>
-#include <linux/perf_event.h>
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- /*
- * A failure here, forces oprofile core to switch to Timer based PC
- * sampling, which will happen if say perf is not enabled/available
- */
- return oprofile_perf_init(ops);
-}
-
-void oprofile_arch_exit(void)
-{
- oprofile_perf_exit();
-}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 138248999df7..5da96f5df48f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -23,6 +23,7 @@ config ARM
select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG if CPU_V7 || CPU_V7M || CPU_V6K
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT
@@ -102,7 +103,6 @@ config ARM
select HAVE_KRETPROBES if HAVE_KPROBES
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
- select HAVE_OPROFILE if HAVE_PERF_EVENTS
select HAVE_OPTPROBES if !THUMB2_KERNEL
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -348,6 +348,7 @@ config ARCH_EP93XX
select ARM_AMBA
imply ARM_PATCH_PHYS_VIRT
select ARM_VIC
+ select GENERIC_IRQ_MULTI_HANDLER
select AUTO_ZRELADDR
select CLKDEV_LOOKUP
select CLKSRC_MMIO
@@ -671,10 +672,6 @@ source "arch/arm/mach-orion5x/Kconfig"
source "arch/arm/mach-oxnas/Kconfig"
-source "arch/arm/mach-picoxcell/Kconfig"
-
-source "arch/arm/mach-prima2/Kconfig"
-
source "arch/arm/mach-pxa/Kconfig"
source "arch/arm/plat-pxa/Kconfig"
@@ -706,12 +703,8 @@ source "arch/arm/mach-stm32/Kconfig"
source "arch/arm/mach-sunxi/Kconfig"
-source "arch/arm/mach-tango/Kconfig"
-
source "arch/arm/mach-tegra/Kconfig"
-source "arch/arm/mach-u300/Kconfig"
-
source "arch/arm/mach-uniphier/Kconfig"
source "arch/arm/mach-ux500/Kconfig"
@@ -722,19 +715,9 @@ source "arch/arm/mach-vexpress/Kconfig"
source "arch/arm/mach-vt8500/Kconfig"
-source "arch/arm/mach-zx/Kconfig"
-
source "arch/arm/mach-zynq/Kconfig"
# ARMv7-M architecture
-config ARCH_EFM32
- bool "Energy Micro efm32"
- depends on ARM_SINGLE_ARMV7M
- select GPIOLIB
- help
- Support for Energy Micro's (now Silicon Labs) efm32 Giant Gecko
- processors.
-
config ARCH_LPC18XX
bool "NXP LPC18xx/LPC43xx"
depends on ARM_SINGLE_ARMV7M
@@ -1552,7 +1535,7 @@ config ARM_MODULE_PLTS
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
default "12" if SOC_AM33XX
- default "9" if SA1111 || ARCH_EFM32
+ default "9" if SA1111
default "11"
help
The kernel memory allocator divides physically contiguous memory
@@ -1875,9 +1858,10 @@ config AUTO_ZRELADDR
help
ZRELADDR is the physical address where the decompressed kernel
image will be placed. If AUTO_ZRELADDR is selected, the address
- will be determined at run-time by masking the current IP with
- 0xf8000000. This assumes the zImage being placed in the first 128MB
- from start of memory.
+ will be determined at run-time, either by masking the current IP
+ with 0xf8000000, or, if invalid, from the DTB passed in r2.
+ This assumes the zImage being placed in the first 128MB from
+ start of memory.
config EFI_STUB
bool
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 4ff04201a8cc..9e0b5e7f12af 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -770,14 +770,6 @@ choice
depends on ARCH_OMAP2PLUS
select DEBUG_OMAP2PLUS_UART
- config DEBUG_PICOXCELL_UART
- depends on ARCH_PICOXCELL
- bool "Use PicoXcell UART for low-level debug"
- select DEBUG_UART_8250
- help
- Say Y here if you want kernel low-level debugging support
- on PicoXcell based platforms.
-
config DEBUG_PXA_UART1
depends on ARCH_PXA
bool "Use PXA UART1 for low-level debug"
@@ -1150,32 +1142,6 @@ choice
Say Y here if you want kernel low-level debugging support
on Allwinner A31/A23 based platforms on the R_UART.
- config DEBUG_SIRFPRIMA2_UART1
- bool "Kernel low-level debugging messages via SiRFprimaII UART1"
- depends on ARCH_PRIMA2
- select DEBUG_SIRFSOC_UART
- help
- Say Y here if you want the debug print routines to direct
- their output to the uart1 port on SiRFprimaII devices.
-
- config DEBUG_SIRFATLAS7_UART0
- bool "Kernel low-level debugging messages via SiRFatlas7 UART0"
- depends on ARCH_ATLAS7
- select DEBUG_SIRFSOC_UART
- help
- Say Y here if you want the debug print routines to direct
- their output to the uart0 port on SiRFATLAS7 devices.The uart0
- is used on SiRFATLAS7 as a extra debug port.sometimes an extra
- debug port can be very useful.
-
- config DEBUG_SIRFATLAS7_UART1
- bool "Kernel low-level debugging messages via SiRFatlas7 UART1"
- depends on ARCH_ATLAS7
- select DEBUG_SIRFSOC_UART
- help
- Say Y here if you want the debug print routines to direct
- their output to the uart1 port on SiRFATLAS7 devices.
-
config DEBUG_SPEAR3XX
bool "Kernel low-level debugging messages via ST SPEAr 3xx/6xx UART"
depends on ARCH_SPEAR3XX || ARCH_SPEAR6XX
@@ -1192,10 +1158,9 @@ choice
Say Y here if you want kernel low-level debugging support
on ST SPEAr13xx based platforms.
- config STIH41X_DEBUG_ASC2
+ config DEBUG_STIH41X_ASC2
bool "Use StiH415/416 ASC2 UART for low-level debug"
depends on ARCH_STI
- select DEBUG_STI_UART
help
Say Y here if you want kernel low-level debugging support
on STiH415/416 based platforms like b2000, which has
@@ -1203,10 +1168,9 @@ choice
If unsure, say N.
- config STIH41X_DEBUG_SBC_ASC1
+ config DEBUG_STIH41X_SBC_ASC1
bool "Use StiH415/416 SBC ASC1 UART for low-level debug"
depends on ARCH_STI
- select DEBUG_STI_UART
help
Say Y here if you want kernel low-level debugging support
on STiH415/416 based platforms like b2020. which has
@@ -1214,6 +1178,16 @@ choice
If unsure, say N.
+ config DEBUG_STIH418_SBC_ASC0
+ bool "Use StiH418 SBC ASC0 UART for low-level debug"
+ depends on ARCH_STI
+ help
+ Say Y here if you want kernel low-level debugging support
+ on STiH418 based platforms which has default UART wired
+ up to SBC ASC0.
+
+ If unsure, say N.
+
config STM32F4_DEBUG_UART
bool "Use STM32F4 UART for low-level debug"
depends on MACH_STM32F429 || MACH_STM32F469
@@ -1314,14 +1288,6 @@ choice
Say Y here if you want kernel low-level debugging support
on Tegra based platforms.
- config DEBUG_U300_UART
- bool "Kernel low-level debugging messages via U300 UART0"
- depends on ARCH_U300
- select DEBUG_UART_PL01X
- help
- Say Y here if you want the debug print routines to direct
- their output to the uart port on U300 devices.
-
config DEBUG_UX500_UART
depends on ARCH_U8500
bool "Use Ux500 UART for low-level debug"
@@ -1387,18 +1353,6 @@ choice
This option selects UART0 on VIA/Wondermedia System-on-a-chip
devices, including VT8500, WM8505, WM8650 and WM8850.
- config DEBUG_ZTE_ZX
- bool "Use ZTE ZX UART"
- select DEBUG_UART_PL01X
- depends on ARCH_ZX
- help
- Say Y here if you are enabling ZTE ZX296702 SOC and need
- debug uart support.
-
- This option is preferred over the platform specific
- options; the platform specific options are deprecated
- and will be soon removed.
-
config DEBUG_ZYNQ_UART0
bool "Kernel low-level debugging on Xilinx Zynq using UART0"
depends on ARCH_ZYNQ
@@ -1456,20 +1410,6 @@ choice
options; the platform specific options are deprecated
and will be soon removed.
- config DEBUG_LL_UART_EFM32
- bool "Kernel low-level debugging via efm32 UART"
- depends on ARCH_EFM32
- help
- Say Y here if you want the debug print routines to direct
- their output to an UART or USART port on efm32 based
- machines. Use the following addresses for DEBUG_UART_PHYS:
-
- 0x4000c000 | USART0
- 0x4000c400 | USART1
- 0x4000c800 | USART2
- 0x4000e000 | UART0
- 0x4000e400 | UART1
-
config DEBUG_LL_UART_PL01X
bool "Kernel low-level debugging via ARM Ltd PL01x Primecell UART"
help
@@ -1552,18 +1492,10 @@ config DEBUG_TEGRA_UART
bool
depends on ARCH_TEGRA
-config DEBUG_STI_UART
- bool
- depends on ARCH_STI
-
config DEBUG_STM32_UART
bool
depends on ARCH_STM32
-config DEBUG_SIRFSOC_UART
- bool
- depends on ARCH_SIRF
-
config DEBUG_UART_FLOW_CONTROL
bool "Enable flow control (CTS) for the debug UART"
depends on DEBUG_LL
@@ -1587,7 +1519,6 @@ config DEBUG_LL_INCLUDE
default "debug/meson.S" if DEBUG_MESON_UARTAO
default "debug/pl01x.S" if DEBUG_LL_UART_PL01X || DEBUG_UART_PL01X
default "debug/exynos.S" if DEBUG_EXYNOS_UART
- default "debug/efm32.S" if DEBUG_LL_UART_EFM32
default "debug/icedcc.S" if DEBUG_ICEDCC
default "debug/imx.S" if DEBUG_IMX1_UART || \
DEBUG_IMX25_UART || \
@@ -1619,8 +1550,9 @@ config DEBUG_LL_INCLUDE
default "debug/renesas-scif.S" if DEBUG_RMOBILE_SCIFA4
default "debug/s3c24xx.S" if DEBUG_S3C24XX_UART || DEBUG_S3C64XX_UART
default "debug/s5pv210.S" if DEBUG_S5PV210_UART
- default "debug/sirf.S" if DEBUG_SIRFSOC_UART
- default "debug/sti.S" if DEBUG_STI_UART
+ default "debug/sti.S" if DEBUG_STIH41X_ASC2
+ default "debug/sti.S" if DEBUG_STIH41X_SBC_ASC1
+ default "debug/sti.S" if DEBUG_STIH418_SBC_ASC0
default "debug/stm32.S" if DEBUG_STM32_UART
default "debug/tegra.S" if DEBUG_TEGRA_UART
default "debug/ux500.S" if DEBUG_UX500_UART
@@ -1653,7 +1585,7 @@ config DEBUG_UART_PHYS
default 0x02531000 if DEBUG_KEYSTONE_UART1
default 0x03010fe0 if ARCH_RPC
default 0x07000000 if DEBUG_SUN9I_UART0
- default 0x09405000 if DEBUG_ZTE_ZX
+ default 0x09530000 if DEBUG_STIH418_SBC_ASC0
default 0x10009000 if DEBUG_REALVIEW_STD_PORT || \
DEBUG_VEXPRESS_UART0_CA9
default 0x1010c000 if DEBUG_REALVIEW_PB1176_PORT
@@ -1671,8 +1603,6 @@ config DEBUG_UART_PHYS
default 0x1600d000 if DEBUG_SD5203_UART
default 0x18000300 if DEBUG_BCM_5301X
default 0x18000400 if DEBUG_BCM_HR2
- default 0x18010000 if DEBUG_SIRFATLAS7_UART0
- default 0x18020000 if DEBUG_SIRFATLAS7_UART1
default 0x18023000 if DEBUG_BCM_IPROC_UART3
default 0x1c090000 if DEBUG_VEXPRESS_UART0_RS1
default 0x20001000 if DEBUG_HIP01_UART
@@ -1682,7 +1612,6 @@ config DEBUG_UART_PHYS
default 0x20201000 if DEBUG_BCM2835
default 0x3e000000 if DEBUG_BCM_KONA_UART
default 0x3f201000 if DEBUG_BCM2836
- default 0x4000e400 if DEBUG_LL_UART_EFM32
default 0x40010000 if STM32MP1_DEBUG_UART
default 0x40011000 if STM32F4_DEBUG_UART || STM32F7_DEBUG_UART || \
STM32H7_DEBUG_UART
@@ -1717,12 +1646,9 @@ config DEBUG_UART_PHYS
default 0x80010000 if DEBUG_ASM9260_UART
default 0x80070000 if DEBUG_IMX23_UART
default 0x80074000 if DEBUG_IMX28_UART
- default 0x80230000 if DEBUG_PICOXCELL_UART
default 0x808c0000 if DEBUG_EP93XX || ARCH_EP93XX
default 0x90020000 if DEBUG_NSPIRE_CLASSIC_UART || DEBUG_NSPIRE_CX_UART
- default 0xb0060000 if DEBUG_SIRFPRIMA2_UART1
default 0xb0090000 if DEBUG_VEXPRESS_UART0_CRX
- default 0xc0013000 if DEBUG_U300_UART
default 0xc8000000 if ARCH_IXP4XX && !CPU_BIG_ENDIAN
default 0xc8000003 if ARCH_IXP4XX && CPU_BIG_ENDIAN
default 0xd0000000 if DEBUG_SPEAR3XX
@@ -1752,7 +1678,9 @@ config DEBUG_UART_PHYS
default 0xfc00c000 if DEBUG_AT91_SAMA5D4_USART3
default 0xfcb00000 if DEBUG_HI3620_UART
default 0xfd883000 if DEBUG_ALPINE_UART0
+ default 0xfe531000 if DEBUG_STIH41X_SBC_ASC1
default 0xfe800000 if ARCH_IOP32X
+ default 0xfed32000 if DEBUG_STIH41X_ASC2
default 0xff690000 if DEBUG_RK32_UART2
default 0xffc02000 if DEBUG_SOCFPGA_UART0
default 0xffc02100 if DEBUG_SOCFPGA_ARRIA10_UART1
@@ -1768,7 +1696,6 @@ config DEBUG_UART_PHYS
default 0xfffff200 if DEBUG_AT91_RM9200_DBGU
depends on ARCH_EP93XX || \
DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
- DEBUG_LL_UART_EFM32 || \
DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
DEBUG_QCOM_UARTDM || DEBUG_R7S72100_SCIF2 || \
DEBUG_R7S9210_SCIF2 || DEBUG_R7S9210_SCIF4 || \
@@ -1780,8 +1707,10 @@ config DEBUG_UART_PHYS
DEBUG_RMOBILE_SCIFA4 || DEBUG_S3C24XX_UART || \
DEBUG_S3C64XX_UART || \
DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
- DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0 || \
- DEBUG_AT91_UART || DEBUG_STM32_UART
+ DEBUG_DIGICOLOR_UA0 || \
+ DEBUG_AT91_UART || DEBUG_STM32_UART || \
+ DEBUG_STIH41X_ASC2 || DEBUG_STIH41X_SBC_ASC1 || \
+ DEBUG_STIH418_SBC_ASC0
config DEBUG_UART_VIRT
hex "Virtual base address of debug UART"
@@ -1826,6 +1755,7 @@ config DEBUG_UART_VIRT
default 0xf8090000 if DEBUG_VEXPRESS_UART0_RS1
default 0xf8ffee00 if DEBUG_AT91_SAM9263_DBGU
default 0xf8fff200 if DEBUG_AT91_RM9200_DBGU
+ default 0xf9530000 if DEBUG_STIH418_SBC_ASC0
default 0xf9e09000 if DEBUG_AM33XXUART1
default 0xfa020000 if DEBUG_OMAP4UART3 || DEBUG_TI81XXUART1
default 0xfa022000 if DEBUG_TI81XXUART2
@@ -1842,15 +1772,15 @@ config DEBUG_UART_VIRT
default 0xfb020000 if DEBUG_OMAP3UART3
default 0xfb042000 if DEBUG_OMAP3UART4
default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
- default 0xfc705000 if DEBUG_ZTE_ZX
default 0xfcfe8600 if DEBUG_BCM63XX_UART
default 0xfd000000 if DEBUG_SPEAR3XX || DEBUG_SPEAR13XX
+ default 0xfd531000 if DEBUG_STIH41X_SBC_ASC1
default 0xfd883000 if DEBUG_ALPINE_UART0
+ default 0xfdd32000 if DEBUG_STIH41X_ASC2
default 0xfe010000 if STM32MP1_DEBUG_UART
default 0xfe017000 if DEBUG_MMP_UART2
default 0xfe018000 if DEBUG_MMP_UART3
default 0xfe100000 if DEBUG_IMX23_UART || DEBUG_IMX28_UART
- default 0xfe230000 if DEBUG_PICOXCELL_UART
default 0xfe300000 if DEBUG_BCM_KONA_UART
default 0xfe800000 if ARCH_IOP32X
default 0xfeb00000 if DEBUG_HI3620_UART || DEBUG_HIX5HD2_UART
@@ -1863,10 +1793,7 @@ config DEBUG_UART_VIRT
default 0xfec03000 if DEBUG_SOCFPGA_CYCLONE5_UART1
default 0xfec12000 if DEBUG_MVEBU_UART0 || DEBUG_MVEBU_UART0_ALTERNATE
default 0xfec12100 if DEBUG_MVEBU_UART1_ALTERNATE
- default 0xfec10000 if DEBUG_SIRFATLAS7_UART0
default 0xfec20000 if DEBUG_DAVINCI_DMx_UART0
- default 0xfec20000 if DEBUG_SIRFATLAS7_UART1
- default 0xfec60000 if DEBUG_SIRFPRIMA2_UART1
default 0xfec90000 if DEBUG_RK32_UART2
default 0xfed0c000 if DEBUG_DAVINCI_DA8XX_UART1
default 0xfed0d000 if DEBUG_DAVINCI_DA8XX_UART2 || DEBUG_SD5203_UART
@@ -1882,7 +1809,6 @@ config DEBUG_UART_VIRT
default 0xfefb0000 if DEBUG_OMAP1UART1 || DEBUG_OMAP7XXUART1
default 0xfefb0800 if DEBUG_OMAP1UART2 || DEBUG_OMAP7XXUART2
default 0xfefb9800 if DEBUG_OMAP1UART3 || DEBUG_OMAP7XXUART3
- default 0xff003000 if DEBUG_U300_UART
default 0xffd01000 if DEBUG_HIP01_UART
default DEBUG_UART_PHYS if !MMU
depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
@@ -1890,8 +1816,10 @@ config DEBUG_UART_VIRT
DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
DEBUG_S3C64XX_UART || \
DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
- DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0 || \
- DEBUG_AT91_UART || DEBUG_STM32_UART
+ DEBUG_DIGICOLOR_UA0 || \
+ DEBUG_AT91_UART || DEBUG_STM32_UART || \
+ DEBUG_STIH41X_ASC2 || DEBUG_STIH41X_SBC_ASC1 || \
+ DEBUG_STIH418_SBC_ASC0
config DEBUG_UART_8250_SHIFT
int "Register offset shift for the 8250 debug UART"
@@ -1905,8 +1833,7 @@ config DEBUG_UART_8250_WORD
bool "Use 32-bit accesses for 8250 UART"
depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
depends on DEBUG_UART_8250_SHIFT >= 2
- default y if DEBUG_PICOXCELL_UART || \
- DEBUG_SOCFPGA_UART0 || DEBUG_SOCFPGA_ARRIA10_UART1 || \
+ default y if DEBUG_SOCFPGA_UART0 || DEBUG_SOCFPGA_ARRIA10_UART1 || \
DEBUG_SOCFPGA_CYCLONE5_UART1 || DEBUG_KEYSTONE_UART0 || \
DEBUG_KEYSTONE_UART1 || DEBUG_ALPINE_UART0 || \
DEBUG_DAVINCI_DMx_UART0 || DEBUG_DAVINCI_DA8XX_UART1 || \
@@ -1926,7 +1853,7 @@ config DEBUG_UNCOMPRESS
depends on ARCH_MULTIPLATFORM || PLAT_SAMSUNG || ARM_SINGLE_ARMV7M
depends on DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
(!DEBUG_TEGRA_UART || !ZBOOT_ROM) && \
- !DEBUG_BRCMSTB_UART
+ !DEBUG_BRCMSTB_UART && !DEBUG_SEMIHOSTING
help
This option influences the normal decompressor output for
multiplatform kernels. Normally, multiplatform kernels disable
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 4aaec9599e8a..dad5502ecc28 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -168,7 +168,6 @@ machine-$(CONFIG_ARCH_CNS3XXX) += cns3xxx
machine-$(CONFIG_ARCH_DAVINCI) += davinci
machine-$(CONFIG_ARCH_DIGICOLOR) += digicolor
machine-$(CONFIG_ARCH_DOVE) += dove
-machine-$(CONFIG_ARCH_EFM32) += efm32
machine-$(CONFIG_ARCH_EP93XX) += ep93xx
machine-$(CONFIG_ARCH_EXYNOS) += exynos
machine-$(CONFIG_ARCH_FOOTBRIDGE) += footbridge
@@ -199,7 +198,6 @@ machine-$(CONFIG_ARCH_OXNAS) += oxnas
machine-$(CONFIG_ARCH_OMAP1) += omap1
machine-$(CONFIG_ARCH_OMAP2PLUS) += omap2
machine-$(CONFIG_ARCH_ORION5X) += orion5x
-machine-$(CONFIG_ARCH_PICOXCELL) += picoxcell
machine-$(CONFIG_ARCH_PXA) += pxa
machine-$(CONFIG_ARCH_QCOM) += qcom
machine-$(CONFIG_ARCH_RDA) += rda
@@ -211,19 +209,15 @@ machine-$(CONFIG_PLAT_SAMSUNG) += s3c
machine-$(CONFIG_ARCH_S5PV210) += s5pv210
machine-$(CONFIG_ARCH_SA1100) += sa1100
machine-$(CONFIG_ARCH_RENESAS) += shmobile
-machine-$(CONFIG_ARCH_SIRF) += prima2
machine-$(CONFIG_ARCH_SOCFPGA) += socfpga
machine-$(CONFIG_ARCH_STI) += sti
machine-$(CONFIG_ARCH_STM32) += stm32
machine-$(CONFIG_ARCH_SUNXI) += sunxi
-machine-$(CONFIG_ARCH_TANGO) += tango
machine-$(CONFIG_ARCH_TEGRA) += tegra
-machine-$(CONFIG_ARCH_U300) += u300
machine-$(CONFIG_ARCH_U8500) += ux500
machine-$(CONFIG_ARCH_VERSATILE) += versatile
machine-$(CONFIG_ARCH_VEXPRESS) += vexpress
machine-$(CONFIG_ARCH_VT8500) += vt8500
-machine-$(CONFIG_ARCH_ZX) += zx
machine-$(CONFIG_ARCH_ZYNQ) += zynq
machine-$(CONFIG_PLAT_SPEAR) += spear
@@ -266,8 +260,6 @@ core-y += $(machdirs) $(platdirs)
core- += $(patsubst %,arch/arm/mach-%/, $(machine-))
core- += $(patsubst %,arch/arm/plat-%/, $(plat-))
-drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/
-
libs-y := arch/arm/lib/ $(libs-y)
# Default target when executing plain make
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index fb521efcc6c2..fd94e27ba4fa 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -87,10 +87,13 @@ libfdt_objs := fdt_rw.o fdt_ro.o fdt_wip.o fdt.o
ifeq ($(CONFIG_ARM_ATAG_DTB_COMPAT),y)
OBJS += $(libfdt_objs) atags_to_fdt.o
endif
+ifeq ($(CONFIG_USE_OF),y)
+OBJS += $(libfdt_objs) fdt_check_mem_start.o
+endif
# -fstack-protector-strong triggers protection checks in this code,
# but it is being used too early to link to meaningful stack_chk logic.
-$(foreach o, $(libfdt_objs) atags_to_fdt.o, \
+$(foreach o, $(libfdt_objs) atags_to_fdt.o fdt_check_mem_start.o, \
$(eval CFLAGS_$(o) := -I $(srctree)/scripts/dtc/libfdt -fno-stack-protector))
# These were previously generated C files. When you are building the kernel
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
index 8452753efebe..31927d2fe297 100644
--- a/arch/arm/boot/compressed/atags_to_fdt.c
+++ b/arch/arm/boot/compressed/atags_to_fdt.c
@@ -15,7 +15,8 @@ static int node_offset(void *fdt, const char *node_path)
{
int offset = fdt_path_offset(fdt, node_path);
if (offset == -FDT_ERR_NOTFOUND)
- offset = fdt_add_subnode(fdt, 0, node_path);
+ /* Add the node to root if not found, dropping the leading '/' */
+ offset = fdt_add_subnode(fdt, 0, node_path + 1);
return offset;
}
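
Reviewer note (not part of the patch): the one-character fix above hinges on a libfdt API detail — fdt_path_offset() takes a full path such as "/chosen", while fdt_add_subnode() takes the bare name of the child to create under the given parent, so the fallback has to skip the leading '/'. A hedged, host-side illustration (assumes libfdt is installed; the "/chosen" path is only an example):

/* Build an empty FDT and exercise the corrected fallback. Link with -lfdt. */
#include <stdio.h>
#include <libfdt.h>

/* Same shape as the patched node_offset() above */
static int node_offset(void *fdt, const char *node_path)
{
	int offset = fdt_path_offset(fdt, node_path);   /* full path lookup */

	if (offset == -FDT_ERR_NOTFOUND)
		/* fdt_add_subnode() wants a bare child name, hence the "+ 1" */
		offset = fdt_add_subnode(fdt, 0, node_path + 1);
	return offset;
}

int main(void)
{
	static char blob[1024];

	if (fdt_create_empty_tree(blob, sizeof(blob)) != 0)
		return 1;
	/* "/chosen" does not exist yet, so it is created under the root */
	printf("offset of /chosen: %d\n", node_offset(blob, "/chosen"));
	return 0;
}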
diff --git a/arch/arm/boot/compressed/fdt_check_mem_start.c b/arch/arm/boot/compressed/fdt_check_mem_start.c
new file mode 100644
index 000000000000..62450d824c3c
--- /dev/null
+++ b/arch/arm/boot/compressed/fdt_check_mem_start.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+#include <linux/sizes.h>
+
+static const void *get_prop(const void *fdt, const char *node_path,
+ const char *property, int minlen)
+{
+ const void *prop;
+ int offset, len;
+
+ offset = fdt_path_offset(fdt, node_path);
+ if (offset < 0)
+ return NULL;
+
+ prop = fdt_getprop(fdt, offset, property, &len);
+ if (!prop || len < minlen)
+ return NULL;
+
+ return prop;
+}
+
+static uint32_t get_cells(const void *fdt, const char *name)
+{
+ const fdt32_t *prop = get_prop(fdt, "/", name, sizeof(fdt32_t));
+
+ if (!prop) {
+ /* default */
+ return 1;
+ }
+
+ return fdt32_ld(prop);
+}
+
+static uint64_t get_val(const fdt32_t *cells, uint32_t ncells)
+{
+ uint64_t r;
+
+ r = fdt32_ld(cells);
+ if (ncells > 1)
+ r = (r << 32) | fdt32_ld(cells + 1);
+
+ return r;
+}
+
+/*
+ * Check the start of physical memory
+ *
+ * Traditionally, the start address of physical memory is obtained by masking
+ * the program counter. However, this does require that this address is a
+ * multiple of 128 MiB, precluding booting Linux on platforms where this
+ * requirement is not fulfilled.
+ * Hence validate the calculated address against the memory information in the
+ * DTB, and, if out-of-range, replace it by the real start address.
+ * To preserve backwards compatibility (systems reserving a block of memory
+ * at the start of physical memory, kdump, ...), the traditional method is
+ * always used if it yields a valid address.
+ *
+ * Return value: start address of physical memory to use
+ */
+uint32_t fdt_check_mem_start(uint32_t mem_start, const void *fdt)
+{
+ uint32_t addr_cells, size_cells, base;
+ uint32_t fdt_mem_start = 0xffffffff;
+ const fdt32_t *reg, *endp;
+ uint64_t size, end;
+ const char *type;
+ int offset, len;
+
+ if (!fdt)
+ return mem_start;
+
+ if (fdt_magic(fdt) != FDT_MAGIC)
+ return mem_start;
+
+ /* There may be multiple cells on LPAE platforms */
+ addr_cells = get_cells(fdt, "#address-cells");
+ size_cells = get_cells(fdt, "#size-cells");
+ if (addr_cells > 2 || size_cells > 2)
+ return mem_start;
+
+ /* Walk all memory nodes and regions */
+ for (offset = fdt_next_node(fdt, -1, NULL); offset >= 0;
+ offset = fdt_next_node(fdt, offset, NULL)) {
+ type = fdt_getprop(fdt, offset, "device_type", NULL);
+ if (!type || strcmp(type, "memory"))
+ continue;
+
+ reg = fdt_getprop(fdt, offset, "linux,usable-memory", &len);
+ if (!reg)
+ reg = fdt_getprop(fdt, offset, "reg", &len);
+ if (!reg)
+ continue;
+
+ for (endp = reg + (len / sizeof(fdt32_t));
+ endp - reg >= addr_cells + size_cells;
+ reg += addr_cells + size_cells) {
+ size = get_val(reg + addr_cells, size_cells);
+ if (!size)
+ continue;
+
+ if (addr_cells > 1 && fdt32_ld(reg)) {
+ /* Outside 32-bit address space, skipping */
+ continue;
+ }
+
+ base = fdt32_ld(reg + addr_cells - 1);
+ end = base + size;
+ if (mem_start >= base && mem_start < end) {
+ /* Calculated address is valid, use it */
+ return mem_start;
+ }
+
+ if (base < fdt_mem_start)
+ fdt_mem_start = base;
+ }
+ }
+
+ if (fdt_mem_start == 0xffffffff) {
+ /* No usable memory found, falling back to default */
+ return mem_start;
+ }
+
+ /*
+ * The calculated address is not usable.
+ * Use the lowest usable physical memory address from the DTB instead,
+ * and make sure this is a multiple of 2 MiB for phys/virt patching.
+ */
+ return round_up(fdt_mem_start, SZ_2M);
+}
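
Reviewer note (not part of the patch): to make the fallback described in the comment block above concrete — and to illustrate the updated AUTO_ZRELADDR help text earlier in this series — here is a hedged, host-side model of the decision fdt_check_mem_start() makes, reduced to a single memory region with made-up addresses. If the PC-masked guess lands inside a DTB memory region it is kept; otherwise the lowest usable base from the DTB is rounded up to 2 MiB.

/* Simplified host-side model of fdt_check_mem_start(); addresses are made up. */
#include <stdio.h>
#include <stdint.h>

#define SZ_2M 0x00200000u

static uint32_t check_mem_start(uint32_t mem_start, uint32_t base, uint64_t size)
{
	uint64_t end = (uint64_t)base + size;

	if (mem_start >= base && mem_start < end)
		return mem_start;      /* the PC-masked guess is inside RAM: keep it */

	/* otherwise fall back to the DTB base, rounded up to 2 MiB */
	return (base + SZ_2M - 1) & ~(SZ_2M - 1);
}

int main(void)
{
	/* hypothetical board: 256 MiB of RAM at 0x4a100000, zImage at 0x4a800000 */
	uint32_t guess = 0x4a800000 & 0xf8000000;   /* traditional mask: 0x48000000 */

	printf("start of memory: %#x\n", check_mem_start(guess, 0x4a100000, 0x10000000));
	return 0;
}

For these made-up numbers the masked guess 0x48000000 lies below the region, so the sketch returns 0x4a200000. In the head.S hunk that follows, the real function is called with the masked PC in r0 and the DTB pointer (copied from r8) in r1, and its return value feeds the TEXT_OFFSET calculation.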
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index d9cce7238a36..b1cb1972361b 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -174,10 +174,7 @@
.macro be32tocpu, val, tmp
#ifndef __ARMEB__
/* convert to little endian */
- eor \tmp, \val, \val, ror #16
- bic \tmp, \tmp, #0x00ff0000
- mov \val, \val, ror #8
- eor \val, \val, \tmp, lsr #8
+ rev_l \val, \tmp
#endif
.endm
@@ -282,10 +279,40 @@ not_angel:
* are already placing their zImage in (eg) the top 64MB
* of this range.
*/
- mov r4, pc
- and r4, r4, #0xf8000000
+ mov r0, pc
+ and r0, r0, #0xf8000000
+#ifdef CONFIG_USE_OF
+ adr r1, LC1
+#ifdef CONFIG_ARM_APPENDED_DTB
+ /*
+ * Look for an appended DTB. If found, we cannot use it to
+ * validate the calculated start of physical memory, as its
+ * memory nodes may need to be augmented by ATAGS stored at
+ * an offset from the same start of physical memory.
+ */
+ ldr r2, [r1, #4] @ get &_edata
+ add r2, r2, r1 @ relocate it
+ ldr r2, [r2] @ get DTB signature
+ ldr r3, =OF_DT_MAGIC
+ cmp r2, r3 @ do we have a DTB there?
+ beq 1f @ if yes, skip validation
+#endif /* CONFIG_ARM_APPENDED_DTB */
+
+ /*
+ * Make sure we have some stack before calling C code.
+ * No GOT fixup has occurred yet, but none of the code we're
+ * about to call uses any global variables.
+ */
+ ldr sp, [r1] @ get stack location
+ add sp, sp, r1 @ apply relocation
+
+ /* Validate calculated start against passed DTB */
+ mov r1, r8
+ bl fdt_check_mem_start
+1:
+#endif /* CONFIG_USE_OF */
/* Determine final kernel image address. */
- add r4, r4, #TEXT_OFFSET
+ add r4, r0, #TEXT_OFFSET
#else
ldr r4, =zreladdr
#endif
@@ -1164,9 +1191,9 @@ __armv4_mmu_cache_off:
__armv7_mmu_cache_off:
mrc p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
- bic r0, r0, #0x000d
+ bic r0, r0, #0x0005
#else
- bic r0, r0, #0x000c
+ bic r0, r0, #0x0004
#endif
mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
mov r0, #0
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 3d1ea0b25168..8e5d4ab4e75e 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -74,10 +74,6 @@ dtb-$(CONFIG_SOC_SAM_V7) += \
at91-sama5d4_xplained.dtb \
at91-sama5d4ek.dtb \
at91-vinco.dtb
-dtb-$(CONFIG_ARCH_ATLAS6) += \
- atlas6-evb.dtb
-dtb-$(CONFIG_ARCH_ATLAS7) += \
- atlas7-evb.dtb
dtb-$(CONFIG_ARCH_AXXIA) += \
axm5516-amarillo.dtb
dtb-$(CONFIG_ARCH_BCM2835) += \
@@ -177,8 +173,6 @@ dtb-$(CONFIG_ARCH_DAVINCI) += \
da850-lego-ev3.dtb
dtb-$(CONFIG_ARCH_DIGICOLOR) += \
cx92755_equinox.dtb
-dtb-$(CONFIG_ARCH_EFM32) += \
- efm32gg-dk3750.dtb
dtb-$(CONFIG_ARCH_EXYNOS3) += \
exynos3250-artik5-eval.dtb \
exynos3250-monk.dtb \
@@ -465,6 +459,9 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6dl-pico-hobbit.dtb \
imx6dl-pico-nymph.dtb \
imx6dl-pico-pi.dtb \
+ imx6dl-plybas.dtb \
+ imx6dl-plym2m.dtb \
+ imx6dl-prtmvt.dtb \
imx6dl-prtrvt.dtb \
imx6dl-prtvt7.dtb \
imx6dl-rex-basic.dtb \
@@ -487,6 +484,8 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6dl-tx6u-811x.dtb \
imx6dl-tx6u-81xx-mb7.dtb \
imx6dl-udoo.dtb \
+ imx6dl-victgo.dtb \
+ imx6dl-vicut1.dtb \
imx6dl-wandboard.dtb \
imx6dl-wandboard-revb1.dtb \
imx6dl-wandboard-revd1.dtb \
@@ -580,6 +579,7 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6q-udoo.dtb \
imx6q-utilite-pro.dtb \
imx6q-var-dt6customboard.dtb \
+ imx6q-vicut1.dtb \
imx6q-wandboard.dtb \
imx6q-wandboard-revb1.dtb \
imx6q-wandboard-revd1.dtb \
@@ -594,6 +594,7 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6qp-tx6qp-8037-mb7.dtb \
imx6qp-tx6qp-8137.dtb \
imx6qp-tx6qp-8137-mb7.dtb \
+ imx6qp-vicutp.dtb \
imx6qp-wandboard-revd1.dtb \
imx6qp-zii-rdu2.dtb
dtb-$(CONFIG_SOC_IMX6SL) += \
@@ -631,6 +632,7 @@ dtb-$(CONFIG_SOC_IMX6UL) += \
imx6ul-pico-pi.dtb \
imx6ul-phytec-segin-ff-rdk-emmc.dtb \
imx6ul-phytec-segin-ff-rdk-nand.dtb \
+ imx6ul-prti6g.dtb \
imx6ul-tx6ul-0010.dtb \
imx6ul-tx6ul-0011.dtb \
imx6ul-tx6ul-mainboard.dtb \
@@ -817,6 +819,7 @@ dtb-$(CONFIG_SOC_AM33XX) += \
am335x-lxm.dtb \
am335x-moxa-uc-2101.dtb \
am335x-moxa-uc-8100-me-t.dtb \
+ am335x-myirtech-myd.dtb \
am335x-nano.dtb \
am335x-netcan-plus-1xx.dtb \
am335x-netcom-plus-2xx.dtb \
@@ -888,11 +891,6 @@ dtb-$(CONFIG_ARCH_ACTIONS) += \
owl-s500-labrador-base-m.dtb \
owl-s500-roseapplepi.dtb \
owl-s500-sparky.dtb
-dtb-$(CONFIG_ARCH_PICOXCELL) += \
- picoxcell-pc7302-pc3x2.dtb \
- picoxcell-pc7302-pc3x3.dtb
-dtb-$(CONFIG_ARCH_PRIMA2) += \
- prima2-evb.dtb
dtb-$(CONFIG_ARCH_PXA) += \
pxa300-raumfeld-connector.dtb \
pxa300-raumfeld-controller.dtb \
@@ -912,6 +910,9 @@ dtb-$(CONFIG_ARCH_QCOM) += \
qcom-apq8074-dragonboard.dtb \
qcom-apq8084-ifc6540.dtb \
qcom-apq8084-mtp.dtb \
+ qcom-ipq4018-ap120c-ac.dtb \
+ qcom-ipq4018-ap120c-ac-bit.dtb \
+ qcom-ipq4018-jalapeno.dtb \
qcom-ipq4019-ap.dk01.1-c1.dtb \
qcom-ipq4019-ap.dk04.1-c1.dtb \
qcom-ipq4019-ap.dk04.1-c3.dtb \
@@ -927,7 +928,8 @@ dtb-$(CONFIG_ARCH_QCOM) += \
qcom-msm8974-sony-xperia-amami.dtb \
qcom-msm8974-sony-xperia-castor.dtb \
qcom-msm8974-sony-xperia-honami.dtb \
- qcom-mdm9615-wp8548-mangoh-green.dtb
+ qcom-mdm9615-wp8548-mangoh-green.dtb \
+ qcom-sdx55-mtp.dtb
dtb-$(CONFIG_ARCH_RDA) += \
rda8810pl-orangepi-2g-iot.dtb \
rda8810pl-orangepi-i96.dtb
@@ -1224,6 +1226,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
sun8i-s3-lichee-zero-plus.dtb \
sun8i-s3-pinecube.dtb \
sun8i-t3-cqa3t-bv3.dtb \
+ sun8i-v3-sl631-imx179.dtb \
sun8i-v3s-licheepi-zero.dtb \
sun8i-v3s-licheepi-zero-dock.dtb \
sun8i-v40-bananapi-m2-berry.dtb
@@ -1232,8 +1235,6 @@ dtb-$(CONFIG_MACH_SUN9I) += \
sun9i-a80-cubieboard4.dtb
dtb-$(CONFIG_MACH_SUNIV) += \
suniv-f1c100s-licheepi-nano.dtb
-dtb-$(CONFIG_ARCH_TANGO) += \
- tango4-vantage-1172.dtb
dtb-$(CONFIG_ARCH_TEGRA_2x_SOC) += \
tegra20-acer-a500-picasso.dtb \
tegra20-harmony.dtb \
@@ -1268,8 +1269,6 @@ dtb-$(CONFIG_ARCH_TEGRA_124_SOC) += \
tegra124-nyan-big.dtb \
tegra124-nyan-blaze.dtb \
tegra124-venice2.dtb
-dtb-$(CONFIG_ARCH_U300) += \
- ste-u300.dtb
dtb-$(CONFIG_ARCH_U8500) += \
ste-snowball.dtb \
ste-hrefprev60-stuib.dtb \
@@ -1278,6 +1277,7 @@ dtb-$(CONFIG_ARCH_U8500) += \
ste-hrefv60plus-tvk.dtb \
ste-href520-tvk.dtb \
ste-ux500-samsung-golden.dtb \
+ ste-ux500-samsung-janice.dtb \
ste-ux500-samsung-skomer.dtb
dtb-$(CONFIG_ARCH_UNIPHIER) += \
uniphier-ld4-ref.dtb \
@@ -1307,6 +1307,7 @@ dtb-$(CONFIG_ARCH_VT8500) += \
wm8850-w70v2.dtb
dtb-$(CONFIG_ARCH_ZYNQ) += \
zynq-cc108.dtb \
+ zynq-ebaz4205.dtb \
zynq-microzed.dtb \
zynq-parallella.dtb \
zynq-zc702.dtb \
@@ -1398,11 +1399,11 @@ dtb-$(CONFIG_ARCH_MSTARV7) += \
mstar-infinity2m-ssd202d-ssd201htv2.dtb \
mstar-infinity3-msc313e-breadbee.dtb \
mstar-mercury5-ssc8336n-midrived08.dtb
-dtb-$(CONFIG_ARCH_ZX) += zx296702-ad1.dtb
dtb-$(CONFIG_ARCH_ASPEED) += \
aspeed-ast2500-evb.dtb \
aspeed-ast2600-evb.dtb \
aspeed-bmc-amd-ethanolx.dtb \
+ aspeed-bmc-ampere-mtjade.dtb \
aspeed-bmc-arm-centriq2400-rep.dtb \
aspeed-bmc-arm-stardragon4800-rep2.dtb \
aspeed-bmc-bytedance-g220a.dtb \
@@ -1415,6 +1416,7 @@ dtb-$(CONFIG_ARCH_ASPEED) += \
aspeed-bmc-facebook-wedge400.dtb \
aspeed-bmc-facebook-yamp.dtb \
aspeed-bmc-facebook-yosemitev2.dtb \
+ aspeed-bmc-ibm-everest.dtb \
aspeed-bmc-ibm-rainier.dtb \
aspeed-bmc-ibm-rainier-4u.dtb \
aspeed-bmc-intel-s2600wf.dtb \
@@ -1434,4 +1436,5 @@ dtb-$(CONFIG_ARCH_ASPEED) += \
aspeed-bmc-opp-witherspoon.dtb \
aspeed-bmc-opp-zaius.dtb \
aspeed-bmc-portwell-neptune.dtb \
- aspeed-bmc-quanta-q71l.dtb
+ aspeed-bmc-quanta-q71l.dtb \
+ aspeed-bmc-supermicro-x11spi.dtb
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index 7c6f2c11f0e1..902e295b309e 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -684,28 +684,31 @@
};
};
-&mac {
+&mac_sw {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&cpsw_default>;
pinctrl-1 = <&cpsw_sleep>;
status = "okay";
- slaves = <1>;
};
-&davinci_mdio {
+&davinci_mdio_sw {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&davinci_mdio_default>;
pinctrl-1 = <&davinci_mdio_sleep>;
- status = "okay";
ethphy0: ethernet-phy@0 {
reg = <0>;
};
};
-&cpsw_emac0 {
+&cpsw_port1 {
phy-handle = <&ethphy0>;
phy-mode = "rgmii-id";
+ ti,dual-emac-pvid = <1>;
+};
+
+&cpsw_port2 {
+ status = "disabled";
};
&tscadc {
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index b43b94122d3c..d5f8d5e2eb5d 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -596,19 +596,17 @@
};
};
-&mac {
+&mac_sw {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&cpsw_default>;
pinctrl-1 = <&cpsw_sleep>;
- dual_emac = <1>;
status = "okay";
};
-&davinci_mdio {
+&davinci_mdio_sw {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&davinci_mdio_default>;
pinctrl-1 = <&davinci_mdio_sleep>;
- status = "okay";
ethphy0: ethernet-phy@0 {
reg = <0>;
@@ -619,16 +617,16 @@
};
};
-&cpsw_emac0 {
+&cpsw_port1 {
phy-handle = <&ethphy0>;
phy-mode = "rgmii-id";
- dual_emac_res_vlan = <1>;
+ ti,dual-emac-pvid = <1>;
};
-&cpsw_emac1 {
+&cpsw_port2 {
phy-handle = <&ethphy1>;
phy-mode = "rgmii-id";
- dual_emac_res_vlan = <2>;
+ ti,dual-emac-pvid = <2>;
};
&mmc1 {
diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
index b958ab56a412..e923d065304d 100644
--- a/arch/arm/boot/dts/am335x-icev2.dts
+++ b/arch/arm/boot/dts/am335x-icev2.dts
@@ -474,31 +474,29 @@
};
};
-&cpsw_emac0 {
+&cpsw_port1 {
phy-handle = <&ethphy0>;
phy-mode = "rmii";
- dual_emac_res_vlan = <1>;
+ ti,dual-emac-pvid = <1>;
};
-&cpsw_emac1 {
+&cpsw_port2 {
phy-handle = <&ethphy1>;
phy-mode = "rmii";
- dual_emac_res_vlan = <2>;
+ ti,dual-emac-pvid = <2>;
};
-&mac {
+&mac_sw {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&cpsw_default>;
pinctrl-1 = <&cpsw_sleep>;
status = "okay";
- dual_emac;
};
-&davinci_mdio {
+&davinci_mdio_sw {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&davinci_mdio_default>;
pinctrl-1 = <&davinci_mdio_sleep>;
- status = "okay";
reset-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
reset-delay-us = <2>; /* PHY datasheet states 1uS min */
diff --git a/arch/arm/boot/dts/am335x-myirtech-myc.dtsi b/arch/arm/boot/dts/am335x-myirtech-myc.dtsi
new file mode 100644
index 000000000000..270a3d5e8f98
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-myirtech-myc.dtsi
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SPDX-FileCopyrightText: Alexander Shiyan, <shc_work@mail.ru> */
+
+/* Based on code by myc_c335x.dts, MYiRtech.com */
+/* Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ */
+
+/dts-v1/;
+
+#include "am33xx.dtsi"
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "MYIR MYC-AM335X";
+ compatible = "myir,myc-am335x", "ti,am33xx";
+
+ cpus {
+ cpu@0 {
+ cpu0-supply = <&vdd_core>;
+ voltage-tolerance = <2>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x10000000>;
+ };
+
+ vdd_mod: vdd_mod_reg {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd-mod";
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_core: vdd_core_reg {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd-core";
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vdd_mod>;
+ };
+
+ leds: leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_mod_pins>;
+
+ led_mod: led_mod {
+ label = "module:user";
+ gpios = <&gpio3 18 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "off";
+ panic-indicator;
+ };
+ };
+};
+
+&cpsw_emac0 {
+ phy-handle = <&phy0>;
+ phy-mode = "rgmii-id";
+};
+
+&davinci_mdio {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&mdio_pins_default>;
+ pinctrl-1 = <&mdio_pins_sleep>;
+ status = "okay";
+
+ phy0: ethernet-phy@4 {
+ reg = <4>;
+ };
+};
+
+&elm {
+ status = "okay";
+};
+
+&gpmc {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&nand_pins_default>;
+ pinctrl-1 = <&nand_pins_sleep>;
+ ranges = <0 0 0x8000000 0x1000000>;
+ status = "okay";
+
+ nand0: nand@0,0 {
+ compatible = "ti,omap2-nand";
+ reg = <0 0 4>;
+ interrupt-parent = <&gpmc>;
+ interrupts = <0 IRQ_TYPE_NONE>, <1 IRQ_TYPE_NONE>;
+ nand-bus-width = <8>;
+ rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>;
+ gpmc,device-width = <1>;
+ gpmc,sync-clk-ps = <0>;
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <44>;
+ gpmc,cs-wr-off-ns = <44>;
+ gpmc,adv-on-ns = <6>;
+ gpmc,adv-rd-off-ns = <34>;
+ gpmc,adv-wr-off-ns = <44>;
+ gpmc,we-on-ns = <0>;
+ gpmc,we-off-ns = <40>;
+ gpmc,oe-on-ns = <0>;
+ gpmc,oe-off-ns = <54>;
+ gpmc,access-ns = <64>;
+ gpmc,rd-cycle-ns = <82>;
+ gpmc,wr-cycle-ns = <82>;
+ gpmc,bus-turnaround-ns = <0>;
+ gpmc,cycle2cycle-delay-ns = <0>;
+ gpmc,clk-activation-ns = <0>;
+ gpmc,wr-access-ns = <40>;
+ gpmc,wr-data-mux-bus-ns = <0>;
+ ti,elm-id = <&elm>;
+ ti,nand-ecc-opt = "bch8";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
+
+&i2c0 {
+ pinctrl-names = "default", "gpio", "sleep";
+ pinctrl-0 = <&i2c0_pins_default>;
+ pinctrl-1 = <&i2c0_pins_gpio>;
+ pinctrl-2 = <&i2c0_pins_sleep>;
+ clock-frequency = <400000>;
+ scl-gpios = <&gpio3 6 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio3 5 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ status = "okay";
+
+ eeprom: eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ pagesize = <32>;
+ vcc-supply = <&vdd_mod>;
+ };
+};
+
+&mac {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&eth_slave1_pins_default>;
+ pinctrl-1 = <&eth_slave1_pins_sleep>;
+ slaves = <1>;
+ status = "okay";
+};
+
+&rtc {
+ system-power-controller;
+};
+
+&am33xx_pinmux {
+ mdio_pins_default: pinmux_mdio_pins_default {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0) /* mdio_data */
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0) /* mdio_clk */
+ >;
+ };
+
+ mdio_pins_sleep: pinmux_mdio_pins_sleep {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ >;
+ };
+
+ eth_slave1_pins_default: pinmux_eth_slave1_pins_default {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* rgmii1_tctl */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE2) /* rgmii1_rctl */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* rgmii1_td3 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* rgmii1_td2 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* rgmii1_td1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* rgmii1_td0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* rgmii1_tclk */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE2) /* rgmii1_rclk */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE2) /* rgmii1_rd3 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE2) /* rgmii1_rd2 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE2) /* rgmii1_rd1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE2) /* rgmii1_rd0 */
+ >;
+ };
+
+ eth_slave1_pins_sleep: pinmux_eth_slave1_pins_sleep {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ >;
+ };
+
+ i2c0_pins_default: pinmux_i2c0_pins_default {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT | SLEWCTRL_FAST, MUX_MODE0) /* I2C0_SDA */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT | SLEWCTRL_FAST, MUX_MODE0) /* I2C0_SCL */
+ >;
+ };
+
+ i2c0_pins_gpio: pinmux_i2c0_pins_gpio {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT, MUX_MODE7) /* gpio3[5] */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT, MUX_MODE7) /* gpio3[6] */
+ >;
+ };
+
+ i2c0_pins_sleep: pinmux_i2c0_pins_sleep {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ >;
+ };
+
+ led_mod_pins: pinmux_led_mod_pins {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpio3[18] */
+ >;
+ };
+
+ nand_pins_default: pinmux_nand_pins_default {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_ad0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_ad1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_ad2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_ad3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_ad4 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_ad5 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_ad6 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_ad7 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_wait0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_INPUT_PULLUP, MUX_MODE7) /* gpio0[31] */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN0, PIN_OUTPUT, MUX_MODE0) /* gpmc_csn0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_ADVN_ALE, PIN_OUTPUT, MUX_MODE0) /* gpmc_advn_ale */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_OEN_REN, PIN_OUTPUT, MUX_MODE0) /* gpmc_oen_ren */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WEN, PIN_OUTPUT, MUX_MODE0) /* gpmc_wen */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN0_CLE, PIN_OUTPUT, MUX_MODE0) /* gpmc_be0n_cle */
+ >;
+ };
+
+ nand_pins_sleep: pinmux_nand_pins_sleep {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_ADVN_ALE, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_OEN_REN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WEN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN0_CLE, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/am335x-myirtech-myd.dts b/arch/arm/boot/dts/am335x-myirtech-myd.dts
new file mode 100644
index 000000000000..c996639874e6
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-myirtech-myd.dts
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SPDX-FileCopyrightText: Alexander Shiyan, <shc_work@mail.ru> */
+/* Based on code by myd_c335x.dts, MYiRtech.com */
+/* Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ */
+
+/dts-v1/;
+
+#include "am335x-myirtech-myc.dtsi"
+
+#include <dt-bindings/display/tda998x.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "MYIR MYD-AM335X";
+ compatible = "myir,myd-am335x", "myir,myc-am335x", "ti,am33xx";
+
+ chosen {
+ stdout-path = &uart0;
+ };
+
+ clk12m: clk12m {
+ compatible = "fixed-clock";
+ clock-frequency = <12000000>;
+
+ #clock-cells = <0>;
+ };
+
+ gpio_buttons: gpio_buttons {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_buttons_pins>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button1: button@0 {
+ reg = <0>;
+ label = "button1";
+ linux,code = <BTN_1>;
+ gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
+ };
+
+ button2: button@1 {
+ reg = <1>;
+ label = "button2";
+ linux,code = <BTN_2>;
+ gpios = <&gpio0 29 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ sound: sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&master_codec>;
+ simple-audio-card,frame-master = <&master_codec>;
+
+ simple-audio-card,cpu {
+ sound-dai = <&mcasp0>;
+ };
+
+ master_codec: simple-audio-card,codec@1 {
+ sound-dai = <&sgtl5000>;
+ };
+
+ simple-audio-card,codec@2 {
+ sound-dai = <&tda9988>;
+ };
+ };
+
+ vdd_5v0: vdd_5v0_reg {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_3v3: vdd_3v3_reg {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vdd_5v0>;
+ };
+};
+
+&cpsw_emac1 {
+ phy-handle = <&phy1>;
+ phy-mode = "rgmii-id";
+};
+
+&davinci_mdio {
+ phy1: ethernet-phy@6 {
+ reg = <6>;
+ eee-broken-1000t;
+ };
+};
+
+&dcan0 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&dcan0_pins_default>;
+ pinctrl-1 = <&dcan0_pins_sleep>;
+ status = "okay";
+};
+
+&dcan1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&dcan1_pins_default>;
+ pinctrl-1 = <&dcan1_pins_sleep>;
+ status = "okay";
+};
+
+&ehrpwm0 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&ehrpwm0_pins_default>;
+ pinctrl-1 = <&ehrpwm0_pins_sleep>;
+ status = "okay";
+};
+
+&epwmss0 {
+ status = "okay";
+};
+
+&i2c1 {
+ pinctrl-names = "default", "gpio", "sleep";
+ pinctrl-0 = <&i2c1_pins_default>;
+ pinctrl-1 = <&i2c1_pins_gpio>;
+ pinctrl-2 = <&i2c1_pins_sleep>;
+ clock-frequency = <400000>;
+ scl-gpios = <&gpio0 5 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio0 4 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ status = "okay";
+
+ sgtl5000: sgtl5000@a {
+ compatible = "fsl,sgtl5000";
+ reg =<0xa>;
+ clocks = <&clk12m>;
+ micbias-resistor-k-ohms = <4>;
+ micbias-voltage-m-volts = <2250>;
+ VDDA-supply = <&vdd_3v3>;
+ VDDIO-supply = <&vdd_3v3>;
+
+ #sound-dai-cells = <0>;
+ };
+
+ tda9988: tda9988@70 {
+ compatible = "nxp,tda998x";
+ reg =<0x70>;
+ audio-ports = <TDA998x_I2S 1>;
+
+ #sound-dai-cells = <0>;
+
+ ports {
+ port@0 {
+ hdmi_0: endpoint@0 {
+ remote-endpoint = <&lcdc_0>;
+ };
+ };
+ };
+ };
+};
+
+&lcdc {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&lcdc_pins_default>;
+ pinctrl-1 = <&lcdc_pins_sleep>;
+ blue-and-red-wiring = "straight";
+ status = "okay";
+
+ port {
+ lcdc_0: endpoint@0 {
+ remote-endpoint = <&hdmi_0>;
+ };
+ };
+};
+
+&leds {
+ pinctrl-0 = <&led_mod_pins &leds_pins>;
+
+ led1: led1 {
+ label = "base:user1";
+ gpios = <&gpio0 27 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "off";
+ };
+
+ led2: led2 {
+ label = "base:user2";
+ gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "off";
+ };
+};
+
+&mac {
+ pinctrl-0 = <&eth_slave1_pins_default>, <&eth_slave2_pins_default>;
+ pinctrl-1 = <&eth_slave1_pins_sleep>, <&eth_slave2_pins_sleep>;
+ slaves = <2>;
+};
+
+&mcasp0 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&mcasp0_pins_default>;
+ pinctrl-1 = <&mcasp0_pins_sleep>;
+ op-mode = <0>;
+ tdm-slots = <2>;
+ serial-dir = <0 1 2 0>;
+ tx-num-evt = <32>;
+ rx-num-evt = <32>;
+ status = "okay";
+
+ #sound-dai-cells = <0>;
+};
+
+&mmc1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&mmc1_pins_default>;
+ pinctrl-1 = <&mmc1_pins_sleep>;
+ cd-gpios = <&gpio3 21 GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
+ vmmc-supply = <&vdd_3v3>;
+ status = "okay";
+};
+
+&nand0 {
+ partition@0 {
+ label = "MLO";
+ reg = <0x00000 0x20000>;
+ };
+
+ partition@20000 {
+ label = "boot";
+ reg = <0x20000 0x80000>;
+ };
+};
+
+&tscadc {
+ status = "okay";
+
+ adc: adc {
+ ti,adc-channels = <0 1 2 3 4 5 6>;
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&uart1_pins_default>;
+ pinctrl-1 = <&uart1_pins_sleep>;
+ linux,rs485-enabled-at-boot-time;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&uart2_pins_default>;
+ pinctrl-1 = <&uart2_pins_sleep>;
+ status = "okay";
+};
+
+&usb {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_pins>;
+};
+
+&usb0 {
+ dr_mode = "otg";
+};
+
+&usb0_phy {
+ vcc-supply = <&vdd_5v0>;
+};
+
+&usb1 {
+ dr_mode = "host";
+};
+
+&usb1_phy {
+ vcc-supply = <&vdd_5v0>;
+};
+
+&vdd_mod {
+ vin-supply = <&vdd_3v3>;
+};
+
+&am33xx_pinmux {
+ dcan0_pins_default: pinmux_dcan0_pins_default {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_OUTPUT, MUX_MODE2) /* dcan0_tx_mux2 */
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_INPUT, MUX_MODE2) /* dcan0_rx_mux2 */
+ >;
+ };
+
+ dcan0_pins_sleep: pinmux_dcan0_pins_sleep {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ >;
+ };
+
+ dcan1_pins_default: pinmux_dcan1_pins_default {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_OUTPUT, MUX_MODE2) /* dcan1_tx_mux0 */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RTSN, PIN_INPUT, MUX_MODE2) /* dcan1_rx_mux0 */
+ >;
+ };
+
+ dcan1_pins_sleep: pinmux_dcan1_pins_sleep {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_UART0_RTSN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ >;
+ };
+
+ ehrpwm0_pins_default: pinmux_ehrpwm0_pins_default {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_OUTPUT, MUX_MODE3) /* ehrpwm0A_mux1 */
+ >;
+ };
+
+ ehrpwm0_pins_sleep: pinmux_ehrpwm0_pins_sleep {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ >;
+ };
+
+ eth_slave2_pins_default: pinmux_eth_slave2_pins_default {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* rgmii2_tctl */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A1, PIN_INPUT_PULLDOWN, MUX_MODE2) /* rgmii2_rctl */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A2, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* rgmii2_td3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A3, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* rgmii2_td2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A4, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* rgmii2_td1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* rgmii2_td0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* rgmii2_tclk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_INPUT_PULLDOWN, MUX_MODE2) /* rgmii2_rclk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A8, PIN_INPUT_PULLDOWN, MUX_MODE2) /* rgmii2_rd3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A9, PIN_INPUT_PULLDOWN, MUX_MODE2) /* rgmii2_rd2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT_PULLDOWN, MUX_MODE2 /* rgmii2_rd1 */)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_INPUT_PULLDOWN, MUX_MODE2 /* rgmii2_rd0 */)
+ >;
+ };
+
+ eth_slave2_pins_sleep: pinmux_eth_slave2_pins_sleep {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A4, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A8, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A9, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ >;
+ };
+
+ gpio_buttons_pins: pinmux_gpio_buttons_pins {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpio3[0] */
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT, MUX_MODE7) /* gpio0[29] */
+ >;
+ };
+
+ i2c1_pins_default: pinmux_i2c1_pins_default {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D1, PIN_INPUT | SLEWCTRL_FAST, MUX_MODE2) /* I2C1_SDA_mux3 */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_INPUT | SLEWCTRL_FAST, MUX_MODE2) /* I2C1_SCL_mux3 */
+ >;
+ };
+
+ i2c1_pins_gpio: pinmux_i2c1_pins_gpio {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D1, PIN_INPUT, MUX_MODE7) /* gpio0[4] */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_INPUT, MUX_MODE7) /* gpio0[5] */
+ >;
+ };
+
+ i2c1_pins_sleep: pinmux_i2c1_pins_sleep {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ >;
+ };
+
+ lcdc_pins_default: pinmux_lcdc_pins_default {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PIN_OUTPUT, MUX_MODE0) /* lcd_data0 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE0) /* lcd_data1 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE0) /* lcd_data2 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PIN_OUTPUT, MUX_MODE0) /* lcd_data3 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PIN_OUTPUT, MUX_MODE0) /* lcd_data4 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PIN_OUTPUT, MUX_MODE0) /* lcd_data5 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PIN_OUTPUT, MUX_MODE0) /* lcd_data6 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PIN_OUTPUT, MUX_MODE0) /* lcd_data7 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_OUTPUT, MUX_MODE0) /* lcd_data8 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_OUTPUT, MUX_MODE0) /* lcd_data9 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PIN_OUTPUT, MUX_MODE0) /* lcd_data10 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PIN_OUTPUT, MUX_MODE0) /* lcd_data11 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PIN_OUTPUT, MUX_MODE0) /* lcd_data12 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PIN_OUTPUT, MUX_MODE0) /* lcd_data13 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PIN_OUTPUT, MUX_MODE0) /* lcd_data14 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PIN_OUTPUT, MUX_MODE0) /* lcd_data15 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT, MUX_MODE0) /* lcd_vsync */
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_OUTPUT, MUX_MODE0) /* lcd_hsync */
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_OUTPUT, MUX_MODE0) /* lcd_pclk */
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_OUTPUT, MUX_MODE0) /* lcd_ac_bias_en */
+ >;
+ };
+
+ lcdc_pins_sleep: pinmux_lcdc_pins_sleep {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ >;
+ };
+
+ leds_pins: pinmux_leds_pins {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD11, PIN_OUTPUT, MUX_MODE7) /* gpio0[27] */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_OUTPUT, MUX_MODE7) /* gpio0[3] */
+ >;
+ };
+
+ mcasp0_pins_default: pinmux_mcasp0_pins_default {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKX, PIN_INPUT_PULLDOWN, MUX_MODE0) /* mcasp0_aclkx_mux0 */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_FSX, PIN_INPUT_PULLDOWN, MUX_MODE0) /* mcasp0_fsx_mux0 */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKR, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mcasp0_axr2_mux0 */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AXR1, PIN_INPUT_PULLDOWN, MUX_MODE0) /* mcasp0_axr1_mux0 */
+ >;
+ };
+
+ mcasp0_pins_sleep: pinmux_mcasp0_pins_sleep {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKX, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_FSX, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKR, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AXR1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ >;
+ };
+
+ mmc1_pins_default: pinmux_mmc1_pins_default {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0) /* mmc0_dat3 */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0) /* mmc0_dat2 */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT_PULLUP, MUX_MODE0) /* mmc0_dat1 */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT_PULLUP, MUX_MODE0) /* mmc0_dat0 */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0) /* mmc0_clk */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0) /* mmc0_cmd */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKX, PIN_INPUT_PULLUP, MUX_MODE7) /* gpio3[21] */
+ >;
+ };
+
+ mmc1_pins_sleep: pinmux_mmc1_pins_sleep {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKX, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ >;
+ };
+
+ uart0_pins: pinmux_uart0_pins {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0) /* uart0_rxd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0) /* uart0_txd */
+ >;
+ };
+
+ uart1_pins_default: pinmux_uart1_pins_default {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT_PULLUP, MUX_MODE0) /* uart1_rxd */
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0) /* uart1_txd */
+ >;
+ };
+
+ uart1_pins_sleep: pinmux_uart1_pins_sleep {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ >;
+ };
+
+ uart2_pins_default: pinmux_uart2_pins_default {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT, MUX_MODE6) /* uart2_rxd_mux1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_OUTPUT, MUX_MODE6) /* uart2_txd_mux1 */
+ >;
+ };
+
+ uart2_pins_sleep: pinmux_uart2_pins_sleep {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ >;
+ };
+
+ usb_pins: pinmux_usb_pins {
+ pinctrl-single,pins = <
+ AM33XX_PADCONF(AM335X_PIN_USB0_DRVVBUS, PIN_OUTPUT_PULLDOWN, MUX_MODE0) /* USB0_DRVVBUS */
+ AM33XX_PADCONF(AM335X_PIN_USB1_DRVVBUS, PIN_OUTPUT_PULLDOWN, MUX_MODE0) /* USB1_DRVVBUS */
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index 78088506d25b..1fb22088caeb 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -765,6 +765,55 @@
phys = <&phy_gmii_sel 2 1>;
};
};
+
+ mac_sw: switch@0 {
+ compatible = "ti,am335x-cpsw-switch", "ti,cpsw-switch";
+ reg = <0x0 0x4000>;
+ ranges = <0 0 0x4000>;
+ clocks = <&cpsw_125mhz_gclk>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ syscon = <&scm_conf>;
+ status = "disabled";
+
+ interrupts = <40 41 42 43>;
+ interrupt-names = "rx_thresh", "rx", "tx", "misc";
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpsw_port1: port@1 {
+ reg = <1>;
+ label = "port1";
+ mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 1 1>;
+ };
+
+ cpsw_port2: port@2 {
+ reg = <2>;
+ label = "port2";
+ mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 2 1>;
+ };
+ };
+
+ davinci_mdio_sw: mdio@1000 {
+ compatible = "ti,cpsw-mdio","ti,davinci_mdio";
+ clocks = <&cpsw_125mhz_gclk>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bus_freq = <1000000>;
+ reg = <0x1000 0x100>;
+ };
+
+ cpts {
+ clocks = <&cpsw_cpts_rft_clk>;
+ clock-names = "cpts";
+ };
+ };
};
target-module@180000 { /* 0x4a180000, ap 5 10.0 */
diff --git a/arch/arm/boot/dts/am574x-idk.dts b/arch/arm/boot/dts/am574x-idk.dts
index 37758761cd88..1b8f3a28af05 100644
--- a/arch/arm/boot/dts/am574x-idk.dts
+++ b/arch/arm/boot/dts/am574x-idk.dts
@@ -39,3 +39,7 @@
&m_can0 {
status = "disabled";
};
+
+&emif1 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
index 827e82be2201..fb9c8a0b241c 100644
--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
+++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
@@ -148,6 +148,8 @@
reg = <0>;
label = "pxa3xx_nand-0";
nand-rb = <0>;
+ nand-ecc-strength = <4>;
+ nand-ecc-step-size = <512>;
marvell,nand-keep-config;
nand-on-flash-bbt;
};
diff --git a/arch/arm/boot/dts/armada-388-helios4.dts b/arch/arm/boot/dts/armada-388-helios4.dts
index b3728de3bd3f..ec134e22bae3 100644
--- a/arch/arm/boot/dts/armada-388-helios4.dts
+++ b/arch/arm/boot/dts/armada-388-helios4.dts
@@ -70,6 +70,9 @@
system-leds {
compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&helios_system_led_pins>;
+
status-led {
label = "helios4:green:status";
gpios = <&gpio0 24 GPIO_ACTIVE_LOW>;
@@ -86,6 +89,9 @@
io-leds {
compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&helios_io_led_pins>;
+
sata1-led {
label = "helios4:green:ata1";
gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
@@ -121,11 +127,15 @@
fan1: j10-pwm {
compatible = "pwm-fan";
pwms = <&gpio1 9 40000>; /* Target freq:25 kHz */
+ pinctrl-names = "default";
+ pinctrl-0 = <&helios_fan1_pins>;
};
fan2: j17-pwm {
compatible = "pwm-fan";
pwms = <&gpio1 23 40000>; /* Target freq:25 kHz */
+ pinctrl-names = "default";
+ pinctrl-0 = <&helios_fan2_pins>;
};
usb2_phy: usb2-phy {
@@ -286,16 +296,22 @@
"mpp39", "mpp40";
marvell,function = "sd0";
};
- helios_led_pins: helios-led-pins {
- marvell,pins = "mpp24", "mpp25",
- "mpp49", "mpp50",
+ helios_system_led_pins: helios-system-led-pins {
+ marvell,pins = "mpp24", "mpp25";
+ marvell,function = "gpio";
+ };
+ helios_io_led_pins: helios-io-led-pins {
+ marvell,pins = "mpp49", "mpp50",
"mpp52", "mpp53",
"mpp54";
marvell,function = "gpio";
};
- helios_fan_pins: helios-fan-pins {
- marvell,pins = "mpp41", "mpp43",
- "mpp48", "mpp55";
+ helios_fan1_pins: helios-fan1-pins {
+ marvell,pins = "mpp41", "mpp43";
+ marvell,function = "gpio";
+ };
+ helios_fan2_pins: helios-fan2-pins {
+ marvell,pins = "mpp48", "mpp55";
marvell,function = "gpio";
};
microsom_spi1_cs_pins: spi1-cs-pins {
diff --git a/arch/arm/boot/dts/aspeed-ast2600-evb.dts b/arch/arm/boot/dts/aspeed-ast2600-evb.dts
index 89be13197780..2772796e215e 100644
--- a/arch/arm/boot/dts/aspeed-ast2600-evb.dts
+++ b/arch/arm/boot/dts/aspeed-ast2600-evb.dts
@@ -237,3 +237,11 @@
&fsim0 {
status = "okay";
};
+
+&ehci1 {
+ status = "okay";
+};
+
+&uhci {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts b/arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts
index 96ff0aea64e5..ac2d04cfaf2f 100644
--- a/arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts
@@ -218,7 +218,7 @@
&lpc_snoop {
status = "okay";
- snoop-ports = <0x80>;
+ snoop-ports = <0x80>, <0x81>;
};
&lpc_ctrl {
diff --git a/arch/arm/boot/dts/aspeed-bmc-ampere-mtjade.dts b/arch/arm/boot/dts/aspeed-bmc-ampere-mtjade.dts
new file mode 100644
index 000000000000..8f5ec22e51c2
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed-bmc-ampere-mtjade.dts
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+#include "aspeed-g5.dtsi"
+#include <dt-bindings/gpio/aspeed-gpio.h>
+
+/ {
+ model = "Ampere Mt. Jade BMC";
+ compatible = "ampere,mtjade-bmc", "aspeed,ast2500";
+
+ chosen {
+ stdout-path = &uart5;
+ bootargs = "console=ttyS4,115200 earlyprintk";
+ };
+
+ memory@80000000 {
+ reg = <0x80000000 0x20000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ vga_memory: framebuffer@9f000000 {
+ no-map;
+ reg = <0x9f000000 0x01000000>; /* 16M */
+ };
+
+ gfx_memory: framebuffer {
+ size = <0x01000000>;
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+
+ video_engine_memory: jpegbuffer {
+ size = <0x02000000>; /* 32M */
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ fault {
+ gpios = <&gpio ASPEED_GPIO(B, 6) GPIO_ACTIVE_HIGH>;
+ };
+
+ identify {
+ gpios = <&gpio ASPEED_GPIO(Q, 6) GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ shutdown_ack {
+ label = "SHUTDOWN_ACK";
+ gpios = <&gpio ASPEED_GPIO(G, 2) GPIO_ACTIVE_LOW>;
+ linux,code = <ASPEED_GPIO(G, 2)>;
+ };
+
+ reboot_ack {
+ label = "REBOOT_ACK";
+ gpios = <&gpio ASPEED_GPIO(J, 3) GPIO_ACTIVE_LOW>;
+ linux,code = <ASPEED_GPIO(J, 3)>;
+ };
+
+ S0_overtemp {
+ label = "S0_OVERTEMP";
+ gpios = <&gpio ASPEED_GPIO(G, 3) GPIO_ACTIVE_LOW>;
+ linux,code = <ASPEED_GPIO(G, 3)>;
+ };
+
+ S0_hightemp {
+ label = "S0_HIGHTEMP";
+ gpios = <&gpio ASPEED_GPIO(J, 0) GPIO_ACTIVE_LOW>;
+ linux,code = <ASPEED_GPIO(J, 0)>;
+ };
+
+ S0_cpu_fault {
+ label = "S0_CPU_FAULT";
+ gpios = <&gpio ASPEED_GPIO(J, 1) GPIO_ACTIVE_HIGH>;
+ linux,code = <ASPEED_GPIO(J, 1)>;
+ };
+
+ S1_overtemp {
+ label = "S1_OVERTEMP";
+ gpios = <&gpio ASPEED_GPIO(Z, 6) GPIO_ACTIVE_LOW>;
+ linux,code = <ASPEED_GPIO(Z, 6)>;
+ };
+
+ S1_hightemp {
+ label = "S1_HIGHTEMP";
+ gpios = <&gpio ASPEED_GPIO(AB, 0) GPIO_ACTIVE_LOW>;
+ linux,code = <ASPEED_GPIO(AB, 0)>;
+ };
+
+ S1_cpu_fault {
+ label = "S1_CPU_FAULT";
+ gpios = <&gpio ASPEED_GPIO(Z, 1) GPIO_ACTIVE_HIGH>;
+ linux,code = <ASPEED_GPIO(Z, 1)>;
+ };
+
+ id_button {
+ label = "ID_BUTTON";
+ gpios = <&gpio ASPEED_GPIO(Q, 5) GPIO_ACTIVE_LOW>;
+ linux,code = <ASPEED_GPIO(Q, 5)>;
+ };
+
+ };
+
+ gpioA0mux: mux-controller {
+ compatible = "gpio-mux";
+ #mux-control-cells = <0>;
+ mux-gpios = <&gpio ASPEED_GPIO(A, 0) GPIO_ACTIVE_LOW>;
+ };
+
+ adc0mux: adc0mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 0>;
+ #io-channel-cells = <1>;
+ io-channel-names = "parent";
+ mux-controls = <&gpioA0mux>;
+ channels = "s0", "s1";
+ };
+
+ adc1mux: adc1mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 1>;
+ #io-channel-cells = <1>;
+ io-channel-names = "parent";
+ mux-controls = <&gpioA0mux>;
+ channels = "s0", "s1";
+ };
+
+ adc2mux: adc2mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 2>;
+ #io-channel-cells = <1>;
+ io-channel-names = "parent";
+ mux-controls = <&gpioA0mux>;
+ channels = "s0", "s1";
+ };
+
+ adc3mux: adc3mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 3>;
+ #io-channel-cells = <1>;
+ io-channel-names = "parent";
+ mux-controls = <&gpioA0mux>;
+ channels = "s0", "s1";
+ };
+
+ adc4mux: adc4mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 4>;
+ #io-channel-cells = <1>;
+ io-channel-names = "parent";
+ mux-controls = <&gpioA0mux>;
+ channels = "s0", "s1";
+ };
+
+ adc5mux: adc5mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 5>;
+ #io-channel-cells = <1>;
+ io-channel-names = "parent";
+ mux-controls = <&gpioA0mux>;
+ channels = "s0", "s1";
+ };
+
+ adc6mux: adc6mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 6>;
+ #io-channel-cells = <1>;
+ io-channel-names = "parent";
+ mux-controls = <&gpioA0mux>;
+ channels = "s0", "s1";
+ };
+
+ adc7mux: adc7mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 7>;
+ #io-channel-cells = <1>;
+ io-channel-names = "parent";
+ mux-controls = <&gpioA0mux>;
+ channels = "s0", "s1";
+ };
+
+ adc8mux: adc8mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 8>;
+ #io-channel-cells = <1>;
+ io-channel-names = "parent";
+ mux-controls = <&gpioA0mux>;
+ channels = "s0", "s1";
+ };
+
+ adc9mux: adc9mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 9>;
+ #io-channel-cells = <1>;
+ io-channel-names = "parent";
+ mux-controls = <&gpioA0mux>;
+ channels = "s0", "s1";
+ };
+
+ adc10mux: adc10mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 10>;
+ #io-channel-cells = <1>;
+ io-channel-names = "parent";
+ mux-controls = <&gpioA0mux>;
+ channels = "s0", "s1";
+ };
+
+ adc11mux: adc11mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 11>;
+ #io-channel-cells = <1>;
+ io-channel-names = "parent";
+ mux-controls = <&gpioA0mux>;
+ channels = "s0", "s1";
+ };
+
+ adc12mux: adc12mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 12>;
+ #io-channel-cells = <1>;
+ io-channel-names = "parent";
+ mux-controls = <&gpioA0mux>;
+ channels = "s0", "s1";
+ };
+
+ adc13mux: adc13mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 13>;
+ #io-channel-cells = <1>;
+ io-channel-names = "parent";
+ mux-controls = <&gpioA0mux>;
+ channels = "s0", "s1";
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc0mux 0>, <&adc0mux 1>,
+ <&adc1mux 0>, <&adc1mux 1>,
+ <&adc2mux 0>, <&adc2mux 1>,
+ <&adc3mux 0>, <&adc3mux 1>,
+ <&adc4mux 0>, <&adc4mux 1>,
+ <&adc5mux 0>, <&adc5mux 1>,
+ <&adc6mux 0>, <&adc6mux 1>,
+ <&adc7mux 0>, <&adc7mux 1>,
+ <&adc8mux 0>, <&adc8mux 1>,
+ <&adc9mux 0>, <&adc9mux 1>,
+ <&adc10mux 0>, <&adc10mux 1>,
+ <&adc11mux 0>, <&adc11mux 1>,
+ <&adc12mux 0>, <&adc12mux 1>,
+ <&adc13mux 0>, <&adc13mux 1>;
+ };
+
+ iio-hwmon-adc14 {
+ compatible = "iio-hwmon";
+ io-channels = <&adc 14>;
+ };
+
+ iio-hwmon-battery {
+ compatible = "iio-hwmon";
+ io-channels = <&adc 15>;
+ };
+};
+
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bmc";
+ /* spi-max-frequency = <50000000>; */
+#include "openbmc-flash-layout.dtsi"
+ };
+};
+
+&spi1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1_default>;
+
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "pnor";
+ /* spi-max-frequency = <100000000>; */
+ };
+};
+
+&uart1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_txd1_default
+ &pinctrl_rxd1_default
+ &pinctrl_ncts1_default
+ &pinctrl_nrts1_default>;
+};
+
+&uart2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_txd2_default
+ &pinctrl_rxd2_default>;
+};
+
+&uart3 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_txd3_default
+ &pinctrl_rxd3_default>;
+};
+
+&uart4 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_txd4_default
+ &pinctrl_rxd4_default>;
+};
+
+/* The BMC's uart */
+&uart5 {
+ status = "okay";
+};
+
+&mac1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii2_default &pinctrl_mdio2_default>;
+};
+
+&i2c0 {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+};
+
+&i2c2 {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+ eeprom@50 {
+ compatible = "microchip,24c64", "atmel,24c64";
+ reg = <0x50>;
+ pagesize = <32>;
+ };
+
+ inlet_mem2: tmp175@28 {
+ compatible = "ti,tmp175";
+ reg = <0x28>;
+ };
+
+ inlet_cpu: tmp175@29 {
+ compatible = "ti,tmp175";
+ reg = <0x29>;
+ };
+
+ inlet_mem1: tmp175@2a {
+ compatible = "ti,tmp175";
+ reg = <0x2a>;
+ };
+
+ outlet_cpu: tmp175@2b {
+ compatible = "ti,tmp175";
+ reg = <0x2b>;
+ };
+
+ outlet1: tmp175@2c {
+ compatible = "ti,tmp175";
+ reg = <0x2c>;
+ };
+
+ outlet2: tmp175@2d {
+ compatible = "ti,tmp175";
+ reg = <0x2d>;
+ };
+};
+
+&i2c4 {
+ status = "okay";
+ rtc@51 {
+ compatible = "nxp,pcf85063a";
+ reg = <0x51>;
+ };
+};
+
+&i2c5 {
+ status = "okay";
+};
+
+&i2c6 {
+ status = "okay";
+ psu@58 {
+ compatible = "pmbus";
+ reg = <0x58>;
+ };
+
+ psu@59 {
+ compatible = "pmbus";
+ reg = <0x59>;
+ };
+};
+
+&i2c7 {
+ status = "okay";
+};
+
+&i2c8 {
+ status = "okay";
+};
+
+&i2c9 {
+ status = "okay";
+};
+
+&gfx {
+ status = "okay";
+ memory-region = <&gfx_memory>;
+};
+
+&pinctrl {
+ aspeed,external-nodes = <&gfx &lhc>;
+};
+
+&pwm_tacho {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm2_default &pinctrl_pwm3_default
+ &pinctrl_pwm4_default &pinctrl_pwm5_default
+ &pinctrl_pwm6_default &pinctrl_pwm7_default>;
+
+ fan@0 {
+ reg = <0x02>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x04>;
+ };
+
+ fan@1 {
+ reg = <0x02>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x05>;
+ };
+
+ fan@2 {
+ reg = <0x03>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x06>;
+ };
+
+ fan@3 {
+ reg = <0x03>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x07>;
+ };
+
+ fan@4 {
+ reg = <0x04>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x08>;
+ };
+
+ fan@5 {
+ reg = <0x04>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x09>;
+ };
+
+ fan@6 {
+ reg = <0x05>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x0a>;
+ };
+
+ fan@7 {
+ reg = <0x05>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x0b>;
+ };
+
+ fan@8 {
+ reg = <0x06>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x0c>;
+ };
+
+ fan@9 {
+ reg = <0x06>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x0d>;
+ };
+
+ fan@10 {
+ reg = <0x07>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x0e>;
+ };
+
+ fan@11 {
+ reg = <0x07>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x0f>;
+ };
+
+};
+
+&vhub {
+ status = "okay";
+};
+
+&adc {
+ status = "okay";
+};
+
+&video {
+ status = "okay";
+ memory-region = <&video_engine_memory>;
+};
+
+&gpio {
+ gpio-line-names =
+ /*A0-A7*/ "","","","S0_BMC_SPECIAL_BOOT","","","","",
+ /*B0-B7*/ "BMC_SELECT_EEPROM","","","",
+ "POWER_BUTTON","","","",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "","","","","","","","",
+ /*E0-E7*/ "","","","","","","","",
+ /*F0-F7*/ "","","BMC_SYS_PSON_L","S0_DDR_SAVE","PGOOD",
+ "S1_DDR_SAVE","","",
+ /*G0-G7*/ "S0_FW_BOOT_OK","SHD_REQ_L","","S0_OVERTEMP_L","","",
+ "","",
+ /*H0-H7*/ "","","","","","","","",
+ /*I0-I7*/ "","","S1_BMC_SPECIAL_BOOT","","","","","",
+ /*J0-J7*/ "S0_HIGHTEMP_L","S0_FAULT_L","S0_SCP_AUTH_FAIL_L","",
+ "","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","BMC_SYSRESET_L","SPI_AUTH_FAIL_L","","","",
+ /*M0-M7*/ "","","","","","","","",
+ /*N0-N7*/ "","","","","","","","",
+ /*O0-O7*/ "","","","","","","","",
+ /*P0-P7*/ "","","","","","","","",
+ /*Q0-Q7*/ "","","","","","UID_BUTTON","","",
+ /*R0-R7*/ "","","BMC_EXT_HIGHTEMP_L","","","RESET_BUTTON","","",
+ /*S0-S7*/ "","","","","","","","",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "","","","","","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+ /*Z0-Z7*/ "S0_BMC_PLIMIT","S1_FAULT_L","S1_FW_BOOT_OK","","",
+ "S1_SCP_AUTH_FAIL_L","S1_OVERTEMP_L","",
+ /*AA0-AA7*/ "","","","","","","","",
+ /*AB0-AB7*/ "S1_HIGHTEMP_L","S1_BMC_PLIMIT","S0_BMC_DDR_ADDR",
+ "S1_BMC_DDR_ADR","","","","",
+ /*AC0-AC7*/ "SYS_PWR_GD","","","","","BMC_READY","SLAVE_PRESENT_L",
+ "BMC_OCP_PG";
+};
diff --git a/arch/arm/boot/dts/aspeed-bmc-bytedance-g220a.dts b/arch/arm/boot/dts/aspeed-bmc-bytedance-g220a.dts
index 2feb25b0e43b..5ef88c377358 100644
--- a/arch/arm/boot/dts/aspeed-bmc-bytedance-g220a.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-bytedance-g220a.dts
@@ -446,7 +446,11 @@
&i2c4 {
status = "okay";
-
+ ipmb0@10 {
+ compatible = "ipmb-dev";
+ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
+ i2c-protocol;
+ };
};
&i2c5 {
@@ -901,14 +905,14 @@
&gpio {
pin_gpio_i3 {
gpio-hog;
- gpios = <ASPEED_GPIO(I, 3) GPIO_ACTIVE_LOW>;
+ gpios = <ASPEED_GPIO(I, 3) GPIO_ACTIVE_HIGH>;
output-low;
line-name = "NCSI_BMC_R_SEL";
};
pin_gpio_b6 {
gpio-hog;
- gpios = <ASPEED_GPIO(B, 6) GPIO_ACTIVE_LOW>;
+ gpios = <ASPEED_GPIO(B, 6) GPIO_ACTIVE_HIGH>;
output-low;
line-name = "EN_NCSI_SWITCH_N";
};
diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
new file mode 100644
index 000000000000..6bd876657bb8
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
@@ -0,0 +1,775 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright 2020 IBM Corp.
+/dts-v1/;
+
+#include "aspeed-g6.dtsi"
+#include <dt-bindings/gpio/aspeed-gpio.h>
+#include <dt-bindings/i2c/i2c.h>
+#include <dt-bindings/leds/leds-pca955x.h>
+
+/ {
+ model = "Everest";
+ compatible = "ibm,everest-bmc", "aspeed,ast2600";
+
+ aliases {
+ i2c100 = &cfam0_i2c0;
+ i2c101 = &cfam0_i2c1;
+ i2c110 = &cfam0_i2c10;
+ i2c111 = &cfam0_i2c11;
+ i2c112 = &cfam0_i2c12;
+ i2c113 = &cfam0_i2c13;
+ i2c114 = &cfam0_i2c14;
+ i2c115 = &cfam0_i2c15;
+ i2c202 = &cfam1_i2c2;
+ i2c203 = &cfam1_i2c3;
+ i2c210 = &cfam1_i2c10;
+ i2c211 = &cfam1_i2c11;
+ i2c214 = &cfam1_i2c14;
+ i2c215 = &cfam1_i2c15;
+ i2c216 = &cfam1_i2c16;
+ i2c217 = &cfam1_i2c17;
+ i2c300 = &cfam2_i2c0;
+ i2c301 = &cfam2_i2c1;
+ i2c310 = &cfam2_i2c10;
+ i2c311 = &cfam2_i2c11;
+ i2c312 = &cfam2_i2c12;
+ i2c313 = &cfam2_i2c13;
+ i2c314 = &cfam2_i2c14;
+ i2c315 = &cfam2_i2c15;
+ i2c402 = &cfam3_i2c2;
+ i2c403 = &cfam3_i2c3;
+ i2c410 = &cfam3_i2c10;
+ i2c411 = &cfam3_i2c11;
+ i2c414 = &cfam3_i2c14;
+ i2c415 = &cfam3_i2c15;
+ i2c416 = &cfam3_i2c16;
+ i2c417 = &cfam3_i2c17;
+
+ serial4 = &uart5;
+
+ spi10 = &cfam0_spi0;
+ spi11 = &cfam0_spi1;
+ spi12 = &cfam0_spi2;
+ spi13 = &cfam0_spi3;
+ spi20 = &cfam1_spi0;
+ spi21 = &cfam1_spi1;
+ spi22 = &cfam1_spi2;
+ spi23 = &cfam1_spi3;
+ spi30 = &cfam2_spi0;
+ spi31 = &cfam2_spi1;
+ spi32 = &cfam2_spi2;
+ spi33 = &cfam2_spi3;
+ spi40 = &cfam3_spi0;
+ spi41 = &cfam3_spi1;
+ spi42 = &cfam3_spi2;
+ spi43 = &cfam3_spi3;
+ };
+
+ chosen {
+ stdout-path = &uart5;
+ bootargs = "console=ttyS4,115200n8";
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x40000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* LPC FW cycle bridge region requires natural alignment */
+ flash_memory: region@b8000000 {
+ no-map;
+ reg = <0xb8000000 0x04000000>; /* 64M */
+ };
+
+ /* 48MB region from the end of flash to start of vga memory */
+ ramoops@bc000000 {
+ compatible = "ramoops";
+ reg = <0xbc000000 0x180000>; /* 16 * (3 * 0x8000) */
+ record-size = <0x8000>;
+ console-size = <0x8000>;
+ pmsg-size = <0x8000>;
+ max-reason = <3>; /* KMSG_DUMP_EMERG */
+ };
+
+ /* VGA region is dictated by hardware strapping */
+ vga_memory: region@bf000000 {
+ no-map;
+ compatible = "shared-dma-pool";
+ reg = <0xbf000000 0x01000000>; /* 16M */
+ };
+ };
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&emmc_controller {
+ status = "okay";
+};
+
+&pinctrl_emmc_default {
+ bias-disable;
+};
+
+&emmc {
+ status = "okay";
+};
+
+&fsim0 {
+ status = "okay";
+
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ /*
+ * CFAM Reset is supposed to be active low but pass1 hardware is wired
+ * active high.
+ */
+ cfam-reset-gpios = <&gpio0 ASPEED_GPIO(Q, 0) GPIO_ACTIVE_HIGH>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom@1000 {
+ compatible = "ibm,fsi2pib";
+ reg = <0x1000 0x400>;
+ };
+
+ i2c@1800 {
+ compatible = "ibm,fsi-i2c-master";
+ reg = <0x1800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam0_i2c0: i2c-bus@0 {
+ reg = <0>; /* OMI01 */
+ };
+
+ cfam0_i2c1: i2c-bus@1 {
+ reg = <1>; /* OMI23 */
+ };
+
+ cfam0_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ };
+
+ cfam0_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ };
+
+ cfam0_i2c12: i2c-bus@c {
+ reg = <12>; /* OP4A */
+ };
+
+ cfam0_i2c13: i2c-bus@d {
+ reg = <13>; /* OP4B */
+ };
+
+ cfam0_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ };
+
+ cfam0_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ };
+ };
+
+ fsi2spi@1c00 {
+ compatible = "ibm,fsi2spi";
+ reg = <0x1c00 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam0_spi0: spi@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam0_spi1: spi@20 {
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam0_spi2: spi@40 {
+ reg = <0x40>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam0_spi3: spi@60 {
+ reg = <0x60>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi_occ0: occ {
+ compatible = "ibm,p10-occ";
+ };
+ };
+
+ fsi_hub0: hub@3400 {
+ compatible = "fsi-master-hub";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ };
+ };
+};
+
+&fsi_hub0 {
+ cfam@1,0 {
+ reg = <1 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <1>;
+
+ scom@1000 {
+ compatible = "ibm,fsi2pib";
+ reg = <0x1000 0x400>;
+ };
+
+ i2c@1800 {
+ compatible = "ibm,fsi-i2c-master";
+ reg = <0x1800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam1_i2c2: i2c-bus@2 {
+ reg = <2>; /* OMI45 */
+ };
+
+ cfam1_i2c3: i2c-bus@3 {
+ reg = <3>; /* OMI67 */
+ };
+
+ cfam1_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ };
+
+ cfam1_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ };
+
+ cfam1_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ };
+
+ cfam1_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ };
+
+ cfam1_i2c16: i2c-bus@10 {
+ reg = <16>; /* OP6A */
+ };
+
+ cfam1_i2c17: i2c-bus@11 {
+ reg = <17>; /* OP6B */
+ };
+ };
+
+ fsi2spi@1c00 {
+ compatible = "ibm,fsi2spi";
+ reg = <0x1c00 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam1_spi0: spi@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam1_spi1: spi@20 {
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam1_spi2: spi@40 {
+ reg = <0x40>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam1_spi3: spi@60 {
+ reg = <0x60>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi_occ1: occ {
+ compatible = "ibm,p10-occ";
+ };
+ };
+
+ fsi_hub1: hub@3400 {
+ compatible = "fsi-master-hub";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ no-scan-on-init;
+ };
+ };
+
+ cfam@2,0 {
+ reg = <2 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <2>;
+
+ scom@1000 {
+ compatible = "ibm,fsi2pib";
+ reg = <0x1000 0x400>;
+ };
+
+ i2c@1800 {
+ compatible = "ibm,fsi-i2c-master";
+ reg = <0x1800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam2_i2c0: i2c-bus@0 {
+ reg = <0>; /* OM01 */
+ };
+
+ cfam2_i2c1: i2c-bus@1 {
+ reg = <1>; /* OM23 */
+ };
+
+ cfam2_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ };
+
+ cfam2_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ };
+
+ cfam2_i2c12: i2c-bus@c {
+ reg = <12>; /* OP4A */
+ };
+
+ cfam2_i2c13: i2c-bus@d {
+ reg = <13>; /* OP4B */
+ };
+
+ cfam2_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ };
+
+ cfam2_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ };
+ };
+
+ fsi2spi@1c00 {
+ compatible = "ibm,fsi2spi";
+ reg = <0x1c00 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam2_spi0: spi@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam2_spi1: spi@20 {
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam2_spi2: spi@40 {
+ reg = <0x40>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam2_spi3: spi@60 {
+ reg = <0x60>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi_occ2: occ {
+ compatible = "ibm,p10-occ";
+ };
+ };
+
+ fsi_hub2: hub@3400 {
+ compatible = "fsi-master-hub";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ no-scan-on-init;
+ };
+ };
+
+ cfam@3,0 {
+ reg = <3 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <3>;
+
+ scom@1000 {
+ compatible = "ibm,fsi2pib";
+ reg = <0x1000 0x400>;
+ };
+
+ i2c@1800 {
+ compatible = "ibm,fsi-i2c-master";
+ reg = <0x1800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam3_i2c2: i2c-bus@2 {
+ reg = <2>; /* OM45 */
+ };
+
+ cfam3_i2c3: i2c-bus@3 {
+ reg = <3>; /* OM67 */
+ };
+
+ cfam3_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ };
+
+ cfam3_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ };
+
+ cfam3_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ };
+
+ cfam3_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ };
+
+ cfam3_i2c16: i2c-bus@10 {
+ reg = <16>; /* OP6A */
+ };
+
+ cfam3_i2c17: i2c-bus@11 {
+ reg = <17>; /* OP6B */
+ };
+ };
+
+ fsi2spi@1c00 {
+ compatible = "ibm,fsi2spi";
+ reg = <0x1c00 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam3_spi0: spi@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam3_spi1: spi@20 {
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam3_spi2: spi@40 {
+ reg = <0x40>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam3_spi3: spi@60 {
+ reg = <0x60>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi_occ3: occ {
+ compatible = "ibm,p10-occ";
+ };
+ };
+
+ fsi_hub3: hub@3400 {
+ compatible = "fsi-master-hub";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ no-scan-on-init;
+ };
+ };
+};
+
+/* Legacy OCC numbering (to get rid of when userspace is fixed) */
+&fsi_occ0 {
+ reg = <1>;
+};
+
+&fsi_occ1 {
+ reg = <2>;
+};
+
+&fsi_occ2 {
+ reg = <3>;
+};
+
+&fsi_occ3 {
+ reg = <4>;
+};
+
+&ibt {
+ status = "okay";
+};
+
+&vuart1 {
+ status = "okay";
+};
+
+&vuart2 {
+ status = "okay";
+};
+
+&lpc_ctrl {
+ status = "okay";
+ memory-region = <&flash_memory>;
+};
+
+&kcs4 {
+ compatible = "openbmc,mctp-lpc";
+ status = "okay";
+};
+
+&mac2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii3_default>;
+ clocks = <&syscon ASPEED_CLK_GATE_MAC3CLK>,
+ <&syscon ASPEED_CLK_MAC3RCLK>;
+ clock-names = "MACCLK", "RCLK";
+ use-ncsi;
+};
+
+&mac3 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii4_default>;
+ clocks = <&syscon ASPEED_CLK_GATE_MAC4CLK>,
+ <&syscon ASPEED_CLK_MAC4RCLK>;
+ clock-names = "MACCLK", "RCLK";
+ use-ncsi;
+};
+
+&xdma {
+ status = "okay";
+ memory-region = <&vga_memory>;
+};
diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
index a4b77aec5424..6c9804d2f3b4 100644
--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
@@ -195,6 +195,7 @@
&emmc {
status = "okay";
+ clk-phase-mmc-hs200 = <180>, <180>;
};
&fsim0 {
@@ -579,7 +580,7 @@
gpio-controller;
#gpio-cells = <2>;
- smbus0 {
+ smbus0-hog {
gpio-hog;
gpios = <4 GPIO_ACTIVE_HIGH>;
output-high;
diff --git a/arch/arm/boot/dts/aspeed-bmc-inspur-fp5280g2.dts b/arch/arm/boot/dts/aspeed-bmc-inspur-fp5280g2.dts
index 62a3ab4c1866..07593897fc9a 100644
--- a/arch/arm/boot/dts/aspeed-bmc-inspur-fp5280g2.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-inspur-fp5280g2.dts
@@ -204,6 +204,39 @@
};
+&gpio {
+ gpio-line-names =
+ /*A0-A7*/ "","","","","","","","",
+ /*B0-B7*/ "","","front-psu","checkstop","cfam-reset","","","init-ok",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "","","","","","","","",
+ /*E0-E7*/ "","","","","","","","",
+ /*F0-F7*/ "ps0-presence","ps1-presence","","","front-memory","","","",
+ /*G0-G7*/ "","","","","","","","",
+ /*H0-H7*/ "","","","","front-fan","","","",
+ /*I0-I7*/ "front-syshealth","front-syshot","mux-gpios","enable-gpios","","","","",
+ /*J0-J7*/ "","","","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","","","","",
+ /*M0-M7*/ "","","","","","","","",
+ /*N0-N7*/ "","","","","","","","",
+ /*O0-O7*/ "","","","","","","","",
+ /*P0-P7*/ "","","","","","","","",
+ /*Q0-Q7*/ "","","","","","","","",
+ /*R0-R7*/ "","power","trans-gpios","","","","","",
+ /*S0-S7*/ "","","","","","","","",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "","","","","","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+ /*Z0-Z7*/ "","","","","","","","identify",
+ /*AA0-AA7*/ "clock-gpios","","data-gpios","","","","","",
+ /*AB0-AB7*/ "","","","","","","","",
+ /*AC0-AC7*/ "","","","","","","","";
+};
+
&fmc {
status = "okay";
@@ -756,12 +789,12 @@
status = "okay";
power-supply@58 {
- compatible = "pmbus";
+ compatible = "inspur,ipsps1";
reg = <0x58>;
};
power-supply@59 {
- compatible = "pmbus";
+ compatible = "inspur,ipsps1";
reg = <0x59>;
};
};
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts b/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts
index cb85168f6761..577c211c469e 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts
@@ -827,7 +827,7 @@
gpio-controller;
#gpio-cells = <2>;
- smbus0 {
+ smbus0-hog {
gpio-hog;
gpios = <4 GPIO_ACTIVE_HIGH>;
output-high;
@@ -852,7 +852,7 @@
gpio-controller;
#gpio-cells = <2>;
- smbus1 {
+ smbus1-hog {
gpio-hog;
gpios = <4 GPIO_ACTIVE_HIGH>;
output-high;
@@ -900,7 +900,7 @@
gpio-controller;
#gpio-cells = <2>;
- smbus2 {
+ smbus2-hog {
gpio-hog;
gpios = <4 GPIO_ACTIVE_HIGH>;
output-high;
@@ -925,7 +925,7 @@
gpio-controller;
#gpio-cells = <2>;
- smbus3 {
+ smbus3-hog {
gpio-hog;
gpios = <4 GPIO_ACTIVE_HIGH>;
output-high;
@@ -992,7 +992,7 @@
gpio-controller;
#gpio-cells = <2>;
- smbus4 {
+ smbus4-hog {
gpio-hog;
gpios = <4 GPIO_ACTIVE_HIGH>;
output-high;
@@ -1017,7 +1017,7 @@
gpio-controller;
#gpio-cells = <2>;
- smbus5 {
+ smbus5-hog {
gpio-hog;
gpios = <4 GPIO_ACTIVE_HIGH>;
output-high;
@@ -1065,7 +1065,7 @@
gpio-controller;
#gpio-cells = <2>;
- smbus6 {
+ smbus6-hog {
gpio-hog;
gpios = <4 GPIO_ACTIVE_HIGH>;
output-high;
@@ -1090,7 +1090,7 @@
gpio-controller;
#gpio-cells = <2>;
- smbus7 {
+ smbus7-hog {
gpio-hog;
gpios = <4 GPIO_ACTIVE_HIGH>;
output-high;
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-mowgli.dts b/arch/arm/boot/dts/aspeed-bmc-opp-mowgli.dts
index b648e468e9db..8503152faaf0 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-mowgli.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-mowgli.dts
@@ -582,6 +582,11 @@
/* TMP275A */
/* TMP275A */
+ rtc@32 {
+ compatible = "epson,rx8900";
+ reg = <0x32>;
+ };
+
tmp275@48 {
compatible = "ti,tmp275";
reg = <0x48>;
diff --git a/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts b/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
index 4a1ca8f5b6a7..03c161493ffc 100644
--- a/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
@@ -121,6 +121,8 @@
pca9555@27 {
compatible = "nxp,pca9555";
reg = <0x27>;
+ gpio-controller;
+ #gpio-cells = <2>;
};
};
diff --git a/arch/arm/boot/dts/aspeed-bmc-supermicro-x11spi.dts b/arch/arm/boot/dts/aspeed-bmc-supermicro-x11spi.dts
new file mode 100644
index 000000000000..bc16ad2b5c80
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed-bmc-supermicro-x11spi.dts
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Super Micro Computer, Inc
+
+/dts-v1/;
+
+#include "aspeed-g5.dtsi"
+
+/ {
+ model = "X11SPI BMC";
+ compatible = "supermicro,x11spi-bmc", "aspeed,ast2500";
+
+ chosen {
+ stdout-path = &uart5;
+ bootargs = "earlyprintk";
+ };
+
+ memory@80000000 {
+ reg = <0x80000000 0x20000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ vga_memory: framebuffer@7f000000 {
+ no-map;
+ reg = <0x7f000000 0x01000000>;
+ };
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc 0>, <&adc 1>, <&adc 2>, <&adc 3>,
+ <&adc 4>, <&adc 5>, <&adc 6>, <&adc 7>,
+ <&adc 8>, <&adc 9>, <&adc 10>, <&adc 11>,
+ <&adc 12>, <&adc 13>, <&adc 14>, <&adc 15>;
+ };
+
+};
+
+&gpio {
+ status = "okay";
+};
+
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bmc";
+#include "openbmc-flash-layout.dtsi"
+ };
+};
+
+&spi1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1_default>;
+
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "pnor";
+ };
+};
+
+&uart5 {
+ status = "okay";
+};
+
+&mac0 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii1_default>;
+ clocks = <&syscon ASPEED_CLK_GATE_MAC1CLK>,
+ <&syscon ASPEED_CLK_MAC1RCLK>;
+ clock-names = "MACCLK", "RCLK";
+ use-ncsi;
+};
+
+&mac1 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii2_default &pinctrl_mdio2_default>;
+};
+
+&i2c1 {
+ status = "okay";
+};
+
+&i2c2 {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+};
+
+&i2c4 {
+ status = "okay";
+};
+
+&i2c5 {
+ status = "okay";
+};
+
+&i2c6 {
+ status = "okay";
+};
+
+&i2c7 {
+ status = "okay";
+};
+
+&i2c13 {
+ status = "okay";
+};
+
+&gfx {
+ status = "okay";
+};
+
+&pinctrl {
+ aspeed,external-nodes = <&gfx &lhc>;
+};
+
+&pwm_tacho {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm0_default &pinctrl_pwm1_default
+ &pinctrl_pwm2_default &pinctrl_pwm3_default
+ &pinctrl_pwm4_default &pinctrl_pwm5_default
+ &pinctrl_pwm6_default &pinctrl_pwm7_default>;
+};
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index b3dafbc8caca..e7a45ba18fc9 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -375,6 +375,7 @@
compatible = "aspeed,ast2400-lpc-snoop";
reg = <0x10 0x8>;
interrupts = <8>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index 5bc0de0f3365..21930521a986 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -497,6 +497,7 @@
compatible = "aspeed,ast2500-lpc-snoop";
reg = <0x10 0x8>;
interrupts = <8>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi
index 810b0676ab03..3ee470c2b7b5 100644
--- a/arch/arm/boot/dts/aspeed-g6.dtsi
+++ b/arch/arm/boot/dts/aspeed-g6.dtsi
@@ -524,6 +524,7 @@
compatible = "aspeed,ast2600-lpc-snoop";
reg = <0x0 0x80>;
interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/at91-kizbox3_common.dtsi b/arch/arm/boot/dts/at91-kizbox3_common.dtsi
index 9ce513dd514b..c4b3750495da 100644
--- a/arch/arm/boot/dts/at91-kizbox3_common.dtsi
+++ b/arch/arm/boot/dts/at91-kizbox3_common.dtsi
@@ -341,7 +341,6 @@
input@0 {
reg = <0>;
- atmel,wakeup-type = "low";
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
index 0e159f879c15..84e1180f3e89 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
@@ -142,7 +142,6 @@
input@0 {
reg = <0>;
- atmel,wakeup-type = "low";
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
index a06700e53e4c..025a78310e3a 100644
--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
@@ -43,14 +43,20 @@
&i2c0 {
pinctrl-0 = <&pinctrl_i2c0_default>;
- pinctrl-names = "default";
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ pinctrl-names = "default", "gpio";
+ sda-gpios = <&pioA PIN_PD21 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA PIN_PD22 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
};
&i2c1 {
dmas = <0>, <0>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ sda-gpios = <&pioA PIN_PD19 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA PIN_PD20 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
mcp16502@5b {
@@ -258,12 +264,24 @@
bias-disable;
};
+ pinctrl_i2c0_gpio: i2c0_gpio {
+ pinmux = <PIN_PD21__GPIO>,
+ <PIN_PD22__GPIO>;
+ bias-disable;
+ };
+
pinctrl_i2c1_default: i2c1_default {
pinmux = <PIN_PD19__TWD1>,
<PIN_PD20__TWCK1>;
bias-disable;
};
+ pinctrl_i2c1_gpio: i2c1_gpio {
+ pinmux = <PIN_PD19__GPIO>,
+ <PIN_PD20__GPIO>;
+ bias-disable;
+ };
+
pinctrl_macb0_default: macb0_default {
pinmux = <PIN_PB14__GTXCK>,
<PIN_PB15__GTXEN>,
diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
index 6b38fa3f5568..180a08765cb8 100644
--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
@@ -209,7 +209,6 @@
input@0 {
reg = <0>;
- atmel,wakeup-type = "low";
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
index 6783cf16ff81..46722a163184 100644
--- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
@@ -697,7 +697,6 @@
input@0 {
reg = <0>;
- atmel,wakeup-type = "low";
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
index c894c7c788a9..8de57d164acd 100644
--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
@@ -206,7 +206,6 @@
input@0 {
reg = <0>;
- atmel,wakeup-type = "low";
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index 058fae1b4a76..4e7cf21f124c 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -351,7 +351,6 @@
input@0 {
reg = <0>;
- atmel,wakeup-type = "low";
};
};
diff --git a/arch/arm/boot/dts/atlas6-evb.dts b/arch/arm/boot/dts/atlas6-evb.dts
deleted file mode 100644
index 89e430392f26..000000000000
--- a/arch/arm/boot/dts/atlas6-evb.dts
+++ /dev/null
@@ -1,78 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * DTS file for CSR SiRFatlas6 Evaluation Board
- *
- * Copyright (c) 2012 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-/dts-v1/;
-
-/include/ "atlas6.dtsi"
-
-/ {
- model = "CSR SiRFatlas6 Evaluation Board";
- compatible = "sirf,atlas6-cb", "sirf,atlas6";
-
- memory {
- device_type = "memory";
- reg = <0x00000000 0x20000000>;
- };
-
- axi {
- peri-iobg {
- uart@b0060000 {
- pinctrl-names = "default";
- pinctrl-0 = <&uart1_pins_a>;
- };
- spi@b00d0000 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&spi0_pins_a>;
- spi@0 {
- compatible = "spidev";
- reg = <0>;
- spi-max-frequency = <1000000>;
- };
- };
- spi@b0170000 {
- pinctrl-names = "default";
- pinctrl-0 = <&spi1_pins_a>;
- };
- i2c0: i2c@b00e0000 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_pins_a>;
- lcd@40 {
- compatible = "sirf,lcd";
- reg = <0x40>;
- };
- };
-
- };
- disp-iobg {
- lcd@90010000 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&lcd_24pins_a>;
- };
- };
- };
- display: display@0 {
- panels {
- panel0: panel@0 {
- panel-name = "Innolux TFT";
- hactive = <800>;
- vactive = <480>;
- left_margin = <20>;
- right_margin = <234>;
- upper_margin = <3>;
- lower_margin = <41>;
- hsync_len = <3>;
- vsync_len = <2>;
- pixclock = <33264000>;
- sync = <3>;
- timing = <0x88>;
- };
- };
- };
-};
diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi
deleted file mode 100644
index 8ac5d1524a43..000000000000
--- a/arch/arm/boot/dts/atlas6.dtsi
+++ /dev/null
@@ -1,800 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * DTS file for CSR SiRFatlas6 SoC
- *
- * Copyright (c) 2012 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-/ {
- compatible = "sirf,atlas6";
- #address-cells = <1>;
- #size-cells = <1>;
- interrupt-parent = <&intc>;
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- reg = <0x0>;
- d-cache-line-size = <32>;
- i-cache-line-size = <32>;
- d-cache-size = <32768>;
- i-cache-size = <32768>;
- /* from bootloader */
- timebase-frequency = <0>;
- bus-frequency = <0>;
- clock-frequency = <0>;
- clocks = <&clks 12>;
- operating-points = <
- /* kHz uV */
- 200000 1025000
- 400000 1025000
- 600000 1050000
- 800000 1100000
- >;
- clock-latency = <150000>;
- };
- };
-
- arm-pmu {
- compatible = "arm,cortex-a9-pmu";
- interrupts = <29>;
- };
-
- axi {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x40000000 0x40000000 0x80000000>;
-
- intc: interrupt-controller@80020000 {
- #interrupt-cells = <1>;
- interrupt-controller;
- compatible = "sirf,prima2-intc";
- reg = <0x80020000 0x1000>;
- };
-
- sys-iobg {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x88000000 0x88000000 0x40000>;
-
- clks: clock-controller@88000000 {
- compatible = "sirf,atlas6-clkc";
- reg = <0x88000000 0x1000>;
- interrupts = <3>;
- #clock-cells = <1>;
- };
-
- rstc: reset-controller@88010000 {
- compatible = "sirf,prima2-rstc";
- reg = <0x88010000 0x1000>;
- #reset-cells = <1>;
- };
-
- rsc-controller@88020000 {
- compatible = "sirf,prima2-rsc";
- reg = <0x88020000 0x1000>;
- };
-
- cphifbg@88030000 {
- compatible = "sirf,prima2-cphifbg";
- reg = <0x88030000 0x1000>;
- clocks = <&clks 42>;
- };
- };
-
- mem-iobg {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x90000000 0x90000000 0x10000>;
-
- memory-controller@90000000 {
- compatible = "sirf,prima2-memc";
- reg = <0x90000000 0x2000>;
- interrupts = <27>;
- clocks = <&clks 5>;
- };
-
- memc-monitor {
- compatible = "sirf,prima2-memcmon";
- reg = <0x90002000 0x200>;
- interrupts = <4>;
- clocks = <&clks 32>;
- };
- };
-
- disp-iobg {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x90010000 0x90010000 0x30000>;
-
- lcd@90010000 {
- compatible = "sirf,prima2-lcd";
- reg = <0x90010000 0x20000>;
- interrupts = <30>;
- clocks = <&clks 34>;
- display=<&display>;
- /* later transfer to pwm */
- bl-gpio = <&gpio 7 0>;
- default-panel = <&panel0>;
- };
-
- vpp@90020000 {
- compatible = "sirf,prima2-vpp";
- reg = <0x90020000 0x10000>;
- interrupts = <31>;
- clocks = <&clks 35>;
- resets = <&rstc 6>;
- };
- };
-
- graphics-iobg {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x98000000 0x98000000 0x8000000>;
-
- graphics@98000000 {
- compatible = "powervr,sgx510";
- reg = <0x98000000 0x8000000>;
- interrupts = <6>;
- clocks = <&clks 32>;
- };
- };
-
- graphics2d-iobg {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0xa0000000 0xa0000000 0x8000000>;
-
- ble@a0000000 {
- compatible = "sirf,atlas6-ble";
- reg = <0xa0000000 0x2000>;
- interrupts = <5>;
- clocks = <&clks 33>;
- };
- };
-
- dsp-iobg {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0xa8000000 0xa8000000 0x2000000>;
-
- dspif@a8000000 {
- compatible = "sirf,prima2-dspif";
- reg = <0xa8000000 0x10000>;
- interrupts = <9>;
- resets = <&rstc 1>;
- };
-
- gps@a8010000 {
- compatible = "sirf,prima2-gps";
- reg = <0xa8010000 0x10000>;
- interrupts = <7>;
- clocks = <&clks 9>;
- resets = <&rstc 2>;
- };
-
- dsp@a9000000 {
- compatible = "sirf,prima2-dsp";
- reg = <0xa9000000 0x1000000>;
- interrupts = <8>;
- clocks = <&clks 8>;
- resets = <&rstc 0>;
- };
- };
-
- peri-iobg {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0xb0000000 0xb0000000 0x180000>,
- <0x56000000 0x56000000 0x1b00000>;
-
- timer@b0020000 {
- compatible = "sirf,prima2-tick";
- reg = <0xb0020000 0x1000>;
- interrupts = <0>;
- clocks = <&clks 11>;
- };
-
- nand@b0030000 {
- compatible = "sirf,prima2-nand";
- reg = <0xb0030000 0x10000>;
- interrupts = <41>;
- clocks = <&clks 26>;
- };
-
- audio@b0040000 {
- compatible = "sirf,prima2-audio";
- reg = <0xb0040000 0x10000>;
- interrupts = <35>;
- clocks = <&clks 27>;
- };
-
- uart0: uart@b0050000 {
- cell-index = <0>;
- compatible = "sirf,prima2-uart";
- reg = <0xb0050000 0x1000>;
- interrupts = <17>;
- fifosize = <128>;
- clocks = <&clks 13>;
- dmas = <&dmac1 5>, <&dmac0 2>;
- dma-names = "rx", "tx";
- };
-
- uart1: uart@b0060000 {
- cell-index = <1>;
- compatible = "sirf,prima2-uart";
- reg = <0xb0060000 0x1000>;
- interrupts = <18>;
- fifosize = <32>;
- clocks = <&clks 14>;
- dma-names = "no-rx", "no-tx";
- };
-
- uart2: uart@b0070000 {
- cell-index = <2>;
- compatible = "sirf,prima2-uart";
- reg = <0xb0070000 0x1000>;
- interrupts = <19>;
- fifosize = <128>;
- clocks = <&clks 15>;
- dmas = <&dmac0 6>, <&dmac0 7>;
- dma-names = "rx", "tx";
- };
-
- usp0: usp@b0080000 {
- cell-index = <0>;
- compatible = "sirf,prima2-usp";
- reg = <0xb0080000 0x10000>;
- interrupts = <20>;
- fifosize = <128>;
- clocks = <&clks 28>;
- dmas = <&dmac1 1>, <&dmac1 2>;
- dma-names = "rx", "tx";
- };
-
- usp1: usp@b0090000 {
- cell-index = <1>;
- compatible = "sirf,prima2-usp";
- reg = <0xb0090000 0x10000>;
- interrupts = <21>;
- fifosize = <128>;
- clocks = <&clks 29>;
- dmas = <&dmac0 14>, <&dmac0 15>;
- dma-names = "rx", "tx";
- };
-
- dmac0: dma-controller@b00b0000 {
- cell-index = <0>;
- compatible = "sirf,prima2-dmac";
- reg = <0xb00b0000 0x10000>;
- interrupts = <12>;
- clocks = <&clks 24>;
- #dma-cells = <1>;
- };
-
- dmac1: dma-controller@b0160000 {
- cell-index = <1>;
- compatible = "sirf,prima2-dmac";
- reg = <0xb0160000 0x10000>;
- interrupts = <13>;
- clocks = <&clks 25>;
- #dma-cells = <1>;
- };
-
- vip@b00C0000 {
- compatible = "sirf,prima2-vip";
- reg = <0xb00C0000 0x10000>;
- clocks = <&clks 31>;
- interrupts = <14>;
- sirf,vip-dma-rx-channel = <16>;
- };
-
- spi0: spi@b00d0000 {
- cell-index = <0>;
- compatible = "sirf,prima2-spi";
- reg = <0xb00d0000 0x10000>;
- interrupts = <15>;
- sirf,spi-num-chipselects = <1>;
- dmas = <&dmac1 9>,
- <&dmac1 4>;
- dma-names = "rx", "tx";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clks 19>;
- resets = <&rstc 26>;
- status = "disabled";
- };
-
- spi1: spi@b0170000 {
- cell-index = <1>;
- compatible = "sirf,prima2-spi";
- reg = <0xb0170000 0x10000>;
- interrupts = <16>;
- sirf,spi-num-chipselects = <1>;
- dmas = <&dmac0 12>,
- <&dmac0 13>;
- dma-names = "rx", "tx";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clks 20>;
- resets = <&rstc 27>;
- status = "disabled";
- };
-
- i2c0: i2c@b00e0000 {
- cell-index = <0>;
- compatible = "sirf,prima2-i2c";
- reg = <0xb00e0000 0x10000>;
- interrupts = <24>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clks 17>;
- };
-
- i2c1: i2c@b00f0000 {
- cell-index = <1>;
- compatible = "sirf,prima2-i2c";
- reg = <0xb00f0000 0x10000>;
- interrupts = <25>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clks 18>;
- };
-
- tsc@b0110000 {
- compatible = "sirf,prima2-tsc";
- reg = <0xb0110000 0x10000>;
- interrupts = <33>;
- clocks = <&clks 16>;
- };
-
- gpio: pinctrl@b0120000 {
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- compatible = "sirf,atlas6-pinctrl";
- reg = <0xb0120000 0x10000>;
- interrupts = <43 44 45 46 47>;
- gpio-controller;
- interrupt-controller;
-
- lcd_16pins_a: lcd0@0 {
- lcd {
- sirf,pins = "lcd_16bitsgrp";
- sirf,function = "lcd_16bits";
- };
- };
- lcd_18pins_a: lcd0@1 {
- lcd {
- sirf,pins = "lcd_18bitsgrp";
- sirf,function = "lcd_18bits";
- };
- };
- lcd_24pins_a: lcd0@2 {
- lcd {
- sirf,pins = "lcd_24bitsgrp";
- sirf,function = "lcd_24bits";
- };
- };
- lcdrom_pins_a: lcdrom0@0 {
- lcd {
- sirf,pins = "lcdromgrp";
- sirf,function = "lcdrom";
- };
- };
- uart0_pins_a: uart0@0 {
- uart {
- sirf,pins = "uart0grp";
- sirf,function = "uart0";
- };
- };
- uart0_noflow_pins_a: uart0@1 {
- uart {
- sirf,pins = "uart0_nostreamctrlgrp";
- sirf,function = "uart0_nostreamctrl";
- };
- };
- uart1_pins_a: uart1@0 {
- uart {
- sirf,pins = "uart1grp";
- sirf,function = "uart1";
- };
- };
- uart2_pins_a: uart2@0 {
- uart {
- sirf,pins = "uart2grp";
- sirf,function = "uart2";
- };
- };
- uart2_noflow_pins_a: uart2@1 {
- uart {
- sirf,pins = "uart2_nostreamctrlgrp";
- sirf,function = "uart2_nostreamctrl";
- };
- };
- spi0_pins_a: spi0@0 {
- spi {
- sirf,pins = "spi0grp";
- sirf,function = "spi0";
- };
- };
- spi1_pins_a: spi1@0 {
- spi {
- sirf,pins = "spi1grp";
- sirf,function = "spi1";
- };
- };
- i2c0_pins_a: i2c0@0 {
- i2c {
- sirf,pins = "i2c0grp";
- sirf,function = "i2c0";
- };
- };
- i2c1_pins_a: i2c1@0 {
- i2c {
- sirf,pins = "i2c1grp";
- sirf,function = "i2c1";
- };
- };
- pwm0_pins_a: pwm0@0 {
- pwm {
- sirf,pins = "pwm0grp";
- sirf,function = "pwm0";
- };
- };
- pwm1_pins_a: pwm1@0 {
- pwm {
- sirf,pins = "pwm1grp";
- sirf,function = "pwm1";
- };
- };
- pwm2_pins_a: pwm2@0 {
- pwm {
- sirf,pins = "pwm2grp";
- sirf,function = "pwm2";
- };
- };
- pwm3_pins_a: pwm3@0 {
- pwm {
- sirf,pins = "pwm3grp";
- sirf,function = "pwm3";
- };
- };
- pwm4_pins_a: pwm4@0 {
- pwm {
- sirf,pins = "pwm4grp";
- sirf,function = "pwm4";
- };
- };
- gps_pins_a: gps@0 {
- gps {
- sirf,pins = "gpsgrp";
- sirf,function = "gps";
- };
- };
- vip_pins_a: vip@0 {
- vip {
- sirf,pins = "vipgrp";
- sirf,function = "vip";
- };
- };
- sdmmc0_pins_a: sdmmc0@0 {
- sdmmc0 {
- sirf,pins = "sdmmc0grp";
- sirf,function = "sdmmc0";
- };
- };
- sdmmc1_pins_a: sdmmc1@0 {
- sdmmc1 {
- sirf,pins = "sdmmc1grp";
- sirf,function = "sdmmc1";
- };
- };
- sdmmc2_pins_a: sdmmc2@0 {
- sdmmc2 {
- sirf,pins = "sdmmc2grp";
- sirf,function = "sdmmc2";
- };
- };
- sdmmc2_nowp_pins_a: sdmmc2_nowp@0 {
- sdmmc2_nowp {
- sirf,pins = "sdmmc2_nowpgrp";
- sirf,function = "sdmmc2_nowp";
- };
- };
- sdmmc3_pins_a: sdmmc3@0 {
- sdmmc3 {
- sirf,pins = "sdmmc3grp";
- sirf,function = "sdmmc3";
- };
- };
- sdmmc5_pins_a: sdmmc5@0 {
- sdmmc5 {
- sirf,pins = "sdmmc5grp";
- sirf,function = "sdmmc5";
- };
- };
- i2s_mclk_pins_a: i2s_mclk@0 {
- i2s_mclk {
- sirf,pins = "i2smclkgrp";
- sirf,function = "i2s_mclk";
- };
- };
- i2s_ext_clk_input_pins_a: i2s_ext_clk_input@0 {
- i2s_ext_clk_input {
- sirf,pins = "i2s_ext_clk_inputgrp";
- sirf,function = "i2s_ext_clk_input";
- };
- };
- i2s_pins_a: i2s@0 {
- i2s {
- sirf,pins = "i2sgrp";
- sirf,function = "i2s";
- };
- };
- i2s_no_din_pins_a: i2s_no_din@0 {
- i2s_no_din {
- sirf,pins = "i2s_no_dingrp";
- sirf,function = "i2s_no_din";
- };
- };
- i2s_6chn_pins_a: i2s_6chn@0 {
- i2s_6chn {
- sirf,pins = "i2s_6chngrp";
- sirf,function = "i2s_6chn";
- };
- };
- ac97_pins_a: ac97@0 {
- ac97 {
- sirf,pins = "ac97grp";
- sirf,function = "ac97";
- };
- };
- nand_pins_a: nand@0 {
- nand {
- sirf,pins = "nandgrp";
- sirf,function = "nand";
- };
- };
- usp0_pins_a: usp0@0 {
- usp0 {
- sirf,pins = "usp0grp";
- sirf,function = "usp0";
- };
- };
- usp0_uart_nostreamctrl_pins_a: usp0@1 {
- usp0 {
- sirf,pins = "usp0_uart_nostreamctrl_grp";
- sirf,function = "usp0_uart_nostreamctrl";
- };
- };
- usp0_only_utfs_pins_a: usp0@2 {
- usp0 {
- sirf,pins = "usp0_only_utfs_grp";
- sirf,function = "usp0_only_utfs";
- };
- };
- usp0_only_urfs_pins_a: usp0@3 {
- usp0 {
- sirf,pins = "usp0_only_urfs_grp";
- sirf,function = "usp0_only_urfs";
- };
- };
- usp1_pins_a: usp1@0 {
- usp1 {
- sirf,pins = "usp1grp";
- sirf,function = "usp1";
- };
- };
- usp1_uart_nostreamctrl_pins_a: usp1@1 {
- usp1 {
- sirf,pins = "usp1_uart_nostreamctrl_grp";
- sirf,function = "usp1_uart_nostreamctrl";
- };
- };
- usb0_upli_drvbus_pins_a: usb0_upli_drvbus@0 {
- usb0_upli_drvbus {
- sirf,pins = "usb0_upli_drvbusgrp";
- sirf,function = "usb0_upli_drvbus";
- };
- };
- usb1_utmi_drvbus_pins_a: usb1_utmi_drvbus@0 {
- usb1_utmi_drvbus {
- sirf,pins = "usb1_utmi_drvbusgrp";
- sirf,function = "usb1_utmi_drvbus";
- };
- };
- usb1_dp_dn_pins_a: usb1_dp_dn@0 {
- usb1_dp_dn {
- sirf,pins = "usb1_dp_dngrp";
- sirf,function = "usb1_dp_dn";
- };
- };
- uart1_route_io_usb1_pins_a: uart1_route_io_usb1@0 {
- uart1_route_io_usb1 {
- sirf,pins = "uart1_route_io_usb1grp";
- sirf,function = "uart1_route_io_usb1";
- };
- };
- warm_rst_pins_a: warm_rst@0 {
- warm_rst {
- sirf,pins = "warm_rstgrp";
- sirf,function = "warm_rst";
- };
- };
- pulse_count_pins_a: pulse_count@0 {
- pulse_count {
- sirf,pins = "pulse_countgrp";
- sirf,function = "pulse_count";
- };
- };
- cko0_pins_a: cko0@0 {
- cko0 {
- sirf,pins = "cko0grp";
- sirf,function = "cko0";
- };
- };
- cko1_pins_a: cko1@0 {
- cko1 {
- sirf,pins = "cko1grp";
- sirf,function = "cko1";
- };
- };
- };
-
- pwm@b0130000 {
- compatible = "sirf,prima2-pwm";
- reg = <0xb0130000 0x10000>;
- clocks = <&clks 21>;
- };
-
- efusesys@b0140000 {
- compatible = "sirf,prima2-efuse";
- reg = <0xb0140000 0x10000>;
- clocks = <&clks 22>;
- };
-
- pulsec@b0150000 {
- compatible = "sirf,prima2-pulsec";
- reg = <0xb0150000 0x10000>;
- interrupts = <48>;
- clocks = <&clks 23>;
- };
-
- pci-iobg {
- compatible = "sirf,prima2-pciiobg", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x56000000 0x56000000 0x1b00000>;
-
- sd0: sdhci@56000000 {
- cell-index = <0>;
- compatible = "sirf,prima2-sdhc";
- reg = <0x56000000 0x100000>;
- interrupts = <38>;
- bus-width = <8>;
- clocks = <&clks 36>;
- };
-
- sd1: sdhci@56100000 {
- cell-index = <1>;
- compatible = "sirf,prima2-sdhc";
- reg = <0x56100000 0x100000>;
- interrupts = <38>;
- status = "disabled";
- bus-width = <4>;
- clocks = <&clks 36>;
- };
-
- sd2: sdhci@56200000 {
- cell-index = <2>;
- compatible = "sirf,prima2-sdhc";
- reg = <0x56200000 0x100000>;
- interrupts = <23>;
- status = "disabled";
- bus-width = <4>;
- clocks = <&clks 37>;
- };
-
- sd3: sdhci@56300000 {
- cell-index = <3>;
- compatible = "sirf,prima2-sdhc";
- reg = <0x56300000 0x100000>;
- interrupts = <23>;
- status = "disabled";
- bus-width = <4>;
- clocks = <&clks 37>;
- };
-
- sd5: sdhci@56500000 {
- cell-index = <5>;
- compatible = "sirf,prima2-sdhc";
- reg = <0x56500000 0x100000>;
- interrupts = <39>;
- status = "disabled";
- bus-width = <4>;
- clocks = <&clks 38>;
- };
-
- pci-copy@57900000 {
- compatible = "sirf,prima2-pcicp";
- reg = <0x57900000 0x100000>;
- interrupts = <40>;
- };
-
- rom-interface@57a00000 {
- compatible = "sirf,prima2-romif";
- reg = <0x57a00000 0x100000>;
- };
- };
- };
-
- rtc-iobg {
- compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x80030000 0x10000>;
-
- gpsrtc@1000 {
- compatible = "sirf,prima2-gpsrtc";
- reg = <0x1000 0x1000>;
- interrupts = <55 56 57>;
- };
-
- sysrtc@2000 {
- compatible = "sirf,prima2-sysrtc";
- reg = <0x2000 0x1000>;
- interrupts = <52 53 54>;
- };
-
- minigpsrtc@2000 {
- compatible = "sirf,prima2-minigpsrtc";
- reg = <0x2000 0x1000>;
- interrupts = <54>;
- };
-
- pwrc@3000 {
- compatible = "sirf,prima2-pwrc";
- reg = <0x3000 0x1000>;
- interrupts = <32>;
- };
- };
-
- uus-iobg {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0xb8000000 0xb8000000 0x40000>;
-
- usb0: usb@b00e0000 {
- compatible = "chipidea,ci13611a-prima2";
- reg = <0xb8000000 0x10000>;
- interrupts = <10>;
- clocks = <&clks 40>;
- };
-
- usb1: usb@b00f0000 {
- compatible = "chipidea,ci13611a-prima2";
- reg = <0xb8010000 0x10000>;
- interrupts = <11>;
- clocks = <&clks 41>;
- };
-
- security@b00f0000 {
- compatible = "sirf,prima2-security";
- reg = <0xb8030000 0x10000>;
- interrupts = <42>;
- clocks = <&clks 7>;
- };
- };
- };
-};
diff --git a/arch/arm/boot/dts/atlas7-evb.dts b/arch/arm/boot/dts/atlas7-evb.dts
deleted file mode 100644
index e0515043d145..000000000000
--- a/arch/arm/boot/dts/atlas7-evb.dts
+++ /dev/null
@@ -1,127 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * DTS file for CSR SiRFatlas7 Evaluation Board
- *
- * Copyright (c) 2014 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-/dts-v1/;
-
-/include/ "atlas7.dtsi"
-
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/gpio/gpio.h>
-
-/ {
- model = "CSR SiRFatlas7 Evaluation Board";
- compatible = "sirf,atlas7-cb", "sirf,atlas7";
-
- chosen {
- bootargs = "console=ttySiRF1,115200 earlyprintk";
- };
-
- memory {
- device_type = "memory";
- reg = <0x40000000 0x20000000>;
- };
-
- reserved-memory {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- vpp_reserved: vpp_mem@5e800000 {
- compatible = "sirf,reserved-memory";
- reg = <0x5e800000 0x800000>;
- };
-
- nanddisk_reserved: nanddisk@46000000 {
- reg = <0x46000000 0x200000>;
- no-map;
- };
- };
-
-
- noc {
- mediam {
- nand@17050000 {
- memory-region = <&nanddisk_reserved>;
- };
- };
-
- gnssm {
- spi1: spi@18200000 {
- status = "okay";
- spiflash: macronix@0{
- status = "okay";
- compatible = "macronix,mx25l6405d";
- reg = <0>;
- spi-max-frequency = <37500000>;
- spi-cpha;
- spi-cpol;
- #address-cells = <1>;
- #size-cells = <1>;
- partitions@0 {
- label = "myspiboot";
- reg = <0x0 0x800000>;
- };
- };
- };
- };
-
- btm {
- uart6: uart@11000000 {
- status = "okay";
- uart-has-rtscts;
- };
- };
-
- disp-iobg {
- vpp@13110000 {
- memory-region = <&vpp_reserved>;
- };
- };
-
- display0: display@0 {
- compatible = "lvds-panel";
- source = "lvds.0";
-
- bl-gpios = <&gpio_1 63 0>;
- data-lines = <24>;
-
- display-timings {
- native-mode = <&timing0>;
- timing0: timing0 {
- clock-frequency = <60000000>;
- hactive = <1024>;
- vactive = <600>;
- hfront-porch = <220>;
- hback-porch = <100>;
- hsync-len = <1>;
- vback-porch = <10>;
- vfront-porch = <25>;
- vsync-len = <1>;
- hsync-active = <0>;
- vsync-active = <0>;
- de-active = <1>;
- pixelclk-active = <1>;
- };
- };
- };
-
- gpio_keys {
- compatible = "gpio-keys";
- status = "okay";
- #address-cells = <1>;
- #size-cells = <0>;
-
- rearview_key {
- label = "rearview key";
- linux,code = <KEY_CAMERA>;
- gpios = <&gpio_1 3 GPIO_ACTIVE_LOW>;
- debounce-interval = <100>;
- };
- };
-
- };
-};
diff --git a/arch/arm/boot/dts/atlas7.dtsi b/arch/arm/boot/dts/atlas7.dtsi
deleted file mode 100644
index 99c9d9d9267f..000000000000
--- a/arch/arm/boot/dts/atlas7.dtsi
+++ /dev/null
@@ -1,1955 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * DTS file for CSR SiRFatlas7 SoC
- *
- * Copyright (c) 2014 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-/ {
- compatible = "sirf,atlas7";
- #address-cells = <1>;
- #size-cells = <1>;
- interrupt-parent = <&gic>;
- aliases {
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- serial4 = &uart4;
- serial5 = &uart5;
- serial6 = &uart6;
- serial9 = &usp2;
- spi1 = &spi1;
- spi2 = &usp1;
- spi3 = &usp2;
- spi4 = &usp3;
- };
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a7";
- reg = <0>;
- };
- cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a7";
- reg = <1>;
- };
- };
-
- clocks {
- xinw {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- clock-output-names = "xinw";
- };
- xin {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <26000000>;
- clock-output-names = "xin";
- };
- };
-
- arm-pmu {
- compatible = "arm,cortex-a7-pmu";
- interrupts = <0 29 4>, <0 82 4>;
- };
-
- noc {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x10000000 0x10000000 0xc0000000>;
-
- gic: interrupt-controller@10301000 {
- compatible = "arm,cortex-a9-gic";
- interrupt-controller;
- #interrupt-cells = <3>;
- reg = <0x10301000 0x1000>,
- <0x10302000 0x0100>;
- };
-
- pmu_regulator: pmu_regulator@10E30020 {
- compatible = "sirf,atlas7-pmu-ldo";
- reg = <0x10E30020 0x4>;
- ldo: ldo {
- regulator-name = "ldo";
- };
- };
-
- atlas7_codec: atlas7_codec@10E30000 {
- #sound-dai-cells = <0>;
- compatible = "sirf,atlas7-codec";
- reg = <0x10E30000 0x400>;
- clocks = <&car 62>;
- ldo-supply = <&ldo>;
- };
-
- atlas7_iacc: atlas7_iacc@10D01000 {
- #sound-dai-cells = <0>;
- compatible = "sirf,atlas7-iacc";
- reg = <0x10D01000 0x100>;
- dmas = <&dmac3 0>, <&dmac3 7>, <&dmac3 8>,
- <&dmac3 3>, <&dmac3 9>;
- dma-names = "rx", "tx0", "tx1", "tx2", "tx3";
- clocks = <&car 62>;
- };
-
- ipc@13240000 {
- compatible = "sirf,atlas7-ipc";
- ranges = <0x13240000 0x13240000 0x00010000>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- hwspinlock {
- compatible = "sirf,hwspinlock";
- reg = <0x13240000 0x00010000>;
-
- num-spinlocks = <30>;
- };
-
- ns_m3_rproc@0 {
- compatible = "sirf,ns2m30-rproc";
- reg = <0x13240000 0x00010000>;
- interrupts = <0 123 0>;
- };
-
- ns_m3_rproc@1 {
- compatible = "sirf,ns2m31-rproc";
- reg = <0x13240000 0x00010000>;
- interrupts = <0 126 0>;
- };
-
- ns_kal_rproc@0 {
- compatible = "sirf,ns2kal0-rproc";
- reg = <0x13240000 0x00010000>;
- interrupts = <0 124 0>;
- };
-
- ns_kal_rproc@1 {
- compatible = "sirf,ns2kal1-rproc";
- reg = <0x13240000 0x00010000>;
- interrupts = <0 127 0>;
- };
- };
-
- pinctrl: ioc@18880000 {
- compatible = "sirf,atlas7-ioc";
- reg = <0x18880000 0x1000>,
- <0x10E40000 0x1000>;
-
- audio_ac97_pmx: audio_ac97@0 {
- audio_ac97 {
- groups = "audio_ac97_grp";
- function = "audio_ac97";
- };
- };
-
- audio_func_dbg_pmx: audio_func_dbg@0 {
- audio_func_dbg {
- groups = "audio_func_dbg_grp";
- function = "audio_func_dbg";
- };
- };
-
- audio_i2s_pmx: audio_i2s@0 {
- audio_i2s {
- groups = "audio_i2s_grp";
- function = "audio_i2s";
- };
- };
-
- audio_i2s_2ch_pmx: audio_i2s_2ch@0 {
- audio_i2s_2ch {
- groups = "audio_i2s_2ch_grp";
- function = "audio_i2s_2ch";
- };
- };
-
- audio_i2s_extclk_pmx: audio_i2s_extclk@0 {
- audio_i2s_extclk {
- groups = "audio_i2s_extclk_grp";
- function = "audio_i2s_extclk";
- };
- };
-
- audio_uart0_pmx: audio_uart0@0 {
- audio_uart0 {
- groups = "audio_uart0_grp";
- function = "audio_uart0";
- };
- };
-
- audio_uart1_pmx: audio_uart1@0 {
- audio_uart1 {
- groups = "audio_uart1_grp";
- function = "audio_uart1";
- };
- };
-
- audio_uart2_pmx0: audio_uart2@0 {
- audio_uart2_0 {
- groups = "audio_uart2_grp0";
- function = "audio_uart2_m0";
- };
- };
-
- audio_uart2_pmx1: audio_uart2@1 {
- audio_uart2_1 {
- groups = "audio_uart2_grp1";
- function = "audio_uart2_m1";
- };
- };
-
- c_can_trnsvr_pmx: c_can_trnsvr@0 {
- c_can_trnsvr {
- groups = "c_can_trnsvr_grp";
- function = "c_can_trnsvr";
- };
- };
-
- c0_can_pmx0: c0_can@0 {
- c0_can_0 {
- groups = "c0_can_grp0";
- function = "c0_can_m0";
- };
- };
-
- c0_can_pmx1: c0_can@1 {
- c0_can_1 {
- groups = "c0_can_grp1";
- function = "c0_can_m1";
- };
- };
-
- c1_can_pmx0: c1_can@0 {
- c1_can_0 {
- groups = "c1_can_grp0";
- function = "c1_can_m0";
- };
- };
-
- c1_can_pmx1: c1_can@1 {
- c1_can_1 {
- groups = "c1_can_grp1";
- function = "c1_can_m1";
- };
- };
-
- c1_can_pmx2: c1_can@2 {
- c1_can_2 {
- groups = "c1_can_grp2";
- function = "c1_can_m2";
- };
- };
-
- ca_audio_lpc_pmx: ca_audio_lpc@0 {
- ca_audio_lpc {
- groups = "ca_audio_lpc_grp";
- function = "ca_audio_lpc";
- };
- };
-
- ca_bt_lpc_pmx: ca_bt_lpc@0 {
- ca_bt_lpc {
- groups = "ca_bt_lpc_grp";
- function = "ca_bt_lpc";
- };
- };
-
- ca_coex_pmx: ca_coex@0 {
- ca_coex {
- groups = "ca_coex_grp";
- function = "ca_coex";
- };
- };
-
- ca_curator_lpc_pmx: ca_curator_lpc@0 {
- ca_curator_lpc {
- groups = "ca_curator_lpc_grp";
- function = "ca_curator_lpc";
- };
- };
-
- ca_pcm_debug_pmx: ca_pcm_debug@0 {
- ca_pcm_debug {
- groups = "ca_pcm_debug_grp";
- function = "ca_pcm_debug";
- };
- };
-
- ca_pio_pmx: ca_pio@0 {
- ca_pio {
- groups = "ca_pio_grp";
- function = "ca_pio";
- };
- };
-
- ca_sdio_debug_pmx: ca_sdio_debug@0 {
- ca_sdio_debug {
- groups = "ca_sdio_debug_grp";
- function = "ca_sdio_debug";
- };
- };
-
- ca_spi_pmx: ca_spi@0 {
- ca_spi {
- groups = "ca_spi_grp";
- function = "ca_spi";
- };
- };
-
- ca_trb_pmx: ca_trb@0 {
- ca_trb {
- groups = "ca_trb_grp";
- function = "ca_trb";
- };
- };
-
- ca_uart_debug_pmx: ca_uart_debug@0 {
- ca_uart_debug {
- groups = "ca_uart_debug_grp";
- function = "ca_uart_debug";
- };
- };
-
- clkc_pmx0: clkc@0 {
- clkc_0 {
- groups = "clkc_grp0";
- function = "clkc_m0";
- };
- };
-
- clkc_pmx1: clkc@1 {
- clkc_1 {
- groups = "clkc_grp1";
- function = "clkc_m1";
- };
- };
-
- gn_gnss_i2c_pmx: gn_gnss_i2c@0 {
- gn_gnss_i2c {
- groups = "gn_gnss_i2c_grp";
- function = "gn_gnss_i2c";
- };
- };
-
- gn_gnss_uart_nopause_pmx: gn_gnss_uart_nopause@0 {
- gn_gnss_uart_nopause {
- groups = "gn_gnss_uart_nopause_grp";
- function = "gn_gnss_uart_nopause";
- };
- };
-
- gn_gnss_uart_pmx: gn_gnss_uart@0 {
- gn_gnss_uart {
- groups = "gn_gnss_uart_grp";
- function = "gn_gnss_uart";
- };
- };
-
- gn_trg_spi_pmx0: gn_trg_spi@0 {
- gn_trg_spi_0 {
- groups = "gn_trg_spi_grp0";
- function = "gn_trg_spi_m0";
- };
- };
-
- gn_trg_spi_pmx1: gn_trg_spi@1 {
- gn_trg_spi_1 {
- groups = "gn_trg_spi_grp1";
- function = "gn_trg_spi_m1";
- };
- };
-
- cvbs_dbg_pmx: cvbs_dbg@0 {
- cvbs_dbg {
- groups = "cvbs_dbg_grp";
- function = "cvbs_dbg";
- };
- };
-
- cvbs_dbg_test_pmx0: cvbs_dbg_test@0 {
- cvbs_dbg_test_0 {
- groups = "cvbs_dbg_test_grp0";
- function = "cvbs_dbg_test_m0";
- };
- };
-
- cvbs_dbg_test_pmx1: cvbs_dbg_test@1 {
- cvbs_dbg_test_1 {
- groups = "cvbs_dbg_test_grp1";
- function = "cvbs_dbg_test_m1";
- };
- };
-
- cvbs_dbg_test_pmx2: cvbs_dbg_test@2 {
- cvbs_dbg_test_2 {
- groups = "cvbs_dbg_test_grp2";
- function = "cvbs_dbg_test_m2";
- };
- };
-
- cvbs_dbg_test_pmx3: cvbs_dbg_test@3 {
- cvbs_dbg_test_3 {
- groups = "cvbs_dbg_test_grp3";
- function = "cvbs_dbg_test_m3";
- };
- };
-
- cvbs_dbg_test_pmx4: cvbs_dbg_test@4 {
- cvbs_dbg_test_4 {
- groups = "cvbs_dbg_test_grp4";
- function = "cvbs_dbg_test_m4";
- };
- };
-
- cvbs_dbg_test_pmx5: cvbs_dbg_test@5 {
- cvbs_dbg_test_5 {
- groups = "cvbs_dbg_test_grp5";
- function = "cvbs_dbg_test_m5";
- };
- };
-
- cvbs_dbg_test_pmx6: cvbs_dbg_test@6 {
- cvbs_dbg_test_6 {
- groups = "cvbs_dbg_test_grp6";
- function = "cvbs_dbg_test_m6";
- };
- };
-
- cvbs_dbg_test_pmx7: cvbs_dbg_test@7 {
- cvbs_dbg_test_7 {
- groups = "cvbs_dbg_test_grp7";
- function = "cvbs_dbg_test_m7";
- };
- };
-
- cvbs_dbg_test_pmx8: cvbs_dbg_test@8 {
- cvbs_dbg_test_8 {
- groups = "cvbs_dbg_test_grp8";
- function = "cvbs_dbg_test_m8";
- };
- };
-
- cvbs_dbg_test_pmx9: cvbs_dbg_test@9 {
- cvbs_dbg_test_9 {
- groups = "cvbs_dbg_test_grp9";
- function = "cvbs_dbg_test_m9";
- };
- };
-
- cvbs_dbg_test_pmx10: cvbs_dbg_test@10 {
- cvbs_dbg_test_10 {
- groups = "cvbs_dbg_test_grp10";
- function = "cvbs_dbg_test_m10";
- };
- };
-
- cvbs_dbg_test_pmx11: cvbs_dbg_test@11 {
- cvbs_dbg_test_11 {
- groups = "cvbs_dbg_test_grp11";
- function = "cvbs_dbg_test_m11";
- };
- };
-
- cvbs_dbg_test_pmx12: cvbs_dbg_test@12 {
- cvbs_dbg_test_12 {
- groups = "cvbs_dbg_test_grp12";
- function = "cvbs_dbg_test_m12";
- };
- };
-
- cvbs_dbg_test_pmx13: cvbs_dbg_test@13 {
- cvbs_dbg_test_13 {
- groups = "cvbs_dbg_test_grp13";
- function = "cvbs_dbg_test_m13";
- };
- };
-
- cvbs_dbg_test_pmx14: cvbs_dbg_test@14 {
- cvbs_dbg_test_14 {
- groups = "cvbs_dbg_test_grp14";
- function = "cvbs_dbg_test_m14";
- };
- };
-
- cvbs_dbg_test_pmx15: cvbs_dbg_test@15 {
- cvbs_dbg_test_15 {
- groups = "cvbs_dbg_test_grp15";
- function = "cvbs_dbg_test_m15";
- };
- };
-
- gn_gnss_power_pmx: gn_gnss_power@0 {
- gn_gnss_power {
- groups = "gn_gnss_power_grp";
- function = "gn_gnss_power";
- };
- };
-
- gn_gnss_sw_status_pmx: gn_gnss_sw_status@0 {
- gn_gnss_sw_status {
- groups = "gn_gnss_sw_status_grp";
- function = "gn_gnss_sw_status";
- };
- };
-
- gn_gnss_eclk_pmx: gn_gnss_eclk@0 {
- gn_gnss_eclk {
- groups = "gn_gnss_eclk_grp";
- function = "gn_gnss_eclk";
- };
- };
-
- gn_gnss_irq1_pmx0: gn_gnss_irq1@0 {
- gn_gnss_irq1_0 {
- groups = "gn_gnss_irq1_grp0";
- function = "gn_gnss_irq1_m0";
- };
- };
-
- gn_gnss_irq2_pmx0: gn_gnss_irq2@0 {
- gn_gnss_irq2_0 {
- groups = "gn_gnss_irq2_grp0";
- function = "gn_gnss_irq2_m0";
- };
- };
-
- gn_gnss_tm_pmx: gn_gnss_tm@0 {
- gn_gnss_tm {
- groups = "gn_gnss_tm_grp";
- function = "gn_gnss_tm";
- };
- };
-
- gn_gnss_tsync_pmx: gn_gnss_tsync@0 {
- gn_gnss_tsync {
- groups = "gn_gnss_tsync_grp";
- function = "gn_gnss_tsync";
- };
- };
-
- gn_io_gnsssys_sw_cfg_pmx: gn_io_gnsssys_sw_cfg@0 {
- gn_io_gnsssys_sw_cfg {
- groups = "gn_io_gnsssys_sw_cfg_grp";
- function = "gn_io_gnsssys_sw_cfg";
- };
- };
-
- gn_trg_pmx0: gn_trg@0 {
- gn_trg_0 {
- groups = "gn_trg_grp0";
- function = "gn_trg_m0";
- };
- };
-
- gn_trg_pmx1: gn_trg@1 {
- gn_trg_1 {
- groups = "gn_trg_grp1";
- function = "gn_trg_m1";
- };
- };
-
- gn_trg_shutdown_pmx0: gn_trg_shutdown@0 {
- gn_trg_shutdown_0 {
- groups = "gn_trg_shutdown_grp0";
- function = "gn_trg_shutdown_m0";
- };
- };
-
- gn_trg_shutdown_pmx1: gn_trg_shutdown@1 {
- gn_trg_shutdown_1 {
- groups = "gn_trg_shutdown_grp1";
- function = "gn_trg_shutdown_m1";
- };
- };
-
- gn_trg_shutdown_pmx2: gn_trg_shutdown@2 {
- gn_trg_shutdown_2 {
- groups = "gn_trg_shutdown_grp2";
- function = "gn_trg_shutdown_m2";
- };
- };
-
- gn_trg_shutdown_pmx3: gn_trg_shutdown@3 {
- gn_trg_shutdown_3 {
- groups = "gn_trg_shutdown_grp3";
- function = "gn_trg_shutdown_m3";
- };
- };
-
- i2c0_pmx: i2c0@0 {
- i2c0 {
- groups = "i2c0_grp";
- function = "i2c0";
- };
- };
-
- i2c1_pmx: i2c1@0 {
- i2c1 {
- groups = "i2c1_grp";
- function = "i2c1";
- };
- };
-
- jtag_pmx0: jtag@0 {
- jtag_0 {
- groups = "jtag_grp0";
- function = "jtag_m0";
- };
- };
-
- ks_kas_spi_pmx0: ks_kas_spi@0 {
- ks_kas_spi_0 {
- groups = "ks_kas_spi_grp0";
- function = "ks_kas_spi_m0";
- };
- };
-
- ld_ldd_pmx: ld_ldd@0 {
- ld_ldd {
- groups = "ld_ldd_grp";
- function = "ld_ldd";
- };
- };
-
- ld_ldd_16bit_pmx: ld_ldd_16bit@0 {
- ld_ldd_16bit {
- groups = "ld_ldd_16bit_grp";
- function = "ld_ldd_16bit";
- };
- };
-
- ld_ldd_fck_pmx: ld_ldd_fck@0 {
- ld_ldd_fck {
- groups = "ld_ldd_fck_grp";
- function = "ld_ldd_fck";
- };
- };
-
- ld_ldd_lck_pmx: ld_ldd_lck@0 {
- ld_ldd_lck {
- groups = "ld_ldd_lck_grp";
- function = "ld_ldd_lck";
- };
- };
-
- lr_lcdrom_pmx: lr_lcdrom@0 {
- lr_lcdrom {
- groups = "lr_lcdrom_grp";
- function = "lr_lcdrom";
- };
- };
-
- lvds_analog_pmx: lvds_analog@0 {
- lvds_analog {
- groups = "lvds_analog_grp";
- function = "lvds_analog";
- };
- };
-
- nd_df_pmx: nd_df@0 {
- nd_df {
- groups = "nd_df_grp";
- function = "nd_df";
- };
- };
-
- nd_df_nowp_pmx: nd_df_nowp@0 {
- nd_df_nowp {
- groups = "nd_df_nowp_grp";
- function = "nd_df_nowp";
- };
- };
-
- ps_pmx: ps@0 {
- ps {
- groups = "ps_grp";
- function = "ps";
- };
- };
-
- pwc_core_on_pmx: pwc_core_on@0 {
- pwc_core_on {
- groups = "pwc_core_on_grp";
- function = "pwc_core_on";
- };
- };
-
- pwc_ext_on_pmx: pwc_ext_on@0 {
- pwc_ext_on {
- groups = "pwc_ext_on_grp";
- function = "pwc_ext_on";
- };
- };
-
- pwc_gpio3_clk_pmx: pwc_gpio3_clk@0 {
- pwc_gpio3_clk {
- groups = "pwc_gpio3_clk_grp";
- function = "pwc_gpio3_clk";
- };
- };
-
- pwc_io_on_pmx: pwc_io_on@0 {
- pwc_io_on {
- groups = "pwc_io_on_grp";
- function = "pwc_io_on";
- };
- };
-
- pwc_lowbatt_b_pmx0: pwc_lowbatt_b@0 {
- pwc_lowbatt_b_0 {
- groups = "pwc_lowbatt_b_grp0";
- function = "pwc_lowbatt_b_m0";
- };
- };
-
- pwc_mem_on_pmx: pwc_mem_on@0 {
- pwc_mem_on {
- groups = "pwc_mem_on_grp";
- function = "pwc_mem_on";
- };
- };
-
- pwc_on_key_b_pmx0: pwc_on_key_b@0 {
- pwc_on_key_b_0 {
- groups = "pwc_on_key_b_grp0";
- function = "pwc_on_key_b_m0";
- };
- };
-
- pwc_wakeup_src0_pmx: pwc_wakeup_src0@0 {
- pwc_wakeup_src0 {
- groups = "pwc_wakeup_src0_grp";
- function = "pwc_wakeup_src0";
- };
- };
-
- pwc_wakeup_src1_pmx: pwc_wakeup_src1@0 {
- pwc_wakeup_src1 {
- groups = "pwc_wakeup_src1_grp";
- function = "pwc_wakeup_src1";
- };
- };
-
- pwc_wakeup_src2_pmx: pwc_wakeup_src2@0 {
- pwc_wakeup_src2 {
- groups = "pwc_wakeup_src2_grp";
- function = "pwc_wakeup_src2";
- };
- };
-
- pwc_wakeup_src3_pmx: pwc_wakeup_src3@0 {
- pwc_wakeup_src3 {
- groups = "pwc_wakeup_src3_grp";
- function = "pwc_wakeup_src3";
- };
- };
-
- pw_cko0_pmx0: pw_cko0@0 {
- pw_cko0_0 {
- groups = "pw_cko0_grp0";
- function = "pw_cko0_m0";
- };
- };
-
- pw_cko0_pmx1: pw_cko0@1 {
- pw_cko0_1 {
- groups = "pw_cko0_grp1";
- function = "pw_cko0_m1";
- };
- };
-
- pw_cko0_pmx2: pw_cko0@2 {
- pw_cko0_2 {
- groups = "pw_cko0_grp2";
- function = "pw_cko0_m2";
- };
- };
-
- pw_cko1_pmx0: pw_cko1@0 {
- pw_cko1_0 {
- groups = "pw_cko1_grp0";
- function = "pw_cko1_m0";
- };
- };
-
- pw_cko1_pmx1: pw_cko1@1 {
- pw_cko1_1 {
- groups = "pw_cko1_grp1";
- function = "pw_cko1_m1";
- };
- };
-
- pw_i2s01_clk_pmx0: pw_i2s01_clk@0 {
- pw_i2s01_clk_0 {
- groups = "pw_i2s01_clk_grp0";
- function = "pw_i2s01_clk_m0";
- };
- };
-
- pw_i2s01_clk_pmx1: pw_i2s01_clk@1 {
- pw_i2s01_clk_1 {
- groups = "pw_i2s01_clk_grp1";
- function = "pw_i2s01_clk_m1";
- };
- };
-
- pw_pwm0_pmx: pw_pwm0@0 {
- pw_pwm0 {
- groups = "pw_pwm0_grp";
- function = "pw_pwm0";
- };
- };
-
- pw_pwm1_pmx: pw_pwm1@0 {
- pw_pwm1 {
- groups = "pw_pwm1_grp";
- function = "pw_pwm1";
- };
- };
-
- pw_pwm2_pmx0: pw_pwm2@0 {
- pw_pwm2_0 {
- groups = "pw_pwm2_grp0";
- function = "pw_pwm2_m0";
- };
- };
-
- pw_pwm2_pmx1: pw_pwm2@1 {
- pw_pwm2_1 {
- groups = "pw_pwm2_grp1";
- function = "pw_pwm2_m1";
- };
- };
-
- pw_pwm3_pmx0: pw_pwm3@0 {
- pw_pwm3_0 {
- groups = "pw_pwm3_grp0";
- function = "pw_pwm3_m0";
- };
- };
-
- pw_pwm3_pmx1: pw_pwm3@1 {
- pw_pwm3_1 {
- groups = "pw_pwm3_grp1";
- function = "pw_pwm3_m1";
- };
- };
-
- pw_pwm_cpu_vol_pmx0: pw_pwm_cpu_vol@0 {
- pw_pwm_cpu_vol_0 {
- groups = "pw_pwm_cpu_vol_grp0";
- function = "pw_pwm_cpu_vol_m0";
- };
- };
-
- pw_pwm_cpu_vol_pmx1: pw_pwm_cpu_vol@1 {
- pw_pwm_cpu_vol_1 {
- groups = "pw_pwm_cpu_vol_grp1";
- function = "pw_pwm_cpu_vol_m1";
- };
- };
-
- pw_backlight_pmx0: pw_backlight@0 {
- pw_backlight_0 {
- groups = "pw_backlight_grp0";
- function = "pw_backlight_m0";
- };
- };
-
- pw_backlight_pmx1: pw_backlight@1 {
- pw_backlight_1 {
- groups = "pw_backlight_grp1";
- function = "pw_backlight_m1";
- };
- };
-
- rg_eth_mac_pmx: rg_eth_mac@0 {
- rg_eth_mac {
- groups = "rg_eth_mac_grp";
- function = "rg_eth_mac";
- };
- };
-
- rg_gmac_phy_intr_n_pmx: rg_gmac_phy_intr_n@0 {
- rg_gmac_phy_intr_n {
- groups = "rg_gmac_phy_intr_n_grp";
- function = "rg_gmac_phy_intr_n";
- };
- };
-
- rg_rgmii_mac_pmx: rg_rgmii_mac@0 {
- rg_rgmii_mac {
- groups = "rg_rgmii_mac_grp";
- function = "rg_rgmii_mac";
- };
- };
-
- rg_rgmii_phy_ref_clk_pmx0: rg_rgmii_phy_ref_clk@0 {
- rg_rgmii_phy_ref_clk_0 {
- groups =
- "rg_rgmii_phy_ref_clk_grp0";
- function =
- "rg_rgmii_phy_ref_clk_m0";
- };
- };
-
- rg_rgmii_phy_ref_clk_pmx1: rg_rgmii_phy_ref_clk@1 {
- rg_rgmii_phy_ref_clk_1 {
- groups =
- "rg_rgmii_phy_ref_clk_grp1";
- function =
- "rg_rgmii_phy_ref_clk_m1";
- };
- };
-
- sd0_pmx: sd0@0 {
- sd0 {
- groups = "sd0_grp";
- function = "sd0";
- };
- };
-
- sd0_4bit_pmx: sd0_4bit@0 {
- sd0_4bit {
- groups = "sd0_4bit_grp";
- function = "sd0_4bit";
- };
- };
-
- sd1_pmx: sd1@0 {
- sd1 {
- groups = "sd1_grp";
- function = "sd1";
- };
- };
-
- sd1_4bit_pmx0: sd1_4bit@0 {
- sd1_4bit_0 {
- groups = "sd1_4bit_grp0";
- function = "sd1_4bit_m0";
- };
- };
-
- sd1_4bit_pmx1: sd1_4bit@1 {
- sd1_4bit_1 {
- groups = "sd1_4bit_grp1";
- function = "sd1_4bit_m1";
- };
- };
-
- sd2_pmx0: sd2@0 {
- sd2_0 {
- groups = "sd2_grp0";
- function = "sd2_m0";
- };
- };
-
- sd2_no_cdb_pmx0: sd2_no_cdb@0 {
- sd2_no_cdb_0 {
- groups = "sd2_no_cdb_grp0";
- function = "sd2_no_cdb_m0";
- };
- };
-
- sd3_pmx: sd3@0 {
- sd3 {
- groups = "sd3_grp";
- function = "sd3";
- };
- };
-
- sd5_pmx: sd5@0 {
- sd5 {
- groups = "sd5_grp";
- function = "sd5";
- };
- };
-
- sd6_pmx0: sd6@0 {
- sd6_0 {
- groups = "sd6_grp0";
- function = "sd6_m0";
- };
- };
-
- sd6_pmx1: sd6@1 {
- sd6_1 {
- groups = "sd6_grp1";
- function = "sd6_m1";
- };
- };
-
- sp0_ext_ldo_on_pmx: sp0_ext_ldo_on@0 {
- sp0_ext_ldo_on {
- groups = "sp0_ext_ldo_on_grp";
- function = "sp0_ext_ldo_on";
- };
- };
-
- sp0_qspi_pmx: sp0_qspi@0 {
- sp0_qspi {
- groups = "sp0_qspi_grp";
- function = "sp0_qspi";
- };
- };
-
- sp1_spi_pmx: sp1_spi@0 {
- sp1_spi {
- groups = "sp1_spi_grp";
- function = "sp1_spi";
- };
- };
-
- tpiu_trace_pmx: tpiu_trace@0 {
- tpiu_trace {
- groups = "tpiu_trace_grp";
- function = "tpiu_trace";
- };
- };
-
- uart0_pmx: uart0@0 {
- uart0 {
- groups = "uart0_grp";
- function = "uart0";
- };
- };
-
- uart0_nopause_pmx: uart0_nopause@0 {
- uart0_nopause {
- groups = "uart0_nopause_grp";
- function = "uart0_nopause";
- };
- };
-
- uart1_pmx: uart1@0 {
- uart1 {
- groups = "uart1_grp";
- function = "uart1";
- };
- };
-
- uart2_pmx: uart2@0 {
- uart2 {
- groups = "uart2_grp";
- function = "uart2";
- };
- };
-
- uart3_pmx0: uart3@0 {
- uart3_0 {
- groups = "uart3_grp0";
- function = "uart3_m0";
- };
- };
-
- uart3_pmx1: uart3@1 {
- uart3_1 {
- groups = "uart3_grp1";
- function = "uart3_m1";
- };
- };
-
- uart3_pmx2: uart3@2 {
- uart3_2 {
- groups = "uart3_grp2";
- function = "uart3_m2";
- };
- };
-
- uart3_pmx3: uart3@3 {
- uart3_3 {
- groups = "uart3_grp3";
- function = "uart3_m3";
- };
- };
-
- uart3_nopause_pmx0: uart3_nopause@0 {
- uart3_nopause_0 {
- groups = "uart3_nopause_grp0";
- function = "uart3_nopause_m0";
- };
- };
-
- uart3_nopause_pmx1: uart3_nopause@1 {
- uart3_nopause_1 {
- groups = "uart3_nopause_grp1";
- function = "uart3_nopause_m1";
- };
- };
-
- uart4_pmx0: uart4@0 {
- uart4_0 {
- groups = "uart4_grp0";
- function = "uart4_m0";
- };
- };
-
- uart4_pmx1: uart4@1 {
- uart4_1 {
- groups = "uart4_grp1";
- function = "uart4_m1";
- };
- };
-
- uart4_pmx2: uart4@2 {
- uart4_2 {
- groups = "uart4_grp2";
- function = "uart4_m2";
- };
- };
-
- uart4_nopause_pmx: uart4_nopause@0 {
- uart4_nopause {
- groups = "uart4_nopause_grp";
- function = "uart4_nopause";
- };
- };
-
- usb0_drvvbus_pmx: usb0_drvvbus@0 {
- usb0_drvvbus {
- groups = "usb0_drvvbus_grp";
- function = "usb0_drvvbus";
- };
- };
-
- usb1_drvvbus_pmx: usb1_drvvbus@0 {
- usb1_drvvbus {
- groups = "usb1_drvvbus_grp";
- function = "usb1_drvvbus";
- };
- };
-
- visbus_dout_pmx: visbus_dout@0 {
- visbus_dout {
- groups = "visbus_dout_grp";
- function = "visbus_dout";
- };
- };
-
- vi_vip1_pmx: vi_vip1@0 {
- vi_vip1 {
- groups = "vi_vip1_grp";
- function = "vi_vip1";
- };
- };
-
- vi_vip1_ext_pmx: vi_vip1_ext@0 {
- vi_vip1_ext {
- groups = "vi_vip1_ext_grp";
- function = "vi_vip1_ext";
- };
- };
-
- vi_vip1_low8bit_pmx: vi_vip1_low8bit@0 {
- vi_vip1_low8bit {
- groups = "vi_vip1_low8bit_grp";
- function = "vi_vip1_low8bit";
- };
- };
-
- vi_vip1_high8bit_pmx: vi_vip1_high8bit@0 {
- vi_vip1_high8bit {
- groups = "vi_vip1_high8bit_grp";
- function = "vi_vip1_high8bit";
- };
- };
- };
-
- pmipc {
- compatible = "arteris, flexnoc", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x13240000 0x13240000 0x00010000>;
- pmipc@0x13240000 {
- compatible = "sirf,atlas7-pmipc";
- reg = <0x13240000 0x00010000>;
- };
- };
-
- dramfw {
- compatible = "arteris, flexnoc", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x10830000 0x10830000 0x18000>;
- dramfw@10820000 {
- compatible = "sirf,nocfw-dramfw";
- reg = <0x10830000 0x18000>;
- };
- };
-
- spramfw {
- compatible = "arteris, flexnoc", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x10250000 0x10250000 0x3000>;
- spramfw@10820000 {
- compatible = "sirf,nocfw-spramfw";
- reg = <0x10250000 0x3000>;
- };
- };
-
- cpum {
- compatible = "arteris, flexnoc", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x10200000 0x10200000 0x3000>;
- cpum@10200000 {
- compatible = "sirf,nocfw-cpum";
- reg = <0x10200000 0x3000>;
- };
- };
-
- cgum {
- compatible = "arteris, flexnoc", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x18641000 0x18641000 0x3000>,
- <0x18620000 0x18620000 0x1000>,
- <0x18630000 0x18630000 0x10000>;
-
- cgum@18641000 {
- compatible = "sirf,nocfw-cgum";
- reg = <0x18641000 0x3000>;
- };
-
- car: clock-controller@18620000 {
- compatible = "sirf,atlas7-car";
- reg = <0x18620000 0x1000>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
- pwm: pwm@18630000 {
- compatible = "sirf,prima2-pwm";
- #pwm-cells = <2>;
- reg = <0x18630000 0x10000>;
- clocks = <&car 138>, <&car 139>, <&car 237>,
- <&car 240>, <&car 140>, <&car 246>;
- clock-names = "pwmc", "sigsrc0", "sigsrc1",
- "sigsrc2", "sigsrc3", "sigsrc4";
- };
- };
-
- gnssm {
- compatible = "arteris, flexnoc", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x18000000 0x18000000 0x0000ffff>,
- <0x18010000 0x18010000 0x1000>,
- <0x18020000 0x18020000 0x1000>,
- <0x18030000 0x18030000 0x1000>,
- <0x18040000 0x18040000 0x1000>,
- <0x18050000 0x18050000 0x1000>,
- <0x18060000 0x18060000 0x1000>,
- <0x180b0000 0x180b0000 0x4000>,
- <0x18100000 0x18100000 0x3000>,
- <0x18250000 0x18250000 0x10000>,
- <0x18200000 0x18200000 0x1000>;
-
- dmac0: dma-controller@18000000 {
- cell-index = <0>;
- compatible = "sirf,atlas7-dmac";
- reg = <0x18000000 0x1000>;
- interrupts = <0 12 0>;
- clocks = <&car 89>;
- dma-channels = <16>;
- #dma-cells = <1>;
- };
-
- gnssmfw@0x18100000 {
- compatible = "sirf,nocfw-gnssm";
- reg = <0x18100000 0x3000>;
- };
-
- uart0: uart@18010000 {
- cell-index = <0>;
- compatible = "sirf,atlas7-uart";
- reg = <0x18010000 0x1000>;
- interrupts = <0 17 0>;
- clocks = <&car 90>;
- fifosize = <128>;
- dmas = <&dmac0 3>, <&dmac0 2>;
- dma-names = "rx", "tx";
- };
-
- uart1: uart@18020000 {
- cell-index = <1>;
- compatible = "sirf,atlas7-uart";
- reg = <0x18020000 0x1000>;
- interrupts = <0 18 0>;
- clocks = <&car 88>;
- fifosize = <32>;
- };
-
- uart2: uart@18030000 {
- cell-index = <2>;
- compatible = "sirf,atlas7-uart";
- reg = <0x18030000 0x1000>;
- interrupts = <0 19 0>;
- clocks = <&car 91>;
- fifosize = <128>;
- dmas = <&dmac0 6>, <&dmac0 7>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
- uart3: uart@18040000 {
- cell-index = <3>;
- compatible = "sirf,atlas7-uart";
- reg = <0x18040000 0x1000>;
- interrupts = <0 66 0>;
- clocks = <&car 92>;
- fifosize = <128>;
- dmas = <&dmac0 4>, <&dmac0 5>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
- uart4: uart@18050000 {
- cell-index = <4>;
- compatible = "sirf,atlas7-uart";
- reg = <0x18050000 0x1000>;
- interrupts = <0 69 0>;
- clocks = <&car 93>;
- fifosize = <128>;
- dmas = <&dmac0 0>, <&dmac0 1>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
- uart5: uart@18060000 {
- cell-index = <5>;
- compatible = "sirf,atlas7-uart";
- reg = <0x18060000 0x1000>;
- interrupts = <0 71 0>;
- clocks = <&car 94>;
- fifosize = <128>;
- dmas = <&dmac0 8>, <&dmac0 9>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
- gmac: eth@180b0000 {
- compatible = "snps, dwc-eth-qos";
- reg = <0x180b0000 0x4000>;
- interrupts = <0 59 0>, <0 70 0>;
- interrupt-names = "macirq", "macpmt";
- clocks = <&car 39>, <&car 45>,
- <&car 86>, <&car 87>;
- clock-names = "gnssm_rgmii", "gnssm_gmac",
- "rgmii", "gmac";
- local-mac-address = [00 00 00 00 00 00];
- phy-mode = "rgmii";
- };
- dspub@18250000 {
- compatible = "dx,cc44p";
- reg = <0x18250000 0x10000>;
- interrupts = <0 27 0>;
- };
-
- spi1: spi@18200000 {
- compatible = "sirf,prima2-spi";
- reg = <0x18200000 0x1000>;
- interrupts = <0 16 0>;
- clocks = <&car 95>;
- #address-cells = <1>;
- #size-cells = <0>;
- dmas = <&dmac0 12>, <&dmac0 13>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
- };
-
-
- gpum {
- compatible = "arteris, flexnoc", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x13000000 0x13000000 0x3000>,
- <0x13010000 0x13010000 0x1400>,
- <0x13010800 0x13010800 0x100>,
- <0x13011000 0x13011000 0x100>;
- gpum@0x13000000 {
- compatible = "sirf,nocfw-gpum";
- reg = <0x13000000 0x3000>;
- };
- dmacsdrr: dma-controller@13010800 {
- cell-index = <5>;
- compatible = "sirf,atlas7-dmac-v2";
- reg = <0x13010800 0x100>;
- interrupts = <0 8 0>;
- clocks = <&car 127>;
- #dma-cells = <1>;
- #dma-channels = <1>;
- };
- dmacsdrw: dma-controller@13011000 {
- cell-index = <6>;
- compatible = "sirf,atlas7-dmac-v2";
- reg = <0x13011000 0x100>;
- interrupts = <0 9 0>;
- clocks = <&car 127>;
- #dma-cells = <1>;
- #dma-channels = <1>;
- };
- sdr@0x13010000 {
- compatible = "sirf,atlas7-sdr";
- reg = <0x13010000 0x1400>;
- interrupts = <0 7 0>,
- <0 8 0>,
- <0 9 0>;
- clocks = <&car 127>;
- dmas = <&dmacsdrr 0>, <&dmacsdrw 0>;
- dma-names = "tx", "rx";
- };
- };
-
- mediam {
- compatible = "arteris, flexnoc", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x15000000 0x15000000 0x00600000>,
- <0x16000000 0x16000000 0x00200000>,
- <0x17000000 0x17000000 0x10000>,
- <0x17020000 0x17020000 0x1000>,
- <0x17030000 0x17030000 0x1000>,
- <0x17040000 0x17040000 0x1000>,
- <0x17050000 0x17050000 0x10000>,
- <0x17060000 0x17060000 0x200>,
- <0x17060200 0x17060200 0x100>,
- <0x17070000 0x17070000 0x200>,
- <0x17070200 0x17070200 0x100>,
- <0x170A0000 0x170A0000 0x3000>;
-
- multimedia@15000000 {
- compatible = "sirf,atlas7-video-codec";
- reg = <0x15000000 0x10000>;
- interrupts = <0 5 0>;
- clocks = <&car 102>;
- };
-
- mediam@170A0000 {
- compatible = "sirf,nocfw-mediam";
- reg = <0x170A0000 0x3000>;
- };
-
- gpio_0: gpio_mediam@17040000 {
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- compatible = "sirf,atlas7-gpio";
- reg = <0x17040000 0x1000>;
- interrupts = <0 13 0>, <0 14 0>;
- clocks = <&car 107>;
- clock-names = "gpio0_io";
- gpio-controller;
- interrupt-controller;
-
- gpio-banks = <2>;
- gpio-ranges = <&pinctrl 0 0 0>,
- <&pinctrl 32 0 0>;
- gpio-ranges-group-names = "lvds_gpio_grp",
- "uart_nand_gpio_grp";
- };
-
- nand@17050000 {
- compatible = "sirf,atlas7-nand";
- reg = <0x17050000 0x10000>;
- pinctrl-names = "default";
- pinctrl-0 = <&nd_df_pmx>;
- interrupts = <0 41 0>;
- clocks = <&car 108>, <&car 112>;
- clock-names = "nand_io", "nand_nand";
- };
-
- sd0: sdhci@16000000 {
- cell-index = <0>;
- compatible = "sirf,atlas7-sdhc";
- reg = <0x16000000 0x100000>;
- interrupts = <0 38 0>;
- clocks = <&car 109>, <&car 111>;
- clock-names = "core", "iface";
- wp-inverted;
- non-removable;
- status = "disabled";
- bus-width = <8>;
- };
-
- sd1: sdhci@16100000 {
- cell-index = <1>;
- compatible = "sirf,atlas7-sdhc";
- reg = <0x16100000 0x100000>;
- interrupts = <0 38 0>;
- clocks = <&car 109>, <&car 111>;
- clock-names = "core", "iface";
- non-removable;
- status = "disabled";
- bus-width = <8>;
- };
-
- jpeg@17000000 {
- compatible = "sirf,atlas7-jpeg";
- reg = <0x17000000 0x10000>;
- interrupts = <0 72 0>,
- <0 73 0>;
- clocks = <&car 103>;
- };
-
- usb0: usb@17060000 {
- cell-index = <0>;
- compatible = "sirf,atlas7-usb";
- reg = <0x17060000 0x200>;
- interrupts = <0 10 0>;
- clocks = <&car 113>;
- sirf,usbphy = <&usbphy0>;
- phy_type = "utmi";
- dr_mode = "otg";
- maximum-speed = "high-speed";
- status = "okay";
- };
-
- usb1: usb@17070000 {
- cell-index = <1>;
- compatible = "sirf,atlas7-usb";
- reg = <0x17070000 0x200>;
- interrupts = <0 11 0>;
- clocks = <&car 114>;
- sirf,usbphy = <&usbphy1>;
- phy_type = "utmi";
- dr_mode = "host";
- maximum-speed = "high-speed";
- status = "okay";
- };
-
- usbphy0: usbphy@0 {
- compatible = "sirf,atlas7-usbphy";
- reg = <0x17060200 0x100>;
- clocks = <&car 115>;
- status = "okay";
- };
-
- usbphy1: usbphy@1 {
- compatible = "sirf,atlas7-usbphy";
- reg = <0x17070200 0x100>;
- clocks = <&car 116>;
- status = "okay";
- };
-
- i2c0: i2c@17020000 {
- cell-index = <0>;
- compatible = "sirf,prima2-i2c";
- reg = <0x17020000 0x1000>;
- interrupts = <0 24 0>;
- clocks = <&car 105>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- };
-
- vdifm {
- compatible = "arteris, flexnoc", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x13290000 0x13290000 0x3000>,
- <0x13300000 0x13300000 0x1000>,
- <0x14200000 0x14200000 0x600000>;
-
- vdifm@13290000 {
- compatible = "sirf,nocfw-vdifm";
- reg = <0x13290000 0x3000>;
- };
-
- gpio_1: gpio_vdifm@13300000 {
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- compatible = "sirf,atlas7-gpio";
- reg = <0x13300000 0x1000>;
- interrupts = <0 43 0>, <0 44 0>,
- <0 45 0>, <0 46 0>;
- clocks = <&car 84>;
- clock-names = "gpio1_io";
- gpio-controller;
- interrupt-controller;
-
- gpio-banks = <4>;
- gpio-ranges = <&pinctrl 0 0 0>,
- <&pinctrl 32 0 0>,
- <&pinctrl 64 0 0>,
- <&pinctrl 96 0 0>;
- gpio-ranges-group-names = "gnss_gpio_grp",
- "lcd_vip_gpio_grp",
- "sdio_i2s_gpio_grp",
- "sp_rgmii_gpio_grp";
- };
-
- sd2: sdhci@14200000 {
- cell-index = <2>;
- compatible = "sirf,atlas7-sdhc";
- reg = <0x14200000 0x100000>;
- interrupts = <0 23 0>;
- clocks = <&car 70>, <&car 75>;
- clock-names = "core", "iface";
- status = "disabled";
- bus-width = <4>;
- sd-uhs-sdr50;
- vqmmc-supply = <&vqmmc>;
- vqmmc: vqmmc@2 {
- regulator-min-microvolt = <1650000>;
- regulator-max-microvolt = <1950000>;
- regulator-name = "vqmmc-ldo";
- regulator-type = "voltage";
- regulator-boot-on;
- regulator-allow-bypass;
- };
- };
-
- sd3: sdhci@14300000 {
- cell-index = <3>;
- compatible = "sirf,atlas7-sdhc";
- reg = <0x14300000 0x100000>;
- interrupts = <0 23 0>;
- clocks = <&car 76>, <&car 81>;
- clock-names = "core", "iface";
- status = "disabled";
- bus-width = <4>;
- };
-
- sd5: sdhci@14500000 {
- cell-index = <5>;
- compatible = "sirf,atlas7-sdhc";
- reg = <0x14500000 0x100000>;
- interrupts = <0 39 0>;
- clocks = <&car 71>, <&car 76>;
- clock-names = "core", "iface";
- status = "disabled";
- bus-width = <4>;
- loop-dma;
- };
-
- sd6: sdhci@14600000 {
- cell-index = <6>;
- compatible = "sirf,atlas7-sdhc";
- reg = <0x14600000 0x100000>;
- interrupts = <0 98 0>;
- clocks = <&car 72>, <&car 77>;
- clock-names = "core", "iface";
- status = "disabled";
- bus-width = <4>;
- };
-
- sd7: sdhci@14700000 {
- cell-index = <7>;
- compatible = "sirf,atlas7-sdhc";
- reg = <0x14700000 0x100000>;
- interrupts = <0 98 0>;
- clocks = <&car 72>, <&car 77>;
- clock-names = "core", "iface";
- status = "disabled";
- bus-width = <4>;
- };
- };
-
- audiom {
- compatible = "arteris, flexnoc", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x10d50000 0x10d50000 0x0000ffff>,
- <0x10d60000 0x10d60000 0x0000ffff>,
- <0x10d80000 0x10d80000 0x0000ffff>,
- <0x10d90000 0x10d90000 0x0000ffff>,
- <0x10ED0000 0x10ED0000 0x3000>,
- <0x10dc8000 0x10dc8000 0x1000>,
- <0x10dc0000 0x10dc0000 0x1000>,
- <0x10db0000 0x10db0000 0x4000>,
- <0x10d40000 0x10d40000 0x1000>,
- <0x10d30000 0x10d30000 0x1000>;
-
- timer@10dc0000 {
- compatible = "sirf,atlas7-tick";
- reg = <0x10dc0000 0x1000>;
- interrupts = <0 0 0>,
- <0 1 0>,
- <0 2 0>,
- <0 49 0>,
- <0 50 0>,
- <0 51 0>;
- clocks = <&car 47>;
- };
-
- timerb@10dc8000 {
- compatible = "sirf,atlas7-tick";
- reg = <0x10dc8000 0x1000>;
- interrupts = <0 74 0>,
- <0 75 0>,
- <0 76 0>,
- <0 77 0>,
- <0 78 0>,
- <0 79 0>;
- clocks = <&car 47>;
- };
-
- vip0@10db0000 {
- compatible = "sirf,atlas7-vip0";
- reg = <0x10db0000 0x2000>;
- interrupts = <0 85 0>;
- sirf,vip_cma_size = <0xC00000>;
- };
-
- cvd@10db2000 {
- compatible = "sirf,cvd";
- reg = <0x10db2000 0x2000>;
- clocks = <&car 46>;
- };
-
- dmac2: dma-controller@10d50000 {
- cell-index = <2>;
- compatible = "sirf,atlas7-dmac";
- reg = <0x10d50000 0xffff>;
- interrupts = <0 55 0>;
- clocks = <&car 60>;
- dma-channels = <16>;
- #dma-cells = <1>;
- };
-
- dmac3: dma-controller@10d60000 {
- cell-index = <3>;
- compatible = "sirf,atlas7-dmac";
- reg = <0x10d60000 0xffff>;
- interrupts = <0 56 0>;
- clocks = <&car 61>;
- dma-channels = <16>;
- #dma-cells = <1>;
- };
-
- adc: adc@10d80000 {
- compatible = "sirf,atlas7-adc";
- reg = <0x10d80000 0xffff>;
- interrupts = <0 34 0>;
- clocks = <&car 49>;
- #io-channel-cells = <1>;
- };
-
- pulsec@10d90000 {
- compatible = "sirf,prima2-pulsec";
- reg = <0x10d90000 0xffff>;
- interrupts = <0 42 0>;
- clocks = <&car 54>;
- };
-
- audiom@10ED0000 {
- compatible = "sirf,nocfw-audiom";
- reg = <0x10ED0000 0x3000>;
- interrupts = <0 102 0>;
- };
-
- usp1: usp@10d30000 {
- cell-index = <1>;
- reg = <0x10d30000 0x1000>;
- fifosize = <512>;
- clocks = <&car 58>;
- dmas = <&dmac2 6>, <&dmac2 7>;
- dma-names = "rx", "tx";
- };
-
- usp2: usp@10d40000 {
- cell-index = <2>;
- reg = <0x10d40000 0x1000>;
- interrupts = <0 22 0>;
- clocks = <&car 59>;
- dmas = <&dmac2 12>, <&dmac2 13>;
- dma-names = "rx", "tx";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
- };
-
- ddrm {
- compatible = "arteris, flexnoc", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x10820000 0x10820000 0x3000>,
- <0x10800000 0x10800000 0x2000>;
- ddrm@10820000 {
- compatible = "sirf,nocfw-ddrm";
- reg = <0x10820000 0x3000>;
- interrupts = <0 105 0>;
- };
-
- memory-controller@0x10800000 {
- compatible = "sirf,atlas7-memc";
- reg = <0x10800000 0x2000>;
- };
-
- };
-
- btm {
- compatible = "arteris, flexnoc", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x11002000 0x11002000 0x0000ffff>,
- <0x11010000 0x11010000 0x3000>,
- <0x11000000 0x11000000 0x1000>,
- <0x11001000 0x11001000 0x1000>;
-
- dmac4: dma-controller@11002000 {
- cell-index = <4>;
- compatible = "sirf,atlas7-dmac";
- reg = <0x11002000 0x1000>;
- interrupts = <0 99 0>;
- clocks = <&car 130>;
- dma-channels = <16>;
- #dma-cells = <1>;
- };
- uart6: uart@11000000 {
- cell-index = <6>;
- compatible = "sirf,atlas7-bt-uart",
- "sirf,atlas7-uart";
- reg = <0x11000000 0x1000>;
- interrupts = <0 100 0>;
- clocks = <&car 131>, <&car 133>, <&car 134>;
- clock-names = "uart", "general", "noc";
- fifosize = <128>;
- dmas = <&dmac4 12>, <&dmac4 13>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- usp3: usp@11001000 {
- compatible = "sirf,atlas7-bt-usp",
- "sirf,prima2-usp-pcm";
- cell-index = <3>;
- reg = <0x11001000 0x1000>;
- fifosize = <512>;
- clocks = <&car 132>, <&car 129>, <&car 133>,
- <&car 134>, <&car 135>;
- clock-names = "usp3_io", "a7ca_btss", "a7ca_io",
- "noc_btm_io", "thbtm_io";
- dmas = <&dmac4 0>, <&dmac4 1>;
- dma-names = "rx", "tx";
- };
-
- btm@11010000 {
- compatible = "sirf,nocfw-btm";
- reg = <0x11010000 0x3000>;
- };
- };
-
- rtcm {
- compatible = "arteris, flexnoc", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x18810000 0x18810000 0x3000>,
- <0x18840000 0x18840000 0x1000>,
- <0x18890000 0x18890000 0x1000>,
- <0x188B0000 0x188B0000 0x10000>,
- <0x188D0000 0x188D0000 0x1000>;
- rtcm@18810000 {
- compatible = "sirf,nocfw-rtcm";
- reg = <0x18810000 0x3000>;
- interrupts = <0 109 0>;
- };
-
- gpio_2: gpio_rtcm@18890000 {
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- compatible = "sirf,atlas7-gpio";
- reg = <0x18890000 0x1000>;
- interrupts = <0 47 0>;
- gpio-controller;
- interrupt-controller;
-
- gpio-banks = <1>;
- gpio-ranges = <&pinctrl 0 0 0>;
- gpio-ranges-group-names = "rtc_gpio_grp";
- };
-
- rtc-iobg@18840000 {
- compatible = "sirf,prima2-rtciobg",
- "sirf-prima2-rtciobg-bus",
- "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x18840000 0x1000>;
-
- sysrtc@2000 {
- compatible = "sirf,prima2-sysrtc";
- reg = <0x2000 0x100>;
- interrupts = <0 52 0>;
- };
- pwrc@3000 {
- compatible = "sirf,atlas7-pwrc";
- reg = <0x3000 0x100>;
- };
- };
-
- qspi: flash@188B0000 {
- cell-index = <0>;
- compatible = "sirf,atlas7-qspi-nor";
- reg = <0x188B0000 0x10000>;
- interrupts = <0 15 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- retain@0x188D0000 {
- compatible = "sirf,atlas7-retain";
- reg = <0x188D0000 0x1000>;
- };
-
- };
- disp-iobg {
- /* lcdc0 */
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x13100000 0x13100000 0x20000>,
- <0x10e10000 0x10e10000 0x10000>,
- <0x17010000 0x17010000 0x10000>;
-
- lcd@13100000 {
- compatible = "sirf,atlas7-lcdc";
- reg = <0x13100000 0x10000>;
- interrupts = <0 30 0>;
- clocks = <&car 79>;
- };
- vpp@13110000 {
- compatible = "sirf,atlas7-vpp";
- reg = <0x13110000 0x10000>;
- interrupts = <0 31 0>;
- clocks = <&car 78>;
- resets = <&car 29>;
- };
- lvds@10e10000 {
- compatible = "sirf,atlas7-lvdsc";
- reg = <0x10e10000 0x10000>;
- interrupts = <0 64 0>;
- clocks = <&car 54>;
- resets = <&car 29>;
- };
- g2d@17010000 {
- compatible = "sirf, atlas7-g2d";
- reg = <0x17010000 0x10000>;
- interrupts = <0 61 0>;
- clocks = <&car 104>;
- };
-
- };
-
- graphics-iobg {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x12000000 0x12000000 0x1000000>;
-
- graphics@12000000 {
- compatible = "powervr,sgx531";
- reg = <0x12000000 0x1000000>;
- interrupts = <0 6 0>;
- clocks = <&car 126>;
- };
- };
- };
-};
diff --git a/arch/arm/boot/dts/bcm21664.dtsi b/arch/arm/boot/dts/bcm21664.dtsi
index 58ec1b2f8ef6..cc58f2b926b9 100644
--- a/arch/arm/boot/dts/bcm21664.dtsi
+++ b/arch/arm/boot/dts/bcm21664.dtsi
@@ -27,7 +27,7 @@
bootargs = "console=ttyS0,115200n8";
};
- cpus {
+ cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
index 403bacf986eb..3b4ab947492a 100644
--- a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
+++ b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
@@ -25,6 +25,7 @@
emmc2bus = &emmc2bus;
ethernet0 = &genet;
pcie0 = &pcie0;
+ blconfig = &blconfig;
};
leds {
@@ -218,6 +219,22 @@
status = "okay";
};
+&rmem {
+ /*
+ * RPi4's co-processor will copy the board's bootloader configuration
+ * into memory for the OS to consume. It'll also update this node with
+ * its placement information.
+ */
+ blconfig: nvram@0 {
+ compatible = "raspberrypi,bootloader-config", "nvmem-rmem";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0 0x0 0x0>;
+ no-map;
+ status = "disabled";
+ };
+};
+
/* SDHCI is used to control the SDIO for wireless */
&sdhci {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi
index 4847dd305317..462b1dfb0385 100644
--- a/arch/arm/boot/dts/bcm2711.dtsi
+++ b/arch/arm/boot/dts/bcm2711.dtsi
@@ -308,6 +308,22 @@
#reset-cells = <1>;
};
+ bsc_intr: interrupt-controller@7ef00040 {
+ compatible = "brcm,bcm2711-l2-intc", "brcm,l2-intc";
+ reg = <0x7ef00040 0x30>;
+ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ aon_intr: interrupt-controller@7ef00100 {
+ compatible = "brcm,bcm2711-l2-intc", "brcm,l2-intc";
+ reg = <0x7ef00100 0x30>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
hdmi0: hdmi@7ef00700 {
compatible = "brcm,bcm2711-hdmi0";
reg = <0x7ef00700 0x300>,
@@ -330,6 +346,11 @@
"hd";
clock-names = "hdmi", "bvb", "audio", "cec";
resets = <&dvp 0>;
+ interrupt-parent = <&aon_intr>;
+ interrupts = <0>, <1>, <2>,
+ <3>, <4>, <5>;
+ interrupt-names = "cec-tx", "cec-rx", "cec-low",
+ "wakeup", "hpd-connected", "hpd-removed";
ddc = <&ddc0>;
dmas = <&dma 10>;
dma-names = "audio-rx";
@@ -341,6 +362,8 @@
reg = <0x7ef04500 0x100>, <0x7ef00b00 0x300>;
reg-names = "bsc", "auto-i2c";
clock-frequency = <97500>;
+ interrupt-parent = <&bsc_intr>;
+ interrupts = <0>;
status = "disabled";
};
@@ -367,6 +390,11 @@
ddc = <&ddc1>;
clock-names = "hdmi", "bvb", "audio", "cec";
resets = <&dvp 1>;
+ interrupt-parent = <&aon_intr>;
+ interrupts = <8>, <7>, <6>,
+ <9>, <10>, <11>;
+ interrupt-names = "cec-tx", "cec-rx", "cec-low",
+ "wakeup", "hpd-connected", "hpd-removed";
dmas = <&dma 17>;
dma-names = "audio-rx";
status = "disabled";
@@ -377,6 +405,8 @@
reg = <0x7ef09500 0x100>, <0x7ef05b00 0x300>;
reg-names = "bsc", "auto-i2c";
clock-frequency = <97500>;
+ interrupt-parent = <&bsc_intr>;
+ interrupts = <1>;
status = "disabled";
};
};
@@ -540,6 +570,7 @@
&dsi1 {
interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ compatible = "brcm,bcm2711-dsi1";
};
&gpio {
diff --git a/arch/arm/boot/dts/berlin2.dtsi b/arch/arm/boot/dts/berlin2.dtsi
index 6194857f8a02..1114c592e461 100644
--- a/arch/arm/boot/dts/berlin2.dtsi
+++ b/arch/arm/boot/dts/berlin2.dtsi
@@ -191,7 +191,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <8>;
+ ngpios = <8>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -209,7 +209,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <8>;
+ ngpios = <8>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -227,7 +227,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <8>;
+ ngpios = <8>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -245,7 +245,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <8>;
+ ngpios = <8>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -446,7 +446,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <8>;
+ ngpios = <8>;
reg = <0>;
};
};
@@ -461,7 +461,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <8>;
+ ngpios = <8>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm/boot/dts/berlin2cd-google-chromecast.dts b/arch/arm/boot/dts/berlin2cd-google-chromecast.dts
index 56fa951bc86f..c1d91424e658 100644
--- a/arch/arm/boot/dts/berlin2cd-google-chromecast.dts
+++ b/arch/arm/boot/dts/berlin2cd-google-chromecast.dts
@@ -34,19 +34,19 @@
linux,usable-memory = <0x00000000 0x20000000>; /* 512 MB */
};
- leds {
+ led-controller {
compatible = "pwm-leds";
pinctrl-0 = <&ledpwm_pmux>;
pinctrl-names = "default";
- white {
+ led-1 {
label = "white";
pwms = <&pwm 0 600000 0>;
max-brightness = <255>;
linux,default-trigger = "default-on";
};
- red {
+ led-2 {
label = "red";
pwms = <&pwm 1 600000 0>;
max-brightness = <255>;
diff --git a/arch/arm/boot/dts/berlin2cd.dtsi b/arch/arm/boot/dts/berlin2cd.dtsi
index 6f30d7eb3b41..b2768f7a3185 100644
--- a/arch/arm/boot/dts/berlin2cd.dtsi
+++ b/arch/arm/boot/dts/berlin2cd.dtsi
@@ -181,7 +181,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <8>;
+ ngpios = <8>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -199,7 +199,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <8>;
+ ngpios = <8>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -217,7 +217,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <8>;
+ ngpios = <8>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -235,7 +235,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <8>;
+ ngpios = <8>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -473,7 +473,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <8>;
+ ngpios = <8>;
reg = <0>;
};
};
@@ -518,7 +518,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <8>;
+ ngpios = <8>;
reg = <0>;
};
};
diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi
index b6a0acac6836..598a46f96a82 100644
--- a/arch/arm/boot/dts/berlin2q.dtsi
+++ b/arch/arm/boot/dts/berlin2q.dtsi
@@ -252,7 +252,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -270,7 +270,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -288,7 +288,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -306,7 +306,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -552,7 +552,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
};
};
@@ -613,7 +613,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
};
};
diff --git a/arch/arm/boot/dts/cros-ec-keyboard.dtsi b/arch/arm/boot/dts/cros-ec-keyboard.dtsi
index 165c5bcd510e..55c4744fa7e7 100644
--- a/arch/arm/boot/dts/cros-ec-keyboard.dtsi
+++ b/arch/arm/boot/dts/cros-ec-keyboard.dtsi
@@ -6,103 +6,18 @@
*/
#include <dt-bindings/input/input.h>
+#include <dt-bindings/input/cros-ec-keyboard.h>
&cros_ec {
- keyboard-controller {
+ keyboard_controller: keyboard-controller {
compatible = "google,cros-ec-keyb";
keypad,num-rows = <8>;
keypad,num-columns = <13>;
google,needs-ghost-filter;
linux,keymap = <
- MATRIX_KEY(0x00, 0x01, KEY_LEFTMETA)
- MATRIX_KEY(0x00, 0x02, KEY_F1)
- MATRIX_KEY(0x00, 0x03, KEY_B)
- MATRIX_KEY(0x00, 0x04, KEY_F10)
- MATRIX_KEY(0x00, 0x05, KEY_RO)
- MATRIX_KEY(0x00, 0x06, KEY_N)
- MATRIX_KEY(0x00, 0x08, KEY_EQUAL)
- MATRIX_KEY(0x00, 0x0a, KEY_RIGHTALT)
-
- MATRIX_KEY(0x01, 0x01, KEY_ESC)
- MATRIX_KEY(0x01, 0x02, KEY_F4)
- MATRIX_KEY(0x01, 0x03, KEY_G)
- MATRIX_KEY(0x01, 0x04, KEY_F7)
- MATRIX_KEY(0x01, 0x06, KEY_H)
- MATRIX_KEY(0x01, 0x08, KEY_APOSTROPHE)
- MATRIX_KEY(0x01, 0x09, KEY_F9)
- MATRIX_KEY(0x01, 0x0b, KEY_BACKSPACE)
- MATRIX_KEY(0x01, 0x0c, KEY_HENKAN)
-
- MATRIX_KEY(0x02, 0x00, KEY_LEFTCTRL)
- MATRIX_KEY(0x02, 0x01, KEY_TAB)
- MATRIX_KEY(0x02, 0x02, KEY_F3)
- MATRIX_KEY(0x02, 0x03, KEY_T)
- MATRIX_KEY(0x02, 0x04, KEY_F6)
- MATRIX_KEY(0x02, 0x05, KEY_RIGHTBRACE)
- MATRIX_KEY(0x02, 0x06, KEY_Y)
- MATRIX_KEY(0x02, 0x07, KEY_102ND)
- MATRIX_KEY(0x02, 0x08, KEY_LEFTBRACE)
- MATRIX_KEY(0x02, 0x09, KEY_F8)
- MATRIX_KEY(0x02, 0x0a, KEY_YEN)
-
- MATRIX_KEY(0x03, 0x00, KEY_LEFTMETA)
- MATRIX_KEY(0x03, 0x01, KEY_GRAVE)
- MATRIX_KEY(0x03, 0x02, KEY_F2)
- MATRIX_KEY(0x03, 0x03, KEY_5)
- MATRIX_KEY(0x03, 0x04, KEY_F5)
- MATRIX_KEY(0x03, 0x06, KEY_6)
- MATRIX_KEY(0x03, 0x08, KEY_MINUS)
- MATRIX_KEY(0x03, 0x09, KEY_F13)
- MATRIX_KEY(0x03, 0x0b, KEY_BACKSLASH)
- MATRIX_KEY(0x03, 0x0c, KEY_MUHENKAN)
-
- MATRIX_KEY(0x04, 0x00, KEY_RIGHTCTRL)
- MATRIX_KEY(0x04, 0x01, KEY_A)
- MATRIX_KEY(0x04, 0x02, KEY_D)
- MATRIX_KEY(0x04, 0x03, KEY_F)
- MATRIX_KEY(0x04, 0x04, KEY_S)
- MATRIX_KEY(0x04, 0x05, KEY_K)
- MATRIX_KEY(0x04, 0x06, KEY_J)
- MATRIX_KEY(0x04, 0x08, KEY_SEMICOLON)
- MATRIX_KEY(0x04, 0x09, KEY_L)
- MATRIX_KEY(0x04, 0x0a, KEY_BACKSLASH)
- MATRIX_KEY(0x04, 0x0b, KEY_ENTER)
-
- MATRIX_KEY(0x05, 0x01, KEY_Z)
- MATRIX_KEY(0x05, 0x02, KEY_C)
- MATRIX_KEY(0x05, 0x03, KEY_V)
- MATRIX_KEY(0x05, 0x04, KEY_X)
- MATRIX_KEY(0x05, 0x05, KEY_COMMA)
- MATRIX_KEY(0x05, 0x06, KEY_M)
- MATRIX_KEY(0x05, 0x07, KEY_LEFTSHIFT)
- MATRIX_KEY(0x05, 0x08, KEY_SLASH)
- MATRIX_KEY(0x05, 0x09, KEY_DOT)
- MATRIX_KEY(0x05, 0x0b, KEY_SPACE)
-
- MATRIX_KEY(0x06, 0x01, KEY_1)
- MATRIX_KEY(0x06, 0x02, KEY_3)
- MATRIX_KEY(0x06, 0x03, KEY_4)
- MATRIX_KEY(0x06, 0x04, KEY_2)
- MATRIX_KEY(0x06, 0x05, KEY_8)
- MATRIX_KEY(0x06, 0x06, KEY_7)
- MATRIX_KEY(0x06, 0x08, KEY_0)
- MATRIX_KEY(0x06, 0x09, KEY_9)
- MATRIX_KEY(0x06, 0x0a, KEY_LEFTALT)
- MATRIX_KEY(0x06, 0x0b, KEY_DOWN)
- MATRIX_KEY(0x06, 0x0c, KEY_RIGHT)
-
- MATRIX_KEY(0x07, 0x01, KEY_Q)
- MATRIX_KEY(0x07, 0x02, KEY_E)
- MATRIX_KEY(0x07, 0x03, KEY_R)
- MATRIX_KEY(0x07, 0x04, KEY_W)
- MATRIX_KEY(0x07, 0x05, KEY_I)
- MATRIX_KEY(0x07, 0x06, KEY_U)
- MATRIX_KEY(0x07, 0x07, KEY_RIGHTSHIFT)
- MATRIX_KEY(0x07, 0x08, KEY_P)
- MATRIX_KEY(0x07, 0x09, KEY_O)
- MATRIX_KEY(0x07, 0x0b, KEY_UP)
- MATRIX_KEY(0x07, 0x0c, KEY_LEFT)
+ CROS_STD_TOP_ROW_KEYMAP
+ CROS_STD_MAIN_KEYMAP
>;
};
};
diff --git a/arch/arm/boot/dts/dra71-evm.dts b/arch/arm/boot/dts/dra71-evm.dts
index cad58f733bd6..6d2cca6b4488 100644
--- a/arch/arm/boot/dts/dra71-evm.dts
+++ b/arch/arm/boot/dts/dra71-evm.dts
@@ -112,6 +112,8 @@
regulator-name = "lp8733-ldo0";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
};
lp8733_ldo1_reg: ldo1 {
diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi
index 2f326151116b..a09e7bd77fc7 100644
--- a/arch/arm/boot/dts/dra76x.dtsi
+++ b/arch/arm/boot/dts/dra76x.dtsi
@@ -9,6 +9,13 @@
compatible = "ti,dra762", "ti,dra7";
ocp {
+ emif1: emif@4c000000 {
+ compatible = "ti,emif-dra7xx";
+ reg = <0x4c000000 0x200>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
target-module@42c01900 {
compatible = "ti,sysc-dra7-mcan", "ti,sysc";
ranges = <0x0 0x42c00000 0x2000>;
@@ -133,3 +140,32 @@
/* dra76x is not affected by i887 */
max-frequency = <96000000>;
};
+
+&cpu0_opp_table {
+ opp_plus@1800000000 {
+ opp-hz = /bits/ 64 <1800000000>;
+ opp-microvolt = <1250000 950000 1250000>,
+ <1250000 950000 1250000>;
+ opp-supported-hw = <0xFF 0x08>;
+ };
+};
+
+&opp_supply_mpu {
+ ti,efuse-settings = <
+ /* uV offset */
+ 1060000 0x0
+ 1160000 0x4
+ 1210000 0x8
+ 1250000 0xC
+ >;
+};
+
+&abb_mpu {
+ ti,abb_info = <
+ /*uV ABB efuse rbb_m fbb_m vset_m*/
+ 1060000 0 0x0 0 0x02000000 0x01F00000
+ 1160000 0 0x4 0 0x02000000 0x01F00000
+ 1210000 0 0x8 0 0x02000000 0x01F00000
+ 1250000 0 0xC 0 0x02000000 0x01F00000
+ >;
+};
diff --git a/arch/arm/boot/dts/e60k02.dtsi b/arch/arm/boot/dts/e60k02.dtsi
index 3af1ab4458ef..cfb239d5186a 100644
--- a/arch/arm/boot/dts/e60k02.dtsi
+++ b/arch/arm/boot/dts/e60k02.dtsi
@@ -278,6 +278,12 @@
};
&uart1 {
+ /* J4, through-hole */
+ status = "okay";
+};
+
+&uart4 {
+ /* TP198, next to J4, SMD pads */
status = "okay";
};
diff --git a/arch/arm/boot/dts/efm32gg-dk3750.dts b/arch/arm/boot/dts/efm32gg-dk3750.dts
deleted file mode 100644
index adfa559a488b..000000000000
--- a/arch/arm/boot/dts/efm32gg-dk3750.dts
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device tree for EFM32GG-DK3750 development board.
- *
- * Documentation available from
- * http://www.silabs.com/Support%20Documents/TechnicalDocs/efm32gg-dk3750-ug.pdf
- */
-
-/dts-v1/;
-#include "efm32gg.dtsi"
-
-/ {
- model = "Energy Micro Giant Gecko Development Kit";
- compatible = "efm32,dk3750";
-
- chosen {
- bootargs = "console=ttyefm4,115200 init=/linuxrc ignore_loglevel ihash_entries=64 dhash_entries=64 earlyprintk uclinux.physaddr=0x8c400000 root=/dev/mtdblock0";
- };
-
- memory@88000000 {
- device_type = "memory";
- reg = <0x88000000 0x400000>;
- };
-
- soc {
- adc@40002000 {
- status = "ok";
- };
-
- i2c@4000a000 {
- energymicro,location = <3>;
- status = "ok";
-
- temp@48 {
- compatible = "st,stds75";
- reg = <0x48>;
- };
-
- eeprom@50 {
- compatible = "microchip,24c02", "atmel,24c02";
- reg = <0x50>;
- pagesize = <16>;
- };
- };
-
- spi0: spi@4000c000 { /* USART0 */
- cs-gpios = <&gpio 68 1>; // E4
- energymicro,location = <1>;
- status = "ok";
-
- microsd@0 {
- compatible = "mmc-spi-slot";
- spi-max-frequency = <100000>;
- voltage-ranges = <3200 3400>;
- broken-cd;
- reg = <0>;
- };
- };
-
- spi1: spi@4000c400 { /* USART1 */
- cs-gpios = <&gpio 51 1>; // D3
- energymicro,location = <1>;
- status = "ok";
-
- ks8851@0 {
- compatible = "ks8851";
- spi-max-frequency = <6000000>;
- reg = <0>;
- interrupt-parent = <&boardfpga>;
- interrupts = <4>;
- };
- };
-
- uart4: uart@4000e400 { /* UART1 */
- energymicro,location = <2>;
- status = "ok";
- };
-
- boardfpga: boardfpga@80000000 {
- compatible = "efm32board";
- reg = <0x80000000 0x400>;
- irq-gpios = <&gpio 64 1>;
- interrupt-controller;
- #interrupt-cells = <1>;
- status = "ok";
- };
- };
-};
diff --git a/arch/arm/boot/dts/efm32gg.dtsi b/arch/arm/boot/dts/efm32gg.dtsi
deleted file mode 100644
index 8a58e49144cc..000000000000
--- a/arch/arm/boot/dts/efm32gg.dtsi
+++ /dev/null
@@ -1,177 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device tree for Energy Micro EFM32 Giant Gecko SoC.
- *
- * Documentation available from
- * http://www.silabs.com/Support%20Documents/TechnicalDocs/EFM32GG-RM.pdf
- */
-
-#include "armv7-m.dtsi"
-#include "dt-bindings/clock/efm32-cmu.h"
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
- aliases {
- i2c0 = &i2c0;
- i2c1 = &i2c1;
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- serial4 = &uart4;
- spi0 = &spi0;
- spi1 = &spi1;
- spi2 = &spi2;
- };
-
- soc {
- adc: adc@40002000 {
- compatible = "energymicro,efm32-adc";
- reg = <0x40002000 0x400>;
- interrupts = <7>;
- clocks = <&cmu clk_HFPERCLKADC0>;
- status = "disabled";
- };
-
- gpio: gpio@40006000 {
- compatible = "energymicro,efm32-gpio";
- reg = <0x40006000 0x1000>;
- interrupts = <1 11>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <1>;
- clocks = <&cmu clk_HFPERCLKGPIO>;
- status = "ok";
- };
-
- i2c0: i2c@4000a000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "energymicro,efm32-i2c";
- reg = <0x4000a000 0x400>;
- interrupts = <9>;
- clocks = <&cmu clk_HFPERCLKI2C0>;
- clock-frequency = <100000>;
- status = "disabled";
- };
-
- i2c1: i2c@4000a400 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "energymicro,efm32-i2c";
- reg = <0x4000a400 0x400>;
- interrupts = <10>;
- clocks = <&cmu clk_HFPERCLKI2C1>;
- clock-frequency = <100000>;
- status = "disabled";
- };
-
- spi0: spi@4000c000 { /* USART0 */
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "energymicro,efm32-spi";
- reg = <0x4000c000 0x400>;
- interrupts = <3 4>;
- clocks = <&cmu clk_HFPERCLKUSART0>;
- status = "disabled";
- };
-
- spi1: spi@4000c400 { /* USART1 */
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "energymicro,efm32-spi";
- reg = <0x4000c400 0x400>;
- interrupts = <15 16>;
- clocks = <&cmu clk_HFPERCLKUSART1>;
- status = "disabled";
- };
-
- spi2: spi@4000c800 { /* USART2 */
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "energymicro,efm32-spi";
- reg = <0x4000c800 0x400>;
- interrupts = <18 19>;
- clocks = <&cmu clk_HFPERCLKUSART2>;
- status = "disabled";
- };
-
- uart0: uart@4000c000 { /* USART0 */
- compatible = "energymicro,efm32-uart";
- reg = <0x4000c000 0x400>;
- interrupts = <3 4>;
- clocks = <&cmu clk_HFPERCLKUSART0>;
- status = "disabled";
- };
-
- uart1: uart@4000c400 { /* USART1 */
- compatible = "energymicro,efm32-uart";
- reg = <0x4000c400 0x400>;
- interrupts = <15 16>;
- clocks = <&cmu clk_HFPERCLKUSART1>;
- status = "disabled";
- };
-
- uart2: uart@4000c800 { /* USART2 */
- compatible = "energymicro,efm32-uart";
- reg = <0x4000c800 0x400>;
- interrupts = <18 19>;
- clocks = <&cmu clk_HFPERCLKUSART2>;
- status = "disabled";
- };
-
- uart3: uart@4000e000 { /* UART0 */
- compatible = "energymicro,efm32-uart";
- reg = <0x4000e000 0x400>;
- interrupts = <20 21>;
- clocks = <&cmu clk_HFPERCLKUART0>;
- status = "disabled";
- };
-
- uart4: uart@4000e400 { /* UART1 */
- compatible = "energymicro,efm32-uart";
- reg = <0x4000e400 0x400>;
- interrupts = <22 23>;
- clocks = <&cmu clk_HFPERCLKUART1>;
- status = "disabled";
- };
-
- timer0: timer@40010000 {
- compatible = "energymicro,efm32-timer";
- reg = <0x40010000 0x400>;
- interrupts = <2>;
- clocks = <&cmu clk_HFPERCLKTIMER0>;
- };
-
- timer1: timer@40010400 {
- compatible = "energymicro,efm32-timer";
- reg = <0x40010400 0x400>;
- interrupts = <12>;
- clocks = <&cmu clk_HFPERCLKTIMER1>;
- };
-
- timer2: timer@40010800 {
- compatible = "energymicro,efm32-timer";
- reg = <0x40010800 0x400>;
- interrupts = <13>;
- clocks = <&cmu clk_HFPERCLKTIMER2>;
- };
-
- timer3: timer@40010c00 {
- compatible = "energymicro,efm32-timer";
- reg = <0x40010c00 0x400>;
- interrupts = <14>;
- clocks = <&cmu clk_HFPERCLKTIMER3>;
- };
-
- cmu: cmu@400c8000 {
- compatible = "efm32gg,cmu";
- reg = <0x400c8000 0x400>;
- interrupts = <32>;
- #clock-cells = <1>;
- };
- };
-};
diff --git a/arch/arm/boot/dts/exynos3250-artik5.dtsi b/arch/arm/boot/dts/exynos3250-artik5.dtsi
index 04290ec4583a..829c05b2c405 100644
--- a/arch/arm/boot/dts/exynos3250-artik5.dtsi
+++ b/arch/arm/boot/dts/exynos3250-artik5.dtsi
@@ -79,7 +79,7 @@
pmic@66 {
compatible = "samsung,s2mps14-pmic";
interrupt-parent = <&gpx3>;
- interrupts = <5 IRQ_TYPE_NONE>;
+ interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&s2mps14_irq>;
reg = <0x66>;
diff --git a/arch/arm/boot/dts/exynos3250-monk.dts b/arch/arm/boot/dts/exynos3250-monk.dts
index 69451566945d..fae046e08a5d 100644
--- a/arch/arm/boot/dts/exynos3250-monk.dts
+++ b/arch/arm/boot/dts/exynos3250-monk.dts
@@ -200,7 +200,7 @@
pmic@66 {
compatible = "samsung,s2mps14-pmic";
interrupt-parent = <&gpx0>;
- interrupts = <7 IRQ_TYPE_NONE>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
reg = <0x66>;
wakeup-source;
diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
index a26e3e582a7e..d64ccf4b7d32 100644
--- a/arch/arm/boot/dts/exynos3250-rinato.dts
+++ b/arch/arm/boot/dts/exynos3250-rinato.dts
@@ -270,7 +270,7 @@
pmic@66 {
compatible = "samsung,s2mps14-pmic";
interrupt-parent = <&gpx0>;
- interrupts = <7 IRQ_TYPE_NONE>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
reg = <0x66>;
wakeup-source;
diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts
index a0c3bab382ae..304a8ee2364c 100644
--- a/arch/arm/boot/dts/exynos4210-i9100.dts
+++ b/arch/arm/boot/dts/exynos4210-i9100.dts
@@ -560,27 +560,33 @@
regulator-boot-on;
};
- charger_reg: CHARGER {
- regulator-name = "CHARGER";
- regulator-min-microamp = <60000>;
- regulator-max-microamp = <2580000>;
+ EN32KHZ_AP {
+ regulator-name = "EN32KHZ_AP";
regulator-always-on;
};
- chargercv_reg: CHARGER_CV {
- regulator-name = "CHARGER_CV";
- regulator-min-microvolt = <3800000>;
- regulator-max-microvolt = <4100000>;
+ EN32KHZ_CP {
+ regulator-name = "EN32KHZ_CP";
regulator-always-on;
};
- EN32KHZ_AP {
- regulator-name = "EN32KHZ_AP";
+ charger_reg: CHARGER {
+ regulator-name = "CHARGER";
+ regulator-min-microamp = <200000>;
+ regulator-max-microamp = <950000>;
+ };
+
+ chargercv_reg: CHARGER_CV {
+ regulator-name = "CHARGER_CV";
+ regulator-min-microvolt = <4200000>;
+ regulator-max-microvolt = <4200000>;
regulator-always-on;
};
- EN32KHZ_CP {
- regulator-name = "EN32KHZ_CP";
+ CHARGER_TOPOFF {
+ regulator-name = "CHARGER_TOPOFF";
+ regulator-min-microamp = <200000>;
+ regulator-max-microamp = <200000>;
regulator-always-on;
};
};
diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
index 9d2baea62d0d..fba1462b19df 100644
--- a/arch/arm/boot/dts/exynos5250-spring.dts
+++ b/arch/arm/boot/dts/exynos5250-spring.dts
@@ -109,7 +109,7 @@
compatible = "samsung,s5m8767-pmic";
reg = <0x66>;
interrupt-parent = <&gpx3>;
- interrupts = <2 IRQ_TYPE_NONE>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&s5m8767_irq &s5m8767_dvs &s5m8767_ds>;
wakeup-source;
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index bf457d0c02eb..1aad4859c5f1 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -349,7 +349,7 @@
reg = <0x66>;
interrupt-parent = <&gpx3>;
- interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&s2mps11_irq>;
diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
index d0df560eb0db..6d690b1db099 100644
--- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
@@ -509,7 +509,7 @@
samsung,s2mps11-acokb-ground;
interrupt-parent = <&gpx0>;
- interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&s2mps11_irq>;
diff --git a/arch/arm/boot/dts/exynos54xx.dtsi b/arch/arm/boot/dts/exynos54xx.dtsi
index fe9d34c23374..2ddb7a5f12b3 100644
--- a/arch/arm/boot/dts/exynos54xx.dtsi
+++ b/arch/arm/boot/dts/exynos54xx.dtsi
@@ -188,7 +188,7 @@
compatible = "samsung,exynos4210-ehci";
reg = <0x12110000 0x100>;
interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
- phys = <&usb2_phy 1>;
+ phys = <&usb2_phy 0>;
phy-names = "host";
};
@@ -196,12 +196,12 @@
compatible = "samsung,exynos4210-ohci";
reg = <0x12120000 0x100>;
interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
- phys = <&usb2_phy 1>;
+ phys = <&usb2_phy 0>;
phy-names = "host";
};
usb2_phy: phy@12130000 {
- compatible = "samsung,exynos5250-usb2-phy";
+ compatible = "samsung,exynos5420-usb2-phy";
reg = <0x12130000 0x100>;
#phy-cells = <1>;
};
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index bbe52150b165..84d0176d5193 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -948,6 +948,16 @@
fsl,pull-up = <MXS_PULL_DISABLE>;
};
+ usb1_pins_b: usb1@1 {
+ reg = <1>;
+ fsl,pinmux-ids = <
+ MX28_PAD_PWM2__USB1_OVERCURRENT
+ >;
+ fsl,drive-strength = <MXS_DRIVE_12mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
usb0_id_pins_a: usb0id@0 {
reg = <0>;
fsl,pinmux-ids = <
diff --git a/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi b/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
index 665d63765cdc..d9de9b4f0c52 100644
--- a/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
+++ b/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
@@ -238,7 +238,6 @@
compatible = "wlf,wm8962";
reg = <0x1a>;
clocks = <&clks IMX6QDL_CLK_CKO>;
- clock-names = "xclk";
DCVDD-supply = <&reg_audio>;
DBVDD-supply = <&reg_audio>;
AVDD-supply = <&reg_audio>;
diff --git a/arch/arm/boot/dts/imx6dl-plybas.dts b/arch/arm/boot/dts/imx6dl-plybas.dts
new file mode 100644
index 000000000000..333c306aa946
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-plybas.dts
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright (c) 2014 Protonic Holland
+ * Copyright (c) 2020 Oleksij Rempel <kernel@pengutronix.de>, Pengutronix
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include "imx6dl.dtsi"
+
+/ {
+ model = "Plymovent BAS board";
+ compatible = "ply,plybas", "fsl,imx6dl";
+
+ chosen {
+ stdout-path = &uart4;
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ autorepeat;
+
+ button@20 {
+ label = "START";
+ linux,code = <31>;
+ gpios = <&gpio5 8 GPIO_ACTIVE_LOW>;
+ };
+
+ button@21 {
+ label = "CLEAN";
+ linux,code = <46>;
+ gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_leds>;
+
+ led-0 {
+ label = "debug0";
+ gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-1 {
+ label = "debug1";
+ gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-2 {
+ label = "light_tower1";
+ gpios = <&gpio4 22 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ led-3 {
+ label = "light_tower2";
+ gpios = <&gpio4 23 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-4 {
+ label = "light_tower3";
+ gpios = <&gpio4 24 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-5 {
+ label = "light_tower4";
+ gpios = <&gpio4 25 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ clk50m_phy: phy-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+
+ reg_5v0: regulator-5v0 {
+ compatible = "regulator-fixed";
+ regulator-name = "5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1>;
+ xceiver-supply = <&reg_5v0>;
+ status = "okay";
+};
+
+&can2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can2>;
+ xceiver-supply = <&reg_5v0>;
+ status = "okay";
+};
+
+&ecspi1 {
+ cs-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rmii";
+ clocks = <&clks IMX6QDL_CLK_ENET>,
+ <&clks IMX6QDL_CLK_ENET>,
+ <&clk50m_phy>;
+ clock-names = "ipg", "ahb", "ptp";
+ phy-handle = <&rgmii_phy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Microchip KSZ8081RNA PHY */
+ rgmii_phy: ethernet-phy@0 {
+ reg = <0>;
+ interrupts-extended = <&gpio4 30 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpio4 26 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <300>;
+ };
+ };
+};
+
+&gpio1 {
+ gpio-line-names =
+ "", "SD1_CD", "", "", "", "", "", "",
+ "DEBUG_0", "DEBUG_1", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpio3 {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "ECSPI1_SS1", "", "USB_EXT_PWR", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpio4 {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "CAN1_SR", "CAN2_SR", "", "",
+ "LED_DI0_DEBUG_0", "LED_DI0_DEBUG_1", "IMX6_IN12", "IMX6_HMI",
+ "IMX6_IN11", "IMX6_BUZZER", "IMX6_LED1", "IMX6_LED2",
+ "IMX6_LED3", "IMX6_LED4", "ETH_RESET", "IMX6_ANA_OUT_SD",
+ "IMX6_ANA_OUT_ERR", "IMX6_ANA_OUT", "ETH_INTRP", "";
+};
+
+&gpio5 {
+ gpio-line-names =
+ "", "", "", "", "", "IMX6_RELAY1", "IMX6_RELAY2", "",
+ "IMX6_IN1", "IMX6_IN2", "IMX6_IN3", "IMX6_IN4", "IMX6_IN5",
+ "IMX6_IN6", "IMX6_IN7", "IMX6_IN8",
+ "IMX6_IN9", "IMX6_IN10", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ /* additional i2c devices are added automatically by the boot loader */
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ temperature-sensor@70 {
+ compatible = "ti,tmp103";
+ reg = <0x70>;
+ };
+
+ rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
+};
+
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm1>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ fsl,uart-has-rtscts;
+ linux,rs485-enabled-at-boot-time;
+ rs485-rts-delay = <0 20>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+};
+
+&usbotg {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg>;
+ phy_type = "utmi";
+ dr_mode = "host";
+ disable-over-current;
+ status = "okay";
+};
+
+&usbphynop1 {
+ status = "disabled";
+};
+
+&usbphynop2 {
+ status = "disabled";
+};
+
+&iomuxc {
+ pinctrl_can1: can1grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX 0x1b000
+ MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX 0x3008
+ /* CAN1_SR */
+ MX6QDL_PAD_KEY_COL3__GPIO4_IO12 0x13008
+ >;
+ };
+
+ pinctrl_can2: can2grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW4__FLEXCAN2_RX 0x1b000
+ MX6QDL_PAD_KEY_COL4__FLEXCAN2_TX 0x3008
+ /* CAN2_SR */
+ MX6QDL_PAD_KEY_ROW3__GPIO4_IO13 0x13008
+ >;
+ };
+
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x1b000
+ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x3008
+ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x3008
+ /* CS */
+ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x3008
+ >;
+ };
+
+ pinctrl_enet: enetgrp {
+ fsl,pins = <
+ /* MX6QDL_ENET_PINGRP4 */
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
+ MX6QDL_PAD_ENET_RXD0__ENET_RX_DATA0 0x1b0b0
+ MX6QDL_PAD_ENET_RXD1__ENET_RX_DATA1 0x1b0b0
+ MX6QDL_PAD_ENET_RX_ER__ENET_RX_ER 0x1b0b0
+ MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x1b0b0
+ MX6QDL_PAD_ENET_TXD0__ENET_TX_DATA0 0x1b0b0
+ MX6QDL_PAD_ENET_TXD1__ENET_TX_DATA1 0x1b0b0
+ MX6QDL_PAD_ENET_CRS_DV__ENET_RX_EN 0x1b0b0
+
+ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x1b0b0
+ /* Phy reset */
+ MX6QDL_PAD_DISP0_DAT5__GPIO4_IO26 0x1b0b0
+ /* nINTRP */
+ MX6QDL_PAD_DISP0_DAT9__GPIO4_IO30 0x1b0b0
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001f8b1
+ MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001f8b1
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1
+ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_leds: ledsgrp {
+ fsl,pins = <
+ /* DEBUG_0 */
+ MX6QDL_PAD_GPIO_8__GPIO1_IO08 0x1b0b0
+ /* DEBUG_1 */
+ MX6QDL_PAD_GPIO_9__GPIO1_IO09 0x1b0b0
+
+ /* LED1 (lighttower) */
+ MX6QDL_PAD_DISP0_DAT1__GPIO4_IO22 0x13070
+ /* LED2 (lighttower) */
+ MX6QDL_PAD_DISP0_DAT2__GPIO4_IO23 0x13070
+ /* LED3 (lighttower) */
+ MX6QDL_PAD_DISP0_DAT3__GPIO4_IO24 0x13070
+ /* LED4 (lighttower) */
+ MX6QDL_PAD_DISP0_DAT4__GPIO4_IO25 0x13070
+ >;
+ };
+
+ pinctrl_pwm1: pwm1grp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT8__PWM1_OUT 0x1b0b0
+ >;
+ };
+
+ /* YaCO AUX Uart */
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
+ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D26__UART2_TX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D28__UART2_DTE_CTS_B 0x130b1
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
+ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_usbotg: usbotggrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D21__USB_OTG_OC 0x1b0b0
+ /* power enable, high active */
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x170f9
+ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x100f9
+ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x170f9
+ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x170f9
+ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x170f9
+ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x170f9
+ MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17099
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10099
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17099
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17099
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17099
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17099
+ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17099
+ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17099
+ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17099
+ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17099
+ MX6QDL_PAD_SD3_RST__SD3_RESET 0x1b0b1
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6dl-plym2m.dts b/arch/arm/boot/dts/imx6dl-plym2m.dts
new file mode 100644
index 000000000000..4d0d3d3386af
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-plym2m.dts
@@ -0,0 +1,446 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright (c) 2014 Protonic Holland
+ * Copyright (c) 2020 Oleksij Rempel <kernel@pengutronix.de>, Pengutronix
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include "imx6dl.dtsi"
+
+/ {
+ model = "Plymovent M2M board";
+ compatible = "ply,plym2m", "fsl,imx6dl";
+
+ chosen {
+ stdout-path = &uart4;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm1 0 5000000 0>;
+ brightness-levels = <0 1000>;
+ num-interpolated-steps = <20>;
+ default-brightness-level = <19>;
+ power-supply = <&reg_12v0>;
+ };
+
+ display {
+ compatible = "fsl,imx-parallel-display";
+ pinctrl-0 = <&pinctrl_ipu1_disp>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ display_in: endpoint {
+ remote-endpoint = <&ipu1_di0_disp0>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ display_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_leds>;
+
+ led-0 {
+ label = "debug0";
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ panel {
+ compatible = "edt,etm0700g0bdh6";
+ backlight = <&backlight>;
+ power-supply = <&reg_3v3>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&display_out>;
+ };
+ };
+ };
+
+ clk50m_phy: phy-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+
+ reg_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_5v0: regulator-5v0 {
+ compatible = "regulator-fixed";
+ regulator-name = "5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ reg_12v0: regulator-12v0 {
+ compatible = "regulator-fixed";
+ regulator-name = "12v0";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1>;
+ xceiver-supply = <&reg_5v0>;
+ status = "okay";
+};
+
+&ecspi1 {
+ cs-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ };
+};
+
+&ecspi2 {
+ cs-gpios = <&gpio2 26 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi2>;
+ status = "okay";
+
+ touchscreen@0 {
+ compatible = "ti,tsc2046";
+ reg = <0>;
+ pinctrl-0 = <&pinctrl_tsc2046>;
+ pinctrl-names = "default";
+ spi-max-frequency = <100000>;
+ interrupts-extended = <&gpio3 20 IRQ_TYPE_EDGE_FALLING>;
+ pendown-gpio = <&gpio3 20 GPIO_ACTIVE_LOW>;
+
+ touchscreen-size-x = <800>;
+ touchscreen-size-y = <480>;
+ touchscreen-inverted-x;
+ touchscreen-inverted-y;
+ touchscreen-max-pressure = <4095>;
+
+ ti,vref-delay-usecs = /bits/ 16 <100>;
+ ti,x-plate-ohms = /bits/ 16 <800>;
+ ti,y-plate-ohms = /bits/ 16 <300>;
+
+ wakeup-source;
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rmii";
+ clocks = <&clks IMX6QDL_CLK_ENET>,
+ <&clks IMX6QDL_CLK_ENET>,
+ <&clk50m_phy>;
+ clock-names = "ipg", "ahb", "ptp";
+ phy-handle = <&rgmii_phy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Microchip KSZ8081RNA PHY */
+ rgmii_phy: ethernet-phy@0 {
+ reg = <0>;
+ interrupts-extended = <&gpio5 23 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpio5 22 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <300>;
+ };
+ };
+};
+
+&gpio1 {
+ gpio-line-names =
+ "CAN1_TERM", "SD1_CD", "", "", "", "", "", "",
+ "DEBUG_0", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpio2 {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "ECSPI2_SS0", "", "", "", "TSC_BUSY", "";
+};
+
+&gpio3 {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "ECSPI1_SS1", "TSC_PENIRQ", "", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpio4 {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "CAN1_SR", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpio5 {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "ETH_RESET", "ETH_INTRP",
+ "", "", "", "", "", "", "", "";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ /* additional i2c devices are added automatically by the boot loader */
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ temperature-sensor@70 {
+ compatible = "ti,tmp103";
+ reg = <0x70>;
+ };
+};
+
+&ipu1_di0_disp0 {
+ remote-endpoint = <&display_in>;
+};
+
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm1>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+};
+
+&usbphynop1 {
+ status = "disabled";
+};
+
+&usbphynop2 {
+ status = "disabled";
+};
+
+&usbotg {
+ phy_type = "utmi";
+ dr_mode = "host";
+ disable-over-current;
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ no-1-8-v;
+ disable-wp;
+ cap-sd-highspeed;
+ no-mmc;
+ no-sdio;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ bus-width = <8>;
+ no-1-8-v;
+ non-removable;
+ no-sd;
+ no-sdio;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_can1: can1grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX 0x1b000
+ MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX 0x3008
+ /* CAN1_SR */
+ MX6QDL_PAD_KEY_COL3__GPIO4_IO12 0x13008
+ /* CAN1_TERM */
+ MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b088
+ >;
+ };
+
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x1b000
+ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x3008
+ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x3008
+ /* CS */
+ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x3008
+ >;
+ };
+
+ pinctrl_ecspi2: ecspi2grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_OE__ECSPI2_MISO 0x10000
+ MX6QDL_PAD_EIM_CS0__ECSPI2_SCLK 0x3008
+ MX6QDL_PAD_EIM_CS1__ECSPI2_MOSI 0x3008
+ /* CS */
+ MX6QDL_PAD_EIM_RW__GPIO2_IO26 0x3008
+ >;
+ };
+
+ pinctrl_enet: enetgrp {
+ fsl,pins = <
+ /* MX6QDL_ENET_PINGRP4 */
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
+ MX6QDL_PAD_ENET_RXD0__ENET_RX_DATA0 0x1b0b0
+ MX6QDL_PAD_ENET_RXD1__ENET_RX_DATA1 0x1b0b0
+ MX6QDL_PAD_ENET_RX_ER__ENET_RX_ER 0x1b0b0
+ MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x1b0b0
+ MX6QDL_PAD_ENET_TXD0__ENET_TX_DATA0 0x1b0b0
+ MX6QDL_PAD_ENET_TXD1__ENET_TX_DATA1 0x1b0b0
+ MX6QDL_PAD_ENET_CRS_DV__ENET_RX_EN 0x1b0b0
+
+ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x1b0b0
+ /* Phy reset */
+ MX6QDL_PAD_CSI0_DAT4__GPIO5_IO22 0x1b0b0
+ /* nINTRP */
+ MX6QDL_PAD_CSI0_DAT5__GPIO5_IO23 0x1b0b0
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001f8b1
+ MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001f8b1
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1
+ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_ipu1_disp: ipudisp1grp {
+ fsl,pins = <
+ /* DSE 0x30 => 25 Ohm, 0x20 => 37 Ohm, 0x10 => 75 Ohm */
+ MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x30
+ MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15 0x30
+ MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02 0x30
+ MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03 0x30
+ MX6QDL_PAD_DISP0_DAT0__IPU1_DISP0_DATA00 0x30
+ MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01 0x30
+ MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02 0x30
+ MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03 0x30
+ MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04 0x30
+ MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05 0x30
+ MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06 0x30
+ MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07 0x30
+ MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08 0x30
+ MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09 0x30
+ MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10 0x30
+ MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11 0x30
+ MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12 0x30
+ MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13 0x30
+ MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14 0x30
+ MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15 0x30
+ MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16 0x30
+ MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17 0x30
+ >;
+ };
+
+ pinctrl_leds: ledsgrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_8__GPIO1_IO08 0x1b0b0
+ >;
+ };
+
+ pinctrl_pwm1: pwm1grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_9__PWM1_OUT 0x8
+ >;
+ };
+
+ pinctrl_tsc2046: tsc2046grp {
+ fsl,pins = <
+ /* TSC_PENIRQ */
+ MX6QDL_PAD_EIM_D20__GPIO3_IO20 0x1b0b1
+ /* TSC_BUSY */
+ MX6QDL_PAD_EIM_EB2__GPIO2_IO30 0x1b0b0
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
+ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x170f9
+ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x100f9
+ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x170f9
+ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x170f9
+ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x170f9
+ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x170f9
+ MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17099
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10099
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17099
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17099
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17099
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17099
+ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17099
+ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17099
+ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17099
+ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17099
+ MX6QDL_PAD_SD3_RST__SD3_RESET 0x1b0b1
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6dl-prtmvt.dts b/arch/arm/boot/dts/imx6dl-prtmvt.dts
new file mode 100644
index 000000000000..a35a1c66e770
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-prtmvt.dts
@@ -0,0 +1,852 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright (c) 2016 Protonic Holland
+ * Copyright (c) 2020 Oleksij Rempel <kernel@pengutronix.de>, Pengutronix
+ */
+
+/dts-v1/;
+#include <dt-bindings/display/sdtv-standards.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/media/tvp5150.h>
+#include <dt-bindings/sound/fsl-imx-audmux.h>
+#include "imx6dl.dtsi"
+
+/ {
+ model = "Protonic MVT board";
+ compatible = "prt,prtmvt", "fsl,imx6dl";
+
+ chosen {
+ stdout-path = &uart4;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_backlight>;
+ pwms = <&pwm1 0 5000000 0>;
+ brightness-levels = <0 16 64 255>;
+ num-interpolated-steps = <16>;
+ default-brightness-level = <1>;
+ power-supply = <&reg_3v3>;
+ enable-gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>;
+ };
+
+ connector {
+ compatible = "composite-video-connector";
+ label = "Composite0";
+ sdtv-standards = <SDTV_STD_PAL_B>;
+
+ port {
+ comp0_out: endpoint {
+ remote-endpoint = <&tvp5150_comp0_in>;
+ };
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpiokeys>;
+ autorepeat;
+
+ power {
+ label = "Power Button";
+ gpios = <&gpio2 23 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_POWER>;
+ wakeup-source;
+ };
+
+ f1 {
+ label = "GPIO Key F1";
+ linux,code = <KEY_F1>;
+ gpios = <&gpio_pca 0 GPIO_ACTIVE_LOW>;
+ };
+
+ f2 {
+ label = "GPIO Key F2";
+ linux,code = <KEY_F2>;
+ gpios = <&gpio_pca 1 GPIO_ACTIVE_LOW>;
+ };
+
+ f3 {
+ label = "GPIO Key F3";
+ linux,code = <KEY_F3>;
+ gpios = <&gpio_pca 2 GPIO_ACTIVE_LOW>;
+ };
+
+ f4 {
+ label = "GPIO Key F4";
+ linux,code = <KEY_F4>;
+ gpios = <&gpio_pca 3 GPIO_ACTIVE_LOW>;
+ };
+
+ f5 {
+ label = "GPIO Key F5";
+ linux,code = <KEY_F5>;
+ gpios = <&gpio_pca 4 GPIO_ACTIVE_LOW>;
+ };
+
+ cycle {
+ label = "GPIO Key CYCLE";
+ linux,code = <KEY_CYCLEWINDOWS>;
+ gpios = <&gpio_pca 5 GPIO_ACTIVE_LOW>;
+ };
+
+ esc {
+ label = "GPIO Key ESC";
+ linux,code = <KEY_ESC>;
+ gpios = <&gpio_pca 6 GPIO_ACTIVE_LOW>;
+ };
+
+ up {
+ label = "GPIO Key UP";
+ linux,code = <KEY_UP>;
+ gpios = <&gpio_pca 7 GPIO_ACTIVE_LOW>;
+ };
+
+ down {
+ label = "GPIO Key DOWN";
+ linux,code = <KEY_DOWN>;
+ gpios = <&gpio_pca 8 GPIO_ACTIVE_LOW>;
+ };
+
+ ok {
+ label = "GPIO Key OK";
+ linux,code = <KEY_OK>;
+ gpios = <&gpio_pca 9 GPIO_ACTIVE_LOW>;
+ };
+
+ f6 {
+ label = "GPIO Key F6";
+ linux,code = <KEY_F6>;
+ gpios = <&gpio_pca 10 GPIO_ACTIVE_LOW>;
+ };
+
+ f7 {
+ label = "GPIO Key F7";
+ linux,code = <KEY_F7>;
+ gpios = <&gpio_pca 11 GPIO_ACTIVE_LOW>;
+ };
+
+ f8 {
+ label = "GPIO Key F8";
+ linux,code = <KEY_F8>;
+ gpios = <&gpio_pca 12 GPIO_ACTIVE_LOW>;
+ };
+
+ f9 {
+ label = "GPIO Key F9";
+ linux,code = <KEY_F9>;
+ gpios = <&gpio_pca 13 GPIO_ACTIVE_LOW>;
+ };
+
+ f10 {
+ label = "GPIO Key F10";
+ linux,code = <KEY_F10>;
+ gpios = <&gpio_pca 14 GPIO_ACTIVE_LOW>;
+ };
+
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_leds>;
+
+ led-0 {
+ label = "debug0";
+ function = LED_FUNCTION_HEARTBEAT;
+ gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ led-1 {
+ label = "debug1";
+ function = LED_FUNCTION_DISK;
+ gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "disk-activity";
+ };
+
+ led-2 {
+ label = "power_led";
+ function = LED_FUNCTION_POWER;
+ gpios = <&gpio2 24 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
+
+ panel {
+ compatible = "kyo,tcg070wvlq", "lg,lb070wv8";
+ backlight = <&backlight>;
+ power-supply = <&reg_3v3>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&lvds0_out>;
+ };
+ };
+ };
+
+ clk50m_phy: phy-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+
+ reg_1v8: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ reg_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_h1_vbus: regulator-h1-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "h1-vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio1 0 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_otg_vbus: regulator-otg-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "otg-vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "prti6q-sgtl5000";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,widgets =
+ "Microphone", "Microphone Jack",
+ "Line", "Line In Jack",
+ "Headphone", "Headphone Jack",
+ "Speaker", "External Speaker";
+ simple-audio-card,routing =
+ "MIC_IN", "Microphone Jack",
+ "LINE_IN", "Line In Jack",
+ "Headphone Jack", "HP_OUT",
+ "External Speaker", "LINE_OUT";
+
+ simple-audio-card,cpu {
+ sound-dai = <&ssi1>;
+ system-clock-frequency = <0>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&codec>;
+ bitclock-master;
+ frame-master;
+ };
+ };
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux>;
+ status = "okay";
+
+ mux-ssi1 {
+ fsl,audmux-port = <0>;
+ fsl,port-config = <
+ IMX_AUDMUX_V2_PTCR_SYN 0
+ IMX_AUDMUX_V2_PTCR_TFSEL(2) 0
+ IMX_AUDMUX_V2_PTCR_TCSEL(2) 0
+ IMX_AUDMUX_V2_PTCR_TFSDIR 0
+ IMX_AUDMUX_V2_PTCR_TCLKDIR IMX_AUDMUX_V2_PDCR_RXDSEL(2)
+ >;
+ };
+
+ mux-pins3 {
+ fsl,audmux-port = <2>;
+ fsl,port-config = <
+ IMX_AUDMUX_V2_PTCR_SYN IMX_AUDMUX_V2_PDCR_RXDSEL(0)
+ 0 IMX_AUDMUX_V2_PDCR_TXRXEN
+ >;
+ };
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1>;
+ status = "okay";
+};
+
+&can2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can2>;
+ status = "okay";
+};
+
+&clks {
+ assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>;
+ assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>;
+};
+
+&ecspi1 {
+ cs-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rmii";
+ clocks = <&clks IMX6QDL_CLK_ENET>,
+ <&clks IMX6QDL_CLK_ENET>,
+ <&clk50m_phy>;
+ clock-names = "ipg", "ahb", "ptp";
+ phy-handle = <&rmii_phy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Microchip KSZ8081RNA PHY */
+ rmii_phy: ethernet-phy@0 {
+ reg = <0>;
+ interrupts-extended = <&gpio4 30 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpio4 26 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <3000>;
+ };
+ };
+};
+
+&gpio1 {
+ gpio-line-names =
+ "CAN1_TERM", "SD1_CD", "ITU656_RESET", "CAM1_MIRROR",
+ "CAM2_MIRROR", "", "", "SMBALERT",
+ "DEBUG_0", "DEBUG_1", "", "", "", "", "", "",
+ "SD1_DATA0", "SD1_DATA1", "SD1_CMD", "SD1_DATA2", "SD1_CLK",
+ "SD1_DATA3", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpio2 {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "REV_ID0", "REV_ID1", "REV_ID2", "REV_ID3", "REV_ID4",
+ "BOARD_ID0", "BOARD_ID1", "BOARD_ID2",
+ "", "", "", "", "", "", "", "ON_SWITCH",
+ "POWER_LED", "", "", "", "", "", "", "";
+};
+
+&gpio3 {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "ECSPI1_SCLK", "ECSPI1_MISO", "ECSPI1_MOSI", "ECSPI1_SS1",
+ "CPU_ON1_FB", "USB_EXT1_OC", "USB_EXT1_PWR", "YACO_IRQ",
+ "TSS_TXD", "TSS_RXD", "", "", "", "", "YACO_BOOT0",
+ "YACO_RESET";
+};
+
+&gpio4 {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "CAN1_SR", "CAN2_SR", "CAN2_TX", "CAN2_RX",
+ "", "", "DIP1_FB", "", "", "", "", "",
+ "CPU_LIGHT_ON", "", "ETH_RESET", "", "BL_EN",
+ "BL_PWM", "ETH_INTRP", "";
+};
+
+&gpio5 {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "I2S_LRCLK", "I2S_DIN", "I2C1_SDA", "I2C1_SCL", "YACO_AUX_RX",
+ "YACO_AUX_TX", "ITU656_D0", "ITU656_D1";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ codec: audio-codec@a {
+ compatible = "fsl,sgtl5000";
+ reg = <0xa>;
+ #sound-dai-cells = <0>;
+ clocks = <&clks 201>;
+ VDDA-supply = <&reg_3v3>;
+ VDDIO-supply = <&reg_3v3>;
+ VDDD-supply = <&reg_1v8>;
+ };
+
+ video@5c {
+ compatible = "ti,tvp5150";
+ reg = <0x5c>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ tvp5150_comp0_in: endpoint {
+ remote-endpoint = <&comp0_out>;
+ };
+ };
+
+ /* Output port 2 is video output pad */
+ port@2 {
+ reg = <2>;
+ tvp5151_to_ipu1_csi0_mux: endpoint {
+ remote-endpoint = <&ipu1_csi0_mux_from_parallel_sensor>;
+ };
+ };
+ };
+
+ gpio_pca: gpio@74 {
+ compatible = "nxp,pca9539";
+ reg = <0x74>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pca9539>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ /* additional i2c devices are added automatically by the boot loader */
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ adc@49 {
+ compatible = "ti,ads1015";
+ reg = <0x49>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@4 {
+ reg = <4>;
+ ti,gain = <3>;
+ ti,datarate = <3>;
+ };
+
+ channel@5 {
+ reg = <5>;
+ ti,gain = <3>;
+ ti,datarate = <3>;
+ };
+
+ channel@6 {
+ reg = <6>;
+ ti,gain = <3>;
+ ti,datarate = <3>;
+ };
+
+ channel@7 {
+ reg = <7>;
+ ti,gain = <3>;
+ ti,datarate = <3>;
+ };
+ };
+
+ rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
+
+ temperature-sensor@70 {
+ compatible = "ti,tmp103";
+ reg = <0x70>;
+ };
+};
+
+&ipu1_csi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ipu1_csi0>;
+ status = "okay";
+};
+
+&ipu1_csi0_mux_from_parallel_sensor {
+ remote-endpoint = <&tvp5151_to_ipu1_csi0_mux>;
+};
+
+&ldb {
+ status = "okay";
+
+ lvds-channel@0 {
+ status = "okay";
+
+ port@4 {
+ reg = <4>;
+
+ lvds0_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+};
+
+&pcie {
+ status = "okay";
+};
+
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm1>;
+ status = "okay";
+};
+
+&ssi1 {
+ #sound-dai-cells = <0>;
+ fsl,mode = "ac97-slave";
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart5>;
+ status = "okay";
+};
+
+&usbh1 {
+ vbus-supply = <&reg_h1_vbus>;
+ pinctrl-names = "default";
+ phy_type = "utmi";
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&reg_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg>;
+ phy_type = "utmi";
+ dr_mode = "host";
+ disable-over-current;
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ no-1-8-v;
+ disable-wp;
+ cap-sd-highspeed;
+ no-mmc;
+ no-sdio;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ bus-width = <8>;
+ no-1-8-v;
+ non-removable;
+ no-sd;
+ no-sdio;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_audmux: audmuxgrp {
+ fsl,pins = <
+ /* SGTL5000 sys_mclk */
+ MX6QDL_PAD_CSI0_MCLK__CCM_CLKO1 0x030b0
+ MX6QDL_PAD_CSI0_DAT7__AUD3_RXD 0x130b0
+ MX6QDL_PAD_CSI0_DAT4__AUD3_TXC 0x130b0
+ MX6QDL_PAD_CSI0_DAT5__AUD3_TXD 0x110b0
+ MX6QDL_PAD_CSI0_DAT6__AUD3_TXFS 0x130b0
+ >;
+ };
+
+ pinctrl_backlight: backlightgrp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT7__GPIO4_IO28 0x1b0b0
+ >;
+ };
+
+ pinctrl_can1: can1grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX 0x1b000
+ MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX 0x3008
+ /* CAN1_SR */
+ MX6QDL_PAD_KEY_COL3__GPIO4_IO12 0x13008
+ /* CAN1_TERM */
+ MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b088
+ >;
+ };
+
+ pinctrl_can2: can2grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW4__FLEXCAN2_RX 0x1b000
+ MX6QDL_PAD_KEY_COL4__FLEXCAN2_TX 0x3008
+ /* CAN2_SR */
+ MX6QDL_PAD_KEY_ROW3__GPIO4_IO13 0x13008
+ >;
+ };
+
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
+ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
+ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
+ /* CS */
+ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x000b1
+ >;
+ };
+
+ pinctrl_enet: enetgrp {
+ fsl,pins = <
+ /* MX6QDL_ENET_PINGRP4 */
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
+ MX6QDL_PAD_ENET_RXD0__ENET_RX_DATA0 0x1b0b0
+ MX6QDL_PAD_ENET_RXD1__ENET_RX_DATA1 0x1b0b0
+ MX6QDL_PAD_ENET_RX_ER__ENET_RX_ER 0x1b0b0
+ MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x1b0b0
+ MX6QDL_PAD_ENET_TXD0__ENET_TX_DATA0 0x1b0b0
+ MX6QDL_PAD_ENET_TXD1__ENET_TX_DATA1 0x1b0b0
+ MX6QDL_PAD_ENET_CRS_DV__ENET_RX_EN 0x1b0b0
+ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x1b0b0
+ /* Phy reset */
+ MX6QDL_PAD_DISP0_DAT5__GPIO4_IO26 0x1b0b0
+ /* nINTRP */
+ MX6QDL_PAD_DISP0_DAT9__GPIO4_IO30 0x1b0b0
+ >;
+ };
+
+ pinctrl_gpiokeys: gpiokeygrp {
+ fsl,pins = <
+ /* nON_SWITCH */
+ MX6QDL_PAD_EIM_CS0__GPIO2_IO23 0x1b0b0
+ >;
+ };
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ /* ITU656_nRESET */
+ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x1b0b0
+ /* CAM1_MIRROR */
+ MX6QDL_PAD_GPIO_3__GPIO1_IO03 0x130b0
+ /* CAM2_MIRROR */
+ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x130b0
+ /* CAM_nDETECT */
+ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x1b0b0
+ /* ISB_IN1 */
+ MX6QDL_PAD_EIM_A16__GPIO2_IO22 0x130b0
+ /* ISB_nIN2 */
+ MX6QDL_PAD_EIM_A17__GPIO2_IO21 0x1b0b0
+ /* WARN_LIGHT */
+ MX6QDL_PAD_EIM_A19__GPIO2_IO19 0x100b0
+ /* ON2_FB */
+ MX6QDL_PAD_EIM_A25__GPIO5_IO02 0x100b0
+ /* YACO_nIRQ */
+ MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x1b0b0
+ /* YACO_BOOT0 */
+ MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x130b0
+ /* YACO_nRESET */
+ MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x1b0b0
+ /* FORCE_ON1 */
+ MX6QDL_PAD_EIM_EB2__GPIO2_IO30 0x1b0b0
+ /* AUDIO_nRESET */
+ MX6QDL_PAD_CSI0_VSYNC__GPIO5_IO21 0x1f0b0
+ /* ITU656_nPDN */
+ MX6QDL_PAD_CSI0_DATA_EN__GPIO5_IO20 0x1b0b0
+
+ /* HW revision detect */
+ /* REV_ID0 */
+ MX6QDL_PAD_SD4_DAT0__GPIO2_IO08 0x1b0b0
+ /* REV_ID1 */
+ MX6QDL_PAD_SD4_DAT1__GPIO2_IO09 0x1b0b0
+ /* REV_ID2 */
+ MX6QDL_PAD_SD4_DAT2__GPIO2_IO10 0x1b0b0
+ /* REV_ID3 */
+ MX6QDL_PAD_SD4_DAT3__GPIO2_IO11 0x1b0b0
+ /* REV_ID4 */
+ MX6QDL_PAD_SD4_DAT4__GPIO2_IO12 0x1b0b0
+
+ /* New in HW revision 1 */
+ /* ON1_FB */
+ MX6QDL_PAD_EIM_D20__GPIO3_IO20 0x100b0
+ /* DIP1_FB */
+ MX6QDL_PAD_DI0_PIN2__GPIO4_IO18 0x1b0b0
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001f8b1
+ MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001f8b1
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1
+ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_ipu1_csi0: ipu1csi0grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT12__IPU1_CSI0_DATA12 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT13__IPU1_CSI0_DATA13 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT14__IPU1_CSI0_DATA14 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT15__IPU1_CSI0_DATA15 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT16__IPU1_CSI0_DATA16 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT17__IPU1_CSI0_DATA17 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT18__IPU1_CSI0_DATA18 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT19__IPU1_CSI0_DATA19 0x1b0b0
+ MX6QDL_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK 0x1b0b0
+ >;
+ };
+
+ pinctrl_leds: ledsgrp {
+ fsl,pins = <
+ /* DEBUG0 */
+ MX6QDL_PAD_DI0_DISP_CLK__GPIO4_IO16 0x1b0b0
+ /* DEBUG1 */
+ MX6QDL_PAD_DI0_PIN15__GPIO4_IO17 0x1b0b0
+ /* POWER_LED */
+ MX6QDL_PAD_EIM_CS1__GPIO2_IO24 0x1b0b0
+ >;
+ };
+
+ pinctrl_pca9539: pca9539 {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_19__GPIO4_IO05 0x1b0b0
+ >;
+ };
+
+ pinctrl_pwm1: pwm1grp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT8__PWM1_OUT 0x1b0b0
+ >;
+ };
+
+ /* YaCO AUX Uart */
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
+ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D26__UART2_TX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1
+ >;
+ };
+
+ /* YaCO Touchscreen UART */
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
+ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart5: uart5grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL1__UART5_TX_DATA 0x1b0b1
+ MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_usbotg: usbotggrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D21__USB_OTG_OC 0x1b0b0
+ /* power enable, high active */
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x170f9
+ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x100f9
+ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x170f9
+ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x170f9
+ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x170f9
+ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x170f9
+ MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17099
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10099
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17099
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17099
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17099
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17099
+ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17099
+ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17099
+ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17099
+ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17099
+ MX6QDL_PAD_SD3_RST__SD3_RESET 0x1b0b1
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6dl-victgo.dts b/arch/arm/boot/dts/imx6dl-victgo.dts
new file mode 100644
index 000000000000..d37ba4ed847d
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-victgo.dts
@@ -0,0 +1,852 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright (c) 2016 Protonic Holland
+ * Copyright (c) 2020 Oleksij Rempel <kernel@pengutronix.de>, Pengutronix
+ */
+
+/dts-v1/;
+#include <dt-bindings/display/sdtv-standards.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/media/tvp5150.h>
+#include <dt-bindings/sound/fsl-imx-audmux.h>
+#include "imx6dl.dtsi"
+
+/ {
+ model = "Kverneland TGO";
+ compatible = "kvg,victgo", "fsl,imx6dl";
+
+ chosen {
+ stdout-path = &uart4;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_backlight>;
+ pwms = <&pwm1 0 5000000 0>;
+ brightness-levels = <0 16 64 255>;
+ num-interpolated-steps = <16>;
+ default-brightness-level = <1>;
+ power-supply = <&reg_3v3>;
+ enable-gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>;
+ };
+
+ connector {
+ compatible = "composite-video-connector";
+ label = "Composite0";
+ sdtv-standards = <SDTV_STD_PAL_B>;
+
+ port {
+ comp0_out: endpoint {
+ remote-endpoint = <&tvp5150_comp0_in>;
+ };
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpiokeys>;
+ autorepeat;
+
+ power {
+ label = "Power Button";
+ gpios = <&gpio2 23 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_POWER>;
+ wakeup-source;
+ };
+
+ enter {
+ label = "Rotary Key";
+			gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_ENTER>;
+ wakeup-source;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_leds>;
+
+ led-0 {
+ label = "debug0";
+ function = LED_FUNCTION_HEARTBEAT;
+ gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ led-1 {
+ label = "debug1";
+ function = LED_FUNCTION_DISK;
+ gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "disk-activity";
+ };
+
+ led-2 {
+ label = "power_led";
+ function = LED_FUNCTION_POWER;
+ gpios = <&gpio2 24 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
+
+ panel {
+ compatible = "kyo,tcg121xglp";
+ backlight = <&backlight>;
+ power-supply = <&reg_3v3>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&lvds0_out>;
+ };
+ };
+ };
+
+ clk50m_phy: phy-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+
+ reg_1v8: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ reg_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_h1_vbus: regulator-h1-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "h1-vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio1 0 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_otg_vbus: regulator-otg-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "otg-vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ rotary-encoder {
+ compatible = "rotary-encoder";
+ pinctrl-0 = <&pinctrl_rotary_ch>;
+ gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>,
+ <&gpio2 4 GPIO_ACTIVE_HIGH>;
+ linux,axis = <REL_WHEEL>;
+ rotary-encoder,steps-per-period = <4>;
+ rotary-encoder,relative-axis;
+ rotary-encoder,rollover;
+ wakeup-source;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "prti6q-sgtl5000";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,widgets =
+ "Microphone", "Microphone Jack",
+ "Line", "Line In Jack",
+ "Headphone", "Headphone Jack",
+ "Speaker", "External Speaker";
+ simple-audio-card,routing =
+ "MIC_IN", "Microphone Jack",
+ "LINE_IN", "Line In Jack",
+ "Headphone Jack", "HP_OUT",
+ "External Speaker", "LINE_OUT";
+
+ simple-audio-card,cpu {
+ sound-dai = <&ssi1>;
+ system-clock-frequency = <0>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&codec>;
+ bitclock-master;
+ frame-master;
+ };
+ };
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux>;
+ status = "okay";
+
+ mux-ssi1 {
+ fsl,audmux-port = <0>;
+ fsl,port-config = <
+ IMX_AUDMUX_V2_PTCR_SYN 0
+ IMX_AUDMUX_V2_PTCR_TFSEL(2) 0
+ IMX_AUDMUX_V2_PTCR_TCSEL(2) 0
+ IMX_AUDMUX_V2_PTCR_TFSDIR 0
+ IMX_AUDMUX_V2_PTCR_TCLKDIR IMX_AUDMUX_V2_PDCR_RXDSEL(2)
+ >;
+ };
+
+ mux-pins3 {
+ fsl,audmux-port = <2>;
+ fsl,port-config = <
+ IMX_AUDMUX_V2_PTCR_SYN IMX_AUDMUX_V2_PDCR_RXDSEL(0)
+ 0 IMX_AUDMUX_V2_PDCR_TXRXEN
+ >;
+ };
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1>;
+ status = "okay";
+};
+
+&can2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can2>;
+ status = "okay";
+};
+
+&clks {
+ assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>;
+ assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>;
+};
+
+&ecspi1 {
+ cs-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ };
+};
+
+&ecspi2 {
+ cs-gpios = <&gpio5 12 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi2>;
+ status = "okay";
+
+ touchscreen@0 {
+ compatible = "ti,tsc2046";
+ reg = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_touchscreen>;
+ spi-max-frequency = <200000>;
+ interrupts-extended = <&gpio5 8 IRQ_TYPE_EDGE_FALLING>;
+ pendown-gpio = <&gpio5 8 GPIO_ACTIVE_LOW>;
+ touchscreen-size-x = <800>;
+ touchscreen-size-y = <480>;
+ touchscreen-inverted-y;
+ touchscreen-max-pressure = <4095>;
+ ti,vref-delay-usecs = /bits/ 16 <100>;
+ ti,x-plate-ohms = /bits/ 16 <800>;
+ ti,y-plate-ohms = /bits/ 16 <300>;
+ wakeup-source;
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rmii";
+ clocks = <&clks IMX6QDL_CLK_ENET>,
+ <&clks IMX6QDL_CLK_ENET>,
+ <&clk50m_phy>;
+ clock-names = "ipg", "ahb", "ptp";
+ phy-handle = <&rmii_phy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Microchip KSZ8081RNA PHY */
+ rmii_phy: ethernet-phy@0 {
+ reg = <0>;
+ interrupts-extended = <&gpio4 30 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpio4 26 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <300>;
+ };
+ };
+};
+
+&gpio1 {
+ gpio-line-names =
+ "CAN1_TERM", "SD1_CD", "ITU656_RESET", "CAM1_MIRROR",
+ "CAM2_MIRROR", "", "", "SMBALERT",
+ "DEBUG_0", "DEBUG_1", "", "", "", "", "", "",
+ "SD1_DATA0", "SD1_DATA1", "SD1_CMD", "SD1_DATA2", "SD1_CLK",
+ "SD1_DATA3", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpio2 {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "REV_ID0", "REV_ID1", "REV_ID2", "REV_ID3", "REV_ID4",
+ "BOARD_ID0", "BOARD_ID1", "BOARD_ID2",
+ "", "", "", "", "", "", "ISB_IN1", "ON_SWITCH",
+ "POWER_LED", "", "", "", "", "", "", "";
+};
+
+&gpio3 {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "ECSPI1_SCLK", "ECSPI1_MISO", "ECSPI1_MOSI", "ECSPI1_SS1",
+ "CPU_ON1_FB", "USB_EXT1_OC", "USB_EXT1_PWR", "YACO_IRQ",
+ "TSS_TXD", "TSS_RXD", "", "", "", "", "YACO_BOOT0",
+ "YACO_RESET";
+};
+
+&gpio4 {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "CAN1_SR", "CAN2_SR", "CAN2_TX", "CAN2_RX",
+ "", "", "DIP1_FB", "", "VCAM_EN", "", "", "",
+ "CPU_LIGHT_ON", "", "ETH_RESET", "CPU_CONTACT_IN", "BL_EN",
+ "BL_PWM", "ETH_INTRP", "ISB_LED";
+};
+
+&gpio5 {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "TSC_PENIRQ", "TSC_BUSY", "ECSPI2_MOSI", "ECSPI2_MISO",
+ "ECSPI2_SS0", "ECSPI2_SCLK", "", "",
+ "", "", "", "", "", "", "", "",
+ "I2S_LRCLK", "I2S_DIN", "I2C1_SDA", "I2C1_SCL", "YACO_AUX_RX",
+ "YACO_AUX_TX", "ITU656_D0", "ITU656_D1";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ codec: audio-codec@a {
+ compatible = "fsl,sgtl5000";
+ reg = <0xa>;
+ #sound-dai-cells = <0>;
+ clocks = <&clks 201>;
+ VDDA-supply = <&reg_3v3>;
+ VDDIO-supply = <&reg_3v3>;
+ VDDD-supply = <&reg_1v8>;
+ };
+
+ video-decoder@5c {
+ compatible = "ti,tvp5150";
+ reg = <0x5c>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ tvp5150_comp0_in: endpoint {
+ remote-endpoint = <&comp0_out>;
+ };
+ };
+
+ /* Output port 2 is video output pad */
+ port@2 {
+ reg = <2>;
+
+ tvp5151_to_ipu1_csi0_mux: endpoint {
+ remote-endpoint = <&ipu1_csi0_mux_from_parallel_sensor>;
+ };
+ };
+ };
+
+ keypad@70 {
+ compatible = "holtek,ht16k33";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_keypad>;
+ reg = <0x70>;
+ refresh-rate-hz = <20>;
+ debounce-delay-ms = <50>;
+ interrupts-extended = <&gpio4 5 (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING)>;
+ keypad,num-rows = <12>;
+ keypad,num-columns = <3>;
+ linux,keymap = <
+ MATRIX_KEY(2, 0, KEY_F6)
+ MATRIX_KEY(3, 0, KEY_F8)
+ MATRIX_KEY(4, 0, KEY_F10)
+ MATRIX_KEY(5, 0, KEY_F4)
+ MATRIX_KEY(6, 0, KEY_F2)
+ MATRIX_KEY(2, 1, KEY_F5)
+ MATRIX_KEY(3, 1, KEY_F7)
+ MATRIX_KEY(4, 1, KEY_F9)
+ MATRIX_KEY(5, 1, KEY_F3)
+ MATRIX_KEY(6, 1, KEY_F1)
+ >;
+ };
+
+ /* additional i2c devices are added automatically by the boot loader */
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ adc@49 {
+ compatible = "ti,ads1015";
+ reg = <0x49>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@4 {
+ reg = <4>;
+ ti,gain = <3>;
+ ti,datarate = <3>;
+ };
+
+ channel@5 {
+ reg = <5>;
+ ti,gain = <3>;
+ ti,datarate = <3>;
+ };
+
+ channel@6 {
+ reg = <6>;
+ ti,gain = <3>;
+ ti,datarate = <3>;
+ };
+
+ channel@7 {
+ reg = <7>;
+ ti,gain = <3>;
+ ti,datarate = <3>;
+ };
+ };
+
+ rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
+
+ temperature-sensor@70 {
+ compatible = "ti,tmp103";
+ reg = <0x70>;
+ };
+};
+
+&ipu1_csi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ipu1_csi0>;
+ status = "okay";
+};
+
+&ipu1_csi0_mux_from_parallel_sensor {
+ remote-endpoint = <&tvp5151_to_ipu1_csi0_mux>;
+};
+
+&ldb {
+ status = "okay";
+
+ lvds-channel@0 {
+ status = "okay";
+
+ port@4 {
+ reg = <4>;
+
+ lvds0_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+};
+
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm1>;
+ status = "okay";
+};
+
+&pwm3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm3>;
+ status = "okay";
+};
+
+&ssi1 {
+ #sound-dai-cells = <0>;
+ fsl,mode = "ac97-slave";
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart5>;
+ status = "okay";
+};
+
+&usbh1 {
+ vbus-supply = <&reg_h1_vbus>;
+ pinctrl-names = "default";
+ phy_type = "utmi";
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&reg_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg>;
+ phy_type = "utmi";
+ dr_mode = "host";
+ disable-over-current;
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ no-1-8-v;
+ disable-wp;
+ cap-sd-highspeed;
+ no-mmc;
+ no-sdio;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ bus-width = <8>;
+ no-1-8-v;
+ non-removable;
+ no-sd;
+ no-sdio;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_audmux: audmuxgrp {
+ fsl,pins = <
+ /* SGTL5000 sys_mclk */
+ MX6QDL_PAD_CSI0_MCLK__CCM_CLKO1 0x030b0
+ MX6QDL_PAD_CSI0_DAT7__AUD3_RXD 0x130b0
+ MX6QDL_PAD_CSI0_DAT4__AUD3_TXC 0x130b0
+ MX6QDL_PAD_CSI0_DAT5__AUD3_TXD 0x110b0
+ MX6QDL_PAD_CSI0_DAT6__AUD3_TXFS 0x130b0
+ >;
+ };
+
+ pinctrl_backlight: backlightgrp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT7__GPIO4_IO28 0x1b0b0
+ >;
+ };
+
+ pinctrl_can1: can1grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX 0x1b000
+ MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX 0x3008
+ /* CAN1_SR */
+ MX6QDL_PAD_KEY_COL3__GPIO4_IO12 0x13008
+ /* CAN1_TERM */
+ MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b088
+ >;
+ };
+
+ pinctrl_can2: can2grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW4__FLEXCAN2_RX 0x1b000
+ MX6QDL_PAD_KEY_COL4__FLEXCAN2_TX 0x3008
+ /* CAN2_SR */
+ MX6QDL_PAD_KEY_ROW3__GPIO4_IO13 0x13008
+ >;
+ };
+
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
+ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
+ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
+ /* CS */
+ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x000b1
+ >;
+ };
+
+ pinctrl_ecspi2: ecspi2grp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT16__ECSPI2_MOSI 0x100b1
+ MX6QDL_PAD_DISP0_DAT17__ECSPI2_MISO 0x100b1
+ MX6QDL_PAD_DISP0_DAT18__GPIO5_IO12 0x100b1
+ MX6QDL_PAD_DISP0_DAT19__ECSPI2_SCLK 0x100b1
+ >;
+ };
+
+ pinctrl_enet: enetgrp {
+ fsl,pins = <
+ /* MX6QDL_ENET_PINGRP4 */
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
+ MX6QDL_PAD_ENET_RXD0__ENET_RX_DATA0 0x1b0b0
+ MX6QDL_PAD_ENET_RXD1__ENET_RX_DATA1 0x1b0b0
+ MX6QDL_PAD_ENET_RX_ER__ENET_RX_ER 0x1b0b0
+ MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x1b0b0
+ MX6QDL_PAD_ENET_TXD0__ENET_TX_DATA0 0x1b0b0
+ MX6QDL_PAD_ENET_TXD1__ENET_TX_DATA1 0x1b0b0
+ MX6QDL_PAD_ENET_CRS_DV__ENET_RX_EN 0x1b0b0
+ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x1b0b0
+ /* Phy reset */
+ MX6QDL_PAD_DISP0_DAT5__GPIO4_IO26 0x1b0b0
+ /* nINTRP */
+ MX6QDL_PAD_DISP0_DAT9__GPIO4_IO30 0x1b0b0
+ >;
+ };
+
+ pinctrl_gpiokeys: gpiokeygrp {
+ fsl,pins = <
+ /* ROTARY_BTN */
+ MX6QDL_PAD_NANDF_D5__GPIO2_IO05 0x1b0b0
+ /* nON_SWITCH */
+ MX6QDL_PAD_EIM_CS0__GPIO2_IO23 0x1b0b0
+ >;
+ };
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ /* ITU656_nRESET */
+ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x1b0b0
+ /* CAM1_MIRROR */
+ MX6QDL_PAD_GPIO_3__GPIO1_IO03 0x130b0
+ /* CAM2_MIRROR */
+ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x130b0
+ /* CAM_nDETECT */
+ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x1b0b0
+ /* ISB_IN1 */
+ MX6QDL_PAD_EIM_A16__GPIO2_IO22 0x130b0
+ /* ISB_nIN2 */
+ MX6QDL_PAD_EIM_A17__GPIO2_IO21 0x1b0b0
+ /* WARN_LIGHT */
+ MX6QDL_PAD_EIM_A19__GPIO2_IO19 0x100b0
+ /* ON2_FB */
+ MX6QDL_PAD_EIM_A25__GPIO5_IO02 0x100b0
+ /* YACO_nIRQ */
+ MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x1b0b0
+ /* YACO_BOOT0 */
+ MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x130b0
+ /* YACO_nRESET */
+ MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x1b0b0
+ /* FORCE_ON1 */
+ MX6QDL_PAD_EIM_EB2__GPIO2_IO30 0x1b0b0
+ /* AUDIO_nRESET */
+ MX6QDL_PAD_CSI0_VSYNC__GPIO5_IO21 0x1f0b0
+ /* ITU656_nPDN */
+ MX6QDL_PAD_CSI0_DATA_EN__GPIO5_IO20 0x1b0b0
+
+ /* HW revision detect */
+ /* REV_ID0 */
+ MX6QDL_PAD_SD4_DAT0__GPIO2_IO08 0x1b0b0
+ /* REV_ID1 is shared with PWM3 */
+ /* REV_ID2 */
+ MX6QDL_PAD_SD4_DAT2__GPIO2_IO10 0x1b0b0
+ /* REV_ID3 */
+ MX6QDL_PAD_SD4_DAT3__GPIO2_IO11 0x1b0b0
+ /* REV_ID4 */
+ MX6QDL_PAD_SD4_DAT4__GPIO2_IO12 0x1b0b0
+
+ /* New in HW revision 1 */
+ /* ON1_FB */
+ MX6QDL_PAD_EIM_D20__GPIO3_IO20 0x100b0
+ /* DIP1_FB */
+ MX6QDL_PAD_DI0_PIN2__GPIO4_IO18 0x1b0b0
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001f8b1
+ MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001f8b1
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1
+ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_ipu1_csi0: ipu1csi0grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT12__IPU1_CSI0_DATA12 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT13__IPU1_CSI0_DATA13 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT14__IPU1_CSI0_DATA14 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT15__IPU1_CSI0_DATA15 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT16__IPU1_CSI0_DATA16 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT17__IPU1_CSI0_DATA17 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT18__IPU1_CSI0_DATA18 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT19__IPU1_CSI0_DATA19 0x1b0b0
+ MX6QDL_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK 0x1b0b0
+ >;
+ };
+
+ pinctrl_keypad: keypadgrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_19__GPIO4_IO05 0x1b0b0
+ >;
+ };
+
+ pinctrl_leds: ledsgrp {
+ fsl,pins = <
+ /* DEBUG0 */
+ MX6QDL_PAD_DI0_DISP_CLK__GPIO4_IO16 0x1b0b0
+ /* DEBUG1 */
+ MX6QDL_PAD_DI0_PIN15__GPIO4_IO17 0x1b0b0
+ /* POWER_LED */
+ MX6QDL_PAD_EIM_CS1__GPIO2_IO24 0x1b0b0
+ >;
+ };
+
+ pinctrl_pwm1: pwm1grp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT8__PWM1_OUT 0x1b0b0
+ >;
+ };
+
+ pinctrl_pwm3: pwm3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b0
+ >;
+ };
+
+ pinctrl_rotary_ch: rotarychgrp {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_D3__GPIO2_IO03 0x1b0b0
+ MX6QDL_PAD_NANDF_D4__GPIO2_IO04 0x1b0b0
+ >;
+ };
+
+ pinctrl_touchscreen: touchscreengrp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT14__GPIO5_IO08 0x1b0b0
+ MX6QDL_PAD_DISP0_DAT15__GPIO5_IO09 0x1b0b0
+ >;
+ };
+
+	/* YaCO AUX UART */
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
+ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D26__UART2_TX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1
+ >;
+ };
+
+ /* YaCO Touchscreen UART */
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
+ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart5: uart5grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL1__UART5_TX_DATA 0x1b0b1
+ MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_usbotg: usbotggrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D21__USB_OTG_OC 0x1b0b0
+ /* power enable, high active */
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x170f9
+ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x100f9
+ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x170f9
+ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x170f9
+ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x170f9
+ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x170f9
+ MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17099
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10099
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17099
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17099
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17099
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17099
+ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17099
+ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17099
+ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17099
+ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17099
+ MX6QDL_PAD_SD3_RST__SD3_RESET 0x1b0b1
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6dl-vicut1.dts b/arch/arm/boot/dts/imx6dl-vicut1.dts
new file mode 100644
index 000000000000..174fd913bf96
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-vicut1.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright (c) 2014 Protonic Holland
+ */
+
+/dts-v1/;
+#include "imx6dl.dtsi"
+#include "imx6qdl-vicut1.dtsi"
+
+/ {
+ model = "Kverneland UT1 Board";
+ compatible = "kvg,vicut1", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts
index 861e05d53157..343364d3e4f7 100644
--- a/arch/arm/boot/dts/imx6q-tbs2910.dts
+++ b/arch/arm/boot/dts/imx6q-tbs2910.dts
@@ -16,6 +16,13 @@
stdout-path = &uart1;
};
+ aliases {
+ mmc0 = &usdhc2;
+ mmc1 = &usdhc3;
+ mmc2 = &usdhc4;
+ /delete-property/ mmc3;
+ };
+
memory@10000000 {
device_type = "memory";
reg = <0x10000000 0x80000000>;
diff --git a/arch/arm/boot/dts/imx6q-vicut1.dts b/arch/arm/boot/dts/imx6q-vicut1.dts
new file mode 100644
index 000000000000..0a4e251be162
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-vicut1.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright (c) 2014 Protonic Holland
+ */
+
+/dts-v1/;
+#include "imx6q.dtsi"
+#include "imx6qdl-vicut1.dtsi"
+
+/ {
+ model = "Kverneland UT1Q Board";
+ compatible = "kvg,vicut1q", "fsl,imx6q";
+};
+
+&sata {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 5277e3903291..8d209c1b3ca7 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -406,19 +406,21 @@
&hdmi {
compatible = "fsl,imx6q-hdmi";
- port@2 {
- reg = <2>;
+ ports {
+ port@2 {
+ reg = <2>;
- hdmi_mux_2: endpoint {
- remote-endpoint = <&ipu2_di0_hdmi>;
+ hdmi_mux_2: endpoint {
+ remote-endpoint = <&ipu2_di0_hdmi>;
+ };
};
- };
- port@3 {
- reg = <3>;
+ port@3 {
+ reg = <3>;
- hdmi_mux_3: endpoint {
- remote-endpoint = <&ipu2_di1_hdmi>;
+ hdmi_mux_3: endpoint {
+ remote-endpoint = <&ipu2_di1_hdmi>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index 736074f1c3ef..959d8ac2e393 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -418,7 +418,7 @@
/* VDD_AUD_1P8: Audio codec */
reg_aud_1p8v: ldo3 {
- regulator-name = "vdd1p8";
+ regulator-name = "vdd1p8a";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;
diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
index d6df598bd1c2..b167b33bd108 100644
--- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
@@ -137,7 +137,7 @@
lcd_backlight: lcd-backlight {
compatible = "pwm-backlight";
- pwms = <&pwm4 0 5000000>;
+ pwms = <&pwm4 0 5000000 0>;
pwm-names = "LCD_BKLT_PWM";
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
@@ -167,7 +167,7 @@
i2c-gpio,delay-us = <2>; /* ~100 kHz */
#address-cells = <1>;
#size-cells = <0>;
- status = "disabld";
+ status = "disabled";
};
i2c_cam: i2c-gpio-cam {
@@ -179,7 +179,7 @@
i2c-gpio,delay-us = <2>; /* ~100 kHz */
#address-cells = <1>;
#size-cells = <0>;
- status = "disabld";
+ status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index afe477f32984..5e58740d40c5 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -298,6 +298,7 @@
interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
<&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
fsl,err006687-workaround-present;
+ fsl,magic-packet;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-sr-som.dtsi b/arch/arm/boot/dts/imx6qdl-sr-som.dtsi
index b06577808ff4..0ad8ccde0cf8 100644
--- a/arch/arm/boot/dts/imx6qdl-sr-som.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sr-som.dtsi
@@ -53,7 +53,6 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_microsom_enet_ar8035>;
- phy-handle = <&phy>;
phy-mode = "rgmii-id";
phy-reset-duration = <2>;
phy-reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
@@ -63,9 +62,20 @@
#address-cells = <1>;
#size-cells = <0>;
- phy: ethernet-phy@0 {
+ /*
+ * The PHY can appear at either address 0 or 4 due to the
+ * configuration (LED) pin not being pulled sufficiently.
+ */
+ ethernet-phy@0 {
reg = <0>;
qca,clk-out-frequency = <125000000>;
+ qca,smarteee-tw-us-1g = <24>;
+ };
+
+ ethernet-phy@4 {
+ reg = <4>;
+ qca,clk-out-frequency = <125000000>;
+ qca,smarteee-tw-us-1g = <24>;
};
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-vicut1.dtsi b/arch/arm/boot/dts/imx6qdl-vicut1.dtsi
new file mode 100644
index 000000000000..eb25d21a2ace
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-vicut1.dtsi
@@ -0,0 +1,803 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright (c) 2014 Protonic Holland
+ * Copyright (c) 2020 Oleksij Rempel <kernel@pengutronix.de>, Pengutronix
+ */
+
+#include <dt-bindings/display/sdtv-standards.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/media/tvp5150.h>
+#include <dt-bindings/sound/fsl-imx-audmux.h>
+
+/ {
+ chosen {
+ stdout-path = &uart4;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_backlight>;
+ pwms = <&pwm1 0 5000000 0>;
+ brightness-levels = <0 16 64 255>;
+ num-interpolated-steps = <16>;
+ default-brightness-level = <1>;
+ power-supply = <&reg_3v3>;
+ enable-gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>;
+ };
+
+ connector {
+ compatible = "composite-video-connector";
+ label = "Composite0";
+ sdtv-standards = <SDTV_STD_PAL_B>;
+
+ port {
+ comp0_out: endpoint {
+ remote-endpoint = <&tvp5150_comp0_in>;
+ };
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ autorepeat;
+
+ power {
+ label = "Power Button";
+ gpios = <&gpio2 23 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_POWER>;
+ wakeup-source;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_leds>;
+
+ led-0 {
+ label = "LED_DI0_DEBUG_0";
+ function = LED_FUNCTION_HEARTBEAT;
+ gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ led-1 {
+ label = "LED_DI0_DEBUG_1";
+ function = LED_FUNCTION_DISK;
+ gpios = <&gpio4 17 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "disk-activity";
+ };
+
+ led-2 {
+ label = "POWER_LED";
+ function = LED_FUNCTION_POWER;
+ gpios = <&gpio2 24 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
+
+ panel {
+ compatible = "kyo,tcg121xglp";
+ backlight = <&backlight>;
+ power-supply = <&reg_3v3>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&lvds0_out>;
+ };
+ };
+ };
+
+ reg_1v8: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ reg_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_h1_vbus: regulator-h1-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "h1-vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio1 0 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_otg_vbus: regulator-otg-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "otg-vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_wifi: regulator-wifi {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wifi_npd>;
+ regulator-name = "wifi";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpio = <&gpio1 26 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ startup-delay-us = <70000>;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "prti6q-sgtl5000";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,widgets =
+ "Microphone", "Microphone Jack",
+ "Line", "Line In Jack",
+ "Headphone", "Headphone Jack",
+ "Speaker", "External Speaker";
+ simple-audio-card,routing =
+ "MIC_IN", "Microphone Jack",
+ "LINE_IN", "Line In Jack",
+ "Headphone Jack", "HP_OUT",
+ "External Speaker", "LINE_OUT";
+
+ simple-audio-card,cpu {
+ sound-dai = <&ssi1>;
+ system-clock-frequency = <0>; /* Do NOT call fsl_ssi_set_dai_sysclk! */
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&codec>;
+ bitclock-master;
+ frame-master;
+ };
+ };
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux>;
+ status = "okay";
+
+ mux-ssi1 {
+ fsl,audmux-port = <0>;
+ fsl,port-config = <
+ IMX_AUDMUX_V2_PTCR_SYN 0
+ IMX_AUDMUX_V2_PTCR_TFSEL(2) 0
+ IMX_AUDMUX_V2_PTCR_TCSEL(2) 0
+ IMX_AUDMUX_V2_PTCR_TFSDIR 0
+ IMX_AUDMUX_V2_PTCR_TCLKDIR IMX_AUDMUX_V2_PDCR_RXDSEL(2)
+ >;
+ };
+
+ mux-pins3 {
+ fsl,audmux-port = <2>;
+ fsl,port-config = <
+ IMX_AUDMUX_V2_PTCR_SYN IMX_AUDMUX_V2_PDCR_RXDSEL(0)
+ 0 IMX_AUDMUX_V2_PDCR_TXRXEN
+ >;
+ };
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1>;
+ status = "okay";
+};
+
+&can2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can2>;
+ status = "okay";
+};
+
+&clks {
+ assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>;
+ assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>;
+};
+
+&ecspi1 {
+ cs-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&rgmii_phy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Microchip KSZ9031RNX PHY */
+ rgmii_phy: ethernet-phy@0 {
+ reg = <0>;
+ interrupts-extended = <&gpio1 28 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <300>;
+ };
+ };
+};
+
+&gpio1 {
+ gpio-line-names =
+ "CAN1_TERM", "SD1_CD", "ITU656_RESET", "CAM1_MIRROR",
+ "CAM2_MIRROR", "", "", "SMBALERT",
+ "DEBUG_0", "DEBUG_1", "SDIO_SCK", "SDIO_CMD", "SDIO_D3",
+ "SDIO_D2", "SDIO_D1", "SDIO_D0",
+ "SD1_DATA0", "SD1_DATA1", "SD1_CMD", "SD1_DATA2", "SD1_CLK",
+ "SD1_DATA3", "", "",
+ "", "ETH_RESET", "WIFI_PD", "WIFI_BT_RST", "ETH_INT", "",
+ "WL_IRQ", "ETH_MDC";
+};
+
+&gpio2 {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "REV_ID0", "REV_ID1", "REV_ID2", "REV_ID3", "REV_ID4",
+ "BOARD_ID0", "BOARD_ID1", "BOARD_ID2",
+ "", "", "", "", "", "", "", "ON_SWITCH",
+ "POWER_LED", "", "ECSPI2_SS0", "", "", "", "", "";
+};
+
+&gpio3 {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "ECSPI1_SCLK", "ECSPI1_MISO", "ECSPI1_MOSI", "ECSPI1_SS1",
+ "CPU_ON1_FB", "USB_OTG_OC", "USB_OTG_PWR", "YACO_IRQ",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpio4 {
+ gpio-line-names =
+ "", "", "", "", "", "", "UART4_TXD", "UART4_RXD",
+ "UART5_TXD", "UART5_RXD", "CAN1_TX", "CAN1_RX", "CAN1_SR",
+ "CAN2_SR", "CAN2_TX", "CAN2_RX",
+ "LED_DI0_DEBUG_0", "LED_DI0_DEBUG_1", "", "", "", "", "", "",
+ "", "", "", "", "BL_EN", "BL_PWM", "", "";
+};
+
+&gpio5 {
+ gpio-line-names =
+ "", "", "", "", "", "PCIE_WAKE", "PCIE_CLKREQ", "PCIE_W_DIS",
+ "PCIE_RESET", "", "", "", "", "", "", "",
+ "", "", "ITU656_CLK", "I2S_MCLK", "ITU656_PDN", "AUDIO_RESET",
+ "I2S_BITCLK", "I2S_DOUT",
+ "I2S_LRCLK", "I2S_DIN", "I2C1_SDA", "I2C1_SCL", "YACO_AUX_RX",
+ "YACO_AUX_TX", "ITU656_D0", "ITU656_D1";
+};
+
+&gpio6 {
+ gpio-line-names =
+ "ITU656_D2", "ITU656_D3", "ITU656_D4", "ITU656_D5",
+ "ITU656_D6", "ITU656_D7", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "RGMII_TXC", "RGMII_TD0", "RGMII_TD1", "RGMII_TD2",
+ "RGMII_TD3",
+ "RGMII_RX_CTL", "RGMII_RD0", "RGMII_TX_CTL", "RGMII_RD1",
+ "RGMII_RD2", "RGMII_RD3", "", "";
+};
+
+&gpio7 {
+ gpio-line-names =
+ "EMMC_DAT5", "EMMC_DAT4", "EMMC_CMD", "EMMC_CLK", "EMMC_DAT0",
+ "EMMC_DAT1", "EMMC_DAT2", "EMMC_DAT3",
+ "EMMC_RST", "", "", "", "CAM_DETECT", "", "", "",
+ "", "EMMC_DAT7", "EMMC_DAT6", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ codec: audio-codec@a {
+ compatible = "fsl,sgtl5000";
+ reg = <0xa>;
+ #sound-dai-cells = <0>;
+ clocks = <&clks 201>;
+ VDDA-supply = <&reg_3v3>;
+ VDDIO-supply = <&reg_3v3>;
+ VDDD-supply = <&reg_1v8>;
+ };
+
+ video-decoder@5c {
+ compatible = "ti,tvp5150";
+ reg = <0x5c>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ tvp5150_comp0_in: endpoint {
+ remote-endpoint = <&comp0_out>;
+ };
+ };
+
+ /* Output port 2 is video output pad */
+ port@2 {
+ reg = <2>;
+
+ tvp5151_to_ipu1_csi0_mux: endpoint {
+ remote-endpoint = <&ipu1_csi0_mux_from_parallel_sensor>;
+ };
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ adc@49 {
+ compatible = "ti,ads1015";
+ reg = <0x49>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@4 {
+ reg = <4>;
+ ti,gain = <3>;
+ ti,datarate = <3>;
+ };
+
+ channel@5 {
+ reg = <5>;
+ ti,gain = <3>;
+ ti,datarate = <3>;
+ };
+
+ channel@6 {
+ reg = <6>;
+ ti,gain = <3>;
+ ti,datarate = <3>;
+ };
+
+ channel@7 {
+ reg = <7>;
+ ti,gain = <3>;
+ ti,datarate = <3>;
+ };
+ };
+
+ rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
+
+ temperature-sensor@70 {
+ compatible = "ti,tmp103";
+ reg = <0x70>;
+ };
+};
+
+&ipu1_csi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ipu1_csi0>;
+ status = "okay";
+};
+
+&ipu1_csi0_mux_from_parallel_sensor {
+ remote-endpoint = <&tvp5151_to_ipu1_csi0_mux>;
+};
+
+&ldb {
+ status = "okay";
+
+ lvds-channel@0 {
+ status = "okay";
+
+ port@4 {
+ reg = <4>;
+
+ lvds0_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+};
+
+&pcie {
+ status = "okay";
+};
+
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm1>;
+ status = "okay";
+};
+
+&ssi1 {
+ #sound-dai-cells = <0>;
+ fsl,mode = "ac97-slave";
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart5>;
+ status = "okay";
+};
+
+&usbh1 {
+ vbus-supply = <&reg_h1_vbus>;
+ pinctrl-names = "default";
+ phy_type = "utmi";
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&reg_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg>;
+ phy_type = "utmi";
+ dr_mode = "host";
+ disable-over-current;
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ no-1-8-v;
+ disable-wp;
+ cap-sd-highspeed;
+ no-mmc;
+ no-sdio;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ vmmc-supply = <&reg_wifi>;
+ non-removable;
+ cap-power-off-card;
+ keep-power-in-suspend;
+ no-1-8-v;
+ no-mmc;
+ no-sd;
+ status = "okay";
+
+ wifi {
+ compatible = "ti,wl1271";
+ interrupts-extended = <&gpio1 30 IRQ_TYPE_LEVEL_HIGH>;
+ ref-clock-frequency = "38400000";
+ tcxo-clock-frequency = "19200000";
+ };
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ bus-width = <8>;
+ no-1-8-v;
+ non-removable;
+ no-sd;
+ no-sdio;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_audmux: audmuxgrp {
+ fsl,pins = <
+ /* SGTL5000 sys_mclk */
+ MX6QDL_PAD_CSI0_MCLK__CCM_CLKO1 0x030b0
+ MX6QDL_PAD_CSI0_DAT7__AUD3_RXD 0x130b0
+ MX6QDL_PAD_CSI0_DAT4__AUD3_TXC 0x130b0
+ MX6QDL_PAD_CSI0_DAT5__AUD3_TXD 0x110b0
+ MX6QDL_PAD_CSI0_DAT6__AUD3_TXFS 0x130b0
+ >;
+ };
+
+ pinctrl_backlight: backlightgrp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT7__GPIO4_IO28 0x1b0b0
+ >;
+ };
+
+ pinctrl_can1: can1grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX 0x1b000
+ MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX 0x3008
+ /* CAN1_SR */
+ MX6QDL_PAD_KEY_COL3__GPIO4_IO12 0x13008
+ /* CAN1_TERM */
+ MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b088
+ >;
+ };
+
+ pinctrl_can2: can2grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW4__FLEXCAN2_RX 0x1b000
+ MX6QDL_PAD_KEY_COL4__FLEXCAN2_TX 0x3008
+ /* CAN2_SR */
+ MX6QDL_PAD_KEY_ROW3__GPIO4_IO13 0x13008
+ >;
+ };
+
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
+ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
+ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
+ /* CS */
+ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x000b1
+ >;
+ };
+
+ pinctrl_enet: enetgrp {
+ fsl,pins = <
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x10030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x10030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x10030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x10030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x10030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x10030
+ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x10030
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x10030
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x10030
+ /* Phy reset */
+ MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x1b0b0
+ MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x1b0b1
+ >;
+ };
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ /* ITU656_nRESET */
+ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x1b0b0
+ /* CAM1_MIRROR */
+ MX6QDL_PAD_GPIO_3__GPIO1_IO03 0x130b0
+ /* CAM2_MIRROR */
+ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x130b0
+ /* CAM_nDETECT */
+ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x1b0b0
+ /* nON_SWITCH */
+ MX6QDL_PAD_EIM_CS0__GPIO2_IO23 0x1b0b0
+ /* ISB_IN1 */
+ MX6QDL_PAD_EIM_A16__GPIO2_IO22 0x130b0
+ /* ISB_nIN2 */
+ MX6QDL_PAD_EIM_A17__GPIO2_IO21 0x1b0b0
+ /* WARN_LIGHT */
+ MX6QDL_PAD_EIM_A19__GPIO2_IO19 0x100b0
+ /* ON2_FB */
+ MX6QDL_PAD_EIM_A25__GPIO5_IO02 0x100b0
+ /* YACO_nIRQ */
+ MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x1b0b0
+ /* YACO_BOOT0 */
+ MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x130b0
+ /* YACO_nRESET */
+ MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x1b0b0
+ /* FORCE_ON1 */
+ MX6QDL_PAD_EIM_EB2__GPIO2_IO30 0x1b0b0
+ /* AUDIO_nRESET */
+ MX6QDL_PAD_CSI0_VSYNC__GPIO5_IO21 0x1f0b0
+ /* ITU656_nPDN */
+ MX6QDL_PAD_CSI0_DATA_EN__GPIO5_IO20 0x1b0b0
+
+ /* HW revision detect */
+ /* REV_ID0 */
+ MX6QDL_PAD_SD4_DAT0__GPIO2_IO08 0x1b0b0
+ /* REV_ID1 */
+ MX6QDL_PAD_SD4_DAT1__GPIO2_IO09 0x1b0b0
+ /* REV_ID2 */
+ MX6QDL_PAD_SD4_DAT2__GPIO2_IO10 0x1b0b0
+ /* REV_ID3 */
+ MX6QDL_PAD_SD4_DAT3__GPIO2_IO11 0x1b0b0
+ /* REV_ID4 */
+ MX6QDL_PAD_SD4_DAT4__GPIO2_IO12 0x1b0b0
+
+ /* New in HW revision 1 */
+ /* ON1_FB */
+ MX6QDL_PAD_EIM_D20__GPIO3_IO20 0x100b0
+ /* DIP1_FB */
+ MX6QDL_PAD_DI0_PIN2__GPIO4_IO18 0x1b0b0
+
+ /* New in UT2: FIXME: ISB PWM should start off, PD */
+ /* ISB_LED_PWM */
+ MX6QDL_PAD_DISP0_DAT9__GPIO4_IO30 0x130b0
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001f8b1
+ MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001f8b1
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1
+ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_ipu1_csi0: ipu1csi0grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT12__IPU1_CSI0_DATA12 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT13__IPU1_CSI0_DATA13 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT14__IPU1_CSI0_DATA14 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT15__IPU1_CSI0_DATA15 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT16__IPU1_CSI0_DATA16 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT17__IPU1_CSI0_DATA17 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT18__IPU1_CSI0_DATA18 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT19__IPU1_CSI0_DATA19 0x1b0b0
+ MX6QDL_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK 0x1b0b0
+ >;
+ };
+
+ pinctrl_leds: ledsgrp {
+ fsl,pins = <
+ /* DEBUG0 */
+ MX6QDL_PAD_DI0_DISP_CLK__GPIO4_IO16 0x1b0b0
+ /* DEBUG1 */
+ MX6QDL_PAD_DI0_PIN15__GPIO4_IO17 0x1b0b0
+ /* POWER_LED */
+ MX6QDL_PAD_EIM_CS1__GPIO2_IO24 0x1b0b0
+ >;
+ };
+
+ pinctrl_pwm1: pwm1grp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT8__PWM1_OUT 0x1b0b0
+ >;
+ };
+
+	/* YaCO AUX UART */
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
+ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D26__UART2_RX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D27__UART2_TX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D28__UART2_DTE_CTS_B 0x1b0b1
+ MX6QDL_PAD_EIM_D29__UART2_DTE_RTS_B 0x1b0b1
+ >;
+ };
+
+ /* YaCO Touchscreen UART */
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
+ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart5: uart5grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL1__UART5_TX_DATA 0x1b0b1
+ MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_usbotg: usbotggrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D21__USB_OTG_OC 0x1b0b0
+ /* power enable, high active */
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x170f9
+ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x100f9
+ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x170f9
+ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x170f9
+ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x170f9
+ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x170f9
+ MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170b9
+ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100b9
+ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170b9
+ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170b9
+ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170b9
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x170b9
+ /* WL12xx IRQ */
+ MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x10880
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17099
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10099
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17099
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17099
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17099
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17099
+ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17099
+ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17099
+ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17099
+ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17099
+ MX6QDL_PAD_SD3_RST__SD3_RESET 0x1b0b1
+ >;
+ };
+
+ pinctrl_wifi_npd: wifinpdgrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x1b8b0
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
index c0a76202e16b..525ff62b47f5 100644
--- a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
@@ -112,17 +112,17 @@
sound1 {
compatible = "simple-audio-card";
- simple-audio-card,name = "Front";
+ simple-audio-card,name = "front";
simple-audio-card,format = "i2s";
simple-audio-card,bitclock-master = <&sound1_codec>;
simple-audio-card,frame-master = <&sound1_codec>;
simple-audio-card,widgets =
"Headphone", "Headphone Jack";
simple-audio-card,routing =
- "Headphone Jack", "HPLEFT",
- "Headphone Jack", "HPRIGHT",
- "LEFTIN", "HPL",
- "RIGHTIN", "HPR";
+ "Headphone Jack", "HPA1 HPLEFT",
+ "Headphone Jack", "HPA1 HPRIGHT",
+ "HPA1 LEFTIN", "HPL",
+ "HPA1 RIGHTIN", "HPR";
simple-audio-card,aux-devs = <&hpa1>;
sound1_cpu: simple-audio-card,cpu {
@@ -137,17 +137,17 @@
sound2 {
compatible = "simple-audio-card";
- simple-audio-card,name = "Back";
+ simple-audio-card,name = "periph";
simple-audio-card,format = "i2s";
simple-audio-card,bitclock-master = <&sound2_codec>;
simple-audio-card,frame-master = <&sound2_codec>;
simple-audio-card,widgets =
"Headphone", "Headphone Jack";
simple-audio-card,routing =
- "Headphone Jack", "HPLEFT",
- "Headphone Jack", "HPRIGHT",
- "LEFTIN", "HPL",
- "RIGHTIN", "HPR";
+ "Headphone Jack", "HPA1 HPLEFT",
+ "Headphone Jack", "HPA1 HPRIGHT",
+ "HPA1 LEFTIN", "HPL",
+ "HPA1 RIGHTIN", "HPR";
simple-audio-card,aux-devs = <&hpa2>;
sound2_cpu: simple-audio-card,cpu {
@@ -399,6 +399,7 @@
reg = <0x60>;
power-gpio = <&gpio1 5 GPIO_ACTIVE_HIGH>;
Vdd-supply = <&reg_5p0v_main>;
+ sound-name-prefix = "HPA1";
};
edp-bridge@68 {
@@ -598,6 +599,8 @@
touchscreen-inverted-x;
touchscreen-swapped-x-y;
syna,sensor-type = <1>;
+ syna,delta-x-threshold = <5>;
+ syna,delta-y-threshold = <10>;
};
rmi4-f12@12 {
@@ -626,7 +629,7 @@
pinctrl-0 = <&pinctrl_ucs1002_pins>;
reg = <0x32>;
interrupts-extended = <&gpio5 2 IRQ_TYPE_EDGE_BOTH>,
- <&gpio3 21 IRQ_TYPE_EDGE_BOTH>;
+ <&gpio3 21 IRQ_TYPE_EDGE_FALLING>;
interrupt-names = "a_det", "alert";
};
@@ -637,6 +640,7 @@
reg = <0x60>;
power-gpio = <&gpio1 4 GPIO_ACTIVE_HIGH>;
Vdd-supply = <&reg_5p0v_main>;
+ sound-name-prefix = "HPA1";
};
};
@@ -885,10 +889,6 @@
};
};
-&wdog1 {
- status = "disabled";
-};
-
&iomuxc {
pinctrl_accel: accelgrp {
fsl,pins = <
@@ -988,22 +988,22 @@
pinctrl_i2c1: i2c1grp {
fsl,pins = <
- MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001b8b1
- MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001b8b1
+ MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001b811
+ MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001b811
>;
};
pinctrl_i2c2: i2c2grp {
fsl,pins = <
- MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
- MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b811
+ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b811
>;
};
pinctrl_i2c3: i2c3grp {
fsl,pins = <
- MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
- MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
+ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b811
+ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b811
>;
};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 6f59a99cbe82..82e01ce026ea 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -182,8 +182,6 @@
};
hdmi: hdmi@120000 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0x00120000 0x9000>;
interrupts = <0 115 0x04>;
gpr = <&gpr>;
@@ -192,19 +190,24 @@
clock-names = "iahb", "isfr";
status = "disabled";
- port@0 {
- reg = <0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
- hdmi_mux_0: endpoint {
- remote-endpoint = <&ipu1_di0_hdmi>;
+ hdmi_mux_0: endpoint {
+ remote-endpoint = <&ipu1_di0_hdmi>;
+ };
};
- };
- port@1 {
- reg = <1>;
+ port@1 {
+ reg = <1>;
- hdmi_mux_1: endpoint {
- remote-endpoint = <&ipu1_di1_hdmi>;
+ hdmi_mux_1: endpoint {
+ remote-endpoint = <&ipu1_di1_hdmi>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/imx6qp-vicutp.dts b/arch/arm/boot/dts/imx6qp-vicutp.dts
new file mode 100644
index 000000000000..7bad7ca6b12e
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qp-vicutp.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright (c) 2014 Protonic Holland
+ */
+
+/dts-v1/;
+#include "imx6qp.dtsi"
+#include "imx6qdl-vicut1.dtsi"
+
+/ {
+ model = "Kverneland UT1P Board";
+ compatible = "kvg,vicutp", "fsl,imx6qp";
+};
diff --git a/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts b/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
index caa279608803..6ea5f918d059 100644
--- a/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
+++ b/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
@@ -340,7 +340,6 @@
MX6SL_PAD_KEY_ROW7__GPIO4_IO07 0x79
MX6SL_PAD_ECSPI2_MOSI__GPIO4_IO13 0x79
MX6SL_PAD_KEY_COL5__GPIO4_IO02 0x79
- MX6SL_PAD_KEY_ROW6__GPIO4_IO05 0x79
>;
};
@@ -396,7 +395,14 @@
pinctrl_uart1: uart1grp {
fsl,pins = <
MX6SL_PAD_UART1_TXD__UART1_TX_DATA 0x1b0b1
- MX6SL_PAD_UART1_RXD__UART1_TX_DATA 0x1b0b1
+ MX6SL_PAD_UART1_RXD__UART1_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX6SL_PAD_KEY_ROW6__UART4_TX_DATA 0x1b0b1
+ MX6SL_PAD_KEY_COL6__UART4_RX_DATA 0x1b0b1
>;
};
@@ -543,11 +549,19 @@
};
&uart1 {
+ /* J4, through-holes */
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
status = "okay";
};
+&uart4 {
+ /* TP198, next to J4, SMD pads */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+};
+
&usdhc2 {
pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
pinctrl-0 = <&pinctrl_usdhc2>;
diff --git a/arch/arm/boot/dts/imx6sl-tolino-shine3.dts b/arch/arm/boot/dts/imx6sl-tolino-shine3.dts
index 27143ea0f0f1..e3f1e8d79528 100644
--- a/arch/arm/boot/dts/imx6sl-tolino-shine3.dts
+++ b/arch/arm/boot/dts/imx6sl-tolino-shine3.dts
@@ -94,7 +94,6 @@
MX6SL_PAD_KEY_ROW7__GPIO4_IO07 0x79
MX6SL_PAD_ECSPI2_MOSI__GPIO4_IO13 0x79
MX6SL_PAD_KEY_COL5__GPIO4_IO02 0x79
- MX6SL_PAD_KEY_ROW6__GPIO4_IO05 0x79
>;
};
@@ -156,7 +155,14 @@
pinctrl_uart1: uart1grp {
fsl,pins = <
MX6SL_PAD_UART1_TXD__UART1_TX_DATA 0x1b0b1
- MX6SL_PAD_UART1_RXD__UART1_TX_DATA 0x1b0b1
+ MX6SL_PAD_UART1_RXD__UART1_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX6SL_PAD_KEY_ROW6__UART4_TX_DATA 0x1b0b1
+ MX6SL_PAD_KEY_COL6__UART4_RX_DATA 0x1b0b1
>;
};
@@ -300,6 +306,11 @@
pinctrl-0 = <&pinctrl_uart1>;
};
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+};
+
&usdhc2 {
pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
pinctrl-0 = <&pinctrl_usdhc2>;
diff --git a/arch/arm/boot/dts/imx6sll-kobo-clarahd.dts b/arch/arm/boot/dts/imx6sll-kobo-clarahd.dts
index 7214d1c98249..90b32f5eb529 100644
--- a/arch/arm/boot/dts/imx6sll-kobo-clarahd.dts
+++ b/arch/arm/boot/dts/imx6sll-kobo-clarahd.dts
@@ -104,7 +104,6 @@
MX6SLL_PAD_KEY_ROW7__GPIO4_IO07 0x79
MX6SLL_PAD_ECSPI2_MOSI__GPIO4_IO13 0x79
MX6SLL_PAD_KEY_COL5__GPIO4_IO02 0x79
- MX6SLL_PAD_KEY_ROW6__GPIO4_IO05 0x79
>;
};
@@ -170,6 +169,13 @@
>;
};
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX6SLL_PAD_KEY_ROW6__UART4_DCE_TX 0x1b0b1
+ MX6SLL_PAD_KEY_COL6__UART4_DCE_RX 0x1b0b1
+ >;
+ };
+
pinctrl_usbotg1: usbotg1grp {
fsl,pins = <
MX6SLL_PAD_EPDC_PWR_COM__USB_OTG1_ID 0x17059
@@ -302,6 +308,11 @@
pinctrl-0 = <&pinctrl_uart1>;
};
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+};
+
&usdhc2 {
pinctrl-names = "default", "state_100mhz", "state_200mhz","sleep";
pinctrl-0 = <&pinctrl_usdhc2>;
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index 1351d7f70a54..c6e85e4a0883 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -206,6 +206,7 @@
phy-mode = "rgmii-id";
phy-handle = <&ethphy1>;
phy-reset-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
+ fsl,magic-packet;
status = "okay";
mdio {
@@ -227,6 +228,7 @@
pinctrl-0 = <&pinctrl_enet2>;
phy-mode = "rgmii-id";
phy-handle = <&ethphy2>;
+ fsl,magic-packet;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
index 64c2d1e9f7fc..c593597b2119 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
@@ -101,7 +101,7 @@
status = "okay";
gpio-sck = <&gpio5 11 0>;
gpio-mosi = <&gpio5 10 0>;
- cs-gpios = <&gpio5 7 0>;
+ cs-gpios = <&gpio5 7 GPIO_ACTIVE_LOW>;
num-chipselects = <1>;
#address-cells = <1>;
#size-cells = <0>;
@@ -113,6 +113,7 @@
reg = <0>;
registers-number = <1>;
spi-max-frequency = <100000>;
+ enable-gpios = <&gpio5 8 GPIO_ACTIVE_LOW>;
};
};
@@ -145,6 +146,41 @@
reg = <0x1a>;
wlf,shared-lrclk;
};
+
+ camera@3c {
+ compatible = "ovti,ov5640";
+ reg = <0x3c>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_camera_clock>;
+ clocks = <&clks IMX6UL_CLK_CSI>;
+ clock-names = "xclk";
+ powerdown-gpios = <&gpio_spi 6 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio_spi 5 GPIO_ACTIVE_LOW>;
+
+ port {
+ ov5640_to_parallel: endpoint {
+ remote-endpoint = <&parallel_from_ov5640>;
+ bus-width = <8>;
+ data-shift = <2>; /* lines 9:2 are used */
+ hsync-active = <0>;
+ vsync-active = <0>;
+ pclk-sample = <1>;
+ };
+ };
+ };
+};
+
+&csi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_csi1>;
+ status = "okay";
+
+ port {
+ parallel_from_ov5640: endpoint {
+ remote-endpoint = <&ov5640_to_parallel>;
+ bus-type = <5>; /* Parallel bus */
+ };
+ };
};
&fec1 {
@@ -169,17 +205,26 @@
#size-cells = <0>;
ethphy0: ethernet-phy@2 {
+ compatible = "ethernet-phy-id0022.1560";
reg = <2>;
micrel,led-mode = <1>;
clocks = <&clks IMX6UL_CLK_ENET_REF>;
clock-names = "rmii-ref";
+ reset-gpios = <&gpio_spi 1 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <100>;
+
};
ethphy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-id0022.1560";
reg = <1>;
micrel,led-mode = <1>;
clocks = <&clks IMX6UL_CLK_ENET2_REF>;
clock-names = "rmii-ref";
+ reset-gpios = <&gpio_spi 2 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <100>;
};
};
};
@@ -343,9 +388,14 @@
&iomuxc {
pinctrl-names = "default";
- pinctrl_csi1: csi1grp {
+ pinctrl_camera_clock: cameraclockgrp {
fsl,pins = <
MX6UL_PAD_CSI_MCLK__CSI_MCLK 0x1b088
+ >;
+ };
+
+ pinctrl_csi1: csi1grp {
+ fsl,pins = <
MX6UL_PAD_CSI_PIXCLK__CSI_PIXCLK 0x1b088
MX6UL_PAD_CSI_VSYNC__CSI_VSYNC 0x1b088
MX6UL_PAD_CSI_HSYNC__CSI_HSYNC 0x1b088
diff --git a/arch/arm/boot/dts/imx6ul-prti6g.dts b/arch/arm/boot/dts/imx6ul-prti6g.dts
new file mode 100644
index 000000000000..d62015701d0a
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-prti6g.dts
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright (c) 2016 Protonic Holland
+ * Copyright (c) 2020 Oleksij Rempel <kernel@pengutronix.de>, Pengutronix
+ */
+
+/dts-v1/;
+#include "imx6ul.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "Protonic PRTI6G Board";
+ compatible = "prt,prti6g", "fsl,imx6ul";
+
+ chosen {
+ stdout-path = &uart1;
+ };
+
+ clock_ksz8081_in: clock-ksz8081-in {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ };
+
+ clock_ksz8081_out: clock-ksz8081-out {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_leds>;
+
+ led-0 {
+ label = "debug0";
+ gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ reg_3v2: regulator-3v2 {
+ compatible = "regulator-fixed";
+ regulator-name = "3v2";
+ regulator-min-microvolt = <3200000>;
+ regulator-max-microvolt = <3200000>;
+ };
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1>;
+ status = "okay";
+};
+
+&can2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can2>;
+ status = "okay";
+};
+
+&ecspi1 {
+ cs-gpios = <&gpio4 26 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ };
+};
+
+&ecspi2 {
+ cs-gpios = <&gpio4 22 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi2>;
+ status = "okay";
+
+ spi@0 {
+ compatible = "spidev";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eth1>;
+ phy-mode = "rmii";
+ phy-handle = <&rmii_phy>;
+ clocks = <&clks IMX6UL_CLK_ENET>,
+ <&clks IMX6UL_CLK_ENET_AHB>,
+ <&clks IMX6UL_CLK_ENET_PTP>,
+ <&clock_ksz8081_out>;
+ clock-names = "ipg", "ahb", "ptp",
+ "enet_clk_ref";
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Microchip KSZ8081RNA PHY */
+ rmii_phy: ethernet-phy@0 {
+ reg = <0>;
+ interrupts-extended = <&gpio5 1 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <300>;
+ clocks = <&clock_ksz8081_in>;
+ clock-names = "rmii-ref";
+ };
+ };
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ clock-frequency = <100000>;
+ status = "okay";
+
+ /* additional i2c devices are added automatically by the boot loader */
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ clock-frequency = <100000>;
+ status = "okay";
+
+ adc@49 {
+ compatible = "ti,ads1015";
+ reg = <0x49>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@4 {
+ reg = <4>;
+ ti,gain = <3>;
+ ti,datarate = <3>;
+ };
+
+ channel@5 {
+ reg = <5>;
+ ti,gain = <3>;
+ ti,datarate = <3>;
+ };
+
+ channel@6 {
+ reg = <6>;
+ ti,gain = <3>;
+ ti,datarate = <3>;
+ };
+
+ channel@7 {
+ reg = <7>;
+ ti,gain = <3>;
+ ti,datarate = <3>;
+ };
+ };
+
+ rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
+
+ temperature-sensor@70 {
+ compatible = "ti,tmp103";
+ reg = <0x70>;
+ };
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&usbotg1 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ cd-gpios = <&gpio4 12 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_3v2>;
+ no-1-8-v;
+ disable-wp;
+ cap-sd-highspeed;
+ no-mmc;
+ no-sdio;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ bus-width = <8>;
+ no-1-8-v;
+ non-removable;
+ no-sd;
+ no-sdio;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_can1: can1grp {
+ fsl,pins = <
+ MX6UL_PAD_UART3_CTS_B__FLEXCAN1_TX 0x0b0b0
+ MX6UL_PAD_UART3_RTS_B__FLEXCAN1_RX 0x0b0b0
+ /* SR */
+ MX6UL_PAD_SNVS_TAMPER3__GPIO5_IO03 0x0b0b0
+ /* TERM */
+ MX6UL_PAD_SNVS_TAMPER4__GPIO5_IO04 0x0b0b0
+ /* nSMBALERT */
+ MX6UL_PAD_SNVS_TAMPER2__GPIO5_IO02 0x0b0b0
+ >;
+ };
+
+ pinctrl_can2: can2grp {
+ fsl,pins = <
+ MX6UL_PAD_UART2_CTS_B__FLEXCAN2_TX 0x0b0b0
+ MX6UL_PAD_UART2_RTS_B__FLEXCAN2_RX 0x0b0b0
+ /* SR */
+ MX6UL_PAD_SNVS_TAMPER5__GPIO5_IO05 0x0b0b0
+ >;
+ };
+
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX6UL_PAD_CSI_DATA04__ECSPI1_SCLK 0x0b0b0
+ MX6UL_PAD_CSI_DATA05__GPIO4_IO26 0x000b1
+ MX6UL_PAD_CSI_DATA06__ECSPI1_MOSI 0x0b0b0
+ MX6UL_PAD_CSI_DATA07__ECSPI1_MISO 0x0b0b0
+ >;
+ };
+
+ pinctrl_ecspi2: ecspi2grp {
+ fsl,pins = <
+ MX6UL_PAD_CSI_DATA00__ECSPI2_SCLK 0x0b0b0
+ MX6UL_PAD_CSI_DATA01__GPIO4_IO22 0x000b1
+ MX6UL_PAD_CSI_DATA02__ECSPI2_MOSI 0x0b0b0
+ MX6UL_PAD_CSI_DATA03__ECSPI2_MISO 0x0b0b0
+ >;
+ };
+
+ pinctrl_eth1: eth1grp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO07__ENET1_MDC 0x1b0b0
+ MX6UL_PAD_GPIO1_IO06__ENET1_MDIO 0x100b0
+ MX6UL_PAD_ENET1_RX_DATA0__ENET1_RDATA00 0x1b0b0
+ MX6UL_PAD_ENET1_RX_DATA1__ENET1_RDATA01 0x1b0b0
+ MX6UL_PAD_ENET1_RX_EN__ENET1_RX_EN 0x100b0
+ MX6UL_PAD_ENET1_RX_ER__ENET1_RX_ER 0x1b0b0
+ MX6UL_PAD_ENET1_TX_EN__ENET1_TX_EN 0x1b0b0
+ MX6UL_PAD_ENET1_TX_DATA0__ENET1_TDATA00 0x1b0b0
+ MX6UL_PAD_ENET1_TX_DATA1__ENET1_TDATA01 0x1b0b0
+ MX6UL_PAD_ENET1_TX_CLK__ENET1_REF_CLK1 0x1b000
+ /* PHY ENET1_RST */
+ MX6UL_PAD_SNVS_TAMPER0__GPIO5_IO00 0x00880
+ /* PHY ENET1_IRQ */
+ MX6UL_PAD_SNVS_TAMPER1__GPIO5_IO01 0x00880
+ >;
+ };
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ /* HW revision detect */
+ /* REV_ID0 */
+ MX6UL_PAD_ENET2_RX_DATA0__GPIO2_IO08 0x1b0b0
+ /* REV_ID1 */
+ MX6UL_PAD_ENET2_RX_DATA1__GPIO2_IO09 0x1b0b0
+ /* REV_ID2 */
+ MX6UL_PAD_ENET2_RX_EN__GPIO2_IO10 0x1b0b0
+ /* REV_ID3 */
+ MX6UL_PAD_ENET2_TX_DATA0__GPIO2_IO11 0x1b0b0
+ /* BOARD_ID0 */
+ MX6UL_PAD_ENET2_TX_EN__GPIO2_IO13 0x1b0b0
+ /* BOARD_ID1 */
+ MX6UL_PAD_ENET2_TX_CLK__GPIO2_IO14 0x1b0b0
+ /* BOARD_ID2 */
+ MX6UL_PAD_ENET2_RX_ER__GPIO2_IO15 0x1b0b0
+ /* BOARD_ID3 */
+ MX6UL_PAD_ENET2_TX_DATA1__GPIO2_IO12 0x1b0b0
+ /* Safety controller IO */
+ /* WAKE_SC */
+ MX6UL_PAD_SNVS_TAMPER6__GPIO5_IO06 0x1b0b0
+ /* PROGRAM_SC */
+ MX6UL_PAD_SNVS_TAMPER7__GPIO5_IO07 0x1b0b0
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6UL_PAD_CSI_MCLK__I2C1_SDA 0x4001b8b0
+ MX6UL_PAD_CSI_PIXCLK__I2C1_SCL 0x4001b8b0
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX6UL_PAD_CSI_VSYNC__I2C2_SDA 0x4001b8b0
+ MX6UL_PAD_CSI_HSYNC__I2C2_SCL 0x4001b8b0
+ >;
+ };
+
+ pinctrl_leds: ledsgrp {
+ fsl,pins = <
+ MX6UL_PAD_NAND_DQS__GPIO4_IO16 0x1b0b0
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX 0x1b0b1
+ MX6UL_PAD_UART1_RX_DATA__UART1_DCE_RX 0x1b0b1
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x070b1
+ MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x07099
+ MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x070b1
+ MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x070b1
+ MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x070b1
+ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x070b1
+ /* SD1 CD */
+ MX6UL_PAD_NAND_READY_B__GPIO4_IO12 0x170b0
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX6UL_PAD_NAND_WE_B__USDHC2_CMD 0x170f9
+ MX6UL_PAD_NAND_RE_B__USDHC2_CLK 0x100f9
+ MX6UL_PAD_NAND_DATA00__USDHC2_DATA0 0x170f9
+ MX6UL_PAD_NAND_DATA01__USDHC2_DATA1 0x170f9
+ MX6UL_PAD_NAND_DATA02__USDHC2_DATA2 0x170f9
+ MX6UL_PAD_NAND_DATA03__USDHC2_DATA3 0x170f9
+ MX6UL_PAD_NAND_DATA04__USDHC2_DATA4 0x170f9
+ MX6UL_PAD_NAND_DATA05__USDHC2_DATA5 0x170f9
+ MX6UL_PAD_NAND_DATA06__USDHC2_DATA6 0x170f9
+ MX6UL_PAD_NAND_DATA07__USDHC2_DATA7 0x170f9
+ MX6UL_PAD_NAND_ALE__USDHC2_RESET_B 0x170b0
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
index 9d3411cc597b..afeec01f6522 100644
--- a/arch/arm/boot/dts/imx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -538,6 +538,7 @@
fsl,num-tx-queues = <1>;
fsl,num-rx-queues = <1>;
fsl,stop-mode = <&gpr 0x10 4>;
+ fsl,magic-packet;
status = "disabled";
};
@@ -885,6 +886,7 @@
fsl,num-tx-queues = <1>;
fsl,num-rx-queues = <1>;
fsl,stop-mode = <&gpr 0x10 3>;
+ fsl,magic-packet;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx7d-flex-concentrator.dts b/arch/arm/boot/dts/imx7d-flex-concentrator.dts
index 84b095279e65..bd6b5285aa8d 100644
--- a/arch/arm/boot/dts/imx7d-flex-concentrator.dts
+++ b/arch/arm/boot/dts/imx7d-flex-concentrator.dts
@@ -115,6 +115,7 @@
compatible = "nxp,pcf2127";
reg = <0>;
spi-max-frequency = <2000000>;
+ reset-source;
};
};
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index 251007a7b836..a22d41e0cf31 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -151,6 +151,7 @@
timer {
compatible = "arm,armv7-timer";
+ arm,cpu-registers-not-fw-configured;
interrupt-parent = <&intc>;
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
diff --git a/arch/arm/boot/dts/keystone-k2e.dtsi b/arch/arm/boot/dts/keystone-k2e.dtsi
index 2d94faf31fab..b8f152e7af7f 100644
--- a/arch/arm/boot/dts/keystone-k2e.dtsi
+++ b/arch/arm/boot/dts/keystone-k2e.dtsi
@@ -52,7 +52,7 @@
usb: usb@2680000 {
interrupts = <GIC_SPI 152 IRQ_TYPE_EDGE_RISING>;
- dwc3@2690000 {
+ usb@2690000 {
interrupts = <GIC_SPI 152 IRQ_TYPE_EDGE_RISING>;
};
};
@@ -78,8 +78,8 @@
dma-ranges;
status = "disabled";
- usb1: dwc3@25010000 {
- compatible = "synopsys,dwc3";
+ usb1: usb@25010000 {
+ compatible = "snps,dwc3";
reg = <0x25010000 0x70000>;
interrupts = <GIC_SPI 414 IRQ_TYPE_EDGE_RISING>;
usb-phy = <&usb1_phy>, <&usb1_phy>;
diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi
index c298675a29a5..fc9fdc857ae8 100644
--- a/arch/arm/boot/dts/keystone.dtsi
+++ b/arch/arm/boot/dts/keystone.dtsi
@@ -217,8 +217,8 @@
dma-ranges;
status = "disabled";
- usb0: dwc3@2690000 {
- compatible = "synopsys,dwc3";
+ usb0: usb@2690000 {
+ compatible = "snps,dwc3";
reg = <0x2690000 0x70000>;
interrupts = <GIC_SPI 393 IRQ_TYPE_EDGE_RISING>;
usb-phy = <&usb_phy>, <&usb_phy>;
diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
index 3a5cfb0ddb20..c87066d6c995 100644
--- a/arch/arm/boot/dts/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/lpc32xx.dtsi
@@ -326,9 +326,6 @@
clocks = <&xtal_32k>, <&xtal>;
clock-names = "xtal_32k", "xtal";
-
- assigned-clocks = <&clk LPC32XX_CLK_HCLK_PLL>;
- assigned-clock-rates = <208000000>;
};
};
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index 7649dd1e0b9e..8bae6ed0abb2 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -11,6 +11,11 @@
#size-cells = <1>;
interrupt-parent = <&gic>;
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&saradc 8>;
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <1>;
@@ -195,6 +200,13 @@
#size-cells = <1>;
ranges = <0x0 0xc8100000 0x100000>;
+ ao_arc_rproc: remoteproc@1c {
+ compatible= "amlogic,meson-mx-ao-arc";
+ reg = <0x1c 0x8>, <0x38 0x8>;
+ reg-names = "remap", "cpu";
+ status = "disabled";
+ };
+
ir_receiver: ir-receiver@480 {
compatible= "amlogic,meson6-ir";
reg = <0x480 0x20>;
@@ -293,6 +305,13 @@
};
};
+ thermal_sensor: thermal-sensor {
+ compatible = "generic-adc-thermal";
+ #thermal-sensor-cells = <0>;
+ io-channels = <&saradc 8>;
+ io-channel-names = "sensor-channel";
+ };
+
xtal: xtal-clk {
compatible = "fixed-clock";
clock-frequency = <24000000>;
diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
index 04688e8abce2..157a950a55d3 100644
--- a/arch/arm/boot/dts/meson8.dtsi
+++ b/arch/arm/boot/dts/meson8.dtsi
@@ -9,6 +9,7 @@
#include <dt-bindings/power/meson8-power.h>
#include <dt-bindings/reset/amlogic,meson8b-clkc-reset.h>
#include <dt-bindings/reset/amlogic,meson8b-reset.h>
+#include <dt-bindings/thermal/thermal.h>
#include "meson.dtsi"
/ {
@@ -28,6 +29,7 @@
resets = <&clkc CLKC_RESET_CPU0_SOFT_RESET>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPUCLK>;
+ #cooling-cells = <2>; /* min followed by max */
};
cpu1: cpu@201 {
@@ -39,6 +41,7 @@
resets = <&clkc CLKC_RESET_CPU1_SOFT_RESET>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPUCLK>;
+ #cooling-cells = <2>; /* min followed by max */
};
cpu2: cpu@202 {
@@ -50,6 +53,7 @@
resets = <&clkc CLKC_RESET_CPU2_SOFT_RESET>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPUCLK>;
+ #cooling-cells = <2>; /* min followed by max */
};
cpu3: cpu@203 {
@@ -61,6 +65,7 @@
resets = <&clkc CLKC_RESET_CPU3_SOFT_RESET>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPUCLK>;
+ #cooling-cells = <2>; /* min followed by max */
};
};
@@ -190,6 +195,54 @@
};
};
+ thermal-zones {
+ soc {
+ polling-delay-passive = <250>; /* milliseconds */
+ polling-delay = <1000>; /* milliseconds */
+ thermal-sensors = <&thermal_sensor>;
+
+ cooling-maps {
+ map0 {
+ trip = <&soc_passive>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&mali THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+
+ map1 {
+ trip = <&soc_hot>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&mali THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
+ trips {
+ soc_passive: soc-passive {
+ temperature = <80000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "passive";
+ };
+
+ soc_hot: soc-hot {
+ temperature = <90000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "hot";
+ };
+
+ soc_critical: soc-critical {
+ temperature = <110000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "critical";
+ };
+ };
+ };
+ };
+
mmcbus: bus@c8000000 {
compatible = "simple-bus";
reg = <0xc8000000 0x8000>;
@@ -254,6 +307,7 @@
clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
clock-names = "bus", "core";
operating-points-v2 = <&gpu_opp_table>;
+ #cooling-cells = <2>; /* min followed by max */
};
};
}; /* end of / */
@@ -315,6 +369,14 @@
};
};
+&ao_arc_rproc {
+ compatible= "amlogic,meson8-ao-arc", "amlogic,meson-mx-ao-arc";
+ amlogic,secbus2 = <&secbus2>;
+ sram = <&ao_arc_sram>;
+ resets = <&reset RESET_MEDIA_CPU>;
+ clocks = <&clkc CLKID_AO_MEDIA_CPU>;
+};
+
&cbus {
reset: reset-controller@4404 {
compatible = "amlogic,meson8b-reset";
@@ -442,6 +504,12 @@
};
&ahb_sram {
+ ao_arc_sram: ao-arc-sram@0 {
+ compatible = "amlogic,meson8-ao-arc-sram";
+ reg = <0x0 0x8000>;
+ pool;
+ };
+
smp-sram@1ff80 {
compatible = "amlogic,meson8-smp-sram";
reg = <0x1ff80 0x8>;
@@ -577,6 +645,13 @@
clock-names = "clkin0", "clkin1", "clkin2", "clkin3", "pclk";
};
+&secbus {
+ secbus2: system-controller@4000 {
+ compatible = "amlogic,meson8-secbus2", "syscon";
+ reg = <0x4000 0x2000>;
+ };
+};
+
&sdio {
compatible = "amlogic,meson8-sdio", "amlogic,meson-mx-sdio";
clocks = <&clkc CLKID_SDIO>, <&clkc CLKID_CLK81>;
diff --git a/arch/arm/boot/dts/meson8b-ec100.dts b/arch/arm/boot/dts/meson8b-ec100.dts
index ed06102a4014..8e48ccc6b634 100644
--- a/arch/arm/boot/dts/meson8b-ec100.dts
+++ b/arch/arm/boot/dts/meson8b-ec100.dts
@@ -70,11 +70,6 @@
timeout-ms = <20000>;
};
- iio-hwmon {
- compatible = "iio-hwmon";
- io-channels = <&saradc 8>;
- };
-
leds {
compatible = "gpio-leds";
diff --git a/arch/arm/boot/dts/meson8b-mxq.dts b/arch/arm/boot/dts/meson8b-mxq.dts
index 33037ef62d0a..f3937d55472d 100644
--- a/arch/arm/boot/dts/meson8b-mxq.dts
+++ b/arch/arm/boot/dts/meson8b-mxq.dts
@@ -27,11 +27,6 @@
reg = <0x40000000 0x40000000>;
};
- iio-hwmon {
- compatible = "iio-hwmon";
- io-channels = <&saradc 8>;
- };
-
vcck: regulator-vcck {
compatible = "pwm-regulator";
diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
index 5963566dbcc9..c440ef94e082 100644
--- a/arch/arm/boot/dts/meson8b-odroidc1.dts
+++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
@@ -85,11 +85,6 @@
1800000 1>;
};
- iio-hwmon {
- compatible = "iio-hwmon";
- io-channels = <&saradc 8>;
- };
-
rtc32k_xtal: rtc32k-xtal-clk {
/* X3 in the schematics */
compatible = "fixed-clock";
diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
index 2401cdf5f751..c02b03cbcdf4 100644
--- a/arch/arm/boot/dts/meson8b.dtsi
+++ b/arch/arm/boot/dts/meson8b.dtsi
@@ -10,6 +10,7 @@
#include <dt-bindings/power/meson8-power.h>
#include <dt-bindings/reset/amlogic,meson8b-reset.h>
#include <dt-bindings/reset/amlogic,meson8b-clkc-reset.h>
+#include <dt-bindings/thermal/thermal.h>
#include "meson.dtsi"
/ {
@@ -26,6 +27,7 @@
resets = <&clkc CLKC_RESET_CPU0_SOFT_RESET>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPUCLK>;
+ #cooling-cells = <2>; /* min followed by max */
};
cpu1: cpu@201 {
@@ -37,6 +39,7 @@
resets = <&clkc CLKC_RESET_CPU1_SOFT_RESET>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPUCLK>;
+ #cooling-cells = <2>; /* min followed by max */
};
cpu2: cpu@202 {
@@ -48,6 +51,7 @@
resets = <&clkc CLKC_RESET_CPU2_SOFT_RESET>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPUCLK>;
+ #cooling-cells = <2>; /* min followed by max */
};
cpu3: cpu@203 {
@@ -59,6 +63,7 @@
resets = <&clkc CLKC_RESET_CPU3_SOFT_RESET>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPUCLK>;
+ #cooling-cells = <2>; /* min followed by max */
};
};
@@ -167,6 +172,54 @@
};
};
+ thermal-zones {
+ soc {
+ polling-delay-passive = <250>; /* milliseconds */
+ polling-delay = <1000>; /* milliseconds */
+ thermal-sensors = <&thermal_sensor>;
+
+ cooling-maps {
+ map0 {
+ trip = <&soc_passive>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&mali THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+
+ map1 {
+ trip = <&soc_hot>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&mali THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+
+ trips {
+ soc_passive: soc-passive {
+ temperature = <80000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "passive";
+ };
+
+ soc_hot: soc-hot {
+ temperature = <90000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "hot";
+ };
+
+ soc_critical: soc-critical {
+ temperature = <110000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "critical";
+ };
+ };
+ };
+ };
+
mmcbus: bus@c8000000 {
compatible = "simple-bus";
reg = <0xc8000000 0x8000>;
@@ -221,6 +274,7 @@
clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
clock-names = "bus", "core";
operating-points-v2 = <&gpu_opp_table>;
+ #cooling-cells = <2>; /* min followed by max */
};
};
}; /* end of / */
@@ -266,6 +320,14 @@
};
};
+&ao_arc_rproc {
+ compatible= "amlogic,meson8b-ao-arc", "amlogic,meson-mx-ao-arc";
+ amlogic,secbus2 = <&secbus2>;
+ sram = <&ao_arc_sram>;
+ resets = <&reset RESET_MEDIA_CPU>;
+ clocks = <&clkc CLKID_AO_MEDIA_CPU>;
+};
+
&cbus {
reset: reset-controller@4404 {
compatible = "amlogic,meson8b-reset";
@@ -410,6 +472,12 @@
};
&ahb_sram {
+ ao_arc_sram: ao-arc-sram@0 {
+ compatible = "amlogic,meson8b-ao-arc-sram";
+ reg = <0x0 0x8000>;
+ pool;
+ };
+
smp-sram@1ff80 {
compatible = "amlogic,meson8b-smp-sram";
reg = <0x1ff80 0x8>;
@@ -574,6 +642,13 @@
clock-names = "clkin0", "clkin1", "clkin2", "clkin3", "pclk";
};
+&secbus {
+ secbus2: system-controller@4000 {
+ compatible = "amlogic,meson8b-secbus2", "syscon";
+ reg = <0x4000 0x2000>;
+ };
+};
+
&sdio {
compatible = "amlogic,meson8b-sdio", "amlogic,meson-mx-sdio";
clocks = <&clkc CLKID_SDIO>, <&clkc CLKID_CLK81>;
diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
index 8f4eb1ed4581..fa6d55f1cfb9 100644
--- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
+++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
@@ -45,11 +45,6 @@
};
};
- iio-hwmon {
- compatible = "iio-hwmon";
- io-channels = <&saradc 8>;
- };
-
vcc_3v3: regulator-vcc3v3 {
compatible = "regulator-fixed";
regulator-name = "VCC3V3";
diff --git a/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts b/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts
index 342304f5653a..55ea87870af3 100644
--- a/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts
+++ b/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts
@@ -2,7 +2,7 @@
/*
* OLPC XO 1.75 Laptop.
*
- * Copyright (C) 2018,2019 Lubomir Rintel <lkundrak@v3.sk>
+ * Copyright (C) 2018,2019,2020 Lubomir Rintel <lkundrak@v3.sk>
*/
/dts-v1/;
@@ -10,6 +10,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/clock/marvell,mmp2-audio.h>
/ {
model = "OLPC XO-1.75";
@@ -32,8 +33,7 @@
};
};
- memory {
- linux,usable-memory = <0x0 0x1f800000>;
+ memory@0 {
available = <0xcf000 0x1ef31000 0x1000 0xbf000>;
reg = <0x0 0x20000000>;
device_type = "memory";
@@ -195,7 +195,7 @@
port {
rt5631_0: endpoint {
mclk-fs = <256>;
- clocks = <&audio_clk 0>;
+ clocks = <&audio_clk MMP2_CLK_AUDIO_SYSCLK>;
remote-endpoint = <&sspa0_0>;
};
};
diff --git a/arch/arm/boot/dts/mmp2.dtsi b/arch/arm/boot/dts/mmp2.dtsi
index 445bdcd50b9e..46984d4c5224 100644
--- a/arch/arm/boot/dts/mmp2.dtsi
+++ b/arch/arm/boot/dts/mmp2.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/clock/marvell,mmp2.h>
#include <dt-bindings/power/marvell,mmp2.h>
+#include <dt-bindings/clock/marvell,mmp2-audio.h>
/ {
#address-cells = <1>;
@@ -243,7 +244,7 @@
interrupts = <2>;
clock-names = "audio", "bitclk";
clocks = <&soc_clocks MMP2_CLK_AUDIO>,
- <&audio_clk 1>;
+ <&audio_clk MMP2_CLK_AUDIO_SSPA0>;
power-domains = <&soc_clocks MMP2_POWER_DOMAIN_AUDIO>;
#sound-dai-cells = <0>;
status = "disabled";
@@ -256,7 +257,7 @@
interrupts = <3>;
clock-names = "audio", "bitclk";
clocks = <&soc_clocks MMP2_CLK_AUDIO>,
- <&audio_clk 2>;
+ <&audio_clk MMP2_CLK_AUDIO_SSPA1>;
power-domains = <&soc_clocks MMP2_POWER_DOMAIN_AUDIO>;
#sound-dai-cells = <0>;
status = "disabled";
diff --git a/arch/arm/boot/dts/mmp3-dell-ariel.dts b/arch/arm/boot/dts/mmp3-dell-ariel.dts
index fe3b1cd695ee..fe6df364a9eb 100644
--- a/arch/arm/boot/dts/mmp3-dell-ariel.dts
+++ b/arch/arm/boot/dts/mmp3-dell-ariel.dts
@@ -26,11 +26,21 @@
};
memory@0 {
- linux,usable-memory = <0x0 0x7f600000>;
available = <0x7f700000 0x7ff00000 0x00000000 0x7f600000>;
reg = <0x0 0x80000000>;
device_type = "memory";
};
+
+ ec_input_spi: spi {
+ compatible = "spi-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ num-chipselects = <0>;
+ sck-gpios = <&gpio 55 GPIO_ACTIVE_HIGH>;
+ miso-gpios = <&gpio 57 GPIO_ACTIVE_HIGH>;
+ mosi-gpios = <&gpio 58 GPIO_ACTIVE_HIGH>;
+ };
};
&uart3 {
@@ -96,6 +106,15 @@
&twsi4 {
status = "okay";
+
+ embedded-controller@58 {
+ compatible = "dell,wyse-ariel-ec", "ene,kb3930";
+ reg = <0x58>;
+ system-power-controller;
+
+ off-gpios = <&gpio 126 GPIO_ACTIVE_HIGH>,
+ <&gpio 127 GPIO_ACTIVE_HIGH>;
+ };
};
&ssp1 {
@@ -110,9 +129,17 @@
};
};
-&ssp2 {
- cs-gpios = <&gpio 56 GPIO_ACTIVE_LOW>;
+&ec_input_spi {
status = "okay";
+ cs-gpios = <&gpio 56 GPIO_ACTIVE_LOW>;
+
+ power-button@0 {
+ reg = <0>;
+ interrupt-parent = <&gpio>;
+ interrupts = <60 IRQ_TYPE_EDGE_RISING>;
+ compatible = "dell,wyse-ariel-ec-input", "ene,kb3930-input";
+ spi-max-frequency = <33000000>;
+ };
};
&gpu_2d {
diff --git a/arch/arm/boot/dts/mmp3.dtsi b/arch/arm/boot/dts/mmp3.dtsi
index 4ae630d37d09..a4fb9203ec1f 100644
--- a/arch/arm/boot/dts/mmp3.dtsi
+++ b/arch/arm/boot/dts/mmp3.dtsi
@@ -293,7 +293,8 @@
camera0: camera@d420a000 {
compatible = "marvell,mmp2-ccic";
reg = <0xd420a000 0x800>;
- interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <1>;
+ interrupt-parent = <&ci_mux>;
clocks = <&soc_clocks MMP2_CLK_CCIC0>;
clock-names = "axi";
power-domains = <&soc_clocks MMP3_POWER_DOMAIN_CAMERA>;
@@ -305,7 +306,8 @@
camera1: camera@d420a800 {
compatible = "marvell,mmp2-ccic";
reg = <0xd420a800 0x800>;
- interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <2>;
+ interrupt-parent = <&ci_mux>;
clocks = <&soc_clocks MMP2_CLK_CCIC1>;
clock-names = "axi";
power-domains = <&soc_clocks MMP3_POWER_DOMAIN_CAMERA>;
@@ -567,7 +569,7 @@
soc_clocks: clocks@d4050000 {
compatible = "marvell,mmp3-clock";
- reg = <0xd4050000 0x1000>,
+ reg = <0xd4050000 0x2000>,
<0xd4282800 0x400>,
<0xd4015000 0x1000>;
reg-names = "mpmu", "apmu", "apbc";
diff --git a/arch/arm/boot/dts/motorola-mapphone-common.dtsi b/arch/arm/boot/dts/motorola-mapphone-common.dtsi
index f75806d0cd47..a4423ff0df39 100644
--- a/arch/arm/boot/dts/motorola-mapphone-common.dtsi
+++ b/arch/arm/boot/dts/motorola-mapphone-common.dtsi
@@ -169,6 +169,29 @@
};
};
+&cpu_thermal {
+ polling-delay = <10000>; /* milliseconds */
+};
+
+&cpu_alert0 {
+ temperature = <80000>; /* millicelsius */
+};
+
+&cpu0 {
+ /*
+ * Note that the 1.2 GHz mode is enabled for all SoC variants in
+ * the Motorola Android Linux v3.0.8 based kernel.
+ */
+ operating-points = <
+ /* kHz uV */
+ 300000 1025000
+ 600000 1200000
+ 800000 1313000
+ 1008000 1375000
+ 1200000 1375000
+ >;
+};
+
&dss {
status = "okay";
};
diff --git a/arch/arm/boot/dts/mstar-infinity-breadbee-common.dtsi b/arch/arm/boot/dts/mstar-infinity-breadbee-common.dtsi
new file mode 100644
index 000000000000..507ff2fba837
--- /dev/null
+++ b/arch/arm/boot/dts/mstar-infinity-breadbee-common.dtsi
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 thingy.jp.
+ * Author: Daniel Palmer <daniel@thingy.jp>
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ vcc_core: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_core";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-boot-on;
+ };
+
+ vcc_dram: fixedregulator@1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_dram";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ };
+
+ vcc_io: fixedregulator@2 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_io";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ red {
+ gpios = <&gpio MSC313_GPIO_SR_IO16 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "activity";
+ };
+ yellow {
+ gpios = <&gpio MSC313_GPIO_SR_IO17 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&vcc_core>;
+};
diff --git a/arch/arm/boot/dts/mstar-infinity-msc313-breadbee_crust.dts b/arch/arm/boot/dts/mstar-infinity-msc313-breadbee_crust.dts
index f9db2ff86f2d..db4910dcb8a7 100644
--- a/arch/arm/boot/dts/mstar-infinity-msc313-breadbee_crust.dts
+++ b/arch/arm/boot/dts/mstar-infinity-msc313-breadbee_crust.dts
@@ -6,6 +6,7 @@
/dts-v1/;
#include "mstar-infinity-msc313.dtsi"
+#include "mstar-infinity-breadbee-common.dtsi"
/ {
model = "BreadBee Crust";
diff --git a/arch/arm/boot/dts/mstar-infinity3-msc313e-breadbee.dts b/arch/arm/boot/dts/mstar-infinity3-msc313e-breadbee.dts
index f0eda80a95cc..e64ca4ce1830 100644
--- a/arch/arm/boot/dts/mstar-infinity3-msc313e-breadbee.dts
+++ b/arch/arm/boot/dts/mstar-infinity3-msc313e-breadbee.dts
@@ -6,6 +6,7 @@
/dts-v1/;
#include "mstar-infinity3-msc313e.dtsi"
+#include "mstar-infinity-breadbee-common.dtsi"
/ {
model = "BreadBee";
diff --git a/arch/arm/boot/dts/omap3-echo.dts b/arch/arm/boot/dts/omap3-echo.dts
index 93ffeddada1e..b9fd113979f2 100644
--- a/arch/arm/boot/dts/omap3-echo.dts
+++ b/arch/arm/boot/dts/omap3-echo.dts
@@ -86,6 +86,38 @@
linux,axis = <REL_X>;
rotary-encoder,relative-axis;
};
+
+ speaker_amp: speaker-amplifier {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>; /* gpio_129 */
+ sound-name-prefix = "Speaker Amp";
+ VCC-supply = <&vcc1v8>;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "Misto Speaker";
+ simple-audio-card,widgets =
+ "Speaker", "Speaker";
+ simple-audio-card,routing =
+ "Speaker Amp INL", "HPL",
+ "Speaker Amp INR", "HPR",
+ "Speaker", "Speaker Amp OUTL",
+ "Speaker", "Speaker Amp OUTR";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&sound_master>;
+ simple-audio-card,frame-master = <&sound_master>;
+ simple-audio-card,aux-devs = <&speaker_amp>;
+
+ simple-audio-card,cpu {
+ sound-dai = <&mcbsp2>;
+ };
+
+ sound_master: simple-audio-card,codec {
+ sound-dai = <&codec0>;
+ system-clock-frequency = <19200000>;
+ };
+ };
};
&i2c1 {
@@ -96,6 +128,13 @@
};
};
+&mcbsp2 {
+ status = "okay";
+ #sound-dai-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcbsp2_pins>;
+};
+
&i2c2 {
clock-frequency = <400000>;
@@ -277,6 +316,22 @@
};
};
+&i2c3 {
+ clock-frequency = <400000>;
+
+ codec0: codec@18 {
+ #sound-dai-cells = <0>;
+ compatible = "ti,tlv320aic32x4";
+ reg = <0x18>;
+ clocks = <&sys_clkout1>;
+ clock-names = "mclk";
+ ldoin-supply = <&vcc1v8>;
+ iov-supply = <&vcc1v8>;
+ reset-gpios = <&gpio3 10 GPIO_ACTIVE_LOW>; /* gpio_74 */
+ };
+};
+
#include "tps65910.dtsi"
&omap3_pmx_core {
@@ -290,6 +345,9 @@
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x20dc, PIN_INPUT | MUX_MODE4) /* dss_data0.gpio_70 */
OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | MUX_MODE4) /* dss_data2.gpio_72 */
+ OMAP3_CORE1_IOPAD(0x20e4, PIN_OUTPUT | MUX_MODE4) /* dss_data4.gpio_74 */
+ OMAP3_CORE1_IOPAD(0x20fa, PIN_OUTPUT_PULLDOWN | MUX_MODE4) /* dss_data15.gpio_85 */
+ OMAP3_CORE1_IOPAD(0x2a1a, PIN_OUTPUT | MUX_MODE0) /* sys_clkout1.sys_clkout1 */
>;
};
@@ -318,6 +376,15 @@
OMAP3_CORE1_IOPAD(0x216a, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat7.sdmmc2_dat7 */
>;
};
+
+ mcbsp2_pins: pinmux_mcbsp2_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x213c, PIN_INPUT | MUX_MODE0) /* mcbsp2_fsx.mcbsp2_fsx */
+ OMAP3_CORE1_IOPAD(0x213e, PIN_INPUT | MUX_MODE0) /* mcbsp2_clkx.mcbsp2_clkx */
+ OMAP3_CORE1_IOPAD(0x2140, PIN_INPUT | MUX_MODE0) /* mcbsp2_dr.mcbsp2_dr */
+ OMAP3_CORE1_IOPAD(0x2142, PIN_OUTPUT | MUX_MODE0) /* mcbsp2_dx.mcbsp2_dx */
+ >;
+ };
};
&omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
index c8745bc800f7..938cc691bb2f 100644
--- a/arch/arm/boot/dts/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/omap3-gta04.dtsi
@@ -114,7 +114,7 @@
gpio-sck = <&gpio1 12 GPIO_ACTIVE_HIGH>;
gpio-miso = <&gpio1 18 GPIO_ACTIVE_HIGH>;
gpio-mosi = <&gpio1 20 GPIO_ACTIVE_HIGH>;
- cs-gpios = <&gpio1 19 GPIO_ACTIVE_HIGH>;
+ cs-gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
num-chipselects = <1>;
/* lcd panel */
@@ -124,7 +124,6 @@
spi-max-frequency = <100000>;
spi-cpol;
spi-cpha;
- spi-cs-high;
backlight= <&backlight>;
label = "lcd";
@@ -489,8 +488,8 @@
};
twl_power: power {
- compatible = "ti,twl4030-power";
- ti,use_poweroff;
+ compatible = "ti,twl4030-power-idle";
+ ti,system-power-controller;
};
};
};
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index 5de2be9bbe6f..99f5585097a1 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -2,7 +2,7 @@
/*
* Common device tree for IGEP boards based on AM/DM37x
*
- * Copyright (C) 2012 Javier Martinez Canillas <javier@osg.samsung.com>
+ * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
* Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com>
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/omap3-igep0020-common.dtsi b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
index af8aa5f0feb7..73d8f471b9ec 100644
--- a/arch/arm/boot/dts/omap3-igep0020-common.dtsi
+++ b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
@@ -2,7 +2,7 @@
/*
* Common Device Tree Source for IGEPv2
*
- * Copyright (C) 2014 Javier Martinez Canillas <javier@osg.samsung.com>
+ * Copyright (C) 2014 Javier Martinez Canillas <javier@dowhile0.org>
* Copyright (C) 2014 Enric Balletbo i Serra <eballetbo@gmail.com>
*/
diff --git a/arch/arm/boot/dts/omap3-igep0020-rev-f.dts b/arch/arm/boot/dts/omap3-igep0020-rev-f.dts
index 567232584f08..9dca5bfc87ab 100644
--- a/arch/arm/boot/dts/omap3-igep0020-rev-f.dts
+++ b/arch/arm/boot/dts/omap3-igep0020-rev-f.dts
@@ -2,7 +2,7 @@
/*
* Device Tree Source for IGEPv2 Rev. F (TI OMAP AM/DM37x)
*
- * Copyright (C) 2012 Javier Martinez Canillas <javier@osg.samsung.com>
+ * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
* Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com>
*/
diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts
index e341535a7162..c6f863bc03ad 100644
--- a/arch/arm/boot/dts/omap3-igep0020.dts
+++ b/arch/arm/boot/dts/omap3-igep0020.dts
@@ -2,7 +2,7 @@
/*
* Device Tree Source for IGEPv2 Rev. C (TI OMAP AM/DM37x)
*
- * Copyright (C) 2012 Javier Martinez Canillas <javier@osg.samsung.com>
+ * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
* Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com>
*/
diff --git a/arch/arm/boot/dts/omap3-igep0030-common.dtsi b/arch/arm/boot/dts/omap3-igep0030-common.dtsi
index 71b0ae807ecd..742e3e147063 100644
--- a/arch/arm/boot/dts/omap3-igep0030-common.dtsi
+++ b/arch/arm/boot/dts/omap3-igep0030-common.dtsi
@@ -2,7 +2,7 @@
/*
* Common Device Tree Source for IGEP COM MODULE
*
- * Copyright (C) 2014 Javier Martinez Canillas <javier@osg.samsung.com>
+ * Copyright (C) 2014 Javier Martinez Canillas <javier@dowhile0.org>
* Copyright (C) 2014 Enric Balletbo i Serra <eballetbo@gmail.com>
*/
diff --git a/arch/arm/boot/dts/omap3-igep0030-rev-g.dts b/arch/arm/boot/dts/omap3-igep0030-rev-g.dts
index df6ba1219830..8e9c12cf51a7 100644
--- a/arch/arm/boot/dts/omap3-igep0030-rev-g.dts
+++ b/arch/arm/boot/dts/omap3-igep0030-rev-g.dts
@@ -2,7 +2,7 @@
/*
* Device Tree Source for IGEP COM MODULE Rev. G (TI OMAP AM/DM37x)
*
- * Copyright (C) 2014 Javier Martinez Canillas <javier@osg.samsung.com>
+ * Copyright (C) 2014 Javier Martinez Canillas <javier@dowhile0.org>
* Copyright (C) 2014 Enric Balletbo i Serra <eballetbo@gmail.com>
*/
diff --git a/arch/arm/boot/dts/omap3-igep0030.dts b/arch/arm/boot/dts/omap3-igep0030.dts
index 32f31035daa2..5188f96f431e 100644
--- a/arch/arm/boot/dts/omap3-igep0030.dts
+++ b/arch/arm/boot/dts/omap3-igep0030.dts
@@ -2,7 +2,7 @@
/*
* Device Tree Source for IGEP COM MODULE Rev. E (TI OMAP AM/DM37x)
*
- * Copyright (C) 2012 Javier Martinez Canillas <javier@osg.samsung.com>
+ * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
* Copyright (C) 2012 Enric Balletbo i Serra <eballetbo@gmail.com>
*/
diff --git a/arch/arm/boot/dts/omap36xx.dtsi b/arch/arm/boot/dts/omap36xx.dtsi
index 05fe5ed127b0..20844dbc002e 100644
--- a/arch/arm/boot/dts/omap36xx.dtsi
+++ b/arch/arm/boot/dts/omap36xx.dtsi
@@ -72,7 +72,6 @@
<1375000 1375000 1375000>;
/* only on am/dm37x with speed-binned bit set */
opp-supported-hw = <0xffffffff 2>;
- turbo-mode;
};
};
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
index 3ea4c5b9fd31..e833c21f1c01 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -16,8 +16,13 @@
debounce-interval = <10>;
};
+ /*
+ * We use pad 0x4a100116 abe_dmic_din3.gpio_122 as the irq instead
+ * of the gpio interrupt to avoid lost events in deeper idle states.
+ */
slider {
label = "Keypad Slide";
+ interrupts-extended = <&omap4_pmx_core 0xd6>;
gpios = <&gpio4 26 GPIO_ACTIVE_HIGH>; /* gpio122 */
linux,input-type = <EV_SW>;
linux,code = <SW_KEYPAD_SLIDE>;
diff --git a/arch/arm/boot/dts/omap443x.dtsi b/arch/arm/boot/dts/omap443x.dtsi
index cb309743de5d..8466161197ae 100644
--- a/arch/arm/boot/dts/omap443x.dtsi
+++ b/arch/arm/boot/dts/omap443x.dtsi
@@ -33,10 +33,12 @@
};
ocp {
+ /* 4430 has only gpio_86 tshut and no talert interrupt */
bandgap: bandgap@4a002260 {
reg = <0x4a002260 0x4
0x4a00232C 0x4>;
compatible = "ti,omap4430-bandgap";
+ gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
#thermal-sensor-cells = <0>;
};
@@ -76,11 +78,11 @@
/include/ "omap443x-clocks.dtsi"
/*
- * Use dpll_per for sgx at 153.6MHz like droid4 stock v3.0.8 Android kernel
+ * Use dpll_per for sgx at 307.2MHz like droid4 stock v3.0.8 Android kernel
*/
&sgx_module {
assigned-clocks = <&l3_gfx_clkctrl OMAP4_GPU_CLKCTRL 24>,
<&dpll_per_m7x2_ck>;
- assigned-clock-rates = <0>, <153600000>;
+ assigned-clock-rates = <0>, <307200000>;
assigned-clock-parents = <&dpll_per_m7x2_ck>;
};
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 5f1a8bd13880..e025b7c9a357 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -518,6 +518,9 @@
clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
<&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
clock-names = "fck", "sys_clk";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
};
};
@@ -550,6 +553,9 @@
clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
<&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
clock-names = "fck", "sys_clk";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
};
};
diff --git a/arch/arm/boot/dts/owl-s500-cubieboard6.dts b/arch/arm/boot/dts/owl-s500-cubieboard6.dts
index 7c96c59b610d..c2b02895910c 100644
--- a/arch/arm/boot/dts/owl-s500-cubieboard6.dts
+++ b/arch/arm/boot/dts/owl-s500-cubieboard6.dts
@@ -25,12 +25,6 @@
device_type = "memory";
reg = <0x0 0x80000000>;
};
-
- uart3_clk: uart3-clk {
- compatible = "fixed-clock";
- clock-frequency = <921600>;
- #clock-cells = <0>;
- };
};
&timer {
@@ -39,5 +33,4 @@
&uart3 {
status = "okay";
- clocks = <&uart3_clk>;
};
diff --git a/arch/arm/boot/dts/owl-s500-guitar-bb-rev-b.dts b/arch/arm/boot/dts/owl-s500-guitar-bb-rev-b.dts
index e610d49395d2..7ae34a23e320 100644
--- a/arch/arm/boot/dts/owl-s500-guitar-bb-rev-b.dts
+++ b/arch/arm/boot/dts/owl-s500-guitar-bb-rev-b.dts
@@ -18,15 +18,8 @@
chosen {
stdout-path = "serial3:115200n8";
};
-
- uart3_clk: uart3-clk {
- compatible = "fixed-clock";
- clock-frequency = <921600>;
- #clock-cells = <0>;
- };
};
&uart3 {
status = "okay";
- clocks = <&uart3_clk>;
};
diff --git a/arch/arm/boot/dts/owl-s500-labrador-base-m.dts b/arch/arm/boot/dts/owl-s500-labrador-base-m.dts
index c92f8bdcb331..1585e33f703b 100644
--- a/arch/arm/boot/dts/owl-s500-labrador-base-m.dts
+++ b/arch/arm/boot/dts/owl-s500-labrador-base-m.dts
@@ -21,15 +21,8 @@
chosen {
stdout-path = "serial3:115200n8";
};
-
- uart3_clk: uart3-clk {
- compatible = "fixed-clock";
- clock-frequency = <921600>;
- #clock-cells = <0>;
- };
};
&uart3 {
status = "okay";
- clocks = <&uart3_clk>;
};
diff --git a/arch/arm/boot/dts/owl-s500-roseapplepi.dts b/arch/arm/boot/dts/owl-s500-roseapplepi.dts
index a2087e617cb2..ff91561ca99c 100644
--- a/arch/arm/boot/dts/owl-s500-roseapplepi.dts
+++ b/arch/arm/boot/dts/owl-s500-roseapplepi.dts
@@ -14,6 +14,7 @@
model = "Roseapple Pi";
aliases {
+ mmc0 = &mmc0;
serial2 = &uart2;
};
@@ -26,13 +27,100 @@
reg = <0x0 0x80000000>; /* 2GB */
};
- uart2_clk: uart2-clk {
- compatible = "fixed-clock";
- clock-frequency = <921600>;
- #clock-cells = <0>;
+ /* Fixed regulator used in the absence of PMIC */
+ sd_vcc: sd-vcc {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.1V";
+ regulator-min-microvolt = <3100000>;
+ regulator-max-microvolt = <3100000>;
+ regulator-always-on;
};
};
+&i2c0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+};
+
+&i2c1 {
+ status = "disabled";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
+};
+
+&i2c2 {
+ status = "disabled";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
+};
+
+&pinctrl {
+ i2c0_pins: i2c0-pins {
+ pinmux {
+ groups = "i2c0_mfp";
+ function = "i2c0";
+ };
+
+ pinconf {
+ pins = "i2c0_sclk", "i2c0_sdata";
+ bias-pull-up;
+ };
+ };
+
+ i2c1_pins: i2c1-pins {
+ pinconf {
+ pins = "i2c1_sclk", "i2c1_sdata";
+ bias-pull-up;
+ };
+ };
+
+ i2c2_pins: i2c2-pins {
+ pinconf {
+ pins = "i2c2_sclk", "i2c2_sdata";
+ bias-pull-up;
+ };
+ };
+
+ mmc0_pins: mmc0-pins {
+ pinmux {
+ groups = "sd0_d0_mfp", "sd0_d1_mfp", "sd0_d2_d3_mfp",
+ "sd0_cmd_mfp", "sd0_clk_mfp";
+ function = "sd0";
+ };
+
+ drv-pinconf {
+ groups = "sd0_d0_d3_drv", "sd0_cmd_drv", "sd0_clk_drv";
+ drive-strength = <8>;
+ };
+
+ bias0-pinconf {
+ pins = "sd0_d0", "sd0_d1", "sd0_d2",
+ "sd0_d3", "sd0_cmd";
+ bias-pull-up;
+ };
+
+ bias1-pinconf {
+ pins = "sd0_clk";
+ bias-pull-down;
+ };
+ };
+};
+
+/* uSD */
+&mmc0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins>;
+ no-sdio;
+ no-mmc;
+ no-1-8-v;
+ cd-gpios = <&pinctrl 117 GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
+ vmmc-supply = <&sd_vcc>;
+ vqmmc-supply = <&sd_vcc>;
+};
+
&twd_timer {
status = "okay";
};
@@ -43,5 +131,4 @@
&uart2 {
status = "okay";
- clocks = <&uart2_clk>;
};
diff --git a/arch/arm/boot/dts/owl-s500-sparky.dts b/arch/arm/boot/dts/owl-s500-sparky.dts
index c665ce8b88b4..9d8f7336bec0 100644
--- a/arch/arm/boot/dts/owl-s500-sparky.dts
+++ b/arch/arm/boot/dts/owl-s500-sparky.dts
@@ -25,12 +25,6 @@
device_type = "memory";
reg = <0x0 0x40000000>; /* 1 or 2 GiB */
};
-
- uart3_clk: uart3-clk {
- compatible = "fixed-clock";
- clock-frequency = <921600>;
- #clock-cells = <0>;
- };
};
&timer {
@@ -39,5 +33,4 @@
&uart3 {
status = "okay";
- clocks = <&uart3_clk>;
};
diff --git a/arch/arm/boot/dts/owl-s500.dtsi b/arch/arm/boot/dts/owl-s500.dtsi
index 1dbe4e8b38ac..cd635f222d26 100644
--- a/arch/arm/boot/dts/owl-s500.dtsi
+++ b/arch/arm/boot/dts/owl-s500.dtsi
@@ -5,8 +5,11 @@
* Copyright (c) 2016-2017 Andreas Färber
*/
+#include <dt-bindings/clock/actions,s500-cmu.h>
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/owl-s500-powergate.h>
+#include <dt-bindings/reset/actions,s500-reset.h>
/ {
compatible = "actions,s500";
@@ -70,6 +73,12 @@
#clock-cells = <0>;
};
+ losc: losc {
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ #clock-cells = <0>;
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <1>;
@@ -124,6 +133,7 @@
compatible = "actions,s500-uart", "actions,owl-uart";
reg = <0xb0120000 0x2000>;
interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu CLK_UART0>;
status = "disabled";
};
@@ -131,6 +141,7 @@
compatible = "actions,s500-uart", "actions,owl-uart";
reg = <0xb0122000 0x2000>;
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu CLK_UART1>;
status = "disabled";
};
@@ -138,6 +149,7 @@
compatible = "actions,s500-uart", "actions,owl-uart";
reg = <0xb0124000 0x2000>;
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu CLK_UART2>;
status = "disabled";
};
@@ -145,6 +157,7 @@
compatible = "actions,s500-uart", "actions,owl-uart";
reg = <0xb0126000 0x2000>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu CLK_UART3>;
status = "disabled";
};
@@ -152,6 +165,7 @@
compatible = "actions,s500-uart", "actions,owl-uart";
reg = <0xb0128000 0x2000>;
interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu CLK_UART4>;
status = "disabled";
};
@@ -159,6 +173,7 @@
compatible = "actions,s500-uart", "actions,owl-uart";
reg = <0xb012a000 0x2000>;
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu CLK_UART5>;
status = "disabled";
};
@@ -166,9 +181,68 @@
compatible = "actions,s500-uart", "actions,owl-uart";
reg = <0xb012c000 0x2000>;
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu CLK_UART6>;
+ status = "disabled";
+ };
+
+ cmu: clock-controller@b0160000 {
+ compatible = "actions,s500-cmu";
+ reg = <0xb0160000 0x8000>;
+ clocks = <&hosc>, <&losc>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ i2c0: i2c@b0170000 {
+ compatible = "actions,s500-i2c";
+ reg = <0xb0170000 0x4000>;
+ clocks = <&cmu CLK_I2C0>;
+ interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@b0174000 {
+ compatible = "actions,s500-i2c";
+ reg = <0xb0174000 0x4000>;
+ clocks = <&cmu CLK_I2C1>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@b0178000 {
+ compatible = "actions,s500-i2c";
+ reg = <0xb0178000 0x4000>;
+ clocks = <&cmu CLK_I2C2>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@b017c000 {
+ compatible = "actions,s500-i2c";
+ reg = <0xb017c000 0x4000>;
+ clocks = <&cmu CLK_I2C3>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
+ sirq: interrupt-controller@b01b0200 {
+ compatible = "actions,s500-sirq";
+ reg = <0xb01b0200 0x4>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, /* SIRQ0 */
+ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, /* SIRQ1 */
+ <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; /* SIRQ2 */
+ };
+
timer: timer@b0168000 {
compatible = "actions,s500-timer";
reg = <0xb0168000 0x8000>;
@@ -184,5 +258,71 @@
reg = <0xb01b0100 0x100>;
#power-domain-cells = <1>;
};
+
+ pinctrl: pinctrl@b01b0000 {
+ compatible = "actions,s500-pinctrl";
+ reg = <0xb01b0000 0x40>, /* GPIO */
+ <0xb01b0040 0x10>, /* Multiplexing Control */
+ <0xb01b0060 0x18>, /* PAD Control */
+ <0xb01b0080 0xc>; /* PAD Drive Capacity */
+ clocks = <&cmu CLK_GPIO>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 132>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>, /* GPIOA */
+ <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>, /* GPIOB */
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>, /* GPIOC */
+ <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>, /* GPIOD */
+ <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>; /* GPIOE */
+ };
+
+ dma: dma-controller@b0260000 {
+ compatible = "actions,s500-dma";
+ reg = <0xb0260000 0xd00>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ dma-channels = <12>;
+ dma-requests = <46>;
+ clocks = <&cmu CLK_DMAC>;
+ power-domains = <&sps S500_PD_DMA>;
+ };
+
+ mmc0: mmc@b0230000 {
+ compatible = "actions,s500-mmc", "actions,owl-mmc";
+ reg = <0xb0230000 0x38>;
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu CLK_SD0>;
+ resets = <&cmu RESET_SD0>;
+ dmas = <&dma 2>;
+ dma-names = "mmc";
+ status = "disabled";
+ };
+
+ mmc1: mmc@b0234000 {
+ compatible = "actions,s500-mmc", "actions,owl-mmc";
+ reg = <0xb0234000 0x38>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu CLK_SD1>;
+ resets = <&cmu RESET_SD1>;
+ dmas = <&dma 3>;
+ dma-names = "mmc";
+ status = "disabled";
+ };
+
+ mmc2: mmc@b0238000 {
+ compatible = "actions,s500-mmc", "actions,owl-mmc";
+ reg = <0xb0238000 0x38>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cmu CLK_SD2>;
+ resets = <&cmu RESET_SD2>;
+ dmas = <&dma 4>;
+ dma-names = "mmc";
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi
deleted file mode 100644
index 5898879a3038..000000000000
--- a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi
+++ /dev/null
@@ -1,243 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2011 Picochip, Jamie Iles
- */
-/ {
- model = "Picochip picoXcell PC3X2";
- compatible = "picochip,pc3x2";
- #address-cells = <1>;
- #size-cells = <1>;
-
- cpus {
- #address-cells = <0>;
- #size-cells = <0>;
-
- cpu {
- compatible = "arm,arm1176jz-s";
- device_type = "cpu";
- clock-frequency = <400000000>;
- d-cache-line-size = <32>;
- d-cache-size = <32768>;
- i-cache-line-size = <32>;
- i-cache-size = <32768>;
- };
- };
-
- clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- pclk: clock@0 {
- compatible = "fixed-clock";
- clock-outputs = "bus", "pclk";
- clock-frequency = <200000000>;
- ref-clock = <&ref_clk>, "ref";
- };
- };
-
- paxi {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x80000000 0x400000>;
-
- emac: gem@30000 {
- compatible = "cadence,gem";
- reg = <0x30000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <31>;
- };
-
- dmac1: dmac@40000 {
- compatible = "snps,dw-dmac";
- reg = <0x40000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <25>;
- };
-
- dmac2: dmac@50000 {
- compatible = "snps,dw-dmac";
- reg = <0x50000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <26>;
- };
-
- vic0: interrupt-controller@60000 {
- compatible = "arm,pl192-vic";
- interrupt-controller;
- reg = <0x60000 0x1000>;
- #interrupt-cells = <1>;
- };
-
- vic1: interrupt-controller@64000 {
- compatible = "arm,pl192-vic";
- interrupt-controller;
- reg = <0x64000 0x1000>;
- #interrupt-cells = <1>;
- };
-
- fuse: picoxcell-fuse@80000 {
- compatible = "picoxcell,fuse-pc3x2";
- reg = <0x80000 0x10000>;
- };
-
- ssi: picoxcell-spi@90000 {
- compatible = "picoxcell,spi";
- reg = <0x90000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <10>;
- };
-
- ipsec: spacc@100000 {
- compatible = "picochip,spacc-ipsec";
- reg = <0x100000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <24>;
- ref-clock = <&pclk>, "ref";
- };
-
- srtp: spacc@140000 {
- compatible = "picochip,spacc-srtp";
- reg = <0x140000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <23>;
- };
-
- l2_engine: spacc@180000 {
- compatible = "picochip,spacc-l2";
- reg = <0x180000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <22>;
- ref-clock = <&pclk>, "ref";
- };
-
- apb {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x200000 0x80000>;
-
- rtc0: rtc@0 {
- compatible = "picochip,pc3x2-rtc";
- clock-freq = <200000000>;
- reg = <0x00000 0xf>;
- interrupt-parent = <&vic1>;
- interrupts = <8>;
- };
-
- timer0: timer@10000 {
- compatible = "picochip,pc3x2-timer";
- interrupt-parent = <&vic0>;
- interrupts = <4>;
- clock-freq = <200000000>;
- reg = <0x10000 0x14>;
- };
-
- timer1: timer@10014 {
- compatible = "picochip,pc3x2-timer";
- interrupt-parent = <&vic0>;
- interrupts = <5>;
- clock-freq = <200000000>;
- reg = <0x10014 0x14>;
- };
-
- timer2: timer@10028 {
- compatible = "picochip,pc3x2-timer";
- interrupt-parent = <&vic0>;
- interrupts = <6>;
- clock-freq = <200000000>;
- reg = <0x10028 0x14>;
- };
-
- timer3: timer@1003c {
- compatible = "picochip,pc3x2-timer";
- interrupt-parent = <&vic0>;
- interrupts = <7>;
- clock-freq = <200000000>;
- reg = <0x1003c 0x14>;
- };
-
- gpio: gpio@20000 {
- compatible = "snps,dw-apb-gpio";
- reg = <0x20000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- banka: gpio-controller@0 {
- compatible = "snps,dw-apb-gpio-bank";
- gpio-controller;
- #gpio-cells = <2>;
- gpio-generic,nr-gpio = <8>;
-
- regoffset-dat = <0x50>;
- regoffset-set = <0x00>;
- regoffset-dirout = <0x04>;
- };
-
- bankb: gpio-controller@1 {
- compatible = "snps,dw-apb-gpio-bank";
- gpio-controller;
- #gpio-cells = <2>;
- gpio-generic,nr-gpio = <8>;
-
- regoffset-dat = <0x54>;
- regoffset-set = <0x0c>;
- regoffset-dirout = <0x10>;
- };
- };
-
- uart0: uart@30000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x30000 0x1000>;
- interrupt-parent = <&vic1>;
- interrupts = <10>;
- clock-frequency = <3686400>;
- reg-shift = <2>;
- reg-io-width = <4>;
- };
-
- uart1: uart@40000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x40000 0x1000>;
- interrupt-parent = <&vic1>;
- interrupts = <9>;
- clock-frequency = <3686400>;
- reg-shift = <2>;
- reg-io-width = <4>;
- };
-
- wdog: watchdog@50000 {
- compatible = "snps,dw-apb-wdg";
- reg = <0x50000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <11>;
- bus-clock = <&pclk>, "bus";
- };
- };
- };
-
- rwid-axi {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "simple-bus";
- ranges;
-
- ebi@50000000 {
- compatible = "simple-bus";
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <0 0 0x40000000 0x08000000
- 1 0 0x48000000 0x08000000
- 2 0 0x50000000 0x08000000
- 3 0 0x58000000 0x08000000>;
- };
-
- axi2pico@c0000000 {
- compatible = "picochip,axi2pico-pc3x2";
- reg = <0xc0000000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <13 14 15 16 17 18 19 20 21>;
- };
- };
-};
diff --git a/arch/arm/boot/dts/picoxcell-pc3x3.dtsi b/arch/arm/boot/dts/picoxcell-pc3x3.dtsi
deleted file mode 100644
index 0e85bb6bd150..000000000000
--- a/arch/arm/boot/dts/picoxcell-pc3x3.dtsi
+++ /dev/null
@@ -1,355 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2011 Picochip, Jamie Iles
- */
-/ {
- model = "Picochip picoXcell PC3X3";
- compatible = "picochip,pc3x3";
- #address-cells = <1>;
- #size-cells = <1>;
-
- cpus {
- #address-cells = <0>;
- #size-cells = <0>;
-
- cpu {
- compatible = "arm,arm1176jz-s";
- device_type = "cpu";
- cpu-clock = <&arm_clk>, "cpu";
- d-cache-line-size = <32>;
- d-cache-size = <32768>;
- i-cache-line-size = <32>;
- i-cache-size = <32768>;
- };
- };
-
- clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- clkgate: clkgate@800a0048 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x800a0048 4>;
- compatible = "picochip,pc3x3-clk-gate";
-
- tzprot_clk: clock@0 {
- compatible = "picochip,pc3x3-gated-clk";
- clock-outputs = "bus";
- picochip,clk-disable-bit = <0>;
- clock-frequency = <200000000>;
- ref-clock = <&ref_clk>, "ref";
- };
-
- spi_clk: clock@1 {
- compatible = "picochip,pc3x3-gated-clk";
- clock-outputs = "bus";
- picochip,clk-disable-bit = <1>;
- clock-frequency = <200000000>;
- ref-clock = <&ref_clk>, "ref";
- };
-
- dmac0_clk: clock@2 {
- compatible = "picochip,pc3x3-gated-clk";
- clock-outputs = "bus";
- picochip,clk-disable-bit = <2>;
- clock-frequency = <200000000>;
- ref-clock = <&ref_clk>, "ref";
- };
-
- dmac1_clk: clock@3 {
- compatible = "picochip,pc3x3-gated-clk";
- clock-outputs = "bus";
- picochip,clk-disable-bit = <3>;
- clock-frequency = <200000000>;
- ref-clock = <&ref_clk>, "ref";
- };
-
- ebi_clk: clock@4 {
- compatible = "picochip,pc3x3-gated-clk";
- clock-outputs = "bus";
- picochip,clk-disable-bit = <4>;
- clock-frequency = <200000000>;
- ref-clock = <&ref_clk>, "ref";
- };
-
- ipsec_clk: clock@5 {
- compatible = "picochip,pc3x3-gated-clk";
- clock-outputs = "bus";
- picochip,clk-disable-bit = <5>;
- clock-frequency = <200000000>;
- ref-clock = <&ref_clk>, "ref";
- };
-
- l2_clk: clock@6 {
- compatible = "picochip,pc3x3-gated-clk";
- clock-outputs = "bus";
- picochip,clk-disable-bit = <6>;
- clock-frequency = <200000000>;
- ref-clock = <&ref_clk>, "ref";
- };
-
- trng_clk: clock@7 {
- compatible = "picochip,pc3x3-gated-clk";
- clock-outputs = "bus";
- picochip,clk-disable-bit = <7>;
- clock-frequency = <200000000>;
- ref-clock = <&ref_clk>, "ref";
- };
-
- fuse_clk: clock@8 {
- compatible = "picochip,pc3x3-gated-clk";
- clock-outputs = "bus";
- picochip,clk-disable-bit = <8>;
- clock-frequency = <200000000>;
- ref-clock = <&ref_clk>, "ref";
- };
-
- otp_clk: clock@9 {
- compatible = "picochip,pc3x3-gated-clk";
- clock-outputs = "bus";
- picochip,clk-disable-bit = <9>;
- clock-frequency = <200000000>;
- ref-clock = <&ref_clk>, "ref";
- };
- };
-
- arm_clk: clock@11 {
- compatible = "picochip,pc3x3-pll";
- reg = <0x800a0050 0x8>;
- picochip,min-freq = <140000000>;
- picochip,max-freq = <700000000>;
- ref-clock = <&ref_clk>, "ref";
- clock-outputs = "cpu";
- };
-
- pclk: clock@12 {
- compatible = "fixed-clock";
- clock-outputs = "bus", "pclk";
- clock-frequency = <200000000>;
- ref-clock = <&ref_clk>, "ref";
- };
- };
-
- paxi {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x80000000 0x400000>;
-
- emac: gem@30000 {
- compatible = "cadence,gem";
- reg = <0x30000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <31>;
- };
-
- dmac1: dmac@40000 {
- compatible = "snps,dw-dmac";
- reg = <0x40000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <25>;
- };
-
- dmac2: dmac@50000 {
- compatible = "snps,dw-dmac";
- reg = <0x50000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <26>;
- };
-
- vic0: interrupt-controller@60000 {
- compatible = "arm,pl192-vic";
- interrupt-controller;
- reg = <0x60000 0x1000>;
- #interrupt-cells = <1>;
- };
-
- vic1: interrupt-controller@64000 {
- compatible = "arm,pl192-vic";
- interrupt-controller;
- reg = <0x64000 0x1000>;
- #interrupt-cells = <1>;
- };
-
- fuse: picoxcell-fuse@80000 {
- compatible = "picoxcell,fuse-pc3x3";
- reg = <0x80000 0x10000>;
- };
-
- ssi: picoxcell-spi@90000 {
- compatible = "picoxcell,spi";
- reg = <0x90000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <10>;
- };
-
- ipsec: spacc@100000 {
- compatible = "picochip,spacc-ipsec";
- reg = <0x100000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <24>;
- ref-clock = <&ipsec_clk>, "ref";
- };
-
- srtp: spacc@140000 {
- compatible = "picochip,spacc-srtp";
- reg = <0x140000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <23>;
- };
-
- l2_engine: spacc@180000 {
- compatible = "picochip,spacc-l2";
- reg = <0x180000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <22>;
- ref-clock = <&l2_clk>, "ref";
- };
-
- apb {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x200000 0x80000>;
-
- rtc0: rtc@0 {
- compatible = "picochip,pc3x2-rtc";
- clock-freq = <200000000>;
- reg = <0x00000 0xf>;
- interrupt-parent = <&vic0>;
- interrupts = <8>;
- };
-
- timer0: timer@10000 {
- compatible = "picochip,pc3x2-timer";
- interrupt-parent = <&vic0>;
- interrupts = <4>;
- clock-freq = <200000000>;
- reg = <0x10000 0x14>;
- };
-
- timer1: timer@10014 {
- compatible = "picochip,pc3x2-timer";
- interrupt-parent = <&vic0>;
- interrupts = <5>;
- clock-freq = <200000000>;
- reg = <0x10014 0x14>;
- };
-
- gpio: gpio@20000 {
- compatible = "snps,dw-apb-gpio";
- reg = <0x20000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- banka: gpio-controller@0 {
- compatible = "snps,dw-apb-gpio-bank";
- gpio-controller;
- #gpio-cells = <2>;
- gpio-generic,nr-gpio = <8>;
-
- regoffset-dat = <0x50>;
- regoffset-set = <0x00>;
- regoffset-dirout = <0x04>;
- };
-
- bankb: gpio-controller@1 {
- compatible = "snps,dw-apb-gpio-bank";
- gpio-controller;
- #gpio-cells = <2>;
- gpio-generic,nr-gpio = <16>;
-
- regoffset-dat = <0x54>;
- regoffset-set = <0x0c>;
- regoffset-dirout = <0x10>;
- };
-
- bankd: gpio-controller@2 {
- compatible = "snps,dw-apb-gpio-bank";
- gpio-controller;
- #gpio-cells = <2>;
- gpio-generic,nr-gpio = <30>;
-
- regoffset-dat = <0x5c>;
- regoffset-set = <0x24>;
- regoffset-dirout = <0x28>;
- };
- };
-
- uart0: uart@30000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x30000 0x1000>;
- interrupt-parent = <&vic1>;
- interrupts = <10>;
- clock-frequency = <3686400>;
- reg-shift = <2>;
- reg-io-width = <4>;
- };
-
- uart1: uart@40000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x40000 0x1000>;
- interrupt-parent = <&vic1>;
- interrupts = <9>;
- clock-frequency = <3686400>;
- reg-shift = <2>;
- reg-io-width = <4>;
- };
-
- wdog: watchdog@50000 {
- compatible = "snps,dw-apb-wdg";
- reg = <0x50000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <11>;
- bus-clock = <&pclk>, "bus";
- };
-
- timer2: timer@60000 {
- compatible = "picochip,pc3x2-timer";
- interrupt-parent = <&vic0>;
- interrupts = <6>;
- clock-freq = <200000000>;
- reg = <0x60000 0x14>;
- };
-
- timer3: timer@60014 {
- compatible = "picochip,pc3x2-timer";
- interrupt-parent = <&vic0>;
- interrupts = <7>;
- clock-freq = <200000000>;
- reg = <0x60014 0x14>;
- };
- };
- };
-
- rwid-axi {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "simple-bus";
- ranges;
-
- ebi@50000000 {
- compatible = "simple-bus";
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <0 0 0x40000000 0x08000000
- 1 0 0x48000000 0x08000000
- 2 0 0x50000000 0x08000000
- 3 0 0x58000000 0x08000000>;
- };
-
- axi2pico@c0000000 {
- compatible = "picochip,axi2pico-pc3x3";
- reg = <0xc0000000 0x10000>;
- interrupt-parent = <&vic0>;
- interrupts = <13 14 15 16 17 18 19 20 21>;
- };
-
- otp@ffff8000 {
- compatible = "picochip,otp-pc3x3";
- reg = <0xffff8000 0x8000>;
- };
- };
-};
diff --git a/arch/arm/boot/dts/picoxcell-pc7302-pc3x2.dts b/arch/arm/boot/dts/picoxcell-pc7302-pc3x2.dts
deleted file mode 100644
index 3626e5380681..000000000000
--- a/arch/arm/boot/dts/picoxcell-pc7302-pc3x2.dts
+++ /dev/null
@@ -1,78 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2011 Picochip, Jamie Iles
- */
-
-/dts-v1/;
-/include/ "picoxcell-pc3x2.dtsi"
-/ {
- model = "Picochip PC7302 (PC3X2)";
- compatible = "picochip,pc7302-pc3x2", "picochip,pc3x2";
-
- memory {
- device_type = "memory";
- reg = <0x0 0x08000000>;
- };
-
- chosen {
- stdout-path = &uart0;
- };
-
- clocks {
- ref_clk: clock@1 {
- compatible = "fixed-clock";
- clock-outputs = "ref";
- clock-frequency = <20000000>;
- };
- };
-
- rwid-axi {
- ebi@50000000 {
- nand: gpio-nand@2,0 {
- compatible = "gpio-control-nand";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <2 0x0000 0x1000>;
- bus-clock = <&pclk>, "bus";
- gpio-control-nand,io-sync-reg =
- <0x00000000 0x80220000>;
-
- gpios = <&banka 1 0 /* rdy */
- &banka 2 0 /* nce */
- &banka 3 0 /* ale */
- &banka 4 0 /* cle */
- 0 /* nwp */>;
-
- boot@100000 {
- label = "Boot";
- reg = <0x100000 0x80000>;
- };
-
- redundant-boot@200000 {
- label = "Redundant Boot";
- reg = <0x200000 0x80000>;
- };
-
- boot-env@300000 {
- label = "Boot Evironment";
- reg = <0x300000 0x20000>;
- };
-
- redundant-boot-env@320000 {
- label = "Redundant Boot Environment";
- reg = <0x300000 0x20000>;
- };
-
- kernel@380000 {
- label = "Kernel";
- reg = <0x380000 0x800000>;
- };
-
- fs@b80000 {
- label = "File System";
- reg = <0xb80000 0xf480000>;
- };
- };
- };
- };
-};
diff --git a/arch/arm/boot/dts/picoxcell-pc7302-pc3x3.dts b/arch/arm/boot/dts/picoxcell-pc7302-pc3x3.dts
deleted file mode 100644
index 3eca65e8ee09..000000000000
--- a/arch/arm/boot/dts/picoxcell-pc7302-pc3x3.dts
+++ /dev/null
@@ -1,84 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2011 Picochip, Jamie Iles
- */
-
-/dts-v1/;
-/include/ "picoxcell-pc3x3.dtsi"
-/ {
- model = "Picochip PC7302 (PC3X3)";
- compatible = "picochip,pc7302-pc3x3", "picochip,pc3x3";
-
- memory {
- device_type = "memory";
- reg = <0x0 0x08000000>;
- };
-
- chosen {
- stdout-path = &uart0;
- };
-
- clocks {
- ref_clk: clock@10 {
- compatible = "fixed-clock";
- clock-outputs = "ref";
- clock-frequency = <20000000>;
- };
-
- clkgate: clkgate@800a0048 {
- clock@4 {
- picochip,clk-no-disable;
- };
- };
- };
-
- rwid-axi {
- ebi@50000000 {
- nand: gpio-nand@2,0 {
- compatible = "gpio-control-nand";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <2 0x0000 0x1000>;
- bus-clock = <&ebi_clk>, "bus";
- gpio-control-nand,io-sync-reg =
- <0x00000000 0x80220000>;
-
- gpios = <&banka 1 0 /* rdy */
- &banka 2 0 /* nce */
- &banka 3 0 /* ale */
- &banka 4 0 /* cle */
- 0 /* nwp */>;
-
- boot@100000 {
- label = "Boot";
- reg = <0x100000 0x80000>;
- };
-
- redundant-boot@200000 {
- label = "Redundant Boot";
- reg = <0x200000 0x80000>;
- };
-
- boot-env@300000 {
- label = "Boot Evironment";
- reg = <0x300000 0x20000>;
- };
-
- redundant-boot-env@320000 {
- label = "Redundant Boot Environment";
- reg = <0x300000 0x20000>;
- };
-
- kernel@380000 {
- label = "Kernel";
- reg = <0x380000 0x800000>;
- };
-
- fs@b80000 {
- label = "File System";
- reg = <0xb80000 0xf480000>;
- };
- };
- };
- };
-};
diff --git a/arch/arm/boot/dts/prima2-evb.dts b/arch/arm/boot/dts/prima2-evb.dts
deleted file mode 100644
index 7394f764df65..000000000000
--- a/arch/arm/boot/dts/prima2-evb.dts
+++ /dev/null
@@ -1,37 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * DTS file for CSR SiRFprimaII Evaluation Board
- *
- * Copyright (c) 2012 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-/dts-v1/;
-
-/include/ "prima2.dtsi"
-
-/ {
- model = "CSR SiRFprimaII Evaluation Board";
- compatible = "sirf,prima2", "sirf,prima2-cb";
-
- memory {
- device_type = "memory";
- reg = <0x00000000 0x20000000>;
- };
-
- axi {
- peri-iobg {
- uart@b0060000 {
- pinctrl-names = "default";
- pinctrl-0 = <&uart1_pins_a>;
- };
- spi@b00d0000 {
- pinctrl-names = "default";
- pinctrl-0 = <&spi0_pins_a>;
- };
- spi@b0170000 {
- pinctrl-names = "default";
- pinctrl-0 = <&spi1_pins_a>;
- };
- };
- };
-};
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
deleted file mode 100644
index 7d3d93c22ed9..000000000000
--- a/arch/arm/boot/dts/prima2.dtsi
+++ /dev/null
@@ -1,838 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * DTS file for CSR SiRFprimaII SoC
- *
- * Copyright (c) 2012 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-/ {
- compatible = "sirf,prima2";
- #address-cells = <1>;
- #size-cells = <1>;
- interrupt-parent = <&intc>;
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- compatible = "arm,cortex-a9";
- device_type = "cpu";
- reg = <0x0>;
- d-cache-line-size = <32>;
- i-cache-line-size = <32>;
- d-cache-size = <32768>;
- i-cache-size = <32768>;
- /* from bootloader */
- timebase-frequency = <0>;
- bus-frequency = <0>;
- clock-frequency = <0>;
- clocks = <&clks 12>;
- operating-points = <
- /* kHz uV */
- 200000 1025000
- 400000 1025000
- 664000 1050000
- 800000 1100000
- >;
- clock-latency = <150000>;
- };
- };
-
- arm-pmu {
- compatible = "arm,cortex-a9-pmu";
- interrupts = <29>;
- };
-
- axi {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x40000000 0x40000000 0x80000000>;
-
- cache-controller@80040000 {
- compatible = "arm,pl310-cache";
- reg = <0x80040000 0x1000>;
- interrupts = <59>;
- arm,tag-latency = <1 1 1>;
- arm,data-latency = <1 1 1>;
- arm,filter-ranges = <0 0x40000000>;
- };
-
- intc: interrupt-controller@80020000 {
- #interrupt-cells = <1>;
- interrupt-controller;
- compatible = "sirf,prima2-intc";
- reg = <0x80020000 0x1000>;
- };
-
- sys-iobg {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x88000000 0x88000000 0x40000>;
-
- clks: clock-controller@88000000 {
- compatible = "sirf,prima2-clkc";
- reg = <0x88000000 0x1000>;
- interrupts = <3>;
- #clock-cells = <1>;
- };
-
- rstc: reset-controller@88010000 {
- compatible = "sirf,prima2-rstc";
- reg = <0x88010000 0x1000>;
- #reset-cells = <1>;
- };
-
- rsc-controller@88020000 {
- compatible = "sirf,prima2-rsc";
- reg = <0x88020000 0x1000>;
- };
-
- cphifbg@88030000 {
- compatible = "sirf,prima2-cphifbg";
- reg = <0x88030000 0x1000>;
- clocks = <&clks 42>;
- };
- };
-
- mem-iobg {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x90000000 0x90000000 0x10000>;
-
- memory-controller@90000000 {
- compatible = "sirf,prima2-memc";
- reg = <0x90000000 0x2000>;
- interrupts = <27>;
- clocks = <&clks 5>;
- };
-
- memc-monitor {
- compatible = "sirf,prima2-memcmon";
- reg = <0x90002000 0x200>;
- interrupts = <4>;
- clocks = <&clks 32>;
- };
- };
-
- disp-iobg {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x90010000 0x90010000 0x30000>;
-
- display@90010000 {
- compatible = "sirf,prima2-lcd";
- reg = <0x90010000 0x20000>;
- interrupts = <30>;
- };
-
- vpp@90020000 {
- compatible = "sirf,prima2-vpp";
- reg = <0x90020000 0x10000>;
- interrupts = <31>;
- clocks = <&clks 35>;
- resets = <&rstc 6>;
- };
- };
-
- graphics-iobg {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x98000000 0x98000000 0x8000000>;
-
- graphics@98000000 {
- compatible = "powervr,sgx531";
- reg = <0x98000000 0x8000000>;
- interrupts = <6>;
- clocks = <&clks 32>;
- };
- };
-
- multimedia-iobg {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0xa0000000 0xa0000000 0x8000000>;
-
- multimedia@a0000000 {
- compatible = "sirf,prima2-video-codec";
- reg = <0xa0000000 0x8000000>;
- interrupts = <5>;
- clocks = <&clks 33>;
- };
- };
-
- dsp-iobg {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0xa8000000 0xa8000000 0x2000000>;
-
- dspif@a8000000 {
- compatible = "sirf,prima2-dspif";
- reg = <0xa8000000 0x10000>;
- interrupts = <9>;
- resets = <&rstc 1>;
- };
-
- gps@a8010000 {
- compatible = "sirf,prima2-gps";
- reg = <0xa8010000 0x10000>;
- interrupts = <7>;
- clocks = <&clks 9>;
- resets = <&rstc 2>;
- };
-
- dsp@a9000000 {
- compatible = "sirf,prima2-dsp";
- reg = <0xa9000000 0x1000000>;
- interrupts = <8>;
- clocks = <&clks 8>;
- resets = <&rstc 0>;
- };
- };
-
- peri-iobg {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0xb0000000 0xb0000000 0x180000>,
- <0x56000000 0x56000000 0x1b00000>;
-
- timer@b0020000 {
- compatible = "sirf,prima2-tick";
- reg = <0xb0020000 0x1000>;
- interrupts = <0>;
- clocks = <&clks 11>;
- };
-
- nand@b0030000 {
- compatible = "sirf,prima2-nand";
- reg = <0xb0030000 0x10000>;
- interrupts = <41>;
- clocks = <&clks 26>;
- };
-
- audio@b0040000 {
- compatible = "sirf,prima2-audio";
- reg = <0xb0040000 0x10000>;
- interrupts = <35>;
- clocks = <&clks 27>;
- };
-
- uart0: uart@b0050000 {
- cell-index = <0>;
- compatible = "sirf,prima2-uart";
- reg = <0xb0050000 0x1000>;
- interrupts = <17>;
- fifosize = <128>;
- clocks = <&clks 13>;
- dmas = <&dmac1 5>, <&dmac0 2>;
- dma-names = "rx", "tx";
- };
-
- uart1: uart@b0060000 {
- cell-index = <1>;
- compatible = "sirf,prima2-uart";
- reg = <0xb0060000 0x1000>;
- interrupts = <18>;
- fifosize = <32>;
- clocks = <&clks 14>;
- };
-
- uart2: uart@b0070000 {
- cell-index = <2>;
- compatible = "sirf,prima2-uart";
- reg = <0xb0070000 0x1000>;
- interrupts = <19>;
- fifosize = <128>;
- clocks = <&clks 15>;
- dmas = <&dmac0 6>, <&dmac0 7>;
- dma-names = "rx", "tx";
- };
-
- usp0: usp@b0080000 {
- cell-index = <0>;
- compatible = "sirf,prima2-usp";
- reg = <0xb0080000 0x10000>;
- interrupts = <20>;
- fifosize = <128>;
- clocks = <&clks 28>;
- dmas = <&dmac1 1>, <&dmac1 2>;
- dma-names = "rx", "tx";
- };
-
- usp1: usp@b0090000 {
- cell-index = <1>;
- compatible = "sirf,prima2-usp";
- reg = <0xb0090000 0x10000>;
- interrupts = <21>;
- fifosize = <128>;
- clocks = <&clks 29>;
- dmas = <&dmac0 14>, <&dmac0 15>;
- dma-names = "rx", "tx";
- };
-
- usp2: usp@b00a0000 {
- cell-index = <2>;
- compatible = "sirf,prima2-usp";
- reg = <0xb00a0000 0x10000>;
- interrupts = <22>;
- fifosize = <128>;
- clocks = <&clks 30>;
- dmas = <&dmac0 10>, <&dmac0 11>;
- dma-names = "rx", "tx";
- };
-
- dmac0: dma-controller@b00b0000 {
- cell-index = <0>;
- compatible = "sirf,prima2-dmac";
- reg = <0xb00b0000 0x10000>;
- interrupts = <12>;
- clocks = <&clks 24>;
- #dma-cells = <1>;
- };
-
- dmac1: dma-controller@b0160000 {
- cell-index = <1>;
- compatible = "sirf,prima2-dmac";
- reg = <0xb0160000 0x10000>;
- interrupts = <13>;
- clocks = <&clks 25>;
- #dma-cells = <1>;
- };
-
- vip@b00C0000 {
- compatible = "sirf,prima2-vip";
- reg = <0xb00C0000 0x10000>;
- clocks = <&clks 31>;
- interrupts = <14>;
- sirf,vip-dma-rx-channel = <16>;
- };
-
- spi0: spi@b00d0000 {
- cell-index = <0>;
- compatible = "sirf,prima2-spi";
- reg = <0xb00d0000 0x10000>;
- interrupts = <15>;
- sirf,spi-num-chipselects = <1>;
- dmas = <&dmac1 9>,
- <&dmac1 4>;
- dma-names = "rx", "tx";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clks 19>;
- status = "disabled";
- };
-
- spi1: spi@b0170000 {
- cell-index = <1>;
- compatible = "sirf,prima2-spi";
- reg = <0xb0170000 0x10000>;
- interrupts = <16>;
- sirf,spi-num-chipselects = <1>;
- dmas = <&dmac0 12>,
- <&dmac0 13>;
- dma-names = "rx", "tx";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clks 20>;
- status = "disabled";
- };
-
- i2c0: i2c@b00e0000 {
- cell-index = <0>;
- compatible = "sirf,prima2-i2c";
- reg = <0xb00e0000 0x10000>;
- interrupts = <24>;
- clocks = <&clks 17>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- i2c1: i2c@b00f0000 {
- cell-index = <1>;
- compatible = "sirf,prima2-i2c";
- reg = <0xb00f0000 0x10000>;
- interrupts = <25>;
- clocks = <&clks 18>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- tsc@b0110000 {
- compatible = "sirf,prima2-tsc";
- reg = <0xb0110000 0x10000>;
- interrupts = <33>;
- clocks = <&clks 16>;
- };
-
- gpio: pinctrl@b0120000 {
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- compatible = "sirf,prima2-pinctrl";
- reg = <0xb0120000 0x10000>;
- interrupts = <43 44 45 46 47>;
- gpio-controller;
- interrupt-controller;
-
- lcd_16pins_a: lcd0@0 {
- lcd {
- sirf,pins = "lcd_16bitsgrp";
- sirf,function = "lcd_16bits";
- };
- };
- lcd_18pins_a: lcd0@1 {
- lcd {
- sirf,pins = "lcd_18bitsgrp";
- sirf,function = "lcd_18bits";
- };
- };
- lcd_24pins_a: lcd0@2 {
- lcd {
- sirf,pins = "lcd_24bitsgrp";
- sirf,function = "lcd_24bits";
- };
- };
- lcdrom_pins_a: lcdrom0@0 {
- lcd {
- sirf,pins = "lcdromgrp";
- sirf,function = "lcdrom";
- };
- };
- uart0_pins_a: uart0@0 {
- uart {
- sirf,pins = "uart0grp";
- sirf,function = "uart0";
- };
- };
- uart0_noflow_pins_a: uart0@1 {
- uart {
- sirf,pins = "uart0_nostreamctrlgrp";
- sirf,function = "uart0_nostreamctrl";
- };
- };
- uart1_pins_a: uart1@0 {
- uart {
- sirf,pins = "uart1grp";
- sirf,function = "uart1";
- };
- };
- uart2_pins_a: uart2@0 {
- uart {
- sirf,pins = "uart2grp";
- sirf,function = "uart2";
- };
- };
- uart2_noflow_pins_a: uart2@1 {
- uart {
- sirf,pins = "uart2_nostreamctrlgrp";
- sirf,function = "uart2_nostreamctrl";
- };
- };
- spi0_pins_a: spi0@0 {
- spi {
- sirf,pins = "spi0grp";
- sirf,function = "spi0";
- };
- };
- spi1_pins_a: spi1@0 {
- spi {
- sirf,pins = "spi1grp";
- sirf,function = "spi1";
- };
- };
- i2c0_pins_a: i2c0@0 {
- i2c {
- sirf,pins = "i2c0grp";
- sirf,function = "i2c0";
- };
- };
- i2c1_pins_a: i2c1@0 {
- i2c {
- sirf,pins = "i2c1grp";
- sirf,function = "i2c1";
- };
- };
- pwm0_pins_a: pwm0@0 {
- pwm {
- sirf,pins = "pwm0grp";
- sirf,function = "pwm0";
- };
- };
- pwm1_pins_a: pwm1@0 {
- pwm {
- sirf,pins = "pwm1grp";
- sirf,function = "pwm1";
- };
- };
- pwm2_pins_a: pwm2@0 {
- pwm {
- sirf,pins = "pwm2grp";
- sirf,function = "pwm2";
- };
- };
- pwm3_pins_a: pwm3@0 {
- pwm {
- sirf,pins = "pwm3grp";
- sirf,function = "pwm3";
- };
- };
- gps_pins_a: gps@0 {
- gps {
- sirf,pins = "gpsgrp";
- sirf,function = "gps";
- };
- };
- vip_pins_a: vip@0 {
- vip {
- sirf,pins = "vipgrp";
- sirf,function = "vip";
- };
- };
- sdmmc0_pins_a: sdmmc0@0 {
- sdmmc0 {
- sirf,pins = "sdmmc0grp";
- sirf,function = "sdmmc0";
- };
- };
- sdmmc1_pins_a: sdmmc1@0 {
- sdmmc1 {
- sirf,pins = "sdmmc1grp";
- sirf,function = "sdmmc1";
- };
- };
- sdmmc2_pins_a: sdmmc2@0 {
- sdmmc2 {
- sirf,pins = "sdmmc2grp";
- sirf,function = "sdmmc2";
- };
- };
- sdmmc3_pins_a: sdmmc3@0 {
- sdmmc3 {
- sirf,pins = "sdmmc3grp";
- sirf,function = "sdmmc3";
- };
- };
- sdmmc4_pins_a: sdmmc4@0 {
- sdmmc4 {
- sirf,pins = "sdmmc4grp";
- sirf,function = "sdmmc4";
- };
- };
- sdmmc5_pins_a: sdmmc5@0 {
- sdmmc5 {
- sirf,pins = "sdmmc5grp";
- sirf,function = "sdmmc5";
- };
- };
- i2s_mclk_pins_a: i2s_mclk@0 {
- i2s_mclk {
- sirf,pins = "i2smclkgrp";
- sirf,function = "i2s_mclk";
- };
- };
- i2s_ext_clk_input_pins_a: i2s_ext_clk_input@0 {
- i2s_ext_clk_input {
- sirf,pins = "i2s_ext_clk_inputgrp";
- sirf,function = "i2s_ext_clk_input";
- };
- };
- i2s_pins_a: i2s@0 {
- i2s {
- sirf,pins = "i2sgrp";
- sirf,function = "i2s";
- };
- };
- i2s_no_din_pins_a: i2s_no_din@0 {
- i2s_no_din {
- sirf,pins = "i2s_no_dingrp";
- sirf,function = "i2s_no_din";
- };
- };
- i2s_6chn_pins_a: i2s_6chn@0 {
- i2s_6chn {
- sirf,pins = "i2s_6chngrp";
- sirf,function = "i2s_6chn";
- };
- };
- ac97_pins_a: ac97@0 {
- ac97 {
- sirf,pins = "ac97grp";
- sirf,function = "ac97";
- };
- };
- nand_pins_a: nand@0 {
- nand {
- sirf,pins = "nandgrp";
- sirf,function = "nand";
- };
- };
- usp0_pins_a: usp0@0 {
- usp0 {
- sirf,pins = "usp0grp";
- sirf,function = "usp0";
- };
- };
- usp0_uart_nostreamctrl_pins_a: usp0@1 {
- usp0 {
- sirf,pins =
- "usp0_uart_nostreamctrl_grp";
- sirf,function =
- "usp0_uart_nostreamctrl";
- };
- };
- usp0_only_utfs_pins_a: usp0@2 {
- usp0 {
- sirf,pins = "usp0_only_utfs_grp";
- sirf,function = "usp0_only_utfs";
- };
- };
- usp0_only_urfs_pins_a: usp0@3 {
- usp0 {
- sirf,pins = "usp0_only_urfs_grp";
- sirf,function = "usp0_only_urfs";
- };
- };
- usp1_pins_a: usp1@0 {
- usp1 {
- sirf,pins = "usp1grp";
- sirf,function = "usp1";
- };
- };
- usp1_uart_nostreamctrl_pins_a: usp1@1 {
- usp1 {
- sirf,pins =
- "usp1_uart_nostreamctrl_grp";
- sirf,function =
- "usp1_uart_nostreamctrl";
- };
- };
- usp2_pins_a: usp2@0 {
- usp2 {
- sirf,pins = "usp2grp";
- sirf,function = "usp2";
- };
- };
- usp2_uart_nostreamctrl_pins_a: usp2@1 {
- usp2 {
- sirf,pins =
- "usp2_uart_nostreamctrl_grp";
- sirf,function =
- "usp2_uart_nostreamctrl";
- };
- };
- usb0_utmi_drvbus_pins_a: usb0_utmi_drvbus@0 {
- usb0_utmi_drvbus {
- sirf,pins = "usb0_utmi_drvbusgrp";
- sirf,function = "usb0_utmi_drvbus";
- };
- };
- usb1_utmi_drvbus_pins_a: usb1_utmi_drvbus@0 {
- usb1_utmi_drvbus {
- sirf,pins = "usb1_utmi_drvbusgrp";
- sirf,function = "usb1_utmi_drvbus";
- };
- };
- usb1_dp_dn_pins_a: usb1_dp_dn@0 {
- usb1_dp_dn {
- sirf,pins = "usb1_dp_dngrp";
- sirf,function = "usb1_dp_dn";
- };
- };
- uart1_route_io_usb1_pins_a: uart1_route_io_usb1@0 {
- uart1_route_io_usb1 {
- sirf,pins = "uart1_route_io_usb1grp";
- sirf,function = "uart1_route_io_usb1";
- };
- };
- warm_rst_pins_a: warm_rst@0 {
- warm_rst {
- sirf,pins = "warm_rstgrp";
- sirf,function = "warm_rst";
- };
- };
- pulse_count_pins_a: pulse_count@0 {
- pulse_count {
- sirf,pins = "pulse_countgrp";
- sirf,function = "pulse_count";
- };
- };
- cko0_pins_a: cko0@0 {
- cko0 {
- sirf,pins = "cko0grp";
- sirf,function = "cko0";
- };
- };
- cko1_pins_a: cko1@0 {
- cko1 {
- sirf,pins = "cko1grp";
- sirf,function = "cko1";
- };
- };
- };
-
- pwm@b0130000 {
- compatible = "sirf,prima2-pwm";
- reg = <0xb0130000 0x10000>;
- clocks = <&clks 21>;
- };
-
- efusesys@b0140000 {
- compatible = "sirf,prima2-efuse";
- reg = <0xb0140000 0x10000>;
- clocks = <&clks 22>;
- };
-
- pulsec@b0150000 {
- compatible = "sirf,prima2-pulsec";
- reg = <0xb0150000 0x10000>;
- interrupts = <48>;
- clocks = <&clks 23>;
- };
-
- pci-iobg {
- compatible = "sirf,prima2-pciiobg", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x56000000 0x56000000 0x1b00000>;
-
- sd0: sdhci@56000000 {
- cell-index = <0>;
- compatible = "sirf,prima2-sdhc";
- reg = <0x56000000 0x100000>;
- interrupts = <38>;
- status = "disabled";
- bus-width = <8>;
- clocks = <&clks 36>;
- };
-
- sd1: sdhci@56100000 {
- cell-index = <1>;
- compatible = "sirf,prima2-sdhc";
- reg = <0x56100000 0x100000>;
- interrupts = <38>;
- status = "disabled";
- bus-width = <4>;
- clocks = <&clks 36>;
- };
-
- sd2: sdhci@56200000 {
- cell-index = <2>;
- compatible = "sirf,prima2-sdhc";
- reg = <0x56200000 0x100000>;
- interrupts = <23>;
- status = "disabled";
- clocks = <&clks 37>;
- };
-
- sd3: sdhci@56300000 {
- cell-index = <3>;
- compatible = "sirf,prima2-sdhc";
- reg = <0x56300000 0x100000>;
- interrupts = <23>;
- status = "disabled";
- clocks = <&clks 37>;
- };
-
- sd4: sdhci@56400000 {
- cell-index = <4>;
- compatible = "sirf,prima2-sdhc";
- reg = <0x56400000 0x100000>;
- interrupts = <39>;
- status = "disabled";
- clocks = <&clks 38>;
- };
-
- sd5: sdhci@56500000 {
- cell-index = <5>;
- compatible = "sirf,prima2-sdhc";
- reg = <0x56500000 0x100000>;
- interrupts = <39>;
- clocks = <&clks 38>;
- };
-
- pci-copy@57900000 {
- compatible = "sirf,prima2-pcicp";
- reg = <0x57900000 0x100000>;
- interrupts = <40>;
- };
-
- rom-interface@57a00000 {
- compatible = "sirf,prima2-romif";
- reg = <0x57a00000 0x100000>;
- };
- };
- };
-
- rtc-iobg {
- compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x80030000 0x10000>;
-
- gpsrtc@1000 {
- compatible = "sirf,prima2-gpsrtc";
- reg = <0x1000 0x1000>;
- interrupts = <55 56 57>;
- };
-
- sysrtc@2000 {
- compatible = "sirf,prima2-sysrtc";
- reg = <0x2000 0x1000>;
- interrupts = <52 53 54>;
- };
-
- minigpsrtc@2000 {
- compatible = "sirf,prima2-minigpsrtc";
- reg = <0x2000 0x1000>;
- interrupts = <54>;
- };
-
- pwrc@3000 {
- compatible = "sirf,prima2-pwrc";
- reg = <0x3000 0x1000>;
- interrupts = <32>;
- };
- };
-
- uus-iobg {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0xb8000000 0xb8000000 0x40000>;
-
- usb0: usb@b00e0000 {
- compatible = "chipidea,ci13611a-prima2";
- reg = <0xb8000000 0x10000>;
- interrupts = <10>;
- clocks = <&clks 40>;
- };
-
- usb1: usb@b00f0000 {
- compatible = "chipidea,ci13611a-prima2";
- reg = <0xb8010000 0x10000>;
- interrupts = <11>;
- clocks = <&clks 41>;
- };
-
- sata@b00f0000 {
- compatible = "synopsys,dwc-ahsata";
- reg = <0xb8020000 0x10000>;
- interrupts = <37>;
- };
-
- security@b00f0000 {
- compatible = "sirf,prima2-security";
- reg = <0xb8030000 0x10000>;
- interrupts = <42>;
- clocks = <&clks 7>;
- };
- };
- };
-};
diff --git a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
index 4e6c50d45cb2..dace8ffeb991 100644
--- a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
+++ b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
@@ -461,11 +461,11 @@
};
gsbi@19800000 {
- status = "ok";
+ status = "okay";
qcom,mode = <GSBI_PROT_I2C>;
i2c@19880000 {
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&dragon_gsbi8_i2c_pins>;
@@ -497,17 +497,17 @@
};
gsbi@19c00000 {
- status = "ok";
+ status = "okay";
qcom,mode = <GSBI_PROT_I2C_UART>;
serial@19c40000 {
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&dragon_gsbi12_serial_pins>;
};
i2c@19c80000 {
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&dragon_gsbi12_i2c_pins>;
@@ -571,7 +571,7 @@
external-bus@1a100000 {
/* The EBI2 will instantiate first, then populate its children */
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&dragon_ebi2_pins>;
diff --git a/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts b/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
index a701d4bac320..3bce47d16ab3 100644
--- a/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
@@ -302,11 +302,11 @@
};
gsbi@16500000 {
- status = "ok";
+ status = "okay";
qcom,mode = <GSBI_PROT_I2C_UART>;
serial@16540000 {
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&gsbi6_uart_4pins>;
@@ -314,10 +314,10 @@
};
gsbi@16600000 {
- status = "ok";
+ status = "okay";
qcom,mode = <GSBI_PROT_I2C_UART>;
serial@16640000 {
- status = "ok";
+ status = "okay";
};
};
diff --git a/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts b/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts
index 209eb21cea00..0148148a8e0a 100644
--- a/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts
@@ -141,10 +141,10 @@
};
gsbi@16600000 {
- status = "ok";
+ status = "okay";
qcom,mode = <GSBI_PROT_I2C_UART>;
serial@16640000 {
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&gsbi7_uart_2pins>;
};
@@ -152,7 +152,7 @@
/* OTG */
usb@12500000 {
- status = "ok";
+ status = "okay";
dr_mode = "otg";
ulpi {
phy {
@@ -209,7 +209,7 @@
};
pci@1b500000 {
- status = "ok";
+ status = "okay";
vdda-supply = <&pm8921_s3>;
vdda_phy-supply = <&pm8921_lvs6>;
vdda_refclk-supply = <&v3p3_fixed>;
diff --git a/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts b/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
index 83aaf4a74398..d0a17b5a5fa3 100644
--- a/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
@@ -215,21 +215,21 @@
};
gsbi@16500000 {
- status = "ok";
+ status = "okay";
qcom,mode = <GSBI_PROT_UART_W_FC>;
serial@16540000 {
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&gsbi6_uart_4pins>;
};
};
gsbi@16600000 {
- status = "ok";
+ status = "okay";
qcom,mode = <GSBI_PROT_I2C_UART>;
serial@16640000 {
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&gsbi7_uart_2pins>;
};
@@ -279,7 +279,7 @@
};
pci@1b500000 {
- status = "ok";
+ status = "okay";
vdda-supply = <&pm8921_s3>;
vdda_phy-supply = <&pm8921_lvs6>;
vdda_refclk-supply = <&ext_3p3v>;
diff --git a/arch/arm/boot/dts/qcom-apq8064-sony-xperia-yuga.dts b/arch/arm/boot/dts/qcom-apq8064-sony-xperia-yuga.dts
index 8bf488fb86ad..72e47bdc5c12 100644
--- a/arch/arm/boot/dts/qcom-apq8064-sony-xperia-yuga.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-sony-xperia-yuga.dts
@@ -362,11 +362,11 @@
};
gsbi@1a200000 {
- status = "ok";
+ status = "okay";
qcom,mode = <GSBI_PROT_I2C_UART>;
serial@1a240000 {
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&gsbi5_uart_pin_a>;
diff --git a/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
index 244f04e19c9d..83793b835d40 100644
--- a/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
+++ b/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
@@ -19,13 +19,13 @@
soc {
serial@f991e000 {
- status = "ok";
+ status = "okay";
};
sdhci@f9824900 {
bus-width = <8>;
non-removable;
- status = "ok";
+ status = "okay";
vmmc-supply = <&pm8941_l20>;
vqmmc-supply = <&pm8941_s3>;
@@ -39,14 +39,14 @@
pinctrl-names = "default";
pinctrl-0 = <&sdhc2_pin_a>, <&sdhc2_cd_pin_a>;
bus-width = <4>;
- status = "ok";
+ status = "okay";
vmmc-supply = <&pm8941_l21>;
vqmmc-supply = <&pm8941_l13>;
};
usb@f9a55000 {
- status = "ok";
+ status = "okay";
phys = <&usb_hs2_phy>;
phy-select = <&tcsr 0xb000 1>;
extcon = <&smbb>, <&usb_id>;
@@ -56,7 +56,7 @@
adp-disable;
ulpi {
phy@b {
- status = "ok";
+ status = "okay";
v3p3-supply = <&pm8941_l24>;
v1p8-supply = <&pm8941_l6>;
extcon = <&smbb>;
diff --git a/arch/arm/boot/dts/qcom-ipq4018-ap120c-ac-bit.dts b/arch/arm/boot/dts/qcom-ipq4018-ap120c-ac-bit.dts
new file mode 100644
index 000000000000..028ac8e24797
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-ipq4018-ap120c-ac-bit.dts
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+
+#include "qcom-ipq4018-ap120c-ac.dtsi"
+
+/ {
+ model = "ALFA Network AP120C-AC Bit";
+
+ leds {
+ compatible = "gpio-leds";
+
+ power {
+ label = "ap120c-ac:green:power";
+ gpios = <&tlmm 5 GPIO_ACTIVE_LOW>;
+ default-state = "on";
+ };
+
+ wlan {
+ label = "ap120c-ac:green:wlan";
+ gpios = <&tlmm 3 GPIO_ACTIVE_HIGH>;
+ };
+
+ support {
+ label = "ap120c-ac:green:support";
+ gpios = <&tlmm 2 GPIO_ACTIVE_HIGH>;
+ panic-indicator;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom-ipq4018-ap120c-ac.dts b/arch/arm/boot/dts/qcom-ipq4018-ap120c-ac.dts
new file mode 100644
index 000000000000..b7916fc26d68
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-ipq4018-ap120c-ac.dts
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+
+#include "qcom-ipq4018-ap120c-ac.dtsi"
+
+/ {
+ leds {
+ compatible = "gpio-leds";
+
+ status: status {
+ label = "ap120c-ac:blue:status";
+ gpios = <&tlmm 5 GPIO_ACTIVE_LOW>;
+ default-state = "keep";
+ };
+
+ wlan2g {
+ label = "ap120c-ac:green:wlan2g";
+ gpios = <&tlmm 3 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "phy0tpt";
+ };
+
+ wlan5g {
+ label = "ap120c-ac:red:wlan5g";
+ gpios = <&tlmm 2 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "phy1tpt";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom-ipq4018-ap120c-ac.dtsi b/arch/arm/boot/dts/qcom-ipq4018-ap120c-ac.dtsi
new file mode 100644
index 000000000000..1f3b1ce82108
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-ipq4018-ap120c-ac.dtsi
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+
+#include "qcom-ipq4019.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "ALFA Network AP120C-AC";
+ compatible = "alfa-network,ap120c-ac";
+
+ keys {
+ compatible = "gpio-keys";
+
+ reset {
+ label = "reset";
+ gpios = <&tlmm 63 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_RESTART>;
+ };
+ };
+};
+
+&tlmm {
+ i2c0_pins: i2c0_pinmux {
+ mux_i2c {
+ function = "blsp_i2c0";
+ pins = "gpio58", "gpio59";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ mdio_pins: mdio_pinmux {
+ mux_mdio {
+ pins = "gpio53";
+ function = "mdio";
+ bias-pull-up;
+ };
+
+ mux_mdc {
+ pins = "gpio52";
+ function = "mdc";
+ bias-pull-up;
+ };
+ };
+
+ serial0_pins: serial0_pinmux {
+ mux_uart {
+ pins = "gpio60", "gpio61";
+ function = "blsp_uart0";
+ bias-disable;
+ };
+ };
+
+ spi0_pins: spi0_pinmux {
+ mux_spi {
+ function = "blsp_spi0";
+ pins = "gpio55", "gpio56", "gpio57";
+ drive-strength = <12>;
+ bias-disable;
+ };
+
+ mux_cs {
+ function = "gpio";
+ pins = "gpio54", "gpio4";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ usb-power {
+ line-name = "USB-power";
+ gpios = <1 GPIO_ACTIVE_HIGH>;
+ gpio-hog;
+ output-high;
+ };
+};
+
+&watchdog {
+ status = "okay";
+};
+
+&prng {
+ status = "okay";
+};
+
+&blsp_dma {
+ status = "okay";
+};
+
+&blsp1_i2c3 {
+ status = "okay";
+
+ pinctrl-0 = <&i2c0_pins>;
+ pinctrl-names = "default";
+
+ tpm@29 {
+ compatible = "atmel,at97sc3204t";
+ reg = <0x29>;
+ };
+};
+
+&blsp1_spi1 {
+ status = "okay";
+
+ pinctrl-0 = <&spi0_pins>;
+ pinctrl-names = "default";
+ cs-gpios = <&tlmm 54 GPIO_ACTIVE_HIGH>, <&tlmm 4 GPIO_ACTIVE_HIGH>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <24000000>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "SBL1";
+ reg = <0x00000000 0x00040000>;
+ read-only;
+ };
+
+ partition@40000 {
+ label = "MIBIB";
+ reg = <0x00040000 0x00020000>;
+ read-only;
+ };
+
+ partition@60000 {
+ label = "QSEE";
+ reg = <0x00060000 0x00060000>;
+ read-only;
+ };
+
+ partition@c0000 {
+ label = "CDT";
+ reg = <0x000c0000 0x00010000>;
+ read-only;
+ };
+
+ partition@d0000 {
+ label = "DDRPARAMS";
+ reg = <0x000d0000 0x00010000>;
+ read-only;
+ };
+
+ partition@e0000 {
+ label = "u-boot-env";
+ reg = <0x000e0000 0x00010000>;
+ };
+
+ partition@f0000 {
+ label = "u-boot";
+ reg = <0x000f0000 0x00080000>;
+ read-only;
+ };
+
+ partition@170000 {
+ label = "ART";
+ reg = <0x00170000 0x00010000>;
+ read-only;
+ };
+
+ partition@180000 {
+ label = "priv_data1";
+ reg = <0x00180000 0x00010000>;
+ read-only;
+ };
+
+ partition@190000 {
+ label = "priv_data2";
+ reg = <0x00190000 0x00010000>;
+ read-only;
+ };
+ };
+ };
+
+ nand@1 {
+ compatible = "spi-nand";
+ reg = <1>;
+ spi-max-frequency = <40000000>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "ubi1";
+ reg = <0x00000000 0x04000000>;
+ };
+
+ partition@4000000 {
+ label = "ubi2";
+ reg = <0x04000000 0x04000000>;
+ };
+ };
+ };
+};
+
+&blsp1_uart1 {
+ status = "okay";
+
+ pinctrl-0 = <&serial0_pins>;
+ pinctrl-names = "default";
+};
+
+&cryptobam {
+ status = "okay";
+};
+
+&crypto {
+ status = "okay";
+};
+
+&mdio {
+ status = "okay";
+
+ pinctrl-0 = <&mdio_pins>;
+ pinctrl-names = "default";
+};
+
+&wifi0 {
+ status = "okay";
+};
+
+&wifi1 {
+ status = "okay";
+ qcom,ath10k-calibration-variant = "ALFA-Network-AP120C-AC";
+};
+
+&usb3_hs_phy {
+ status = "okay";
+};
+
+&usb3 {
+ status = "okay";
+
+ dwc3@8a00000 {
+ phys = <&usb3_hs_phy>;
+ phy-names = "usb2-phy";
+ };
+};
+
+&usb2_hs_phy {
+ status = "okay";
+};
+
+&usb2 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/qcom-ipq4018-jalapeno.dts b/arch/arm/boot/dts/qcom-ipq4018-jalapeno.dts
new file mode 100644
index 000000000000..394412619894
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-ipq4018-jalapeno.dts
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+// Copyright (c) 2018, Robert Marko <robimarko@gmail.com>
+
+#include "qcom-ipq4019.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "8devices Jalapeno";
+ compatible = "8dev,jalapeno";
+};
+
+&tlmm {
+ mdio_pins: mdio_pinmux {
+ pinmux_1 {
+ pins = "gpio53";
+ function = "mdio";
+ };
+
+ pinmux_2 {
+ pins = "gpio52";
+ function = "mdc";
+ };
+
+ pinconf {
+ pins = "gpio52", "gpio53";
+ bias-pull-up;
+ };
+ };
+
+ serial_pins: serial_pinmux {
+ mux {
+ pins = "gpio60", "gpio61";
+ function = "blsp_uart0";
+ bias-disable;
+ };
+ };
+
+ spi_0_pins: spi_0_pinmux {
+ pin {
+ function = "blsp_spi0";
+ pins = "gpio55", "gpio56", "gpio57";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ pin_cs {
+ function = "gpio";
+ pins = "gpio54", "gpio59";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+};
+
+&watchdog {
+ status = "okay";
+};
+
+&prng {
+ status = "okay";
+};
+
+&blsp_dma {
+ status = "okay";
+};
+
+&blsp1_spi1 {
+ status = "okay";
+
+ pinctrl-0 = <&spi_0_pins>;
+ pinctrl-names = "default";
+ cs-gpios = <&tlmm 54 GPIO_ACTIVE_HIGH>, <&tlmm 59 GPIO_ACTIVE_HIGH>;
+
+ flash@0 {
+ status = "okay";
+
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <24000000>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "SBL1";
+ reg = <0x00000000 0x00040000>;
+ read-only;
+ };
+
+ partition@40000 {
+ label = "MIBIB";
+ reg = <0x00040000 0x00020000>;
+ read-only;
+ };
+
+ partition@60000 {
+ label = "QSEE";
+ reg = <0x00060000 0x00060000>;
+ read-only;
+ };
+
+ partition@c0000 {
+ label = "CDT";
+ reg = <0x000c0000 0x00010000>;
+ read-only;
+ };
+
+ partition@d0000 {
+ label = "DDRPARAMS";
+ reg = <0x000d0000 0x00010000>;
+ read-only;
+ };
+
+ partition@e0000 {
+ label = "u-boot-env";
+ reg = <0x000e0000 0x00010000>;
+ };
+
+ partition@f0000 {
+ label = "u-boot";
+ reg = <0x000f0000 0x00080000>;
+ read-only;
+ };
+
+ partition@170000 {
+ label = "ART";
+ reg = <0x00170000 0x00010000>;
+ read-only;
+ };
+ };
+ };
+
+ spi-nand@1 {
+ status = "okay";
+
+ compatible = "spi-nand";
+ reg = <1>;
+ spi-max-frequency = <24000000>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "ubi1";
+ reg = <0x00000000 0x04000000>;
+ };
+
+ partition@4000000 {
+ label = "ubi2";
+ reg = <0x04000000 0x04000000>;
+ };
+ };
+ };
+};
+
+&blsp1_uart1 {
+ status = "okay";
+
+ pinctrl-0 = <&serial_pins>;
+ pinctrl-names = "default";
+};
+
+&cryptobam {
+ status = "okay";
+};
+
+&crypto {
+ status = "okay";
+};
+
+&mdio {
+ status = "okay";
+
+ pinctrl-0 = <&mdio_pins>;
+ pinctrl-names = "default";
+};
+
+&wifi0 {
+ status = "okay";
+
+ qcom,ath10k-calibration-variant = "8devices-Jalapeno";
+};
+
+&wifi1 {
+ status = "okay";
+
+ qcom,ath10k-calibration-variant = "8devices-Jalapeno";
+};
+
+&usb3_ss_phy {
+ status = "okay";
+};
+
+&usb3_hs_phy {
+ status = "okay";
+};
+
+&usb3 {
+ status = "okay";
+};
+
+&usb2_hs_phy {
+ status = "okay";
+};
+
+&usb2 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi
index 418f9a022336..c93b2164db44 100644
--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi
@@ -30,7 +30,7 @@
soc {
rng@22000 {
- status = "ok";
+ status = "okay";
};
pinctrl@1000000 {
@@ -66,13 +66,13 @@
};
blsp_dma: dma@7884000 {
- status = "ok";
+ status = "okay";
};
spi@78b5000 {
pinctrl-0 = <&spi_0_pins>;
pinctrl-names = "default";
- status = "ok";
+ status = "okay";
cs-gpios = <&tlmm 54 0>;
mx25l25635e@0 {
@@ -87,27 +87,27 @@
serial@78af000 {
pinctrl-0 = <&serial_pins>;
pinctrl-names = "default";
- status = "ok";
+ status = "okay";
};
cryptobam: dma@8e04000 {
- status = "ok";
+ status = "okay";
};
crypto@8e3a000 {
- status = "ok";
+ status = "okay";
};
watchdog@b017000 {
- status = "ok";
+ status = "okay";
};
wifi@a000000 {
- status = "ok";
+ status = "okay";
};
wifi@a800000 {
- status = "ok";
+ status = "okay";
};
};
};
diff --git a/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts
index 7a96f300bc8d..b0f476ff017f 100644
--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts
+++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts
@@ -9,11 +9,11 @@
soc {
dma@7984000 {
- status = "ok";
+ status = "okay";
};
qpic-nand@79b0000 {
- status = "ok";
+ status = "okay";
};
};
};
diff --git a/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi
index 7c1eb1963c67..7a337dc08741 100644
--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi
@@ -70,23 +70,23 @@
serial@78af000 {
pinctrl-0 = <&serial_0_pins>;
pinctrl-names = "default";
- status = "ok";
+ status = "okay";
};
serial@78b0000 {
pinctrl-0 = <&serial_1_pins>;
pinctrl-names = "default";
- status = "ok";
+ status = "okay";
};
dma@7884000 {
- status = "ok";
+ status = "okay";
};
spi@78b5000 { /* BLSP1 QUP1 */
pinctrl-0 = <&spi_0_pins>;
pinctrl-names = "default";
- status = "ok";
+ status = "okay";
cs-gpios = <&tlmm 12 0>;
m25p80@0 {
@@ -99,7 +99,7 @@
};
pci@40000000 {
- status = "ok";
+ status = "okay";
perst-gpio = <&tlmm 38 0x1>;
};
diff --git a/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1-c1.dts b/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1-c1.dts
index 8c7ef6537ae6..f343a2244386 100644
--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1-c1.dts
+++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1-c1.dts
@@ -9,12 +9,12 @@
soc {
pci@40000000 {
- status = "ok";
+ status = "okay";
perst-gpio = <&tlmm 38 0x1>;
};
spi@78b6000 {
- status = "ok";
+ status = "okay";
};
pinctrl@1000000 {
@@ -43,13 +43,13 @@
serial@78b0000 {
pinctrl-0 = <&serial_1_pins>;
pinctrl-names = "default";
- status = "ok";
+ status = "okay";
};
spi@78b5000 {
pinctrl-0 = <&spi_0_pins>;
pinctrl-names = "default";
- status = "ok";
+ status = "okay";
cs-gpios = <&tlmm 12 0>;
m25p80@0 {
diff --git a/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1-c2.dts b/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1-c2.dts
index af7a9028d492..582acb681a98 100644
--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1-c2.dts
+++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1-c2.dts
@@ -19,7 +19,7 @@
serial@78b0000 {
pinctrl-0 = <&serial_1_pins>;
pinctrl-names = "default";
- status = "ok";
+ status = "okay";
};
};
};
diff --git a/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1.dtsi b/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1.dtsi
index 9f1a5a668772..94872518b5a2 100644
--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1.dtsi
@@ -49,27 +49,27 @@
serial@78af000 {
pinctrl-0 = <&serial_0_pins>;
pinctrl-names = "default";
- status = "ok";
+ status = "okay";
};
dma@7884000 {
- status = "ok";
+ status = "okay";
};
i2c@78b7000 { /* BLSP1 QUP2 */
pinctrl-0 = <&i2c_0_pins>;
pinctrl-names = "default";
- status = "ok";
+ status = "okay";
};
dma@7984000 {
- status = "ok";
+ status = "okay";
};
qpic-nand@79b0000 {
pinctrl-0 = <&nand_pins>;
pinctrl-names = "default";
- status = "ok";
+ status = "okay";
};
};
};
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index 74d8e2c8e4b3..7bf1da916f25 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -190,7 +190,7 @@
reg = <0x1800000 0x60000>;
};
- rng@22000 {
+ prng: rng@22000 {
compatible = "qcom,prng";
reg = <0x22000 0x140>;
clocks = <&gcc GCC_PRNG_AHB_CLK>;
@@ -209,6 +209,16 @@
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
};
+ vqmmc: regulator@1948000 {
+ compatible = "qcom,vqmmc-ipq4019-regulator";
+ reg = <0x01948000 0x4>;
+ regulator-name = "vqmmc";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ status = "disabled";
+ };
+
sdhci: sdhci@7824900 {
compatible = "qcom,sdhci-msm-v4";
reg = <0x7824900 0x11c>, <0x7824000 0x800>;
@@ -300,7 +310,7 @@
status = "disabled";
};
- crypto@8e3a000 {
+ crypto: crypto@8e3a000 {
compatible = "qcom,crypto-v5.1";
reg = <0x08e3a000 0x6000>;
clocks = <&gcc GCC_CRYPTO_AHB_CLK>,
@@ -386,7 +396,7 @@
dma-names = "rx", "tx";
};
- watchdog@b017000 {
+ watchdog: watchdog@b017000 {
compatible = "qcom,kpss-wdt", "qcom,kpss-wdt-ipq4019";
reg = <0xb017000 0x40>;
clocks = <&sleep_clk>;
@@ -605,5 +615,79 @@
reg = <4>;
};
};
+
+ usb3_ss_phy: ssphy@9a000 {
+ compatible = "qcom,usb-ss-ipq4019-phy";
+ #phy-cells = <0>;
+ reg = <0x9a000 0x800>;
+ reg-names = "phy_base";
+ resets = <&gcc USB3_UNIPHY_PHY_ARES>;
+ reset-names = "por_rst";
+ status = "disabled";
+ };
+
+ usb3_hs_phy: hsphy@a6000 {
+ compatible = "qcom,usb-hs-ipq4019-phy";
+ #phy-cells = <0>;
+ reg = <0xa6000 0x40>;
+ reg-names = "phy_base";
+ resets = <&gcc USB3_HSPHY_POR_ARES>, <&gcc USB3_HSPHY_S_ARES>;
+ reset-names = "por_rst", "srif_rst";
+ status = "disabled";
+ };
+
+ usb3: usb3@8af8800 {
+ compatible = "qcom,dwc3";
+ reg = <0x8af8800 0x100>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&gcc GCC_USB3_MASTER_CLK>,
+ <&gcc GCC_USB3_SLEEP_CLK>,
+ <&gcc GCC_USB3_MOCK_UTMI_CLK>;
+ clock-names = "master", "sleep", "mock_utmi";
+ ranges;
+ status = "disabled";
+
+ dwc3@8a00000 {
+ compatible = "snps,dwc3";
+ reg = <0x8a00000 0xf8000>;
+ interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&usb3_hs_phy>, <&usb3_ss_phy>;
+ phy-names = "usb2-phy", "usb3-phy";
+ dr_mode = "host";
+ };
+ };
+
+ usb2_hs_phy: hsphy@a8000 {
+ compatible = "qcom,usb-hs-ipq4019-phy";
+ #phy-cells = <0>;
+ reg = <0xa8000 0x40>;
+ reg-names = "phy_base";
+ resets = <&gcc USB2_HSPHY_POR_ARES>, <&gcc USB2_HSPHY_S_ARES>;
+ reset-names = "por_rst", "srif_rst";
+ status = "disabled";
+ };
+
+ usb2: usb2@60f8800 {
+ compatible = "qcom,dwc3";
+ reg = <0x60f8800 0x100>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&gcc GCC_USB2_MASTER_CLK>,
+ <&gcc GCC_USB2_SLEEP_CLK>,
+ <&gcc GCC_USB2_MOCK_UTMI_CLK>;
+ clock-names = "master", "sleep", "mock_utmi";
+ ranges;
+ status = "disabled";
+
+ dwc3@6000000 {
+ compatible = "snps,dwc3";
+ reg = <0x6000000 0xf8000>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&usb2_hs_phy>;
+ phy-names = "usb2-phy";
+ dr_mode = "host";
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom-ipq8064-ap148.dts b/arch/arm/boot/dts/qcom-ipq8064-ap148.dts
index 554c65e7aa0e..e5b9b9cf6097 100644
--- a/arch/arm/boot/dts/qcom-ipq8064-ap148.dts
+++ b/arch/arm/boot/dts/qcom-ipq8064-ap148.dts
@@ -24,7 +24,7 @@
gsbi@16300000 {
i2c@16380000 {
- status = "ok";
+ status = "okay";
clock-frequency = <200000>;
pinctrl-0 = <&i2c4_pins>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/qcom-ipq8064-v1.0.dtsi b/arch/arm/boot/dts/qcom-ipq8064-v1.0.dtsi
index e239a0486936..65330065390a 100644
--- a/arch/arm/boot/dts/qcom-ipq8064-v1.0.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064-v1.0.dtsi
@@ -16,19 +16,19 @@
soc {
gsbi@16300000 {
qcom,mode = <GSBI_PROT_I2C_UART>;
- status = "ok";
+ status = "okay";
serial@16340000 {
- status = "ok";
+ status = "okay";
};
};
gsbi5: gsbi@1a200000 {
qcom,mode = <GSBI_PROT_SPI>;
- status = "ok";
+ status = "okay";
spi4: spi@1a280000 {
- status = "ok";
+ status = "okay";
spi-max-frequency = <50000000>;
pinctrl-0 = <&spi_pins>;
@@ -57,12 +57,12 @@
};
sata-phy@1b400000 {
- status = "ok";
+ status = "okay";
};
sata@29000000 {
ports-implemented = <0x1>;
- status = "ok";
+ status = "okay";
};
gpio_keys {
diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi
index c51481405e7f..98995ead4413 100644
--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
@@ -20,7 +20,7 @@
#address-cells = <1>;
#size-cells = <0>;
- cpu@0 {
+ cpu0: cpu@0 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v1";
device_type = "cpu";
@@ -30,7 +30,7 @@
qcom,saw = <&saw0>;
};
- cpu@1 {
+ cpu1: cpu@1 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v1";
device_type = "cpu";
@@ -67,7 +67,7 @@
no-map;
};
- smem@41000000 {
+ smem: smem@41000000 {
reg = <0x41000000 0x200000>;
no-map;
};
@@ -251,7 +251,7 @@
syscon-tcsr = <&tcsr>;
- serial@12490000 {
+ gsbi2_serial: serial@12490000 {
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
reg = <0x12490000 0x1000>,
<0x12480000 0x1000>;
@@ -273,7 +273,6 @@
#address-cells = <1>;
#size-cells = <0>;
};
-
};
gsbi4: gsbi@16300000 {
@@ -326,7 +325,7 @@
syscon-tcsr = <&tcsr>;
- serial@1a240000 {
+ gsbi5_serial: serial@1a240000 {
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
reg = <0x1a240000 0x1000>,
<0x1a200000 0x1000>;
@@ -386,6 +385,13 @@
};
};
+ rng@1a500000 {
+ compatible = "qcom,prng";
+ reg = <0x1a500000 0x200>;
+ clocks = <&gcc PRNG_CLK>;
+ clock-names = "core";
+ };
+
sata_phy: sata-phy@1b400000 {
compatible = "qcom,ipq806x-sata-phy";
reg = <0x1b400000 0x200>;
@@ -397,7 +403,7 @@
status = "disabled";
};
- sata@29000000 {
+ sata: sata@29000000 {
compatible = "qcom,ipq806x-ahci", "generic-ahci";
reg = <0x29000000 0x180>;
@@ -720,7 +726,7 @@
regulator-always-on;
};
- sdcc1bam:dma@12402000 {
+ sdcc1bam: dma@12402000 {
compatible = "qcom,bam-v1.3.0";
reg = <0x12402000 0x8000>;
interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
@@ -730,7 +736,7 @@
qcom,ee = <0>;
};
- sdcc3bam:dma@12182000 {
+ sdcc3bam: dma@12182000 {
compatible = "qcom,bam-v1.3.0";
reg = <0x12182000 0x8000>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
@@ -740,13 +746,13 @@
qcom,ee = <0>;
};
- amba {
+ amba: amba {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges;
- sdcc@12400000 {
+ sdcc1: sdcc@12400000 {
status = "disabled";
compatible = "arm,pl18x", "arm,primecell";
arm,primecell-periphid = <0x00051180>;
@@ -766,7 +772,7 @@
dma-names = "tx", "rx";
};
- sdcc@12180000 {
+ sdcc3: sdcc@12180000 {
compatible = "arm,pl18x", "arm,primecell";
arm,primecell-periphid = <0x00051180>;
status = "disabled";
@@ -779,7 +785,6 @@
cap-sd-highspeed;
cap-mmc-highspeed;
max-frequency = <192000000>;
- #mmc-ddr-1_8v;
sd-uhs-sdr104;
sd-uhs-ddr50;
vqmmc-supply = <&vsdcc_fixed>;
diff --git a/arch/arm/boot/dts/qcom-mdm9615-wp8548.dtsi b/arch/arm/boot/dts/qcom-mdm9615-wp8548.dtsi
index 26b034bd19d2..a725b73b5a2e 100644
--- a/arch/arm/boot/dts/qcom-mdm9615-wp8548.dtsi
+++ b/arch/arm/boot/dts/qcom-mdm9615-wp8548.dtsi
@@ -125,12 +125,12 @@
};
&gsbi3 {
- status = "ok";
+ status = "okay";
qcom,mode = <GSBI_PROT_SPI>;
};
&gsbi3_spi {
- status = "ok";
+ status = "okay";
pinctrl-0 = <&gsbi3_pins>;
pinctrl-names = "default";
assigned-clocks = <&gcc GSBI3_QUP_CLK>;
@@ -138,34 +138,34 @@
};
&gsbi4 {
- status = "ok";
+ status = "okay";
qcom,mode = <GSBI_PROT_UART_W_FC>;
};
&gsbi4_serial {
- status = "ok";
+ status = "okay";
pinctrl-0 = <&gsbi4_pins>;
pinctrl-names = "default";
};
&gsbi5 {
- status = "ok";
+ status = "okay";
qcom,mode = <GSBI_PROT_I2C_UART>;
};
&gsbi5_i2c {
- status = "ok";
+ status = "okay";
clock-frequency = <200000>;
pinctrl-0 = <&gsbi5_i2c_pins>;
pinctrl-names = "default";
};
&gsbi5_serial {
- status = "ok";
+ status = "okay";
pinctrl-0 = <&gsbi5_uart_pins>;
pinctrl-names = "default";
};
&sdcc1 {
- status = "ok";
+ status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom-msm8660-surf.dts b/arch/arm/boot/dts/qcom-msm8660-surf.dts
index f01a11b18d6a..6a321ccb0bd0 100644
--- a/arch/arm/boot/dts/qcom-msm8660-surf.dts
+++ b/arch/arm/boot/dts/qcom-msm8660-surf.dts
@@ -17,10 +17,10 @@
soc {
gsbi@19c00000 {
- status = "ok";
+ status = "okay";
qcom,mode = <GSBI_PROT_I2C_UART>;
serial@19c40000 {
- status = "ok";
+ status = "okay";
};
};
diff --git a/arch/arm/boot/dts/qcom-msm8960-cdp.dts b/arch/arm/boot/dts/qcom-msm8960-cdp.dts
index 82d5d8267adf..e7d2e937ea4c 100644
--- a/arch/arm/boot/dts/qcom-msm8960-cdp.dts
+++ b/arch/arm/boot/dts/qcom-msm8960-cdp.dts
@@ -17,10 +17,10 @@
soc {
gsbi@16400000 {
- status = "ok";
+ status = "okay";
qcom,mode = <GSBI_PROT_I2C_UART>;
serial@16440000 {
- status = "ok";
+ status = "okay";
};
};
@@ -273,12 +273,12 @@
};
gsbi@16000000 {
- status = "ok";
+ status = "okay";
qcom,mode = <GSBI_PROT_SPI>;
pinctrl-names = "default";
pinctrl-0 = <&spi1_default>;
spi@16080000 {
- status = "ok";
+ status = "okay";
eth@0 {
compatible = "micrel,ks8851";
reg = <0>;
diff --git a/arch/arm/boot/dts/qcom-msm8974-fairphone-fp2.dts b/arch/arm/boot/dts/qcom-msm8974-fairphone-fp2.dts
index d2d48770ec0f..ea15b645b229 100644
--- a/arch/arm/boot/dts/qcom-msm8974-fairphone-fp2.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-fairphone-fp2.dts
@@ -256,11 +256,11 @@
&soc {
serial@f991e000 {
- status = "ok";
+ status = "okay";
};
remoteproc@fb21b000 {
- status = "ok";
+ status = "okay";
vddmx-supply = <&pm8841_s1>;
vddcx-supply = <&pm8841_s2>;
@@ -273,7 +273,7 @@
label = "pronto";
wcnss {
- status = "ok";
+ status = "okay";
};
};
};
@@ -335,7 +335,7 @@
};
sdhci@f9824900 {
- status = "ok";
+ status = "okay";
vmmc-supply = <&pm8941_l20>;
vqmmc-supply = <&pm8941_s3>;
@@ -348,7 +348,7 @@
};
sdhci@f98a4900 {
- status = "ok";
+ status = "okay";
vmmc-supply = <&pm8941_l21>;
vqmmc-supply = <&pm8941_l13>;
@@ -360,7 +360,7 @@
};
usb@f9a55000 {
- status = "ok";
+ status = "okay";
phys = <&usb_hs1_phy>;
phy-select = <&tcsr 0xb000 0>;
@@ -373,7 +373,7 @@
ulpi {
phy@a {
- status = "ok";
+ status = "okay";
v1p8-supply = <&pm8941_l6>;
v3p3-supply = <&pm8941_l24>;
diff --git a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
index e769f638f205..0cda654371ae 100644
--- a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
@@ -239,7 +239,7 @@
&soc {
serial@f991d000 {
- status = "ok";
+ status = "okay";
};
pinctrl@fd510000 {
@@ -410,7 +410,7 @@
};
sdhci@f9824900 {
- status = "ok";
+ status = "okay";
vmmc-supply = <&pm8941_l20>;
vqmmc-supply = <&pm8941_s3>;
@@ -423,7 +423,7 @@
};
sdhci@f98a4900 {
- status = "ok";
+ status = "okay";
max-frequency = <100000000>;
bus-width = <4>;
@@ -471,7 +471,7 @@
};
serial@f9960000 {
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&blsp2_uart10_pin_a>;
@@ -490,7 +490,7 @@
};
i2c@f9967000 {
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&i2c11_pins>;
clock-frequency = <355000>;
@@ -498,7 +498,7 @@
led-controller@38 {
compatible = "ti,lm3630a";
- status = "ok";
+ status = "okay";
reg = <0x38>;
#address-cells = <1>;
@@ -514,7 +514,7 @@
};
i2c@f9968000 {
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&i2c12_pins>;
clock-frequency = <100000>;
@@ -551,7 +551,7 @@
};
i2c@f9923000 {
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&i2c1_pins>;
clock-frequency = <100000>;
@@ -585,7 +585,7 @@
};
i2c@f9924000 {
- status = "ok";
+ status = "okay";
clock-frequency = <355000>;
qcom,src-freq = <50000000>;
@@ -620,7 +620,7 @@
};
i2c@f9925000 {
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&i2c3_pins>;
clock-frequency = <100000>;
@@ -638,7 +638,7 @@
};
usb@f9a55000 {
- status = "ok";
+ status = "okay";
phys = <&usb_hs1_phy>;
phy-select = <&tcsr 0xb000 0>;
@@ -652,7 +652,7 @@
ulpi {
phy@a {
- status = "ok";
+ status = "okay";
v1p8-supply = <&pm8941_l6>;
v3p3-supply = <&pm8941_l24>;
@@ -663,14 +663,14 @@
};
mdss@fd900000 {
- status = "ok";
+ status = "okay";
mdp@fd900000 {
- status = "ok";
+ status = "okay";
};
dsi@fd922800 {
- status = "ok";
+ status = "okay";
vdda-supply = <&pm8941_l2>;
vdd-supply = <&pm8941_lvs3>;
@@ -704,7 +704,7 @@
};
dsi-phy@fd922a00 {
- status = "ok";
+ status = "okay";
vddio-supply = <&pm8941_l12>;
};
diff --git a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
index 97352de91314..a0f7f461f48c 100644
--- a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
@@ -12,8 +12,8 @@
aliases {
serial0 = &blsp1_uart1;
- sdhc1 = &sdhc_1; /* SDC1 eMMC slot */
- sdhc2 = &sdhc_2; /* SDC2 SD card slot */
+ mmc0 = &sdhc_1; /* SDC1 eMMC slot */
+ mmc1 = &sdhc_2; /* SDC2 SD card slot */
};
chosen {
@@ -30,6 +30,7 @@
pma8084_s1: s1 {
regulator-min-microvolt = <675000>;
regulator-max-microvolt = <1050000>;
+ regulator-always-on;
};
pma8084_s2: s2 {
@@ -115,6 +116,7 @@
pma8084_l12: l12 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-always-on;
};
pma8084_l13: l13 {
@@ -298,12 +300,26 @@
enable-active-high;
};
+ vreg_panel: panel-regulator {
+ compatible = "regulator-fixed";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&panel_en_pin>;
+
+ regulator-name = "panel-vddr-reg";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+
+ gpio = <&pma8084_gpios 14 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
/delete-node/ vreg-boost;
};
&soc {
serial@f991e000 {
- status = "ok";
+ status = "okay";
};
gpio-keys {
@@ -453,10 +469,20 @@
bias-pull-down;
};
};
+
+ panel_te_pin: panel {
+ te {
+ pins = "gpio12";
+ function = "mdp_vsync";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
};
sdhc_1: sdhci@f9824900 {
- status = "ok";
+ status = "okay";
vmmc-supply = <&pma8084_l20>;
vqmmc-supply = <&pma8084_s4>;
@@ -469,7 +495,7 @@
};
sdhc_2: sdhci@f9864900 {
- status = "ok";
+ status = "okay";
max-frequency = <100000000>;
@@ -518,7 +544,7 @@
};
usb@f9a55000 {
- status = "ok";
+ status = "okay";
phys = <&usb_hs1_phy>;
phy-select = <&tcsr 0xb000 0>;
@@ -531,7 +557,7 @@
ulpi {
phy@a {
- status = "ok";
+ status = "okay";
v1p8-supply = <&pma8084_l6>;
v3p3-supply = <&pma8084_l24>;
@@ -697,6 +723,64 @@
pinctrl-0 = <&fuelgauge_pin>;
};
};
+
+ adreno@fdb00000 {
+ status = "ok";
+			status = "okay";
+
+ mdss@fd900000 {
+ status = "ok";
+			status = "okay";
+ mdp@fd900000 {
+ status = "ok";
+				status = "okay";
+
+ dsi@fd922800 {
+ status = "ok";
+				status = "okay";
+ vdda-supply = <&pma8084_l2>;
+ vdd-supply = <&pma8084_l22>;
+ vddio-supply = <&pma8084_l12>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ port@1 {
+ endpoint {
+ remote-endpoint = <&panel_in>;
+ data-lanes = <0 1 2 3>;
+ };
+ };
+ };
+
+ panel: panel@0 {
+ reg = <0>;
+ compatible = "samsung,s6e3fa2";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&panel_te_pin &panel_rst_pin>;
+
+ iovdd-supply = <&pma8084_lvs4>;
+ vddr-supply = <&vreg_panel>;
+
+ reset-gpios = <&pma8084_gpios 17 GPIO_ACTIVE_LOW>;
+ te-gpios = <&msmgpio 12 GPIO_ACTIVE_HIGH>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+ };
+ };
+
+ dsi-phy@fd922a00 {
+			status = "okay";
+
+ vddio-supply = <&pma8084_l12>;
+ };
+ };
};
&spmi_bus {
@@ -726,6 +810,14 @@
power-source = <PMA8084_GPIO_S4>;
};
+ panel_en_pin: panel-en-pin {
+ pins = "gpio14";
+ function = "normal";
+ bias-pull-up;
+ power-source = <PMA8084_GPIO_S4>;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+ };
+
wlan_sleep_clk_pin: wlan-sleep-clk-pin {
pins = "gpio16";
function = "func2";
@@ -735,6 +827,15 @@
qcom,drive-strength = <PMIC_GPIO_STRENGTH_HIGH>;
};
+ panel_rst_pin: panel-rst-pin {
+ pins = "gpio17";
+ function = "normal";
+ bias-disable;
+ power-source = <PMA8084_GPIO_S4>;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+ };
+
+
fuelgauge_pin: fuelgauge-int-pin {
pins = "gpio21";
function = "normal";
diff --git a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-amami.dts b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-amami.dts
index 5669f5f58a86..398a3eaf306b 100644
--- a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-amami.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-amami.dts
@@ -261,7 +261,7 @@
&soc {
sdhci@f9824900 {
- status = "ok";
+ status = "okay";
vmmc-supply = <&pm8941_l20>;
vqmmc-supply = <&pm8941_s3>;
@@ -274,7 +274,7 @@
};
sdhci@f98a4900 {
- status = "ok";
+ status = "okay";
bus-width = <4>;
@@ -288,7 +288,7 @@
};
serial@f991e000 {
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&blsp1_uart2_pin_a>;
@@ -366,7 +366,7 @@
};
usb@f9a55000 {
- status = "ok";
+ status = "okay";
phys = <&usb_hs1_phy>;
phy-select = <&tcsr 0xb000 0>;
@@ -379,7 +379,7 @@
ulpi {
phy@a {
- status = "ok";
+ status = "okay";
v1p8-supply = <&pm8941_l6>;
v3p3-supply = <&pm8941_l24>;
@@ -415,7 +415,7 @@
};
coincell@2800 {
- status = "ok";
+ status = "okay";
qcom,rset-ohms = <2100>;
qcom,vset-millivolts = <3000>;
};
@@ -423,7 +423,7 @@
pm8941@1 {
wled@d800 {
- status = "ok";
+ status = "okay";
qcom,cs-out;
qcom,current-limit = <20>;
diff --git a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-castor.dts b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-castor.dts
index 701b396719c7..f4ec08f13003 100644
--- a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-castor.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-castor.dts
@@ -279,7 +279,7 @@
&soc {
sdhci@f9824900 {
- status = "ok";
+ status = "okay";
vmmc-supply = <&pm8941_l20>;
vqmmc-supply = <&pm8941_s3>;
@@ -292,7 +292,7 @@
};
sdhci@f9864900 {
- status = "ok";
+ status = "okay";
max-frequency = <100000000>;
non-removable;
@@ -316,7 +316,7 @@
};
sdhci@f98a4900 {
- status = "ok";
+ status = "okay";
bus-width = <4>;
@@ -330,14 +330,14 @@
};
serial@f991e000 {
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&blsp1_uart2_pin_a>;
};
usb@f9a55000 {
- status = "ok";
+ status = "okay";
phys = <&usb_hs1_phy>;
phy-select = <&tcsr 0xb000 0>;
@@ -350,7 +350,7 @@
ulpi {
phy@a {
- status = "ok";
+ status = "okay";
v1p8-supply = <&pm8941_l6>;
v3p3-supply = <&pm8941_l24>;
@@ -482,7 +482,7 @@
};
i2c@f9964000 {
- status = "ok";
+ status = "okay";
clock-frequency = <355000>;
qcom,src-freq = <50000000>;
@@ -522,7 +522,7 @@
};
i2c@f9967000 {
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&i2c11_pins>;
clock-frequency = <355000>;
@@ -635,7 +635,7 @@
};
coincell@2800 {
- status = "ok";
+ status = "okay";
qcom,rset-ohms = <2100>;
qcom,vset-millivolts = <3000>;
};
diff --git a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
index 611bae9fe66b..9743beebd84d 100644
--- a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
@@ -261,7 +261,7 @@
&soc {
usb@f9a55000 {
- status = "ok";
+ status = "okay";
phys = <&usb_hs1_phy>;
phy-select = <&tcsr 0xb000 0>;
@@ -274,7 +274,7 @@
ulpi {
phy@a {
- status = "ok";
+ status = "okay";
v1p8-supply = <&pm8941_l6>;
v3p3-supply = <&pm8941_l24>;
@@ -286,7 +286,7 @@
};
sdhci@f9824900 {
- status = "ok";
+ status = "okay";
vmmc-supply = <&pm8941_l20>;
vqmmc-supply = <&pm8941_s3>;
@@ -299,7 +299,7 @@
};
sdhci@f98a4900 {
- status = "ok";
+ status = "okay";
bus-width = <4>;
@@ -313,14 +313,14 @@
};
serial@f991e000 {
- status = "ok";
+ status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&blsp1_uart2_pin_a>;
};
i2c@f9924000 {
- status = "ok";
+ status = "okay";
clock-frequency = <355000>;
qcom,src-freq = <50000000>;
@@ -464,7 +464,7 @@
};
coincell@2800 {
- status = "ok";
+ status = "okay";
qcom,rset-ohms = <2100>;
qcom,vset-millivolts = <3000>;
};
@@ -472,7 +472,7 @@
pm8941@1 {
wled@d800 {
- status = "ok";
+ status = "okay";
qcom,cs-out;
qcom,current-limit = <20>;
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index 51f5f904f9eb..c65d33591efa 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -1399,6 +1399,49 @@
<&rpmcc RPM_SMD_CNOC_A_CLK>;
};
+ gpu: adreno@fdb00000 {
+ status = "disabled";
+
+ compatible = "qcom,adreno-330.1",
+ "qcom,adreno";
+ reg = <0xfdb00000 0x10000>;
+ reg-names = "kgsl_3d0_reg_memory";
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "kgsl_3d0_irq";
+ clock-names = "core",
+ "iface",
+ "mem_iface";
+ clocks = <&mmcc OXILI_GFX3D_CLK>,
+ <&mmcc OXILICX_AHB_CLK>,
+ <&mmcc OXILICX_AXI_CLK>;
+ sram = <&gmu_sram>;
+ power-domains = <&mmcc OXILICX_GDSC>;
+ operating-points-v2 = <&gpu_opp_table>;
+
+ interconnects = <&mmssnoc MNOC_MAS_GRAPHICS_3D &bimc BIMC_SLV_EBI_CH0>,
+ <&ocmemnoc OCMEM_VNOC_MAS_GFX3D &ocmemnoc OCMEM_SLV_OCMEM>;
+ interconnect-names = "gfx-mem",
+ "ocmem";
+
+ // iommus = <&gpu_iommu 0>;
+
+ gpu_opp_table: opp_table {
+ compatible = "operating-points-v2";
+
+ opp-320000000 {
+ opp-hz = /bits/ 64 <320000000>;
+ };
+
+ opp-200000000 {
+ opp-hz = /bits/ 64 <200000000>;
+ };
+
+ opp-27000000 {
+ opp-hz = /bits/ 64 <27000000>;
+ };
+ };
+ };
+
mdss: mdss@fd900000 {
status = "disabled";
diff --git a/arch/arm/boot/dts/qcom-msm8974pro.dtsi b/arch/arm/boot/dts/qcom-msm8974pro.dtsi
index 6740a4cb7da8..b64c28036dd0 100644
--- a/arch/arm/boot/dts/qcom-msm8974pro.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974pro.dtsi
@@ -14,5 +14,10 @@
clock-controller@fc400000 {
compatible = "qcom,gcc-msm8974pro";
};
+
+ adreno@fdb00000 {
+ compatible = "qcom,adreno-330.2",
+ "qcom,adreno";
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom-pmx55.dtsi b/arch/arm/boot/dts/qcom-pmx55.dtsi
new file mode 100644
index 000000000000..6571b88d018a
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-pmx55.dtsi
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: BSD-3-Clause
+
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020, Linaro Limited
+ */
+
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+ pmic@8 {
+ compatible = "qcom,pmx55", "qcom,spmi-pmic";
+ reg = <0x8 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ power-on@800 {
+ compatible = "qcom,pm8916-pon";
+ reg = <0x0800>;
+
+ status = "disabled";
+ };
+
+ pmx55_temp: temp-alarm@2400 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0x2400>;
+ interrupts = <0x8 0x24 0x0 IRQ_TYPE_EDGE_BOTH>;
+ io-channels = <&pmx55_adc ADC5_DIE_TEMP>;
+ io-channel-names = "thermal";
+ #thermal-sensor-cells = <0>;
+ };
+
+ pmx55_adc: adc@3100 {
+ compatible = "qcom,spmi-adc5";
+ reg = <0x3100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #io-channel-cells = <1>;
+ interrupts = <0x8 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+
+ ref-gnd@0 {
+ reg = <ADC5_REF_GND>;
+ qcom,pre-scaling = <1 1>;
+ label = "ref_gnd";
+ };
+
+ vref-1p25@1 {
+ reg = <ADC5_1P25VREF>;
+ qcom,pre-scaling = <1 1>;
+ label = "vref_1p25";
+ };
+
+ die-temp@6 {
+ reg = <ADC5_DIE_TEMP>;
+ qcom,pre-scaling = <1 1>;
+ label = "die_temp";
+ };
+
+ chg-temp@9 {
+ reg = <ADC5_CHG_TEMP>;
+ qcom,pre-scaling = <1 1>;
+ label = "chg_temp";
+ };
+ };
+
+ pmx55_gpios: gpio@c000 {
+ compatible = "qcom,pmx55-gpio", "qcom,spmi-gpio";
+ reg = <0xc000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ pmic@9 {
+ compatible = "qcom,pmx55", "qcom,spmi-pmic";
+ reg = <0x9 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom-sdx55-mtp.dts b/arch/arm/boot/dts/qcom-sdx55-mtp.dts
new file mode 100644
index 000000000000..9649c1e11311
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-sdx55-mtp.dts
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020, Linaro Ltd.
+ */
+
+/dts-v1/;
+
+#include "qcom-sdx55.dtsi"
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <arm64/qcom/pm8150b.dtsi>
+#include "qcom-pmx55.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDX55 MTP";
+ compatible = "qcom,sdx55-mtp", "qcom,sdx55";
+ qcom,board-id = <0x5010008 0x0>;
+
+ aliases {
+ serial0 = &blsp1_uart3;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mpss_debug_mem: memory@8ef00000 {
+ no-map;
+ reg = <0x8ef00000 0x800000>;
+ };
+
+ ipa_fw_mem: memory@8fced000 {
+ no-map;
+ reg = <0x8fced000 0x10000>;
+ };
+
+ mpss_adsp_mem: memory@90c00000 {
+ no-map;
+ reg = <0x90c00000 0xd400000>;
+ };
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+ };
+
+ vreg_bob_3p3: pmx55-bob {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_bob_3p3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+
+ vin-supply = <&vph_pwr>;
+ };
+
+ vreg_s7e_mx_0p752: pmx55-s7e {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_s7e_mx_0p752";
+ regulator-min-microvolt = <752000>;
+ regulator-max-microvolt = <752000>;
+
+ vin-supply = <&vph_pwr>;
+ };
+};
+
+&apps_rsc {
+ pmx55-rpmh-regulators {
+ compatible = "qcom,pmx55-rpmh-regulators";
+ qcom,pmic-id = "e";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-l1-l2-supply = <&vreg_s2e_1p224>;
+ vdd-l3-l9-supply = <&vreg_s3e_0p824>;
+ vdd-l4-l12-supply = <&vreg_s4e_1p904>;
+ vdd-l5-l6-supply = <&vreg_s4e_1p904>;
+ vdd-l7-l8-supply = <&vreg_s3e_0p824>;
+ vdd-l10-l11-l13-supply = <&vreg_bob_3p3>;
+ vdd-l14-supply = <&vreg_s7e_mx_0p752>;
+ vdd-l15-supply = <&vreg_s2e_1p224>;
+ vdd-l16-supply = <&vreg_s4e_1p904>;
+
+ vreg_s2e_1p224: smps2 {
+ regulator-min-microvolt = <1280000>;
+ regulator-max-microvolt = <1400000>;
+ };
+
+ vreg_s3e_0p824: smps3 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ vreg_s4e_1p904: smps4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1960000>;
+ };
+
+ vreg_l1e_bb_1p2: ldo1 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo2 {
+ regulator-min-microvolt = <1128000>;
+ regulator-max-microvolt = <1128000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo3 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ vreg_l4e_bb_0p875: ldo4 {
+ regulator-min-microvolt = <872000>;
+ regulator-max-microvolt = <872000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ vreg_l5e_bb_1p7: ldo5 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo7 {
+ regulator-min-microvolt = <480000>;
+ regulator-max-microvolt = <900000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo8 {
+ regulator-min-microvolt = <480000>;
+ regulator-max-microvolt = <900000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo9 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ vreg_l10e_3p1: ldo10 {
+ regulator-min-microvolt = <3088000>;
+ regulator-max-microvolt = <3088000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo11 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo12 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo13 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo14 {
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo15 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo16 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <1904000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+ };
+};
+
+&blsp1_uart3 {
+ status = "okay";
+};
+
+&qpic_bam {
+ status = "okay";
+};
+
+&qpic_nand {
+ status = "okay";
+
+ nand@0 {
+ reg = <0>;
+
+ nand-ecc-strength = <8>;
+ nand-ecc-step-size = <512>;
+ nand-bus-width = <8>;
+ };
+};
+
+&usb {
+ status = "okay";
+};
+
+&usb_dwc3 {
+ dr_mode = "peripheral";
+};
+
+&usb_hsphy {
+ status = "okay";
+ vdda-pll-supply = <&vreg_l4e_bb_0p875>;
+ vdda33-supply = <&vreg_l10e_3p1>;
+ vdda18-supply = <&vreg_l5e_bb_1p7>;
+};
+
+&usb_qmpphy {
+ status = "okay";
+ vdda-phy-supply = <&vreg_l4e_bb_0p875>;
+ vdda-pll-supply = <&vreg_l1e_bb_1p2>;
+};
diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi
new file mode 100644
index 000000000000..e4180bbc4655
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-sdx55.dtsi
@@ -0,0 +1,505 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * SDX55 SoC device tree source
+ *
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020, Linaro Ltd.
+ */
+
+#include <dt-bindings/clock/qcom,gcc-sdx55.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
+#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ qcom,msm-id = <357 0x10000>, <368 0x10000>, <418 0x10000>;
+ interrupt-parent = <&intc>;
+
+ memory {
+ device_type = "memory";
+ reg = <0 0>;
+ };
+
+ clocks {
+ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <38400000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32000>;
+ };
+
+ nand_clk_dummy: nand-clk-dummy {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32000>;
+ };
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x0>;
+ enable-method = "psci";
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ hyp_mem: memory@8fc00000 {
+ no-map;
+ reg = <0x8fc00000 0x80000>;
+ };
+
+ ac_db_mem: memory@8fc80000 {
+ no-map;
+ reg = <0x8fc80000 0x40000>;
+ };
+
+ secdata_mem: memory@8fcfd000 {
+ no-map;
+ reg = <0x8fcfd000 0x1000>;
+ };
+
+ sbl_mem: memory@8fd00000 {
+ no-map;
+ reg = <0x8fd00000 0x100000>;
+ };
+
+ aop_image: memory@8fe00000 {
+ no-map;
+ reg = <0x8fe00000 0x20000>;
+ };
+
+ aop_cmd_db: memory@8fe20000 {
+ compatible = "qcom,cmd-db";
+ reg = <0x8fe20000 0x20000>;
+ no-map;
+ };
+
+ smem_mem: memory@8fe40000 {
+ no-map;
+ reg = <0x8fe40000 0xc0000>;
+ };
+
+ tz_mem: memory@8ff00000 {
+ no-map;
+ reg = <0x8ff00000 0x100000>;
+ };
+
+ tz_apps_mem: memory@0x90000000 {
+ no-map;
+ reg = <0x90000000 0x500000>;
+ };
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "simple-bus";
+
+ gcc: clock-controller@100000 {
+ compatible = "qcom,gcc-sdx55";
+ reg = <0x100000 0x1f0000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ clock-names = "bi_tcxo", "sleep_clk";
+ clocks = <&rpmhcc RPMH_CXO_CLK>, <&sleep_clk>;
+ };
+
+ blsp1_uart3: serial@831000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x00831000 0x200>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&gcc 30>,
+ <&gcc 9>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ usb_hsphy: phy@ff4000 {
+ compatible = "qcom,usb-snps-hs-7nm-phy";
+ reg = <0x00ff4000 0x114>;
+ status = "disabled";
+ #phy-cells = <0>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "ref";
+
+ resets = <&gcc GCC_QUSB2PHY_BCR>;
+ };
+
+ usb_qmpphy: phy@ff6000 {
+ compatible = "qcom,sdx55-qmp-usb3-uni-phy";
+ reg = <0x00ff6000 0x1c0>;
+ status = "disabled";
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_USB3_PHY_AUX_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_USB3_PRIM_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ resets = <&gcc GCC_USB3PHY_PHY_BCR>,
+ <&gcc GCC_USB3_PHY_BCR>;
+ reset-names = "phy", "common";
+
+ usb_ssphy: phy@ff6200 {
+ reg = <0x00ff6200 0x170>,
+ <0x00ff6400 0x200>,
+ <0x00ff6800 0x800>;
+ #phy-cells = <0>;
+ #clock-cells = <0>;
+ clocks = <&gcc GCC_USB3_PHY_PIPE_CLK>;
+ clock-names = "pipe0";
+ clock-output-names = "usb3_uni_phy_pipe_clk_src";
+ };
+ };
+
+ qpic_bam: dma-controller@1b04000 {
+ compatible = "qcom,bam-v1.7.0";
+ reg = <0x01b04000 0x1c000>;
+ interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rpmhcc RPMH_QPIC_CLK>;
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ qcom,controlled-remotely;
+ status = "disabled";
+ };
+
+ qpic_nand: nand@1b30000 {
+ compatible = "qcom,sdx55-nand";
+ reg = <0x01b30000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&rpmhcc RPMH_QPIC_CLK>,
+ <&nand_clk_dummy>;
+ clock-names = "core", "aon";
+
+ dmas = <&qpic_bam 0>,
+ <&qpic_bam 1>,
+ <&qpic_bam 2>;
+ dma-names = "tx", "rx", "cmd";
+ status = "disabled";
+ };
+
+ tcsr_mutex: hwlock@1f40000 {
+ compatible = "qcom,tcsr-mutex";
+ reg = <0x01f40000 0x40000>;
+ #hwlock-cells = <1>;
+ };
+
+ sdhc_1: sdhci@8804000 {
+ compatible = "qcom,sdx55-sdhci", "qcom,sdhci-msm-v5";
+ reg = <0x08804000 0x1000>;
+ interrupts = <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+ clocks = <&gcc GCC_SDCC1_AHB_CLK>,
+ <&gcc GCC_SDCC1_APPS_CLK>;
+ clock-names = "iface", "core";
+ status = "disabled";
+ };
+
+ usb: usb@a6f8800 {
+ compatible = "qcom,sdx55-dwc3", "qcom,dwc3";
+ reg = <0x0a6f8800 0x400>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_USB30_SLV_AHB_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>,
+ <&gcc GCC_USB30_MSTR_AXI_CLK>,
+ <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_SLEEP_CLK>;
+ clock-names = "cfg_noc", "core", "iface", "mock_utmi",
+ "sleep";
+
+ assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+ power-domains = <&gcc USB30_GDSC>;
+
+ resets = <&gcc GCC_USB30_BCR>;
+
+ usb_dwc3: dwc3@a600000 {
+ compatible = "snps,dwc3";
+ reg = <0x0a600000 0xcd00>;
+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&apps_smmu 0x1a0 0x0>;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+ phys = <&usb_hsphy>, <&usb_ssphy>;
+ phy-names = "usb2-phy", "usb3-phy";
+ };
+ };
+
+ pdc: interrupt-controller@b210000 {
+ compatible = "qcom,sdx55-pdc", "qcom,pdc";
+ reg = <0x0b210000 0x30000>;
+ qcom,pdc-ranges = <0 179 52>;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+ };
+
+ restart@c264000 {
+ compatible = "qcom,pshold";
+ reg = <0x0c264000 0x1000>;
+ };
+
+ spmi_bus: qcom,spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x0c440000 0x0000d00>,
+ <0x0c600000 0x2000000>,
+ <0x0e600000 0x0100000>,
+ <0x0e700000 0x00a0000>,
+ <0x0c40a000 0x0000700>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ cell-index = <0>;
+ };
+
+ tlmm: pinctrl@f100000 {
+ compatible = "qcom,sdx55-pinctrl";
+ reg = <0xf100000 0x300000>;
+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ apps_smmu: iommu@15000000 {
+ compatible = "qcom,sdx55-smmu-500", "arm,mmu-500";
+ reg = <0x15000000 0x20000>;
+ #iommu-cells = <2>;
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ intc: interrupt-controller@17800000 {
+ compatible = "qcom,msm-qgic2";
+ interrupt-controller;
+ interrupt-parent = <&intc>;
+ #interrupt-cells = <3>;
+ reg = <0x17800000 0x1000>,
+ <0x17802000 0x1000>;
+ };
+
+ watchdog@17817000 {
+ compatible = "qcom,apss-wdt-sdx55", "qcom,kpss-wdt";
+ reg = <0x17817000 0x1000>;
+ clocks = <&sleep_clk>;
+ };
+
+ timer@17820000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x17820000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@17821000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 7 0x4>,
+ <GIC_SPI 6 0x4>;
+ reg = <0x17821000 0x1000>,
+ <0x17822000 0x1000>;
+ };
+
+ frame@17823000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 8 0x4>;
+ reg = <0x17823000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17824000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 9 0x4>;
+ reg = <0x17824000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17825000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 10 0x4>;
+ reg = <0x17825000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17826000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 11 0x4>;
+ reg = <0x17826000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17827000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 12 0x4>;
+ reg = <0x17827000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17828000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 13 0x4>;
+ reg = <0x17828000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17829000 {
+ frame-number = <7>;
+ interrupts = <GIC_SPI 14 0x4>;
+ reg = <0x17829000 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ apps_rsc: rsc@17840000 {
+ compatible = "qcom,rpmh-rsc";
+ reg = <0x17830000 0x10000>, <0x17840000 0x10000>;
+ reg-names = "drv-0", "drv-1";
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,tcs-offset = <0xd00>;
+ qcom,drv-id = <1>;
+ qcom,tcs-config = <ACTIVE_TCS 2>, <SLEEP_TCS 2>,
+ <WAKE_TCS 2>, <CONTROL_TCS 1>;
+
+ rpmhcc: clock-controller {
+ compatible = "qcom,sdx55-rpmh-clk";
+ #clock-cells = <1>;
+ clock-names = "xo";
+ clocks = <&xo_board>;
+ };
+
+ rpmhpd: power-controller {
+ compatible = "qcom,sdx55-rpmhpd";
+ #power-domain-cells = <1>;
+ operating-points-v2 = <&rpmhpd_opp_table>;
+
+ rpmhpd_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ rpmhpd_opp_ret: opp1 {
+ opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
+ };
+
+ rpmhpd_opp_min_svs: opp2 {
+ opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+ };
+
+ rpmhpd_opp_low_svs: opp3 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ };
+
+ rpmhpd_opp_svs: opp4 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ };
+
+ rpmhpd_opp_svs_l1: opp5 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ };
+
+ rpmhpd_opp_nom: opp6 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ };
+
+ rpmhpd_opp_nom_l1: opp7 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+ };
+
+ rpmhpd_opp_nom_l2: opp8 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>;
+ };
+
+ rpmhpd_opp_turbo: opp9 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+ };
+
+ rpmhpd_opp_turbo_l1: opp10 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+ };
+ };
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 12 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ clock-frequency = <19200000>;
+ };
+};
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index 093567022386..47a787a12e55 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -54,25 +54,6 @@
};
};
- amba: bus {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- pdma: pdma@20078000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x20078000 0x4000>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
- #dma-cells = <1>;
- arm,pl330-broken-no-flushp;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC2>;
- clock-names = "apb_pclk";
- };
- };
-
arm-pmu {
compatible = "arm,cortex-a7-pmu";
interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
@@ -292,6 +273,21 @@
status = "disabled";
};
+ nfc: nand-controller@10500000 {
+ compatible = "rockchip,rk3036-nfc",
+ "rockchip,rk2928-nfc";
+ reg = <0x10500000 0x4000>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_NANDC>, <&cru SCLK_NANDC>;
+ clock-names = "ahb", "nfc";
+ assigned-clocks = <&cru SCLK_NANDC>;
+ assigned-clock-rates = <150000000>;
+ pinctrl-0 = <&flash_ale &flash_bus8 &flash_cle &flash_csn0
+ &flash_rdn &flash_rdy &flash_wrn>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
cru: clock-controller@20000000 {
compatible = "rockchip,rk3036-cru";
reg = <0x20000000 0x1000>;
@@ -494,6 +490,18 @@
status = "disabled";
};
+ pdma: pdma@20078000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x20078000 0x4000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ arm,pl330-broken-no-flushp;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC2>;
+ clock-names = "apb_pclk";
+ };
+
pinctrl: pinctrl {
compatible = "rockchip,rk3036-pinctrl";
rockchip,grf = <&grf>;
@@ -643,6 +651,43 @@
};
};
+ nfc {
+ flash_ale: flash-ale {
+ rockchip,pins = <2 RK_PA0 1 &pcfg_pull_default>;
+ };
+
+ flash_bus8: flash-bus8 {
+ rockchip,pins = <1 RK_PD0 1 &pcfg_pull_default>,
+ <1 RK_PD1 1 &pcfg_pull_default>,
+ <1 RK_PD2 1 &pcfg_pull_default>,
+ <1 RK_PD3 1 &pcfg_pull_default>,
+ <1 RK_PD4 1 &pcfg_pull_default>,
+ <1 RK_PD5 1 &pcfg_pull_default>,
+ <1 RK_PD6 1 &pcfg_pull_default>,
+ <1 RK_PD7 1 &pcfg_pull_default>;
+ };
+
+ flash_cle: flash-cle {
+ rockchip,pins = <2 RK_PA1 1 &pcfg_pull_default>;
+ };
+
+ flash_csn0: flash-csn0 {
+ rockchip,pins = <2 RK_PA6 1 &pcfg_pull_default>;
+ };
+
+ flash_rdn: flash-rdn {
+ rockchip,pins = <2 RK_PA3 1 &pcfg_pull_default>;
+ };
+
+ flash_rdy: flash-rdy {
+ rockchip,pins = <2 RK_PA4 1 &pcfg_pull_default>;
+ };
+
+ flash_wrn: flash-wrn {
+ rockchip,pins = <2 RK_PA2 1 &pcfg_pull_default>;
+ };
+ };
+
emac {
emac_xfer: emac-xfer {
rockchip,pins = <2 RK_PB2 1 &pcfg_pull_default>, /* crs_dvalid */
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
index 48e6e8d44a1a..a4dd50aaf3fc 100644
--- a/arch/arm/boot/dts/rk322x.dtsi
+++ b/arch/arm/boot/dts/rk322x.dtsi
@@ -14,6 +14,9 @@
interrupt-parent = <&gic>;
aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &sdio;
+ mmc2 = &emmc;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -95,24 +98,6 @@
};
};
- amba: bus {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- pdma: pdma@110f0000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x110f0000 0x4000>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
- #dma-cells = <1>;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC>;
- clock-names = "apb_pclk";
- };
- };
-
arm-pmu {
compatible = "arm,cortex-a7-pmu";
interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
@@ -464,6 +449,17 @@
<75000000>;
};
+ pdma: pdma@110f0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x110f0000 0x4000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC>;
+ clock-names = "apb_pclk";
+ };
+
thermal-zones {
cpu_thermal: cpu-thermal {
polling-delay-passive = <100>; /* milliseconds */
diff --git a/arch/arm/boot/dts/rk3288-miqi.dts b/arch/arm/boot/dts/rk3288-miqi.dts
index cf54d5ffff2f..713f55e143c6 100644
--- a/arch/arm/boot/dts/rk3288-miqi.dts
+++ b/arch/arm/boot/dts/rk3288-miqi.dts
@@ -123,6 +123,11 @@
status = "okay";
};
+&gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+};
+
&hdmi {
ddc-i2c-bus = <&i2c5>;
status = "okay";
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 68d5a58cfe88..ea7416c31f9b 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -154,50 +154,6 @@
};
};
- amba: bus {
- compatible = "simple-bus";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- dmac_peri: dma-controller@ff250000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x0 0xff250000 0x0 0x4000>;
- interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
- #dma-cells = <1>;
- arm,pl330-broken-no-flushp;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC2>;
- clock-names = "apb_pclk";
- };
-
- dmac_bus_ns: dma-controller@ff600000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x0 0xff600000 0x0 0x4000>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
- #dma-cells = <1>;
- arm,pl330-broken-no-flushp;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC1>;
- clock-names = "apb_pclk";
- status = "disabled";
- };
-
- dmac_bus_s: dma-controller@ffb20000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x0 0xffb20000 0x0 0x4000>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
- #dma-cells = <1>;
- arm,pl330-broken-no-flushp;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC1>;
- clock-names = "apb_pclk";
- };
- };
-
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
@@ -487,15 +443,27 @@
status = "disabled";
};
+ dmac_peri: dma-controller@ff250000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xff250000 0x0 0x4000>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ arm,pl330-broken-no-flushp;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC2>;
+ clock-names = "apb_pclk";
+ };
+
thermal-zones {
- reserve_thermal: reserve_thermal {
+ reserve_thermal: reserve-thermal {
polling-delay-passive = <1000>; /* milliseconds */
polling-delay = <5000>; /* milliseconds */
thermal-sensors = <&tsadc 0>;
};
- cpu_thermal: cpu_thermal {
+ cpu_thermal: cpu-thermal {
polling-delay-passive = <100>; /* milliseconds */
polling-delay = <5000>; /* milliseconds */
@@ -539,7 +507,7 @@
};
};
- gpu_thermal: gpu_thermal {
+ gpu_thermal: gpu-thermal {
polling-delay-passive = <100>; /* milliseconds */
polling-delay = <5000>; /* milliseconds */
@@ -665,6 +633,19 @@
status = "disabled";
};
+ dmac_bus_ns: dma-controller@ff600000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xff600000 0x0 0x4000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ arm,pl330-broken-no-flushp;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC1>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
i2c0: i2c@ff650000 {
compatible = "rockchip,rk3288-i2c";
reg = <0x0 0xff650000 0x0 0x1000>;
@@ -1329,75 +1310,87 @@
};
qos_gpu_r: qos@ffaa0000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3288-qos", "syscon";
reg = <0x0 0xffaa0000 0x0 0x20>;
};
qos_gpu_w: qos@ffaa0080 {
- compatible = "syscon";
+ compatible = "rockchip,rk3288-qos", "syscon";
reg = <0x0 0xffaa0080 0x0 0x20>;
};
qos_vio1_vop: qos@ffad0000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3288-qos", "syscon";
reg = <0x0 0xffad0000 0x0 0x20>;
};
qos_vio1_isp_w0: qos@ffad0100 {
- compatible = "syscon";
+ compatible = "rockchip,rk3288-qos", "syscon";
reg = <0x0 0xffad0100 0x0 0x20>;
};
qos_vio1_isp_w1: qos@ffad0180 {
- compatible = "syscon";
+ compatible = "rockchip,rk3288-qos", "syscon";
reg = <0x0 0xffad0180 0x0 0x20>;
};
qos_vio0_vop: qos@ffad0400 {
- compatible = "syscon";
+ compatible = "rockchip,rk3288-qos", "syscon";
reg = <0x0 0xffad0400 0x0 0x20>;
};
qos_vio0_vip: qos@ffad0480 {
- compatible = "syscon";
+ compatible = "rockchip,rk3288-qos", "syscon";
reg = <0x0 0xffad0480 0x0 0x20>;
};
qos_vio0_iep: qos@ffad0500 {
- compatible = "syscon";
+ compatible = "rockchip,rk3288-qos", "syscon";
reg = <0x0 0xffad0500 0x0 0x20>;
};
qos_vio2_rga_r: qos@ffad0800 {
- compatible = "syscon";
+ compatible = "rockchip,rk3288-qos", "syscon";
reg = <0x0 0xffad0800 0x0 0x20>;
};
qos_vio2_rga_w: qos@ffad0880 {
- compatible = "syscon";
+ compatible = "rockchip,rk3288-qos", "syscon";
reg = <0x0 0xffad0880 0x0 0x20>;
};
qos_vio1_isp_r: qos@ffad0900 {
- compatible = "syscon";
+ compatible = "rockchip,rk3288-qos", "syscon";
reg = <0x0 0xffad0900 0x0 0x20>;
};
qos_video: qos@ffae0000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3288-qos", "syscon";
reg = <0x0 0xffae0000 0x0 0x20>;
};
qos_hevc_r: qos@ffaf0000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3288-qos", "syscon";
reg = <0x0 0xffaf0000 0x0 0x20>;
};
qos_hevc_w: qos@ffaf0080 {
- compatible = "syscon";
+ compatible = "rockchip,rk3288-qos", "syscon";
reg = <0x0 0xffaf0080 0x0 0x20>;
};
+ dmac_bus_s: dma-controller@ffb20000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xffb20000 0x0 0x4000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ arm,pl330-broken-no-flushp;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC1>;
+ clock-names = "apb_pclk";
+ };
+
efuse: efuse@ffb40000 {
compatible = "rockchip,rk3288-efuse";
reg = <0x0 0xffb40000 0x0 0x20>;
diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
index 859a7477909f..755c946f11de 100644
--- a/arch/arm/boot/dts/rk3xxx.dtsi
+++ b/arch/arm/boot/dts/rk3xxx.dtsi
@@ -32,50 +32,6 @@
spi1 = &spi1;
};
- amba: bus {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- dmac1_s: dma-controller@20018000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x20018000 0x4000>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
- #dma-cells = <1>;
- arm,pl330-broken-no-flushp;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMA1>;
- clock-names = "apb_pclk";
- };
-
- dmac1_ns: dma-controller@2001c000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x2001c000 0x4000>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
- #dma-cells = <1>;
- arm,pl330-broken-no-flushp;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMA1>;
- clock-names = "apb_pclk";
- status = "disabled";
- };
-
- dmac2: dma-controller@20078000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x20078000 0x4000>;
- interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
- #dma-cells = <1>;
- arm,pl330-broken-no-flushp;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMA2>;
- clock-names = "apb_pclk";
- };
- };
-
xin24m: oscillator {
compatible = "fixed-clock";
clock-frequency = <24000000>;
@@ -151,42 +107,42 @@
};
qos_gpu: qos@1012d000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3066-qos", "syscon";
reg = <0x1012d000 0x20>;
};
qos_vpu: qos@1012e000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3066-qos", "syscon";
reg = <0x1012e000 0x20>;
};
qos_lcdc0: qos@1012f000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3066-qos", "syscon";
reg = <0x1012f000 0x20>;
};
qos_cif0: qos@1012f080 {
- compatible = "syscon";
+ compatible = "rockchip,rk3066-qos", "syscon";
reg = <0x1012f080 0x20>;
};
qos_ipp: qos@1012f100 {
- compatible = "syscon";
+ compatible = "rockchip,rk3066-qos", "syscon";
reg = <0x1012f100 0x20>;
};
qos_lcdc1: qos@1012f180 {
- compatible = "syscon";
+ compatible = "rockchip,rk3066-qos", "syscon";
reg = <0x1012f180 0x20>;
};
qos_cif1: qos@1012f200 {
- compatible = "syscon";
+ compatible = "rockchip,rk3066-qos", "syscon";
reg = <0x1012f200 0x20>;
};
qos_rga: qos@1012f280 {
- compatible = "syscon";
+ compatible = "rockchip,rk3066-qos", "syscon";
reg = <0x1012f280 0x20>;
};
@@ -276,6 +232,15 @@
status = "disabled";
};
+ nfc: nand-controller@10500000 {
+ compatible = "rockchip,rk2928-nfc";
+ reg = <0x10500000 0x4000>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_NANDC0>;
+ clock-names = "ahb";
+ status = "disabled";
+ };
+
pmu: pmu@20004000 {
compatible = "rockchip,rk3066-pmu", "syscon", "simple-mfd";
reg = <0x20004000 0x100>;
@@ -295,6 +260,31 @@
reg = <0x20008000 0x200>;
};
+ dmac1_s: dma-controller@20018000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x20018000 0x4000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ arm,pl330-broken-no-flushp;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMA1>;
+ clock-names = "apb_pclk";
+ };
+
+ dmac1_ns: dma-controller@2001c000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x2001c000 0x4000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ arm,pl330-broken-no-flushp;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMA1>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
i2c0: i2c@2002d000 {
compatible = "rockchip,rk3066-i2c";
reg = <0x2002d000 0x1000>;
@@ -469,4 +459,16 @@
dma-names = "tx", "rx";
status = "disabled";
};
+
+ dmac2: dma-controller@20078000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x20078000 0x4000>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ arm,pl330-broken-no-flushp;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMA2>;
+ clock-names = "apb_pclk";
+ };
};
diff --git a/arch/arm/boot/dts/rv1108.dtsi b/arch/arm/boot/dts/rv1108.dtsi
index e491964b1c3d..7319a2473b80 100644
--- a/arch/arm/boot/dts/rv1108.dtsi
+++ b/arch/arm/boot/dts/rv1108.dtsi
@@ -19,6 +19,9 @@
i2c1 = &i2c1;
i2c2 = &i2c2;
i2c3 = &i2c3;
+ mmc0 = &emmc;
+ mmc1 = &sdio;
+ mmc2 = &sdmmc;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -452,6 +455,17 @@
#reset-cells = <1>;
};
+ nfc: nand-controller@30100000 {
+ compatible = "rockchip,rv1108-nfc";
+ reg = <0x30100000 0x1000>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_NANDC>, <&cru SCLK_NANDC>;
+ clock-names = "ahb", "nfc";
+ assigned-clocks = <&cru SCLK_NANDC>;
+ assigned-clock-rates = <150000000>;
+ status = "disabled";
+ };
+
emmc: mmc@30110000 {
compatible = "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc";
reg = <0x30110000 0x4000>;
diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
index 0013ec3463c4..a574ea91d9d3 100644
--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
@@ -15,13 +15,13 @@
#size-cells = <0>;
enable-method = "altr,socfpga-a10-smp";
- cpu@0 {
+ cpu0: cpu@0 {
compatible = "arm,cortex-a9";
device_type = "cpu";
reg = <0>;
next-level-cache = <&L2>;
};
- cpu@1 {
+ cpu1: cpu@1 {
compatible = "arm,cortex-a9";
device_type = "cpu";
reg = <1>;
@@ -29,6 +29,15 @@
};
};
+ pmu: pmu@ff111000 {
+ compatible = "arm,cortex-a9-pmu";
+ interrupt-parent = <&intc>;
+ interrupts = <0 124 4>, <0 125 4>;
+ interrupt-affinity = <&cpu0>, <&cpu1>;
+ reg = <0xff111000 0x1000>,
+ <0xff113000 0x1000>;
+ };
+
intc: intc@ffffd000 {
compatible = "arm,cortex-a9-gic";
#interrupt-cells = <3>;
diff --git a/arch/arm/boot/dts/ste-ab8500.dtsi b/arch/arm/boot/dts/ste-ab8500.dtsi
index 4c16736ea789..4fd09997a2b9 100644
--- a/arch/arm/boot/dts/ste-ab8500.dtsi
+++ b/arch/arm/boot/dts/ste-ab8500.dtsi
@@ -122,9 +122,11 @@
ab8500_temp {
compatible = "stericsson,abx500-temp";
+ interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ABX500_TEMP_WARM";
io-channels = <&gpadc 0x06>,
<&gpadc 0x07>;
- io-channel-name = "aux1", "aux2";
+ io-channel-names = "aux1", "aux2";
};
ab8500_battery: ab8500_battery {
@@ -134,29 +136,77 @@
ab8500_fg {
compatible = "stericsson,ab8500-fg";
- battery = <&ab8500_battery>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>,
+ <8 IRQ_TYPE_LEVEL_HIGH>,
+ <28 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "NCONV_ACCU",
+ "BATT_OVV",
+ "LOW_BAT_F",
+ "CC_INT_CALIB",
+ "CCEOC";
+ battery = <&ab8500_battery>;
io-channels = <&gpadc 0x08>;
- io-channel-name = "main_bat_v";
+ io-channel-names = "main_bat_v";
};
ab8500_btemp {
compatible = "stericsson,ab8500-btemp";
- battery = <&ab8500_battery>;
+ interrupts = <20 IRQ_TYPE_LEVEL_HIGH>,
+ <80 IRQ_TYPE_LEVEL_HIGH>,
+ <83 IRQ_TYPE_LEVEL_HIGH>,
+ <81 IRQ_TYPE_LEVEL_HIGH>,
+ <82 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "BAT_CTRL_INDB",
+ "BTEMP_LOW",
+ "BTEMP_HIGH",
+ "BTEMP_LOW_MEDIUM",
+ "BTEMP_MEDIUM_HIGH";
+ battery = <&ab8500_battery>;
io-channels = <&gpadc 0x02>,
<&gpadc 0x01>;
- io-channel-name = "btemp_ball",
+ io-channel-names = "btemp_ball",
"bat_ctrl";
};
ab8500_charger {
- compatible = "stericsson,ab8500-charger";
+ compatible = "stericsson,ab8500-charger";
+ interrupts = <10 IRQ_TYPE_LEVEL_HIGH>,
+ <11 IRQ_TYPE_LEVEL_HIGH>,
+ <0 IRQ_TYPE_LEVEL_HIGH>,
+ <107 IRQ_TYPE_LEVEL_HIGH>,
+ <106 IRQ_TYPE_LEVEL_HIGH>,
+ <14 IRQ_TYPE_LEVEL_HIGH>,
+ <15 IRQ_TYPE_LEVEL_HIGH>,
+ <79 IRQ_TYPE_LEVEL_HIGH>,
+ <105 IRQ_TYPE_LEVEL_HIGH>,
+ <104 IRQ_TYPE_LEVEL_HIGH>,
+ <89 IRQ_TYPE_LEVEL_HIGH>,
+ <22 IRQ_TYPE_LEVEL_HIGH>,
+ <21 IRQ_TYPE_LEVEL_HIGH>,
+ <16 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "MAIN_CH_UNPLUG_DET",
+ "MAIN_CHARGE_PLUG_DET",
+ "MAIN_EXT_CH_NOT_OK",
+ "MAIN_CH_TH_PROT_R",
+ "MAIN_CH_TH_PROT_F",
+ "VBUS_DET_F",
+ "VBUS_DET_R",
+ "USB_LINK_STATUS",
+ "USB_CH_TH_PROT_R",
+ "USB_CH_TH_PROT_F",
+ "USB_CHARGER_NOT_OKR",
+ "VBUS_OVV",
+ "CH_WD_EXP",
+ "VBUS_CH_DROP_END";
battery = <&ab8500_battery>;
vddadc-supply = <&ab8500_ldo_tvout_reg>;
io-channels = <&gpadc 0x03>,
<&gpadc 0x0a>,
<&gpadc 0x09>,
<&gpadc 0x0b>;
- io-channel-name = "main_charger_v",
+ io-channel-names = "main_charger_v",
"main_charger_c",
"vbus_v",
"usb_charger_c";
diff --git a/arch/arm/boot/dts/ste-ab8505.dtsi b/arch/arm/boot/dts/ste-ab8505.dtsi
index c72aa250bf6f..cc045b2fc217 100644
--- a/arch/arm/boot/dts/ste-ab8505.dtsi
+++ b/arch/arm/boot/dts/ste-ab8505.dtsi
@@ -13,7 +13,8 @@
<&gpadc 0x08>, /* Main battery voltage */
<&gpadc 0x09>, /* VBUS */
<&gpadc 0x0b>, /* Charger current */
- <&gpadc 0x0c>; /* Backup battery voltage */
+ <&gpadc 0x0c>, /* Backup battery voltage */
+ <&gpadc 0x0d>; /* Die temperature */
};
soc {
@@ -45,9 +46,8 @@
gpadc: ab8500-gpadc {
compatible = "stericsson,ab8500-gpadc";
- interrupts = <32 IRQ_TYPE_LEVEL_HIGH
- 39 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "HW_CONV_END", "SW_CONV_END";
+ interrupts = <39 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "SW_CONV_END";
vddadc-supply = <&ab8500_ldo_adc_reg>;
#address-cells = <1>;
#size-cells = <0>;
@@ -84,42 +84,93 @@
bk_bat_v: channel@0c {
reg = <0x0c>;
};
+ die_temp: channel@0d {
+ reg = <0x0d>;
+ };
usb_id: channel@0e {
reg = <0x0e>;
};
};
ab8500_battery: ab8500_battery {
- status = "disabled";
+ stericsson,battery-type = "LIPO";
thermistor-on-batctrl;
};
ab8500_fg {
status = "disabled";
compatible = "stericsson,ab8500-fg";
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>,
+ <8 IRQ_TYPE_LEVEL_HIGH>,
+ <28 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "NCONV_ACCU",
+ "BATT_OVV",
+ "LOW_BAT_F",
+ "CC_INT_CALIB",
+ "CCEOC";
battery = <&ab8500_battery>;
io-channels = <&gpadc 0x08>;
- io-channel-name = "main_bat_v";
+ io-channel-names = "main_bat_v";
};
ab8500_btemp {
status = "disabled";
compatible = "stericsson,ab8500-btemp";
+ interrupts = <20 IRQ_TYPE_LEVEL_HIGH>,
+ <80 IRQ_TYPE_LEVEL_HIGH>,
+ <83 IRQ_TYPE_LEVEL_HIGH>,
+ <81 IRQ_TYPE_LEVEL_HIGH>,
+ <82 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "BAT_CTRL_INDB",
+ "BTEMP_LOW",
+ "BTEMP_HIGH",
+ "BTEMP_LOW_MEDIUM",
+ "BTEMP_MEDIUM_HIGH";
battery = <&ab8500_battery>;
io-channels = <&gpadc 0x02>,
<&gpadc 0x01>;
- io-channel-name = "btemp_ball",
+ io-channel-names = "btemp_ball",
"bat_ctrl";
};
ab8500_charger {
status = "disabled";
compatible = "stericsson,ab8500-charger";
+ interrupts = <10 IRQ_TYPE_LEVEL_HIGH>,
+ <11 IRQ_TYPE_LEVEL_HIGH>,
+ <0 IRQ_TYPE_LEVEL_HIGH>,
+ <107 IRQ_TYPE_LEVEL_HIGH>,
+ <106 IRQ_TYPE_LEVEL_HIGH>,
+ <14 IRQ_TYPE_LEVEL_HIGH>,
+ <15 IRQ_TYPE_LEVEL_HIGH>,
+ <79 IRQ_TYPE_LEVEL_HIGH>,
+ <105 IRQ_TYPE_LEVEL_HIGH>,
+ <104 IRQ_TYPE_LEVEL_HIGH>,
+ <89 IRQ_TYPE_LEVEL_HIGH>,
+ <22 IRQ_TYPE_LEVEL_HIGH>,
+ <21 IRQ_TYPE_LEVEL_HIGH>,
+ <16 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "MAIN_CH_UNPLUG_DET",
+ "MAIN_CHARGE_PLUG_DET",
+ "MAIN_EXT_CH_NOT_OK",
+ "MAIN_CH_TH_PROT_R",
+ "MAIN_CH_TH_PROT_F",
+ "VBUS_DET_F",
+ "VBUS_DET_R",
+ "USB_LINK_STATUS",
+ "USB_CH_TH_PROT_R",
+ "USB_CH_TH_PROT_F",
+ "USB_CHARGER_NOT_OKR",
+ "VBUS_OVV",
+ "CH_WD_EXP",
+ "VBUS_CH_DROP_END";
battery = <&ab8500_battery>;
vddadc-supply = <&ab8500_ldo_adc_reg>;
io-channels = <&gpadc 0x09>,
<&gpadc 0x0b>;
- io-channel-name = "vbus_v",
+ io-channel-names = "vbus_v",
"usb_charger_c";
};
diff --git a/arch/arm/boot/dts/ste-db8500.dtsi b/arch/arm/boot/dts/ste-db8500.dtsi
index d309fad32229..344d29853bf7 100644
--- a/arch/arm/boot/dts/ste-db8500.dtsi
+++ b/arch/arm/boot/dts/ste-db8500.dtsi
@@ -12,4 +12,42 @@
200000 0>;
};
};
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* Modem trace memory */
+ ram@06000000 {
+ reg = <0x06000000 0x00f00000>;
+ no-map;
+ };
+
+ /* Modem shared memory */
+ ram@06f00000 {
+ reg = <0x06f00000 0x00100000>;
+ no-map;
+ };
+
+ /* Modem private memory */
+ ram@07000000 {
+ reg = <0x07000000 0x01000000>;
+ no-map;
+ };
+
+ /*
+ * Initial Secure Software ISSW memory
+ *
+ * This is probably only used if the kernel tries
+ * to actually call into trustzone to run secure
+ * applications, which the mainline kernel probably
+ * will not do on this old chipset. But you can never
+ * be too careful, so reserve this memory anyway.
+ */
+ ram@17f00000 {
+ reg = <0x17f00000 0x00100000>;
+ no-map;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/ste-db8520.dtsi b/arch/arm/boot/dts/ste-db8520.dtsi
index 48bd8728ae27..287804e9e183 100644
--- a/arch/arm/boot/dts/ste-db8520.dtsi
+++ b/arch/arm/boot/dts/ste-db8520.dtsi
@@ -12,4 +12,42 @@
200000 0>;
};
};
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* Modem trace memory */
+ ram@06000000 {
+ reg = <0x06000000 0x00f00000>;
+ no-map;
+ };
+
+ /* Modem shared memory */
+ ram@06f00000 {
+ reg = <0x06f00000 0x00100000>;
+ no-map;
+ };
+
+ /* Modem private memory */
+ ram@07000000 {
+ reg = <0x07000000 0x01000000>;
+ no-map;
+ };
+
+ /*
+ * Initial Secure Software ISSW memory
+ *
+ * This is probably only used if the kernel tries
+ * to actually call into trustzone to run secure
+ * applications, which the mainline kernel probably
+ * will not do on this old chipset. But you can never
+ * be too careful, so reserve this memory anyway.
+ */
+ ram@17f00000 {
+ reg = <0x17f00000 0x00100000>;
+ no-map;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/ste-db9500.dtsi b/arch/arm/boot/dts/ste-db9500.dtsi
new file mode 100644
index 000000000000..0afff703191c
--- /dev/null
+++ b/arch/arm/boot/dts/ste-db9500.dtsi
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "ste-dbx5x0.dtsi"
+
+/ {
+ cpus {
+ cpu@300 {
+ /* cpufreq controls */
+ operating-points = <1152000 0
+ 800000 0
+ 400000 0
+ 200000 0>;
+ };
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /*
+ * Initial Secure Software ISSW memory
+ *
+ * This is probably only used if the kernel tries
+ * to actually call into trustzone to run secure
+ * applications, which the mainline kernel probably
+ * will not do on this old chipset. But you can never
+ * be too careful, so reserve this memory anyway.
+ */
+ ram@17f00000 {
+ reg = <0x17f00000 0x00100000>;
+ no-map;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index 404b9c4a5fee..68607e4ad80c 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -883,7 +883,7 @@
status = "disabled";
};
- sdi0_per1@80126000 {
+ mmc@80126000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80126000 0x1000>;
interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
@@ -899,7 +899,7 @@
status = "disabled";
};
- sdi1_per2@80118000 {
+ mmc@80118000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80118000 0x1000>;
interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
@@ -915,7 +915,7 @@
status = "disabled";
};
- sdi2_per3@80005000 {
+ mmc@80005000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80005000 0x1000>;
interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
@@ -931,7 +931,7 @@
status = "disabled";
};
- sdi3_per2@80119000 {
+ mmc@80119000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80119000 0x1000>;
interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
@@ -947,7 +947,7 @@
status = "disabled";
};
- sdi4_per2@80114000 {
+ mmc@80114000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80114000 0x1000>;
interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
@@ -963,7 +963,7 @@
status = "disabled";
};
- sdi5_per3@80008000 {
+ mmc@80008000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80008000 0x1000>;
interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index ff47cbf6ed3b..83b179692dff 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -113,23 +113,8 @@
status = "okay";
};
- /* ST6G3244ME level translator for 1.8/2.9 V */
- vmmci: regulator-gpio {
- compatible = "regulator-gpio";
-
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2900000>;
- regulator-name = "mmci-reg";
- regulator-type = "voltage";
-
- startup-delay-us = <100>;
-
- states = <1800000 0x1
- 2900000 0x0>;
- };
-
// External Micro SD slot
- sdi0_per1@80126000 {
+ mmc@80126000 {
arm,primecell-periphid = <0x10480180>;
max-frequency = <100000000>;
bus-width = <4>;
@@ -152,7 +137,7 @@
};
// WLAN SDIO channel
- sdi1_per2@80118000 {
+ mmc@80118000 {
arm,primecell-periphid = <0x10480180>;
max-frequency = <100000000>;
bus-width = <4>;
@@ -165,7 +150,7 @@
};
// PoP:ed eMMC
- sdi2_per3@80005000 {
+ mmc@80005000 {
arm,primecell-periphid = <0x10480180>;
max-frequency = <100000000>;
bus-width = <8>;
@@ -180,7 +165,7 @@
};
// On-board eMMC
- sdi4_per2@80114000 {
+ mmc@80114000 {
arm,primecell-periphid = <0x10480180>;
max-frequency = <100000000>;
bus-width = <8>;
diff --git a/arch/arm/boot/dts/ste-href520-tvk.dts b/arch/arm/boot/dts/ste-href520-tvk.dts
index f8c0c1e6aa04..a036a03f6718 100644
--- a/arch/arm/boot/dts/ste-href520-tvk.dts
+++ b/arch/arm/boot/dts/ste-href520-tvk.dts
@@ -12,11 +12,43 @@
model = "ST-Ericsson HREF520 and TVK1281618 UIB";
compatible = "st-ericsson,href520", "st-ericsson,u8500";
- soc {
- vmmci: regulator-gpio {
- gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
- enable-gpio = <&gpio2 14 GPIO_ACTIVE_HIGH>;
- enable-active-high;
+
+ /* ST6G3244ME level translator for 1.8/2.9 V */
+ vmmci: regulator-gpio {
+ compatible = "regulator-gpio";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-name = "mmci-reg";
+ regulator-type = "voltage";
+
+ startup-delay-us = <100>;
+
+ states = <1800000 0x1
+ 2900000 0x0>;
+
+ gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
+ enable-gpio = <&gpio2 14 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&vmmci_default_mode>;
+ };
+};
+
+&pinctrl {
+ vmmci {
+ vmmci_default_mode: vmmc_default {
+ /* VMMCI level-shifter enable */
+ default_href520_cfg1 {
+ pins = "GPIO78_F4";
+ ste,config = <&gpio_out_hi>;
+ };
+ /* VMMCI level-shifter voltage select */
+ default_href520_cfg2 {
+ pins = "GPIO5_AG6";
+ ste,config = <&gpio_out_hi>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/ste-hrefprev60-stuib.dts b/arch/arm/boot/dts/ste-hrefprev60-stuib.dts
index 8ce6b723abf2..dfc933214c1a 100644
--- a/arch/arm/boot/dts/ste-hrefprev60-stuib.dts
+++ b/arch/arm/boot/dts/ste-hrefprev60-stuib.dts
@@ -12,6 +12,25 @@
model = "ST-Ericsson HREF (pre-v60) and ST UIB";
compatible = "st-ericsson,mop500", "st-ericsson,u8500";
+ /* ST6G3244ME level translator for 1.8/2.9 V */
+ vmmci: regulator-gpio {
+ compatible = "regulator-gpio";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-name = "mmci-reg";
+ regulator-type = "voltage";
+
+ startup-delay-us = <100>;
+
+ states = <1800000 0x1
+ 2900000 0x0>;
+
+ gpios = <&tc3589x_gpio 18 GPIO_ACTIVE_HIGH>;
+ enable-gpio = <&tc3589x_gpio 17 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
soc {
/* Reset line for the BU21013 touchscreen */
i2c@80110000 {
diff --git a/arch/arm/boot/dts/ste-hrefprev60-tvk.dts b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
index 142f5475521f..4e6e4439dcff 100644
--- a/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
+++ b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
@@ -11,4 +11,23 @@
/ {
model = "ST-Ericsson HREF (pre-v60) and TVK1281618 UIB";
compatible = "st-ericsson,mop500", "st-ericsson,u8500";
+
+ /* ST6G3244ME level translator for 1.8/2.9 V */
+ vmmci: regulator-gpio {
+ compatible = "regulator-gpio";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-name = "mmci-reg";
+ regulator-type = "voltage";
+
+ startup-delay-us = <100>;
+
+ states = <1800000 0x1
+ 2900000 0x0>;
+
+ gpios = <&tc3589x_gpio 18 GPIO_ACTIVE_HIGH>;
+ enable-gpio = <&tc3589x_gpio 17 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
};
diff --git a/arch/arm/boot/dts/ste-hrefprev60.dtsi b/arch/arm/boot/dts/ste-hrefprev60.dtsi
index 115495de8612..29b67abfc461 100644
--- a/arch/arm/boot/dts/ste-hrefprev60.dtsi
+++ b/arch/arm/boot/dts/ste-hrefprev60.dtsi
@@ -61,16 +61,10 @@
};
// External Micro SD slot
- sdi0_per1@80126000 {
+ mmc@80126000 {
cd-gpios = <&tc3589x_gpio 3 GPIO_ACTIVE_HIGH>;
};
- vmmci: regulator-gpio {
- gpios = <&tc3589x_gpio 18 GPIO_ACTIVE_HIGH>;
- enable-gpio = <&tc3589x_gpio 17 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
pinctrl {
/* Set this up using hogs */
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts b/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
index 1316886e6bcb..52c56ed17ae6 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
+++ b/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
@@ -14,6 +14,28 @@
model = "ST-Ericsson HREF (v60+) and ST UIB";
compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
+ /* ST6G3244ME level translator for 1.8/2.9 V */
+ vmmci: regulator-gpio {
+ compatible = "regulator-gpio";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-name = "mmci-reg";
+ regulator-type = "voltage";
+
+ startup-delay-us = <100>;
+
+ states = <1800000 0x1
+ 2900000 0x0>;
+
+ gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
+ enable-gpio = <&gpio5 9 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&vmmci_default_mode>;
+ };
+
soc {
/* Reset line for the BU21013 touchscreen */
i2c@80110000 {
@@ -33,3 +55,20 @@
};
};
};
+
+&pinctrl {
+ vmmci {
+ vmmci_default_mode: vmmc_default {
+ /* VMMCI level-shifter enable */
+ default_hrefv60_cfg2 {
+ pins = "GPIO169_D22";
+ ste,config = <&gpio_out_hi>;
+ };
+ /* VMMCI level-shifter voltage select */
+ default_hrefv60_cfg3 {
+ pins = "GPIO5_AG6";
+ ste,config = <&gpio_out_hi>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
index 5d4b8245f02c..9c2d2ee6d6d8 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
+++ b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
@@ -13,4 +13,43 @@
/ {
model = "ST-Ericsson HREF (v60+) and TVK1281618 UIB";
compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
+
+ /* ST6G3244ME level translator for 1.8/2.9 V */
+ vmmci: regulator-gpio {
+ compatible = "regulator-gpio";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-name = "mmci-reg";
+ regulator-type = "voltage";
+
+ startup-delay-us = <100>;
+
+ states = <1800000 0x1
+ 2900000 0x0>;
+
+ gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
+ enable-gpio = <&gpio5 9 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&vmmci_default_mode>;
+ };
+};
+
+&pinctrl {
+ vmmci {
+ vmmci_default_mode: vmmc_default {
+ /* VMMCI level-shifter enable */
+ default_hrefv60_cfg2 {
+ pins = "GPIO169_D22";
+ ste,config = <&gpio_out_hi>;
+ };
+ /* VMMCI level-shifter voltage select */
+ default_hrefv60_cfg3 {
+ pins = "GPIO5_AG6";
+ ste,config = <&gpio_out_hi>;
+ };
+ };
+ };
};
diff --git a/arch/arm/boot/dts/ste-hrefv60plus.dtsi b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
index 05b4fbbba57f..8f504edefd3f 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus.dtsi
+++ b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
@@ -10,6 +10,64 @@
model = "ST-Ericsson HREF (v60+) platform with Device Tree";
compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
+ thermal-zones {
+ chassis-thermal {
+ /* Poll every 20 seconds */
+ polling-delay = <20000>;
+ /* Poll every 2nd second when cooling */
+ polling-delay-passive = <2000>;
+
+ thermal-sensors = <&therm1>, <&therm2>;
+
+ /* Tripping points made from rough guess about operating conditions */
+ trips {
+ chassis_alert: chassis-alert {
+ /* At 50 degrees take down the CPU frequency */
+ temperature = <50000>;
+ hysteresis = <3000>;
+ type = "active";
+ };
+ chassis_crit: chassis-crit {
+ /* Just shut down at 70 degrees */
+ temperature = <70000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ /* Push down the operating frequency of the SoC when it gets hot */
+ cooling-maps {
+ map0 {
+ trip = <&chassis_alert>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ contribution = <100>;
+ };
+ };
+ };
+ };
+
+ /*
+ * Thermistors on the board, formally to monitor battery temperatures
+ * but what they measure is the board temperature.
+ */
+ therm1: thermistor@0 {
+ compatible = "murata,ncp18wb473";
+ io-channels = <&gpadc 0x06>; /* AUX1 */
+ pullup-uv = <1800000>;
+ pullup-ohm = <220000>;
+ pulldown-ohm = <0>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ therm2: thermistor@1 {
+ compatible = "murata,ncp18wb473";
+ io-channels = <&gpadc 0x07>; /* AUX2 */
+ pullup-uv = <1800000>;
+ pullup-ohm = <220000>;
+ pulldown-ohm = <0>;
+ #thermal-sensor-cells = <0>;
+ };
+
soc {
/* Name the GPIO muxed rails on the HREF boards */
gpio@8012e000 {
@@ -132,16 +190,10 @@
};
// External Micro SD slot
- sdi0_per1@80126000 {
+ mmc@80126000 {
cd-gpios = <&gpio2 31 GPIO_ACTIVE_HIGH>; // 95
};
- vmmci: regulator-gpio {
- gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
- enable-gpio = <&gpio5 9 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
pinctrl {
/*
* Set this up using hogs, as time goes by and as seems fit, these
@@ -166,16 +218,6 @@
pins = "GPIO95_E8";
ste,config = <&gpio_in_pu>;
};
- /* VMMCI level-shifter enable */
- default_hrefv60_cfg2 {
- pins = "GPIO169_D22";
- ste,config = <&gpio_out_hi>;
- };
- /* VMMCI level-shifter voltage select */
- default_hrefv60_cfg3 {
- pins = "GPIO5_AG6";
- ste,config = <&gpio_out_hi>;
- };
};
};
ipgpio {
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index 41ed21a4fdc1..8142c017882c 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -195,7 +195,7 @@
pinctrl-0 = <&uart0_nhk_mode>;
status = "okay";
};
- mmcsd: sdi@101f6000 {
+ mmcsd: mmc@101f6000 {
cd-gpios = <&stmpe_gpio44 7 GPIO_ACTIVE_LOW>;
wp-gpios = <&stmpe_gpio44 18 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts
index 4445446fa828..f16314ffbf4b 100644
--- a/arch/arm/boot/dts/ste-nomadik-s8815.dts
+++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts
@@ -139,7 +139,7 @@
status = "okay";
};
/* Configure card detect for the uSD slot */
- mmcsd: sdi@101f6000 {
+ mmc@101f6000 {
cd-gpios = <&gpio3 15 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index 4f38aeecadb3..c9b906432341 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -825,7 +825,7 @@
interrupts = <10>;
};
- mmcsd: sdi@101f6000 {
+ mmcsd: mmc@101f6000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x101f6000 0x1000>;
clocks = <&sdiclk>, <&pclksdi>;
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index be90e73c923e..b344b3748143 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -4,7 +4,7 @@
*/
/dts-v1/;
-#include "ste-db8500.dtsi"
+#include "ste-db9500.dtsi"
#include "ste-href-ab8500.dtsi"
#include "ste-href-family-pinctrl.dtsi"
@@ -213,7 +213,7 @@
};
// External Micro SD slot
- sdi0_per1@80126000 {
+ mmc@80126000 {
arm,primecell-periphid = <0x10480180>;
max-frequency = <100000000>;
bus-width = <4>;
@@ -241,7 +241,7 @@
};
// WLAN SDIO channel
- sdi1_per2@80118000 {
+ mmc@80118000 {
arm,primecell-periphid = <0x10480180>;
max-frequency = <100000000>;
bus-width = <4>;
@@ -253,7 +253,7 @@
};
// Unused PoP eMMC - register it and put it to sleep by default
- sdi2_per3@80005000 {
+ mmc@80005000 {
arm,primecell-periphid = <0x10480180>;
pinctrl-names = "default";
pinctrl-0 = <&mc2_a_1_sleep>;
@@ -262,7 +262,7 @@
};
// On-board eMMC
- sdi4_per2@80114000 {
+ mmc@80114000 {
arm,primecell-periphid = <0x10480180>;
max-frequency = <100000000>;
bus-width = <8>;
diff --git a/arch/arm/boot/dts/ste-u300.dts b/arch/arm/boot/dts/ste-u300.dts
deleted file mode 100644
index f4e7660fead7..000000000000
--- a/arch/arm/boot/dts/ste-u300.dts
+++ /dev/null
@@ -1,464 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device Tree for the ST-Ericsson U300 Machine and SoC
- */
-
-/dts-v1/;
-
-/ {
- model = "ST-Ericsson U300";
- compatible = "stericsson,u300";
- #address-cells = <1>;
- #size-cells = <1>;
-
- chosen {
- bootargs = "root=/dev/ram0 console=ttyAMA0,115200n8 earlyprintk";
- };
-
- aliases {
- serial0 = &uart0;
- serial1 = &uart1;
- };
-
- memory {
- device_type = "memory";
- reg = <0x48000000 0x03c00000>;
- };
-
- s365 {
- compatible = "stericsson,s365";
- vana15-supply = <&ab3100_ldo_d_reg>;
- syscon = <&syscon>;
- };
-
- syscon: syscon@c0011000 {
- compatible = "stericsson,u300-syscon", "syscon";
- reg = <0xc0011000 0x1000>;
- clk32: app_32_clk@32k {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <32768>;
- };
- pll13: pll13@13M {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <13000000>;
- };
- /* Slow bridge clocks under PLL13 */
- slow_clk: slow_clk@13M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <0>; /* Slow */
- clock-id = <0>;
- clocks = <&pll13>;
- };
- uart0_clk: uart0_clk@13M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <0>; /* Slow */
- clock-id = <1>;
- clocks = <&slow_clk>;
- };
- gpio_clk: gpio_clk@13M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <0>; /* Slow */
- clock-id = <4>;
- clocks = <&slow_clk>;
- };
- rtc_clk: rtc_clk@13M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <0>; /* Slow */
- clock-id = <6>;
- clocks = <&slow_clk>;
- };
- apptimer_clk: app_tmr_clk@13M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <0>; /* Slow */
- clock-id = <7>;
- clocks = <&slow_clk>;
- };
- acc_tmr_clk@13M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <0>; /* Slow */
- clock-id = <8>;
- clocks = <&slow_clk>;
- };
- pll208: pll208@208M {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <208000000>;
- };
- app208: app_208_clk@208M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&pll208>;
- };
- cpu_clk@208M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <2>; /* Rest */
- clock-id = <3>;
- clocks = <&app208>;
- };
- app104: app_104_clk@104M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <2>;
- clock-mult = <1>;
- clocks = <&pll208>;
- };
- semi_clk@104M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <2>; /* Rest */
- clock-id = <9>;
- clocks = <&app104>;
- };
- app52: app_52_clk@52M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <4>;
- clock-mult = <1>;
- clocks = <&pll208>;
- };
- /* AHB subsystem clocks */
- ahb_clk: ahb_subsys_clk@52M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <2>; /* Rest */
- clock-id = <10>;
- clocks = <&app52>;
- };
- intcon_clk@52M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <2>; /* Rest */
- clock-id = <12>;
- clocks = <&ahb_clk>;
- };
- emif_clk@52M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <2>; /* Rest */
- clock-id = <5>;
- clocks = <&ahb_clk>;
- };
- dmac_clk: dmac_clk@52M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <2>; /* Rest */
- clock-id = <4>;
- clocks = <&app52>;
- };
- fsmc_clk: fsmc_clk@52M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <2>; /* Rest */
- clock-id = <6>;
- clocks = <&app52>;
- };
- xgam_clk: xgam_clk@52M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <2>; /* Rest */
- clock-id = <8>;
- clocks = <&app52>;
- };
- app26: app_26_clk@26M {
- #clock-cells = <0>;
- compatible = "fixed-factor-clock";
- clock-div = <2>;
- clock-mult = <1>;
- clocks = <&app52>;
- };
- /* Fast bridge clocks */
- fast_clk: fast_clk@26M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <1>; /* Fast */
- clock-id = <0>;
- clocks = <&app26>;
- };
- i2c0_clk: i2c0_clk@26M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <1>; /* Fast */
- clock-id = <1>;
- clocks = <&fast_clk>;
- };
- i2c1_clk: i2c1_clk@26M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <1>; /* Fast */
- clock-id = <2>;
- clocks = <&fast_clk>;
- };
- mmc_pclk: mmc_p_clk@26M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <1>; /* Fast */
- clock-id = <5>;
- clocks = <&fast_clk>;
- };
- mmc_mclk: mmc_mclk {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-mclk";
- clocks = <&mmc_pclk>;
- };
- spi_clk: spi_p_clk@26M {
- #clock-cells = <0>;
- compatible = "stericsson,u300-syscon-clk";
- clock-type = <1>; /* Fast */
- clock-id = <6>;
- clocks = <&fast_clk>;
- };
- };
-
- timer: timer@c0014000 {
- compatible = "stericsson,u300-apptimer";
- reg = <0xc0014000 0x1000>;
- interrupt-parent = <&vica>;
- interrupts = <24 25 26 27>;
- clocks = <&apptimer_clk>;
- };
-
- gpio: gpio@c0016000 {
- compatible = "stericsson,gpio-coh901";
- reg = <0xc0016000 0x1000>;
- interrupt-parent = <&vicb>;
- interrupts = <0 1 2 18 21 22 23>;
- clocks = <&gpio_clk>;
- interrupt-names = "gpio0", "gpio1", "gpio2", "gpio3",
- "gpio4", "gpio5", "gpio6";
- interrupt-controller;
- #interrupt-cells = <2>;
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- pinctrl: pinctrl@c0011000 {
- compatible = "stericsson,pinctrl-u300";
- reg = <0xc0011000 0x1000>;
- };
-
- watchdog: watchdog@c0012000 {
- compatible = "stericsson,coh901327";
- reg = <0xc0012000 0x1000>;
- interrupt-parent = <&vicb>;
- interrupts = <3>;
- clocks = <&clk32>;
- };
-
- rtc: rtc@c0017000 {
- compatible = "stericsson,coh901331";
- reg = <0xc0017000 0x1000>;
- interrupt-parent = <&vicb>;
- interrupts = <10>;
- clocks = <&rtc_clk>;
- };
-
- dmac: dma-controller@c00020000 {
- compatible = "stericsson,coh901318";
- reg = <0xc0020000 0x1000>;
- interrupt-parent = <&vica>;
- interrupts = <2>;
- #dma-cells = <1>;
- dma-channels = <40>;
- clocks = <&dmac_clk>;
- };
-
- /* A NAND flash of 128 MiB */
- fsmc: flash@40000000 {
- compatible = "stericsson,fsmc-nand";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x9f800000 0x1000>, /* FSMC Register*/
- <0x80000000 0x4000>, /* NAND Base DATA */
- <0x80020000 0x4000>, /* NAND Base ADDR */
- <0x80010000 0x4000>; /* NAND Base CMD */
- reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd";
- nand-skip-bbtscan;
- clocks = <&fsmc_clk>;
-
- partition@0 {
- label = "boot records";
- reg = <0x0 0x20000>;
- };
- partition@20000 {
- label = "free";
- reg = <0x20000 0x7e0000>;
- };
- partition@800000 {
- label = "platform";
- reg = <0x800000 0xf800000>;
- };
- };
-
- i2c0: i2c@c0004000 {
- compatible = "st,ddci2c";
- reg = <0xc0004000 0x1000>;
- interrupt-parent = <&vicb>;
- interrupts = <8>;
- clocks = <&i2c0_clk>;
- #address-cells = <1>;
- #size-cells = <0>;
- ab3100: ab3100@48 {
- compatible = "stericsson,ab3100";
- reg = <0x48>;
- interrupt-parent = <&vica>;
- interrupts = <0>; /* EXT0 IRQ */
- ab3100-regulators {
- compatible = "stericsson,ab3100-regulators";
- ab3100_ldo_a_reg: ab3100_ldo_a {
- startup-delay-us = <200>;
- regulator-always-on;
- regulator-boot-on;
- };
- ab3100_ldo_c_reg: ab3100_ldo_c {
- startup-delay-us = <200>;
- };
- ab3100_ldo_d_reg: ab3100_ldo_d {
- startup-delay-us = <200>;
- };
- ab3100_ldo_e_reg: ab3100_ldo_e {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- startup-delay-us = <200>;
- regulator-always-on;
- regulator-boot-on;
- };
- ab3100_ldo_f_reg: ab3100_ldo_f {
- regulator-min-microvolt = <2500000>;
- regulator-max-microvolt = <2500000>;
- startup-delay-us = <600>;
- regulator-always-on;
- regulator-boot-on;
- };
- ab3100_ldo_g_reg: ab3100_ldo_g {
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <2850000>;
- startup-delay-us = <400>;
- };
- ab3100_ldo_h_reg: ab3100_ldo_h {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <2750000>;
- startup-delay-us = <200>;
- };
- ab3100_ldo_k_reg: ab3100_ldo_k {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2750000>;
- startup-delay-us = <200>;
- };
- ab3100_ext_reg: ab3100_ext {
- };
- ab3100_buck_reg: ab3100_buck {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1800000>;
- startup-delay-us = <1000>;
- regulator-always-on;
- regulator-boot-on;
- };
- };
- };
- };
-
- i2c1: i2c@c0005000 {
- compatible = "st,ddci2c";
- reg = <0xc0005000 0x1000>;
- interrupt-parent = <&vicb>;
- interrupts = <9>;
- clocks = <&i2c1_clk>;
- #address-cells = <1>;
- #size-cells = <0>;
- fwcam0: fwcam@10 {
- reg = <0x10>;
- };
- fwcam1: fwcam@5d {
- reg = <0x5d>;
- };
- };
-
- amba {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- vica: interrupt-controller@a0001000 {
- compatible = "arm,versatile-vic";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0xa0001000 0x20>;
- };
-
- vicb: interrupt-controller@a0002000 {
- compatible = "arm,versatile-vic";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0xa0002000 0x20>;
- };
-
- uart0: serial@c0013000 {
- compatible = "arm,pl011", "arm,primecell";
- reg = <0xc0013000 0x1000>;
- interrupt-parent = <&vica>;
- interrupts = <22>;
- clocks = <&uart0_clk>, <&uart0_clk>;
- clock-names = "apb_pclk", "uart0_clk";
- dmas = <&dmac 17 &dmac 18>;
- dma-names = "tx", "rx";
- };
-
- uart1: serial@c0007000 {
- compatible = "arm,pl011", "arm,primecell";
- reg = <0xc0007000 0x1000>;
- interrupt-parent = <&vicb>;
- interrupts = <20>;
- dmas = <&dmac 38 &dmac 39>;
- dma-names = "tx", "rx";
- };
-
- mmcsd: mmcsd@c0001000 {
- compatible = "arm,pl18x", "arm,primecell";
- reg = <0xc0001000 0x1000>;
- interrupt-parent = <&vicb>;
- interrupts = <6 7>;
- clocks = <&mmc_pclk>, <&mmc_mclk>;
- clock-names = "apb_pclk", "mclk";
- max-frequency = <24000000>;
- bus-width = <4>; // SD-card slot
- cap-mmc-highspeed;
- cap-sd-highspeed;
- cd-gpios = <&gpio 12 0x4>;
- cd-inverted;
- vmmc-supply = <&ab3100_ldo_g_reg>;
- dmas = <&dmac 14>;
- dma-names = "rx";
- };
-
- spi: spi@c0006000 {
- compatible = "arm,pl022", "arm,primecell";
- reg = <0xc0006000 0x1000>;
- interrupt-parent = <&vica>;
- interrupts = <23>;
- clocks = <&spi_clk>, <&spi_clk>;
- clock-names = "SSPCLK", "apb_pclk";
- dmas = <&dmac 27 &dmac 28>;
- dma-names = "tx", "rx";
- num-cs = <3>;
- #address-cells = <1>;
- #size-cells = <0>;
- spi-dummy@1 {
- compatible = "arm,pl022-dummy";
- reg = <1>;
- spi-max-frequency = <20000000>;
- };
- };
- };
-};
diff --git a/arch/arm/boot/dts/ste-ux500-samsung-golden.dts b/arch/arm/boot/dts/ste-ux500-samsung-golden.dts
index 60fe6189e728..0d43ee6583cf 100644
--- a/arch/arm/boot/dts/ste-ux500-samsung-golden.dts
+++ b/arch/arm/boot/dts/ste-ux500-samsung-golden.dts
@@ -5,6 +5,7 @@
#include "ste-ab8505.dtsi"
#include "ste-dbx5x0-pinctrl.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
@@ -72,7 +73,7 @@
soc {
/* External Micro SD card slot */
- sdi0_per1@80126000 {
+ mmc@80126000 {
status = "okay";
arm,primecell-periphid = <0x10480180>;
@@ -100,7 +101,7 @@
};
/* WLAN SDIO */
- sdi1_per2@80118000 {
+ mmc@80118000 {
status = "okay";
arm,primecell-periphid = <0x10480180>;
@@ -134,7 +135,7 @@
};
/* eMMC */
- sdi2_per3@80005000 {
+ mmc@80005000 {
status = "okay";
arm,primecell-periphid = <0x10480180>;
@@ -374,6 +375,32 @@
};
};
+ /* Richtek RT8515GQW Flash LED Driver IC */
+ flash {
+ compatible = "richtek,rt8515";
+ /* GPIO 140 */
+ enf-gpios = <&gpio4 12 GPIO_ACTIVE_HIGH>;
+ /* GPIO 141 */
+ ent-gpios = <&gpio4 13 GPIO_ACTIVE_HIGH>;
+ /*
+ * RFS is 16 kOhm and RTS is 100 kOhm, giving a
+ * flash max current of 343 mA and a torch max
+ * current of 55 mA.
+ */
+ richtek,rfs-ohms = <16000>;
+ richtek,rts-ohms = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_flash_default_mode>;
+
+ led {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ flash-max-timeout-us = <250000>;
+ flash-max-microamp = <343750>;
+ led-max-microamp = <55000>;
+ };
+ };
+
vibrator {
compatible = "gpio-vibrator";
/* GPIO195 (MOT_EN) */
@@ -499,6 +526,15 @@
};
};
+ flash {
+ gpio_flash_default_mode: flash_default {
+ golden_cfg1 {
+ pins = "GPIO140_B11", "GPIO141_C12";
+ ste,config = <&gpio_out_lo>;
+ };
+ };
+ };
+
i2c-gpio-1 {
i2c_gpio_1_default: i2c_gpio_1 {
golden_cfg1 {
diff --git a/arch/arm/boot/dts/ste-ux500-samsung-janice.dts b/arch/arm/boot/dts/ste-ux500-samsung-janice.dts
new file mode 100644
index 000000000000..7411bfeda285
--- /dev/null
+++ b/arch/arm/boot/dts/ste-ux500-samsung-janice.dts
@@ -0,0 +1,930 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Devicetree for the Samsung Galaxy S Advance GT-I9070, also known as Janice.
+ */
+
+/dts-v1/;
+#include "ste-db8500.dtsi"
+#include "ste-ab8500.dtsi"
+#include "ste-dbx5x0-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ model = "Samsung Galaxy S Advance (GT-I9070)";
+ compatible = "samsung,janice", "st-ericsson,u8500";
+
+ chosen {
+ stdout-path = &serial2;
+ };
+
+ /* External LDO for the eMMC, VMEM_3V3, controlled by GPIO6 */
+ ldo_3v3_reg: regulator-gpio-ldo-3v3 {
+ compatible = "regulator-fixed";
+ /* Supplied in turn by VBAT */
+ regulator-name = "VMEM_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio0 6 GPIO_ACTIVE_HIGH>;
+ startup-delay-us = <5000>; // FIXME
+ enable-active-high;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_ldo_en_default_mode>;
+ };
+
+ /*
+ * External Ricoh "TSP" regulator for the touchscreen.
+ * One GPIO line controls two voltages of 3.3 V and 1.8 V;
+ * this line is known as "TSP_LDO_ON1" in the schematics.
+ */
+ ldo_tsp_3v3_reg: regulator-gpio-tsp-ldo-3v3 {
+ compatible = "regulator-fixed";
+ /* Supplied in turn by VBAT */
+ regulator-name = "LDO_TSP_A3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ /* GPIO94 controls this regulator */
+ gpio = <&gpio2 30 GPIO_ACTIVE_HIGH>;
+ /* 70 ms power-on delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ pinctrl-names = "default";
+ pinctrl-0 = <&tsp_ldo_en_default_mode>;
+ };
+ ldo_tsp_1v8_reg: regulator-gpio-tsp-ldo-1v8 {
+ compatible = "regulator-fixed";
+ /* Supplied in turn by VBAT */
+ regulator-name = "VREG_TSP_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ /* GPIO94 controls this regulator */
+ gpio = <&gpio2 30 GPIO_ACTIVE_HIGH>;
+ /* 70 ms power-on delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ pinctrl-names = "default";
+ pinctrl-0 = <&tsp_ldo_en_default_mode>;
+ };
+
+ /*
+ * External Ricoh "TSP" regulator for the touchkeys.
+ * Two GPIO lines control two voltages of 3.3 V and 1.8 V:
+ * TSP_LDO_ON2 controls VREG_TOUCHKEY_1V8
+ * EN_LED_LDO controls VREG_KLED_3V3 (key LED)
+ */
+ ldo_kled_3v3_reg: regulator-gpio-vreg-kled-3v3 {
+ compatible = "regulator-fixed";
+ /* Supplied in turn by VBAT */
+ regulator-name = "VREG_KLED_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ /* GPIO68 controls this regulator */
+ gpio = <&gpio2 4 GPIO_ACTIVE_HIGH>;
+ /* 70 ms power-on delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ pinctrl-names = "default";
+ pinctrl-0 = <&en_led_ldo_default_mode>;
+ };
+ ldo_touchkey_1v8_reg: regulator-gpio-vreg-touchkey-1v8 {
+ compatible = "regulator-fixed";
+ /* Supplied in turn by VBAT */
+ regulator-name = "VREG_TOUCHKEY_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ /* GPIO89 controls this regulator */
+ gpio = <&gpio2 25 GPIO_ACTIVE_HIGH>;
+ /* 70 ms power-on delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ pinctrl-names = "default";
+ pinctrl-0 = <&tsp_ldo_on2_default_mode>;
+ };
+
+
+ /*
+ * External Ricoh RP152L010B-TR LCD LDO regulator for the display.
+ * LCD_PWR_EN controls a 3.0V and 1.8V output.
+ */
+ lcd_3v0_reg: regulator-gpio-lcd-3v0 {
+ compatible = "regulator-fixed";
+ /* Supplied in turn by VBAT */
+ regulator-name = "VREG_LCD_3V0";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ /* GPIO219 controls this regulator */
+ gpio = <&gpio6 27 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcd_pwr_en_default_mode>;
+ };
+ lcd_1v8_reg: regulator-gpio-lcd-1v8 {
+ compatible = "regulator-fixed";
+ /* Supplied in turn by VBAT */
+ regulator-name = "VREG_LCD_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ /* GPIO219 controls this regulator */
+ gpio = <&gpio6 27 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcd_pwr_en_default_mode>;
+ };
+
+ /*
+ * This regulator is a GPIO line that drives the Broadcom WLAN
+ * line BT_VREG_EN high and enables the internal regulators
+ * inside the chip.
+ *
+ * The voltage specified here is only used to determine the OCR mask
+ * for the SDIO connector; the chip is actually connected
+ * directly to VBAT.
+ */
+ wl_bt_reg: regulator-gpio-wlan {
+ compatible = "regulator-fixed";
+ regulator-name = "BT_VREG_EN";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ startup-delay-us = <100000>;
+ /* GPIO222 (BT_VREG_EN) */
+ gpio = <&gpio6 30 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_ldo_en_default>;
+ };
+
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_keys_default_mode>;
+
+ button-home {
+ linux,code = <KEY_HOME>;
+ label = "HOME";
+ /* GPIO91 */
+ gpios = <&gpio2 27 GPIO_ACTIVE_LOW>;
+ };
+ button-volup {
+ linux,code = <KEY_VOLUMEUP>;
+ label = "VOL+";
+ /* GPIO67 */
+ gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
+ };
+ button-voldown {
+ linux,code = <KEY_VOLUMEDOWN>;
+ label = "VOL-";
+ /* GPIO92 */
+ gpios = <&gpio2 28 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ /* Richtek RT8515GQW Flash LED Driver IC */
+ flash {
+ compatible = "richtek,rt8515";
+ /* GPIO 140 */
+ enf-gpios = <&gpio4 12 GPIO_ACTIVE_HIGH>;
+ /* GPIO 141 */
+ ent-gpios = <&gpio4 13 GPIO_ACTIVE_HIGH>;
+ /*
+ * RFS is 16 kOhm and RTS is 100 kOhm, giving a
+ * flash max current of 343 mA and a torch max
+ * current of 55 mA.
+ */
+ richtek,rfs-ohms = <16000>;
+ richtek,rts-ohms = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_flash_default_mode>;
+
+ led {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ flash-max-timeout-us = <250000>;
+ flash-max-microamp = <343750>;
+ led-max-microamp = <55000>;
+ };
+ };
+
+ /* Bit-banged I2C on GPIO143 and GPIO144 also called "SUBPMU I2C" */
+ i2c-gpio-0 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpio4 16 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio4 15 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_gpio_0_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Yamaha YAS530 magnetometer */
+ magnetometer@2e {
+ compatible = "yamaha,yas530";
+ reg = <0x2e>;
+ /* VDD 3V */
+ vdd-supply = <&ab8500_ldo_aux1_reg>;
+ /* IOVDD 1.8V */
+ iovdd-supply = <&ab8500_ldo_aux2_reg>;
+ /* GPIO204 COMPASS_RST_N */
+ reset-gpios = <&gpio6 12 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&yas529_default>;
+ };
+ /* TODO: this should also be used by the NCP6914 Camera power management unit */
+ };
+
+ /*
+ * These pins do have an spi controller, however the controller on
+ * these pins is not the fully featured PL022 SSP/SPI block but the
+ * ST Micro diet "PL023" version. One of the lacking features in
+ * this derivative is 3wire support, so it cannot be used to drive
+ * this panel interface. We have to use GPIO bit-banging instead.
+ */
+ spi-gpio-0 {
+ compatible = "spi-gpio";
+ /* Clock on GPIO220 */
+ sck-gpios = <&gpio6 28 GPIO_ACTIVE_HIGH>;
+ /* MISO/MOSI on GPIO224 (no separate MISO pin) */
+ mosi-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+ /* Chip select on GPIO223 */
+ cs-gpios = <&gpio6 31 GPIO_ACTIVE_LOW>;
+ num-chipselects = <1>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi_gpio_0_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "samsung,s6e63m0";
+ reg = <0>;
+ vdd3-supply = <&lcd_3v0_reg>;
+ vci-supply = <&lcd_1v8_reg>;
+ /* Reset on GPIO139 */
+ reset-gpios = <&gpio4 11 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&panel_default_mode>;
+ spi-3wire;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&display_out>;
+ };
+ };
+ };
+ };
+
+ /*
+ * Current sense amplifier on the light sensor to convert current to
+ * voltage. We do not know if this is the actual configuration. The
+ * sense resistor value was found by calibrating in room ambient
+ * light with a second mobile phone's light sensor as a reference. If you
+ * pry a Janice phone apart and inspect it, you may figure this out.
+ */
+ gp2a_shunt: current-sense-shunt {
+ compatible = "current-sense-shunt";
+ io-channels = <&gpadc 0x07>;
+ shunt-resistor-micro-ohms = <15000000>; /* circa 15 ohms */
+ #io-channel-cells = <0>;
+ io-channel-ranges;
+ };
+
+ /* Bit-banged I2C on GPIO196 and GPIO197 also called "TOUCHKEY_I2C" */
+ i2c-gpio-1 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpio6 5 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio6 4 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_gpio_1_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchkey@20 {
+ compatible = "coreriver,tc360-touchkey";
+ reg = <0x20>;
+ vdd-supply = <&ldo_kled_3v3_reg>;
+ vcc-supply = <&ldo_touchkey_1v8_reg>;
+ vddio-supply = <&ldo_touchkey_1v8_reg>;
+
+ /* Interrupt on GPIO 198 */
+ interrupt-parent = <&gpio6>;
+ interrupts = <6 IRQ_TYPE_EDGE_RISING>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchkey_default_mode>;
+ linux,keycodes = <KEY_MENU KEY_BACK>;
+ };
+ };
+
+ /* Bit-banged I2C on GPIO201 and GPIO202 also called "MOT_I2C" */
+ i2c-gpio-2 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpio6 10 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio6 9 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_gpio_2_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ /* TODO: add the Immersion ISA1200 I2C device here */
+ };
+
+ /* Bit-banged I2C on GPIO151 and GPIO152 also called "NFC_I2C" */
+ i2c-gpio-3 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpio4 24 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio4 23 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_gpio_3_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nfc@30 {
+ compatible = "nxp,pn547", "nxp,nxp-nci-i2c";
+ reg = <0x30>;
+ /* NFC IRQ on GPIO32 */
+ interrupt-parent = <&gpio1>;
+ interrupts = <0 IRQ_TYPE_EDGE_FALLING>;
+ /* GPIO 31 */
+ firmware-gpios = <&gpio0 31 GPIO_ACTIVE_HIGH>;
+ /* GPIO88 */
+ enable-gpios = <&gpio2 24 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pn547_janice_default>;
+ };
+ };
+
+ soc {
+ /* External Micro SD slot */
+ mmc@80126000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <50000000>;
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ st,sig-dir-cmd;
+ st,sig-dir-dat0;
+ st,sig-dir-dat2;
+ st,sig-pin-fbclk;
+ full-pwr-cycle;
+ /* MMC is powered by AUX3 1.2V .. 2.91V */
+ vmmc-supply = <&ab8500_ldo_aux3_reg>;
+ /* The 2.9 V level translator also uses AUX3 at 2.9 V */
+ vqmmc-supply = <&ab8500_ldo_aux3_reg>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&mc0_a_2_default>;
+ pinctrl-1 = <&mc0_a_2_sleep>;
+ cd-gpios = <&gpio6 25 GPIO_ACTIVE_LOW>; // GPIO217
+ status = "okay";
+ };
+
+ /* WLAN SDIO channel */
+ mmc@80118000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <50000000>;
+ bus-width = <4>;
+ non-removable;
+ cap-sd-highspeed;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&mc1_a_2_default>;
+ pinctrl-1 = <&mc1_a_2_sleep>;
+ /*
+ * GPIO-controlled voltage enablement: this drives
+ * the BT_VREG_EN line high when we use this device.
+ * Represented as a regulator to fill in the OCR mask and to
+ * be usable in parallel with the Bluetooth chip.
+ */
+ vmmc-supply = <&wl_bt_reg>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ wifi@1 {
+ /* Actually BRCM4330 */
+ compatible = "brcm,bcm4329-fmac";
+ reg = <1>;
+ /* GPIO216 WL_HOST_WAKE */
+ interrupt-parent = <&gpio6>;
+ interrupts = <24 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-names = "host-wake";
+ /* GPIO215 WLAN_RST_N */
+ /* FIXME: kernel does not use this assert/deassert */
+ reset-gpios = <&gpio6 23 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_default_mode>;
+ };
+ };
+
+ /* eMMC */
+ mmc@80005000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <50000000>;
+ bus-width = <8>;
+ non-removable;
+ cap-mmc-highspeed;
+ mmc-ddr-1_8v;
+ vmmc-supply = <&ldo_3v3_reg>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&mc2_a_1_default>;
+ pinctrl-1 = <&mc2_a_1_sleep>;
+ status = "okay";
+ };
+
+ /* GBF (Bluetooth) UART */
+ uart@80120000 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&u0_a_1_default>;
+ pinctrl-1 = <&u0_a_1_sleep>;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm4330-bt";
+ /*
+ * We actually have shutdown-gpios, BT_VREG_EN on GPIO222,
+ * but since this GPIO is shared with the WLAN chip, we need
+ * to reference the regulator instead. The regulator
+ * framework will reference-count the GPIO usage and
+ * make sure we can use the same GPIO for several supplies.
+ */
+ // shutdown-gpios = <&gpio6 30 GPIO_ACTIVE_HIGH>;
+ vbat-supply = <&wl_bt_reg>;
+ /* BT_WAKE on GPIO199 */
+ device-wakeup-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
+ /* BT_HOST_WAKE on GPIO97 */
+ /* FIXME: convert to interrupt */
+ host-wakeup-gpios = <&gpio3 1 GPIO_ACTIVE_HIGH>;
+ /* BT_RST_N on GPIO209 */
+ reset-gpios = <&gpio6 17 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bluetooth_default_mode>;
+ };
+ };
+
+ /* GPS UART */
+ uart@80121000 {
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ /* CTS/RTS are not used; CTS is repurposed as a GPIO */
+ pinctrl-0 = <&u1rxtx_a_1_default>;
+ pinctrl-1 = <&u1rxtx_a_1_sleep>;
+ /* FIXME: add a device for the GPS here */
+ };
+
+ /* Debugging console UART connected to TSU6111RSVR (FSA880) */
+ uart@80007000 {
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&u2rxtx_c_1_default>;
+ pinctrl-1 = <&u2rxtx_c_1_sleep>;
+ };
+
+ prcmu@80157000 {
+ ab8500 {
+ ab8500_usb {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&usb_a_1_default>;
+ pinctrl-1 = <&usb_a_1_sleep>;
+ };
+
+ ab8500-regulators {
+ ab8500_ldo_aux1 {
+ /* Used for VDD for sensors */
+ regulator-name = "V-SENSORS-VDD";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ ab8500_ldo_aux2 {
+ /* Used for VIO for sensors */
+ regulator-name = "V-SENSORS-VIO";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ ab8500_ldo_aux3 {
+ /* Used for voltage for external MMC/SD card */
+ regulator-name = "V-MMC-SD";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <2910000>;
+ };
+ };
+ };
+ };
+
+ /* I2C0 */
+ i2c@80004000 {
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c0_a_1_default>;
+ pinctrl-1 = <&i2c0_a_1_sleep>;
+
+ proximity@44 {
+ /* Janice has the GP2AP002A00F with light sensor */
+ compatible = "sharp,gp2ap002a00f";
+ clock-frequency = <400000>;
+ reg = <0x44>;
+
+ interrupt-parent = <&gpio4>;
+ interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+ vdd-supply = <&ab8500_ldo_aux1_reg>;
+ vio-supply = <&ab8500_ldo_aux2_reg>;
+ /* ADC channel AUX2 to read ALSOUT ambient light sensor out */
+ io-channels = <&gp2a_shunt>;
+ io-channel-names = "alsout";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gp2ap002_janice_default>;
+ /* B1 mode (arch/arm/mach-ux500/include/mach/gp2a.h) */
+ sharp,proximity-far-hysteresis = /bits/ 8 <0x40>;
+ sharp,proximity-close-hysteresis = /bits/ 8 <0x0f>;
+ };
+ };
+
+ /* I2C1 on GPIO16 and GPIO17 also called "MUS I2C" */
+ i2c@80122000 {
+ status = "okay";
+ pinctrl-names = "default","sleep";
+ pinctrl-0 = <&i2c1_b_2_default>;
+ pinctrl-1 = <&i2c1_b_2_sleep>;
+
+ /* Texas Instruments TSU6111 micro USB switch */
+ usb-switch@25 {
+ compatible = "ti,tsu6111";
+ reg = <0x25>;
+ /* Interrupt JACK_INT_N on GPIO95 */
+ interrupt-parent = <&gpio2>;
+ interrupts = <31 IRQ_TYPE_EDGE_FALLING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&tsu6111_janice_default>;
+ };
+ };
+
+ /* I2C2 on GPIO10 and GPIO11 also called "SENSORS I2C" */
+ i2c@80128000 {
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c2_b_2_default>;
+ pinctrl-1 = <&i2c2_b_2_sleep>;
+
+ gyroscope@68 {
+ compatible = "invensense,mpu3050";
+ reg = <0x68>;
+ /* GPIO226 interrupt */
+ interrupt-parent = <&gpio7>;
+ interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
+ /* FIXME: no idea about this */
+ mount-matrix = "1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "1";
+ vlogic-supply = <&ab8500_ldo_aux2_reg>; // 1.8V
+ vdd-supply = <&ab8500_ldo_aux1_reg>; // 3V
+ pinctrl-names = "default";
+ pinctrl-0 = <&mpu3050_janice_default>;
+
+ /*
+ * The MPU-3050 acts as a hub for the
+ * accelerometer.
+ */
+ i2c-gate {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Bosch BMA222 accelerometer */
+ accelerometer@08 {
+ compatible = "bosch,bma222";
+ reg = <0x08>;
+ /* FIXME: no idea about this */
+ mount-matrix = "1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "1";
+ vddio-supply = <&ab8500_ldo_aux2_reg>; // 1.8V
+ vdd-supply = <&ab8500_ldo_aux1_reg>; // 3V
+ };
+ };
+ };
+ };
+
+ /* I2C3 */
+ i2c@80110000 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c3_c_2_default>;
+ pinctrl-1 = <&i2c3_c_2_sleep>;
+
+ /* Atmel mXT224E touchscreen */
+ touchscreen@4a {
+ compatible = "atmel,maxtouch";
+ reg = <0x4a>;
+ /* GPIO218 (TSP_INT_1V8) */
+ interrupt-parent = <&gpio6>;
+ interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
+ /* VDDA is "analog supply", 2.57-3.47 V */
+ vdda-supply = <&ldo_tsp_3v3_reg>;
+ /* VDD is "digital supply" 1.71-3.47V */
+ vdd-supply = <&ldo_tsp_1v8_reg>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&tsp_default>;
+ };
+ };
+
+ mcde@a0350000 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&dpi_default_mode>;
+
+ port {
+ display_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+ };
+};
+
+&pinctrl {
+ /*
+ * This extends the MC0_A_2 default config to include
+ * the card detect GPIO217 line.
+ */
+ sdi0 {
+ mc0_a_2_default {
+ default_cfg4 {
+ pins = "GPIO217_AH12"; /* card detect */
+ ste,config = <&gpio_in_pd>;
+ };
+ };
+ };
+ mcde {
+ dpi_default_mode: dpi_default {
+ default_mux1 {
+ /* Mux in all the data lines */
+ function = "lcd";
+ groups =
+ /* Data lines D0-D7 GPIO70..GPIO77 */
+ "lcd_d0_d7_a_1",
+ /* Data lines D8-D11 GPIO78..GPIO81 */
+ "lcd_d8_d11_a_1",
+ /* Data lines D12-D15 GPIO82..GPIO85 */
+ "lcd_d12_d15_a_1",
+ /* Data lines D16-D23 GPIO161..GPIO168 */
+ "lcd_d16_d23_b_1";
+ };
+ default_mux2 {
+ function = "lcda";
+ /* Clock line on GPIO150, DE, VSO, HSO on GPIO169..GPIO171 */
+ groups = "lcdaclk_b_1", "lcda_b_1";
+ };
+ /* Input with no pull-up is the default state for pins used for an alt function */
+ default_cfg1 {
+ pins = "GPIO150_C14", "GPIO169_D22", "GPIO170_C23", "GPIO171_D23";
+ ste,config = <&in_nopull>;
+ };
+ };
+ };
+ /* GPIO for panel reset control */
+ panel {
+ panel_default_mode: panel_default {
+ janice_cfg1 {
+ /* Reset line */
+ pins = "GPIO139_C9";
+ ste,config = <&gpio_out_lo>;
+ };
+ };
+ };
+ /* GPIO that enables the LDO regulator for the eMMC */
+ emmc-ldo {
+ emmc_ldo_en_default_mode: emmc_ldo_default {
+ /* LDO enable on GPIO6 */
+ janice_cfg1 {
+ pins = "GPIO6_AF6";
+ ste,config = <&gpio_out_hi>;
+ };
+ };
+ };
+ /* GPIO that enables the LDO regulator for the touchscreen */
+ tsp-ldo {
+ tsp_ldo_en_default_mode: tsp_ldo_default {
+ /* LDO enable on GPIO94 */
+ janice_cfg1 {
+ pins = "GPIO94_D7";
+ ste,config = <&gpio_out_hi>;
+ };
+ };
+ };
+ /* GPIO that enables the LDO regulator for the key LED */
+ key-led {
+ en_led_ldo_default_mode: en_led_ldo_default {
+ /* EN_LED_LDO on GPIO68 */
+ janice_cfg1 {
+ pins = "GPIO68_E1";
+ ste,config = <&gpio_out_hi>;
+ };
+ };
+ };
+ /* GPIO that enables the LDO regulator for the touchkeys */
+ touchkey-ldo {
+ tsp_ldo_on2_default_mode: tsp_ldo_on2_default {
+ /* TSP_LDO_ON2 on GPIO89 */
+ janice_cfg1 {
+ pins = "GPIO89_E6";
+ ste,config = <&gpio_out_lo>;
+ };
+ };
+ };
+ touchkey {
+ touchkey_default_mode: touchkey_default {
+ janice_cfg1 {
+ /* Interrupt */
+ pins = "GPIO198_AG25";
+ ste,config = <&gpio_in_nopull>;
+ };
+ janice_cfg2 {
+ /* Reset, actually completely unused (not routed) */
+ pins = "GPIO205_AG23";
+ ste,config = <&gpio_in_pd>;
+ };
+ };
+ };
+ /* GPIO that enables the LDO regulator for the LCD display */
+ lcd-ldo {
+ lcd_pwr_en_default_mode: lcd_pwr_en_default {
+ /* LCD_PWR_EN on GPIO219 */
+ janice_cfg1 {
+ pins = "GPIO219_AG10";
+ ste,config = <&gpio_out_hi>;
+ };
+ };
+ };
+ /* GPIO that enables the WLAN internal LDO regulators */
+ wlan-ldo {
+ wlan_ldo_en_default: wlan_ldo_default {
+ /* GPIO222 BT_VREG_ON */
+ janice_cfg1 {
+ pins = "GPIO222_AJ9";
+ ste,config = <&gpio_out_lo>;
+ };
+ };
+ };
+ /* Flash and torch */
+ flash {
+ gpio_flash_default_mode: flash_default {
+ janice_cfg1 {
+ pins = "GPIO140_B11", "GPIO141_C12";
+ ste,config = <&gpio_out_lo>;
+ };
+ };
+ };
+ /* GPIO keys */
+ gpio-keys {
+ gpio_keys_default_mode: gpio_keys_default {
+ janice_cfg1 {
+ pins = "GPIO67_G2", /* VOL UP */
+ "GPIO91_B6", /* HOME */
+ "GPIO92_D6"; /* VOL DOWN */
+ ste,config = <&gpio_in_pu>;
+ };
+ };
+ };
+ /* Interrupt line for the Atmel mXT224E touchscreen */
+ tsp {
+ tsp_default: tsp_default {
+ janice_cfg1 {
+ pins = "GPIO218_AH11"; /* TSP_INT_1V8 */
+ ste,config = <&gpio_in_nopull>;
+ };
+ };
+ };
+ /* Reset line for the Yamaha YAS530 magnetometer */
+ yas529 {
+ yas529_default: yas529_janice {
+ janice_cfg1 {
+ pins = "GPIO204_AF23";
+ ste,config = <&gpio_out_hi>;
+ };
+ };
+ };
+ /* Interrupt line for light/proximity sensor GP2AP002 */
+ gp2ap002 {
+ gp2ap002_janice_default: gp2ap002_janice {
+ janice_cfg1 {
+ pins = "GPIO146_D13";
+ ste,config = <&gpio_in_nopull>;
+ };
+ };
+ };
+ /* Interrupt line for Invensense MPU3050 gyroscope */
+ mpu3050 {
+ mpu3050_janice_default: mpu3050_janice {
+ janice_cfg1 {
+ /* GPIO226 used for IRQ */
+ pins = "GPIO226_AF8";
+ ste,config = <&gpio_in_nopull>;
+ };
+ };
+ };
+ /* GPIO-based I2C bus for magnetometer and NCP6914 */
+ i2c-gpio-0 {
+ i2c_gpio_0_default: i2c_gpio_0 {
+ janice_cfg1 {
+ pins = "GPIO143_D12", "GPIO144_B13";
+ ste,config = <&gpio_in_nopull>;
+ };
+ };
+ };
+ /* GPIO-based I2C bus for the Cypress touchkeys */
+ i2c-gpio-1 {
+ i2c_gpio_1_default: i2c_gpio_1 {
+ janice_cfg1 {
+ pins = "GPIO196_AG26", "GPIO197_AH24";
+ ste,config = <&gpio_in_nopull>;
+ };
+ };
+ };
+ /* GPIO-based I2C bus for the Immersion ISA1200 */
+ i2c-gpio-2 {
+ i2c_gpio_2_default: i2c_gpio_2 {
+ janice_cfg1 {
+ pins = "GPIO201_AF24", "GPIO202_AF25";
+ ste,config = <&gpio_in_nopull>;
+ };
+ };
+ };
+ /* GPIO-based I2C bus for the NFC */
+ i2c-gpio-3 {
+ i2c_gpio_3_default: i2c_gpio_3 {
+ janice_cfg1 {
+ pins = "GPIO151_D17", "GPIO152_D16";
+ ste,config = <&gpio_in_nopull>;
+ };
+ };
+ };
+ /* GPIO-based SPI bus for the display */
+ spi-gpio-0 {
+ spi_gpio_0_default: spi_gpio_0 {
+ janice_cfg1 {
+ pins = "GPIO220_AH10", "GPIO223_AH9", "GPIO224_AG9";
+ ste,config = <&gpio_out_hi>;
+ };
+ /* This pin is unused but belongs with this SPI block */
+ janice_cfg2 {
+ pins = "GPIO225_AG8";
+ ste,config = <&in_pd>;
+ };
+ };
+ };
+ wlan {
+ wlan_default_mode: wlan_default {
+ /* GPIO215 used for RESET_N */
+ janice_cfg1 {
+ pins = "GPIO215_AH13";
+ ste,config = <&gpio_out_lo>;
+ };
+ /* GPIO216 for WL_HOST_WAKE */
+ janice_cfg2 {
+ pins = "GPIO216_AG12";
+ ste,config = <&gpio_in_pd>;
+ };
+ };
+ };
+ bluetooth {
+ bluetooth_default_mode: bluetooth_default {
+ janice_cfg1 {
+ pins = "GPIO199_AH23";
+ ste,config = <&gpio_out_lo>;
+ };
+ janice_cfg2 {
+ pins = "GPIO97_D9";
+ ste,config = <&gpio_in_nopull>;
+ };
+ janice_cfg3 {
+ pins = "GPIO209_AG15";
+ ste,config = <&gpio_out_hi>;
+ };
+ };
+ };
+ /* Interrupt line for TI TSU6111 Micro USB switch */
+ tsu6111 {
+ tsu6111_janice_default: tsu6111_janice {
+ janice_cfg1 {
+ /* GPIO95 used for IRQ */
+ pins = "GPIO95_E8";
+ ste,config = <&gpio_in_nopull>;
+ };
+ };
+ };
+ nfc {
+ pn547_janice_default: pn547_janice {
+ /* Interrupt line */
+ janice_cfg1 {
+ pins = "GPIO32_V2";
+ ste,config = <&gpio_in_nopull>;
+ };
+ /* Enable and firmware GPIOs */
+ janice_cfg2 {
+ pins = "GPIO31_V3", "GPIO88_C4";
+ ste,config = <&gpio_out_lo>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
index b50634c81b44..d28a00757d0b 100644
--- a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
+++ b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
@@ -8,6 +8,7 @@
#include "ste-ab8505.dtsi"
#include "ste-dbx5x0-pinctrl.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
@@ -118,6 +119,32 @@
pinctrl-0 = <&gpio_backlight_default_mode>;
};
+ /* Richtek RT8515GQW Flash LED Driver IC */
+ flash {
+ compatible = "richtek,rt8515";
+ /* GPIO 140 */
+ enf-gpios = <&gpio4 12 GPIO_ACTIVE_HIGH>;
+ /* GPIO 141 */
+ ent-gpios = <&gpio4 13 GPIO_ACTIVE_HIGH>;
+ /*
+ * RFS is 16 kOhm and RTS is 100 kOhm, giving a
+ * flash max current of 343 mA and a torch max
+ * current of 55 mA.
+ */
+ richtek,rfs-ohms = <16000>;
+ richtek,rts-ohms = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_flash_default_mode>;
+
+ led {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ flash-max-timeout-us = <250000>;
+ flash-max-microamp = <343750>;
+ led-max-microamp = <55000>;
+ };
+ };
+
i2c-gpio-0 {
compatible = "i2c-gpio";
sda-gpios = <&gpio4 16 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
@@ -147,7 +174,7 @@
soc {
// External Micro SD slot
- sdi0_per1@80126000 {
+ mmc@80126000 {
arm,primecell-periphid = <0x10480180>;
max-frequency = <100000000>;
bus-width = <4>;
@@ -169,7 +196,7 @@
};
// WLAN SDIO channel
- sdi1_per2@80118000 {
+ mmc@80118000 {
arm,primecell-periphid = <0x10480180>;
max-frequency = <50000000>;
bus-width = <4>;
@@ -196,7 +223,7 @@
};
// eMMC
- sdi2_per3@80005000 {
+ mmc@80005000 {
arm,primecell-periphid = <0x10480180>;
max-frequency = <100000000>;
bus-width = <8>;
@@ -487,6 +514,14 @@
};
};
};
+ flash {
+ gpio_flash_default_mode: flash_default {
+ skomer_cfg1 {
+ pins = "GPIO140_B11", "GPIO141_C12";
+ ste,config = <&gpio_out_lo>;
+ };
+ };
+ };
/* GPIO that enables the 2.9V SD card level translator */
sd-level-translator {
sd_level_translator_default: sd_level_translator_default {
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi
index ad715a0e1c9a..f6530d724d00 100644
--- a/arch/arm/boot/dts/stm32f429.dtsi
+++ b/arch/arm/boot/dts/stm32f429.dtsi
@@ -566,7 +566,7 @@
};
};
- sdio: sdio@40012c00 {
+ sdio: mmc@40012c00 {
compatible = "arm,pl180", "arm,primecell";
arm,primecell-periphid = <0x00880180>;
reg = <0x40012c00 0x400>;
diff --git a/arch/arm/boot/dts/stm32f746.dtsi b/arch/arm/boot/dts/stm32f746.dtsi
index 640ff54ed00c..e1df603fc981 100644
--- a/arch/arm/boot/dts/stm32f746.dtsi
+++ b/arch/arm/boot/dts/stm32f746.dtsi
@@ -473,7 +473,7 @@
status = "disabled";
};
- sdio2: sdio2@40011c00 {
+ sdio2: mmc@40011c00 {
compatible = "arm,pl180", "arm,primecell";
arm,primecell-periphid = <0x00880180>;
reg = <0x40011c00 0x400>;
@@ -484,7 +484,7 @@
status = "disabled";
};
- sdio1: sdio1@40012c00 {
+ sdio1: mmc@40012c00 {
compatible = "arm,pl180", "arm,primecell";
arm,primecell-periphid = <0x00880180>;
reg = <0x40012c00 0x400>;
diff --git a/arch/arm/boot/dts/stm32h743.dtsi b/arch/arm/boot/dts/stm32h743.dtsi
index b083afd0ebd6..4ebffb0a45a3 100644
--- a/arch/arm/boot/dts/stm32h743.dtsi
+++ b/arch/arm/boot/dts/stm32h743.dtsi
@@ -354,7 +354,7 @@
dma-requests = <32>;
};
- sdmmc1: sdmmc@52007000 {
+ sdmmc1: mmc@52007000 {
compatible = "arm,pl18x", "arm,primecell";
arm,primecell-periphid = <0x10153180>;
reg = <0x52007000 0x1000>;
diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
index 20a59e8f7a33..7b4249ed1983 100644
--- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
@@ -1273,6 +1273,18 @@
};
};
+ sdmmc1_b4_init_pins_a: sdmmc1-b4-init-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */
+ <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */
+ <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */
+ <STM32_PINMUX('C', 11, AF12)>; /* SDMMC1_D3 */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-disable;
+ };
+ };
+
sdmmc1_b4_sleep_pins_a: sdmmc1-b4-sleep-0 {
pins {
pinmux = <STM32_PINMUX('C', 8, ANALOG)>, /* SDMMC1_D0 */
@@ -1299,6 +1311,17 @@
};
};
+ sdmmc1_dir_init_pins_a: sdmmc1-dir-init-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 2, AF11)>, /* SDMMC1_D0DIR */
+ <STM32_PINMUX('C', 7, AF8)>, /* SDMMC1_D123DIR */
+ <STM32_PINMUX('B', 9, AF11)>; /* SDMMC1_CDIR */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ };
+
sdmmc1_dir_sleep_pins_a: sdmmc1-dir-sleep-0 {
pins {
pinmux = <STM32_PINMUX('F', 2, ANALOG)>, /* SDMMC1_D0DIR */
@@ -2018,6 +2041,23 @@
};
};
+ i2c6_pins_a: i2c6-0 {
+ pins {
+ pinmux = <STM32_PINMUX('Z', 6, AF2)>, /* I2C6_SCL */
+ <STM32_PINMUX('Z', 7, AF2)>; /* I2C6_SDA */
+ bias-disable;
+ drive-open-drain;
+ slew-rate = <0>;
+ };
+ };
+
+ i2c6_sleep_pins_a: i2c6-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('Z', 6, ANALOG)>, /* I2C6_SCL */
+ <STM32_PINMUX('Z', 7, ANALOG)>; /* I2C6_SDA */
+ };
+ };
+
spi1_pins_a: spi1-0 {
pins1 {
pinmux = <STM32_PINMUX('Z', 0, AF5)>, /* SPI1_SCK */
diff --git a/arch/arm/boot/dts/stm32mp151.dtsi b/arch/arm/boot/dts/stm32mp151.dtsi
index 3c75abacb374..4b8031782555 100644
--- a/arch/arm/boot/dts/stm32mp151.dtsi
+++ b/arch/arm/boot/dts/stm32mp151.dtsi
@@ -1047,7 +1047,7 @@
};
};
- sdmmc3: sdmmc@48004000 {
+ sdmmc3: mmc@48004000 {
compatible = "arm,pl18x", "arm,primecell";
arm,primecell-periphid = <0x00253180>;
reg = <0x48004000 0x400>;
@@ -1368,7 +1368,7 @@
status = "disabled";
};
- sdmmc1: sdmmc@58005000 {
+ sdmmc1: mmc@58005000 {
compatible = "arm,pl18x", "arm,primecell";
arm,primecell-periphid = <0x00253180>;
reg = <0x58005000 0x1000>;
@@ -1383,7 +1383,7 @@
status = "disabled";
};
- sdmmc2: sdmmc@58007000 {
+ sdmmc2: mmc@58007000 {
compatible = "arm,pl18x", "arm,primecell";
arm,primecell-periphid = <0x00253180>;
reg = <0x58007000 0x1000>;
@@ -1482,10 +1482,13 @@
usbphyc: usbphyc@5a006000 {
#address-cells = <1>;
#size-cells = <0>;
+ #clock-cells = <0>;
compatible = "st,stm32mp1-usbphyc";
reg = <0x5a006000 0x1000>;
clocks = <&rcc USBPHY_K>;
resets = <&rcc USBPHY_R>;
+ vdda1v1-supply = <&reg11>;
+ vdda1v8-supply = <&reg18>;
status = "disabled";
usbphyc_port0: usb-phy@0 {
diff --git a/arch/arm/boot/dts/stm32mp157a-stinger96.dtsi b/arch/arm/boot/dts/stm32mp157a-stinger96.dtsi
index 58275bcf9e26..113c48b2ef93 100644
--- a/arch/arm/boot/dts/stm32mp157a-stinger96.dtsi
+++ b/arch/arm/boot/dts/stm32mp157a-stinger96.dtsi
@@ -331,12 +331,8 @@
&usbphyc_port0 {
phy-supply = <&vdd_usb>;
- vdda1v1-supply = <&reg11>;
- vdda1v8-supply = <&reg18>;
};
&usbphyc_port1 {
phy-supply = <&vdd_usb>;
- vdda1v1-supply = <&reg11>;
- vdda1v8-supply = <&reg18>;
};
diff --git a/arch/arm/boot/dts/stm32mp157c-ed1.dts b/arch/arm/boot/dts/stm32mp157c-ed1.dts
index 81a7d5849db4..95b08876b2b3 100644
--- a/arch/arm/boot/dts/stm32mp157c-ed1.dts
+++ b/arch/arm/boot/dts/stm32mp157c-ed1.dts
@@ -393,12 +393,8 @@
&usbphyc_port0 {
phy-supply = <&vdd_usb>;
- vdda1v1-supply = <&reg11>;
- vdda1v8-supply = <&reg18>;
};
&usbphyc_port1 {
phy-supply = <&vdd_usb>;
- vdda1v1-supply = <&reg11>;
- vdda1v8-supply = <&reg18>;
};
diff --git a/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts b/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
index cda8e871f999..1e9bf7eea0f1 100644
--- a/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
+++ b/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
@@ -36,34 +36,35 @@
stdout-path = &uart4;
};
- led-act {
+ led-controller-0 {
compatible = "gpio-leds";
- led-green {
+ led-0 {
label = "mc1:green:act";
gpios = <&gpioa 13 GPIO_ACTIVE_LOW>;
linux,default-trigger = "heartbeat";
};
};
- led-rgb {
+ led-controller-1 {
compatible = "pwm-leds";
- led-red {
+ /* led-1 to led-3 are part of a single RGB led */
+ led-1 {
label = "mc1:red:rgb";
pwms = <&leds_pwm 1 1000000 0>;
max-brightness = <255>;
active-low;
};
- led-green {
+ led-2 {
label = "mc1:green:rgb";
pwms = <&leds_pwm 2 1000000 0>;
max-brightness = <255>;
active-low;
};
- led-blue {
+ led-3 {
label = "mc1:blue:rgb";
pwms = <&leds_pwm 3 1000000 0>;
max-brightness = <255>;
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi
index 62ab23824a3e..fad23d6f69b8 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi
@@ -33,9 +33,9 @@
* during TX anyway and that it only controls drive enable DE
* line. Hence, the RX is always enabled here.
*/
- rs485-rx-en {
+ rs485-rx-en-hog {
gpio-hog;
- gpios = <8 GPIO_ACTIVE_HIGH>;
+ gpios = <8 0>;
output-low;
line-name = "rs485-rx-en";
};
@@ -61,9 +61,9 @@
* order to reset the Hub when USB bus is powered down, but
* so far there is no such functionality.
*/
- usb-hub {
+ usb-hub-hog {
gpio-hog;
- gpios = <2 GPIO_ACTIVE_HIGH>;
+ gpios = <2 0>;
output-high;
line-name = "usb-hub-reset";
};
@@ -87,6 +87,12 @@
};
};
+&i2c4 {
+ touchscreen@49 {
+ status = "disabled";
+ };
+};
+
&i2c5 { /* TP7/TP8 */
pinctrl-names = "default";
pinctrl-0 = <&i2c5_pins_a>;
@@ -104,7 +110,7 @@
* are used for on-board microSD slot instead.
*/
/delete-property/broken-cd;
- cd-gpios = <&gpioi 10 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ cd-gpios = <&gpioi 10 GPIO_ACTIVE_HIGH>;
disable-wp;
};
@@ -152,6 +158,4 @@
&usbphyc_port0 {
phy-supply = <&vdd_usb>;
- vdda1v1-supply = <&reg11>;
- vdda1v8-supply = <&reg18>;
};
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
index 8456f172d4b1..5523f4138fd6 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
@@ -300,12 +300,8 @@
&usbphyc_port0 {
phy-supply = <&vdd_usb>;
- vdda1v1-supply = <&reg11>;
- vdda1v8-supply = <&reg18>;
};
&usbphyc_port1 {
phy-supply = <&vdd_usb>;
- vdda1v1-supply = <&reg11>;
- vdda1v8-supply = <&reg18>;
};
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-picoitx.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-picoitx.dtsi
index 356150d28c42..cd3a1798ca68 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-picoitx.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-picoitx.dtsi
@@ -36,6 +36,10 @@
status = "disabled";
};
+&fmc {
+ status = "disabled";
+};
+
&gpioa {
/*
* NOTE: The USB Port on the PicoITX needs a PWR_EN signal to enable
@@ -43,9 +47,9 @@
* in order to turn on port power when USB bus is powered up, but so
* far there is no such functionality.
*/
- usb-port-power {
+ usb-port-power-hog {
gpio-hog;
- gpios = <13 GPIO_ACTIVE_LOW>;
+ gpios = <13 0>;
output-low;
line-name = "usb-port-power";
};
@@ -94,6 +98,10 @@
/delete-property/dma-names;
};
+&ksz8851 {
+ status = "disabled";
+};
+
&usart3 {
pinctrl-names = "default";
pinctrl-0 = <&usart3_pins_a>;
@@ -132,12 +140,8 @@
&usbphyc_port0 {
phy-supply = <&vdd_usb>;
- vdda1v1-supply = <&reg11>;
- vdda1v8-supply = <&reg18>;
};
&usbphyc_port1 {
phy-supply = <&vdd_usb>;
- vdda1v1-supply = <&reg11>;
- vdda1v8-supply = <&reg18>;
};
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
index ac46ab363e1b..2617815e42a6 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
@@ -386,19 +386,38 @@
};
&sdmmc1 {
- pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-names = "default", "opendrain", "sleep", "init";
pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_a>;
pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_dir_pins_a>;
pinctrl-2 = <&sdmmc1_b4_sleep_pins_a &sdmmc1_dir_sleep_pins_a>;
- broken-cd;
+ pinctrl-3 = <&sdmmc1_b4_init_pins_a &sdmmc1_dir_init_pins_a>;
+ cd-gpios = <&gpiog 1 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ disable-wp;
st,sig-dir;
st,neg-edge;
st,use-ckin;
+ st,cmd-gpios = <&gpiod 2 0>;
+ st,ck-gpios = <&gpioc 12 0>;
+ st,ckin-gpios = <&gpioe 4 0>;
bus-width = <4>;
vmmc-supply = <&vdd_sd>;
status = "okay";
};
+&sdmmc1_b4_pins_a {
+ /*
+ * SD bus pull-up resistors:
+ * - optional on SoMs with SD voltage translator
+ * - mandatory on SoMs without SD voltage translator
+ */
+ pins1 {
+ bias-pull-up;
+ };
+ pins2 {
+ bias-pull-up;
+ };
+};
+
&sdmmc2 {
pinctrl-names = "default", "opendrain", "sleep";
pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_a>;
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
index ec02cee1dd9b..b09e87fe901a 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
@@ -391,12 +391,8 @@
&usbphyc_port0 {
phy-supply = <&vdd_usb>;
- vdda1v1-supply = <&reg11>;
- vdda1v8-supply = <&reg18>;
};
&usbphyc_port1 {
phy-supply = <&vdd_usb>;
- vdda1v1-supply = <&reg11>;
- vdda1v8-supply = <&reg18>;
};
diff --git a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
index 89c0e1ddc387..59f18846cf5d 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
@@ -694,14 +694,10 @@
&usbphyc_port0 {
phy-supply = <&vdd_usb>;
- vdda1v1-supply = <&reg11>;
- vdda1v8-supply = <&reg18>;
};
&usbphyc_port1 {
phy-supply = <&vdd_usb>;
- vdda1v1-supply = <&reg11>;
- vdda1v8-supply = <&reg18>;
};
&vrefbuf {
diff --git a/arch/arm/boot/dts/sun4i-a10-a1000.dts b/arch/arm/boot/dts/sun4i-a10-a1000.dts
index af8ab736fd3c..20f9ed244851 100644
--- a/arch/arm/boot/dts/sun4i-a10-a1000.dts
+++ b/arch/arm/boot/dts/sun4i-a10-a1000.dts
@@ -74,12 +74,12 @@
leds {
compatible = "gpio-leds";
- red {
+ led-0 {
label = "a1000:red:usr";
gpios = <&pio 7 10 GPIO_ACTIVE_HIGH>;
};
- blue {
+ led-1 {
label = "a1000:blue:pwr";
gpios = <&pio 7 20 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm/boot/dts/sun4i-a10-cubieboard.dts b/arch/arm/boot/dts/sun4i-a10-cubieboard.dts
index 6ca02e824acc..0645d6064235 100644
--- a/arch/arm/boot/dts/sun4i-a10-cubieboard.dts
+++ b/arch/arm/boot/dts/sun4i-a10-cubieboard.dts
@@ -75,12 +75,12 @@
pinctrl-names = "default";
pinctrl-0 = <&led_pins_cubieboard>;
- blue {
+ led-0 {
label = "cubieboard:blue:usr";
gpios = <&pio 7 21 GPIO_ACTIVE_HIGH>; /* LED1 */
};
- green {
+ led-1 {
label = "cubieboard:green:usr";
gpios = <&pio 7 20 GPIO_ACTIVE_HIGH>; /* LED2 */
linux,default-trigger = "heartbeat";
diff --git a/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts b/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts
index 8ee3ff42bd55..63e77c05bfda 100644
--- a/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts
+++ b/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts
@@ -62,6 +62,7 @@
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
default-brightness-level = <8>;
enable-gpios = <&pio 7 7 GPIO_ACTIVE_HIGH>; /* PH7 */
+ power-supply = <&reg_vcc3v3>;
};
chosen {
diff --git a/arch/arm/boot/dts/sun4i-a10-inet1.dts b/arch/arm/boot/dts/sun4i-a10-inet1.dts
index ca878384e902..60e432a0ef1c 100644
--- a/arch/arm/boot/dts/sun4i-a10-inet1.dts
+++ b/arch/arm/boot/dts/sun4i-a10-inet1.dts
@@ -62,6 +62,7 @@
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
default-brightness-level = <8>;
enable-gpios = <&pio 7 7 GPIO_ACTIVE_HIGH>; /* PH7 */
+ power-supply = <&reg_vcc3v3>;
};
chosen {
diff --git a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
index 8a7b4c53d278..1aeb0bd5519e 100644
--- a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
+++ b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
@@ -63,7 +63,7 @@
leds {
compatible = "gpio-leds";
- green {
+ led {
label = "q5:green:usr";
gpios = <&pio 7 20 GPIO_ACTIVE_HIGH>; /* PH20 */
};
diff --git a/arch/arm/boot/dts/sun4i-a10-marsboard.dts b/arch/arm/boot/dts/sun4i-a10-marsboard.dts
index a843e57530ed..81fdb217d339 100644
--- a/arch/arm/boot/dts/sun4i-a10-marsboard.dts
+++ b/arch/arm/boot/dts/sun4i-a10-marsboard.dts
@@ -62,22 +62,22 @@
leds {
compatible = "gpio-leds";
- red1 {
+ led-0 {
label = "marsboard:red1:usr";
gpios = <&pio 1 5 GPIO_ACTIVE_HIGH>;
};
- red2 {
+ led-1 {
label = "marsboard:red2:usr";
gpios = <&pio 1 6 GPIO_ACTIVE_HIGH>;
};
- red3 {
+ led-2 {
label = "marsboard:red3:usr";
gpios = <&pio 1 7 GPIO_ACTIVE_HIGH>;
};
- red4 {
+ led-3 {
label = "marsboard:red4:usr";
gpios = <&pio 1 8 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
index 845f76824d57..ad0e25af45be 100644
--- a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
+++ b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
@@ -74,7 +74,7 @@
pinctrl-names = "default";
pinctrl-0 = <&led_pins_olinuxinolime>;
- green {
+ led {
label = "a10-olinuxino-lime:green:usr";
gpios = <&pio 7 2 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm/boot/dts/sun4i-a10-pcduino.dts b/arch/arm/boot/dts/sun4i-a10-pcduino.dts
index 83287b6c975e..1ac82376baef 100644
--- a/arch/arm/boot/dts/sun4i-a10-pcduino.dts
+++ b/arch/arm/boot/dts/sun4i-a10-pcduino.dts
@@ -63,12 +63,12 @@
leds {
compatible = "gpio-leds";
- tx {
+ led-0 {
label = "pcduino:green:tx";
gpios = <&pio 7 15 GPIO_ACTIVE_LOW>;
};
- rx {
+ led-1 {
label = "pcduino:green:rx";
gpios = <&pio 7 16 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts b/arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts
index 24a3d23e1952..c32596947647 100644
--- a/arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts
+++ b/arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts
@@ -62,6 +62,7 @@
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
default-brightness-level = <8>;
enable-gpios = <&pio 7 7 GPIO_ACTIVE_HIGH>; /* PH7 */
+ power-supply = <&reg_vcc3v3>;
};
chosen {
diff --git a/arch/arm/boot/dts/sun5i-a10s-auxtek-t003.dts b/arch/arm/boot/dts/sun5i-a10s-auxtek-t003.dts
index 64d50fcfcd3a..04b0e6d28769 100644
--- a/arch/arm/boot/dts/sun5i-a10s-auxtek-t003.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-auxtek-t003.dts
@@ -62,7 +62,7 @@
pinctrl-names = "default";
pinctrl-0 = <&led_pins_t003>;
- red {
+ led {
label = "t003-tv-dongle:red:usr";
gpios = <&pio 1 2 GPIO_ACTIVE_HIGH>; /* PB2 */
default-state = "on";
diff --git a/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts b/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts
index 8af0eae2ddc1..667bc2dc1ea9 100644
--- a/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts
@@ -62,7 +62,7 @@
pinctrl-names = "default";
pinctrl-0 = <&led_pins_t004>;
- red {
+ led {
label = "t004-tv-dongle:red:usr";
gpios = <&pio 1 2 GPIO_ACTIVE_HIGH>; /* PB2 */
default-state = "on";
diff --git a/arch/arm/boot/dts/sun5i-a10s-mk802.dts b/arch/arm/boot/dts/sun5i-a10s-mk802.dts
index 6e90ccb267aa..d0219404c231 100644
--- a/arch/arm/boot/dts/sun5i-a10s-mk802.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-mk802.dts
@@ -60,7 +60,7 @@
leds {
compatible = "gpio-leds";
- red {
+ led {
label = "mk802:red:usr";
gpios = <&pio 1 2 GPIO_ACTIVE_HIGH>; /* PB2 */
};
diff --git a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
index d6bb82c295f0..5832bb31fc51 100644
--- a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
@@ -79,7 +79,7 @@
pinctrl-names = "default";
pinctrl-0 = <&led_pins_olinuxino>;
- green {
+ led {
label = "a10s-olinuxino-micro:green:usr";
gpios = <&pio 4 3 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm/boot/dts/sun5i-a10s-r7-tv-dongle.dts b/arch/arm/boot/dts/sun5i-a10s-r7-tv-dongle.dts
index b2a49a216ebf..964360f0610a 100644
--- a/arch/arm/boot/dts/sun5i-a10s-r7-tv-dongle.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-r7-tv-dongle.dts
@@ -63,7 +63,7 @@
pinctrl-names = "default";
pinctrl-0 = <&led_pins_r7>;
- green {
+ led {
label = "r7-tv-dongle:green:usr";
gpios = <&pio 1 2 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
index 1f74ba1634cc..ef8baa992687 100644
--- a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
@@ -62,7 +62,7 @@
leds {
compatible = "gpio-leds";
- blue {
+ led {
label = "a10s-wobo-i5:blue:usr";
gpios = <&pio 1 2 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts b/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts
index a23bf24792ec..d059388d7252 100644
--- a/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts
+++ b/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts
@@ -61,6 +61,7 @@
pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
default-brightness-level = <8>;
+ power-supply = <&reg_vcc3v3>;
/* TODO: backlight uses axp gpio1 as enable pin */
};
diff --git a/arch/arm/boot/dts/sun5i-a13-licheepi-one.dts b/arch/arm/boot/dts/sun5i-a13-licheepi-one.dts
index ba8d75b3c716..2ce361f8fede 100644
--- a/arch/arm/boot/dts/sun5i-a13-licheepi-one.dts
+++ b/arch/arm/boot/dts/sun5i-a13-licheepi-one.dts
@@ -66,18 +66,18 @@
leds {
compatible = "gpio-leds";
- red {
+ led-0 {
label ="licheepi:red:usr";
gpios = <&pio 2 5 GPIO_ACTIVE_LOW>;
};
- green {
+ led-1 {
label ="licheepi:green:usr";
gpios = <&pio 2 19 GPIO_ACTIVE_LOW>;
default-state = "on";
};
- blue {
+ led-2 {
label ="licheepi:blue:usr";
gpios = <&pio 2 4 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
index 5df398d77238..bfe1075e62cc 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
@@ -64,7 +64,7 @@
pinctrl-names = "default";
pinctrl-0 = <&led_pins_olinuxinom>;
- power {
+ led {
label = "a13-olinuxino-micro:green:power";
gpios = <&pio 6 9 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
index 39101228a755..fadeae3cd8bb 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
@@ -66,7 +66,7 @@
pinctrl-names = "default";
pinctrl-0 = <&led_pins_olinuxino>;
- power {
+ led {
gpios = <&pio 6 9 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
diff --git a/arch/arm/boot/dts/sun5i-a13-pocketbook-touch-lux-3.dts b/arch/arm/boot/dts/sun5i-a13-pocketbook-touch-lux-3.dts
index e9ef97c9c893..d60407772e5d 100644
--- a/arch/arm/boot/dts/sun5i-a13-pocketbook-touch-lux-3.dts
+++ b/arch/arm/boot/dts/sun5i-a13-pocketbook-touch-lux-3.dts
@@ -28,6 +28,7 @@
enable-gpios = <&pio 1 4 GPIO_ACTIVE_HIGH>; /* PB4 */
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
default-brightness-level = <8>;
+ power-supply = <&reg_vcc3v3>;
};
chosen {
@@ -37,7 +38,7 @@
leds {
compatible = "gpio-leds";
- power {
+ led {
gpios = <&pio 4 8 GPIO_ACTIVE_LOW>; /* PE8 */
default-state = "on";
};
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index ae04955fd9a3..7075e10911d5 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -48,7 +48,7 @@
/ {
thermal-zones {
- cpu_thermal {
+ cpu-thermal {
/* milliseconds */
polling-delay-passive = <250>;
polling-delay = <1000>;
diff --git a/arch/arm/boot/dts/sun5i-gr8-evb.dts b/arch/arm/boot/dts/sun5i-gr8-evb.dts
index 4c20d731a9c6..f4fe258ef06d 100644
--- a/arch/arm/boot/dts/sun5i-gr8-evb.dts
+++ b/arch/arm/boot/dts/sun5i-gr8-evb.dts
@@ -71,7 +71,7 @@
compatible = "pwm-backlight";
pwms = <&pwm 0 10000 0>;
enable-gpios = <&axp_gpio 1 GPIO_ACTIVE_HIGH>;
-
+ power-supply = <&reg_vcc3v3>;
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
default-brightness-level = <8>;
};
diff --git a/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi
index 1a9926d71410..6847f66699ac 100644
--- a/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi
@@ -55,6 +55,7 @@
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
default-brightness-level = <8>;
enable-gpios = <&axp_gpio 1 GPIO_ACTIVE_HIGH>; /* AXP GPIO1 */
+ power-supply = <&reg_vcc3v0>;
};
chosen {
diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
index c2b4fbf552a3..250d6b87ab4d 100644
--- a/arch/arm/boot/dts/sun5i.dtsi
+++ b/arch/arm/boot/dts/sun5i.dtsi
@@ -726,6 +726,18 @@
#size-cells = <0>;
};
+ mali: gpu@1c40000 {
+ compatible = "allwinner,sun4i-a10-mali", "arm,mali-400";
+ reg = <0x01c40000 0x10000>;
+ interrupts = <69>, <70>, <71>, <72>, <73>;
+ interrupt-names = "gp", "gpmmu", "pp0", "ppmmu0", "pmu";
+ clocks = <&ccu CLK_AHB_GPU>, <&ccu CLK_GPU>;
+ clock-names = "bus", "core";
+ resets = <&ccu RST_GPU>;
+ assigned-clocks = <&ccu CLK_GPU>;
+ assigned-clock-rates = <320000000>;
+ };
+
timer@1c60000 {
compatible = "allwinner,sun5i-a13-hstimer";
reg = <0x01c60000 0x1000>;
diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
index 73de34ae37fd..486cec6f71e0 100644
--- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
+++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
@@ -226,7 +226,7 @@
axp22x: pmic@68 {
compatible = "x-powers,axp221";
reg = <0x68>;
- interrupt-parent = <&nmi_intc>;
+ interrupt-parent = <&r_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
x-powers,drive-vbus-en;
};
diff --git a/arch/arm/boot/dts/sun6i-a31-i7.dts b/arch/arm/boot/dts/sun6i-a31-i7.dts
index 6cc8ccf53d88..744723d956f0 100644
--- a/arch/arm/boot/dts/sun6i-a31-i7.dts
+++ b/arch/arm/boot/dts/sun6i-a31-i7.dts
@@ -72,7 +72,7 @@
leds {
compatible = "gpio-leds";
- blue {
+ led {
label = "i7:blue:usr";
gpios = <&pio 7 13 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/sun6i-a31-m9.dts b/arch/arm/boot/dts/sun6i-a31-m9.dts
index a645c8f4257c..e4f3415e6108 100644
--- a/arch/arm/boot/dts/sun6i-a31-m9.dts
+++ b/arch/arm/boot/dts/sun6i-a31-m9.dts
@@ -61,7 +61,7 @@
leds {
compatible = "gpio-leds";
- blue {
+ led {
label = "m9:blue:pwr";
gpios = <&pio 7 13 GPIO_ACTIVE_HIGH>;
default-state = "on";
@@ -115,7 +115,7 @@
axp22x: pmic@68 {
compatible = "x-powers,axp221";
reg = <0x68>;
- interrupt-parent = <&nmi_intc>;
+ interrupt-parent = <&r_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts b/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts
index 648f24746234..7bd4bdd66a76 100644
--- a/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts
+++ b/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts
@@ -61,7 +61,7 @@
leds {
compatible = "gpio-leds";
- blue {
+ led {
label = "a1000g:blue:pwr";
gpios = <&pio 7 13 GPIO_ACTIVE_HIGH>;
default-state = "on";
@@ -115,7 +115,7 @@
axp22x: pmic@68 {
compatible = "x-powers,axp221";
reg = <0x68>;
- interrupt-parent = <&nmi_intc>;
+ interrupt-parent = <&r_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index f3425a66fc0a..a75033e85fcb 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -165,7 +165,7 @@
};
thermal-zones {
- cpu_thermal {
+ cpu-thermal {
/* milliseconds */
polling-delay-passive = <250>;
polling-delay = <1000>;
@@ -1305,7 +1305,7 @@
clock-output-names = "osc32k";
};
- nmi_intc: interrupt-controller@1f00c00 {
+ r_intc: interrupt-controller@1f00c00 {
compatible = "allwinner,sun6i-a31-r-intc";
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm/boot/dts/sun6i-a31s-primo81.dts b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
index bc3170a0b8b5..66bc6ca77afb 100644
--- a/arch/arm/boot/dts/sun6i-a31s-primo81.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
@@ -115,7 +115,6 @@
reg = <0x1c>;
interrupt-parent = <&pio>;
interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>; /* PA9 */
- #io-channel-cells = <1>;
};
};
@@ -159,7 +158,7 @@
axp22x: pmic@68 {
compatible = "x-powers,axp221";
reg = <0x68>;
- interrupt-parent = <&nmi_intc>;
+ interrupt-parent = <&r_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
x-powers,drive-vbus-en;
};
diff --git a/arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi b/arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi
index 3099491de8c4..7455c0db4a8a 100644
--- a/arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi
@@ -78,7 +78,7 @@
axp22x: pmic@68 {
compatible = "x-powers,axp221";
reg = <0x68>;
- interrupt-parent = <&nmi_intc>;
+ interrupt-parent = <&r_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
index 708caee52425..efb25b949f30 100644
--- a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
@@ -59,17 +59,17 @@
leds {
compatible = "gpio-leds";
- blue {
+ led-0 {
label = "bpi-m2:blue:usr";
gpios = <&pio 6 11 GPIO_ACTIVE_HIGH>; /* PG11 */
};
- green {
+ led-1 {
label = "bpi-m2:green:usr";
gpios = <&pio 6 10 GPIO_ACTIVE_HIGH>; /* PG10 */
};
- red {
+ led-2 {
label = "bpi-m2:red:usr";
gpios = <&pio 6 5 GPIO_ACTIVE_HIGH>; /* PG5 */
};
@@ -148,7 +148,7 @@
axp22x: pmic@68 {
compatible = "x-powers,axp221";
reg = <0x68>;
- interrupt-parent = <&nmi_intc>;
+ interrupt-parent = <&r_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
eldoin-supply = <&reg_dcdc1>;
x-powers,drive-vbus-en;
@@ -261,3 +261,74 @@
&usbphy {
status = "okay";
};
+
+&pio {
+ gpio-line-names =
+ /* PA */
+ "ETXD0", "ETXD1", "ETXD2", "ETXD3", "SDC0-DET", "", "",
+ "", "ETXCLK", "ETXEN", "EGTXCLK", "ERXD0", "ERXD1",
+ "ERXD2", "ERXD3", "", "", "", "", "ERXDV", "ERXCK",
+ "ETXERR", "ERXERR", "ECOL", "ECRS", "ECLKIN", "EMDC",
+ "EMDIO", "", "", "", "",
+
+ /* PB */
+ "CN7-P29", "CN7-P31", "CN7-P33", "CN7-P35", "CN7-P37",
+ "CN7-P28", "CN7-P27", "CN7-P32", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "", "", "", "", "", "", "",
+ "", "", "", "",
+
+ /* PC */
+ "", "", "", "", "", "", "WL-SDIO-CMD", "WL-SDIO-CLK",
+ "WL-SDIO-D0", "WL-SDIO-D2", "WL-SDIO-D2", "WL-SDIO-D3",
+ "", "", "", "", "", "", "", "", "", "", "", "", "", "",
+ "", "USB-DRV", "", "", "", "",
+
+ /* PD */
+ "CN9-P09", "CN9-P11", "CN9-P13", "CN9-P15", "CN9-P17",
+ "CN9-P19", "CN9-P21", "CN9-P23", "CN9-P25", "CN9-P27",
+ "CN9-P29", "CN9-P31", "CN9-P33", "CN9-P35", "CN9-P37",
+ "CN9-P39", "CN9-P40", "CN9-P38", "CN9-P36", "CN9-P34",
+ "CN9-P32", "CN9-P30", "CN9-P28", "CN9-P26", "CN9-P22",
+ "CN9-P14", "CN9-P18", "CN9-P16", "", "", "", "",
+
+ /* PE */
+ "CN6-P20", "CN6-P24", "CN6-P30", "CN6-P28", "CN7-P08",
+ "CN7-P10", "CN7-P36", "CN7-P38", "CN6-P17", "CN6-P19",
+ "CN6-P21", "CN6-P23", "CN6-P25", "CN6-P27", "CN6-P29",
+ "CN6-P31", "", "", "", "", "", "", "", "", "", "", "",
+ "", "", "", "", "",
+
+ /* PF */
+ "SDC0-D1", "SDC0-D0", "SDC0-CLK", "SDC0-CMD", "SDC0-D3",
+ "SDC0-D2", "", "", "", "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "", "", "", "", "", "", "",
+ "",
+
+ /* PG */
+ "CN9-P06", "CN9-P08", "CN9-P20", "CN9-P12", "CN9-P07",
+ "LED-PWR", "CN7-P13", "CN7-P11", "CN7-P22", "CN7-P15",
+ "LED-G", "LED-B", "CN7-P26", "CN7-P24", "CN7-P23",
+ "CN7-P19", "CN7-P21", "HCEC", "CN6-P22", "", "", "", "",
+ "", "", "", "", "", "", "", "", "",
+
+ /* PH */
+ "", "", "", "", "", "", "", "", "", "CN7-P07",
+ "CN7-P12", "CN7-P16", "CN7-P18", "CN9-P10", "CN6-P16",
+ "CN6-P14", "CN9-P04", "CN9-P02", "CN7-P05", "CN7-P03",
+ "CN8-P03", "CN8-P02", "", "", "CN6-P34", "CN6-P32",
+ "CN6-P26", "CN6-P18", "", "", "", "";
+};
+
+&r_pio {
+ gpio-line-names =
+ /* PL */
+ "PMU-SCK", "PMU-SDA", "VBAT-EN", "", "IR-RX",
+ "WL-WAKE-HOST", "BT-WAKE_HOST", "BT-ENABLE",
+ "WL-PMU-EN", "", "", "", "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "", "", "", "", "",
+
+ /* PM */
+ "CN6-P12", "CN6-P35", "CN7-P40", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "", "";
+};
diff --git a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
index 2504e7189c54..cadc45255d7b 100644
--- a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
@@ -98,7 +98,7 @@
axp22x: pmic@68 {
compatible = "x-powers,axp221";
reg = <0x68>;
- interrupt-parent = <&nmi_intc>;
+ interrupt-parent = <&r_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi
index 7de2abd541c1..6bf3fbdd738f 100644
--- a/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi
@@ -79,7 +79,7 @@
axp22x: pmic@68 {
compatible = "x-powers,axp221";
reg = <0x68>;
- interrupt-parent = <&nmi_intc>;
+ interrupt-parent = <&r_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
drivevbus-supply = <&reg_vcc5v0>;
x-powers,drive-vbus-en;
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts b/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts
index 8945dbb114a2..caa935ca4f19 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts
@@ -74,12 +74,12 @@
leds {
compatible = "gpio-leds";
- green {
+ led-0 {
label = "bananapi-m1-plus:green:usr";
gpios = <&pio 7 24 GPIO_ACTIVE_HIGH>;
};
- pwr {
+ led-1 {
label = "bananapi-m1-plus:pwr:usr";
gpios = <&pio 7 25 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
index 0b3d9ae75650..9d792d7a0f92 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
@@ -77,7 +77,7 @@
leds {
compatible = "gpio-leds";
- green {
+ led {
label = "bananapi:green:usr";
gpios = <&pio 7 24 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapro.dts b/arch/arm/boot/dts/sun7i-a20-bananapro.dts
index 01ccff756996..e22f0e8bb17a 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapro.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapro.dts
@@ -63,12 +63,12 @@
leds {
compatible = "gpio-leds";
- blue {
+ led-0 {
label = "bananapro:blue:usr";
gpios = <&pio 6 2 GPIO_ACTIVE_HIGH>;
};
- green {
+ led-1 {
label = "bananapro:green:usr";
gpios = <&pio 7 24 GPIO_ACTIVE_HIGH>;
};
@@ -110,7 +110,7 @@
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-supply = <&reg_gmac_3v3>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts b/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts
index b8203e4ef21c..e35e6990c4b2 100644
--- a/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts
+++ b/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts
@@ -75,12 +75,12 @@
leds {
compatible = "gpio-leds";
- blue {
+ led-0 {
label = "cubieboard2:blue:usr";
gpios = <&pio 7 21 GPIO_ACTIVE_HIGH>;
};
- green {
+ led-1 {
label = "cubieboard2:green:usr";
gpios = <&pio 7 20 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
index 9109ca0919ad..52160e368304 100644
--- a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
+++ b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
@@ -75,22 +75,22 @@
leds {
compatible = "gpio-leds";
- blue {
+ led-0 {
label = "cubietruck:blue:usr";
gpios = <&pio 7 21 GPIO_ACTIVE_HIGH>;
};
- orange {
+ led-1 {
label = "cubietruck:orange:usr";
gpios = <&pio 7 20 GPIO_ACTIVE_HIGH>;
};
- white {
+ led-2 {
label = "cubietruck:white:usr";
gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>;
};
- green {
+ led-3 {
label = "cubietruck:green:usr";
gpios = <&pio 7 7 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts b/arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts
index 358ed5f1b1c1..b21ddd0ec1c2 100644
--- a/arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts
+++ b/arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts
@@ -62,12 +62,12 @@
leds {
compatible = "gpio-leds";
- red {
+ led-0 {
label = "i12_tvbox:red:usr";
gpios = <&pio 7 9 GPIO_ACTIVE_LOW>;
};
- blue {
+ led-1 {
label = "i12_tvbox:blue:usr";
gpios = <&pio 7 20 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/sun7i-a20-itead-ibox.dts b/arch/arm/boot/dts/sun7i-a20-itead-ibox.dts
index 946c27278321..8ff83016ff5a 100644
--- a/arch/arm/boot/dts/sun7i-a20-itead-ibox.dts
+++ b/arch/arm/boot/dts/sun7i-a20-itead-ibox.dts
@@ -53,13 +53,13 @@
pinctrl-names = "default";
pinctrl-0 = <&led_pins_itead_core>;
- green {
+ led-0 {
label = "itead_core:green:usr";
gpios = <&pio 7 20 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
- blue {
+ led-1 {
label = "itead_core:blue:usr";
gpios = <&pio 7 21 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
index 17fa8901fc00..97518afe4658 100644
--- a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
+++ b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
@@ -75,7 +75,7 @@
leds {
compatible = "gpio-leds";
- green {
+ led {
label = "lamobo_r1:green:usr";
gpios = <&pio 7 24 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/sun7i-a20-m3.dts b/arch/arm/boot/dts/sun7i-a20-m3.dts
index 6bff9e731fc3..f161d5238860 100644
--- a/arch/arm/boot/dts/sun7i-a20-m3.dts
+++ b/arch/arm/boot/dts/sun7i-a20-m3.dts
@@ -64,7 +64,7 @@
leds {
compatible = "gpio-leds";
- blue {
+ led {
label = "m3:blue:usr";
gpios = <&pio 7 20 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts b/arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts
index 6f9c54b8e49a..f05ee32bc9cb 100644
--- a/arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts
@@ -75,7 +75,7 @@
leds {
compatible = "gpio-leds";
- green {
+ led {
label = "a20-olimex-som-evb:green:usr";
gpios = <&pio 7 2 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm/boot/dts/sun7i-a20-olimex-som204-evb.dts b/arch/arm/boot/dts/sun7i-a20-olimex-som204-evb.dts
index 230d62a6b8f1..54af6c18075b 100644
--- a/arch/arm/boot/dts/sun7i-a20-olimex-som204-evb.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olimex-som204-evb.dts
@@ -46,19 +46,19 @@
leds {
compatible = "gpio-leds";
- stat {
+ led-0 {
label = "a20-som204-evb:green:stat";
gpios = <&pio 8 0 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
- led1 {
+ led-1 {
label = "a20-som204-evb:green:led1";
gpios = <&pio 8 10 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
- led2 {
+ led-2 {
label = "a20-som204-evb:yellow:led2";
gpios = <&pio 8 11 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts
index 2adbac860119..92938d022295 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts
@@ -78,7 +78,7 @@
pinctrl-names = "default";
pinctrl-0 = <&led_pins_olinuxinolime>;
- green {
+ led {
label = "a20-olinuxino-lime:green:usr";
gpios = <&pio 7 2 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts
index 9ba62774e89a..8077f1716fbc 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts
@@ -75,7 +75,7 @@
pinctrl-names = "default";
pinctrl-0 = <&led_pins_olinuxinolime>;
- green {
+ led {
label = "a20-olinuxino-lime2:green:usr";
gpios = <&pio 7 2 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
index 359bd0d5b3b1..a1b89b2a2999 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
@@ -82,7 +82,7 @@
pinctrl-names = "default";
pinctrl-0 = <&led_pins_olinuxino>;
- green {
+ led {
label = "a20-olinuxino-micro:green:usr";
gpios = <&pio 7 2 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm/boot/dts/sun7i-a20-orangepi-mini.dts b/arch/arm/boot/dts/sun7i-a20-orangepi-mini.dts
index 2e328d2cefc1..84efa01e7cba 100644
--- a/arch/arm/boot/dts/sun7i-a20-orangepi-mini.dts
+++ b/arch/arm/boot/dts/sun7i-a20-orangepi-mini.dts
@@ -75,12 +75,12 @@
leds {
compatible = "gpio-leds";
- green {
+ led-0 {
label = "orangepi:green:usr";
gpios = <&pio 7 24 GPIO_ACTIVE_HIGH>; /* PH24 */
};
- blue {
+ led-1 {
label = "orangepi:blue:usr";
gpios = <&pio 7 25 GPIO_ACTIVE_HIGH>; /* PH25 */
};
diff --git a/arch/arm/boot/dts/sun7i-a20-orangepi.dts b/arch/arm/boot/dts/sun7i-a20-orangepi.dts
index d75b2e2bab28..5d77f1d9818f 100644
--- a/arch/arm/boot/dts/sun7i-a20-orangepi.dts
+++ b/arch/arm/boot/dts/sun7i-a20-orangepi.dts
@@ -64,7 +64,7 @@
leds {
compatible = "gpio-leds";
- green {
+ led {
label = "orangepi:green:usr";
gpios = <&pio 7 24 GPIO_ACTIVE_HIGH>; /* PH24 */
};
diff --git a/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts b/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
index bf38c66c1815..e40ecb48d726 100644
--- a/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
+++ b/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
@@ -72,14 +72,12 @@
leds {
compatible = "gpio-leds";
- /* Marked "LED3" on the PCB. */
- usr1 {
+ led-3 {
label = "pcduino3-nano:green:usr1";
gpios = <&pio 7 16 GPIO_ACTIVE_LOW>; /* PH16 */
};
- /* Marked "LED4" on the PCB. */
- usr2 {
+ led-4 {
label = "pcduino3-nano:green:usr2";
gpios = <&pio 7 15 GPIO_ACTIVE_LOW>; /* PH15 */
};
diff --git a/arch/arm/boot/dts/sun7i-a20-pcduino3.dts b/arch/arm/boot/dts/sun7i-a20-pcduino3.dts
index cc8271d777b8..4f8d55d3ba79 100644
--- a/arch/arm/boot/dts/sun7i-a20-pcduino3.dts
+++ b/arch/arm/boot/dts/sun7i-a20-pcduino3.dts
@@ -64,12 +64,12 @@
leds {
compatible = "gpio-leds";
- tx {
+ led-0 {
label = "pcduino3:green:tx";
gpios = <&pio 7 15 GPIO_ACTIVE_LOW>;
};
- rx {
+ led-1 {
label = "pcduino3:green:rx";
gpios = <&pio 7 16 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/sun7i-a20-wexler-tab7200.dts b/arch/arm/boot/dts/sun7i-a20-wexler-tab7200.dts
index 6a66b0432dfa..fef02fcbbdf8 100644
--- a/arch/arm/boot/dts/sun7i-a20-wexler-tab7200.dts
+++ b/arch/arm/boot/dts/sun7i-a20-wexler-tab7200.dts
@@ -64,6 +64,7 @@
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
default-brightness-level = <8>;
enable-gpios = <&pio 7 7 GPIO_ACTIVE_HIGH>; /* PH7 */
+ power-supply = <&reg_vcc3v3>;
};
chosen {
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 6d6a37940db2..5a40e0280665 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -140,7 +140,7 @@
};
thermal-zones {
- cpu_thermal {
+ cpu-thermal {
/* milliseconds */
polling-delay-passive = <250>;
polling-delay = <1000>;
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index c1362d0f0ff8..a42fac676b31 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -716,7 +716,7 @@
#clock-cells = <1>;
};
- nmi_intc: interrupt-controller@1f00c00 {
+ r_intc: interrupt-controller@1f00c00 {
compatible = "allwinner,sun6i-a31-r-intc";
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm/boot/dts/sun8i-a33-inet-d978-rev2.dts b/arch/arm/boot/dts/sun8i-a33-inet-d978-rev2.dts
index 317763069c0a..065cb620aa99 100644
--- a/arch/arm/boot/dts/sun8i-a33-inet-d978-rev2.dts
+++ b/arch/arm/boot/dts/sun8i-a33-inet-d978-rev2.dts
@@ -63,7 +63,7 @@
pinctrl-names = "default";
pinctrl-0 = <&led_pin_d978>;
- home {
+ led {
label = "d978:blue:home";
gpios = <&r_pio 0 5 GPIO_ACTIVE_HIGH>; /* PL5 */
};
diff --git a/arch/arm/boot/dts/sun8i-a33-olinuxino.dts b/arch/arm/boot/dts/sun8i-a33-olinuxino.dts
index a1953b2872d0..8538514c8588 100644
--- a/arch/arm/boot/dts/sun8i-a33-olinuxino.dts
+++ b/arch/arm/boot/dts/sun8i-a33-olinuxino.dts
@@ -62,7 +62,7 @@
leds {
compatible = "gpio-leds";
- green {
+ led {
label = "a33-olinuxino:green:usr";
gpios = <&pio 1 7 GPIO_ACTIVE_HIGH>;
};
@@ -98,7 +98,7 @@
axp22x: pmic@3a3 {
compatible = "x-powers,axp223";
reg = <0x3a3>;
- interrupt-parent = <&nmi_intc>;
+ interrupt-parent = <&r_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
eldoin-supply = <&reg_dcdc1>;
x-powers,drive-vbus-en;
diff --git a/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts b/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
index 785798e3a104..d54a067fc76e 100644
--- a/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
+++ b/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
@@ -63,6 +63,7 @@
panel {
compatible = "netron-dy,e231732";
+ power-supply = <&reg_vcc3v3>;
port {
panel_input: endpoint {
@@ -164,7 +165,7 @@
axp22x: pmic@3a3 {
compatible = "x-powers,axp223";
reg = <0x3a3>;
- interrupt-parent = <&nmi_intc>;
+ interrupt-parent = <&r_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
eldoin-supply = <&reg_dcdc1>;
};
diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi
index c458f5fb124f..7344c37107c6 100644
--- a/arch/arm/boot/dts/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a33.dtsi
@@ -288,7 +288,7 @@
};
thermal-zones {
- cpu_thermal {
+ cpu-thermal {
/* milliseconds */
polling-delay-passive = <250>;
polling-delay = <1000>;
diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
index 431f70234d36..b60016a4429c 100644
--- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
@@ -74,12 +74,12 @@
leds {
compatible = "gpio-leds";
- blue {
+ led-0 {
label = "bananapi-m3:blue:usr";
gpios = <&axp_gpio 1 GPIO_ACTIVE_HIGH>;
};
- green {
+ led-1 {
label = "bananapi-m3:green:usr";
gpios = <&axp_gpio 0 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
index d8326a5c681d..e26af7cf10e0 100644
--- a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
@@ -74,22 +74,22 @@
leds {
compatible = "gpio-leds";
- blue {
+ led-0 {
label = "cubietruck-plus:blue:usr";
gpios = <&pio 3 25 GPIO_ACTIVE_HIGH>; /* PD25 */
};
- orange {
+ led-1 {
label = "cubietruck-plus:orange:usr";
gpios = <&pio 3 26 GPIO_ACTIVE_HIGH>; /* PD26 */
};
- white {
+ led-2 {
label = "cubietruck-plus:white:usr";
gpios = <&pio 3 27 GPIO_ACTIVE_HIGH>; /* PD27 */
};
- green {
+ led-3 {
label = "cubietruck-plus:green:usr";
gpios = <&pio 4 4 GPIO_ACTIVE_HIGH>; /* PE4 */
};
diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
index bfc9bb277a49..83b01b03e08e 100644
--- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
@@ -65,7 +65,7 @@
compatible = "pwm-backlight";
pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
enable-gpios = <&pio 3 29 GPIO_ACTIVE_HIGH>;
-
+ power-supply = <&reg_sw>;
brightness-levels = <0 1 2 4 8 16 32 64 128 255>;
default-brightness-level = <9>;
};
diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi
index c010b27fdb6a..bd898b250e74 100644
--- a/arch/arm/boot/dts/sun8i-a83t.dtsi
+++ b/arch/arm/boot/dts/sun8i-a83t.dtsi
@@ -1061,9 +1061,6 @@
clock-names = "bus", "mod", "ram";
resets = <&ccu RST_BUS_CSI>;
status = "disabled";
-
- csi_in: port {
- };
};
hdmi: hdmi@1ee0000 {
diff --git a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
index e76d56a3df9c..f3f7a2c912ab 100644
--- a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
+++ b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
@@ -62,6 +62,35 @@
states = <1100000 0>, <1300000 1>;
};
+ reg_vcc_dram: vcc-dram {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-dram";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ regulator-boot-on;
+ enable-active-high;
+ gpio = <&r_pio 0 9 GPIO_ACTIVE_HIGH>; /* PL9 */
+ vin-supply = <&reg_vcc5v0>;
+ };
+
+ reg_vcc1v2: vcc1v2 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc1v2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ regulator-boot-on;
+ enable-active-high;
+ gpio = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */
+ vin-supply = <&reg_vcc5v0>;
+ };
+
+ poweroff {
+ compatible = "regulator-poweroff";
+ cpu-supply = <&reg_vcc1v2>;
+ };
+
wifi_pwrseq: wifi_pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
@@ -125,6 +154,7 @@
bluetooth {
compatible = "brcm,bcm43438-bt";
+ max-speed = <1500000>;
clocks = <&rtc 1>;
clock-names = "lpo";
vbat-supply = <&reg_vcc3v3>;
diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
index 45a24441ff18..62b5280ec093 100644
--- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
@@ -75,13 +75,13 @@
leds {
compatible = "gpio-leds";
- blue {
+ led-0 {
label = "beelink-x2:blue:pwr";
gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>; /* PL10 */
default-state = "on";
};
- red {
+ led-1 {
label = "beelink-x2:red:standby";
gpios = <&pio 0 15 GPIO_ACTIVE_HIGH>; /* PA15 */
};
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts
index 6b149271ef13..8e7dfcffe1fb 100644
--- a/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts
@@ -25,13 +25,13 @@
leds {
compatible = "gpio-leds";
- pwr {
+ led-0 {
label = "nanopi:red:pwr";
gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>; /* PL10 */
default-state = "on";
};
- status {
+ led-1 {
label = "nanopi:green:status";
gpios = <&pio 0 10 GPIO_ACTIVE_HIGH>; /* PA10 */
};
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-neo-air.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-neo-air.dts
index 07867a0d569b..be49eabbff94 100644
--- a/arch/arm/boot/dts/sun8i-h3-nanopi-neo-air.dts
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi-neo-air.dts
@@ -61,13 +61,13 @@
leds {
compatible = "gpio-leds";
- pwr {
+ led-0 {
label = "nanopi:green:pwr";
gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>; /* PL10 */
default-state = "on";
};
- status {
+ led-1 {
label = "nanopi:blue:status";
gpios = <&pio 0 10 GPIO_ACTIVE_HIGH>; /* PA10 */
};
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-r1.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-r1.dts
index 204a39f93f4e..26e2e6172e0d 100644
--- a/arch/arm/boot/dts/sun8i-h3-nanopi-r1.dts
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi-r1.dts
@@ -39,8 +39,8 @@
regulator-ramp-delay = <50>;
gpios = <&r_pio 0 6 GPIO_ACTIVE_HIGH>; /* PL6 */
gpios-states = <0x1>;
- states = <1100000 0x0
- 1300000 0x1>;
+ states = <1100000 0x0>,
+ <1300000 0x1>;
};
wifi_pwrseq: wifi_pwrseq {
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi.dtsi b/arch/arm/boot/dts/sun8i-h3-nanopi.dtsi
index 4df29a65316d..c7c3e7d8b3c8 100644
--- a/arch/arm/boot/dts/sun8i-h3-nanopi.dtsi
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi.dtsi
@@ -60,13 +60,13 @@
leds {
compatible = "gpio-leds";
- status {
+ led-0 {
label = "nanopi:blue:status";
gpios = <&pio 0 10 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
};
- pwr {
+ led-1 {
label = "nanopi:green:pwr";
gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-zero-plus2.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-zero-plus2.dts
index 251bbab7d707..561ea1d2f861 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-zero-plus2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-zero-plus2.dts
@@ -73,13 +73,13 @@
leds {
compatible = "gpio-leds";
- pwr {
+ led-0 {
label = "orangepi:green:pwr";
gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
- status {
+ led-1 {
label = "orangepi:red:status";
gpios = <&pio 0 17 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts b/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts
index e1c75f7fa3ca..293016d081cd 100644
--- a/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts
+++ b/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts
@@ -64,17 +64,17 @@
leds {
compatible = "gpio-leds";
- blue {
+ led-0 {
label = "bpi-m2m:blue:usr";
gpios = <&pio 2 7 GPIO_ACTIVE_LOW>;
};
- green {
+ led-1 {
label = "bpi-m2m:green:usr";
gpios = <&r_pio 0 2 GPIO_ACTIVE_LOW>;
};
- red {
+ led-2 {
label = "bpi-m2m:red:power";
gpios = <&r_pio 0 3 GPIO_ACTIVE_LOW>;
default-state = "on";
@@ -163,7 +163,7 @@
axp22x: pmic@3a3 {
compatible = "x-powers,axp223";
reg = <0x3a3>;
- interrupt-parent = <&nmi_intc>;
+ interrupt-parent = <&r_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
eldoin-supply = <&reg_dcdc1>;
x-powers,drive-vbus-en;
diff --git a/arch/arm/boot/dts/sun8i-r16-parrot.dts b/arch/arm/boot/dts/sun8i-r16-parrot.dts
index 4f48eec6b2ef..2be1b76fe2f6 100644
--- a/arch/arm/boot/dts/sun8i-r16-parrot.dts
+++ b/arch/arm/boot/dts/sun8i-r16-parrot.dts
@@ -64,14 +64,14 @@
leds {
compatible = "gpio-leds";
- led1 {
+ led-1 {
label = "parrot:led1:usr";
- gpio = <&pio 4 17 GPIO_ACTIVE_HIGH>; /* PE17 */
+ gpios = <&pio 4 17 GPIO_ACTIVE_HIGH>; /* PE17 */
};
- led2 {
+ led-2 {
label = "parrot:led2:usr";
- gpio = <&pio 4 16 GPIO_ACTIVE_HIGH>; /* PE16 */
+ gpios = <&pio 4 16 GPIO_ACTIVE_HIGH>; /* PE16 */
};
};
@@ -164,7 +164,7 @@
axp22x: pmic@3a3 {
compatible = "x-powers,axp223";
reg = <0x3a3>;
- interrupt-parent = <&nmi_intc>;
+ interrupt-parent = <&r_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
drivevbus-supply = <&reg_vcc5v0>;
x-powers,drive-vbus-en;
diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi
index 7907569e7b5c..d5ad3b9efd12 100644
--- a/arch/arm/boot/dts/sun8i-r40.dtsi
+++ b/arch/arm/boot/dts/sun8i-r40.dtsi
@@ -190,6 +190,25 @@
};
};
+ deinterlace: deinterlace@1400000 {
+ compatible = "allwinner,sun8i-r40-deinterlace",
+ "allwinner,sun8i-h3-deinterlace";
+ reg = <0x01400000 0x20000>;
+ clocks = <&ccu CLK_BUS_DEINTERLACE>,
+ <&ccu CLK_DEINTERLACE>,
+ /*
+ * NOTE: Contrary to what datasheet claims,
+ * DRAM deinterlace gate doesn't exist and
+ * it's shared with CSI1.
+ */
+ <&ccu CLK_DRAM_CSI1>;
+ clock-names = "bus", "mod", "ram";
+ resets = <&ccu RST_BUS_DEINTERLACE>;
+ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&mbus 9>;
+ interconnect-names = "dma-mem";
+ };
+
syscon: system-control@1c00000 {
compatible = "allwinner,sun8i-r40-system-control",
"allwinner,sun4i-a10-system-control";
diff --git a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
index b3d8b8f056cd..797d61cff11e 100644
--- a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
@@ -54,6 +54,7 @@
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
default-brightness-level = <8>;
enable-gpios = <&pio 7 6 GPIO_ACTIVE_HIGH>; /* PH6 */
+ power-supply = <&reg_dc1sw>;
};
chosen {
@@ -92,7 +93,7 @@
axp22x: pmic@3a3 {
compatible = "x-powers,axp223";
reg = <0x3a3>;
- interrupt-parent = <&nmi_intc>;
+ interrupt-parent = <&r_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
eldoin-supply = <&reg_dcdc1>;
drivevbus-supply = <&reg_vcc5v0>;
diff --git a/arch/arm/boot/dts/sun8i-s3-elimo-impetus.dtsi b/arch/arm/boot/dts/sun8i-s3-elimo-impetus.dtsi
index 24d507cdbcf9..052b010a5607 100644
--- a/arch/arm/boot/dts/sun8i-s3-elimo-impetus.dtsi
+++ b/arch/arm/boot/dts/sun8i-s3-elimo-impetus.dtsi
@@ -39,6 +39,6 @@
};
&usbphy {
- usb0_id_det-gpio = <&pio 5 6 GPIO_ACTIVE_HIGH>;
+ usb0_id_det-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-s3-pinecube.dts b/arch/arm/boot/dts/sun8i-s3-pinecube.dts
index 4aa0ee897a0a..20966e954eda 100644
--- a/arch/arm/boot/dts/sun8i-s3-pinecube.dts
+++ b/arch/arm/boot/dts/sun8i-s3-pinecube.dts
@@ -64,9 +64,6 @@
status = "okay";
port {
- #address-cells = <1>;
- #size-cells = <0>;
-
csi1_ep: endpoint {
remote-endpoint = <&ov5640_ep>;
bus-width = <8>;
@@ -88,13 +85,9 @@
status = "okay";
axp209: pmic@34 {
- compatible = "x-powers,axp203",
- "x-powers,axp209";
reg = <0x34>;
- interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-controller;
- #interrupt-cells = <1>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm/boot/dts/sun8i-v3-sl631-imx179.dts b/arch/arm/boot/dts/sun8i-v3-sl631-imx179.dts
new file mode 100644
index 000000000000..117aeece4e55
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-v3-sl631-imx179.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR X11)
+/*
+ * Copyright 2020 Paul Kocialkowski <contact@paulk.fr>
+ */
+
+#include "sun8i-v3-sl631.dtsi"
+
+/ {
+ model = "SL631 Action Camera with IMX179";
+ compatible = "allwinner,sl631-imx179", "allwinner,sl631",
+ "allwinner,sun8i-v3";
+};
diff --git a/arch/arm/boot/dts/sun8i-v3-sl631.dtsi b/arch/arm/boot/dts/sun8i-v3-sl631.dtsi
new file mode 100644
index 000000000000..e0d2a31efc7f
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-v3-sl631.dtsi
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR X11)
+/*
+ * Copyright 2020 Paul Kocialkowski <contact@paulk.fr>
+ */
+
+/dts-v1/;
+
+#include "sun8i-v3.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "SL631 Action Camera";
+ compatible = "allwinner,sl631", "allwinner,sun8i-v3";
+
+ aliases {
+ serial0 = &uart1;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&i2c0 {
+ status = "okay";
+
+ axp209: pmic@34 {
+ reg = <0x34>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pb_pins>;
+ status = "okay";
+};
+
+&lradc {
+ vref-supply = <&reg_ldo2>;
+ status = "okay";
+
+ button-174 {
+ label = "Down";
+ linux,code = <KEY_DOWN>;
+ channel = <0>;
+ voltage = <174603>;
+ };
+
+ button-384 {
+ label = "Up";
+ linux,code = <KEY_UP>;
+ channel = <0>;
+ voltage = <384126>;
+ };
+
+ button-593 {
+ label = "OK";
+ linux,code = <KEY_OK>;
+ channel = <0>;
+ voltage = <593650>;
+ };
+};
+
+&mmc0 {
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+ bus-width = <4>;
+ vmmc-supply = <&reg_dcdc3>;
+ status = "okay";
+};
+
+&pio {
+ vcc-pd-supply = <&reg_dcdc3>;
+ vcc-pe-supply = <&reg_dcdc3>;
+};
+
+#include "axp209.dtsi"
+
+&ac_power_supply {
+ status = "okay";
+};
+
+&battery_power_supply {
+ status = "okay";
+};
+
+&reg_dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-name = "vdd-sys-cpu";
+};
+
+&reg_dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vdd-3v3";
+};
+
+&reg_ldo1 {
+ regulator-name = "vdd-rtc";
+};
+
+&reg_ldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "avcc";
+};
+
+&spi0 {
+ status = "okay";
+
+ spi-flash@0 {
+ reg = <0>;
+ compatible = "jedec,spi-nor";
+ spi-max-frequency = <50000000>;
+ };
+};
+
+&uart1 {
+ pinctrl-0 = <&uart1_pg_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&usb_otg {
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&usbphy {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi
index f8f19d8fa795..eb4cb63fef13 100644
--- a/arch/arm/boot/dts/sun8i-v3s.dtsi
+++ b/arch/arm/boot/dts/sun8i-v3s.dtsi
@@ -157,12 +157,21 @@
syscon: system-control@1c00000 {
compatible = "allwinner,sun8i-v3s-system-control",
"allwinner,sun8i-h3-system-control";
- reg = <0x01c00000 0x1000>;
+ reg = <0x01c00000 0xd0>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
};
+ nmi_intc: interrupt-controller@1c000d0 {
+ compatible = "allwinner,sun8i-v3s-nmi",
+ "allwinner,sun9i-a80-nmi";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x01c000d0 0x0c>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
tcon0: lcd-controller@1c0c000 {
compatible = "allwinner,sun8i-v3s-tcon";
reg = <0x01c0c000 0x1000>;
@@ -329,6 +338,12 @@
#interrupt-cells = <3>;
/omit-if-no-ref/
+ csi0_mclk_pin: csi0-mclk-pin {
+ pins = "PE20";
+ function = "csi_mipi";
+ };
+
+ /omit-if-no-ref/
csi1_8bit_pins: csi1-8bit-pins {
pins = "PE0", "PE2", "PE3", "PE8", "PE9",
"PE10", "PE11", "PE12", "PE13", "PE14",
diff --git a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
index 484b93df20cb..1fe251ea94bc 100644
--- a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
+++ b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
@@ -63,12 +63,12 @@
leds {
compatible = "gpio-leds";
- green {
+ led-0 {
label = "cubieboard4:green:usr";
gpios = <&pio 7 17 GPIO_ACTIVE_HIGH>; /* PH17 */
};
- red {
+ led-1 {
label = "cubieboard4:red:usr";
gpios = <&pio 7 6 GPIO_ACTIVE_HIGH>; /* PH6 */
};
diff --git a/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi b/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi
index 8e5cb3b3fd68..7a6af54dd342 100644
--- a/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi
+++ b/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi
@@ -219,6 +219,7 @@
bluetooth {
compatible = "brcm,bcm43438-bt";
+ max-speed = <1500000>;
clocks = <&rtc 1>;
clock-names = "lpo";
vbat-supply = <&reg_vcc3v3>;
diff --git a/arch/arm/boot/dts/tango4-common.dtsi b/arch/arm/boot/dts/tango4-common.dtsi
deleted file mode 100644
index d584da314500..000000000000
--- a/arch/arm/boot/dts/tango4-common.dtsi
+++ /dev/null
@@ -1,184 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Based on Mans Rullgard's Tango3 DT
- * https://github.com/mansr/linux-tangox
- */
-
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-
-#define CPU_CLK 0
-#define SYS_CLK 1
-#define USB_CLK 2
-#define SDIO_CLK 3
-
-/ {
- interrupt-parent = <&gic>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- periph_clk: periph_clk {
- compatible = "fixed-factor-clock";
- clocks = <&clkgen CPU_CLK>;
- clock-mult = <1>;
- clock-div = <2>;
- #clock-cells = <0>;
- };
-
- mpcore {
- compatible = "simple-bus";
- ranges = <0x00000000 0x20000000 0x2000>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- scu@0 {
- compatible = "arm,cortex-a9-scu";
- reg = <0x0 0x100>;
- };
-
- twd@600 {
- compatible = "arm,cortex-a9-twd-timer";
- reg = <0x600 0x10>;
- interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
- clocks = <&periph_clk>;
- always-on;
- };
-
- gic: interrupt-controller@1000 {
- compatible = "arm,cortex-a9-gic";
- #interrupt-cells = <3>;
- interrupt-controller;
- reg = <0x1000 0x1000>, <0x100 0x100>;
- };
- };
-
- l2cc: cache-controller@20100000 {
- compatible = "arm,pl310-cache";
- reg = <0x20100000 0x1000>;
- cache-level = <2>;
- cache-unified;
- };
-
- soc {
- compatible = "simple-bus";
- interrupt-parent = <&irq0>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- xtal: xtal {
- compatible = "fixed-clock";
- clock-frequency = <27000000>;
- #clock-cells = <0>;
- };
-
- clkgen: clkgen@10000 {
- compatible = "sigma,tango4-clkgen";
- reg = <0x10000 0x100>;
- clocks = <&xtal>;
- #clock-cells = <1>;
- };
-
- tick-counter@10048 {
- compatible = "sigma,tick-counter";
- reg = <0x10048 0x4>;
- clocks = <&xtal>;
- };
-
- uart: serial@10700 {
- compatible = "ralink,rt2880-uart", "ns16550a";
- reg = <0x10700 0x30>;
- interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
- clock-frequency = <7372800>;
- reg-shift = <2>;
- };
-
- watchdog@1fd00 {
- compatible = "sigma,smp8759-wdt";
- reg = <0x1fd00 8>;
- clocks = <&xtal>;
- };
-
- mmc0: mmc@21000 {
- compatible = "arasan,sdhci-8.9a";
- reg = <0x21000 0x200>;
- clock-names = "clk_xin", "clk_ahb";
- clocks = <&clkgen SDIO_CLK>, <&clkgen SYS_CLK>;
- interrupts = <60 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- mmc1: mmc@21200 {
- compatible = "arasan,sdhci-8.9a";
- reg = <0x21200 0x200>;
- clock-names = "clk_xin", "clk_ahb";
- clocks = <&clkgen SDIO_CLK>, <&clkgen SYS_CLK>;
- interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- usb0: usb@21400 {
- compatible = "chipidea,usb2";
- reg = <0x21400 0x200>;
- interrupts = <40 IRQ_TYPE_LEVEL_HIGH>;
- phys = <&usb0_phy>;
- phy-names = "usb-phy";
- };
-
- usb0_phy: phy@21700 {
- compatible = "sigma,smp8642-usb-phy";
- reg = <0x21700 0x100>;
- #phy-cells = <0>;
- clocks = <&clkgen USB_CLK>;
- };
-
- usb1: usb@25400 {
- compatible = "chipidea,usb2";
- reg = <0x25400 0x200>;
- interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
- phys = <&usb1_phy>;
- phy-names = "usb-phy";
- };
-
- usb1_phy: phy@25700 {
- compatible = "sigma,smp8642-usb-phy";
- reg = <0x25700 0x100>;
- #phy-cells = <0>;
- clocks = <&clkgen USB_CLK>;
- };
-
- eth0: ethernet@26000 {
- compatible = "sigma,smp8734-ethernet";
- reg = <0x26000 0x800>;
- interrupts = <38 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clkgen SYS_CLK>;
- };
-
- intc: interrupt-controller@6e000 {
- compatible = "sigma,smp8642-intc";
- reg = <0x6e000 0x400>;
- ranges = <0 0x6e000 0x400>;
- interrupt-parent = <&gic>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- irq0: irq0@0 {
- reg = <0x000 0x100>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- irq1: irq1@100 {
- reg = <0x100 0x100>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- irq2: irq2@300 {
- reg = <0x300 0x100>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
- };
- };
- };
-};
diff --git a/arch/arm/boot/dts/tango4-smp8758.dtsi b/arch/arm/boot/dts/tango4-smp8758.dtsi
deleted file mode 100644
index 1c6a5bf1a86b..000000000000
--- a/arch/arm/boot/dts/tango4-smp8758.dtsi
+++ /dev/null
@@ -1,57 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "tango4-common.dtsi"
-
-/ {
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
- enable-method = "sigma,tango4-smp";
-
- cpu0: cpu@0 {
- compatible = "arm,cortex-a9";
- next-level-cache = <&l2cc>;
- device_type = "cpu";
- reg = <0>;
- clocks = <&clkgen CPU_CLK>;
- clock-latency = <1>;
- };
-
- cpu1: cpu@1 {
- compatible = "arm,cortex-a9";
- next-level-cache = <&l2cc>;
- device_type = "cpu";
- reg = <1>;
- };
- };
-
- pmu {
- compatible = "arm,cortex-a9-pmu";
- interrupt-affinity = <&cpu0>, <&cpu1>;
- interrupts =
- <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- soc {
- cpu_temp: thermal@920100 {
- #thermal-sensor-cells = <0>;
- compatible = "sigma,smp8758-thermal";
- reg = <0x920100 12>;
- };
- };
-
- thermal-zones {
- cpu_thermal: cpu-thermal {
- polling-delay = <997>; /* milliseconds */
- polling-delay-passive = <499>; /* milliseconds */
- thermal-sensors = <&cpu_temp>;
- trips {
- cpu_critical {
- temperature = <120000>;
- hysteresis = <2500>;
- type = "critical";
- };
- };
- };
- };
-};
diff --git a/arch/arm/boot/dts/tango4-vantage-1172.dts b/arch/arm/boot/dts/tango4-vantage-1172.dts
deleted file mode 100644
index d237d7f02c51..000000000000
--- a/arch/arm/boot/dts/tango4-vantage-1172.dts
+++ /dev/null
@@ -1,42 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/dts-v1/;
-
-#include "tango4-smp8758.dtsi"
-
-/ {
- model = "Sigma Designs SMP8758 Vantage-1172 Rev E1";
- compatible = "sigma,vantage-1172", "sigma,smp8758", "sigma,tango4";
-
- aliases {
- serial = &uart;
- eth0 = &eth0;
- };
-
- memory@80000000 {
- device_type = "memory";
- reg = <0x80000000 0x80000000>; /* 2 GB */
- };
-
- chosen {
- stdout-path = "serial:115200n8";
- };
-};
-
-&eth0 {
- phy-connection-type = "rgmii-id";
- phy-handle = <&eth0_phy>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* Atheros AR8035 */
- eth0_phy: ethernet-phy@4 {
- compatible = "ethernet-phy-id004d.d072",
- "ethernet-phy-ieee802.3-c22";
- interrupts = <37 IRQ_TYPE_EDGE_RISING>;
- reg = <4>;
- };
-};
-
-&mmc1 {
- non-removable; /* eMMC */
-};
diff --git a/arch/arm/boot/dts/tegra30-ouya.dts b/arch/arm/boot/dts/tegra30-ouya.dts
index 74da1360d297..0368b3b816ef 100644
--- a/arch/arm/boot/dts/tegra30-ouya.dts
+++ b/arch/arm/boot/dts/tegra30-ouya.dts
@@ -4352,8 +4352,8 @@
nvidia,pins = "cam_mclk_pcc0";
nvidia,function = "vi_alt3";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
- nvidia,tristate = <TEGRA_PIN_ENABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
pcc1 {
nvidia,pins = "pcc1";
diff --git a/arch/arm/boot/dts/zx296702-ad1.dts b/arch/arm/boot/dts/zx296702-ad1.dts
deleted file mode 100644
index bd9400840023..000000000000
--- a/arch/arm/boot/dts/zx296702-ad1.dts
+++ /dev/null
@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/dts-v1/;
-
-#include "zx296702.dtsi"
-
-/ {
- model = "ZTE ZX296702 AD1 Board";
- compatible = "zte,zx296702-ad1", "zte,zx296702";
-
- aliases {
- serial0 = &uart0;
- serial1 = &uart1;
- };
-
- memory {
- device_type = "memory";
- reg = <0x50000000 0x20000000>;
- };
-};
-
-&mmc0 {
- supports-highspeed;
- non-removable;
- disable-wp;
- status = "okay";
-
- slot@0 {
- reg = <0>;
- bus-width = <4>;
- };
-};
-
-&mmc1 {
- supports-highspeed;
- non-removable;
- disable-wp;
- status = "okay";
-
- slot@0 {
- reg = <0>;
- bus-width = <8>;
- };
-};
-
-&uart0 {
- status = "okay";
-};
diff --git a/arch/arm/boot/dts/zx296702.dtsi b/arch/arm/boot/dts/zx296702.dtsi
deleted file mode 100644
index f378c661b3bf..000000000000
--- a/arch/arm/boot/dts/zx296702.dtsi
+++ /dev/null
@@ -1,142 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <dt-bindings/clock/zx296702-clock.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
- enable-method = "zte,zx296702-smp";
-
- cpu@0 {
- compatible = "arm,cortex-a9";
- device_type = "cpu";
- next-level-cache = <&l2cc>;
- reg = <0>;
- };
-
- cpu@1 {
- compatible = "arm,cortex-a9";
- device_type = "cpu";
- next-level-cache = <&l2cc>;
- reg = <1>;
- };
- };
-
-
- soc {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "simple-bus";
- interrupt-parent = <&intc>;
- ranges;
-
- matrix: bus-matrix@400000 {
- compatible = "zte,zx-bus-matrix";
- reg = <0x00400000 0x1000>;
- };
-
- intc: interrupt-controller@801000 {
- compatible = "arm,cortex-a9-gic";
- #interrupt-cells = <3>;
- #address-cells = <1>;
- #size-cells = <1>;
- interrupt-controller;
- reg = <0x00801000 0x1000>,
- <0x00800100 0x100>;
- };
-
- global_timer: timer@8000200 {
- compatible = "arm,cortex-a9-global-timer";
- reg = <0x00800200 0x20>;
- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-parent = <&intc>;
- clocks = <&topclk ZX296702_A9_PERIPHCLK>;
- };
-
- l2cc: cache-controller@c00000 {
- compatible = "arm,pl310-cache";
- reg = <0x00c00000 0x1000>;
- cache-unified;
- cache-level = <2>;
- arm,data-latency = <1 1 1>;
- arm,tag-latency = <1 1 1>;
- arm,double-linefill = <1>;
- arm,double-linefill-incr = <0>;
- };
-
- pcu: pcu@a0008000 {
- compatible = "zte,zx296702-pcu";
- reg = <0xa0008000 0x1000>;
- };
-
- topclk: topclk@9800000 {
- compatible = "zte,zx296702-topcrm-clk";
- reg = <0x09800000 0x1000>;
- #clock-cells = <1>;
- };
-
- lsp1clk: lsp1clk@9400000 {
- compatible = "zte,zx296702-lsp1crpm-clk";
- reg = <0x09400000 0x1000>;
- #clock-cells = <1>;
- };
-
- lsp0clk: lsp0clk@b000000 {
- compatible = "zte,zx296702-lsp0crpm-clk";
- reg = <0x0b000000 0x1000>;
- #clock-cells = <1>;
- };
-
- uart0: serial@9405000 {
- compatible = "zte,zx296702-uart";
- reg = <0x09405000 0x1000>;
- interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&lsp1clk ZX296702_UART0_WCLK>;
- status = "disabled";
- };
-
- uart1: serial@9406000 {
- compatible = "zte,zx296702-uart";
- reg = <0x09406000 0x1000>;
- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&lsp1clk ZX296702_UART1_WCLK>;
- status = "disabled";
- };
-
- mmc0: mmc@9408000 {
- compatible = "snps,dw-mshc";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x09408000 0x1000>;
- interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
- fifo-depth = <32>;
- clocks = <&lsp1clk ZX296702_SDMMC0_PCLK>,
- <&lsp1clk ZX296702_SDMMC0_WCLK>;
- clock-names = "biu", "ciu";
- status = "disabled";
- };
-
- mmc1: mmc@b003000 {
- compatible = "snps,dw-mshc";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0b003000 0x1000>;
- interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
- fifo-depth = <32>;
- clocks = <&lsp0clk ZX296702_SDMMC1_PCLK>,
- <&lsp0clk ZX296702_SDMMC1_WCLK>;
- clock-names = "biu", "ciu";
- status = "disabled";
- };
-
- sysctrl: sysctrl@a0007000 {
- compatible = "zte,sysctrl", "syscon";
- reg = <0xa0007000 0x1000>;
- };
- };
-};
diff --git a/arch/arm/boot/dts/zynq-ebaz4205.dts b/arch/arm/boot/dts/zynq-ebaz4205.dts
new file mode 100644
index 000000000000..b0b836aedd76
--- /dev/null
+++ b/arch/arm/boot/dts/zynq-ebaz4205.dts
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Michael Walle <michael@walle.cc>
+ */
+/dts-v1/;
+/include/ "zynq-7000.dtsi"
+
+/ {
+ model = "Ebang EBAZ4205";
+ compatible = "ebang,ebaz4205", "xlnx,zynq-7000";
+
+ aliases {
+ ethernet0 = &gem0;
+ serial0 = &uart1;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x10000000>;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&clkc {
+ ps-clk-frequency = <33333333>;
+ fclk-enable = <8>;
+};
+
+&gem0 {
+ status = "okay";
+ phy-mode = "mii";
+ phy-handle = <&phy>;
+
+ /* PHY clock */
+ assigned-clocks = <&clkc 18>;
+ assigned-clock-rates = <25000000>;
+
+ phy: ethernet-phy@0 {
+ reg = <0>;
+ };
+};
+
+&gpio0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio0_default>;
+};
+
+&pinctrl0 {
+ pinctrl_gpio0_default: gpio0-default {
+ mux {
+ groups = "gpio0_20_grp", "gpio0_32_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_20_grp", "gpio0_32_grp";
+ io-standard = <3>;
+ slew-rate = <0>;
+ };
+
+ conf-pull-up {
+ pins = "MIO20", "MIO32";
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci0_default: sdhci0-default {
+ mux {
+ groups = "sdio0_2_grp";
+ function = "sdio0";
+ };
+
+ conf {
+ groups = "sdio0_2_grp";
+ io-standard = <3>;
+ slew-rate = <0>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "gpio0_34_grp";
+ function = "sdio0_cd";
+ };
+
+ conf-cd {
+ groups = "gpio0_34_grp";
+ io-standard = <3>;
+ slew-rate = <0>;
+ bias-high-impedance;
+ bias-pull-up;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_4_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_4_grp";
+ io-standard = <3>;
+ slew-rate = <0>;
+ };
+
+ conf-rx {
+ pins = "MIO25";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO24";
+ bias-disable;
+ };
+ };
+};
+
+&sdhci0 {
+ status = "okay";
+ disable-wp;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci0_default>;
+};
+
+&uart1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
+};
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index 62f241b09fe3..e45f4e4e06b6 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -838,11 +838,10 @@ static int locomo_bus_remove(struct device *dev)
{
struct locomo_dev *ldev = LOCOMO_DEV(dev);
struct locomo_driver *drv = LOCOMO_DRV(dev->driver);
- int ret = 0;
if (drv->remove)
- ret = drv->remove(ldev);
- return ret;
+ drv->remove(ldev);
+ return 0;
}
struct bus_type locomo_bus_type = {
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index f89c1ea327a2..ff5e0d04cb89 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -1368,11 +1368,11 @@ static int sa1111_bus_remove(struct device *dev)
{
struct sa1111_dev *sadev = to_sa1111_device(dev);
struct sa1111_driver *drv = SA1111_DRV(dev->driver);
- int ret = 0;
if (drv->remove)
- ret = drv->remove(sadev);
- return ret;
+ drv->remove(sadev);
+
+ return 0;
}
struct bus_type sa1111_bus_type = {
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index c0c219d53b24..6403b064e8dc 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -17,8 +17,7 @@ CONFIG_SOC_SAM9X60=y
# CONFIG_ATMEL_CLOCKSOURCE_PIT is not set
CONFIG_AEABI=y
CONFIG_UACCESS_WITH_MEMCPY=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
+# CONFIG_ATAGS is not set
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CMDLINE="console=ttyS0,115200 initrd=0x21100000,25165824 root=/dev/ram0 rw"
@@ -38,6 +37,8 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
# CONFIG_INET_DIAG is not set
CONFIG_IPV6_SIT_6RD=y
+CONFIG_CAN=y
+CONFIG_CAN_AT91=y
CONFIG_CFG80211=y
CONFIG_MAC80211=y
CONFIG_DEVTMPFS=y
@@ -57,8 +58,8 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=4
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_SSC=y
+CONFIG_EEPROM_AT24=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
@@ -92,7 +93,6 @@ CONFIG_RT2800USB_UNKNOWN=y
CONFIG_RTL8187=m
CONFIG_RTL8192CU=m
# CONFIG_RTLWIFI_DEBUG is not set
-CONFIG_INPUT_POLLDEV=y
CONFIG_INPUT_JOYDEV=y
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
@@ -107,13 +107,16 @@ CONFIG_LEGACY_PTY_COUNT=4
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
CONFIG_HW_RANDOM=y
+CONFIG_I2C_CHARDEV=y
CONFIG_I2C_AT91=y
CONFIG_I2C_GPIO=y
CONFIG_SPI=y
CONFIG_SPI_ATMEL=y
CONFIG_SPI_ATMEL_QUADSPI=y
+CONFIG_SPI_GPIO=y
+CONFIG_PINCTRL_MCP23S08=m
+CONFIG_GPIO_SYSFS=y
CONFIG_POWER_RESET=y
-# CONFIG_POWER_RESET_AT91_SAMA5D2_SHDWC is not set
CONFIG_POWER_SUPPLY=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
@@ -131,6 +134,7 @@ CONFIG_MEDIA_PLATFORM_SUPPORT=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_VIDEO_ATMEL_ISI=y
CONFIG_VIDEO_OV2640=m
+CONFIG_VIDEO_OV7740=m
CONFIG_VIDEO_MT9V032=m
CONFIG_DRM=y
CONFIG_DRM_ATMEL_HLCDC=y
@@ -209,7 +213,9 @@ CONFIG_NLS_UTF8=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
-# CONFIG_CRYPTO_HW is not set
+CONFIG_CRYPTO_DEV_ATMEL_AES=y
+CONFIG_CRYPTO_DEV_ATMEL_TDES=y
+CONFIG_CRYPTO_DEV_ATMEL_SHA=y
CONFIG_CRC_CCITT=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
index 44ff9cd88d81..383c632eba7b 100644
--- a/arch/arm/configs/bcm2835_defconfig
+++ b/arch/arm/configs/bcm2835_defconfig
@@ -21,7 +21,6 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_JUMP_LABEL=y
CONFIG_CC_STACKPROTECTOR_REGULAR=y
CONFIG_MODULES=y
@@ -177,7 +176,6 @@ CONFIG_BOOT_PRINTK_DELAY=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_UNUSED_SYMBOLS=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_SCHED_TRACER=y
diff --git a/arch/arm/configs/cns3420vb_defconfig b/arch/arm/configs/cns3420vb_defconfig
index 66a80b46038d..63fa2eb21b75 100644
--- a/arch/arm/configs/cns3420vb_defconfig
+++ b/arch/arm/configs/cns3420vb_defconfig
@@ -11,7 +11,6 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_PERF_EVENTS is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig
index 911e880f06ed..15b749f6996d 100644
--- a/arch/arm/configs/corgi_defconfig
+++ b/arch/arm/configs/corgi_defconfig
@@ -5,7 +5,6 @@ CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/arm/configs/efm32_defconfig b/arch/arm/configs/efm32_defconfig
deleted file mode 100644
index 46213f0530c4..000000000000
--- a/arch/arm/configs/efm32_defconfig
+++ /dev/null
@@ -1,98 +0,0 @@
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_LOG_BUF_SHIFT=12
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_UID16 is not set
-# CONFIG_BASE_FULL is not set
-# CONFIG_FUTEX is not set
-# CONFIG_EPOLL is not set
-# CONFIG_SIGNALFD is not set
-# CONFIG_EVENTFD is not set
-# CONFIG_AIO is not set
-CONFIG_EMBEDDED=y
-# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_SLUB_DEBUG is not set
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_MMU is not set
-CONFIG_ARM_SINGLE_ARMV7M=y
-CONFIG_ARCH_EFM32=y
-CONFIG_SET_MEM_PARAM=y
-CONFIG_DRAM_BASE=0x88000000
-CONFIG_DRAM_SIZE=0x00400000
-CONFIG_FLASH_MEM_BASE=0x8c000000
-CONFIG_FLASH_SIZE=0x01000000
-CONFIG_PREEMPT=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_XIP_KERNEL=y
-CONFIG_XIP_PHYS_ADDR=0x8c000000
-CONFIG_BINFMT_FLAT=y
-CONFIG_BINFMT_SHARED_FLAT=y
-# CONFIG_COREDUMP is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-# CONFIG_WIRELESS is not set
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_FW_LOADER is not set
-CONFIG_MTD=y
-CONFIG_MTD_BLOCK_RO=y
-CONFIG_MTD_ROM=y
-CONFIG_MTD_UCLINUX=y
-# CONFIG_BLK_DEV is not set
-CONFIG_NETDEVICES=y
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CIRRUS is not set
-# CONFIG_NET_VENDOR_FARADAY is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-CONFIG_KS8851=y
-# CONFIG_NET_VENDOR_MICROCHIP is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SMSC is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_WLAN is not set
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_UNIX98_PTYS is not set
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_NONSTANDARD=y
-# CONFIG_DEVKMEM is not set
-CONFIG_SERIAL_EFM32_UART=y
-CONFIG_SERIAL_EFM32_UART_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-# CONFIG_I2C_COMPAT is not set
-CONFIG_I2C_EFM32=y
-CONFIG_SPI=y
-CONFIG_SPI_EFM32=y
-CONFIG_GPIO_SYSFS=y
-# CONFIG_USB_SUPPORT is not set
-CONFIG_MMC=y
-CONFIG_MMC_SPI=y
-CONFIG_EXT2_FS=y
-# CONFIG_FILE_LOCKING is not set
-# CONFIG_DNOTIFY is not set
-# CONFIG_INOTIFY_USER is not set
-CONFIG_ROMFS_FS=y
-CONFIG_ROMFS_BACKED_BY_MTD=y
-# CONFIG_NETWORK_FILESYSTEMS is not set
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_MAGIC_SYSRQ=y
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_FTRACE is not set
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index bb70acc6b526..1d9fa77bbafc 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -27,7 +27,6 @@ CONFIG_AEABI=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_PM_DEBUG=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 221f5c340c86..70928cc48939 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -222,6 +222,7 @@ CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_POWER_RESET_SYSCON_POWEROFF=y
CONFIG_POWER_SUPPLY=y
+CONFIG_RN5T618_POWER=m
CONFIG_SENSORS_MC13783_ADC=y
CONFIG_SENSORS_GPIO_FAN=y
CONFIG_SENSORS_IIO_HWMON=y
diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig
index 84a3b055f253..33c917df7b32 100644
--- a/arch/arm/configs/keystone_defconfig
+++ b/arch/arm/configs/keystone_defconfig
@@ -16,7 +16,6 @@ CONFIG_KALLSYMS_ALL=y
# CONFIG_BASE_FULL is not set
CONFIG_EMBEDDED=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig
index e00be9faa23b..9f862b21b40a 100644
--- a/arch/arm/configs/multi_v5_defconfig
+++ b/arch/arm/configs/multi_v5_defconfig
@@ -67,7 +67,6 @@ CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_KIRKWOOD_CPUIDLE=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -105,7 +104,6 @@ CONFIG_MTD_SPI_NOR=y
CONFIG_SPI_ASPEED_SMC=y
CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
-CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_SSC=m
CONFIG_EEPROM_AT24=y
# CONFIG_SCSI_PROC_FS is not set
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index c5f25710fedc..3823da605430 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -6,6 +6,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
CONFIG_ARCH_VIRT=y
+CONFIG_ARCH_ACTIONS=y
CONFIG_ARCH_ALPINE=y
CONFIG_ARCH_ARTPEC=y
CONFIG_MACH_ARTPEC6=y
@@ -223,7 +224,6 @@ CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_VIRTIO_BLK=y
CONFIG_AD525X_DPOT=y
CONFIG_AD525X_DPOT_I2C=y
-CONFIG_ATMEL_TCLIB=y
CONFIG_ICS932S401=y
CONFIG_ATMEL_SSC=m
CONFIG_QCOM_COINCELL=m
@@ -377,6 +377,8 @@ CONFIG_SERIAL_ST_ASC=y
CONFIG_SERIAL_ST_ASC_CONSOLE=y
CONFIG_SERIAL_STM32=y
CONFIG_SERIAL_STM32_CONSOLE=y
+CONFIG_SERIAL_OWL=y
+CONFIG_SERIAL_OWL_CONSOLE=y
CONFIG_SERIAL_DEV_BUS=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_ASPEED_KCS_IPMI_BMC=m
@@ -401,6 +403,7 @@ CONFIG_I2C_EMEV2=m
CONFIG_I2C_IMX=y
CONFIG_I2C_MESON=y
CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_OWL=y
CONFIG_I2C_RIIC=y
CONFIG_I2C_RK3X=y
CONFIG_I2C_S3C2410=y
@@ -449,6 +452,8 @@ CONFIG_PINCTRL_AS3722=y
CONFIG_PINCTRL_RZA2=y
CONFIG_PINCTRL_STMFX=y
CONFIG_PINCTRL_PALMAS=y
+CONFIG_PINCTRL_OWL=y
+CONFIG_PINCTRL_S500=y
CONFIG_PINCTRL_APQ8064=y
CONFIG_PINCTRL_APQ8084=y
CONFIG_PINCTRL_IPQ8064=y
@@ -515,7 +520,9 @@ CONFIG_ARMADA_THERMAL=y
CONFIG_BCM2711_THERMAL=m
CONFIG_BCM2835_THERMAL=m
CONFIG_BRCMSTB_THERMAL=m
+CONFIG_GENERIC_ADC_THERMAL=m
CONFIG_ST_THERMAL_MEMMAP=y
+CONFIG_TEGRA_SOCTHERM=m
CONFIG_UNIPHIER_THERMAL=y
CONFIG_DA9063_WATCHDOG=m
CONFIG_XILINX_WATCHDOG=y
@@ -656,6 +663,7 @@ CONFIG_V4L_TEST_DRIVERS=y
CONFIG_VIDEO_VIVID=m
CONFIG_CEC_PLATFORM_DRIVERS=y
CONFIG_CEC_SAMSUNG_S5P=m
+CONFIG_CEC_STM32=m
CONFIG_VIDEO_ADV7180=m
CONFIG_VIDEO_ADV7604=m
CONFIG_VIDEO_ADV7604_CEC=y
@@ -878,6 +886,7 @@ CONFIG_MMC_SH_MMCIF=y
CONFIG_MMC_SUNXI=y
CONFIG_MMC_BCM2835=y
CONFIG_MMC_SDHCI_OMAP=y
+CONFIG_MMC_OWL=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_CLASS_FLASH=m
@@ -947,6 +956,7 @@ CONFIG_IMX_DMA=y
CONFIG_IMX_SDMA=y
CONFIG_MV_XOR=y
CONFIG_MXS_DMA=y
+CONFIG_OWL_DMA=y
CONFIG_PL330_DMA=y
CONFIG_SIRF_DMA=y
CONFIG_STE_DMA40=y
@@ -977,6 +987,8 @@ CONFIG_COMMON_CLK_MAX77686=y
CONFIG_COMMON_CLK_RK808=m
CONFIG_COMMON_CLK_SCMI=y
CONFIG_COMMON_CLK_S2MPS11=m
+CONFIG_CLK_ACTIONS=y
+CONFIG_CLK_OWL_S500=y
CONFIG_CLK_RASPBERRYPI=y
CONFIG_COMMON_CLK_QCOM=y
CONFIG_QCOM_CLK_RPM=y
@@ -1107,6 +1119,7 @@ CONFIG_ROCKCHIP_EFUSE=m
CONFIG_NVMEM_SUNXI_SID=y
CONFIG_NVMEM_VF610_OCOTP=y
CONFIG_MESON_MX_EFUSE=m
+CONFIG_NVMEM_RMEM=m
CONFIG_FSI=m
CONFIG_FSI_MASTER_GPIO=m
CONFIG_FSI_MASTER_HUB=m
diff --git a/arch/arm/configs/mv78xx0_defconfig b/arch/arm/configs/mv78xx0_defconfig
index b39b1300a459..cd703c15798f 100644
--- a/arch/arm/configs/mv78xx0_defconfig
+++ b/arch/arm/configs/mv78xx0_defconfig
@@ -5,7 +5,6 @@ CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_SLUB_DEBUG is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/mvebu_v5_defconfig b/arch/arm/configs/mvebu_v5_defconfig
index 226f2e97c6e2..4f16716bfc32 100644
--- a/arch/arm/configs/mvebu_v5_defconfig
+++ b/arch/arm/configs/mvebu_v5_defconfig
@@ -4,7 +4,6 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=19
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index a9c6f32a9b1c..ca32446b187f 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -164,7 +164,6 @@ CONFIG_FONTS=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_FRAME_WARN=2048
-CONFIG_UNUSED_SYMBOLS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_SOFTLOCKUP_DETECTOR=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 3b6e7452609b..3148567b66b6 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -13,7 +13,6 @@ CONFIG_EXPERT=y
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLOB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index b515c31f0ab7..f250bf1cc022 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -10,7 +10,6 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
@@ -40,9 +39,6 @@ CONFIG_ARM_THUMBEE=y
CONFIG_ARM_ERRATA_411920=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_SECCOMP=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200"
@@ -54,10 +50,8 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPUFREQ_DT=m
# CONFIG_ARM_OMAP2PLUS_CPUFREQ is not set
-CONFIG_ARM_TI_CPUFREQ=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
-CONFIG_DT_IDLE_STATES=y
CONFIG_KERNEL_MODE_NEON=y
CONFIG_PM_DEBUG=y
CONFIG_ARM_CRYPTO=y
@@ -68,7 +62,6 @@ CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRYPTO_GHASH_ARM_CE=m
CONFIG_CRYPTO_CHACHA20_NEON=m
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
@@ -158,9 +151,9 @@ CONFIG_MTD_ONENAND=y
CONFIG_MTD_ONENAND_VERIFY_WRITE=y
CONFIG_MTD_ONENAND_OMAP2=y
CONFIG_MTD_RAW_NAND=y
-CONFIG_MTD_NAND_ECC_SW_BCH=y
CONFIG_MTD_NAND_OMAP2=y
CONFIG_MTD_NAND_OMAP_BCH=y
+CONFIG_MTD_NAND_ECC_SW_BCH=y
CONFIG_MTD_SPI_NOR=m
CONFIG_MTD_UBI=y
CONFIG_ZRAM=m
@@ -202,11 +195,11 @@ CONFIG_TI_CPSW_SWITCHDEV=y
CONFIG_TI_CPTS=y
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-CONFIG_DP83848_PHY=y
-CONFIG_DP83867_PHY=y
CONFIG_MICREL_PHY=y
CONFIG_AT803X_PHY=y
CONFIG_SMSC_PHY=y
+CONFIG_DP83848_PHY=y
+CONFIG_DP83867_PHY=y
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -354,14 +347,8 @@ CONFIG_IR_RX51=m
CONFIG_IR_GPIO_TX=m
CONFIG_IR_PWM_TX=m
CONFIG_MEDIA_SUPPORT=m
-CONFIG_MEDIA_CAMERA_SUPPORT=y
-CONFIG_MEDIA_CEC_SUPPORT=y
-CONFIG_MEDIA_CONTROLLER=y
-CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_VIDEO_OMAP3=m
-CONFIG_CEC_PLATFORM_DRIVERS=y
-# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
CONFIG_VIDEO_TVP5150=m
CONFIG_VIDEO_MT9P031=m
CONFIG_DRM=m
@@ -369,8 +356,8 @@ CONFIG_DRM_OMAP=m
CONFIG_OMAP5_DSS_HDMI=y
CONFIG_OMAP2_DSS_SDI=y
CONFIG_OMAP2_DSS_DSI=y
-CONFIG_DRM_OMAP_PANEL_DSI_CM=m
CONFIG_DRM_TILCDC=m
+CONFIG_DRM_PANEL_DSI_CM=m
CONFIG_DRM_PANEL_SIMPLE=m
CONFIG_DRM_PANEL_LG_LB035Q02=m
CONFIG_DRM_PANEL_NEC_NL8048HL11=m
@@ -517,14 +504,14 @@ CONFIG_IIO=m
CONFIG_IIO_SW_DEVICE=m
CONFIG_IIO_SW_TRIGGER=m
CONFIG_IIO_ST_ACCEL_3AXIS=m
+CONFIG_KXCJK1013=m
CONFIG_CPCAP_ADC=m
CONFIG_INA2XX_ADC=m
CONFIG_TI_AM335X_ADC=m
CONFIG_TWL4030_MADC=m
CONFIG_SENSORS_ISL29028=m
-CONFIG_BMP280=m
-CONFIG_KXCJK1013=m
CONFIG_AK8975=m
+CONFIG_BMP280=m
CONFIG_PWM=y
CONFIG_PWM_OMAP_DMTIMER=m
CONFIG_PWM_TIECAP=m
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index 4bdbb036ac26..b9e3b647e732 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -5,7 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
# CONFIG_SLUB_DEBUG is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/prima2_defconfig b/arch/arm/configs/prima2_defconfig
deleted file mode 100644
index be19aa127595..000000000000
--- a/arch/arm/configs/prima2_defconfig
+++ /dev/null
@@ -1,72 +0,0 @@
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_RELAY=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_ARCH_SIRF=y
-CONFIG_SMP=y
-CONFIG_SCHED_MC=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_KEXEC=y
-CONFIG_BINFMT_MISC=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_INPUT_MISC=y
-CONFIG_SERIAL_SIRFSOC=y
-CONFIG_SERIAL_SIRFSOC_CONSOLE=y
-CONFIG_HW_RANDOM=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_SIRF=y
-CONFIG_SPI=y
-CONFIG_SPI_SIRF=y
-CONFIG_SPI_SPIDEV=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_MASS_STORAGE=m
-CONFIG_MMC=y
-CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_PLTFM=y
-CONFIG_MMC_SDHCI_SIRF=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_SIRFSOC=y
-CONFIG_DMADEVICES=y
-CONFIG_DMADEVICES_DEBUG=y
-CONFIG_DMADEVICES_VDEBUG=y
-CONFIG_SIRF_DMA=y
-CONFIG_HWSPINLOCK_SIRF=y
-# CONFIG_IOMMU_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_CRAMFS=y
-CONFIG_ROMFS_FS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_SECTION_MISMATCH=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_PREEMPT is not set
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 8654ece13004..bd7dd81c9c54 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -13,7 +13,6 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_SLOB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
index d6733e745b80..3f36887e8333 100644
--- a/arch/arm/configs/qcom_defconfig
+++ b/arch/arm/configs/qcom_defconfig
@@ -10,7 +10,6 @@ CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -66,6 +65,8 @@ CONFIG_MTD_M25P80=y
CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_QCOM=y
CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_QCOMSMEM_PARTS=y
+CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_QCOM_COINCELL=y
@@ -124,6 +125,7 @@ CONFIG_I2C_QUP=y
CONFIG_SPI=y
CONFIG_SPI_QUP=y
CONFIG_SPMI=y
+CONFIG_PINCTRL_MSM=y
CONFIG_PINCTRL_APQ8064=y
CONFIG_PINCTRL_APQ8084=y
CONFIG_PINCTRL_IPQ4019=y
@@ -132,6 +134,7 @@ CONFIG_PINCTRL_MSM8660=y
CONFIG_PINCTRL_MSM8960=y
CONFIG_PINCTRL_MDM9615=y
CONFIG_PINCTRL_MSM8X74=y
+CONFIG_PINCTRL_SDX55=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_PINCTRL_QCOM_SSBI_PMIC=y
CONFIG_GPIOLIB=y
@@ -150,6 +153,7 @@ CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_QCOM_RPM=y
CONFIG_REGULATOR_QCOM_SMD_RPM=y
CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_QCOM_RPMH=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_DRM=y
CONFIG_DRM_MSM=m
@@ -192,6 +196,7 @@ CONFIG_USB_CONFIGFS_ECM=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_ULPI_BUS=y
CONFIG_USB_ETH=m
+CONFIG_USB_DWC3=y
CONFIG_MMC=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_ARMMMCI=y
@@ -211,6 +216,7 @@ CONFIG_QCOM_BAM_DMA=y
CONFIG_STAGING=y
CONFIG_COMMON_CLK_QCOM=y
CONFIG_QCOM_CLK_RPM=y
+CONFIG_QCOM_CLK_RPMH=y
CONFIG_QCOM_CLK_SMD_RPM=y
CONFIG_APQ_MMCC_8084=y
CONFIG_IPQ_GCC_4019=y
@@ -220,7 +226,9 @@ CONFIG_MSM_LCC_8960=y
CONFIG_MDM_LCC_9615=y
CONFIG_MSM_MMCC_8960=y
CONFIG_MSM_MMCC_8974=y
+CONFIG_SDX_GCC_55=y
CONFIG_MSM_IOMMU=y
+CONFIG_ARM_SMMU=y
CONFIG_HWSPINLOCK=y
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_MAILBOX=y
@@ -230,6 +238,7 @@ CONFIG_QCOM_Q6V5_PIL=y
CONFIG_QCOM_WCNSS_PIL=y
CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_QCOM_COMMAND_DB=y
CONFIG_QCOM_GSBI=y
CONFIG_QCOM_OCMEM=y
CONFIG_QCOM_PM=y
@@ -237,6 +246,8 @@ CONFIG_QCOM_SMEM=y
CONFIG_QCOM_SMD_RPM=y
CONFIG_QCOM_SMP2P=y
CONFIG_QCOM_SMSM=y
+CONFIG_QCOM_RPMH=y
+CONFIG_QCOM_RPMHPD=y
CONFIG_QCOM_WCNSS_CTRL=y
CONFIG_EXTCON_QCOM_SPMI_MISC=y
CONFIG_IIO=y
@@ -256,6 +267,8 @@ CONFIG_PHY_QCOM_APQ8064_SATA=y
CONFIG_PHY_QCOM_IPQ806X_SATA=y
CONFIG_PHY_QCOM_USB_HS=y
CONFIG_PHY_QCOM_USB_HSIC=y
+CONFIG_PHY_QCOM_QMP=y
+CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2=y
CONFIG_QCOM_QFPROM=y
CONFIG_INTERCONNECT=y
CONFIG_INTERCONNECT_QCOM=y
@@ -267,6 +280,7 @@ CONFIG_FUSE_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
+CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
@@ -282,3 +296,6 @@ CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
+CONFIG_WATCHDOG=y
+CONFIG_QCOM_WDT=y
+CONFIG_ARM_PSCI=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index 5f6297e6c549..f4c3c0652432 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -1,7 +1,6 @@
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
@@ -19,11 +18,8 @@ CONFIG_SOC_SAMA5D2=y
CONFIG_SOC_SAMA5D3=y
CONFIG_SOC_SAMA5D4=y
# CONFIG_ATMEL_CLOCKSOURCE_PIT is not set
-CONFIG_AEABI=y
CONFIG_UACCESS_WITH_MEMCPY=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_ARM_APPENDED_DTB=y
+# CONFIG_ATAGS is not set
CONFIG_CMDLINE="console=ttyS0,115200 initrd=0x21100000,25165824 root=/dev/ram0 rw"
CONFIG_KEXEC=y
CONFIG_VFP=y
@@ -41,13 +37,7 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
-# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET6_XFRM_MODE_BEET is not set
CONFIG_IPV6_SIT_6RD=y
CONFIG_BRIDGE=m
CONFIG_BRIDGE_VLAN_FILTERING=y
@@ -68,7 +58,6 @@ CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
-CONFIG_MTD_M25P80=y
CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_ATMEL=y
CONFIG_MTD_SPI_NOR=y
@@ -78,7 +67,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=4
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_SSC=y
CONFIG_EEPROM_AT24=y
CONFIG_SCSI=y
@@ -133,6 +121,7 @@ CONFIG_I2C_AT91=y
CONFIG_I2C_GPIO=y
CONFIG_SPI=y
CONFIG_SPI_ATMEL=y
+CONFIG_SPI_ATMEL_QUADSPI=y
CONFIG_SPI_GPIO=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_SAMA5D2_PIOBU=m
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index e73c97b0f5b0..0c60eb382c80 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -18,7 +18,6 @@ CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_VFP=y
CONFIG_NEON=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index 8b2c14424927..f42c7a502b6e 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -5,7 +5,6 @@ CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/arm/configs/tango4_defconfig b/arch/arm/configs/tango4_defconfig
deleted file mode 100644
index cbc9ade78f14..000000000000
--- a/arch/arm/configs/tango4_defconfig
+++ /dev/null
@@ -1,93 +0,0 @@
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_NO_HZ_IDLE=y
-CONFIG_HIGH_RES_TIMERS=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_ARCH_TANGO=y
-# CONFIG_ARM_ERRATA_643719 is not set
-CONFIG_SMP=y
-CONFIG_PREEMPT=y
-CONFIG_HZ_300=y
-CONFIG_AEABI=y
-CONFIG_HIGHMEM=y
-# CONFIG_ATAGS is not set
-CONFIG_ARM_APPENDED_DTB=y
-CONFIG_ARM_ATAG_DTB_COMPAT=y
-CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPUFREQ_DT=y
-CONFIG_VFP=y
-CONFIG_NEON=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_IPV6 is not set
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_MTD=y
-CONFIG_MTD_TESTS=m
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_RAW_NAND=y
-CONFIG_MTD_NAND_TANGO=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_VENDOR_AURORA=y
-CONFIG_AURORA_NB8800=y
-CONFIG_AT803X_PHY=y
-# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_8250=y
-# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_RT288X=y
-CONFIG_SERIAL_OF_PLATFORM=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_XLR=y
-CONFIG_GPIOLIB=y
-CONFIG_THERMAL=y
-CONFIG_CPU_THERMAL=y
-CONFIG_TANGO_THERMAL=y
-CONFIG_WATCHDOG=y
-CONFIG_TANGOX_WATCHDOG=y
-CONFIG_FB=y
-# CONFIG_HID is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_CHIPIDEA=y
-CONFIG_USB_CHIPIDEA_HOST=y
-CONFIG_MMC=y
-CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_PLTFM=y
-CONFIG_MMC_SDHCI_OF_ARASAN=y
-CONFIG_DMADEVICES=y
-CONFIG_EXT4_FS=y
-CONFIG_FUSE_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-# CONFIG_NFS_V2 is not set
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_UTF8=m
-CONFIG_PRINTK_TIME=y
-# CONFIG_CRYPTO_ECHAINIV is not set
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 74739a52a8ad..13ef3e4dcbb7 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -167,6 +167,7 @@ CONFIG_SENSORS_LM95245=y
CONFIG_THERMAL=y
CONFIG_THERMAL_STATISTICS=y
CONFIG_CPU_THERMAL=y
+CONFIG_TEGRA_SOCTHERM=m
CONFIG_WATCHDOG=y
CONFIG_MAX77620_WATCHDOG=y
CONFIG_TEGRA_WATCHDOG=y
@@ -237,12 +238,13 @@ CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_TEGRA=y
CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_TEGRA=y
CONFIG_USB_ACM=y
CONFIG_USB_WDM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_TEGRA=y
CONFIG_USB_GADGET=y
CONFIG_MMC=y
CONFIG_MMC_BLOCK_MINORS=16
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig
deleted file mode 100644
index 543f07338100..000000000000
--- a/arch/arm/configs/u300_defconfig
+++ /dev/null
@@ -1,65 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EXPERT=y
-# CONFIG_AIO is not set
-# CONFIG_VM_EVENT_COUNTERS is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ARCH_MULTI_V7 is not set
-CONFIG_ARCH_U300=y
-CONFIG_MACH_U300_SPIDUMMY=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=/dev/ram0 rw rootfstype=rootfs console=ttyAMA0,115200n8 lpj=515072"
-CONFIG_CPU_IDLE=y
-# CONFIG_SUSPEND is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_RAW_NAND=y
-CONFIG_MTD_NAND_FSMC=y
-# CONFIG_INPUT_MOUSEDEV is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_LEGACY_PTY_COUNT=16
-CONFIG_SERIAL_AMBA_PL011=y
-CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_FIXED_VOLTAGE=y
-CONFIG_FB=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_USB_SUPPORT is not set
-CONFIG_MMC=y
-CONFIG_MMC_ARMMMCI=y
-CONFIG_RTC_CLASS=y
-# CONFIG_RTC_HCTOSYS is not set
-CONFIG_RTC_DRV_COH901331=y
-CONFIG_DMADEVICES=y
-CONFIG_COH901318=y
-# CONFIG_DNOTIFY is not set
-CONFIG_FUSE_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
-# CONFIG_SCHED_DEBUG is not set
-CONFIG_TIMER_STATS=y
-# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig
index c01baf7d6e37..4479369540f2 100644
--- a/arch/arm/configs/vexpress_defconfig
+++ b/arch/arm/configs/vexpress_defconfig
@@ -11,7 +11,6 @@ CONFIG_CPUSETS=y
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/arm/configs/zx_defconfig b/arch/arm/configs/zx_defconfig
deleted file mode 100644
index a046a492bfa7..000000000000
--- a/arch/arm/configs/zx_defconfig
+++ /dev/null
@@ -1,122 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_DEBUG=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_NAMESPACES=y
-CONFIG_USER_NS=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_EMBEDDED=y
-CONFIG_PERF_EVENTS=y
-CONFIG_SLAB=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_ZX=y
-CONFIG_SOC_ZX296702=y
-# CONFIG_SWP_EMULATE is not set
-CONFIG_ARM_ERRATA_754322=y
-CONFIG_ARM_ERRATA_775420=y
-CONFIG_SMP=y
-CONFIG_VMSPLIT_2G=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_KSM=y
-# CONFIG_IOMMU_SUPPORT is not set
-CONFIG_VFP=y
-CONFIG_NEON=y
-CONFIG_KERNEL_MODE_NEON=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_HIBERNATION=y
-CONFIG_PM_RUNTIME=y
-CONFIG_PM_DEBUG=y
-CONFIG_SUSPEND_TIME=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=ttyAMA0,115200 debug earlyprintk root=/dev/ram rw rootwait"
-#CONFIG_NET is not set
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_DMA_CMA=y
-CONFIG_CMA_SIZE_MBYTES=192
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=1
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_UID_STAT=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_CHR_DEV_SCH=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_MD=y
-CONFIG_BLK_DEV_DM=y
-CONFIG_DM_CRYPT=y
-CONFIG_DM_UEVENT=y
-CONFIG_DM_VERITY=y
-CONFIG_NETDEVICES=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIO=y
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SPI=y
-CONFIG_LOGO=y
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_CONSOLE_POLL=y
-CONFIG_SERIAL_AMBA_PL011=y
-CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-# CONFIG_USB_SUPPORT is not set
-CONFIG_MMC=y
-CONFIG_MMC_BLOCK_MINORS=16
-CONFIG_MMC_DW=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_DEBUG=y
-CONFIG_FUSE_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_FAT_DEFAULT_CODEPAGE=936
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-#CONFIG_NFS_FS is not set
-CONFIG_NLS_CODEPAGE_936=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_UTF8=y
-CONFIG_PRINTK_TIME=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_INFO=y
-CONFIG_FRAME_WARN=4096
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_PANIC_TIMEOUT=5
-# CONFIG_SCHED_DEBUG is not set
-CONFIG_SCHEDSTATS=y
-CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-# CONFIG_FTRACE is not set
-CONFIG_KGDB=y
-CONFIG_KGDB_KDB=y
-# CONFIG_ARM_UNWIND is not set
-CONFIG_DEBUG_PREEMPT=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
-CONFIG_DYNAMIC_DEBUG=y
-CONFIG_STACKTRACE=y
-CONFIG_DEBUG_ZTE_ZX=y
-CONFIG_EARLY_PRINTK=y
-CONFIG_CRYPTO_LZO=y
-CONFIG_GPIOLIB=y
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index c9bf2df85cb9..2b575792363e 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -62,6 +62,25 @@ config CRYPTO_SHA512_ARM
SHA-512 secure hash standard (DFIPS 180-2) implemented
using optimized ARM assembler and NEON, when available.
+config CRYPTO_BLAKE2S_ARM
+ tristate "BLAKE2s digest algorithm (ARM)"
+ select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
+ help
+ BLAKE2s digest algorithm optimized with ARM scalar instructions. This
+ is faster than the generic implementations of BLAKE2s and BLAKE2b, but
+ slower than the NEON implementation of BLAKE2b. (There is no NEON
+ implementation of BLAKE2s, since NEON doesn't really help with it.)
+
+config CRYPTO_BLAKE2B_NEON
+ tristate "BLAKE2b digest algorithm (ARM NEON)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_BLAKE2B
+ help
+ BLAKE2b digest algorithm optimized with ARM NEON instructions.
+ On ARM processors that have NEON support but not the ARMv8
+ Crypto Extensions, typically this BLAKE2b implementation is
+ much faster than SHA-2 and slightly faster than SHA-1.
+
config CRYPTO_AES_ARM
tristate "Scalar AES cipher for ARM"
select CRYPTO_ALGAPI
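A minimal illustration, not part of the patch: with the two new Kconfig entries above, a board configuration could enable the scalar BLAKE2s code built in and the NEON BLAKE2b code as a module with a fragment in the same style as the defconfig updates elsewhere in this series (the =y/=m choices here are only examples):

CONFIG_CRYPTO_BLAKE2S_ARM=y
CONFIG_CRYPTO_BLAKE2B_NEON=m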
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index b745c17d356f..8f26c454ea12 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -9,6 +9,8 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
+obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += blake2s-arm.o
+obj-$(CONFIG_CRYPTO_BLAKE2B_NEON) += blake2b-neon.o
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o
@@ -29,6 +31,8 @@ sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o
sha256-arm-y := sha256-core.o sha256_glue.o $(sha256-arm-neon-y)
sha512-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha512-neon-glue.o
sha512-arm-y := sha512-core.o sha512-glue.o $(sha512-arm-neon-y)
+blake2s-arm-y := blake2s-core.o blake2s-glue.o
+blake2b-neon-y := blake2b-neon-core.o blake2b-neon-glue.o
sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o
sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o
aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index f70af1d0514b..5c6cd3c63cbc 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -9,6 +9,7 @@
#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
@@ -23,6 +24,8 @@ MODULE_ALIAS_CRYPTO("cbc(aes)-all");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+
asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);
asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
diff --git a/arch/arm/crypto/blake2b-neon-core.S b/arch/arm/crypto/blake2b-neon-core.S
new file mode 100644
index 000000000000..0406a186377f
--- /dev/null
+++ b/arch/arm/crypto/blake2b-neon-core.S
@@ -0,0 +1,347 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * BLAKE2b digest algorithm, NEON accelerated
+ *
+ * Copyright 2020 Google LLC
+ *
+ * Author: Eric Biggers <ebiggers@google.com>
+ */
+
+#include <linux/linkage.h>
+
+ .text
+ .fpu neon
+
+ // The arguments to blake2b_compress_neon()
+ STATE .req r0
+ BLOCK .req r1
+ NBLOCKS .req r2
+ INC .req r3
+
+ // Pointers to the rotation tables
+ ROR24_TABLE .req r4
+ ROR16_TABLE .req r5
+
+ // The original stack pointer
+ ORIG_SP .req r6
+
+ // NEON registers which contain the message words of the current block.
+ // M_0-M_3 are occasionally used for other purposes too.
+ M_0 .req d16
+ M_1 .req d17
+ M_2 .req d18
+ M_3 .req d19
+ M_4 .req d20
+ M_5 .req d21
+ M_6 .req d22
+ M_7 .req d23
+ M_8 .req d24
+ M_9 .req d25
+ M_10 .req d26
+ M_11 .req d27
+ M_12 .req d28
+ M_13 .req d29
+ M_14 .req d30
+ M_15 .req d31
+
+ .align 4
+ // Tables for computing ror64(x, 24) and ror64(x, 16) using the vtbl.8
+ // instruction. This is the most efficient way to implement these
+ // rotation amounts with NEON. (On Cortex-A53 it's the same speed as
+ // vshr.u64 + vsli.u64, while on Cortex-A7 it's faster.)
+.Lror24_table:
+ .byte 3, 4, 5, 6, 7, 0, 1, 2
+.Lror16_table:
+ .byte 2, 3, 4, 5, 6, 7, 0, 1
+ // The BLAKE2b initialization vector
+.Lblake2b_IV:
+ .quad 0x6a09e667f3bcc908, 0xbb67ae8584caa73b
+ .quad 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1
+ .quad 0x510e527fade682d1, 0x9b05688c2b3e6c1f
+ .quad 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179
+
+// Execute one round of BLAKE2b by updating the state matrix v[0..15] in the
+// NEON registers q0-q7. The message block is in q8..q15 (M_0-M_15). The stack
+// pointer points to a 32-byte aligned buffer containing a copy of q8 and q9
+// (M_0-M_3), so that they can be reloaded if they are used as temporary
+// registers. The macro arguments s0-s15 give the order in which the message
+// words are used in this round. 'final' is 1 if this is the final round.
+.macro _blake2b_round s0, s1, s2, s3, s4, s5, s6, s7, \
+ s8, s9, s10, s11, s12, s13, s14, s15, final=0
+
+ // Mix the columns:
+ // (v[0], v[4], v[8], v[12]), (v[1], v[5], v[9], v[13]),
+ // (v[2], v[6], v[10], v[14]), and (v[3], v[7], v[11], v[15]).
+
+ // a += b + m[blake2b_sigma[r][2*i + 0]];
+ vadd.u64 q0, q0, q2
+ vadd.u64 q1, q1, q3
+ vadd.u64 d0, d0, M_\s0
+ vadd.u64 d1, d1, M_\s2
+ vadd.u64 d2, d2, M_\s4
+ vadd.u64 d3, d3, M_\s6
+
+ // d = ror64(d ^ a, 32);
+ veor q6, q6, q0
+ veor q7, q7, q1
+ vrev64.32 q6, q6
+ vrev64.32 q7, q7
+
+ // c += d;
+ vadd.u64 q4, q4, q6
+ vadd.u64 q5, q5, q7
+
+ // b = ror64(b ^ c, 24);
+ vld1.8 {M_0}, [ROR24_TABLE, :64]
+ veor q2, q2, q4
+ veor q3, q3, q5
+ vtbl.8 d4, {d4}, M_0
+ vtbl.8 d5, {d5}, M_0
+ vtbl.8 d6, {d6}, M_0
+ vtbl.8 d7, {d7}, M_0
+
+ // a += b + m[blake2b_sigma[r][2*i + 1]];
+ //
+ // M_0 got clobbered above, so we have to reload it if any of the four
+ // message words this step needs happens to be M_0. Otherwise we don't
+ // need to reload it here, as it will just get clobbered again below.
+.if \s1 == 0 || \s3 == 0 || \s5 == 0 || \s7 == 0
+ vld1.8 {M_0}, [sp, :64]
+.endif
+ vadd.u64 q0, q0, q2
+ vadd.u64 q1, q1, q3
+ vadd.u64 d0, d0, M_\s1
+ vadd.u64 d1, d1, M_\s3
+ vadd.u64 d2, d2, M_\s5
+ vadd.u64 d3, d3, M_\s7
+
+ // d = ror64(d ^ a, 16);
+ vld1.8 {M_0}, [ROR16_TABLE, :64]
+ veor q6, q6, q0
+ veor q7, q7, q1
+ vtbl.8 d12, {d12}, M_0
+ vtbl.8 d13, {d13}, M_0
+ vtbl.8 d14, {d14}, M_0
+ vtbl.8 d15, {d15}, M_0
+
+ // c += d;
+ vadd.u64 q4, q4, q6
+ vadd.u64 q5, q5, q7
+
+ // b = ror64(b ^ c, 63);
+ //
+ // This rotation amount isn't a multiple of 8, so it has to be
+ // implemented using a pair of shifts, which requires temporary
+ // registers. Use q8-q9 (M_0-M_3) for this, and reload them afterwards.
+ veor q8, q2, q4
+ veor q9, q3, q5
+ vshr.u64 q2, q8, #63
+ vshr.u64 q3, q9, #63
+ vsli.u64 q2, q8, #1
+ vsli.u64 q3, q9, #1
+ vld1.8 {q8-q9}, [sp, :256]
+
+ // Mix the diagonals:
+ // (v[0], v[5], v[10], v[15]), (v[1], v[6], v[11], v[12]),
+ // (v[2], v[7], v[8], v[13]), and (v[3], v[4], v[9], v[14]).
+ //
+ // There are two possible ways to do this: use 'vext' instructions to
+ // shift the rows of the matrix so that the diagonals become columns,
+ // and undo it afterwards; or just use 64-bit operations on 'd'
+ // registers instead of 128-bit operations on 'q' registers. We use the
+ // latter approach, as it performs much better on Cortex-A7.
+
+ // a += b + m[blake2b_sigma[r][2*i + 0]];
+ vadd.u64 d0, d0, d5
+ vadd.u64 d1, d1, d6
+ vadd.u64 d2, d2, d7
+ vadd.u64 d3, d3, d4
+ vadd.u64 d0, d0, M_\s8
+ vadd.u64 d1, d1, M_\s10
+ vadd.u64 d2, d2, M_\s12
+ vadd.u64 d3, d3, M_\s14
+
+ // d = ror64(d ^ a, 32);
+ veor d15, d15, d0
+ veor d12, d12, d1
+ veor d13, d13, d2
+ veor d14, d14, d3
+ vrev64.32 d15, d15
+ vrev64.32 d12, d12
+ vrev64.32 d13, d13
+ vrev64.32 d14, d14
+
+ // c += d;
+ vadd.u64 d10, d10, d15
+ vadd.u64 d11, d11, d12
+ vadd.u64 d8, d8, d13
+ vadd.u64 d9, d9, d14
+
+ // b = ror64(b ^ c, 24);
+ vld1.8 {M_0}, [ROR24_TABLE, :64]
+ veor d5, d5, d10
+ veor d6, d6, d11
+ veor d7, d7, d8
+ veor d4, d4, d9
+ vtbl.8 d5, {d5}, M_0
+ vtbl.8 d6, {d6}, M_0
+ vtbl.8 d7, {d7}, M_0
+ vtbl.8 d4, {d4}, M_0
+
+ // a += b + m[blake2b_sigma[r][2*i + 1]];
+.if \s9 == 0 || \s11 == 0 || \s13 == 0 || \s15 == 0
+ vld1.8 {M_0}, [sp, :64]
+.endif
+ vadd.u64 d0, d0, d5
+ vadd.u64 d1, d1, d6
+ vadd.u64 d2, d2, d7
+ vadd.u64 d3, d3, d4
+ vadd.u64 d0, d0, M_\s9
+ vadd.u64 d1, d1, M_\s11
+ vadd.u64 d2, d2, M_\s13
+ vadd.u64 d3, d3, M_\s15
+
+ // d = ror64(d ^ a, 16);
+ vld1.8 {M_0}, [ROR16_TABLE, :64]
+ veor d15, d15, d0
+ veor d12, d12, d1
+ veor d13, d13, d2
+ veor d14, d14, d3
+ vtbl.8 d12, {d12}, M_0
+ vtbl.8 d13, {d13}, M_0
+ vtbl.8 d14, {d14}, M_0
+ vtbl.8 d15, {d15}, M_0
+
+ // c += d;
+ vadd.u64 d10, d10, d15
+ vadd.u64 d11, d11, d12
+ vadd.u64 d8, d8, d13
+ vadd.u64 d9, d9, d14
+
+ // b = ror64(b ^ c, 63);
+ veor d16, d4, d9
+ veor d17, d5, d10
+ veor d18, d6, d11
+ veor d19, d7, d8
+ vshr.u64 q2, q8, #63
+ vshr.u64 q3, q9, #63
+ vsli.u64 q2, q8, #1
+ vsli.u64 q3, q9, #1
+ // Reloading q8-q9 can be skipped on the final round.
+.if ! \final
+ vld1.8 {q8-q9}, [sp, :256]
+.endif
+.endm
+
+//
+// void blake2b_compress_neon(struct blake2b_state *state,
+// const u8 *block, size_t nblocks, u32 inc);
+//
+// Only the first three fields of struct blake2b_state are used:
+// u64 h[8]; (inout)
+// u64 t[2]; (inout)
+// u64 f[2]; (in)
+//
+ .align 5
+ENTRY(blake2b_compress_neon)
+ push {r4-r10}
+
+ // Allocate a 32-byte stack buffer that is 32-byte aligned.
+ mov ORIG_SP, sp
+ sub ip, sp, #32
+ bic ip, ip, #31
+ mov sp, ip
+
+ adr ROR24_TABLE, .Lror24_table
+ adr ROR16_TABLE, .Lror16_table
+
+ mov ip, STATE
+ vld1.64 {q0-q1}, [ip]! // Load h[0..3]
+ vld1.64 {q2-q3}, [ip]! // Load h[4..7]
+.Lnext_block:
+ adr r10, .Lblake2b_IV
+ vld1.64 {q14-q15}, [ip] // Load t[0..1] and f[0..1]
+ vld1.64 {q4-q5}, [r10]! // Load IV[0..3]
+ vmov r7, r8, d28 // Copy t[0] to (r7, r8)
+ vld1.64 {q6-q7}, [r10] // Load IV[4..7]
+ adds r7, r7, INC // Increment counter
+ bcs .Lslow_inc_ctr
+ vmov.i32 d28[0], r7
+ vst1.64 {d28}, [ip] // Update t[0]
+.Linc_ctr_done:
+
+ // Load the next message block and finish initializing the state matrix
+ // 'v'. Fortunately, there are exactly enough NEON registers to fit the
+ // entire state matrix in q0-q7 and the entire message block in q8-15.
+ //
+ // However, _blake2b_round also needs some extra registers for rotates,
+ // so we have to spill some registers. It's better to spill the message
+ // registers than the state registers, as the message doesn't change.
+ // Therefore we store a copy of the first 32 bytes of the message block
+ // (q8-q9) in an aligned buffer on the stack so that they can be
+ // reloaded when needed. (We could just reload directly from the
+ // message buffer, but it's faster to use aligned loads.)
+ vld1.8 {q8-q9}, [BLOCK]!
+ veor q6, q6, q14 // v[12..13] = IV[4..5] ^ t[0..1]
+ vld1.8 {q10-q11}, [BLOCK]!
+ veor q7, q7, q15 // v[14..15] = IV[6..7] ^ f[0..1]
+ vld1.8 {q12-q13}, [BLOCK]!
+ vst1.8 {q8-q9}, [sp, :256]
+ mov ip, STATE
+ vld1.8 {q14-q15}, [BLOCK]!
+
+ // Execute the rounds. Each round is provided the order in which it
+ // needs to use the message words.
+ _blake2b_round 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+ _blake2b_round 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3
+ _blake2b_round 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4
+ _blake2b_round 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8
+ _blake2b_round 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13
+ _blake2b_round 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9
+ _blake2b_round 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11
+ _blake2b_round 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10
+ _blake2b_round 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5
+ _blake2b_round 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0
+ _blake2b_round 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+ _blake2b_round 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 \
+ final=1
+
+ // Fold the final state matrix into the hash chaining value:
+ //
+ // for (i = 0; i < 8; i++)
+ // h[i] ^= v[i] ^ v[i + 8];
+ //
+ vld1.64 {q8-q9}, [ip]! // Load old h[0..3]
+ veor q0, q0, q4 // v[0..1] ^= v[8..9]
+ veor q1, q1, q5 // v[2..3] ^= v[10..11]
+ vld1.64 {q10-q11}, [ip] // Load old h[4..7]
+ veor q2, q2, q6 // v[4..5] ^= v[12..13]
+ veor q3, q3, q7 // v[6..7] ^= v[14..15]
+ veor q0, q0, q8 // v[0..1] ^= h[0..1]
+ veor q1, q1, q9 // v[2..3] ^= h[2..3]
+ mov ip, STATE
+ subs NBLOCKS, NBLOCKS, #1 // nblocks--
+ vst1.64 {q0-q1}, [ip]! // Store new h[0..3]
+ veor q2, q2, q10 // v[4..5] ^= h[4..5]
+ veor q3, q3, q11 // v[6..7] ^= h[6..7]
+ vst1.64 {q2-q3}, [ip]! // Store new h[4..7]
+
+ // Advance to the next block, if there is one.
+ bne .Lnext_block // nblocks != 0?
+
+ mov sp, ORIG_SP
+ pop {r4-r10}
+ mov pc, lr
+
+.Lslow_inc_ctr:
+ // Handle the case where the counter overflowed its low 32 bits, by
+ // carrying the overflow bit into the full 128-bit counter.
+ vmov r9, r10, d29
+ adcs r8, r8, #0
+ adcs r9, r9, #0
+ adc r10, r10, #0
+ vmov d28, r7, r8
+ vmov d29, r9, r10
+ vst1.64 {q14}, [ip] // Update t[0] and t[1]
+ b .Linc_ctr_done
+ENDPROC(blake2b_compress_neon)
diff --git a/arch/arm/crypto/blake2b-neon-glue.c b/arch/arm/crypto/blake2b-neon-glue.c
new file mode 100644
index 000000000000..34d73200e7fa
--- /dev/null
+++ b/arch/arm/crypto/blake2b-neon-glue.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * BLAKE2b digest algorithm, NEON accelerated
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#include <crypto/internal/blake2b.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
+
+#include <linux/module.h>
+#include <linux/sizes.h>
+
+#include <asm/neon.h>
+#include <asm/simd.h>
+
+asmlinkage void blake2b_compress_neon(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc);
+
+static void blake2b_compress_arch(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc)
+{
+ if (!crypto_simd_usable()) {
+ blake2b_compress_generic(state, block, nblocks, inc);
+ return;
+ }
+
+ do {
+ const size_t blocks = min_t(size_t, nblocks,
+ SZ_4K / BLAKE2B_BLOCK_SIZE);
+
+ kernel_neon_begin();
+ blake2b_compress_neon(state, block, blocks, inc);
+ kernel_neon_end();
+
+ nblocks -= blocks;
+ block += blocks * BLAKE2B_BLOCK_SIZE;
+ } while (nblocks);
+}
+
+static int crypto_blake2b_update_neon(struct shash_desc *desc,
+ const u8 *in, unsigned int inlen)
+{
+ return crypto_blake2b_update(desc, in, inlen, blake2b_compress_arch);
+}
+
+static int crypto_blake2b_final_neon(struct shash_desc *desc, u8 *out)
+{
+ return crypto_blake2b_final(desc, out, blake2b_compress_arch);
+}
+
+#define BLAKE2B_ALG(name, driver_name, digest_size) \
+ { \
+ .base.cra_name = name, \
+ .base.cra_driver_name = driver_name, \
+ .base.cra_priority = 200, \
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
+ .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \
+ .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \
+ .base.cra_module = THIS_MODULE, \
+ .digestsize = digest_size, \
+ .setkey = crypto_blake2b_setkey, \
+ .init = crypto_blake2b_init, \
+ .update = crypto_blake2b_update_neon, \
+ .final = crypto_blake2b_final_neon, \
+ .descsize = sizeof(struct blake2b_state), \
+ }
+
+static struct shash_alg blake2b_neon_algs[] = {
+ BLAKE2B_ALG("blake2b-160", "blake2b-160-neon", BLAKE2B_160_HASH_SIZE),
+ BLAKE2B_ALG("blake2b-256", "blake2b-256-neon", BLAKE2B_256_HASH_SIZE),
+ BLAKE2B_ALG("blake2b-384", "blake2b-384-neon", BLAKE2B_384_HASH_SIZE),
+ BLAKE2B_ALG("blake2b-512", "blake2b-512-neon", BLAKE2B_512_HASH_SIZE),
+};
+
+static int __init blake2b_neon_mod_init(void)
+{
+ if (!(elf_hwcap & HWCAP_NEON))
+ return -ENODEV;
+
+ return crypto_register_shashes(blake2b_neon_algs,
+ ARRAY_SIZE(blake2b_neon_algs));
+}
+
+static void __exit blake2b_neon_mod_exit(void)
+{
+ return crypto_unregister_shashes(blake2b_neon_algs,
+ ARRAY_SIZE(blake2b_neon_algs));
+}
+
+module_init(blake2b_neon_mod_init);
+module_exit(blake2b_neon_mod_exit);
+
+MODULE_DESCRIPTION("BLAKE2b digest algorithm, NEON accelerated");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
+MODULE_ALIAS_CRYPTO("blake2b-160");
+MODULE_ALIAS_CRYPTO("blake2b-160-neon");
+MODULE_ALIAS_CRYPTO("blake2b-256");
+MODULE_ALIAS_CRYPTO("blake2b-256-neon");
+MODULE_ALIAS_CRYPTO("blake2b-384");
+MODULE_ALIAS_CRYPTO("blake2b-384-neon");
+MODULE_ALIAS_CRYPTO("blake2b-512");
+MODULE_ALIAS_CRYPTO("blake2b-512-neon");
diff --git a/arch/arm/crypto/blake2s-core.S b/arch/arm/crypto/blake2s-core.S
new file mode 100644
index 000000000000..bed897e9a181
--- /dev/null
+++ b/arch/arm/crypto/blake2s-core.S
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * BLAKE2s digest algorithm, ARM scalar implementation
+ *
+ * Copyright 2020 Google LLC
+ *
+ * Author: Eric Biggers <ebiggers@google.com>
+ */
+
+#include <linux/linkage.h>
+
+ // Registers used to hold message words temporarily. There aren't
+ // enough ARM registers to hold the whole message block, so we have to
+ // load the words on-demand.
+ M_0 .req r12
+ M_1 .req r14
+
+// The BLAKE2s initialization vector
+.Lblake2s_IV:
+ .word 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A
+ .word 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
+
+.macro __ldrd a, b, src, offset
+#if __LINUX_ARM_ARCH__ >= 6
+ ldrd \a, \b, [\src, #\offset]
+#else
+ ldr \a, [\src, #\offset]
+ ldr \b, [\src, #\offset + 4]
+#endif
+.endm
+
+.macro __strd a, b, dst, offset
+#if __LINUX_ARM_ARCH__ >= 6
+ strd \a, \b, [\dst, #\offset]
+#else
+ str \a, [\dst, #\offset]
+ str \b, [\dst, #\offset + 4]
+#endif
+.endm
+
+// Execute a quarter-round of BLAKE2s by mixing two columns or two diagonals.
+// (a0, b0, c0, d0) and (a1, b1, c1, d1) give the registers containing the two
+// columns/diagonals. s0-s1 are the word offsets to the message words the first
+// column/diagonal needs, and likewise s2-s3 for the second column/diagonal.
+// M_0 and M_1 are free to use, and the message block can be found at sp + 32.
+//
+// Note that to save instructions, the rotations don't happen when the
+// pseudocode says they should, but rather they are delayed until the values are
+// used. See the comment above _blake2s_round().
+.macro _blake2s_quarterround a0, b0, c0, d0, a1, b1, c1, d1, s0, s1, s2, s3
+
+ ldr M_0, [sp, #32 + 4 * \s0]
+ ldr M_1, [sp, #32 + 4 * \s2]
+
+ // a += b + m[blake2s_sigma[r][2*i + 0]];
+ add \a0, \a0, \b0, ror #brot
+ add \a1, \a1, \b1, ror #brot
+ add \a0, \a0, M_0
+ add \a1, \a1, M_1
+
+ // d = ror32(d ^ a, 16);
+ eor \d0, \a0, \d0, ror #drot
+ eor \d1, \a1, \d1, ror #drot
+
+ // c += d;
+ add \c0, \c0, \d0, ror #16
+ add \c1, \c1, \d1, ror #16
+
+ // b = ror32(b ^ c, 12);
+ eor \b0, \c0, \b0, ror #brot
+ eor \b1, \c1, \b1, ror #brot
+
+ ldr M_0, [sp, #32 + 4 * \s1]
+ ldr M_1, [sp, #32 + 4 * \s3]
+
+ // a += b + m[blake2s_sigma[r][2*i + 1]];
+ add \a0, \a0, \b0, ror #12
+ add \a1, \a1, \b1, ror #12
+ add \a0, \a0, M_0
+ add \a1, \a1, M_1
+
+ // d = ror32(d ^ a, 8);
+ eor \d0, \a0, \d0, ror#16
+ eor \d1, \a1, \d1, ror#16
+
+ // c += d;
+ add \c0, \c0, \d0, ror#8
+ add \c1, \c1, \d1, ror#8
+
+ // b = ror32(b ^ c, 7);
+ eor \b0, \c0, \b0, ror#12
+ eor \b1, \c1, \b1, ror#12
+.endm
+
+// Execute one round of BLAKE2s by updating the state matrix v[0..15]. v[0..9]
+// are in r0..r9. The stack pointer points to 8 bytes of scratch space for
+ // spilling v[8..9], then to v[10..15], then to the message block. r10-r12 and
+// r14 are free to use. The macro arguments s0-s15 give the order in which the
+// message words are used in this round.
+//
+// All rotates are performed using the implicit rotate operand accepted by the
+// 'add' and 'eor' instructions. This is faster than using explicit rotate
+// instructions. To make this work, we allow the values in the second and last
+// rows of the BLAKE2s state matrix (rows 'b' and 'd') to temporarily have the
+// wrong rotation amount. The rotation amount is then fixed up just in time
+// when the values are used. 'brot' is the number of bits the values in row 'b'
+// need to be rotated right to arrive at the correct values, and 'drot'
+// similarly for row 'd'. (brot, drot) start out as (0, 0) but we make it such
+// that they end up as (7, 8) after every round.
+.macro _blake2s_round s0, s1, s2, s3, s4, s5, s6, s7, \
+ s8, s9, s10, s11, s12, s13, s14, s15
+
+ // Mix first two columns:
+ // (v[0], v[4], v[8], v[12]) and (v[1], v[5], v[9], v[13]).
+ __ldrd r10, r11, sp, 16 // load v[12] and v[13]
+ _blake2s_quarterround r0, r4, r8, r10, r1, r5, r9, r11, \
+ \s0, \s1, \s2, \s3
+ __strd r8, r9, sp, 0
+ __strd r10, r11, sp, 16
+
+ // Mix second two columns:
+ // (v[2], v[6], v[10], v[14]) and (v[3], v[7], v[11], v[15]).
+ __ldrd r8, r9, sp, 8 // load v[10] and v[11]
+ __ldrd r10, r11, sp, 24 // load v[14] and v[15]
+ _blake2s_quarterround r2, r6, r8, r10, r3, r7, r9, r11, \
+ \s4, \s5, \s6, \s7
+ str r10, [sp, #24] // store v[14]
+ // v[10], v[11], and v[15] are used below, so no need to store them yet.
+
+ .set brot, 7
+ .set drot, 8
+
+ // Mix first two diagonals:
+ // (v[0], v[5], v[10], v[15]) and (v[1], v[6], v[11], v[12]).
+ ldr r10, [sp, #16] // load v[12]
+ _blake2s_quarterround r0, r5, r8, r11, r1, r6, r9, r10, \
+ \s8, \s9, \s10, \s11
+ __strd r8, r9, sp, 8
+ str r11, [sp, #28]
+ str r10, [sp, #16]
+
+ // Mix second two diagonals:
+ // (v[2], v[7], v[8], v[13]) and (v[3], v[4], v[9], v[14]).
+ __ldrd r8, r9, sp, 0 // load v[8] and v[9]
+ __ldrd r10, r11, sp, 20 // load v[13] and v[14]
+ _blake2s_quarterround r2, r7, r8, r10, r3, r4, r9, r11, \
+ \s12, \s13, \s14, \s15
+ __strd r10, r11, sp, 20
+.endm
+
+//
+// void blake2s_compress_arch(struct blake2s_state *state,
+// const u8 *block, size_t nblocks, u32 inc);
+//
+// Only the first three fields of struct blake2s_state are used:
+// u32 h[8]; (inout)
+// u32 t[2]; (inout)
+// u32 f[2]; (in)
+//
+ .align 5
+ENTRY(blake2s_compress_arch)
+ push {r0-r2,r4-r11,lr} // keep this an even number
+
+.Lnext_block:
+ // r0 is 'state'
+ // r1 is 'block'
+ // r3 is 'inc'
+
+ // Load and increment the counter t[0..1].
+ __ldrd r10, r11, r0, 32
+ adds r10, r10, r3
+ adc r11, r11, #0
+ __strd r10, r11, r0, 32
+
+ // _blake2s_round is very short on registers, so copy the message block
+ // to the stack to save a register during the rounds. This also has the
+ // advantage that misalignment only needs to be dealt with in one place.
+ sub sp, sp, #64
+ mov r12, sp
+ tst r1, #3
+ bne .Lcopy_block_misaligned
+ ldmia r1!, {r2-r9}
+ stmia r12!, {r2-r9}
+ ldmia r1!, {r2-r9}
+ stmia r12, {r2-r9}
+.Lcopy_block_done:
+ str r1, [sp, #68] // Update message pointer
+
+ // Calculate v[8..15]. Push v[10..15] onto the stack, and leave space
+ // for spilling v[8..9]. Leave v[8..9] in r8-r9.
+ mov r14, r0 // r14 = state
+ adr r12, .Lblake2s_IV
+ ldmia r12!, {r8-r9} // load IV[0..1]
+ __ldrd r0, r1, r14, 40 // load f[0..1]
+ ldm r12, {r2-r7} // load IV[2..7]
+ eor r4, r4, r10 // v[12] = IV[4] ^ t[0]
+ eor r5, r5, r11 // v[13] = IV[5] ^ t[1]
+ eor r6, r6, r0 // v[14] = IV[6] ^ f[0]
+ eor r7, r7, r1 // v[15] = IV[7] ^ f[1]
+ push {r2-r7} // push v[10..15]
+ sub sp, sp, #8 // leave space for v[8..9]
+
+ // Load h[0..7] == v[0..7].
+ ldm r14, {r0-r7}
+
+ // Execute the rounds. Each round is provided the order in which it
+ // needs to use the message words.
+ .set brot, 0
+ .set drot, 0
+ _blake2s_round 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+ _blake2s_round 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3
+ _blake2s_round 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4
+ _blake2s_round 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8
+ _blake2s_round 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13
+ _blake2s_round 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9
+ _blake2s_round 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11
+ _blake2s_round 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10
+ _blake2s_round 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5
+ _blake2s_round 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0
+
+ // Fold the final state matrix into the hash chaining value:
+ //
+ // for (i = 0; i < 8; i++)
+ // h[i] ^= v[i] ^ v[i + 8];
+ //
+ ldr r14, [sp, #96] // r14 = &h[0]
+ add sp, sp, #8 // v[8..9] are already loaded.
+ pop {r10-r11} // load v[10..11]
+ eor r0, r0, r8
+ eor r1, r1, r9
+ eor r2, r2, r10
+ eor r3, r3, r11
+ ldm r14, {r8-r11} // load h[0..3]
+ eor r0, r0, r8
+ eor r1, r1, r9
+ eor r2, r2, r10
+ eor r3, r3, r11
+ stmia r14!, {r0-r3} // store new h[0..3]
+ ldm r14, {r0-r3} // load old h[4..7]
+ pop {r8-r11} // load v[12..15]
+ eor r0, r0, r4, ror #brot
+ eor r1, r1, r5, ror #brot
+ eor r2, r2, r6, ror #brot
+ eor r3, r3, r7, ror #brot
+ eor r0, r0, r8, ror #drot
+ eor r1, r1, r9, ror #drot
+ eor r2, r2, r10, ror #drot
+ eor r3, r3, r11, ror #drot
+ add sp, sp, #64 // skip copy of message block
+ stm r14, {r0-r3} // store new h[4..7]
+
+ // Advance to the next block, if there is one. Note that if there are
+ // multiple blocks, then 'inc' (the counter increment amount) must be
+ // 64. So we can simply set it to 64 without re-loading it.
+ ldm sp, {r0, r1, r2} // load (state, block, nblocks)
+ mov r3, #64 // set 'inc'
+ subs r2, r2, #1 // nblocks--
+ str r2, [sp, #8]
+ bne .Lnext_block // nblocks != 0?
+
+ pop {r0-r2,r4-r11,pc}
+
+ // The next message block (pointed to by r1) isn't 4-byte aligned, so it
+ // can't be loaded using ldmia. Copy it to the stack buffer (pointed to
+ // by r12) using an alternative method. r2-r9 are free to use.
+.Lcopy_block_misaligned:
+ mov r2, #64
+1:
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ ldr r3, [r1], #4
+#else
+ ldrb r3, [r1, #0]
+ ldrb r4, [r1, #1]
+ ldrb r5, [r1, #2]
+ ldrb r6, [r1, #3]
+ add r1, r1, #4
+ orr r3, r3, r4, lsl #8
+ orr r3, r3, r5, lsl #16
+ orr r3, r3, r6, lsl #24
+#endif
+ subs r2, r2, #4
+ str r3, [r12], #4
+ bne 1b
+ b .Lcopy_block_done
+ENDPROC(blake2s_compress_arch)
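
For readers tracking the register allocation above, the per-block computation that blake2s_compress_arch() performs corresponds to the C model below. This is a reference sketch only (blake2s_compress_model, g, ror32 and the table names are illustrative, not part of the patch); the in-tree generic equivalent is blake2s_compress_generic() in lib/crypto/blake2s-generic.c.

#include <stdint.h>
#include <string.h>

static const uint32_t blake2s_iv[8] = {
	0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
	0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
};

/* Per-round message word order; identical to the _blake2s_round arguments above. */
static const uint8_t blake2s_sigma[10][16] = {
	{  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15 },
	{ 14, 10,  4,  8,  9, 15, 13,  6,  1, 12,  0,  2, 11,  7,  5,  3 },
	{ 11,  8, 12,  0,  5,  2, 15, 13, 10, 14,  3,  6,  7,  1,  9,  4 },
	{  7,  9,  3,  1, 13, 12, 11, 14,  2,  6,  5, 10,  4,  0, 15,  8 },
	{  9,  0,  5,  7,  2,  4, 10, 15, 14,  1, 11, 12,  6,  8,  3, 13 },
	{  2, 12,  6, 10,  0, 11,  8,  3,  4, 13,  7,  5, 15, 14,  1,  9 },
	{ 12,  5,  1, 15, 14, 13,  4, 10,  0,  7,  6,  3,  9,  2,  8, 11 },
	{ 13, 11,  7, 14, 12,  1,  3,  9,  5,  0, 15,  4,  8,  6,  2, 10 },
	{  6, 15, 14,  9, 11,  3,  0,  8, 12,  2, 13,  7,  1,  4, 10,  5 },
	{ 10,  2,  8,  4,  7,  6,  1,  5, 15, 11,  9, 14,  3, 12, 13,  0 },
};

static uint32_t ror32(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << (32 - n));
}

/* One quarter-round; a/b/c/d index into v[], x/y are message words. */
static void g(uint32_t v[16], int a, int b, int c, int d, uint32_t x, uint32_t y)
{
	v[a] += v[b] + x;  v[d] = ror32(v[d] ^ v[a], 16);
	v[c] += v[d];      v[b] = ror32(v[b] ^ v[c], 12);
	v[a] += v[b] + y;  v[d] = ror32(v[d] ^ v[a], 8);
	v[c] += v[d];      v[b] = ror32(v[b] ^ v[c], 7);
}

/* One 64-byte block, matching the h/t/f roles in the header comment above. */
static void blake2s_compress_model(uint32_t h[8], uint32_t t[2],
				   const uint32_t f[2], const uint32_t m[16],
				   uint32_t inc)
{
	uint32_t v[16];
	int r;

	t[0] += inc;			/* 64-bit block counter */
	t[1] += (t[0] < inc);

	memcpy(v, h, 32);		/* v[0..7]  = h[0..7]  */
	memcpy(v + 8, blake2s_iv, 32);	/* v[8..15] = IV[0..7] */
	v[12] ^= t[0];
	v[13] ^= t[1];
	v[14] ^= f[0];
	v[15] ^= f[1];

	for (r = 0; r < 10; r++) {
		const uint8_t *s = blake2s_sigma[r];

		g(v, 0, 4,  8, 12, m[s[0]],  m[s[1]]);
		g(v, 1, 5,  9, 13, m[s[2]],  m[s[3]]);
		g(v, 2, 6, 10, 14, m[s[4]],  m[s[5]]);
		g(v, 3, 7, 11, 15, m[s[6]],  m[s[7]]);
		g(v, 0, 5, 10, 15, m[s[8]],  m[s[9]]);
		g(v, 1, 6, 11, 12, m[s[10]], m[s[11]]);
		g(v, 2, 7,  8, 13, m[s[12]], m[s[13]]);
		g(v, 3, 4,  9, 14, m[s[14]], m[s[15]]);
	}

	for (r = 0; r < 8; r++)
		h[r] ^= v[r] ^ v[r + 8];
}

The assembly implements exactly this loop, with the message block copied to the stack and the b/d rotations deferred across rounds via the brot/drot bookkeeping.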
diff --git a/arch/arm/crypto/blake2s-glue.c b/arch/arm/crypto/blake2s-glue.c
new file mode 100644
index 000000000000..f2cc1e5fc9ec
--- /dev/null
+++ b/arch/arm/crypto/blake2s-glue.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * BLAKE2s digest algorithm, ARM scalar implementation
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#include <crypto/internal/blake2s.h>
+#include <crypto/internal/hash.h>
+
+#include <linux/module.h>
+
+/* defined in blake2s-core.S */
+EXPORT_SYMBOL(blake2s_compress_arch);
+
+static int crypto_blake2s_update_arm(struct shash_desc *desc,
+ const u8 *in, unsigned int inlen)
+{
+ return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch);
+}
+
+static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out)
+{
+ return crypto_blake2s_final(desc, out, blake2s_compress_arch);
+}
+
+#define BLAKE2S_ALG(name, driver_name, digest_size) \
+ { \
+ .base.cra_name = name, \
+ .base.cra_driver_name = driver_name, \
+ .base.cra_priority = 200, \
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
+ .base.cra_module = THIS_MODULE, \
+ .digestsize = digest_size, \
+ .setkey = crypto_blake2s_setkey, \
+ .init = crypto_blake2s_init, \
+ .update = crypto_blake2s_update_arm, \
+ .final = crypto_blake2s_final_arm, \
+ .descsize = sizeof(struct blake2s_state), \
+ }
+
+static struct shash_alg blake2s_arm_algs[] = {
+ BLAKE2S_ALG("blake2s-128", "blake2s-128-arm", BLAKE2S_128_HASH_SIZE),
+ BLAKE2S_ALG("blake2s-160", "blake2s-160-arm", BLAKE2S_160_HASH_SIZE),
+ BLAKE2S_ALG("blake2s-224", "blake2s-224-arm", BLAKE2S_224_HASH_SIZE),
+ BLAKE2S_ALG("blake2s-256", "blake2s-256-arm", BLAKE2S_256_HASH_SIZE),
+};
+
+static int __init blake2s_arm_mod_init(void)
+{
+ return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
+ crypto_register_shashes(blake2s_arm_algs,
+ ARRAY_SIZE(blake2s_arm_algs)) : 0;
+}
+
+static void __exit blake2s_arm_mod_exit(void)
+{
+ if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
+ crypto_unregister_shashes(blake2s_arm_algs,
+ ARRAY_SIZE(blake2s_arm_algs));
+}
+
+module_init(blake2s_arm_mod_init);
+module_exit(blake2s_arm_mod_exit);
+
+MODULE_DESCRIPTION("BLAKE2s digest algorithm, ARM scalar implementation");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
+MODULE_ALIAS_CRYPTO("blake2s-128");
+MODULE_ALIAS_CRYPTO("blake2s-128-arm");
+MODULE_ALIAS_CRYPTO("blake2s-160");
+MODULE_ALIAS_CRYPTO("blake2s-160-arm");
+MODULE_ALIAS_CRYPTO("blake2s-224");
+MODULE_ALIAS_CRYPTO("blake2s-224-arm");
+MODULE_ALIAS_CRYPTO("blake2s-256");
+MODULE_ALIAS_CRYPTO("blake2s-256-arm");
diff --git a/arch/arm/include/asm/archrandom.h b/arch/arm/include/asm/archrandom.h
new file mode 100644
index 000000000000..a8e84ca5c2ee
--- /dev/null
+++ b/arch/arm/include/asm/archrandom.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARCHRANDOM_H
+#define _ASM_ARCHRANDOM_H
+
+static inline bool __init smccc_probe_trng(void)
+{
+ return false;
+}
+
+#endif /* _ASM_ARCHRANDOM_H */
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 6ed30421f697..e2b1fd558bf3 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -578,4 +578,21 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
__adldst_l str, \src, \sym, \tmp, \cond
.endm
+ /*
+ * rev_l - byte-swap a 32-bit value
+ *
+ * @val: source/destination register
+ * @tmp: scratch register
+ */
+ .macro rev_l, val:req, tmp:req
+ .if __LINUX_ARM_ARCH__ < 6
+ eor \tmp, \val, \val, ror #16
+ bic \tmp, \tmp, #0x00ff0000
+ mov \val, \val, ror #8
+ eor \val, \val, \tmp, lsr #8
+ .else
+ rev \val, \val
+ .endif
+ .endm
+
#endif /* __ASM_ASSEMBLER_H__ */
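
The ARMv<6 branch of rev_l is the classic three-instruction byte swap using a single scratch register. A standalone C rendering of the same data flow, for illustration only:

#include <stdint.h>

/* Same steps as the pre-ARMv6 path of rev_l. */
static uint32_t rev_l_c(uint32_t val)
{
	uint32_t tmp;

	tmp = val ^ ((val >> 16) | (val << 16));	/* eor tmp, val, val, ror #16 */
	tmp &= ~0x00ff0000u;				/* bic tmp, tmp, #0x00ff0000 */
	val = (val >> 8) | (val << 24);			/* mov val, val, ror #8 */
	return val ^ (tmp >> 8);			/* eor val, val, tmp, lsr #8 */
}

For val = 0xAABBCCDD the result is 0xDDCCBBAA; on ARMv6+ the macro simply emits the rev instruction instead.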
diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h
index f8712e3c29cf..246a3de25931 100644
--- a/arch/arm/include/asm/hardware/locomo.h
+++ b/arch/arm/include/asm/hardware/locomo.h
@@ -188,7 +188,7 @@ struct locomo_driver {
struct device_driver drv;
unsigned int devid;
int (*probe)(struct locomo_dev *);
- int (*remove)(struct locomo_dev *);
+ void (*remove)(struct locomo_dev *);
};
#define LOCOMO_DRV(_d) container_of((_d), struct locomo_driver, drv)
diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h
index d134b9a5ff94..2e70db6f22ea 100644
--- a/arch/arm/include/asm/hardware/sa1111.h
+++ b/arch/arm/include/asm/hardware/sa1111.h
@@ -403,7 +403,7 @@ struct sa1111_driver {
struct device_driver drv;
unsigned int devid;
int (*probe)(struct sa1111_dev *);
- int (*remove)(struct sa1111_dev *);
+ void (*remove)(struct sa1111_dev *);
};
#define SA1111_DRV(_d) container_of((_d), struct sa1111_driver, drv)
diff --git a/arch/arm/include/asm/kexec-internal.h b/arch/arm/include/asm/kexec-internal.h
new file mode 100644
index 000000000000..ecc2322db7aa
--- /dev/null
+++ b/arch/arm/include/asm/kexec-internal.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_KEXEC_INTERNAL_H
+#define _ARM_KEXEC_INTERNAL_H
+
+struct kexec_relocate_data {
+ unsigned long kexec_start_address;
+ unsigned long kexec_indirection_page;
+ unsigned long kexec_mach_type;
+ unsigned long kexec_r2;
+};
+
+#endif
diff --git a/arch/arm/include/debug/brcmstb.S b/arch/arm/include/debug/brcmstb.S
index 0ff32ffc610c..f684e3a815f6 100644
--- a/arch/arm/include/debug/brcmstb.S
+++ b/arch/arm/include/debug/brcmstb.S
@@ -25,6 +25,7 @@
#define SUN_TOP_CTRL_BASE_V7 REG_PHYS_ADDR_V7(0x404000)
#define UARTA_3390 REG_PHYS_ADDR(0x40a900)
+#define UARTA_72116 UARTA_7255
#define UARTA_7250 REG_PHYS_ADDR(0x40b400)
#define UARTA_7255 REG_PHYS_ADDR(0x40c000)
#define UARTA_7260 UARTA_7255
@@ -85,20 +86,21 @@ ARM_BE8( rev \rv, \rv )
/* Chip specific detection starts here */
20: checkuart(\rp, \rv, 0x33900000, 3390)
-21: checkuart(\rp, \rv, 0x72160000, 7216)
-22: checkuart(\rp, \rv, 0x07216400, 72164)
-23: checkuart(\rp, \rv, 0x07216500, 72165)
-24: checkuart(\rp, \rv, 0x72500000, 7250)
-25: checkuart(\rp, \rv, 0x72550000, 7255)
-26: checkuart(\rp, \rv, 0x72600000, 7260)
-27: checkuart(\rp, \rv, 0x72680000, 7268)
-28: checkuart(\rp, \rv, 0x72710000, 7271)
-29: checkuart(\rp, \rv, 0x72780000, 7278)
-30: checkuart(\rp, \rv, 0x73640000, 7364)
-31: checkuart(\rp, \rv, 0x73660000, 7366)
-32: checkuart(\rp, \rv, 0x07437100, 74371)
-33: checkuart(\rp, \rv, 0x74390000, 7439)
-34: checkuart(\rp, \rv, 0x74450000, 7445)
+21: checkuart(\rp, \rv, 0x07211600, 72116)
+22: checkuart(\rp, \rv, 0x72160000, 7216)
+23: checkuart(\rp, \rv, 0x07216400, 72164)
+24: checkuart(\rp, \rv, 0x07216500, 72165)
+25: checkuart(\rp, \rv, 0x72500000, 7250)
+26: checkuart(\rp, \rv, 0x72550000, 7255)
+27: checkuart(\rp, \rv, 0x72600000, 7260)
+28: checkuart(\rp, \rv, 0x72680000, 7268)
+29: checkuart(\rp, \rv, 0x72710000, 7271)
+30: checkuart(\rp, \rv, 0x72780000, 7278)
+31: checkuart(\rp, \rv, 0x73640000, 7364)
+32: checkuart(\rp, \rv, 0x73660000, 7366)
+33: checkuart(\rp, \rv, 0x07437100, 74371)
+34: checkuart(\rp, \rv, 0x74390000, 7439)
+35: checkuart(\rp, \rv, 0x74450000, 7445)
/* No valid UART found */
90: mov \rp, #0
diff --git a/arch/arm/include/debug/efm32.S b/arch/arm/include/debug/efm32.S
deleted file mode 100644
index b0083d6e31e8..000000000000
--- a/arch/arm/include/debug/efm32.S
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2013 Pengutronix
- * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- */
-
-#define UARTn_CMD 0x000c
-#define UARTn_CMD_TXEN 0x0004
-
-#define UARTn_STATUS 0x0010
-#define UARTn_STATUS_TXC 0x0020
-#define UARTn_STATUS_TXBL 0x0040
-
-#define UARTn_TXDATA 0x0034
-
- .macro addruart, rx, tmp, tmp2
- ldr \rx, =(CONFIG_DEBUG_UART_PHYS)
-
- /*
- * enable TX. The driver might disable it to save energy. We
- * don't care about disabling at the end as during debug power
- * consumption isn't that important.
- */
- ldr \tmp, =(UARTn_CMD_TXEN)
- str \tmp, [\rx, #UARTn_CMD]
- .endm
-
- .macro senduart,rd,rx
- strb \rd, [\rx, #UARTn_TXDATA]
- .endm
-
- .macro waituartcts,rd,rx
- .endm
-
- .macro waituarttxrdy,rd,rx
-1001: ldr \rd, [\rx, #UARTn_STATUS]
- tst \rd, #UARTn_STATUS_TXBL
- beq 1001b
- .endm
-
- .macro busyuart,rd,rx
-1001: ldr \rd, [\rx, UARTn_STATUS]
- tst \rd, #UARTn_STATUS_TXC
- bne 1001b
- .endm
diff --git a/arch/arm/include/debug/sirf.S b/arch/arm/include/debug/sirf.S
deleted file mode 100644
index 3612c7b9cbe7..000000000000
--- a/arch/arm/include/debug/sirf.S
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * arch/arm/mach-prima2/include/mach/debug-macro.S
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#define SIRF_LLUART_TXFIFO_STATUS 0x0114
-#define SIRF_LLUART_TXFIFO_DATA 0x0118
-
-#define SIRF_LLUART_TXFIFO_FULL (1 << 5)
-
-#ifdef CONFIG_DEBUG_SIRFATLAS7_UART0
-#define SIRF_LLUART_TXFIFO_EMPTY (1 << 8)
-#else
-#define SIRF_LLUART_TXFIFO_EMPTY (1 << 6)
-#endif
-
-
- .macro addruart, rp, rv, tmp
- ldr \rp, =CONFIG_DEBUG_UART_PHYS @ physical
- ldr \rv, =CONFIG_DEBUG_UART_VIRT @ virtual
- .endm
-
- .macro senduart,rd,rx
- str \rd, [\rx, #SIRF_LLUART_TXFIFO_DATA]
- .endm
-
- .macro busyuart,rd,rx
- .endm
-
- .macro waituartcts,rd,rx
- .endm
-
- .macro waituarttxrdy,rd,rx
-1001: ldr \rd, [\rx, #SIRF_LLUART_TXFIFO_STATUS]
- tst \rd, #SIRF_LLUART_TXFIFO_EMPTY
- beq 1001b
- .endm
-
diff --git a/arch/arm/include/debug/sti.S b/arch/arm/include/debug/sti.S
index 72d052511890..dc796ac2ac57 100644
--- a/arch/arm/include/debug/sti.S
+++ b/arch/arm/include/debug/sti.S
@@ -6,28 +6,6 @@
* Copyright (C) 2013 STMicroelectronics (R&D) Limited.
*/
-#define STIH41X_COMMS_BASE 0xfed00000
-#define STIH41X_ASC2_BASE (STIH41X_COMMS_BASE+0x32000)
-
-#define STIH41X_SBC_LPM_BASE 0xfe400000
-#define STIH41X_SBC_COMMS_BASE (STIH41X_SBC_LPM_BASE + 0x100000)
-#define STIH41X_SBC_ASC1_BASE (STIH41X_SBC_COMMS_BASE + 0x31000)
-
-
-#define VIRT_ADDRESS(x) (x - 0x1000000)
-
-#if IS_ENABLED(CONFIG_STIH41X_DEBUG_ASC2)
-#define DEBUG_LL_UART_BASE STIH41X_ASC2_BASE
-#endif
-
-#if IS_ENABLED(CONFIG_STIH41X_DEBUG_SBC_ASC1)
-#define DEBUG_LL_UART_BASE STIH41X_SBC_ASC1_BASE
-#endif
-
-#ifndef DEBUG_LL_UART_BASE
-#error "DEBUG UART is not Configured"
-#endif
-
#define ASC_TX_BUF_OFF 0x04
#define ASC_CTRL_OFF 0x0c
#define ASC_STA_OFF 0x14
@@ -37,8 +15,8 @@
.macro addruart, rp, rv, tmp
- ldr \rp, =DEBUG_LL_UART_BASE @ physical base
- ldr \rv, =VIRT_ADDRESS(DEBUG_LL_UART_BASE) @ virt base
+ ldr \rp, =CONFIG_DEBUG_UART_PHYS @ physical base
+ ldr \rv, =CONFIG_DEBUG_UART_VIRT @ virt base
.endm
.macro senduart,rd,rx
diff --git a/arch/arm/include/debug/tegra.S b/arch/arm/include/debug/tegra.S
index 98daa7f48314..7454480d084b 100644
--- a/arch/arm/include/debug/tegra.S
+++ b/arch/arm/include/debug/tegra.S
@@ -149,7 +149,34 @@
.align
99: .word .
+#if defined(ZIMAGE)
+ .word . + 4
+/*
+ * Storage for the state maintained by the macro.
+ *
+ * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
+ * That's because this header is included from multiple files, and we only
+ * want a single copy of the data. In particular, the UART probing code above
+ * assumes it's running using physical addresses. This is true when this file
+ * is included from head.o, but not when included from debug.o. So we need
+ * to share the probe results between the two copies, rather than having
+ * to re-run the probing again later.
+ *
+ * In the decompressor, we put the storage right here, since common.c
+ * isn't included in the decompressor build. This storage data gets put in
+ * .text even though it's really data, since .data is discarded from the
+ * decompressor. Luckily, .text is writeable in the decompressor, unless
+ * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
+ */
+ /* Debug UART initialization required */
+ .word 1
+ /* Debug UART physical address */
+ .word 0
+ /* Debug UART virtual address */
+ .word 0
+#else
.word tegra_uart_config
+#endif
.ltorg
/* Load previously selected UART address */
@@ -189,30 +216,3 @@
.macro waituarttxrdy,rd,rx
.endm
-
-/*
- * Storage for the state maintained by the macros above.
- *
- * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
- * That's because this header is included from multiple files, and we only
- * want a single copy of the data. In particular, the UART probing code above
- * assumes it's running using physical addresses. This is true when this file
- * is included from head.o, but not when included from debug.o. So we need
- * to share the probe results between the two copies, rather than having
- * to re-run the probing again later.
- *
- * In the decompressor, we put the symbol/storage right here, since common.c
- * isn't included in the decompressor build. This symbol gets put in .text
- * even though it's really data, since .data is discarded from the
- * decompressor. Luckily, .text is writeable in the decompressor, unless
- * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
- */
-#if defined(ZIMAGE)
-tegra_uart_config:
- /* Debug UART initialization required */
- .word 1
- /* Debug UART physical address */
- .word 0
- /* Debug UART virtual address */
- .word 0
-#endif
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index a1570c8bab25..be8050b0c3df 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/mach/arch.h>
@@ -170,5 +171,9 @@ int main(void)
DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar));
DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar));
#endif
+ DEFINE(KEXEC_START_ADDR, offsetof(struct kexec_relocate_data, kexec_start_address));
+ DEFINE(KEXEC_INDIR_PAGE, offsetof(struct kexec_relocate_data, kexec_indirection_page));
+ DEFINE(KEXEC_MACH_TYPE, offsetof(struct kexec_relocate_data, kexec_mach_type));
+ DEFINE(KEXEC_R2, offsetof(struct kexec_relocate_data, kexec_r2));
return 0;
}
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 5d84ad333f05..2b09dad7935e 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -13,6 +13,7 @@
#include <linux/of_fdt.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
#include <asm/fncpy.h>
#include <asm/mach-types.h>
#include <asm/smp_plat.h>
@@ -22,11 +23,6 @@
extern void relocate_new_kernel(void);
extern const unsigned int relocate_new_kernel_size;
-extern unsigned long kexec_start_address;
-extern unsigned long kexec_indirection_page;
-extern unsigned long kexec_mach_type;
-extern unsigned long kexec_boot_atags;
-
static atomic_t waiting_for_crash_ipi;
/*
@@ -159,6 +155,7 @@ void (*kexec_reinit)(void);
void machine_kexec(struct kimage *image)
{
unsigned long page_list, reboot_entry_phys;
+ struct kexec_relocate_data *data;
void (*reboot_entry)(void);
void *reboot_code_buffer;
@@ -174,18 +171,17 @@ void machine_kexec(struct kimage *image)
reboot_code_buffer = page_address(image->control_code_page);
- /* Prepare parameters for reboot_code_buffer*/
- set_kernel_text_rw();
- kexec_start_address = image->start;
- kexec_indirection_page = page_list;
- kexec_mach_type = machine_arch_type;
- kexec_boot_atags = image->arch.kernel_r2;
-
/* copy our kernel relocation code to the control code page */
reboot_entry = fncpy(reboot_code_buffer,
&relocate_new_kernel,
relocate_new_kernel_size);
+ data = reboot_code_buffer + relocate_new_kernel_size;
+ data->kexec_start_address = image->start;
+ data->kexec_indirection_page = page_list;
+ data->kexec_mach_type = machine_arch_type;
+ data->kexec_r2 = image->arch.kernel_r2;
+
/* get the identity mapping physical address for the reboot code */
reboot_entry_phys = virt_to_idmap(reboot_entry);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index ee3aee69e444..5199a2bb4111 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -243,7 +243,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
thread->cpu_domain = get_domain();
#endif
- if (likely(!(p->flags & PF_KTHREAD))) {
+ if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
*childregs = *current_pt_regs();
childregs->ARM_r0 = 0;
if (stack_start)
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index 72a08786e16e..218d524360fc 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -5,14 +5,16 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
#include <asm/kexec.h>
.align 3 /* not needed for this code, but keeps fncpy() happy */
ENTRY(relocate_new_kernel)
- ldr r0,kexec_indirection_page
- ldr r1,kexec_start_address
+ adr r7, relocate_new_kernel_end
+ ldr r0, [r7, #KEXEC_INDIR_PAGE]
+ ldr r1, [r7, #KEXEC_START_ADDR]
/*
* If there is no indirection page (we are doing crashdumps)
@@ -57,34 +59,16 @@ ENTRY(relocate_new_kernel)
2:
/* Jump to relocated kernel */
- mov lr,r1
- mov r0,#0
- ldr r1,kexec_mach_type
- ldr r2,kexec_boot_atags
- ARM( ret lr )
- THUMB( bx lr )
-
- .align
-
- .globl kexec_start_address
-kexec_start_address:
- .long 0x0
-
- .globl kexec_indirection_page
-kexec_indirection_page:
- .long 0x0
-
- .globl kexec_mach_type
-kexec_mach_type:
- .long 0x0
-
- /* phy addr of the atags for the new kernel */
- .globl kexec_boot_atags
-kexec_boot_atags:
- .long 0x0
+ mov lr, r1
+ mov r0, #0
+ ldr r1, [r7, #KEXEC_MACH_TYPE]
+ ldr r2, [r7, #KEXEC_R2]
+ ARM( ret lr )
+ THUMB( bx lr )
ENDPROC(relocate_new_kernel)
+ .align 3
relocate_new_kernel_end:
.globl relocate_new_kernel_size
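
Taken together, the machine_kexec.c and relocate_kernel.S hunks above replace the old writable kernel-text globals with a parameter block that travels with the relocated code. A sketch of the control-page layout after fncpy(), derived from the hunks above and shown for orientation only:

	reboot_code_buffer  (page_address(image->control_code_page))
	+------------------------------+  <- reboot_entry / relocate_new_kernel
	|  copied relocation code      |     relocate_new_kernel_size bytes
	+------------------------------+  <- relocate_new_kernel_end
	|  struct kexec_relocate_data  |     kexec_start_address,
	|                              |     kexec_indirection_page,
	|                              |     kexec_mach_type, kexec_r2
	+------------------------------+

The relocation stub finds the block with "adr r7, relocate_new_kernel_end" and the KEXEC_* offsets emitted by asm-offsets.c, so no absolute symbols have to be patched and the set_kernel_text_rw() call can be dropped.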
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 9d2e916121be..a3a38d0a4c85 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -693,18 +693,20 @@ struct page *get_signal_page(void)
addr = page_address(page);
+ /* Poison the entire page */
+ memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
+ PAGE_SIZE / sizeof(u32));
+
/* Give the signal return code some randomness */
offset = 0x200 + (get_random_int() & 0x7fc);
signal_return_offset = offset;
- /*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
- */
+ /* Copy signal return handlers into the page */
memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
- ptr = (unsigned long)addr + offset;
- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+ /* Flush out all instructions in this page */
+ ptr = (unsigned long)addr;
+ flush_icache_range(ptr, ptr + PAGE_SIZE);
return page;
}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5c48eb4fd0e5..74679240a9d8 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -540,12 +540,9 @@ void show_ipi_list(struct seq_file *p, int prec)
unsigned int cpu, i;
for (i = 0; i < NR_IPI; i++) {
- unsigned int irq;
-
if (!ipi_desc[i])
continue;
- irq = irq_desc_get_irq(ipi_desc[i]);
seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
for_each_online_cpu(cpu)
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 0203e545bbc8..075a2e0ed2c1 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -248,6 +248,7 @@ struct oabi_epoll_event {
__u64 data;
} __attribute__ ((packed,aligned(4)));
+#ifdef CONFIG_EPOLL
asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
struct oabi_epoll_event __user *event)
{
@@ -298,6 +299,20 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
kfree(kbuf);
return err ? -EFAULT : ret;
}
+#else
+asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
+ struct oabi_epoll_event __user *event)
+{
+ return -EINVAL;
+}
+
+asmlinkage long sys_oabi_epoll_wait(int epfd,
+ struct oabi_epoll_event __user *events,
+ int maxevents, int timeout)
+{
+ return -EINVAL;
+}
+#endif
struct oabi_sembuf {
unsigned short sem_num;
diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
index 0184de05c1be..b683c2caa40b 100644
--- a/arch/arm/mach-at91/pm_suspend.S
+++ b/arch/arm/mach-at91/pm_suspend.S
@@ -442,7 +442,7 @@ ENDPROC(at91_backup_mode)
str tmp1, [pmc, #AT91_PMC_PLL_UPDT]
/* step 2. */
- ldr tmp1, =#AT91_PMC_PLL_ACR_DEFAULT_PLLA
+ ldr tmp1, =AT91_PMC_PLL_ACR_DEFAULT_PLLA
str tmp1, [pmc, #AT91_PMC_PLL_ACR]
/* step 3. */
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index 9b594ae98153..2890e61b2b46 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -161,6 +161,7 @@ config ARCH_BCM2835
select ARM_TIMER_SP804
select HAVE_ARM_ARCH_TIMER if ARCH_MULTI_V7
select BCM2835_TIMER
+ select BRCMSTB_L2_IRQ
select PINCTRL
select PINCTRL_BCM2835
select MFD_CORE
diff --git a/arch/arm/mach-efm32/Makefile.boot b/arch/arm/mach-efm32/Makefile.boot
deleted file mode 100644
index cec195d4fcba..000000000000
--- a/arch/arm/mach-efm32/Makefile.boot
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-# Empty file waiting for deletion once Makefile.boot isn't needed any more.
-# Patch waits for application at
-# http://www.arm.linux.org.uk/developer/patches/viewpatch.php?id=7889/1 .
diff --git a/arch/arm/mach-efm32/dtmachine.c b/arch/arm/mach-efm32/dtmachine.c
deleted file mode 100644
index e9364b843641..000000000000
--- a/arch/arm/mach-efm32/dtmachine.c
+++ /dev/null
@@ -1,16 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-
-#include <asm/v7m.h>
-
-#include <asm/mach/arch.h>
-
-static const char *const efm32gg_compat[] __initconst = {
- "efm32,dk3750",
- NULL
-};
-
-DT_MACHINE_START(EFM32DT, "EFM32 (Device Tree Support)")
- .dt_compat = efm32gg_compat,
- .restart = armv7m_restart,
-MACHINE_END
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 56d272967fc0..5a48abac6af4 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -13,7 +13,6 @@ menuconfig ARCH_EXYNOS
select ARM_GIC
select EXYNOS_IRQ_COMBINER
select COMMON_CLK_SAMSUNG
- select EXYNOS_ASV
select EXYNOS_CHIPID
select EXYNOS_THERMAL
select EXYNOS_PMU
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index 416462e3f5d6..f9713dc561cf 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -65,15 +65,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where,
if (addr)
switch (size) {
case 1:
- asm("ldrb %0, [%1, %2]"
+ asm volatile("ldrb %0, [%1, %2]"
: "=r" (v) : "r" (addr), "r" (where) : "cc");
break;
case 2:
- asm("ldrh %0, [%1, %2]"
+ asm volatile("ldrh %0, [%1, %2]"
: "=r" (v) : "r" (addr), "r" (where) : "cc");
break;
case 4:
- asm("ldr %0, [%1, %2]"
+ asm volatile("ldr %0, [%1, %2]"
: "=r" (v) : "r" (addr), "r" (where) : "cc");
break;
}
@@ -99,17 +99,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where,
if (addr)
switch (size) {
case 1:
- asm("strb %0, [%1, %2]"
+ asm volatile("strb %0, [%1, %2]"
: : "r" (value), "r" (addr), "r" (where)
: "cc");
break;
case 2:
- asm("strh %0, [%1, %2]"
+ asm volatile("strh %0, [%1, %2]"
: : "r" (value), "r" (addr), "r" (where)
: "cc");
break;
case 4:
- asm("str %0, [%1, %2]"
+ asm volatile("str %0, [%1, %2]"
: : "r" (value), "r" (addr), "r" (where)
: "cc");
break;
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 2d76e2c6c99e..2b004cc4f95e 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -85,7 +85,6 @@ void imx_anatop_pre_suspend(void);
void imx_anatop_post_resume(void);
int imx6_set_lpm(enum mxc_cpu_pwr_mode mode);
void imx6_set_int_mem_clk_lpm(bool enable);
-void imx6sl_set_wait_clk(bool enter);
int imx_mmdc_get_ddr_type(void);
int imx7ulp_set_lpm(enum ulp_cpu_pwr_mode mode);
diff --git a/arch/arm/mach-imx/cpuidle-imx6sl.c b/arch/arm/mach-imx/cpuidle-imx6sl.c
index 4521e5352bf6..b86ffbeb28e4 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sl.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sl.c
@@ -3,6 +3,7 @@
* Copyright (C) 2014 Freescale Semiconductor, Inc.
*/
+#include <linux/clk/imx.h>
#include <linux/cpuidle.h>
#include <linux/module.h>
#include <asm/cpuidle.h>
diff --git a/arch/arm/mach-imx/hardware.h b/arch/arm/mach-imx/hardware.h
index 7acf7ce467ed..0760fff39a0b 100644
--- a/arch/arm/mach-imx/hardware.h
+++ b/arch/arm/mach-imx/hardware.h
@@ -106,8 +106,4 @@
.type = _type, \
}
-/* There's an off-by-one between the gpio bank number and the gpiochip */
-/* range e.g. GPIO_1_5 is gpio 5 under linux */
-#define IMX_GPIO_NR(bank, nr) (((bank) - 1) * 32 + (nr))
-
#endif /* __ASM_ARCH_MXC_HARDWARE_H__ */
diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c
index e018e716735f..35e81201cb5d 100644
--- a/arch/arm/mach-imx/mach-imx6ul.c
+++ b/arch/arm/mach-imx/mach-imx6ul.c
@@ -14,6 +14,7 @@
#include "common.h"
#include "cpuidle.h"
+#include "hardware.h"
static void __init imx6ul_enet_clk_init(void)
{
@@ -27,34 +28,16 @@ static void __init imx6ul_enet_clk_init(void)
pr_err("failed to find fsl,imx6ul-iomux-gpr regmap\n");
}
-static int ksz8081_phy_fixup(struct phy_device *dev)
-{
- if (dev && dev->interface == PHY_INTERFACE_MODE_MII) {
- phy_write(dev, 0x1f, 0x8110);
- phy_write(dev, 0x16, 0x201);
- } else if (dev && dev->interface == PHY_INTERFACE_MODE_RMII) {
- phy_write(dev, 0x1f, 0x8190);
- phy_write(dev, 0x16, 0x202);
- }
-
- return 0;
-}
-
-static void __init imx6ul_enet_phy_init(void)
-{
- if (IS_BUILTIN(CONFIG_PHYLIB))
- phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK,
- ksz8081_phy_fixup);
-}
-
static inline void imx6ul_enet_init(void)
{
imx6ul_enet_clk_init();
- imx6ul_enet_phy_init();
}
static void __init imx6ul_init_machine(void)
{
+ imx_print_silicon_rev(cpu_is_imx6ull() ? "i.MX6ULL" : "i.MX6UL",
+ imx_get_soc_revision());
+
of_platform_default_populate(NULL, NULL, NULL);
imx6ul_enet_init();
imx_anatop_init();
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
index 40c74b4c4d73..9244437cb1b9 100644
--- a/arch/arm/mach-imx/pm-imx6.c
+++ b/arch/arm/mach-imx/pm-imx6.c
@@ -4,6 +4,7 @@
* Copyright 2011 Linaro Ltd.
*/
+#include <linux/clk/imx.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S
index 1eabf2d2834b..e06f946b75b9 100644
--- a/arch/arm/mach-imx/suspend-imx6.S
+++ b/arch/arm/mach-imx/suspend-imx6.S
@@ -67,6 +67,7 @@
#define MX6Q_CCM_CCR 0x0
.align 3
+ .arm
.macro sync_l2_cache
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index f7211b57b1e7..165c184801e1 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -13,7 +13,6 @@ config MACH_IXP4XX_OF
select I2C
select I2C_IOP3XX
select PCI
- select TIMER_OF
select USE_OF
help
Say 'Y' here to support Device Tree-based IXP4xx platforms.
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index a720259099ed..0a4c9b0b13b0 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -203,6 +203,8 @@ static int osk_tps_setup(struct i2c_client *client, void *context)
*/
gpio_request(OSK_TPS_GPIO_USB_PWR_EN, "n_vbus_en");
gpio_direction_output(OSK_TPS_GPIO_USB_PWR_EN, 1);
+ /* Free the GPIO again as the driver will request it */
+ gpio_free(OSK_TPS_GPIO_USB_PWR_EN);
/* Set GPIO 2 high so LED D3 is off by default */
tps65010_set_gpio_out_value(GPIO2, HIGH);
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 4a59c169a113..4178c0ee46eb 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -17,11 +17,10 @@ config ARCH_OMAP3
bool "TI OMAP3"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
select OMAP_HWMOD
select OMAP_INTERCONNECT
- select PM_OPP if PM
- select PM if CPU_IDLE
+ select PM_OPP
select SOC_HAS_OMAP2_SDRC
select ARM_ERRATA_430973
@@ -30,7 +29,7 @@ config ARCH_OMAP4
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
select ARM_ERRATA_720789
select ARM_GIC
select HAVE_ARM_SCU if SMP
@@ -40,7 +39,7 @@ config ARCH_OMAP4
select OMAP_INTERCONNECT_BARRIER
select PL310_ERRATA_588369 if CACHE_L2X0
select PL310_ERRATA_727915 if CACHE_L2X0
- select PM_OPP if PM
+ select PM_OPP
select PM if CPU_IDLE
select ARM_ERRATA_754322
select ARM_ERRATA_775420
@@ -50,7 +49,7 @@ config SOC_OMAP5
bool "TI OMAP5"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
select ARM_GIC
select HAVE_ARM_SCU if SMP
select HAVE_ARM_ARCH_TIMER
@@ -58,14 +57,14 @@ config SOC_OMAP5
select OMAP_HWMOD
select OMAP_INTERCONNECT
select OMAP_INTERCONNECT_BARRIER
- select PM_OPP if PM
+ select PM_OPP
select ZONE_DMA if ARM_LPAE
config SOC_AM33XX
bool "TI AM33XX"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
config SOC_AM43XX
bool "TI AM43x"
@@ -79,13 +78,13 @@ config SOC_AM43XX
select ARM_ERRATA_754322
select ARM_ERRATA_775420
select OMAP_INTERCONNECT
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
config SOC_DRA7XX
bool "TI DRA7XX"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND
select ARM_GIC
select HAVE_ARM_SCU if SMP
select HAVE_ARM_ARCH_TIMER
@@ -94,7 +93,7 @@ config SOC_DRA7XX
select OMAP_HWMOD
select OMAP_INTERCONNECT
select OMAP_INTERCONNECT_BARRIER
- select PM_OPP if PM
+ select PM_OPP
select ZONE_DMA if ARM_LPAE
select PINCTRL_TI_IODELAY if OF && PINCTRL
@@ -112,9 +111,11 @@ config ARCH_OMAP2PLUS
select OMAP_DM_TIMER
select OMAP_GPMC
select PINCTRL
- select PM_GENERIC_DOMAINS if PM
- select PM_GENERIC_DOMAINS_OF if PM
+ select PM
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
select RESET_CONTROLLER
+ select SIMPLE_PM_BUS
select SOC_BUS
select TI_SYSC
select OMAP_IRQCHIP
@@ -140,7 +141,6 @@ config ARCH_OMAP2PLUS_TYPICAL
select I2C_OMAP
select MENELAUS if ARCH_OMAP2
select NEON if CPU_V7
- select PM
select REGULATOR
select REGULATOR_FIXED_VOLTAGE
select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index dedd47e30b98..1feb0098705e 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -1299,7 +1299,7 @@ int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh)
* Due to a suspend or hibernation operation, the state of the registers
* controlling this clkdm will be lost, save their context.
*/
-static int _clkdm_save_context(struct clockdomain *clkdm, void *ununsed)
+static int _clkdm_save_context(struct clockdomain *clkdm, void *unused)
{
if (!arch_clkdm || !arch_clkdm->clkdm_save_context)
return -EINVAL;
@@ -1312,7 +1312,7 @@ static int _clkdm_save_context(struct clockdomain *clkdm, void *ununsed)
*
* Restore the register values for this clockdomain.
*/
-static int _clkdm_restore_context(struct clockdomain *clkdm, void *ununsed)
+static int _clkdm_restore_context(struct clockdomain *clkdm, void *unused)
{
if (!arch_clkdm || !arch_clkdm->clkdm_restore_context)
return -EINVAL;
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index c8d317fafe2e..de37027ad758 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -151,10 +151,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
(cx->mpu_logic_state == PWRDM_POWER_OFF);
/* Enter broadcast mode for periodic timers */
- tick_broadcast_enable();
+ RCU_NONIDLE(tick_broadcast_enable());
/* Enter broadcast mode for one-shot timers */
- tick_broadcast_enter();
+ RCU_NONIDLE(tick_broadcast_enter());
/*
* Call idle CPU PM enter notifier chain so that
@@ -166,7 +166,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
if (dev->cpu == 0) {
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
- omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+ RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
/*
* Call idle CPU cluster PM enter notifier chain
@@ -178,7 +178,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
index = 0;
cx = state_ptr + index;
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
- omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+ RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
mpuss_can_lose_context = 0;
}
}
@@ -194,9 +194,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
mpuss_can_lose_context)
gic_dist_disable();
- clkdm_deny_idle(cpu_clkdm[1]);
- omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
- clkdm_allow_idle(cpu_clkdm[1]);
+ RCU_NONIDLE(clkdm_deny_idle(cpu_clkdm[1]));
+ RCU_NONIDLE(omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON));
+ RCU_NONIDLE(clkdm_allow_idle(cpu_clkdm[1]));
if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
mpuss_can_lose_context) {
@@ -222,7 +222,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
cpu_pm_exit();
cpu_pm_out:
- tick_broadcast_exit();
+ RCU_NONIDLE(tick_broadcast_exit());
fail:
cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
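
The cpuidle44xx change wraps the tick-broadcast, clock-domain and power-domain calls in RCU_NONIDLE() because they can reach tracing/RCU-using code while the idle CPU is not being watched by RCU; the wrapper briefly re-enters the RCU-watching state around its argument. Roughly, it behaves like the simplified sketch below (the real macro in <linux/rcupdate.h> is the authoritative form):

/*
 * Simplified model of RCU_NONIDLE(): make RCU watch again for the
 * duration of the statement, then return to the idle, non-watching
 * state.  Sketch only.
 */
#define RCU_NONIDLE_SKETCH(stmt)		\
	do {					\
		rcu_irq_enter_irqson();		\
		do { stmt; } while (0);		\
		rcu_irq_exit_irqson();		\
	} while (0)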
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index cd38bf07c094..2e3a10914c40 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -522,6 +522,7 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = {
&dra7_ipu1_dsp_iommu_pdata),
#endif
/* Common auxdata */
+ OF_DEV_AUXDATA("simple-pm-bus", 0, NULL, omap_auxdata_lookup),
OF_DEV_AUXDATA("ti,sysc", 0, NULL, &ti_sysc_pdata),
OF_DEV_AUXDATA("pinctrl-single", 0, NULL, &pcs_pdata),
OF_DEV_AUXDATA("ti,omap-prm-inst", 0, NULL, &ti_prm_pdata),
diff --git a/arch/arm/mach-picoxcell/Kconfig b/arch/arm/mach-picoxcell/Kconfig
deleted file mode 100644
index b8eba18c0265..000000000000
--- a/arch/arm/mach-picoxcell/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config ARCH_PICOXCELL
- bool "Picochip PicoXcell"
- depends on ARCH_MULTI_V6
- select ARM_VIC
- select DW_APB_TIMER_OF
- select GPIOLIB
- select HAVE_TCM
- select NO_IOPORT_MAP
diff --git a/arch/arm/mach-picoxcell/common.c b/arch/arm/mach-picoxcell/common.c
deleted file mode 100644
index 8e738266a66a..000000000000
--- a/arch/arm/mach-picoxcell/common.c
+++ /dev/null
@@ -1,81 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2011 Picochip Ltd., Jamie Iles
- *
- * All enquiries to support@picochip.com
- */
-#include <linux/delay.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/reboot.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#define PHYS_TO_IO(x) (((x) & 0x00ffffff) | 0xfe000000)
-#define PICOXCELL_PERIPH_BASE 0x80000000
-#define PICOXCELL_PERIPH_LENGTH SZ_4M
-
-#define WDT_CTRL_REG_EN_MASK (1 << 0)
-#define WDT_CTRL_REG_OFFS (0x00)
-#define WDT_TIMEOUT_REG_OFFS (0x04)
-static void __iomem *wdt_regs;
-
-/*
- * The machine restart method can be called from an atomic context so we won't
- * be able to ioremap the regs then.
- */
-static void picoxcell_setup_restart(void)
-{
- struct device_node *np = of_find_compatible_node(NULL, NULL,
- "snps,dw-apb-wdg");
- if (WARN(!np, "unable to setup watchdog restart"))
- return;
-
- wdt_regs = of_iomap(np, 0);
- WARN(!wdt_regs, "failed to remap watchdog regs");
-}
-
-static struct map_desc io_map __initdata = {
- .virtual = PHYS_TO_IO(PICOXCELL_PERIPH_BASE),
- .pfn = __phys_to_pfn(PICOXCELL_PERIPH_BASE),
- .length = PICOXCELL_PERIPH_LENGTH,
- .type = MT_DEVICE,
-};
-
-static void __init picoxcell_map_io(void)
-{
- iotable_init(&io_map, 1);
-}
-
-static void __init picoxcell_init_machine(void)
-{
- picoxcell_setup_restart();
-}
-
-static const char *picoxcell_dt_match[] = {
- "picochip,pc3x2",
- "picochip,pc3x3",
- NULL
-};
-
-static void picoxcell_wdt_restart(enum reboot_mode mode, const char *cmd)
-{
- /*
- * Configure the watchdog to reset with the shortest possible timeout
- * and give it chance to do the reset.
- */
- if (wdt_regs) {
- writel_relaxed(WDT_CTRL_REG_EN_MASK, wdt_regs + WDT_CTRL_REG_OFFS);
- writel_relaxed(0, wdt_regs + WDT_TIMEOUT_REG_OFFS);
- /* No sleeping, possibly atomic. */
- mdelay(500);
- }
-}
-
-DT_MACHINE_START(PICOXCELL, "Picochip picoXcell")
- .map_io = picoxcell_map_io,
- .init_machine = picoxcell_init_machine,
- .dt_compat = picoxcell_dt_match,
- .restart = picoxcell_wdt_restart,
-MACHINE_END
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
deleted file mode 100644
index ea077f66372d..000000000000
--- a/arch/arm/mach-prima2/Kconfig
+++ /dev/null
@@ -1,48 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menuconfig ARCH_SIRF
- bool "CSR SiRF"
- depends on ARCH_MULTI_V7
- select ARCH_HAS_RESET_CONTROLLER
- select RESET_CONTROLLER
- select GENERIC_IRQ_CHIP
- select GPIOLIB
- select NO_IOPORT_MAP
- select REGMAP
- select PINCTRL
- select PINCTRL_SIRF
- help
- Support for CSR SiRFprimaII/Marco/Polo platforms
-
-if ARCH_SIRF
-
-comment "CSR SiRF atlas6/primaII/Atlas7 Specific Features"
-
-config ARCH_ATLAS6
- bool "CSR SiRFSoC ATLAS6 ARM Cortex A9 Platform"
- default y
- select SIRF_IRQ
- help
- Support for CSR SiRFSoC ARM Cortex A9 Platform
-
-config ARCH_ATLAS7
- bool "CSR SiRFSoC ATLAS7 ARM Cortex A7 Platform"
- default y
- select ARM_GIC
- select ATLAS7_TIMER
- select HAVE_ARM_SCU if SMP
- help
- Support for CSR SiRFSoC ARM Cortex A7 Platform
-
-config ARCH_PRIMA2
- bool "CSR SiRFSoC PRIMA2 ARM Cortex A9 Platform"
- default y
- select SIRF_IRQ
- select ZONE_DMA
- select PRIMA2_TIMER
- help
- Support for CSR SiRFSoC ARM Cortex A9 Platform
-
-config SIRF_IRQ
- bool
-
-endif
diff --git a/arch/arm/mach-prima2/Makefile b/arch/arm/mach-prima2/Makefile
deleted file mode 100644
index 0fd2763031e9..000000000000
--- a/arch/arm/mach-prima2/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-y += rstc.o
-obj-y += common.o
-obj-y += rtciobrg.o
-obj-$(CONFIG_SUSPEND) += pm.o sleep.o
-obj-$(CONFIG_SMP) += platsmp.o headsmp.o
-obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
-
-CFLAGS_hotplug.o += -march=armv7-a
diff --git a/arch/arm/mach-prima2/common.c b/arch/arm/mach-prima2/common.c
deleted file mode 100644
index e2d158e331e2..000000000000
--- a/arch/arm/mach-prima2/common.c
+++ /dev/null
@@ -1,64 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Defines machines for CSR SiRFprimaII
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sizes.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include "common.h"
-
-static void __init __maybe_unused sirfsoc_init_late(void)
-{
- sirfsoc_pm_init();
-}
-
-#ifdef CONFIG_ARCH_ATLAS6
-static const char *const atlas6_dt_match[] __initconst = {
- "sirf,atlas6",
- NULL
-};
-
-DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)")
- /* Maintainer: Barry Song <baohua.song@csr.com> */
- .l2c_aux_val = 0,
- .l2c_aux_mask = ~0,
- .init_late = sirfsoc_init_late,
- .dt_compat = atlas6_dt_match,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_ARCH_PRIMA2
-static const char *const prima2_dt_match[] __initconst = {
- "sirf,prima2",
- NULL
-};
-
-DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)")
- /* Maintainer: Barry Song <baohua.song@csr.com> */
- .l2c_aux_val = 0,
- .l2c_aux_mask = ~0,
- .dma_zone_size = SZ_256M,
- .init_late = sirfsoc_init_late,
- .dt_compat = prima2_dt_match,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_ARCH_ATLAS7
-static const char *const atlas7_dt_match[] __initconst = {
- "sirf,atlas7",
- NULL
-};
-
-DT_MACHINE_START(ATLAS7_DT, "Generic ATLAS7 (Flattened Device Tree)")
- /* Maintainer: Barry Song <baohua.song@csr.com> */
- .smp = smp_ops(sirfsoc_smp_ops),
- .dt_compat = atlas7_dt_match,
-MACHINE_END
-#endif
diff --git a/arch/arm/mach-prima2/common.h b/arch/arm/mach-prima2/common.h
deleted file mode 100644
index 3bab7e571ded..000000000000
--- a/arch/arm/mach-prima2/common.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * This file contains common function prototypes to avoid externs in the c files.
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#ifndef __MACH_PRIMA2_COMMON_H__
-#define __MACH_PRIMA2_COMMON_H__
-
-#include <linux/init.h>
-#include <linux/reboot.h>
-
-#include <asm/mach/time.h>
-#include <asm/exception.h>
-
-extern volatile int prima2_pen_release;
-
-extern const struct smp_operations sirfsoc_smp_ops;
-extern void sirfsoc_secondary_startup(void);
-extern void sirfsoc_cpu_die(unsigned int cpu);
-
-extern void __init sirfsoc_of_irq_init(void);
-extern asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs);
-
-#ifdef CONFIG_SUSPEND
-extern int sirfsoc_pm_init(void);
-#else
-static inline int sirfsoc_pm_init(void) { return 0; }
-#endif
-
-#endif
diff --git a/arch/arm/mach-prima2/headsmp.S b/arch/arm/mach-prima2/headsmp.S
deleted file mode 100644
index 88ea1243942a..000000000000
--- a/arch/arm/mach-prima2/headsmp.S
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Entry of the second core for CSR Marco dual-core SMP SoCs
- *
- * Copyright (c) 2012 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-
-/*
- * SIRFSOC specific entry point for secondary CPUs. This provides
- * a "holding pen" into which all secondary cores are held until we're
- * ready for them to initialise.
- */
-ENTRY(sirfsoc_secondary_startup)
- mrc p15, 0, r0, c0, c0, 5
- and r0, r0, #15
- adr r4, 1f
- ldmia r4, {r5, r6}
- sub r4, r4, r5
- add r6, r6, r4
-pen: ldr r7, [r6]
- cmp r7, r0
- bne pen
-
- /*
- * we've been released from the holding pen: secondary_stack
- * should now contain the SVC stack for this core
- */
- b secondary_startup
-ENDPROC(sirfsoc_secondary_startup)
-
- .align
-1: .long .
- .long prima2_pen_release
diff --git a/arch/arm/mach-prima2/hotplug.c b/arch/arm/mach-prima2/hotplug.c
deleted file mode 100644
index bc0d957e89ac..000000000000
--- a/arch/arm/mach-prima2/hotplug.c
+++ /dev/null
@@ -1,38 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CPU hotplug support for CSR Marco dual-core SMP SoCs
- *
- * Copyright (c) 2012 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/smp.h>
-
-#include <asm/smp_plat.h>
-#include "common.h"
-
-static inline void platform_do_lowpower(unsigned int cpu)
-{
- /* we put the platform to just WFI */
- for (;;) {
- __asm__ __volatile__("dsb\n\t" "wfi\n\t"
- : : : "memory");
- if (prima2_pen_release == cpu_logical_map(cpu)) {
- /*
- * OK, proper wakeup, we're done
- */
- break;
- }
- }
-}
-
-/*
- * platform-specific code to shutdown a CPU
- *
- * Called with IRQs disabled
- */
-void sirfsoc_cpu_die(unsigned int cpu)
-{
- platform_do_lowpower(cpu);
-}
diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
deleted file mode 100644
index 8f7bbb57fb20..000000000000
--- a/arch/arm/mach-prima2/platsmp.c
+++ /dev/null
@@ -1,123 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * plat smp support for CSR Marco dual-core SMP SoCs
- *
- * Copyright (c) 2012 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/delay.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <asm/page.h>
-#include <asm/mach/map.h>
-#include <asm/smp_plat.h>
-#include <asm/smp_scu.h>
-#include <asm/cacheflush.h>
-#include <asm/cputype.h>
-
-#include "common.h"
-
-static void __iomem *clk_base;
-
-static DEFINE_SPINLOCK(boot_lock);
-
-/* XXX prima2_pen_release is cargo culted code - DO NOT COPY XXX */
-volatile int prima2_pen_release = -1;
-
-static void sirfsoc_secondary_init(unsigned int cpu)
-{
- /*
- * let the primary processor know we're out of the
- * pen, then head off into the C entry point
- */
- prima2_pen_release = -1;
- smp_wmb();
-
- /*
- * Synchronise with the boot thread.
- */
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
-}
-
-static const struct of_device_id clk_ids[] = {
- { .compatible = "sirf,atlas7-clkc" },
- {},
-};
-
-static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
-{
- unsigned long timeout;
- struct device_node *np;
-
- np = of_find_matching_node(NULL, clk_ids);
- if (!np)
- return -ENODEV;
-
- clk_base = of_iomap(np, 0);
- if (!clk_base)
- return -ENOMEM;
-
- /*
- * write the address of secondary startup into the clkc register
- * at offset 0x2bC, then write the magic number 0x3CAF5D62 to the
- * clkc register at offset 0x2b8, which is what boot rom code is
- * waiting for. This would wake up the secondary core from WFE
- */
-#define SIRFSOC_CPU1_JUMPADDR_OFFSET 0x2bc
- __raw_writel(__pa_symbol(sirfsoc_secondary_startup),
- clk_base + SIRFSOC_CPU1_JUMPADDR_OFFSET);
-
-#define SIRFSOC_CPU1_WAKEMAGIC_OFFSET 0x2b8
- __raw_writel(0x3CAF5D62,
- clk_base + SIRFSOC_CPU1_WAKEMAGIC_OFFSET);
-
- /* make sure write buffer is drained */
- mb();
-
- spin_lock(&boot_lock);
-
- /*
- * The secondary processor is waiting to be released from
- * the holding pen - release it, then wait for it to flag
- * that it has been released by resetting prima2_pen_release.
- *
- * Note that "prima2_pen_release" is the hardware CPU ID, whereas
- * "cpu" is Linux's internal ID.
- */
- prima2_pen_release = cpu_logical_map(cpu);
- sync_cache_w(&prima2_pen_release);
-
- /*
- * Send the secondary CPU SEV, thereby causing the boot monitor to read
- * the JUMPADDR and WAKEMAGIC, and branch to the address found there.
- */
- dsb_sev();
-
- timeout = jiffies + (1 * HZ);
- while (time_before(jiffies, timeout)) {
- smp_rmb();
- if (prima2_pen_release == -1)
- break;
-
- udelay(10);
- }
-
- /*
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- spin_unlock(&boot_lock);
-
- return prima2_pen_release != -1 ? -ENOSYS : 0;
-}
-
-const struct smp_operations sirfsoc_smp_ops __initconst = {
- .smp_secondary_init = sirfsoc_secondary_init,
- .smp_boot_secondary = sirfsoc_boot_secondary,
-#ifdef CONFIG_HOTPLUG_CPU
- .cpu_die = sirfsoc_cpu_die,
-#endif
-};
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
deleted file mode 100644
index c24bc89f320b..000000000000
--- a/arch/arm/mach-prima2/pm.c
+++ /dev/null
@@ -1,153 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * power management entry for CSR SiRFprimaII
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/kernel.h>
-#include <linux/suspend.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/io.h>
-#include <linux/rtc/sirfsoc_rtciobrg.h>
-#include <asm/outercache.h>
-#include <asm/suspend.h>
-#include <asm/hardware/cache-l2x0.h>
-
-#include "pm.h"
-
-/*
- * suspend asm codes will access these to make DRAM become self-refresh and
- * system sleep
- */
-u32 sirfsoc_pwrc_base;
-void __iomem *sirfsoc_memc_base;
-
-static void sirfsoc_set_wakeup_source(void)
-{
- u32 pwr_trigger_en_reg;
- pwr_trigger_en_reg = sirfsoc_rtc_iobrg_readl(sirfsoc_pwrc_base +
- SIRFSOC_PWRC_TRIGGER_EN);
-#define X_ON_KEY_B (1 << 0)
-#define RTC_ALARM0_B (1 << 2)
-#define RTC_ALARM1_B (1 << 3)
- sirfsoc_rtc_iobrg_writel(pwr_trigger_en_reg | X_ON_KEY_B |
- RTC_ALARM0_B | RTC_ALARM1_B,
- sirfsoc_pwrc_base + SIRFSOC_PWRC_TRIGGER_EN);
-}
-
-static void sirfsoc_set_sleep_mode(u32 mode)
-{
- u32 sleep_mode = sirfsoc_rtc_iobrg_readl(sirfsoc_pwrc_base +
- SIRFSOC_PWRC_PDN_CTRL);
- sleep_mode &= ~(SIRFSOC_SLEEP_MODE_MASK << 1);
- sleep_mode |= mode << 1;
- sirfsoc_rtc_iobrg_writel(sleep_mode, sirfsoc_pwrc_base +
- SIRFSOC_PWRC_PDN_CTRL);
-}
-
-static int sirfsoc_pre_suspend_power_off(void)
-{
- u32 wakeup_entry = __pa_symbol(cpu_resume);
-
- sirfsoc_rtc_iobrg_writel(wakeup_entry, sirfsoc_pwrc_base +
- SIRFSOC_PWRC_SCRATCH_PAD1);
-
- sirfsoc_set_wakeup_source();
-
- sirfsoc_set_sleep_mode(SIRFSOC_DEEP_SLEEP_MODE);
-
- return 0;
-}
-
-static int sirfsoc_pm_enter(suspend_state_t state)
-{
- switch (state) {
- case PM_SUSPEND_MEM:
- sirfsoc_pre_suspend_power_off();
-
- outer_disable();
- /* go zzz */
- cpu_suspend(0, sirfsoc_finish_suspend);
- outer_resume();
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static const struct platform_suspend_ops sirfsoc_pm_ops = {
- .enter = sirfsoc_pm_enter,
- .valid = suspend_valid_only_mem,
-};
-
-static const struct of_device_id pwrc_ids[] = {
- { .compatible = "sirf,prima2-pwrc" },
- {}
-};
-
-static int __init sirfsoc_of_pwrc_init(void)
-{
- struct device_node *np;
-
- np = of_find_matching_node(NULL, pwrc_ids);
- if (!np) {
- pr_err("unable to find compatible sirf pwrc node in dtb\n");
- return -ENOENT;
- }
-
- /*
- * pwrc behind rtciobrg is not located in memory space
- * though the property is named reg. reg only means base
- * offset for pwrc. then of_iomap is not suitable here.
- */
- if (of_property_read_u32(np, "reg", &sirfsoc_pwrc_base))
- panic("unable to find base address of pwrc node in dtb\n");
-
- of_node_put(np);
-
- return 0;
-}
-
-static const struct of_device_id memc_ids[] = {
- { .compatible = "sirf,prima2-memc" },
- {}
-};
-
-static int sirfsoc_memc_probe(struct platform_device *op)
-{
- struct device_node *np = op->dev.of_node;
-
- sirfsoc_memc_base = of_iomap(np, 0);
- if (!sirfsoc_memc_base)
- panic("unable to map memc registers\n");
-
- return 0;
-}
-
-static struct platform_driver sirfsoc_memc_driver = {
- .probe = sirfsoc_memc_probe,
- .driver = {
- .name = "sirfsoc-memc",
- .of_match_table = memc_ids,
- },
-};
-
-static int __init sirfsoc_memc_init(void)
-{
- return platform_driver_register(&sirfsoc_memc_driver);
-}
-
-int __init sirfsoc_pm_init(void)
-{
- sirfsoc_of_pwrc_init();
- sirfsoc_memc_init();
- suspend_set_ops(&sirfsoc_pm_ops);
- return 0;
-}
diff --git a/arch/arm/mach-prima2/pm.h b/arch/arm/mach-prima2/pm.h
deleted file mode 100644
index 0aff6cb876be..000000000000
--- a/arch/arm/mach-prima2/pm.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * arch/arm/mach-prima2/pm.h
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#ifndef _MACH_PRIMA2_PM_H_
-#define _MACH_PRIMA2_PM_H_
-
-#define SIRFSOC_PWR_SLEEPFORCE 0x01
-
-#define SIRFSOC_SLEEP_MODE_MASK 0x3
-#define SIRFSOC_DEEP_SLEEP_MODE 0x1
-
-#define SIRFSOC_PWRC_PDN_CTRL 0x0
-#define SIRFSOC_PWRC_PON_OFF 0x4
-#define SIRFSOC_PWRC_TRIGGER_EN 0x8
-#define SIRFSOC_PWRC_PIN_STATUS 0x14
-#define SIRFSOC_PWRC_SCRATCH_PAD1 0x18
-#define SIRFSOC_PWRC_SCRATCH_PAD2 0x1C
-
-#ifndef __ASSEMBLY__
-extern int sirfsoc_finish_suspend(unsigned long);
-#endif
-
-#endif
-
diff --git a/arch/arm/mach-prima2/rstc.c b/arch/arm/mach-prima2/rstc.c
deleted file mode 100644
index 9d56606ac87f..000000000000
--- a/arch/arm/mach-prima2/rstc.c
+++ /dev/null
@@ -1,107 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * reset controller for CSR SiRFprimaII
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/reboot.h>
-#include <linux/reset-controller.h>
-
-#include <asm/system_misc.h>
-
-#define SIRFSOC_RSTBIT_NUM 64
-
-static void __iomem *sirfsoc_rstc_base;
-static DEFINE_MUTEX(rstc_lock);
-
-static int sirfsoc_reset_module(struct reset_controller_dev *rcdev,
- unsigned long sw_reset_idx)
-{
- u32 reset_bit = sw_reset_idx;
-
- if (reset_bit >= SIRFSOC_RSTBIT_NUM)
- return -EINVAL;
-
- mutex_lock(&rstc_lock);
-
- /*
- * Writing 1 to this bit resets corresponding block.
- * Writing 0 to this bit de-asserts reset signal of the
- * corresponding block. datasheet doesn't require explicit
- * delay between the set and clear of reset bit. it could
- * be shorter if tests pass.
- */
- writel(readl(sirfsoc_rstc_base +
- (reset_bit / 32) * 4) | (1 << reset_bit),
- sirfsoc_rstc_base + (reset_bit / 32) * 4);
- msleep(20);
- writel(readl(sirfsoc_rstc_base +
- (reset_bit / 32) * 4) & ~(1 << reset_bit),
- sirfsoc_rstc_base + (reset_bit / 32) * 4);
-
- mutex_unlock(&rstc_lock);
-
- return 0;
-}
-
-static struct reset_control_ops sirfsoc_rstc_ops = {
- .reset = sirfsoc_reset_module,
-};
-
-static struct reset_controller_dev sirfsoc_reset_controller = {
- .ops = &sirfsoc_rstc_ops,
- .nr_resets = SIRFSOC_RSTBIT_NUM,
-};
-
-#define SIRFSOC_SYS_RST_BIT BIT(31)
-
-static void sirfsoc_restart(enum reboot_mode mode, const char *cmd)
-{
- writel(SIRFSOC_SYS_RST_BIT, sirfsoc_rstc_base);
-}
-
-static int sirfsoc_rstc_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- sirfsoc_rstc_base = of_iomap(np, 0);
- if (!sirfsoc_rstc_base) {
- dev_err(&pdev->dev, "unable to map rstc cpu registers\n");
- return -ENOMEM;
- }
-
- sirfsoc_reset_controller.of_node = np;
- arm_pm_restart = sirfsoc_restart;
-
- if (IS_ENABLED(CONFIG_RESET_CONTROLLER))
- reset_controller_register(&sirfsoc_reset_controller);
-
- return 0;
-}
-
-static const struct of_device_id rstc_ids[] = {
- { .compatible = "sirf,prima2-rstc" },
- {},
-};
-
-static struct platform_driver sirfsoc_rstc_driver = {
- .probe = sirfsoc_rstc_probe,
- .driver = {
- .name = "sirfsoc_rstc",
- .of_match_table = rstc_ids,
- },
-};
-
-static int __init sirfsoc_rstc_init(void)
-{
- return platform_driver_register(&sirfsoc_rstc_driver);
-}
-subsys_initcall(sirfsoc_rstc_init);
diff --git a/arch/arm/mach-prima2/rtciobrg.c b/arch/arm/mach-prima2/rtciobrg.c
deleted file mode 100644
index 97c0e333e3b9..000000000000
--- a/arch/arm/mach-prima2/rtciobrg.c
+++ /dev/null
@@ -1,179 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * RTC I/O Bridge interfaces for CSR SiRFprimaII/atlas7
- * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/regmap.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-
-#define SIRFSOC_CPUIOBRG_CTRL 0x00
-#define SIRFSOC_CPUIOBRG_WRBE 0x04
-#define SIRFSOC_CPUIOBRG_ADDR 0x08
-#define SIRFSOC_CPUIOBRG_DATA 0x0c
-
-/*
- * suspend asm codes will access this address to make system deepsleep
- * after DRAM becomes self-refresh
- */
-void __iomem *sirfsoc_rtciobrg_base;
-static DEFINE_SPINLOCK(rtciobrg_lock);
-
-/*
- * symbols without lock are only used by suspend asm codes
- * and these symbols are not exported too
- */
-void sirfsoc_rtc_iobrg_wait_sync(void)
-{
- while (readl_relaxed(sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_CTRL))
- cpu_relax();
-}
-
-void sirfsoc_rtc_iobrg_besyncing(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rtciobrg_lock, flags);
-
- sirfsoc_rtc_iobrg_wait_sync();
-
- spin_unlock_irqrestore(&rtciobrg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_besyncing);
-
-u32 __sirfsoc_rtc_iobrg_readl(u32 addr)
-{
- sirfsoc_rtc_iobrg_wait_sync();
-
- writel_relaxed(0x00, sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_WRBE);
- writel_relaxed(addr, sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_ADDR);
- writel_relaxed(0x01, sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_CTRL);
-
- sirfsoc_rtc_iobrg_wait_sync();
-
- return readl_relaxed(sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_DATA);
-}
-
-u32 sirfsoc_rtc_iobrg_readl(u32 addr)
-{
- unsigned long flags, val;
-
- /* TODO: add hwspinlock to sync with M3 */
- spin_lock_irqsave(&rtciobrg_lock, flags);
-
- val = __sirfsoc_rtc_iobrg_readl(addr);
-
- spin_unlock_irqrestore(&rtciobrg_lock, flags);
-
- return val;
-}
-EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_readl);
-
-void sirfsoc_rtc_iobrg_pre_writel(u32 val, u32 addr)
-{
- sirfsoc_rtc_iobrg_wait_sync();
-
- writel_relaxed(0xf1, sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_WRBE);
- writel_relaxed(addr, sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_ADDR);
-
- writel_relaxed(val, sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_DATA);
-}
-
-void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr)
-{
- unsigned long flags;
-
- /* TODO: add hwspinlock to sync with M3 */
- spin_lock_irqsave(&rtciobrg_lock, flags);
-
- sirfsoc_rtc_iobrg_pre_writel(val, addr);
-
- writel_relaxed(0x01, sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_CTRL);
-
- sirfsoc_rtc_iobrg_wait_sync();
-
- spin_unlock_irqrestore(&rtciobrg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_writel);
-
-
-static int regmap_iobg_regwrite(void *context, unsigned int reg,
- unsigned int val)
-{
- sirfsoc_rtc_iobrg_writel(val, reg);
- return 0;
-}
-
-static int regmap_iobg_regread(void *context, unsigned int reg,
- unsigned int *val)
-{
- *val = (u32)sirfsoc_rtc_iobrg_readl(reg);
- return 0;
-}
-
-static struct regmap_bus regmap_iobg = {
- .reg_write = regmap_iobg_regwrite,
- .reg_read = regmap_iobg_regread,
-};
-
-/**
- * devm_regmap_init_iobg(): Initialise managed register map
- *
- * @iobg: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_iobg(struct device *dev,
- const struct regmap_config *config)
-{
- const struct regmap_bus *bus = &regmap_iobg;
-
- return devm_regmap_init(dev, bus, dev, config);
-}
-EXPORT_SYMBOL_GPL(devm_regmap_init_iobg);
-
-static const struct of_device_id rtciobrg_ids[] = {
- { .compatible = "sirf,prima2-rtciobg" },
- {}
-};
-
-static int sirfsoc_rtciobrg_probe(struct platform_device *op)
-{
- struct device_node *np = op->dev.of_node;
-
- sirfsoc_rtciobrg_base = of_iomap(np, 0);
- if (!sirfsoc_rtciobrg_base)
- panic("unable to map rtc iobrg registers\n");
-
- return 0;
-}
-
-static struct platform_driver sirfsoc_rtciobrg_driver = {
- .probe = sirfsoc_rtciobrg_probe,
- .driver = {
- .name = "sirfsoc-rtciobrg",
- .of_match_table = rtciobrg_ids,
- },
-};
-
-static int __init sirfsoc_rtciobrg_init(void)
-{
- return platform_driver_register(&sirfsoc_rtciobrg_driver);
-}
-postcore_initcall(sirfsoc_rtciobrg_init);
-
-MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>");
-MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
-MODULE_DESCRIPTION("CSR SiRFprimaII rtc io bridge");
-MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-prima2/sleep.S b/arch/arm/mach-prima2/sleep.S
deleted file mode 100644
index d9bbc5ca39ef..000000000000
--- a/arch/arm/mach-prima2/sleep.S
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * sleep mode for CSR SiRFprimaII
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm/assembler.h>
-
-#include "pm.h"
-
-#define DENALI_CTL_22_OFF 0x58
-#define DENALI_CTL_112_OFF 0x1c0
-
- .text
-
-ENTRY(sirfsoc_finish_suspend)
- @ r5: mem controller
- ldr r0, =sirfsoc_memc_base
- ldr r5, [r0]
- @ r6: pwrc base offset
- ldr r0, =sirfsoc_pwrc_base
- ldr r6, [r0]
- @ r7: rtc iobrg controller
- ldr r0, =sirfsoc_rtciobrg_base
- ldr r7, [r0]
-
- @ Read the power control register and set the
- @ sleep force bit.
- add r0, r6, #SIRFSOC_PWRC_PDN_CTRL
- bl __sirfsoc_rtc_iobrg_readl
- orr r0,r0,#SIRFSOC_PWR_SLEEPFORCE
- add r1, r6, #SIRFSOC_PWRC_PDN_CTRL
- bl sirfsoc_rtc_iobrg_pre_writel
- mov r1, #0x1
-
- @ read the MEM ctl register and set the self
- @ refresh bit
-
- ldr r2, [r5, #DENALI_CTL_22_OFF]
- orr r2, r2, #0x1
-
- @ Following code has to run from cache since
- @ the RAM is going to self refresh mode
- .align 5
- str r2, [r5, #DENALI_CTL_22_OFF]
-
-1:
- ldr r4, [r5, #DENALI_CTL_112_OFF]
- tst r4, #0x1
- bne 1b
-
- @ write SLEEPFORCE through rtc iobridge
-
- str r1, [r7]
- @ wait rtc io bridge sync
-1:
- ldr r3, [r7]
- tst r3, #0x01
- bne 1b
- b .
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index 524d6093e0c7..09b8495f3fd9 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -4,6 +4,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/spi/pxa2xx_spi.h>
@@ -634,6 +635,13 @@ static struct platform_device pxa27x_device_camera = {
void __init pxa_set_camera_info(struct pxacamera_platform_data *info)
{
+ struct clk *mclk;
+
+ /* Register a fixed-rate clock for camera sensors. */
+ mclk = clk_register_fixed_rate(NULL, "pxa_camera_clk", NULL, 0,
+ info->mclk_10khz * 10000);
+ if (!IS_ERR(mclk))
+ clkdev_create(mclk, "mclk", NULL);
pxa_register_device(&pxa27x_device_camera, info);
}
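
The devices.c hunk above registers a fixed-rate "pxa_camera_clk" plus a clkdev entry with con_id "mclk", so camera sensor drivers can obtain the sensor clock through the clock API instead of receiving a raw rate via platform data. A minimal consumer-side sketch under that assumption follows; the driver function name is hypothetical, and only clk_get(), clk_get_rate() and clk_prepare_enable() are the real kernel APIs being illustrated:

/* Hypothetical consumer: how a sensor driver could pick up the "mclk"
 * clkdev entry registered above. Error handling kept minimal. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_sensor_clock_setup(struct device *dev)
{
        struct clk *mclk;

        mclk = clk_get(dev, "mclk");    /* matches the clkdev con_id above */
        if (IS_ERR(mclk))
                return PTR_ERR(mclk);

        dev_info(dev, "sensor mclk runs at %lu Hz\n", clk_get_rate(mclk));

        return clk_prepare_enable(mclk);
}
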
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index d3af80317f2d..a79f296e81e0 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -577,7 +577,6 @@ static struct platform_device power_dev = {
static struct wm97xx_batt_pdata mioa701_battery_data = {
.batt_aux = WM97XX_AUX_ID1,
.temp_aux = -1,
- .charge_gpio = -1,
.min_voltage = 0xc00,
.max_voltage = 0xfc0,
.batt_tech = POWER_SUPPLY_TECHNOLOGY_LION,
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 0d246a1aebbc..6230381a7ca0 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -212,7 +212,6 @@ void __init palm27x_irda_init(int pwdn)
static struct wm97xx_batt_pdata palm27x_batt_pdata = {
.batt_aux = WM97XX_AUX_ID3,
.temp_aux = WM97XX_AUX_ID2,
- .charge_gpio = -1,
.batt_mult = 1000,
.batt_div = 414,
.temp_mult = 1,
diff --git a/arch/arm/mach-pxa/palmte2.c b/arch/arm/mach-pxa/palmte2.c
index e3bcf58b4e63..a2b10db4aacc 100644
--- a/arch/arm/mach-pxa/palmte2.c
+++ b/arch/arm/mach-pxa/palmte2.c
@@ -273,7 +273,6 @@ static struct platform_device power_supply = {
static struct wm97xx_batt_pdata palmte2_batt_pdata = {
.batt_aux = WM97XX_AUX_ID3,
.temp_aux = WM97XX_AUX_ID2,
- .charge_gpio = -1,
.max_voltage = PALMTE2_BAT_MAX_VOLTAGE,
.min_voltage = PALMTE2_BAT_MIN_VOLTAGE,
.batt_mult = 1000,
diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c
index 21fd76bb09cd..8e74fbb0a96e 100644
--- a/arch/arm/mach-pxa/z2.c
+++ b/arch/arm/mach-pxa/z2.c
@@ -20,7 +20,6 @@
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/libertas_spi.h>
-#include <linux/spi/lms283gf05.h>
#include <linux/power_supply.h>
#include <linux/mtd/physmap.h>
#include <linux/gpio.h>
@@ -488,7 +487,6 @@ static struct z2_battery_info batt_chip_info = {
.batt_I2C_bus = 0,
.batt_I2C_addr = 0x55,
.batt_I2C_reg = 2,
- .charge_gpio = GPIO0_ZIPITZ2_AC_DETECT,
.min_voltage = 3475000,
.max_voltage = 4190000,
.batt_div = 59,
@@ -497,9 +495,19 @@ static struct z2_battery_info batt_chip_info = {
.batt_name = "Z2",
};
+static struct gpiod_lookup_table z2_battery_gpio_table = {
+ .dev_id = "aer915",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", GPIO0_ZIPITZ2_AC_DETECT,
+ NULL, GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
static struct i2c_board_info __initdata z2_i2c_board_info[] = {
{
I2C_BOARD_INFO("aer915", 0x55),
+ .dev_name = "aer915",
.platform_data = &batt_chip_info,
}, {
I2C_BOARD_INFO("wm8750", 0x1b),
@@ -510,6 +518,7 @@ static struct i2c_board_info __initdata z2_i2c_board_info[] = {
static void __init z2_i2c_init(void)
{
pxa_set_i2c_info(NULL);
+ gpiod_add_lookup_table(&z2_battery_gpio_table);
i2c_register_board_info(0, ARRAY_AND_SIZE(z2_i2c_board_info));
}
#else
@@ -578,8 +587,13 @@ static struct pxa2xx_spi_chip lms283_chip_info = {
.gpio_cs = GPIO88_ZIPITZ2_LCD_CS,
};
-static const struct lms283gf05_pdata lms283_pdata = {
- .reset_gpio = GPIO19_ZIPITZ2_LCD_RESET,
+static struct gpiod_lookup_table lms283_gpio_table = {
+ .dev_id = "spi2.0", /* SPI bus 2 chip select 0 */
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", GPIO19_ZIPITZ2_LCD_RESET,
+ "reset", GPIO_ACTIVE_LOW),
+ { },
+ },
};
static struct spi_board_info spi_board_info[] __initdata = {
@@ -595,7 +609,6 @@ static struct spi_board_info spi_board_info[] __initdata = {
{
.modalias = "lms283gf05",
.controller_data = &lms283_chip_info,
- .platform_data = &lms283_pdata,
.max_speed_hz = 400000,
.bus_num = 2,
.chip_select = 0,
@@ -615,6 +628,7 @@ static void __init z2_spi_init(void)
{
pxa2xx_set_spi_info(1, &pxa_ssp1_master_info);
pxa2xx_set_spi_info(2, &pxa_ssp2_master_info);
+ gpiod_add_lookup_table(&lms283_gpio_table);
spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
}
#else
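
The z2 hunks above drop the raw GPIO numbers (charge_gpio, reset_gpio) from platform data and instead register gpiod lookup tables keyed on device name ("aer915", "spi2.0") and con_id, with the line polarity recorded in the table rather than open-coded in the driver. A hedged consumer-side sketch, assuming a hypothetical driver bound to "spi2.0" that requests the "reset" line; the gpiod calls are the standard consumer API:

/* Hypothetical consumer for the gpiod lookup tables added above. */
#include <linux/gpio/consumer.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/err.h>

static int example_lcd_reset(struct device *dev)
{
        struct gpio_desc *reset;

        /* con_id "reset" matches the GPIO_LOOKUP() entry; the active-low
         * polarity is handled by gpiolib, not by the driver. */
        reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(reset))
                return PTR_ERR(reset);

        gpiod_set_value_cansleep(reset, 1);     /* assert reset */
        usleep_range(1000, 2000);
        gpiod_set_value_cansleep(reset, 0);     /* release reset */

        return 0;
}
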
diff --git a/arch/arm/mach-s3c/irq-s3c24xx-fiq.S b/arch/arm/mach-s3c/irq-s3c24xx-fiq.S
index b54cbd012241..5d238d9a798e 100644
--- a/arch/arm/mach-s3c/irq-s3c24xx-fiq.S
+++ b/arch/arm/mach-s3c/irq-s3c24xx-fiq.S
@@ -35,7 +35,6 @@
@ and an offset to the irq acknowledgment word
ENTRY(s3c24xx_spi_fiq_rx)
-s3c24xx_spi_fix_rx:
.word fiq_rx_end - fiq_rx_start
.word fiq_rx_irq_ack - fiq_rx_start
fiq_rx_start:
@@ -49,7 +48,7 @@ fiq_rx_start:
strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
subs fiq_rcount, fiq_rcount, #1
- subnes pc, lr, #4 @@ return, still have work to do
+ subsne pc, lr, #4 @@ return, still have work to do
@@ set IRQ controller so that next op will trigger IRQ
mov fiq_rtmp, #0
@@ -61,7 +60,6 @@ fiq_rx_irq_ack:
fiq_rx_end:
ENTRY(s3c24xx_spi_fiq_txrx)
-s3c24xx_spi_fiq_txrx:
.word fiq_txrx_end - fiq_txrx_start
.word fiq_txrx_irq_ack - fiq_txrx_start
fiq_txrx_start:
@@ -76,7 +74,7 @@ fiq_txrx_start:
strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
subs fiq_rcount, fiq_rcount, #1
- subnes pc, lr, #4 @@ return, still have work to do
+ subsne pc, lr, #4 @@ return, still have work to do
mov fiq_rtmp, #0
str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
@@ -88,7 +86,6 @@ fiq_txrx_irq_ack:
fiq_txrx_end:
ENTRY(s3c24xx_spi_fiq_tx)
-s3c24xx_spi_fix_tx:
.word fiq_tx_end - fiq_tx_start
.word fiq_tx_irq_ack - fiq_tx_start
fiq_tx_start:
@@ -101,7 +98,7 @@ fiq_tx_start:
strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
subs fiq_rcount, fiq_rcount, #1
- subnes pc, lr, #4 @@ return, still have work to do
+ subsne pc, lr, #4 @@ return, still have work to do
mov fiq_rtmp, #0
str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
diff --git a/arch/arm/mach-s3c/irq-s3c24xx.c b/arch/arm/mach-s3c/irq-s3c24xx.c
index 79b5f19af7a5..0c631c14a817 100644
--- a/arch/arm/mach-s3c/irq-s3c24xx.c
+++ b/arch/arm/mach-s3c/irq-s3c24xx.c
@@ -21,6 +21,7 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/spi/s3c24xx.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
@@ -32,6 +33,7 @@
#include "cpu.h"
#include "regs-irqtype.h"
#include "pm.h"
+#include "s3c24xx.h"
#define S3C_IRQTYPE_NONE 0
#define S3C_IRQTYPE_EINT 1
@@ -357,7 +359,7 @@ static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc,
return true;
}
-asmlinkage void __exception_irq_entry s3c24xx_handle_irq(struct pt_regs *regs)
+static asmlinkage void __exception_irq_entry s3c24xx_handle_irq(struct pt_regs *regs)
{
do {
if (likely(s3c_intc[0]))
@@ -1305,7 +1307,7 @@ static struct s3c24xx_irq_of_ctrl s3c2410_ctrl[] = {
}
};
-int __init s3c2410_init_intc_of(struct device_node *np,
+static int __init s3c2410_init_intc_of(struct device_node *np,
struct device_node *interrupt_parent)
{
return s3c_init_intc_of(np, interrupt_parent,
@@ -1327,7 +1329,7 @@ static struct s3c24xx_irq_of_ctrl s3c2416_ctrl[] = {
}
};
-int __init s3c2416_init_intc_of(struct device_node *np,
+static int __init s3c2416_init_intc_of(struct device_node *np,
struct device_node *interrupt_parent)
{
return s3c_init_intc_of(np, interrupt_parent,
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index d4e89a02c8c8..14c33ed05318 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -224,18 +224,12 @@ static int collie_uart_probe(struct locomo_dev *dev)
return 0;
}
-static int collie_uart_remove(struct locomo_dev *dev)
-{
- return 0;
-}
-
static struct locomo_driver collie_uart_driver = {
.drv = {
.name = "collie_uart",
},
.devid = LOCOMO_DEVID_UART,
.probe = collie_uart_probe,
- .remove = collie_uart_remove,
};
static int __init collie_uart_init(void)
diff --git a/arch/arm/mach-spear/generic.h b/arch/arm/mach-spear/generic.h
index 25b4c5e66e39..8ec2b92dca19 100644
--- a/arch/arm/mach-spear/generic.h
+++ b/arch/arm/mach-spear/generic.h
@@ -43,16 +43,4 @@ void spear13xx_cpu_die(unsigned int cpu);
extern const struct smp_operations spear13xx_smp_ops;
-#ifdef CONFIG_MACH_SPEAR1310
-void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base);
-#else
-static inline void spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base) {}
-#endif
-
-#ifdef CONFIG_MACH_SPEAR1340
-void __init spear1340_clk_init(void __iomem *misc_base);
-#else
-static inline void spear1340_clk_init(void __iomem *misc_base) {}
-#endif
-
#endif /* __MACH_GENERIC_H */
diff --git a/arch/arm/mach-spear/spear13xx.c b/arch/arm/mach-spear/spear13xx.c
index 31c43cabf362..74d1ca2a529a 100644
--- a/arch/arm/mach-spear/spear13xx.c
+++ b/arch/arm/mach-spear/spear13xx.c
@@ -15,6 +15,7 @@
#include <linux/amba/pl022.h>
#include <linux/clk.h>
+#include <linux/clk/spear.h>
#include <linux/clocksource.h>
#include <linux/err.h>
#include <linux/of.h>
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index eeadb1a4dcfe..e5c2fce281cd 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -6,6 +6,8 @@ menuconfig ARCH_SUNXI
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
select GPIOLIB
+ select IRQ_DOMAIN_HIERARCHY
+ select IRQ_FASTEOI_HIERARCHY_HANDLERS
select PINCTRL
select PM_OPP
select SUN4I_TIMER
diff --git a/arch/arm/mach-tango/Kconfig b/arch/arm/mach-tango/Kconfig
deleted file mode 100644
index a9eeda36aeb1..000000000000
--- a/arch/arm/mach-tango/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config ARCH_TANGO
- bool "Sigma Designs Tango4 (SMP87xx)"
- depends on ARCH_MULTI_V7
- # Cortex-A9 MPCore r3p0, PL310 r3p2
- select ARM_ERRATA_754322
- select ARM_ERRATA_764369 if SMP
- select ARM_ERRATA_775420
- select ARM_GIC
- select CLKSRC_TANGO_XTAL
- select HAVE_ARM_SCU
- select HAVE_ARM_TWD
- select TANGO_IRQ
diff --git a/arch/arm/mach-tango/Makefile b/arch/arm/mach-tango/Makefile
deleted file mode 100644
index 97cd04508fa1..000000000000
--- a/arch/arm/mach-tango/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-y += setup.o smc.o
-obj-$(CONFIG_SMP) += platsmp.o
-obj-$(CONFIG_SUSPEND) += pm.o
diff --git a/arch/arm/mach-tango/platsmp.c b/arch/arm/mach-tango/platsmp.c
deleted file mode 100644
index 65012afbc1a3..000000000000
--- a/arch/arm/mach-tango/platsmp.c
+++ /dev/null
@@ -1,52 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include "smc.h"
-
-static int tango_boot_secondary(unsigned int cpu, struct task_struct *idle)
-{
- tango_set_aux_boot_addr(__pa_symbol(secondary_startup));
- tango_start_aux_core(cpu);
- return 0;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * cpu_kill() and cpu_die() run concurrently on different cores.
- * Firmware will only "kill" a core once it has properly "died".
- * Try a few times to kill a core before giving up, and sleep
- * between tries to give that core enough time to die.
- */
-static int tango_cpu_kill(unsigned int cpu)
-{
- int i, err;
-
- for (i = 0; i < 10; ++i) {
- msleep(10);
- err = tango_aux_core_kill(cpu);
- if (!err)
- return true;
- }
-
- return false;
-}
-
-static void tango_cpu_die(unsigned int cpu)
-{
- while (tango_aux_core_die(cpu) < 0)
- cpu_relax();
-
- panic("cpu %d failed to die\n", cpu);
-}
-#endif
-
-static const struct smp_operations tango_smp_ops __initconst = {
- .smp_boot_secondary = tango_boot_secondary,
-#ifdef CONFIG_HOTPLUG_CPU
- .cpu_kill = tango_cpu_kill,
- .cpu_die = tango_cpu_die,
-#endif
-};
-
-CPU_METHOD_OF_DECLARE(tango4_smp, "sigma,tango4-smp", &tango_smp_ops);
diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c
deleted file mode 100644
index a32c3b631484..000000000000
--- a/arch/arm/mach-tango/pm.c
+++ /dev/null
@@ -1,31 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/init.h>
-#include <linux/suspend.h>
-#include <asm/suspend.h>
-#include "smc.h"
-#include "pm.h"
-
-static int tango_pm_powerdown(unsigned long arg)
-{
- tango_suspend(__pa_symbol(cpu_resume));
-
- return -EIO; /* tango_suspend has failed */
-}
-
-static int tango_pm_enter(suspend_state_t state)
-{
- if (state == PM_SUSPEND_MEM)
- return cpu_suspend(0, tango_pm_powerdown);
-
- return -EINVAL;
-}
-
-static const struct platform_suspend_ops tango_pm_ops = {
- .enter = tango_pm_enter,
- .valid = suspend_valid_only_mem,
-};
-
-void __init tango_pm_init(void)
-{
- suspend_set_ops(&tango_pm_ops);
-}
diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h
deleted file mode 100644
index 35ea705a0ee2..000000000000
--- a/arch/arm/mach-tango/pm.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifdef CONFIG_SUSPEND
-void __init tango_pm_init(void);
-#else
-#define tango_pm_init NULL
-#endif
diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c
deleted file mode 100644
index 824f90737b04..000000000000
--- a/arch/arm/mach-tango/setup.c
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <asm/mach/arch.h>
-#include <asm/hardware/cache-l2x0.h>
-#include "smc.h"
-#include "pm.h"
-
-static void tango_l2c_write(unsigned long val, unsigned int reg)
-{
- if (reg == L2X0_CTRL)
- tango_set_l2_control(val);
-}
-
-static const char *const tango_dt_compat[] = { "sigma,tango4", NULL };
-
-DT_MACHINE_START(TANGO_DT, "Sigma Tango DT")
- .dt_compat = tango_dt_compat,
- .l2c_aux_mask = ~0,
- .l2c_write_sec = tango_l2c_write,
- .init_late = tango_pm_init,
-MACHINE_END
diff --git a/arch/arm/mach-tango/smc.S b/arch/arm/mach-tango/smc.S
deleted file mode 100644
index b1752aaa72bc..000000000000
--- a/arch/arm/mach-tango/smc.S
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/linkage.h>
-
- .arch armv7-a
- .arch_extension sec
-ENTRY(tango_smc)
- push {lr}
- mov ip, r1
- dsb /* This barrier is probably unnecessary */
- smc #0
- pop {pc}
-ENDPROC(tango_smc)
diff --git a/arch/arm/mach-tango/smc.h b/arch/arm/mach-tango/smc.h
deleted file mode 100644
index 455ce3e06daf..000000000000
--- a/arch/arm/mach-tango/smc.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-extern int tango_smc(unsigned int val, unsigned int service);
-
-#define tango_set_l2_control(val) tango_smc(val, 0x102)
-#define tango_start_aux_core(val) tango_smc(val, 0x104)
-#define tango_set_aux_boot_addr(val) tango_smc(val, 0x105)
-#define tango_suspend(val) tango_smc(val, 0x120)
-#define tango_aux_core_die(val) tango_smc(val, 0x121)
-#define tango_aux_core_kill(val) tango_smc(val, 0x122)
diff --git a/arch/arm/mach-tegra/sleep-tegra20.S b/arch/arm/mach-tegra/sleep-tegra20.S
index 0e00ba8cf646..a5a36cce142a 100644
--- a/arch/arm/mach-tegra/sleep-tegra20.S
+++ b/arch/arm/mach-tegra/sleep-tegra20.S
@@ -43,11 +43,34 @@
#define APB_MISC_XM2CFGCPADCTRL2 0x8e4
#define APB_MISC_XM2CFGDPADCTRL2 0x8e8
-.macro pll_enable, rd, r_car_base, pll_base
+#define PLLC_STORE_MASK (1 << 0)
+#define PLLM_STORE_MASK (1 << 1)
+#define PLLP_STORE_MASK (1 << 2)
+
+.macro test_pll_state, rd, test_mask
+ ldr \rd, tegra_pll_state
+ tst \rd, #\test_mask
+.endm
+
+.macro store_pll_state, rd, tmp, r_car_base, pll_base, pll_mask
+ ldr \rd, [\r_car_base, #\pll_base]
+ tst \rd, #(1 << 30)
+ ldr \rd, tegra_pll_state
+ biceq \rd, \rd, #\pll_mask
+ orrne \rd, \rd, #\pll_mask
+ adr \tmp, tegra_pll_state
+ str \rd, [\tmp]
+.endm
+
+.macro pll_enable, rd, r_car_base, pll_base, test_mask
+ test_pll_state \rd, \test_mask
+ beq 1f
+
ldr \rd, [\r_car_base, #\pll_base]
tst \rd, #(1 << 30)
orreq \rd, \rd, #(1 << 30)
streq \rd, [\r_car_base, #\pll_base]
+1:
.endm
.macro emc_device_mask, rd, base
@@ -177,9 +200,9 @@ ENTRY(tegra20_lp1_reset)
str r1, [r0, #CLK_RESET_CCLK_DIVIDER]
str r1, [r0, #CLK_RESET_SCLK_DIVIDER]
- pll_enable r1, r0, CLK_RESET_PLLM_BASE
- pll_enable r1, r0, CLK_RESET_PLLP_BASE
- pll_enable r1, r0, CLK_RESET_PLLC_BASE
+ pll_enable r1, r0, CLK_RESET_PLLM_BASE, PLLM_STORE_MASK
+ pll_enable r1, r0, CLK_RESET_PLLP_BASE, PLLP_STORE_MASK
+ pll_enable r1, r0, CLK_RESET_PLLC_BASE, PLLC_STORE_MASK
adr r2, tegra20_sdram_pad_address
adr r4, tegra20_sdram_pad_save
@@ -270,6 +293,10 @@ tegra20_switch_cpu_to_clk32k:
add r1, r1, #2
wait_until r1, r7, r9
+ store_pll_state r0, r1, r5, CLK_RESET_PLLC_BASE, PLLC_STORE_MASK
+ store_pll_state r0, r1, r5, CLK_RESET_PLLM_BASE, PLLM_STORE_MASK
+ store_pll_state r0, r1, r5, CLK_RESET_PLLP_BASE, PLLP_STORE_MASK
+
/* disable PLLM, PLLP and PLLC */
ldr r0, [r5, #CLK_RESET_PLLM_BASE]
bic r0, r0, #(1 << 30)
@@ -396,6 +423,9 @@ tegra20_sdram_pad_save:
.long 0
.endr
+tegra_pll_state:
+ .word 0x0
+
.ltorg
/* dummy symbol for end of IRAM */
.align L1_CACHE_SHIFT
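
The tegra20 LP1 path above now snapshots which of PLLC/PLLM/PLLP had their enable bit (bit 30) set before the CPU is switched to the 32 kHz clock, and on resume re-enables only those PLLs instead of unconditionally turning all of them back on. A rough C model of that bookkeeping, purely illustrative (the real code is the assembly above and keeps tegra_pll_state in IRAM):

/* Rough C model of the PLL bookkeeping added above: remember which PLLs
 * were running before LP1, and only re-enable those on resume. The
 * readl/writel accessors and offsets stand in for the real CAR accesses. */
#include <linux/io.h>
#include <linux/bits.h>

#define PLLC_STORE_MASK         BIT(0)
#define PLLM_STORE_MASK         BIT(1)
#define PLLP_STORE_MASK         BIT(2)

static u32 tegra_pll_state;     /* lives in IRAM in the real code */

static void store_pll_state(void __iomem *car, u32 pll_base, u32 mask)
{
        if (readl(car + pll_base) & BIT(30))
                tegra_pll_state |= mask;        /* PLL was enabled */
        else
                tegra_pll_state &= ~mask;
}

static void pll_enable(void __iomem *car, u32 pll_base, u32 mask)
{
        u32 val;

        if (!(tegra_pll_state & mask))
                return;         /* PLL was off before suspend: leave it off */

        val = readl(car + pll_base);
        if (!(val & BIT(30)))
                writel(val | BIT(30), car + pll_base);
}
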
diff --git a/arch/arm/mach-tegra/sleep-tegra30.S b/arch/arm/mach-tegra/sleep-tegra30.S
index 2667bcdb5dc6..0cc40b6b2ba3 100644
--- a/arch/arm/mach-tegra/sleep-tegra30.S
+++ b/arch/arm/mach-tegra/sleep-tegra30.S
@@ -71,6 +71,13 @@
#define TEGRA30_POWER_HOTPLUG_SHUTDOWN (1 << 27) /* Hotplug shutdown */
+#define PLLA_STORE_MASK (1 << 0)
+#define PLLC_STORE_MASK (1 << 1)
+#define PLLM_STORE_MASK (1 << 2)
+#define PLLP_STORE_MASK (1 << 3)
+#define PLLX_STORE_MASK (1 << 4)
+#define PLLM_PMC_STORE_MASK (1 << 5)
+
.macro emc_device_mask, rd, base
ldr \rd, [\base, #EMC_ADR_CFG]
tst \rd, #0x1
@@ -87,7 +94,43 @@
bne 1001b
.endm
-.macro pll_enable, rd, r_car_base, pll_base, pll_misc
+.macro test_pll_state, rd, test_mask
+ ldr \rd, tegra_pll_state
+ tst \rd, #\test_mask
+.endm
+
+.macro store_pll_state, rd, tmp, r_car_base, pll_base, pll_mask
+ ldr \rd, [\r_car_base, #\pll_base]
+ tst \rd, #(1 << 30)
+ ldr \rd, tegra_pll_state
+ biceq \rd, \rd, #\pll_mask
+ orrne \rd, \rd, #\pll_mask
+ adr \tmp, tegra_pll_state
+ str \rd, [\tmp]
+.endm
+
+.macro store_pllm_pmc_state, rd, tmp, pmc_base
+ ldr \rd, [\pmc_base, #PMC_PLLP_WB0_OVERRIDE]
+ tst \rd, #(1 << 12)
+ ldr \rd, tegra_pll_state
+ biceq \rd, \rd, #PLLM_PMC_STORE_MASK
+ orrne \rd, \rd, #PLLM_PMC_STORE_MASK
+ adr \tmp, tegra_pll_state
+ str \rd, [\tmp]
+.endm
+
+.macro pllm_pmc_enable, rd, pmc_base
+ test_pll_state \rd, PLLM_PMC_STORE_MASK
+
+ ldrne \rd, [\pmc_base, #PMC_PLLP_WB0_OVERRIDE]
+ orrne \rd, \rd, #(1 << 12)
+ strne \rd, [\pmc_base, #PMC_PLLP_WB0_OVERRIDE]
+.endm
+
+.macro pll_enable, rd, r_car_base, pll_base, pll_misc, test_mask
+ test_pll_state \rd, \test_mask
+ beq 1f
+
ldr \rd, [\r_car_base, #\pll_base]
tst \rd, #(1 << 30)
orreq \rd, \rd, #(1 << 30)
@@ -102,13 +145,17 @@
orr \rd, \rd, #(1 << 18)
str \rd, [\r_car_base, #\pll_misc]
.endif
+1:
.endm
-.macro pll_locked, rd, r_car_base, pll_base
+.macro pll_locked, rd, r_car_base, pll_base, test_mask
+ test_pll_state \rd, \test_mask
+ beq 2f
1:
ldr \rd, [\r_car_base, #\pll_base]
tst \rd, #(1 << 27)
beq 1b
+2:
.endm
.macro pll_iddq_exit, rd, car, iddq, iddq_bit
@@ -342,34 +389,30 @@ ENTRY(tegra30_lp1_reset)
/* enable PLLM via PMC */
mov32 r2, TEGRA_PMC_BASE
- ldr r1, [r2, #PMC_PLLP_WB0_OVERRIDE]
- orr r1, r1, #(1 << 12)
- str r1, [r2, #PMC_PLLP_WB0_OVERRIDE]
+ pllm_pmc_enable r1, r2
- pll_enable r1, r0, CLK_RESET_PLLM_BASE, 0
- pll_enable r1, r0, CLK_RESET_PLLC_BASE, 0
- pll_enable r1, r0, CLK_RESET_PLLX_BASE, 0
+ pll_enable r1, r0, CLK_RESET_PLLM_BASE, 0, PLLM_STORE_MASK
+ pll_enable r1, r0, CLK_RESET_PLLC_BASE, 0, PLLC_STORE_MASK
+ pll_enable r1, r0, CLK_RESET_PLLX_BASE, 0, PLLX_STORE_MASK
b _pll_m_c_x_done
_no_pll_iddq_exit:
/* enable PLLM via PMC */
mov32 r2, TEGRA_PMC_BASE
- ldr r1, [r2, #PMC_PLLP_WB0_OVERRIDE]
- orr r1, r1, #(1 << 12)
- str r1, [r2, #PMC_PLLP_WB0_OVERRIDE]
+ pllm_pmc_enable r1, r2
- pll_enable r1, r0, CLK_RESET_PLLM_BASE, CLK_RESET_PLLM_MISC
- pll_enable r1, r0, CLK_RESET_PLLC_BASE, CLK_RESET_PLLC_MISC
+ pll_enable r1, r0, CLK_RESET_PLLM_BASE, CLK_RESET_PLLM_MISC, PLLM_STORE_MASK
+ pll_enable r1, r0, CLK_RESET_PLLC_BASE, CLK_RESET_PLLC_MISC, PLLC_STORE_MASK
_pll_m_c_x_done:
- pll_enable r1, r0, CLK_RESET_PLLP_BASE, CLK_RESET_PLLP_MISC
- pll_enable r1, r0, CLK_RESET_PLLA_BASE, CLK_RESET_PLLA_MISC
+ pll_enable r1, r0, CLK_RESET_PLLP_BASE, CLK_RESET_PLLP_MISC, PLLP_STORE_MASK
+ pll_enable r1, r0, CLK_RESET_PLLA_BASE, CLK_RESET_PLLA_MISC, PLLA_STORE_MASK
- pll_locked r1, r0, CLK_RESET_PLLM_BASE
- pll_locked r1, r0, CLK_RESET_PLLP_BASE
- pll_locked r1, r0, CLK_RESET_PLLA_BASE
- pll_locked r1, r0, CLK_RESET_PLLC_BASE
+ pll_locked r1, r0, CLK_RESET_PLLM_BASE, PLLM_STORE_MASK
+ pll_locked r1, r0, CLK_RESET_PLLP_BASE, PLLP_STORE_MASK
+ pll_locked r1, r0, CLK_RESET_PLLA_BASE, PLLA_STORE_MASK
+ pll_locked r1, r0, CLK_RESET_PLLC_BASE, PLLC_STORE_MASK
/*
* CPUFreq driver could select other PLL for CPU. PLLX will be
@@ -380,7 +423,7 @@ _pll_m_c_x_done:
cmp r1, #TEGRA30
beq 1f
- pll_locked r1, r0, CLK_RESET_PLLX_BASE
+ pll_locked r1, r0, CLK_RESET_PLLX_BASE, PLLX_STORE_MASK
ldr r1, [r0, #CLK_RESET_PLLP_BASE]
bic r1, r1, #(1<<31) @ disable PllP bypass
@@ -593,6 +636,9 @@ tegra_sdram_pad_save:
.long 0
.endr
+tegra_pll_state:
+ .word 0x0
+
/*
* tegra30_tear_down_core
*
@@ -641,6 +687,14 @@ tegra30_switch_cpu_to_clk32k:
add r1, r1, #2
wait_until r1, r7, r9
+ /* store enable-state of PLLs */
+ store_pll_state r0, r1, r5, CLK_RESET_PLLA_BASE, PLLA_STORE_MASK
+ store_pll_state r0, r1, r5, CLK_RESET_PLLC_BASE, PLLC_STORE_MASK
+ store_pll_state r0, r1, r5, CLK_RESET_PLLM_BASE, PLLM_STORE_MASK
+ store_pll_state r0, r1, r5, CLK_RESET_PLLP_BASE, PLLP_STORE_MASK
+ store_pll_state r0, r1, r5, CLK_RESET_PLLX_BASE, PLLX_STORE_MASK
+ store_pllm_pmc_state r0, r1, r4
+
/* disable PLLM via PMC in LP1 */
ldr r0, [r4, #PMC_PLLP_WB0_OVERRIDE]
bic r0, r0, #(1 << 12)
diff --git a/arch/arm/mach-u300/Kconfig b/arch/arm/mach-u300/Kconfig
deleted file mode 100644
index c3c8bf54f033..000000000000
--- a/arch/arm/mach-u300/Kconfig
+++ /dev/null
@@ -1,32 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-menuconfig ARCH_U300
- bool "ST-Ericsson U300 Series"
- depends on ARCH_MULTI_V5 && MMU
- select ARM_AMBA
- select ARM_VIC
- select U300_TIMER
- select CPU_ARM926T
- select GPIOLIB
- select HAVE_TCM
- select PINCTRL
- select PINCTRL_COH901
- select PINCTRL_U300
- select MFD_SYSCON
- help
- Support for ST-Ericsson U300 series mobile platforms.
-
-if ARCH_U300
-
-config MACH_U300
- depends on ARCH_U300
- bool "U300"
- default y
-
-config U300_DEBUG
- depends on ARCH_U300
- bool "Debug support for U300"
- depends on PM
- help
- Debug support for U300 in sysfs, procfs etc.
-
-endif
diff --git a/arch/arm/mach-u300/Makefile b/arch/arm/mach-u300/Makefile
deleted file mode 100644
index 67f71ae45dfc..000000000000
--- a/arch/arm/mach-u300/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the linux kernel, U300 machine.
-#
-
-obj-y := core.o
-
-obj-$(CONFIG_REGULATOR_AB3100) += regulator.o
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
deleted file mode 100644
index a1694d977ec9..000000000000
--- a/arch/arm/mach-u300/core.c
+++ /dev/null
@@ -1,413 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *
- * arch/arm/mach-u300/core.c
- *
- * Copyright (C) 2007-2012 ST-Ericsson SA
- * Core platform support, IRQ handling and device definitions.
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- */
-#include <linux/kernel.h>
-#include <linux/pinctrl/machine.h>
-#include <linux/pinctrl/pinconf-generic.h>
-#include <linux/platform_data/clk-u300.h>
-#include <linux/irqchip.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/clocksource.h>
-#include <linux/clk.h>
-
-#include <asm/mach/map.h>
-#include <asm/mach/arch.h>
-
-/*
- * These are the large blocks of memory allocated for I/O.
- * the defines are used for setting up the I/O memory mapping.
- */
-
-/* NAND Flash CS0 */
-#define U300_NAND_CS0_PHYS_BASE 0x80000000
-/* NFIF */
-#define U300_NAND_IF_PHYS_BASE 0x9f800000
-/* ALE, CLE offset for FSMC NAND */
-#define PLAT_NAND_CLE (1 << 16)
-#define PLAT_NAND_ALE (1 << 17)
-/* AHB Peripherals */
-#define U300_AHB_PER_PHYS_BASE 0xa0000000
-#define U300_AHB_PER_VIRT_BASE 0xff010000
-/* FAST Peripherals */
-#define U300_FAST_PER_PHYS_BASE 0xc0000000
-#define U300_FAST_PER_VIRT_BASE 0xff020000
-/* SLOW Peripherals */
-#define U300_SLOW_PER_PHYS_BASE 0xc0010000
-#define U300_SLOW_PER_VIRT_BASE 0xff000000
-/* Boot ROM */
-#define U300_BOOTROM_PHYS_BASE 0xffff0000
-#define U300_BOOTROM_VIRT_BASE 0xffff0000
-/* SEMI config base */
-#define U300_SEMI_CONFIG_BASE 0x2FFE0000
-
-/*
- * AHB peripherals
- */
-
-/* AHB Peripherals Bridge Controller */
-#define U300_AHB_BRIDGE_BASE (U300_AHB_PER_PHYS_BASE+0x0000)
-/* Vectored Interrupt Controller 0, servicing 32 interrupts */
-#define U300_INTCON0_BASE (U300_AHB_PER_PHYS_BASE+0x1000)
-#define U300_INTCON0_VBASE IOMEM(U300_AHB_PER_VIRT_BASE+0x1000)
-/* Vectored Interrupt Controller 1, servicing 32 interrupts */
-#define U300_INTCON1_BASE (U300_AHB_PER_PHYS_BASE+0x2000)
-#define U300_INTCON1_VBASE IOMEM(U300_AHB_PER_VIRT_BASE+0x2000)
-/* Memory Stick Pro (MSPRO) controller */
-#define U300_MSPRO_BASE (U300_AHB_PER_PHYS_BASE+0x3000)
-/* EMIF Configuration Area */
-#define U300_EMIF_CFG_BASE (U300_AHB_PER_PHYS_BASE+0x4000)
-
-/*
- * FAST peripherals
- */
-
-/* FAST bridge control */
-#define U300_FAST_BRIDGE_BASE (U300_FAST_PER_PHYS_BASE+0x0000)
-/* MMC/SD controller */
-#define U300_MMCSD_BASE (U300_FAST_PER_PHYS_BASE+0x1000)
-/* PCM I2S0 controller */
-#define U300_PCM_I2S0_BASE (U300_FAST_PER_PHYS_BASE+0x2000)
-/* PCM I2S1 controller */
-#define U300_PCM_I2S1_BASE (U300_FAST_PER_PHYS_BASE+0x3000)
-/* I2C0 controller */
-#define U300_I2C0_BASE (U300_FAST_PER_PHYS_BASE+0x4000)
-/* I2C1 controller */
-#define U300_I2C1_BASE (U300_FAST_PER_PHYS_BASE+0x5000)
-/* SPI controller */
-#define U300_SPI_BASE (U300_FAST_PER_PHYS_BASE+0x6000)
-/* Fast UART1 on U335 only */
-#define U300_UART1_BASE (U300_FAST_PER_PHYS_BASE+0x7000)
-
-/*
- * SLOW peripherals
- */
-
-/* SLOW bridge control */
-#define U300_SLOW_BRIDGE_BASE (U300_SLOW_PER_PHYS_BASE)
-/* SYSCON */
-#define U300_SYSCON_BASE (U300_SLOW_PER_PHYS_BASE+0x1000)
-#define U300_SYSCON_VBASE IOMEM(U300_SLOW_PER_VIRT_BASE+0x1000)
-/* Watchdog */
-#define U300_WDOG_BASE (U300_SLOW_PER_PHYS_BASE+0x2000)
-/* UART0 */
-#define U300_UART0_BASE (U300_SLOW_PER_PHYS_BASE+0x3000)
-/* APP side special timer */
-#define U300_TIMER_APP_BASE (U300_SLOW_PER_PHYS_BASE+0x4000)
-#define U300_TIMER_APP_VBASE IOMEM(U300_SLOW_PER_VIRT_BASE+0x4000)
-/* Keypad */
-#define U300_KEYPAD_BASE (U300_SLOW_PER_PHYS_BASE+0x5000)
-/* GPIO */
-#define U300_GPIO_BASE (U300_SLOW_PER_PHYS_BASE+0x6000)
-/* RTC */
-#define U300_RTC_BASE (U300_SLOW_PER_PHYS_BASE+0x7000)
-/* Bus tracer */
-#define U300_BUSTR_BASE (U300_SLOW_PER_PHYS_BASE+0x8000)
-/* Event handler (hardware queue) */
-#define U300_EVHIST_BASE (U300_SLOW_PER_PHYS_BASE+0x9000)
-/* Genric Timer */
-#define U300_TIMER_BASE (U300_SLOW_PER_PHYS_BASE+0xa000)
-/* PPM */
-#define U300_PPM_BASE (U300_SLOW_PER_PHYS_BASE+0xb000)
-
-/*
- * REST peripherals
- */
-
-/* ISP (image signal processor) */
-#define U300_ISP_BASE (0xA0008000)
-/* DMA Controller base */
-#define U300_DMAC_BASE (0xC0020000)
-/* MSL Base */
-#define U300_MSL_BASE (0xc0022000)
-/* APEX Base */
-#define U300_APEX_BASE (0xc0030000)
-/* Video Encoder Base */
-#define U300_VIDEOENC_BASE (0xc0080000)
-/* XGAM Base */
-#define U300_XGAM_BASE (0xd0000000)
-
-/*
- * SYSCON addresses applicable to the core machine.
- */
-
-/* Chip ID register 16bit (R/-) */
-#define U300_SYSCON_CIDR (0x400)
-/* SMCR */
-#define U300_SYSCON_SMCR (0x4d0)
-#define U300_SYSCON_SMCR_FIELD_MASK (0x000e)
-#define U300_SYSCON_SMCR_SEMI_SREFACK_IND (0x0008)
-#define U300_SYSCON_SMCR_SEMI_SREFREQ_ENABLE (0x0004)
-#define U300_SYSCON_SMCR_SEMI_EXT_BOOT_MODE_ENABLE (0x0002)
-/* CPU_SW_DBGEN Software Debug Enable 16bit (R/W) */
-#define U300_SYSCON_CSDR (0x4f0)
-#define U300_SYSCON_CSDR_SW_DEBUG_ENABLE (0x0001)
-/* PRINT_CONTROL Print Control 16bit (R/-) */
-#define U300_SYSCON_PCR (0x4f8)
-#define U300_SYSCON_PCR_SERV_IND (0x0001)
-/* BOOT_CONTROL 16bit (R/-) */
-#define U300_SYSCON_BCR (0x4fc)
-#define U300_SYSCON_BCR_ACC_CPU_SUBSYS_VINITHI_IND (0x0400)
-#define U300_SYSCON_BCR_APP_CPU_SUBSYS_VINITHI_IND (0x0200)
-#define U300_SYSCON_BCR_EXTRA_BOOT_OPTION_MASK (0x01FC)
-#define U300_SYSCON_BCR_APP_BOOT_SERV_MASK (0x0003)
-
-static void __iomem *syscon_base;
-
-/*
- * Static I/O mappings that are needed for booting the U300 platforms. The
- * only things we need are the areas where we find the timer, syscon and
- * intcon, since the remaining device drivers will map their own memory
- * physical to virtual as the need arise.
- */
-static struct map_desc u300_io_desc[] __initdata = {
- {
- .virtual = U300_SLOW_PER_VIRT_BASE,
- .pfn = __phys_to_pfn(U300_SLOW_PER_PHYS_BASE),
- .length = SZ_64K,
- .type = MT_DEVICE,
- },
- {
- .virtual = U300_AHB_PER_VIRT_BASE,
- .pfn = __phys_to_pfn(U300_AHB_PER_PHYS_BASE),
- .length = SZ_32K,
- .type = MT_DEVICE,
- },
- {
- .virtual = U300_FAST_PER_VIRT_BASE,
- .pfn = __phys_to_pfn(U300_FAST_PER_PHYS_BASE),
- .length = SZ_32K,
- .type = MT_DEVICE,
- },
-};
-
-static void __init u300_map_io(void)
-{
- iotable_init(u300_io_desc, ARRAY_SIZE(u300_io_desc));
-}
-
-static unsigned long pin_pullup_conf[] = {
- PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 1),
-};
-
-static unsigned long pin_highz_conf[] = {
- PIN_CONF_PACKED(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, 0),
-};
-
-/* Pin control settings */
-static const struct pinctrl_map u300_pinmux_map[] = {
- /* anonymous maps for chip power and EMIFs */
- PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-u300", NULL, "power"),
- PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-u300", NULL, "emif0"),
- PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-u300", NULL, "emif1"),
- /* per-device maps for MMC/SD, SPI and UART */
- PIN_MAP_MUX_GROUP_DEFAULT("mmci", "pinctrl-u300", NULL, "mmc0"),
- PIN_MAP_MUX_GROUP_DEFAULT("pl022", "pinctrl-u300", NULL, "spi0"),
- PIN_MAP_MUX_GROUP_DEFAULT("uart0", "pinctrl-u300", NULL, "uart0"),
- /* This pin is used for clock return rather than GPIO */
- PIN_MAP_CONFIGS_PIN_DEFAULT("mmci", "pinctrl-u300", "PIO APP GPIO 11",
- pin_pullup_conf),
- /* This pin is used for card detect */
- PIN_MAP_CONFIGS_PIN_DEFAULT("mmci", "pinctrl-u300", "PIO MS INS",
- pin_highz_conf),
-};
-
-struct db_chip {
- u16 chipid;
- const char *name;
-};
-
-/*
- * This is a list of the Digital Baseband chips used in the U300 platform.
- */
-static struct db_chip db_chips[] __initdata = {
- {
- .chipid = 0xb800,
- .name = "DB3000",
- },
- {
- .chipid = 0xc000,
- .name = "DB3100",
- },
- {
- .chipid = 0xc800,
- .name = "DB3150",
- },
- {
- .chipid = 0xd800,
- .name = "DB3200",
- },
- {
- .chipid = 0xe000,
- .name = "DB3250",
- },
- {
- .chipid = 0xe800,
- .name = "DB3210",
- },
- {
- .chipid = 0xf000,
- .name = "DB3350 P1x",
- },
- {
- .chipid = 0xf100,
- .name = "DB3350 P2x",
- },
- {
- .chipid = 0x0000, /* List terminator */
- .name = NULL,
- }
-};
-
-static void __init u300_init_check_chip(void)
-{
-
- u16 val;
- struct db_chip *chip;
- const char *chipname;
- const char unknown[] = "UNKNOWN";
-
- /* Read out and print chip ID */
- val = readw(syscon_base + U300_SYSCON_CIDR);
- /* This is in funky bigendian order... */
- val = (val & 0xFFU) << 8 | (val >> 8);
- chip = db_chips;
- chipname = unknown;
-
- for ( ; chip->chipid; chip++) {
- if (chip->chipid == (val & 0xFF00U)) {
- chipname = chip->name;
- break;
- }
- }
- printk(KERN_INFO "Initializing U300 system on %s baseband chip " \
- "(chip ID 0x%04x)\n", chipname, val);
-
- if ((val & 0xFF00U) != 0xf000 && (val & 0xFF00U) != 0xf100) {
- printk(KERN_ERR "Platform configured for BS335 " \
- " with DB3350 but %s detected, expect problems!",
- chipname);
- }
-}
-
-/* Forward declare this function from the watchdog */
-void coh901327_watchdog_reset(void);
-
-static void u300_restart(enum reboot_mode mode, const char *cmd)
-{
- switch (mode) {
- case REBOOT_SOFT:
- case REBOOT_HARD:
-#ifdef CONFIG_COH901327_WATCHDOG
- coh901327_watchdog_reset();
-#endif
- break;
- default:
- /* Do nothing */
- break;
- }
- /* Wait for system do die/reset. */
- while (1);
-}
-
-/* These are mostly to get the right device names for the clock lookups */
-static struct of_dev_auxdata u300_auxdata_lookup[] __initdata = {
- OF_DEV_AUXDATA("stericsson,pinctrl-u300", U300_SYSCON_BASE,
- "pinctrl-u300", NULL),
- OF_DEV_AUXDATA("stericsson,gpio-coh901", U300_GPIO_BASE,
- "u300-gpio", NULL),
- OF_DEV_AUXDATA("stericsson,coh901327", U300_WDOG_BASE,
- "coh901327_wdog", NULL),
- OF_DEV_AUXDATA("stericsson,coh901331", U300_RTC_BASE,
- "rtc-coh901331", NULL),
- OF_DEV_AUXDATA("stericsson,coh901318", U300_DMAC_BASE,
- "coh901318", NULL),
- OF_DEV_AUXDATA("stericsson,fsmc-nand", U300_NAND_IF_PHYS_BASE,
- "fsmc-nand", NULL),
- OF_DEV_AUXDATA("arm,primecell", U300_UART0_BASE,
- "uart0", NULL),
- OF_DEV_AUXDATA("arm,primecell", U300_UART1_BASE,
- "uart1", NULL),
- OF_DEV_AUXDATA("arm,primecell", U300_SPI_BASE,
- "pl022", NULL),
- OF_DEV_AUXDATA("st,ddci2c", U300_I2C0_BASE,
- "stu300.0", NULL),
- OF_DEV_AUXDATA("st,ddci2c", U300_I2C1_BASE,
- "stu300.1", NULL),
- OF_DEV_AUXDATA("arm,primecell", U300_MMCSD_BASE,
- "mmci", NULL),
- { /* sentinel */ },
-};
-
-static void __init u300_init_irq_dt(void)
-{
- struct device_node *syscon;
- struct clk *clk;
-
- syscon = of_find_node_by_path("/syscon@c0011000");
- if (!syscon) {
- pr_crit("could not find syscon node\n");
- return;
- }
- syscon_base = of_iomap(syscon, 0);
- if (!syscon_base) {
- pr_crit("could not remap syscon\n");
- return;
- }
- /* initialize clocking early, we want to clock the INTCON */
- u300_clk_init(syscon_base);
-
- /* Bootstrap EMIF and SEMI clocks */
- clk = clk_get_sys("pl172", NULL);
- BUG_ON(IS_ERR(clk));
- clk_prepare_enable(clk);
- clk = clk_get_sys("semi", NULL);
- BUG_ON(IS_ERR(clk));
- clk_prepare_enable(clk);
-
- /* Clock the interrupt controller */
- clk = clk_get_sys("intcon", NULL);
- BUG_ON(IS_ERR(clk));
- clk_prepare_enable(clk);
-
- irqchip_init();
-}
-
-static void __init u300_init_machine_dt(void)
-{
- u16 val;
-
- /* Check what platform we run and print some status information */
- u300_init_check_chip();
-
- /* Initialize pinmuxing */
- pinctrl_register_mappings(u300_pinmux_map,
- ARRAY_SIZE(u300_pinmux_map));
-
- of_platform_default_populate(NULL, u300_auxdata_lookup, NULL);
-
- /* Enable SEMI self refresh */
- val = readw(syscon_base + U300_SYSCON_SMCR) |
- U300_SYSCON_SMCR_SEMI_SREFREQ_ENABLE;
- writew(val, syscon_base + U300_SYSCON_SMCR);
-}
-
-static const char * u300_board_compat[] = {
- "stericsson,u300",
- NULL,
-};
-
-DT_MACHINE_START(U300_DT, "U300 S335/B335 (Device Tree)")
- .map_io = u300_map_io,
- .init_irq = u300_init_irq_dt,
- .init_time = timer_probe,
- .init_machine = u300_init_machine_dt,
- .restart = u300_restart,
- .dt_compat = u300_board_compat,
-MACHINE_END
diff --git a/arch/arm/mach-u300/regulator.c b/arch/arm/mach-u300/regulator.c
deleted file mode 100644
index c0cc1d82e1b9..000000000000
--- a/arch/arm/mach-u300/regulator.c
+++ /dev/null
@@ -1,134 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * arch/arm/mach-u300/regulator.c
- *
- * Copyright (C) 2009 ST-Ericsson AB
- * Handle board-bound regulators and board power not related
- * to any devices.
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- */
-#include <linux/device.h>
-#include <linux/signal.h>
-#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/machine.h>
-#include <linux/regulator/consumer.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
-
-/* Power Management Control 16bit (R/W) */
-#define U300_SYSCON_PMCR (0x50)
-#define U300_SYSCON_PMCR_DCON_ENABLE (0x0002)
-#define U300_SYSCON_PMCR_PWR_MGNT_ENABLE (0x0001)
-
-/*
- * Regulators that power the board and chip and which are
- * not copuled to specific drivers are hogged in these
- * instances.
- */
-static struct regulator *main_power_15;
-
-/*
- * This function is used from pm.h to shut down the system by
- * resetting all regulators in turn and then disable regulator
- * LDO D (main power).
- */
-void u300_pm_poweroff(void)
-{
- sigset_t old, all;
-
- sigfillset(&all);
- if (!sigprocmask(SIG_BLOCK, &all, &old)) {
- /* Disable LDO D to shut down the system */
- if (main_power_15)
- regulator_disable(main_power_15);
- else
- pr_err("regulator not available to shut down system\n");
- (void) sigprocmask(SIG_SETMASK, &old, NULL);
- }
- return;
-}
-
-/*
- * Hog the regulators needed to power up the board.
- */
-static int __init __u300_init_boardpower(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct device_node *syscon_np;
- struct regmap *regmap;
- int err;
-
- pr_info("U300: setting up board power\n");
-
- syscon_np = of_parse_phandle(np, "syscon", 0);
- if (!syscon_np) {
- pr_crit("U300: no syscon node\n");
- return -ENODEV;
- }
- regmap = syscon_node_to_regmap(syscon_np);
- if (IS_ERR(regmap)) {
- pr_crit("U300: could not locate syscon regmap\n");
- return PTR_ERR(regmap);
- }
-
- main_power_15 = regulator_get(&pdev->dev, "vana15");
-
- if (IS_ERR(main_power_15)) {
- pr_err("could not get vana15");
- return PTR_ERR(main_power_15);
- }
- err = regulator_enable(main_power_15);
- if (err) {
- pr_err("could not enable vana15\n");
- return err;
- }
-
- /*
- * On U300 a special system controller register pulls up the DC
- * until the vana15 (LDO D) regulator comes up. At this point, all
- * regulators are set and we do not need power control via
- * DC ON anymore. This function will likely be moved whenever
- * the rest of the U300 power management is implemented.
- */
- pr_info("U300: disable system controller pull-up\n");
- regmap_update_bits(regmap, U300_SYSCON_PMCR,
- U300_SYSCON_PMCR_DCON_ENABLE, 0);
-
- /* Register globally exported PM poweroff hook */
- pm_power_off = u300_pm_poweroff;
-
- return 0;
-}
-
-static int __init s365_board_probe(struct platform_device *pdev)
-{
- return __u300_init_boardpower(pdev);
-}
-
-static const struct of_device_id s365_board_match[] = {
- { .compatible = "stericsson,s365" },
- {},
-};
-
-static struct platform_driver s365_board_driver = {
- .driver = {
- .name = "s365-board",
- .of_match_table = s365_board_match,
- },
-};
-
-/*
- * So at module init time we hog the regulator!
- */
-static int __init u300_init_boardpower(void)
-{
- return platform_driver_probe(&s365_board_driver,
- s365_board_probe);
-}
-
-device_initcall(u300_init_boardpower);
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Linus Walleij");
diff --git a/arch/arm/mach-zx/Kconfig b/arch/arm/mach-zx/Kconfig
deleted file mode 100644
index ea29c84a7849..000000000000
--- a/arch/arm/mach-zx/Kconfig
+++ /dev/null
@@ -1,21 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-menuconfig ARCH_ZX
- bool "ZTE ZX family"
- depends on ARCH_MULTI_V7
- help
- Support for ZTE ZX-based family of processors. TV
- set-top-box processor is supported. More will be
- added soon.
-
-if ARCH_ZX
-
-config SOC_ZX296702
- def_bool y
- select ARM_GIC
- select ARM_GLOBAL_TIMER
- select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if SMP
- select PM_GENERIC_DOMAINS if PM
- help
- Support for ZTE ZX296702 SoC which is a dual core CortexA9MP
-endif
diff --git a/arch/arm/mach-zx/Makefile b/arch/arm/mach-zx/Makefile
deleted file mode 100644
index 6f8930cdb8fb..000000000000
--- a/arch/arm/mach-zx/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_SOC_ZX296702) += zx296702.o zx296702-pm-domain.o
-obj-$(CONFIG_SMP) += headsmp.o platsmp.o
diff --git a/arch/arm/mach-zx/core.h b/arch/arm/mach-zx/core.h
deleted file mode 100644
index 25fe873892c9..000000000000
--- a/arch/arm/mach-zx/core.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2014 Linaro Ltd.
- * Copyright (C) 2014 ZTE Corporation.
- */
-
-#ifndef __MACH_ZX_CORE_H
-#define __MACH_ZX_CORE_H
-
-extern void zx_resume_jump(void);
-extern size_t zx_suspend_iram_sz;
-extern unsigned long zx_secondary_startup_pa;
-
-void zx_secondary_startup(void);
-
-#endif /* __MACH_ZX_CORE_H */
diff --git a/arch/arm/mach-zx/headsmp.S b/arch/arm/mach-zx/headsmp.S
deleted file mode 100644
index 0846859b0573..000000000000
--- a/arch/arm/mach-zx/headsmp.S
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2014 Linaro Ltd.
- * Copyright (C) 2014 ZTE Corporation.
- */
-
-#include <linux/linkage.h>
-
- .align 3
- .arm
-
-/* It runs from physical address */
-ENTRY(zx_resume_jump)
- adr r1, zx_secondary_startup_pa
- ldr r0, [r1]
- bx r0
-ENDPROC(zx_resume_jump)
-
-ENTRY(zx_secondary_startup_pa)
- .word zx_secondary_startup_pa
-
-ENTRY(zx_suspend_iram_sz)
- .word . - zx_resume_jump
-ENDPROC(zx_secondary_startup_pa)
-
-
-ENTRY(zx_secondary_startup)
- bl v7_invalidate_l1
- b secondary_startup
-ENDPROC(zx_secondary_startup)
diff --git a/arch/arm/mach-zx/platsmp.c b/arch/arm/mach-zx/platsmp.c
deleted file mode 100644
index d4e1d3792224..000000000000
--- a/arch/arm/mach-zx/platsmp.c
+++ /dev/null
@@ -1,186 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2014 Linaro Ltd.
- * Copyright (C) 2014 ZTE Corporation.
- */
-
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/smp.h>
-
-#include <asm/cacheflush.h>
-#include <asm/cp15.h>
-#include <asm/fncpy.h>
-#include <asm/proc-fns.h>
-#include <asm/smp_scu.h>
-#include <asm/smp_plat.h>
-
-#include "core.h"
-
-#define AON_SYS_CTRL_RESERVED1 0xa8
-
-#define BUS_MATRIX_REMAP_CONFIG 0x00
-
-#define PCU_CPU0_CTRL 0x00
-#define PCU_CPU1_CTRL 0x04
-#define PCU_CPU1_ST 0x0c
-#define PCU_GLOBAL_CTRL 0x14
-#define PCU_EXPEND_CONTROL 0x34
-
-#define ZX_IRAM_BASE 0x00200000
-
-static void __iomem *pcu_base;
-static void __iomem *matrix_base;
-static void __iomem *scu_base;
-
-void __init zx_smp_prepare_cpus(unsigned int max_cpus)
-{
- struct device_node *np;
- unsigned long base = 0;
- void __iomem *aonsysctrl_base;
- void __iomem *sys_iram;
-
- base = scu_a9_get_base();
- scu_base = ioremap(base, SZ_256);
- if (!scu_base) {
- pr_err("%s: failed to map scu\n", __func__);
- return;
- }
-
- scu_enable(scu_base);
-
- np = of_find_compatible_node(NULL, NULL, "zte,sysctrl");
- if (!np) {
- pr_err("%s: failed to find sysctrl node\n", __func__);
- return;
- }
-
- aonsysctrl_base = of_iomap(np, 0);
- if (!aonsysctrl_base) {
- pr_err("%s: failed to map aonsysctrl\n", __func__);
- of_node_put(np);
- return;
- }
-
- /*
- * Write the address of secondary startup into the
- * system-wide flags register. The BootMonitor waits
- * until it receives a soft interrupt, and then the
- * secondary CPU branches to this address.
- */
- __raw_writel(__pa_symbol(zx_secondary_startup),
- aonsysctrl_base + AON_SYS_CTRL_RESERVED1);
-
- iounmap(aonsysctrl_base);
- of_node_put(np);
-
- np = of_find_compatible_node(NULL, NULL, "zte,zx296702-pcu");
- pcu_base = of_iomap(np, 0);
- of_node_put(np);
- WARN_ON(!pcu_base);
-
- np = of_find_compatible_node(NULL, NULL, "zte,zx-bus-matrix");
- matrix_base = of_iomap(np, 0);
- of_node_put(np);
- WARN_ON(!matrix_base);
-
- /* Map the first 4 KB IRAM for suspend usage */
- sys_iram = __arm_ioremap_exec(ZX_IRAM_BASE, PAGE_SIZE, false);
- zx_secondary_startup_pa = __pa_symbol(zx_secondary_startup);
- fncpy(sys_iram, &zx_resume_jump, zx_suspend_iram_sz);
-}
-
-static int zx_boot_secondary(unsigned int cpu, struct task_struct *idle)
-{
- static bool first_boot = true;
-
- if (first_boot) {
- arch_send_wakeup_ipi_mask(cpumask_of(cpu));
- first_boot = false;
- return 0;
- }
-
- /* Swap the base address mapping between IRAM and IROM */
- writel_relaxed(0x1, matrix_base + BUS_MATRIX_REMAP_CONFIG);
-
- /* Power on CPU1 */
- writel_relaxed(0x0, pcu_base + PCU_CPU1_CTRL);
-
- /* Wait for power on ack */
- while (readl_relaxed(pcu_base + PCU_CPU1_ST) & 0x4)
- cpu_relax();
-
- /* Swap back the mapping of IRAM and IROM */
- writel_relaxed(0x0, matrix_base + BUS_MATRIX_REMAP_CONFIG);
-
- return 0;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-static inline void cpu_enter_lowpower(void)
-{
- unsigned int v;
-
- asm volatile(
- "mcr p15, 0, %1, c7, c5, 0\n"
- " mcr p15, 0, %1, c7, c10, 4\n"
- /*
- * Turn off coherency
- */
- " mrc p15, 0, %0, c1, c0, 1\n"
- " bic %0, %0, %3\n"
- " mcr p15, 0, %0, c1, c0, 1\n"
- " mrc p15, 0, %0, c1, c0, 0\n"
- " bic %0, %0, %2\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- : "=&r" (v)
- : "r" (0), "Ir" (CR_C), "Ir" (0x40)
- : "cc");
-}
-
-static int zx_cpu_kill(unsigned int cpu)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(2000);
-
- writel_relaxed(0x2, pcu_base + PCU_CPU1_CTRL);
-
- while ((readl_relaxed(pcu_base + PCU_CPU1_ST) & 0x3) != 0x0) {
- if (time_after(jiffies, timeout)) {
- pr_err("*** cpu1 poweroff timeout\n");
- break;
- }
- }
- return 1;
-}
-
-static void zx_cpu_die(unsigned int cpu)
-{
- scu_power_mode(scu_base, SCU_PM_POWEROFF);
- cpu_enter_lowpower();
-
- while (1)
- cpu_do_idle();
-}
-#endif
-
-static void zx_secondary_init(unsigned int cpu)
-{
- scu_power_mode(scu_base, SCU_PM_NORMAL);
-}
-
-static const struct smp_operations zx_smp_ops __initconst = {
- .smp_prepare_cpus = zx_smp_prepare_cpus,
- .smp_secondary_init = zx_secondary_init,
- .smp_boot_secondary = zx_boot_secondary,
-#ifdef CONFIG_HOTPLUG_CPU
- .cpu_kill = zx_cpu_kill,
- .cpu_die = zx_cpu_die,
-#endif
-};
-
-CPU_METHOD_OF_DECLARE(zx_smp, "zte,zx296702-smp", &zx_smp_ops);
diff --git a/arch/arm/mach-zx/zx296702-pm-domain.c b/arch/arm/mach-zx/zx296702-pm-domain.c
deleted file mode 100644
index 7a08bf9dd792..000000000000
--- a/arch/arm/mach-zx/zx296702-pm-domain.c
+++ /dev/null
@@ -1,202 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2015 Linaro Ltd.
- *
- * Author: Jun Nie <jun.nie@linaro.org>
- */
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/pm_domain.h>
-#include <linux/slab.h>
-
-#define PCU_DM_CLKEN 0x18
-#define PCU_DM_RSTEN 0x1C
-#define PCU_DM_ISOEN 0x20
-#define PCU_DM_PWRDN 0x24
-#define PCU_DM_ACK_SYNC 0x28
-
-enum {
- PCU_DM_NEON0 = 0,
- PCU_DM_NEON1,
- PCU_DM_GPU,
- PCU_DM_DECPPU,
- PCU_DM_VOU,
- PCU_DM_R2D,
- PCU_DM_TOP,
-};
-
-static void __iomem *pcubase;
-
-struct zx_pm_domain {
- struct generic_pm_domain dm;
- unsigned int bit;
-};
-
-static int normal_power_off(struct generic_pm_domain *domain)
-{
- struct zx_pm_domain *zpd = (struct zx_pm_domain *)domain;
- unsigned long loop = 1000;
- u32 tmp;
-
- tmp = readl_relaxed(pcubase + PCU_DM_CLKEN);
- tmp &= ~BIT(zpd->bit);
- writel_relaxed(tmp, pcubase + PCU_DM_CLKEN);
- udelay(5);
-
- tmp = readl_relaxed(pcubase + PCU_DM_ISOEN);
- tmp &= ~BIT(zpd->bit);
- writel_relaxed(tmp | BIT(zpd->bit), pcubase + PCU_DM_ISOEN);
- udelay(5);
-
- tmp = readl_relaxed(pcubase + PCU_DM_RSTEN);
- tmp &= ~BIT(zpd->bit);
- writel_relaxed(tmp, pcubase + PCU_DM_RSTEN);
- udelay(5);
-
- tmp = readl_relaxed(pcubase + PCU_DM_PWRDN);
- tmp &= ~BIT(zpd->bit);
- writel_relaxed(tmp | BIT(zpd->bit), pcubase + PCU_DM_PWRDN);
- do {
- tmp = readl_relaxed(pcubase + PCU_DM_ACK_SYNC) & BIT(zpd->bit);
- } while (--loop && !tmp);
-
- if (!loop) {
- pr_err("Error: %s %s fail\n", __func__, domain->name);
- return -EIO;
- }
-
- return 0;
-}
-
-static int normal_power_on(struct generic_pm_domain *domain)
-{
- struct zx_pm_domain *zpd = (struct zx_pm_domain *)domain;
- unsigned long loop = 10000;
- u32 tmp;
-
- tmp = readl_relaxed(pcubase + PCU_DM_PWRDN);
- tmp &= ~BIT(zpd->bit);
- writel_relaxed(tmp, pcubase + PCU_DM_PWRDN);
- do {
- tmp = readl_relaxed(pcubase + PCU_DM_ACK_SYNC) & BIT(zpd->bit);
- } while (--loop && tmp);
-
- if (!loop) {
- pr_err("Error: %s %s fail\n", __func__, domain->name);
- return -EIO;
- }
-
- tmp = readl_relaxed(pcubase + PCU_DM_RSTEN);
- tmp &= ~BIT(zpd->bit);
- writel_relaxed(tmp | BIT(zpd->bit), pcubase + PCU_DM_RSTEN);
- udelay(5);
-
- tmp = readl_relaxed(pcubase + PCU_DM_ISOEN);
- tmp &= ~BIT(zpd->bit);
- writel_relaxed(tmp, pcubase + PCU_DM_ISOEN);
- udelay(5);
-
- tmp = readl_relaxed(pcubase + PCU_DM_CLKEN);
- tmp &= ~BIT(zpd->bit);
- writel_relaxed(tmp | BIT(zpd->bit), pcubase + PCU_DM_CLKEN);
- udelay(5);
- return 0;
-}
-
-static struct zx_pm_domain gpu_domain = {
- .dm = {
- .name = "gpu_domain",
- .power_off = normal_power_off,
- .power_on = normal_power_on,
- },
- .bit = PCU_DM_GPU,
-};
-
-static struct zx_pm_domain decppu_domain = {
- .dm = {
- .name = "decppu_domain",
- .power_off = normal_power_off,
- .power_on = normal_power_on,
- },
- .bit = PCU_DM_DECPPU,
-};
-
-static struct zx_pm_domain vou_domain = {
- .dm = {
- .name = "vou_domain",
- .power_off = normal_power_off,
- .power_on = normal_power_on,
- },
- .bit = PCU_DM_VOU,
-};
-
-static struct zx_pm_domain r2d_domain = {
- .dm = {
- .name = "r2d_domain",
- .power_off = normal_power_off,
- .power_on = normal_power_on,
- },
- .bit = PCU_DM_R2D,
-};
-
-static struct generic_pm_domain *zx296702_pm_domains[] = {
- &vou_domain.dm,
- &gpu_domain.dm,
- &decppu_domain.dm,
- &r2d_domain.dm,
-};
-
-static int zx296702_pd_probe(struct platform_device *pdev)
-{
- struct genpd_onecell_data *genpd_data;
- struct resource *res;
- int i;
-
- genpd_data = devm_kzalloc(&pdev->dev, sizeof(*genpd_data), GFP_KERNEL);
- if (!genpd_data)
- return -ENOMEM;
-
- genpd_data->domains = zx296702_pm_domains;
- genpd_data->num_domains = ARRAY_SIZE(zx296702_pm_domains);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- return -ENODEV;
- }
-
- pcubase = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pcubase)) {
- dev_err(&pdev->dev, "ioremap fail.\n");
- return -EIO;
- }
-
- for (i = 0; i < ARRAY_SIZE(zx296702_pm_domains); ++i)
- pm_genpd_init(zx296702_pm_domains[i], NULL, false);
-
- of_genpd_add_provider_onecell(pdev->dev.of_node, genpd_data);
- return 0;
-}
-
-static const struct of_device_id zx296702_pm_domain_matches[] __initconst = {
- { .compatible = "zte,zx296702-pcu", },
- { },
-};
-
-static struct platform_driver zx296702_pd_driver __initdata = {
- .driver = {
- .name = "zx-powerdomain",
- .owner = THIS_MODULE,
- .of_match_table = zx296702_pm_domain_matches,
- },
- .probe = zx296702_pd_probe,
-};
-
-static int __init zx296702_pd_init(void)
-{
- return platform_driver_register(&zx296702_pd_driver);
-}
-subsys_initcall(zx296702_pd_init);
diff --git a/arch/arm/mach-zx/zx296702.c b/arch/arm/mach-zx/zx296702.c
deleted file mode 100644
index fd8fa3a074fa..000000000000
--- a/arch/arm/mach-zx/zx296702.c
+++ /dev/null
@@ -1,22 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2014 Linaro Ltd.
- * Copyright (C) 2014 ZTE Corporation.
- */
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-
-static const char *const zx296702_dt_compat[] __initconst = {
- "zte,zx296702",
- NULL,
-};
-
-DT_MACHINE_START(ZX, "ZTE ZX296702 (Device Tree)")
- .dt_compat = zx296702_dt_compat,
- .l2c_aux_val = 0,
- .l2c_aux_mask = ~0,
-MACHINE_END
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 02692fbe2db5..35f43d0aa056 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -638,7 +638,6 @@ config CPU_V7M_NUM_IRQ
int "Number of external interrupts connected to the NVIC"
depends on CPU_V7M
default 90 if ARCH_STM32
- default 38 if ARCH_EFM32
default 112 if SOC_VF610
default 240
help
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index c18d23a5e5f1..93ff0097f00b 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -19,6 +19,10 @@
#include <asm/ptdump.h>
static struct addr_marker address_markers[] = {
+#ifdef CONFIG_KASAN
+ { KASAN_SHADOW_START, "Kasan shadow start"},
+ { KASAN_SHADOW_END, "Kasan shadow end"},
+#endif
{ MODULES_VADDR, "Modules" },
{ PAGE_OFFSET, "Kernel Mapping" },
{ 0, "vmalloc() Area" },
@@ -429,8 +433,11 @@ static void ptdump_initialize(void)
if (pg_level[i].bits[j].nx_bit)
pg_level[i].nx_bit = &pg_level[i].bits[j];
}
-
+#ifdef CONFIG_KASAN
+ address_markers[4].start_address = VMALLOC_START;
+#else
address_markers[2].start_address = VMALLOC_START;
+#endif
}
static struct ptdump_info kernel_ptdump_info = {
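
The dump.c hunk above prepends two KASAN shadow markers to address_markers[], which shifts the position of the "vmalloc() Area" entry that ptdump_initialize() patches at runtime, hence the new #ifdef around the hard-coded index. A hedged sketch of an index-free alternative, shown only to illustrate the design trade-off (this is not what the patch does; the struct layout and array contents below are reduced placeholders):

/* Illustrative only: locate the marker by name instead of by index. */
#include <linux/kernel.h>
#include <linux/string.h>

struct addr_marker {
        unsigned long start_address;
        const char *name;
};

static struct addr_marker address_markers[] = {
        { 0, "Kasan shadow start" },    /* placeholder start addresses */
        { 0, "Kasan shadow end" },
        { 0, "Modules" },
        { 0, "Kernel Mapping" },
        { 0, "vmalloc() Area" },
};

static void set_marker_start(const char *name, unsigned long addr)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(address_markers); i++)
                if (address_markers[i].name &&
                    !strcmp(address_markers[i].name, name))
                        address_markers[i].start_address = addr;
}

/* ptdump_initialize() could then call:
 *      set_marker_start("vmalloc() Area", VMALLOC_START);
 * with no #ifdef-dependent index. */
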
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c06ebfbc48c4..a25b660c3017 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -18,7 +18,6 @@
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cachetype.h>
-#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 0207b6ea6e8a..897634d0a67c 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1620,10 +1620,9 @@ exit:
}
emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code));
break;
- /* STX XADD: lock *(u32 *)(dst + off) += src */
- case BPF_STX | BPF_XADD | BPF_W:
- /* STX XADD: lock *(u64 *)(dst + off) += src */
- case BPF_STX | BPF_XADD | BPF_DW:
+ /* Atomic ops */
+ case BPF_STX | BPF_ATOMIC | BPF_W:
+ case BPF_STX | BPF_ATOMIC | BPF_DW:
goto notyet;
/* STX: *(size *)(dst + off) = src */
case BPF_STX | BPF_MEM | BPF_W:
diff --git a/arch/arm/oprofile/Makefile b/arch/arm/oprofile/Makefile
deleted file mode 100644
index 39688dc9f181..000000000000
--- a/arch/arm/oprofile/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-ifeq ($(CONFIG_HW_PERF_EVENTS),y)
-DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
-endif
-
-oprofile-y := $(DRIVER_OBJS) common.o
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
deleted file mode 100644
index 7cb3e0453fcd..000000000000
--- a/arch/arm/oprofile/common.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
- * @file common.c
- *
- * @remark Copyright 2004 Oprofile Authors
- * @remark Copyright 2010 ARM Ltd.
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- * @author Will Deacon [move to perf]
- */
-
-#include <linux/cpumask.h>
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/oprofile.h>
-#include <linux/perf_event.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <asm/stacktrace.h>
-#include <linux/uaccess.h>
-
-#include <asm/perf_event.h>
-#include <asm/ptrace.h>
-
-#ifdef CONFIG_HW_PERF_EVENTS
-
-/*
- * OProfile has a curious naming scheme for the ARM PMUs, but they are
- * part of the user ABI so we need to map from the perf PMU name for
- * supported PMUs.
- */
-static struct op_perf_name {
- char *perf_name;
- char *op_name;
-} op_perf_name_map[] = {
- { "armv5_xscale1", "arm/xscale1" },
- { "armv5_xscale2", "arm/xscale2" },
- { "armv6_1136", "arm/armv6" },
- { "armv6_1156", "arm/armv6" },
- { "armv6_1176", "arm/armv6" },
- { "armv6_11mpcore", "arm/mpcore" },
- { "armv7_cortex_a8", "arm/armv7" },
- { "armv7_cortex_a9", "arm/armv7-ca9" },
-};
-
-char *op_name_from_perf_id(void)
-{
- int i;
- struct op_perf_name names;
- const char *perf_name = perf_pmu_name();
-
- for (i = 0; i < ARRAY_SIZE(op_perf_name_map); ++i) {
- names = op_perf_name_map[i];
- if (!strcmp(names.perf_name, perf_name))
- return names.op_name;
- }
-
- return NULL;
-}
-#endif
-
-static int report_trace(struct stackframe *frame, void *d)
-{
- unsigned int *depth = d;
-
- if (*depth) {
- oprofile_add_trace(frame->pc);
- (*depth)--;
- }
-
- return *depth == 0;
-}
-
-/*
- * The registers we're interested in are at the end of the variable
- * length saved register structure. The fp points at the end of this
- * structure so the address of this struct is:
- * (struct frame_tail *)(xxx->fp)-1
- */
-struct frame_tail {
- struct frame_tail *fp;
- unsigned long sp;
- unsigned long lr;
-} __attribute__((packed));
-
-static struct frame_tail* user_backtrace(struct frame_tail *tail)
-{
- struct frame_tail buftail[2];
-
- /* Also check accessibility of one struct frame_tail beyond */
- if (!access_ok(tail, sizeof(buftail)))
- return NULL;
- if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail)))
- return NULL;
-
- oprofile_add_trace(buftail[0].lr);
-
- /* frame pointers should strictly progress back up the stack
- * (towards higher addresses) */
- if (tail + 1 >= buftail[0].fp)
- return NULL;
-
- return buftail[0].fp-1;
-}
-
-static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
-{
- struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1;
-
- if (!user_mode(regs)) {
- struct stackframe frame;
- arm_get_current_stackframe(regs, &frame);
- walk_stackframe(&frame, report_trace, &depth);
- return;
- }
-
- while (depth-- && tail && !((unsigned long) tail & 3))
- tail = user_backtrace(tail);
-}
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- /* provide backtrace support also in timer mode: */
- ops->backtrace = arm_backtrace;
-
- return oprofile_perf_init(ops);
-}
-
-void oprofile_arch_exit(void)
-{
- oprofile_perf_exit();
-}
diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile
index 27d8beb7c941..3654f979851b 100644
--- a/arch/arm/tools/Makefile
+++ b/arch/arm/tools/Makefile
@@ -11,7 +11,7 @@ uapi := $(gen)/uapi/asm
syshdr := $(srctree)/$(src)/syscallhdr.sh
sysnr := $(srctree)/$(src)/syscallnr.sh
systbl := $(srctree)/$(src)/syscalltbl.sh
-syscall := $(srctree)/$(src)/syscall.tbl
+syscall := $(src)/syscall.tbl
gen-y := $(gen)/calls-oabi.S
gen-y += $(gen)/calls-eabi.S
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 20e1170e2e0a..dcc1191291a2 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -455,3 +455,4 @@
439 common faccessat2 sys_faccessat2
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index b558bee0e1f6..7c9e395b77f7 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -23,7 +23,6 @@ ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
-T
obj-$(CONFIG_VDSO) += vdso.o
-extra-$(CONFIG_VDSO) += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
CFLAGS_REMOVE_vdso.o = -pg
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 5a957a9a0984..8ad576ecd0f1 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -370,8 +370,6 @@ static int __init xen_guest_init(void)
return -ENOMEM;
}
gnttab_init();
- if (!xen_initial_domain())
- xenbus_probe();
/*
* Making sure board specific code will not set up ops for
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index e52950a43f2e..acb464547a54 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -93,10 +93,39 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
int i;
for (i = 0; i < count; i++) {
+ struct gnttab_unmap_grant_ref unmap;
+ int rc;
+
if (map_ops[i].status)
continue;
- set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
- map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT);
+ if (likely(set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
+ map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT)))
+ continue;
+
+ /*
+ * Signal an error for this slot. This in turn requires
+ * immediate unmapping.
+ */
+ map_ops[i].status = GNTST_general_error;
+ unmap.host_addr = map_ops[i].host_addr,
+ unmap.handle = map_ops[i].handle;
+ map_ops[i].handle = ~0;
+ if (map_ops[i].flags & GNTMAP_device_map)
+ unmap.dev_bus_addr = map_ops[i].dev_bus_addr;
+ else
+ unmap.dev_bus_addr = 0;
+
+ /*
+ * Pre-populate the status field, to be recognizable in
+ * the log message below.
+ */
+ unmap.status = 1;
+
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
+ &unmap, 1);
+ if (rc || unmap.status != GNTST_okay)
+ pr_err_once("gnttab unmap failed: rc=%d st=%d\n",
+ rc, unmap.status);
}
return 0;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index f39568b28ec1..1f212b47a48a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -73,6 +73,8 @@ config ARM64
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK
+ select ARCH_SUPPORTS_LTO_CLANG if CPU_LITTLE_ENDIAN
+ select ARCH_SUPPORTS_LTO_CLANG_THIN
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
select ARCH_SUPPORTS_NUMA_BALANCING
@@ -138,6 +140,7 @@ config ARM64
select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
+ select HAVE_ARCH_KFENCE
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
@@ -162,6 +165,8 @@ config ARM64
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS \
if $(cc-option,-fpatchable-function-entry=2)
+ select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
+ if DYNAMIC_FTRACE_WITH_REGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
@@ -522,7 +527,7 @@ config ARM64_ERRATUM_1024718
help
This option adds a workaround for ARM Cortex-A55 Erratum 1024718.
- Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
+ Affected Cortex-A55 cores (all revisions) could cause incorrect
update of the hardware dirty bit when the DBM/AP bits are updated
without a break-before-make. The workaround is to disable the usage
of hardware DBM locally on the affected cores. CPUs not affected by
@@ -952,8 +957,9 @@ choice
that is selected here.
config CPU_BIG_ENDIAN
- bool "Build big-endian kernel"
- help
+ bool "Build big-endian kernel"
+ depends on !LD_IS_LLD || LLD_VERSION >= 130000
+ help
Say Y if you plan on running a kernel with a big-endian userspace.
config CPU_LITTLE_ENDIAN
@@ -993,6 +999,7 @@ config HOTPLUG_CPU
# Common NUMA Features
config NUMA
bool "NUMA Memory Allocation and Scheduler Support"
+ select GENERIC_ARCH_NUMA
select ACPI_NUMA if ACPI
select OF_NUMA
help
@@ -1132,6 +1139,10 @@ config CRASH_DUMP
For more details see Documentation/admin-guide/kdump/kdump.rst
+config TRANS_TABLE
+ def_bool y
+ depends on HIBERNATION
+
config XEN_DOM0
def_bool y
depends on XEN
@@ -1213,7 +1224,6 @@ config ARM64_TAGGED_ADDR_ABI
menuconfig COMPAT
bool "Kernel support for 32-bit EL0"
depends on ARM64_4K_PAGES || EXPERT
- select COMPAT_BINFMT_ELF if BINFMT_ELF
select HAVE_UID16
select OLD_SIGSUSPEND3
select COMPAT_OLD_SIGACTION
@@ -1477,7 +1487,7 @@ config ARM64_PTR_AUTH
depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
# Modern compilers insert a .note.gnu.property section note for PAC
# which is only understood by binutils starting with version 2.33.1.
- depends on LD_IS_LLD || LD_VERSION >= 233010000 || (CC_IS_GCC && GCC_VERSION < 90100)
+ depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
help
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 6eecdef538bd..cdfd5fed457f 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -13,10 +13,17 @@ config ARCH_AGILEX
help
This enables support for Intel's Agilex SoCFPGA Family.
+config ARCH_N5X
+ bool "Intel's eASIC N5X SoCFPGA Family"
+ help
+ This enables support for Intel's eASIC N5X SoCFPGA Family.
+
config ARCH_SUNXI
bool "Allwinner sunxi 64-bit SoC Family"
select ARCH_HAS_RESET_CONTROLLER
select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN_HIERARCHY
+ select IRQ_FASTEOI_HIERARCHY_HANDLERS
select PINCTRL
select RESET_CONTROLLER
help
@@ -39,6 +46,7 @@ config ARCH_BCM2835
select ARM_AMBA
select ARM_GIC
select ARM_TIMER_SP804
+ select BRCMSTB_L2_IRQ
help
This enables support for the Broadcom BCM2837 and BCM2711 SoC.
These SoCs are used in the Raspberry Pi 3 and 4 devices.
@@ -317,12 +325,6 @@ config ARCH_XGENE
help
This enables support for AppliedMicro X-Gene SOC Family
-config ARCH_ZX
- bool "ZTE ZX SoC Family"
- select PINCTRL
- help
- This enables support for ZTE ZX SoC Family
-
config ARCH_ZYNQMP
bool "Xilinx ZynqMP Family"
help
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 90309208bb28..5b84aec31ed3 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -188,10 +188,12 @@ ifeq ($(KBUILD_EXTMOD),)
# this hack.
prepare: vdso_prepare
vdso_prepare: prepare0
- $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h
- $(if $(CONFIG_COMPAT_VDSO),$(Q)$(MAKE) \
- $(build)=arch/arm64/kernel/vdso32 \
- include/generated/vdso32-offsets.h)
+ $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso \
+ include/generated/vdso-offsets.h arch/arm64/kernel/vdso/vdso.so
+ifdef CONFIG_COMPAT_VDSO
+ $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 \
+ include/generated/vdso32-offsets.h arch/arm64/kernel/vdso32/vdso.so
+endif
endif
define archhelp
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 9b1170658d60..f1173cd93594 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -29,4 +29,3 @@ subdir-y += synaptics
subdir-y += ti
subdir-y += toshiba
subdir-y += xilinx
-subdir-y += zte
diff --git a/arch/arm64/boot/dts/allwinner/Makefile b/arch/arm64/boot/dts/allwinner/Makefile
index 211d1e9d4701..41ce680e5f8d 100644
--- a/arch/arm64/boot/dts/allwinner/Makefile
+++ b/arch/arm64/boot/dts/allwinner/Makefile
@@ -13,6 +13,7 @@ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pinephone-1.0.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pinephone-1.1.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pinephone-1.2.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pinetab.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pinetab-early-adopter.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-sopine-baseboard.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-teres-i.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a100-allwinner-perf1.dtb
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
index e5e840b9fbb4..f7fe9fa50cb3 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
@@ -36,18 +36,18 @@
leds {
compatible = "gpio-leds";
- pwr-led {
+ led-0 {
label = "bananapi-m64:red:pwr";
gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* PD24 */
default-state = "on";
};
- green {
+ led-1 {
label = "bananapi-m64:green:user";
gpios = <&pio 4 14 GPIO_ACTIVE_HIGH>; /* PE14 */
};
- blue {
+ led-2 {
label = "bananapi-m64:blue:user";
gpios = <&pio 4 15 GPIO_ACTIVE_HIGH>; /* PE15 */
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts
index e58db8a6cab6..09b3c7fb82c0 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts
@@ -35,7 +35,7 @@
leds {
compatible = "gpio-leds";
- blue {
+ led {
label = "nanopi-a64:blue:status";
gpios = <&pio 3 24 GPIO_ACTIVE_LOW>; /* PD24 */
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
index 302e24be0a31..437ffe3628a5 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
@@ -1,10 +1,21 @@
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
// Copyright (c) 2018 ARM Ltd.
+#include <dt-bindings/leds/common.h>
#include "sun50i-a64-sopine-baseboard.dts"
/ {
model = "Pine64 LTS";
compatible = "pine64,pine64-lts", "allwinner,sun50i-r18",
"allwinner,sun50i-a64";
+
+ leds {
+ compatible = "gpio-leds";
+
+ led {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
index 896f34fd9fc3..7ae16541d14f 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
@@ -126,8 +126,6 @@
};
&ehci0 {
- phys = <&usbphy 0>;
- phy-names = "usb";
status = "okay";
};
@@ -169,6 +167,7 @@
pinctrl-0 = <&mmc2_pins>, <&mmc2_ds_pin>;
vmmc-supply = <&reg_dcdc1>;
vqmmc-supply = <&reg_eldo1>;
+ max-frequency = <200000000>;
bus-width = <8>;
non-removable;
cap-mmc-hw-reset;
@@ -177,8 +176,6 @@
};
&ohci0 {
- phys = <&usbphy 0>;
- phy-names = "usb";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts
index 3d5a2ae9aa39..fb65319a3bd3 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts
@@ -7,7 +7,7 @@
/ {
model = "Pine64 PinePhone Developer Batch (1.0)";
- compatible = "pine64,pinephone-1.0", "allwinner,sun50i-a64";
+ compatible = "pine64,pinephone-1.0", "pine64,pinephone", "allwinner,sun50i-a64";
};
&sgm3140 {
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts
index c9b9f6e9ee8c..5e59d3752178 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts
@@ -7,7 +7,7 @@
/ {
model = "Pine64 PinePhone Braveheart (1.1)";
- compatible = "pine64,pinephone-1.1", "allwinner,sun50i-a64";
+ compatible = "pine64,pinephone-1.1", "pine64,pinephone", "allwinner,sun50i-a64";
};
&backlight {
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts
index acc0ab53b9c1..4e7e237cb46a 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts
@@ -7,7 +7,7 @@
/ {
model = "Pine64 PinePhone (1.2)";
- compatible = "pine64,pinephone-1.2", "allwinner,sun50i-a64";
+ compatible = "pine64,pinephone-1.2", "pine64,pinephone", "allwinner,sun50i-a64";
wifi_pwrseq: wifi-pwrseq {
compatible = "mmc-pwrseq-simple";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
index 2dfe9bae8c67..9f69d489a81d 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
@@ -21,6 +21,7 @@
compatible = "pwm-backlight";
pwms = <&r_pwm 0 50000 PWM_POLARITY_INVERTED>;
enable-gpios = <&pio 7 10 GPIO_ACTIVE_HIGH>; /* PH10 */
+ power-supply = <&reg_ps>;
/* Backlight configuration differs per PinePhone revision. */
};
@@ -31,25 +32,33 @@
leds {
compatible = "gpio-leds";
- blue {
+ led-0 {
function = LED_FUNCTION_INDICATOR;
color = <LED_COLOR_ID_BLUE>;
gpios = <&pio 3 20 GPIO_ACTIVE_HIGH>; /* PD20 */
};
- green {
+ led-1 {
function = LED_FUNCTION_INDICATOR;
color = <LED_COLOR_ID_GREEN>;
gpios = <&pio 3 18 GPIO_ACTIVE_HIGH>; /* PD18 */
};
- red {
+ led-2 {
function = LED_FUNCTION_INDICATOR;
color = <LED_COLOR_ID_RED>;
gpios = <&pio 3 19 GPIO_ACTIVE_HIGH>; /* PD19 */
};
};
+ reg_ps: ps-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "ps";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
+ };
+
reg_vbat_wifi: vbat-wifi {
compatible = "regulator-fixed";
regulator-min-microvolt = <3300000>;
@@ -174,8 +183,6 @@
reg = <0x48>;
interrupt-parent = <&pio>;
interrupts = <1 0 IRQ_TYPE_EDGE_FALLING>; /* PB0 */
- vdd-supply = <&reg_ldo_io0>;
- leda-supply = <&reg_dldo1>;
};
/* Accelerometer/gyroscope */
@@ -196,6 +203,7 @@
&lradc {
vref-supply = <&reg_aldo3>;
+ wakeup-source;
status = "okay";
button-200 {
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab-early-adopter.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab-early-adopter.dts
new file mode 100644
index 000000000000..6265360ce623
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab-early-adopter.dts
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2020 Icenowy Zheng <icenowy@aosc.io>
+ *
+ */
+
+/dts-v1/;
+
+#include "sun50i-a64-pinetab.dts"
+
+/ {
+ model = "PineTab, Early Adopter's version";
+ compatible = "pine64,pinetab-early-adopter", "allwinner,sun50i-a64";
+};
+
+&dsi {
+ /delete-node/ panel@0;
+
+ panel@0 {
+ compatible = "feixin,k101-im2byl02", "ilitek,ili9881c";
+ reg = <0>;
+ power-supply = <&reg_dc1sw>;
+ reset-gpios = <&pio 3 24 GPIO_ACTIVE_LOW>; /* PD24 */
+ backlight = <&backlight>;
+ };
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
index 0494bfaf2ffa..422a8507f674 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
@@ -14,7 +14,7 @@
#include <dt-bindings/pwm/pwm.h>
/ {
- model = "PineTab";
+ model = "PineTab, Development Sample";
compatible = "pine64,pinetab", "allwinner,sun50i-a64";
aliases {
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
index d4069749d721..e22b94c83647 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
@@ -111,6 +111,7 @@
bus-width = <8>;
non-removable;
cap-mmc-hw-reset;
+ mmc-hs200-1_8v;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
index c48692b06e1f..3402cec87035 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
@@ -32,7 +32,6 @@
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins>;
vmmc-supply = <&reg_dcdc1>;
- non-removable;
disable-wp;
bus-width = <4>;
cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
index a1864a89fb89..f0a16f355e27 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
@@ -52,12 +52,12 @@
leds {
compatible = "gpio-leds";
- capslock {
+ led-0 {
label = "teres-i:green:capslock";
gpios = <&pio 2 7 GPIO_ACTIVE_HIGH>; /* PC7 */
};
- numlock {
+ led-1 {
label = "teres-i:green:numlock";
gpios = <&pio 2 4 GPIO_ACTIVE_HIGH>; /* PC4 */
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 51cc30e84e26..57786fc120c3 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -514,7 +514,7 @@
resets = <&ccu RST_BUS_MMC2>;
reset-names = "ahb";
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
- max-frequency = <200000000>;
+ max-frequency = <150000000>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -593,6 +593,8 @@
<&ccu CLK_USB_OHCI0>;
resets = <&ccu RST_BUS_OHCI0>,
<&ccu RST_BUS_EHCI0>;
+ phys = <&usbphy 0>;
+ phy-names = "usb";
status = "disabled";
};
@@ -603,6 +605,8 @@
clocks = <&ccu CLK_BUS_OHCI0>,
<&ccu CLK_USB_OHCI0>;
resets = <&ccu RST_BUS_OHCI0>;
+ phys = <&usbphy 0>;
+ phy-names = "usb";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
index 9d93fe153689..4c3921ac236c 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
@@ -25,13 +25,13 @@
leds {
compatible = "gpio-leds";
- pwr {
+ led-0 {
label = "nanopi:green:pwr";
gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
- status {
+ led-1 {
label = "nanopi:red:status";
gpios = <&pio 0 20 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
index b059e20813bd..02f8e72f0cad 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
@@ -22,13 +22,13 @@
leds {
compatible = "gpio-leds";
- pwr {
+ led-0 {
label = "nanopi:green:pwr";
gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
- status {
+ led-1 {
label = "nanopi:blue:status";
gpios = <&pio 0 10 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
index 8bf2db9dcbda..1010c1b22d2e 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
@@ -42,13 +42,13 @@
leds {
compatible = "gpio-leds";
- pwr {
+ led-0 {
label = "orangepi:green:pwr";
gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
- status {
+ led-1 {
label = "orangepi:red:status";
gpios = <&pio 0 20 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
index 33ab44072e6d..74e0444af19b 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
@@ -36,13 +36,13 @@
leds {
compatible = "gpio-leds";
- pwr {
+ led-0 {
label = "orangepi:green:pwr";
gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
- status {
+ led-1 {
label = "orangepi:red:status";
gpios = <&pio 0 20 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
index ef5ca6444220..d13980ed7a79 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
@@ -33,13 +33,13 @@
leds {
compatible = "gpio-leds";
- pwr {
+ led-0 {
label = "orangepi:green:pwr";
gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>; /* PA10 */
default-state = "on";
};
- status {
+ led-1 {
label = "orangepi:red:status";
gpios = <&pio 0 17 GPIO_ACTIVE_HIGH>; /* PA17 */
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
index de19e68eb84e..22530ace12d5 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
@@ -33,13 +33,13 @@
leds {
compatible = "gpio-leds";
- pwr {
+ led-0 {
label = "orangepi:green:pwr";
gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
- status {
+ led-1 {
label = "orangepi:red:status";
gpios = <&pio 0 17 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
index 10489e508695..578a63dedf46 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
@@ -121,6 +121,19 @@
resets = <&ccu RST_BUS_CE>;
};
+ deinterlace: deinterlace@1e00000 {
+ compatible = "allwinner,sun8i-h3-deinterlace";
+ reg = <0x01e00000 0x20000>;
+ clocks = <&ccu CLK_BUS_DEINTERLACE>,
+ <&ccu CLK_DEINTERLACE>,
+ <&ccu CLK_DRAM_DEINTERLACE>;
+ clock-names = "bus", "mod", "ram";
+ resets = <&ccu RST_BUS_DEINTERLACE>;
+ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&mbus 9>;
+ interconnect-names = "dma-mem";
+ };
+
mali: gpu@1e80000 {
compatible = "allwinner,sun50i-h5-mali", "arm,mali-450";
reg = <0x01e80000 0x30000>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
index 7c9dbde645b5..4f4755152fce 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
@@ -43,7 +43,7 @@
leds {
compatible = "gpio-leds";
- power {
+ led {
label = "beelink:white:power";
gpios = <&r_pio 0 4 GPIO_ACTIVE_HIGH>; /* PL4 */
default-state = "on";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-cpu-opp.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-cpu-opp.dtsi
index 1a5eddc5a40f..8c6e8536b69f 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-cpu-opp.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-cpu-opp.dtsi
@@ -8,7 +8,7 @@
nvmem-cells = <&cpu_speed_grade>;
opp-shared;
- opp@480000000 {
+ opp-480000000 {
clock-latency-ns = <244144>; /* 8 32k periods */
opp-hz = /bits/ 64 <480000000>;
@@ -17,7 +17,7 @@
opp-microvolt-speed2 = <820000 820000 1200000>;
};
- opp@720000000 {
+ opp-720000000 {
clock-latency-ns = <244144>; /* 8 32k periods */
opp-hz = /bits/ 64 <720000000>;
@@ -26,7 +26,7 @@
opp-microvolt-speed2 = <820000 820000 1200000>;
};
- opp@816000000 {
+ opp-816000000 {
clock-latency-ns = <244144>; /* 8 32k periods */
opp-hz = /bits/ 64 <816000000>;
@@ -35,7 +35,7 @@
opp-microvolt-speed2 = <820000 820000 1200000>;
};
- opp@888000000 {
+ opp-888000000 {
clock-latency-ns = <244144>; /* 8 32k periods */
opp-hz = /bits/ 64 <888000000>;
@@ -44,7 +44,7 @@
opp-microvolt-speed2 = <820000 820000 1200000>;
};
- opp@1080000000 {
+ opp-1080000000 {
clock-latency-ns = <244144>; /* 8 32k periods */
opp-hz = /bits/ 64 <1080000000>;
@@ -53,7 +53,7 @@
opp-microvolt-speed2 = <880000 880000 1200000>;
};
- opp@1320000000 {
+ opp-1320000000 {
clock-latency-ns = <244144>; /* 8 32k periods */
opp-hz = /bits/ 64 <1320000000>;
@@ -62,7 +62,7 @@
opp-microvolt-speed2 = <940000 940000 1200000>;
};
- opp@1488000000 {
+ opp-1488000000 {
clock-latency-ns = <244144>; /* 8 32k periods */
opp-hz = /bits/ 64 <1488000000>;
@@ -71,7 +71,7 @@
opp-microvolt-speed2 = <1000000 1000000 1200000>;
};
- opp@1608000000 {
+ opp-1608000000 {
clock-latency-ns = <244144>; /* 8 32k periods */
opp-hz = /bits/ 64 <1608000000>;
@@ -80,7 +80,7 @@
opp-microvolt-speed2 = <1030000 1030000 1200000>;
};
- opp@1704000000 {
+ opp-1704000000 {
clock-latency-ns = <244144>; /* 8 32k periods */
opp-hz = /bits/ 64 <1704000000>;
@@ -89,7 +89,7 @@
opp-microvolt-speed2 = <1060000 1060000 1200000>;
};
- opp@1800000000 {
+ opp-1800000000 {
clock-latency-ns = <244144>; /* 8 32k periods */
opp-hz = /bits/ 64 <1800000000>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
index 15c9dd8c4479..7e83f6146f8a 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
@@ -43,13 +43,13 @@
leds {
compatible = "gpio-leds";
- power {
+ led-0 {
label = "orangepi:red:power";
gpios = <&r_pio 0 4 GPIO_ACTIVE_HIGH>; /* PL4 */
default-state = "on";
};
- status {
+ led-1 {
label = "orangepi:green:status";
gpios = <&r_pio 0 7 GPIO_ACTIVE_HIGH>; /* PL7 */
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
index ebc120a9232f..da0875bd38d4 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
@@ -42,13 +42,13 @@
leds {
compatible = "gpio-leds";
- power {
+ led-0 {
label = "orangepi:red:power";
gpios = <&r_pio 0 4 GPIO_ACTIVE_HIGH>; /* PL4 */
default-state = "on";
};
- status {
+ led-1 {
label = "orangepi:green:status";
gpios = <&r_pio 0 7 GPIO_ACTIVE_HIGH>; /* PL7 */
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts
index 7fea1e4e2d49..686f58e77004 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64-model-b.dts
@@ -34,3 +34,18 @@
non-removable;
status = "okay";
};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "realtek,rtl8723bs-bt";
+ device-wake-gpios = <&r_pio 1 2 GPIO_ACTIVE_HIGH>; /* PM2 */
+ host-wake-gpios = <&r_pio 1 1 GPIO_ACTIVE_HIGH>; /* PM1 */
+ enable-gpios = <&r_pio 1 4 GPIO_ACTIVE_HIGH>; /* PM4 */
+ max-speed = <1500000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
index 961732c52aa0..b868ad17af8f 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
@@ -44,17 +44,17 @@
leds {
compatible = "gpio-leds";
- heartbeat {
+ led-0 {
label = "pine-h64:green:heartbeat";
gpios = <&r_pio 0 4 GPIO_ACTIVE_HIGH>; /* PL4 */
};
- link {
+ led-1 {
label = "pine-h64:white:link";
gpios = <&r_pio 0 3 GPIO_ACTIVE_HIGH>; /* PL3 */
};
- status {
+ led-2 {
label = "pine-h64:blue:status";
gpios = <&r_pio 0 7 GPIO_ACTIVE_HIGH>; /* PL7 */
};
@@ -142,6 +142,7 @@
vqmmc-supply = <&reg_bldo2>;
non-removable;
cap-mmc-hw-reset;
+ mmc-hs200-1_8v;
bus-width = <8>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index 8a62a9fbe347..49e979794094 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -436,6 +436,7 @@
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins>;
+ max-frequency = <150000000>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -452,6 +453,7 @@
interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins>;
+ max-frequency = <150000000>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -468,6 +470,7 @@
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&mmc2_pins>;
+ max-frequency = <150000000>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -680,6 +683,8 @@
<&ccu CLK_USB_OHCI0>;
resets = <&ccu RST_BUS_OHCI0>,
<&ccu RST_BUS_EHCI0>;
+ phys = <&usb2phy 0>;
+ phy-names = "usb";
status = "disabled";
};
@@ -690,6 +695,8 @@
clocks = <&ccu CLK_BUS_OHCI0>,
<&ccu CLK_USB_OHCI0>;
resets = <&ccu RST_BUS_OHCI0>;
+ phys = <&usb2phy 0>;
+ phy-names = "usb";
status = "disabled";
};
@@ -949,6 +956,11 @@
pins = "PL9";
function = "s_cir_rx";
};
+
+ r_rsb_pins: r-rsb-pins {
+ pins = "PL0", "PL1";
+ function = "s_rsb";
+ };
};
r_ir: ir@7040000 {
@@ -979,6 +991,20 @@
#size-cells = <0>;
};
+ r_rsb: rsb@7083000 {
+ compatible = "allwinner,sun8i-a23-rsb";
+ reg = <0x07083000 0x400>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&r_ccu 13>;
+ clock-frequency = <3000000>;
+ resets = <&r_ccu 7>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&r_rsb_pins>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
ths: thermal-sensor@5070400 {
compatible = "allwinner,sun50i-h6-ths";
reg = <0x05070400 0x100>;
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index 0f893984c256..d301ac0d406b 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -203,7 +203,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <24>;
+ ngpios = <24>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -223,7 +223,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <24>;
+ ngpios = <24>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index ced03946314f..78a569d7fa20 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -3,6 +3,7 @@ dtb-$(CONFIG_ARCH_MESON) += meson-axg-s400.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12a-sei510.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12a-u200.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12a-x96-max.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-g12b-gsking-x.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-gtking.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-gtking-pro.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-a311d-khadas-vim3.dtb
@@ -44,7 +45,8 @@ dtb-$(CONFIG_ARCH_MESON) += meson-gxm-rbox-pro.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-s912-libretech-pc.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-vega-s96.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-wetek-core2.dtb
-dtb-$(CONFIG_ARCH_MESON) += meson-sm1-sei610.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-sm1-khadas-vim3l.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-sm1-odroid-c4.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-sm1-odroid-hc4.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-sm1-sei610.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-a1-ad401.dtb
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
index ba1c6dfdc4b6..d945c84ab697 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
@@ -280,8 +280,6 @@
"timing-adjustment";
rx-fifo-depth = <4096>;
tx-fifo-depth = <2048>;
- resets = <&reset RESET_ETHERNET>;
- reset-names = "stmmaceth";
power-domains = <&pwrc PWRC_AXG_ETHERNET_MEM_ID>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index 9c90d562ada1..b858c5e43cc8 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -224,8 +224,6 @@
"timing-adjustment";
rx-fifo-depth = <4096>;
tx-fifo-depth = <2048>;
- resets = <&reset RESET_ETHERNET>;
- reset-names = "stmmaceth";
status = "disabled";
mdio0: mdio {
@@ -2390,7 +2388,7 @@
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
dr_mode = "host";
snps,dis_u2_susphy_quirk;
- snps,quirk-frame-length-adjustment;
+ snps,quirk-frame-length-adjustment = <0x20>;
snps,parkmode-disable-ss-quirk;
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
index b00d0468c753..81269ccc2496 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
@@ -181,7 +181,7 @@
sound {
compatible = "amlogic,axg-sound-card";
- model = "G12A-SEI510";
+ model = "SEI510";
audio-aux-devs = <&tdmout_a>, <&tdmout_b>,
<&tdmin_a>, <&tdmin_b>;
audio-routing = "TDMOUT_A IN 0", "FRDDR_A OUT 0",
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
index 463a72d6bb7c..579f3d02d613 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
@@ -150,7 +150,7 @@
sound {
compatible = "amlogic,axg-sound-card";
- model = "G12A-X96-MAX";
+ model = "X96-MAX";
audio-aux-devs = <&tdmout_b>;
audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
"TDMOUT_B IN 1", "FRDDR_B OUT 1",
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts
new file mode 100644
index 000000000000..211191f66344
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (c) 2019 Christian Hewitt <christianshewitt@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "meson-g12b-w400.dtsi"
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
+
+/ {
+ compatible = "azw,gsking-x", "amlogic,g12b";
+ model = "Beelink GS-King X";
+
+ aliases {
+ rtc0 = &rtc;
+ rtc1 = &vrtc;
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ poll-interval = <100>;
+
+ power-button {
+ label = "power";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ sound {
+ compatible = "amlogic,axg-sound-card";
+ model = "GSKING-X";
+ audio-aux-devs = <&tdmout_a>;
+ audio-routing = "TDMOUT_A IN 0", "FRDDR_A OUT 1",
+ "TDMOUT_A IN 1", "FRDDR_B OUT 1",
+ "TDMOUT_A IN 2", "FRDDR_C OUT 1",
+ "TDM_A Playback", "TDMOUT_A OUT";
+
+ assigned-clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+ assigned-clock-parents = <0>, <0>, <0>;
+ assigned-clock-rates = <294912000>,
+ <270950400>,
+ <393216000>;
+ status = "okay";
+
+ dai-link-0 {
+ sound-dai = <&frddr_a>;
+ };
+
+ dai-link-1 {
+ sound-dai = <&frddr_b>;
+ };
+
+ dai-link-2 {
+ sound-dai = <&frddr_c>;
+ };
+
+ /* 8ch hdmi interface */
+ dai-link-3 {
+ sound-dai = <&tdmif_a>;
+ dai-format = "i2s";
+ dai-tdm-slot-tx-mask-0 = <1 1>;
+ dai-tdm-slot-tx-mask-1 = <1 1>;
+ dai-tdm-slot-tx-mask-2 = <1 1>;
+ dai-tdm-slot-tx-mask-3 = <1 1>;
+ mclk-fs = <256>;
+
+ codec {
+ sound-dai = <&tohdmitx TOHDMITX_I2S_IN_A>;
+ };
+ };
+
+ dai-link-4 {
+ sound-dai = <&tohdmitx TOHDMITX_I2S_OUT>;
+
+ codec {
+ sound-dai = <&hdmi_tx>;
+ };
+ };
+ };
+};
+
+&arb {
+ status = "okay";
+};
+
+&clkc_audio {
+ status = "okay";
+};
+
+&frddr_a {
+ status = "okay";
+};
+
+&frddr_b {
+ status = "okay";
+};
+
+&frddr_c {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+ pinctrl-0 = <&i2c3_sda_a_pins>, <&i2c3_sck_a_pins>;
+ pinctrl-names = "default";
+
+ rtc: rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ wakeup-source;
+ };
+};
+
+&tdmif_a {
+ status = "okay";
+};
+
+&tdmout_a {
+ status = "okay";
+};
+
+&tohdmitx {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts
index 0e5c500fb78f..0e331aa5a2d7 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts
@@ -44,7 +44,7 @@
sound {
compatible = "amlogic,axg-sound-card";
- model = "G12B-GTKING-PRO";
+ model = "GTKING-PRO";
audio-aux-devs = <&tdmout_b>;
audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
"TDMOUT_B IN 1", "FRDDR_B OUT 1",
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts
index 10b87eb97b14..a7db84a500bb 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts
@@ -28,7 +28,7 @@
sound {
compatible = "amlogic,axg-sound-card";
- model = "G12B-GTKING";
+ model = "GTKING";
audio-aux-devs = <&tdmout_b>;
audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
"TDMOUT_B IN 1", "FRDDR_B OUT 1",
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi
index 39a09661c5f6..58ce569b2ace 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi
@@ -13,6 +13,8 @@
aliases {
serial0 = &uart_AO;
ethernet0 = &ethmac;
+ rtc0 = &rtc;
+ rtc1 = &vrtc;
};
dioo2133: audio-amplifier-0 {
@@ -211,7 +213,7 @@
sound {
compatible = "amlogic,axg-sound-card";
- model = "G12B-ODROID-N2";
+ model = "ODROID-N2";
audio-widgets = "Line", "Lineout";
audio-aux-devs = <&tdmout_b>, <&tdmout_c>, <&tdmin_a>,
<&tdmin_b>, <&tdmin_c>, <&tdmin_lb>,
@@ -478,6 +480,18 @@
linux,rc-map-name = "rc-odroid";
};
+&i2c3 {
+ status = "okay";
+ pinctrl-0 = <&i2c3_sda_a_pins>, <&i2c3_sck_a_pins>;
+ pinctrl-names = "default";
+
+ rtc: rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ wakeup-source;
+ };
+};
+
&pwm_ab {
pinctrl-0 = <&pwm_a_e_pins>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts
index b57bb0befc69..0c7892600d56 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts
@@ -23,7 +23,7 @@
sound {
compatible = "amlogic,axg-sound-card";
- model = "G12B-UGOOS-AM6";
+ model = "UGOOS-AM6";
audio-aux-devs = <&tdmout_b>;
audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
"TDMOUT_B IN 1", "FRDDR_B OUT 1",
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
index c2480bab8d33..2d7032f41e4b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
@@ -186,7 +186,7 @@
sound {
compatible = "amlogic,gx-sound-card";
- model = "GXL-LIBRETECH-S9XX-PC";
+ model = "LIBRETECH-PC";
audio-aux-devs = <&dio2133>;
audio-widgets = "Speaker", "7J4-14 LEFT",
"Speaker", "7J4-11 RIGHT";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
index 6b57e15aade3..dafc841f7c16 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
@@ -121,7 +121,7 @@
sound {
compatible = "amlogic,gx-sound-card";
- model = "GX-P230-Q200";
+ model = "P230-Q200";
audio-aux-devs = <&dio2133>;
audio-widgets = "Line", "Lineout";
audio-routing = "AU2 INL", "ACODEC LOLP",
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index 726b91d3a905..0edd137151f8 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -13,7 +13,6 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/meson-gxbb-power.h>
-#include <dt-bindings/reset/amlogic,meson-gxbb-reset.h>
#include <dt-bindings/thermal/thermal.h>
/ {
@@ -576,8 +575,6 @@
interrupt-names = "macirq";
rx-fifo-depth = <4096>;
tx-fifo-depth = <2048>;
- resets = <&reset RESET_ETHERNET>;
- reset-names = "stmmaceth";
power-domains = <&pwrc PWRC_GXBB_ETHERNET_MEM_ID>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
index 089e0636ba8a..7273eed5292c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
@@ -134,7 +134,7 @@
sound {
compatible = "amlogic,gx-sound-card";
- model = "GXBB-NANOPI-K2";
+ model = "NANOPI-K2";
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
index b5b11cb9f393..f887bfb445fd 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
@@ -143,7 +143,7 @@
sound {
compatible = "amlogic,gx-sound-card";
- model = "GXBB-NEXBOX-A95X";
+ model = "NEXBOX-A95X";
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index c04ef57f7b3b..bfaf7f41a2d6 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -176,7 +176,7 @@
sound {
compatible = "amlogic,gx-sound-card";
- model = "GXBB-ODROID-C2";
+ model = "ODROID-C2";
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts
index 0c1570153842..58733017eda8 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts
@@ -15,7 +15,7 @@
sound {
compatible = "amlogic,gx-sound-card";
- model = "GXBB-WETEK-HUB";
+ model = "WETEK-HUB";
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts
index f2562c7de67c..6eae692792ec 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts
@@ -50,7 +50,7 @@
sound {
compatible = "amlogic,gx-sound-card";
- model = "GXBB-WETEK-PLAY2";
+ model = "WETEK-PLAY2";
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts
index 9e43f4dca90d..2d769203f671 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts
@@ -118,7 +118,7 @@
sound {
compatible = "amlogic,gx-sound-card";
- model = "GXL-LIBRETECH-S805X-AC";
+ model = "LIBRETECH-AC";
audio-widgets = "Speaker", "9J5-3 LEFT",
"Speaker", "9J5-2 RIGHT";
audio-routing = "9J5-3 LEFT", "ACODEC LOLN",
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
index 6fe589cd2ba2..60feac0179c0 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
@@ -42,10 +42,10 @@
};
};
- pwmleds {
+ led-controller {
compatible = "pwm-leds";
- power {
+ led-1 {
label = "vim:red:power";
pwms = <&pwm_AO_ab 1 7812500 0>;
max-brightness = <255>;
@@ -66,7 +66,7 @@
sound {
compatible = "amlogic,gx-sound-card";
- model = "GXL-KHADAS-VIM1";
+ model = "KHADAS-VIM";
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc-v2.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc-v2.dts
index 9a3c08e6e6cc..93d8f8aff70d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc-v2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc-v2.dts
@@ -159,7 +159,7 @@
sound {
compatible = "amlogic,gx-sound-card";
- model = "GXL-LIBRETECH-S905X-CC-V2";
+ model = "LIBRETECH-CC-V2";
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
index 5ae7bb6209cb..82bfabfbd39c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
@@ -135,7 +135,7 @@
sound {
compatible = "amlogic,gx-sound-card";
- model = "GXL-LIBRETECH-S905X-CC";
+ model = "LIBRETECH-CC";
audio-aux-devs = <&dio2133>;
audio-widgets = "Line", "Lineout";
audio-routing = "AU2 INL", "ACODEC LOLN",
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
index bf9877d33427..18a4b7a6c5df 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
@@ -81,10 +81,10 @@
};
};
- pwmleds {
+ led-controller {
compatible = "pwm-leds";
- power {
+ led-1 {
label = "vim:red:power";
pwms = <&pwm_AO_ab 1 7812500 0>;
max-brightness = <255>;
@@ -148,7 +148,7 @@
sound {
compatible = "amlogic,gx-sound-card";
- model = "GXM-KHADAS-VIM2";
+ model = "KHADAS-VIM2";
assigned-clocks = <&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>,
<&clkc CLKID_MPLL2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
index 8f8656262ae7..877e3b989203 100644
--- a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
@@ -170,7 +170,7 @@
sound {
compatible = "amlogic,axg-sound-card";
- model = "G12B-KHADAS-VIM3";
+ model = "KHADAS-VIM3";
audio-aux-devs = <&tdmout_a>;
audio-routing = "TDMOUT_A IN 0", "FRDDR_A OUT 0",
"TDMOUT_A IN 1", "FRDDR_B OUT 0",
@@ -283,12 +283,12 @@
};
&ethmac {
- pinctrl-0 = <&eth_pins>, <&eth_rgmii_pins>;
- pinctrl-names = "default";
- status = "okay";
- phy-mode = "rgmii";
- phy-handle = <&external_phy>;
- amlogic,tx-delay-ns = <2>;
+ pinctrl-0 = <&eth_pins>, <&eth_rgmii_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ phy-mode = "rgmii";
+ phy-handle = <&external_phy>;
+ amlogic,tx-delay-ns = <2>;
};
&frddr_a {
@@ -354,9 +354,9 @@
};
&pwm_ef {
- status = "okay";
- pinctrl-0 = <&pwm_e_pins>;
- pinctrl-names = "default";
+ status = "okay";
+ pinctrl-0 = <&pwm_e_pins>;
+ pinctrl-names = "default";
};
&saradc {
@@ -450,7 +450,6 @@
};
};
-
&tdmif_a {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
index 4b517ca72059..06de0b1ce726 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
@@ -89,13 +89,12 @@
status = "okay";
};
-&sd_emmc_a {
- sd-uhs-sdr50;
-};
-
&usb {
phys = <&usb2_phy0>, <&usb2_phy1>;
phy-names = "usb2-phy0", "usb2-phy1";
};
*/
+&sd_emmc_a {
+ sd-uhs-sdr50;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts
index cf5a98f0e47c..b2a4e823c1d8 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts
@@ -5,34 +5,12 @@
/dts-v1/;
-#include "meson-sm1.dtsi"
-#include <dt-bindings/gpio/meson-g12a-gpio.h>
-#include <dt-bindings/leds/common.h>
-#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
+#include "meson-sm1-odroid.dtsi"
/ {
compatible = "hardkernel,odroid-c4", "amlogic,sm1";
model = "Hardkernel ODROID-C4";
- aliases {
- serial0 = &uart_AO;
- ethernet0 = &ethmac;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
- memory@0 {
- device_type = "memory";
- reg = <0x0 0x0 0x0 0x40000000>;
- };
-
- emmc_pwrseq: emmc-pwrseq {
- compatible = "mmc-pwrseq-emmc";
- reset-gpios = <&gpio BOOT_12 GPIO_ACTIVE_LOW>;
- };
-
leds {
compatible = "gpio-leds";
@@ -45,96 +23,6 @@
};
};
- tflash_vdd: regulator-tflash_vdd {
- compatible = "regulator-fixed";
-
- regulator-name = "TFLASH_VDD";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
-
- gpio = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- regulator-always-on;
- };
-
- tf_io: gpio-regulator-tf_io {
- compatible = "regulator-gpio";
-
- regulator-name = "TF_IO";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
-
- gpios = <&gpio_ao GPIOAO_6 GPIO_ACTIVE_HIGH>;
- gpios-states = <0>;
-
- states = <3300000 0>,
- <1800000 1>;
- };
-
- flash_1v8: regulator-flash_1v8 {
- compatible = "regulator-fixed";
- regulator-name = "FLASH_1V8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- vin-supply = <&vcc_3v3>;
- regulator-always-on;
- };
-
- main_12v: regulator-main_12v {
- compatible = "regulator-fixed";
- regulator-name = "12V";
- regulator-min-microvolt = <12000000>;
- regulator-max-microvolt = <12000000>;
- regulator-always-on;
- };
-
- vcc_5v: regulator-vcc_5v {
- compatible = "regulator-fixed";
- regulator-name = "5V";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- regulator-always-on;
- vin-supply = <&main_12v>;
- };
-
- vcc_1v8: regulator-vcc_1v8 {
- compatible = "regulator-fixed";
- regulator-name = "VCC_1V8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- vin-supply = <&vcc_3v3>;
- regulator-always-on;
- };
-
- vcc_3v3: regulator-vcc_3v3 {
- compatible = "regulator-fixed";
- regulator-name = "VCC_3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- vin-supply = <&vddao_3v3>;
- regulator-always-on;
- /* FIXME: actually controlled by VDDCPU_B_EN */
- };
-
- vddcpu: regulator-vddcpu {
- /*
- * MP8756GD Regulator.
- */
- compatible = "pwm-regulator";
-
- regulator-name = "VDDCPU";
- regulator-min-microvolt = <721000>;
- regulator-max-microvolt = <1022000>;
-
- vin-supply = <&main_12v>;
-
- pwms = <&pwm_AO_cd 1 1250 0>;
- pwm-dutycycle-range = <100 0>;
-
- regulator-boot-on;
- regulator-always-on;
- };
-
hub_5v: regulator-hub_5v {
compatible = "regulator-fixed";
regulator-name = "HUB_5V";
@@ -147,215 +35,12 @@
enable-active-high;
};
- usb_pwr_en: regulator-usb_pwr_en {
- compatible = "regulator-fixed";
- regulator-name = "USB_PWR_EN";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- vin-supply = <&vcc_5v>;
-
- /* Connected to the microUSB port power enable */
- gpio = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
- vddao_1v8: regulator-vddao_1v8 {
- compatible = "regulator-fixed";
- regulator-name = "VDDAO_1V8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- vin-supply = <&vddao_3v3>;
- regulator-always-on;
- };
-
- vddao_3v3: regulator-vddao_3v3 {
- compatible = "regulator-fixed";
- regulator-name = "VDDAO_3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- vin-supply = <&main_12v>;
- regulator-always-on;
- };
-
- hdmi-connector {
- compatible = "hdmi-connector";
- type = "a";
-
- port {
- hdmi_connector_in: endpoint {
- remote-endpoint = <&hdmi_tx_tmds_out>;
- };
- };
- };
-
sound {
- compatible = "amlogic,axg-sound-card";
- model = "SM1-ODROID-C4";
- audio-aux-devs = <&tdmout_b>;
- audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
- "TDMOUT_B IN 1", "FRDDR_B OUT 1",
- "TDMOUT_B IN 2", "FRDDR_C OUT 1",
- "TDM_B Playback", "TDMOUT_B OUT";
-
- assigned-clocks = <&clkc CLKID_MPLL2>,
- <&clkc CLKID_MPLL0>,
- <&clkc CLKID_MPLL1>;
- assigned-clock-parents = <0>, <0>, <0>;
- assigned-clock-rates = <294912000>,
- <270950400>,
- <393216000>;
- status = "okay";
-
- dai-link-0 {
- sound-dai = <&frddr_a>;
- };
-
- dai-link-1 {
- sound-dai = <&frddr_b>;
- };
-
- dai-link-2 {
- sound-dai = <&frddr_c>;
- };
-
- /* 8ch hdmi interface */
- dai-link-3 {
- sound-dai = <&tdmif_b>;
- dai-format = "i2s";
- dai-tdm-slot-tx-mask-0 = <1 1>;
- dai-tdm-slot-tx-mask-1 = <1 1>;
- dai-tdm-slot-tx-mask-2 = <1 1>;
- dai-tdm-slot-tx-mask-3 = <1 1>;
- mclk-fs = <256>;
-
- codec {
- sound-dai = <&tohdmitx TOHDMITX_I2S_IN_B>;
- };
- };
-
- /* hdmi glue */
- dai-link-4 {
- sound-dai = <&tohdmitx TOHDMITX_I2S_OUT>;
-
- codec {
- sound-dai = <&hdmi_tx>;
- };
- };
- };
-};
-
-&arb {
- status = "okay";
-};
-
-&clkc_audio {
- status = "okay";
-};
-
-&cpu0 {
- cpu-supply = <&vddcpu>;
- operating-points-v2 = <&cpu_opp_table>;
- clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
-};
-
-&cpu1 {
- cpu-supply = <&vddcpu>;
- operating-points-v2 = <&cpu_opp_table>;
- clocks = <&clkc CLKID_CPU1_CLK>;
- clock-latency = <50000>;
-};
-
-&cpu2 {
- cpu-supply = <&vddcpu>;
- operating-points-v2 = <&cpu_opp_table>;
- clocks = <&clkc CLKID_CPU2_CLK>;
- clock-latency = <50000>;
-};
-
-&cpu3 {
- cpu-supply = <&vddcpu>;
- operating-points-v2 = <&cpu_opp_table>;
- clocks = <&clkc CLKID_CPU3_CLK>;
- clock-latency = <50000>;
-};
-
-&ext_mdio {
- external_phy: ethernet-phy@0 {
- /* Realtek RTL8211F (0x001cc916) */
- reg = <0>;
- max-speed = <1000>;
-
- interrupt-parent = <&gpio_intc>;
- /* MAC_INTR on GPIOZ_14 */
- interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
+ model = "ODROID-C4";
};
};
-&ethmac {
- pinctrl-0 = <&eth_pins>, <&eth_rgmii_pins>;
- pinctrl-names = "default";
- status = "okay";
- phy-mode = "rgmii";
- phy-handle = <&external_phy>;
- amlogic,tx-delay-ns = <2>;
-};
-
-&frddr_a {
- status = "okay";
-};
-
-&frddr_b {
- status = "okay";
-};
-
-&frddr_c {
- status = "okay";
-};
-
&gpio {
- gpio-line-names =
- /* GPIOZ */
- "", "", "", "", "", "", "", "",
- "", "", "", "", "", "", "", "",
- /* GPIOH */
- "", "", "", "", "",
- "PIN_36", /* GPIOH_5 */
- "PIN_26", /* GPIOH_6 */
- "PIN_32", /* GPIOH_7 */
- "",
- /* BOOT */
- "", "", "", "", "", "", "", "",
- "", "", "", "", "", "", "", "",
- /* GPIOC */
- "", "", "", "", "", "", "", "",
- /* GPIOA */
- "", "", "", "", "", "", "", "",
- "", "", "", "", "", "",
- "PIN_27", /* GPIOA_14 */
- "PIN_28", /* GPIOA_15 */
- /* GPIOX */
- "PIN_16", /* GPIOX_0 */
- "PIN_18", /* GPIOX_1 */
- "PIN_22", /* GPIOX_2 */
- "PIN_11", /* GPIOX_3 */
- "PIN_13", /* GPIOX_4 */
- "PIN_7", /* GPIOX_5 */
- "PIN_33", /* GPIOX_6 */
- "PIN_15", /* GPIOX_7 */
- "PIN_19", /* GPIOX_8 */
- "PIN_21", /* GPIOX_9 */
- "PIN_24", /* GPIOX_10 */
- "PIN_23", /* GPIOX_11 */
- "PIN_8", /* GPIOX_12 */
- "PIN_10", /* GPIOX_13 */
- "PIN_29", /* GPIOX_14 */
- "PIN_31", /* GPIOX_15 */
- "PIN_12", /* GPIOX_16 */
- "PIN_3", /* GPIOX_17 */
- "PIN_5", /* GPIOX_18 */
- "PIN_35"; /* GPIOX_19 */
-
/*
* WARNING: The USB Hub on the Odroid-C4 needs a reset signal
* to be turned high in order to be detected by the USB Controller
@@ -370,120 +55,10 @@
};
};
-&gpio_ao {
- gpio-line-names =
- /* GPIOAO */
- "", "", "", "",
- "PIN_47", /* GPIOAO_4 */
- "", "",
- "PIN_45", /* GPIOAO_7 */
- "PIN_46", /* GPIOAO_8 */
- "PIN_44", /* GPIOAO_9 */
- "PIN_42", /* GPIOAO_10 */
- "",
- /* GPIOE */
- "", "", "";
-};
-
-&hdmi_tx {
- status = "okay";
- pinctrl-0 = <&hdmitx_hpd_pins>, <&hdmitx_ddc_pins>;
- pinctrl-names = "default";
- hdmi-supply = <&vcc_5v>;
-};
-
-&hdmi_tx_tmds_port {
- hdmi_tx_tmds_out: endpoint {
- remote-endpoint = <&hdmi_connector_in>;
- };
-};
-
&ir {
- status = "okay";
- pinctrl-0 = <&remote_input_ao_pins>;
- pinctrl-names = "default";
linux,rc-map-name = "rc-odroid";
};
-&pwm_AO_cd {
- pinctrl-0 = <&pwm_ao_d_e_pins>;
- pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
- status = "okay";
-};
-
-&saradc {
- status = "okay";
-};
-
-/* SD card */
-&sd_emmc_b {
- status = "okay";
- pinctrl-0 = <&sdcard_c_pins>;
- pinctrl-1 = <&sdcard_clk_gate_c_pins>;
- pinctrl-names = "default", "clk-gate";
-
- bus-width = <4>;
- cap-sd-highspeed;
- max-frequency = <200000000>;
- sd-uhs-sdr12;
- sd-uhs-sdr25;
- sd-uhs-sdr50;
- sd-uhs-sdr104;
- disable-wp;
-
- cd-gpios = <&gpio GPIOC_6 GPIO_ACTIVE_LOW>;
- vmmc-supply = <&tflash_vdd>;
- vqmmc-supply = <&tf_io>;
-};
-
-/* eMMC */
-&sd_emmc_c {
- status = "okay";
- pinctrl-0 = <&emmc_ctrl_pins>, <&emmc_data_8b_pins>, <&emmc_ds_pins>;
- pinctrl-1 = <&emmc_clk_gate_pins>;
- pinctrl-names = "default", "clk-gate";
-
- bus-width = <8>;
- cap-mmc-highspeed;
- mmc-ddr-1_8v;
- mmc-hs200-1_8v;
- max-frequency = <200000000>;
- disable-wp;
-
- mmc-pwrseq = <&emmc_pwrseq>;
- vmmc-supply = <&vcc_3v3>;
- vqmmc-supply = <&flash_1v8>;
-};
-
-&tdmif_b {
- status = "okay";
-};
-
-&tdmout_b {
- status = "okay";
-};
-
-&tohdmitx {
- status = "okay";
-};
-
-&uart_AO {
- status = "okay";
- pinctrl-0 = <&uart_ao_a_pins>;
- pinctrl-names = "default";
-};
-
-&usb {
- status = "okay";
- vbus-supply = <&usb_pwr_en>;
-};
-
-&usb2_phy0 {
- phy-supply = <&vcc_5v>;
-};
-
&usb2_phy1 {
/* Enable the hub which is connected to this port */
phy-supply = <&hub_5v>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts
new file mode 100644
index 000000000000..bf15700c4b15
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2020 Dongjin Kim <tobetter@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "meson-sm1-odroid.dtsi"
+
+/ {
+ compatible = "hardkernel,odroid-hc4", "amlogic,sm1";
+ model = "Hardkernel ODROID-HC4";
+
+ aliases {
+ rtc0 = &rtc;
+ rtc1 = &vrtc;
+ };
+
+ fan0: pwm-fan {
+ compatible = "pwm-fan";
+ #cooling-cells = <2>;
+ cooling-min-state = <0>;
+ cooling-max-state = <3>;
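+ /* cooling-levels are PWM duty-cycle values (0-255) for states 0-3 */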
+ cooling-levels = <0 120 170 220>;
+ pwms = <&pwm_cd 1 40000 0>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-blue {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio_ao GPIOAO_11 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ panic-indicator;
+ };
+
+ led-red {
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_POWER;
+ gpios = <&gpio_ao GPIOAO_7 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
+
+ sound {
+ model = "ODROID-HC4";
+ };
+};
+
+&cpu_thermal {
+ cooling-maps {
+ map {
+ trip = <&cpu_passive>;
+ cooling-device = <&fan0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+};
+
+&ir {
+ linux,rc-map-name = "rc-odroid";
+};
+
+&i2c2 {
+ status = "okay";
+ pinctrl-0 = <&i2c2_sda_x_pins>, <&i2c2_sck_x_pins>;
+ pinctrl-names = "default";
+
+ rtc: rtc@51 {
+ status = "okay";
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ wakeup-source;
+ };
+};
+
+&pcie {
+ status = "okay";
+ reset-gpios = <&gpio GPIOH_4 GPIO_ACTIVE_LOW>;
+};
+
+&pwm_cd {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm_d_x6_pins>;
+};
+
+&sd_emmc_c {
+ status = "disabled";
+};
+
+&usb {
+ phys = <&usb2_phy0>, <&usb2_phy1>;
+ phy-names = "usb2-phy0", "usb2-phy1";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
new file mode 100644
index 000000000000..d14716b3d0f1
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2020 Dongjin Kim <tobetter@gmail.com>
+ */
+
+#include "meson-sm1.dtsi"
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
+
+/ {
+ aliases {
+ serial0 = &uart_AO;
+ ethernet0 = &ethmac;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x40000000>;
+ };
+
+ emmc_pwrseq: emmc-pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&gpio BOOT_12 GPIO_ACTIVE_LOW>;
+ };
+
+ tflash_vdd: regulator-tflash_vdd {
+ compatible = "regulator-fixed";
+
+ regulator-name = "TFLASH_VDD";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&gpio_ao GPIOAO_3 GPIO_OPEN_DRAIN>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ tf_io: gpio-regulator-tf_io {
+ compatible = "regulator-gpio";
+
+ regulator-name = "TF_IO";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpios = <&gpio_ao GPIOAO_6 GPIO_ACTIVE_HIGH>;
+ gpios-states = <0>;
+
+ states = <3300000 0>,
+ <1800000 1>;
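+ /* state 0 (GPIO low) selects 3.3 V, state 1 (GPIO high) selects 1.8 V */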
+ };
+
+ flash_1v8: regulator-flash_1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "FLASH_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3>;
+ regulator-always-on;
+ };
+
+ main_12v: regulator-main_12v {
+ compatible = "regulator-fixed";
+ regulator-name = "12V";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ };
+
+ vcc_5v: regulator-vcc_5v {
+ compatible = "regulator-fixed";
+ regulator-name = "5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ vin-supply = <&main_12v>;
+ };
+
+ vcc_1v8: regulator-vcc_1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3>;
+ regulator-always-on;
+ };
+
+ vcc_3v3: regulator-vcc_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-always-on;
+ /* FIXME: actually controlled by VDDCPU_B_EN */
+ };
+
+ vddcpu: regulator-vddcpu {
+ /*
+ * MP8756GD Regulator.
+ */
+ compatible = "pwm-regulator";
+
+ regulator-name = "VDDCPU";
+ regulator-min-microvolt = <721000>;
+ regulator-max-microvolt = <1022000>;
+
+ vin-supply = <&main_12v>;
+
+ pwms = <&pwm_AO_cd 1 1250 0>;
+ pwm-dutycycle-range = <100 0>;
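+ /* 100% duty gives the 721 mV minimum, 0% duty the 1022 mV maximum */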
+
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ usb_pwr_en: regulator-usb_pwr_en {
+ compatible = "regulator-fixed";
+ regulator-name = "USB_PWR_EN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_5v>;
+
+ /* Connected to the microUSB port power enable */
+ gpio = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vddao_1v8: regulator-vddao_1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-always-on;
+ };
+
+ vddao_3v3: regulator-vddao_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&main_12v>;
+ regulator-always-on;
+ };
+
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_tmds_out>;
+ };
+ };
+ };
+
+ sound {
+ compatible = "amlogic,axg-sound-card";
+ audio-aux-devs = <&tdmout_b>;
+ audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
+ "TDMOUT_B IN 1", "FRDDR_B OUT 1",
+ "TDMOUT_B IN 2", "FRDDR_C OUT 1",
+ "TDM_B Playback", "TDMOUT_B OUT";
+
+ assigned-clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+ assigned-clock-parents = <0>, <0>, <0>;
+ assigned-clock-rates = <294912000>,
+ <270950400>,
+ <393216000>;
+ status = "okay";
+
+ dai-link-0 {
+ sound-dai = <&frddr_a>;
+ };
+
+ dai-link-1 {
+ sound-dai = <&frddr_b>;
+ };
+
+ dai-link-2 {
+ sound-dai = <&frddr_c>;
+ };
+
+ /* 8ch hdmi interface */
+ dai-link-3 {
+ sound-dai = <&tdmif_b>;
+ dai-format = "i2s";
+ dai-tdm-slot-tx-mask-0 = <1 1>;
+ dai-tdm-slot-tx-mask-1 = <1 1>;
+ dai-tdm-slot-tx-mask-2 = <1 1>;
+ dai-tdm-slot-tx-mask-3 = <1 1>;
+ mclk-fs = <256>;
+
+ codec {
+ sound-dai = <&tohdmitx TOHDMITX_I2S_IN_B>;
+ };
+ };
+
+ /* hdmi glue */
+ dai-link-4 {
+ sound-dai = <&tohdmitx TOHDMITX_I2S_OUT>;
+
+ codec {
+ sound-dai = <&hdmi_tx>;
+ };
+ };
+ };
+};
+
+&arb {
+ status = "okay";
+};
+
+&clkc_audio {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&vddcpu>;
+ operating-points-v2 = <&cpu_opp_table>;
+ clocks = <&clkc CLKID_CPU_CLK>;
+ clock-latency = <50000>;
+};
+
+&cpu1 {
+ cpu-supply = <&vddcpu>;
+ operating-points-v2 = <&cpu_opp_table>;
+ clocks = <&clkc CLKID_CPU1_CLK>;
+ clock-latency = <50000>;
+};
+
+&cpu2 {
+ cpu-supply = <&vddcpu>;
+ operating-points-v2 = <&cpu_opp_table>;
+ clocks = <&clkc CLKID_CPU2_CLK>;
+ clock-latency = <50000>;
+};
+
+&cpu3 {
+ cpu-supply = <&vddcpu>;
+ operating-points-v2 = <&cpu_opp_table>;
+ clocks = <&clkc CLKID_CPU3_CLK>;
+ clock-latency = <50000>;
+};
+
+&ext_mdio {
+ external_phy: ethernet-phy@0 {
+ /* Realtek RTL8211F (0x001cc916) */
+ reg = <0>;
+ max-speed = <1000>;
+
+ interrupt-parent = <&gpio_intc>;
+ /* MAC_INTR on GPIOZ_14 */
+ interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+&ethmac {
+ pinctrl-0 = <&eth_pins>, <&eth_rgmii_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ phy-mode = "rgmii";
+ phy-handle = <&external_phy>;
+ amlogic,tx-delay-ns = <2>;
+};
+
+&frddr_a {
+ status = "okay";
+};
+
+&frddr_b {
+ status = "okay";
+};
+
+&frddr_c {
+ status = "okay";
+};
+
+&gpio {
+ gpio-line-names =
+ /* GPIOZ */
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ /* GPIOH */
+ "", "", "", "", "",
+ "PIN_36", /* GPIOH_5 */
+ "PIN_26", /* GPIOH_6 */
+ "PIN_32", /* GPIOH_7 */
+ "",
+ /* BOOT */
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ /* GPIOC */
+ "", "", "", "", "", "", "", "",
+ /* GPIOA */
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "",
+ "PIN_27", /* GPIOA_14 */
+ "PIN_28", /* GPIOA_15 */
+ /* GPIOX */
+ "PIN_16", /* GPIOX_0 */
+ "PIN_18", /* GPIOX_1 */
+ "PIN_22", /* GPIOX_2 */
+ "PIN_11", /* GPIOX_3 */
+ "PIN_13", /* GPIOX_4 */
+ "PIN_7", /* GPIOX_5 */
+ "PIN_33", /* GPIOX_6 */
+ "PIN_15", /* GPIOX_7 */
+ "PIN_19", /* GPIOX_8 */
+ "PIN_21", /* GPIOX_9 */
+ "PIN_24", /* GPIOX_10 */
+ "PIN_23", /* GPIOX_11 */
+ "PIN_8", /* GPIOX_12 */
+ "PIN_10", /* GPIOX_13 */
+ "PIN_29", /* GPIOX_14 */
+ "PIN_31", /* GPIOX_15 */
+ "PIN_12", /* GPIOX_16 */
+ "PIN_3", /* GPIOX_17 */
+ "PIN_5", /* GPIOX_18 */
+ "PIN_35"; /* GPIOX_19 */
+};
+
+&gpio_ao {
+ gpio-line-names =
+ /* GPIOAO */
+ "", "", "", "",
+ "PIN_47", /* GPIOAO_4 */
+ "", "",
+ "PIN_45", /* GPIOAO_7 */
+ "PIN_46", /* GPIOAO_8 */
+ "PIN_44", /* GPIOAO_9 */
+ "PIN_42", /* GPIOAO_10 */
+ "",
+ /* GPIOE */
+ "", "", "";
+};
+
+&hdmi_tx {
+ status = "okay";
+ pinctrl-0 = <&hdmitx_hpd_pins>, <&hdmitx_ddc_pins>;
+ pinctrl-names = "default";
+ hdmi-supply = <&vcc_5v>;
+};
+
+&hdmi_tx_tmds_port {
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+};
+
+&ir {
+ status = "okay";
+ pinctrl-0 = <&remote_input_ao_pins>;
+ pinctrl-names = "default";
+};
+
+&pwm_AO_cd {
+ pinctrl-0 = <&pwm_ao_d_e_pins>;
+ pinctrl-names = "default";
+ clocks = <&xtal>;
+ clock-names = "clkin1";
+ status = "okay";
+};
+
+&saradc {
+ status = "okay";
+};
+
+/* SD card */
+&sd_emmc_b {
+ status = "okay";
+ pinctrl-0 = <&sdcard_c_pins>;
+ pinctrl-1 = <&sdcard_clk_gate_c_pins>;
+ pinctrl-names = "default", "clk-gate";
+
+ bus-width = <4>;
+ cap-sd-highspeed;
+ max-frequency = <200000000>;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ disable-wp;
+
+ cd-gpios = <&gpio GPIOC_6 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&tflash_vdd>;
+ vqmmc-supply = <&tf_io>;
+};
+
+/* eMMC */
+&sd_emmc_c {
+ status = "okay";
+ pinctrl-0 = <&emmc_ctrl_pins>, <&emmc_data_8b_pins>, <&emmc_ds_pins>;
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
+
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ max-frequency = <200000000>;
+ disable-wp;
+
+ mmc-pwrseq = <&emmc_pwrseq>;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&flash_1v8>;
+};
+
+&tdmif_b {
+ status = "okay";
+};
+
+&tdmout_b {
+ status = "okay";
+};
+
+&tohdmitx {
+ status = "okay";
+};
+
+&uart_AO {
+ status = "okay";
+ pinctrl-0 = <&uart_ao_a_pins>;
+ pinctrl-names = "default";
+};
+
+&usb {
+ status = "okay";
+ vbus-supply = <&usb_pwr_en>;
+};
+
+&usb2_phy0 {
+ phy-supply = <&vcc_5v>;
+};
+
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
index 5ab139a34c01..2194a778973f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
@@ -101,20 +101,20 @@
};
};
- leds {
+ led-controller-1 {
compatible = "gpio-leds";
- led-bluetooth {
+ led-1 {
label = "sei610:blue:bt";
gpios = <&gpio GPIOC_7 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
default-state = "off";
};
};
- pwmleds {
+ led-controller-2 {
compatible = "pwm-leds";
- power {
+ led-2 {
label = "sei610:red:power";
pwms = <&pwm_AO_ab 0 30518 0>;
max-brightness = <255>;
@@ -220,7 +220,7 @@
sound {
compatible = "amlogic,axg-sound-card";
- model = "SM1-SEI610";
+ model = "SEI610";
audio-aux-devs = <&tdmout_a>, <&tdmout_b>,
<&tdmin_a>, <&tdmin_b>;
audio-routing = "TDMOUT_A IN 0", "FRDDR_A OUT 0",
diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/Makefile b/arch/arm64/boot/dts/broadcom/bcm4908/Makefile
index ef26c23603ce..ebebc0cd421f 100644
--- a/arch/arm64/boot/dts/broadcom/bcm4908/Makefile
+++ b/arch/arm64/boot/dts/broadcom/bcm4908/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_ARCH_BCM4908) += bcm4906-netgear-r8000p.dtb
dtb-$(CONFIG_ARCH_BCM4908) += bcm4908-asus-gt-ac5300.dtb
diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-netgear-r8000p.dts b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-netgear-r8000p.dts
new file mode 100644
index 000000000000..ee3ed612274c
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-netgear-r8000p.dts
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+#include "bcm4906.dtsi"
+
+/ {
+ compatible = "netgear,r8000p", "brcm,bcm4906", "brcm,bcm4908";
+ model = "Netgear R8000P";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00 0x00 0x00 0x20000000>;
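+ /* 0x20000000 bytes = 512 MiB of RAM */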
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ wps {
+ function = LED_FUNCTION_WPS;
+ color = <LED_COLOR_ID_WHITE>;
+ gpios = <&gpio0 10 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&nandcs {
+ nand-ecc-strength = <4>;
+ nand-ecc-step-size = <512>;
+ nand-on-flash-bbt;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "cferom";
+ reg = <0x0 0x100000>;
+ };
+
+ partition@100000 {
+ label = "firmware";
+ reg = <0x100000 0x4400000>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906.dtsi b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906.dtsi
new file mode 100644
index 000000000000..66023d553524
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906.dtsi
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+
+#include "bcm4908.dtsi"
+
+/ {
+ cpus {
+ /delete-node/ cpu@2;
+
+ /delete-node/ cpu@3;
+ };
+
+ pmu {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>, <&cpu1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
index 13c6b86eef21..6e4ad66ff536 100644
--- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
+++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
@@ -44,6 +44,57 @@
};
};
+&ports {
+ port@0 {
+ label = "lan2";
+ };
+
+ port@1 {
+ label = "lan1";
+ };
+
+ port@2 {
+ label = "lan6";
+ };
+
+ port@3 {
+ label = "lan5";
+ };
+
+ /* External BCM53134S switch */
+ port@7 {
+ label = "sw";
+ reg = <7>;
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+};
+
+&mdio {
+ /* lan8 */
+ ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ /* lan7 */
+ ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ /* lan4 */
+ ethernet-phy@2 {
+ reg = <2>;
+ };
+
+ /* lan3 */
+ ethernet-phy@3 {
+ reg = <3>;
+ };
+};
+
&nandcs {
nand-ecc-strength = <4>;
nand-ecc-step-size = <512>;
diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
index f873dc44ce9c..9354077f74cd 100644
--- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
@@ -108,7 +108,7 @@
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0x00 0x00 0x80000000 0x10000>;
+ ranges = <0x00 0x00 0x80000000 0x281000>;
usb@c300 {
compatible = "generic-ehci";
@@ -130,6 +130,104 @@
interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
+
+ ethernet-switch@80000 {
+ compatible = "simple-bus";
+ #size-cells = <1>;
+ #address-cells = <1>;
+ ranges = <0 0x80000 0x50000>;
+
+ ethernet-switch@0 {
+ compatible = "brcm,bcm4908-switch";
+ reg = <0x0 0x40000>,
+ <0x40000 0x110>,
+ <0x40340 0x30>,
+ <0x40380 0x30>,
+ <0x40600 0x34>,
+ <0x40800 0x208>;
+ reg-names = "core", "reg", "intrl2_0",
+ "intrl2_1", "fcb", "acb";
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
+ brcm,num-gphy = <5>;
+ brcm,num-rgmii-ports = <2>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports: ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ phy-mode = "internal";
+ phy-handle = <&phy8>;
+ };
+
+ port@1 {
+ reg = <1>;
+ phy-mode = "internal";
+ phy-handle = <&phy9>;
+ };
+
+ port@2 {
+ reg = <2>;
+ phy-mode = "internal";
+ phy-handle = <&phy10>;
+ };
+
+ port@3 {
+ reg = <3>;
+ phy-mode = "internal";
+ phy-handle = <&phy11>;
+ };
+ };
+ };
+
+ mdio: mdio@405c0 {
+ compatible = "brcm,unimac-mdio";
+ reg = <0x405c0 0x8>;
+ reg-names = "mdio";
+ #size-cells = <0>;
+ #address-cells = <1>;
+
+ phy8: ethernet-phy@8 {
+ reg = <8>;
+ };
+
+ phy9: ethernet-phy@9 {
+ reg = <9>;
+ };
+
+ phy10: ethernet-phy@a {
+ reg = <10>;
+ };
+
+ phy11: ethernet-phy@b {
+ reg = <11>;
+ };
+
+ phy12: ethernet-phy@c {
+ reg = <12>;
+ };
+ };
+ };
+
+ procmon: syscon@280000 {
+ compatible = "simple-bus";
+ reg = <0x280000 0x1000>;
+ ranges;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ power-controller@2800c0 {
+ compatible = "brcm,bcm4908-pmb";
+ reg = <0x2800c0 0x40>;
+ #power-domain-cells = <1>;
+ };
+ };
};
bus@ff800000 {
@@ -164,7 +262,7 @@
nand@1800 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "brcm,brcmnand-v7.1", "brcm,brcmnand";
+ compatible = "brcm,nand-bcm63138", "brcm,brcmnand-v7.1", "brcm,brcmnand";
reg = <0x1800 0x600>, <0x2000 0x10>;
reg-names = "nand", "nand-int-base";
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
@@ -177,6 +275,21 @@
};
};
+ misc@2600 {
+ compatible = "brcm,misc", "simple-mfd";
+ reg = <0x2600 0xe4>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00 0x2600 0xe4>;
+
+ reset-controller@2644 {
+ compatible = "brcm,bcm4908-misc-pcie-reset";
+ reg = <0x44 0x04>;
+ #reset-cells = <1>;
+ };
+ };
+
reboot {
compatible = "syscon-reboot";
regmap = <&timer>;
diff --git a/arch/arm64/boot/dts/broadcom/stingray/bcm958742-base.dtsi b/arch/arm64/boot/dts/broadcom/stingray/bcm958742-base.dtsi
index 43aa5e9c0020..8fe7325cfbb2 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/bcm958742-base.dtsi
+++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742-base.dtsi
@@ -56,70 +56,6 @@
};
};
-&sata0 {
- status = "okay";
-};
-
-&sata_phy0{
- status = "okay";
-};
-
-&sata1 {
- status = "okay";
-};
-
-&sata_phy1{
- status = "okay";
-};
-
-&sata2 {
- status = "okay";
-};
-
-&sata_phy2{
- status = "okay";
-};
-
-&sata3 {
- status = "okay";
-};
-
-&sata_phy3{
- status = "okay";
-};
-
-&sata4 {
- status = "okay";
-};
-
-&sata_phy4{
- status = "okay";
-};
-
-&sata5 {
- status = "okay";
-};
-
-&sata_phy5{
- status = "okay";
-};
-
-&sata6 {
- status = "okay";
-};
-
-&sata_phy6{
- status = "okay";
-};
-
-&sata7 {
- status = "okay";
-};
-
-&sata_phy7{
- status = "okay";
-};
-
&pwm {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi
deleted file mode 100644
index 8c68e0c26f1b..000000000000
--- a/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Broadcom. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
- sata {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x0 0x67d00000 0x00800000>;
-
- sata0: ahci@0 {
- compatible = "brcm,iproc-ahci", "generic-ahci";
- reg = <0x00000000 0x1000>;
- reg-names = "ahci";
- interrupts = <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata0_port0: sata-port@0 {
- reg = <0>;
- phys = <&sata0_phy0>;
- phy-names = "sata-phy";
- };
- };
-
- sata_phy0: sata_phy@2100 {
- compatible = "brcm,iproc-sr-sata-phy";
- reg = <0x00002100 0x1000>;
- reg-names = "phy";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata0_phy0: sata-phy@0 {
- reg = <0>;
- #phy-cells = <0>;
- };
- };
-
- sata1: ahci@10000 {
- compatible = "brcm,iproc-ahci", "generic-ahci";
- reg = <0x00010000 0x1000>;
- reg-names = "ahci";
- interrupts = <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata1_port0: sata-port@0 {
- reg = <0>;
- phys = <&sata1_phy0>;
- phy-names = "sata-phy";
- };
- };
-
- sata_phy1: sata_phy@12100 {
- compatible = "brcm,iproc-sr-sata-phy";
- reg = <0x00012100 0x1000>;
- reg-names = "phy";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata1_phy0: sata-phy@0 {
- reg = <0>;
- #phy-cells = <0>;
- };
- };
-
- sata2: ahci@20000 {
- compatible = "brcm,iproc-ahci", "generic-ahci";
- reg = <0x00020000 0x1000>;
- reg-names = "ahci";
- interrupts = <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata2_port0: sata-port@0 {
- reg = <0>;
- phys = <&sata2_phy0>;
- phy-names = "sata-phy";
- };
- };
-
- sata_phy2: sata_phy@22100 {
- compatible = "brcm,iproc-sr-sata-phy";
- reg = <0x00022100 0x1000>;
- reg-names = "phy";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata2_phy0: sata-phy@0 {
- reg = <0>;
- #phy-cells = <0>;
- };
- };
-
- sata3: ahci@30000 {
- compatible = "brcm,iproc-ahci", "generic-ahci";
- reg = <0x00030000 0x1000>;
- reg-names = "ahci";
- interrupts = <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata3_port0: sata-port@0 {
- reg = <0>;
- phys = <&sata3_phy0>;
- phy-names = "sata-phy";
- };
- };
-
- sata_phy3: sata_phy@32100 {
- compatible = "brcm,iproc-sr-sata-phy";
- reg = <0x00032100 0x1000>;
- reg-names = "phy";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata3_phy0: sata-phy@0 {
- reg = <0>;
- #phy-cells = <0>;
- };
- };
-
- sata4: ahci@100000 {
- compatible = "brcm,iproc-ahci", "generic-ahci";
- reg = <0x00100000 0x1000>;
- reg-names = "ahci";
- interrupts = <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata4_port0: sata-port@0 {
- reg = <0>;
- phys = <&sata4_phy0>;
- phy-names = "sata-phy";
- };
- };
-
- sata_phy4: sata_phy@102100 {
- compatible = "brcm,iproc-sr-sata-phy";
- reg = <0x00102100 0x1000>;
- reg-names = "phy";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata4_phy0: sata-phy@0 {
- reg = <0>;
- #phy-cells = <0>;
- };
- };
-
- sata5: ahci@110000 {
- compatible = "brcm,iproc-ahci", "generic-ahci";
- reg = <0x00110000 0x1000>;
- reg-names = "ahci";
- interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata5_port0: sata-port@0 {
- reg = <0>;
- phys = <&sata5_phy0>;
- phy-names = "sata-phy";
- };
- };
-
- sata_phy5: sata_phy@112100 {
- compatible = "brcm,iproc-sr-sata-phy";
- reg = <0x00112100 0x1000>;
- reg-names = "phy";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata5_phy0: sata-phy@0 {
- reg = <0>;
- #phy-cells = <0>;
- };
- };
-
- sata6: ahci@120000 {
- compatible = "brcm,iproc-ahci", "generic-ahci";
- reg = <0x00120000 0x1000>;
- reg-names = "ahci";
- interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata6_port0: sata-port@0 {
- reg = <0>;
- phys = <&sata6_phy0>;
- phy-names = "sata-phy";
- };
- };
-
- sata_phy6: sata_phy@122100 {
- compatible = "brcm,iproc-sr-sata-phy";
- reg = <0x00122100 0x1000>;
- reg-names = "phy";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata6_phy0: sata-phy@0 {
- reg = <0>;
- #phy-cells = <0>;
- };
- };
-
- sata7: ahci@130000 {
- compatible = "brcm,iproc-ahci", "generic-ahci";
- reg = <0x00130000 0x1000>;
- reg-names = "ahci";
- interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata7_port0: sata-port@0 {
- reg = <0>;
- phys = <&sata7_phy0>;
- phy-names = "sata-phy";
- };
- };
-
- sata_phy7: sata_phy@132100 {
- compatible = "brcm,iproc-sr-sata-phy";
- reg = <0x00132100 0x1000>;
- reg-names = "phy";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- sata7_phy0: sata-phy@0 {
- reg = <0>;
- #phy-cells = <0>;
- };
- };
- };
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi
index aef8f2b00778..5401a646c840 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi
@@ -4,11 +4,16 @@
*/
usb {
compatible = "simple-bus";
- dma-ranges;
#address-cells = <2>;
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0x68500000 0x0 0x00400000>;
+ /*
+ * Internally, the USB bus to the interconnect can only address up
+ * to 40 bits
+ */
+ dma-ranges = <0 0 0 0 0x100 0x0>;
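+ /* size cells <0x100 0x0> = 0x100_00000000 bytes, i.e. a 2^40 (1 TiB) window */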
+
usbphy0: usb-phy@0 {
compatible = "brcm,sr-usb-combo-phy";
reg = <0x0 0x00000000 0x0 0x100>;
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
index b425b12c3ed2..2ffb2c92182a 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
@@ -285,7 +285,6 @@
};
#include "stingray-fs4.dtsi"
- #include "stingray-sata.dtsi"
#include "stingray-pcie.dtsi"
#include "stingray-usb.dtsi"
@@ -309,12 +308,6 @@
#size-cells = <0>;
};
- mdio@2 { /* SATA */
- reg = <0x2>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
mdio@3 { /* USB */
reg = <0x3>;
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
index 03486a8ffc67..413cac63a1cb 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
@@ -388,7 +388,7 @@
pmic@66 {
compatible = "samsung,s2mps13-pmic";
interrupt-parent = <&gpa0>;
- interrupts = <7 IRQ_TYPE_NONE>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
reg = <0x66>;
samsung,s2mps11-wrstbi-ground;
@@ -793,7 +793,7 @@
compatible = "samsung,s3fwrn5-i2c";
reg = <0x27>;
interrupt-parent = <&gpa1>;
- interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <3 IRQ_TYPE_EDGE_RISING>;
en-gpios = <&gpf1 4 GPIO_ACTIVE_HIGH>;
wake-gpios = <&gpj0 2 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
index 695d4c140646..125c03f351d9 100644
--- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
+++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
@@ -90,7 +90,7 @@
pmic@66 {
compatible = "samsung,s2mps15-pmic";
reg = <0x66>;
- interrupts = <2 IRQ_TYPE_NONE>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-parent = <&gpa0>;
pinctrl-names = "default";
pinctrl-0 = <&pmic_irq>;
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index 6f0777ee6cd6..6438db3822f8 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -6,6 +6,7 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-rdb.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-kbox-a-230-ls.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-sl28.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-sl28-var1.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-sl28-var2.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-sl28-var3-ads2.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-sl28-var4.dtb
@@ -33,16 +34,23 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mm-beacon-kit.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-ddr4-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-kontron-n801x-s.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-nitrogen-r2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-var-som-symphony.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw71xx-0x.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw72xx-0x.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw73xx-0x.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mn-beacon-kit.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-ddr4-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-var-som-symphony.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-evk.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-phyboard-pollux-rdk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-hummingboard-pulse.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-librem5-devkit.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-librem5-r2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-librem5-r3.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mq-librem5-r4.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-nitrogen.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-phanbell.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-pico-pi.dtb
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
index 67702667ed8a..2517528f684f 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
@@ -7,6 +7,7 @@
*/
/dts-v1/;
+#include <dt-bindings/interrupt-controller/irq.h>
#include "fsl-ls1012a.dtsi"
/ {
@@ -57,6 +58,26 @@
};
};
+&dspi {
+ bus-num = <0>;
+ status = "okay";
+
+ serial@0 {
+ compatible = "nxp,sc16is740";
+ reg = <0>;
+ spi-max-frequency = <4000000>;
+ clocks = <&sc16is7xx_clk>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+
+ sc16is7xx_clk: clock-sc16is7xx {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+ };
+};
+
&duart0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
index 449475a97bf1..e22c5e77fecd 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
@@ -13,6 +13,11 @@
model = "LS1012A QDS Board";
compatible = "fsl,ls1012a-qds", "fsl,ls1012a";
+ aliases {
+ mmc0 = &esdhc0;
+ mmc1 = &esdhc1;
+ };
+
sys_mclk: clock-mclk {
compatible = "fixed-clock";
#clock-cells = <0>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
index d45c17620b98..79f155dedb2d 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
@@ -7,11 +7,17 @@
*/
/dts-v1/;
+#include <dt-bindings/interrupt-controller/irq.h>
#include "fsl-ls1012a.dtsi"
/ {
model = "LS1012A RDB Board";
compatible = "fsl,ls1012a-rdb", "fsl,ls1012a";
+
+ aliases {
+ mmc0 = &esdhc0;
+ mmc1 = &esdhc1;
+ };
};
&duart0 {
@@ -33,6 +39,50 @@
&i2c0 {
status = "okay";
+
+ accelerometer@1e {
+ compatible = "nxp,fxos8700";
+ reg = <0x1e>;
+ interrupt-parent = <&gpio26>;
+ interrupts = <13 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "INT1";
+ };
+
+ gyroscope@20 {
+ compatible = "nxp,fxas21002c";
+ reg = <0x20>;
+ };
+
+ gpio@24 {
+ compatible = "nxp,pcal9555a";
+ reg = <0x24>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio@25 {
+ compatible = "nxp,pcal9555a";
+ reg = <0x25>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio26: gpio@26 {
+ compatible = "nxp,pcal9555a";
+ reg = <0x26>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ current-sensor@40 {
+ compatible = "ti,ina220";
+ reg = <0x40>;
+ shunt-resistor = <2000>;
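+ /* value is in micro-ohms: 2000 uOhm = 2 mOhm shunt */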
+ };
};
&qspi {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
index 626b709d1fb9..7de6b376d792 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
@@ -7,6 +7,7 @@
*
*/
+#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/thermal/thermal.h>
@@ -34,7 +35,7 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
#cooling-cells = <2>;
cpu-idle-states = <&CPU_PH20>;
};
@@ -148,7 +149,10 @@
reg-names = "QuadSPI", "QuadSPI-memory";
interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "qspi_en", "qspi";
- clocks = <&clockgen 4 0>, <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
status = "disabled";
};
@@ -156,7 +160,8 @@
compatible = "fsl,ls1012a-esdhc", "fsl,esdhc";
reg = <0x0 0x1560000 0x0 0x10000>;
interrupts = <0 62 0x4>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
big-endian;
@@ -174,7 +179,8 @@
compatible = "fsl,ls1012a-esdhc", "fsl,esdhc";
reg = <0x0 0x1580000 0x0 0x10000>;
interrupts = <0 65 0x4>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
big-endian;
@@ -341,7 +347,8 @@
#size-cells = <0>;
reg = <0x0 0x2180000 0x0 0x10000>;
interrupts = <0 56 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
status = "disabled";
};
@@ -351,7 +358,8 @@
#size-cells = <0>;
reg = <0x0 0x2190000 0x0 0x10000>;
interrupts = <0 57 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
status = "disabled";
};
@@ -362,7 +370,8 @@
reg = <0x0 0x2100000 0x0 0x10000>;
interrupts = <0 64 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "dspi";
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
spi-num-chipselects = <5>;
big-endian;
status = "disabled";
@@ -372,7 +381,8 @@
compatible = "fsl,ns16550", "ns16550a";
reg = <0x00 0x21c0500 0x0 0x100>;
interrupts = <0 54 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
status = "disabled";
};
@@ -380,7 +390,8 @@
compatible = "fsl,ns16550", "ns16550a";
reg = <0x00 0x21c0600 0x0 0x100>;
interrupts = <0 54 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
status = "disabled";
};
@@ -409,7 +420,7 @@
"fsl,imx21-wdt";
reg = <0x0 0x2ad0000 0x0 0x10000>;
interrupts = <0 83 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL QORIQ_CLK_PLL_DIV(1)>;
big-endian;
};
@@ -418,8 +429,14 @@
compatible = "fsl,vf610-sai";
reg = <0x0 0x2b50000 0x0 0x10000>;
interrupts = <0 148 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>,
- <&clockgen 4 3>, <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
dma-names = "tx", "rx";
dmas = <&edma0 1 47>,
@@ -432,8 +449,14 @@
compatible = "fsl,vf610-sai";
reg = <0x0 0x2b60000 0x0 0x10000>;
interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>,
- <&clockgen 4 3>, <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
dma-names = "tx", "rx";
dmas = <&edma0 1 45>,
@@ -453,8 +476,10 @@
dma-channels = <32>;
big-endian;
clock-names = "dmamux0", "dmamux1";
- clocks = <&clockgen 4 3>,
- <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
};
usb0: usb@2f00000 {
@@ -473,7 +498,8 @@
<0x0 0x20140520 0x0 0x4>;
reg-names = "ahci", "sata-ecc";
interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
dma-coherent;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-kbox-a-230-ls.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-kbox-a-230-ls.dts
index d66d8b2c3d1a..6b575efd84a7 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-kbox-a-230-ls.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-kbox-a-230-ls.dts
@@ -107,3 +107,7 @@
ethernet = <&enetc_port2>;
status = "okay";
};
+
+&sata {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
new file mode 100644
index 000000000000..6c309b97587d
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Device Tree file for the Kontron SMARC-sAL28 board.
+ *
+ * This is for network variant 1, which has one ethernet port. It differs
+ * from the base variant, which also has one port, in that here the port
+ * is connected via RGMII. This port is not TSN aware.
+ * None of the four SerDes lanes are used by the module; instead they are
+ * all brought out to the carrier for customer use.
+ *
+ * Copyright (C) 2020 Michael Walle <michael@walle.cc>
+ *
+ */
+
+/dts-v1/;
+#include "fsl-ls1028a-kontron-sl28.dts"
+#include <dt-bindings/net/qca-ar803x.h>
+
+/ {
+ model = "Kontron SMARC-sAL28 (4 Lanes)";
+ compatible = "kontron,sl28-var1", "kontron,sl28", "fsl,ls1028a";
+};
+
+&enetc_port0 {
+ status = "disabled";
+ /*
+ * Delete both the phy-handle property that points to the old phy0
+ * label and the mdio node that contains the old PHY node with that label.
+ */
+ /delete-property/ phy-handle;
+ /delete-node/ mdio;
+};
+
+&enetc_port1 {
+ phy-handle = <&phy0>;
+ phy-connection-type = "rgmii-id";
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: ethernet-phy@4 {
+ reg = <0x4>;
+ eee-broken-1000t;
+ eee-broken-100tx;
+ qca,clk-out-frequency = <125000000>;
+ qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
+ vddio-supply = <&vddh>;
+
+ vddio: vddio-regulator {
+ regulator-name = "VDDIO";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vddh: vddh-regulator {
+ regulator-name = "VDDH";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts
index c45d7b40e374..ed4e69e87e30 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts
@@ -8,6 +8,8 @@
*/
/dts-v1/;
+
+#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
#include "fsl-ls1028a-kontron-sl28.dts"
/ {
@@ -120,7 +122,8 @@
mclk: clock-mclk@f130080 {
compatible = "fsl,vf610-sai-clock";
reg = <0x0 0xf130080 0x0 0x80>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
#clock-cells = <0>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
index c0786b713791..fbcba9cb8503 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
@@ -109,6 +109,14 @@
};
};
+&can0 {
+ status = "okay";
+};
+
+&can1 {
+ status = "okay";
+};
+
&dspi0 {
bus-num = <0>;
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
index c1d1ba459307..41ae6e7675ba 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
@@ -85,6 +85,22 @@
};
};
+&can0 {
+ status = "okay";
+
+ can-transceiver {
+ max-bitrate = <5000000>;
+ };
+};
+
+&can1 {
+ status = "okay";
+
+ can-transceiver {
+ max-bitrate = <5000000>;
+ };
+};
+
&esdhc {
sd-uhs-sdr104;
sd-uhs-sdr50;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index 60ff19fa53b4..262fbad8f0ec 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -8,6 +8,7 @@
*
*/
+#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/thermal/thermal.h>
@@ -30,7 +31,7 @@
compatible = "arm,cortex-a72";
reg = <0x0>;
enable-method = "psci";
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
next-level-cache = <&l2>;
cpu-idle-states = <&CPU_PW20>;
#cooling-cells = <2>;
@@ -41,7 +42,7 @@
compatible = "arm,cortex-a72";
reg = <0x1>;
enable-method = "psci";
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
next-level-cache = <&l2>;
cpu-idle-states = <&CPU_PW20>;
#cooling-cells = <2>;
@@ -101,7 +102,7 @@
reboot {
compatible ="syscon-reboot";
regmap = <&rst>;
- offset = <0xb0>;
+ offset = <0>;
mask = <0x02>;
};
@@ -205,9 +206,20 @@
};
dcfg: syscon@1e00000 {
- compatible = "fsl,ls1028a-dcfg", "syscon";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,ls1028a-dcfg", "syscon", "simple-mfd";
reg = <0x0 0x1e00000 0x0 0x10000>;
+ ranges = <0x0 0x0 0x1e00000 0x10000>;
little-endian;
+
+ fspi_clk: clock-controller@900 {
+ compatible = "fsl,ls1028a-flexspi-clk";
+ reg = <0x900 0x4>;
+ #clock-cells = <0>;
+ clocks = <&clockgen QORIQ_CLK_HWACCEL 0>;
+ clock-output-names = "fspi_clk";
+ };
};
rst: syscon@1e60000 {
@@ -235,7 +247,8 @@
#size-cells = <0>;
reg = <0x0 0x2000000 0x0 0x10000>;
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
status = "disabled";
};
@@ -245,7 +258,8 @@
#size-cells = <0>;
reg = <0x0 0x2010000 0x0 0x10000>;
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
status = "disabled";
};
@@ -255,7 +269,8 @@
#size-cells = <0>;
reg = <0x0 0x2020000 0x0 0x10000>;
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
status = "disabled";
};
@@ -265,7 +280,8 @@
#size-cells = <0>;
reg = <0x0 0x2030000 0x0 0x10000>;
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
status = "disabled";
};
@@ -275,7 +291,8 @@
#size-cells = <0>;
reg = <0x0 0x2040000 0x0 0x10000>;
interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
status = "disabled";
};
@@ -285,7 +302,8 @@
#size-cells = <0>;
reg = <0x0 0x2050000 0x0 0x10000>;
interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
status = "disabled";
};
@@ -295,7 +313,8 @@
#size-cells = <0>;
reg = <0x0 0x2060000 0x0 0x10000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
status = "disabled";
};
@@ -305,7 +324,8 @@
#size-cells = <0>;
reg = <0x0 0x2070000 0x0 0x10000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
status = "disabled";
};
@@ -317,7 +337,7 @@
<0x0 0x20000000 0x0 0x10000000>;
reg-names = "fspi_base", "fspi_mmap";
interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 2 0>, <&clockgen 2 0>;
+ clocks = <&fspi_clk>, <&fspi_clk>;
clock-names = "fspi_en", "fspi";
status = "disabled";
};
@@ -329,7 +349,8 @@
reg = <0x0 0x2100000 0x0 0x10000>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "dspi";
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
dmas = <&edma0 0 62>, <&edma0 0 60>;
dma-names = "tx", "rx";
spi-num-chipselects = <4>;
@@ -344,7 +365,8 @@
reg = <0x0 0x2110000 0x0 0x10000>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "dspi";
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
dmas = <&edma0 0 58>, <&edma0 0 56>;
dma-names = "tx", "rx";
spi-num-chipselects = <4>;
@@ -359,7 +381,8 @@
reg = <0x0 0x2120000 0x0 0x10000>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "dspi";
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
dmas = <&edma0 0 54>, <&edma0 0 2>;
dma-names = "tx", "rx";
spi-num-chipselects = <3>;
@@ -372,7 +395,7 @@
reg = <0x0 0x2140000 0x0 0x10000>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <0>; /* fixed up by bootloader */
- clocks = <&clockgen 2 1>;
+ clocks = <&clockgen QORIQ_CLK_HWACCEL 1>;
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
little-endian;
@@ -385,7 +408,7 @@
reg = <0x0 0x2150000 0x0 0x10000>;
interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <0>; /* fixed up by bootloader */
- clocks = <&clockgen 2 1>;
+ clocks = <&clockgen QORIQ_CLK_HWACCEL 1>;
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
broken-cd;
@@ -395,19 +418,25 @@
};
can0: can@2180000 {
- compatible = "fsl,ls1028ar1-flexcan", "fsl,lx2160ar1-flexcan";
+ compatible = "fsl,lx2160ar1-flexcan";
reg = <0x0 0x2180000 0x0 0x10000>;
interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&sysclk>, <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "ipg", "per";
status = "disabled";
};
can1: can@2190000 {
- compatible = "fsl,ls1028ar1-flexcan", "fsl,lx2160ar1-flexcan";
+ compatible = "fsl,lx2160ar1-flexcan";
reg = <0x0 0x2190000 0x0 0x10000>;
interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&sysclk>, <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "ipg", "per";
status = "disabled";
};
@@ -416,7 +445,8 @@
compatible = "fsl,ns16550", "ns16550a";
reg = <0x00 0x21c0500 0x0 0x100>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
status = "disabled";
};
@@ -424,7 +454,8 @@
compatible = "fsl,ns16550", "ns16550a";
reg = <0x00 0x21c0600 0x0 0x100>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
status = "disabled";
};
@@ -433,7 +464,8 @@
compatible = "fsl,ls1028a-lpuart";
reg = <0x0 0x2260000 0x0 0x1000>;
interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "ipg";
dma-names = "rx","tx";
dmas = <&edma0 1 32>,
@@ -445,7 +477,8 @@
compatible = "fsl,ls1028a-lpuart";
reg = <0x0 0x2270000 0x0 0x1000>;
interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "ipg";
dma-names = "rx","tx";
dmas = <&edma0 1 30>,
@@ -457,7 +490,8 @@
compatible = "fsl,ls1028a-lpuart";
reg = <0x0 0x2280000 0x0 0x1000>;
interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "ipg";
dma-names = "rx","tx";
dmas = <&edma0 1 28>,
@@ -469,7 +503,8 @@
compatible = "fsl,ls1028a-lpuart";
reg = <0x0 0x2290000 0x0 0x1000>;
interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "ipg";
dma-names = "rx","tx";
dmas = <&edma0 1 26>,
@@ -481,7 +516,8 @@
compatible = "fsl,ls1028a-lpuart";
reg = <0x0 0x22a0000 0x0 0x1000>;
interrupts = <GIC_SPI 236 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "ipg";
dma-names = "rx","tx";
dmas = <&edma0 1 24>,
@@ -493,7 +529,8 @@
compatible = "fsl,ls1028a-lpuart";
reg = <0x0 0x22b0000 0x0 0x1000>;
interrupts = <GIC_SPI 237 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "ipg";
dma-names = "rx","tx";
dmas = <&edma0 1 22>,
@@ -512,8 +549,10 @@
interrupt-names = "edma-tx", "edma-err";
dma-channels = <32>;
clock-names = "dmamux0", "dmamux1";
- clocks = <&clockgen 4 1>,
- <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
};
gpio1: gpio@2300000 {
@@ -575,7 +614,8 @@
<0x7 0x100520 0x0 0x4>;
reg-names = "ahci", "sata-ecc";
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
status = "disabled";
};
@@ -747,14 +787,20 @@
cluster1_core0_watchdog: watchdog@c000000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc000000 0x0 0x1000>;
- clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
clock-names = "wdog_clk", "apb_pclk";
};
cluster1_core1_watchdog: watchdog@c010000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0 0xc010000 0x0 0x1000>;
- clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
clock-names = "wdog_clk", "apb_pclk";
};
@@ -763,8 +809,14 @@
compatible = "fsl,vf610-sai";
reg = <0x0 0xf100000 0x0 0x10000>;
interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>, <&clockgen 4 1>,
- <&clockgen 4 1>, <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
dma-names = "tx", "rx";
dmas = <&edma0 1 4>,
@@ -778,8 +830,14 @@
compatible = "fsl,vf610-sai";
reg = <0x0 0xf110000 0x0 0x10000>;
interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>, <&clockgen 4 1>,
- <&clockgen 4 1>, <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
dma-names = "tx", "rx";
dmas = <&edma0 1 6>,
@@ -793,8 +851,14 @@
compatible = "fsl,vf610-sai";
reg = <0x0 0xf120000 0x0 0x10000>;
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>, <&clockgen 4 1>,
- <&clockgen 4 1>, <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
dma-names = "tx", "rx";
dmas = <&edma0 1 8>,
@@ -808,8 +872,14 @@
compatible = "fsl,vf610-sai";
reg = <0x0 0xf130000 0x0 0x10000>;
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>, <&clockgen 4 1>,
- <&clockgen 4 1>, <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
dma-names = "tx", "rx";
dmas = <&edma0 1 10>,
@@ -823,8 +893,14 @@
compatible = "fsl,vf610-sai";
reg = <0x0 0xf140000 0x0 0x10000>;
interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>, <&clockgen 4 1>,
- <&clockgen 4 1>, <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
dma-names = "tx", "rx";
dmas = <&edma0 1 12>,
@@ -838,8 +914,14 @@
compatible = "fsl,vf610-sai";
reg = <0x0 0xf150000 0x0 0x10000>;
interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>, <&clockgen 4 1>,
- <&clockgen 4 1>, <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
dma-names = "tx", "rx";
dmas = <&edma0 1 14>,
@@ -960,7 +1042,7 @@
ethernet@0,4 {
compatible = "fsl,enetc-ptp";
reg = <0x000400 0 0 0 0>;
- clocks = <&clockgen 2 3>;
+ clocks = <&clockgen QORIQ_CLK_HWACCEL 3>;
little-endian;
fsl,extts-fifo;
};
@@ -1055,8 +1137,10 @@
interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
<0 223 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "DE", "SE";
- clocks = <&dpclk>, <&clockgen 2 2>, <&clockgen 2 2>,
- <&clockgen 2 2>;
+ clocks = <&dpclk>,
+ <&clockgen QORIQ_CLK_HWACCEL 2>,
+ <&clockgen QORIQ_CLK_HWACCEL 2>,
+ <&clockgen QORIQ_CLK_HWACCEL 2>;
clock-names = "pxlclk", "mclk", "aclk", "pclk";
arm,malidp-output-port-lines = /bits/ 8 <8 8 8>;
arm,malidp-arqos-value = <0xd000d000>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index bbae4b353d3f..5a8a1dc4262d 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -8,6 +8,7 @@
* Mingkai Hu <Mingkai.hu@freescale.com>
*/
+#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
#include <dt-bindings/thermal/thermal.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -44,7 +45,7 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
next-level-cache = <&l2>;
cpu-idle-states = <&CPU_PH20>;
#cooling-cells = <2>;
@@ -54,7 +55,7 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x1>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
next-level-cache = <&l2>;
cpu-idle-states = <&CPU_PH20>;
#cooling-cells = <2>;
@@ -64,7 +65,7 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x2>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
next-level-cache = <&l2>;
cpu-idle-states = <&CPU_PH20>;
#cooling-cells = <2>;
@@ -74,7 +75,7 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x3>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
next-level-cache = <&l2>;
cpu-idle-states = <&CPU_PH20>;
#cooling-cells = <2>;
@@ -311,6 +312,31 @@
compatible = "fsl,ls1043a-scfg", "syscon";
reg = <0x0 0x1570000 0x0 0x10000>;
big-endian;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x1570000 0x10000>;
+
+ extirq: interrupt-controller@1ac {
+ compatible = "fsl,ls1043a-extirq";
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x1ac 4>;
+ interrupt-map =
+ <0 0 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <1 0 &gic GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <2 0 &gic GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+ <3 0 &gic GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+ <4 0 &gic GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <5 0 &gic GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <6 0 &gic GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <7 0 &gic GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+ <8 0 &gic GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+ <9 0 &gic GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <10 0 &gic GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
+ <11 0 &gic GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map-mask = <0xffffffff 0x0>;
+ };
};
crypto: crypto@1700000 {
@@ -377,7 +403,10 @@
reg-names = "QuadSPI", "QuadSPI-memory";
interrupts = <0 99 0x4>;
clock-names = "qspi_en", "qspi";
- clocks = <&clockgen 4 0>, <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
status = "disabled";
};
@@ -476,7 +505,8 @@
reg = <0x0 0x2100000 0x0 0x10000>;
interrupts = <0 64 0x4>;
clock-names = "dspi";
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
spi-num-chipselects = <5>;
big-endian;
status = "disabled";
@@ -489,7 +519,8 @@
reg = <0x0 0x2110000 0x0 0x10000>;
interrupts = <0 65 0x4>;
clock-names = "dspi";
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
spi-num-chipselects = <5>;
big-endian;
status = "disabled";
@@ -502,7 +533,8 @@
reg = <0x0 0x2180000 0x0 0x10000>;
interrupts = <0 56 0x4>;
clock-names = "i2c";
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
dmas = <&edma0 1 39>,
<&edma0 1 38>;
dma-names = "tx", "rx";
@@ -516,7 +548,8 @@
reg = <0x0 0x2190000 0x0 0x10000>;
interrupts = <0 57 0x4>;
clock-names = "i2c";
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
status = "disabled";
};
@@ -527,7 +560,8 @@
reg = <0x0 0x21a0000 0x0 0x10000>;
interrupts = <0 58 0x4>;
clock-names = "i2c";
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
status = "disabled";
};
@@ -538,7 +572,8 @@
reg = <0x0 0x21b0000 0x0 0x10000>;
interrupts = <0 59 0x4>;
clock-names = "i2c";
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
status = "disabled";
};
@@ -546,28 +581,32 @@
compatible = "fsl,ns16550", "ns16550a";
reg = <0x00 0x21c0500 0x0 0x100>;
interrupts = <0 54 0x4>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
};
duart1: serial@21c0600 {
compatible = "fsl,ns16550", "ns16550a";
reg = <0x00 0x21c0600 0x0 0x100>;
interrupts = <0 54 0x4>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
};
duart2: serial@21d0500 {
compatible = "fsl,ns16550", "ns16550a";
reg = <0x0 0x21d0500 0x0 0x100>;
interrupts = <0 55 0x4>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
};
duart3: serial@21d0600 {
compatible = "fsl,ns16550", "ns16550a";
reg = <0x0 0x21d0600 0x0 0x100>;
interrupts = <0 55 0x4>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
};
gpio1: gpio@2300000 {
@@ -679,7 +718,7 @@
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2950000 0x0 0x1000>;
interrupts = <0 48 0x4>;
- clocks = <&clockgen 0 0>;
+ clocks = <&clockgen QORIQ_CLK_SYSCLK 0>;
clock-names = "ipg";
status = "disabled";
};
@@ -688,7 +727,8 @@
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2960000 0x0 0x1000>;
interrupts = <0 49 0x4>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
clock-names = "ipg";
status = "disabled";
};
@@ -697,7 +737,8 @@
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2970000 0x0 0x1000>;
interrupts = <0 50 0x4>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
clock-names = "ipg";
status = "disabled";
};
@@ -706,7 +747,8 @@
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2980000 0x0 0x1000>;
interrupts = <0 51 0x4>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
clock-names = "ipg";
status = "disabled";
};
@@ -715,7 +757,8 @@
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2990000 0x0 0x1000>;
interrupts = <0 52 0x4>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
clock-names = "ipg";
status = "disabled";
};
@@ -724,7 +767,8 @@
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x29a0000 0x0 0x1000>;
interrupts = <0 53 0x4>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
clock-names = "ipg";
status = "disabled";
};
@@ -733,7 +777,8 @@
compatible = "fsl,ls1043a-wdt", "fsl,imx21-wdt";
reg = <0x0 0x2ad0000 0x0 0x10000>;
interrupts = <0 83 0x4>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
clock-names = "wdog";
big-endian;
};
@@ -750,8 +795,10 @@
dma-channels = <32>;
big-endian;
clock-names = "dmamux0", "dmamux1";
- clocks = <&clockgen 4 0>,
- <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
};
usb0: usb@2f00000 {
@@ -793,7 +840,8 @@
<0x0 0x20140520 0x0 0x4>;
reg-names = "ahci", "sata-ecc";
interrupts = <0 69 0x4>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
dma-coherent;
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
index d53ccc56bb63..60acdf0b689e 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
@@ -3,6 +3,7 @@
* Device Tree Include file for Freescale Layerscape-1046A family SoC.
*
* Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2019-2020 NXP
*
* Mingkai Hu <mingkai.hu@nxp.com>
*/
@@ -74,6 +75,8 @@
rtc@51 {
compatible = "nxp,pcf2129";
reg = <0x51>;
+ /* IRQ_RTC_B -> IRQ05, active low */
+ interrupts-extended = <&extirq 5 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index 025e1f587662..1d6dfd189c7f 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -8,6 +8,7 @@
* Mingkai Hu <mingkai.hu@nxp.com>
*/
+#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/thermal/thermal.h>
@@ -39,7 +40,7 @@
device_type = "cpu";
compatible = "arm,cortex-a72";
reg = <0x0>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
next-level-cache = <&l2>;
cpu-idle-states = <&CPU_PH20>;
#cooling-cells = <2>;
@@ -49,7 +50,7 @@
device_type = "cpu";
compatible = "arm,cortex-a72";
reg = <0x1>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
next-level-cache = <&l2>;
cpu-idle-states = <&CPU_PH20>;
#cooling-cells = <2>;
@@ -59,7 +60,7 @@
device_type = "cpu";
compatible = "arm,cortex-a72";
reg = <0x2>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
next-level-cache = <&l2>;
cpu-idle-states = <&CPU_PH20>;
#cooling-cells = <2>;
@@ -69,7 +70,7 @@
device_type = "cpu";
compatible = "arm,cortex-a72";
reg = <0x3>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
next-level-cache = <&l2>;
cpu-idle-states = <&CPU_PH20>;
#cooling-cells = <2>;
@@ -295,7 +296,10 @@
reg-names = "QuadSPI", "QuadSPI-memory";
interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "qspi_en", "qspi";
- clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
status = "disabled";
};
@@ -303,7 +307,7 @@
compatible = "fsl,ls1046a-esdhc", "fsl,esdhc";
reg = <0x0 0x1560000 0x0 0x10000>;
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 2 1>;
+ clocks = <&clockgen QORIQ_CLK_HWACCEL 1>;
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
big-endian;
@@ -314,6 +318,31 @@
compatible = "fsl,ls1046a-scfg", "syscon";
reg = <0x0 0x1570000 0x0 0x10000>;
big-endian;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x1570000 0x10000>;
+
+ extirq: interrupt-controller@1ac {
+ compatible = "fsl,ls1046a-extirq", "fsl,ls1043a-extirq";
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x1ac 4>;
+ interrupt-map =
+ <0 0 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <1 0 &gic GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <2 0 &gic GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+ <3 0 &gic GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+ <4 0 &gic GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <5 0 &gic GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <6 0 &gic GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <7 0 &gic GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+ <8 0 &gic GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+ <9 0 &gic GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <10 0 &gic GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
+ <11 0 &gic GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map-mask = <0xffffffff 0x0>;
+ };
};
crypto: crypto@1700000 {
@@ -385,7 +414,7 @@
dcfg: dcfg@1ee0000 {
compatible = "fsl,ls1046a-dcfg", "syscon";
- reg = <0x0 0x1ee0000 0x0 0x10000>;
+ reg = <0x0 0x1ee0000 0x0 0x1000>;
big-endian;
};
@@ -454,7 +483,8 @@
reg = <0x0 0x2100000 0x0 0x10000>;
interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "dspi";
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
spi-num-chipselects = <5>;
big-endian;
status = "disabled";
@@ -466,7 +496,8 @@
#size-cells = <0>;
reg = <0x0 0x2180000 0x0 0x10000>;
interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
dmas = <&edma0 1 39>,
<&edma0 1 38>;
dma-names = "tx", "rx";
@@ -479,7 +510,8 @@
#size-cells = <0>;
reg = <0x0 0x2190000 0x0 0x10000>;
interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
status = "disabled";
};
@@ -489,7 +521,8 @@
#size-cells = <0>;
reg = <0x0 0x21a0000 0x0 0x10000>;
interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
status = "disabled";
};
@@ -499,7 +532,8 @@
#size-cells = <0>;
reg = <0x0 0x21b0000 0x0 0x10000>;
interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
status = "disabled";
};
@@ -507,7 +541,8 @@
compatible = "fsl,ns16550", "ns16550a";
reg = <0x00 0x21c0500 0x0 0x100>;
interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
status = "disabled";
};
@@ -515,7 +550,8 @@
compatible = "fsl,ns16550", "ns16550a";
reg = <0x00 0x21c0600 0x0 0x100>;
interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
status = "disabled";
};
@@ -523,7 +559,8 @@
compatible = "fsl,ns16550", "ns16550a";
reg = <0x0 0x21d0500 0x0 0x100>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
status = "disabled";
};
@@ -531,7 +568,8 @@
compatible = "fsl,ns16550", "ns16550a";
reg = <0x0 0x21d0600 0x0 0x100>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
status = "disabled";
};
@@ -579,7 +617,8 @@
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2950000 0x0 0x1000>;
interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
clock-names = "ipg";
status = "disabled";
};
@@ -588,7 +627,8 @@
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2960000 0x0 0x1000>;
interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "ipg";
status = "disabled";
};
@@ -597,7 +637,8 @@
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2970000 0x0 0x1000>;
interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "ipg";
status = "disabled";
};
@@ -606,7 +647,8 @@
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2980000 0x0 0x1000>;
interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "ipg";
status = "disabled";
};
@@ -615,7 +657,8 @@
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2990000 0x0 0x1000>;
interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "ipg";
status = "disabled";
};
@@ -624,7 +667,8 @@
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x29a0000 0x0 0x1000>;
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
clock-names = "ipg";
status = "disabled";
};
@@ -633,7 +677,8 @@
compatible = "fsl,imx21-wdt";
reg = <0x0 0x2ad0000 0x0 0x10000>;
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
big-endian;
};
@@ -649,8 +694,10 @@
dma-channels = <32>;
big-endian;
clock-names = "dmamux0", "dmamux1";
- clocks = <&clockgen 4 1>,
- <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
};
usb0: usb@2f00000 {
@@ -689,7 +736,8 @@
<0x0 0x20140520 0x0 0x4>;
reg-names = "ahci", "sata-ecc";
interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
};
msi1: msi-controller@1580000 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
index 528ec72d0b83..bf7b43ab1293 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
@@ -2,7 +2,7 @@
/*
* Device Tree file for NXP LS1088A RDB Board.
*
- * Copyright 2017 NXP
+ * Copyright 2017-2020 NXP
*
* Harninder Rai <harninder.rai@nxp.com>
*
@@ -158,8 +158,8 @@
rtc@51 {
compatible = "nxp,pcf2129";
reg = <0x51>;
- /* IRQ10_B */
- interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>;
+ /* IRQ_RTC_B -> IRQ0_B(CPLD) -> IRQ00(CPU), active low */
+ interrupts-extended = <&extirq 0 IRQ_TYPE_LEVEL_LOW>;
};
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
index 6403455ed039..8ffbc9fde041 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
@@ -7,6 +7,7 @@
* Harninder Rai <harninder.rai@nxp.com>
*
*/
+#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/thermal/thermal.h>
@@ -30,7 +31,7 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
cpu-idle-states = <&CPU_PH20>;
#cooling-cells = <2>;
};
@@ -39,7 +40,7 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x1>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
cpu-idle-states = <&CPU_PH20>;
#cooling-cells = <2>;
};
@@ -48,7 +49,7 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x2>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
cpu-idle-states = <&CPU_PH20>;
#cooling-cells = <2>;
};
@@ -57,7 +58,7 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x3>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
cpu-idle-states = <&CPU_PH20>;
#cooling-cells = <2>;
};
@@ -66,7 +67,7 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x100>;
- clocks = <&clockgen 1 1>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 1>;
cpu-idle-states = <&CPU_PH20>;
#cooling-cells = <2>;
};
@@ -75,7 +76,7 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x101>;
- clocks = <&clockgen 1 1>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 1>;
cpu-idle-states = <&CPU_PH20>;
#cooling-cells = <2>;
};
@@ -84,7 +85,7 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x102>;
- clocks = <&clockgen 1 1>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 1>;
cpu-idle-states = <&CPU_PH20>;
#cooling-cells = <2>;
};
@@ -93,7 +94,7 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x103>;
- clocks = <&clockgen 1 1>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 1>;
cpu-idle-states = <&CPU_PH20>;
#cooling-cells = <2>;
};
@@ -220,6 +221,37 @@
little-endian;
};
+ isc: syscon@1f70000 {
+ compatible = "fsl,ls1088a-isc", "syscon";
+ reg = <0x0 0x1f70000 0x0 0x10000>;
+ little-endian;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x1f70000 0x10000>;
+
+ extirq: interrupt-controller@14 {
+ compatible = "fsl,ls1088a-extirq";
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x14 4>;
+ interrupt-map =
+ <0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <1 0 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <2 0 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <3 0 &gic GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <4 0 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <5 0 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+ <6 0 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <7 0 &gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <8 0 &gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <9 0 &gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <10 0 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <11 0 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map-mask = <0xffffffff 0x0>;
+ };
+ };
+
tmu: tmu@1f80000 {
compatible = "fsl,qoriq-tmu";
reg = <0x0 0x1f80000 0x0 0x10000>;
@@ -279,7 +311,8 @@
reg = <0x0 0x2100000 0x0 0x10000>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "dspi";
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
spi-num-chipselects = <6>;
status = "disabled";
};
@@ -287,7 +320,8 @@
duart0: serial@21c0500 {
compatible = "fsl,ns16550", "ns16550a";
reg = <0x0 0x21c0500 0x0 0x100>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -295,7 +329,8 @@
duart1: serial@21c0600 {
compatible = "fsl,ns16550", "ns16550a";
reg = <0x0 0x21c0600 0x0 0x100>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -360,7 +395,8 @@
#size-cells = <0>;
reg = <0x0 0x2000000 0x0 0x10000>;
interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 7>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(8)>;
status = "disabled";
};
@@ -370,7 +406,8 @@
#size-cells = <0>;
reg = <0x0 0x2010000 0x0 0x10000>;
interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 7>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(8)>;
status = "disabled";
};
@@ -380,7 +417,8 @@
#size-cells = <0>;
reg = <0x0 0x2020000 0x0 0x10000>;
interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 7>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(8)>;
status = "disabled";
};
@@ -390,7 +428,8 @@
#size-cells = <0>;
reg = <0x0 0x2030000 0x0 0x10000>;
interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 7>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(8)>;
status = "disabled";
};
@@ -403,7 +442,10 @@
reg-names = "QuadSPI", "QuadSPI-memory";
interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "qspi_en", "qspi";
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
status = "disabled";
};
@@ -412,7 +454,7 @@
reg = <0x0 0x2140000 0x0 0x10000>;
interrupts = <0 28 0x4>; /* Level high type */
clock-frequency = <0>;
- clocks = <&clockgen 2 1>;
+ clocks = <&clockgen QORIQ_CLK_HWACCEL 1>;
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
little-endian;
@@ -447,7 +489,8 @@
<0x7 0x100520 0x0 0x4>;
reg-names = "ahci", "sata-ecc";
interrupts = <0 133 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
dma-coherent;
status = "disabled";
};
@@ -698,7 +741,8 @@
ptp-timer@8b95000 {
compatible = "fsl,dpaa2-ptp";
reg = <0x0 0x8b95000 0x0 0x100>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(1)>;
little-endian;
fsl,extts-fifo;
};
@@ -787,56 +831,80 @@
cluster1_core0_watchdog: wdt@c000000 {
compatible = "arm,sp805-wdt", "arm,primecell";
reg = <0x0 0xc000000 0x0 0x1000>;
- clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
clock-names = "wdog_clk", "apb_pclk";
};
cluster1_core1_watchdog: wdt@c010000 {
compatible = "arm,sp805-wdt", "arm,primecell";
reg = <0x0 0xc010000 0x0 0x1000>;
- clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
clock-names = "wdog_clk", "apb_pclk";
};
cluster1_core2_watchdog: wdt@c020000 {
compatible = "arm,sp805-wdt", "arm,primecell";
reg = <0x0 0xc020000 0x0 0x1000>;
- clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
clock-names = "wdog_clk", "apb_pclk";
};
cluster1_core3_watchdog: wdt@c030000 {
compatible = "arm,sp805-wdt", "arm,primecell";
reg = <0x0 0xc030000 0x0 0x1000>;
- clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
clock-names = "wdog_clk", "apb_pclk";
};
cluster2_core0_watchdog: wdt@c100000 {
compatible = "arm,sp805-wdt", "arm,primecell";
reg = <0x0 0xc100000 0x0 0x1000>;
- clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
clock-names = "wdog_clk", "apb_pclk";
};
cluster2_core1_watchdog: wdt@c110000 {
compatible = "arm,sp805-wdt", "arm,primecell";
reg = <0x0 0xc110000 0x0 0x1000>;
- clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
clock-names = "wdog_clk", "apb_pclk";
};
cluster2_core2_watchdog: wdt@c120000 {
compatible = "arm,sp805-wdt", "arm,primecell";
reg = <0x0 0xc120000 0x0 0x1000>;
- clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
clock-names = "wdog_clk", "apb_pclk";
};
cluster2_core3_watchdog: wdt@c130000 {
compatible = "arm,sp805-wdt", "arm,primecell";
reg = <0x0 0xc130000 0x0 0x1000>;
- clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
clock-names = "wdog_clk", "apb_pclk";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index f9c1d30cf4a7..76ab68d2de0b 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -9,6 +9,7 @@
*
*/
+#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
#include "fsl-ls208xa.dtsi"
&cpu {
@@ -16,7 +17,7 @@
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <0x0>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
cpu-idle-states = <&CPU_PW20>;
next-level-cache = <&cluster0_l2>;
#cooling-cells = <2>;
@@ -26,7 +27,7 @@
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <0x1>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
cpu-idle-states = <&CPU_PW20>;
next-level-cache = <&cluster0_l2>;
#cooling-cells = <2>;
@@ -36,7 +37,7 @@
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <0x100>;
- clocks = <&clockgen 1 1>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 1>;
cpu-idle-states = <&CPU_PW20>;
next-level-cache = <&cluster1_l2>;
#cooling-cells = <2>;
@@ -46,7 +47,7 @@
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <0x101>;
- clocks = <&clockgen 1 1>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 1>;
cpu-idle-states = <&CPU_PW20>;
next-level-cache = <&cluster1_l2>;
#cooling-cells = <2>;
@@ -56,7 +57,7 @@
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <0x200>;
- clocks = <&clockgen 1 2>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 2>;
cpu-idle-states = <&CPU_PW20>;
next-level-cache = <&cluster2_l2>;
#cooling-cells = <2>;
@@ -66,7 +67,7 @@
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <0x201>;
- clocks = <&clockgen 1 2>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 2>;
cpu-idle-states = <&CPU_PW20>;
next-level-cache = <&cluster2_l2>;
#cooling-cells = <2>;
@@ -76,7 +77,7 @@
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <0x300>;
- clocks = <&clockgen 1 3>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 3>;
next-level-cache = <&cluster3_l2>;
cpu-idle-states = <&CPU_PW20>;
#cooling-cells = <2>;
@@ -86,7 +87,7 @@
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <0x301>;
- clocks = <&clockgen 1 3>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 3>;
cpu-idle-states = <&CPU_PW20>;
next-level-cache = <&cluster3_l2>;
#cooling-cells = <2>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
index a5f668d786b8..da24dc127698 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
@@ -9,6 +9,7 @@
*
*/
+#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
#include "fsl-ls208xa.dtsi"
&cpu {
@@ -16,7 +17,7 @@
device_type = "cpu";
compatible = "arm,cortex-a72";
reg = <0x0>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
cpu-idle-states = <&CPU_PW20>;
next-level-cache = <&cluster0_l2>;
#cooling-cells = <2>;
@@ -26,7 +27,7 @@
device_type = "cpu";
compatible = "arm,cortex-a72";
reg = <0x1>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
cpu-idle-states = <&CPU_PW20>;
next-level-cache = <&cluster0_l2>;
#cooling-cells = <2>;
@@ -36,7 +37,7 @@
device_type = "cpu";
compatible = "arm,cortex-a72";
reg = <0x100>;
- clocks = <&clockgen 1 1>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 1>;
cpu-idle-states = <&CPU_PW20>;
next-level-cache = <&cluster1_l2>;
#cooling-cells = <2>;
@@ -46,7 +47,7 @@
device_type = "cpu";
compatible = "arm,cortex-a72";
reg = <0x101>;
- clocks = <&clockgen 1 1>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 1>;
cpu-idle-states = <&CPU_PW20>;
next-level-cache = <&cluster1_l2>;
#cooling-cells = <2>;
@@ -56,7 +57,7 @@
device_type = "cpu";
compatible = "arm,cortex-a72";
reg = <0x200>;
- clocks = <&clockgen 1 2>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 2>;
next-level-cache = <&cluster2_l2>;
cpu-idle-states = <&CPU_PW20>;
#cooling-cells = <2>;
@@ -66,7 +67,7 @@
device_type = "cpu";
compatible = "arm,cortex-a72";
reg = <0x201>;
- clocks = <&clockgen 1 2>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 2>;
cpu-idle-states = <&CPU_PW20>;
next-level-cache = <&cluster2_l2>;
#cooling-cells = <2>;
@@ -76,7 +77,7 @@
device_type = "cpu";
compatible = "arm,cortex-a72";
reg = <0x300>;
- clocks = <&clockgen 1 3>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 3>;
cpu-idle-states = <&CPU_PW20>;
next-level-cache = <&cluster3_l2>;
#cooling-cells = <2>;
@@ -86,7 +87,7 @@
device_type = "cpu";
compatible = "arm,cortex-a72";
reg = <0x301>;
- clocks = <&clockgen 1 3>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 3>;
cpu-idle-states = <&CPU_PW20>;
next-level-cache = <&cluster3_l2>;
#cooling-cells = <2>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
index d0d670227ae2..4b71c4fcb35f 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
@@ -3,7 +3,7 @@
* Device Tree file for Freescale LS2080A RDB Board.
*
* Copyright 2016 Freescale Semiconductor, Inc.
- * Copyright 2017 NXP
+ * Copyright 2017-2020 NXP
*
* Abhimanyu Saini <abhimanyu.saini@nxp.com>
*
@@ -56,6 +56,8 @@
rtc@68 {
compatible = "dallas,ds3232";
reg = <0x68>;
+ /* IRQ_RTC_B -> IRQ06, active low */
+ interrupts-extended = <&extirq 6 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
index c68901f8c6f0..135ac8210871 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
@@ -3,12 +3,13 @@
* Device Tree Include file for Freescale Layerscape-2080A family SoC.
*
* Copyright 2016 Freescale Semiconductor, Inc.
- * Copyright 2017 NXP
+ * Copyright 2017-2020 NXP
*
* Abhimanyu Saini <abhimanyu.saini@nxp.com>
*
*/
+#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
#include <dt-bindings/thermal/thermal.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -277,6 +278,37 @@
little-endian;
};
+ isc: syscon@1f70000 {
+ compatible = "fsl,ls2080a-isc", "syscon";
+ reg = <0x0 0x1f70000 0x0 0x10000>;
+ little-endian;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x1f70000 0x10000>;
+
+ extirq: interrupt-controller@14 {
+ compatible = "fsl,ls2080a-extirq", "fsl,ls1088a-extirq";
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x14 4>;
+ interrupt-map =
+ <0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <1 0 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <2 0 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <3 0 &gic GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <4 0 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <5 0 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+ <6 0 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <7 0 &gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <8 0 &gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <9 0 &gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <10 0 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <11 0 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map-mask = <0xffffffff 0x0>;
+ };
+ };
+
tmu: tmu@1f80000 {
compatible = "fsl,qoriq-tmu";
reg = <0x0 0x1f80000 0x0 0x10000>;
@@ -325,84 +357,112 @@
serial0: serial@21c0500 {
compatible = "fsl,ns16550", "ns16550a";
reg = <0x0 0x21c0500 0x0 0x100>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
interrupts = <0 32 0x4>; /* Level high type */
};
serial1: serial@21c0600 {
compatible = "fsl,ns16550", "ns16550a";
reg = <0x0 0x21c0600 0x0 0x100>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
interrupts = <0 32 0x4>; /* Level high type */
};
serial2: serial@21d0500 {
compatible = "fsl,ns16550", "ns16550a";
reg = <0x0 0x21d0500 0x0 0x100>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
interrupts = <0 33 0x4>; /* Level high type */
};
serial3: serial@21d0600 {
compatible = "fsl,ns16550", "ns16550a";
reg = <0x0 0x21d0600 0x0 0x100>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
interrupts = <0 33 0x4>; /* Level high type */
};
cluster1_core0_watchdog: wdt@c000000 {
compatible = "arm,sp805-wdt", "arm,primecell";
reg = <0x0 0xc000000 0x0 0x1000>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
clock-names = "wdog_clk", "apb_pclk";
};
cluster1_core1_watchdog: wdt@c010000 {
compatible = "arm,sp805-wdt", "arm,primecell";
reg = <0x0 0xc010000 0x0 0x1000>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
clock-names = "wdog_clk", "apb_pclk";
};
cluster2_core0_watchdog: wdt@c100000 {
compatible = "arm,sp805-wdt", "arm,primecell";
reg = <0x0 0xc100000 0x0 0x1000>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
clock-names = "wdog_clk", "apb_pclk";
};
cluster2_core1_watchdog: wdt@c110000 {
compatible = "arm,sp805-wdt", "arm,primecell";
reg = <0x0 0xc110000 0x0 0x1000>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
clock-names = "wdog_clk", "apb_pclk";
};
cluster3_core0_watchdog: wdt@c200000 {
compatible = "arm,sp805-wdt", "arm,primecell";
reg = <0x0 0xc200000 0x0 0x1000>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
clock-names = "wdog_clk", "apb_pclk";
};
cluster3_core1_watchdog: wdt@c210000 {
compatible = "arm,sp805-wdt", "arm,primecell";
reg = <0x0 0xc210000 0x0 0x1000>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
clock-names = "wdog_clk", "apb_pclk";
};
cluster4_core0_watchdog: wdt@c300000 {
compatible = "arm,sp805-wdt", "arm,primecell";
reg = <0x0 0xc300000 0x0 0x1000>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
clock-names = "wdog_clk", "apb_pclk";
};
cluster4_core1_watchdog: wdt@c310000 {
compatible = "arm,sp805-wdt", "arm,primecell";
reg = <0x0 0xc310000 0x0 0x1000>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
clock-names = "wdog_clk", "apb_pclk";
};
@@ -453,7 +513,8 @@
ptp-timer@8b95000 {
compatible = "fsl,dpaa2-ptp";
reg = <0x0 0x8b95000 0x0 0x100>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
little-endian;
fsl,extts-fifo;
};
@@ -864,7 +925,8 @@
#size-cells = <0>;
reg = <0x0 0x2100000 0x0 0x10000>;
interrupts = <0 26 0x4>; /* Level high type */
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
clock-names = "dspi";
spi-num-chipselects = <5>;
bus-num = <0>;
@@ -875,7 +937,8 @@
compatible = "fsl,ls2080a-esdhc", "fsl,esdhc";
reg = <0x0 0x2140000 0x0 0x10000>;
interrupts = <0 28 0x4>; /* Level high type */
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
little-endian;
@@ -934,7 +997,8 @@
reg = <0x0 0x2000000 0x0 0x10000>;
interrupts = <0 34 0x4>; /* Level high type */
clock-names = "i2c";
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
};
i2c1: i2c@2010000 {
@@ -945,7 +1009,8 @@
reg = <0x0 0x2010000 0x0 0x10000>;
interrupts = <0 34 0x4>; /* Level high type */
clock-names = "i2c";
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
};
i2c2: i2c@2020000 {
@@ -956,7 +1021,8 @@
reg = <0x0 0x2020000 0x0 0x10000>;
interrupts = <0 35 0x4>; /* Level high type */
clock-names = "i2c";
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
};
i2c3: i2c@2030000 {
@@ -967,7 +1033,8 @@
reg = <0x0 0x2030000 0x0 0x10000>;
interrupts = <0 35 0x4>; /* Level high type */
clock-names = "i2c";
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
};
ifc: ifc@2240000 {
@@ -991,7 +1058,10 @@
<0x0 0x20000000 0x0 0x10000000>;
reg-names = "QuadSPI", "QuadSPI-memory";
interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
clock-names = "qspi_en", "qspi";
status = "disabled";
};
@@ -1089,7 +1159,8 @@
compatible = "fsl,ls2080a-ahci";
reg = <0x0 0x3200000 0x0 0x10000>;
interrupts = <0 133 0x4>; /* Level high type */
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
dma-coherent;
};
@@ -1098,7 +1169,8 @@
compatible = "fsl,ls2080a-ahci";
reg = <0x0 0x3210000 0x0 0x10000>;
interrupts = <0 136 0x4>; /* Level high type */
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
dma-coherent;
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
index d87d16460875..459dccad8326 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
@@ -40,6 +40,7 @@
rgmii_phy1: ethernet-phy@1 {
reg = <1>;
+ qca,smarteee-tw-us-1g = <24>;
};
};
@@ -134,8 +135,6 @@
rtc@51 {
compatible = "nxp,pcf2129";
reg = <0x51>;
- // IRQ10_B
- interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi
index f3741a32e868..2b63235ca627 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi
@@ -8,6 +8,7 @@
/dts-v1/;
#include "fsl-lx2160a-cex7.dtsi"
+#include <dt-bindings/input/linux-event-codes.h>
/ {
aliases {
@@ -18,6 +19,17 @@
chosen {
stdout-path = "serial0:115200n8";
};
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ key {
+ label = "power";
+ linux,can-disable;
+ linux,code = <KEY_POWER>;
+ gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
+ };
+ };
};
&emdio2 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts
index 2d1fe6c3797f..d858d9c8b583 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts
@@ -14,6 +14,8 @@
aliases {
crypto = &crypto;
+ mmc0 = &esdhc0;
+ mmc1 = &esdhc1;
serial0 = &uart0;
};
@@ -31,6 +33,14 @@
};
};
+&can0 {
+ status = "okay";
+};
+
+&can1 {
+ status = "okay";
+};
+
&crypto {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
index 7723ad5efd37..5dbf27493e8b 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
@@ -2,7 +2,7 @@
//
// Device Tree file for LX2160ARDB
//
-// Copyright 2018 NXP
+// Copyright 2018-2020 NXP
/dts-v1/;
@@ -14,6 +14,8 @@
aliases {
crypto = &crypto;
+ mmc0 = &esdhc0;
+ mmc1 = &esdhc1;
serial0 = &uart0;
};
@@ -87,6 +89,22 @@
};
};
+&can0 {
+ status = "okay";
+
+ can-transceiver {
+ max-bitrate = <5000000>;
+ };
+};
+
+&can1 {
+ status = "okay";
+
+ can-transceiver {
+ max-bitrate = <5000000>;
+ };
+};
+
&esdhc0 {
sd-uhs-sdr104;
sd-uhs-sdr50;
@@ -175,8 +193,8 @@
rtc@51 {
compatible = "nxp,pcf2129";
reg = <0x51>;
- // IRQ10_B
- interrupts = <0 150 0x4>;
+ /* IRQ_RTC_B -> IRQ08, active low */
+ interrupts-extended = <&extirq 8 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
index 197397777c83..0551f6f4c313 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
@@ -4,6 +4,7 @@
//
// Copyright 2018-2020 NXP
+#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/thermal/thermal.h>
@@ -30,7 +31,7 @@
compatible = "arm,cortex-a72";
enable-method = "psci";
reg = <0x0>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
d-cache-size = <0x8000>;
d-cache-line-size = <64>;
d-cache-sets = <128>;
@@ -47,7 +48,7 @@
compatible = "arm,cortex-a72";
enable-method = "psci";
reg = <0x1>;
- clocks = <&clockgen 1 0>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 0>;
d-cache-size = <0x8000>;
d-cache-line-size = <64>;
d-cache-sets = <128>;
@@ -64,7 +65,7 @@
compatible = "arm,cortex-a72";
enable-method = "psci";
reg = <0x100>;
- clocks = <&clockgen 1 1>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 1>;
d-cache-size = <0x8000>;
d-cache-line-size = <64>;
d-cache-sets = <128>;
@@ -81,7 +82,7 @@
compatible = "arm,cortex-a72";
enable-method = "psci";
reg = <0x101>;
- clocks = <&clockgen 1 1>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 1>;
d-cache-size = <0x8000>;
d-cache-line-size = <64>;
d-cache-sets = <128>;
@@ -98,7 +99,7 @@
compatible = "arm,cortex-a72";
enable-method = "psci";
reg = <0x200>;
- clocks = <&clockgen 1 2>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 2>;
d-cache-size = <0x8000>;
d-cache-line-size = <64>;
d-cache-sets = <128>;
@@ -115,7 +116,7 @@
compatible = "arm,cortex-a72";
enable-method = "psci";
reg = <0x201>;
- clocks = <&clockgen 1 2>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 2>;
d-cache-size = <0x8000>;
d-cache-line-size = <64>;
d-cache-sets = <128>;
@@ -132,7 +133,7 @@
compatible = "arm,cortex-a72";
enable-method = "psci";
reg = <0x300>;
- clocks = <&clockgen 1 3>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 3>;
d-cache-size = <0x8000>;
d-cache-line-size = <64>;
d-cache-sets = <128>;
@@ -149,7 +150,7 @@
compatible = "arm,cortex-a72";
enable-method = "psci";
reg = <0x301>;
- clocks = <&clockgen 1 3>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 3>;
d-cache-size = <0x8000>;
d-cache-line-size = <64>;
d-cache-sets = <128>;
@@ -166,7 +167,7 @@
compatible = "arm,cortex-a72";
enable-method = "psci";
reg = <0x400>;
- clocks = <&clockgen 1 4>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 4>;
d-cache-size = <0x8000>;
d-cache-line-size = <64>;
d-cache-sets = <128>;
@@ -183,7 +184,7 @@
compatible = "arm,cortex-a72";
enable-method = "psci";
reg = <0x401>;
- clocks = <&clockgen 1 4>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 4>;
d-cache-size = <0x8000>;
d-cache-line-size = <64>;
d-cache-sets = <128>;
@@ -200,7 +201,7 @@
compatible = "arm,cortex-a72";
enable-method = "psci";
reg = <0x500>;
- clocks = <&clockgen 1 5>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 5>;
d-cache-size = <0x8000>;
d-cache-line-size = <64>;
d-cache-sets = <128>;
@@ -217,7 +218,7 @@
compatible = "arm,cortex-a72";
enable-method = "psci";
reg = <0x501>;
- clocks = <&clockgen 1 5>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 5>;
d-cache-size = <0x8000>;
d-cache-line-size = <64>;
d-cache-sets = <128>;
@@ -234,7 +235,7 @@
compatible = "arm,cortex-a72";
enable-method = "psci";
reg = <0x600>;
- clocks = <&clockgen 1 6>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 6>;
d-cache-size = <0x8000>;
d-cache-line-size = <64>;
d-cache-sets = <128>;
@@ -251,7 +252,7 @@
compatible = "arm,cortex-a72";
enable-method = "psci";
reg = <0x601>;
- clocks = <&clockgen 1 6>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 6>;
d-cache-size = <0x8000>;
d-cache-line-size = <64>;
d-cache-sets = <128>;
@@ -268,7 +269,7 @@
compatible = "arm,cortex-a72";
enable-method = "psci";
reg = <0x700>;
- clocks = <&clockgen 1 7>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 7>;
d-cache-size = <0x8000>;
d-cache-line-size = <64>;
d-cache-sets = <128>;
@@ -285,7 +286,7 @@
compatible = "arm,cortex-a72";
enable-method = "psci";
reg = <0x701>;
- clocks = <&clockgen 1 7>;
+ clocks = <&clockgen QORIQ_CLK_CMUX 7>;
d-cache-size = <0x8000>;
d-cache-line-size = <64>;
d-cache-sets = <128>;
@@ -664,6 +665,37 @@
little-endian;
};
+ isc: syscon@1f70000 {
+ compatible = "fsl,lx2160a-isc", "syscon";
+ reg = <0x0 0x1f70000 0x0 0x10000>;
+ little-endian;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x1f70000 0x10000>;
+
+ extirq: interrupt-controller@14 {
+ compatible = "fsl,lx2160a-extirq", "fsl,ls1088a-extirq";
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x14 4>;
+ interrupt-map =
+ <0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <1 0 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <2 0 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <3 0 &gic GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <4 0 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <5 0 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+ <6 0 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <7 0 &gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <8 0 &gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <9 0 &gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <10 0 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <11 0 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map-mask = <0xffffffff 0x0>;
+ };
+ };
+
tmu: tmu@1f80000 {
compatible = "fsl,qoriq-tmu";
reg = <0x0 0x1f80000 0x0 0x10000>;
@@ -685,7 +717,8 @@
reg = <0x0 0x2000000 0x0 0x10000>;
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "i2c";
- clocks = <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
scl-gpio = <&gpio2 15 GPIO_ACTIVE_HIGH>;
status = "disabled";
};
@@ -697,7 +730,8 @@
reg = <0x0 0x2010000 0x0 0x10000>;
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "i2c";
- clocks = <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
status = "disabled";
};
@@ -708,7 +742,8 @@
reg = <0x0 0x2020000 0x0 0x10000>;
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "i2c";
- clocks = <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
status = "disabled";
};
@@ -719,7 +754,8 @@
reg = <0x0 0x2030000 0x0 0x10000>;
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "i2c";
- clocks = <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
status = "disabled";
};
@@ -730,7 +766,8 @@
reg = <0x0 0x2040000 0x0 0x10000>;
interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "i2c";
- clocks = <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
scl-gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>;
status = "disabled";
};
@@ -742,7 +779,8 @@
reg = <0x0 0x2050000 0x0 0x10000>;
interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "i2c";
- clocks = <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
status = "disabled";
};
@@ -753,7 +791,8 @@
reg = <0x0 0x2060000 0x0 0x10000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "i2c";
- clocks = <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
status = "disabled";
};
@@ -764,7 +803,8 @@
reg = <0x0 0x2070000 0x0 0x10000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "i2c";
- clocks = <&clockgen 4 15>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(16)>;
status = "disabled";
};
@@ -776,7 +816,10 @@
<0x0 0x20000000 0x0 0x10000000>;
reg-names = "fspi_base", "fspi_mmap";
interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>,
+ <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
clock-names = "fspi_en", "fspi";
status = "disabled";
};
@@ -787,7 +830,8 @@
#size-cells = <0>;
reg = <0x0 0x2100000 0x0 0x10000>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 7>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(8)>;
clock-names = "dspi";
spi-num-chipselects = <5>;
bus-num = <0>;
@@ -800,7 +844,8 @@
#size-cells = <0>;
reg = <0x0 0x2110000 0x0 0x10000>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 7>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(8)>;
clock-names = "dspi";
spi-num-chipselects = <5>;
bus-num = <1>;
@@ -813,7 +858,8 @@
#size-cells = <0>;
reg = <0x0 0x2120000 0x0 0x10000>;
interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 7>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(8)>;
clock-names = "dspi";
spi-num-chipselects = <5>;
bus-num = <2>;
@@ -824,7 +870,8 @@
compatible = "fsl,esdhc";
reg = <0x0 0x2140000 0x0 0x10000>;
interrupts = <0 28 0x4>; /* Level high type */
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
dma-coherent;
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
@@ -837,7 +884,8 @@
compatible = "fsl,esdhc";
reg = <0x0 0x2150000 0x0 0x10000>;
interrupts = <0 63 0x4>; /* Level high type */
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
dma-coherent;
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
@@ -847,6 +895,30 @@
status = "disabled";
};
+ can0: can@2180000 {
+ compatible = "fsl,lx2160ar1-flexcan";
+ reg = <0x0 0x2180000 0x0 0x10000>;
+ interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(8)>,
+ <&clockgen QORIQ_CLK_SYSCLK 0>;
+ clock-names = "ipg", "per";
+ fsl,clk-source = <0>;
+ status = "disabled";
+ };
+
+ can1: can@2190000 {
+ compatible = "fsl,lx2160ar1-flexcan";
+ reg = <0x0 0x2190000 0x0 0x10000>;
+ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(8)>,
+ <&clockgen QORIQ_CLK_SYSCLK 0>;
+ clock-names = "ipg", "per";
+ fsl,clk-source = <0>;
+ status = "disabled";
+ };
+
uart0: serial@21c0000 {
compatible = "arm,sbsa-uart","arm,pl011";
reg = <0x0 0x21c0000 0x0 0x1000>;
@@ -973,7 +1045,8 @@
<0x7 0x100520 0x0 0x4>;
reg-names = "ahci", "sata-ecc";
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
dma-coherent;
status = "disabled";
};
@@ -984,7 +1057,8 @@
<0x7 0x100520 0x0 0x4>;
reg-names = "ahci", "sata-ecc";
interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
dma-coherent;
status = "disabled";
};
@@ -995,7 +1069,8 @@
<0x7 0x100520 0x0 0x4>;
reg-names = "ahci", "sata-ecc";
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
dma-coherent;
status = "disabled";
};
@@ -1006,7 +1081,8 @@
<0x7 0x100520 0x0 0x4>;
reg-names = "ahci", "sata-ecc";
interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(4)>;
dma-coherent;
status = "disabled";
};
@@ -1279,7 +1355,8 @@
ptp-timer@8b95000 {
compatible = "fsl,dpaa2-ptp";
reg = <0x0 0x8b95000 0x0 0x100>;
- clocks = <&clockgen 4 1>;
+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
+ QORIQ_CLK_PLL_DIV(2)>;
little-endian;
fsl,extts-fifo;
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2162a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-lx2162a-qds.dts
index 91786848bd30..e1defee1ad27 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2162a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2162a-qds.dts
@@ -14,6 +14,8 @@
aliases {
crypto = &crypto;
+ mmc0 = &esdhc0;
+ mmc1 = &esdhc1;
serial0 = &uart0;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
index d6b9dedd168f..6f5e63696ec0 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
@@ -102,7 +102,6 @@
compatible = "wlf,wm8962";
reg = <0x1a>;
clocks = <&clk IMX8MM_CLK_SAI3_ROOT>;
- clock-names = "xclk";
DCVDD-supply = <&reg_audio>;
DBVDD-supply = <&reg_audio>;
AVDD-supply = <&reg_audio>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
index d897913537ca..988f8ab679ad 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
@@ -256,8 +256,10 @@
&usdhc1 {
#address-cells = <1>;
#size-cells = <0>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
bus-width = <4>;
non-removable;
cap-power-off-card;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
new file mode 100644
index 000000000000..c0c384d76147
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree file for Boundary Devices i.MX8MMini Nitrogen8MM Rev2 board.
+ * Adrien Grassein <adrien.grassein@gmail.com>
+ */
+/dts-v1/;
+#include "imx8mm.dtsi"
+
+/ {
+ model = "Boundary Devices i.MX8MMini Nitrogen8MM Rev2";
+ compatible = "boundary,imx8mm-nitrogen8mm", "fsl,imx8mm";
+};
+
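+/* All four Cortex-A53 cores take their cpu-supply from the PF8121A buck3 rail defined below. */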
+&A53_0 {
+ cpu-supply = <&reg_buck3>;
+};
+
+&A53_1 {
+ cpu-supply = <&reg_buck3>;
+};
+
+&A53_2 {
+ cpu-supply = <&reg_buck3>;
+};
+
+&A53_3 {
+ cpu-supply = <&reg_buck3>;
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@4 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <4>;
+ interrupts-extended = <&gpio3 16 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ pmic@8 {
+ compatible = "nxp,pf8121a";
+ reg = <0x8>;
+
+ regulators {
+ reg_ldo1: ldo1 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_ldo2: ldo2 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_ldo3: ldo3 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_ldo4: ldo4 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_buck1: buck1 {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_buck2: buck2 {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_buck3: buck3 {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_buck4: buck4 {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_buck5: buck5 {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_buck6: buck6 {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_buck7: buck7 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_vsnvs: vsnvs {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ };
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ i2cmux@70 {
+ compatible = "nxp,pca9540";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
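+ /* downstream bus on PCA9540 mux channel 0 */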
+ i2c3 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@68 {
+ compatible = "microcrystal,rv4162";
+ reg = <0x68>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3a_rv4162>;
+ interrupts-extended = <&gpio4 22 IRQ_TYPE_LEVEL_LOW>;
+ wakeup-source;
+ };
+ };
+ };
+};
+
+/* console */
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ assigned-clocks = <&clk IMX8MM_CLK_UART2>;
+ assigned-clock-parents = <&clk IMX8MM_CLK_24M>;
+ status = "okay";
+};
+
+/* eMMC */
+&usdhc1 {
+ bus-width = <8>;
+ sdhci-caps-mask = <0x80000000 0x0>;
+ non-removable;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ status = "okay";
+};
+
+/* sdcard */
+&usdhc2 {
+ bus-width = <4>;
+ cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
+ vqmmc-supply = <&reg_ldo2>;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_fec1: fec1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ENET_MDC_ENET1_MDC 0x3
+ MX8MM_IOMUXC_ENET_MDIO_ENET1_MDIO 0x3
+ MX8MM_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x1f
+ MX8MM_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x1f
+ MX8MM_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x1f
+ MX8MM_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x1f
+ MX8MM_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x91
+ MX8MM_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x91
+ MX8MM_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x91
+ MX8MM_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x91
+ MX8MM_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f
+ MX8MM_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91
+ MX8MM_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
+ MX8MM_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
+ MX8MM_IOMUXC_NAND_READY_B_GPIO3_IO16 0x159
+ >;
+ };
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x09
+ MX8MM_IOMUXC_GPIO1_IO08_GPIO1_IO8 0x09
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C3_SCL_I2C3_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C3_SDA_I2C3_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c3a_rv4162: i2c3a-rv4162grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI2_RXC_GPIO4_IO22 0x1c0
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x140
+ MX8MM_IOMUXC_UART2_TXD_UART2_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x190
+ MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d0
+ MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d0
+ MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d0
+ MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d0
+ MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d0
+ MX8MM_IOMUXC_SD1_DATA4_USDHC1_DATA4 0x1d0
+ MX8MM_IOMUXC_SD1_DATA5_USDHC1_DATA5 0x1d0
+ MX8MM_IOMUXC_SD1_DATA6_USDHC1_DATA6 0x1d0
+ MX8MM_IOMUXC_SD1_DATA7_USDHC1_DATA7 0x1d0
+ MX8MM_IOMUXC_SD1_RESET_B_GPIO2_IO10 0x141
+ >;
+ };
+
+ pinctrl_usdhc1_100mhz: usdhc1-100mhz-grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x194
+ MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d4
+ MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d4
+ MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d4
+ MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d4
+ MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d4
+ MX8MM_IOMUXC_SD1_DATA4_USDHC1_DATA4 0x1d4
+ MX8MM_IOMUXC_SD1_DATA5_USDHC1_DATA5 0x1d4
+ MX8MM_IOMUXC_SD1_DATA6_USDHC1_DATA6 0x1d4
+ MX8MM_IOMUXC_SD1_DATA7_USDHC1_DATA7 0x1d4
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1-200mhz-grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x196
+ MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d6
+ MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d6
+ MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d6
+ MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d6
+ MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d6
+ MX8MM_IOMUXC_SD1_DATA4_USDHC1_DATA4 0x1d6
+ MX8MM_IOMUXC_SD1_DATA5_USDHC1_DATA5 0x1d6
+ MX8MM_IOMUXC_SD1_DATA6_USDHC1_DATA6 0x1d6
+ MX8MM_IOMUXC_SD1_DATA7_USDHC1_DATA7 0x1d6
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0
+ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x1c4
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhz-grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x194
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhz-grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x196
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x190
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d0
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d0
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d0
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d0
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d0
+ MX8MM_IOMUXC_GPIO1_IO00_ANAMIX_REF_CLK_32K 0x03
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3-100mhz-grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x194
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d4
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d4
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d4
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d4
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d4
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3-200mhz-grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x196
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d6
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d6
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d6
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d6
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d6
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0x140
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw700x.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw700x.dtsi
new file mode 100644
index 000000000000..c769fadbd008
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw700x.dtsi
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2020 Gateworks Corporation
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/net/ti-dp83867.h>
+
+/ {
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0 0x80000000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ user-pb {
+ label = "user_pb";
+ gpios = <&gpio 2 GPIO_ACTIVE_LOW>;
+ linux,code = <BTN_0>;
+ };
+
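+ /* the remaining keys are reported through GSC interrupts rather than SoC GPIO lines */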
+ user-pb1x {
+ label = "user_pb1x";
+ linux,code = <BTN_1>;
+ interrupt-parent = <&gsc>;
+ interrupts = <0>;
+ };
+
+ key-erased {
+ label = "key_erased";
+ linux,code = <BTN_2>;
+ interrupt-parent = <&gsc>;
+ interrupts = <1>;
+ };
+
+ eeprom-wp {
+ label = "eeprom_wp";
+ linux,code = <BTN_3>;
+ interrupt-parent = <&gsc>;
+ interrupts = <2>;
+ };
+
+ tamper {
+ label = "tamper";
+ linux,code = <BTN_4>;
+ interrupt-parent = <&gsc>;
+ interrupts = <5>;
+ };
+
+ switch-hold {
+ label = "switch_hold";
+ linux,code = <BTN_5>;
+ interrupt-parent = <&gsc>;
+ interrupts = <7>;
+ };
+ };
+};
+
+&A53_0 {
+ cpu-supply = <&buck3_reg>;
+};
+
+&A53_1 {
+ cpu-supply = <&buck3_reg>;
+};
+
+&A53_2 {
+ cpu-supply = <&buck3_reg>;
+};
+
+&A53_3 {
+ cpu-supply = <&buck3_reg>;
+};
+
+&ddrc {
+ operating-points-v2 = <&ddrc_opp_table>;
+
+ ddrc_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-25M {
+ opp-hz = /bits/ 64 <25000000>;
+ };
+
+ opp-100M {
+ opp-hz = /bits/ 64 <100000000>;
+ };
+
+ opp-750M {
+ opp-hz = /bits/ 64 <750000000>;
+ };
+ };
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ tx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ rx-fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ };
+ };
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ gsc: gsc@20 {
+ compatible = "gw,gsc";
+ reg = <0x20>;
+ pinctrl-0 = <&pinctrl_gsc>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc {
+ compatible = "gw,gsc-adc";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@6 {
+ gw,mode = <0>;
+ reg = <0x06>;
+ label = "temp";
+ };
+
+ channel@8 {
+ gw,mode = <1>;
+ reg = <0x08>;
+ label = "vdd_bat";
+ };
+
+ channel@16 {
+ gw,mode = <4>;
+ reg = <0x16>;
+ label = "fan_tach";
+ };
+
+ channel@82 {
+ gw,mode = <2>;
+ reg = <0x82>;
+ label = "vdd_vin";
+ gw,voltage-divider-ohms = <22100 1000>;
+ };
+
+ channel@84 {
+ gw,mode = <2>;
+ reg = <0x84>;
+ label = "vdd_adc1";
+ gw,voltage-divider-ohms = <10000 10000>;
+ };
+
+ channel@86 {
+ gw,mode = <2>;
+ reg = <0x86>;
+ label = "vdd_adc2";
+ gw,voltage-divider-ohms = <10000 10000>;
+ };
+
+ channel@88 {
+ gw,mode = <2>;
+ reg = <0x88>;
+ label = "vdd_dram";
+ };
+
+ channel@8c {
+ gw,mode = <2>;
+ reg = <0x8c>;
+ label = "vdd_1p2";
+ };
+
+ channel@8e {
+ gw,mode = <2>;
+ reg = <0x8e>;
+ label = "vdd_1p0";
+ };
+
+ channel@90 {
+ gw,mode = <2>;
+ reg = <0x90>;
+ label = "vdd_2p5";
+ gw,voltage-divider-ohms = <10000 10000>;
+ };
+
+ channel@92 {
+ gw,mode = <2>;
+ reg = <0x92>;
+ label = "vdd_3p3";
+ gw,voltage-divider-ohms = <10000 10000>;
+ };
+
+ channel@98 {
+ gw,mode = <2>;
+ reg = <0x98>;
+ label = "vdd_0p95";
+ };
+
+ channel@9a {
+ gw,mode = <2>;
+ reg = <0x9a>;
+ label = "vdd_1p8";
+ };
+
+ channel@a2 {
+ gw,mode = <2>;
+ reg = <0xa2>;
+ label = "vdd_gsc";
+ gw,voltage-divider-ohms = <10000 10000>;
+ };
+ };
+
+ fan-controller@a {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "gw,gsc-fan";
+ reg = <0x0a>;
+ };
+ };
+
+ gpio: gpio@23 {
+ compatible = "nxp,pca9555";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gsc>;
+ interrupts = <4>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c02";
+ reg = <0x51>;
+ pagesize = <16>;
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c02";
+ reg = <0x52>;
+ pagesize = <16>;
+ };
+
+ eeprom@53 {
+ compatible = "atmel,24c02";
+ reg = <0x53>;
+ pagesize = <16>;
+ };
+
+ rtc@68 {
+ compatible = "dallas,ds1672";
+ reg = <0x68>;
+ };
+
+ pmic@69 {
+ compatible = "mps,mp5416";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pmic>;
+ reg = <0x69>;
+
+ regulators {
+ buck1 {
+ regulator-name = "vdd_0p95";
+ regulator-min-microvolt = <805000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-max-microamp = <2500000>;
+ regulator-boot-on;
+ };
+
+ buck2 {
+ regulator-name = "vdd_soc";
+ regulator-min-microvolt = <805000>;
+ regulator-max-microvolt = <900000>;
+ regulator-max-microamp = <1000000>;
+ regulator-boot-on;
+ };
+
+ buck3_reg: buck3 {
+ regulator-name = "vdd_arm";
+ regulator-min-microvolt = <805000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-max-microamp = <2200000>;
+ regulator-boot-on;
+ };
+
+ buck4 {
+ regulator-name = "vdd_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-max-microamp = <500000>;
+ regulator-boot-on;
+ };
+
+ ldo1 {
+ regulator-name = "nvcc_snvs_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-max-microamp = <300000>;
+ regulator-boot-on;
+ };
+
+ ldo2 {
+ regulator-name = "vdd_snvs_0p8";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-boot-on;
+ };
+
+ ldo3 {
+ regulator-name = "vdd_0p95";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-boot-on;
+ };
+
+ ldo4 {
+ regulator-name = "vdd_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ };
+ };
+ };
+};
+
+&i2c2 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ eeprom@52 {
+ compatible = "atmel,24c32";
+ reg = <0x52>;
+ pagesize = <32>;
+ };
+};
+
+/* console */
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+/* eMMC */
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_fec1: fec1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ENET_MDC_ENET1_MDC 0x3
+ MX8MM_IOMUXC_ENET_MDIO_ENET1_MDIO 0x3
+ MX8MM_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x1f
+ MX8MM_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x1f
+ MX8MM_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x1f
+ MX8MM_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x1f
+ MX8MM_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x91
+ MX8MM_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x91
+ MX8MM_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x91
+ MX8MM_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x91
+ MX8MM_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f
+ MX8MM_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91
+ MX8MM_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
+ MX8MM_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
+ MX8MM_IOMUXC_NAND_ALE_GPIO3_IO0 0x19
+ >;
+ };
+
+ pinctrl_gsc: gscgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_DATA4_GPIO2_IO6 0x159
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C2_SCL_I2C2_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C2_SDA_I2C2_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_pmic: pmicgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x41
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x140
+ MX8MM_IOMUXC_UART2_TXD_UART2_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x190
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d0
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d0
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d0
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d0
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d0
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d0
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d0
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d0
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d0
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x190
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3-100mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x194
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d4
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d4
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d4
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d4
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d4
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d4
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d4
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d4
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d4
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x194
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3-200mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x196
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d6
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d6
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d6
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d6
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d6
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d6
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d6
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d6
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d6
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x196
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0xc6
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx-0x.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx-0x.dts
new file mode 100644
index 000000000000..3f88c4ad5716
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx-0x.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2020 Gateworks Corporation
+ */
+
+/dts-v1/;
+
+#include "imx8mm.dtsi"
+#include "imx8mm-venice-gw700x.dtsi"
+#include "imx8mm-venice-gw71xx.dtsi"
+
+/ {
+ model = "Gateworks Venice GW71xx-0x i.MX8MM Development Kit";
+ compatible = "gw,imx8mm-gw71xx-0x", "fsl,imx8mm";
+
+ chosen {
+ stdout-path = &uart2;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
new file mode 100644
index 000000000000..905b68a3daa5
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2020 Gateworks Corporation
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ aliases {
+ usb0 = &usbotg1;
+ usb1 = &usbotg2;
+ };
+
+ led-controller {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+ led-0 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&gpio5 5 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+ led-1 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ gpios = <&gpio5 4 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ };
+
+ pps {
+ compatible = "pps-gpio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pps>;
+ gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+ };
+
+ reg_usb_otg1_vbus: regulator-usb-otg1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usb1_en>;
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg1_vbus";
+ gpio = <&gpio1 12 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+};
+
+/* off-board header */
+&ecspi2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi2>;
+ cs-gpios = <&gpio5 13 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&i2c2 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ accelerometer@19 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_accel>;
+ compatible = "st,lis2de12";
+ reg = <0x19>;
+ st,drdy-int-pin = <1>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "INT1";
+ };
+};
+
+/* off-board header */
+&i2c3 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+};
+
+/* GPS */
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+/* off-board header */
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ status = "okay";
+};
+
+&usbotg1 {
+ dr_mode = "otg";
+ vbus-supply = <&reg_usb_otg1_vbus>;
+ status = "okay";
+};
+
+&usbotg2 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SPDIF_TX_GPIO5_IO3 0x40000041 /* PLUG_TEST */
+ MX8MM_IOMUXC_GPIO1_IO06_GPIO1_IO6 0x40000041 /* PCI_USBSEL */
+ MX8MM_IOMUXC_SAI1_RXD5_GPIO4_IO7 0x40000041 /* PCIE_WDIS# */
+ MX8MM_IOMUXC_GPIO1_IO07_GPIO1_IO7 0x40000041 /* DIO0 */
+ MX8MM_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x40000041 /* DIO1 */
+ MX8MM_IOMUXC_SAI1_RXD1_GPIO4_IO3 0x40000041 /* DIO2 */
+ MX8MM_IOMUXC_SAI1_RXD2_GPIO4_IO4 0x40000041 /* DIO3 */
+ >;
+ };
+
+ pinctrl_accel: accelgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI1_RXD3_GPIO4_IO5 0x159
+ >;
+ };
+
+ pinctrl_gpio_leds: gpioledgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SPDIF_EXT_CLK_GPIO5_IO5 0x19
+ MX8MM_IOMUXC_SPDIF_RX_GPIO5_IO4 0x19
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C3_SCL_I2C3_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C3_SDA_I2C3_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_pps: ppsgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x41
+ >;
+ };
+
+ pinctrl_reg_usb1_en: regusb1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x41
+ MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x41
+ >;
+ };
+
+ pinctrl_spi2: spi2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
+ MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6
+ MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6
+ MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140
+ MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART3_RXD_UART3_DCE_RX 0x140
+ MX8MM_IOMUXC_UART3_TXD_UART3_DCE_TX 0x140
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x.dts
new file mode 100644
index 000000000000..b1e7540f0281
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x.dts
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2020 Gateworks Corporation
+ */
+
+/dts-v1/;
+
+#include "imx8mm.dtsi"
+#include "imx8mm-venice-gw700x.dtsi"
+#include "imx8mm-venice-gw72xx.dtsi"
+
+/ {
+ model = "Gateworks Venice GW72xx-0x i.MX8MM Development Kit";
+ compatible = "gw,imx8mm-gw72xx-0x", "fsl,imx8mm";
+
+ chosen {
+ stdout-path = &uart2;
+ };
+};
+
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
new file mode 100644
index 000000000000..b7c91bdc21dd
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2020 Gateworks Corporation
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ aliases {
+ usb0 = &usbotg1;
+ usb1 = &usbotg2;
+ };
+
+ led-controller {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+ led-0 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&gpio5 5 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+ led-1 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ gpios = <&gpio5 4 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ };
+
+ pps {
+ compatible = "pps-gpio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pps>;
+ gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_usb_otg1_vbus: regulator-usb-otg1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usb1_en>;
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg1_vbus";
+ gpio = <&gpio1 12 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ reg_usb_otg2_vbus: regulator-usb-otg2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usb2_en>;
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg2_vbus";
+ gpio = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+};
+
+/* off-board header */
+&ecspi2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi2>;
+ cs-gpios = <&gpio5 13 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&i2c2 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ accelerometer@19 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_accel>;
+ compatible = "st,lis2de12";
+ reg = <0x19>;
+ st,drdy-int-pin = <1>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "INT1";
+ };
+};
+
+/* off-board header */
+&i2c3 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+};
+
+/* off-board header */
+&sai3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai3>;
+ assigned-clocks = <&clk IMX8MM_CLK_SAI3>;
+ assigned-clock-parents = <&clk IMX8MM_AUDIO_PLL1_OUT>;
+ assigned-clock-rates = <24576000>;
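+ /* 24.576 MHz MCLK, i.e. 512 times the 48 kHz audio sample rate */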
+ status = "okay";
+};
+
+/* GPS */
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+/* off-board header */
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ status = "okay";
+};
+
+/* RS232 */
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+};
+
+&usbotg1 {
+ dr_mode = "otg";
+ vbus-supply = <&reg_usb_otg1_vbus>;
+ status = "okay";
+};
+
+&usbotg2 {
+ dr_mode = "host";
+ vbus-supply = <&reg_usb_otg2_vbus>;
+ status = "okay";
+};
+
+/* microSD */
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SPDIF_TX_GPIO5_IO3 0x40000041 /* PLUG_TEST */
+ MX8MM_IOMUXC_GPIO1_IO06_GPIO1_IO6 0x40000041 /* PCI_USBSEL */
+ MX8MM_IOMUXC_SAI1_RXD5_GPIO4_IO7 0x40000041 /* PCIE_WDIS# */
+ MX8MM_IOMUXC_GPIO1_IO07_GPIO1_IO7 0x40000041 /* DIO0 */
+ MX8MM_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x40000041 /* DIO1 */
+ MX8MM_IOMUXC_GPIO1_IO00_GPIO1_IO0 0x40000104 /* RS485_TERM */
+ MX8MM_IOMUXC_SAI1_RXFS_GPIO4_IO0 0x40000104 /* RS485 */
+ MX8MM_IOMUXC_SAI1_RXD0_GPIO4_IO2 0x40000104 /* RS485_HALF */
+ >;
+ };
+
+ pinctrl_accel: accelgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI1_RXD3_GPIO4_IO5 0x159
+ >;
+ };
+
+ pinctrl_gpio_leds: gpioledgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SPDIF_EXT_CLK_GPIO5_IO5 0x19
+ MX8MM_IOMUXC_SPDIF_RX_GPIO5_IO4 0x19
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C3_SCL_I2C3_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C3_SDA_I2C3_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_pps: ppsgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x41
+ >;
+ };
+
+ pinctrl_reg_usb1_en: regusb1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x41
+ MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x41
+ >;
+ };
+
+ pinctrl_reg_usb2_en: regusb2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO08_GPIO1_IO8 0x41
+ >;
+ };
+
+ pinctrl_sai3: sai3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0xd6
+ MX8MM_IOMUXC_SAI3_TXC_SAI3_TX_BCLK 0xd6
+ MX8MM_IOMUXC_SAI3_MCLK_SAI3_MCLK 0xd6
+ MX8MM_IOMUXC_SAI3_TXD_SAI3_TX_DATA0 0xd6
+ MX8MM_IOMUXC_SAI3_RXD_SAI3_RX_DATA0 0xd6
+ >;
+ };
+
+ pinctrl_spi2: spi2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
+ MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6
+ MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6
+ MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140
+ MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART3_RXD_UART3_DCE_RX 0x140
+ MX8MM_IOMUXC_UART3_TXD_UART3_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART4_RXD_UART4_DCE_RX 0x140
+ MX8MM_IOMUXC_UART4_TXD_UART4_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x190
+ MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d0
+ MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d0
+ MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d0
+ MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d0
+ MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x194
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x196
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2gpiogrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x1c4
+ MX8MM_IOMUXC_SD2_RESET_B_USDHC2_RESET_B 0x1d0
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x.dts
new file mode 100644
index 000000000000..6905437ff281
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2020 Gateworks Corporation
+ */
+
+/dts-v1/;
+
+#include "imx8mm.dtsi"
+#include "imx8mm-venice-gw700x.dtsi"
+#include "imx8mm-venice-gw73xx.dtsi"
+
+/ {
+ model = "Gateworks Venice GW73xx-0x i.MX8MM Development Kit";
+ compatible = "gw,imx8mm-gw73xx-0x", "fsl,imx8mm";
+
+ chosen {
+ stdout-path = &uart2;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
new file mode 100644
index 000000000000..d2ffd62a3bd4
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2020 Gateworks Corporation
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ aliases {
+ usb0 = &usbotg1;
+ usb1 = &usbotg2;
+ };
+
+ led-controller {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+ led-0 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&gpio5 5 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+ led-1 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ gpios = <&gpio5 4 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ };
+
+ pps {
+ compatible = "pps-gpio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pps>;
+ gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_usb_otg1_vbus: regulator-usb-otg1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usb1_en>;
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg1_vbus";
+ gpio = <&gpio1 12 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ reg_usb_otg2_vbus: regulator-usb-otg2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usb2_en>;
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg2_vbus";
+ gpio = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ reg_wifi_en: regulator-wifi-en {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_wl>;
+ compatible = "regulator-fixed";
+ regulator-name = "wl";
+ gpio = <&gpio1 5 GPIO_ACTIVE_HIGH>;
+ startup-delay-us = <100>;
+ enable-active-high;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
+
+/* off-board header */
+&ecspi2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi2>;
+ cs-gpios = <&gpio5 13 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&i2c2 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ accelerometer@19 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_accel>;
+ compatible = "st,lis2de12";
+ reg = <0x19>;
+ st,drdy-int-pin = <1>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "INT1";
+ };
+};
+
+/* off-board header */
+&i2c3 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+};
+
+/* off-board header */
+&sai3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai3>;
+ assigned-clocks = <&clk IMX8MM_CLK_SAI3>;
+ assigned-clock-parents = <&clk IMX8MM_AUDIO_PLL1_OUT>;
+ assigned-clock-rates = <24576000>;
+ status = "okay";
+};
+
+/* GPS */
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+/* bluetooth HCI */
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_bten>;
+ cts-gpios = <&gpio5 8 GPIO_ACTIVE_LOW>;
+ rts-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
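+ /* CTS/RTS are handled as GPIOs; the pads are muxed to GPIO5_IO8/IO9 in uart3grp below */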
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm4330-bt";
+ shutdown-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+/* RS232 */
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+};
+
+&usbotg1 {
+ dr_mode = "otg";
+ vbus-supply = <&reg_usb_otg1_vbus>;
+ status = "okay";
+};
+
+&usbotg2 {
+ dr_mode = "host";
+ vbus-supply = <&reg_usb_otg2_vbus>;
+ status = "okay";
+};
+
+/* SDIO WiFi */
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ bus-width = <4>;
+ non-removable;
+ vmmc-supply = <&reg_wifi_en>;
+ status = "okay";
+};
+
+/* microSD */
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SPDIF_TX_GPIO5_IO3 0x40000041 /* PLUG_TEST */
+ MX8MM_IOMUXC_GPIO1_IO06_GPIO1_IO6 0x40000041 /* PCI_USBSEL */
+ MX8MM_IOMUXC_SAI1_RXD5_GPIO4_IO7 0x40000041 /* PCIE_WDIS# */
+ MX8MM_IOMUXC_GPIO1_IO07_GPIO1_IO7 0x40000041 /* DIO0 */
+ MX8MM_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x40000041 /* DIO1 */
+ MX8MM_IOMUXC_GPIO1_IO00_GPIO1_IO0 0x40000104 /* RS485_TERM */
+ MX8MM_IOMUXC_SAI1_RXFS_GPIO4_IO0 0x40000104 /* RS485 */
+ MX8MM_IOMUXC_SAI1_RXD0_GPIO4_IO2 0x40000104 /* RS485_HALF */
+ >;
+ };
+
+ pinctrl_accel: accelgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI1_RXD3_GPIO4_IO5 0x159
+ >;
+ };
+
+ pinctrl_bten: btengrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x41
+ >;
+ };
+
+ pinctrl_gpio_leds: gpioledgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SPDIF_EXT_CLK_GPIO5_IO5 0x19
+ MX8MM_IOMUXC_SPDIF_RX_GPIO5_IO4 0x19
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C3_SCL_I2C3_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C3_SDA_I2C3_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_pps: ppsgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x41
+ >;
+ };
+
+ pinctrl_reg_wl: regwlgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO05_GPIO1_IO5 0x41
+ >;
+ };
+
+ pinctrl_reg_usb1_en: regusb1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x41
+ MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x41
+ >;
+ };
+
+ pinctrl_reg_usb2_en: regusb2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO08_GPIO1_IO8 0x41
+ >;
+ };
+
+ pinctrl_sai3: sai3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0xd6
+ MX8MM_IOMUXC_SAI3_TXC_SAI3_TX_BCLK 0xd6
+ MX8MM_IOMUXC_SAI3_MCLK_SAI3_MCLK 0xd6
+ MX8MM_IOMUXC_SAI3_TXD_SAI3_TX_DATA0 0xd6
+ MX8MM_IOMUXC_SAI3_RXD_SAI3_RX_DATA0 0xd6
+ >;
+ };
+
+ pinctrl_spi2: spi2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
+ MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6
+ MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6
+ MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140
+ MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART3_RXD_UART3_DCE_RX 0x140
+ MX8MM_IOMUXC_UART3_TXD_UART3_DCE_TX 0x140
+ MX8MM_IOMUXC_ECSPI1_MISO_GPIO5_IO8 0x140
+ MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x140
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART4_RXD_UART4_DCE_RX 0x140
+ MX8MM_IOMUXC_UART4_TXD_UART4_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x190
+ MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d0
+ MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d0
+ MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d0
+ MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d0
+ MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x194
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x196
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2gpiogrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x1c4
+ MX8MM_IOMUXC_SD2_RESET_B_USDHC2_RESET_B 0x1d0
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
index c824f2615fe8..6bf1d15ba16a 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
@@ -257,10 +257,12 @@
};
soc@0 {
- compatible = "simple-bus";
+ compatible = "fsl,imx8mm-soc", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x0 0x3e000000>;
+ nvmem-cells = <&imx8mm_uid>;
+ nvmem-cell-names = "soc_unique_id";
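+ /* the unique ID is read from the OCOTP fuse cell (imx8mm_uid) defined below */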
aips1: bus@30000000 {
compatible = "fsl,aips-bus", "simple-bus";
@@ -518,9 +520,17 @@
#address-cells = <1>;
#size-cells = <1>;
+ imx8mm_uid: unique-id@4 {
+ reg = <0x4 0x8>;
+ };
+
cpu_speed_grade: speed-grade@10 {
reg = <0x10 4>;
};
+
+ fec_mac_address: mac-address@90 {
+ reg = <0x90 6>;
+ };
};
anatop: anatop@30360000 {
@@ -909,13 +919,18 @@
assigned-clocks = <&clk IMX8MM_CLK_ENET_AXI>,
<&clk IMX8MM_CLK_ENET_TIMER>,
<&clk IMX8MM_CLK_ENET_REF>,
- <&clk IMX8MM_CLK_ENET_TIMER>;
+ <&clk IMX8MM_CLK_ENET_PHY_REF>;
assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_266M>,
<&clk IMX8MM_SYS_PLL2_100M>,
- <&clk IMX8MM_SYS_PLL2_125M>;
- assigned-clock-rates = <0>, <0>, <125000000>, <100000000>;
+ <&clk IMX8MM_SYS_PLL2_125M>,
+ <&clk IMX8MM_SYS_PLL2_50M>;
+ assigned-clock-rates = <0>, <100000000>, <125000000>, <0>;
fsl,num-tx-queues = <3>;
fsl,num-rx-queues = <3>;
+ nvmem-cells = <&fec_mac_address>;
+ nvmem-cell-names = "mac-address";
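+ /* MAC address comes from the OCOTP fuse cell at offset 0x90 (fec_mac_address) */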
+ nvmem_macaddr_swap;
+ fsl,stop-mode = <&gpr 0x10 3>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi
new file mode 100644
index 000000000000..376ca8ff7213
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2020 Compass Electronics Group, LLC
+ */
+
+/ {
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ label = "gen_led0";
+ gpios = <&pca6416_1 4 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-1 {
+ label = "gen_led1";
+ gpios = <&pca6416_1 5 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-2 {
+ label = "gen_led2";
+ gpios = <&pca6416_1 6 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_led3>;
+ label = "heartbeat";
+ gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ reg_audio: regulator-audio {
+ compatible = "regulator-fixed";
+ regulator-name = "3v3_aud";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pca6416_1 11 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2 {
+ compatible = "regulator-fixed";
+ regulator-name = "vsd_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_usb_otg_vbus: regulator-usb {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usb_otg>;
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio4 29 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ sound {
+ compatible = "fsl,imx-audio-wm8962";
+ model = "wm8962-audio";
+ audio-cpu = <&sai3>;
+ audio-codec = <&wm8962>;
+ audio-routing =
+ "Headphone Jack", "HPOUTL",
+ "Headphone Jack", "HPOUTR",
+ "Ext Spk", "SPKOUTL",
+ "Ext Spk", "SPKOUTR",
+ "AMIC", "MICBIAS",
+ "IN3R", "AMIC";
+ };
+};
+
+&ecspi2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_espi2>;
+ cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ eeprom@0 {
+ compatible = "microchip,at25160bn", "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <5000000>;
+ spi-cpha;
+ spi-cpol;
+ pagesize = <32>;
+ size = <2048>;
+ address-width = <16>;
+ };
+};
+
+&i2c4 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4>;
+ status = "okay";
+
+ pca6416_0: gpio@20 {
+ compatible = "nxp,pcal6416";
+ reg = <0x20>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcal6414>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ pca6416_1: gpio@21 {
+ compatible = "nxp,pcal6416";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ wm8962: audio-codec@1a {
+ compatible = "wlf,wm8962";
+ reg = <0x1a>;
+ clocks = <&clk IMX8MN_CLK_SAI3_ROOT>;
+ clock-names = "xclk";
+ DCVDD-supply = <&reg_audio>;
+ DBVDD-supply = <&reg_audio>;
+ AVDD-supply = <&reg_audio>;
+ CPVDD-supply = <&reg_audio>;
+ MICVDD-supply = <&reg_audio>;
+ PLLVDD-supply = <&reg_audio>;
+ SPKVDD1-supply = <&reg_audio>;
+ SPKVDD2-supply = <&reg_audio>;
+ gpio-cfg = <
+ 0x0000 /* 0:Default */
+ 0x0000 /* 1:Default */
+ 0x0000 /* 2:FN_DMICCLK */
+ 0x0000 /* 3:Default */
+ 0x0000 /* 4:FN_DMICCDAT */
+ 0x0000 /* 5:Default */
+ >;
+ };
+};
+
+&easrc {
+ fsl,asrc-rate = <48000>;
+ status = "okay";
+};
+
+&sai3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai3>;
+ assigned-clocks = <&clk IMX8MN_CLK_SAI3>;
+ assigned-clock-parents = <&clk IMX8MN_AUDIO_PLL1_OUT>;
+ assigned-clock-rates = <24576000>;
+ fsl,sai-mclk-direction-output;
+ status = "okay";
+};
+
+&snvs_pwrkey {
+ status = "okay";
+};
+
+&uart2 { /* console */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ assigned-clocks = <&clk IMX8MN_CLK_UART3>;
+ assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_80M>;
+ status = "okay";
+};
+
+&usbotg1 {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ disable-over-current;
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
+ bus-width = <4>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_espi2: espi2grp {
+ fsl,pins = <
+ MX8MN_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x82
+ MX8MN_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x82
+ MX8MN_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x82
+ MX8MN_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x41
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX8MN_IOMUXC_I2C2_SCL_I2C2_SCL 0x400001c3
+ MX8MN_IOMUXC_I2C2_SDA_I2C2_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <
+ MX8MN_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3
+ MX8MN_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_led3: led3grp {
+ fsl,pins = <
+ MX8MN_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x41
+ >;
+ };
+
+ pinctrl_pcal6414: pcal6414-gpiogrp {
+ fsl,pins = <
+ MX8MN_IOMUXC_SAI2_MCLK_GPIO4_IO27 0x19
+ >;
+ };
+
+ pinctrl_reg_usb_otg: reg-otggrp {
+ fsl,pins = <
+ MX8MN_IOMUXC_SAI3_RXC_GPIO4_IO29 0x19
+ >;
+ };
+
+ pinctrl_sai3: sai3grp {
+ fsl,pins = <
+ MX8MN_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0xd6
+ MX8MN_IOMUXC_SAI3_TXC_SAI3_TX_BCLK 0xd6
+ MX8MN_IOMUXC_SAI3_MCLK_SAI3_MCLK 0xd6
+ MX8MN_IOMUXC_SAI3_TXD_SAI3_TX_DATA0 0xd6
+ MX8MN_IOMUXC_SAI3_RXD_SAI3_RX_DATA0 0xd6
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MN_IOMUXC_UART2_RXD_UART2_DCE_RX 0x140
+ MX8MN_IOMUXC_UART2_TXD_UART2_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX8MN_IOMUXC_ECSPI1_SCLK_UART3_DCE_RX 0x40
+ MX8MN_IOMUXC_ECSPI1_MOSI_UART3_DCE_TX 0x40
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2gpiogrp {
+ fsl,pins = <
+ MX8MN_IOMUXC_SD2_CD_B_USDHC2_CD_B 0x41
+ MX8MN_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x41
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX8MN_IOMUXC_SD2_CLK_USDHC2_CLK 0x190
+ MX8MN_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0
+ MX8MN_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0
+ MX8MN_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0
+ MX8MN_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0
+ MX8MN_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0
+ MX8MN_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ MX8MN_IOMUXC_SD2_CLK_USDHC2_CLK 0x194
+ MX8MN_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4
+ MX8MN_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4
+ MX8MN_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4
+ MX8MN_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4
+ MX8MN_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4
+ MX8MN_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+ MX8MN_IOMUXC_SD2_CLK_USDHC2_CLK 0x196
+ MX8MN_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6
+ MX8MN_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6
+ MX8MN_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6
+ MX8MN_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6
+ MX8MN_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6
+ MX8MN_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts
new file mode 100644
index 000000000000..1392ce02587b
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-kit.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2020 Compass Electronics Group, LLC
+ */
+
+/dts-v1/;
+
+#include "imx8mn.dtsi"
+#include "imx8mn-beacon-som.dtsi"
+#include "imx8mn-beacon-baseboard.dtsi"
+
+/ {
+ model = "Beacon EmbeddedWorks i.MX8M Nano Development Kit";
+ compatible = "beacon,imx8mn-beacon-kit", "fsl,imx8mn";
+
+ chosen {
+ stdout-path = &uart2;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
new file mode 100644
index 000000000000..de2cd0e3201c
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2020 Compass Electronics Group, LLC
+ */
+
+/ {
+ aliases {
+ rtc0 = &rtc;
+ rtc1 = &snvs_rtc;
+ spi0 = &flexspi;
+ };
+
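+ /* power sequencing for the SDIO WiFi/BT module on usdhc1: reset GPIO plus external 32 kHz clock */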
+ usdhc1_pwrseq: usdhc1_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1_gpio>;
+ reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
+ clocks = <&osc_32k>;
+ clock-names = "ext_clock";
+ post-power-on-delay-ms = <80>;
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0 0x80000000>;
+ };
+};
+
+&A53_0 {
+ cpu-supply = <&buck2_reg>;
+};
+
+&A53_1 {
+ cpu-supply = <&buck2_reg>;
+};
+
+&A53_2 {
+ cpu-supply = <&buck2_reg>;
+};
+
+&A53_3 {
+ cpu-supply = <&buck2_reg>;
+};
+
+/* DDR controller is running LPDDR at 800MHz which requires 0.95V */
+&a53_opp_table {
+ opp-1200000000 {
+ opp-microvolt = <950000>;
+ };
+};
+
+&ddrc {
+ operating-points-v2 = <&ddrc_opp_table>;
+
+ ddrc_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-25M {
+ opp-hz = /bits/ 64 <25000000>;
+ };
+
+ opp-100M {
+ opp-hz = /bits/ 64 <100000000>;
+ };
+
+ opp-800M {
+ opp-hz = /bits/ 64 <800000000>;
+ };
+ };
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ phy-supply = <&buck6_reg>;
+ phy-reset-gpios = <&gpio4 22 GPIO_ACTIVE_LOW>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+ };
+};
+
+&flexspi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexspi>;
+ status = "okay";
+
+ flash@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "jedec,spi-nor";
+ spi-max-frequency = <80000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ };
+};
+
+&i2c1 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ pmic@4b {
+ compatible = "rohm,bd71847";
+ reg = <0x4b>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pmic>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ rohm,reset-snvs-powered;
+
+ regulators {
+ buck1_reg: BUCK1 {
+ regulator-name = "buck1";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <1250>;
+ };
+
+ buck2_reg: BUCK2 {
+ regulator-name = "buck2";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <1250>;
+ rohm,dvs-run-voltage = <1000000>;
+ rohm,dvs-idle-voltage = <900000>;
+ };
+
+ buck3_reg: BUCK3 {
+ // BUCK5 in datasheet
+ regulator-name = "buck3";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck4_reg: BUCK4 {
+ // BUCK6 in datasheet
+ regulator-name = "buck4";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck5_reg: BUCK5 {
+ // BUCK7 in datasheet
+ regulator-name = "buck5";
+ regulator-min-microvolt = <1605000>;
+ regulator-max-microvolt = <1995000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck6_reg: BUCK6 {
+ // BUCK8 in datasheet
+ regulator-name = "buck6";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1_reg: LDO1 {
+ regulator-name = "ldo1";
+ regulator-min-microvolt = <1600000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2_reg: LDO2 {
+ regulator-name = "ldo2";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3_reg: LDO3 {
+ regulator-name = "ldo3";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo4_reg: LDO4 {
+ regulator-name = "ldo4";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo6_reg: LDO6 {
+ regulator-name = "ldo6";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "microchip,24c64", "atmel,24c64";
+ pagesize = <32>;
+ read-only; /* Manufacturing EEPROM programmed at factory */
+ reg = <0x50>;
+ };
+
+ rtc: rtc@51 {
+ compatible = "nxp,pcf85263";
+ reg = <0x51>;
+ };
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ assigned-clocks = <&clk IMX8MN_CLK_UART1>;
+ assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_80M>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ shutdown-gpios = <&gpio2 6 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpio2 8 GPIO_ACTIVE_HIGH>;
+ device-wakeup-gpios = <&gpio2 7 GPIO_ACTIVE_HIGH>;
+ clocks = <&osc_32k>;
+ max-speed = <4000000>;
+ clock-names = "extclk";
+ };
+};
+
+&usdhc1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ bus-width = <4>;
+ non-removable;
+ cap-power-off-card;
+ pm-ignore-notify;
+ keep-power-in-suspend;
+ mmc-pwrseq = <&usdhc1_pwrseq>;
+ status = "okay";
+
+ brcmf: brcmf@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wlan>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+ };
+};
+
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_fec1: fec1grp {
+ fsl,pins = <
+ MX8MN_IOMUXC_ENET_MDC_ENET1_MDC 0x3
+ MX8MN_IOMUXC_ENET_MDIO_ENET1_MDIO 0x3
+ MX8MN_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x1f
+ MX8MN_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x1f
+ MX8MN_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x1f
+ MX8MN_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x1f
+ MX8MN_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x91
+ MX8MN_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x91
+ MX8MN_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x91
+ MX8MN_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x91
+ MX8MN_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f
+ MX8MN_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91
+ MX8MN_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
+ MX8MN_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
+ MX8MN_IOMUXC_SAI2_RXC_GPIO4_IO22 0x19
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX8MN_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3
+ MX8MN_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX8MN_IOMUXC_I2C3_SCL_I2C3_SCL 0x400001c3
+ MX8MN_IOMUXC_I2C3_SDA_I2C3_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_flexspi: flexspigrp {
+ fsl,pins = <
+ MX8MN_IOMUXC_NAND_ALE_QSPI_A_SCLK 0x1c2
+ MX8MN_IOMUXC_NAND_CE0_B_QSPI_A_SS0_B 0x82
+ MX8MN_IOMUXC_NAND_DATA00_QSPI_A_DATA0 0x82
+ MX8MN_IOMUXC_NAND_DATA01_QSPI_A_DATA1 0x82
+ MX8MN_IOMUXC_NAND_DATA02_QSPI_A_DATA2 0x82
+ MX8MN_IOMUXC_NAND_DATA03_QSPI_A_DATA3 0x82
+ >;
+ };
+
+ pinctrl_pmic: pmicirqgrp {
+ fsl,pins = <
+ MX8MN_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x141
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX8MN_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140
+ MX8MN_IOMUXC_UART1_TXD_UART1_DCE_TX 0x140
+ MX8MN_IOMUXC_UART3_RXD_UART1_DCE_CTS_B 0x140
+ MX8MN_IOMUXC_UART3_TXD_UART1_DCE_RTS_B 0x140
+ MX8MN_IOMUXC_SD1_DATA4_GPIO2_IO6 0x19
+ MX8MN_IOMUXC_SD1_DATA5_GPIO2_IO7 0x19
+ MX8MN_IOMUXC_SD1_DATA6_GPIO2_IO8 0x19
+ MX8MN_IOMUXC_GPIO1_IO00_ANAMIX_REF_CLK_32K 0x141
+ >;
+ };
+
+ pinctrl_usdhc1_gpio: usdhc1gpiogrp {
+ fsl,pins = <
+ MX8MN_IOMUXC_SD1_RESET_B_GPIO2_IO10 0x41
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX8MN_IOMUXC_SD1_CLK_USDHC1_CLK 0x190
+ MX8MN_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d0
+ MX8MN_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d0
+ MX8MN_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d0
+ MX8MN_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d0
+ MX8MN_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc1_100mhz: usdhc1-100mhzgrp {
+ fsl,pins = <
+ MX8MN_IOMUXC_SD1_CLK_USDHC1_CLK 0x194
+ MX8MN_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d4
+ MX8MN_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d4
+ MX8MN_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d4
+ MX8MN_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d4
+ MX8MN_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d4
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1-200mhzgrp {
+ fsl,pins = <
+ MX8MN_IOMUXC_SD1_CLK_USDHC1_CLK 0x196
+ MX8MN_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d6
+ MX8MN_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d6
+ MX8MN_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d6
+ MX8MN_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d6
+ MX8MN_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d6
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX8MN_IOMUXC_NAND_WE_B_USDHC3_CLK 0x190
+ MX8MN_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d0
+ MX8MN_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d0
+ MX8MN_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d0
+ MX8MN_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d0
+ MX8MN_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d0
+ MX8MN_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d0
+ MX8MN_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d0
+ MX8MN_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d0
+ MX8MN_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d0
+ MX8MN_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x190
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3-100mhzgrp {
+ fsl,pins = <
+ MX8MN_IOMUXC_NAND_WE_B_USDHC3_CLK 0x194
+ MX8MN_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d4
+ MX8MN_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d4
+ MX8MN_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d4
+ MX8MN_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d4
+ MX8MN_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d4
+ MX8MN_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d4
+ MX8MN_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d4
+ MX8MN_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d4
+ MX8MN_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d4
+ MX8MN_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x194
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3-200mhzgrp {
+ fsl,pins = <
+ MX8MN_IOMUXC_NAND_WE_B_USDHC3_CLK 0x196
+ MX8MN_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d6
+ MX8MN_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d6
+ MX8MN_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d6
+ MX8MN_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d6
+ MX8MN_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d6
+ MX8MN_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d6
+ MX8MN_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d6
+ MX8MN_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d6
+ MX8MN_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d6
+ MX8MN_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x196
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX8MN_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0xc6
+ >;
+ };
+
+ pinctrl_wlan: wlangrp {
+ fsl,pins = <
+ MX8MN_IOMUXC_SD1_DATA7_GPIO2_IO9 0x111
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
index 76d042a4cf09..a0dddba2e561 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
@@ -46,6 +46,40 @@
pinctrl-0 = <&pinctrl_ir>;
linux,autosuspend-period = <125>;
};
+
+ wm8524: audio-codec {
+ #sound-dai-cells = <0>;
+ compatible = "wlf,wm8524";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_wlf>;
+ wlf,mute-gpios = <&gpio5 21 GPIO_ACTIVE_LOW>;
+ clocks = <&clk IMX8MN_CLK_SAI3_ROOT>;
+ clock-names = "mclk";
+ };
+
+ sound-wm8524 {
+ compatible = "fsl,imx-audio-wm8524";
+ model = "wm8524-audio";
+ audio-cpu = <&sai3>;
+ audio-codec = <&wm8524>;
+ audio-asrc = <&easrc>;
+ audio-routing =
+ "Line Out Jack", "LINEVOUTL",
+ "Line Out Jack", "LINEVOUTR";
+ };
+
+ sound-spdif {
+ compatible = "fsl,imx-audio-spdif";
+ model = "imx-spdif";
+ spdif-controller = <&spdif1>;
+ spdif-out;
+ spdif-in;
+ };
+};
+
+&easrc {
+ fsl,asrc-rate = <48000>;
+ status = "okay";
};
&fec1 {
@@ -124,10 +158,29 @@
};
};
+&sai3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai3>;
+ assigned-clocks = <&clk IMX8MN_CLK_SAI3>;
+ assigned-clock-parents = <&clk IMX8MN_AUDIO_PLL1_OUT>;
+ assigned-clock-rates = <24576000>;
+ fsl,sai-mclk-direction-output;
+ status = "okay";
+};
+
&snvs_pwrkey {
status = "okay";
};
+&spdif1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spdif1>;
+ assigned-clocks = <&clk IMX8MN_CLK_SPDIF1>;
+ assigned-clock-parents = <&clk IMX8MN_AUDIO_PLL1_OUT>;
+ assigned-clock-rates = <24576000>;
+ status = "okay";
+};
+
&uart2 { /* console */
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
@@ -210,6 +263,12 @@
>;
};
+ pinctrl_gpio_wlf: gpiowlfgrp {
+ fsl,pins = <
+ MX8MN_IOMUXC_I2C4_SDA_GPIO5_IO21 0xd6
+ >;
+ };
+
pinctrl_ir: irgrp {
fsl,pins = <
MX8MN_IOMUXC_GPIO1_IO13_GPIO1_IO13 0x4f
@@ -249,6 +308,22 @@
>;
};
+ pinctrl_sai3: sai3grp {
+ fsl,pins = <
+ MX8MN_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0xd6
+ MX8MN_IOMUXC_SAI3_TXC_SAI3_TX_BCLK 0xd6
+ MX8MN_IOMUXC_SAI3_MCLK_SAI3_MCLK 0xd6
+ MX8MN_IOMUXC_SAI3_TXD_SAI3_TX_DATA0 0xd6
+ >;
+ };
+
+ pinctrl_spdif1: spdif1grp {
+ fsl,pins = <
+ MX8MN_IOMUXC_SPDIF_TX_SPDIF1_OUT 0xd6
+ MX8MN_IOMUXC_SPDIF_RX_SPDIF1_IN 0xd6
+ >;
+ };
+
pinctrl_typec1: typec1grp {
fsl,pins = <
MX8MN_IOMUXC_SD1_STROBE_GPIO2_IO11 0x159
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index ee1790230490..16ea50089567 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -241,10 +241,12 @@
};
soc@0 {
- compatible = "simple-bus";
+ compatible = "fsl,imx8mn-soc", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x0 0x3e000000>;
+ nvmem-cells = <&imx8mn_uid>;
+ nvmem-cell-names = "soc_unique_id";
aips1: bus@30000000 {
compatible = "fsl,aips-bus", "simple-bus";
@@ -253,7 +255,7 @@
#size-cells = <1>;
ranges;
- spba: bus@30000000 {
+ spba: spba-bus@30000000 {
compatible = "fsl,spba-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -531,9 +533,17 @@
#address-cells = <1>;
#size-cells = <1>;
+ imx8mn_uid: unique-id@410 {
+ reg = <0x4 0x8>;
+ };
+
cpu_speed_grade: speed-grade@10 {
reg = <0x10 4>;
};
+
+ fec_mac_address: mac-address@90 {
+ reg = <0x90 6>;
+ };
};
anatop: anatop@30360000 {
@@ -581,7 +591,9 @@
<&clk IMX8MN_CLK_NOC>,
<&clk IMX8MN_CLK_AUDIO_AHB>,
<&clk IMX8MN_CLK_IPG_AUDIO_ROOT>,
- <&clk IMX8MN_SYS_PLL3>;
+ <&clk IMX8MN_SYS_PLL3>,
+ <&clk IMX8MN_AUDIO_PLL1>,
+ <&clk IMX8MN_AUDIO_PLL2>;
assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_800M>,
<&clk IMX8MN_ARM_PLL_OUT>,
<&clk IMX8MN_SYS_PLL3_OUT>,
@@ -589,7 +601,9 @@
assigned-clock-rates = <0>, <0>, <0>,
<400000000>,
<400000000>,
- <600000000>;
+ <600000000>,
+ <393216000>,
+ <361267200>;
};
src: reset-controller@30390000 {
@@ -875,6 +889,19 @@
status = "disabled";
};
+ flexspi: spi@30bb0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "nxp,imx8mm-fspi";
+ reg = <0x30bb0000 0x10000>, <0x8000000 0x10000000>;
+ reg-names = "fspi_base", "fspi_mmap";
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MN_CLK_QSPI_ROOT>,
+ <&clk IMX8MN_CLK_QSPI_ROOT>;
+ clock-names = "fspi", "fspi_en";
+ status = "disabled";
+ };
+
sdma1: dma-controller@30bd0000 {
compatible = "fsl,imx8mn-sdma", "fsl,imx8mq-sdma";
reg = <0x30bd0000 0x10000>;
@@ -903,13 +930,18 @@
assigned-clocks = <&clk IMX8MN_CLK_ENET_AXI>,
<&clk IMX8MN_CLK_ENET_TIMER>,
<&clk IMX8MN_CLK_ENET_REF>,
- <&clk IMX8MN_CLK_ENET_TIMER>;
+ <&clk IMX8MN_CLK_ENET_PHY_REF>;
assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_266M>,
<&clk IMX8MN_SYS_PLL2_100M>,
- <&clk IMX8MN_SYS_PLL2_125M>;
- assigned-clock-rates = <0>, <0>, <125000000>, <100000000>;
+ <&clk IMX8MN_SYS_PLL2_125M>,
+ <&clk IMX8MN_SYS_PLL2_50M>;
+ assigned-clock-rates = <0>, <100000000>, <125000000>, <0>;
fsl,num-tx-queues = <3>;
fsl,num-rx-queues = <3>;
+ nvmem-cells = <&fec_mac_address>;
+ nvmem-cell-names = "mac-address";
+ nvmem_macaddr_swap;
+ fsl,stop-mode = <&gpr 0x10 3>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
index b10dce8767a4..7db4273cc88b 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
@@ -127,6 +127,21 @@
status = "okay";
};
+&usb3_phy1 {
+ status = "okay";
+};
+
+&usb3_1 {
+ status = "okay";
+};
+
+&usb_dwc3_1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb1_vbus>;
+ dr_mode = "host";
+ status = "okay";
+};
+
&usdhc2 {
assigned-clocks = <&clk IMX8MP_CLK_USDHC2>;
assigned-clock-rates = <400000000>;
@@ -232,6 +247,12 @@
>;
};
+ pinctrl_usb1_vbus: usb1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO14__USB2_OTG_PWR 0x19
+ >;
+ };
+
pinctrl_usdhc2: usdhc2grp {
fsl,pins = <
MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x190
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
new file mode 100644
index 000000000000..0e1a6d953389
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 PHYTEC Messtechnik GmbH
+ * Author: Teresa Remmet <t.remmet@phytec.de>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/leds/leds-pca9532.h>
+#include <dt-bindings/pwm/pwm.h>
+#include "imx8mp-phycore-som.dtsi"
+
+/ {
+ model = "PHYTEC phyBOARD-Pollux i.MX8MP";
+ compatible = "phytec,imx8mp-phyboard-pollux-rdk",
+ "phytec,imx8mp-phycore-som", "fsl,imx8mp";
+
+ chosen {
+ stdout-path = &uart2;
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2 {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>;
+ regulator-name = "VSD_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ startup-delay-us = <100>;
+ off-on-delay-us = <12000>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ pinctrl-1 = <&pinctrl_i2c2_gpio>;
+ sda-gpios = <&gpio5 17 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio5 16 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ status = "okay";
+
+ eeprom@51 {
+ compatible = "atmel,24c02";
+ reg = <0x51>;
+ pagesize = <16>;
+ };
+
+ leds@62 {
+ compatible = "nxp,pca9533";
+ reg = <0x62>;
+
+ led1 {
+ type = <PCA9532_TYPE_LED>;
+ };
+
+ led2 {
+ type = <PCA9532_TYPE_LED>;
+ };
+
+ led3 {
+ type = <PCA9532_TYPE_LED>;
+ };
+ };
+};
+
+&snvs_pwrkey {
+ status = "okay";
+};
+
+/* debug console */
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+/* SD-Card */
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_pins>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_pins>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_pins>;
+ cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ bus-width = <4>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c3
+ MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c2_gpio: i2c2gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C2_SCL__GPIO5_IO16 0x1e3
+ MX8MP_IOMUXC_I2C2_SDA__GPIO5_IO17 0x1e3
+ >;
+ };
+
+ pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x41
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX 0x49
+ MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX 0x49
+ >;
+ };
+
+ pinctrl_usdhc2_pins: usdhc2-gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CD_B__GPIO2_IO12 0x1c4
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x190
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d0
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d0
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d0
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d0
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d0
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x194
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d4
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x196
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d6
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d6
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d6
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d6
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d6
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
new file mode 100644
index 000000000000..44a8c2337cee
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 PHYTEC Messtechnik GmbH
+ * Author: Teresa Remmet <t.remmet@phytec.de>
+ */
+
+#include <dt-bindings/net/ti-dp83867.h>
+#include "imx8mp.dtsi"
+
+/ {
+ model = "PHYTEC phyCORE-i.MX8MP";
+ compatible = "phytec,imx8mp-phycore-som", "fsl,imx8mp";
+
+ aliases {
+ rtc0 = &rv3028;
+ rtc1 = &snvs_rtc;
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0 0x80000000>;
+ };
+};
+
+&A53_0 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_1 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_2 {
+ cpu-supply = <&buck2>;
+};
+
+&A53_3 {
+ cpu-supply = <&buck2>;
+};
+
+/* ethernet 1 */
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy1>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy1: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <15 IRQ_TYPE_EDGE_FALLING>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ enet-phy-lane-no-swap;
+ };
+ };
+};
+
+&i2c1 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ sda-gpios = <&gpio5 15 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio5 14 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ status = "okay";
+
+ pmic: pmic@25 {
+ reg = <0x25>;
+ compatible = "nxp,pca9450c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pmic>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ buck1: BUCK1 {
+ regulator-compatible = "BUCK1";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <2187500>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ };
+
+ buck2: BUCK2 {
+ regulator-compatible = "BUCK2";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <2187500>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <3125>;
+ };
+
+ buck4: BUCK4 {
+ regulator-compatible = "BUCK4";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck5: BUCK5 {
+ regulator-compatible = "BUCK5";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck6: BUCK6 {
+ regulator-compatible = "BUCK6";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1: LDO1 {
+ regulator-compatible = "LDO1";
+ regulator-min-microvolt = <1600000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2: LDO2 {
+ regulator-compatible = "LDO2";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3: LDO3 {
+ regulator-compatible = "LDO3";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo4: LDO4 {
+ regulator-compatible = "LDO4";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo5: LDO5 {
+ regulator-compatible = "LDO5";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ };
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c32";
+ reg = <0x51>;
+ pagesize = <32>;
+ };
+
+ rv3028: rtc@52 {
+ compatible = "microcrystal,rv3028";
+ reg = <0x52>;
+ trickle-resistor-ohms = <3000>;
+ };
+};
+
+/* eMMC */
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_fec: fecgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI1_RXD2__ENET1_MDC 0x3
+ MX8MP_IOMUXC_SAI1_RXD3__ENET1_MDIO 0x3
+ MX8MP_IOMUXC_SAI1_RXD4__ENET1_RGMII_RD0 0x91
+ MX8MP_IOMUXC_SAI1_RXD5__ENET1_RGMII_RD1 0x91
+ MX8MP_IOMUXC_SAI1_RXD6__ENET1_RGMII_RD2 0x91
+ MX8MP_IOMUXC_SAI1_RXD7__ENET1_RGMII_RD3 0x91
+ MX8MP_IOMUXC_SAI1_TXC__ENET1_RGMII_RXC 0x91
+ MX8MP_IOMUXC_SAI1_TXFS__ENET1_RGMII_RX_CTL 0x91
+ MX8MP_IOMUXC_SAI1_TXD0__ENET1_RGMII_TD0 0x1f
+ MX8MP_IOMUXC_SAI1_TXD1__ENET1_RGMII_TD1 0x1f
+ MX8MP_IOMUXC_SAI1_TXD2__ENET1_RGMII_TD2 0x1f
+ MX8MP_IOMUXC_SAI1_TXD3__ENET1_RGMII_TD3 0x1f
+ MX8MP_IOMUXC_SAI1_TXD4__ENET1_RGMII_TX_CTL 0x1f
+ MX8MP_IOMUXC_SAI1_TXD5__ENET1_RGMII_TXC 0x1f
+ MX8MP_IOMUXC_GPIO1_IO15__GPIO1_IO15 0x11
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL 0x400001c3
+ MX8MP_IOMUXC_I2C1_SDA__I2C1_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c1_gpio: i2c1gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C1_SCL__GPIO5_IO14 0x1e3
+ MX8MP_IOMUXC_I2C1_SDA__GPIO5_IO15 0x1e3
+ >;
+ };
+
+ pinctrl_pmic: pmicirqgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI1_TXD6__GPIO4_IO18 0x141
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x190
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d0
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d0
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d0
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d0
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d0
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d0
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d0
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d0
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d0
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x190
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3-100mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x194
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d4
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d4
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d4
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d4
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d4
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d4
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d4
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d4
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d4
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x194
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3-200mhzgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x196
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d6
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d6
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d6
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d6
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d6
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d6
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d6
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d6
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d6
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x196
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO02__WDOG1_WDOG_B 0xc6
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index ecccfbb4f5ad..c7523fd4eae9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -218,10 +218,12 @@
};
soc@0 {
- compatible = "simple-bus";
+ compatible = "fsl,imx8mp-soc", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x0 0x3e000000>;
+ nvmem-cells = <&imx8mp_uid>;
+ nvmem-cell-names = "soc_unique_id";
aips1: bus@30000000 {
compatible = "fsl,aips-bus", "simple-bus";
@@ -266,7 +268,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
- gpio-ranges = <&iomuxc 0 56 26>, <&iomuxc 0 144 4>;
+ gpio-ranges = <&iomuxc 0 56 26>, <&iomuxc 26 144 4>;
};
gpio4: gpio@30230000 {
@@ -328,9 +330,17 @@
#address-cells = <1>;
#size-cells = <1>;
+ imx8mp_uid: unique-id@420 {
+ reg = <0x8 0x8>;
+ };
+
cpu_speed_grade: speed-grade@10 {
reg = <0x10 4>;
};
+
+ eth_mac1: mac-address@90 {
+ reg = <0x90 6>;
+ };
};
anatop: anatop@30360000 {
@@ -762,13 +772,18 @@
assigned-clocks = <&clk IMX8MP_CLK_ENET_AXI>,
<&clk IMX8MP_CLK_ENET_TIMER>,
<&clk IMX8MP_CLK_ENET_REF>,
- <&clk IMX8MP_CLK_ENET_TIMER>;
+ <&clk IMX8MP_CLK_ENET_PHY_REF>;
assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_266M>,
<&clk IMX8MP_SYS_PLL2_100M>,
- <&clk IMX8MP_SYS_PLL2_125M>;
- assigned-clock-rates = <0>, <0>, <125000000>, <100000000>;
+ <&clk IMX8MP_SYS_PLL2_125M>,
+ <&clk IMX8MP_SYS_PLL2_50M>;
+ assigned-clock-rates = <0>, <100000000>, <125000000>, <0>;
fsl,num-tx-queues = <3>;
fsl,num-rx-queues = <3>;
+ nvmem-cells = <&eth_mac1>;
+ nvmem-cell-names = "mac-address";
+ fsl,stop-mode = <&gpr 0x10 3>;
+ nvmem_macaddr_swap;
status = "disabled";
};
};
@@ -788,5 +803,87 @@
reg = <0x3d800000 0x400000>;
interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
};
+
+ usb3_phy0: usb-phy@381f0040 {
+ compatible = "fsl,imx8mp-usb-phy";
+ reg = <0x381f0040 0x40>;
+ clocks = <&clk IMX8MP_CLK_USB_PHY_ROOT>;
+ clock-names = "phy";
+ assigned-clocks = <&clk IMX8MP_CLK_USB_PHY_REF>;
+ assigned-clock-parents = <&clk IMX8MP_CLK_24M>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ usb3_0: usb@32f10100 {
+ compatible = "fsl,imx8mp-dwc3";
+ reg = <0x32f10100 0x8>;
+ clocks = <&clk IMX8MP_CLK_HSIO_ROOT>,
+ <&clk IMX8MP_CLK_USB_ROOT>;
+ clock-names = "hsio", "suspend";
+ interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ dma-ranges = <0x40000000 0x40000000 0xc0000000>;
+ ranges;
+ status = "disabled";
+
+ usb_dwc3_0: dwc3@38100000 {
+ compatible = "snps,dwc3";
+ reg = <0x38100000 0x10000>;
+ clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
+ <&clk IMX8MP_CLK_USB_CORE_REF>,
+ <&clk IMX8MP_CLK_USB_ROOT>;
+ clock-names = "bus_early", "ref", "suspend";
+ assigned-clocks = <&clk IMX8MP_CLK_HSIO_AXI>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_500M>;
+ assigned-clock-rates = <500000000>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&usb3_phy0>, <&usb3_phy0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ snps,dis-u2-freeclk-exists-quirk;
+ };
+
+ };
+
+ usb3_phy1: usb-phy@382f0040 {
+ compatible = "fsl,imx8mp-usb-phy";
+ reg = <0x382f0040 0x40>;
+ clocks = <&clk IMX8MP_CLK_USB_PHY_ROOT>;
+ clock-names = "phy";
+ assigned-clocks = <&clk IMX8MP_CLK_USB_PHY_REF>;
+ assigned-clock-parents = <&clk IMX8MP_CLK_24M>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ usb3_1: usb@32f10108 {
+ compatible = "fsl,imx8mp-dwc3";
+ reg = <0x32f10108 0x8>;
+ clocks = <&clk IMX8MP_CLK_HSIO_ROOT>,
+ <&clk IMX8MP_CLK_USB_ROOT>;
+ clock-names = "hsio", "suspend";
+ interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ dma-ranges = <0x40000000 0x40000000 0xc0000000>;
+ ranges;
+ status = "disabled";
+
+ usb_dwc3_1: dwc3@38200000 {
+ compatible = "snps,dwc3";
+ reg = <0x38200000 0x10000>;
+ clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
+ <&clk IMX8MP_CLK_USB_CORE_REF>,
+ <&clk IMX8MP_CLK_USB_ROOT>;
+ clock-names = "bus_early", "ref", "suspend";
+ assigned-clocks = <&clk IMX8MP_CLK_HSIO_AXI>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_500M>;
+ assigned-clock-rates = <500000000>;
+ interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&usb3_phy1>, <&usb3_phy1>;
+ phy-names = "usb2-phy", "usb3-phy";
+ snps,dis-u2-freeclk-exists-quirk;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
index af139b283daf..dd217a0760e9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
@@ -244,11 +244,6 @@
cpu-supply = <&buck2_reg>;
};
-&clk {
- assigned-clocks = <&clk IMX8MQ_AUDIO_PLL1>, <&clk IMX8MQ_AUDIO_PLL2>;
- assigned-clock-rates = <786432000>, <722534400>;
-};
-
&dphy {
status = "okay";
};
@@ -298,6 +293,7 @@
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1300000>;
regulator-boot-on;
+ regulator-always-on;
regulator-ramp-delay = <1250>;
rohm,dvs-run-voltage = <900000>;
rohm,dvs-idle-voltage = <850000>;
@@ -319,6 +315,7 @@
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1300000>;
regulator-boot-on;
+ regulator-enable-ramp-delay = <200>;
rohm,dvs-run-voltage = <900000>;
};
@@ -334,6 +331,7 @@
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1350000>;
regulator-boot-on;
+ regulator-always-on;
};
buck6_reg: BUCK6 {
@@ -341,6 +339,7 @@
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
+ regulator-always-on;
};
buck7_reg: BUCK7 {
@@ -348,6 +347,7 @@
regulator-min-microvolt = <1605000>;
regulator-max-microvolt = <1995000>;
regulator-boot-on;
+ regulator-always-on;
};
buck8_reg: BUCK8 {
@@ -355,6 +355,7 @@
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <1400000>;
regulator-boot-on;
+ regulator-always-on;
};
ldo1_reg: LDO1 {
@@ -380,6 +381,7 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
+ regulator-always-on;
};
ldo4_reg: LDO4 {
@@ -387,12 +389,14 @@
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;
+ regulator-always-on;
};
ldo5_reg: LDO5 {
regulator-name = "ldo5";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
+ regulator-always-on;
};
ldo6_reg: LDO6 {
@@ -400,6 +404,7 @@
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;
+ regulator-always-on;
};
ldo7_reg: LDO7 {
@@ -407,6 +412,7 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
+ regulator-always-on;
};
};
};
@@ -886,6 +892,10 @@
status = "okay";
};
+&snvs_rtc {
+ status = "disabled";
+};
+
&sai2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sai2>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
index 6704ea2c72a3..0d38327043f8 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
@@ -10,6 +10,12 @@
compatible = "purism,librem5r3", "purism,librem5", "fsl,imx8mq";
};
+&a53_opp_table {
+ opp-1000000000 {
+ opp-microvolt = <1000000>;
+ };
+};
+
&accel_gyro {
mount-matrix = "1", "0", "0",
"0", "1", "0",
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r4.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r4.dts
new file mode 100644
index 000000000000..cbfb49aa2563
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r4.dts
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2020 Purism SPC <kernel@puri.sm>
+
+/dts-v1/;
+
+#include "imx8mq-librem5.dtsi"
+
+/ {
+ model = "Purism Librem 5r4";
+ compatible = "purism,librem5r4", "purism,librem5", "fsl,imx8mq";
+};
+
+&accel_gyro {
+ mount-matrix = "1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "-1";
+};
+
+&bat {
+ maxim,rsns-microohm = <1667>;
+};
+
+&bq25895 {
+ ti,battery-regulation-voltage = <4200000>; /* uV */
+ ti,charge-current = <1500000>; /* uA */
+ ti,termination-current = <144000>; /* uA */
+};
+
+&led_backlight {
+ led-max-microamp = <25000>;
+};
+
+&proximity {
+ proximity-near-level = <10>;
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
index 64fc546b110f..06a4799b6aeb 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
@@ -82,6 +82,20 @@
enable-active-high;
};
+ reg_lcd_1v8: regulator-lcd-1v8 {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_dsien>;
+ regulator-name = "LCD_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&reg_vdd_1v8>;
+ gpio = <&gpio1 5 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ /* Otherwise i2c3 is not functional */
+ regulator-always-on;
+ };
+
reg_lcd_3v4: regulator-lcd-3v4 {
compatible = "regulator-fixed";
regulator-name = "LCD_3V4";
@@ -99,6 +113,14 @@
regulator-max-microvolt = <3300000>;
};
+ reg_vdd_1v8: regulator-vdd-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&buck7_reg>;
+ };
+
reg_vdd_3v3: regulator-vdd-3v3 {
compatible = "regulator-fixed";
regulator-name = "VDD_3V3";
@@ -106,13 +128,6 @@
regulator-max-microvolt = <3300000>;
};
- reg_vdd_1v8: regulator-vdd-1v8 {
- compatible = "regulator-fixed";
- regulator-name = "VCC_1V8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
reg_vsys_3v4: regulator-vsys-3v4 {
compatible = "regulator-fixed";
regulator-name = "VSYS_3V4";
@@ -275,7 +290,7 @@
pinctrl_charger_in: chargeringrp {
fsl,pins = <
/* CHRG_INT */
- MX8MQ_IOMUXC_NAND_CE2_B_GPIO3_IO3 0x00
+ MX8MQ_IOMUXC_NAND_CE2_B_GPIO3_IO3 0x80
/* CHG_STATUS_B */
MX8MQ_IOMUXC_NAND_ALE_GPIO3_IO0 0x80
>;
@@ -295,6 +310,17 @@
>;
};
+ pinctrl_dsirst: dsirstgrp {
+ fsl,pins = <
+ /* DSI_RST */
+ MX8MQ_IOMUXC_ENET_RD3_GPIO1_IO29 0x83
+ /* DSI_TE */
+ MX8MQ_IOMUXC_ENET_RD2_GPIO1_IO28 0x83
+ /* TP_RST */
+ MX8MQ_IOMUXC_ENET_RX_CTL_GPIO1_IO24 0x83
+ >;
+ };
+
pinctrl_ecspi1: ecspigrp {
fsl,pins = <
MX8MQ_IOMUXC_ECSPI1_MOSI_ECSPI1_MOSI 0x83
@@ -458,6 +484,13 @@
>;
};
+ pinctrl_touch: touchgrp {
+ fsl,pins = <
+ /* TP_INT */
+ MX8MQ_IOMUXC_ENET_RD1_GPIO1_IO27 0x80
+ >;
+ };
+
pinctrl_typec: typecgrp {
fsl,pins = <
/* TYPEC_MUX_EN */
@@ -649,6 +682,7 @@
regulator-name = "buck1";
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
regulator-ramp-delay = <1250>;
rohm,dvs-run-voltage = <900000>;
rohm,dvs-idle-voltage = <850000>;
@@ -660,6 +694,7 @@
regulator-name = "buck2";
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
regulator-ramp-delay = <1250>;
rohm,dvs-run-voltage = <1000000>;
rohm,dvs-idle-voltage = <900000>;
@@ -670,8 +705,8 @@
regulator-name = "buck3";
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
rohm,dvs-run-voltage = <900000>;
- regulator-always-on;
};
buck4_reg: BUCK4 {
@@ -685,6 +720,7 @@
regulator-name = "buck5";
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1350000>;
+ regulator-boot-on;
regulator-always-on;
};
@@ -692,6 +728,7 @@
regulator-name = "buck6";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
regulator-always-on;
};
@@ -699,6 +736,7 @@
regulator-name = "buck7";
regulator-min-microvolt = <1605000>;
regulator-max-microvolt = <1995000>;
+ regulator-boot-on;
regulator-always-on;
};
@@ -706,6 +744,7 @@
regulator-name = "buck8";
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <1400000>;
+ regulator-boot-on;
regulator-always-on;
};
@@ -713,6 +752,7 @@
regulator-name = "ldo1";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
/* leave on for snvs power button */
regulator-always-on;
};
@@ -721,6 +761,7 @@
regulator-name = "ldo2";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <900000>;
+ regulator-boot-on;
/* leave on for snvs power button */
regulator-always-on;
};
@@ -729,6 +770,7 @@
regulator-name = "ldo3";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
regulator-always-on;
};
@@ -736,6 +778,7 @@
regulator-name = "ldo4";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
regulator-always-on;
};
@@ -752,6 +795,7 @@
regulator-name = "ldo6";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
regulator-always-on;
};
@@ -760,6 +804,7 @@
regulator-name = "ldo7";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
regulator-always-on;
};
};
@@ -796,12 +841,12 @@
compatible = "tps65132";
reg = <0x3e>;
- outp {
+ reg_lcd_avdd: outp {
regulator-name = "LCD_AVDD";
vin-supply = <&reg_lcd_3v4>;
};
- outn {
+ reg_lcd_avee: outn {
regulator-name = "LCD_AVEE";
vin-supply = <&reg_lcd_3v4>;
};
@@ -879,10 +924,13 @@
touchscreen@38 {
compatible = "edt,edt-ft5506";
reg = <0x38>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_touch>;
interrupt-parent = <&gpio1>;
interrupts = <27 IRQ_TYPE_EDGE_FALLING>;
touchscreen-size-x = <720>;
touchscreen-size-y = <1440>;
+ vcc-supply = <&reg_lcd_1v8>;
};
};
@@ -919,6 +967,45 @@
ti,use-vinmin-threshold = <1>; /* enable VINDPM */
ti,vinmin-threshold = <3900000>; /* uV */
monitored-battery = <&bat>;
+ power-supplies = <&typec_pd>;
+ };
+};
+
+&lcdif {
+ status = "okay";
+};
+
+&mipi_dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ lcd_panel: panel@0 {
+ compatible = "mantix,mlaf057we51-x";
+ reg = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_dsirst>;
+ avdd-supply = <&reg_lcd_avdd>;
+ avee-supply = <&reg_lcd_avee>;
+ vddi-supply = <&reg_lcd_1v8>;
+ backlight = <&backlight_dsi>;
+ reset-gpios = <&gpio1 29 GPIO_ACTIVE_LOW>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&mipi_dsi_out>;
+ };
+ };
+ };
+
+ ports {
+ port@1 {
+ reg = <1>;
+
+ mipi_dsi_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
};
};
@@ -1072,6 +1159,8 @@
};
&usdhc1 {
+ assigned-clocks = <&clk IMX8MQ_CLK_USDHC1>;
+ assigned-clock-rates = <400000000>;
pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc1>;
pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
@@ -1084,6 +1173,8 @@
};
&usdhc2 {
+ assigned-clocks = <&clk IMX8MQ_CLK_USDHC2>;
+ assigned-clock-rates = <200000000>;
pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc2>;
pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
index bfad4b885905..631e01c1b9fd 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
@@ -10,6 +10,56 @@
/ {
model = "ZII Ultra RMB3 Board";
compatible = "zii,imx8mq-ultra-rmb3", "zii,imx8mq-ultra", "fsl,imx8mq";
+
+ sound1 {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "front";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&sound1_codec>;
+ simple-audio-card,frame-master = <&sound1_codec>;
+ simple-audio-card,widgets =
+ "Headphone", "Headphone Jack Front";
+ simple-audio-card,routing =
+ "Headphone Jack Front", "HPA1 HPLEFT",
+ "Headphone Jack Front", "HPA1 HPRIGHT",
+ "HPA1 LEFTIN", "HPL",
+ "HPA1 RIGHTIN", "HPR";
+ simple-audio-card,aux-devs = <&hpa1>;
+
+ sound1_cpu: simple-audio-card,cpu {
+ sound-dai = <&sai2>;
+ };
+
+ sound1_codec: simple-audio-card,codec {
+ sound-dai = <&codec1>;
+ clocks = <&cs2000>;
+ };
+ };
+
+ sound2 {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "periph";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&sound2_codec>;
+ simple-audio-card,frame-master = <&sound2_codec>;
+ simple-audio-card,widgets =
+ "Headphone", "Headphone Jack Back";
+ simple-audio-card,routing =
+ "Headphone Jack Back", "HPA1 HPLEFT",
+ "Headphone Jack Back", "HPA1 HPRIGHT",
+ "HPA1 LEFTIN", "HPL",
+ "HPA1 RIGHTIN", "HPR";
+ simple-audio-card,aux-devs = <&hpa2>;
+
+ sound2_cpu: simple-audio-card,cpu {
+ sound-dai = <&sai3>;
+ };
+
+ sound2_codec: simple-audio-card,codec {
+ sound-dai = <&codec2>;
+ clocks = <&cs2000>;
+ };
+ };
};
&ecspi1 {
@@ -27,6 +77,27 @@
};
};
+&hpa2 {
+ sound-name-prefix = "HPA1";
+};
+
+&i2c1 {
+ codec2: codec@18 {
+ compatible = "ti,tlv320dac3100";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_codec2>;
+ reg = <0x18>;
+ #sound-dai-cells = <0>;
+ HPVDD-supply = <&reg_3p3v>;
+ SPRVDD-supply = <&reg_3p3v>;
+ SPLVDD-supply = <&reg_3p3v>;
+ AVDD-supply = <&reg_3p3v>;
+ IOVDD-supply = <&reg_3p3v>;
+ DVDD-supply = <&vgen4_reg>;
+ reset-gpios = <&gpio3 4 GPIO_ACTIVE_HIGH>;
+ };
+};
+
&i2c2 {
temp-sense@48 {
compatible = "national,lm75";
@@ -56,6 +127,8 @@
touchscreen-inverted-x;
touchscreen-swapped-x-y;
syna,sensor-type = <1>;
+ syna,delta-x-threshold = <5>;
+ syna,delta-y-threshold = <10>;
};
rmi4-f12@12 {
@@ -79,11 +152,23 @@
};
};
+&sai3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai3>;
+ status = "okay";
+};
+
&usbhub {
swap-dx-lanes = <0>;
};
&iomuxc {
+ pinctrl_codec2: dac2grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_NAND_CE3_B_GPIO3_IO4 0x41
+ >;
+ };
+
pinctrl_ecspi1: ecspi1grp {
fsl,pins = <
MX8MQ_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x19
@@ -92,4 +177,12 @@
MX8MQ_IOMUXC_ECSPI1_MOSI_ECSPI1_MOSI 0x82
>;
};
+
+ pinctrl_sai3: sai3grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0xd6
+ MX8MQ_IOMUXC_SAI3_TXC_SAI3_TX_BCLK 0xd6
+ MX8MQ_IOMUXC_SAI3_TXD_SAI3_TX_DATA0 0xd6
+ >;
+ };
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-zest.dts b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-zest.dts
index 173b9e9b2bbd..f6130167a1c7 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-zest.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-zest.dts
@@ -10,6 +10,36 @@
/ {
model = "ZII Ultra Zest Board";
compatible = "zii,imx8mq-ultra-zest", "zii,imx8mq-ultra", "fsl,imx8mq";
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "front";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&sound_codec>;
+ simple-audio-card,frame-master = <&sound_codec>;
+ simple-audio-card,widgets =
+ "Headphone", "Headphone Jack Front",
+ "Headphone", "Headphone Jack Back";
+ simple-audio-card,routing =
+ "Headphone Jack Front", "HPA1 HPLEFT",
+ "Headphone Jack Front", "HPA1 HPRIGHT",
+ "Headphone Jack Back", "HPA2 HPLEFT",
+ "Headphone Jack Back", "HPA2 HPRIGHT",
+ "HPA1 LEFTIN", "HPL",
+ "HPA1 RIGHTIN", "HPR",
+ "HPA2 LEFTIN", "HPL",
+ "HPA2 RIGHTIN", "HPR";
+ simple-audio-card,aux-devs = <&hpa1>, <&hpa2>;
+
+ sound_cpu: simple-audio-card,cpu {
+ sound-dai = <&sai2>;
+ };
+
+ sound_codec: simple-audio-card,codec {
+ sound-dai = <&codec1>;
+ clocks = <&cs2000>;
+ };
+ };
};
&i2c4 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
index fa7a041ffcfd..4dc8383478ee 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
@@ -77,6 +77,15 @@
regulator-always-on;
};
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ vin-supply = <&reg_3p3_main>;
+ regulator-name = "GEN_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
reg_usdhc2_vmmc: regulator-vsd-3v3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reg_usdhc2>;
@@ -102,6 +111,18 @@
900000 0x0>;
regulator-always-on;
};
+
+ cs2000_ref: cs2000-ref {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24576000>;
+ };
+
+ cs2000_in_dummy: cs2000-in-dummy {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
};
&A53_0 {
@@ -283,9 +304,19 @@
reg = <0x32>;
interrupt-parent = <&gpio3>;
interrupts = <17 IRQ_TYPE_EDGE_BOTH>,
- <18 IRQ_TYPE_EDGE_BOTH>;
+ <18 IRQ_TYPE_EDGE_FALLING>;
interrupt-names = "a_det", "alert";
};
+
+ hpa2: amp@60 {
+ compatible = "ti,tpa6130a2";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpa2>;
+ reg = <0x60>;
+ power-gpio = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+ Vdd-supply = <&reg_5p0_main>;
+ sound-name-prefix = "HPA2";
+ };
};
&i2c2 {
@@ -378,11 +409,36 @@
};
};
+ codec1: codec@18 {
+ compatible = "ti,tlv320dac3100";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_codec1>;
+ reg = <0x18>;
+ #sound-dai-cells = <0>;
+ HPVDD-supply = <&reg_3p3v>;
+ SPRVDD-supply = <&reg_3p3v>;
+ SPLVDD-supply = <&reg_3p3v>;
+ AVDD-supply = <&reg_3p3v>;
+ IOVDD-supply = <&reg_3p3v>;
+ DVDD-supply = <&vgen4_reg>;
+ reset-gpios = <&gpio3 3 GPIO_ACTIVE_LOW>;
+ };
+
eeprom@54 {
compatible = "atmel,24c128";
reg = <0x54>;
};
+ hpa1: amp@60 {
+ compatible = "ti,tpa6130a2";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpa1>;
+ reg = <0x60>;
+ power-gpio = <&gpio4 10 GPIO_ACTIVE_HIGH>;
+ Vdd-supply = <&reg_5p0_main>;
+ sound-name-prefix = "HPA1";
+ };
+
ds1341: rtc@68 {
compatible = "dallas,ds1341";
reg = <0x68>;
@@ -407,6 +463,16 @@
compatible = "zii,rave-wdt";
reg = <0x38>;
};
+
+ cs2000: clkgen@4e {
+ compatible = "cirrus,cs2000-cp";
+ reg = <0x4e>;
+ #clock-cells = <0>;
+ clock-names = "clk_in", "ref_clk";
+ clocks = <&cs2000_in_dummy>, <&cs2000_ref>;
+ assigned-clocks = <&cs2000>;
+ assigned-clock-rates = <24000000>;
+ };
};
&i2c4 {
@@ -416,6 +482,12 @@
status = "okay";
};
+&sai2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai2>;
+ status = "okay";
+};
+
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
@@ -468,6 +540,7 @@
&usb_dwc3_0 {
dr_mode = "host";
+ maximum-speed = "high-speed";
status = "okay";
};
@@ -478,6 +551,7 @@
&usb_dwc3_1 {
dr_mode = "host";
+ maximum-speed = "high-speed";
status = "okay";
};
@@ -551,6 +625,12 @@
>;
};
+ pinctrl_codec1: dac1grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_NAND_CE2_B_GPIO3_IO3 0x41
+ >;
+ };
+
pinctrl_fec1: fec1grp {
fsl,pins = <
MX8MQ_IOMUXC_ENET_MDC_ENET1_MDC 0x3
@@ -583,29 +663,29 @@
pinctrl_i2c1: i2c1grp {
fsl,pins = <
- MX8MQ_IOMUXC_I2C1_SCL_I2C1_SCL 0x4000007f
- MX8MQ_IOMUXC_I2C1_SDA_I2C1_SDA 0x4000007f
+ MX8MQ_IOMUXC_I2C1_SCL_I2C1_SCL 0x40000022
+ MX8MQ_IOMUXC_I2C1_SDA_I2C1_SDA 0x400000a2
>;
};
pinctrl_i2c2: i2c2grp {
fsl,pins = <
- MX8MQ_IOMUXC_I2C2_SCL_I2C2_SCL 0x4000007f
- MX8MQ_IOMUXC_I2C2_SDA_I2C2_SDA 0x4000007f
+ MX8MQ_IOMUXC_I2C2_SCL_I2C2_SCL 0x40000022
+ MX8MQ_IOMUXC_I2C2_SDA_I2C2_SDA 0x400000a2
>;
};
pinctrl_i2c3: i2c3grp {
fsl,pins = <
- MX8MQ_IOMUXC_I2C3_SCL_I2C3_SCL 0x4000007f
- MX8MQ_IOMUXC_I2C3_SDA_I2C3_SDA 0x4000007f
+ MX8MQ_IOMUXC_I2C3_SCL_I2C3_SCL 0x40000022
+ MX8MQ_IOMUXC_I2C3_SDA_I2C3_SDA 0x400000a2
>;
};
pinctrl_i2c4: i2c4grp {
fsl,pins = <
- MX8MQ_IOMUXC_I2C4_SCL_I2C4_SCL 0x4000007f
- MX8MQ_IOMUXC_I2C4_SDA_I2C4_SDA 0x4000007f
+ MX8MQ_IOMUXC_I2C4_SCL_I2C4_SCL 0x40000022
+ MX8MQ_IOMUXC_I2C4_SDA_I2C4_SDA 0x400000a2
>;
};
@@ -642,12 +722,32 @@
>;
};
+ pinctrl_sai2: sai2grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SAI2_TXFS_SAI2_TX_SYNC 0xd6
+ MX8MQ_IOMUXC_SAI2_TXC_SAI2_TX_BCLK 0xd6
+ MX8MQ_IOMUXC_SAI2_TXD0_SAI2_TX_DATA0 0xd6
+ >;
+ };
+
pinctrl_switch_irq: switchgrp {
fsl,pins = <
MX8MQ_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x41
>;
};
+ pinctrl_tpa1: tpa6130-1grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SAI1_TXFS_GPIO4_IO10 0x41
+ >;
+ };
+
+ pinctrl_tpa2: tpa6130-2grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_GPIO1_IO08_GPIO1_IO8 0x41
+ >;
+ };
+
pinctrl_ts: tsgrp {
fsl,pins = <
MX8MQ_IOMUXC_GPIO1_IO11_GPIO1_IO11 0x96
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index a841a023e8e0..17c449e12c2e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -11,6 +11,7 @@
#include "dt-bindings/input/input.h"
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/thermal/thermal.h>
+#include <dt-bindings/interconnect/imx8mq.h>
#include "imx8mq-pinfunc.h"
/ {
@@ -286,11 +287,13 @@
};
soc@0 {
- compatible = "simple-bus";
+ compatible = "fsl,imx8mq-soc", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x0 0x3e000000>;
dma-ranges = <0x40000000 0x0 0x40000000 0xc0000000>;
+ nvmem-cells = <&imx8mq_uid>;
+ nvmem-cell-names = "soc_unique_id";
bus@30000000 { /* AIPS1 */
compatible = "fsl,aips-bus", "simple-bus";
@@ -522,6 +525,8 @@
<&clk IMX8MQ_VIDEO_PLL1>,
<&clk IMX8MQ_VIDEO_PLL1_OUT>;
assigned-clock-rates = <0>, <0>, <0>, <594000000>;
+ interconnects = <&noc IMX8MQ_ICM_LCDIF &noc IMX8MQ_ICS_DRAM>;
+ interconnect-names = "dram";
status = "disabled";
port@0 {
@@ -555,9 +560,17 @@
#address-cells = <1>;
#size-cells = <1>;
+ imx8mq_uid: soc-uid@410 {
+ reg = <0x4 0x8>;
+ };
+
cpu_speed_grade: speed-grade@10 {
reg = <0x10 4>;
};
+
+ fec_mac_address: mac-address@90 {
+ reg = <0x90 6>;
+ };
};
anatop: syscon@30360000 {
@@ -826,6 +839,8 @@
clocks = <&clk IMX8MQ_CLK_ECSPI1_ROOT>,
<&clk IMX8MQ_CLK_ECSPI1_ROOT>;
clock-names = "ipg", "per";
+ dmas = <&sdma1 0 7 1>, <&sdma1 1 7 2>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -838,6 +853,8 @@
clocks = <&clk IMX8MQ_CLK_ECSPI2_ROOT>,
<&clk IMX8MQ_CLK_ECSPI2_ROOT>;
clock-names = "ipg", "per";
+ dmas = <&sdma1 2 7 1>, <&sdma1 3 7 2>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -850,6 +867,8 @@
clocks = <&clk IMX8MQ_CLK_ECSPI3_ROOT>,
<&clk IMX8MQ_CLK_ECSPI3_ROOT>;
clock-names = "ipg", "per";
+ dmas = <&sdma1 4 7 1>, <&sdma1 5 7 2>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -1016,9 +1035,14 @@
reg = <0x30a00300 0x100>;
clocks = <&clk IMX8MQ_CLK_DSI_PHY_REF>;
clock-names = "phy_ref";
- assigned-clocks = <&clk IMX8MQ_CLK_DSI_PHY_REF>;
- assigned-clock-parents = <&clk IMX8MQ_VIDEO_PLL1_OUT>;
- assigned-clock-rates = <24000000>;
+ assigned-clocks = <&clk IMX8MQ_VIDEO_PLL1_REF_SEL>,
+ <&clk IMX8MQ_VIDEO_PLL1_BYPASS>,
+ <&clk IMX8MQ_CLK_DSI_PHY_REF>,
+ <&clk IMX8MQ_VIDEO_PLL1>;
+ assigned-clock-parents = <&clk IMX8MQ_CLK_25M>,
+ <&clk IMX8MQ_VIDEO_PLL1>,
+ <&clk IMX8MQ_VIDEO_PLL1_OUT>;
+ assigned-clock-rates = <0>, <0>, <24000000>, <594000000>;
#phy-cells = <0>;
power-domains = <&pgc_mipi>;
status = "disabled";
@@ -1152,12 +1176,50 @@
<&clk IMX8MQ_CLK_ENET_PHY_REF>;
clock-names = "ipg", "ahb", "ptp",
"enet_clk_ref", "enet_out";
+ assigned-clocks = <&clk IMX8MQ_CLK_ENET_AXI>,
+ <&clk IMX8MQ_CLK_ENET_TIMER>,
+ <&clk IMX8MQ_CLK_ENET_REF>,
+ <&clk IMX8MQ_CLK_ENET_PHY_REF>;
+ assigned-clock-parents = <&clk IMX8MQ_SYS1_PLL_266M>,
+ <&clk IMX8MQ_SYS2_PLL_100M>,
+ <&clk IMX8MQ_SYS2_PLL_125M>,
+ <&clk IMX8MQ_SYS2_PLL_50M>;
+ assigned-clock-rates = <0>, <100000000>, <125000000>, <0>;
fsl,num-tx-queues = <3>;
fsl,num-rx-queues = <3>;
+ nvmem-cells = <&fec_mac_address>;
+ nvmem-cell-names = "mac-address";
+ nvmem_macaddr_swap;
+ fsl,stop-mode = <&iomuxc_gpr 0x10 3>;
status = "disabled";
};
};
+ noc: interconnect@32700000 {
+ compatible = "fsl,imx8mq-noc", "fsl,imx8m-noc";
+ reg = <0x32700000 0x100000>;
+ clocks = <&clk IMX8MQ_CLK_NOC>;
+ fsl,ddrc = <&ddrc>;
+ #interconnect-cells = <1>;
+ operating-points-v2 = <&noc_opp_table>;
+
+ noc_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-133M {
+ opp-hz = /bits/ 64 <133333333>;
+ };
+
+ opp-400M {
+ opp-hz = /bits/ 64 <400000000>;
+ };
+
+ opp-800M {
+ opp-hz = /bits/ 64 <800000000>;
+ };
+ };
+ };
+
bus@32c00000 { /* AIPS4 */
compatible = "fsl,aips-bus", "simple-bus";
reg = <0x32c00000 0x400000>;
@@ -1315,6 +1377,7 @@
<0 0 0 3 &gic GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 4 &gic GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
fsl,max-link-speed = <2>;
+ linux,pci-domain = <0>;
power-domains = <&pgc_pcie>;
resets = <&src IMX8MQ_RESET_PCIEPHY>,
<&src IMX8MQ_RESET_PCIE_CTRL_APPS_EN>,
@@ -1344,6 +1407,7 @@
<0 0 0 3 &gic GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 4 &gic GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
fsl,max-link-speed = <2>;
+ linux,pci-domain = <1>;
power-domains = <&pgc_pcie>;
resets = <&src IMX8MQ_RESET_PCIEPHY2>,
<&src IMX8MQ_RESET_PCIE2_CTRL_APPS_EN>,
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
index 4338db14c5da..ae1c2abaaf36 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
@@ -6,6 +6,8 @@
*
*/
+#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
+
fman0: fman@1a00000 {
#address-cells = <1>;
#size-cells = <1>;
@@ -15,7 +17,7 @@ fman0: fman@1a00000 {
reg = <0x0 0x1a00000 0x0 0xfe000>;
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 3 0>;
+ clocks = <&clockgen QORIQ_CLK_FMAN 0>;
clock-names = "fmanclk";
fsl,qman-channel-range = <0x800 0x10>;
ptimer-handle = <&ptp_timer0>;
@@ -81,6 +83,6 @@ ptp_timer0: ptp-timer@1afe000 {
compatible = "fsl,fman-ptp-timer";
reg = <0x0 0x1afe000 0x0 0x1000>;
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 3 0>;
+ clocks = <&clockgen QORIQ_CLK_FMAN 0>;
fsl,extts-fifo;
};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
index 49c19c6879f9..cab89dc6f596 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
@@ -1113,7 +1113,7 @@
thermal-zones {
- cls0: cls0 {
+ cls0: cls0-thermal {
polling-delay = <1000>;
polling-delay-passive = <100>;
sustainable-power = <4500>;
@@ -1122,13 +1122,13 @@
thermal-sensors = <&tsensor 1>;
trips {
- threshold: trip-point@0 {
+ threshold: trip-point0 {
temperature = <65000>;
hysteresis = <1000>;
type = "passive";
};
- target: trip-point@1 {
+ target: trip-point1 {
temperature = <75000>;
hysteresis = <1000>;
type = "passive";
diff --git a/arch/arm64/boot/dts/hisilicon/hi3670.dtsi b/arch/arm64/boot/dts/hisilicon/hi3670.dtsi
index 85b0dfb35d6d..8830795c8efc 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3670.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3670.dtsi
@@ -194,6 +194,12 @@
#clock-cells = <1>;
};
+ iomcu_rst: reset {
+ compatible = "hisilicon,hi3660-reset";
+ hisi,rst-syscon = <&iomcu>;
+ #reset-cells = <2>;
+ };
+
uart0: serial@fdf02000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0 0xfdf02000 0x0 0x1000>;
@@ -708,5 +714,76 @@
card-detect-delay = <200>;
status = "disabled";
};
+
+ /* I2C */
+ i2c0: i2c@ffd71000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x0 0xffd71000 0x0 0x1000>;
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <400000>;
+ clocks = <&iomcu HI3670_CLK_GATE_I2C0>;
+ resets = <&iomcu_rst 0x20 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pmx_func &i2c0_cfg_func>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@ffd72000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x0 0xffd72000 0x0 0x1000>;
+ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <400000>;
+ clocks = <&iomcu HI3670_CLK_GATE_I2C1>;
+ resets = <&iomcu_rst 0x20 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pmx_func &i2c1_cfg_func>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@ffd73000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x0 0xffd73000 0x0 0x1000>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <400000>;
+ clocks = <&iomcu HI3670_CLK_GATE_I2C2>;
+ resets = <&iomcu_rst 0x20 5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pmx_func &i2c2_cfg_func>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@fdf0c000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x0 0xfdf0c000 0x0 0x1000>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <400000>;
+ clocks = <&crg_ctrl HI3670_CLK_GATE_I2C3>;
+ resets = <&crg_rst 0x78 7>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_pmx_func &i2c3_cfg_func>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@fdf0d000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x0 0xfdf0d000 0x0 0x1000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <400000>;
+ clocks = <&crg_ctrl HI3670_CLK_GATE_I2C4>;
+ resets = <&crg_rst 0x78 27>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_pmx_func &i2c4_cfg_func>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
index 81d09434c5c6..a83b9d4f172e 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
@@ -113,7 +113,7 @@
#size-cells = <1>;
ranges = <0x0 0x8a20000 0x1000>;
- usb2_phy1: usb2-phy@120 {
+ usb2_phy1: usb2_phy@120 {
compatible = "hisilicon,hi3798cv200-usb2-phy";
reg = <0x120 0x4>;
clocks = <&crg HISTB_USB2_PHY1_REF_CLK>;
@@ -134,7 +134,7 @@
};
};
- usb2_phy2: usb2-phy@124 {
+ usb2_phy2: usb2_phy@124 {
compatible = "hisilicon,hi3798cv200-usb2-phy";
reg = <0x124 0x4>;
clocks = <&crg HISTB_USB2_PHY2_REF_CLK>;
@@ -565,8 +565,8 @@
device_type = "pci";
bus-range = <0x00 0xff>;
num-lanes = <1>;
- ranges = <0x81000000 0x0 0x00000000 0x4f00000 0x0 0x100000
- 0x82000000 0x0 0x3000000 0x3000000 0x0 0x01f00000>;
+ ranges = <0x81000000 0x0 0x00000000 0x4f00000 0x0 0x100000>,
+ <0x82000000 0x0 0x3000000 0x3000000 0x0 0x01f00000>;
interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
#interrupt-cells = <1>;
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index c6580c9f068e..d426c6c8722b 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -872,7 +872,7 @@
thermal-zones {
- cls0: cls0 {
+ cls0: cls0-thermal {
polling-delay = <1000>;
polling-delay-passive = <100>;
sustainable-power = <3326>;
@@ -881,13 +881,13 @@
thermal-sensors = <&tsensor 2>;
trips {
- threshold: trip-point@0 {
+ threshold: trip-point0 {
temperature = <65000>;
hysteresis = <0>;
type = "passive";
};
- target: trip-point@1 {
+ target: trip-point1 {
temperature = <75000>;
hysteresis = <0>;
type = "passive";
@@ -1053,7 +1053,7 @@
"ppmmu3";
clocks = <&media_ctrl HI6220_G3D_CLK>,
<&media_ctrl HI6220_G3D_PCLK>;
- clock-names = "core", "bus";
+ clock-names = "bus", "core";
assigned-clocks = <&media_ctrl HI6220_G3D_CLK>,
<&media_ctrl HI6220_G3D_PCLK>;
assigned-clock-rates = <500000000>, <144000000>;
diff --git a/arch/arm64/boot/dts/hisilicon/hikey970-pinctrl.dtsi b/arch/arm64/boot/dts/hisilicon/hikey970-pinctrl.dtsi
index d456b0aa6f58..77bd8c3a8314 100644
--- a/arch/arm64/boot/dts/hisilicon/hikey970-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hikey970-pinctrl.dtsi
@@ -61,6 +61,153 @@
0x060 MUX_M1 /* UART6_TXD */
>;
};
+
+ i2c3_pmx_func: i2c3_pmx_func {
+ pinctrl-single,pins = <
+ 0x010 MUX_M1 /* I2C3_SCL */
+ 0x014 MUX_M1 /* I2C3_SDA */
+ >;
+ };
+
+ i2c4_pmx_func: i2c4_pmx_func {
+ pinctrl-single,pins = <
+ 0x03c MUX_M1 /* I2C4_SCL */
+ 0x040 MUX_M1 /* I2C4_SDA */
+ >;
+ };
+
+ cam0_rst_pmx_func: cam0_rst_pmx_func {
+ pinctrl-single,pins = <
+ 0x714 MUX_M0 /* CAM0_RST */
+ >;
+ };
+
+ cam1_rst_pmx_func: cam1_rst_pmx_func {
+ pinctrl-single,pins = <
+ 0x048 MUX_M0 /* CAM1_RST */
+ >;
+ };
+
+ cam0_pwd_n_pmx_func: cam0_pwd_n_pmx_func {
+ pinctrl-single,pins = <
+ 0x098 MUX_M0 /* CAM0_PWD_N */
+ >;
+ };
+
+ cam1_pwd_n_pmx_func: cam1_pwd_n_pmx_func {
+ pinctrl-single,pins = <
+ 0x044 MUX_M0 /* CAM1_PWD_N */
+ >;
+ };
+
+ isp0_pmx_func: isp0_pmx_func {
+ pinctrl-single,pins = <
+ 0x018 MUX_M1 /* ISP_CLK0 */
+ 0x024 MUX_M1 /* ISP_SCL0 */
+ 0x028 MUX_M1 /* ISP_SDA0 */
+ >;
+ };
+
+ isp1_pmx_func: isp1_pmx_func {
+ pinctrl-single,pins = <
+ 0x01c MUX_M1 /* ISP_CLK1 */
+ 0x02c MUX_M1 /* ISP_SCL1 */
+ 0x030 MUX_M1 /* ISP_SDA1 */
+ >;
+ };
+ };
+
+ pmx1: pinmux@fff11000 {
+ compatible = "pinctrl-single";
+ reg = <0x0 0xfff11000 0x0 0x73c>;
+ #gpio-range-cells = <0x3>;
+ #pinctrl-cells = <1>;
+ pinctrl-single,register-width = <0x20>;
+ pinctrl-single,function-mask = <0x7>;
+ /* pin base, nr pins & gpio function */
+ pinctrl-single,gpio-range = <&range 0 46 0>;
+
+ pwr_key_pmx_func: pwr_key_pmx_func {
+ pinctrl-single,pins = <
+ 0x064 MUX_M0 /* GPIO_203 */
+ >;
+ };
+
+ pd_pmx_func: pd_pmx_func {
+ pinctrl-single,pins = <
+ 0x080 MUX_M0 /* GPIO_221 */
+ >;
+ };
+
+ i2s2_pmx_func: i2s2_pmx_func {
+ pinctrl-single,pins = <
+ 0x050 MUX_M1 /* I2S2_DI */
+ 0x054 MUX_M1 /* I2S2_DO */
+ 0x058 MUX_M1 /* I2S2_XCLK */
+ 0x05c MUX_M1 /* I2S2_XFS */
+ >;
+ };
+
+ spi0_pmx_func: spi0_pmx_func {
+ pinctrl-single,pins = <
+ 0x094 MUX_M1 /* SPI0_CLK */
+ 0x098 MUX_M1 /* SPI0_DI */
+ 0x09c MUX_M1 /* SPI0_DO */
+ 0x0a0 MUX_M1 /* SPI0_CS0_N */
+ >;
+ };
+
+ spi2_pmx_func: spi2_pmx_func {
+ pinctrl-single,pins = <
+ 0x710 MUX_M1 /* SPI2_CLK */
+ 0x714 MUX_M1 /* SPI2_DI */
+ 0x718 MUX_M1 /* SPI2_DO */
+ 0x71c MUX_M1 /* SPI2_CS0_N */
+ >;
+ };
+
+ spi3_pmx_func: spi3_pmx_func {
+ pinctrl-single,pins = <
+ 0x72c MUX_M1 /* SPI3_CLK */
+ 0x730 MUX_M1 /* SPI3_DI */
+ 0x734 MUX_M1 /* SPI3_DO */
+ 0x738 MUX_M1 /* SPI3_CS0_N */
+ >;
+ };
+
+ i2c0_pmx_func: i2c0_pmx_func {
+ pinctrl-single,pins = <
+ 0x020 MUX_M1 /* I2C0_SCL */
+ 0x024 MUX_M1 /* I2C0_SDA */
+ >;
+ };
+
+ i2c1_pmx_func: i2c1_pmx_func {
+ pinctrl-single,pins = <
+ 0x028 MUX_M1 /* I2C1_SCL */
+ 0x02c MUX_M1 /* I2C1_SDA */
+ >;
+ };
+ i2c2_pmx_func: i2c2_pmx_func {
+ pinctrl-single,pins = <
+ 0x030 MUX_M1 /* I2C2_SCL */
+ 0x034 MUX_M1 /* I2C2_SDA */
+ >;
+ };
+
+ pcie_clkreq_pmx_func: pcie_clkreq_pmx_func {
+ pinctrl-single,pins = <
+ 0x084 MUX_M1 /* PCIE0_CLKREQ_N */
+ >;
+ };
+
+ gpio185_pmx_func: gpio185_pmx_func {
+ pinctrl-single,pins = <0x01C 0x1>;
+ };
+
+ gpio185_pmx_idle: gpio185_pmx_idle {
+ pinctrl-single,pins = <0x01C 0x0>;
+ };
};
pmx2: pinmux@e896c800 {
@@ -184,6 +331,180 @@
DRIVE7_02MA DRIVE6_MASK
>;
};
+
+ i2c3_cfg_func: i2c3_cfg_func {
+ pinctrl-single,pins = <
+ 0x014 0x0 /* I2C3_SCL */
+ 0x018 0x0 /* I2C3_SDA */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_04MA DRIVE6_MASK
+ >;
+ };
+
+ i2c4_cfg_func: i2c4_cfg_func {
+ pinctrl-single,pins = <
+ 0x040 0x0 /* I2C4_SCL */
+ 0x044 0x0 /* I2C4_SDA */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_04MA DRIVE6_MASK
+ >;
+ };
+
+ cam0_rst_cfg_func: cam0_rst_cfg_func {
+ pinctrl-single,pins = <
+ 0x714 0x0 /* CAM0_RST */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_04MA DRIVE6_MASK
+ >;
+ };
+
+ cam1_rst_cfg_func: cam1_rst_cfg_func {
+ pinctrl-single,pins = <
+ 0x04C 0x0 /* CAM1_RST */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_04MA DRIVE6_MASK
+ >;
+ };
+
+ cam0_pwd_n_cfg_func: cam0_pwd_n_cfg_func {
+ pinctrl-single,pins = <
+ 0x09C 0x0 /* CAM0_PWD_N */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_04MA DRIVE6_MASK
+ >;
+ };
+
+ cam1_pwd_n_cfg_func: cam1_pwd_n_cfg_func {
+ pinctrl-single,pins = <
+ 0x048 0x0 /* CAM1_PWD_N */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_04MA DRIVE6_MASK
+ >;
+ };
+
+ isp0_cfg_func: isp0_cfg_func {
+ pinctrl-single,pins = <
+ 0x01C 0x0 /* ISP_CLK0 */
+ 0x028 0x0 /* ISP_SCL0 */
+ 0x02C 0x0 /* ISP_SDA0 */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_04MA DRIVE6_MASK
+ >;
+ };
+
+ isp1_cfg_func: isp1_cfg_func {
+ pinctrl-single,pins = <
+ 0x020 0x0 /* ISP_CLK1 */
+ 0x030 0x0 /* ISP_SCL1 */
+ 0x034 0x0 /* ISP_SDA1 */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_04MA DRIVE6_MASK
+ >;
+ };
};
pmx5: pinmux@fc182000 {
@@ -338,22 +659,311 @@
};
};
- pmx1: pinmux@fff11000 {
- compatible = "pinctrl-single";
- reg = <0x0 0xfff11000 0x0 0x73c>;
- #gpio-range-cells = <0x3>;
- #pinctrl-cells = <1>;
- pinctrl-single,register-width = <0x20>;
- pinctrl-single,function-mask = <0x7>;
- /* pin base, nr pins & gpio function */
- pinctrl-single,gpio-range = <&range 0 46 0>;
- };
-
pmx16: pinmux@fff11800 {
compatible = "pinconf-single";
reg = <0x0 0xfff11800 0x0 0x73c>;
#pinctrl-cells = <1>;
pinctrl-single,register-width = <0x20>;
+
+ pwr_key_cfg_func: pwr_key_cfg_func {
+ pinctrl-single,pins = <
+ 0x090 0x0 /* GPIO_203 */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ usb_cfg_func: usb_cfg_func {
+ pinctrl-single,pins = <
+ 0x0AC 0x0 /* GPIO_221 */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ spi0_cfg_func: spi0_cfg_func {
+ pinctrl-single,pins = <
+ 0x0c8 0x0 /* SPI0_DI */
+ 0x0cc 0x0 /* SPI0_DO */
+ 0x0d0 0x0 /* SPI0_CS0_N */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_06MA DRIVE6_MASK
+ >;
+ };
+
+ spi2_cfg_func: spi2_cfg_func {
+ pinctrl-single,pins = <
+ 0x714 0x0 /* SPI2_DI */
+ 0x718 0x0 /* SPI2_DO */
+ 0x71c 0x0 /* SPI2_CS0_N */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_06MA DRIVE6_MASK
+ >;
+ };
+
+ spi3_cfg_func: spi3_cfg_func {
+ pinctrl-single,pins = <
+ 0x730 0x0 /* SPI3_DI */
+ 0x734 0x0 /* SPI3_DO */
+ 0x738 0x0 /* SPI3_CS0_N */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_06MA DRIVE6_MASK
+ >;
+ };
+
+ spi0_clk_cfg_func: spi0_clk_cfg_func {
+ pinctrl-single,pins = <
+ 0x0c4 0x0 /* SPI0_CLK */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_10MA DRIVE6_MASK
+ >;
+ };
+
+ spi2_clk_cfg_func: spi2_clk_cfg_func {
+ pinctrl-single,pins = <
+ 0x710 0x0 /* SPI2_CLK */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_10MA DRIVE6_MASK
+ >;
+ };
+
+ spi3_clk_cfg_func: spi3_clk_cfg_func {
+ pinctrl-single,pins = <
+ 0x72c 0x0 /* SPI3_CLK */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_10MA DRIVE6_MASK
+ >;
+ };
+
+ i2c0_cfg_func: i2c0_cfg_func {
+ pinctrl-single,pins = <
+ 0x04c 0x0 /* I2C0_SCL */
+ 0x050 0x0 /* I2C0_SDA */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_04MA DRIVE6_MASK
+ >;
+ };
+
+ i2c1_cfg_func: i2c1_cfg_func {
+ pinctrl-single,pins = <
+ 0x054 0x0 /* I2C1_SCL */
+ 0x058 0x0 /* I2C1_SDA */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_04MA DRIVE6_MASK
+ >;
+ };
+
+ i2c2_cfg_func: i2c2_cfg_func {
+ pinctrl-single,pins = <
+ 0x05c 0x0 /* I2C2_SCL */
+ 0x060 0x0 /* I2C2_SDA */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_04MA DRIVE6_MASK
+ >;
+ };
+
+ pcie_clkreq_cfg_func: pcie_clkreq_cfg_func {
+ pinctrl-single,pins = <
+ 0x0b0 0x0
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_06MA DRIVE6_MASK
+ >;
+ };
+ i2s2_cfg_func: i2s2_cfg_func {
+ pinctrl-single,pins = <
+ 0x07c 0x0 /* I2S2_DI */
+ 0x080 0x0 /* I2S2_DO */
+ 0x084 0x0 /* I2S2_XCLK */
+ 0x088 0x0 /* I2S2_XFS */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE7_02MA DRIVE6_MASK
+ >;
+ };
+
+ gpio185_cfg_func: gpio185_cfg_func {
+ pinctrl-single,pins = <0x048 0>;
+ pinctrl-single,bias-pulldown = <0 2 0 2>;
+ pinctrl-single,bias-pullup = <0 1 0 1>;
+ pinctrl-single,drive-strength = <0x00 0x70>;
+ pinctrl-single,slew-rate = <0x0 0x80>;
+ };
+
+ gpio185_cfg_idle: gpio185_cfg_idle {
+ pinctrl-single,pins = <0x048 0>;
+ pinctrl-single,bias-pulldown = <2 2 0 2>;
+ pinctrl-single,bias-pullup = <0 1 0 1>;
+ pinctrl-single,drive-strength = <0x00 0x70>;
+ pinctrl-single,slew-rate = <0x0 0x80>;
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/hisilicon/hip05.dtsi b/arch/arm64/boot/dts/hisilicon/hip05.dtsi
index 405acaa3e9dd..4aed8d440b3a 100644
--- a/arch/arm64/boot/dts/hisilicon/hip05.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip05.dtsi
@@ -318,7 +318,7 @@
status = "disabled";
};
- lbc: localbus@80380000 {
+ lbc: local-bus@80380000 {
compatible = "hisilicon,hisi-localbus", "simple-bus";
reg = <0x0 0x80380000 0x0 0x10000>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/hisilicon/hip06.dtsi b/arch/arm64/boot/dts/hisilicon/hip06.dtsi
index 7980709e21ff..7deca5f763d5 100644
--- a/arch/arm64/boot/dts/hisilicon/hip06.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip06.dtsi
@@ -335,7 +335,6 @@
reg = <0x0 0xa0040000 0x0 0x20000>;
#iommu-cells = <1>;
dma-coherent;
- smmu-cb-memtype = <0x0 0x1>;
hisilicon,broken-prefetch-cmd;
status = "disabled";
};
@@ -737,9 +736,8 @@
#size-cells = <2>;
device_type = "pci";
dma-coherent;
- ranges = <0x02000000 0 0xb2000000 0x0 0xb2000000 0
- 0x5ff0000 0x01000000 0 0 0 0xb7ff0000
- 0 0x10000>;
+ ranges = <0x02000000 0 0xb2000000 0x0 0xb2000000 0 0x5ff0000>,
+ <0x01000000 0 0 0 0xb7ff0000 0 0x10000>;
#interrupt-cells = <1>;
interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map = <0x0 0 0 1 &mbigen_pcie0 650 4
diff --git a/arch/arm64/boot/dts/hisilicon/hip07.dtsi b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
index 7832d9cdec21..2172d8071181 100644
--- a/arch/arm64/boot/dts/hisilicon/hip07.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
@@ -1166,7 +1166,6 @@
reg = <0x0 0xa0040000 0x0 0x20000>;
#iommu-cells = <1>;
dma-coherent;
- smmu-cb-memtype = <0x0 0x1>;
hisilicon,broken-prefetch-cmd;
status = "disabled";
};
@@ -1181,7 +1180,6 @@
#iommu-cells = <1>;
dma-coherent;
hisilicon,broken-prefetch-cmd;
- /* smmu-cb-memtype = <0x0 0x1>;*/
};
p0_smmu_alg_b: iommu@8d0040000 {
compatible = "arm,smmu-v3";
@@ -1194,7 +1192,6 @@
#iommu-cells = <1>;
dma-coherent;
hisilicon,broken-prefetch-cmd;
- /* smmu-cb-memtype = <0x0 0x1>;*/
};
p1_smmu_alg_a: iommu@400d0040000 {
compatible = "arm,smmu-v3";
@@ -1207,7 +1204,6 @@
#iommu-cells = <1>;
dma-coherent;
hisilicon,broken-prefetch-cmd;
- /* smmu-cb-memtype = <0x0 0x1>;*/
};
p1_smmu_alg_b: iommu@408d0040000 {
compatible = "arm,smmu-v3";
@@ -1220,7 +1216,6 @@
#iommu-cells = <1>;
dma-coherent;
hisilicon,broken-prefetch-cmd;
- /* smmu-cb-memtype = <0x0 0x1>;*/
};
soc {
@@ -1708,8 +1703,8 @@
#size-cells = <2>;
device_type = "pci";
dma-coherent;
- ranges = <0x02000000 0 0xa8000000 0 0xa8000000 0 0x77f0000
- 0x01000000 0 0 0 0xaf7f0000 0 0x10000>;
+ ranges = <0x02000000 0 0xa8000000 0 0xa8000000 0 0x77f0000>,
+ <0x01000000 0 0 0 0xaf7f0000 0 0x10000>;
#interrupt-cells = <1>;
interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map = <0x0 0 0 1 &mbigen_pcie2_a 671 4
diff --git a/arch/arm64/boot/dts/intel/Makefile b/arch/arm64/boot/dts/intel/Makefile
index 296eceec4276..3a052540605b 100644
--- a/arch/arm64/boot/dts/intel/Makefile
+++ b/arch/arm64/boot/dts/intel/Makefile
@@ -2,3 +2,4 @@
dtb-$(CONFIG_ARCH_AGILEX) += socfpga_agilex_socdk.dtb \
socfpga_agilex_socdk_nand.dtb
dtb-$(CONFIG_ARCH_KEEMBAY) += keembay-evm.dtb
+dtb-$(CONFIG_ARCH_N5X) += socfpga_n5x_socdk.dtb
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
index e1c0fcba5c20..07c099b4ed5b 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
@@ -166,7 +166,7 @@
rx-fifo-depth = <16384>;
snps,multicast-filter-bins = <256>;
iommus = <&smmu 2>;
- altr,sysmgr-syscon = <&sysmgr 0x48 8>;
+ altr,sysmgr-syscon = <&sysmgr 0x48 0>;
clocks = <&clkmgr AGILEX_EMAC1_CLK>, <&clkmgr AGILEX_EMAC_PTP_CLK>;
clock-names = "stmmaceth", "ptp_ref";
status = "disabled";
@@ -184,7 +184,7 @@
rx-fifo-depth = <16384>;
snps,multicast-filter-bins = <256>;
iommus = <&smmu 3>;
- altr,sysmgr-syscon = <&sysmgr 0x4c 16>;
+ altr,sysmgr-syscon = <&sysmgr 0x4c 0>;
clocks = <&clkmgr AGILEX_EMAC2_CLK>, <&clkmgr AGILEX_EMAC_PTP_CLK>;
clock-names = "stmmaceth", "ptp_ref";
status = "disabled";
diff --git a/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts
new file mode 100644
index 000000000000..5f56e2697fee
--- /dev/null
+++ b/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021, Intel Corporation
+ */
+#include "socfpga_agilex.dtsi"
+
+/ {
+ model = "eASIC N5X SoCDK";
+
+ aliases {
+ serial0 = &uart0;
+ ethernet0 = &gmac0;
+ ethernet1 = &gmac1;
+ ethernet2 = &gmac2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the reg */
+ reg = <0 0 0 0>;
+ };
+
+ soc {
+ clocks {
+ osc1 {
+ clock-frequency = <25000000>;
+ };
+ };
+ };
+};
+
+&clkmgr {
+ compatible = "intel,easic-n5x-clkmgr";
+};
+
+&mmc {
+ status = "okay";
+ cap-sd-highspeed;
+ broken-cd;
+ bus-width = <4>;
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&watchdog0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
index daffe136c523..5fc613d24151 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
@@ -67,8 +67,6 @@
/* J6 */
&sata {
status = "okay";
- phys = <&comphy2 0>;
- phy-names = "sata-phy";
};
/* U11 */
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
index f5ec3b644769..d239ab70ed99 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
@@ -205,7 +205,7 @@
};
partition@20000 {
- label = "u-boot";
+ label = "a53-firmware";
reg = <0x20000 0x160000>;
};
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index d5b6c0a1c54a..7a2df148c6a3 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -458,6 +458,8 @@
reg = <0xe0000 0x178>;
interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&nb_periph_clk 1>;
+ phys = <&comphy2 0>;
+ phy-names = "sata-phy";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-ap807.dtsi b/arch/arm64/boot/dts/marvell/armada-ap807.dtsi
index 623010f3ca89..d9bbbfa4b4eb 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap807.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap807.dtsi
@@ -27,3 +27,8 @@
#clock-cells = <1>;
};
};
+
+&ap_sdhci0 {
+ compatible = "marvell,armada-ap807-sdhci";
+};
+
diff --git a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
index 12e477f1aeb9..6614472100c2 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
@@ -281,6 +281,9 @@
gpio-controller;
#gpio-cells = <2>;
gpio-ranges = <&ap_pinctrl 0 0 20>;
+ marvell,pwm-offset = <0x10c0>;
+ #pwm-cells = <2>;
+ clocks = <&ap_clk 3>;
};
};
diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
index 994a2fce449a..64179a372ecf 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
@@ -59,7 +59,7 @@
CP11X_LABEL(ethernet): ethernet@0 {
compatible = "marvell,armada-7k-pp22";
- reg = <0x0 0x100000>, <0x129000 0xb000>;
+ reg = <0x0 0x100000>, <0x129000 0xb000>, <0x220000 0x800>;
clocks = <&CP11X_LABEL(clk) 1 3>, <&CP11X_LABEL(clk) 1 9>,
<&CP11X_LABEL(clk) 1 5>, <&CP11X_LABEL(clk) 1 6>,
<&CP11X_LABEL(clk) 1 18>;
@@ -234,12 +234,17 @@
gpio-controller;
#gpio-cells = <2>;
gpio-ranges = <&CP11X_LABEL(pinctrl) 0 0 32>;
+ marvell,pwm-offset = <0x1f0>;
+ #pwm-cells = <2>;
interrupt-controller;
interrupts = <86 IRQ_TYPE_LEVEL_HIGH>,
<85 IRQ_TYPE_LEVEL_HIGH>,
<84 IRQ_TYPE_LEVEL_HIGH>,
<83 IRQ_TYPE_LEVEL_HIGH>;
#interrupt-cells = <2>;
+ clock-names = "core", "axi";
+ clocks = <&CP11X_LABEL(clk) 1 21>,
+ <&CP11X_LABEL(clk) 1 17>;
status = "disabled";
};
@@ -250,12 +255,17 @@
gpio-controller;
#gpio-cells = <2>;
gpio-ranges = <&CP11X_LABEL(pinctrl) 0 32 31>;
+ marvell,pwm-offset = <0x1f0>;
+ #pwm-cells = <2>;
interrupt-controller;
interrupts = <82 IRQ_TYPE_LEVEL_HIGH>,
<81 IRQ_TYPE_LEVEL_HIGH>,
<80 IRQ_TYPE_LEVEL_HIGH>,
<79 IRQ_TYPE_LEVEL_HIGH>;
#interrupt-cells = <2>;
+ clock-names = "core", "axi";
+ clocks = <&CP11X_LABEL(clk) 1 21>,
+ <&CP11X_LABEL(clk) 1 17>;
status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/marvell/cn9130-db.dts b/arch/arm64/boot/dts/marvell/cn9130-db.dts
index ce49a70d88a0..79020e6d2792 100644
--- a/arch/arm64/boot/dts/marvell/cn9130-db.dts
+++ b/arch/arm64/boot/dts/marvell/cn9130-db.dts
@@ -113,6 +113,8 @@
&ap_sdhci0 {
pinctrl-names = "default";
bus-width = <8>;
+ mmc-ddr-1_8v;
+ mmc-hs400-1_8v;
vqmmc-supply = <&ap0_reg_sd_vccq>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
index 18f7b46c4095..deba27ab7657 100644
--- a/arch/arm64/boot/dts/mediatek/Makefile
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -13,6 +13,7 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-elm-hana.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-elm-hana-rev7.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-evb.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-krane-sku0.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-krane-sku176.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8192-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8516-pumpkin.dtb
diff --git a/arch/arm64/boot/dts/mediatek/mt6779.dtsi b/arch/arm64/boot/dts/mediatek/mt6779.dtsi
index 370f309d32de..9bdf5145966c 100644
--- a/arch/arm64/boot/dts/mediatek/mt6779.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt6779.dtsi
@@ -189,6 +189,23 @@
#clock-cells = <1>;
};
+ pwrap: pwrap@1000d000 {
+ compatible = "mediatek,mt6779-pwrap";
+ reg = <0 0x1000d000 0 0x1000>;
+ reg-names = "pwrap";
+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk26m>, <&infracfg_ao CLK_INFRA_PMIC_AP>;
+ clock-names = "spi", "wrap";
+ };
+
+ devapc: devapc@10207000 {
+ compatible = "mediatek,mt6779-devapc";
+ reg = <0 0x10207000 0 0x1000>;
+ interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infracfg_ao CLK_INFRA_DEVICE_APC>;
+ clock-names = "devapc-infra-clock";
+ };
+
uart0: serial@11002000 {
compatible = "mediatek,mt6779-uart",
"mediatek,mt6577-uart";
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 5b9ec032ce8d..7c6d871538a6 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -698,6 +698,8 @@
clocks = <&pericfg CLK_PERI_MSDC30_1_PD>,
<&topckgen CLK_TOP_AXI_SEL>;
clock-names = "source", "hclk";
+ resets = <&pericfg MT7622_PERI_MSDC1_SW_RST>;
+ reset-names = "hrst";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
index cba2d8933e79..3249c959f76f 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
@@ -344,6 +344,12 @@
bias-disable;
};
};
+
+ pwm_pins_1: pwm1 {
+ pins_pwm {
+ pinmux = <PINMUX_GPIO90__FUNC_PWM_A>;
+ };
+ };
};
&spi0 {
@@ -392,3 +398,9 @@
&uart0 {
status = "okay";
};
+
+&pwm1 {
+ status = "okay";
+ pinctrl-0 = <&pwm_pins_1>;
+ pinctrl-names = "default";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane-sku0.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane-sku0.dts
new file mode 100644
index 000000000000..fb5ee91b6fe0
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane-sku0.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Device-tree for Krane sku0.
+ *
+ * SKU is an 8-bit value (0x00 == 0):
+ * - Bits 7..4: Panel ID: 0x0 (AUO)
+ * - Bits 3..0: SKU ID: 0x0 (default)
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-krane.dtsi"
+
+/ {
+ model = "MediaTek krane sku0 board";
+ compatible = "google,krane-sku0", "google,krane", "mediatek,mt8183";
+};
+
+&panel {
+ status = "okay";
+ compatible = "auo,kd101n80-45na";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane-sku176.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane-sku176.dts
index 47113e275cb5..721d16f9c3b4 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane-sku176.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane-sku176.dts
@@ -16,3 +16,8 @@
model = "MediaTek krane sku176 board";
compatible = "google,krane-sku176", "google,krane", "mediatek,mt8183";
};
+
+&panel {
+ status = "okay";
+ compatible = "boe,tv101wum-nl6";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
index bf2ad1294dd3..ff56bcfa3370 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
@@ -249,6 +249,36 @@
proc-supply = <&mt6358_vproc11_reg>;
};
+&dsi0 {
+ status = "okay";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel: panel@0 {
+ /* compatible will be set in board dts */
+ reg = <0>;
+ enable-gpios = <&pio 45 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&panel_pins_default>;
+ avdd-supply = <&ppvarn_lcd>;
+ avee-supply = <&ppvarp_lcd>;
+ pp1800-supply = <&pp1800_lcd>;
+ backlight = <&backlight_lcd0>;
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+ };
+
+ ports {
+ port {
+ dsi_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+};
+
&i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins>;
@@ -290,6 +320,10 @@
clock-frequency = <100000>;
};
+&mipi_tx0 {
+ status = "okay";
+};
+
&mmc0 {
status = "okay";
pinctrl-names = "default", "state_uhs";
@@ -547,6 +581,14 @@
};
};
+ panel_pins_default: panel_pins_default {
+ panel_reset {
+ pinmux = <PINMUX_GPIO45__FUNC_GPIO45>;
+ output-low;
+ bias-pull-up;
+ };
+ };
+
pwm0_pin_default: pwm0_pin_default {
pins1 {
pinmux = <PINMUX_GPIO176__FUNC_GPIO176>;
@@ -709,6 +751,10 @@
};
};
+&mfg {
+ domain-supply = <&mt6358_vgpu_reg>;
+};
+
&soc_data {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index 5b782a4769e7..80519a145f13 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -6,7 +6,7 @@
*/
#include <dt-bindings/clock/mt8183-clk.h>
-#include <dt-bindings/gce/mt8173-gce.h>
+#include <dt-bindings/gce/mt8183-gce.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/memory/mt8183-larb-port.h>
@@ -360,7 +360,7 @@
#size-cells = <0>;
#power-domain-cells = <1>;
- power-domain@MT8183_POWER_DOMAIN_MFG {
+ mfg: power-domain@MT8183_POWER_DOMAIN_MFG {
reg = <MT8183_POWER_DOMAIN_MFG>;
#address-cells = <1>;
#size-cells = <0>;
@@ -661,12 +661,27 @@
compatible = "mediatek,mt8183-disp-pwm";
reg = <0 0x1100e000 0 0x1000>;
interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
#pwm-cells = <2>;
clocks = <&topckgen CLK_TOP_MUX_DISP_PWM>,
<&infracfg CLK_INFRA_DISP_PWM>;
clock-names = "main", "mm";
};
+ pwm1: pwm@11006000 {
+ compatible = "mediatek,mt8183-pwm";
+ reg = <0 0x11006000 0 0x1000>;
+ #pwm-cells = <2>;
+ clocks = <&infracfg CLK_INFRA_PWM>,
+ <&infracfg CLK_INFRA_PWM_HCLK>,
+ <&infracfg CLK_INFRA_PWM1>,
+ <&infracfg CLK_INFRA_PWM2>,
+ <&infracfg CLK_INFRA_PWM3>,
+ <&infracfg CLK_INFRA_PWM4>;
+ clock-names = "top", "main", "pwm1", "pwm2", "pwm3",
+ "pwm4";
+ };
+
i2c3: i2c@1100f000 {
compatible = "mediatek,mt8183-i2c";
reg = <0 0x1100f000 0 0x1000>,
@@ -1011,7 +1026,7 @@
clocks = <&mmsys CLK_MM_DISP_RDMA0>;
iommus = <&iommu M4U_PORT_DISP_RDMA0>;
mediatek,larb = <&larb0>;
- mediatek,rdma_fifo_size = <5120>;
+ mediatek,rdma-fifo-size = <5120>;
mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xb000 0x1000>;
};
@@ -1023,7 +1038,7 @@
clocks = <&mmsys CLK_MM_DISP_RDMA1>;
iommus = <&iommu M4U_PORT_DISP_RDMA1>;
mediatek,larb = <&larb0>;
- mediatek,rdma_fifo_size = <2048>;
+ mediatek,rdma-fifo-size = <2048>;
mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xc000 0x1000>;
};
@@ -1055,8 +1070,7 @@
};
gamma0: gamma@14011000 {
- compatible = "mediatek,mt8183-disp-gamma",
- "mediatek,mt8173-disp-gamma";
+ compatible = "mediatek,mt8183-disp-gamma";
reg = <0 0x14011000 0 0x1000>;
interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
index e12e024de122..9757138a8bbd 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
@@ -39,6 +39,7 @@
reg = <0x000>;
enable-method = "psci";
clock-frequency = <1701000000>;
+ cpu-idle-states = <&cpuoff_l &clusteroff_l>;
next-level-cache = <&l2_0>;
capacity-dmips-mhz = <530>;
};
@@ -49,6 +50,7 @@
reg = <0x100>;
enable-method = "psci";
clock-frequency = <1701000000>;
+ cpu-idle-states = <&cpuoff_l &clusteroff_l>;
next-level-cache = <&l2_0>;
capacity-dmips-mhz = <530>;
};
@@ -59,6 +61,7 @@
reg = <0x200>;
enable-method = "psci";
clock-frequency = <1701000000>;
+ cpu-idle-states = <&cpuoff_l &clusteroff_l>;
next-level-cache = <&l2_0>;
capacity-dmips-mhz = <530>;
};
@@ -69,6 +72,7 @@
reg = <0x300>;
enable-method = "psci";
clock-frequency = <1701000000>;
+ cpu-idle-states = <&cpuoff_l &clusteroff_l>;
next-level-cache = <&l2_0>;
capacity-dmips-mhz = <530>;
};
@@ -79,6 +83,7 @@
reg = <0x400>;
enable-method = "psci";
clock-frequency = <2171000000>;
+ cpu-idle-states = <&cpuoff_b &clusteroff_b>;
next-level-cache = <&l2_1>;
capacity-dmips-mhz = <1024>;
};
@@ -89,6 +94,7 @@
reg = <0x500>;
enable-method = "psci";
clock-frequency = <2171000000>;
+ cpu-idle-states = <&cpuoff_b &clusteroff_b>;
next-level-cache = <&l2_1>;
capacity-dmips-mhz = <1024>;
};
@@ -99,6 +105,7 @@
reg = <0x600>;
enable-method = "psci";
clock-frequency = <2171000000>;
+ cpu-idle-states = <&cpuoff_b &clusteroff_b>;
next-level-cache = <&l2_1>;
capacity-dmips-mhz = <1024>;
};
@@ -109,6 +116,7 @@
reg = <0x700>;
enable-method = "psci";
clock-frequency = <2171000000>;
+ cpu-idle-states = <&cpuoff_b &clusteroff_b>;
next-level-cache = <&l2_1>;
capacity-dmips-mhz = <1024>;
};
@@ -158,6 +166,42 @@
l3_0: l3-cache {
compatible = "cache";
};
+
+ idle-states {
+ entry-method = "arm,psci";
+ cpuoff_l: cpuoff_l {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x00010001>;
+ local-timer-stop;
+ entry-latency-us = <55>;
+ exit-latency-us = <140>;
+ min-residency-us = <780>;
+ };
+ cpuoff_b: cpuoff_b {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x00010001>;
+ local-timer-stop;
+ entry-latency-us = <35>;
+ exit-latency-us = <145>;
+ min-residency-us = <720>;
+ };
+ clusteroff_l: clusteroff_l {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x01010002>;
+ local-timer-stop;
+ entry-latency-us = <60>;
+ exit-latency-us = <155>;
+ min-residency-us = <860>;
+ };
+ clusteroff_b: clusteroff_b {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x01010002>;
+ local-timer-stop;
+ entry-latency-us = <40>;
+ exit-latency-us = <155>;
+ min-residency-us = <780>;
+ };
+ };
};
pmu-a55 {
@@ -379,6 +423,19 @@
status = "disabled";
};
+ nor_flash: spi@11234000 {
+ compatible = "mediatek,mt8192-nor";
+ reg = <0 0x11234000 0 0xe0>;
+ interrupts = <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&clk26m>,
+ <&clk26m>,
+ <&clk26m>;
+ clock-names = "spi", "sf", "axi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
i2c3: i2c3@11cb0000 {
compatible = "mediatek,mt8192-i2c";
reg = <0 0x11cb0000 0 0x1000>,
diff --git a/arch/arm64/boot/dts/mediatek/mt8516.dtsi b/arch/arm64/boot/dts/mediatek/mt8516.dtsi
index e6e4d9d60094..b80e95574bef 100644
--- a/arch/arm64/boot/dts/mediatek/mt8516.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8516.dtsi
@@ -276,6 +276,27 @@
(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
};
+ apdma: dma-controller@11000480 {
+ compatible = "mediatek,mt8516-uart-dma",
+ "mediatek,mt6577-uart-dma";
+ reg = <0 0x11000480 0 0x80>,
+ <0 0x11000500 0 0x80>,
+ <0 0x11000580 0 0x80>,
+ <0 0x11000600 0 0x80>,
+ <0 0x11000980 0 0x80>,
+ <0 0x11000a00 0 0x80>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_LOW>;
+ dma-requests = <6>;
+ clocks = <&topckgen CLK_TOP_APDMA>;
+ clock-names = "apdma";
+ #dma-cells = <1>;
+ };
+
uart0: serial@11005000 {
compatible = "mediatek,mt8516-uart",
"mediatek,mt6577-uart";
@@ -284,6 +305,9 @@
clocks = <&topckgen CLK_TOP_UART0_SEL>,
<&topckgen CLK_TOP_UART0>;
clock-names = "baud", "bus";
+ dmas = <&apdma 0
+ &apdma 1>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -295,6 +319,9 @@
clocks = <&topckgen CLK_TOP_UART1_SEL>,
<&topckgen CLK_TOP_UART1>;
clock-names = "baud", "bus";
+ dmas = <&apdma 2
+ &apdma 3>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -306,6 +333,9 @@
clocks = <&topckgen CLK_TOP_UART2_SEL>,
<&topckgen CLK_TOP_UART2>;
clock-names = "baud", "bus";
+ dmas = <&apdma 4
+ &apdma 5>;
+ dma-names = "tx", "rx";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/nvidia/Makefile b/arch/arm64/boot/dts/nvidia/Makefile
index 9296d12d11e9..e13fb1070472 100644
--- a/arch/arm64/boot/dts/nvidia/Makefile
+++ b/arch/arm64/boot/dts/nvidia/Makefile
@@ -9,4 +9,5 @@ dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2894-0050-a08.dtb
dtb-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186-p2771-0000.dtb
dtb-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra194-p2972-0000.dtb
dtb-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra194-p3509-0000+p3668-0000.dtb
+dtb-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra194-p3509-0000+p3668-0001.dtb
dtb-$(CONFIG_ARCH_TEGRA_234_SOC) += tegra234-sim-vdk.dtb
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
index 6fd2e0542c27..9f5f5e1fa82e 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
@@ -10,6 +10,725 @@
model = "NVIDIA Jetson TX2 Developer Kit";
compatible = "nvidia,p2771-0000", "nvidia,tegra186";
+ aconnect {
+ status = "okay";
+
+ dma-controller@2930000 {
+ status = "okay";
+ };
+
+ interrupt-controller@2a40000 {
+ status = "okay";
+ };
+
+ ahub@2900800 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0x0>;
+
+ xbar_admaif0_ep: endpoint {
+ remote-endpoint = <&admaif0_ep>;
+ };
+ };
+
+ port@1 {
+ reg = <0x1>;
+
+ xbar_admaif1_ep: endpoint {
+ remote-endpoint = <&admaif1_ep>;
+ };
+ };
+
+ port@2 {
+ reg = <0x2>;
+
+ xbar_admaif2_ep: endpoint {
+ remote-endpoint = <&admaif2_ep>;
+ };
+ };
+
+ port@3 {
+ reg = <0x3>;
+
+ xbar_admaif3_ep: endpoint {
+ remote-endpoint = <&admaif3_ep>;
+ };
+ };
+
+ port@4 {
+ reg = <0x4>;
+
+ xbar_admaif4_ep: endpoint {
+ remote-endpoint = <&admaif4_ep>;
+ };
+ };
+
+ port@5 {
+ reg = <0x5>;
+
+ xbar_admaif5_ep: endpoint {
+ remote-endpoint = <&admaif5_ep>;
+ };
+ };
+
+ port@6 {
+ reg = <0x6>;
+
+ xbar_admaif6_ep: endpoint {
+ remote-endpoint = <&admaif6_ep>;
+ };
+ };
+
+ port@7 {
+ reg = <0x7>;
+
+ xbar_admaif7_ep: endpoint {
+ remote-endpoint = <&admaif7_ep>;
+ };
+ };
+
+ port@8 {
+ reg = <0x8>;
+
+ xbar_admaif8_ep: endpoint {
+ remote-endpoint = <&admaif8_ep>;
+ };
+ };
+
+ port@9 {
+ reg = <0x9>;
+
+ xbar_admaif9_ep: endpoint {
+ remote-endpoint = <&admaif9_ep>;
+ };
+ };
+
+ port@a {
+ reg = <0xa>;
+
+ xbar_admaif10_ep: endpoint {
+ remote-endpoint = <&admaif10_ep>;
+ };
+ };
+
+ port@b {
+ reg = <0xb>;
+
+ xbar_admaif11_ep: endpoint {
+ remote-endpoint = <&admaif11_ep>;
+ };
+ };
+
+ port@c {
+ reg = <0xc>;
+
+ xbar_admaif12_ep: endpoint {
+ remote-endpoint = <&admaif12_ep>;
+ };
+ };
+
+ port@d {
+ reg = <0xd>;
+
+ xbar_admaif13_ep: endpoint {
+ remote-endpoint = <&admaif13_ep>;
+ };
+ };
+
+ port@e {
+ reg = <0xe>;
+
+ xbar_admaif14_ep: endpoint {
+ remote-endpoint = <&admaif14_ep>;
+ };
+ };
+
+ port@f {
+ reg = <0xf>;
+
+ xbar_admaif15_ep: endpoint {
+ remote-endpoint = <&admaif15_ep>;
+ };
+ };
+
+ port@10 {
+ reg = <0x10>;
+
+ xbar_admaif16_ep: endpoint {
+ remote-endpoint = <&admaif16_ep>;
+ };
+ };
+
+ port@11 {
+ reg = <0x11>;
+
+ xbar_admaif17_ep: endpoint {
+ remote-endpoint = <&admaif17_ep>;
+ };
+ };
+
+ port@12 {
+ reg = <0x12>;
+
+ xbar_admaif18_ep: endpoint {
+ remote-endpoint = <&admaif18_ep>;
+ };
+ };
+
+ port@13 {
+ reg = <0x13>;
+
+ xbar_admaif19_ep: endpoint {
+ remote-endpoint = <&admaif19_ep>;
+ };
+ };
+
+ xbar_i2s1_port: port@14 {
+ reg = <0x14>;
+
+ xbar_i2s1_ep: endpoint {
+ remote-endpoint = <&i2s1_cif_ep>;
+ };
+ };
+
+ xbar_i2s2_port: port@15 {
+ reg = <0x15>;
+
+ xbar_i2s2_ep: endpoint {
+ remote-endpoint = <&i2s2_cif_ep>;
+ };
+ };
+
+ xbar_i2s3_port: port@16 {
+ reg = <0x16>;
+
+ xbar_i2s3_ep: endpoint {
+ remote-endpoint = <&i2s3_cif_ep>;
+ };
+ };
+
+ xbar_i2s4_port: port@17 {
+ reg = <0x17>;
+
+ xbar_i2s4_ep: endpoint {
+ remote-endpoint = <&i2s4_cif_ep>;
+ };
+ };
+
+ xbar_i2s5_port: port@18 {
+ reg = <0x18>;
+
+ xbar_i2s5_ep: endpoint {
+ remote-endpoint = <&i2s5_cif_ep>;
+ };
+ };
+
+ xbar_i2s6_port: port@19 {
+ reg = <0x19>;
+
+ xbar_i2s6_ep: endpoint {
+ remote-endpoint = <&i2s6_cif_ep>;
+ };
+ };
+
+ xbar_dmic1_port: port@1a {
+ reg = <0x1a>;
+
+ xbar_dmic1_ep: endpoint {
+ remote-endpoint = <&dmic1_cif_ep>;
+ };
+ };
+
+ xbar_dmic2_port: port@1b {
+ reg = <0x1b>;
+
+ xbar_dmic2_ep: endpoint {
+ remote-endpoint = <&dmic2_cif_ep>;
+ };
+ };
+
+ xbar_dmic3_port: port@1c {
+ reg = <0x1c>;
+
+ xbar_dmic3_ep: endpoint {
+ remote-endpoint = <&dmic3_cif_ep>;
+ };
+ };
+
+ xbar_dspk1_port: port@1e {
+ reg = <0x1e>;
+
+ xbar_dspk1_ep: endpoint {
+ remote-endpoint = <&dspk1_cif_ep>;
+ };
+ };
+
+ xbar_dspk2_port: port@1f {
+ reg = <0x1f>;
+
+ xbar_dspk2_ep: endpoint {
+ remote-endpoint = <&dspk2_cif_ep>;
+ };
+ };
+ };
+
+ admaif@290f000 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ admaif0_port: port@0 {
+ reg = <0x0>;
+
+ admaif0_ep: endpoint {
+ remote-endpoint = <&xbar_admaif0_ep>;
+ };
+ };
+
+ admaif1_port: port@1 {
+ reg = <0x1>;
+
+ admaif1_ep: endpoint {
+ remote-endpoint = <&xbar_admaif1_ep>;
+ };
+ };
+
+ admaif2_port: port@2 {
+ reg = <0x2>;
+
+ admaif2_ep: endpoint {
+ remote-endpoint = <&xbar_admaif2_ep>;
+ };
+ };
+
+ admaif3_port: port@3 {
+ reg = <0x3>;
+
+ admaif3_ep: endpoint {
+ remote-endpoint = <&xbar_admaif3_ep>;
+ };
+ };
+
+ admaif4_port: port@4 {
+ reg = <0x4>;
+
+ admaif4_ep: endpoint {
+ remote-endpoint = <&xbar_admaif4_ep>;
+ };
+ };
+
+ admaif5_port: port@5 {
+ reg = <0x5>;
+
+ admaif5_ep: endpoint {
+ remote-endpoint = <&xbar_admaif5_ep>;
+ };
+ };
+
+ admaif6_port: port@6 {
+ reg = <0x6>;
+
+ admaif6_ep: endpoint {
+ remote-endpoint = <&xbar_admaif6_ep>;
+ };
+ };
+
+ admaif7_port: port@7 {
+ reg = <0x7>;
+
+ admaif7_ep: endpoint {
+ remote-endpoint = <&xbar_admaif7_ep>;
+ };
+ };
+
+ admaif8_port: port@8 {
+ reg = <0x8>;
+
+ admaif8_ep: endpoint {
+ remote-endpoint = <&xbar_admaif8_ep>;
+ };
+ };
+
+ admaif9_port: port@9 {
+ reg = <0x9>;
+
+ admaif9_ep: endpoint {
+ remote-endpoint = <&xbar_admaif9_ep>;
+ };
+ };
+
+ admaif10_port: port@a {
+ reg = <0xa>;
+
+ admaif10_ep: endpoint {
+ remote-endpoint = <&xbar_admaif10_ep>;
+ };
+ };
+
+ admaif11_port: port@b {
+ reg = <0xb>;
+
+ admaif11_ep: endpoint {
+ remote-endpoint = <&xbar_admaif11_ep>;
+ };
+ };
+
+ admaif12_port: port@c {
+ reg = <0xc>;
+
+ admaif12_ep: endpoint {
+ remote-endpoint = <&xbar_admaif12_ep>;
+ };
+ };
+
+ admaif13_port: port@d {
+ reg = <0xd>;
+
+ admaif13_ep: endpoint {
+ remote-endpoint = <&xbar_admaif13_ep>;
+ };
+ };
+
+ admaif14_port: port@e {
+ reg = <0xe>;
+
+ admaif14_ep: endpoint {
+ remote-endpoint = <&xbar_admaif14_ep>;
+ };
+ };
+
+ admaif15_port: port@f {
+ reg = <0xf>;
+
+ admaif15_ep: endpoint {
+ remote-endpoint = <&xbar_admaif15_ep>;
+ };
+ };
+
+ admaif16_port: port@10 {
+ reg = <0x10>;
+
+ admaif16_ep: endpoint {
+ remote-endpoint = <&xbar_admaif16_ep>;
+ };
+ };
+
+ admaif17_port: port@11 {
+ reg = <0x11>;
+
+ admaif17_ep: endpoint {
+ remote-endpoint = <&xbar_admaif17_ep>;
+ };
+ };
+
+ admaif18_port: port@12 {
+ reg = <0x12>;
+
+ admaif18_ep: endpoint {
+ remote-endpoint = <&xbar_admaif18_ep>;
+ };
+ };
+
+ admaif19_port: port@13 {
+ reg = <0x13>;
+
+ admaif19_ep: endpoint {
+ remote-endpoint = <&xbar_admaif19_ep>;
+ };
+ };
+ };
+ };
+
+ i2s@2901000 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s1_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s1_ep>;
+ };
+ };
+
+ i2s1_port: port@1 {
+ reg = <1>;
+
+ i2s1_dap_ep: endpoint {
+ dai-format = "i2s";
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ i2s@2901100 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s2_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s2_ep>;
+ };
+ };
+
+ i2s2_port: port@1 {
+ reg = <1>;
+
+ i2s2_dap_ep: endpoint {
+ dai-format = "i2s";
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ i2s@2901200 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s3_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s3_ep>;
+ };
+ };
+
+ i2s3_port: port@1 {
+ reg = <1>;
+
+ i2s3_dap_ep: endpoint {
+ dai-format = "i2s";
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ i2s@2901300 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s4_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s4_ep>;
+ };
+ };
+
+ i2s4_port: port@1 {
+ reg = <1>;
+
+ i2s4_dap_ep: endpoint {
+ dai-format = "i2s";
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ i2s@2901400 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s5_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s5_ep>;
+ };
+ };
+
+ i2s5_port: port@1 {
+ reg = <1>;
+
+ i2s5_dap_ep: endpoint {
+ dai-format = "i2s";
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ i2s@2901500 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s6_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s6_ep>;
+ };
+ };
+
+ i2s6_port: port@1 {
+ reg = <1>;
+
+ i2s6_dap_ep: endpoint {
+ dai-format = "i2s";
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ dmic@2904000 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dmic1_cif_ep: endpoint {
+ remote-endpoint = <&xbar_dmic1_ep>;
+ };
+ };
+
+ dmic1_port: port@1 {
+ reg = <1>;
+
+ dmic1_dap_ep: endpoint {
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ dmic@2904100 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dmic2_cif_ep: endpoint {
+ remote-endpoint = <&xbar_dmic2_ep>;
+ };
+ };
+
+ dmic2_port: port@1 {
+ reg = <1>;
+
+ dmic2_dap_ep: endpoint {
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ dmic@2904200 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dmic3_cif_ep: endpoint {
+ remote-endpoint = <&xbar_dmic3_ep>;
+ };
+ };
+
+ dmic3_port: port@1 {
+ reg = <1>;
+
+ dmic3_dap_ep: endpoint {
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ dspk@2905000 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dspk1_cif_ep: endpoint {
+ remote-endpoint = <&xbar_dspk1_ep>;
+ };
+ };
+
+ dspk1_port: port@1 {
+ reg = <1>;
+
+ dspk1_dap_ep: endpoint {
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ dspk@2905100 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dspk2_cif_ep: endpoint {
+ remote-endpoint = <&xbar_dspk2_ep>;
+ };
+ };
+
+ dspk2_port: port@1 {
+ reg = <1>;
+
+ dspk2_dap_ep: endpoint {
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+ };
+ };
+
i2c@3160000 {
power-monitor@42 {
compatible = "ti,ina3221";
@@ -369,4 +1088,27 @@
vin-supply = <&vdd_5v0_sys>;
};
+
+ sound {
+ compatible = "nvidia,tegra186-audio-graph-card";
+ status = "okay";
+
+ dais = /* FE */
+ <&admaif0_port>, <&admaif1_port>, <&admaif2_port>, <&admaif3_port>,
+ <&admaif4_port>, <&admaif5_port>, <&admaif6_port>, <&admaif7_port>,
+ <&admaif8_port>, <&admaif9_port>, <&admaif10_port>, <&admaif11_port>,
+ <&admaif12_port>, <&admaif13_port>, <&admaif14_port>, <&admaif15_port>,
+ <&admaif16_port>, <&admaif17_port>, <&admaif18_port>, <&admaif19_port>,
+ /* Router */
+ <&xbar_i2s1_port>, <&xbar_i2s2_port>, <&xbar_i2s3_port>,
+ <&xbar_i2s4_port>, <&xbar_i2s5_port>, <&xbar_i2s6_port>,
+ <&xbar_dmic1_port>, <&xbar_dmic2_port>, <&xbar_dmic3_port>,
+ <&xbar_dspk1_port>, <&xbar_dspk2_port>,
+ /* I/O */
+ <&i2s1_port>, <&i2s2_port>, <&i2s3_port>, <&i2s4_port>,
+ <&i2s5_port>, <&i2s6_port>, <&dmic1_port>, <&dmic2_port>,
+ <&dmic3_port>, <&dspk1_port>, <&dspk2_port>;
+
+ label = "jetson-tx2-ape";
+ };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index 58c51965df47..02b26b39cedc 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -1678,6 +1678,28 @@
interrupt-affinity = <&ca57_0 &ca57_1 &ca57_2 &ca57_3>;
};
+ sound {
+ status = "disabled";
+
+ clocks = <&bpmp TEGRA186_CLK_PLLA>,
+ <&bpmp TEGRA186_CLK_PLL_A_OUT0>;
+ clock-names = "pll_a", "plla_out0";
+ assigned-clocks = <&bpmp TEGRA186_CLK_PLLA>,
+ <&bpmp TEGRA186_CLK_PLL_A_OUT0>,
+ <&bpmp TEGRA186_CLK_AUD_MCLK>;
+ assigned-clock-parents = <0>,
+ <&bpmp TEGRA186_CLK_PLLA>,
+ <&bpmp TEGRA186_CLK_PLL_A_OUT0>;
+ /*
+ * PLLA supports dynamic ramp. The initial rate below is chosen
+ * so that it can oscillate between the base rates required for
+ * 8x and 11.025x sample rate streams.
+ */
+ assigned-clock-rates = <258000000>;
+
+ iommus = <&smmu TEGRA186_SID_APE>;
+ };
+
thermal-zones {
a57 {
polling-delay = <0>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
index d71b7a1140fe..7e7b0eb90c80 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
@@ -93,6 +93,10 @@
vclamp-usb-supply = <&vdd_1v8ao>;
ports {
+ usb2-0 {
+ vbus-supply = <&vdd_5v0_sys>;
+ };
+
usb2-1 {
vbus-supply = <&vdd_5v0_sys>;
};
@@ -105,6 +109,10 @@
vbus-supply = <&vdd_5v0_sys>;
};
+ usb3-2 {
+ vbus-supply = <&vdd_5v0_sys>;
+ };
+
usb3-3 {
vbus-supply = <&vdd_5v0_sys>;
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
index 54d057beec59..2888efc42ba1 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
@@ -21,6 +21,513 @@
interrupt-controller@2a40000 {
status = "okay";
};
+
+ ahub@2900800 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0x0>;
+
+ xbar_admaif0_ep: endpoint {
+ remote-endpoint = <&admaif0_ep>;
+ };
+ };
+
+ port@1 {
+ reg = <0x1>;
+
+ xbar_admaif1_ep: endpoint {
+ remote-endpoint = <&admaif1_ep>;
+ };
+ };
+
+ port@2 {
+ reg = <0x2>;
+
+ xbar_admaif2_ep: endpoint {
+ remote-endpoint = <&admaif2_ep>;
+ };
+ };
+
+ port@3 {
+ reg = <0x3>;
+
+ xbar_admaif3_ep: endpoint {
+ remote-endpoint = <&admaif3_ep>;
+ };
+ };
+
+ port@4 {
+ reg = <0x4>;
+
+ xbar_admaif4_ep: endpoint {
+ remote-endpoint = <&admaif4_ep>;
+ };
+ };
+
+ port@5 {
+ reg = <0x5>;
+
+ xbar_admaif5_ep: endpoint {
+ remote-endpoint = <&admaif5_ep>;
+ };
+ };
+
+ port@6 {
+ reg = <0x6>;
+
+ xbar_admaif6_ep: endpoint {
+ remote-endpoint = <&admaif6_ep>;
+ };
+ };
+
+ port@7 {
+ reg = <0x7>;
+
+ xbar_admaif7_ep: endpoint {
+ remote-endpoint = <&admaif7_ep>;
+ };
+ };
+
+ port@8 {
+ reg = <0x8>;
+
+ xbar_admaif8_ep: endpoint {
+ remote-endpoint = <&admaif8_ep>;
+ };
+ };
+
+ port@9 {
+ reg = <0x9>;
+
+ xbar_admaif9_ep: endpoint {
+ remote-endpoint = <&admaif9_ep>;
+ };
+ };
+
+ port@a {
+ reg = <0xa>;
+
+ xbar_admaif10_ep: endpoint {
+ remote-endpoint = <&admaif10_ep>;
+ };
+ };
+
+ port@b {
+ reg = <0xb>;
+
+ xbar_admaif11_ep: endpoint {
+ remote-endpoint = <&admaif11_ep>;
+ };
+ };
+
+ port@c {
+ reg = <0xc>;
+
+ xbar_admaif12_ep: endpoint {
+ remote-endpoint = <&admaif12_ep>;
+ };
+ };
+
+ port@d {
+ reg = <0xd>;
+
+ xbar_admaif13_ep: endpoint {
+ remote-endpoint = <&admaif13_ep>;
+ };
+ };
+
+ port@e {
+ reg = <0xe>;
+
+ xbar_admaif14_ep: endpoint {
+ remote-endpoint = <&admaif14_ep>;
+ };
+ };
+
+ port@f {
+ reg = <0xf>;
+
+ xbar_admaif15_ep: endpoint {
+ remote-endpoint = <&admaif15_ep>;
+ };
+ };
+
+ port@10 {
+ reg = <0x10>;
+
+ xbar_admaif16_ep: endpoint {
+ remote-endpoint = <&admaif16_ep>;
+ };
+ };
+
+ port@11 {
+ reg = <0x11>;
+
+ xbar_admaif17_ep: endpoint {
+ remote-endpoint = <&admaif17_ep>;
+ };
+ };
+
+ port@12 {
+ reg = <0x12>;
+
+ xbar_admaif18_ep: endpoint {
+ remote-endpoint = <&admaif18_ep>;
+ };
+ };
+
+ port@13 {
+ reg = <0x13>;
+
+ xbar_admaif19_ep: endpoint {
+ remote-endpoint = <&admaif19_ep>;
+ };
+ };
+
+ xbar_i2s1_port: port@14 {
+ reg = <0x14>;
+
+ xbar_i2s1_ep: endpoint {
+ remote-endpoint = <&i2s1_cif_ep>;
+ };
+ };
+
+ xbar_i2s2_port: port@15 {
+ reg = <0x15>;
+
+ xbar_i2s2_ep: endpoint {
+ remote-endpoint = <&i2s2_cif_ep>;
+ };
+ };
+
+ xbar_i2s4_port: port@17 {
+ reg = <0x17>;
+
+ xbar_i2s4_ep: endpoint {
+ remote-endpoint = <&i2s4_cif_ep>;
+ };
+ };
+
+ xbar_i2s6_port: port@19 {
+ reg = <0x19>;
+
+ xbar_i2s6_ep: endpoint {
+ remote-endpoint = <&i2s6_cif_ep>;
+ };
+ };
+
+ xbar_dmic3_port: port@1c {
+ reg = <0x1c>;
+
+ xbar_dmic3_ep: endpoint {
+ remote-endpoint = <&dmic3_cif_ep>;
+ };
+ };
+ };
+
+ admaif@290f000 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ admaif0_port: port@0 {
+ reg = <0x0>;
+
+ admaif0_ep: endpoint {
+ remote-endpoint = <&xbar_admaif0_ep>;
+ };
+ };
+
+ admaif1_port: port@1 {
+ reg = <0x1>;
+
+ admaif1_ep: endpoint {
+ remote-endpoint = <&xbar_admaif1_ep>;
+ };
+ };
+
+ admaif2_port: port@2 {
+ reg = <0x2>;
+
+ admaif2_ep: endpoint {
+ remote-endpoint = <&xbar_admaif2_ep>;
+ };
+ };
+
+ admaif3_port: port@3 {
+ reg = <0x3>;
+
+ admaif3_ep: endpoint {
+ remote-endpoint = <&xbar_admaif3_ep>;
+ };
+ };
+
+ admaif4_port: port@4 {
+ reg = <0x4>;
+
+ admaif4_ep: endpoint {
+ remote-endpoint = <&xbar_admaif4_ep>;
+ };
+ };
+
+ admaif5_port: port@5 {
+ reg = <0x5>;
+
+ admaif5_ep: endpoint {
+ remote-endpoint = <&xbar_admaif5_ep>;
+ };
+ };
+
+ admaif6_port: port@6 {
+ reg = <0x6>;
+
+ admaif6_ep: endpoint {
+ remote-endpoint = <&xbar_admaif6_ep>;
+ };
+ };
+
+ admaif7_port: port@7 {
+ reg = <0x7>;
+
+ admaif7_ep: endpoint {
+ remote-endpoint = <&xbar_admaif7_ep>;
+ };
+ };
+
+ admaif8_port: port@8 {
+ reg = <0x8>;
+
+ admaif8_ep: endpoint {
+ remote-endpoint = <&xbar_admaif8_ep>;
+ };
+ };
+
+ admaif9_port: port@9 {
+ reg = <0x9>;
+
+ admaif9_ep: endpoint {
+ remote-endpoint = <&xbar_admaif9_ep>;
+ };
+ };
+
+ admaif10_port: port@a {
+ reg = <0xa>;
+
+ admaif10_ep: endpoint {
+ remote-endpoint = <&xbar_admaif10_ep>;
+ };
+ };
+
+ admaif11_port: port@b {
+ reg = <0xb>;
+
+ admaif11_ep: endpoint {
+ remote-endpoint = <&xbar_admaif11_ep>;
+ };
+ };
+
+ admaif12_port: port@c {
+ reg = <0xc>;
+
+ admaif12_ep: endpoint {
+ remote-endpoint = <&xbar_admaif12_ep>;
+ };
+ };
+
+ admaif13_port: port@d {
+ reg = <0xd>;
+
+ admaif13_ep: endpoint {
+ remote-endpoint = <&xbar_admaif13_ep>;
+ };
+ };
+
+ admaif14_port: port@e {
+ reg = <0xe>;
+
+ admaif14_ep: endpoint {
+ remote-endpoint = <&xbar_admaif14_ep>;
+ };
+ };
+
+ admaif15_port: port@f {
+ reg = <0xf>;
+
+ admaif15_ep: endpoint {
+ remote-endpoint = <&xbar_admaif15_ep>;
+ };
+ };
+
+ admaif16_port: port@10 {
+ reg = <0x10>;
+
+ admaif16_ep: endpoint {
+ remote-endpoint = <&xbar_admaif16_ep>;
+ };
+ };
+
+ admaif17_port: port@11 {
+ reg = <0x11>;
+
+ admaif17_ep: endpoint {
+ remote-endpoint = <&xbar_admaif17_ep>;
+ };
+ };
+
+ admaif18_port: port@12 {
+ reg = <0x12>;
+
+ admaif18_ep: endpoint {
+ remote-endpoint = <&xbar_admaif18_ep>;
+ };
+ };
+
+ admaif19_port: port@13 {
+ reg = <0x13>;
+
+ admaif19_ep: endpoint {
+ remote-endpoint = <&xbar_admaif19_ep>;
+ };
+ };
+ };
+ };
+
+ i2s@2901000 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s1_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s1_ep>;
+ };
+ };
+
+ i2s1_port: port@1 {
+ reg = <1>;
+
+ i2s1_dap_ep: endpoint {
+ dai-format = "i2s";
+ remote-endpoint = <&rt5658_ep>;
+ };
+ };
+ };
+ };
+
+ i2s@2901100 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s2_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s2_ep>;
+ };
+ };
+
+ i2s2_port: port@1 {
+ reg = <1>;
+
+ i2s2_dap_ep: endpoint {
+ dai-format = "i2s";
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ i2s@2901300 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s4_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s4_ep>;
+ };
+ };
+
+ i2s4_port: port@1 {
+ reg = <1>;
+
+ i2s4_dap_ep: endpoint {
+ dai-format = "i2s";
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ i2s@2901500 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s6_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s6_ep>;
+ };
+ };
+
+ i2s6_port: port@1 {
+ reg = <1>;
+
+ i2s6_dap_ep: endpoint {
+ dai-format = "i2s";
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ dmic@2904200 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dmic3_cif_ep: endpoint {
+ remote-endpoint = <&xbar_dmic3_ep>;
+ };
+ };
+
+ dmic3_port: port@1 {
+ reg = <1>;
+
+ dmic3_dap_ep: endpoint {
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+ };
};
i2c@3160000 {
@@ -57,6 +564,10 @@
pads {
usb2 {
lanes {
+ usb2-0 {
+ status = "okay";
+ };
+
usb2-1 {
status = "okay";
};
@@ -73,6 +584,10 @@
status = "okay";
};
+ usb3-2 {
+ status = "okay";
+ };
+
usb3-3 {
status = "okay";
};
@@ -81,6 +596,11 @@
};
ports {
+ usb2-0 {
+ mode = "host";
+ status = "okay";
+ };
+
usb2-1 {
mode = "host";
status = "okay";
@@ -96,6 +616,11 @@
status = "okay";
};
+ usb3-2 {
+ nvidia,usb2-companion = <0>;
+ status = "okay";
+ };
+
usb3-3 {
nvidia,usb2-companion = <3>;
maximum-speed = "super-speed";
@@ -107,11 +632,36 @@
usb@3610000 {
status = "okay";
- phys = <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-1}>,
+ phys = <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-0}>,
+ <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-1}>,
<&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-3}>,
<&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-0}>,
+ <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-2}>,
<&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-3}>;
- phy-names = "usb2-1", "usb2-3", "usb3-0", "usb3-3";
+ phy-names = "usb2-0", "usb2-1", "usb2-3", "usb3-0", "usb3-2", "usb3-3";
+ };
+
+ i2c@c250000 {
+ status = "okay";
+
+ rt5658: audio-codec@1a {
+ status = "okay";
+
+ compatible = "realtek,rt5658";
+ reg = <0x1a>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA194_MAIN_GPIO(S, 5) GPIO_ACTIVE_HIGH>;
+ realtek,jd-src = <2>;
+ sound-name-prefix = "CVB-RT";
+
+ port {
+ rt5658_ep: endpoint {
+ remote-endpoint = <&i2s1_dap_ep>;
+ mclk-fs = <256>;
+ clocks = <&bpmp TEGRA194_CLK_AUD_MCLK>;
+ };
+ };
+ };
};
pwm@c340000 {
@@ -263,6 +813,47 @@
};
};
+ sound {
+ compatible = "nvidia,tegra186-audio-graph-card";
+ status = "okay";
+
+ dais = /* ADMAIF (FE) Ports */
+ <&admaif0_port>, <&admaif1_port>, <&admaif2_port>, <&admaif3_port>,
+ <&admaif4_port>, <&admaif5_port>, <&admaif6_port>, <&admaif7_port>,
+ <&admaif8_port>, <&admaif9_port>, <&admaif10_port>, <&admaif11_port>,
+ <&admaif12_port>, <&admaif13_port>, <&admaif14_port>, <&admaif15_port>,
+ <&admaif16_port>, <&admaif17_port>, <&admaif18_port>, <&admaif19_port>,
+ /* XBAR Ports */
+ <&xbar_i2s1_port>, <&xbar_i2s2_port>, <&xbar_i2s4_port>,
+ <&xbar_i2s6_port>, <&xbar_dmic3_port>,
+ /* BE I/O Ports */
+ <&i2s1_port>, <&i2s2_port>, <&i2s4_port>, <&i2s6_port>,
+ <&dmic3_port>;
+
+ label = "jetson-xavier-ape";
+
+ widgets =
+ "Microphone", "CVB-RT MIC Jack",
+ "Microphone", "CVB-RT MIC",
+ "Headphone", "CVB-RT HP Jack",
+ "Speaker", "CVB-RT SPK";
+
+ routing =
+ /* I2S1 <-> RT5658 */
+ "CVB-RT AIF1 Playback", "I2S1 DAP-Playback",
+ "I2S1 DAP-Capture", "CVB-RT AIF1 Capture",
+ /* RT5658 Codec controls */
+ "CVB-RT HP Jack", "CVB-RT HPO L Playback",
+ "CVB-RT HP Jack", "CVB-RT HPO R Playback",
+ "CVB-RT IN1P", "CVB-RT MIC Jack",
+ "CVB-RT IN2P", "CVB-RT MIC Jack",
+ "CVB-RT SPK", "CVB-RT SPO Playback",
+ "CVB-RT DMIC L1", "CVB-RT MIC",
+ "CVB-RT DMIC L2", "CVB-RT MIC",
+ "CVB-RT DMIC R1", "CVB-RT MIC",
+ "CVB-RT DMIC R2", "CVB-RT MIC";
+ };
+
thermal-zones {
cpu {
polling-delay = <0>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000+p3668-0000.dts b/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000+p3668-0000.dts
index 7f97b34216a0..1c3874b677c0 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000+p3668-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000+p3668-0000.dts
@@ -1,345 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-#include <dt-bindings/input/linux-event-codes.h>
-#include <dt-bindings/input/gpio-keys.h>
-
#include "tegra194-p3668-0000.dtsi"
+#include "tegra194-p3509-0000.dtsi"
/ {
- model = "NVIDIA Jetson Xavier NX Developer Kit";
+ model = "NVIDIA Jetson Xavier NX Developer Kit (SD-card)";
compatible = "nvidia,p3509-0000+p3668-0000", "nvidia,tegra194";
-
- bus@0 {
- aconnect@2900000 {
- status = "okay";
-
- dma-controller@2930000 {
- status = "okay";
- };
-
- interrupt-controller@2a40000 {
- status = "okay";
- };
- };
-
- ddc: i2c@3190000 {
- status = "okay";
- };
-
- i2c@3160000 {
- eeprom@57 {
- compatible = "atmel,24c02";
- reg = <0x57>;
-
- label = "system";
- vcc-supply = <&vdd_1v8>;
- address-width = <8>;
- pagesize = <8>;
- size = <256>;
- read-only;
- };
- };
-
- hda@3510000 {
- nvidia,model = "jetson-xavier-nx-hda";
- status = "okay";
- };
-
- padctl@3520000 {
- status = "okay";
-
- pads {
- usb2 {
- lanes {
- usb2-1 {
- status = "okay";
- };
-
- usb2-2 {
- status = "okay";
- };
- };
- };
-
- usb3 {
- lanes {
- usb3-2 {
- status = "okay";
- };
- };
- };
- };
-
- ports {
- usb2-1 {
- mode = "host";
- status = "okay";
- };
-
- usb2-2 {
- mode = "host";
- vbus-supply = <&vdd_5v0_sys>;
- status = "okay";
- };
-
- usb3-2 {
- nvidia,usb2-companion = <1>;
- vbus-supply = <&vdd_5v0_sys>;
- status = "okay";
- };
- };
- };
-
- usb@3610000 {
- status = "okay";
-
- phys = <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-1}>,
- <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-2}>,
- <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-2}>;
- phy-names = "usb2-1", "usb2-2", "usb3-2";
- };
-
- pwm@32d0000 {
- status = "okay";
- };
-
- host1x@13e00000 {
- display-hub@15200000 {
- status = "okay";
- };
-
- dpaux@155c0000 {
- status = "okay";
- };
-
- dpaux@155d0000 {
- status = "okay";
- };
-
- /* DP0 */
- sor@15b00000 {
- status = "okay";
-
- avdd-io-hdmi-dp-supply = <&vdd_1v0>;
- vdd-hdmi-dp-pll-supply = <&vdd_1v8hs>;
-
- nvidia,dpaux = <&dpaux0>;
- };
-
- /* HDMI */
- sor@15b40000 {
- status = "okay";
-
- avdd-io-hdmi-dp-supply = <&vdd_1v0>;
- vdd-hdmi-dp-pll-supply = <&vdd_1v8hs>;
- hdmi-supply = <&vdd_hdmi>;
-
- nvidia,ddc-i2c-bus = <&ddc>;
- nvidia,hpd-gpio = <&gpio TEGRA194_MAIN_GPIO(M, 1)
- GPIO_ACTIVE_LOW>;
- };
- };
- };
-
- pcie@14160000 {
- status = "okay";
-
- vddio-pex-ctl-supply = <&vdd_1v8ao>;
-
- phys = <&p2u_hsio_11>;
- phy-names = "p2u-0";
- };
-
- pcie@141a0000 {
- status = "okay";
-
- vddio-pex-ctl-supply = <&vdd_1v8ao>;
-
- phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>,
- <&p2u_nvhs_3>, <&p2u_nvhs_4>, <&p2u_nvhs_5>,
- <&p2u_nvhs_6>, <&p2u_nvhs_7>;
-
- phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3", "p2u-4",
- "p2u-5", "p2u-6", "p2u-7";
- };
-
- pcie_ep@141a0000 {
- status = "disabled";
-
- vddio-pex-ctl-supply = <&vdd_1v8ao>;
-
- reset-gpios = <&gpio TEGRA194_MAIN_GPIO(GG, 1) GPIO_ACTIVE_LOW>;
-
- nvidia,refclk-select-gpios = <&gpio_aon TEGRA194_AON_GPIO(AA, 5)
- GPIO_ACTIVE_HIGH>;
-
- phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>,
- <&p2u_nvhs_3>, <&p2u_nvhs_4>, <&p2u_nvhs_5>,
- <&p2u_nvhs_6>, <&p2u_nvhs_7>;
-
- phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3", "p2u-4",
- "p2u-5", "p2u-6", "p2u-7";
- };
-
- fan: fan {
- compatible = "pwm-fan";
- pwms = <&pwm6 0 45334>;
-
- cooling-levels = <0 64 128 255>;
- #cooling-cells = <2>;
- };
-
- gpio-keys {
- compatible = "gpio-keys";
-
- force-recovery {
- label = "Force Recovery";
- gpios = <&gpio TEGRA194_MAIN_GPIO(G, 0)
- GPIO_ACTIVE_LOW>;
- linux,input-type = <EV_KEY>;
- linux,code = <KEY_SLEEP>;
- debounce-interval = <10>;
- };
-
- power {
- label = "Power";
- gpios = <&gpio_aon TEGRA194_AON_GPIO(EE, 4)
- GPIO_ACTIVE_LOW>;
- linux,input-type = <EV_KEY>;
- linux,code = <KEY_POWER>;
- debounce-interval = <10>;
- wakeup-event-action = <EV_ACT_ASSERTED>;
- wakeup-source;
- };
- };
-
- vdd_5v0_sys: regulator@100 {
- compatible = "regulator-fixed";
- regulator-name = "VDD_5V_SYS";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- vdd_3v3_sys: regulator@101 {
- compatible = "regulator-fixed";
- regulator-name = "VDD_3V3_SYS";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- vdd_3v3_ao: regulator@102 {
- compatible = "regulator-fixed";
- regulator-name = "VDD_3V3_AO";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- vdd_1v8: regulator@103 {
- compatible = "regulator-fixed";
- regulator-name = "VDD_1V8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- vdd_hdmi: regulator@104 {
- compatible = "regulator-fixed";
- regulator-name = "VDD_5V0_HDMI_CON";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- thermal-zones {
- cpu {
- polling-delay = <0>;
- polling-delay-passive = <500>;
- status = "okay";
-
- trips {
- cpu_trip_critical: critical {
- temperature = <96500>;
- hysteresis = <0>;
- type = "critical";
- };
-
- cpu_trip_hot: hot {
- temperature = <70000>;
- hysteresis = <2000>;
- type = "hot";
- };
-
- cpu_trip_active: active {
- temperature = <50000>;
- hysteresis = <2000>;
- type = "active";
- };
-
- cpu_trip_passive: passive {
- temperature = <30000>;
- hysteresis = <2000>;
- type = "passive";
- };
- };
-
- cooling-maps {
- cpu-critical {
- cooling-device = <&fan 3 3>;
- trip = <&cpu_trip_critical>;
- };
-
- cpu-hot {
- cooling-device = <&fan 2 2>;
- trip = <&cpu_trip_hot>;
- };
-
- cpu-active {
- cooling-device = <&fan 1 1>;
- trip = <&cpu_trip_active>;
- };
-
- cpu-passive {
- cooling-device = <&fan 0 0>;
- trip = <&cpu_trip_passive>;
- };
- };
- };
-
- gpu {
- polling-delay = <0>;
- polling-delay-passive = <500>;
- status = "okay";
-
- trips {
- gpu_alert0: critical {
- temperature = <99000>;
- hysteresis = <0>;
- type = "critical";
- };
- };
- };
-
- aux {
- polling-delay = <0>;
- polling-delay-passive = <500>;
- status = "okay";
-
- trips {
- aux_alert0: critical {
- temperature = <90000>;
- hysteresis = <0>;
- type = "critical";
- };
- };
- };
- };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000+p3668-0001.dts b/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000+p3668-0001.dts
new file mode 100644
index 000000000000..238fd98e8e45
--- /dev/null
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000+p3668-0001.dts
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include "tegra194-p3668-0001.dtsi"
+#include "tegra194-p3509-0000.dtsi"
+
+/ {
+ model = "NVIDIA Jetson Xavier NX Developer Kit (eMMC)";
+ compatible = "nvidia,p3509-0000+p3668-0001", "nvidia,tegra194";
+};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000.dtsi
new file mode 100644
index 000000000000..d1d77220154f
--- /dev/null
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p3509-0000.dtsi
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/input/gpio-keys.h>
+
+/ {
+ bus@0 {
+ aconnect@2900000 {
+ status = "okay";
+
+ dma-controller@2930000 {
+ status = "okay";
+ };
+
+ interrupt-controller@2a40000 {
+ status = "okay";
+ };
+ };
+
+ ddc: i2c@3190000 {
+ status = "okay";
+ };
+
+ i2c@3160000 {
+ eeprom@57 {
+ compatible = "atmel,24c02";
+ reg = <0x57>;
+
+ label = "system";
+ vcc-supply = <&vdd_1v8>;
+ address-width = <8>;
+ pagesize = <8>;
+ size = <256>;
+ read-only;
+ };
+ };
+
+ hda@3510000 {
+ nvidia,model = "jetson-xavier-nx-hda";
+ status = "okay";
+ };
+
+ padctl@3520000 {
+ status = "okay";
+
+ pads {
+ usb2 {
+ lanes {
+ usb2-1 {
+ status = "okay";
+ };
+
+ usb2-2 {
+ status = "okay";
+ };
+ };
+ };
+
+ usb3 {
+ lanes {
+ usb3-2 {
+ status = "okay";
+ };
+ };
+ };
+ };
+
+ ports {
+ usb2-1 {
+ mode = "host";
+ status = "okay";
+ };
+
+ usb2-2 {
+ mode = "host";
+ vbus-supply = <&vdd_5v0_sys>;
+ status = "okay";
+ };
+
+ usb3-2 {
+ nvidia,usb2-companion = <1>;
+ vbus-supply = <&vdd_5v0_sys>;
+ status = "okay";
+ };
+ };
+ };
+
+ usb@3610000 {
+ status = "okay";
+
+ phys = <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-1}>,
+ <&{/bus@0/padctl@3520000/pads/usb2/lanes/usb2-2}>,
+ <&{/bus@0/padctl@3520000/pads/usb3/lanes/usb3-2}>;
+ phy-names = "usb2-1", "usb2-2", "usb3-2";
+ };
+
+ spi@3270000 {
+ status = "okay";
+
+ flash@0 {
+ compatible = "spi-nor";
+ reg = <0>;
+ spi-max-frequency = <102000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ };
+ };
+
+ pwm@32d0000 {
+ status = "okay";
+ };
+
+ host1x@13e00000 {
+ display-hub@15200000 {
+ status = "okay";
+ };
+
+ dpaux@155c0000 {
+ status = "okay";
+ };
+
+ dpaux@155d0000 {
+ status = "okay";
+ };
+
+ /* DP0 */
+ sor@15b00000 {
+ status = "okay";
+
+ avdd-io-hdmi-dp-supply = <&vdd_1v0>;
+ vdd-hdmi-dp-pll-supply = <&vdd_1v8hs>;
+
+ nvidia,dpaux = <&dpaux0>;
+ };
+
+ /* HDMI */
+ sor@15b40000 {
+ status = "okay";
+
+ avdd-io-hdmi-dp-supply = <&vdd_1v0>;
+ vdd-hdmi-dp-pll-supply = <&vdd_1v8hs>;
+ hdmi-supply = <&vdd_hdmi>;
+
+ nvidia,ddc-i2c-bus = <&ddc>;
+ nvidia,hpd-gpio = <&gpio TEGRA194_MAIN_GPIO(M, 1)
+ GPIO_ACTIVE_LOW>;
+ };
+ };
+ };
+
+ pcie@14160000 {
+ status = "okay";
+
+ vddio-pex-ctl-supply = <&vdd_1v8ao>;
+
+ phys = <&p2u_hsio_11>;
+ phy-names = "p2u-0";
+ };
+
+ pcie@141a0000 {
+ status = "okay";
+
+ vddio-pex-ctl-supply = <&vdd_1v8ao>;
+
+ phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>,
+ <&p2u_nvhs_3>, <&p2u_nvhs_4>, <&p2u_nvhs_5>,
+ <&p2u_nvhs_6>, <&p2u_nvhs_7>;
+
+ phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3", "p2u-4",
+ "p2u-5", "p2u-6", "p2u-7";
+ };
+
+ pcie_ep@141a0000 {
+ status = "disabled";
+
+ vddio-pex-ctl-supply = <&vdd_1v8ao>;
+
+ reset-gpios = <&gpio TEGRA194_MAIN_GPIO(GG, 1) GPIO_ACTIVE_LOW>;
+
+ nvidia,refclk-select-gpios = <&gpio_aon TEGRA194_AON_GPIO(AA, 5)
+ GPIO_ACTIVE_HIGH>;
+
+ phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>,
+ <&p2u_nvhs_3>, <&p2u_nvhs_4>, <&p2u_nvhs_5>,
+ <&p2u_nvhs_6>, <&p2u_nvhs_7>;
+
+ phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3", "p2u-4",
+ "p2u-5", "p2u-6", "p2u-7";
+ };
+
+ fan: fan {
+ compatible = "pwm-fan";
+ pwms = <&pwm6 0 45334>;
+
+ cooling-levels = <0 64 128 255>;
+ #cooling-cells = <2>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ force-recovery {
+ label = "Force Recovery";
+ gpios = <&gpio TEGRA194_MAIN_GPIO(G, 0)
+ GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_KEY>;
+ linux,code = <KEY_SLEEP>;
+ debounce-interval = <10>;
+ };
+
+ power {
+ label = "Power";
+ gpios = <&gpio_aon TEGRA194_AON_GPIO(EE, 4)
+ GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_KEY>;
+ linux,code = <KEY_POWER>;
+ debounce-interval = <10>;
+ wakeup-event-action = <EV_ACT_ASSERTED>;
+ wakeup-source;
+ };
+ };
+
+ vdd_5v0_sys: regulator@100 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_5V_SYS";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_3v3_sys: regulator@101 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_3V3_SYS";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_3v3_ao: regulator@102 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_3V3_AO";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_1v8: regulator@103 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_hdmi: regulator@104 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_5V0_HDMI_CON";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ thermal-zones {
+ cpu {
+ polling-delay = <0>;
+ polling-delay-passive = <500>;
+ status = "okay";
+
+ trips {
+ cpu_trip_critical: critical {
+ temperature = <96500>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+
+ cpu_trip_hot: hot {
+ temperature = <70000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_trip_active: active {
+ temperature = <50000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ cpu_trip_passive: passive {
+ temperature = <30000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ };
+
+ cooling-maps {
+ cpu-critical {
+ cooling-device = <&fan 3 3>;
+ trip = <&cpu_trip_critical>;
+ };
+
+ cpu-hot {
+ cooling-device = <&fan 2 2>;
+ trip = <&cpu_trip_hot>;
+ };
+
+ cpu-active {
+ cooling-device = <&fan 1 1>;
+ trip = <&cpu_trip_active>;
+ };
+
+ cpu-passive {
+ cooling-device = <&fan 0 0>;
+ trip = <&cpu_trip_passive>;
+ };
+ };
+ };
+
+ gpu {
+ polling-delay = <0>;
+ polling-delay-passive = <500>;
+ status = "okay";
+
+ trips {
+ gpu_alert0: critical {
+ temperature = <99000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+
+ aux {
+ polling-delay = <0>;
+ polling-delay-passive = <500>;
+ status = "okay";
+
+ trips {
+ aux_alert0: critical {
+ temperature = <90000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
index 0dc8304a2edd..7da3d48cb410 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
@@ -1,79 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
-#include "tegra194.dtsi"
-
-#include <dt-bindings/mfd/max77620.h>
+#include "tegra194-p3668.dtsi"
/ {
- model = "NVIDIA Jetson Xavier NX";
+ model = "NVIDIA Jetson Xavier NX (SD-card)";
compatible = "nvidia,p3668-0000", "nvidia,tegra194";
- aliases {
- ethernet0 = "/bus@0/ethernet@2490000";
- i2c0 = "/bpmp/i2c";
- i2c1 = "/bus@0/i2c@3160000";
- i2c2 = "/bus@0/i2c@c240000";
- i2c3 = "/bus@0/i2c@3180000";
- i2c4 = "/bus@0/i2c@3190000";
- i2c5 = "/bus@0/i2c@31c0000";
- i2c6 = "/bus@0/i2c@c250000";
- i2c7 = "/bus@0/i2c@31e0000";
- mmc0 = "/bus@0/mmc@3460000";
- rtc0 = "/bpmp/i2c/pmic@3c";
- rtc1 = "/bus@0/rtc@c2a0000";
- serial0 = &tcu;
- };
-
- chosen {
- bootargs = "console=ttyS0,115200n8";
- stdout-path = "serial0:115200n8";
- };
-
bus@0 {
- ethernet@2490000 {
- status = "okay";
-
- phy-reset-gpios = <&gpio TEGRA194_MAIN_GPIO(R, 1) GPIO_ACTIVE_LOW>;
- phy-handle = <&phy>;
- phy-mode = "rgmii-id";
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
-
- phy: phy@0 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <0x0>;
- interrupt-parent = <&gpio>;
- interrupts = <TEGRA194_MAIN_GPIO(G, 4) IRQ_TYPE_LEVEL_LOW>;
- #phy-cells = <0>;
- };
- };
- };
-
- memory-controller@2c00000 {
- status = "okay";
- };
-
- serial@3100000 {
- status = "okay";
- };
-
- i2c@3160000 {
- status = "okay";
-
- eeprom@50 {
- compatible = "atmel,24c02";
- reg = <0x50>;
-
- label = "module";
- vcc-supply = <&vdd_1v8ls>;
- address-width = <8>;
- pagesize = <8>;
- size = <256>;
- read-only;
- };
- };
-
/* SDMMC1 (SD/MMC) */
mmc@3400000 {
status = "okay";
@@ -82,216 +14,6 @@
disable-wp;
vmmc-supply = <&vdd_3v3_sd>;
};
-
- padctl@3520000 {
- avdd-usb-supply = <&vdd_usb_3v3>;
- vclamp-usb-supply = <&vdd_1v8ao>;
-
- ports {
- usb2-1 {
- vbus-supply = <&vdd_5v0_sys>;
- };
-
- usb2-3 {
- vbus-supply = <&vdd_5v0_sys>;
- };
-
- usb3-0 {
- vbus-supply = <&vdd_5v0_sys>;
- };
-
- usb3-3 {
- vbus-supply = <&vdd_5v0_sys>;
- };
- };
- };
-
- rtc@c2a0000 {
- status = "okay";
- };
-
- pmc@c360000 {
- nvidia,invert-interrupt;
- };
- };
-
- bpmp {
- i2c {
- status = "okay";
-
- pmic: pmic@3c {
- compatible = "maxim,max20024";
- reg = <0x3c>;
-
- interrupt-parent = <&pmc>;
- interrupts = <24 IRQ_TYPE_LEVEL_LOW>;
- #interrupt-cells = <2>;
- interrupt-controller;
-
- #gpio-cells = <2>;
- gpio-controller;
-
- pinctrl-names = "default";
- pinctrl-0 = <&max20024_default>;
-
- max20024_default: pinmux {
- gpio0 {
- pins = "gpio0";
- function = "gpio";
- };
-
- gpio1 {
- pins = "gpio1";
- function = "fps-out";
- maxim,active-fps-source = <MAX77620_FPS_SRC_DEF>;
- };
-
- gpio2 {
- pins = "gpio2";
- function = "fps-out";
- maxim,active-fps-source = <MAX77620_FPS_SRC_DEF>;
- };
-
- gpio3 {
- pins = "gpio3";
- function = "fps-out";
- maxim,active-fps-source = <MAX77620_FPS_SRC_DEF>;
- };
-
- gpio4 {
- pins = "gpio4";
- function = "32k-out1";
- drive-push-pull = <1>;
- };
-
- gpio6 {
- pins = "gpio6";
- function = "gpio";
- drive-push-pull = <1>;
- };
-
- gpio7 {
- pins = "gpio7";
- function = "gpio";
- drive-push-pull = <0>;
- };
- };
-
- fps {
- fps0 {
- maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN0>;
- maxim,shutdown-fps-time-period-us = <640>;
- };
-
- fps1 {
- maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN1>;
- maxim,shutdown-fps-time-period-us = <640>;
- maxim,device-state-on-disabled-event = <MAX77620_FPS_INACTIVE_STATE_SLEEP>;
- };
-
- fps2 {
- maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN0>;
- maxim,shutdown-fps-time-period-us = <640>;
- };
- };
-
- regulators {
- in-sd0-supply = <&vdd_5v0_sys>;
- in-sd1-supply = <&vdd_5v0_sys>;
- in-sd2-supply = <&vdd_5v0_sys>;
- in-sd3-supply = <&vdd_5v0_sys>;
- in-sd4-supply = <&vdd_5v0_sys>;
-
- in-ldo0-1-supply = <&vdd_5v0_sys>;
- in-ldo2-supply = <&vdd_5v0_sys>;
- in-ldo3-5-supply = <&vdd_5v0_sys>;
- in-ldo4-6-supply = <&vdd_5v0_sys>;
- in-ldo7-8-supply = <&vdd_1v8ls>;
-
- vdd_1v0: sd0 {
- regulator-name = "VDDIO_SYS_1V0";
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1000000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- vdd_1v8hs: sd1 {
- regulator-name = "VDDIO_SYS_1V8HS";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- vdd_1v8ls: sd2 {
- regulator-name = "VDDIO_SYS_1V8LS";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- vdd_1v8ao: sd3 {
- regulator-name = "VDDIO_AO_1V8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- sd4 {
- regulator-name = "VDD_DDR_1V1";
- regulator-min-microvolt = <1100000>;
- regulator-max-microvolt = <1100000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- ldo0 {
- regulator-name = "VDD_RTC";
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <800000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- ldo2 {
- regulator-name = "VDDIO_AO_3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- ldo3 {
- regulator-name = "VDD_EMMC_3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- };
-
- vdd_usb_3v3: ldo5 {
- regulator-name = "VDD_USB_3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- ldo6 {
- regulator-name = "VDD_SDIO_3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- };
-
- ldo7 {
- regulator-name = "AVDD_CSI_1V2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- };
- };
- };
- };
};
vdd_3v3_sd: regulator@0 {
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0001.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0001.dtsi
new file mode 100644
index 000000000000..b7808648cfe4
--- /dev/null
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0001.dtsi
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "tegra194-p3668.dtsi"
+
+/ {
+ model = "NVIDIA Jetson Xavier NX (eMMC)";
+ compatible = "nvidia,p3668-0001", "nvidia,tegra194";
+
+ bus@0 {
+ /* SDMMC4 (eMMC) */
+ mmc@3460000 {
+ status = "okay";
+ bus-width = <8>;
+ non-removable;
+
+ vqmmc-supply = <&vdd_1v8ls>;
+ vmmc-supply = <&vdd_emmc_3v3>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi
new file mode 100644
index 000000000000..4f12721c332b
--- /dev/null
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "tegra194.dtsi"
+
+#include <dt-bindings/mfd/max77620.h>
+
+/ {
+ aliases {
+ ethernet0 = "/bus@0/ethernet@2490000";
+ i2c0 = "/bpmp/i2c";
+ i2c1 = "/bus@0/i2c@3160000";
+ i2c2 = "/bus@0/i2c@c240000";
+ i2c3 = "/bus@0/i2c@3180000";
+ i2c4 = "/bus@0/i2c@3190000";
+ i2c5 = "/bus@0/i2c@31c0000";
+ i2c6 = "/bus@0/i2c@c250000";
+ i2c7 = "/bus@0/i2c@31e0000";
+ mmc0 = "/bus@0/mmc@3460000";
+ rtc0 = "/bpmp/i2c/pmic@3c";
+ rtc1 = "/bus@0/rtc@c2a0000";
+ serial0 = &tcu;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200n8";
+ stdout-path = "serial0:115200n8";
+ };
+
+ bus@0 {
+ ethernet@2490000 {
+ status = "okay";
+
+ phy-reset-gpios = <&gpio TEGRA194_MAIN_GPIO(R, 1) GPIO_ACTIVE_LOW>;
+ phy-handle = <&phy>;
+ phy-mode = "rgmii-id";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy: phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x0>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA194_MAIN_GPIO(G, 4) IRQ_TYPE_LEVEL_LOW>;
+ #phy-cells = <0>;
+ };
+ };
+ };
+
+ memory-controller@2c00000 {
+ status = "okay";
+ };
+
+ serial@3100000 {
+ status = "okay";
+ };
+
+ i2c@3160000 {
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+
+ label = "module";
+ vcc-supply = <&vdd_1v8ls>;
+ address-width = <8>;
+ pagesize = <8>;
+ size = <256>;
+ read-only;
+ };
+ };
+
+ padctl@3520000 {
+ avdd-usb-supply = <&vdd_usb_3v3>;
+ vclamp-usb-supply = <&vdd_1v8ao>;
+
+ ports {
+ usb2-1 {
+ vbus-supply = <&vdd_5v0_sys>;
+ };
+
+ usb2-3 {
+ vbus-supply = <&vdd_5v0_sys>;
+ };
+
+ usb3-0 {
+ vbus-supply = <&vdd_5v0_sys>;
+ };
+
+ usb3-3 {
+ vbus-supply = <&vdd_5v0_sys>;
+ };
+ };
+ };
+
+ rtc@c2a0000 {
+ status = "okay";
+ };
+
+ pmc@c360000 {
+ nvidia,invert-interrupt;
+ };
+ };
+
+ bpmp {
+ i2c {
+ status = "okay";
+
+ pmic: pmic@3c {
+ compatible = "maxim,max20024";
+ reg = <0x3c>;
+
+ interrupt-parent = <&pmc>;
+ interrupts = <24 IRQ_TYPE_LEVEL_LOW>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ #gpio-cells = <2>;
+ gpio-controller;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&max20024_default>;
+
+ max20024_default: pinmux {
+ gpio0 {
+ pins = "gpio0";
+ function = "gpio";
+ };
+
+ gpio1 {
+ pins = "gpio1";
+ function = "fps-out";
+ maxim,active-fps-source = <MAX77620_FPS_SRC_DEF>;
+ };
+
+ gpio2 {
+ pins = "gpio2";
+ function = "fps-out";
+ maxim,active-fps-source = <MAX77620_FPS_SRC_DEF>;
+ };
+
+ gpio3 {
+ pins = "gpio3";
+ function = "fps-out";
+ maxim,active-fps-source = <MAX77620_FPS_SRC_DEF>;
+ };
+
+ gpio4 {
+ pins = "gpio4";
+ function = "32k-out1";
+ drive-push-pull = <1>;
+ };
+
+ gpio6 {
+ pins = "gpio6";
+ function = "gpio";
+ drive-push-pull = <1>;
+ };
+
+ gpio7 {
+ pins = "gpio7";
+ function = "gpio";
+ drive-push-pull = <0>;
+ };
+ };
+
+ fps {
+ fps0 {
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN0>;
+ maxim,shutdown-fps-time-period-us = <640>;
+ };
+
+ fps1 {
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN1>;
+ maxim,shutdown-fps-time-period-us = <640>;
+ maxim,device-state-on-disabled-event = <MAX77620_FPS_INACTIVE_STATE_SLEEP>;
+ };
+
+ fps2 {
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN0>;
+ maxim,shutdown-fps-time-period-us = <640>;
+ };
+ };
+
+ regulators {
+ in-sd0-supply = <&vdd_5v0_sys>;
+ in-sd1-supply = <&vdd_5v0_sys>;
+ in-sd2-supply = <&vdd_5v0_sys>;
+ in-sd3-supply = <&vdd_5v0_sys>;
+ in-sd4-supply = <&vdd_5v0_sys>;
+
+ in-ldo0-1-supply = <&vdd_5v0_sys>;
+ in-ldo2-supply = <&vdd_5v0_sys>;
+ in-ldo3-5-supply = <&vdd_5v0_sys>;
+ in-ldo4-6-supply = <&vdd_5v0_sys>;
+ in-ldo7-8-supply = <&vdd_1v8ls>;
+
+ vdd_1v0: sd0 {
+ regulator-name = "VDDIO_SYS_1V0";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_1v8hs: sd1 {
+ regulator-name = "VDDIO_SYS_1V8HS";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_1v8ls: sd2 {
+ regulator-name = "VDDIO_SYS_1V8LS";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_1v8ao: sd3 {
+ regulator-name = "VDDIO_AO_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ sd4 {
+ regulator-name = "VDD_DDR_1V1";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldo0 {
+ regulator-name = "VDD_RTC";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldo2 {
+ regulator-name = "VDDIO_AO_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_emmc_3v3: ldo3 {
+ regulator-name = "VDD_EMMC_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vdd_usb_3v3: ldo5 {
+ regulator-name = "VDD_USB_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldo6 {
+ regulator-name = "VDD_SDIO_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ldo7 {
+ regulator-name = "AVDD_CSI_1V2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
index 25f36d6118f8..9449156fae39 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
@@ -609,6 +609,34 @@
status = "disabled";
};
+ spi@3270000 {
+ compatible = "nvidia,tegra194-qspi";
+ reg = <0x3270000 0x1000>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&bpmp TEGRA194_CLK_QSPI0>,
+ <&bpmp TEGRA194_CLK_QSPI0_PM>;
+ clock-names = "qspi", "qspi_out";
+ resets = <&bpmp TEGRA194_RESET_QSPI0>;
+ reset-names = "qspi";
+ status = "disabled";
+ };
+
+ spi@3300000 {
+ compatible = "nvidia,tegra194-qspi";
+ reg = <0x3300000 0x1000>;
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&bpmp TEGRA194_CLK_QSPI1>,
+ <&bpmp TEGRA194_CLK_QSPI1_PM>;
+ clock-names = "qspi", "qspi_out";
+ resets = <&bpmp TEGRA194_RESET_QSPI1>;
+ reset-names = "qspi";
+ status = "disabled";
+ };
+
pwm1: pwm@3280000 {
compatible = "nvidia,tegra194-pwm",
"nvidia,tegra186-pwm";
@@ -2323,6 +2351,26 @@
method = "smc";
};
+ sound {
+ status = "disabled";
+
+ clocks = <&bpmp TEGRA194_CLK_PLLA>,
+ <&bpmp TEGRA194_CLK_PLLA_OUT0>;
+ clock-names = "pll_a", "plla_out0";
+ assigned-clocks = <&bpmp TEGRA194_CLK_PLLA>,
+ <&bpmp TEGRA194_CLK_PLLA_OUT0>,
+ <&bpmp TEGRA194_CLK_AUD_MCLK>;
+ assigned-clock-parents = <0>,
+ <&bpmp TEGRA194_CLK_PLLA>,
+ <&bpmp TEGRA194_CLK_PLLA_OUT0>;
+ /*
+ * PLLA supports dynamic ramp. The initial rate below is chosen
+ * so that the PLL can ramp between the base rates required for
+ * 8x and 11.025x sample rate streams.
+ */
+ assigned-clock-rates = <258000000>;
+ };
+
tcu: tcu {
compatible = "nvidia,tegra194-tcu";
mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_SM TEGRA_HSP_SM_RX(0)>,
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
index 69102dcea8b0..497635af7fab 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
@@ -126,5 +126,304 @@
interrupt-controller@702f9000 {
status = "okay";
};
+
+ ahub@702d0800 {
+ status = "okay";
+
+ admaif@702d0000 {
+ status = "okay";
+ };
+
+ i2s@702d1000 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s1_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s1_ep>;
+ };
+ };
+
+ i2s1_port: port@1 {
+ reg = <1>;
+
+ i2s1_dap_ep: endpoint {
+ dai-format = "i2s";
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ i2s@702d1100 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s2_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s2_ep>;
+ };
+ };
+
+ i2s2_port: port@1 {
+ reg = <1>;
+
+ i2s2_dap_ep: endpoint {
+ dai-format = "i2s";
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ i2s@702d1200 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s3_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s3_ep>;
+ };
+ };
+
+ i2s3_port: port@1 {
+ reg = <1>;
+
+ i2s3_dap_ep: endpoint {
+ dai-format = "i2s";
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ i2s@702d1300 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s4_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s4_ep>;
+ };
+ };
+
+ i2s4_port: port@1 {
+ reg = <1>;
+
+ i2s4_dap_ep: endpoint {
+ dai-format = "i2s";
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ i2s@702d1400 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s5_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s5_ep>;
+ };
+ };
+
+ i2s5_port: port@1 {
+ reg = <1>;
+
+ i2s5_dap_ep: endpoint {
+ dai-format = "i2s";
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ dmic@702d4000 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dmic1_cif_ep: endpoint {
+ remote-endpoint = <&xbar_dmic1_ep>;
+ };
+ };
+
+ dmic1_port: port@1 {
+ reg = <1>;
+
+ dmic1_dap_ep: endpoint {
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ dmic@702d4100 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dmic2_cif_ep: endpoint {
+ remote-endpoint = <&xbar_dmic2_ep>;
+ };
+ };
+
+ dmic2_port: port@1 {
+ reg = <1>;
+
+ dmic2_dap_ep: endpoint {
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ dmic@702d4200 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dmic3_cif_ep: endpoint {
+ remote-endpoint = <&xbar_dmic3_ep>;
+ };
+ };
+
+ dmic3_port: port@1 {
+ reg = <1>;
+
+ dmic3_dap_ep: endpoint {
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ ports {
+ xbar_i2s1_port: port@a {
+ reg = <0xa>;
+
+ xbar_i2s1_ep: endpoint {
+ remote-endpoint = <&i2s1_cif_ep>;
+ };
+ };
+
+ xbar_i2s2_port: port@b {
+ reg = <0xb>;
+
+ xbar_i2s2_ep: endpoint {
+ remote-endpoint = <&i2s2_cif_ep>;
+ };
+ };
+
+ xbar_i2s3_port: port@c {
+ reg = <0xc>;
+
+ xbar_i2s3_ep: endpoint {
+ remote-endpoint = <&i2s3_cif_ep>;
+ };
+ };
+
+ xbar_i2s4_port: port@d {
+ reg = <0xd>;
+
+ xbar_i2s4_ep: endpoint {
+ remote-endpoint = <&i2s4_cif_ep>;
+ };
+ };
+
+ xbar_i2s5_port: port@e {
+ reg = <0xe>;
+
+ xbar_i2s5_ep: endpoint {
+ remote-endpoint = <&i2s5_cif_ep>;
+ };
+ };
+
+ xbar_dmic1_port: port@f {
+ reg = <0xf>;
+
+ xbar_dmic1_ep: endpoint {
+ remote-endpoint = <&dmic1_cif_ep>;
+ };
+ };
+
+ xbar_dmic2_port: port@10 {
+ reg = <0x10>;
+
+ xbar_dmic2_ep: endpoint {
+ remote-endpoint = <&dmic2_cif_ep>;
+ };
+ };
+
+ xbar_dmic3_port: port@11 {
+ reg = <0x11>;
+
+ xbar_dmic3_ep: endpoint {
+ remote-endpoint = <&dmic3_cif_ep>;
+ };
+ };
+ };
+ };
+ };
+
+ sound {
+ compatible = "nvidia,tegra210-audio-graph-card";
+ status = "okay";
+
+ dais = /* FE */
+ <&admaif1_port>, <&admaif2_port>, <&admaif3_port>,
+ <&admaif4_port>, <&admaif5_port>, <&admaif6_port>,
+ <&admaif7_port>, <&admaif8_port>, <&admaif9_port>,
+ <&admaif10_port>,
+ /* Router */
+ <&xbar_i2s1_port>, <&xbar_i2s2_port>, <&xbar_i2s3_port>,
+ <&xbar_i2s4_port>, <&xbar_i2s5_port>, <&xbar_dmic1_port>,
+ <&xbar_dmic2_port>, <&xbar_dmic3_port>,
+ /* I/O DAP Ports */
+ <&i2s1_port>, <&i2s2_port>, <&i2s3_port>, <&i2s4_port>,
+ <&i2s5_port>, <&dmic1_port>, <&dmic2_port>, <&dmic3_port>;
+
+ label = "jetson-tx1-ape";
};
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
index 6a877decffc1..14c128a5e248 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
@@ -636,6 +636,162 @@
interrupt-controller@702f9000 {
status = "okay";
};
+
+ ahub@702d0800 {
+ status = "okay";
+
+ admaif@702d0000 {
+ status = "okay";
+ };
+
+ i2s@702d1200 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s3_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s3_ep>;
+ };
+ };
+
+ i2s3_port: port@1 {
+ reg = <1>;
+
+ i2s3_dap_ep: endpoint {
+ dai-format = "i2s";
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ i2s@702d1300 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ i2s4_cif_ep: endpoint {
+ remote-endpoint = <&xbar_i2s4_ep>;
+ };
+ };
+
+ i2s4_port: port@1 {
+ reg = <1>;
+
+ i2s4_dap_ep: endpoint {
+ dai-format = "i2s";
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ dmic@702d4000 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dmic1_cif_ep: endpoint {
+ remote-endpoint = <&xbar_dmic1_ep>;
+ };
+ };
+
+ dmic1_port: port@1 {
+ reg = <1>;
+
+ dmic1_dap_ep: endpoint {
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ dmic@702d4100 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dmic2_cif_ep: endpoint {
+ remote-endpoint = <&xbar_dmic2_ep>;
+ };
+ };
+
+ dmic2_port: port@1 {
+ reg = <1>;
+
+ dmic2_dap_ep: endpoint {
+ /* Placeholder for external Codec */
+ };
+ };
+ };
+ };
+
+ ports {
+ xbar_i2s3_port: port@c {
+ reg = <0xc>;
+
+ xbar_i2s3_ep: endpoint {
+ remote-endpoint = <&i2s3_cif_ep>;
+ };
+ };
+
+ xbar_i2s4_port: port@d {
+ reg = <0xd>;
+
+ xbar_i2s4_ep: endpoint {
+ remote-endpoint = <&i2s4_cif_ep>;
+ };
+ };
+
+ xbar_dmic1_port: port@f {
+ reg = <0xf>;
+
+ xbar_dmic1_ep: endpoint {
+ remote-endpoint = <&dmic1_cif_ep>;
+ };
+ };
+
+ xbar_dmic2_port: port@10 {
+ reg = <0x10>;
+
+ xbar_dmic2_ep: endpoint {
+ remote-endpoint = <&dmic2_cif_ep>;
+ };
+ };
+ };
+ };
+ };
+
+ spi@70410000 {
+ status = "okay";
+
+ flash@0 {
+ compatible = "spi-nor";
+ reg = <0>;
+ spi-max-frequency = <104000000>;
+ spi-tx-bus-width = <2>;
+ spi-rx-bus-width = <2>;
+ };
};
clk32k_in: clock@0 {
@@ -870,4 +1026,23 @@
vin-supply = <&vdd_5v0_sys>;
};
+
+ sound {
+ compatible = "nvidia,tegra210-audio-graph-card";
+ status = "okay";
+
+ dais = /* FE */
+ <&admaif1_port>, <&admaif2_port>, <&admaif3_port>,
+ <&admaif4_port>, <&admaif5_port>, <&admaif6_port>,
+ <&admaif7_port>, <&admaif8_port>, <&admaif9_port>,
+ <&admaif10_port>,
+ /* Router */
+ <&xbar_i2s3_port>, <&xbar_i2s4_port>,
+ <&xbar_dmic1_port>, <&xbar_dmic2_port>,
+ /* I/O DAP Ports */
+ <&i2s3_port>, <&i2s4_port>,
+ <&dmic1_port>, <&dmic2_port>;
+
+ label = "jetson-nano-ape";
+ };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 4fbf8c15b0a1..26b3f98a211c 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -997,6 +997,7 @@
<&tegra_car 128>, /* hda2hdmi */
<&tegra_car 111>; /* hda2codec_2x */
reset-names = "hda", "hda2hdmi", "hda2codec_2x";
+ power-domains = <&pd_sor>;
status = "disabled";
};
@@ -1043,6 +1044,7 @@
interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
resets = <&tegra_car 142>;
reset-names = "padctl";
+ nvidia,pmc = <&tegra_pmc>;
status = "disabled";
@@ -1307,6 +1309,32 @@
status = "disabled";
};
+ soctherm: thermal-sensor@700e2000 {
+ compatible = "nvidia,tegra210-soctherm";
+ reg = <0x0 0x700e2000 0x0 0x600>, /* SOC_THERM reg_base */
+ <0x0 0x60006000 0x0 0x400>; /* CAR reg_base */
+ reg-names = "soctherm-reg", "car-reg";
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "thermal", "edp";
+ clocks = <&tegra_car TEGRA210_CLK_TSENSOR>,
+ <&tegra_car TEGRA210_CLK_SOC_THERM>;
+ clock-names = "tsensor", "soctherm";
+ resets = <&tegra_car 78>;
+ reset-names = "soctherm";
+ #thermal-sensor-cells = <1>;
+
+ throttle-cfgs {
+ throttle_heavy: heavy {
+ nvidia,priority = <100>;
+ nvidia,cpu-throt-percent = <85>;
+ nvidia,gpu-throt-level = <TEGRA_SOCTHERM_THROT_LEVEL_HIGH>;
+
+ #cooling-cells = <2>;
+ };
+ };
+ };
+
mipi: mipi@700e3000 {
compatible = "nvidia,tegra210-mipi";
reg = <0x0 0x700e3000 0x0 0x100>;
@@ -1425,6 +1453,91 @@
"rx9", "tx9",
"rx10", "tx10";
status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ admaif1_port: port@0 {
+ reg = <0>;
+
+ admaif1_ep: endpoint {
+ remote-endpoint = <&xbar_admaif1_ep>;
+ };
+ };
+
+ admaif2_port: port@1 {
+ reg = <1>;
+
+ admaif2_ep: endpoint {
+ remote-endpoint = <&xbar_admaif2_ep>;
+ };
+ };
+
+ admaif3_port: port@2 {
+ reg = <2>;
+
+ admaif3_ep: endpoint {
+ remote-endpoint = <&xbar_admaif3_ep>;
+ };
+ };
+
+ admaif4_port: port@3 {
+ reg = <3>;
+
+ admaif4_ep: endpoint {
+ remote-endpoint = <&xbar_admaif4_ep>;
+ };
+ };
+
+ admaif5_port: port@4 {
+ reg = <4>;
+
+ admaif5_ep: endpoint {
+ remote-endpoint = <&xbar_admaif5_ep>;
+ };
+ };
+
+ admaif6_port: port@5 {
+ reg = <5>;
+
+ admaif6_ep: endpoint {
+ remote-endpoint = <&xbar_admaif6_ep>;
+ };
+ };
+
+ admaif7_port: port@6 {
+ reg = <6>;
+
+ admaif7_ep: endpoint {
+ remote-endpoint = <&xbar_admaif7_ep>;
+ };
+ };
+
+ admaif8_port: port@7 {
+ reg = <7>;
+
+ admaif8_ep: endpoint {
+ remote-endpoint = <&xbar_admaif8_ep>;
+ };
+ };
+
+ admaif9_port: port@8 {
+ reg = <8>;
+
+ admaif9_ep: endpoint {
+ remote-endpoint = <&xbar_admaif9_ep>;
+ };
+ };
+
+ admaif10_port: port@9 {
+ reg = <9>;
+
+ admaif10_ep: endpoint {
+ remote-endpoint = <&xbar_admaif10_ep>;
+ };
+ };
+ };
};
tegra_i2s1: i2s@702d1000 {
@@ -1527,6 +1640,89 @@
sound-name-prefix = "DMIC3";
status = "disabled";
};
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0x0>;
+
+ xbar_admaif1_ep: endpoint {
+ remote-endpoint = <&admaif1_ep>;
+ };
+ };
+
+ port@1 {
+ reg = <0x1>;
+
+ xbar_admaif2_ep: endpoint {
+ remote-endpoint = <&admaif2_ep>;
+ };
+ };
+
+ port@2 {
+ reg = <0x2>;
+
+ xbar_admaif3_ep: endpoint {
+ remote-endpoint = <&admaif3_ep>;
+ };
+ };
+
+ port@3 {
+ reg = <0x3>;
+
+ xbar_admaif4_ep: endpoint {
+ remote-endpoint = <&admaif4_ep>;
+ };
+ };
+
+ port@4 {
+ reg = <0x4>;
+
+ xbar_admaif5_ep: endpoint {
+ remote-endpoint = <&admaif5_ep>;
+ };
+ };
+
+ port@5 {
+ reg = <0x5>;
+
+ xbar_admaif6_ep: endpoint {
+ remote-endpoint = <&admaif6_ep>;
+ };
+ };
+
+ port@6 {
+ reg = <0x6>;
+
+ xbar_admaif7_ep: endpoint {
+ remote-endpoint = <&admaif7_ep>;
+ };
+ };
+
+ port@7 {
+ reg = <0x7>;
+
+ xbar_admaif8_ep: endpoint {
+ remote-endpoint = <&admaif8_ep>;
+ };
+ };
+
+ port@8 {
+ reg = <0x8>;
+
+ xbar_admaif9_ep: endpoint {
+ remote-endpoint = <&admaif9_ep>;
+ };
+ };
+
+ port@9 {
+ reg = <0x9>;
+
+ xbar_admaif10_ep: endpoint {
+ remote-endpoint = <&admaif10_ep>;
+ };
+ };
+ };
};
};
@@ -1536,8 +1732,9 @@
interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&tegra_car TEGRA210_CLK_QSPI>;
- clock-names = "qspi";
+ clocks = <&tegra_car TEGRA210_CLK_QSPI>,
+ <&tegra_car TEGRA210_CLK_QSPI_PM>;
+ clock-names = "qspi", "qspi_out";
resets = <&tegra_car 211>;
reset-names = "qspi";
dmas = <&apbdma 5>, <&apbdma 5>;
@@ -1692,44 +1889,18 @@
&{/cpus/cpu@2} &{/cpus/cpu@3}>;
};
- timer {
- compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 13
- (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 14
- (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 11
- (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 10
- (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
- interrupt-parent = <&gic>;
- arm,no-tick-in-suspend;
- };
-
- soctherm: thermal-sensor@700e2000 {
- compatible = "nvidia,tegra210-soctherm";
- reg = <0x0 0x700e2000 0x0 0x600>, /* SOC_THERM reg_base */
- <0x0 0x60006000 0x0 0x400>; /* CAR reg_base */
- reg-names = "soctherm-reg", "car-reg";
- interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "thermal", "edp";
- clocks = <&tegra_car TEGRA210_CLK_TSENSOR>,
- <&tegra_car TEGRA210_CLK_SOC_THERM>;
- clock-names = "tsensor", "soctherm";
- resets = <&tegra_car 78>;
- reset-names = "soctherm";
- #thermal-sensor-cells = <1>;
+ sound {
+ status = "disabled";
- throttle-cfgs {
- throttle_heavy: heavy {
- nvidia,priority = <100>;
- nvidia,cpu-throt-percent = <85>;
- nvidia,gpu-throt-level = <TEGRA_SOCTHERM_THROT_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA210_CLK_PLL_A>,
+ <&tegra_car TEGRA210_CLK_PLL_A_OUT0>;
+ clock-names = "pll_a", "plla_out0";
- #cooling-cells = <2>;
- };
- };
+ assigned-clocks = <&tegra_car TEGRA210_CLK_PLL_A>,
+ <&tegra_car TEGRA210_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA210_CLK_EXTERN1>;
+ assigned-clock-parents = <0>, <0>, <&tegra_car TEGRA210_CLK_PLL_A_OUT0>;
+ assigned-clock-rates = <368640000>, <49152000>, <12288000>;
};
thermal-zones {
@@ -1866,4 +2037,18 @@
};
};
};
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-parent = <&gic>;
+ arm,no-tick-in-suspend;
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 5113fac80b7a..549a7a2151d4 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -1,19 +1,27 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_QCOM) += apq8016-sbc.dtb
+dtb-$(CONFIG_ARCH_QCOM) += apq8094-sony-xperia-kitakami-karin_windy.dtb
dtb-$(CONFIG_ARCH_QCOM) += apq8096-db820c.dtb
dtb-$(CONFIG_ARCH_QCOM) += apq8096-ifc6640.dtb
dtb-$(CONFIG_ARCH_QCOM) += ipq6018-cp01-c1.dtb
dtb-$(CONFIG_ARCH_QCOM) += ipq8074-hk01.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8916-alcatel-idol347.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8916-asus-z00l.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-longcheer-l8150.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8916-longcheer-l8910.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-a3u-eur.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-samsung-a5u-eur.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8992-bullhead-rev-101.dtb
-dtb-$(CONFIG_ARCH_QCOM) += msm8992-msft-lumia-talkman.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8992-msft-lumia-octagon-talkman.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8992-xiaomi-libra.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8994-angler-rev-101.dtb
-dtb-$(CONFIG_ARCH_QCOM) += msm8994-msft-lumia-cityman.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8994-msft-lumia-octagon-cityman.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8994-sony-xperia-kitakami-ivy.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8994-sony-xperia-kitakami-karin.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8994-sony-xperia-kitakami-satsuki.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8994-sony-xperia-kitakami-sumire.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8994-sony-xperia-kitakami-suzuran.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8996-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8998-asus-novago-tp370ql.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8998-hp-envy-x2.dtb
@@ -43,9 +51,12 @@ dtb-$(CONFIG_ARCH_QCOM) += sdm845-cheza-r2.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm845-cheza-r3.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm845-db845c.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm845-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sdm845-oneplus-enchilada.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sdm845-oneplus-fajita.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm845-xiaomi-beryllium.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm850-lenovo-yoga-c630.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8150-hdk.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8150-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8250-hdk.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8250-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sm8350-mtp.dtb
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index 3a9538e1ec97..6aef0c2e4f0a 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -301,6 +301,10 @@
status = "okay";
};
+&mdss {
+ status = "okay";
+};
+
&pm8916_resin {
status = "okay";
linux,code = <KEY_VOLUMEDOWN>;
diff --git a/arch/arm64/boot/dts/qcom/apq8094-sony-xperia-kitakami-karin_windy.dts b/arch/arm64/boot/dts/qcom/apq8094-sony-xperia-kitakami-karin_windy.dts
new file mode 100644
index 000000000000..a8dffc8c64ea
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8094-sony-xperia-kitakami-karin_windy.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+/dts-v1/;
+
+/* As the names may imply, there is quite a lot of duplication between the two. */
+#include "msm8994-sony-xperia-kitakami-karin.dts"
+
+/ {
+ model = "Sony Xperia Z4 Tablet (Wi-Fi)";
+ compatible = "sony,karin_windy", "qcom,apq8094";
+
+ /*
+ * This model uses the APQ variant of MSM8994 (APQ8094).
+ * The v1/v2/v2.1 story (from kitakami.dtsi) also applies here.
+ */
+ qcom,msm-id = <253 0x20000>, <253 0x20001>;
+};
+
+/delete-node/ &pm8994_l1;
+/delete-node/ &pm8994_l19;
diff --git a/arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts b/arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts
new file mode 100644
index 000000000000..540b1fa4b260
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-alcatel-idol347.dts
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/dts-v1/;
+
+#include "msm8916-pm8916.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "Alcatel OneTouch Idol 3 (4.7)";
+ compatible = "alcatel,idol347", "qcom,msm8916";
+
+ aliases {
+ serial0 = &blsp1_uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_keys_default>;
+
+ label = "GPIO Buttons";
+
+ volume-up {
+ label = "Volume Up";
+ gpios = <&msmgpio 107 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+ };
+
+ usb_id: usb-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&msmgpio 69 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_id_default>;
+ };
+};
+
+&blsp1_uart2 {
+ status = "okay";
+};
+
+&blsp_i2c5 {
+ status = "okay";
+
+ magnetometer@c {
+ compatible = "asahi-kasei,ak09911";
+ reg = <0x0c>;
+ vdd-supply = <&pm8916_l17>;
+ vid-supply = <&pm8916_l6>;
+ reset-gpios = <&msmgpio 8 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mag_reset_default>;
+ mount-matrix = "0", "1", "0",
+ "-1", "0", "0",
+ "0", "0", "1";
+ };
+
+ accelerometer@f {
+ compatible = "kionix,kxtj21009";
+ reg = <0x0f>;
+ vdd-supply = <&pm8916_l17>;
+ vddio-supply = <&pm8916_l6>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <31 IRQ_TYPE_EDGE_RISING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&accel_int_default>;
+ mount-matrix = "-1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "-1";
+ };
+
+ proximity@48 {
+ compatible = "sensortek,stk3310";
+ reg = <0x48>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&proximity_int_default>;
+ };
+
+ gyroscope@68 {
+ compatible = "bosch,bmg160";
+ reg = <0x68>;
+ vdd-supply = <&pm8916_l17>;
+ vddio-supply = <&pm8916_l6>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <97 IRQ_TYPE_EDGE_RISING>,
+ <98 IRQ_TYPE_EDGE_RISING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gyro_int_default>;
+ };
+};
+
+&pm8916_resin {
+ status = "okay";
+ linux,code = <KEY_VOLUMEDOWN>;
+};
+
+&pm8916_vib {
+ status = "okay";
+};
+
+&pronto {
+ status = "okay";
+};
+
+&sdhc_1 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
+ pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>;
+};
+
+&sdhc_2 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+
+ cd-gpios = <&msmgpio 38 GPIO_ACTIVE_LOW>;
+};
+
+&usb {
+ status = "okay";
+ extcon = <&usb_id>, <&usb_id>;
+};
+
+&usb_hs_phy {
+ extcon = <&usb_id>;
+};
+
+&smd_rpm_regulators {
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+ vdd_l7-supply = <&pm8916_s4>;
+
+ s3 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2100000>;
+ };
+
+ l1 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ l4 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ l5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l8 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2900000>;
+ };
+
+ l9 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l10 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ l11 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ regulator-system-load = <200000>;
+ };
+
+ l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ l13 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+
+ l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l16 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l17 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ l18 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+};
+
+&msmgpio {
+ accel_int_default: accel-int-default {
+ pins = "gpio31";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ gpio_keys_default: gpio-keys-default {
+ pins = "gpio107";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ gyro_int_default: gyro-int-default {
+ pins = "gpio97", "gpio98";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ mag_reset_default: mag-reset-default {
+ pins = "gpio8";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ proximity_int_default: proximity-int-default {
+ pins = "gpio12";
+ function = "gpio";
+
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+
+ usb_id_default: usb-id-default {
+ pins = "gpio69";
+ function = "gpio";
+
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-asus-z00l.dts b/arch/arm64/boot/dts/qcom/msm8916-asus-z00l.dts
new file mode 100644
index 000000000000..cee451e59385
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-asus-z00l.dts
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/dts-v1/;
+
+#include "msm8916-pm8916.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "Asus Zenfone 2 Laser";
+ compatible = "asus,z00l", "qcom,msm8916";
+
+ aliases {
+ serial0 = &blsp1_uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_keys_default>;
+
+ label = "GPIO Buttons";
+
+ volume-up {
+ label = "Volume Up";
+ gpios = <&msmgpio 107 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ debounce-interval = <15>;
+ };
+
+ volume-down {
+ label = "Volume Down";
+ gpios = <&msmgpio 117 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ debounce-interval = <15>;
+ };
+ };
+
+ usb_id: usb-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpios = <&msmgpio 110 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_id_default>;
+ };
+};
+
+&blsp1_uart2 {
+ status = "okay";
+};
+
+&pronto {
+ status = "okay";
+};
+
+&sdhc_1 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
+ pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>;
+};
+
+&usb {
+ status = "okay";
+ extcon = <&usb_id>, <&usb_id>;
+};
+
+&usb_hs_phy {
+ extcon = <&usb_id>;
+};
+
+&smd_rpm_regulators {
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+ vdd_l7-supply = <&pm8916_s4>;
+
+ s3 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2100000>;
+ };
+
+ l1 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ l4 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ l5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l8 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2900000>;
+ };
+
+ l9 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l10 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ l11 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ regulator-system-load = <200000>;
+ };
+
+ l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ l13 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+
+ l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l16 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l17 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ l18 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+};
+
+&msmgpio {
+ gpio_keys_default: gpio-keys-default {
+ pins = "gpio107", "gpio117";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ usb_id_default: usb-id-default {
+ pins = "gpio110";
+ function = "gpio";
+
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts
new file mode 100644
index 000000000000..27845189ac2b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8910.dts
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/dts-v1/;
+
+#include "msm8916-pm8916.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "BQ Aquaris X5 (Longcheer L8910)";
+ compatible = "longcheer,l8910", "qcom,msm8916";
+
+ aliases {
+ serial0 = &blsp1_uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_keys_default>;
+
+ label = "GPIO Buttons";
+
+ volume-up {
+ label = "Volume Up";
+ gpios = <&msmgpio 107 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ gpios = <&msmgpio 17 GPIO_ACTIVE_HIGH>;
+ color = <LED_COLOR_ID_WHITE>;
+ default-state = "off";
+ function = LED_FUNCTION_KBD_BACKLIGHT;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&button_backlight_default>;
+ };
+ };
+
+ usb_id: usb-id {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&msmgpio 110 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_id_default>;
+ };
+};
+
+&blsp_i2c3 {
+ status = "okay";
+
+ magnetometer@d {
+ compatible = "asahi-kasei,ak09911";
+ reg = <0x0d>;
+
+ vdd-supply = <&pm8916_l17>;
+ vid-supply = <&pm8916_l6>;
+
+ reset-gpios = <&msmgpio 111 GPIO_ACTIVE_LOW>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&mag_reset_default>;
+ };
+
+ imu@68 {
+ compatible = "bosch,bmi160";
+ reg = <0x68>;
+
+ vdd-supply = <&pm8916_l17>;
+ vddio-supply = <&pm8916_l6>;
+
+ mount-matrix = "0", "1", "0",
+ "-1", "0", "0",
+ "0", "0", "1";
+ };
+};
+
+&blsp1_uart2 {
+ status = "okay";
+};
+
+&pm8916_resin {
+ status = "okay";
+ linux,code = <KEY_VOLUMEDOWN>;
+};
+
+&pm8916_vib {
+ status = "okay";
+};
+
+&pronto {
+ status = "okay";
+};
+
+&sdhc_1 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
+ pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>;
+};
+
+&sdhc_2 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+
+ cd-gpios = <&msmgpio 38 GPIO_ACTIVE_LOW>;
+};
+
+&usb {
+ status = "okay";
+ extcon = <&usb_id>, <&usb_id>;
+};
+
+&usb_hs_phy {
+ extcon = <&usb_id>;
+};
+
+&smd_rpm_regulators {
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+ vdd_l7-supply = <&pm8916_s4>;
+
+ s3 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2100000>;
+ };
+
+ l1 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ l4 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ l5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l8 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2900000>;
+ };
+
+ l9 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l10 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ l11 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ regulator-system-load = <200000>;
+ };
+
+ l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ l13 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+
+ l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l16 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ l17 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ l18 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+};
+
+&msmgpio {
+ button_backlight_default: button-backlight-default {
+ pins = "gpio17";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ gpio_keys_default: gpio-keys-default {
+ pins = "gpio107";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ mag_reset_default: mag-reset-default {
+ pins = "gpio111";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ usb_id_default: usb-id-default {
+ pins = "gpio110";
+ function = "gpio";
+
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
index 4dc437f13fa5..7dedb91b9930 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
@@ -220,6 +220,22 @@
bias-disable;
};
+ i2c3_default: i2c3-default {
+ pins = "gpio10", "gpio11";
+ function = "blsp_i2c3";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ i2c3_sleep: i2c3-sleep {
+ pins = "gpio10", "gpio11";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
i2c4_default: i2c4-default {
pins = "gpio14", "gpio15";
function = "blsp_i2c4";
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
index f91269492d72..230ba3ce3277 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
@@ -106,6 +106,9 @@
interrupt-parent = <&msmgpio>;
interrupts = <115 IRQ_TYPE_EDGE_RISING>;
+ vdd-supply = <&pm8916_l17>;
+ vddio-supply = <&pm8916_l5>;
+
pinctrl-names = "default";
pinctrl-0 = <&accel_int_default>;
};
@@ -113,6 +116,9 @@
magnetometer@12 {
compatible = "bosch,bmc150_magn";
reg = <0x12>;
+
+ vdd-supply = <&pm8916_l17>;
+ vddio-supply = <&pm8916_l5>;
};
};
@@ -126,6 +132,10 @@
pinctrl-1 = <&mdss_sleep>;
};
+&mdss {
+ status = "okay";
+};
+
&pm8916_resin {
status = "okay";
linux,code = <KEY_VOLUMEDOWN>;
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
index e39c04d977c2..dd35c3344358 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
@@ -38,7 +38,7 @@
&pronto {
iris {
- compatible = "qcom,wcn3680";
+ compatible = "qcom,wcn3660b";
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 402e891a84ab..5353da521974 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -56,7 +56,7 @@
no-map;
};
- reserved@8668000 {
+ reserved@86680000 {
reg = <0x0 0x86680000 0x0 0x80000>;
no-map;
};
@@ -69,7 +69,7 @@
qcom,client-id = <1>;
};
- rfsa@867e00000 {
+ rfsa@867e0000 {
reg = <0x0 0x867e0000 0x0 0x20000>;
no-map;
};
@@ -913,6 +913,7 @@
};
mdss: mdss@1a00000 {
+ status = "disabled";
compatible = "qcom,mdss";
reg = <0x01a00000 0x1000>,
<0x01ac8000 0x3000>;
@@ -1528,6 +1529,21 @@
status = "disabled";
};
+ blsp_i2c3: i2c@78b7000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x078b7000 0x500>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c3_default>;
+ pinctrl-1 = <&i2c3_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
blsp_spi3: spi@78b7000 {
compatible = "qcom,spi-qup-v2.2.1";
reg = <0x078b7000 0x500>;
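The blsp_i2c3 controller added above is registered with status = "disabled" in msm8916.dtsi; board files that route peripherals over it (for example the Longcheer L8910 added earlier in this patch) enable it and describe their devices as children. A minimal board-side sketch, with a purely hypothetical I2C peripheral shown only for illustration:

&blsp_i2c3 {
	status = "okay";

	/* hypothetical example device, not part of this patch */
	light-sensor@29 {
		compatible = "vendor,example-light-sensor";
		reg = <0x29>;
	};
};

The pinctrl states do not need to be repeated in the board file, since the default and sleep handles are already attached to the node in the SoC dtsi.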
diff --git a/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts
index 5969b5cfdc85..23cdcc9f7c72 100644
--- a/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts
+++ b/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts
@@ -6,6 +6,8 @@
/dts-v1/;
#include "msm8992.dtsi"
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
/ {
model = "LG Nexus 5X";
@@ -44,7 +46,7 @@
};
&rpm_requests {
- pm8994-regulators {
+ pm8994_regulators: pm8994-regulators {
compatible = "qcom,rpm-pm8994-regulators";
vdd_l1-supply = <&pm8994_s1>;
@@ -53,15 +55,17 @@
vdd_l4_27_31-supply = <&pm8994_s3>;
vdd_l5_7-supply = <&pm8994_s3>;
vdd_l6_12_32-supply = <&pm8994_s5>;
- vdd_l8_16_30-supply = <&vreg_vph_pwr>;
- vdd_l9_10_18_22-supply = <&vreg_vph_pwr>;
- vdd_l13_19_23_24-supply = <&vreg_vph_pwr>;
+ vdd_l8_16_30-supply = <&vph_pwr>;
+ vdd_l9_10_18_22-supply = <&vph_pwr>;
+ vdd_l13_19_23_24-supply = <&vph_pwr>;
vdd_l14_15-supply = <&pm8994_s5>;
- vdd_l17_29-supply = <&vreg_vph_pwr>;
- vdd_l20_21-supply = <&vreg_vph_pwr>;
+ vdd_l17_29-supply = <&vph_pwr>;
+ vdd_l20_21-supply = <&vph_pwr>;
vdd_l25-supply = <&pm8994_s5>;
vdd_lvs1_2 = <&pm8994_s4>;
+ /* S1, S2, S6 and S12 are managed by RPMPD */
+
pm8994_s1: s1 {
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <800000>;
@@ -93,6 +97,8 @@
regulator-max-microvolt = <1000000>;
};
+ /* S8, S9, S10 and S11 - SPMI-managed VDD_APC */
+
pm8994_l1: l1 {
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
@@ -113,18 +119,14 @@
regulator-max-microvolt = <1225000>;
};
- pm8994_l5: l5 {
- /* TODO */
- };
+ /* L5 is inaccessible from RPM */
pm8994_l6: l6 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
- pm8994_l7: l7 {
- /* TODO */
- };
+ /* L7 is inaccessible from RPM */
pm8994_l8: l8 {
regulator-min-microvolt = <1800000>;
@@ -266,9 +268,22 @@
*/
};
};
+
+ pmi8994_regulators: pmi8994-regulators {
+ compatible = "qcom,rpm-pmi8994-regulators";
+
+ vdd_s1-supply = <&vph_pwr>;
+ vdd_bst_byp-supply = <&vph_pwr>;
+
+ pmi8994_s1: s1 {};
+
+ /* S2 & S3 - VDD_GFX */
+
+ pmi8994_bby: boost-bypass {};
+ };
};
-&sdhc_1 {
+&sdhc1 {
status = "okay";
mmc-hs400-1_8v;
diff --git a/arch/arm64/boot/dts/qcom/msm8992-msft-lumia-octagon-talkman.dts b/arch/arm64/boot/dts/qcom/msm8992-msft-lumia-octagon-talkman.dts
new file mode 100644
index 000000000000..5322b9ce5839
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8992-msft-lumia-octagon-talkman.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, Konrad Dybcio <konrad.dybcio@somainline.org>
+ * Copyright (c) 2020, Gustave Monce <gustave.monce@outlook.com>
+ */
+
+/dts-v1/;
+
+#include "msm8992.dtsi"
+#include "msm8994-msft-lumia-octagon.dtsi"
+
+/ {
+ model = "Microsoft Lumia 950";
+ compatible = "microsoft,talkman", "qcom,msm8992";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8992-msft-lumia-talkman.dts b/arch/arm64/boot/dts/qcom/msm8992-msft-lumia-talkman.dts
deleted file mode 100644
index c337a86a5c77..000000000000
--- a/arch/arm64/boot/dts/qcom/msm8992-msft-lumia-talkman.dts
+++ /dev/null
@@ -1,67 +0,0 @@
-// SPDX-License-Identifier: BSD-3-Clause
-/*
- * Copyright (c) 2020, Konrad Dybcio
- */
-
-/dts-v1/;
-
-#include "msm8992.dtsi"
-#include "pm8994.dtsi"
-#include "pmi8994.dtsi"
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/gpio-keys.h>
-
-/ {
- model = "Microsoft Lumia 950";
- compatible = "microsoft,talkman", "qcom,msm8992";
-
- /* Most Lumia 950 users use GRUB to load their kernels,
- * hence there is no need for msm-id and friends.
- */
-
- /* This enables graphical output via bootloader-enabled display.
- * acpi=no is required due to WP platforms having ACPI support, but
- * only for Windows-based OSes.
- */
- chosen {
- bootargs = "earlycon=efifb console=efifb acpi=no";
-
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
- };
-};
-
-&blsp_i2c1 {
- status = "okay";
-
- rmi4-i2c-dev@4b {
- compatible = "syna,rmi4-i2c";
- reg = <0x4b>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- interrupt-parent = <&tlmm>;
- interrupts = <77 IRQ_TYPE_EDGE_FALLING>;
-
- rmi4-f01@1 {
- reg = <0x01>;
- syna,nosleep-mode = <1>;
- };
-
- rmi4-f12@12 {
- reg = <0x12>;
- syna,sensor-type = <1>;
- syna,clip-x-low = <0>;
- syna,clip-x-high = <1440>;
- syna,clip-y-low = <0>;
- syna,clip-y-high = <2560>;
- };
- };
-};
-
-&sdhc_1 {
- status = "okay";
-
- mmc-hs200-1_8v;
-};
diff --git a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
index 4f64ca3ea1ef..357d55496e75 100644
--- a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
+++ b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
@@ -70,21 +70,6 @@
pmsg-size = <0x20000>;
};
- continuous_splash: framebuffer@3401000{
- reg = <0x0 0x3401000 0x0 0x2200000>;
- no-map;
- };
-
- dfps_data_mem: dfps_data_mem@3400000 {
- reg = <0x0 0x3400000 0x0 0x1000>;
- no-map;
- };
-
- peripheral_region: peripheral_region@7400000 {
- reg = <0x0 0x7400000 0x0 0x1c00000>;
- no-map;
- };
-
modem_region: modem_region@9000000 {
reg = <0x0 0x9000000 0x0 0x5a00000>;
no-map;
@@ -97,43 +82,49 @@
};
};
-&blsp_i2c2 {
+&blsp1_i2c2 {
status = "okay";
/* Atmel or Synaptics touchscreen */
};
-&blsp_i2c5 {
+&blsp1_i2c5 {
status = "okay";
- /* Silabs si4705 FM transmitter */
+ /* ST lsm6db0 gyro/accelerometer */
};
-&blsp_i2c6 {
+&blsp1_i2c6 {
status = "okay";
- /* NCI NFC,
+ /*
+ * NXP NCI NFC,
* TI USB320 Type-C controller,
* Pericom 30216a USB (de)mux switch
*/
};
-&blsp_i2c7 {
+&blsp2_i2c1 {
status = "okay";
/* cm36686 proximity and ambient light sensor */
};
-&blsp_i2c13 {
+&blsp2_i2c5 {
status = "okay";
- /* ST lsm6db0 gyro/accelerometer */
+ /* Silabs si4705 FM transmitter */
};
&blsp2_uart2 {
status = "okay";
};
+&peripheral_region {
+ reg = <0x0 0x7400000 0x0 0x1c00000>;
+ no-map;
+};
+
&rpm_requests {
pm8994-regulators {
compatible = "qcom,rpm-pm8994-regulators";
@@ -144,24 +135,16 @@
vdd_l4_27_31-supply = <&pm8994_s3>;
vdd_l5_7-supply = <&pm8994_s3>;
vdd_l6_12_32-supply = <&pm8994_s5>;
- vdd_l8_16_30-supply = <&vreg_vph_pwr>;
- vdd_l9_10_18_22-supply = <&vreg_vph_pwr>;
- vdd_l13_19_23_24-supply = <&vreg_vph_pwr>;
+ vdd_l8_16_30-supply = <&vph_pwr>;
+ vdd_l9_10_18_22-supply = <&vph_pwr>;
+ vdd_l13_19_23_24-supply = <&vph_pwr>;
vdd_l14_15-supply = <&pm8994_s5>;
- vdd_l17_29-supply = <&vreg_vph_pwr>;
- vdd_l20_21-supply = <&vreg_vph_pwr>;
+ vdd_l17_29-supply = <&vph_pwr>;
+ vdd_l20_21-supply = <&vph_pwr>;
vdd_l25-supply = <&pm8994_s5>;
vdd_lvs1_2 = <&pm8994_s4>;
- pm8994_s1: s1 {
- /* unused */
- status = "disabled";
- };
-
- pm8994_s2: s2 {
- /* unused */
- status = "disabled";
- };
+ /* S1, S2, S6 and S12 are managed by RPMPD */
pm8994_s3: s3 {
regulator-min-microvolt = <1300000>;
@@ -186,6 +169,8 @@
regulator-max-microvolt = <1000000>;
};
+ /* S8, S9, S10 and S11 - SPMI-managed VDD_APC */
+
pm8994_l1: l1 {
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
@@ -206,20 +191,14 @@
regulator-max-microvolt = <1225000>;
};
- pm8994_l5: l5 {
- /* unused */
- status = "disabled";
- };
+ /* L5 is inaccessible from RPM */
pm8994_l6: l6 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
- pm8994_l7: l7 {
- /* unused */
- status = "disabled";
- };
+ /* L7 is inaccessible from RPM */
pm8994_l8: l8 {
regulator-min-microvolt = <1800000>;
@@ -352,10 +331,31 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
+
+ pm8994_lvs1: lvs1 {};
+ pm8994_lvs2: lvs2 {};
+ };
+
+ pmi8994_regulators: pmi8994-regulators {
+ compatible = "qcom,rpm-pmi8994-regulators";
+ vdd_s1-supply = <&vph_pwr>;
+ vdd_bst_byp-supply = <&vph_pwr>;
+
+ pmi8994_s1: s1 {
+ regulator-min-microvolt = <1025000>;
+ regulator-max-microvolt = <1025000>;
+ };
+
+ /* S2 & S3 - VDD_GFX */
+
+ pmi8994_bby: boost-bypass {
+ regulator-min-microvolt = <3150000>;
+ regulator-max-microvolt = <3600000>;
+ };
};
};
-&sdhc_1 {
+&sdhc1 {
status = "okay";
mmc-hs400-1_8v;
diff --git a/arch/arm64/boot/dts/qcom/msm8992.dtsi b/arch/arm64/boot/dts/qcom/msm8992.dtsi
index 0c422af47917..58fe58cc7703 100644
--- a/arch/arm64/boot/dts/qcom/msm8992.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8992.dtsi
@@ -2,738 +2,29 @@
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*/
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/clock/qcom,gcc-msm8994.h>
+#include "msm8994.dtsi"
-/ {
- interrupt-parent = <&intc>;
+/* 8992 only features 2 A57 cores. */
+/delete-node/ &CPU6;
+/delete-node/ &CPU7;
+/delete-node/ &cpu6_map;
+/delete-node/ &cpu7_map;
- #address-cells = <2>;
- #size-cells = <2>;
-
- chosen { };
-
- cpus {
- #address-cells = <2>;
- #size-cells = <0>;
-
- CPU0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x0 0x0>;
- next-level-cache = <&L2_0>;
- enable-method = "psci";
- L2_0: l2-cache {
- compatible = "cache";
- cache-level = <2>;
- };
- };
-
- CPU1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x0 0x1>;
- next-level-cache = <&L2_0>;
- enable-method = "psci";
- };
-
- CPU2: cpu@2 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x0 0x2>;
- next-level-cache = <&L2_0>;
- enable-method = "psci";
- };
-
- CPU3: cpu@3 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x0 0x3>;
- next-level-cache = <&L2_0>;
- enable-method = "psci";
- };
-
- CPU4: cpu@100 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x0 0x100>;
- next-level-cache = <&L2_1>;
- enable-method = "psci";
- L2_1: l2-cache {
- compatible = "cache";
- cache-level = <2>;
- };
- };
-
- CPU5: cpu@101 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x0 0x101>;
- next-level-cache = <&L2_1>;
- enable-method = "psci";
- };
-
- cpu-map {
- cluster0 {
- core0 {
- cpu = <&CPU0>;
- };
-
- core1 {
- cpu = <&CPU1>;
- };
-
- core2 {
- cpu = <&CPU2>;
- };
-
- core3 {
- cpu = <&CPU3>;
- };
- };
-
- cluster1 {
- core0 {
- cpu = <&CPU4>;
- };
-
- core1 {
- cpu = <&CPU5>;
- };
- };
- };
- };
-
- clocks {
- xo_board: xo_board {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <19200000>;
- };
-
- sleep_clk: sleep_clk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- };
- };
-
- firmware {
- scm {
- compatible = "qcom,scm-msm8994", "qcom,scm";
- };
- };
-
- memory {
- device_type = "memory";
- /* We expect the bootloader to fill in the reg */
- reg = <0 0 0 0>;
- };
-
- pmu {
- compatible = "arm,cortex-a53-pmu";
- interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4)| IRQ_TYPE_LEVEL_HIGH)>;
- };
-
- psci {
- compatible = "arm,psci-0.2";
- method = "hvc";
- };
-
- reserved-memory {
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- smem_region: smem@6a00000 {
- reg = <0x0 0x6a00000 0x0 0x200000>;
- no-map;
- };
- };
-
- sfpb_mutex: hwmutex {
- compatible = "qcom,sfpb-mutex";
- syscon = <&sfpb_mutex_regs 0x0 0x100>;
- #hwlock-cells = <1>;
- };
-
- smem {
- compatible = "qcom,smem";
- memory-region = <&smem_region>;
- qcom,rpm-msg-ram = <&rpm_msg_ram>;
- hwlocks = <&sfpb_mutex 3>;
- };
-
- soc {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0 0 0xffffffff>;
- compatible = "simple-bus";
-
- intc: interrupt-controller@f9000000 {
- compatible = "qcom,msm-qgic2";
- interrupt-controller;
- #interrupt-cells = <3>;
- reg = <0xf9000000 0x1000>,
- <0xf9002000 0x1000>;
- };
-
- apcs: mailbox@f900d000 {
- compatible = "qcom,msm8994-apcs-kpss-global", "syscon";
- reg = <0xf900d000 0x2000>;
- #mbox-cells = <1>;
- };
-
- timer@f9020000 {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
- compatible = "arm,armv7-timer-mem";
- reg = <0xf9020000 0x1000>;
-
- frame@f9021000 {
- frame-number = <0>;
- interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xf9021000 0x1000>,
- <0xf9022000 0x1000>;
- };
-
- frame@f9023000 {
- frame-number = <1>;
- interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xf9023000 0x1000>;
- status = "disabled";
- };
-
- frame@f9024000 {
- frame-number = <2>;
- interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xf9024000 0x1000>;
- status = "disabled";
- };
-
- frame@f9025000 {
- frame-number = <3>;
- interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xf9025000 0x1000>;
- status = "disabled";
- };
-
- frame@f9026000 {
- frame-number = <4>;
- interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xf9026000 0x1000>;
- status = "disabled";
- };
-
- frame@f9027000 {
- frame-number = <5>;
- interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xf9027000 0x1000>;
- status = "disabled";
- };
-
- frame@f9028000 {
- frame-number = <6>;
- interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xf9028000 0x1000>;
- status = "disabled";
- };
- };
-
- usb3: usb@f92f8800 {
- compatible = "qcom,msm8996-dwc3", "qcom,dwc3";
- reg = <0xf92f8800 0x400>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- clocks = <&gcc GCC_USB30_MASTER_CLK>,
- <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
- <&gcc GCC_USB30_SLEEP_CLK>,
- <&gcc GCC_USB30_MOCK_UTMI_CLK>;
- clock-names = "core", "iface", "sleep", "mock_utmi", "ref", "xo";
-
- assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
- <&gcc GCC_USB30_MASTER_CLK>;
- assigned-clock-rates = <19200000>, <120000000>;
-
- power-domains = <&gcc USB30_GDSC>;
- qcom,select-utmi-as-pipe-clk;
-
- dwc3@f9200000 {
- compatible = "snps,dwc3";
- reg = <0xf9200000 0xcc00>;
- interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
- snps,dis_u2_susphy_quirk;
- snps,dis_enblslpm_quirk;
- maximum-speed = "high-speed";
- dr_mode = "peripheral";
- };
- };
-
- sdhc_1: sdhci@f9824900 {
- compatible = "qcom,sdhci-msm-v4";
- reg = <0xf9824900 0x1a0>, <0xf9824000 0x800>;
- reg-names = "hc_mem", "core_mem";
-
- interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "hc_irq", "pwr_irq";
-
- clocks = <&gcc GCC_SDCC1_APPS_CLK>,
- <&gcc GCC_SDCC1_AHB_CLK>,
- <&xo_board>;
- clock-names = "core", "iface", "xo";
-
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on
- &sdc1_rclk_on>;
- pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off
- &sdc1_rclk_off>;
-
- regulator-always-on;
- bus-width = <8>;
- non-removable;
-
- status = "disabled";
- };
-
- sdhc_2: sdhci@f98a4900 {
- compatible = "qcom,sdhci-msm-v4";
- reg = <0xf98a4900 0x11c>, <0xf98a4000 0x800>;
- reg-names = "hc_mem", "core_mem";
-
- interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "hc_irq", "pwr_irq";
-
- clocks = <&gcc GCC_SDCC2_APPS_CLK>,
- <&gcc GCC_SDCC2_AHB_CLK>,
- <&xo_board>;
- clock-names = "core", "iface", "xo";
-
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>;
- pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
-
- cd-gpios = <&tlmm 100 0>;
- bus-width = <4>;
- status = "disabled";
- };
-
- blsp1_uart2: serial@f991e000 {
- compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
- reg = <0xf991e000 0x1000>;
- interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_LOW>;
- clock-names = "core", "iface";
- clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
- <&gcc GCC_BLSP1_AHB_CLK>;
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp1_uart2_default>;
- pinctrl-1 = <&blsp1_uart2_sleep>;
- status = "disabled";
- };
-
- blsp_i2c1: i2c@f9923000 {
- compatible = "qcom,i2c-qup-v2.2.1";
- reg = <0xf9923000 0x500>;
- interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP1_AHB_CLK>,
- <&gcc GCC_BLSP1_QUP1_I2C_APPS_CLK>;
- clock-names = "iface", "core";
- clock-frequency = <400000>;
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&i2c1_default>;
- pinctrl-1 = <&i2c1_sleep>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- blsp_i2c2: i2c@f9924000 {
- compatible = "qcom,i2c-qup-v2.2.1";
- reg = <0xf9924000 0x500>;
- interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP1_AHB_CLK>,
- <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;
- clock-names = "iface", "core";
- clock-frequency = <400000>;
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&i2c2_default>;
- pinctrl-1 = <&i2c2_sleep>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- /* Somebody was very creative with their numbering scheme downstream... */
-
- blsp_i2c13: i2c@f9927000 {
- compatible = "qcom,i2c-qup-v2.2.1";
- reg = <0xf9927000 0x500>;
- interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP1_AHB_CLK>,
- <&gcc GCC_BLSP1_QUP5_I2C_APPS_CLK>;
- clock-names = "iface", "core";
- clock-frequency = <400000>;
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&i2c13_default>;
- pinctrl-1 = <&i2c13_sleep>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- blsp_i2c6: i2c@f9928000 {
- compatible = "qcom,i2c-qup-v2.2.1";
- reg = <0xf9928000 0x500>;
- interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP1_AHB_CLK>,
- <&gcc GCC_BLSP1_QUP6_I2C_APPS_CLK>;
- clock-names = "iface", "core";
- clock-frequency = <400000>;
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&i2c6_default>;
- pinctrl-1 = <&i2c6_sleep>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- blsp2_uart2: serial@f995e000 {
- compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
- reg = <0xf995e000 0x1000>;
- interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_LOW>;
- clock-names = "core", "iface";
- clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>,
- <&gcc GCC_BLSP2_AHB_CLK>;
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp2_uart2_default>;
- pinctrl-1 = <&blsp2_uart2_sleep>;
- status = "disabled";
- };
-
- blsp_i2c7: i2c@f9963000 {
- compatible = "qcom,i2c-qup-v2.2.1";
- reg = <0xf9963000 0x500>;
- interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP2_AHB_CLK>,
- <&gcc GCC_BLSP2_QUP1_I2C_APPS_CLK>;
- clock-names = "iface", "core";
- clock-frequency = <400000>;
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&i2c7_default>;
- pinctrl-1 = <&i2c7_sleep>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- blsp_i2c5: i2c@f9967000 {
- compatible = "qcom,i2c-qup-v2.2.1";
- reg = <0xf9967000 0x500>;
- interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP2_AHB_CLK>,
- <&gcc GCC_BLSP2_QUP5_I2C_APPS_CLK>;
- clock-names = "iface", "core";
- clock-frequency = <100000>;
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&i2c5_default>;
- pinctrl-1 = <&i2c5_sleep>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- gcc: clock-controller@fc400000 {
- compatible = "qcom,gcc-msm8994";
- #clock-cells = <1>;
- #reset-cells = <1>;
- #power-domain-cells = <1>;
- reg = <0xfc400000 0x2000>;
- };
-
- rpm_msg_ram: memory@fc428000 {
- compatible = "qcom,rpm-msg-ram";
- reg = <0xfc428000 0x4000>;
- };
-
- restart@fc4ab000 {
- compatible = "qcom,pshold";
- reg = <0xfc4ab000 0x4>;
- };
-
- spmi_bus: spmi@fc4c0000 {
- compatible = "qcom,spmi-pmic-arb";
- reg = <0xfc4cf000 0x1000>,
- <0xfc4cb000 0x1000>,
- <0xfc4ca000 0x1000>;
- reg-names = "core", "intr", "cnfg";
- interrupt-names = "periph_irq";
- interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
- qcom,ee = <0>;
- qcom,channel = <0>;
- #address-cells = <2>;
- #size-cells = <0>;
- interrupt-controller;
- #interrupt-cells = <4>;
- };
-
- sfpb_mutex_regs: syscon@fd484000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "syscon";
- reg = <0xfd484000 0x400>;
- };
-
- tlmm: pinctrl@fd510000 {
- compatible = "qcom,msm8994-pinctrl";
- reg = <0xfd510000 0x4000>;
- interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- gpio-ranges = <&tlmm 0 0 146>;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
-
- blsp1_uart2_default: blsp1-uart2-default {
- function = "blsp_uart2";
- pins = "gpio4", "gpio5";
- drive-strength = <16>;
- bias-disable;
- };
-
- blsp1_uart2_sleep: blsp1-uart2-sleep {
- function = "gpio";
- pins = "gpio4", "gpio5";
- drive-strength = <2>;
- bias-pull-down;
- };
-
- blsp2_uart2_default: blsp2-uart2-default {
- function = "blsp_uart8";
- pins = "gpio45", "gpio46", "gpio47", "gpio48";
- drive-strength = <16>;
- bias-disable;
- };
-
- blsp2_uart2_sleep: blsp2-uart2-sleep {
- function = "gpio";
- pins = "gpio45", "gpio46", "gpio47", "gpio48";
- drive-strength = <2>;
- bias-pull-down;
- };
-
- sdc1_clk_on: clk-on {
- pins = "sdc1_clk";
- bias-disable;
- drive-strength = <6>;
- };
-
- sdc1_clk_off: clk-off {
- pins = "sdc1_clk";
- bias-disable;
- drive-strength = <2>;
- };
-
- sdc1_cmd_on: cmd-on {
- pins = "sdc1_cmd";
- bias-pull-up;
- drive-strength = <6>;
- };
-
- sdc1_cmd_off: cmd-off {
- pins = "sdc1_cmd";
- bias-pull-up;
- drive-strength = <2>;
- };
-
- sdc1_data_on: data-on {
- pins = "sdc1_data";
- bias-pull-up;
- drive-strength = <6>;
- };
-
- sdc1_data_off: data-off {
- pins = "sdc1_data";
- bias-pull-up;
- drive-strength = <2>;
- };
-
- sdc1_rclk_on: rclk-on {
- pins = "sdc1_rclk";
- bias-pull-down;
- };
-
- sdc1_rclk_off: rclk-off {
- pins = "sdc1_rclk";
- bias-pull-down;
- };
-
- i2c1_default: i2c1-default {
- function = "blsp_i2c1";
- pins = "gpio2", "gpio3";
- drive-strength = <2>;
- bias-disable;
- };
-
- i2c1_sleep: i2c1-sleep {
- function = "gpio";
- pins = "gpio2", "gpio3";
- drive-strength = <2>;
- bias-disable;
- };
-
- i2c2_default: i2c2-default {
- function = "blsp_i2c2";
- pins = "gpio6", "gpio7";
- drive-strength = <2>;
- bias-disable;
- };
-
- i2c2_sleep: i2c2-sleep {
- function = "gpio";
- pins = "gpio6", "gpio7";
- drive-strength = <2>;
- bias-disable;
- };
-
- i2c5_default: i2c5-default {
- /* Don't be fooled! Nobody knows the reason why though... */
- function = "blsp_i2c11";
- pins = "gpio83", "gpio84";
- drive-strength = <2>;
- bias-disable;
- };
-
- i2c5_sleep: i2c5-sleep {
- function = "gpio";
- pins = "gpio83", "gpio84";
- drive-strength = <2>;
- bias-disable;
- };
-
- i2c6_default: i2c6-default {
- function = "blsp_i2c6";
- pins = "gpio28", "gpio27";
- drive-strength = <2>;
- bias-disable;
- };
-
- i2c6_sleep: i2c6-sleep {
- function = "gpio";
- pins = "gpio28", "gpio27";
- drive-strength = <2>;
- bias-disable;
- };
-
- i2c7_default: i2c7-default {
- function = "blsp_i2c7";
- pins = "gpio43", "gpio44";
- drive-strength = <2>;
- bias-disable;
- };
-
- i2c7_sleep: i2c7-sleep {
- function = "gpio";
- pins = "gpio43", "gpio44";
- drive-strength = <2>;
- bias-disable;
- };
-
- i2c13_default: i2c13-default {
- /* Not a typo either. */
- function = "blsp_i2c5";
- pins = "gpio23", "gpio24";
- drive-strength = <2>;
- bias-disable;
- };
-
- i2c13_sleep: i2c13-sleep {
- function = "gpio";
- pins = "gpio23", "gpio24";
- drive-strength = <2>;
- bias-disable;
- };
-
- sdc2_clk_on: sdc2-clk-on {
- pins = "sdc2_clk";
- bias-disable;
- drive-strength = <16>;
- };
-
- sdc2_clk_off: sdc2-clk-off {
- pins = "sdc2_clk";
- bias-disable;
- drive-strength = <2>;
- };
-
- sdc2_cmd_on: sdc2-cmd-on {
- pins = "sdc2_cmd";
- bias-pull-up;
- drive-strength = <10>;
- };
-
- sdc2_cmd_off: sdc2-cmd-off {
- pins = "sdc2_cmd";
- bias-pull-up;
- drive-strength = <2>;
- };
-
- sdc2_data_on: sdc2-data-on {
- pins = "sdc2_data";
- bias-pull-up;
- drive-strength = <10>;
- };
-
- sdc2_data_off: sdc2-data-off {
- pins = "sdc2_data";
- bias-pull-up;
- drive-strength = <2>;
- };
- };
- };
-
- smd_rpm: smd {
- compatible = "qcom,smd";
- rpm {
- interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
- qcom,ipc = <&apcs 8 0>;
- qcom,smd-edge = <15>;
- qcom,local-pid = <0>;
- qcom,remote-pid = <6>;
-
- rpm_requests: rpm-requests {
- compatible = "qcom,rpm-msm8994";
- qcom,smd-channels = "rpm_requests";
+&rpmcc {
+ compatible = "qcom,rpmcc-msm8992";
+};
- rpmcc: rpmcc {
- compatible = "qcom,rpmcc-msm8992";
- #clock-cells = <1>;
- };
- };
- };
- };
+&tcsr_mutex {
+ compatible = "qcom,sfpb-mutex";
+};
- timer {
- compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+&timer {
+ interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 4 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 1 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
- };
-
- vreg_vph_pwr: vreg-vph-pwr {
- compatible = "regulator-fixed";
- status = "okay";
- regulator-name = "vph-pwr";
-
- regulator-min-microvolt = <3600000>;
- regulator-max-microvolt = <3600000>;
-
- regulator-always-on;
- };
};
+&tlmm {
+ compatible = "qcom,msm8992-pinctrl";
+};
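The rewritten msm8992.dtsi above derives from msm8994.dtsi and strips the hardware it lacks with /delete-node/ applied to labels, keeping only the differences. A minimal standalone sketch of that pattern, using file and label names chosen for illustration rather than taken from this patch:

/* base.dtsi */
/ {
	cpus {
		#address-cells = <1>;
		#size-cells = <0>;

		EXAMPLE_CPU: cpu@102 {
			device_type = "cpu";
			compatible = "arm,cortex-a57";
			reg = <0x102>;
		};
	};
};

/* variant.dtsi: a derivative SoC without that core */
#include "base.dtsi"

/delete-node/ &EXAMPLE_CPU;

Overrides of surviving nodes, such as the &rpmcc, &tcsr_mutex and &tlmm compatibles above, are then expressed as plain label references on top of the included base.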
diff --git a/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-cityman.dts b/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-cityman.dts
deleted file mode 100644
index ed9034b96013..000000000000
--- a/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-cityman.dts
+++ /dev/null
@@ -1,73 +0,0 @@
-// SPDX-License-Identifier: BSD-3-Clause
-/*
- * Copyright (c) 2020, Konrad Dybcio
- */
-
-/dts-v1/;
-
-#include "msm8994.dtsi"
-#include "pm8994.dtsi"
-#include "pmi8994.dtsi"
-
-/ {
- model = "Microsoft Lumia 950 XL";
- compatible = "microsoft,cityman", "qcom,msm8994";
-
- /*
- * Most Lumia 950XL users use GRUB to load their kernels,
- * hence there is no need for msm-id and friends.
- */
-
- /*
- * This enables graphical output via bootloader-enabled display.
- * acpi=no is required due to WP platforms having ACPI support, but
- * only for Windows-based OSes.
- */
- chosen {
- bootargs = "earlycon=efifb console=efifb acpi=no";
-
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
- };
-};
-
-&blsp_i2c1 {
- status = "okay";
-
- rmi4-i2c-dev@4b {
- compatible = "syna,rmi4-i2c";
- reg = <0x4b>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- interrupt-parent = <&tlmm>;
- interrupts = <77 IRQ_TYPE_EDGE_FALLING>;
-
- rmi4-f01@1 {
- reg = <0x01>;
- syna,nosleep-mode = <1>;
- };
-
- rmi4-f12@12 {
- reg = <0x12>;
- syna,sensor-type = <1>;
- syna,clip-x-low = <0>;
- syna,clip-x-high = <1440>;
- syna,clip-y-low = <0>;
- syna,clip-y-high = <2660>;
- };
- };
-};
-
-&blsp1_uart2 {
- status = "okay";
-};
-
-&blsp2_uart2 {
- status = "okay";
-};
-
-&sdhc1 {
- status = "okay";
-};
diff --git a/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon-cityman.dts b/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon-cityman.dts
new file mode 100644
index 000000000000..d0aaf5750c21
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon-cityman.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, Konrad Dybcio <konrad.dybcio@somainline.org>
+ * Copyright (c) 2020, Gustave Monce <gustave.monce@outlook.com>
+ */
+
+/dts-v1/;
+
+#include "msm8994.dtsi"
+#include "msm8994-msft-lumia-octagon.dtsi"
+
+/ {
+ model = "Microsoft Lumia 950 XL";
+ compatible = "microsoft,cityman", "qcom,msm8994";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi b/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi
new file mode 100644
index 000000000000..3a3790a52a2c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi
@@ -0,0 +1,909 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Common Board Device Tree for
+ * Microsoft Mobile MSM8994 Octagon Platforms
+ *
+ * Copyright (c) 2020, Konrad Dybcio
+ * Copyright (c) 2020, Gustave Monce <gustave.monce@outlook.com>
+ */
+
+#include "pm8994.dtsi"
+#include "pmi8994.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/gpio-keys.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+
+/*
+ * Delete all generic (msm8994.dtsi) reserved
+ * memory mappings which are different in this device.
+ */
+/delete-node/ &adsp_mem;
+/delete-node/ &audio_mem;
+/delete-node/ &cont_splash_mem;
+/delete-node/ &mba_mem;
+/delete-node/ &mpss_mem;
+/delete-node/ &peripheral_region;
+/delete-node/ &rmtfs_mem;
+/delete-node/ &smem_mem;
+
+/ {
+ /*
+ * Most Lumia 950/XL users use GRUB to load their kernels,
+ * hence there is no need for msm-id and friends.
+ */
+
+ /*
+ * This enables graphical output via bootloader-enabled display.
+ * acpi=no is required because WP platforms have ACPI support, but
+ * only for Windows-based OSes.
+ */
+ chosen {
+ bootargs = "earlycon=efifb console=efifb acpi=no";
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ };
+
+ clocks {
+ compatible = "simple-bus";
+
+ divclk4: divclk4 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+
+ clock-frequency = <32768>;
+ clock-output-names = "divclk4";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&divclk4_pin_a>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ input-name = "gpio-keys";
+ autorepeat;
+
+ volupkey {
+ label = "Volume Up";
+ gpios = <&pm8994_gpios 3 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <KEY_VOLUMEUP>;
+ wakeup-source;
+ debounce-interval = <15>;
+ };
+
+ camsnapkey {
+ label = "Camera Snapshot";
+ gpios = <&pm8994_gpios 4 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <KEY_CAMERA>;
+ wakeup-source;
+ debounce-interval = <15>;
+ };
+
+ camfocuskey {
+ label = "Camera Focus";
+ gpios = <&pm8994_gpios 5 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <KEY_CAMERA_FOCUS>;
+ wakeup-source;
+ debounce-interval = <15>;
+ };
+ };
+
+ gpio-hall-sensor {
+ compatible = "gpio-keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&hall_front_default &hall_back_default>;
+
+ label = "GPIO Hall Effect Sensor";
+
+ hall-front-sensor {
+ label = "Hall Effect Front Sensor";
+ gpios = <&tlmm 42 GPIO_ACTIVE_HIGH>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LID>;
+ linux,can-disable;
+ };
+
+ hall-back-sensor {
+ label = "Hall Effect Back Sensor";
+ gpios = <&tlmm 75 GPIO_ACTIVE_HIGH>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_MACHINE_COVER>;
+ linux,can-disable;
+ };
+ };
+
+ reserved-memory {
+ /*
+ * This device, being a WP platform, has a very different
+ * memory layout from other Android-based devices.
+ * This memory layout is directly copied from the original
+ * device UEFI firmware, and adapted based on observations
+ * using JTAG for the Qualcomm Peripheral Image regions.
+ */
+
+ uefi_mem: memory@200000 {
+ reg = <0 0x200000 0 0x100000>;
+ no-map;
+ };
+
+ mppark_mem: memory@300000 {
+ reg = <0 0x300000 0 0x80000>;
+ no-map;
+ };
+
+ fbpt_mem: memory@380000 {
+ reg = <0 0x380000 0 0x1000>;
+ no-map;
+ };
+
+ dbg2_mem: memory@381000 {
+ reg = <0 0x381000 0 0x4000>;
+ no-map;
+ };
+
+ capsule_mem: memory@385000 {
+ reg = <0 0x385000 0 0x1000>;
+ no-map;
+ };
+
+ tpmctrl_mem: memory@386000 {
+ reg = <0 0x386000 0 0x3000>;
+ no-map;
+ };
+
+ uefiinfo_mem: memory@389000 {
+ reg = <0 0x389000 0 0x1000>;
+ no-map;
+ };
+
+ reset_mem: memory@389000 {
+ reg = <0 0x389000 0 0x1000>;
+ no-map;
+ };
+
+ resuncached_mem: memory@38e000 {
+ reg = <0 0x38e000 0 0x72000>;
+ no-map;
+ };
+
+ disp_mem: memory@400000 {
+ reg = <0 0x400000 0 0x800000>;
+ no-map;
+ };
+
+ uefistack_mem: memory@c00000 {
+ reg = <0 0xc00000 0 0x40000>;
+ no-map;
+ };
+
+ cpuvect_mem: memory@c40000 {
+ reg = <0 0xc40000 0 0x10000>;
+ no-map;
+ };
+
+ rescached_mem: memory@c50000 {
+ reg = <0 0xc50000 0 0xb0000>;
+ no-map;
+ };
+
+ tzapps_mem: memory@6500000 {
+ reg = <0 0x6500000 0 0x500000>;
+ no-map;
+ };
+
+ smem_mem: memory@6a00000 {
+ reg = <0 0x6a00000 0 0x200000>;
+ no-map;
+ };
+
+ hyp_mem: memory@6c00000 {
+ reg = <0 0x6c00000 0 0x100000>;
+ no-map;
+ };
+
+ tz_mem: memory@6d00000 {
+ reg = <0 0x6d00000 0 0x160000>;
+ no-map;
+ };
+
+ rfsa_adsp_mem: memory@6e60000 {
+ reg = <0 0x6e60000 0 0x10000>;
+ no-map;
+ };
+
+ rfsa_mpss_mem: memory@6e70000 {
+ compatible = "qcom,rmtfs-mem";
+ reg = <0 0x6e70000 0 0x10000>;
+ no-map;
+
+ qcom,client-id = <1>;
+ };
+
+ /*
+ * Value obtained from the device original ACPI DSDT table
+ * MPSS_EFS / SBL
+ */
+ mba_mem: memory@6e80000 {
+ reg = <0 0x6e80000 0 0x180000>;
+ no-map;
+ };
+
+ /*
+ * Peripheral Image loader region begin!
+ * The region reserved for pil is 0x7000000-0xef00000
+ */
+
+ mpss_mem: memory@7000000 {
+ reg = <0 0x7000000 0 0x5a00000>;
+ no-map;
+ };
+
+ adsp_mem: memory@ca00000 {
+ reg = <0 0xca00000 0 0x1800000>;
+ no-map;
+ };
+
+ venus_mem: memory@e200000 {
+ reg = <0 0xe200000 0 0x500000>;
+ no-map;
+ };
+
+ pil_metadata_mem: memory@e700000 {
+ reg = <0 0xe700000 0 0x4000>;
+ no-map;
+ };
+
+ memory@e704000 {
+ reg = <0 0xe704000 0 0x7fc000>;
+ no-map;
+ };
+ /* Peripheral Image loader region end */
+
+ cnss_mem: memory@ef00000 {
+ reg = <0 0xef00000 0 0x300000>;
+ no-map;
+ };
+ };
+};
+
+&blsp1_i2c1 {
+ status = "okay";
+
+ rmi4-i2c-dev@4b {
+ compatible = "syna,rmi4-i2c";
+ reg = <0x4b>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <77 IRQ_TYPE_EDGE_FALLING>;
+
+ rmi4-f01@1 {
+ reg = <0x01>;
+ syna,nosleep-mode = <1>;
+ };
+
+ rmi4-f12@12 {
+ reg = <0x12>;
+ syna,sensor-type = <1>;
+ syna,clip-x-low = <0>;
+ syna,clip-x-high = <1440>;
+ syna,clip-y-low = <0>;
+ syna,clip-y-high = <2560>;
+ };
+ };
+};
+
+&blsp1_i2c2 {
+ status = "okay";
+
+ /*
+ * This device uses the Texas Instruments TAS2553; however, the TAS2552 driver
+ * seems to work here. In the future a proper driver might need to
+ * be written for this device.
+ */
+ tas2553: tas2553@40 {
+ compatible = "ti,tas2552";
+ reg = <0x40>;
+
+ vbat-supply = <&vph_pwr>;
+ iovdd-supply = <&vreg_s4a_1p8>;
+ avdd-supply = <&vreg_s4a_1p8>;
+
+ enable-gpio = <&pm8994_gpios 12 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&blsp1_i2c5 {
+ status = "okay";
+
+ ak09912: magnetometer@c {
+ compatible = "asahi-kasei,ak09912";
+ reg = <0xc>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <26 IRQ_TYPE_EDGE_RISING>;
+
+ vdd-supply = <&vreg_l18a_2p85>;
+ vid-supply = <&vreg_lvs2a_1p8>;
+ };
+
+ zpa2326: barometer@5c {
+ compatible = "murata,zpa2326";
+ reg = <0x5c>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <74 IRQ_TYPE_EDGE_RISING>;
+
+ vdd-supply = <&vreg_lvs2a_1p8>;
+ };
+
+ mpu6050: accelerometer@68 {
+ compatible = "invensense,mpu6500";
+ reg = <0x68>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <64 IRQ_TYPE_EDGE_RISING>;
+
+ vdd-supply = <&vreg_lvs2a_1p8>;
+ vddio-supply = <&vreg_lvs2a_1p8>;
+ };
+};
+
+&blsp1_i2c6 {
+ status = "okay";
+
+ pn547: pn547@28 {
+ compatible = "nxp,pn544-i2c";
+
+ reg = <0x28>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <29 IRQ_TYPE_EDGE_RISING>;
+
+ enable-gpios = <&tlmm 30 GPIO_ACTIVE_HIGH>;
+ firmware-gpios = <&tlmm 94 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&blsp1_uart2 {
+ status = "okay";
+};
+
+&blsp2_i2c1 {
+ status = "okay";
+
+ sideinteraction: ad7147_captouch@2c {
+ compatible = "ad,ad7147_captouch";
+ reg = <0x2c>;
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&grip_default>;
+ pinctrl-1 = <&grip_sleep>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <96 IRQ_TYPE_EDGE_FALLING>;
+
+ button_num = <8>;
+ touchpad_num = <0>;
+ wheel_num = <0>;
+ slider_num = <0>;
+
+ vcc-supply = <&vreg_l18a_2p85>;
+ };
+
+ /*
+ * The QPDS-T900/QPDS-T930 is a customized part built for Nokia
+ * by Avago. It is very similar to the Avago APDS-9930 with some
+ * minor differences. In the future a proper driver might need to
+ * be written for this device. For now this works fine.
+ */
+ qpdst900: qpdst900@39 {
+ compatible = "avago,apds9930";
+ reg = <0x39>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <40 IRQ_TYPE_EDGE_FALLING>;
+ };
+};
+
+&blsp2_i2c5 {
+ status = "okay";
+
+ fm_radio: si4705@11 {
+ compatible = "silabs,si470x";
+ reg = <0x11>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&tlmm 93 GPIO_ACTIVE_HIGH>;
+ };
+
+ vreg_lpddr_1p1: fan53526a@6c {
+ compatible = "fcs,fan53526";
+ reg = <0x6c>;
+
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ vin-supply = <&vph_pwr>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-always-on; /* Turning off DDR power doesn't sound good. */
+ };
+
+ /* ANX7816 HDMI bridge (needs MDSS HDMI) */
+};
+
+&blsp2_spi4 {
+ status = "okay";
+
+ /*
+ * This device is a Lattice UC120 USB-C PD PHY.
+ * It is actually a Lattice iCE40 FPGA pre-programmed by
+ * the device firmware with a specific bitstream
+ * enabling USB Type C PHY functionality.
+ * Communication is done via a proprietary protocol over SPI.
+ *
+ * TODO: Once a proper driver is available, replace this.
+ */
+ uc120: ice5lp2k@0 {
+ compatible = "lattice,ice40-fpga-mgr";
+ reg = <0>;
+ spi-max-frequency = <5000000>;
+ cdone-gpios = <&tlmm 95 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pmi8994_gpios 4 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&blsp2_uart2 {
+ status = "okay";
+
+ qca6174_bt: bluetooth {
+ compatible = "qcom,qca6174-bt";
+
+ enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>;
+ clocks = <&divclk4>;
+ };
+};
+
+&pm8994_gpios {
+ bt_en_gpios: bt_en_gpios {
+ pinconf {
+ pins = "gpio19";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ output-low;
+ power-source = <PM8994_GPIO_S4>;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
+ bias-pull-down;
+ };
+ };
+
+ divclk4_pin_a: divclk4 {
+ pinconf {
+ pins = "gpio18";
+ function = PMIC_GPIO_FUNC_FUNC2;
+ power-source = <PM8994_GPIO_S4>;
+ bias-disable;
+ };
+ };
+};
+
+&pm8994_pon {
+ pwrkey {
+ compatible = "qcom,pm8941-pwrkey";
+ interrupts = <0 8 0 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ linux,code = <KEY_POWER>;
+ };
+
+ volwnkey {
+ compatible = "qcom,pm8941-resin";
+ interrupts = <0 8 1 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+};
+
+&pmi8994_gpios {
+ pinctrl-0 = <&hd3ss460_pol &hd3ss460_amsel &hd3ss460_en>;
+ pinctrl-names = "default";
+
+ /*
+ * This device uses a TI HD3SS460 Type-C MUX.
+ * As this device currently has no driver,
+ * the configuration for USB Face Up is set up here.
+ *
+ * TODO: remove once a driver is available
+ * TODO: add VBUS GPIO 5
+ */
+ hd3ss460_pol: pol_low {
+ pins = "gpio8";
+ drive-strength = <3>;
+ bias-pull-down;
+ };
+
+ hd3ss460_amsel: amsel_high {
+ pins = "gpio9";
+ drive-strength = <1>;
+ bias-pull-up;
+ };
+
+ hd3ss460_en: en_high {
+ pins = "gpio10";
+ drive-strength = <1>;
+ bias-pull-up;
+ };
+};
+
+&pmi8994_spmi_regulators {
+ vdd_gfx: s2@1700 {
+ reg = <0x1700 0x100>;
+ regulator-min-microvolt = <980000>;
+ regulator-max-microvolt = <980000>;
+ };
+};
+
+&rpm_requests {
+ /* These values were taken from the original firmware ACPI tables */
+ pm8994_regulators: pm8994-regulators {
+ compatible = "qcom,rpm-pm8994-regulators";
+
+ vdd_s1-supply = <&vph_pwr>;
+ vdd_s2-supply = <&vph_pwr>;
+ vdd_s3-supply = <&vph_pwr>;
+ vdd_s4-supply = <&vph_pwr>;
+ vdd_s5-supply = <&vph_pwr>;
+ vdd_s6-supply = <&vph_pwr>;
+ vdd_s7-supply = <&vph_pwr>;
+ vdd_s8-supply = <&vph_pwr>;
+ vdd_s9-supply = <&vph_pwr>;
+ vdd_s10-supply = <&vph_pwr>;
+ vdd_s11-supply = <&vph_pwr>;
+ vdd_s12-supply = <&vph_pwr>;
+ vdd_l1-supply = <&vreg_s1b_1p0>;
+ vdd_l2_l26_l28-supply = <&vreg_s3a_1p3>;
+ vdd_l3_l11-supply = <&vreg_s3a_1p3>;
+ vdd_l4_l27_l31-supply = <&vreg_s3a_1p3>;
+ vdd_l5_l7-supply = <&vreg_s5a_2p15>;
+ vdd_l6_l12_l32-supply = <&vreg_s5a_2p15>;
+ vdd_l8_l16_l30-supply = <&vph_pwr>;
+ vdd_l9_l10_l18_l22-supply = <&vph_pwr_bbyp>;
+ vdd_l13_l19_l23_l24-supply = <&vph_pwr_bbyp>;
+ vdd_l14_l15-supply = <&vreg_s5a_2p15>;
+ vdd_l17_l29-supply = <&vph_pwr_bbyp>;
+ vdd_l20_l21-supply = <&vph_pwr_bbyp>;
+ vdd_l25-supply = <&vreg_s5a_2p15>;
+ vdd_lvs1_2-supply = <&vreg_s4a_1p8>;
+
+ /* S1, S2, S6 and S12 are managed by RPMPD */
+
+ vreg_s3a_1p3: s3 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-allow-set-load;
+ regulator-system-load = <300000>;
+ };
+
+ vreg_s4a_1p8: s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-allow-set-load;
+ regulator-always-on;
+ regulator-system-load = <325000>;
+ };
+
+ vreg_s5a_2p15: s5 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ regulator-allow-set-load;
+ regulator-system-load = <325000>;
+ };
+
+ vreg_s7a_1p0: s7 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ /*
+ * S8 - SPMI-managed VDD_APC0
+ * S9, S10 and S11 (the main one) - SPMI-managed VDD_APC1
+ */
+
+ vreg_l1a_1p0: l1 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ vreg_l2a_1p25: l2 {
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-allow-set-load;
+ regulator-system-load = <4160>;
+ };
+
+ vreg_l3a_1p2: l3 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <80000>;
+ };
+
+ vreg_l4a_1p225: l4 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ /* L5 is inaccessible from RPM */
+
+ vreg_l6a_1p8: l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-allow-set-load;
+ regulator-system-load = <1000>;
+ };
+
+ /* L7 is inaccessible from RPM */
+
+ vreg_l8a_1p8: l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vreg_l9a_1p8: l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vreg_l10a_1p8: l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vreg_l11a_1p2: l11 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <35000>;
+ };
+
+ vreg_l12a_1p8: l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <50000>;
+ };
+
+ vreg_l13a_2p95: l13 {
+ regulator-min-microvolt = <1850000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <22000>;
+ };
+
+ vreg_l14a_1p8: l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <52000>;
+ };
+
+ vreg_l15a_1p8: l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vreg_l16a_2p7: l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+
+ vreg_l17a_2p7: l17 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <300000>;
+ };
+
+ vreg_l18a_2p85: l18 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <600000>;
+ };
+
+ vreg_l19a_3p3: l19 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <500000>;
+ };
+
+ vreg_l20a_2p95: l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-allow-set-load;
+ regulator-system-load = <570000>;
+ };
+
+ vreg_l21a_2p95: l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <800000>;
+ };
+
+ vreg_l22a_3p0: l22 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <150000>;
+ };
+
+ vreg_l23a_2p8: l23 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <80000>;
+ };
+
+ vreg_l24a_3p075: l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3150000>;
+ regulator-allow-set-load;
+ regulator-system-load = <5800>;
+ };
+
+ vreg_l25a_1p1: l25 {
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <80000>;
+ };
+
+ vreg_l26a_1p0: l26 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ vreg_l27a_1p05: l27 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <500000>;
+ };
+
+ vreg_l28a_1p0: l28 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <26000>;
+ };
+
+ vreg_l29a_2p8: l29 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <80000>;
+ };
+
+ vreg_l30a_1p8: l30 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <2500>;
+ };
+
+ vreg_l31a_1p2: l31 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <600000>;
+ };
+
+ vreg_l32a_1p8: l32 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vreg_lvs1a_1p8: lvs1 { };
+
+ vreg_lvs2a_1p8: lvs2 { };
+ };
+
+ pmi8994_regulators: pmi8994-regulators {
+ compatible = "qcom,rpm-pmi8994-regulators";
+
+ vdd_s1-supply = <&vph_pwr>;
+ vdd_bst_byp-supply = <&vph_pwr>;
+
+ vreg_s1b_1p0: s1 {
+ regulator-min-microvolt = <1025000>;
+ regulator-max-microvolt = <1025000>;
+ };
+
+ /* S2 & S3 - VDD_GFX */
+
+ vph_pwr_bbyp: boost-bypass {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ };
+};
+
+&sdhc1 {
+ status = "okay";
+
+ /*
+ * This device is shipped with HS400-capable eMMCs.
+ * However, various brands have been used in different product batches,
+ * including a Samsung eMMC (BGND3R) which features a quirk with HS400.
+ * Set the speed to HS200 as a safety measure.
+ */
+ mmc-hs200-1_8v;
+};
+
+&sdhc2 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+
+ vmmc-supply = <&vreg_l21a_2p95>;
+ vqmmc-supply = <&vreg_l13a_2p95>;
+
+ cd-gpios = <&pm8994_gpios 8 GPIO_ACTIVE_LOW>;
+};
+
+&tlmm {
+ grip_default: grip-default {
+ pins = "gpio39";
+ function = "gpio";
+ drive-strength = <6>;
+ bias-pull-down;
+ };
+
+ grip_sleep: grip-sleep {
+ pins = "gpio39";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ hall_front_default: hall-front-default {
+ pins = "gpio42";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ hall_back_default: hall-back-default {
+ pins = "gpio75";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-ivy.dts b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-ivy.dts
new file mode 100644
index 000000000000..b5e90c85aaf6
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-ivy.dts
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+/dts-v1/;
+
+#include "msm8994-sony-xperia-kitakami.dtsi"
+
+/ {
+ model = "Sony Xperia Z3+/Z4";
+ compatible = "sony,ivy-row", "qcom,msm8994";
+};
+
+&pm8994_l3 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+};
+
+&pm8994_l17 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+};
+
+/delete-node/ &pm8994_l19;
+/delete-node/ &pm8994_l32;
diff --git a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-karin.dts b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-karin.dts
new file mode 100644
index 000000000000..a1d1a075941a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-karin.dts
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+/dts-v1/;
+
+#include "msm8994-sony-xperia-kitakami.dtsi"
+
+/ {
+ model = "Sony Xperia Z4 Tablet (LTE)";
+ compatible = "sony,karin-row", "qcom,msm8994";
+};
+
+&blsp2_i2c5 {
+ /*
+ * TI LP8557 backlight driver @ 2c
+ * AD AD7146 touch controller @ 2f
+ * sii8620 HDMI/MHL bridge @ 72 (kitakami-common)
+ */
+};
+
+&pm8994_l3 {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+};
+
+&pm8994_l17 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+};
+
+&pm8994_l22 {
+ regulator-min-microvolt = <3100000>;
+ regulator-max-microvolt = <3100000>;
+};
+
+&pm8994_l25 {
+ regulator-min-microvolt = <1037500>;
+ regulator-max-microvolt = <1037500>;
+};
+
+/delete-node/ &pm8994_l32;
+/* Z4 tablets use a different touchscreen. */
+/delete-node/ &touchscreen;
diff --git a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-satsuki.dts b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-satsuki.dts
new file mode 100644
index 000000000000..1385956a69f3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-satsuki.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+/dts-v1/;
+
+#include "msm8994-sony-xperia-kitakami.dtsi"
+
+/ {
+ model = "Sony Xperia Z5 Premium";
+ compatible = "sony,satsuki-row", "qcom,msm8994";
+};
+
+&pm8994_l14 {
+ regulator-min-microvolt = <1850000>;
+ regulator-max-microvolt = <1850000>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-sumire.dts b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-sumire.dts
index 5d6bbbf6c119..d3ba9867a369 100644
--- a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-sumire.dts
+++ b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-sumire.dts
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: BSD-3-Clause
/*
- * Copyright (c) 2020, Konrad Dybcio
+ * Copyright (c) 2020, Konrad Dybcio <konrad.dybcio@somainline.org>
*/
/dts-v1/;
@@ -11,3 +11,5 @@
model = "Sony Xperia Z5";
compatible = "sony,sumire-row", "qcom,msm8994";
};
+
+/delete-node/ &pm8994_l19;
diff --git a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-suzuran.dts b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-suzuran.dts
new file mode 100644
index 000000000000..f129479bbf95
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami-suzuran.dts
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+/dts-v1/;
+
+#include "msm8994-sony-xperia-kitakami.dtsi"
+
+/ {
+ model = "Sony Xperia Z5 Compact";
+ compatible = "sony,suzuran-row", "qcom,msm8994";
+};
+
+&pm8994_l14 {
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+};
+
+/delete-node/ &pm8994_l19;
diff --git a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
index 791f254ac3f8..48de66bf19c4 100644
--- a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: BSD-3-Clause
/*
- * Copyright (c) 2020, Konrad Dybcio
+ * Copyright (c) 2020, Konrad Dybcio <konrad.dybcio@somainline.org>
*/
#include "msm8994.dtsi"
@@ -11,8 +11,17 @@
/ {
/* required for bootloader to select correct board */
- qcom,msm-id = <0xcf 0x20001>;
+
+ /*
+ * We support MSM8994 v2 (0x20000) and v2.1 (0x20001).
+ * The V1 chip (0x0 and 0x10000) is significantly different
+ * and requires driver-side changes (including CPR, be warned!!).
+ * Besides that, it's very rare.
+ */
+ qcom,msm-id = <207 0x20000>, <207 0x20001>;
+ /* We only use pm8994+pmi8994. */
qcom,pmic-id = <0x10009 0x1000a 0x00 0x00>;
+ /* This property is shared across all kitakami devices. */
qcom,board-id = <8 0>;
/* Kitakami firmware doesn't support PSCI */
@@ -63,53 +72,29 @@
};
reserved-memory {
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
/* This is for getting crash logs using Android downstream kernels */
ramoops@1fe00000 {
compatible = "ramoops";
- reg = <0x0 0x1fe00000 0x0 0x200000>;
+ reg = <0 0x1fe00000 0 0x200000>;
console-size = <0x100000>;
record-size = <0x10000>;
ftrace-size = <0x10000>;
pmsg-size = <0x80000>;
};
- continuous_splash: framebuffer@3401000{
- reg = <0x0 0x3401000 0x0 0x2200000>;
- no-map;
- };
-
- dfps_data_mem: dfps_data_mem@3400000 {
- reg = <0x0 0x3400000 0x0 0x1000>;
- no-map;
- };
-
- peripheral_region: peripheral_region@7400000 {
- reg = <0x0 0x7400000 0x0 0x1c00000>;
- no-map;
- };
-
- modem_region: modem_region@9000000 {
- reg = <0x0 0x9000000 0x0 0x5a00000>;
- no-map;
- };
-
- tzapp: modem_region@ea00000 {
- reg = <0x0 0xea00000 0x0 0x1900000>;
+ fb_region: fb_region@40000000 {
+ reg = <0 0x40000000 0 0x1000000>;
no-map;
};
- fb_region: fb_region@40000000 {
- reg = <0x00 0x40000000 0x00 0x1000000>;
+ tzapp: memory@c7800000 {
+ reg = <0 0xc7800000 0 0x1900000>;
no-map;
};
};
};
-&blsp_spi0 {
+&blsp1_spi1 {
status = "okay";
/* FPC fingerprint reader */
@@ -117,94 +102,330 @@
/* I2C1 is disabled on this board */
-&blsp_i2c2 {
+&blsp1_i2c2 {
status = "okay";
+ clock-frequency = <355000>;
- /* NXP NFC */
+ /* NXP PN547 NFC */
};
-&blsp_i2c4 {
+&blsp1_i2c4 {
status = "okay";
+ clock-frequency = <355000>;
/* Empty but active */
};
-&blsp_i2c5 {
+&blsp1_i2c6 {
status = "okay";
+ clock-frequency = <355000>;
- /* SMB1357 charger and sii8620 HDMI/MHL bridge */
-};
+ touchscreen: rmi4-i2c-dev@2c {
+ compatible = "syna,rmi4-i2c";
+ reg = <0x2c>;
+ #address-cells = <1>;
+ #size-cells = <0>;
-&blsp_i2c6 {
- status = "okay";
+ interrupt-parent = <&tlmm>;
+ interrupts = <42 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts_int_active &ts_reset_active>;
+
+ vdd-supply = <&pm8994_l22>;
+ vio-supply = <&pm8994_s4>;
+
+ syna,reset-delay-ms = <220>;
+ syna,startup-delay-ms = <220>;
- /* Synaptics touchscreen */
+ rmi4-f01@1 {
+ reg = <0x01>;
+ syna,nosleep-mode = <1>;
+ };
+
+ rmi4-f11@11 {
+ reg = <0x11>;
+ syna,sensor-type = <1>;
+ };
+ };
};
&blsp1_uart2 {
status = "okay";
};
+&blsp2_i2c5 {
+ status = "okay";
+ clock-frequency = <355000>;
+
+ /* sii8620 HDMI/MHL bridge */
+};
+
&blsp2_uart2 {
status = "okay";
};
+/*
+ * The Kitakami bootloader turns cont_splash on only when it detects
+ * specific downstream MDSS/backlight nodes in the active DTB.
+ * One way to use that framebuffer is to load a secondary instance of
+ * LK with the downstream DTB appended and then, only from there, load
+ * mainline Linux.
+ */
+&cont_splash_mem {
+ reg = <0 0x3401000 0 0x2200000>;
+};
+
+&pmi8994_spmi_regulators {
+ /*
+ * This regulator *is* managed by RPMPD, but it also needs to be
+ * kept always-on because the GPU device only accepts a single
+ * power domain... which still isn't enough and forces us to bind
+ * OXILI_CX and OXILI_GX together!
+ */
+ vdd_gfx: s2@1700 {
+ reg = <0x1700 0x100>;
+ regulator-name = "VDD_GFX";
+ regulator-min-microvolt = <980000>;
+ regulator-max-microvolt = <980000>;
+
+ /* hack until we rig up the gpu consumer */
+ regulator-always-on;
+ };
+};
+
&rpm_requests {
pm8994_regulators: pm8994-regulators {
compatible = "qcom,rpm-pm8994-regulators";
- vdd_l1-supply = <&pm8994_s1>;
- vdd_l2_26_28-supply = <&pm8994_s3>;
- vdd_l3_11-supply = <&pm8994_s3>;
- vdd_l4_27_31-supply = <&pm8994_s3>;
- vdd_l5_7-supply = <&pm8994_s3>;
- vdd_l6_12_32-supply = <&pm8994_s5>;
- vdd_l8_16_30-supply = <&vreg_vph_pwr>;
- vdd_l9_10_18_22-supply = <&vreg_vph_pwr>;
- vdd_l13_19_23_24-supply = <&vreg_vph_pwr>;
- vdd_l14_15-supply = <&pm8994_s5>;
- vdd_l17_29-supply = <&vreg_vph_pwr>;
- vdd_l20_21-supply = <&vreg_vph_pwr>;
- vdd_l25-supply = <&pm8994_s5>;
- vdd_lvs1_2 = <&pm8994_s4>;
-
- pm8994_s1: s1 {};
- pm8994_s2: s2 {};
- pm8994_s3: s3 {};
- pm8994_s4: s4 {};
- pm8994_s5: s5 {};
- pm8994_s6: s6 {};
- pm8994_s7: s7 {};
-
- pm8994_l1: l1 {};
- pm8994_l2: l2 {};
- pm8994_l3: l3 {};
- pm8994_l4: l4 {};
- pm8994_l6: l6 {};
- pm8994_l8: l8 {};
- pm8994_l9: l9 {};
- pm8994_l10: l10 {};
- pm8994_l11: l11 {};
- pm8994_l12: l12 {};
- pm8994_l13: l13 {};
- pm8994_l14: l14 {};
- pm8994_l15: l15 {};
- pm8994_l16: l16 {};
- pm8994_l17: l17 {};
- pm8994_l18: l18 {};
- pm8994_l19: l19 {};
- pm8994_l20: l20 {};
- pm8994_l21: l21 {};
- pm8994_l22: l22 {};
- pm8994_l23: l23 {};
- pm8994_l24: l24 {};
- pm8994_l25: l25 {};
- pm8994_l26: l26 {};
- pm8994_l27: l27 {};
- pm8994_l28: l28 {};
- pm8994_l29: l29 {};
- pm8994_l30: l30 {};
- pm8994_l31: l31 {};
- pm8994_l32: l32 {};
+
+ vdd_s1-supply = <&vph_pwr>;
+ vdd_s2-supply = <&vph_pwr>;
+ vdd_s3-supply = <&vph_pwr>;
+ vdd_s4-supply = <&vph_pwr>;
+ vdd_s5-supply = <&vph_pwr>;
+ vdd_s6-supply = <&vph_pwr>;
+ vdd_s7-supply = <&vph_pwr>;
+ vdd_s8-supply = <&vph_pwr>;
+ vdd_s9-supply = <&vph_pwr>;
+ vdd_s10-supply = <&vph_pwr>;
+ vdd_s11-supply = <&vph_pwr>;
+ vdd_s12-supply = <&vph_pwr>;
+ vdd_l1-supply = <&pmi8994_s1>;
+ vdd_l2_l26_l28-supply = <&pm8994_s3>;
+ vdd_l3_l11-supply = <&pm8994_s3>;
+ vdd_l4_l27_l31-supply = <&pm8994_s3>;
+ vdd_l5_l7-supply = <&pm8994_s5>;
+ vdd_l6_l12_l32-supply = <&pm8994_s5>;
+ vdd_l8_l16_l30-supply = <&vph_pwr>;
+ vdd_l9_l10_l18_l22-supply = <&pmi8994_bby>;
+ vdd_l13_l19_l23_l24-supply = <&pmi8994_bby>;
+ vdd_l14_l15-supply = <&pm8994_s5>;
+ vdd_l17_l29-supply = <&pmi8994_bby>;
+ vdd_l20_l21-supply = <&pmi8994_bby>;
+ vdd_l25-supply = <&pm8994_s3>;
+ vdd_lvs1_lvs2-supply = <&pm8994_s4>;
+
+ /* S1, S2, S6 and S12 are managed by RPMPD */
+
+ pm8994_s3: s3 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ pm8994_s4: s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-allow-set-load;
+ regulator-always-on;
+ regulator-system-load = <325000>;
+ };
+
+ pm8994_s5: s5 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ };
+
+ pm8994_s7: s7 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ /*
+ * S8 - SPMI-managed VDD_APC0
+ * S9, S10 and S11 (the main one) - SPMI-managed VDD_APC1
+ */
+
+ pm8994_l1: l1 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ pm8994_l2: l2 {
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-allow-set-load;
+ regulator-system-load = <10000>;
+ };
+
+ pm8994_l3: l3 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ };
+
+ pm8994_l4: l4 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ /* L5 is inaccessible from RPM */
+
+ pm8994_l6: l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ /* L7 is inaccessible from RPM */
+
+ pm8994_l8: l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8994_l9: l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8994_l10: l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8994_l11: l11 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8994_l12: l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-allow-set-load;
+ regulator-system-load = <10000>;
+ };
+
+ pm8994_l13: l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pm8994_l14: l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-allow-set-load;
+ regulator-system-load = <10000>;
+ };
+
+ pm8994_l15: l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8994_l16: l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+
+ pm8994_l17: l17 {
+ regulator-min-microvolt = <2200000>;
+ regulator-max-microvolt = <2200000>;
+ };
+
+ pm8994_l18: l18 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ regulator-always-on;
+ };
+
+ pm8994_l19: l19 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ pm8994_l20: l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-allow-set-load;
+ regulator-system-load = <570000>;
+ };
+
+ pm8994_l21: l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-always-on;
+ regulator-allow-set-load;
+ regulator-system-load = <800000>;
+ };
+
+ pm8994_l22: l22 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ pm8994_l23: l23 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ pm8994_l24: l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3150000>;
+ };
+
+ pm8994_l25: l25 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ pm8994_l26: l26 {
+ regulator-min-microvolt = <987500>;
+ regulator-max-microvolt = <987500>;
+ };
+
+ pm8994_l27: l27 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pm8994_l28: l28 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-allow-set-load;
+ regulator-system-load = <10000>;
+ };
+
+ pm8994_l29: l29 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+
+ pm8994_l30: l30 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pm8994_l31: l31 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-allow-set-load;
+ regulator-system-load = <10000>;
+ };
+
+ pm8994_l32: l32 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
pm8994_lvs1: lvs1 {};
pm8994_lvs2: lvs2 {};
@@ -213,22 +434,34 @@
pmi8994_regulators: pmi8994-regulators {
compatible = "qcom,rpm-pmi8994-regulators";
- pmi8994_s1: s1 {};
- pmi8994_s2: s2 {};
- pmi8994_s3: s3 {};
- pmi8994_bby: boost-bypass {};
+ vdd_s1-supply = <&vph_pwr>;
+ vdd_bst_byp-supply = <&vph_pwr>;
+
+ pmi8994_s1: s1 {
+ regulator-min-microvolt = <1025000>;
+ regulator-max-microvolt = <1025000>;
+ };
+
+ /* S2 & S3 - VDD_GFX */
+
+ pmi8994_bby: boost-bypass {
+ regulator-min-microvolt = <3150000>;
+ regulator-max-microvolt = <3600000>;
+ };
};
};
&sdhc1 {
- /* There is an issue with the eMMC causing permanent
+ /*
+ * There is an issue with the eMMC causing permanent
* damage to the card if a quirk isn't addressed.
* Until it's fixed, disable the MMC so as not to brick
* devices.
*/
status = "disabled";
- /* Downstream pushes 2.95V to the sdhci device,
+ /*
+ * Downstream pushes 2.95V to the sdhci device,
* but upstream driver REALLY wants to make vmmc 1.8v
 	 * because of the hs400-1_8v mode. MMC works fine without
* that regulator, so let's not use it for now.
@@ -238,3 +471,27 @@
* vqmmc-supply = <&pm8994_s4>;
*/
};
+
+&sdhc2 {
+ status = "okay";
+
+ cd-gpios = <&tlmm 100 0>;
+ vmmc-supply = <&pm8994_l21>;
+ vqmmc-supply = <&pm8994_l13>;
+};
+
+&tlmm {
+ ts_int_active: ts-int-active {
+ pins = "gpio42";
+ drive-strength = <2>;
+ bias-disable;
+ input-enable;
+ };
+
+ ts_reset_active: ts-reset-active {
+ pins = "gpio109";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
index 6e083a2f690b..f49d442d2edf 100644
--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
@@ -4,6 +4,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-msm8994.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
/ {
interrupt-parent = <&intc>;
@@ -131,11 +132,11 @@
cpu = <&CPU5>;
};
- core2 {
+ cpu6_map: core2 {
cpu = <&CPU6>;
};
- core3 {
+ cpu7_map: core3 {
cpu = <&CPU7>;
};
};
@@ -154,6 +155,12 @@
reg = <0 0 0 0>;
};
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x80>;
+ #hwlock-cells = <1>;
+ };
+
pmu {
compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4)| IRQ_TYPE_LEVEL_HIGH)>;
@@ -169,8 +176,51 @@
#size-cells = <2>;
ranges;
+ dfps_data_mem: dfps_data_mem@3400000 {
+ reg = <0 0x03400000 0 0x1000>;
+ no-map;
+ };
+
+ cont_splash_mem: memory@3800000 {
+ reg = <0 0x03800000 0 0x2400000>;
+ no-map;
+ };
+
smem_mem: smem_region@6a00000 {
- reg = <0x0 0x6a00000 0x0 0x200000>;
+ reg = <0 0x06a00000 0 0x200000>;
+ no-map;
+ };
+
+ mpss_mem: memory@7000000 {
+ reg = <0 0x07000000 0 0x5a00000>;
+ no-map;
+ };
+
+ peripheral_region: memory@ca00000 {
+ reg = <0 0x0ca00000 0 0x1f00000>;
+ no-map;
+ };
+
+ rmtfs_mem: memory@c6400000 {
+ compatible = "qcom,rmtfs-mem";
+ reg = <0 0xc6400000 0 0x180000>;
+ no-map;
+
+ qcom,client-id = <1>;
+ };
+
+ mba_mem: memory@c6700000 {
+ reg = <0 0xc6700000 0 0x100000>;
+ no-map;
+ };
+
+ audio_mem: memory@c7000000 {
+ reg = <0 0xc7000000 0 0x800000>;
+ no-map;
+ };
+
+ adsp_mem: memory@c9400000 {
+ reg = <0 0xc9400000 0 0x3f00000>;
no-map;
};
};
@@ -192,6 +242,35 @@
compatible = "qcom,rpmcc-msm8994";
#clock-cells = <1>;
};
+
+ rpmpd: power-controller {
+ compatible = "qcom,msm8994-rpmpd";
+ #power-domain-cells = <1>;
+ operating-points-v2 = <&rpmpd_opp_table>;
+
+ rpmpd_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ rpmpd_opp_ret: opp1 {
+ opp-level = <1>;
+ };
+ rpmpd_opp_svs_krait: opp2 {
+ opp-level = <2>;
+ };
+ rpmpd_opp_svs_soc: opp3 {
+ opp-level = <3>;
+ };
+ rpmpd_opp_nom: opp4 {
+ opp-level = <4>;
+ };
+ rpmpd_opp_turbo: opp5 {
+ opp-level = <5>;
+ };
+ rpmpd_opp_super_turbo: opp6 {
+ opp-level = <6>;
+ };
+ };
+ };
};
};
};
@@ -203,6 +282,55 @@
hwlocks = <&tcsr_mutex 3>;
};
+ smp2p-lpass {
+ compatible = "qcom,smp2p";
+ qcom,smem = <443>, <429>;
+
+ interrupts = <GIC_SPI 158 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 8 10>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <2>;
+
+ adsp_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ adsp_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-modem {
+ compatible = "qcom,smp2p";
+ qcom,smem = <435>, <428>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 8 14>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ modem_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ modem_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
soc: soc {
#address-cells = <1>;
@@ -385,7 +513,7 @@
status = "disabled";
};
- blsp_i2c1: i2c@f9923000 {
+ blsp1_i2c1: i2c@f9923000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0xf9923000 0x500>;
interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
@@ -393,6 +521,8 @@
<&gcc GCC_BLSP1_QUP1_I2C_APPS_CLK>;
clock-names = "iface", "core";
clock-frequency = <400000>;
+ dmas = <&blsp1_dma 12>, <&blsp1_dma 13>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&i2c1_default>;
pinctrl-1 = <&i2c1_sleep>;
@@ -401,7 +531,7 @@
status = "disabled";
};
- blsp_spi0: spi@f9923000 {
+ blsp1_spi1: spi@f9923000 {
compatible = "qcom,spi-qup-v2.2.1";
reg = <0xf9923000 0x500>;
interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
@@ -412,21 +542,21 @@
dmas = <&blsp1_dma 12>, <&blsp1_dma 13>;
dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&blsp1_spi0_default>;
- pinctrl-1 = <&blsp1_spi0_sleep>;
+ pinctrl-0 = <&blsp1_spi1_default>;
+ pinctrl-1 = <&blsp1_spi1_sleep>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
- blsp_i2c2: i2c@f9924000 {
+ blsp1_i2c2: i2c@f9924000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0xf9924000 0x500>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_AHB_CLK>,
<&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;
clock-names = "iface", "core";
- clock-frequency = <355000>;
+ clock-frequency = <400000>;
dmas = <&blsp1_dma 14>, <&blsp1_dma 15>;
dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
@@ -439,14 +569,16 @@
/* I2C3 doesn't exist */
- blsp_i2c4: i2c@f9926000 {
+ blsp1_i2c4: i2c@f9926000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0xf9926000 0x500>;
interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_AHB_CLK>,
<&gcc GCC_BLSP1_QUP4_I2C_APPS_CLK>;
clock-names = "iface", "core";
- clock-frequency = <355000>;
+ clock-frequency = <400000>;
+ dmas = <&blsp1_dma 18>, <&blsp1_dma 19>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&i2c4_default>;
pinctrl-1 = <&i2c4_sleep>;
@@ -455,31 +587,32 @@
status = "disabled";
};
- blsp2_dma: dma-controller@f9944000 {
- compatible = "qcom,bam-v1.7.0";
- reg = <0xf9944000 0x19000>;
- interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP2_AHB_CLK>;
- clock-names = "bam_clk";
- #dma-cells = <1>;
- qcom,ee = <0>;
- qcom,controlled-remotely;
- num-channels = <18>;
- qcom,num-ees = <4>;
+ blsp1_i2c5: i2c@f9927000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0xf9927000 0x500>;
+ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP5_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ clock-frequency = <400000>;
+ dmas = <&blsp2_dma 20>, <&blsp2_dma 21>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c5_default>;
+ pinctrl-1 = <&i2c5_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
};
- /* According to downstream kernels, i2c6
- * comes before i2c5 address-wise...
- */
-
- blsp_i2c6: i2c@f9928000 {
+ blsp1_i2c6: i2c@f9928000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0xf9928000 0x500>;
interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_AHB_CLK>,
<&gcc GCC_BLSP1_QUP6_I2C_APPS_CLK>;
clock-names = "iface", "core";
- clock-frequency = <355000>;
+ clock-frequency = <400000>;
dmas = <&blsp1_dma 22>, <&blsp1_dma 23>;
dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
@@ -490,10 +623,23 @@
status = "disabled";
};
+ blsp2_dma: dma-controller@f9944000 {
+ compatible = "qcom,bam-v1.7.0";
+ reg = <0xf9944000 0x19000>;
+ interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ qcom,controlled-remotely;
+ num-channels = <18>;
+ qcom,num-ees = <4>;
+ };
+
blsp2_uart2: serial@f995e000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0xf995e000 0x1000>;
- interrupts = <GIC_SPI 146 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "core", "iface";
clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>,
<&gcc GCC_BLSP2_AHB_CLK>;
@@ -505,7 +651,43 @@
status = "disabled";
};
- blsp_i2c5: i2c@f9967000 {
+ blsp2_i2c1: i2c@f9963000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0xf9963000 0x500>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP1_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ clock-frequency = <400000>;
+ dmas = <&blsp2_dma 12>, <&blsp2_dma 13>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c7_default>;
+ pinctrl-1 = <&i2c7_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_spi4: spi@f9966000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0xf9966000 0x500>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_QUP4_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ spi-max-frequency = <19200000>;
+ dmas = <&blsp2_dma 18>, <&blsp2_dma 19>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_spi10_default>;
+ pinctrl-1 = <&blsp2_spi10_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_i2c5: i2c@f9967000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0xf9967000 0x500>;
interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
@@ -516,8 +698,8 @@
dmas = <&blsp2_dma 20>, <&blsp2_dma 21>;
dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&i2c5_default>;
- pinctrl-1 = <&i2c5_sleep>;
+ pinctrl-0 = <&i2c11_default>;
+ pinctrl-1 = <&i2c11_sleep>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -588,16 +770,18 @@
blsp2_uart2_default: blsp2-uart2-default {
function = "blsp_uart8";
- pins = "gpio45", "gpio46";
- drive-strength = <2>;
+ pins = "gpio45", "gpio46",
+ "gpio47", "gpio48";
+ drive-strength = <16>;
bias-disable;
};
blsp2_uart2_sleep: blsp2-uart2-sleep {
function = "gpio";
- pins = "gpio45", "gpio46";
+ pins = "gpio45", "gpio46",
+ "gpio47", "gpio48";
drive-strength = <2>;
- bias-pull-down;
+ bias-disable;
};
i2c1_default: i2c1-default {
@@ -671,7 +855,56 @@
bias-disable;
};
- blsp1_spi0_default: blsp1-spi0-default {
+ i2c7_default: i2c7-default {
+ function = "blsp_i2c7";
+ pins = "gpio44", "gpio43";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ i2c7_sleep: i2c7-sleep {
+ function = "gpio";
+ pins = "gpio44", "gpio43";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp2_spi10_default: blsp2-spi10-default {
+ default {
+ function = "blsp_spi10";
+ pins = "gpio53", "gpio54", "gpio55";
+ drive-strength = <10>;
+ bias-pull-down;
+ };
+ cs {
+ function = "gpio";
+ pins = "gpio55";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp2_spi10_sleep: blsp2-spi10-sleep {
+ pins = "gpio53", "gpio54", "gpio55";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ i2c11_default: i2c11-default {
+ function = "blsp_i2c11";
+ pins = "gpio83", "gpio84";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ i2c11_sleep: i2c11-sleep {
+ function = "gpio";
+ pins = "gpio83", "gpio84";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp1_spi1_default: blsp1-spi1-default {
default {
function = "blsp_spi1";
pins = "gpio0", "gpio1", "gpio3";
@@ -686,7 +919,7 @@
};
};
- blsp1_spi0_sleep: blsp1-spi0-sleep {
+ blsp1_spi1_sleep: blsp1-spi1-sleep {
pins = "gpio0", "gpio1", "gpio3";
drive-strength = <2>;
bias-disable;
@@ -776,13 +1009,7 @@
};
};
- tcsr_mutex: hwlock {
- compatible = "qcom,tcsr-mutex";
- syscon = <&tcsr_mutex_regs 0 0x80>;
- #hwlock-cells = <1>;
- };
-
- timer {
+ timer: timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 2 0xff08>,
<GIC_PPI 3 0xff08>,
@@ -790,9 +1017,9 @@
<GIC_PPI 1 0xff08>;
};
- vreg_vph_pwr: vreg-vph-pwr {
+ vph_pwr: vph-pwr-regulator {
compatible = "regulator-fixed";
- regulator-name = "vph-pwr";
+ regulator-name = "vph_pwr";
regulator-min-microvolt = <3600000>;
regulator-max-microvolt = <3600000>;
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 7eef07e73e25..ce430ba9c118 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -750,6 +750,8 @@
ranges = <0x01000000 0x0 0x0c200000 0x0c200000 0x0 0x100000>,
<0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>;
+ device_type = "pci";
+
interrupts = <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
#interrupt-cells = <1>;
@@ -802,6 +804,8 @@
ranges = <0x01000000 0x0 0x0d200000 0x0d200000 0x0 0x100000>,
<0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>;
+ device_type = "pci";
+
interrupts = <GIC_SPI 413 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
#interrupt-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi b/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi
index 00d84fb21798..b500f24d47bc 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi
@@ -74,6 +74,14 @@
cpu-idle-states = <&BIG_CPU_SLEEP_1>;
};
+&pcie0 {
+ status = "okay";
+};
+
+&pcie_phy {
+ status = "okay";
+};
+
&pm8005_lsid1 {
pm8005-regulators {
compatible = "qcom,pm8005-regulators";
@@ -295,6 +303,14 @@
pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
};
+&ufshc {
+ status = "okay";
+};
+
+&ufsphy {
+ status = "okay";
+};
+
&usb3 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
index cec42437b302..c1ef0c71d5f5 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
@@ -106,6 +106,14 @@
// status = "okay";
};
+&pcie0 {
+ status = "okay";
+};
+
+&pcie_phy {
+ status = "okay";
+};
+
&pm8005_lsid1 {
pm8005-regulators {
compatible = "qcom,pm8005-regulators";
@@ -345,6 +353,7 @@
};
&ufshc {
+ status = "okay";
vcc-supply = <&vreg_l20a_2p95>;
vccq-supply = <&vreg_l26a_1p2>;
vccq2-supply = <&vreg_s4a_1p8>;
@@ -354,6 +363,7 @@
};
&ufsphy {
+ status = "okay";
vdda-phy-supply = <&vreg_l1a_0p875>;
vdda-pll-supply = <&vreg_l2a_1p2>;
vddp-ref-clk-supply = <&vreg_l26a_1p2>;
diff --git a/arch/arm64/boot/dts/qcom/msm8998-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8998-pins.dtsi
deleted file mode 100644
index 7c222cbf19d9..000000000000
--- a/arch/arm64/boot/dts/qcom/msm8998-pins.dtsi
+++ /dev/null
@@ -1,108 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
-
-&tlmm {
- sdc2_clk_on: sdc2_clk_on {
- config {
- pins = "sdc2_clk";
- bias-disable; /* NO pull */
- drive-strength = <16>; /* 16 mA */
- };
- };
-
- sdc2_clk_off: sdc2_clk_off {
- config {
- pins = "sdc2_clk";
- bias-disable; /* NO pull */
- drive-strength = <2>; /* 2 mA */
- };
- };
-
- sdc2_cmd_on: sdc2_cmd_on {
- config {
- pins = "sdc2_cmd";
- bias-pull-up; /* pull up */
- drive-strength = <10>; /* 10 mA */
- };
- };
-
- sdc2_cmd_off: sdc2_cmd_off {
- config {
- pins = "sdc2_cmd";
- bias-pull-up; /* pull up */
- drive-strength = <2>; /* 2 mA */
- };
- };
-
- sdc2_data_on: sdc2_data_on {
- config {
- pins = "sdc2_data";
- bias-pull-up; /* pull up */
- drive-strength = <10>; /* 10 mA */
- };
- };
-
- sdc2_data_off: sdc2_data_off {
- config {
- pins = "sdc2_data";
- bias-pull-up; /* pull up */
- drive-strength = <2>; /* 2 mA */
- };
- };
-
- sdc2_cd_on: sdc2_cd_on {
- mux {
- pins = "gpio95";
- function = "gpio";
- };
-
- config {
- pins = "gpio95";
- bias-pull-up; /* pull up */
- drive-strength = <2>; /* 2 mA */
- };
- };
-
- sdc2_cd_off: sdc2_cd_off {
- mux {
- pins = "gpio95";
- function = "gpio";
- };
-
- config {
- pins = "gpio95";
- bias-pull-up; /* pull up */
- drive-strength = <2>; /* 2 mA */
- };
- };
-
- blsp1_uart3_on: blsp1_uart3_on {
- tx {
- pins = "gpio45";
- function = "blsp_uart3_a";
- drive-strength = <2>;
- bias-disable;
- };
-
- rx {
- pins = "gpio46";
- function = "blsp_uart3_a";
- drive-strength = <2>;
- bias-disable;
- };
-
- cts {
- pins = "gpio47";
- function = "blsp_uart3_a";
- drive-strength = <2>;
- bias-disable;
- };
-
- rfr {
- pins = "gpio48";
- function = "blsp_uart3_a";
- drive-strength = <2>;
- bias-disable;
- };
- };
-};
diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
index ebdaaf1dfca4..1f2e93aa6553 100644
--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
@@ -133,6 +133,7 @@
compatible = "qcom,kryo280";
reg = <0x0 0x0>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
cpu-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
next-level-cache = <&L2_0>;
L2_0: l2-cache {
@@ -152,6 +153,7 @@
compatible = "qcom,kryo280";
reg = <0x0 0x1>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
cpu-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
next-level-cache = <&L2_0>;
L1_I_1: l1-icache {
@@ -167,6 +169,7 @@
compatible = "qcom,kryo280";
reg = <0x0 0x2>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
cpu-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
next-level-cache = <&L2_0>;
L1_I_2: l1-icache {
@@ -182,6 +185,7 @@
compatible = "qcom,kryo280";
reg = <0x0 0x3>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
cpu-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
next-level-cache = <&L2_0>;
L1_I_3: l1-icache {
@@ -197,6 +201,7 @@
compatible = "qcom,kryo280";
reg = <0x0 0x100>;
enable-method = "psci";
+ capacity-dmips-mhz = <1536>;
cpu-idle-states = <&BIG_CPU_SLEEP_0 &BIG_CPU_SLEEP_1>;
next-level-cache = <&L2_1>;
L2_1: l2-cache {
@@ -216,6 +221,7 @@
compatible = "qcom,kryo280";
reg = <0x0 0x101>;
enable-method = "psci";
+ capacity-dmips-mhz = <1536>;
cpu-idle-states = <&BIG_CPU_SLEEP_0 &BIG_CPU_SLEEP_1>;
next-level-cache = <&L2_1>;
L1_I_101: l1-icache {
@@ -231,6 +237,7 @@
compatible = "qcom,kryo280";
reg = <0x0 0x102>;
enable-method = "psci";
+ capacity-dmips-mhz = <1536>;
cpu-idle-states = <&BIG_CPU_SLEEP_0 &BIG_CPU_SLEEP_1>;
next-level-cache = <&L2_1>;
L1_I_102: l1-icache {
@@ -246,6 +253,7 @@
compatible = "qcom,kryo280";
reg = <0x0 0x103>;
enable-method = "psci";
+ capacity-dmips-mhz = <1536>;
cpu-idle-states = <&BIG_CPU_SLEEP_0 &BIG_CPU_SLEEP_1>;
next-level-cache = <&L2_1>;
L1_I_103: l1-icache {
@@ -379,43 +387,43 @@
compatible = "operating-points-v2";
rpmpd_opp_ret: opp1 {
- opp-level = <16>;
+ opp-level = <RPM_SMD_LEVEL_RETENTION>;
};
rpmpd_opp_ret_plus: opp2 {
- opp-level = <32>;
+ opp-level = <RPM_SMD_LEVEL_RETENTION_PLUS>;
};
rpmpd_opp_min_svs: opp3 {
- opp-level = <48>;
+ opp-level = <RPM_SMD_LEVEL_MIN_SVS>;
};
rpmpd_opp_low_svs: opp4 {
- opp-level = <64>;
+ opp-level = <RPM_SMD_LEVEL_LOW_SVS>;
};
rpmpd_opp_svs: opp5 {
- opp-level = <128>;
+ opp-level = <RPM_SMD_LEVEL_SVS>;
};
rpmpd_opp_svs_plus: opp6 {
- opp-level = <192>;
+ opp-level = <RPM_SMD_LEVEL_SVS_PLUS>;
};
rpmpd_opp_nom: opp7 {
- opp-level = <256>;
+ opp-level = <RPM_SMD_LEVEL_NOM>;
};
rpmpd_opp_nom_plus: opp8 {
- opp-level = <320>;
+ opp-level = <RPM_SMD_LEVEL_NOM_PLUS>;
};
rpmpd_opp_turbo: opp9 {
- opp-level = <384>;
+ opp-level = <RPM_SMD_LEVEL_TURBO>;
};
rpmpd_opp_turbo_plus: opp10 {
- opp-level = <512>;
+ opp-level = <RPM_SMD_LEVEL_BINNING>;
};
};
};
@@ -937,6 +945,7 @@
num-lanes = <1>;
phys = <&pciephy>;
phy-names = "pciephy";
+ status = "disabled";
ranges = <0x01000000 0x0 0x1b200000 0x1b200000 0x0 0x100000>,
<0x02000000 0x0 0x1b300000 0x1b300000 0x0 0xd00000>;
@@ -962,11 +971,12 @@
perst-gpios = <&tlmm 35 GPIO_ACTIVE_LOW>;
};
- phy@1c06000 {
+ pcie_phy: phy@1c06000 {
compatible = "qcom,msm8998-qmp-pcie-phy";
reg = <0x01c06000 0x18c>;
#address-cells = <1>;
#size-cells = <1>;
+ status = "disabled";
ranges;
clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
@@ -999,6 +1009,7 @@
phy-names = "ufsphy";
lanes-per-direction = <2>;
power-domains = <&gcc UFS_GDSC>;
+ status = "disabled";
#reset-cells = <1>;
clock-names =
@@ -1038,6 +1049,7 @@
reg = <0x01da7000 0x18c>;
#address-cells = <1>;
#size-cells = <1>;
+ status = "disabled";
ranges;
clock-names =
@@ -1073,6 +1085,278 @@
#gpio-cells = <0x2>;
interrupt-controller;
#interrupt-cells = <0x2>;
+
+ sdc2_clk_on: sdc2_clk_on {
+ config {
+ pins = "sdc2_clk";
+ bias-disable;
+ drive-strength = <16>;
+ };
+ };
+
+ sdc2_clk_off: sdc2_clk_off {
+ config {
+ pins = "sdc2_clk";
+ bias-disable;
+ drive-strength = <2>;
+ };
+ };
+
+ sdc2_cmd_on: sdc2_cmd_on {
+ config {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+ };
+
+ sdc2_cmd_off: sdc2_cmd_off {
+ config {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ };
+
+ sdc2_data_on: sdc2_data_on {
+ config {
+ pins = "sdc2_data";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+ };
+
+ sdc2_data_off: sdc2_data_off {
+ config {
+ pins = "sdc2_data";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ };
+
+ sdc2_cd_on: sdc2_cd_on {
+ mux {
+ pins = "gpio95";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio95";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ };
+
+ sdc2_cd_off: sdc2_cd_off {
+ mux {
+ pins = "gpio95";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio95";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ };
+
+ blsp1_uart3_on: blsp1_uart3_on {
+ tx {
+ pins = "gpio45";
+ function = "blsp_uart3_a";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rx {
+ pins = "gpio46";
+ function = "blsp_uart3_a";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ cts {
+ pins = "gpio47";
+ function = "blsp_uart3_a";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rfr {
+ pins = "gpio48";
+ function = "blsp_uart3_a";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp1_i2c1_default: blsp1-i2c1-default {
+ pins = "gpio2", "gpio3";
+ function = "blsp_i2c1";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp1_i2c1_sleep: blsp1-i2c1-sleep {
+ pins = "gpio2", "gpio3";
+ function = "blsp_i2c1";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ blsp1_i2c2_default: blsp1-i2c2-default {
+ pins = "gpio32", "gpio33";
+ function = "blsp_i2c2";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp1_i2c2_sleep: blsp1-i2c2-sleep {
+ pins = "gpio32", "gpio33";
+ function = "blsp_i2c2";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ blsp1_i2c3_default: blsp1-i2c3-default {
+ pins = "gpio47", "gpio48";
+ function = "blsp_i2c3";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp1_i2c3_sleep: blsp1-i2c3-sleep {
+ pins = "gpio47", "gpio48";
+ function = "blsp_i2c3";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ blsp1_i2c4_default: blsp1-i2c4-default {
+ pins = "gpio10", "gpio11";
+ function = "blsp_i2c4";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp1_i2c4_sleep: blsp1-i2c4-sleep {
+ pins = "gpio10", "gpio11";
+ function = "blsp_i2c4";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ blsp1_i2c5_default: blsp1-i2c5-default {
+ pins = "gpio87", "gpio88";
+ function = "blsp_i2c5";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp1_i2c5_sleep: blsp1-i2c5-sleep {
+ pins = "gpio87", "gpio88";
+ function = "blsp_i2c5";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ blsp1_i2c6_default: blsp1-i2c6-default {
+ pins = "gpio43", "gpio44";
+ function = "blsp_i2c6";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp1_i2c6_sleep: blsp1-i2c6-sleep {
+ pins = "gpio43", "gpio44";
+ function = "blsp_i2c6";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ /* Each QUP has 6 interfaces; BLSP2 indexes are numbered (n)+6 */
+ blsp2_i2c1_default: blsp2-i2c1-default {
+ pins = "gpio55", "gpio56";
+ function = "blsp_i2c7";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp2_i2c1_sleep: blsp2-i2c1-sleep {
+ pins = "gpio55", "gpio56";
+ function = "blsp_i2c7";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ blsp2_i2c2_default: blsp2-i2c2-default {
+ pins = "gpio6", "gpio7";
+ function = "blsp_i2c8";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp2_i2c2_sleep: blsp2-i2c2-sleep {
+ pins = "gpio6", "gpio7";
+ function = "blsp_i2c8";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ blsp2_i2c3_default: blsp2-i2c3-default {
+ pins = "gpio51", "gpio52";
+ function = "blsp_i2c9";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp2_i2c3_sleep: blsp2-i2c3-sleep {
+ pins = "gpio51", "gpio52";
+ function = "blsp_i2c9";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ blsp2_i2c4_default: blsp2-i2c4-default {
+ pins = "gpio67", "gpio68";
+ function = "blsp_i2c10";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp2_i2c4_sleep: blsp2-i2c4-sleep {
+ pins = "gpio67", "gpio68";
+ function = "blsp_i2c10";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ blsp2_i2c5_default: blsp2-i2c5-default {
+ pins = "gpio60", "gpio61";
+ function = "blsp_i2c11";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp2_i2c5_sleep: blsp2-i2c5-sleep {
+ pins = "gpio60", "gpio61";
+ function = "blsp_i2c11";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ blsp2_i2c6_default: blsp2-i2c6-default {
+ pins = "gpio83", "gpio84";
+ function = "blsp_i2c12";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ blsp2_i2c6_sleep: blsp2-i2c6-sleep {
+ pins = "gpio83", "gpio84";
+ function = "blsp_i2c12";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
};
remoteproc_mss: remoteproc@4080000 {
@@ -1789,6 +2073,11 @@
clocks = <&gcc GCC_BLSP1_QUP1_I2C_APPS_CLK>,
<&gcc GCC_BLSP1_AHB_CLK>;
clock-names = "core", "iface";
+ dmas = <&blsp1_dma 6>, <&blsp1_dma 7>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_i2c1_default>;
+ pinctrl-1 = <&blsp1_i2c1_sleep>;
clock-frequency = <400000>;
status = "disabled";
@@ -1804,6 +2093,11 @@
clocks = <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>,
<&gcc GCC_BLSP1_AHB_CLK>;
clock-names = "core", "iface";
+ dmas = <&blsp1_dma 8>, <&blsp1_dma 9>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_i2c2_default>;
+ pinctrl-1 = <&blsp1_i2c2_sleep>;
clock-frequency = <400000>;
status = "disabled";
@@ -1819,6 +2113,11 @@
clocks = <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>,
<&gcc GCC_BLSP1_AHB_CLK>;
clock-names = "core", "iface";
+ dmas = <&blsp1_dma 10>, <&blsp1_dma 11>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_i2c3_default>;
+ pinctrl-1 = <&blsp1_i2c3_sleep>;
clock-frequency = <400000>;
status = "disabled";
@@ -1834,6 +2133,11 @@
clocks = <&gcc GCC_BLSP1_QUP4_I2C_APPS_CLK>,
<&gcc GCC_BLSP1_AHB_CLK>;
clock-names = "core", "iface";
+ dmas = <&blsp1_dma 12>, <&blsp1_dma 13>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_i2c4_default>;
+ pinctrl-1 = <&blsp1_i2c4_sleep>;
clock-frequency = <400000>;
status = "disabled";
@@ -1849,6 +2153,11 @@
clocks = <&gcc GCC_BLSP1_QUP5_I2C_APPS_CLK>,
<&gcc GCC_BLSP1_AHB_CLK>;
clock-names = "core", "iface";
+ dmas = <&blsp1_dma 14>, <&blsp1_dma 15>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_i2c5_default>;
+ pinctrl-1 = <&blsp1_i2c5_sleep>;
clock-frequency = <400000>;
status = "disabled";
@@ -1864,6 +2173,11 @@
clocks = <&gcc GCC_BLSP1_QUP6_I2C_APPS_CLK>,
<&gcc GCC_BLSP1_AHB_CLK>;
clock-names = "core", "iface";
+ dmas = <&blsp1_dma 16>, <&blsp1_dma 17>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_i2c6_default>;
+ pinctrl-1 = <&blsp1_i2c6_sleep>;
clock-frequency = <400000>;
status = "disabled";
@@ -1871,6 +2185,19 @@
#size-cells = <0>;
};
+ blsp2_dma: dma@c184000 {
+ compatible = "qcom,bam-v1.7.0";
+ reg = <0x0c184000 0x25000>;
+ interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ qcom,controlled-remotely;
+ num-channels = <18>;
+ qcom,num-ees = <4>;
+ };
+
blsp2_uart1: serial@c1b0000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0x0c1b0000 0x1000>;
@@ -1881,7 +2208,7 @@
status = "disabled";
};
- blsp2_i2c0: i2c@c1b5000 {
+ blsp2_i2c1: i2c@c1b5000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0x0c1b5000 0x600>;
interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
@@ -1889,6 +2216,11 @@
clocks = <&gcc GCC_BLSP2_QUP1_I2C_APPS_CLK>,
<&gcc GCC_BLSP2_AHB_CLK>;
clock-names = "core", "iface";
+ dmas = <&blsp2_dma 6>, <&blsp2_dma 7>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c1_default>;
+ pinctrl-1 = <&blsp2_i2c1_sleep>;
clock-frequency = <400000>;
status = "disabled";
@@ -1896,7 +2228,7 @@
#size-cells = <0>;
};
- blsp2_i2c1: i2c@c1b6000 {
+ blsp2_i2c2: i2c@c1b6000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0x0c1b6000 0x600>;
interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
@@ -1904,6 +2236,11 @@
clocks = <&gcc GCC_BLSP2_QUP2_I2C_APPS_CLK>,
<&gcc GCC_BLSP2_AHB_CLK>;
clock-names = "core", "iface";
+ dmas = <&blsp2_dma 8>, <&blsp2_dma 9>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c2_default>;
+ pinctrl-1 = <&blsp2_i2c2_sleep>;
clock-frequency = <400000>;
status = "disabled";
@@ -1911,7 +2248,7 @@
#size-cells = <0>;
};
- blsp2_i2c2: i2c@c1b7000 {
+ blsp2_i2c3: i2c@c1b7000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0x0c1b7000 0x600>;
interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
@@ -1919,6 +2256,11 @@
clocks = <&gcc GCC_BLSP2_QUP3_I2C_APPS_CLK>,
<&gcc GCC_BLSP2_AHB_CLK>;
clock-names = "core", "iface";
+ dmas = <&blsp2_dma 10>, <&blsp2_dma 11>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c3_default>;
+ pinctrl-1 = <&blsp2_i2c3_sleep>;
clock-frequency = <400000>;
status = "disabled";
@@ -1926,7 +2268,7 @@
#size-cells = <0>;
};
- blsp2_i2c3: i2c@c1b8000 {
+ blsp2_i2c4: i2c@c1b8000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0x0c1b8000 0x600>;
interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
@@ -1934,6 +2276,11 @@
clocks = <&gcc GCC_BLSP2_QUP4_I2C_APPS_CLK>,
<&gcc GCC_BLSP2_AHB_CLK>;
clock-names = "core", "iface";
+ dmas = <&blsp2_dma 12>, <&blsp2_dma 13>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c4_default>;
+ pinctrl-1 = <&blsp2_i2c4_sleep>;
clock-frequency = <400000>;
status = "disabled";
@@ -1941,7 +2288,7 @@
#size-cells = <0>;
};
- blsp2_i2c4: i2c@c1b9000 {
+ blsp2_i2c5: i2c@c1b9000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0x0c1b9000 0x600>;
interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
@@ -1949,6 +2296,11 @@
clocks = <&gcc GCC_BLSP2_QUP5_I2C_APPS_CLK>,
<&gcc GCC_BLSP2_AHB_CLK>;
clock-names = "core", "iface";
+ dmas = <&blsp2_dma 14>, <&blsp2_dma 15>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c5_default>;
+ pinctrl-1 = <&blsp2_i2c5_sleep>;
clock-frequency = <400000>;
status = "disabled";
@@ -1956,7 +2308,7 @@
#size-cells = <0>;
};
- blsp2_i2c5: i2c@c1ba000 {
+ blsp2_i2c6: i2c@c1ba000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0x0c1ba000 0x600>;
interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
@@ -1964,6 +2316,11 @@
clocks = <&gcc GCC_BLSP2_QUP6_I2C_APPS_CLK>,
<&gcc GCC_BLSP2_AHB_CLK>;
clock-names = "core", "iface";
+ dmas = <&blsp2_dma 16>, <&blsp2_dma 17>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c6_default>;
+ pinctrl-1 = <&blsp2_i2c6_sleep>;
clock-frequency = <400000>;
status = "disabled";
@@ -2110,5 +2467,3 @@
};
};
};
-
-#include "msm8998-pins.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi
index a53eccf2b695..bdc76d504b78 100644
--- a/arch/arm64/boot/dts/qcom/pm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi
@@ -11,7 +11,7 @@
/ {
thermal-zones {
- pm8150 {
+ pm8150-thermal {
polling-delay-passive = <100>;
polling-delay = <0>;
@@ -97,6 +97,16 @@
};
};
+ pm8150_adc_tm: adc-tm@3500 {
+ compatible = "qcom,spmi-adc-tm5";
+ reg = <0x3500>;
+ interrupts = <0x0 0x35 0x0 IRQ_TYPE_EDGE_RISING>;
+ #thermal-sensor-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
pm8150_rtc: rtc@6000 {
compatible = "qcom,pm8941-rtc";
reg = <0x6000>;
diff --git a/arch/arm64/boot/dts/qcom/pm8150b.dtsi b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
index e112e8876db6..b21e56a46145 100644
--- a/arch/arm64/boot/dts/qcom/pm8150b.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
@@ -10,7 +10,7 @@
/ {
thermal-zones {
- pm8150b {
+ pm8150b-thermal {
polling-delay-passive = <100>;
polling-delay = <0>;
@@ -95,6 +95,16 @@
};
};
+ pm8150b_adc_tm: adc-tm@3500 {
+ compatible = "qcom,spmi-adc-tm5";
+ reg = <0x3500>;
+ interrupts = <0x2 0x35 0x0 IRQ_TYPE_EDGE_RISING>;
+ #thermal-sensor-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
pm8150b_gpios: gpio@c000 {
compatible = "qcom,pm8150b-gpio";
reg = <0xc000>;
diff --git a/arch/arm64/boot/dts/qcom/pm8150l.dtsi b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
index 62139538b7d9..52f094a2b713 100644
--- a/arch/arm64/boot/dts/qcom/pm8150l.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
@@ -10,7 +10,7 @@
/ {
thermal-zones {
- pm8150l {
+ pm8150l-thermal {
polling-delay-passive = <100>;
polling-delay = <0>;
@@ -89,6 +89,16 @@
};
};
+ pm8150l_adc_tm: adc-tm@3500 {
+ compatible = "qcom,spmi-adc-tm5";
+ reg = <0x3500>;
+ interrupts = <0x4 0x35 0x0 IRQ_TYPE_EDGE_RISING>;
+ #thermal-sensor-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
pm8150l_gpios: gpio@c000 {
compatible = "qcom,pm8150l-gpio";
reg = <0xc000>;
diff --git a/arch/arm64/boot/dts/qcom/pm8994.dtsi b/arch/arm64/boot/dts/qcom/pm8994.dtsi
index 5ffdf37d8e31..c3876c82c874 100644
--- a/arch/arm64/boot/dts/qcom/pm8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8994.dtsi
@@ -6,7 +6,7 @@
/ {
thermal-zones {
- pm8994 {
+ pm8994-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
@@ -43,7 +43,7 @@
interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
};
- pon@800 {
+ pm8994_pon: pon@800 {
compatible = "qcom,pm8916-pon";
reg = <0x800>;
diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi
index 67283d60e2ac..6f5bb6b37ec2 100644
--- a/arch/arm64/boot/dts/qcom/pm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi
@@ -9,7 +9,7 @@
/ {
thermal-zones {
- pm8998 {
+ pm8998-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index d016b12967eb..d230c510d4b7 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -30,11 +30,15 @@
compatible = "qcom,pmi8998-lab-ibb";
ibb: ibb {
- interrupts = <0x3 0xdc 0x2 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <0x3 0xdc 0x2 IRQ_TYPE_EDGE_RISING>,
+ <0x3 0xdc 0x0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "sc-err", "ocp";
};
lab: lab {
- interrupts = <0x3 0xde 0x0 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <0x3 0xde 0x1 IRQ_TYPE_EDGE_RISING>,
+ <0x3 0xde 0x0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "sc-err", "ocp";
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/pms405.dtsi b/arch/arm64/boot/dts/qcom/pms405.dtsi
index ff4005186895..172be177fc8f 100644
--- a/arch/arm64/boot/dts/qcom/pms405.dtsi
+++ b/arch/arm64/boot/dts/qcom/pms405.dtsi
@@ -8,7 +8,7 @@
/ {
thermal-zones {
- pms405 {
+ pms405-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
index ce22d4fa383e..2f0528d01299 100644
--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
@@ -7,6 +7,8 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/sound/qcom,q6afe.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
#include "sm8250.dtsi"
#include "pm8150.dtsi"
#include "pm8150b.dtsi"
@@ -40,6 +42,17 @@
regulator-always-on;
};
+ hdmi-out {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con: endpoint {
+ remote-endpoint = <&lt9611_out>;
+ };
+ };
+ };
+
leds {
compatible = "gpio-leds";
@@ -66,6 +79,98 @@
};
+ lt9611_1v2: lt9611-vdd12-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "LT9611_1V2";
+
+ vin-supply = <&vdc_3v3>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ lt9611_3v3: lt9611-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "LT9611_3V3";
+
+ vin-supply = <&vdc_3v3>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ thermal-zones {
+ conn-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150b_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ pm8150l-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150l_adc_tm 1>;
+
+ trips {
+ active-config0 {
+ temperature = <50000>;
+ hysteresis = <4000>;
+ type = "passive";
+ };
+ };
+ };
+
+ skin-msm-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150l_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <50000>;
+ hysteresis = <4000>;
+ type = "passive";
+ };
+ };
+ };
+
+ wifi-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150_adc_tm 1>;
+
+ trips {
+ active-config0 {
+ temperature = <52000>;
+ hysteresis = <4000>;
+ type = "passive";
+ };
+ };
+ };
+
+ xo-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <50000>;
+ hysteresis = <4000>;
+ type = "passive";
+ };
+ };
+ };
+ };
+
vbat: vbat-regulator {
compatible = "regulator-fixed";
regulator-name = "VBAT";
@@ -87,7 +192,7 @@
vdc_3v3: vdc-3v3-regulator {
compatible = "regulator-fixed";
regulator-name = "VDC_3V3";
- vin-supply = <&dc12v>;
+ vin-supply = <&vreg_l11c_3p3>;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
@@ -120,9 +225,14 @@
};
};
+&adsp {
+ status = "okay";
+ firmware-name = "qcom/sm8250/adsp.mbn";
+};
+
&apps_rsc {
pm8009-rpmh-regulators {
- compatible = "qcom,pm8009-rpmh-regulators";
+ compatible = "qcom,pm8009-1-rpmh-regulators";
qcom,pmic-id = "f";
vdd-s1-supply = <&vph_pwr>;
@@ -131,6 +241,13 @@
vdd-l5-l6-supply = <&vreg_bob>;
vdd-l7-supply = <&vreg_s4a_1p8>;
+ vreg_s2f_0p95: smps2 {
+ regulator-name = "vreg_s2f_0p95";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <952000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+ };
+
vreg_l1f_1p1: ldo1 {
regulator-name = "vreg_l1f_1p1";
regulator-min-microvolt = <1104000>;
@@ -406,6 +523,42 @@
};
};
+&cdsp {
+ status = "okay";
+ firmware-name = "qcom/sm8250/cdsp.mbn";
+};
+
+&dsi0 {
+ status = "okay";
+ vdda-supply = <&vreg_l9a_1p2>;
+
+#if 0
+ qcom,dual-dsi-mode;
+ qcom,master-dsi;
+#endif
+
+ ports {
+ port@1 {
+ endpoint {
+ remote-endpoint = <&lt9611_a>;
+ data-lanes = <0 1 2 3>;
+ };
+ };
+ };
+};
+
+&dsi0_phy {
+ status = "okay";
+ vdds-supply = <&vreg_l5a_0p88>;
+};
+
+&gpu {
+ zap-shader {
+ memory-region = <&gpu_mem>;
+ firmware-name = "qcom/sm8250/a650_zap.mbn";
+ };
+};
+
/* LS-I2C0 */
&i2c4 {
status = "okay";
@@ -413,6 +566,55 @@
&i2c5 {
status = "okay";
+ clock-frequency = <400000>;
+
+ lt9611_codec: hdmi-bridge@2b {
+ compatible = "lontium,lt9611uxc";
+ reg = <0x2b>;
+ #sound-dai-cells = <1>;
+
+ interrupts-extended = <&tlmm 63 IRQ_TYPE_EDGE_FALLING>;
+
+ reset-gpios = <&pm8150l_gpios 5 GPIO_ACTIVE_HIGH>;
+
+ vdd-supply = <&lt9611_1v2>;
+ vcc-supply = <&lt9611_3v3>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&lt9611_irq_pin &lt9611_rst_pin>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ lt9611_a: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+
+#if 0
+ port@1 {
+ reg = <1>;
+
+ lt9611_b: endpoint {
+ remote-endpoint = <&dsi1_out>;
+ };
+ };
+#endif
+
+ port@2 {
+ reg = <2>;
+
+ lt9611_out: endpoint {
+ remote-endpoint = <&hdmi_con>;
+ };
+ };
+
+ };
+ };
};
/* LS-I2C1 */
@@ -420,6 +622,88 @@
status = "okay";
};
+&mdss {
+ status = "okay";
+};
+
+&mdss_mdp {
+ status = "okay";
+};
+
+&pm8150_adc {
+ xo-therm@4c {
+ reg = <ADC5_XO_THERM_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+
+ wifi-therm@4e {
+ reg = <ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+};
+
+&pm8150_adc_tm {
+ status = "okay";
+
+ xo-therm@0 {
+ reg = <0>;
+ io-channels = <&pm8150_adc ADC5_XO_THERM_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ wifi-therm@1 {
+ reg = <1>;
+ io-channels = <&pm8150_adc ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
+
+&pcie0 {
+ status = "okay";
+ perst-gpio = <&tlmm 79 GPIO_ACTIVE_LOW>;
+ wake-gpio = <&tlmm 81 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie0_default_state>;
+};
+
+&pcie0_phy {
+ status = "okay";
+ vdda-phy-supply = <&vreg_l5a_0p88>;
+ vdda-pll-supply = <&vreg_l9a_1p2>;
+};
+
+&pcie1 {
+ status = "okay";
+ perst-gpio = <&tlmm 82 GPIO_ACTIVE_LOW>;
+ wake-gpio = <&tlmm 84 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie1_default_state>;
+};
+
+&pcie1_phy {
+ status = "okay";
+ vdda-phy-supply = <&vreg_l5a_0p88>;
+ vdda-pll-supply = <&vreg_l9a_1p2>;
+};
+
+&pcie2 {
+ status = "okay";
+ perst-gpio = <&tlmm 85 GPIO_ACTIVE_LOW>;
+ wake-gpio = <&tlmm 87 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2_default_state>;
+};
+
+&pcie2_phy {
+ status = "okay";
+ vdda-phy-supply = <&vreg_l5a_0p88>;
+ vdda-pll-supply = <&vreg_l9a_1p2>;
+};
+
&pm8150_gpios {
gpio-reserved-ranges = <1 1>, <3 2>, <7 1>;
gpio-line-names =
@@ -435,6 +719,25 @@
"GPIO_10_P"; /* Green LED */
};
+&pm8150b_adc {
+ conn-therm@4f {
+ reg = <ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+};
+
+&pm8150b_adc_tm {
+ status = "okay";
+
+ conn-therm@0 {
+ reg = <0>;
+ io-channels = <&pm8150b_adc ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
+
&pm8150b_gpios {
gpio-line-names =
"NC",
@@ -451,6 +754,38 @@
"NC";
};
+&pm8150l_adc {
+ skin-msm-therm@4e {
+ reg = <ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+
+ pm8150l-therm@4f {
+ reg = <ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+};
+
+&pm8150l_adc_tm {
+ status = "okay";
+
+ skin-msm-therm@0 {
+ reg = <0>;
+ io-channels = <&pm8150l_adc ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ pm8150l-therm@1 {
+ reg = <1>;
+ io-channels = <&pm8150l_adc ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
+
&pm8150l_gpios {
gpio-line-names =
"NC",
@@ -465,6 +800,15 @@
"PM_GPIO-B",
"NC",
"PM3003A_MODE";
+
+ lt9611_rst_pin: lt9611-rst-pin {
+ pins = "gpio5";
+ function = "normal";
+
+ output-high;
+ input-disable;
+ power-source = <0>;
+ };
};
&pm8150_rtc {
@@ -483,6 +827,35 @@
status = "okay";
};
+&q6afedai {
+ qi2s@16 {
+ reg = <16>;
+ qcom,sd-lines = <0 1 2 3>;
+ };
+};
+
+/* TERT I2S uses 1 I2S SD line for audio on the LT9611 HDMI bridge */
+&q6afedai {
+ qi2s@20 {
+ reg = <20>;
+ qcom,sd-lines = <0>;
+ };
+};
+
+&q6asmdai {
+ dai@0 {
+ reg = <0>;
+ };
+
+ dai@1 {
+ reg = <1>;
+ };
+
+ dai@2 {
+ reg = <2>;
+ };
+};
+
&sdhc_2 {
status = "okay";
pinctrl-names = "default";
@@ -491,12 +864,91 @@
vqmmc-supply = <&vreg_l6c_2p96>;
cd-gpios = <&tlmm 77 GPIO_ACTIVE_LOW>;
bus-width = <4>;
- /* there seem to be issues with HS400-1.8V mode, so disable it */
- no-1-8-v;
no-sdio;
no-emmc;
};
+&sound {
+ compatible = "qcom,qrb5165-rb5-sndcard";
+ pinctrl-0 = <&tert_mi2s_active>;
+ pinctrl-names = "default";
+ model = "Qualcomm-RB5-WSA8815-Speakers-DMIC0";
+ audio-routing =
+ "SpkrLeft IN", "WSA_SPK1 OUT",
+ "SpkrRight IN", "WSA_SPK2 OUT",
+ "VA DMIC0", "vdd-micb",
+ "VA DMIC1", "vdd-micb",
+ "MM_DL1", "MultiMedia1 Playback",
+ "MM_DL2", "MultiMedia2 Playback",
+ "MultiMedia3 Capture", "MM_UL3";
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ mm2-dai-link {
+ link-name = "MultiMedia2";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA2>;
+ };
+ };
+
+ mm3-dai-link {
+ link-name = "MultiMedia3";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA3>;
+ };
+ };
+
+ hdmi-dai-link {
+ link-name = "HDMI Playback";
+ cpu {
+ sound-dai = <&q6afedai TERTIARY_MI2S_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+
+ codec {
+ sound-dai = <&lt9611_codec 0>;
+ };
+ };
+
+ dma-dai-link {
+ link-name = "WSA Playback";
+ cpu {
+ sound-dai = <&q6afedai WSA_CODEC_DMA_RX_0>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+
+ codec {
+ sound-dai = <&left_spkr>, <&right_spkr>, <&swr0 0>, <&wsamacro 0>;
+ };
+ };
+
+ va-dai-link {
+ link-name = "VA Capture";
+ cpu {
+ sound-dai = <&q6afedai VA_CODEC_DMA_TX_0>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+
+ codec {
+ sound-dai = <&vamacro 0>;
+ };
+ };
+};
+
/* CAN */
&spi0 {
status = "okay";
@@ -512,6 +964,26 @@
};
};
+&swr0 {
+ left_spkr: wsa8810-left {
+ compatible = "sdw10217211000";
+ reg = <0 3>;
+ powerdown-gpios = <&tlmm 130 GPIO_ACTIVE_HIGH>;
+ #thermal-sensor-cells = <0>;
+ sound-name-prefix = "SpkrLeft";
+ #sound-dai-cells = <0>;
+ };
+
+ right_spkr: wsa8810-right {
+ compatible = "sdw10217211000";
+ reg = <0 4>;
+ powerdown-gpios = <&tlmm 130 GPIO_ACTIVE_HIGH>;
+ #thermal-sensor-cells = <0>;
+ sound-name-prefix = "SpkrRight";
+ #sound-dai-cells = <0>;
+ };
+};
+
&tlmm {
gpio-reserved-ranges = <40 4>;
gpio-line-names =
@@ -696,6 +1168,87 @@
"HST_WLAN_UART_TX",
"HST_WLAN_UART_RX";
+ lt9611_irq_pin: lt9611-irq {
+ pins = "gpio63";
+ function = "gpio";
+ bias-disable;
+ };
+
+ pcie0_default_state: pcie0-default {
+ clkreq {
+ pins = "gpio80";
+ function = "pci_e0";
+ bias-pull-up;
+ };
+
+ reset-n {
+ pins = "gpio79";
+ function = "gpio";
+
+ drive-strength = <2>;
+ output-low;
+ bias-pull-down;
+ };
+
+ wake-n {
+ pins = "gpio81";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie1_default_state: pcie1-default {
+ clkreq {
+ pins = "gpio83";
+ function = "pci_e1";
+ bias-pull-up;
+ };
+
+ reset-n {
+ pins = "gpio82";
+ function = "gpio";
+
+ drive-strength = <2>;
+ output-low;
+ bias-pull-down;
+ };
+
+ wake-n {
+ pins = "gpio84";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie2_default_state: pcie2-default {
+ clkreq {
+ pins = "gpio86";
+ function = "pci_e2";
+ bias-pull-up;
+ };
+
+ reset-n {
+ pins = "gpio85";
+ function = "gpio";
+
+ drive-strength = <2>;
+ output-low;
+ bias-pull-down;
+ };
+
+ wake-n {
+ pins = "gpio87";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
sdc2_default_state: sdc2-default {
clk {
pins = "sdc2_clk";
@@ -706,13 +1259,13 @@
cmd {
pins = "sdc2_cmd";
bias-pull-up;
- drive-strength = <16>;
+ drive-strength = <10>;
};
data {
pins = "sdc2_data";
bias-pull-up;
- drive-strength = <16>;
+ drive-strength = <10>;
};
};
@@ -792,3 +1345,10 @@
vdda-phy-supply = <&vreg_l9a_1p2>;
vdda-pll-supply = <&vreg_l18a_0p92>;
};
+
+&vamacro {
+ pinctrl-0 = <&dmic01_active>;
+ pinctrl-names = "default";
+ vdd-micb-supply = <&vreg_s4a_1p8>;
+ qcom,dmic-sample-rate = <600000>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
index 8ed7dd39f6e3..07c8b2c926c0 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
@@ -298,40 +298,6 @@
regulator-max-microvolt = <1128000>;
};
- /*
- * pp2040_s5a (smps5) and pp1056_s4a (smps4) are just
- * inputs to other rails on AOP-managed PMICs on trogdor.
- * The system is already configured to manage these rails
- * automatically (enable when needed, adjust voltage for
- * headroom) so we won't specify anything here.
- *
- * NOTE: though the rails have a voltage implied by their
- * name, the automatic headroom calculation might not result
- * in them being that voltage. ...and that's OK.
- * Specifically the only point of these rails is to provide
- * an input source for other rails and if we can satisify the
- * needs of those other rails with a lower source voltage then
- * we save power.
- */
-
- pp1200_l1a: ldo1 {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- pp1000_l2a: ldo2 {
- regulator-min-microvolt = <944000>;
- regulator-max-microvolt = <1056000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- pp1000_l3a: ldo3 {
- regulator-min-microvolt = <968000>;
- regulator-max-microvolt = <1064000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
vdd_qlink_lv:
vdd_qlink_lv_ck:
vdd_qusb_hs0_core:
@@ -350,24 +316,6 @@
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
- pp2700_l5a: ldo5 {
- regulator-min-microvolt = <2704000>;
- regulator-max-microvolt = <2704000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- ebi0_cal:
- ebi1_cal:
- vddio_ck_ebi0:
- vddio_ck_ebi1:
- vddio_ebi0:
- vddq:
- pp600_l6a: ldo6 {
- regulator-min-microvolt = <568000>;
- regulator-max-microvolt = <648000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
vdd_cx_wlan:
pp800_l9a: ldo9 {
regulator-min-microvolt = <488000>;
@@ -404,6 +352,11 @@
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
+ /*
+ * On trogdor this needs to match l10a since we use it to
+ * give power to things like SPI flash which communicate back
+ * on lines powered by l10a. Thus we force it to 1.8V.
+ */
pp1800_l13a: ldo13 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -424,12 +377,6 @@
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
- pp2700_l16a: ldo16 {
- regulator-min-microvolt = <2496000>;
- regulator-max-microvolt = <3304000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
vdda_qusb_hs0_3p1:
vdd_pdphy:
pp3100_l17a: ldo17 {
@@ -463,8 +410,8 @@
};
pp1800_l1c: ldo1 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1616000>;
+ regulator-max-microvolt = <1984000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
@@ -491,25 +438,10 @@
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
- ld_pp1800_esim_l4c:
- vddpx_5:
- pp1800_l4c: ldo4 {
- regulator-min-microvolt = <1648000>;
- regulator-max-microvolt = <3304000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
- vddpx_6:
- pp1800_l5c: ldo5 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- };
-
vddpx_2:
ppvar_l6c: ldo6 {
regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3304000>;
+ regulator-max-microvolt = <2952000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
@@ -936,6 +868,13 @@ ap_spi_fp: &spi10 {
/* PINCTRL - additions to nodes defined in sc7180.dtsi */
+&dp_hot_plug_det {
+ pinconf {
+ pins = "gpio117";
+ bias-disable;
+ };
+};
+
&qspi_cs0 {
pinconf {
pins = "gpio68";
diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
index 22b832fc62e3..1ea3344ab62c 100644
--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
@@ -682,6 +682,11 @@
reg = <0x25b 0x1>;
bits = <1 3>;
};
+
+ gpu_speed_bin: gpu_speed_bin@1d2 {
+ reg = <0x1d2 0x2>;
+ bits = <5 8>;
+ };
};
sdhc_1: sdhci@7c4000 {
@@ -1434,8 +1439,6 @@
qcom,smem-state-names = "ipa-clock-enabled-valid",
"ipa-clock-enabled";
- modem-remoteproc = <&remoteproc_mpss>;
-
status = "disabled";
};
@@ -1468,12 +1471,6 @@
pins = "gpio117";
function = "dp_hot";
};
-
- pinconf {
- pins = "gpio117";
- bias-disable;
- input-enable;
- };
};
qspi_clk: qspi-clk {
@@ -2060,52 +2057,69 @@
#cooling-cells = <2>;
+ nvmem-cells = <&gpu_speed_bin>;
+ nvmem-cell-names = "speed_bin";
+
interconnects = <&gem_noc MASTER_GFX3D 0 &mc_virt SLAVE_EBI1 0>;
interconnect-names = "gfx-mem";
gpu_opp_table: opp-table {
compatible = "operating-points-v2";
+ opp-825000000 {
+ opp-hz = /bits/ 64 <825000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+ opp-peak-kBps = <8532000>;
+ opp-supported-hw = <0x04>;
+ };
+
opp-800000000 {
opp-hz = /bits/ 64 <800000000>;
opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
opp-peak-kBps = <8532000>;
+ opp-supported-hw = <0x07>;
};
opp-650000000 {
opp-hz = /bits/ 64 <650000000>;
opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
opp-peak-kBps = <7216000>;
+ opp-supported-hw = <0x07>;
};
opp-565000000 {
opp-hz = /bits/ 64 <565000000>;
opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
opp-peak-kBps = <5412000>;
+ opp-supported-hw = <0x07>;
};
opp-430000000 {
opp-hz = /bits/ 64 <430000000>;
opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
opp-peak-kBps = <5412000>;
+ opp-supported-hw = <0x07>;
};
opp-355000000 {
opp-hz = /bits/ 64 <355000000>;
opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
opp-peak-kBps = <3072000>;
+ opp-supported-hw = <0x07>;
};
opp-267000000 {
opp-hz = /bits/ 64 <267000000>;
opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
opp-peak-kBps = <3072000>;
+ opp-supported-hw = <0x07>;
};
opp-180000000 {
opp-hz = /bits/ 64 <180000000>;
opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
opp-peak-kBps = <1804000>;
+ opp-supported-hw = <0x07>;
};
};
};
@@ -3355,6 +3369,7 @@
compatible = "qcom,apss-wdt-sc7180", "qcom,kpss-wdt";
reg = <0 0x17c10000 0 0x1000>;
clocks = <&sleep_clk>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
};
timer@17c20000{
@@ -3596,7 +3611,7 @@
};
thermal-zones {
- cpu0-thermal {
+ cpu0_thermal: cpu0-thermal {
polling-delay-passive = <250>;
polling-delay = <0>;
@@ -3645,7 +3660,7 @@
};
};
- cpu1-thermal {
+ cpu1_thermal: cpu1-thermal {
polling-delay-passive = <250>;
polling-delay = <0>;
@@ -3694,7 +3709,7 @@
};
};
- cpu2-thermal {
+ cpu2_thermal: cpu2-thermal {
polling-delay-passive = <250>;
polling-delay = <0>;
@@ -3743,7 +3758,7 @@
};
};
- cpu3-thermal {
+ cpu3_thermal: cpu3-thermal {
polling-delay-passive = <250>;
polling-delay = <0>;
@@ -3792,7 +3807,7 @@
};
};
- cpu4-thermal {
+ cpu4_thermal: cpu4-thermal {
polling-delay-passive = <250>;
polling-delay = <0>;
@@ -3841,7 +3856,7 @@
};
};
- cpu5-thermal {
+ cpu5_thermal: cpu5-thermal {
polling-delay-passive = <250>;
polling-delay = <0>;
@@ -3890,7 +3905,7 @@
};
};
- cpu6-thermal {
+ cpu6_thermal: cpu6-thermal {
polling-delay-passive = <250>;
polling-delay = <0>;
@@ -3931,7 +3946,7 @@
};
};
- cpu7-thermal {
+ cpu7_thermal: cpu7-thermal {
polling-delay-passive = <250>;
polling-delay = <0>;
@@ -3972,7 +3987,7 @@
};
};
- cpu8-thermal {
+ cpu8_thermal: cpu8-thermal {
polling-delay-passive = <250>;
polling-delay = <0>;
@@ -4013,7 +4028,7 @@
};
};
- cpu9-thermal {
+ cpu9_thermal: cpu9-thermal {
polling-delay-passive = <250>;
polling-delay = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
index 37d5cc32f6b6..f91a928466c3 100644
--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
@@ -808,17 +808,19 @@
sdhc_1: sdhci@c0c4000 {
compatible = "qcom,sdm630-sdhci", "qcom,sdhci-msm-v5";
reg = <0x0c0c4000 0x1000>,
- <0x0c0c5000 0x1000>;
- reg-names = "hc", "cqhci";
+ <0x0c0c5000 0x1000>,
+ <0x0c0c8000 0x8000>;
+ reg-names = "hc", "cqhci", "ice";
interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "hc_irq", "pwr_irq";
clocks = <&gcc GCC_SDCC1_APPS_CLK>,
- <&gcc GCC_SDCC1_AHB_CLK>,
- <&xo_board>;
- clock-names = "core", "iface", "xo";
+ <&gcc GCC_SDCC1_AHB_CLK>,
+ <&xo_board>,
+ <&gcc GCC_SDCC1_ICE_CORE_CLK>;
+ clock-names = "core", "iface", "xo", "ice";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
index 7cc236575ee2..c4ac6f5dc008 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
@@ -415,7 +415,9 @@
&gcc {
protected-clocks = <GCC_QSPI_CORE_CLK>,
<GCC_QSPI_CORE_CLK_SRC>,
- <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+ <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+ <GCC_LPASS_Q6_AXI_CLK>,
+ <GCC_LPASS_SWAY_CLK>;
};
&gpu {
@@ -1112,11 +1114,11 @@
reg = <0x10>;
// CAM0_RST_N
- reset-gpios = <&tlmm 9 0>;
+ reset-gpios = <&tlmm 9 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&cam0_default>;
gpios = <&tlmm 13 0>,
- <&tlmm 9 0>;
+ <&tlmm 9 GPIO_ACTIVE_LOW>;
clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
clock-names = "xvclk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
new file mode 100644
index 000000000000..8f617f7b6d34
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SDM845 OnePlus 6(T) (enchilada / fajita) common device tree source
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+#include "sdm845.dtsi"
+#include "pm8998.dtsi"
+#include "pmi8998.dtsi"
+
+/delete-node/ &rmtfs_mem;
+
+/ {
+ aliases {
+ hsuart0 = &uart6;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ label = "Volume keys";
+ autorepeat;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&volume_down_gpio &volume_up_gpio>;
+
+ vol-down {
+ label = "Volume down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ gpios = <&pm8998_gpio 5 GPIO_ACTIVE_LOW>;
+ debounce-interval = <15>;
+ };
+
+ vol-up {
+ label = "Volume up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&pm8998_gpio 6 GPIO_ACTIVE_LOW>;
+ debounce-interval = <15>;
+ };
+ };
+
+ reserved-memory {
+ /*
+ * The rmtfs memory region in downstream is 'dynamically allocated'
+ * but given the same address every time. Hard-code it, since this address is
+ * where the modem firmware expects it to be.
+ */
+ rmtfs_mem: memory@f5b01000 {
+ compatible = "qcom,rmtfs-mem";
+ reg = <0 0xf5b01000 0 0x200000>;
+ no-map;
+
+ qcom,client-id = <1>;
+ qcom,vmid = <15>;
+ };
+
+ /*
+ * Reserving the old rmtfs_mem region also seems to be needed to prevent
+ * random crashes, which are most likely modem-related; more testing is needed.
+ */
+ removed_region: memory@88f00000 {
+ no-map;
+ reg = <0 0x88f00000 0 0x200000>;
+ };
+
+ ramoops: ramoops@ac300000 {
+ compatible = "ramoops";
+ reg = <0 0xac300000 0 0x400000>;
+ record-size = <0x40000>;
+ console-size = <0x40000>;
+ ftrace-size = <0x40000>;
+ pmsg-size = <0x200000>;
+ devinfo-size = <0x1000>;
+ ecc-size = <16>;
+ };
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+ };
+
+ /*
+ * Apparently RPMh does not provide support for PM8998 S4 because it
+ * is always-on; model it as a fixed regulator.
+ */
+ vreg_s4a_1p8: pm8998-smps4 {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_s4a_1p8";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+
+ vin-supply = <&vph_pwr>;
+ };
+
+ /*
+ * The touchscreen regulator seems to be controlled somehow by a gpio.
+ * Model it as a fixed regulator and keep it on. Without schematics we
+ * don't know how this is actually wired up...
+ */
+ ts_1p8_supply: ts-1p8-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "ts_1p8_supply";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&tlmm 88 0>;
+ enable-active-high;
+ regulator-boot-on;
+ };
+};
+
+&adsp_pas {
+ status = "okay";
+ firmware-name = "qcom/sdm845/oneplus6/adsp.mbn";
+};
+
+&apps_rsc {
+ pm8998-rpmh-regulators {
+ compatible = "qcom,pm8998-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+ vdd-s9-supply = <&vph_pwr>;
+ vdd-s10-supply = <&vph_pwr>;
+ vdd-s11-supply = <&vph_pwr>;
+ vdd-s12-supply = <&vph_pwr>;
+ vdd-s13-supply = <&vph_pwr>;
+ vdd-l1-l27-supply = <&vreg_s7a_1p025>;
+ vdd-l2-l8-l17-supply = <&vreg_s3a_1p35>;
+ vdd-l3-l11-supply = <&vreg_s7a_1p025>;
+ vdd-l4-l5-supply = <&vreg_s7a_1p025>;
+ vdd-l6-supply = <&vph_pwr>;
+ vdd-l7-l12-l14-l15-supply = <&vreg_s5a_2p04>;
+ vdd-l9-supply = <&vreg_bob>;
+ vdd-l10-l23-l25-supply = <&vreg_bob>;
+ vdd-l13-l19-l21-supply = <&vreg_bob>;
+ vdd-l16-l28-supply = <&vreg_bob>;
+ vdd-l18-l22-supply = <&vreg_bob>;
+ vdd-l20-l24-supply = <&vreg_bob>;
+ vdd-l26-supply = <&vreg_s3a_1p35>;
+ vin-lvs-1-2-supply = <&vreg_s4a_1p8>;
+
+ vreg_s3a_1p35: smps3 {
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ };
+
+ vreg_s5a_2p04: smps5 {
+ regulator-min-microvolt = <1904000>;
+ regulator-max-microvolt = <2040000>;
+ };
+
+ vreg_s7a_1p025: smps7 {
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1028000>;
+ };
+
+ vdda_mipi_dsi0_pll:
+ vdda_qlink_lv:
+ vdda_ufs1_core:
+ vdda_usb1_ss_core:
+ vreg_l1a_0p875: ldo1 {
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2a_1p2: ldo2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
+ };
+
+ vreg_l5a_0p8: ldo5 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7a_1p8: ldo7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vdda_qusb_hs0_1p8:
+ vreg_l12a_1p8: ldo12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14a_1p88: ldo14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-always-on;
+ };
+
+ vreg_l17a_1p3: ldo17 {
+ regulator-min-microvolt = <1304000>;
+ regulator-max-microvolt = <1304000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l20a_2p95: ldo20 {
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vdda_qusb_hs0_3p1:
+ vreg_l24a_3p075: ldo24 {
+ regulator-min-microvolt = <3088000>;
+ regulator-max-microvolt = <3088000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l25a_3p3: ldo25 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3312000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vdda_mipi_dsi0_1p2:
+ vdda_ufs1_1p2:
+ vreg_l26a_1p2: ldo26 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l28a_3p0: ldo28 {
+ regulator-min-microvolt = <2856000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ pmi8998-rpmh-regulators {
+ compatible = "qcom,pmi8998-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ vdd-bob-supply = <&vph_pwr>;
+
+ vreg_bob: bob {
+ regulator-min-microvolt = <3312000>;
+ regulator-max-microvolt = <3600000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+ regulator-allow-bypass;
+ };
+ };
+
+ pm8005-rpmh-regulators {
+ compatible = "qcom,pm8005-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+
+ vreg_s3c_0p6: smps3 {
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <600000>;
+ };
+ };
+};
+
+&cdsp_pas {
+ status = "okay";
+ firmware-name = "qcom/sdm845/oneplus6/cdsp.mbn";
+};
+
+&dsi0 {
+ status = "okay";
+ vdda-supply = <&vdda_mipi_dsi0_1p2>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /*
+ * Both devices use different panels, but all other properties
+ * are common. The compatible line is declared in the device dts.
+ */
+ display_panel: panel@0 {
+ status = "disabled";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ vddio-supply = <&vreg_l14a_1p88>;
+
+ reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&panel_reset_pins &panel_te_pin &panel_esd_pin>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+ };
+};
+
+&dsi0_out {
+ remote-endpoint = <&panel_in>;
+ data-lanes = <0 1 2 3>;
+};
+
+&dsi0_phy {
+ status = "okay";
+ vdds-supply = <&vdda_mipi_dsi0_pll>;
+};
+
+&gcc {
+ protected-clocks = <GCC_QSPI_CORE_CLK>,
+ <GCC_QSPI_CORE_CLK_SRC>,
+ <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+ <GCC_LPASS_Q6_AXI_CLK>,
+ <GCC_LPASS_SWAY_CLK>;
+};
+
+&gpu {
+ zap-shader {
+ memory-region = <&gpu_mem>;
+ firmware-name = "qcom/sdm845/oneplus6/a630_zap.mbn";
+ };
+};
+
+&i2c12 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ synaptics-rmi4-i2c@20 {
+ compatible = "syna,rmi4-i2c";
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts-extended = <&tlmm 125 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts_default_pins>;
+
+ vdd-supply = <&vreg_l28a_3p0>;
+ vio-supply = <&ts_1p8_supply>;
+
+ syna,reset-delay-ms = <200>;
+ syna,startup-delay-ms = <200>;
+
+ rmi4-f01@1 {
+ reg = <0x01>;
+ syna,nosleep-mode = <1>;
+ };
+
+ rmi4_f12: rmi4-f12@12 {
+ reg = <0x12>;
+ touchscreen-x-mm = <68>;
+ touchscreen-y-mm = <144>;
+ syna,sensor-type = <1>;
+ syna,rezero-wait-ms = <200>;
+ };
+ };
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_mdp {
+ status = "okay";
+};
+
+/* Modem/wifi */
+&mss_pil {
+ status = "okay";
+ firmware-name = "qcom/sdm845/oneplus6/mba.mbn", "qcom/sdm845/oneplus6/modem.mbn";
+};
+
+&pm8998_gpio {
+ volume_down_gpio: pm8998_gpio5 {
+ pinconf {
+ pins = "gpio5";
+ function = "normal";
+ input-enable;
+ bias-pull-up;
+ qcom,drive-strength = <0>;
+ };
+ };
+
+ volume_up_gpio: pm8998_gpio6 {
+ pinconf {
+ pins = "gpio6";
+ function = "normal";
+ input-enable;
+ bias-pull-up;
+ qcom,drive-strength = <0>;
+ };
+ };
+};
+
+&qupv3_id_1 {
+ status = "okay";
+};
+
+&qupv3_id_0 {
+ status = "okay";
+};
+
+&qup_i2c12_default {
+ mux {
+ pins = "gpio49", "gpio50";
+ function = "qup12";
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
+
+&qup_i2c10_default {
+ pinconf {
+ pins = "gpio55", "gpio56";
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
+
+&qup_uart9_default {
+ pinconf-tx {
+ pins = "gpio4";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ pinconf-rx {
+ pins = "gpio5";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+};
+
+/*
+ * Prevent garbage data on bluetooth UART lines
+ */
+&qup_uart6_default {
+ pinmux {
+ pins = "gpio45", "gpio46", "gpio47", "gpio48";
+ function = "qup6";
+ };
+
+ cts {
+ pins = "gpio45";
+ bias-pull-down;
+ };
+
+ rts-tx {
+ pins = "gpio46", "gpio47";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rx {
+ pins = "gpio48";
+ bias-pull-up;
+ };
+};
+
+&uart6 {
+ status = "okay";
+
+ bluetooth {
+ compatible = "qcom,wcn3990-bt";
+
+ /*
+ * This path is relative to the qca/
+ * subdir under lib/firmware.
+ */
+ firmware-name = "oneplus6/crnv21.bin";
+
+ vddio-supply = <&vreg_s4a_1p8>;
+ vddxo-supply = <&vreg_l7a_1p8>;
+ vddrf-supply = <&vreg_l17a_1p3>;
+ vddch0-supply = <&vreg_l25a_3p3>;
+ max-speed = <3200000>;
+ };
+};
+
+&ufs_mem_hc {
+ status = "okay";
+
+ reset-gpios = <&tlmm 150 GPIO_ACTIVE_LOW>;
+
+ vcc-supply = <&vreg_l20a_2p95>;
+ vcc-max-microamp = <600000>;
+};
+
+&ufs_mem_phy {
+ status = "okay";
+
+ vdda-phy-supply = <&vdda_ufs1_core>;
+ vdda-pll-supply = <&vdda_ufs1_1p2>;
+};
+
+&usb_1 {
+ status = "okay";
+
+ /*
+ * Disable the USB3 clock requirement, as the device only supports
+ * USB2.
+ */
+ qcom,select-utmi-as-pipe-clk;
+};
+
+&usb_1_dwc3 {
+ /*
+ * We don't have the capability to switch modes yet.
+ */
+ dr_mode = "peripheral";
+
+ /* fastest mode for USB 2 */
+ maximum-speed = "high-speed";
+
+ /* Remove USB3 phy as it's unused on this device. */
+ phys = <&usb_1_hsphy>;
+ phy-names = "usb2-phy";
+};
+
+&usb_1_hsphy {
+ status = "okay";
+
+ vdd-supply = <&vdda_usb1_ss_core>;
+ vdda-pll-supply = <&vdda_qusb_hs0_1p8>;
+ vdda-phy-dpdm-supply = <&vdda_qusb_hs0_3p1>;
+
+ qcom,imp-res-offset-value = <8>;
+ qcom,hstx-trim-value = <QUSB2_V2_HSTX_TRIM_21_6_MA>;
+ qcom,preemphasis-level = <QUSB2_V2_PREEMPHASIS_5_PERCENT>;
+ qcom,preemphasis-width = <QUSB2_V2_PREEMPHASIS_WIDTH_HALF_BIT>;
+};
+
+&tlmm {
+ gpio-reserved-ranges = <0 4>, <81 4>;
+
+ tri_state_key_default: tri_state_key_default {
+ mux {
+ pins = "gpio40", "gpio42", "gpio26";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ ts_default_pins: ts-int {
+ mux {
+ pins = "gpio99", "gpio125";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-pull-up;
+ };
+ };
+
+ panel_reset_pins: panel-reset {
+ mux {
+ pins = "gpio6", "gpio25", "gpio26";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-disable;
+ };
+ };
+
+ panel_te_pin: panel-te {
+ mux {
+ pins = "gpio10";
+ function = "mdp_vsync";
+ drive-strength = <2>;
+ bias-disable;
+ input-enable;
+ };
+ };
+
+ panel_esd_pin: panel-esd {
+ mux {
+ pins = "gpio30";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ input-enable;
+ };
+ };
+};
+
+&wifi {
+ status = "okay";
+ vdd-0.8-cx-mx-supply = <&vreg_l5a_0p8>;
+ vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
+ vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
+ vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
+
+ qcom,snoc-host-cap-8bit-quirk;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-enchilada.dts b/arch/arm64/boot/dts/qcom/sdm845-oneplus-enchilada.dts
new file mode 100644
index 000000000000..72842c887617
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-enchilada.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SDM845 OnePlus 6 (enchilada) device tree.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "sdm845-oneplus-common.dtsi"
+
+/ {
+ model = "OnePlus 6";
+ compatible = "oneplus,enchilada", "qcom,sdm845";
+};
+
+&display_panel {
+ status = "okay";
+
+ compatible = "samsung,sofef00";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-fajita.dts b/arch/arm64/boot/dts/qcom/sdm845-oneplus-fajita.dts
new file mode 100644
index 000000000000..969b36dc9e2c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-fajita.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SDM845 OnePlus 6T (fajita) device tree.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "sdm845-oneplus-common.dtsi"
+
+/ {
+ model = "OnePlus 6T";
+ compatible = "oneplus,fajita", "qcom,sdm845";
+};
+
+&display_panel {
+ status = "okay";
+
+ compatible = "samsung,s6e3fc2x01";
+};
+
+&rmi4_f12 {
+ touchscreen-y-mm = <148>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index bcf888381f14..454f794af547 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -2366,8 +2366,6 @@
qcom,smem-state-names = "ipa-clock-enabled-valid",
"ipa-clock-enabled";
- modem-remoteproc = <&mss_pil>;
-
status = "disabled";
};
@@ -4573,6 +4571,7 @@
compatible = "qcom,apss-wdt-sdm845", "qcom,kpss-wdt";
reg = <0 0x17980000 0 0x1000>;
clocks = <&sleep_clk>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
};
apss_shared: mailbox@17990000 {
diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
index 13fdd02cffe6..140db2d5ba31 100644
--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
@@ -13,7 +13,7 @@
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include <dt-bindings/sound/qcom,q6afe.h>
#include <dt-bindings/sound/qcom,q6asm.h>
-#include "sdm845.dtsi"
+#include "sdm850.dtsi"
#include "pm8998.dtsi"
/ {
@@ -302,7 +302,9 @@
&gcc {
protected-clocks = <GCC_QSPI_CORE_CLK>,
<GCC_QSPI_CORE_CLK_SRC>,
- <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+ <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+ <GCC_LPASS_Q6_AXI_CLK>,
+ <GCC_LPASS_SWAY_CLK>;
};
&gpu {
@@ -320,6 +322,8 @@
&i2c3 {
status = "okay";
clock-frequency = <400000>;
+ /* Overwrite pinctrl-0 from sdm845.dtsi */
+ pinctrl-0 = <&qup_i2c3_default &i2c3_hid_active>;
tsel: hid@15 {
compatible = "hid-over-i2c";
@@ -327,9 +331,6 @@
hid-descr-addr = <0x1>;
interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&i2c3_hid_active>;
};
tsc2: hid@2c {
@@ -338,11 +339,6 @@
hid-descr-addr = <0x20>;
interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&i2c3_hid_active>;
-
- status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm850.dtsi b/arch/arm64/boot/dts/qcom/sdm850.dtsi
new file mode 100644
index 000000000000..b1c2cf566c7a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm850.dtsi
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SDM850 SoC device tree source
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "sdm845.dtsi"
+
+&cpu4_opp_table {
+ cpu4_opp33: opp-2841600000 {
+ opp-hz = /bits/ 64 <2841600000>;
+ opp-peak-kBps = <7216000 25497600>;
+ };
+
+ cpu4_opp34: opp-2956800000 {
+ opp-hz = /bits/ 64 <2956800000>;
+ opp-peak-kBps = <7216000 25497600>;
+ turbo-mode;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index 5270bda7418f..e5bb17bc2f46 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -47,8 +47,12 @@
compatible = "qcom,kryo485";
reg = <0x0 0x0>;
enable-method = "psci";
+ capacity-dmips-mhz = <488>;
+ dynamic-power-coefficient = <232>;
next-level-cache = <&L2_0>;
qcom,freq-domain = <&cpufreq_hw 0>;
+ power-domains = <&CPU_PD0>;
+ power-domain-names = "psci";
#cooling-cells = <2>;
L2_0: l2-cache {
compatible = "cache";
@@ -64,8 +68,12 @@
compatible = "qcom,kryo485";
reg = <0x0 0x100>;
enable-method = "psci";
+ capacity-dmips-mhz = <488>;
+ dynamic-power-coefficient = <232>;
next-level-cache = <&L2_100>;
qcom,freq-domain = <&cpufreq_hw 0>;
+ power-domains = <&CPU_PD1>;
+ power-domain-names = "psci";
#cooling-cells = <2>;
L2_100: l2-cache {
compatible = "cache";
@@ -79,8 +87,12 @@
compatible = "qcom,kryo485";
reg = <0x0 0x200>;
enable-method = "psci";
+ capacity-dmips-mhz = <488>;
+ dynamic-power-coefficient = <232>;
next-level-cache = <&L2_200>;
qcom,freq-domain = <&cpufreq_hw 0>;
+ power-domains = <&CPU_PD2>;
+ power-domain-names = "psci";
#cooling-cells = <2>;
L2_200: l2-cache {
compatible = "cache";
@@ -93,8 +105,12 @@
compatible = "qcom,kryo485";
reg = <0x0 0x300>;
enable-method = "psci";
+ capacity-dmips-mhz = <488>;
+ dynamic-power-coefficient = <232>;
next-level-cache = <&L2_300>;
qcom,freq-domain = <&cpufreq_hw 0>;
+ power-domains = <&CPU_PD3>;
+ power-domain-names = "psci";
#cooling-cells = <2>;
L2_300: l2-cache {
compatible = "cache";
@@ -107,8 +123,12 @@
compatible = "qcom,kryo485";
reg = <0x0 0x400>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <369>;
next-level-cache = <&L2_400>;
qcom,freq-domain = <&cpufreq_hw 1>;
+ power-domains = <&CPU_PD4>;
+ power-domain-names = "psci";
#cooling-cells = <2>;
L2_400: l2-cache {
compatible = "cache";
@@ -121,8 +141,12 @@
compatible = "qcom,kryo485";
reg = <0x0 0x500>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <369>;
next-level-cache = <&L2_500>;
qcom,freq-domain = <&cpufreq_hw 1>;
+ power-domains = <&CPU_PD5>;
+ power-domain-names = "psci";
#cooling-cells = <2>;
L2_500: l2-cache {
compatible = "cache";
@@ -135,8 +159,12 @@
compatible = "qcom,kryo485";
reg = <0x0 0x600>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <369>;
next-level-cache = <&L2_600>;
qcom,freq-domain = <&cpufreq_hw 1>;
+ power-domains = <&CPU_PD6>;
+ power-domain-names = "psci";
#cooling-cells = <2>;
L2_600: l2-cache {
compatible = "cache";
@@ -149,14 +177,90 @@
compatible = "qcom,kryo485";
reg = <0x0 0x700>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <421>;
next-level-cache = <&L2_700>;
qcom,freq-domain = <&cpufreq_hw 2>;
+ power-domains = <&CPU_PD7>;
+ power-domain-names = "psci";
#cooling-cells = <2>;
L2_700: l2-cache {
compatible = "cache";
next-level-cache = <&L3_0>;
};
};
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+
+ core1 {
+ cpu = <&CPU1>;
+ };
+
+ core2 {
+ cpu = <&CPU2>;
+ };
+
+ core3 {
+ cpu = <&CPU3>;
+ };
+
+ core4 {
+ cpu = <&CPU4>;
+ };
+
+ core5 {
+ cpu = <&CPU5>;
+ };
+
+ core6 {
+ cpu = <&CPU6>;
+ };
+
+ core7 {
+ cpu = <&CPU7>;
+ };
+ };
+ };
+
+ idle-states {
+ entry-method = "psci";
+
+ LITTLE_CPU_SLEEP_0: cpu-sleep-0-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "little-rail-power-collapse";
+ arm,psci-suspend-param = <0x40000004>;
+ entry-latency-us = <355>;
+ exit-latency-us = <909>;
+ min-residency-us = <3934>;
+ local-timer-stop;
+ };
+
+ BIG_CPU_SLEEP_0: cpu-sleep-1-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "big-rail-power-collapse";
+ arm,psci-suspend-param = <0x40000004>;
+ entry-latency-us = <241>;
+ exit-latency-us = <1461>;
+ min-residency-us = <4488>;
+ local-timer-stop;
+ };
+ };
+
+ domain-idle-states {
+ CLUSTER_SLEEP_0: cluster-sleep-0 {
+ compatible = "domain-idle-state";
+ idle-state-name = "cluster-power-collapse";
+ arm,psci-suspend-param = <0x4100c244>;
+ entry-latency-us = <3263>;
+ exit-latency-us = <6562>;
+ min-residency-us = <9987>;
+ local-timer-stop;
+ };
+ };
};
firmware {
@@ -186,6 +290,59 @@
psci {
compatible = "arm,psci-1.0";
method = "smc";
+
+ CPU_PD0: cpu0 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&LITTLE_CPU_SLEEP_0>;
+ };
+
+ CPU_PD1: cpu1 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&LITTLE_CPU_SLEEP_0>;
+ };
+
+ CPU_PD2: cpu2 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&LITTLE_CPU_SLEEP_0>;
+ };
+
+ CPU_PD3: cpu3 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&LITTLE_CPU_SLEEP_0>;
+ };
+
+ CPU_PD4: cpu4 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&BIG_CPU_SLEEP_0>;
+ };
+
+ CPU_PD5: cpu5 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&BIG_CPU_SLEEP_0>;
+ };
+
+ CPU_PD6: cpu6 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&BIG_CPU_SLEEP_0>;
+ };
+
+ CPU_PD7: cpu7 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&BIG_CPU_SLEEP_0>;
+ };
+
+ CLUSTER_PD: cpu-cluster0 {
+ #power-domain-cells = <0>;
+ domain-idle-states = <&CLUSTER_SLEEP_0>;
+ };
};
reserved-memory {
@@ -1818,6 +1975,7 @@
compatible = "qcom,apss-wdt-sm8150", "qcom,kpss-wdt";
reg = <0 0x17c10000 0 0x1000>;
clocks = <&sleep_clk>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
};
timer@17c20000 {
diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
index dea00f19711d..5b4c5b08434c 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
@@ -24,6 +24,106 @@
stdout-path = "serial0:115200n8";
};
+ thermal-zones {
+ camera-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150l_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ conn-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150b_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mmw-pa1-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150_adc_tm 2>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mmw-pa2-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150l_adc_tm 2>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ skin-msm-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150l_adc_tm 1>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ skin-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150_adc_tm 1>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ xo-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+ };
+
vph_pwr: vph-pwr-regulator {
compatible = "regulator-fixed";
regulator-name = "vph_pwr";
@@ -186,6 +286,13 @@
regulator-max-microvolt = <3008000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
+
+ vreg_l18a_0p9: ldo18 {
+ regulator-name = "vreg_l18a_0p9";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
};
pm8150l-rpmh-regulators {
@@ -358,6 +465,13 @@
firmware-name = "qcom/sm8250/cdsp.mbn";
};
+&gpu {
+ zap-shader {
+ memory-region = <&gpu_mem>;
+ firmware-name = "qcom/sm8250/a650_zap.mbn";
+ };
+};
+
&i2c1 {
status = "okay";
clock-frequency = <1000000>;
@@ -378,6 +492,115 @@
/* rtc6226 @ 64 */
};
+&pm8150_adc {
+ xo-therm@4c {
+ reg = <ADC5_XO_THERM_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+
+ skin-therm@4d {
+ reg = <ADC5_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+
+ pa-therm1@4e {
+ reg = <ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+};
+
+&pm8150_adc_tm {
+ status = "okay";
+
+ xo-therm@0 {
+ reg = <0>;
+ io-channels = <&pm8150_adc ADC5_XO_THERM_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ skin-therm@1 {
+ reg = <1>;
+ io-channels = <&pm8150_adc ADC5_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ pa-therm1@2 {
+ reg = <2>;
+ io-channels = <&pm8150_adc ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
+
+&pm8150b_adc {
+ conn-therm@4f {
+ reg = <ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+};
+
+&pm8150b_adc_tm {
+ status = "okay";
+
+ conn-therm@0 {
+ reg = <0>;
+ io-channels = <&pm8150b_adc ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
+
+&pm8150l_adc_tm {
+ status = "okay";
+
+ camera-flash-therm@0 {
+ reg = <0>;
+ io-channels = <&pm8150l_adc ADC5_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ skin-msm-therm@1 {
+ reg = <1>;
+ io-channels = <&pm8150l_adc ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ pa-therm2@2 {
+ reg = <2>;
+ io-channels = <&pm8150l_adc ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
+
+&pm8150l_adc {
+ camera-flash-therm@4d {
+ reg = <ADC5_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+
+ skin-msm-therm@4e {
+ reg = <ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+
+ pa-therm2@4f {
+ reg = <ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+};
+
&pm8150_rtc {
status = "okay";
};
@@ -426,3 +649,49 @@
vdda-pll-supply = <&vreg_l9a_1p2>;
vdda-pll-max-microamp = <19000>;
};
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_hsphy {
+ status = "okay";
+
+ vdda-pll-supply = <&vreg_l5a_0p875>;
+ vdda18-supply = <&vreg_l12a_1p8>;
+ vdda33-supply = <&vreg_l2a_3p1>;
+};
+
+&usb_1_qmpphy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l9a_1p2>;
+ vdda-pll-supply = <&vreg_l18a_0p9>;
+};
+
+&usb_2 {
+ status = "okay";
+};
+
+&usb_2_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_2_hsphy {
+ status = "okay";
+
+ vdda-pll-supply = <&vreg_l5a_0p875>;
+ vdda18-supply = <&vreg_l12a_1p8>;
+ vdda33-supply = <&vreg_l2a_3p1>;
+};
+
+&usb_2_qmpphy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l9a_1p2>;
+ vdda-pll-supply = <&vreg_l18a_0p9>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index 65acd1f381eb..947e1accae3a 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -4,14 +4,18 @@
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,dispcc-sm8250.h>
#include <dt-bindings/clock/qcom,gcc-sm8250.h>
#include <dt-bindings/clock/qcom,gpucc-sm8250.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/interconnect/qcom,osm-l3.h>
+#include <dt-bindings/interconnect/qcom,sm8250.h>
#include <dt-bindings/mailbox/qcom-ipcc.h>
#include <dt-bindings/power/qcom-aoss-qmp.h>
#include <dt-bindings/power/qcom-rpmpd.h>
+#include <dt-bindings/soc/qcom,apr.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+#include <dt-bindings/sound/qcom,q6afe.h>
#include <dt-bindings/thermal/thermal.h>
/ {
@@ -89,6 +93,8 @@
compatible = "qcom,kryo485";
reg = <0x0 0x0>;
enable-method = "psci";
+ capacity-dmips-mhz = <448>;
+ dynamic-power-coefficient = <205>;
next-level-cache = <&L2_0>;
qcom,freq-domain = <&cpufreq_hw 0>;
#cooling-cells = <2>;
@@ -106,6 +112,8 @@
compatible = "qcom,kryo485";
reg = <0x0 0x100>;
enable-method = "psci";
+ capacity-dmips-mhz = <448>;
+ dynamic-power-coefficient = <205>;
next-level-cache = <&L2_100>;
qcom,freq-domain = <&cpufreq_hw 0>;
#cooling-cells = <2>;
@@ -120,6 +128,8 @@
compatible = "qcom,kryo485";
reg = <0x0 0x200>;
enable-method = "psci";
+ capacity-dmips-mhz = <448>;
+ dynamic-power-coefficient = <205>;
next-level-cache = <&L2_200>;
qcom,freq-domain = <&cpufreq_hw 0>;
#cooling-cells = <2>;
@@ -134,6 +144,8 @@
compatible = "qcom,kryo485";
reg = <0x0 0x300>;
enable-method = "psci";
+ capacity-dmips-mhz = <448>;
+ dynamic-power-coefficient = <205>;
next-level-cache = <&L2_300>;
qcom,freq-domain = <&cpufreq_hw 0>;
#cooling-cells = <2>;
@@ -148,6 +160,8 @@
compatible = "qcom,kryo485";
reg = <0x0 0x400>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <379>;
next-level-cache = <&L2_400>;
qcom,freq-domain = <&cpufreq_hw 1>;
#cooling-cells = <2>;
@@ -162,6 +176,8 @@
compatible = "qcom,kryo485";
reg = <0x0 0x500>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <379>;
next-level-cache = <&L2_500>;
qcom,freq-domain = <&cpufreq_hw 1>;
#cooling-cells = <2>;
@@ -177,6 +193,8 @@
compatible = "qcom,kryo485";
reg = <0x0 0x600>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <379>;
next-level-cache = <&L2_600>;
qcom,freq-domain = <&cpufreq_hw 1>;
#cooling-cells = <2>;
@@ -191,6 +209,8 @@
compatible = "qcom,kryo485";
reg = <0x0 0x700>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <444>;
next-level-cache = <&L2_700>;
qcom,freq-domain = <&cpufreq_hw 2>;
#cooling-cells = <2>;
@@ -199,6 +219,42 @@
next-level-cache = <&L3_0>;
};
};
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+
+ core1 {
+ cpu = <&CPU1>;
+ };
+
+ core2 {
+ cpu = <&CPU2>;
+ };
+
+ core3 {
+ cpu = <&CPU3>;
+ };
+
+ core4 {
+ cpu = <&CPU4>;
+ };
+
+ core5 {
+ cpu = <&CPU5>;
+ };
+
+ core6 {
+ cpu = <&CPU6>;
+ };
+
+ core7 {
+ cpu = <&CPU7>;
+ };
+ };
+ };
};
firmware {
@@ -214,6 +270,13 @@
reg = <0x0 0x80000000 0x0 0x0>;
};
+ mmcx_reg: mmcx-reg {
+ compatible = "regulator-fixed-domain";
+ power-domains = <&rpmhpd SM8250_MMCX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ regulator-name = "MMCX";
+ };
+
pmu {
compatible = "arm,armv8-pmuv3";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
@@ -321,7 +384,7 @@
};
};
- smem: qcom,smem {
+ smem {
compatible = "qcom,smem";
memory-region = <&smem_mem>;
hwlocks = <&tcsr_mutex 3>;
@@ -1168,6 +1231,299 @@
qcom,bcm-voters = <&apps_bcm_voter>;
};
+ pcie0: pci@1c00000 {
+ compatible = "qcom,pcie-sm8250", "snps,dw-pcie";
+ reg = <0 0x01c00000 0 0x3000>,
+ <0 0x60000000 0 0xf1d>,
+ <0 0x60000f20 0 0xa8>,
+ <0 0x60001000 0 0x1000>,
+ <0 0x60100000 0 0x100000>;
+ reg-names = "parf", "dbi", "elbi", "atu", "config";
+ device_type = "pci";
+ linux,pci-domain = <0>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ ranges = <0x01000000 0x0 0x60200000 0 0x60200000 0x0 0x100000>,
+ <0x02000000 0x0 0x60300000 0 0x60300000 0x0 0x3d00000>;
+
+ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 149 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 150 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 151 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 152 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>,
+ <&gcc GCC_PCIE_0_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_0_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_0_SLV_AXI_CLK>,
+ <&gcc GCC_PCIE_0_SLV_Q2A_AXI_CLK>,
+ <&gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>,
+ <&gcc GCC_DDRSS_PCIE_SF_TBU_CLK>;
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave",
+ "slave_q2a",
+ "tbu",
+ "ddrss_sf_tbu";
+
+ iommus = <&apps_smmu 0x1c00 0x7f>;
+ iommu-map = <0x0 &apps_smmu 0x1c00 0x1>,
+ <0x100 &apps_smmu 0x1c01 0x1>;
+
+ resets = <&gcc GCC_PCIE_0_BCR>;
+ reset-names = "pci";
+
+ power-domains = <&gcc PCIE_0_GDSC>;
+
+ phys = <&pcie0_lane>;
+ phy-names = "pciephy";
+
+ status = "disabled";
+ };
+
+ pcie0_phy: phy@1c06000 {
+ compatible = "qcom,sm8250-qmp-gen3x1-pcie-phy";
+ reg = <0 0x01c06000 0 0x1c0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_WIFI_CLKREF_EN>,
+ <&gcc GCC_PCIE0_PHY_REFGEN_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref", "refgen";
+
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "phy";
+
+ assigned-clocks = <&gcc GCC_PCIE0_PHY_REFGEN_CLK>;
+ assigned-clock-rates = <100000000>;
+
+ status = "disabled";
+
+ pcie0_lane: lanes@1c06200 {
+ reg = <0 0x1c06200 0 0x170>, /* tx */
+ <0 0x1c06400 0 0x200>, /* rx */
+ <0 0x1c06800 0 0x1f0>, /* pcs */
+ <0 0x1c06c00 0 0xf4>; /* "pcs_lane" same as pcs_misc? */
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+ clock-names = "pipe0";
+
+ #phy-cells = <0>;
+ clock-output-names = "pcie_0_pipe_clk";
+ };
+ };
+
+ pcie1: pci@1c08000 {
+ compatible = "qcom,pcie-sm8250", "snps,dw-pcie";
+ reg = <0 0x01c08000 0 0x3000>,
+ <0 0x40000000 0 0xf1d>,
+ <0 0x40000f20 0 0xa8>,
+ <0 0x40001000 0 0x1000>,
+ <0 0x40100000 0 0x100000>;
+ reg-names = "parf", "dbi", "elbi", "atu", "config";
+ device_type = "pci";
+ linux,pci-domain = <1>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <2>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ ranges = <0x01000000 0x0 0x40200000 0x0 0x40200000 0x0 0x100000>,
+ <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
+
+ interrupts = <GIC_SPI 306 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 434 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 435 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 438 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 439 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>,
+ <&gcc GCC_PCIE_1_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_1_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_Q2A_AXI_CLK>,
+ <&gcc GCC_PCIE_WIGIG_CLKREF_EN>,
+ <&gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>,
+ <&gcc GCC_DDRSS_PCIE_SF_TBU_CLK>;
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave",
+ "slave_q2a",
+ "ref",
+ "tbu",
+ "ddrss_sf_tbu";
+
+ assigned-clocks = <&gcc GCC_PCIE_1_AUX_CLK>;
+ assigned-clock-rates = <19200000>;
+
+ iommus = <&apps_smmu 0x1c80 0x7f>;
+ iommu-map = <0x0 &apps_smmu 0x1c80 0x1>,
+ <0x100 &apps_smmu 0x1c81 0x1>;
+
+ resets = <&gcc GCC_PCIE_1_BCR>;
+ reset-names = "pci";
+
+ power-domains = <&gcc PCIE_1_GDSC>;
+
+ phys = <&pcie1_lane>;
+ phy-names = "pciephy";
+
+ status = "disabled";
+ };
+
+ pcie1_phy: phy@1c0e000 {
+ compatible = "qcom,sm8250-qmp-gen3x2-pcie-phy";
+ reg = <0 0x01c0e000 0 0x1c0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_WIGIG_CLKREF_EN>,
+ <&gcc GCC_PCIE1_PHY_REFGEN_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref", "refgen";
+
+ resets = <&gcc GCC_PCIE_1_PHY_BCR>;
+ reset-names = "phy";
+
+ assigned-clocks = <&gcc GCC_PCIE1_PHY_REFGEN_CLK>;
+ assigned-clock-rates = <100000000>;
+
+ status = "disabled";
+
+ pcie1_lane: lanes@1c0e200 {
+ reg = <0 0x1c0e200 0 0x170>, /* tx0 */
+ <0 0x1c0e400 0 0x200>, /* rx0 */
+ <0 0x1c0ea00 0 0x1f0>, /* pcs */
+ <0 0x1c0e600 0 0x170>, /* tx1 */
+ <0 0x1c0e800 0 0x200>, /* rx1 */
+ <0 0x1c0ee00 0 0xf4>; /* "pcs_com" same as pcs_misc? */
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>;
+ clock-names = "pipe0";
+
+ #phy-cells = <0>;
+ clock-output-names = "pcie_1_pipe_clk";
+ };
+ };
+
+ pcie2: pci@1c10000 {
+ compatible = "qcom,pcie-sm8250", "snps,dw-pcie";
+ reg = <0 0x01c10000 0 0x3000>,
+ <0 0x64000000 0 0xf1d>,
+ <0 0x64000f20 0 0xa8>,
+ <0 0x64001000 0 0x1000>,
+ <0 0x64100000 0 0x100000>;
+ reg-names = "parf", "dbi", "elbi", "atu", "config";
+ device_type = "pci";
+ linux,pci-domain = <2>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <2>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ ranges = <0x01000000 0x0 0x64200000 0x0 0x64200000 0x0 0x100000>,
+ <0x02000000 0x0 0x64300000 0x0 0x64300000 0x0 0x3d00000>;
+
+ interrupts = <GIC_SPI 236 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 290 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 415 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 416 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 417 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ clocks = <&gcc GCC_PCIE_2_PIPE_CLK>,
+ <&gcc GCC_PCIE_2_AUX_CLK>,
+ <&gcc GCC_PCIE_2_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_2_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_2_SLV_AXI_CLK>,
+ <&gcc GCC_PCIE_2_SLV_Q2A_AXI_CLK>,
+ <&gcc GCC_PCIE_MDM_CLKREF_EN>,
+ <&gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>,
+ <&gcc GCC_DDRSS_PCIE_SF_TBU_CLK>;
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave",
+ "slave_q2a",
+ "ref",
+ "tbu",
+ "ddrss_sf_tbu";
+
+ assigned-clocks = <&gcc GCC_PCIE_2_AUX_CLK>;
+ assigned-clock-rates = <19200000>;
+
+ iommus = <&apps_smmu 0x1d00 0x7f>;
+ iommu-map = <0x0 &apps_smmu 0x1d00 0x1>,
+ <0x100 &apps_smmu 0x1d01 0x1>;
+
+ resets = <&gcc GCC_PCIE_2_BCR>;
+ reset-names = "pci";
+
+ power-domains = <&gcc PCIE_2_GDSC>;
+
+ phys = <&pcie2_lane>;
+ phy-names = "pciephy";
+
+ status = "disabled";
+ };
+
+ pcie2_phy: phy@1c16000 {
+ compatible = "qcom,sm8250-qmp-modem-pcie-phy";
+ reg = <0 0x1c16000 0 0x1c0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_2_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_MDM_CLKREF_EN>,
+ <&gcc GCC_PCIE2_PHY_REFGEN_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref", "refgen";
+
+ resets = <&gcc GCC_PCIE_2_PHY_BCR>;
+ reset-names = "phy";
+
+ assigned-clocks = <&gcc GCC_PCIE2_PHY_REFGEN_CLK>;
+ assigned-clock-rates = <100000000>;
+
+ status = "disabled";
+
+ pcie2_lane: lanes@1c16200 {
+ reg = <0 0x1c16200 0 0x170>, /* tx0 */
+ <0 0x1c16400 0 0x200>, /* rx0 */
+ <0 0x1c16a00 0 0x1f0>, /* pcs */
+ <0 0x1c16600 0 0x170>, /* tx1 */
+ <0 0x1c16800 0 0x200>, /* rx1 */
+ <0 0x1c16e00 0 0xf4>; /* "pcs_com" same as pcs_misc? */
+ clocks = <&gcc GCC_PCIE_2_PIPE_CLK>;
+ clock-names = "pipe0";
+
+ #phy-cells = <0>;
+ clock-output-names = "pcie_2_pipe_clk";
+ };
+ };
+
ufs_mem_hc: ufshc@1d84000 {
compatible = "qcom,sm8250-ufshc", "qcom,ufshc",
"jedec,ufs-2.0";
@@ -1253,15 +1609,169 @@
#hwlock-cells = <1>;
};
+ wsamacro: codec@3240000 {
+ compatible = "qcom,sm8250-lpass-wsa-macro";
+ reg = <0 0x03240000 0 0x1000>;
+ clocks = <&audiocc 1>,
+ <&audiocc 0>,
+ <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&aoncc 0>,
+ <&vamacro>;
+
+ clock-names = "mclk", "npl", "macro", "dcodec", "va", "fsgen";
+
+ #clock-cells = <0>;
+ clock-frequency = <9600000>;
+ clock-output-names = "mclk";
+ #sound-dai-cells = <1>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&wsa_swr_active>;
+ };
+
+ swr0: soundwire-controller@3250000 {
+ reg = <0 0x03250000 0 0x2000>;
+ compatible = "qcom,soundwire-v1.5.1";
+ interrupts = <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&wsamacro>;
+ clock-names = "iface";
+
+ qcom,din-ports = <2>;
+ qcom,dout-ports = <6>;
+
+ qcom,ports-sinterval-low = /bits/ 8 <0x07 0x1f 0x3f 0x07 0x1f 0x3f 0x0f 0x0f>;
+ qcom,ports-offset1 = /bits/ 8 <0x01 0x02 0x0c 0x06 0x12 0x0d 0x07 0x0a>;
+ qcom,ports-offset2 = /bits/ 8 <0xff 0x00 0x1f 0xff 0x00 0x1f 0x00 0x00>;
+ qcom,ports-block-pack-mode = /bits/ 8 <0x0 0x0 0x1 0x0 0x0 0x1 0x0 0x0>;
+
+ #sound-dai-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ };
+
+ audiocc: clock-controller@3300000 {
+ compatible = "qcom,sm8250-lpass-audiocc";
+ reg = <0 0x03300000 0 0x30000>;
+ #clock-cells = <1>;
+ clocks = <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
+ clock-names = "core", "audio", "bus";
+ };
+
+ vamacro: codec@3370000 {
+ compatible = "qcom,sm8250-lpass-va-macro";
+ reg = <0 0x03370000 0 0x1000>;
+ clocks = <&aoncc 0>,
+ <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
+
+ clock-names = "mclk", "macro", "dcodec";
+
+ #clock-cells = <0>;
+ clock-frequency = <9600000>;
+ clock-output-names = "fsgen";
+ #sound-dai-cells = <1>;
+ };
+
+ aoncc: clock-controller@3380000 {
+ compatible = "qcom,sm8250-lpass-aoncc";
+ reg = <0 0x03380000 0 0x40000>;
+ #clock-cells = <1>;
+ clocks = <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6afecc LPASS_CLK_ID_TX_CORE_NPL_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
+ clock-names = "core", "audio", "bus";
+ };
+
+ lpass_tlmm: pinctrl@33c0000 {
+ compatible = "qcom,sm8250-lpass-lpi-pinctrl";
+ reg = <0 0x033c0000 0x0 0x20000>,
+ <0 0x03550000 0x0 0x10000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&lpass_tlmm 0 0 14>;
+
+ clocks = <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
+ clock-names = "core", "audio";
+
+ wsa_swr_active: wsa-swr-active-pins {
+ clk {
+ pins = "gpio10";
+ function = "wsa_swr_clk";
+ drive-strength = <2>;
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ data {
+ pins = "gpio11";
+ function = "wsa_swr_data";
+ drive-strength = <2>;
+ slew-rate = <1>;
+ bias-bus-hold;
+
+ };
+ };
+
+ wsa_swr_sleep: wsa-swr-sleep-pins {
+ clk {
+ pins = "gpio10";
+ function = "wsa_swr_clk";
+ drive-strength = <2>;
+ input-enable;
+ bias-pull-down;
+ };
+
+ data {
+ pins = "gpio11";
+ function = "wsa_swr_data";
+ drive-strength = <2>;
+ input-enable;
+ bias-pull-down;
+ };
+ };
+
+ dmic01_active: dmic01-active-pins {
+ clk {
+ pins = "gpio6";
+ function = "dmic1_clk";
+ drive-strength = <8>;
+ output-high;
+ };
+ data {
+ pins = "gpio7";
+ function = "dmic1_data";
+ drive-strength = <8>;
+ input-enable;
+ };
+ };
+
+ dmic01_sleep: dmic01-sleep-pins {
+ clk {
+ pins = "gpio6";
+ function = "dmic1_clk";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+
+ data {
+ pins = "gpio7";
+ function = "dmic1_data";
+ drive-strength = <2>;
+ bias-pull-down;
+ input-enable;
+ };
+ };
+ };
+
gpu: gpu@3d00000 {
- /*
- * note: the amd,imageon compatible makes it possible
- * to use the drm/msm driver without the display node,
- * make sure to remove it when display node is added
- */
compatible = "qcom,adreno-650.2",
- "qcom,adreno",
- "amd,imageon";
+ "qcom,adreno";
#stream-id-cells = <16>;
reg = <0 0x03d00000 0 0x40000>;
@@ -1557,6 +2067,9 @@
};
};
+ sound: sound {
+ };
+
usb_1_hsphy: phy@88e3000 {
compatible = "qcom,sm8250-usb-hs-phy",
"qcom,usb-snps-hs-7nm-phy";
@@ -1657,7 +2170,7 @@
clocks = <&gcc GCC_SDCC2_AHB_CLK>,
<&gcc GCC_SDCC2_APPS_CLK>,
- <&xo_board>;
+ <&rpmhcc RPMH_CXO_CLK>;
clock-names = "iface", "core", "xo";
iommus = <&apps_smmu 0x4a0 0x0>;
qcom,dll-config = <0x0007642c>;
@@ -1758,6 +2271,12 @@
};
};
+ system-cache-controller@9200000 {
+ compatible = "qcom,sm8250-llcc";
+ reg = <0 0x09200000 0 0x1d0000>, <0 0x09600000 0 0x50000>;
+ reg-names = "llcc_base", "llcc_broadcast_base";
+ };
+
usb_2: usb@a8f8800 {
compatible = "qcom,sm8250-dwc3", "qcom,dwc3";
reg = <0 0x0a8f8800 0 0x400>;
@@ -1803,6 +2322,299 @@
};
};
+ mdss: mdss@ae00000 {
+ compatible = "qcom,sdm845-mdss";
+ reg = <0 0x0ae00000 0 0x1000>;
+ reg-names = "mdss";
+
+ interconnects = <&gem_noc MASTER_AMPSS_M0 &config_noc SLAVE_DISPLAY_CFG>,
+ <&mmss_noc MASTER_MDP_PORT0 &mc_virt SLAVE_EBI_CH0>,
+ <&mmss_noc MASTER_MDP_PORT1 &mc_virt SLAVE_EBI_CH0>;
+ interconnect-names = "notused", "mdp0-mem", "mdp1-mem";
+
+ power-domains = <&dispcc MDSS_GDSC>;
+
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&gcc GCC_DISP_HF_AXI_CLK>,
+ <&gcc GCC_DISP_SF_AXI_CLK>,
+ <&dispcc DISP_CC_MDSS_MDP_CLK>;
+ clock-names = "iface", "bus", "nrt_bus", "core";
+
+ assigned-clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>;
+ assigned-clock-rates = <460000000>;
+
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ iommus = <&apps_smmu 0x820 0x402>;
+
+ status = "disabled";
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ mdss_mdp: mdp@ae01000 {
+ compatible = "qcom,sdm845-dpu";
+ reg = <0 0x0ae01000 0 0x8f000>,
+ <0 0x0aeb0000 0 0x2008>;
+ reg-names = "mdp", "vbif";
+
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&gcc GCC_DISP_HF_AXI_CLK>,
+ <&dispcc DISP_CC_MDSS_MDP_CLK>,
+ <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
+ clock-names = "iface", "bus", "core", "vsync";
+
+ assigned-clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>,
+ <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
+ assigned-clock-rates = <460000000>,
+ <19200000>;
+
+ operating-points-v2 = <&mdp_opp_table>;
+ power-domains = <&rpmhpd SM8250_MMCX>;
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dpu_intf1_out: endpoint {
+ remote-endpoint = <&dsi0_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dpu_intf2_out: endpoint {
+ remote-endpoint = <&dsi1_in>;
+ };
+ };
+ };
+
+ mdp_opp_table: mdp-opp-table {
+ compatible = "operating-points-v2";
+
+ opp-200000000 {
+ opp-hz = /bits/ 64 <200000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-345000000 {
+ opp-hz = /bits/ 64 <345000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+
+ opp-460000000 {
+ opp-hz = /bits/ 64 <460000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
+ };
+
+ dsi0: dsi@ae94000 {
+ compatible = "qcom,mdss-dsi-ctrl";
+ reg = <0 0x0ae94000 0 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
+ <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
+ <&dispcc DISP_CC_MDSS_PCLK0_CLK>,
+ <&dispcc DISP_CC_MDSS_ESC0_CLK>,
+ <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&gcc GCC_DISP_HF_AXI_CLK>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus";
+
+ operating-points-v2 = <&dsi_opp_table>;
+ power-domains = <&rpmhpd SM8250_MMCX>;
+
+ phys = <&dsi0_phy>;
+ phy-names = "dsi";
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ remote-endpoint = <&dpu_intf1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi0_out: endpoint {
+ };
+ };
+ };
+ };
+
+ dsi0_phy: dsi-phy@ae94400 {
+ compatible = "qcom,dsi-phy-7nm";
+ reg = <0 0x0ae94400 0 0x200>,
+ <0 0x0ae94600 0 0x280>,
+ <0 0x0ae94900 0 0x260>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "ref";
+
+ status = "disabled";
+ };
+
+ dsi1: dsi@ae96000 {
+ compatible = "qcom,mdss-dsi-ctrl";
+ reg = <0 0x0ae96000 0 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK>,
+ <&dispcc DISP_CC_MDSS_BYTE1_INTF_CLK>,
+ <&dispcc DISP_CC_MDSS_PCLK1_CLK>,
+ <&dispcc DISP_CC_MDSS_ESC1_CLK>,
+ <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&gcc GCC_DISP_HF_AXI_CLK>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus";
+
+ operating-points-v2 = <&dsi_opp_table>;
+ power-domains = <&rpmhpd SM8250_MMCX>;
+
+ phys = <&dsi1_phy>;
+ phy-names = "dsi";
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi1_in: endpoint {
+ remote-endpoint = <&dpu_intf2_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi1_out: endpoint {
+ };
+ };
+ };
+ };
+
+ dsi1_phy: dsi-phy@ae96400 {
+ compatible = "qcom,dsi-phy-7nm";
+ reg = <0 0x0ae96400 0 0x200>,
+ <0 0x0ae96600 0 0x280>,
+ <0 0x0ae96900 0 0x260>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "ref";
+
+ status = "disabled";
+
+ dsi_opp_table: dsi-opp-table {
+ compatible = "operating-points-v2";
+
+ opp-187500000 {
+ opp-hz = /bits/ 64 <187500000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-358000000 {
+ opp-hz = /bits/ 64 <358000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+ };
+ };
+ };
+
+ dispcc: clock-controller@af00000 {
+ compatible = "qcom,sm8250-dispcc";
+ reg = <0 0x0af00000 0 0x20000>;
+ mmcx-supply = <&mmcx_reg>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&dsi0_phy 0>,
+ <&dsi0_phy 1>,
+ <&dsi1_phy 0>,
+ <&dsi1_phy 1>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <&sleep_clk>;
+ clock-names = "bi_tcxo",
+ "dsi0_phy_pll_out_byteclk",
+ "dsi0_phy_pll_out_dsiclk",
+ "dsi1_phy_pll_out_byteclk",
+ "dsi1_phy_pll_out_dsiclk",
+ "dp_link_clk_divsel_ten",
+ "dp_vco_divided_clk_src_mux",
+ "dptx1_phy_pll_link_clk",
+ "dptx1_phy_pll_vco_div_clk",
+ "dptx2_phy_pll_link_clk",
+ "dptx2_phy_pll_vco_div_clk",
+ "edp_phy_pll_link_clk",
+ "edp_phy_pll_vco_div_clk",
+ "sleep_clk";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
pdc: interrupt-controller@b220000 {
compatible = "qcom,sm8250-pdc", "qcom,pdc";
reg = <0 0x0b220000 0 0x30000>, <0 0x17c000f0 0 0x60>;
@@ -1880,6 +2692,37 @@
gpio-ranges = <&tlmm 0 0 180>;
wakeup-parent = <&pdc>;
+ pri_mi2s_active: pri-mi2s-active {
+ sclk {
+ pins = "gpio138";
+ function = "mi2s0_sck";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ ws {
+ pins = "gpio141";
+ function = "mi2s0_ws";
+ drive-strength = <8>;
+ output-high;
+ };
+
+ data0 {
+ pins = "gpio139";
+ function = "mi2s0_data0";
+ drive-strength = <8>;
+ bias-disable;
+ output-high;
+ };
+
+ data1 {
+ pins = "gpio140";
+ function = "mi2s0_data1";
+ drive-strength = <8>;
+ output-high;
+ };
+ };
+
qup_i2c0_default: qup-i2c0-default {
mux {
pins = "gpio28", "gpio29";
@@ -2476,6 +3319,30 @@
function = "qup18";
};
};
+
+ tert_mi2s_active: tert-mi2s-active {
+ sck {
+ pins = "gpio133";
+ function = "mi2s2_sck";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ data0 {
+ pins = "gpio134";
+ function = "mi2s2_data0";
+ drive-strength = <8>;
+ bias-disable;
+ output-high;
+ };
+
+ ws {
+ pins = "gpio135";
+ function = "mi2s2_ws";
+ drive-strength = <8>;
+ output-high;
+ };
+ };
};
apps_smmu: iommu@15000000 {
@@ -2620,6 +3487,60 @@
label = "lpass";
qcom,remote-pid = <2>;
+ apr {
+ compatible = "qcom,apr-v2";
+ qcom,glink-channels = "apr_audio_svc";
+ qcom,apr-domain = <APR_DOMAIN_ADSP>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ apr-service@3 {
+ reg = <APR_SVC_ADSP_CORE>;
+ compatible = "qcom,q6core";
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+ };
+
+ q6afe: apr-service@4 {
+ compatible = "qcom,q6afe";
+ reg = <APR_SVC_AFE>;
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+ q6afedai: dais {
+ compatible = "qcom,q6afe-dais";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #sound-dai-cells = <1>;
+ };
+
+ q6afecc: cc {
+ compatible = "qcom,q6afe-clocks";
+ #clock-cells = <2>;
+ };
+ };
+
+ q6asm: apr-service@7 {
+ compatible = "qcom,q6asm";
+ reg = <APR_SVC_ASM>;
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+ q6asmdai: dais {
+ compatible = "qcom,q6asm-dais";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #sound-dai-cells = <1>;
+ iommus = <&apps_smmu 0x1801 0x0>;
+ };
+ };
+
+ q6adm: apr-service@8 {
+ compatible = "qcom,q6adm";
+ reg = <APR_SVC_ADM>;
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+ q6routing: routing {
+ compatible = "qcom,q6adm-routing";
+ #sound-dai-cells = <0>;
+ };
+ };
+ };
+
fastrpc {
compatible = "qcom,fastrpc";
qcom,glink-channels = "fastrpcglink-apps-dsp";
@@ -2661,6 +3582,7 @@
compatible = "qcom,apss-wdt-sm8250", "qcom,kpss-wdt";
reg = <0 0x17c10000 0 0x1000>;
clocks = <&sleep_clk>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
};
timer@17c20000 {
diff --git a/arch/arm64/boot/dts/qcom/sm8350-mtp.dts b/arch/arm64/boot/dts/qcom/sm8350-mtp.dts
new file mode 100644
index 000000000000..8923657579fb
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8350-mtp.dts
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, Linaro Limited
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include "sm8350.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. sm8350 MTP";
+ compatible = "qcom,sm8350-mtp", "qcom,sm8350";
+
+ aliases {
+ serial0 = &uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&apps_rsc {
+ pm8350-rpmh-regulators {
+ compatible = "qcom,pm8350-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+ vdd-s9-supply = <&vph_pwr>;
+ vdd-s10-supply = <&vph_pwr>;
+ vdd-s11-supply = <&vph_pwr>;
+ vdd-s12-supply = <&vph_pwr>;
+
+ vdd-l1-l4-supply = <&vreg_s11b_0p95>;
+ vdd-l2-l7-supply = <&vreg_bob>;
+ vdd-l3-l5-supply = <&vreg_bob>;
+ vdd-l6-l9-l10-supply = <&vreg_s11b_0p95>;
+ vdd-l8-supply = <&vreg_s2c_0p8>;
+
+ vreg_s10b_1p8: smps10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vreg_s11b_0p95: smps11 {
+ regulator-min-microvolt = <752000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ vreg_s12b_1p25: smps12 {
+ regulator-min-microvolt = <1224000>;
+ regulator-max-microvolt = <1360000>;
+ };
+
+ vreg_l1b_0p88: ldo1 {
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2b_3p07: ldo2 {
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3b_0p9: ldo3 {
+ regulator-min-microvolt = <904000>;
+ regulator-max-microvolt = <904000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5b_0p88: ldo5 {
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <888000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6b_1p2: ldo6 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1208000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7b_2p96: ldo7 {
+ regulator-min-microvolt = <2400000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9b_1p2: ldo9 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ pm8350c-rpmh-regulators {
+ compatible = "qcom,pm8350c-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+ vdd-s9-supply = <&vph_pwr>;
+ vdd-s10-supply = <&vph_pwr>;
+
+ vdd-l1-l12-supply = <&vreg_s1c_1p86>;
+ vdd-l2-l8-supply = <&vreg_s1c_1p86>;
+ vdd-l3-l4-l5-l7-l13-supply = <&vreg_bob>;
+ vdd-l6-l9-l11-supply = <&vreg_bob>;
+ vdd-l10-supply = <&vreg_s12b_1p25>;
+
+ vdd-bob-supply = <&vph_pwr>;
+
+ vreg_s1c_1p86: smps1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1952000>;
+ };
+
+ vreg_s2c_0p8: smps2 {
+ regulator-min-microvolt = <640000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ vreg_s10c_1p05: smps10 {
+ regulator-min-microvolt = <1048000>;
+ regulator-max-microvolt = <1128000>;
+ };
+
+ vreg_bob: bob {
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+ };
+
+ vreg_l1c_1p8: ldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c_1p8: ldo2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3c_3p0: ldo3 {
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4c_uim1: ldo4 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5c_uim2: ldo5 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6c_1p8: ldo6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7c_3p0: ldo7 {
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8c_1p8: ldo8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9c_2p96: ldo9 {
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10c_1p2: ldo10 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11c_2p96: ldo11 {
+ regulator-min-microvolt = <2400000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12c_1p8: ldo12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13c_3p0: ldo13 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&qupv3_id_1 {
+ status = "okay";
+};
+
+&tlmm {
+ gpio-reserved-ranges = <52 8>;
+};
+
+&uart2 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
new file mode 100644
index 000000000000..5ef460458f5c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, Linaro Limited
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/mailbox/qcom-ipcc.h>
+#include <dt-bindings/power/qcom-aoss-qmp.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
+#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+
+/ {
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ clocks {
+ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <38400000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <32000>;
+ #clock-cells = <0>;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo685";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ L3_0: l3-cache {
+ compatible = "cache";
+ };
+ };
+ };
+
+ CPU1: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo685";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_100>;
+ L2_100: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU2: cpu@200 {
+ device_type = "cpu";
+ compatible = "qcom,kryo685";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ next-level-cache = <&L2_200>;
+ L2_200: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU3: cpu@300 {
+ device_type = "cpu";
+ compatible = "qcom,kryo685";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ next-level-cache = <&L2_300>;
+ L2_300: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU4: cpu@400 {
+ device_type = "cpu";
+ compatible = "qcom,kryo685";
+ reg = <0x0 0x400>;
+ enable-method = "psci";
+ next-level-cache = <&L2_400>;
+ L2_400: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU5: cpu@500 {
+ device_type = "cpu";
+ compatible = "qcom,kryo685";
+ reg = <0x0 0x500>;
+ enable-method = "psci";
+ next-level-cache = <&L2_500>;
+ L2_500: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU6: cpu@600 {
+ device_type = "cpu";
+ compatible = "qcom,kryo685";
+ reg = <0x0 0x600>;
+ enable-method = "psci";
+ next-level-cache = <&L2_600>;
+ L2_600: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU7: cpu@700 {
+ device_type = "cpu";
+ compatible = "qcom,kryo685";
+ reg = <0x0 0x700>;
+ enable-method = "psci";
+ next-level-cache = <&L2_700>;
+ L2_700: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+ };
+
+ firmware {
+ scm: scm {
+ compatible = "qcom,scm-sm8350", "qcom,scm";
+ #reset-cells = <1>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+ reg = <0x0 0x80000000 0x0 0x0>;
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ reserved_memory: reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ hyp_mem: memory@80000000 {
+ reg = <0x0 0x80000000 0x0 0x600000>;
+ no-map;
+ };
+
+ xbl_aop_mem: memory@80700000 {
+ no-map;
+ reg = <0x0 0x80700000 0x0 0x160000>;
+ };
+
+ cmd_db: memory@80860000 {
+ compatible = "qcom,cmd-db";
+ reg = <0x0 0x80860000 0x0 0x20000>;
+ no-map;
+ };
+
+ reserved_xbl_uefi_log: memory@80880000 {
+ reg = <0x0 0x80880000 0x0 0x14000>;
+ no-map;
+ };
+
+ smem_mem: memory@80900000 {
+ reg = <0x0 0x80900000 0x0 0x200000>;
+ no-map;
+ };
+
+ cpucp_fw_mem: memory@80b00000 {
+ reg = <0x0 0x80b00000 0x0 0x100000>;
+ no-map;
+ };
+
+ cdsp_secure_heap: memory@80c00000 {
+ reg = <0x0 0x80c00000 0x0 0x4600000>;
+ no-map;
+ };
+
+ pil_camera_mem: memory@85200000 {
+ reg = <0x0 0x85200000 0x0 0x500000>;
+ no-map;
+ };
+
+ pil_video_mem: memory@85700000 {
+ reg = <0x0 0x85700000 0x0 0x500000>;
+ no-map;
+ };
+
+ pil_cvp_mem: memory@85c00000 {
+ reg = <0x0 0x85c00000 0x0 0x500000>;
+ no-map;
+ };
+
+ pil_adsp_mem: memory@86100000 {
+ reg = <0x0 0x86100000 0x0 0x2100000>;
+ no-map;
+ };
+
+ pil_slpi_mem: memory@88200000 {
+ reg = <0x0 0x88200000 0x0 0x1500000>;
+ no-map;
+ };
+
+ pil_cdsp_mem: memory@89700000 {
+ reg = <0x0 0x89700000 0x0 0x1e00000>;
+ no-map;
+ };
+
+ pil_ipa_fw_mem: memory@8b500000 {
+ reg = <0x0 0x8b500000 0x0 0x10000>;
+ no-map;
+ };
+
+ pil_ipa_gsi_mem: memory@8b510000 {
+ reg = <0x0 0x8b510000 0x0 0xa000>;
+ no-map;
+ };
+
+ pil_gpu_mem: memory@8b51a000 {
+ reg = <0x0 0x8b51a000 0x0 0x2000>;
+ no-map;
+ };
+
+ pil_spss_mem: memory@8b600000 {
+ reg = <0x0 0x8b600000 0x0 0x100000>;
+ no-map;
+ };
+
+ pil_modem_mem: memory@8b800000 {
+ reg = <0x0 0x8b800000 0x0 0x10000000>;
+ no-map;
+ };
+
+ hyp_reserved_mem: memory@d0000000 {
+ reg = <0x0 0xd0000000 0x0 0x800000>;
+ no-map;
+ };
+
+ pil_trustedvm_mem: memory@d0800000 {
+ reg = <0x0 0xd0800000 0x0 0x76f7000>;
+ no-map;
+ };
+
+ qrtr_shbuf: memory@d7ef7000 {
+ reg = <0x0 0xd7ef7000 0x0 0x9000>;
+ no-map;
+ };
+
+ chan0_shbuf: memory@d7f00000 {
+ reg = <0x0 0xd7f00000 0x0 0x80000>;
+ no-map;
+ };
+
+ chan1_shbuf: memory@d7f80000 {
+ reg = <0x0 0xd7f80000 0x0 0x80000>;
+ no-map;
+ };
+
+ removed_mem: memory@d8800000 {
+ reg = <0x0 0xd8800000 0x0 0x6800000>;
+ no-map;
+ };
+ };
+
+ smem: qcom,smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ soc: soc@0 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0 0 0 0 0x10 0>;
+ dma-ranges = <0 0 0 0 0x10 0>;
+ compatible = "simple-bus";
+
+ gcc: clock-controller@100000 {
+ compatible = "qcom,gcc-sm8350";
+ reg = <0x0 0x00100000 0x0 0x1f0000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ clock-names = "bi_tcxo", "sleep_clk";
+ clocks = <&rpmhcc RPMH_CXO_CLK>, <&sleep_clk>;
+ };
+
+ ipcc: mailbox@408000 {
+ compatible = "qcom,sm8350-ipcc", "qcom,ipcc";
+ reg = <0 0x00408000 0 0x1000>;
+ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ #mbox-cells = <2>;
+ };
+
+ qupv3_id_1: geniqup@9c0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0x0 0x009c0000 0x0 0x6000>;
+ clock-names = "m-ahb", "s-ahb";
+ clocks = <&gcc 121>,
+ <&gcc 122>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "disabled";
+
+ uart2: serial@98c000 {
+ compatible = "qcom,geni-debug-uart";
+ reg = <0 0x0098c000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc 83>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_uart3_default_state>;
+ interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ };
+
+ tcsr_mutex: hwlock@1f40000 {
+ compatible = "qcom,tcsr-mutex";
+ reg = <0x0 0x01f40000 0x0 0x40000>;
+ #hwlock-cells = <1>;
+ };
+
+ pdc: interrupt-controller@b220000 {
+ compatible = "qcom,sm8350-pdc", "qcom,pdc";
+ reg = <0 0x0b220000 0 0x30000>, <0 0x17c000f0 0 0x60>;
+ qcom,pdc-ranges = <0 480 40>, <40 140 14>, <54 263 1>, <55 306 4>,
+ <59 312 3>, <62 374 2>, <64 434 2>, <66 438 3>,
+ <69 86 1>, <70 520 54>, <124 609 31>, <155 63 1>,
+ <156 716 12>;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+ };
+
+ aoss_qmp: qmp@c300000 {
+ compatible = "qcom,sm8350-aoss-qmp";
+ reg = <0 0x0c300000 0 0x100000>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ #clock-cells = <0>;
+ #power-domain-cells = <1>;
+ };
+
+ tlmm: pinctrl@f100000 {
+ compatible = "qcom,sm8350-tlmm";
+ reg = <0 0x0f100000 0 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 203>;
+
+ qup_uart3_default_state: qup-uart3-default-state {
+ rx {
+ pins = "gpio18";
+ function = "qup3";
+ };
+ tx {
+ pins = "gpio19";
+ function = "qup3";
+ };
+ };
+ };
+
+ intc: interrupt-controller@17a00000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x0 0x17a00000 0x0 0x10000>, /* GICD */
+ <0x0 0x17a60000 0x0 0x100000>; /* GICR * 8 */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ timer@17c20000 {
+ compatible = "arm,armv7-timer-mem";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ reg = <0x0 0x17c20000 0x0 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@17c21000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0x17c21000 0x0 0x1000>,
+ <0x0 0x17c22000 0x0 0x1000>;
+ };
+
+ frame@17c23000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0x17c23000 0x0 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17c25000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0x17c25000 0x0 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17c27000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0x17c27000 0x0 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17c29000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0x17c29000 0x0 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17c2b000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0x17c2b000 0x0 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17c2d000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0x17c2d000 0x0 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ apps_rsc: rsc@18200000 {
+ label = "apps_rsc";
+ compatible = "qcom,rpmh-rsc";
+ reg = <0x0 0x18200000 0x0 0x10000>,
+ <0x0 0x18210000 0x0 0x10000>,
+ <0x0 0x18220000 0x0 0x10000>;
+ reg-names = "drv-0", "drv-1", "drv-2";
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,tcs-offset = <0xd00>;
+ qcom,drv-id = <2>;
+ qcom,tcs-config = <ACTIVE_TCS 2>, <SLEEP_TCS 3>,
+ <WAKE_TCS 3>, <CONTROL_TCS 1>;
+
+ rpmhcc: clock-controller {
+ compatible = "qcom,sm8350-rpmh-clk";
+ #clock-cells = <1>;
+ clock-names = "xo";
+ clocks = <&xo_board>;
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index 3b8b03705917..f2de2fa0c8b8 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -8,6 +8,7 @@ dtb-$(CONFIG_ARCH_R8A774A1) += r8a774a1-hihope-rzg2m-rev2.dtb
dtb-$(CONFIG_ARCH_R8A774A1) += r8a774a1-hihope-rzg2m-rev2-ex.dtb
dtb-$(CONFIG_ARCH_R8A774A1) += r8a774a1-hihope-rzg2m-rev2-ex-idk-1110wr.dtb
+dtb-$(CONFIG_ARCH_R8A774B1) += r8a774b1-beacon-rzg2n-kit.dtb
dtb-$(CONFIG_ARCH_R8A774B1) += r8a774b1-hihope-rzg2n.dtb
dtb-$(CONFIG_ARCH_R8A774B1) += r8a774b1-hihope-rzg2n-ex.dtb
dtb-$(CONFIG_ARCH_R8A774B1) += r8a774b1-hihope-rzg2n-ex-idk-1110wr.dtb
@@ -21,6 +22,7 @@ dtb-$(CONFIG_ARCH_R8A774C0) += r8a774c0-ek874.dtb
dtb-$(CONFIG_ARCH_R8A774C0) += r8a774c0-ek874-idk-2121wr.dtb
dtb-$(CONFIG_ARCH_R8A774C0) += r8a774c0-ek874-mipi-2.1.dtb
+dtb-$(CONFIG_ARCH_R8A774E1) += r8a774e1-beacon-rzg2h-kit.dtb
dtb-$(CONFIG_ARCH_R8A774E1) += r8a774e1-hihope-rzg2h.dtb
dtb-$(CONFIG_ARCH_R8A774E1) += r8a774e1-hihope-rzg2h-ex.dtb
dtb-$(CONFIG_ARCH_R8A774E1) += r8a774e1-hihope-rzg2h-ex-idk-1110wr.dtb
diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
index e66b5b36e489..30c169b08536 100644
--- a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
+++ b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
@@ -5,23 +5,24 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
+#include <dt-bindings/clk/versaclock.h>
/ {
backlight_lvds: backlight-lvds {
compatible = "pwm-backlight";
power-supply = <&reg_lcd>;
- enable-gpios = <&gpio_exp1 3 GPIO_ACTIVE_LOW>;
- pwms = <&pwm2 0 50000>;
+ enable-gpios = <&gpio_exp1 3 GPIO_ACTIVE_HIGH>;
+ pwms = <&pwm2 0 25000>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
};
- backlight_rgb: backlight-rgb {
+ backlight_dpi: backlight-dpi {
compatible = "pwm-backlight";
power-supply = <&reg_lcd>;
enable-gpios = <&gpio_exp1 7 GPIO_ACTIVE_LOW>;
- pwms = <&pwm0 0 50000>;
- brightness-levels = <0 4 8 16 32 64 128 255>;
+ pwms = <&pwm0 0 25000>;
+ brightness-levels = <0 25 33 50 63 75 88 100>;
default-brightness-level = <6>;
};
@@ -39,38 +40,38 @@
keys {
compatible = "gpio-keys";
- key-1 {
+ key-1 { /* S19 */
gpios = <&gpio4 6 GPIO_ACTIVE_LOW>;
- linux,code = <KEY_1>;
- label = "Switch-1";
+ linux,code = <KEY_UP>;
+ label = "Up";
wakeup-source;
debounce-interval = <20>;
};
- key-2 {
+ key-2 { /* S20 */
gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
- linux,code = <KEY_2>;
- label = "Switch-2";
+ linux,code = <KEY_LEFT>;
+ label = "Left";
wakeup-source;
debounce-interval = <20>;
};
- key-3 {
+ key-3 { /* S21 */
gpios = <&gpio5 17 GPIO_ACTIVE_LOW>;
- linux,code = <KEY_3>;
- label = "Switch-3";
+ linux,code = <KEY_DOWN>;
+ label = "Down";
wakeup-source;
debounce-interval = <20>;
};
- key-4 {
+ key-4 { /* S22 */
gpios = <&gpio5 20 GPIO_ACTIVE_LOW>;
- linux,code = <KEY_4>;
- label = "Switch-4";
+ linux,code = <KEY_RIGHT>;
+ label = "Right";
wakeup-source;
debounce-interval = <20>;
};
- key-5 {
+ key-5 { /* S23 */
gpios = <&gpio5 22 GPIO_ACTIVE_LOW>;
- linux,code = <KEY_5>;
- label = "Switch-4";
+ linux,code = <KEY_ENTER>;
+ label = "Center";
wakeup-source;
debounce-interval = <20>;
};
@@ -118,9 +119,9 @@
hback-porch = <40>;
vfront-porch = <13>;
vback-porch = <29>;
- vsync-len = <3>;
+ vsync-len = <1>;
hsync-active = <1>;
- vsync-active = <1>;
+ vsync-active = <3>;
de-active = <1>;
pixelclk-active = <0>;
};
@@ -135,7 +136,7 @@
rgb {
/* Different LCD with compatible timings */
compatible = "rocktech,rk070er9427";
- backlight = <&backlight_rgb>;
+ backlight = <&backlight_dpi>;
enable-gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>;
power-supply = <&reg_lcd>;
port {
@@ -150,7 +151,7 @@
regulator-name = "audio-1.8V";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- gpio = <&gpio_exp2 7 GPIO_ACTIVE_HIGH>;
+ gpio = <&gpio_exp4 1 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
@@ -200,15 +201,12 @@
vccq_sdhi0: regulator-vccq-sdhi0 {
compatible = "regulator-gpio";
-
regulator-name = "SDHI0 VccQ";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
-
gpios = <&gpio6 30 GPIO_ACTIVE_HIGH>;
gpios-states = <1>;
states = <3300000 1>, <1800000 0>;
- regulator-always-on;
};
/* External DU dot clocks */
@@ -248,12 +246,6 @@
};
};
-&audio_clk_a {
- clock-frequency = <24576000>;
- assigned-clocks = <&versaclock6_bb 4>;
- assigned-clock-rates = <24576000>;
-};
-
&audio_clk_b {
clock-frequency = <22579200>;
};
@@ -272,21 +264,6 @@
status = "okay";
};
-&du {
- pinctrl-0 = <&du_pins>;
- pinctrl-names = "default";
- status = "okay";
-
- clocks = <&cpg CPG_MOD 724>,
- <&cpg CPG_MOD 723>,
- <&cpg CPG_MOD 722>,
- <&versaclock5 1>,
- <&x302_clk>,
- <&versaclock5 2>;
- clock-names = "du.0", "du.1", "du.2",
- "dclkin.0", "dclkin.1", "dclkin.2";
-};
-
&du_out_rgb {
remote-endpoint = <&rgb_panel>;
};
@@ -342,7 +319,7 @@
&i2c2 {
status = "okay";
- clock-frequency = <100000>;
+ clock-frequency = <400000>;
pinctrl-0 = <&i2c2_pins>;
pinctrl-names = "default";
@@ -373,12 +350,36 @@
#clock-cells = <1>;
clocks = <&x304_clk>;
clock-names = "xin";
- /* CSI0_MCLK, CSI1_MCLK, AUDIO_CLKIN, USB_HUB_MCLK_BB */
+
assigned-clocks = <&versaclock6_bb 1>,
<&versaclock6_bb 2>,
<&versaclock6_bb 3>,
<&versaclock6_bb 4>;
assigned-clock-rates = <24000000>, <24000000>, <24000000>, <24576000>;
+
+ OUT1 {
+ idt,mode = <VC5_CMOS>;
+ idt,voltage-microvolt = <1800000>;
+ idt,slew-percent = <100>;
+ };
+
+ OUT2 {
+ idt,mode = <VC5_CMOS>;
+ idt,voltage-microvolt = <1800000>;
+ idt,slew-percent = <100>;
+ };
+
+ OUT3 {
+ idt,mode = <VC5_CMOS>;
+ idt,voltage-microvolt = <3300000>;
+ idt,slew-percent = <100>;
+ };
+
+ OUT4 {
+ idt,mode = <VC5_CMOS>;
+ idt,voltage-microvolt = <3300000>;
+ idt,slew-percent = <100>;
+ };
};
};
@@ -392,13 +393,14 @@
&i2c5 {
status = "okay";
- clock-frequency = <100000>;
+ clock-frequency = <400000>;
pinctrl-0 = <&i2c5_pins>;
pinctrl-names = "default";
codec: wm8962@1a {
compatible = "wlf,wm8962";
reg = <0x1a>;
+ clocks = <&versaclock6_bb 3>;
DCVDD-supply = <&reg_audio>;
DBVDD-supply = <&reg_audio>;
AVDD-supply = <&reg_audio>;
@@ -481,6 +483,13 @@
};
};
+&msiof1 {
+ pinctrl-0 = <&msiof1_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ cs-gpios = <&gpio3 10 GPIO_ACTIVE_LOW>;
+};
+
&ohci0 {
dr_mode = "otg";
status = "okay";
@@ -534,6 +543,11 @@
bias-pull-down;
};
+ msiof1_pins: msiof1 {
+ groups = "msiof1_clk_g", "msiof1_rxd_g", "msiof1_txd_g";
+ function = "msiof1";
+ };
+
pwm0_pins: pwm0 {
groups = "pwm0";
function = "pwm0";
@@ -541,7 +555,7 @@
pwm2_pins: pwm2 {
groups = "pwm2_a";
- function = "pwm2_a";
+ function = "pwm2";
};
sdhi0_pins: sd0 {
@@ -562,7 +576,7 @@
};
sound_clk_pins: sound_clk {
- groups = "audio_clk_a_a";
+ groups = "audio_clk_a_a", "audio_clk_b_a";
function = "audio_clk";
};
@@ -613,23 +627,6 @@
status = "okay";
- clocks = <&cpg CPG_MOD 1005>,
- <&cpg CPG_MOD 1006>, <&cpg CPG_MOD 1007>,
- <&cpg CPG_MOD 1008>, <&cpg CPG_MOD 1009>,
- <&cpg CPG_MOD 1010>, <&cpg CPG_MOD 1011>,
- <&cpg CPG_MOD 1012>, <&cpg CPG_MOD 1013>,
- <&cpg CPG_MOD 1014>, <&cpg CPG_MOD 1015>,
- <&cpg CPG_MOD 1022>, <&cpg CPG_MOD 1023>,
- <&cpg CPG_MOD 1024>, <&cpg CPG_MOD 1025>,
- <&cpg CPG_MOD 1026>, <&cpg CPG_MOD 1027>,
- <&cpg CPG_MOD 1028>, <&cpg CPG_MOD 1029>,
- <&cpg CPG_MOD 1030>, <&cpg CPG_MOD 1031>,
- <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
- <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
- <&cpg CPG_MOD 1019>, <&cpg CPG_MOD 1018>,
- <&audio_clk_a>, <&audio_clk_b>, <&audio_clk_c>,
- <&cpg CPG_CORE R8A774A1_CLK_S0D4>;
-
ports {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
index 8ac167aa18f0..8d3a4d6ee885 100644
--- a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
@@ -4,6 +4,7 @@
*/
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clk/versaclock.h>
/ {
memory@48000000 {
@@ -12,11 +13,6 @@
reg = <0x0 0x48000000 0x0 0x78000000>;
};
- memory@600000000 {
- device_type = "memory";
- reg = <0x6 0x00000000 0x0 0x80000000>;
- };
-
osc_32k: osc_32k {
compatible = "fixed-clock";
#clock-cells = <0>;
@@ -89,7 +85,6 @@
pinctrl-names = "default";
uart-has-rtscts;
status = "okay";
- max-speed = <4000000>;
bluetooth {
compatible = "brcm,bcm43438-bt";
@@ -98,6 +93,7 @@
device-wakeup-gpios = <&pca9654 5 GPIO_ACTIVE_HIGH>;
clocks = <&osc_32k>;
clock-names = "extclk";
+ max-speed = <4000000>;
};
};
@@ -109,7 +105,7 @@
&i2c4 {
status = "okay";
- clock-frequency = <400000>;
+ clock-frequency = <100000>;
pca9654: gpio@20 {
compatible = "onnn,pca9654";
@@ -148,7 +144,7 @@
};
eeprom@50 {
- compatible = "microchip,at24c64", "atmel,24c64";
+ compatible = "microchip,24c64", "atmel,24c64";
pagesize = <32>;
read-only; /* Manufacturing EEPROM programmed at factory */
reg = <0x50>;
@@ -170,7 +166,32 @@
<&versaclock5 2>,
<&versaclock5 3>,
<&versaclock5 4>;
+
assigned-clock-rates = <33333333>, <33333333>, <50000000>, <125000000>;
+
+ OUT1 {
+ idt,mode = <VC5_CMOS>;
+ idt,voltage-microvolt = <1800000>;
+ idt,slew-percent = <100>;
+ };
+
+ OUT2 {
+ idt,mode = <VC5_CMOS>;
+ idt,voltage-microvolt = <1800000>;
+ idt,slew-percent = <100>;
+ };
+
+ OUT3 {
+ idt,mode = <VC5_CMOS>;
+ idt,voltage-microvolt = <1800000>;
+ idt,slew-percent = <100>;
+ };
+
+ OUT4 {
+ idt,mode = <VC5_CMOS>;
+ idt,voltage-microvolt = <3300000>;
+ idt,slew-percent = <100>;
+ };
};
};
@@ -291,6 +312,8 @@
vqmmc-supply = <&reg_1p8v>;
bus-width = <8>;
mmc-hs200-1_8v;
+ no-sd;
+ no-sdio;
non-removable;
fixed-emmc-driver-type = <1>;
status = "okay";
@@ -303,11 +326,3 @@
&usb3s0_clk {
clock-frequency = <100000000>;
};
-
-&vspb {
- status = "okay";
-};
-
-&vspi0 {
- status = "okay";
-};
diff --git a/arch/arm64/boot/dts/renesas/hihope-common.dtsi b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
index 2eda9f66ae81..7a3da9b06f67 100644
--- a/arch/arm64/boot/dts/renesas/hihope-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
@@ -328,6 +328,8 @@
vqmmc-supply = <&reg_1p8v>;
bus-width = <8>;
mmc-hs200-1_8v;
+ no-sd;
+ no-sdio;
non-removable;
fixed-emmc-driver-type = <1>;
status = "okay";
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
index 2c5b057c30c6..501cb05da228 100644
--- a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
+++ b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
@@ -26,4 +26,45 @@
chosen {
stdout-path = "serial0:115200n8";
};
+
+ memory@600000000 {
+ device_type = "memory";
+ reg = <0x6 0x00000000 0x0 0x80000000>;
+ };
+};
+
+&du {
+ pinctrl-0 = <&du_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ clocks = <&cpg CPG_MOD 724>,
+ <&cpg CPG_MOD 723>,
+ <&cpg CPG_MOD 722>,
+ <&versaclock5 1>,
+ <&x302_clk>,
+ <&versaclock5 2>;
+ clock-names = "du.0", "du.1", "du.2",
+ "dclkin.0", "dclkin.1", "dclkin.2";
+};
+
+/* Reference versaclock instead of audio_clk_a */
+&rcar_sound {
+ clocks = <&cpg CPG_MOD 1005>,
+ <&cpg CPG_MOD 1006>, <&cpg CPG_MOD 1007>,
+ <&cpg CPG_MOD 1008>, <&cpg CPG_MOD 1009>,
+ <&cpg CPG_MOD 1010>, <&cpg CPG_MOD 1011>,
+ <&cpg CPG_MOD 1012>, <&cpg CPG_MOD 1013>,
+ <&cpg CPG_MOD 1014>, <&cpg CPG_MOD 1015>,
+ <&cpg CPG_MOD 1022>, <&cpg CPG_MOD 1023>,
+ <&cpg CPG_MOD 1024>, <&cpg CPG_MOD 1025>,
+ <&cpg CPG_MOD 1026>, <&cpg CPG_MOD 1027>,
+ <&cpg CPG_MOD 1028>, <&cpg CPG_MOD 1029>,
+ <&cpg CPG_MOD 1030>, <&cpg CPG_MOD 1031>,
+ <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+ <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+ <&cpg CPG_MOD 1019>, <&cpg CPG_MOD 1018>,
+ <&versaclock6_bb 4>, <&audio_clk_b>,
+ <&audio_clk_c>,
+ <&cpg CPG_CORE R8A774A1_CLK_S0D4>;
};
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
index d37ec42a1caa..d64fb8b1b86c 100644
--- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
@@ -835,6 +835,21 @@
status = "disabled";
};
+ usb2_clksel: clock-controller@e6590630 {
+ compatible = "renesas,r8a774a1-rcar-usb2-clock-sel",
+ "renesas,rcar-gen3-usb2-clock-sel";
+ reg = <0 0xe6590630 0 0x02>;
+ clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>,
+ <&usb_extal_clk>, <&usb3s0_clk>;
+ clock-names = "ehci_ohci", "hs-usb-if",
+ "usb_extal", "usb_xtal";
+ #clock-cells = <0>;
+ power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>;
+ resets = <&cpg 703>, <&cpg 704>;
+ reset-names = "ehci_ohci", "hs-usb-if";
+ status = "disabled";
+ };
+
usb_dmac0: dma-controller@e65a0000 {
compatible = "renesas,r8a774a1-usb-dmac",
"renesas,usb-dmac";
@@ -2302,6 +2317,23 @@
status = "disabled";
};
+ rpc: spi@ee200000 {
+ compatible = "renesas,r8a774a1-rpc-if",
+ "renesas,rcar-gen3-rpc-if";
+ reg = <0 0xee200000 0 0x200>,
+ <0 0x08000000 0 0x4000000>,
+ <0 0xee208000 0 0x100>;
+ reg-names = "regs", "dirmap", "wbuf";
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 917>;
+ clock-names = "rpc";
+ power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>;
+ resets = <&cpg 917>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
gic: interrupt-controller@f1010000 {
compatible = "arm,gic-400";
#interrupt-cells = <3>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts b/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
new file mode 100644
index 000000000000..71763f4402a7
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020, Compass Electronics Group, LLC
+ */
+
+/dts-v1/;
+
+#include "r8a774b1.dtsi"
+#include "beacon-renesom-som.dtsi"
+#include "beacon-renesom-baseboard.dtsi"
+
+/ {
+ model = "Beacon Embedded Works RZ/G2N Development Kit";
+ compatible = "beacon,beacon-rzg2n", "renesas,r8a774b1";
+
+ aliases {
+ serial0 = &scif2;
+ serial1 = &hscif0;
+ serial2 = &hscif1;
+ serial3 = &scif0;
+ serial4 = &hscif2;
+ serial5 = &scif5;
+ serial6 = &scif4;
+ ethernet0 = &avb;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&du {
+ pinctrl-0 = <&du_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ clocks = <&cpg CPG_MOD 724>,
+ <&cpg CPG_MOD 723>,
+ <&cpg CPG_MOD 721>,
+ <&versaclock5 1>,
+ <&x302_clk>,
+ <&versaclock5 2>;
+ clock-names = "du.0", "du.1", "du.3",
+ "dclkin.0", "dclkin.1", "dclkin.3";
+};
+
+/* Reference versaclock instead of audio_clk_a */
+&rcar_sound {
+ clocks = <&cpg CPG_MOD 1005>,
+ <&cpg CPG_MOD 1006>, <&cpg CPG_MOD 1007>,
+ <&cpg CPG_MOD 1008>, <&cpg CPG_MOD 1009>,
+ <&cpg CPG_MOD 1010>, <&cpg CPG_MOD 1011>,
+ <&cpg CPG_MOD 1012>, <&cpg CPG_MOD 1013>,
+ <&cpg CPG_MOD 1014>, <&cpg CPG_MOD 1015>,
+ <&cpg CPG_MOD 1022>, <&cpg CPG_MOD 1023>,
+ <&cpg CPG_MOD 1024>, <&cpg CPG_MOD 1025>,
+ <&cpg CPG_MOD 1026>, <&cpg CPG_MOD 1027>,
+ <&cpg CPG_MOD 1028>, <&cpg CPG_MOD 1029>,
+ <&cpg CPG_MOD 1030>, <&cpg CPG_MOD 1031>,
+ <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+ <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+ <&cpg CPG_MOD 1019>, <&cpg CPG_MOD 1018>,
+ <&versaclock6_bb 4>, <&audio_clk_b>,
+ <&audio_clk_c>,
+ <&cpg CPG_CORE R8A774B1_CLK_S0D4>;
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
index 83523916d360..5b05474dc272 100644
--- a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
@@ -709,6 +709,21 @@
status = "disabled";
};
+ usb2_clksel: clock-controller@e6590630 {
+ compatible = "renesas,r8a774b1-rcar-usb2-clock-sel",
+ "renesas,rcar-gen3-usb2-clock-sel";
+ reg = <0 0xe6590630 0 0x02>;
+ clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>,
+ <&usb_extal_clk>, <&usb3s0_clk>;
+ clock-names = "ehci_ohci", "hs-usb-if",
+ "usb_extal", "usb_xtal";
+ #clock-cells = <0>;
+ power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>;
+ resets = <&cpg 703>, <&cpg 704>;
+ reset-names = "ehci_ohci", "hs-usb-if";
+ status = "disabled";
+ };
+
usb_dmac0: dma-controller@e65a0000 {
compatible = "renesas,r8a774b1-usb-dmac",
"renesas,usb-dmac";
@@ -2160,6 +2175,23 @@
status = "disabled";
};
+ rpc: spi@ee200000 {
+ compatible = "renesas,r8a774b1-rpc-if",
+ "renesas,rcar-gen3-rpc-if";
+ reg = <0 0xee200000 0 0x200>,
+ <0 0x08000000 0 0x4000000>,
+ <0 0xee208000 0 0x100>;
+ reg-names = "regs", "dirmap", "wbuf";
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 917>;
+ clock-names = "rpc";
+ power-domains = <&sysc R8A774B1_PD_ALWAYS_ON>;
+ resets = <&cpg 917>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
sata: sata@ee300000 {
compatible = "renesas,sata-r8a774b1",
"renesas,rcar-gen3-sata";
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index e0e54342cd4c..20fa3caa050e 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -1654,6 +1654,23 @@
status = "disabled";
};
+ rpc: spi@ee200000 {
+ compatible = "renesas,r8a774c0-rpc-if",
+ "renesas,rcar-gen3-rpc-if";
+ reg = <0 0xee200000 0 0x200>,
+ <0 0x08000000 0 0x4000000>,
+ <0 0xee208000 0 0x100>;
+ reg-names = "regs", "dirmap", "wbuf";
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 917>;
+ clock-names = "rpc";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 917>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
gic: interrupt-controller@f1010000 {
compatible = "arm,gic-400";
#interrupt-cells = <3>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts b/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
new file mode 100644
index 000000000000..273f062f2909
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020, Compass Electronics Group, LLC
+ */
+
+/dts-v1/;
+
+#include "r8a774e1.dtsi"
+#include "beacon-renesom-som.dtsi"
+#include "beacon-renesom-baseboard.dtsi"
+
+/ {
+ model = "Beacon Embedded Works RZ/G2H Development Kit";
+ compatible = "beacon,beacon-rzg2h", "renesas,r8a774e1";
+
+ aliases {
+ serial0 = &scif2;
+ serial1 = &hscif0;
+ serial2 = &hscif1;
+ serial3 = &scif0;
+ serial4 = &hscif2;
+ serial5 = &scif5;
+ serial6 = &scif4;
+ ethernet0 = &avb;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@500000000 {
+ device_type = "memory";
+ reg = <0x5 0x00000000 0x0 0x80000000>;
+ };
+};
+
+&du {
+ pinctrl-0 = <&du_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ clocks = <&cpg CPG_MOD 724>,
+ <&cpg CPG_MOD 723>,
+ <&cpg CPG_MOD 721>,
+ <&versaclock5 1>,
+ <&x302_clk>,
+ <&versaclock5 2>;
+ clock-names = "du.0", "du.1", "du.3",
+ "dclkin.0", "dclkin.1", "dclkin.3";
+};
+
+/* Reference versaclock instead of audio_clk_a */
+&rcar_sound {
+ clocks = <&cpg CPG_MOD 1005>,
+ <&cpg CPG_MOD 1006>, <&cpg CPG_MOD 1007>,
+ <&cpg CPG_MOD 1008>, <&cpg CPG_MOD 1009>,
+ <&cpg CPG_MOD 1010>, <&cpg CPG_MOD 1011>,
+ <&cpg CPG_MOD 1012>, <&cpg CPG_MOD 1013>,
+ <&cpg CPG_MOD 1014>, <&cpg CPG_MOD 1015>,
+ <&cpg CPG_MOD 1022>, <&cpg CPG_MOD 1023>,
+ <&cpg CPG_MOD 1024>, <&cpg CPG_MOD 1025>,
+ <&cpg CPG_MOD 1026>, <&cpg CPG_MOD 1027>,
+ <&cpg CPG_MOD 1028>, <&cpg CPG_MOD 1029>,
+ <&cpg CPG_MOD 1030>, <&cpg CPG_MOD 1031>,
+ <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+ <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+ <&cpg CPG_MOD 1019>, <&cpg CPG_MOD 1018>,
+ <&versaclock6_bb 4>, <&audio_clk_b>,
+ <&audio_clk_c>,
+ <&cpg CPG_CORE R8A774E1_CLK_S0D4>;
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
index 1333b02d623a..8eb006cbd9af 100644
--- a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
@@ -890,6 +890,21 @@
status = "disabled";
};
+ usb2_clksel: clock-controller@e6590630 {
+ compatible = "renesas,r8a774e1-rcar-usb2-clock-sel",
+ "renesas,rcar-gen3-usb2-clock-sel";
+ reg = <0 0xe6590630 0 0x02>;
+ clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>,
+ <&usb_extal_clk>, <&usb3s0_clk>;
+ clock-names = "ehci_ohci", "hs-usb-if",
+ "usb_extal", "usb_xtal";
+ #clock-cells = <0>;
+ power-domains = <&sysc R8A774E1_PD_ALWAYS_ON>;
+ resets = <&cpg 703>, <&cpg 704>;
+ reset-names = "ehci_ohci", "hs-usb-if";
+ status = "disabled";
+ };
+
usb_dmac0: dma-controller@e65a0000 {
compatible = "renesas,r8a774e1-usb-dmac",
"renesas,usb-dmac";
@@ -2393,6 +2408,23 @@
status = "disabled";
};
+ rpc: spi@ee200000 {
+ compatible = "renesas,r8a774e1-rpc-if",
+ "renesas,rcar-gen3-rpc-if";
+ reg = <0 0xee200000 0 0x200>,
+ <0 0x08000000 0 0x4000000>,
+ <0 0xee208000 0 0x100>;
+ reg-names = "regs", "dirmap", "wbuf";
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 917>;
+ clock-names = "rpc";
+ power-domains = <&sysc R8A774E1_PD_ALWAYS_ON>;
+ resets = <&cpg 917>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
sata: sata@ee300000 {
compatible = "renesas,sata-r8a774e1",
"renesas,rcar-gen3-sata";
diff --git a/arch/arm64/boot/dts/renesas/r8a77951.dtsi b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
index 9d60bcf69e4f..5c39152e4570 100644
--- a/arch/arm64/boot/dts/renesas/r8a77951.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
@@ -616,6 +616,71 @@
resets = <&cpg 407>;
};
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a7795", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 125>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 125>;
+ status = "disabled";
+ };
+
+ tmu1: timer@e6fc0000 {
+ compatible = "renesas,tmu-r8a7795", "renesas,tmu";
+ reg = <0 0xe6fc0000 0 0x30>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 124>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 124>;
+ status = "disabled";
+ };
+
+ tmu2: timer@e6fd0000 {
+ compatible = "renesas,tmu-r8a7795", "renesas,tmu";
+ reg = <0 0xe6fd0000 0 0x30>;
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 123>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 123>;
+ status = "disabled";
+ };
+
+ tmu3: timer@e6fe0000 {
+ compatible = "renesas,tmu-r8a7795", "renesas,tmu";
+ reg = <0 0xe6fe0000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 122>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 122>;
+ status = "disabled";
+ };
+
+ tmu4: timer@ffc00000 {
+ compatible = "renesas,tmu-r8a7795", "renesas,tmu";
+ reg = <0 0xffc00000 0 0x30>;
+ interrupts = <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 121>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 121>;
+ status = "disabled";
+ };
+
i2c0: i2c@e6500000 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77960.dtsi b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
index 53b9aa26c9b1..25d947a81b29 100644
--- a/arch/arm64/boot/dts/renesas/r8a77960.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
@@ -585,6 +585,71 @@
resets = <&cpg 407>;
};
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a7796", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 125>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 125>;
+ status = "disabled";
+ };
+
+ tmu1: timer@e6fc0000 {
+ compatible = "renesas,tmu-r8a7796", "renesas,tmu";
+ reg = <0 0xe6fc0000 0 0x30>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 124>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 124>;
+ status = "disabled";
+ };
+
+ tmu2: timer@e6fd0000 {
+ compatible = "renesas,tmu-r8a7796", "renesas,tmu";
+ reg = <0 0xe6fd0000 0 0x30>;
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 123>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 123>;
+ status = "disabled";
+ };
+
+ tmu3: timer@e6fe0000 {
+ compatible = "renesas,tmu-r8a7796", "renesas,tmu";
+ reg = <0 0xe6fe0000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 122>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 122>;
+ status = "disabled";
+ };
+
+ tmu4: timer@ffc00000 {
+ compatible = "renesas,tmu-r8a7796", "renesas,tmu";
+ reg = <0 0xffc00000 0 0x30>;
+ interrupts = <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 121>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 121>;
+ status = "disabled";
+ };
+
i2c0: i2c@e6500000 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77961.dtsi b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
index 4b737c616257..e8c31ebec097 100644
--- a/arch/arm64/boot/dts/renesas/r8a77961.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
@@ -453,6 +453,76 @@
reg = <0 0xe6060000 0 0x50c>;
};
+ cmt0: timer@e60f0000 {
+ compatible = "renesas,r8a77961-cmt0",
+ "renesas,rcar-gen3-cmt0";
+ reg = <0 0xe60f0000 0 0x1004>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 303>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 303>;
+ status = "disabled";
+ };
+
+ cmt1: timer@e6130000 {
+ compatible = "renesas,r8a77961-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6130000 0 0x1004>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 302>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 302>;
+ status = "disabled";
+ };
+
+ cmt2: timer@e6140000 {
+ compatible = "renesas,r8a77961-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6140000 0 0x1004>;
+ interrupts = <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 301>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 301>;
+ status = "disabled";
+ };
+
+ cmt3: timer@e6148000 {
+ compatible = "renesas,r8a77961-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6148000 0 0x1004>;
+ interrupts = <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 300>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 300>;
+ status = "disabled";
+ };
+
cpg: clock-controller@e6150000 {
compatible = "renesas,r8a77961-cpg-mssr";
reg = <0 0xe6150000 0 0x1000>;
@@ -495,6 +565,71 @@
/* placeholder */
};
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a77961", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 125>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 125>;
+ status = "disabled";
+ };
+
+ tmu1: timer@e6fc0000 {
+ compatible = "renesas,tmu-r8a77961", "renesas,tmu";
+ reg = <0 0xe6fc0000 0 0x30>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 124>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 124>;
+ status = "disabled";
+ };
+
+ tmu2: timer@e6fd0000 {
+ compatible = "renesas,tmu-r8a77961", "renesas,tmu";
+ reg = <0 0xe6fd0000 0 0x30>;
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 123>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 123>;
+ status = "disabled";
+ };
+
+ tmu3: timer@e6fe0000 {
+ compatible = "renesas,tmu-r8a77961", "renesas,tmu";
+ reg = <0 0xe6fe0000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 122>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 122>;
+ status = "disabled";
+ };
+
+ tmu4: timer@ffc00000 {
+ compatible = "renesas,tmu-r8a77961", "renesas,tmu";
+ reg = <0 0xffc00000 0 0x30>;
+ interrupts = <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 121>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 121>;
+ status = "disabled";
+ };
+
i2c0: i2c@e6500000 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index 4a913df17b1d..657b20d3533b 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -455,6 +455,71 @@
resets = <&cpg 407>;
};
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a77965", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 125>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
+ resets = <&cpg 125>;
+ status = "disabled";
+ };
+
+ tmu1: timer@e6fc0000 {
+ compatible = "renesas,tmu-r8a77965", "renesas,tmu";
+ reg = <0 0xe6fc0000 0 0x30>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 124>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
+ resets = <&cpg 124>;
+ status = "disabled";
+ };
+
+ tmu2: timer@e6fd0000 {
+ compatible = "renesas,tmu-r8a77965", "renesas,tmu";
+ reg = <0 0xe6fd0000 0 0x30>;
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 123>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
+ resets = <&cpg 123>;
+ status = "disabled";
+ };
+
+ tmu3: timer@e6fe0000 {
+ compatible = "renesas,tmu-r8a77965", "renesas,tmu";
+ reg = <0 0xe6fe0000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 122>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
+ resets = <&cpg 122>;
+ status = "disabled";
+ };
+
+ tmu4: timer@ffc00000 {
+ compatible = "renesas,tmu-r8a77965", "renesas,tmu";
+ reg = <0 0xffc00000 0 0x30>;
+ interrupts = <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 121>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
+ resets = <&cpg 121>;
+ status = "disabled";
+ };
+
i2c0: i2c@e6500000 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
index 422ec53740cb..04d47c0c9bb9 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
@@ -217,6 +217,8 @@
vqmmc-supply = <&vddq_vin01>;
mmc-hs200-1_8v;
bus-width = <8>;
+ no-sd;
+ no-sdio;
non-removable;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
index e0ccca2222d2..f74f8b9993f1 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
@@ -712,6 +712,8 @@
mmc-hs200-1_8v;
mmc-hs400-1_8v;
bus-width = <8>;
+ no-sd;
+ no-sdio;
non-removable;
full-pwr-cycle-in-suspend;
status = "okay";
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index 87d41bc076a9..5010f23fafcc 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -420,6 +420,71 @@
resets = <&cpg 407>;
};
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a77990", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 125>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ resets = <&cpg 125>;
+ status = "disabled";
+ };
+
+ tmu1: timer@e6fc0000 {
+ compatible = "renesas,tmu-r8a77990", "renesas,tmu";
+ reg = <0 0xe6fc0000 0 0x30>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 124>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ resets = <&cpg 124>;
+ status = "disabled";
+ };
+
+ tmu2: timer@e6fd0000 {
+ compatible = "renesas,tmu-r8a77990", "renesas,tmu";
+ reg = <0 0xe6fd0000 0 0x30>;
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 123>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ resets = <&cpg 123>;
+ status = "disabled";
+ };
+
+ tmu3: timer@e6fe0000 {
+ compatible = "renesas,tmu-r8a77990", "renesas,tmu";
+ reg = <0 0xe6fe0000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 122>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ resets = <&cpg 122>;
+ status = "disabled";
+ };
+
+ tmu4: timer@ffc00000 {
+ compatible = "renesas,tmu-r8a77990", "renesas,tmu";
+ reg = <0 0xffc00000 0 0x30>;
+ interrupts = <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 121>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ resets = <&cpg 121>;
+ status = "disabled";
+ };
+
i2c0: i2c@e6500000 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
index 8f471881b7a3..6783c3ad0856 100644
--- a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
@@ -496,6 +496,8 @@
vqmmc-supply = <&reg_1p8v>;
bus-width = <8>;
mmc-hs200-1_8v;
+ no-sd;
+ no-sdio;
non-removable;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77995.dtsi b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
index e1af7c4782f4..2319271c881b 100644
--- a/arch/arm64/boot/dts/renesas/r8a77995.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
@@ -193,6 +193,76 @@
reg = <0 0xe6060000 0 0x508>;
};
+ cmt0: timer@e60f0000 {
+ compatible = "renesas,r8a77995-cmt0",
+ "renesas,rcar-gen3-cmt0";
+ reg = <0 0xe60f0000 0 0x1004>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 303>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77995_PD_ALWAYS_ON>;
+ resets = <&cpg 303>;
+ status = "disabled";
+ };
+
+ cmt1: timer@e6130000 {
+ compatible = "renesas,r8a77995-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6130000 0 0x1004>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 302>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77995_PD_ALWAYS_ON>;
+ resets = <&cpg 302>;
+ status = "disabled";
+ };
+
+ cmt2: timer@e6140000 {
+ compatible = "renesas,r8a77995-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6140000 0 0x1004>;
+ interrupts = <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 301>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77995_PD_ALWAYS_ON>;
+ resets = <&cpg 301>;
+ status = "disabled";
+ };
+
+ cmt3: timer@e6148000 {
+ compatible = "renesas,r8a77995-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6148000 0 0x1004>;
+ interrupts = <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 300>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77995_PD_ALWAYS_ON>;
+ resets = <&cpg 300>;
+ status = "disabled";
+ };
+
cpg: clock-controller@e6150000 {
compatible = "renesas,r8a77995-cpg-mssr";
reg = <0 0xe6150000 0 0x1000>;
@@ -242,6 +312,71 @@
resets = <&cpg 407>;
};
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a77995", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 125>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77995_PD_ALWAYS_ON>;
+ resets = <&cpg 125>;
+ status = "disabled";
+ };
+
+ tmu1: timer@e6fc0000 {
+ compatible = "renesas,tmu-r8a77995", "renesas,tmu";
+ reg = <0 0xe6fc0000 0 0x30>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 124>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77995_PD_ALWAYS_ON>;
+ resets = <&cpg 124>;
+ status = "disabled";
+ };
+
+ tmu2: timer@e6fd0000 {
+ compatible = "renesas,tmu-r8a77995", "renesas,tmu";
+ reg = <0 0xe6fd0000 0 0x30>;
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 123>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77995_PD_ALWAYS_ON>;
+ resets = <&cpg 123>;
+ status = "disabled";
+ };
+
+ tmu3: timer@e6fe0000 {
+ compatible = "renesas,tmu-r8a77995", "renesas,tmu";
+ reg = <0 0xe6fe0000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 122>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77995_PD_ALWAYS_ON>;
+ resets = <&cpg 122>;
+ status = "disabled";
+ };
+
+ tmu4: timer@ffc00000 {
+ compatible = "renesas,tmu-r8a77995", "renesas,tmu";
+ reg = <0 0xffc00000 0 0x30>;
+ interrupts = <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 121>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77995_PD_ALWAYS_ON>;
+ resets = <&cpg 121>;
+ status = "disabled";
+ };
+
i2c0: i2c@e6500000 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
index 4ba269a4cec8..fa284a7260d6 100644
--- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
@@ -5,6 +5,7 @@
* Copyright (C) 2020 Renesas Electronics Corp.
*/
+#include <dt-bindings/gpio/gpio.h>
#include "r8a779a0.dtsi"
/ {
@@ -31,6 +32,40 @@
device_type = "memory";
reg = <0x7 0x00000000 0x0 0x80000000>;
};
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+};
+
+&avb0 {
+ pinctrl-0 = <&avb0_pins>;
+ pinctrl-names = "default";
+ phy-handle = <&phy0>;
+ tx-internal-delay-ps = <2000>;
+ status = "okay";
+
+ phy0: ethernet-phy@0 {
+ rxc-skew-ps = <1500>;
+ reg = <0>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
+ };
};
&extal_clk {
@@ -41,6 +76,109 @@
clock-frequency = <32768>;
};
+&i2c0 {
+ pinctrl-0 = <&i2c0_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+ clock-frequency = <400000>;
+};
+
+&i2c1 {
+ pinctrl-0 = <&i2c1_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+ clock-frequency = <400000>;
+};
+
+&i2c6 {
+ pinctrl-0 = <&i2c6_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+ clock-frequency = <400000>;
+};
+
+&mmc0 {
+ pinctrl-0 = <&mmc_pins>;
+ pinctrl-1 = <&mmc_pins>;
+ pinctrl-names = "default", "state_uhs";
+
+ vmmc-supply = <&reg_3p3v>;
+ vqmmc-supply = <&reg_1p8v>;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ bus-width = <8>;
+ no-sd;
+ no-sdio;
+ non-removable;
+ full-pwr-cycle-in-suspend;
+ status = "okay";
+};
+
+&pfc {
+ pinctrl-0 = <&scif_clk_pins>;
+ pinctrl-names = "default";
+
+ avb0_pins: avb0 {
+ mux {
+ groups = "avb0_link", "avb0_mdio", "avb0_rgmii", "avb0_txcrefclk";
+ function = "avb0";
+ };
+
+ pins_mdio {
+ groups = "avb0_mdio";
+ drive-strength = <21>;
+ };
+
+ pins_mii {
+ groups = "avb0_rgmii";
+ drive-strength = <21>;
+ };
+
+ };
+
+ i2c0_pins: i2c0 {
+ groups = "i2c0";
+ function = "i2c0";
+ };
+
+ i2c1_pins: i2c1 {
+ groups = "i2c1";
+ function = "i2c1";
+ };
+
+ i2c6_pins: i2c6 {
+ groups = "i2c6";
+ function = "i2c6";
+ };
+
+ mmc_pins: mmc {
+ groups = "mmc_data8", "mmc_ctrl", "mmc_ds";
+ function = "mmc";
+ power-source = <1800>;
+ };
+
+ scif0_pins: scif0 {
+ groups = "scif0_data", "scif0_ctrl";
+ function = "scif0";
+ };
+
+ scif_clk_pins: scif_clk {
+ groups = "scif_clk";
+ function = "scif_clk";
+ };
+};
+
&scif0 {
+ pinctrl-0 = <&scif0_pins>;
+ pinctrl-names = "default";
+
+ uart-has-rtscts;
status = "okay";
};
+
+&scif_clk {
+ clock-frequency = <24000000>;
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
index 8eda70e5a82b..5617b81dd7dc 100644
--- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
+++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
@@ -13,6 +13,7 @@
compatible = "renesas,falcon-breakout", "renesas,falcon-cpu", "renesas,r8a779a0";
aliases {
+ ethernet0 = &avb0;
serial0 = &scif0;
};
@@ -20,3 +21,8 @@
stdout-path = "serial0:115200n8";
};
};
+
+&rwdt {
+ timeout-sec = <60>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
index 6cf77ce9aa93..dfd6ae8b564f 100644
--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
@@ -14,6 +14,16 @@
#address-cells = <2>;
#size-cells = <2>;
+ aliases {
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
+ i2c6 = &i2c6;
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -70,6 +80,165 @@
#size-cells = <2>;
ranges;
+ rwdt: watchdog@e6020000 {
+ compatible = "renesas,r8a779a0-wdt",
+ "renesas,rcar-gen3-wdt";
+ reg = <0 0xe6020000 0 0x0c>;
+ clocks = <&cpg CPG_MOD 907>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 907>;
+ status = "disabled";
+ };
+
+ pfc: pin-controller@e6050000 {
+ compatible = "renesas,pfc-r8a779a0";
+ reg = <0 0xe6050000 0 0x16c>, <0 0xe6050800 0 0x16c>,
+ <0 0xe6058000 0 0x16c>, <0 0xe6058800 0 0x16c>,
+ <0 0xe6060000 0 0x16c>, <0 0xe6060800 0 0x16c>,
+ <0 0xe6068000 0 0x16c>, <0 0xe6068800 0 0x16c>,
+ <0 0xe6069000 0 0x16c>, <0 0xe6069800 0 0x16c>;
+ };
+
+ gpio0: gpio@e6058180 {
+ compatible = "renesas,gpio-r8a779a0";
+ reg = <0 0xe6058180 0 0x54>;
+ interrupts = <GIC_SPI 832 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 916>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 916>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pfc 0 0 28>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio@e6050180 {
+ compatible = "renesas,gpio-r8a779a0";
+ reg = <0 0xe6050180 0 0x54>;
+ interrupts = <GIC_SPI 836 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 915>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 915>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pfc 0 32 31>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@e6050980 {
+ compatible = "renesas,gpio-r8a779a0";
+ reg = <0 0xe6050980 0 0x54>;
+ interrupts = <GIC_SPI 840 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 915>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 915>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pfc 0 64 25>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio@e6058980 {
+ compatible = "renesas,gpio-r8a779a0";
+ reg = <0 0xe6058980 0 0x54>;
+ interrupts = <GIC_SPI 844 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 916>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 916>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pfc 0 96 17>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio4: gpio@e6060180 {
+ compatible = "renesas,gpio-r8a779a0";
+ reg = <0 0xe6060180 0 0x54>;
+ interrupts = <GIC_SPI 848 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 917>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 917>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pfc 0 128 27>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio5: gpio@e6060980 {
+ compatible = "renesas,gpio-r8a779a0";
+ reg = <0 0xe6060980 0 0x54>;
+ interrupts = <GIC_SPI 852 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 917>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 917>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pfc 0 160 21>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio6: gpio@e6068180 {
+ compatible = "renesas,gpio-r8a779a0";
+ reg = <0 0xe6068180 0 0x54>;
+ interrupts = <GIC_SPI 856 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 918>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 918>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pfc 0 192 21>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio7: gpio@e6068980 {
+ compatible = "renesas,gpio-r8a779a0";
+ reg = <0 0xe6068980 0 0x54>;
+ interrupts = <GIC_SPI 860 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 918>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 918>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pfc 0 224 21>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio8: gpio@e6069180 {
+ compatible = "renesas,gpio-r8a779a0";
+ reg = <0 0xe6069180 0 0x54>;
+ interrupts = <GIC_SPI 864 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 918>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 918>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pfc 0 256 21>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio9: gpio@e6069980 {
+ compatible = "renesas,gpio-r8a779a0";
+ reg = <0 0xe6069980 0 0x54>;
+ interrupts = <GIC_SPI 868 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 918>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 918>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pfc 0 288 21>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
cpg: clock-controller@e6150000 {
compatible = "renesas,r8a779a0-cpg-mssr";
reg = <0 0xe6150000 0 0x4000>;
@@ -91,6 +260,464 @@
#power-domain-cells = <1>;
};
+ i2c0: i2c@e6500000 {
+ compatible = "renesas,i2c-r8a779a0",
+ "renesas,rcar-gen3-i2c";
+ reg = <0 0xe6500000 0 0x40>;
+ interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 518>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 518>;
+ dmas = <&dmac1 0x91>, <&dmac1 0x90>;
+ dma-names = "tx", "rx";
+ i2c-scl-internal-delay-ns = <110>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@e6508000 {
+ compatible = "renesas,i2c-r8a779a0",
+ "renesas,rcar-gen3-i2c";
+ reg = <0 0xe6508000 0 0x40>;
+ interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 519>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 519>;
+ dmas = <&dmac1 0x93>, <&dmac1 0x92>;
+ dma-names = "tx", "rx";
+ i2c-scl-internal-delay-ns = <110>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@e6510000 {
+ compatible = "renesas,i2c-r8a779a0",
+ "renesas,rcar-gen3-i2c";
+ reg = <0 0xe6510000 0 0x40>;
+ interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 520>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 520>;
+ dmas = <&dmac1 0x95>, <&dmac1 0x94>;
+ dma-names = "tx", "rx";
+ i2c-scl-internal-delay-ns = <110>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@e66d0000 {
+ compatible = "renesas,i2c-r8a779a0",
+ "renesas,rcar-gen3-i2c";
+ reg = <0 0xe66d0000 0 0x40>;
+ interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 521>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 521>;
+ dmas = <&dmac1 0x97>, <&dmac1 0x96>;
+ dma-names = "tx", "rx";
+ i2c-scl-internal-delay-ns = <110>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@e66d8000 {
+ compatible = "renesas,i2c-r8a779a0",
+ "renesas,rcar-gen3-i2c";
+ reg = <0 0xe66d8000 0 0x40>;
+ interrupts = <GIC_SPI 242 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 522>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 522>;
+ dmas = <&dmac1 0x99>, <&dmac1 0x98>;
+ dma-names = "tx", "rx";
+ i2c-scl-internal-delay-ns = <110>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@e66e0000 {
+ compatible = "renesas,i2c-r8a779a0",
+ "renesas,rcar-gen3-i2c";
+ reg = <0 0xe66e0000 0 0x40>;
+ interrupts = <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ dmas = <&dmac1 0x9b>, <&dmac1 0x9a>;
+ dma-names = "tx", "rx";
+ i2c-scl-internal-delay-ns = <110>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@e66e8000 {
+ compatible = "renesas,i2c-r8a779a0",
+ "renesas,rcar-gen3-i2c";
+ reg = <0 0xe66e8000 0 0x40>;
+ interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 524>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 524>;
+ dmas = <&dmac1 0x9d>, <&dmac1 0x9c>;
+ dma-names = "tx", "rx";
+ i2c-scl-internal-delay-ns = <110>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hscif0: serial@e6540000 {
+ compatible = "renesas,hscif-r8a779a0",
+ "renesas,rcar-gen3-hscif", "renesas,hscif";
+ reg = <0 0xe6540000 0 0x60>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 514>,
+ <&cpg CPG_CORE R8A779A0_CLK_S1D2>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x31>, <&dmac1 0x30>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 514>;
+ status = "disabled";
+ };
+
+ hscif1: serial@e6550000 {
+ compatible = "renesas,hscif-r8a779a0",
+ "renesas,rcar-gen3-hscif", "renesas,hscif";
+ reg = <0 0xe6550000 0 0x60>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 515>,
+ <&cpg CPG_CORE R8A779A0_CLK_S1D2>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x33>, <&dmac1 0x32>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 515>;
+ status = "disabled";
+ };
+
+ hscif2: serial@e6560000 {
+ compatible = "renesas,hscif-r8a779a0",
+ "renesas,rcar-gen3-hscif", "renesas,hscif";
+ reg = <0 0xe6560000 0 0x60>;
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 516>,
+ <&cpg CPG_CORE R8A779A0_CLK_S1D2>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x35>, <&dmac1 0x34>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 516>;
+ status = "disabled";
+ };
+
+ hscif3: serial@e66a0000 {
+ compatible = "renesas,hscif-r8a779a0",
+ "renesas,rcar-gen3-hscif", "renesas,hscif";
+ reg = <0 0xe66a0000 0 0x60>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 517>,
+ <&cpg CPG_CORE R8A779A0_CLK_S1D2>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x37>, <&dmac1 0x36>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 517>;
+ status = "disabled";
+ };
+
+ avb0: ethernet@e6800000 {
+ compatible = "renesas,etheravb-r8a779a0",
+ "renesas,etheravb-rcar-gen3";
+ reg = <0 0xe6800000 0 0x800>;
+ interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 275 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 276 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 277 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14", "ch15",
+ "ch16", "ch17", "ch18", "ch19",
+ "ch20", "ch21", "ch22", "ch23",
+ "ch24";
+ clocks = <&cpg CPG_MOD 211>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 211>;
+ phy-mode = "rgmii";
+ rx-internal-delay-ps = <0>;
+ tx-internal-delay-ps = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ avb1: ethernet@e6810000 {
+ compatible = "renesas,etheravb-r8a779a0",
+ "renesas,etheravb-rcar-gen3";
+ reg = <0 0xe6810000 0 0x800>;
+ interrupts = <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 285 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 289 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 291 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 292 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 293 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 294 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 295 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 296 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 299 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 301 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 302 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14", "ch15",
+ "ch16", "ch17", "ch18", "ch19",
+ "ch20", "ch21", "ch22", "ch23",
+ "ch24";
+ clocks = <&cpg CPG_MOD 212>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 212>;
+ phy-mode = "rgmii";
+ rx-internal-delay-ps = <0>;
+ tx-internal-delay-ps = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ avb2: ethernet@e6820000 {
+ compatible = "renesas,etheravb-r8a779a0",
+ "renesas,etheravb-rcar-gen3";
+ reg = <0 0xe6820000 0 0x1000>;
+ interrupts = <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14", "ch15",
+ "ch16", "ch17", "ch18", "ch19",
+ "ch20", "ch21", "ch22", "ch23",
+ "ch24";
+ clocks = <&cpg CPG_MOD 213>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 213>;
+ phy-mode = "rgmii";
+ rx-internal-delay-ps = <0>;
+ tx-internal-delay-ps = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ avb3: ethernet@e6830000 {
+ compatible = "renesas,etheravb-r8a779a0",
+ "renesas,etheravb-rcar-gen3";
+ reg = <0 0xe6830000 0 0x1000>;
+ interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14", "ch15",
+ "ch16", "ch17", "ch18", "ch19",
+ "ch20", "ch21", "ch22", "ch23",
+ "ch24";
+ clocks = <&cpg CPG_MOD 214>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 214>;
+ phy-mode = "rgmii";
+ rx-internal-delay-ps = <0>;
+ tx-internal-delay-ps = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ avb4: ethernet@e6840000 {
+ compatible = "renesas,etheravb-r8a779a0",
+ "renesas,etheravb-rcar-gen3";
+ reg = <0 0xe6840000 0 0x1000>;
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 362 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 363 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 364 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 365 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 366 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 368 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 380 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14", "ch15",
+ "ch16", "ch17", "ch18", "ch19",
+ "ch20", "ch21", "ch22", "ch23",
+ "ch24";
+ clocks = <&cpg CPG_MOD 215>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 215>;
+ phy-mode = "rgmii";
+ rx-internal-delay-ps = <0>;
+ tx-internal-delay-ps = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ avb5: ethernet@e6850000 {
+ compatible = "renesas,etheravb-r8a779a0",
+ "renesas,etheravb-rcar-gen3";
+ reg = <0 0xe6850000 0 0x1000>;
+ interrupts = <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 385 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 386 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 387 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 388 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 389 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 390 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 391 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 392 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14", "ch15",
+ "ch16", "ch17", "ch18", "ch19",
+ "ch20", "ch21", "ch22", "ch23",
+ "ch24";
+ clocks = <&cpg CPG_MOD 216>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 216>;
+ phy-mode = "rgmii";
+ rx-internal-delay-ps = <0>;
+ tx-internal-delay-ps = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
scif0: serial@e6e60000 {
compatible = "renesas,scif-r8a779a0",
"renesas,rcar-gen3-scif", "renesas,scif";
@@ -100,11 +727,221 @@
<&cpg CPG_CORE R8A779A0_CLK_S1D2>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x51>, <&dmac1 0x50>;
+ dma-names = "tx", "rx";
power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
resets = <&cpg 702>;
status = "disabled";
};
+ scif1: serial@e6e68000 {
+ compatible = "renesas,scif-r8a779a0",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6e68000 0 64>;
+ interrupts = <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 703>,
+ <&cpg CPG_CORE R8A779A0_CLK_S1D2>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x53>, <&dmac1 0x52>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 703>;
+ status = "disabled";
+ };
+
+ scif3: serial@e6c50000 {
+ compatible = "renesas,scif-r8a779a0",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6c50000 0 64>;
+ interrupts = <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 704>,
+ <&cpg CPG_CORE R8A779A0_CLK_S1D2>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x57>, <&dmac1 0x56>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 704>;
+ status = "disabled";
+ };
+
+ scif4: serial@e6c40000 {
+ compatible = "renesas,scif-r8a779a0",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6c40000 0 64>;
+ interrupts = <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 705>,
+ <&cpg CPG_CORE R8A779A0_CLK_S1D2>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x59>, <&dmac1 0x58>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 705>;
+ status = "disabled";
+ };
+
+ msiof0: spi@e6e90000 {
+ compatible = "renesas,msiof-r8a779a0",
+ "renesas,rcar-gen3-msiof";
+ reg = <0 0xe6e90000 0 0x0064>;
+ interrupts = <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 618>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 618>;
+ dmas = <&dmac1 0x41>, <&dmac1 0x40>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ msiof1: spi@e6ea0000 {
+ compatible = "renesas,msiof-r8a779a0",
+ "renesas,rcar-gen3-msiof";
+ reg = <0 0xe6ea0000 0 0x0064>;
+ interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 619>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 619>;
+ dmas = <&dmac1 0x43>, <&dmac1 0x42>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ msiof2: spi@e6c00000 {
+ compatible = "renesas,msiof-r8a779a0",
+ "renesas,rcar-gen3-msiof";
+ reg = <0 0xe6c00000 0 0x0064>;
+ interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 620>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 620>;
+ dmas = <&dmac1 0x45>, <&dmac1 0x44>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ msiof3: spi@e6c10000 {
+ compatible = "renesas,msiof-r8a779a0",
+ "renesas,rcar-gen3-msiof";
+ reg = <0 0xe6c10000 0 0x0064>;
+ interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 621>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 621>;
+ dmas = <&dmac1 0x47>, <&dmac1 0x46>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ msiof4: spi@e6c20000 {
+ compatible = "renesas,msiof-r8a779a0",
+ "renesas,rcar-gen3-msiof";
+ reg = <0 0xe6c20000 0 0x0064>;
+ interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 622>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 622>;
+ dmas = <&dmac1 0x49>, <&dmac1 0x48>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ msiof5: spi@e6c28000 {
+ compatible = "renesas,msiof-r8a779a0",
+ "renesas,rcar-gen3-msiof";
+ reg = <0 0xe6c28000 0 0x0064>;
+ interrupts = <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 623>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 623>;
+ dmas = <&dmac1 0x4b>, <&dmac1 0x4a>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ dmac1: dma-controller@e7350000 {
+ compatible = "renesas,dmac-r8a779a0";
+ reg = <0 0xe7350000 0 0x1000>,
+ <0 0xe7300000 0 0x10000>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3", "ch4",
+ "ch5", "ch6", "ch7", "ch8", "ch9",
+ "ch10", "ch11", "ch12", "ch13",
+ "ch14", "ch15";
+ clocks = <&cpg CPG_MOD 709>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 709>;
+ #dma-cells = <1>;
+ dma-channels = <16>;
+ };
+
+ dmac2: dma-controller@e7351000 {
+ compatible = "renesas,dmac-r8a779a0";
+ reg = <0 0xe7351000 0 0x1000>,
+ <0 0xe7310000 0 0x10000>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3", "ch4",
+ "ch5", "ch6", "ch7";
+ clocks = <&cpg CPG_MOD 710>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 710>;
+ #dma-cells = <1>;
+ dma-channels = <8>;
+ };
+
+ mmc0: mmc@ee140000 {
+ compatible = "renesas,sdhi-r8a779a0",
+ "renesas,rcar-gen3-sdhi";
+ reg = <0 0xee140000 0 0x2000>;
+ interrupts = <GIC_SPI 236 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 706>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 706>;
+ max-frequency = <200000000>;
+ status = "disabled";
+ };
+
gic: interrupt-controller@f1000000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index 6c643ed74fc5..c22bb38994e8 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -831,6 +831,8 @@
bus-width = <8>;
mmc-hs200-1_8v;
mmc-hs400-1_8v;
+ no-sd;
+ no-sdio;
non-removable;
fixed-emmc-driver-type = <1>;
full-pwr-cycle-in-suspend;
diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
index 8f8d7371d8e2..a04eae55dd6c 100644
--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
@@ -468,6 +468,8 @@
bus-width = <8>;
mmc-hs200-1_8v;
mmc-hs400-1_8v;
+ no-sd;
+ no-sdio;
non-removable;
full-pwr-cycle-in-suspend;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index 1ab55a124a87..62d3abc17a24 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -11,6 +11,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-a1.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-evb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-nanopi-r2s.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-rock64.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-rock-pi-e.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-roc-cc.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-evb-act8846.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-geekbox.dtb
@@ -33,6 +34,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-kobol-helios64.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-leez-p710.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopc-t4.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopi-m4.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopi-m4b.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopi-neo4.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-orangepi.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-pinebook-pro.dtb
diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
index 2695ea8cda14..c45b0cfcae09 100644
--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
@@ -25,6 +25,9 @@
i2c1 = &i2c1;
i2c2 = &i2c2;
i2c3 = &i2c3;
+ mmc0 = &sdmmc;
+ mmc1 = &sdio;
+ mmc2 = &emmc;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -703,22 +706,15 @@
clock-names = "pclk", "timer";
};
- amba: bus {
- compatible = "simple-bus";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- dmac: dmac@ff240000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x0 0xff240000 0x0 0x4000>;
- interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- };
+ dmac: dmac@ff240000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xff240000 0x0 0x4000>;
+ interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
};
tsadc: tsadc@ff280000 {
@@ -973,6 +969,21 @@
status = "disabled";
};
+ nfc: nand-controller@ff3b0000 {
+ compatible = "rockchip,px30-nfc";
+ reg = <0x0 0xff3b0000 0x0 0x4000>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_NANDC>, <&cru SCLK_NANDC>;
+ clock-names = "ahb", "nfc";
+ assigned-clocks = <&cru SCLK_NANDC>;
+ assigned-clock-rates = <150000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&flash_ale &flash_bus8 &flash_cle &flash_cs0
+ &flash_rdn &flash_rdy &flash_wrn &flash_dqs>;
+ power-domains = <&power PX30_PD_MMC_NAND>;
+ status = "disabled";
+ };
+
gpu: gpu@ff400000 {
compatible = "rockchip,px30-mali", "arm,mali-bifrost";
reg = <0x0 0xff400000 0x0 0x4000>;
@@ -1097,7 +1108,7 @@
vopl_mmu: iommu@ff470f00 {
compatible = "rockchip,iommu";
reg = <0x0 0xff470f00 0x0 0x100>;
- interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "vopl_mmu";
clocks = <&cru ACLK_VOPL>, <&cru HCLK_VOPL>;
clock-names = "aclk", "iface";
@@ -1107,102 +1118,102 @@
};
qos_gmac: qos@ff518000 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff518000 0x0 0x20>;
};
qos_gpu: qos@ff520000 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff520000 0x0 0x20>;
};
qos_sdmmc: qos@ff52c000 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff52c000 0x0 0x20>;
};
qos_emmc: qos@ff538000 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff538000 0x0 0x20>;
};
qos_nand: qos@ff538080 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff538080 0x0 0x20>;
};
qos_sdio: qos@ff538100 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff538100 0x0 0x20>;
};
qos_sfc: qos@ff538180 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff538180 0x0 0x20>;
};
qos_usb_host: qos@ff540000 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff540000 0x0 0x20>;
};
qos_usb_otg: qos@ff540080 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff540080 0x0 0x20>;
};
qos_isp_128: qos@ff548000 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff548000 0x0 0x20>;
};
qos_isp_rd: qos@ff548080 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff548080 0x0 0x20>;
};
qos_isp_wr: qos@ff548100 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff548100 0x0 0x20>;
};
qos_isp_m1: qos@ff548180 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff548180 0x0 0x20>;
};
qos_vip: qos@ff548200 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff548200 0x0 0x20>;
};
qos_rga_rd: qos@ff550000 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff550000 0x0 0x20>;
};
qos_rga_wr: qos@ff550080 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff550080 0x0 0x20>;
};
qos_vop_m0: qos@ff550100 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff550100 0x0 0x20>;
};
qos_vop_m1: qos@ff550180 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff550180 0x0 0x20>;
};
qos_vpu: qos@ff558000 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff558000 0x0 0x20>;
};
qos_vpu_r128: qos@ff558080 {
- compatible = "syscon";
+ compatible = "rockchip,px30-qos", "syscon";
reg = <0x0 0xff558080 0x0 0x20>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3308.dtsi b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
index 2560b98771ca..3a035a189450 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
@@ -24,6 +24,9 @@
i2c1 = &i2c1;
i2c2 = &i2c2;
i2c3 = &i2c3;
+ mmc0 = &sdmmc;
+ mmc1 = &emmc;
+ mmc2 = &sdio;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -513,33 +516,26 @@
status = "disabled";
};
- amba: bus {
- compatible = "simple-bus";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
+ dmac0: dma-controller@ff2c0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xff2c0000 0x0 0x4000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC0>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ };
- dmac0: dma-controller@ff2c0000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x0 0xff2c0000 0x0 0x4000>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC0>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- };
-
- dmac1: dma-controller@ff2d0000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x0 0xff2d0000 0x0 0x4000>;
- interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC1>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- };
+ dmac1: dma-controller@ff2d0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xff2d0000 0x0 0x4000>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC1>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
};
i2s_2ch_0: i2s@ff350000 {
@@ -629,6 +625,21 @@
status = "disabled";
};
+ nfc: nand-controller@ff4b0000 {
+ compatible = "rockchip,rk3308-nfc",
+ "rockchip,rv1108-nfc";
+ reg = <0x0 0xff4b0000 0x0 0x4000>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_NANDC>, <&cru SCLK_NANDC>;
+ clock-names = "ahb", "nfc";
+ assigned-clocks = <&cru SCLK_NANDC>;
+ assigned-clock-rates = <150000000>;
+ pinctrl-0 = <&flash_ale &flash_bus8 &flash_cle &flash_csn0
+ &flash_rdn &flash_rdy &flash_wrn>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
cru: clock-controller@ff500000 {
compatible = "rockchip,rk3308-cru";
reg = <0x0 0xff500000 0x0 0x1000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
index 2ee07d15a6e3..faf496d789cf 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
@@ -50,6 +50,7 @@
sys_led: led-1 {
gpios = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
label = "nanopi-r2s:red:sys";
+ default-state = "on";
};
wan_led: led-2 {
@@ -114,6 +115,10 @@
cpu-supply = <&vdd_arm>;
};
+&display_subsystem {
+ status = "disabled";
+};
+
&gmac2io {
assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
assigned-clock-parents = <&gmac_clk>, <&gmac_clk>;
@@ -280,7 +285,7 @@
};
};
- ethernet-phy {
+ gmac2io {
eth_phy_reset_pin: eth-phy-reset-pin {
rockchip,pins = <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_down>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
new file mode 100644
index 000000000000..2d71ca7e429c
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * (C) Copyright 2020 Chen-Yu Tsai <wens@csie.org>
+ *
+ * Based on ./rk3328-rock64.dts, which is
+ *
+ * Copyright (c) 2017 PINE64
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+
+#include "rk3328.dtsi"
+
+/ {
+ model = "Radxa ROCK Pi E";
+ compatible = "radxa,rockpi-e", "rockchip,rk3328";
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1750000>;
+
+ /* This button is unpopulated out of the factory. */
+ button-recovery {
+ label = "Recovery";
+ linux,code = <KEY_VENDOR>;
+ press-threshold-microvolt = <10000>;
+ };
+ };
+
+ gmac_clkin: external-gmac-clock {
+ compatible = "fixed-clock";
+ clock-frequency = <125000000>;
+ clock-output-names = "gmac_clkin";
+ #clock-cells = <0>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-0 = <&led_pin>;
+ pinctrl-names = "default";
+
+ led-0 {
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&gpio3 RK_PA5 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ vcc_sd: sdmmc-regulator {
+ compatible = "regulator-fixed";
+ gpio = <&gpio0 RK_PD6 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0m1_pin>;
+ regulator-name = "vcc_sd";
+ regulator-boot-on;
+ vin-supply = <&vcc_io>;
+ };
+
+ vcc_host_5v: vcc-host-5v-regulator {
+ compatible = "regulator-fixed";
+ gpio = <&gpio3 RK_PA7 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb30_host_drv>;
+ enable-active-high;
+ regulator-name = "vcc_host_5v";
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_sys>;
+ };
+
+ vcc_sys: vcc-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc_wifi: vcc-wifi-regulator {
+ compatible = "regulator-fixed";
+ gpio = <&gpio0 RK_PA0 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_en>;
+ regulator-name = "vcc_wifi";
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_io>;
+ };
+};
+
+&analog_sound {
+ status = "okay";
+};
+
+&codec {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&emmc {
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_bus8>;
+ vmmc-supply = <&vcc_io>;
+ vqmmc-supply = <&vcc18_emmc>;
+ status = "okay";
+};
+
+&gmac2io {
+ assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
+ assigned-clock-parents = <&gmac_clkin>, <&gmac_clkin>;
+ clock_in_out = "input";
+ phy-handle = <&rtl8211e>;
+ phy-mode = "rgmii";
+ phy-supply = <&vcc_io>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmiim1_pins>;
+ snps,aal;
+ snps,rxpbl = <0x4>;
+ snps,txpbl = <0x4>;
+ tx_delay = <0x26>;
+ rx_delay = <0x11>;
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtl8211e: ethernet-phy@1 {
+ reg = <1>;
+ pinctrl-0 = <&eth_phy_int_pin>, <&eth_phy_reset_pin>;
+ pinctrl-names = "default";
+ interrupt-parent = <&gpio1>;
+ interrupts = <24 IRQ_TYPE_LEVEL_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <50000>;
+ reset-gpios = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&gmac2phy {
+ pinctrl-names = "default";
+ pinctrl-0 = <&fephyled_linkm1>, <&fephyled_rxm1>;
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+
+ rk805: pmic@18 {
+ compatible = "rockchip,rk805";
+ reg = <0x18>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
+ #clock-cells = <1>;
+ clock-output-names = "xin32k", "rk805-clkout2";
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_l>;
+ rockchip,system-power-controller;
+ wakeup-source;
+
+ vcc1-supply = <&vcc_sys>;
+ vcc2-supply = <&vcc_sys>;
+ vcc3-supply = <&vcc_sys>;
+ vcc4-supply = <&vcc_sys>;
+ vcc5-supply = <&vcc_io>;
+ vcc6-supply = <&vcc_sys>;
+
+ regulators {
+ vdd_log: DCDC_REG1 {
+ regulator-name = "vdd_log";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1450000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vdd_arm: DCDC_REG2 {
+ regulator-name = "vdd_arm";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1450000>;
+ regulator-ramp-delay = <12500>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <950000>;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_io: DCDC_REG4 {
+ regulator-name = "vcc_io";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_18: LDO_REG1 {
+ regulator-name = "vcc_18";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc18_emmc: LDO_REG2 {
+ regulator-name = "vcc18_emmc";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_10: LDO_REG3 {
+ regulator-name = "vdd_10";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+ };
+ };
+};
+
+&i2s1 {
+ status = "okay";
+};
+
+&io_domains {
+ pmuio-supply = <&vcc_io>;
+ vccio1-supply = <&vcc_io>;
+ vccio2-supply = <&vcc18_emmc>;
+ vccio3-supply = <&vcc_io>;
+ vccio4-supply = <&vcc_io>;
+ vccio5-supply = <&vcc_io>;
+ vccio6-supply = <&vcc_io>;
+ status = "okay";
+};
+
+&pinctrl {
+ ephy {
+ eth_phy_int_pin: eth-phy-int-pin {
+ rockchip,pins = <1 RK_PD0 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ eth_phy_reset_pin: eth-phy-reset-pin {
+ rockchip,pins = <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+
+ leds {
+ led_pin: led-pin {
+ rockchip,pins = <3 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ pmic_int_l: pmic-int-l {
+ rockchip,pins = <2 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ usb3 {
+ usb30_host_drv: usb30-host-drv {
+ rockchip,pins = <3 RK_PA7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ wifi {
+ wifi_en: wifi-en {
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ disable-wp;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_clk>, <&sdmmc0_cmd>, <&sdmmc0_dectn>, <&sdmmc0_bus4>;
+ vmmc-supply = <&vcc_sd>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcc_18>;
+ status = "okay";
+};
+
+&tsadc {
+ status = "okay";
+};
+
+&u2phy {
+ status = "okay";
+};
+
+&u2phy_host {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index 86cfb5c50a94..c984662043da 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -84,34 +84,32 @@
};
};
- sound {
- compatible = "audio-graph-card";
- label = "rockchip,rk3328";
- dais = <&i2s1_p0
- &spdif_p0>;
+ spdif_sound: spdif-sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "SPDIF";
+
+ simple-audio-card,cpu {
+ sound-dai = <&spdif>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&spdif_dit>;
+ };
};
- spdif-dit {
+ spdif_dit: spdif-dit {
compatible = "linux,spdif-dit";
#sound-dai-cells = <0>;
-
- port {
- dit_p0_0: endpoint {
- remote-endpoint = <&spdif_p0_0>;
- };
- };
};
};
+&analog_sound {
+ status = "okay";
+};
+
&codec {
mute-gpios = <&grf_gpio 0 GPIO_ACTIVE_LOW>;
status = "okay";
-
- port@0 {
- codec_p0_0: endpoint {
- remote-endpoint = <&i2s1_p0_0>;
- };
- };
};
&cpu0 {
@@ -163,6 +161,10 @@
status = "okay";
};
+&hdmi_sound {
+ status = "okay";
+};
+
&hdmiphy {
status = "okay";
};
@@ -278,16 +280,12 @@
};
};
-&i2s1 {
+&i2s0 {
status = "okay";
+};
- i2s1_p0: port {
- i2s1_p0_0: endpoint {
- dai-format = "i2s";
- mclk-fs = <256>;
- remote-endpoint = <&codec_p0_0>;
- };
- };
+&i2s1 {
+ status = "okay";
};
&io_domains {
@@ -337,12 +335,6 @@
&spdif {
pinctrl-0 = <&spdifm0_tx>;
status = "okay";
-
- spdif_p0: port {
- spdif_p0_0: endpoint {
- remote-endpoint = <&dit_p0_0>;
- };
- };
};
&spi0 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index db0d5c8e5f96..063ed0adbec4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -27,6 +27,9 @@
i2c1 = &i2c1;
i2c2 = &i2c2;
i2c3 = &i2c3;
+ mmc0 = &sdmmc;
+ mmc1 = &sdio;
+ mmc2 = &emmc;
ethernet0 = &gmac2io;
ethernet1 = &gmac2phy;
};
@@ -142,24 +145,6 @@
};
};
- amba: bus {
- compatible = "simple-bus";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- dmac: dmac@ff1f0000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x0 0xff1f0000 0x0 0x4000>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- };
- };
-
analog_sound: analog-sound {
compatible = "simple-audio-card";
simple-audio-card,format = "i2s";
@@ -504,6 +489,17 @@
status = "disabled";
};
+ dmac: dmac@ff1f0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xff1f0000 0x0 0x4000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ };
+
thermal-zones {
soc_thermal: soc-thermal {
polling-delay-passive = <20>;
@@ -928,6 +924,7 @@
phy-mode = "rmii";
phy-handle = <&phy>;
snps,txpbl = <0x4>;
+ clock_in_out = "output";
status = "disabled";
mdio {
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index 3746f23dc3df..7af68ec3feae 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -25,6 +25,9 @@
i2c3 = &i2c3;
i2c4 = &i2c4;
i2c5 = &i2c5;
+ mmc0 = &sdmmc;
+ mmc1 = &sdio0;
+ mmc2 = &emmc;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -136,37 +139,6 @@
};
};
- amba: bus {
- compatible = "simple-bus";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- dmac_peri: dma-controller@ff250000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x0 0xff250000 0x0 0x4000>;
- interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
- #dma-cells = <1>;
- arm,pl330-broken-no-flushp;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC_PERI>;
- clock-names = "apb_pclk";
- };
-
- dmac_bus: dma-controller@ff600000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x0 0xff600000 0x0 0x4000>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
- #dma-cells = <1>;
- arm,pl330-broken-no-flushp;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC_BUS>;
- clock-names = "apb_pclk";
- };
- };
-
arm-pmu {
compatible = "arm,armv8-pmuv3";
interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
@@ -399,8 +371,20 @@
status = "disabled";
};
+ dmac_peri: dma-controller@ff250000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xff250000 0x0 0x4000>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ arm,pl330-broken-no-flushp;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC_PERI>;
+ clock-names = "apb_pclk";
+ };
+
thermal-zones {
- cpu {
+ cpu_thermal: cpu-thermal {
polling-delay-passive = <100>; /* milliseconds */
polling-delay = <5000>; /* milliseconds */
@@ -444,7 +428,7 @@
};
};
- gpu {
+ gpu_thermal: gpu-thermal {
polling-delay-passive = <100>; /* milliseconds */
polling-delay = <5000>; /* milliseconds */
@@ -532,6 +516,18 @@
status = "disabled";
};
+ dmac_bus: dma-controller@ff600000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xff600000 0x0 0x4000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ arm,pl330-broken-no-flushp;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC_BUS>;
+ clock-names = "apb_pclk";
+ };
+
i2c0: i2c@ff650000 {
compatible = "rockchip,rk3368-i2c", "rockchip,rk3288-i2c";
reg = <0x0 0xff650000 0x0 0x1000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
index 2a561be724b2..66c725a34220 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
@@ -331,7 +331,7 @@
pmic {
pmic_int_l: pmic-int-l {
- rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
index e0d75617bb7e..452728b82e42 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
@@ -95,6 +95,7 @@
};
&pcie0 {
+ ep-gpios = <&gpio2 RK_PA4 GPIO_ACTIVE_HIGH>;
num-lanes = <4>;
vpcie3v3-supply = <&vcc3v3_sys>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-m4b.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-m4b.dts
new file mode 100644
index 000000000000..72182c58cc46
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-m4b.dts
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * FriendlyElec NanoPi M4B board device tree source
+ *
+ * Copyright (c) 2020 Chen-Yu Tsai <wens@csie.org>
+ */
+
+/dts-v1/;
+#include "rk3399-nanopi-m4.dts"
+
+/ {
+ model = "FriendlyElec NanoPi M4B";
+ compatible = "friendlyarm,nanopi-m4b", "rockchip,rk3399";
+
+ adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 1>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1500000>;
+ poll-interval = <100>;
+
+ recovery {
+ label = "Recovery";
+ linux,code = <KEY_VENDOR>;
+ press-threshold-microvolt = <18000>;
+ };
+ };
+};
+
+/* No USB type-C PD power manager */
+/delete-node/ &fusb0;
+
+&i2c4 {
+ status = "disabled";
+};
+
+&u2phy0_host {
+ phy-supply = <&vcc5v0_usb2>;
+};
+
+&u2phy0_otg {
+ phy-supply = <&vbus_typec>;
+};
+
+&u2phy1_otg {
+ phy-supply = <&vcc5v0_usb1>;
+};
+
+&vbus_typec {
+ enable-active-high;
+ gpios = <&gpio4 RK_PD2 GPIO_ACTIVE_HIGH>;
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
index 76a8b40a93c6..48ed4aaa37f3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
@@ -504,7 +504,6 @@
};
&pcie0 {
- ep-gpios = <&gpio2 RK_PA4 GPIO_ACTIVE_HIGH>;
max-link-speed = <2>;
num-lanes = <2>;
vpcie0v9-supply = <&vcca0v9_s3>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
index 06d48338c836..219b7507a10f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
@@ -790,7 +790,6 @@
&pcie0 {
bus-scan-delay-ms = <1000>;
ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
- max-link-speed = <2>;
num-lanes = <4>;
pinctrl-names = "default";
pinctrl-0 = <&pcie_clkreqn_cpm>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dts
index c88295782e7b..1a23e8f3cdf6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dts
@@ -63,6 +63,20 @@
};
+&cpu_alert0 {
+ temperature = <65000>;
+};
+
+&cpu_thermal {
+ sustainable-power = <1550>;
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert1>;
+ };
+ };
+};
+
&pcie0 {
ep-gpios = <&gpio2 RK_PA2 GPIO_ACTIVE_HIGH>;
};
@@ -125,45 +139,6 @@
status = "okay";
};
-&thermal_zones {
- cpu_thermal: cpu {
- polling-delay-passive = <100>;
- polling-delay = <1000>;
- thermal-sensors = <&tsadc 0>;
- sustainable-power = <1550>;
-
- trips {
- cpu_alert0: cpu_alert0 {
- temperature = <65000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- cpu_alert1: cpu_alert1 {
- temperature = <75000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- cpu_crit: cpu_crit {
- temperature = <95000>;
- hysteresis = <2000>;
- type = "critical";
- };
- };
-
- cooling-maps {
- map0 {
-
- trip = <&cpu_alert1>;
- cooling-device =
- <&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu_b1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
- };
- };
- };
-};
-
&usbdrd_dwc3_0 {
dr_mode = "otg";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
index 6e553ff47534..5ab0b9edfc88 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
@@ -72,10 +72,27 @@
sound {
compatible = "audio-graph-card";
- label = "rockchip,rk3399";
+ label = "Analog";
dais = <&i2s1_p0>;
};
+ sound-dit {
+ compatible = "audio-graph-card";
+ label = "SPDIF";
+ dais = <&spdif_p0>;
+ };
+
+ spdif-dit {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+
+ port {
+ dit_p0_0: endpoint {
+ remote-endpoint = <&spdif_p0_0>;
+ };
+ };
+ };
+
vcc12v_dcin: vcc12v-dcin {
compatible = "regulator-fixed";
regulator-name = "vcc12v_dcin";
@@ -698,6 +715,16 @@
status = "okay";
};
+&spdif {
+ pinctrl-0 = <&spdif_bus_1>;
+
+ spdif_p0: port {
+ spdif_p0_0: endpoint {
+ remote-endpoint = <&dit_p0_0>;
+ };
+ };
+};
+
&spi1 {
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index f5dee5f447bb..edbbf35fe19e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -200,40 +200,12 @@
#clock-cells = <0>;
};
- amba: bus {
- compatible = "simple-bus";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- dmac_bus: dma-controller@ff6d0000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x0 0xff6d0000 0x0 0x4000>;
- interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH 0>;
- #dma-cells = <1>;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC0_PERILP>;
- clock-names = "apb_pclk";
- };
-
- dmac_peri: dma-controller@ff6e0000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x0 0xff6e0000 0x0 0x4000>;
- interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH 0>;
- #dma-cells = <1>;
- arm,pl330-periph-burst;
- clocks = <&cru ACLK_DMAC1_PERILP>;
- clock-names = "apb_pclk";
- };
- };
-
pcie0: pcie@f8000000 {
compatible = "rockchip,rk3399-pcie";
reg = <0x0 0xf8000000 0x0 0x2000000>,
<0x0 0xfd000000 0x0 0x1000000>;
reg-names = "axi-base", "apb-base";
+ device_type = "pci";
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
@@ -252,15 +224,14 @@
<0 0 0 2 &pcie0_intc 1>,
<0 0 0 3 &pcie0_intc 2>,
<0 0 0 4 &pcie0_intc 3>;
- linux,pci-domain = <0>;
max-link-speed = <1>;
msi-map = <0x0 &its 0x0 0x1000>;
phys = <&pcie_phy 0>, <&pcie_phy 1>,
<&pcie_phy 2>, <&pcie_phy 3>;
phy-names = "pcie-phy-0", "pcie-phy-1",
"pcie-phy-2", "pcie-phy-3";
- ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x1e00000
- 0x81000000 0x0 0xfbe00000 0x0 0xfbe00000 0x0 0x100000>;
+ ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x1e00000>,
+ <0x81000000 0x0 0xfbe00000 0x0 0xfbe00000 0x0 0x100000>;
resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
<&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
<&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
@@ -764,7 +735,7 @@
};
thermal_zones: thermal-zones {
- cpu_thermal: cpu {
+ cpu_thermal: cpu-thermal {
polling-delay-passive = <100>;
polling-delay = <1000>;
@@ -808,7 +779,7 @@
};
};
- gpu_thermal: gpu {
+ gpu_thermal: gpu-thermal {
polling-delay-passive = <100>;
polling-delay = <1000>;
@@ -858,127 +829,127 @@
};
qos_emmc: qos@ffa58000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffa58000 0x0 0x20>;
};
qos_gmac: qos@ffa5c000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffa5c000 0x0 0x20>;
};
qos_pcie: qos@ffa60080 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffa60080 0x0 0x20>;
};
qos_usb_host0: qos@ffa60100 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffa60100 0x0 0x20>;
};
qos_usb_host1: qos@ffa60180 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffa60180 0x0 0x20>;
};
qos_usb_otg0: qos@ffa70000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffa70000 0x0 0x20>;
};
qos_usb_otg1: qos@ffa70080 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffa70080 0x0 0x20>;
};
qos_sd: qos@ffa74000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffa74000 0x0 0x20>;
};
qos_sdioaudio: qos@ffa76000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffa76000 0x0 0x20>;
};
qos_hdcp: qos@ffa90000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffa90000 0x0 0x20>;
};
qos_iep: qos@ffa98000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffa98000 0x0 0x20>;
};
qos_isp0_m0: qos@ffaa0000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffaa0000 0x0 0x20>;
};
qos_isp0_m1: qos@ffaa0080 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffaa0080 0x0 0x20>;
};
qos_isp1_m0: qos@ffaa8000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffaa8000 0x0 0x20>;
};
qos_isp1_m1: qos@ffaa8080 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffaa8080 0x0 0x20>;
};
qos_rga_r: qos@ffab0000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffab0000 0x0 0x20>;
};
qos_rga_w: qos@ffab0080 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffab0080 0x0 0x20>;
};
qos_video_m0: qos@ffab8000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffab8000 0x0 0x20>;
};
qos_video_m1_r: qos@ffac0000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffac0000 0x0 0x20>;
};
qos_video_m1_w: qos@ffac0080 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffac0080 0x0 0x20>;
};
qos_vop_big_r: qos@ffac8000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffac8000 0x0 0x20>;
};
qos_vop_big_w: qos@ffac8080 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffac8080 0x0 0x20>;
};
qos_vop_little: qos@ffad0000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffad0000 0x0 0x20>;
};
qos_perihp: qos@ffad8080 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffad8080 0x0 0x20>;
};
qos_gpu: qos@ffae0000 {
- compatible = "syscon";
+ compatible = "rockchip,rk3399-qos", "syscon";
reg = <0x0 0xffae0000 0x0 0x20>;
};
@@ -1278,7 +1249,6 @@
compatible = "rockchip,rk3399-vdec";
reg = <0x0 0xff660000 0x0 0x400>;
interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "vdpu";
clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
<&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
clock-names = "axi", "ahb", "cabac", "core";
@@ -1351,6 +1321,28 @@
};
};
+ dmac_bus: dma-controller@ff6d0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xff6d0000 0x0 0x4000>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH 0>;
+ #dma-cells = <1>;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC0_PERILP>;
+ clock-names = "apb_pclk";
+ };
+
+ dmac_peri: dma-controller@ff6e0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xff6e0000 0x0 0x4000>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH 0>;
+ #dma-cells = <1>;
+ arm,pl330-periph-burst;
+ clocks = <&cru ACLK_DMAC1_PERILP>;
+ clock-names = "apb_pclk";
+ };
+
pmucru: pmu-clock-controller@ff750000 {
compatible = "rockchip,rk3399-pmucru";
reg = <0x0 0xff750000 0x0 0x1000>;
diff --git a/arch/arm64/boot/dts/synaptics/as370.dtsi b/arch/arm64/boot/dts/synaptics/as370.dtsi
index addeb0efc616..4bb5d650df9c 100644
--- a/arch/arm64/boot/dts/synaptics/as370.dtsi
+++ b/arch/arm64/boot/dts/synaptics/as370.dtsi
@@ -143,7 +143,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -161,7 +161,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm64/boot/dts/synaptics/berlin4ct.dtsi b/arch/arm64/boot/dts/synaptics/berlin4ct.dtsi
index 15625b99e336..0949acee4728 100644
--- a/arch/arm64/boot/dts/synaptics/berlin4ct.dtsi
+++ b/arch/arm64/boot/dts/synaptics/berlin4ct.dtsi
@@ -140,7 +140,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -158,7 +158,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -176,7 +176,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -194,7 +194,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -269,7 +269,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
};
};
@@ -284,7 +284,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index 12591a854020..ceb579fb427d 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -256,7 +256,7 @@
#size-cells = <0>;
};
- sdhci0: sdhci@4f80000 {
+ sdhci0: mmc@4f80000 {
compatible = "ti,am654-sdhci-5.1";
reg = <0x0 0x4f80000 0x0 0x260>, <0x0 0x4f90000 0x0 0x134>;
power-domains = <&k3_pds 47 TI_SCI_PD_EXCLUSIVE>;
@@ -280,7 +280,7 @@
dma-coherent;
};
- sdhci1: sdhci@4fa0000 {
+ sdhci1: mmc@4fa0000 {
compatible = "ti,am654-sdhci-5.1";
reg = <0x0 0x4fa0000 0x0 0x260>, <0x0 0x4fb0000 0x0 0x134>;
power-domains = <&k3_pds 48 TI_SCI_PD_EXCLUSIVE>;
diff --git a/arch/arm64/boot/dts/ti/k3-am65.dtsi b/arch/arm64/boot/dts/ti/k3-am65.dtsi
index d84c0bc05023..a9fc1af03f27 100644
--- a/arch/arm64/boot/dts/ti/k3-am65.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65.dtsi
@@ -56,7 +56,7 @@
};
pmu: pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu";
/* Recommendation from GIC500 TRM Table A.3 */
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
index 331b388e1d1b..4a7182abccf5 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
@@ -6,8 +6,10 @@
/dts-v1/;
#include "k3-j7200-som-p0.dtsi"
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/net/ti-dp83867.h>
#include <dt-bindings/mux/ti-serdes.h>
+#include <dt-bindings/phy/phy.h>
/ {
chosen {
@@ -218,3 +220,39 @@
ti,adc-channels = <0 1 2 3 4 5 6 7>;
};
};
+
+&serdes_refclk {
+ clock-frequency = <100000000>;
+};
+
+&serdes0 {
+ serdes0_pcie_link: phy@0 {
+ reg = <0>;
+ cdns,num-lanes = <2>;
+ #phy-cells = <0>;
+ cdns,phy-type = <PHY_TYPE_PCIE>;
+ resets = <&serdes_wiz0 1>, <&serdes_wiz0 2>;
+ };
+
+ serdes0_qsgmii_link: phy@1 {
+ reg = <2>;
+ cdns,num-lanes = <1>;
+ #phy-cells = <0>;
+ cdns,phy-type = <PHY_TYPE_QSGMII>;
+ resets = <&serdes_wiz0 3>;
+ };
+};
+
+&pcie1_rc {
+ reset-gpios = <&exp1 2 GPIO_ACTIVE_HIGH>;
+ phys = <&serdes0_pcie_link>;
+ phy-names = "pcie-phy";
+ num-lanes = <2>;
+};
+
+&pcie1_ep {
+ phys = <&serdes0_pcie_link>;
+ phy-names = "pcie-phy";
+ num-lanes = <2>;
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
index b0094212aa82..17477ab0fd8e 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
@@ -2,9 +2,16 @@
/*
* Device Tree Source for J7200 SoC Family Main Domain peripherals
*
- * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com/
*/
+/ {
+ serdes_refclk: serdes-refclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ };
+};
+
&cbass_main {
msmc_ram: sram@70000000 {
compatible = "mmio-sram";
@@ -499,8 +506,8 @@
reg = <0x00 0x04f80000 0x00 0x260>, <0x00 0x4f88000 0x00 0x134>;
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&k3_pds 91 TI_SCI_PD_EXCLUSIVE>;
- clock-names = "clk_xin", "clk_ahb";
- clocks = <&k3_clks 91 3>, <&k3_clks 91 0>;
+ clock-names = "clk_ahb", "clk_xin";
+ clocks = <&k3_clks 91 0>, <&k3_clks 91 3>;
ti,otap-del-sel-legacy = <0x0>;
ti,otap-del-sel-mmc-hs = <0x0>;
ti,otap-del-sel-ddr52 = <0x6>;
@@ -518,8 +525,8 @@
reg = <0x00 0x04fb0000 0x00 0x260>, <0x00 0x4fb8000 0x00 0x134>;
interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&k3_pds 92 TI_SCI_PD_EXCLUSIVE>;
- clock-names = "clk_xin", "clk_ahb";
- clocks = <&k3_clks 92 2>, <&k3_clks 92 1>;
+ clock-names = "clk_ahb", "clk_xin";
+ clocks = <&k3_clks 92 1>, <&k3_clks 92 2>;
ti,otap-del-sel-legacy = <0x0>;
ti,otap-del-sel-sd-hs = <0x0>;
ti,otap-del-sel-sdr12 = <0xf>;
@@ -531,6 +538,110 @@
dma-coherent;
};
+ serdes_wiz0: wiz@5060000 {
+ compatible = "ti,j721e-wiz-10g";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ power-domains = <&k3_pds 292 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 292 11>, <&k3_clks 292 85>, <&serdes_refclk>;
+ clock-names = "fck", "core_ref_clk", "ext_ref_clk";
+ num-lanes = <4>;
+ #reset-cells = <1>;
+ ranges = <0x5060000 0x0 0x5060000 0x10000>;
+
+ assigned-clocks = <&k3_clks 292 85>;
+ assigned-clock-parents = <&k3_clks 292 89>;
+
+ wiz0_pll0_refclk: pll0-refclk {
+ clocks = <&k3_clks 292 85>, <&serdes_refclk>;
+ clock-output-names = "wiz0_pll0_refclk";
+ #clock-cells = <0>;
+ assigned-clocks = <&wiz0_pll0_refclk>;
+ assigned-clock-parents = <&k3_clks 292 85>;
+ };
+
+ wiz0_pll1_refclk: pll1-refclk {
+ clocks = <&k3_clks 292 85>, <&serdes_refclk>;
+ clock-output-names = "wiz0_pll1_refclk";
+ #clock-cells = <0>;
+ assigned-clocks = <&wiz0_pll1_refclk>;
+ assigned-clock-parents = <&k3_clks 292 85>;
+ };
+
+ wiz0_refclk_dig: refclk-dig {
+ clocks = <&k3_clks 292 85>, <&serdes_refclk>;
+ clock-output-names = "wiz0_refclk_dig";
+ #clock-cells = <0>;
+ assigned-clocks = <&wiz0_refclk_dig>;
+ assigned-clock-parents = <&k3_clks 292 85>;
+ };
+
+ wiz0_cmn_refclk_dig_div: cmn-refclk-dig-div {
+ clocks = <&wiz0_refclk_dig>;
+ #clock-cells = <0>;
+ };
+
+ serdes0: serdes@5060000 {
+ compatible = "ti,j721e-serdes-10g";
+ reg = <0x05060000 0x00010000>;
+ reg-names = "torrent_phy";
+ resets = <&serdes_wiz0 0>;
+ reset-names = "torrent_reset";
+ clocks = <&wiz0_pll0_refclk>;
+ clock-names = "refclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ pcie1_rc: pcie@2910000 {
+ compatible = "ti,j7200-pcie-host", "ti,j721e-pcie-host";
+ reg = <0x00 0x02910000 0x00 0x1000>,
+ <0x00 0x02917000 0x00 0x400>,
+ <0x00 0x0d800000 0x00 0x00800000>,
+ <0x00 0x18000000 0x00 0x00001000>;
+ reg-names = "intd_cfg", "user_cfg", "reg", "cfg";
+ interrupt-names = "link_state";
+ interrupts = <GIC_SPI 330 IRQ_TYPE_EDGE_RISING>;
+ device_type = "pci";
+ ti,syscon-pcie-ctrl = <&scm_conf 0x4074>;
+ max-link-speed = <3>;
+ num-lanes = <4>;
+ power-domains = <&k3_pds 240 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 240 6>;
+ clock-names = "fck";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x0 0xf>;
+ cdns,no-bar-match-nbits = <64>;
+ vendor-id = /bits/ 16 <0x104c>;
+ device-id = /bits/ 16 <0xb00f>;
+ msi-map = <0x0 &gic_its 0x0 0x10000>;
+ dma-coherent;
+ ranges = <0x01000000 0x0 0x18001000 0x00 0x18001000 0x0 0x0010000>,
+ <0x02000000 0x0 0x18011000 0x00 0x18011000 0x0 0x7fef000>;
+ dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x10000 0x0>;
+ };
+
+ pcie1_ep: pcie-ep@2910000 {
+ compatible = "ti,j7200-pcie-ep", "ti,j721e-pcie-ep";
+ reg = <0x00 0x02910000 0x00 0x1000>,
+ <0x00 0x02917000 0x00 0x400>,
+ <0x00 0x0d800000 0x00 0x00800000>,
+ <0x00 0x18000000 0x00 0x08000000>;
+ reg-names = "intd_cfg", "user_cfg", "reg", "mem";
+ interrupt-names = "link_state";
+ interrupts = <GIC_SPI 330 IRQ_TYPE_EDGE_RISING>;
+ ti,syscon-pcie-ctrl = <&scm_conf 0x4074>;
+ max-link-speed = <3>;
+ num-lanes = <4>;
+ power-domains = <&k3_pds 240 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 240 6>;
+ clock-names = "fck";
+ max-functions = /bits/ 8 <6>;
+ dma-coherent;
+ };
+
usbss0: cdns-usb@4104000 {
compatible = "ti,j721e-usb";
reg = <0x00 0x4104000 0x00 0x100>;
@@ -560,4 +671,44 @@
dr_mode = "otg";
};
};
+
+ main_r5fss0: r5fss@5c00000 {
+ compatible = "ti,j7200-r5fss";
+ ti,cluster-mode = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x5c00000 0x00 0x5c00000 0x20000>,
+ <0x5d00000 0x00 0x5d00000 0x20000>;
+ power-domains = <&k3_pds 243 TI_SCI_PD_EXCLUSIVE>;
+
+ main_r5fss0_core0: r5f@5c00000 {
+ compatible = "ti,j7200-r5f";
+ reg = <0x5c00000 0x00010000>,
+ <0x5c10000 0x00010000>;
+ reg-names = "atcm", "btcm";
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <245>;
+ ti,sci-proc-ids = <0x06 0xff>;
+ resets = <&k3_reset 245 1>;
+ firmware-name = "j7200-main-r5f0_0-fw";
+ ti,atcm-enable = <1>;
+ ti,btcm-enable = <1>;
+ ti,loczrama = <1>;
+ };
+
+ main_r5fss0_core1: r5f@5d00000 {
+ compatible = "ti,j7200-r5f";
+ reg = <0x5d00000 0x00008000>,
+ <0x5d10000 0x00008000>;
+ reg-names = "atcm", "btcm";
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <246>;
+ ti,sci-proc-ids = <0x07 0xff>;
+ resets = <&k3_reset 246 1>;
+ firmware-name = "j7200-main-r5f0_1-fw";
+ ti,atcm-enable = <1>;
+ ti,btcm-enable = <1>;
+ ti,loczrama = <1>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
index bb1fe9c12e44..359e3e8a8cd0 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
@@ -2,7 +2,7 @@
/*
* Device Tree Source for J7200 SoC Family MCU/WAKEUP Domain peripherals
*
- * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com/
*/
&cbass_mcu_wakeup {
@@ -289,4 +289,44 @@
compatible = "ti,am3359-adc";
};
};
+
+ mcu_r5fss0: r5fss@41000000 {
+ compatible = "ti,j7200-r5fss";
+ ti,cluster-mode = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x41000000 0x00 0x41000000 0x20000>,
+ <0x41400000 0x00 0x41400000 0x20000>;
+ power-domains = <&k3_pds 249 TI_SCI_PD_EXCLUSIVE>;
+
+ mcu_r5fss0_core0: r5f@41000000 {
+ compatible = "ti,j7200-r5f";
+ reg = <0x41000000 0x00010000>,
+ <0x41010000 0x00010000>;
+ reg-names = "atcm", "btcm";
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <250>;
+ ti,sci-proc-ids = <0x01 0xff>;
+ resets = <&k3_reset 250 1>;
+ firmware-name = "j7200-mcu-r5f0_0-fw";
+ ti,atcm-enable = <1>;
+ ti,btcm-enable = <1>;
+ ti,loczrama = <1>;
+ };
+
+ mcu_r5fss0_core1: r5f@41400000 {
+ compatible = "ti,j7200-r5f";
+ reg = <0x41400000 0x00008000>,
+ <0x41410000 0x00008000>;
+ reg-names = "atcm", "btcm";
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <251>;
+ ti,sci-proc-ids = <0x02 0xff>;
+ resets = <&k3_reset 251 1>;
+ firmware-name = "j7200-mcu-r5f0_1-fw";
+ ti,atcm-enable = <1>;
+ ti,btcm-enable = <1>;
+ ti,loczrama = <1>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi
index 7b5e9aa0324e..a988e2ab2ba1 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com/
*/
/dts-v1/;
@@ -25,6 +25,60 @@
alignment = <0x1000>;
no-map;
};
+
+ mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@a0000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa0000000 0x00 0x100000>;
+ no-map;
+ };
+
+ mcu_r5fss0_core0_memory_region: r5f-memory@a0100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa0100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@a1000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa1000000 0x00 0x100000>;
+ no-map;
+ };
+
+ mcu_r5fss0_core1_memory_region: r5f-memory@a1100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa1100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ main_r5fss0_core0_dma_memory_region: r5f-dma-memory@a2000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa2000000 0x00 0x100000>;
+ no-map;
+ };
+
+ main_r5fss0_core0_memory_region: r5f-memory@a2100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa2100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ main_r5fss0_core1_dma_memory_region: r5f-dma-memory@a3000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa3000000 0x00 0x100000>;
+ no-map;
+ };
+
+ main_r5fss0_core1_memory_region: r5f-memory@a3100000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa3100000 0x00 0xf00000>;
+ no-map;
+ };
+
+ rtos_ipc_memory_region: ipc-memories@a4000000 {
+ reg = <0x00 0xa4000000 0x00 0x00800000>;
+ alignment = <0x1000>;
+ no-map;
+ };
};
};
@@ -141,6 +195,30 @@
status = "disabled";
};
+&mcu_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster0 &mbox_mcu_r5fss0_core0>;
+ memory-region = <&mcu_r5fss0_core0_dma_memory_region>,
+ <&mcu_r5fss0_core0_memory_region>;
+};
+
+&mcu_r5fss0_core1 {
+ mboxes = <&mailbox0_cluster0 &mbox_mcu_r5fss0_core1>;
+ memory-region = <&mcu_r5fss0_core1_dma_memory_region>,
+ <&mcu_r5fss0_core1_memory_region>;
+};
+
+&main_r5fss0_core0 {
+ mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core0>;
+ memory-region = <&main_r5fss0_core0_dma_memory_region>,
+ <&main_r5fss0_core0_memory_region>;
+};
+
+&main_r5fss0_core1 {
+ mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core1>;
+ memory-region = <&main_r5fss0_core1_dma_memory_region>,
+ <&main_r5fss0_core1_memory_region>;
+};
+
&main_i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&main_i2c0_pins_default>;
diff --git a/arch/arm64/boot/dts/ti/k3-j7200.dtsi b/arch/arm64/boot/dts/ti/k3-j7200.dtsi
index 66169bcf7c9a..b7005b803149 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200.dtsi
@@ -114,7 +114,7 @@
};
pmu: pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a72-pmu";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
index b32df591c766..8c84dafb7125 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
@@ -28,38 +28,6 @@
#size-cells = <1>;
ranges = <0x0 0x0 0x00100000 0x1c000>;
- pcie0_ctrl: syscon@4070 {
- compatible = "ti,j721e-system-controller", "syscon", "simple-mfd";
- reg = <0x00004070 0x4>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x4070 0x4070 0x4>;
- };
-
- pcie1_ctrl: syscon@4074 {
- compatible = "ti,j721e-system-controller", "syscon", "simple-mfd";
- reg = <0x00004074 0x4>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x4074 0x4074 0x4>;
- };
-
- pcie2_ctrl: syscon@4078 {
- compatible = "ti,j721e-system-controller", "syscon", "simple-mfd";
- reg = <0x00004078 0x4>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x4078 0x4078 0x4>;
- };
-
- pcie3_ctrl: syscon@407c {
- compatible = "ti,j721e-system-controller", "syscon", "simple-mfd";
- reg = <0x0000407c 0x4>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x407c 0x407c 0x4>;
- };
-
serdes_ln_ctrl: mux@4080 {
compatible = "mmio-mux";
reg = <0x00004080 0x50>;
@@ -618,7 +586,7 @@
interrupt-names = "link_state";
interrupts = <GIC_SPI 318 IRQ_TYPE_EDGE_RISING>;
device_type = "pci";
- ti,syscon-pcie-ctrl = <&pcie0_ctrl>;
+ ti,syscon-pcie-ctrl = <&scm_conf 0x4070>;
max-link-speed = <3>;
num-lanes = <2>;
power-domains = <&k3_pds 239 TI_SCI_PD_EXCLUSIVE>;
@@ -645,13 +613,12 @@
reg-names = "intd_cfg", "user_cfg", "reg", "mem";
interrupt-names = "link_state";
interrupts = <GIC_SPI 318 IRQ_TYPE_EDGE_RISING>;
- ti,syscon-pcie-ctrl = <&pcie0_ctrl>;
+ ti,syscon-pcie-ctrl = <&scm_conf 0x4070>;
max-link-speed = <3>;
num-lanes = <2>;
power-domains = <&k3_pds 239 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 239 1>;
clock-names = "fck";
- cdns,max-outbound-regions = <16>;
max-functions = /bits/ 8 <6>;
max-virtual-functions = /bits/ 16 <4 4 4 4 0 0>;
dma-coherent;
@@ -667,7 +634,7 @@
interrupt-names = "link_state";
interrupts = <GIC_SPI 330 IRQ_TYPE_EDGE_RISING>;
device_type = "pci";
- ti,syscon-pcie-ctrl = <&pcie1_ctrl>;
+ ti,syscon-pcie-ctrl = <&scm_conf 0x4074>;
max-link-speed = <3>;
num-lanes = <2>;
power-domains = <&k3_pds 240 TI_SCI_PD_EXCLUSIVE>;
@@ -694,13 +661,12 @@
reg-names = "intd_cfg", "user_cfg", "reg", "mem";
interrupt-names = "link_state";
interrupts = <GIC_SPI 330 IRQ_TYPE_EDGE_RISING>;
- ti,syscon-pcie-ctrl = <&pcie1_ctrl>;
+ ti,syscon-pcie-ctrl = <&scm_conf 0x4074>;
max-link-speed = <3>;
num-lanes = <2>;
power-domains = <&k3_pds 240 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 240 1>;
clock-names = "fck";
- cdns,max-outbound-regions = <16>;
max-functions = /bits/ 8 <6>;
max-virtual-functions = /bits/ 16 <4 4 4 4 0 0>;
dma-coherent;
@@ -716,7 +682,7 @@
interrupt-names = "link_state";
interrupts = <GIC_SPI 342 IRQ_TYPE_EDGE_RISING>;
device_type = "pci";
- ti,syscon-pcie-ctrl = <&pcie2_ctrl>;
+ ti,syscon-pcie-ctrl = <&scm_conf 0x4078>;
max-link-speed = <3>;
num-lanes = <2>;
power-domains = <&k3_pds 241 TI_SCI_PD_EXCLUSIVE>;
@@ -743,13 +709,12 @@
reg-names = "intd_cfg", "user_cfg", "reg", "mem";
interrupt-names = "link_state";
interrupts = <GIC_SPI 342 IRQ_TYPE_EDGE_RISING>;
- ti,syscon-pcie-ctrl = <&pcie2_ctrl>;
+ ti,syscon-pcie-ctrl = <&scm_conf 0x4078>;
max-link-speed = <3>;
num-lanes = <2>;
power-domains = <&k3_pds 241 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 241 1>;
clock-names = "fck";
- cdns,max-outbound-regions = <16>;
max-functions = /bits/ 8 <6>;
max-virtual-functions = /bits/ 16 <4 4 4 4 0 0>;
dma-coherent;
@@ -765,7 +730,7 @@
interrupt-names = "link_state";
interrupts = <GIC_SPI 354 IRQ_TYPE_EDGE_RISING>;
device_type = "pci";
- ti,syscon-pcie-ctrl = <&pcie3_ctrl>;
+ ti,syscon-pcie-ctrl = <&scm_conf 0x407c>;
max-link-speed = <3>;
num-lanes = <2>;
power-domains = <&k3_pds 242 TI_SCI_PD_EXCLUSIVE>;
@@ -792,13 +757,12 @@
reg-names = "intd_cfg", "user_cfg", "reg", "mem";
interrupt-names = "link_state";
interrupts = <GIC_SPI 354 IRQ_TYPE_EDGE_RISING>;
- ti,syscon-pcie-ctrl = <&pcie3_ctrl>;
+ ti,syscon-pcie-ctrl = <&scm_conf 0x407c>;
max-link-speed = <3>;
num-lanes = <2>;
power-domains = <&k3_pds 242 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 242 1>;
clock-names = "fck";
- cdns,max-outbound-regions = <16>;
max-functions = /bits/ 8 <6>;
max-virtual-functions = /bits/ 16 <4 4 4 4 0 0>;
dma-coherent;
@@ -1068,13 +1032,13 @@
clock-names = "gpio";
};
- main_sdhci0: sdhci@4f80000 {
+ main_sdhci0: mmc@4f80000 {
compatible = "ti,j721e-sdhci-8bit";
reg = <0x0 0x4f80000 0x0 0x1000>, <0x0 0x4f88000 0x0 0x400>;
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&k3_pds 91 TI_SCI_PD_EXCLUSIVE>;
- clock-names = "clk_xin", "clk_ahb";
- clocks = <&k3_clks 91 1>, <&k3_clks 91 0>;
+ clock-names = "clk_ahb", "clk_xin";
+ clocks = <&k3_clks 91 0>, <&k3_clks 91 1>;
assigned-clocks = <&k3_clks 91 1>;
assigned-clock-parents = <&k3_clks 91 2>;
bus-width = <8>;
@@ -1090,13 +1054,13 @@
dma-coherent;
};
- main_sdhci1: sdhci@4fb0000 {
+ main_sdhci1: mmc@4fb0000 {
compatible = "ti,j721e-sdhci-4bit";
reg = <0x0 0x04fb0000 0x0 0x1000>, <0x0 0x4fb8000 0x0 0x400>;
interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&k3_pds 92 TI_SCI_PD_EXCLUSIVE>;
- clock-names = "clk_xin", "clk_ahb";
- clocks = <&k3_clks 92 0>, <&k3_clks 92 5>;
+ clock-names = "clk_ahb", "clk_xin";
+ clocks = <&k3_clks 92 5>, <&k3_clks 92 0>;
assigned-clocks = <&k3_clks 92 0>;
assigned-clock-parents = <&k3_clks 92 1>;
ti,otap-del-sel-legacy = <0x0>;
@@ -1110,13 +1074,13 @@
dma-coherent;
};
- main_sdhci2: sdhci@4f98000 {
+ main_sdhci2: mmc@4f98000 {
compatible = "ti,j721e-sdhci-4bit";
reg = <0x0 0x4f98000 0x0 0x1000>, <0x0 0x4f90000 0x0 0x400>;
interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&k3_pds 93 TI_SCI_PD_EXCLUSIVE>;
- clock-names = "clk_xin", "clk_ahb";
- clocks = <&k3_clks 93 0>, <&k3_clks 93 5>;
+ clock-names = "clk_ahb", "clk_xin";
+ clocks = <&k3_clks 93 5>, <&k3_clks 93 0>;
assigned-clocks = <&k3_clks 93 0>;
assigned-clock-parents = <&k3_clks 93 1>;
ti,otap-del-sel-legacy = <0x0>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e.dtsi b/arch/arm64/boot/dts/ti/k3-j721e.dtsi
index cc483f7344af..f0587fde147e 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e.dtsi
@@ -115,7 +115,7 @@
};
pmu: pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a72-pmu";
/* Recommendation from GIC500 TRM Table A.3 */
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm64/boot/dts/toshiba/tmpv7708-rm-mbrc.dts b/arch/arm64/boot/dts/toshiba/tmpv7708-rm-mbrc.dts
index ed0bf7f13f54..bf0620afe117 100644
--- a/arch/arm64/boot/dts/toshiba/tmpv7708-rm-mbrc.dts
+++ b/arch/arm64/boot/dts/toshiba/tmpv7708-rm-mbrc.dts
@@ -41,3 +41,30 @@
clocks = <&uart_clk>;
clock-names = "apb_pclk";
};
+
+&piether {
+ status = "okay";
+ phy-handle = <&phy0>;
+ phy-mode = "rgmii-id";
+ clocks = <&clk300mhz>, <&clk125mhz>;
+ clock-names = "stmmaceth", "phy_ref_clk";
+
+ mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy0: ethernet-phy@1 {
+ device_type = "ethernet-phy";
+ reg = <0x1>;
+ };
+ };
+};
+
+&wdt {
+ status = "okay";
+ clocks = <&wdt_clk>;
+};
+
+&gpio {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/toshiba/tmpv7708.dtsi b/arch/arm64/boot/dts/toshiba/tmpv7708.dtsi
index 242f25f4e12a..17934fd9a14c 100644
--- a/arch/arm64/boot/dts/toshiba/tmpv7708.dtsi
+++ b/arch/arm64/boot/dts/toshiba/tmpv7708.dtsi
@@ -134,6 +134,26 @@
#clock-cells = <0>;
};
+ clk125mhz: clk125mhz {
+ compatible = "fixed-clock";
+ clock-frequency = <125000000>;
+ #clock-cells = <0>;
+ clock-output-names = "clk125mhz";
+ };
+
+ clk300mhz: clk300mhz {
+ compatible = "fixed-clock";
+ clock-frequency = <300000000>;
+ #clock-cells = <0>;
+ clock-output-names = "clk300mhz";
+ };
+
+ wdt_clk: wdt-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <150000000>;
+ #clock-cells = <0>;
+ };
+
soc {
#address-cells = <2>;
#size-cells = <2>;
@@ -157,6 +177,17 @@
reg = <0 0x24190000 0 0x10000>;
};
+ gpio: gpio@28020000 {
+ compatible = "toshiba,gpio-tmpv7708";
+ reg = <0 0x28020000 0 0x1000>;
+ #gpio-cells = <0x2>;
+ gpio-ranges = <&pmux 0 0 32>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&gic>;
+ };
+
uart0: serial@28200000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0 0x28200000 0 0x1000>;
@@ -384,6 +415,23 @@
#size-cells = <0>;
status = "disabled";
};
+
+ piether: ethernet@28000000 {
+ compatible = "toshiba,visconti-dwmac", "snps,dwmac-4.20a";
+ reg = <0 0x28000000 0 0x10000>;
+ interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ snps,txpbl = <4>;
+ snps,rxpbl = <4>;
+ snps,tso;
+ status = "disabled";
+ };
+
+ wdt: wdt@28330000 {
+ compatible = "toshiba,visconti-wdt";
+ reg = <0 0x28330000 0 0x1000>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm64/boot/dts/xilinx/Makefile b/arch/arm64/boot/dts/xilinx/Makefile
index 60f5443f3ef4..11fb4fd3ebd4 100644
--- a/arch/arm64/boot/dts/xilinx/Makefile
+++ b/arch/arm64/boot/dts/xilinx/Makefile
@@ -13,5 +13,6 @@ dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu102-revA.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu102-revB.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu102-rev1.0.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu104-revA.dtb
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu104-revC.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu106-revA.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu111-revA.dtb
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
index c94c3bb67edc..cf5295224750 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
@@ -116,6 +116,10 @@
clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
};
+&nand0 {
+ clocks = <&zynqmp_clk NAND_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
&gem0 {
clocks = <&zynqmp_clk LPD_LSBUS>, <&zynqmp_clk GEM0_REF>,
<&zynqmp_clk GEM0_TX>, <&zynqmp_clk GEM0_RX>,
@@ -160,6 +164,10 @@
clocks = <&zynqmp_clk PCIE_REF>;
};
+&qspi {
+ clocks = <&zynqmp_clk QSPI_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
&sata {
clocks = <&zynqmp_clk SATA_REF>;
};
@@ -215,3 +223,17 @@
&watchdog0 {
clocks = <&zynqmp_clk WDT>;
};
+
+&lpd_watchdog {
+ clocks = <&zynqmp_clk LPD_WDT>;
+};
+
+&zynqmp_dpdma {
+ clocks = <&zynqmp_clk DPDMA_REF>;
+};
+
+&zynqmp_dpsub {
+ clocks = <&zynqmp_clk TOPSW_LSBUS>,
+ <&zynqmp_clk DP_AUDIO_REF>,
+ <&zynqmp_clk DP_VIDEO_REF>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
index 68ecd0f7b2f2..a53598c3624b 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
@@ -15,6 +15,7 @@
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU100 RevC";
@@ -108,6 +109,18 @@
compatible = "iio-hwmon";
io-channels = <&u35 0>, <&u35 1>, <&u35 2>, <&u35 3>;
};
+
+ si5335a_0: clk26 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ };
+
+ si5335a_1: clk27 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
};
&dcc {
@@ -224,6 +237,13 @@
};
};
+&psgtr {
+ status = "okay";
+ /* usb3, dps */
+ clocks = <&si5335a_0>, <&si5335a_1>;
+ clock-names = "ref0", "ref1";
+};
+
&rtc {
status = "okay";
};
@@ -233,11 +253,13 @@
status = "okay";
no-1-8-v;
disable-wp;
+ xlnx,mio-bank = <0>;
};
&sdhci1 {
status = "okay";
bus-width = <0x4>;
+ xlnx,mio-bank = <0>;
non-removable;
disable-wp;
cap-power-off-card;
@@ -293,3 +315,14 @@
&watchdog0 {
status = "okay";
};
+
+&zynqmp_dpdma {
+ status = "okay";
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&psgtr 1 PHY_TYPE_DP 0 1>,
+ <&psgtr 0 PHY_TYPE_DP 1 1>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
index f1255f635dfd..12e8bd48dc8c 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
@@ -13,6 +13,7 @@
#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU102 RevA";
@@ -132,6 +133,19 @@
compatible = "iio-hwmon";
io-channels = <&u75 0>, <&u75 1>, <&u75 2>, <&u75 3>;
};
+
+ /* 48MHz reference crystal */
+ ref48: ref48M {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <48000000>;
+ };
+
+ refhdmi: refhdmi {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <114285000>;
+ };
};
&can1 {
@@ -483,9 +497,56 @@
#size-cells = <0>;
reg = <1>;
si5341: clock-generator@36 { /* SI5341 - u69 */
+ compatible = "silabs,si5341";
reg = <0x36>;
+ #clock-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&ref48>;
+ clock-names = "xtal";
+ clock-output-names = "si5341";
+
+ si5341_0: out@0 {
+ /* refclk0 for PS-GT, used for DP */
+ reg = <0>;
+ always-on;
+ };
+ si5341_2: out@2 {
+ /* refclk2 for PS-GT, used for USB3 */
+ reg = <2>;
+ always-on;
+ };
+ si5341_3: out@3 {
+ /* refclk3 for PS-GT, used for SATA */
+ reg = <3>;
+ always-on;
+ };
+ si5341_4: out@4 {
+ /* refclk4 for PS-GT, used for PCIE slot */
+ reg = <4>;
+ always-on;
+ };
+ si5341_5: out@5 {
+ /* refclk5 for PS-GT, used for PCIE */
+ reg = <5>;
+ always-on;
+ };
+ si5341_6: out@6 {
+ /* refclk6 PL CLK125 */
+ reg = <6>;
+ always-on;
+ };
+ si5341_7: out@7 {
+ /* refclk7 PL CLK74 */
+ reg = <7>;
+ always-on;
+ };
+ si5341_9: out@9 {
+ /* refclk9 used for PS_REF_CLK 33.3 MHz */
+ reg = <9>;
+ always-on;
+ };
};
-
};
i2c@2 {
#address-cells = <1>;
@@ -526,6 +587,17 @@
* interrupt-parent = <&>;
* interrupts = <>;
*/
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #clock-cells = <1>;
+ clocks = <&refhdmi>;
+ clock-names = "xtal";
+ clock-output-names = "si5328";
+
+ si5328_clk: clk0@0 {
+ reg = <0>;
+ clock-frequency = <27000000>;
+ };
};
};
/* 5 - 7 unconnected */
@@ -592,6 +664,13 @@
status = "okay";
};
+&psgtr {
+ status = "okay";
+ /* pcie, sata, usb3, dp */
+ clocks = <&si5341 0 5>, <&si5341 0 3>, <&si5341 0 2>, <&si5341 0 0>;
+ clock-names = "ref0", "ref1", "ref2", "ref3";
+};
+
&rtc {
status = "okay";
};
@@ -607,12 +686,15 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&psgtr 3 PHY_TYPE_SATA 1 1>;
};
/* SD1 with level shifter */
&sdhci1 {
status = "okay";
no-1-8-v;
+ xlnx,mio-bank = <1>;
};
&uart0 {
@@ -632,3 +714,13 @@
&watchdog0 {
status = "okay";
};
+
+&zynqmp_dpdma {
+ status = "okay";
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0";
+ phys = <&psgtr 1 PHY_TYPE_DP 0 3>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
index 7a4614e3f5fa..5637e1c17fdf 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
@@ -12,6 +12,7 @@
#include "zynqmp.dtsi"
#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU104 RevA";
@@ -36,6 +37,24 @@
device_type = "memory";
reg = <0x0 0x0 0x0 0x80000000>;
};
+
+ clock_8t49n287_5: clk125 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ };
+
+ clock_8t49n287_2: clk26 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ };
+
+ clock_8t49n287_3: clk27 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
};
&can1 {
@@ -158,6 +177,13 @@
status = "okay";
};
+&psgtr {
+ status = "okay";
+ /* nc, sata, usb3, dp */
+ clocks = <&clock_8t49n287_5>, <&clock_8t49n287_2>, <&clock_8t49n287_3>;
+ clock-names = "ref1", "ref2", "ref3";
+};
+
&sata {
status = "okay";
/* SATA OOB timing settings */
@@ -169,12 +195,15 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&psgtr 3 PHY_TYPE_SATA 1 1>;
};
/* SD1 with level shifter */
&sdhci1 {
status = "okay";
no-1-8-v;
+ xlnx,mio-bank = <1>;
disable-wp;
};
@@ -195,3 +224,14 @@
&watchdog0 {
status = "okay";
};
+
+&zynqmp_dpdma {
+ status = "okay";
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&psgtr 1 PHY_TYPE_DP 0 3>,
+ <&psgtr 0 PHY_TYPE_DP 1 3>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts
new file mode 100644
index 000000000000..7f2e32831b05
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dts file for Xilinx ZynqMP ZCU104
+ *
+ * (C) Copyright 2017 - 2020, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ */
+
+/dts-v1/;
+
+#include "zynqmp.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/phy/phy.h>
+
+/ {
+ model = "ZynqMP ZCU104 RevC";
+ compatible = "xlnx,zynqmp-zcu104-revC", "xlnx,zynqmp-zcu104", "xlnx,zynqmp";
+
+ aliases {
+ ethernet0 = &gem3;
+ i2c0 = &i2c1;
+ mmc0 = &sdhci1;
+ rtc0 = &rtc;
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &dcc;
+ };
+
+ chosen {
+ bootargs = "earlycon";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+
+ ina226 {
+ compatible = "iio-hwmon";
+ io-channels = <&u183 0>, <&u183 1>, <&u183 2>, <&u183 3>;
+ };
+
+ clock_8t49n287_5: clk125 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ };
+
+ clock_8t49n287_2: clk26 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ };
+
+ clock_8t49n287_3: clk27 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+};
+
+&can1 {
+ status = "okay";
+};
+
+&dcc {
+ status = "okay";
+};
+
+&fpd_dma_chan1 {
+ status = "okay";
+};
+
+&fpd_dma_chan2 {
+ status = "okay";
+};
+
+&fpd_dma_chan3 {
+ status = "okay";
+};
+
+&fpd_dma_chan4 {
+ status = "okay";
+};
+
+&fpd_dma_chan5 {
+ status = "okay";
+};
+
+&fpd_dma_chan6 {
+ status = "okay";
+};
+
+&fpd_dma_chan7 {
+ status = "okay";
+};
+
+&fpd_dma_chan8 {
+ status = "okay";
+};
+
+&gem3 {
+ status = "okay";
+ phy-handle = <&phy0>;
+ phy-mode = "rgmii-id";
+ phy0: ethernet-phy@c {
+ reg = <0xc>;
+ ti,rx-internal-delay = <0x8>;
+ ti,tx-internal-delay = <0xa>;
+ ti,fifo-depth = <0x1>;
+ ti,dp83867-rxctrl-strap-quirk;
+ };
+};
+
+&gpio {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ tca6416_u97: gpio@20 {
+ compatible = "ti,tca6416";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ /*
+ * IRQ not connected
+ * Lines:
+ * 0 - IRPS5401_ALERT_B
+ * 1 - HDMI_8T49N241_INT_ALM
+ * 2 - MAX6643_OT_B
+ * 3 - MAX6643_FANFAIL_B
+ * 5 - IIC_MUX_RESET_B
+ * 6 - GEM3_EXP_RESET_B
+ * 7 - FMC_LPC_PRSNT_M2C_B
+ * 4, 10 - 17 - not connected
+ */
+ };
+
+ /* Another connection to this bus via PL i2c via PCA9306 - u45 */
+ i2c-mux@74 { /* u34 */
+ compatible = "nxp,pca9548";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x74>;
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ /*
+ * IIC_EEPROM 1kB memory which uses 256B blocks
+ * where every block has different address.
+ * 0 - 256B address 0x54
+ * 256B - 512B address 0x55
+ * 512B - 768B address 0x56
+ * 768B - 1024B address 0x57
+ */
+ eeprom: eeprom@54 { /* u23 */
+ compatible = "atmel,24c08";
+ reg = <0x54>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
+
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ clock_8t49n287: clock-generator@6c { /* 8T49N287 - u182 */
+ reg = <0x6c>;
+ };
+ };
+
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ irps5401_43: irps5401@43 { /* IRPS5401 - u175 */
+ compatible = "infineon,irps5401";
+ reg = <0x43>; /* pmbus / i2c 0x13 */
+ };
+ irps5401_44: irps5401@44 { /* IRPS5401 - u180 */
+ compatible = "infineon,irps5401";
+ reg = <0x44>; /* pmbus / i2c 0x14 */
+ };
+ };
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ u183: ina226@40 { /* u183 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ reg = <0x40>;
+ shunt-resistor = <5000>;
+ };
+ };
+
+ i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ };
+
+ i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ };
+
+ /* 4, 6 not connected */
+ };
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* n25q512a 128MiB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ };
+};
+
+&rtc {
+ status = "okay";
+};
+
+&psgtr {
+ status = "okay";
+ /* nc, sata, usb3, dp */
+ clocks = <&clock_8t49n287_5>, <&clock_8t49n287_2>, <&clock_8t49n287_3>;
+ clock-names = "ref1", "ref2", "ref3";
+};
+
+&sata {
+ status = "okay";
+ /* SATA OOB timing settings */
+ ceva,p0-cominit-params = /bits/ 8 <0x18 0x40 0x18 0x28>;
+ ceva,p0-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
+ ceva,p0-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
+ ceva,p0-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ ceva,p1-cominit-params = /bits/ 8 <0x18 0x40 0x18 0x28>;
+ ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
+ ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
+ ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&psgtr 3 PHY_TYPE_SATA 1 1>;
+};
+
+/* SD1 with level shifter */
+&sdhci1 {
+ status = "okay";
+ no-1-8-v;
+ xlnx,mio-bank = <1>;
+ disable-wp;
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+/* ULPI SMSC USB3320 */
+&usb0 {
+ status = "okay";
+ dr_mode = "host";
+};
+
+&watchdog0 {
+ status = "okay";
+};
+
+&zynqmp_dpdma {
+ status = "okay";
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&psgtr 1 PHY_TYPE_DP 0 3>,
+ <&psgtr 0 PHY_TYPE_DP 1 3>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
index 6e9efe233838..18771e868399 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
@@ -13,6 +13,7 @@
#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU106 RevA";
@@ -132,6 +133,19 @@
compatible = "iio-hwmon";
io-channels = <&u75 0>, <&u75 1>, <&u75 2>, <&u75 3>;
};
+
+ /* 48MHz reference crystal */
+ ref48: ref48M {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <48000000>;
+ };
+
+ refhdmi: refhdmi {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <114285000>;
+ };
};
&can1 {
@@ -142,6 +156,17 @@
status = "okay";
};
+&zynqmp_dpdma {
+ status = "okay";
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&psgtr 1 PHY_TYPE_DP 0 3>,
+ <&psgtr 0 PHY_TYPE_DP 1 3>;
+};
+
/* fpd_dma clk 667MHz, lpd_dma 500MHz */
&fpd_dma_chan1 {
status = "okay";
@@ -482,7 +507,45 @@
#size-cells = <0>;
reg = <1>;
si5341: clock-generator@36 { /* SI5341 - u69 */
+ compatible = "silabs,si5341";
reg = <0x36>;
+ #clock-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&ref48>;
+ clock-names = "xtal";
+ clock-output-names = "si5341";
+
+ si5341_0: out@0 {
+ /* refclk0 for PS-GT, used for DP */
+ reg = <0>;
+ always-on;
+ };
+ si5341_2: out@2 {
+ /* refclk2 for PS-GT, used for USB3 */
+ reg = <2>;
+ always-on;
+ };
+ si5341_3: out@3 {
+ /* refclk3 for PS-GT, used for SATA */
+ reg = <3>;
+ always-on;
+ };
+ si5341_6: out@6 {
+ /* refclk6 PL CLK125 */
+ reg = <6>;
+ always-on;
+ };
+ si5341_7: out@7 {
+ /* refclk7 PL CLK74 */
+ reg = <7>;
+ always-on;
+ };
+ si5341_9: out@9 {
+ /* refclk9 used for PS_REF_CLK 33.3 MHz */
+ reg = <9>;
+ always-on;
+ };
};
};
@@ -520,6 +583,22 @@
reg = <4>;
si5328: clock-generator@69 {/* SI5328 - u20 */
reg = <0x69>;
+ /*
+ * The chip has an interrupt output connected to the PL:
+ * interrupt-parent = <&>;
+ * interrupts = <>;
+ */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #clock-cells = <1>;
+ clocks = <&refhdmi>;
+ clock-names = "xtal";
+ clock-output-names = "si5328";
+
+ si5328_clk: clk0@0 {
+ reg = <0>;
+ clock-frequency = <27000000>;
+ };
};
};
i2c@5 {
@@ -591,6 +670,13 @@
};
};
+&psgtr {
+ status = "okay";
+ /* nc, sata, usb3, dp */
+ clocks = <&si5341 0 3>, <&si5341 0 2>, <&si5341 0 0>;
+ clock-names = "ref1", "ref2", "ref3";
+};
+
&rtc {
status = "okay";
};
@@ -606,12 +692,15 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&psgtr 3 PHY_TYPE_SATA 1 1>;
};
/* SD1 with level shifter */
&sdhci1 {
status = "okay";
no-1-8-v;
+ xlnx,mio-bank = <1>;
};
&uart0 {
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
index 2e92634c77f9..d4b68f0d0098 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
@@ -13,6 +13,7 @@
#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU111 RevA";
@@ -116,6 +117,13 @@
compatible = "iio-hwmon";
io-channels = <&u79 0>, <&u79 1>, <&u79 2>, <&u79 3>;
};
+
+ /* 48MHz reference crystal */
+ ref48: ref48M {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <48000000>;
+ };
};
&dcc {
@@ -374,9 +382,46 @@
#size-cells = <0>;
reg = <1>;
si5341: clock-generator@36 { /* SI5341 - u46 */
+ compatible = "silabs,si5341";
reg = <0x36>;
+ #clock-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&ref48>;
+ clock-names = "xtal";
+ clock-output-names = "si5341";
+
+ si5341_0: out@0 {
+ /* refclk0 for PS-GT, used for DP */
+ reg = <0>;
+ always-on;
+ };
+ si5341_2: out@2 {
+ /* refclk2 for PS-GT, used for USB3 */
+ reg = <2>;
+ always-on;
+ };
+ si5341_3: out@3 {
+ /* refclk3 for PS-GT, used for SATA */
+ reg = <3>;
+ always-on;
+ };
+ si5341_5: out@5 {
+ /* refclk5 PL CLK100 */
+ reg = <5>;
+ always-on;
+ };
+ si5341_6: out@6 {
+ /* refclk6 PL CLK125 */
+ reg = <6>;
+ always-on;
+ };
+ si5341_9: out@9 {
+ /* refclk9 used for PS_REF_CLK 33.3 MHz */
+ reg = <9>;
+ always-on;
+ };
};
-
};
i2c@2 {
#address-cells = <1>;
@@ -410,7 +455,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <4>;
- si5328: clock-generator@69 { /* SI5328 - u48 */
+ si5382: clock-generator@69 { /* SI5382 - u48 */
reg = <0x69>;
};
};
@@ -497,6 +542,13 @@
};
};
+&psgtr {
+ status = "okay";
+ /* nc, sata, usb3, dp */
+ clocks = <&si5341 0 3>, <&si5341 0 2>, <&si5341 0 0>;
+ clock-names = "ref1", "ref2", "ref3";
+};
+
&rtc {
status = "okay";
};
@@ -512,12 +564,15 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&psgtr 3 PHY_TYPE_SATA 1 1>;
};
/* SD1 with level shifter */
&sdhci1 {
status = "okay";
no-1-8-v;
+ xlnx,mio-bank = <1>;
};
&uart0 {
@@ -529,3 +584,14 @@
status = "okay";
dr_mode = "host";
};
+
+&zynqmp_dpdma {
+ status = "okay";
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&psgtr 1 PHY_TYPE_DP 0 1>,
+ <&psgtr 0 PHY_TYPE_DP 1 1>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 68923fbd0e89..a3b391d18787 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -12,6 +12,7 @@
* the License, or (at your option) any later version.
*/
+#include <dt-bindings/dma/xlnx-zynqmp-dpdma.h>
#include <dt-bindings/power/xlnx-zynqmp-power.h>
#include <dt-bindings/reset/xlnx-zynqmp-resets.h>
@@ -99,7 +100,7 @@
};
};
- zynqmp_ipi {
+ zynqmp_ipi: zynqmp_ipi {
compatible = "xlnx,zynqmp-ipi-mailbox";
interrupt-parent = <&gic>;
interrupts = <0 35 4>;
@@ -187,6 +188,11 @@
xlnx_aes: zynqmp-aes {
compatible = "xlnx,zynqmp-aes";
};
+
+ zynqmp_reset: reset-controller {
+ compatible = "xlnx,zynqmp-reset";
+ #reset-cells = <1>;
+ };
};
};
@@ -265,6 +271,8 @@
interrupts = <0 124 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14e8>;
power-domains = <&zynqmp_firmware PD_GDMA>;
};
@@ -276,6 +284,8 @@
interrupts = <0 125 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14e9>;
power-domains = <&zynqmp_firmware PD_GDMA>;
};
@@ -287,6 +297,8 @@
interrupts = <0 126 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ea>;
power-domains = <&zynqmp_firmware PD_GDMA>;
};
@@ -298,6 +310,8 @@
interrupts = <0 127 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14eb>;
power-domains = <&zynqmp_firmware PD_GDMA>;
};
@@ -309,6 +323,8 @@
interrupts = <0 128 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ec>;
power-domains = <&zynqmp_firmware PD_GDMA>;
};
@@ -320,6 +336,8 @@
interrupts = <0 129 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ed>;
power-domains = <&zynqmp_firmware PD_GDMA>;
};
@@ -331,6 +349,8 @@
interrupts = <0 130 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ee>;
power-domains = <&zynqmp_firmware PD_GDMA>;
};
@@ -342,11 +362,14 @@
interrupts = <0 131 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ef>;
power-domains = <&zynqmp_firmware PD_GDMA>;
};
gic: interrupt-controller@f9010000 {
compatible = "arm,gic-400";
+ #address-cells = <0>;
#interrupt-cells = <3>;
reg = <0x0 0xf9010000 0x0 0x10000>,
<0x0 0xf9020000 0x0 0x20000>,
@@ -369,6 +392,8 @@
interrupts = <0 77 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x868>;
power-domains = <&zynqmp_firmware PD_ADMA>;
};
@@ -380,6 +405,8 @@
interrupts = <0 78 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x869>;
power-domains = <&zynqmp_firmware PD_ADMA>;
};
@@ -391,6 +418,8 @@
interrupts = <0 79 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x86a>;
power-domains = <&zynqmp_firmware PD_ADMA>;
};
@@ -402,6 +431,8 @@
interrupts = <0 80 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x86b>;
power-domains = <&zynqmp_firmware PD_ADMA>;
};
@@ -413,6 +444,8 @@
interrupts = <0 81 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x86c>;
power-domains = <&zynqmp_firmware PD_ADMA>;
};
@@ -424,6 +457,8 @@
interrupts = <0 82 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x86d>;
power-domains = <&zynqmp_firmware PD_ADMA>;
};
@@ -435,6 +470,8 @@
interrupts = <0 83 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x86e>;
power-domains = <&zynqmp_firmware PD_ADMA>;
};
@@ -446,6 +483,8 @@
interrupts = <0 84 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x86f>;
power-domains = <&zynqmp_firmware PD_ADMA>;
};
@@ -456,6 +495,20 @@
interrupts = <0 112 4>;
};
+ nand0: nand-controller@ff100000 {
+ compatible = "xlnx,zynqmp-nand-controller", "arasan,nfc-v3p10";
+ status = "disabled";
+ reg = <0x0 0xff100000 0x0 0x1000>;
+ clock-names = "controller", "bus";
+ interrupt-parent = <&gic>;
+ interrupts = <0 14 4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x872>;
+ power-domains = <&zynqmp_firmware PD_NAND>;
+ };
+
gem0: ethernet@ff0b0000 {
compatible = "cdns,zynqmp-gem", "cdns,gem";
status = "disabled";
@@ -465,6 +518,8 @@
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x874>;
power-domains = <&zynqmp_firmware PD_ETH_0>;
};
@@ -477,6 +532,8 @@
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x875>;
power-domains = <&zynqmp_firmware PD_ETH_1>;
};
@@ -489,6 +546,8 @@
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x876>;
power-domains = <&zynqmp_firmware PD_ETH_2>;
};
@@ -501,12 +560,15 @@
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x877>;
power-domains = <&zynqmp_firmware PD_ETH_3>;
};
gpio: gpio@ff0a0000 {
compatible = "xlnx,zynqmp-gpio-1.0";
status = "disabled";
+ #address-cells = <0>;
#gpio-cells = <0x2>;
gpio-controller;
interrupt-parent = <&gic>;
@@ -576,6 +638,22 @@
};
};
+ qspi: spi@ff0f0000 {
+ compatible = "xlnx,zynqmp-qspi-1.0";
+ status = "disabled";
+ clock-names = "ref_clk", "pclk";
+ interrupts = <0 15 4>;
+ interrupt-parent = <&gic>;
+ num-cs = <1>;
+ reg = <0x0 0xff0f0000 0x0 0x1000>,
+ <0x0 0xc0000000 0x0 0x8000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x873>;
+ power-domains = <&zynqmp_firmware PD_QSPI>;
+ };
+
psgtr: phy@fd400000 {
compatible = "xlnx,zynqmp-psgtr-v1.1";
status = "disabled";
@@ -602,6 +680,9 @@
interrupt-parent = <&gic>;
interrupts = <0 133 4>;
power-domains = <&zynqmp_firmware PD_SATA>;
+ #stream-id-cells = <4>;
+ iommus = <&smmu 0x4c0>, <&smmu 0x4c1>,
+ <&smmu 0x4c2>, <&smmu 0x4c3>;
};
sdhci0: mmc@ff160000 {
@@ -611,6 +692,8 @@
interrupts = <0 48 4>;
reg = <0x0 0xff160000 0x0 0x1000>;
clock-names = "clk_xin", "clk_ahb";
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x870>;
#clock-cells = <1>;
clock-output-names = "clk_out_sd0", "clk_in_sd0";
power-domains = <&zynqmp_firmware PD_SD_0>;
@@ -623,6 +706,8 @@
interrupts = <0 49 4>;
reg = <0x0 0xff170000 0x0 0x1000>;
clock-names = "clk_xin", "clk_ahb";
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x871>;
#clock-cells = <1>;
clock-output-names = "clk_out_sd1", "clk_in_sd1";
power-domains = <&zynqmp_firmware PD_SD_1>;
@@ -631,6 +716,7 @@
smmu: iommu@fd800000 {
compatible = "arm,mmu-500";
reg = <0x0 0xfd800000 0x0 0x20000>;
+ #iommu-cells = <1>;
status = "disabled";
#global-interrupts = <1>;
interrupt-parent = <&gic>;
@@ -753,5 +839,45 @@
reg = <0x0 0xfd4d0000 0x0 0x1000>;
timeout-sec = <10>;
};
+
+ lpd_watchdog: watchdog@ff150000 {
+ compatible = "cdns,wdt-r1p2";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 52 1>;
+ reg = <0x0 0xff150000 0x0 0x1000>;
+ timeout-sec = <10>;
+ };
+
+ zynqmp_dpdma: dma-controller@fd4c0000 {
+ compatible = "xlnx,zynqmp-dpdma";
+ status = "disabled";
+ reg = <0x0 0xfd4c0000 0x0 0x1000>;
+ interrupts = <0 122 4>;
+ interrupt-parent = <&gic>;
+ clock-names = "axi_clk";
+ #dma-cells = <1>;
+ };
+
+ zynqmp_dpsub: display@fd4a0000 {
+ compatible = "xlnx,zynqmp-dpsub-1.7";
+ status = "disabled";
+ reg = <0x0 0xfd4a0000 0x0 0x1000>,
+ <0x0 0xfd4aa000 0x0 0x1000>,
+ <0x0 0xfd4ab000 0x0 0x1000>,
+ <0x0 0xfd4ac000 0x0 0x1000>;
+ reg-names = "dp", "blend", "av_buf", "aud";
+ interrupts = <0 119 4>;
+ interrupt-parent = <&gic>;
+ clock-names = "dp_apb_clk", "dp_aud_clk",
+ "dp_vtc_pixel_clk_in";
+ power-domains = <&zynqmp_firmware PD_DP>;
+ resets = <&zynqmp_reset ZYNQMP_RESET_DP>;
+ dma-names = "vid0", "vid1", "vid2", "gfx0";
+ dmas = <&zynqmp_dpdma ZYNQMP_DPDMA_VIDEO0>,
+ <&zynqmp_dpdma ZYNQMP_DPDMA_VIDEO1>,
+ <&zynqmp_dpdma ZYNQMP_DPDMA_VIDEO2>,
+ <&zynqmp_dpdma ZYNQMP_DPDMA_GRAPHICS>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/zte/Makefile b/arch/arm64/boot/dts/zte/Makefile
deleted file mode 100644
index 126896144bda..000000000000
--- a/arch/arm64/boot/dts/zte/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-dtb-$(CONFIG_ARCH_ZX) += zx296718-evb.dtb
-dtb-$(CONFIG_ARCH_ZX) += zx296718-pcbox.dtb
diff --git a/arch/arm64/boot/dts/zte/zx296718-evb.dts b/arch/arm64/boot/dts/zte/zx296718-evb.dts
deleted file mode 100644
index cb2519ecd724..000000000000
--- a/arch/arm64/boot/dts/zte/zx296718-evb.dts
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright 2016 ZTE Corporation.
- * Copyright 2016 Linaro Ltd.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/dts-v1/;
-#include "zx296718.dtsi"
-
-/ {
- model = "ZTE zx296718 evaluation board";
- compatible = "zte,zx296718-evb", "zte,zx296718";
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
- memory@40000000 {
- device_type = "memory";
- reg = <0x40000000 0x40000000>;
- };
-
- sound-spdif0 {
- compatible = "audio-graph-card";
- dais = <&spdif0_port>;
- };
-
- sound-i2s0 {
- compatible = "audio-graph-card";
- dais = <&i2s0_port>;
- pinctrl-names = "default";
- pinctrl-0 = <&amplifier_pins>;
- pa-gpios = <&bgpio4 0 GPIO_ACTIVE_HIGH>;
- widgets = "Line", "Line Out Jack";
- routing = "Amplifier", "LINEOUTL",
- "Amplifier", "LINEOUTR",
- "Line Out Jack", "Amplifier";
- };
-};
-
-&aud96p22 {
- port {
- aud96p22_endpoint: endpoint {
- remote-endpoint = <&i2s0_endpoint>;
- };
- };
-};
-
-&emmc {
- status = "okay";
-};
-
-&hdmi {
- status = "okay";
-
- port {
- hdmi_endpoint: endpoint {
- remote-endpoint = <&spdif0_endpoint>;
- };
- };
-};
-
-&i2c0 {
- status = "okay";
-};
-
-&i2s0 {
- status = "okay";
-
- i2s0_port: port {
- i2s0_endpoint: endpoint {
- remote-endpoint = <&aud96p22_endpoint>;
- dai-format = "i2s";
- frame-master;
- bitclock-master;
- };
- };
-};
-
-&pmm {
- amplifier_pins: amplifier {
- pins = "TSI3_DATA";
- function = "BGPIO";
- };
-};
-
-&sd1 {
- status = "okay";
-};
-
-&spdif0 {
- status = "okay";
-
- spdif0_port: port {
- spdif0_endpoint: endpoint {
- remote-endpoint = <&hdmi_endpoint>;
- };
- };
-};
-
-&tvenc {
- status = "okay";
-};
-
-&uart0 {
- status = "okay";
-};
diff --git a/arch/arm64/boot/dts/zte/zx296718-pcbox.dts b/arch/arm64/boot/dts/zte/zx296718-pcbox.dts
deleted file mode 100644
index e02509f7082b..000000000000
--- a/arch/arm64/boot/dts/zte/zx296718-pcbox.dts
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (C) 2017 Sanechips Technology Co., Ltd.
- * Copyright 2017 Linaro Ltd.
- *
- * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
- */
-
-/dts-v1/;
-#include "zx296718.dtsi"
-#include <dt-bindings/pwm/pwm.h>
-
-/ {
- model = "ZTE ZX296718 PCBOX Board";
- compatible = "zte,zx296718-pcbox", "zte,zx296718";
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
- memory@80000000 {
- device_type = "memory";
- reg = <0x80000000 0x80000000>;
- };
-
- a53_vdd0v9: regulator-a53 {
- compatible = "pwm-regulator";
- pwms = <&pwm 3 1250 PWM_POLARITY_INVERTED>;
- regulator-name = "A53_VDD0V9";
- regulator-min-microvolt = <855000>;
- regulator-max-microvolt = <1183000>;
- pwm-dutycycle-unit = <100>;
- pwm-dutycycle-range = <0 100>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- sound-spdif0 {
- compatible = "audio-graph-card";
- dais = <&spdif0_port>;
- };
-
- sound-i2s0 {
- compatible = "audio-graph-card";
- dais = <&i2s0_port>;
- };
-};
-
-&aud96p22 {
- port {
- aud96p22_endpoint: endpoint {
- remote-endpoint = <&i2s0_endpoint>;
- };
- };
-};
-
-&cpu0 {
- cpu-supply = <&a53_vdd0v9>;
-};
-
-&emmc {
- status = "okay";
-};
-
-&hdmi {
- status = "disabled";
-
- port {
- hdmi_endpoint: endpoint {
- remote-endpoint = <&spdif0_endpoint>;
- };
- };
-};
-
-&i2c0 {
- status = "okay";
-};
-
-&i2s0 {
- status = "okay";
-
- i2s0_port: port {
- i2s0_endpoint: endpoint {
- remote-endpoint = <&aud96p22_endpoint>;
- dai-format = "i2s";
- frame-master;
- bitclock-master;
- };
- };
-};
-
-&irdec {
- status = "okay";
-};
-
-&pmm {
- pwm3_pins: pwm3 {
- pins = "KEY_ROW2";
- function = "PWM";
- };
-
- vga_pins: vga {
- pins = "KEY_COL1", "KEY_COL2", "VGA_HS", "VGA_VS";
- function = "VGA";
- };
-};
-
-&pwm {
- pinctrl-names = "default";
- pinctrl-0 = <&pwm3_pins>;
- status = "okay";
-};
-
-&sd0 {
- status = "okay";
-};
-
-&sd1 {
- status = "okay";
-};
-
-&spdif0 {
- status = "okay";
-
- spdif0_port: port {
- spdif0_endpoint: endpoint {
- remote-endpoint = <&hdmi_endpoint>;
- };
- };
-};
-
-&tvenc {
- status = "disabled";
-};
-
-&uart0 {
- status = "okay";
-};
-
-&vga {
- pinctrl-names = "default";
- pinctrl-0 = <&vga_pins>;
- status = "okay";
-};
diff --git a/arch/arm64/boot/dts/zte/zx296718.dtsi b/arch/arm64/boot/dts/zte/zx296718.dtsi
deleted file mode 100644
index cc54837ff4ba..000000000000
--- a/arch/arm64/boot/dts/zte/zx296718.dtsi
+++ /dev/null
@@ -1,627 +0,0 @@
-/*
- * Copyright 2016 ZTE Corporation.
- * Copyright 2016 Linaro Ltd.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clock/zx296718-clock.h>
-
-/ {
- compatible = "zte,zx296718";
- #address-cells = <1>;
- #size-cells = <1>;
- interrupt-parent = <&gic>;
-
- aliases {
- gpio0 = &bgpio0;
- gpio1 = &bgpio1;
- gpio2 = &bgpio2;
- gpio3 = &bgpio3;
- gpio4 = &bgpio4;
- gpio5 = &bgpio5;
- gpio6 = &bgpio6;
- serial0 = &uart0;
- };
-
- cpus {
- #address-cells = <2>;
- #size-cells = <0>;
-
- cpu-map {
- cluster0 {
- core0 {
- cpu = <&cpu0>;
- };
- core1 {
- cpu = <&cpu1>;
- };
- core2 {
- cpu = <&cpu2>;
- };
- core3 {
- cpu = <&cpu3>;
- };
- };
- };
-
- cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x0 0x0>;
- enable-method = "psci";
- clocks = <&topcrm A53_GATE>;
- operating-points-v2 = <&cluster0_opp>;
- };
-
- cpu1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x0 0x1>;
- enable-method = "psci";
- clocks = <&topcrm A53_GATE>;
- operating-points-v2 = <&cluster0_opp>;
- };
-
- cpu2: cpu@2 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x0 0x2>;
- enable-method = "psci";
- clocks = <&topcrm A53_GATE>;
- operating-points-v2 = <&cluster0_opp>;
- };
-
- cpu3: cpu@3 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x0 0x3>;
- enable-method = "psci";
- clocks = <&topcrm A53_GATE>;
- operating-points-v2 = <&cluster0_opp>;
- };
- };
-
- cluster0_opp: opp-table0 {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp-500000000 {
- opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <866000>;
- clock-latency-ns = <500000>;
- };
-
- opp-648000000 {
- opp-hz = /bits/ 64 <648000000>;
- opp-microvolt = <866000>;
- clock-latency-ns = <500000>;
- };
-
- opp-800000000 {
- opp-hz = /bits/ 64 <800000000>;
- opp-microvolt = <888000>;
- clock-latency-ns = <500000>;
- };
-
- opp-1000000000 {
- opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <898000>;
- clock-latency-ns = <500000>;
- };
-
- opp-1188000000 {
- opp-hz = /bits/ 64 <1188000000>;
- opp-microvolt = <1015000>;
- clock-latency-ns = <500000>;
- };
- };
-
- clk24k: clk-24k {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000>;
- clock-output-names = "rtcclk";
- };
-
- osc32k: clk-osc32k {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32000>;
- clock-output-names = "osc32k";
- };
-
- osc12m: clk-osc12m {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <12000000>;
- clock-output-names = "osc12m";
- };
-
- osc24m: clk-osc24m {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000000>;
- clock-output-names = "osc24m";
- };
-
- osc25m: clk-osc25m {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <25000000>;
- clock-output-names = "osc25m";
- };
-
- osc60m: clk-osc60m {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <60000000>;
- clock-output-names = "osc60m";
- };
-
- osc99m: clk-osc99m {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <99000000>;
- clock-output-names = "osc99m";
- };
-
- osc125m: clk-osc125m {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <125000000>;
- clock-output-names = "osc125m";
- };
-
- osc198m: clk-osc198m {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <198000000>;
- clock-output-names = "osc198m";
- };
-
- pll_audio: clk-pll-884m {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <884000000>;
- clock-output-names = "pll_audio";
- };
-
- pll_ddr: clk-pll-932m {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <932000000>;
- clock-output-names = "pll_ddr";
- };
-
- pll_hsic: clk-pll-960m {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <960000000>;
- clock-output-names = "pll_hsic";
- };
-
- pll_mac: clk-pll-1000m {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <1000000000>;
- clock-output-names = "pll_mac";
- };
-
- pll_mm0: clk-pll-1188m {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <1188000000>;
- clock-output-names = "pll_mm0";
- };
-
- pll_mm1: clk-pll-1296m {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <1296000000>;
- clock-output-names = "pll_mm1";
- };
-
- psci {
- compatible = "arm,psci-1.0";
- method = "smc";
- };
-
- timer {
- compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
- <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
- <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
- <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
- };
-
- pmu {
- compatible = "arm,cortex-a53-pmu";
- interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- gic: interrupt-controller@2a00000 {
- compatible = "arm,gic-v3";
- #interrupt-cells = <3>;
- #address-cells = <0>;
- interrupt-controller;
- reg = <0x02a00000 0x10000>,
- <0x02b00000 0xc0000>;
- interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- soc {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "simple-bus";
- ranges;
-
- irdec: ir-decoder@111000 {
- compatible = "zte,zx296718-irdec";
- reg = <0x111000 0x1000>;
- interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
- };
-
- aon_sysctrl: aon-sysctrl@116000 {
- compatible = "zte,zx296718-aon-sysctrl", "syscon";
- reg = <0x116000 0x1000>;
- };
-
- iocfg: pin-controller@119000 {
- compatible = "zte,zx296718-iocfg";
- reg = <0x119000 0x1000>;
- };
-
- uart0: uart@11f000 {
- compatible = "arm,pl011", "arm,primecell";
- arm,primecell-periphid = <0x001feffe>;
- reg = <0x11f000 0x1000>;
- interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&osc24m>;
- clock-names = "apb_pclk";
- status = "disabled";
- };
-
- sd0: mmc@1110000 {
- compatible = "zte,zx296718-dw-mshc";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x01110000 0x1000>;
- interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
- fifo-depth = <32>;
- data-addr = <0x200>;
- fifo-watermark-aligned;
- bus-width = <4>;
- clock-frequency = <50000000>;
- clocks = <&topcrm SD0_AHB>, <&topcrm SD0_WCLK>;
- clock-names = "biu", "ciu";
- max-frequency = <50000000>;
- cap-sdio-irq;
- cap-sd-highspeed;
- sd-uhs-sdr12;
- sd-uhs-sdr25;
- sd-uhs-sdr50;
- sd-uhs-sdr104;
- sd-uhs-ddr50;
- status = "disabled";
- };
-
- sd1: mmc@1111000 {
- compatible = "zte,zx296718-dw-mshc";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x01111000 0x1000>;
- interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
- fifo-depth = <32>;
- data-addr = <0x200>;
- fifo-watermark-aligned;
- bus-width = <4>;
- clock-frequency = <167000000>;
- clocks = <&topcrm SD1_AHB>, <&topcrm SD1_WCLK>;
- clock-names = "biu", "ciu";
- max-frequency = <167000000>;
- cap-sdio-irq;
- cap-sd-highspeed;
- status = "disabled";
- };
-
- dma: dma-controller@1460000 {
- compatible = "zte,zx296702-dma";
- reg = <0x01460000 0x1000>;
- interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&osc24m>;
- clock-names = "dmaclk";
- #dma-cells = <1>;
- dma-channels = <32>;
- dma-requests = <32>;
- };
-
- lsp0crm: clock-controller@1420000 {
- compatible = "zte,zx296718-lsp0crm";
- reg = <0x01420000 0x1000>;
- #clock-cells = <1>;
- };
-
- bgpio0: gpio@142d000 {
- compatible = "zte,zx296718-gpio", "zte,zx296702-gpio";
- reg = <0x142d000 0x40>;
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pmm 0 48 16>;
- interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-parent = <&gic>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- bgpio1: gpio@142d040 {
- compatible = "zte,zx296718-gpio", "zte,zx296702-gpio";
- reg = <0x142d040 0x40>;
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pmm 0 80 16>;
- interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-parent = <&gic>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- bgpio2: gpio@142d080 {
- compatible = "zte,zx296718-gpio", "zte,zx296702-gpio";
- reg = <0x142d080 0x40>;
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pmm 0 80 3
- &pmm 3 32 4
- &pmm 7 83 9>;
- interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-parent = <&gic>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- bgpio3: gpio@142d0c0 {
- compatible = "zte,zx296718-gpio", "zte,zx296702-gpio";
- reg = <0x142d0c0 0x40>;
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pmm 0 92 16>;
- interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-parent = <&gic>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- bgpio4: gpio@142d100 {
- compatible = "zte,zx296718-gpio", "zte,zx296702-gpio";
- reg = <0x142d100 0x40>;
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pmm 0 108 12
- &pmm 12 121 4>;
- interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-parent = <&gic>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- bgpio5: gpio@142d140 {
- compatible = "zte,zx296718-gpio", "zte,zx296702-gpio";
- reg = <0x142d140 0x40>;
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pmm 0 125 16>;
- interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-parent = <&gic>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- bgpio6: gpio@142d180 {
- compatible = "zte,zx296718-gpio", "zte,zx296702-gpio";
- reg = <0x142d180 0x40>;
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pmm 0 141 2>;
- interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-parent = <&gic>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- lsp1crm: clock-controller@1430000 {
- compatible = "zte,zx296718-lsp1crm";
- reg = <0x01430000 0x1000>;
- #clock-cells = <1>;
- };
-
- pwm: pwm@1439000 {
- compatible = "zte,zx296718-pwm";
- reg = <0x1439000 0x1000>;
- clocks = <&lsp1crm LSP1_PWM_PCLK>,
- <&lsp1crm LSP1_PWM_WCLK>;
- clock-names = "pclk", "wclk";
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- vou: vou@1440000 {
- compatible = "zte,zx296718-vou";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x1440000 0x10000>;
-
- dpc: dpc@0 {
- compatible = "zte,zx296718-dpc";
- reg = <0x0000 0x1000>, <0x1000 0x1000>,
- <0x5000 0x1000>, <0x6000 0x1000>,
- <0xa000 0x1000>;
- reg-names = "osd", "timing_ctrl",
- "dtrc", "vou_ctrl",
- "otfppu";
- interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&topcrm VOU_ACLK>, <&topcrm VOU_PPU_WCLK>,
- <&topcrm VOU_MAIN_WCLK>, <&topcrm VOU_AUX_WCLK>;
- clock-names = "aclk", "ppu_wclk",
- "main_wclk", "aux_wclk";
- };
-
- vga: vga@8000 {
- compatible = "zte,zx296718-vga";
- reg = <0x8000 0x1000>;
- interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&topcrm VGA_I2C_WCLK>;
- clock-names = "i2c_wclk";
- zte,vga-power-control = <&sysctrl 0x170 0xe0>;
- status = "disabled";
- };
-
- hdmi: hdmi@c000 {
- compatible = "zte,zx296718-hdmi";
- reg = <0xc000 0x4000>;
- interrupts = <GIC_SPI 82 IRQ_TYPE_EDGE_RISING>;
- clocks = <&topcrm HDMI_OSC_CEC>,
- <&topcrm HDMI_OSC_CLK>,
- <&topcrm HDMI_XCLK>;
- clock-names = "osc_cec", "osc_clk", "xclk";
- #sound-dai-cells = <0>;
- status = "disabled";
- };
-
- tvenc: tvenc@2000 {
- compatible = "zte,zx296718-tvenc";
- reg = <0x2000 0x1000>;
- zte,tvenc-power-control = <&sysctrl 0x170 0x10>;
- status = "disabled";
- };
- };
-
- topcrm: clock-controller@1461000 {
- compatible = "zte,zx296718-topcrm";
- reg = <0x01461000 0x1000>;
- #clock-cells = <1>;
- };
-
- pmm: pin-controller@1462000 {
- compatible = "zte,zx296718-pmm";
- reg = <0x1462000 0x1000>;
- zte,auxiliary-controller = <&iocfg>;
- };
-
- sysctrl: sysctrl@1463000 {
- compatible = "zte,zx296718-sysctrl", "syscon";
- reg = <0x1463000 0x1000>;
- };
-
- emmc: mmc@1470000{
- compatible = "zte,zx296718-dw-mshc";
- reg = <0x01470000 0x1000>;
- interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
- zte,aon-syscon = <&aon_sysctrl>;
- bus-width = <8>;
- fifo-depth = <128>;
- data-addr = <0x200>;
- fifo-watermark-aligned;
- clock-frequency = <167000000>;
- clocks = <&topcrm EMMC_NAND_AHB>, <&topcrm EMMC_WCLK>;
- clock-names = "biu", "ciu";
- max-frequency = <167000000>;
- cap-mmc-highspeed;
- mmc-ddr-1_8v;
- mmc-hs200-1_8v;
- non-removable;
- disable-wp;
- status = "disabled";
- };
-
- audiocrm: clock-controller@1480000 {
- compatible = "zte,zx296718-audiocrm";
- reg = <0x01480000 0x1000>;
- #clock-cells = <1>;
- };
-
- i2s0: i2s@1482000 {
- compatible = "zte,zx296718-i2s", "zte,zx296702-i2s";
- reg = <0x01482000 0x1000>;
- clocks = <&audiocrm AUDIO_I2S0_WCLK>,
- <&audiocrm AUDIO_I2S0_PCLK>;
- clock-names = "wclk", "pclk";
- assigned-clocks = <&audiocrm I2S0_WCLK_MUX>;
- assigned-clock-parents = <&topcrm AUDIO_99M>;
- interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&dma 22>, <&dma 23>;
- dma-names = "tx", "rx";
- #sound-dai-cells = <0>;
- status = "disabled";
- };
-
- i2c0: i2c@1486000 {
- compatible = "zte,zx296718-i2c";
- reg = <0x01486000 0x1000>;
- interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&audiocrm AUDIO_I2C0_WCLK>;
- clock-frequency = <1600000>;
- status = "disabled";
-
- aud96p22: codec@22 {
- compatible = "zte,zx-aud96p22";
- #sound-dai-cells = <0>;
- reg = <0x22>;
- };
- };
-
- spdif0: spdif@1488000 {
- compatible = "zte,zx296702-spdif";
- reg = <0x1488000 0x1000>;
- clocks = <&audiocrm AUDIO_SPDIF0_WCLK>;
- clock-names = "tx";
- interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
- #sound-dai-cells = <0>;
- dmas = <&dma 30>;
- dma-names = "tx";
- status = "disabled";
- };
- };
-};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 838301650a79..d612f633b771 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -263,6 +263,7 @@ CONFIG_MTD_NAND_MARVELL=y
CONFIG_MTD_NAND_FSL_IFC=y
CONFIG_MTD_NAND_QCOM=y
CONFIG_MTD_SPI_NOR=y
+CONFIG_MTK_DEVAPC=m
CONFIG_SPI_CADENCE_QUADSPI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=m
@@ -351,6 +352,7 @@ CONFIG_QCOM_IPA=m
CONFIG_MDIO_BUS_MUX_MMIOREG=y
CONFIG_MDIO_BUS_MUX_MULTIPLEXER=y
CONFIG_AQUANTIA_PHY=y
+CONFIG_BCM54140_PHY=m
CONFIG_MARVELL_PHY=m
CONFIG_MARVELL_10G_PHY=m
CONFIG_MESON_GXL_PHY=m
@@ -387,10 +389,12 @@ CONFIG_KEYBOARD_IMX_SC_KEY=m
CONFIG_KEYBOARD_CROS_EC=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_TOUCHSCREEN_GOODIX=m
CONFIG_TOUCHSCREEN_EDT_FT5X06=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PM8941_PWRKEY=y
CONFIG_INPUT_PM8XXX_VIBRATOR=m
+CONFIG_INPUT_PWM_VIBRA=m
CONFIG_INPUT_HISI_POWERKEY=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_AMBAKMI=y
@@ -479,6 +483,7 @@ CONFIG_SPI_S3C64XX=y
CONFIG_SPI_SH_MSIOF=m
CONFIG_SPI_SUN6I=y
CONFIG_SPI_SPIDEV=m
+CONFIG_MTK_PMIC_WRAP=m
CONFIG_SPMI=y
CONFIG_PINCTRL_SINGLE=y
CONFIG_PINCTRL_MAX77620=y
@@ -505,6 +510,7 @@ CONFIG_PINCTRL_SC7180=y
CONFIG_PINCTRL_SDM845=y
CONFIG_PINCTRL_SM8150=y
CONFIG_PINCTRL_SM8250=y
+CONFIG_PINCTRL_LPASS_LPI=m
CONFIG_GPIO_ALTERA=m
CONFIG_GPIO_DAVINCI=y
CONFIG_GPIO_DWAPB=y
@@ -535,6 +541,7 @@ CONFIG_BATTERY_SBS=m
CONFIG_BATTERY_BQ27XXX=y
CONFIG_SENSORS_ARM_SCMI=y
CONFIG_BATTERY_MAX17042=m
+CONFIG_CHARGER_BQ25890=m
CONFIG_CHARGER_BQ25980=m
CONFIG_SENSORS_ARM_SCPI=y
CONFIG_SENSORS_LM90=m
@@ -559,6 +566,7 @@ CONFIG_BCM2835_THERMAL=m
CONFIG_BRCMSTB_THERMAL=m
CONFIG_EXYNOS_THERMAL=y
CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_TEGRA_SOCTHERM=m
CONFIG_QCOM_TSENS=y
CONFIG_QCOM_SPMI_TEMP_ALARM=m
CONFIG_UNIPHIER_THERMAL=y
@@ -602,7 +610,9 @@ CONFIG_REGULATOR_HI6421V530=y
CONFIG_REGULATOR_HI655X=y
CONFIG_REGULATOR_MAX77620=y
CONFIG_REGULATOR_MAX8973=y
+CONFIG_REGULATOR_MP8859=y
CONFIG_REGULATOR_PCA9450=y
+CONFIG_REGULATOR_PF8X00=y
CONFIG_REGULATOR_PFUZE100=y
CONFIG_REGULATOR_PWM=y
CONFIG_REGULATOR_QCOM_RPMH=y
@@ -683,6 +693,7 @@ CONFIG_DRM_SII902X=m
CONFIG_DRM_SIMPLE_BRIDGE=m
CONFIG_DRM_THINE_THC63LVD1024=m
CONFIG_DRM_TI_SN65DSI86=m
+CONFIG_DRM_LONTIUM_LT9611UXC=m
CONFIG_DRM_I2C_ADV7511=m
CONFIG_DRM_I2C_ADV7511_AUDIO=y
CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
@@ -716,14 +727,17 @@ CONFIG_SND_SOC_FSL_ASRC=m
CONFIG_SND_SOC_FSL_MICFIL=m
CONFIG_SND_SOC_FSL_EASRC=m
CONFIG_SND_IMX_SOC=m
+CONFIG_SND_SOC_IMX_SGTL5000=m
CONFIG_SND_SOC_IMX_SPDIF=m
CONFIG_SND_SOC_IMX_AUDMIX=m
+CONFIG_SND_SOC_FSL_ASOC_CARD=m
CONFIG_SND_MESON_AXG_SOUND_CARD=m
CONFIG_SND_MESON_GX_SOUND_CARD=m
CONFIG_SND_SOC_QCOM=m
CONFIG_SND_SOC_APQ8016_SBC=m
CONFIG_SND_SOC_MSM8996=m
CONFIG_SND_SOC_SDM845=m
+CONFIG_SND_SOC_SM8250=m
CONFIG_SND_SOC_ROCKCHIP=m
CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
CONFIG_SND_SOC_ROCKCHIP_RT5645=m
@@ -738,18 +752,26 @@ CONFIG_SND_SOC_TEGRA210_DMIC=m
CONFIG_SND_SOC_TEGRA210_I2S=m
CONFIG_SND_SOC_TEGRA186_DSPK=m
CONFIG_SND_SOC_TEGRA210_ADMAIF=m
+CONFIG_SND_SOC_TEGRA_AUDIO_GRAPH_CARD=m
CONFIG_SND_SOC_AK4613=m
CONFIG_SND_SOC_ES7134=m
CONFIG_SND_SOC_ES7241=m
+CONFIG_SND_SOC_GTM601=m
CONFIG_SND_SOC_PCM3168A_I2C=m
+CONFIG_SND_SOC_RT5659=m
CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m
CONFIG_SND_SOC_TAS571X=m
CONFIG_SND_SOC_WCD934X=m
CONFIG_SND_SOC_WM8904=m
+CONFIG_SND_SOC_WM8962=m
CONFIG_SND_SOC_WSA881X=m
+CONFIG_SND_SOC_LPASS_WSA_MACRO=m
+CONFIG_SND_SOC_LPASS_VA_MACRO=m
CONFIG_SND_SIMPLE_CARD=m
CONFIG_SND_AUDIO_GRAPH_CARD=m
-CONFIG_I2C_HID=m
+CONFIG_HID_MULTITOUCH=m
+CONFIG_I2C_HID_ACPI=m
+CONFIG_I2C_HID_OF=m
CONFIG_USB_CONN_GPIO=m
CONFIG_USB=y
CONFIG_USB_OTG=y
@@ -776,6 +798,7 @@ CONFIG_USB_ISP1760=y
CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_CP210X=m
CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_OPTION=m
CONFIG_USB_HSIC_USB3503=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_GADGET=y
@@ -795,6 +818,7 @@ CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_TYPEC=m
CONFIG_TYPEC_TCPM=m
+CONFIG_TYPEC_TCPCI=m
CONFIG_TYPEC_FUSB302=m
CONFIG_TYPEC_HD3SS3220=m
CONFIG_TYPEC_TPS6598X=m
@@ -828,6 +852,7 @@ CONFIG_MMC_OWL=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_LM3692X=m
+CONFIG_LEDS_PCA9532=m
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_PWM=y
CONFIG_LEDS_SYSCON=y
@@ -845,7 +870,9 @@ CONFIG_RTC_DRV_HYM8563=m
CONFIG_RTC_DRV_MAX77686=y
CONFIG_RTC_DRV_RK808=m
CONFIG_RTC_DRV_PCF85363=m
+CONFIG_RTC_DRV_M41T80=m
CONFIG_RTC_DRV_RX8581=m
+CONFIG_RTC_DRV_RV3028=m
CONFIG_RTC_DRV_RV8803=m
CONFIG_RTC_DRV_S5M=y
CONFIG_RTC_DRV_DS3232=y
@@ -901,6 +928,7 @@ CONFIG_COMMON_CLK_FSL_SAI=y
CONFIG_COMMON_CLK_S2MPS11=y
CONFIG_COMMON_CLK_PWM=y
CONFIG_COMMON_CLK_VC5=y
+CONFIG_COMMON_CLK_ZYNQMP=y
CONFIG_COMMON_CLK_BD718XX=m
CONFIG_CLK_RASPBERRYPI=m
CONFIG_CLK_IMX8MM=y
@@ -931,7 +959,9 @@ CONFIG_SM_GCC_8150=y
CONFIG_SM_GCC_8250=y
CONFIG_SM_GPUCC_8150=y
CONFIG_SM_GPUCC_8250=y
+CONFIG_SM_DISPCC_8250=y
CONFIG_QCOM_HFPLL=y
+CONFIG_CLK_GFM_LPASS_SM8250=m
CONFIG_HWSPINLOCK=y
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_ARM_MHU=y
@@ -991,8 +1021,6 @@ CONFIG_ARCH_TEGRA_210_SOC=y
CONFIG_ARCH_TEGRA_186_SOC=y
CONFIG_ARCH_TEGRA_194_SOC=y
CONFIG_ARCH_TEGRA_234_SOC=y
-CONFIG_ARCH_K3_AM6_SOC=y
-CONFIG_ARCH_K3_J721E_SOC=y
CONFIG_TI_SCI_PM_DOMAINS=y
CONFIG_EXTCON_PTN5150=m
CONFIG_EXTCON_USB_GPIO=y
@@ -1009,11 +1037,13 @@ CONFIG_IIO_ST_LSM6DSX=m
CONFIG_IIO_CROS_EC_LIGHT_PROX=m
CONFIG_SENSORS_ISL29018=m
CONFIG_VCNL4000=m
+CONFIG_IIO_ST_MAGN_3AXIS=m
CONFIG_IIO_CROS_EC_BARO=m
CONFIG_MPL3115=m
CONFIG_PWM=y
CONFIG_PWM_BCM2835=m
CONFIG_PWM_CROS_EC=m
+CONFIG_PWM_IMX27=m
CONFIG_PWM_MESON=m
CONFIG_PWM_RCAR=m
CONFIG_PWM_ROCKCHIP=y
@@ -1062,6 +1092,7 @@ CONFIG_ROCKCHIP_EFUSE=y
CONFIG_NVMEM_SUNXI_SID=y
CONFIG_UNIPHIER_EFUSE=y
CONFIG_MESON_EFUSE=m
+CONFIG_NVMEM_RMEM=m
CONFIG_FPGA=y
CONFIG_FPGA_MGR_STRATIX10_SOC=m
CONFIG_FPGA_BRIDGE=m
@@ -1075,10 +1106,12 @@ CONFIG_SLIM_QCOM_CTRL=m
CONFIG_SLIM_QCOM_NGD_CTRL=m
CONFIG_MUX_MMIO=y
CONFIG_INTERCONNECT=y
+CONFIG_INTERCONNECT_IMX=m
+CONFIG_INTERCONNECT_IMX8MQ=m
CONFIG_INTERCONNECT_QCOM=y
CONFIG_INTERCONNECT_QCOM_MSM8916=m
CONFIG_INTERCONNECT_QCOM_OSM_L3=m
-CONFIG_INTERCONNECT_QCOM_SDM845=m
+CONFIG_INTERCONNECT_QCOM_SDM845=y
CONFIG_INTERCONNECT_QCOM_SM8150=m
CONFIG_INTERCONNECT_QCOM_SM8250=m
CONFIG_EXT2_FS=y
@@ -1094,6 +1127,7 @@ CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
CONFIG_VFAT_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=y
CONFIG_EFIVAR_FS=y
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 34b8a89197be..17e735931a0c 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -24,6 +24,7 @@
#ifdef USE_V8_CRYPTO_EXTENSIONS
#define MODE "ce"
#define PRIO 300
+#define STRIDE 5
#define aes_expandkey ce_aes_expandkey
#define aes_ecb_encrypt ce_aes_ecb_encrypt
#define aes_ecb_decrypt ce_aes_ecb_decrypt
@@ -41,6 +42,7 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
#else
#define MODE "neon"
#define PRIO 200
+#define STRIDE 4
#define aes_ecb_encrypt neon_aes_ecb_encrypt
#define aes_ecb_decrypt neon_aes_ecb_decrypt
#define aes_cbc_encrypt neon_aes_cbc_encrypt
@@ -55,7 +57,7 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
#define aes_mac_update neon_aes_mac_update
MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
#endif
-#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
+#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
@@ -87,7 +89,7 @@ asmlinkage void aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
int rounds, int bytes, u8 const iv[]);
asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
- int rounds, int blocks, u8 ctr[]);
+ int rounds, int bytes, u8 ctr[], u8 finalbuf[]);
asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
int rounds, int bytes, u32 const rk2[], u8 iv[],
@@ -103,9 +105,9 @@ asmlinkage void aes_essiv_cbc_decrypt(u8 out[], u8 const in[], u32 const rk1[],
int rounds, int blocks, u8 iv[],
u32 const rk2[]);
-asmlinkage void aes_mac_update(u8 const in[], u32 const rk[], int rounds,
- int blocks, u8 dg[], int enc_before,
- int enc_after);
+asmlinkage int aes_mac_update(u8 const in[], u32 const rk[], int rounds,
+ int blocks, u8 dg[], int enc_before,
+ int enc_after);
struct crypto_aes_xts_ctx {
struct crypto_aes_ctx key1;
@@ -448,34 +450,36 @@ static int ctr_encrypt(struct skcipher_request *req)
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
int err, rounds = 6 + ctx->key_length / 4;
struct skcipher_walk walk;
- int blocks;
err = skcipher_walk_virt(&walk, req, false);
- while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
- kernel_neon_begin();
- aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- ctx->key_enc, rounds, blocks, walk.iv);
- kernel_neon_end();
- err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
- }
- if (walk.nbytes) {
- u8 __aligned(8) tail[AES_BLOCK_SIZE];
+ while (walk.nbytes > 0) {
+ const u8 *src = walk.src.virt.addr;
unsigned int nbytes = walk.nbytes;
- u8 *tdst = walk.dst.virt.addr;
- u8 *tsrc = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ u8 buf[AES_BLOCK_SIZE];
+ unsigned int tail;
- /*
- * Tell aes_ctr_encrypt() to process a tail block.
- */
- blocks = -1;
+ if (unlikely(nbytes < AES_BLOCK_SIZE))
+ src = memcpy(buf, src, nbytes);
+ else if (nbytes < walk.total)
+ nbytes &= ~(AES_BLOCK_SIZE - 1);
kernel_neon_begin();
- aes_ctr_encrypt(tail, NULL, ctx->key_enc, rounds,
- blocks, walk.iv);
+ aes_ctr_encrypt(dst, src, ctx->key_enc, rounds, nbytes,
+ walk.iv, buf);
kernel_neon_end();
- crypto_xor_cpy(tdst, tsrc, tail, nbytes);
- err = skcipher_walk_done(&walk, 0);
+
+ tail = nbytes % (STRIDE * AES_BLOCK_SIZE);
+ if (tail > 0 && tail < AES_BLOCK_SIZE)
+ /*
+ * The final partial block could not be returned using
+ * an overlapping store, so it was passed via buf[]
+ * instead.
+ */
+ memcpy(dst + nbytes - tail, buf, tail);
+
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
@@ -650,7 +654,7 @@ static int __maybe_unused xts_decrypt(struct skcipher_request *req)
}
static struct skcipher_alg aes_algs[] = { {
-#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
+#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
.base = {
.cra_name = "__ecb(aes)",
.cra_driver_name = "__ecb-aes-" MODE,
@@ -852,10 +856,17 @@ static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
int rounds = 6 + ctx->key_length / 4;
if (crypto_simd_usable()) {
- kernel_neon_begin();
- aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before,
- enc_after);
- kernel_neon_end();
+ int rem;
+
+ do {
+ kernel_neon_begin();
+ rem = aes_mac_update(in, ctx->key_enc, rounds, blocks,
+ dg, enc_before, enc_after);
+ kernel_neon_end();
+ in += (blocks - rem) * AES_BLOCK_SIZE;
+ blocks = rem;
+ enc_before = 0;
+ } while (blocks);
} else {
if (enc_before)
aes_encrypt(ctx, dg, dg);
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index cf618d8f6cec..bbdb54702aa7 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -321,42 +321,76 @@ AES_FUNC_END(aes_cbc_cts_decrypt)
/*
* aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
- * int blocks, u8 ctr[])
+ * int bytes, u8 ctr[], u8 finalbuf[])
*/
AES_FUNC_START(aes_ctr_encrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
- enc_prepare w3, x2, x6
+ enc_prepare w3, x2, x12
ld1 {vctr.16b}, [x5]
- umov x6, vctr.d[1] /* keep swabbed ctr in reg */
- rev x6, x6
- cmn w6, w4 /* 32 bit overflow? */
- bcs .Lctrloop
+ umov x12, vctr.d[1] /* keep swabbed ctr in reg */
+ rev x12, x12
+
.LctrloopNx:
- subs w4, w4, #MAX_STRIDE
- bmi .Lctr1x
- add w7, w6, #1
+ add w7, w4, #15
+ sub w4, w4, #MAX_STRIDE << 4
+ lsr w7, w7, #4
+ mov w8, #MAX_STRIDE
+ cmp w7, w8
+ csel w7, w7, w8, lt
+ adds x12, x12, x7
+
mov v0.16b, vctr.16b
- add w8, w6, #2
mov v1.16b, vctr.16b
- add w9, w6, #3
mov v2.16b, vctr.16b
- add w9, w6, #3
- rev w7, w7
mov v3.16b, vctr.16b
- rev w8, w8
ST5( mov v4.16b, vctr.16b )
- mov v1.s[3], w7
- rev w9, w9
-ST5( add w10, w6, #4 )
- mov v2.s[3], w8
-ST5( rev w10, w10 )
- mov v3.s[3], w9
-ST5( mov v4.s[3], w10 )
- ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */
+ bcs 0f
+
+ .subsection 1
+ /* apply carry to outgoing counter */
+0: umov x8, vctr.d[0]
+ rev x8, x8
+ add x8, x8, #1
+ rev x8, x8
+ ins vctr.d[0], x8
+
+ /* apply carry to N counter blocks for N := x12 */
+ adr x16, 1f
+ sub x16, x16, x12, lsl #3
+ br x16
+ hint 34 // bti c
+ mov v0.d[0], vctr.d[0]
+ hint 34 // bti c
+ mov v1.d[0], vctr.d[0]
+ hint 34 // bti c
+ mov v2.d[0], vctr.d[0]
+ hint 34 // bti c
+ mov v3.d[0], vctr.d[0]
+ST5( hint 34 )
+ST5( mov v4.d[0], vctr.d[0] )
+1: b 2f
+ .previous
+
+2: rev x7, x12
+ ins vctr.d[1], x7
+ sub x7, x12, #MAX_STRIDE - 1
+ sub x8, x12, #MAX_STRIDE - 2
+ sub x9, x12, #MAX_STRIDE - 3
+ rev x7, x7
+ rev x8, x8
+ mov v1.d[1], x7
+ rev x9, x9
+ST5( sub x10, x12, #MAX_STRIDE - 4 )
+ mov v2.d[1], x8
+ST5( rev x10, x10 )
+ mov v3.d[1], x9
+ST5( mov v4.d[1], x10 )
+ tbnz w4, #31, .Lctrtail
+ ld1 {v5.16b-v7.16b}, [x1], #48
ST4( bl aes_encrypt_block4x )
ST5( bl aes_encrypt_block5x )
eor v0.16b, v5.16b, v0.16b
@@ -368,47 +402,72 @@ ST5( ld1 {v5.16b-v6.16b}, [x1], #32 )
ST5( eor v4.16b, v6.16b, v4.16b )
st1 {v0.16b-v3.16b}, [x0], #64
ST5( st1 {v4.16b}, [x0], #16 )
- add x6, x6, #MAX_STRIDE
- rev x7, x6
- ins vctr.d[1], x7
cbz w4, .Lctrout
b .LctrloopNx
-.Lctr1x:
- adds w4, w4, #MAX_STRIDE
- beq .Lctrout
-.Lctrloop:
- mov v0.16b, vctr.16b
- encrypt_block v0, w3, x2, x8, w7
-
- adds x6, x6, #1 /* increment BE ctr */
- rev x7, x6
- ins vctr.d[1], x7
- bcs .Lctrcarry /* overflow? */
-
-.Lctrcarrydone:
- subs w4, w4, #1
- bmi .Lctrtailblock /* blocks <0 means tail block */
- ld1 {v3.16b}, [x1], #16
- eor v3.16b, v0.16b, v3.16b
- st1 {v3.16b}, [x0], #16
- bne .Lctrloop
.Lctrout:
st1 {vctr.16b}, [x5] /* return next CTR value */
ldp x29, x30, [sp], #16
ret
-.Lctrtailblock:
- st1 {v0.16b}, [x0]
+.Lctrtail:
+ /* XOR up to MAX_STRIDE * 16 - 1 bytes of in/output with v0 ... v3/v4 */
+ mov x16, #16
+ ands x13, x4, #0xf
+ csel x13, x13, x16, ne
+
+ST5( cmp w4, #64 - (MAX_STRIDE << 4) )
+ST5( csel x14, x16, xzr, gt )
+ cmp w4, #48 - (MAX_STRIDE << 4)
+ csel x15, x16, xzr, gt
+ cmp w4, #32 - (MAX_STRIDE << 4)
+ csel x16, x16, xzr, gt
+ cmp w4, #16 - (MAX_STRIDE << 4)
+ ble .Lctrtail1x
+
+ adr_l x12, .Lcts_permute_table
+ add x12, x12, x13
+
+ST5( ld1 {v5.16b}, [x1], x14 )
+ ld1 {v6.16b}, [x1], x15
+ ld1 {v7.16b}, [x1], x16
+
+ST4( bl aes_encrypt_block4x )
+ST5( bl aes_encrypt_block5x )
+
+ ld1 {v8.16b}, [x1], x13
+ ld1 {v9.16b}, [x1]
+ ld1 {v10.16b}, [x12]
+
+ST4( eor v6.16b, v6.16b, v0.16b )
+ST4( eor v7.16b, v7.16b, v1.16b )
+ST4( tbl v3.16b, {v3.16b}, v10.16b )
+ST4( eor v8.16b, v8.16b, v2.16b )
+ST4( eor v9.16b, v9.16b, v3.16b )
+
+ST5( eor v5.16b, v5.16b, v0.16b )
+ST5( eor v6.16b, v6.16b, v1.16b )
+ST5( tbl v4.16b, {v4.16b}, v10.16b )
+ST5( eor v7.16b, v7.16b, v2.16b )
+ST5( eor v8.16b, v8.16b, v3.16b )
+ST5( eor v9.16b, v9.16b, v4.16b )
+
+ST5( st1 {v5.16b}, [x0], x14 )
+ st1 {v6.16b}, [x0], x15
+ st1 {v7.16b}, [x0], x16
+ add x13, x13, x0
+ st1 {v9.16b}, [x13] // overlapping stores
+ st1 {v8.16b}, [x0]
b .Lctrout
-.Lctrcarry:
- umov x7, vctr.d[0] /* load upper word of ctr */
- rev x7, x7 /* ... to handle the carry */
- add x7, x7, #1
- rev x7, x7
- ins vctr.d[0], x7
- b .Lctrcarrydone
+.Lctrtail1x:
+ csel x0, x0, x6, eq // use finalbuf if less than a full block
+ ld1 {v5.16b}, [x1]
+ST5( mov v3.16b, v4.16b )
+ encrypt_block v3, w3, x2, x8, w7
+ eor v5.16b, v5.16b, v3.16b
+ st1 {v5.16b}, [x0]
+ b .Lctrout
AES_FUNC_END(aes_ctr_encrypt)
@@ -619,61 +678,47 @@ AES_FUNC_END(aes_xts_decrypt)
* int blocks, u8 dg[], int enc_before, int enc_after)
*/
AES_FUNC_START(aes_mac_update)
- frame_push 6
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x6
-
- ld1 {v0.16b}, [x23] /* get dg */
+ ld1 {v0.16b}, [x4] /* get dg */
enc_prepare w2, x1, x7
cbz w5, .Lmacloop4x
encrypt_block v0, w2, x1, x7, w8
.Lmacloop4x:
- subs w22, w22, #4
+ subs w3, w3, #4
bmi .Lmac1x
- ld1 {v1.16b-v4.16b}, [x19], #64 /* get next pt block */
+ ld1 {v1.16b-v4.16b}, [x0], #64 /* get next pt block */
eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */
- encrypt_block v0, w21, x20, x7, w8
+ encrypt_block v0, w2, x1, x7, w8
eor v0.16b, v0.16b, v2.16b
- encrypt_block v0, w21, x20, x7, w8
+ encrypt_block v0, w2, x1, x7, w8
eor v0.16b, v0.16b, v3.16b
- encrypt_block v0, w21, x20, x7, w8
+ encrypt_block v0, w2, x1, x7, w8
eor v0.16b, v0.16b, v4.16b
- cmp w22, wzr
- csinv x5, x24, xzr, eq
+ cmp w3, wzr
+ csinv x5, x6, xzr, eq
cbz w5, .Lmacout
- encrypt_block v0, w21, x20, x7, w8
- st1 {v0.16b}, [x23] /* return dg */
- cond_yield_neon .Lmacrestart
+ encrypt_block v0, w2, x1, x7, w8
+ st1 {v0.16b}, [x4] /* return dg */
+ cond_yield .Lmacout, x7
b .Lmacloop4x
.Lmac1x:
- add w22, w22, #4
+ add w3, w3, #4
.Lmacloop:
- cbz w22, .Lmacout
- ld1 {v1.16b}, [x19], #16 /* get next pt block */
+ cbz w3, .Lmacout
+ ld1 {v1.16b}, [x0], #16 /* get next pt block */
eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */
- subs w22, w22, #1
- csinv x5, x24, xzr, eq
+ subs w3, w3, #1
+ csinv x5, x6, xzr, eq
cbz w5, .Lmacout
.Lmacenc:
- encrypt_block v0, w21, x20, x7, w8
+ encrypt_block v0, w2, x1, x7, w8
b .Lmacloop
.Lmacout:
- st1 {v0.16b}, [x23] /* return dg */
- frame_pop
+ st1 {v0.16b}, [x4] /* return dg */
+ mov w0, w3
ret
-
-.Lmacrestart:
- ld1 {v0.16b}, [x23] /* get dg */
- enc_prepare w21, x20, x0
- b .Lmacloop4x
AES_FUNC_END(aes_mac_update)
diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
index 63a52ad9a75c..a3405b8c344b 100644
--- a/arch/arm64/crypto/aes-neonbs-core.S
+++ b/arch/arm64/crypto/aes-neonbs-core.S
@@ -613,7 +613,6 @@ SYM_FUNC_END(aesbs_decrypt8)
st1 {\o7\().16b}, [x19], #16
cbz x23, 1f
- cond_yield_neon
b 99b
1: frame_pop
@@ -715,7 +714,6 @@ SYM_FUNC_START(aesbs_cbc_decrypt)
1: st1 {v24.16b}, [x24] // store IV
cbz x23, 2f
- cond_yield_neon
b 99b
2: frame_pop
@@ -801,7 +799,7 @@ SYM_FUNC_END(__xts_crypt8)
mov x23, x4
mov x24, x5
-0: movi v30.2s, #0x1
+ movi v30.2s, #0x1
movi v25.2s, #0x87
uzp1 v30.4s, v30.4s, v25.4s
ld1 {v25.16b}, [x24]
@@ -846,7 +844,6 @@ SYM_FUNC_END(__xts_crypt8)
cbz x23, 1f
st1 {v25.16b}, [x24]
- cond_yield_neon 0b
b 99b
1: st1 {v25.16b}, [x24]
@@ -889,7 +886,7 @@ SYM_FUNC_START(aesbs_ctr_encrypt)
cset x26, ne
add x23, x23, x26 // do one extra block if final
-98: ldp x7, x8, [x24]
+ ldp x7, x8, [x24]
ld1 {v0.16b}, [x24]
CPU_LE( rev x7, x7 )
CPU_LE( rev x8, x8 )
@@ -967,7 +964,6 @@ CPU_LE( rev x8, x8 )
st1 {v0.16b}, [x24]
cbz x23, .Lctr_done
- cond_yield_neon 98b
b 99b
.Lctr_done:
diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S
index 111d9c9abddd..dce6dcebfca1 100644
--- a/arch/arm64/crypto/crct10dif-ce-core.S
+++ b/arch/arm64/crypto/crct10dif-ce-core.S
@@ -68,10 +68,10 @@
.text
.arch armv8-a+crypto
- init_crc .req w19
- buf .req x20
- len .req x21
- fold_consts_ptr .req x22
+ init_crc .req w0
+ buf .req x1
+ len .req x2
+ fold_consts_ptr .req x3
fold_consts .req v10
@@ -257,12 +257,6 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
.endm
.macro crc_t10dif_pmull, p
- frame_push 4, 128
-
- mov init_crc, w0
- mov buf, x1
- mov len, x2
-
__pmull_init_\p
// For sizes less than 256 bytes, we can't fold 128 bytes at a time.
@@ -317,26 +311,7 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
fold_32_bytes \p, v6, v7
subs len, len, #128
- b.lt .Lfold_128_bytes_loop_done_\@
-
- if_will_cond_yield_neon
- stp q0, q1, [sp, #.Lframe_local_offset]
- stp q2, q3, [sp, #.Lframe_local_offset + 32]
- stp q4, q5, [sp, #.Lframe_local_offset + 64]
- stp q6, q7, [sp, #.Lframe_local_offset + 96]
- do_cond_yield_neon
- ldp q0, q1, [sp, #.Lframe_local_offset]
- ldp q2, q3, [sp, #.Lframe_local_offset + 32]
- ldp q4, q5, [sp, #.Lframe_local_offset + 64]
- ldp q6, q7, [sp, #.Lframe_local_offset + 96]
- ld1 {fold_consts.2d}, [fold_consts_ptr]
- __pmull_init_\p
- __pmull_pre_\p fold_consts
- endif_yield_neon
-
- b .Lfold_128_bytes_loop_\@
-
-.Lfold_128_bytes_loop_done_\@:
+ b.ge .Lfold_128_bytes_loop_\@
// Now fold the 112 bytes in v0-v6 into the 16 bytes in v7.
@@ -453,7 +428,9 @@ CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
// Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of v0.
umov w0, v0.h[0]
- frame_pop
+ .ifc \p, p8
+ ldp x29, x30, [sp], #16
+ .endif
ret
.Lless_than_256_bytes_\@:
@@ -489,7 +466,9 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
// Assumes len >= 16.
//
SYM_FUNC_START(crc_t10dif_pmull_p8)
- crc_t10dif_pmull p8
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+ crc_t10dif_pmull p8
SYM_FUNC_END(crc_t10dif_pmull_p8)
.align 5
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
index ccc3f6067742..09eb1456aed4 100644
--- a/arch/arm64/crypto/crct10dif-ce-glue.c
+++ b/arch/arm64/crypto/crct10dif-ce-glue.c
@@ -37,9 +37,18 @@ static int crct10dif_update_pmull_p8(struct shash_desc *desc, const u8 *data,
u16 *crc = shash_desc_ctx(desc);
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
- kernel_neon_begin();
- *crc = crc_t10dif_pmull_p8(*crc, data, length);
- kernel_neon_end();
+ do {
+ unsigned int chunk = length;
+
+ if (chunk > SZ_4K + CRC_T10DIF_PMULL_CHUNK_SIZE)
+ chunk = SZ_4K;
+
+ kernel_neon_begin();
+ *crc = crc_t10dif_pmull_p8(*crc, data, chunk);
+ kernel_neon_end();
+ data += chunk;
+ length -= chunk;
+ } while (length);
} else {
*crc = crc_t10dif_generic(*crc, data, length);
}
@@ -53,9 +62,18 @@ static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data,
u16 *crc = shash_desc_ctx(desc);
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
- kernel_neon_begin();
- *crc = crc_t10dif_pmull_p64(*crc, data, length);
- kernel_neon_end();
+ do {
+ unsigned int chunk = length;
+
+ if (chunk > SZ_4K + CRC_T10DIF_PMULL_CHUNK_SIZE)
+ chunk = SZ_4K;
+
+ kernel_neon_begin();
+ *crc = crc_t10dif_pmull_p64(*crc, data, chunk);
+ kernel_neon_end();
+ data += chunk;
+ length -= chunk;
+ } while (length);
} else {
*crc = crc_t10dif_generic(*crc, data, length);
}
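
The two glue hunks above replace the asm-level yield for CRC-T10DIF with chunking in C: every kernel_neon_begin()/kernel_neon_end() section now covers at most about 4 KiB of input, so preemption can happen between chunks. A minimal sketch of the idiom, reusing the names from the hunk (illustrative, not part of the patch):

	do {
		unsigned int chunk = length;

		/*
		 * Cap at SZ_4K only while more than SZ_4K plus one
		 * minimum-sized chunk remains, so the final iteration is
		 * never handed less than CRC_T10DIF_PMULL_CHUNK_SIZE.
		 */
		if (chunk > SZ_4K + CRC_T10DIF_PMULL_CHUNK_SIZE)
			chunk = SZ_4K;

		kernel_neon_begin();
		*crc = crc_t10dif_pmull_p64(*crc, data, chunk);
		kernel_neon_end();

		data += chunk;
		length -= chunk;
	} while (length);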
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
index 92d0d2753e81..8c02bbc2684e 100644
--- a/arch/arm64/crypto/sha1-ce-core.S
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -62,40 +62,34 @@
.endm
/*
- * void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
- * int blocks)
+ * int sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
+ * int blocks)
*/
SYM_FUNC_START(sha1_ce_transform)
- frame_push 3
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
-
/* load round constants */
-0: loadrc k0.4s, 0x5a827999, w6
+ loadrc k0.4s, 0x5a827999, w6
loadrc k1.4s, 0x6ed9eba1, w6
loadrc k2.4s, 0x8f1bbcdc, w6
loadrc k3.4s, 0xca62c1d6, w6
/* load state */
- ld1 {dgav.4s}, [x19]
- ldr dgb, [x19, #16]
+ ld1 {dgav.4s}, [x0]
+ ldr dgb, [x0, #16]
/* load sha1_ce_state::finalize */
ldr_l w4, sha1_ce_offsetof_finalize, x4
- ldr w4, [x19, x4]
+ ldr w4, [x0, x4]
/* load input */
-1: ld1 {v8.4s-v11.4s}, [x20], #64
- sub w21, w21, #1
+0: ld1 {v8.4s-v11.4s}, [x1], #64
+ sub w2, w2, #1
CPU_LE( rev32 v8.16b, v8.16b )
CPU_LE( rev32 v9.16b, v9.16b )
CPU_LE( rev32 v10.16b, v10.16b )
CPU_LE( rev32 v11.16b, v11.16b )
-2: add t0.4s, v8.4s, k0.4s
+1: add t0.4s, v8.4s, k0.4s
mov dg0v.16b, dgav.16b
add_update c, ev, k0, 8, 9, 10, 11, dgb
@@ -126,25 +120,18 @@ CPU_LE( rev32 v11.16b, v11.16b )
add dgbv.2s, dgbv.2s, dg1v.2s
add dgav.4s, dgav.4s, dg0v.4s
- cbz w21, 3f
-
- if_will_cond_yield_neon
- st1 {dgav.4s}, [x19]
- str dgb, [x19, #16]
- do_cond_yield_neon
+ cbz w2, 2f
+ cond_yield 3f, x5
b 0b
- endif_yield_neon
-
- b 1b
/*
* Final block: add padding and total bit count.
* Skip if the input size was not a round multiple of the block size,
* the padding is handled by the C code in that case.
*/
-3: cbz x4, 4f
+2: cbz x4, 3f
ldr_l w4, sha1_ce_offsetof_count, x4
- ldr x4, [x19, x4]
+ ldr x4, [x0, x4]
movi v9.2d, #0
mov x8, #0x80000000
movi v10.2d, #0
@@ -153,11 +140,11 @@ CPU_LE( rev32 v11.16b, v11.16b )
mov x4, #0
mov v11.d[0], xzr
mov v11.d[1], x7
- b 2b
+ b 1b
/* store new state */
-4: st1 {dgav.4s}, [x19]
- str dgb, [x19, #16]
- frame_pop
+3: st1 {dgav.4s}, [x0]
+ str dgb, [x0, #16]
+ mov w0, w2
ret
SYM_FUNC_END(sha1_ce_transform)
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index c93121bcfdeb..71fa4f1122d7 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -19,6 +19,7 @@
MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("sha1");
struct sha1_ce_state {
struct sha1_state sst;
@@ -28,14 +29,22 @@ struct sha1_ce_state {
extern const u32 sha1_ce_offsetof_count;
extern const u32 sha1_ce_offsetof_finalize;
-asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
- int blocks);
+asmlinkage int sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
+ int blocks);
static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src,
int blocks)
{
- sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst), src,
- blocks);
+ while (blocks) {
+ int rem;
+
+ kernel_neon_begin();
+ rem = sha1_ce_transform(container_of(sst, struct sha1_ce_state,
+ sst), src, blocks);
+ kernel_neon_end();
+ src += (blocks - rem) * SHA1_BLOCK_SIZE;
+ blocks = rem;
+ }
}
const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
@@ -50,9 +59,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
return crypto_sha1_update(desc, data, len);
sctx->finalize = 0;
- kernel_neon_begin();
sha1_base_do_update(desc, data, len, __sha1_ce_transform);
- kernel_neon_end();
return 0;
}
@@ -72,11 +79,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
*/
sctx->finalize = finalize;
- kernel_neon_begin();
sha1_base_do_update(desc, data, len, __sha1_ce_transform);
if (!finalize)
sha1_base_do_finalize(desc, __sha1_ce_transform);
- kernel_neon_end();
return sha1_base_finish(desc, out);
}
@@ -88,9 +93,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
return crypto_sha1_finup(desc, NULL, 0, out);
sctx->finalize = 0;
- kernel_neon_begin();
sha1_base_do_finalize(desc, __sha1_ce_transform);
- kernel_neon_end();
return sha1_base_finish(desc, out);
}
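
sha1_ce_transform() now returns the number of blocks it did not process because cond_yield fired, and the glue loops until that count reaches zero; the same wrapper shape recurs for SHA-256, SHA3 and SHA-512 below. A hedged sketch of the contract (names from the hunk; sctx stands in for the container_of() expression used there):

	while (blocks) {
		int rem;

		kernel_neon_begin();
		rem = sha1_ce_transform(sctx, src, blocks);	/* rem == 0: all done */
		kernel_neon_end();

		/* skip past the (blocks - rem) blocks that were hashed */
		src += (blocks - rem) * SHA1_BLOCK_SIZE;
		blocks = rem;
	}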
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
index 3f9d0f326987..6cdea7d56059 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -76,36 +76,30 @@
*/
.text
SYM_FUNC_START(sha2_ce_transform)
- frame_push 3
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
-
/* load round constants */
-0: adr_l x8, .Lsha2_rcon
+ adr_l x8, .Lsha2_rcon
ld1 { v0.4s- v3.4s}, [x8], #64
ld1 { v4.4s- v7.4s}, [x8], #64
ld1 { v8.4s-v11.4s}, [x8], #64
ld1 {v12.4s-v15.4s}, [x8]
/* load state */
- ld1 {dgav.4s, dgbv.4s}, [x19]
+ ld1 {dgav.4s, dgbv.4s}, [x0]
/* load sha256_ce_state::finalize */
ldr_l w4, sha256_ce_offsetof_finalize, x4
- ldr w4, [x19, x4]
+ ldr w4, [x0, x4]
/* load input */
-1: ld1 {v16.4s-v19.4s}, [x20], #64
- sub w21, w21, #1
+0: ld1 {v16.4s-v19.4s}, [x1], #64
+ sub w2, w2, #1
CPU_LE( rev32 v16.16b, v16.16b )
CPU_LE( rev32 v17.16b, v17.16b )
CPU_LE( rev32 v18.16b, v18.16b )
CPU_LE( rev32 v19.16b, v19.16b )
-2: add t0.4s, v16.4s, v0.4s
+1: add t0.4s, v16.4s, v0.4s
mov dg0v.16b, dgav.16b
mov dg1v.16b, dgbv.16b
@@ -134,24 +128,18 @@ CPU_LE( rev32 v19.16b, v19.16b )
add dgbv.4s, dgbv.4s, dg1v.4s
/* handled all input blocks? */
- cbz w21, 3f
-
- if_will_cond_yield_neon
- st1 {dgav.4s, dgbv.4s}, [x19]
- do_cond_yield_neon
+ cbz w2, 2f
+ cond_yield 3f, x5
b 0b
- endif_yield_neon
-
- b 1b
/*
* Final block: add padding and total bit count.
* Skip if the input size was not a round multiple of the block size,
* the padding is handled by the C code in that case.
*/
-3: cbz x4, 4f
+2: cbz x4, 3f
ldr_l w4, sha256_ce_offsetof_count, x4
- ldr x4, [x19, x4]
+ ldr x4, [x0, x4]
movi v17.2d, #0
mov x8, #0x80000000
movi v18.2d, #0
@@ -160,10 +148,10 @@ CPU_LE( rev32 v19.16b, v19.16b )
mov x4, #0
mov v19.d[0], xzr
mov v19.d[1], x7
- b 2b
+ b 1b
/* store new state */
-4: st1 {dgav.4s, dgbv.4s}, [x19]
- frame_pop
+3: st1 {dgav.4s, dgbv.4s}, [x0]
+ mov w0, w2
ret
SYM_FUNC_END(sha2_ce_transform)
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 31ba3da5e61b..c57a6119fefc 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -19,6 +19,8 @@
MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha256");
struct sha256_ce_state {
struct sha256_state sst;
@@ -28,14 +30,22 @@ struct sha256_ce_state {
extern const u32 sha256_ce_offsetof_count;
extern const u32 sha256_ce_offsetof_finalize;
-asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
- int blocks);
+asmlinkage int sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+ int blocks);
static void __sha2_ce_transform(struct sha256_state *sst, u8 const *src,
int blocks)
{
- sha2_ce_transform(container_of(sst, struct sha256_ce_state, sst), src,
- blocks);
+ while (blocks) {
+ int rem;
+
+ kernel_neon_begin();
+ rem = sha2_ce_transform(container_of(sst, struct sha256_ce_state,
+ sst), src, blocks);
+ kernel_neon_end();
+ src += (blocks - rem) * SHA256_BLOCK_SIZE;
+ blocks = rem;
+ }
}
const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
@@ -61,9 +71,7 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
__sha256_block_data_order);
sctx->finalize = 0;
- kernel_neon_begin();
sha256_base_do_update(desc, data, len, __sha2_ce_transform);
- kernel_neon_end();
return 0;
}
@@ -88,11 +96,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
*/
sctx->finalize = finalize;
- kernel_neon_begin();
sha256_base_do_update(desc, data, len, __sha2_ce_transform);
if (!finalize)
sha256_base_do_finalize(desc, __sha2_ce_transform);
- kernel_neon_end();
return sha256_base_finish(desc, out);
}
@@ -106,9 +112,7 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
}
sctx->finalize = 0;
- kernel_neon_begin();
sha256_base_do_finalize(desc, __sha2_ce_transform);
- kernel_neon_end();
return sha256_base_finish(desc, out);
}
diff --git a/arch/arm64/crypto/sha3-ce-core.S b/arch/arm64/crypto/sha3-ce-core.S
index 1cfb768df350..6f5208414fe3 100644
--- a/arch/arm64/crypto/sha3-ce-core.S
+++ b/arch/arm64/crypto/sha3-ce-core.S
@@ -37,20 +37,13 @@
.endm
/*
- * sha3_ce_transform(u64 *st, const u8 *data, int blocks, int dg_size)
+ * int sha3_ce_transform(u64 *st, const u8 *data, int blocks, int dg_size)
*/
.text
SYM_FUNC_START(sha3_ce_transform)
- frame_push 4
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
-
-0: /* load state */
- add x8, x19, #32
- ld1 { v0.1d- v3.1d}, [x19]
+ /* load state */
+ add x8, x0, #32
+ ld1 { v0.1d- v3.1d}, [x0]
ld1 { v4.1d- v7.1d}, [x8], #32
ld1 { v8.1d-v11.1d}, [x8], #32
ld1 {v12.1d-v15.1d}, [x8], #32
@@ -58,13 +51,13 @@ SYM_FUNC_START(sha3_ce_transform)
ld1 {v20.1d-v23.1d}, [x8], #32
ld1 {v24.1d}, [x8]
-1: sub w21, w21, #1
+0: sub w2, w2, #1
mov w8, #24
adr_l x9, .Lsha3_rcon
/* load input */
- ld1 {v25.8b-v28.8b}, [x20], #32
- ld1 {v29.8b-v31.8b}, [x20], #24
+ ld1 {v25.8b-v28.8b}, [x1], #32
+ ld1 {v29.8b-v31.8b}, [x1], #24
eor v0.8b, v0.8b, v25.8b
eor v1.8b, v1.8b, v26.8b
eor v2.8b, v2.8b, v27.8b
@@ -73,10 +66,10 @@ SYM_FUNC_START(sha3_ce_transform)
eor v5.8b, v5.8b, v30.8b
eor v6.8b, v6.8b, v31.8b
- tbnz x22, #6, 3f // SHA3-512
+ tbnz x3, #6, 2f // SHA3-512
- ld1 {v25.8b-v28.8b}, [x20], #32
- ld1 {v29.8b-v30.8b}, [x20], #16
+ ld1 {v25.8b-v28.8b}, [x1], #32
+ ld1 {v29.8b-v30.8b}, [x1], #16
eor v7.8b, v7.8b, v25.8b
eor v8.8b, v8.8b, v26.8b
eor v9.8b, v9.8b, v27.8b
@@ -84,34 +77,34 @@ SYM_FUNC_START(sha3_ce_transform)
eor v11.8b, v11.8b, v29.8b
eor v12.8b, v12.8b, v30.8b
- tbnz x22, #4, 2f // SHA3-384 or SHA3-224
+ tbnz x3, #4, 1f // SHA3-384 or SHA3-224
// SHA3-256
- ld1 {v25.8b-v28.8b}, [x20], #32
+ ld1 {v25.8b-v28.8b}, [x1], #32
eor v13.8b, v13.8b, v25.8b
eor v14.8b, v14.8b, v26.8b
eor v15.8b, v15.8b, v27.8b
eor v16.8b, v16.8b, v28.8b
- b 4f
+ b 3f
-2: tbz x22, #2, 4f // bit 2 cleared? SHA-384
+1: tbz x3, #2, 3f // bit 2 cleared? SHA-384
// SHA3-224
- ld1 {v25.8b-v28.8b}, [x20], #32
- ld1 {v29.8b}, [x20], #8
+ ld1 {v25.8b-v28.8b}, [x1], #32
+ ld1 {v29.8b}, [x1], #8
eor v13.8b, v13.8b, v25.8b
eor v14.8b, v14.8b, v26.8b
eor v15.8b, v15.8b, v27.8b
eor v16.8b, v16.8b, v28.8b
eor v17.8b, v17.8b, v29.8b
- b 4f
+ b 3f
// SHA3-512
-3: ld1 {v25.8b-v26.8b}, [x20], #16
+2: ld1 {v25.8b-v26.8b}, [x1], #16
eor v7.8b, v7.8b, v25.8b
eor v8.8b, v8.8b, v26.8b
-4: sub w8, w8, #1
+3: sub w8, w8, #1
eor3 v29.16b, v4.16b, v9.16b, v14.16b
eor3 v26.16b, v1.16b, v6.16b, v11.16b
@@ -190,33 +183,19 @@ SYM_FUNC_START(sha3_ce_transform)
eor v0.16b, v0.16b, v31.16b
- cbnz w8, 4b
- cbz w21, 5f
-
- if_will_cond_yield_neon
- add x8, x19, #32
- st1 { v0.1d- v3.1d}, [x19]
- st1 { v4.1d- v7.1d}, [x8], #32
- st1 { v8.1d-v11.1d}, [x8], #32
- st1 {v12.1d-v15.1d}, [x8], #32
- st1 {v16.1d-v19.1d}, [x8], #32
- st1 {v20.1d-v23.1d}, [x8], #32
- st1 {v24.1d}, [x8]
- do_cond_yield_neon
- b 0b
- endif_yield_neon
-
- b 1b
+ cbnz w8, 3b
+ cond_yield 3f, x8
+ cbnz w2, 0b
/* save state */
-5: st1 { v0.1d- v3.1d}, [x19], #32
- st1 { v4.1d- v7.1d}, [x19], #32
- st1 { v8.1d-v11.1d}, [x19], #32
- st1 {v12.1d-v15.1d}, [x19], #32
- st1 {v16.1d-v19.1d}, [x19], #32
- st1 {v20.1d-v23.1d}, [x19], #32
- st1 {v24.1d}, [x19]
- frame_pop
+3: st1 { v0.1d- v3.1d}, [x0], #32
+ st1 { v4.1d- v7.1d}, [x0], #32
+ st1 { v8.1d-v11.1d}, [x0], #32
+ st1 {v12.1d-v15.1d}, [x0], #32
+ st1 {v16.1d-v19.1d}, [x0], #32
+ st1 {v20.1d-v23.1d}, [x0], #32
+ st1 {v24.1d}, [x0]
+ mov w0, w2
ret
SYM_FUNC_END(sha3_ce_transform)
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
index e5a2936f0886..8c65cecf560a 100644
--- a/arch/arm64/crypto/sha3-ce-glue.c
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -23,9 +23,13 @@
MODULE_DESCRIPTION("SHA3 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("sha3-224");
+MODULE_ALIAS_CRYPTO("sha3-256");
+MODULE_ALIAS_CRYPTO("sha3-384");
+MODULE_ALIAS_CRYPTO("sha3-512");
-asmlinkage void sha3_ce_transform(u64 *st, const u8 *data, int blocks,
- int md_len);
+asmlinkage int sha3_ce_transform(u64 *st, const u8 *data, int blocks,
+ int md_len);
static int sha3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
@@ -55,11 +59,15 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
blocks = len / sctx->rsiz;
len %= sctx->rsiz;
- if (blocks) {
+ while (blocks) {
+ int rem;
+
kernel_neon_begin();
- sha3_ce_transform(sctx->st, data, blocks, digest_size);
+ rem = sha3_ce_transform(sctx->st, data, blocks,
+ digest_size);
kernel_neon_end();
- data += blocks * sctx->rsiz;
+ data += (blocks - rem) * sctx->rsiz;
+ blocks = rem;
}
}
diff --git a/arch/arm64/crypto/sha512-ce-core.S b/arch/arm64/crypto/sha512-ce-core.S
index cde606c0323e..d6e7f6c95fa6 100644
--- a/arch/arm64/crypto/sha512-ce-core.S
+++ b/arch/arm64/crypto/sha512-ce-core.S
@@ -107,23 +107,17 @@
*/
.text
SYM_FUNC_START(sha512_ce_transform)
- frame_push 3
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
-
/* load state */
-0: ld1 {v8.2d-v11.2d}, [x19]
+ ld1 {v8.2d-v11.2d}, [x0]
/* load first 4 round constants */
adr_l x3, .Lsha512_rcon
ld1 {v20.2d-v23.2d}, [x3], #64
/* load input */
-1: ld1 {v12.2d-v15.2d}, [x20], #64
- ld1 {v16.2d-v19.2d}, [x20], #64
- sub w21, w21, #1
+0: ld1 {v12.2d-v15.2d}, [x1], #64
+ ld1 {v16.2d-v19.2d}, [x1], #64
+ sub w2, w2, #1
CPU_LE( rev64 v12.16b, v12.16b )
CPU_LE( rev64 v13.16b, v13.16b )
@@ -201,19 +195,12 @@ CPU_LE( rev64 v19.16b, v19.16b )
add v10.2d, v10.2d, v2.2d
add v11.2d, v11.2d, v3.2d
+ cond_yield 3f, x4
/* handled all input blocks? */
- cbz w21, 3f
-
- if_will_cond_yield_neon
- st1 {v8.2d-v11.2d}, [x19]
- do_cond_yield_neon
- b 0b
- endif_yield_neon
-
- b 1b
+ cbnz w2, 0b
/* store new state */
-3: st1 {v8.2d-v11.2d}, [x19]
- frame_pop
+3: st1 {v8.2d-v11.2d}, [x0]
+ mov w0, w2
ret
SYM_FUNC_END(sha512_ce_transform)
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index faa83f6cf376..e62a094a9d52 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -23,12 +23,28 @@
MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("sha384");
+MODULE_ALIAS_CRYPTO("sha512");
-asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
- int blocks);
+asmlinkage int sha512_ce_transform(struct sha512_state *sst, u8 const *src,
+ int blocks);
asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
+static void __sha512_ce_transform(struct sha512_state *sst, u8 const *src,
+ int blocks)
+{
+ while (blocks) {
+ int rem;
+
+ kernel_neon_begin();
+ rem = sha512_ce_transform(sst, src, blocks);
+ kernel_neon_end();
+ src += (blocks - rem) * SHA512_BLOCK_SIZE;
+ blocks = rem;
+ }
+}
+
static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
int blocks)
{
@@ -38,45 +54,30 @@ static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src,
static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- if (!crypto_simd_usable())
- return sha512_base_do_update(desc, data, len,
- __sha512_block_data_order);
-
- kernel_neon_begin();
- sha512_base_do_update(desc, data, len, sha512_ce_transform);
- kernel_neon_end();
+ sha512_block_fn *fn = crypto_simd_usable() ? __sha512_ce_transform
+ : __sha512_block_data_order;
+ sha512_base_do_update(desc, data, len, fn);
return 0;
}
static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!crypto_simd_usable()) {
- if (len)
- sha512_base_do_update(desc, data, len,
- __sha512_block_data_order);
- sha512_base_do_finalize(desc, __sha512_block_data_order);
- return sha512_base_finish(desc, out);
- }
+ sha512_block_fn *fn = crypto_simd_usable() ? __sha512_ce_transform
+ : __sha512_block_data_order;
- kernel_neon_begin();
- sha512_base_do_update(desc, data, len, sha512_ce_transform);
- sha512_base_do_finalize(desc, sha512_ce_transform);
- kernel_neon_end();
+ sha512_base_do_update(desc, data, len, fn);
+ sha512_base_do_finalize(desc, fn);
return sha512_base_finish(desc, out);
}
static int sha512_ce_final(struct shash_desc *desc, u8 *out)
{
- if (!crypto_simd_usable()) {
- sha512_base_do_finalize(desc, __sha512_block_data_order);
- return sha512_base_finish(desc, out);
- }
+ sha512_block_fn *fn = crypto_simd_usable() ? __sha512_ce_transform
+ : __sha512_block_data_order;
- kernel_neon_begin();
- sha512_base_do_finalize(desc, sha512_ce_transform);
- kernel_neon_end();
+ sha512_base_do_finalize(desc, fn);
return sha512_base_finish(desc, out);
}
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index ffb1a40d5475..09e43272ccb0 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -4,10 +4,26 @@
#ifdef CONFIG_ARCH_RANDOM
+#include <linux/arm-smccc.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <asm/cpufeature.h>
+#define ARM_SMCCC_TRNG_MIN_VERSION 0x10000UL
+
+extern bool smccc_trng_available;
+
+static inline bool __init smccc_probe_trng(void)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_VERSION, &res);
+ if ((s32)res.a0 < 0)
+ return false;
+
+ return res.a0 >= ARM_SMCCC_TRNG_MIN_VERSION;
+}
+
static inline bool __arm64_rndr(unsigned long *v)
{
bool ok;
@@ -38,26 +54,55 @@ static inline bool __must_check arch_get_random_int(unsigned int *v)
static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
+ struct arm_smccc_res res;
+
+ /*
+ * We prefer the SMCCC call, since its semantics (return actual
+ * hardware backed entropy) is closer to the idea behind this
+ * function here than what even the RNDRRS register provides
+ * (the output of a pseudo RNG freshly seeded by a TRNG).
+ */
+ if (smccc_trng_available) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
+ if ((int)res.a0 >= 0) {
+ *v = res.a3;
+ return true;
+ }
+ }
+
/*
* Only support the generic interface after we have detected
* the system wide capability, avoiding complexity with the
* cpufeature code and with potential scheduling between CPUs
* with and without the feature.
*/
- if (!cpus_have_const_cap(ARM64_HAS_RNG))
- return false;
+ if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
+ return true;
- return __arm64_rndr(v);
+ return false;
}
-
static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
{
+ struct arm_smccc_res res;
unsigned long val;
- bool ok = arch_get_random_seed_long(&val);
- *v = val;
- return ok;
+ if (smccc_trng_available) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 32, &res);
+ if ((int)res.a0 >= 0) {
+ *v = res.a3 & GENMASK(31, 0);
+ return true;
+ }
+ }
+
+ if (cpus_have_const_cap(ARM64_HAS_RNG)) {
+ if (__arm64_rndr(&val)) {
+ *v = val;
+ return true;
+ }
+ }
+
+ return false;
}
static inline bool __init __early_cpu_has_rndr(void)
@@ -72,12 +117,29 @@ arch_get_random_seed_long_early(unsigned long *v)
{
WARN_ON(system_state != SYSTEM_BOOTING);
- if (!__early_cpu_has_rndr())
- return false;
+ if (smccc_trng_available) {
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
+ if ((int)res.a0 >= 0) {
+ *v = res.a3;
+ return true;
+ }
+ }
+
+ if (__early_cpu_has_rndr() && __arm64_rndr(v))
+ return true;
- return __arm64_rndr(v);
+ return false;
}
#define arch_get_random_seed_long_early arch_get_random_seed_long_early
+#else /* !CONFIG_ARCH_RANDOM */
+
+static inline bool __init smccc_probe_trng(void)
+{
+ return false;
+}
+
#endif /* CONFIG_ARCH_RANDOM */
#endif /* _ASM_ARCHRANDOM_H */
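
With the SMCCC TRNG service wired in, the arch seed hooks try firmware entropy first and fall back to the RNDR instruction. A condensed sketch of the resulting order (hypothetical helper name, illustrative only):

static bool arch_seed_sketch(unsigned long *v)
{
	struct arm_smccc_res res;

	/* 1. Firmware TRNG via SMCCC, if probed successfully at boot. */
	if (smccc_trng_available) {
		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
		if ((int)res.a0 >= 0) {
			*v = res.a3;
			return true;
		}
	}

	/* 2. RNDR, once the system-wide ARM64_HAS_RNG capability is set. */
	if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
		return true;

	/* 3. No architectural entropy source available. */
	return false;
}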
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 9990059be106..ccedf548dac9 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -15,10 +15,10 @@
.macro __uaccess_ttbr0_disable, tmp1
mrs \tmp1, ttbr1_el1 // swapper_pg_dir
bic \tmp1, \tmp1, #TTBR_ASID_MASK
- sub \tmp1, \tmp1, #PAGE_SIZE // reserved_pg_dir just before swapper_pg_dir
+ sub \tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET // reserved_pg_dir
msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
isb
- add \tmp1, \tmp1, #PAGE_SIZE
+ add \tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET
msr ttbr1_el1, \tmp1 // set reserved ASID
isb
.endm
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index bf125c591116..ca31594d3d6c 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -676,6 +676,23 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
.endm
/*
+ * Set SCTLR_EL1 to the passed value, and invalidate the local icache
+ * in the process. This is called when setting the MMU on.
+ */
+.macro set_sctlr_el1, reg
+ msr sctlr_el1, \reg
+ isb
+ /*
+ * Invalidate the local I-cache so that any instructions fetched
+ * speculatively from the PoC are discarded, since they may have
+ * been dynamically patched at the PoU.
+ */
+ ic iallu
+ dsb nsh
+ isb
+.endm
+
+/*
* Check whether to yield to another runnable task from kernel mode NEON code
* (which runs with preemption disabled).
*
@@ -745,6 +762,22 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
.Lyield_out_\@ :
.endm
+ /*
+ * Check whether preempt-disabled code should yield as soon as it
+ * is able. This is the case if re-enabling preemption a single
+ * time results in a preempt count of zero, and the TIF_NEED_RESCHED
+ * flag is set. (Note that the latter is stored negated in the
+ * top word of the thread_info::preempt_count field)
+ */
+ .macro cond_yield, lbl:req, tmp:req
+#ifdef CONFIG_PREEMPTION
+ get_current_task \tmp
+ ldr \tmp, [\tmp, #TSK_TI_PREEMPT]
+ sub \tmp, \tmp, #PREEMPT_DISABLE_OFFSET
+ cbz \tmp, \lbl
+#endif
+ .endm
+
/*
* This macro emits a program property note section identifying
* architecture features which require special handling, mainly for
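
The new cond_yield macro reduces the old if_will_cond_yield_neon/do_cond_yield_neon pair to one test-and-branch to a caller-supplied label; the caller saves its own state and simply returns, letting the C glue re-enter later. A rough C rendering of the condition it checks (a sketch assuming arm64's 64-bit thread_info::preempt_count layout, not the actual implementation):

static inline bool cond_yield_sketch(void)
{
#ifdef CONFIG_PREEMPTION
	/*
	 * Low 32 bits: preempt count.  High 32 bits: need_resched stored
	 * inverted.  The whole word equals PREEMPT_DISABLE_OFFSET exactly
	 * when dropping our single preempt-disable would make the task
	 * immediately preemptible.
	 */
	return READ_ONCE(current_thread_info()->preempt_count) ==
	       PREEMPT_DISABLE_OFFSET;
#else
	return false;
#endif
}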
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 77cbbe3625f2..a074459f8f2f 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -6,7 +6,6 @@
#define __ASM_CACHE_H
#include <asm/cputype.h>
-#include <asm/mte-kasan.h>
#define CTR_L1IP_SHIFT 14
#define CTR_L1IP_MASK 3
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 45217f21f1fe..52e5c1623224 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -30,11 +30,6 @@
* the implementation assumes non-aliasing VIPT D-cache and (aliasing)
* VIPT I-cache.
*
- * flush_cache_mm(mm)
- *
- * Clean and invalidate all user space cache entries
- * before a change of page tables.
- *
* flush_icache_range(start, end)
*
* Ensure coherency between the I-cache and the D-cache in the
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 9a555809b89c..61177bac49fa 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -63,6 +63,11 @@ struct arm64_ftr_bits {
s64 safe_val; /* safe value for FTR_EXACT features */
};
+struct arm64_ftr_override {
+ u64 val;
+ u64 mask;
+};
+
/*
* @arm64_ftr_reg - Feature register
* @strict_mask Bits which should match across all CPUs for sanity.
@@ -74,6 +79,7 @@ struct arm64_ftr_reg {
u64 user_mask;
u64 sys_val;
u64 user_val;
+ struct arm64_ftr_override *override;
const struct arm64_ftr_bits *ftr_bits;
};
@@ -600,6 +606,7 @@ void __init setup_cpu_features(void);
void check_local_cpu_capabilities(void);
u64 read_sanitised_ftr_reg(u32 id);
+u64 __read_sysreg_by_encoding(u32 sys_id);
static inline bool cpu_supports_mixed_endian_el0(void)
{
@@ -811,6 +818,10 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
return 8;
}
+extern struct arm64_ftr_override id_aa64mmfr1_override;
+extern struct arm64_ftr_override id_aa64pfr1_override;
+extern struct arm64_ftr_override id_aa64isar1_override;
+
u32 get_kvm_ipa_limit(void);
void dump_cpu_features(void);
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index a7f5a1bbc8ac..d77d358f9395 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -32,46 +32,39 @@
* to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
* EL2.
*/
-.macro __init_el2_timers mode
-.ifeqs "\mode", "nvhe"
+.macro __init_el2_timers
mrs x0, cnthctl_el2
orr x0, x0, #3 // Enable EL1 physical timers
msr cnthctl_el2, x0
-.endif
msr cntvoff_el2, xzr // Clear virtual offset
.endm
-.macro __init_el2_debug mode
+.macro __init_el2_debug
mrs x1, id_aa64dfr0_el1
sbfx x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
cmp x0, #1
- b.lt 1f // Skip if no PMU present
+ b.lt .Lskip_pmu_\@ // Skip if no PMU present
mrs x0, pmcr_el0 // Disable debug access traps
ubfx x0, x0, #11, #5 // to EL2 and allow access to
-1:
+.Lskip_pmu_\@:
csel x2, xzr, x0, lt // all PMU counters from EL1
/* Statistical profiling */
ubfx x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
- cbz x0, 3f // Skip if SPE not present
+ cbz x0, .Lskip_spe_\@ // Skip if SPE not present
-.ifeqs "\mode", "nvhe"
mrs_s x0, SYS_PMBIDR_EL1 // If SPE available at EL2,
and x0, x0, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
- cbnz x0, 2f // then permit sampling of physical
+ cbnz x0, .Lskip_spe_el2_\@ // then permit sampling of physical
mov x0, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
1 << SYS_PMSCR_EL2_PA_SHIFT)
msr_s SYS_PMSCR_EL2, x0 // addresses and physical counter
-2:
+.Lskip_spe_el2_\@:
mov x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
orr x2, x2, x0 // If we don't have VHE, then
// use EL1&0 translation.
-.else
- orr x2, x2, #MDCR_EL2_TPMS // For VHE, use EL2 translation
- // and disable access from EL1
-.endif
-3:
+.Lskip_spe_\@:
msr mdcr_el2, x2 // Configure debug traps
.endm
@@ -79,9 +72,9 @@
.macro __init_el2_lor
mrs x1, id_aa64mmfr1_el1
ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
- cbz x0, 1f
+ cbz x0, .Lskip_lor_\@
msr_s SYS_LORC_EL1, xzr
-1:
+.Lskip_lor_\@:
.endm
/* Stage-2 translation */
@@ -93,7 +86,7 @@
.macro __init_el2_gicv3
mrs x0, id_aa64pfr0_el1
ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
- cbz x0, 1f
+ cbz x0, .Lskip_gicv3_\@
mrs_s x0, SYS_ICC_SRE_EL2
orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
@@ -103,7 +96,7 @@
mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back,
tbz x0, #0, 1f // and check that it sticks
msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
-1:
+.Lskip_gicv3_\@:
.endm
.macro __init_el2_hstr
@@ -128,14 +121,14 @@
.macro __init_el2_nvhe_sve
mrs x1, id_aa64pfr0_el1
ubfx x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
- cbz x1, 1f
+ cbz x1, .Lskip_sve_\@
bic x0, x0, #CPTR_EL2_TZ // Also disable SVE traps
msr cptr_el2, x0 // Disable copro. traps to EL2
isb
mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector
msr_s SYS_ZCR_EL2, x1 // length for EL1.
-1:
+.Lskip_sve_\@:
.endm
.macro __init_el2_nvhe_prepare_eret
@@ -145,37 +138,24 @@
/**
* Initialize EL2 registers to sane values. This should be called early on all
- * cores that were booted in EL2.
+ * cores that were booted in EL2. Note that everything gets initialised as
+ * if VHE was not available. The kernel context will be upgraded to VHE
+ * if possible later on in the boot process.
*
* Regs: x0, x1 and x2 are clobbered.
*/
-.macro init_el2_state mode
-.ifnes "\mode", "vhe"
-.ifnes "\mode", "nvhe"
-.error "Invalid 'mode' argument"
-.endif
-.endif
-
+.macro init_el2_state
__init_el2_sctlr
- __init_el2_timers \mode
- __init_el2_debug \mode
+ __init_el2_timers
+ __init_el2_debug
__init_el2_lor
__init_el2_stage2
__init_el2_gicv3
__init_el2_hstr
-
- /*
- * When VHE is not in use, early init of EL2 needs to be done here.
- * When VHE _is_ in use, EL1 will not be used in the host and
- * requires no configuration, and all non-hyp-specific EL2 setup
- * will be done via the _EL1 system register aliases in __cpu_setup.
- */
-.ifeqs "\mode", "nvhe"
__init_el2_nvhe_idregs
__init_el2_nvhe_cptr
__init_el2_nvhe_sve
__init_el2_nvhe_prepare_eret
-.endif
.endm
#endif /* __ARM_KVM_INIT_H__ */
diff --git a/arch/arm64/include/asm/hyp_image.h b/arch/arm64/include/asm/hyp_image.h
index daa1a1da539e..737ded6b6d0d 100644
--- a/arch/arm64/include/asm/hyp_image.h
+++ b/arch/arm64/include/asm/hyp_image.h
@@ -7,6 +7,9 @@
#ifndef __ARM64_HYP_IMAGE_H__
#define __ARM64_HYP_IMAGE_H__
+#define __HYP_CONCAT(a, b) a ## b
+#define HYP_CONCAT(a, b) __HYP_CONCAT(a, b)
+
/*
* KVM nVHE code has its own symbol namespace prefixed with __kvm_nvhe_,
* to separate it from the kernel proper.
@@ -21,9 +24,31 @@
*/
#define HYP_SECTION_NAME(NAME) .hyp##NAME
+/* Symbol defined at the beginning of each hyp section. */
+#define HYP_SECTION_SYMBOL_NAME(NAME) \
+ HYP_CONCAT(__hyp_section_, HYP_SECTION_NAME(NAME))
+
+/*
+ * Helper to generate linker script statements starting a hyp section.
+ *
+ * A symbol with a well-known name is defined at the first byte. This
+ * is used as a base for hyp relocations (see gen-hyprel.c). It must
+ * be defined inside the section so the linker of `vmlinux` cannot
+ * separate it from the section data.
+ */
+#define BEGIN_HYP_SECTION(NAME) \
+ HYP_SECTION_NAME(NAME) : { \
+ HYP_SECTION_SYMBOL_NAME(NAME) = .;
+
+/* Helper to generate linker script statements ending a hyp section. */
+#define END_HYP_SECTION \
+ }
+
/* Defines an ELF hyp section from input section @NAME and its subsections. */
-#define HYP_SECTION(NAME) \
- HYP_SECTION_NAME(NAME) : { *(NAME NAME##.*) }
+#define HYP_SECTION(NAME) \
+ BEGIN_HYP_SECTION(NAME) \
+ *(NAME NAME##.*) \
+ END_HYP_SECTION
/*
* Defines a linker script alias of a kernel-proper symbol referenced by
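
BEGIN_HYP_SECTION plants a well-known anchor symbol at the first byte of each hyp output section so gen-hyprel can express relocations as section-relative offsets. As an illustration (assumed expansion for NAME = .text), HYP_SECTION(.text) would emit roughly:

	.hyp.text : {
		__hyp_section_.hyp.text = .;
		*(.text .text.*)
	}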
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index 0aaf9044cd6a..12d5f47f7dbe 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -6,6 +6,7 @@
#include <linux/linkage.h>
#include <asm/memory.h>
+#include <asm/mte-kasan.h>
#include <asm/pgtable-types.h>
#define arch_kasan_set_tag(addr, tag) __tag_set(addr, tag)
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index d24b527e8c00..9befcd87e9a8 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -90,18 +90,19 @@ static inline void crash_prepare_suspend(void) {}
static inline void crash_post_resume(void) {}
#endif
-#ifdef CONFIG_KEXEC_FILE
#define ARCH_HAS_KIMAGE_ARCH
struct kimage_arch {
void *dtb;
- unsigned long dtb_mem;
+ phys_addr_t dtb_mem;
+ phys_addr_t kern_reloc;
/* Core ELF header buffer */
void *elf_headers;
unsigned long elf_headers_mem;
unsigned long elf_headers_sz;
};
+#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops kexec_image_ops;
struct kimage;
diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
new file mode 100644
index 000000000000..d061176d57ea
--- /dev/null
+++ b/arch/arm64/include/asm/kfence.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arm64 KFENCE support.
+ *
+ * Copyright (C) 2020, Google LLC.
+ */
+
+#ifndef __ASM_KFENCE_H
+#define __ASM_KFENCE_H
+
+#include <asm/cacheflush.h>
+
+static inline bool arch_kfence_init_pool(void) { return true; }
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ set_memory_valid(addr, 1, !protect);
+
+ return true;
+}
+
+#endif /* __ASM_KFENCE_H */
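
kfence_protect_page() only needs to flip the valid bit of a linear-map PTE, which set_memory_valid() already does at page granularity. A hedged usage sketch (hypothetical call site; addr assumed to be a page-aligned linear-map address):

	/* protect == true clears the valid bit, so any access faults */
	kfence_protect_page(addr, true);

	/* protect == false sets it again, making the page accessible */
	kfence_protect_page(addr, false);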
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 8a33d83ea843..22d933e9b59e 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -199,32 +199,6 @@ extern void __vgic_v3_init_lrs(void);
extern u32 __kvm_get_mdcr_el2(void);
-#if defined(GCC_VERSION) && GCC_VERSION < 50000
-#define SYM_CONSTRAINT "i"
-#else
-#define SYM_CONSTRAINT "S"
-#endif
-
-/*
- * Obtain the PC-relative address of a kernel symbol
- * s: symbol
- *
- * The goal of this macro is to return a symbol's address based on a
- * PC-relative computation, as opposed to a loading the VA from a
- * constant pool or something similar. This works well for HYP, as an
- * absolute VA is guaranteed to be wrong. Only use this if trying to
- * obtain the address of a symbol (i.e. not something you obtained by
- * following a pointer).
- */
-#define hyp_symbol_addr(s) \
- ({ \
- typeof(s) *addr; \
- asm("adrp %0, %1\n" \
- "add %0, %0, :lo12:%1\n" \
- : "=r" (addr) : SYM_CONSTRAINT (&s)); \
- addr; \
- })
-
#define __KVM_EXTABLE(from, to) \
" .pushsection __kvm_ex_table, \"a\"\n" \
" .align 3\n" \
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8fcfab0c2567..3d10e6527f7d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -30,7 +30,6 @@
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
-#define KVM_USER_MEM_SLOTS 512
#define KVM_HALT_POLL_NS_DEFAULT 500000
#include <kvm/arm_vgic.h>
@@ -771,4 +770,6 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
#define kvm_vcpu_has_pmu(vcpu) \
(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
+int kvm_trng_call(struct kvm_vcpu *vcpu);
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index e52d82aeadca..90873851f677 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -73,49 +73,39 @@ alternative_cb_end
.endm
/*
- * Convert a kernel image address to a PA
- * reg: kernel address to be converted in place
+ * Convert a hypervisor VA to a PA
+ * reg: hypervisor address to be converted in place
* tmp: temporary register
- *
- * The actual code generation takes place in kvm_get_kimage_voffset, and
- * the instructions below are only there to reserve the space and
- * perform the register allocation (kvm_get_kimage_voffset uses the
- * specific registers encoded in the instructions).
*/
-.macro kimg_pa reg, tmp
-alternative_cb kvm_get_kimage_voffset
- movz \tmp, #0
- movk \tmp, #0, lsl #16
- movk \tmp, #0, lsl #32
- movk \tmp, #0, lsl #48
-alternative_cb_end
-
- /* reg = __pa(reg) */
- sub \reg, \reg, \tmp
+.macro hyp_pa reg, tmp
+ ldr_l \tmp, hyp_physvirt_offset
+ add \reg, \reg, \tmp
.endm
/*
- * Convert a kernel image address to a hyp VA
- * reg: kernel address to be converted in place
+ * Convert a hypervisor VA to a kernel image address
+ * reg: hypervisor address to be converted in place
* tmp: temporary register
*
* The actual code generation takes place in kvm_get_kimage_voffset, and
* the instructions below are only there to reserve the space and
- * perform the register allocation (kvm_update_kimg_phys_offset uses the
+ * perform the register allocation (kvm_get_kimage_voffset uses the
* specific registers encoded in the instructions).
*/
-.macro kimg_hyp_va reg, tmp
-alternative_cb kvm_update_kimg_phys_offset
+.macro hyp_kimg_va reg, tmp
+ /* Convert hyp VA -> PA. */
+ hyp_pa \reg, \tmp
+
+ /* Load kimage_voffset. */
+alternative_cb kvm_get_kimage_voffset
movz \tmp, #0
movk \tmp, #0, lsl #16
movk \tmp, #0, lsl #32
movk \tmp, #0, lsl #48
alternative_cb_end
- sub \reg, \reg, \tmp
- mov_q \tmp, PAGE_OFFSET
- orr \reg, \reg, \tmp
- kern_hyp_va \reg
+ /* Convert PA -> kimg VA. */
+ add \reg, \reg, \tmp
.endm
#else
@@ -129,6 +119,7 @@ alternative_cb_end
void kvm_update_va_mask(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
+void kvm_apply_hyp_relocations(void);
static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
@@ -144,24 +135,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
-static __always_inline unsigned long __kimg_hyp_va(unsigned long v)
-{
- unsigned long offset;
-
- asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
- "movk %0, #0, lsl #16\n"
- "movk %0, #0, lsl #32\n"
- "movk %0, #0, lsl #48\n",
- kvm_update_kimg_phys_offset)
- : "=r" (offset));
-
- return __kern_hyp_va((v - offset) | PAGE_OFFSET);
-}
-
-#define kimg_fn_hyp_va(v) ((typeof(*v))(__kimg_hyp_va((unsigned long)(v))))
-
-#define kimg_fn_ptr(x) (typeof(x) **)(x)
-
/*
* We currently support using a VM-specified IPA size. For backward
* compatibility, the default IPA size is fixed to 40bits.
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 52ab38db04c7..8886d43cfb11 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -157,6 +157,11 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
* If device attributes are not explicitly requested in @prot, then the
* mapping will be normal, cacheable.
*
+ * Note that the update of a valid leaf PTE in this function will be aborted
+ * if it is trying to recreate the exact same mapping or only change the access
+ * permissions. Instead, the vCPU will exit from the guest one more time if
+ * still needed and then go through the path of relaxing permissions.
+ *
* Note that this function will both coalesce existing table entries and split
* existing block mappings, relying on page-faults to fault back areas outside
* of the new mapping lazily.
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 18fce223b67b..c759faf7a1ff 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -159,6 +159,18 @@
#define IOREMAP_MAX_ORDER (PMD_SHIFT)
#endif
+/*
+ * Open-coded (swapper_pg_dir - reserved_pg_dir) as this cannot be calculated
+ * until link time.
+ */
+#define RESERVED_SWAPPER_OFFSET (PAGE_SIZE)
+
+/*
+ * Open-coded (swapper_pg_dir - tramp_pg_dir) as this cannot be calculated
+ * until link time.
+ */
+#define TRAMP_SWAPPER_OFFSET (2 * PAGE_SIZE)
+
#ifndef __ASSEMBLY__
#include <linux/bitops.h>
@@ -232,6 +244,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
#ifdef CONFIG_KASAN_HW_TAGS
#define arch_enable_tagging() mte_enable_kernel()
+#define arch_set_tagging_report_once(state) mte_set_report_once(state)
#define arch_init_tags(max_tag) mte_init_tags(max_tag)
#define arch_get_random_tag() mte_get_random_tag()
#define arch_get_mem_tag(addr) mte_get_mem_tag(addr)
@@ -247,11 +260,13 @@ static inline const void *__tag_set(const void *addr, u8 tag)
/*
- * The linear kernel range starts at the bottom of the virtual address space.
+ * Check whether an arbitrary address is within the linear map, which
+ * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the
+ * kernel's TTBR1 address range.
*/
-#define __is_lm_address(addr) (((u64)(addr) & ~PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
+#define __is_lm_address(addr) (((u64)(addr) - PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
-#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
+#define __lm_to_phys(addr) (((addr) - PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr) ((addr) - kimage_voffset)
#define __virt_to_phys_nodebug(x) ({ \
@@ -330,7 +345,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
#define virt_addr_valid(addr) ({ \
- __typeof__(addr) __addr = addr; \
+ __typeof__(addr) __addr = __tag_reset(addr); \
__is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \
})
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 0b3079fd28eb..70ce8c1d2b07 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -81,16 +81,15 @@ static inline bool __cpu_uses_extended_idmap_level(void)
}
/*
- * Set TCR.T0SZ to its default value (based on VA_BITS)
+ * Ensure TCR.T0SZ is set to the provided value.
*/
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
- unsigned long tcr;
+ unsigned long tcr = read_sysreg(tcr_el1);
- if (!__cpu_uses_extended_idmap())
+ if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz)
return;
- tcr = read_sysreg(tcr_el1);
tcr &= ~TCR_T0SZ_MASK;
tcr |= t0sz << TCR_T0SZ_OFFSET;
write_sysreg(tcr, tcr_el1);
diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h
index 691f15af788e..810045628c66 100644
--- a/arch/arm64/include/asm/module.lds.h
+++ b/arch/arm64/include/asm/module.lds.h
@@ -1,7 +1,7 @@
#ifdef CONFIG_ARM64_MODULE_PLTS
SECTIONS {
- .plt (NOLOAD) : { BYTE(0) }
- .init.plt (NOLOAD) : { BYTE(0) }
- .text.ftrace_trampoline (NOLOAD) : { BYTE(0) }
+ .plt 0 (NOLOAD) : { BYTE(0) }
+ .init.plt 0 (NOLOAD) : { BYTE(0) }
+ .text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) }
}
#endif
diff --git a/arch/arm64/include/asm/mte-def.h b/arch/arm64/include/asm/mte-def.h
index 2d73a1612f09..cf241b0f0a42 100644
--- a/arch/arm64/include/asm/mte-def.h
+++ b/arch/arm64/include/asm/mte-def.h
@@ -11,4 +11,6 @@
#define MTE_TAG_SIZE 4
#define MTE_TAG_MASK GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
+#define __MTE_PREAMBLE ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
+
#endif /* __ASM_MTE_DEF_H */
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index 26349a4b5e2e..7ab500e2ad17 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -11,11 +11,14 @@
#include <linux/types.h>
+#ifdef CONFIG_ARM64_MTE
+
/*
- * The functions below are meant to be used only for the
- * KASAN_HW_TAGS interface defined in asm/memory.h.
+ * These functions are meant to be used only from the KASAN runtime through
+ * the arch_*() interface defined in asm/memory.h.
+ * These functions don't include system_supports_mte() checks,
+ * as KASAN only calls them when MTE is supported and enabled.
*/
-#ifdef CONFIG_ARM64_MTE
static inline u8 mte_get_ptr_tag(void *ptr)
{
@@ -25,13 +28,61 @@ static inline u8 mte_get_ptr_tag(void *ptr)
return tag;
}
-u8 mte_get_mem_tag(void *addr);
-u8 mte_get_random_tag(void);
-void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
+/* Get allocation tag for the address. */
+static inline u8 mte_get_mem_tag(void *addr)
+{
+ asm(__MTE_PREAMBLE "ldg %0, [%0]"
+ : "+r" (addr));
+
+ return mte_get_ptr_tag(addr);
+}
+
+/* Generate a random tag. */
+static inline u8 mte_get_random_tag(void)
+{
+ void *addr;
+
+ asm(__MTE_PREAMBLE "irg %0, %0"
+ : "=r" (addr));
+
+ return mte_get_ptr_tag(addr);
+}
+
+/*
+ * Assign allocation tags for a region of memory based on the pointer tag.
+ * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
+ * size must be non-zero and MTE_GRANULE_SIZE aligned.
+ */
+static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+ u64 curr, end;
+
+ if (!size)
+ return;
+
+ curr = (u64)__tag_set(addr, tag);
+ end = curr + size;
+
+ do {
+ /*
+ * 'asm volatile' is required to prevent the compiler from moving
+ * the statement outside of the loop.
+ */
+ asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
+ :
+ : "r" (curr)
+ : "memory");
+
+ curr += MTE_GRANULE_SIZE;
+ } while (curr != end);
+}
void mte_enable_kernel(void);
void mte_init_tags(u64 max_tag);
+void mte_set_report_once(bool state);
+bool mte_report_once(void);
+
#else /* CONFIG_ARM64_MTE */
static inline u8 mte_get_ptr_tag(void *ptr)
@@ -43,13 +94,14 @@ static inline u8 mte_get_mem_tag(void *addr)
{
return 0xFF;
}
+
static inline u8 mte_get_random_tag(void)
{
return 0xFF;
}
-static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+
+static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
{
- return addr;
}
static inline void mte_enable_kernel(void)
@@ -60,6 +112,15 @@ static inline void mte_init_tags(u64 max_tag)
{
}
+static inline void mte_set_report_once(bool state)
+{
+}
+
+static inline bool mte_report_once(void)
+{
+ return false;
+}
+
#endif /* CONFIG_ARM64_MTE */
#endif /* __ASSEMBLY__ */
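
The MTE helpers used by HW_TAGS KASAN become static inlines that emit IRG/LDG/STG directly rather than calling out of line. A hedged sketch of how a tag-aware allocation path might combine them (hypothetical helper, not part of the patch; addr and size must be MTE_GRANULE_SIZE aligned as the comment above requires):

static void *tag_new_object(void *addr, size_t size)
{
	u8 tag = mte_get_random_tag();		/* IRG: pick a random tag */

	/* STG loop: paint every granule of the object with that tag */
	mte_set_mem_tag_range(addr, size, tag);

	/* hand back a pointer whose top byte matches the memory tags */
	return (void *)__tag_set(addr, tag);
}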
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index d02aff9f493d..9b557a457f24 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -8,8 +8,6 @@
#include <asm/compiler.h>
#include <asm/mte-def.h>
-#define __MTE_PREAMBLE ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
-
#ifndef __ASSEMBLY__
#include <linux/bitfield.h>
diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h
index dd870390d639..8c8cf4297cc3 100644
--- a/arch/arm64/include/asm/numa.h
+++ b/arch/arm64/include/asm/numa.h
@@ -3,52 +3,6 @@
#define __ASM_NUMA_H
#include <asm/topology.h>
-
-#ifdef CONFIG_NUMA
-
-#define NR_NODE_MEMBLKS (MAX_NUMNODES * 2)
-
-int __node_distance(int from, int to);
-#define node_distance(a, b) __node_distance(a, b)
-
-extern nodemask_t numa_nodes_parsed __initdata;
-
-extern bool numa_off;
-
-/* Mappings between node number and cpus on that node. */
-extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
-void numa_clear_node(unsigned int cpu);
-
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-const struct cpumask *cpumask_of_node(int node);
-#else
-/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
-static inline const struct cpumask *cpumask_of_node(int node)
-{
- if (node == NUMA_NO_NODE)
- return cpu_all_mask;
-
- return node_to_cpumask_map[node];
-}
-#endif
-
-void __init arm64_numa_init(void);
-int __init numa_add_memblk(int nodeid, u64 start, u64 end);
-void __init numa_set_distance(int from, int to, int distance);
-void __init numa_free_distance(void);
-void __init early_map_cpu_to_node(unsigned int cpu, int nid);
-void numa_store_cpu_info(unsigned int cpu);
-void numa_add_cpu(unsigned int cpu);
-void numa_remove_cpu(unsigned int cpu);
-
-#else /* CONFIG_NUMA */
-
-static inline void numa_store_cpu_info(unsigned int cpu) { }
-static inline void numa_add_cpu(unsigned int cpu) { }
-static inline void numa_remove_cpu(unsigned int cpu) { }
-static inline void arm64_numa_init(void) { }
-static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
-
-#endif /* CONFIG_NUMA */
+#include <asm-generic/numa.h>
#endif /* __ASM_NUMA_H */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 501562793ce2..e17b96d0e4b5 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -980,7 +980,17 @@ static inline bool arch_faults_on_old_pte(void)
return !cpu_has_hw_af();
}
-#define arch_faults_on_old_pte arch_faults_on_old_pte
+#define arch_faults_on_old_pte arch_faults_on_old_pte
+
+/*
+ * Experimentally, it's cheap to set the access flag in hardware and we
+ * benefit from prefaulting mappings as 'old' to start with.
+ */
+static inline bool arch_wants_old_prefaulted_pte(void)
+{
+ return !arch_faults_on_old_pte();
+}
+#define arch_wants_old_prefaulted_pte arch_wants_old_prefaulted_pte
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index c6b4f0603024..b112a11e9302 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -76,6 +76,15 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
return ptrauth_clear_pac(ptr);
}
+static __always_inline void ptrauth_enable(void)
+{
+ if (!system_supports_address_auth())
+ return;
+ sysreg_clear_set(sctlr_el1, 0, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
+ SCTLR_ELx_ENDA | SCTLR_ELx_ENDB));
+ isb();
+}
+
#define ptrauth_thread_init_user(tsk) \
ptrauth_keys_init_user(&(tsk)->thread.keys_user)
#define ptrauth_thread_init_kernel(tsk) \
@@ -84,6 +93,7 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)
#else /* CONFIG_ARM64_PTR_AUTH */
+#define ptrauth_enable()
#define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL)
#define ptrauth_strip_insn_pac(lr) (lr)
#define ptrauth_thread_init_user(tsk)
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index 8ff579361731..2f36b16a5b5d 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -11,7 +11,8 @@ extern char __alt_instructions[], __alt_instructions_end[];
extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
extern char __hyp_text_start[], __hyp_text_end[];
-extern char __hyp_data_ro_after_init_start[], __hyp_data_ro_after_init_end[];
+extern char __hyp_rodata_start[], __hyp_rodata_end[];
+extern char __hyp_reloc_begin[], __hyp_reloc_end[];
extern char __idmap_text_start[], __idmap_text_end[];
extern char __initdata_begin[], __initdata_end[];
extern char __inittext_begin[], __inittext_end[];
diff --git a/arch/arm64/include/asm/setup.h b/arch/arm64/include/asm/setup.h
new file mode 100644
index 000000000000..d3320618ed14
--- /dev/null
+++ b/arch/arm64/include/asm/setup.h
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef __ARM64_ASM_SETUP_H
+#define __ARM64_ASM_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+void *get_early_fdt_ptr(void);
+void early_fdt_map(u64 dt_phys);
+
+#endif
diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
index 1f43fcc79738..eb4a75d720ed 100644
--- a/arch/arm64/include/asm/sparsemem.h
+++ b/arch/arm64/include/asm/sparsemem.h
@@ -7,7 +7,26 @@
#ifdef CONFIG_SPARSEMEM
#define MAX_PHYSMEM_BITS CONFIG_ARM64_PA_BITS
-#define SECTION_SIZE_BITS 30
-#endif
+
+/*
+ * Section size must be at least 512MB for 64K base
+ * page size config. Otherwise it will be less than
+ * (MAX_ORDER - 1) and the build process will fail.
+ */
+#ifdef CONFIG_ARM64_64K_PAGES
+#define SECTION_SIZE_BITS 29
+
+#else
+
+/*
+ * Section size must be at least 128MB for 4K base
+ * page size config. Otherwise PMD based huge page
+ * entries could not be created for vmemmap mappings.
+ * 16K follows 4K for simplicity.
+ */
+#define SECTION_SIZE_BITS 27
+#endif /* CONFIG_ARM64_64K_PAGES */
+
+#endif /* CONFIG_SPARSEMEM*/
#endif
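
The two values follow from the constraints named in the comments; assuming MAX_ORDER = 14 for the 64K-page configuration and a 64-byte struct page, the arithmetic is:

	64K pages: largest buddy allocation = 2^(MAX_ORDER - 1) pages
	                                    = 2^13 * 64 KiB = 512 MiB  ->  SECTION_SIZE_BITS = 29
	 4K pages: vmemmap of one section   = (2^27 / 2^12) * 64 B
	                                    = 2 MiB = one PMD          ->  SECTION_SIZE_BITS = 27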
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 9083d6992603..0525c0b089ed 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -5,8 +5,8 @@
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
-#include <asm/qrwlock.h>
#include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock() smp_mb()
diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h
index 7263e0bac680..33f1bb453150 100644
--- a/arch/arm64/include/asm/stackprotector.h
+++ b/arch/arm64/include/asm/stackprotector.h
@@ -41,6 +41,7 @@ static __always_inline void boot_init_stack_canary(void)
#endif
ptrauth_thread_init_kernel(current);
ptrauth_thread_switch_kernel(current);
+ ptrauth_enable();
}
#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 8b5e7e5c3cc8..dfd4edbfe360 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -191,6 +191,7 @@
#define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6)
#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0)
+#define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1)
#define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0)
#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1)
@@ -291,7 +292,11 @@
#define SYS_PMSFCR_EL1_ST_SHIFT 18
#define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5)
-#define SYS_PMSEVFR_EL1_RES0 0x0000ffff00ff0f55UL
+#define SYS_PMSEVFR_EL1_RES0_8_2 \
+ (GENMASK_ULL(47, 32) | GENMASK_ULL(23, 16) | GENMASK_ULL(11, 8) |\
+ BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0))
+#define SYS_PMSEVFR_EL1_RES0_8_3 \
+ (SYS_PMSEVFR_EL1_RES0_8_2 & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11)))
#define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6)
#define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0
@@ -471,6 +476,7 @@
#define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0)
#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0)
+#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0)
#define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1)
@@ -829,6 +835,7 @@
#define ID_AA64MMFR2_CNP_SHIFT 0
/* id_aa64dfr0 */
+#define ID_AA64DFR0_TRACE_FILT_SHIFT 40
#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36
#define ID_AA64DFR0_PMSVER_SHIFT 32
#define ID_AA64DFR0_CTX_CMPS_SHIFT 28
@@ -844,9 +851,15 @@
#define ID_AA64DFR0_PMUVER_8_5 0x6
#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf
+#define ID_AA64DFR0_PMSVER_8_2 0x1
+#define ID_AA64DFR0_PMSVER_8_3 0x2
+
#define ID_DFR0_PERFMON_SHIFT 24
+#define ID_DFR0_PERFMON_8_0 0x3
#define ID_DFR0_PERFMON_8_1 0x4
+#define ID_DFR0_PERFMON_8_4 0x5
+#define ID_DFR0_PERFMON_8_5 0x6
#define ID_ISAR4_SWP_FRAC_SHIFT 28
#define ID_ISAR4_PSR_M_SHIFT 24
@@ -1003,6 +1016,14 @@
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
#define SYS_MPIDR_SAFE_VAL (BIT(31))
+#define TRFCR_ELx_TS_SHIFT 5
+#define TRFCR_ELx_TS_VIRTUAL ((0x1UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_ELx_TS_GUEST_PHYSICAL ((0x2UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_ELx_TS_PHYSICAL ((0x3UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_EL2_CX BIT(3)
+#define TRFCR_ELx_ExTRE BIT(1)
+#define TRFCR_ELx_E0TRE BIT(0)
+
#ifdef __ASSEMBLY__
.irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
diff --git a/arch/arm64/include/asm/trans_pgd.h b/arch/arm64/include/asm/trans_pgd.h
new file mode 100644
index 000000000000..5d08e5adf3d5
--- /dev/null
+++ b/arch/arm64/include/asm/trans_pgd.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2020, Microsoft Corporation.
+ * Pavel Tatashin <pasha.tatashin@soleen.com>
+ */
+
+#ifndef _ASM_TRANS_TABLE_H
+#define _ASM_TRANS_TABLE_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+#include <asm/pgtable-types.h>
+
+/*
+ * trans_alloc_page
+ * - Allocator that should return exactly one zeroed page, if this
+ * allocator fails, trans_pgd_create_copy() and trans_pgd_map_page()
+ * return -ENOMEM error.
+ *
+ * trans_alloc_arg
+ * - Passed to trans_alloc_page as an argument
+ */
+
+struct trans_pgd_info {
+ void * (*trans_alloc_page)(void *arg);
+ void *trans_alloc_arg;
+};
+
+int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **trans_pgd,
+ unsigned long start, unsigned long end);
+
+int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd,
+ void *page, unsigned long dst_addr, pgprot_t pgprot);
+
+int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
+ unsigned long *t0sz, void *page);
+
+#endif /* _ASM_TRANS_TABLE_H */
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index f0fe0cc6abe0..0deb88467111 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -87,7 +87,7 @@ static inline void __uaccess_ttbr0_disable(void)
ttbr = read_sysreg(ttbr1_el1);
ttbr &= ~TTBR_ASID_MASK;
/* reserved_pg_dir placed before swapper_pg_dir */
- write_sysreg(ttbr - PAGE_SIZE, ttbr0_el1);
+ write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1);
isb();
/* Set reserved ASID */
write_sysreg(ttbr, ttbr1_el1);
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 86a9d7b3eabe..949788f5ba40 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -38,7 +38,7 @@
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
-#define __NR_compat_syscalls 442
+#define __NR_compat_syscalls 443
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index cccfbbefbf95..3d874f624056 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -891,6 +891,8 @@ __SYSCALL(__NR_faccessat2, sys_faccessat2)
__SYSCALL(__NR_process_madvise, sys_process_madvise)
#define __NR_epoll_pwait2 441
__SYSCALL(__NR_epoll_pwait2, compat_sys_epoll_pwait2)
+#define __NR_mount_setattr 442
+__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
/*
* Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index ee6a48df89d9..7379f35ae2c6 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -35,8 +35,13 @@
*/
#define HVC_RESET_VECTORS 2
+/*
+ * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible
+ */
+#define HVC_VHE_RESTART 3
+
/* Max number of HYP stub hypercalls */
-#define HVC_STUB_HCALL_NR 3
+#define HVC_STUB_HCALL_NR 4
/* Error returned when an invalid stub number is passed into x0 */
#define HVC_STUB_ERR 0xbadca11
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 86364ab6f13f..ed65576ce710 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -17,7 +17,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
return_address.o cpuinfo.o cpu_errata.o \
cpufeature.o alternative.o cacheinfo.o \
smp.o smp_spin_table.o topology.o smccc-call.o \
- syscall.o proton-pack.o
+ syscall.o proton-pack.o idreg-override.o
targets += efi-entry.o
@@ -59,9 +59,10 @@ obj-$(CONFIG_CRASH_CORE) += crash_core.o
obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
obj-$(CONFIG_ARM64_MTE) += mte.o
+obj-y += vdso-wrap.o
+obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o
-obj-y += vdso/ probes/
-obj-$(CONFIG_COMPAT_VDSO) += vdso32/
+obj-y += probes/
head-y := head.o
extra-y += $(head-y) vmlinux.lds
diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
index 7ff800045434..fdfecf0991ce 100644
--- a/arch/arm64/kernel/acpi_numa.c
+++ b/arch/arm64/kernel/acpi_numa.c
@@ -118,15 +118,3 @@ void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa)
node_set(node, numa_nodes_parsed);
}
-int __init arm64_acpi_numa_init(void)
-{
- int ret;
-
- ret = acpi_numa_init();
- if (ret) {
- pr_info("Failed to initialise from firmware\n");
- return ret;
- }
-
- return srat_disabled() ? -EINVAL : 0;
-}
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index a57cffb752e8..1184c44ea2c7 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -17,7 +17,7 @@
#include <asm/sections.h>
#include <linux/stop_machine.h>
-#define __ALT_PTR(a,f) ((void *)&(a)->f + (a)->f)
+#define __ALT_PTR(a, f) ((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 301784463587..a36e2fc330d4 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -99,6 +99,9 @@ int main(void)
DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
BLANK();
+ DEFINE(FTR_OVR_VAL_OFFSET, offsetof(struct arm64_ftr_override, val));
+ DEFINE(FTR_OVR_MASK_OFFSET, offsetof(struct arm64_ftr_override, mask));
+ BLANK();
#ifdef CONFIG_KVM
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index a63428301f42..506a1cd37973 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -107,8 +107,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
}
#ifdef CONFIG_ARM64_ERRATUM_1463225
-DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
-
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
int scope)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index e99eddec0a46..066030717a4c 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -352,9 +352,12 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
ARM64_FTR_END,
};
+static struct arm64_ftr_override __ro_after_init no_override = { };
+
struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
.name = "SYS_CTR_EL0",
- .ftr_bits = ftr_ctr
+ .ftr_bits = ftr_ctr,
+ .override = &no_override,
};
static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
@@ -544,13 +547,20 @@ static const struct arm64_ftr_bits ftr_raz[] = {
ARM64_FTR_END,
};
-#define ARM64_FTR_REG(id, table) { \
- .sys_id = id, \
- .reg = &(struct arm64_ftr_reg){ \
- .name = #id, \
- .ftr_bits = &((table)[0]), \
+#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) { \
+ .sys_id = id, \
+ .reg = &(struct arm64_ftr_reg){ \
+ .name = #id, \
+ .override = (ovr), \
+ .ftr_bits = &((table)[0]), \
}}
+#define ARM64_FTR_REG(id, table) ARM64_FTR_REG_OVERRIDE(id, table, &no_override)
+
+struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override;
+struct arm64_ftr_override __ro_after_init id_aa64pfr1_override;
+struct arm64_ftr_override __ro_after_init id_aa64isar1_override;
+
static const struct __ftr_reg_entry {
u32 sys_id;
struct arm64_ftr_reg *reg;
@@ -585,7 +595,8 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 0, CRm = 4 */
ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
- ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
+ ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1,
+ &id_aa64pfr1_override),
ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),
/* Op1 = 0, CRn = 0, CRm = 5 */
@@ -594,11 +605,13 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 0, CRm = 6 */
ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
- ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
+ ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
+ &id_aa64isar1_override),
/* Op1 = 0, CRn = 0, CRm = 7 */
ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
- ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
+ ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1,
+ &id_aa64mmfr1_override),
ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
/* Op1 = 0, CRn = 1, CRm = 2 */
@@ -770,6 +783,33 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
u64 ftr_mask = arm64_ftr_mask(ftrp);
s64 ftr_new = arm64_ftr_value(ftrp, new);
+ s64 ftr_ovr = arm64_ftr_value(ftrp, reg->override->val);
+
+ if ((ftr_mask & reg->override->mask) == ftr_mask) {
+ s64 tmp = arm64_ftr_safe_value(ftrp, ftr_ovr, ftr_new);
+ char *str = NULL;
+
+ if (ftr_ovr != tmp) {
+ /* Unsafe, remove the override */
+ reg->override->mask &= ~ftr_mask;
+ reg->override->val &= ~ftr_mask;
+ tmp = ftr_ovr;
+ str = "ignoring override";
+ } else if (ftr_new != tmp) {
+ /* Override was valid */
+ ftr_new = tmp;
+ str = "forced";
+ } else if (ftr_ovr == tmp) {
+ /* Override was the safe value */
+ str = "already set";
+ }
+
+ if (str)
+ pr_warn("%s[%d:%d]: %s to %llx\n",
+ reg->name,
+ ftrp->shift + ftrp->width - 1,
+ ftrp->shift, str, tmp);
+ }
val = arm64_ftr_set_value(ftrp, val, ftr_new);
@@ -1115,14 +1155,17 @@ u64 read_sanitised_ftr_reg(u32 id)
EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg);
#define read_sysreg_case(r) \
- case r: return read_sysreg_s(r)
+ case r: val = read_sysreg_s(r); break;
/*
* __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
* Read the system register on the current CPU
*/
-static u64 __read_sysreg_by_encoding(u32 sys_id)
+u64 __read_sysreg_by_encoding(u32 sys_id)
{
+ struct arm64_ftr_reg *regp;
+ u64 val;
+
switch (sys_id) {
read_sysreg_case(SYS_ID_PFR0_EL1);
read_sysreg_case(SYS_ID_PFR1_EL1);
@@ -1165,6 +1208,14 @@ static u64 __read_sysreg_by_encoding(u32 sys_id)
BUG();
return 0;
}
+
+ regp = get_arm64_ftr_reg(sys_id);
+ if (regp) {
+ val &= ~regp->override->mask;
+ val |= (regp->override->val & regp->override->mask);
+ }
+
+ return val;
}
#include <linux/irqchip/arm-gic-v3.h>
@@ -1455,7 +1506,7 @@ static bool cpu_has_broken_dbm(void)
/* List of CPUs which have broken DBM support. */
static const struct midr_range cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_1024718
- MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 -r1p0
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
/* Kryo4xx Silver (rdpe => r1p0) */
MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
@@ -1701,16 +1752,12 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
#ifdef CONFIG_ARM64_MTE
static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
{
- static bool cleared_zero_page = false;
-
/*
* Clear the tags in the zero page. This needs to be done via the
* linear map which has the Tagged attribute.
*/
- if (!cleared_zero_page) {
- cleared_zero_page = true;
+ if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags))
mte_clear_page_tags(lm_alias(empty_zero_page));
- }
kasan_init_hw_tags_cpu();
}
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 5346953e4382..9d3588450473 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -109,6 +109,55 @@ asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
exit_to_kernel_mode(regs);
}
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static void cortex_a76_erratum_1463225_svc_handler(void)
+{
+ u32 reg, val;
+
+ if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
+ return;
+
+ if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
+ return;
+
+ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
+ reg = read_sysreg(mdscr_el1);
+ val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
+ write_sysreg(val, mdscr_el1);
+ asm volatile("msr daifclr, #8");
+ isb();
+
+ /* We will have taken a single-step exception by this point */
+
+ write_sysreg(reg, mdscr_el1);
+ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
+}
+
+static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+{
+ if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
+ return false;
+
+ /*
+ * We've taken a dummy step exception from the kernel to ensure
+ * that interrupts are re-enabled on the syscall path. Return back
+ * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
+ * masked so that we can safely restore the mdscr and get on with
+ * handling the syscall.
+ */
+ regs->pstate |= PSR_D_BIT;
+ return true;
+}
+#else /* CONFIG_ARM64_ERRATUM_1463225 */
+static void cortex_a76_erratum_1463225_svc_handler(void) { }
+static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+{
+ return false;
+}
+#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
@@ -186,7 +235,8 @@ static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
arm64_enter_el1_dbg(regs);
- do_debug_exception(far, esr, regs);
+ if (!cortex_a76_erratum_1463225_debug_handler(regs))
+ do_debug_exception(far, esr, regs);
arm64_exit_el1_dbg(regs);
}
@@ -362,6 +412,7 @@ static void noinstr el0_svc(struct pt_regs *regs)
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
enter_from_user_mode();
+ cortex_a76_erratum_1463225_svc_handler();
do_el0_svc(regs);
}
@@ -439,6 +490,7 @@ static void noinstr el0_svc_compat(struct pt_regs *regs)
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
enter_from_user_mode();
+ cortex_a76_erratum_1463225_svc_handler();
do_el0_svc_compat(regs);
}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index c9bae73f2621..a31a0a713c85 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -261,16 +261,16 @@ alternative_else_nop_endif
stp lr, x21, [sp, #S_LR]
/*
- * In order to be able to dump the contents of struct pt_regs at the
- * time the exception was taken (in case we attempt to walk the call
- * stack later), chain it together with the stack frames.
+ * For exceptions from EL0, terminate the callchain here.
+ * For exceptions from EL1, create a synthetic frame record so the
+ * interrupted code shows up in the backtrace.
*/
.if \el == 0
- stp xzr, xzr, [sp, #S_STACKFRAME]
+ mov x29, xzr
.else
stp x29, x22, [sp, #S_STACKFRAME]
- .endif
add x29, sp, #S_STACKFRAME
+ .endif
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
@@ -805,7 +805,7 @@ SYM_CODE_END(ret_to_user)
// Move from tramp_pg_dir to swapper_pg_dir
.macro tramp_map_kernel, tmp
mrs \tmp, ttbr1_el1
- add \tmp, \tmp, #(2 * PAGE_SIZE)
+ add \tmp, \tmp, #TRAMP_SWAPPER_OFFSET
bic \tmp, \tmp, #USER_ASID_FLAG
msr ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
@@ -825,7 +825,7 @@ alternative_else_nop_endif
// Move from swapper_pg_dir to tramp_pg_dir
.macro tramp_unmap_kernel, tmp
mrs \tmp, ttbr1_el1
- sub \tmp, \tmp, #(2 * PAGE_SIZE)
+ sub \tmp, \tmp, #TRAMP_SWAPPER_OFFSET
orr \tmp, \tmp, #USER_ASID_FLAG
msr ttbr1_el1, \tmp
/*
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a0dc987724ed..66b0e0b66e31 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -404,10 +404,6 @@ SYM_FUNC_START_LOCAL(__primary_switched)
adr_l x5, init_task
msr sp_el0, x5 // Save thread_info
-#ifdef CONFIG_ARM64_PTR_AUTH
- __ptrauth_keys_init_cpu x5, x6, x7, x8
-#endif
-
adr_l x8, vectors // load VBAR_EL1 with virtual
msr vbar_el1, x8 // vector table address
isb
@@ -436,10 +432,12 @@ SYM_FUNC_START_LOCAL(__primary_switched)
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
bl kasan_early_init
#endif
+ mov x0, x21 // pass FDT address in x0
+ bl early_fdt_map // Try mapping the FDT early
+ bl init_feature_override // Parse cpu feature overrides
#ifdef CONFIG_RANDOMIZE_BASE
tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized?
b.ne 0f
- mov x0, x21 // pass FDT address in x0
bl kaslr_early_init // parse FDT for KASLR options
cbz x0, 0f // KASLR disabled? just proceed
orr x23, x23, x0 // record KASLR offset
@@ -447,6 +445,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
ret // to __primary_switch()
0:
#endif
+ bl switch_to_vhe // Prefer VHE if possible
add sp, sp, #16
mov x29, #0
mov x30, #0
@@ -478,13 +477,14 @@ EXPORT_SYMBOL(kimage_vaddr)
* booted in EL1 or EL2 respectively.
*/
SYM_FUNC_START(init_kernel_el)
+ mov_q x0, INIT_SCTLR_EL1_MMU_OFF
+ msr sctlr_el1, x0
+
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
b.eq init_el2
SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
- mov_q x0, INIT_SCTLR_EL1_MMU_OFF
- msr sctlr_el1, x0
isb
mov_q x0, INIT_PSTATE_EL1
msr spsr_el1, x0
@@ -493,50 +493,11 @@ SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
eret
SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
-#ifdef CONFIG_ARM64_VHE
- /*
- * Check for VHE being present. x2 being non-zero indicates that we
- * do have VHE, and that the kernel is intended to run at EL2.
- */
- mrs x2, id_aa64mmfr1_el1
- ubfx x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
-#else
- mov x2, xzr
-#endif
- cbz x2, init_el2_nvhe
-
- /*
- * When VHE _is_ in use, EL1 will not be used in the host and
- * requires no configuration, and all non-hyp-specific EL2 setup
- * will be done via the _EL1 system register aliases in __cpu_setup.
- */
- mov_q x0, HCR_HOST_VHE_FLAGS
- msr hcr_el2, x0
- isb
-
- init_el2_state vhe
-
- isb
-
- mov_q x0, INIT_PSTATE_EL2
- msr spsr_el2, x0
- msr elr_el2, lr
- mov w0, #BOOT_CPU_MODE_EL2
- eret
-
-SYM_INNER_LABEL(init_el2_nvhe, SYM_L_LOCAL)
- /*
- * When VHE is not in use, early init of EL2 and EL1 needs to be
- * done here.
- */
- mov_q x0, INIT_SCTLR_EL1_MMU_OFF
- msr sctlr_el1, x0
-
mov_q x0, HCR_HOST_NVHE_FLAGS
msr hcr_el2, x0
isb
- init_el2_state nvhe
+ init_el2_state
/* Hypervisor stub */
adr_l x0, __hyp_stub_vectors
@@ -623,6 +584,7 @@ SYM_FUNC_START_LOCAL(secondary_startup)
/*
* Common entry point for secondary CPUs.
*/
+ bl switch_to_vhe
bl __cpu_secondary_check52bitva
bl __cpu_setup // initialise processor
adrp x1, swapper_pg_dir
@@ -703,16 +665,9 @@ SYM_FUNC_START(__enable_mmu)
offset_ttbr1 x1, x3
msr ttbr1_el1, x1 // load TTBR1
isb
- msr sctlr_el1, x0
- isb
- /*
- * Invalidate the local I-cache so that any instructions fetched
- * speculatively from the PoC are discarded, since they may have
- * been dynamically patched at the PoU.
- */
- ic iallu
- dsb nsh
- isb
+
+ set_sctlr_el1 x0
+
ret
SYM_FUNC_END(__enable_mmu)
@@ -882,13 +837,10 @@ SYM_FUNC_START_LOCAL(__primary_switch)
tlbi vmalle1 // Remove any stale TLB entries
dsb nsh
-
- msr sctlr_el1, x19 // re-enable the MMU
- isb
- ic iallu // flush instructions fetched
- dsb nsh // via old mapping
isb
+ set_sctlr_el1 x19 // re-enable the MMU
+
bl __relocate_kernel
#endif
#endif
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 9c9f47e9f7f4..b1cef371df2b 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -16,7 +16,6 @@
#define pr_fmt(x) "hibernate: " x
#include <linux/cpu.h>
#include <linux/kvm_host.h>
-#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
@@ -31,13 +30,12 @@
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/sysreg.h>
+#include <asm/trans_pgd.h>
#include <asm/virt.h>
/*
@@ -178,52 +176,9 @@ int arch_hibernation_header_restore(void *addr)
}
EXPORT_SYMBOL(arch_hibernation_header_restore);
-static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
- unsigned long dst_addr,
- pgprot_t pgprot)
+static void *hibernate_page_alloc(void *arg)
{
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t *ptep;
-
- pgdp = pgd_offset_pgd(trans_pgd, dst_addr);
- if (pgd_none(READ_ONCE(*pgdp))) {
- pudp = (void *)get_safe_page(GFP_ATOMIC);
- if (!pudp)
- return -ENOMEM;
- pgd_populate(&init_mm, pgdp, pudp);
- }
-
- p4dp = p4d_offset(pgdp, dst_addr);
- if (p4d_none(READ_ONCE(*p4dp))) {
- pudp = (void *)get_safe_page(GFP_ATOMIC);
- if (!pudp)
- return -ENOMEM;
- p4d_populate(&init_mm, p4dp, pudp);
- }
-
- pudp = pud_offset(p4dp, dst_addr);
- if (pud_none(READ_ONCE(*pudp))) {
- pmdp = (void *)get_safe_page(GFP_ATOMIC);
- if (!pmdp)
- return -ENOMEM;
- pud_populate(&init_mm, pudp, pmdp);
- }
-
- pmdp = pmd_offset(pudp, dst_addr);
- if (pmd_none(READ_ONCE(*pmdp))) {
- ptep = (void *)get_safe_page(GFP_ATOMIC);
- if (!ptep)
- return -ENOMEM;
- pmd_populate_kernel(&init_mm, pmdp, ptep);
- }
-
- ptep = pte_offset_kernel(pmdp, dst_addr);
- set_pte(ptep, pfn_pte(virt_to_pfn(page), PAGE_KERNEL_EXEC));
-
- return 0;
+ return (void *)get_safe_page((__force gfp_t)(unsigned long)arg);
}
/*
@@ -239,11 +194,16 @@ static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
* page system.
*/
static int create_safe_exec_page(void *src_start, size_t length,
- unsigned long dst_addr,
phys_addr_t *phys_dst_addr)
{
+ struct trans_pgd_info trans_info = {
+ .trans_alloc_page = hibernate_page_alloc,
+ .trans_alloc_arg = (__force void *)GFP_ATOMIC,
+ };
+
void *page = (void *)get_safe_page(GFP_ATOMIC);
- pgd_t *trans_pgd;
+ phys_addr_t trans_ttbr0;
+ unsigned long t0sz;
int rc;
if (!page)
@@ -251,13 +211,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
memcpy(page, src_start, length);
__flush_icache_range((unsigned long)page, (unsigned long)page + length);
-
- trans_pgd = (void *)get_safe_page(GFP_ATOMIC);
- if (!trans_pgd)
- return -ENOMEM;
-
- rc = trans_pgd_map_page(trans_pgd, page, dst_addr,
- PAGE_KERNEL_EXEC);
+ rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
if (rc)
return rc;
@@ -270,12 +224,15 @@ static int create_safe_exec_page(void *src_start, size_t length,
* page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
* runtime services), while for a userspace-driven test_resume cycle it
* points to userspace page tables (and we must point it at a zero page
- * ourselves). Elsewhere we only (un)install the idmap with preemption
- * disabled, so T0SZ should be as required regardless.
+ * ourselves).
+ *
+ * We change T0SZ as part of installing the idmap. This is undone by
+ * cpu_uninstall_idmap() in __cpu_suspend_exit().
*/
cpu_set_reserved_ttbr0();
local_flush_tlb_all();
- write_sysreg(phys_to_ttbr(virt_to_phys(trans_pgd)), ttbr0_el1);
+ __cpu_set_tcr_t0sz(t0sz);
+ write_sysreg(trans_ttbr0, ttbr0_el1);
isb();
*phys_dst_addr = virt_to_phys(page);
@@ -462,182 +419,6 @@ int swsusp_arch_suspend(void)
return ret;
}
-static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
-{
- pte_t pte = READ_ONCE(*src_ptep);
-
- if (pte_valid(pte)) {
- /*
- * Resume will overwrite areas that may be marked
- * read only (code, rodata). Clear the RDONLY bit from
- * the temporary mappings we use during restore.
- */
- set_pte(dst_ptep, pte_mkwrite(pte));
- } else if (debug_pagealloc_enabled() && !pte_none(pte)) {
- /*
- * debug_pagealloc will removed the PTE_VALID bit if
- * the page isn't in use by the resume kernel. It may have
- * been in use by the original kernel, in which case we need
- * to put it back in our copy to do the restore.
- *
- * Before marking this entry valid, check the pfn should
- * be mapped.
- */
- BUG_ON(!pfn_valid(pte_pfn(pte)));
-
- set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
- }
-}
-
-static int copy_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start,
- unsigned long end)
-{
- pte_t *src_ptep;
- pte_t *dst_ptep;
- unsigned long addr = start;
-
- dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC);
- if (!dst_ptep)
- return -ENOMEM;
- pmd_populate_kernel(&init_mm, dst_pmdp, dst_ptep);
- dst_ptep = pte_offset_kernel(dst_pmdp, start);
-
- src_ptep = pte_offset_kernel(src_pmdp, start);
- do {
- _copy_pte(dst_ptep, src_ptep, addr);
- } while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);
-
- return 0;
-}
-
-static int copy_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
- unsigned long end)
-{
- pmd_t *src_pmdp;
- pmd_t *dst_pmdp;
- unsigned long next;
- unsigned long addr = start;
-
- if (pud_none(READ_ONCE(*dst_pudp))) {
- dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC);
- if (!dst_pmdp)
- return -ENOMEM;
- pud_populate(&init_mm, dst_pudp, dst_pmdp);
- }
- dst_pmdp = pmd_offset(dst_pudp, start);
-
- src_pmdp = pmd_offset(src_pudp, start);
- do {
- pmd_t pmd = READ_ONCE(*src_pmdp);
-
- next = pmd_addr_end(addr, end);
- if (pmd_none(pmd))
- continue;
- if (pmd_table(pmd)) {
- if (copy_pte(dst_pmdp, src_pmdp, addr, next))
- return -ENOMEM;
- } else {
- set_pmd(dst_pmdp,
- __pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
- }
- } while (dst_pmdp++, src_pmdp++, addr = next, addr != end);
-
- return 0;
-}
-
-static int copy_pud(p4d_t *dst_p4dp, p4d_t *src_p4dp, unsigned long start,
- unsigned long end)
-{
- pud_t *dst_pudp;
- pud_t *src_pudp;
- unsigned long next;
- unsigned long addr = start;
-
- if (p4d_none(READ_ONCE(*dst_p4dp))) {
- dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
- if (!dst_pudp)
- return -ENOMEM;
- p4d_populate(&init_mm, dst_p4dp, dst_pudp);
- }
- dst_pudp = pud_offset(dst_p4dp, start);
-
- src_pudp = pud_offset(src_p4dp, start);
- do {
- pud_t pud = READ_ONCE(*src_pudp);
-
- next = pud_addr_end(addr, end);
- if (pud_none(pud))
- continue;
- if (pud_table(pud)) {
- if (copy_pmd(dst_pudp, src_pudp, addr, next))
- return -ENOMEM;
- } else {
- set_pud(dst_pudp,
- __pud(pud_val(pud) & ~PUD_SECT_RDONLY));
- }
- } while (dst_pudp++, src_pudp++, addr = next, addr != end);
-
- return 0;
-}
-
-static int copy_p4d(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
- unsigned long end)
-{
- p4d_t *dst_p4dp;
- p4d_t *src_p4dp;
- unsigned long next;
- unsigned long addr = start;
-
- dst_p4dp = p4d_offset(dst_pgdp, start);
- src_p4dp = p4d_offset(src_pgdp, start);
- do {
- next = p4d_addr_end(addr, end);
- if (p4d_none(READ_ONCE(*src_p4dp)))
- continue;
- if (copy_pud(dst_p4dp, src_p4dp, addr, next))
- return -ENOMEM;
- } while (dst_p4dp++, src_p4dp++, addr = next, addr != end);
-
- return 0;
-}
-
-static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
- unsigned long end)
-{
- unsigned long next;
- unsigned long addr = start;
- pgd_t *src_pgdp = pgd_offset_k(start);
-
- dst_pgdp = pgd_offset_pgd(dst_pgdp, start);
- do {
- next = pgd_addr_end(addr, end);
- if (pgd_none(READ_ONCE(*src_pgdp)))
- continue;
- if (copy_p4d(dst_pgdp, src_pgdp, addr, next))
- return -ENOMEM;
- } while (dst_pgdp++, src_pgdp++, addr = next, addr != end);
-
- return 0;
-}
-
-static int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start,
- unsigned long end)
-{
- int rc;
- pgd_t *trans_pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
-
- if (!trans_pgd) {
- pr_err("Failed to allocate memory for temporary page tables.\n");
- return -ENOMEM;
- }
-
- rc = copy_page_tables(trans_pgd, start, end);
- if (!rc)
- *dst_pgdp = trans_pgd;
-
- return rc;
-}
-
/*
* Setup then Resume from the hibernate image using swsusp_arch_suspend_exit().
*
@@ -650,16 +431,20 @@ int swsusp_arch_resume(void)
void *zero_page;
size_t exit_size;
pgd_t *tmp_pg_dir;
- phys_addr_t phys_hibernate_exit;
void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
void *, phys_addr_t, phys_addr_t);
+ struct trans_pgd_info trans_info = {
+ .trans_alloc_page = hibernate_page_alloc,
+ .trans_alloc_arg = (void *)GFP_ATOMIC,
+ };
/*
* Restoring the memory image will overwrite the ttbr1 page tables.
* Create a second copy of just the linear map, and use this when
* restoring.
*/
- rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END);
+ rc = trans_pgd_create_copy(&trans_info, &tmp_pg_dir, PAGE_OFFSET,
+ PAGE_END);
if (rc)
return rc;
@@ -673,19 +458,13 @@ int swsusp_arch_resume(void)
return -ENOMEM;
}
- /*
- * Locate the exit code in the bottom-but-one page, so that *NULL
- * still has disastrous affects.
- */
- hibernate_exit = (void *)PAGE_SIZE;
exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
/*
* Copy swsusp_arch_suspend_exit() to a safe page. This will generate
* a new set of ttbr0 page tables and load them.
*/
rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
- (unsigned long)hibernate_exit,
- &phys_hibernate_exit);
+ (phys_addr_t *)&hibernate_exit);
if (rc) {
pr_err("Failed to create safe executable page for hibernate_exit code.\n");
return rc;
@@ -704,7 +483,7 @@ int swsusp_arch_resume(void)
* We can skip this step if we booted at EL1, or are running with VHE.
*/
if (el2_reset_needed()) {
- phys_addr_t el2_vectors = phys_hibernate_exit; /* base */
+ phys_addr_t el2_vectors = (phys_addr_t)hibernate_exit;
el2_vectors += hibernate_el2_vectors -
__hibernate_exit_text_start; /* offset */
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 160f5881a0b7..5eccbd62fec8 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -8,9 +8,9 @@
#include <linux/init.h>
#include <linux/linkage.h>
-#include <linux/irqchip/arm-gic-v3.h>
#include <asm/assembler.h>
+#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
@@ -47,10 +47,13 @@ SYM_CODE_END(__hyp_stub_vectors)
SYM_CODE_START_LOCAL(el1_sync)
cmp x0, #HVC_SET_VECTORS
- b.ne 2f
+ b.ne 1f
msr vbar_el2, x1
b 9f
+1: cmp x0, #HVC_VHE_RESTART
+ b.eq mutate_to_vhe
+
2: cmp x0, #HVC_SOFT_RESTART
b.ne 3f
mov x0, x2
@@ -70,6 +73,102 @@ SYM_CODE_START_LOCAL(el1_sync)
eret
SYM_CODE_END(el1_sync)
+// nVHE? No way! Give me the real thing!
+SYM_CODE_START_LOCAL(mutate_to_vhe)
+ // Sanity check: MMU *must* be off
+ mrs x1, sctlr_el2
+ tbnz x1, #0, 1f
+
+ // Needs to be VHE capable, obviously
+ mrs x1, id_aa64mmfr1_el1
+ ubfx x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4
+ cbz x1, 1f
+
+ // Check whether VHE is disabled from the command line
+ adr_l x1, id_aa64mmfr1_override
+ ldr x2, [x1, FTR_OVR_VAL_OFFSET]
+ ldr x1, [x1, FTR_OVR_MASK_OFFSET]
+ ubfx x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
+ ubfx x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4
+ cmp x1, xzr
+ and x2, x2, x1
+ csinv x2, x2, xzr, ne
+ cbnz x2, 2f
+
+1: mov_q x0, HVC_STUB_ERR
+ eret
+2:
+ // Engage the VHE magic!
+ mov_q x0, HCR_HOST_VHE_FLAGS
+ msr hcr_el2, x0
+ isb
+
+ // Use the EL1 allocated stack, per-cpu offset
+ mrs x0, sp_el1
+ mov sp, x0
+ mrs x0, tpidr_el1
+ msr tpidr_el2, x0
+
+ // FP configuration, vectors
+ mrs_s x0, SYS_CPACR_EL12
+ msr cpacr_el1, x0
+ mrs_s x0, SYS_VBAR_EL12
+ msr vbar_el1, x0
+
+ // Use EL2 translations for SPE and disable access from EL1
+ mrs x0, mdcr_el2
+ bic x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
+ msr mdcr_el2, x0
+
+ // Transfer the MM state from EL1 to EL2
+ mrs_s x0, SYS_TCR_EL12
+ msr tcr_el1, x0
+ mrs_s x0, SYS_TTBR0_EL12
+ msr ttbr0_el1, x0
+ mrs_s x0, SYS_TTBR1_EL12
+ msr ttbr1_el1, x0
+ mrs_s x0, SYS_MAIR_EL12
+ msr mair_el1, x0
+ isb
+
+ // Hack the exception return to stay at EL2
+ mrs x0, spsr_el1
+ and x0, x0, #~PSR_MODE_MASK
+ mov x1, #PSR_MODE_EL2h
+ orr x0, x0, x1
+ msr spsr_el1, x0
+
+ b enter_vhe
+SYM_CODE_END(mutate_to_vhe)
+
+ // At the point where we reach enter_vhe(), we run with
+ // the MMU off (which is enforced by mutate_to_vhe()).
+ // We thus need to be in the idmap, or everything will
+ // explode when enabling the MMU.
+
+ .pushsection .idmap.text, "ax"
+
+SYM_CODE_START_LOCAL(enter_vhe)
+ // Invalidate TLBs before enabling the MMU
+ tlbi vmalle1
+ dsb nsh
+ isb
+
+ // Enable the EL2 S1 MMU, as set up from EL1
+ mrs_s x0, SYS_SCTLR_EL12
+ set_sctlr_el1 x0
+
+ // Disable the EL1 S1 MMU for a good measure
+ mov_q x0, INIT_SCTLR_EL1_MMU_OFF
+ msr_s SYS_SCTLR_EL12, x0
+
+ mov x0, xzr
+
+ eret
+SYM_CODE_END(enter_vhe)
+
+ .popsection
+
.macro invalid_vector label
SYM_CODE_START_LOCAL(\label)
b \label
@@ -85,6 +184,8 @@ SYM_CODE_END(\label)
invalid_vector el1_fiq_invalid
invalid_vector el1_error_invalid
+ .popsection
+
/*
* __hyp_set_vectors: Call this after boot to set the initial hypervisor
* vectors as part of hypervisor installation. On an SMP system, this should
@@ -118,3 +219,27 @@ SYM_FUNC_START(__hyp_reset_vectors)
hvc #0
ret
SYM_FUNC_END(__hyp_reset_vectors)
+
+/*
+ * Entry point to switch to VHE if deemed capable
+ */
+SYM_FUNC_START(switch_to_vhe)
+#ifdef CONFIG_ARM64_VHE
+ // Need to have booted at EL2
+ adr_l x1, __boot_cpu_mode
+ ldr w0, [x1]
+ cmp w0, #BOOT_CPU_MODE_EL2
+ b.ne 1f
+
+ // and still be at EL1
+ mrs x0, CurrentEL
+ cmp x0, #CurrentEL_EL1
+ b.ne 1f
+
+ // Turn the world upside down
+ mov x0, #HVC_VHE_RESTART
+ hvc #0
+1:
+#endif
+ ret
+SYM_FUNC_END(switch_to_vhe)
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
new file mode 100644
index 000000000000..dffb16682330
--- /dev/null
+++ b/arch/arm64/kernel/idreg-override.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Early cpufeature override framework
+ *
+ * Copyright (C) 2020 Google LLC
+ * Author: Marc Zyngier <maz@kernel.org>
+ */
+
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
+#include <asm/setup.h>
+
+#define FTR_DESC_NAME_LEN 20
+#define FTR_DESC_FIELD_LEN 10
+#define FTR_ALIAS_NAME_LEN 30
+#define FTR_ALIAS_OPTION_LEN 80
+
+struct ftr_set_desc {
+ char name[FTR_DESC_NAME_LEN];
+ struct arm64_ftr_override *override;
+ struct {
+ char name[FTR_DESC_FIELD_LEN];
+ u8 shift;
+ } fields[];
+};
+
+static const struct ftr_set_desc mmfr1 __initconst = {
+ .name = "id_aa64mmfr1",
+ .override = &id_aa64mmfr1_override,
+ .fields = {
+ { "vh", ID_AA64MMFR1_VHE_SHIFT },
+ {}
+ },
+};
+
+static const struct ftr_set_desc pfr1 __initconst = {
+ .name = "id_aa64pfr1",
+ .override = &id_aa64pfr1_override,
+ .fields = {
+ { "bt", ID_AA64PFR1_BT_SHIFT },
+ {}
+ },
+};
+
+static const struct ftr_set_desc isar1 __initconst = {
+ .name = "id_aa64isar1",
+ .override = &id_aa64isar1_override,
+ .fields = {
+ { "gpi", ID_AA64ISAR1_GPI_SHIFT },
+ { "gpa", ID_AA64ISAR1_GPA_SHIFT },
+ { "api", ID_AA64ISAR1_API_SHIFT },
+ { "apa", ID_AA64ISAR1_APA_SHIFT },
+ {}
+ },
+};
+
+extern struct arm64_ftr_override kaslr_feature_override;
+
+static const struct ftr_set_desc kaslr __initconst = {
+ .name = "kaslr",
+#ifdef CONFIG_RANDOMIZE_BASE
+ .override = &kaslr_feature_override,
+#endif
+ .fields = {
+ { "disabled", 0 },
+ {}
+ },
+};
+
+static const struct ftr_set_desc * const regs[] __initconst = {
+ &mmfr1,
+ &pfr1,
+ &isar1,
+ &kaslr,
+};
+
+static const struct {
+ char alias[FTR_ALIAS_NAME_LEN];
+ char feature[FTR_ALIAS_OPTION_LEN];
+} aliases[] __initconst = {
+ { "kvm-arm.mode=nvhe", "id_aa64mmfr1.vh=0" },
+ { "kvm-arm.mode=protected", "id_aa64mmfr1.vh=0" },
+ { "arm64.nobti", "id_aa64pfr1.bt=0" },
+ { "arm64.nopauth",
+ "id_aa64isar1.gpi=0 id_aa64isar1.gpa=0 "
+ "id_aa64isar1.api=0 id_aa64isar1.apa=0" },
+ { "nokaslr", "kaslr.disabled=1" },
+};
+
+static int __init find_field(const char *cmdline,
+ const struct ftr_set_desc *reg, int f, u64 *v)
+{
+ char opt[FTR_DESC_NAME_LEN + FTR_DESC_FIELD_LEN + 2];
+ int len;
+
+ len = snprintf(opt, ARRAY_SIZE(opt), "%s.%s=",
+ reg->name, reg->fields[f].name);
+
+ if (!parameqn(cmdline, opt, len))
+ return -1;
+
+ return kstrtou64(cmdline + len, 0, v);
+}
+
+static void __init match_options(const char *cmdline)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ int f;
+
+ if (!regs[i]->override)
+ continue;
+
+ for (f = 0; strlen(regs[i]->fields[f].name); f++) {
+ u64 shift = regs[i]->fields[f].shift;
+ u64 mask = 0xfUL << shift;
+ u64 v;
+
+ if (find_field(cmdline, regs[i], f, &v))
+ continue;
+
+ regs[i]->override->val &= ~mask;
+ regs[i]->override->val |= (v << shift) & mask;
+ regs[i]->override->mask |= mask;
+
+ return;
+ }
+ }
+}
+
+static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
+{
+ do {
+ char buf[256];
+ size_t len;
+ int i;
+
+ cmdline = skip_spaces(cmdline);
+
+ for (len = 0; cmdline[len] && !isspace(cmdline[len]); len++);
+ if (!len)
+ return;
+
+ len = min(len, ARRAY_SIZE(buf) - 1);
+ strncpy(buf, cmdline, len);
+ buf[len] = 0;
+
+ if (strcmp(buf, "--") == 0)
+ return;
+
+ cmdline += len;
+
+ match_options(buf);
+
+ for (i = 0; parse_aliases && i < ARRAY_SIZE(aliases); i++)
+ if (parameq(buf, aliases[i].alias))
+ __parse_cmdline(aliases[i].feature, false);
+ } while (1);
+}
+
+static __init void parse_cmdline(void)
+{
+ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
+ const u8 *prop;
+ void *fdt;
+ int node;
+
+ fdt = get_early_fdt_ptr();
+ if (!fdt)
+ goto out;
+
+ node = fdt_path_offset(fdt, "/chosen");
+ if (node < 0)
+ goto out;
+
+ prop = fdt_getprop(fdt, node, "bootargs", NULL);
+ if (!prop)
+ goto out;
+
+ __parse_cmdline(prop, true);
+
+ if (!IS_ENABLED(CONFIG_CMDLINE_EXTEND))
+ return;
+ }
+
+out:
+ __parse_cmdline(CONFIG_CMDLINE, true);
+}
+
+/* Keep checkers quiet */
+void init_feature_override(void);
+
+asmlinkage void __init init_feature_override(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ if (regs[i]->override) {
+ regs[i]->override->val = 0;
+ regs[i]->override->mask = 0;
+ }
+ }
+
+ parse_cmdline();
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ if (regs[i]->override)
+ __flush_dcache_area(regs[i]->override,
+ sizeof(*regs[i]->override));
+ }
+}
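[Editorial sketch, not part of the patch: how one command-line token is folded into an override by match_options() above. The accepted spellings come from the regs[]/fields[]/aliases[] tables in this file (e.g. id_aa64mmfr1.vh=0, arm64.nobti, arm64.nopauth, kvm-arm.mode=nvhe, nokaslr); the values below are illustrative:

	/* "id_aa64mmfr1.vh=0" seen on the command line */
	u64 shift = ID_AA64MMFR1_VHE_SHIFT;
	u64 mask  = 0xfUL << shift;	/* each ID register field is 4 bits */
	u64 v     = 0;			/* requested field value */

	id_aa64mmfr1_override.val  &= ~mask;
	id_aa64mmfr1_override.val  |= (v << shift) & mask;
	id_aa64mmfr1_override.mask |= mask;

Consumers such as __read_sysreg_by_encoding() and init_cpu_ftr_reg() in the cpufeature.c hunks then apply val/mask on top of the hardware value, provided the override is judged safe.]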
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index f676243abac6..23f1a557bd9f 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -64,7 +64,6 @@ __efistub__ctype = _ctype;
/* Alternative callbacks for init-time patching of nVHE hyp code. */
KVM_NVHE_ALIAS(kvm_patch_vector_branch);
KVM_NVHE_ALIAS(kvm_update_va_mask);
-KVM_NVHE_ALIAS(kvm_update_kimg_phys_offset);
KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
/* Global kernel state accessed by nVHE hyp code. */
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 1c74c45b9494..27f8939deb1b 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -19,6 +19,7 @@
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/sections.h>
+#include <asm/setup.h>
enum kaslr_status {
KASLR_ENABLED,
@@ -50,39 +51,7 @@ static __init u64 get_kaslr_seed(void *fdt)
return ret;
}
-static __init bool cmdline_contains_nokaslr(const u8 *cmdline)
-{
- const u8 *str;
-
- str = strstr(cmdline, "nokaslr");
- return str == cmdline || (str > cmdline && *(str - 1) == ' ');
-}
-
-static __init bool is_kaslr_disabled_cmdline(void *fdt)
-{
- if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
- int node;
- const u8 *prop;
-
- node = fdt_path_offset(fdt, "/chosen");
- if (node < 0)
- goto out;
-
- prop = fdt_getprop(fdt, node, "bootargs", NULL);
- if (!prop)
- goto out;
-
- if (cmdline_contains_nokaslr(prop))
- return true;
-
- if (IS_ENABLED(CONFIG_CMDLINE_EXTEND))
- goto out;
-
- return false;
- }
-out:
- return cmdline_contains_nokaslr(CONFIG_CMDLINE);
-}
+struct arm64_ftr_override kaslr_feature_override __initdata;
/*
* This routine will be executed with the kernel mapped at its default virtual
@@ -92,12 +61,11 @@ out:
* containing function pointers) to be reinitialized, and zero-initialized
* .bss variables will be reset to 0.
*/
-u64 __init kaslr_early_init(u64 dt_phys)
+u64 __init kaslr_early_init(void)
{
void *fdt;
u64 seed, offset, mask, module_range;
unsigned long raw;
- int size;
/*
* Set a reasonable default for module_alloc_base in case
@@ -111,8 +79,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
* and proceed with KASLR disabled. We will make another
* attempt at mapping the FDT in setup_machine()
*/
- early_fixmap_init();
- fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
+ fdt = get_early_fdt_ptr();
if (!fdt) {
kaslr_status = KASLR_DISABLED_FDT_REMAP;
return 0;
@@ -127,7 +94,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
* Check if 'nokaslr' appears on the command line, and
* return 0 if that is the case.
*/
- if (is_kaslr_disabled_cmdline(fdt)) {
+ if (kaslr_feature_override.val & kaslr_feature_override.mask & 0xf) {
kaslr_status = KASLR_DISABLED_CMDLINE;
return 0;
}
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index a0b144cfaea7..90a335c74442 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -42,6 +42,7 @@ static void _kexec_image_info(const char *func, int line,
pr_debug(" start: %lx\n", kimage->start);
pr_debug(" head: %lx\n", kimage->head);
pr_debug(" nr_segments: %lu\n", kimage->nr_segments);
+ pr_debug(" kern_reloc: %pa\n", &kimage->arch.kern_reloc);
for (i = 0; i < kimage->nr_segments; i++) {
pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
@@ -58,6 +59,23 @@ void machine_kexec_cleanup(struct kimage *kimage)
/* Empty routine needed to avoid build errors. */
}
+int machine_kexec_post_load(struct kimage *kimage)
+{
+ void *reloc_code = page_to_virt(kimage->control_code_page);
+
+ memcpy(reloc_code, arm64_relocate_new_kernel,
+ arm64_relocate_new_kernel_size);
+ kimage->arch.kern_reloc = __pa(reloc_code);
+ kexec_image_info(kimage);
+
+ /* Flush the reloc_code in preparation for its execution. */
+ __flush_dcache_area(reloc_code, arm64_relocate_new_kernel_size);
+ flush_icache_range((uintptr_t)reloc_code, (uintptr_t)reloc_code +
+ arm64_relocate_new_kernel_size);
+
+ return 0;
+}
+
/**
* machine_kexec_prepare - Prepare for a kexec reboot.
*
@@ -67,8 +85,6 @@ void machine_kexec_cleanup(struct kimage *kimage)
*/
int machine_kexec_prepare(struct kimage *kimage)
{
- kexec_image_info(kimage);
-
if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) {
pr_err("Can't kexec: CPUs are stuck in the kernel.\n");
return -EBUSY;
@@ -143,8 +159,6 @@ static void kexec_segment_flush(const struct kimage *kimage)
*/
void machine_kexec(struct kimage *kimage)
{
- phys_addr_t reboot_code_buffer_phys;
- void *reboot_code_buffer;
bool in_kexec_crash = (kimage == kexec_crash_image);
bool stuck_cpus = cpus_are_stuck_in_kernel();
@@ -155,31 +169,6 @@ void machine_kexec(struct kimage *kimage)
WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()),
"Some CPUs may be stale, kdump will be unreliable.\n");
- reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
- reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
-
- kexec_image_info(kimage);
-
- /*
- * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
- * after the kernel is shut down.
- */
- memcpy(reboot_code_buffer, arm64_relocate_new_kernel,
- arm64_relocate_new_kernel_size);
-
- /* Flush the reboot_code_buffer in preparation for its execution. */
- __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
-
- /*
- * Although we've killed off the secondary CPUs, we don't update
- * the online mask if we're handling a crash kernel and consequently
- * need to avoid flush_icache_range(), which will attempt to IPI
- * the offline CPUs. Therefore, we must use the __* variant here.
- */
- __flush_icache_range((uintptr_t)reboot_code_buffer,
- (uintptr_t)reboot_code_buffer +
- arm64_relocate_new_kernel_size);
-
/* Flush the kimage list and its buffers. */
kexec_list_flush(kimage);
@@ -193,7 +182,7 @@ void machine_kexec(struct kimage *kimage)
/*
* cpu_soft_restart will shutdown the MMU, disable data caches, then
- * transfer control to the reboot_code_buffer which contains a copy of
+ * transfer control to the kern_reloc which contains a copy of
* the arm64_relocate_new_kernel routine. arm64_relocate_new_kernel
* uses physical addressing to relocate the new image to its final
* position and transfers control to the image entry point when the
@@ -203,12 +192,8 @@ void machine_kexec(struct kimage *kimage)
* userspace (kexec-tools).
* In kexec_file case, the kernel starts directly without purgatory.
*/
- cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start,
-#ifdef CONFIG_KEXEC_FILE
- kimage->arch.dtb_mem);
-#else
- 0);
-#endif
+ cpu_soft_restart(kimage->arch.kern_reloc, kimage->head, kimage->start,
+ kimage->arch.dtb_mem);
BUG(); /* Should never get here. */
}
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 03210f644790..0cde47a63beb 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -182,8 +182,10 @@ static int create_dtb(struct kimage *image,
/* duplicate a device tree blob */
ret = fdt_open_into(initial_boot_params, buf, buf_size);
- if (ret)
+ if (ret) {
+ vfree(buf);
return -EINVAL;
+ }
ret = setup_dtb(image, initrd_load_addr, initrd_len,
cmdline, buf);
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index 2e224435c024..e53493d8b208 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -131,7 +131,7 @@ u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
}
#endif
-#define cmp_3way(a,b) ((a) < (b) ? -1 : (a) > (b))
+#define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b))
static int cmp_rela(const void *a, const void *b)
{
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index dc9ada64feed..b3c70a612c7a 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -19,12 +19,13 @@
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
-#include <asm/mte-kasan.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
u64 gcr_kernel_excl __ro_after_init;
+static bool report_fault_once = true;
+
static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
{
pte_t old_pte = READ_ONCE(*ptep);
@@ -86,51 +87,6 @@ int memcmp_pages(struct page *page1, struct page *page2)
return ret;
}
-u8 mte_get_mem_tag(void *addr)
-{
- if (!system_supports_mte())
- return 0xFF;
-
- asm(__MTE_PREAMBLE "ldg %0, [%0]"
- : "+r" (addr));
-
- return mte_get_ptr_tag(addr);
-}
-
-u8 mte_get_random_tag(void)
-{
- void *addr;
-
- if (!system_supports_mte())
- return 0xFF;
-
- asm(__MTE_PREAMBLE "irg %0, %0"
- : "+r" (addr));
-
- return mte_get_ptr_tag(addr);
-}
-
-void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
-{
- void *ptr = addr;
-
- if ((!system_supports_mte()) || (size == 0))
- return addr;
-
- /* Make sure that size is MTE granule aligned. */
- WARN_ON(size & (MTE_GRANULE_SIZE - 1));
-
- /* Make sure that the address is MTE granule aligned. */
- WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
-
- tag = 0xF0 | tag;
- ptr = (void *)__tag_set(ptr, tag);
-
- mte_assign_mem_tag_range(ptr, size);
-
- return ptr;
-}
-
void mte_init_tags(u64 max_tag)
{
static bool gcr_kernel_excl_initialized;
@@ -158,6 +114,16 @@ void mte_enable_kernel(void)
isb();
}
+void mte_set_report_once(bool state)
+{
+ WRITE_ONCE(report_fault_once, state);
+}
+
+bool mte_report_once(void)
+{
+ return READ_ONCE(report_fault_once);
+}
+
static void update_sctlr_el1_tcf0(u64 tcf0)
{
/* ISB required for the kernel uaccess routines */
@@ -329,11 +295,12 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
* would cause the existing tags to be cleared if the page
* was never mapped with PROT_MTE.
*/
- if (!test_bit(PG_mte_tagged, &page->flags)) {
+ if (!(vma->vm_flags & VM_MTE)) {
ret = -EOPNOTSUPP;
put_page(page);
break;
}
+ WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags));
/* limit access to the end of the page */
offset = offset_in_page(addr);
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 3605f77ad4df..7d2318f80955 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -280,7 +280,7 @@ armv8pmu_event_attr_is_visible(struct kobject *kobj,
return 0;
}
-static struct attribute_group armv8_pmuv3_events_attr_group = {
+static const struct attribute_group armv8_pmuv3_events_attr_group = {
.name = "events",
.attrs = armv8_pmuv3_event_attrs,
.is_visible = armv8pmu_event_attr_is_visible,
@@ -300,7 +300,7 @@ static struct attribute *armv8_pmuv3_format_attrs[] = {
NULL,
};
-static struct attribute_group armv8_pmuv3_format_attr_group = {
+static const struct attribute_group armv8_pmuv3_format_attr_group = {
.name = "format",
.attrs = armv8_pmuv3_format_attrs,
};
@@ -322,7 +322,7 @@ static struct attribute *armv8_pmuv3_caps_attrs[] = {
NULL,
};
-static struct attribute_group armv8_pmuv3_caps_attr_group = {
+static const struct attribute_group armv8_pmuv3_caps_attr_group = {
.name = "caps",
.attrs = armv8_pmuv3_caps_attrs,
};
@@ -810,7 +810,7 @@ static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
{
int idx;
- for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx ++) {
+ for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
if (!test_and_set_bit(idx, cpuc->used_mask))
return idx;
}
@@ -1188,6 +1188,12 @@ static int armv8_a77_pmu_init(struct arm_pmu *cpu_pmu)
armv8_pmuv3_map_event);
}
+static int armv8_a78_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a78",
+ armv8_pmuv3_map_event);
+}
+
static int armv8_e1_pmu_init(struct arm_pmu *cpu_pmu)
{
return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_e1",
@@ -1225,6 +1231,7 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
{.compatible = "arm,cortex-a75-pmu", .data = armv8_a75_pmu_init},
{.compatible = "arm,cortex-a76-pmu", .data = armv8_a76_pmu_init},
{.compatible = "arm,cortex-a77-pmu", .data = armv8_a77_pmu_init},
+ {.compatible = "arm,cortex-a78-pmu", .data = armv8_a78_pmu_init},
{.compatible = "arm,neoverse-e1-pmu", .data = armv8_e1_pmu_init},
{.compatible = "arm,neoverse-n1-pmu", .data = armv8_n1_pmu_init},
{.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
index a412d8edbcd2..2c247634552b 100644
--- a/arch/arm64/kernel/probes/uprobes.c
+++ b/arch/arm64/kernel/probes/uprobes.c
@@ -38,7 +38,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
/* TODO: Currently we do not support AARCH32 instruction probing */
if (mm->context.flags & MMCF_AARCH32)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
return -EINVAL;
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6616486a58fe..325c83b1a24d 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -304,7 +304,7 @@ void __show_regs(struct pt_regs *regs)
}
}
-void show_regs(struct pt_regs * regs)
+void show_regs(struct pt_regs *regs)
{
__show_regs(regs);
dump_backtrace(regs, NULL, KERN_DEFAULT);
@@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
ptrauth_thread_init_kernel(p);
- if (likely(!(p->flags & PF_KTHREAD))) {
+ if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
*childregs = *current_pt_regs();
childregs->regs[0] = 0;
@@ -587,7 +587,7 @@ unsigned long get_wchan(struct task_struct *p)
ret = frame.pc;
goto out;
}
- } while (count ++ < 16);
+ } while (count++ < 16);
out:
put_task_stack(p);
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 8ac487c84e37..170f42fd6101 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -194,6 +194,7 @@ static void ptrace_hbptriggered(struct perf_event *bp,
}
arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,
desc);
+ return;
}
#endif
arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
@@ -1796,7 +1797,7 @@ int syscall_trace_enter(struct pt_regs *regs)
if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
- if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU))
+ if (flags & _TIF_SYSCALL_EMU)
return NO_SYSCALL;
}
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index 84eec95ec06c..b78ea5de97a4 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -17,28 +17,24 @@
/*
* arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
*
- * The memory that the old kernel occupies may be overwritten when coping the
+ * The memory that the old kernel occupies may be overwritten when copying the
* new image to its final location. To assure that the
* arm64_relocate_new_kernel routine which does that copy is not overwritten,
* all code and data needed by arm64_relocate_new_kernel must be between the
* symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end. The
* machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec
- * control_code_page, a special page which has been set up to be preserved
- * during the copy operation.
+ * safe memory that has been set up to be preserved during the copy operation.
*/
SYM_CODE_START(arm64_relocate_new_kernel)
-
/* Setup the list loop variables. */
mov x18, x2 /* x18 = dtb address */
mov x17, x1 /* x17 = kimage_start */
mov x16, x0 /* x16 = kimage_head */
- raw_dcache_line_size x15, x0 /* x15 = dcache line size */
mov x14, xzr /* x14 = entry ptr */
mov x13, xzr /* x13 = copy dest */
-
/* Check if the new image needs relocation. */
tbnz x16, IND_DONE_BIT, .Ldone
-
+ raw_dcache_line_size x15, x1 /* x15 = dcache line size */
.Lloop:
and x12, x16, PAGE_MASK /* x12 = addr */
@@ -47,44 +43,28 @@ SYM_CODE_START(arm64_relocate_new_kernel)
tbz x16, IND_SOURCE_BIT, .Ltest_indirection
/* Invalidate dest page to PoC. */
- mov x0, x13
- add x20, x0, #PAGE_SIZE
+ mov x2, x13
+ add x20, x2, #PAGE_SIZE
sub x1, x15, #1
- bic x0, x0, x1
-2: dc ivac, x0
- add x0, x0, x15
- cmp x0, x20
+ bic x2, x2, x1
+2: dc ivac, x2
+ add x2, x2, x15
+ cmp x2, x20
b.lo 2b
dsb sy
- mov x20, x13
- mov x21, x12
- copy_page x20, x21, x0, x1, x2, x3, x4, x5, x6, x7
-
- /* dest += PAGE_SIZE */
- add x13, x13, PAGE_SIZE
+ copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8
b .Lnext
-
.Ltest_indirection:
tbz x16, IND_INDIRECTION_BIT, .Ltest_destination
-
- /* ptr = addr */
- mov x14, x12
+ mov x14, x12 /* ptr = addr */
b .Lnext
-
.Ltest_destination:
tbz x16, IND_DESTINATION_BIT, .Lnext
-
- /* dest = addr */
- mov x13, x12
-
+ mov x13, x12 /* dest = addr */
.Lnext:
- /* entry = *ptr++ */
- ldr x16, [x14], #8
-
- /* while (!(entry & DONE)) */
- tbz x16, IND_DONE_BIT, .Lloop
-
+ ldr x16, [x14], #8 /* entry = *ptr++ */
+ tbz x16, IND_DONE_BIT, .Lloop /* while (!(entry & DONE)) */
.Ldone:
/* wait for writes from copy_page to finish */
dsb nsh
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index c18aacde8bb0..61845c0821d9 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -168,6 +168,21 @@ static void __init smp_build_mpidr_hash(void)
pr_warn("Large number of MPIDR hash buckets detected\n");
}
+static void *early_fdt_ptr __initdata;
+
+void __init *get_early_fdt_ptr(void)
+{
+ return early_fdt_ptr;
+}
+
+asmlinkage void __init early_fdt_map(u64 dt_phys)
+{
+ int fdt_size;
+
+ early_fixmap_init();
+ early_fdt_ptr = fixmap_remap_fdt(dt_phys, &fdt_size, PAGE_KERNEL);
+}
+
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
int size;
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 6bdef7362c0e..5bfd9b87f85d 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -100,6 +100,7 @@ SYM_FUNC_END(__cpu_suspend_enter)
.pushsection ".idmap.text", "awx"
SYM_CODE_START(cpu_resume)
bl init_kernel_el
+ bl switch_to_vhe
bl __cpu_setup
/* enable the MMU early - so we can access sleep_save_stash by va */
adrp x1, swapper_pg_dir
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ad00f99ee9b0..357590beaabb 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -434,8 +434,10 @@ static void __init hyp_mode_check(void)
"CPU: CPUs started in inconsistent modes");
else
pr_info("CPU: All CPU(s) started at EL1\n");
- if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode())
+ if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) {
kvm_compute_layout();
+ kvm_apply_hyp_relocations();
+ }
}
void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index fa56af1a59c3..ad20981dfda4 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -44,6 +44,10 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
unsigned long fp = frame->fp;
struct stack_info info;
+ /* Terminal record; nothing to unwind */
+ if (!fp)
+ return -ENOENT;
+
if (fp & 0xf)
return -EINVAL;
@@ -104,15 +108,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
frame->pc = ptrauth_strip_insn_pac(frame->pc);
- /*
- * Frames created upon entry from EL0 have NULL FP and PC values, so
- * don't bother reporting these. Frames created by __noreturn functions
- * might have a valid FP even if PC is bogus, so only terminate where
- * both are NULL.
- */
- if (!frame->fp && !frame->pc)
- return -EINVAL;
-
return 0;
}
NOKPROBE_SYMBOL(unwind_frame);
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index a67b37a7a47e..d7564891ffe1 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -119,7 +119,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
if (!ret)
ret = -EOPNOTSUPP;
} else {
- __cpu_suspend_exit();
+ RCU_NONIDLE(__cpu_suspend_exit());
}
unpause_graph_tracing();
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index c2877c332f2d..b9cf12b271d7 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -65,35 +65,6 @@ static inline bool has_syscall_work(unsigned long flags)
int syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_exit(struct pt_regs *regs);
-#ifdef CONFIG_ARM64_ERRATUM_1463225
-DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
-
-static void cortex_a76_erratum_1463225_svc_handler(void)
-{
- u32 reg, val;
-
- if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
- return;
-
- if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
- return;
-
- __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
- reg = read_sysreg(mdscr_el1);
- val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
- write_sysreg(val, mdscr_el1);
- asm volatile("msr daifclr, #8");
- isb();
-
- /* We will have taken a single-step exception by this point */
-
- write_sysreg(reg, mdscr_el1);
- __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
-}
-#else
-static void cortex_a76_erratum_1463225_svc_handler(void) { }
-#endif /* CONFIG_ARM64_ERRATUM_1463225 */
-
static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
const syscall_fn_t syscall_table[])
{
@@ -120,7 +91,6 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
* (Similarly for HVC and SMC elsewhere.)
*/
- cortex_a76_erratum_1463225_svc_handler();
local_daif_restore(DAIF_PROCCTX);
if (flags & _TIF_MTE_ASYNC_FAULT) {
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index f6faa697e83e..e08a4126453a 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -199,76 +199,38 @@ static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate)
return 0;
}
-static inline bool
-enable_policy_freq_counters(int cpu, cpumask_var_t valid_cpus)
-{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-
- if (!policy) {
- pr_debug("CPU%d: No cpufreq policy found.\n", cpu);
- return false;
- }
-
- if (cpumask_subset(policy->related_cpus, valid_cpus))
- cpumask_or(amu_fie_cpus, policy->related_cpus,
- amu_fie_cpus);
-
- cpufreq_cpu_put(policy);
-
- return true;
-}
-
static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key)
-static int __init init_amu_fie(void)
+static void amu_fie_setup(const struct cpumask *cpus)
{
- bool invariance_status = topology_scale_freq_invariant();
- cpumask_var_t valid_cpus;
- bool have_policy = false;
- int ret = 0;
+ bool invariant;
int cpu;
- if (!zalloc_cpumask_var(&valid_cpus, GFP_KERNEL))
- return -ENOMEM;
-
- if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) {
- ret = -ENOMEM;
- goto free_valid_mask;
- }
+ /* We are already set since the last insmod of cpufreq driver */
+ if (unlikely(cpumask_subset(cpus, amu_fie_cpus)))
+ return;
- for_each_present_cpu(cpu) {
+ for_each_cpu(cpu, cpus) {
if (!freq_counters_valid(cpu) ||
freq_inv_set_max_ratio(cpu,
cpufreq_get_hw_max_freq(cpu) * 1000,
arch_timer_get_rate()))
- continue;
-
- cpumask_set_cpu(cpu, valid_cpus);
- have_policy |= enable_policy_freq_counters(cpu, valid_cpus);
+ return;
}
- /*
- * If we are not restricted by cpufreq policies, we only enable
- * the use of the AMU feature for FIE if all CPUs support AMU.
- * Otherwise, enable_policy_freq_counters has already enabled
- * policy cpus.
- */
- if (!have_policy && cpumask_equal(valid_cpus, cpu_present_mask))
- cpumask_or(amu_fie_cpus, amu_fie_cpus, valid_cpus);
+ cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus);
- if (!cpumask_empty(amu_fie_cpus)) {
- pr_info("CPUs[%*pbl]: counters will be used for FIE.",
- cpumask_pr_args(amu_fie_cpus));
- static_branch_enable(&amu_fie_key);
- }
+ invariant = topology_scale_freq_invariant();
- /*
- * If the system is not fully invariant after AMU init, disable
- * partial use of counters for frequency invariance.
- */
- if (!topology_scale_freq_invariant())
- static_branch_disable(&amu_fie_key);
+ /* We aren't fully invariant yet */
+ if (!invariant && !cpumask_equal(amu_fie_cpus, cpu_present_mask))
+ return;
+
+ static_branch_enable(&amu_fie_key);
+
+ pr_debug("CPUs[%*pbl]: counters will be used for FIE.",
+ cpumask_pr_args(cpus));
/*
* Task scheduler behavior depends on frequency invariance support,
@@ -276,15 +238,50 @@ static int __init init_amu_fie(void)
* a result of counter initialisation and use, retrigger the build of
* scheduling domains to ensure the information is propagated properly.
*/
- if (invariance_status != topology_scale_freq_invariant())
+ if (!invariant)
rebuild_sched_domains_energy();
+}
+
+static int init_amu_fie_callback(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cpufreq_policy *policy = data;
+
+ if (val == CPUFREQ_CREATE_POLICY)
+ amu_fie_setup(policy->related_cpus);
+
+ /*
+ * We don't need to handle CPUFREQ_REMOVE_POLICY event as the AMU
+ * counters don't have any dependency on cpufreq driver once we have
+ * initialized AMU support and enabled invariance. The AMU counters will
+ * keep on working just fine in the absence of the cpufreq driver, and
+ * for the CPUs for which there are no counters available, the last set
+ * value of freq_scale will remain valid as that is the frequency those
+ * CPUs are running at.
+ */
+
+ return 0;
+}
+
+static struct notifier_block init_amu_fie_notifier = {
+ .notifier_call = init_amu_fie_callback,
+};
+
+static int __init init_amu_fie(void)
+{
+ int ret;
+
+ if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL))
+ return -ENOMEM;
-free_valid_mask:
- free_cpumask_var(valid_cpus);
+ ret = cpufreq_register_notifier(&init_amu_fie_notifier,
+ CPUFREQ_POLICY_NOTIFIER);
+ if (ret)
+ free_cpumask_var(amu_fie_cpus);
return ret;
}
-late_initcall_sync(init_amu_fie);
+core_initcall(init_amu_fie);
bool arch_freq_counters_available(const struct cpumask *cpus)
{
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 6895ce777e7f..a05d34f0e82a 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -45,7 +45,7 @@
#include <asm/system_misc.h>
#include <asm/sysreg.h>
-static const char *handler[]= {
+static const char *handler[] = {
"Synchronous Abort",
"IRQ",
"FIQ",
diff --git a/arch/arm64/kernel/vdso/vdso.S b/arch/arm64/kernel/vdso-wrap.S
index c4b1990bf2be..c4b1990bf2be 100644
--- a/arch/arm64/kernel/vdso/vdso.S
+++ b/arch/arm64/kernel/vdso-wrap.S
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index cd9c3fa25902..945e6bb326e3 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -29,7 +29,8 @@ ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
-CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS)
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS) \
+ $(CC_FLAGS_LTO)
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
@@ -44,7 +45,6 @@ endif
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n
-obj-y += vdso.o
targets += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
diff --git a/arch/arm64/kernel/vdso/gen_vdso_offsets.sh b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh
index 8b806eacd0a6..0387209d65b1 100755
--- a/arch/arm64/kernel/vdso/gen_vdso_offsets.sh
+++ b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh
@@ -13,4 +13,4 @@
LC_ALL=C
sed -n -e 's/^00*/0/' -e \
-'s/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso_offset_\2\t0x\1/p'
+'s/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso_offset_\2 0x\1/p'
diff --git a/arch/arm64/kernel/vdso32/vdso.S b/arch/arm64/kernel/vdso32-wrap.S
index e72ac7bc4c04..e72ac7bc4c04 100644
--- a/arch/arm64/kernel/vdso32/vdso.S
+++ b/arch/arm64/kernel/vdso32-wrap.S
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index a1e0f91e6cea..789ad420f16b 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -155,7 +155,6 @@ c-obj-vdso-gettimeofday := $(addprefix $(obj)/, $(c-obj-vdso-gettimeofday))
asm-obj-vdso := $(addprefix $(obj)/, $(asm-obj-vdso))
obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso)
-obj-y += vdso.o
targets += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 4c0b0c89ad59..7eea7888bb02 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -31,10 +31,11 @@ jiffies = jiffies_64;
__stop___kvm_ex_table = .;
#define HYPERVISOR_DATA_SECTIONS \
- HYP_SECTION_NAME(.data..ro_after_init) : { \
- __hyp_data_ro_after_init_start = .; \
+ HYP_SECTION_NAME(.rodata) : { \
+ __hyp_rodata_start = .; \
*(HYP_SECTION_NAME(.data..ro_after_init)) \
- __hyp_data_ro_after_init_end = .; \
+ *(HYP_SECTION_NAME(.rodata)) \
+ __hyp_rodata_end = .; \
}
#define HYPERVISOR_PERCPU_SECTION \
@@ -42,10 +43,19 @@ jiffies = jiffies_64;
HYP_SECTION_NAME(.data..percpu) : { \
*(HYP_SECTION_NAME(.data..percpu)) \
}
+
+#define HYPERVISOR_RELOC_SECTION \
+ .hyp.reloc : ALIGN(4) { \
+ __hyp_reloc_begin = .; \
+ *(.hyp.reloc) \
+ __hyp_reloc_end = .; \
+ }
+
#else /* CONFIG_KVM */
#define HYPERVISOR_EXTABLE
#define HYPERVISOR_DATA_SECTIONS
#define HYPERVISOR_PERCPU_SECTION
+#define HYPERVISOR_RELOC_SECTION
#endif
#define HYPERVISOR_TEXT \
@@ -216,6 +226,8 @@ SECTIONS
PERCPU_SECTION(L1_CACHE_BYTES)
HYPERVISOR_PERCPU_SECTION
+ HYPERVISOR_RELOC_SECTION
+
.rela.dyn : ALIGN(8) {
*(.rela .rela*)
}
@@ -316,3 +328,11 @@ ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
* If padding is applied before .head.text, virt<->phys conversions will fail.
*/
ASSERT(_text == KIMAGE_VADDR, "HEAD is misaligned")
+
+ASSERT(swapper_pg_dir - reserved_pg_dir == RESERVED_SWAPPER_OFFSET,
+ "RESERVED_SWAPPER_OFFSET is wrong!")
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET,
+ "TRAMP_SWAPPER_OFFSET is wrong!")
+#endif
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 13b017284bf9..589921392cb1 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -16,7 +16,7 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
inject_fault.o va_layout.o handle_exit.o \
guest.o debug.o reset.o sys_regs.o \
vgic-sys-reg-v3.o fpsimd.o pmu.o \
- arch_timer.o \
+ arch_timer.o trng.o\
vgic/vgic.o vgic/vgic-init.o \
vgic/vgic-irqfd.o vgic/vgic-v2.o \
vgic/vgic-v3.o vgic/vgic-v4.o \
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 04c44853b103..fc4c95dd2d26 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1396,8 +1396,9 @@ static void cpu_init_hyp_mode(void)
* Calculate the raw per-cpu offset without a translation from the
* kernel's mapping to the linear mapping, and store it in tpidr_el2
* so that we can use adr_l to access per-cpu variables in EL2.
+ * Also drop the KASAN tag which gets in the way...
*/
- params->tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
+ params->tpidr_el2 = (unsigned long)kasan_reset_tag(this_cpu_ptr_nvhe_sym(__per_cpu_start)) -
(unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
params->mair_el2 = read_sysreg(mair_el1);
@@ -1749,11 +1750,10 @@ static int init_hyp_mode(void)
goto out_err;
}
- err = create_hyp_mappings(kvm_ksym_ref(__hyp_data_ro_after_init_start),
- kvm_ksym_ref(__hyp_data_ro_after_init_end),
- PAGE_HYP_RO);
+ err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
+ kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
if (err) {
- kvm_err("Cannot map .hyp.data..ro_after_init section\n");
+ kvm_err("Cannot map .hyp.rodata section\n");
goto out_err;
}
@@ -1966,6 +1966,9 @@ static int __init early_kvm_mode_cfg(char *arg)
return 0;
}
+ if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode()))
+ return 0;
+
return -EINVAL;
}
early_param("kvm-arm.mode", early_kvm_mode_cfg);
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index d179056e1af8..5f49df4ffdd8 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -119,7 +119,7 @@ el2_error:
.macro invalid_vector label, target = __guest_exit_panic
.align 2
-SYM_CODE_START(\label)
+SYM_CODE_START_LOCAL(\label)
b \target
SYM_CODE_END(\label)
.endm
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 84473574c2e7..54f4860cd87c 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -505,8 +505,8 @@ static inline void __kvm_unexpected_el2_exception(void)
struct exception_table_entry *entry, *end;
unsigned long elr_el2 = read_sysreg(elr_el2);
- entry = hyp_symbol_addr(__start___kvm_ex_table);
- end = hyp_symbol_addr(__stop___kvm_ex_table);
+ entry = &__start___kvm_ex_table;
+ end = &__stop___kvm_ex_table;
while (entry < end) {
addr = (unsigned long)&entry->insn + entry->insn;
diff --git a/arch/arm64/kvm/hyp/nvhe/.gitignore b/arch/arm64/kvm/hyp/nvhe/.gitignore
index 695d73d0249e..5b6c43cc96f8 100644
--- a/arch/arm64/kvm/hyp/nvhe/.gitignore
+++ b/arch/arm64/kvm/hyp/nvhe/.gitignore
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
+gen-hyprel
hyp.lds
+hyp-reloc.S
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 1f1e351c5fe2..a6707df4f6c0 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -3,8 +3,11 @@
# Makefile for Kernel-based Virtual Machine module, HYP/nVHE part
#
-asflags-y := -D__KVM_NVHE_HYPERVISOR__
-ccflags-y := -D__KVM_NVHE_HYPERVISOR__
+asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
+ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
+
+hostprogs := gen-hyprel
+HOST_EXTRACFLAGS += -I$(objtree)/include
obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
hyp-main.o hyp-smp.o psci-relay.o
@@ -19,7 +22,7 @@ obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
hyp-obj := $(patsubst %.o,%.nvhe.o,$(obj-y))
obj-y := kvm_nvhe.o
-extra-y := $(hyp-obj) kvm_nvhe.tmp.o hyp.lds
+extra-y := $(hyp-obj) kvm_nvhe.tmp.o kvm_nvhe.rel.o hyp.lds hyp-reloc.S hyp-reloc.o
# 1) Compile all source files to `.nvhe.o` object files. The file extension
# avoids file name clashes for files shared with VHE.
@@ -42,11 +45,31 @@ LDFLAGS_kvm_nvhe.tmp.o := -r -T
$(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE
$(call if_changed,ld)
-# 4) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'.
+# 4) Generate list of hyp code/data positions that need to be relocated at
+# runtime. Because the hypervisor is part of the kernel binary, relocations
+# produce a kernel VA. We enumerate relocations targeting hyp at build time
+# and convert the kernel VAs at those positions to hyp VAs.
+$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel
+ $(call if_changed,hyprel)
+
+# 5) Compile hyp-reloc.S and link it into the existing partially linked object.
+# The object file now contains a section with pointers to hyp positions that
+# will contain kernel VAs at runtime. These pointers have relocations on them
+# so that they get updated as the hyp object is linked into `vmlinux`.
+LDFLAGS_kvm_nvhe.rel.o := -r
+$(obj)/kvm_nvhe.rel.o: $(obj)/kvm_nvhe.tmp.o $(obj)/hyp-reloc.o FORCE
+ $(call if_changed,ld)
+
+# 6) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'.
# Prefixes names of ELF symbols with '__kvm_nvhe_'.
-$(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.tmp.o FORCE
+$(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.rel.o FORCE
$(call if_changed,hypcopy)
+# The HYPREL command calls `gen-hyprel` to generate an assembly file with
+# a list of relocations targeting hyp code/data.
+quiet_cmd_hyprel = HYPREL $@
+ cmd_hyprel = $(obj)/gen-hyprel $< > $@
+
# The HYPCOPY command uses `objcopy` to prefix all ELF symbol names
# to avoid clashes with VHE code/data.
quiet_cmd_hypcopy = HYPCOPY $@
diff --git a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
new file mode 100644
index 000000000000..ead02c6a7628
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 - Google LLC
+ * Author: David Brazdil <dbrazdil@google.com>
+ *
+ * Generates relocation information used by the kernel to convert
+ * absolute addresses in hyp data from kernel VAs to hyp VAs.
+ *
+ * This is necessary because hyp code is linked into the same binary
+ * as the kernel but executes under different memory mappings.
+ * If the compiler used absolute addressing, those addresses need to
+ * be converted before they are used by hyp code.
+ *
+ * The input of this program is the relocatable ELF object containing
+ * all hyp code/data, not yet linked into vmlinux. Hyp section names
+ * should have been prefixed with `.hyp` at this point.
+ *
+ * The output (printed to stdout) is an assembly file containing
+ * an array of 32-bit integers and static relocations that instruct
+ * the linker of `vmlinux` to populate the array entries with offsets
+ * to positions in the kernel binary containing VAs used by hyp code.
+ *
+ * Note that dynamic relocations could be used for the same purpose.
+ * However, those are only generated if CONFIG_RELOCATABLE=y.
+ */
+
+#include <elf.h>
+#include <endian.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <generated/autoconf.h>
+
+#define HYP_SECTION_PREFIX ".hyp"
+#define HYP_RELOC_SECTION ".hyp.reloc"
+#define HYP_SECTION_SYMBOL_PREFIX "__hyp_section_"
+
+/*
+ * AArch64 relocation type constants.
+ * Included in case these are not defined in the host toolchain.
+ */
+#ifndef R_AARCH64_ABS64
+#define R_AARCH64_ABS64 257
+#endif
+#ifndef R_AARCH64_LD_PREL_LO19
+#define R_AARCH64_LD_PREL_LO19 273
+#endif
+#ifndef R_AARCH64_ADR_PREL_LO21
+#define R_AARCH64_ADR_PREL_LO21 274
+#endif
+#ifndef R_AARCH64_ADR_PREL_PG_HI21
+#define R_AARCH64_ADR_PREL_PG_HI21 275
+#endif
+#ifndef R_AARCH64_ADR_PREL_PG_HI21_NC
+#define R_AARCH64_ADR_PREL_PG_HI21_NC 276
+#endif
+#ifndef R_AARCH64_ADD_ABS_LO12_NC
+#define R_AARCH64_ADD_ABS_LO12_NC 277
+#endif
+#ifndef R_AARCH64_LDST8_ABS_LO12_NC
+#define R_AARCH64_LDST8_ABS_LO12_NC 278
+#endif
+#ifndef R_AARCH64_TSTBR14
+#define R_AARCH64_TSTBR14 279
+#endif
+#ifndef R_AARCH64_CONDBR19
+#define R_AARCH64_CONDBR19 280
+#endif
+#ifndef R_AARCH64_JUMP26
+#define R_AARCH64_JUMP26 282
+#endif
+#ifndef R_AARCH64_CALL26
+#define R_AARCH64_CALL26 283
+#endif
+#ifndef R_AARCH64_LDST16_ABS_LO12_NC
+#define R_AARCH64_LDST16_ABS_LO12_NC 284
+#endif
+#ifndef R_AARCH64_LDST32_ABS_LO12_NC
+#define R_AARCH64_LDST32_ABS_LO12_NC 285
+#endif
+#ifndef R_AARCH64_LDST64_ABS_LO12_NC
+#define R_AARCH64_LDST64_ABS_LO12_NC 286
+#endif
+#ifndef R_AARCH64_MOVW_PREL_G0
+#define R_AARCH64_MOVW_PREL_G0 287
+#endif
+#ifndef R_AARCH64_MOVW_PREL_G0_NC
+#define R_AARCH64_MOVW_PREL_G0_NC 288
+#endif
+#ifndef R_AARCH64_MOVW_PREL_G1
+#define R_AARCH64_MOVW_PREL_G1 289
+#endif
+#ifndef R_AARCH64_MOVW_PREL_G1_NC
+#define R_AARCH64_MOVW_PREL_G1_NC 290
+#endif
+#ifndef R_AARCH64_MOVW_PREL_G2
+#define R_AARCH64_MOVW_PREL_G2 291
+#endif
+#ifndef R_AARCH64_MOVW_PREL_G2_NC
+#define R_AARCH64_MOVW_PREL_G2_NC 292
+#endif
+#ifndef R_AARCH64_MOVW_PREL_G3
+#define R_AARCH64_MOVW_PREL_G3 293
+#endif
+#ifndef R_AARCH64_LDST128_ABS_LO12_NC
+#define R_AARCH64_LDST128_ABS_LO12_NC 299
+#endif
+
+/* Global state of the processed ELF. */
+static struct {
+ const char *path;
+ char *begin;
+ size_t size;
+ Elf64_Ehdr *ehdr;
+ Elf64_Shdr *sh_table;
+ const char *sh_string;
+} elf;
+
+#if defined(CONFIG_CPU_LITTLE_ENDIAN)
+
+#define elf16toh(x) le16toh(x)
+#define elf32toh(x) le32toh(x)
+#define elf64toh(x) le64toh(x)
+
+#define ELFENDIAN ELFDATA2LSB
+
+#elif defined(CONFIG_CPU_BIG_ENDIAN)
+
+#define elf16toh(x) be16toh(x)
+#define elf32toh(x) be32toh(x)
+#define elf64toh(x) be64toh(x)
+
+#define ELFENDIAN ELFDATA2MSB
+
+#else
+
+#error PDP-endian sadly unsupported...
+
+#endif
+
+#define fatal_error(fmt, ...) \
+ ({ \
+ fprintf(stderr, "error: %s: " fmt "\n", \
+ elf.path, ## __VA_ARGS__); \
+ exit(EXIT_FAILURE); \
+ __builtin_unreachable(); \
+ })
+
+#define fatal_perror(msg) \
+ ({ \
+ fprintf(stderr, "error: %s: " msg ": %s\n", \
+ elf.path, strerror(errno)); \
+ exit(EXIT_FAILURE); \
+ __builtin_unreachable(); \
+ })
+
+#define assert_op(lhs, rhs, fmt, op) \
+ ({ \
+ typeof(lhs) _lhs = (lhs); \
+ typeof(rhs) _rhs = (rhs); \
+ \
+ if (!(_lhs op _rhs)) { \
+ fatal_error("assertion " #lhs " " #op " " #rhs \
+ " failed (lhs=" fmt ", rhs=" fmt \
+ ", line=%d)", _lhs, _rhs, __LINE__); \
+ } \
+ })
+
+#define assert_eq(lhs, rhs, fmt) assert_op(lhs, rhs, fmt, ==)
+#define assert_ne(lhs, rhs, fmt) assert_op(lhs, rhs, fmt, !=)
+#define assert_lt(lhs, rhs, fmt) assert_op(lhs, rhs, fmt, <)
+#define assert_ge(lhs, rhs, fmt) assert_op(lhs, rhs, fmt, >=)
+
+/*
+ * Return a pointer of a given type at a given offset from
+ * the beginning of the ELF file.
+ */
+#define elf_ptr(type, off) ((type *)(elf.begin + (off)))
+
+/* Iterate over all sections in the ELF. */
+#define for_each_section(var) \
+ for (var = elf.sh_table; var < elf.sh_table + elf16toh(elf.ehdr->e_shnum); ++var)
+
+/* Iterate over all Elf64_Rela relocations in a given section. */
+#define for_each_rela(shdr, var) \
+ for (var = elf_ptr(Elf64_Rela, elf64toh(shdr->sh_offset)); \
+ var < elf_ptr(Elf64_Rela, elf64toh(shdr->sh_offset) + elf64toh(shdr->sh_size)); var++)
+
+/* True if a string starts with a given prefix. */
+static inline bool starts_with(const char *str, const char *prefix)
+{
+ return memcmp(str, prefix, strlen(prefix)) == 0;
+}
+
+/* Returns a string containing the name of a given section. */
+static inline const char *section_name(Elf64_Shdr *shdr)
+{
+ return elf.sh_string + elf32toh(shdr->sh_name);
+}
+
+/* Returns a pointer to the first byte of section data. */
+static inline const char *section_begin(Elf64_Shdr *shdr)
+{
+ return elf_ptr(char, elf64toh(shdr->sh_offset));
+}
+
+/* Find a section by its offset from the beginning of the file. */
+static inline Elf64_Shdr *section_by_off(Elf64_Off off)
+{
+ assert_ne(off, 0UL, "%lu");
+ return elf_ptr(Elf64_Shdr, off);
+}
+
+/* Find a section by its index. */
+static inline Elf64_Shdr *section_by_idx(uint16_t idx)
+{
+ assert_ne(idx, SHN_UNDEF, "%u");
+ return &elf.sh_table[idx];
+}
+
+/*
+ * Memory-map the given ELF file, perform sanity checks, and
+ * populate global state.
+ */
+static void init_elf(const char *path)
+{
+ int fd, ret;
+ struct stat stat;
+
+ /* Store path in the global struct for error printing. */
+ elf.path = path;
+
+ /* Open the ELF file. */
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ fatal_perror("Could not open ELF file");
+
+ /* Get status of ELF file to obtain its size. */
+ ret = fstat(fd, &stat);
+ if (ret < 0) {
+ close(fd);
+ fatal_perror("Could not get status of ELF file");
+ }
+
+ /* mmap() the entire ELF file read-only at an arbitrary address. */
+ elf.begin = mmap(0, stat.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ if (elf.begin == MAP_FAILED) {
+ close(fd);
+ fatal_perror("Could not mmap ELF file");
+ }
+
+ /* mmap() was successful, close the FD. */
+ close(fd);
+
+ /* Get pointer to the ELF header. */
+ assert_ge(stat.st_size, sizeof(*elf.ehdr), "%lu");
+ elf.ehdr = elf_ptr(Elf64_Ehdr, 0);
+
+ /* Check the ELF magic. */
+ assert_eq(elf.ehdr->e_ident[EI_MAG0], ELFMAG0, "0x%x");
+ assert_eq(elf.ehdr->e_ident[EI_MAG1], ELFMAG1, "0x%x");
+ assert_eq(elf.ehdr->e_ident[EI_MAG2], ELFMAG2, "0x%x");
+ assert_eq(elf.ehdr->e_ident[EI_MAG3], ELFMAG3, "0x%x");
+
+ /* Sanity check that this is an ELF64 relocatable object for AArch64. */
+ assert_eq(elf.ehdr->e_ident[EI_CLASS], ELFCLASS64, "%u");
+ assert_eq(elf.ehdr->e_ident[EI_DATA], ELFENDIAN, "%u");
+ assert_eq(elf16toh(elf.ehdr->e_type), ET_REL, "%u");
+ assert_eq(elf16toh(elf.ehdr->e_machine), EM_AARCH64, "%u");
+
+ /* Populate fields of the global struct. */
+ elf.sh_table = section_by_off(elf64toh(elf.ehdr->e_shoff));
+ elf.sh_string = section_begin(section_by_idx(elf16toh(elf.ehdr->e_shstrndx)));
+}
+
+/* Print the prologue of the output ASM file. */
+static void emit_prologue(void)
+{
+ printf(".data\n"
+ ".pushsection " HYP_RELOC_SECTION ", \"a\"\n");
+}
+
+/* Print ASM statements needed as a prologue to a processed hyp section. */
+static void emit_section_prologue(const char *sh_orig_name)
+{
+ /* Declare the hyp section symbol. */
+ printf(".global %s%s\n", HYP_SECTION_SYMBOL_PREFIX, sh_orig_name);
+}
+
+/*
+ * Print ASM statements to create a hyp relocation entry for a given
+ * R_AARCH64_ABS64 relocation.
+ *
+ * The linker of vmlinux will populate the position given by `rela` with
+ * an absolute 64-bit kernel VA. If the kernel is relocatable, it will
+ * also generate a dynamic relocation entry so that the kernel can shift
+ * the address at runtime for KASLR.
+ *
+ * Emit a 32-bit offset from the current address to the position given
+ * by `rela`. This way the kernel can iterate over all kernel VAs used
+ * by hyp at runtime and convert them to hyp VAs. However, that offset
+ * will not be known until linking of `vmlinux`, so emit a PREL32
+ * relocation referencing a symbol that the hyp linker script put at
+ * the beginning of the relocated section + the offset from `rela`.
+ */
+static void emit_rela_abs64(Elf64_Rela *rela, const char *sh_orig_name)
+{
+ /* Offset of this reloc from the beginning of HYP_RELOC_SECTION. */
+ static size_t reloc_offset;
+
+ /* Create storage for the 32-bit offset. */
+ printf(".word 0\n");
+
+ /*
+ * Create a PREL32 relocation which instructs the linker of `vmlinux`
+ * to insert offset to position <base> + <offset>, where <base> is
+ * a symbol at the beginning of the relocated section, and <offset>
+ * is `rela->r_offset`.
+ */
+ printf(".reloc %lu, R_AARCH64_PREL32, %s%s + 0x%lx\n",
+ reloc_offset, HYP_SECTION_SYMBOL_PREFIX, sh_orig_name,
+ elf64toh(rela->r_offset));
+
+ reloc_offset += 4;
+}
+
+/* Print the epilogue of the output ASM file. */
+static void emit_epilogue(void)
+{
+ printf(".popsection\n");
+}
+
+/*
+ * Iterate over all RELA relocations in a given section and emit
+ * hyp relocation data for all absolute addresses in hyp code/data.
+ *
+ * Static relocations that generate PC-relative-addressing are ignored.
+ * Failure is reported for unexpected relocation types.
+ */
+static void emit_rela_section(Elf64_Shdr *sh_rela)
+{
+ Elf64_Shdr *sh_orig = &elf.sh_table[elf32toh(sh_rela->sh_info)];
+ const char *sh_orig_name = section_name(sh_orig);
+ Elf64_Rela *rela;
+
+ /* Skip all non-hyp sections. */
+ if (!starts_with(sh_orig_name, HYP_SECTION_PREFIX))
+ return;
+
+ emit_section_prologue(sh_orig_name);
+
+ for_each_rela(sh_rela, rela) {
+ uint32_t type = (uint32_t)elf64toh(rela->r_info);
+
+ /* Check that rela points inside the relocated section. */
+ assert_lt(elf64toh(rela->r_offset), elf64toh(sh_orig->sh_size), "0x%lx");
+
+ switch (type) {
+ /*
+ * Data relocations to generate absolute addressing.
+ * Emit a hyp relocation.
+ */
+ case R_AARCH64_ABS64:
+ emit_rela_abs64(rela, sh_orig_name);
+ break;
+ /* Allow relocations to generate PC-relative addressing. */
+ case R_AARCH64_LD_PREL_LO19:
+ case R_AARCH64_ADR_PREL_LO21:
+ case R_AARCH64_ADR_PREL_PG_HI21:
+ case R_AARCH64_ADR_PREL_PG_HI21_NC:
+ case R_AARCH64_ADD_ABS_LO12_NC:
+ case R_AARCH64_LDST8_ABS_LO12_NC:
+ case R_AARCH64_LDST16_ABS_LO12_NC:
+ case R_AARCH64_LDST32_ABS_LO12_NC:
+ case R_AARCH64_LDST64_ABS_LO12_NC:
+ case R_AARCH64_LDST128_ABS_LO12_NC:
+ break;
+ /* Allow relative relocations for control-flow instructions. */
+ case R_AARCH64_TSTBR14:
+ case R_AARCH64_CONDBR19:
+ case R_AARCH64_JUMP26:
+ case R_AARCH64_CALL26:
+ break;
+ /* Allow group relocations to create PC-relative offset inline. */
+ case R_AARCH64_MOVW_PREL_G0:
+ case R_AARCH64_MOVW_PREL_G0_NC:
+ case R_AARCH64_MOVW_PREL_G1:
+ case R_AARCH64_MOVW_PREL_G1_NC:
+ case R_AARCH64_MOVW_PREL_G2:
+ case R_AARCH64_MOVW_PREL_G2_NC:
+ case R_AARCH64_MOVW_PREL_G3:
+ break;
+ default:
+ fatal_error("Unexpected RELA type %u", type);
+ }
+ }
+}
+
+/* Iterate over all sections and emit hyp relocation data for RELA sections. */
+static void emit_all_relocs(void)
+{
+ Elf64_Shdr *shdr;
+
+ for_each_section(shdr) {
+ switch (elf32toh(shdr->sh_type)) {
+ case SHT_REL:
+ fatal_error("Unexpected SHT_REL section \"%s\"",
+ section_name(shdr));
+ case SHT_RELA:
+ emit_rela_section(shdr);
+ break;
+ }
+ }
+}
+
+int main(int argc, const char **argv)
+{
+ if (argc != 2) {
+ fprintf(stderr, "Usage: %s <elf_input>\n", argv[0]);
+ return EXIT_FAILURE;
+ }
+
+ init_elf(argv[1]);
+
+ emit_prologue();
+ emit_all_relocs();
+ emit_epilogue();
+
+ return EXIT_SUCCESS;
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index a820dfdc9c25..6585a7cbbc56 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -74,27 +74,28 @@ SYM_FUNC_END(__host_enter)
* void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
*/
SYM_FUNC_START(__hyp_do_panic)
- /* Load the format arguments into x1-7 */
- mov x6, x3
- get_vcpu_ptr x7, x3
-
- mrs x3, esr_el2
- mrs x4, far_el2
- mrs x5, hpfar_el2
-
/* Prepare and exit to the host's panic function. */
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
PSR_MODE_EL1h)
msr spsr_el2, lr
ldr lr, =panic
+ hyp_kimg_va lr, x6
msr elr_el2, lr
- /*
- * Set the panic format string and enter the host, conditionally
- * restoring the host context.
- */
+ /* Set the panic format string. Use the, now free, LR as scratch. */
+ ldr lr, =__hyp_panic_string
+ hyp_kimg_va lr, x6
+
+ /* Load the format arguments into x1-7. */
+ mov x6, x3
+ get_vcpu_ptr x7, x3
+ mrs x3, esr_el2
+ mrs x4, far_el2
+ mrs x5, hpfar_el2
+
+ /* Enter the host, conditionally restoring the host context. */
cmp x0, xzr
- ldr x0, =__hyp_panic_string
+ mov x0, lr
b.eq __host_enter_without_restoring
b __host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)
@@ -124,7 +125,7 @@ SYM_FUNC_END(__hyp_do_panic)
* Preserve x0-x4, which may contain stub parameters.
*/
ldr x5, =__kvm_handle_stub_hvc
- kimg_pa x5, x6
+ hyp_pa x5, x6
br x5
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index 31b060a44045..c631e29fb001 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -18,7 +18,7 @@
#include <asm/virt.h>
.text
- .pushsection .hyp.idmap.text, "ax"
+ .pushsection .idmap.text, "ax"
.align 11
@@ -47,6 +47,8 @@ __invalid:
b .
/*
+ * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
+ *
* x0: SMCCC function ID
* x1: struct kvm_nvhe_init_params PA
*/
@@ -55,24 +57,17 @@ __do_hyp_init:
cmp x0, #HVC_STUB_HCALL_NR
b.lo __kvm_handle_stub_hvc
- // We only actively check bits [24:31], and everything
- // else has to be zero, which we check at build time.
-#if (KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) & 0xFFFFFFFF00FFFFFF)
-#error Unexpected __KVM_HOST_SMCCC_FUNC___kvm_hyp_init value
-#endif
-
- ror x0, x0, #24
- eor x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 24) & 0xF)
- ror x0, x0, #4
- eor x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 28) & 0xF)
- cbz x0, 1f
+ mov x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
+ cmp x0, x3
+ b.eq 1f
+
mov x0, #SMCCC_RET_NOT_SUPPORTED
eret
1: mov x0, x1
- mov x4, lr
- bl ___kvm_hyp_init
- mov lr, x4
+ mov x3, lr
+ bl ___kvm_hyp_init // Clobbers x0..x2
+ mov lr, x3
/* Hello, World! */
mov x0, #SMCCC_RET_SUCCESS
@@ -82,8 +77,8 @@ SYM_CODE_END(__kvm_hyp_init)
/*
* Initialize the hypervisor in EL2.
*
- * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers
- * and leave x4 for the caller.
+ * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
+ * and leave x3 for the caller.
*
* x0: struct kvm_nvhe_init_params PA
*/
@@ -112,9 +107,9 @@ alternative_else_nop_endif
/*
* Set the PS bits in TCR_EL2.
*/
- ldr x1, [x0, #NVHE_INIT_TCR_EL2]
- tcr_compute_pa_size x1, #TCR_EL2_PS_SHIFT, x2, x3
- msr tcr_el2, x1
+ ldr x0, [x0, #NVHE_INIT_TCR_EL2]
+ tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
+ msr tcr_el2, x0
isb
@@ -139,7 +134,6 @@ alternative_else_nop_endif
/* Set the host vector */
ldr x0, =__kvm_hyp_host_vector
- kimg_hyp_va x0, x1
msr vbar_el2, x0
ret
@@ -189,16 +183,15 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
2: msr SPsel, #1 // We want to use SP_EL{1,2}
/* Initialize EL2 CPU state to sane values. */
- init_el2_state nvhe // Clobbers x0..x2
+ init_el2_state // Clobbers x0..x2
/* Enable MMU, set vectors and stack. */
mov x0, x28
- bl ___kvm_hyp_init // Clobbers x0..x3
+ bl ___kvm_hyp_init // Clobbers x0..x2
/* Leave idmap. */
mov x0, x29
ldr x1, =kvm_host_psci_cpu_entry
- kimg_hyp_va x1, x2
br x1
SYM_CODE_END(__kvm_hyp_init_cpu)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index a906f9e2ff34..f012f8665ecc 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -108,9 +108,9 @@ static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
typedef void (*hcall_t)(struct kvm_cpu_context *);
-#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = kimg_fn_ptr(handle_##x)
+#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
-static const hcall_t *host_hcall[] = {
+static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_vcpu_run),
HANDLE_FUNC(__kvm_flush_vm_context),
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
@@ -130,7 +130,6 @@ static const hcall_t *host_hcall[] = {
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(unsigned long, id, host_ctxt, 0);
- const hcall_t *kfn;
hcall_t hfn;
id -= KVM_HOST_SMCCC_ID(0);
@@ -138,13 +137,11 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
if (unlikely(id >= ARRAY_SIZE(host_hcall)))
goto inval;
- kfn = host_hcall[id];
- if (unlikely(!kfn))
+ hfn = host_hcall[id];
+ if (unlikely(!hfn))
goto inval;
cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
-
- hfn = kimg_fn_hyp_va(kfn);
hfn(host_ctxt);
return;
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
index 2997aa156d8e..879559057dee 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
@@ -33,8 +33,8 @@ unsigned long __hyp_per_cpu_offset(unsigned int cpu)
if (cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base))
hyp_panic();
- cpu_base_array = (unsigned long *)hyp_symbol_addr(kvm_arm_hyp_percpu_base);
+ cpu_base_array = (unsigned long *)&kvm_arm_hyp_percpu_base;
this_cpu_base = kern_hyp_va(cpu_base_array[cpu]);
- elf_base = (unsigned long)hyp_symbol_addr(__per_cpu_start);
+ elf_base = (unsigned long)&__per_cpu_start;
return this_cpu_base - elf_base;
}
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
index 1206d0d754d5..cd119d82d8e3 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
@@ -12,14 +12,17 @@
#include <asm/memory.h>
SECTIONS {
+ HYP_SECTION(.idmap.text)
HYP_SECTION(.text)
+ HYP_SECTION(.data..ro_after_init)
+ HYP_SECTION(.rodata)
+
/*
* .hyp..data..percpu needs to be page aligned to maintain the same
* alignment for when linking into vmlinux.
*/
. = ALIGN(PAGE_SIZE);
- HYP_SECTION_NAME(.data..percpu) : {
+ BEGIN_HYP_SECTION(.data..percpu)
PERCPU_INPUT(L1_CACHE_BYTES)
- }
- HYP_SECTION(.data..ro_after_init)
+ END_HYP_SECTION
}
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index e3947846ffcb..63de71c0481e 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -77,12 +77,6 @@ static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
}
-static __noreturn unsigned long psci_forward_noreturn(struct kvm_cpu_context *host_ctxt)
-{
- psci_forward(host_ctxt);
- hyp_panic(); /* unreachable */
-}
-
static unsigned int find_cpu_id(u64 mpidr)
{
unsigned int i;
@@ -134,8 +128,8 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
if (cpu_id == INVALID_CPU_ID)
return PSCI_RET_INVALID_PARAMS;
- boot_args = per_cpu_ptr(hyp_symbol_addr(cpu_on_args), cpu_id);
- init_params = per_cpu_ptr(hyp_symbol_addr(kvm_init_params), cpu_id);
+ boot_args = per_cpu_ptr(&cpu_on_args, cpu_id);
+ init_params = per_cpu_ptr(&kvm_init_params, cpu_id);
/* Check if the target CPU is already being booted. */
if (!try_acquire_boot_args(boot_args))
@@ -146,7 +140,7 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
wmb();
ret = psci_call(func_id, mpidr,
- __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_entry)),
+ __hyp_pa(&kvm_hyp_cpu_entry),
__hyp_pa(init_params));
/* If successful, the lock will be released by the target CPU. */
@@ -165,8 +159,8 @@ static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
struct psci_boot_args *boot_args;
struct kvm_nvhe_init_params *init_params;
- boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
- init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));
+ boot_args = this_cpu_ptr(&suspend_args);
+ init_params = this_cpu_ptr(&kvm_init_params);
/*
* No need to acquire a lock before writing to boot_args because a core
@@ -180,7 +174,7 @@ static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
* point if it is a deep sleep state.
*/
return psci_call(func_id, power_state,
- __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
+ __hyp_pa(&kvm_hyp_cpu_resume),
__hyp_pa(init_params));
}
@@ -192,8 +186,8 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
struct psci_boot_args *boot_args;
struct kvm_nvhe_init_params *init_params;
- boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
- init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));
+ boot_args = this_cpu_ptr(&suspend_args);
+ init_params = this_cpu_ptr(&kvm_init_params);
/*
* No need to acquire a lock before writing to boot_args because a core
@@ -204,7 +198,7 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
/* Will only return on error. */
return psci_call(func_id,
- __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
+ __hyp_pa(&kvm_hyp_cpu_resume),
__hyp_pa(init_params), 0);
}
@@ -213,12 +207,12 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
struct psci_boot_args *boot_args;
struct kvm_cpu_context *host_ctxt;
- host_ctxt = &this_cpu_ptr(hyp_symbol_addr(kvm_host_data))->host_ctxt;
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
if (is_cpu_on)
- boot_args = this_cpu_ptr(hyp_symbol_addr(cpu_on_args));
+ boot_args = this_cpu_ptr(&cpu_on_args);
else
- boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
+ boot_args = this_cpu_ptr(&suspend_args);
cpu_reg(host_ctxt, 0) = boot_args->r0;
write_sysreg_el2(boot_args->pc, SYS_ELR);
@@ -251,10 +245,13 @@ static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_
case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
return psci_forward(host_ctxt);
+ /*
+ * SYSTEM_OFF/RESET should not return according to the spec.
+ * Allow it so as to stay robust to broken firmware.
+ */
case PSCI_0_2_FN_SYSTEM_OFF:
case PSCI_0_2_FN_SYSTEM_RESET:
- psci_forward_noreturn(host_ctxt);
- unreachable();
+ return psci_forward(host_ctxt);
case PSCI_0_2_FN64_CPU_SUSPEND:
return psci_cpu_suspend(func_id, host_ctxt);
case PSCI_0_2_FN64_CPU_ON:
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index bdf8e55ed308..4d177ce1d536 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -45,6 +45,10 @@
#define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54)
+#define KVM_PTE_LEAF_ATTR_S2_PERMS (KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
+ KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
+ KVM_PTE_LEAF_ATTR_HI_S2_XN)
+
struct kvm_pgtable_walk_data {
struct kvm_pgtable *pgt;
struct kvm_pgtable_walker *walker;
@@ -170,10 +174,9 @@ static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp)
smp_store_release(ptep, pte);
}
-static bool kvm_set_valid_leaf_pte(kvm_pte_t *ptep, u64 pa, kvm_pte_t attr,
- u32 level)
+static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level)
{
- kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(pa);
+ kvm_pte_t pte = kvm_phys_to_pte(pa);
u64 type = (level == KVM_PGTABLE_MAX_LEVELS - 1) ? KVM_PTE_TYPE_PAGE :
KVM_PTE_TYPE_BLOCK;
@@ -181,12 +184,7 @@ static bool kvm_set_valid_leaf_pte(kvm_pte_t *ptep, u64 pa, kvm_pte_t attr,
pte |= FIELD_PREP(KVM_PTE_TYPE, type);
pte |= KVM_PTE_VALID;
- /* Tolerate KVM recreating the exact same mapping. */
- if (kvm_pte_valid(old))
- return old == pte;
-
- smp_store_release(ptep, pte);
- return true;
+ return pte;
}
static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, u64 addr,
@@ -341,12 +339,17 @@ static int hyp_map_set_prot_attr(enum kvm_pgtable_prot prot,
static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
kvm_pte_t *ptep, struct hyp_map_data *data)
{
+ kvm_pte_t new, old = *ptep;
u64 granule = kvm_granule_size(level), phys = data->phys;
if (!kvm_block_mapping_supported(addr, end, phys, level))
return false;
- WARN_ON(!kvm_set_valid_leaf_pte(ptep, phys, data->attr, level));
+ /* Tolerate KVM recreating the exact same mapping */
+ new = kvm_init_valid_leaf_pte(phys, data->attr, level);
+ if (old != new && !WARN_ON(kvm_pte_valid(old)))
+ smp_store_release(ptep, new);
+
data->phys += granule;
return true;
}
@@ -461,34 +464,41 @@ static int stage2_map_set_prot_attr(enum kvm_pgtable_prot prot,
return 0;
}
-static bool stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
- kvm_pte_t *ptep,
- struct stage2_map_data *data)
+static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
+ kvm_pte_t *ptep,
+ struct stage2_map_data *data)
{
+ kvm_pte_t new, old = *ptep;
u64 granule = kvm_granule_size(level), phys = data->phys;
+ struct page *page = virt_to_page(ptep);
if (!kvm_block_mapping_supported(addr, end, phys, level))
- return false;
-
- /*
- * If the PTE was already valid, drop the refcount on the table
- * early, as it will be bumped-up again in stage2_map_walk_leaf().
- * This ensures that the refcount stays constant across a valid to
- * valid PTE update.
- */
- if (kvm_pte_valid(*ptep))
- put_page(virt_to_page(ptep));
-
- if (kvm_set_valid_leaf_pte(ptep, phys, data->attr, level))
- goto out;
+ return -E2BIG;
+
+ new = kvm_init_valid_leaf_pte(phys, data->attr, level);
+ if (kvm_pte_valid(old)) {
+ /*
+ * Skip updating the PTE if we are trying to recreate the exact
+ * same mapping or only change the access permissions. Instead,
+ * the vCPU will exit one more time from guest if still needed
+ * and then go through the path of relaxing permissions.
+ */
+ if (!((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS)))
+ return -EAGAIN;
+
+ /*
+ * There's an existing different valid leaf entry, so perform
+ * break-before-make.
+ */
+ kvm_set_invalid_pte(ptep);
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
+ put_page(page);
+ }
- /* There's an existing valid leaf entry, so perform break-before-make */
- kvm_set_invalid_pte(ptep);
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
- kvm_set_valid_leaf_pte(ptep, phys, data->attr, level);
-out:
+ smp_store_release(ptep, new);
+ get_page(page);
data->phys += granule;
- return true;
+ return 0;
}
static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
@@ -516,6 +526,7 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
struct stage2_map_data *data)
{
+ int ret;
kvm_pte_t *childp, pte = *ptep;
struct page *page = virt_to_page(ptep);
@@ -526,8 +537,9 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
return 0;
}
- if (stage2_map_walker_try_leaf(addr, end, level, ptep, data))
- goto out_get_page;
+ ret = stage2_map_walker_try_leaf(addr, end, level, ptep, data);
+ if (ret != -E2BIG)
+ return ret;
if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
return -EINVAL;
@@ -551,9 +563,8 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
}
kvm_set_table_pte(ptep, childp);
-
-out_get_page:
get_page(page);
+
return 0;
}
diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
index 8f0585640241..87a54375bd6e 100644
--- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
+++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
@@ -64,7 +64,7 @@ int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
}
rd = kvm_vcpu_dabt_get_rd(vcpu);
- addr = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va;
+ addr = kvm_vgic_global_state.vcpu_hyp_va;
addr += fault_ipa - vgic->vgic_cpu_base;
if (kvm_vcpu_dabt_iswrite(vcpu)) {
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 25ea4ecb6449..ead21b98b620 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -71,6 +71,12 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
if (gpa != GPA_INVALID)
val = gpa;
break;
+ case ARM_SMCCC_TRNG_VERSION:
+ case ARM_SMCCC_TRNG_FEATURES:
+ case ARM_SMCCC_TRNG_GET_UUID:
+ case ARM_SMCCC_TRNG_RND32:
+ case ARM_SMCCC_TRNG_RND64:
+ return kvm_trng_call(vcpu);
default:
return kvm_psci_call(vcpu);
}
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 7d2257cc5438..77cb2d28f2a4 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -879,11 +879,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (vma_pagesize == PAGE_SIZE && !force_pte)
vma_pagesize = transparent_hugepage_adjust(memslot, hva,
&pfn, &fault_ipa);
- if (writable) {
+ if (writable)
prot |= KVM_PGTABLE_PROT_W;
- kvm_set_pfn_dirty(pfn);
- mark_page_dirty(kvm, gfn);
- }
if (fault_status != FSC_PERM && !device)
clean_dcache_guest_page(pfn, vma_pagesize);
@@ -911,11 +908,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
memcache);
}
+ /* Mark the page dirty only if the fault is handled successfully */
+ if (writable && !ret) {
+ kvm_set_pfn_dirty(pfn);
+ mark_page_dirty(kvm, gfn);
+ }
+
out_unlock:
spin_unlock(&kvm->mmu_lock);
kvm_set_pfn_accessed(pfn);
kvm_release_pfn_clean(pfn);
- return ret;
+ return ret != -EAGAIN ? ret : 0;
}
/* Resolve the access fault by making the page young again. */
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 4ad66a532e38..e9ec08b0b070 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -23,11 +23,11 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
switch (kvm->arch.pmuver) {
- case 1: /* ARMv8.0 */
+ case ID_AA64DFR0_PMUVER_8_0:
return GENMASK(9, 0);
- case 4: /* ARMv8.1 */
- case 5: /* ARMv8.4 */
- case 6: /* ARMv8.5 */
+ case ID_AA64DFR0_PMUVER_8_1:
+ case ID_AA64DFR0_PMUVER_8_4:
+ case ID_AA64DFR0_PMUVER_8_5:
return GENMASK(15, 0);
default: /* Shouldn't be here, just for sanity */
WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);
@@ -788,26 +788,36 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
u64 val, mask = 0;
- int base, i;
+ int base, i, nr_events;
if (!pmceid1) {
val = read_sysreg(pmceid0_el0);
base = 0;
} else {
val = read_sysreg(pmceid1_el0);
+ /*
+ * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
+ * as RAZ
+ */
+ if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4)
+ val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
base = 32;
}
if (!bmap)
return val;
+ nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
+
for (i = 0; i < 32; i += 8) {
u64 byte;
byte = bitmap_get_value8(bmap, base + i);
mask |= byte << i;
- byte = bitmap_get_value8(bmap, 0x4000 + base + i);
- mask |= byte << (32 + i);
+ if (nr_events >= (0x4000 + base + 32)) {
+ byte = bitmap_get_value8(bmap, 0x4000 + base + i);
+ mask |= byte << (32 + i);
+ }
}
return val & mask;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 42ccc27fb684..4f2f1e3145de 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -9,6 +9,7 @@
* Christoffer Dall <c.dall@virtualopensystems.com>
*/
+#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
@@ -43,6 +44,10 @@
* 64bit interface.
*/
+#define reg_to_encoding(x) \
+ sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
+ (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
+
static bool read_from_write_only(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
const struct sys_reg_desc *r)
@@ -273,8 +278,7 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
- u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
- (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+ u32 sr = reg_to_encoding(r);
if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
kvm_inject_undefined(vcpu);
@@ -590,6 +594,15 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
+static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
+{
+ if (kvm_vcpu_has_pmu(vcpu))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 pmcr, val;
@@ -613,9 +626,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
- bool enabled = kvm_vcpu_has_pmu(vcpu);
+ bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
- enabled &= (reg & flags) || vcpu_mode_priv(vcpu);
if (!enabled)
kvm_inject_undefined(vcpu);
@@ -689,14 +701,18 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 pmceid;
+ u64 pmceid, mask, shift;
BUG_ON(p->is_write);
if (pmu_access_el0_disabled(vcpu))
return false;
+ get_access_mask(r, &mask, &shift);
+
pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
+ pmceid &= mask;
+ pmceid >>= shift;
p->regval = pmceid;
@@ -900,11 +916,6 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- if (!kvm_vcpu_has_pmu(vcpu)) {
- kvm_inject_undefined(vcpu);
- return false;
- }
-
if (p->is_write) {
if (!vcpu_mode_priv(vcpu)) {
kvm_inject_undefined(vcpu);
@@ -921,10 +932,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
-#define reg_to_encoding(x) \
- sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
- (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
-
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
{ SYS_DESC(SYS_DBGBVRn_EL1(n)), \
@@ -936,15 +943,18 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
{ SYS_DESC(SYS_DBGWCRn_EL1(n)), \
trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
+#define PMU_SYS_REG(r) \
+ SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
+
/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n) \
- { SYS_DESC(SYS_PMEVCNTRn_EL0(n)), \
- access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
+ { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \
+ .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n) \
- { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \
- access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
+ { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)), \
+ .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
@@ -1016,44 +1026,50 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
return true;
}
+#define FEATURE(x) (GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT))
+
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
struct sys_reg_desc const *r, bool raz)
{
- u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
- (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+ u32 id = reg_to_encoding(r);
u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
- if (id == SYS_ID_AA64PFR0_EL1) {
+ switch (id) {
+ case SYS_ID_AA64PFR0_EL1:
if (!vcpu_has_sve(vcpu))
- val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
- val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
- val &= ~(0xfUL << ID_AA64PFR0_CSV2_SHIFT);
- val |= ((u64)vcpu->kvm->arch.pfr0_csv2 << ID_AA64PFR0_CSV2_SHIFT);
- val &= ~(0xfUL << ID_AA64PFR0_CSV3_SHIFT);
- val |= ((u64)vcpu->kvm->arch.pfr0_csv3 << ID_AA64PFR0_CSV3_SHIFT);
- } else if (id == SYS_ID_AA64PFR1_EL1) {
- val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT);
- } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
- val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
- (0xfUL << ID_AA64ISAR1_API_SHIFT) |
- (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
- (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
- } else if (id == SYS_ID_AA64DFR0_EL1) {
- u64 cap = 0;
-
- /* Limit guests to PMUv3 for ARMv8.1 */
- if (kvm_vcpu_has_pmu(vcpu))
- cap = ID_AA64DFR0_PMUVER_8_1;
-
+ val &= ~FEATURE(ID_AA64PFR0_SVE);
+ val &= ~FEATURE(ID_AA64PFR0_AMU);
+ val &= ~FEATURE(ID_AA64PFR0_CSV2);
+ val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
+ val &= ~FEATURE(ID_AA64PFR0_CSV3);
+ val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
+ break;
+ case SYS_ID_AA64PFR1_EL1:
+ val &= ~FEATURE(ID_AA64PFR1_MTE);
+ break;
+ case SYS_ID_AA64ISAR1_EL1:
+ if (!vcpu_has_ptrauth(vcpu))
+ val &= ~(FEATURE(ID_AA64ISAR1_APA) |
+ FEATURE(ID_AA64ISAR1_API) |
+ FEATURE(ID_AA64ISAR1_GPA) |
+ FEATURE(ID_AA64ISAR1_GPI));
+ break;
+ case SYS_ID_AA64DFR0_EL1:
+ /* Limit debug to ARMv8.0 */
+ val &= ~FEATURE(ID_AA64DFR0_DEBUGVER);
+ val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6);
+ /* Limit guests to PMUv3 for ARMv8.4 */
val = cpuid_feature_cap_perfmon_field(val,
- ID_AA64DFR0_PMUVER_SHIFT,
- cap);
- } else if (id == SYS_ID_DFR0_EL1) {
- /* Limit guests to PMUv3 for ARMv8.1 */
+ ID_AA64DFR0_PMUVER_SHIFT,
+ kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
+ break;
+ case SYS_ID_DFR0_EL1:
+ /* Limit guests to PMUv3 for ARMv8.4 */
val = cpuid_feature_cap_perfmon_field(val,
- ID_DFR0_PERFMON_SHIFT,
- ID_DFR0_PERFMON_8_1);
+ ID_DFR0_PERFMON_SHIFT,
+ kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0);
+ break;
}
return val;
@@ -1062,8 +1078,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
- u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
- (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+ u32 id = reg_to_encoding(r);
switch (id) {
case SYS_ID_AA64ZFR0_EL1:
@@ -1486,8 +1501,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
- { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
- { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
+ { PMU_SYS_REG(SYS_PMINTENSET_EL1),
+ .access = access_pminten, .reg = PMINTENSET_EL1 },
+ { PMU_SYS_REG(SYS_PMINTENCLR_EL1),
+ .access = access_pminten, .reg = PMINTENSET_EL1 },
+ { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
@@ -1526,23 +1544,36 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
{ SYS_DESC(SYS_CTR_EL0), access_ctr },
- { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
- { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
- { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
- { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
- { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
- { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
- { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
- { SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
- { SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
- { SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
- { SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
+ { PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
+ .reset = reset_pmcr, .reg = PMCR_EL0 },
+ { PMU_SYS_REG(SYS_PMCNTENSET_EL0),
+ .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+ { PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
+ .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+ { PMU_SYS_REG(SYS_PMOVSCLR_EL0),
+ .access = access_pmovs, .reg = PMOVSSET_EL0 },
+ { PMU_SYS_REG(SYS_PMSWINC_EL0),
+ .access = access_pmswinc, .reg = PMSWINC_EL0 },
+ { PMU_SYS_REG(SYS_PMSELR_EL0),
+ .access = access_pmselr, .reg = PMSELR_EL0 },
+ { PMU_SYS_REG(SYS_PMCEID0_EL0),
+ .access = access_pmceid, .reset = NULL },
+ { PMU_SYS_REG(SYS_PMCEID1_EL0),
+ .access = access_pmceid, .reset = NULL },
+ { PMU_SYS_REG(SYS_PMCCNTR_EL0),
+ .access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
+ { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
+ .access = access_pmu_evtyper, .reset = NULL },
+ { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
+ .access = access_pmu_evcntr, .reset = NULL },
/*
* PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
* in 32bit mode. Here we choose to reset it as zero for consistency.
*/
- { SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
- { SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
+ { PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
+ .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
+ { PMU_SYS_REG(SYS_PMOVSSET_EL0),
+ .access = access_pmovs, .reg = PMOVSSET_EL0 },
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
@@ -1694,14 +1725,15 @@ static const struct sys_reg_desc sys_reg_descs[] = {
* PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
* in 32bit mode. Here we choose to reset it as zero for consistency.
*/
- { SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
+ { PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
+ .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
};
-static bool trap_dbgidr(struct kvm_vcpu *vcpu,
+static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
@@ -1715,7 +1747,7 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
(((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
(((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
- | (6 << 16) | (el3 << 14) | (el3 << 12));
+ | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
return true;
}
}
@@ -1748,8 +1780,8 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
 * guest. Revisit this one day, should this principle change.
*/
static const struct sys_reg_desc cp14_regs[] = {
- /* DBGIDR */
- { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
+ /* DBGDIDR */
+ { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
/* DBGDTRRXext */
{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
@@ -1899,8 +1931,8 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
- { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
- { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
+ { AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
+ { AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
@@ -1908,6 +1940,10 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
+ { AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 4), access_pmceid },
+ { AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 5), access_pmceid },
+ /* PMMIR */
+ { Op1( 0), CRn( 9), CRm(14), Op2( 6), trap_raz_wi },
/* PRRR/MAIR0 */
{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
diff --git a/arch/arm64/kvm/trng.c b/arch/arm64/kvm/trng.c
new file mode 100644
index 000000000000..99bdd7103c9c
--- /dev/null
+++ b/arch/arm64/kvm/trng.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Arm Ltd.
+
+#include <linux/arm-smccc.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_emulate.h>
+
+#include <kvm/arm_hypercalls.h>
+
+#define ARM_SMCCC_TRNG_VERSION_1_0 0x10000UL
+
+/* Those values are deliberately separate from the generic SMCCC definitions. */
+#define TRNG_SUCCESS 0UL
+#define TRNG_NOT_SUPPORTED ((unsigned long)-1)
+#define TRNG_INVALID_PARAMETER ((unsigned long)-2)
+#define TRNG_NO_ENTROPY ((unsigned long)-3)
+
+#define TRNG_MAX_BITS64 192
+
+static const uuid_t arm_smc_trng_uuid __aligned(4) = UUID_INIT(
+ 0x0d21e000, 0x4384, 0x11eb, 0x80, 0x70, 0x52, 0x44, 0x55, 0x4e, 0x5a, 0x4c);
+
+static int kvm_trng_do_rnd(struct kvm_vcpu *vcpu, int size)
+{
+ DECLARE_BITMAP(bits, TRNG_MAX_BITS64);
+ u32 num_bits = smccc_get_arg1(vcpu);
+ int i;
+
+ if (num_bits > 3 * size) {
+ smccc_set_retval(vcpu, TRNG_INVALID_PARAMETER, 0, 0, 0);
+ return 1;
+ }
+
+ /* get as many bits as we need to fulfil the request */
+ for (i = 0; i < DIV_ROUND_UP(num_bits, BITS_PER_LONG); i++)
+ bits[i] = get_random_long();
+
+ bitmap_clear(bits, num_bits, TRNG_MAX_BITS64 - num_bits);
+
+ if (size == 32)
+ smccc_set_retval(vcpu, TRNG_SUCCESS, lower_32_bits(bits[1]),
+ upper_32_bits(bits[0]), lower_32_bits(bits[0]));
+ else
+ smccc_set_retval(vcpu, TRNG_SUCCESS, bits[2], bits[1], bits[0]);
+
+ memzero_explicit(bits, sizeof(bits));
+ return 1;
+}
+
+int kvm_trng_call(struct kvm_vcpu *vcpu)
+{
+ const __le32 *u = (__le32 *)arm_smc_trng_uuid.b;
+ u32 func_id = smccc_get_function(vcpu);
+ unsigned long val = TRNG_NOT_SUPPORTED;
+ int size = 64;
+
+ switch (func_id) {
+ case ARM_SMCCC_TRNG_VERSION:
+ val = ARM_SMCCC_TRNG_VERSION_1_0;
+ break;
+ case ARM_SMCCC_TRNG_FEATURES:
+ switch (smccc_get_arg1(vcpu)) {
+ case ARM_SMCCC_TRNG_VERSION:
+ case ARM_SMCCC_TRNG_FEATURES:
+ case ARM_SMCCC_TRNG_GET_UUID:
+ case ARM_SMCCC_TRNG_RND32:
+ case ARM_SMCCC_TRNG_RND64:
+ val = TRNG_SUCCESS;
+ }
+ break;
+ case ARM_SMCCC_TRNG_GET_UUID:
+ smccc_set_retval(vcpu, le32_to_cpu(u[0]), le32_to_cpu(u[1]),
+ le32_to_cpu(u[2]), le32_to_cpu(u[3]));
+ return 1;
+ case ARM_SMCCC_TRNG_RND32:
+ size = 32;
+ fallthrough;
+ case ARM_SMCCC_TRNG_RND64:
+ return kvm_trng_do_rnd(vcpu, size);
+ }
+
+ smccc_set_retval(vcpu, val, 0, 0, 0);
+ return 1;
+}
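
For orientation, a minimal guest-side sketch of how this service is driven over SMCCC. guest_get_trng_bits() is a hypothetical helper, not part of this patch; it only assumes the ARM_SMCCC_TRNG_* constants used above and the existing arm_smccc_1_1_invoke() conduit helper.

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical guest-side caller for the TRNG service implemented above. */
static int guest_get_trng_bits(u64 out[3], u32 num_bits)
{
	struct arm_smccc_res res;

	if (num_bits > 192)	/* TRNG_RND64 hands back at most 192 bits */
		return -EINVAL;

	arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, num_bits, &res);
	if ((long)res.a0 < 0)	/* TRNG_NOT_SUPPORTED, TRNG_NO_ENTROPY, ... */
		return -EIO;

	out[0] = res.a3;	/* least significant bits, per kvm_trng_do_rnd() */
	out[1] = res.a2;
	out[2] = res.a1;
	return 0;
}
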
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 70fcd6a12fe1..978301392d67 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -81,6 +81,34 @@ __init void kvm_compute_layout(void)
init_hyp_physvirt_offset();
}
+/*
+ * The .hyp.reloc ELF section contains a list of kimg positions that
+ * contain kimg VAs but will be accessed only in hyp execution context.
+ * Convert them to hyp VAs. See gen-hyprel.c for more details.
+ */
+__init void kvm_apply_hyp_relocations(void)
+{
+ int32_t *rel;
+ int32_t *begin = (int32_t *)__hyp_reloc_begin;
+ int32_t *end = (int32_t *)__hyp_reloc_end;
+
+ for (rel = begin; rel < end; ++rel) {
+ uintptr_t *ptr, kimg_va;
+
+ /*
+ * Each entry contains a 32-bit relative offset from itself
+ * to a kimg VA position.
+ */
+ ptr = (uintptr_t *)lm_alias((char *)rel + *rel);
+
+ /* Read the kimg VA value at the relocation address. */
+ kimg_va = *ptr;
+
+ /* Convert to hyp VA and store back to the relocation address. */
+ *ptr = __early_kern_hyp_va((uintptr_t)lm_alias(kimg_va));
+ }
+}
+
static u32 compute_instruction(int n, u32 rd, u32 rn)
{
u32 insn = AARCH64_BREAK_FAULT;
@@ -255,12 +283,6 @@ static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst
*updptr++ = cpu_to_le32(insn);
}
-void kvm_update_kimg_phys_offset(struct alt_instr *alt,
- __le32 *origptr, __le32 *updptr, int nr_inst)
-{
- generate_mov_q(kimage_voffset + PHYS_OFFSET, origptr, updptr, nr_inst);
-}
-
void kvm_get_kimage_voffset(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst)
{
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 9e1a12e10053..351537c12f36 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -149,19 +149,3 @@ SYM_FUNC_START(mte_restore_page_tags)
ret
SYM_FUNC_END(mte_restore_page_tags)
-
-/*
- * Assign allocation tags for a region of memory based on the pointer tag
- * x0 - source pointer
- * x1 - size
- *
- * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
- * size must be non-zero and MTE_GRANULE_SIZE aligned.
- */
-SYM_FUNC_START(mte_assign_mem_tag_range)
-1: stg x0, [x0]
- add x0, x0, #MTE_GRANULE_SIZE
- subs x1, x1, #MTE_GRANULE_SIZE
- b.gt 1b
- ret
-SYM_FUNC_END(mte_assign_mem_tag_range)
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 5ead3c3de3b6..f188c9092696 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o
-obj-$(CONFIG_NUMA) += numa.o
+obj-$(CONFIG_TRANS_TABLE) += trans_pgd.o
obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
obj-$(CONFIG_ARM64_MTE) += mteswap.o
KASAN_SANITIZE_physaddr.o += n
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 35d75c60e2b8..f37d4e3830b7 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -10,6 +10,7 @@
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/extable.h>
+#include <linux/kfence.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
@@ -302,12 +303,24 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
static void report_tag_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- bool is_write = ((esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT) != 0;
+ static bool reported;
+ bool is_write;
+
+ if (READ_ONCE(reported))
+ return;
+
+ /*
+ * This is used for KASAN tests and assumes that no MTE faults
+ * happened before running the tests.
+ */
+ if (mte_report_once())
+ WRITE_ONCE(reported, true);
/*
* SAS bits aren't set for all faults reported in EL1, so we can't
* find out access size.
*/
+ is_write = !!(esr & ESR_ELx_WNR);
kasan_report(addr, 0, is_write, regs->pc);
}
#else
@@ -319,12 +332,8 @@ static inline void report_tag_fault(unsigned long addr, unsigned int esr,
static void do_tag_recovery(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- static bool reported;
- if (!READ_ONCE(reported)) {
- report_tag_fault(addr, esr, regs);
- WRITE_ONCE(reported, true);
- }
+ report_tag_fault(addr, esr, regs);
/*
* Disable MTE Tag Checking on the local CPU for the current EL.
@@ -381,6 +390,9 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
} else if (addr < PAGE_SIZE) {
msg = "NULL pointer dereference";
} else {
+ if (kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
+ return;
+
msg = "paging request";
}
@@ -564,7 +576,7 @@ retry:
mmap_read_lock(mm);
} else {
/*
- * The above down_read_trylock() might have succeeded in which
+ * The above mmap_read_trylock() might have succeeded in which
* case, we'll have missed the might_sleep() from down_read().
*/
might_sleep();
@@ -875,44 +887,12 @@ static void debug_exception_exit(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(debug_exception_exit);
-#ifdef CONFIG_ARM64_ERRATUM_1463225
-DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
-
-static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
-{
- if (user_mode(regs))
- return 0;
-
- if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
- return 0;
-
- /*
- * We've taken a dummy step exception from the kernel to ensure
- * that interrupts are re-enabled on the syscall path. Return back
- * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
- * masked so that we can safely restore the mdscr and get on with
- * handling the syscall.
- */
- regs->pstate |= PSR_D_BIT;
- return 1;
-}
-#else
-static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
-{
- return 0;
-}
-#endif /* CONFIG_ARM64_ERRATUM_1463225 */
-NOKPROBE_SYMBOL(cortex_a76_erratum_1463225_debug_handler);
-
void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
struct pt_regs *regs)
{
const struct fault_info *inf = esr_to_debug_fault_info(esr);
unsigned long pc = instruction_pointer(regs);
- if (cortex_a76_erratum_1463225_debug_handler(regs))
- return;
-
debug_exception_enter(regs);
if (user_mode(regs) && !is_ttbr0_addr(pc))
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 709d98fea90c..0ace5e68efba 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -416,10 +416,10 @@ void __init bootmem_init(void)
max_pfn = max_low_pfn = max;
min_low_pfn = min;
- arm64_numa_init();
+ arch_numa_init();
/*
- * must be done after arm64_numa_init() which calls numa_init() to
+ * must be done after arch_numa_init() which calls numa_init() to
* initialize node_online_map that gets used in hugetlb_cma_reserve()
* while allocating required CMA size across online nodes.
*/
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 07937b49cb88..a38f54cd638c 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -5,20 +5,11 @@
* Copyright (C) 2012 ARM Ltd.
*/
-#include <linux/elf.h>
-#include <linux/fs.h>
-#include <linux/memblock.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/export.h>
-#include <linux/shm.h>
-#include <linux/sched/signal.h>
-#include <linux/sched/mm.h>
#include <linux/io.h>
-#include <linux/personality.h>
-#include <linux/random.h>
+#include <linux/memblock.h>
+#include <linux/types.h>
-#include <asm/cputype.h>
+#include <asm/page.h>
/*
* You really shouldn't be using read() or write() on /dev/mem. This might go
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index ae0c3d023824..3802cfbdd20d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -628,7 +628,7 @@ static bool arm64_early_this_cpu_has_bti(void)
if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
return false;
- pfr1 = read_sysreg_s(SYS_ID_AA64PFR1_EL1);
+ pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
return cpuid_feature_extract_unsigned_field(pfr1,
ID_AA64PFR1_BT_SHIFT);
}
@@ -1094,6 +1094,7 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
+ WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
return vmemmap_populate_basepages(start, end, node, altmap);
}
#else /* !ARM64_SWAPPER_USES_SECTION_MAPS */
@@ -1107,6 +1108,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
pud_t *pudp;
pmd_t *pmdp;
+ WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
do {
next = pmd_addr_end(addr, end);
@@ -1153,7 +1155,7 @@ void vmemmap_free(unsigned long start, unsigned long end,
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
-static inline pud_t * fixmap_pud(unsigned long addr)
+static inline pud_t *fixmap_pud(unsigned long addr)
{
pgd_t *pgdp = pgd_offset_k(addr);
p4d_t *p4dp = p4d_offset(pgdp, addr);
@@ -1164,7 +1166,7 @@ static inline pud_t * fixmap_pud(unsigned long addr)
return pud_offset_kimg(p4dp, addr);
}
-static inline pmd_t * fixmap_pmd(unsigned long addr)
+static inline pmd_t *fixmap_pmd(unsigned long addr)
{
pud_t *pudp = fixmap_pud(addr);
pud_t pud = READ_ONCE(*pudp);
@@ -1174,7 +1176,7 @@ static inline pmd_t * fixmap_pmd(unsigned long addr)
return pmd_offset_kimg(pudp, addr);
}
-static inline pte_t * fixmap_pte(unsigned long addr)
+static inline pte_t *fixmap_pte(unsigned long addr)
{
return &bm_pte[pte_index(addr)];
}
@@ -1442,16 +1444,19 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
}
-static bool inside_linear_region(u64 start, u64 size)
+struct range arch_get_mappable_range(void)
{
+ struct range mhp_range;
+
/*
* Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
* accommodating both its ends but excluding PAGE_END. Max physical
* range which can be mapped inside this linear mapping range, must
* also be derived from its end points.
*/
- return start >= __pa(_PAGE_OFFSET(vabits_actual)) &&
- (start + size - 1) <= __pa(PAGE_END - 1);
+ mhp_range.start = __pa(_PAGE_OFFSET(vabits_actual));
+ mhp_range.end = __pa(PAGE_END - 1);
+ return mhp_range;
}
int arch_add_memory(int nid, u64 start, u64 size,
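
For orientation, a small sketch of how a caller might check a candidate range against this window before the VM_BUG_ON() below fires. hotplug_range_ok() is a hypothetical wrapper; mhp_range_allowed() and arch_get_mappable_range() are the helpers this hunk actually relies on.

#include <linux/memory_hotplug.h>
#include <linux/printk.h>
#include <linux/range.h>

static bool hotplug_range_ok(u64 start, u64 size)
{
	struct range r = arch_get_mappable_range();

	pr_debug("linear map window: [%#llx-%#llx]\n", r.start, r.end);

	/* true: the range must be coverable by the linear map */
	return mhp_range_allowed(start, size, true);
}
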
@@ -1459,12 +1464,14 @@ int arch_add_memory(int nid, u64 start, u64 size,
{
int ret, flags = 0;
- if (!inside_linear_region(start, size)) {
- pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size);
- return -EINVAL;
- }
+ VM_BUG_ON(!mhp_range_allowed(start, size, true));
- if (rodata_full || debug_pagealloc_enabled())
+ /*
+ * KFENCE requires the linear map to be mapped at page granularity, so that
+ * it is possible to protect/unprotect single pages in the KFENCE pool.
+ */
+ if (rodata_full || debug_pagealloc_enabled() ||
+ IS_ENABLED(CONFIG_KFENCE))
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
diff --git a/arch/arm64/mm/physaddr.c b/arch/arm64/mm/physaddr.c
index 67a9ba9eaa96..cde44c13dda1 100644
--- a/arch/arm64/mm/physaddr.c
+++ b/arch/arm64/mm/physaddr.c
@@ -9,7 +9,7 @@
phys_addr_t __virt_to_phys(unsigned long x)
{
- WARN(!__is_lm_address(x),
+ WARN(!__is_lm_address(__tag_reset(x)),
"virt_to_phys used for non-linear address: %pK (%pS)\n",
(void *)x,
(void *)x);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 1f7ee8c8b7b8..c967bfd30d2b 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -291,17 +291,7 @@ skip_pgd:
/* We're done: fire up the MMU again */
mrs x17, sctlr_el1
orr x17, x17, #SCTLR_ELx_M
- msr sctlr_el1, x17
- isb
-
- /*
- * Invalidate the local I-cache so that any instructions fetched
- * speculatively from the PoC are discarded, since they may have
- * been dynamically patched at the PoU.
- */
- ic iallu
- dsb nsh
- isb
+ set_sctlr_el1 x17
/* Set the flag to zero to indicate that we're all done */
str wzr, [flag_ptr]
@@ -464,8 +454,8 @@ SYM_FUNC_START(__cpu_setup)
#endif
msr mair_el1, x5
/*
- * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
- * both user and kernel.
+ * Set/prepare TCR and TTBR. TCR_EL1.T1SZ gets further
+ * adjusted if the kernel is compiled with 52bit VA support.
*/
mov_q x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index 04137a8f3d2d..0e050d76b83a 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -324,6 +324,7 @@ void ptdump_walk(struct seq_file *s, struct ptdump_info *info)
st = (struct pg_state){
.seq = s,
.marker = info->markers,
+ .level = -1,
.ptdump = {
.note_page = note_page,
.range = (struct ptdump_range[]){
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
new file mode 100644
index 000000000000..527f0a39c3da
--- /dev/null
+++ b/arch/arm64/mm/trans_pgd.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Transitional page tables for kexec and hibernate
+ *
+ * This file derived from: arch/arm64/kernel/hibernate.c
+ *
+ * Copyright (c) 2020, Microsoft Corporation.
+ * Pavel Tatashin <pasha.tatashin@soleen.com>
+ *
+ */
+
+/*
+ * Transitional tables are used while the system transfers from one world to
+ * another, such as during hibernate restore and kexec reboots. During these
+ * phases one cannot rely on the page tables not being overwritten, because
+ * hibernate and kexec can overwrite the current page tables during the transition.
+ */
+
+#include <asm/trans_pgd.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <linux/suspend.h>
+#include <linux/bug.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+
+static void *trans_alloc(struct trans_pgd_info *info)
+{
+ return info->trans_alloc_page(info->trans_alloc_arg);
+}
+
+static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
+{
+ pte_t pte = READ_ONCE(*src_ptep);
+
+ if (pte_valid(pte)) {
+ /*
+ * Resume will overwrite areas that may be marked
+ * read only (code, rodata). Clear the RDONLY bit from
+ * the temporary mappings we use during restore.
+ */
+ set_pte(dst_ptep, pte_mkwrite(pte));
+ } else if (debug_pagealloc_enabled() && !pte_none(pte)) {
+ /*
+ * debug_pagealloc will have removed the PTE_VALID bit if
+ * the page isn't in use by the resume kernel. It may have
+ * been in use by the original kernel, in which case we need
+ * to put it back in our copy to do the restore.
+ *
+ * Before marking this entry valid, check that the pfn is
+ * one that should be mapped.
+ */
+ BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+ set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
+ }
+}
+
+static int copy_pte(struct trans_pgd_info *info, pmd_t *dst_pmdp,
+ pmd_t *src_pmdp, unsigned long start, unsigned long end)
+{
+ pte_t *src_ptep;
+ pte_t *dst_ptep;
+ unsigned long addr = start;
+
+ dst_ptep = trans_alloc(info);
+ if (!dst_ptep)
+ return -ENOMEM;
+ pmd_populate_kernel(NULL, dst_pmdp, dst_ptep);
+ dst_ptep = pte_offset_kernel(dst_pmdp, start);
+
+ src_ptep = pte_offset_kernel(src_pmdp, start);
+ do {
+ _copy_pte(dst_ptep, src_ptep, addr);
+ } while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);
+
+ return 0;
+}
+
+static int copy_pmd(struct trans_pgd_info *info, pud_t *dst_pudp,
+ pud_t *src_pudp, unsigned long start, unsigned long end)
+{
+ pmd_t *src_pmdp;
+ pmd_t *dst_pmdp;
+ unsigned long next;
+ unsigned long addr = start;
+
+ if (pud_none(READ_ONCE(*dst_pudp))) {
+ dst_pmdp = trans_alloc(info);
+ if (!dst_pmdp)
+ return -ENOMEM;
+ pud_populate(NULL, dst_pudp, dst_pmdp);
+ }
+ dst_pmdp = pmd_offset(dst_pudp, start);
+
+ src_pmdp = pmd_offset(src_pudp, start);
+ do {
+ pmd_t pmd = READ_ONCE(*src_pmdp);
+
+ next = pmd_addr_end(addr, end);
+ if (pmd_none(pmd))
+ continue;
+ if (pmd_table(pmd)) {
+ if (copy_pte(info, dst_pmdp, src_pmdp, addr, next))
+ return -ENOMEM;
+ } else {
+ set_pmd(dst_pmdp,
+ __pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
+ }
+ } while (dst_pmdp++, src_pmdp++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int copy_pud(struct trans_pgd_info *info, p4d_t *dst_p4dp,
+ p4d_t *src_p4dp, unsigned long start,
+ unsigned long end)
+{
+ pud_t *dst_pudp;
+ pud_t *src_pudp;
+ unsigned long next;
+ unsigned long addr = start;
+
+ if (p4d_none(READ_ONCE(*dst_p4dp))) {
+ dst_pudp = trans_alloc(info);
+ if (!dst_pudp)
+ return -ENOMEM;
+ p4d_populate(NULL, dst_p4dp, dst_pudp);
+ }
+ dst_pudp = pud_offset(dst_p4dp, start);
+
+ src_pudp = pud_offset(src_p4dp, start);
+ do {
+ pud_t pud = READ_ONCE(*src_pudp);
+
+ next = pud_addr_end(addr, end);
+ if (pud_none(pud))
+ continue;
+ if (pud_table(pud)) {
+ if (copy_pmd(info, dst_pudp, src_pudp, addr, next))
+ return -ENOMEM;
+ } else {
+ set_pud(dst_pudp,
+ __pud(pud_val(pud) & ~PUD_SECT_RDONLY));
+ }
+ } while (dst_pudp++, src_pudp++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int copy_p4d(struct trans_pgd_info *info, pgd_t *dst_pgdp,
+ pgd_t *src_pgdp, unsigned long start,
+ unsigned long end)
+{
+ p4d_t *dst_p4dp;
+ p4d_t *src_p4dp;
+ unsigned long next;
+ unsigned long addr = start;
+
+ dst_p4dp = p4d_offset(dst_pgdp, start);
+ src_p4dp = p4d_offset(src_pgdp, start);
+ do {
+ next = p4d_addr_end(addr, end);
+ if (p4d_none(READ_ONCE(*src_p4dp)))
+ continue;
+ if (copy_pud(info, dst_p4dp, src_p4dp, addr, next))
+ return -ENOMEM;
+ } while (dst_p4dp++, src_p4dp++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int copy_page_tables(struct trans_pgd_info *info, pgd_t *dst_pgdp,
+ unsigned long start, unsigned long end)
+{
+ unsigned long next;
+ unsigned long addr = start;
+ pgd_t *src_pgdp = pgd_offset_k(start);
+
+ dst_pgdp = pgd_offset_pgd(dst_pgdp, start);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(READ_ONCE(*src_pgdp)))
+ continue;
+ if (copy_p4d(info, dst_pgdp, src_pgdp, addr, next))
+ return -ENOMEM;
+ } while (dst_pgdp++, src_pgdp++, addr = next, addr != end);
+
+ return 0;
+}
+
+/*
+ * Create trans_pgd and copy linear map.
+ * info: contains allocator and its argument
+ * dst_pgdp: new page table that is created, and to which map is copied.
+ * start: Start of the interval (inclusive).
+ * end: End of the interval (exclusive).
+ *
+ * Returns 0 on success, and -ENOMEM on failure.
+ */
+int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **dst_pgdp,
+ unsigned long start, unsigned long end)
+{
+ int rc;
+ pgd_t *trans_pgd = trans_alloc(info);
+
+ if (!trans_pgd) {
+ pr_err("Failed to allocate memory for temporary page tables.\n");
+ return -ENOMEM;
+ }
+
+ rc = copy_page_tables(info, trans_pgd, start, end);
+ if (!rc)
+ *dst_pgdp = trans_pgd;
+
+ return rc;
+}
+
+/*
+ * Add map entry to trans_pgd for a base-size page at PTE level.
+ * info: contains allocator and its argument
+ * trans_pgd: page table in which new map is added.
+ * page: page to be mapped.
+ * dst_addr: new VA address for the page
+ * pgprot: protection for the page.
+ *
+ * Returns 0 on success, and -ENOMEM on failure.
+ */
+int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd,
+ void *page, unsigned long dst_addr, pgprot_t pgprot)
+{
+ pgd_t *pgdp;
+ p4d_t *p4dp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ pgdp = pgd_offset_pgd(trans_pgd, dst_addr);
+ if (pgd_none(READ_ONCE(*pgdp))) {
+ p4dp = trans_alloc(info);
+ if (!p4dp)
+ return -ENOMEM;
+ pgd_populate(NULL, pgdp, p4dp);
+ }
+
+ p4dp = p4d_offset(pgdp, dst_addr);
+ if (p4d_none(READ_ONCE(*p4dp))) {
+ pudp = trans_alloc(info);
+ if (!pudp)
+ return -ENOMEM;
+ p4d_populate(NULL, p4dp, pudp);
+ }
+
+ pudp = pud_offset(p4dp, dst_addr);
+ if (pud_none(READ_ONCE(*pudp))) {
+ pmdp = trans_alloc(info);
+ if (!pmdp)
+ return -ENOMEM;
+ pud_populate(NULL, pudp, pmdp);
+ }
+
+ pmdp = pmd_offset(pudp, dst_addr);
+ if (pmd_none(READ_ONCE(*pmdp))) {
+ ptep = trans_alloc(info);
+ if (!ptep)
+ return -ENOMEM;
+ pmd_populate_kernel(NULL, pmdp, ptep);
+ }
+
+ ptep = pte_offset_kernel(pmdp, dst_addr);
+ set_pte(ptep, pfn_pte(virt_to_pfn(page), pgprot));
+
+ return 0;
+}
+
+/*
+ * The page we want to idmap may be outside the range covered by VA_BITS that
+ * can be built using the kernel's p?d_populate() helpers. As a one-off, for a
+ * single page, we build these page tables bottom up and just assume it will
+ * need the maximum T0SZ.
+ *
+ * Returns 0 on success, and -ENOMEM on failure.
+ * On success, trans_ttbr0 contains the page table with the idmapped page, and
+ * t0sz is set to the maximum T0SZ for this page.
+ */
+int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
+ unsigned long *t0sz, void *page)
+{
+ phys_addr_t dst_addr = virt_to_phys(page);
+ unsigned long pfn = __phys_to_pfn(dst_addr);
+ int max_msb = (dst_addr & GENMASK(52, 48)) ? 51 : 47;
+ int bits_mapped = PAGE_SHIFT - 4;
+ unsigned long level_mask, prev_level_entry, *levels[4];
+ int this_level, index, level_lsb, level_msb;
+
+ dst_addr &= PAGE_MASK;
+ prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_EXEC));
+
+ for (this_level = 3; this_level >= 0; this_level--) {
+ levels[this_level] = trans_alloc(info);
+ if (!levels[this_level])
+ return -ENOMEM;
+
+ level_lsb = ARM64_HW_PGTABLE_LEVEL_SHIFT(this_level);
+ level_msb = min(level_lsb + bits_mapped, max_msb);
+ level_mask = GENMASK_ULL(level_msb, level_lsb);
+
+ index = (dst_addr & level_mask) >> level_lsb;
+ *(levels[this_level] + index) = prev_level_entry;
+
+ pfn = virt_to_pfn(levels[this_level]);
+ prev_level_entry = pte_val(pfn_pte(pfn,
+ __pgprot(PMD_TYPE_TABLE)));
+
+ if (level_msb == max_msb)
+ break;
+ }
+
+ *trans_ttbr0 = phys_to_ttbr(__pfn_to_phys(pfn));
+ *t0sz = TCR_T0SZ(max_msb + 1);
+
+ return 0;
+}
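
For orientation, a minimal usage sketch of the new interface. The allocator callback, the GFP handling and build_resume_tables() are illustrative assumptions, loosely modelled on how the hibernate path consumes trans_pgd_create_copy(); they are not part of this patch.

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <asm/trans_pgd.h>

/* Hypothetical allocator: hand out hibernate-safe pages. */
static void *sketch_alloc_page(void *arg)
{
	return (void *)get_safe_page(*(gfp_t *)arg);
}

static int build_resume_tables(pgd_t **tmp_pg_dir)
{
	gfp_t gfp = GFP_ATOMIC;
	struct trans_pgd_info info = {
		.trans_alloc_page	= sketch_alloc_page,
		.trans_alloc_arg	= &gfp,
	};

	/* Copy the whole linear map into freshly allocated tables. */
	return trans_pgd_create_copy(&info, tmp_pg_dir, PAGE_OFFSET, PAGE_END);
}
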
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index ef9f1d5e989d..f7b194878a99 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -875,10 +875,18 @@ emit_cond_jmp:
}
break;
- /* STX XADD: lock *(u32 *)(dst + off) += src */
- case BPF_STX | BPF_XADD | BPF_W:
- /* STX XADD: lock *(u64 *)(dst + off) += src */
- case BPF_STX | BPF_XADD | BPF_DW:
+ case BPF_STX | BPF_ATOMIC | BPF_W:
+ case BPF_STX | BPF_ATOMIC | BPF_DW:
+ if (insn->imm != BPF_ADD) {
+ pr_err_once("unknown atomic op code %02x\n", insn->imm);
+ return -EINVAL;
+ }
+
+ /* STX XADD: lock *(u32 *)(dst + off) += src
+ * and
+ * STX XADD: lock *(u64 *)(dst + off) += src
+ */
+
if (!off) {
reg = dst;
} else {
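
For orientation, the instruction shape this JIT now accepts after the BPF_XADD to BPF_ATOMIC rename; a sketch only, with an open-coded struct standing in for the encoding macros, and any imm other than BPF_ADD taking the -EINVAL path above.

#include <linux/bpf.h>
#include <linux/filter.h>

/* lock *(u64 *)(R1 + 0) += R2 -- the only atomic op handled here */
static const struct bpf_insn atomic_add_insn = {
	.code	 = BPF_STX | BPF_ATOMIC | BPF_DW,
	.dst_reg = BPF_REG_1,
	.src_reg = BPF_REG_2,
	.off	 = 0,
	.imm	 = BPF_ADD,
};
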
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
deleted file mode 100644
index bdeeac28b1be..000000000000
--- a/arch/c6x/Kconfig
+++ /dev/null
@@ -1,113 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.rst.
-#
-
-config C6X
- def_bool y
- select ARCH_32BIT_OFF_T
- select ARCH_HAS_BINFMT_FLAT
- select ARCH_HAS_SYNC_DMA_FOR_CPU
- select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select CLKDEV_LOOKUP
- select HAVE_LEGACY_CLK
- select GENERIC_ATOMIC64
- select GENERIC_IRQ_SHOW
- select HAVE_ARCH_TRACEHOOK
- select SPARSE_IRQ
- select IRQ_DOMAIN
- select OF
- select OF_EARLY_FLATTREE
- select MODULES_USE_ELF_RELA
- select MMU_GATHER_NO_RANGE if MMU
- select SET_FS
-
-config MMU
- def_bool n
-
-config FPU
- def_bool n
-
-config GENERIC_CALIBRATE_DELAY
- def_bool y
-
-config GENERIC_HWEIGHT
- def_bool y
-
-config GENERIC_BUG
- def_bool y
- depends on BUG
-
-config C6X_BIG_KERNEL
- bool "Build a big kernel"
- help
- The C6X function call instruction has a limited range of +/- 2MiB.
- This is sufficient for most kernels, but some kernel configurations
- with lots of compiled-in functionality may require a larger range
- for function calls. Use this option to have the compiler generate
- function calls with 32-bit range. This will make the kernel both
- larger and slower.
-
- If unsure, say N.
-
-# Use the generic interrupt handling code in kernel/irq/
-
-config CMDLINE_BOOL
- bool "Default bootloader kernel arguments"
-
-config CMDLINE
- string "Kernel command line"
- depends on CMDLINE_BOOL
- default "console=ttyS0,57600"
- help
- On some architectures there is currently no way for the boot loader
- to pass arguments to the kernel. For these architectures, you should
- supply some command-line options at build time by entering them
- here.
-
-config CMDLINE_FORCE
- bool "Force default kernel command string"
- depends on CMDLINE_BOOL
- default n
- help
- Set this to have arguments from the default kernel command string
- override those passed by the boot loader.
-
-config CPU_BIG_ENDIAN
- bool "Build big-endian kernel"
- default n
- help
- Say Y if you plan on running a kernel in big-endian mode.
- Note that your board must be properly built and your board
- port must properly enable any big-endian related features
- of your chipset/board/processor.
-
-config FORCE_MAX_ZONEORDER
- int "Maximum zone order"
- default "13"
- help
- The kernel memory allocator divides physically contiguous memory
- blocks into "zones", where each zone is a power of two number of
- pages. This option selects the largest power of two that the kernel
- keeps in the memory allocator. If you need to allocate very large
- blocks of physically contiguous memory, then you may need to
- increase this value.
-
- This config option is actually maximum order plus one. For example,
- a value of 11 means that the largest free memory block is 2^10 pages.
-
-menu "Processor type and features"
-
-source "arch/c6x/platforms/Kconfig"
-
-config KERNEL_RAM_BASE_ADDRESS
- hex "Virtual address of memory base"
- default 0xe0000000 if SOC_TMS320C6455
- default 0xe0000000 if SOC_TMS320C6457
- default 0xe0000000 if SOC_TMS320C6472
- default 0x80000000
-
-source "kernel/Kconfig.hz"
-
-endmenu
diff --git a/arch/c6x/Kconfig.debug b/arch/c6x/Kconfig.debug
deleted file mode 100644
index c299e0d8eca3..000000000000
--- a/arch/c6x/Kconfig.debug
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-config ACCESS_CHECK
- bool "Check the user pointer address"
- default y
- help
- Usually the pointer transfer from user space is checked to see if its
- address is in the kernel space.
-
- Say N here to disable that check to improve the performance.
diff --git a/arch/c6x/Makefile b/arch/c6x/Makefile
deleted file mode 100644
index b7aa854f7008..000000000000
--- a/arch/c6x/Makefile
+++ /dev/null
@@ -1,60 +0,0 @@
-#
-# linux/arch/c6x/Makefile
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-
-KBUILD_DEFCONFIG := dsk6455_defconfig
-
-cflags-y += -mno-dsbt -msdata=none -D__linux__
-
-cflags-$(CONFIG_C6X_BIG_KERNEL) += -mlong-calls
-
-KBUILD_CFLAGS_MODULE += -mlong-calls -mno-dsbt -msdata=none
-
-CHECKFLAGS +=
-
-KBUILD_CFLAGS += $(cflags-y)
-KBUILD_AFLAGS += $(cflags-y)
-
-ifdef CONFIG_CPU_BIG_ENDIAN
-KBUILD_CFLAGS += -mbig-endian
-KBUILD_AFLAGS += -mbig-endian
-LINKFLAGS += -mbig-endian
-KBUILD_LDFLAGS += -mbig-endian -EB
-CHECKFLAGS += -D_BIG_ENDIAN
-endif
-
-head-y := arch/c6x/kernel/head.o
-core-y += arch/c6x/kernel/ arch/c6x/mm/ arch/c6x/platforms/
-libs-y += arch/c6x/lib/
-
-# Default to vmlinux.bin, override when needed
-all: vmlinux.bin
-
-boot := arch/$(ARCH)/boot
-
-# Are we making a dtbImage.<boardname> target? If so, crack out the boardname
-DTB:=$(subst dtbImage.,,$(filter dtbImage.%, $(MAKECMDGOALS)))
-export DTB
-
-core-y += $(boot)/dts/
-
-# With make 3.82 we cannot mix normal and wildcard targets
-
-vmlinux.bin: vmlinux
- $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
-
-dtbImage.%: vmlinux
- $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
-
-archclean:
- $(Q)$(MAKE) $(clean)=$(boot)
-
-define archhelp
- @echo ' vmlinux.bin - Binary kernel image (arch/$(ARCH)/boot/vmlinux.bin)'
- @echo ' dtbImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
- @echo ' - stripped elf with fdt blob'
-endef
diff --git a/arch/c6x/boot/dts/Makefile b/arch/c6x/boot/dts/Makefile
deleted file mode 100644
index f438285c3640..000000000000
--- a/arch/c6x/boot/dts/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for device trees
-#
-
-DTC_FLAGS ?= -p 1024
-
-dtb-$(CONFIG_SOC_TMS320C6455) += dsk6455.dtb
-dtb-$(CONFIG_SOC_TMS320C6457) += evmc6457.dtb
-dtb-$(CONFIG_SOC_TMS320C6472) += evmc6472.dtb
-dtb-$(CONFIG_SOC_TMS320C6474) += evmc6474.dtb
-dtb-$(CONFIG_SOC_TMS320C6678) += evmc6678.dtb
-
-ifneq ($(DTB),)
-obj-y += $(DTB).dtb.o
-endif
diff --git a/arch/c6x/boot/dts/dsk6455.dts b/arch/c6x/boot/dts/dsk6455.dts
deleted file mode 100644
index fa904f2916b5..000000000000
--- a/arch/c6x/boot/dts/dsk6455.dts
+++ /dev/null
@@ -1,57 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/c6x/boot/dts/dsk6455.dts
- *
- * DSK6455 Evaluation Platform For TMS320C6455
- * Copyright (C) 2011 Texas Instruments Incorporated
- *
- * Author: Mark Salter <msalter@redhat.com>
- */
-
-/dts-v1/;
-
-/include/ "tms320c6455.dtsi"
-
-/ {
- model = "Spectrum Digital DSK6455";
- compatible = "spectrum-digital,dsk6455";
-
- chosen {
- bootargs = "root=/dev/nfs ip=dhcp rw";
- };
-
- memory {
- device_type = "memory";
- reg = <0xE0000000 0x08000000>;
- };
-
- soc {
- megamod_pic: interrupt-controller@1800000 {
- interrupts = < 12 13 14 15 >;
- };
-
- emifa@70000000 {
- flash@3,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "cfi-flash";
- reg = <0x3 0x0 0x400000>;
- bank-width = <1>;
- device-width = <1>;
- partition@0 {
- reg = <0x0 0x400000>;
- label = "NOR";
- };
- };
- };
-
- timer1: timer@2980000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 69 >;
- };
-
- clock-controller@029a0000 {
- clock-frequency = <50000000>;
- };
- };
-};
diff --git a/arch/c6x/boot/dts/evmc6457.dts b/arch/c6x/boot/dts/evmc6457.dts
deleted file mode 100644
index 73e1d43b51ce..000000000000
--- a/arch/c6x/boot/dts/evmc6457.dts
+++ /dev/null
@@ -1,43 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/c6x/boot/dts/evmc6457.dts
- *
- * EVMC6457 Evaluation Platform For TMS320C6457
- *
- * Copyright (C) 2011 Texas Instruments Incorporated
- *
- * Author: Mark Salter <msalter@redhat.com>
- */
-
-/dts-v1/;
-
-/include/ "tms320c6457.dtsi"
-
-/ {
- model = "eInfochips EVMC6457";
- compatible = "einfochips,evmc6457";
-
- chosen {
- bootargs = "console=hvc root=/dev/nfs ip=dhcp rw";
- };
-
- memory {
- device_type = "memory";
- reg = <0xE0000000 0x10000000>;
- };
-
- soc {
- megamod_pic: interrupt-controller@1800000 {
- interrupts = < 12 13 14 15 >;
- };
-
- timer0: timer@2940000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 67 >;
- };
-
- clock-controller@29a0000 {
- clock-frequency = <60000000>;
- };
- };
-};
diff --git a/arch/c6x/boot/dts/evmc6472.dts b/arch/c6x/boot/dts/evmc6472.dts
deleted file mode 100644
index 4878b78919fa..000000000000
--- a/arch/c6x/boot/dts/evmc6472.dts
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/c6x/boot/dts/evmc6472.dts
- *
- * EVMC6472 Evaluation Platform For TMS320C6472
- *
- * Copyright (C) 2011 Texas Instruments Incorporated
- *
- * Author: Mark Salter <msalter@redhat.com>
- */
-
-/dts-v1/;
-
-/include/ "tms320c6472.dtsi"
-
-/ {
- model = "eInfochips EVMC6472";
- compatible = "einfochips,evmc6472";
-
- chosen {
- bootargs = "console=hvc root=/dev/nfs ip=dhcp rw";
- };
-
- memory {
- device_type = "memory";
- reg = <0xE0000000 0x10000000>;
- };
-
- soc {
- megamod_pic: interrupt-controller@1800000 {
- interrupts = < 12 13 14 15 >;
- };
-
- timer0: timer@25e0000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 16 >;
- };
-
- timer1: timer@25f0000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 16 >;
- };
-
- timer2: timer@2600000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 16 >;
- };
-
- timer3: timer@2610000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 16 >;
- };
-
- timer4: timer@2620000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 16 >;
- };
-
- timer5: timer@2630000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 16 >;
- };
-
- clock-controller@29a0000 {
- clock-frequency = <25000000>;
- };
- };
-};
diff --git a/arch/c6x/boot/dts/evmc6474.dts b/arch/c6x/boot/dts/evmc6474.dts
deleted file mode 100644
index d10746453217..000000000000
--- a/arch/c6x/boot/dts/evmc6474.dts
+++ /dev/null
@@ -1,53 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/c6x/boot/dts/evmc6474.dts
- *
- * EVMC6474 Evaluation Platform For TMS320C6474
- *
- * Copyright (C) 2011 Texas Instruments Incorporated
- *
- * Author: Mark Salter <msalter@redhat.com>
- */
-
-/dts-v1/;
-
-/include/ "tms320c6474.dtsi"
-
-/ {
- model = "Spectrum Digital EVMC6474";
- compatible = "spectrum-digital,evmc6474";
-
- chosen {
- bootargs = "console=hvc root=/dev/nfs ip=dhcp rw";
- };
-
- memory {
- device_type = "memory";
- reg = <0x80000000 0x08000000>;
- };
-
- soc {
- megamod_pic: interrupt-controller@1800000 {
- interrupts = < 12 13 14 15 >;
- };
-
- timer3: timer@2940000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 39 >;
- };
-
- timer4: timer@2950000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 41 >;
- };
-
- timer5: timer@2960000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 43 >;
- };
-
- clock-controller@29a0000 {
- clock-frequency = <50000000>;
- };
- };
-};
diff --git a/arch/c6x/boot/dts/evmc6678.dts b/arch/c6x/boot/dts/evmc6678.dts
deleted file mode 100644
index 5e6c0961e7b2..000000000000
--- a/arch/c6x/boot/dts/evmc6678.dts
+++ /dev/null
@@ -1,78 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/c6x/boot/dts/evmc6678.dts
- *
- * EVMC6678 Evaluation Platform For TMS320C6678
- *
- * Copyright (C) 2012 Texas Instruments Incorporated
- *
- * Author: Ken Cox <jkc@redhat.com>
- */
-
-/dts-v1/;
-
-/include/ "tms320c6678.dtsi"
-
-/ {
- model = "Advantech EVMC6678";
- compatible = "advantech,evmc6678";
-
- chosen {
- bootargs = "root=/dev/nfs ip=dhcp rw";
- };
-
- memory {
- device_type = "memory";
- reg = <0x80000000 0x20000000>;
- };
-
- soc {
- megamod_pic: interrupt-controller@1800000 {
- interrupts = < 12 13 14 15 >;
- };
-
- timer8: timer@2280000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 66 >;
- };
-
- timer9: timer@2290000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 68 >;
- };
-
- timer10: timer@22A0000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 70 >;
- };
-
- timer11: timer@22B0000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 72 >;
- };
-
- timer12: timer@22C0000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 74 >;
- };
-
- timer13: timer@22D0000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 76 >;
- };
-
- timer14: timer@22E0000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 78 >;
- };
-
- timer15: timer@22F0000 {
- interrupt-parent = <&megamod_pic>;
- interrupts = < 80 >;
- };
-
- clock-controller@2310000 {
- clock-frequency = <100000000>;
- };
- };
-};
diff --git a/arch/c6x/boot/dts/tms320c6455.dtsi b/arch/c6x/boot/dts/tms320c6455.dtsi
deleted file mode 100644
index 0b21cb30343b..000000000000
--- a/arch/c6x/boot/dts/tms320c6455.dtsi
+++ /dev/null
@@ -1,97 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- device_type = "cpu";
- model = "ti,c64x+";
- reg = <0>;
- };
- };
-
- soc {
- compatible = "simple-bus";
- model = "tms320c6455";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- core_pic: interrupt-controller {
- interrupt-controller;
- #interrupt-cells = <1>;
- compatible = "ti,c64x+core-pic";
- };
-
- /*
- * Megamodule interrupt controller
- */
- megamod_pic: interrupt-controller@1800000 {
- compatible = "ti,c64x+megamod-pic";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0x1800000 0x1000>;
- interrupt-parent = <&core_pic>;
- };
-
- cache-controller@1840000 {
- compatible = "ti,c64x+cache";
- reg = <0x01840000 0x8400>;
- };
-
- emifa@70000000 {
- compatible = "ti,c64x+emifa", "simple-bus";
- #address-cells = <2>;
- #size-cells = <1>;
- reg = <0x70000000 0x100>;
- ranges = <0x2 0x0 0xa0000000 0x00000008
- 0x3 0x0 0xb0000000 0x00400000
- 0x4 0x0 0xc0000000 0x10000000
- 0x5 0x0 0xD0000000 0x10000000>;
-
- ti,dscr-dev-enable = <13>;
- ti,emifa-burst-priority = <255>;
- ti,emifa-ce-config = <0x00240120
- 0x00240120
- 0x00240122
- 0x00240122>;
- };
-
- timer1: timer@2980000 {
- compatible = "ti,c64x+timer64";
- reg = <0x2980000 0x40>;
- ti,dscr-dev-enable = <4>;
- };
-
- clock-controller@029a0000 {
- compatible = "ti,c6455-pll", "ti,c64x+pll";
- reg = <0x029a0000 0x200>;
- ti,c64x+pll-bypass-delay = <1440>;
- ti,c64x+pll-reset-delay = <15360>;
- ti,c64x+pll-lock-delay = <24000>;
- };
-
- device-state-config-regs@2a80000 {
- compatible = "ti,c64x+dscr";
- reg = <0x02a80000 0x41000>;
-
- ti,dscr-devstat = <0>;
- ti,dscr-silicon-rev = <8 28 0xf>;
- ti,dscr-rmii-resets = <0 0x40020 0x00040000>;
-
- ti,dscr-locked-regs = <0x40008 0x40004 0x0f0a0b00>;
- ti,dscr-devstate-ctl-regs =
- <0 12 0x40008 1 0 0 2
- 12 1 0x40008 3 0 30 2
- 13 2 0x4002c 1 0xffffffff 0 1>;
- ti,dscr-devstate-stat-regs =
- <0 10 0x40014 1 0 0 3
- 10 2 0x40018 1 0 0 3>;
- };
- };
-};
diff --git a/arch/c6x/boot/dts/tms320c6457.dtsi b/arch/c6x/boot/dts/tms320c6457.dtsi
deleted file mode 100644
index e49f7ae19124..000000000000
--- a/arch/c6x/boot/dts/tms320c6457.dtsi
+++ /dev/null
@@ -1,69 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- device_type = "cpu";
- model = "ti,c64x+";
- reg = <0>;
- };
- };
-
- soc {
- compatible = "simple-bus";
- model = "tms320c6457";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- core_pic: interrupt-controller {
- interrupt-controller;
- #interrupt-cells = <1>;
- compatible = "ti,c64x+core-pic";
- };
-
- megamod_pic: interrupt-controller@1800000 {
- compatible = "ti,c64x+megamod-pic";
- interrupt-controller;
- #interrupt-cells = <1>;
- interrupt-parent = <&core_pic>;
- reg = <0x1800000 0x1000>;
- };
-
- cache-controller@1840000 {
- compatible = "ti,c64x+cache";
- reg = <0x01840000 0x8400>;
- };
-
- device-state-controller@2880800 {
- compatible = "ti,c64x+dscr";
- reg = <0x02880800 0x400>;
-
- ti,dscr-devstat = <0x20>;
- ti,dscr-silicon-rev = <0x18 28 0xf>;
- ti,dscr-mac-fuse-regs = <0x114 3 4 5 6
- 0x118 0 0 1 2>;
- ti,dscr-kick-regs = <0x38 0x83E70B13
- 0x3c 0x95A4F1E0>;
- };
-
- timer0: timer@2940000 {
- compatible = "ti,c64x+timer64";
- reg = <0x2940000 0x40>;
- };
-
- clock-controller@29a0000 {
- compatible = "ti,c6457-pll", "ti,c64x+pll";
- reg = <0x029a0000 0x200>;
- ti,c64x+pll-bypass-delay = <300>;
- ti,c64x+pll-reset-delay = <24000>;
- ti,c64x+pll-lock-delay = <50000>;
- };
- };
-};
diff --git a/arch/c6x/boot/dts/tms320c6472.dtsi b/arch/c6x/boot/dts/tms320c6472.dtsi
deleted file mode 100644
index 9dd4b04e78ef..000000000000
--- a/arch/c6x/boot/dts/tms320c6472.dtsi
+++ /dev/null
@@ -1,135 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- device_type = "cpu";
- reg = <0>;
- model = "ti,c64x+";
- };
- cpu@1 {
- device_type = "cpu";
- reg = <1>;
- model = "ti,c64x+";
- };
- cpu@2 {
- device_type = "cpu";
- reg = <2>;
- model = "ti,c64x+";
- };
- cpu@3 {
- device_type = "cpu";
- reg = <3>;
- model = "ti,c64x+";
- };
- cpu@4 {
- device_type = "cpu";
- reg = <4>;
- model = "ti,c64x+";
- };
- cpu@5 {
- device_type = "cpu";
- reg = <5>;
- model = "ti,c64x+";
- };
- };
-
- soc {
- compatible = "simple-bus";
- model = "tms320c6472";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- core_pic: interrupt-controller {
- compatible = "ti,c64x+core-pic";
- interrupt-controller;
- #interrupt-cells = <1>;
- };
-
- megamod_pic: interrupt-controller@1800000 {
- compatible = "ti,c64x+megamod-pic";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0x1800000 0x1000>;
- interrupt-parent = <&core_pic>;
- };
-
- cache-controller@1840000 {
- compatible = "ti,c64x+cache";
- reg = <0x01840000 0x8400>;
- };
-
- timer0: timer@25e0000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x01 >;
- reg = <0x25e0000 0x40>;
- };
-
- timer1: timer@25f0000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x02 >;
- reg = <0x25f0000 0x40>;
- };
-
- timer2: timer@2600000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x04 >;
- reg = <0x2600000 0x40>;
- };
-
- timer3: timer@2610000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x08 >;
- reg = <0x2610000 0x40>;
- };
-
- timer4: timer@2620000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x10 >;
- reg = <0x2620000 0x40>;
- };
-
- timer5: timer@2630000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x20 >;
- reg = <0x2630000 0x40>;
- };
-
- clock-controller@29a0000 {
- compatible = "ti,c6472-pll", "ti,c64x+pll";
- reg = <0x029a0000 0x200>;
- ti,c64x+pll-bypass-delay = <200>;
- ti,c64x+pll-reset-delay = <12000>;
- ti,c64x+pll-lock-delay = <80000>;
- };
-
- device-state-controller@2a80000 {
- compatible = "ti,c64x+dscr";
- reg = <0x02a80000 0x1000>;
-
- ti,dscr-devstat = <0>;
- ti,dscr-silicon-rev = <0x70c 16 0xff>;
-
- ti,dscr-mac-fuse-regs = <0x700 1 2 3 4
- 0x704 5 6 0 0>;
-
- ti,dscr-rmii-resets = <0x208 1
- 0x20c 1>;
-
- ti,dscr-locked-regs = <0x200 0x204 0x0a1e183a
- 0x40c 0x420 0xbea7
- 0x41c 0x420 0xbea7>;
-
- ti,dscr-privperm = <0x41c 0xaaaaaaaa>;
-
- ti,dscr-devstate-ctl-regs = <0 13 0x200 1 0 0 1>;
- };
- };
-};
diff --git a/arch/c6x/boot/dts/tms320c6474.dtsi b/arch/c6x/boot/dts/tms320c6474.dtsi
deleted file mode 100644
index 0ef5333629a6..000000000000
--- a/arch/c6x/boot/dts/tms320c6474.dtsi
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- device_type = "cpu";
- reg = <0>;
- model = "ti,c64x+";
- };
- cpu@1 {
- device_type = "cpu";
- reg = <1>;
- model = "ti,c64x+";
- };
- cpu@2 {
- device_type = "cpu";
- reg = <2>;
- model = "ti,c64x+";
- };
- };
-
- soc {
- compatible = "simple-bus";
- model = "tms320c6474";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- core_pic: interrupt-controller {
- interrupt-controller;
- #interrupt-cells = <1>;
- compatible = "ti,c64x+core-pic";
- };
-
- megamod_pic: interrupt-controller@1800000 {
- compatible = "ti,c64x+megamod-pic";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0x1800000 0x1000>;
- interrupt-parent = <&core_pic>;
- };
-
- cache-controller@1840000 {
- compatible = "ti,c64x+cache";
- reg = <0x01840000 0x8400>;
- };
-
- timer3: timer@2940000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x04 >;
- reg = <0x2940000 0x40>;
- };
-
- timer4: timer@2950000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x02 >;
- reg = <0x2950000 0x40>;
- };
-
- timer5: timer@2960000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x01 >;
- reg = <0x2960000 0x40>;
- };
-
- device-state-controller@2880800 {
- compatible = "ti,c64x+dscr";
- reg = <0x02880800 0x400>;
-
- ti,dscr-devstat = <0x004>;
- ti,dscr-silicon-rev = <0x014 28 0xf>;
- ti,dscr-mac-fuse-regs = <0x34 3 4 5 6
- 0x38 0 0 1 2>;
- };
-
- clock-controller@29a0000 {
- compatible = "ti,c6474-pll", "ti,c64x+pll";
- reg = <0x029a0000 0x200>;
- ti,c64x+pll-bypass-delay = <120>;
- ti,c64x+pll-reset-delay = <30000>;
- ti,c64x+pll-lock-delay = <60000>;
- };
- };
-};
diff --git a/arch/c6x/boot/dts/tms320c6678.dtsi b/arch/c6x/boot/dts/tms320c6678.dtsi
deleted file mode 100644
index da1e3f2bf062..000000000000
--- a/arch/c6x/boot/dts/tms320c6678.dtsi
+++ /dev/null
@@ -1,147 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- device_type = "cpu";
- reg = <0>;
- model = "ti,c66x";
- };
- cpu@1 {
- device_type = "cpu";
- reg = <1>;
- model = "ti,c66x";
- };
- cpu@2 {
- device_type = "cpu";
- reg = <2>;
- model = "ti,c66x";
- };
- cpu@3 {
- device_type = "cpu";
- reg = <3>;
- model = "ti,c66x";
- };
- cpu@4 {
- device_type = "cpu";
- reg = <4>;
- model = "ti,c66x";
- };
- cpu@5 {
- device_type = "cpu";
- reg = <5>;
- model = "ti,c66x";
- };
- cpu@6 {
- device_type = "cpu";
- reg = <6>;
- model = "ti,c66x";
- };
- cpu@7 {
- device_type = "cpu";
- reg = <7>;
- model = "ti,c66x";
- };
- };
-
- soc {
- compatible = "simple-bus";
- model = "tms320c6678";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- core_pic: interrupt-controller {
- compatible = "ti,c64x+core-pic";
- interrupt-controller;
- #interrupt-cells = <1>;
- };
-
- megamod_pic: interrupt-controller@1800000 {
- compatible = "ti,c64x+megamod-pic";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0x1800000 0x1000>;
- interrupt-parent = <&core_pic>;
- };
-
- cache-controller@1840000 {
- compatible = "ti,c64x+cache";
- reg = <0x01840000 0x8400>;
- };
-
- timer8: timer@2280000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x01 >;
- reg = <0x2280000 0x40>;
- };
-
- timer9: timer@2290000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x02 >;
- reg = <0x2290000 0x40>;
- };
-
- timer10: timer@22A0000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x04 >;
- reg = <0x22A0000 0x40>;
- };
-
- timer11: timer@22B0000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x08 >;
- reg = <0x22B0000 0x40>;
- };
-
- timer12: timer@22C0000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x10 >;
- reg = <0x22C0000 0x40>;
- };
-
- timer13: timer@22D0000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x20 >;
- reg = <0x22D0000 0x40>;
- };
-
- timer14: timer@22E0000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x40 >;
- reg = <0x22E0000 0x40>;
- };
-
- timer15: timer@22F0000 {
- compatible = "ti,c64x+timer64";
- ti,core-mask = < 0x80 >;
- reg = <0x22F0000 0x40>;
- };
-
- clock-controller@2310000 {
- compatible = "ti,c6678-pll", "ti,c64x+pll";
- reg = <0x02310000 0x200>;
- ti,c64x+pll-bypass-delay = <200>;
- ti,c64x+pll-reset-delay = <12000>;
- ti,c64x+pll-lock-delay = <80000>;
- };
-
- device-state-controller@2620000 {
- compatible = "ti,c64x+dscr";
- reg = <0x02620000 0x1000>;
-
- ti,dscr-devstat = <0x20>;
- ti,dscr-silicon-rev = <0x18 28 0xf>;
-
- ti,dscr-mac-fuse-regs = <0x110 1 2 3 4
- 0x114 5 6 0 0>;
-
- };
- };
-};
diff --git a/arch/c6x/configs/dsk6455_defconfig b/arch/c6x/configs/dsk6455_defconfig
deleted file mode 100644
index d764ea4cce7f..000000000000
--- a/arch/c6x/configs/dsk6455_defconfig
+++ /dev/null
@@ -1,42 +0,0 @@
-CONFIG_SOC_TMS320C6455=y
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_SPARSE_IRQ=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_NAMESPACES=y
-# CONFIG_UTS_NS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_EXPERT=y
-# CONFIG_FUTEX is not set
-# CONFIG_SLUB_DEBUG is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_FORCE_LOAD=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=2
-CONFIG_BLK_DEV_RAM_SIZE=17000
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_IOMMU_SUPPORT is not set
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_CRC16=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_MTD=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP_OF=y
diff --git a/arch/c6x/configs/evmc6457_defconfig b/arch/c6x/configs/evmc6457_defconfig
deleted file mode 100644
index 05d0b4a25ab1..000000000000
--- a/arch/c6x/configs/evmc6457_defconfig
+++ /dev/null
@@ -1,39 +0,0 @@
-CONFIG_SOC_TMS320C6457=y
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_SPARSE_IRQ=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_NAMESPACES=y
-# CONFIG_UTS_NS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_EXPERT=y
-# CONFIG_FUTEX is not set
-# CONFIG_SLUB_DEBUG is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_FORCE_LOAD=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
-CONFIG_BOARD_EVM6457=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=2
-CONFIG_BLK_DEV_RAM_SIZE=17000
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_IOMMU_SUPPORT is not set
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_CRC16=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/c6x/configs/evmc6472_defconfig b/arch/c6x/configs/evmc6472_defconfig
deleted file mode 100644
index 8d81fcf86b0e..000000000000
--- a/arch/c6x/configs/evmc6472_defconfig
+++ /dev/null
@@ -1,40 +0,0 @@
-CONFIG_SOC_TMS320C6472=y
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_SPARSE_IRQ=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_NAMESPACES=y
-# CONFIG_UTS_NS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_EXPERT=y
-# CONFIG_FUTEX is not set
-# CONFIG_SLUB_DEBUG is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_FORCE_LOAD=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
-# CONFIG_CMDLINE_FORCE is not set
-CONFIG_BOARD_EVM6472=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=2
-CONFIG_BLK_DEV_RAM_SIZE=17000
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_IOMMU_SUPPORT is not set
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_CRC16=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/c6x/configs/evmc6474_defconfig b/arch/c6x/configs/evmc6474_defconfig
deleted file mode 100644
index 8156a98f3958..000000000000
--- a/arch/c6x/configs/evmc6474_defconfig
+++ /dev/null
@@ -1,40 +0,0 @@
-CONFIG_SOC_TMS320C6474=y
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_SPARSE_IRQ=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_NAMESPACES=y
-# CONFIG_UTS_NS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_EXPERT=y
-# CONFIG_FUTEX is not set
-# CONFIG_SLUB_DEBUG is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_FORCE_LOAD=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
-# CONFIG_CMDLINE_FORCE is not set
-CONFIG_BOARD_EVM6474=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=2
-CONFIG_BLK_DEV_RAM_SIZE=17000
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_IOMMU_SUPPORT is not set
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_CRC16=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/c6x/configs/evmc6678_defconfig b/arch/c6x/configs/evmc6678_defconfig
deleted file mode 100644
index c4f433c25b69..000000000000
--- a/arch/c6x/configs/evmc6678_defconfig
+++ /dev/null
@@ -1,40 +0,0 @@
-CONFIG_SOC_TMS320C6678=y
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_SPARSE_IRQ=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_NAMESPACES=y
-# CONFIG_UTS_NS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_EXPERT=y
-# CONFIG_FUTEX is not set
-# CONFIG_SLUB_DEBUG is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_FORCE_LOAD=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
-# CONFIG_CMDLINE_FORCE is not set
-CONFIG_BOARD_EVM6678=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=2
-CONFIG_BLK_DEV_RAM_SIZE=17000
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_IOMMU_SUPPORT is not set
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_CRC16=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
deleted file mode 100644
index a4ef93a1f7ae..000000000000
--- a/arch/c6x/include/asm/Kbuild
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-generic-y += extable.h
-generic-y += kvm_para.h
-generic-y += mcs_spinlock.h
-generic-y += user.h
diff --git a/arch/c6x/include/asm/asm-offsets.h b/arch/c6x/include/asm/asm-offsets.h
deleted file mode 100644
index d370ee36a182..000000000000
--- a/arch/c6x/include/asm/asm-offsets.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <generated/asm-offsets.h>
diff --git a/arch/c6x/include/asm/bitops.h b/arch/c6x/include/asm/bitops.h
deleted file mode 100644
index 50e618f38a11..000000000000
--- a/arch/c6x/include/asm/bitops.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#ifndef _ASM_C6X_BITOPS_H
-#define _ASM_C6X_BITOPS_H
-
-#ifdef __KERNEL__
-
-#include <linux/bitops.h>
-#include <asm/byteorder.h>
-#include <asm/barrier.h>
-
-/*
- * We are lucky, DSP is perfect for bitops: do it in 3 cycles
- */
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- * Note __ffs(0) = undef, __ffs(1) = 0, __ffs(0x80000000) = 31.
- *
- */
-static inline unsigned long __ffs(unsigned long x)
-{
- asm (" bitr .M1 %0,%0\n"
- " nop\n"
- " lmbd .L1 1,%0,%0\n"
- : "+a"(x));
-
- return x;
-}
-
-/*
- * ffz - find first zero in word.
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-#define ffz(x) __ffs(~(x))
-
-/**
- * fls - find last (most-significant) bit set
- * @x: the word to search
- *
- * This is defined the same way as ffs.
- * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
- */
-static inline int fls(unsigned int x)
-{
- if (!x)
- return 0;
-
- asm (" lmbd .L1 1,%0,%0\n" : "+a"(x));
-
- return 32 - x;
-}
-
-/**
- * ffs - find first bit set
- * @x: the word to search
- *
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- * Note ffs(0) = 0, ffs(1) = 1, ffs(0x80000000) = 32.
- */
-static inline int ffs(int x)
-{
- if (!x)
- return 0;
-
- return __ffs(x) + 1;
-}
-
-#include <asm-generic/bitops/__fls.h>
-#include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/find.h>
-
-#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
-
-#include <asm-generic/bitops/atomic.h>
-#include <asm-generic/bitops/non-atomic.h>
-#include <asm-generic/bitops/le.h>
-#include <asm-generic/bitops/ext2-atomic.h>
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_C6X_BITOPS_H */
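The deleted bitops.h pins down precise edge cases for the bit-search helpers (__ffs(1) == 0, fls(0x80000000) == 32, fls(0) == 0). A minimal portable sketch of those semantics follows; the my_* helpers are hypothetical stand-ins, not the kernel's LMBD-based assembly versions.

/* Generic C equivalents of the edge cases documented in the removed header. */
#include <stdio.h>

static unsigned long my__ffs(unsigned long x)   /* undefined for x == 0 */
{
        unsigned long n = 0;

        while (!(x & 1)) {
                x >>= 1;
                n++;
        }
        return n;
}

static int my_fls(unsigned int x)               /* returns 0 for x == 0 */
{
        int n = 0;

        while (x) {
                x >>= 1;
                n++;
        }
        return n;
}

int main(void)
{
        printf("%lu %d %d\n", my__ffs(8), my_fls(0x80000000u), my_fls(0));
        /* prints "3 32 0" */
        return 0;
}
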
diff --git a/arch/c6x/include/asm/bug.h b/arch/c6x/include/asm/bug.h
deleted file mode 100644
index 1a68676256ee..000000000000
--- a/arch/c6x/include/asm/bug.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#ifndef _ASM_C6X_BUG_H
-#define _ASM_C6X_BUG_H
-
-#include <linux/linkage.h>
-#include <asm-generic/bug.h>
-
-struct pt_regs;
-
-extern void die(char *str, struct pt_regs *fp, int nr);
-extern asmlinkage int process_exception(struct pt_regs *regs);
-extern asmlinkage void enable_exception(void);
-
-#endif /* _ASM_C6X_BUG_H */
diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h
deleted file mode 100644
index 0fa8bf77c954..000000000000
--- a/arch/c6x/include/asm/cache.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2005, 2006, 2009, 2010, 2012 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#ifndef _ASM_C6X_CACHE_H
-#define _ASM_C6X_CACHE_H
-
-#include <linux/irqflags.h>
-#include <linux/init.h>
-
-/*
- * Cache line size
- */
-#define L1D_CACHE_SHIFT 6
-#define L1D_CACHE_BYTES (1 << L1D_CACHE_SHIFT)
-
-#define L1P_CACHE_SHIFT 5
-#define L1P_CACHE_BYTES (1 << L1P_CACHE_SHIFT)
-
-#define L2_CACHE_SHIFT 7
-#define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT)
-
-/*
- * L2 used as cache
- */
-#define L2MODE_SIZE L2MODE_256K_CACHE
-
-/*
- * For practical reasons the L1_CACHE_BYTES defines should not be smaller than
- * the L2 line size
- */
-#define L1_CACHE_SHIFT L2_CACHE_SHIFT
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
-#define L2_CACHE_ALIGN_LOW(x) \
- (((x) & ~(L2_CACHE_BYTES - 1)))
-#define L2_CACHE_ALIGN_UP(x) \
- (((x) + (L2_CACHE_BYTES - 1)) & ~(L2_CACHE_BYTES - 1))
-#define L2_CACHE_ALIGN_CNT(x) \
- (((x) + (sizeof(int) - 1)) & ~(sizeof(int) - 1))
-
-#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
-#define ARCH_SLAB_MINALIGN L1_CACHE_BYTES
-
-/*
- * This is the granularity of hardware cacheability control.
- */
-#define CACHEABILITY_ALIGN 0x01000000
-
-/*
- * Align a physical address to MAR regions
- */
-#define CACHE_REGION_START(v) \
- (((u32) (v)) & ~(CACHEABILITY_ALIGN - 1))
-#define CACHE_REGION_END(v) \
- (((u32) (v) + (CACHEABILITY_ALIGN - 1)) & ~(CACHEABILITY_ALIGN - 1))
-
-extern void __init c6x_cache_init(void);
-
-extern void enable_caching(unsigned long start, unsigned long end);
-extern void disable_caching(unsigned long start, unsigned long end);
-
-extern void L1_cache_off(void);
-extern void L1_cache_on(void);
-
-extern void L1P_cache_global_invalidate(void);
-extern void L1D_cache_global_invalidate(void);
-extern void L1D_cache_global_writeback(void);
-extern void L1D_cache_global_writeback_invalidate(void);
-extern void L2_cache_set_mode(unsigned int mode);
-extern void L2_cache_global_writeback_invalidate(void);
-extern void L2_cache_global_writeback(void);
-
-extern void L1P_cache_block_invalidate(unsigned int start, unsigned int end);
-extern void L1D_cache_block_invalidate(unsigned int start, unsigned int end);
-extern void L1D_cache_block_writeback_invalidate(unsigned int start,
- unsigned int end);
-extern void L1D_cache_block_writeback(unsigned int start, unsigned int end);
-extern void L2_cache_block_invalidate(unsigned int start, unsigned int end);
-extern void L2_cache_block_writeback(unsigned int start, unsigned int end);
-extern void L2_cache_block_writeback_invalidate(unsigned int start,
- unsigned int end);
-extern void L2_cache_block_invalidate_nowait(unsigned int start,
- unsigned int end);
-extern void L2_cache_block_writeback_nowait(unsigned int start,
- unsigned int end);
-
-extern void L2_cache_block_writeback_invalidate_nowait(unsigned int start,
- unsigned int end);
-
-#endif /* _ASM_C6X_CACHE_H */
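The CACHE_REGION_START/END macros in the deleted cache.h round addresses down and up to the 16 MiB (0x01000000) granularity of the MAR cacheability controls. A stand-alone sketch of that rounding; the buffer address and length are illustrative, not taken from the source.

/* Round an address range out to whole 16 MiB cacheability regions. */
#include <stdint.h>
#include <stdio.h>

#define CACHEABILITY_ALIGN 0x01000000u

static uint32_t region_start(uint32_t v)
{
        return v & ~(CACHEABILITY_ALIGN - 1);   /* round down */
}

static uint32_t region_end(uint32_t v)
{
        return (v + CACHEABILITY_ALIGN - 1) & ~(CACHEABILITY_ALIGN - 1); /* round up */
}

int main(void)
{
        /* A hypothetical buffer at 0x80123456 of length 0x2000. */
        printf("0x%08x..0x%08x\n",
               (unsigned)region_start(0x80123456u),
               (unsigned)region_end(0x80123456u + 0x2000u));
        /* prints "0x80000000..0x81000000" */
        return 0;
}
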
diff --git a/arch/c6x/include/asm/cacheflush.h b/arch/c6x/include/asm/cacheflush.h
deleted file mode 100644
index 10922d528de6..000000000000
--- a/arch/c6x/include/asm/cacheflush.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#ifndef _ASM_C6X_CACHEFLUSH_H
-#define _ASM_C6X_CACHEFLUSH_H
-
-#include <linux/spinlock.h>
-
-#include <asm/setup.h>
-#include <asm/cache.h>
-#include <asm/mman.h>
-#include <asm/page.h>
-#include <asm/string.h>
-
-/*
- * physically-indexed cache management
- */
-#define flush_icache_range(s, e) \
-do { \
- L1D_cache_block_writeback((s), (e)); \
- L1P_cache_block_invalidate((s), (e)); \
-} while (0)
-
-#define flush_icache_page(vma, page) \
-do { \
- if ((vma)->vm_flags & PROT_EXEC) \
- L1D_cache_block_writeback_invalidate(page_address(page), \
- (unsigned long) page_address(page) + PAGE_SIZE); \
- L1P_cache_block_invalidate(page_address(page), \
- (unsigned long) page_address(page) + PAGE_SIZE); \
-} while (0)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { \
- memcpy(dst, src, len); \
- flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
-} while (0)
-
-#include <asm-generic/cacheflush.h>
-
-#endif /* _ASM_C6X_CACHEFLUSH_H */
diff --git a/arch/c6x/include/asm/checksum.h b/arch/c6x/include/asm/checksum.h
deleted file mode 100644
index 934918def632..000000000000
--- a/arch/c6x/include/asm/checksum.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2011 Texas Instruments Incorporated
- * Author: Mark Salter <msalter@redhat.com>
- */
-#ifndef _ASM_C6X_CHECKSUM_H
-#define _ASM_C6X_CHECKSUM_H
-
-static inline __wsum
-csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
- __u8 proto, __wsum sum)
-{
- unsigned long long tmp;
-
- asm ("add .d1 %1,%5,%1\n"
- "|| addu .l1 %3,%4,%0\n"
- "addu .l1 %2,%0,%0\n"
-#ifndef CONFIG_CPU_BIG_ENDIAN
- "|| shl .s1 %1,8,%1\n"
-#endif
- "addu .l1 %1,%0,%0\n"
- "add .l1 %P0,%p0,%2\n"
- : "=&a"(tmp), "+a"(len), "+a"(sum)
- : "a" (saddr), "a" (daddr), "a" (proto));
- return sum;
-}
-#define csum_tcpudp_nofold csum_tcpudp_nofold
-
-#define _HAVE_ARCH_CSUM_AND_COPY
-extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
-
-#include <asm-generic/checksum.h>
-
-#endif /* _ASM_C6X_CHECKSUM_H */
diff --git a/arch/c6x/include/asm/clock.h b/arch/c6x/include/asm/clock.h
deleted file mode 100644
index 7b6c42a52ec9..000000000000
--- a/arch/c6x/include/asm/clock.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * TI C64X clock definitions
- *
- * Copyright (C) 2010, 2011 Texas Instruments.
- * Contributed by: Mark Salter <msalter@redhat.com>
- *
- * Copied heavily from arm/mach-davinci/clock.h, so:
- *
- * Copyright (C) 2006-2007 Texas Instruments.
- * Copyright (C) 2008-2009 Deep Root Systems, LLC
- */
-
-#ifndef _ASM_C6X_CLOCK_H
-#define _ASM_C6X_CLOCK_H
-
-#ifndef __ASSEMBLER__
-
-#include <linux/list.h>
-
-/* PLL/Reset register offsets */
-#define PLLCTL 0x100
-#define PLLM 0x110
-#define PLLPRE 0x114
-#define PLLDIV1 0x118
-#define PLLDIV2 0x11c
-#define PLLDIV3 0x120
-#define PLLPOST 0x128
-#define PLLCMD 0x138
-#define PLLSTAT 0x13c
-#define PLLALNCTL 0x140
-#define PLLDCHANGE 0x144
-#define PLLCKEN 0x148
-#define PLLCKSTAT 0x14c
-#define PLLSYSTAT 0x150
-#define PLLDIV4 0x160
-#define PLLDIV5 0x164
-#define PLLDIV6 0x168
-#define PLLDIV7 0x16c
-#define PLLDIV8 0x170
-#define PLLDIV9 0x174
-#define PLLDIV10 0x178
-#define PLLDIV11 0x17c
-#define PLLDIV12 0x180
-#define PLLDIV13 0x184
-#define PLLDIV14 0x188
-#define PLLDIV15 0x18c
-#define PLLDIV16 0x190
-
-/* PLLM register bits */
-#define PLLM_PLLM_MASK 0xff
-#define PLLM_VAL(x) ((x) - 1)
-
-/* PREDIV register bits */
-#define PLLPREDIV_EN BIT(15)
-#define PLLPREDIV_VAL(x) ((x) - 1)
-
-/* PLLCTL register bits */
-#define PLLCTL_PLLEN BIT(0)
-#define PLLCTL_PLLPWRDN BIT(1)
-#define PLLCTL_PLLRST BIT(3)
-#define PLLCTL_PLLDIS BIT(4)
-#define PLLCTL_PLLENSRC BIT(5)
-#define PLLCTL_CLKMODE BIT(8)
-
-/* PLLCMD register bits */
-#define PLLCMD_GOSTAT BIT(0)
-
-/* PLLSTAT register bits */
-#define PLLSTAT_GOSTAT BIT(0)
-
-/* PLLDIV register bits */
-#define PLLDIV_EN BIT(15)
-#define PLLDIV_RATIO_MASK 0x1f
-#define PLLDIV_RATIO(x) ((x) - 1)
-
-struct pll_data;
-
-struct clk {
- struct list_head node;
- struct module *owner;
- const char *name;
- unsigned long rate;
- int usecount;
- u32 flags;
- struct clk *parent;
- struct list_head children; /* list of children */
- struct list_head childnode; /* parent's child list node */
- struct pll_data *pll_data;
- u32 div;
- unsigned long (*recalc) (struct clk *);
- int (*set_rate) (struct clk *clk, unsigned long rate);
- int (*round_rate) (struct clk *clk, unsigned long rate);
-};
-
-/* Clock flags: SoC-specific flags start at BIT(16) */
-#define ALWAYS_ENABLED BIT(1)
-#define CLK_PLL BIT(2) /* PLL-derived clock */
-#define PRE_PLL BIT(3) /* source is before PLL mult/div */
-#define FIXED_DIV_PLL BIT(4) /* fixed divisor from PLL */
-#define FIXED_RATE_PLL BIT(5) /* fixed output rate PLL */
-
-#define MAX_PLL_SYSCLKS 16
-
-struct pll_data {
- void __iomem *base;
- u32 num;
- u32 flags;
- u32 input_rate;
- u32 bypass_delay; /* in loops */
- u32 reset_delay; /* in loops */
- u32 lock_delay; /* in loops */
- struct clk sysclks[MAX_PLL_SYSCLKS + 1];
-};
-
-/* pll_data flag bit */
-#define PLL_HAS_PRE BIT(0)
-#define PLL_HAS_MUL BIT(1)
-#define PLL_HAS_POST BIT(2)
-
-#define CLK(dev, con, ck) \
- { \
- .dev_id = dev, \
- .con_id = con, \
- .clk = ck, \
- } \
-
-extern void c6x_clks_init(struct clk_lookup *clocks);
-extern int clk_register(struct clk *clk);
-extern void clk_unregister(struct clk *clk);
-extern void c64x_setup_clocks(void);
-
-extern struct pll_data c6x_soc_pll1;
-
-extern struct clk clkin1;
-extern struct clk c6x_core_clk;
-extern struct clk c6x_i2c_clk;
-extern struct clk c6x_watchdog_clk;
-extern struct clk c6x_mcbsp1_clk;
-extern struct clk c6x_mcbsp2_clk;
-extern struct clk c6x_mdio_clk;
-
-#endif
-
-#endif /* _ASM_C6X_CLOCK_H */
diff --git a/arch/c6x/include/asm/cmpxchg.h b/arch/c6x/include/asm/cmpxchg.h
deleted file mode 100644
index 6eed628a9e7f..000000000000
--- a/arch/c6x/include/asm/cmpxchg.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#ifndef _ASM_C6X_CMPXCHG_H
-#define _ASM_C6X_CMPXCHG_H
-
-#include <linux/irqflags.h>
-
-/*
- * Misc. functions
- */
-static inline unsigned int __xchg(unsigned int x, volatile void *ptr, int size)
-{
- unsigned int tmp;
- unsigned long flags;
-
- local_irq_save(flags);
-
- switch (size) {
- case 1:
- tmp = 0;
- tmp = *((unsigned char *) ptr);
- *((unsigned char *) ptr) = (unsigned char) x;
- break;
- case 2:
- tmp = 0;
- tmp = *((unsigned short *) ptr);
- *((unsigned short *) ptr) = x;
- break;
- case 4:
- tmp = 0;
- tmp = *((unsigned int *) ptr);
- *((unsigned int *) ptr) = x;
- break;
- }
- local_irq_restore(flags);
- return tmp;
-}
-
-#define xchg(ptr, x) \
- ((__typeof__(*(ptr)))__xchg((unsigned int)(x), (void *) (ptr), \
- sizeof(*(ptr))))
-
-#include <asm-generic/cmpxchg-local.h>
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), \
- (unsigned long)(o), \
- (unsigned long)(n), \
- sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#include <asm-generic/cmpxchg.h>
-
-#endif /* _ASM_C6X_CMPXCHG_H */
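The deleted __xchg() gets its atomicity from local_irq_save() rather than a hardware atomic, which is enough on this uniprocessor !SMP target; the visible contract is simply "store the new value, return the old one". A user-space sketch of that contract, where my_xchg() is a hypothetical stand-in and the IRQ masking is only noted in a comment.

/* Exchange semantics: return the previous value, store the new one. */
#include <stdio.h>

static unsigned int my_xchg(volatile unsigned int *p, unsigned int x)
{
        unsigned int old = *p;  /* done with interrupts masked in the kernel */
        *p = x;
        return old;
}

int main(void)
{
        volatile unsigned int lock = 0;
        unsigned int old = my_xchg(&lock, 1);

        printf("%u %u\n", old, lock);           /* prints "0 1" */
        return 0;
}
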
diff --git a/arch/c6x/include/asm/delay.h b/arch/c6x/include/asm/delay.h
deleted file mode 100644
index 455fc713ae54..000000000000
--- a/arch/c6x/include/asm/delay.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#ifndef _ASM_C6X_DELAY_H
-#define _ASM_C6X_DELAY_H
-
-#include <linux/kernel.h>
-
-extern unsigned int ticks_per_ns_scaled;
-
-static inline void __delay(unsigned long loops)
-{
- uint32_t tmp;
-
- /* 6 cycles per loop */
- asm volatile (" mv .s1 %0,%1\n"
- "0: [%1] b .s1 0b\n"
- " add .l1 -6,%0,%0\n"
- " cmplt .l1 1,%0,%1\n"
- " nop 3\n"
- : "+a"(loops), "=A"(tmp));
-}
-
-static inline void _c6x_tickdelay(unsigned int x)
-{
- uint32_t cnt, endcnt;
-
- asm volatile (" mvc .s2 TSCL,%0\n"
- " add .s2x %0,%1,%2\n"
- " || mvk .l2 1,B0\n"
- "0: [B0] b .s2 0b\n"
- " mvc .s2 TSCL,%0\n"
- " sub .s2 %0,%2,%0\n"
- " cmpgt .l2 0,%0,B0\n"
- " nop 2\n"
- : "=b"(cnt), "+a"(x), "=b"(endcnt) : : "B0");
-}
-
-/* use scaled math to avoid slow division */
-#define C6X_NDELAY_SCALE 10
-
-static inline void _ndelay(unsigned int n)
-{
- _c6x_tickdelay((ticks_per_ns_scaled * n) >> C6X_NDELAY_SCALE);
-}
-
-static inline void _udelay(unsigned int n)
-{
- while (n >= 10) {
- _ndelay(10000);
- n -= 10;
- }
- while (n-- > 0)
- _ndelay(1000);
-}
-
-#define udelay(x) _udelay((unsigned int)(x))
-#define ndelay(x) _ndelay((unsigned int)(x))
-
-#endif /* _ASM_C6X_DELAY_H */
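The "scaled math" comment in the deleted delay.h refers to pre-multiplying the ticks-per-nanosecond ratio by 2^C6X_NDELAY_SCALE so that _ndelay() needs only a multiply and a shift, never a division. A rough sketch of how such a scale factor could be derived from a core clock; the 700 MHz figure is illustrative, not taken from the source.

/* Fixed-point ticks-per-ns: precompute once, then multiply-and-shift. */
#include <stdio.h>

#define NDELAY_SCALE 10

int main(void)
{
        unsigned long long core_hz = 700000000ULL;      /* hypothetical 700 MHz core */
        unsigned int ticks_per_ns_scaled =
                (unsigned int)((core_hz << NDELAY_SCALE) / 1000000000ULL); /* ~716 */

        unsigned int ns = 1000;                          /* requested delay */
        unsigned int ticks = (ticks_per_ns_scaled * ns) >> NDELAY_SCALE;

        printf("scale=%u, %u ns -> %u ticks\n", ticks_per_ns_scaled, ns, ticks);
        /* about 700 ticks, i.e. 1 us at 700 MHz */
        return 0;
}
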
diff --git a/arch/c6x/include/asm/dscr.h b/arch/c6x/include/asm/dscr.h
deleted file mode 100644
index f6b095c3d3f5..000000000000
--- a/arch/c6x/include/asm/dscr.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2011 Texas Instruments Incorporated
- * Author: Mark Salter <msalter@redhat.com>
- */
-#ifndef _ASM_C6X_DSCR_H
-#define _ASM_C6X_DSCR_H
-
-enum dscr_devstate_t {
- DSCR_DEVSTATE_ENABLED,
- DSCR_DEVSTATE_DISABLED,
-};
-
-/*
- * Set the device state of the device with the given ID.
- *
- * Individual drivers should use this to enable or disable the
- * hardware device. The devid used to identify the device being
- * controlled should be a property in the device's tree node.
- */
-extern void dscr_set_devstate(int devid, enum dscr_devstate_t state);
-
-/*
- * Assert or de-assert an RMII reset.
- */
-extern void dscr_rmii_reset(int id, int assert);
-
-extern void dscr_probe(void);
-
-#endif /* _ASM_C6X_DSCR_H */
diff --git a/arch/c6x/include/asm/elf.h b/arch/c6x/include/asm/elf.h
deleted file mode 100644
index ca88acbf560b..000000000000
--- a/arch/c6x/include/asm/elf.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#ifndef _ASM_C6X_ELF_H
-#define _ASM_C6X_ELF_H
-
-/*
- * ELF register definitions..
- */
-#include <asm/ptrace.h>
-
-typedef unsigned long elf_greg_t;
-typedef unsigned long elf_fpreg_t;
-
-#define ELF_NGREG 58
-#define ELF_NFPREG 1
-
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
-
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) ((x)->e_machine == EM_TI_C6000)
-
-#define elf_check_fdpic(x) (1)
-#define elf_check_const_displacement(x) (0)
-
-#define ELF_FDPIC_PLAT_INIT(_regs, _exec_map, _interp_map, _dynamic_addr) \
-do { \
- _regs->b4 = (_exec_map); \
- _regs->a6 = (_interp_map); \
- _regs->b6 = (_dynamic_addr); \
-} while (0)
-
-#define ELF_FDPIC_CORE_EFLAGS 0
-
-/*
- * These are used to set parameters in the core dumps.
- */
-#ifdef __LITTLE_ENDIAN__
-#define ELF_DATA ELFDATA2LSB
-#else
-#define ELF_DATA ELFDATA2MSB
-#endif
-
-#define ELF_CLASS ELFCLASS32
-#define ELF_ARCH EM_TI_C6000
-
-/* Nothing for now. Need to setup DP... */
-#define ELF_PLAT_INIT(_r)
-
-#define ELF_EXEC_PAGESIZE 4096
-
-#define ELF_CORE_COPY_REGS(_dest, _regs) \
- memcpy((char *) &_dest, (char *) _regs, \
- sizeof(struct pt_regs));
-
-/* This yields a mask that user programs can use to figure out what
- instruction set this cpu supports. */
-
-#define ELF_HWCAP (0)
-
-/* This yields a string that ld.so will use to load implementation
- specific libraries for optimization. This is more specific in
- intent than poking at uname or /proc/cpuinfo. */
-
-#define ELF_PLATFORM (NULL)
-
-/* C6X specific section types */
-#define SHT_C6000_UNWIND 0x70000001
-#define SHT_C6000_PREEMPTMAP 0x70000002
-#define SHT_C6000_ATTRIBUTES 0x70000003
-
-/* C6X specific DT_ tags */
-#define DT_C6000_DSBT_BASE 0x70000000
-#define DT_C6000_DSBT_SIZE 0x70000001
-#define DT_C6000_PREEMPTMAP 0x70000002
-#define DT_C6000_DSBT_INDEX 0x70000003
-
-/* C6X specific relocs */
-#define R_C6000_NONE 0
-#define R_C6000_ABS32 1
-#define R_C6000_ABS16 2
-#define R_C6000_ABS8 3
-#define R_C6000_PCR_S21 4
-#define R_C6000_PCR_S12 5
-#define R_C6000_PCR_S10 6
-#define R_C6000_PCR_S7 7
-#define R_C6000_ABS_S16 8
-#define R_C6000_ABS_L16 9
-#define R_C6000_ABS_H16 10
-#define R_C6000_SBR_U15_B 11
-#define R_C6000_SBR_U15_H 12
-#define R_C6000_SBR_U15_W 13
-#define R_C6000_SBR_S16 14
-#define R_C6000_SBR_L16_B 15
-#define R_C6000_SBR_L16_H 16
-#define R_C6000_SBR_L16_W 17
-#define R_C6000_SBR_H16_B 18
-#define R_C6000_SBR_H16_H 19
-#define R_C6000_SBR_H16_W 20
-#define R_C6000_SBR_GOT_U15_W 21
-#define R_C6000_SBR_GOT_L16_W 22
-#define R_C6000_SBR_GOT_H16_W 23
-#define R_C6000_DSBT_INDEX 24
-#define R_C6000_PREL31 25
-#define R_C6000_COPY 26
-#define R_C6000_ALIGN 253
-#define R_C6000_FPHEAD 254
-#define R_C6000_NOCMP 255
-
-#endif /*_ASM_C6X_ELF_H */
diff --git a/arch/c6x/include/asm/flat.h b/arch/c6x/include/asm/flat.h
deleted file mode 100644
index 9e6544b51386..000000000000
--- a/arch/c6x/include/asm/flat.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_C6X_FLAT_H
-#define __ASM_C6X_FLAT_H
-
-#include <asm/unaligned.h>
-
-static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
- u32 *addr)
-{
- *addr = get_unaligned((__force u32 *)rp);
- return 0;
-}
-static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
-{
- put_unaligned(addr, (__force u32 *)rp);
- return 0;
-}
-
-#endif /* __ASM_C6X_FLAT_H */
diff --git a/arch/c6x/include/asm/ftrace.h b/arch/c6x/include/asm/ftrace.h
deleted file mode 100644
index 3701958d3d1c..000000000000
--- a/arch/c6x/include/asm/ftrace.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_C6X_FTRACE_H
-#define _ASM_C6X_FTRACE_H
-
-/* empty */
-
-#endif /* _ASM_C6X_FTRACE_H */
diff --git a/arch/c6x/include/asm/hardirq.h b/arch/c6x/include/asm/hardirq.h
deleted file mode 100644
index f37d07d31040..000000000000
--- a/arch/c6x/include/asm/hardirq.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-
-#ifndef _ASM_C6X_HARDIRQ_H
-#define _ASM_C6X_HARDIRQ_H
-
-extern void ack_bad_irq(int irq);
-#define ack_bad_irq ack_bad_irq
-
-#include <asm-generic/hardirq.h>
-
-#endif /* _ASM_C6X_HARDIRQ_H */
diff --git a/arch/c6x/include/asm/irq.h b/arch/c6x/include/asm/irq.h
deleted file mode 100644
index 9da4d1afd0d7..000000000000
--- a/arch/c6x/include/asm/irq.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- *
- * Large parts taken directly from powerpc.
- */
-#ifndef _ASM_C6X_IRQ_H
-#define _ASM_C6X_IRQ_H
-
-#include <linux/irqdomain.h>
-#include <linux/threads.h>
-#include <linux/list.h>
-#include <linux/radix-tree.h>
-#include <asm/percpu.h>
-
-#define irq_canonicalize(irq) (irq)
-
-/*
- * The C64X+ core has 16 IRQ vectors. One each is used by Reset and NMI. Two
- * are reserved. The remaining 12 vectors are used to route SoC interrupts.
- * These interrupt vectors are prioritized with IRQ 4 having the highest
- * priority and IRQ 15 having the lowest.
- *
- * The C64x+ megamodule provides a PIC which combines SoC IRQ sources into a
- * single core IRQ vector. There are four combined sources, each of which
- * feed into one of the 12 general interrupt vectors. The remaining 8 vectors
- * can each route a single SoC interrupt directly.
- */
-#define NR_PRIORITY_IRQS 16
-
-/* Total number of virq in the platform */
-#define NR_IRQS 256
-
-/* This number is used when no interrupt has been assigned */
-#define NO_IRQ 0
-
-extern void __init init_pic_c64xplus(void);
-
-extern void init_IRQ(void);
-
-struct pt_regs;
-
-extern asmlinkage void c6x_do_IRQ(unsigned int prio, struct pt_regs *regs);
-
-extern unsigned long irq_err_count;
-
-#endif /* _ASM_C6X_IRQ_H */
diff --git a/arch/c6x/include/asm/irqflags.h b/arch/c6x/include/asm/irqflags.h
deleted file mode 100644
index d6cd71c02629..000000000000
--- a/arch/c6x/include/asm/irqflags.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * C6X IRQ flag handling
- *
- * Copyright (C) 2010 Texas Instruments Incorporated
- * Written by Mark Salter (msalter@redhat.com)
- */
-
-#ifndef _ASM_IRQFLAGS_H
-#define _ASM_IRQFLAGS_H
-
-#ifndef __ASSEMBLY__
-
-/* read interrupt enabled status */
-static inline unsigned long arch_local_save_flags(void)
-{
- unsigned long flags;
-
- asm volatile (" mvc .s2 CSR,%0\n" : "=b"(flags));
- return flags;
-}
-
-/* set interrupt enabled status */
-static inline void arch_local_irq_restore(unsigned long flags)
-{
- asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory");
-}
-
-/* unconditionally enable interrupts */
-static inline void arch_local_irq_enable(void)
-{
- unsigned long flags = arch_local_save_flags();
- flags |= 1;
- arch_local_irq_restore(flags);
-}
-
-/* unconditionally disable interrupts */
-static inline void arch_local_irq_disable(void)
-{
- unsigned long flags = arch_local_save_flags();
- flags &= ~1;
- arch_local_irq_restore(flags);
-}
-
-/* get status and disable interrupts */
-static inline unsigned long arch_local_irq_save(void)
-{
- unsigned long flags;
-
- flags = arch_local_save_flags();
- arch_local_irq_restore(flags & ~1);
- return flags;
-}
-
-/* test flags */
-static inline int arch_irqs_disabled_flags(unsigned long flags)
-{
- return (flags & 1) == 0;
-}
-
-/* test hardware interrupt enable bit */
-static inline int arch_irqs_disabled(void)
-{
- return arch_irqs_disabled_flags(arch_local_save_flags());
-}
-
-#endif /* __ASSEMBLY__ */
-#endif /* _ASM_IRQFLAGS_H */
diff --git a/arch/c6x/include/asm/linkage.h b/arch/c6x/include/asm/linkage.h
deleted file mode 100644
index 1ad615da6479..000000000000
--- a/arch/c6x/include/asm/linkage.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_C6X_LINKAGE_H
-#define _ASM_C6X_LINKAGE_H
-
-#ifdef __ASSEMBLER__
-
-#define __ALIGN .align 2
-#define __ALIGN_STR ".align 2"
-
-#ifndef __DSBT__
-#define ENTRY(name) \
- .global name @ \
- __ALIGN @ \
-name:
-#else
-#define ENTRY(name) \
- .global name @ \
- .hidden name @ \
- __ALIGN @ \
-name:
-#endif
-
-#define ENDPROC(name) \
- .type name, @function @ \
- .size name, . - name
-
-#endif
-
-#include <asm-generic/linkage.h>
-
-#endif /* _ASM_C6X_LINKAGE_H */
diff --git a/arch/c6x/include/asm/megamod-pic.h b/arch/c6x/include/asm/megamod-pic.h
deleted file mode 100644
index a0a6d596bf9b..000000000000
--- a/arch/c6x/include/asm/megamod-pic.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _C6X_MEGAMOD_PIC_H
-#define _C6X_MEGAMOD_PIC_H
-
-#ifdef __KERNEL__
-
-extern void __init megamod_pic_init(void);
-
-#endif /* __KERNEL__ */
-#endif /* _C6X_MEGAMOD_PIC_H */
diff --git a/arch/c6x/include/asm/mmu_context.h b/arch/c6x/include/asm/mmu_context.h
deleted file mode 100644
index d2659d0a3297..000000000000
--- a/arch/c6x/include/asm/mmu_context.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_C6X_MMU_CONTEXT_H
-#define _ASM_C6X_MMU_CONTEXT_H
-
-#include <asm-generic/nommu_context.h>
-
-#endif /* _ASM_C6X_MMU_CONTEXT_H */
diff --git a/arch/c6x/include/asm/module.h b/arch/c6x/include/asm/module.h
deleted file mode 100644
index 9fc9f4a8ecc2..000000000000
--- a/arch/c6x/include/asm/module.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- *
- * Updated for 2.6.34 by: Mark Salter (msalter@redhat.com)
- */
-#ifndef _ASM_C6X_MODULE_H
-#define _ASM_C6X_MODULE_H
-
-#include <asm-generic/module.h>
-
-struct loaded_sections {
- unsigned int new_vaddr;
- unsigned int loaded;
-};
-
-#endif /* _ASM_C6X_MODULE_H */
diff --git a/arch/c6x/include/asm/page.h b/arch/c6x/include/asm/page.h
deleted file mode 100644
index 40079899084d..000000000000
--- a/arch/c6x/include/asm/page.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_C6X_PAGE_H
-#define _ASM_C6X_PAGE_H
-
-#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
-
-#include <asm-generic/page.h>
-
-#endif /* _ASM_C6X_PAGE_H */
diff --git a/arch/c6x/include/asm/pgtable.h b/arch/c6x/include/asm/pgtable.h
deleted file mode 100644
index 8a91ceda39fa..000000000000
--- a/arch/c6x/include/asm/pgtable.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#ifndef _ASM_C6X_PGTABLE_H
-#define _ASM_C6X_PGTABLE_H
-
-#include <asm-generic/pgtable-nopud.h>
-
-#include <asm/setup.h>
-#include <asm/page.h>
-
-/*
- * All 32bit addresses are effectively valid for vmalloc...
- * Sort of meaningless for non-VM targets.
- */
-#define VMALLOC_START 0
-#define VMALLOC_END 0xffffffff
-
-#define pgd_present(pgd) (1)
-#define pgd_none(pgd) (0)
-#define pgd_bad(pgd) (0)
-#define pgd_clear(pgdp)
-#define kern_addr_valid(addr) (1)
-
-#define pmd_none(x) (!pmd_val(x))
-#define pmd_present(x) (pmd_val(x))
-#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
-#define pmd_bad(x) (pmd_val(x) & ~PAGE_MASK)
-
-#define PAGE_NONE __pgprot(0) /* these mean nothing to NO_MM */
-#define PAGE_SHARED __pgprot(0) /* these mean nothing to NO_MM */
-#define PAGE_COPY __pgprot(0) /* these mean nothing to NO_MM */
-#define PAGE_READONLY __pgprot(0) /* these mean nothing to NO_MM */
-#define PAGE_KERNEL __pgprot(0) /* these mean nothing to NO_MM */
-#define pgprot_noncached(prot) (prot)
-
-extern void paging_init(void);
-
-#define __swp_type(x) (0)
-#define __swp_offset(x) (0)
-#define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
-#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
-extern unsigned long empty_zero_page;
-
-#define swapper_pg_dir ((pgd_t *) 0)
-
-/*
- * c6x is !MMU, so define the simplest implementation
- */
-#define pgprot_writecombine pgprot_noncached
-
-#endif /* _ASM_C6X_PGTABLE_H */
diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h
deleted file mode 100644
index 1456f5e11de3..000000000000
--- a/arch/c6x/include/asm/processor.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- *
- * Updated for 2.6.34: Mark Salter <msalter@redhat.com>
- */
-#ifndef _ASM_C6X_PROCESSOR_H
-#define _ASM_C6X_PROCESSOR_H
-
-#include <asm/ptrace.h>
-#include <asm/page.h>
-#include <asm/current.h>
-
-/*
- * User space process size. This is mostly meaningless for NOMMU
- * but some C6X processors may have RAM addresses up to 0xFFFFFFFF.
- * Since calls like mmap() can return an address or an error, we
- * have to allow room for error returns when code does something
- * like:
- *
- * addr = do_mmap(...)
- * if ((unsigned long)addr >= TASK_SIZE)
- * ... it's an error code, not an address ...
- *
- * Here, we allow for 4096 error codes which means we really can't
- * use the last 4K page on systems with RAM extending all the way
- * to the end of the 32-bit address space.
- */
-#define TASK_SIZE 0xFFFFF000
-
-/*
- * This decides where the kernel will search for a free chunk of vm
- * space during mmap's. We won't be using it
- */
-#define TASK_UNMAPPED_BASE 0
-
-struct thread_struct {
- unsigned long long b15_14;
- unsigned long long a15_14;
- unsigned long long b13_12;
- unsigned long long a13_12;
- unsigned long long b11_10;
- unsigned long long a11_10;
- unsigned long long ricl_icl;
- unsigned long usp; /* user stack pointer */
- unsigned long pc; /* kernel pc */
- unsigned long wchan;
-};
-
-#define INIT_THREAD \
-{ \
- .usp = 0, \
- .wchan = 0, \
-}
-
-#define INIT_MMAP { \
- &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, \
- NULL, NULL }
-
-#define task_pt_regs(task) \
- ((struct pt_regs *)(THREAD_START_SP + task_stack_page(task)) - 1)
-
-#define alloc_kernel_stack() __get_free_page(GFP_KERNEL)
-#define free_kernel_stack(page) free_page((page))
-
-
-/* Forward declaration, a strange C thing */
-struct task_struct;
-
-extern void start_thread(struct pt_regs *regs, unsigned int pc,
- unsigned long usp);
-
-/* Free all resources held by a thread. */
-static inline void release_thread(struct task_struct *dead_task)
-{
-}
-
-/*
- * saved kernel SP and DP of a blocked thread.
- */
-#ifdef _BIG_ENDIAN
-#define thread_saved_ksp(tsk) \
- (*(unsigned long *)&(tsk)->thread.b15_14)
-#define thread_saved_dp(tsk) \
- (*(((unsigned long *)&(tsk)->thread.b15_14) + 1))
-#else
-#define thread_saved_ksp(tsk) \
- (*(((unsigned long *)&(tsk)->thread.b15_14) + 1))
-#define thread_saved_dp(tsk) \
- (*(unsigned long *)&(tsk)->thread.b15_14)
-#endif
-
-extern unsigned long get_wchan(struct task_struct *p);
-
-#define KSTK_EIP(task) (task_pt_regs(task)->pc)
-#define KSTK_ESP(task) (task_pt_regs(task)->sp)
-
-#define cpu_relax() do { } while (0)
-
-extern const struct seq_operations cpuinfo_op;
-
-/* Reset the board */
-#define HARD_RESET_NOW()
-
-extern unsigned int c6x_core_freq;
-
-
-extern void (*c6x_restart)(void);
-extern void (*c6x_halt)(void);
-
-#endif /* _ASM_C6X_PROCESSOR_H */
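The TASK_SIZE comment in the deleted processor.h describes the usual convention of returning either a user address or a negative errno from mmap-like calls, which is why the top 4 KiB of the 32-bit space is kept out of TASK_SIZE. A small sketch of that check; do_mmap_stub() and the -ENOMEM value are hypothetical stand-ins.

/* Distinguish "address" from "error code" purely by the numeric range. */
#include <stdio.h>

#define TASK_SIZE 0xFFFFF000UL

static unsigned long do_mmap_stub(int fail)
{
        return fail ? (unsigned long)-12 /* -ENOMEM */ : 0x10000UL;
}

int main(void)
{
        unsigned long addr = do_mmap_stub(1);

        if (addr >= TASK_SIZE)                      /* top 4 KiB = error codes */
                printf("error %ld\n", -(long)addr); /* prints "error 12" */
        else
                printf("mapped at 0x%lx\n", addr);
        return 0;
}
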
diff --git a/arch/c6x/include/asm/procinfo.h b/arch/c6x/include/asm/procinfo.h
deleted file mode 100644
index aaa3cb902c43..000000000000
--- a/arch/c6x/include/asm/procinfo.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2010 Texas Instruments Incorporated
- * Author: Mark Salter (msalter@redhat.com)
- */
-#ifndef _ASM_C6X_PROCINFO_H
-#define _ASM_C6X_PROCINFO_H
-
-#ifdef __KERNEL__
-
-struct proc_info_list {
- unsigned int cpu_val;
- unsigned int cpu_mask;
- const char *arch_name;
- const char *elf_name;
- unsigned int elf_hwcap;
-};
-
-#else /* __KERNEL__ */
-#include <asm/elf.h>
-#warning "Please include asm/elf.h instead"
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_C6X_PROCINFO_H */
diff --git a/arch/c6x/include/asm/ptrace.h b/arch/c6x/include/asm/ptrace.h
deleted file mode 100644
index 7cbae382cf37..000000000000
--- a/arch/c6x/include/asm/ptrace.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2004, 2006, 2009, 2010 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- *
- * Updated for 2.6.34: Mark Salter <msalter@redhat.com>
- */
-#ifndef _ASM_C6X_PTRACE_H
-#define _ASM_C6X_PTRACE_H
-
-#include <uapi/asm/ptrace.h>
-
-#ifndef __ASSEMBLY__
-#ifdef _BIG_ENDIAN
-#else
-#endif
-
-#include <linux/linkage.h>
-
-#define user_mode(regs) ((((regs)->tsr) & 0x40) != 0)
-
-#define instruction_pointer(regs) ((regs)->pc)
-#define profile_pc(regs) instruction_pointer(regs)
-#define user_stack_pointer(regs) ((regs)->sp)
-
-extern void show_regs(struct pt_regs *);
-
-extern asmlinkage unsigned long syscall_trace_entry(struct pt_regs *regs);
-extern asmlinkage void syscall_trace_exit(struct pt_regs *regs);
-
-#endif /* __ASSEMBLY__ */
-#endif /* _ASM_C6X_PTRACE_H */
diff --git a/arch/c6x/include/asm/sections.h b/arch/c6x/include/asm/sections.h
deleted file mode 100644
index dc2f15eb3bde..000000000000
--- a/arch/c6x/include/asm/sections.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_C6X_SECTIONS_H
-#define _ASM_C6X_SECTIONS_H
-
-#include <asm-generic/sections.h>
-
-extern char _vectors_start[];
-extern char _vectors_end[];
-
-extern char _data_lma[];
-
-#endif /* _ASM_C6X_SECTIONS_H */
diff --git a/arch/c6x/include/asm/setup.h b/arch/c6x/include/asm/setup.h
deleted file mode 100644
index 5496bccecaa0..000000000000
--- a/arch/c6x/include/asm/setup.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#ifndef _ASM_C6X_SETUP_H
-#define _ASM_C6X_SETUP_H
-
-#include <uapi/asm/setup.h>
-#include <linux/types.h>
-
-#ifndef __ASSEMBLY__
-extern int c6x_add_memory(phys_addr_t start, unsigned long size);
-
-extern unsigned long ram_start;
-extern unsigned long ram_end;
-
-extern int c6x_num_cores;
-extern unsigned int c6x_silicon_rev;
-extern unsigned int c6x_devstat;
-extern unsigned char c6x_fuse_mac[6];
-
-extern void machine_init(unsigned long dt_ptr);
-extern void time_init(void);
-
-extern void coherent_mem_init(u32 start, u32 size);
-
-#endif /* !__ASSEMBLY__ */
-#endif /* _ASM_C6X_SETUP_H */
diff --git a/arch/c6x/include/asm/soc.h b/arch/c6x/include/asm/soc.h
deleted file mode 100644
index 43f50159e59b..000000000000
--- a/arch/c6x/include/asm/soc.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Miscellaneous SoC-specific hooks.
- *
- * Copyright (C) 2011 Texas Instruments Incorporated
- *
- * Author: Mark Salter <msalter@redhat.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-#ifndef _ASM_C6X_SOC_H
-#define _ASM_C6X_SOC_H
-
-struct soc_ops {
- /* Return active exception event or -1 if none */
- int (*get_exception)(void);
-
- /* Assert an event */
- void (*assert_event)(unsigned int evt);
-};
-
-extern struct soc_ops soc_ops;
-
-extern int soc_get_exception(void);
-extern void soc_assert_event(unsigned int event);
-extern int soc_mac_addr(unsigned int index, u8 *addr);
-
-/*
- * for mmio on SoC devices. regs are always same byte order as cpu.
- */
-#define soc_readl(addr) __raw_readl(addr)
-#define soc_writel(b, addr) __raw_writel((b), (addr))
-
-#endif /* _ASM_C6X_SOC_H */
diff --git a/arch/c6x/include/asm/special_insns.h b/arch/c6x/include/asm/special_insns.h
deleted file mode 100644
index d233160aefd4..000000000000
--- a/arch/c6x/include/asm/special_insns.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#ifndef _ASM_C6X_SPECIAL_INSNS_H
-#define _ASM_C6X_SPECIAL_INSNS_H
-
-
-#define get_creg(reg) \
- ({ unsigned int __x; \
- asm volatile ("mvc .s2 " #reg ",%0\n" : "=b"(__x)); __x; })
-
-#define set_creg(reg, v) \
- do { unsigned int __x = (unsigned int)(v); \
- asm volatile ("mvc .s2 %0," #reg "\n" : : "b"(__x)); \
- } while (0)
-
-#define or_creg(reg, n) \
- do { unsigned __x, __n = (unsigned)(n); \
- asm volatile ("mvc .s2 " #reg ",%0\n" \
- "or .l2 %1,%0,%0\n" \
- "mvc .s2 %0," #reg "\n" \
- "nop\n" \
- : "=&b"(__x) : "b"(__n)); \
- } while (0)
-
-#define and_creg(reg, n) \
- do { unsigned __x, __n = (unsigned)(n); \
- asm volatile ("mvc .s2 " #reg ",%0\n" \
- "and .l2 %1,%0,%0\n" \
- "mvc .s2 %0," #reg "\n" \
- "nop\n" \
- : "=&b"(__x) : "b"(__n)); \
- } while (0)
-
-#define get_coreid() (get_creg(DNUM) & 0xff)
-
-/* Set/get IST */
-#define set_ist(x) set_creg(ISTP, x)
-#define get_ist() get_creg(ISTP)
-
-/*
- * Exception management
- */
-#define disable_exception()
-#define get_except_type() get_creg(EFR)
-#define ack_exception(type) set_creg(ECR, 1 << (type))
-#define get_iexcept() get_creg(IERR)
-#define set_iexcept(mask) set_creg(IERR, (mask))
-
-#define _extu(x, s, e) \
- ({ unsigned int __x; \
- asm volatile ("extu .S2 %3,%1,%2,%0\n" : \
- "=b"(__x) : "n"(s), "n"(e), "b"(x)); \
- __x; })
-
-#endif /* _ASM_C6X_SPECIAL_INSNS_H */
diff --git a/arch/c6x/include/asm/string.h b/arch/c6x/include/asm/string.h
deleted file mode 100644
index b290ead40f68..000000000000
--- a/arch/c6x/include/asm/string.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#ifndef _ASM_C6X_STRING_H
-#define _ASM_C6X_STRING_H
-
-#include <asm/page.h>
-#include <linux/linkage.h>
-
-asmlinkage extern void *memcpy(void *to, const void *from, size_t n);
-
-#define __HAVE_ARCH_MEMCPY
-
-#endif /* _ASM_C6X_STRING_H */
diff --git a/arch/c6x/include/asm/switch_to.h b/arch/c6x/include/asm/switch_to.h
deleted file mode 100644
index 36c5332fadae..000000000000
--- a/arch/c6x/include/asm/switch_to.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#ifndef _ASM_C6X_SWITCH_TO_H
-#define _ASM_C6X_SWITCH_TO_H
-
-#include <linux/linkage.h>
-
-#define prepare_to_switch() do { } while (0)
-
-struct task_struct;
-struct thread_struct;
-asmlinkage void *__switch_to(struct thread_struct *prev,
- struct thread_struct *next,
- struct task_struct *tsk);
-
-#define switch_to(prev, next, last) \
- do { \
- current->thread.wchan = (u_long) __builtin_return_address(0); \
- (last) = __switch_to(&(prev)->thread, \
- &(next)->thread, (prev)); \
- mb(); \
- current->thread.wchan = 0; \
- } while (0)
-
-#endif /* _ASM_C6X_SWITCH_TO_H */
diff --git a/arch/c6x/include/asm/syscall.h b/arch/c6x/include/asm/syscall.h
deleted file mode 100644
index 38f3e2284ecd..000000000000
--- a/arch/c6x/include/asm/syscall.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2011 Texas Instruments Incorporated
- * Author: Mark Salter <msalter@redhat.com>
- */
-
-#ifndef __ASM_C6X_SYSCALL_H
-#define __ASM_C6X_SYSCALL_H
-
-#include <uapi/linux/audit.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-
-static inline int syscall_get_nr(struct task_struct *task,
- struct pt_regs *regs)
-{
- return regs->b0;
-}
-
-static inline void syscall_rollback(struct task_struct *task,
- struct pt_regs *regs)
-{
- /* do nothing */
-}
-
-static inline long syscall_get_error(struct task_struct *task,
- struct pt_regs *regs)
-{
- return IS_ERR_VALUE(regs->a4) ? regs->a4 : 0;
-}
-
-static inline long syscall_get_return_value(struct task_struct *task,
- struct pt_regs *regs)
-{
- return regs->a4;
-}
-
-static inline void syscall_set_return_value(struct task_struct *task,
- struct pt_regs *regs,
- int error, long val)
-{
- regs->a4 = error ?: val;
-}
-
-static inline void syscall_get_arguments(struct task_struct *task,
- struct pt_regs *regs,
- unsigned long *args)
-{
- *args++ = regs->a4;
- *args++ = regs->b4;
- *args++ = regs->a6;
- *args++ = regs->b6;
- *args++ = regs->a8;
- *args = regs->b8;
-}
-
-static inline void syscall_set_arguments(struct task_struct *task,
- struct pt_regs *regs,
- const unsigned long *args)
-{
- regs->a4 = *args++;
- regs->b4 = *args++;
- regs->a6 = *args++;
- regs->b6 = *args++;
- regs->a8 = *args++;
- regs->b8 = *args;
-}
-
-static inline int syscall_get_arch(struct task_struct *task)
-{
- return IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
- ? AUDIT_ARCH_C6XBE : AUDIT_ARCH_C6X;
-}
-
-#endif /* __ASM_C6X_SYSCALL_H */
diff --git a/arch/c6x/include/asm/syscalls.h b/arch/c6x/include/asm/syscalls.h
deleted file mode 100644
index df3d05feb153..000000000000
--- a/arch/c6x/include/asm/syscalls.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2011 Texas Instruments Incorporated
- * Author: Mark Salter <msalter@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __ASM_C6X_SYSCALLS_H
-#define __ASM_C6X_SYSCALLS_H
-
-#include <linux/compiler.h>
-#include <linux/linkage.h>
-#include <linux/types.h>
-
-/* The array of function pointers for syscalls. */
-extern void *sys_call_table[];
-
-/* The following are trampolines in entry.S to handle 64-bit arguments */
-extern long sys_pread_c6x(unsigned int fd, char __user *buf,
- size_t count, off_t pos_low, off_t pos_high);
-extern long sys_pwrite_c6x(unsigned int fd, const char __user *buf,
- size_t count, off_t pos_low, off_t pos_high);
-extern long sys_truncate64_c6x(const char __user *path,
- off_t length_low, off_t length_high);
-extern long sys_ftruncate64_c6x(unsigned int fd,
- off_t length_low, off_t length_high);
-extern long sys_fadvise64_c6x(int fd, u32 offset_lo, u32 offset_hi,
- u32 len, int advice);
-extern long sys_fadvise64_64_c6x(int fd, u32 offset_lo, u32 offset_hi,
- u32 len_lo, u32 len_hi, int advice);
-extern long sys_fallocate_c6x(int fd, int mode,
- u32 offset_lo, u32 offset_hi,
- u32 len_lo, u32 len_hi);
-extern int sys_cache_sync(unsigned long s, unsigned long e);
-
-#include <asm-generic/syscalls.h>
-
-#endif /* __ASM_C6X_SYSCALLS_H */
diff --git a/arch/c6x/include/asm/thread_info.h b/arch/c6x/include/asm/thread_info.h
deleted file mode 100644
index dd8913d57189..000000000000
--- a/arch/c6x/include/asm/thread_info.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- *
- * Updated for 2.6.3x: Mark Salter <msalter@redhat.com>
- */
-#ifndef _ASM_C6X_THREAD_INFO_H
-#define _ASM_C6X_THREAD_INFO_H
-
-#ifdef __KERNEL__
-
-#include <asm/page.h>
-
-#ifdef CONFIG_4KSTACKS
-#define THREAD_SIZE 4096
-#define THREAD_SHIFT 12
-#define THREAD_SIZE_ORDER 0
-#else
-#define THREAD_SIZE 8192
-#define THREAD_SHIFT 13
-#define THREAD_SIZE_ORDER 1
-#endif
-
-#define THREAD_START_SP (THREAD_SIZE - 8)
-
-#ifndef __ASSEMBLY__
-
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-/*
- * low level task data.
- */
-struct thread_info {
- struct task_struct *task; /* main task structure */
- unsigned long flags; /* low level flags */
- int cpu; /* cpu we're on */
- int preempt_count; /* 0 = preemptable, <0 = BUG */
- mm_segment_t addr_limit; /* thread address space */
-};
-
-/*
- * macros/functions for gaining access to the thread information structure
- *
- * preempt_count needs to be 1 initially, until the scheduler is functional.
- */
-#define INIT_THREAD_INFO(tsk) \
-{ \
- .task = &tsk, \
- .flags = 0, \
- .cpu = 0, \
- .preempt_count = INIT_PREEMPT_COUNT, \
- .addr_limit = KERNEL_DS, \
-}
-
-/* get the thread information struct of current task */
-static inline __attribute__((const))
-struct thread_info *current_thread_info(void)
-{
- struct thread_info *ti;
- asm volatile (" clr .s2 B15,0,%1,%0\n"
- : "=b" (ti)
- : "Iu5" (THREAD_SHIFT - 1));
- return ti;
-}
-
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-#endif /* __ASSEMBLY__ */
-
-/*
- * thread information flag bit numbers
- * - pending work-to-be-done flags are in LSW
- * - other flags in MSW
- */
-#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
-#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
-#define TIF_SIGPENDING 2 /* signal pending */
-#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
-#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
-#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */
-
-#define TIF_MEMDIE 17 /* OOM killer killed process */
-
-#define TIF_WORK_MASK 0x00007FFE /* work on irq/exception return */
-#define TIF_ALLWORK_MASK 0x00007FFF /* work on any return to u-space */
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_C6X_THREAD_INFO_H */
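The inline assembly in the deleted current_thread_info() clears the low THREAD_SHIFT bits of the stack pointer (B15), i.e. it rounds down to the base of the THREAD_SIZE-aligned kernel stack where thread_info lives. The same computation in portable C, with a made-up stack-pointer value.

/* thread_info sits at the base of the THREAD_SIZE-aligned kernel stack. */
#include <stdio.h>

#define THREAD_SHIFT 13
#define THREAD_SIZE  (1UL << THREAD_SHIFT)      /* 8192 bytes */

int main(void)
{
        unsigned long sp = 0xC0123ABCUL;        /* hypothetical kernel SP */
        unsigned long ti = sp & ~(THREAD_SIZE - 1);

        printf("sp=0x%lx -> thread_info at 0x%lx\n", sp, ti);
        /* prints "... thread_info at 0xc0122000" */
        return 0;
}
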
diff --git a/arch/c6x/include/asm/timer64.h b/arch/c6x/include/asm/timer64.h
deleted file mode 100644
index b850dfef1f79..000000000000
--- a/arch/c6x/include/asm/timer64.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _C6X_TIMER64_H
-#define _C6X_TIMER64_H
-
-extern void __init timer64_init(void);
-
-#endif /* _C6X_TIMER64_H */
diff --git a/arch/c6x/include/asm/timex.h b/arch/c6x/include/asm/timex.h
deleted file mode 100644
index f946ce297e13..000000000000
--- a/arch/c6x/include/asm/timex.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- *
- * Modified for 2.6.34: Mark Salter <msalter@redhat.com>
- */
-#ifndef _ASM_C6X_TIMEX_H
-#define _ASM_C6X_TIMEX_H
-
-#define CLOCK_TICK_RATE ((1000 * 1000000UL) / 6)
-
-/* 64-bit timestamp */
-typedef unsigned long long cycles_t;
-
-static inline cycles_t get_cycles(void)
-{
- unsigned l, h;
-
- asm volatile (" dint\n"
- " mvc .s2 TSCL,%0\n"
- " mvc .s2 TSCH,%1\n"
- " rint\n"
- : "=b"(l), "=b"(h));
- return ((cycles_t)h << 32) | l;
-}
-
-#endif /* _ASM_C6X_TIMEX_H */
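get_cycles() in the deleted timex.h reads the two 32-bit timestamp halves with interrupts briefly disabled and concatenates them into a 64-bit cycles_t. The concatenation step on its own, with hypothetical register values.

/* Combine a low/high 32-bit counter pair into one 64-bit timestamp. */
#include <stdio.h>

typedef unsigned long long cycles_t;

int main(void)
{
        unsigned int tscl = 0xDEADBEEFu;        /* hypothetical low word  */
        unsigned int tsch = 0x00000002u;        /* hypothetical high word */
        cycles_t c = ((cycles_t)tsch << 32) | tscl;

        printf("0x%llx\n", c);                  /* prints "0x2deadbeef" */
        return 0;
}
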
diff --git a/arch/c6x/include/asm/tlb.h b/arch/c6x/include/asm/tlb.h
deleted file mode 100644
index 240ba0febb57..000000000000
--- a/arch/c6x/include/asm/tlb.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_C6X_TLB_H
-#define _ASM_C6X_TLB_H
-
-#include <asm-generic/tlb.h>
-
-#endif /* _ASM_C6X_TLB_H */
diff --git a/arch/c6x/include/asm/traps.h b/arch/c6x/include/asm/traps.h
deleted file mode 100644
index 7e1d31c47680..000000000000
--- a/arch/c6x/include/asm/traps.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#ifndef _ASM_C6X_TRAPS_H
-#define _ASM_C6X_TRAPS_H
-
-#define EXCEPT_TYPE_NXF 31 /* NMI */
-#define EXCEPT_TYPE_EXC 30 /* external exception */
-#define EXCEPT_TYPE_IXF 1 /* internal exception */
-#define EXCEPT_TYPE_SXF 0 /* software exception */
-
-#define EXCEPT_CAUSE_LBX (1 << 7) /* loop buffer exception */
-#define EXCEPT_CAUSE_PRX (1 << 6) /* privilege exception */
-#define EXCEPT_CAUSE_RAX (1 << 5) /* resource access exception */
-#define EXCEPT_CAUSE_RCX (1 << 4) /* resource conflict exception */
-#define EXCEPT_CAUSE_OPX (1 << 3) /* opcode exception */
-#define EXCEPT_CAUSE_EPX (1 << 2) /* execute packet exception */
-#define EXCEPT_CAUSE_FPX (1 << 1) /* fetch packet exception */
-#define EXCEPT_CAUSE_IFX (1 << 0) /* instruction fetch exception */
-
-struct exception_info {
- char *kernel_str;
- int signo;
- int code;
-};
-
-extern int (*c6x_nmi_handler)(struct pt_regs *regs);
-
-#endif /* _ASM_C6X_TRAPS_H */
diff --git a/arch/c6x/include/asm/uaccess.h b/arch/c6x/include/asm/uaccess.h
deleted file mode 100644
index 585adf9201b7..000000000000
--- a/arch/c6x/include/asm/uaccess.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2011 Texas Instruments Incorporated
- * Author: Mark Salter <msalter@redhat.com>
- */
-#ifndef _ASM_C6X_UACCESS_H
-#define _ASM_C6X_UACCESS_H
-
-#include <linux/types.h>
-#include <linux/compiler.h>
-#include <linux/string.h>
-
-/*
- * C6X supports unaligned 32 and 64 bit loads and stores.
- */
-static inline __must_check unsigned long
-raw_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- u32 tmp32;
- u64 tmp64;
-
- if (__builtin_constant_p(n)) {
- switch (n) {
- case 1:
- *(u8 *)to = *(u8 __force *)from;
- return 0;
- case 4:
- asm volatile ("ldnw .d1t1 *%2,%0\n"
- "nop 4\n"
- "stnw .d1t1 %0,*%1\n"
- : "=&a"(tmp32)
- : "A"(to), "a"(from)
- : "memory");
- return 0;
- case 8:
- asm volatile ("ldndw .d1t1 *%2,%0\n"
- "nop 4\n"
- "stndw .d1t1 %0,*%1\n"
- : "=&a"(tmp64)
- : "a"(to), "a"(from)
- : "memory");
- return 0;
- default:
- break;
- }
- }
-
- memcpy(to, (const void __force *)from, n);
- return 0;
-}
-
-static inline __must_check unsigned long
-raw_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- u32 tmp32;
- u64 tmp64;
-
- if (__builtin_constant_p(n)) {
- switch (n) {
- case 1:
- *(u8 __force *)to = *(u8 *)from;
- return 0;
- case 4:
- asm volatile ("ldnw .d1t1 *%2,%0\n"
- "nop 4\n"
- "stnw .d1t1 %0,*%1\n"
- : "=&a"(tmp32)
- : "a"(to), "a"(from)
- : "memory");
- return 0;
- case 8:
- asm volatile ("ldndw .d1t1 *%2,%0\n"
- "nop 4\n"
- "stndw .d1t1 %0,*%1\n"
- : "=&a"(tmp64)
- : "a"(to), "a"(from)
- : "memory");
- return 0;
- default:
- break;
- }
- }
-
- memcpy((void __force *)to, from, n);
- return 0;
-}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-
-extern int _access_ok(unsigned long addr, unsigned long size);
-#ifdef CONFIG_ACCESS_CHECK
-#define __access_ok _access_ok
-#endif
-
-#include <asm-generic/uaccess.h>
-
-#endif /* _ASM_C6X_UACCESS_H */
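The deleted copy helpers use __builtin_constant_p(n) so that copies whose size is a compile-time constant of 1, 4 or 8 bytes can take an unaligned-load/store fast path. A stripped-down sketch of the same compile-time dispatch; copy_small() is hypothetical and falls back to memcpy() instead of emitting C6x assembly.

/* Pick a fixed-size path when the length is known at compile time. */
#include <stdio.h>
#include <string.h>

static inline unsigned long copy_small(void *to, const void *from, unsigned long n)
{
        if (__builtin_constant_p(n) && n == 4) {
                memcpy(to, from, 4);    /* fixed 4-byte path */
                return 0;
        }
        memcpy(to, from, n);            /* generic fallback */
        return 0;
}

int main(void)
{
        char src[8] = "abcdefg", dst[8] = { 0 };

        copy_small(dst, src, 4);        /* literal size: the fast path can be chosen */
        printf("%.4s\n", dst);          /* prints "abcd" */
        return 0;
}
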
diff --git a/arch/c6x/include/asm/unaligned.h b/arch/c6x/include/asm/unaligned.h
deleted file mode 100644
index d628cc170564..000000000000
--- a/arch/c6x/include/asm/unaligned.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- * Rewritten for 2.6.3x: Mark Salter <msalter@redhat.com>
- */
-#ifndef _ASM_C6X_UNALIGNED_H
-#define _ASM_C6X_UNALIGNED_H
-
-#include <linux/swab.h>
-#include <linux/unaligned/generic.h>
-
-/*
- * The C64x+ can do unaligned word and dword accesses in hardware
- * using special load/store instructions.
- */
-
-static inline u16 get_unaligned_le16(const void *p)
-{
- const u8 *_p = p;
- return _p[0] | _p[1] << 8;
-}
-
-static inline u16 get_unaligned_be16(const void *p)
-{
- const u8 *_p = p;
- return _p[0] << 8 | _p[1];
-}
-
-static inline void put_unaligned_le16(u16 val, void *p)
-{
- u8 *_p = p;
- _p[0] = val;
- _p[1] = val >> 8;
-}
-
-static inline void put_unaligned_be16(u16 val, void *p)
-{
- u8 *_p = p;
- _p[0] = val >> 8;
- _p[1] = val;
-}
-
-static inline u32 get_unaligned32(const void *p)
-{
- u32 val = (u32) p;
- asm (" ldnw .d1t1 *%0,%0\n"
- " nop 4\n"
- : "+a"(val));
- return val;
-}
-
-static inline void put_unaligned32(u32 val, void *p)
-{
- asm volatile (" stnw .d2t1 %0,*%1\n"
- : : "a"(val), "b"(p) : "memory");
-}
-
-static inline u64 get_unaligned64(const void *p)
-{
- u64 val;
- asm volatile (" ldndw .d1t1 *%1,%0\n"
- " nop 4\n"
- : "=a"(val) : "a"(p));
- return val;
-}
-
-static inline void put_unaligned64(u64 val, const void *p)
-{
- asm volatile (" stndw .d2t1 %0,*%1\n"
- : : "a"(val), "b"(p) : "memory");
-}
-
-#ifdef CONFIG_CPU_BIG_ENDIAN
-
-#define get_unaligned_le32(p) __swab32(get_unaligned32(p))
-#define get_unaligned_le64(p) __swab64(get_unaligned64(p))
-#define get_unaligned_be32(p) get_unaligned32(p)
-#define get_unaligned_be64(p) get_unaligned64(p)
-#define put_unaligned_le32(v, p) put_unaligned32(__swab32(v), (p))
-#define put_unaligned_le64(v, p) put_unaligned64(__swab64(v), (p))
-#define put_unaligned_be32(v, p) put_unaligned32((v), (p))
-#define put_unaligned_be64(v, p) put_unaligned64((v), (p))
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
-
-#else
-
-#define get_unaligned_le32(p) get_unaligned32(p)
-#define get_unaligned_le64(p) get_unaligned64(p)
-#define get_unaligned_be32(p) __swab32(get_unaligned32(p))
-#define get_unaligned_be64(p) __swab64(get_unaligned64(p))
-#define put_unaligned_le32(v, p) put_unaligned32((v), (p))
-#define put_unaligned_le64(v, p) put_unaligned64((v), (p))
-#define put_unaligned_be32(v, p) put_unaligned32(__swab32(v), (p))
-#define put_unaligned_be64(v, p) put_unaligned64(__swab64(v), (p))
-#define get_unaligned __get_unaligned_le
-#define put_unaligned __put_unaligned_le
-
-#endif
-
-#endif /* _ASM_C6X_UNALIGNED_H */
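With this header gone, a C6x-like target would fall back to composing unaligned 32-bit accesses from byte loads and stores, the same way the 16-bit helpers above already did. A minimal byte-wise sketch of the little-endian pair (standard technique, not the asm-generic code verbatim):

#include <stdint.h>

/* Byte-wise unaligned accessors for little-endian data; safe on any
 * alignment because only single-byte loads and stores are issued. */
static inline uint32_t get_unaligned_le32_sw(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

static inline void put_unaligned_le32_sw(uint32_t val, void *p)
{
	uint8_t *b = p;

	b[0] = (uint8_t)val;
	b[1] = (uint8_t)(val >> 8);
	b[2] = (uint8_t)(val >> 16);
	b[3] = (uint8_t)(val >> 24);
}
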
diff --git a/arch/c6x/include/asm/vmalloc.h b/arch/c6x/include/asm/vmalloc.h
deleted file mode 100644
index 26c6c6696bbd..000000000000
--- a/arch/c6x/include/asm/vmalloc.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef _ASM_C6X_VMALLOC_H
-#define _ASM_C6X_VMALLOC_H
-
-#endif /* _ASM_C6X_VMALLOC_H */
diff --git a/arch/c6x/include/uapi/asm/byteorder.h b/arch/c6x/include/uapi/asm/byteorder.h
deleted file mode 100644
index ab61f867391c..000000000000
--- a/arch/c6x/include/uapi/asm/byteorder.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_C6X_BYTEORDER_H
-#define _ASM_C6X_BYTEORDER_H
-
-#include <asm/types.h>
-
-#ifdef _BIG_ENDIAN
-#include <linux/byteorder/big_endian.h>
-#else /* _BIG_ENDIAN */
-#include <linux/byteorder/little_endian.h>
-#endif /* _BIG_ENDIAN */
-
-#endif /* _ASM_BYTEORDER_H */
diff --git a/arch/c6x/include/uapi/asm/ptrace.h b/arch/c6x/include/uapi/asm/ptrace.h
deleted file mode 100644
index 9b51110a0842..000000000000
--- a/arch/c6x/include/uapi/asm/ptrace.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2004, 2006, 2009, 2010 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- *
- * Updated for 2.6.34: Mark Salter <msalter@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef _UAPI_ASM_C6X_PTRACE_H
-#define _UAPI_ASM_C6X_PTRACE_H
-
-#define BKPT_OPCODE 0x56454314 /* illegal opcode */
-
-#ifdef _BIG_ENDIAN
-#define PT_LO(odd, even) odd
-#define PT_HI(odd, even) even
-#else
-#define PT_LO(odd, even) even
-#define PT_HI(odd, even) odd
-#endif
-
-#define PT_A4_ORG PT_LO(1, 0)
-#define PT_TSR PT_HI(1, 0)
-#define PT_ILC PT_LO(3, 2)
-#define PT_RILC PT_HI(3, 2)
-#define PT_CSR PT_LO(5, 4)
-#define PT_PC PT_HI(5, 4)
-#define PT_B16 PT_LO(7, 6)
-#define PT_B17 PT_HI(7, 6)
-#define PT_B18 PT_LO(9, 8)
-#define PT_B19 PT_HI(9, 8)
-#define PT_B20 PT_LO(11, 10)
-#define PT_B21 PT_HI(11, 10)
-#define PT_B22 PT_LO(13, 12)
-#define PT_B23 PT_HI(13, 12)
-#define PT_B24 PT_LO(15, 14)
-#define PT_B25 PT_HI(15, 14)
-#define PT_B26 PT_LO(17, 16)
-#define PT_B27 PT_HI(17, 16)
-#define PT_B28 PT_LO(19, 18)
-#define PT_B29 PT_HI(19, 18)
-#define PT_B30 PT_LO(21, 20)
-#define PT_B31 PT_HI(21, 20)
-#define PT_B0 PT_LO(23, 22)
-#define PT_B1 PT_HI(23, 22)
-#define PT_B2 PT_LO(25, 24)
-#define PT_B3 PT_HI(25, 24)
-#define PT_B4 PT_LO(27, 26)
-#define PT_B5 PT_HI(27, 26)
-#define PT_B6 PT_LO(29, 28)
-#define PT_B7 PT_HI(29, 28)
-#define PT_B8 PT_LO(31, 30)
-#define PT_B9 PT_HI(31, 30)
-#define PT_B10 PT_LO(33, 32)
-#define PT_B11 PT_HI(33, 32)
-#define PT_B12 PT_LO(35, 34)
-#define PT_B13 PT_HI(35, 34)
-#define PT_A16 PT_LO(37, 36)
-#define PT_A17 PT_HI(37, 36)
-#define PT_A18 PT_LO(39, 38)
-#define PT_A19 PT_HI(39, 38)
-#define PT_A20 PT_LO(41, 40)
-#define PT_A21 PT_HI(41, 40)
-#define PT_A22 PT_LO(43, 42)
-#define PT_A23 PT_HI(43, 42)
-#define PT_A24 PT_LO(45, 44)
-#define PT_A25 PT_HI(45, 44)
-#define PT_A26 PT_LO(47, 46)
-#define PT_A27 PT_HI(47, 46)
-#define PT_A28 PT_LO(49, 48)
-#define PT_A29 PT_HI(49, 48)
-#define PT_A30 PT_LO(51, 50)
-#define PT_A31 PT_HI(51, 50)
-#define PT_A0 PT_LO(53, 52)
-#define PT_A1 PT_HI(53, 52)
-#define PT_A2 PT_LO(55, 54)
-#define PT_A3 PT_HI(55, 54)
-#define PT_A4 PT_LO(57, 56)
-#define PT_A5 PT_HI(57, 56)
-#define PT_A6 PT_LO(59, 58)
-#define PT_A7 PT_HI(59, 58)
-#define PT_A8 PT_LO(61, 60)
-#define PT_A9 PT_HI(61, 60)
-#define PT_A10 PT_LO(63, 62)
-#define PT_A11 PT_HI(63, 62)
-#define PT_A12 PT_LO(65, 64)
-#define PT_A13 PT_HI(65, 64)
-#define PT_A14 PT_LO(67, 66)
-#define PT_A15 PT_HI(67, 66)
-#define PT_B14 PT_LO(69, 68)
-#define PT_B15 PT_HI(69, 68)
-
-#define NR_PTREGS 70
-
-#define PT_DP PT_B14 /* Data Segment Pointer (B14) */
-#define PT_SP PT_B15 /* Stack Pointer (B15) */
-
-#define PTRACE_GETFDPIC 31 /* get the ELF fdpic loadmap address */
-
-#define PTRACE_GETFDPIC_EXEC 0 /* [addr] request the executable loadmap */
-#define PTRACE_GETFDPIC_INTERP 1 /* [addr] request the interpreter loadmap */
-
-#ifndef __ASSEMBLY__
-
-#ifdef _BIG_ENDIAN
-#define REG_PAIR(odd, even) unsigned long odd; unsigned long even
-#else
-#define REG_PAIR(odd, even) unsigned long even; unsigned long odd
-#endif
-
-/*
- * This struct defines the way the registers are stored on the
- * stack during a system call. Fields defined with REG_PAIR
- * are saved and restored using double-word memory operations,
- * which means the word ordering of the pair depends on endianness.
- */
-struct pt_regs {
- REG_PAIR(tsr, orig_a4);
- REG_PAIR(rilc, ilc);
- REG_PAIR(pc, csr);
-
- REG_PAIR(b17, b16);
- REG_PAIR(b19, b18);
- REG_PAIR(b21, b20);
- REG_PAIR(b23, b22);
- REG_PAIR(b25, b24);
- REG_PAIR(b27, b26);
- REG_PAIR(b29, b28);
- REG_PAIR(b31, b30);
-
- REG_PAIR(b1, b0);
- REG_PAIR(b3, b2);
- REG_PAIR(b5, b4);
- REG_PAIR(b7, b6);
- REG_PAIR(b9, b8);
- REG_PAIR(b11, b10);
- REG_PAIR(b13, b12);
-
- REG_PAIR(a17, a16);
- REG_PAIR(a19, a18);
- REG_PAIR(a21, a20);
- REG_PAIR(a23, a22);
- REG_PAIR(a25, a24);
- REG_PAIR(a27, a26);
- REG_PAIR(a29, a28);
- REG_PAIR(a31, a30);
-
- REG_PAIR(a1, a0);
- REG_PAIR(a3, a2);
- REG_PAIR(a5, a4);
- REG_PAIR(a7, a6);
- REG_PAIR(a9, a8);
- REG_PAIR(a11, a10);
- REG_PAIR(a13, a12);
-
- REG_PAIR(a15, a14);
- REG_PAIR(sp, dp);
-};
-
-#endif /* __ASSEMBLY__ */
-#endif /* _UAPI_ASM_C6X_PTRACE_H */
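The PT_LO()/PT_HI() indices above encode where each 32-bit register lands inside a 64-bit save slot: the pairs are written with double-word stores, so the low and high words swap places between little- and big-endian builds. A tracer addressing the saved block by those word indices would compute offsets along these lines (hypothetical helpers, not part of the removed ABI):

#include <stddef.h>

/* Byte offset of one saved register inside the ptrace register block,
 * given its PT_* word index (e.g. PT_SP, PT_PC); NR_PTREGS words total. */
static inline size_t pt_reg_offset(int pt_index)
{
	return (size_t)pt_index * sizeof(unsigned long);
}

/* Fetch one register from a snapshot of the saved block (sketch). */
static inline unsigned long pt_reg_read(const unsigned long *regs,
					int pt_index)
{
	return regs[pt_index];
}
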
diff --git a/arch/c6x/include/uapi/asm/setup.h b/arch/c6x/include/uapi/asm/setup.h
deleted file mode 100644
index e90548cebec3..000000000000
--- a/arch/c6x/include/uapi/asm/setup.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _UAPI_ASM_C6X_SETUP_H
-#define _UAPI_ASM_C6X_SETUP_H
-
-#define COMMAND_LINE_SIZE 1024
-
-#endif /* _UAPI_ASM_C6X_SETUP_H */
diff --git a/arch/c6x/include/uapi/asm/sigcontext.h b/arch/c6x/include/uapi/asm/sigcontext.h
deleted file mode 100644
index 4e5a9a260861..000000000000
--- a/arch/c6x/include/uapi/asm/sigcontext.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef _ASM_C6X_SIGCONTEXT_H
-#define _ASM_C6X_SIGCONTEXT_H
-
-
-struct sigcontext {
- unsigned long sc_mask; /* old sigmask */
- unsigned long sc_sp; /* old user stack pointer */
-
- unsigned long sc_a4;
- unsigned long sc_b4;
- unsigned long sc_a6;
- unsigned long sc_b6;
- unsigned long sc_a8;
- unsigned long sc_b8;
-
- unsigned long sc_a0;
- unsigned long sc_a1;
- unsigned long sc_a2;
- unsigned long sc_a3;
- unsigned long sc_a5;
- unsigned long sc_a7;
- unsigned long sc_a9;
-
- unsigned long sc_b0;
- unsigned long sc_b1;
- unsigned long sc_b2;
- unsigned long sc_b3;
- unsigned long sc_b5;
- unsigned long sc_b7;
- unsigned long sc_b9;
-
- unsigned long sc_a16;
- unsigned long sc_a17;
- unsigned long sc_a18;
- unsigned long sc_a19;
- unsigned long sc_a20;
- unsigned long sc_a21;
- unsigned long sc_a22;
- unsigned long sc_a23;
- unsigned long sc_a24;
- unsigned long sc_a25;
- unsigned long sc_a26;
- unsigned long sc_a27;
- unsigned long sc_a28;
- unsigned long sc_a29;
- unsigned long sc_a30;
- unsigned long sc_a31;
-
- unsigned long sc_b16;
- unsigned long sc_b17;
- unsigned long sc_b18;
- unsigned long sc_b19;
- unsigned long sc_b20;
- unsigned long sc_b21;
- unsigned long sc_b22;
- unsigned long sc_b23;
- unsigned long sc_b24;
- unsigned long sc_b25;
- unsigned long sc_b26;
- unsigned long sc_b27;
- unsigned long sc_b28;
- unsigned long sc_b29;
- unsigned long sc_b30;
- unsigned long sc_b31;
-
- unsigned long sc_csr;
- unsigned long sc_pc;
-};
-
-#endif /* _ASM_C6X_SIGCONTEXT_H */
diff --git a/arch/c6x/include/uapi/asm/swab.h b/arch/c6x/include/uapi/asm/swab.h
deleted file mode 100644
index c407c0497718..000000000000
--- a/arch/c6x/include/uapi/asm/swab.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2011 Texas Instruments Incorporated
- * Author: Mark Salter <msalter@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef _ASM_C6X_SWAB_H
-#define _ASM_C6X_SWAB_H
-
-static inline __attribute_const__ __u16 __c6x_swab16(__u16 val)
-{
- asm("swap4 .l1 %0,%0\n" : "+a"(val));
- return val;
-}
-
-static inline __attribute_const__ __u32 __c6x_swab32(__u32 val)
-{
- asm("swap4 .l1 %0,%0\n"
- "swap2 .l1 %0,%0\n"
- : "+a"(val));
- return val;
-}
-
-static inline __attribute_const__ __u64 __c6x_swab64(__u64 val)
-{
- asm(" swap2 .s1 %p0,%P0\n"
- "|| swap2 .l1 %P0,%p0\n"
- " swap4 .l1 %p0,%p0\n"
- " swap4 .l1 %P0,%P0\n"
- : "+a"(val));
- return val;
-}
-
-static inline __attribute_const__ __u32 __c6x_swahw32(__u32 val)
-{
- asm("swap2 .l1 %0,%0\n" : "+a"(val));
- return val;
-}
-
-static inline __attribute_const__ __u32 __c6x_swahb32(__u32 val)
-{
- asm("swap4 .l1 %0,%0\n" : "+a"(val));
- return val;
-}
-
-#define __arch_swab16 __c6x_swab16
-#define __arch_swab32 __c6x_swab32
-#define __arch_swab64 __c6x_swab64
-#define __arch_swahw32 __c6x_swahw32
-#define __arch_swahb32 __c6x_swahb32
-
-#endif /* _ASM_C6X_SWAB_H */
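The header removed above let swab16()/swab32()/swab64() collapse to one or two C6x swap instructions; without an arch override, the generic shift-and-mask forms are used instead. For reference, a portable sketch equivalent in effect to the 16- and 32-bit cases:

#include <stdint.h>

/* Portable byte-swap fallbacks doing in several operations what
 * swap2/swap4 did in one or two instructions on C6x. */
static inline uint16_t swab16_sw(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

static inline uint32_t swab32_sw(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}
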
diff --git a/arch/c6x/include/uapi/asm/unistd.h b/arch/c6x/include/uapi/asm/unistd.h
deleted file mode 100644
index 79b724c39d9b..000000000000
--- a/arch/c6x/include/uapi/asm/unistd.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2011 Texas Instruments Incorporated
- *
- * Based on arch/tile version.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#define __ARCH_WANT_RENAMEAT
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_TIME32_SYSCALLS
-
-/* Use the standard ABI for syscalls. */
-#include <asm-generic/unistd.h>
-
-/* C6X-specific syscalls. */
-#define __NR_cache_sync (__NR_arch_specific_syscall + 0)
-__SYSCALL(__NR_cache_sync, sys_cache_sync)
diff --git a/arch/c6x/kernel/Makefile b/arch/c6x/kernel/Makefile
deleted file mode 100644
index fbe74174de87..000000000000
--- a/arch/c6x/kernel/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for arch/c6x/kernel/
-#
-
-extra-y := head.o vmlinux.lds
-
-obj-y := process.o traps.o irq.o signal.o ptrace.o
-obj-y += setup.o sys_c6x.o time.o devicetree.o
-obj-y += switch_to.o entry.o vectors.o c6x_ksyms.o
-obj-y += soc.o
-
-obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/c6x/kernel/asm-offsets.c b/arch/c6x/kernel/asm-offsets.c
deleted file mode 100644
index 4a264ef87dcb..000000000000
--- a/arch/c6x/kernel/asm-offsets.c
+++ /dev/null
@@ -1,123 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Generate definitions needed by assembly language modules.
- * This code generates raw asm output which is post-processed
- * to extract and format the required data.
- */
-
-#include <linux/sched.h>
-#include <linux/thread_info.h>
-#include <asm/procinfo.h>
-#include <linux/kbuild.h>
-#include <linux/unistd.h>
-
-void foo(void)
-{
- OFFSET(REGS_A16, pt_regs, a16);
- OFFSET(REGS_A17, pt_regs, a17);
- OFFSET(REGS_A18, pt_regs, a18);
- OFFSET(REGS_A19, pt_regs, a19);
- OFFSET(REGS_A20, pt_regs, a20);
- OFFSET(REGS_A21, pt_regs, a21);
- OFFSET(REGS_A22, pt_regs, a22);
- OFFSET(REGS_A23, pt_regs, a23);
- OFFSET(REGS_A24, pt_regs, a24);
- OFFSET(REGS_A25, pt_regs, a25);
- OFFSET(REGS_A26, pt_regs, a26);
- OFFSET(REGS_A27, pt_regs, a27);
- OFFSET(REGS_A28, pt_regs, a28);
- OFFSET(REGS_A29, pt_regs, a29);
- OFFSET(REGS_A30, pt_regs, a30);
- OFFSET(REGS_A31, pt_regs, a31);
-
- OFFSET(REGS_B16, pt_regs, b16);
- OFFSET(REGS_B17, pt_regs, b17);
- OFFSET(REGS_B18, pt_regs, b18);
- OFFSET(REGS_B19, pt_regs, b19);
- OFFSET(REGS_B20, pt_regs, b20);
- OFFSET(REGS_B21, pt_regs, b21);
- OFFSET(REGS_B22, pt_regs, b22);
- OFFSET(REGS_B23, pt_regs, b23);
- OFFSET(REGS_B24, pt_regs, b24);
- OFFSET(REGS_B25, pt_regs, b25);
- OFFSET(REGS_B26, pt_regs, b26);
- OFFSET(REGS_B27, pt_regs, b27);
- OFFSET(REGS_B28, pt_regs, b28);
- OFFSET(REGS_B29, pt_regs, b29);
- OFFSET(REGS_B30, pt_regs, b30);
- OFFSET(REGS_B31, pt_regs, b31);
-
- OFFSET(REGS_A0, pt_regs, a0);
- OFFSET(REGS_A1, pt_regs, a1);
- OFFSET(REGS_A2, pt_regs, a2);
- OFFSET(REGS_A3, pt_regs, a3);
- OFFSET(REGS_A4, pt_regs, a4);
- OFFSET(REGS_A5, pt_regs, a5);
- OFFSET(REGS_A6, pt_regs, a6);
- OFFSET(REGS_A7, pt_regs, a7);
- OFFSET(REGS_A8, pt_regs, a8);
- OFFSET(REGS_A9, pt_regs, a9);
- OFFSET(REGS_A10, pt_regs, a10);
- OFFSET(REGS_A11, pt_regs, a11);
- OFFSET(REGS_A12, pt_regs, a12);
- OFFSET(REGS_A13, pt_regs, a13);
- OFFSET(REGS_A14, pt_regs, a14);
- OFFSET(REGS_A15, pt_regs, a15);
-
- OFFSET(REGS_B0, pt_regs, b0);
- OFFSET(REGS_B1, pt_regs, b1);
- OFFSET(REGS_B2, pt_regs, b2);
- OFFSET(REGS_B3, pt_regs, b3);
- OFFSET(REGS_B4, pt_regs, b4);
- OFFSET(REGS_B5, pt_regs, b5);
- OFFSET(REGS_B6, pt_regs, b6);
- OFFSET(REGS_B7, pt_regs, b7);
- OFFSET(REGS_B8, pt_regs, b8);
- OFFSET(REGS_B9, pt_regs, b9);
- OFFSET(REGS_B10, pt_regs, b10);
- OFFSET(REGS_B11, pt_regs, b11);
- OFFSET(REGS_B12, pt_regs, b12);
- OFFSET(REGS_B13, pt_regs, b13);
- OFFSET(REGS_DP, pt_regs, dp);
- OFFSET(REGS_SP, pt_regs, sp);
-
- OFFSET(REGS_TSR, pt_regs, tsr);
- OFFSET(REGS_ORIG_A4, pt_regs, orig_a4);
-
- DEFINE(REGS__END, sizeof(struct pt_regs));
- BLANK();
-
- OFFSET(THREAD_PC, thread_struct, pc);
- OFFSET(THREAD_B15_14, thread_struct, b15_14);
- OFFSET(THREAD_A15_14, thread_struct, a15_14);
- OFFSET(THREAD_B13_12, thread_struct, b13_12);
- OFFSET(THREAD_A13_12, thread_struct, a13_12);
- OFFSET(THREAD_B11_10, thread_struct, b11_10);
- OFFSET(THREAD_A11_10, thread_struct, a11_10);
- OFFSET(THREAD_RICL_ICL, thread_struct, ricl_icl);
- BLANK();
-
- OFFSET(TASK_STATE, task_struct, state);
- BLANK();
-
- OFFSET(THREAD_INFO_FLAGS, thread_info, flags);
- OFFSET(THREAD_INFO_PREEMPT_COUNT, thread_info, preempt_count);
- BLANK();
-
-	/* These would be unnecessary if we ran asm files
- * through the preprocessor.
- */
- DEFINE(KTHREAD_SHIFT, THREAD_SHIFT);
- DEFINE(KTHREAD_START_SP, THREAD_START_SP);
- DEFINE(ENOSYS_, ENOSYS);
- DEFINE(NR_SYSCALLS_, __NR_syscalls);
-
- DEFINE(_TIF_SYSCALL_TRACE, (1<<TIF_SYSCALL_TRACE));
- DEFINE(_TIF_NOTIFY_RESUME, (1<<TIF_NOTIFY_RESUME));
- DEFINE(_TIF_SIGPENDING, (1<<TIF_SIGPENDING));
- DEFINE(_TIF_NEED_RESCHED, (1<<TIF_NEED_RESCHED));
- DEFINE(_TIF_NOTIFY_SIGNAL, (1<<TIF_NOTIFY_SIGNAL));
-
- DEFINE(_TIF_ALLWORK_MASK, TIF_ALLWORK_MASK);
- DEFINE(_TIF_WORK_MASK, TIF_WORK_MASK);
-}
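asm-offsets.c is never linked into the kernel: kbuild compiles it to assembly and turns each OFFSET()/DEFINE() marker into a #define in include/generated/asm-offsets.h, which entry.S and head.S pull in as <asm/asm-offsets.h>. A simplified sketch of how those markers are emitted, modelled on include/linux/kbuild.h (exact macro text may differ by kernel version):

#include <stddef.h>

/* Each invocation emits an .ascii line such as "->REGS_A4 <value> ..."
 * into the generated assembly; a kbuild script then rewrites those
 * lines into #define entries in asm-offsets.h. */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

#define BLANK() \
	asm volatile("\n.ascii \"->\"" : : )
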
diff --git a/arch/c6x/kernel/c6x_ksyms.c b/arch/c6x/kernel/c6x_ksyms.c
deleted file mode 100644
index 5a39f52f9db4..000000000000
--- a/arch/c6x/kernel/c6x_ksyms.c
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#include <linux/module.h>
-#include <asm/checksum.h>
-#include <linux/io.h>
-
-/*
- * libgcc functions - used internally by the compiler...
- */
-extern int __c6xabi_divi(int dividend, int divisor);
-EXPORT_SYMBOL(__c6xabi_divi);
-
-extern unsigned __c6xabi_divu(unsigned dividend, unsigned divisor);
-EXPORT_SYMBOL(__c6xabi_divu);
-
-extern int __c6xabi_remi(int dividend, int divisor);
-EXPORT_SYMBOL(__c6xabi_remi);
-
-extern unsigned __c6xabi_remu(unsigned dividend, unsigned divisor);
-EXPORT_SYMBOL(__c6xabi_remu);
-
-extern int __c6xabi_divremi(int dividend, int divisor);
-EXPORT_SYMBOL(__c6xabi_divremi);
-
-extern unsigned __c6xabi_divremu(unsigned dividend, unsigned divisor);
-EXPORT_SYMBOL(__c6xabi_divremu);
-
-extern unsigned long long __c6xabi_mpyll(unsigned long long src1,
- unsigned long long src2);
-EXPORT_SYMBOL(__c6xabi_mpyll);
-
-extern long long __c6xabi_negll(long long src);
-EXPORT_SYMBOL(__c6xabi_negll);
-
-extern unsigned long long __c6xabi_llshl(unsigned long long src1, uint src2);
-EXPORT_SYMBOL(__c6xabi_llshl);
-
-extern long long __c6xabi_llshr(long long src1, uint src2);
-EXPORT_SYMBOL(__c6xabi_llshr);
-
-extern unsigned long long __c6xabi_llshru(unsigned long long src1, uint src2);
-EXPORT_SYMBOL(__c6xabi_llshru);
-
-extern void __c6xabi_strasgi(int *dst, const int *src, unsigned cnt);
-EXPORT_SYMBOL(__c6xabi_strasgi);
-
-extern void __c6xabi_push_rts(void);
-EXPORT_SYMBOL(__c6xabi_push_rts);
-
-extern void __c6xabi_pop_rts(void);
-EXPORT_SYMBOL(__c6xabi_pop_rts);
-
-extern void __c6xabi_strasgi_64plus(int *dst, const int *src, unsigned cnt);
-EXPORT_SYMBOL(__c6xabi_strasgi_64plus);
-
-/* lib functions */
-EXPORT_SYMBOL(memcpy);
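These exports exist because the TI C6x ABI lowers operations the ISA lacks (32-bit division, 64-bit shifts and multiplies, RTS push/pop for far calls) into calls to compiler support routines, so a loadable module built for this target needs the kernel to resolve those symbols. For example, a plain division in module code becomes such a call:

/* On a C6x build this division compiles to a call to __c6xabi_divi
 * (or __c6xabi_divu for unsigned operands), which is why the helpers
 * above are exported to modules. Plain portable C otherwise. */
static int scale_sketch(int numerator, int denominator)
{
	return numerator / denominator;
}
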
diff --git a/arch/c6x/kernel/devicetree.c b/arch/c6x/kernel/devicetree.c
deleted file mode 100644
index a0c73f0545b2..000000000000
--- a/arch/c6x/kernel/devicetree.c
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Architecture specific OF callbacks.
- *
- * Copyright (C) 2011 Texas Instruments Incorporated
- * Author: Mark Salter <msalter@redhat.com>
- */
-#include <linux/init.h>
-#include <linux/memblock.h>
-
-void __init early_init_dt_add_memory_arch(u64 base, u64 size)
-{
- c6x_add_memory(base, size);
-}
diff --git a/arch/c6x/kernel/entry.S b/arch/c6x/kernel/entry.S
deleted file mode 100644
index fb154d19625b..000000000000
--- a/arch/c6x/kernel/entry.S
+++ /dev/null
@@ -1,736 +0,0 @@
-; SPDX-License-Identifier: GPL-2.0-only
-;
-; Port on Texas Instruments TMS320C6x architecture
-;
-; Copyright (C) 2004-2011 Texas Instruments Incorporated
-; Author: Aurelien Jacquiot (aurelien.jacquiot@virtuallogix.com)
-; Updated for 2.6.34: Mark Salter <msalter@redhat.com>
-;
-
-#include <linux/sys.h>
-#include <linux/linkage.h>
-#include <asm/thread_info.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-#include <asm/errno.h>
-
-; Registers naming
-#define DP B14
-#define SP B15
-
-#ifndef CONFIG_PREEMPTION
-#define resume_kernel restore_all
-#endif
-
- .altmacro
-
- .macro MASK_INT reg
- MVC .S2 CSR,reg
- CLR .S2 reg,0,0,reg
- MVC .S2 reg,CSR
- .endm
-
- .macro UNMASK_INT reg
- MVC .S2 CSR,reg
- SET .S2 reg,0,0,reg
- MVC .S2 reg,CSR
- .endm
-
- .macro GET_THREAD_INFO reg
- SHR .S1X SP,THREAD_SHIFT,reg
- SHL .S1 reg,THREAD_SHIFT,reg
- .endm
-
- ;;
- ;; This defines the normal kernel pt_regs layout.
- ;;
- .macro SAVE_ALL __rp __tsr
- STW .D2T2 B0,*SP--[2] ; save original B0
- MVKL .S2 current_ksp,B0
- MVKH .S2 current_ksp,B0
- LDW .D2T2 *B0,B1 ; KSP
-
- NOP 3
- STW .D2T2 B1,*+SP[1] ; save original B1
- XOR .D2 SP,B1,B0 ; (SP ^ KSP)
- LDW .D2T2 *+SP[1],B1 ; restore B0/B1
- LDW .D2T2 *++SP[2],B0
- SHR .S2 B0,THREAD_SHIFT,B0 ; 0 if already using kstack
- [B0] STDW .D2T2 SP:DP,*--B1[1] ; user: save user sp/dp kstack
- [B0] MV .S2 B1,SP ; and switch to kstack
-||[!B0] STDW .D2T2 SP:DP,*--SP[1] ; kernel: save on current stack
-
- SUBAW .D2 SP,2,SP
-
- ADD .D1X SP,-8,A15
- || STDW .D2T1 A15:A14,*SP--[16] ; save A15:A14
-
- STDW .D2T2 B13:B12,*SP--[1]
- || STDW .D1T1 A13:A12,*A15--[1]
- || MVC .S2 __rp,B13
-
- STDW .D2T2 B11:B10,*SP--[1]
- || STDW .D1T1 A11:A10,*A15--[1]
- || MVC .S2 CSR,B12
-
- STDW .D2T2 B9:B8,*SP--[1]
- || STDW .D1T1 A9:A8,*A15--[1]
- || MVC .S2 RILC,B11
- STDW .D2T2 B7:B6,*SP--[1]
- || STDW .D1T1 A7:A6,*A15--[1]
- || MVC .S2 ILC,B10
-
- STDW .D2T2 B5:B4,*SP--[1]
- || STDW .D1T1 A5:A4,*A15--[1]
-
- STDW .D2T2 B3:B2,*SP--[1]
- || STDW .D1T1 A3:A2,*A15--[1]
- || MVC .S2 __tsr,B5
-
- STDW .D2T2 B1:B0,*SP--[1]
- || STDW .D1T1 A1:A0,*A15--[1]
- || MV .S1X B5,A5
-
- STDW .D2T2 B31:B30,*SP--[1]
- || STDW .D1T1 A31:A30,*A15--[1]
- STDW .D2T2 B29:B28,*SP--[1]
- || STDW .D1T1 A29:A28,*A15--[1]
- STDW .D2T2 B27:B26,*SP--[1]
- || STDW .D1T1 A27:A26,*A15--[1]
- STDW .D2T2 B25:B24,*SP--[1]
- || STDW .D1T1 A25:A24,*A15--[1]
- STDW .D2T2 B23:B22,*SP--[1]
- || STDW .D1T1 A23:A22,*A15--[1]
- STDW .D2T2 B21:B20,*SP--[1]
- || STDW .D1T1 A21:A20,*A15--[1]
- STDW .D2T2 B19:B18,*SP--[1]
- || STDW .D1T1 A19:A18,*A15--[1]
- STDW .D2T2 B17:B16,*SP--[1]
- || STDW .D1T1 A17:A16,*A15--[1]
-
- STDW .D2T2 B13:B12,*SP--[1] ; save PC and CSR
-
- STDW .D2T2 B11:B10,*SP--[1] ; save RILC and ILC
- STDW .D2T1 A5:A4,*SP--[1] ; save TSR and orig A4
-
- ;; We left an unused word on the stack just above pt_regs.
- ;; It is used to save whether or not this frame is due to
- ;; a syscall. It is cleared here, but the syscall handler
- ;; sets it to a non-zero value.
- MVK .L2 0,B1
- STW .D2T2 B1,*+SP(REGS__END+8) ; clear syscall flag
- .endm
-
- .macro RESTORE_ALL __rp __tsr
- LDDW .D2T2 *++SP[1],B9:B8 ; get TSR (B9)
- LDDW .D2T2 *++SP[1],B11:B10 ; get RILC (B11) and ILC (B10)
- LDDW .D2T2 *++SP[1],B13:B12 ; get PC (B13) and CSR (B12)
-
- ADDAW .D1X SP,30,A15
-
- LDDW .D1T1 *++A15[1],A17:A16
- || LDDW .D2T2 *++SP[1],B17:B16
- LDDW .D1T1 *++A15[1],A19:A18
- || LDDW .D2T2 *++SP[1],B19:B18
- LDDW .D1T1 *++A15[1],A21:A20
- || LDDW .D2T2 *++SP[1],B21:B20
- LDDW .D1T1 *++A15[1],A23:A22
- || LDDW .D2T2 *++SP[1],B23:B22
- LDDW .D1T1 *++A15[1],A25:A24
- || LDDW .D2T2 *++SP[1],B25:B24
- LDDW .D1T1 *++A15[1],A27:A26
- || LDDW .D2T2 *++SP[1],B27:B26
- LDDW .D1T1 *++A15[1],A29:A28
- || LDDW .D2T2 *++SP[1],B29:B28
- LDDW .D1T1 *++A15[1],A31:A30
- || LDDW .D2T2 *++SP[1],B31:B30
-
- LDDW .D1T1 *++A15[1],A1:A0
- || LDDW .D2T2 *++SP[1],B1:B0
-
- LDDW .D1T1 *++A15[1],A3:A2
- || LDDW .D2T2 *++SP[1],B3:B2
- || MVC .S2 B9,__tsr
- LDDW .D1T1 *++A15[1],A5:A4
- || LDDW .D2T2 *++SP[1],B5:B4
- || MVC .S2 B11,RILC
- LDDW .D1T1 *++A15[1],A7:A6
- || LDDW .D2T2 *++SP[1],B7:B6
- || MVC .S2 B10,ILC
-
- LDDW .D1T1 *++A15[1],A9:A8
- || LDDW .D2T2 *++SP[1],B9:B8
- || MVC .S2 B13,__rp
-
- LDDW .D1T1 *++A15[1],A11:A10
- || LDDW .D2T2 *++SP[1],B11:B10
- || MVC .S2 B12,CSR
-
- LDDW .D1T1 *++A15[1],A13:A12
- || LDDW .D2T2 *++SP[1],B13:B12
-
- MV .D2X A15,SP
- || MVKL .S1 current_ksp,A15
- MVKH .S1 current_ksp,A15
- || ADDAW .D1X SP,6,A14
- STW .D1T1 A14,*A15 ; save kernel stack pointer
-
- LDDW .D2T1 *++SP[1],A15:A14
-
- B .S2 __rp ; return from interruption
- LDDW .D2T2 *+SP[1],SP:DP
- NOP 4
- .endm
-
- .section .text
-
- ;;
- ;; Jump to schedule() then return to ret_from_exception
- ;;
-_reschedule:
-#ifdef CONFIG_C6X_BIG_KERNEL
- MVKL .S1 schedule,A0
- MVKH .S1 schedule,A0
- B .S2X A0
-#else
- B .S1 schedule
-#endif
- ADDKPC .S2 ret_from_exception,B3,4
-
- ;;
- ;; Called before syscall handler when process is being debugged
- ;;
-tracesys_on:
-#ifdef CONFIG_C6X_BIG_KERNEL
- MVKL .S1 syscall_trace_entry,A0
- MVKH .S1 syscall_trace_entry,A0
- B .S2X A0
-#else
- B .S1 syscall_trace_entry
-#endif
- ADDKPC .S2 ret_from_syscall_trace,B3,3
- ADD .S1X 8,SP,A4
-
-ret_from_syscall_trace:
- ;; tracing returns (possibly new) syscall number
- MV .D2X A4,B0
- || MVK .S2 __NR_syscalls,B1
- CMPLTU .L2 B0,B1,B1
-
- [!B1] BNOP .S2 ret_from_syscall_function,5
- || MVK .S1 -ENOSYS,A4
-
- ;; reload syscall args from (possibly modified) stack frame
- ;; and get syscall handler addr from sys_call_table:
- LDW .D2T2 *+SP(REGS_B4+8),B4
- || MVKL .S2 sys_call_table,B1
- LDW .D2T1 *+SP(REGS_A6+8),A6
- || MVKH .S2 sys_call_table,B1
- LDW .D2T2 *+B1[B0],B0
- || MVKL .S2 ret_from_syscall_function,B3
- LDW .D2T2 *+SP(REGS_B6+8),B6
- || MVKH .S2 ret_from_syscall_function,B3
- LDW .D2T1 *+SP(REGS_A8+8),A8
- LDW .D2T2 *+SP(REGS_B8+8),B8
- NOP
- ; B0 = sys_call_table[__NR_*]
- BNOP .S2 B0,5 ; branch to syscall handler
- || LDW .D2T1 *+SP(REGS_ORIG_A4+8),A4
-
-syscall_exit_work:
- AND .D1 _TIF_SYSCALL_TRACE,A2,A0
- [!A0] BNOP .S1 work_pending,5
- [A0] B .S2 syscall_trace_exit
- ADDKPC .S2 resume_userspace,B3,1
- MVC .S2 CSR,B1
- SET .S2 B1,0,0,B1
- MVC .S2 B1,CSR ; enable ints
-
-work_pending:
- AND .D1 _TIF_NEED_RESCHED,A2,A0
- [!A0] BNOP .S1 work_notifysig,5
-
-work_resched:
-#ifdef CONFIG_C6X_BIG_KERNEL
- MVKL .S1 schedule,A1
- MVKH .S1 schedule,A1
- B .S2X A1
-#else
- B .S2 schedule
-#endif
- ADDKPC .S2 work_rescheduled,B3,4
-work_rescheduled:
- ;; make sure we don't miss an interrupt setting need_resched or
- ;; sigpending between sampling and the rti
- MASK_INT B2
- GET_THREAD_INFO A12
- LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
- MVK .S1 _TIF_WORK_MASK,A1
- MVK .S1 _TIF_NEED_RESCHED,A3
- NOP 2
- AND .D1 A1,A2,A0
- || AND .S1 A3,A2,A1
- [!A0] BNOP .S1 restore_all,5
- [A1] BNOP .S1 work_resched,5
-
-work_notifysig:
- ;; enable interrupts for do_notify_resume()
- UNMASK_INT B2
- B .S2 do_notify_resume
- LDW .D2T1 *+SP(REGS__END+8),A6 ; syscall flag
- ADDKPC .S2 resume_userspace,B3,1
- ADD .S1X 8,SP,A4 ; pt_regs pointer is first arg
- MV .D2X A2,B4 ; thread_info flags is second arg
-
- ;;
-	;; On C64x+, the return path from exceptions and interrupts
-	;; is slightly different
- ;;
-ENTRY(ret_from_exception)
-#ifdef CONFIG_PREEMPTION
- MASK_INT B2
-#endif
-
-ENTRY(ret_from_interrupt)
- ;;
-	;; Check if we are coming from user mode.
- ;;
- LDW .D2T2 *+SP(REGS_TSR+8),B0
- MVK .S2 0x40,B1
- NOP 3
- AND .D2 B0,B1,B0
- [!B0] BNOP .S2 resume_kernel,5
-
-resume_userspace:
- ;; make sure we don't miss an interrupt setting need_resched or
- ;; sigpending between sampling and the rti
- MASK_INT B2
- GET_THREAD_INFO A12
- LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
- MVK .S1 _TIF_WORK_MASK,A1
- MVK .S1 _TIF_NEED_RESCHED,A3
- NOP 2
- AND .D1 A1,A2,A0
- [A0] BNOP .S1 work_pending,5
- BNOP .S1 restore_all,5
-
- ;;
- ;; System call handling
- ;; B0 = syscall number (in sys_call_table)
- ;; A4,B4,A6,B6,A8,B8 = arguments of the syscall function
- ;; A4 is the return value register
- ;;
-system_call_saved:
- MVK .L2 1,B2
- STW .D2T2 B2,*+SP(REGS__END+8) ; set syscall flag
- MVC .S2 B2,ECR ; ack the software exception
-
- UNMASK_INT B2 ; re-enable global IT
-
-system_call_saved_noack:
- ;; Check system call number
- MVK .S2 __NR_syscalls,B1
-#ifdef CONFIG_C6X_BIG_KERNEL
- || MVKL .S1 sys_ni_syscall,A0
-#endif
- CMPLTU .L2 B0,B1,B1
-#ifdef CONFIG_C6X_BIG_KERNEL
- || MVKH .S1 sys_ni_syscall,A0
-#endif
-
- ;; Check for ptrace
- GET_THREAD_INFO A12
-
-#ifdef CONFIG_C6X_BIG_KERNEL
- [!B1] B .S2X A0
-#else
- [!B1] B .S2 sys_ni_syscall
-#endif
- [!B1] ADDKPC .S2 ret_from_syscall_function,B3,4
-
- ;; Get syscall handler addr from sys_call_table
- ;; call tracesys_on or call syscall handler
- LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
- || MVKL .S2 sys_call_table,B1
- MVKH .S2 sys_call_table,B1
- LDW .D2T2 *+B1[B0],B0
- NOP 2
- ; A2 = thread_info flags
- AND .D1 _TIF_SYSCALL_TRACE,A2,A2
- [A2] BNOP .S1 tracesys_on,5
- ;; B0 = _sys_call_table[__NR_*]
- B .S2 B0
- ADDKPC .S2 ret_from_syscall_function,B3,4
-
-ret_from_syscall_function:
- STW .D2T1 A4,*+SP(REGS_A4+8) ; save return value in A4
- ; original A4 is in orig_A4
-syscall_exit:
- ;; make sure we don't miss an interrupt setting need_resched or
- ;; sigpending between sampling and the rti
- MASK_INT B2
- LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
- MVK .S1 _TIF_ALLWORK_MASK,A1
- NOP 3
- AND .D1 A1,A2,A2 ; check for work to do
- [A2] BNOP .S1 syscall_exit_work,5
-
-restore_all:
- RESTORE_ALL NRP,NTSR
-
- ;;
- ;; After a fork we jump here directly from resume,
- ;; so that A4 contains the previous task structure.
- ;;
-ENTRY(ret_from_fork)
-#ifdef CONFIG_C6X_BIG_KERNEL
- MVKL .S1 schedule_tail,A0
- MVKH .S1 schedule_tail,A0
- B .S2X A0
-#else
- B .S2 schedule_tail
-#endif
- ADDKPC .S2 ret_from_fork_2,B3,4
-ret_from_fork_2:
- ;; return 0 in A4 for child process
- GET_THREAD_INFO A12
- BNOP .S2 syscall_exit,3
- MVK .L2 0,B0
- STW .D2T2 B0,*+SP(REGS_A4+8)
-ENDPROC(ret_from_fork)
-
-ENTRY(ret_from_kernel_thread)
-#ifdef CONFIG_C6X_BIG_KERNEL
- MVKL .S1 schedule_tail,A0
- MVKH .S1 schedule_tail,A0
- B .S2X A0
-#else
- B .S2 schedule_tail
-#endif
- LDW .D2T2 *+SP(REGS_A0+8),B10 /* get fn */
- ADDKPC .S2 0f,B3,3
-0:
- B .S2 B10 /* call fn */
- LDW .D2T1 *+SP(REGS_A1+8),A4 /* get arg */
- ADDKPC .S2 ret_from_fork_2,B3,3
-ENDPROC(ret_from_kernel_thread)
-
- ;;
- ;; These are the interrupt handlers, responsible for calling c6x_do_IRQ()
- ;;
- .macro SAVE_ALL_INT
- SAVE_ALL IRP,ITSR
- .endm
-
- .macro CALL_INT int
-#ifdef CONFIG_C6X_BIG_KERNEL
- MVKL .S1 c6x_do_IRQ,A0
- MVKH .S1 c6x_do_IRQ,A0
- BNOP .S2X A0,1
- MVK .S1 int,A4
- ADDAW .D2 SP,2,B4
- MVKL .S2 ret_from_interrupt,B3
- MVKH .S2 ret_from_interrupt,B3
-#else
- CALLP .S2 c6x_do_IRQ,B3
- || MVK .S1 int,A4
- || ADDAW .D2 SP,2,B4
- B .S1 ret_from_interrupt
- NOP 5
-#endif
- .endm
-
-ENTRY(_int4_handler)
- SAVE_ALL_INT
- CALL_INT 4
-ENDPROC(_int4_handler)
-
-ENTRY(_int5_handler)
- SAVE_ALL_INT
- CALL_INT 5
-ENDPROC(_int5_handler)
-
-ENTRY(_int6_handler)
- SAVE_ALL_INT
- CALL_INT 6
-ENDPROC(_int6_handler)
-
-ENTRY(_int7_handler)
- SAVE_ALL_INT
- CALL_INT 7
-ENDPROC(_int7_handler)
-
-ENTRY(_int8_handler)
- SAVE_ALL_INT
- CALL_INT 8
-ENDPROC(_int8_handler)
-
-ENTRY(_int9_handler)
- SAVE_ALL_INT
- CALL_INT 9
-ENDPROC(_int9_handler)
-
-ENTRY(_int10_handler)
- SAVE_ALL_INT
- CALL_INT 10
-ENDPROC(_int10_handler)
-
-ENTRY(_int11_handler)
- SAVE_ALL_INT
- CALL_INT 11
-ENDPROC(_int11_handler)
-
-ENTRY(_int12_handler)
- SAVE_ALL_INT
- CALL_INT 12
-ENDPROC(_int12_handler)
-
-ENTRY(_int13_handler)
- SAVE_ALL_INT
- CALL_INT 13
-ENDPROC(_int13_handler)
-
-ENTRY(_int14_handler)
- SAVE_ALL_INT
- CALL_INT 14
-ENDPROC(_int14_handler)
-
-ENTRY(_int15_handler)
- SAVE_ALL_INT
- CALL_INT 15
-ENDPROC(_int15_handler)
-
- ;;
- ;; Handler for uninitialized and spurious interrupts
- ;;
-ENTRY(_bad_interrupt)
- B .S2 IRP
- NOP 5
-ENDPROC(_bad_interrupt)
-
- ;;
- ;; Entry for NMI/exceptions/syscall
- ;;
-ENTRY(_nmi_handler)
- SAVE_ALL NRP,NTSR
-
- MVC .S2 EFR,B2
- CMPEQ .L2 1,B2,B2
- || MVC .S2 TSR,B1
- CLR .S2 B1,10,10,B1
- MVC .S2 B1,TSR
-#ifdef CONFIG_C6X_BIG_KERNEL
- [!B2] MVKL .S1 process_exception,A0
- [!B2] MVKH .S1 process_exception,A0
- [!B2] B .S2X A0
-#else
- [!B2] B .S2 process_exception
-#endif
- [B2] B .S2 system_call_saved
- [!B2] ADDAW .D2 SP,2,B1
- [!B2] MV .D1X B1,A4
- ADDKPC .S2 ret_from_trap,B3,2
-
-ret_from_trap:
- MV .D2X A4,B0
- [!B0] BNOP .S2 ret_from_exception,5
-
-#ifdef CONFIG_C6X_BIG_KERNEL
- MVKL .S2 system_call_saved_noack,B3
- MVKH .S2 system_call_saved_noack,B3
-#endif
- LDW .D2T2 *+SP(REGS_B0+8),B0
- LDW .D2T1 *+SP(REGS_A4+8),A4
- LDW .D2T2 *+SP(REGS_B4+8),B4
- LDW .D2T1 *+SP(REGS_A6+8),A6
- LDW .D2T2 *+SP(REGS_B6+8),B6
- LDW .D2T1 *+SP(REGS_A8+8),A8
-#ifdef CONFIG_C6X_BIG_KERNEL
- || B .S2 B3
-#else
- || B .S2 system_call_saved_noack
-#endif
- LDW .D2T2 *+SP(REGS_B8+8),B8
- NOP 4
-ENDPROC(_nmi_handler)
-
- ;;
- ;; Jump to schedule() then return to ret_from_isr
- ;;
-#ifdef CONFIG_PREEMPTION
-resume_kernel:
- GET_THREAD_INFO A12
- LDW .D1T1 *+A12(THREAD_INFO_PREEMPT_COUNT),A1
- NOP 4
- [A1] BNOP .S2 restore_all,5
-
-preempt_schedule:
- GET_THREAD_INFO A2
- LDW .D1T1 *+A2(THREAD_INFO_FLAGS),A1
-#ifdef CONFIG_C6X_BIG_KERNEL
- MVKL .S2 preempt_schedule_irq,B0
- MVKH .S2 preempt_schedule_irq,B0
- NOP 2
-#else
- NOP 4
-#endif
- AND .D1 _TIF_NEED_RESCHED,A1,A1
- [!A1] BNOP .S2 restore_all,5
-#ifdef CONFIG_C6X_BIG_KERNEL
- B .S2 B0
-#else
- B .S2 preempt_schedule_irq
-#endif
- ADDKPC .S2 preempt_schedule,B3,4
-#endif /* CONFIG_PREEMPTION */
-
-ENTRY(enable_exception)
- DINT
- MVC .S2 TSR,B0
- MVC .S2 B3,NRP
- MVK .L2 0xc,B1
- OR .D2 B0,B1,B0
- MVC .S2 B0,TSR ; Set GEE and XEN in TSR
- B .S2 NRP
- NOP 5
-ENDPROC(enable_exception)
-
- ;;
- ;; Special system calls
- ;; return address is in B3
- ;;
-ENTRY(sys_rt_sigreturn)
- ADD .D1X SP,8,A4
-#ifdef CONFIG_C6X_BIG_KERNEL
- || MVKL .S1 do_rt_sigreturn,A0
- MVKH .S1 do_rt_sigreturn,A0
- BNOP .S2X A0,5
-#else
- || B .S2 do_rt_sigreturn
- NOP 5
-#endif
-ENDPROC(sys_rt_sigreturn)
-
-ENTRY(sys_pread_c6x)
- MV .D2X A8,B7
-#ifdef CONFIG_C6X_BIG_KERNEL
- || MVKL .S1 sys_pread64,A0
- MVKH .S1 sys_pread64,A0
- BNOP .S2X A0,5
-#else
- || B .S2 sys_pread64
- NOP 5
-#endif
-ENDPROC(sys_pread_c6x)
-
-ENTRY(sys_pwrite_c6x)
- MV .D2X A8,B7
-#ifdef CONFIG_C6X_BIG_KERNEL
- || MVKL .S1 sys_pwrite64,A0
- MVKH .S1 sys_pwrite64,A0
- BNOP .S2X A0,5
-#else
- || B .S2 sys_pwrite64
- NOP 5
-#endif
-ENDPROC(sys_pwrite_c6x)
-
-;; On Entry
-;; A4 - path
-;; B4 - offset_lo (LE), offset_hi (BE)
-;; A6 - offset_lo (BE), offset_hi (LE)
-ENTRY(sys_truncate64_c6x)
-#ifdef CONFIG_CPU_BIG_ENDIAN
- MV .S2 B4,B5
- MV .D2X A6,B4
-#else
- MV .D2X A6,B5
-#endif
-#ifdef CONFIG_C6X_BIG_KERNEL
- || MVKL .S1 sys_truncate64,A0
- MVKH .S1 sys_truncate64,A0
- BNOP .S2X A0,5
-#else
- || B .S2 sys_truncate64
- NOP 5
-#endif
-ENDPROC(sys_truncate64_c6x)
-
-;; On Entry
-;; A4 - fd
-;; B4 - offset_lo (LE), offset_hi (BE)
-;; A6 - offset_lo (BE), offset_hi (LE)
-ENTRY(sys_ftruncate64_c6x)
-#ifdef CONFIG_CPU_BIG_ENDIAN
- MV .S2 B4,B5
- MV .D2X A6,B4
-#else
- MV .D2X A6,B5
-#endif
-#ifdef CONFIG_C6X_BIG_KERNEL
- || MVKL .S1 sys_ftruncate64,A0
- MVKH .S1 sys_ftruncate64,A0
- BNOP .S2X A0,5
-#else
- || B .S2 sys_ftruncate64
- NOP 5
-#endif
-ENDPROC(sys_ftruncate64_c6x)
-
-;; On Entry
-;; A4 - fd
-;; B4 - offset_lo (LE), offset_hi (BE)
-;; A6 - offset_lo (BE), offset_hi (LE)
-;; B6 - len_lo (LE), len_hi (BE)
-;; A8 - len_lo (BE), len_hi (LE)
-;; B8 - advice
-ENTRY(sys_fadvise64_64_c6x)
-#ifdef CONFIG_C6X_BIG_KERNEL
- MVKL .S1 sys_fadvise64_64,A0
- MVKH .S1 sys_fadvise64_64,A0
- BNOP .S2X A0,2
-#else
- B .S2 sys_fadvise64_64
- NOP 2
-#endif
-#ifdef CONFIG_CPU_BIG_ENDIAN
- MV .L2 B4,B5
- || MV .D2X A6,B4
- MV .L1 A8,A6
- || MV .D1X B6,A7
-#else
- MV .D2X A6,B5
- MV .L1 A8,A7
- || MV .D1X B6,A6
-#endif
- MV .L2 B8,B6
-ENDPROC(sys_fadvise64_64_c6x)
-
-;; On Entry
-;; A4 - fd
-;; B4 - mode
-;; A6 - offset_hi
-;; B6 - offset_lo
-;; A8 - len_hi
-;; B8 - len_lo
-ENTRY(sys_fallocate_c6x)
-#ifdef CONFIG_C6X_BIG_KERNEL
- MVKL .S1 sys_fallocate,A0
- MVKH .S1 sys_fallocate,A0
- BNOP .S2X A0,1
-#else
- B .S2 sys_fallocate
- NOP
-#endif
- MV .D1 A6,A7
- MV .D1X B6,A6
- MV .D2X A8,B7
- MV .D2 B8,B6
-ENDPROC(sys_fallocate_c6x)
-
- ;; put this in .neardata for faster access when using DSBT mode
- .section .neardata,"aw",@progbits
- .global current_ksp
- .hidden current_ksp
-current_ksp:
- .word init_thread_union + THREAD_START_SP
diff --git a/arch/c6x/kernel/head.S b/arch/c6x/kernel/head.S
deleted file mode 100644
index fecbeef827bc..000000000000
--- a/arch/c6x/kernel/head.S
+++ /dev/null
@@ -1,81 +0,0 @@
-; SPDX-License-Identifier: GPL-2.0-only
-;
-; Port on Texas Instruments TMS320C6x architecture
-;
-; Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
-; Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
-;
-#include <linux/linkage.h>
-#include <linux/of_fdt.h>
-#include <asm/asm-offsets.h>
-
- __HEAD
-ENTRY(_c_int00)
- ;; Save magic and pointer
- MV .S1 A4,A10
- MV .S2 B4,B10
- MVKL .S2 __bss_start,B5
- MVKH .S2 __bss_start,B5
- MVKL .S2 __bss_stop,B6
- MVKH .S2 __bss_stop,B6
- SUB .L2 B6,B5,B6 ; bss size
-
- ;; Set the stack pointer
- MVKL .S2 current_ksp,B0
- MVKH .S2 current_ksp,B0
- LDW .D2T2 *B0,B15
-
- ;; clear bss
- SHR .S2 B6,3,B0 ; number of dwords to clear
- ZERO .L2 B13
- ZERO .L2 B12
-bss_loop:
- BDEC .S2 bss_loop,B0
- NOP 3
- CMPLT .L2 B0,0,B1
- [!B1] STDW .D2T2 B13:B12,*B5++[1]
-
- NOP 4
- AND .D2 ~7,B15,B15
-
- ;; Clear GIE and PGIE
- MVC .S2 CSR,B2
- CLR .S2 B2,0,1,B2
- MVC .S2 B2,CSR
- MVC .S2 TSR,B2
- CLR .S2 B2,0,1,B2
- MVC .S2 B2,TSR
- MVC .S2 ITSR,B2
- CLR .S2 B2,0,1,B2
- MVC .S2 B2,ITSR
- MVC .S2 NTSR,B2
- CLR .S2 B2,0,1,B2
- MVC .S2 B2,NTSR
-
- ;; pass DTB pointer to machine_init (or zero if none)
- MVKL .S1 OF_DT_HEADER,A0
- MVKH .S1 OF_DT_HEADER,A0
- CMPEQ .L1 A10,A0,A0
- [A0] MV .S1X B10,A4
- [!A0] MVK .S1 0,A4
-
-#ifdef CONFIG_C6X_BIG_KERNEL
- MVKL .S1 machine_init,A0
- MVKH .S1 machine_init,A0
- B .S2X A0
- ADDKPC .S2 0f,B3,4
-0:
-#else
- CALLP .S2 machine_init,B3
-#endif
-
- ;; Jump to Linux init
-#ifdef CONFIG_C6X_BIG_KERNEL
- MVKL .S1 start_kernel,A0
- MVKH .S1 start_kernel,A0
- B .S2X A0
-#else
- B .S2 start_kernel
-#endif
- NOP 5
-L1: BNOP .S2 L1,5
diff --git a/arch/c6x/kernel/irq.c b/arch/c6x/kernel/irq.c
deleted file mode 100644
index e4c53d185b62..000000000000
--- a/arch/c6x/kernel/irq.c
+++ /dev/null
@@ -1,127 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2011-2012 Texas Instruments Incorporated
- *
- * This borrows heavily from powerpc version, which is:
- *
- * Derived from arch/i386/kernel/irq.c
- * Copyright (C) 1992 Linus Torvalds
- * Adapted from arch/i386 by Gary Thomas
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- * Updated and modified by Cort Dougan <cort@fsmlabs.com>
- * Copyright (C) 1996-2001 Cort Dougan
- * Adapted for Power Macintosh by Paul Mackerras
- * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
- */
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-#include <linux/radix-tree.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-
-#include <asm/megamod-pic.h>
-#include <asm/special_insns.h>
-
-unsigned long irq_err_count;
-
-static DEFINE_RAW_SPINLOCK(core_irq_lock);
-
-static void mask_core_irq(struct irq_data *data)
-{
- unsigned int prio = data->hwirq;
-
- raw_spin_lock(&core_irq_lock);
- and_creg(IER, ~(1 << prio));
- raw_spin_unlock(&core_irq_lock);
-}
-
-static void unmask_core_irq(struct irq_data *data)
-{
- unsigned int prio = data->hwirq;
-
- raw_spin_lock(&core_irq_lock);
- or_creg(IER, 1 << prio);
- raw_spin_unlock(&core_irq_lock);
-}
-
-static struct irq_chip core_chip = {
- .name = "core",
- .irq_mask = mask_core_irq,
- .irq_unmask = unmask_core_irq,
-};
-
-static int prio_to_virq[NR_PRIORITY_IRQS];
-
-asmlinkage void c6x_do_IRQ(unsigned int prio, struct pt_regs *regs)
-{
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- irq_enter();
-
- generic_handle_irq(prio_to_virq[prio]);
-
- irq_exit();
-
- set_irq_regs(old_regs);
-}
-
-static struct irq_domain *core_domain;
-
-static int core_domain_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- if (hw < 4 || hw >= NR_PRIORITY_IRQS)
- return -EINVAL;
-
- prio_to_virq[hw] = virq;
-
- irq_set_status_flags(virq, IRQ_LEVEL);
- irq_set_chip_and_handler(virq, &core_chip, handle_level_irq);
- return 0;
-}
-
-static const struct irq_domain_ops core_domain_ops = {
- .map = core_domain_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-void __init init_IRQ(void)
-{
- struct device_node *np;
-
- /* Mask all priority IRQs */
- and_creg(IER, ~0xfff0);
-
- np = of_find_compatible_node(NULL, NULL, "ti,c64x+core-pic");
- if (np != NULL) {
- /* create the core host */
- core_domain = irq_domain_add_linear(np, NR_PRIORITY_IRQS,
- &core_domain_ops, NULL);
- if (core_domain)
- irq_set_default_host(core_domain);
- of_node_put(np);
- }
-
- printk(KERN_INFO "Core interrupt controller initialized\n");
-
- /* now we're ready for other SoC controllers */
- megamod_pic_init();
-
- /* Clear all general IRQ flags */
- set_creg(ICR, 0xfff0);
-}
-
-void ack_bad_irq(int irq)
-{
- printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
- irq_err_count++;
-}
-
-int arch_show_interrupts(struct seq_file *p, int prec)
-{
- seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
- return 0;
-}
diff --git a/arch/c6x/kernel/module.c b/arch/c6x/kernel/module.c
deleted file mode 100644
index 09b4c6bfe877..000000000000
--- a/arch/c6x/kernel/module.c
+++ /dev/null
@@ -1,119 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2005, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Thomas Charleux (thomas.charleux@jaluna.com)
- */
-#include <linux/moduleloader.h>
-#include <linux/elf.h>
-#include <linux/vmalloc.h>
-#include <linux/kernel.h>
-
-static inline int fixup_pcr(u32 *ip, Elf32_Addr dest, u32 maskbits, int shift)
-{
- u32 opcode;
- long ep = (long)ip & ~31;
- long delta = ((long)dest - ep) >> 2;
- long mask = (1 << maskbits) - 1;
-
- if ((delta >> (maskbits - 1)) == 0 ||
- (delta >> (maskbits - 1)) == -1) {
- opcode = *ip;
- opcode &= ~(mask << shift);
- opcode |= ((delta & mask) << shift);
- *ip = opcode;
-
- pr_debug("REL PCR_S%d[%p] dest[%p] opcode[%08x]\n",
- maskbits, ip, (void *)dest, opcode);
-
- return 0;
- }
- pr_err("PCR_S%d reloc %p -> %p out of range!\n",
- maskbits, ip, (void *)dest);
-
- return -1;
-}
-
-/*
- * apply a RELA relocation
- */
-int apply_relocate_add(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- Elf32_Rela *rel = (void *) sechdrs[relsec].sh_addr;
- Elf_Sym *sym;
- u32 *location, opcode;
- unsigned int i;
- Elf32_Addr v;
- Elf_Addr offset = 0;
-
- pr_debug("Applying relocate section %u to %u with offset 0x%x\n",
- relsec, sechdrs[relsec].sh_info, offset);
-
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
- /* This is where to make the change */
- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
- + rel[i].r_offset - offset;
-
- /* This is the symbol it is referring to. Note that all
- undefined symbols have been resolved. */
- sym = (Elf_Sym *)sechdrs[symindex].sh_addr
- + ELF32_R_SYM(rel[i].r_info);
-
- /* this is the adjustment to be made */
- v = sym->st_value + rel[i].r_addend;
-
- switch (ELF32_R_TYPE(rel[i].r_info)) {
- case R_C6000_ABS32:
- pr_debug("RELA ABS32: [%p] = 0x%x\n", location, v);
- *location = v;
- break;
- case R_C6000_ABS16:
- pr_debug("RELA ABS16: [%p] = 0x%x\n", location, v);
- *(u16 *)location = v;
- break;
- case R_C6000_ABS8:
- pr_debug("RELA ABS8: [%p] = 0x%x\n", location, v);
- *(u8 *)location = v;
- break;
- case R_C6000_ABS_L16:
- opcode = *location;
- opcode &= ~0x7fff80;
- opcode |= ((v & 0xffff) << 7);
- pr_debug("RELA ABS_L16[%p] v[0x%x] opcode[0x%x]\n",
- location, v, opcode);
- *location = opcode;
- break;
- case R_C6000_ABS_H16:
- opcode = *location;
- opcode &= ~0x7fff80;
- opcode |= ((v >> 9) & 0x7fff80);
- pr_debug("RELA ABS_H16[%p] v[0x%x] opcode[0x%x]\n",
- location, v, opcode);
- *location = opcode;
- break;
- case R_C6000_PCR_S21:
- if (fixup_pcr(location, v, 21, 7))
- return -ENOEXEC;
- break;
- case R_C6000_PCR_S12:
- if (fixup_pcr(location, v, 12, 16))
- return -ENOEXEC;
- break;
- case R_C6000_PCR_S10:
- if (fixup_pcr(location, v, 10, 13))
- return -ENOEXEC;
- break;
- default:
- pr_err("module %s: Unknown RELA relocation: %u\n",
- me->name, ELF32_R_TYPE(rel[i].r_info));
- return -ENOEXEC;
- }
- }
-
- return 0;
-}
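The PCR_S21/S12/S10 cases above patch PC-relative branch fields: the displacement is measured in 32-bit words from the start of the 32-byte fetch packet containing the instruction ((long)ip & ~31), and fixup_pcr() refuses relocations whose displacement does not fit the signed field. The range test it uses is equivalent to this standalone check:

#include <stdbool.h>

/* True if 'delta' fits in a signed two's-complement field 'bits' wide,
 * i.e. -2^(bits-1) <= delta < 2^(bits-1); mirrors the shift-based test
 * in fixup_pcr() above. */
static bool fits_signed_field(long delta, int bits)
{
	long limit = 1L << (bits - 1);

	return delta >= -limit && delta < limit;
}
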
diff --git a/arch/c6x/kernel/process.c b/arch/c6x/kernel/process.c
deleted file mode 100644
index 9f4fd6a40a10..000000000000
--- a/arch/c6x/kernel/process.c
+++ /dev/null
@@ -1,151 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#include <linux/module.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/init_task.h>
-#include <linux/tick.h>
-#include <linux/mqueue.h>
-#include <linux/syscalls.h>
-#include <linux/reboot.h>
-#include <linux/sched/task.h>
-#include <linux/sched/task_stack.h>
-
-#include <asm/syscalls.h>
-
-/* hooks for board specific support */
-void (*c6x_restart)(void);
-void (*c6x_halt)(void);
-
-extern asmlinkage void ret_from_fork(void);
-extern asmlinkage void ret_from_kernel_thread(void);
-
-/*
- * power off function, if any
- */
-void (*pm_power_off)(void);
-EXPORT_SYMBOL(pm_power_off);
-
-void arch_cpu_idle(void)
-{
- unsigned long tmp;
-
- /*
- * Put local_irq_enable and idle in same execute packet
- * to make them atomic and avoid race to idle with
- * interrupts enabled.
- */
- asm volatile (" mvc .s2 CSR,%0\n"
- " or .d2 1,%0,%0\n"
- " mvc .s2 %0,CSR\n"
- "|| idle\n"
- : "=b"(tmp));
-}
-
-static void halt_loop(void)
-{
- printk(KERN_EMERG "System Halted, OK to turn off power\n");
- local_irq_disable();
- while (1)
- asm volatile("idle\n");
-}
-
-void machine_restart(char *__unused)
-{
- if (c6x_restart)
- c6x_restart();
- halt_loop();
-}
-
-void machine_halt(void)
-{
- if (c6x_halt)
- c6x_halt();
- halt_loop();
-}
-
-void machine_power_off(void)
-{
- if (pm_power_off)
- pm_power_off();
- halt_loop();
-}
-
-void flush_thread(void)
-{
-}
-
-/*
- * Do necessary setup to start up a newly executed thread.
- */
-void start_thread(struct pt_regs *regs, unsigned int pc, unsigned long usp)
-{
- /*
-	 * The binfmt loader will set up a "full" stack, but the C6X
-	 * uses an "empty" stack. So we adjust the usp so that
- * argc doesn't get destroyed if an interrupt is taken before
- * it is read from the stack.
- *
- * NB: Library startup code needs to match this.
- */
- usp -= 8;
-
- regs->pc = pc;
- regs->sp = usp;
- regs->tsr |= 0x40; /* set user mode */
- current->thread.usp = usp;
-}
-
-/*
- * Copy a new thread context in its stack.
- */
-int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long ustk_size, struct task_struct *p,
- unsigned long tls)
-{
- struct pt_regs *childregs;
-
- childregs = task_pt_regs(p);
-
- if (unlikely(p->flags & PF_KTHREAD)) {
- /* case of __kernel_thread: we return to supervisor space */
- memset(childregs, 0, sizeof(struct pt_regs));
- childregs->sp = (unsigned long)(childregs + 1);
- p->thread.pc = (unsigned long) ret_from_kernel_thread;
- childregs->a0 = usp; /* function */
- childregs->a1 = ustk_size; /* argument */
- } else {
- /* Otherwise use the given stack */
- *childregs = *current_pt_regs();
- if (usp)
- childregs->sp = usp;
- p->thread.pc = (unsigned long) ret_from_fork;
- }
-
- /* Set usp/ksp */
- p->thread.usp = childregs->sp;
- thread_saved_ksp(p) = (unsigned long)childregs - 8;
- p->thread.wchan = p->thread.pc;
-#ifdef __DSBT__
- {
- unsigned long dp;
-
- asm volatile ("mv .S2 b14,%0\n" : "=b"(dp));
-
- thread_saved_dp(p) = dp;
- if (usp == -1)
- childregs->dp = dp;
- }
-#endif
- return 0;
-}
-
-unsigned long get_wchan(struct task_struct *p)
-{
- return p->thread.wchan;
-}
diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c
deleted file mode 100644
index 3cdaa8cf0ed6..000000000000
--- a/arch/c6x/kernel/ptrace.c
+++ /dev/null
@@ -1,139 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- *
- * Updated for 2.6.34: Mark Salter <msalter@redhat.com>
- */
-#include <linux/ptrace.h>
-#include <linux/tracehook.h>
-#include <linux/regset.h>
-#include <linux/elf.h>
-#include <linux/sched/task_stack.h>
-
-#include <asm/cacheflush.h>
-
-#define PT_REG_SIZE (sizeof(struct pt_regs))
-
-/*
- * Called by kernel/ptrace.c when detaching.
- */
-void ptrace_disable(struct task_struct *child)
-{
- /* nothing to do */
-}
-
-/*
- * Get a register number from live pt_regs for the specified task.
- */
-static inline long get_reg(struct task_struct *task, int regno)
-{
- long *addr = (long *)task_pt_regs(task);
-
- if (regno == PT_TSR || regno == PT_CSR)
- return 0;
-
- return addr[regno];
-}
-
-/*
- * Write contents of register REGNO in task TASK.
- */
-static inline int put_reg(struct task_struct *task,
- int regno,
- unsigned long data)
-{
- unsigned long *addr = (unsigned long *)task_pt_regs(task);
-
- if (regno != PT_TSR && regno != PT_CSR)
- addr[regno] = data;
-
- return 0;
-}
-
-/* regset get/set implementations */
-
-static int gpr_get(struct task_struct *target,
- const struct user_regset *regset,
- struct membuf to)
-{
- return membuf_write(&to, task_pt_regs(target), sizeof(struct pt_regs));
-}
-
-enum c6x_regset {
- REGSET_GPR,
-};
-
-static const struct user_regset c6x_regsets[] = {
- [REGSET_GPR] = {
- .core_note_type = NT_PRSTATUS,
- .n = ELF_NGREG,
- .size = sizeof(u32),
- .align = sizeof(u32),
- .regset_get = gpr_get,
- },
-};
-
-static const struct user_regset_view user_c6x_native_view = {
- .name = "tic6x",
- .e_machine = EM_TI_C6000,
- .regsets = c6x_regsets,
- .n = ARRAY_SIZE(c6x_regsets),
-};
-
-const struct user_regset_view *task_user_regset_view(struct task_struct *task)
-{
- return &user_c6x_native_view;
-}
-
-/*
- * Perform ptrace request
- */
-long arch_ptrace(struct task_struct *child, long request,
- unsigned long addr, unsigned long data)
-{
- int ret = 0;
-
- switch (request) {
- /*
- * write the word at location addr.
- */
- case PTRACE_POKETEXT:
- ret = generic_ptrace_pokedata(child, addr, data);
- if (ret == 0 && request == PTRACE_POKETEXT)
- flush_icache_range(addr, addr + 4);
- break;
- default:
- ret = ptrace_request(child, request, addr, data);
- break;
- }
-
- return ret;
-}
-
-/*
- * handle tracing of system call entry
- * - return the revised system call number or ULONG_MAX to cause ENOSYS
- */
-asmlinkage unsigned long syscall_trace_entry(struct pt_regs *regs)
-{
- if (tracehook_report_syscall_entry(regs))
- /* tracing decided this syscall should not happen, so
- * We'll return a bogus call number to get an ENOSYS
- * error, but leave the original number in
- * regs->orig_a4
- */
- return ULONG_MAX;
-
- return regs->b0;
-}
-
-/*
- * handle tracing of system call exit
- */
-asmlinkage void syscall_trace_exit(struct pt_regs *regs)
-{
- tracehook_report_syscall_exit(regs, 0);
-}
diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c
deleted file mode 100644
index 9254c3b794a5..000000000000
--- a/arch/c6x/kernel/setup.c
+++ /dev/null
@@ -1,476 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#include <linux/dma-mapping.h>
-#include <linux/memblock.h>
-#include <linux/seq_file.h>
-#include <linux/clkdev.h>
-#include <linux/initrd.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of_fdt.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/cache.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/clk.h>
-#include <linux/cpu.h>
-#include <linux/fs.h>
-#include <linux/of.h>
-#include <linux/console.h>
-#include <linux/screen_info.h>
-
-#include <asm/sections.h>
-#include <asm/div64.h>
-#include <asm/setup.h>
-#include <asm/dscr.h>
-#include <asm/clock.h>
-#include <asm/soc.h>
-#include <asm/special_insns.h>
-
-static const char *c6x_soc_name;
-
-struct screen_info screen_info;
-
-int c6x_num_cores;
-EXPORT_SYMBOL_GPL(c6x_num_cores);
-
-unsigned int c6x_silicon_rev;
-EXPORT_SYMBOL_GPL(c6x_silicon_rev);
-
-/*
- * Device status register. This holds information
- * about device configuration needed by some drivers.
- */
-unsigned int c6x_devstat;
-EXPORT_SYMBOL_GPL(c6x_devstat);
-
-/*
- * Some SoCs have fuse registers holding a unique MAC
- * address. This is parsed out of the device tree with
- * the resulting MAC being held here.
- */
-unsigned char c6x_fuse_mac[6];
-
-unsigned long memory_start;
-unsigned long memory_end;
-EXPORT_SYMBOL(memory_end);
-
-unsigned long ram_start;
-unsigned long ram_end;
-
-/* Uncached memory for DMA consistent use (memdma=) */
-static unsigned long dma_start __initdata;
-static unsigned long dma_size __initdata;
-
-struct cpuinfo_c6x {
- const char *cpu_name;
- const char *cpu_voltage;
- const char *mmu;
- const char *fpu;
- char *cpu_rev;
- unsigned int core_id;
- char __cpu_rev[5];
-};
-
-static DEFINE_PER_CPU(struct cpuinfo_c6x, cpu_data);
-
-unsigned int ticks_per_ns_scaled;
-EXPORT_SYMBOL(ticks_per_ns_scaled);
-
-unsigned int c6x_core_freq;
-
-static void __init get_cpuinfo(void)
-{
- unsigned cpu_id, rev_id, csr;
- struct clk *coreclk = clk_get_sys(NULL, "core");
- unsigned long core_khz;
- u64 tmp;
- struct cpuinfo_c6x *p;
- struct device_node *node;
-
- p = &per_cpu(cpu_data, smp_processor_id());
-
- if (!IS_ERR(coreclk))
- c6x_core_freq = clk_get_rate(coreclk);
- else {
- printk(KERN_WARNING
- "Cannot find core clock frequency. Using 700MHz\n");
- c6x_core_freq = 700000000;
- }
-
- core_khz = c6x_core_freq / 1000;
-
- tmp = (uint64_t)core_khz << C6X_NDELAY_SCALE;
- do_div(tmp, 1000000);
- ticks_per_ns_scaled = tmp;
-
- csr = get_creg(CSR);
- cpu_id = csr >> 24;
- rev_id = (csr >> 16) & 0xff;
-
- p->mmu = "none";
- p->fpu = "none";
- p->cpu_voltage = "unknown";
-
- switch (cpu_id) {
- case 0:
- p->cpu_name = "C67x";
- p->fpu = "yes";
- break;
- case 2:
- p->cpu_name = "C62x";
- break;
- case 8:
- p->cpu_name = "C64x";
- break;
- case 12:
- p->cpu_name = "C64x";
- break;
- case 16:
- p->cpu_name = "C64x+";
- p->cpu_voltage = "1.2";
- break;
- case 21:
- p->cpu_name = "C66X";
- p->cpu_voltage = "1.2";
- break;
- default:
- p->cpu_name = "unknown";
- break;
- }
-
- if (cpu_id < 16) {
- switch (rev_id) {
- case 0x1:
- if (cpu_id > 8) {
- p->cpu_rev = "DM640/DM641/DM642/DM643";
- p->cpu_voltage = "1.2 - 1.4";
- } else {
- p->cpu_rev = "C6201";
- p->cpu_voltage = "2.5";
- }
- break;
- case 0x2:
- p->cpu_rev = "C6201B/C6202/C6211";
- p->cpu_voltage = "1.8";
- break;
- case 0x3:
- p->cpu_rev = "C6202B/C6203/C6204/C6205";
- p->cpu_voltage = "1.5";
- break;
- case 0x201:
- p->cpu_rev = "C6701 revision 0 (early CPU)";
- p->cpu_voltage = "1.8";
- break;
- case 0x202:
- p->cpu_rev = "C6701/C6711/C6712";
- p->cpu_voltage = "1.8";
- break;
- case 0x801:
- p->cpu_rev = "C64x";
- p->cpu_voltage = "1.5";
- break;
- default:
- p->cpu_rev = "unknown";
- }
- } else {
- p->cpu_rev = p->__cpu_rev;
- snprintf(p->__cpu_rev, sizeof(p->__cpu_rev), "0x%x", cpu_id);
- }
-
- p->core_id = get_coreid();
-
- for_each_of_cpu_node(node)
- ++c6x_num_cores;
-
- node = of_find_node_by_name(NULL, "soc");
- if (node) {
- if (of_property_read_string(node, "model", &c6x_soc_name))
- c6x_soc_name = "unknown";
- of_node_put(node);
- } else
- c6x_soc_name = "unknown";
-
- printk(KERN_INFO "CPU%d: %s rev %s, %s volts, %uMHz\n",
- p->core_id, p->cpu_name, p->cpu_rev,
- p->cpu_voltage, c6x_core_freq / 1000000);
-}
-
-/*
- * Early parsing of the command line
- */
-static u32 mem_size __initdata;
-
-/* "mem=" parsing. */
-static int __init early_mem(char *p)
-{
- if (!p)
- return -EINVAL;
-
- mem_size = memparse(p, &p);
- /* don't remove all of memory when handling "mem={invalid}" */
- if (mem_size == 0)
- return -EINVAL;
-
- return 0;
-}
-early_param("mem", early_mem);
-
-/* "memdma=<size>[@<address>]" parsing. */
-static int __init early_memdma(char *p)
-{
- if (!p)
- return -EINVAL;
-
- dma_size = memparse(p, &p);
- if (*p == '@')
- dma_start = memparse(p, &p);
-
- return 0;
-}
-early_param("memdma", early_memdma);
-
-int __init c6x_add_memory(phys_addr_t start, unsigned long size)
-{
- static int ram_found __initdata;
-
- /* We only handle one bank (the one with PAGE_OFFSET) for now */
- if (ram_found)
- return -EINVAL;
-
- if (start > PAGE_OFFSET || PAGE_OFFSET >= (start + size))
- return 0;
-
- ram_start = start;
- ram_end = start + size;
-
- ram_found = 1;
- return 0;
-}
-
-/*
- * Do early machine setup and device tree parsing. This is called very
- * early in the boot process.
- */
-notrace void __init machine_init(unsigned long dt_ptr)
-{
- void *dtb = __va(dt_ptr);
- void *fdt = __dtb_start;
-
- /* interrupts must be masked */
- set_creg(IER, 2);
-
- /*
- * Set the Interrupt Service Table (IST) to the beginning of the
- * vector table.
- */
- set_ist(_vectors_start);
-
- /*
- * dtb is passed in from bootloader.
- * fdt is linked in blob.
- */
- if (dtb && dtb != fdt)
- fdt = dtb;
-
- /* Do some early initialization based on the flat device tree */
- early_init_dt_scan(fdt);
-
- parse_early_param();
-}
-
-void __init setup_arch(char **cmdline_p)
-{
- phys_addr_t start, end;
- u64 i;
-
- printk(KERN_INFO "Initializing kernel\n");
-
- /* Initialize command line */
- *cmdline_p = boot_command_line;
-
- memory_end = ram_end;
- memory_end &= ~(PAGE_SIZE - 1);
-
- if (mem_size && (PAGE_OFFSET + PAGE_ALIGN(mem_size)) < memory_end)
- memory_end = PAGE_OFFSET + PAGE_ALIGN(mem_size);
-
- /* add block that this kernel can use */
- memblock_add(PAGE_OFFSET, memory_end - PAGE_OFFSET);
-
- /* reserve kernel text/data/bss */
- memblock_reserve(PAGE_OFFSET,
- PAGE_ALIGN((unsigned long)&_end - PAGE_OFFSET));
-
- if (dma_size) {
- /* align to cacheability granularity */
- dma_size = CACHE_REGION_END(dma_size);
-
- if (!dma_start)
- dma_start = memory_end - dma_size;
-
- /* align to cacheability granularity */
- dma_start = CACHE_REGION_START(dma_start);
-
- /* reserve DMA memory taken from kernel memory */
- if (memblock_is_region_memory(dma_start, dma_size))
- memblock_reserve(dma_start, dma_size);
- }
-
- memory_start = PAGE_ALIGN((unsigned int) &_end);
-
- printk(KERN_INFO "Memory Start=%08lx, Memory End=%08lx\n",
- memory_start, memory_end);
-
-#ifdef CONFIG_BLK_DEV_INITRD
- /*
- * Reserve initrd memory if in kernel memory.
- */
- if (initrd_start < initrd_end)
- if (memblock_is_region_memory(initrd_start,
- initrd_end - initrd_start))
- memblock_reserve(initrd_start,
- initrd_end - initrd_start);
-#endif
-
- init_mm.start_code = (unsigned long) &_stext;
- init_mm.end_code = (unsigned long) &_etext;
- init_mm.end_data = memory_start;
- init_mm.brk = memory_start;
-
- unflatten_and_copy_device_tree();
-
- c6x_cache_init();
-
- /* Set the whole external memory as non-cacheable */
- disable_caching(ram_start, ram_end - 1);
-
- /* Set caching of external RAM used by Linux */
- for_each_mem_range(i, &start, &end)
- enable_caching(CACHE_REGION_START(start),
- CACHE_REGION_START(end - 1));
-
-#ifdef CONFIG_BLK_DEV_INITRD
- /*
- * Enable caching for initrd which falls outside kernel memory.
- */
- if (initrd_start < initrd_end) {
- if (!memblock_is_region_memory(initrd_start,
- initrd_end - initrd_start))
- enable_caching(CACHE_REGION_START(initrd_start),
- CACHE_REGION_START(initrd_end - 1));
- }
-#endif
-
- /*
- * Disable caching for dma coherent memory taken from kernel memory.
- */
- if (dma_size && memblock_is_region_memory(dma_start, dma_size))
- disable_caching(dma_start,
- CACHE_REGION_START(dma_start + dma_size - 1));
-
- /* Initialize the coherent memory allocator */
- coherent_mem_init(dma_start, dma_size);
-
- max_low_pfn = PFN_DOWN(memory_end);
- min_low_pfn = PFN_UP(memory_start);
- max_pfn = max_low_pfn;
- max_mapnr = max_low_pfn - min_low_pfn;
-
- /* Get kmalloc into gear */
- paging_init();
-
- /*
- * Probe for Device State Configuration Registers.
- * We have to do this early in case timer needs to be enabled
- * through DSCR.
- */
- dscr_probe();
-
- /* We do this early for timer and core clock frequency */
- c64x_setup_clocks();
-
- /* Get CPU info */
- get_cpuinfo();
-
-#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
- conswitchp = &dummy_con;
-#endif
-}
-
-#define cpu_to_ptr(n) ((void *)((long)(n)+1))
-#define ptr_to_cpu(p) ((long)(p) - 1)
-
-static int show_cpuinfo(struct seq_file *m, void *v)
-{
- int n = ptr_to_cpu(v);
- struct cpuinfo_c6x *p = &per_cpu(cpu_data, n);
-
- if (n == 0) {
- seq_printf(m,
- "soc\t\t: %s\n"
- "soc revision\t: 0x%x\n"
- "soc cores\t: %d\n",
- c6x_soc_name, c6x_silicon_rev, c6x_num_cores);
- }
-
- seq_printf(m,
- "\n"
- "processor\t: %d\n"
- "cpu\t\t: %s\n"
- "core revision\t: %s\n"
- "core voltage\t: %s\n"
- "core id\t\t: %d\n"
- "mmu\t\t: %s\n"
- "fpu\t\t: %s\n"
- "cpu MHz\t\t: %u\n"
- "bogomips\t: %lu.%02lu\n\n",
- n,
- p->cpu_name, p->cpu_rev, p->cpu_voltage,
- p->core_id, p->mmu, p->fpu,
- (c6x_core_freq + 500000) / 1000000,
- (loops_per_jiffy/(500000/HZ)),
- (loops_per_jiffy/(5000/HZ))%100);
-
- return 0;
-}
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
- return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
-}
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-{
- ++*pos;
- return NULL;
-}
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-
-const struct seq_operations cpuinfo_op = {
- c_start,
- c_stop,
- c_next,
- show_cpuinfo
-};
-
-static struct cpu cpu_devices[NR_CPUS];
-
-static int __init topology_init(void)
-{
- int i;
-
- for_each_present_cpu(i)
- register_cpu(&cpu_devices[i], i);
-
- return 0;
-}
-
-subsys_initcall(topology_init);
diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c
deleted file mode 100644
index 862460c3b183..000000000000
--- a/arch/c6x/kernel/signal.c
+++ /dev/null
@@ -1,322 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- *
- * Updated for 2.6.34: Mark Salter <msalter@redhat.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/uaccess.h>
-#include <linux/syscalls.h>
-#include <linux/tracehook.h>
-
-#include <asm/asm-offsets.h>
-#include <asm/ucontext.h>
-#include <asm/cacheflush.h>
-
-
-/*
- * Do a signal return, undo the signal stack.
- */
-
-#define RETCODE_SIZE (9 << 2) /* 9 instructions = 36 bytes */
-
-struct rt_sigframe {
- struct siginfo __user *pinfo;
- void __user *puc;
- struct siginfo info;
- struct ucontext uc;
- unsigned long retcode[RETCODE_SIZE >> 2];
-};
-
-static int restore_sigcontext(struct pt_regs *regs,
- struct sigcontext __user *sc)
-{
- int err = 0;
-
- /* The access_ok check was done by caller, so use __get_user here */
-#define COPY(x) (err |= __get_user(regs->x, &sc->sc_##x))
-
- COPY(sp); COPY(a4); COPY(b4); COPY(a6); COPY(b6); COPY(a8); COPY(b8);
- COPY(a0); COPY(a1); COPY(a2); COPY(a3); COPY(a5); COPY(a7); COPY(a9);
- COPY(b0); COPY(b1); COPY(b2); COPY(b3); COPY(b5); COPY(b7); COPY(b9);
-
- COPY(a16); COPY(a17); COPY(a18); COPY(a19);
- COPY(a20); COPY(a21); COPY(a22); COPY(a23);
- COPY(a24); COPY(a25); COPY(a26); COPY(a27);
- COPY(a28); COPY(a29); COPY(a30); COPY(a31);
- COPY(b16); COPY(b17); COPY(b18); COPY(b19);
- COPY(b20); COPY(b21); COPY(b22); COPY(b23);
- COPY(b24); COPY(b25); COPY(b26); COPY(b27);
- COPY(b28); COPY(b29); COPY(b30); COPY(b31);
-
- COPY(csr); COPY(pc);
-
-#undef COPY
-
- return err;
-}
-
-asmlinkage int do_rt_sigreturn(struct pt_regs *regs)
-{
- struct rt_sigframe __user *frame;
- sigset_t set;
-
- /* Always make any pending restarted system calls return -EINTR */
- current->restart_block.fn = do_no_restart_syscall;
-
- /*
- * Since we stacked the signal on a dword boundary,
- * 'sp' should be dword aligned here. If it's
- * not, then the user is trying to mess with us.
- */
- if (regs->sp & 7)
- goto badframe;
-
- frame = (struct rt_sigframe __user *) ((unsigned long) regs->sp + 8);
-
- if (!access_ok(frame, sizeof(*frame)))
- goto badframe;
- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
- goto badframe;
-
- set_current_blocked(&set);
-
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
- goto badframe;
-
- return regs->a4;
-
-badframe:
- force_sig(SIGSEGV);
- return 0;
-}
-
-static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
- unsigned long mask)
-{
- int err = 0;
-
- err |= __put_user(mask, &sc->sc_mask);
-
- /* The access_ok check was done by caller, so use __put_user here */
-#define COPY(x) (err |= __put_user(regs->x, &sc->sc_##x))
-
- COPY(sp); COPY(a4); COPY(b4); COPY(a6); COPY(b6); COPY(a8); COPY(b8);
- COPY(a0); COPY(a1); COPY(a2); COPY(a3); COPY(a5); COPY(a7); COPY(a9);
- COPY(b0); COPY(b1); COPY(b2); COPY(b3); COPY(b5); COPY(b7); COPY(b9);
-
- COPY(a16); COPY(a17); COPY(a18); COPY(a19);
- COPY(a20); COPY(a21); COPY(a22); COPY(a23);
- COPY(a24); COPY(a25); COPY(a26); COPY(a27);
- COPY(a28); COPY(a29); COPY(a30); COPY(a31);
- COPY(b16); COPY(b17); COPY(b18); COPY(b19);
- COPY(b20); COPY(b21); COPY(b22); COPY(b23);
- COPY(b24); COPY(b25); COPY(b26); COPY(b27);
- COPY(b28); COPY(b29); COPY(b30); COPY(b31);
-
- COPY(csr); COPY(pc);
-
-#undef COPY
-
- return err;
-}
-
-static inline void __user *get_sigframe(struct ksignal *ksig,
- struct pt_regs *regs,
- unsigned long framesize)
-{
- unsigned long sp = sigsp(regs->sp, ksig);
-
- /*
- * No matter what happens, 'sp' must be dword
- * aligned. Otherwise, nasty things will happen
- */
- return (void __user *)((sp - framesize) & ~7);
-}
-
-static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
- struct pt_regs *regs)
-{
- struct rt_sigframe __user *frame;
- unsigned long __user *retcode;
- int err = 0;
-
- frame = get_sigframe(ksig, regs, sizeof(*frame));
-
- if (!access_ok(frame, sizeof(*frame)))
- return -EFAULT;
-
- err |= __put_user(&frame->info, &frame->pinfo);
- err |= __put_user(&frame->uc, &frame->puc);
- err |= copy_siginfo_to_user(&frame->info, &ksig->info);
-
- /* Clear all the bits of the ucontext we don't use. */
- err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
-
- err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
- err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-
- /* Set up to return from userspace */
- retcode = (unsigned long __user *) &frame->retcode;
-
- /* The access_ok check was done above, so use __put_user here */
-#define COPY(x) (err |= __put_user(x, retcode++))
-
- COPY(0x0000002AUL | (__NR_rt_sigreturn << 7));
- /* MVK __NR_rt_sigreturn,B0 */
- COPY(0x10000000UL); /* SWE */
- COPY(0x00006000UL); /* NOP 4 */
- COPY(0x00006000UL); /* NOP 4 */
- COPY(0x00006000UL); /* NOP 4 */
- COPY(0x00006000UL); /* NOP 4 */
- COPY(0x00006000UL); /* NOP 4 */
- COPY(0x00006000UL); /* NOP 4 */
- COPY(0x00006000UL); /* NOP 4 */
-
-#undef COPY
-
- if (err)
- return -EFAULT;
-
- flush_icache_range((unsigned long) &frame->retcode,
- (unsigned long) &frame->retcode + RETCODE_SIZE);
-
- retcode = (unsigned long __user *) &frame->retcode;
-
- /* Change user context to branch to signal handler */
- regs->sp = (unsigned long) frame - 8;
- regs->b3 = (unsigned long) retcode;
- regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
-
- /* Give the signal number to the handler */
- regs->a4 = ksig->sig;
-
- /*
- * For realtime signals we must also set the second and third
- * arguments for the signal handler.
- * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
- */
- regs->b4 = (unsigned long)&frame->info;
- regs->a6 = (unsigned long)&frame->uc;
-
- return 0;
-}
-
-static inline void
-handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
-{
- switch (regs->a4) {
- case -ERESTARTNOHAND:
- if (!has_handler)
- goto do_restart;
- regs->a4 = -EINTR;
- break;
-
- case -ERESTARTSYS:
- if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
- regs->a4 = -EINTR;
- break;
- }
- fallthrough;
- case -ERESTARTNOINTR:
-do_restart:
- regs->a4 = regs->orig_a4;
- regs->pc -= 4;
- break;
- }
-}
-
-/*
- * handle the actual delivery of a signal to userspace
- */
-static void handle_signal(struct ksignal *ksig, struct pt_regs *regs,
- int syscall)
-{
- int ret;
-
- /* Are we from a system call? */
- if (syscall) {
- /* If so, check system call restarting.. */
- switch (regs->a4) {
- case -ERESTART_RESTARTBLOCK:
- case -ERESTARTNOHAND:
- regs->a4 = -EINTR;
- break;
-
- case -ERESTARTSYS:
- if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
- regs->a4 = -EINTR;
- break;
- }
-
- fallthrough;
- case -ERESTARTNOINTR:
- regs->a4 = regs->orig_a4;
- regs->pc -= 4;
- }
- }
-
- /* Set up the stack frame */
- ret = setup_rt_frame(ksig, sigmask_to_save(), regs);
- signal_setup_done(ret, ksig, 0);
-}
-
-/*
- * handle a potential signal
- */
-static void do_signal(struct pt_regs *regs, int syscall)
-{
- struct ksignal ksig;
-
- /* we want the common case to go fast, which is why we may in certain
- * cases get here from kernel mode */
- if (!user_mode(regs))
- return;
-
- if (get_signal(&ksig)) {
- handle_signal(&ksig, regs, syscall);
- return;
- }
-
- /* did we come from a system call? */
- if (syscall) {
- /* restart the system call - no handlers present */
- switch (regs->a4) {
- case -ERESTARTNOHAND:
- case -ERESTARTSYS:
- case -ERESTARTNOINTR:
- regs->a4 = regs->orig_a4;
- regs->pc -= 4;
- break;
-
- case -ERESTART_RESTARTBLOCK:
- regs->a4 = regs->orig_a4;
- regs->b0 = __NR_restart_syscall;
- regs->pc -= 4;
- break;
- }
- }
-
- /* if there's no signal to deliver, we just put the saved sigmask
- * back */
- restore_saved_sigmask();
-}
-
-/*
- * notification of userspace execution resumption
- * - triggered by current->work.notify_resume
- */
-asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags,
- int syscall)
-{
- /* deal with pending signal delivery */
- if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
- do_signal(regs, syscall);
-
- if (thread_info_flags & (1 << TIF_NOTIFY_RESUME))
- tracehook_notify_resume(regs);
-}
diff --git a/arch/c6x/kernel/soc.c b/arch/c6x/kernel/soc.c
deleted file mode 100644
index 8362f9390e03..000000000000
--- a/arch/c6x/kernel/soc.c
+++ /dev/null
@@ -1,87 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Miscellaneous SoC-specific hooks.
- *
- * Copyright (C) 2011 Texas Instruments Incorporated
- * Author: Mark Salter <msalter@redhat.com>
- */
-#include <linux/module.h>
-#include <linux/ctype.h>
-#include <linux/etherdevice.h>
-#include <asm/setup.h>
-#include <asm/soc.h>
-
-struct soc_ops soc_ops;
-
-int soc_get_exception(void)
-{
- if (!soc_ops.get_exception)
- return -1;
- return soc_ops.get_exception();
-}
-
-void soc_assert_event(unsigned int evt)
-{
- if (soc_ops.assert_event)
- soc_ops.assert_event(evt);
-}
-
-static u8 cmdline_mac[6];
-
-static int __init get_mac_addr_from_cmdline(char *str)
-{
- int count, i, val;
-
- for (count = 0; count < 6 && *str; count++, str += 3) {
- if (!isxdigit(str[0]) || !isxdigit(str[1]))
- return 0;
- if (str[2] != ((count < 5) ? ':' : '\0'))
- return 0;
-
- for (i = 0, val = 0; i < 2; i++) {
- val = val << 4;
- val |= isdigit(str[i]) ?
- str[i] - '0' : toupper(str[i]) - 'A' + 10;
- }
- cmdline_mac[count] = val;
- }
- return 1;
-}
-__setup("emac_addr=", get_mac_addr_from_cmdline);
-
-/*
- * Setup the MAC address for SoC ethernet devices.
- *
- * Before calling this function, the ethernet driver will have
- * initialized the addr with local-mac-address from the device
- * tree (if found). Allow command line to override, but not
- * the fused address.
- */
-int soc_mac_addr(unsigned int index, u8 *addr)
-{
- int i, have_dt_mac = 0, have_cmdline_mac = 0, have_fuse_mac = 0;
-
- for (i = 0; i < 6; i++) {
- if (cmdline_mac[i])
- have_cmdline_mac = 1;
- if (c6x_fuse_mac[i])
- have_fuse_mac = 1;
- if (addr[i])
- have_dt_mac = 1;
- }
-
- /* cmdline overrides all */
- if (have_cmdline_mac)
- memcpy(addr, cmdline_mac, 6);
- else if (!have_dt_mac) {
- if (have_fuse_mac)
- memcpy(addr, c6x_fuse_mac, 6);
- else
- eth_random_addr(addr);
- }
-
- /* adjust for specific EMAC device */
- addr[5] += index * c6x_num_cores;
- return 1;
-}
-EXPORT_SYMBOL_GPL(soc_mac_addr);
diff --git a/arch/c6x/kernel/switch_to.S b/arch/c6x/kernel/switch_to.S
deleted file mode 100644
index b7f9f607042e..000000000000
--- a/arch/c6x/kernel/switch_to.S
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2011 Texas Instruments Incorporated
- * Author: Mark Salter (msalter@redhat.com)
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-
-#define SP B15
-
- /*
- * void __switch_to(struct thread_info *prev,
- * struct thread_info *next,
- * struct task_struct *tsk) ;
- */
-ENTRY(__switch_to)
- LDDW .D2T2 *+B4(THREAD_B15_14),B7:B6
- || MV .L2X A4,B5 ; prev
- || MV .L1X B4,A5 ; next
- || MVC .S2 RILC,B1
-
- STW .D2T2 B3,*+B5(THREAD_PC)
- || STDW .D1T1 A13:A12,*+A4(THREAD_A13_12)
- || MVC .S2 ILC,B0
-
- LDW .D2T2 *+B4(THREAD_PC),B3
- || LDDW .D1T1 *+A5(THREAD_A13_12),A13:A12
-
- STDW .D1T1 A11:A10,*+A4(THREAD_A11_10)
- || STDW .D2T2 B1:B0,*+B5(THREAD_RICL_ICL)
-#ifndef __DSBT__
- || MVKL .S2 current_ksp,B1
-#endif
-
- STDW .D2T2 B15:B14,*+B5(THREAD_B15_14)
- || STDW .D1T1 A15:A14,*+A4(THREAD_A15_14)
-#ifndef __DSBT__
- || MVKH .S2 current_ksp,B1
-#endif
-
- ;; Switch to next SP
- MV .S2 B7,SP
-#ifdef __DSBT__
- || STW .D2T2 B7,*+B14(current_ksp)
-#else
- || STW .D2T2 B7,*B1
- || MV .L2 B6,B14
-#endif
- || LDDW .D1T1 *+A5(THREAD_RICL_ICL),A1:A0
-
- STDW .D2T2 B11:B10,*+B5(THREAD_B11_10)
- || LDDW .D1T1 *+A5(THREAD_A15_14),A15:A14
-
- STDW .D2T2 B13:B12,*+B5(THREAD_B13_12)
- || LDDW .D1T1 *+A5(THREAD_A11_10),A11:A10
-
- B .S2 B3 ; return in next E1
- || LDDW .D2T2 *+B4(THREAD_B13_12),B13:B12
-
- LDDW .D2T2 *+B4(THREAD_B11_10),B11:B10
- NOP
-
- MV .L2X A0,B0
- || MV .S1 A6,A4
-
- MVC .S2 B0,ILC
- || MV .L2X A1,B1
-
- MVC .S2 B1,RILC
-ENDPROC(__switch_to)
diff --git a/arch/c6x/kernel/sys_c6x.c b/arch/c6x/kernel/sys_c6x.c
deleted file mode 100644
index 600277f057cf..000000000000
--- a/arch/c6x/kernel/sys_c6x.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#include <linux/module.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-
-#include <asm/syscalls.h>
-
-#ifdef CONFIG_ACCESS_CHECK
-int _access_ok(unsigned long addr, unsigned long size)
-{
- if (!size)
- return 1;
-
- if (!addr || addr > (0xffffffffUL - (size - 1)))
- goto _bad_access;
-
- if (uaccess_kernel())
- return 1;
-
- if (memory_start <= addr && (addr + size - 1) < memory_end)
- return 1;
-
-_bad_access:
- pr_debug("Bad access attempt: pid[%d] addr[%08lx] size[0x%lx]\n",
- current->pid, addr, size);
- return 0;
-}
-EXPORT_SYMBOL(_access_ok);
-#endif
-
-/* sys_cache_sync -- sync caches over given range */
-asmlinkage int sys_cache_sync(unsigned long s, unsigned long e)
-{
- L1D_cache_block_writeback_invalidate(s, e);
- L1P_cache_block_invalidate(s, e);
-
- return 0;
-}
-
-/* Provide the actual syscall number to call mapping. */
-#undef __SYSCALL
-#define __SYSCALL(nr, call) [nr] = (call),
-
-/*
- * Use trampolines
- */
-#define sys_pread64 sys_pread_c6x
-#define sys_pwrite64 sys_pwrite_c6x
-#define sys_truncate64 sys_truncate64_c6x
-#define sys_ftruncate64 sys_ftruncate64_c6x
-#define sys_fadvise64 sys_fadvise64_c6x
-#define sys_fadvise64_64 sys_fadvise64_64_c6x
-#define sys_fallocate sys_fallocate_c6x
-
-/* Use sys_mmap_pgoff directly */
-#define sys_mmap2 sys_mmap_pgoff
-
-/*
- * Note that we can't include <linux/unistd.h> here since the header
- * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
- */
-void *sys_call_table[__NR_syscalls] = {
- [0 ... __NR_syscalls-1] = sys_ni_syscall,
-#include <asm/unistd.h>
-};
diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
deleted file mode 100644
index f3ec91a87f4f..000000000000
--- a/arch/c6x/kernel/time.c
+++ /dev/null
@@ -1,63 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-
-#include <linux/kernel.h>
-#include <linux/clocksource.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/timex.h>
-#include <linux/profile.h>
-
-#include <asm/special_insns.h>
-#include <asm/timer64.h>
-
-static u32 sched_clock_multiplier;
-#define SCHED_CLOCK_SHIFT 16
-
-static u64 tsc_read(struct clocksource *cs)
-{
- return get_cycles();
-}
-
-static struct clocksource clocksource_tsc = {
- .name = "timestamp",
- .rating = 300,
- .read = tsc_read,
- .mask = CLOCKSOURCE_MASK(64),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-/*
- * scheduler clock - returns current time in nanoseconds.
- */
-u64 sched_clock(void)
-{
- u64 tsc = get_cycles();
-
- return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
-}
-
-void __init time_init(void)
-{
- u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
-
- do_div(tmp, c6x_core_freq);
- sched_clock_multiplier = tmp;
-
- clocksource_register_hz(&clocksource_tsc, c6x_core_freq);
-
- /* write anything into TSCL to enable counting */
- set_creg(TSCL, 0);
-
- /* probe for timer64 event timer */
- timer64_init();
-}
diff --git a/arch/c6x/kernel/traps.c b/arch/c6x/kernel/traps.c
deleted file mode 100644
index 2b9121c755be..000000000000
--- a/arch/c6x/kernel/traps.c
+++ /dev/null
@@ -1,409 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#include <linux/module.h>
-#include <linux/ptrace.h>
-#include <linux/sched/debug.h>
-#include <linux/bug.h>
-
-#include <asm/soc.h>
-#include <asm/special_insns.h>
-#include <asm/traps.h>
-
-int (*c6x_nmi_handler)(struct pt_regs *regs);
-
-void __init trap_init(void)
-{
- ack_exception(EXCEPT_TYPE_NXF);
- ack_exception(EXCEPT_TYPE_EXC);
- ack_exception(EXCEPT_TYPE_IXF);
- ack_exception(EXCEPT_TYPE_SXF);
- enable_exception();
-}
-
-void show_regs(struct pt_regs *regs)
-{
- pr_err("\n");
- show_regs_print_info(KERN_ERR);
- pr_err("PC: %08lx SP: %08lx\n", regs->pc, regs->sp);
- pr_err("Status: %08lx ORIG_A4: %08lx\n", regs->csr, regs->orig_a4);
- pr_err("A0: %08lx B0: %08lx\n", regs->a0, regs->b0);
- pr_err("A1: %08lx B1: %08lx\n", regs->a1, regs->b1);
- pr_err("A2: %08lx B2: %08lx\n", regs->a2, regs->b2);
- pr_err("A3: %08lx B3: %08lx\n", regs->a3, regs->b3);
- pr_err("A4: %08lx B4: %08lx\n", regs->a4, regs->b4);
- pr_err("A5: %08lx B5: %08lx\n", regs->a5, regs->b5);
- pr_err("A6: %08lx B6: %08lx\n", regs->a6, regs->b6);
- pr_err("A7: %08lx B7: %08lx\n", regs->a7, regs->b7);
- pr_err("A8: %08lx B8: %08lx\n", regs->a8, regs->b8);
- pr_err("A9: %08lx B9: %08lx\n", regs->a9, regs->b9);
- pr_err("A10: %08lx B10: %08lx\n", regs->a10, regs->b10);
- pr_err("A11: %08lx B11: %08lx\n", regs->a11, regs->b11);
- pr_err("A12: %08lx B12: %08lx\n", regs->a12, regs->b12);
- pr_err("A13: %08lx B13: %08lx\n", regs->a13, regs->b13);
- pr_err("A14: %08lx B14: %08lx\n", regs->a14, regs->dp);
- pr_err("A15: %08lx B15: %08lx\n", regs->a15, regs->sp);
- pr_err("A16: %08lx B16: %08lx\n", regs->a16, regs->b16);
- pr_err("A17: %08lx B17: %08lx\n", regs->a17, regs->b17);
- pr_err("A18: %08lx B18: %08lx\n", regs->a18, regs->b18);
- pr_err("A19: %08lx B19: %08lx\n", regs->a19, regs->b19);
- pr_err("A20: %08lx B20: %08lx\n", regs->a20, regs->b20);
- pr_err("A21: %08lx B21: %08lx\n", regs->a21, regs->b21);
- pr_err("A22: %08lx B22: %08lx\n", regs->a22, regs->b22);
- pr_err("A23: %08lx B23: %08lx\n", regs->a23, regs->b23);
- pr_err("A24: %08lx B24: %08lx\n", regs->a24, regs->b24);
- pr_err("A25: %08lx B25: %08lx\n", regs->a25, regs->b25);
- pr_err("A26: %08lx B26: %08lx\n", regs->a26, regs->b26);
- pr_err("A27: %08lx B27: %08lx\n", regs->a27, regs->b27);
- pr_err("A28: %08lx B28: %08lx\n", regs->a28, regs->b28);
- pr_err("A29: %08lx B29: %08lx\n", regs->a29, regs->b29);
- pr_err("A30: %08lx B30: %08lx\n", regs->a30, regs->b30);
- pr_err("A31: %08lx B31: %08lx\n", regs->a31, regs->b31);
-}
-
-void die(char *str, struct pt_regs *fp, int nr)
-{
- console_verbose();
- pr_err("%s: %08x\n", str, nr);
- show_regs(fp);
-
- pr_err("Process %s (pid: %d, stackpage=%08lx)\n",
- current->comm, current->pid, (PAGE_SIZE +
- (unsigned long) current));
-
- dump_stack();
- while (1)
- ;
-}
-
-static void die_if_kernel(char *str, struct pt_regs *fp, int nr)
-{
- if (user_mode(fp))
- return;
-
- die(str, fp, nr);
-}
-
-
-/* Internal exceptions */
-static struct exception_info iexcept_table[10] = {
- { "Oops - instruction fetch", SIGBUS, BUS_ADRERR },
- { "Oops - fetch packet", SIGBUS, BUS_ADRERR },
- { "Oops - execute packet", SIGILL, ILL_ILLOPC },
- { "Oops - undefined instruction", SIGILL, ILL_ILLOPC },
- { "Oops - resource conflict", SIGILL, ILL_ILLOPC },
- { "Oops - resource access", SIGILL, ILL_PRVREG },
- { "Oops - privilege", SIGILL, ILL_PRVOPC },
- { "Oops - loops buffer", SIGILL, ILL_ILLOPC },
- { "Oops - software exception", SIGILL, ILL_ILLTRP },
- { "Oops - unknown exception", SIGILL, ILL_ILLOPC }
-};
-
-/* External exceptions */
-static struct exception_info eexcept_table[128] = {
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
-
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
-
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
-
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - external exception", SIGBUS, BUS_ADRERR },
- { "Oops - CPU memory protection fault", SIGSEGV, SEGV_ACCERR },
- { "Oops - CPU memory protection fault in L1P", SIGSEGV, SEGV_ACCERR },
- { "Oops - DMA memory protection fault in L1P", SIGSEGV, SEGV_ACCERR },
- { "Oops - CPU memory protection fault in L1D", SIGSEGV, SEGV_ACCERR },
- { "Oops - DMA memory protection fault in L1D", SIGSEGV, SEGV_ACCERR },
- { "Oops - CPU memory protection fault in L2", SIGSEGV, SEGV_ACCERR },
- { "Oops - DMA memory protection fault in L2", SIGSEGV, SEGV_ACCERR },
- { "Oops - EMC CPU memory protection fault", SIGSEGV, SEGV_ACCERR },
- { "Oops - EMC bus error", SIGBUS, BUS_ADRERR }
-};
-
-static void do_trap(struct exception_info *except_info, struct pt_regs *regs)
-{
- unsigned long addr = instruction_pointer(regs);
-
- if (except_info->code != TRAP_BRKPT)
- pr_err("TRAP: %s PC[0x%lx] signo[%d] code[%d]\n",
- except_info->kernel_str, regs->pc,
- except_info->signo, except_info->code);
-
- die_if_kernel(except_info->kernel_str, regs, addr);
-
- force_sig_fault(except_info->signo, except_info->code,
- (void __user *)addr);
-}
-
-/*
- * Process an internal exception (non maskable)
- */
-static int process_iexcept(struct pt_regs *regs)
-{
- unsigned int iexcept_report = get_iexcept();
- unsigned int iexcept_num;
-
- ack_exception(EXCEPT_TYPE_IXF);
-
- pr_err("IEXCEPT: PC[0x%lx]\n", regs->pc);
-
- while (iexcept_report) {
- iexcept_num = __ffs(iexcept_report);
- iexcept_report &= ~(1 << iexcept_num);
- set_iexcept(iexcept_report);
- if (*(unsigned int *)regs->pc == BKPT_OPCODE) {
- /* This is a breakpoint */
- struct exception_info bkpt_exception = {
- "Oops - undefined instruction",
- SIGTRAP, TRAP_BRKPT
- };
- do_trap(&bkpt_exception, regs);
- iexcept_report &= ~(0xFF);
- set_iexcept(iexcept_report);
- continue;
- }
-
- do_trap(&iexcept_table[iexcept_num], regs);
- }
- return 0;
-}
-
-/*
- * Process an external exception (maskable)
- */
-static void process_eexcept(struct pt_regs *regs)
-{
- int evt;
-
- pr_err("EEXCEPT: PC[0x%lx]\n", regs->pc);
-
- while ((evt = soc_get_exception()) >= 0)
- do_trap(&eexcept_table[evt], regs);
-
- ack_exception(EXCEPT_TYPE_EXC);
-}
-
-/*
- * Main exception processing
- */
-asmlinkage int process_exception(struct pt_regs *regs)
-{
- unsigned int type;
- unsigned int type_num;
- unsigned int ie_num = 9; /* default is unknown exception */
-
- while ((type = get_except_type()) != 0) {
- type_num = fls(type) - 1;
-
- switch (type_num) {
- case EXCEPT_TYPE_NXF:
- ack_exception(EXCEPT_TYPE_NXF);
- if (c6x_nmi_handler)
- (c6x_nmi_handler)(regs);
- else
- pr_alert("NMI interrupt!\n");
- break;
-
- case EXCEPT_TYPE_IXF:
- if (process_iexcept(regs))
- return 1;
- break;
-
- case EXCEPT_TYPE_EXC:
- process_eexcept(regs);
- break;
-
- case EXCEPT_TYPE_SXF:
- ie_num = 8;
- default:
- ack_exception(type_num);
- do_trap(&iexcept_table[ie_num], regs);
- break;
- }
- }
- return 0;
-}
-
-static int kstack_depth_to_print = 48;
-
-static void show_trace(unsigned long *stack, unsigned long *endstack,
- const char *loglvl)
-{
- unsigned long addr;
- int i;
-
- printk("%sCall trace:", loglvl);
- i = 0;
- while (stack + 1 <= endstack) {
- addr = *stack++;
- /*
- * If the address is either in the text segment of the
- * kernel, or in the region which contains vmalloc'ed
- * memory, it *may* be the address of a calling
- * routine; if so, print it so that someone tracing
- * down the cause of the crash will be able to figure
- * out the call path that was taken.
- */
- if (__kernel_text_address(addr)) {
-#ifndef CONFIG_KALLSYMS
- if (i % 5 == 0)
- printk("%s\n ", loglvl);
-#endif
- printk("%s [<%08lx>] %pS\n", loglvl, addr, (void *)addr);
- i++;
- }
- }
- printk("%s\n", loglvl);
-}
-
-void show_stack(struct task_struct *task, unsigned long *stack,
- const char *loglvl)
-{
- unsigned long *p, *endstack;
- int i;
-
- if (!stack) {
- if (task && task != current)
- /* We know this is a kernel stack,
- so this is the start/end */
- stack = (unsigned long *)thread_saved_ksp(task);
- else
- stack = (unsigned long *)&stack;
- }
- endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1)
- & -THREAD_SIZE);
-
- pr_debug("Stack from %08lx:", (unsigned long)stack);
- for (i = 0, p = stack; i < kstack_depth_to_print; i++) {
- if (p + 1 > endstack)
- break;
- if (i % 8 == 0)
- pr_cont("\n ");
- pr_cont(" %08lx", *p++);
- }
- pr_cont("\n");
- show_trace(stack, endstack, loglvl);
-}
-
-int is_valid_bugaddr(unsigned long addr)
-{
- return __kernel_text_address(addr);
-}
diff --git a/arch/c6x/kernel/vectors.S b/arch/c6x/kernel/vectors.S
deleted file mode 100644
index ad3dc006a6d3..000000000000
--- a/arch/c6x/kernel/vectors.S
+++ /dev/null
@@ -1,78 +0,0 @@
-; SPDX-License-Identifier: GPL-2.0-only
-;
-; Port on Texas Instruments TMS320C6x architecture
-;
-; Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
-; Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
-;
-; This section handles all the interrupt vector routines.
-; At RESET the processor sets up the DRAM timing parameters and
-; branches to the label _c_int00 which handles initialization for the C code.
-;
-
-#define ALIGNMENT 5
-
- .macro IRQVEC name, handler
- .align ALIGNMENT
- .hidden \name
- .global \name
-\name:
-#ifdef CONFIG_C6X_BIG_KERNEL
- STW .D2T1 A0,*B15--[2]
- || MVKL .S1 \handler,A0
- MVKH .S1 \handler,A0
- B .S2X A0
- LDW .D2T1 *++B15[2],A0
- NOP 4
- NOP
- NOP
- .endm
-#else /* CONFIG_C6X_BIG_KERNEL */
- B .S2 \handler
- NOP
- NOP
- NOP
- NOP
- NOP
- NOP
- NOP
- .endm
-#endif /* CONFIG_C6X_BIG_KERNEL */
-
- .sect ".vectors","ax"
- .align ALIGNMENT
- .global RESET
- .hidden RESET
-RESET:
-#ifdef CONFIG_C6X_BIG_KERNEL
- MVKL .S1 _c_int00,A0 ; branch to _c_int00
- MVKH .S1 _c_int00,A0
- B .S2X A0
-#else
- B .S2 _c_int00
- NOP
- NOP
-#endif
- NOP
- NOP
- NOP
- NOP
- NOP
-
-
- IRQVEC NMI,_nmi_handler ; NMI interrupt
- IRQVEC AINT,_bad_interrupt ; reserved
- IRQVEC MSGINT,_bad_interrupt ; reserved
-
- IRQVEC INT4,_int4_handler
- IRQVEC INT5,_int5_handler
- IRQVEC INT6,_int6_handler
- IRQVEC INT7,_int7_handler
- IRQVEC INT8,_int8_handler
- IRQVEC INT9,_int9_handler
- IRQVEC INT10,_int10_handler
- IRQVEC INT11,_int11_handler
- IRQVEC INT12,_int12_handler
- IRQVEC INT13,_int13_handler
- IRQVEC INT14,_int14_handler
- IRQVEC INT15,_int15_handler
diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S
deleted file mode 100644
index ac99ba0864bf..000000000000
--- a/arch/c6x/kernel/vmlinux.lds.S
+++ /dev/null
@@ -1,151 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ld script for the c6x kernel
- *
- * Copyright (C) 2010, 2011 Texas Instruments Incorporated
- * Mark Salter <msalter@redhat.com>
- */
-
-#define RO_EXCEPTION_TABLE_ALIGN 16
-
-#include <asm-generic/vmlinux.lds.h>
-#include <asm/thread_info.h>
-#include <asm/page.h>
-
-ENTRY(_c_int00)
-
-#if defined(CONFIG_CPU_BIG_ENDIAN)
-jiffies = jiffies_64 + 4;
-#else
-jiffies = jiffies_64;
-#endif
-
-#define READONLY_SEGMENT_START \
- . = PAGE_OFFSET;
-#define READWRITE_SEGMENT_START \
- . = ALIGN(128); \
- _data_lma = .;
-
-SECTIONS
-{
- /*
- * Start kernel read only segment
- */
- READONLY_SEGMENT_START
-
- .vectors :
- {
- _vectors_start = .;
- *(.vectors)
- . = ALIGN(0x400);
- _vectors_end = .;
- }
-
- /*
- * This section contains data which may be shared with other
- * cores. It needs to be a fixed offset from PAGE_OFFSET
- * regardless of kernel configuration.
- */
- .virtio_ipc_dev :
- {
- *(.virtio_ipc_dev)
- }
-
- . = ALIGN(PAGE_SIZE);
- __init_begin = .;
- .init :
- {
- _sinittext = .;
- HEAD_TEXT
- INIT_TEXT
- _einittext = .;
- }
-
- INIT_DATA_SECTION(16)
-
- PERCPU_SECTION(128)
-
- . = ALIGN(PAGE_SIZE);
- __init_end = .;
-
- .text :
- {
- _text = .;
- _stext = .;
- TEXT_TEXT
- SCHED_TEXT
- CPUIDLE_TEXT
- LOCK_TEXT
- IRQENTRY_TEXT
- SOFTIRQENTRY_TEXT
- KPROBES_TEXT
- *(.fixup)
- *(.gnu.warning)
- }
-
- RO_DATA(PAGE_SIZE)
- .const :
- {
- *(.const .const.* .gnu.linkonce.r.*)
- *(.switch)
- }
-
- _etext = .;
-
- /*
- * Start kernel read-write segment.
- */
- READWRITE_SEGMENT_START
- _sdata = .;
-
- .fardata : AT(ADDR(.fardata) - LOAD_OFFSET)
- {
- INIT_TASK_DATA(THREAD_SIZE)
- NOSAVE_DATA
- PAGE_ALIGNED_DATA(PAGE_SIZE)
- CACHELINE_ALIGNED_DATA(128)
- READ_MOSTLY_DATA(128)
- DATA_DATA
- CONSTRUCTORS
- *(.data1)
- *(.fardata .fardata.*)
- *(.data.debug_bpt)
- }
-
- .neardata ALIGN(8) : AT(ADDR(.neardata) - LOAD_OFFSET)
- {
- *(.neardata2 .neardata2.* .gnu.linkonce.s2.*)
- *(.neardata .neardata.* .gnu.linkonce.s.*)
- . = ALIGN(8);
- }
-
- BUG_TABLE
-
- _edata = .;
-
- __bss_start = .;
- SBSS(8)
- BSS(8)
- .far :
- {
- . = ALIGN(8);
- *(.dynfar)
- *(.far .far.* .gnu.linkonce.b.*)
- . = ALIGN(8);
- }
- __bss_stop = .;
-
- _end = .;
-
- DWARF_DEBUG
-
- /DISCARD/ :
- {
- EXIT_TEXT
- EXIT_DATA
- EXIT_CALL
- *(.discard)
- *(.discard.*)
- *(.interp)
- }
-}
diff --git a/arch/c6x/lib/Makefile b/arch/c6x/lib/Makefile
deleted file mode 100644
index e182004f82fe..000000000000
--- a/arch/c6x/lib/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for arch/c6x/lib/
-#
-
-lib-y := divu.o divi.o pop_rts.o push_rts.o remi.o remu.o strasgi.o llshru.o
-lib-y += llshr.o llshl.o negll.o mpyll.o divremi.o divremu.o
-lib-y += checksum.o csum_64plus.o memcpy_64plus.o strasgi_64plus.o
diff --git a/arch/c6x/lib/checksum.c b/arch/c6x/lib/checksum.c
deleted file mode 100644
index dff2e2ec6e64..000000000000
--- a/arch/c6x/lib/checksum.c
+++ /dev/null
@@ -1,11 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- */
-#include <linux/module.h>
-#include <net/checksum.h>
-
-/* These are from csum_64plus.S */
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(ip_compute_csum);
-EXPORT_SYMBOL(ip_fast_csum);
diff --git a/arch/c6x/lib/csum_64plus.S b/arch/c6x/lib/csum_64plus.S
deleted file mode 100644
index 57148866d8d3..000000000000
--- a/arch/c6x/lib/csum_64plus.S
+++ /dev/null
@@ -1,414 +0,0 @@
-; SPDX-License-Identifier: GPL-2.0-only
-;
-; linux/arch/c6x/lib/csum_64plus.s
-;
-; Port on Texas Instruments TMS320C6x architecture
-;
-; Copyright (C) 2006, 2009, 2010, 2011 Texas Instruments Incorporated
-; Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
-;
-#include <linux/linkage.h>
-
-;
-;unsigned int csum_partial_copy_nocheck(const char *src, char * dst,
-; int len, int sum)
-;
-; A4: src
-; B4: dst
-; A6: len
-; B6: sum
-; return csum in A4
-;
-
- .text
-ENTRY(csum_partial_copy_nocheck)
- MVC .S2 ILC,B30
-
- ZERO .D1 A9 ; csum (a side)
-|| ZERO .D2 B9 ; csum (b side)
-|| SHRU .S2X A6,2,B5 ; len / 4
-
- ;; Check alignment and size
- AND .S1 3,A4,A1
-|| AND .S2 3,B4,B0
- OR .L2X B0,A1,B0 ; non aligned condition
-|| MVC .S2 B5,ILC
-|| MVK .D2 1,B2
-|| MV .D1X B5,A1 ; words condition
- [!A1] B .S1 L8
- [B0] BNOP .S1 L6,5
-
- SPLOOP 1
-
- ;; Main loop for aligned words
- LDW .D1T1 *A4++,A7
- NOP 4
- MV .S2X A7,B7
-|| EXTU .S1 A7,0,16,A16
- STW .D2T2 B7,*B4++
-|| MPYU .M2 B7,B2,B8
-|| ADD .L1 A16,A9,A9
- NOP
- SPKERNEL 8,0
-|| ADD .L2 B8,B9,B9
-
- ZERO .D1 A1
-|| ADD .L1X A9,B9,A9 ; add csum from a and b sides
-
-L6:
- [!A1] BNOP .S1 L8,5
-
- ;; Main loop for non-aligned words
- SPLOOP 2
- || MVK .L1 1,A2
-
- LDNW .D1T1 *A4++,A7
- NOP 3
-
- NOP
- MV .S2X A7,B7
- || EXTU .S1 A7,0,16,A16
- || MPYU .M1 A7,A2,A8
-
- ADD .L1 A16,A9,A9
- SPKERNEL 6,0
- || STNW .D2T2 B7,*B4++
- || ADD .L1 A8,A9,A9
-
-L8: AND .S2X 2,A6,B5
- CMPGT .L2 B5,0,B0
- [!B0] BNOP .S1 L82,4
-
- ;; Manage half-word
- ZERO .L1 A7
-|| ZERO .D1 A8
-
-#ifdef CONFIG_CPU_BIG_ENDIAN
-
- LDBU .D1T1 *A4++,A7
- LDBU .D1T1 *A4++,A8
- NOP 3
- SHL .S1 A7,8,A0
- ADD .S1 A8,A9,A9
- STB .D2T1 A7,*B4++
-|| ADD .S1 A0,A9,A9
- STB .D2T1 A8,*B4++
-
-#else
-
- LDBU .D1T1 *A4++,A7
- LDBU .D1T1 *A4++,A8
- NOP 3
- ADD .S1 A7,A9,A9
- SHL .S1 A8,8,A0
-
- STB .D2T1 A7,*B4++
-|| ADD .S1 A0,A9,A9
- STB .D2T1 A8,*B4++
-
-#endif
-
- ;; Manage eventually the last byte
-L82: AND .S2X 1,A6,B0
- [!B0] BNOP .S1 L9,5
-
-|| ZERO .L1 A7
-
-L83: LDBU .D1T1 *A4++,A7
- NOP 4
-
- MV .L2X A7,B7
-
-#ifdef CONFIG_CPU_BIG_ENDIAN
-
- STB .D2T2 B7,*B4++
-|| SHL .S1 A7,8,A7
- ADD .S1 A7,A9,A9
-
-#else
-
- STB .D2T2 B7,*B4++
-|| ADD .S1 A7,A9,A9
-
-#endif
-
- ;; Fold the csum
-L9: SHRU .S2X A9,16,B0
- [!B0] BNOP .S1 L10,5
-
-L91: SHRU .S2X A9,16,B4
-|| EXTU .S1 A9,16,16,A3
- ADD .D1X A3,B4,A9
-
- SHRU .S1 A9,16,A0
- [A0] BNOP .S1 L91,5
-
-L10: MV .D1 A9,A4
-
- BNOP .S2 B3,4
- MVC .S2 B30,ILC
-ENDPROC(csum_partial_copy_nocheck)
-
-;
-;unsigned short
-;ip_fast_csum(unsigned char *iph, unsigned int ihl)
-;{
-; unsigned int checksum = 0;
-; unsigned short *tosum = (unsigned short *) iph;
-; int len;
-;
-; len = ihl*4;
-;
-; if (len <= 0)
-; return 0;
-;
-; while(len) {
-; len -= 2;
-; checksum += *tosum++;
-; }
-; if (len & 1)
-; checksum += *(unsigned char*) tosum;
-;
-; while(checksum >> 16)
-; checksum = (checksum & 0xffff) + (checksum >> 16);
-;
-; return ~checksum;
-;}
-;
-; A4: iph
-; B4: ihl
-; return checksum in A4
-;
- .text
-
-ENTRY(ip_fast_csum)
- ZERO .D1 A5
- || MVC .S2 ILC,B30
- SHL .S2 B4,2,B0
- CMPGT .L2 B0,0,B1
- [!B1] BNOP .S1 L15,4
- [!B1] ZERO .D1 A3
-
- [!B0] B .S1 L12
- SHRU .S2 B0,1,B0
- MVC .S2 B0,ILC
- NOP 3
-
- SPLOOP 1
- LDHU .D1T1 *A4++,A3
- NOP 3
- NOP
- SPKERNEL 5,0
- || ADD .L1 A3,A5,A5
-
-L12: SHRU .S1 A5,16,A0
- [!A0] BNOP .S1 L14,5
-
-L13: SHRU .S2X A5,16,B4
- EXTU .S1 A5,16,16,A3
- ADD .D1X A3,B4,A5
- SHRU .S1 A5,16,A0
- [A0] BNOP .S1 L13,5
-
-L14: NOT .D1 A5,A3
- EXTU .S1 A3,16,16,A3
-
-L15: BNOP .S2 B3,3
- MVC .S2 B30,ILC
- MV .D1 A3,A4
-ENDPROC(ip_fast_csum)
-
-;
-;unsigned short
-;do_csum(unsigned char *buff, unsigned int len)
-;{
-; int odd, count;
-; unsigned int result = 0;
-;
-; if (len <= 0)
-; goto out;
-; odd = 1 & (unsigned long) buff;
-; if (odd) {
-;#ifdef __LITTLE_ENDIAN
-; result += (*buff << 8);
-;#else
-; result = *buff;
-;#endif
-; len--;
-; buff++;
-; }
-; count = len >> 1; /* nr of 16-bit words.. */
-; if (count) {
-; if (2 & (unsigned long) buff) {
-; result += *(unsigned short *) buff;
-; count--;
-; len -= 2;
-; buff += 2;
-; }
-; count >>= 1; /* nr of 32-bit words.. */
-; if (count) {
-; unsigned int carry = 0;
-; do {
-; unsigned int w = *(unsigned int *) buff;
-; count--;
-; buff += 4;
-; result += carry;
-; result += w;
-; carry = (w > result);
-; } while (count);
-; result += carry;
-; result = (result & 0xffff) + (result >> 16);
-; }
-; if (len & 2) {
-; result += *(unsigned short *) buff;
-; buff += 2;
-; }
-; }
-; if (len & 1)
-;#ifdef __LITTLE_ENDIAN
-; result += *buff;
-;#else
-; result += (*buff << 8);
-;#endif
-; result = (result & 0xffff) + (result >> 16);
-; /* add up carry.. */
-; result = (result & 0xffff) + (result >> 16);
-; if (odd)
-; result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
-;out:
-; return result;
-;}
-;
-; A4: buff
-; B4: len
-; return checksum in A4
-;
-
-ENTRY(do_csum)
- CMPGT .L2 B4,0,B0
- [!B0] BNOP .S1 L26,3
- EXTU .S1 A4,31,31,A0
-
- MV .L1 A0,A3
-|| MV .S1X B3,A5
-|| MV .L2 B4,B3
-|| ZERO .D1 A1
-
-#ifdef CONFIG_CPU_BIG_ENDIAN
- [A0] SUB .L2 B3,1,B3
-|| [A0] LDBU .D1T1 *A4++,A1
-#else
- [!A0] BNOP .S1 L21,5
-|| [A0] LDBU .D1T1 *A4++,A0
- SUB .L2 B3,1,B3
-|| SHL .S1 A0,8,A1
-L21:
-#endif
- SHR .S2 B3,1,B0
- [!B0] BNOP .S1 L24,3
- MVK .L1 2,A0
- AND .L1 A4,A0,A0
-
- [!A0] BNOP .S1 L22,5
-|| [A0] LDHU .D1T1 *A4++,A0
- SUB .L2 B0,1,B0
-|| SUB .S2 B3,2,B3
-|| ADD .L1 A0,A1,A1
-L22:
- SHR .S2 B0,1,B0
-|| ZERO .L1 A0
-
- [!B0] BNOP .S1 L23,5
-|| [B0] MVC .S2 B0,ILC
-
- SPLOOP 3
- SPMASK L1
-|| MV .L1 A1,A2
-|| LDW .D1T1 *A4++,A1
-
- NOP 4
- ADD .L1 A0,A1,A0
- ADD .L1 A2,A0,A2
-
- SPKERNEL 1,2
-|| CMPGTU .L1 A1,A2,A0
-
- ADD .L1 A0,A2,A6
- EXTU .S1 A6,16,16,A7
- SHRU .S2X A6,16,B0
- NOP 1
- ADD .L1X A7,B0,A1
-L23:
- MVK .L2 2,B0
- AND .L2 B3,B0,B0
- [B0] LDHU .D1T1 *A4++,A0
- NOP 4
- [B0] ADD .L1 A0,A1,A1
-L24:
- EXTU .S2 B3,31,31,B0
-#ifdef CONFIG_CPU_BIG_ENDIAN
- [!B0] BNOP .S1 L25,4
-|| [B0] LDBU .D1T1 *A4,A0
- SHL .S1 A0,8,A0
- ADD .L1 A0,A1,A1
-L25:
-#else
- [B0] LDBU .D1T1 *A4,A0
- NOP 4
- [B0] ADD .L1 A0,A1,A1
-#endif
- EXTU .S1 A1,16,16,A0
- SHRU .S2X A1,16,B0
- NOP 1
- ADD .L1X A0,B0,A0
- SHRU .S1 A0,16,A1
- ADD .L1 A0,A1,A0
- EXTU .S1 A0,16,16,A1
- EXTU .S1 A1,16,24,A2
-
- EXTU .S1 A1,24,16,A0
-|| MV .L2X A3,B0
-
- [B0] OR .L1 A0,A2,A1
-L26:
- NOP 1
- BNOP .S2X A5,4
- MV .L1 A1,A4
-ENDPROC(do_csum)
-
-;__wsum csum_partial(const void *buff, int len, __wsum wsum)
-;{
-; unsigned int sum = (__force unsigned int)wsum;
-; unsigned int result = do_csum(buff, len);
-;
-; /* add in old sum, and carry.. */
-; result += sum;
-; if (sum > result)
-; result += 1;
-; return (__force __wsum)result;
-;}
-;
-ENTRY(csum_partial)
- MV .L1X B3,A9
-|| CALLP .S2 do_csum,B3
-|| MV .S1 A6,A8
- BNOP .S2X A9,2
- ADD .L1 A8,A4,A1
- CMPGTU .L1 A8,A1,A0
- ADD .L1 A1,A0,A4
-ENDPROC(csum_partial)
-
-;unsigned short
-;ip_compute_csum(unsigned char *buff, unsigned int len)
-;
-; A4: buff
-; B4: len
-; return checksum in A4
-
-ENTRY(ip_compute_csum)
- MV .L1X B3,A9
-|| CALLP .S2 do_csum,B3
- BNOP .S2X A9,3
- NOT .S1 A4,A4
- CLR .S1 A4,16,31,A4
-ENDPROC(ip_compute_csum)
diff --git a/arch/c6x/lib/divi.S b/arch/c6x/lib/divi.S
deleted file mode 100644
index d1764ae0b519..000000000000
--- a/arch/c6x/lib/divi.S
+++ /dev/null
@@ -1,41 +0,0 @@
-;; SPDX-License-Identifier: GPL-2.0-or-later
-;; Copyright 2010 Free Software Foundation, Inc.
-;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
-;;
-
-#include <linux/linkage.h>
-
- ;; ABI considerations for the divide functions
- ;; The following registers are call-used:
- ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
- ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
- ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
- ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
- ;;
- ;; In our implementation, divu and remu are leaf functions,
- ;; while both divi and remi call into divu.
- ;; A0 is not clobbered by any of the functions.
- ;; divu does not clobber B2 either, which is taken advantage of
- ;; in remi.
- ;; divi uses B5 to hold the original return address during
- ;; the call to divu.
- ;; remi uses B2 and A5 to hold the input values during the
- ;; call to divu. It stores B3 in on the stack.
-
- .text
-ENTRY(__c6xabi_divi)
- call .s2 __c6xabi_divu
-|| mv .d2 B3, B5
-|| cmpgt .l1 0, A4, A1
-|| cmpgt .l2 0, B4, B1
-
- [A1] neg .l1 A4, A4
-|| [B1] neg .l2 B4, B4
-|| xor .s1x A1, B1, A1
- [A1] addkpc .s2 _divu_ret, B3, 4
-_divu_ret:
- neg .l1 A4, A4
-|| mv .l2 B3,B5
-|| ret .s2 B5
- nop 5
-ENDPROC(__c6xabi_divi)
diff --git a/arch/c6x/lib/divremi.S b/arch/c6x/lib/divremi.S
deleted file mode 100644
index 575fc57a8a76..000000000000
--- a/arch/c6x/lib/divremi.S
+++ /dev/null
@@ -1,34 +0,0 @@
-;; SPDX-License-Identifier: GPL-2.0-or-later
-;; Copyright 2010 Free Software Foundation, Inc.
-;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
-;;
-
-#include <linux/linkage.h>
-
- .text
-ENTRY(__c6xabi_divremi)
- stw .d2t2 B3, *B15--[2]
-|| cmpgt .l1 0, A4, A1
-|| cmpgt .l2 0, B4, B2
-|| mv .s1 A4, A5
-|| call .s2 __c6xabi_divu
-
- [A1] neg .l1 A4, A4
-|| [B2] neg .l2 B4, B4
-|| xor .s2x B2, A1, B0
-|| mv .d2 B4, B2
-
- [B0] addkpc .s2 _divu_ret_1, B3, 1
- [!B0] addkpc .s2 _divu_ret_2, B3, 1
- nop 2
-_divu_ret_1:
- neg .l1 A4, A4
-_divu_ret_2:
- ldw .d2t2 *++B15[2], B3
-
- mpy32 .m1x A4, B2, A6
- nop 3
- ret .s2 B3
- sub .l1 A5, A6, A5
- nop 4
-ENDPROC(__c6xabi_divremi)
diff --git a/arch/c6x/lib/divremu.S b/arch/c6x/lib/divremu.S
deleted file mode 100644
index 5f6a6a2997ae..000000000000
--- a/arch/c6x/lib/divremu.S
+++ /dev/null
@@ -1,75 +0,0 @@
-;; SPDX-License-Identifier: GPL-2.0-or-later
-;; Copyright 2011 Free Software Foundation, Inc.
-;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
-;;
-
-#include <linux/linkage.h>
-
- .text
-ENTRY(__c6xabi_divremu)
- ;; We use a series of up to 31 subc instructions. First, we find
- ;; out how many leading zero bits there are in the divisor. This
- ;; gives us both a shift count for aligning (shifting) the divisor
- ;; to the, and the number of times we have to execute subc.
-
- ;; At the end, we have both the remainder and most of the quotient
- ;; in A4. The top bit of the quotient is computed first and is
- ;; placed in A2.
-
- ;; Return immediately if the dividend is zero. Setting B4 to 1
- ;; is a trick to allow us to leave the following insns in the jump
- ;; delay slot without affecting the result.
- mv .s2x A4, B1
-
- [b1] lmbd .l2 1, B4, B1
-||[!b1] b .s2 B3 ; RETURN A
-||[!b1] mvk .d2 1, B4
-
-||[!b1] zero .s1 A5
- mv .l1x B1, A6
-|| shl .s2 B4, B1, B4
-
- ;; The loop performs a maximum of 28 steps, so we do the
- ;; first 3 here.
- cmpltu .l1x A4, B4, A2
- [!A2] sub .l1x A4, B4, A4
-|| shru .s2 B4, 1, B4
-|| xor .s1 1, A2, A2
-
- shl .s1 A2, 31, A2
-|| [b1] subc .l1x A4,B4,A4
-|| [b1] add .s2 -1, B1, B1
- [b1] subc .l1x A4,B4,A4
-|| [b1] add .s2 -1, B1, B1
-
- ;; RETURN A may happen here (note: must happen before the next branch)
-__divremu0:
- cmpgt .l2 B1, 7, B0
-|| [b1] subc .l1x A4,B4,A4
-|| [b1] add .s2 -1, B1, B1
- [b1] subc .l1x A4,B4,A4
-|| [b1] add .s2 -1, B1, B1
-|| [b0] b .s1 __divremu0
- [b1] subc .l1x A4,B4,A4
-|| [b1] add .s2 -1, B1, B1
- [b1] subc .l1x A4,B4,A4
-|| [b1] add .s2 -1, B1, B1
- [b1] subc .l1x A4,B4,A4
-|| [b1] add .s2 -1, B1, B1
- [b1] subc .l1x A4,B4,A4
-|| [b1] add .s2 -1, B1, B1
- [b1] subc .l1x A4,B4,A4
-|| [b1] add .s2 -1, B1, B1
- ;; loop backwards branch happens here
-
- ret .s2 B3
-|| mvk .s1 32, A1
- sub .l1 A1, A6, A6
-|| extu .s1 A4, A6, A5
- shl .s1 A4, A6, A4
- shru .s1 A4, 1, A4
-|| sub .l1 A6, 1, A6
- or .l1 A2, A4, A4
- shru .s1 A4, A6, A4
- nop
-ENDPROC(__c6xabi_divremu)
diff --git a/arch/c6x/lib/divu.S b/arch/c6x/lib/divu.S
deleted file mode 100644
index f0f6082944c2..000000000000
--- a/arch/c6x/lib/divu.S
+++ /dev/null
@@ -1,86 +0,0 @@
-;; SPDX-License-Identifier: GPL-2.0-or-later
-;; Copyright 2010 Free Software Foundation, Inc.
-;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
-;;
-
-#include <linux/linkage.h>
-
- ;; ABI considerations for the divide functions
- ;; The following registers are call-used:
- ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
- ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
- ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
- ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
- ;;
- ;; In our implementation, divu and remu are leaf functions,
- ;; while both divi and remi call into divu.
- ;; A0 is not clobbered by any of the functions.
- ;; divu does not clobber B2 either, which is taken advantage of
- ;; in remi.
- ;; divi uses B5 to hold the original return address during
- ;; the call to divu.
- ;; remi uses B2 and A5 to hold the input values during the
- ;; call to divu. It stores B3 in on the stack.
-
- .text
-ENTRY(__c6xabi_divu)
- ;; We use a series of up to 31 subc instructions. First, we find
- ;; out how many leading zero bits there are in the divisor. This
- ;; gives us both a shift count for aligning (shifting) the divisor
- ;; to the, and the number of times we have to execute subc.
-
- ;; At the end, we have both the remainder and most of the quotient
- ;; in A4. The top bit of the quotient is computed first and is
- ;; placed in A2.
-
- ;; Return immediately if the dividend is zero.
- mv .s2x A4, B1
- [B1] lmbd .l2 1, B4, B1
-|| [!B1] b .s2 B3 ; RETURN A
-|| [!B1] mvk .d2 1, B4
- mv .l1x B1, A6
-|| shl .s2 B4, B1, B4
-
- ;; The loop performs a maximum of 28 steps, so we do the
- ;; first 3 here.
- cmpltu .l1x A4, B4, A2
- [!A2] sub .l1x A4, B4, A4
-|| shru .s2 B4, 1, B4
-|| xor .s1 1, A2, A2
-
- shl .s1 A2, 31, A2
-|| [B1] subc .l1x A4,B4,A4
-|| [B1] add .s2 -1, B1, B1
- [B1] subc .l1x A4,B4,A4
-|| [B1] add .s2 -1, B1, B1
-
- ;; RETURN A may happen here (note: must happen before the next branch)
-_divu_loop:
- cmpgt .l2 B1, 7, B0
-|| [B1] subc .l1x A4,B4,A4
-|| [B1] add .s2 -1, B1, B1
- [B1] subc .l1x A4,B4,A4
-|| [B1] add .s2 -1, B1, B1
-|| [B0] b .s1 _divu_loop
- [B1] subc .l1x A4,B4,A4
-|| [B1] add .s2 -1, B1, B1
- [B1] subc .l1x A4,B4,A4
-|| [B1] add .s2 -1, B1, B1
- [B1] subc .l1x A4,B4,A4
-|| [B1] add .s2 -1, B1, B1
- [B1] subc .l1x A4,B4,A4
-|| [B1] add .s2 -1, B1, B1
- [B1] subc .l1x A4,B4,A4
-|| [B1] add .s2 -1, B1, B1
- ;; loop backwards branch happens here
-
- ret .s2 B3
-|| mvk .s1 32, A1
- sub .l1 A1, A6, A6
- shl .s1 A4, A6, A4
- shru .s1 A4, 1, A4
-|| sub .l1 A6, 1, A6
- or .l1 A2, A4, A4
- shru .s1 A4, A6, A4
- nop
-ENDPROC(__c6xabi_divu)
diff --git a/arch/c6x/lib/llshl.S b/arch/c6x/lib/llshl.S
deleted file mode 100644
index 3272499618e0..000000000000
--- a/arch/c6x/lib/llshl.S
+++ /dev/null
@@ -1,25 +0,0 @@
-;; SPDX-License-Identifier: GPL-2.0-or-later
-;; Copyright (C) 2010 Texas Instruments Incorporated
-;; Contributed by Mark Salter <msalter@redhat.com>.
-;;
-
-;; uint64_t __c6xabi_llshl(uint64_t val, uint shift)
-
-#include <linux/linkage.h>
-
- .text
-ENTRY(__c6xabi_llshl)
- mv .l1x B4,A1
- [!A1] b .s2 B3 ; just return if zero shift
- mvk .s1 32,A0
- sub .d1 A0,A1,A0
- cmplt .l1 0,A0,A2
- [A2] shru .s1 A4,A0,A0
- [!A2] neg .l1 A0,A5
-|| [A2] shl .s1 A5,A1,A5
- [!A2] shl .s1 A4,A5,A5
-|| [A2] or .d1 A5,A0,A5
-|| [!A2] mvk .l1 0,A4
- [A2] shl .s1 A4,A1,A4
- bnop .s2 B3,5
-ENDPROC(__c6xabi_llshl)
diff --git a/arch/c6x/lib/llshr.S b/arch/c6x/lib/llshr.S
deleted file mode 100644
index 6bfaacd15e73..000000000000
--- a/arch/c6x/lib/llshr.S
+++ /dev/null
@@ -1,26 +0,0 @@
-;; SPDX-License-Identifier: GPL-2.0-or-later
-;; Copyright (C) 2010 Texas Instruments Incorporated
-;; Contributed by Mark Salter <msalter@redhat.com>.
-;;
-
-;; uint64_t __c6xabi_llshr(uint64_t val, uint shift)
-
-#include <linux/linkage.h>
-
- .text
-ENTRY(__c6xabi_llshr)
- mv .l1x B4,A1
- [!A1] b .s2 B3 ; return if zero shift count
- mvk .s1 32,A0
- sub .d1 A0,A1,A0
- cmplt .l1 0,A0,A2
- [A2] shl .s1 A5,A0,A0
- nop
- [!A2] neg .l1 A0,A4
-|| [A2] shru .s1 A4,A1,A4
- [!A2] shr .s1 A5,A4,A4
-|| [A2] or .d1 A4,A0,A4
- [!A2] shr .s1 A5,0x1f,A5
- [A2] shr .s1 A5,A1,A5
- bnop .s2 B3,5
-ENDPROC(__c6xabi_llshr)
diff --git a/arch/c6x/lib/llshru.S b/arch/c6x/lib/llshru.S
deleted file mode 100644
index 103128f50770..000000000000
--- a/arch/c6x/lib/llshru.S
+++ /dev/null
@@ -1,26 +0,0 @@
-;; SPDX-License-Identifier: GPL-2.0-or-later
-;; Copyright (C) 2010 Texas Instruments Incorporated
-;; Contributed by Mark Salter <msalter@redhat.com>.
-;;
-
-;; uint64_t __c6xabi_llshru(uint64_t val, uint shift)
-
-#include <linux/linkage.h>
-
- .text
-ENTRY(__c6xabi_llshru)
- mv .l1x B4,A1
- [!A1] b .s2 B3 ; return if zero shift count
- mvk .s1 32,A0
- sub .d1 A0,A1,A0
- cmplt .l1 0,A0,A2
- [A2] shl .s1 A5,A0,A0
- nop
- [!A2] neg .l1 A0,A4
-|| [A2] shru .s1 A4,A1,A4
- [!A2] shru .s1 A5,A4,A4
-|| [A2] or .d1 A4,A0,A4
-|| [!A2] mvk .l1 0,A5
- [A2] shru .s1 A5,A1,A5
- bnop .s2 B3,5
-ENDPROC(__c6xabi_llshru)
diff --git a/arch/c6x/lib/memcpy_64plus.S b/arch/c6x/lib/memcpy_64plus.S
deleted file mode 100644
index 157a30486bfd..000000000000
--- a/arch/c6x/lib/memcpy_64plus.S
+++ /dev/null
@@ -1,43 +0,0 @@
-; SPDX-License-Identifier: GPL-2.0-only
-; Port on Texas Instruments TMS320C6x architecture
-;
-; Copyright (C) 2006, 2009, 2010 Texas Instruments Incorporated
-; Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
-;
-
-#include <linux/linkage.h>
-
- .text
-
-ENTRY(memcpy)
- AND .L1 0x1,A6,A0
- || AND .S1 0x2,A6,A1
- || AND .L2X 0x4,A6,B0
- || MV .D1 A4,A3
- || MVC .S2 ILC,B2
-
- [A0] LDB .D2T1 *B4++,A5
- [A1] LDB .D2T1 *B4++,A7
- [A1] LDB .D2T1 *B4++,A8
- [B0] LDNW .D2T1 *B4++,A9
- || SHRU .S2X A6,0x3,B1
- [!B1] BNOP .S2 B3,1
-
- [A0] STB .D1T1 A5,*A3++
- ||[B1] MVC .S2 B1,ILC
- [A1] STB .D1T1 A7,*A3++
- [A1] STB .D1T1 A8,*A3++
- [B0] STNW .D1T1 A9,*A3++ ; return when len < 8
-
- SPLOOP 2
-
- LDNDW .D2T1 *B4++,A9:A8
- NOP 3
-
- NOP
- SPKERNEL 0,0
- || STNDW .D1T1 A9:A8,*A3++
-
- BNOP .S2 B3,4
- MVC .S2 B2,ILC
-ENDPROC(memcpy)
diff --git a/arch/c6x/lib/mpyll.S b/arch/c6x/lib/mpyll.S
deleted file mode 100644
index d07c13ec4fd4..000000000000
--- a/arch/c6x/lib/mpyll.S
+++ /dev/null
@@ -1,37 +0,0 @@
-;; SPDX-License-Identifier: GPL-2.0-or-later
-;; Copyright (C) 2010 Texas Instruments Incorporated
-;; Contributed by Mark Salter <msalter@redhat.com>.
-;;
-
-#include <linux/linkage.h>
-
- ;; uint64_t __c6xabi_mpyll(uint64_t x, uint64_t y)
- ;;
- ;; 64x64 multiply
- ;; First compute partial results using 32-bit parts of x and y:
- ;;
- ;; b63 b32 b31 b0
- ;; -----------------------------
- ;; | 1 | 0 |
- ;; -----------------------------
- ;;
- ;; P0 = X0*Y0
- ;; P1 = X0*Y1 + X1*Y0
- ;; P2 = X1*Y1
- ;;
- ;; result = (P2 << 64) + (P1 << 32) + P0
- ;;
- ;; Since the result is also 64-bit, we can skip the P2 term.
-
- .text
-ENTRY(__c6xabi_mpyll)
- mpy32u .m1x A4,B4,A1:A0 ; X0*Y0
- b .s2 B3
- || mpy32u .m2x B5,A4,B1:B0 ; X0*Y1 (don't need upper 32-bits)
- || mpy32u .m1x A5,B4,A3:A2 ; X1*Y0 (don't need upper 32-bits)
- nop
- nop
- mv .s1 A0,A4
- add .l1x A2,B0,A5
- add .s1 A1,A5,A5
-ENDPROC(__c6xabi_mpyll)
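
The partial-product scheme laid out in the comment above, spelled out in C
(a sketch; the assembly computes the three products with mpy32u and returns
the result in A5:A4):

#include <stdint.h>

static uint64_t mpyll_sketch(uint64_t x, uint64_t y)
{
	uint32_t x0 = (uint32_t)x, x1 = (uint32_t)(x >> 32);
	uint32_t y0 = (uint32_t)y, y1 = (uint32_t)(y >> 32);

	uint64_t p0 = (uint64_t)x0 * y0;	/* P0: full 64-bit partial product */
	uint32_t p1 = x0 * y1 + x1 * y0;	/* P1: only its low 32 bits survive */

	/* P2 = X1*Y1 would land entirely above bit 63, so it is dropped. */
	return p0 + ((uint64_t)p1 << 32);
}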
diff --git a/arch/c6x/lib/negll.S b/arch/c6x/lib/negll.S
deleted file mode 100644
index 9ba434db5366..000000000000
--- a/arch/c6x/lib/negll.S
+++ /dev/null
@@ -1,19 +0,0 @@
-;; SPDX-License-Identifier: GPL-2.0-or-later
-;; Copyright (C) 2010 Texas Instruments Incorporated
-;; Contributed by Mark Salter <msalter@redhat.com>.
-;;
-
-;; int64_t __c6xabi_negll(int64_t val)
-
-#include <linux/linkage.h>
-
- .text
-ENTRY(__c6xabi_negll)
- b .s2 B3
- mvk .l1 0,A0
- subu .l1 A0,A4,A3:A2
- sub .l1 A0,A5,A0
-|| ext .s1 A3,24,24,A5
- add .l1 A5,A0,A5
- mv .s1 A2,A4
-ENDPROC(__c6xabi_negll)
diff --git a/arch/c6x/lib/pop_rts.S b/arch/c6x/lib/pop_rts.S
deleted file mode 100644
index f129e32943c5..000000000000
--- a/arch/c6x/lib/pop_rts.S
+++ /dev/null
@@ -1,20 +0,0 @@
-;; SPDX-License-Identifier: GPL-2.0-or-later
-;; Copyright 2010 Free Software Foundation, Inc.
-;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
-;;
-
-#include <linux/linkage.h>
-
- .text
-
-ENTRY(__c6xabi_pop_rts)
- lddw .d2t2 *++B15, B3:B2
- lddw .d2t1 *++B15, A11:A10
- lddw .d2t2 *++B15, B11:B10
- lddw .d2t1 *++B15, A13:A12
- lddw .d2t2 *++B15, B13:B12
- lddw .d2t1 *++B15, A15:A14
-|| b .s2 B3
- ldw .d2t2 *++B15[2], B14
- nop 4
-ENDPROC(__c6xabi_pop_rts)
diff --git a/arch/c6x/lib/push_rts.S b/arch/c6x/lib/push_rts.S
deleted file mode 100644
index 40b0a4fe937c..000000000000
--- a/arch/c6x/lib/push_rts.S
+++ /dev/null
@@ -1,19 +0,0 @@
-;; SPDX-License-Identifier: GPL-2.0-or-later
-;; Copyright 2010 Free Software Foundation, Inc.
-;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
-;;
-
-#include <linux/linkage.h>
-
- .text
-
-ENTRY(__c6xabi_push_rts)
- stw .d2t2 B14, *B15--[2]
- stdw .d2t1 A15:A14, *B15--
-|| b .s2x A3
- stdw .d2t2 B13:B12, *B15--
- stdw .d2t1 A13:A12, *B15--
- stdw .d2t2 B11:B10, *B15--
- stdw .d2t1 A11:A10, *B15--
- stdw .d2t2 B3:B2, *B15--
-ENDPROC(__c6xabi_push_rts)
diff --git a/arch/c6x/lib/remi.S b/arch/c6x/lib/remi.S
deleted file mode 100644
index 96a1335eac20..000000000000
--- a/arch/c6x/lib/remi.S
+++ /dev/null
@@ -1,52 +0,0 @@
-;; SPDX-License-Identifier: GPL-2.0-or-later
-;; Copyright 2010 Free Software Foundation, Inc.
-;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
-;;
-
-#include <linux/linkage.h>
-
- ;; ABI considerations for the divide functions
- ;; The following registers are call-used:
- ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
- ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
- ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
- ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
- ;;
- ;; In our implementation, divu and remu are leaf functions,
- ;; while both divi and remi call into divu.
- ;; A0 is not clobbered by any of the functions.
- ;; divu does not clobber B2 either, which is taken advantage of
- ;; in remi.
- ;; divi uses B5 to hold the original return address during
- ;; the call to divu.
- ;; remi uses B2 and A5 to hold the input values during the
- ;; call to divu. It stores B3 on the stack.
-
- .text
-
-ENTRY(__c6xabi_remi)
- stw .d2t2 B3, *B15--[2]
-|| cmpgt .l1 0, A4, A1
-|| cmpgt .l2 0, B4, B2
-|| mv .s1 A4, A5
-|| call .s2 __c6xabi_divu
-
- [A1] neg .l1 A4, A4
-|| [B2] neg .l2 B4, B4
-|| xor .s2x B2, A1, B0
-|| mv .d2 B4, B2
-
- [B0] addkpc .s2 _divu_ret_1, B3, 1
- [!B0] addkpc .s2 _divu_ret_2, B3, 1
- nop 2
-_divu_ret_1:
- neg .l1 A4, A4
-_divu_ret_2:
- ldw .d2t2 *++B15[2], B3
-
- mpy32 .m1x A4, B2, A6
- nop 3
- ret .s2 B3
- sub .l1 A5, A6, A4
- nop 4
-ENDPROC(__c6xabi_remi)
diff --git a/arch/c6x/lib/remu.S b/arch/c6x/lib/remu.S
deleted file mode 100644
index 428feb9c06c0..000000000000
--- a/arch/c6x/lib/remu.S
+++ /dev/null
@@ -1,70 +0,0 @@
-;; SPDX-License-Identifier: GPL-2.0-or-later
-;; Copyright 2010 Free Software Foundation, Inc.
-;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
-;;
-
-#include <linux/linkage.h>
-
- ;; ABI considerations for the divide functions
- ;; The following registers are call-used:
- ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
- ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
- ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
- ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
- ;;
- ;; In our implementation, divu and remu are leaf functions,
- ;; while both divi and remi call into divu.
- ;; A0 is not clobbered by any of the functions.
- ;; divu does not clobber B2 either, which is taken advantage of
- ;; in remi.
- ;; divi uses B5 to hold the original return address during
- ;; the call to divu.
- ;; remi uses B2 and A5 to hold the input values during the
- ;; call to divu. It stores B3 on the stack.
-
-
- .text
-
-ENTRY(__c6xabi_remu)
- ;; The ABI seems designed to prevent these functions calling each other,
- ;; so we duplicate most of the divsi3 code here.
- mv .s2x A4, B1
- lmbd .l2 1, B4, B1
-|| [!B1] b .s2 B3 ; RETURN A
-|| [!B1] mvk .d2 1, B4
-
- mv .l1x B1, A7
-|| shl .s2 B4, B1, B4
-
- cmpltu .l1x A4, B4, A1
- [!A1] sub .l1x A4, B4, A4
- shru .s2 B4, 1, B4
-
-_remu_loop:
- cmpgt .l2 B1, 7, B0
-|| [B1] subc .l1x A4,B4,A4
-|| [B1] add .s2 -1, B1, B1
- ;; RETURN A may happen here (note: must happen before the next branch)
- [B1] subc .l1x A4,B4,A4
-|| [B1] add .s2 -1, B1, B1
-|| [B0] b .s1 _remu_loop
- [B1] subc .l1x A4,B4,A4
-|| [B1] add .s2 -1, B1, B1
- [B1] subc .l1x A4,B4,A4
-|| [B1] add .s2 -1, B1, B1
- [B1] subc .l1x A4,B4,A4
-|| [B1] add .s2 -1, B1, B1
- [B1] subc .l1x A4,B4,A4
-|| [B1] add .s2 -1, B1, B1
- [B1] subc .l1x A4,B4,A4
-|| [B1] add .s2 -1, B1, B1
- ;; loop backwards branch happens here
-
- ret .s2 B3
- [B1] subc .l1x A4,B4,A4
-|| [B1] add .s2 -1, B1, B1
- [B1] subc .l1x A4,B4,A4
-
- extu .s1 A4, A7, A4
- nop 2
-ENDPROC(__c6xabi_remu)
diff --git a/arch/c6x/lib/strasgi.S b/arch/c6x/lib/strasgi.S
deleted file mode 100644
index 715aeb200792..000000000000
--- a/arch/c6x/lib/strasgi.S
+++ /dev/null
@@ -1,77 +0,0 @@
-;; SPDX-License-Identifier: GPL-2.0-or-later
-;; Copyright 2010 Free Software Foundation, Inc.
-;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
-;;
-
-#include <linux/linkage.h>
-
- .text
-
-ENTRY(__c6xabi_strasgi)
- ;; This is essentially memcpy, with alignment known to be at least
- ;; 4, and the size a multiple of 4 greater than or equal to 28.
- ldw .d2t1 *B4++, A0
-|| mvk .s2 16, B1
- ldw .d2t1 *B4++, A1
-|| mvk .s2 20, B2
-|| sub .d1 A6, 24, A6
- ldw .d2t1 *B4++, A5
- ldw .d2t1 *B4++, A7
-|| mv .l2x A6, B7
- ldw .d2t1 *B4++, A8
- ldw .d2t1 *B4++, A9
-|| mv .s2x A0, B5
-|| cmpltu .l2 B2, B7, B0
-
-_strasgi_loop:
- stw .d1t2 B5, *A4++
-|| [B0] ldw .d2t1 *B4++, A0
-|| mv .s2x A1, B5
-|| mv .l2 B7, B6
-
- [B0] sub .d2 B6, 24, B7
-|| [B0] b .s2 _strasgi_loop
-|| cmpltu .l2 B1, B6, B0
-
- [B0] ldw .d2t1 *B4++, A1
-|| stw .d1t2 B5, *A4++
-|| mv .s2x A5, B5
-|| cmpltu .l2 12, B6, B0
-
- [B0] ldw .d2t1 *B4++, A5
-|| stw .d1t2 B5, *A4++
-|| mv .s2x A7, B5
-|| cmpltu .l2 8, B6, B0
-
- [B0] ldw .d2t1 *B4++, A7
-|| stw .d1t2 B5, *A4++
-|| mv .s2x A8, B5
-|| cmpltu .l2 4, B6, B0
-
- [B0] ldw .d2t1 *B4++, A8
-|| stw .d1t2 B5, *A4++
-|| mv .s2x A9, B5
-|| cmpltu .l2 0, B6, B0
-
- [B0] ldw .d2t1 *B4++, A9
-|| stw .d1t2 B5, *A4++
-|| mv .s2x A0, B5
-|| cmpltu .l2 B2, B7, B0
-
- ;; loop back branch happens here
-
- cmpltu .l2 B1, B6, B0
-|| ret .s2 b3
-
- [B0] stw .d1t1 A1, *A4++
-|| cmpltu .l2 12, B6, B0
- [B0] stw .d1t1 A5, *A4++
-|| cmpltu .l2 8, B6, B0
- [B0] stw .d1t1 A7, *A4++
-|| cmpltu .l2 4, B6, B0
- [B0] stw .d1t1 A8, *A4++
-|| cmpltu .l2 0, B6, B0
- [B0] stw .d1t1 A9, *A4++
-
- ;; return happens here
-ENDPROC(__c6xabi_strasgi)
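
In plain C, the contract this routine implements is just a word copy under
the guarantees stated above (an illustrative sketch; the assembly keeps six
words in flight and overlaps loads with stores):

#include <stddef.h>
#include <stdint.h>

static void strasgi_sketch(uint32_t *dst, const uint32_t *src, size_t len)
{
	size_t i;

	/* len is in bytes, a multiple of 4 and at least 28 */
	for (i = 0; i < len / 4; i++)
		dst[i] = src[i];
}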
diff --git a/arch/c6x/lib/strasgi_64plus.S b/arch/c6x/lib/strasgi_64plus.S
deleted file mode 100644
index d10aa2dc3249..000000000000
--- a/arch/c6x/lib/strasgi_64plus.S
+++ /dev/null
@@ -1,27 +0,0 @@
-;; SPDX-License-Identifier: GPL-2.0-or-later
-;; Copyright 2010 Free Software Foundation, Inc.
-;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
-;;
-
-#include <linux/linkage.h>
-
- .text
-
-ENTRY(__c6xabi_strasgi_64plus)
- shru .s2x a6, 2, b31
-|| mv .s1 a4, a30
-|| mv .d2 b4, b30
-
- add .s2 -4, b31, b31
-
- sploopd 1
-|| mvc .s2 b31, ilc
- ldw .d2t2 *b30++, b31
- nop 4
- mv .s1x b31,a31
- spkernel 6, 0
-|| stw .d1t1 a31, *a30++
-
- ret .s2 b3
- nop 5
-ENDPROC(__c6xabi_strasgi_64plus)
diff --git a/arch/c6x/mm/Makefile b/arch/c6x/mm/Makefile
deleted file mode 100644
index 19d05e972dd1..000000000000
--- a/arch/c6x/mm/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the linux c6x-specific parts of the memory manager.
-#
-
-obj-y := init.o dma-coherent.o
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
deleted file mode 100644
index 03df07a831fc..000000000000
--- a/arch/c6x/mm/dma-coherent.c
+++ /dev/null
@@ -1,173 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot <aurelien.jacquiot@ti.com>
- *
- * DMA uncached mapping support.
- *
- * Using code pulled from ARM
- * Copyright (C) 2000-2004 Russell King
- */
-#include <linux/slab.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/dma-map-ops.h>
-#include <linux/memblock.h>
-
-#include <asm/cacheflush.h>
-#include <asm/page.h>
-#include <asm/setup.h>
-
-/*
- * DMA coherent memory management, can be redefined using the memdma=
- * kernel command line
- */
-
-/* none by default */
-static phys_addr_t dma_base;
-static u32 dma_size;
-static u32 dma_pages;
-
-static unsigned long *dma_bitmap;
-
-/* bitmap lock */
-static DEFINE_SPINLOCK(dma_lock);
-
-/*
- * Return a DMA coherent and contiguous memory chunk from the DMA memory
- */
-static inline u32 __alloc_dma_pages(int order)
-{
- unsigned long flags;
- u32 pos;
-
- spin_lock_irqsave(&dma_lock, flags);
- pos = bitmap_find_free_region(dma_bitmap, dma_pages, order);
- spin_unlock_irqrestore(&dma_lock, flags);
-
- return dma_base + (pos << PAGE_SHIFT);
-}
-
-static void __free_dma_pages(u32 addr, int order)
-{
- unsigned long flags;
- u32 pos = (addr - dma_base) >> PAGE_SHIFT;
-
- if (addr < dma_base || (pos + (1 << order)) >= dma_pages) {
- printk(KERN_ERR "%s: freeing outside range.\n", __func__);
- BUG();
- }
-
- spin_lock_irqsave(&dma_lock, flags);
- bitmap_release_region(dma_bitmap, pos, order);
- spin_unlock_irqrestore(&dma_lock, flags);
-}
-
-/*
- * Allocate DMA coherent memory space and return both the kernel
- * virtual and DMA address for that space.
- */
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp, unsigned long attrs)
-{
- void *ret;
- u32 paddr;
- int order;
-
- if (!dma_size || !size)
- return NULL;
-
- order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);
-
- paddr = __alloc_dma_pages(order);
-
- if (handle)
- *handle = paddr;
-
- if (!paddr)
- return NULL;
-
- ret = phys_to_virt(paddr);
- memset(ret, 0, 1 << order);
- return ret;
-}
-
-/*
- * Free DMA coherent memory as defined by the above mapping.
- */
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- int order;
-
- if (!dma_size || !size)
- return;
-
- order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);
-
- __free_dma_pages(virt_to_phys(vaddr), order);
-}
-
-/*
- * Initialise the coherent DMA memory allocator using the given uncached region.
- */
-void __init coherent_mem_init(phys_addr_t start, u32 size)
-{
- if (!size)
- return;
-
- printk(KERN_INFO
- "Coherent memory (DMA) region start=0x%x size=0x%x\n",
- start, size);
-
- dma_base = start;
- dma_size = size;
-
- /* allocate bitmap */
- dma_pages = dma_size >> PAGE_SHIFT;
- if (dma_size & (PAGE_SIZE - 1))
- ++dma_pages;
-
- dma_bitmap = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
- sizeof(long));
- if (!dma_bitmap)
- panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
- __func__, BITS_TO_LONGS(dma_pages) * sizeof(long),
- sizeof(long));
-}
-
-static void c6x_dma_sync(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!valid_dma_direction(dir));
-
- switch (dir) {
- case DMA_FROM_DEVICE:
- L2_cache_block_invalidate(paddr, paddr + size);
- break;
- case DMA_TO_DEVICE:
- L2_cache_block_writeback(paddr, paddr + size);
- break;
- case DMA_BIDIRECTIONAL:
- L2_cache_block_writeback_invalidate(paddr, paddr + size);
- break;
- default:
- break;
- }
-}
-
-void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
-{
- return c6x_dma_sync(paddr, size, dir);
-}
-
-void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
-{
- return c6x_dma_sync(paddr, size, dir);
-}
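
To see how arch_dma_alloc() above sizes a request, assume 4 KiB pages
(PAGE_SHIFT == 12): the byte count is rounded up to whole pages, then to a
power-of-two order for bitmap_find_free_region(). The count_order() helper
below is a stand-in for the kernel's get_count_order():

#include <stdio.h>

static int count_order(unsigned int n)
{
	int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int size = 20000;			/* bytes requested */
	unsigned int pages = ((size - 1) >> 12) + 1;	/* 5 pages */
	int order = count_order(pages);			/* order 3: 8 pages reserved */

	printf("%u bytes -> %u pages -> order %d\n", size, pages, order);
	return 0;
}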
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
deleted file mode 100644
index a97e51a3e26d..000000000000
--- a/arch/c6x/mm/init.c
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- */
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/module.h>
-#include <linux/memblock.h>
-#ifdef CONFIG_BLK_DEV_RAM
-#include <linux/blkdev.h>
-#endif
-#include <linux/initrd.h>
-
-#include <asm/sections.h>
-#include <linux/uaccess.h>
-
-/*
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
- */
-unsigned long empty_zero_page;
-EXPORT_SYMBOL(empty_zero_page);
-
-/*
- * paging_init() continues the virtual memory environment setup which
- * was begun by the code in arch/head.S.
- * The parameters are pointers to where to stick the starting and ending
- * addresses of available kernel virtual memory.
- */
-void __init paging_init(void)
-{
- struct pglist_data *pgdat = NODE_DATA(0);
- unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
-
- empty_zero_page = (unsigned long) memblock_alloc(PAGE_SIZE,
- PAGE_SIZE);
- if (!empty_zero_page)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
-
- /*
- * Set up user data space
- */
- set_fs(KERNEL_DS);
-
- /*
- * Define zones
- */
- max_zone_pfn[ZONE_NORMAL] = memory_end >> PAGE_SHIFT;
-
- free_area_init(max_zone_pfn);
-}
-
-void __init mem_init(void)
-{
- high_memory = (void *)(memory_end & PAGE_MASK);
-
- /* this will put all memory onto the freelists */
- memblock_free_all();
-
- mem_init_print_info(NULL);
-}
diff --git a/arch/c6x/platforms/Kconfig b/arch/c6x/platforms/Kconfig
deleted file mode 100644
index f3a9ae6e0e82..000000000000
--- a/arch/c6x/platforms/Kconfig
+++ /dev/null
@@ -1,21 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-config SOC_TMS320C6455
- bool "TMS320C6455"
- default n
-
-config SOC_TMS320C6457
- bool "TMS320C6457"
- default n
-
-config SOC_TMS320C6472
- bool "TMS320C6472"
- default n
-
-config SOC_TMS320C6474
- bool "TMS320C6474"
- default n
-
-config SOC_TMS320C6678
- bool "TMS320C6678"
- default n
diff --git a/arch/c6x/platforms/Makefile b/arch/c6x/platforms/Makefile
deleted file mode 100644
index b320f1c68884..000000000000
--- a/arch/c6x/platforms/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for arch/c6x/platforms
-#
-# Copyright 2010, 2011 Texas Instruments Incorporated
-#
-
-obj-y = cache.o megamod-pic.o pll.o plldata.o timer64.o
-obj-y += dscr.o
-
-# SoC objects
-obj-$(CONFIG_SOC_TMS320C6455) += emif.o
-obj-$(CONFIG_SOC_TMS320C6457) += emif.o
diff --git a/arch/c6x/platforms/cache.c b/arch/c6x/platforms/cache.c
deleted file mode 100644
index fff027b72513..000000000000
--- a/arch/c6x/platforms/cache.c
+++ /dev/null
@@ -1,444 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2011 Texas Instruments Incorporated
- * Author: Mark Salter <msalter@redhat.com>
- */
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
-
-#include <asm/cache.h>
-#include <asm/soc.h>
-
-/*
- * Internal Memory Control Registers for caches
- */
-#define IMCR_CCFG 0x0000
-#define IMCR_L1PCFG 0x0020
-#define IMCR_L1PCC 0x0024
-#define IMCR_L1DCFG 0x0040
-#define IMCR_L1DCC 0x0044
-#define IMCR_L2ALLOC0 0x2000
-#define IMCR_L2ALLOC1 0x2004
-#define IMCR_L2ALLOC2 0x2008
-#define IMCR_L2ALLOC3 0x200c
-#define IMCR_L2WBAR 0x4000
-#define IMCR_L2WWC 0x4004
-#define IMCR_L2WIBAR 0x4010
-#define IMCR_L2WIWC 0x4014
-#define IMCR_L2IBAR 0x4018
-#define IMCR_L2IWC 0x401c
-#define IMCR_L1PIBAR 0x4020
-#define IMCR_L1PIWC 0x4024
-#define IMCR_L1DWIBAR 0x4030
-#define IMCR_L1DWIWC 0x4034
-#define IMCR_L1DWBAR 0x4040
-#define IMCR_L1DWWC 0x4044
-#define IMCR_L1DIBAR 0x4048
-#define IMCR_L1DIWC 0x404c
-#define IMCR_L2WB 0x5000
-#define IMCR_L2WBINV 0x5004
-#define IMCR_L2INV 0x5008
-#define IMCR_L1PINV 0x5028
-#define IMCR_L1DWB 0x5040
-#define IMCR_L1DWBINV 0x5044
-#define IMCR_L1DINV 0x5048
-#define IMCR_MAR_BASE 0x8000
-#define IMCR_MAR96_111 0x8180
-#define IMCR_MAR128_191 0x8200
-#define IMCR_MAR224_239 0x8380
-#define IMCR_L2MPFAR 0xa000
-#define IMCR_L2MPFSR 0xa004
-#define IMCR_L2MPFCR 0xa008
-#define IMCR_L2MPLK0 0xa100
-#define IMCR_L2MPLK1 0xa104
-#define IMCR_L2MPLK2 0xa108
-#define IMCR_L2MPLK3 0xa10c
-#define IMCR_L2MPLKCMD 0xa110
-#define IMCR_L2MPLKSTAT 0xa114
-#define IMCR_L2MPPA_BASE 0xa200
-#define IMCR_L1PMPFAR 0xa400
-#define IMCR_L1PMPFSR 0xa404
-#define IMCR_L1PMPFCR 0xa408
-#define IMCR_L1PMPLK0 0xa500
-#define IMCR_L1PMPLK1 0xa504
-#define IMCR_L1PMPLK2 0xa508
-#define IMCR_L1PMPLK3 0xa50c
-#define IMCR_L1PMPLKCMD 0xa510
-#define IMCR_L1PMPLKSTAT 0xa514
-#define IMCR_L1PMPPA_BASE 0xa600
-#define IMCR_L1DMPFAR 0xac00
-#define IMCR_L1DMPFSR 0xac04
-#define IMCR_L1DMPFCR 0xac08
-#define IMCR_L1DMPLK0 0xad00
-#define IMCR_L1DMPLK1 0xad04
-#define IMCR_L1DMPLK2 0xad08
-#define IMCR_L1DMPLK3 0xad0c
-#define IMCR_L1DMPLKCMD 0xad10
-#define IMCR_L1DMPLKSTAT 0xad14
-#define IMCR_L1DMPPA_BASE 0xae00
-#define IMCR_L2PDWAKE0 0xc040
-#define IMCR_L2PDWAKE1 0xc044
-#define IMCR_L2PDSLEEP0 0xc050
-#define IMCR_L2PDSLEEP1 0xc054
-#define IMCR_L2PDSTAT0 0xc060
-#define IMCR_L2PDSTAT1 0xc064
-
-/*
- * CCFG register values and bits
- */
-#define L2MODE_0K_CACHE 0x0
-#define L2MODE_32K_CACHE 0x1
-#define L2MODE_64K_CACHE 0x2
-#define L2MODE_128K_CACHE 0x3
-#define L2MODE_256K_CACHE 0x7
-
-#define L2PRIO_URGENT 0x0
-#define L2PRIO_HIGH 0x1
-#define L2PRIO_MEDIUM 0x2
-#define L2PRIO_LOW 0x3
-
-#define CCFG_ID 0x100 /* Invalidate L1D bit */
-#define CCFG_IP 0x200 /* Invalidate L1P bit */
-
-static void __iomem *cache_base;
-
-/*
- * L1 & L2 caches generic functions
- */
-#define imcr_get(reg) soc_readl(cache_base + (reg))
-#define imcr_set(reg, value) \
-do { \
- soc_writel((value), cache_base + (reg)); \
- soc_readl(cache_base + (reg)); \
-} while (0)
-
-static void cache_block_operation_wait(unsigned int wc_reg)
-{
- /* Wait for completion */
- while (imcr_get(wc_reg))
- cpu_relax();
-}
-
-static DEFINE_SPINLOCK(cache_lock);
-
-/*
- * Generic function to perform a block cache operation as
- * invalidate or writeback/invalidate
- */
-static void cache_block_operation(unsigned int *start,
- unsigned int *end,
- unsigned int bar_reg,
- unsigned int wc_reg)
-{
- unsigned long flags;
- unsigned int wcnt =
- (L2_CACHE_ALIGN_CNT((unsigned int) end)
- - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2;
- unsigned int wc = 0;
-
- for (; wcnt; wcnt -= wc, start += wc) {
-loop:
- spin_lock_irqsave(&cache_lock, flags);
-
- /*
- * If another cache operation is occurring
- */
- if (unlikely(imcr_get(wc_reg))) {
- spin_unlock_irqrestore(&cache_lock, flags);
-
- /* Wait for previous operation completion */
- cache_block_operation_wait(wc_reg);
-
- /* Try again */
- goto loop;
- }
-
- imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start));
-
- if (wcnt > 0xffff)
- wc = 0xffff;
- else
- wc = wcnt;
-
- /* Set word count value in the WC register */
- imcr_set(wc_reg, wc & 0xffff);
-
- spin_unlock_irqrestore(&cache_lock, flags);
-
- /* Wait for completion */
- cache_block_operation_wait(wc_reg);
- }
-}
-
-static void cache_block_operation_nowait(unsigned int *start,
- unsigned int *end,
- unsigned int bar_reg,
- unsigned int wc_reg)
-{
- unsigned long flags;
- unsigned int wcnt =
- (L2_CACHE_ALIGN_CNT((unsigned int) end)
- - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2;
- unsigned int wc = 0;
-
- for (; wcnt; wcnt -= wc, start += wc) {
-
- spin_lock_irqsave(&cache_lock, flags);
-
- imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start));
-
- if (wcnt > 0xffff)
- wc = 0xffff;
- else
- wc = wcnt;
-
- /* Set word count value in the WC register */
- imcr_set(wc_reg, wc & 0xffff);
-
- spin_unlock_irqrestore(&cache_lock, flags);
-
- /* Don't wait for completion on last cache operation */
- if (wcnt > 0xffff)
- cache_block_operation_wait(wc_reg);
- }
-}
-
-/*
- * L1 caches management
- */
-
-/*
- * Disable L1 caches
- */
-void L1_cache_off(void)
-{
- unsigned int dummy;
-
- imcr_set(IMCR_L1PCFG, 0);
- dummy = imcr_get(IMCR_L1PCFG);
-
- imcr_set(IMCR_L1DCFG, 0);
- dummy = imcr_get(IMCR_L1DCFG);
-}
-
-/*
- * Enable L1 caches
- */
-void L1_cache_on(void)
-{
- unsigned int dummy;
-
- imcr_set(IMCR_L1PCFG, 7);
- dummy = imcr_get(IMCR_L1PCFG);
-
- imcr_set(IMCR_L1DCFG, 7);
- dummy = imcr_get(IMCR_L1DCFG);
-}
-
-/*
- * L1P global-invalidate all
- */
-void L1P_cache_global_invalidate(void)
-{
- unsigned int set = 1;
- imcr_set(IMCR_L1PINV, set);
- while (imcr_get(IMCR_L1PINV) & 1)
- cpu_relax();
-}
-
-/*
- * L1D global-invalidate all
- *
- * Warning: this operation causes all updated data in L1D to
- * be discarded rather than written back to the lower levels of
- * memory
- */
-void L1D_cache_global_invalidate(void)
-{
- unsigned int set = 1;
- imcr_set(IMCR_L1DINV, set);
- while (imcr_get(IMCR_L1DINV) & 1)
- cpu_relax();
-}
-
-void L1D_cache_global_writeback(void)
-{
- unsigned int set = 1;
- imcr_set(IMCR_L1DWB, set);
- while (imcr_get(IMCR_L1DWB) & 1)
- cpu_relax();
-}
-
-void L1D_cache_global_writeback_invalidate(void)
-{
- unsigned int set = 1;
- imcr_set(IMCR_L1DWBINV, set);
- while (imcr_get(IMCR_L1DWBINV) & 1)
- cpu_relax();
-}
-
-/*
- * L2 caches management
- */
-
-/*
- * Set L2 operation mode
- */
-void L2_cache_set_mode(unsigned int mode)
-{
- unsigned int ccfg = imcr_get(IMCR_CCFG);
-
- /* Clear and set the L2MODE bits in CCFG */
- ccfg &= ~7;
- ccfg |= (mode & 7);
- imcr_set(IMCR_CCFG, ccfg);
- ccfg = imcr_get(IMCR_CCFG);
-}
-
-/*
- * L2 global-writeback and global-invalidate all
- */
-void L2_cache_global_writeback_invalidate(void)
-{
- imcr_set(IMCR_L2WBINV, 1);
- while (imcr_get(IMCR_L2WBINV))
- cpu_relax();
-}
-
-/*
- * L2 global-writeback all
- */
-void L2_cache_global_writeback(void)
-{
- imcr_set(IMCR_L2WB, 1);
- while (imcr_get(IMCR_L2WB))
- cpu_relax();
-}
-
-/*
- * Cacheability controls
- */
-void enable_caching(unsigned long start, unsigned long end)
-{
- unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2);
- unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2);
-
- for (; mar <= mar_e; mar += 4)
- imcr_set(mar, imcr_get(mar) | 1);
-}
-
-void disable_caching(unsigned long start, unsigned long end)
-{
- unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2);
- unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2);
-
- for (; mar <= mar_e; mar += 4)
- imcr_set(mar, imcr_get(mar) & ~1);
-}
-
-
-/*
- * L1 block operations
- */
-void L1P_cache_block_invalidate(unsigned int start, unsigned int end)
-{
- cache_block_operation((unsigned int *) start,
- (unsigned int *) end,
- IMCR_L1PIBAR, IMCR_L1PIWC);
-}
-EXPORT_SYMBOL(L1P_cache_block_invalidate);
-
-void L1D_cache_block_invalidate(unsigned int start, unsigned int end)
-{
- cache_block_operation((unsigned int *) start,
- (unsigned int *) end,
- IMCR_L1DIBAR, IMCR_L1DIWC);
-}
-
-void L1D_cache_block_writeback_invalidate(unsigned int start, unsigned int end)
-{
- cache_block_operation((unsigned int *) start,
- (unsigned int *) end,
- IMCR_L1DWIBAR, IMCR_L1DWIWC);
-}
-
-void L1D_cache_block_writeback(unsigned int start, unsigned int end)
-{
- cache_block_operation((unsigned int *) start,
- (unsigned int *) end,
- IMCR_L1DWBAR, IMCR_L1DWWC);
-}
-EXPORT_SYMBOL(L1D_cache_block_writeback);
-
-/*
- * L2 block operations
- */
-void L2_cache_block_invalidate(unsigned int start, unsigned int end)
-{
- cache_block_operation((unsigned int *) start,
- (unsigned int *) end,
- IMCR_L2IBAR, IMCR_L2IWC);
-}
-
-void L2_cache_block_writeback(unsigned int start, unsigned int end)
-{
- cache_block_operation((unsigned int *) start,
- (unsigned int *) end,
- IMCR_L2WBAR, IMCR_L2WWC);
-}
-
-void L2_cache_block_writeback_invalidate(unsigned int start, unsigned int end)
-{
- cache_block_operation((unsigned int *) start,
- (unsigned int *) end,
- IMCR_L2WIBAR, IMCR_L2WIWC);
-}
-
-void L2_cache_block_invalidate_nowait(unsigned int start, unsigned int end)
-{
- cache_block_operation_nowait((unsigned int *) start,
- (unsigned int *) end,
- IMCR_L2IBAR, IMCR_L2IWC);
-}
-
-void L2_cache_block_writeback_nowait(unsigned int start, unsigned int end)
-{
- cache_block_operation_nowait((unsigned int *) start,
- (unsigned int *) end,
- IMCR_L2WBAR, IMCR_L2WWC);
-}
-
-void L2_cache_block_writeback_invalidate_nowait(unsigned int start,
- unsigned int end)
-{
- cache_block_operation_nowait((unsigned int *) start,
- (unsigned int *) end,
- IMCR_L2WIBAR, IMCR_L2WIWC);
-}
-
-
-/*
- * L1 and L2 caches configuration
- */
-void __init c6x_cache_init(void)
-{
- struct device_node *node;
-
- node = of_find_compatible_node(NULL, NULL, "ti,c64x+cache");
- if (!node)
- return;
-
- cache_base = of_iomap(node, 0);
-
- of_node_put(node);
-
- if (!cache_base)
- return;
-
- /* Set L2 caches on the whole L2 SRAM memory */
- L2_cache_set_mode(L2MODE_SIZE);
-
- /* Enable L1 */
- L1_cache_on();
-}
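
The chunking done by cache_block_operation() above can be pictured with a
small standalone example (illustrative addresses; the real code also aligns
start and end to L2 cache lines and takes the spinlock around each step):

#include <stdio.h>

int main(void)
{
	unsigned int start = 0x80000000u;		/* region to write back */
	unsigned int end   = 0x80080000u;		/* 512 KiB later */
	unsigned int wcnt  = (end - start) >> 2;	/* length in 32-bit words */
	unsigned int wc;

	/* The IMCR word-count registers take at most 0xffff words per operation. */
	for (; wcnt; wcnt -= wc, start += wc * 4) {
		wc = wcnt > 0xffff ? 0xffff : wcnt;
		printf("BAR=0x%08x WC=%u\n", start, wc);
	}
	return 0;
}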
diff --git a/arch/c6x/platforms/dscr.c b/arch/c6x/platforms/dscr.c
deleted file mode 100644
index 4571615b589f..000000000000
--- a/arch/c6x/platforms/dscr.c
+++ /dev/null
@@ -1,595 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Device State Control Registers driver
- *
- * Copyright (C) 2011 Texas Instruments Incorporated
- * Author: Mark Salter <msalter@redhat.com>
- */
-
-/*
- * The Device State Control Registers (DSCR) provide SoC level control over
- * a number of peripherals. Details vary considerably among the various SoC
- * parts. In general, the DSCR block will provide one or more configuration
- * registers often protected by a lock register. One or more key values must
- * be written to a lock register in order to unlock the configuration register.
- * The configuration register may be used to enable (and disable in some
- * cases) SoC pin drivers, peripheral clock sources (internal or pin), etc.
- * In some cases, a configuration register is write once or the individual
- * bits are write once. That is, you may be able to enable a device, but
- * will not be able to disable it.
- *
- * In addition to device configuration, the DSCR block may provide registers
- * which are used to reset SoC peripherals, provide device ID information,
- * provide MAC addresses, and other miscellaneous functions.
- */
-
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <asm/soc.h>
-#include <asm/dscr.h>
-
-#define MAX_DEVSTATE_IDS 32
-#define MAX_DEVCTL_REGS 8
-#define MAX_DEVSTAT_REGS 8
-#define MAX_LOCKED_REGS 4
-#define MAX_SOC_EMACS 2
-
-struct rmii_reset_reg {
- u32 reg;
- u32 mask;
-};
-
-/*
- * Some registers may be locked. In order to write to these
- * registers, the key value must first be written to the lockreg.
- */
-struct locked_reg {
- u32 reg; /* offset from base */
- u32 lockreg; /* offset from base */
- u32 key; /* unlock key */
-};
-
-/*
- * This describes a contiguous area of like control bits used to enable/disable
- * SoC devices. Each controllable device is given an ID which is used by the
- * individual device drivers to control the device state. These IDs start at
- * zero and are assigned sequentially to the control bitfield ranges described
- * by this structure.
- */
-struct devstate_ctl_reg {
- u32 reg; /* register holding the control bits */
- u8 start_id; /* start id of this range */
- u8 num_ids; /* number of devices in this range */
- u8 enable_only; /* bits are write-once to enable only */
- u8 enable; /* value used to enable device */
- u8 disable; /* value used to disable device */
- u8 shift; /* starting (rightmost) bit in range */
- u8 nbits; /* number of bits per device */
-};
-
-
-/*
- * This describes a region of status bits indicating the state of
- * various devices. This is used internally to wait for status
- * change completion when enabling/disabling a device. Status is
- * optional and not all device controls will have a corresponding
- * status.
- */
-struct devstate_stat_reg {
- u32 reg; /* register holding the status bits */
- u8 start_id; /* start id of this range */
- u8 num_ids; /* number of devices in this range */
- u8 enable; /* value indicating enabled state */
- u8 disable; /* value indicating disabled state */
- u8 shift; /* starting (rightmost) bit in range */
- u8 nbits; /* number of bits per device */
-};
-
-struct devstate_info {
- struct devstate_ctl_reg *ctl;
- struct devstate_stat_reg *stat;
-};
-
-/* These are callbacks to SOC-specific code. */
-struct dscr_ops {
- void (*init)(struct device_node *node);
-};
-
-struct dscr_regs {
- spinlock_t lock;
- void __iomem *base;
- u32 kick_reg[2];
- u32 kick_key[2];
- struct locked_reg locked[MAX_LOCKED_REGS];
- struct devstate_info devstate_info[MAX_DEVSTATE_IDS];
- struct rmii_reset_reg rmii_resets[MAX_SOC_EMACS];
- struct devstate_ctl_reg devctl[MAX_DEVCTL_REGS];
- struct devstate_stat_reg devstat[MAX_DEVSTAT_REGS];
-};
-
-static struct dscr_regs dscr;
-
-static struct locked_reg *find_locked_reg(u32 reg)
-{
- int i;
-
- for (i = 0; i < MAX_LOCKED_REGS; i++)
- if (dscr.locked[i].key && reg == dscr.locked[i].reg)
- return &dscr.locked[i];
- return NULL;
-}
-
-/*
- * Write to a register with one lock
- */
-static void dscr_write_locked1(u32 reg, u32 val,
- u32 lock, u32 key)
-{
- void __iomem *reg_addr = dscr.base + reg;
- void __iomem *lock_addr = dscr.base + lock;
-
- /*
- * For some registers, the lock is relocked after a short number
- * of cycles. We have to put the lock write and register write in
- * the same fetch packet to meet this timing. The .align ensures
- * the two stw instructions are in the same fetch packet.
- */
- asm volatile ("b .s2 0f\n"
- "nop 5\n"
- " .align 5\n"
- "0:\n"
- "stw .D1T2 %3,*%2\n"
- "stw .D1T2 %1,*%0\n"
- :
- : "a"(reg_addr), "b"(val), "a"(lock_addr), "b"(key)
- );
-
- /* in case the hw doesn't reset the lock */
- soc_writel(0, lock_addr);
-}
-
-/*
- * Write to a register protected by two lock registers
- */
-static void dscr_write_locked2(u32 reg, u32 val,
- u32 lock0, u32 key0,
- u32 lock1, u32 key1)
-{
- soc_writel(key0, dscr.base + lock0);
- soc_writel(key1, dscr.base + lock1);
- soc_writel(val, dscr.base + reg);
- soc_writel(0, dscr.base + lock0);
- soc_writel(0, dscr.base + lock1);
-}
-
-static void dscr_write(u32 reg, u32 val)
-{
- struct locked_reg *lock;
-
- lock = find_locked_reg(reg);
- if (lock)
- dscr_write_locked1(reg, val, lock->lockreg, lock->key);
- else if (dscr.kick_key[0])
- dscr_write_locked2(reg, val, dscr.kick_reg[0], dscr.kick_key[0],
- dscr.kick_reg[1], dscr.kick_key[1]);
- else
- soc_writel(val, dscr.base + reg);
-}
-
-
-/*
- * Drivers can use this interface to enable/disable SoC IP blocks.
- */
-void dscr_set_devstate(int id, enum dscr_devstate_t state)
-{
- struct devstate_ctl_reg *ctl;
- struct devstate_stat_reg *stat;
- struct devstate_info *info;
- u32 ctl_val, val;
- int ctl_shift, ctl_mask;
- unsigned long flags;
-
- if (!dscr.base)
- return;
-
- if (id < 0 || id >= MAX_DEVSTATE_IDS)
- return;
-
- info = &dscr.devstate_info[id];
- ctl = info->ctl;
- stat = info->stat;
-
- if (ctl == NULL)
- return;
-
- ctl_shift = ctl->shift + ctl->nbits * (id - ctl->start_id);
- ctl_mask = ((1 << ctl->nbits) - 1) << ctl_shift;
-
- switch (state) {
- case DSCR_DEVSTATE_ENABLED:
- ctl_val = ctl->enable << ctl_shift;
- break;
- case DSCR_DEVSTATE_DISABLED:
- if (ctl->enable_only)
- return;
- ctl_val = ctl->disable << ctl_shift;
- break;
- default:
- return;
- }
-
- spin_lock_irqsave(&dscr.lock, flags);
-
- val = soc_readl(dscr.base + ctl->reg);
- val &= ~ctl_mask;
- val |= ctl_val;
-
- dscr_write(ctl->reg, val);
-
- spin_unlock_irqrestore(&dscr.lock, flags);
-
- if (!stat)
- return;
-
- ctl_shift = stat->shift + stat->nbits * (id - stat->start_id);
-
- if (state == DSCR_DEVSTATE_ENABLED)
- ctl_val = stat->enable;
- else
- ctl_val = stat->disable;
-
- do {
- val = soc_readl(dscr.base + stat->reg);
- val >>= ctl_shift;
- val &= ((1 << stat->nbits) - 1);
- } while (val != ctl_val);
-}
-EXPORT_SYMBOL(dscr_set_devstate);
-
-/*
- * Drivers can use this to reset RMII module.
- */
-void dscr_rmii_reset(int id, int assert)
-{
- struct rmii_reset_reg *r;
- unsigned long flags;
- u32 val;
-
- if (id < 0 || id >= MAX_SOC_EMACS)
- return;
-
- r = &dscr.rmii_resets[id];
- if (r->mask == 0)
- return;
-
- spin_lock_irqsave(&dscr.lock, flags);
-
- val = soc_readl(dscr.base + r->reg);
- if (assert)
- dscr_write(r->reg, val | r->mask);
- else
- dscr_write(r->reg, val & ~(r->mask));
-
- spin_unlock_irqrestore(&dscr.lock, flags);
-}
-EXPORT_SYMBOL(dscr_rmii_reset);
-
-static void __init dscr_parse_devstat(struct device_node *node,
- void __iomem *base)
-{
- u32 val;
- int err;
-
- err = of_property_read_u32_array(node, "ti,dscr-devstat", &val, 1);
- if (!err)
- c6x_devstat = soc_readl(base + val);
- printk(KERN_INFO "DEVSTAT: %08x\n", c6x_devstat);
-}
-
-static void __init dscr_parse_silicon_rev(struct device_node *node,
- void __iomem *base)
-{
- u32 vals[3];
- int err;
-
- err = of_property_read_u32_array(node, "ti,dscr-silicon-rev", vals, 3);
- if (!err) {
- c6x_silicon_rev = soc_readl(base + vals[0]);
- c6x_silicon_rev >>= vals[1];
- c6x_silicon_rev &= vals[2];
- }
-}
-
-/*
- * Some SoCs will have a pair of fuse registers which hold
- * an ethernet MAC address. The "ti,dscr-mac-fuse-regs"
- * property is a mapping from fuse register bytes to MAC
- * address bytes. The expected format is:
- *
- * ti,dscr-mac-fuse-regs = <reg0 b3 b2 b1 b0
- * reg1 b3 b2 b1 b0>
- *
- * reg0 and reg1 are the offsets of the two fuse registers.
- * b3-b0 positionally represent bytes within the fuse register.
- * b3 is the most significant byte and b0 is the least.
- * Allowable values for b3-b0 are:
- *
- * 0 = fuse register byte not used in MAC address
- * 1-6 = index+1 into c6x_fuse_mac[]
- */
-static void __init dscr_parse_mac_fuse(struct device_node *node,
- void __iomem *base)
-{
- u32 vals[10], fuse;
- int f, i, j, err;
-
- err = of_property_read_u32_array(node, "ti,dscr-mac-fuse-regs",
- vals, 10);
- if (err)
- return;
-
- for (f = 0; f < 2; f++) {
- fuse = soc_readl(base + vals[f * 5]);
- for (j = (f * 5) + 1, i = 24; i >= 0; i -= 8, j++)
- if (vals[j] && vals[j] <= 6)
- c6x_fuse_mac[vals[j] - 1] = fuse >> i;
- }
-}
-
-static void __init dscr_parse_rmii_resets(struct device_node *node,
- void __iomem *base)
-{
- const __be32 *p;
- int i, size;
-
- /* look for RMII reset registers */
- p = of_get_property(node, "ti,dscr-rmii-resets", &size);
- if (p) {
- /* parse all the reg/mask pairs we can handle */
- size /= (sizeof(*p) * 2);
- if (size > MAX_SOC_EMACS)
- size = MAX_SOC_EMACS;
-
- for (i = 0; i < size; i++) {
- dscr.rmii_resets[i].reg = be32_to_cpup(p++);
- dscr.rmii_resets[i].mask = be32_to_cpup(p++);
- }
- }
-}
-
-
-static void __init dscr_parse_privperm(struct device_node *node,
- void __iomem *base)
-{
- u32 vals[2];
- int err;
-
- err = of_property_read_u32_array(node, "ti,dscr-privperm", vals, 2);
- if (err)
- return;
- dscr_write(vals[0], vals[1]);
-}
-
-/*
- * SoCs may have "locked" DSCR registers which can only be written
- * after writing a key value to a lock register. These
- * registers can be described with the "ti,dscr-locked-regs" property.
- * This property provides a list of register descriptions with each
- * description consisting of three values.
- *
- * ti,dscr-locked-regs = <reg0 lockreg0 key0
- * ...
- * regN lockregN keyN>;
- *
- * reg is the offset of the locked register
- * lockreg is the offset of the lock register
- * key is the unlock key written to lockreg
- *
- */
-static void __init dscr_parse_locked_regs(struct device_node *node,
- void __iomem *base)
-{
- struct locked_reg *r;
- const __be32 *p;
- int i, size;
-
- p = of_get_property(node, "ti,dscr-locked-regs", &size);
- if (p) {
- /* parse all the register descriptions we can handle */
- size /= (sizeof(*p) * 3);
- if (size > MAX_LOCKED_REGS)
- size = MAX_LOCKED_REGS;
-
- for (i = 0; i < size; i++) {
- r = &dscr.locked[i];
-
- r->reg = be32_to_cpup(p++);
- r->lockreg = be32_to_cpup(p++);
- r->key = be32_to_cpup(p++);
- }
- }
-}
-
-/*
- * SoCs may have DSCR registers which are only write enabled after
- * writing specific key values to two registers. The two key registers
- * and the key values can be parsed from a "ti,dscr-kick-regs"
- * property with the following layout:
- *
- * ti,dscr-kick-regs = <kickreg0 key0 kickreg1 key1>
- *
- * kickreg is the offset of the "kick" register
- * key is the value which unlocks writing for protected regs
- */
-static void __init dscr_parse_kick_regs(struct device_node *node,
- void __iomem *base)
-{
- u32 vals[4];
- int err;
-
- err = of_property_read_u32_array(node, "ti,dscr-kick-regs", vals, 4);
- if (!err) {
- dscr.kick_reg[0] = vals[0];
- dscr.kick_key[0] = vals[1];
- dscr.kick_reg[1] = vals[2];
- dscr.kick_key[1] = vals[3];
- }
-}
-
-
-/*
- * SoCs may provide controls to enable/disable individual IP blocks. These
- * controls in the DSCR usually control pin drivers but also may control
- * clocking and or resets. The device tree is used to describe the bitfields
- * in registers used to control device state. The number of bits and their
- * values may vary even within the same register.
- *
- * The layout of these bitfields is described by the ti,dscr-devstate-ctl-regs
- * property. This property is a list where each element describes a contiguous
- * range of control fields with like properties. Each element of the list
- * consists of 7 cells with the following values:
- *
- * start_id num_ids reg enable disable start_bit nbits
- *
- * start_id is device id for the first device control in the range
- * num_ids is the number of device controls in the range
- * reg is the offset of the register holding the control bits
- * enable is the value to enable a device
- * disable is the value to disable a device (0xffffffff if cannot disable)
- * start_bit is the bit number of the first bit in the range
- * nbits is the number of bits per device control
- */
-static void __init dscr_parse_devstate_ctl_regs(struct device_node *node,
- void __iomem *base)
-{
- struct devstate_ctl_reg *r;
- const __be32 *p;
- int i, j, size;
-
- p = of_get_property(node, "ti,dscr-devstate-ctl-regs", &size);
- if (p) {
- /* parse all the ranges we can handle */
- size /= (sizeof(*p) * 7);
- if (size > MAX_DEVCTL_REGS)
- size = MAX_DEVCTL_REGS;
-
- for (i = 0; i < size; i++) {
- r = &dscr.devctl[i];
-
- r->start_id = be32_to_cpup(p++);
- r->num_ids = be32_to_cpup(p++);
- r->reg = be32_to_cpup(p++);
- r->enable = be32_to_cpup(p++);
- r->disable = be32_to_cpup(p++);
- if (r->disable == 0xffffffff)
- r->enable_only = 1;
- r->shift = be32_to_cpup(p++);
- r->nbits = be32_to_cpup(p++);
-
- for (j = r->start_id;
- j < (r->start_id + r->num_ids);
- j++)
- dscr.devstate_info[j].ctl = r;
- }
- }
-}
-
-/*
- * SoCs may provide status registers indicating the state (enabled/disabled) of
- * devices on the SoC. The device tree is used to describe the bitfields in
- * registers used to provide device status. The number of bits and their
- * values used to provide status may vary even within the same register.
- *
- * The layout of these bitfields is described by the ti,dscr-devstate-stat-regs
- * property. This property is a list where each element describes a contiguous
- * range of status fields with like properties. Each element of the list
- * consists of 7 cells with the following values:
- *
- * start_id num_ids reg enable disable start_bit nbits
- *
- * start_id is device id for the first device status in the range
- * num_ids is the number of devices covered by the range
- * reg is the offset of the register holding the status bits
- * enable is the value indicating device is enabled
- * disable is the value indicating device is disabled
- * start_bit is the bit number of the first bit in the range
- * nbits is the number of bits per device status
- */
-static void __init dscr_parse_devstate_stat_regs(struct device_node *node,
- void __iomem *base)
-{
- struct devstate_stat_reg *r;
- const __be32 *p;
- int i, j, size;
-
- p = of_get_property(node, "ti,dscr-devstate-stat-regs", &size);
- if (p) {
- /* parse all the ranges we can handle */
- size /= (sizeof(*p) * 7);
- if (size > MAX_DEVSTAT_REGS)
- size = MAX_DEVSTAT_REGS;
-
- for (i = 0; i < size; i++) {
- r = &dscr.devstat[i];
-
- r->start_id = be32_to_cpup(p++);
- r->num_ids = be32_to_cpup(p++);
- r->reg = be32_to_cpup(p++);
- r->enable = be32_to_cpup(p++);
- r->disable = be32_to_cpup(p++);
- r->shift = be32_to_cpup(p++);
- r->nbits = be32_to_cpup(p++);
-
- for (j = r->start_id;
- j < (r->start_id + r->num_ids);
- j++)
- dscr.devstate_info[j].stat = r;
- }
- }
-}
-
-static struct of_device_id dscr_ids[] __initdata = {
- { .compatible = "ti,c64x+dscr" },
- {}
-};
-
-/*
- * Probe for DSCR area.
- *
- * This has to be done early on in case timer or interrupt controller
- * needs something. e.g. On C6455 SoC, timer must be enabled through
- * DSCR before it is functional.
- */
-void __init dscr_probe(void)
-{
- struct device_node *node;
- void __iomem *base;
-
- spin_lock_init(&dscr.lock);
-
- node = of_find_matching_node(NULL, dscr_ids);
- if (!node)
- return;
-
- base = of_iomap(node, 0);
- if (!base) {
- of_node_put(node);
- return;
- }
-
- dscr.base = base;
-
- dscr_parse_devstat(node, base);
- dscr_parse_silicon_rev(node, base);
- dscr_parse_mac_fuse(node, base);
- dscr_parse_rmii_resets(node, base);
- dscr_parse_locked_regs(node, base);
- dscr_parse_kick_regs(node, base);
- dscr_parse_devstate_ctl_regs(node, base);
- dscr_parse_devstate_stat_regs(node, base);
- dscr_parse_privperm(node, base);
-}
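
The bitfield arithmetic used by dscr_set_devstate() above, worked through
with made-up numbers (a hypothetical control range starting at device id 4,
one bit per device, first bit at position 8):

#include <stdio.h>

int main(void)
{
	int start_id = 4, nbits = 1, shift = 8;		/* hypothetical ctl range */
	int id = 7;					/* device being enabled */

	int ctl_shift = shift + nbits * (id - start_id);		/* bit 11 */
	unsigned int ctl_mask = ((1u << nbits) - 1) << ctl_shift;	/* 0x800 */

	printf("device %d -> bit %d, mask 0x%x\n", id, ctl_shift, ctl_mask);
	return 0;
}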
diff --git a/arch/c6x/platforms/emif.c b/arch/c6x/platforms/emif.c
deleted file mode 100644
index 6142ecc2cd88..000000000000
--- a/arch/c6x/platforms/emif.c
+++ /dev/null
@@ -1,84 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * External Memory Interface
- *
- * Copyright (C) 2011 Texas Instruments Incorporated
- * Author: Mark Salter <msalter@redhat.com>
- */
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
-#include <asm/soc.h>
-#include <asm/dscr.h>
-
-#define NUM_EMIFA_CHIP_ENABLES 4
-
-struct emifa_regs {
- u32 midr;
- u32 stat;
- u32 reserved1[6];
- u32 bprio;
- u32 reserved2[23];
- u32 cecfg[NUM_EMIFA_CHIP_ENABLES];
- u32 reserved3[4];
- u32 awcc;
- u32 reserved4[7];
- u32 intraw;
- u32 intmsk;
- u32 intmskset;
- u32 intmskclr;
-};
-
-static struct of_device_id emifa_match[] __initdata = {
- { .compatible = "ti,c64x+emifa" },
- {}
-};
-
-/*
- * Parse device tree for existence of an EMIF (External Memory Interface)
- * and initialize it if found.
- */
-static int __init c6x_emifa_init(void)
-{
- struct emifa_regs __iomem *regs;
- struct device_node *node;
- const __be32 *p;
- u32 val;
- int i, len, err;
-
- node = of_find_matching_node(NULL, emifa_match);
- if (!node)
- return 0;
-
- regs = of_iomap(node, 0);
- if (!regs)
- return 0;
-
- /* look for a dscr-based enable for emifa pin buffers */
- err = of_property_read_u32_array(node, "ti,dscr-dev-enable", &val, 1);
- if (!err)
- dscr_set_devstate(val, DSCR_DEVSTATE_ENABLED);
-
- /* set up the chip enables */
- p = of_get_property(node, "ti,emifa-ce-config", &len);
- if (p) {
- len /= sizeof(u32);
- if (len > NUM_EMIFA_CHIP_ENABLES)
- len = NUM_EMIFA_CHIP_ENABLES;
- for (i = 0; i < len; i++)
- soc_writel(be32_to_cpup(&p[i]), &regs->cecfg[i]);
- }
-
- err = of_property_read_u32_array(node, "ti,emifa-burst-priority", &val, 1);
- if (!err)
- soc_writel(val, &regs->bprio);
-
- err = of_property_read_u32_array(node, "ti,emifa-async-wait-control", &val, 1);
- if (!err)
- soc_writel(val, &regs->awcc);
-
- iounmap(regs);
- of_node_put(node);
- return 0;
-}
-pure_initcall(c6x_emifa_init);
diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c
deleted file mode 100644
index 56189e50728c..000000000000
--- a/arch/c6x/platforms/megamod-pic.c
+++ /dev/null
@@ -1,344 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Support for C64x+ Megamodule Interrupt Controller
- *
- * Copyright (C) 2010, 2011 Texas Instruments Incorporated
- * Contributed by: Mark Salter <msalter@redhat.com>
- */
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-#include <asm/soc.h>
-#include <asm/megamod-pic.h>
-
-#define NR_COMBINERS 4
-#define NR_MUX_OUTPUTS 12
-
-#define IRQ_UNMAPPED 0xffff
-
-/*
- * Megamodule Interrupt Controller register layout
- */
-struct megamod_regs {
- u32 evtflag[8];
- u32 evtset[8];
- u32 evtclr[8];
- u32 reserved0[8];
- u32 evtmask[8];
- u32 mevtflag[8];
- u32 expmask[8];
- u32 mexpflag[8];
- u32 intmux_unused;
- u32 intmux[7];
- u32 reserved1[8];
- u32 aegmux[2];
- u32 reserved2[14];
- u32 intxstat;
- u32 intxclr;
- u32 intdmask;
- u32 reserved3[13];
- u32 evtasrt;
-};
-
-struct megamod_pic {
- struct irq_domain *irqhost;
- struct megamod_regs __iomem *regs;
- raw_spinlock_t lock;
-
- /* hw mux mapping */
- unsigned int output_to_irq[NR_MUX_OUTPUTS];
-};
-
-static struct megamod_pic *mm_pic;
-
-struct megamod_cascade_data {
- struct megamod_pic *pic;
- int index;
-};
-
-static struct megamod_cascade_data cascade_data[NR_COMBINERS];
-
-static void mask_megamod(struct irq_data *data)
-{
- struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
- irq_hw_number_t src = irqd_to_hwirq(data);
- u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];
-
- raw_spin_lock(&pic->lock);
- soc_writel(soc_readl(evtmask) | (1 << (src & 31)), evtmask);
- raw_spin_unlock(&pic->lock);
-}
-
-static void unmask_megamod(struct irq_data *data)
-{
- struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
- irq_hw_number_t src = irqd_to_hwirq(data);
- u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];
-
- raw_spin_lock(&pic->lock);
- soc_writel(soc_readl(evtmask) & ~(1 << (src & 31)), evtmask);
- raw_spin_unlock(&pic->lock);
-}
-
-static struct irq_chip megamod_chip = {
- .name = "megamod",
- .irq_mask = mask_megamod,
- .irq_unmask = unmask_megamod,
-};
-
-static void megamod_irq_cascade(struct irq_desc *desc)
-{
- struct megamod_cascade_data *cascade;
- struct megamod_pic *pic;
- unsigned int irq;
- u32 events;
- int n, idx;
-
- cascade = irq_desc_get_handler_data(desc);
-
- pic = cascade->pic;
- idx = cascade->index;
-
- while ((events = soc_readl(&pic->regs->mevtflag[idx])) != 0) {
- n = __ffs(events);
-
- irq = irq_linear_revmap(pic->irqhost, idx * 32 + n);
-
- soc_writel(1 << n, &pic->regs->evtclr[idx]);
-
- generic_handle_irq(irq);
- }
-}
-
-static int megamod_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- struct megamod_pic *pic = h->host_data;
- int i;
-
- /* We shouldn't see a hwirq which is muxed to core controller */
- for (i = 0; i < NR_MUX_OUTPUTS; i++)
- if (pic->output_to_irq[i] == hw)
- return -1;
-
- irq_set_chip_data(virq, pic);
- irq_set_chip_and_handler(virq, &megamod_chip, handle_level_irq);
-
- /* Set default irq type */
- irq_set_irq_type(virq, IRQ_TYPE_NONE);
-
- return 0;
-}
-
-static const struct irq_domain_ops megamod_domain_ops = {
- .map = megamod_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output)
-{
- int index, offset;
- u32 val;
-
- if (src < 0 || src >= (NR_COMBINERS * 32)) {
- pic->output_to_irq[output] = IRQ_UNMAPPED;
- return;
- }
-
- /* four mappings per mux register */
- index = output / 4;
- offset = (output & 3) * 8;
-
- val = soc_readl(&pic->regs->intmux[index]);
- val &= ~(0xff << offset);
- val |= src << offset;
- soc_writel(val, &pic->regs->intmux[index]);
-}
-
-/*
- * Parse the MUX mapping, if one exists.
- *
- * The MUX map is an array of up to 12 cells; one for each usable core priority
- * interrupt. The value of a given cell is the megamodule interrupt source
- * which is to be MUXed to the output corresponding to the cell position
- * within the array. The first cell in the array corresponds to priority
- * 4 and the last (12th) cell corresponds to priority 15. The allowed
- * values are 4 - ((NR_COMBINERS * 32) - 1). Note that the combined interrupt
- * sources (0 - 3) are not allowed to be mapped through this property. They
- * are handled through the "interrupts" property. This allows us to use a
- * value of zero as a "do not map" placeholder.
- */
-static void __init parse_priority_map(struct megamod_pic *pic,
- int *mapping, int size)
-{
- struct device_node *np = irq_domain_get_of_node(pic->irqhost);
- const __be32 *map;
- int i, maplen;
- u32 val;
-
- map = of_get_property(np, "ti,c64x+megamod-pic-mux", &maplen);
- if (map) {
- maplen /= 4;
- if (maplen > size)
- maplen = size;
-
- for (i = 0; i < maplen; i++) {
- val = be32_to_cpup(map);
- if (val && val >= 4)
- mapping[i] = val;
- ++map;
- }
- }
-}
-
-static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
-{
- struct megamod_pic *pic;
- int i, irq;
- int mapping[NR_MUX_OUTPUTS];
-
- pr_info("Initializing C64x+ Megamodule PIC\n");
-
- pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
- if (!pic) {
- pr_err("%pOF: Could not alloc PIC structure.\n", np);
- return NULL;
- }
-
- pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
- &megamod_domain_ops, pic);
- if (!pic->irqhost) {
- pr_err("%pOF: Could not alloc host.\n", np);
- goto error_free;
- }
-
- pic->irqhost->host_data = pic;
-
- raw_spin_lock_init(&pic->lock);
-
- pic->regs = of_iomap(np, 0);
- if (!pic->regs) {
- pr_err("%pOF: Could not map registers.\n", np);
- goto error_free;
- }
-
- /* Initialize MUX map */
- for (i = 0; i < ARRAY_SIZE(mapping); i++)
- mapping[i] = IRQ_UNMAPPED;
-
- parse_priority_map(pic, mapping, ARRAY_SIZE(mapping));
-
- /*
- * We can have up to 12 interrupts cascading to the core controller.
- * These cascades can be from the combined interrupt sources or for
- * individual interrupt sources. The "interrupts" property only
- * deals with the cascaded combined interrupts. The individual
- * interrupts muxed to the core controller use the core controller
- * as their interrupt parent.
- */
- for (i = 0; i < NR_COMBINERS; i++) {
- struct irq_data *irq_data;
- irq_hw_number_t hwirq;
-
- irq = irq_of_parse_and_map(np, i);
- if (irq == NO_IRQ)
- continue;
-
- irq_data = irq_get_irq_data(irq);
- if (!irq_data) {
- pr_err("%pOF: combiner-%d no irq_data for virq %d!\n",
- np, i, irq);
- continue;
- }
-
- hwirq = irq_data->hwirq;
-
- /*
- * Check that device tree provided something in the range
- * of the core priority interrupts (4 - 15).
- */
- if (hwirq < 4 || hwirq >= NR_PRIORITY_IRQS) {
- pr_err("%pOF: combiner-%d core irq %ld out of range!\n",
- np, i, hwirq);
- continue;
- }
-
- /* record the mapping */
- mapping[hwirq - 4] = i;
-
- pr_debug("%pOF: combiner-%d cascading to hwirq %ld\n",
- np, i, hwirq);
-
- cascade_data[i].pic = pic;
- cascade_data[i].index = i;
-
- /* mask and clear all events in combiner */
- soc_writel(~0, &pic->regs->evtmask[i]);
- soc_writel(~0, &pic->regs->evtclr[i]);
-
- irq_set_chained_handler_and_data(irq, megamod_irq_cascade,
- &cascade_data[i]);
- }
-
- /* Finally, set up the MUX registers */
- for (i = 0; i < NR_MUX_OUTPUTS; i++) {
- if (mapping[i] != IRQ_UNMAPPED) {
- pr_debug("%pOF: setting mux %d to priority %d\n",
- np, mapping[i], i + 4);
- set_megamod_mux(pic, mapping[i], i);
- }
- }
-
- return pic;
-
-error_free:
- kfree(pic);
-
- return NULL;
-}
-
-/*
- * Return next active event after ACK'ing it.
- * Return -1 if no events active.
- */
-static int get_exception(void)
-{
- int i, bit;
- u32 mask;
-
- for (i = 0; i < NR_COMBINERS; i++) {
- mask = soc_readl(&mm_pic->regs->mexpflag[i]);
- if (mask) {
- bit = __ffs(mask);
- soc_writel(1 << bit, &mm_pic->regs->evtclr[i]);
- return (i * 32) + bit;
- }
- }
- return -1;
-}
-
-static void assert_event(unsigned int val)
-{
- soc_writel(val, &mm_pic->regs->evtasrt);
-}
-
-void __init megamod_pic_init(void)
-{
- struct device_node *np;
-
- np = of_find_compatible_node(NULL, NULL, "ti,c64x+megamod-pic");
- if (!np)
- return;
-
- mm_pic = init_megamod_pic(np);
- of_node_put(np);
-
- soc_ops.get_exception = get_exception;
- soc_ops.assert_event = assert_event;
-
- return;
-}
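
The INTMUX field arithmetic in set_megamod_mux() above, with illustrative
values: each 32-bit mux register holds four 8-bit event numbers, so output N
lives at byte (N & 3) of intmux[N / 4]:

#include <stdio.h>

int main(void)
{
	int output = 9;				/* core priority 13 (output + 4) */
	int src = 42;				/* megamodule event to route there */

	int index  = output / 4;		/* intmux[2] */
	int offset = (output & 3) * 8;		/* bits 15:8 */
	unsigned int mask = 0xffu << offset;

	printf("intmux[%d]: clear 0x%08x, set %d << %d\n", index, mask, src, offset);
	return 0;
}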
diff --git a/arch/c6x/platforms/pll.c b/arch/c6x/platforms/pll.c
deleted file mode 100644
index 6fdf20d64dc7..000000000000
--- a/arch/c6x/platforms/pll.c
+++ /dev/null
@@ -1,440 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Clock and PLL control for C64x+ devices
- *
- * Copyright (C) 2010, 2011 Texas Instruments.
- * Contributed by: Mark Salter <msalter@redhat.com>
- *
- * Copied heavily from arm/mach-davinci/clock.c, so:
- *
- * Copyright (C) 2006-2007 Texas Instruments.
- * Copyright (C) 2008-2009 Deep Root Systems, LLC
- */
-
-#include <linux/module.h>
-#include <linux/clkdev.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/err.h>
-
-#include <asm/clock.h>
-#include <asm/soc.h>
-
-static LIST_HEAD(clocks);
-static DEFINE_MUTEX(clocks_mutex);
-static DEFINE_SPINLOCK(clockfw_lock);
-
-static void __clk_enable(struct clk *clk)
-{
- if (clk->parent)
- __clk_enable(clk->parent);
- clk->usecount++;
-}
-
-static void __clk_disable(struct clk *clk)
-{
- if (WARN_ON(clk->usecount == 0))
- return;
- --clk->usecount;
-
- if (clk->parent)
- __clk_disable(clk->parent);
-}
-
-int clk_enable(struct clk *clk)
-{
- unsigned long flags;
-
- if (clk == NULL || IS_ERR(clk))
- return -EINVAL;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- __clk_enable(clk);
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
- unsigned long flags;
-
- if (clk == NULL || IS_ERR(clk))
- return;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- __clk_disable(clk);
- spin_unlock_irqrestore(&clockfw_lock, flags);
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk))
- return -EINVAL;
-
- return clk->rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- if (clk == NULL || IS_ERR(clk))
- return -EINVAL;
-
- if (clk->round_rate)
- return clk->round_rate(clk, rate);
-
- return clk->rate;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-/* Propagate rate to children */
-static void propagate_rate(struct clk *root)
-{
- struct clk *clk;
-
- list_for_each_entry(clk, &root->children, childnode) {
- if (clk->recalc)
- clk->rate = clk->recalc(clk);
- propagate_rate(clk);
- }
-}
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long flags;
- int ret = -EINVAL;
-
- if (clk == NULL || IS_ERR(clk))
- return ret;
-
- if (clk->set_rate)
- ret = clk->set_rate(clk, rate);
-
- spin_lock_irqsave(&clockfw_lock, flags);
- if (ret == 0) {
- if (clk->recalc)
- clk->rate = clk->recalc(clk);
- propagate_rate(clk);
- }
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
- unsigned long flags;
-
- if (clk == NULL || IS_ERR(clk))
- return -EINVAL;
-
- /* Cannot change parent on enabled clock */
- if (WARN_ON(clk->usecount))
- return -EINVAL;
-
- mutex_lock(&clocks_mutex);
- clk->parent = parent;
- list_del_init(&clk->childnode);
- list_add(&clk->childnode, &clk->parent->children);
- mutex_unlock(&clocks_mutex);
-
- spin_lock_irqsave(&clockfw_lock, flags);
- if (clk->recalc)
- clk->rate = clk->recalc(clk);
- propagate_rate(clk);
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(clk_set_parent);
-
-int clk_register(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk))
- return -EINVAL;
-
- if (WARN(clk->parent && !clk->parent->rate,
- "CLK: %s parent %s has no rate!\n",
- clk->name, clk->parent->name))
- return -EINVAL;
-
- mutex_lock(&clocks_mutex);
- list_add_tail(&clk->node, &clocks);
- if (clk->parent)
- list_add_tail(&clk->childnode, &clk->parent->children);
- mutex_unlock(&clocks_mutex);
-
- /* If rate is already set, use it */
- if (clk->rate)
- return 0;
-
- /* Else, see if there is a way to calculate it */
- if (clk->recalc)
- clk->rate = clk->recalc(clk);
-
- /* Otherwise, default to parent rate */
- else if (clk->parent)
- clk->rate = clk->parent->rate;
-
- return 0;
-}
-EXPORT_SYMBOL(clk_register);
-
-void clk_unregister(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk))
- return;
-
- mutex_lock(&clocks_mutex);
- list_del(&clk->node);
- list_del(&clk->childnode);
- mutex_unlock(&clocks_mutex);
-}
-EXPORT_SYMBOL(clk_unregister);
-
-
-static u32 pll_read(struct pll_data *pll, int reg)
-{
- return soc_readl(pll->base + reg);
-}
-
-static unsigned long clk_sysclk_recalc(struct clk *clk)
-{
- u32 v, plldiv = 0;
- struct pll_data *pll;
- unsigned long rate = clk->rate;
-
- if (WARN_ON(!clk->parent))
- return rate;
-
- rate = clk->parent->rate;
-
- /* the parent must be a PLL */
- if (WARN_ON(!clk->parent->pll_data))
- return rate;
-
- pll = clk->parent->pll_data;
-
- /* If pre-PLL, source clock is before the multiplier and divider(s) */
- if (clk->flags & PRE_PLL)
- rate = pll->input_rate;
-
- if (!clk->div) {
- pr_debug("%s: (no divider) rate = %lu KHz\n",
- clk->name, rate / 1000);
- return rate;
- }
-
- if (clk->flags & FIXED_DIV_PLL) {
- rate /= clk->div;
- pr_debug("%s: (fixed divide by %d) rate = %lu KHz\n",
- clk->name, clk->div, rate / 1000);
- return rate;
- }
-
- v = pll_read(pll, clk->div);
- if (v & PLLDIV_EN)
- plldiv = (v & PLLDIV_RATIO_MASK) + 1;
-
- if (plldiv == 0)
- plldiv = 1;
-
- rate /= plldiv;
-
- pr_debug("%s: (divide by %d) rate = %lu KHz\n",
- clk->name, plldiv, rate / 1000);
-
- return rate;
-}
-
-static unsigned long clk_leafclk_recalc(struct clk *clk)
-{
- if (WARN_ON(!clk->parent))
- return clk->rate;
-
- pr_debug("%s: (parent %s) rate = %lu KHz\n",
- clk->name, clk->parent->name, clk->parent->rate / 1000);
-
- return clk->parent->rate;
-}
-
-static unsigned long clk_pllclk_recalc(struct clk *clk)
-{
- u32 ctrl, mult = 0, prediv = 0, postdiv = 0;
- u8 bypass;
- struct pll_data *pll = clk->pll_data;
- unsigned long rate = clk->rate;
-
- if (clk->flags & FIXED_RATE_PLL)
- return rate;
-
- ctrl = pll_read(pll, PLLCTL);
- rate = pll->input_rate = clk->parent->rate;
-
- if (ctrl & PLLCTL_PLLEN)
- bypass = 0;
- else
- bypass = 1;
-
- if (pll->flags & PLL_HAS_MUL) {
- mult = pll_read(pll, PLLM);
- mult = (mult & PLLM_PLLM_MASK) + 1;
- }
- if (pll->flags & PLL_HAS_PRE) {
- prediv = pll_read(pll, PLLPRE);
- if (prediv & PLLDIV_EN)
- prediv = (prediv & PLLDIV_RATIO_MASK) + 1;
- else
- prediv = 0;
- }
- if (pll->flags & PLL_HAS_POST) {
- postdiv = pll_read(pll, PLLPOST);
- if (postdiv & PLLDIV_EN)
- postdiv = (postdiv & PLLDIV_RATIO_MASK) + 1;
- else
- postdiv = 1;
- }
-
- if (!bypass) {
- if (prediv)
- rate /= prediv;
- if (mult)
- rate *= mult;
- if (postdiv)
- rate /= postdiv;
-
- pr_debug("PLL%d: input = %luMHz, pre[%d] mul[%d] post[%d] "
- "--> %luMHz output.\n",
- pll->num, clk->parent->rate / 1000000,
- prediv, mult, postdiv, rate / 1000000);
- } else
- pr_debug("PLL%d: input = %luMHz, bypass mode.\n",
- pll->num, clk->parent->rate / 1000000);
-
- return rate;
-}
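
In the non-bypass path of clk_pllclk_recalc() above, the output rate is input_rate / prediv * mult / postdiv, with any stage whose value is zero skipped. A worked example with assumed values (the 25 MHz input and multiplier of 40 are made up for illustration, not read from any PLL register):

	/* Worked example of the clk_pllclk_recalc() arithmetic above. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long input = 25000000;  /* assumed 25 MHz reference      */
		unsigned long prediv = 1;        /* PLLPRE ratio + 1 (0 = skip)   */
		unsigned long mult = 40;         /* PLLM ratio + 1                */
		unsigned long postdiv = 1;       /* PLLPOST ratio + 1 (0 = skip)  */
		unsigned long rate = input;

		if (prediv)  rate /= prediv;
		if (mult)    rate *= mult;
		if (postdiv) rate /= postdiv;

		printf("PLL output = %lu MHz\n", rate / 1000000);   /* 1000 MHz */
		return 0;
	}
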
-
-
-static void __init __init_clk(struct clk *clk)
-{
- INIT_LIST_HEAD(&clk->node);
- INIT_LIST_HEAD(&clk->children);
- INIT_LIST_HEAD(&clk->childnode);
-
- if (!clk->recalc) {
-
- /* Check if clock is a PLL */
- if (clk->pll_data)
- clk->recalc = clk_pllclk_recalc;
-
- /* Else, if it is a PLL-derived clock */
- else if (clk->flags & CLK_PLL)
- clk->recalc = clk_sysclk_recalc;
-
- /* Otherwise, it is a leaf clock (PSC clock) */
- else if (clk->parent)
- clk->recalc = clk_leafclk_recalc;
- }
-}
-
-void __init c6x_clks_init(struct clk_lookup *clocks)
-{
- struct clk_lookup *c;
- struct clk *clk;
- size_t num_clocks = 0;
-
- for (c = clocks; c->clk; c++) {
- clk = c->clk;
-
- __init_clk(clk);
- clk_register(clk);
- num_clocks++;
-
- /* Turn on clocks that Linux doesn't otherwise manage */
- if (clk->flags & ALWAYS_ENABLED)
- clk_enable(clk);
- }
-
- clkdev_add_table(clocks, num_clocks);
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-#define CLKNAME_MAX 10 /* longest clock name */
-#define NEST_DELTA 2
-#define NEST_MAX 4
-
-static void
-dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
-{
- char *state;
- char buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
- struct clk *clk;
- unsigned i;
-
- if (parent->flags & CLK_PLL)
- state = "pll";
- else
- state = "";
-
- /* <nest spaces> name <pad to end> */
- memset(buf, ' ', sizeof(buf) - 1);
- buf[sizeof(buf) - 1] = 0;
- i = strlen(parent->name);
- memcpy(buf + nest, parent->name,
- min(i, (unsigned)(sizeof(buf) - 1 - nest)));
-
- seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
- buf, parent->usecount, state, clk_get_rate(parent));
- /* REVISIT show device associations too */
-
- /* cost is now small, but not linear... */
- list_for_each_entry(clk, &parent->children, childnode) {
- dump_clock(s, nest + NEST_DELTA, clk);
- }
-}
-
-static int c6x_ck_show(struct seq_file *m, void *v)
-{
- struct clk *clk;
-
- /*
- * Show clock tree; We trust nonzero usecounts equate to PSC enables...
- */
- mutex_lock(&clocks_mutex);
- list_for_each_entry(clk, &clocks, node)
- if (!clk->parent)
- dump_clock(m, 0, clk);
- mutex_unlock(&clocks_mutex);
-
- return 0;
-}
-
-static int c6x_ck_open(struct inode *inode, struct file *file)
-{
- return single_open(file, c6x_ck_show, NULL);
-}
-
-static const struct file_operations c6x_ck_operations = {
- .open = c6x_ck_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init c6x_clk_debugfs_init(void)
-{
- debugfs_create_file("c6x_clocks", S_IFREG | S_IRUGO, NULL, NULL,
- &c6x_ck_operations);
-
- return 0;
-}
-device_initcall(c6x_clk_debugfs_init);
-#endif /* CONFIG_DEBUG_FS */
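
The framework above keeps clocks in a parent/child tree; clk_set_rate() and clk_set_parent() call propagate_rate(), which walks the children depth-first and lets each child recompute its rate via ->recalc. A hedged toy model of that propagation (the struct and field names are simplified stand-ins, not the kernel's struct clk):

	/* Toy model of propagate_rate()/recalc from the file above. */
	#include <stdio.h>

	struct toy_clk {
		const char *name;
		unsigned long rate;
		unsigned int div;                 /* 0 means "same as parent" */
		struct toy_clk *children[4];
		int nr_children;
	};

	static void toy_propagate(struct toy_clk *root)
	{
		int i;

		for (i = 0; i < root->nr_children; i++) {
			struct toy_clk *c = root->children[i];

			c->rate = c->div ? root->rate / c->div : root->rate;
			toy_propagate(c);         /* depth-first, like the original */
		}
	}

	int main(void)
	{
		struct toy_clk pll = { .name = "pll1", .rate = 1000000000 };
		struct toy_clk sys = { .name = "sysclk3", .div = 6 };

		pll.children[pll.nr_children++] = &sys;
		toy_propagate(&pll);
		printf("%s = %lu Hz\n", sys.name, sys.rate);  /* 166666666 Hz */
		return 0;
	}
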
diff --git a/arch/c6x/platforms/plldata.c b/arch/c6x/platforms/plldata.c
deleted file mode 100644
index a799e04edefe..000000000000
--- a/arch/c6x/platforms/plldata.c
+++ /dev/null
@@ -1,467 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2011 Texas Instruments Incorporated
- * Author: Mark Salter <msalter@redhat.com>
- */
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/clkdev.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include <asm/clock.h>
-#include <asm/setup.h>
-#include <asm/special_insns.h>
-#include <asm/irq.h>
-
-/*
- * Common SoC clock support.
- */
-
-/* Default input for PLL1 */
-struct clk clkin1 = {
- .name = "clkin1",
- .node = LIST_HEAD_INIT(clkin1.node),
- .children = LIST_HEAD_INIT(clkin1.children),
- .childnode = LIST_HEAD_INIT(clkin1.childnode),
-};
-
-struct pll_data c6x_soc_pll1 = {
- .num = 1,
- .sysclks = {
- {
- .name = "pll1",
- .parent = &clkin1,
- .pll_data = &c6x_soc_pll1,
- .flags = CLK_PLL,
- },
- {
- .name = "pll1_sysclk1",
- .parent = &c6x_soc_pll1.sysclks[0],
- .flags = CLK_PLL,
- },
- {
- .name = "pll1_sysclk2",
- .parent = &c6x_soc_pll1.sysclks[0],
- .flags = CLK_PLL,
- },
- {
- .name = "pll1_sysclk3",
- .parent = &c6x_soc_pll1.sysclks[0],
- .flags = CLK_PLL,
- },
- {
- .name = "pll1_sysclk4",
- .parent = &c6x_soc_pll1.sysclks[0],
- .flags = CLK_PLL,
- },
- {
- .name = "pll1_sysclk5",
- .parent = &c6x_soc_pll1.sysclks[0],
- .flags = CLK_PLL,
- },
- {
- .name = "pll1_sysclk6",
- .parent = &c6x_soc_pll1.sysclks[0],
- .flags = CLK_PLL,
- },
- {
- .name = "pll1_sysclk7",
- .parent = &c6x_soc_pll1.sysclks[0],
- .flags = CLK_PLL,
- },
- {
- .name = "pll1_sysclk8",
- .parent = &c6x_soc_pll1.sysclks[0],
- .flags = CLK_PLL,
- },
- {
- .name = "pll1_sysclk9",
- .parent = &c6x_soc_pll1.sysclks[0],
- .flags = CLK_PLL,
- },
- {
- .name = "pll1_sysclk10",
- .parent = &c6x_soc_pll1.sysclks[0],
- .flags = CLK_PLL,
- },
- {
- .name = "pll1_sysclk11",
- .parent = &c6x_soc_pll1.sysclks[0],
- .flags = CLK_PLL,
- },
- {
- .name = "pll1_sysclk12",
- .parent = &c6x_soc_pll1.sysclks[0],
- .flags = CLK_PLL,
- },
- {
- .name = "pll1_sysclk13",
- .parent = &c6x_soc_pll1.sysclks[0],
- .flags = CLK_PLL,
- },
- {
- .name = "pll1_sysclk14",
- .parent = &c6x_soc_pll1.sysclks[0],
- .flags = CLK_PLL,
- },
- {
- .name = "pll1_sysclk15",
- .parent = &c6x_soc_pll1.sysclks[0],
- .flags = CLK_PLL,
- },
- {
- .name = "pll1_sysclk16",
- .parent = &c6x_soc_pll1.sysclks[0],
- .flags = CLK_PLL,
- },
- },
-};
-
-/* CPU core clock */
-struct clk c6x_core_clk = {
- .name = "core",
-};
-
-/* miscellaneous IO clocks */
-struct clk c6x_i2c_clk = {
- .name = "i2c",
-};
-
-struct clk c6x_watchdog_clk = {
- .name = "watchdog",
-};
-
-struct clk c6x_mcbsp1_clk = {
- .name = "mcbsp1",
-};
-
-struct clk c6x_mcbsp2_clk = {
- .name = "mcbsp2",
-};
-
-struct clk c6x_mdio_clk = {
- .name = "mdio",
-};
-
-
-#ifdef CONFIG_SOC_TMS320C6455
-static struct clk_lookup c6455_clks[] = {
- CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
- CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]),
- CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]),
- CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]),
- CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]),
- CLK(NULL, "core", &c6x_core_clk),
- CLK("i2c_davinci.1", NULL, &c6x_i2c_clk),
- CLK("watchdog", NULL, &c6x_watchdog_clk),
- CLK("2c81800.mdio", NULL, &c6x_mdio_clk),
- CLK("", NULL, NULL)
-};
-
-
-static void __init c6455_setup_clocks(struct device_node *node)
-{
- struct pll_data *pll = &c6x_soc_pll1;
- struct clk *sysclks = pll->sysclks;
-
- pll->flags = PLL_HAS_PRE | PLL_HAS_MUL;
-
- sysclks[2].flags |= FIXED_DIV_PLL;
- sysclks[2].div = 3;
- sysclks[3].flags |= FIXED_DIV_PLL;
- sysclks[3].div = 6;
- sysclks[4].div = PLLDIV4;
- sysclks[5].div = PLLDIV5;
-
- c6x_core_clk.parent = &sysclks[0];
- c6x_i2c_clk.parent = &sysclks[3];
- c6x_watchdog_clk.parent = &sysclks[3];
- c6x_mdio_clk.parent = &sysclks[3];
-
- c6x_clks_init(c6455_clks);
-}
-#endif /* CONFIG_SOC_TMS320C6455 */
-
-#ifdef CONFIG_SOC_TMS320C6457
-static struct clk_lookup c6457_clks[] = {
- CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
- CLK(NULL, "pll1_sysclk1", &c6x_soc_pll1.sysclks[1]),
- CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]),
- CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]),
- CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]),
- CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]),
- CLK(NULL, "core", &c6x_core_clk),
- CLK("i2c_davinci.1", NULL, &c6x_i2c_clk),
- CLK("watchdog", NULL, &c6x_watchdog_clk),
- CLK("2c81800.mdio", NULL, &c6x_mdio_clk),
- CLK("", NULL, NULL)
-};
-
-static void __init c6457_setup_clocks(struct device_node *node)
-{
- struct pll_data *pll = &c6x_soc_pll1;
- struct clk *sysclks = pll->sysclks;
-
- pll->flags = PLL_HAS_MUL | PLL_HAS_POST;
-
- sysclks[1].flags |= FIXED_DIV_PLL;
- sysclks[1].div = 1;
- sysclks[2].flags |= FIXED_DIV_PLL;
- sysclks[2].div = 3;
- sysclks[3].flags |= FIXED_DIV_PLL;
- sysclks[3].div = 6;
- sysclks[4].div = PLLDIV4;
- sysclks[5].div = PLLDIV5;
-
- c6x_core_clk.parent = &sysclks[1];
- c6x_i2c_clk.parent = &sysclks[3];
- c6x_watchdog_clk.parent = &sysclks[5];
- c6x_mdio_clk.parent = &sysclks[5];
-
- c6x_clks_init(c6457_clks);
-}
-#endif /* CONFIG_SOC_TMS320C6457 */
-
-#ifdef CONFIG_SOC_TMS320C6472
-static struct clk_lookup c6472_clks[] = {
- CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
- CLK(NULL, "pll1_sysclk1", &c6x_soc_pll1.sysclks[1]),
- CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]),
- CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]),
- CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]),
- CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]),
- CLK(NULL, "pll1_sysclk6", &c6x_soc_pll1.sysclks[6]),
- CLK(NULL, "pll1_sysclk7", &c6x_soc_pll1.sysclks[7]),
- CLK(NULL, "pll1_sysclk8", &c6x_soc_pll1.sysclks[8]),
- CLK(NULL, "pll1_sysclk9", &c6x_soc_pll1.sysclks[9]),
- CLK(NULL, "pll1_sysclk10", &c6x_soc_pll1.sysclks[10]),
- CLK(NULL, "core", &c6x_core_clk),
- CLK("i2c_davinci.1", NULL, &c6x_i2c_clk),
- CLK("watchdog", NULL, &c6x_watchdog_clk),
- CLK("2c81800.mdio", NULL, &c6x_mdio_clk),
- CLK("", NULL, NULL)
-};
-
-/* assumptions used for delay loop calculations */
-#define MIN_CLKIN1_KHz 15625
-#define MAX_CORE_KHz 700000
-#define MIN_PLLOUT_KHz MIN_CLKIN1_KHz
-
-static void __init c6472_setup_clocks(struct device_node *node)
-{
- struct pll_data *pll = &c6x_soc_pll1;
- struct clk *sysclks = pll->sysclks;
- int i;
-
- pll->flags = PLL_HAS_MUL;
-
- for (i = 1; i <= 6; i++) {
- sysclks[i].flags |= FIXED_DIV_PLL;
- sysclks[i].div = 1;
- }
-
- sysclks[7].flags |= FIXED_DIV_PLL;
- sysclks[7].div = 3;
- sysclks[8].flags |= FIXED_DIV_PLL;
- sysclks[8].div = 6;
- sysclks[9].flags |= FIXED_DIV_PLL;
- sysclks[9].div = 2;
- sysclks[10].div = PLLDIV10;
-
- c6x_core_clk.parent = &sysclks[get_coreid() + 1];
- c6x_i2c_clk.parent = &sysclks[8];
- c6x_watchdog_clk.parent = &sysclks[8];
- c6x_mdio_clk.parent = &sysclks[5];
-
- c6x_clks_init(c6472_clks);
-}
-#endif /* CONFIG_SOC_TMS320C6472 */
-
-
-#ifdef CONFIG_SOC_TMS320C6474
-static struct clk_lookup c6474_clks[] = {
- CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
- CLK(NULL, "pll1_sysclk7", &c6x_soc_pll1.sysclks[7]),
- CLK(NULL, "pll1_sysclk9", &c6x_soc_pll1.sysclks[9]),
- CLK(NULL, "pll1_sysclk10", &c6x_soc_pll1.sysclks[10]),
- CLK(NULL, "pll1_sysclk11", &c6x_soc_pll1.sysclks[11]),
- CLK(NULL, "pll1_sysclk12", &c6x_soc_pll1.sysclks[12]),
- CLK(NULL, "pll1_sysclk13", &c6x_soc_pll1.sysclks[13]),
- CLK(NULL, "core", &c6x_core_clk),
- CLK("i2c_davinci.1", NULL, &c6x_i2c_clk),
- CLK("mcbsp.1", NULL, &c6x_mcbsp1_clk),
- CLK("mcbsp.2", NULL, &c6x_mcbsp2_clk),
- CLK("watchdog", NULL, &c6x_watchdog_clk),
- CLK("2c81800.mdio", NULL, &c6x_mdio_clk),
- CLK("", NULL, NULL)
-};
-
-static void __init c6474_setup_clocks(struct device_node *node)
-{
- struct pll_data *pll = &c6x_soc_pll1;
- struct clk *sysclks = pll->sysclks;
-
- pll->flags = PLL_HAS_MUL;
-
- sysclks[7].flags |= FIXED_DIV_PLL;
- sysclks[7].div = 1;
- sysclks[9].flags |= FIXED_DIV_PLL;
- sysclks[9].div = 3;
- sysclks[10].flags |= FIXED_DIV_PLL;
- sysclks[10].div = 6;
-
- sysclks[11].div = PLLDIV11;
-
- sysclks[12].flags |= FIXED_DIV_PLL;
- sysclks[12].div = 2;
-
- sysclks[13].div = PLLDIV13;
-
- c6x_core_clk.parent = &sysclks[7];
- c6x_i2c_clk.parent = &sysclks[10];
- c6x_watchdog_clk.parent = &sysclks[10];
- c6x_mcbsp1_clk.parent = &sysclks[10];
- c6x_mcbsp2_clk.parent = &sysclks[10];
-
- c6x_clks_init(c6474_clks);
-}
-#endif /* CONFIG_SOC_TMS320C6474 */
-
-#ifdef CONFIG_SOC_TMS320C6678
-static struct clk_lookup c6678_clks[] = {
- CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
- CLK(NULL, "pll1_refclk", &c6x_soc_pll1.sysclks[1]),
- CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]),
- CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]),
- CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]),
- CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]),
- CLK(NULL, "pll1_sysclk6", &c6x_soc_pll1.sysclks[6]),
- CLK(NULL, "pll1_sysclk7", &c6x_soc_pll1.sysclks[7]),
- CLK(NULL, "pll1_sysclk8", &c6x_soc_pll1.sysclks[8]),
- CLK(NULL, "pll1_sysclk9", &c6x_soc_pll1.sysclks[9]),
- CLK(NULL, "pll1_sysclk10", &c6x_soc_pll1.sysclks[10]),
- CLK(NULL, "pll1_sysclk11", &c6x_soc_pll1.sysclks[11]),
- CLK(NULL, "core", &c6x_core_clk),
- CLK("", NULL, NULL)
-};
-
-static void __init c6678_setup_clocks(struct device_node *node)
-{
- struct pll_data *pll = &c6x_soc_pll1;
- struct clk *sysclks = pll->sysclks;
-
- pll->flags = PLL_HAS_MUL;
-
- sysclks[1].flags |= FIXED_DIV_PLL;
- sysclks[1].div = 1;
-
- sysclks[2].div = PLLDIV2;
-
- sysclks[3].flags |= FIXED_DIV_PLL;
- sysclks[3].div = 2;
-
- sysclks[4].flags |= FIXED_DIV_PLL;
- sysclks[4].div = 3;
-
- sysclks[5].div = PLLDIV5;
-
- sysclks[6].flags |= FIXED_DIV_PLL;
- sysclks[6].div = 64;
-
- sysclks[7].flags |= FIXED_DIV_PLL;
- sysclks[7].div = 6;
-
- sysclks[8].div = PLLDIV8;
-
- sysclks[9].flags |= FIXED_DIV_PLL;
- sysclks[9].div = 12;
-
- sysclks[10].flags |= FIXED_DIV_PLL;
- sysclks[10].div = 3;
-
- sysclks[11].flags |= FIXED_DIV_PLL;
- sysclks[11].div = 6;
-
- c6x_core_clk.parent = &sysclks[0];
- c6x_i2c_clk.parent = &sysclks[7];
-
- c6x_clks_init(c6678_clks);
-}
-#endif /* CONFIG_SOC_TMS320C6678 */
-
-static struct of_device_id c6x_clkc_match[] __initdata = {
-#ifdef CONFIG_SOC_TMS320C6455
- { .compatible = "ti,c6455-pll", .data = c6455_setup_clocks },
-#endif
-#ifdef CONFIG_SOC_TMS320C6457
- { .compatible = "ti,c6457-pll", .data = c6457_setup_clocks },
-#endif
-#ifdef CONFIG_SOC_TMS320C6472
- { .compatible = "ti,c6472-pll", .data = c6472_setup_clocks },
-#endif
-#ifdef CONFIG_SOC_TMS320C6474
- { .compatible = "ti,c6474-pll", .data = c6474_setup_clocks },
-#endif
-#ifdef CONFIG_SOC_TMS320C6678
- { .compatible = "ti,c6678-pll", .data = c6678_setup_clocks },
-#endif
- { .compatible = "ti,c64x+pll" },
- {}
-};
-
-void __init c64x_setup_clocks(void)
-{
- void (*__setup_clocks)(struct device_node *np);
- struct pll_data *pll = &c6x_soc_pll1;
- struct device_node *node;
- const struct of_device_id *id;
- int err;
- u32 val;
-
- node = of_find_matching_node(NULL, c6x_clkc_match);
- if (!node)
- return;
-
- pll->base = of_iomap(node, 0);
- if (!pll->base)
- goto out;
-
- err = of_property_read_u32(node, "clock-frequency", &val);
- if (err || val == 0) {
- pr_err("%pOF: no clock-frequency found! Using %dMHz\n",
- node, (int)val / 1000000);
- val = 25000000;
- }
- clkin1.rate = val;
-
- err = of_property_read_u32(node, "ti,c64x+pll-bypass-delay", &val);
- if (err)
- val = 5000;
- pll->bypass_delay = val;
-
- err = of_property_read_u32(node, "ti,c64x+pll-reset-delay", &val);
- if (err)
- val = 30000;
- pll->reset_delay = val;
-
- err = of_property_read_u32(node, "ti,c64x+pll-lock-delay", &val);
- if (err)
- val = 30000;
- pll->lock_delay = val;
-
- /* id->data is a pointer to SoC-specific setup */
- id = of_match_node(c6x_clkc_match, node);
- if (id && id->data) {
- __setup_clocks = id->data;
- __setup_clocks(node);
- }
-
-out:
- of_node_put(node);
-}
diff --git a/arch/c6x/platforms/timer64.c b/arch/c6x/platforms/timer64.c
deleted file mode 100644
index 661f4c7c6ef6..000000000000
--- a/arch/c6x/platforms/timer64.c
+++ /dev/null
@@ -1,241 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2010, 2011 Texas Instruments Incorporated
- * Contributed by: Mark Salter (msalter@redhat.com)
- */
-
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <asm/soc.h>
-#include <asm/dscr.h>
-#include <asm/special_insns.h>
-#include <asm/timer64.h>
-
-struct timer_regs {
- u32 reserved0;
- u32 emumgt;
- u32 reserved1;
- u32 reserved2;
- u32 cntlo;
- u32 cnthi;
- u32 prdlo;
- u32 prdhi;
- u32 tcr;
- u32 tgcr;
- u32 wdtcr;
-};
-
-static struct timer_regs __iomem *timer;
-
-#define TCR_TSTATLO 0x001
-#define TCR_INVOUTPLO 0x002
-#define TCR_INVINPLO 0x004
-#define TCR_CPLO 0x008
-#define TCR_ENAMODELO_ONCE 0x040
-#define TCR_ENAMODELO_CONT 0x080
-#define TCR_ENAMODELO_MASK 0x0c0
-#define TCR_PWIDLO_MASK 0x030
-#define TCR_CLKSRCLO 0x100
-#define TCR_TIENLO 0x200
-#define TCR_TSTATHI (0x001 << 16)
-#define TCR_INVOUTPHI (0x002 << 16)
-#define TCR_CPHI (0x008 << 16)
-#define TCR_PWIDHI_MASK (0x030 << 16)
-#define TCR_ENAMODEHI_ONCE (0x040 << 16)
-#define TCR_ENAMODEHI_CONT (0x080 << 16)
-#define TCR_ENAMODEHI_MASK (0x0c0 << 16)
-
-#define TGCR_TIMLORS 0x001
-#define TGCR_TIMHIRS 0x002
-#define TGCR_TIMMODE_UD32 0x004
-#define TGCR_TIMMODE_WDT64 0x008
-#define TGCR_TIMMODE_CD32 0x00c
-#define TGCR_TIMMODE_MASK 0x00c
-#define TGCR_PSCHI_MASK (0x00f << 8)
-#define TGCR_TDDRHI_MASK (0x00f << 12)
-
-/*
- * Timer clocks are divided down from the CPU clock
- * The divisor is in the EMUMGTCLKSPD register
- */
-#define TIMER_DIVISOR \
- ((soc_readl(&timer->emumgt) & (0xf << 16)) >> 16)
-
-#define TIMER64_RATE (c6x_core_freq / TIMER_DIVISOR)
-
-#define TIMER64_MODE_DISABLED 0
-#define TIMER64_MODE_ONE_SHOT TCR_ENAMODELO_ONCE
-#define TIMER64_MODE_PERIODIC TCR_ENAMODELO_CONT
-
-static int timer64_mode;
-static int timer64_devstate_id = -1;
-
-static void timer64_config(unsigned long period)
-{
- u32 tcr = soc_readl(&timer->tcr) & ~TCR_ENAMODELO_MASK;
-
- soc_writel(tcr, &timer->tcr);
- soc_writel(period - 1, &timer->prdlo);
- soc_writel(0, &timer->cntlo);
- tcr |= timer64_mode;
- soc_writel(tcr, &timer->tcr);
-}
-
-static void timer64_enable(void)
-{
- u32 val;
-
- if (timer64_devstate_id >= 0)
- dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_ENABLED);
-
- /* disable timer, reset count */
- soc_writel(soc_readl(&timer->tcr) & ~TCR_ENAMODELO_MASK, &timer->tcr);
- soc_writel(0, &timer->prdlo);
-
- /* use internal clock and 1 cycle pulse width */
- val = soc_readl(&timer->tcr);
- soc_writel(val & ~(TCR_CLKSRCLO | TCR_PWIDLO_MASK), &timer->tcr);
-
- /* dual 32-bit unchained mode */
- val = soc_readl(&timer->tgcr) & ~TGCR_TIMMODE_MASK;
- soc_writel(val, &timer->tgcr);
- soc_writel(val | (TGCR_TIMLORS | TGCR_TIMMODE_UD32), &timer->tgcr);
-}
-
-static void timer64_disable(void)
-{
- /* disable timer, reset count */
- soc_writel(soc_readl(&timer->tcr) & ~TCR_ENAMODELO_MASK, &timer->tcr);
- soc_writel(0, &timer->prdlo);
-
- if (timer64_devstate_id >= 0)
- dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_DISABLED);
-}
-
-static int next_event(unsigned long delta,
- struct clock_event_device *evt)
-{
- timer64_config(delta);
- return 0;
-}
-
-static int set_periodic(struct clock_event_device *evt)
-{
- timer64_enable();
- timer64_mode = TIMER64_MODE_PERIODIC;
- timer64_config(TIMER64_RATE / HZ);
- return 0;
-}
-
-static int set_oneshot(struct clock_event_device *evt)
-{
- timer64_enable();
- timer64_mode = TIMER64_MODE_ONE_SHOT;
- return 0;
-}
-
-static int shutdown(struct clock_event_device *evt)
-{
- timer64_mode = TIMER64_MODE_DISABLED;
- timer64_disable();
- return 0;
-}
-
-static struct clock_event_device t64_clockevent_device = {
- .name = "TIMER64_EVT32_TIMER",
- .features = CLOCK_EVT_FEAT_ONESHOT |
- CLOCK_EVT_FEAT_PERIODIC,
- .rating = 200,
- .set_state_shutdown = shutdown,
- .set_state_periodic = set_periodic,
- .set_state_oneshot = set_oneshot,
- .set_next_event = next_event,
-};
-
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *cd = &t64_clockevent_device;
-
- cd->event_handler(cd);
-
- return IRQ_HANDLED;
-}
-
-void __init timer64_init(void)
-{
- struct clock_event_device *cd = &t64_clockevent_device;
- struct device_node *np, *first = NULL;
- u32 val;
- int err, found = 0;
-
- for_each_compatible_node(np, NULL, "ti,c64x+timer64") {
- err = of_property_read_u32(np, "ti,core-mask", &val);
- if (!err) {
- if (val & (1 << get_coreid())) {
- found = 1;
- break;
- }
- } else if (!first)
- first = np;
- }
- if (!found) {
- /* try first one with no core-mask */
- if (first)
- np = of_node_get(first);
- else {
- pr_debug("Cannot find ti,c64x+timer64 timer.\n");
- return;
- }
- }
-
- timer = of_iomap(np, 0);
- if (!timer) {
- pr_debug("%pOF: Cannot map timer registers.\n", np);
- goto out;
- }
- pr_debug("%pOF: Timer registers=%p.\n", np, timer);
-
- cd->irq = irq_of_parse_and_map(np, 0);
- if (cd->irq == NO_IRQ) {
- pr_debug("%pOF: Cannot find interrupt.\n", np);
- iounmap(timer);
- goto out;
- }
-
- /* If there is a device state control, save the ID. */
- err = of_property_read_u32(np, "ti,dscr-dev-enable", &val);
- if (!err) {
- timer64_devstate_id = val;
-
- /*
- * It is necessary to enable the timer block here because
- * the TIMER_DIVISOR macro needs to read a timer register
- * to get the divisor.
- */
- dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_ENABLED);
- }
-
- pr_debug("%pOF: Timer irq=%d.\n", np, cd->irq);
-
- clockevents_calc_mult_shift(cd, c6x_core_freq / TIMER_DIVISOR, 5);
-
- cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
- cd->max_delta_ticks = 0x7fffffff;
- cd->min_delta_ns = clockevent_delta2ns(250, cd);
- cd->min_delta_ticks = 250;
-
- cd->cpumask = cpumask_of(smp_processor_id());
-
- clockevents_register_device(cd);
- if (request_irq(cd->irq, timer_interrupt, IRQF_TIMER, "timer",
- &t64_clockevent_device))
- pr_err("Failed to request irq %d (timer)\n", cd->irq);
-
-out:
- of_node_put(np);
- return;
-}
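
timer64 derives its tick rate by dividing the core clock by the EMUMGTCLKSPD field and, for periodic mode, programs a period of rate / HZ (minus one, since PRDLO counts from zero). A small worked example of that arithmetic with assumed values (the 1 GHz core clock, divisor 4 and HZ=100 are illustrative, not read from any register):

	/* Example of the TIMER64_RATE / period calculation used above. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long core_freq = 1000000000UL;  /* assumed core clock, Hz */
		unsigned long divisor   = 4;             /* assumed EMUMGT divisor */
		unsigned long hz        = 100;           /* assumed CONFIG_HZ      */

		unsigned long timer_rate = core_freq / divisor;   /* 250 MHz        */
		unsigned long period     = timer_rate / hz;       /* 2500000 ticks  */

		/* timer64_config(period) then writes period - 1 into PRDLO */
		printf("rate=%lu Hz, period=%lu, prdlo=%lu\n",
		       timer_rate, period, period - 1);
		return 0;
	}
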
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 89dd2fcf38fa..34e91224adc3 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -7,7 +7,7 @@ config CSKY
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_USE_BUILTIN_BSWAP
- select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2
+ select ARCH_USE_QUEUED_RWLOCKS
select ARCH_WANT_FRAME_POINTERS if !CPU_CK610
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select COMMON_CLK
@@ -35,6 +35,9 @@ config CSKY
select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_TIME_VSYSCALL
+ select GENERIC_VDSO_32
+ select GENERIC_GETTIMEOFDAY
select GX6605S_TIMER if CPU_CK610
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_AUDITSYSCALL
@@ -43,11 +46,14 @@ config CSKY
select HAVE_CONTEXT_TRACKING
select HAVE_VIRT_CPU_ACCOUNTING_GEN
select HAVE_DEBUG_BUGVERBOSE
+ select HAVE_DEBUG_KMEMLEAK
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_GENERIC_VDSO
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_ERROR_INJECTION
+ select HAVE_FUTEX_CMPXCHG if FUTEX && SMP
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
@@ -192,6 +198,22 @@ config CPU_CK860
endchoice
choice
+ prompt "PAGE OFFSET"
+ default PAGE_OFFSET_80000000
+
+config PAGE_OFFSET_80000000
+ bool "PAGE OFFSET 2G (user:kernel = 2:2)"
+
+config PAGE_OFFSET_A0000000
+ bool "PAGE OFFSET 2.5G (user:kernel = 2.5:1.5)"
+endchoice
+
+config PAGE_OFFSET
+ hex
+ default 0x80000000 if PAGE_OFFSET_80000000
+ default 0xa0000000 if PAGE_OFFSET_A0000000
+choice
+
prompt "C-SKY PMU type"
depends on PERF_EVENTS
depends on CPU_CK807 || CPU_CK810 || CPU_CK860
diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h
index d3e04208d53c..6cab7afae962 100644
--- a/arch/csky/abiv1/inc/abi/cacheflush.h
+++ b/arch/csky/abiv1/inc/abi/cacheflush.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H
diff --git a/arch/csky/abiv1/inc/abi/ckmmu.h b/arch/csky/abiv1/inc/abi/ckmmu.h
index ba8eb5870835..416b30c57983 100644
--- a/arch/csky/abiv1/inc/abi/ckmmu.h
+++ b/arch/csky/abiv1/inc/abi/ckmmu.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_CKMMUV1_H
#define __ASM_CSKY_CKMMUV1_H
@@ -89,13 +88,14 @@ static inline void tlb_invalid_indexed(void)
cpwcr("cpcr8", 0x02000000);
}
-static inline void setup_pgd(unsigned long pgd, bool kernel)
+static inline void setup_pgd(pgd_t *pgd, int asid)
{
- cpwcr("cpcr29", pgd | BIT(0));
+ cpwcr("cpcr29", __pa(pgd) | BIT(0));
+ write_mmu_entryhi(asid);
}
-static inline unsigned long get_pgd(void)
+static inline pgd_t *get_pgd(void)
{
- return cprcr("cpcr29") & ~BIT(0);
+ return __va(cprcr("cpcr29") & ~BIT(0));
}
#endif /* __ASM_CSKY_CKMMUV1_H */
diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h
index 13c23e2c707c..b6a2109b895e 100644
--- a/arch/csky/abiv1/inc/abi/entry.h
+++ b/arch/csky/abiv1/inc/abi/entry.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_ENTRY_H
#define __ASM_CSKY_ENTRY_H
diff --git a/arch/csky/abiv1/inc/abi/page.h b/arch/csky/abiv1/inc/abi/page.h
index c864519117c7..2d2159933b76 100644
--- a/arch/csky/abiv1/inc/abi/page.h
+++ b/arch/csky/abiv1/inc/abi/page.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <asm/shmparam.h>
diff --git a/arch/csky/abiv1/inc/abi/pgtable-bits.h b/arch/csky/abiv1/inc/abi/pgtable-bits.h
index d605445aad9a..752c8b3f9194 100644
--- a/arch/csky/abiv1/inc/abi/pgtable-bits.h
+++ b/arch/csky/abiv1/inc/abi/pgtable-bits.h
@@ -1,37 +1,49 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_PGTABLE_BITS_H
#define __ASM_CSKY_PGTABLE_BITS_H
/* implemented in software */
-#define _PAGE_ACCESSED (1<<3)
-#define PAGE_ACCESSED_BIT (3)
-
+#define _PAGE_PRESENT (1<<0)
#define _PAGE_READ (1<<1)
#define _PAGE_WRITE (1<<2)
-#define _PAGE_PRESENT (1<<0)
-
+#define _PAGE_ACCESSED (1<<3)
#define _PAGE_MODIFIED (1<<4)
-#define PAGE_MODIFIED_BIT (4)
/* implemented in hardware */
#define _PAGE_GLOBAL (1<<6)
-
#define _PAGE_VALID (1<<7)
-#define PAGE_VALID_BIT (7)
-
#define _PAGE_DIRTY (1<<8)
-#define PAGE_DIRTY_BIT (8)
#define _PAGE_CACHE (3<<9)
#define _PAGE_UNCACHE (2<<9)
#define _PAGE_SO _PAGE_UNCACHE
-
#define _CACHE_MASK (7<<9)
-#define _CACHE_CACHED (_PAGE_VALID | _PAGE_CACHE)
-#define _CACHE_UNCACHED (_PAGE_VALID | _PAGE_UNCACHE)
+#define _CACHE_CACHED _PAGE_CACHE
+#define _CACHE_UNCACHED _PAGE_UNCACHE
+
+#define _PAGE_PROT_NONE _PAGE_READ
+
+/*
+ * Encode and decode a swap entry
+ *
+ * Format of swap PTE:
+ * bit 0: _PAGE_PRESENT (zero)
+ * bit 1: _PAGE_READ (zero)
+ * bit 2 - 5: swap type[0 - 3]
+ * bit 6: _PAGE_GLOBAL (zero)
+ * bit 7: _PAGE_VALID (zero)
+ * bit 8: swap type[4]
+ * bit 9 - 31: swap offset
+ */
+#define __swp_type(x) ((((x).val >> 2) & 0xf) | \
+ (((x).val >> 4) & 0x10))
+#define __swp_offset(x) ((x).val >> 9)
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+ ((type & 0xf) << 2) | \
+ ((type & 0x10) << 4) | \
+ ((offset) << 9)})
#define HAVE_ARCH_UNMAPPED_AREA
diff --git a/arch/csky/abiv1/inc/abi/reg_ops.h b/arch/csky/abiv1/inc/abi/reg_ops.h
index a153bd3918f7..abd01a243388 100644
--- a/arch/csky/abiv1/inc/abi/reg_ops.h
+++ b/arch/csky/abiv1/inc/abi/reg_ops.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ABI_REG_OPS_H
#define __ABI_REG_OPS_H
diff --git a/arch/csky/abiv1/inc/abi/regdef.h b/arch/csky/abiv1/inc/abi/regdef.h
index 104707fbdcc1..7b386fd67070 100644
--- a/arch/csky/abiv1/inc/abi/regdef.h
+++ b/arch/csky/abiv1/inc/abi/regdef.h
@@ -1,10 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_REGDEF_H
#define __ASM_CSKY_REGDEF_H
+#ifdef __ASSEMBLY__
#define syscallid r1
+#else
+#define syscallid "r1"
+#endif
+
#define regs_syscallid(regs) regs->regs[9]
#define regs_fp(regs) regs->regs[2]
diff --git a/arch/csky/abiv1/inc/abi/string.h b/arch/csky/abiv1/inc/abi/string.h
index 0cd43384f8d2..9d95594b0feb 100644
--- a/arch/csky/abiv1/inc/abi/string.h
+++ b/arch/csky/abiv1/inc/abi/string.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ABI_CSKY_STRING_H
#define __ABI_CSKY_STRING_H
diff --git a/arch/csky/abiv1/inc/abi/switch_context.h b/arch/csky/abiv1/inc/abi/switch_context.h
index 17c82686498e..ec73fd7c9f87 100644
--- a/arch/csky/abiv1/inc/abi/switch_context.h
+++ b/arch/csky/abiv1/inc/abi/switch_context.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ABI_CSKY_PTRACE_H
#define __ABI_CSKY_PTRACE_H
diff --git a/arch/csky/abiv1/inc/abi/vdso.h b/arch/csky/abiv1/inc/abi/vdso.h
index 14352f524f1d..9e6d0a2fdd2b 100644
--- a/arch/csky/abiv1/inc/abi/vdso.h
+++ b/arch/csky/abiv1/inc/abi/vdso.h
@@ -1,17 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/uaccess.h>
+#ifndef __ABI_CSKY_VDSO_H
+#define __ABI_CSKY_VDSO_H
-static inline int setup_vdso_page(unsigned short *ptr)
-{
- int err = 0;
+/* movi r1, 127; addi r1, (139 - 127) */
+#define SET_SYSCALL_ID .long 0x20b167f1
- /* movi r1, 127 */
- err |= __put_user(0x67f1, ptr + 0);
- /* addi r1, (139 - 127) */
- err |= __put_user(0x20b1, ptr + 1);
- /* trap 0 */
- err |= __put_user(0x0008, ptr + 2);
-
- return err;
-}
+#endif /* __ABI_CSKY_VDSO_H */
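
The SET_SYSCALL_ID word above packs the same two 16-bit opcodes that the removed setup_vdso_page() wrote with __put_user(), assuming the low halfword of the little-endian 32-bit store lands at the lower address. A quick check of that packing:

	/* How the 32-bit word above packs the two 16-bit opcodes (little-endian). */
	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t movi_r1_127 = 0x67f1;   /* from the removed setup_vdso_page() */
		uint16_t addi_r1_12  = 0x20b1;

		uint32_t word = ((uint32_t)addi_r1_12 << 16) | movi_r1_127;
		assert(word == 0x20b167f1);      /* matches SET_SYSCALL_ID above */
		return 0;
	}
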
diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
index 790f1ebfba44..39c51399dd81 100644
--- a/arch/csky/abiv2/cacheflush.c
+++ b/arch/csky/abiv2/cacheflush.c
@@ -12,6 +12,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
unsigned long addr;
struct page *page;
+ if (!pfn_valid(pte_pfn(*pte)))
+ return;
+
page = pfn_to_page(pte_pfn(*pte));
if (page == ZERO_PAGE(0))
return;
diff --git a/arch/csky/abiv2/inc/abi/ckmmu.h b/arch/csky/abiv2/inc/abi/ckmmu.h
index 73ded7c72482..64215f2380f1 100644
--- a/arch/csky/abiv2/inc/abi/ckmmu.h
+++ b/arch/csky/abiv2/inc/abi/ckmmu.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_CKMMUV2_H
#define __ASM_CSKY_CKMMUV2_H
@@ -78,8 +77,13 @@ static inline void tlb_read(void)
static inline void tlb_invalid_all(void)
{
#ifdef CONFIG_CPU_HAS_TLBI
- asm volatile("tlbi.alls\n":::"memory");
sync_is();
+ asm volatile(
+ "tlbi.alls \n"
+ "sync.i \n"
+ :
+ :
+ : "memory");
#else
mtcr("cr<8, 15>", 0x04000000);
#endif
@@ -88,8 +92,13 @@ static inline void tlb_invalid_all(void)
static inline void local_tlb_invalid_all(void)
{
#ifdef CONFIG_CPU_HAS_TLBI
- asm volatile("tlbi.all\n":::"memory");
sync_is();
+ asm volatile(
+ "tlbi.all \n"
+ "sync.i \n"
+ :
+ :
+ : "memory");
#else
tlb_invalid_all();
#endif
@@ -100,16 +109,31 @@ static inline void tlb_invalid_indexed(void)
mtcr("cr<8, 15>", 0x02000000);
}
-static inline void setup_pgd(unsigned long pgd, bool kernel)
+#define NOP32 ".long 0x4820c400\n"
+
+static inline void setup_pgd(pgd_t *pgd, int asid)
{
- if (kernel)
- mtcr("cr<28, 15>", pgd | BIT(0));
- else
- mtcr("cr<29, 15>", pgd | BIT(0));
+#ifdef CONFIG_CPU_HAS_TLBI
+ sync_is();
+#else
+ mb();
+#endif
+ asm volatile(
+#ifdef CONFIG_CPU_HAS_TLBI
+ "mtcr %1, cr<28, 15> \n"
+#endif
+ "mtcr %1, cr<29, 15> \n"
+ "mtcr %0, cr< 4, 15> \n"
+ ".rept 64 \n"
+ NOP32
+ ".endr \n"
+ :
+ :"r"(asid), "r"(__pa(pgd) | BIT(0))
+ :"memory");
}
-static inline unsigned long get_pgd(void)
+static inline pgd_t *get_pgd(void)
{
- return mfcr("cr<29, 15>") & ~BIT(0);
+ return __va(mfcr("cr<29, 15>") & ~BIT(0));
}
#endif /* __ASM_CSKY_CKMMUV2_H */
diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h
index bedcc6f06bba..cca63e699b58 100644
--- a/arch/csky/abiv2/inc/abi/entry.h
+++ b/arch/csky/abiv2/inc/abi/entry.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_ENTRY_H
#define __ASM_CSKY_ENTRY_H
@@ -26,6 +25,9 @@
stw tls, (sp, 0)
stw lr, (sp, 4)
+ RD_MEH lr
+ WR_MEH lr
+
mfcr lr, epc
movi tls, \epc_inc
add lr, tls
@@ -231,6 +233,16 @@
mtcr \rx, cr<8, 15>
.endm
+#ifdef CONFIG_PAGE_OFFSET_80000000
+#define MSA_SET cr<30, 15>
+#define MSA_CLR cr<31, 15>
+#endif
+
+#ifdef CONFIG_PAGE_OFFSET_A0000000
+#define MSA_SET cr<31, 15>
+#define MSA_CLR cr<30, 15>
+#endif
+
.macro SETUP_MMU
/* Init psr and enable ee */
lrw r6, DEFAULT_PSR_VALUE
@@ -281,15 +293,15 @@
* 31 - 29 | 28 - 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
* BA Reserved SH WA B SO SEC C D V
*/
- mfcr r6, cr<30, 15> /* Get MSA0 */
+ mfcr r6, MSA_SET /* Get MSA */
2:
lsri r6, 29
lsli r6, 29
addi r6, 0x1ce
- mtcr r6, cr<30, 15> /* Set MSA0 */
+ mtcr r6, MSA_SET /* Set MSA */
movi r6, 0
- mtcr r6, cr<31, 15> /* Clr MSA1 */
+ mtcr r6, MSA_CLR /* Clr MSA */
/* enable MMU */
mfcr r6, cr18
diff --git a/arch/csky/abiv2/inc/abi/fpu.h b/arch/csky/abiv2/inc/abi/fpu.h
index 09e2700a3693..aabb79355013 100644
--- a/arch/csky/abiv2/inc/abi/fpu.h
+++ b/arch/csky/abiv2/inc/abi/fpu.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_FPU_H
#define __ASM_CSKY_FPU_H
diff --git a/arch/csky/abiv2/inc/abi/page.h b/arch/csky/abiv2/inc/abi/page.h
index 0a70cb553dca..cf005f13cd15 100644
--- a/arch/csky/abiv2/inc/abi/page.h
+++ b/arch/csky/abiv2/inc/abi/page.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
static inline void clear_user_page(void *addr, unsigned long vaddr,
struct page *page)
diff --git a/arch/csky/abiv2/inc/abi/pgtable-bits.h b/arch/csky/abiv2/inc/abi/pgtable-bits.h
index 137f7932c83b..7e7f389f546f 100644
--- a/arch/csky/abiv2/inc/abi/pgtable-bits.h
+++ b/arch/csky/abiv2/inc/abi/pgtable-bits.h
@@ -1,37 +1,48 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_PGTABLE_BITS_H
#define __ASM_CSKY_PGTABLE_BITS_H
/* implemented in software */
#define _PAGE_ACCESSED (1<<7)
-#define PAGE_ACCESSED_BIT (7)
-
#define _PAGE_READ (1<<8)
#define _PAGE_WRITE (1<<9)
#define _PAGE_PRESENT (1<<10)
-
#define _PAGE_MODIFIED (1<<11)
-#define PAGE_MODIFIED_BIT (11)
/* implemented in hardware */
#define _PAGE_GLOBAL (1<<0)
-
#define _PAGE_VALID (1<<1)
-#define PAGE_VALID_BIT (1)
-
#define _PAGE_DIRTY (1<<2)
-#define PAGE_DIRTY_BIT (2)
#define _PAGE_SO (1<<5)
#define _PAGE_BUF (1<<6)
-
#define _PAGE_CACHE (1<<3)
-
#define _CACHE_MASK _PAGE_CACHE
-#define _CACHE_CACHED (_PAGE_VALID | _PAGE_CACHE | _PAGE_BUF)
-#define _CACHE_UNCACHED (_PAGE_VALID)
+#define _CACHE_CACHED (_PAGE_CACHE | _PAGE_BUF)
+#define _CACHE_UNCACHED (0)
+
+#define _PAGE_PROT_NONE _PAGE_WRITE
+
+/*
+ * Encode and decode a swap entry
+ *
+ * Format of swap PTE:
+ * bit 0: _PAGE_GLOBAL (zero)
+ * bit 1: _PAGE_VALID (zero)
+ * bit 2 - 6: swap type
+ * bit 7 - 8: swap offset[0 - 1]
+ * bit 9: _PAGE_WRITE (zero)
+ * bit 10: _PAGE_PRESENT (zero)
+ * bit 11 - 31: swap offset[2 - 22]
+ */
+#define __swp_type(x) (((x).val >> 2) & 0x1f)
+#define __swp_offset(x) ((((x).val >> 7) & 0x3) | \
+ (((x).val >> 9) & 0x7ffffc))
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+ ((type & 0x1f) << 2) | \
+ ((offset & 0x3) << 7) | \
+ ((offset & 0x7ffffc) << 9)})
#endif /* __ASM_CSKY_PGTABLE_BITS_H */
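
The layout documented above keeps the hardware and software flag bits of a swap PTE zero by splitting the offset across bits 7-8 and 11-31. A stand-alone round-trip check of those shifts (plain C, outside the kernel, reusing the same macro bodies as the hunk above):

	/* Round-trip check of the abiv2 swap-entry encoding shown above. */
	#include <assert.h>
	#include <stdio.h>

	typedef struct { unsigned long val; } swp_entry_t;

	#define __swp_type(x)		(((x).val >> 2) & 0x1f)
	#define __swp_offset(x)		((((x).val >> 7) & 0x3) | \
					 (((x).val >> 9) & 0x7ffffc))
	#define __swp_entry(type, offset) ((swp_entry_t) { \
					 ((type & 0x1f) << 2) | \
					 ((offset & 0x3) << 7) | \
					 ((offset & 0x7ffffc) << 9)})

	int main(void)
	{
		unsigned long type = 7, offset = 0x123456;   /* arbitrary test values */
		swp_entry_t e = __swp_entry(type, offset);

		assert(__swp_type(e) == type);
		assert(__swp_offset(e) == offset);
		/* bits 0-1 and 9-10 (GLOBAL/VALID, WRITE/PRESENT) stay zero */
		assert((e.val & ((1 << 0) | (1 << 1) | (1 << 9) | (1 << 10))) == 0);
		printf("entry=0x%lx type=%lu offset=0x%lx\n", e.val, type, offset);
		return 0;
	}
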
diff --git a/arch/csky/abiv2/inc/abi/reg_ops.h b/arch/csky/abiv2/inc/abi/reg_ops.h
index ae82c3f26a6b..49ba18a64751 100644
--- a/arch/csky/abiv2/inc/abi/reg_ops.h
+++ b/arch/csky/abiv2/inc/abi/reg_ops.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ABI_REG_OPS_H
#define __ABI_REG_OPS_H
diff --git a/arch/csky/abiv2/inc/abi/regdef.h b/arch/csky/abiv2/inc/abi/regdef.h
index d7328bbc1ce7..0933addbc27b 100644
--- a/arch/csky/abiv2/inc/abi/regdef.h
+++ b/arch/csky/abiv2/inc/abi/regdef.h
@@ -1,10 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_REGDEF_H
#define __ASM_CSKY_REGDEF_H
+#ifdef __ASSEMBLY__
#define syscallid r7
+#else
+#define syscallid "r7"
+#endif
+
#define regs_syscallid(regs) regs->regs[3]
#define regs_fp(regs) regs->regs[4]
diff --git a/arch/csky/abiv2/inc/abi/switch_context.h b/arch/csky/abiv2/inc/abi/switch_context.h
index 73a81245a3b3..5dd5c3f4ee7e 100644
--- a/arch/csky/abiv2/inc/abi/switch_context.h
+++ b/arch/csky/abiv2/inc/abi/switch_context.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ABI_CSKY_PTRACE_H
#define __ABI_CSKY_PTRACE_H
diff --git a/arch/csky/abiv2/inc/abi/vdso.h b/arch/csky/abiv2/inc/abi/vdso.h
index b60d4a070326..40fd10d893ff 100644
--- a/arch/csky/abiv2/inc/abi/vdso.h
+++ b/arch/csky/abiv2/inc/abi/vdso.h
@@ -3,21 +3,7 @@
#ifndef __ABI_CSKY_VDSO_H
#define __ABI_CSKY_VDSO_H
-#include <linux/uaccess.h>
+/* movi r7, 173 */
+#define SET_SYSCALL_ID .long 0x008bea07
-static inline int setup_vdso_page(unsigned short *ptr)
-{
- int err = 0;
-
- /* movi r7, 173 */
- err |= __put_user(0xea07, ptr);
- err |= __put_user(0x008b, ptr+1);
-
- /* trap 0 */
- err |= __put_user(0xc000, ptr+2);
- err |= __put_user(0x2020, ptr+3);
-
- return err;
-}
-
-#endif /* __ABI_CSKY_STRING_H */
+#endif /* __ABI_CSKY_VDSO_H */
diff --git a/arch/csky/abiv2/sysdep.h b/arch/csky/abiv2/sysdep.h
index bbbedfd34777..61abe9201c50 100644
--- a/arch/csky/abiv2/sysdep.h
+++ b/arch/csky/abiv2/sysdep.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __SYSDEP_H
#define __SYSDEP_H
diff --git a/arch/csky/include/asm/addrspace.h b/arch/csky/include/asm/addrspace.h
index d1c2ede692ed..6fc05d44536c 100644
--- a/arch/csky/include/asm/addrspace.h
+++ b/arch/csky/include/asm/addrspace.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_ADDRSPACE_H
#define __ASM_CSKY_ADDRSPACE_H
diff --git a/arch/csky/include/asm/atomic.h b/arch/csky/include/asm/atomic.h
deleted file mode 100644
index e369d73b13e3..000000000000
--- a/arch/csky/include/asm/atomic.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef __ASM_CSKY_ATOMIC_H
-#define __ASM_CSKY_ATOMIC_H
-
-#include <linux/version.h>
-#include <asm/cmpxchg.h>
-#include <asm/barrier.h>
-
-#ifdef CONFIG_CPU_HAS_LDSTEX
-
-#define __atomic_add_unless __atomic_add_unless
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- unsigned long tmp, ret;
-
- smp_mb();
-
- asm volatile (
- "1: ldex.w %0, (%3) \n"
- " mov %1, %0 \n"
- " cmpne %0, %4 \n"
- " bf 2f \n"
- " add %0, %2 \n"
- " stex.w %0, (%3) \n"
- " bez %0, 1b \n"
- "2: \n"
- : "=&r" (tmp), "=&r" (ret)
- : "r" (a), "r"(&v->counter), "r"(u)
- : "memory");
-
- if (ret != u)
- smp_mb();
-
- return ret;
-}
-
-#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
-{ \
- unsigned long tmp; \
- \
- asm volatile ( \
- "1: ldex.w %0, (%2) \n" \
- " " #op " %0, %1 \n" \
- " stex.w %0, (%2) \n" \
- " bez %0, 1b \n" \
- : "=&r" (tmp) \
- : "r" (i), "r"(&v->counter) \
- : "memory"); \
-}
-
-#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
-{ \
- unsigned long tmp, ret; \
- \
- smp_mb(); \
- asm volatile ( \
- "1: ldex.w %0, (%3) \n" \
- " " #op " %0, %2 \n" \
- " mov %1, %0 \n" \
- " stex.w %0, (%3) \n" \
- " bez %0, 1b \n" \
- : "=&r" (tmp), "=&r" (ret) \
- : "r" (i), "r"(&v->counter) \
- : "memory"); \
- smp_mb(); \
- \
- return ret; \
-}
-
-#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
-{ \
- unsigned long tmp, ret; \
- \
- smp_mb(); \
- asm volatile ( \
- "1: ldex.w %0, (%3) \n" \
- " mov %1, %0 \n" \
- " " #op " %0, %2 \n" \
- " stex.w %0, (%3) \n" \
- " bez %0, 1b \n" \
- : "=&r" (tmp), "=&r" (ret) \
- : "r" (i), "r"(&v->counter) \
- : "memory"); \
- smp_mb(); \
- \
- return ret; \
-}
-
-#else /* CONFIG_CPU_HAS_LDSTEX */
-
-#include <linux/irqflags.h>
-
-#define __atomic_add_unless __atomic_add_unless
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- unsigned long tmp, ret, flags;
-
- raw_local_irq_save(flags);
-
- asm volatile (
- " ldw %0, (%3) \n"
- " mov %1, %0 \n"
- " cmpne %0, %4 \n"
- " bf 2f \n"
- " add %0, %2 \n"
- " stw %0, (%3) \n"
- "2: \n"
- : "=&r" (tmp), "=&r" (ret)
- : "r" (a), "r"(&v->counter), "r"(u)
- : "memory");
-
- raw_local_irq_restore(flags);
-
- return ret;
-}
-
-#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
-{ \
- unsigned long tmp, flags; \
- \
- raw_local_irq_save(flags); \
- \
- asm volatile ( \
- " ldw %0, (%2) \n" \
- " " #op " %0, %1 \n" \
- " stw %0, (%2) \n" \
- : "=&r" (tmp) \
- : "r" (i), "r"(&v->counter) \
- : "memory"); \
- \
- raw_local_irq_restore(flags); \
-}
-
-#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
-{ \
- unsigned long tmp, ret, flags; \
- \
- raw_local_irq_save(flags); \
- \
- asm volatile ( \
- " ldw %0, (%3) \n" \
- " " #op " %0, %2 \n" \
- " stw %0, (%3) \n" \
- " mov %1, %0 \n" \
- : "=&r" (tmp), "=&r" (ret) \
- : "r" (i), "r"(&v->counter) \
- : "memory"); \
- \
- raw_local_irq_restore(flags); \
- \
- return ret; \
-}
-
-#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
-{ \
- unsigned long tmp, ret, flags; \
- \
- raw_local_irq_save(flags); \
- \
- asm volatile ( \
- " ldw %0, (%3) \n" \
- " mov %1, %0 \n" \
- " " #op " %0, %2 \n" \
- " stw %0, (%3) \n" \
- : "=&r" (tmp), "=&r" (ret) \
- : "r" (i), "r"(&v->counter) \
- : "memory"); \
- \
- raw_local_irq_restore(flags); \
- \
- return ret; \
-}
-
-#endif /* CONFIG_CPU_HAS_LDSTEX */
-
-#define atomic_add_return atomic_add_return
-ATOMIC_OP_RETURN(add, +)
-#define atomic_sub_return atomic_sub_return
-ATOMIC_OP_RETURN(sub, -)
-
-#define atomic_fetch_add atomic_fetch_add
-ATOMIC_FETCH_OP(add, +)
-#define atomic_fetch_sub atomic_fetch_sub
-ATOMIC_FETCH_OP(sub, -)
-#define atomic_fetch_and atomic_fetch_and
-ATOMIC_FETCH_OP(and, &)
-#define atomic_fetch_or atomic_fetch_or
-ATOMIC_FETCH_OP(or, |)
-#define atomic_fetch_xor atomic_fetch_xor
-ATOMIC_FETCH_OP(xor, ^)
-
-#define atomic_and atomic_and
-ATOMIC_OP(and, &)
-#define atomic_or atomic_or
-ATOMIC_OP(or, |)
-#define atomic_xor atomic_xor
-ATOMIC_OP(xor, ^)
-
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
-#include <asm-generic/atomic.h>
-
-#endif /* __ASM_CSKY_ATOMIC_H */
diff --git a/arch/csky/include/asm/barrier.h b/arch/csky/include/asm/barrier.h
index a430e7fddf35..84fc600c8b45 100644
--- a/arch/csky/include/asm/barrier.h
+++ b/arch/csky/include/asm/barrier.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_BARRIER_H
#define __ASM_CSKY_BARRIER_H
@@ -8,6 +7,61 @@
#define nop() asm volatile ("nop\n":::"memory")
+#ifdef CONFIG_SMP
+
+/*
+ * bar.brwarws: ordering barrier for all load/store instructions
+ * before/after
+ *
+ * |31|30 26|25 21|20 16|15 10|9 5|4 0|
+ * 1 10000 00000 00000 100001 00001 0 bw br aw ar
+ *
+ * b: before
+ * a: after
+ * r: read
+ * w: write
+ *
+ * Here are all combinations:
+ *
+ * bar.brw
+ * bar.br
+ * bar.bw
+ * bar.arw
+ * bar.ar
+ * bar.aw
+ * bar.brwarw
+ * bar.brarw
+ * bar.bwarw
+ * bar.brwar
+ * bar.brwaw
+ * bar.brar
+ * bar.bwaw
+ */
+#define __bar_brw() asm volatile (".long 0x842cc000\n":::"memory")
+#define __bar_br() asm volatile (".long 0x8424c000\n":::"memory")
+#define __bar_bw() asm volatile (".long 0x8428c000\n":::"memory")
+#define __bar_arw() asm volatile (".long 0x8423c000\n":::"memory")
+#define __bar_ar() asm volatile (".long 0x8421c000\n":::"memory")
+#define __bar_aw() asm volatile (".long 0x8422c000\n":::"memory")
+#define __bar_brwarw() asm volatile (".long 0x842fc000\n":::"memory")
+#define __bar_brarw() asm volatile (".long 0x8427c000\n":::"memory")
+#define __bar_bwarw() asm volatile (".long 0x842bc000\n":::"memory")
+#define __bar_brwar() asm volatile (".long 0x842dc000\n":::"memory")
+#define __bar_brwaw() asm volatile (".long 0x842ec000\n":::"memory")
+#define __bar_brar() asm volatile (".long 0x8425c000\n":::"memory")
+#define __bar_bwaw() asm volatile (".long 0x842ac000\n":::"memory")
+
+#define __smp_mb() __bar_brwarw()
+#define __smp_rmb() __bar_brar()
+#define __smp_wmb() __bar_bwaw()
+
+#define ACQUIRE_FENCE ".long 0x8427c000\n"
+#define __smp_acquire_fence() __bar_brarw()
+#define __smp_release_fence() __bar_brwaw()
+
+#endif /* CONFIG_SMP */
+
/*
* sync: completion barrier, all sync.xx instructions
 * guarantee the last response received by bus transaction
@@ -15,31 +69,14 @@
* sync.s: inherit from sync, but also shareable to other cores
* sync.i: inherit from sync, but also flush cpu pipeline
* sync.is: the same with sync.i + sync.s
- *
- * bar.brwarw: ordering barrier for all load/store instructions before it
- * bar.brwarws: ordering barrier for all load/store instructions before it
- * and shareable to other cores
- * bar.brar: ordering barrier for all load instructions before it
- * bar.brars: ordering barrier for all load instructions before it
- * and shareable to other cores
- * bar.bwaw: ordering barrier for all store instructions before it
- * bar.bwaws: ordering barrier for all store instructions before it
- * and shareable to other cores
*/
+#define mb() asm volatile ("sync\n":::"memory")
#ifdef CONFIG_CPU_HAS_CACHEV2
-#define mb() asm volatile ("sync.s\n":::"memory")
-
-#ifdef CONFIG_SMP
-#define __smp_mb() asm volatile ("bar.brwarws\n":::"memory")
-#define __smp_rmb() asm volatile ("bar.brars\n":::"memory")
-#define __smp_wmb() asm volatile ("bar.bwaws\n":::"memory")
-#endif /* CONFIG_SMP */
-
-#define sync_is() asm volatile ("sync.is\n":::"memory")
-
-#else /* !CONFIG_CPU_HAS_CACHEV2 */
-#define mb() asm volatile ("sync\n":::"memory")
+/*
+ * Using three sync.is to prevent speculative PTW
+ */
+#define sync_is() asm volatile ("sync.is\nsync.is\nsync.is\n":::"memory")
#endif
#include <asm-generic/barrier.h>
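
The __bar_*() macros above hard-code .long opcodes; comparing the constants suggests a fixed base word with four selector bits for before/after read/write ordering. This bit assignment is inferred from the values listed, not taken from an ISA manual, so treat the sketch below as illustrative:

	/* Inferred from the __bar_*() constants above; treat as illustrative. */
	#include <assert.h>

	#define BAR_BASE 0x8420c000u
	#define BAR_AR   (1u << 16)   /* order reads  after the barrier  */
	#define BAR_AW   (1u << 17)   /* order writes after the barrier  */
	#define BAR_BR   (1u << 18)   /* order reads  before the barrier */
	#define BAR_BW   (1u << 19)   /* order writes before the barrier */

	int main(void)
	{
		/* Reproduce a few of the opcodes listed in the hunk above. */
		assert((BAR_BASE | BAR_BW | BAR_BR | BAR_AW | BAR_AR) == 0x842fc000u); /* __bar_brwarw */
		assert((BAR_BASE | BAR_BR | BAR_AR) == 0x8425c000u);                   /* __bar_brar   */
		assert((BAR_BASE | BAR_BW | BAR_AW) == 0x842ac000u);                   /* __bar_bwaw   */
		return 0;
	}
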
diff --git a/arch/csky/include/asm/bitops.h b/arch/csky/include/asm/bitops.h
index 43b9838bff63..91818787d860 100644
--- a/arch/csky/include/asm/bitops.h
+++ b/arch/csky/include/asm/bitops.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_BITOPS_H
#define __ASM_CSKY_BITOPS_H
diff --git a/arch/csky/include/asm/bug.h b/arch/csky/include/asm/bug.h
index 33ebd16b9c78..03f1a5f9184a 100644
--- a/arch/csky/include/asm/bug.h
+++ b/arch/csky/include/asm/bug.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_BUG_H
#define __ASM_CSKY_BUG_H
@@ -21,6 +20,8 @@ do { \
struct pt_regs;
void die(struct pt_regs *regs, const char *str);
+void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr);
+
void show_regs(struct pt_regs *regs);
void show_code(struct pt_regs *regs);
diff --git a/arch/csky/include/asm/cacheflush.h b/arch/csky/include/asm/cacheflush.h
index f0b8f25429a2..d0f9eafe8988 100644
--- a/arch/csky/include/asm/cacheflush.h
+++ b/arch/csky/include/asm/cacheflush.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_CACHEFLUSH_H
#define __ASM_CSKY_CACHEFLUSH_H
diff --git a/arch/csky/include/asm/checksum.h b/arch/csky/include/asm/checksum.h
index 7685824291b1..aa12ef4b9080 100644
--- a/arch/csky/include/asm/checksum.h
+++ b/arch/csky/include/asm/checksum.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_CHECKSUM_H
#define __ASM_CSKY_CHECKSUM_H
diff --git a/arch/csky/include/asm/clocksource.h b/arch/csky/include/asm/clocksource.h
new file mode 100644
index 000000000000..54da0e49efa1
--- /dev/null
+++ b/arch/csky/include/asm/clocksource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_VDSO_CSKY_CLOCKSOURCE_H
+#define __ASM_VDSO_CSKY_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
index 89224530a0ee..dabc8e46ce7b 100644
--- a/arch/csky/include/asm/cmpxchg.h
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -3,12 +3,12 @@
#ifndef __ASM_CSKY_CMPXCHG_H
#define __ASM_CSKY_CMPXCHG_H
-#ifdef CONFIG_CPU_HAS_LDSTEX
+#ifdef CONFIG_SMP
#include <asm/barrier.h>
extern void __bad_xchg(void);
-#define __xchg(new, ptr, size) \
+#define __xchg_relaxed(new, ptr, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(new) __new = (new); \
@@ -16,7 +16,6 @@ extern void __bad_xchg(void);
unsigned long tmp; \
switch (size) { \
case 4: \
- smp_mb(); \
asm volatile ( \
"1: ldex.w %0, (%3) \n" \
" mov %1, %2 \n" \
@@ -25,7 +24,6 @@ extern void __bad_xchg(void);
: "=&r" (__ret), "=&r" (tmp) \
: "r" (__new), "r"(__ptr) \
:); \
- smp_mb(); \
break; \
default: \
__bad_xchg(); \
@@ -33,9 +31,10 @@ extern void __bad_xchg(void);
__ret; \
})
-#define xchg(ptr, x) (__xchg((x), (ptr), sizeof(*(ptr))))
+#define xchg_relaxed(ptr, x) \
+ (__xchg_relaxed((x), (ptr), sizeof(*(ptr))))
-#define __cmpxchg(ptr, old, new, size) \
+#define __cmpxchg_relaxed(ptr, old, new, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(new) __new = (new); \
@@ -44,7 +43,6 @@ extern void __bad_xchg(void);
__typeof__(*(ptr)) __ret; \
switch (size) { \
case 4: \
- smp_mb(); \
asm volatile ( \
"1: ldex.w %0, (%3) \n" \
" cmpne %0, %4 \n" \
@@ -56,7 +54,6 @@ extern void __bad_xchg(void);
: "=&r" (__ret), "=&r" (__tmp) \
: "r" (__new), "r"(__ptr), "r"(__old) \
:); \
- smp_mb(); \
break; \
default: \
__bad_xchg(); \
@@ -64,8 +61,18 @@ extern void __bad_xchg(void);
__ret; \
})
-#define cmpxchg(ptr, o, n) \
- (__cmpxchg((ptr), (o), (n), sizeof(*(ptr))))
+#define cmpxchg_relaxed(ptr, o, n) \
+ (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
+
+#define cmpxchg(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __smp_release_fence(); \
+ __ret = cmpxchg_relaxed(ptr, o, n); \
+ __smp_acquire_fence(); \
+ __ret; \
+})
+
#else
#include <asm-generic/cmpxchg.h>
#endif
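
With the smp_mb() pairs removed from the ll/sc loops, the fully ordered cmpxchg() is now the relaxed operation bracketed by release and acquire fences. The same shape written with C11 atomics as generic stand-ins (this is not the kernel primitive, just the ordering pattern):

	/* Generic illustration of "relaxed op + fences" (not the kernel macros). */
	#include <stdatomic.h>
	#include <stdbool.h>

	static bool full_cmpxchg(atomic_int *p, int *expected, int desired)
	{
		atomic_thread_fence(memory_order_release);   /* __smp_release_fence() */
		bool ok = atomic_compare_exchange_strong_explicit(p, expected, desired,
								  memory_order_relaxed,
								  memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);   /* __smp_acquire_fence() */
		return ok;
	}
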
diff --git a/arch/csky/include/asm/elf.h b/arch/csky/include/asm/elf.h
index eb2cc5a673b5..48b83e283ed4 100644
--- a/arch/csky/include/asm/elf.h
+++ b/arch/csky/include/asm/elf.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_ELF_H
#define __ASM_CSKY_ELF_H
diff --git a/arch/csky/include/asm/fixmap.h b/arch/csky/include/asm/fixmap.h
index 4b589cc20900..49a77cbbe2a9 100644
--- a/arch/csky/include/asm/fixmap.h
+++ b/arch/csky/include/asm/fixmap.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_FIXMAP_H
#define __ASM_CSKY_FIXMAP_H
diff --git a/arch/csky/include/asm/ftrace.h b/arch/csky/include/asm/ftrace.h
index fae72b0b1374..9b86341731b6 100644
--- a/arch/csky/include/asm/ftrace.h
+++ b/arch/csky/include/asm/ftrace.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_FTRACE_H
#define __ASM_CSKY_FTRACE_H
diff --git a/arch/csky/include/asm/futex.h b/arch/csky/include/asm/futex.h
new file mode 100644
index 000000000000..6cfd312723fa
--- /dev/null
+++ b/arch/csky/include/asm/futex.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_FUTEX_H
+#define __ASM_CSKY_FUTEX_H
+
+#ifndef CONFIG_SMP
+#include <asm-generic/futex.h>
+#else
+#include <linux/atomic.h>
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+{ \
+ u32 tmp; \
+ \
+ __atomic_pre_full_fence(); \
+ \
+ __asm__ __volatile__ ( \
+ "1: ldex.w %[ov], %[u] \n" \
+ " "insn" \n" \
+ "2: stex.w %[t], %[u] \n" \
+ " bez %[t], 1b \n" \
+ " br 4f \n" \
+ "3: mov %[r], %[e] \n" \
+ "4: \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .balign 4 \n" \
+ " .long 1b, 3b \n" \
+ " .long 2b, 3b \n" \
+ " .previous \n" \
+ : [r] "+r" (ret), [ov] "=&r" (oldval), \
+ [u] "+m" (*uaddr), [t] "=&r" (tmp) \
+ : [op] "Jr" (oparg), [e] "jr" (-EFAULT) \
+ : "memory"); \
+ \
+ __atomic_post_full_fence(); \
+}
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval = 0, ret = 0;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mov %[t], %[ov]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %[t], %[ov], %[op]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %[t], %[ov], %[op]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("and %[t], %[ov], %[op]",
+ ret, oldval, uaddr, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %[t], %[ov], %[op]",
+ ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret = 0;
+ u32 val, tmp;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ __atomic_pre_full_fence();
+
+ __asm__ __volatile__ (
+ "1: ldex.w %[v], %[u] \n"
+ " cmpne %[v], %[ov] \n"
+ " bt 4f \n"
+ " mov %[t], %[nv] \n"
+ "2: stex.w %[t], %[u] \n"
+ " bez %[t], 1b \n"
+ " br 4f \n"
+ "3: mov %[r], %[e] \n"
+ "4: \n"
+ " .section __ex_table,\"a\" \n"
+ " .balign 4 \n"
+ " .long 1b, 3b \n"
+ " .long 2b, 3b \n"
+ " .previous \n"
+ : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr),
+ [t] "=&r" (tmp)
+ : [ov] "Jr" (oldval), [nv] "Jr" (newval), [e] "Jr" (-EFAULT)
+ : "memory");
+
+ __atomic_post_full_fence();
+
+ *uval = val;
+ return ret;
+}
+
+#endif /* CONFIG_SMP */
+#endif /* __ASM_CSKY_FUTEX_H */
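
The ldex.w/stex.w sequence in __futex_atomic_op above is a load-locked/store-conditional retry loop: load the old value, apply the operation, and retry whenever the conditional store fails. A rough portable-C model of the FUTEX_OP_ADD case, using a C11 compare-and-swap loop in place of the C-SKY instructions (the demo_* names are illustrative only; the fault handling provided by the __ex_table entries is omitted):

    #include <stdatomic.h>

    static int demo_futex_add(_Atomic unsigned int *uaddr, int oparg, int *oldval)
    {
            unsigned int old = atomic_load_explicit(uaddr, memory_order_relaxed);

            /* Stands in for "stex.w %[t], %[u]; bez %[t], 1b": retry until
             * the update lands without interference; 'old' is refreshed on
             * every failed attempt.                                        */
            while (!atomic_compare_exchange_weak_explicit(uaddr, &old,
                            old + (unsigned int)oparg,
                            memory_order_seq_cst, memory_order_relaxed))
                    ;

            *oldval = (int)old;
            return 0;       /* the real helper returns -EFAULT on a bad user pointer */
    }
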
diff --git a/arch/csky/include/asm/highmem.h b/arch/csky/include/asm/highmem.h
index 1f4ed3f4c0d9..1ed810effb3d 100644
--- a/arch/csky/include/asm/highmem.h
+++ b/arch/csky/include/asm/highmem.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_HIGHMEM_H
#define __ASM_CSKY_HIGHMEM_H
diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h
index e909587f24c5..f82654053dc0 100644
--- a/arch/csky/include/asm/io.h
+++ b/arch/csky/include/asm/io.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_IO_H
#define __ASM_CSKY_IO_H
diff --git a/arch/csky/include/asm/memory.h b/arch/csky/include/asm/memory.h
index a65c6759f537..d12179801ae3 100644
--- a/arch/csky/include/asm/memory.h
+++ b/arch/csky/include/asm/memory.h
@@ -10,7 +10,7 @@
#define FIXADDR_TOP _AC(0xffffc000, UL)
#define PKMAP_BASE _AC(0xff800000, UL)
-#define VMALLOC_START _AC(0xc0008000, UL)
+#define VMALLOC_START (PAGE_OFFSET + LOWMEM_LIMIT + (PAGE_SIZE * 8))
#define VMALLOC_END (PKMAP_BASE - (PAGE_SIZE * 2))
#ifdef CONFIG_HAVE_TCM
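
A quick sanity check of the new VMALLOC_START expression, assuming the default configuration visible elsewhere in this series (PAGE_OFFSET = 0x80000000, LOWMEM_LIMIT = SSEG_SIZE * 2 = 0x40000000, 4 KiB pages):

    VMALLOC_START = PAGE_OFFSET + LOWMEM_LIMIT + (PAGE_SIZE * 8)
                  = 0x80000000  + 0x40000000   + 0x00008000
                  = 0xc0008000

i.e. exactly the constant the old definition hard-coded, so the default layout is unchanged.
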
diff --git a/arch/csky/include/asm/mmu.h b/arch/csky/include/asm/mmu.h
index 26fbb1d15df0..d78321901d06 100644
--- a/arch/csky/include/asm/mmu.h
+++ b/arch/csky/include/asm/mmu.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_MMU_H
#define __ASM_CSKY_MMU_H
diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h
index b227d29393a8..95d99b30792c 100644
--- a/arch/csky/include/asm/mmu_context.h
+++ b/arch/csky/include/asm/mmu_context.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_MMU_CONTEXT_H
#define __ASM_CSKY_MMU_CONTEXT_H
@@ -14,12 +13,6 @@
#include <linux/sched.h>
#include <abi/ckmmu.h>
-#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
- setup_pgd(__pa(pgd), false)
-
-#define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \
- setup_pgd(__pa(pgd), true)
-
#define ASID_MASK ((1 << CONFIG_CPU_ASID_BITS) - 1)
#define cpu_asid(mm) (atomic64_read(&mm->context.asid) & ASID_MASK)
@@ -36,8 +29,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (prev != next)
check_and_switch_context(next, cpu);
- TLBMISS_HANDLER_SETUP_PGD(next->pgd);
- write_mmu_entryhi(next->context.asid.counter);
+ setup_pgd(next->pgd, next->context.asid.counter);
flush_icache_deferred(next);
}
diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h
index 9b98bf31d57c..3b91fc3cf36f 100644
--- a/arch/csky/include/asm/page.h
+++ b/arch/csky/include/asm/page.h
@@ -24,7 +24,7 @@
* address region. We use them mapping kernel 1GB direct-map address area and
* for more than 1GB of memory we use highmem.
*/
-#define PAGE_OFFSET 0x80000000
+#define PAGE_OFFSET CONFIG_PAGE_OFFSET
#define SSEG_SIZE 0x20000000
#define LOWMEM_LIMIT (SSEG_SIZE * 2)
diff --git a/arch/csky/include/asm/perf_event.h b/arch/csky/include/asm/perf_event.h
index 572093e11001..249905d8a4e8 100644
--- a/arch/csky/include/asm/perf_event.h
+++ b/arch/csky/include/asm/perf_event.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_PERF_EVENT_H
#define __ASM_CSKY_PERF_EVENT_H
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index d58d8146b729..cd211aabbefd 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_PGALLOC_H
#define __ASM_CSKY_PGALLOC_H
@@ -71,7 +70,7 @@ do { \
} while (0)
extern void pagetable_init(void);
-extern void pre_mmu_init(void);
+extern void mmu_init(unsigned long min_pfn, unsigned long max_pfn);
extern void pre_trap_init(void);
#endif /* __ASM_CSKY_PGALLOC_H */
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index 2002cb7f1053..0d60367b6bfa 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_PGTABLE_H
#define __ASM_CSKY_PGTABLE_H
@@ -14,7 +13,7 @@
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
+#define USER_PTRS_PER_PGD (PAGE_OFFSET/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0UL
/*
@@ -34,23 +33,13 @@
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pte_clear(mm, addr, ptep) set_pte((ptep), \
- (((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
+ (((unsigned int) addr >= PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
#define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT))
#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
| pgprot_val(prot))
-#define __READABLE (_PAGE_READ | _PAGE_VALID | _PAGE_ACCESSED)
-#define __WRITEABLE (_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED)
-
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | \
- _CACHE_MASK)
-
-#define __swp_type(x) (((x).val >> 4) & 0xff)
-#define __swp_offset(x) ((x).val >> 12)
-#define __swp_entry(type, offset) ((swp_entry_t) {((type) << 4) | \
- ((offset) << 12) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
@@ -59,41 +48,52 @@
pgprot_val(pgprot))
/*
- * CSKY can't do page protection for execute, and considers that the same like
- * read. Also, write permissions imply read permissions. This is the closest
- * we can get by reasonable means..
+ * C-SKY only has the VALID and DIRTY bits in hardware. So we need to use
+ * these two bits to emulate PRESENT, READ, WRITE, EXEC, MODIFIED, ACCESSED.
*/
-#define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHED)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
+
+#define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
+#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ | \
_CACHE_CACHED)
-#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
-#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
- _PAGE_GLOBAL | _CACHE_CACHED)
-#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE | \
_CACHE_CACHED)
+#define PAGE_SHARED PAGE_WRITE
+
+#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \
+ _PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \
+ _PAGE_GLOBAL | \
+ _CACHE_CACHED)
+
+#define _PAGE_IOREMAP (_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \
+ _PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \
+ _PAGE_GLOBAL | \
+ _CACHE_UNCACHED | _PAGE_SO)
+
+#define _PAGE_CHG_MASK (~(unsigned long) \
+ (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _CACHE_MASK | _PAGE_GLOBAL))
-#define _PAGE_IOREMAP \
- (_PAGE_PRESENT | __READABLE | __WRITEABLE | _PAGE_GLOBAL | \
- _CACHE_UNCACHED | _PAGE_SO)
+#define MAX_SWAPFILES_CHECK() \
+ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5)
#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
+#define __P001 PAGE_READ
+#define __P010 PAGE_READ
+#define __P011 PAGE_READ
+#define __P100 PAGE_READ
+#define __P101 PAGE_READ
+#define __P110 PAGE_READ
+#define __P111 PAGE_READ
#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
+#define __S001 PAGE_READ
+#define __S010 PAGE_WRITE
+#define __S011 PAGE_WRITE
+#define __S100 PAGE_READ
+#define __S101 PAGE_READ
+#define __S110 PAGE_WRITE
+#define __S111 PAGE_WRITE
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
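
For context, the __P*/__S* tables above are consumed by the generic mm code when it turns mmap() protection bits into a pgprot_t; a hedged sketch (the helper name and table layout below are illustrative, loosely modelled on the kernel's protection_map / vm_get_page_prot(), not code from this patch):

    /* Index bits: 0 = read, 1 = write, 2 = exec; the digits in the macro
     * names read exec/write/read from left to right.  Private mappings use
     * the __P table, shared mappings the __S table.                       */
    static pgprot_t demo_vm_prot(unsigned int prot_bits, int shared)
    {
            const pgprot_t p[8] = {
                    __P000, __P001, __P010, __P011,
                    __P100, __P101, __P110, __P111,
            };
            const pgprot_t s[8] = {
                    __S000, __S001, __S010, __S011,
                    __S100, __S101, __S110, __S111,
            };

            return shared ? s[prot_bits & 7] : p[prot_bits & 7];
    }
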
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h
index 4800f6563abb..9e933021fe8e 100644
--- a/arch/csky/include/asm/processor.h
+++ b/arch/csky/include/asm/processor.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_PROCESSOR_H
#define __ASM_CSKY_PROCESSOR_H
@@ -28,7 +27,7 @@ extern struct cpuinfo_csky cpu_data[];
* for a 64 bit kernel expandable to 8192EB, of which the current CSKY
* implementations will "only" be able to use 1TB ...
*/
-#define TASK_SIZE 0x7fff8000UL
+#define TASK_SIZE (PAGE_OFFSET - (PAGE_SIZE * 8))
#ifdef __KERNEL__
#define STACK_TOP TASK_SIZE
diff --git a/arch/csky/include/asm/ptrace.h b/arch/csky/include/asm/ptrace.h
index 91ceb1b454c9..4202aab6df42 100644
--- a/arch/csky/include/asm/ptrace.h
+++ b/arch/csky/include/asm/ptrace.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_PTRACE_H
#define __ASM_CSKY_PTRACE_H
diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h
index 79ede9b1a646..589e8321dc14 100644
--- a/arch/csky/include/asm/segment.h
+++ b/arch/csky/include/asm/segment.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_SEGMENT_H
#define __ASM_CSKY_SEGMENT_H
@@ -10,7 +9,7 @@ typedef struct {
#define KERNEL_DS ((mm_segment_t) { 0xFFFFFFFF })
-#define USER_DS ((mm_segment_t) { 0x80000000UL })
+#define USER_DS ((mm_segment_t) { PAGE_OFFSET })
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
diff --git a/arch/csky/include/asm/shmparam.h b/arch/csky/include/asm/shmparam.h
index efafe4c79fed..2fe6cea0dae9 100644
--- a/arch/csky/include/asm/shmparam.h
+++ b/arch/csky/include/asm/shmparam.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_SHMPARAM_H
#define __ASM_CSKY_SHMPARAM_H
diff --git a/arch/csky/include/asm/spinlock.h b/arch/csky/include/asm/spinlock.h
index 7cf3f2b34cea..69f5aa249c5f 100644
--- a/arch/csky/include/asm/spinlock.h
+++ b/arch/csky/include/asm/spinlock.h
@@ -6,8 +6,6 @@
#include <linux/spinlock_types.h>
#include <asm/barrier.h>
-#ifdef CONFIG_QUEUED_RWLOCKS
-
/*
* Ticket-based spin-locking.
*/
@@ -88,169 +86,4 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
#include <asm/qrwlock.h>
-/* See include/linux/spinlock.h */
-#define smp_mb__after_spinlock() smp_mb()
-
-#else /* CONFIG_QUEUED_RWLOCKS */
-
-/*
- * Test-and-set spin-locking.
- */
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- u32 *p = &lock->lock;
- u32 tmp;
-
- asm volatile (
- "1: ldex.w %0, (%1) \n"
- " bnez %0, 1b \n"
- " movi %0, 1 \n"
- " stex.w %0, (%1) \n"
- " bez %0, 1b \n"
- : "=&r" (tmp)
- : "r"(p)
- : "cc");
- smp_mb();
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- smp_mb();
- WRITE_ONCE(lock->lock, 0);
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
- u32 *p = &lock->lock;
- u32 tmp;
-
- asm volatile (
- "1: ldex.w %0, (%1) \n"
- " bnez %0, 2f \n"
- " movi %0, 1 \n"
- " stex.w %0, (%1) \n"
- " bez %0, 1b \n"
- " movi %0, 0 \n"
- "2: \n"
- : "=&r" (tmp)
- : "r"(p)
- : "cc");
-
- if (!tmp)
- smp_mb();
-
- return !tmp;
-}
-
-#define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0)
-
-/*
- * read lock/unlock/trylock
- */
-static inline void arch_read_lock(arch_rwlock_t *lock)
-{
- u32 *p = &lock->lock;
- u32 tmp;
-
- asm volatile (
- "1: ldex.w %0, (%1) \n"
- " blz %0, 1b \n"
- " addi %0, 1 \n"
- " stex.w %0, (%1) \n"
- " bez %0, 1b \n"
- : "=&r" (tmp)
- : "r"(p)
- : "cc");
- smp_mb();
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *lock)
-{
- u32 *p = &lock->lock;
- u32 tmp;
-
- smp_mb();
- asm volatile (
- "1: ldex.w %0, (%1) \n"
- " subi %0, 1 \n"
- " stex.w %0, (%1) \n"
- " bez %0, 1b \n"
- : "=&r" (tmp)
- : "r"(p)
- : "cc");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *lock)
-{
- u32 *p = &lock->lock;
- u32 tmp;
-
- asm volatile (
- "1: ldex.w %0, (%1) \n"
- " blz %0, 2f \n"
- " addi %0, 1 \n"
- " stex.w %0, (%1) \n"
- " bez %0, 1b \n"
- " movi %0, 0 \n"
- "2: \n"
- : "=&r" (tmp)
- : "r"(p)
- : "cc");
-
- if (!tmp)
- smp_mb();
-
- return !tmp;
-}
-
-/*
- * write lock/unlock/trylock
- */
-static inline void arch_write_lock(arch_rwlock_t *lock)
-{
- u32 *p = &lock->lock;
- u32 tmp;
-
- asm volatile (
- "1: ldex.w %0, (%1) \n"
- " bnez %0, 1b \n"
- " subi %0, 1 \n"
- " stex.w %0, (%1) \n"
- " bez %0, 1b \n"
- : "=&r" (tmp)
- : "r"(p)
- : "cc");
- smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *lock)
-{
- smp_mb();
- WRITE_ONCE(lock->lock, 0);
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *lock)
-{
- u32 *p = &lock->lock;
- u32 tmp;
-
- asm volatile (
- "1: ldex.w %0, (%1) \n"
- " bnez %0, 2f \n"
- " subi %0, 1 \n"
- " stex.w %0, (%1) \n"
- " bez %0, 1b \n"
- " movi %0, 0 \n"
- "2: \n"
- : "=&r" (tmp)
- : "r"(p)
- : "cc");
-
- if (!tmp)
- smp_mb();
-
- return !tmp;
-}
-
-#endif /* CONFIG_QUEUED_RWLOCKS */
#endif /* __ASM_CSKY_SPINLOCK_H */
diff --git a/arch/csky/include/asm/spinlock_types.h b/arch/csky/include/asm/spinlock_types.h
index 88b82438b182..8ff0f6ff3a00 100644
--- a/arch/csky/include/asm/spinlock_types.h
+++ b/arch/csky/include/asm/spinlock_types.h
@@ -22,16 +22,6 @@ typedef struct {
#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
-#ifdef CONFIG_QUEUED_RWLOCKS
#include <asm-generic/qrwlock_types.h>
-#else /* CONFIG_NR_CPUS > 2 */
-
-typedef struct {
- u32 lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED { 0 }
-
-#endif /* CONFIG_QUEUED_RWLOCKS */
#endif /* __ASM_CSKY_SPINLOCK_TYPES_H */
diff --git a/arch/csky/include/asm/string.h b/arch/csky/include/asm/string.h
index 73142de18355..a0d81e9d6b8f 100644
--- a/arch/csky/include/asm/string.h
+++ b/arch/csky/include/asm/string.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef _CSKY_STRING_MM_H_
#define _CSKY_STRING_MM_H_
diff --git a/arch/csky/include/asm/switch_to.h b/arch/csky/include/asm/switch_to.h
index 35a39e88933d..731e466415e2 100644
--- a/arch/csky/include/asm/switch_to.h
+++ b/arch/csky/include/asm/switch_to.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_SWITCH_TO_H
#define __ASM_CSKY_SWITCH_TO_H
diff --git a/arch/csky/include/asm/syscalls.h b/arch/csky/include/asm/syscalls.h
index 5d48e5e0082e..ea9ce6138b9b 100644
--- a/arch/csky/include/asm/syscalls.h
+++ b/arch/csky/include/asm/syscalls.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_SYSCALLS_H
#define __ASM_CSKY_SYSCALLS_H
diff --git a/arch/csky/include/asm/thread_info.h b/arch/csky/include/asm/thread_info.h
index 21456a3737c2..8c349a8f904d 100644
--- a/arch/csky/include/asm/thread_info.h
+++ b/arch/csky/include/asm/thread_info.h
@@ -1,12 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef _ASM_CSKY_THREAD_INFO_H
#define _ASM_CSKY_THREAD_INFO_H
#ifndef __ASSEMBLY__
-#include <linux/version.h>
#include <asm/types.h>
#include <asm/page.h>
#include <asm/processor.h>
diff --git a/arch/csky/include/asm/tlb.h b/arch/csky/include/asm/tlb.h
index fdff9b8d70c8..3498e65f59f8 100644
--- a/arch/csky/include/asm/tlb.h
+++ b/arch/csky/include/asm/tlb.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_TLB_H
#define __ASM_CSKY_TLB_H
diff --git a/arch/csky/include/asm/tlbflush.h b/arch/csky/include/asm/tlbflush.h
index 6845b0667703..407160b4fde7 100644
--- a/arch/csky/include/asm/tlbflush.h
+++ b/arch/csky/include/asm/tlbflush.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H
diff --git a/arch/csky/include/asm/traps.h b/arch/csky/include/asm/traps.h
index 1c081805b962..421a4195e2fe 100644
--- a/arch/csky/include/asm/traps.h
+++ b/arch/csky/include/asm/traps.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_TRAPS_H
#define __ASM_CSKY_TRAPS_H
diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h
index 1633ffe5ae15..3dec272e1fa3 100644
--- a/arch/csky/include/asm/uaccess.h
+++ b/arch/csky/include/asm/uaccess.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_UACCESS_H
#define __ASM_CSKY_UACCESS_H
diff --git a/arch/csky/include/asm/unistd.h b/arch/csky/include/asm/unistd.h
index da7a18295615..9cf97de9a26d 100644
--- a/arch/csky/include/asm/unistd.h
+++ b/arch/csky/include/asm/unistd.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <uapi/asm/unistd.h>
diff --git a/arch/csky/include/asm/vdso.h b/arch/csky/include/asm/vdso.h
index d963d691f3a1..eb5142f9c564 100644
--- a/arch/csky/include/asm/vdso.h
+++ b/arch/csky/include/asm/vdso.h
@@ -3,10 +3,25 @@
#ifndef __ASM_CSKY_VDSO_H
#define __ASM_CSKY_VDSO_H
-#include <abi/vdso.h>
+#include <linux/types.h>
-struct csky_vdso {
- unsigned short rt_signal_retcode[4];
+#ifndef GENERIC_TIME_VSYSCALL
+struct vdso_data {
};
+#endif
+
+/*
+ * The VDSO symbols are mapped into Linux so we can just use regular symbol
+ * addressing to get their offsets in userspace. The symbols are mapped at an
+ * offset of 0, but since the linker must support setting weak undefined
+ * symbols to the absolute address 0 it also happens to support other low
+ * addresses even when the code model suggests those low addresses would not
+ * otherwise be available.
+ */
+#define VDSO_SYMBOL(base, name) \
+({ \
+ extern const char __vdso_##name[]; \
+ (void __user *)((unsigned long)(base) + __vdso_##name); \
+})
#endif /* __ASM_CSKY_VDSO_H */
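
A usage sketch: the signal-frame code later in this patch resolves the return trampoline through this macro, roughly as in the signal.c hunk further down:

    /* Point the signal return address at the vDSO rt_sigreturn trampoline. */
    regs->lr = (unsigned long)VDSO_SYMBOL(current->mm->context.vdso,
                                          rt_sigreturn);
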
diff --git a/arch/csky/include/asm/vdso/clocksource.h b/arch/csky/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000000..dfca7b4724b7
--- /dev/null
+++ b/arch/csky/include/asm/vdso/clocksource.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_VDSO_CSKY_CLOCKSOURCE_H
+#define __ASM_VDSO_CSKY_CLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_ARCHTIMER
+
+#endif /* __ASM_VDSO_CSKY_CLOCKSOURCE_H */
diff --git a/arch/csky/include/asm/vdso/gettimeofday.h b/arch/csky/include/asm/vdso/gettimeofday.h
new file mode 100644
index 000000000000..6c4f1446944f
--- /dev/null
+++ b/arch/csky/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_VDSO_CSKY_GETTIMEOFDAY_H
+#define __ASM_VDSO_CSKY_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/barrier.h>
+#include <asm/unistd.h>
+#include <abi/regdef.h>
+#include <uapi/linux/time.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+static __always_inline
+int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct __kernel_old_timeval *tv asm("a0") = _tv;
+ register struct timezone *tz asm("a1") = _tz;
+ register long ret asm("a0");
+ register long nr asm(syscallid) = __NR_gettimeofday;
+
+ asm volatile ("trap 0\n"
+ : "=r" (ret)
+ : "r"(tv), "r"(tz), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm(syscallid) = __NR_clock_gettime64;
+
+ asm volatile ("trap 0\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct old_timespec32 *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm(syscallid) = __NR_clock_gettime;
+
+ asm volatile ("trap 0\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm(syscallid) = __NR_clock_getres_time64;
+
+ asm volatile ("trap 0\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct old_timespec32 *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm(syscallid) = __NR_clock_getres;
+
+ asm volatile ("trap 0\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+uint64_t csky_pmu_read_cc(void);
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
+ const struct vdso_data *vd)
+{
+#ifdef CONFIG_CSKY_PMU_V1
+ return csky_pmu_read_cc();
+#else
+ return 0;
+#endif
+}
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+ return _vdso_data;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_CSKY_GETTIMEOFDAY_H */
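
These hooks are consumed by the generic lib/vdso code; a hedged sketch of the usual flow (not code from this patch — the demo_* name is invented and the mult/shift conversion is elided):

    static int demo_vdso_clock_read(clockid_t clk, struct __kernel_timespec *ts)
    {
            const struct vdso_data *vd = __arch_get_vdso_data();
            u64 cycles;

            /* No usable counter (e.g. !CONFIG_CSKY_PMU_V1): trap into the kernel. */
            if (vd->clock_mode == VDSO_CLOCKMODE_NONE)
                    return clock_gettime_fallback(clk, ts);

            cycles = __arch_get_hw_counter(vd->clock_mode, vd);
            /* ...scale 'cycles' with the mult/shift in *vd and fill in *ts... */
            return 0;
    }
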
diff --git a/arch/csky/include/asm/vdso/processor.h b/arch/csky/include/asm/vdso/processor.h
new file mode 100644
index 000000000000..39a6b561d0cc
--- /dev/null
+++ b/arch/csky/include/asm/vdso/processor.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_VDSO_CSKY_PROCESSOR_H
+#define __ASM_VDSO_CSKY_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#define cpu_relax() barrier()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_CSKY_PROCESSOR_H */
diff --git a/arch/csky/include/asm/vdso/vsyscall.h b/arch/csky/include/asm/vdso/vsyscall.h
new file mode 100644
index 000000000000..c276211a7c4d
--- /dev/null
+++ b/arch/csky/include/asm/vdso/vsyscall.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_VDSO_CSKY_VSYSCALL_H
+#define __ASM_VDSO_CSKY_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <vdso/datapage.h>
+
+extern struct vdso_data *vdso_data;
+
+static __always_inline struct vdso_data *__csky_get_k_vdso_data(void)
+{
+ return vdso_data;
+}
+#define __arch_get_k_vdso_data __csky_get_k_vdso_data
+
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_CSKY_VSYSCALL_H */
diff --git a/arch/csky/include/uapi/asm/byteorder.h b/arch/csky/include/uapi/asm/byteorder.h
index d150cd664873..1aedd513b65a 100644
--- a/arch/csky/include/uapi/asm/byteorder.h
+++ b/arch/csky/include/uapi/asm/byteorder.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_BYTEORDER_H
#define __ASM_CSKY_BYTEORDER_H
diff --git a/arch/csky/include/uapi/asm/perf_regs.h b/arch/csky/include/uapi/asm/perf_regs.h
index 49d4e147a559..d0a8ac6a1b77 100644
--- a/arch/csky/include/uapi/asm/perf_regs.h
+++ b/arch/csky/include/uapi/asm/perf_regs.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef _ASM_CSKY_PERF_REGS_H
#define _ASM_CSKY_PERF_REGS_H
diff --git a/arch/csky/include/uapi/asm/ptrace.h b/arch/csky/include/uapi/asm/ptrace.h
index 66b2268e324e..3be9c14334a6 100644
--- a/arch/csky/include/uapi/asm/ptrace.h
+++ b/arch/csky/include/uapi/asm/ptrace.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef _CSKY_PTRACE_H
#define _CSKY_PTRACE_H
diff --git a/arch/csky/include/uapi/asm/sigcontext.h b/arch/csky/include/uapi/asm/sigcontext.h
index 670c020f2cb8..859afb602477 100644
--- a/arch/csky/include/uapi/asm/sigcontext.h
+++ b/arch/csky/include/uapi/asm/sigcontext.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_SIGCONTEXT_H
#define __ASM_CSKY_SIGCONTEXT_H
diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h
index ba4018929733..7ff6a2466af1 100644
--- a/arch/csky/include/uapi/asm/unistd.h
+++ b/arch/csky/include/uapi/asm/unistd.h
@@ -1,5 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_NEW_STAT
diff --git a/arch/csky/kernel/Makefile b/arch/csky/kernel/Makefile
index 37f37c0e934a..6c0f36010ed0 100644
--- a/arch/csky/kernel/Makefile
+++ b/arch/csky/kernel/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
extra-y := head.o vmlinux.lds
-obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o
+obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o vdso/
obj-y += power.o syscall.o syscall_table.o setup.o
obj-y += process.o cpu-probe.o ptrace.o stacktrace.o
obj-y += probes/
diff --git a/arch/csky/kernel/atomic.S b/arch/csky/kernel/atomic.S
index 3821ef9b7567..e73e548f7855 100644
--- a/arch/csky/kernel/atomic.S
+++ b/arch/csky/kernel/atomic.S
@@ -14,6 +14,10 @@
*/
ENTRY(csky_cmpxchg)
USPTOKSP
+
+ RD_MEH a3
+ WR_MEH a3
+
mfcr a3, epc
addi a3, TRAP0_SIZE
@@ -36,11 +40,11 @@ ENTRY(csky_cmpxchg)
2:
sync.is
#else
-1:
+GLOBAL(csky_cmpxchg_ldw)
ldw a3, (a2)
cmpne a0, a3
bt16 3f
-2:
+GLOBAL(csky_cmpxchg_stw)
stw a1, (a2)
3:
#endif
@@ -55,19 +59,3 @@ ENTRY(csky_cmpxchg)
KSPTOUSP
rte
END(csky_cmpxchg)
-
-#ifndef CONFIG_CPU_HAS_LDSTEX
-/*
- * Called from tlbmodified exception
- */
-ENTRY(csky_cmpxchg_fixup)
- mfcr a0, epc
- lrw a1, 2b
- cmpne a1, a0
- bt 1f
- subi a1, (2b - 1b)
- stw a1, (sp, LSAVE_PC)
-1:
- rts
-END(csky_cmpxchg_fixup)
-#endif
diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S
index 5a5cabd076e1..c1bd7a6b4ab6 100644
--- a/arch/csky/kernel/entry.S
+++ b/arch/csky/kernel/entry.S
@@ -13,10 +13,6 @@
#include <asm/page.h>
#include <asm/thread_info.h>
-#define PTE_INDX_MSK 0xffc
-#define PTE_INDX_SHIFT 10
-#define _PGDIR_SHIFT 22
-
.macro zero_fp
#ifdef CONFIG_STACKTRACE
movi r8, 0
@@ -41,108 +37,15 @@
#endif
.endm
-.macro tlbop_begin name, val0, val1, val2
-ENTRY(csky_\name)
- mtcr a3, ss2
- mtcr r6, ss3
- mtcr a2, ss4
-
- RD_PGDR r6
- RD_MEH a3
-#ifdef CONFIG_CPU_HAS_TLBI
- tlbi.vaas a3
- sync.is
-
- btsti a3, 31
- bf 1f
- RD_PGDR_K r6
-1:
-#else
- bgeni a2, 31
- WR_MCIR a2
- bgeni a2, 25
- WR_MCIR a2
-#endif
- bclri r6, 0
- lrw a2, va_pa_offset
- ld.w a2, (a2, 0)
- subu r6, a2
- bseti r6, 31
-
- mov a2, a3
- lsri a2, _PGDIR_SHIFT
- lsli a2, 2
- addu r6, a2
- ldw r6, (r6)
-
- lrw a2, va_pa_offset
- ld.w a2, (a2, 0)
- subu r6, a2
- bseti r6, 31
-
- lsri a3, PTE_INDX_SHIFT
- lrw a2, PTE_INDX_MSK
- and a3, a2
- addu r6, a3
- ldw a3, (r6)
-
- movi a2, (_PAGE_PRESENT | \val0)
- and a3, a2
- cmpne a3, a2
- bt \name
-
- /* First read/write the page, just update the flags */
- ldw a3, (r6)
- bgeni a2, PAGE_VALID_BIT
- bseti a2, PAGE_ACCESSED_BIT
- bseti a2, \val1
- bseti a2, \val2
- or a3, a2
- stw a3, (r6)
-
- /* Some cpu tlb-hardrefill bypass the cache */
-#ifdef CONFIG_CPU_NEED_TLBSYNC
- movi a2, 0x22
- bseti a2, 6
- mtcr r6, cr22
- mtcr a2, cr17
- sync
-#endif
-
- mfcr a3, ss2
- mfcr r6, ss3
- mfcr a2, ss4
- rte
-\name:
- mfcr a3, ss2
- mfcr r6, ss3
- mfcr a2, ss4
+.text
+ENTRY(csky_pagefault)
SAVE_ALL 0
-.endm
-.macro tlbop_end is_write
zero_fp
context_tracking
- RD_MEH a2
- psrset ee, ie
+ psrset ee
mov a0, sp
- movi a1, \is_write
jbsr do_page_fault
jmpi ret_from_exception
-.endm
-
-.text
-
-tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
-tlbop_end 0
-
-tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
-tlbop_end 1
-
-tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
-#ifndef CONFIG_CPU_HAS_LDSTEX
-jbsr csky_cmpxchg_fixup
-#endif
-tlbop_end 1
ENTRY(csky_systemcall)
SAVE_ALL TRAP0_SIZE
@@ -314,6 +217,9 @@ ENTRY(csky_trap)
ENTRY(csky_get_tls)
USPTOKSP
+ RD_MEH a0
+ WR_MEH a0
+
/* increase epc for continue */
mfcr a0, epc
addi a0, TRAP0_SIZE
diff --git a/arch/csky/kernel/head.S b/arch/csky/kernel/head.S
index 17ed9d250480..7e3e4f15b052 100644
--- a/arch/csky/kernel/head.S
+++ b/arch/csky/kernel/head.S
@@ -21,10 +21,16 @@ END(_start)
ENTRY(_start_smp_secondary)
SETUP_MMU
- /* copy msa1 from CPU0 */
- lrw r6, secondary_msa1
+#ifdef CONFIG_PAGE_OFFSET_80000000
+ lrw r6, secondary_msa1
ld.w r6, (r6, 0)
mtcr r6, cr<31, 15>
+#endif
+
+ lrw r6, secondary_pgd
+ ld.w r6, (r6, 0)
+ mtcr r6, cr<28, 15>
+ mtcr r6, cr<29, 15>
/* set stack point */
lrw r6, secondary_stack
diff --git a/arch/csky/kernel/perf_event.c b/arch/csky/kernel/perf_event.c
index 1a29f1157449..e5f18420ce64 100644
--- a/arch/csky/kernel/perf_event.c
+++ b/arch/csky/kernel/perf_event.c
@@ -87,7 +87,7 @@ static int csky_pmu_irq;
})
/* cycle counter */
-static uint64_t csky_pmu_read_cc(void)
+uint64_t csky_pmu_read_cc(void)
{
uint32_t lo, hi, tmp;
uint64_t result;
@@ -1319,7 +1319,7 @@ int csky_pmu_device_probe(struct platform_device *pdev,
pr_notice("[perf] PMU request irq fail!\n");
}
- ret = cpuhp_setup_state(CPUHP_AP_PERF_ONLINE, "AP_PERF_ONLINE",
+ ret = cpuhp_setup_state(CPUHP_AP_PERF_CSKY_ONLINE, "AP_PERF_ONLINE",
csky_pmu_starting_cpu,
csky_pmu_dying_cpu);
if (ret) {
diff --git a/arch/csky/kernel/probes/simulate-insn.c b/arch/csky/kernel/probes/simulate-insn.c
index 4e464fed52ec..d6e8d092c9b7 100644
--- a/arch/csky/kernel/probes/simulate-insn.c
+++ b/arch/csky/kernel/probes/simulate-insn.c
@@ -274,9 +274,9 @@ void __kprobes
simulate_bnezad32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
- unsigned long val;
+ long val;
- csky_insn_reg_get_val(regs, tmp, &val);
+ csky_insn_reg_get_val(regs, tmp, (unsigned long *)&val);
val -= 1;
@@ -286,7 +286,7 @@ simulate_bnezad32(u32 opcode, long addr, struct pt_regs *regs)
} else
instruction_pointer_set(regs, addr + 4);
- csky_insn_reg_set_val(regs, tmp, val);
+ csky_insn_reg_set_val(regs, tmp, (unsigned long)val);
}
void __kprobes
@@ -297,13 +297,11 @@ simulate_bhsz32(u32 opcode, long addr, struct pt_regs *regs)
csky_insn_reg_get_val(regs, tmp, &val);
- if (val >= 0) {
+ if ((long) val >= 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
-
- csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
@@ -314,13 +312,11 @@ simulate_bhz32(u32 opcode, long addr, struct pt_regs *regs)
csky_insn_reg_get_val(regs, tmp, &val);
- if (val > 0) {
+ if ((long) val > 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
-
- csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
@@ -331,13 +327,11 @@ simulate_blsz32(u32 opcode, long addr, struct pt_regs *regs)
csky_insn_reg_get_val(regs, tmp, &val);
- if (val <= 0) {
+ if ((long) val <= 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
-
- csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
@@ -348,13 +342,11 @@ simulate_blz32(u32 opcode, long addr, struct pt_regs *regs)
csky_insn_reg_get_val(regs, tmp, &val);
- if (val < 0) {
+ if ((long) val < 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
-
- csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
index 69af6bc87e64..3d0ca22cd0e2 100644
--- a/arch/csky/kernel/process.c
+++ b/arch/csky/kernel/process.c
@@ -49,7 +49,7 @@ int copy_thread(unsigned long clone_flags,
/* setup thread.sp for switch_to !!! */
p->thread.sp = (unsigned long)childstack;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
memset(childregs, 0, sizeof(struct pt_regs));
childstack->r15 = (unsigned long) ret_from_kernel_thread;
childstack->r10 = kthread_arg;
diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c
index d822144906ac..0105ac81b432 100644
--- a/arch/csky/kernel/ptrace.c
+++ b/arch/csky/kernel/ptrace.c
@@ -22,6 +22,7 @@
#include <asm/asm-offsets.h>
#include <abi/regdef.h>
+#include <abi/ckmmu.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
@@ -83,7 +84,7 @@ static int gpr_get(struct task_struct *target,
/* Abiv1 regs->tls is fake and we need sync here. */
regs->tls = task_thread_info(target)->tp_value;
- return membuf_write(&to, regs, sizeof(regs));
+ return membuf_write(&to, regs, sizeof(*regs));
}
static int gpr_set(struct task_struct *target,
@@ -343,6 +344,124 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs)
trace_sys_exit(regs, syscall_get_return_value(current, regs));
}
+#ifdef CONFIG_CPU_CK860
+static void show_iutlb(void)
+{
+ int entry, i;
+ unsigned long flags;
+ unsigned long oldpid;
+ unsigned long entryhi[16], entrylo0[16], entrylo1[16];
+
+ oldpid = read_mmu_entryhi();
+
+ entry = 0x8000;
+
+ local_irq_save(flags);
+
+ for (i = 0; i < 16; i++) {
+ write_mmu_index(entry);
+ tlb_read();
+ entryhi[i] = read_mmu_entryhi();
+ entrylo0[i] = read_mmu_entrylo0();
+ entrylo1[i] = read_mmu_entrylo1();
+
+ entry++;
+ }
+
+ local_irq_restore(flags);
+
+ write_mmu_entryhi(oldpid);
+
+ printk("\n\n\n");
+ for (i = 0; i < 16; i++)
+ printk("iutlb[%d]: entryhi - 0x%lx; entrylo0 - 0x%lx;"
+ " entrylo1 - 0x%lx\n",
+ i, entryhi[i], entrylo0[i], entrylo1[i]);
+ printk("\n\n\n");
+}
+
+static void show_dutlb(void)
+{
+ int entry, i;
+ unsigned long flags;
+ unsigned long oldpid;
+ unsigned long entryhi[16], entrylo0[16], entrylo1[16];
+
+ oldpid = read_mmu_entryhi();
+
+ entry = 0x4000;
+
+ local_irq_save(flags);
+
+ for (i = 0; i < 16; i++) {
+ write_mmu_index(entry);
+ tlb_read();
+ entryhi[i] = read_mmu_entryhi();
+ entrylo0[i] = read_mmu_entrylo0();
+ entrylo1[i] = read_mmu_entrylo1();
+
+ entry++;
+ }
+
+ local_irq_restore(flags);
+
+ write_mmu_entryhi(oldpid);
+
+ printk("\n\n\n");
+ for (i = 0; i < 16; i++)
+ printk("dutlb[%d]: entryhi - 0x%lx; entrylo0 - 0x%lx;"
+ " entrylo1 - 0x%lx\n",
+ i, entryhi[i], entrylo0[i], entrylo1[i]);
+ printk("\n\n\n");
+}
+
+static unsigned long entryhi[1024], entrylo0[1024], entrylo1[1024];
+static void show_jtlb(void)
+{
+ int entry;
+ unsigned long flags;
+ unsigned long oldpid;
+
+ oldpid = read_mmu_entryhi();
+
+ entry = 0;
+
+ local_irq_save(flags);
+ while (entry < 1024) {
+ write_mmu_index(entry);
+ tlb_read();
+ entryhi[entry] = read_mmu_entryhi();
+ entrylo0[entry] = read_mmu_entrylo0();
+ entrylo1[entry] = read_mmu_entrylo1();
+
+ entry++;
+ }
+ local_irq_restore(flags);
+
+ write_mmu_entryhi(oldpid);
+
+ printk("\n\n\n");
+
+ for (entry = 0; entry < 1024; entry++)
+ printk("jtlb[%x]: entryhi - 0x%lx; entrylo0 - 0x%lx;"
+ " entrylo1 - 0x%lx\n",
+ entry, entryhi[entry], entrylo0[entry], entrylo1[entry]);
+ printk("\n\n\n");
+}
+
+static void show_tlb(void)
+{
+ show_iutlb();
+ show_dutlb();
+ show_jtlb();
+}
+#else
+static void show_tlb(void)
+{
+ return;
+}
+#endif
+
void show_regs(struct pt_regs *fp)
{
pr_info("\nCURRENT PROCESS:\n\n");
@@ -363,9 +482,10 @@ void show_regs(struct pt_regs *fp)
pr_info("PC: 0x%08lx (%pS)\n", (long)fp->pc, (void *)fp->pc);
pr_info("LR: 0x%08lx (%pS)\n", (long)fp->lr, (void *)fp->lr);
- pr_info("SP: 0x%08lx\n", (long)fp);
- pr_info("orig_a0: 0x%08lx\n", fp->orig_a0);
+ pr_info("SP: 0x%08lx\n", (long)fp->usp);
pr_info("PSR: 0x%08lx\n", (long)fp->sr);
+ pr_info("orig_a0: 0x%08lx\n", fp->orig_a0);
+ pr_info("PT_REGS: 0x%08lx\n", (long)fp);
pr_info(" a0: 0x%08lx a1: 0x%08lx a2: 0x%08lx a3: 0x%08lx\n",
fp->a0, fp->a1, fp->a2, fp->a3);
@@ -395,5 +515,7 @@ void show_regs(struct pt_regs *fp)
fp->regs[8], fp->regs[9]);
#endif
+ show_tlb();
+
return;
}
diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c
index e4cab16056d6..e93bc6f74432 100644
--- a/arch/csky/kernel/setup.c
+++ b/arch/csky/kernel/setup.c
@@ -45,13 +45,17 @@ static void __init csky_memblock_init(void)
if (size >= lowmem_size) {
max_low_pfn = min_low_pfn + lowmem_size;
+#ifdef CONFIG_PAGE_OFFSET_80000000
write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE);
+#endif
} else if (size > sseg_size) {
max_low_pfn = min_low_pfn + sseg_size;
}
max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
+ mmu_init(min_low_pfn, max_low_pfn);
+
#ifdef CONFIG_HIGHMEM
max_zone_pfn[ZONE_HIGHMEM] = max_pfn;
@@ -101,16 +105,26 @@ void __init setup_arch(char **cmdline_p)
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
+static inline unsigned long read_mmu_msa(void)
+{
+#ifdef CONFIG_PAGE_OFFSET_80000000
+ return read_mmu_msa0();
+#endif
+
+#ifdef CONFIG_PAGE_OFFSET_A0000000
+ return read_mmu_msa1();
+#endif
+}
+
asmlinkage __visible void __init csky_start(unsigned int unused,
void *dtb_start)
{
/* Clean up bss section */
memset(__bss_start, 0, __bss_stop - __bss_start);
- va_pa_offset = read_mmu_msa0() & ~(SSEG_SIZE - 1);
+ va_pa_offset = read_mmu_msa() & ~(SSEG_SIZE - 1);
pre_trap_init();
- pre_mmu_init();
if (dtb_start == NULL)
early_init_dt_scan(__dtb_start);
diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c
index 37ea64ed3c12..312f046d452d 100644
--- a/arch/csky/kernel/signal.c
+++ b/arch/csky/kernel/signal.c
@@ -134,7 +134,6 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe *frame;
int err = 0;
- struct csky_vdso *vdso = current->mm->context.vdso;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(frame, sizeof(*frame)))
@@ -152,7 +151,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
return -EFAULT;
/* Set up to return from userspace. */
- regs->lr = (unsigned long)(vdso->rt_signal_retcode);
+ regs->lr = (unsigned long)VDSO_SYMBOL(
+ current->mm->context.vdso, rt_sigreturn);
/*
* Set up registers for signal handler.
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index 041d0de6a1b6..0f9f5eef9338 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -203,8 +203,8 @@ volatile unsigned int secondary_hint;
volatile unsigned int secondary_hint2;
volatile unsigned int secondary_ccr;
volatile unsigned int secondary_stack;
-
-unsigned long secondary_msa1;
+volatile unsigned int secondary_msa1;
+volatile unsigned int secondary_pgd;
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
@@ -216,6 +216,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
secondary_hint2 = mfcr("cr<21, 1>");
secondary_ccr = mfcr("cr18");
secondary_msa1 = read_mmu_msa1();
+ secondary_pgd = mfcr("cr<29, 15>");
/*
* Because other CPUs are in reset status, we must flush data
@@ -262,8 +263,6 @@ void csky_start_secondary(void)
flush_tlb_all();
write_mmu_pagemask(0);
- TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
- TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);
#ifdef CONFIG_CPU_HAS_FPU
init_fpu();
diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c
index 959a917c989d..e5fbf8653a21 100644
--- a/arch/csky/kernel/traps.c
+++ b/arch/csky/kernel/traps.c
@@ -39,9 +39,7 @@ asmlinkage void csky_cmpxchg(void);
asmlinkage void csky_get_tls(void);
asmlinkage void csky_irq(void);
-asmlinkage void csky_tlbinvalidl(void);
-asmlinkage void csky_tlbinvalids(void);
-asmlinkage void csky_tlbmodified(void);
+asmlinkage void csky_pagefault(void);
/* Defined in head.S */
asmlinkage void _start_smp_secondary(void);
@@ -66,9 +64,9 @@ void __init trap_init(void)
VEC_INIT(VEC_TRAP3, csky_get_tls);
/* setup MMU TLB exception */
- VEC_INIT(VEC_TLBINVALIDL, csky_tlbinvalidl);
- VEC_INIT(VEC_TLBINVALIDS, csky_tlbinvalids);
- VEC_INIT(VEC_TLBMODIFIED, csky_tlbmodified);
+ VEC_INIT(VEC_TLBINVALIDL, csky_pagefault);
+ VEC_INIT(VEC_TLBINVALIDS, csky_pagefault);
+ VEC_INIT(VEC_TLBMODIFIED, csky_pagefault);
#ifdef CONFIG_CPU_HAS_FPU
init_fpu();
diff --git a/arch/csky/kernel/vdso.c b/arch/csky/kernel/vdso.c
index abc3dbc658d4..16c20d64d165 100644
--- a/arch/csky/kernel/vdso.c
+++ b/arch/csky/kernel/vdso.c
@@ -1,86 +1,107 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/init.h>
#include <linux/binfmts.h>
#include <linux/elf.h>
-#include <linux/vmalloc.h>
-#include <linux/unistd.h>
-#include <linux/uaccess.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+#ifdef GENERIC_TIME_VSYSCALL
+#include <vdso/datapage.h>
+#else
#include <asm/vdso.h>
-#include <asm/cacheflush.h>
+#endif
-static struct page *vdso_page;
+extern char vdso_start[], vdso_end[];
-static int __init init_vdso(void)
-{
- struct csky_vdso *vdso;
- int err = 0;
-
- vdso_page = alloc_page(GFP_KERNEL);
- if (!vdso_page)
- panic("Cannot allocate vdso");
+static unsigned int vdso_pages;
+static struct page **vdso_pagelist;
- vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
- if (!vdso)
- panic("Cannot map vdso");
+/*
+ * The vDSO data page.
+ */
+static union {
+ struct vdso_data data;
+ u8 page[PAGE_SIZE];
+} vdso_data_store __page_aligned_data;
+struct vdso_data *vdso_data = &vdso_data_store.data;
- clear_page(vdso);
-
- err = setup_vdso_page(vdso->rt_signal_retcode);
- if (err)
- panic("Cannot set signal return code, err: %x.", err);
+static int __init vdso_init(void)
+{
+ unsigned int i;
+
+ vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
+ vdso_pagelist =
+ kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
+ if (unlikely(vdso_pagelist == NULL)) {
+ pr_err("vdso: pagelist allocation failed\n");
+ return -ENOMEM;
+ }
- dcache_wb_range((unsigned long)vdso, (unsigned long)vdso + 16);
+ for (i = 0; i < vdso_pages; i++) {
+ struct page *pg;
- vunmap(vdso);
+ pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
+ vdso_pagelist[i] = pg;
+ }
+ vdso_pagelist[i] = virt_to_page(vdso_data);
return 0;
}
-subsys_initcall(init_vdso);
+arch_initcall(vdso_init);
-int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp)
{
- int ret;
- unsigned long addr;
struct mm_struct *mm = current->mm;
+ unsigned long vdso_base, vdso_len;
+ int ret;
- mmap_write_lock(mm);
+ vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
- addr = get_unmapped_area(NULL, STACK_TOP, PAGE_SIZE, 0, 0);
- if (IS_ERR_VALUE(addr)) {
- ret = addr;
- goto up_fail;
+ mmap_write_lock(mm);
+ vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
+ if (IS_ERR_VALUE(vdso_base)) {
+ ret = vdso_base;
+ goto end;
}
- ret = install_special_mapping(
- mm,
- addr,
- PAGE_SIZE,
- VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- &vdso_page);
- if (ret)
- goto up_fail;
+ /*
+ * Put vDSO base into mm struct. We need to do this before calling
+ * install_special_mapping or the perf counter mmap tracking code
+ * will fail to recognise it as a vDSO (since arch_vma_name fails).
+ */
+ mm->context.vdso = (void *)vdso_base;
+
+ ret =
+ install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+ (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
+ vdso_pagelist);
+
+ if (unlikely(ret)) {
+ mm->context.vdso = NULL;
+ goto end;
+ }
- mm->context.vdso = (void *)addr;
+ vdso_base += (vdso_pages << PAGE_SHIFT);
+ ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+ (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
-up_fail:
+ if (unlikely(ret))
+ mm->context.vdso = NULL;
+end:
mmap_write_unlock(mm);
return ret;
}
const char *arch_vma_name(struct vm_area_struct *vma)
{
- if (vma->vm_mm == NULL)
- return NULL;
-
- if (vma->vm_start == (long)vma->vm_mm->context.vdso)
+ if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
return "[vdso]";
- else
- return NULL;
+ if (vma->vm_mm && (vma->vm_start ==
+ (long)vma->vm_mm->context.vdso + PAGE_SIZE))
+ return "[vdso_data]";
+ return NULL;
}
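
The resulting user-space layout is two back-to-back special mappings — the vDSO text pages followed by one read-only data page — which is what arch_vma_name() above distinguishes by offset (a rough picture, assuming 4 KiB pages):

    vdso_base                          vdso_base + (vdso_pages << PAGE_SHIFT)
    |       "[vdso]" text pages       |       "[vdso_data]" (1 page)        |
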
diff --git a/arch/arm/mach-efm32/Makefile b/arch/csky/kernel/vdso/.gitignore
index dede3fa55a76..3a19def868ec 100644
--- a/arch/arm/mach-efm32/Makefile
+++ b/arch/csky/kernel/vdso/.gitignore
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += dtmachine.o
+vdso.lds
+*.tmp
+vdso-syms.S
diff --git a/arch/csky/kernel/vdso/Makefile b/arch/csky/kernel/vdso/Makefile
new file mode 100644
index 000000000000..0b6909f10667
--- /dev/null
+++ b/arch/csky/kernel/vdso/Makefile
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the inclusion of generic Makefile.
+ARCH_REL_TYPE_ABS := R_CKCORE_ADDR32|R_CKCORE_JUMP_SLOT
+include $(srctree)/lib/vdso/Makefile
+
+# Symbols present in the vdso
+vdso-syms += rt_sigreturn
+vdso-syms += vgettimeofday
+
+# Files to link into the vdso
+obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
+
+ifneq ($(c-gettimeofday-y),)
+ CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
+endif
+
+ccflags-y := -fno-stack-protector -DBUILD_VDSO32
+
+# Build rules
+targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
+obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+
+obj-y += vdso.o vdso-syms.o
+CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+# Disable gcov profiling for VDSO code
+GCOV_PROFILE := n
+KCOV_INSTRUMENT := n
+
+# Force dependency
+$(obj)/vdso.o: $(obj)/vdso.so
+
+SYSCFLAGS_vdso.so.dbg = $(c_flags)
+$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold)
+SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
+ -Wl,--build-id=sha1 -Wl,--hash-style=both
+
+$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE
+ $(call if_changed,so2s)
+
+# strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+# actual build commands
+# The DSO images are built using a special linker script
+# Make sure only to export the intended __vdso_xxx symbol offsets.
+quiet_cmd_vdsold = VDSOLD $@
+ cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
+ -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
+ $(CROSS_COMPILE)objcopy \
+ $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
+ rm $@.tmp
+
+# Extracts symbol offsets from the VDSO, converting them into an assembly file
+# that contains the same symbols at the same offsets.
+quiet_cmd_so2s = SO2S $@
+ cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@
+
+# install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+
+vdso.so: $(obj)/vdso.so.dbg
+ @mkdir -p $(MODLIB)/vdso
+ $(call cmd,vdso_install)
+
+vdso_install: vdso.so
diff --git a/arch/csky/kernel/vdso/note.S b/arch/csky/kernel/vdso/note.S
new file mode 100644
index 000000000000..2a956c942211
--- /dev/null
+++ b/arch/csky/kernel/vdso/note.S
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/elfnote.h>
+#include <linux/version.h>
+
+ELFNOTE_START(Linux, 0, "a")
+ .long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/csky/kernel/vdso/rt_sigreturn.S b/arch/csky/kernel/vdso/rt_sigreturn.S
new file mode 100644
index 000000000000..0a6bd1216118
--- /dev/null
+++ b/arch/csky/kernel/vdso/rt_sigreturn.S
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+#include <abi/vdso.h>
+
+ .text
+ENTRY(__vdso_rt_sigreturn)
+ .cfi_startproc
+ .cfi_signal_frame
+ SET_SYSCALL_ID
+ trap 0
+ .cfi_endproc
+ENDPROC(__vdso_rt_sigreturn)
diff --git a/arch/csky/kernel/vdso/so2s.sh b/arch/csky/kernel/vdso/so2s.sh
new file mode 100755
index 000000000000..69da3d529c6d
--- /dev/null
+++ b/arch/csky/kernel/vdso/so2s.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+
+sed 's!\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_5.10\)*!.global \2\n.set \2,0x\1!' \
+| grep '^\.'
diff --git a/arch/csky/kernel/vdso/vdso.S b/arch/csky/kernel/vdso/vdso.S
new file mode 100644
index 000000000000..5162ca069494
--- /dev/null
+++ b/arch/csky/kernel/vdso/vdso.S
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+ __PAGE_ALIGNED_DATA
+
+ .globl vdso_start, vdso_end
+ .balign PAGE_SIZE
+vdso_start:
+ .incbin "arch/csky/kernel/vdso/vdso.so"
+ .balign PAGE_SIZE
+vdso_end:
+
+ .previous
diff --git a/arch/csky/kernel/vdso/vdso.lds.S b/arch/csky/kernel/vdso/vdso.lds.S
new file mode 100644
index 000000000000..590a6c79fff7
--- /dev/null
+++ b/arch/csky/kernel/vdso/vdso.lds.S
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <asm/page.h>
+
+OUTPUT_ARCH(csky)
+
+SECTIONS
+{
+ PROVIDE(_vdso_data = . + PAGE_SIZE);
+ . = SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+
+ . = 0x800;
+ .text : { *(.text .text.*) } :text
+
+ .data : {
+ *(.got.plt) *(.got)
+ *(.data .data.* .gnu.linkonce.d.*)
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ }
+}
+
+PHDRS
+{
+ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
+
+VERSION
+{
+ LINUX_5.10 {
+ global:
+ __vdso_rt_sigreturn;
+ __vdso_clock_gettime;
+ __vdso_clock_gettime64;
+ __vdso_gettimeofday;
+ __vdso_clock_getres;
+ local: *;
+ };
+}
diff --git a/arch/csky/kernel/vdso/vgettimeofday.c b/arch/csky/kernel/vdso/vgettimeofday.c
new file mode 100644
index 000000000000..da491832c098
--- /dev/null
+++ b/arch/csky/kernel/vdso/vgettimeofday.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/time.h>
+#include <linux/types.h>
+
+int __vdso_clock_gettime(clockid_t clock,
+ struct old_timespec32 *ts)
+{
+ return __cvdso_clock_gettime32(clock, ts);
+}
+
+int __vdso_clock_gettime64(clockid_t clock,
+ struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime(clock, ts);
+}
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+{
+ return __cvdso_gettimeofday(tv, tz);
+}
+
+int __vdso_clock_getres(clockid_t clock_id,
+ struct old_timespec32 *res)
+{
+ return __cvdso_clock_getres_time32(clock_id, res);
+}
diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S
index f03033e17c29..e8b1a4a49798 100644
--- a/arch/csky/kernel/vmlinux.lds.S
+++ b/arch/csky/kernel/vmlinux.lds.S
@@ -33,6 +33,7 @@ SECTIONS
.text : AT(ADDR(.text) - LOAD_OFFSET) {
_text = .;
+ VBR_BASE
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
TEXT_TEXT
@@ -104,7 +105,6 @@ SECTIONS
EXCEPTION_TABLE(L1_CACHE_BYTES)
BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES)
- VBR_BASE
_end = . ;
STABS_DEBUG
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index 081b178b41b1..1482de56f4f7 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -1,29 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
-#include <linux/signal.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/version.h>
-#include <linux/vt_kern.h>
#include <linux/extable.h>
-#include <linux/uaccess.h>
-#include <linux/perf_event.h>
#include <linux/kprobes.h>
-
-#include <asm/hardirq.h>
-#include <asm/mmu_context.h>
-#include <asm/traps.h>
-#include <asm/page.h>
+#include <linux/mmu_context.h>
+#include <linux/perf_event.h>
int fixup_exception(struct pt_regs *regs)
{
@@ -39,180 +20,287 @@ int fixup_exception(struct pt_regs *regs)
return 0;
}
-/*
- * This routine handles page faults. It determines the address,
- * and the problem, and then passes it off to one of the appropriate
- * routines.
- */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
- unsigned long mmu_meh)
+static inline bool is_write(struct pt_regs *regs)
{
- struct vm_area_struct *vma = NULL;
- struct task_struct *tsk = current;
- struct mm_struct *mm = tsk->mm;
- int si_code;
- int fault;
- unsigned long address = mmu_meh & PAGE_MASK;
+ switch (trap_no(regs)) {
+ case VEC_TLBINVALIDS:
+ return true;
+ case VEC_TLBMODIFIED:
+ return true;
+ }
- if (kprobe_page_fault(regs, tsk->thread.trap_no))
+ return false;
+}
+
+#ifdef CONFIG_CPU_HAS_LDSTEX
+static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
+{
+ return;
+}
+#else
+extern unsigned long csky_cmpxchg_ldw;
+extern unsigned long csky_cmpxchg_stw;
+static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
+{
+ if (trap_no(regs) != VEC_TLBMODIFIED)
return;
- si_code = SEGV_MAPERR;
+ if (instruction_pointer(regs) == csky_cmpxchg_stw)
+ instruction_pointer_set(regs, csky_cmpxchg_ldw);
+ return;
+}
+#endif
+
+static inline void no_context(struct pt_regs *regs, unsigned long addr)
+{
+ current->thread.trap_no = trap_no(regs);
+
+ /* Are we prepared to handle this kernel fault? */
+ if (fixup_exception(regs))
+ return;
-#ifndef CONFIG_CPU_HAS_TLBI
/*
- * We fault-in kernel-space virtual memory on-demand. The
- * 'reference' page table is init_mm.pgd.
- *
- * NOTE! We MUST NOT take any locks for this case. We may
- * be in an interrupt or a critical region, and should
- * only copy the information from the master page table,
- * nothing more.
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
*/
- if (unlikely(address >= VMALLOC_START) &&
- unlikely(address <= VMALLOC_END)) {
- /*
- * Synchronize this task's top level page-table
- * with the 'reference' page table.
- *
- * Do _not_ use "tsk" here. We might be inside
- * an interrupt in the middle of a task switch..
- */
- int offset = pgd_index(address);
- pgd_t *pgd, *pgd_k;
- pud_t *pud, *pud_k;
- pmd_t *pmd, *pmd_k;
- pte_t *pte_k;
+ bust_spinlocks(1);
+ pr_alert("Unable to handle kernel paging request at virtual "
+ "addr 0x%08lx, pc: 0x%08lx\n", addr, regs->pc);
+ die(regs, "Oops");
+ do_exit(SIGKILL);
+}
- unsigned long pgd_base;
+static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
+{
+ current->thread.trap_no = trap_no(regs);
- pgd_base = (unsigned long)__va(get_pgd());
- pgd = (pgd_t *)pgd_base + offset;
- pgd_k = init_mm.pgd + offset;
+ if (fault & VM_FAULT_OOM) {
+ /*
+ * We ran out of memory, call the OOM killer, and return the userspace
+ * (which will retry the fault, or kill us if we got oom-killed).
+ */
+ if (!user_mode(regs)) {
+ no_context(regs, addr);
+ return;
+ }
+ pagefault_out_of_memory();
+ return;
+ } else if (fault & VM_FAULT_SIGBUS) {
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs)) {
+ no_context(regs, addr);
+ return;
+ }
+ do_trap(regs, SIGBUS, BUS_ADRERR, addr);
+ return;
+ }
+ BUG();
+}
- if (!pgd_present(*pgd_k))
- goto no_context;
- set_pgd(pgd, *pgd_k);
+static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+{
+ /*
+ * Something tried to access memory that isn't in our memory map.
+ * Fix it, but check if it's kernel or user first.
+ */
+ mmap_read_unlock(mm);
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs)) {
+ do_trap(regs, SIGSEGV, code, addr);
+ return;
+ }
- pud = (pud_t *)pgd;
- pud_k = (pud_t *)pgd_k;
- if (!pud_present(*pud_k))
- goto no_context;
+ no_context(regs, addr);
+}
- pmd = pmd_offset(pud, address);
- pmd_k = pmd_offset(pud_k, address);
- if (!pmd_present(*pmd_k))
- goto no_context;
- set_pmd(pmd, *pmd_k);
+static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
+{
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+ int offset;
- pte_k = pte_offset_kernel(pmd_k, address);
- if (!pte_present(*pte_k))
- goto no_context;
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs)) {
+ do_trap(regs, SIGSEGV, code, addr);
return;
}
-#endif
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/*
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "tsk" here. We might be inside
+ * an interrupt in the middle of a task switch..
*/
- if (in_atomic() || !mm)
- goto bad_area_nosemaphore;
+ offset = pgd_index(addr);
- mmap_read_lock(mm);
- vma = find_vma(mm, address);
- if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (expand_stack(vma, address))
- goto bad_area;
- /*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
-good_area:
- si_code = SEGV_ACCERR;
+ pgd = get_pgd() + offset;
+ pgd_k = init_mm.pgd + offset;
- if (write) {
+ if (!pgd_present(*pgd_k)) {
+ no_context(regs, addr);
+ return;
+ }
+ set_pgd(pgd, *pgd_k);
+
+ pud = (pud_t *)pgd;
+ pud_k = (pud_t *)pgd_k;
+ if (!pud_present(*pud_k)) {
+ no_context(regs, addr);
+ return;
+ }
+
+ pmd = pmd_offset(pud, addr);
+ pmd_k = pmd_offset(pud_k, addr);
+ if (!pmd_present(*pmd_k)) {
+ no_context(regs, addr);
+ return;
+ }
+ set_pmd(pmd, *pmd_k);
+
+ pte_k = pte_offset_kernel(pmd_k, addr);
+ if (!pte_present(*pte_k)) {
+ no_context(regs, addr);
+ return;
+ }
+
+ flush_tlb_one(addr);
+}
+
+static inline bool access_error(struct pt_regs *regs, struct vm_area_struct *vma)
+{
+ if (is_write(regs)) {
if (!(vma->vm_flags & VM_WRITE))
- goto bad_area;
+ return true;
} else {
if (unlikely(!vma_is_accessible(vma)))
- goto bad_area;
+ return true;
}
+ return false;
+}
+
+/*
+ * This routine handles page faults. It determines the address and the
+ * problem, and then passes it off to one of the appropriate routines.
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs)
+{
+ struct task_struct *tsk;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+ unsigned long addr = read_mmu_entryhi() & PAGE_MASK;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
+ int code = SEGV_MAPERR;
+ vm_fault_t fault;
+
+ tsk = current;
+ mm = tsk->mm;
+
+ csky_cmpxchg_fixup(regs);
+
+ if (kprobe_page_fault(regs, tsk->thread.trap_no))
+ return;
/*
- * If for any reason at all we couldn't handle the fault,
- * make sure we exit gracefully rather than endlessly redo
- * the fault.
+ * Fault-in kernel-space virtual memory on-demand.
+ * The 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
*/
- fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0,
- regs);
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- else if (fault & VM_FAULT_SIGSEGV)
- goto bad_area;
- BUG();
+ if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
+ vmalloc_fault(regs, code, addr);
+ return;
}
- mmap_read_unlock(mm);
- return;
+
+ /* Enable interrupts if they were enabled in the parent context. */
+ if (likely(regs->sr & BIT(6)))
+ local_irq_enable();
/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
+ * If we're in an interrupt, have no user context, or are running
+ * in an atomic region, then we must not take the fault.
*/
-bad_area:
- mmap_read_unlock(mm);
-
-bad_area_nosemaphore:
- /* User mode accesses just cause a SIGSEGV */
- if (user_mode(regs)) {
- tsk->thread.trap_no = trap_no(regs);
- force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+ if (unlikely(faulthandler_disabled() || !mm)) {
+ no_context(regs, addr);
return;
}
-no_context:
- tsk->thread.trap_no = trap_no(regs);
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
- /* Are we prepared to handle this kernel fault? */
- if (fixup_exception(regs))
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+
+ if (is_write(regs))
+ flags |= FAULT_FLAG_WRITE;
+retry:
+ mmap_read_lock(mm);
+ vma = find_vma(mm, addr);
+ if (unlikely(!vma)) {
+ bad_area(regs, mm, code, addr);
+ return;
+ }
+ if (likely(vma->vm_start <= addr))
+ goto good_area;
+ if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
+ bad_area(regs, mm, code, addr);
return;
+ }
+ if (unlikely(expand_stack(vma, addr))) {
+ bad_area(regs, mm, code, addr);
+ return;
+ }
/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it.
*/
- bust_spinlocks(1);
- pr_alert("Unable to handle kernel paging request at virtual "
- "address 0x%08lx, pc: 0x%08lx\n", address, regs->pc);
- die(regs, "Oops");
+good_area:
+ code = SEGV_ACCERR;
+
+ if (unlikely(access_error(regs, vma))) {
+ bad_area(regs, mm, code, addr);
+ return;
+ }
-out_of_memory:
- tsk->thread.trap_no = trap_no(regs);
+ /*
+ * If for any reason at all we could not handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(vma, addr, flags, regs);
/*
- * We ran out of memory, call the OOM killer, and return the userspace
- * (which will retry the fault, or kill us if we got oom-killed).
+ * If we need to retry but a fatal signal is pending, handle the
+ * signal first. We do not need to release the mmap_lock because it
+ * would already be released in __lock_page_or_retry in mm/filemap.c.
*/
- pagefault_out_of_memory();
- return;
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ no_context(regs, addr);
+ return;
+ }
-do_sigbus:
- tsk->thread.trap_no = trap_no(regs);
+ if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
+ flags |= FAULT_FLAG_TRIED;
- mmap_read_unlock(mm);
+ /*
+ * No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+ goto retry;
+ }
- /* Kernel mode? Handle exceptions or die */
- if (!user_mode(regs))
- goto no_context;
+ mmap_read_unlock(mm);
- force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ mm_fault_error(regs, addr, fault);
+ return;
+ }
+ return;
}
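
The rewritten handler above replaces the old goto-label flow with small early-returning helpers and a bounded retry: when handle_mm_fault() reports VM_FAULT_RETRY it sets FAULT_FLAG_TRIED and takes the mmap lock once more. A stand-alone sketch of that retry shape, with illustrative names rather than the kernel's:

#include <stdio.h>

#define FLAG_ALLOW_RETRY 0x1
#define FLAG_TRIED       0x2

enum result { OK, RETRY, ERROR };

/* Pretend the first attempt always asks for a retry. */
static enum result try_fault(unsigned int flags)
{
	return (flags & FLAG_TRIED) ? OK : RETRY;
}

int main(void)
{
	unsigned int flags = FLAG_ALLOW_RETRY;
	enum result r;

retry:
	r = try_fault(flags);
	if (r == RETRY && (flags & FLAG_ALLOW_RETRY)) {
		flags |= FLAG_TRIED;	/* only one extra attempt is allowed */
		goto retry;
	}

	printf("final result: %d\n", r);
	return 0;
}
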
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index af627128314f..894050a8ce09 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -28,9 +28,15 @@
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/tlb.h>
+#include <asm/cacheflush.h>
+
+#define PTRS_KERN_TABLE \
+ ((PTRS_PER_PGD - USER_PTRS_PER_PGD) * PTRS_PER_PTE)
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
+pte_t kernel_pte_tables[PTRS_KERN_TABLE] __page_aligned_bss;
+
EXPORT_SYMBOL(invalid_pte_table);
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss;
@@ -80,9 +86,9 @@ void __init mem_init(void)
#ifdef CONFIG_HIGHMEM
unsigned long tmp;
- max_mapnr = highend_pfn;
+ set_max_mapnr(highend_pfn - ARCH_PFN_OFFSET);
#else
- max_mapnr = max_low_pfn;
+ set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
#endif
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
@@ -104,24 +110,9 @@ void __init mem_init(void)
mem_init_print_info(NULL);
}
-extern char __init_begin[], __init_end[];
-
void free_initmem(void)
{
- unsigned long addr;
-
- addr = (unsigned long) &__init_begin;
-
- while (addr < (unsigned long) &__init_end) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages_inc();
- addr += PAGE_SIZE;
- }
-
- pr_info("Freeing unused kernel memory: %dk freed\n",
- ((unsigned int)&__init_end - (unsigned int)&__init_begin) >> 10);
+ free_initmem_default(-1);
}
void pgd_init(unsigned long *p)
@@ -130,20 +121,35 @@ void pgd_init(unsigned long *p)
for (i = 0; i < PTRS_PER_PGD; i++)
p[i] = __pa(invalid_pte_table);
+
+ flush_tlb_all();
+ local_icache_inv_all(NULL);
}
-void __init pre_mmu_init(void)
+void __init mmu_init(unsigned long min_pfn, unsigned long max_pfn)
{
- /*
- * Setup page-table and enable TLB-hardrefill
- */
+ int i;
+
+ for (i = 0; i < USER_PTRS_PER_PGD; i++)
+ swapper_pg_dir[i].pgd = __pa(invalid_pte_table);
+
+ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++)
+ swapper_pg_dir[i].pgd =
+ __pa(kernel_pte_tables + (PTRS_PER_PTE * (i - USER_PTRS_PER_PGD)));
+
+ for (i = 0; i < PTRS_KERN_TABLE; i++)
+ set_pte(&kernel_pte_tables[i], __pte(_PAGE_GLOBAL));
+
+ for (i = min_pfn; i < max_pfn; i++)
+ set_pte(&kernel_pte_tables[i - PFN_DOWN(va_pa_offset)], pfn_pte(i, PAGE_KERNEL));
+
flush_tlb_all();
- pgd_init((unsigned long *)swapper_pg_dir);
- TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
- TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);
+ local_icache_inv_all(NULL);
/* Setup page mask to 4k */
write_mmu_pagemask(0);
+
+ setup_pgd(swapper_pg_dir, 0);
}
void __init fixrange_init(unsigned long start, unsigned long end,
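
mmu_init() above pre-wires the top-level table: the user slots all share invalid_pte_table, while the kernel slots each point at a statically allocated second-level table carved out of kernel_pte_tables. A stand-alone sketch of that partitioning, with made-up table sizes:

#include <stdio.h>

#define PTRS_PER_PGD      8
#define USER_PTRS_PER_PGD 4
#define PTRS_PER_PTE      4
#define PTRS_KERN_TABLE   ((PTRS_PER_PGD - USER_PTRS_PER_PGD) * PTRS_PER_PTE)

static unsigned long invalid_pte_table[PTRS_PER_PTE];
static unsigned long kernel_pte_tables[PTRS_KERN_TABLE];
static unsigned long *swapper_pg_dir[PTRS_PER_PGD];

int main(void)
{
	int i;

	/* user half: every entry shares the invalid table */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = invalid_pte_table;

	/* kernel half: each entry gets its own static second-level table */
	for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] =
			kernel_pte_tables + PTRS_PER_PTE * (i - USER_PTRS_PER_PGD);

	for (i = 0; i < PTRS_PER_PGD; i++)
		printf("pgd[%d] -> %p\n", i, (void *)swapper_pg_dir[i]);
	return 0;
}
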
diff --git a/arch/csky/mm/tlb.c b/arch/csky/mm/tlb.c
index ed1512381112..9234c5e5ceaf 100644
--- a/arch/csky/mm/tlb.c
+++ b/arch/csky/mm/tlb.c
@@ -24,7 +24,13 @@ void flush_tlb_all(void)
void flush_tlb_mm(struct mm_struct *mm)
{
#ifdef CONFIG_CPU_HAS_TLBI
- asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm)));
+ sync_is();
+ asm volatile(
+ "tlbi.asids %0 \n"
+ "sync.i \n"
+ :
+ : "r" (cpu_asid(mm))
+ : "memory");
#else
tlb_invalid_all();
#endif
@@ -53,11 +59,17 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
end &= TLB_ENTRY_SIZE_MASK;
#ifdef CONFIG_CPU_HAS_TLBI
+ sync_is();
while (start < end) {
- asm volatile("tlbi.vas %0"::"r"(start | newpid));
+ asm volatile(
+ "tlbi.vas %0 \n"
+ :
+ : "r" (start | newpid)
+ : "memory");
+
start += 2*PAGE_SIZE;
}
- sync_is();
+ asm volatile("sync.i\n");
#else
{
unsigned long flags, oldpid;
@@ -87,11 +99,17 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
end &= TLB_ENTRY_SIZE_MASK;
#ifdef CONFIG_CPU_HAS_TLBI
+ sync_is();
while (start < end) {
- asm volatile("tlbi.vaas %0"::"r"(start));
+ asm volatile(
+ "tlbi.vaas %0 \n"
+ :
+ : "r" (start)
+ : "memory");
+
start += 2*PAGE_SIZE;
}
- sync_is();
+ asm volatile("sync.i\n");
#else
{
unsigned long flags, oldpid;
@@ -121,8 +139,13 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
addr &= TLB_ENTRY_SIZE_MASK;
#ifdef CONFIG_CPU_HAS_TLBI
- asm volatile("tlbi.vas %0"::"r"(addr | newpid));
sync_is();
+ asm volatile(
+ "tlbi.vas %0 \n"
+ "sync.i \n"
+ :
+ : "r" (addr | newpid)
+ : "memory");
#else
{
int oldpid, idx;
@@ -147,8 +170,13 @@ void flush_tlb_one(unsigned long addr)
addr &= TLB_ENTRY_SIZE_MASK;
#ifdef CONFIG_CPU_HAS_TLBI
- asm volatile("tlbi.vaas %0"::"r"(addr));
sync_is();
+ asm volatile(
+ "tlbi.vaas %0 \n"
+ "sync.i \n"
+ :
+ : "r" (addr)
+ : "memory");
#else
{
int oldpid, idx;
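
The TLBI hunks above converge on one pattern: synchronize once before the invalidate loop, give each tlbi a "memory" clobber, and issue an instruction sync after the loop. A compilable sketch of that shape, with empty compiler barriers standing in for the csky sync/tlbi instructions:

static inline void sync_before(void)
{
	asm volatile("" ::: "memory");	/* stands in for sync.is */
}

static inline void invalidate_one(unsigned long addr)
{
	/* stands in for "tlbi.vas %0"; operand and clobber mirror the diff */
	asm volatile("" : : "r" (addr) : "memory");
}

static inline void sync_after(void)
{
	asm volatile("" ::: "memory");	/* stands in for sync.i */
}

void flush_range_sketch(unsigned long start, unsigned long end)
{
	sync_before();
	while (start < end) {
		invalidate_one(start);
		start += 2 * 4096;	/* two 4 KiB pages per TLB entry, as above */
	}
	sync_after();
}

int main(void)
{
	flush_range_sketch(0x1000, 0x9000);
	return 0;
}
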
diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
index 85e60509f0a8..d4b53af657c8 100644
--- a/arch/h8300/kernel/asm-offsets.c
+++ b/arch/h8300/kernel/asm-offsets.c
@@ -63,6 +63,9 @@ int main(void)
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE, thread_info, preempt_count);
+#ifdef CONFIG_PREEMPTION
+ DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+#endif
return 0;
}
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index bc1364db58fe..46b1342ce515 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -112,7 +112,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childregs = (struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
memset(childregs, 0, sizeof(struct pt_regs));
childregs->retpc = (unsigned long) ret_from_kernel_thread;
childregs->er4 = topstk; /* arg */
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 6e00c16a36b5..44a409967af1 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -7,7 +7,6 @@ config HEXAGON
select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_NO_PREEMPT
- select HAVE_OPROFILE
# Other pending projects/to-do items.
# select HAVE_REGS_AND_STACK_ACCESS_API
# select HAVE_HW_BREAKPOINT if PERF_EVENTS
diff --git a/arch/hexagon/configs/comet_defconfig b/arch/hexagon/configs/comet_defconfig
index e324f65f41e7..f19ae2ab0aaa 100644
--- a/arch/hexagon/configs/comet_defconfig
+++ b/arch/hexagon/configs/comet_defconfig
@@ -1,7 +1,6 @@
CONFIG_SMP=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=0
CONFIG_HZ_100=y
-CONFIG_EXPERIMENTAL=y
CONFIG_CROSS_COMPILE="hexagon-"
CONFIG_LOCALVERSION="-smp"
# CONFIG_LOCALVERSION_AUTO is not set
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index 6a980cba7b29..c61165c99ae0 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -73,7 +73,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
sizeof(*ss));
ss->lr = (unsigned long)ret_from_fork;
p->thread.switch_sp = ss;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
memset(childregs, 0, sizeof(struct pt_regs));
/* r24 <- fn, r25 <- arg */
ss->r24 = usp;
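
Both copy_thread() hunks above widen the kernel-thread test so io_uring worker threads get the same zeroed child frame. A tiny sketch of testing two flag bits with a single mask; the flag values here are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define PF_KTHREAD   0x00200000u
#define PF_IO_WORKER 0x00000010u

/* true for kernel threads and io_uring worker threads alike */
static bool needs_kernel_frame(unsigned int flags)
{
	return (flags & (PF_KTHREAD | PF_IO_WORKER)) != 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       needs_kernel_frame(PF_KTHREAD),
	       needs_kernel_frame(PF_IO_WORKER),
	       needs_kernel_frame(0));
	return 0;
}
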
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index eed59ec32657..2ad7a8d29fcc 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -24,7 +24,6 @@ config IA64
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_EXIT_THREAD
select HAVE_IDE
- select HAVE_OPROFILE
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_FTRACE_MCOUNT_RECORD
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 703b1c4f6d12..467b7e7f967c 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -14,7 +14,6 @@
KBUILD_DEFCONFIG := generic_defconfig
NM := $(CROSS_COMPILE)nm -B
-READELF := $(CROSS_COMPILE)readelf
CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__
@@ -52,7 +51,6 @@ core-y += arch/ia64/kernel/ arch/ia64/mm/
core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/
drivers-y += arch/ia64/pci/ arch/ia64/hp/common/
-drivers-$(CONFIG_OPROFILE) += arch/ia64/oprofile/
PHONY += compressed check
@@ -69,7 +67,7 @@ vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
unwcheck: vmlinux
- -$(Q)READELF=$(READELF) $(PYTHON) $(srctree)/arch/ia64/scripts/unwcheck.py $<
+ -$(Q)READELF=$(READELF) $(PYTHON3) $(srctree)/arch/ia64/scripts/unwcheck.py $<
archclean:
@@ -86,9 +84,3 @@ define archhelp
echo ' install - Install compressed kernel image'
echo '* unwcheck - Check vmlinux for invalid unwind info'
endef
-
-archprepare: make_nr_irqs_h
-PHONY += make_nr_irqs_h
-
-make_nr_irqs_h:
- $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig
index cfed5ed89301..c409756b5396 100644
--- a/arch/ia64/configs/bigsur_defconfig
+++ b/arch/ia64/configs/bigsur_defconfig
@@ -2,7 +2,6 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
diff --git a/arch/ia64/include/asm/efi.h b/arch/ia64/include/asm/efi.h
new file mode 100644
index 000000000000..6a4a50d8f19a
--- /dev/null
+++ b/arch/ia64/include/asm/efi.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_EFI_H
+#define _ASM_EFI_H
+
+typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg);
+
+void *efi_get_pal_addr(void);
+void efi_map_pal_code(void);
+void efi_memmap_walk(efi_freemem_callback_t, void *);
+void efi_memmap_walk_uc(efi_freemem_callback_t, void *);
+void efi_gettimeofday(struct timespec64 *ts);
+
+#endif
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
index f6ff95b4ecb1..5d267132f8cb 100644
--- a/arch/ia64/include/asm/hw_irq.h
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -69,7 +69,6 @@ extern int ia64_last_device_vector;
#define IA64_NUM_DEVICE_VECTORS (IA64_LAST_DEVICE_VECTOR - IA64_FIRST_DEVICE_VECTOR + 1)
#define IA64_MCA_RENDEZ_VECTOR 0xe8 /* MCA rendez interrupt */
-#define IA64_PERFMON_VECTOR 0xee /* performance monitor interrupt vector */
#define IA64_TIMER_VECTOR 0xef /* use highest-prio group 15 interrupt for timer */
#define IA64_MCA_WAKEUP_VECTOR 0xf0 /* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */
#define IA64_IPI_LOCAL_TLB_FLUSH 0xfc /* SMP flush local TLB */
diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h
index 5acf52e90872..0eccf33dfe8b 100644
--- a/arch/ia64/include/asm/irq.h
+++ b/arch/ia64/include/asm/irq.h
@@ -14,7 +14,9 @@
#include <linux/types.h>
#include <linux/cpumask.h>
-#include <generated/nr-irqs.h>
+#include <asm/native/irq.h>
+
+#define NR_IRQS IA64_NATIVE_NR_IRQS
static __inline__ int
irq_canonicalize (int irq)
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index 726df17f1b51..05805249296c 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -14,13 +14,10 @@
#if !defined(__ASSEMBLY__)
-#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/threads.h>
#include <linux/types.h>
-
-#include <asm/param.h>
-#include <asm/sal.h>
-#include <asm/processor.h>
-#include <asm/mca_asm.h>
+#include <asm/ptrace.h>
#define IA64_MCA_RENDEZ_TIMEOUT (20 * 1000) /* value in milliseconds - 20 seconds */
@@ -83,7 +80,7 @@ struct ia64_sal_os_state {
/* common */
unsigned long sal_ra; /* Return address in SAL, physical */
unsigned long sal_gp; /* GP of the SAL - physical */
- pal_min_state_area_t *pal_min_state; /* from R17. physical in asm, virtual in C */
+ struct pal_min_state_area *pal_min_state; /* from R17. physical in asm, virtual in C */
/* Previous values of IA64_KR(CURRENT) and IA64_KR(CURRENT_STACK).
* Note: if the MCA/INIT recovery code wants to resume to a new context
* then it must change these values to reflect the new kernel stack.
diff --git a/arch/ia64/include/asm/pal.h b/arch/ia64/include/asm/pal.h
index f9d2b3b2dfad..b1d87955e8cc 100644
--- a/arch/ia64/include/asm/pal.h
+++ b/arch/ia64/include/asm/pal.h
@@ -750,7 +750,7 @@ typedef union pal_mc_error_info_u {
* for PAL.
*/
-typedef struct pal_min_state_area_s {
+struct pal_min_state_area {
u64 pmsa_nat_bits; /* nat bits for saved GRs */
u64 pmsa_gr[15]; /* GR1 - GR15 */
u64 pmsa_bank0_gr[16]; /* GR16 - GR31 */
@@ -766,7 +766,7 @@ typedef struct pal_min_state_area_s {
u64 pmsa_xfs; /* previous ifs */
u64 pmsa_br1; /* branch register 1 */
u64 pmsa_reserved[70]; /* pal_min_state_area should total to 1KB */
-} pal_min_state_area_t;
+};
struct ia64_pal_retval {
diff --git a/arch/ia64/include/asm/perfmon.h b/arch/ia64/include/asm/perfmon.h
deleted file mode 100644
index e0545869cc8c..000000000000
--- a/arch/ia64/include/asm/perfmon.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2001-2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- */
-#ifndef _ASM_IA64_PERFMON_H
-#define _ASM_IA64_PERFMON_H
-
-#include <uapi/asm/perfmon.h>
-
-
-extern long perfmonctl(int fd, int cmd, void *arg, int narg);
-
-typedef struct {
- void (*handler)(int irq, void *arg, struct pt_regs *regs);
-} pfm_intr_handler_desc_t;
-
-extern void pfm_save_regs (struct task_struct *);
-extern void pfm_load_regs (struct task_struct *);
-
-extern void pfm_exit_thread(struct task_struct *);
-extern int pfm_use_debug_registers(struct task_struct *);
-extern int pfm_release_debug_registers(struct task_struct *);
-extern void pfm_syst_wide_update_task(struct task_struct *, unsigned long info, int is_ctxswin);
-extern void pfm_inherit(struct task_struct *task, struct pt_regs *regs);
-extern void pfm_init_percpu(void);
-extern void pfm_handle_work(void);
-extern int pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *h);
-extern int pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *h);
-
-
-
-/*
- * Reset PMD register flags
- */
-#define PFM_PMD_SHORT_RESET 0
-#define PFM_PMD_LONG_RESET 1
-
-typedef union {
- unsigned int val;
- struct {
- unsigned int notify_user:1; /* notify user program of overflow */
- unsigned int reset_ovfl_pmds:1; /* reset overflowed PMDs */
- unsigned int block_task:1; /* block monitored task on kernel exit */
- unsigned int mask_monitoring:1; /* mask monitors via PMCx.plm */
- unsigned int reserved:28; /* for future use */
- } bits;
-} pfm_ovfl_ctrl_t;
-
-typedef struct {
- unsigned char ovfl_pmd; /* index of overflowed PMD */
- unsigned char ovfl_notify; /* =1 if monitor requested overflow notification */
- unsigned short active_set; /* event set active at the time of the overflow */
- pfm_ovfl_ctrl_t ovfl_ctrl; /* return: perfmon controls to set by handler */
-
- unsigned long pmd_last_reset; /* last reset value of of the PMD */
- unsigned long smpl_pmds[4]; /* bitmask of other PMD of interest on overflow */
- unsigned long smpl_pmds_values[PMU_MAX_PMDS]; /* values for the other PMDs of interest */
- unsigned long pmd_value; /* current 64-bit value of the PMD */
- unsigned long pmd_eventid; /* eventid associated with PMD */
-} pfm_ovfl_arg_t;
-
-
-typedef struct {
- char *fmt_name;
- pfm_uuid_t fmt_uuid;
- size_t fmt_arg_size;
- unsigned long fmt_flags;
-
- int (*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg);
- int (*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size);
- int (*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg);
- int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp);
- int (*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
- int (*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
- int (*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs);
-
- struct list_head fmt_list;
-} pfm_buffer_fmt_t;
-
-extern int pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt);
-extern int pfm_unregister_buffer_fmt(pfm_uuid_t uuid);
-
-/*
- * perfmon interface exported to modules
- */
-extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
-extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
-extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
-extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
-
-/*
- * describe the content of the local_cpu_date->pfm_syst_info field
- */
-#define PFM_CPUINFO_SYST_WIDE 0x1 /* if set a system wide session exists */
-#define PFM_CPUINFO_DCR_PP 0x2 /* if set the system wide session has started */
-#define PFM_CPUINFO_EXCL_IDLE 0x4 /* the system wide session excludes the idle task */
-
-/*
- * sysctl control structure. visible to sampling formats
- */
-typedef struct {
- int debug; /* turn on/off debugging via syslog */
- int debug_ovfl; /* turn on/off debug printk in overflow handler */
- int fastctxsw; /* turn on/off fast (unsecure) ctxsw */
- int expert_mode; /* turn on/off value checking */
-} pfm_sysctl_t;
-extern pfm_sysctl_t pfm_sysctl;
-
-
-#endif /* _ASM_IA64_PERFMON_H */
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 779b6972aa84..9b4efe89e62d 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -517,12 +517,6 @@ extern struct page *zero_page_memmap_ptr;
__changed; \
})
#endif
-
-# ifdef CONFIG_VIRTUAL_MEM_MAP
- /* arch mem_map init routine is needed due to holes in a virtual mem_map */
- extern void memmap_init (unsigned long size, int nid, unsigned long zone,
- unsigned long start_pfn);
-# endif /* CONFIG_VIRTUAL_MEM_MAP */
# endif /* !__ASSEMBLY__ */
/*
diff --git a/arch/ia64/include/asm/sal.h b/arch/ia64/include/asm/sal.h
index 08f5b6aaed73..78f4f7b40435 100644
--- a/arch/ia64/include/asm/sal.h
+++ b/arch/ia64/include/asm/sal.h
@@ -385,7 +385,7 @@ typedef struct sal_processor_static_info {
fr : 1,
reserved : 58;
} valid;
- pal_min_state_area_t min_state_area;
+ struct pal_min_state_area min_state_area;
u64 br[8];
u64 cr[128];
u64 ar[128];
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 8d9da6f08a62..a15fe0809aae 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -23,7 +23,7 @@
* unmapping a portion of the virtual address space, these hooks are called according to
* the following template:
*
- * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM
+ * tlb <- tlb_gather_mmu(mm); // start unmap for address space MM
* {
* for each vma that needs a shootdown do {
* tlb_start_vma(tlb, vma);
@@ -36,7 +36,7 @@
* tlb_end_vma(tlb, vma);
* }
* }
- * tlb_finish_mmu(tlb, start, end); // finish unmap for address space MM
+ * tlb_finish_mmu(tlb); // finish unmap for address space MM
*/
#include <linux/mm.h>
#include <linux/pagemap.h>
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
index d69c979936d4..5d90307fd6e0 100644
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -54,7 +54,7 @@ extern void ia64_xchg_called_with_bad_pointer(void);
})
#define xchg(ptr, x) \
-((__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr))))
+({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
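
The xchg() change above wraps the cast in a GCC statement expression; one visible effect is that the macro's result can be ignored without tripping unused-value warnings on some compilers. A stand-alone sketch of the same macro shape, with a plain function standing in for the ia64 __xchg helper:

#include <stdio.h>

static unsigned long __do_xchg(unsigned long new, unsigned long *ptr)
{
	unsigned long old = *ptr;

	*ptr = new;
	return old;
}

/* statement expression: still yields a typed value, usable or discardable */
#define xchg_sketch(ptr, x) \
	({ (__typeof__(*(ptr))) __do_xchg((unsigned long)(x), (unsigned long *)(ptr)); })

int main(void)
{
	unsigned long v = 1;
	unsigned long old = xchg_sketch(&v, 5UL);

	xchg_sketch(&v, 7UL);	/* result discarded */
	printf("old=%lu new=%lu\n", old, v);
	return 0;
}
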
diff --git a/arch/ia64/include/uapi/asm/perfmon.h b/arch/ia64/include/uapi/asm/perfmon.h
deleted file mode 100644
index 017548365e5c..000000000000
--- a/arch/ia64/include/uapi/asm/perfmon.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2001-2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- */
-
-#ifndef _UAPI_ASM_IA64_PERFMON_H
-#define _UAPI_ASM_IA64_PERFMON_H
-
-/*
- * perfmon commands supported on all CPU models
- */
-#define PFM_WRITE_PMCS 0x01
-#define PFM_WRITE_PMDS 0x02
-#define PFM_READ_PMDS 0x03
-#define PFM_STOP 0x04
-#define PFM_START 0x05
-#define PFM_ENABLE 0x06 /* obsolete */
-#define PFM_DISABLE 0x07 /* obsolete */
-#define PFM_CREATE_CONTEXT 0x08
-#define PFM_DESTROY_CONTEXT 0x09 /* obsolete use close() */
-#define PFM_RESTART 0x0a
-#define PFM_PROTECT_CONTEXT 0x0b /* obsolete */
-#define PFM_GET_FEATURES 0x0c
-#define PFM_DEBUG 0x0d
-#define PFM_UNPROTECT_CONTEXT 0x0e /* obsolete */
-#define PFM_GET_PMC_RESET_VAL 0x0f
-#define PFM_LOAD_CONTEXT 0x10
-#define PFM_UNLOAD_CONTEXT 0x11
-
-/*
- * PMU model specific commands (may not be supported on all PMU models)
- */
-#define PFM_WRITE_IBRS 0x20
-#define PFM_WRITE_DBRS 0x21
-
-/*
- * context flags
- */
-#define PFM_FL_NOTIFY_BLOCK 0x01 /* block task on user level notifications */
-#define PFM_FL_SYSTEM_WIDE 0x02 /* create a system wide context */
-#define PFM_FL_OVFL_NO_MSG 0x80 /* do not post overflow/end messages for notification */
-
-/*
- * event set flags
- */
-#define PFM_SETFL_EXCL_IDLE 0x01 /* exclude idle task (syswide only) XXX: DO NOT USE YET */
-
-/*
- * PMC flags
- */
-#define PFM_REGFL_OVFL_NOTIFY 0x1 /* send notification on overflow */
-#define PFM_REGFL_RANDOM 0x2 /* randomize sampling interval */
-
-/*
- * PMD/PMC/IBR/DBR return flags (ignored on input)
- *
- * Those flags are used on output and must be checked in case EAGAIN is returned
- * by any of the calls using a pfarg_reg_t or pfarg_dbreg_t structure.
- */
-#define PFM_REG_RETFL_NOTAVAIL (1UL<<31) /* set if register is implemented but not available */
-#define PFM_REG_RETFL_EINVAL (1UL<<30) /* set if register entry is invalid */
-#define PFM_REG_RETFL_MASK (PFM_REG_RETFL_NOTAVAIL|PFM_REG_RETFL_EINVAL)
-
-#define PFM_REG_HAS_ERROR(flag) (((flag) & PFM_REG_RETFL_MASK) != 0)
-
-typedef unsigned char pfm_uuid_t[16]; /* custom sampling buffer identifier type */
-
-/*
- * Request structure used to define a context
- */
-typedef struct {
- pfm_uuid_t ctx_smpl_buf_id; /* which buffer format to use (if needed) */
- unsigned long ctx_flags; /* noblock/block */
- unsigned short ctx_nextra_sets; /* number of extra event sets (you always get 1) */
- unsigned short ctx_reserved1; /* for future use */
- int ctx_fd; /* return arg: unique identification for context */
- void *ctx_smpl_vaddr; /* return arg: virtual address of sampling buffer, is used */
- unsigned long ctx_reserved2[11];/* for future use */
-} pfarg_context_t;
-
-/*
- * Request structure used to write/read a PMC or PMD
- */
-typedef struct {
- unsigned int reg_num; /* which register */
- unsigned short reg_set; /* event set for this register */
- unsigned short reg_reserved1; /* for future use */
-
- unsigned long reg_value; /* initial pmc/pmd value */
- unsigned long reg_flags; /* input: pmc/pmd flags, return: reg error */
-
- unsigned long reg_long_reset; /* reset after buffer overflow notification */
- unsigned long reg_short_reset; /* reset after counter overflow */
-
- unsigned long reg_reset_pmds[4]; /* which other counters to reset on overflow */
- unsigned long reg_random_seed; /* seed value when randomization is used */
- unsigned long reg_random_mask; /* bitmask used to limit random value */
- unsigned long reg_last_reset_val;/* return: PMD last reset value */
-
- unsigned long reg_smpl_pmds[4]; /* which pmds are accessed when PMC overflows */
- unsigned long reg_smpl_eventid; /* opaque sampling event identifier */
-
- unsigned long reg_reserved2[3]; /* for future use */
-} pfarg_reg_t;
-
-typedef struct {
- unsigned int dbreg_num; /* which debug register */
- unsigned short dbreg_set; /* event set for this register */
- unsigned short dbreg_reserved1; /* for future use */
- unsigned long dbreg_value; /* value for debug register */
- unsigned long dbreg_flags; /* return: dbreg error */
- unsigned long dbreg_reserved2[1]; /* for future use */
-} pfarg_dbreg_t;
-
-typedef struct {
- unsigned int ft_version; /* perfmon: major [16-31], minor [0-15] */
- unsigned int ft_reserved; /* reserved for future use */
- unsigned long reserved[4]; /* for future use */
-} pfarg_features_t;
-
-typedef struct {
- pid_t load_pid; /* process to load the context into */
- unsigned short load_set; /* first event set to load */
- unsigned short load_reserved1; /* for future use */
- unsigned long load_reserved2[3]; /* for future use */
-} pfarg_load_t;
-
-typedef struct {
- int msg_type; /* generic message header */
- int msg_ctx_fd; /* generic message header */
- unsigned long msg_ovfl_pmds[4]; /* which PMDs overflowed */
- unsigned short msg_active_set; /* active set at the time of overflow */
- unsigned short msg_reserved1; /* for future use */
- unsigned int msg_reserved2; /* for future use */
- unsigned long msg_tstamp; /* for perf tuning/debug */
-} pfm_ovfl_msg_t;
-
-typedef struct {
- int msg_type; /* generic message header */
- int msg_ctx_fd; /* generic message header */
- unsigned long msg_tstamp; /* for perf tuning */
-} pfm_end_msg_t;
-
-typedef struct {
- int msg_type; /* type of the message */
- int msg_ctx_fd; /* unique identifier for the context */
- unsigned long msg_tstamp; /* for perf tuning */
-} pfm_gen_msg_t;
-
-#define PFM_MSG_OVFL 1 /* an overflow happened */
-#define PFM_MSG_END 2 /* task to which context was attached ended */
-
-typedef union {
- pfm_ovfl_msg_t pfm_ovfl_msg;
- pfm_end_msg_t pfm_end_msg;
- pfm_gen_msg_t pfm_gen_msg;
-} pfm_msg_t;
-
-/*
- * Define the version numbers for both perfmon as a whole and the sampling buffer format.
- */
-#define PFM_VERSION_MAJ 2U
-#define PFM_VERSION_MIN 0U
-#define PFM_VERSION (((PFM_VERSION_MAJ&0xffff)<<16)|(PFM_VERSION_MIN & 0xffff))
-#define PFM_VERSION_MAJOR(x) (((x)>>16) & 0xffff)
-#define PFM_VERSION_MINOR(x) ((x) & 0xffff)
-
-
-/*
- * miscellaneous architected definitions
- */
-#define PMU_FIRST_COUNTER 4 /* first counting monitor (PMC/PMD) */
-#define PMU_MAX_PMCS 256 /* maximum architected number of PMC registers */
-#define PMU_MAX_PMDS 256 /* maximum architected number of PMD registers */
-
-
-#endif /* _UAPI_ASM_IA64_PERFMON_H */
diff --git a/arch/ia64/include/uapi/asm/perfmon_default_smpl.h b/arch/ia64/include/uapi/asm/perfmon_default_smpl.h
deleted file mode 100644
index d3f36aff0e1f..000000000000
--- a/arch/ia64/include/uapi/asm/perfmon_default_smpl.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2002-2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- *
- * This file implements the default sampling buffer format
- * for Linux/ia64 perfmon subsystem.
- */
-#ifndef __PERFMON_DEFAULT_SMPL_H__
-#define __PERFMON_DEFAULT_SMPL_H__ 1
-
-#define PFM_DEFAULT_SMPL_UUID { \
- 0x4d, 0x72, 0xbe, 0xc0, 0x06, 0x64, 0x41, 0x43, 0x82, 0xb4, 0xd3, 0xfd, 0x27, 0x24, 0x3c, 0x97}
-
-/*
- * format specific parameters (passed at context creation)
- */
-typedef struct {
- unsigned long buf_size; /* size of the buffer in bytes */
- unsigned int flags; /* buffer specific flags */
- unsigned int res1; /* for future use */
- unsigned long reserved[2]; /* for future use */
-} pfm_default_smpl_arg_t;
-
-/*
- * combined context+format specific structure. Can be passed
- * to PFM_CONTEXT_CREATE
- */
-typedef struct {
- pfarg_context_t ctx_arg;
- pfm_default_smpl_arg_t buf_arg;
-} pfm_default_smpl_ctx_arg_t;
-
-/*
- * This header is at the beginning of the sampling buffer returned to the user.
- * It is directly followed by the first record.
- */
-typedef struct {
- unsigned long hdr_count; /* how many valid entries */
- unsigned long hdr_cur_offs; /* current offset from top of buffer */
- unsigned long hdr_reserved2; /* reserved for future use */
-
- unsigned long hdr_overflows; /* how many times the buffer overflowed */
- unsigned long hdr_buf_size; /* how many bytes in the buffer */
-
- unsigned int hdr_version; /* contains perfmon version (smpl format diffs) */
- unsigned int hdr_reserved1; /* for future use */
- unsigned long hdr_reserved[10]; /* for future use */
-} pfm_default_smpl_hdr_t;
-
-/*
- * Entry header in the sampling buffer. The header is directly followed
- * with the values of the PMD registers of interest saved in increasing
- * index order: PMD4, PMD5, and so on. How many PMDs are present depends
- * on how the session was programmed.
- *
- * In the case where multiple counters overflow at the same time, multiple
- * entries are written consecutively.
- *
- * last_reset_value member indicates the initial value of the overflowed PMD.
- */
-typedef struct {
- int pid; /* thread id (for NPTL, this is gettid()) */
- unsigned char reserved1[3]; /* reserved for future use */
- unsigned char ovfl_pmd; /* index of overflowed PMD */
-
- unsigned long last_reset_val; /* initial value of overflowed PMD */
- unsigned long ip; /* where did the overflow interrupt happened */
- unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */
-
- unsigned short cpu; /* cpu on which the overflow occurred */
- unsigned short set; /* event set active when overflow occurred */
- int tgid; /* thread group id (for NPTL, this is getpid()) */
-} pfm_default_smpl_entry_t;
-
-#define PFM_DEFAULT_MAX_PMDS 64 /* how many pmds supported by data structures (sizeof(unsigned long) */
-#define PFM_DEFAULT_MAX_ENTRY_SIZE (sizeof(pfm_default_smpl_entry_t)+(sizeof(unsigned long)*PFM_DEFAULT_MAX_PMDS))
-#define PFM_DEFAULT_SMPL_MIN_BUF_SIZE (sizeof(pfm_default_smpl_hdr_t)+PFM_DEFAULT_MAX_ENTRY_SIZE)
-
-#define PFM_DEFAULT_SMPL_VERSION_MAJ 2U
-#define PFM_DEFAULT_SMPL_VERSION_MIN 0U
-#define PFM_DEFAULT_SMPL_VERSION (((PFM_DEFAULT_SMPL_VERSION_MAJ&0xffff)<<16)|(PFM_DEFAULT_SMPL_VERSION_MIN & 0xffff))
-
-#endif /* __PERFMON_DEFAULT_SMPL_H__ */
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index c89bd5f8cbf8..78717819131c 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -47,8 +47,3 @@ CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31
# The gate DSO image is built using a special linker script.
include $(src)/Makefile.gate
-
-include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s FORCE
- $(call filechk,offsets,__ASM_NR_IRQS_H__)
-
-targets += nr-irqs.s
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index fb0deb8a4221..be3b90fef2e9 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -245,23 +245,23 @@ void foo(void)
BLANK();
DEFINE(IA64_PMSA_GR_OFFSET,
- offsetof (struct pal_min_state_area_s, pmsa_gr));
+ offsetof(struct pal_min_state_area, pmsa_gr));
DEFINE(IA64_PMSA_BANK1_GR_OFFSET,
- offsetof (struct pal_min_state_area_s, pmsa_bank1_gr));
+ offsetof(struct pal_min_state_area, pmsa_bank1_gr));
DEFINE(IA64_PMSA_PR_OFFSET,
- offsetof (struct pal_min_state_area_s, pmsa_pr));
+ offsetof(struct pal_min_state_area, pmsa_pr));
DEFINE(IA64_PMSA_BR0_OFFSET,
- offsetof (struct pal_min_state_area_s, pmsa_br0));
+ offsetof(struct pal_min_state_area, pmsa_br0));
DEFINE(IA64_PMSA_RSC_OFFSET,
- offsetof (struct pal_min_state_area_s, pmsa_rsc));
+ offsetof(struct pal_min_state_area, pmsa_rsc));
DEFINE(IA64_PMSA_IIP_OFFSET,
- offsetof (struct pal_min_state_area_s, pmsa_iip));
+ offsetof(struct pal_min_state_area, pmsa_iip));
DEFINE(IA64_PMSA_IPSR_OFFSET,
- offsetof (struct pal_min_state_area_s, pmsa_ipsr));
+ offsetof(struct pal_min_state_area, pmsa_ipsr));
DEFINE(IA64_PMSA_IFS_OFFSET,
- offsetof (struct pal_min_state_area_s, pmsa_ifs));
+ offsetof(struct pal_min_state_area, pmsa_ifs));
DEFINE(IA64_PMSA_XIP_OFFSET,
- offsetof (struct pal_min_state_area_s, pmsa_xip));
+ offsetof(struct pal_min_state_area, pmsa_xip));
BLANK();
/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
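
The hunk above only renames the type; the constants it emits still let assembly address the PAL min-state area by fixed offset. A stand-alone sketch of the offsetof idiom behind those DEFINE() lines, using an abbreviated stand-in for the real structure:

#include <stddef.h>
#include <stdio.h>

struct pal_min_state_area_sketch {
	unsigned long pmsa_nat_bits;
	unsigned long pmsa_gr[15];	/* only the first two members, for illustration */
};

int main(void)
{
	/* the kernel emits this into an assembler header instead of stdout */
	printf("#define IA64_PMSA_GR_OFFSET %zu\n",
	       offsetof(struct pal_min_state_area_sketch, pmsa_gr));
	return 0;
}
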
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index fec70d662d0c..76730f34685c 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -15,6 +15,7 @@
#include <linux/memblock.h>
#include <linux/kexec.h>
#include <linux/elfcore.h>
+#include <linux/reboot.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/kdebug.h>
@@ -43,7 +44,7 @@ crash_save_this_cpu(void)
elf_greg_t *dst = (elf_greg_t *)&(prstatus->pr_reg);
memset(prstatus, 0, sizeof(*prstatus));
- prstatus->pr_pid = current->pid;
+ prstatus->common.pr_pid = current->pid;
ia64_dump_cpu_regs(dst);
cfm = dst[43];
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index f932b25fb817..c5fe21de46a8 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -34,11 +34,13 @@
#include <linux/kexec.h>
#include <linux/mm.h>
+#include <asm/efi.h>
#include <asm/io.h>
#include <asm/kregs.h>
#include <asm/meminit.h>
#include <asm/processor.h>
#include <asm/mca.h>
+#include <asm/sal.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index efc9b568401c..af310dc8a356 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -16,6 +16,7 @@
#include <linux/numa.h>
#include <linux/mmzone.h>
+#include <asm/efi.h>
#include <asm/numa.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 2703f7795672..d4cae2fc69ca 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -91,11 +91,13 @@
#include <linux/gfp.h>
#include <asm/delay.h>
+#include <asm/efi.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/mca.h>
+#include <asm/mca_asm.h>
#include <asm/kexec.h>
#include <asm/irq.h>
@@ -894,7 +896,7 @@ static void
finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
unsigned long *nat)
{
- const pal_min_state_area_t *ms = sos->pal_min_state;
+ const struct pal_min_state_area *ms = sos->pal_min_state;
const u64 *bank;
/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
@@ -970,7 +972,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
char *p;
ia64_va va;
extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */
- const pal_min_state_area_t *ms = sos->pal_min_state;
+ const struct pal_min_state_area *ms = sos->pal_min_state;
struct task_struct *previous_current;
struct pt_regs *old_regs;
struct switch_stack *old_sw;
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 4d0ab323dee8..36a69b4e6169 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -496,7 +496,7 @@ recover_from_read_error(slidx_table_t *slidx,
struct ia64_sal_os_state *sos)
{
u64 target_identifier;
- pal_min_state_area_t *pmsa;
+ struct pal_min_state_area *pmsa;
struct ia64_psr *psr1, *psr2;
ia64_fptr_t *mca_hdlr_bh = (ia64_fptr_t*)mca_handler_bhhook;
diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c
deleted file mode 100644
index f2633b22d3be..000000000000
--- a/arch/ia64/kernel/nr-irqs.c
+++ /dev/null
@@ -1,22 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * calculate
- * NR_IRQS = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, FOO_NR_IRQS...)
- * depending on config.
- * This must be calculated before processing asm-offset.c.
- */
-
-#define ASM_OFFSETS_C 1
-
-#include <linux/kbuild.h>
-#include <linux/threads.h>
-#include <asm/native/irq.h>
-
-void foo(void)
-{
- union paravirt_nr_irqs_max {
- char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS];
- };
-
- DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max));
-}
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 78fa6579c9ea..64189f04c1a4 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -648,46 +648,6 @@ static int version_info(struct seq_file *m)
return 0;
}
-static int perfmon_info(struct seq_file *m)
-{
- u64 pm_buffer[16];
- pal_perf_mon_info_u_t pm_info;
-
- if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0)
- return 0;
-
- seq_printf(m,
- "PMC/PMD pairs : %d\n"
- "Counter width : %d bits\n"
- "Cycle event number : %d\n"
- "Retired event number : %d\n"
- "Implemented PMC : ",
- pm_info.pal_perf_mon_info_s.generic,
- pm_info.pal_perf_mon_info_s.width,
- pm_info.pal_perf_mon_info_s.cycles,
- pm_info.pal_perf_mon_info_s.retired);
-
- bitregister_process(m, pm_buffer, 256);
- seq_puts(m, "\nImplemented PMD : ");
- bitregister_process(m, pm_buffer+4, 256);
- seq_puts(m, "\nCycles count capable : ");
- bitregister_process(m, pm_buffer+8, 256);
- seq_puts(m, "\nRetired bundles count capable : ");
-
-#ifdef CONFIG_ITANIUM
- /*
- * PAL_PERF_MON_INFO reports that only PMC4 can be used to count CPU_CYCLES
- * which is wrong, both PMC4 and PMD5 support it.
- */
- if (pm_buffer[12] == 0x10)
- pm_buffer[12]=0x30;
-#endif
-
- bitregister_process(m, pm_buffer+12, 256);
- seq_putc(m, '\n');
- return 0;
-}
-
static int frequency_info(struct seq_file *m)
{
struct pal_freq_ratio proc, itc, bus;
@@ -816,7 +776,6 @@ static const palinfo_entry_t palinfo_entries[]={
{ "power_info", power_info, },
{ "register_info", register_info, },
{ "processor_info", processor_info, },
- { "perfmon_info", perfmon_info, },
{ "frequency_info", frequency_info, },
{ "bus_info", bus_info },
{ "tr_info", tr_info, }
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
deleted file mode 100644
index a40c56020fc5..000000000000
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ /dev/null
@@ -1,297 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2002-2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- *
- * This file implements the default sampling buffer format
- * for the Linux/ia64 perfmon-2 subsystem.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <asm/delay.h>
-#include <linux/smp.h>
-
-#include <asm/perfmon.h>
-#include <asm/perfmon_default_smpl.h>
-
-MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
-MODULE_DESCRIPTION("perfmon default sampling format");
-MODULE_LICENSE("GPL");
-
-#define DEFAULT_DEBUG 1
-
-#ifdef DEFAULT_DEBUG
-#define DPRINT(a) \
- do { \
- if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __func__, __LINE__, smp_processor_id()); printk a; } \
- } while (0)
-
-#define DPRINT_ovfl(a) \
- do { \
- if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __func__, __LINE__, smp_processor_id()); printk a; } \
- } while (0)
-
-#else
-#define DPRINT(a)
-#define DPRINT_ovfl(a)
-#endif
-
-static int
-default_validate(struct task_struct *task, unsigned int flags, int cpu, void *data)
-{
- pfm_default_smpl_arg_t *arg = (pfm_default_smpl_arg_t*)data;
- int ret = 0;
-
- if (data == NULL) {
- DPRINT(("[%d] no argument passed\n", task_pid_nr(task)));
- return -EINVAL;
- }
-
- DPRINT(("[%d] validate flags=0x%x CPU%d\n", task_pid_nr(task), flags, cpu));
-
- /*
- * must hold at least the buffer header + one minimally sized entry
- */
- if (arg->buf_size < PFM_DEFAULT_SMPL_MIN_BUF_SIZE) return -EINVAL;
-
- DPRINT(("buf_size=%lu\n", arg->buf_size));
-
- return ret;
-}
-
-static int
-default_get_size(struct task_struct *task, unsigned int flags, int cpu, void *data, unsigned long *size)
-{
- pfm_default_smpl_arg_t *arg = (pfm_default_smpl_arg_t *)data;
-
- /*
- * size has been validated in default_validate
- */
- *size = arg->buf_size;
-
- return 0;
-}
-
-static int
-default_init(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *data)
-{
- pfm_default_smpl_hdr_t *hdr;
- pfm_default_smpl_arg_t *arg = (pfm_default_smpl_arg_t *)data;
-
- hdr = (pfm_default_smpl_hdr_t *)buf;
-
- hdr->hdr_version = PFM_DEFAULT_SMPL_VERSION;
- hdr->hdr_buf_size = arg->buf_size;
- hdr->hdr_cur_offs = sizeof(*hdr);
- hdr->hdr_overflows = 0UL;
- hdr->hdr_count = 0UL;
-
- DPRINT(("[%d] buffer=%p buf_size=%lu hdr_size=%lu hdr_version=%u cur_offs=%lu\n",
- task_pid_nr(task),
- buf,
- hdr->hdr_buf_size,
- sizeof(*hdr),
- hdr->hdr_version,
- hdr->hdr_cur_offs));
-
- return 0;
-}
-
-static int
-default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp)
-{
- pfm_default_smpl_hdr_t *hdr;
- pfm_default_smpl_entry_t *ent;
- void *cur, *last;
- unsigned long *e, entry_size;
- unsigned int npmds, i;
- unsigned char ovfl_pmd;
- unsigned char ovfl_notify;
-
- if (unlikely(buf == NULL || arg == NULL|| regs == NULL || task == NULL)) {
- DPRINT(("[%d] invalid arguments buf=%p arg=%p\n", task->pid, buf, arg));
- return -EINVAL;
- }
-
- hdr = (pfm_default_smpl_hdr_t *)buf;
- cur = buf+hdr->hdr_cur_offs;
- last = buf+hdr->hdr_buf_size;
- ovfl_pmd = arg->ovfl_pmd;
- ovfl_notify = arg->ovfl_notify;
-
- /*
- * precheck for sanity
- */
- if ((last - cur) < PFM_DEFAULT_MAX_ENTRY_SIZE) goto full;
-
- npmds = hweight64(arg->smpl_pmds[0]);
-
- ent = (pfm_default_smpl_entry_t *)cur;
-
- prefetch(arg->smpl_pmds_values);
-
- entry_size = sizeof(*ent) + (npmds << 3);
-
- /* position for first pmd */
- e = (unsigned long *)(ent+1);
-
- hdr->hdr_count++;
-
- DPRINT_ovfl(("[%d] count=%lu cur=%p last=%p free_bytes=%lu ovfl_pmd=%d ovfl_notify=%d npmds=%u\n",
- task->pid,
- hdr->hdr_count,
- cur, last,
- last-cur,
- ovfl_pmd,
- ovfl_notify, npmds));
-
- /*
- * current = task running at the time of the overflow.
- *
- * per-task mode:
- * - this is usually the task being monitored.
- * Under certain conditions, it might be a different task
- *
- * system-wide:
- * - this is not necessarily the task controlling the session
- */
- ent->pid = current->pid;
- ent->ovfl_pmd = ovfl_pmd;
- ent->last_reset_val = arg->pmd_last_reset; //pmd[0].reg_last_reset_val;
-
- /*
- * where did the fault happen (includes slot number)
- */
- ent->ip = regs->cr_iip | ((regs->cr_ipsr >> 41) & 0x3);
-
- ent->tstamp = stamp;
- ent->cpu = smp_processor_id();
- ent->set = arg->active_set;
- ent->tgid = current->tgid;
-
- /*
- * selectively store PMDs in increasing index number
- */
- if (npmds) {
- unsigned long *val = arg->smpl_pmds_values;
- for(i=0; i < npmds; i++) {
- *e++ = *val++;
- }
- }
-
- /*
- * update position for next entry
- */
- hdr->hdr_cur_offs += entry_size;
- cur += entry_size;
-
- /*
- * post check to avoid losing the last sample
- */
- if ((last - cur) < PFM_DEFAULT_MAX_ENTRY_SIZE) goto full;
-
- /*
- * keep same ovfl_pmds, ovfl_notify
- */
- arg->ovfl_ctrl.bits.notify_user = 0;
- arg->ovfl_ctrl.bits.block_task = 0;
- arg->ovfl_ctrl.bits.mask_monitoring = 0;
- arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1; /* reset before returning from interrupt handler */
-
- return 0;
-full:
- DPRINT_ovfl(("sampling buffer full free=%lu, count=%lu, ovfl_notify=%d\n", last-cur, hdr->hdr_count, ovfl_notify));
-
- /*
- * increment number of buffer overflow.
- * important to detect duplicate set of samples.
- */
- hdr->hdr_overflows++;
-
- /*
- * if no notification requested, then we saturate the buffer
- */
- if (ovfl_notify == 0) {
- arg->ovfl_ctrl.bits.notify_user = 0;
- arg->ovfl_ctrl.bits.block_task = 0;
- arg->ovfl_ctrl.bits.mask_monitoring = 1;
- arg->ovfl_ctrl.bits.reset_ovfl_pmds = 0;
- } else {
- arg->ovfl_ctrl.bits.notify_user = 1;
- arg->ovfl_ctrl.bits.block_task = 1; /* ignored for non-blocking context */
- arg->ovfl_ctrl.bits.mask_monitoring = 1;
- arg->ovfl_ctrl.bits.reset_ovfl_pmds = 0; /* no reset now */
- }
- return -1; /* we are full, sorry */
-}
-
-static int
-default_restart(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
-{
- pfm_default_smpl_hdr_t *hdr;
-
- hdr = (pfm_default_smpl_hdr_t *)buf;
-
- hdr->hdr_count = 0UL;
- hdr->hdr_cur_offs = sizeof(*hdr);
-
- ctrl->bits.mask_monitoring = 0;
- ctrl->bits.reset_ovfl_pmds = 1; /* uses long-reset values */
-
- return 0;
-}
-
-static int
-default_exit(struct task_struct *task, void *buf, struct pt_regs *regs)
-{
- DPRINT(("[%d] exit(%p)\n", task_pid_nr(task), buf));
- return 0;
-}
-
-static pfm_buffer_fmt_t default_fmt={
- .fmt_name = "default_format",
- .fmt_uuid = PFM_DEFAULT_SMPL_UUID,
- .fmt_arg_size = sizeof(pfm_default_smpl_arg_t),
- .fmt_validate = default_validate,
- .fmt_getsize = default_get_size,
- .fmt_init = default_init,
- .fmt_handler = default_handler,
- .fmt_restart = default_restart,
- .fmt_restart_active = default_restart,
- .fmt_exit = default_exit,
-};
-
-static int __init
-pfm_default_smpl_init_module(void)
-{
- int ret;
-
- ret = pfm_register_buffer_fmt(&default_fmt);
- if (ret == 0) {
- printk("perfmon_default_smpl: %s v%u.%u registered\n",
- default_fmt.fmt_name,
- PFM_DEFAULT_SMPL_VERSION_MAJ,
- PFM_DEFAULT_SMPL_VERSION_MIN);
- } else {
- printk("perfmon_default_smpl: %s cannot register ret=%d\n",
- default_fmt.fmt_name,
- ret);
- }
-
- return ret;
-}
-
-static void __exit
-pfm_default_smpl_cleanup_module(void)
-{
- int ret;
- ret = pfm_unregister_buffer_fmt(default_fmt.fmt_uuid);
-
- printk("perfmon_default_smpl: unregister %s=%d\n", default_fmt.fmt_name, ret);
-}
-
-module_init(pfm_default_smpl_init_module);
-module_exit(pfm_default_smpl_cleanup_module);
-
diff --git a/arch/ia64/kernel/perfmon_generic.h b/arch/ia64/kernel/perfmon_generic.h
deleted file mode 100644
index 96af4696cea9..000000000000
--- a/arch/ia64/kernel/perfmon_generic.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file contains the generic PMU register description tables
- * and pmc checker used by perfmon.c.
- *
- * Copyright (C) 2002-2003 Hewlett Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- */
-
-static pfm_reg_desc_t pfm_gen_pmc_desc[PMU_MAX_PMCS]={
-/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
- { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-static pfm_reg_desc_t pfm_gen_pmd_desc[PMU_MAX_PMDS]={
-/* pmd0 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
-/* pmd1 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
-/* pmd2 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
-/* pmd3 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
-/* pmd4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
-/* pmd5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
-/* pmd6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
-/* pmd7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
- { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-/*
- * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
- */
-static pmu_config_t pmu_conf_gen={
- .pmu_name = "Generic",
- .pmu_family = 0xff, /* any */
- .ovfl_val = (1UL << 32) - 1,
- .num_ibrs = 0, /* does not use */
- .num_dbrs = 0, /* does not use */
- .pmd_desc = pfm_gen_pmd_desc,
- .pmc_desc = pfm_gen_pmc_desc
-};
-
diff --git a/arch/ia64/kernel/perfmon_itanium.h b/arch/ia64/kernel/perfmon_itanium.h
index f2d348648a03..dbd04028aafa 100644
--- a/arch/ia64/kernel/perfmon_itanium.h
+++ b/arch/ia64/kernel/perfmon_itanium.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file contains the Itanium PMU register description tables
- * and pmc checker used by perfmon.c.
+ * and pmc checker.
*
* Copyright (C) 2002-2003 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
diff --git a/arch/ia64/kernel/perfmon_mckinley.h b/arch/ia64/kernel/perfmon_mckinley.h
deleted file mode 100644
index a993249e58bc..000000000000
--- a/arch/ia64/kernel/perfmon_mckinley.h
+++ /dev/null
@@ -1,188 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file contains the McKinley PMU register description tables
- * and pmc checker used by perfmon.c.
- *
- * Copyright (C) 2002-2003 Hewlett Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- */
-static int pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
-
-static pfm_reg_desc_t pfm_mck_pmc_desc[PMU_MAX_PMCS]={
-/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc4 */ { PFM_REG_COUNTING, 6, 0x0000000000800000UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc5 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc6 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc7 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc8 */ { PFM_REG_CONFIG , 0, 0xffffffff3fffffffUL, 0xffffffff3ffffffbUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc9 */ { PFM_REG_CONFIG , 0, 0xffffffff3ffffffcUL, 0xffffffff3ffffffbUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc10 */ { PFM_REG_MONITOR , 4, 0x0UL, 0xffffUL, NULL, pfm_mck_pmc_check, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc11 */ { PFM_REG_MONITOR , 6, 0x0UL, 0x30f01cf, NULL, pfm_mck_pmc_check, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc12 */ { PFM_REG_MONITOR , 6, 0x0UL, 0xffffUL, NULL, pfm_mck_pmc_check, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc13 */ { PFM_REG_CONFIG , 0, 0x00002078fefefefeUL, 0x1e00018181818UL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc14 */ { PFM_REG_CONFIG , 0, 0x0db60db60db60db6UL, 0x2492UL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc15 */ { PFM_REG_CONFIG , 0, 0x00000000fffffff0UL, 0xfUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
- { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-static pfm_reg_desc_t pfm_mck_pmd_desc[PMU_MAX_PMDS]={
-/* pmd0 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(1),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
-/* pmd1 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(0),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
-/* pmd2 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
-/* pmd3 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(2)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
-/* pmd4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
-/* pmd5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
-/* pmd6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
-/* pmd7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
-/* pmd8 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd9 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd10 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd11 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd12 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd13 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd14 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd15 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd16 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd17 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(2)|RDEP(3),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
- { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-/*
- * PMC reserved fields must have their power-up values preserved
- */
-static int
-pfm_mck_reserved(unsigned int cnum, unsigned long *val, struct pt_regs *regs)
-{
- unsigned long tmp1, tmp2, ival = *val;
-
- /* remove reserved areas from user value */
- tmp1 = ival & PMC_RSVD_MASK(cnum);
-
- /* get reserved fields values */
- tmp2 = PMC_DFL_VAL(cnum) & ~PMC_RSVD_MASK(cnum);
-
- *val = tmp1 | tmp2;
-
- DPRINT(("pmc[%d]=0x%lx, mask=0x%lx, reset=0x%lx, val=0x%lx\n",
- cnum, ival, PMC_RSVD_MASK(cnum), PMC_DFL_VAL(cnum), *val));
- return 0;
-}
-
-/*
- * task can be NULL if the context is unloaded
- */
-static int
-pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
-{
- int ret = 0, check_case1 = 0;
- unsigned long val8 = 0, val14 = 0, val13 = 0;
- int is_loaded;
-
- /* first preserve the reserved fields */
- pfm_mck_reserved(cnum, val, regs);
-
- /* sanitfy check */
- if (ctx == NULL) return -EINVAL;
-
- is_loaded = ctx->ctx_state == PFM_CTX_LOADED || ctx->ctx_state == PFM_CTX_MASKED;
-
- /*
- * we must clear the debug registers if pmc13 has a value which enable
- * memory pipeline event constraints. In this case we need to clear the
- * the debug registers if they have not yet been accessed. This is required
- * to avoid picking stale state.
- * PMC13 is "active" if:
- * one of the pmc13.cfg_dbrpXX field is different from 0x3
- * AND
- * at the corresponding pmc13.ena_dbrpXX is set.
- */
- DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, *val, ctx->ctx_fl_using_dbreg, is_loaded));
-
- if (cnum == 13 && is_loaded
- && (*val & 0x1e00000000000UL) && (*val & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
-
- DPRINT(("pmc[%d]=0x%lx has active pmc13 settings, clearing dbr\n", cnum, *val));
-
- /* don't mix debug with perfmon */
- if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
-
- /*
- * a count of 0 will mark the debug registers as in use and also
- * ensure that they are properly cleared.
- */
- ret = pfm_write_ibr_dbr(PFM_DATA_RR, ctx, NULL, 0, regs);
- if (ret) return ret;
- }
- /*
- * we must clear the (instruction) debug registers if any pmc14.ibrpX bit is enabled
- * before they are (fl_using_dbreg==0) to avoid picking up stale information.
- */
- if (cnum == 14 && is_loaded && ((*val & 0x2222UL) != 0x2222UL) && ctx->ctx_fl_using_dbreg == 0) {
-
- DPRINT(("pmc[%d]=0x%lx has active pmc14 settings, clearing ibr\n", cnum, *val));
-
- /* don't mix debug with perfmon */
- if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
-
- /*
- * a count of 0 will mark the debug registers as in use and also
- * ensure that they are properly cleared.
- */
- ret = pfm_write_ibr_dbr(PFM_CODE_RR, ctx, NULL, 0, regs);
- if (ret) return ret;
-
- }
-
- switch(cnum) {
- case 4: *val |= 1UL << 23; /* force power enable bit */
- break;
- case 8: val8 = *val;
- val13 = ctx->ctx_pmcs[13];
- val14 = ctx->ctx_pmcs[14];
- check_case1 = 1;
- break;
- case 13: val8 = ctx->ctx_pmcs[8];
- val13 = *val;
- val14 = ctx->ctx_pmcs[14];
- check_case1 = 1;
- break;
- case 14: val8 = ctx->ctx_pmcs[8];
- val13 = ctx->ctx_pmcs[13];
- val14 = *val;
- check_case1 = 1;
- break;
- }
- /* check illegal configuration which can produce inconsistencies in tagging
- * i-side events in L1D and L2 caches
- */
- if (check_case1) {
- ret = ((val13 >> 45) & 0xf) == 0
- && ((val8 & 0x1) == 0)
- && ((((val14>>1) & 0x3) == 0x2 || ((val14>>1) & 0x3) == 0x0)
- ||(((val14>>4) & 0x3) == 0x2 || ((val14>>4) & 0x3) == 0x0));
-
- if (ret) DPRINT((KERN_DEBUG "perfmon: failure check_case1\n"));
- }
-
- return ret ? -EINVAL : 0;
-}
-
-/*
- * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
- */
-static pmu_config_t pmu_conf_mck={
- .pmu_name = "Itanium 2",
- .pmu_family = 0x1f,
- .flags = PFM_PMU_IRQ_RESEND,
- .ovfl_val = (1UL << 47) - 1,
- .pmd_desc = pfm_mck_pmd_desc,
- .pmc_desc = pfm_mck_pmc_desc,
- .num_ibrs = 8,
- .num_dbrs = 8,
- .use_rr_dbregs = 1 /* debug register are use for range restrictions */
-};
-
-
diff --git a/arch/ia64/kernel/perfmon_montecito.h b/arch/ia64/kernel/perfmon_montecito.h
deleted file mode 100644
index c0b5b9110c88..000000000000
--- a/arch/ia64/kernel/perfmon_montecito.h
+++ /dev/null
@@ -1,270 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file contains the Montecito PMU register description tables
- * and pmc checker used by perfmon.c.
- *
- * Copyright (c) 2005-2006 Hewlett-Packard Development Company, L.P.
- * Contributed by Stephane Eranian <eranian@hpl.hp.com>
- */
-static int pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
-
-#define RDEP_MONT_ETB (RDEP(38)|RDEP(39)|RDEP(48)|RDEP(49)|RDEP(50)|RDEP(51)|RDEP(52)|RDEP(53)|RDEP(54)|\
- RDEP(55)|RDEP(56)|RDEP(57)|RDEP(58)|RDEP(59)|RDEP(60)|RDEP(61)|RDEP(62)|RDEP(63))
-#define RDEP_MONT_DEAR (RDEP(32)|RDEP(33)|RDEP(36))
-#define RDEP_MONT_IEAR (RDEP(34)|RDEP(35))
-
-static pfm_reg_desc_t pfm_mont_pmc_desc[PMU_MAX_PMCS]={
-/* pmc0 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc4 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(4),0, 0, 0}, {0,0, 0, 0}},
-/* pmc5 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(5),0, 0, 0}, {0,0, 0, 0}},
-/* pmc6 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(6),0, 0, 0}, {0,0, 0, 0}},
-/* pmc7 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(7),0, 0, 0}, {0,0, 0, 0}},
-/* pmc8 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(8),0, 0, 0}, {0,0, 0, 0}},
-/* pmc9 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(9),0, 0, 0}, {0,0, 0, 0}},
-/* pmc10 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(10),0, 0, 0}, {0,0, 0, 0}},
-/* pmc11 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(11),0, 0, 0}, {0,0, 0, 0}},
-/* pmc12 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(12),0, 0, 0}, {0,0, 0, 0}},
-/* pmc13 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(13),0, 0, 0}, {0,0, 0, 0}},
-/* pmc14 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(14),0, 0, 0}, {0,0, 0, 0}},
-/* pmc15 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(15),0, 0, 0}, {0,0, 0, 0}},
-/* pmc16 */ { PFM_REG_NOTIMPL, },
-/* pmc17 */ { PFM_REG_NOTIMPL, },
-/* pmc18 */ { PFM_REG_NOTIMPL, },
-/* pmc19 */ { PFM_REG_NOTIMPL, },
-/* pmc20 */ { PFM_REG_NOTIMPL, },
-/* pmc21 */ { PFM_REG_NOTIMPL, },
-/* pmc22 */ { PFM_REG_NOTIMPL, },
-/* pmc23 */ { PFM_REG_NOTIMPL, },
-/* pmc24 */ { PFM_REG_NOTIMPL, },
-/* pmc25 */ { PFM_REG_NOTIMPL, },
-/* pmc26 */ { PFM_REG_NOTIMPL, },
-/* pmc27 */ { PFM_REG_NOTIMPL, },
-/* pmc28 */ { PFM_REG_NOTIMPL, },
-/* pmc29 */ { PFM_REG_NOTIMPL, },
-/* pmc30 */ { PFM_REG_NOTIMPL, },
-/* pmc31 */ { PFM_REG_NOTIMPL, },
-/* pmc32 */ { PFM_REG_CONFIG, 0, 0x30f01ffffffffffUL, 0x30f01ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc33 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc34 */ { PFM_REG_CONFIG, 0, 0xf01ffffffffffUL, 0xf01ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc35 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc36 */ { PFM_REG_CONFIG, 0, 0xfffffff0, 0xf, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc37 */ { PFM_REG_MONITOR, 4, 0x0, 0x3fff, NULL, pfm_mont_pmc_check, {RDEP_MONT_IEAR, 0, 0, 0}, {0, 0, 0, 0}},
-/* pmc38 */ { PFM_REG_CONFIG, 0, 0xdb6, 0x2492, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc39 */ { PFM_REG_MONITOR, 6, 0x0, 0xffcf, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
-/* pmc40 */ { PFM_REG_MONITOR, 6, 0x2000000, 0xf01cf, NULL, pfm_mont_pmc_check, {RDEP_MONT_DEAR,0, 0, 0}, {0,0, 0, 0}},
-/* pmc41 */ { PFM_REG_CONFIG, 0, 0x00002078fefefefeUL, 0x1e00018181818UL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc42 */ { PFM_REG_MONITOR, 6, 0x0, 0x7ff4f, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
- { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-static pfm_reg_desc_t pfm_mont_pmd_desc[PMU_MAX_PMDS]={
-/* pmd0 */ { PFM_REG_NOTIMPL, },
-/* pmd1 */ { PFM_REG_NOTIMPL, },
-/* pmd2 */ { PFM_REG_NOTIMPL, },
-/* pmd3 */ { PFM_REG_NOTIMPL, },
-/* pmd4 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(4),0, 0, 0}},
-/* pmd5 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(5),0, 0, 0}},
-/* pmd6 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(6),0, 0, 0}},
-/* pmd7 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(7),0, 0, 0}},
-/* pmd8 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(8),0, 0, 0}},
-/* pmd9 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(9),0, 0, 0}},
-/* pmd10 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(10),0, 0, 0}},
-/* pmd11 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(11),0, 0, 0}},
-/* pmd12 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(12),0, 0, 0}},
-/* pmd13 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(13),0, 0, 0}},
-/* pmd14 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(14),0, 0, 0}},
-/* pmd15 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(15),0, 0, 0}},
-/* pmd16 */ { PFM_REG_NOTIMPL, },
-/* pmd17 */ { PFM_REG_NOTIMPL, },
-/* pmd18 */ { PFM_REG_NOTIMPL, },
-/* pmd19 */ { PFM_REG_NOTIMPL, },
-/* pmd20 */ { PFM_REG_NOTIMPL, },
-/* pmd21 */ { PFM_REG_NOTIMPL, },
-/* pmd22 */ { PFM_REG_NOTIMPL, },
-/* pmd23 */ { PFM_REG_NOTIMPL, },
-/* pmd24 */ { PFM_REG_NOTIMPL, },
-/* pmd25 */ { PFM_REG_NOTIMPL, },
-/* pmd26 */ { PFM_REG_NOTIMPL, },
-/* pmd27 */ { PFM_REG_NOTIMPL, },
-/* pmd28 */ { PFM_REG_NOTIMPL, },
-/* pmd29 */ { PFM_REG_NOTIMPL, },
-/* pmd30 */ { PFM_REG_NOTIMPL, },
-/* pmd31 */ { PFM_REG_NOTIMPL, },
-/* pmd32 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(33)|RDEP(36),0, 0, 0}, {RDEP(40),0, 0, 0}},
-/* pmd33 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(32)|RDEP(36),0, 0, 0}, {RDEP(40),0, 0, 0}},
-/* pmd34 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(35),0, 0, 0}, {RDEP(37),0, 0, 0}},
-/* pmd35 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(34),0, 0, 0}, {RDEP(37),0, 0, 0}},
-/* pmd36 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(32)|RDEP(33),0, 0, 0}, {RDEP(40),0, 0, 0}},
-/* pmd37 */ { PFM_REG_NOTIMPL, },
-/* pmd38 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd39 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd40 */ { PFM_REG_NOTIMPL, },
-/* pmd41 */ { PFM_REG_NOTIMPL, },
-/* pmd42 */ { PFM_REG_NOTIMPL, },
-/* pmd43 */ { PFM_REG_NOTIMPL, },
-/* pmd44 */ { PFM_REG_NOTIMPL, },
-/* pmd45 */ { PFM_REG_NOTIMPL, },
-/* pmd46 */ { PFM_REG_NOTIMPL, },
-/* pmd47 */ { PFM_REG_NOTIMPL, },
-/* pmd48 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd49 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd50 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd51 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd52 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd53 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd54 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd55 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd56 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd57 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd58 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd59 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd60 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd61 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd62 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd63 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
- { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-/*
- * PMC reserved fields must have their power-up values preserved
- */
-static int
-pfm_mont_reserved(unsigned int cnum, unsigned long *val, struct pt_regs *regs)
-{
- unsigned long tmp1, tmp2, ival = *val;
-
- /* remove reserved areas from user value */
- tmp1 = ival & PMC_RSVD_MASK(cnum);
-
- /* get reserved fields values */
- tmp2 = PMC_DFL_VAL(cnum) & ~PMC_RSVD_MASK(cnum);
-
- *val = tmp1 | tmp2;
-
- DPRINT(("pmc[%d]=0x%lx, mask=0x%lx, reset=0x%lx, val=0x%lx\n",
- cnum, ival, PMC_RSVD_MASK(cnum), PMC_DFL_VAL(cnum), *val));
- return 0;
-}
-
-/*
- * task can be NULL if the context is unloaded
- */
-static int
-pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
-{
- int ret = 0;
- unsigned long val32 = 0, val38 = 0, val41 = 0;
- unsigned long tmpval;
- int check_case1 = 0;
- int is_loaded;
-
- /* first preserve the reserved fields */
- pfm_mont_reserved(cnum, val, regs);
-
- tmpval = *val;
-
- /* sanity check */
- if (ctx == NULL) return -EINVAL;
-
- is_loaded = ctx->ctx_state == PFM_CTX_LOADED || ctx->ctx_state == PFM_CTX_MASKED;
-
- /*
- * we must clear the debug registers if pmc41 has a value which enable
- * memory pipeline event constraints. In this case we need to clear the
- * the debug registers if they have not yet been accessed. This is required
- * to avoid picking stale state.
- * PMC41 is "active" if:
- * one of the pmc41.cfg_dtagXX field is different from 0x3
- * AND
- * at the corresponding pmc41.en_dbrpXX is set.
- * AND
- * ctx_fl_using_dbreg == 0 (i.e., dbr not yet used)
- */
- DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, tmpval, ctx->ctx_fl_using_dbreg, is_loaded));
-
- if (cnum == 41 && is_loaded
- && (tmpval & 0x1e00000000000UL) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
-
- DPRINT(("pmc[%d]=0x%lx has active pmc41 settings, clearing dbr\n", cnum, tmpval));
-
- /* don't mix debug with perfmon */
- if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
-
- /*
- * a count of 0 will mark the debug registers if:
- * AND
- */
- ret = pfm_write_ibr_dbr(PFM_DATA_RR, ctx, NULL, 0, regs);
- if (ret) return ret;
- }
- /*
- * we must clear the (instruction) debug registers if:
- * pmc38.ig_ibrpX is 0 (enabled)
- * AND
- * ctx_fl_using_dbreg == 0 (i.e., dbr not yet used)
- */
- if (cnum == 38 && is_loaded && ((tmpval & 0x492UL) != 0x492UL) && ctx->ctx_fl_using_dbreg == 0) {
-
- DPRINT(("pmc38=0x%lx has active pmc38 settings, clearing ibr\n", tmpval));
-
- /* don't mix debug with perfmon */
- if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
-
- /*
- * a count of 0 will mark the debug registers as in use and also
- * ensure that they are properly cleared.
- */
- ret = pfm_write_ibr_dbr(PFM_CODE_RR, ctx, NULL, 0, regs);
- if (ret) return ret;
-
- }
- switch(cnum) {
- case 32: val32 = *val;
- val38 = ctx->ctx_pmcs[38];
- val41 = ctx->ctx_pmcs[41];
- check_case1 = 1;
- break;
- case 38: val38 = *val;
- val32 = ctx->ctx_pmcs[32];
- val41 = ctx->ctx_pmcs[41];
- check_case1 = 1;
- break;
- case 41: val41 = *val;
- val32 = ctx->ctx_pmcs[32];
- val38 = ctx->ctx_pmcs[38];
- check_case1 = 1;
- break;
- }
- /* check illegal configuration which can produce inconsistencies in tagging
- * i-side events in L1D and L2 caches
- */
- if (check_case1) {
- ret = (((val41 >> 45) & 0xf) == 0 && ((val32>>57) & 0x1) == 0)
- && ((((val38>>1) & 0x3) == 0x2 || ((val38>>1) & 0x3) == 0)
- || (((val38>>4) & 0x3) == 0x2 || ((val38>>4) & 0x3) == 0));
- if (ret) {
- DPRINT(("invalid config pmc38=0x%lx pmc41=0x%lx pmc32=0x%lx\n", val38, val41, val32));
- return -EINVAL;
- }
- }
- *val = tmpval;
- return 0;
-}
-
-/*
- * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
- */
-static pmu_config_t pmu_conf_mont={
- .pmu_name = "Montecito",
- .pmu_family = 0x20,
- .flags = PFM_PMU_IRQ_RESEND,
- .ovfl_val = (1UL << 47) - 1,
- .pmd_desc = pfm_mont_pmd_desc,
- .pmc_desc = pfm_mont_pmc_desc,
- .num_ibrs = 8,
- .num_dbrs = 8,
- .use_rr_dbregs = 1 /* debug register are use for range retrictions */
-};
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 4ebbfa076a26..7e1a1525e202 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -338,7 +338,7 @@ copy_thread(unsigned long clone_flags, unsigned long user_stack_base,
ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
if (unlikely(!user_stack_base)) {
/* fork_idle() called us */
return 0;
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index e67b22fc3c60..c1b299760bf7 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -341,7 +341,8 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
* need to push through a forced SIGSEGV.
*/
while (1) {
- get_signal(&ksig);
+ if (!get_signal(&ksig))
+ break;
/*
* get_signal() may have run a debugger (via notify_parent())
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 093040f7e626..49b488580939 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -45,6 +45,7 @@
#include <asm/cache.h>
#include <asm/current.h>
#include <asm/delay.h>
+#include <asm/efi.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mca.h>
diff --git a/arch/ia64/kernel/syscalls/Makefile b/arch/ia64/kernel/syscalls/Makefile
index 813a58cba39c..bf4bda0f63eb 100644
--- a/arch/ia64/kernel/syscalls/Makefile
+++ b/arch/ia64/kernel/syscalls/Makefile
@@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm
_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
-syscall := $(srctree)/$(src)/syscall.tbl
+syscall := $(src)/syscall.tbl
syshdr := $(srctree)/$(src)/syscallhdr.sh
systbl := $(srctree)/$(src)/syscalltbl.sh
@@ -22,19 +22,20 @@ quiet_cmd_systbl = SYSTBL $@
'$(systbl_offset_$(basetarget))'
syshdr_offset_unistd_64 := __NR_Linux
-$(uapi)/unistd_64.h: $(syscall) $(syshdr)
+$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
systbl_offset_syscall_table := 1024
-$(kapi)/syscall_table.h: $(syscall) $(systbl)
+$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
uapisyshdr-y += unistd_64.h
kapisyshdr-y += syscall_table.h
-targets += $(uapisyshdr-y) $(kapisyshdr-y)
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
PHONY += all
-all: $(addprefix $(uapi)/,$(uapisyshdr-y))
-all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+all: $(uapisyshdr-y) $(kapisyshdr-y)
@:
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index bfc00f2bd437..d89231166e19 100644
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -362,3 +362,4 @@
439 common faccessat2 sys_faccessat2
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index ed9fc3d057a6..fa9c0ab8c6fc 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -26,6 +26,7 @@
#include <linux/sched/cputime.h>
#include <asm/delay.h>
+#include <asm/efi.h>
#include <asm/hw_irq.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
@@ -171,29 +172,34 @@ void vtime_account_hardirq(struct task_struct *tsk)
static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
- unsigned long cur_itm, new_itm, ticks;
+ unsigned long new_itm;
if (cpu_is_offline(smp_processor_id())) {
return IRQ_HANDLED;
}
new_itm = local_cpu_data->itm_next;
- cur_itm = ia64_get_itc();
- if (!time_after(cur_itm, new_itm)) {
+ if (!time_after(ia64_get_itc(), new_itm))
printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
- cur_itm, new_itm);
- ticks = 1;
- } else {
- ticks = DIV_ROUND_UP(cur_itm - new_itm,
- local_cpu_data->itm_delta);
- new_itm += ticks * local_cpu_data->itm_delta;
- }
+ ia64_get_itc(), new_itm);
+
+ while (1) {
+ new_itm += local_cpu_data->itm_delta;
+
+ legacy_timer_tick(smp_processor_id() == time_keeper_id);
- if (smp_processor_id() != time_keeper_id)
- ticks = 0;
+ local_cpu_data->itm_next = new_itm;
- legacy_timer_tick(ticks);
+ if (time_after(new_itm, ia64_get_itc()))
+ break;
+
+ /*
+ * Allow IPIs to interrupt the timer loop.
+ */
+ local_irq_enable();
+ local_irq_disable();
+ }
do {
/*
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 0750f367837d..51883a66aeb5 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -20,14 +20,12 @@
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/pgtable.h>
+#include <asm/efi.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
-
-extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);
-
struct uncached_pool {
struct gen_pool *pool;
struct mutex add_chunk_mutex; /* serialize adding a converted chunk */
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index bfc4ecd0a2ab..62fe80a16f42 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -21,6 +21,7 @@
#include <linux/swap.h>
#include <linux/sizes.h>
+#include <asm/efi.h>
#include <asm/meminit.h>
#include <asm/sections.h>
#include <asm/mca.h>
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index c7311131156e..03b3a02375ff 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -24,6 +24,7 @@
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/slab.h>
+#include <asm/efi.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index e76386a3479e..16d0d7d22657 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -27,6 +27,7 @@
#include <linux/swiotlb.h>
#include <asm/dma.h>
+#include <asm/efi.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/patch.h>
@@ -535,18 +536,20 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
/ sizeof(struct page));
if (map_start < map_end)
- memmap_init_zone((unsigned long)(map_end - map_start),
+ memmap_init_range((unsigned long)(map_end - map_start),
args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end),
MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
return 0;
}
-void __meminit
-memmap_init (unsigned long size, int nid, unsigned long zone,
- unsigned long start_pfn)
+void __meminit memmap_init_zone(struct zone *zone)
{
+ int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
+ unsigned long start_pfn = zone->zone_start_pfn;
+ unsigned long size = zone->spanned_pages;
+
if (!vmem_map) {
- memmap_init_zone(size, nid, zone, start_pfn, start_pfn + size,
+ memmap_init_range(size, nid, zone_id, start_pfn, start_pfn + size,
MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
} else {
struct page *start;
@@ -556,7 +559,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
args.start = start;
args.end = start + size;
args.nid = nid;
- args.zone = zone;
+ args.zone = zone_id;
efi_memmap_walk(virtual_memmap_init, &args);
}
diff --git a/arch/ia64/oprofile/Makefile b/arch/ia64/oprofile/Makefile
deleted file mode 100644
index fc7944d462f4..000000000000
--- a/arch/ia64/oprofile/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
diff --git a/arch/ia64/oprofile/backtrace.c b/arch/ia64/oprofile/backtrace.c
deleted file mode 100644
index 6a219a946050..000000000000
--- a/arch/ia64/oprofile/backtrace.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/**
- * @file backtrace.c
- *
- * @remark Copyright 2004 Silicon Graphics Inc. All Rights Reserved.
- * @remark Read the file COPYING
- *
- * @author Greg Banks <gnb@melbourne.sgi.com>
- * @author Keith Owens <kaos@melbourne.sgi.com>
- * Based on work done for the ia64 port of the SGI kernprof patch, which is
- * Copyright (c) 2003-2004 Silicon Graphics Inc. All Rights Reserved.
- */
-
-#include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <asm/ptrace.h>
-
-/*
- * For IA64 we need to perform a complex little dance to get both
- * the struct pt_regs and a synthetic struct switch_stack in place
- * to allow the unwind code to work. This dance requires our unwind
- * using code to be called from a function called from unw_init_running().
- * There we only get a single void* data pointer, so use this struct
- * to hold all the data we need during the unwind.
- */
-typedef struct
-{
- unsigned int depth;
- struct pt_regs *regs;
- struct unw_frame_info frame;
- unsigned long *prev_pfs_loc; /* state for WAR for old spinlock ool code */
-} ia64_backtrace_t;
-
-/* Returns non-zero if the PC is in the Interrupt Vector Table */
-static __inline__ int in_ivt_code(unsigned long pc)
-{
- extern char ia64_ivt[];
- return (pc >= (u_long)ia64_ivt && pc < (u_long)ia64_ivt+32768);
-}
-
-/*
- * Unwind to next stack frame.
- */
-static __inline__ int next_frame(ia64_backtrace_t *bt)
-{
- /*
- * Avoid unsightly console message from unw_unwind() when attempting
- * to unwind through the Interrupt Vector Table which has no unwind
- * information.
- */
- if (in_ivt_code(bt->frame.ip))
- return 0;
-
- /*
- * WAR for spinlock contention from leaf functions. ia64_spinlock_contention_pre3_4
- * has ar.pfs == r0. Leaf functions do not modify ar.pfs so ar.pfs remains
- * as 0, stopping the backtrace. Record the previous ar.pfs when the current
- * IP is in ia64_spinlock_contention_pre3_4 then unwind, if pfs_loc has not changed
- * after unwind then use pt_regs.ar_pfs which is where the real ar.pfs is for
- * leaf functions.
- */
- if (bt->prev_pfs_loc && bt->regs && bt->frame.pfs_loc == bt->prev_pfs_loc)
- bt->frame.pfs_loc = &bt->regs->ar_pfs;
- bt->prev_pfs_loc = NULL;
-
- return unw_unwind(&bt->frame) == 0;
-}
-
-
-static void do_ia64_backtrace(struct unw_frame_info *info, void *vdata)
-{
- ia64_backtrace_t *bt = vdata;
- struct switch_stack *sw;
- int count = 0;
- u_long pc, sp;
-
- sw = (struct switch_stack *)(info+1);
- /* padding from unw_init_running */
- sw = (struct switch_stack *)(((unsigned long)sw + 15) & ~15);
-
- unw_init_frame_info(&bt->frame, current, sw);
-
- /* skip over interrupt frame and oprofile calls */
- do {
- unw_get_sp(&bt->frame, &sp);
- if (sp >= (u_long)bt->regs)
- break;
- if (!next_frame(bt))
- return;
- } while (count++ < 200);
-
- /* finally, grab the actual sample */
- while (bt->depth-- && next_frame(bt)) {
- unw_get_ip(&bt->frame, &pc);
- oprofile_add_trace(pc);
- if (unw_is_intr_frame(&bt->frame)) {
- /*
- * Interrupt received on kernel stack; this can
- * happen when timer interrupt fires while processing
- * a softirq from the tail end of a hardware interrupt
- * which interrupted a system call. Don't laugh, it
- * happens! Splice the backtrace into two parts to
- * avoid spurious cycles in the gprof output.
- */
- /* TODO: split rather than drop the 2nd half */
- break;
- }
- }
-}
-
-void
-ia64_backtrace(struct pt_regs * const regs, unsigned int depth)
-{
- ia64_backtrace_t bt;
- unsigned long flags;
-
- /*
- * On IA64 there is little hope of getting backtraces from
- * user space programs -- the problems of getting the unwind
- * information from arbitrary user programs are extreme.
- */
- if (user_mode(regs))
- return;
-
- bt.depth = depth;
- bt.regs = regs;
- bt.prev_pfs_loc = NULL;
- local_irq_save(flags);
- unw_init_running(do_ia64_backtrace, &bt);
- local_irq_restore(flags);
-}
diff --git a/arch/ia64/oprofile/init.c b/arch/ia64/oprofile/init.c
deleted file mode 100644
index a692ba16a07b..000000000000
--- a/arch/ia64/oprofile/init.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * @file init.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/oprofile.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-
-extern int perfmon_init(struct oprofile_operations *ops);
-extern void perfmon_exit(void);
-extern void ia64_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- ops->backtrace = ia64_backtrace;
- return -ENODEV;
-}
-
-
-void oprofile_arch_exit(void)
-{
-}
diff --git a/arch/ia64/scripts/unwcheck.py b/arch/ia64/scripts/unwcheck.py
index bfd1b671e35f..9581742f0db2 100644
--- a/arch/ia64/scripts/unwcheck.py
+++ b/arch/ia64/scripts/unwcheck.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# Usage: unwcheck.py FILE
diff --git a/arch/m68k/coldfire/clk.c b/arch/m68k/coldfire/clk.c
index 7bc666e482eb..076a9caa9557 100644
--- a/arch/m68k/coldfire/clk.c
+++ b/arch/m68k/coldfire/clk.c
@@ -90,6 +90,10 @@ EXPORT_SYMBOL(clk_get);
int clk_enable(struct clk *clk)
{
unsigned long flags;
+
+ if (!clk)
+ return -EINVAL;
+
spin_lock_irqsave(&clk_lock, flags);
if ((clk->enabled++ == 0) && clk->clk_ops)
clk->clk_ops->enable(clk);
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 19b40b6bc4b7..786656090c50 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -128,6 +128,7 @@ CONFIG_NFT_SYNPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NETFILTER_XT_SET=m
@@ -655,7 +656,9 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_BITFIELD_KUNIT=m
+CONFIG_RESOURCE_KUNIT_TEST=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CMDLINE_KUNIT_TEST=m
CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 07516abe0489..9bb12be4a38e 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -124,6 +124,7 @@ CONFIG_NFT_SYNPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NETFILTER_XT_SET=m
@@ -611,7 +612,9 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_BITFIELD_KUNIT=m
+CONFIG_RESOURCE_KUNIT_TEST=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CMDLINE_KUNIT_TEST=m
CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index cc901c4e9492..413232626d9d 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -131,6 +131,7 @@ CONFIG_NFT_SYNPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NETFILTER_XT_SET=m
@@ -633,7 +634,9 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_BITFIELD_KUNIT=m
+CONFIG_RESOURCE_KUNIT_TEST=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CMDLINE_KUNIT_TEST=m
CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index fc9a94aa7d6b..819cc70b06d8 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -121,6 +121,7 @@ CONFIG_NFT_SYNPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NETFILTER_XT_SET=m
@@ -604,7 +605,9 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_BITFIELD_KUNIT=m
+CONFIG_RESOURCE_KUNIT_TEST=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CMDLINE_KUNIT_TEST=m
CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 260f1206c810..8f8d5968713b 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -123,6 +123,7 @@ CONFIG_NFT_SYNPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NETFILTER_XT_SET=m
@@ -613,7 +614,9 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_BITFIELD_KUNIT=m
+CONFIG_RESOURCE_KUNIT_TEST=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CMDLINE_KUNIT_TEST=m
CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index f6d50b3fe8c2..bf15e6c1c939 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -122,6 +122,7 @@ CONFIG_NFT_SYNPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NETFILTER_XT_SET=m
@@ -636,7 +637,9 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_BITFIELD_KUNIT=m
+CONFIG_RESOURCE_KUNIT_TEST=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CMDLINE_KUNIT_TEST=m
CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index fbe000ca0003..5466d48fcd9d 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -142,6 +142,7 @@ CONFIG_NFT_SYNPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NETFILTER_XT_SET=m
@@ -722,7 +723,9 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_BITFIELD_KUNIT=m
+CONFIG_RESOURCE_KUNIT_TEST=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CMDLINE_KUNIT_TEST=m
CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 25ca836a5701..93c305918838 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -120,6 +120,7 @@ CONFIG_NFT_SYNPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NETFILTER_XT_SET=m
@@ -603,7 +604,9 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_BITFIELD_KUNIT=m
+CONFIG_RESOURCE_KUNIT_TEST=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CMDLINE_KUNIT_TEST=m
CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 5794e43a2acb..cacd6c617f69 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -121,6 +121,7 @@ CONFIG_NFT_SYNPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NETFILTER_XT_SET=m
@@ -604,7 +605,9 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_BITFIELD_KUNIT=m
+CONFIG_RESOURCE_KUNIT_TEST=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CMDLINE_KUNIT_TEST=m
CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index dbfb18938e11..3ae421cb24a4 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -122,6 +122,7 @@ CONFIG_NFT_SYNPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NETFILTER_XT_SET=m
@@ -622,7 +623,9 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_BITFIELD_KUNIT=m
+CONFIG_RESOURCE_KUNIT_TEST=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CMDLINE_KUNIT_TEST=m
CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index e6afbeee7c4a..6da97e28c48e 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -118,6 +118,7 @@ CONFIG_NFT_SYNPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NETFILTER_XT_SET=m
@@ -605,7 +606,9 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_BITFIELD_KUNIT=m
+CONFIG_RESOURCE_KUNIT_TEST=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CMDLINE_KUNIT_TEST=m
CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 5340507a9fff..f54481bb789a 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -118,6 +118,7 @@ CONFIG_NFT_SYNPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NETFILTER_XT_SET=m
@@ -605,7 +606,9 @@ CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_BITFIELD_KUNIT=m
+CONFIG_RESOURCE_KUNIT_TEST=m
CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_CMDLINE_KUNIT_TEST=m
CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 92d26c812441..ba808543161a 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -61,7 +61,7 @@ struct nfhd_device {
static blk_qc_t nfhd_submit_bio(struct bio *bio)
{
- struct nfhd_device *dev = bio->bi_disk->private_data;
+ struct nfhd_device *dev = bio->bi_bdev->bd_disk->private_data;
struct bio_vec bvec;
struct bvec_iter iter;
int dir, len, shift;
diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h
index 6116d7094292..97087dd3ca6d 100644
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -62,7 +62,7 @@ extern unsigned long _ramend;
#include <asm/page_no.h>
#endif
-#ifdef CONFIG_DISCONTIGMEM
+#if !defined(CONFIG_MMU) || defined(CONFIG_DISCONTIGMEM)
#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
#define __pfn_to_phys(pfn) PFN_PHYS(pfn)
#endif
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 08359a6e058f..da83cc83e791 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -157,7 +157,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
*/
p->thread.fs = get_fs().seg;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
/* kernel thread */
memset(frame, 0, sizeof(struct fork_frame));
frame->regs.sr = PS_S;
diff --git a/arch/m68k/kernel/syscalls/Makefile b/arch/m68k/kernel/syscalls/Makefile
index 659faefdcb1d..285aaba832d9 100644
--- a/arch/m68k/kernel/syscalls/Makefile
+++ b/arch/m68k/kernel/syscalls/Makefile
@@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm
_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
-syscall := $(srctree)/$(src)/syscall.tbl
+syscall := $(src)/syscall.tbl
syshdr := $(srctree)/$(src)/syscallhdr.sh
systbl := $(srctree)/$(src)/syscalltbl.sh
@@ -21,18 +21,19 @@ quiet_cmd_systbl = SYSTBL $@
'$(systbl_abi_$(basetarget))' \
'$(systbl_offset_$(basetarget))'
-$(uapi)/unistd_32.h: $(syscall) $(syshdr)
+$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-$(kapi)/syscall_table.h: $(syscall) $(systbl)
+$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
uapisyshdr-y += unistd_32.h
kapisyshdr-y += syscall_table.h
-targets += $(uapisyshdr-y) $(kapisyshdr-y)
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
PHONY += all
-all: $(addprefix $(uapi)/,$(uapisyshdr-y))
-all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+all: $(uapisyshdr-y) $(kapisyshdr-y)
@:
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index 7fe4e45c864c..72bde6707dd3 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -441,3 +441,4 @@
439 common faccessat2 sys_faccessat2
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index f82795592ce5..0660f47012bc 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -30,7 +30,6 @@ config MICROBLAZE
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
- select HAVE_OPROFILE
select HAVE_PCI
select IRQ_DOMAIN
select XILINX_INTC
@@ -39,7 +38,6 @@ config MICROBLAZE
select OF_EARLY_FLATTREE
select PCI_DOMAINS_GENERIC if PCI
select PCI_SYSCALL if PCI
- select TRACING_SUPPORT
select VIRT_TO_BUS
select CPU_NO_EFFICIENT_FFS
select MMU_GATHER_NO_RANGE
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index bb980891816d..b41f323e1fde 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -54,8 +54,6 @@ core-y += arch/microblaze/kernel/
core-y += arch/microblaze/mm/
core-$(CONFIG_PCI) += arch/microblaze/pci/
-drivers-$(CONFIG_OPROFILE) += arch/microblaze/oprofile/
-
boot := arch/microblaze/boot
# Are we making a simpleImage.<boardname> target? If so, crack out the boardname
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
index 9f12e3c2bb42..e5db3a57b9e3 100644
--- a/arch/microblaze/kernel/module.c
+++ b/arch/microblaze/kernel/module.c
@@ -24,9 +24,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
Elf32_Sym *sym;
unsigned long int *location;
unsigned long int value;
-#if __GNUC__ < 4
- unsigned long int old_value;
-#endif
pr_debug("Applying add relocation section %u to %u\n",
relsec, sechdrs[relsec].sh_info);
@@ -49,40 +46,17 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
*/
case R_MICROBLAZE_32:
-#if __GNUC__ < 4
- old_value = *location;
- *location = value + old_value;
-
- pr_debug("R_MICROBLAZE_32 (%08lx->%08lx)\n",
- old_value, value);
-#else
*location = value;
-#endif
break;
case R_MICROBLAZE_64:
-#if __GNUC__ < 4
- /* Split relocs only required/used pre gcc4.1.1 */
- old_value = ((location[0] & 0x0000FFFF) << 16) |
- (location[1] & 0x0000FFFF);
- value += old_value;
-#endif
location[0] = (location[0] & 0xFFFF0000) |
(value >> 16);
location[1] = (location[1] & 0xFFFF0000) |
(value & 0xFFFF);
-#if __GNUC__ < 4
- pr_debug("R_MICROBLAZE_64 (%08lx->%08lx)\n",
- old_value, value);
-#endif
break;
case R_MICROBLAZE_64_PCREL:
-#if __GNUC__ < 4
- old_value = (location[0] & 0xFFFF) << 16 |
- (location[1] & 0xFFFF);
- value -= old_value;
-#endif
value -= (unsigned long int)(location) + 4;
location[0] = (location[0] & 0xFFFF0000) |
(value >> 16);
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 657c2beb665e..62aa237180b6 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -59,7 +59,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
struct pt_regs *childregs = task_pt_regs(p);
struct thread_info *ti = task_thread_info(p);
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
/* if we're creating a new kernel thread then just zeroing all
* the registers. That's OK for a brand new thread.*/
memset(childregs, 0, sizeof(struct pt_regs));
diff --git a/arch/microblaze/kernel/syscalls/Makefile b/arch/microblaze/kernel/syscalls/Makefile
index 659faefdcb1d..285aaba832d9 100644
--- a/arch/microblaze/kernel/syscalls/Makefile
+++ b/arch/microblaze/kernel/syscalls/Makefile
@@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm
_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
-syscall := $(srctree)/$(src)/syscall.tbl
+syscall := $(src)/syscall.tbl
syshdr := $(srctree)/$(src)/syscallhdr.sh
systbl := $(srctree)/$(src)/syscalltbl.sh
@@ -21,18 +21,19 @@ quiet_cmd_systbl = SYSTBL $@
'$(systbl_abi_$(basetarget))' \
'$(systbl_offset_$(basetarget))'
-$(uapi)/unistd_32.h: $(syscall) $(syshdr)
+$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-$(kapi)/syscall_table.h: $(syscall) $(systbl)
+$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
uapisyshdr-y += unistd_32.h
kapisyshdr-y += syscall_table.h
-targets += $(uapisyshdr-y) $(kapisyshdr-y)
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
PHONY += all
-all: $(addprefix $(uapi)/,$(uapisyshdr-y))
-all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+all: $(uapisyshdr-y) $(kapisyshdr-y)
@:
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index a522adf194ab..d603a5ec9338 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -447,3 +447,4 @@
439 common faccessat2 sys_faccessat2
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index df07b3d06cd6..fb31747ec092 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -45,7 +45,7 @@ SECTIONS {
_etext = . ;
}
- . = ALIGN (4) ;
+ . = ALIGN (8) ;
__fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET) {
_fdt_start = . ; /* place for fdt blob */
*(__fdt_blob) ; /* Any link-placed DTB */
diff --git a/arch/microblaze/oprofile/Makefile b/arch/microblaze/oprofile/Makefile
deleted file mode 100644
index 107f2f55d995..000000000000
--- a/arch/microblaze/oprofile/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# arch/microblaze/oprofile/Makefile
-#
-
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) microblaze_oprofile.o
diff --git a/arch/microblaze/oprofile/microblaze_oprofile.c b/arch/microblaze/oprofile/microblaze_oprofile.c
deleted file mode 100644
index def17e59888e..000000000000
--- a/arch/microblaze/oprofile/microblaze_oprofile.c
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Microblaze oprofile code
- *
- * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2009 PetaLogix
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/oprofile.h>
-#include <linux/init.h>
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- return -1;
-}
-
-void oprofile_arch_exit(void)
-{
-}
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index 5483e38b5dc7..e4f6e49417a9 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -18,6 +18,7 @@ platform-$(CONFIG_MACH_LOONGSON2EF) += loongson2ef/
platform-$(CONFIG_MACH_LOONGSON32) += loongson32/
platform-$(CONFIG_MACH_LOONGSON64) += loongson64/
platform-$(CONFIG_MIPS_MALTA) += mti-malta/
+platform-$(CONFIG_MACH_NINTENDO64) += n64/
platform-$(CONFIG_NLM_COMMON) += netlogic/
platform-$(CONFIG_PIC32MZDA) += pic32/
platform-$(CONFIG_MACH_PISTACHIO) += pistachio/
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 0a17bedf4f0d..d89efba3d8a4 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -18,6 +18,7 @@ config MIPS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_IPC_PARSE_VERSION
+ select ARCH_WANT_LD_ORPHAN_WARN
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS
select CPU_NO_EFFICIENT_FFS if (TARGET_ISA_REV < 1)
@@ -42,7 +43,7 @@ config MIPS
select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_COMPILER_H
select HAVE_ARCH_JUMP_LABEL
- select HAVE_ARCH_KGDB
+ select HAVE_ARCH_KGDB if MIPS_FP_SUPPORT
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
select HAVE_ARCH_SECCOMP_FILTER
@@ -74,8 +75,9 @@ config MIPS
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
- select HAVE_OPROFILE
select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
select HAVE_SPARSE_SYSCALL_NR
@@ -92,6 +94,7 @@ config MIPS
select SET_FS
select SYSCTL_EXCEPTION_TRACE
select VIRT_TO_BUS
+ select ARCH_HAS_ELFCORE_COMPAT
config MIPS_FIXUP_BIGPHYS_ADDR
bool
@@ -123,6 +126,7 @@ choice
config MIPS_GENERIC_KERNEL
bool "Generic board-agnostic MIPS kernel"
+ select ARCH_HAS_SETUP_DMA_OPS
select MIPS_GENERIC
select BOOT_RAW
select BUILTIN_DTB
@@ -132,7 +136,7 @@ config MIPS_GENERIC_KERNEL
select CPU_MIPSR2_IRQ_EI
select CPU_MIPSR2_IRQ_VI
select CSRC_R4K
- select DMA_PERDEV_COHERENT
+ select DMA_NONCOHERENT
select HAVE_PCI
select IRQ_MIPS_CPU
select MIPS_AUTO_PFN_OFFSET
@@ -181,7 +185,7 @@ config MIPS_ALCHEMY
select CEVT_R4K
select CSRC_R4K
select IRQ_MIPS_CPU
- select DMA_MAYBE_COHERENT # Au1000,1500,1100 aren't, rest is
+ select DMA_NONCOHERENT # Au1000,1500,1100 aren't, rest is
select MIPS_FIXUP_BIGPHYS_ADDR if PCI
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_32BIT_KERNEL
@@ -408,6 +412,7 @@ config MACH_JAZZ
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_100HZ
+ select SYS_SUPPORTS_LITTLE_ENDIAN
help
This a family of machines based on the MIPS R4030 chipset which was
used by several vendors to build RISC/os and Windows NT workstations.
@@ -491,8 +496,6 @@ config MACH_LOONGSON64
select SYS_SUPPORTS_ZBOOT
select SYS_SUPPORTS_RELOCATABLE
select ZONE_DMA32
- select NUMA
- select SMP
select COMMON_CLK
select USE_OF
select BUILTIN_DTB
@@ -546,7 +549,7 @@ config MIPS_MALTA
select CLKSRC_MIPS_GIC
select COMMON_CLK
select CSRC_R4K
- select DMA_MAYBE_COHERENT
+ select DMA_NONCOHERENT
select GENERIC_ISA_DMA
select HAVE_PCSPKR_PLATFORM
select HAVE_PCI
@@ -608,6 +611,18 @@ config MACH_VR41XX
select SYS_SUPPORTS_MIPS16
select GPIOLIB
+config MACH_NINTENDO64
+ bool "Nintendo 64 console"
+ select CEVT_R4K
+ select CSRC_R4K
+ select SYS_HAS_CPU_R4300
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_ZBOOT
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select DMA_NONCOHERENT
+ select IRQ_MIPS_CPU
+
config RALINK
bool "Ralink based machines"
select CEVT_R4K
@@ -627,6 +642,27 @@ config RALINK
select ARCH_HAS_RESET_CONTROLLER
select RESET_CONTROLLER
+config MACH_REALTEK_RTL
+ bool "Realtek RTL838x/RTL839x based machines"
+ select MIPS_GENERIC
+ select DMA_NONCOHERENT
+ select IRQ_MIPS_CPU
+ select CSRC_R4K
+ select CEVT_R4K
+ select SYS_HAS_CPU_MIPS32_R1
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_MIPS16
+ select SYS_SUPPORTS_MULTITHREADING
+ select SYS_SUPPORTS_VPE_LOADER
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_HAS_EARLY_PRINTK_8250
+ select USE_GENERIC_EARLY_PRINTK_8250
+ select BOOT_RAW
+ select PINCTRL
+ select USE_OF
+
config SGI_IP22
bool "SGI IP22 (Indy/Indigo2)"
select ARC_MEMORY
@@ -1127,11 +1163,6 @@ config FW_CFE
config ARCH_SUPPORTS_UPROBES
bool
-config DMA_MAYBE_COHERENT
- select ARCH_HAS_DMA_COHERENCE_H
- select DMA_NONCOHERENT
- bool
-
config DMA_PERDEV_COHERENT
bool
select ARCH_HAS_SETUP_DMA_OPS
@@ -1258,9 +1289,6 @@ config SYS_SUPPORTS_HUGETLBFS
config MIPS_HUGE_TLB_SUPPORT
def_bool HUGETLB_PAGE || TRANSPARENT_HUGEPAGE
-config IRQ_CPU_RM7K
- bool
-
config IRQ_MSP_SLP
bool
@@ -1664,6 +1692,15 @@ config CPU_VR41XX
kernel built with this option will not run on any other type of
processor or vice versa.
+config CPU_R4300
+ bool "R4300"
+ depends on SYS_HAS_CPU_R4300
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_HAS_LOAD_STORE_LR
+ help
+ MIPS Technologies R4300-series processors.
+
config CPU_R4X00
bool "R4x00"
depends on SYS_HAS_CPU_R4X00
@@ -1998,6 +2035,9 @@ config SYS_HAS_CPU_TX39XX
config SYS_HAS_CPU_VR41XX
bool
+config SYS_HAS_CPU_R4300
+ bool
+
config SYS_HAS_CPU_R4X00
bool
@@ -2182,7 +2222,7 @@ endchoice
config KVM_GUEST
bool "KVM Guest Kernel"
depends on CPU_MIPS32_R2
- depends on BROKEN_ON_SMP
+ depends on !64BIT && BROKEN_ON_SMP
help
Select this option if building a guest kernel for KVM (Trap & Emulate)
mode.
@@ -2758,6 +2798,7 @@ config ARCH_SPARSEMEM_ENABLE
config NUMA
bool "NUMA Support"
depends on SYS_SUPPORTS_NUMA
+ select SMP
help
Say Y to compile the kernel to support NUMA (Non-Uniform Memory
Access). This option improves performance on systems with more
@@ -2844,7 +2885,7 @@ config NODES_SHIFT
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
- depends on PERF_EVENTS && !OPROFILE && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP || CPU_LOONGSON64)
+ depends on PERF_EVENTS && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP || CPU_LOONGSON64)
default y
help
Enable hardware performance counter support for perf events. If
@@ -3300,11 +3341,6 @@ config MIPS32_N32
If unsure, say N.
-config BINFMT_ELF32
- bool
- default y if MIPS32_O32 || MIPS32_N32
- select ELFCORE
-
menu "Power management options"
config ARCH_HIBERNATION_POSSIBLE
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index cd4343edeb11..e71d587af49c 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -136,11 +136,31 @@ cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
#
cflags-y += -fno-stack-check
+# binutils from v2.35 when built with --enable-mips-fix-loongson3-llsc=yes,
+# supports an -mfix-loongson3-llsc flag which emits a sync prior to each ll
+# instruction to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h
+# for a description).
+#
+# We disable this in order to prevent the assembler meddling with the
+# instruction that labels refer to, ie. if we label an ll instruction:
+#
+# 1: ll v0, 0(a0)
+#
+# ...then with the assembler fix applied the label may actually point at a sync
+# instruction inserted by the assembler, and if we were using the label in an
+# exception table the table would no longer contain the address of the ll
+# instruction.
+#
+# Avoid this by explicitly disabling that assembler behaviour.
+#
+cflags-y += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
+
#
# CPU-dependent compiler/assembler options for optimization.
#
cflags-$(CONFIG_CPU_R3000) += -march=r3000
cflags-$(CONFIG_CPU_TX39XX) += -march=r3900
+cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap
cflags-$(CONFIG_CPU_VR41XX) += -march=r4100 -Wa,--trap
cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap
cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap
@@ -316,7 +336,6 @@ libs-$(CONFIG_MIPS_FP_SUPPORT) += arch/mips/math-emu/
core-y += arch/mips/
drivers-y += arch/mips/crypto/
-drivers-$(CONFIG_OPROFILE) += arch/mips/oprofile/
# suspend and hibernation support
drivers-$(CONFIG_PM) += arch/mips/power/
diff --git a/arch/mips/alchemy/common/prom.c b/arch/mips/alchemy/common/prom.c
index d910c0a64de9..b13d8adf3be4 100644
--- a/arch/mips/alchemy/common/prom.c
+++ b/arch/mips/alchemy/common/prom.c
@@ -143,7 +143,3 @@ int __init prom_get_ethernet_addr(char *ethernet_addr)
return 0;
}
-
-void __init prom_free_prom_memory(void)
-{
-}
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index 0f60efe0481e..2388d68786f4 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
@@ -28,8 +28,8 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/mm.h>
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
-#include <asm/dma-coherence.h>
#include <asm/mipsregs.h>
#include <au1000.h>
@@ -37,6 +37,23 @@
extern void __init board_setup(void);
extern void __init alchemy_set_lpj(void);
+static bool alchemy_dma_coherent(void)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ return false;
+ case ALCHEMY_CPU_AU1200:
+ /* Au1200 AB USB does not support coherent memory */
+ if ((read_c0_prid() & PRID_REV_MASK) == 0)
+ return false;
+ return true;
+ default:
+ return true;
+ }
+}
+
void __init plat_mem_setup(void)
{
alchemy_set_lpj();
@@ -48,20 +65,7 @@ void __init plat_mem_setup(void)
/* Clear to obtain best system bus performance */
clear_c0_config(1 << 19); /* Clear Config[OD] */
- hw_coherentio = 0;
- coherentio = IO_COHERENCE_ENABLED;
- switch (alchemy_get_cputype()) {
- case ALCHEMY_CPU_AU1000:
- case ALCHEMY_CPU_AU1500:
- case ALCHEMY_CPU_AU1100:
- coherentio = IO_COHERENCE_DISABLED;
- break;
- case ALCHEMY_CPU_AU1200:
- /* Au1200 AB USB does not support coherent memory */
- if (0 == (read_c0_prid() & PRID_REV_MASK))
- coherentio = IO_COHERENCE_DISABLED;
- break;
- }
+ dma_default_coherent = alchemy_dma_coherent();
board_setup(); /* board specific setup */
diff --git a/arch/mips/ar7/memory.c b/arch/mips/ar7/memory.c
index 787716c5e946..ce8024c1a54e 100644
--- a/arch/mips/ar7/memory.c
+++ b/arch/mips/ar7/memory.c
@@ -49,8 +49,3 @@ void __init prom_meminit(void)
pages = memsize() >> PAGE_SHIFT;
memblock_add(PHYS_OFFSET, pages << PAGE_SHIFT);
}
-
-void __init prom_free_prom_memory(void)
-{
- /* Nothing to free */
-}
diff --git a/arch/mips/ath25/prom.c b/arch/mips/ath25/prom.c
index edf82be8870d..4466e14feaa4 100644
--- a/arch/mips/ath25/prom.c
+++ b/arch/mips/ath25/prom.c
@@ -20,7 +20,3 @@
void __init prom_init(void)
{
}
-
-void __init prom_free_prom_memory(void)
-{
-}
diff --git a/arch/mips/ath79/prom.c b/arch/mips/ath79/prom.c
index 25724b4e97fd..cc6dc5600677 100644
--- a/arch/mips/ath79/prom.c
+++ b/arch/mips/ath79/prom.c
@@ -32,8 +32,3 @@ void __init prom_init(void)
}
#endif
}
-
-void __init prom_free_prom_memory(void)
-{
- /* We do not have to prom memory to free */
-}
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 7e7bf9c2ad26..891f495c4c3c 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -213,16 +213,17 @@ unsigned int get_c0_compare_int(void)
void __init plat_mem_setup(void)
{
- unsigned long fdt_start;
+ void *dtb;
set_io_port_base(KSEG1);
/* Get the position of the FDT passed by the bootloader */
- fdt_start = fw_getenvl("fdt_start");
- if (fdt_start)
- __dt_setup_arch((void *)KSEG0ADDR(fdt_start));
- else if (fw_passed_dtb)
- __dt_setup_arch((void *)KSEG0ADDR(fw_passed_dtb));
+ dtb = (void *)fw_getenvl("fdt_start");
+ if (dtb == NULL)
+ dtb = get_fdt();
+
+ if (dtb)
+ __dt_setup_arch((void *)KSEG0ADDR(dtb));
ath79_reset_base = ioremap(AR71XX_RESET_BASE,
AR71XX_RESET_SIZE);
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
index 3e2a8166377f..0a63721d0fbf 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -113,10 +113,6 @@ void __init prom_init(void)
setup_8250_early_printk_port(CKSEG1ADDR(BCM47XX_SERIAL_ADDR), 0, 0);
}
-void __init prom_free_prom_memory(void)
-{
-}
-
#if defined(CONFIG_BCM47XX_BCMA) && defined(CONFIG_HIGHMEM)
#define EXTVBASE 0xc0000000
diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c
index df69eaa453a1..c3a2ea62c5c3 100644
--- a/arch/mips/bcm63xx/prom.c
+++ b/arch/mips/bcm63xx/prom.c
@@ -94,7 +94,3 @@ void __init prom_init(void)
*/
}
}
-
-void __init prom_free_prom_memory(void)
-{
-}
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index 19308df5f577..31bcfa4e08b9 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -129,10 +129,6 @@ void __init prom_init(void)
register_bmips_smp_ops();
}
-void __init prom_free_prom_memory(void)
-{
-}
-
const char *get_system_type(void)
{
return "Generic BMIPS kernel";
@@ -165,11 +161,10 @@ void __init plat_mem_setup(void)
/* intended to somewhat resemble ARM; see Documentation/arm/booting.rst */
if (fw_arg0 == 0 && fw_arg1 == 0xffffffff)
dtb = phys_to_virt(fw_arg2);
- else if (fw_passed_dtb) /* UHI interface or appended dtb */
- dtb = (void *)fw_passed_dtb;
- else if (__dtb_start != __dtb_end)
- dtb = (void *)__dtb_start;
else
+ dtb = get_fdt();
+
+ if (!dtb)
panic("no dtb found");
__dt_setup_arch(dtb);
@@ -201,4 +196,4 @@ static int __init plat_dev_init(void)
return 0;
}
-device_initcall(plat_dev_init);
+arch_initcall(plat_dev_init);
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 47cd9dc7454a..f93f72bcba97 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -37,6 +37,7 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
# decompressor objects (linked with vmlinuz)
vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o
diff --git a/arch/mips/boot/compressed/head.S b/arch/mips/boot/compressed/head.S
index 409cb483a9ff..5795d0af1e1b 100644
--- a/arch/mips/boot/compressed/head.S
+++ b/arch/mips/boot/compressed/head.S
@@ -15,10 +15,7 @@
#include <asm/asm.h>
#include <asm/regdef.h>
- .set noreorder
- .cprestore
LEAF(start)
-start:
/* Save boot rom start args */
move s0, a0
move s1, a1
@@ -29,27 +26,26 @@ start:
PTR_LA a0, _edata
PTR_LA a2, _end
1: sw zero, 0(a0)
+ addiu a0, a0, 4
bne a2, a0, 1b
- addiu a0, a0, 4
PTR_LA a0, (.heap) /* heap address */
PTR_LA sp, (.stack + 8192) /* stack address */
- PTR_LA ra, 2f
- PTR_LA k0, decompress_kernel
- jr k0
- nop
+ PTR_LA t9, decompress_kernel
+ jalr t9
+
2:
move a0, s0
move a1, s1
move a2, s2
move a3, s3
- PTR_LI k0, KERNEL_ENTRY
- jr k0
- nop
+ PTR_LI t9, KERNEL_ENTRY
+ jalr t9
+
3:
b 3b
- nop
+
END(start)
.comm .heap,BOOT_HEAP_SIZE,4
diff --git a/arch/mips/boot/dts/Makefile b/arch/mips/boot/dts/Makefile
index 0259238d7a2e..60bd7d2a9ad8 100644
--- a/arch/mips/boot/dts/Makefile
+++ b/arch/mips/boot/dts/Makefile
@@ -14,6 +14,7 @@ subdir-$(CONFIG_FIT_IMAGE_FDT_NI169445) += ni
subdir-$(CONFIG_MACH_PIC32) += pic32
subdir-$(CONFIG_ATH79) += qca
subdir-$(CONFIG_RALINK) += ralink
+subdir-$(CONFIG_MACH_REALTEK_RTL) += realtek
subdir-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += xilfpga
obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y))
diff --git a/arch/mips/boot/dts/realtek/Makefile b/arch/mips/boot/dts/realtek/Makefile
new file mode 100644
index 000000000000..fba4e93187a6
--- /dev/null
+++ b/arch/mips/boot/dts/realtek/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-y += cisco_sg220-26.dtb
diff --git a/arch/mips/boot/dts/realtek/cisco_sg220-26.dts b/arch/mips/boot/dts/realtek/cisco_sg220-26.dts
new file mode 100644
index 000000000000..1cdbb09297ef
--- /dev/null
+++ b/arch/mips/boot/dts/realtek/cisco_sg220-26.dts
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause
+
+/dts-v1/;
+
+#include "rtl83xx.dtsi"
+#include "rtl838x.dtsi"
+
+/ {
+ model = "Cisco SG220-26";
+ compatible = "cisco,sg220-26", "realtek,rtl8382-soc";
+
+ chosen {
+ stdout-path = "serial0:9600n8";
+ bootargs = "earlycon console=ttyS0,9600";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x8000000>;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/realtek/rtl838x.dtsi b/arch/mips/boot/dts/realtek/rtl838x.dtsi
new file mode 100644
index 000000000000..6cc4ff5c0d19
--- /dev/null
+++ b/arch/mips/boot/dts/realtek/rtl838x.dtsi
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause
+
+/ {
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "mips,mips4KEc";
+ reg = <0>;
+ clocks = <&baseclk 0>;
+ clock-names = "cpu";
+ };
+ };
+
+ baseclk: baseclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <500000000>;
+ };
+};
diff --git a/arch/mips/boot/dts/realtek/rtl83xx.dtsi b/arch/mips/boot/dts/realtek/rtl83xx.dtsi
new file mode 100644
index 000000000000..de65a111b626
--- /dev/null
+++ b/arch/mips/boot/dts/realtek/rtl83xx.dtsi
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
+ cpuintc: cpuintc {
+ compatible = "mti,cpu-interrupt-controller";
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+
+ soc: soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x18000000 0x10000>;
+
+ uart0: uart@2000 {
+ compatible = "ns16550a";
+ reg = <0x2000 0x100>;
+
+ clock-frequency = <200000000>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <31>;
+
+ reg-io-width = <1>;
+ reg-shift = <2>;
+ fifo-size = <1>;
+ no-loopback-test;
+
+ status = "disabled";
+ };
+
+ uart1: uart@2100 {
+ compatible = "ns16550a";
+ reg = <0x2100 0x100>;
+
+ clock-frequency = <200000000>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <30>;
+
+ reg-io-width = <1>;
+ reg-shift = <2>;
+ fifo-size = <1>;
+ no-loopback-test;
+
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 982826ba0ef7..ce4e2806159b 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -1149,12 +1149,15 @@ void __init device_tree_init(void)
bool do_prune;
bool fill_mac;
- if (fw_passed_dtb) {
- fdt = (void *)fw_passed_dtb;
+#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
+ if (!fdt_check_header(&__appended_dtb)) {
+ fdt = &__appended_dtb;
do_prune = false;
fill_mac = true;
pr_info("Using appended Device Tree.\n");
- } else if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
+ } else
+#endif
+ if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
if (fdt_check_header(fdt))
panic("Corrupt Device Tree passed to kernel.");
diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c
index 46581e686882..2e099d55a564 100644
--- a/arch/mips/cobalt/setup.c
+++ b/arch/mips/cobalt/setup.c
@@ -117,8 +117,3 @@ void __init prom_init(void)
setup_8250_early_printk_port(CKSEG1ADDR(0x1c800000), 0, 0);
}
-
-void __init prom_free_prom_memory(void)
-{
- /* Nothing to do! */
-}
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 023b4e644b1c..5c24ac7fdf56 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -22,7 +22,6 @@ CONFIG_MIPS32_N32=y
# CONFIG_SUSPEND is not set
CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION="/dev/sda3"
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
index 7b1fab518317..1ae48f7d9ddd 100644
--- a/arch/mips/configs/ip32_defconfig
+++ b/arch/mips/configs/ip32_defconfig
@@ -14,7 +14,6 @@ CONFIG_SGI_IP32=y
CONFIG_PCI=y
CONFIG_MIPS32_O32=y
CONFIG_MIPS32_N32=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index 688c91918db2..aaf9d5e0aa2c 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -21,7 +21,6 @@ CONFIG_MIPS32_O32=y
CONFIG_MIPS32_N32=y
CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION="/dev/hda3"
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
index 9c5fadef38cb..0e79f81217bc 100644
--- a/arch/mips/configs/loongson3_defconfig
+++ b/arch/mips/configs/loongson3_defconfig
@@ -31,6 +31,8 @@ CONFIG_PERF_EVENTS=y
CONFIG_MACH_LOONGSON64=y
CONFIG_CPU_HAS_MSA=y
CONFIG_NR_CPUS=16
+CONFIG_NUMA=y
+CONFIG_SMP=y
CONFIG_HZ_256=y
CONFIG_KEXEC=y
CONFIG_MIPS32_O32=y
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index bbe0f39f8088..205d3b34528c 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -17,7 +17,6 @@ CONFIG_PCCARD=m
CONFIG_YENTA=m
CONFIG_PD6729=m
CONFIG_I82092=m
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig
index 72a211d2d556..32c290611723 100644
--- a/arch/mips/configs/nlm_xlp_defconfig
+++ b/arch/mips/configs/nlm_xlp_defconfig
@@ -549,7 +549,6 @@ CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
-CONFIG_UNUSED_SYMBOLS=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_SCHEDSTATS=y
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
index 4ecb157e56d4..bf9b9244929e 100644
--- a/arch/mips/configs/nlm_xlr_defconfig
+++ b/arch/mips/configs/nlm_xlr_defconfig
@@ -500,7 +500,6 @@ CONFIG_CRC7=m
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_UNUSED_SYMBOLS=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_SCHEDSTATS=y
diff --git a/arch/mips/configs/rs90_defconfig b/arch/mips/configs/rs90_defconfig
index 4f540bb94628..7ce3b814fdc8 100644
--- a/arch/mips/configs/rs90_defconfig
+++ b/arch/mips/configs/rs90_defconfig
@@ -30,7 +30,6 @@ CONFIG_PM=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
CONFIG_CPUFREQ_DT=y
-CONFIG_OPROFILE=y
CONFIG_JUMP_LABEL=y
# CONFIG_STACKPROTECTOR is not set
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
index 37625ae5e35d..ef5fc1ca1b5d 100644
--- a/arch/mips/fw/arc/memory.c
+++ b/arch/mips/fw/arc/memory.c
@@ -173,7 +173,7 @@ void __weak __init prom_cleanup(void)
{
}
-void __weak __init prom_free_prom_memory(void)
+void __init prom_free_prom_memory(void)
{
int i;
diff --git a/arch/mips/fw/sni/sniprom.c b/arch/mips/fw/sni/sniprom.c
index 8f6730376a42..74975e115950 100644
--- a/arch/mips/fw/sni/sniprom.c
+++ b/arch/mips/fw/sni/sniprom.c
@@ -87,10 +87,6 @@ void *prom_get_hwconf(void)
return (void *)CKSEG1ADDR(hwconf);
}
-void __init prom_free_prom_memory(void)
-{
-}
-
/*
* /proc/cpuinfo system type
*
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index 66a19337d2ab..1842cddd8356 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -39,14 +39,13 @@ void __init *plat_get_fdt(void)
/* Already set up */
return (void *)fdt;
- if (fw_passed_dtb && !fdt_check_header((void *)fw_passed_dtb)) {
+ fdt = (void *)get_fdt();
+ if (fdt && !fdt_check_header(fdt)) {
/*
* We have been provided with the appropriate device tree for
* the board. Make use of it & search for any machine struct
* based upon the root compatible string.
*/
- fdt = (void *)fw_passed_dtb;
-
for_each_mips_machine(check_mach) {
match = mips_machine_is_compatible(check_mach, fdt);
if (match) {
@@ -202,7 +201,3 @@ void __init arch_init_irq(void)
irqchip_init();
}
-
-void __init prom_free_prom_memory(void)
-{
-}
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 95b4fa7bd0d1..8f6fe69674b7 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -4,6 +4,10 @@ generated-y += syscall_table_32_o32.h
generated-y += syscall_table_64_n32.h
generated-y += syscall_table_64_n64.h
generated-y += syscall_table_64_o32.h
+generated-y += unistd_nr_n32.h
+generated-y += unistd_nr_n64.h
+generated-y += unistd_nr_o32.h
+
generic-y += export.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index 3682d1a0bb80..ea4b62ece336 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -20,10 +20,27 @@
#include <asm/sgidefs.h>
#include <asm/asm-eva.h>
+#ifndef __VDSO__
+/*
+ * Emit CFI data in .debug_frame sections, not .eh_frame sections.
+ * We don't do DWARF unwinding at runtime, so only the offline DWARF
+ * information is useful to anyone. Note we should change this if we
+ * ever decide to enable DWARF unwinding at runtime.
+ */
+#define CFI_SECTIONS .cfi_sections .debug_frame
+#else
+ /*
+ * For the vDSO, emit both runtime unwind information and debug
+ * symbols for the .dbg file.
+ */
+#define CFI_SECTIONS
+#endif
+
/*
* LEAF - declare leaf routine
*/
#define LEAF(symbol) \
+ CFI_SECTIONS; \
.globl symbol; \
.align 2; \
.type symbol, @function; \
@@ -36,6 +53,7 @@ symbol: .frame sp, 0, ra; \
* NESTED - declare nested routine entry point
*/
#define NESTED(symbol, framesize, rpc) \
+ CFI_SECTIONS; \
.globl symbol; \
.align 2; \
.type symbol, @function; \
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index f904084fcb1f..27ad76791539 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -248,7 +248,7 @@ static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
* bltz that can branch to code outside of the LL/SC loop. As \
* such, we don't need to emit another barrier here. \
*/ \
- if (!__SYNC_loongson3_war) \
+ if (__SYNC_loongson3_war == 0) \
smp_mb__after_atomic(); \
\
return result; \
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index a74769940fbd..dc2a6234dd3c 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -26,7 +26,7 @@
#include <asm/war.h>
#define __bit_op(mem, insn, inputs...) do { \
- unsigned long temp; \
+ unsigned long __temp; \
\
asm volatile( \
" .set push \n" \
@@ -37,13 +37,13 @@
" " __SC "%0, %1 \n" \
" " __SC_BEQZ "%0, 1b \n" \
" .set pop \n" \
- : "=&r"(temp), "+" GCC_OFF_SMALL_ASM()(mem) \
+ : "=&r"(__temp), "+" GCC_OFF_SMALL_ASM()(mem) \
: inputs \
: __LLSC_CLOBBER); \
} while (0)
#define __test_bit_op(mem, ll_dst, insn, inputs...) ({ \
- unsigned long orig, temp; \
+ unsigned long __orig, __temp; \
\
asm volatile( \
" .set push \n" \
@@ -54,12 +54,12 @@
" " __SC "%1, %2 \n" \
" " __SC_BEQZ "%1, 1b \n" \
" .set pop \n" \
- : "=&r"(orig), "=&r"(temp), \
+ : "=&r"(__orig), "=&r"(__temp), \
"+" GCC_OFF_SMALL_ASM()(mem) \
: inputs \
: __LLSC_CLOBBER); \
\
- orig; \
+ __orig; \
})
/*
@@ -435,7 +435,7 @@ static inline int fls(unsigned int x)
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * differs in spirit from the below ffz (man ffs).
*/
static inline int ffs(int word)
{
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index aa03b1237155..5be10ece3ef0 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -112,7 +112,27 @@ extern char arcs_cmdline[COMMAND_LINE_SIZE];
extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
#ifdef CONFIG_USE_OF
-extern unsigned long fw_passed_dtb;
+#include <linux/libfdt.h>
+#include <linux/of_fdt.h>
+
+extern char __appended_dtb[];
+
+static inline void *get_fdt(void)
+{
+ if (IS_ENABLED(CONFIG_MIPS_RAW_APPENDED_DTB) ||
+ IS_ENABLED(CONFIG_MIPS_ELF_APPENDED_DTB))
+ if (fdt_magic(&__appended_dtb) == FDT_MAGIC)
+ return &__appended_dtb;
+
+ if (fw_arg0 == -2) /* UHI interface */
+ return (void *)fw_arg1;
+
+ if (IS_ENABLED(CONFIG_BUILTIN_DTB))
+ if (&__dtb_start != &__dtb_end)
+ return &__dtb_start;
+
+ return NULL;
+}
#endif
/*
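The new get_fdt() helper above centralizes device-tree discovery that several platforms previously open-coded (see the ath79, bmips and generic/init.c hunks earlier in this patch). As a rough stand-alone illustration of the precedence it encodes — appended DTB first, then a UHI-passed pointer, then the built-in DTB — here is a hypothetical user-space sketch; the predicate names are stubs, not kernel APIs:

/* Hypothetical stand-alone sketch of the source precedence implemented by
 * get_fdt() above; the stub checks below are illustrative only. */
#include <stdio.h>
#include <stdbool.h>

static bool appended_dtb_valid(void)  { return false; } /* stub: fdt_magic(&__appended_dtb) == FDT_MAGIC */
static bool uhi_args_present(void)    { return true;  } /* stub: fw_arg0 == -2, DTB pointer in fw_arg1 */
static bool builtin_dtb_present(void) { return false; } /* stub: __dtb_start != __dtb_end */

static const char *pick_fdt_source(void)
{
	if (appended_dtb_valid())	/* raw/ELF appended DTB wins */
		return "appended";
	if (uhi_args_present())		/* UHI: bootloader-provided DTB */
		return "UHI";
	if (builtin_dtb_present())	/* fall back to the DTB built into vmlinux */
		return "builtin";
	return "none";
}

int main(void)
{
	printf("DTB source: %s\n", pick_fdt_source());
	return 0;
}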
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index 5f80c28f5253..1e6c1354f245 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -130,6 +130,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
__u32 len, __u8 proto,
__wsum sum)
{
+ unsigned long tmp = (__force unsigned long)sum;
+
__asm__(
" .set push # csum_tcpudp_nofold\n"
" .set noat \n"
@@ -157,7 +159,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
" addu %0, $1 \n"
#endif
" .set pop"
- : "=r" (sum)
+ : "=r" (tmp)
: "0" ((__force unsigned long)daddr),
"r" ((__force unsigned long)saddr),
#ifdef __MIPSEL__
@@ -167,7 +169,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
#endif
"r" ((__force unsigned long)sum));
- return sum;
+ return (__force __wsum)tmp;
}
#define csum_tcpudp_nofold csum_tcpudp_nofold
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 5b0b3a6777ea..ed8f3f3c4304 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -99,7 +99,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
* contains a completion barrier prior to the LL, so we don't \
* need to emit an extra one here. \
*/ \
- if (!__SYNC_loongson3_war) \
+ if (__SYNC_loongson3_war == 0) \
smp_mb__before_llsc(); \
\
__res = (__typeof__(*(ptr))) \
@@ -191,7 +191,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
* contains a completion barrier prior to the LL, so we don't \
* need to emit an extra one here. \
*/ \
- if (!__SYNC_loongson3_war) \
+ if (__SYNC_loongson3_war == 0) \
smp_mb__before_llsc(); \
\
__res = cmpxchg_local((ptr), (old), (new)); \
@@ -201,7 +201,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
* contains a completion barrier after the SC, so we don't \
* need to emit an extra one here. \
*/ \
- if (!__SYNC_loongson3_war) \
+ if (__SYNC_loongson3_war == 0) \
smp_llsc_mb(); \
\
__res; \
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index 3288cef4b168..2be5d7b5de68 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -122,6 +122,11 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_VR4181A:
#endif
+#ifdef CONFIG_SYS_HAS_CPU_R4300
+ case CPU_R4300:
+ case CPU_R4310:
+#endif
+
#ifdef CONFIG_SYS_HAS_CPU_R4X00
case CPU_R4000PC:
case CPU_R4000SC:
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index c9222cc2244f..9e6211e6d76b 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -302,7 +302,7 @@ enum cpu_type_enum {
/*
* R4000 class processors
*/
- CPU_R4000PC, CPU_R4000SC, CPU_R4000MC, CPU_R4200,
+ CPU_R4000PC, CPU_R4000SC, CPU_R4000MC, CPU_R4200, CPU_R4300, CPU_R4310,
CPU_R4400PC, CPU_R4400SC, CPU_R4400MC, CPU_R4600, CPU_R4640, CPU_R4650,
CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R10000,
CPU_R12000, CPU_R14000, CPU_R16000, CPU_VR41XX, CPU_VR4111, CPU_VR4121,
diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h
deleted file mode 100644
index 5eaa1fcc878a..000000000000
--- a/arch/mips/include/asm/dma-coherence.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
- *
- */
-#ifndef __ASM_DMA_COHERENCE_H
-#define __ASM_DMA_COHERENCE_H
-
-enum coherent_io_user_state {
- IO_COHERENCE_DEFAULT,
- IO_COHERENCE_ENABLED,
- IO_COHERENCE_DISABLED,
-};
-
-#if defined(CONFIG_DMA_PERDEV_COHERENT)
-/* Don't provide (hw_)coherentio to avoid misuse */
-#elif defined(CONFIG_DMA_MAYBE_COHERENT)
-extern enum coherent_io_user_state coherentio;
-extern int hw_coherentio;
-
-static inline bool dev_is_dma_coherent(struct device *dev)
-{
- return coherentio == IO_COHERENCE_ENABLED ||
- (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio);
-}
-#else
-#ifdef CONFIG_DMA_NONCOHERENT
-#define coherentio IO_COHERENCE_DISABLED
-#else
-#define coherentio IO_COHERENCE_ENABLED
-#endif
-#define hw_coherentio 0
-#endif /* CONFIG_DMA_MAYBE_COHERENT */
-
-#endif
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index 71c7622025d1..dc8d2863752c 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -201,7 +201,6 @@ struct mips_elf_abiflags_v0 {
uint32_t flags2;
};
-#ifndef ELF_ARCH
/* ELF register definitions */
#define ELF_NGREG 45
#define ELF_NFPREG 33
@@ -219,7 +218,7 @@ void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs);
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
-#define elf_check_arch elfo32_check_arch
+#define elf_check_arch elf32_check_arch
/*
* These are used to set parameters in the core dumps.
@@ -235,7 +234,8 @@ void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs);
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
-#define elf_check_arch elfn64_check_arch
+#define elf_check_arch elf64_check_arch
+#define compat_elf_check_arch elf32_check_arch
/*
* These are used to set parameters in the core dumps.
@@ -257,8 +257,6 @@ void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs);
#endif
#define ELF_ARCH EM_MIPS
-#endif /* !defined(ELF_ARCH) */
-
/*
* In order to be sure that we don't attempt to execute an O32 binary which
* requires 64 bit FP (FR=1) on a system which does not support it we refuse
@@ -277,9 +275,9 @@ void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs);
#define vmcore_elf64_check_arch mips_elf_check_machine
/*
- * Return non-zero if HDR identifies an o32 ELF binary.
+ * Return non-zero if HDR identifies an o32 or n32 ELF binary.
*/
-#define elfo32_check_arch(hdr) \
+#define elf32_check_arch(hdr) \
({ \
int __res = 1; \
struct elfhdr *__h = (hdr); \
@@ -288,21 +286,26 @@ void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs);
__res = 0; \
if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
__res = 0; \
- if ((__h->e_flags & EF_MIPS_ABI2) != 0) \
- __res = 0; \
- if (((__h->e_flags & EF_MIPS_ABI) != 0) && \
- ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) \
- __res = 0; \
- if (__h->e_flags & __MIPS_O32_FP64_MUST_BE_ZERO) \
- __res = 0; \
- \
+ if ((__h->e_flags & EF_MIPS_ABI2) != 0) { \
+ if (!IS_ENABLED(CONFIG_MIPS32_N32) || \
+ (__h->e_flags & EF_MIPS_ABI)) \
+ __res = 0; \
+ } else { \
+ if (IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_MIPS32_O32)) \
+ __res = 0; \
+ if (((__h->e_flags & EF_MIPS_ABI) != 0) && \
+ ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) \
+ __res = 0; \
+ if (__h->e_flags & __MIPS_O32_FP64_MUST_BE_ZERO) \
+ __res = 0; \
+ } \
__res; \
})
/*
* Return non-zero if HDR identifies an n64 ELF binary.
*/
-#define elfn64_check_arch(hdr) \
+#define elf64_check_arch(hdr) \
({ \
int __res = 1; \
struct elfhdr *__h = (hdr); \
@@ -315,25 +318,6 @@ void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs);
__res; \
})
-/*
- * Return non-zero if HDR identifies an n32 ELF binary.
- */
-#define elfn32_check_arch(hdr) \
-({ \
- int __res = 1; \
- struct elfhdr *__h = (hdr); \
- \
- if (!mips_elf_check_machine(__h)) \
- __res = 0; \
- if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
- __res = 0; \
- if (((__h->e_flags & EF_MIPS_ABI2) == 0) || \
- ((__h->e_flags & EF_MIPS_ABI) != 0)) \
- __res = 0; \
- \
- __res; \
-})
-
struct mips_abi;
extern struct mips_abi mips_abi;
@@ -469,9 +453,7 @@ extern const char *__elf_base_platform;
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
-#ifndef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
-#endif
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO \
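For readers following the elf_check_arch changes above: the merged elf32_check_arch now accepts both o32 and n32 class-32 binaries and distinguishes them by e_flags. A hypothetical stand-alone classifier sketch of that decision tree follows; the constants are spelled out locally for illustration (the kernel uses its own definitions), and the o32 FP64 check is omitted:

#include <stdio.h>

/* Illustrative copies of the MIPS ELF flag values. */
#define EF_MIPS_ABI2		0x00000020	/* set for n32 */
#define EF_MIPS_ABI		0x0000f000	/* o32/o64 ABI field */
#define EF_MIPS_ABI_O32		0x00001000

/* elf_class: 1 = ELFCLASS32, 2 = ELFCLASS64 */
static const char *classify_mips_elf(int elf_class, unsigned int e_flags)
{
	if (elf_class == 2)
		return "n64";		/* elf64_check_arch path */
	if (e_flags & EF_MIPS_ABI2)	/* ABI2 set: n32, unless an o-ABI is also claimed */
		return (e_flags & EF_MIPS_ABI) ? "invalid" : "n32";
	if ((e_flags & EF_MIPS_ABI) == 0 ||
	    (e_flags & EF_MIPS_ABI) == EF_MIPS_ABI_O32)
		return "o32";
	return "invalid";
}

int main(void)
{
	printf("%s\n", classify_mips_elf(1, EF_MIPS_ABI2));	/* n32 */
	printf("%s\n", classify_mips_elf(1, EF_MIPS_ABI_O32));	/* o32 */
	printf("%s\n", classify_mips_elf(2, 0));		/* n64 */
	return 0;
}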
diff --git a/arch/mips/include/asm/elfcore-compat.h b/arch/mips/include/asm/elfcore-compat.h
new file mode 100644
index 000000000000..2f0f0103c75b
--- /dev/null
+++ b/arch/mips/include/asm/elfcore-compat.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_MIPS_ELFCORE_COMPAT_H
+#define _ASM_MIPS_ELFCORE_COMPAT_H
+
+/*
+ * On mips we have two 32bit ABIs - o32 and n32. The latter
+ * has bigger registers, so we use it for compat_elf_regset_t.
+ * The former uses o32_elf_prstatus and PRSTATUS_SIZE/SET_PR_FPVALID
+ * are used to choose the size and location of ->pr_fpvalid of
+ * the layout actually used.
+ */
+typedef elf_gregset_t compat_elf_gregset_t;
+
+struct o32_elf_prstatus
+{
+ struct compat_elf_prstatus_common common;
+ unsigned int pr_reg[ELF_NGREG];
+ compat_int_t pr_fpvalid;
+};
+
+#define PRSTATUS_SIZE \
+ (!test_thread_flag(TIF_32BIT_REGS) \
+ ? sizeof(struct compat_elf_prstatus) \
+ : sizeof(struct o32_elf_prstatus))
+#define SET_PR_FPVALID(S) \
+ (*(!test_thread_flag(TIF_32BIT_REGS) \
+ ? &(S)->pr_fpvalid \
+ : &((struct o32_elf_prstatus *)(S))->pr_fpvalid) = 1)
+
+#endif
diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h
index 22912f78401c..2f98ced30263 100644
--- a/arch/mips/include/asm/inst.h
+++ b/arch/mips/include/asm/inst.h
@@ -65,11 +65,11 @@
#define I_FR_SFT 21
#define MIPSInst_FR(x) ((MIPSInst(x) & 0x03e00000) >> I_FR_SFT)
-#define I_FMA_FUNC_SFT 2
-#define MIPSInst_FMA_FUNC(x) ((MIPSInst(x) & 0x0000003c) >> I_FMA_FUNC_SFT)
+#define I_FMA_FUNC_SFT 3
+#define MIPSInst_FMA_FUNC(x) ((MIPSInst(x) & 0x00000038) >> I_FMA_FUNC_SFT)
#define I_FMA_FFMT_SFT 0
-#define MIPSInst_FMA_FFMT(x) (MIPSInst(x) & 0x00000003)
+#define MIPSInst_FMA_FFMT(x) (MIPSInst(x) & 0x00000007)
typedef unsigned int mips_instruction;
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index c5d351786416..f021de661c3a 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -20,6 +20,7 @@
#define IRQ_STACK_SIZE THREAD_SIZE
#define IRQ_STACK_START (IRQ_STACK_SIZE - 16)
+extern void __init init_IRQ(void);
extern void *irq_stack[NR_CPUS];
/*
diff --git a/arch/mips/include/asm/irq_cpu.h b/arch/mips/include/asm/irq_cpu.h
index 8d321180b5c2..83d7331ab215 100644
--- a/arch/mips/include/asm/irq_cpu.h
+++ b/arch/mips/include/asm/irq_cpu.h
@@ -10,8 +10,6 @@
#define _ASM_IRQ_CPU_H
extern void mips_cpu_irq_init(void);
-extern void rm7k_cpu_irq_init(void);
-extern void rm9k_cpu_irq_init(void);
#ifdef CONFIG_IRQ_DOMAIN
struct device_node;
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 24f3d0f9996b..3a5612e7304c 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -83,7 +83,6 @@
#define KVM_MAX_VCPUS 16
-#define KVM_USER_MEM_SLOTS 16
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 0
diff --git a/arch/mips/include/asm/mach-generic/irq.h b/arch/mips/include/asm/mach-generic/irq.h
index 079889ced4f3..4249af4bef84 100644
--- a/arch/mips/include/asm/mach-generic/irq.h
+++ b/arch/mips/include/asm/mach-generic/irq.h
@@ -28,12 +28,6 @@
#endif /* CONFIG_I8259 */
#endif
-#ifdef CONFIG_IRQ_CPU_RM7K
-#ifndef RM7K_CPU_IRQ_BASE
-#define RM7K_CPU_IRQ_BASE (MIPS_CPU_IRQ_BASE+8)
-#endif
-#endif
-
#endif /* CONFIG_IRQ_MIPS_CPU */
#endif /* __ASM_MACH_GENERIC_IRQ_H */
diff --git a/arch/mips/include/asm/mach-loongson2ef/loongson.h b/arch/mips/include/asm/mach-loongson2ef/loongson.h
index 57e571128489..ca039b8dcde3 100644
--- a/arch/mips/include/asm/mach-loongson2ef/loongson.h
+++ b/arch/mips/include/asm/mach-loongson2ef/loongson.h
@@ -56,15 +56,6 @@ extern int mach_i8259_irq(void);
(*(volatile u32 *)((char *)CKSEG1ADDR(LOONGSON_REG_BASE) + (x)))
#define LOONGSON_IRQ_BASE 32
-#define LOONGSON2_PERFCNT_IRQ (MIPS_CPU_IRQ_BASE + 6) /* cpu perf counter */
-
-#include <linux/interrupt.h>
-static inline void do_perfcnt_IRQ(void)
-{
-#if IS_ENABLED(CONFIG_OPROFILE)
- do_IRQ(LOONGSON2_PERFCNT_IRQ);
-#endif
-}
#define LOONGSON_FLASH_BASE 0x1c000000
#define LOONGSON_FLASH_SIZE 0x02000000 /* 32M */
diff --git a/arch/mips/include/asm/mach-loongson64/loongson.h b/arch/mips/include/asm/mach-loongson64/loongson.h
index fde1b75c45ea..ac1c20e172a2 100644
--- a/arch/mips/include/asm/mach-loongson64/loongson.h
+++ b/arch/mips/include/asm/mach-loongson64/loongson.h
@@ -23,8 +23,8 @@ extern u32 memsize, highmemsize;
extern const struct plat_smp_ops loongson3_smp_ops;
/* loongson-specific command line, env and memory initialization */
-extern void __init prom_init_memory(void);
extern void __init prom_init_env(void);
+extern void __init szmem(unsigned int node);
extern void *loongson_fdt_blob;
/* irq operation functions */
diff --git a/arch/mips/include/asm/mach-n64/irq.h b/arch/mips/include/asm/mach-n64/irq.h
new file mode 100644
index 000000000000..7e260fcb2a51
--- /dev/null
+++ b/arch/mips/include/asm/mach-n64/irq.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_N64_IRQ_H
+#define __ASM_MACH_N64_IRQ_H
+
+#define NR_IRQS 8
+
+#include <asm/mach-generic/irq.h>
+
+#endif /* __ASM_MACH_N64_IRQ_H */
diff --git a/arch/mips/include/asm/mach-n64/kmalloc.h b/arch/mips/include/asm/mach-n64/kmalloc.h
new file mode 100644
index 000000000000..e8b8d0b19571
--- /dev/null
+++ b/arch/mips/include/asm/mach-n64/kmalloc.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_N64_KMALLOC_H
+#define __ASM_MACH_N64_KMALLOC_H
+
+/* The default of 128 bytes wastes too much, use 32 (the largest cacheline, I) */
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+#endif /* __ASM_MACH_N64_KMALLOC_H */
diff --git a/arch/mips/include/asm/mach-pistachio/irq.h b/arch/mips/include/asm/mach-pistachio/irq.h
deleted file mode 100644
index 74ac016503ad..000000000000
--- a/arch/mips/include/asm/mach-pistachio/irq.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Pistachio IRQ setup
- *
- * Copyright (C) 2014 Google, Inc.
- */
-
-#ifndef __ASM_MACH_PISTACHIO_IRQ_H
-#define __ASM_MACH_PISTACHIO_IRQ_H
-
-#define NR_IRQS 256
-
-#include <asm/mach-generic/irq.h>
-
-#endif /* __ASM_MACH_PISTACHIO_IRQ_H */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index a0e8ae5497b6..9c8099a6ffed 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1085,6 +1085,10 @@
#define CVMVMCONF_RMMUSIZEM1_S 0
#define CVMVMCONF_RMMUSIZEM1 (_U64CAST_(0xff) << CVMVMCONF_RMMUSIZEM1_S)
+/* Debug register field definitions */
+#define MIPS_DEBUG_DBP_SHIFT 1
+#define MIPS_DEBUG_DBP (_ULCAST_(1) << MIPS_DEBUG_DBP_SHIFT)
+
/*
* Coprocessor 1 (FPU) register names
*/
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index 08d48f37c046..7e714aefc76d 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -282,7 +282,6 @@ union octeon_cvmemctl {
extern void octeon_check_cpu_bist(void);
int octeon_prune_device_tree(void);
-extern const char __appended_dtb;
extern const char __dtb_octeon_3xxx_begin;
extern const char __dtb_octeon_68xx_begin;
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 6a77bc4a6eec..65acab9c41f9 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -202,14 +202,13 @@ static inline unsigned long ___pa(unsigned long x)
/*
* RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
* (lmo) rsp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The
- * discussion can be found in lkml posting
- * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is
- * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html
+ * discussion can be found in
+ * https://lore.kernel.org/lkml/a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com
*
* It is unclear if the misscompilations mentioned in
- * http://lkml.org/lkml/2010/8/8/138 also affect MIPS so we keep this one
- * until GCC 3.x has been retired before we can apply
- * https://patchwork.linux-mips.org/patch/1541/
+ * https://lore.kernel.org/lkml/1281303490-390-1-git-send-email-namhyung@gmail.com
+ * also affect MIPS so we keep this one until GCC 3.x has been retired
+ * before we can apply https://patchwork.linux-mips.org/patch/1541/
*/
#ifndef __pa_symbol
@@ -255,6 +254,12 @@ extern bool __virt_addr_valid(const volatile void *kaddr);
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
+extern unsigned long __kaslr_offset;
+static inline unsigned long kaslr_offset(void)
+{
+ return __kaslr_offset;
+}
+
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 4f9c37616d42..804889b70965 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -64,6 +64,7 @@ struct vm_area_struct;
#define __S111 __pgprot(0)
extern unsigned long _page_cachable_default;
+extern void __update_cache(unsigned long address, pte_t pte);
/*
* ZERO_PAGE is a global shared page that is always zero; used
@@ -94,31 +95,31 @@ extern void paging_init(void);
#define htw_stop() \
do { \
- unsigned long flags; \
+ unsigned long __flags; \
\
if (cpu_has_htw) { \
- local_irq_save(flags); \
+ local_irq_save(__flags); \
if(!raw_current_cpu_data.htw_seq++) { \
write_c0_pwctl(read_c0_pwctl() & \
~(1 << MIPS_PWCTL_PWEN_SHIFT)); \
back_to_back_c0_hazard(); \
} \
- local_irq_restore(flags); \
+ local_irq_restore(__flags); \
} \
} while(0)
#define htw_start() \
do { \
- unsigned long flags; \
+ unsigned long __flags; \
\
if (cpu_has_htw) { \
- local_irq_save(flags); \
+ local_irq_save(__flags); \
if (!--raw_current_cpu_data.htw_seq) { \
write_c0_pwctl(read_c0_pwctl() | \
(1 << MIPS_PWCTL_PWEN_SHIFT)); \
back_to_back_c0_hazard(); \
} \
- local_irq_restore(flags); \
+ local_irq_restore(__flags); \
} \
} while(0)
@@ -224,7 +225,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
- extern void __update_cache(unsigned long address, pte_t pte);
if (!pte_present(pteval))
goto cache_sync_done;
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index 1e76774b36dd..daf3cf244ea9 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -53,7 +53,7 @@ struct pt_regs {
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
- return regs->regs[31];
+ return regs->regs[29];
}
static inline void instruction_pointer_set(struct pt_regs *regs,
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 15ab16f99f28..af3788589ee6 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -23,7 +23,6 @@
#include <asm/mipsmtregs.h>
#include <asm/mmzone.h>
#include <asm/unroll.h>
-#include <linux/uaccess.h> /* for uaccess_kernel() */
extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);
@@ -102,14 +101,17 @@ static inline void flush_scache_line(unsigned long addr)
cache_op(Hit_Writeback_Inv_SD, addr);
}
-#define protected_cache_op(op,addr) \
+#ifdef CONFIG_EVA
+
+#define protected_cache_op(op, addr) \
({ \
int __err = 0; \
__asm__ __volatile__( \
" .set push \n" \
" .set noreorder \n" \
- " .set "MIPS_ISA_ARCH_LEVEL" \n" \
- "1: cache %1, (%2) \n" \
+ " .set mips0 \n" \
+ " .set eva \n" \
+ "1: cachee %1, (%2) \n" \
"2: .insn \n" \
" .set pop \n" \
" .section .fixup,\"ax\" \n" \
@@ -123,17 +125,16 @@ static inline void flush_scache_line(unsigned long addr)
: "i" (op), "r" (addr), "i" (-EFAULT)); \
__err; \
})
+#else
-
-#define protected_cachee_op(op,addr) \
+#define protected_cache_op(op, addr) \
({ \
int __err = 0; \
__asm__ __volatile__( \
" .set push \n" \
" .set noreorder \n" \
- " .set mips0 \n" \
- " .set eva \n" \
- "1: cachee %1, (%2) \n" \
+ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
+ "1: cache %1, (%2) \n" \
"2: .insn \n" \
" .set pop \n" \
" .section .fixup,\"ax\" \n" \
@@ -147,6 +148,7 @@ static inline void flush_scache_line(unsigned long addr)
: "i" (op), "r" (addr), "i" (-EFAULT)); \
__err; \
})
+#endif
/*
* The next two are for badland addresses like signal trampolines.
@@ -158,11 +160,7 @@ static inline int protected_flush_icache_line(unsigned long addr)
return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
default:
-#ifdef CONFIG_EVA
- return protected_cachee_op(Hit_Invalidate_I, addr);
-#else
return protected_cache_op(Hit_Invalidate_I, addr);
-#endif
}
}
@@ -174,20 +172,12 @@ static inline int protected_flush_icache_line(unsigned long addr)
*/
static inline int protected_writeback_dcache_line(unsigned long addr)
{
-#ifdef CONFIG_EVA
- return protected_cachee_op(Hit_Writeback_Inv_D, addr);
-#else
return protected_cache_op(Hit_Writeback_Inv_D, addr);
-#endif
}
static inline int protected_writeback_scache_line(unsigned long addr)
{
-#ifdef CONFIG_EVA
- return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
-#else
return protected_cache_op(Hit_Writeback_Inv_SD, addr);
-#endif
}
/*
@@ -307,43 +297,8 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start,
} \
}
-#ifndef CONFIG_EVA
-
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
-
-#else
-
-#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop) \
-static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
- unsigned long end) \
-{ \
- unsigned long lsize = cpu_##desc##_line_size(); \
- unsigned long addr = start & ~(lsize - 1); \
- unsigned long aend = (end - 1) & ~(lsize - 1); \
- \
- if (!uaccess_kernel()) { \
- while (1) { \
- protected_cachee_op(hitop, addr); \
- if (addr == aend) \
- break; \
- addr += lsize; \
- } \
- } else { \
- while (1) { \
- protected_cache_op(hitop, addr); \
- if (addr == aend) \
- break; \
- addr += lsize; \
- } \
- \
- } \
-}
-
-__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
-__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)
-
-#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
protected_, loongson2_)
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 8a88eb265516..6ce2117e49f6 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -10,7 +10,6 @@
#define _ASM_SPINLOCK_H
#include <asm/processor.h>
-#include <asm/qrwlock.h>
#include <asm-generic/qspinlock_types.h>
@@ -27,5 +26,6 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
}
#include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
#endif /* _ASM_SPINLOCK_H */
diff --git a/arch/mips/include/asm/spram.h b/arch/mips/include/asm/spram.h
index 63cb90fd4148..373f2a5d495d 100644
--- a/arch/mips/include/asm/spram.h
+++ b/arch/mips/include/asm/spram.h
@@ -5,7 +5,7 @@
#if defined(CONFIG_MIPS_SPRAM)
extern __init void spram_config(void);
#else
-static inline void spram_config(void) { };
+static inline void spram_config(void) { }
#endif /* CONFIG_MIPS_SPRAM */
#endif /* _MIPS_SPRAM_H */
diff --git a/arch/mips/include/asm/traps.h b/arch/mips/include/asm/traps.h
index 6a0864bb604d..6aa8f126a43d 100644
--- a/arch/mips/include/asm/traps.h
+++ b/arch/mips/include/asm/traps.h
@@ -24,6 +24,7 @@ extern void (*board_ebase_setup)(void);
extern void (*board_cache_error_setup)(void);
extern int register_nmi_notifier(struct notifier_block *nb);
+extern char except_vec_nmi[];
#define nmi_notifier(fn, pri) \
({ \
diff --git a/arch/mips/include/asm/vermagic.h b/arch/mips/include/asm/vermagic.h
index 4d2dae0c7c57..371c1873df0d 100644
--- a/arch/mips/include/asm/vermagic.h
+++ b/arch/mips/include/asm/vermagic.h
@@ -26,6 +26,8 @@
#define MODULE_PROC_FAMILY "TX39XX "
#elif defined CONFIG_CPU_VR41XX
#define MODULE_PROC_FAMILY "VR41XX "
+#elif defined CONFIG_CPU_R4300
+#define MODULE_PROC_FAMILY "R4300 "
#elif defined CONFIG_CPU_R4X00
#define MODULE_PROC_FAMILY "R4X00 "
#elif defined CONFIG_CPU_TX49XX
diff --git a/arch/mips/include/asm/vpe.h b/arch/mips/include/asm/vpe.h
index 80e70dbd1f64..baa949a744cb 100644
--- a/arch/mips/include/asm/vpe.h
+++ b/arch/mips/include/asm/vpe.h
@@ -26,7 +26,6 @@
#endif
#define MAX_VPES 16
-#define VPE_PATH_MAX 256
static inline int aprp_cpu_index(void)
{
@@ -62,7 +61,6 @@ struct vpe {
unsigned long len;
char *pbuffer;
unsigned long plen;
- char cwd[VPE_PATH_MAX];
unsigned long __start;
@@ -111,7 +109,6 @@ extern const struct file_operations vpe_fops;
int vpe_notify(int index, struct vpe_notifications *notify);
void *vpe_get_shared(int index);
-char *vpe_getcwd(int index);
struct vpe *get_vpe(int minor);
struct tc *get_tc(int index);
diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild
index 6db08385d3d8..fdb9c5412cd9 100644
--- a/arch/mips/include/uapi/asm/Kbuild
+++ b/arch/mips/include/uapi/asm/Kbuild
@@ -2,8 +2,5 @@
generated-y += unistd_n32.h
generated-y += unistd_n64.h
generated-y += unistd_o32.h
-generated-y += unistd_nr_n32.h
-generated-y += unistd_nr_n64.h
-generated-y += unistd_nr_o32.h
generic-y += kvm_para.h
diff --git a/arch/mips/include/uapi/asm/perf_regs.h b/arch/mips/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..d0f4ecd616cf
--- /dev/null
+++ b/arch/mips/include/uapi/asm/perf_regs.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_MIPS_PERF_REGS_H
+#define _ASM_MIPS_PERF_REGS_H
+
+enum perf_event_mips_regs {
+ PERF_REG_MIPS_PC,
+ PERF_REG_MIPS_R1,
+ PERF_REG_MIPS_R2,
+ PERF_REG_MIPS_R3,
+ PERF_REG_MIPS_R4,
+ PERF_REG_MIPS_R5,
+ PERF_REG_MIPS_R6,
+ PERF_REG_MIPS_R7,
+ PERF_REG_MIPS_R8,
+ PERF_REG_MIPS_R9,
+ PERF_REG_MIPS_R10,
+ PERF_REG_MIPS_R11,
+ PERF_REG_MIPS_R12,
+ PERF_REG_MIPS_R13,
+ PERF_REG_MIPS_R14,
+ PERF_REG_MIPS_R15,
+ PERF_REG_MIPS_R16,
+ PERF_REG_MIPS_R17,
+ PERF_REG_MIPS_R18,
+ PERF_REG_MIPS_R19,
+ PERF_REG_MIPS_R20,
+ PERF_REG_MIPS_R21,
+ PERF_REG_MIPS_R22,
+ PERF_REG_MIPS_R23,
+ PERF_REG_MIPS_R24,
+ PERF_REG_MIPS_R25,
+ PERF_REG_MIPS_R26,
+ PERF_REG_MIPS_R27,
+ PERF_REG_MIPS_R28,
+ PERF_REG_MIPS_R29,
+ PERF_REG_MIPS_R30,
+ PERF_REG_MIPS_R31,
+ PERF_REG_MIPS_MAX = PERF_REG_MIPS_R31 + 1,
+};
+#endif /* _ASM_MIPS_PERF_REGS_H */
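The new perf_regs.h above, together with the HAVE_PERF_REGS and HAVE_PERF_USER_STACK_DUMP selections added to arch/mips/Kconfig earlier in this patch, lets perf sample user registers on MIPS. A minimal sketch of how a profiler might request the program counter and stack pointer via PERF_SAMPLE_REGS_USER is shown below; this is ordinary perf_event_open usage and only builds on a MIPS target where the header added here is installed:

#include <linux/perf_event.h>
#include <asm/perf_regs.h>	/* the uapi header added by this patch (MIPS only) */
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER;
	/* Request the program counter and the stack pointer ($29). */
	attr.sample_regs_user = (1ULL << PERF_REG_MIPS_PC) |
				(1ULL << PERF_REG_MIPS_R29);
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);
	return 0;
}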
diff --git a/arch/mips/jazz/Kconfig b/arch/mips/jazz/Kconfig
index 06838f80a5d7..42932ca98db9 100644
--- a/arch/mips/jazz/Kconfig
+++ b/arch/mips/jazz/Kconfig
@@ -3,7 +3,6 @@ config ACER_PICA_61
bool "Support for Acer PICA 1 chipset"
depends on MACH_JAZZ
select DMA_NONCOHERENT
- select SYS_SUPPORTS_LITTLE_ENDIAN
help
This is a machine with a R4400 133/150 MHz CPU. To compile a Linux
kernel that runs on these, say Y here. For details about Linux on
@@ -15,7 +14,6 @@ config MIPS_MAGNUM_4000
depends on MACH_JAZZ
select DMA_NONCOHERENT
select SYS_SUPPORTS_BIG_ENDIAN
- select SYS_SUPPORTS_LITTLE_ENDIAN
help
This is a machine with a R4000 100 MHz CPU. To compile a Linux
kernel that runs on these, say Y here. For details about Linux on
@@ -26,7 +24,6 @@ config OLIVETTI_M700
bool "Support for Olivetti M700-10"
depends on MACH_JAZZ
select DMA_NONCOHERENT
- select SYS_SUPPORTS_LITTLE_ENDIAN
help
This is a machine with a R4000 100 MHz CPU. To compile a Linux
kernel that runs on these, say Y here. For details about Linux on
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 2a05b923f579..b4a57f1de772 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -71,7 +71,6 @@ obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o
obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o
-obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
obj-$(CONFIG_MIPS_MSC) += irq-msc01.o
obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o
@@ -80,8 +79,8 @@ obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-n64.o
obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o
-obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o
-obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o signal_o32.o
+obj-$(CONFIG_MIPS32_N32) += scall64-n32.o signal_n32.o
+obj-$(CONFIG_MIPS32_O32) += scall64-o32.o signal_o32.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_PROC_FS) += proc.o
@@ -104,7 +103,7 @@ obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR) += mips-r2-to-r6-emul.o
CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
-obj-$(CONFIG_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
deleted file mode 100644
index c4441416e96b..000000000000
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ /dev/null
@@ -1,113 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for n32 Linux/MIPS ELF binaries.
- * Author: Ralf Baechle (ralf@linux-mips.org)
- *
- * Copyright (C) 1999, 2001 Ralf Baechle
- * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
- *
- * Heavily inspired by the 32-bit Sparc compat code which is
- * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
- */
-
-#define ELF_ARCH EM_MIPS
-#define ELF_CLASS ELFCLASS32
-#ifdef __MIPSEB__
-#define ELF_DATA ELFDATA2MSB;
-#else /* __MIPSEL__ */
-#define ELF_DATA ELFDATA2LSB;
-#endif
-
-/* ELF register definitions */
-#define ELF_NGREG 45
-#define ELF_NFPREG 33
-
-typedef unsigned long elf_greg_t;
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-typedef double elf_fpreg_t;
-typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
-
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch elfn32_check_arch
-
-#define TASK32_SIZE 0x7fff8000UL
-#undef ELF_ET_DYN_BASE
-#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
-
-#include <asm/processor.h>
-#include <linux/elfcore.h>
-#include <linux/compat.h>
-#include <linux/math64.h>
-
-#define elf_prstatus elf_prstatus32
-struct elf_prstatus32
-{
- struct elf_siginfo pr_info; /* Info associated with signal */
- short pr_cursig; /* Current signal */
- unsigned int pr_sigpend; /* Set of pending signals */
- unsigned int pr_sighold; /* Set of held signals */
- pid_t pr_pid;
- pid_t pr_ppid;
- pid_t pr_pgrp;
- pid_t pr_sid;
- struct old_timeval32 pr_utime; /* User time */
- struct old_timeval32 pr_stime; /* System time */
- struct old_timeval32 pr_cutime;/* Cumulative user time */
- struct old_timeval32 pr_cstime;/* Cumulative system time */
- elf_gregset_t pr_reg; /* GP registers */
- int pr_fpvalid; /* True if math co-processor being used. */
-};
-
-#define elf_prpsinfo elf_prpsinfo32
-struct elf_prpsinfo32
-{
- char pr_state; /* numeric process state */
- char pr_sname; /* char for pr_state */
- char pr_zomb; /* zombie */
- char pr_nice; /* nice val */
- unsigned int pr_flag; /* flags */
- __kernel_uid_t pr_uid;
- __kernel_gid_t pr_gid;
- pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
- /* Lots missing */
- char pr_fname[16]; /* filename of executable */
- char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
-};
-
-#define elf_caddr_t u32
-#define init_elf_binfmt init_elfn32_binfmt
-
-#define jiffies_to_timeval jiffies_to_old_timeval32
-static __inline__ void
-jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
-{
- /*
- * Convert jiffies to nanoseconds and separate with
- * one divide.
- */
- u64 nsec = (u64)jiffies * TICK_NSEC;
- u32 rem;
- value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
- value->tv_usec = rem / NSEC_PER_USEC;
-}
-
-#define ELF_CORE_EFLAGS EF_MIPS_ABI2
-
-#undef TASK_SIZE
-#define TASK_SIZE TASK_SIZE32
-
-#undef ns_to_kernel_old_timeval
-#define ns_to_kernel_old_timeval ns_to_old_timeval32
-
-/*
- * Some data types as stored in coredump.
- */
-#define user_long_t compat_long_t
-#define user_siginfo_t compat_siginfo_t
-#define copy_siginfo_to_external copy_siginfo_to_external32
-
-#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
deleted file mode 100644
index 7b2a23f48c1a..000000000000
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ /dev/null
@@ -1,116 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for o32 Linux/MIPS ELF binaries.
- * Author: Ralf Baechle (ralf@linux-mips.org)
- *
- * Copyright (C) 1999, 2001 Ralf Baechle
- * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
- *
- * Heavily inspired by the 32-bit Sparc compat code which is
- * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
- */
-
-#define ELF_ARCH EM_MIPS
-#define ELF_CLASS ELFCLASS32
-#ifdef __MIPSEB__
-#define ELF_DATA ELFDATA2MSB;
-#else /* __MIPSEL__ */
-#define ELF_DATA ELFDATA2LSB;
-#endif
-
-/* ELF register definitions */
-#define ELF_NGREG 45
-#define ELF_NFPREG 33
-
-typedef unsigned int elf_greg_t;
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-typedef double elf_fpreg_t;
-typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
-
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch elfo32_check_arch
-
-#ifdef CONFIG_KVM_GUEST
-#define TASK32_SIZE 0x3fff8000UL
-#else
-#define TASK32_SIZE 0x7fff8000UL
-#endif
-#undef ELF_ET_DYN_BASE
-#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
-
-#include <asm/processor.h>
-
-#include <linux/elfcore.h>
-#include <linux/compat.h>
-#include <linux/math64.h>
-
-#define elf_prstatus elf_prstatus32
-struct elf_prstatus32
-{
- struct elf_siginfo pr_info; /* Info associated with signal */
- short pr_cursig; /* Current signal */
- unsigned int pr_sigpend; /* Set of pending signals */
- unsigned int pr_sighold; /* Set of held signals */
- pid_t pr_pid;
- pid_t pr_ppid;
- pid_t pr_pgrp;
- pid_t pr_sid;
- struct old_timeval32 pr_utime; /* User time */
- struct old_timeval32 pr_stime; /* System time */
- struct old_timeval32 pr_cutime;/* Cumulative user time */
- struct old_timeval32 pr_cstime;/* Cumulative system time */
- elf_gregset_t pr_reg; /* GP registers */
- int pr_fpvalid; /* True if math co-processor being used. */
-};
-
-#define elf_prpsinfo elf_prpsinfo32
-struct elf_prpsinfo32
-{
- char pr_state; /* numeric process state */
- char pr_sname; /* char for pr_state */
- char pr_zomb; /* zombie */
- char pr_nice; /* nice val */
- unsigned int pr_flag; /* flags */
- __kernel_uid_t pr_uid;
- __kernel_gid_t pr_gid;
- pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
- /* Lots missing */
- char pr_fname[16]; /* filename of executable */
- char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
-};
-
-#define elf_caddr_t u32
-#define init_elf_binfmt init_elf32_binfmt
-
-#define jiffies_to_timeval jiffies_to_old_timeval32
-static inline void
-jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
-{
- /*
- * Convert jiffies to nanoseconds and separate with
- * one divide.
- */
- u64 nsec = (u64)jiffies * TICK_NSEC;
- u32 rem;
- value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
- value->tv_usec = rem / NSEC_PER_USEC;
-}
-
-#undef TASK_SIZE
-#define TASK_SIZE TASK_SIZE32
-
-#undef ns_to_kernel_old_timeval
-#define ns_to_kernel_old_timeval ns_to_old_timeval32
-
-/*
- * Some data types as stored in coredump.
- */
-#define user_long_t compat_long_t
-#define user_siginfo_t compat_siginfo_t
-#define copy_siginfo_to_external copy_siginfo_to_external32
-
-#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
index 47312c529410..53d8ea7d36e6 100644
--- a/arch/mips/kernel/cacheinfo.c
+++ b/arch/mips/kernel/cacheinfo.c
@@ -35,6 +35,11 @@ static int __init_cache_level(unsigned int cpu)
leaves += (c->icache.waysize) ? 2 : 1;
+ if (c->vcache.waysize) {
+ levels++;
+ leaves++;
+ }
+
if (c->scache.waysize) {
levels++;
leaves++;
@@ -74,25 +79,36 @@ static int __populate_cache_leaves(unsigned int cpu)
struct cpuinfo_mips *c = &current_cpu_data;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+ int level = 1;
if (c->icache.waysize) {
- /* L1 caches are per core */
+ /* I/D caches are per core */
fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
- populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA);
+ populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
- populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST);
+ populate_cache(icache, this_leaf, level, CACHE_TYPE_INST);
+ level++;
} else {
- populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED);
+ populate_cache(dcache, this_leaf, level, CACHE_TYPE_UNIFIED);
+ level++;
+ }
+
+ if (c->vcache.waysize) {
+ /* Vcache is per core as well */
+ fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
+ populate_cache(vcache, this_leaf, level, CACHE_TYPE_UNIFIED);
+ level++;
}
if (c->scache.waysize) {
- /* L2 cache is per cluster */
+ /* Scache is per cluster */
fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
- populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED);
+ populate_cache(scache, this_leaf, level, CACHE_TYPE_UNIFIED);
+ level++;
}
if (c->tcache.waysize)
- populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
+ populate_cache(tcache, this_leaf, level, CACHE_TYPE_UNIFIED);
this_cpu_ci->cpu_map_populated = true;
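
The cacheinfo hunk above switches from hard-coded cache levels to a running counter so the per-core victim cache can be reported between L1 and L2. A small userspace sketch of that numbering (names and helpers are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct cpuinfo_mips: which caches are present. */
struct caches { bool l1, vcache, scache, tcache; };

static void print_levels(struct caches c)
{
	int level = 1;

	if (c.l1)
		printf("L%d: I/D caches (per core)\n", level++);
	if (c.vcache)
		printf("L%d: victim cache (per core)\n", level++);
	if (c.scache)
		printf("L%d: secondary cache (per cluster)\n", level++);
	if (c.tcache)
		printf("L%d: tertiary cache\n", level++);
}

int main(void)
{
	/* With a victim cache present, the scache reports as level 3, not 2. */
	struct caches c = { .l1 = true, .vcache = true, .scache = true };

	print_levels(c);
	return 0;
}
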
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index 5709469c21ff..d761ead2e7fe 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -193,7 +193,7 @@ void __init txx9_clockevent_init(unsigned long baseaddr, int irq,
cd->min_delta_ns = clockevent_delta2ns(0xf, cd);
cd->min_delta_ticks = 0xf;
cd->irq = irq;
- cd->cpumask = cpumask_of(0),
+ cd->cpumask = cpumask_of(0);
clockevents_register_device(cd);
if (request_irq(irq, txx9tmr_interrupt, IRQF_PERCPU | IRQF_TIMER,
"txx9tmr", &txx9_clock_event_device))
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 4db7ff055c9f..975343240148 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -91,7 +91,6 @@
.set pop
.endm
-.section .text.cps-vec
.balign 0x1000
LEAF(mips_cps_core_entry)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index e6853697a056..9a89637b4ecf 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1154,6 +1154,15 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
break;
}
break;
+ case PRID_IMP_R4300:
+ c->cputype = CPU_R4300;
+ __cpu_name[cpu] = "R4300";
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_LLSC;
+ c->tlbsize = 32;
+ break;
case PRID_IMP_R4600:
c->cputype = CPU_R4600;
__cpu_name[cpu] = "R4600";
@@ -1830,16 +1839,17 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
*/
case PRID_COMP_INGENIC_D0:
c->isa_level &= ~MIPS_CPU_ISA_M32R2;
- break;
+ fallthrough;
/*
* The config0 register in the XBurst CPUs with a processor ID of
- * PRID_COMP_INGENIC_D1 has an abandoned huge page tlb mode, this
- * mode is not compatible with the MIPS standard, it will cause
- * tlbmiss and into an infinite loop (line 21 in the tlb-funcs.S)
- * when starting the init process. After chip reset, the default
- * is HPTLB mode, Write 0xa9000000 to cp0 register 5 sel 4 to
- * switch back to VTLB mode to prevent getting stuck.
+	 * PRID_COMP_INGENIC_D0 or PRID_COMP_INGENIC_D1 has an abandoned
+	 * huge page TLB mode. This mode is not compatible with the MIPS
+	 * standard: it causes a TLB miss and then an infinite loop
+	 * (line 21 in tlb-funcs.S) when starting the init process.
+	 * After chip reset the default is HPTLB mode, so write 0xa9000000
+	 * to CP0 register 5 sel 4 to switch back to VTLB mode and prevent
+	 * getting stuck.
*/
case PRID_COMP_INGENIC_D1:
write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS);
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
index 01b2bd95ba1f..2e50f55185a6 100644
--- a/arch/mips/kernel/crash_dump.c
+++ b/arch/mips/kernel/crash_dump.c
@@ -1,11 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/highmem.h>
-#include <linux/memblock.h>
#include <linux/crash_dump.h>
-#include <linux/uaccess.h>
-#include <linux/slab.h>
-
-static void *kdump_buf_page;
/**
* copy_oldmem_page - copy one page from "oldmem"
@@ -19,10 +14,6 @@ static void *kdump_buf_page;
*
* Copy a page from "oldmem". For this page, there is no pte mapped
* in the current kernel.
- *
- * Calling copy_to_user() in atomic context is not desirable. Hence first
- * copying the data to a pre-allocated kernel page and then copying to user
- * space in non-atomic context.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
@@ -32,36 +23,16 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!csize)
return 0;
- vaddr = kmap_atomic_pfn(pfn);
+ vaddr = kmap_local_pfn(pfn);
if (!userbuf) {
- memcpy(buf, (vaddr + offset), csize);
- kunmap_atomic(vaddr);
+ memcpy(buf, vaddr + offset, csize);
} else {
- if (!kdump_buf_page) {
- pr_warn("Kdump: Kdump buffer page not allocated\n");
-
- return -EFAULT;
- }
- copy_page(kdump_buf_page, vaddr);
- kunmap_atomic(vaddr);
- if (copy_to_user(buf, (kdump_buf_page + offset), csize))
- return -EFAULT;
+ if (copy_to_user(buf, vaddr + offset, csize))
+ csize = -EFAULT;
}
- return csize;
-}
-
-static int __init kdump_buf_page_init(void)
-{
- int ret = 0;
+ kunmap_local(vaddr);
- kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!kdump_buf_page) {
- pr_warn("Kdump: Failed to allocate kdump buffer page\n");
- ret = -ENOMEM;
- }
-
- return ret;
+ return csize;
}
-arch_initcall(kdump_buf_page_init);
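
The crash_dump.c change drops the pre-allocated bounce buffer and maps the old-kernel page with a local mapping for both copy paths, unmapping once on the way out. A userspace analogue of that control flow, with mocked mapping/copy helpers (nothing here is the real kernel API):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Mock stand-ins for kmap_local_pfn()/kunmap_local(): a "page" is just
 * static storage here, so only the control flow matters. */
static char fake_page[4096] = "data from the old kernel";

static char *map_page(unsigned long pfn) { (void)pfn; return fake_page; }
static void unmap_page(char *vaddr) { (void)vaddr; }

/* copy_to_user() analogue: returns 0 on success, non-zero on fault. */
static int copy_to_user_sim(char *dst, const char *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

static long copy_oldmem(char *buf, size_t csize, unsigned long offset, int userbuf)
{
	long ret = (long)csize;
	char *vaddr;

	if (!csize)
		return 0;

	vaddr = map_page(0);
	if (!userbuf) {
		memcpy(buf, vaddr + offset, csize);
	} else {
		if (copy_to_user_sim(buf, vaddr + offset, csize))
			ret = -EFAULT;
	}
	unmap_page(vaddr);	/* single unmap on every exit path */

	return ret;
}

int main(void)
{
	char buf[32] = "";

	printf("%ld bytes: \"%s\"\n", copy_oldmem(buf, 4, 0, 0), buf);
	return 0;
}
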
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index f57e68f40a34..666b9969c1bd 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -73,7 +73,6 @@ static inline void ftrace_dyn_arch_init_insns(void)
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
int faulted;
- mm_segment_t old_fs;
/* *(unsigned int *)ip = new_code; */
safe_store_code(new_code, ip, faulted);
@@ -81,10 +80,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
if (unlikely(faulted))
return -EFAULT;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
flush_icache_range(ip, ip + 8);
- set_fs(old_fs);
return 0;
}
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index bcce32a3de10..743d75927b71 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -349,8 +349,8 @@ NESTED(ejtag_debug_handler, PT_SIZE, sp)
MTC0 k0, CP0_DESAVE
mfc0 k0, CP0_DEBUG
- sll k0, k0, 30 # Check for SDBBP.
- bgez k0, ejtag_return
+ andi k0, k0, MIPS_DEBUG_DBP # Check for SDBBP.
+ beqz k0, ejtag_return
#ifdef CONFIG_SMP
1: PTR_LA k0, ejtag_debug_buffer_spinlock
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 61b73580b877..b825ed4476c7 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -93,33 +93,6 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
jr t0
0:
-#ifdef CONFIG_USE_OF
-#if defined(CONFIG_MIPS_RAW_APPENDED_DTB) || \
- defined(CONFIG_MIPS_ELF_APPENDED_DTB)
-
- PTR_LA t2, __appended_dtb
-
-#ifdef CONFIG_CPU_BIG_ENDIAN
- li t1, 0xd00dfeed
-#else /* !CONFIG_CPU_BIG_ENDIAN */
- li t1, 0xedfe0dd0
-#endif /* !CONFIG_CPU_BIG_ENDIAN */
- lw t0, (t2)
- beq t0, t1, dtb_found
-#endif /* CONFIG_MIPS_RAW_APPENDED_DTB || CONFIG_MIPS_ELF_APPENDED_DTB */
- li t1, -2
- move t2, a1
- beq a0, t1, dtb_found
-
-#ifdef CONFIG_BUILTIN_DTB
- PTR_LA t2, __dtb_start
- PTR_LA t1, __dtb_end
- bne t1, t2, dtb_found
-#endif /* CONFIG_BUILTIN_DTB */
-
- li t2, 0
-dtb_found:
-#endif /* CONFIG_USE_OF */
PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0)
PTR_LA t1, __bss_stop - LONGSIZE
@@ -133,10 +106,6 @@ dtb_found:
LONG_S a2, fw_arg2
LONG_S a3, fw_arg3
-#ifdef CONFIG_USE_OF
- LONG_S t2, fw_passed_dtb
-#endif
-
MTC0 zero, CP0_CONTEXT # clear context register
#ifdef CONFIG_64BIT
MTC0 zero, CP0_XCONTEXT
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 18e69ebf5691..1aca3b4db904 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -151,6 +151,7 @@ void __init check_wait(void)
cpu_wait = r39xx_wait;
break;
case CPU_R4200:
+/* case CPU_R4300: */
case CPU_R4600:
case CPU_R4640:
case CPU_R4650:
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
deleted file mode 100644
index e1a497f639d7..000000000000
--- a/arch/mips/kernel/irq-rm7000.c
+++ /dev/null
@@ -1,45 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2003 Ralf Baechle
- *
- * Handler for RM7000 extended interrupts. These are a non-standard
- * feature so we handle them separately from standard interrupts.
- */
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-
-#include <asm/irq_cpu.h>
-#include <asm/mipsregs.h>
-
-static inline void unmask_rm7k_irq(struct irq_data *d)
-{
- set_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
-}
-
-static inline void mask_rm7k_irq(struct irq_data *d)
-{
- clear_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
-}
-
-static struct irq_chip rm7k_irq_controller = {
- .name = "RM7000",
- .irq_ack = mask_rm7k_irq,
- .irq_mask = mask_rm7k_irq,
- .irq_mask_ack = mask_rm7k_irq,
- .irq_unmask = unmask_rm7k_irq,
- .irq_eoi = unmask_rm7k_irq
-};
-
-void __init rm7k_cpu_irq_init(void)
-{
- int base = RM7K_CPU_IRQ_BASE;
- int i;
-
- clear_c0_intcontrol(0x00000f00); /* Mask all */
-
- for (i = base; i < base + 4; i++)
- irq_set_chip_and_handler(i, &rm7k_irq_controller,
- handle_percpu_irq);
-}
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index ea781b29f7f1..09a2d7bb9eef 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -32,7 +32,6 @@
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
-#include <linux/uaccess.h>
#include <asm/irq_regs.h>
static struct hard_trap_info {
@@ -208,18 +207,6 @@ void arch_kgdb_breakpoint(void)
".set\treorder");
}
-void kgdb_call_nmi_hook(void *ignored)
-{
- mm_segment_t old_fs;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
- kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
-
- set_fs(old_fs);
-}
-
static int compute_signal(int tt)
{
struct hard_trap_info *ht;
@@ -302,7 +289,6 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
struct die_args *args = (struct die_args *)ptr;
struct pt_regs *regs = args->regs;
int trap = (regs->cp0_cause & 0x7c) >> 2;
- mm_segment_t old_fs;
#ifdef CONFIG_KPROBES
/*
@@ -317,17 +303,11 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
if (user_mode(regs))
return NOTIFY_DONE;
- /* Kernel mode. Set correct address limit */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
if (atomic_read(&kgdb_active) != -1)
kgdb_nmicallback(smp_processor_id(), regs);
- if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) {
- set_fs(old_fs);
+ if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs))
return NOTIFY_DONE;
- }
if (atomic_read(&kgdb_setting_breakpoint))
if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
@@ -337,7 +317,6 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
local_irq_enable();
__flush_cache_all();
- set_fs(old_fs);
return NOTIFY_STOP;
}
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 3c0c3d1260c1..14f46d17500a 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -40,22 +40,13 @@ void *module_alloc(unsigned long size)
}
#endif
-static int apply_r_mips_none(struct module *me, u32 *location,
- u32 base, Elf_Addr v, bool rela)
-{
- return 0;
-}
-
-static int apply_r_mips_32(struct module *me, u32 *location,
- u32 base, Elf_Addr v, bool rela)
+static void apply_r_mips_32(u32 *location, u32 base, Elf_Addr v)
{
*location = base + v;
-
- return 0;
}
-static int apply_r_mips_26(struct module *me, u32 *location,
- u32 base, Elf_Addr v, bool rela)
+static int apply_r_mips_26(struct module *me, u32 *location, u32 base,
+ Elf_Addr v)
{
if (v % 4) {
pr_err("module %s: dangerous R_MIPS_26 relocation\n",
@@ -75,8 +66,8 @@ static int apply_r_mips_26(struct module *me, u32 *location,
return 0;
}
-static int apply_r_mips_hi16(struct module *me, u32 *location,
- u32 base, Elf_Addr v, bool rela)
+static int apply_r_mips_hi16(struct module *me, u32 *location, Elf_Addr v,
+ bool rela)
{
struct mips_hi16 *n;
@@ -217,26 +208,25 @@ static int apply_r_mips_pc(struct module *me, u32 *location, u32 base,
return 0;
}
-static int apply_r_mips_pc16(struct module *me, u32 *location,
- u32 base, Elf_Addr v, bool rela)
+static int apply_r_mips_pc16(struct module *me, u32 *location, u32 base,
+ Elf_Addr v)
{
return apply_r_mips_pc(me, location, base, v, 16);
}
-static int apply_r_mips_pc21(struct module *me, u32 *location,
- u32 base, Elf_Addr v, bool rela)
+static int apply_r_mips_pc21(struct module *me, u32 *location, u32 base,
+ Elf_Addr v)
{
return apply_r_mips_pc(me, location, base, v, 21);
}
-static int apply_r_mips_pc26(struct module *me, u32 *location,
- u32 base, Elf_Addr v, bool rela)
+static int apply_r_mips_pc26(struct module *me, u32 *location, u32 base,
+ Elf_Addr v)
{
return apply_r_mips_pc(me, location, base, v, 26);
}
-static int apply_r_mips_64(struct module *me, u32 *location,
- u32 base, Elf_Addr v, bool rela)
+static int apply_r_mips_64(u32 *location, Elf_Addr v, bool rela)
{
if (WARN_ON(!rela))
return -EINVAL;
@@ -246,8 +236,7 @@ static int apply_r_mips_64(struct module *me, u32 *location,
return 0;
}
-static int apply_r_mips_higher(struct module *me, u32 *location,
- u32 base, Elf_Addr v, bool rela)
+static int apply_r_mips_higher(u32 *location, Elf_Addr v, bool rela)
{
if (WARN_ON(!rela))
return -EINVAL;
@@ -258,8 +247,7 @@ static int apply_r_mips_higher(struct module *me, u32 *location,
return 0;
}
-static int apply_r_mips_highest(struct module *me, u32 *location,
- u32 base, Elf_Addr v, bool rela)
+static int apply_r_mips_highest(u32 *location, Elf_Addr v, bool rela)
{
if (WARN_ON(!rela))
return -EINVAL;
@@ -272,12 +260,14 @@ static int apply_r_mips_highest(struct module *me, u32 *location,
/**
* reloc_handler() - Apply a particular relocation to a module
+ * @type: type of the relocation to apply
* @me: the module to apply the reloc to
* @location: the address at which the reloc is to be applied
* @base: the existing value at location for REL-style; 0 for RELA-style
* @v: the value of the reloc, with addend for RELA-style
+ * @rela: whether this is a RELA (true) or REL (false) relocation
*
- * Each implemented reloc_handler function applies a particular type of
+ * Each implemented relocation function applies a particular type of
* relocation to the module @me. Relocs that may be found in either REL or RELA
* variants can be handled by making use of the @base & @v parameters which are
* set to values which abstract the difference away from the particular reloc
@@ -285,23 +275,40 @@ static int apply_r_mips_highest(struct module *me, u32 *location,
*
* Return: 0 upon success, else -ERRNO
*/
-typedef int (*reloc_handler)(struct module *me, u32 *location,
- u32 base, Elf_Addr v, bool rela);
-
-/* The handlers for known reloc types */
-static reloc_handler reloc_handlers[] = {
- [R_MIPS_NONE] = apply_r_mips_none,
- [R_MIPS_32] = apply_r_mips_32,
- [R_MIPS_26] = apply_r_mips_26,
- [R_MIPS_HI16] = apply_r_mips_hi16,
- [R_MIPS_LO16] = apply_r_mips_lo16,
- [R_MIPS_PC16] = apply_r_mips_pc16,
- [R_MIPS_64] = apply_r_mips_64,
- [R_MIPS_HIGHER] = apply_r_mips_higher,
- [R_MIPS_HIGHEST] = apply_r_mips_highest,
- [R_MIPS_PC21_S2] = apply_r_mips_pc21,
- [R_MIPS_PC26_S2] = apply_r_mips_pc26,
-};
+static int reloc_handler(u32 type, struct module *me, u32 *location, u32 base,
+ Elf_Addr v, bool rela)
+{
+ switch (type) {
+ case R_MIPS_NONE:
+ break;
+ case R_MIPS_32:
+ apply_r_mips_32(location, base, v);
+ break;
+ case R_MIPS_26:
+ return apply_r_mips_26(me, location, base, v);
+ case R_MIPS_HI16:
+ return apply_r_mips_hi16(me, location, v, rela);
+ case R_MIPS_LO16:
+ return apply_r_mips_lo16(me, location, base, v, rela);
+ case R_MIPS_PC16:
+ return apply_r_mips_pc16(me, location, base, v);
+ case R_MIPS_PC21_S2:
+ return apply_r_mips_pc21(me, location, base, v);
+ case R_MIPS_PC26_S2:
+ return apply_r_mips_pc26(me, location, base, v);
+ case R_MIPS_64:
+ return apply_r_mips_64(location, v, rela);
+ case R_MIPS_HIGHER:
+ return apply_r_mips_higher(location, v, rela);
+ case R_MIPS_HIGHEST:
+ return apply_r_mips_highest(location, v, rela);
+ default:
+ pr_err("%s: Unknown relocation type %u\n", me->name, type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
@@ -311,7 +318,6 @@ static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
Elf_Mips_Rel *rel;
Elf_Mips_Rela *rela;
} r;
- reloc_handler handler;
Elf_Sym *sym;
u32 *location, base;
unsigned int i, type;
@@ -343,17 +349,6 @@ static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
}
type = ELF_MIPS_R_TYPE(*r.rel);
- if (type < ARRAY_SIZE(reloc_handlers))
- handler = reloc_handlers[type];
- else
- handler = NULL;
-
- if (!handler) {
- pr_err("%s: Unknown relocation type %u\n",
- me->name, type);
- err = -EINVAL;
- goto out;
- }
if (rela) {
v = sym->st_value + r.rela->r_addend;
@@ -365,7 +360,7 @@ static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
r.rel = &r.rel[1];
}
- err = handler(me, location, base, v, rela);
+ err = reloc_handler(type, me, location, base, v, rela);
if (err)
goto out;
}
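
module.c now dispatches relocations through a single switch instead of a sparse handler table, so unknown types hit one error path. A stripped-down sketch of the same shape, with placeholder relocation types rather than the real R_MIPS_* set:

#include <stdint.h>
#include <stdio.h>

enum { R_NONE, R_ADD32 };	/* placeholder relocation types */

static void apply_add32(uint32_t *loc, uint32_t base, uint32_t v)
{
	*loc = base + v;
}

static int reloc_handler(unsigned int type, uint32_t *loc, uint32_t base, uint32_t v)
{
	switch (type) {
	case R_NONE:
		break;
	case R_ADD32:
		apply_add32(loc, base, v);
		break;
	default:
		fprintf(stderr, "unknown relocation type %u\n", type);
		return -1;	/* one error path replaces the NULL-handler check */
	}

	return 0;
}

int main(void)
{
	uint32_t word = 0;

	reloc_handler(R_ADD32, &word, 0x1000, 0x234);
	printf("word=0x%x, unknown type -> %d\n", word, reloc_handler(42, &word, 0, 0));
	return 0;
}
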
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 011eb6bbf81a..22e22c2de1c9 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1919,19 +1919,22 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
{
- unsigned int raw_id = config & 0xff;
- unsigned int base_id = raw_id & 0x7f;
+ unsigned int base_id = config & 0x7f;
+ unsigned int event_max;
raw_event.cntr_mask = CNTR_ALL;
raw_event.event_id = base_id;
- if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
- if (base_id > 0x42)
- return ERR_PTR(-EOPNOTSUPP);
- } else {
- if (base_id > 0x3a)
- return ERR_PTR(-EOPNOTSUPP);
+ if (current_cpu_type() == CPU_CAVIUM_OCTEON3)
+ event_max = 0x5f;
+ else if (current_cpu_type() == CPU_CAVIUM_OCTEON2)
+ event_max = 0x42;
+ else
+ event_max = 0x3a;
+
+ if (base_id > event_max) {
+ return ERR_PTR(-EOPNOTSUPP);
}
switch (base_id) {
@@ -1941,7 +1944,7 @@ static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
case 0x1f:
case 0x2f:
case 0x34:
- case 0x3b ... 0x3f:
+ case 0x3e ... 0x3f:
return ERR_PTR(-EOPNOTSUPP);
default:
break;
@@ -2077,6 +2080,7 @@ init_hw_perf_events(void)
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
case CPU_CAVIUM_OCTEON2:
+ case CPU_CAVIUM_OCTEON3:
mipspmu.name = "octeon";
mipspmu.general_event_map = &octeon_event_map;
mipspmu.cache_event_map = &octeon_cache_map;
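
For reference, the raw-event change above boils down to masking the low 7 bits of the config and checking them against a per-model ceiling. A minimal sketch of that check (the model names are stand-ins; the limits match the values in the hunk):

#include <stdint.h>
#include <stdio.h>

enum octeon_model { OCTEON1, OCTEON2, OCTEON3 };

static int octeon_raw_event_valid(enum octeon_model m, uint64_t config)
{
	unsigned int base_id = config & 0x7f;
	unsigned int event_max;

	if (m == OCTEON3)
		event_max = 0x5f;
	else if (m == OCTEON2)
		event_max = 0x42;
	else
		event_max = 0x3a;

	return base_id <= event_max;
}

int main(void)
{
	printf("%d %d\n",
	       octeon_raw_event_valid(OCTEON2, 0x42),
	       octeon_raw_event_valid(OCTEON1, 0x50));
	return 0;
}
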
diff --git a/arch/mips/kernel/perf_regs.c b/arch/mips/kernel/perf_regs.c
new file mode 100644
index 000000000000..e686780d1647
--- /dev/null
+++ b/arch/mips/kernel/perf_regs.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Some parts derived from x86 version of this file.
+ *
+ * Copyright (C) 2013 Cavium, Inc.
+ */
+
+#include <linux/perf_event.h>
+
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_32BIT
+u64 perf_reg_abi(struct task_struct *tsk)
+{
+ return PERF_SAMPLE_REGS_ABI_32;
+}
+#else /* Must be CONFIG_64BIT */
+u64 perf_reg_abi(struct task_struct *tsk)
+{
+ if (test_tsk_thread_flag(tsk, TIF_32BIT_REGS))
+ return PERF_SAMPLE_REGS_ABI_32;
+ else
+ return PERF_SAMPLE_REGS_ABI_64;
+}
+#endif /* CONFIG_32BIT */
+
+int perf_reg_validate(u64 mask)
+{
+ if (!mask)
+ return -EINVAL;
+ if (mask & ~((1ull << PERF_REG_MIPS_MAX) - 1))
+ return -EINVAL;
+ return 0;
+}
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ long v;
+
+ switch (idx) {
+ case PERF_REG_MIPS_PC:
+ v = regs->cp0_epc;
+ break;
+ case PERF_REG_MIPS_R1 ... PERF_REG_MIPS_R25:
+ v = regs->regs[idx - PERF_REG_MIPS_R1 + 1];
+ break;
+ case PERF_REG_MIPS_R28 ... PERF_REG_MIPS_R31:
+ v = regs->regs[idx - PERF_REG_MIPS_R28 + 28];
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ return (s64)v; /* Sign extend if 32-bit. */
+}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
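
perf_reg_value() above returns the register through a signed long, so a 32-bit kernel hands perf sign-extended 64-bit samples. The cast chain can be reproduced in plain C (illustration only, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg = 0x80001234u;		/* register value with bit 31 set */
	int32_t v = (int32_t)reg;		/* "long" on a 32-bit MIPS kernel */
	uint64_t sample = (uint64_t)(int64_t)v;	/* what "return (s64)v;" produces */

	printf("raw=0x%08x sample=0x%016llx\n", reg, (unsigned long long)sample);
	return 0;
}
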
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index d7e288f3a1e7..7efa0d1a4c2b 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -9,50 +9,35 @@
* Copyright (C) 2004 Thiemo Seufer
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
+#include <linux/cpu.h>
#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-#include <linux/sched/task.h>
-#include <linux/sched/task_stack.h>
-#include <linux/tick.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/export.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/personality.h>
-#include <linux/sys.h>
#include <linux/init.h>
-#include <linux/completion.h>
#include <linux/kallsyms.h>
-#include <linux/random.h>
-#include <linux/prctl.h>
+#include <linux/kernel.h>
#include <linux/nmi.h>
-#include <linux/cpu.h>
+#include <linux/personality.h>
+#include <linux/prctl.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
#include <asm/abi.h>
#include <asm/asm.h>
-#include <asm/bootinfo.h>
-#include <asm/cpu.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
+#include <asm/exec.h>
#include <asm/fpu.h>
+#include <asm/inst.h>
#include <asm/irq.h>
-#include <asm/mips-cps.h>
+#include <asm/irq_regs.h>
+#include <asm/isadep.h>
#include <asm/msa.h>
+#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-#include <asm/elf.h>
-#include <asm/isadep.h>
-#include <asm/inst.h>
#include <asm/stacktrace.h>
-#include <asm/irq_regs.h>
-#include <asm/exec.h>
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
@@ -135,7 +120,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
/* Put the stack after the struct pt_regs. */
childksp = (unsigned long) childregs;
p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
/* kernel thread */
unsigned long status = p->thread.cp0_status;
memset(childregs, 0, sizeof(struct pt_regs));
@@ -205,6 +190,36 @@ struct mips_frame_info {
#define J_TARGET(pc,target) \
(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
+static inline int is_jr_ra_ins(union mips_instruction *ip)
+{
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * jr16 ra
+ * jr ra
+ */
+ if (mm_insn_16bit(ip->word >> 16)) {
+ if (ip->mm16_r5_format.opcode == mm_pool16c_op &&
+ ip->mm16_r5_format.rt == mm_jr16_op &&
+ ip->mm16_r5_format.imm == 31)
+ return 1;
+ return 0;
+ }
+
+ if (ip->r_format.opcode == mm_pool32a_op &&
+ ip->r_format.func == mm_pool32axf_op &&
+ ((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op &&
+ ip->r_format.rt == 31)
+ return 1;
+ return 0;
+#else
+ if (ip->r_format.opcode == spec_op &&
+ ip->r_format.func == jr_op &&
+ ip->r_format.rs == 31)
+ return 1;
+ return 0;
+#endif
+}
+
static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
@@ -390,10 +405,8 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
static int get_frame_info(struct mips_frame_info *info)
{
bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
- union mips_instruction insn, *ip;
- const unsigned int max_insns = 128;
+ union mips_instruction insn, *ip, *ip_end;
unsigned int last_insn_size = 0;
- unsigned int i;
bool saw_jump = false;
info->pc_offset = -1;
@@ -403,7 +416,9 @@ static int get_frame_info(struct mips_frame_info *info)
if (!ip)
goto err;
- for (i = 0; i < max_insns; i++) {
+ ip_end = (void *)ip + (info->func_size ? info->func_size : 512);
+
+ while (ip < ip_end) {
ip = (void *)ip + last_insn_size;
if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
@@ -417,7 +432,9 @@ static int get_frame_info(struct mips_frame_info *info)
last_insn_size = 4;
}
- if (!info->frame_size) {
+ if (is_jr_ra_ins(ip)) {
+ break;
+ } else if (!info->frame_size) {
is_sp_move_ins(&insn, &info->frame_size);
continue;
} else if (!saw_jump && is_jump_ins(ip)) {
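
get_frame_info() above now stops scanning at the function's return, detected by is_jr_ra_ins(). For classic MIPS that is plain field matching on the instruction word; a standalone check of the encoding (fields per the MIPS32 manual) looks like this:

#include <stdint.h>
#include <stdio.h>

/* "jr ra": SPECIAL opcode (0), function code jr (0x08), rs = 31 ($ra). */
static int is_jr_ra(uint32_t insn)
{
	uint32_t opcode = insn >> 26;
	uint32_t rs     = (insn >> 21) & 0x1f;
	uint32_t funct  = insn & 0x3f;

	return opcode == 0 && funct == 0x08 && rs == 31;
}

int main(void)
{
	/* 0x03e00008 is the canonical "jr ra" encoding; 0 is "nop". */
	printf("%d %d\n", is_jr_ra(0x03e00008), is_jr_ra(0x00000000));
	return 0;
}
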
diff --git a/arch/mips/kernel/r4k-bugs64.c b/arch/mips/kernel/r4k-bugs64.c
index 1ff19f1ea5ca..35729c9e6cfa 100644
--- a/arch/mips/kernel/r4k-bugs64.c
+++ b/arch/mips/kernel/r4k-bugs64.c
@@ -18,7 +18,7 @@
static char bug64hit[] __initdata =
"reliable operation impossible!\n%s";
static char nowar[] __initdata =
- "Please report to <linux-mips@linux-mips.org>.";
+ "Please report to <linux-mips@vger.kernel.org>.";
static char r4kwar[] __initdata =
"Enable CPU_R4000_WORKAROUNDS to rectify.";
static char daddiwar[] __initdata =
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index 0e365b7c742d..499a5357c09f 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -70,18 +70,14 @@ static void __init sync_icache(void *kbase, unsigned long kernel_length)
__sync();
}
-static int __init apply_r_mips_64_rel(u32 *loc_orig, u32 *loc_new, long offset)
+static void __init apply_r_mips_64_rel(u32 *loc_new, long offset)
{
*(u64 *)loc_new += offset;
-
- return 0;
}
-static int __init apply_r_mips_32_rel(u32 *loc_orig, u32 *loc_new, long offset)
+static void __init apply_r_mips_32_rel(u32 *loc_new, long offset)
{
*loc_new += offset;
-
- return 0;
}
static int __init apply_r_mips_26_rel(u32 *loc_orig, u32 *loc_new, long offset)
@@ -114,7 +110,8 @@ static int __init apply_r_mips_26_rel(u32 *loc_orig, u32 *loc_new, long offset)
}
-static int __init apply_r_mips_hi16_rel(u32 *loc_orig, u32 *loc_new, long offset)
+static void __init apply_r_mips_hi16_rel(u32 *loc_orig, u32 *loc_new,
+ long offset)
{
unsigned long insn = *loc_orig;
unsigned long target = (insn & 0xffff) << 16; /* high 16bits of target */
@@ -122,17 +119,33 @@ static int __init apply_r_mips_hi16_rel(u32 *loc_orig, u32 *loc_new, long offset
target += offset;
*loc_new = (insn & ~0xffff) | ((target >> 16) & 0xffff);
- return 0;
}
-static int (*reloc_handlers_rel[]) (u32 *, u32 *, long) __initdata = {
- [R_MIPS_64] = apply_r_mips_64_rel,
- [R_MIPS_32] = apply_r_mips_32_rel,
- [R_MIPS_26] = apply_r_mips_26_rel,
- [R_MIPS_HI16] = apply_r_mips_hi16_rel,
-};
+static int __init reloc_handler(u32 type, u32 *loc_orig, u32 *loc_new,
+ long offset)
+{
+ switch (type) {
+ case R_MIPS_64:
+ apply_r_mips_64_rel(loc_new, offset);
+ break;
+ case R_MIPS_32:
+ apply_r_mips_32_rel(loc_new, offset);
+ break;
+ case R_MIPS_26:
+ return apply_r_mips_26_rel(loc_orig, loc_new, offset);
+ case R_MIPS_HI16:
+ apply_r_mips_hi16_rel(loc_orig, loc_new, offset);
+ break;
+ default:
+ pr_err("Unhandled relocation type %d at 0x%pK\n", type,
+ loc_orig);
+ return -ENOEXEC;
+ }
+
+ return 0;
+}
-int __init do_relocations(void *kbase_old, void *kbase_new, long offset)
+static int __init do_relocations(void *kbase_old, void *kbase_new, long offset)
{
u32 *r;
u32 *loc_orig;
@@ -149,14 +162,7 @@ int __init do_relocations(void *kbase_old, void *kbase_new, long offset)
loc_orig = kbase_old + ((*r & 0x00ffffff) << 2);
loc_new = RELOCATED(loc_orig);
- if (reloc_handlers_rel[type] == NULL) {
- /* Unsupported relocation */
- pr_err("Unhandled relocation type %d at 0x%pK\n",
- type, loc_orig);
- return -ENOEXEC;
- }
-
- res = reloc_handlers_rel[type](loc_orig, loc_new, offset);
+ res = reloc_handler(type, loc_orig, loc_new, offset);
if (res)
return res;
}
@@ -300,6 +306,13 @@ static inline int __init relocation_addr_valid(void *loc_new)
return 1;
}
+static inline void __init update_kaslr_offset(unsigned long *addr, long offset)
+{
+ unsigned long *new_addr = (unsigned long *)RELOCATED(addr);
+
+ *new_addr = (unsigned long)offset;
+}
+
#if defined(CONFIG_USE_OF)
void __weak *plat_get_fdt(void)
{
@@ -410,6 +423,9 @@ void *__init relocate_kernel(void)
/* Return the new kernel's entry point */
kernel_entry = RELOCATED(start_kernel);
+
+ /* Error may occur before, so keep it at last */
+ update_kaslr_offset(&__kaslr_offset, offset);
}
out:
return kernel_entry;
@@ -418,15 +434,11 @@ out:
/*
* Show relocation information on panic.
*/
-void show_kernel_relocation(const char *level)
+static void show_kernel_relocation(const char *level)
{
- unsigned long offset;
-
- offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
-
- if (IS_ENABLED(CONFIG_RELOCATABLE) && offset > 0) {
+ if (__kaslr_offset > 0) {
printk(level);
- pr_cont("Kernel relocated by 0x%pK\n", (void *)offset);
+ pr_cont("Kernel relocated by 0x%pK\n", (void *)__kaslr_offset);
pr_cont(" .text @ 0x%pK\n", _text);
pr_cont(" .data @ 0x%pK\n", _sdata);
pr_cont(" .bss @ 0x%pK\n", __bss_start);
diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S
index 23b2e2b1609c..5e9c497ce099 100644
--- a/arch/mips/kernel/scall64-n64.S
+++ b/arch/mips/kernel/scall64-n64.S
@@ -20,7 +20,7 @@
#include <asm/unistd.h>
#include <asm/war.h>
-#ifndef CONFIG_BINFMT_ELF32
+#ifndef CONFIG_MIPS32_COMPAT
/* Neither O32 nor N32, so define handle_sys here */
#define handle_sys64 handle_sys
#endif
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 7e1f8e277437..279be0153f8b 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -27,8 +27,8 @@
#include <linux/dma-map-ops.h>
#include <linux/decompress/generic.h>
#include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
#include <linux/dmi.h>
+#include <linux/crash_dump.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
@@ -37,7 +37,6 @@
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
-#include <asm/dma-coherence.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
@@ -84,6 +83,9 @@ static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };
+unsigned long __kaslr_offset __ro_after_init;
+EXPORT_SYMBOL(__kaslr_offset);
+
static void *detect_magic __initdata = detect_memory_region;
#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
@@ -404,34 +406,32 @@ static int __init early_parse_memmap(char *p)
}
early_param("memmap", early_parse_memmap);
-#ifdef CONFIG_PROC_VMCORE
-static unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
-static int __init early_parse_elfcorehdr(char *p)
+static void __init mips_reserve_vmcore(void)
{
+#ifdef CONFIG_PROC_VMCORE
phys_addr_t start, end;
u64 i;
- setup_elfcorehdr = memparse(p, &p);
-
- for_each_mem_range(i, &start, &end) {
- if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
- /*
- * Reserve from the elf core header to the end of
- * the memory segment, that should all be kdump
- * reserved memory.
- */
- setup_elfcorehdr_size = end - setup_elfcorehdr;
- break;
+ if (!elfcorehdr_size) {
+ for_each_mem_range(i, &start, &end) {
+ if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
+ /*
+ * Reserve from the elf core header to the end of
+ * the memory segment, that should all be kdump
+ * reserved memory.
+ */
+ elfcorehdr_size = end - elfcorehdr_addr;
+ break;
+ }
}
}
- /*
- * If we don't find it in the memory map, then we shouldn't
- * have to worry about it, as the new kernel won't use it.
- */
- return 0;
-}
-early_param("elfcorehdr", early_parse_elfcorehdr);
+
+ pr_info("Reserving %ldKB of memory at %ldKB for kdump\n",
+ (unsigned long)elfcorehdr_size >> 10, (unsigned long)elfcorehdr_addr >> 10);
+
+ memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
#endif
+}
#ifdef CONFIG_KEXEC
@@ -653,13 +653,7 @@ static void __init arch_mem_init(char **cmdline_p)
*/
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
-#ifdef CONFIG_PROC_VMCORE
- if (setup_elfcorehdr && setup_elfcorehdr_size) {
- printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
- setup_elfcorehdr, setup_elfcorehdr_size);
- memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size);
- }
-#endif
+ mips_reserve_vmcore();
mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
@@ -686,8 +680,6 @@ static void __init arch_mem_init(char **cmdline_p)
memblock_reserve(__pa_symbol(&__nosave_begin),
__pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));
- fdt_init_reserved_mem();
-
early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
}
@@ -792,10 +784,6 @@ void __init setup_arch(char **cmdline_p)
unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
-#ifdef CONFIG_USE_OF
-unsigned long fw_passed_dtb;
-#endif
-
#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
@@ -806,15 +794,10 @@ static int __init debugfs_mips(void)
arch_initcall(debugfs_mips);
#endif
-#ifdef CONFIG_DMA_MAYBE_COHERENT
-/* User defined DMA coherency from command line. */
-enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
-EXPORT_SYMBOL_GPL(coherentio);
-int hw_coherentio; /* Actual hardware supported DMA coherency setting. */
-
+#ifdef CONFIG_DMA_NONCOHERENT
static int __init setcoherentio(char *str)
{
- coherentio = IO_COHERENCE_ENABLED;
+ dma_default_coherent = true;
pr_info("Hardware DMA cache coherency (command line)\n");
return 0;
}
@@ -822,7 +805,7 @@ early_param("coherentio", setcoherentio);
static int __init setnocoherentio(char *str)
{
- coherentio = IO_COHERENCE_DISABLED;
+	dma_default_coherent = false;
pr_info("Software DMA cache coherency (command line)\n");
return 0;
}
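
mips_reserve_vmcore() above falls back to the memory map when no size accompanies the ELF core header address: it reserves from the header to the end of the range that contains it. A standalone sketch of that lookup over a plain array instead of memblock:

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };

/* Given the ELF core header address, return the number of bytes from it
 * to the end of the containing memory range - the same "reserve to the
 * end of the segment" rule used by mips_reserve_vmcore(). */
static uint64_t vmcore_size(const struct range *map, int n, uint64_t hdr)
{
	for (int i = 0; i < n; i++)
		if (hdr >= map[i].start && hdr < map[i].end)
			return map[i].end - hdr;
	return 0;	/* not in the map: nothing to reserve */
}

int main(void)
{
	struct range map[] = { { 0x00100000, 0x10000000 },
			       { 0x20000000, 0x30000000 } };

	printf("reserve %#llx bytes\n",
	       (unsigned long long)vmcore_size(map, 2, 0x2f000000));
	return 0;
}
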
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 8b027c72b8ef..bcd6a944b839 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -451,9 +451,6 @@ static int cps_cpu_disable(void)
unsigned cpu = smp_processor_id();
struct core_boot_config *core_cfg;
- if (!cpu)
- return -EBUSY;
-
if (!cps_pm_support_state(CPS_PM_POWER_GATED))
return -EINVAL;
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 74b9102fd06e..ef86fbad8546 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -59,7 +59,7 @@ static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);
/*
- * A logcal cpu mask containing only one VPE per core to
+ * A logical cpu mask containing only one VPE per core to
* reduce the number of IPIs on large MT systems.
*/
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
@@ -510,8 +510,8 @@ static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
* address spaces, a new context is obtained on the current cpu, and tlb
* context on other cpus are invalidated to force a new context allocation
* at switch_mm time, should the mm ever be used on other cpus. For
- * multithreaded address spaces, intercpu interrupts have to be sent.
- * Another case where intercpu interrupts are required is when the target
+ * multithreaded address spaces, inter-CPU interrupts have to be sent.
+ * Another case where inter-CPU interrupts are required is when the target
* mm might be active on another cpu (eg debuggers doing the flushes on
* behalf of debugees, kswapd stealing pages from another process etc).
* Kanoj 07/00.
diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile
index 6efb2f6889a7..51f8b805f2ed 100644
--- a/arch/mips/kernel/syscalls/Makefile
+++ b/arch/mips/kernel/syscalls/Makefile
@@ -5,9 +5,9 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm
_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
-syscalln32 := $(srctree)/$(src)/syscall_n32.tbl
-syscalln64 := $(srctree)/$(src)/syscall_n64.tbl
-syscallo32 := $(srctree)/$(src)/syscall_o32.tbl
+syscalln32 := $(src)/syscall_n32.tbl
+syscalln64 := $(src)/syscall_n64.tbl
+syscallo32 := $(src)/syscall_o32.tbl
syshdr := $(srctree)/$(src)/syscallhdr.sh
sysnr := $(srctree)/$(src)/syscallnr.sh
systbl := $(srctree)/$(src)/syscalltbl.sh
@@ -31,66 +31,67 @@ quiet_cmd_systbl = SYSTBL $@
'$(systbl_offset_$(basetarget))'
syshdr_offset_unistd_n32 := __NR_Linux
-$(uapi)/unistd_n32.h: $(syscalln32) $(syshdr)
+$(uapi)/unistd_n32.h: $(syscalln32) $(syshdr) FORCE
$(call if_changed,syshdr)
syshdr_offset_unistd_n64 := __NR_Linux
-$(uapi)/unistd_n64.h: $(syscalln64) $(syshdr)
+$(uapi)/unistd_n64.h: $(syscalln64) $(syshdr) FORCE
$(call if_changed,syshdr)
syshdr_offset_unistd_o32 := __NR_Linux
-$(uapi)/unistd_o32.h: $(syscallo32) $(syshdr)
+$(uapi)/unistd_o32.h: $(syscallo32) $(syshdr) FORCE
$(call if_changed,syshdr)
sysnr_pfx_unistd_nr_n32 := N32
sysnr_offset_unistd_nr_n32 := 6000
-$(uapi)/unistd_nr_n32.h: $(syscalln32) $(sysnr)
+$(kapi)/unistd_nr_n32.h: $(syscalln32) $(sysnr) FORCE
$(call if_changed,sysnr)
sysnr_pfx_unistd_nr_n64 := 64
sysnr_offset_unistd_nr_n64 := 5000
-$(uapi)/unistd_nr_n64.h: $(syscalln64) $(sysnr)
+$(kapi)/unistd_nr_n64.h: $(syscalln64) $(sysnr) FORCE
$(call if_changed,sysnr)
sysnr_pfx_unistd_nr_o32 := O32
sysnr_offset_unistd_nr_o32 := 4000
-$(uapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr)
+$(kapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr) FORCE
$(call if_changed,sysnr)
systbl_abi_syscall_table_32_o32 := 32_o32
systbl_offset_syscall_table_32_o32 := 4000
-$(kapi)/syscall_table_32_o32.h: $(syscallo32) $(systbl)
+$(kapi)/syscall_table_32_o32.h: $(syscallo32) $(systbl) FORCE
$(call if_changed,systbl)
systbl_abi_syscall_table_64_n32 := 64_n32
systbl_offset_syscall_table_64_n32 := 6000
-$(kapi)/syscall_table_64_n32.h: $(syscalln32) $(systbl)
+$(kapi)/syscall_table_64_n32.h: $(syscalln32) $(systbl) FORCE
$(call if_changed,systbl)
systbl_abi_syscall_table_64_n64 := 64_n64
systbl_offset_syscall_table_64_n64 := 5000
-$(kapi)/syscall_table_64_n64.h: $(syscalln64) $(systbl)
+$(kapi)/syscall_table_64_n64.h: $(syscalln64) $(systbl) FORCE
$(call if_changed,systbl)
systbl_abi_syscall_table_64_o32 := 64_o32
systbl_offset_syscall_table_64_o32 := 4000
-$(kapi)/syscall_table_64_o32.h: $(syscallo32) $(systbl)
+$(kapi)/syscall_table_64_o32.h: $(syscallo32) $(systbl) FORCE
$(call if_changed,systbl)
uapisyshdr-y += unistd_n32.h \
unistd_n64.h \
- unistd_o32.h \
- unistd_nr_n32.h \
- unistd_nr_n64.h \
- unistd_nr_o32.h
+ unistd_o32.h
kapisyshdr-y += syscall_table_32_o32.h \
syscall_table_64_n32.h \
syscall_table_64_n64.h \
- syscall_table_64_o32.h
+ syscall_table_64_o32.h \
+ unistd_nr_n32.h \
+ unistd_nr_n64.h \
+ unistd_nr_o32.h
-targets += $(uapisyshdr-y) $(kapisyshdr-y)
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
PHONY += all
-all: $(addprefix $(uapi)/,$(uapisyshdr-y))
-all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+all: $(uapisyshdr-y) $(kapisyshdr-y)
@:
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 0f03ad223f33..8fd8c1790941 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -380,3 +380,4 @@
439 n32 faccessat2 sys_faccessat2
440 n32 process_madvise sys_process_madvise
441 n32 epoll_pwait2 compat_sys_epoll_pwait2
+442 n32 mount_setattr sys_mount_setattr
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index 91649690b52f..169f21438065 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -356,3 +356,4 @@
439 n64 faccessat2 sys_faccessat2
440 n64 process_madvise sys_process_madvise
441 n64 epoll_pwait2 sys_epoll_pwait2
+442 n64 mount_setattr sys_mount_setattr
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 4bad0c40aed6..090d29ca80ff 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -429,3 +429,4 @@
439 o32 faccessat2 sys_faccessat2
440 o32 process_madvise sys_process_madvise
441 o32 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 o32 mount_setattr sys_mount_setattr
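
All three tables gain syscall 442, mount_setattr (numbers are relative to each ABI's base: 4000 for o32, 6000 for n32, 5000 for n64, per the Makefile above). A minimal userspace sketch of calling it by number; the struct layout and flag values below mirror the 5.12 UAPI and should be checked against <linux/mount.h> rather than trusted from here:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_mount_setattr
/* Table number from the patch; MIPS ABIs add their base (e.g. 4000 + 442
 * for o32), which <sys/syscall.h> normally provides for you. */
#define __NR_mount_setattr 442
#endif

/* Local copy of the UAPI struct - assumption, verify on a real system. */
struct mount_attr {
	uint64_t attr_set;
	uint64_t attr_clr;
	uint64_t propagation;
	uint64_t userns_fd;
};

#ifndef MOUNT_ATTR_RDONLY
#define MOUNT_ATTR_RDONLY	0x00000001
#endif
#ifndef AT_RECURSIVE
#define AT_RECURSIVE		0x8000
#endif

int main(void)
{
	struct mount_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attr_set = MOUNT_ATTR_RDONLY;

	/* Recursively mark /mnt read-only; needs CAP_SYS_ADMIN. */
	if (syscall(__NR_mount_setattr, AT_FDCWD, "/mnt", AT_RECURSIVE,
		    &attr, sizeof(attr)) < 0)
		perror("mount_setattr");

	return 0;
}
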
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 5e97e9d02f98..c1c345be04ff 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -66,9 +66,10 @@ SECTIONS
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
- *(.text.*)
*(.fixup)
*(.gnu.warning)
+ . = ALIGN(16);
+ *(.got) /* Global offset table */
} :text = 0
_etext = .; /* End of text section */
@@ -90,6 +91,7 @@ SECTIONS
INIT_TASK_DATA(THREAD_SIZE)
NOSAVE_DATA
+ PAGE_ALIGNED_DATA(PAGE_SIZE)
CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
DATA_DATA
@@ -137,6 +139,11 @@ SECTIONS
PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
#endif
+ .rel.dyn : ALIGN(8) {
+ *(.rel)
+ *(.rel*)
+ }
+
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
.appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
*(.appended_dtb)
@@ -220,9 +227,9 @@ SECTIONS
/* ABI crap starts here */
*(.MIPS.abiflags)
*(.MIPS.options)
+ *(.gnu.attributes)
*(.options)
*(.pdr)
*(.reginfo)
- *(.eh_frame)
}
}
diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c
index 9268ebc0f61e..e673603e11e5 100644
--- a/arch/mips/kernel/vpe-cmp.c
+++ b/arch/mips/kernel/vpe-cmp.c
@@ -117,8 +117,8 @@ int __init vpe_module_init(void)
}
device_initialize(&vpe_device);
- vpe_device.class = &vpe_class,
- vpe_device.parent = NULL,
+ vpe_device.class = &vpe_class;
+ vpe_device.parent = NULL;
dev_set_name(&vpe_device, "vpe_sp");
vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
err = device_add(&vpe_device);
diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c
index 2e003b11a098..bad6b0891b2b 100644
--- a/arch/mips/kernel/vpe-mt.c
+++ b/arch/mips/kernel/vpe-mt.c
@@ -365,8 +365,8 @@ int __init vpe_module_init(void)
}
device_initialize(&vpe_device);
- vpe_device.class = &vpe_class,
- vpe_device.parent = NULL,
+ vpe_device.class = &vpe_class;
+ vpe_device.parent = NULL;
dev_set_name(&vpe_device, "vpe1");
vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
err = device_add(&vpe_device);
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index d0d832ab3d3b..13294972707b 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -746,28 +746,12 @@ static int vpe_elfload(struct vpe *v)
return 0;
}
-static int getcwd(char *buff, int size)
-{
- mm_segment_t old_fs;
- int ret;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
- ret = sys_getcwd(buff, size);
-
- set_fs(old_fs);
-
- return ret;
-}
-
/* checks VPE is unused and gets ready to load program */
static int vpe_open(struct inode *inode, struct file *filp)
{
enum vpe_state state;
struct vpe_notifications *notifier;
struct vpe *v;
- int ret;
if (VPE_MODULE_MINOR != iminor(inode)) {
/* assume only 1 device at the moment. */
@@ -803,12 +787,6 @@ static int vpe_open(struct inode *inode, struct file *filp)
v->plen = P_SIZE;
v->load_addr = NULL;
v->len = 0;
-
- v->cwd[0] = 0;
- ret = getcwd(v->cwd, VPE_PATH_MAX);
- if (ret < 0)
- pr_warn("VPE loader: open, getcwd returned %d\n", ret);
-
v->shared_ptr = NULL;
v->__start = 0;
@@ -915,17 +893,6 @@ int vpe_notify(int index, struct vpe_notifications *notify)
}
EXPORT_SYMBOL(vpe_notify);
-char *vpe_getcwd(int index)
-{
- struct vpe *v = get_vpe(index);
-
- if (v == NULL)
- return NULL;
-
- return v->cwd;
-}
-EXPORT_SYMBOL(vpe_getcwd);
-
module_init(vpe_module_init);
module_exit(vpe_module_exit);
MODULE_DESCRIPTION("MIPS VPE Loader");
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 3d6a7f5827b1..58a8812e2fa5 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -148,7 +148,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
default:
/* Unsupported KVM type */
return -EINVAL;
- };
+ }
/* Allocate page table to map GPA -> RPA */
kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index df8eed3875f6..acfbdc01b0ac 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sched.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
@@ -302,7 +303,7 @@ static void ltq_hw_irq_handler(struct irq_desc *desc)
generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
/* if this is a EBU irq, we need to ack it or get a deadlock */
- if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
+ if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
LTQ_EBU_PCC_ISTAT);
}
@@ -422,12 +423,9 @@ unsigned int get_c0_compare_int(void)
return CP0_LEGACY_COMPARE_IRQ;
}
-static const struct of_device_id of_irq_ids[] __initconst = {
- { .compatible = "lantiq,icu", .data = icu_of_init },
- {},
-};
+IRQCHIP_DECLARE(lantiq_icu, "lantiq,icu", icu_of_init);
void __init arch_init_irq(void)
{
- of_irq_init(of_irq_ids);
+ irqchip_init();
}
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index 51a218f04fe0..bc9f58fcbdf9 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -44,10 +44,6 @@ int ltq_soc_type(void)
return soc_info.type;
}
-void __init prom_free_prom_memory(void)
-{
-}
-
static void __init prom_init_cmdline(void)
{
int argc = fw_arg0;
@@ -77,11 +73,8 @@ void __init plat_mem_setup(void)
set_io_port_base((unsigned long) KSEG1);
- if (fw_passed_dtb) /* UHI interface */
- dtb = (void *)fw_passed_dtb;
- else if (__dtb_start != __dtb_end)
- dtb = (void *)__dtb_start;
- else
+ dtb = get_fdt();
+ if (dtb == NULL)
panic("no dtb found");
/*
diff --git a/arch/mips/lib/iomap-pci.c b/arch/mips/lib/iomap-pci.c
index 210f5a95ecb1..a9cb28813f0b 100644
--- a/arch/mips/lib/iomap-pci.c
+++ b/arch/mips/lib/iomap-pci.c
@@ -32,7 +32,7 @@ void __iomem *__pci_ioport_map(struct pci_dev *dev,
sprintf(name, "%04x:%02x", pci_domain_nr(bus), bus->number);
printk(KERN_WARNING "io_map_base of root PCI bus %s unset. "
"Trying to continue but you better\nfix this issue or "
- "report it to linux-mips@linux-mips.org or your "
+ "report it to linux-mips@vger.kernel.org or your "
"vendor.\n", name);
#ifdef CONFIG_PCI_DOMAINS
panic("To avoid data corruption io_map_base MUST be set with "
diff --git a/arch/mips/loongson2ef/common/init.c b/arch/mips/loongson2ef/common/init.c
index ce3f02f75e2a..7797359359e4 100644
--- a/arch/mips/loongson2ef/common/init.c
+++ b/arch/mips/loongson2ef/common/init.c
@@ -19,7 +19,6 @@ unsigned long __maybe_unused _loongson_addrwincfg_base;
static void __init mips_nmi_setup(void)
{
void *base;
- extern char except_vec_nmi[];
base = (void *)(CAC_BASE + 0x380);
memcpy(base, except_vec_nmi, 0x80);
@@ -46,7 +45,3 @@ void __init prom_init(void)
prom_init_uart_base();
board_nmi_handler_setup = mips_nmi_setup;
}
-
-void __init prom_free_prom_memory(void)
-{
-}
diff --git a/arch/mips/loongson2ef/common/mem.c b/arch/mips/loongson2ef/common/mem.c
index 057d58bb470e..fceb3ee47eb0 100644
--- a/arch/mips/loongson2ef/common/mem.c
+++ b/arch/mips/loongson2ef/common/mem.c
@@ -41,14 +41,3 @@ void __init prom_init_memory(void)
memblock_add(LOONGSON_HIGHMEM_START, highmemsize << 20);
#endif /* !CONFIG_64BIT */
}
-
-/* override of arch/mips/mm/cache.c: __uncached_access */
-int __uncached_access(struct file *file, unsigned long addr)
-{
- if (file->f_flags & O_DSYNC)
- return 1;
-
- return addr >= __pa(high_memory) ||
- ((addr >= LOONGSON_MMIO_MEM_START) &&
- (addr < LOONGSON_MMIO_MEM_END));
-}
diff --git a/arch/mips/loongson2ef/fuloong-2e/irq.c b/arch/mips/loongson2ef/fuloong-2e/irq.c
index 305aa2eb74ad..b1c9d4ee0335 100644
--- a/arch/mips/loongson2ef/fuloong-2e/irq.c
+++ b/arch/mips/loongson2ef/fuloong-2e/irq.c
@@ -26,7 +26,7 @@ asmlinkage void mach_irq_dispatch(unsigned int pending)
if (pending & CAUSEF_IP7)
do_IRQ(MIPS_CPU_IRQ_BASE + 7);
else if (pending & CAUSEF_IP6) /* perf counter loverflow */
- do_perfcnt_IRQ();
+ return;
else if (pending & CAUSEF_IP5)
i8259_irqdispatch();
else if (pending & CAUSEF_IP2)
diff --git a/arch/mips/loongson2ef/lemote-2f/irq.c b/arch/mips/loongson2ef/lemote-2f/irq.c
index 6f00579971a3..f5a731a2a35f 100644
--- a/arch/mips/loongson2ef/lemote-2f/irq.c
+++ b/arch/mips/loongson2ef/lemote-2f/irq.c
@@ -75,7 +75,6 @@ void mach_irq_dispatch(unsigned int pending)
if (pending & CAUSEF_IP7)
do_IRQ(LOONGSON_TIMER_IRQ);
else if (pending & CAUSEF_IP6) { /* North Bridge, Perf counter */
- do_perfcnt_IRQ();
bonito_irqdispatch();
} else if (pending & CAUSEF_IP3) /* CPU UART */
do_IRQ(LOONGSON_UART_IRQ);
diff --git a/arch/mips/loongson32/common/prom.c b/arch/mips/loongson32/common/prom.c
index c133b5adf34e..fc580a22748e 100644
--- a/arch/mips/loongson32/common/prom.c
+++ b/arch/mips/loongson32/common/prom.c
@@ -36,10 +36,6 @@ void __init prom_init(void)
setup_8250_early_printk_port((unsigned long)uart_base, 0, 0);
}
-void __init prom_free_prom_memory(void)
-{
-}
-
void __init plat_mem_setup(void)
{
memblock_add(0x0, (memsize << 20));
diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform
index ec42c5085905..3e660d6d3c2b 100644
--- a/arch/mips/loongson64/Platform
+++ b/arch/mips/loongson64/Platform
@@ -6,28 +6,6 @@
cflags-$(CONFIG_CPU_LOONGSON64) += -Wa,--trap
#
-# Some versions of binutils, not currently mainline as of 2019/02/04, support
-# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction
-# to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h for a
-# description).
-#
-# We disable this in order to prevent the assembler meddling with the
-# instruction that labels refer to, ie. if we label an ll instruction:
-#
-# 1: ll v0, 0(a0)
-#
-# ...then with the assembler fix applied the label may actually point at a sync
-# instruction inserted by the assembler, and if we were using the label in an
-# exception table the table would no longer contain the address of the ll
-# instruction.
-#
-# Avoid this by explicitly disabling that assembler behaviour. If upstream
-# binutils does not merge support for the flag then we can revisit & remove
-# this later - for now it ensures vendor toolchains don't cause problems.
-#
-cflags-$(CONFIG_CPU_LOONGSON64) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
-
-#
# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
# as MIPS64 R2; older versions as just R1. This leaves the possibility open
# that GCC might generate R2 code for -march=loongson3a which then is rejected
@@ -35,7 +13,7 @@ cflags-$(CONFIG_CPU_LOONGSON64) += $(call as-option,-Wa$(comma)-mno-fix-loongson
# can't easily be used safely within the kbuild framework.
#
ifeq ($(call cc-ifversion, -ge, 0409, y), y)
- ifeq ($(call ld-ifversion, -ge, 225000000, y), y)
+ ifeq ($(call ld-ifversion, -ge, 22500, y), y)
cflags-$(CONFIG_CPU_LOONGSON64) += \
$(call cc-option,-march=loongson3a -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
else
diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
index ed75f7971261..cfa788bca871 100644
--- a/arch/mips/loongson64/init.c
+++ b/arch/mips/loongson64/init.c
@@ -25,7 +25,6 @@ u32 node_id_offset;
static void __init mips_nmi_setup(void)
{
void *base;
- extern char except_vec_nmi[];
base = (void *)(CAC_BASE + 0x380);
memcpy(base, except_vec_nmi, 0x80);
@@ -47,6 +46,51 @@ void virtual_early_config(void)
node_id_offset = 44;
}
+void __init szmem(unsigned int node)
+{
+ u32 i, mem_type;
+ static unsigned long num_physpages;
+ u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size;
+
+ /* Parse memory information and activate */
+ for (i = 0; i < loongson_memmap->nr_map; i++) {
+ node_id = loongson_memmap->map[i].node_id;
+ if (node_id != node)
+ continue;
+
+ mem_type = loongson_memmap->map[i].mem_type;
+ mem_size = loongson_memmap->map[i].mem_size;
+ mem_start = loongson_memmap->map[i].mem_start;
+
+ switch (mem_type) {
+ case SYSTEM_RAM_LOW:
+ case SYSTEM_RAM_HIGH:
+ start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT;
+ node_psize = (mem_size << 20) >> PAGE_SHIFT;
+ end_pfn = start_pfn + node_psize;
+ num_physpages += node_psize;
+ pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
+ (u32)node_id, mem_type, mem_start, mem_size);
+ pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
+ start_pfn, end_pfn, num_physpages);
+ memblock_add_node(PFN_PHYS(start_pfn), PFN_PHYS(node_psize), node);
+ break;
+ case SYSTEM_RAM_RESERVED:
+ pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
+ (u32)node_id, mem_type, mem_start, mem_size);
+ memblock_reserve(((node_id << 44) + mem_start), mem_size << 20);
+ break;
+ }
+ }
+}
+
+#ifndef CONFIG_NUMA
+static void __init prom_init_memory(void)
+{
+ szmem(0);
+}
+#endif
+
void __init prom_init(void)
{
fw_init_cmdline();
@@ -57,7 +101,11 @@ void __init prom_init(void)
loongson_sysconf.early_config();
+#ifdef CONFIG_NUMA
prom_init_numa_memory();
+#else
+ prom_init_memory();
+#endif
/* Hardcode to CPU UART 0 */
setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE + 0x1e0), 0, 1024);
@@ -66,10 +114,6 @@ void __init prom_init(void)
board_nmi_handler_setup = mips_nmi_setup;
}
-void __init prom_free_prom_memory(void)
-{
-}
-
static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_t hw_start,
resource_size_t size)
{
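szmem() moves here from the NUMA code so that non-NUMA kernels can also feed the firmware memory map into memblock; the formerly identical SYSTEM_RAM_LOW and SYSTEM_RAM_HIGH cases are folded into one. As a rough worked example of the address math, assuming 16 KB pages (PAGE_SHIFT == 14) and a map entry for node 1 with 256 MB starting at offset 0x200000:

    /* Sketch of szmem()'s arithmetic with assumed example values. */
    u64 node_id    = 1;
    u64 mem_start  = 0x200000;                 /* from loongson_memmap */
    u64 mem_size   = 256;                      /* MB */
    u64 start_pfn  = ((node_id << 44) + mem_start) >> 14;  /* node 1 lives at 1 << 44 */
    u64 node_psize = (mem_size << 20) >> 14;                /* 256 MB == 16384 pages */
    /* memblock_add_node(PFN_PHYS(start_pfn), PFN_PHYS(node_psize), 1) then
     * registers the range against node 1; reserved entries go through
     * memblock_reserve() instead. */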
diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c
index c6f0c48384f8..a8f57bf01285 100644
--- a/arch/mips/loongson64/numa.c
+++ b/arch/mips/loongson64/numa.c
@@ -25,6 +25,7 @@
#include <asm/time.h>
#include <asm/wbflush.h>
#include <boot_param.h>
+#include <loongson.h>
static struct pglist_data prealloc__node_data[MAX_NUMNODES];
unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
@@ -81,57 +82,6 @@ static void __init init_topology_matrix(void)
}
}
-static void __init szmem(unsigned int node)
-{
- u32 i, mem_type;
- static unsigned long num_physpages;
- u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size;
-
- /* Parse memory information and activate */
- for (i = 0; i < loongson_memmap->nr_map; i++) {
- node_id = loongson_memmap->map[i].node_id;
- if (node_id != node)
- continue;
-
- mem_type = loongson_memmap->map[i].mem_type;
- mem_size = loongson_memmap->map[i].mem_size;
- mem_start = loongson_memmap->map[i].mem_start;
-
- switch (mem_type) {
- case SYSTEM_RAM_LOW:
- start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT;
- node_psize = (mem_size << 20) >> PAGE_SHIFT;
- end_pfn = start_pfn + node_psize;
- num_physpages += node_psize;
- pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
- (u32)node_id, mem_type, mem_start, mem_size);
- pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
- start_pfn, end_pfn, num_physpages);
- memblock_add_node(PFN_PHYS(start_pfn),
- PFN_PHYS(node_psize), node);
- break;
- case SYSTEM_RAM_HIGH:
- start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT;
- node_psize = (mem_size << 20) >> PAGE_SHIFT;
- end_pfn = start_pfn + node_psize;
- num_physpages += node_psize;
- pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
- (u32)node_id, mem_type, mem_start, mem_size);
- pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
- start_pfn, end_pfn, num_physpages);
- memblock_add_node(PFN_PHYS(start_pfn),
- PFN_PHYS(node_psize), node);
- break;
- case SYSTEM_RAM_RESERVED:
- pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
- (u32)node_id, mem_type, mem_start, mem_size);
- memblock_reserve(((node_id << 44) + mem_start),
- mem_size << 20);
- break;
- }
- }
-}
-
static void __init node_mem_init(unsigned int node)
{
unsigned long node_addrspace_offset;
diff --git a/arch/mips/loongson64/smp.c b/arch/mips/loongson64/smp.c
index b8c1fc3158fd..6acde65f601b 100644
--- a/arch/mips/loongson64/smp.c
+++ b/arch/mips/loongson64/smp.c
@@ -483,7 +483,8 @@ static void __init loongson3_smp_setup(void)
init_cpu_possible(cpu_none_mask);
/* For unified kernel, NR_CPUS is the maximum possible value,
- * loongson_sysconf.nr_cpus is the really present value */
+ * loongson_sysconf.nr_cpus is the really present value
+ */
while (i < loongson_sysconf.nr_cpus) {
if (loongson_sysconf.reserved_cpus_mask & (1<<i)) {
/* Reserved physical CPU cores */
@@ -492,6 +493,8 @@ static void __init loongson3_smp_setup(void)
__cpu_number_map[i] = num;
__cpu_logical_map[num] = i;
set_cpu_possible(num, true);
+ /* Loongson processors are always grouped by 4 */
+ cpu_set_cluster(&cpu_data[num], i / 4);
num++;
}
i++;
@@ -567,7 +570,8 @@ static void loongson3_cpu_die(unsigned int cpu)
/* To shutdown a core in Loongson 3, the target core should go to CKSEG1 and
* flush all L1 entries at first. Then, another core (usually Core 0) can
* safely disable the clock of the target core. loongson3_play_dead() is
- * called via CKSEG1 (uncached and unmmaped) */
+ * called via CKSEG1 (uncached and unmmaped)
+ */
static void loongson3_type1_play_dead(int *state_addr)
{
register int val;
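The SMP bring-up now also records which physical cluster each present core sits in, relying on the assumption spelled out in the comment that Loongson cores come in groups of four:

    cpu_set_cluster(&cpu_data[num], i / 4);  /* cores 0-3 -> cluster 0, 4-7 -> cluster 1, ... */
    /* e.g. physical core 6 lands in cluster 1; the value can later be read back
     * with cpu_cluster(&cpu_data[num]) (assuming that accessor). */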
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 4f976d687ab0..74b09e801c3a 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitops.h>
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <asm/bcache.h>
#include <asm/bootinfo.h>
@@ -35,7 +36,6 @@
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
-#include <asm/dma-coherence.h>
#include <asm/mips-cps.h>
/*
@@ -1164,6 +1164,7 @@ static void probe_pcache(void)
case CPU_R4400PC:
case CPU_R4400SC:
case CPU_R4400MC:
+ case CPU_R4300:
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 1;
@@ -1593,7 +1594,7 @@ static int probe_scache(void)
return 1;
}
-static void __init loongson2_sc_init(void)
+static void loongson2_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -1913,15 +1914,11 @@ void r4k_cache_init(void)
__local_flush_icache_user_range = local_r4k_flush_icache_user_range;
#ifdef CONFIG_DMA_NONCOHERENT
-#ifdef CONFIG_DMA_MAYBE_COHERENT
- if (coherentio == IO_COHERENCE_ENABLED ||
- (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio)) {
+ if (dma_default_coherent) {
_dma_cache_wback_inv = (void *)cache_noop;
_dma_cache_wback = (void *)cache_noop;
_dma_cache_inv = (void *)cache_noop;
- } else
-#endif /* CONFIG_DMA_MAYBE_COHERENT */
- {
+ } else {
_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
_dma_cache_wback = r4k_dma_cache_wback_inv;
_dma_cache_inv = r4k_dma_cache_inv;
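The old MIPS-private coherentio/hw_coherentio switches are replaced by the generic dma_default_coherent flag from <linux/dma-map-ops.h>: a platform that detects a coherent I/O path sets the flag early during boot, and r4k_cache_init() above then stubs out the DMA cache-maintenance hooks. A minimal sketch of the producer side, with the probe left as a placeholder:

    #include <linux/dma-map-ops.h>

    static void __init plat_detect_io_coherency(void)   /* hypothetical platform hook */
    {
            if (board_has_coherent_io())                 /* placeholder for a real probe */
                    dma_default_coherent = true;         /* consumed by r4k_cache_init() */
    }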
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 23b16bfd97b2..7719d632df8d 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -21,6 +21,7 @@
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>
+#include <asm/pgtable.h>
/* Cache operations. */
void (*flush_cache_all)(void);
@@ -156,29 +157,31 @@ unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
#define PM(p) __pgprot(_page_cachable_default | (p))
+#define PVA(p) PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
static inline void setup_protection_map(void)
{
protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[4] = PM(_PAGE_PRESENT);
- protection_map[5] = PM(_PAGE_PRESENT);
- protection_map[6] = PM(_PAGE_PRESENT);
- protection_map[7] = PM(_PAGE_PRESENT);
+ protection_map[1] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[2] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ protection_map[3] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[4] = PVA(_PAGE_PRESENT);
+ protection_map[5] = PVA(_PAGE_PRESENT);
+ protection_map[6] = PVA(_PAGE_PRESENT);
+ protection_map[7] = PVA(_PAGE_PRESENT);
protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+ protection_map[9] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
_PAGE_NO_READ);
- protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
- protection_map[12] = PM(_PAGE_PRESENT);
- protection_map[13] = PM(_PAGE_PRESENT);
- protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
- protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+ protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+ protection_map[12] = PVA(_PAGE_PRESENT);
+ protection_map[13] = PVA(_PAGE_PRESENT);
+ protection_map[14] = PVA(_PAGE_PRESENT | _PAGE_WRITE);
+ protection_map[15] = PVA(_PAGE_PRESENT | _PAGE_WRITE);
}
+#undef _PVA
#undef PM
void cpu_cache_init(void)
@@ -207,11 +210,3 @@ void cpu_cache_init(void)
setup_protection_map();
}
-
-int __weak __uncached_access(struct file *file, unsigned long addr)
-{
- if (file->f_flags & O_DSYNC)
- return 1;
-
- return addr >= __pa(high_memory);
-}
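The new PVA() wrapper folds _PAGE_VALID and _PAGE_ACCESSED into every user protection value, so the first touch of a freshly mapped page no longer needs an extra fault just to mark the PTE valid and young. Roughly, for a private read/write mapping (a protection_map[3]-style entry) the change expands to:

    /* Sketch: how PM() versus PVA() expand, given the macros defined above. */
    pgprot_t before = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
    pgprot_t after  = __pgprot(_page_cachable_default | _PAGE_VALID | _PAGE_ACCESSED |
                               _PAGE_PRESENT | _PAGE_NO_EXEC);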
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 38d3d9143b47..212f3ce75a6b 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -10,7 +10,6 @@
#include <asm/cache.h>
#include <asm/cpu-type.h>
-#include <asm/dma-coherence.h>
#include <asm/io.h>
/*
@@ -136,7 +135,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
}
#endif
-#ifdef CONFIG_DMA_PERDEV_COHERENT
+#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 7c871b14e74a..e7abda9c013f 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -156,8 +156,11 @@ good_area:
*/
fault = handle_mm_fault(vma, address, flags, regs);
- if (fault_signal_pending(fault, regs))
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ goto no_context;
return;
+ }
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
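This follows the pattern already used on other architectures: when handle_mm_fault() is cut short by a pending fatal signal, a fault taken in kernel mode cannot simply return to the faulting instruction, it has to fall back to the exception-table fixup path (no_context); only user-mode faults may return and let signal delivery finish the job. Condensed, the resulting flow is:

    fault = handle_mm_fault(vma, address, flags, regs);
    if (fault_signal_pending(fault, regs)) {
            if (!user_mode(regs))
                    goto no_context;   /* kernel access: run the exception fixup */
            return;                    /* user access: the signal will be delivered */
    }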
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index bc80893e5c0f..5cb73bf74a8b 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -495,6 +495,11 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
void (*free_init_pages_eva)(void *begin, void *end) = NULL;
+void __weak __init prom_free_prom_memory(void)
+{
+ /* nothing to do */
+}
+
void __ref free_initmem(void)
{
prom_free_prom_memory();
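The __weak no-op added here is what allows every identical per-platform prom_free_prom_memory() stub further down in this series to be deleted: a platform that really has firmware memory to hand back keeps a strong definition, everyone else falls through to the empty default. The pattern, as a minimal sketch (the override below is a hypothetical example, not taken from this series):

    /* Generic code: weak no-op default, as added above. */
    void __weak __init prom_free_prom_memory(void)
    {
    }

    /* Hypothetical platform override with real work to do. */
    void __init prom_free_prom_memory(void)
    {
            free_init_pages("prom memory", FW_MEM_START, FW_MEM_END);  /* assumed bounds */
    }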
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index bd4b0656add3..61891af25019 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -45,7 +45,6 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
- flush_tlb_all();
}
#endif /* defined(CONFIG_TRANSPARENT_HUGEPAGE) */
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 183ff9f9c026..7536f7804c44 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -100,7 +100,6 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
- flush_tlb_all();
}
void __init pagetable_init(void)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index a7521b8f7658..0fb1db8a8ef7 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -549,6 +549,7 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
tlbw(p);
break;
+ case CPU_R4300:
case CPU_5KC:
case CPU_TX49XX:
case CPU_PR4450:
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index 893af377aacc..b03cac5fdc02 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -90,7 +90,6 @@ static void __init console_config(void)
static void __init mips_nmi_setup(void)
{
void *base;
- extern char except_vec_nmi[];
base = cpu_has_veic ?
(void *)(CAC_BASE + 0xa80) :
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index 7c25a0a2345c..952018812885 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -37,10 +37,6 @@ void __init fw_meminit(void)
free_init_pages_eva = eva ? free_init_pages_eva_malta : NULL;
}
-void __init prom_free_prom_memory(void)
-{
-}
-
phys_addr_t mips_cdmm_phys_base(void)
{
/* This address is "typically unused" */
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index e1fb8b534944..21cb3ac1237b 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -13,8 +13,8 @@
#include <linux/pci.h>
#include <linux/screen_info.h>
#include <linux/time.h>
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
-#include <asm/dma-coherence.h>
#include <asm/fw/fw.h>
#include <asm/mips-cps.h>
#include <asm/mips-boards/generic.h>
@@ -90,16 +90,15 @@ static void __init fd_activate(void)
}
#endif
-static int __init plat_enable_iocoherency(void)
+static void __init plat_setup_iocoherency(void)
{
- int supported = 0;
u32 cfg;
if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
pr_info("Enabled Bonito CPU coherency\n");
- supported = 1;
+ dma_default_coherent = true;
}
if (strstr(fw_getcmdline(), "iobcuncached")) {
BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
@@ -118,29 +117,16 @@ static int __init plat_enable_iocoherency(void)
/* Nothing special needs to be done to enable coherency */
pr_info("CMP IOCU detected\n");
cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0));
- if (!(cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)) {
+ if (cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)
+ dma_default_coherent = true;
+ else
pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
- return 0;
- }
- supported = 1;
}
- hw_coherentio = supported;
- return supported;
-}
-static void __init plat_setup_iocoherency(void)
-{
- if (plat_enable_iocoherency()) {
- if (coherentio == IO_COHERENCE_DISABLED)
- pr_info("Hardware DMA cache coherency disabled\n");
- else
- pr_info("Hardware DMA cache coherency enabled\n");
- } else {
- if (coherentio == IO_COHERENCE_ENABLED)
- pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n");
- else
- pr_info("Software DMA cache coherency enabled\n");
- }
+ if (dma_default_coherent)
+ pr_info("Hardware DMA cache coherency enabled\n");
+ else
+ pr_info("Software DMA cache coherency enabled\n");
}
static void __init pci_clock_check(void)
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 7efcfe0c9cd4..567720374d57 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -138,7 +138,7 @@ int get_c0_fdc_int(void)
case CPU_INTERAPTIV:
case CPU_PROAPTIV:
return -1;
- };
+ }
if (cpu_has_veic)
return -1;
diff --git a/arch/mips/n64/Makefile b/arch/mips/n64/Makefile
new file mode 100644
index 000000000000..b64a05ae218e
--- /dev/null
+++ b/arch/mips/n64/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Nintendo 64
+#
+
+obj-y := init.o irq.o
diff --git a/arch/mips/n64/Platform b/arch/mips/n64/Platform
new file mode 100644
index 000000000000..24647831356c
--- /dev/null
+++ b/arch/mips/n64/Platform
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Nintendo 64
+#
+
+cflags-$(CONFIG_MACH_NINTENDO64) += -I$(srctree)/arch/mips/include/asm/mach-n64
+load-$(CONFIG_MACH_NINTENDO64) += 0xffffffff80101000
diff --git a/arch/mips/n64/init.c b/arch/mips/n64/init.c
new file mode 100644
index 000000000000..dfbd864f4667
--- /dev/null
+++ b/arch/mips/n64/init.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nintendo 64 init.
+ *
+ * Copyright (C) 2021 Lauri Kasanen
+ */
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/memblock.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/string.h>
+
+#include <asm/bootinfo.h>
+#include <asm/fw/fw.h>
+#include <asm/time.h>
+
+#define IO_MEM_RESOURCE_START 0UL
+#define IO_MEM_RESOURCE_END 0x1fffffffUL
+
+/*
+ * System-specifc irq names for clarity
+ */
+#define MIPS_CPU_IRQ(x) (MIPS_CPU_IRQ_BASE + (x))
+#define MIPS_SOFTINT0_IRQ MIPS_CPU_IRQ(0)
+#define MIPS_SOFTINT1_IRQ MIPS_CPU_IRQ(1)
+#define RCP_IRQ MIPS_CPU_IRQ(2)
+#define CART_IRQ MIPS_CPU_IRQ(3)
+#define PRENMI_IRQ MIPS_CPU_IRQ(4)
+#define RDBR_IRQ MIPS_CPU_IRQ(5)
+#define RDBW_IRQ MIPS_CPU_IRQ(6)
+#define TIMER_IRQ MIPS_CPU_IRQ(7)
+
+static void __init iomem_resource_init(void)
+{
+ iomem_resource.start = IO_MEM_RESOURCE_START;
+ iomem_resource.end = IO_MEM_RESOURCE_END;
+}
+
+const char *get_system_type(void)
+{
+ return "Nintendo 64";
+}
+
+void __init prom_init(void)
+{
+ fw_init_cmdline();
+}
+
+#define W 320
+#define H 240
+#define REG_BASE ((u32 *) CKSEG1ADDR(0x4400000))
+
+static void __init n64rdp_write_reg(const u8 reg, const u32 value)
+{
+ __raw_writel(value, REG_BASE + reg);
+}
+
+#undef REG_BASE
+
+static const u32 ntsc_320[] __initconst = {
+ 0x00013212, 0x00000000, 0x00000140, 0x00000200,
+ 0x00000000, 0x03e52239, 0x0000020d, 0x00000c15,
+ 0x0c150c15, 0x006c02ec, 0x002501ff, 0x000e0204,
+ 0x00000200, 0x00000400
+};
+
+#define MI_REG_BASE 0x4300000
+#define NUM_MI_REGS 4
+#define AI_REG_BASE 0x4500000
+#define NUM_AI_REGS 6
+#define PI_REG_BASE 0x4600000
+#define NUM_PI_REGS 5
+#define SI_REG_BASE 0x4800000
+#define NUM_SI_REGS 7
+
+static int __init n64_platform_init(void)
+{
+ static const char simplefb_resname[] = "FB";
+ static const struct simplefb_platform_data mode = {
+ .width = W,
+ .height = H,
+ .stride = W * 2,
+ .format = "r5g5b5a1"
+ };
+ struct resource res[3];
+ void *orig;
+ unsigned long phys;
+ unsigned i;
+
+ memset(res, 0, sizeof(struct resource) * 3);
+ res[0].flags = IORESOURCE_MEM;
+ res[0].start = MI_REG_BASE;
+ res[0].end = MI_REG_BASE + NUM_MI_REGS * 4 - 1;
+
+ res[1].flags = IORESOURCE_MEM;
+ res[1].start = AI_REG_BASE;
+ res[1].end = AI_REG_BASE + NUM_AI_REGS * 4 - 1;
+
+ res[2].flags = IORESOURCE_IRQ;
+ res[2].start = RCP_IRQ;
+ res[2].end = RCP_IRQ;
+
+ platform_device_register_simple("n64audio", -1, res, 3);
+
+ memset(&res[0], 0, sizeof(res[0]));
+ res[0].flags = IORESOURCE_MEM;
+ res[0].start = PI_REG_BASE;
+ res[0].end = PI_REG_BASE + NUM_PI_REGS * 4 - 1;
+
+ platform_device_register_simple("n64cart", -1, res, 1);
+
+ memset(&res[0], 0, sizeof(res[0]));
+ res[0].flags = IORESOURCE_MEM;
+ res[0].start = SI_REG_BASE;
+ res[0].end = SI_REG_BASE + NUM_SI_REGS * 4 - 1;
+
+ platform_device_register_simple("n64joy", -1, res, 1);
+
+ /* The framebuffer needs 64-byte alignment */
+ orig = kzalloc(W * H * 2 + 63, GFP_DMA | GFP_KERNEL);
+ if (!orig)
+ return -ENOMEM;
+ phys = virt_to_phys(orig);
+ phys += 63;
+ phys &= ~63;
+
+ for (i = 0; i < ARRAY_SIZE(ntsc_320); i++) {
+ if (i == 1)
+ n64rdp_write_reg(i, phys);
+ else
+ n64rdp_write_reg(i, ntsc_320[i]);
+ }
+
+ /* setup IORESOURCE_MEM as framebuffer memory */
+ memset(&res[0], 0, sizeof(res[0]));
+ res[0].flags = IORESOURCE_MEM;
+ res[0].name = simplefb_resname;
+ res[0].start = phys;
+ res[0].end = phys + W * H * 2 - 1;
+
+ platform_device_register_resndata(NULL, "simple-framebuffer", 0,
+ &res[0], 1, &mode, sizeof(mode));
+
+ return 0;
+}
+
+#undef W
+#undef H
+
+arch_initcall(n64_platform_init);
+
+void __init plat_mem_setup(void)
+{
+ iomem_resource_init();
+ memblock_add(0x0, 8 * 1024 * 1024); /* Bootloader blocks the 4mb config */
+}
+
+void __init plat_time_init(void)
+{
+ /* 93.75 MHz cpu, count register runs at half rate */
+ mips_hpt_frequency = 93750000 / 2;
+}
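Two details in the new board code are worth spelling out: the framebuffer is aligned by over-allocating 63 extra bytes and rounding the physical address up, and index 1 of the ntsc_320 table is skipped in the programming loop because that slot is the VI framebuffer-origin register, which gets the freshly allocated physical address rather than the canned value. The rounding is the usual align-up trick (64-byte alignment assumed, per the comment):

    orig = kzalloc(W * H * 2 + 63, GFP_DMA | GFP_KERNEL);   /* worst-case slack */
    phys = virt_to_phys(orig);
    phys = (phys + 63) & ~63UL;   /* equivalent to the phys += 63; phys &= ~63; pair above */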
diff --git a/arch/mips/n64/irq.c b/arch/mips/n64/irq.c
new file mode 100644
index 000000000000..1861e962db42
--- /dev/null
+++ b/arch/mips/n64/irq.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * N64 IRQ
+ *
+ * Copyright (C) 2021 Lauri Kasanen
+ */
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include <asm/irq_cpu.h>
+
+void __init arch_init_irq(void)
+{
+ mips_cpu_irq_init();
+}
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 561154cbcc40..939dd06764bc 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -1423,8 +1423,8 @@ jeq_common:
case BPF_STX | BPF_H | BPF_MEM:
case BPF_STX | BPF_W | BPF_MEM:
case BPF_STX | BPF_DW | BPF_MEM:
- case BPF_STX | BPF_W | BPF_XADD:
- case BPF_STX | BPF_DW | BPF_XADD:
+ case BPF_STX | BPF_W | BPF_ATOMIC:
+ case BPF_STX | BPF_DW | BPF_ATOMIC:
if (insn->dst_reg == BPF_REG_10) {
ctx->flags |= EBPF_SEEN_FP;
dst = MIPS_R_SP;
@@ -1438,7 +1438,12 @@ jeq_common:
src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
if (src < 0)
return src;
- if (BPF_MODE(insn->code) == BPF_XADD) {
+ if (BPF_MODE(insn->code) == BPF_ATOMIC) {
+ if (insn->imm != BPF_ADD) {
+ pr_err("ATOMIC OP %02x NOT HANDLED\n", insn->imm);
+ return -EINVAL;
+ }
+
/*
* If mem_off does not fit within the 9 bit ll/sc
* instruction immediate field, use a temp reg.
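BPF_XADD has been folded into the wider BPF_ATOMIC instruction class, where insn->imm selects the atomic operation; this JIT only ever implemented the fetch-less add, so it now checks imm and rejects everything except BPF_ADD. An instruction that still reaches the ll/sc path would be encoded roughly like this:

    /* Assumed encoding of "lock *(u64 *)(dst + off) += src" after the
     * BPF_ATOMIC rework: the operation is carried in the imm field. */
    struct bpf_insn insn = {
            .code    = BPF_STX | BPF_DW | BPF_ATOMIC,
            .dst_reg = BPF_REG_1,
            .src_reg = BPF_REG_2,
            .off     = 0,
            .imm     = BPF_ADD,   /* anything else now returns -EINVAL here */
    };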
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index 9adc0c1b4ffc..9fbaa1e5b340 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -130,11 +130,6 @@ const char *get_system_type(void)
}
}
-void __init prom_free_prom_memory(void)
-{
- /* Nothing yet */
-}
-
void xlp_mmu_init(void)
{
u32 conf4;
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
index 627e88101316..aa83d691df0f 100644
--- a/arch/mips/netlogic/xlr/setup.c
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -89,11 +89,6 @@ unsigned int nlm_get_cpu_frequency(void)
return (unsigned int)nlm_prom_info.cpu_frequency;
}
-void __init prom_free_prom_memory(void)
-{
- /* Nothing yet */
-}
-
void nlm_percpu_init(int hwcpuid)
{
if (hwcpuid % 4 == 0)
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
deleted file mode 100644
index e10f216d0422..000000000000
--- a/arch/mips/oprofile/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
-
-oprofile-$(CONFIG_CPU_MIPS32) += op_model_mipsxx.o
-oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o
-oprofile-$(CONFIG_CPU_R10000) += op_model_mipsxx.o
-oprofile-$(CONFIG_CPU_SB1) += op_model_mipsxx.o
-oprofile-$(CONFIG_CPU_XLR) += op_model_mipsxx.o
-oprofile-$(CONFIG_CPU_LOONGSON2EF) += op_model_loongson2.o
-oprofile-$(CONFIG_CPU_LOONGSON64) += op_model_loongson3.o
diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
deleted file mode 100644
index 07d98ba7f49e..000000000000
--- a/arch/mips/oprofile/backtrace.c
+++ /dev/null
@@ -1,177 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <asm/ptrace.h>
-#include <asm/stacktrace.h>
-#include <linux/stacktrace.h>
-#include <linux/kernel.h>
-#include <asm/sections.h>
-#include <asm/inst.h>
-
-struct stackframe {
- unsigned long sp;
- unsigned long pc;
- unsigned long ra;
-};
-
-static inline int get_mem(unsigned long addr, unsigned long *result)
-{
- unsigned long *address = (unsigned long *) addr;
- if (!access_ok(address, sizeof(unsigned long)))
- return -1;
- if (__copy_from_user_inatomic(result, address, sizeof(unsigned long)))
- return -3;
- return 0;
-}
-
-/*
- * These two instruction helpers were taken from process.c
- */
-static inline int is_ra_save_ins(union mips_instruction *ip)
-{
- /* sw / sd $ra, offset($sp) */
- return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op)
- && ip->i_format.rs == 29 && ip->i_format.rt == 31;
-}
-
-static inline int is_sp_move_ins(union mips_instruction *ip)
-{
- /* addiu/daddiu sp,sp,-imm */
- if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
- return 0;
- if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
- return 1;
- return 0;
-}
-
-/*
- * Looks for specific instructions that mark the end of a function.
- * This usually means we ran into the code area of the previous function.
- */
-static inline int is_end_of_function_marker(union mips_instruction *ip)
-{
- /* jr ra */
- if (ip->r_format.func == jr_op && ip->r_format.rs == 31)
- return 1;
- /* lui gp */
- if (ip->i_format.opcode == lui_op && ip->i_format.rt == 28)
- return 1;
- return 0;
-}
-
-/*
- * TODO for userspace stack unwinding:
- * - handle cases where the stack is adjusted inside a function
- * (generally doesn't happen)
- * - find optimal value for max_instr_check
- * - try to find a better way to handle leaf functions
- */
-
-static inline int unwind_user_frame(struct stackframe *old_frame,
- const unsigned int max_instr_check)
-{
- struct stackframe new_frame = *old_frame;
- off_t ra_offset = 0;
- size_t stack_size = 0;
- unsigned long addr;
-
- if (old_frame->pc == 0 || old_frame->sp == 0 || old_frame->ra == 0)
- return -9;
-
- for (addr = new_frame.pc; (addr + max_instr_check > new_frame.pc)
- && (!ra_offset || !stack_size); --addr) {
- union mips_instruction ip;
-
- if (get_mem(addr, (unsigned long *) &ip))
- return -11;
-
- if (is_sp_move_ins(&ip)) {
- int stack_adjustment = ip.i_format.simmediate;
- if (stack_adjustment > 0)
- /* This marks the end of the previous function,
- which means we overran. */
- break;
- stack_size = (unsigned long) stack_adjustment;
- } else if (is_ra_save_ins(&ip)) {
- int ra_slot = ip.i_format.simmediate;
- if (ra_slot < 0)
- /* This shouldn't happen. */
- break;
- ra_offset = ra_slot;
- } else if (is_end_of_function_marker(&ip))
- break;
- }
-
- if (!ra_offset || !stack_size)
- goto done;
-
- if (ra_offset) {
- new_frame.ra = old_frame->sp + ra_offset;
- if (get_mem(new_frame.ra, &(new_frame.ra)))
- return -13;
- }
-
- if (stack_size) {
- new_frame.sp = old_frame->sp + stack_size;
- if (get_mem(new_frame.sp, &(new_frame.sp)))
- return -14;
- }
-
- if (new_frame.sp > old_frame->sp)
- return -2;
-
-done:
- new_frame.pc = old_frame->ra;
- *old_frame = new_frame;
-
- return 0;
-}
-
-static inline void do_user_backtrace(unsigned long low_addr,
- struct stackframe *frame,
- unsigned int depth)
-{
- const unsigned int max_instr_check = 512;
- const unsigned long high_addr = low_addr + THREAD_SIZE;
-
- while (depth-- && !unwind_user_frame(frame, max_instr_check)) {
- oprofile_add_trace(frame->ra);
- if (frame->sp < low_addr || frame->sp > high_addr)
- break;
- }
-}
-
-#ifndef CONFIG_KALLSYMS
-static inline void do_kernel_backtrace(unsigned long low_addr,
- struct stackframe *frame,
- unsigned int depth) { }
-#else
-static inline void do_kernel_backtrace(unsigned long low_addr,
- struct stackframe *frame,
- unsigned int depth)
-{
- while (depth-- && frame->pc) {
- frame->pc = unwind_stack_by_address(low_addr,
- &(frame->sp),
- frame->pc,
- &(frame->ra));
- oprofile_add_trace(frame->ra);
- }
-}
-#endif
-
-void notrace op_mips_backtrace(struct pt_regs *const regs, unsigned int depth)
-{
- struct stackframe frame = { .sp = regs->regs[29],
- .pc = regs->cp0_epc,
- .ra = regs->regs[31] };
- const int userspace = user_mode(regs);
- const unsigned long low_addr = ALIGN(frame.sp, THREAD_SIZE);
-
- if (userspace)
- do_user_backtrace(low_addr, &frame, depth);
- else
- do_kernel_backtrace(low_addr, &frame, depth);
-}
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
deleted file mode 100644
index d3996c4c6440..000000000000
--- a/arch/mips/oprofile/common.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004, 2005 Ralf Baechle
- * Copyright (C) 2005 MIPS Technologies, Inc.
- */
-#include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/cpu-info.h>
-#include <asm/cpu-type.h>
-
-#include "op_impl.h"
-
-extern struct op_mips_model op_model_mipsxx_ops __weak;
-extern struct op_mips_model op_model_loongson2_ops __weak;
-extern struct op_mips_model op_model_loongson3_ops __weak;
-
-static struct op_mips_model *model;
-
-static struct op_counter_config ctr[20];
-
-static int op_mips_setup(void)
-{
- /* Pre-compute the values to stuff in the hardware registers. */
- model->reg_setup(ctr);
-
- /* Configure the registers on all cpus. */
- on_each_cpu(model->cpu_setup, NULL, 1);
-
- return 0;
-}
-
-static int op_mips_create_files(struct dentry *root)
-{
- int i;
-
- for (i = 0; i < model->num_counters; ++i) {
- struct dentry *dir;
- char buf[4];
-
- snprintf(buf, sizeof buf, "%d", i);
- dir = oprofilefs_mkdir(root, buf);
-
- oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
- oprofilefs_create_ulong(dir, "event", &ctr[i].event);
- oprofilefs_create_ulong(dir, "count", &ctr[i].count);
- oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
- oprofilefs_create_ulong(dir, "user", &ctr[i].user);
- oprofilefs_create_ulong(dir, "exl", &ctr[i].exl);
- /* Dummy. */
- oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
- }
-
- return 0;
-}
-
-static int op_mips_start(void)
-{
- on_each_cpu(model->cpu_start, NULL, 1);
-
- return 0;
-}
-
-static void op_mips_stop(void)
-{
- /* Disable performance monitoring for all counters. */
- on_each_cpu(model->cpu_stop, NULL, 1);
-}
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- struct op_mips_model *lmodel = NULL;
- int res;
-
- switch (boot_cpu_type()) {
- case CPU_5KC:
- case CPU_M14KC:
- case CPU_M14KEC:
- case CPU_20KC:
- case CPU_24K:
- case CPU_25KF:
- case CPU_34K:
- case CPU_1004K:
- case CPU_74K:
- case CPU_1074K:
- case CPU_INTERAPTIV:
- case CPU_PROAPTIV:
- case CPU_P5600:
- case CPU_I6400:
- case CPU_M5150:
- case CPU_LOONGSON32:
- case CPU_SB1:
- case CPU_SB1A:
- case CPU_R10000:
- case CPU_R12000:
- case CPU_R14000:
- case CPU_R16000:
- case CPU_XLR:
- lmodel = &op_model_mipsxx_ops;
- break;
-
- case CPU_LOONGSON2EF:
- lmodel = &op_model_loongson2_ops;
- break;
- case CPU_LOONGSON64:
- lmodel = &op_model_loongson3_ops;
- break;
- }
-
- /*
- * Always set the backtrace. This allows unsupported CPU types to still
- * use timer-based oprofile.
- */
- ops->backtrace = op_mips_backtrace;
-
- if (!lmodel)
- return -ENODEV;
-
- res = lmodel->init();
- if (res)
- return res;
-
- model = lmodel;
-
- ops->create_files = op_mips_create_files;
- ops->setup = op_mips_setup;
- //ops->shutdown = op_mips_shutdown;
- ops->start = op_mips_start;
- ops->stop = op_mips_stop;
- ops->cpu_type = lmodel->cpu_type;
-
- printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
- lmodel->cpu_type);
-
- return 0;
-}
-
-void oprofile_arch_exit(void)
-{
- if (model)
- model->exit();
-}
diff --git a/arch/mips/oprofile/op_impl.h b/arch/mips/oprofile/op_impl.h
deleted file mode 100644
index a4e758a39af4..000000000000
--- a/arch/mips/oprofile/op_impl.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * @file arch/alpha/oprofile/op_impl.h
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Richard Henderson <rth@twiddle.net>
- */
-
-#ifndef OP_IMPL_H
-#define OP_IMPL_H 1
-
-extern int (*perf_irq)(void);
-
-/* Per-counter configuration as set via oprofilefs. */
-struct op_counter_config {
- unsigned long enabled;
- unsigned long event;
- unsigned long count;
- /* Dummies because I am too lazy to hack the userspace tools. */
- unsigned long kernel;
- unsigned long user;
- unsigned long exl;
- unsigned long unit_mask;
-};
-
-/* Per-architecture configure and hooks. */
-struct op_mips_model {
- void (*reg_setup) (struct op_counter_config *);
- void (*cpu_setup) (void *dummy);
- int (*init)(void);
- void (*exit)(void);
- void (*cpu_start)(void *args);
- void (*cpu_stop)(void *args);
- char *cpu_type;
- unsigned char num_counters;
-};
-
-void op_mips_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-#endif
diff --git a/arch/mips/oprofile/op_model_loongson2.c b/arch/mips/oprofile/op_model_loongson2.c
deleted file mode 100644
index b249ec0bebb2..000000000000
--- a/arch/mips/oprofile/op_model_loongson2.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Loongson2 performance counter driver for oprofile
- *
- * Copyright (C) 2009 Lemote Inc.
- * Author: Yanhua <yanh@lemote.com>
- * Author: Wu Zhangjin <wuzhangjin@gmail.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-
-#include <loongson.h> /* LOONGSON2_PERFCNT_IRQ */
-#include "op_impl.h"
-
-#define LOONGSON2_CPU_TYPE "mips/loongson2"
-
-#define LOONGSON2_PERFCNT_OVERFLOW (1ULL << 31)
-
-#define LOONGSON2_PERFCTRL_EXL (1UL << 0)
-#define LOONGSON2_PERFCTRL_KERNEL (1UL << 1)
-#define LOONGSON2_PERFCTRL_SUPERVISOR (1UL << 2)
-#define LOONGSON2_PERFCTRL_USER (1UL << 3)
-#define LOONGSON2_PERFCTRL_ENABLE (1UL << 4)
-#define LOONGSON2_PERFCTRL_EVENT(idx, event) \
- (((event) & 0x0f) << ((idx) ? 9 : 5))
-
-#define read_c0_perfctrl() __read_64bit_c0_register($24, 0)
-#define write_c0_perfctrl(val) __write_64bit_c0_register($24, 0, val)
-#define read_c0_perfcnt() __read_64bit_c0_register($25, 0)
-#define write_c0_perfcnt(val) __write_64bit_c0_register($25, 0, val)
-
-static struct loongson2_register_config {
- unsigned int ctrl;
- unsigned long long reset_counter1;
- unsigned long long reset_counter2;
- int cnt1_enabled, cnt2_enabled;
-} reg;
-
-static char *oprofid = "LoongsonPerf";
-static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id);
-
-static void reset_counters(void *arg)
-{
- write_c0_perfctrl(0);
- write_c0_perfcnt(0);
-}
-
-static void loongson2_reg_setup(struct op_counter_config *cfg)
-{
- unsigned int ctrl = 0;
-
- reg.reset_counter1 = 0;
- reg.reset_counter2 = 0;
-
- /*
- * Compute the performance counter ctrl word.
- * For now, count kernel and user mode.
- */
- if (cfg[0].enabled) {
- ctrl |= LOONGSON2_PERFCTRL_EVENT(0, cfg[0].event);
- reg.reset_counter1 = 0x80000000ULL - cfg[0].count;
- }
-
- if (cfg[1].enabled) {
- ctrl |= LOONGSON2_PERFCTRL_EVENT(1, cfg[1].event);
- reg.reset_counter2 = 0x80000000ULL - cfg[1].count;
- }
-
- if (cfg[0].enabled || cfg[1].enabled) {
- ctrl |= LOONGSON2_PERFCTRL_EXL | LOONGSON2_PERFCTRL_ENABLE;
- if (cfg[0].kernel || cfg[1].kernel)
- ctrl |= LOONGSON2_PERFCTRL_KERNEL;
- if (cfg[0].user || cfg[1].user)
- ctrl |= LOONGSON2_PERFCTRL_USER;
- }
-
- reg.ctrl = ctrl;
-
- reg.cnt1_enabled = cfg[0].enabled;
- reg.cnt2_enabled = cfg[1].enabled;
-}
-
-static void loongson2_cpu_setup(void *args)
-{
- write_c0_perfcnt((reg.reset_counter2 << 32) | reg.reset_counter1);
-}
-
-static void loongson2_cpu_start(void *args)
-{
- /* Start all counters on current CPU */
- if (reg.cnt1_enabled || reg.cnt2_enabled)
- write_c0_perfctrl(reg.ctrl);
-}
-
-static void loongson2_cpu_stop(void *args)
-{
- /* Stop all counters on current CPU */
- write_c0_perfctrl(0);
- memset(&reg, 0, sizeof(reg));
-}
-
-static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id)
-{
- uint64_t counter, counter1, counter2;
- struct pt_regs *regs = get_irq_regs();
- int enabled;
-
- /* Check whether the irq belongs to me */
- enabled = read_c0_perfctrl() & LOONGSON2_PERFCTRL_ENABLE;
- if (!enabled)
- return IRQ_NONE;
- enabled = reg.cnt1_enabled | reg.cnt2_enabled;
- if (!enabled)
- return IRQ_NONE;
-
- counter = read_c0_perfcnt();
- counter1 = counter & 0xffffffff;
- counter2 = counter >> 32;
-
- if (counter1 & LOONGSON2_PERFCNT_OVERFLOW) {
- if (reg.cnt1_enabled)
- oprofile_add_sample(regs, 0);
- counter1 = reg.reset_counter1;
- }
- if (counter2 & LOONGSON2_PERFCNT_OVERFLOW) {
- if (reg.cnt2_enabled)
- oprofile_add_sample(regs, 1);
- counter2 = reg.reset_counter2;
- }
-
- write_c0_perfcnt((counter2 << 32) | counter1);
-
- return IRQ_HANDLED;
-}
-
-static int __init loongson2_init(void)
-{
- return request_irq(LOONGSON2_PERFCNT_IRQ, loongson2_perfcount_handler,
- IRQF_SHARED, "Perfcounter", oprofid);
-}
-
-static void loongson2_exit(void)
-{
- reset_counters(NULL);
- free_irq(LOONGSON2_PERFCNT_IRQ, oprofid);
-}
-
-struct op_mips_model op_model_loongson2_ops = {
- .reg_setup = loongson2_reg_setup,
- .cpu_setup = loongson2_cpu_setup,
- .init = loongson2_init,
- .exit = loongson2_exit,
- .cpu_start = loongson2_cpu_start,
- .cpu_stop = loongson2_cpu_stop,
- .cpu_type = LOONGSON2_CPU_TYPE,
- .num_counters = 2
-};
diff --git a/arch/mips/oprofile/op_model_loongson3.c b/arch/mips/oprofile/op_model_loongson3.c
deleted file mode 100644
index 436b1fc99f2c..000000000000
--- a/arch/mips/oprofile/op_model_loongson3.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- */
-#include <linux/init.h>
-#include <linux/cpu.h>
-#include <linux/smp.h>
-#include <linux/proc_fs.h>
-#include <linux/oprofile.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/uaccess.h>
-#include <irq.h>
-#include <loongson.h>
-#include "op_impl.h"
-
-#define LOONGSON3_PERFCNT_OVERFLOW (1ULL << 63)
-
-#define LOONGSON3_PERFCTRL_EXL (1UL << 0)
-#define LOONGSON3_PERFCTRL_KERNEL (1UL << 1)
-#define LOONGSON3_PERFCTRL_SUPERVISOR (1UL << 2)
-#define LOONGSON3_PERFCTRL_USER (1UL << 3)
-#define LOONGSON3_PERFCTRL_ENABLE (1UL << 4)
-#define LOONGSON3_PERFCTRL_W (1UL << 30)
-#define LOONGSON3_PERFCTRL_M (1UL << 31)
-#define LOONGSON3_PERFCTRL_EVENT(idx, event) \
- (((event) & (idx ? 0x0f : 0x3f)) << 5)
-
-/* Loongson-3 PerfCount performance counter1 register */
-#define read_c0_perflo1() __read_64bit_c0_register($25, 0)
-#define write_c0_perflo1(val) __write_64bit_c0_register($25, 0, val)
-#define read_c0_perfhi1() __read_64bit_c0_register($25, 1)
-#define write_c0_perfhi1(val) __write_64bit_c0_register($25, 1, val)
-
-/* Loongson-3 PerfCount performance counter2 register */
-#define read_c0_perflo2() __read_64bit_c0_register($25, 2)
-#define write_c0_perflo2(val) __write_64bit_c0_register($25, 2, val)
-#define read_c0_perfhi2() __read_64bit_c0_register($25, 3)
-#define write_c0_perfhi2(val) __write_64bit_c0_register($25, 3, val)
-
-static int (*save_perf_irq)(void);
-
-static struct loongson3_register_config {
- unsigned int control1;
- unsigned int control2;
- unsigned long long reset_counter1;
- unsigned long long reset_counter2;
- int ctr1_enable, ctr2_enable;
-} reg;
-
-static void reset_counters(void *arg)
-{
- write_c0_perfhi1(0);
- write_c0_perfhi2(0);
- write_c0_perflo1(0xc0000000);
- write_c0_perflo2(0x40000000);
-}
-
-/* Compute all of the registers in preparation for enabling profiling. */
-static void loongson3_reg_setup(struct op_counter_config *ctr)
-{
- unsigned int control1 = 0;
- unsigned int control2 = 0;
-
- reg.reset_counter1 = 0;
- reg.reset_counter2 = 0;
- /* Compute the performance counter control word. */
- /* For now count kernel and user mode */
- if (ctr[0].enabled) {
- control1 |= LOONGSON3_PERFCTRL_EVENT(0, ctr[0].event) |
- LOONGSON3_PERFCTRL_ENABLE;
- if (ctr[0].kernel)
- control1 |= LOONGSON3_PERFCTRL_KERNEL;
- if (ctr[0].user)
- control1 |= LOONGSON3_PERFCTRL_USER;
- reg.reset_counter1 = 0x8000000000000000ULL - ctr[0].count;
- }
-
- if (ctr[1].enabled) {
- control2 |= LOONGSON3_PERFCTRL_EVENT(1, ctr[1].event) |
- LOONGSON3_PERFCTRL_ENABLE;
- if (ctr[1].kernel)
- control2 |= LOONGSON3_PERFCTRL_KERNEL;
- if (ctr[1].user)
- control2 |= LOONGSON3_PERFCTRL_USER;
- reg.reset_counter2 = 0x8000000000000000ULL - ctr[1].count;
- }
-
- if (ctr[0].enabled)
- control1 |= LOONGSON3_PERFCTRL_EXL;
- if (ctr[1].enabled)
- control2 |= LOONGSON3_PERFCTRL_EXL;
-
- reg.control1 = control1;
- reg.control2 = control2;
- reg.ctr1_enable = ctr[0].enabled;
- reg.ctr2_enable = ctr[1].enabled;
-}
-
-/* Program all of the registers in preparation for enabling profiling. */
-static void loongson3_cpu_setup(void *args)
-{
- uint64_t perfcount1, perfcount2;
-
- perfcount1 = reg.reset_counter1;
- perfcount2 = reg.reset_counter2;
- write_c0_perfhi1(perfcount1);
- write_c0_perfhi2(perfcount2);
-}
-
-static void loongson3_cpu_start(void *args)
-{
- /* Start all counters on current CPU */
- reg.control1 |= (LOONGSON3_PERFCTRL_W|LOONGSON3_PERFCTRL_M);
- reg.control2 |= (LOONGSON3_PERFCTRL_W|LOONGSON3_PERFCTRL_M);
-
- if (reg.ctr1_enable)
- write_c0_perflo1(reg.control1);
- if (reg.ctr2_enable)
- write_c0_perflo2(reg.control2);
-}
-
-static void loongson3_cpu_stop(void *args)
-{
- /* Stop all counters on current CPU */
- write_c0_perflo1(0xc0000000);
- write_c0_perflo2(0x40000000);
- memset(&reg, 0, sizeof(reg));
-}
-
-static int loongson3_perfcount_handler(void)
-{
- unsigned long flags;
- uint64_t counter1, counter2;
- uint32_t cause, handled = IRQ_NONE;
- struct pt_regs *regs = get_irq_regs();
-
- cause = read_c0_cause();
- if (!(cause & CAUSEF_PCI))
- return handled;
-
- counter1 = read_c0_perfhi1();
- counter2 = read_c0_perfhi2();
-
- local_irq_save(flags);
-
- if (counter1 & LOONGSON3_PERFCNT_OVERFLOW) {
- if (reg.ctr1_enable)
- oprofile_add_sample(regs, 0);
- counter1 = reg.reset_counter1;
- }
- if (counter2 & LOONGSON3_PERFCNT_OVERFLOW) {
- if (reg.ctr2_enable)
- oprofile_add_sample(regs, 1);
- counter2 = reg.reset_counter2;
- }
-
- local_irq_restore(flags);
-
- write_c0_perfhi1(counter1);
- write_c0_perfhi2(counter2);
-
- if (!(cause & CAUSEF_TI))
- handled = IRQ_HANDLED;
-
- return handled;
-}
-
-static int loongson3_starting_cpu(unsigned int cpu)
-{
- write_c0_perflo1(reg.control1);
- write_c0_perflo2(reg.control2);
- return 0;
-}
-
-static int loongson3_dying_cpu(unsigned int cpu)
-{
- write_c0_perflo1(0xc0000000);
- write_c0_perflo2(0x40000000);
- return 0;
-}
-
-static int __init loongson3_init(void)
-{
- on_each_cpu(reset_counters, NULL, 1);
- cpuhp_setup_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
- "mips/oprofile/loongson3:starting",
- loongson3_starting_cpu, loongson3_dying_cpu);
- save_perf_irq = perf_irq;
- perf_irq = loongson3_perfcount_handler;
-
- return 0;
-}
-
-static void loongson3_exit(void)
-{
- on_each_cpu(reset_counters, NULL, 1);
- cpuhp_remove_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING);
- perf_irq = save_perf_irq;
-}
-
-struct op_mips_model op_model_loongson3_ops = {
- .reg_setup = loongson3_reg_setup,
- .cpu_setup = loongson3_cpu_setup,
- .init = loongson3_init,
- .exit = loongson3_exit,
- .cpu_start = loongson3_cpu_start,
- .cpu_stop = loongson3_cpu_stop,
- .cpu_type = "mips/loongson3",
- .num_counters = 2
-};
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
deleted file mode 100644
index 55d7b7fd18b6..000000000000
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ /dev/null
@@ -1,479 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004, 05, 06 by Ralf Baechle
- * Copyright (C) 2005 by MIPS Technologies, Inc.
- */
-#include <linux/cpumask.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/smp.h>
-#include <asm/irq_regs.h>
-#include <asm/time.h>
-
-#include "op_impl.h"
-
-#define M_PERFCTL_EVENT(event) (((event) << MIPS_PERFCTRL_EVENT_S) & \
- MIPS_PERFCTRL_EVENT)
-#define M_PERFCTL_VPEID(vpe) ((vpe) << MIPS_PERFCTRL_VPEID_S)
-
-#define M_COUNTER_OVERFLOW (1UL << 31)
-
-static int (*save_perf_irq)(void);
-static int perfcount_irq;
-
-/*
- * XLR has only one set of counters per core. Designate the
- * first hardware thread in the core for setup and init.
- * Skip CPUs with non-zero hardware thread id (4 hwt per core)
- */
-#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
-#define oprofile_skip_cpu(c) ((cpu_logical_map(c) & 0x3) != 0)
-#else
-#define oprofile_skip_cpu(c) 0
-#endif
-
-#ifdef CONFIG_MIPS_MT_SMP
-#define WHAT (MIPS_PERFCTRL_MT_EN_VPE | \
- M_PERFCTL_VPEID(cpu_vpe_id(&current_cpu_data)))
-#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
- 0 : cpu_vpe_id(&current_cpu_data))
-
-/*
- * The number of bits to shift to convert between counters per core and
- * counters per VPE. There is no reasonable interface atm to obtain the
- * number of VPEs used by Linux and in the 34K this number is fixed to two
- * anyways so we hardcore a few things here for the moment. The way it's
- * done here will ensure that oprofile VSMP kernel will run right on a lesser
- * core like a 24K also or with maxcpus=1.
- */
-static inline unsigned int vpe_shift(void)
-{
- if (num_possible_cpus() > 1)
- return 1;
-
- return 0;
-}
-
-#else
-
-#define WHAT 0
-#define vpe_id() 0
-
-static inline unsigned int vpe_shift(void)
-{
- return 0;
-}
-
-#endif
-
-static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
-{
- return counters >> vpe_shift();
-}
-
-static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
-{
- return counters << vpe_shift();
-}
-
-#define __define_perf_accessors(r, n, np) \
- \
-static inline unsigned int r_c0_ ## r ## n(void) \
-{ \
- unsigned int cpu = vpe_id(); \
- \
- switch (cpu) { \
- case 0: \
- return read_c0_ ## r ## n(); \
- case 1: \
- return read_c0_ ## r ## np(); \
- default: \
- BUG(); \
- } \
- return 0; \
-} \
- \
-static inline void w_c0_ ## r ## n(unsigned int value) \
-{ \
- unsigned int cpu = vpe_id(); \
- \
- switch (cpu) { \
- case 0: \
- write_c0_ ## r ## n(value); \
- return; \
- case 1: \
- write_c0_ ## r ## np(value); \
- return; \
- default: \
- BUG(); \
- } \
- return; \
-} \
-
-__define_perf_accessors(perfcntr, 0, 2)
-__define_perf_accessors(perfcntr, 1, 3)
-__define_perf_accessors(perfcntr, 2, 0)
-__define_perf_accessors(perfcntr, 3, 1)
-
-__define_perf_accessors(perfctrl, 0, 2)
-__define_perf_accessors(perfctrl, 1, 3)
-__define_perf_accessors(perfctrl, 2, 0)
-__define_perf_accessors(perfctrl, 3, 1)
-
-struct op_mips_model op_model_mipsxx_ops;
-
-static struct mipsxx_register_config {
- unsigned int control[4];
- unsigned int counter[4];
-} reg;
-
-/* Compute all of the registers in preparation for enabling profiling. */
-
-static void mipsxx_reg_setup(struct op_counter_config *ctr)
-{
- unsigned int counters = op_model_mipsxx_ops.num_counters;
- int i;
-
- /* Compute the performance counter control word. */
- for (i = 0; i < counters; i++) {
- reg.control[i] = 0;
- reg.counter[i] = 0;
-
- if (!ctr[i].enabled)
- continue;
-
- reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
- MIPS_PERFCTRL_IE;
- if (ctr[i].kernel)
- reg.control[i] |= MIPS_PERFCTRL_K;
- if (ctr[i].user)
- reg.control[i] |= MIPS_PERFCTRL_U;
- if (ctr[i].exl)
- reg.control[i] |= MIPS_PERFCTRL_EXL;
- if (boot_cpu_type() == CPU_XLR)
- reg.control[i] |= XLR_PERFCTRL_ALLTHREADS;
- reg.counter[i] = 0x80000000 - ctr[i].count;
- }
-}
-
-/* Program all of the registers in preparation for enabling profiling. */
-
-static void mipsxx_cpu_setup(void *args)
-{
- unsigned int counters = op_model_mipsxx_ops.num_counters;
-
- if (oprofile_skip_cpu(smp_processor_id()))
- return;
-
- switch (counters) {
- case 4:
- w_c0_perfctrl3(0);
- w_c0_perfcntr3(reg.counter[3]);
- fallthrough;
- case 3:
- w_c0_perfctrl2(0);
- w_c0_perfcntr2(reg.counter[2]);
- fallthrough;
- case 2:
- w_c0_perfctrl1(0);
- w_c0_perfcntr1(reg.counter[1]);
- fallthrough;
- case 1:
- w_c0_perfctrl0(0);
- w_c0_perfcntr0(reg.counter[0]);
- }
-}
-
-/* Start all counters on current CPU */
-static void mipsxx_cpu_start(void *args)
-{
- unsigned int counters = op_model_mipsxx_ops.num_counters;
-
- if (oprofile_skip_cpu(smp_processor_id()))
- return;
-
- switch (counters) {
- case 4:
- w_c0_perfctrl3(WHAT | reg.control[3]);
- fallthrough;
- case 3:
- w_c0_perfctrl2(WHAT | reg.control[2]);
- fallthrough;
- case 2:
- w_c0_perfctrl1(WHAT | reg.control[1]);
- fallthrough;
- case 1:
- w_c0_perfctrl0(WHAT | reg.control[0]);
- }
-}
-
-/* Stop all counters on current CPU */
-static void mipsxx_cpu_stop(void *args)
-{
- unsigned int counters = op_model_mipsxx_ops.num_counters;
-
- if (oprofile_skip_cpu(smp_processor_id()))
- return;
-
- switch (counters) {
- case 4:
- w_c0_perfctrl3(0);
- fallthrough;
- case 3:
- w_c0_perfctrl2(0);
- fallthrough;
- case 2:
- w_c0_perfctrl1(0);
- fallthrough;
- case 1:
- w_c0_perfctrl0(0);
- }
-}
-
-static int mipsxx_perfcount_handler(void)
-{
- unsigned int counters = op_model_mipsxx_ops.num_counters;
- unsigned int control;
- unsigned int counter;
- int handled = IRQ_NONE;
-
- if (cpu_has_mips_r2 && !(read_c0_cause() & CAUSEF_PCI))
- return handled;
-
- switch (counters) {
-#define HANDLE_COUNTER(n) \
- case n + 1: \
- control = r_c0_perfctrl ## n(); \
- counter = r_c0_perfcntr ## n(); \
- if ((control & MIPS_PERFCTRL_IE) && \
- (counter & M_COUNTER_OVERFLOW)) { \
- oprofile_add_sample(get_irq_regs(), n); \
- w_c0_perfcntr ## n(reg.counter[n]); \
- handled = IRQ_HANDLED; \
- }
- HANDLE_COUNTER(3)
- fallthrough;
- HANDLE_COUNTER(2)
- fallthrough;
- HANDLE_COUNTER(1)
- fallthrough;
- HANDLE_COUNTER(0)
- }
-
- return handled;
-}
-
-static inline int __n_counters(void)
-{
- if (!cpu_has_perf)
- return 0;
- if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
- return 1;
- if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
- return 2;
- if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
- return 3;
-
- return 4;
-}
-
-static inline int n_counters(void)
-{
- int counters;
-
- switch (current_cpu_type()) {
- case CPU_R10000:
- counters = 2;
- break;
-
- case CPU_R12000:
- case CPU_R14000:
- case CPU_R16000:
- counters = 4;
- break;
-
- default:
- counters = __n_counters();
- }
-
- return counters;
-}
-
-static void reset_counters(void *arg)
-{
- int counters = (int)(long)arg;
- switch (counters) {
- case 4:
- w_c0_perfctrl3(0);
- w_c0_perfcntr3(0);
- fallthrough;
- case 3:
- w_c0_perfctrl2(0);
- w_c0_perfcntr2(0);
- fallthrough;
- case 2:
- w_c0_perfctrl1(0);
- w_c0_perfcntr1(0);
- fallthrough;
- case 1:
- w_c0_perfctrl0(0);
- w_c0_perfcntr0(0);
- }
-}
-
-static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
-{
- return mipsxx_perfcount_handler();
-}
-
-static int __init mipsxx_init(void)
-{
- int counters;
-
- counters = n_counters();
- if (counters == 0) {
- printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
- return -ENODEV;
- }
-
-#ifdef CONFIG_MIPS_MT_SMP
- if (!cpu_has_mipsmt_pertccounters)
- counters = counters_total_to_per_cpu(counters);
-#endif
- on_each_cpu(reset_counters, (void *)(long)counters, 1);
-
- op_model_mipsxx_ops.num_counters = counters;
- switch (current_cpu_type()) {
- case CPU_M14KC:
- op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
- break;
-
- case CPU_M14KEC:
- op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
- break;
-
- case CPU_20KC:
- op_model_mipsxx_ops.cpu_type = "mips/20K";
- break;
-
- case CPU_24K:
- op_model_mipsxx_ops.cpu_type = "mips/24K";
- break;
-
- case CPU_25KF:
- op_model_mipsxx_ops.cpu_type = "mips/25K";
- break;
-
- case CPU_1004K:
- case CPU_34K:
- op_model_mipsxx_ops.cpu_type = "mips/34K";
- break;
-
- case CPU_1074K:
- case CPU_74K:
- op_model_mipsxx_ops.cpu_type = "mips/74K";
- break;
-
- case CPU_INTERAPTIV:
- op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
- break;
-
- case CPU_PROAPTIV:
- op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
- break;
-
- case CPU_P5600:
- op_model_mipsxx_ops.cpu_type = "mips/P5600";
- break;
-
- case CPU_I6400:
- op_model_mipsxx_ops.cpu_type = "mips/I6400";
- break;
-
- case CPU_M5150:
- op_model_mipsxx_ops.cpu_type = "mips/M5150";
- break;
-
- case CPU_5KC:
- op_model_mipsxx_ops.cpu_type = "mips/5K";
- break;
-
- case CPU_R10000:
- if ((current_cpu_data.processor_id & 0xff) == 0x20)
- op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
- else
- op_model_mipsxx_ops.cpu_type = "mips/r10000";
- break;
-
- case CPU_R12000:
- case CPU_R14000:
- op_model_mipsxx_ops.cpu_type = "mips/r12000";
- break;
-
- case CPU_R16000:
- op_model_mipsxx_ops.cpu_type = "mips/r16000";
- break;
-
- case CPU_SB1:
- case CPU_SB1A:
- op_model_mipsxx_ops.cpu_type = "mips/sb1";
- break;
-
- case CPU_LOONGSON32:
- op_model_mipsxx_ops.cpu_type = "mips/loongson1";
- break;
-
- case CPU_XLR:
- op_model_mipsxx_ops.cpu_type = "mips/xlr";
- break;
-
- default:
- printk(KERN_ERR "Profiling unsupported for this CPU\n");
-
- return -ENODEV;
- }
-
- save_perf_irq = perf_irq;
- perf_irq = mipsxx_perfcount_handler;
-
- if (get_c0_perfcount_int)
- perfcount_irq = get_c0_perfcount_int();
- else if (cp0_perfcount_irq >= 0)
- perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
- else
- perfcount_irq = -1;
-
- if (perfcount_irq >= 0)
- return request_irq(perfcount_irq, mipsxx_perfcount_int,
- IRQF_PERCPU | IRQF_NOBALANCING |
- IRQF_NO_THREAD | IRQF_NO_SUSPEND |
- IRQF_SHARED,
- "Perfcounter", save_perf_irq);
-
- return 0;
-}
-
-static void mipsxx_exit(void)
-{
- int counters = op_model_mipsxx_ops.num_counters;
-
- if (perfcount_irq >= 0)
- free_irq(perfcount_irq, save_perf_irq);
-
- counters = counters_per_cpu_to_total(counters);
- on_each_cpu(reset_counters, (void *)(long)counters, 1);
-
- perf_irq = save_perf_irq;
-}
-
-struct op_mips_model op_model_mipsxx_ops = {
- .reg_setup = mipsxx_reg_setup,
- .cpu_setup = mipsxx_cpu_setup,
- .init = mipsxx_init,
- .exit = mipsxx_exit,
- .cpu_start = mipsxx_cpu_start,
- .cpu_stop = mipsxx_cpu_stop,
-};
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c
index 7285b5667568..1c722dd0c130 100644
--- a/arch/mips/pci/pci-alchemy.c
+++ b/arch/mips/pci/pci-alchemy.c
@@ -17,8 +17,8 @@
#include <linux/init.h>
#include <linux/syscore_ops.h>
#include <linux/vmalloc.h>
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
-#include <asm/dma-coherence.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/tlbmisc.h>
@@ -429,9 +429,8 @@ static int alchemy_pci_probe(struct platform_device *pdev)
ctx->alchemy_pci_ctrl.io_map_base = (unsigned long)virt_io;
/* Au1500 revisions older than AD have borked coherent PCI */
- if ((alchemy_get_cputype() == ALCHEMY_CPU_AU1500) &&
- (read_c0_prid() < 0x01030202) &&
- (coherentio == IO_COHERENCE_DISABLED)) {
+ if (alchemy_get_cputype() == ALCHEMY_CPU_AU1500 &&
+ read_c0_prid() < 0x01030202 && !dma_default_coherent) {
val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
val |= PCI_CONFIG_NC;
__raw_writel(val, ctx->regs + PCI_REG_CONFIG);
diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c
index 0b15730cef88..f741b8c528e4 100644
--- a/arch/mips/pci/pci-ar2315.c
+++ b/arch/mips/pci/pci-ar2315.c
@@ -483,11 +483,11 @@ static int ar2315_pci_probe(struct platform_device *pdev)
apc->io_res.name = "AR2315 IO space";
apc->io_res.start = 0;
apc->io_res.end = 0;
- apc->io_res.flags = IORESOURCE_IO,
+ apc->io_res.flags = IORESOURCE_IO;
apc->pci_ctrl.pci_ops = &ar2315_pci_ops;
- apc->pci_ctrl.mem_resource = &apc->mem_res,
- apc->pci_ctrl.io_resource = &apc->io_res,
+ apc->pci_ctrl.mem_resource = &apc->mem_res;
+ apc->pci_ctrl.io_resource = &apc->io_res;
register_pci_controller(&apc->pci_ctrl);
diff --git a/arch/mips/pic32/pic32mzda/init.c b/arch/mips/pic32/pic32mzda/init.c
index 50f376f058f4..764f2d022fae 100644
--- a/arch/mips/pic32/pic32mzda/init.c
+++ b/arch/mips/pic32/pic32mzda/init.c
@@ -21,24 +21,11 @@ const char *get_system_type(void)
return "PIC32MZDA";
}
-static ulong get_fdtaddr(void)
-{
- ulong ftaddr = 0;
-
- if (fw_passed_dtb && !fw_arg2 && !fw_arg3)
- return (ulong)fw_passed_dtb;
-
- if (__dtb_start < __dtb_end)
- ftaddr = (ulong)__dtb_start;
-
- return ftaddr;
-}
-
void __init plat_mem_setup(void)
{
void *dtb;
- dtb = (void *)get_fdtaddr();
+ dtb = get_fdt();
if (!dtb) {
pr_err("pic32: no DTB found.\n");
return;
@@ -91,10 +78,6 @@ void __init prom_init(void)
pic32_init_cmdline((int)fw_arg0, (char **)fw_arg1);
}
-void __init prom_free_prom_memory(void)
-{
-}
-
void __init device_tree_init(void)
{
if (!initial_boot_params)
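The open-coded get_fdtaddr() is replaced by the shared MIPS helper get_fdt(), which implements the same "bootloader-passed DTB first, then built-in DTB" policy once for all platforms (the ralink change further down makes the same switch). A sketch of what the helper is assumed to look like, mirroring the removed code:

    /* Assumed shape of the common helper; the real one lives in the shared
     * MIPS setup code. */
    void *get_fdt(void)
    {
            if (fw_passed_dtb)
                    return (void *)fw_passed_dtb;
            if (__dtb_start < __dtb_end)
                    return (void *)__dtb_start;
            return NULL;
    }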
diff --git a/arch/mips/pistachio/Platform b/arch/mips/pistachio/Platform
index f73a1a929965..c59de86dbddf 100644
--- a/arch/mips/pistachio/Platform
+++ b/arch/mips/pistachio/Platform
@@ -1,8 +1,6 @@
#
# IMG Pistachio SoC
#
-cflags-$(CONFIG_MACH_PISTACHIO) += \
- -I$(srctree)/arch/mips/include/asm/mach-pistachio
load-$(CONFIG_MACH_PISTACHIO) += 0xffffffff80400000
zload-$(CONFIG_MACH_PISTACHIO) += 0xffffffff81000000
all-$(CONFIG_MACH_PISTACHIO) := uImage.gz
diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c
index 558995ed6fe8..e0bacfc3c6b4 100644
--- a/arch/mips/pistachio/init.c
+++ b/arch/mips/pistachio/init.c
@@ -13,7 +13,6 @@
#include <linux/of_fdt.h>
#include <asm/cacheflush.h>
-#include <asm/dma-coherence.h>
#include <asm/fw/fw.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-cps.h>
@@ -83,7 +82,6 @@ phys_addr_t mips_cdmm_phys_base(void)
static void __init mips_nmi_setup(void)
{
void *base;
- extern char except_vec_nmi[];
base = cpu_has_veic ?
(void *)(CAC_BASE + 0xa80) :
@@ -118,10 +116,6 @@ void __init prom_init(void)
pr_info("SoC Type: %s\n", get_system_type());
}
-void __init prom_free_prom_memory(void)
-{
-}
-
void __init device_tree_init(void)
{
if (!initial_boot_params)
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index cbae9d23ab7f..8286c3521476 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -64,20 +64,15 @@ static int __init early_init_dt_find_memory(unsigned long node,
void __init plat_mem_setup(void)
{
- void *dtb = NULL;
+ void *dtb;
set_io_port_base(KSEG1);
/*
* Load the builtin devicetree. This causes the chosen node to be
- * parsed resulting in our memory appearing. fw_passed_dtb is used
- * by CONFIG_MIPS_APPENDED_RAW_DTB as well.
+ * parsed resulting in our memory appearing.
*/
- if (fw_passed_dtb)
- dtb = (void *)fw_passed_dtb;
- else if (__dtb_start != __dtb_end)
- dtb = (void *)__dtb_start;
-
+ dtb = get_fdt();
__dt_setup_arch(dtb);
of_scan_flat_dt(early_init_dt_find_memory, NULL);
diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
index 02e7878dc427..25728def3503 100644
--- a/arch/mips/ralink/prom.c
+++ b/arch/mips/ralink/prom.c
@@ -66,7 +66,3 @@ void __init prom_init(void)
prom_init_cmdline();
}
-
-void __init prom_free_prom_memory(void)
-{
-}
diff --git a/arch/mips/ralink/reset.c b/arch/mips/ralink/reset.c
index 8126f1260407..274d33078c5e 100644
--- a/arch/mips/ralink/reset.c
+++ b/arch/mips/ralink/reset.c
@@ -27,7 +27,7 @@ static int ralink_assert_device(struct reset_controller_dev *rcdev,
{
u32 val;
- if (id < 8)
+ if (id == 0)
return -1;
val = rt_sysc_r32(SYSC_REG_RESET_CTRL);
@@ -42,7 +42,7 @@ static int ralink_deassert_device(struct reset_controller_dev *rcdev,
{
u32 val;
- if (id < 8)
+ if (id == 0)
return -1;
val = rt_sysc_r32(SYSC_REG_RESET_CTRL);
diff --git a/arch/mips/rb532/prom.c b/arch/mips/rb532/prom.c
index a9d1f2019dc3..23ad8dd9aa5e 100644
--- a/arch/mips/rb532/prom.c
+++ b/arch/mips/rb532/prom.c
@@ -34,11 +34,6 @@ static struct resource ddr_reg[] = {
}
};
-void __init prom_free_prom_memory(void)
-{
- /* No prom memory to free */
-}
-
static inline int match_tag(char *arg, const char *tag)
{
return strncmp(arg, tag, strlen(tag)) == 0;
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index d411e0a90a5b..87bb6945ec25 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -404,11 +404,6 @@ void __init prom_meminit(void)
}
}
-void __init prom_free_prom_memory(void)
-{
- /* We got nothing to free here ... */
-}
-
extern void setup_zero_pages(void);
void __init paging_init(void)
diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c
index 1bbd5bfb5458..e21ea1de05e3 100644
--- a/arch/mips/sgi-ip32/ip32-irq.c
+++ b/arch/mips/sgi-ip32/ip32-irq.c
@@ -343,7 +343,7 @@ static void ip32_unknown_interrupt(void)
printk("Register dump:\n");
show_regs(get_irq_regs());
- printk("Please mail this report to linux-mips@linux-mips.org\n");
+ printk("Please mail this report to linux-mips@vger.kernel.org\n");
printk("Spinning...");
while(1) ;
}
diff --git a/arch/mips/sgi-ip32/ip32-memory.c b/arch/mips/sgi-ip32/ip32-memory.c
index 0f53fed39da6..3fc8d0a0bdfa 100644
--- a/arch/mips/sgi-ip32/ip32-memory.c
+++ b/arch/mips/sgi-ip32/ip32-memory.c
@@ -40,8 +40,3 @@ void __init prom_meminit(void)
memblock_add(base, size);
}
}
-
-
-void __init prom_free_prom_memory(void)
-{
-}
diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c
index 89f7fca45152..a3323f8dcc1b 100644
--- a/arch/mips/sibyte/common/cfe.c
+++ b/arch/mips/sibyte/common/cfe.c
@@ -316,11 +316,6 @@ void __init prom_init(void)
#endif
}
-void __init prom_free_prom_memory(void)
-{
- /* Not sure what I'm supposed to do here. Nothing, I think */
-}
-
void prom_putchar(char c)
{
int ret;
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 6d0fd0e055b4..42ba1e97dff0 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -340,10 +340,6 @@ void __init prom_init(void)
txx9_board_vec->prom_init();
}
-void __init prom_free_prom_memory(void)
-{
-}
-
const char *get_system_type(void)
{
return txx9_system_type;
diff --git a/arch/mips/vdso/Kconfig b/arch/mips/vdso/Kconfig
index 7aec721398d5..a665f6108cb5 100644
--- a/arch/mips/vdso/Kconfig
+++ b/arch/mips/vdso/Kconfig
@@ -12,7 +12,7 @@
# the lack of relocations. As such, we disable the VDSO for microMIPS builds.
config MIPS_LD_CAN_LINK_VDSO
- def_bool LD_VERSION >= 225000000 || LD_IS_LLD
+ def_bool LD_VERSION >= 22500 || LD_IS_LLD
config MIPS_DISABLE_VDSO
def_bool CPU_MICROMIPS || (!CPU_MIPSR6 && !MIPS_LD_CAN_LINK_VDSO)
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 5810cc12bc1d..2131d3fd7333 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -16,16 +16,13 @@ ccflags-vdso := \
$(filter -march=%,$(KBUILD_CFLAGS)) \
$(filter -m%-float,$(KBUILD_CFLAGS)) \
$(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \
+ $(CLANG_FLAGS) \
-D__VDSO__
ifndef CONFIG_64BIT
ccflags-vdso += -DBUILD_VDSO32
endif
-ifdef CONFIG_CC_IS_CLANG
-ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS))
-endif
-
#
# The -fno-jump-tables flag only prevents the compiler from generating
# jump tables but does not prevent the compiler from emitting absolute
diff --git a/arch/mips/vr41xx/common/init.c b/arch/mips/vr41xx/common/init.c
index ca53ac3060ef..628dddf79a05 100644
--- a/arch/mips/vr41xx/common/init.c
+++ b/arch/mips/vr41xx/common/init.c
@@ -58,7 +58,3 @@ void __init prom_init(void)
strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
}
}
-
-void __init prom_free_prom_memory(void)
-{
-}
diff --git a/arch/nds32/configs/defconfig b/arch/nds32/configs/defconfig
index 40313a635075..f9a89cf00aa6 100644
--- a/arch/nds32/configs/defconfig
+++ b/arch/nds32/configs/defconfig
@@ -1,4 +1,3 @@
-CONFIG_CROSS_COMPILE="nds32le-linux-"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_HIGH_RES_TIMERS=y
diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c
index e01ad5d17224..c1327e552ec6 100644
--- a/arch/nds32/kernel/process.c
+++ b/arch/nds32/kernel/process.c
@@ -156,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
memset(childregs, 0, sizeof(struct pt_regs));
/* kernel thread fn */
p->thread.cpu_context.r6 = stack_start;
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c
index c356e484dcab..af82e996f412 100644
--- a/arch/nds32/kernel/setup.c
+++ b/arch/nds32/kernel/setup.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(elf_hwcap);
/*
* The following string table, must sync with HWCAP_xx bitmask,
- * which is defined in <asm/procinfo.h>
+ * which is defined above
*/
static const char *hwcap_str[] = {
"mfusr_pc",
diff --git a/arch/nds32/kernel/time.c b/arch/nds32/kernel/time.c
index ac9d78ce3a81..574a3d0a8539 100644
--- a/arch/nds32/kernel/time.c
+++ b/arch/nds32/kernel/time.c
@@ -2,7 +2,7 @@
// Copyright (C) 2005-2017 Andes Technology Corporation
#include <linux/clocksource.h>
-#include <linux/clk-provider.h>
+#include <linux/of_clk.h>
void __init time_init(void)
{
diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c
index 6a9772ba7392..ee0d9ae192a5 100644
--- a/arch/nds32/kernel/traps.c
+++ b/arch/nds32/kernel/traps.c
@@ -25,17 +25,8 @@ extern void show_pte(struct mm_struct *mm, unsigned long addr);
void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
{
unsigned long first;
- mm_segment_t fs;
int i;
- /*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
- */
- fs = get_fs();
- set_fs(KERNEL_DS);
-
pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl, bottom, top);
for (first = bottom & ~31; first < top; first += 32) {
@@ -48,7 +39,9 @@ void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
if (p >= bottom && p < top) {
unsigned long val;
- if (__get_user(val, (unsigned long *)p) == 0)
+
+ if (get_kernel_nofault(val,
+ (unsigned long *)p) == 0)
sprintf(str + i * 9, " %08lx", val);
else
sprintf(str + i * 9, " ????????");
@@ -56,46 +49,10 @@ void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
}
pr_emerg("%s%04lx:%s\n", lvl, first & 0xffff, str);
}
-
- set_fs(fs);
}
EXPORT_SYMBOL(dump_mem);
-static void dump_instr(struct pt_regs *regs)
-{
- unsigned long addr = instruction_pointer(regs);
- mm_segment_t fs;
- char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
- int i;
-
- return;
- /*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
- */
- fs = get_fs();
- set_fs(KERNEL_DS);
-
- pr_emerg("Code: ");
- for (i = -4; i < 1; i++) {
- unsigned int val, bad;
-
- bad = __get_user(val, &((u32 *) addr)[i]);
-
- if (!bad) {
- p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
- } else {
- p += sprintf(p, "bad PC value");
- break;
- }
- }
- pr_emerg("Code: %s\n", str);
-
- set_fs(fs);
-}
-
#define LOOP_TIMES (100)
static void __dump(struct task_struct *tsk, unsigned long *base_reg,
const char *loglvl)
@@ -179,7 +136,6 @@ void die(const char *str, struct pt_regs *regs, int err)
if (!user_mode(regs) || in_interrupt()) {
dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK);
- dump_instr(regs);
dump_stack();
}
diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S
index da8442450e46..0794cd7803df 100644
--- a/arch/nios2/kernel/entry.S
+++ b/arch/nios2/kernel/entry.S
@@ -389,7 +389,10 @@ ENTRY(ret_from_interrupt)
*/
ENTRY(sys_clone)
SAVE_SWITCH_STACK
+ subi sp, sp, 4 /* make space for tls pointer */
+ stw r8, 0(sp) /* pass tls pointer (r8) via stack (5th argument) */
call nios2_clone
+ addi sp, sp, 4
RESTORE_SWITCH_STACK
ret
diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c
index 50b4eb19a6cc..c5f916ca6845 100644
--- a/arch/nios2/kernel/process.c
+++ b/arch/nios2/kernel/process.c
@@ -109,7 +109,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
struct switch_stack *childstack =
((struct switch_stack *)childregs) - 1;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
memset(childstack, 0,
sizeof(struct switch_stack) + sizeof(struct pt_regs));
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index 3c6e3c813a0b..d2f21957e99c 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -32,8 +32,6 @@ EXPORT_SYMBOL(memory_start);
unsigned long memory_end;
EXPORT_SYMBOL(memory_end);
-unsigned long memory_size;
-
static struct pt_regs fake_regs = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0};
@@ -141,16 +139,22 @@ asmlinkage void __init nios2_boot_init(unsigned r4, unsigned r5, unsigned r6,
parse_early_param();
}
+static void __init find_limits(unsigned long *min, unsigned long *max_low,
+ unsigned long *max_high)
+{
+ *max_low = PFN_DOWN(memblock_get_current_limit());
+ *min = PFN_UP(memblock_start_of_DRAM());
+ *max_high = PFN_DOWN(memblock_end_of_DRAM());
+}
+
void __init setup_arch(char **cmdline_p)
{
int dram_start;
console_verbose();
- dram_start = memblock_start_of_DRAM();
- memory_size = memblock_phys_mem_size();
- memory_start = PAGE_ALIGN((unsigned long)__pa(_end));
- memory_end = (unsigned long) CONFIG_NIOS2_MEM_BASE + memory_size;
+ memory_start = memblock_start_of_DRAM();
+ memory_end = memblock_end_of_DRAM();
init_mm.start_code = (unsigned long) _stext;
init_mm.end_code = (unsigned long) _etext;
@@ -161,11 +165,10 @@ void __init setup_arch(char **cmdline_p)
/* Keep a copy of command line */
*cmdline_p = boot_command_line;
- min_low_pfn = PFN_UP(memory_start);
- max_low_pfn = PFN_DOWN(memory_end);
+ find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);
max_mapnr = max_low_pfn;
- memblock_reserve(dram_start, memory_start - dram_start);
+ memblock_reserve(__pa_symbol(_stext), _end - _stext);
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start) {
memblock_reserve(virt_to_phys((void *)initrd_start),
diff --git a/arch/nios2/kernel/sys_nios2.c b/arch/nios2/kernel/sys_nios2.c
index cd390ec4f88b..b1ca85699952 100644
--- a/arch/nios2/kernel/sys_nios2.c
+++ b/arch/nios2/kernel/sys_nios2.c
@@ -22,6 +22,7 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len,
unsigned int op)
{
struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
if (len == 0)
return 0;
@@ -34,16 +35,22 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len,
if (addr + len < addr)
return -EFAULT;
+ if (mmap_read_lock_killable(mm))
+ return -EINTR;
+
/*
* Verify that the specified address region actually belongs
* to this process.
*/
- vma = find_vma(current->mm, addr);
- if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
+ vma = find_vma(mm, addr);
+ if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
+ mmap_read_unlock(mm);
return -EFAULT;
+ }
flush_cache_range(vma, addr, addr + len);
+ mmap_read_unlock(mm);
return 0;
}
diff --git a/arch/openrisc/Kbuild b/arch/openrisc/Kbuild
new file mode 100644
index 000000000000..4234b4c03e72
--- /dev/null
+++ b/arch/openrisc/Kbuild
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += lib/ kernel/ mm/
+obj-y += boot/dts/
diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile
index bf10141c7426..410e7abfac69 100644
--- a/arch/openrisc/Makefile
+++ b/arch/openrisc/Makefile
@@ -24,6 +24,10 @@ LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__
+all: vmlinux.bin
+
+boot := arch/$(ARCH)/boot
+
ifeq ($(CONFIG_OPENRISC_HAVE_INST_MUL),y)
KBUILD_CFLAGS += $(call cc-option,-mhard-mul)
else
@@ -38,14 +42,13 @@ endif
head-y := arch/openrisc/kernel/head.o
-core-y += arch/openrisc/lib/ \
- arch/openrisc/kernel/ \
- arch/openrisc/mm/
+core-y += arch/openrisc/
libs-y += $(LIBGCC)
-ifneq '$(CONFIG_OPENRISC_BUILTIN_DTB)' '""'
-BUILTIN_DTB := y
-else
-BUILTIN_DTB := n
-endif
-core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/
+PHONY += vmlinux.bin
+
+vmlinux.bin: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+archclean:
+ $(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/openrisc/boot/.gitignore
index e78470141932..007d6fea3145 100644
--- a/arch/c6x/include/uapi/asm/Kbuild
+++ b/arch/openrisc/boot/.gitignore
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
-generic-y += ucontext.h
+vmlinux.bin
diff --git a/arch/c6x/boot/Makefile b/arch/openrisc/boot/Makefile
index 842b7b0bfe80..5b28538f4dd1 100644
--- a/arch/c6x/boot/Makefile
+++ b/arch/openrisc/boot/Makefile
@@ -3,9 +3,8 @@
# Makefile for bootable kernel images
#
+targets += vmlinux.bin
+
OBJCOPYFLAGS_vmlinux.bin := -O binary
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
-
-$(obj)/dtbImage.%: vmlinux
- $(call if_changed,objcopy)
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 3c98728cce24..eb62429681fc 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -34,6 +34,7 @@
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <linux/fs.h>
+#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <asm/io.h>
@@ -49,10 +50,16 @@
*/
struct thread_info *current_thread_info_set[NR_CPUS] = { &init_thread_info, };
-void machine_restart(void)
+void machine_restart(char *cmd)
{
- printk(KERN_INFO "*** MACHINE RESTART ***\n");
- __asm__("l.nop 1");
+ do_kernel_restart(cmd);
+
+ /* Give a grace period for failure to restart of 1s */
+ mdelay(1000);
+
+ /* Whoops - the platform was unable to reboot. Tell the user! */
+ pr_emerg("Reboot failed -- System halted\n");
+ while (1);
}
/*
@@ -167,7 +174,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
sp -= sizeof(struct pt_regs);
kregs = (struct pt_regs *)sp;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
memset(kregs, 0, sizeof(struct pt_regs));
kregs->gpr[20] = usp; /* fn, kernel thread */
kregs->gpr[22] = arg;
diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c
index 29c82ef2e207..48e1092a64de 100644
--- a/arch/openrisc/kernel/smp.c
+++ b/arch/openrisc/kernel/smp.c
@@ -16,6 +16,7 @@
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/irq.h>
+#include <linux/of.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
@@ -60,22 +61,32 @@ void __init smp_prepare_boot_cpu(void)
void __init smp_init_cpus(void)
{
- int i;
+ struct device_node *cpu;
+ u32 cpu_id;
- for (i = 0; i < NR_CPUS; i++)
- set_cpu_possible(i, true);
+ for_each_of_cpu_node(cpu) {
+ if (of_property_read_u32(cpu, "reg", &cpu_id)) {
+ pr_warn("%s missing reg property", cpu->full_name);
+ continue;
+ }
+
+ if (cpu_id < NR_CPUS)
+ set_cpu_possible(cpu_id, true);
+ }
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- int i;
+ unsigned int cpu;
/*
* Initialise the present map, which describes the set of CPUs
* actually populated at the present time.
*/
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
+ for_each_possible_cpu(cpu) {
+ if (cpu < max_cpus)
+ set_cpu_present(cpu, true);
+ }
}
void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 78b17621ee4a..4e53ac46e857 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -4,7 +4,6 @@ config PARISC
select ARCH_32BIT_OFF_T if !64BIT
select ARCH_MIGHT_HAVE_PC_PARPORT
select HAVE_IDE
- select HAVE_OPROFILE
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_SYSCALL_TRACEPOINTS
@@ -35,6 +34,7 @@ config PARISC
select GENERIC_SMP_IDLE_THREAD
select GENERIC_CPU_DEVICES
select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_LIB_DEVMEM_IS_ALLOWED
select SYSCTL_ARCH_UNALIGN_ALLOW
select SYSCTL_EXCEPTION_TRACE
select HAVE_MOD_ARCH_SPECIFIC
@@ -61,8 +61,10 @@ config PARISC
select HAVE_KRETPROBES
select HAVE_DYNAMIC_FTRACE if $(cc-option,-fpatchable-function-entry=1,1)
select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE
+ select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if DYNAMIC_FTRACE
select HAVE_KPROBES_ON_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
select SET_FS
help
@@ -202,9 +204,8 @@ config PREFETCH
depends on PA8X00 || PA7200
config MLONGCALLS
- bool "Enable the -mlong-calls compiler option for big kernels"
- default y if !MODULES || UBSAN || FTRACE
- default n
+ def_bool y if !MODULES || UBSAN || FTRACE
+ bool "Enable the -mlong-calls compiler option for big kernels" if MODULES && !UBSAN && !FTRACE
depends on PA8X00
help
If you configure the kernel to include many drivers built-in instead
@@ -312,6 +313,16 @@ config IRQSTACKS
for handling hard and soft interrupts. This can help avoid
overflowing the process kernel stacks.
+config TLB_PTLOCK
+ bool "Use page table locks in TLB fault handler"
+ depends on SMP
+ default n
+ help
+ Select this option to enable page table locking in the TLB
+ fault handler. This ensures that page table entries are
+ updated consistently on SMP machines at the expense of some
+ loss in performance.
+
config HOTPLUG_CPU
bool
default y if SMP
@@ -336,7 +347,6 @@ source "kernel/Kconfig.hz"
config COMPAT
def_bool y
depends on 64BIT
- select COMPAT_BINFMT_ELF if BINFMT_ELF
config SYSVIPC_COMPAT
def_bool y
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 5140c602207f..7d9f71aa829a 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -116,8 +116,6 @@ kernel-y := mm/ kernel/ math-emu/
core-y += $(addprefix arch/parisc/, $(kernel-y))
libs-y += arch/parisc/lib/ $(LIBGCC)
-drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/
-
boot := arch/parisc/boot
PALO := $(shell if (which palo 2>&1); then : ; \
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index 3cbcfad5f724..7611d48c599e 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -22,7 +22,6 @@ CONFIG_PCI_LBA=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_UNUSED_SYMBOLS=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=m
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index 8f81fcbf04c4..53054b81461a 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -31,7 +31,6 @@ CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
-CONFIG_UNUSED_SYMBOLS=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index 8f33085ff1bd..1a609d38f667 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -179,7 +179,7 @@ static __inline__ void __user *arch_compat_alloc_user_space(long len)
static inline int __is_compat_task(struct task_struct *t)
{
- return test_ti_thread_flag(task_thread_info(t), TIF_32BIT);
+ return test_tsk_thread_flag(t, TIF_32BIT);
}
static inline int is_compat_task(void)
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h
index fad29aa6f45f..1e4fbd0fd944 100644
--- a/arch/parisc/include/asm/hardirq.h
+++ b/arch/parisc/include/asm/hardirq.h
@@ -12,10 +12,6 @@
#include <linux/threads.h>
#include <linux/irq.h>
-#ifdef CONFIG_IRQSTACKS
-#define __ARCH_HAS_DO_SOFTIRQ
-#endif
-
typedef struct {
unsigned int __softirq_pending;
unsigned int kernel_stack_usage;
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 45e20d38dc59..8a11b8cf4719 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -321,4 +321,6 @@ extern void iowrite64be(u64 val, void __iomem *addr);
*/
#define xlate_dev_kmem_ptr(p) p
+extern int devmem_is_allowed(unsigned long pfn);
+
#endif
diff --git a/arch/parisc/include/asm/irq.h b/arch/parisc/include/asm/irq.h
index 959e79cd2c14..378f63c4015b 100644
--- a/arch/parisc/include/asm/irq.h
+++ b/arch/parisc/include/asm/irq.h
@@ -47,7 +47,4 @@ extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *);
extern int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest);
-/* soft power switch support (power.c) */
-extern struct tasklet_struct power_tasklet;
-
#endif /* _ASM_PARISC_IRQ_H */
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 46f8c22c5977..726257648d9f 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -5,6 +5,7 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/atomic.h>
+#include <linux/spinlock.h>
#include <asm-generic/mm_hooks.h>
/* on PA-RISC, we actually have enough contexts to justify an allocator
@@ -50,6 +51,12 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
struct mm_struct *next, struct task_struct *tsk)
{
if (prev != next) {
+#ifdef CONFIG_TLB_PTLOCK
+ /* put physical address of page_table_lock in cr28 (tr4)
+ for TLB faults */
+ spinlock_t *pgd_lock = &next->page_table_lock;
+ mtctl(__pa(__ldcw_align(&pgd_lock->rlock.raw_lock)), 28);
+#endif
mtctl(__pa(next->pgd), 25);
load_context(next->context);
}
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 6b3f6740a6a6..d00313d1274e 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -112,7 +112,7 @@ extern int npmem_ranges;
#else
#define BITS_PER_PTE_ENTRY 2
#define BITS_PER_PMD_ENTRY 2
-#define BITS_PER_PGD_ENTRY BITS_PER_PMD_ENTRY
+#define BITS_PER_PGD_ENTRY 2
#endif
#define PGD_ENTRY_SIZE (1UL << BITS_PER_PGD_ENTRY)
#define PMD_ENTRY_SIZE (1UL << BITS_PER_PMD_ENTRY)
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index a6482b2ce0ea..dda557085311 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -15,47 +15,23 @@
#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>
-/* Allocate the top level pgd (page directory)
- *
- * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
- * allocate the first pmd adjacent to the pgd. This means that we can
- * subtract a constant offset to get to it. The pmd and pgd sizes are
- * arranged so that a single pmd covers 4GB (giving a full 64-bit
- * process access to 8TB) so our lookups are effectively L2 for the
- * first 4GB of the kernel (i.e. for all ILP32 processes and all the
- * kernel for machines with under 4GB of memory) */
+/* Allocate the top level pgd (page directory) */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
- PGD_ALLOC_ORDER);
- pgd_t *actual_pgd = pgd;
+ pgd_t *pgd;
- if (likely(pgd != NULL)) {
- memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#if CONFIG_PGTABLE_LEVELS == 3
- actual_pgd += PTRS_PER_PGD;
- /* Populate first pmd with allocated memory. We mark it
- * with PxD_FLAG_ATTACHED as a signal to the system that this
- * pmd entry may not be cleared. */
- set_pgd(actual_pgd, __pgd((PxD_FLAG_PRESENT |
- PxD_FLAG_VALID |
- PxD_FLAG_ATTACHED)
- + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)));
- /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
- * a signal that this pmd may not be freed */
- set_pgd(pgd, __pgd(PxD_FLAG_ATTACHED));
-#endif
- }
- spin_lock_init(pgd_spinlock(actual_pgd));
- return actual_pgd;
+ pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ if (unlikely(pgd == NULL))
+ return NULL;
+
+ memset(pgd, 0, PAGE_SIZE << PGD_ORDER);
+
+ return pgd;
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
-#if CONFIG_PGTABLE_LEVELS == 3
- pgd -= PTRS_PER_PGD;
-#endif
- free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
+ free_pages((unsigned long)pgd, PGD_ORDER);
}
#if CONFIG_PGTABLE_LEVELS == 3
@@ -70,41 +46,25 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- return (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
+ pmd_t *pmd;
+
+ pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
+ if (likely(pmd))
+ memset ((void *)pmd, 0, PAGE_SIZE << PMD_ORDER);
+ return pmd;
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
- if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
- /*
- * This is the permanent pmd attached to the pgd;
- * cannot free it.
- * Increment the counter to compensate for the decrement
- * done by generic mm code.
- */
- mm_inc_nr_pmds(mm);
- return;
- }
free_pages((unsigned long)pmd, PMD_ORDER);
}
-
#endif
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
-#if CONFIG_PGTABLE_LEVELS == 3
- /* preserve the gateway marker if this is the beginning of
- * the permanent pmd */
- if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
- set_pmd(pmd, __pmd((PxD_FLAG_PRESENT |
- PxD_FLAG_VALID |
- PxD_FLAG_ATTACHED)
- + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
- else
-#endif
- set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
- + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
+ set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
+ + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
}
#define pmd_populate(mm, pmd, pte_page) \
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 75cf84070fc9..39017210dbf0 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -23,8 +23,6 @@
#include <asm/processor.h>
#include <asm/cache.h>
-static inline spinlock_t *pgd_spinlock(pgd_t *);
-
/*
* kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
* memory. For the return value to be meaningful, ADDR must be >=
@@ -42,12 +40,8 @@ static inline spinlock_t *pgd_spinlock(pgd_t *);
/* This is for the serialization of PxTLB broadcasts. At least on the N class
* systems, only one PxTLB inter processor broadcast can be active at any one
- * time on the Merced bus.
-
- * PTE updates are protected by locks in the PMD.
- */
+ * time on the Merced bus. */
extern spinlock_t pa_tlb_flush_lock;
-extern spinlock_t pa_swapper_pg_lock;
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
extern int pa_serialize_tlb_flushes;
#else
@@ -86,18 +80,16 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
-#define set_pte(pteptr, pteval) \
- do{ \
- *(pteptr) = (pteval); \
- } while(0)
-
-#define set_pte_at(mm, addr, ptep, pteval) \
- do { \
- unsigned long flags; \
- spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\
- set_pte(ptep, pteval); \
- purge_tlb_entries(mm, addr); \
- spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\
+#define set_pte(pteptr, pteval) \
+ do { \
+ *(pteptr) = (pteval); \
+ barrier(); \
+ } while(0)
+
+#define set_pte_at(mm, addr, pteptr, pteval) \
+ do { \
+ *(pteptr) = (pteval); \
+ purge_tlb_entries(mm, addr); \
} while (0)
#endif /* !__ASSEMBLY__ */
@@ -120,12 +112,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
#if CONFIG_PGTABLE_LEVELS == 3
-#define PGD_ORDER 1 /* Number of pages per pgd */
-#define PMD_ORDER 1 /* Number of pages per pmd */
-#define PGD_ALLOC_ORDER (2 + 1) /* first pgd contains pmd */
+#define PMD_ORDER 1
+#define PGD_ORDER 0
#else
-#define PGD_ORDER 1 /* Number of pages per pgd */
-#define PGD_ALLOC_ORDER (PGD_ORDER + 1)
+#define PGD_ORDER 1
#endif
/* Definitions for 3rd level (we use PLD here for Page Lower directory
@@ -240,11 +230,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
* able to effectively address 40/42/44-bits of physical address space
* depending on 4k/16k/64k PAGE_SIZE */
#define _PxD_PRESENT_BIT 31
-#define _PxD_ATTACHED_BIT 30
-#define _PxD_VALID_BIT 29
+#define _PxD_VALID_BIT 30
#define PxD_FLAG_PRESENT (1 << xlate_pabit(_PxD_PRESENT_BIT))
-#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT))
#define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK (0xf)
#define PxD_FLAG_SHIFT (4)
@@ -326,23 +314,10 @@ extern unsigned long *empty_zero_page;
#define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
-#if CONFIG_PGTABLE_LEVELS == 3
-/* The first entry of the permanent pmd is not there if it contains
- * the gateway marker */
-#define pmd_none(x) (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
-#else
#define pmd_none(x) (!pmd_val(x))
-#endif
#define pmd_bad(x) (!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd) {
-#if CONFIG_PGTABLE_LEVELS == 3
- if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
- /* This is the entry pointing to the permanent pmd
- * attached to the pgd; cannot clear it */
- set_pmd(pmd, __pmd(PxD_FLAG_ATTACHED));
- else
-#endif
set_pmd(pmd, __pmd(0));
}
@@ -358,12 +333,6 @@ static inline void pmd_clear(pmd_t *pmd) {
#define pud_bad(x) (!(pud_flag(x) & PxD_FLAG_VALID))
#define pud_present(x) (pud_flag(x) & PxD_FLAG_PRESENT)
static inline void pud_clear(pud_t *pud) {
-#if CONFIG_PGTABLE_LEVELS == 3
- if(pud_flag(*pud) & PxD_FLAG_ATTACHED)
- /* This is the permanent pmd attached to the pud; cannot
- * free it */
- return;
-#endif
set_pud(pud, __pud(0));
}
#endif
@@ -456,32 +425,18 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
-static inline spinlock_t *pgd_spinlock(pgd_t *pgd)
-{
- if (unlikely(pgd == swapper_pg_dir))
- return &pa_swapper_pg_lock;
- return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1)));
-}
-
-
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
pte_t pte;
- unsigned long flags;
if (!pte_young(*ptep))
return 0;
- spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags);
pte = *ptep;
if (!pte_young(pte)) {
- spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
return 0;
}
- set_pte(ptep, pte_mkold(pte));
- purge_tlb_entries(vma->vm_mm, addr);
- spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
+ set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
return 1;
}
@@ -489,24 +444,16 @@ struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t old_pte;
- unsigned long flags;
- spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
old_pte = *ptep;
- set_pte(ptep, __pte(0));
- purge_tlb_entries(mm, addr);
- spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
+ set_pte_at(mm, addr, ptep, __pte(0));
return old_pte;
}
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
- unsigned long flags;
- spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
- set_pte(ptep, pte_wrprotect(*ptep));
- purge_tlb_entries(mm, addr);
- spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
+ set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
}
#define pte_same(A,B) (pte_val(A) == pte_val(B))
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 305768a40773..cd2cc1b1648c 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -268,7 +268,6 @@ int main(void)
DEFINE(ASM_BITS_PER_PGD, BITS_PER_PGD);
DEFINE(ASM_BITS_PER_PMD, BITS_PER_PMD);
DEFINE(ASM_BITS_PER_PTE, BITS_PER_PTE);
- DEFINE(ASM_PGD_PMD_OFFSET, -(PAGE_SIZE << PGD_ORDER));
DEFINE(ASM_PMD_ENTRY, ((PAGE_OFFSET & PMD_MASK) >> PMD_SHIFT));
DEFINE(ASM_PGD_ENTRY, PAGE_OFFSET >> PGDIR_SHIFT);
DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index beba9816cc6c..9f939afe6b88 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -35,10 +35,9 @@
.level 2.0
#endif
- .import pa_tlb_lock,data
- .macro load_pa_tlb_lock reg
- mfctl %cr25,\reg
- addil L%(PAGE_SIZE << (PGD_ALLOC_ORDER - 1)),\reg
+ /* Get aligned page_table_lock address for this mm from cr28/tr4 */
+ .macro get_ptl reg
+ mfctl %cr28,\reg
.endm
/* space_to_prot macro creates a prot id from a space id */
@@ -407,7 +406,9 @@
# endif
#endif
dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+#if CONFIG_PGTABLE_LEVELS < 3
copy %r0,\pte
+#endif
ldw,s \index(\pmd),\pmd
bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
@@ -417,38 +418,23 @@
shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
.endm
- /* Look up PTE in a 3-Level scheme.
- *
- * Here we implement a Hybrid L2/L3 scheme: we allocate the
- * first pmd adjacent to the pgd. This means that we can
- * subtract a constant offset to get to it. The pmd and pgd
- * sizes are arranged so that a single pmd covers 4GB (giving
- * a full LP64 process access to 8TB) so our lookups are
- * effectively L2 for the first 4GB of the kernel (i.e. for
- * all ILP32 processes and all the kernel for machines with
- * under 4GB of memory) */
+ /* Look up PTE in a 3-Level scheme. */
.macro L3_ptep pgd,pte,index,va,fault
-#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
+#if CONFIG_PGTABLE_LEVELS == 3
+ copy %r0,\pte
extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
- extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
ldw,s \index(\pgd),\pgd
- extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
- extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
- shld \pgd,PxD_VALUE_SHIFT,\index
- extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
- copy \index,\pgd
- extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
- ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
+ shld \pgd,PxD_VALUE_SHIFT,\pgd
#endif
L2_ptep \pgd,\pte,\index,\va,\fault
.endm
- /* Acquire pa_tlb_lock lock and check page is present. */
- .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
-#ifdef CONFIG_SMP
+ /* Acquire page_table_lock and check page is present. */
+ .macro ptl_lock spc,ptp,pte,tmp,tmp1,fault
+#ifdef CONFIG_TLB_PTLOCK
98: cmpib,COND(=),n 0,\spc,2f
- load_pa_tlb_lock \tmp
+ get_ptl \tmp
1: LDCW 0(\tmp),\tmp1
cmpib,COND(=) 0,\tmp1,1b
nop
@@ -463,26 +449,26 @@
3:
.endm
- /* Release pa_tlb_lock lock without reloading lock address.
+ /* Release page_table_lock without reloading lock address.
Note that the values in the register spc are limited to
NR_SPACE_IDS (262144). Thus, the stw instruction always
stores a nonzero value even when register spc is 64 bits.
We use an ordered store to ensure all prior accesses are
performed prior to releasing the lock. */
- .macro tlb_unlock0 spc,tmp
-#ifdef CONFIG_SMP
+ .macro ptl_unlock0 spc,tmp
+#ifdef CONFIG_TLB_PTLOCK
98: or,COND(=) %r0,\spc,%r0
stw,ma \spc,0(\tmp)
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
.endm
- /* Release pa_tlb_lock lock. */
- .macro tlb_unlock1 spc,tmp
-#ifdef CONFIG_SMP
-98: load_pa_tlb_lock \tmp
+ /* Release page_table_lock. */
+ .macro ptl_unlock1 spc,tmp
+#ifdef CONFIG_TLB_PTLOCK
+98: get_ptl \tmp
+ ptl_unlock0 \spc,\tmp
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
- tlb_unlock0 \spc,\tmp
#endif
.endm
@@ -997,10 +983,17 @@ intr_do_preempt:
bb,<,n %r20, 31 - PSW_SM_I, intr_restore
nop
+ /* ssm PSW_SM_I done later in intr_restore */
+#ifdef CONFIG_MLONGCALLS
+ ldil L%intr_restore, %r2
+ load32 preempt_schedule_irq, %r1
+ bv %r0(%r1)
+ ldo R%intr_restore(%r2), %r2
+#else
+ ldil L%intr_restore, %r1
BL preempt_schedule_irq, %r2
- nop
-
- b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
+ ldo R%intr_restore(%r1), %r2
+#endif
#endif /* CONFIG_PREEMPTION */
/*
@@ -1158,14 +1151,14 @@ dtlb_miss_20w:
L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
- tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
+ ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot,t1
idtlbt pte,prot
- tlb_unlock1 spc,t0
+ ptl_unlock1 spc,t0
rfir
nop
@@ -1184,14 +1177,14 @@ nadtlb_miss_20w:
L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
- tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
+ ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot,t1
idtlbt pte,prot
- tlb_unlock1 spc,t0
+ ptl_unlock1 spc,t0
rfir
nop
@@ -1212,7 +1205,7 @@ dtlb_miss_11:
L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
- tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11
+ ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11
update_accessed ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
@@ -1225,7 +1218,7 @@ dtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- tlb_unlock1 spc,t0
+ ptl_unlock1 spc,t0
rfir
nop
@@ -1245,7 +1238,7 @@ nadtlb_miss_11:
L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
- tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11
+ ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11
update_accessed ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
@@ -1258,7 +1251,7 @@ nadtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- tlb_unlock1 spc,t0
+ ptl_unlock1 spc,t0
rfir
nop
@@ -1278,7 +1271,7 @@ dtlb_miss_20:
L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
- tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
+ ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot,t1
@@ -1287,7 +1280,7 @@ dtlb_miss_20:
idtlbt pte,prot
- tlb_unlock1 spc,t0
+ ptl_unlock1 spc,t0
rfir
nop
@@ -1306,7 +1299,7 @@ nadtlb_miss_20:
L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
- tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
+ ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot,t1
@@ -1315,7 +1308,7 @@ nadtlb_miss_20:
idtlbt pte,prot
- tlb_unlock1 spc,t0
+ ptl_unlock1 spc,t0
rfir
nop
@@ -1415,14 +1408,14 @@ itlb_miss_20w:
L3_ptep ptp,pte,t0,va,itlb_fault
- tlb_lock spc,ptp,pte,t0,t1,itlb_fault
+ ptl_lock spc,ptp,pte,t0,t1,itlb_fault
update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot,t1
iitlbt pte,prot
- tlb_unlock1 spc,t0
+ ptl_unlock1 spc,t0
rfir
nop
@@ -1439,14 +1432,14 @@ naitlb_miss_20w:
L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
- tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
+ ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot,t1
iitlbt pte,prot
- tlb_unlock1 spc,t0
+ ptl_unlock1 spc,t0
rfir
nop
@@ -1467,7 +1460,7 @@ itlb_miss_11:
L2_ptep ptp,pte,t0,va,itlb_fault
- tlb_lock spc,ptp,pte,t0,t1,itlb_fault
+ ptl_lock spc,ptp,pte,t0,t1,itlb_fault
update_accessed ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
@@ -1480,7 +1473,7 @@ itlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- tlb_unlock1 spc,t0
+ ptl_unlock1 spc,t0
rfir
nop
@@ -1491,7 +1484,7 @@ naitlb_miss_11:
L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
- tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11
+ ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11
update_accessed ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
@@ -1504,7 +1497,7 @@ naitlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- tlb_unlock1 spc,t0
+ ptl_unlock1 spc,t0
rfir
nop
@@ -1525,7 +1518,7 @@ itlb_miss_20:
L2_ptep ptp,pte,t0,va,itlb_fault
- tlb_lock spc,ptp,pte,t0,t1,itlb_fault
+ ptl_lock spc,ptp,pte,t0,t1,itlb_fault
update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot,t1
@@ -1534,7 +1527,7 @@ itlb_miss_20:
iitlbt pte,prot
- tlb_unlock1 spc,t0
+ ptl_unlock1 spc,t0
rfir
nop
@@ -1545,7 +1538,7 @@ naitlb_miss_20:
L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
- tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
+ ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot,t1
@@ -1554,7 +1547,7 @@ naitlb_miss_20:
iitlbt pte,prot
- tlb_unlock1 spc,t0
+ ptl_unlock1 spc,t0
rfir
nop
@@ -1577,14 +1570,14 @@ dbit_trap_20w:
L3_ptep ptp,pte,t0,va,dbit_fault
- tlb_lock spc,ptp,pte,t0,t1,dbit_fault
+ ptl_lock spc,ptp,pte,t0,t1,dbit_fault
update_dirty ptp,pte,t1
make_insert_tlb spc,pte,prot,t1
idtlbt pte,prot
- tlb_unlock0 spc,t0
+ ptl_unlock0 spc,t0
rfir
nop
#else
@@ -1597,7 +1590,7 @@ dbit_trap_11:
L2_ptep ptp,pte,t0,va,dbit_fault
- tlb_lock spc,ptp,pte,t0,t1,dbit_fault
+ ptl_lock spc,ptp,pte,t0,t1,dbit_fault
update_dirty ptp,pte,t1
make_insert_tlb_11 spc,pte,prot
@@ -1610,7 +1603,7 @@ dbit_trap_11:
mtsp t1, %sr1 /* Restore sr1 */
- tlb_unlock0 spc,t0
+ ptl_unlock0 spc,t0
rfir
nop
@@ -1621,7 +1614,7 @@ dbit_trap_20:
L2_ptep ptp,pte,t0,va,dbit_fault
- tlb_lock spc,ptp,pte,t0,t1,dbit_fault
+ ptl_lock spc,ptp,pte,t0,t1,dbit_fault
update_dirty ptp,pte,t1
make_insert_tlb spc,pte,prot,t1
@@ -1630,7 +1623,7 @@ dbit_trap_20:
idtlbt pte,prot
- tlb_unlock0 spc,t0
+ ptl_unlock0 spc,t0
rfir
nop
#endif
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index 81de5e2b391c..c2981401775c 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -289,13 +289,3 @@ os_hpmc_6:
b .
nop
.align 16 /* make function length multiple of 16 bytes */
-.os_hpmc_end:
-
-
- __INITRODATA
-.globl os_hpmc_size
- .align 4
- .type os_hpmc_size, @object
- .size os_hpmc_size, 4
-os_hpmc_size:
- .word .os_hpmc_end-.os_hpmc
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 49cd6d2caefb..0d46b19dc4d3 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <asm/io.h>
+#include <asm/softirq_stack.h>
#include <asm/smp.h>
#include <asm/ldcw.h>
@@ -373,7 +374,11 @@ static inline int eirr_to_irq(unsigned long eirr)
/*
* IRQ STACK - used for irq handler
*/
+#ifdef CONFIG_64BIT
+#define IRQ_STACK_SIZE (4096 << 4) /* 64k irq stack size */
+#else
#define IRQ_STACK_SIZE (4096 << 3) /* 32k irq stack size */
+#endif
union irq_stack_union {
unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index a92a23d6acd9..b144fbe29bc1 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -200,7 +200,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
extern void * const ret_from_kernel_thread;
extern void * const child_return;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
/* kernel thread */
memset(cregs, 0, sizeof(struct pt_regs));
if (!usp) /* idle thread */
@@ -260,6 +260,8 @@ get_wchan(struct task_struct *p)
do {
if (unwind_once(&info) < 0)
return 0;
+ if (p->state == TASK_RUNNING)
+ return 0;
ip = info.ip;
if (!in_sched_functions(ip))
return ip;
diff --git a/arch/parisc/kernel/syscalls/Makefile b/arch/parisc/kernel/syscalls/Makefile
index c22a21c39f30..283f64407b07 100644
--- a/arch/parisc/kernel/syscalls/Makefile
+++ b/arch/parisc/kernel/syscalls/Makefile
@@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm
_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
-syscall := $(srctree)/$(src)/syscall.tbl
+syscall := $(src)/syscall.tbl
syshdr := $(srctree)/$(src)/syscallhdr.sh
systbl := $(srctree)/$(src)/syscalltbl.sh
@@ -22,24 +22,24 @@ quiet_cmd_systbl = SYSTBL $@
'$(systbl_offset_$(basetarget))'
syshdr_abis_unistd_32 := common,32
-$(uapi)/unistd_32.h: $(syscall) $(syshdr)
+$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
syshdr_abis_unistd_64 := common,64
-$(uapi)/unistd_64.h: $(syscall) $(syshdr)
+$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
systbl_abis_syscall_table_32 := common,32
-$(kapi)/syscall_table_32.h: $(syscall) $(systbl)
+$(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
systbl_abis_syscall_table_64 := common,64
-$(kapi)/syscall_table_64.h: $(syscall) $(systbl)
+$(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
systbl_abis_syscall_table_c32 := common,32
systbl_abi_syscall_table_c32 := c32
-$(kapi)/syscall_table_c32.h: $(syscall) $(systbl)
+$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
uapisyshdr-y += unistd_32.h unistd_64.h
@@ -47,9 +47,10 @@ kapisyshdr-y += syscall_table_32.h \
syscall_table_64.h \
syscall_table_c32.h
-targets += $(uapisyshdr-y) $(kapisyshdr-y)
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
PHONY += all
-all: $(addprefix $(uapi)/,$(uapisyshdr-y))
-all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+all: $(uapisyshdr-y) $(kapisyshdr-y)
@:
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index 6bcc31966b44..271a92519683 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -439,3 +439,4 @@
439 common faccessat2 sys_faccessat2
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index a52c7abf2ca4..8d8441d4562a 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -798,14 +798,13 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
void __init initialize_ivt(const void *iva)
{
- extern u32 os_hpmc_size;
extern const u32 os_hpmc[];
int i;
u32 check = 0;
u32 *ivap;
u32 *hpmcp;
- u32 length, instr;
+ u32 instr;
if (strcmp((const char *)iva, "cows can fly"))
panic("IVT invalid");
@@ -836,18 +835,14 @@ void __init initialize_ivt(const void *iva)
/* Setup IVA and compute checksum for HPMC handler */
ivap[6] = (u32)__pa(os_hpmc);
- length = os_hpmc_size;
- ivap[7] = length;
hpmcp = (u32 *)os_hpmc;
- for (i=0; i<length/4; i++)
- check += *hpmcp++;
-
for (i=0; i<8; i++)
check += ivap[i];
ivap[5] = -check;
+ pr_debug("initialize_ivt: IVA[6] = 0x%08x\n", ivap[6]);
}
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index d7ba014a7fbb..43652de5f139 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -142,24 +142,17 @@ static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
- unsigned long flags;
-
- spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
__set_huge_pte_at(mm, addr, ptep, entry);
- spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- unsigned long flags;
pte_t entry;
- spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
entry = *ptep;
__set_huge_pte_at(mm, addr, ptep, __pte(0));
- spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
return entry;
}
@@ -168,29 +161,23 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- unsigned long flags;
pte_t old_pte;
- spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
old_pte = *ptep;
__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
- spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
}
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
- unsigned long flags;
int changed;
struct mm_struct *mm = vma->vm_mm;
- spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
changed = !pte_same(*ptep, pte);
if (changed) {
__set_huge_pte_at(mm, addr, ptep, pte);
}
- spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
return changed;
}
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 3ec633b11b54..9ca4e4ff6895 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -37,11 +37,6 @@ extern int data_start;
extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
#if CONFIG_PGTABLE_LEVELS == 3
-/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
- * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
- * guarantee that global objects will be laid out in memory in the same order
- * as the order of declaration, so put these in different sections and use
- * the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE)));
#endif
@@ -559,6 +554,11 @@ void __init mem_init(void)
BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
> BITS_PER_LONG);
+#if CONFIG_PGTABLE_LEVELS == 3
+ BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PMD);
+#else
+ BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD);
+#endif
high_memory = __va((max_pfn << PAGE_SHIFT));
set_max_mapnr(max_low_pfn);
diff --git a/arch/parisc/oprofile/Makefile b/arch/parisc/oprofile/Makefile
deleted file mode 100644
index 86a1ccc328eb..000000000000
--- a/arch/parisc/oprofile/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) init.o
diff --git a/arch/parisc/oprofile/init.c b/arch/parisc/oprofile/init.c
deleted file mode 100644
index 026cba2af07a..000000000000
--- a/arch/parisc/oprofile/init.c
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * @file init.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/oprofile.h>
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- return -ENODEV;
-}
-
-
-void oprofile_arch_exit(void)
-{
-}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 107bb4319e0e..386ae12d8523 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -196,7 +196,6 @@ config PPC
select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
select HAVE_CONTEXT_TRACKING if PPC64
- select HAVE_TIF_NOHZ if PPC64
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DYNAMIC_FTRACE
@@ -226,7 +225,6 @@ config PPC
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
select HAVE_HARDLOCKUP_DETECTOR_ARCH if (PPC64 && PPC_BOOK3S)
- select HAVE_OPROFILE
select HAVE_OPTPROBES if PPC64
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI if PPC64
@@ -237,6 +235,7 @@ config PPC
select MMU_GATHER_PAGE_SIZE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
+ select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
select HAVE_IRQ_TIME_ACCOUNTING
@@ -282,7 +281,6 @@ config COMPAT
bool "Enable support for 32bit binaries"
depends on PPC64
default y if !CPU_LITTLE_ENDIAN
- select COMPAT_BINFMT_ELF
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
@@ -505,18 +503,14 @@ config HOTPLUG_CPU
Say N if you are unsure.
config PPC_QUEUED_SPINLOCKS
- bool "Queued spinlocks"
+ bool "Queued spinlocks" if EXPERT
depends on SMP
+ default PPC_BOOK3S_64
help
Say Y here to use queued spinlocks which give better scalability and
fairness on large SMP and NUMA systems without harming single threaded
performance.
- This option is currently experimental, the code is more complex and
- less tested so it defaults to "N" for the moment.
-
- If unsure, say "N".
-
config ARCH_CPU_PROBE_RELEASE
def_bool y
depends on HOTPLUG_CPU
@@ -720,18 +714,6 @@ config ARCH_MEMORY_PROBE
def_bool y
depends on MEMORY_HOTPLUG
-config STDBINUTILS
- bool "Using standard binutils settings"
- depends on 44x
- default y
- help
- Turning this option off allows you to select 256KB PAGE_SIZE on 44x.
- Note, that kernel will be able to run only those applications,
- which had been compiled using binutils later than 2.17.50.0.3 with
- '-zmax-page-size' set to 256K (the default is 64K). Or, if using
- the older binutils, you can patch them with a trivial patch, which
- changes the ELF_MAXPAGESIZE definition from 0x10000 to 0x40000.
-
choice
prompt "Page size"
default PPC_4K_PAGES
@@ -771,17 +753,15 @@ config PPC_64K_PAGES
select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
config PPC_256K_PAGES
- bool "256k page size"
- depends on 44x && !STDBINUTILS
+ bool "256k page size (Requires non-standard binutils settings)"
+ depends on 44x && !PPC_47x
help
Make the page size 256k.
- As the ELF standard only requires alignment to support page
- sizes up to 64k, you will need to compile all of your user
- space applications with a non-standard binutils settings
- (see the STDBINUTILS description for details).
-
- Say N unless you know what you are doing.
+ The kernel will only be able to run applications that have been
+ compiled with '-zmax-page-size' set to 256K (the default is 64K) using
+ binutils later than 2.17.50.0.3, or by patching the ELF_MAXPAGESIZE
+ definition from 0x10000 to 0x40000 in older versions.
endchoice
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index b88900f4832f..ae084357994e 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -88,6 +88,7 @@ config PPC_IRQ_SOFT_MASK_DEBUG
config XMON
bool "Include xmon kernel debugger"
depends on DEBUG_KERNEL
+ select CONSOLE_POLL if SERIAL_CPM_CONSOLE
help
Include in-kernel hooks for the xmon kernel monitor/debugger.
Unless you are intending to debug the kernel, say N here.
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 08cf0eade56a..5f8544cf724a 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -65,7 +65,7 @@ UTS_MACHINE := $(subst $(space),,$(machine-y))
ifdef CONFIG_PPC32
KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
else
-ifeq ($(call ld-ifversion, -ge, 225000000, y),y)
+ifeq ($(call ld-ifversion, -ge, 22500, y),y)
# Have the linker provide sfpr if possible.
# There is a corresponding test in arch/powerpc/lib/Makefile
KBUILD_LDFLAGS_MODULE += --save-restore-funcs
@@ -276,8 +276,6 @@ head-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += arch/powerpc/kernel/prom_init.o
# See arch/powerpc/Kbuild for content of core part of the kernel
core-y += arch/powerpc/
-drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
-
# Default to zImage, override when needed
all: zImage
diff --git a/arch/powerpc/configs/44x/akebono_defconfig b/arch/powerpc/configs/44x/akebono_defconfig
index 3894ba8f8ffc..4bc549c6edc5 100644
--- a/arch/powerpc/configs/44x/akebono_defconfig
+++ b/arch/powerpc/configs/44x/akebono_defconfig
@@ -8,7 +8,6 @@ CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_SLUB_CPU_PARTIAL is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
@@ -21,6 +20,7 @@ CONFIG_IRQ_ALL_CPUS=y
# CONFIG_COMPACTION is not set
# CONFIG_SUSPEND is not set
CONFIG_NET=y
+CONFIG_NETDEVICES=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
@@ -41,7 +41,9 @@ CONFIG_BLK_DEV_RAM_SIZE=35000
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
# CONFIG_SATA_PMP is not set
+CONFIG_SATA_AHCI_PLATFORM=y
# CONFIG_ATA_SFF is not set
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_ADAPTEC is not set
@@ -98,6 +100,8 @@ CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_HCD_PCI is not set
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_M41T80=y
CONFIG_EXT2_FS=y
diff --git a/arch/powerpc/configs/44x/currituck_defconfig b/arch/powerpc/configs/44x/currituck_defconfig
index 34c86b3abecb..717827219921 100644
--- a/arch/powerpc/configs/44x/currituck_defconfig
+++ b/arch/powerpc/configs/44x/currituck_defconfig
@@ -6,7 +6,6 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/fsp2_defconfig b/arch/powerpc/configs/44x/fsp2_defconfig
index 30845ce0885a..8da316e61a08 100644
--- a/arch/powerpc/configs/44x/fsp2_defconfig
+++ b/arch/powerpc/configs/44x/fsp2_defconfig
@@ -17,7 +17,6 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
index 2c3834eebca3..c11e777b2f3d 100644
--- a/arch/powerpc/configs/44x/iss476-smp_defconfig
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -7,7 +7,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 42fbc70cec33..cc2c0d51f493 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -14,7 +14,6 @@ CONFIG_CPUSETS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 1de0dbf6cbba..63d611cc160f 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -12,7 +12,6 @@ CONFIG_CGROUPS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index 161351a18517..9424c1e67e1c 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 15ed8d0aa014..78606b7e42df 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -7,7 +7,6 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index 665a8d7cded0..7aefac5afab0 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -10,7 +10,6 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 60a30fffeda0..2c87e856d839 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -30,7 +30,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_BPF_SYSCALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 48759656a067..4f05a6652478 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -62,7 +62,6 @@ CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=m
CONFIG_VHOST_NET=m
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 33a01a9e86be..5cf49a515f8e 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -14,7 +14,6 @@ CONFIG_CPUSETS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index ef09f3cce1fa..6677ac0da45a 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -19,7 +19,6 @@ CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -1072,7 +1071,6 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_DEBUG_INFO=y
-CONFIG_UNUSED_SYMBOLS=y
CONFIG_HEADERS_INSTALL=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 142f1321fa58..f300dcb937cc 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -13,7 +13,6 @@ CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_PPC_POWERNV is not set
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index d5dece981c02..777221775c83 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -29,7 +29,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_BPF_SYSCALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c
index a6e650a97d8f..ffedea7e4bef 100644
--- a/arch/powerpc/crypto/sha256-spe-glue.c
+++ b/arch/powerpc/crypto/sha256-spe-glue.c
@@ -129,7 +129,7 @@ static int ppc_spe_sha256_update(struct shash_desc *desc, const u8 *data,
src += bytes;
len -= bytes;
- };
+ }
memcpy((char *)sctx->buf, src, len);
return 0;
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index d0b832cbbec8..939f3c94c8f3 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -56,35 +56,6 @@ int exit_vmx_usercopy(void);
int enter_vmx_ops(void);
void *exit_vmx_ops(void *dest);
-/* Traps */
-long machine_check_early(struct pt_regs *regs);
-long hmi_exception_realmode(struct pt_regs *regs);
-void SMIException(struct pt_regs *regs);
-void handle_hmi_exception(struct pt_regs *regs);
-void instruction_breakpoint_exception(struct pt_regs *regs);
-void RunModeException(struct pt_regs *regs);
-void single_step_exception(struct pt_regs *regs);
-void program_check_exception(struct pt_regs *regs);
-void alignment_exception(struct pt_regs *regs);
-void StackOverflow(struct pt_regs *regs);
-void stack_overflow_exception(struct pt_regs *regs);
-void kernel_fp_unavailable_exception(struct pt_regs *regs);
-void altivec_unavailable_exception(struct pt_regs *regs);
-void vsx_unavailable_exception(struct pt_regs *regs);
-void fp_unavailable_tm(struct pt_regs *regs);
-void altivec_unavailable_tm(struct pt_regs *regs);
-void vsx_unavailable_tm(struct pt_regs *regs);
-void facility_unavailable_exception(struct pt_regs *regs);
-void TAUException(struct pt_regs *regs);
-void altivec_assist_exception(struct pt_regs *regs);
-void unrecoverable_exception(struct pt_regs *regs);
-void kernel_bad_stack(struct pt_regs *regs);
-void system_reset_exception(struct pt_regs *regs);
-void machine_check_exception(struct pt_regs *regs);
-void emulation_assist_interrupt(struct pt_regs *regs);
-long do_slb_fault(struct pt_regs *regs, unsigned long ea);
-void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err);
-
/* signals, syscalls and interrupts */
long sys_swapcontext(struct ucontext __user *old_ctx,
struct ucontext __user *new_ctx,
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index a0117a9d5b06..73bc5d2c431d 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -95,12 +95,12 @@ static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
addr &= 0xf0000000; /* align addr to start of segment */
barrier(); /* make sure thread.kuap is updated before playing with SRs */
while (addr < end) {
- mtsrin(sr, addr);
+ mtsr(sr, addr);
sr += 0x111; /* next VSID */
sr &= 0xf0ffffff; /* clear VSID overflow */
addr += 0x10000000; /* address of next segment */
}
- isync(); /* Context sync required after mtsrin() */
+ isync(); /* Context sync required after mtsr() */
}
static __always_inline void allow_user_access(void __user *to, const void __user *from,
@@ -122,7 +122,7 @@ static __always_inline void allow_user_access(void __user *to, const void __user
end = min(addr + size, TASK_SIZE);
current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
- kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end); /* Clear Ks */
+ kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end); /* Clear Ks */
}
static __always_inline void prevent_user_access(void __user *to, const void __user *from,
@@ -151,7 +151,7 @@ static __always_inline void prevent_user_access(void __user *to, const void __us
}
current->thread.kuap = 0;
- kuap_update_sr(mfsrin(addr) | SR_KS, addr, end); /* set Ks */
+ kuap_update_sr(mfsr(addr) | SR_KS, addr, end); /* set Ks */
}
static inline unsigned long prevent_user_access_return(void)
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index 685c589e723f..b85f8e114a9c 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -94,7 +94,7 @@ typedef struct {
} mm_context_t;
void update_bats(void);
-static inline void cleanup_cpu_mmu_context(void) { };
+static inline void cleanup_cpu_mmu_context(void) { }
/* patch sites */
extern s32 patch__hash_page_A0, patch__hash_page_A1, patch__hash_page_A2;
diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
index f50f72e535aa..8bd905050896 100644
--- a/arch/powerpc/include/asm/book3s/64/kup.h
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -199,25 +199,31 @@ DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
#ifdef CONFIG_PPC_PKEY
+extern u64 __ro_after_init default_uamor;
+extern u64 __ro_after_init default_amr;
+extern u64 __ro_after_init default_iamr;
+
#include <asm/mmu.h>
#include <asm/ptrace.h>
-/*
- * For kernel thread that doesn't have thread.regs return
- * default AMR/IAMR values.
+/* A kernel thread using kthread_use_mm() should inherit the
+ * AMR value of the address space it operates on. But the AMR value is
+ * thread-specific, and we inherit the address space rather than the
+ * thread's access restrictions. Because of this, ignore the AMR value
+ * when accessing userspace via a kernel thread.
*/
static inline u64 current_thread_amr(void)
{
if (current->thread.regs)
return current->thread.regs->amr;
- return AMR_KUAP_BLOCKED;
+ return default_amr;
}
static inline u64 current_thread_iamr(void)
{
if (current->thread.regs)
return current->thread.regs->iamr;
- return AMR_KUEP_BLOCKED;
+ return default_iamr;
}
#endif /* CONFIG_PPC_PKEY */
@@ -333,7 +339,7 @@ static inline unsigned long get_kuap(void)
* This has no effect in terms of actually blocking things on hash,
* so it doesn't break anything.
*/
- if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
+ if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
return AMR_KUAP_BLOCKED;
return mfspr(SPRN_AMR);
@@ -341,7 +347,7 @@ static inline unsigned long get_kuap(void)
static inline void set_kuap(unsigned long value)
{
- if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
+ if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
return;
/*
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 066b1d34c7bc..f911bdb68d8b 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -454,6 +454,8 @@ static inline unsigned long hpt_hash(unsigned long vpn,
#define HPTE_NOHPTE_UPDATE 0x2
#define HPTE_USE_KERNEL_KEY 0x4
+long hpte_insert_repeating(unsigned long hash, unsigned long vpn, unsigned long pa,
+ unsigned long rflags, unsigned long vflags, int psize, int ssize);
extern int __hash_page_4K(unsigned long ea, unsigned long access,
unsigned long vsid, pte_t *ptep, unsigned long trap,
unsigned long flags, int ssize, int subpage_prot);
@@ -467,6 +469,8 @@ extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
unsigned long dsisr);
+void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc);
+int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr, unsigned long msr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
pte_t *ptep, unsigned long trap, unsigned long flags,
int ssize, unsigned int shift, unsigned int mmu_psize);
@@ -521,6 +525,7 @@ void slb_dump_contents(struct slb_entry *slb_ptr);
extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
+void preload_new_slb_context(unsigned long start, unsigned long sp);
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 995bbcdd0ef8..eace8c3f7b0a 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -239,7 +239,7 @@ static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
#ifdef CONFIG_PPC_PSERIES
extern void radix_init_pseries(void);
#else
-static inline void radix_init_pseries(void) { };
+static inline void radix_init_pseries(void) { }
#endif
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index a39886681629..058601efbc8a 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -388,11 +388,28 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
({ \
- int __r; \
- __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
- __r; \
+ __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
})
+/*
+ * On Book3S CPUs, clearing the accessed bit without a TLB flush
+ * doesn't cause data corruption. [ It could cause incorrect
+ * page aging and the (mistaken) reclaim of hot pages, but the
+ * chance of that should be relatively low. ]
+ *
+ * So as a performance optimization don't flush the TLB when
+ * clearing the accessed bit, it will eventually be flushed by
+ * a context switch or a VM operation anyway. [ In the rare
+ * event of it not getting flushed for a long time the delay
+ * shouldn't really matter because there's no real memory
+ * pressure for swapout to react to. ]
+ */
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young ptep_test_and_clear_young
+
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+#define pmdp_clear_flush_young pmdp_test_and_clear_young
+
static inline int __pte_write(pte_t pte)
{
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
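As an illustrative aside (not part of the patch): because ptep_clear_flush_young is mapped onto the non-flushing __ptep_test_and_clear_young() above, a page-aging check on Book3S no longer issues a TLB flush. A minimal sketch of a caller, with a hypothetical function name:

	/* Hypothetical caller, for illustration only: on Book3S this clears
	 * the accessed bit without a TLB flush, relying on a later context
	 * switch or VM operation to flush, as the comment above explains.
	 */
	static bool example_page_was_referenced(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
	{
		return ptep_clear_flush_young(vma, addr, ptep);
	}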
diff --git a/arch/powerpc/include/asm/book3s/64/pkeys.h b/arch/powerpc/include/asm/book3s/64/pkeys.h
index 3b8640498f5b..5b178139f3c0 100644
--- a/arch/powerpc/include/asm/book3s/64/pkeys.h
+++ b/arch/powerpc/include/asm/book3s/64/pkeys.h
@@ -5,10 +5,6 @@
#include <asm/book3s/64/hash-pkey.h>
-extern u64 __ro_after_init default_uamor;
-extern u64 __ro_after_init default_amr;
-extern u64 __ro_after_init default_iamr;
-
static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags)
{
if (!mmu_has_feature(MMU_FTR_PKEY))
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 94439e0cefc9..8b33601cdb9d 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -35,7 +35,7 @@ extern void radix__flush_pwc_lpid(unsigned int lpid);
extern void radix__flush_all_lpid(unsigned int lpid);
extern void radix__flush_all_lpid_guest(unsigned int lpid);
#else
-static inline void radix__tlbiel_all(unsigned int action) { WARN_ON(1); };
+static inline void radix__tlbiel_all(unsigned int action) { WARN_ON(1); }
static inline void radix__flush_tlb_lpid_page(unsigned int lpid,
unsigned long addr,
unsigned long page_size)
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index dcb5c3839d2f..215973b4cb26 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -31,7 +31,7 @@ static inline void tlbiel_all(void)
hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}
#else
-static inline void tlbiel_all(void) { BUG(); };
+static inline void tlbiel_all(void) { BUG(); }
#endif
static inline void tlbiel_all_lpid(bool radix)
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index 464f8ca8a5c9..d1635ffbb179 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -111,12 +111,15 @@
#ifndef __ASSEMBLY__
struct pt_regs;
-extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
-extern void bad_page_fault(struct pt_regs *, unsigned long, int);
-void __bad_page_fault(struct pt_regs *regs, unsigned long address, int sig);
+long do_page_fault(struct pt_regs *);
+long hash__do_page_fault(struct pt_regs *);
+void bad_page_fault(struct pt_regs *, int);
+void __bad_page_fault(struct pt_regs *regs, int sig);
+void do_bad_page_fault_segv(struct pt_regs *regs);
extern void _exception(int, struct pt_regs *, int, unsigned long);
extern void _exception_pkey(struct pt_regs *, unsigned long, int);
extern void die(const char *, struct pt_regs *, long);
+void die_mce(const char *str, struct pt_regs *regs, long err);
extern bool die_will_crash(void);
extern void panic_flush_kmsg_start(void);
extern void panic_flush_kmsg_end(void);
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 138e46d8c04e..f63495109f63 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -8,6 +8,12 @@
#include <asm/cputable.h>
#include <asm/cpu_has_feature.h>
+/*
+ * This flag is used to indicate that the page pointed to by a pte is clean
+ * and does not require cleaning before returning it to the user.
+ */
+#define PG_dcache_clean PG_arch_1
+
#ifdef CONFIG_PPC_BOOK3S_64
/*
* Book3s has no ptesync after setting a pte, so without this ptesync it's
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 5f21a5bab467..e85c849214a2 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -17,16 +17,6 @@ struct cpu_spec;
typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
typedef void (*cpu_restore_t)(void);
-enum powerpc_oprofile_type {
- PPC_OPROFILE_INVALID = 0,
- PPC_OPROFILE_RS64 = 1,
- PPC_OPROFILE_POWER4 = 2,
- PPC_OPROFILE_G4 = 3,
- PPC_OPROFILE_FSL_EMB = 4,
- PPC_OPROFILE_CELL = 5,
- PPC_OPROFILE_PA6T = 6,
-};
-
enum powerpc_pmc_type {
PPC_PMC_DEFAULT = 0,
PPC_PMC_IBM = 1,
@@ -83,16 +73,6 @@ struct cpu_spec {
/* Used by oprofile userspace to select the right counters */
char *oprofile_cpu_type;
- /* Processor specific oprofile operations */
- enum powerpc_oprofile_type oprofile_type;
-
- /* Bit locations inside the mmcra change */
- unsigned long oprofile_mmcra_sihv;
- unsigned long oprofile_mmcra_sipr;
-
- /* Bits to clear during an oprofile exception */
- unsigned long oprofile_mmcra_clear;
-
/* Name of processor class, for the ELF AT_PLATFORM entry */
char *platform;
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index ed75d1c318e3..504f7fe6711a 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -87,6 +87,17 @@ static notrace inline void account_cpu_user_exit(void)
acct->starttime_user = tb;
}
+static notrace inline void account_stolen_time(void)
+{
+#ifdef CONFIG_PPC_SPLPAR
+ if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+ struct lppaca *lp = local_paca->lppaca_ptr;
+
+ if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx)))
+ accumulate_stolen_time();
+ }
+#endif
+}
#endif /* __KERNEL__ */
#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
@@ -96,5 +107,8 @@ static inline void account_cpu_user_entry(void)
static inline void account_cpu_user_exit(void)
{
}
+static notrace inline void account_stolen_time(void)
+{
+}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#endif /* __POWERPC_CPUTIME_H */
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
index ec57daf87f40..86a14736c76c 100644
--- a/arch/powerpc/include/asm/debug.h
+++ b/arch/powerpc/include/asm/debug.h
@@ -50,10 +50,6 @@ bool ppc_breakpoint_available(void);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
extern void do_send_trap(struct pt_regs *regs, unsigned long address,
unsigned long error_code, int brkpt);
-#else
-
-extern void do_break(struct pt_regs *regs, unsigned long address,
- unsigned long error_code);
#endif
#endif /* _ASM_POWERPC_DEBUG_H */
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index aa6a5ef5d483..7604673787d6 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -137,7 +137,7 @@ extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup;
#ifdef CONFIG_PPC_PSERIES
void pseries_probe_fw_features(void);
#else
-static inline void pseries_probe_fw_features(void) { };
+static inline void pseries_probe_fw_features(void) { }
#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 013165e62618..f18c543bc01d 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -17,8 +17,6 @@ extern bool hugetlb_disabled;
void hugetlbpage_init_default(void);
-void flush_dcache_icache_hugepage(struct page *page);
-
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
unsigned long len);
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index c98f5141e3fc..ed6086d57b22 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -535,9 +535,12 @@ struct h_cpu_char_result {
u64 behaviour;
};
-/* Register state for entering a nested guest with H_ENTER_NESTED */
+/*
+ * Register state for entering a nested guest with H_ENTER_NESTED.
+ * New members must be added at the end.
+ */
struct hv_guest_state {
- u64 version; /* version of this structure layout */
+ u64 version; /* version of this structure layout, must be first */
u32 lpid;
u32 vcpu_token;
/* These registers are hypervisor privileged (at least for writing) */
@@ -566,10 +569,26 @@ struct hv_guest_state {
u64 pidr;
u64 cfar;
u64 ppr;
+ /* Version 1 ends here */
+ u64 dawr1;
+ u64 dawrx1;
+ /* Version 2 ends here */
};
/* Latest version of hv_guest_state structure */
-#define HV_GUEST_STATE_VERSION 1
+#define HV_GUEST_STATE_VERSION 2
+
+static inline int hv_guest_state_size(unsigned int version)
+{
+ switch (version) {
+ case 1:
+ return offsetofend(struct hv_guest_state, ppr);
+ case 2:
+ return offsetofend(struct hv_guest_state, dawrx1);
+ default:
+ return -1;
+ }
+}
/*
* From the document "H_GetPerformanceCounterInfo Interface" v1.07
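An illustrative aside, not part of the patch: hv_guest_state_size() lets the hypervisor honour the layout version an L1 guest passed in and copy only the fields that version defines. A minimal sketch under that assumption (the helper name below is hypothetical):

	/* Hypothetical helper, for illustration only: copy only the portion
	 * of hv_guest_state defined by the caller's version, leaving newer
	 * fields (dawr1/dawrx1 for version 1 callers) zeroed.
	 */
	static int example_copy_guest_state(struct hv_guest_state *dst,
					    const struct hv_guest_state *src)
	{
		int size = hv_guest_state_size(src->version);

		if (size < 0)
			return -EINVAL;		/* unknown layout version */

		memset(dst, 0, sizeof(*dst));
		memcpy(dst, src, size);
		return 0;
	}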
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 0363734ff56e..56a98936a6a9 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -38,6 +38,8 @@
#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE)
#endif
+#endif /* CONFIG_PPC64 */
+
/*
* flags for paca->irq_soft_mask
*/
@@ -46,18 +48,56 @@
#define IRQS_PMI_DISABLED 2
#define IRQS_ALL_DISABLED (IRQS_DISABLED | IRQS_PMI_DISABLED)
-#endif /* CONFIG_PPC64 */
-
#ifndef __ASSEMBLY__
-extern void replay_system_reset(void);
-extern void replay_soft_interrupts(void);
+static inline void __hard_irq_enable(void)
+{
+ if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
+ wrtee(MSR_EE);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_EIE);
+ else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ __mtmsrd(MSR_EE | MSR_RI, 1);
+ else
+ mtmsr(mfmsr() | MSR_EE);
+}
-extern void timer_interrupt(struct pt_regs *);
-extern void timer_broadcast_interrupt(void);
-extern void performance_monitor_exception(struct pt_regs *regs);
-extern void WatchdogException(struct pt_regs *regs);
-extern void unknown_exception(struct pt_regs *regs);
+static inline void __hard_irq_disable(void)
+{
+ if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
+ wrtee(0);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_EID);
+ else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ __mtmsrd(MSR_RI, 1);
+ else
+ mtmsr(mfmsr() & ~MSR_EE);
+}
+
+static inline void __hard_EE_RI_disable(void)
+{
+ if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
+ wrtee(0);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_NRI);
+ else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ __mtmsrd(0, 1);
+ else
+ mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
+}
+
+static inline void __hard_RI_enable(void)
+{
+ if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
+ return;
+
+ if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_EID);
+ else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ __mtmsrd(MSR_RI, 1);
+ else
+ mtmsr(mfmsr() | MSR_RI);
+}
#ifdef CONFIG_PPC64
#include <asm/paca.h>
@@ -221,18 +261,6 @@ static inline bool arch_irqs_disabled(void)
#endif /* CONFIG_PPC_BOOK3S */
-#ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable() wrtee(MSR_EE)
-#define __hard_irq_disable() wrtee(0)
-#define __hard_EE_RI_disable() wrtee(0)
-#define __hard_RI_enable() do { } while (0)
-#else
-#define __hard_irq_enable() __mtmsrd(MSR_EE|MSR_RI, 1)
-#define __hard_irq_disable() __mtmsrd(MSR_RI, 1)
-#define __hard_EE_RI_disable() __mtmsrd(0, 1)
-#define __hard_RI_enable() __mtmsrd(MSR_RI, 1)
-#endif
-
#define hard_irq_disable() do { \
unsigned long flags; \
__hard_irq_disable(); \
@@ -296,8 +324,17 @@ extern void irq_set_pending_from_srr1(unsigned long srr1);
extern void force_external_irq_replay(void);
+static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
+{
+ regs->softe = val;
+}
#else /* CONFIG_PPC64 */
+static inline notrace unsigned long irq_soft_mask_return(void)
+{
+ return 0;
+}
+
static inline unsigned long arch_local_save_flags(void)
{
return mfmsr();
@@ -327,22 +364,12 @@ static inline unsigned long arch_local_irq_save(void)
static inline void arch_local_irq_disable(void)
{
- if (IS_ENABLED(CONFIG_BOOKE))
- wrtee(0);
- else if (IS_ENABLED(CONFIG_PPC_8xx))
- wrtspr(SPRN_EID);
- else
- mtmsr(mfmsr() & ~MSR_EE);
+ __hard_irq_disable();
}
static inline void arch_local_irq_enable(void)
{
- if (IS_ENABLED(CONFIG_BOOKE))
- wrtee(MSR_EE);
- else if (IS_ENABLED(CONFIG_PPC_8xx))
- wrtspr(SPRN_EIE);
- else
- mtmsr(mfmsr() | MSR_EE);
+ __hard_irq_enable();
}
static inline bool arch_irqs_disabled_flags(unsigned long flags)
@@ -364,6 +391,9 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
static inline void may_hard_irq_enable(void) { }
+static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
+{
+}
#endif /* CONFIG_PPC64 */
#define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST
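As an aside (illustrative only): converting __hard_irq_enable() and friends from per-platform macros into IS_ENABLED()-based inline functions means callers no longer need #ifdefs; on 32-bit, arch_local_irq_disable()/enable() now simply call these helpers, as the hunk above shows. A sketch of the equivalent open-coded form, with a hypothetical function name:

	/* Hypothetical caller, for illustration only: the compiler picks the
	 * right mechanism per platform (wrtee, SPRN_EIE/EID, or mtmsrd) from
	 * the inline helpers above.
	 */
	static void example_hard_masked_section(void)
	{
		__hard_irq_disable();
		/* ... work that must not be interrupted by a hard IRQ ... */
		__hard_irq_enable();
	}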
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
new file mode 100644
index 000000000000..aedfba29e43a
--- /dev/null
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -0,0 +1,449 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_INTERRUPT_H
+#define _ASM_POWERPC_INTERRUPT_H
+
+#include <linux/context_tracking.h>
+#include <linux/hardirq.h>
+#include <asm/cputime.h>
+#include <asm/ftrace.h>
+#include <asm/kprobes.h>
+#include <asm/runlatch.h>
+
+struct interrupt_state {
+#ifdef CONFIG_PPC_BOOK3E_64
+ enum ctx_state ctx_state;
+#endif
+};
+
+static inline void booke_restore_dbcr0(void)
+{
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+ unsigned long dbcr0 = current->thread.debug.dbcr0;
+
+ if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
+ mtspr(SPRN_DBSR, -1);
+ mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
+ }
+#endif
+}
+
+static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
+{
+ /*
+ * Book3E reconciles irq soft mask in asm
+ */
+#ifdef CONFIG_PPC_BOOK3S_64
+ if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
+ trace_hardirqs_off();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+ if (user_mode(regs)) {
+ CT_WARN_ON(ct_state() != CONTEXT_USER);
+ user_exit_irqoff();
+
+ account_cpu_user_entry();
+ account_stolen_time();
+ } else {
+ /*
+ * CT_WARN_ON comes here via program_check_exception,
+ * so avoid recursion.
+ */
+ if (TRAP(regs) != 0x700)
+ CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
+ }
+#endif
+
+#ifdef CONFIG_PPC_BOOK3E_64
+ state->ctx_state = exception_enter();
+ if (user_mode(regs))
+ account_cpu_user_entry();
+#endif
+}
+
+/*
+ * Care should be taken to note that interrupt_exit_prepare and
+ * interrupt_async_exit_prepare do not necessarily return immediately to
+ * regs context (e.g., if regs is usermode, we don't necessarily return to
+ * user mode). Other interrupts might be taken between here and return,
+ * context switch / preemption may occur in the exit path after this, or a
+ * signal may be delivered, etc.
+ *
+ * The real interrupt exit code is platform specific, e.g.,
+ * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
+ *
+ * However interrupt_nmi_exit_prepare does return directly to regs, because
+ * NMIs do not do "exit work" or replay soft-masked interrupts.
+ */
+static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
+{
+#ifdef CONFIG_PPC_BOOK3E_64
+ exception_exit(state->ctx_state);
+#endif
+
+ /*
+ * Book3S exits to user via interrupt_exit_user_prepare(), which does
+ * context tracking, which is a cleaner way to handle PREEMPT=y
+ * and avoid context entry/exit in e.g. preempt_schedule_irq(),
+ * which is likely to be where the core code wants to end up.
+ *
+ * The above comment explains why we can't do the
+ *
+ * if (user_mode(regs))
+ * user_exit_irqoff();
+ *
+ * sequence here.
+ */
+}
+
+static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ if (cpu_has_feature(CPU_FTR_CTRL) &&
+ !test_thread_local_flags(_TLF_RUNLATCH))
+ __ppc64_runlatch_on();
+#endif
+
+ interrupt_enter_prepare(regs, state);
+ irq_enter();
+}
+
+static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
+{
+ irq_exit();
+ interrupt_exit_prepare(regs, state);
+}
+
+struct interrupt_nmi_state {
+#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S_64
+ u8 irq_soft_mask;
+ u8 irq_happened;
+#endif
+ u8 ftrace_enabled;
+#endif
+};
+
+static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
+{
+#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S_64
+ state->irq_soft_mask = local_paca->irq_soft_mask;
+ state->irq_happened = local_paca->irq_happened;
+
+ /*
+ * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
+ * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
+ * because that goes through irq tracing which we don't want in NMI.
+ */
+ local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+ /* Don't do any per-CPU operations until interrupt state is fixed */
+#endif
+ /* Allow DEC and PMI to be traced when they are soft-NMI */
+ if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) {
+ state->ftrace_enabled = this_cpu_get_ftrace_enabled();
+ this_cpu_set_ftrace_enabled(0);
+ }
+#endif
+
+ /*
+ * Do not use nmi_enter() for pseries hash guest taking a real-mode
+ * NMI because not everything it touches is within the RMA limit.
+ */
+ if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
+ !firmware_has_feature(FW_FEATURE_LPAR) ||
+ radix_enabled() || (mfmsr() & MSR_DR))
+ nmi_enter();
+}
+
+static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
+{
+ if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
+ !firmware_has_feature(FW_FEATURE_LPAR) ||
+ radix_enabled() || (mfmsr() & MSR_DR))
+ nmi_exit();
+
+#ifdef CONFIG_PPC64
+ if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260)
+ this_cpu_set_ftrace_enabled(state->ftrace_enabled);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* Check we didn't change the pending interrupt mask. */
+ WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
+ local_paca->irq_happened = state->irq_happened;
+ local_paca->irq_soft_mask = state->irq_soft_mask;
+#endif
+#endif
+}
+
+/*
+ * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
+ * function definition. The reason for this is the noinstr section is placed
+ * after the main text section, i.e., very far away from the interrupt entry
+ * asm. That creates problems with fitting linker stubs when building large
+ * kernels.
+ */
+#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address
+
+/**
+ * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ */
+#define DECLARE_INTERRUPT_HANDLER_RAW(func) \
+ __visible long func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ *
+ * @func is called from ASM entry code.
+ *
+ * This is a plain function which does no tracing, reconciling, etc.
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ *
+ * Raw interrupt handlers must not enable or disable interrupts or
+ * schedule. Tracing and instrumentation (ftrace, lockdep, etc.) are
+ * not advisable either; they may be possible in a pinch, but the
+ * resulting trace will look odd at least.
+ *
+ * A raw handler may call one of the other interrupt handler functions
+ * to be converted into that interrupt context without these restrictions.
+ *
+ * On PPC64, _RAW handlers may return with fast_interrupt_return.
+ *
+ * Specific handlers may have additional restrictions.
+ */
+#define DEFINE_INTERRUPT_HANDLER_RAW(func) \
+static __always_inline long ____##func(struct pt_regs *regs); \
+ \
+interrupt_handler long func(struct pt_regs *regs) \
+{ \
+ long ret; \
+ \
+ ret = ____##func (regs); \
+ \
+ return ret; \
+} \
+NOKPROBE_SYMBOL(func); \
+ \
+static __always_inline long ____##func(struct pt_regs *regs)
+
+/**
+ * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
+ * @func: Function name of the entry point
+ */
+#define DECLARE_INTERRUPT_HANDLER(func) \
+ __visible void func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
+ * @func: Function name of the entry point
+ *
+ * @func is called from ASM entry code.
+ *
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ */
+#define DEFINE_INTERRUPT_HANDLER(func) \
+static __always_inline void ____##func(struct pt_regs *regs); \
+ \
+interrupt_handler void func(struct pt_regs *regs) \
+{ \
+ struct interrupt_state state; \
+ \
+ interrupt_enter_prepare(regs, &state); \
+ \
+ ____##func (regs); \
+ \
+ interrupt_exit_prepare(regs, &state); \
+} \
+NOKPROBE_SYMBOL(func); \
+ \
+static __always_inline void ____##func(struct pt_regs *regs)
+
+/**
+ * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ */
+#define DECLARE_INTERRUPT_HANDLER_RET(func) \
+ __visible long func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ *
+ * @func is called from ASM entry code.
+ *
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ */
+#define DEFINE_INTERRUPT_HANDLER_RET(func) \
+static __always_inline long ____##func(struct pt_regs *regs); \
+ \
+interrupt_handler long func(struct pt_regs *regs) \
+{ \
+ struct interrupt_state state; \
+ long ret; \
+ \
+ interrupt_enter_prepare(regs, &state); \
+ \
+ ret = ____##func (regs); \
+ \
+ interrupt_exit_prepare(regs, &state); \
+ \
+ return ret; \
+} \
+NOKPROBE_SYMBOL(func); \
+ \
+static __always_inline long ____##func(struct pt_regs *regs)
+
+/**
+ * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
+ * @func: Function name of the entry point
+ */
+#define DECLARE_INTERRUPT_HANDLER_ASYNC(func) \
+ __visible void func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
+ * @func: Function name of the entry point
+ *
+ * @func is called from ASM entry code.
+ *
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ */
+#define DEFINE_INTERRUPT_HANDLER_ASYNC(func) \
+static __always_inline void ____##func(struct pt_regs *regs); \
+ \
+interrupt_handler void func(struct pt_regs *regs) \
+{ \
+ struct interrupt_state state; \
+ \
+ interrupt_async_enter_prepare(regs, &state); \
+ \
+ ____##func (regs); \
+ \
+ interrupt_async_exit_prepare(regs, &state); \
+} \
+NOKPROBE_SYMBOL(func); \
+ \
+static __always_inline void ____##func(struct pt_regs *regs)
+
+/**
+ * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ */
+#define DECLARE_INTERRUPT_HANDLER_NMI(func) \
+ __visible long func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ *
+ * @func is called from ASM entry code.
+ *
+ * The macro is written so it acts as function definition. Append the
+ * body with a pair of curly brackets.
+ */
+#define DEFINE_INTERRUPT_HANDLER_NMI(func) \
+static __always_inline long ____##func(struct pt_regs *regs); \
+ \
+interrupt_handler long func(struct pt_regs *regs) \
+{ \
+ struct interrupt_nmi_state state; \
+ long ret; \
+ \
+ interrupt_nmi_enter_prepare(regs, &state); \
+ \
+ ret = ____##func (regs); \
+ \
+ interrupt_nmi_exit_prepare(regs, &state); \
+ \
+ return ret; \
+} \
+NOKPROBE_SYMBOL(func); \
+ \
+static __always_inline long ____##func(struct pt_regs *regs)
+
+
+/* Interrupt handlers */
+/* kernel/traps.c */
+DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
+#ifdef CONFIG_PPC_BOOK3S_64
+DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
+#else
+DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
+#endif
+DECLARE_INTERRUPT_HANDLER(SMIException);
+DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
+DECLARE_INTERRUPT_HANDLER(unknown_exception);
+DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
+DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
+DECLARE_INTERRUPT_HANDLER(RunModeException);
+DECLARE_INTERRUPT_HANDLER(single_step_exception);
+DECLARE_INTERRUPT_HANDLER(program_check_exception);
+DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
+DECLARE_INTERRUPT_HANDLER(alignment_exception);
+DECLARE_INTERRUPT_HANDLER(StackOverflow);
+DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
+DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
+DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
+DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
+DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
+DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
+DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
+DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
+DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
+DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
+DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
+DECLARE_INTERRUPT_HANDLER(DebugException);
+DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
+DECLARE_INTERRUPT_HANDLER(CacheLockingException);
+DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
+DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
+DECLARE_INTERRUPT_HANDLER(unrecoverable_exception);
+DECLARE_INTERRUPT_HANDLER(WatchdogException);
+DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);
+
+/* slb.c */
+DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
+DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);
+
+/* hash_utils.c */
+DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);
+
+/* fault.c */
+DECLARE_INTERRUPT_HANDLER_RET(do_page_fault);
+DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);
+
+/* process.c */
+DECLARE_INTERRUPT_HANDLER(do_break);
+
+/* time.c */
+DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);
+
+/* mce.c */
+DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
+DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);
+
+DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);
+
+void replay_system_reset(void);
+void replay_soft_interrupts(void);
+
+static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
+{
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
+}
+
+#endif /* _ASM_POWERPC_INTERRUPT_H */
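An illustrative aside, not part of the patch: the DEFINE_INTERRUPT_HANDLER* macros above are written to act as the function definition itself, with the handler body appended in braces. A hypothetical handler (the name and body are made up) would look like:

	/* Hypothetical usage, for illustration only: the wrapper generated by
	 * the macro runs interrupt_enter_prepare()/interrupt_exit_prepare()
	 * around the inlined ____example_exception() body below.
	 */
	DEFINE_INTERRUPT_HANDLER(example_exception)
	{
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
	}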
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 4f983ca4030a..f3f264e441a7 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -37,8 +37,6 @@ extern int distribute_irqs;
struct pt_regs;
-#define __ARCH_HAS_DO_SOFTIRQ
-
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
/*
* Per-cpu stacks for handling critical, debug and machine check
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 55d6ede30c19..9ab344d29a54 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -136,6 +136,7 @@ int load_crashdump_segments_ppc64(struct kimage *image,
int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
const void *fdt, unsigned long kernel_load_addr,
unsigned long fdt_load_addr);
+unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image);
int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
unsigned long initrd_load_addr,
unsigned long initrd_len, const char *cmdline);
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index bf221a2a523e..7ec21af49a45 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -91,6 +91,7 @@ static __always_inline void setup_kup(void)
static inline void allow_read_from_user(const void __user *from, unsigned long size)
{
+ barrier_nospec();
allow_user_access(NULL, from, size, KUAP_READ);
}
@@ -102,6 +103,7 @@ static inline void allow_write_to_user(void __user *to, unsigned long size)
static inline void allow_read_write_user(void __user *to, const void __user *from,
unsigned long size)
{
+ barrier_nospec();
allow_user_access(to, from, size, KUAP_READ_WRITE);
}
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index d32ec9ae73bd..2f5f919f6cd3 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -277,6 +277,13 @@ extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
+long kvmppc_read_intr(void);
+void kvmppc_bad_interrupt(struct pt_regs *regs);
+void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip);
+void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip);
+void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
+void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 078f4648ea27..b6d31bff5209 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -74,16 +74,6 @@ struct kvm_split_mode {
u8 do_nap;
u8 napped[MAX_SMT_THREADS];
struct kvmppc_vcore *vc[MAX_SUBCORES];
- /* Bits for changing lpcr on P9 */
- unsigned long lpcr_req;
- unsigned long lpidr_req;
- unsigned long host_lpcr;
- u32 do_set;
- u32 do_restore;
- union {
- u32 allphases;
- u8 phase[4];
- } lpcr_sync;
};
/*
@@ -110,7 +100,6 @@ struct kvmppc_host_state {
u8 hwthread_state;
u8 host_ipi;
u8 ptid; /* thread number within subcore when split */
- u8 tid; /* thread number within whole core */
u8 fake_suspend;
struct kvm_vcpu *kvm_vcpu;
struct kvmppc_vcore *kvm_vcore;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index d67a470e95a3..05fb00d37609 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -28,7 +28,6 @@
#define KVM_MAX_VCPUS NR_CPUS
#define KVM_MAX_VCORES NR_CPUS
-#define KVM_USER_MEM_SLOTS 512
#include <asm/cputhreads.h>
@@ -307,6 +306,7 @@ struct kvm_arch {
u8 svm_enabled;
bool threads_indep;
bool nested_enable;
+ bool dawr1_enabled;
pgd_t *pgtable;
u64 process_table;
struct dentry *debugfs_dir;
@@ -584,8 +584,10 @@ struct kvm_vcpu_arch {
u32 ctrl;
u32 dabrx;
ulong dabr;
- ulong dawr;
- ulong dawrx;
+ ulong dawr0;
+ ulong dawrx0;
+ ulong dawr1;
+ ulong dawrx1;
ulong ciabr;
ulong cfar;
ulong ppr;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 0a056c64c317..8aacd76bb702 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -314,6 +314,8 @@ struct kvmppc_ops {
int size);
int (*enable_svm)(struct kvm *kvm);
int (*svm_off)(struct kvm *kvm);
+ int (*enable_dawr1)(struct kvm *kvm);
+ bool (*hash_v3_possible)(void);
};
extern struct kvmppc_ops *kvmppc_hv_ops;
@@ -627,9 +629,9 @@ extern int h_ipi_redirect;
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
struct kvm *kvm)
{ return NULL; }
-static inline void kvmppc_alloc_host_rm_ops(void) {};
-static inline void kvmppc_free_host_rm_ops(void) {};
-static inline void kvmppc_free_pimap(struct kvm *kvm) {};
+static inline void kvmppc_alloc_host_rm_ops(void) {}
+static inline void kvmppc_free_host_rm_ops(void) {}
+static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
@@ -881,9 +883,9 @@ static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
/* Clear i-cache for new pages */
page = pfn_to_page(pfn);
- if (!test_bit(PG_arch_1, &page->flags)) {
+ if (!test_bit(PG_dcache_clean, &page->flags)) {
flush_dcache_icache_page(page);
- set_bit(PG_arch_1, &page->flags);
+ set_bit(PG_dcache_clean, &page->flags);
}
}
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index cf6ebbc16cb4..764f2732a821 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -59,6 +59,9 @@ struct machdep_calls {
int (*pcibios_root_bridge_prepare)(struct pci_host_bridge
*bridge);
+ /* finds all the pci_controllers present at boot */
+ void (*discover_phbs)(void);
+
/* To setup PHBs when using automatic OF platform driver for PCI */
int (*pci_setup_phb)(struct pci_controller *host);
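Illustrative aside, not part of the patch: a platform opts into the new hook by pointing discover_phbs at its PHB scan routine in its machdep_calls definition. A sketch with hypothetical names:

	/* Hypothetical platform wiring, for illustration only. */
	static void __init example_discover_phbs(void)
	{
		/* Walk the device tree / firmware tables and create a
		 * pci_controller for each host bridge found. */
	}

	define_machine(example_platform) {
		.name		= "Example",
		.discover_phbs	= example_discover_phbs,
	};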
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index e6c27ae843dc..331d944280b8 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -204,7 +204,18 @@ struct mce_error_info {
bool ignore_event;
};
-#define MAX_MC_EVT 100
+#define MAX_MC_EVT 10
+
+struct mce_info {
+ int mce_nest_count;
+ struct machine_check_event mce_event[MAX_MC_EVT];
+ /* Queue for delayed MCE events. */
+ int mce_queue_count;
+ struct machine_check_event mce_event_queue[MAX_MC_EVT];
+ /* Queue for delayed MCE UE events. */
+ int mce_ue_count;
+ struct machine_check_event mce_ue_event_queue[MAX_MC_EVT];
+};
/* Release flags for get_mce_event() */
#define MCE_EVENT_RELEASE true
@@ -234,4 +245,11 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs);
long __machine_check_early_realmode_p9(struct pt_regs *regs);
long __machine_check_early_realmode_p10(struct pt_regs *regs);
#endif /* CONFIG_PPC_BOOK3S_64 */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void mce_init(void);
+#else
+static inline void mce_init(void) { };
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
#endif /* __ASM_PPC64_MCE_H__ */
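Illustrative aside, not part of the patch: with the event arrays gathered into struct mce_info, per-CPU machine-check state is reached through the paca (see the paca.h hunk further down) rather than static per-CPU arrays. A minimal sketch with a hypothetical accessor name:

	/* Hypothetical accessor, for illustration only: returns the most
	 * recently pushed MCE event for this CPU, assuming mce_nest_count > 0.
	 */
	static struct machine_check_event *example_current_mce(void)
	{
		struct mce_info *info = local_paca->mce_info;

		return &info->mce_event[info->mce_nest_count - 1];
	}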
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index d5821834dba9..652ce85f9410 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -282,9 +282,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
}
#define pkey_mm_init(mm)
-#define thread_pkey_regs_save(thread)
-#define thread_pkey_regs_restore(new_thread, old_thread)
-#define thread_pkey_regs_init(thread)
#define arch_dup_pkeys(oldmm, mm)
static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags)
diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h
index 84b4cfe73edd..160abcb8e9fa 100644
--- a/arch/powerpc/include/asm/nmi.h
+++ b/arch/powerpc/include/asm/nmi.h
@@ -4,6 +4,7 @@
#ifdef CONFIG_PPC_WATCHDOG
extern void arch_touch_nmi_watchdog(void);
+long soft_nmi_interrupt(struct pt_regs *regs);
#else
static inline void arch_touch_nmi_watchdog(void) {}
#endif
diff --git a/arch/powerpc/include/asm/oprofile_impl.h b/arch/powerpc/include/asm/oprofile_impl.h
deleted file mode 100644
index 2a166c297f97..000000000000
--- a/arch/powerpc/include/asm/oprofile_impl.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * Based on alpha version.
- */
-
-#ifndef _ASM_POWERPC_OPROFILE_IMPL_H
-#define _ASM_POWERPC_OPROFILE_IMPL_H
-#ifdef __KERNEL__
-
-#define OP_MAX_COUNTER 8
-
-/* Per-counter configuration as set via oprofilefs. */
-struct op_counter_config {
- unsigned long enabled;
- unsigned long event;
- unsigned long count;
- /* Classic doesn't support per-counter user/kernel selection */
- unsigned long kernel;
- unsigned long user;
- unsigned long unit_mask;
-};
-
-/* System-wide configuration as set via oprofilefs. */
-struct op_system_config {
-#ifdef CONFIG_PPC64
- unsigned long mmcr0;
- unsigned long mmcr1;
- unsigned long mmcra;
-#ifdef CONFIG_OPROFILE_CELL
- /* Register for oprofile user tool to check cell kernel profiling
- * support.
- */
- unsigned long cell_support;
-#endif
-#endif
- unsigned long enable_kernel;
- unsigned long enable_user;
-};
-
-/* Per-arch configuration */
-struct op_powerpc_model {
- int (*reg_setup) (struct op_counter_config *,
- struct op_system_config *,
- int num_counters);
- int (*cpu_setup) (struct op_counter_config *);
- int (*start) (struct op_counter_config *);
- int (*global_start) (struct op_counter_config *);
- void (*stop) (void);
- void (*global_stop) (void);
- int (*sync_start)(void);
- int (*sync_stop)(void);
- void (*handle_interrupt) (struct pt_regs *,
- struct op_counter_config *);
- int num_counters;
-};
-
-extern struct op_powerpc_model op_model_fsl_emb;
-extern struct op_powerpc_model op_model_power4;
-extern struct op_powerpc_model op_model_7450;
-extern struct op_powerpc_model op_model_cell;
-extern struct op_powerpc_model op_model_pa6t;
-
-
-/* All the classic PPC parts use these */
-static inline unsigned int classic_ctr_read(unsigned int i)
-{
- switch(i) {
- case 0:
- return mfspr(SPRN_PMC1);
- case 1:
- return mfspr(SPRN_PMC2);
- case 2:
- return mfspr(SPRN_PMC3);
- case 3:
- return mfspr(SPRN_PMC4);
- case 4:
- return mfspr(SPRN_PMC5);
- case 5:
- return mfspr(SPRN_PMC6);
-
-/* No PPC32 chip has more than 6 so far */
-#ifdef CONFIG_PPC64
- case 6:
- return mfspr(SPRN_PMC7);
- case 7:
- return mfspr(SPRN_PMC8);
-#endif
- default:
- return 0;
- }
-}
-
-static inline void classic_ctr_write(unsigned int i, unsigned int val)
-{
- switch(i) {
- case 0:
- mtspr(SPRN_PMC1, val);
- break;
- case 1:
- mtspr(SPRN_PMC2, val);
- break;
- case 2:
- mtspr(SPRN_PMC3, val);
- break;
- case 3:
- mtspr(SPRN_PMC4, val);
- break;
- case 4:
- mtspr(SPRN_PMC5, val);
- break;
- case 5:
- mtspr(SPRN_PMC6, val);
- break;
-
-/* No PPC32 chip has more than 6, yet */
-#ifdef CONFIG_PPC64
- case 6:
- mtspr(SPRN_PMC7, val);
- break;
- case 7:
- mtspr(SPRN_PMC8, val);
- break;
-#endif
- default:
- break;
- }
-}
-
-
-extern void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_OPROFILE_IMPL_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 9454d29ff4b4..ec18ac818e3a 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -29,6 +29,7 @@
#include <asm/hmi.h>
#include <asm/cpuidle.h>
#include <asm/atomic.h>
+#include <asm/mce.h>
#include <asm-generic/mmiowb_types.h>
@@ -108,8 +109,7 @@ struct paca_struct {
*/
/* used for most interrupts/exceptions */
u64 exgen[EX_SIZE] __attribute__((aligned(0x80)));
- u64 exslb[EX_SIZE]; /* used for SLB/segment table misses
- * on the linear mapping */
+
/* SLB related definitions */
u16 vmalloc_sllp;
u8 slb_cache_ptr;
@@ -273,6 +273,9 @@ struct paca_struct {
#ifdef CONFIG_MMIOWB
struct mmiowb_state mmiowb_state;
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ struct mce_info *mce_info;
+#endif /* CONFIG_PPC_BOOK3S_64 */
} ____cacheline_aligned;
extern void copy_mm_to_paca(struct mm_struct *mm);
@@ -285,9 +288,9 @@ extern void free_unused_pacas(void);
#else /* CONFIG_PPC64 */
-static inline void allocate_paca_ptrs(void) { };
-static inline void allocate_paca(int cpu) { };
-static inline void free_unused_pacas(void) { };
+static inline void allocate_paca_ptrs(void) { }
+static inline void allocate_paca(int cpu) { }
+static inline void free_unused_pacas(void) { }
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
index edc08f04aef7..5d1726bb28e7 100644
--- a/arch/powerpc/include/asm/paravirt.h
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -10,6 +10,7 @@
#endif
#ifdef CONFIG_PPC_SPLPAR
+#include <linux/smp.h>
#include <asm/kvm_guest.h>
#include <asm/cputhreads.h>
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
index daec64d41b44..164e910bf654 100644
--- a/arch/powerpc/include/asm/perf_event.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -14,6 +14,7 @@
#include <asm/perf_event_server.h>
#else
static inline bool is_sier_available(void) { return false; }
+static inline unsigned long get_pmcs_ext_regs(int idx) { return 0; }
#endif
#ifdef CONFIG_FSL_EMB_PERF_EVENT
@@ -40,6 +41,7 @@ static inline bool is_sier_available(void) { return false; }
/* To support perf_regs sier update */
extern bool is_sier_available(void);
+extern unsigned long get_pmcs_ext_regs(int idx);
/* To define perf extended regs mask value */
extern u64 PERF_REG_EXTENDED_MASK;
#define PERF_REG_EXTENDED_MASK PERF_REG_EXTENDED_MASK
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 3b7baba01c92..00e7e671bb4b 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -36,9 +36,9 @@ struct power_pmu {
unsigned long test_adder;
int (*compute_mmcr)(u64 events[], int n_ev,
unsigned int hwc[], struct mmcr_regs *mmcr,
- struct perf_event *pevents[]);
+ struct perf_event *pevents[], u32 flags);
int (*get_constraint)(u64 event_id, unsigned long *mskp,
- unsigned long *valp);
+ unsigned long *valp, u64 event_config1);
int (*get_alternatives)(u64 event_id, unsigned int flags,
u64 alt[]);
void (*get_mem_data_src)(union perf_mem_data_src *dsrc,
@@ -83,6 +83,7 @@ struct power_pmu {
#define PPMU_NO_SIAR 0x00000100 /* Do not use SIAR */
#define PPMU_ARCH_31 0x00000200 /* Has MMCR3, SIER2 and SIER3 */
#define PPMU_P10_DD1 0x00000400 /* Is power10 DD1 processor version */
+#define PPMU_HAS_ATTR_CONFIG1 0x00000800 /* Using config1 attribute */
/*
* Values for flags to get_alternatives()
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index f7613f43c9cf..4eed82172e33 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -162,6 +162,9 @@ static inline bool is_ioremap_addr(const void *x)
return addr >= IOREMAP_BASE && addr < IOREMAP_END;
}
+
+struct seq_file;
+void arch_report_meminfo(struct seq_file *m);
#endif /* CONFIG_PPC64 */
#endif /* __ASSEMBLY__ */
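
Adding a bare `struct seq_file;` forward declaration lets pgtable.h declare arch_report_meminfo() without pulling <linux/seq_file.h> into a header that is included almost everywhere; an incomplete type is enough because only a pointer to it appears in the prototype. A minimal sketch of the idiom, with illustrative names:

    /* in a header: the type name alone suffices for a prototype */
    struct report_ctx;
    void emit_report(struct report_ctx *ctx);

    /* only the .c file that dereferences the pointer needs the full
     * definition of struct report_ctx */
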
diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
index a7951049e129..59a2c7dbc78f 100644
--- a/arch/powerpc/include/asm/pkeys.h
+++ b/arch/powerpc/include/asm/pkeys.h
@@ -169,10 +169,4 @@ static inline bool arch_pkeys_enabled(void)
}
extern void pkey_mm_init(struct mm_struct *mm);
-extern bool arch_supports_pkeys(int cap);
-extern unsigned int arch_usable_pkeys(void);
-extern void thread_pkey_regs_save(struct thread_struct *thread);
-extern void thread_pkey_regs_restore(struct thread_struct *new_thread,
- struct thread_struct *old_thread);
-extern void thread_pkey_regs_init(struct thread_struct *thread);
#endif /*_ASM_POWERPC_KEYS_H */
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 7f4be5a05eb3..2b9edbf6e929 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -13,10 +13,6 @@
extern unsigned long isa_io_base;
-extern void pci_setup_phb_io(struct pci_controller *hose, int primary);
-extern void pci_setup_phb_io_dynamic(struct pci_controller *hose, int primary);
-
-
extern struct list_head hose_list;
extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */
@@ -32,9 +28,6 @@ struct pci_dn;
void *pci_traverse_device_nodes(struct device_node *start,
void *(*fn)(struct device_node *, void *),
void *data);
-void *traverse_pci_dn(struct pci_dn *root,
- void *(*fn)(struct pci_dn *, void *),
- void *data);
extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
/* From rtas_pci.h */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index cc1bca571332..3dceb64fc9af 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -25,7 +25,6 @@
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)
#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)
-#define ACCOUNT_STOLEN_TIME
#else
#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb) \
MFTB(ra); /* get timebase */ \
@@ -44,29 +43,6 @@
PPC_LL ra, ACCOUNT_SYSTEM_TIME(ptr); \
add ra,ra,rb; /* add on to system time */ \
PPC_STL ra, ACCOUNT_SYSTEM_TIME(ptr)
-
-#ifdef CONFIG_PPC_SPLPAR
-#define ACCOUNT_STOLEN_TIME \
-BEGIN_FW_FTR_SECTION; \
- beq 33f; \
- /* from user - see if there are any DTL entries to process */ \
- ld r10,PACALPPACAPTR(r13); /* get ptr to VPA */ \
- ld r11,PACA_DTL_RIDX(r13); /* get log read index */ \
- addi r10,r10,LPPACA_DTLIDX; \
- LDX_BE r10,0,r10; /* get log write index */ \
- cmpd cr1,r11,r10; \
- beq+ cr1,33f; \
- bl accumulate_stolen_time; \
- ld r12,_MSR(r1); \
- andi. r10,r12,MSR_PR; /* Restore cr0 (coming from user) */ \
-33: \
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
-
-#else /* CONFIG_PPC_SPLPAR */
-#define ACCOUNT_STOLEN_TIME
-
-#endif /* CONFIG_PPC_SPLPAR */
-
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 58f9dc060a7b..975ba260006a 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -70,6 +70,9 @@ struct pt_regs
};
#endif
+
+#define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs))
+
#ifdef __powerpc64__
/*
@@ -229,6 +232,11 @@ static inline bool trap_is_scv(struct pt_regs *regs)
return (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && TRAP(regs) == 0x3000);
}
+static inline bool trap_is_unsupported_scv(struct pt_regs *regs)
+{
+ return IS_ENABLED(CONFIG_PPC_BOOK3S_64) && TRAP(regs) == 0x7ff0;
+}
+
static inline bool trap_is_syscall(struct pt_regs *regs)
{
return (trap_is_scv(regs) || TRAP(regs) == 0xc00);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e40a921d78f9..da103e92c112 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1375,6 +1375,7 @@
#define mtmsr(v) asm volatile("mtmsr %0" : \
: "r" ((unsigned long)(v)) \
: "memory")
+#define __mtmsrd(v, l) BUILD_BUG()
#define __MTMSR "mtmsr"
#endif
@@ -1413,13 +1414,24 @@ static inline void msr_check_and_clear(unsigned long bits)
}
#ifdef CONFIG_PPC32
-#define mfsrin(v) ({unsigned int rval; \
- asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \
- rval;})
+static inline u32 mfsr(u32 idx)
+{
+ u32 val;
+
+ if (__builtin_constant_p(idx))
+ asm volatile("mfsr %0, %1" : "=r" (val): "i" (idx >> 28));
+ else
+ asm volatile("mfsrin %0, %1" : "=r" (val): "r" (idx));
-static inline void mtsrin(u32 val, u32 idx)
+ return val;
+}
+
+static inline void mtsr(u32 val, u32 idx)
{
- asm volatile("mtsrin %0, %1" : : "r" (val), "r" (idx));
+ if (__builtin_constant_p(idx))
+ asm volatile("mtsr %1, %0" : : "r" (val), "i" (idx >> 28));
+ else
+ asm volatile("mtsrin %0, %1" : : "r" (val), "r" (idx));
}
#endif
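
The new mfsr()/mtsr() helpers use __builtin_constant_p() to choose, at compile time, between the immediate-form mfsr/mtsr instructions (segment register number taken from the top four bits of the effective address) and the indirect mfsrin/mtsrin forms. A minimal user-space sketch of the same dispatch idea, using hypothetical stand-in functions rather than real SPR accessors:

    #include <stdio.h>

    /* hypothetical stand-ins for the two instruction forms */
    static inline unsigned int read_sr_immediate(unsigned int sr) { return sr; }
    static inline unsigned int read_sr_indirect(unsigned int ea)  { return ea >> 28; }

    static inline unsigned int read_sr(unsigned int ea)
    {
            /* folds to the immediate path whenever 'ea' is a compile-time
             * constant; both paths compute the same value, so the result
             * is correct either way */
            if (__builtin_constant_p(ea))
                    return read_sr_immediate(ea >> 28);
            return read_sr_indirect(ea);
    }

    int main(void)
    {
            printf("%u\n", read_sr(0xC0000000u));   /* prints 12 */
            return 0;
    }
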
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 262782f08fd4..17b8dcd9a40d 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -691,6 +691,9 @@
#define mttmr(rn, v) asm volatile(MTTMR(rn, %0) : \
: "r" ((unsigned long)(v)) \
: "memory")
+
+extern unsigned long global_dbcr0[];
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_POWERPC_REG_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 332e1000ca0f..658448ca5b8a 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -369,7 +369,7 @@ void rtas_initialize(void);
#else
static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;}
static inline void pSeries_coalesce_init(void) { }
-static inline void rtas_initialize(void) { };
+static inline void rtas_initialize(void) { }
#endif
extern int call_rtas(const char *, int, int, unsigned long *, ...);
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index a466749703f1..e89bfebd4e00 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -58,7 +58,7 @@ void do_rfi_flush_fixups(enum l1d_flush_type types);
#ifdef CONFIG_PPC_BARRIER_NOSPEC
void setup_barrier_nospec(void);
#else
-static inline void setup_barrier_nospec(void) { };
+static inline void setup_barrier_nospec(void) { }
#endif
void do_uaccess_flush_fixups(enum l1d_flush_type types);
void do_entry_flush_fixups(enum l1d_flush_type types);
@@ -68,13 +68,13 @@ extern bool barrier_nospec_enabled;
#ifdef CONFIG_PPC_BARRIER_NOSPEC
void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
#else
-static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
+static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }
#endif
#ifdef CONFIG_PPC_FSL_BOOK3E
void setup_spectre_v2(void);
#else
-static inline void setup_spectre_v2(void) {};
+static inline void setup_spectre_v2(void) {}
#endif
void do_btb_flush_fixups(void);
diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
index 9c3c30534333..5b862de29dff 100644
--- a/arch/powerpc/include/asm/simple_spinlock.h
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -90,8 +90,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
-static inline void splpar_spin_yield(arch_spinlock_t *lock) {};
-static inline void splpar_rw_yield(arch_rwlock_t *lock) {};
+static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
+static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
#endif
static inline void spin_yield(arch_spinlock_t *lock)
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index c4e2d53acd2b..7a13bc20f0a0 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -236,7 +236,7 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
#if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE))
extern void smp_release_cpus(void);
#else
-static inline void smp_release_cpus(void) { };
+static inline void smp_release_cpus(void) { }
#endif
extern int smt_enabled_at_boot;
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 9666491bcb8a..8a2d11ba0dae 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -201,20 +201,6 @@ int spu_64k_pages_available(void);
struct mm_struct;
extern void spu_flush_all_slbs(struct mm_struct *mm);
-/* This interface allows a profiler (e.g., OProfile) to store a ref
- * to spu context information that it creates. This caching technique
- * avoids the need to recreate this information after a save/restore operation.
- *
- * Assumes the caller has already incremented the ref count to
- * profile_info; then spu_context_destroy must call kref_put
- * on prof_info_kref.
- */
-void spu_set_profile_private_kref(struct spu_context *ctx,
- struct kref *prof_info_kref,
- void ( * prof_info_release) (struct kref *kref));
-
-void *spu_get_profile_private_kref(struct spu_context *ctx);
-
/* system callbacks from the SPU */
struct spu_syscall_block {
u64 nr_ret;
@@ -266,25 +252,6 @@ void spu_remove_dev_attr(struct device_attribute *attr);
int spu_add_dev_attr_group(struct attribute_group *attrs);
void spu_remove_dev_attr_group(struct attribute_group *attrs);
-/*
- * Notifier blocks:
- *
- * oprofile can get notified when a context switch is performed
- * on an spe. The notifier function that gets called is passed
- * a pointer to the SPU structure as well as the object-id that
- * identifies the binary running on that SPU now.
- *
- * For a context save, the object-id that is passed is zero,
- * identifying that the kernel will run from that moment on.
- *
- * For a context restore, the object-id is the value written
- * to object-id spufs file from user space and the notifier
- * function can assume that spu->ctx is valid.
- */
-struct notifier_block;
-int spu_switch_event_register(struct notifier_block * n);
-int spu_switch_event_unregister(struct notifier_block * n);
-
extern void notify_spus_active(void);
extern void do_notify_spus_active(void);
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 3d8a47af7a25..386d576673a1 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -94,7 +94,6 @@ void arch_setup_new_exec(void);
#define TIF_PATCH_PENDING 6 /* pending live patching update */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SINGLESTEP 8 /* singlestepping active */
-#define TIF_NOHZ 9 /* in adaptive nohz mode */
#define TIF_SECCOMP 10 /* secure computing */
#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
#define TIF_NOERROR 12 /* Force successful syscall return */
@@ -128,11 +127,10 @@ void arch_setup_new_exec(void);
#define _TIF_UPROBE (1<<TIF_UPROBE)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
-#define _TIF_NOHZ (1<<TIF_NOHZ)
#define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
- _TIF_NOHZ | _TIF_SYSCALL_EMU)
+ _TIF_SYSCALL_EMU)
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE | \
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 8f789b597bae..8dd3cdb25338 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -102,6 +102,8 @@ DECLARE_PER_CPU(u64, decrementers_next_tb);
/* Convert timebase ticks to nanoseconds */
unsigned long long tb_to_ns(unsigned long long tb_ticks);
+void timer_broadcast_interrupt(void);
+
/* SPLPAR */
void accumulate_stolen_time(void);
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 501c9a79038c..78e2a3990eab 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -52,8 +52,6 @@ static inline bool __access_ok(unsigned long addr, unsigned long size)
__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-#define __put_user_goto(x, ptr, label) \
- __put_user_nocheck_goto((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
#define __get_user_allowed(x, ptr) \
__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
@@ -110,22 +108,18 @@ static inline bool __access_ok(unsigned long addr, unsigned long size)
extern long __put_user_bad(void);
-#define __put_user_size_allowed(x, ptr, size, retval) \
+#define __put_user_size(x, ptr, size, retval) \
do { \
__label__ __pu_failed; \
\
retval = 0; \
+ allow_write_to_user(ptr, size); \
__put_user_size_goto(x, ptr, size, __pu_failed); \
+ prevent_write_to_user(ptr, size); \
break; \
\
__pu_failed: \
retval = -EFAULT; \
-} while (0)
-
-#define __put_user_size(x, ptr, size, retval) \
-do { \
- allow_write_to_user(ptr, size); \
- __put_user_size_allowed(x, ptr, size, retval); \
prevent_write_to_user(ptr, size); \
} while (0)
@@ -213,11 +207,9 @@ do { \
} \
} while (0)
-#define __put_user_nocheck_goto(x, ptr, size, label) \
+#define __unsafe_put_user_goto(x, ptr, size, label) \
do { \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- if (!is_kernel_addr((unsigned long)__pu_addr)) \
- might_fault(); \
__chk_user_ptr(ptr); \
__put_user_size_goto((x), __pu_addr, (size), label); \
} while (0)
@@ -313,9 +305,8 @@ do { \
__typeof__(size) __gu_size = (size); \
\
__chk_user_ptr(__gu_addr); \
- if (!is_kernel_addr((unsigned long)__gu_addr)) \
+ if (do_allow && !is_kernel_addr((unsigned long)__gu_addr)) \
might_fault(); \
- barrier_nospec(); \
if (do_allow) \
__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
else \
@@ -333,10 +324,8 @@ do { \
__typeof__(size) __gu_size = (size); \
\
might_fault(); \
- if (access_ok(__gu_addr, __gu_size)) { \
- barrier_nospec(); \
+ if (access_ok(__gu_addr, __gu_size)) \
__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
- } \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
\
__gu_err; \
@@ -350,7 +339,6 @@ do { \
__typeof__(size) __gu_size = (size); \
\
__chk_user_ptr(__gu_addr); \
- barrier_nospec(); \
__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
\
@@ -395,7 +383,6 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
unsigned long ret;
- barrier_nospec();
allow_read_write_user(to, from, n);
ret = __copy_tofrom_user(to, from, n);
prevent_read_write_user(to, from, n);
@@ -407,32 +394,7 @@ static inline unsigned long raw_copy_from_user(void *to,
const void __user *from, unsigned long n)
{
unsigned long ret;
- if (__builtin_constant_p(n) && (n <= 8)) {
- ret = 1;
-
- switch (n) {
- case 1:
- barrier_nospec();
- __get_user_size(*(u8 *)to, from, 1, ret);
- break;
- case 2:
- barrier_nospec();
- __get_user_size(*(u16 *)to, from, 2, ret);
- break;
- case 4:
- barrier_nospec();
- __get_user_size(*(u32 *)to, from, 4, ret);
- break;
- case 8:
- barrier_nospec();
- __get_user_size(*(u64 *)to, from, 8, ret);
- break;
- }
- if (ret == 0)
- return 0;
- }
- barrier_nospec();
allow_read_from_user(from, n);
ret = __copy_tofrom_user((__force void __user *)to, from, n);
prevent_read_from_user(from, n);
@@ -440,39 +402,12 @@ static inline unsigned long raw_copy_from_user(void *to,
}
static inline unsigned long
-raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
-{
- if (__builtin_constant_p(n) && (n <= 8)) {
- unsigned long ret = 1;
-
- switch (n) {
- case 1:
- __put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
- break;
- case 2:
- __put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
- break;
- case 4:
- __put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
- break;
- case 8:
- __put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
- break;
- }
- if (ret == 0)
- return 0;
- }
-
- return __copy_tofrom_user(to, (__force const void __user *)from, n);
-}
-
-static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
unsigned long ret;
allow_write_to_user(to, n);
- ret = raw_copy_to_user_allowed(to, from, n);
+ ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
prevent_write_to_user(to, n);
return ret;
}
@@ -508,6 +443,9 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
{
if (unlikely(!access_ok(ptr, len)))
return false;
+
+ might_fault();
+
allow_read_write_user((void __user *)ptr, ptr, len);
return true;
}
@@ -521,6 +459,9 @@ user_read_access_begin(const void __user *ptr, size_t len)
{
if (unlikely(!access_ok(ptr, len)))
return false;
+
+ might_fault();
+
allow_read_from_user(ptr, len);
return true;
}
@@ -532,6 +473,9 @@ user_write_access_begin(const void __user *ptr, size_t len)
{
if (unlikely(!access_ok(ptr, len)))
return false;
+
+ might_fault();
+
allow_write_to_user((void __user *)ptr, len);
return true;
}
@@ -540,7 +484,8 @@ user_write_access_begin(const void __user *ptr, size_t len)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
-#define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)
+#define unsafe_put_user(x, p, e) \
+ __unsafe_put_user_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
#define unsafe_copy_to_user(d, s, l, e) \
do { \
@@ -550,17 +495,17 @@ do { \
int _i; \
\
for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long)) \
- __put_user_goto(*(long*)(_src + _i), (long __user *)(_dst + _i), e);\
+ unsafe_put_user(*(long*)(_src + _i), (long __user *)(_dst + _i), e); \
if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) { \
- __put_user_goto(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \
+ unsafe_put_user(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \
_i += 4; \
} \
if (_len & 2) { \
- __put_user_goto(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e); \
+ unsafe_put_user(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e); \
_i += 2; \
} \
if (_len & 1) \
- __put_user_goto(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e);\
+ unsafe_put_user(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e); \
} while (0)
#define HAVE_GET_KERNEL_NOFAULT
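
With raw_copy_to_user_allowed() and __put_user_goto() gone, unsafe_put_user() built on __unsafe_put_user_goto() is the remaining fast-path primitive, and it is meant to be bracketed by the user_*_access_begin()/_end() helpers that now call might_fault(). A hedged sketch of the caller-side pattern (the function and label names are illustrative, and it assumes the matching user_write_access_end(), which is not shown in this hunk):

    static int put_one_word(u32 __user *uptr, u32 val)
    {
            if (!user_write_access_begin(uptr, sizeof(*uptr)))
                    return -EFAULT;

            unsafe_put_user(val, uptr, efault);     /* branches to 'efault' on fault */

            user_write_access_end();
            return 0;

    efault:
            user_write_access_end();
            return -EFAULT;
    }
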
diff --git a/arch/powerpc/include/asm/vdso/timebase.h b/arch/powerpc/include/asm/vdso/timebase.h
index 881f655caa0a..891c9d5eaabe 100644
--- a/arch/powerpc/include/asm/vdso/timebase.h
+++ b/arch/powerpc/include/asm/vdso/timebase.h
@@ -43,12 +43,6 @@
#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
#define mttbu(v) asm volatile("mttbu %0":: "r"(v))
-/* For compatibility, get_tbl() is defined as get_tb() on ppc64 */
-static inline unsigned long get_tbl(void)
-{
- return mftb();
-}
-
static __always_inline u64 get_tb(void)
{
unsigned int tbhi, tblo, tbhi2;
diff --git a/arch/powerpc/include/asm/xmon.h b/arch/powerpc/include/asm/xmon.h
index 454a7fc6113b..68bfb2361f03 100644
--- a/arch/powerpc/include/asm/xmon.h
+++ b/arch/powerpc/include/asm/xmon.h
@@ -17,8 +17,8 @@ struct pt_regs;
extern int xmon(struct pt_regs *excp);
extern irqreturn_t xmon_irq(int, void *);
#else
-static inline void xmon_setup(void) { };
-static inline void xmon_register_spus(struct list_head *list) { };
+static inline void xmon_setup(void) { }
+static inline void xmon_register_spus(struct list_head *list) { }
#endif
#if defined(CONFIG_XMON) && defined(CONFIG_SMP)
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index c3af3f324c5a..9f18fa090f1f 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -644,6 +644,8 @@ struct kvm_ppc_cpu_char {
#define KVM_REG_PPC_MMCR3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc1)
#define KVM_REG_PPC_SIER2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc2)
#define KVM_REG_PPC_SIER3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3)
+#define KVM_REG_PPC_DAWR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc4)
+#define KVM_REG_PPC_DAWRX1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5)
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h
index bdf5f10f8b9f..578b3ee86105 100644
--- a/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -55,17 +55,33 @@ enum perf_event_powerpc_regs {
PERF_REG_POWERPC_MMCR3,
PERF_REG_POWERPC_SIER2,
PERF_REG_POWERPC_SIER3,
+ PERF_REG_POWERPC_PMC1,
+ PERF_REG_POWERPC_PMC2,
+ PERF_REG_POWERPC_PMC3,
+ PERF_REG_POWERPC_PMC4,
+ PERF_REG_POWERPC_PMC5,
+ PERF_REG_POWERPC_PMC6,
/* Max regs without the extended regs */
PERF_REG_POWERPC_MAX = PERF_REG_POWERPC_MMCRA + 1,
};
#define PERF_REG_PMU_MASK ((1ULL << PERF_REG_POWERPC_MAX) - 1)
-/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300 */
-#define PERF_REG_PMU_MASK_300 (((1ULL << (PERF_REG_POWERPC_MMCR2 + 1)) - 1) - PERF_REG_PMU_MASK)
-/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31 */
-#define PERF_REG_PMU_MASK_31 (((1ULL << (PERF_REG_POWERPC_SIER3 + 1)) - 1) - PERF_REG_PMU_MASK)
+/* Exclude MMCR3, SIER2, SIER3 for CPU_FTR_ARCH_300 */
+#define PERF_EXCLUDE_REG_EXT_300 (7ULL << PERF_REG_POWERPC_MMCR3)
-#define PERF_REG_MAX_ISA_300 (PERF_REG_POWERPC_MMCR2 + 1)
-#define PERF_REG_MAX_ISA_31 (PERF_REG_POWERPC_SIER3 + 1)
+/*
+ * PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300
+ * includes 9 SPRS from MMCR0 to PMC6 excluding the
+ * unsupported SPRS in PERF_EXCLUDE_REG_EXT_300.
+ */
+#define PERF_REG_PMU_MASK_300 ((0xfffULL << PERF_REG_POWERPC_MMCR0) - PERF_EXCLUDE_REG_EXT_300)
+
+/*
+ * PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31
+ * includes 12 SPRs from MMCR0 to PMC6.
+ */
+#define PERF_REG_PMU_MASK_31 (0xfffULL << PERF_REG_POWERPC_MMCR0)
+
+#define PERF_REG_EXTENDED_MAX (PERF_REG_POWERPC_PMC6 + 1)
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
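
The reworked masks are straightforward to sanity-check: MMCR0 through PMC6 is a run of 12 consecutive registers, so 0xfff shifted to MMCR0's position covers all of them for ISA v3.1, and subtracting 7 at MMCR3's position (MMCR3, SIER2, SIER3) leaves the 9 registers ISA v3.0 exposes. A quick user-space check of that arithmetic, using offsets relative to MMCR0 instead of the real enum values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            const int mmcr0 = 0, mmcr3 = 3;            /* offsets within the run */
            uint64_t mask_31  = 0xfffULL << mmcr0;     /* MMCR0..PMC6: 12 regs   */
            uint64_t excl_300 = 7ULL << mmcr3;         /* MMCR3, SIER2, SIER3    */
            uint64_t mask_300 = mask_31 - excl_300;    /* 9 regs remain          */

            assert(__builtin_popcountll(mask_31) == 12);
            assert(__builtin_popcountll(mask_300) == 9);
            return 0;
    }
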
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index fe2ef598e2ea..6084fa499aa3 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -46,12 +46,12 @@ obj-y := cputable.o syscalls.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o misc_$(BITS).o \
of_platform.o prom_parse.o firmware.o \
- hw_breakpoint_constraints.o
+ hw_breakpoint_constraints.o interrupt.o
obj-y += ptrace/
obj-$(CONFIG_PPC64) += setup_64.o \
- paca.o nvram_64.o note.o syscall_64.o
+ paca.o nvram_64.o note.o
obj-$(CONFIG_COMPAT) += sys_ppc32.o signal_32.o
-obj-$(CONFIG_VDSO32) += vdso32/
+obj-$(CONFIG_VDSO32) += vdso32_wrapper.o
obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_DAWR) += dawr.o
@@ -60,7 +60,7 @@ obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
-obj-$(CONFIG_PPC64) += vdso64/
+obj-$(CONFIG_PPC64) += vdso64_wrapper.o
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_BOOK3S_IDLE) += idle_book3s.o
procfs-y := proc_powerpc.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index b12d7c049bfe..f3a662201a9f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -255,7 +255,6 @@ int main(void)
#endif /* CONFIG_PPC_MM_SLICES */
OFFSET(PACA_EXGEN, paca_struct, exgen);
OFFSET(PACA_EXMC, paca_struct, exmc);
- OFFSET(PACA_EXSLB, paca_struct, exslb);
OFFSET(PACA_EXNMI, paca_struct, exnmi);
#ifdef CONFIG_PPC_PSERIES
OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr);
@@ -309,7 +308,7 @@ int main(void)
/* Interrupt register frame */
DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
- DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
+ DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_WITH_PT_REGS);
STACK_PT_REGS_OFFSET(GPR0, gpr[0]);
STACK_PT_REGS_OFFSET(GPR1, gpr[1]);
STACK_PT_REGS_OFFSET(GPR2, gpr[2]);
@@ -526,8 +525,10 @@ int main(void)
OFFSET(VCPU_CTRL, kvm_vcpu, arch.ctrl);
OFFSET(VCPU_DABR, kvm_vcpu, arch.dabr);
OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx);
- OFFSET(VCPU_DAWR, kvm_vcpu, arch.dawr);
- OFFSET(VCPU_DAWRX, kvm_vcpu, arch.dawrx);
+ OFFSET(VCPU_DAWR0, kvm_vcpu, arch.dawr0);
+ OFFSET(VCPU_DAWRX0, kvm_vcpu, arch.dawrx0);
+ OFFSET(VCPU_DAWR1, kvm_vcpu, arch.dawr1);
+ OFFSET(VCPU_DAWRX1, kvm_vcpu, arch.dawrx1);
OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr);
OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags);
OFFSET(VCPU_DEC, kvm_vcpu, arch.dec);
@@ -668,7 +669,6 @@ int main(void)
HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
HSTATE_FIELD(HSTATE_PTID, ptid);
- HSTATE_FIELD(HSTATE_TID, tid);
HSTATE_FIELD(HSTATE_FAKE_SUSPEND, fake_suspend);
HSTATE_FIELD(HSTATE_MMCR0, host_mmcr[0]);
HSTATE_FIELD(HSTATE_MMCR1, host_mmcr[1]);
@@ -698,8 +698,6 @@ int main(void)
OFFSET(KVM_SPLIT_LDBAR, kvm_split_mode, ldbar);
OFFSET(KVM_SPLIT_DO_NAP, kvm_split_mode, do_nap);
OFFSET(KVM_SPLIT_NAPPED, kvm_split_mode, napped);
- OFFSET(KVM_SPLIT_DO_SET, kvm_split_mode, do_set);
- OFFSET(KVM_SPLIT_DO_RESTORE, kvm_split_mode, do_restore);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 65f35ec052d4..ae0fdef0ac11 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -13,7 +13,6 @@
#include <linux/export.h>
#include <linux/jump_label.h>
-#include <asm/oprofile_impl.h>
#include <asm/cputable.h>
#include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */
#include <asm/mce.h>
@@ -151,7 +150,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_ppc970,
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
- .oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
{ /* PPC970FX */
@@ -169,7 +167,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_ppc970,
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
- .oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
{ /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */
@@ -187,7 +184,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_ppc970,
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970MP",
- .oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
{ /* PPC970MP */
@@ -205,7 +201,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_ppc970MP,
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970MP",
- .oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
{ /* PPC970GX */
@@ -222,7 +217,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.cpu_setup = __setup_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
- .oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
{ /* Power5 GR */
@@ -237,12 +231,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power5",
- .oprofile_type = PPC_OPROFILE_POWER4,
- /* SIHV / SIPR bits are implemented on POWER4+ (GQ)
- * and above but only works on POWER5 and above
- */
- .oprofile_mmcra_sihv = MMCRA_SIHV,
- .oprofile_mmcra_sipr = MMCRA_SIPR,
.platform = "power5",
},
{ /* Power5++ */
@@ -256,9 +244,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.oprofile_cpu_type = "ppc64/power5++",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .oprofile_mmcra_sihv = MMCRA_SIHV,
- .oprofile_mmcra_sipr = MMCRA_SIPR,
.platform = "power5+",
},
{ /* Power5 GS */
@@ -273,9 +258,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power5+",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .oprofile_mmcra_sihv = MMCRA_SIHV,
- .oprofile_mmcra_sipr = MMCRA_SIPR,
.platform = "power5+",
},
{ /* POWER6 in P5+ mode; 2.04-compliant processor */
@@ -288,7 +270,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
- .oprofile_type = PPC_OPROFILE_POWER4,
.platform = "power5+",
},
{ /* Power6 */
@@ -304,11 +285,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power6",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
- .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
- .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
- POWER6_MMCRA_OTHER,
.platform = "power6x",
},
{ /* 2.05-compliant processor, i.e. Power6 "architected" mode */
@@ -321,7 +297,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
- .oprofile_type = PPC_OPROFILE_POWER4,
.platform = "power6",
},
{ /* 2.06-compliant processor, i.e. Power7 "architected" mode */
@@ -334,7 +309,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_type = PPC_OPROFILE_POWER4,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
@@ -351,7 +325,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER8,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_type = PPC_OPROFILE_INVALID,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
@@ -368,7 +341,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER9,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_type = PPC_OPROFILE_INVALID,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
@@ -384,7 +356,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER10,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_type = PPC_OPROFILE_INVALID,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power10,
.cpu_restore = __restore_cpu_power10,
@@ -403,7 +374,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power7",
- .oprofile_type = PPC_OPROFILE_POWER4,
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
.machine_check_early = __machine_check_early_realmode_p7,
@@ -422,7 +392,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power7",
- .oprofile_type = PPC_OPROFILE_POWER4,
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
.machine_check_early = __machine_check_early_realmode_p7,
@@ -441,7 +410,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power8",
- .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.machine_check_early = __machine_check_early_realmode_p8,
@@ -460,7 +428,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power8",
- .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.machine_check_early = __machine_check_early_realmode_p8,
@@ -479,7 +446,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power8",
- .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.machine_check_early = __machine_check_early_realmode_p8,
@@ -498,7 +464,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power9",
- .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.machine_check_early = __machine_check_early_realmode_p9,
@@ -517,7 +482,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power9",
- .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.machine_check_early = __machine_check_early_realmode_p9,
@@ -536,7 +500,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power9",
- .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.machine_check_early = __machine_check_early_realmode_p9,
@@ -555,7 +518,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power10",
- .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power10,
.cpu_restore = __restore_cpu_power10,
.machine_check_early = __machine_check_early_realmode_p10,
@@ -575,7 +537,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 4,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/cell-be",
- .oprofile_type = PPC_OPROFILE_CELL,
.platform = "ppc-cell-be",
},
{ /* PA Semi PA6T */
@@ -592,7 +553,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_pa6t,
.cpu_restore = __restore_cpu_pa6t,
.oprofile_cpu_type = "ppc64/pa6t",
- .oprofile_type = PPC_OPROFILE_PA6T,
.platform = "pa6t",
},
{ /* default match */
@@ -757,7 +717,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_generic,
.platform = "ppc750",
.oprofile_cpu_type = "ppc/750",
- .oprofile_type = PPC_OPROFILE_G4,
},
{ /* 745/755 */
.pvr_mask = 0xfffff000,
@@ -789,7 +748,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_generic,
.platform = "ppc750",
.oprofile_cpu_type = "ppc/750",
- .oprofile_type = PPC_OPROFILE_G4,
},
{ /* 750FX rev 2.0 must disable HID0[DPM] */
.pvr_mask = 0xffffffff,
@@ -806,7 +764,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_generic,
.platform = "ppc750",
.oprofile_cpu_type = "ppc/750",
- .oprofile_type = PPC_OPROFILE_G4,
},
{ /* 750FX (All revs except 2.0) */
.pvr_mask = 0xffff0000,
@@ -823,7 +780,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_generic,
.platform = "ppc750",
.oprofile_cpu_type = "ppc/750",
- .oprofile_type = PPC_OPROFILE_G4,
},
{ /* 750GX */
.pvr_mask = 0xffff0000,
@@ -840,7 +796,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_generic,
.platform = "ppc750",
.oprofile_cpu_type = "ppc/750",
- .oprofile_type = PPC_OPROFILE_G4,
},
{ /* 740/750 (L2CR bit need fixup for 740) */
.pvr_mask = 0xffff0000,
@@ -919,7 +874,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -937,7 +891,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -955,7 +908,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -973,7 +925,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -991,7 +942,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1009,7 +959,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1027,7 +976,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1045,7 +993,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1062,7 +1009,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1080,7 +1026,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1098,7 +1043,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1211,7 +1155,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_83xx,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e300",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
.platform = "ppc603",
},
{ /* e300c4 (e300c1, plus one IU) */
@@ -1228,7 +1171,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_83xx,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e300",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
.platform = "ppc603",
},
#endif
@@ -1925,7 +1867,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e500",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e500v1,
.machine_check = machine_check_e500,
.platform = "ppc8540",
@@ -1945,7 +1886,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e500",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e500v2,
.machine_check = machine_check_e500,
.platform = "ppc8548",
@@ -1965,7 +1905,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 64,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e500mc",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e500mc,
.machine_check = machine_check_e500mc,
.platform = "ppce500mc",
@@ -1987,7 +1926,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 64,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e500mc",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e5500,
#ifndef CONFIG_PPC32
.cpu_restore = __restore_cpu_e5500,
@@ -2010,7 +1948,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 64,
.num_pmcs = 6,
.oprofile_cpu_type = "ppc/e6500",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e6500,
#ifndef CONFIG_PPC32
.cpu_restore = __restore_cpu_e6500,
@@ -2076,10 +2013,6 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
if (old.num_pmcs && !s->num_pmcs) {
t->num_pmcs = old.num_pmcs;
t->pmc_type = old.pmc_type;
- t->oprofile_type = old.oprofile_type;
- t->oprofile_mmcra_sihv = old.oprofile_mmcra_sihv;
- t->oprofile_mmcra_sipr = old.oprofile_mmcra_sipr;
- t->oprofile_mmcra_clear = old.oprofile_mmcra_clear;
/*
* If we have passed through this logic once before and
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 52680cf07c9d..5545c9cd17c1 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -12,17 +12,17 @@
#include <linux/hardirq.h>
#include <asm/dbell.h>
+#include <asm/interrupt.h>
#include <asm/irq_regs.h>
#include <asm/kvm_ppc.h>
#include <asm/trace.h>
#ifdef CONFIG_SMP
-void doorbell_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- irq_enter();
trace_doorbell_entry(regs);
ppc_msgsync();
@@ -35,13 +35,12 @@ void doorbell_exception(struct pt_regs *regs)
smp_ipi_demux_relaxed(); /* already performed the barrier */
trace_doorbell_exit(regs);
- irq_exit();
+
set_irq_regs(old_regs);
}
#else /* CONFIG_SMP */
-void doorbell_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
{
printk(KERN_WARNING "Received doorbell on non-smp system\n");
}
#endif /* CONFIG_SMP */
-
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index b5478b72c08c..358aee7c2d79 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -19,7 +19,6 @@
#include <asm/dt_cpu_ftrs.h>
#include <asm/mce.h>
#include <asm/mmu.h>
-#include <asm/oprofile_impl.h>
#include <asm/prom.h>
#include <asm/setup.h>
@@ -103,7 +102,6 @@ static struct cpu_spec __initdata base_cpu_spec = {
.num_pmcs = 0,
.pmc_type = PPC_PMC_DEFAULT,
.oprofile_cpu_type = NULL,
- .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = NULL,
.cpu_restore = __restore_cpu_cpufeatures,
.machine_check_early = NULL,
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 813713c9120c..cd60bc1c8701 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1596,6 +1596,35 @@ static int proc_eeh_show(struct seq_file *m, void *v)
}
#ifdef CONFIG_DEBUG_FS
+
+
+static struct pci_dev *eeh_debug_lookup_pdev(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ uint32_t domain, bus, dev, fn;
+ struct pci_dev *pdev;
+ char buf[20];
+ int ret;
+
+ memset(buf, 0, sizeof(buf));
+ ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count);
+ if (!ret)
+ return ERR_PTR(-EFAULT);
+
+ ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn);
+ if (ret != 4) {
+ pr_err("%s: expected 4 args, got %d\n", __func__, ret);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ return pdev;
+}
+
static int eeh_enable_dbgfs_set(void *data, u64 val)
{
if (val)
@@ -1688,26 +1717,13 @@ static ssize_t eeh_dev_check_write(struct file *filp,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- uint32_t domain, bus, dev, fn;
struct pci_dev *pdev;
struct eeh_dev *edev;
- char buf[20];
int ret;
- memset(buf, 0, sizeof(buf));
- ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count);
- if (!ret)
- return -EFAULT;
-
- ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn);
- if (ret != 4) {
- pr_err("%s: expected 4 args, got %d\n", __func__, ret);
- return -EINVAL;
- }
-
- pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn);
- if (!pdev)
- return -ENODEV;
+ pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
edev = pci_dev_to_eeh_dev(pdev);
if (!edev) {
@@ -1717,8 +1733,8 @@ static ssize_t eeh_dev_check_write(struct file *filp,
}
ret = eeh_dev_check_failure(edev);
- pci_info(pdev, "eeh_dev_check_failure(%04x:%02x:%02x.%01x) = %d\n",
- domain, bus, dev, fn, ret);
+ pci_info(pdev, "eeh_dev_check_failure(%s) = %d\n",
+ pci_name(pdev), ret);
pci_dev_put(pdev);
@@ -1829,25 +1845,12 @@ static ssize_t eeh_dev_break_write(struct file *filp,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- uint32_t domain, bus, dev, fn;
struct pci_dev *pdev;
- char buf[20];
int ret;
- memset(buf, 0, sizeof(buf));
- ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count);
- if (!ret)
- return -EFAULT;
-
- ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn);
- if (ret != 4) {
- pr_err("%s: expected 4 args, got %d\n", __func__, ret);
- return -EINVAL;
- }
-
- pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn);
- if (!pdev)
- return -ENODEV;
+ pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
ret = eeh_debugfs_break_device(pdev);
pci_dev_put(pdev);
@@ -1865,6 +1868,53 @@ static const struct file_operations eeh_dev_break_fops = {
.read = eeh_debugfs_dev_usage,
};
+static ssize_t eeh_dev_can_recover(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct pci_driver *drv;
+ struct pci_dev *pdev;
+ size_t ret;
+
+ pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ /*
+ * In order for error recovery to work the driver needs to implement
+ * .error_detected(), so it can quiesce IO to the device, and
+ * .slot_reset() so it can re-initialise the device after a reset.
+ *
+ * Ideally they'd implement .resume() too, but some drivers which
+ * we need to support (notably IPR) don't so I guess we can tolerate
+ * that.
+ *
+ * .mmio_enabled() is mostly there as a work-around for devices which
+ * take forever to re-init after a hot reset. Implementing that is
+ * strictly optional.
+ */
+ drv = pci_dev_driver(pdev);
+ if (drv &&
+ drv->err_handler &&
+ drv->err_handler->error_detected &&
+ drv->err_handler->slot_reset) {
+ ret = count;
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+
+ pci_dev_put(pdev);
+
+ return ret;
+}
+
+static const struct file_operations eeh_dev_can_recover_fops = {
+ .open = simple_open,
+ .llseek = no_llseek,
+ .write = eeh_dev_can_recover,
+ .read = eeh_debugfs_dev_usage,
+};
+
#endif
static int __init eeh_init_proc(void)
@@ -1889,6 +1939,9 @@ static int __init eeh_init_proc(void)
debugfs_create_file_unsafe("eeh_force_recover", 0600,
powerpc_debugfs_root, NULL,
&eeh_force_recover_fops);
+ debugfs_create_file_unsafe("eeh_dev_can_recover", 0600,
+ powerpc_debugfs_root, NULL,
+ &eeh_dev_can_recover_fops);
eeh_cache_debugfs_init();
#endif
}
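
The new eeh_dev_can_recover file reports whether the driver bound to a device implements the two callbacks the comment above calls mandatory for recovery. On the driver side that means populating a struct pci_error_handlers and pointing pci_driver::err_handler at it; a hedged sketch of roughly what that looks like (the "foo" names are hypothetical, only the callback roles come from the comment):

    static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
    {
            /* quiesce I/O to the device */
            return PCI_ERS_RESULT_NEED_RESET;
    }

    static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
    {
            /* re-initialise the device after the reset */
            return PCI_ERS_RESULT_RECOVERED;
    }

    static const struct pci_error_handlers foo_err_handler = {
            .error_detected = foo_error_detected,   /* required for recovery */
            .slot_reset     = foo_slot_reset,       /* required for recovery */
            /* .resume and .mmio_enabled are optional, as noted above */
    };

Writing a device address in domain:bus:dev.fn form to the debugfs file then succeeds only when both callbacks are present.
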
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 1c9b0ccc2172..78c430b7f9d9 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -175,14 +175,11 @@ transfer_to_handler:
addi r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
lwz r9,TASK_CPU(r2)
- slwi r9,r9,3
+ slwi r9,r9,2
add r11,r11,r9
#endif
lwz r12,0(r11)
mtspr SPRN_DBCR0,r12
- lwz r12,4(r11)
- addi r12,r12,-1
- stw r12,4(r11)
#endif
b 3f
@@ -276,8 +273,7 @@ reenable_mmu:
* We save a bunch of GPRs,
* r3 can be different from GPR3(r1) at this point, r9 and r11
* contains the old MSR and handler address respectively,
- * r4 & r5 can contain page fault arguments that need to be passed
- * along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left
+ * r0, r4-r8, r12, CCR, CTR, XER etc... are left
* clobbered as they aren't useful past this point.
*/
@@ -285,15 +281,11 @@ reenable_mmu:
stw r9,8(r1)
stw r11,12(r1)
stw r3,16(r1)
- stw r4,20(r1)
- stw r5,24(r1)
/* If we are disabling interrupts (normal case), simply log it with
* lockdep
*/
1: bl trace_hardirqs_off
- lwz r5,24(r1)
- lwz r4,20(r1)
lwz r3,16(r1)
lwz r11,12(r1)
lwz r9,8(r1)
@@ -334,132 +326,29 @@ stack_ovf:
_ASM_NOKPROBE_SYMBOL(stack_ovf)
#endif
-#ifdef CONFIG_TRACE_IRQFLAGS
-trace_syscall_entry_irq_off:
- /*
- * Syscall shouldn't happen while interrupts are disabled,
- * so let's do a warning here.
- */
-0: trap
- EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
- bl trace_hardirqs_on
-
- /* Now enable for real */
- LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
- mtmsr r10
-
- REST_GPR(0, r1)
- REST_4GPRS(3, r1)
- REST_2GPRS(7, r1)
- b DoSyscall
-#endif /* CONFIG_TRACE_IRQFLAGS */
-
.globl transfer_to_syscall
transfer_to_syscall:
-#ifdef CONFIG_TRACE_IRQFLAGS
- andi. r12,r9,MSR_EE
- beq- trace_syscall_entry_irq_off
-#endif /* CONFIG_TRACE_IRQFLAGS */
+ SAVE_NVGPRS(r1)
+#ifdef CONFIG_PPC_BOOK3S_32
+ kuep_lock r11, r12
+#endif
-/*
- * Handle a system call.
- */
- .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
- .stabs "entry_32.S",N_SO,0,0,0f
-0:
-
-_GLOBAL(DoSyscall)
- stw r3,ORIG_GPR3(r1)
- li r12,0
- stw r12,RESULT(r1)
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* Make sure interrupts are enabled */
- mfmsr r11
- andi. r12,r11,MSR_EE
- /* We came in with interrupts disabled, we WARN and mark them enabled
- * for lockdep now */
-0: tweqi r12, 0
- EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
-#endif /* CONFIG_TRACE_IRQFLAGS */
- lwz r11,TI_FLAGS(r2)
- andi. r11,r11,_TIF_SYSCALL_DOTRACE
- bne- syscall_dotrace
-syscall_dotrace_cont:
- cmplwi 0,r0,NR_syscalls
- lis r10,sys_call_table@h
- ori r10,r10,sys_call_table@l
- slwi r0,r0,2
- bge- 66f
-
- barrier_nospec_asm
- /*
- * Prevent the load of the handler below (based on the user-passed
- * system call number) being speculatively executed until the test
- * against NR_syscalls and branch to .66f above has
- * committed.
- */
+ /* Calling convention has r9 = orig r0, r10 = regs */
+ addi r10,r1,STACK_FRAME_OVERHEAD
+ mr r9,r0
+ stw r10,THREAD+PT_REGS(r2)
+ bl system_call_exception
- lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
- mtlr r10
- addi r9,r1,STACK_FRAME_OVERHEAD
- PPC440EP_ERR42
- blrl /* Call handler */
- .globl ret_from_syscall
ret_from_syscall:
-#ifdef CONFIG_DEBUG_RSEQ
- /* Check whether the syscall is issued inside a restartable sequence */
- stw r3,GPR3(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl rseq_syscall
- lwz r3,GPR3(r1)
-#endif
- mr r6,r3
- /* disable interrupts so current_thread_info()->flags can't change */
- LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) /* doesn't include MSR_EE */
- /* Note: We don't bother telling lockdep about it */
- mtmsr r10
- lwz r9,TI_FLAGS(r2)
- li r8,-MAX_ERRNO
- andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
- bne- syscall_exit_work
- cmplw 0,r3,r8
- blt+ syscall_exit_cont
- lwz r11,_CCR(r1) /* Load CR */
- neg r3,r3
- oris r11,r11,0x1000 /* Set SO bit in CR */
- stw r11,_CCR(r1)
-syscall_exit_cont:
- lwz r8,_MSR(r1)
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* If we are going to return from the syscall with interrupts
- * off, we trace that here. It shouldn't normally happen.
- */
- andi. r10,r8,MSR_EE
- bne+ 1f
- stw r3,GPR3(r1)
- bl trace_hardirqs_off
- lwz r3,GPR3(r1)
-1:
-#endif /* CONFIG_TRACE_IRQFLAGS */
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- /* If the process has its own DBCR0 value, load it up. The internal
- debug mode bit tells us that dbcr0 should be loaded. */
- lwz r0,THREAD+THREAD_DBCR0(r2)
- andis. r10,r0,DBCR0_IDM@h
- bnel- load_dbcr0
-#endif
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r5,0
+ bl syscall_exit_prepare
#ifdef CONFIG_PPC_47x
lis r4,icache_44x_need_flush@ha
lwz r5,icache_44x_need_flush@l(r4)
cmplwi cr0,r5,0
bne- 2f
#endif /* CONFIG_PPC_47x */
-1:
-BEGIN_FTR_SECTION
- lwarx r7,0,r1
-END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
- stwcx. r0,0,r1 /* to clear the reservation */
- ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
#ifdef CONFIG_PPC_BOOK3S_32
kuep_unlock r5, r7
#endif
@@ -467,21 +356,36 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
mtlr r4
- mtcr r5
lwz r7,_NIP(r1)
- lwz r2,GPR2(r1)
- lwz r1,GPR1(r1)
+ lwz r8,_MSR(r1)
+ cmpwi r3,0
+ lwz r3,GPR3(r1)
syscall_exit_finish:
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
- mtspr SPRN_NRI, r0
-#endif
mtspr SPRN_SRR0,r7
mtspr SPRN_SRR1,r8
+
+ bne 3f
+ mtcr r5
+
+1: lwz r2,GPR2(r1)
+ lwz r1,GPR1(r1)
rfi
#ifdef CONFIG_40x
b . /* Prevent prefetch past rfi */
#endif
-_ASM_NOKPROBE_SYMBOL(syscall_exit_finish)
+
+3: mtcr r5
+ lwz r4,_CTR(r1)
+ lwz r5,_XER(r1)
+ REST_NVGPRS(r1)
+ mtctr r4
+ mtxer r5
+ lwz r0,GPR0(r1)
+ lwz r3,GPR3(r1)
+ REST_8GPRS(4,r1)
+ lwz r12,GPR12(r1)
+ b 1b
+
#ifdef CONFIG_44x
2: li r7,0
iccci r0,r0
@@ -489,9 +393,6 @@ _ASM_NOKPROBE_SYMBOL(syscall_exit_finish)
b 1b
#endif /* CONFIG_44x */
-66: li r3,-ENOSYS
- b ret_from_syscall
-
.globl ret_from_fork
ret_from_fork:
REST_NVGPRS(r1)
@@ -510,157 +411,6 @@ ret_from_kernel_thread:
li r3,0
b ret_from_syscall
-/* Traced system call support */
-syscall_dotrace:
- SAVE_NVGPRS(r1)
- li r0,0xc00
- stw r0,_TRAP(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_syscall_trace_enter
- /*
- * Restore argument registers possibly just changed.
- * We use the return value of do_syscall_trace_enter
- * for call number to look up in the table (r0).
- */
- mr r0,r3
- lwz r3,GPR3(r1)
- lwz r4,GPR4(r1)
- lwz r5,GPR5(r1)
- lwz r6,GPR6(r1)
- lwz r7,GPR7(r1)
- lwz r8,GPR8(r1)
- REST_NVGPRS(r1)
-
- cmplwi r0,NR_syscalls
- /* Return code is already in r3 thanks to do_syscall_trace_enter() */
- bge- ret_from_syscall
- b syscall_dotrace_cont
-
-syscall_exit_work:
- andi. r0,r9,_TIF_RESTOREALL
- beq+ 0f
- REST_NVGPRS(r1)
- b 2f
-0: cmplw 0,r3,r8
- blt+ 1f
- andi. r0,r9,_TIF_NOERROR
- bne- 1f
- lwz r11,_CCR(r1) /* Load CR */
- neg r3,r3
- oris r11,r11,0x1000 /* Set SO bit in CR */
- stw r11,_CCR(r1)
-
-1: stw r6,RESULT(r1) /* Save result */
- stw r3,GPR3(r1) /* Update return value */
-2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
- beq 4f
-
- /* Clear per-syscall TIF flags if any are set. */
-
- li r11,_TIF_PERSYSCALL_MASK
- addi r12,r2,TI_FLAGS
-3: lwarx r8,0,r12
- andc r8,r8,r11
- stwcx. r8,0,r12
- bne- 3b
-
-4: /* Anything which requires enabling interrupts? */
- andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
- beq ret_from_except
-
- /* Re-enable interrupts. There is no need to trace that with
- * lockdep as we are supposed to have IRQs on at this point
- */
- ori r10,r10,MSR_EE
- mtmsr r10
-
- /* Save NVGPRS if they're not saved already */
- lwz r4,_TRAP(r1)
- andi. r4,r4,1
- beq 5f
- SAVE_NVGPRS(r1)
- li r4,0xc00
- stw r4,_TRAP(r1)
-5:
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_syscall_trace_leave
- b ret_from_except_full
-
- /*
- * System call was called from kernel. We get here with SRR1 in r9.
- * Mark the exception as recoverable once we have retrieved SRR0,
- * trap a warning and return ENOSYS with CR[SO] set.
- */
- .globl ret_from_kernel_syscall
-ret_from_kernel_syscall:
- mfspr r9, SPRN_SRR0
- mfspr r10, SPRN_SRR1
-#if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE)
- LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR|MSR_DR))
- mtmsr r11
-#endif
-
-0: trap
- EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
-
- li r3, ENOSYS
- crset so
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
- mtspr SPRN_NRI, r0
-#endif
- mtspr SPRN_SRR0, r9
- mtspr SPRN_SRR1, r10
- rfi
-#ifdef CONFIG_40x
- b . /* Prevent prefetch past rfi */
-#endif
-_ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall)
-
-/*
- * The fork/clone functions need to copy the full register set into
- * the child process. Therefore we need to save all the nonvolatile
- * registers (r13 - r31) before calling the C code.
- */
- .globl ppc_fork
-ppc_fork:
- SAVE_NVGPRS(r1)
- lwz r0,_TRAP(r1)
- rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
- stw r0,_TRAP(r1) /* register set saved */
- b sys_fork
-
- .globl ppc_vfork
-ppc_vfork:
- SAVE_NVGPRS(r1)
- lwz r0,_TRAP(r1)
- rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
- stw r0,_TRAP(r1) /* register set saved */
- b sys_vfork
-
- .globl ppc_clone
-ppc_clone:
- SAVE_NVGPRS(r1)
- lwz r0,_TRAP(r1)
- rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
- stw r0,_TRAP(r1) /* register set saved */
- b sys_clone
-
- .globl ppc_clone3
-ppc_clone3:
- SAVE_NVGPRS(r1)
- lwz r0,_TRAP(r1)
- rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
- stw r0,_TRAP(r1) /* register set saved */
- b sys_clone3
-
- .globl ppc_swapcontext
-ppc_swapcontext:
- SAVE_NVGPRS(r1)
- lwz r0,_TRAP(r1)
- rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
- stw r0,_TRAP(r1) /* register set saved */
- b sys_swapcontext
-
/*
* Top-level page fault handling.
* This is in assembler because if do_page_fault tells us that
@@ -670,10 +420,6 @@ ppc_swapcontext:
.globl handle_page_fault
handle_page_fault:
addi r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_PPC_BOOK3S_32
- andis. r0,r5,DSISR_DABRMATCH@h
- bne- handle_dabr_fault
-#endif
bl do_page_fault
cmpwi r3,0
beq+ ret_from_except
@@ -681,23 +427,11 @@ handle_page_fault:
lwz r0,_TRAP(r1)
clrrwi r0,r0,1
stw r0,_TRAP(r1)
- mr r5,r3
+ mr r4,r3 /* err arg for bad_page_fault */
addi r3,r1,STACK_FRAME_OVERHEAD
- lwz r4,_DAR(r1)
bl __bad_page_fault
b ret_from_except_full
-#ifdef CONFIG_PPC_BOOK3S_32
- /* We have a data breakpoint exception - handle it */
-handle_dabr_fault:
- SAVE_NVGPRS(r1)
- lwz r0,_TRAP(r1)
- clrrwi r0,r0,1
- stw r0,_TRAP(r1)
- bl do_break
- b ret_from_except_full
-#endif
-
/*
* This routine switches between two different tasks. The process
* state of one is saved on its kernel stack. Then the state
@@ -1237,14 +971,11 @@ load_dbcr0:
addi r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
lwz r9,TASK_CPU(r2)
- slwi r9,r9,3
+ slwi r9,r9,2
add r11,r11,r9
#endif
stw r10,0(r11)
mtspr SPRN_DBCR0,r0
- lwz r10,4(r11)
- addi r10,r10,1
- stw r10,4(r11)
li r11,-1
mtspr SPRN_DBSR,r11 /* clear all pending debug events */
blr
@@ -1253,7 +984,7 @@ load_dbcr0:
.align 4
.global global_dbcr0
global_dbcr0:
- .space 8*NR_CPUS
+ .space 4*NR_CPUS
.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 33ddfeef4fe9..6c4d9e276c4d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -108,7 +108,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
li r11,\trapnr
std r11,_TRAP(r1)
std r12,_CCR(r1)
- std r3,ORIG_GPR3(r1)
addi r10,r1,STACK_FRAME_OVERHEAD
ld r11,exception_marker@toc(r2)
std r11,-16(r10) /* "regshere" marker */
@@ -226,6 +225,12 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
#endif
.balign IFETCH_ALIGN_BYTES
+ .globl system_call_common_real
+system_call_common_real:
+ ld r10,PACAKMSR(r13) /* get MSR value for kernel */
+ mtmsrd r10
+
+ .balign IFETCH_ALIGN_BYTES
.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
@@ -278,7 +283,6 @@ END_BTB_FLUSH_SECTION
std r10,_LINK(r1)
std r11,_TRAP(r1)
std r12,_CCR(r1)
- std r3,ORIG_GPR3(r1)
addi r10,r1,STACK_FRAME_OVERHEAD
ld r11,exception_marker@toc(r2)
std r11,-16(r10) /* "regshere" marker */
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 74d07dc0bb48..e8eb9992a270 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -398,7 +398,6 @@ exc_##n##_common: \
std r10,_NIP(r1); /* save SRR0 to stackframe */ \
std r11,_MSR(r1); /* save SRR1 to stackframe */ \
beq 2f; /* if from kernel mode */ \
- ACCOUNT_CPU_USER_ENTRY(r13,r10,r11);/* accounting (uses cr0+eq) */ \
2: ld r3,excf+EX_R10(r13); /* get back r10 */ \
ld r4,excf+EX_R11(r13); /* get back r11 */ \
mfspr r5,scratch; /* get back r13 */ \
@@ -791,7 +790,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
EXCEPTION_COMMON_CRIT(0xd00)
std r14,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
- mr r4,r14
ld r14,PACA_EXCRIT+EX_R14(r13)
ld r15,PACA_EXCRIT+EX_R15(r13)
bl save_nvgprs
@@ -864,7 +862,6 @@ kernel_dbg_exc:
INTS_DISABLE
std r14,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
- mr r4,r14
ld r14,PACA_EXDBG+EX_R14(r13)
ld r15,PACA_EXDBG+EX_R15(r13)
bl save_nvgprs
@@ -1011,8 +1008,6 @@ storage_fault_common:
std r14,_DAR(r1)
std r15,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
- mr r4,r14
- mr r5,r15
ld r14,PACA_EXGEN+EX_R14(r13)
ld r15,PACA_EXGEN+EX_R15(r13)
bl do_page_fault
@@ -1020,9 +1015,8 @@ storage_fault_common:
bne- 1f
b ret_from_except_lite
1: bl save_nvgprs
- mr r5,r3
+ mr r4,r3
addi r3,r1,STACK_FRAME_OVERHEAD
- ld r4,_DAR(r1)
bl __bad_page_fault
b ret_from_except
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 6e53f7638737..60d3051a8bc8 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -139,7 +139,6 @@ name:
#define IKVM_VIRT .L_IKVM_VIRT_\name\() /* Virt entry tests KVM */
#define ISTACK .L_ISTACK_\name\() /* Set regular kernel stack */
#define __ISTACK(name) .L_ISTACK_ ## name
-#define IRECONCILE .L_IRECONCILE_\name\() /* Do RECONCILE_IRQ_STATE */
#define IKUAP .L_IKUAP_\name\() /* Do KUAP lock */
#define INT_DEFINE_BEGIN(n) \
@@ -203,9 +202,6 @@ do_define_int n
.ifndef ISTACK
ISTACK=1
.endif
- .ifndef IRECONCILE
- IRECONCILE=1
- .endif
.ifndef IKUAP
IKUAP=1
.endif
@@ -581,7 +577,6 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
kuap_save_amr_and_lock r9, r10, cr1, cr0
.endif
beq 101f /* if from kernel mode */
- ACCOUNT_CPU_USER_ENTRY(r13, r9, r10)
BEGIN_FTR_SECTION
ld r9,IAREA+EX_PPR(r13) /* Read PPR from paca */
std r9,_PPR(r1)
@@ -649,14 +644,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
ld r11,exception_marker@toc(r2)
std r10,RESULT(r1) /* clear regs->result */
std r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */
-
- .if ISTACK
- ACCOUNT_STOLEN_TIME
- .endif
-
- .if IRECONCILE
- RECONCILE_IRQ_STATE(r10, r11)
- .endif
.endm
/*
@@ -705,14 +692,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
ld r1,GPR1(r1)
.endm
-#define RUNLATCH_ON \
-BEGIN_FTR_SECTION \
- ld r3, PACA_THREAD_INFO(r13); \
- ld r4,TI_LOCAL_FLAGS(r3); \
- andi. r0,r4,_TLF_RUNLATCH; \
- beql ppc64_runlatch_on_trampoline; \
-END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
-
/*
* When the idle code in power4_idle puts the CPU into NAP mode,
* it has to do so in a loop, and relies on the external interrupt
@@ -935,7 +914,6 @@ INT_DEFINE_BEGIN(system_reset)
*/
ISET_RI=0
ISTACK=0
- IRECONCILE=0
IKVM_REAL=1
INT_DEFINE_END(system_reset)
@@ -1022,20 +1000,6 @@ EXC_COMMON_BEGIN(system_reset_common)
ld r1,PACA_NMI_EMERG_SP(r13)
subi r1,r1,INT_FRAME_SIZE
__GEN_COMMON_BODY system_reset
- /*
- * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
- * the right thing. We do not want to reconcile because that goes
- * through irq tracing which we don't want in NMI.
- *
- * Save PACAIRQHAPPENED to RESULT (otherwise unused), and set HARD_DIS
- * as we are running with MSR[EE]=0.
- */
- li r10,IRQS_ALL_DISABLED
- stb r10,PACAIRQSOFTMASK(r13)
- lbz r10,PACAIRQHAPPENED(r13)
- std r10,RESULT(r1)
- ori r10,r10,PACA_IRQ_HARD_DIS
- stb r10,PACAIRQHAPPENED(r13)
addi r3,r1,STACK_FRAME_OVERHEAD
bl system_reset_exception
@@ -1051,14 +1015,6 @@ EXC_COMMON_BEGIN(system_reset_common)
subi r10,r10,1
sth r10,PACA_IN_NMI(r13)
- /*
- * Restore soft mask settings.
- */
- ld r10,RESULT(r1)
- stb r10,PACAIRQHAPPENED(r13)
- ld r10,SOFTE(r1)
- stb r10,PACAIRQSOFTMASK(r13)
-
kuap_kernel_restore r9, r10
EXCEPTION_RESTORE_REGS
RFI_TO_USER_OR_KERNEL
@@ -1123,7 +1079,6 @@ INT_DEFINE_BEGIN(machine_check_early)
ISTACK=0
IDAR=1
IDSISR=1
- IRECONCILE=0
IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
INT_DEFINE_END(machine_check_early)
@@ -1205,30 +1160,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
li r10,MSR_RI
mtmsrd r10,1
- /*
- * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see
- * system_reset_common)
- */
- li r10,IRQS_ALL_DISABLED
- stb r10,PACAIRQSOFTMASK(r13)
- lbz r10,PACAIRQHAPPENED(r13)
- std r10,RESULT(r1)
- ori r10,r10,PACA_IRQ_HARD_DIS
- stb r10,PACAIRQHAPPENED(r13)
-
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_early
std r3,RESULT(r1) /* Save result */
ld r12,_MSR(r1)
- /*
- * Restore soft mask settings.
- */
- ld r10,RESULT(r1)
- stb r10,PACAIRQHAPPENED(r13)
- ld r10,SOFTE(r1)
- stb r10,PACAIRQSOFTMASK(r13)
-
#ifdef CONFIG_PPC_P7_NAP
/*
* Check if thread was in power saving mode. We come here when any
@@ -1401,14 +1337,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
*
* Handling:
* - Hash MMU
- * Go to do_hash_page first to see if the HPT can be filled from an entry in
- * the Linux page table. Hash faults can hit in kernel mode in a fairly
+ * Go to do_hash_fault, which attempts to fill the HPT from an entry in the
+ * Linux page table. Hash faults can hit in kernel mode in a fairly
* arbitrary state (e.g., interrupts disabled, locks held) when accessing
* "non-bolted" regions, e.g., vmalloc space. However these should always be
- * backed by Linux page tables.
+ * backed by Linux page table entries.
*
- * If none is found, do a Linux page fault. Linux page faults can happen in
- * kernel mode due to user copy operations of course.
+ * If no entry is found the Linux page fault handler is invoked (by
+ * do_hash_fault). Linux page faults can happen in kernel mode due to user
+ * copy operations of course.
*
* KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
* MMU context, which may cause a DSI in the host, which must go to the
@@ -1437,15 +1374,24 @@ EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
EXC_VIRT_END(data_access, 0x4300, 0x80)
EXC_COMMON_BEGIN(data_access_common)
GEN_COMMON data_access
- ld r4,_DAR(r1)
- ld r5,_DSISR(r1)
+ ld r4,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ andis. r0,r4,DSISR_DABRMATCH@h
+ bne- 1f
BEGIN_MMU_FTR_SECTION
- ld r6,_MSR(r1)
- li r3,0x300
- b do_hash_page /* Try to handle as hpte fault */
+ bl do_hash_fault
MMU_FTR_SECTION_ELSE
- b handle_page_fault
+ bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+ b interrupt_return
+
+1: bl do_break
+ /*
+ * do_break() may have changed the NV GPRS while handling a breakpoint.
+ * If so, we need to restore them with their updated values.
+ */
+ REST_NVGPRS(r1)
+ b interrupt_return
GEN_KVM data_access
@@ -1466,14 +1412,9 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
* on user-handler data structures.
*
* KVM: Same as 0x300, DSLB must test for KVM guest.
- *
- * A dedicated save area EXSLB is used (XXX: but it actually need not be
- * these days, we could use EXGEN).
*/
INT_DEFINE_BEGIN(data_access_slb)
IVEC=0x380
- IAREA=PACA_EXSLB
- IRECONCILE=0
IDAR=1
IKVM_SKIP=1
IKVM_REAL=1
@@ -1487,10 +1428,9 @@ EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
EXC_COMMON_BEGIN(data_access_slb_common)
GEN_COMMON data_access_slb
- ld r4,_DAR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl do_slb_fault
cmpdi r3,0
bne- 1f
@@ -1501,9 +1441,6 @@ MMU_FTR_SECTION_ELSE
li r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1)
- RECONCILE_IRQ_STATE(r10, r11)
- ld r4,_DAR(r1)
- ld r5,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_bad_slb_fault
b interrupt_return
@@ -1538,15 +1475,13 @@ EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
EXC_VIRT_END(instruction_access, 0x4400, 0x80)
EXC_COMMON_BEGIN(instruction_access_common)
GEN_COMMON instruction_access
- ld r4,_DAR(r1)
- ld r5,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
- ld r6,_MSR(r1)
- li r3,0x400
- b do_hash_page /* Try to handle as hpte fault */
+ bl do_hash_fault
MMU_FTR_SECTION_ELSE
- b handle_page_fault
+ bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+ b interrupt_return
GEN_KVM instruction_access
@@ -1562,8 +1497,6 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
*/
INT_DEFINE_BEGIN(instruction_access_slb)
IVEC=0x480
- IAREA=PACA_EXSLB
- IRECONCILE=0
IISIDE=1
IDAR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -1579,10 +1512,9 @@ EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
EXC_COMMON_BEGIN(instruction_access_slb_common)
GEN_COMMON instruction_access_slb
- ld r4,_DAR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl do_slb_fault
cmpdi r3,0
bne- 1f
@@ -1593,9 +1525,6 @@ MMU_FTR_SECTION_ELSE
li r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1)
- RECONCILE_IRQ_STATE(r10, r11)
- ld r4,_DAR(r1)
- ld r5,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_bad_slb_fault
b interrupt_return
@@ -1643,7 +1572,6 @@ EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
EXC_COMMON_BEGIN(hardware_interrupt_common)
GEN_COMMON hardware_interrupt
FINISH_NAP
- RUNLATCH_ON
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_IRQ
b interrupt_return
@@ -1697,6 +1625,51 @@ INT_DEFINE_BEGIN(program_check)
INT_DEFINE_END(program_check)
EXC_REAL_BEGIN(program_check, 0x700, 0x100)
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ /*
+ * There's a short window during boot where although the kernel is
+ * running little endian, any exceptions will cause the CPU to switch
+ * back to big endian. For example a WARN() boils down to a trap
+ * instruction, which will cause a program check, and we end up here but
+ * with the CPU in big endian mode. The first instruction of the program
+ * check handler (in GEN_INT_ENTRY below) is an mtsprg, which when
+ * executed in the wrong endian is an lhzu with a ~3GB displacement from
+ * r3. The content of r3 is random, so that is a load from some random
+ * location, and depending on the system can easily lead to a checkstop,
+ * or an infinitely recursive page fault.
+ *
+ * So to handle that case we have a trampoline here that can detect we
+ * are in the wrong endian and flip us back to the correct endian. We
+ * can't flip MSR[LE] using mtmsr, so we have to use rfid. That requires
+ * backing up SRR0/1 as well as a GPR. To do that we use SPRG0/2/3, as
+ * SPRG1 is already used for the paca. SPRG3 is user readable, but this
+ * trampoline is only active very early in boot, and SPRG3 will be
+ * reinitialised in vdso_getcpu_init() before userspace starts.
+ */
+BEGIN_FTR_SECTION
+ tdi 0,0,0x48 // Trap never, or in reverse endian: b . + 8
+ b 1f // Skip trampoline if endian is correct
+ .long 0xa643707d // mtsprg 0, r11 Backup r11
+ .long 0xa6027a7d // mfsrr0 r11
+ .long 0xa643727d // mtsprg 2, r11 Backup SRR0 in SPRG2
+ .long 0xa6027b7d // mfsrr1 r11
+ .long 0xa643737d // mtsprg 3, r11 Backup SRR1 in SPRG3
+ .long 0xa600607d // mfmsr r11
+ .long 0x01006b69 // xori r11, r11, 1 Invert MSR[LE]
+ .long 0xa6037b7d // mtsrr1 r11
+ .long 0x34076039 // li r11, 0x734
+ .long 0xa6037a7d // mtsrr0 r11
+ .long 0x2400004c // rfid
+ mfsprg r11, 3
+ mtsrr1 r11 // Restore SRR1
+ mfsprg r11, 2
+ mtsrr0 r11 // Restore SRR0
+ mfsprg r11, 0 // Restore r11
+1:
+END_FTR_SECTION(0, 1) // nop out after boot
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+
GEN_INT_ENTRY program_check, virt=0
EXC_REAL_END(program_check, 0x700, 0x100)
EXC_VIRT_BEGIN(program_check, 0x4700, 0x100)
@@ -1755,7 +1728,6 @@ EXC_COMMON_BEGIN(program_check_common)
*/
INT_DEFINE_BEGIN(fp_unavailable)
IVEC=0x800
- IRECONCILE=0
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
@@ -1770,7 +1742,6 @@ EXC_VIRT_END(fp_unavailable, 0x4800, 0x100)
EXC_COMMON_BEGIN(fp_unavailable_common)
GEN_COMMON fp_unavailable
bne 1f /* if from user, just load it up */
- RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl kernel_fp_unavailable_exception
0: trap
@@ -1789,7 +1760,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
b fast_interrupt_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
- RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl fp_unavailable_tm
b interrupt_return
@@ -1832,7 +1802,6 @@ EXC_VIRT_END(decrementer, 0x4900, 0x80)
EXC_COMMON_BEGIN(decrementer_common)
GEN_COMMON decrementer
FINISH_NAP
- RUNLATCH_ON
addi r3,r1,STACK_FRAME_OVERHEAD
bl timer_interrupt
b interrupt_return
@@ -1854,7 +1823,6 @@ INT_DEFINE_BEGIN(hdecrementer)
IVEC=0x980
IHSRR=1
ISTACK=0
- IRECONCILE=0
IKVM_REAL=1
IKVM_VIRT=1
INT_DEFINE_END(hdecrementer)
@@ -1919,12 +1887,11 @@ EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
EXC_COMMON_BEGIN(doorbell_super_common)
GEN_COMMON doorbell_super
FINISH_NAP
- RUNLATCH_ON
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
bl doorbell_exception
#else
- bl unknown_exception
+ bl unknown_async_exception
#endif
b interrupt_return
@@ -2001,12 +1968,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
HMT_MEDIUM
.if ! \virt
- __LOAD_HANDLER(r10, system_call_common)
- mtspr SPRN_SRR0,r10
- ld r10,PACAKMSR(r13)
- mtspr SPRN_SRR1,r10
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
+ __LOAD_HANDLER(r10, system_call_common_real)
+ mtctr r10
+ bctr
.else
li r10,MSR_RI
mtmsrd r10,1 /* Set RI (EE=0) */
@@ -2137,9 +2101,7 @@ EXC_COMMON_BEGIN(h_data_storage_common)
GEN_COMMON h_data_storage
addi r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
- ld r4,_DAR(r1)
- li r5,SIGSEGV
- bl bad_page_fault
+ bl do_bad_page_fault_segv
MMU_FTR_SECTION_ELSE
bl unknown_exception
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
@@ -2230,7 +2192,6 @@ INT_DEFINE_BEGIN(hmi_exception_early)
IHSRR=1
IREALMODE_COMMON=1
ISTACK=0
- IRECONCILE=0
IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
IKVM_REAL=1
INT_DEFINE_END(hmi_exception_early)
@@ -2277,7 +2238,6 @@ EXC_COMMON_BEGIN(hmi_exception_early_common)
EXC_COMMON_BEGIN(hmi_exception_common)
GEN_COMMON hmi_exception
FINISH_NAP
- RUNLATCH_ON
addi r3,r1,STACK_FRAME_OVERHEAD
bl handle_hmi_exception
b interrupt_return
@@ -2307,12 +2267,11 @@ EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
EXC_COMMON_BEGIN(h_doorbell_common)
GEN_COMMON h_doorbell
FINISH_NAP
- RUNLATCH_ON
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
bl doorbell_exception
#else
- bl unknown_exception
+ bl unknown_async_exception
#endif
b interrupt_return
@@ -2341,7 +2300,6 @@ EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
EXC_COMMON_BEGIN(h_virt_irq_common)
GEN_COMMON h_virt_irq
FINISH_NAP
- RUNLATCH_ON
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_IRQ
b interrupt_return
@@ -2388,7 +2346,6 @@ EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
EXC_COMMON_BEGIN(performance_monitor_common)
GEN_COMMON performance_monitor
FINISH_NAP
- RUNLATCH_ON
addi r3,r1,STACK_FRAME_OVERHEAD
bl performance_monitor_exception
b interrupt_return
@@ -2404,7 +2361,6 @@ EXC_COMMON_BEGIN(performance_monitor_common)
*/
INT_DEFINE_BEGIN(altivec_unavailable)
IVEC=0xf20
- IRECONCILE=0
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
@@ -2434,7 +2390,6 @@ BEGIN_FTR_SECTION
b fast_interrupt_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
- RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_tm
b interrupt_return
@@ -2442,7 +2397,6 @@ BEGIN_FTR_SECTION
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
- RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_exception
b interrupt_return
@@ -2458,7 +2412,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
*/
INT_DEFINE_BEGIN(vsx_unavailable)
IVEC=0xf40
- IRECONCILE=0
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
@@ -2487,7 +2440,6 @@ BEGIN_FTR_SECTION
b load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
- RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl vsx_unavailable_tm
b interrupt_return
@@ -2495,7 +2447,6 @@ BEGIN_FTR_SECTION
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
- RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl vsx_unavailable_exception
b interrupt_return
@@ -2830,7 +2781,6 @@ EXC_VIRT_NONE(0x5800, 0x100)
INT_DEFINE_BEGIN(soft_nmi)
IVEC=0x900
ISTACK=0
- IRECONCILE=0 /* Soft-NMI may fire under local_irq_disable */
INT_DEFINE_END(soft_nmi)
/*
@@ -2849,17 +2799,6 @@ EXC_COMMON_BEGIN(soft_nmi_common)
subi r1,r1,INT_FRAME_SIZE
__GEN_COMMON_BODY soft_nmi
- /*
- * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see
- * system_reset_common)
- */
- li r10,IRQS_ALL_DISABLED
- stb r10,PACAIRQSOFTMASK(r13)
- lbz r10,PACAIRQHAPPENED(r13)
- std r10,RESULT(r1)
- ori r10,r10,PACA_IRQ_HARD_DIS
- stb r10,PACAIRQHAPPENED(r13)
-
addi r3,r1,STACK_FRAME_OVERHEAD
bl soft_nmi_interrupt
@@ -2867,14 +2806,6 @@ EXC_COMMON_BEGIN(soft_nmi_common)
li r9,0
mtmsrd r9,1
- /*
- * Restore soft mask settings.
- */
- ld r10,RESULT(r1)
- stb r10,PACAIRQHAPPENED(r13)
- ld r10,SOFTE(r1)
- stb r10,PACAIRQSOFTMASK(r13)
-
kuap_kernel_restore r9, r10
EXCEPTION_RESTORE_REGS hsrr=0
RFI_TO_KERNEL
@@ -3148,9 +3079,6 @@ kvmppc_skip_Hinterrupt:
* come here.
*/
-EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline)
- b __ppc64_runlatch_on
-
USE_FIXED_SECTION(virt_trampolines)
/*
* All code below __end_interrupts is treated as soft-masked. If
@@ -3221,99 +3149,3 @@ disable_machine_check:
RFI_TO_KERNEL
1: mtlr r0
blr
-
-/*
- * Hash table stuff
- */
- .balign IFETCH_ALIGN_BYTES
-do_hash_page:
-#ifdef CONFIG_PPC_BOOK3S_64
- lis r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h
- ori r0,r0,DSISR_BAD_FAULT_64S@l
- and. r0,r5,r0 /* weird error? */
- bne- handle_page_fault /* if not, try to insert a HPTE */
-
- /*
- * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
- * don't call hash_page, just fail the fault. This is required to
- * prevent re-entrancy problems in the hash code, namely perf
- * interrupts hitting while something holds H_PAGE_BUSY, and taking a
- * hash fault. See the comment in hash_preload().
- */
- ld r11, PACA_THREAD_INFO(r13)
- lwz r0,TI_PREEMPT(r11)
- andis. r0,r0,NMI_MASK@h
- bne 77f
-
- /*
- * r3 contains the trap number
- * r4 contains the faulting address
- * r5 contains dsisr
- * r6 msr
- *
- * at return r3 = 0 for success, 1 for page fault, negative for error
- */
- bl __hash_page /* build HPTE if possible */
- cmpdi r3,0 /* see if __hash_page succeeded */
-
- /* Success */
- beq interrupt_return /* Return from exception on success */
-
- /* Error */
- blt- 13f
-
- /* Reload DAR/DSISR into r4/r5 for the DABR check below */
- ld r4,_DAR(r1)
- ld r5,_DSISR(r1)
-#endif /* CONFIG_PPC_BOOK3S_64 */
-
-/* Here we have a page fault that hash_page can't handle. */
-handle_page_fault:
-11: andis. r0,r5,DSISR_DABRMATCH@h
- bne- handle_dabr_fault
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_page_fault
- cmpdi r3,0
- beq+ interrupt_return
- mr r5,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- ld r4,_DAR(r1)
- bl __bad_page_fault
- b interrupt_return
-
-/* We have a data breakpoint exception - handle it */
-handle_dabr_fault:
- ld r4,_DAR(r1)
- ld r5,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_break
- /*
- * do_break() may have changed the NV GPRS while handling a breakpoint.
- * If so, we need to restore them with their updated values.
- */
- REST_NVGPRS(r1)
- b interrupt_return
-
-
-#ifdef CONFIG_PPC_BOOK3S_64
-/* We have a page fault that hash_page could handle but HV refused
- * the PTE insertion
- */
-13: mr r5,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- ld r4,_DAR(r1)
- bl low_hash_fault
- b interrupt_return
-#endif
-
-/*
- * We come here as a result of a DSI at a point where we don't want
- * to call hash_page, such as when we are accessing memory (possibly
- * user memory) inside a PMU interrupt that occurred while interrupts
- * were soft-disabled. We want to invoke the exception handler for
- * the access, or panic if there isn't a handler.
- */
-77: addi r3,r1,STACK_FRAME_OVERHEAD
- li r5,SIGSEGV
- bl bad_page_fault
- b interrupt_return
diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
index a2f72c966baf..5d4706c14572 100644
--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -47,7 +47,7 @@
lwz r1,TASK_STACK-THREAD(r1)
addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
1:
- mtcrf 0x7f, r1
+ mtcrf 0x3f, r1
bt 32 - THREAD_ALIGN_SHIFT, stack_overflow
#else
subi r11, r1, INT_FRAME_SIZE /* use r1 if kernel */
@@ -116,114 +116,44 @@
.endm
.macro SYSCALL_ENTRY trapno
- mfspr r12,SPRN_SPRG_THREAD
mfspr r9, SPRN_SRR1
-#ifdef CONFIG_VMAP_STACK
- mfspr r11, SPRN_SRR0
- mtctr r11
- andi. r11, r9, MSR_PR
+ mfspr r10, SPRN_SRR0
+ LOAD_REG_IMMEDIATE(r11, MSR_KERNEL) /* can take exceptions */
+ lis r12, 1f@h
+ ori r12, r12, 1f@l
+ mtspr SPRN_SRR1, r11
+ mtspr SPRN_SRR0, r12
+ mfspr r12,SPRN_SPRG_THREAD
mr r11, r1
lwz r1,TASK_STACK-THREAD(r12)
- beq- 99f
- addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
- li r10, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
- mtmsr r10
- isync
tovirt(r12, r12)
+ addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
+ rfi
+1:
stw r11,GPR1(r1)
stw r11,0(r1)
mr r11, r1
-#else
- andi. r11, r9, MSR_PR
- lwz r11,TASK_STACK-THREAD(r12)
- beq- 99f
- addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE
- tophys(r11, r11)
- stw r1,GPR1(r11)
- stw r1,0(r11)
- tovirt(r1, r11) /* set new kernel sp */
-#endif
+ stw r10,_NIP(r11)
mflr r10
stw r10, _LINK(r11)
-#ifdef CONFIG_VMAP_STACK
- mfctr r10
-#else
- mfspr r10,SPRN_SRR0
-#endif
- stw r10,_NIP(r11)
mfcr r10
rlwinm r10,r10,0,4,2 /* Clear SO bit in CR */
stw r10,_CCR(r11) /* save registers */
#ifdef CONFIG_40x
rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
-#else
-#ifdef CONFIG_VMAP_STACK
- LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~MSR_IR) /* can take exceptions */
-#else
- LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */
-#endif
- mtmsr r10 /* (except for mach check in rtas) */
#endif
lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
stw r2,GPR2(r11)
addi r10,r10,STACK_FRAME_REGS_MARKER@l
stw r9,_MSR(r11)
- li r2, \trapno + 1
+ li r2, \trapno
stw r10,8(r11)
stw r2,_TRAP(r11)
SAVE_GPR(0, r11)
SAVE_4GPRS(3, r11)
SAVE_2GPRS(7, r11)
- addi r11,r1,STACK_FRAME_OVERHEAD
addi r2,r12,-THREAD
- stw r11,PT_REGS(r12)
-#if defined(CONFIG_40x)
- /* Check to see if the dbcr0 register is set up to debug. Use the
- internal debug mode bit to do this. */
- lwz r12,THREAD_DBCR0(r12)
- andis. r12,r12,DBCR0_IDM@h
-#endif
- ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
-#if defined(CONFIG_40x)
- beq+ 3f
- /* From user and task is ptraced - load up global dbcr0 */
- li r12,-1 /* clear all pending debug events */
- mtspr SPRN_DBSR,r12
- lis r11,global_dbcr0@ha
- tophys(r11,r11)
- addi r11,r11,global_dbcr0@l
- lwz r12,0(r11)
- mtspr SPRN_DBCR0,r12
- lwz r12,4(r11)
- addi r12,r12,-1
- stw r12,4(r11)
-#endif
-
-3:
- tovirt_novmstack r2, r2 /* set r2 to current */
- lis r11, transfer_to_syscall@h
- ori r11, r11, transfer_to_syscall@l
-#ifdef CONFIG_TRACE_IRQFLAGS
- /*
- * If MSR is changing we need to keep interrupts disabled at this point
- * otherwise we might risk taking an interrupt before we tell lockdep
- * they are enabled.
- */
- LOAD_REG_IMMEDIATE(r10, MSR_KERNEL)
- rlwimi r10, r9, 0, MSR_EE
-#else
- LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
-#endif
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
- mtspr SPRN_NRI, r0
-#endif
- mtspr SPRN_SRR1,r10
- mtspr SPRN_SRR0,r11
- rfi /* jump to handler, enable MMU */
-#ifdef CONFIG_40x
- b . /* Prevent prefetch past rfi */
-#endif
-99: b ret_from_kernel_syscall
+ b transfer_to_syscall /* jump to handler */
.endm
.macro save_dar_dsisr_on_stack reg1, reg2, sp
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index a1ae00689e0f..24724a7dad49 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -179,9 +179,9 @@ _ENTRY(saved_ksp_limit)
*/
START_EXCEPTION(0x0300, DataStorage)
EXCEPTION_PROLOG
- mfspr r5, SPRN_ESR /* Grab the ESR, save it, pass arg3 */
+ mfspr r5, SPRN_ESR /* Grab the ESR, save it */
stw r5, _ESR(r11)
- mfspr r4, SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
+ mfspr r4, SPRN_DEAR /* Grab the DEAR, save it */
stw r4, _DEAR(r11)
EXC_XFER_LITE(0x300, handle_page_fault)
@@ -191,9 +191,9 @@ _ENTRY(saved_ksp_limit)
*/
START_EXCEPTION(0x0400, InstructionAccess)
EXCEPTION_PROLOG
- mr r4,r12 /* Pass SRR0 as arg2 */
- stw r4, _DEAR(r11)
- li r5,0 /* Pass zero as arg3 */
+ li r5,0
+ stw r5, _ESR(r11) /* Zero ESR */
+ stw r12, _DEAR(r11) /* SRR0 as DEAR */
EXC_XFER_LITE(0x400, handle_page_fault)
/* 0x0500 - External Interrupt Exception */
@@ -476,6 +476,7 @@ _ENTRY(saved_ksp_limit)
/* continue normal handling for a critical exception... */
2: mfspr r4,SPRN_DBSR
+ stw r4,_ESR(r11) /* DebugException takes DBSR in _ESR */
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_TEMPLATE(DebugException, 0x2002, \
(MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 8e36718f3167..813fa305c33b 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -376,7 +376,7 @@ interrupt_base:
/* Load the next available TLB index */
lwz r13,tlb_44x_index@l(r10)
- bne 2f /* Bail if permission mismach */
+ bne 2f /* Bail if permission mismatch */
/* Increment, rollover, and store TLB index */
addi r13,r13,1
@@ -471,7 +471,7 @@ interrupt_base:
/* Load the next available TLB index */
lwz r13,tlb_44x_index@l(r10)
- bne 2f /* Bail if permission mismach */
+ bne 2f /* Bail if permission mismatch */
/* Increment, rollover, and store TLB index */
addi r13,r13,1
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 52702f3db6df..46dff3f9c31f 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -165,7 +165,7 @@ SystemCall:
/* On the MPC8xx, this is a software emulation interrupt. It occurs
* for all unimplemented and illegal instructions.
*/
- EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD)
+ EXCEPTION(0x1000, SoftEmu, emulation_assist_interrupt, EXC_XFER_STD)
. = 0x1100
/*
@@ -312,14 +312,14 @@ DataStoreTLBMiss:
. = 0x1300
InstructionTLBError:
EXCEPTION_PROLOG
- mr r4,r12
andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
andis. r10,r9,SRR1_ISI_NOPT@h
beq+ .Litlbie
- tlbie r4
+ tlbie r12
/* 0x400 is InstructionAccess exception, needed by bad_page_fault() */
.Litlbie:
- stw r4, _DAR(r11)
+ stw r12, _DAR(r11)
+ stw r5, _DSISR(r11)
EXC_XFER_LITE(0x400, handle_page_fault)
/* This is the data TLB error on the MPC8xx. This could be due to
@@ -364,10 +364,9 @@ do_databreakpoint:
addi r3,r1,STACK_FRAME_OVERHEAD
mfspr r4,SPRN_BAR
stw r4,_DAR(r11)
-#ifdef CONFIG_VMAP_STACK
- lwz r5,_DSISR(r11)
-#else
+#ifndef CONFIG_VMAP_STACK
mfspr r5,SPRN_DSISR
+ stw r5,_DSISR(r11)
#endif
EXC_XFER_STD(0x1c00, do_break)
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index 858fbc8b19f3..727fdab557c9 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -238,8 +238,8 @@ __secondary_hold_acknowledge:
/* System reset */
/* core99 pmac starts the seconary here by changing the vector, and
- putting it back to what it was (unknown_exception) when done. */
- EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
+ putting it back to what it was (unknown_async_exception) when done. */
+ EXCEPTION(0x100, Reset, unknown_async_exception, EXC_XFER_STD)
/* Machine check */
/*
@@ -278,12 +278,6 @@ MachineCheck:
7: EXCEPTION_PROLOG_2
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
-#ifdef CONFIG_VMAP_STACK
- mfspr r4, SPRN_SPRG_THREAD
- tovirt(r4, r4)
- lwz r4, RTAS_SP(r4)
- cmpwi cr1, r4, 0
-#endif
beq cr1, machine_check_tramp
twi 31, 0, 0
#else
@@ -295,6 +289,7 @@ MachineCheck:
DO_KVM 0x300
DataAccess:
#ifdef CONFIG_VMAP_STACK
+#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
mtspr SPRN_SPRG_SCRATCH2,r10
mfspr r10, SPRN_SPRG_THREAD
@@ -311,12 +306,14 @@ BEGIN_MMU_FTR_SECTION
MMU_FTR_SECTION_ELSE
b 1f
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
+#endif
1: EXCEPTION_PROLOG_0 handle_dar_dsisr=1
EXCEPTION_PROLOG_1
b handle_page_fault_tramp_1
#else /* CONFIG_VMAP_STACK */
EXCEPTION_PROLOG handle_dar_dsisr=1
get_and_save_dar_dsisr_on_stack r4, r5, r11
+#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
bne handle_page_fault_tramp_2 /* if not, try to put a PTE */
@@ -324,8 +321,11 @@ BEGIN_MMU_FTR_SECTION
bl hash_page
b handle_page_fault_tramp_1
MMU_FTR_SECTION_ELSE
+#endif
b handle_page_fault_tramp_2
+#ifdef CONFIG_PPC_BOOK3S_604
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
+#endif
#endif /* CONFIG_VMAP_STACK */
/* Instruction access exception. */
@@ -341,12 +341,14 @@ InstructionAccess:
mfspr r11, SPRN_SRR1 /* check whether user or kernel */
stw r11, SRR1(r10)
mfcr r10
+#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */
bne hash_page_isi
.Lhash_page_isi_cont:
mfspr r11, SPRN_SRR1 /* check whether user or kernel */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
+#endif
andi. r11, r11, MSR_PR
EXCEPTION_PROLOG_1
@@ -357,13 +359,15 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
beq 1f /* if so, try to put a PTE */
li r3,0 /* into the hash table */
mr r4,r12 /* SRR0 is fault address */
+#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
bl hash_page
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
+#endif
#endif /* CONFIG_VMAP_STACK */
-1: mr r4,r12
andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
- stw r4, _DAR(r11)
+ stw r5, _DSISR(r11)
+ stw r12, _DAR(r11)
EXC_XFER_LITE(0x400, handle_page_fault)
/* External interrupt */
@@ -640,7 +644,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
#endif
#ifndef CONFIG_TAU_INT
-#define TAUException unknown_exception
+#define TAUException unknown_async_exception
#endif
EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
@@ -685,13 +689,16 @@ handle_page_fault_tramp_1:
#ifdef CONFIG_VMAP_STACK
EXCEPTION_PROLOG_2 handle_dar_dsisr=1
#endif
- lwz r4, _DAR(r11)
lwz r5, _DSISR(r11)
/* fall through */
handle_page_fault_tramp_2:
+ andis. r0, r5, DSISR_DABRMATCH@h
+ bne- 1f
EXC_XFER_LITE(0x300, handle_page_fault)
+1: EXC_XFER_STD(0x300, do_break)
#ifdef CONFIG_VMAP_STACK
+#ifdef CONFIG_PPC_BOOK3S_604
.macro save_regs_thread thread
stw r0, THR0(\thread)
stw r3, THR3(\thread)
@@ -763,6 +770,7 @@ fast_hash_page_return:
mfspr r11, SPRN_SPRG_SCRATCH1
mfspr r10, SPRN_SPRG_SCRATCH0
rfi
+#endif /* CONFIG_PPC_BOOK3S_604 */
stack_overflow:
vmap_stack_overflow_exception
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 74e230c200fb..47857795f50a 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -106,10 +106,8 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
#endif
mfspr r9, SPRN_SRR1
BOOKE_CLEAR_BTB(r11)
- andi. r11, r9, MSR_PR
lwz r11, TASK_STACK - THREAD(r10)
rlwinm r12,r12,0,4,2 /* Clear SO bit in CR */
- beq- 99f
ALLOC_STACK_FRAME(r11, THREAD_SIZE - INT_FRAME_SIZE)
stw r12, _CCR(r11) /* save various registers */
mflr r12
@@ -124,60 +122,15 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
stw r2,GPR2(r11)
addi r12, r12, STACK_FRAME_REGS_MARKER@l
stw r9,_MSR(r11)
- li r2, \trapno + 1
+ li r2, \trapno
stw r12, 8(r11)
stw r2,_TRAP(r11)
SAVE_GPR(0, r11)
SAVE_4GPRS(3, r11)
SAVE_2GPRS(7, r11)
- addi r11,r1,STACK_FRAME_OVERHEAD
addi r2,r10,-THREAD
- stw r11,PT_REGS(r10)
- /* Check to see if the dbcr0 register is set up to debug. Use the
- internal debug mode bit to do this. */
- lwz r12,THREAD_DBCR0(r10)
- andis. r12,r12,DBCR0_IDM@h
- ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
- beq+ 3f
- /* From user and task is ptraced - load up global dbcr0 */
- li r12,-1 /* clear all pending debug events */
- mtspr SPRN_DBSR,r12
- lis r11,global_dbcr0@ha
- tophys(r11,r11)
- addi r11,r11,global_dbcr0@l
-#ifdef CONFIG_SMP
- lwz r10, TASK_CPU(r2)
- slwi r10, r10, 3
- add r11, r11, r10
-#endif
- lwz r12,0(r11)
- mtspr SPRN_DBCR0,r12
- lwz r12,4(r11)
- addi r12,r12,-1
- stw r12,4(r11)
-
-3:
- tovirt(r2, r2) /* set r2 to current */
- lis r11, transfer_to_syscall@h
- ori r11, r11, transfer_to_syscall@l
-#ifdef CONFIG_TRACE_IRQFLAGS
- /*
- * If MSR is changing we need to keep interrupts disabled at this point
- * otherwise we might risk taking an interrupt before we tell lockdep
- * they are enabled.
- */
- lis r10, MSR_KERNEL@h
- ori r10, r10, MSR_KERNEL@l
- rlwimi r10, r9, 0, MSR_EE
-#else
- lis r10, (MSR_KERNEL | MSR_EE)@h
- ori r10, r10, (MSR_KERNEL | MSR_EE)@l
-#endif
- mtspr SPRN_SRR1,r10
- mtspr SPRN_SRR0,r11
- rfi /* jump to handler, enable MMU */
-99: b ret_from_kernel_syscall
+ b transfer_to_syscall /* jump to handler */
.endm
/* To handle the additional exception priority levels on 40x and Book-E
@@ -406,6 +359,7 @@ label:
\
/* continue normal handling for a debug exception... */ \
2: mfspr r4,SPRN_DBSR; \
+ stw r4,_ESR(r11); /* DebugException takes DBSR in _ESR */\
addi r3,r1,STACK_FRAME_OVERHEAD; \
EXC_XFER_TEMPLATE(DebugException, 0x2008, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), debug_transfer_to_handler, ret_from_debug_exc)
@@ -459,6 +413,7 @@ label:
\
/* continue normal handling for a critical exception... */ \
2: mfspr r4,SPRN_DBSR; \
+ stw r4,_ESR(r11); /* DebugException takes DBSR in _ESR */\
addi r3,r1,STACK_FRAME_OVERHEAD; \
EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), crit_transfer_to_handler, ret_from_crit_exc)
@@ -476,9 +431,7 @@ label:
NORMAL_EXCEPTION_PROLOG(INST_STORAGE); \
mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
stw r5,_ESR(r11); \
- mr r4,r12; /* Pass SRR0 as arg2 */ \
- stw r4, _DEAR(r11); \
- li r5,0; /* Pass zero as arg3 */ \
+ stw r12, _DEAR(r11); /* Pass SRR0 as arg2 */ \
EXC_XFER_LITE(0x0400, handle_page_fault)
#define ALIGNMENT_EXCEPTION \
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index fdd4d274c245..3f4a40cccef5 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -364,12 +364,12 @@ interrupt_base:
/* Data Storage Interrupt */
START_EXCEPTION(DataStorage)
NORMAL_EXCEPTION_PROLOG(DATA_STORAGE)
- mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
+ mfspr r5,SPRN_ESR /* Grab the ESR, save it */
stw r5,_ESR(r11)
- mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
+ mfspr r4,SPRN_DEAR /* Grab the DEAR, save it */
+ stw r4, _DEAR(r11)
andis. r10,r5,(ESR_ILK|ESR_DLK)@h
bne 1f
- stw r4, _DEAR(r11)
EXC_XFER_LITE(0x0300, handle_page_fault)
1:
addi r3,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 22f249b6f58d..f9e6d83e6720 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -52,28 +52,32 @@ _GLOBAL(isa300_idle_stop_mayloss)
std r1,PACAR1(r13)
mflr r4
mfcr r5
- /* use stack red zone rather than a new frame for saving regs */
- std r2,-8*0(r1)
- std r14,-8*1(r1)
- std r15,-8*2(r1)
- std r16,-8*3(r1)
- std r17,-8*4(r1)
- std r18,-8*5(r1)
- std r19,-8*6(r1)
- std r20,-8*7(r1)
- std r21,-8*8(r1)
- std r22,-8*9(r1)
- std r23,-8*10(r1)
- std r24,-8*11(r1)
- std r25,-8*12(r1)
- std r26,-8*13(r1)
- std r27,-8*14(r1)
- std r28,-8*15(r1)
- std r29,-8*16(r1)
- std r30,-8*17(r1)
- std r31,-8*18(r1)
- std r4,-8*19(r1)
- std r5,-8*20(r1)
+ /*
+ * Use the stack red zone rather than a new frame for saving regs since
+ * in the case of no GPR loss the wakeup code branches directly back to
+ * the caller without deallocating the stack frame first.
+ */
+ std r2,-8*1(r1)
+ std r14,-8*2(r1)
+ std r15,-8*3(r1)
+ std r16,-8*4(r1)
+ std r17,-8*5(r1)
+ std r18,-8*6(r1)
+ std r19,-8*7(r1)
+ std r20,-8*8(r1)
+ std r21,-8*9(r1)
+ std r22,-8*10(r1)
+ std r23,-8*11(r1)
+ std r24,-8*12(r1)
+ std r25,-8*13(r1)
+ std r26,-8*14(r1)
+ std r27,-8*15(r1)
+ std r28,-8*16(r1)
+ std r29,-8*17(r1)
+ std r30,-8*18(r1)
+ std r31,-8*19(r1)
+ std r4,-8*20(r1)
+ std r5,-8*21(r1)
/* 168 bytes */
PPC_STOP
b . /* catch bugs */
@@ -89,8 +93,8 @@ _GLOBAL(isa300_idle_stop_mayloss)
*/
_GLOBAL(idle_return_gpr_loss)
ld r1,PACAR1(r13)
- ld r4,-8*19(r1)
- ld r5,-8*20(r1)
+ ld r4,-8*20(r1)
+ ld r5,-8*21(r1)
mtlr r4
mtcr r5
/*
@@ -98,25 +102,25 @@ _GLOBAL(idle_return_gpr_loss)
* from PACATOC. This could be avoided for that less common case
* if KVM saved its r2.
*/
- ld r2,-8*0(r1)
- ld r14,-8*1(r1)
- ld r15,-8*2(r1)
- ld r16,-8*3(r1)
- ld r17,-8*4(r1)
- ld r18,-8*5(r1)
- ld r19,-8*6(r1)
- ld r20,-8*7(r1)
- ld r21,-8*8(r1)
- ld r22,-8*9(r1)
- ld r23,-8*10(r1)
- ld r24,-8*11(r1)
- ld r25,-8*12(r1)
- ld r26,-8*13(r1)
- ld r27,-8*14(r1)
- ld r28,-8*15(r1)
- ld r29,-8*16(r1)
- ld r30,-8*17(r1)
- ld r31,-8*18(r1)
+ ld r2,-8*1(r1)
+ ld r14,-8*2(r1)
+ ld r15,-8*3(r1)
+ ld r16,-8*4(r1)
+ ld r17,-8*5(r1)
+ ld r18,-8*6(r1)
+ ld r19,-8*7(r1)
+ ld r20,-8*8(r1)
+ ld r21,-8*9(r1)
+ ld r22,-8*10(r1)
+ ld r23,-8*11(r1)
+ ld r24,-8*12(r1)
+ ld r25,-8*13(r1)
+ ld r26,-8*14(r1)
+ ld r27,-8*15(r1)
+ ld r28,-8*16(r1)
+ ld r29,-8*17(r1)
+ ld r30,-8*18(r1)
+ ld r31,-8*19(r1)
blr
/*
@@ -154,28 +158,32 @@ _GLOBAL(isa206_idle_insn_mayloss)
std r1,PACAR1(r13)
mflr r4
mfcr r5
- /* use stack red zone rather than a new frame for saving regs */
- std r2,-8*0(r1)
- std r14,-8*1(r1)
- std r15,-8*2(r1)
- std r16,-8*3(r1)
- std r17,-8*4(r1)
- std r18,-8*5(r1)
- std r19,-8*6(r1)
- std r20,-8*7(r1)
- std r21,-8*8(r1)
- std r22,-8*9(r1)
- std r23,-8*10(r1)
- std r24,-8*11(r1)
- std r25,-8*12(r1)
- std r26,-8*13(r1)
- std r27,-8*14(r1)
- std r28,-8*15(r1)
- std r29,-8*16(r1)
- std r30,-8*17(r1)
- std r31,-8*18(r1)
- std r4,-8*19(r1)
- std r5,-8*20(r1)
+ /*
+ * Use the stack red zone rather than a new frame for saving regs since
+ * in the case of no GPR loss the wakeup code branches directly back to
+ * the caller without deallocating the stack frame first.
+ */
+ std r2,-8*1(r1)
+ std r14,-8*2(r1)
+ std r15,-8*3(r1)
+ std r16,-8*4(r1)
+ std r17,-8*5(r1)
+ std r18,-8*6(r1)
+ std r19,-8*7(r1)
+ std r20,-8*8(r1)
+ std r21,-8*9(r1)
+ std r22,-8*10(r1)
+ std r23,-8*11(r1)
+ std r24,-8*12(r1)
+ std r25,-8*13(r1)
+ std r26,-8*14(r1)
+ std r27,-8*15(r1)
+ std r28,-8*16(r1)
+ std r29,-8*17(r1)
+ std r30,-8*18(r1)
+ std r31,-8*19(r1)
+ std r4,-8*20(r1)
+ std r5,-8*21(r1)
cmpwi r3,PNV_THREAD_NAP
bne 1f
IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/interrupt.c
index 7c85ed04a164..398cd86b6ada 100644
--- a/arch/powerpc/kernel/syscall_64.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -1,10 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/context_tracking.h>
#include <linux/err.h>
+#include <linux/compat.h>
+
#include <asm/asm-prototypes.h>
#include <asm/kup.h>
#include <asm/cputime.h>
+#include <asm/interrupt.h>
#include <asm/hw_irq.h>
+#include <asm/interrupt.h>
#include <asm/kprobes.h>
#include <asm/paca.h>
#include <asm/ptrace.h>
@@ -24,16 +29,21 @@ notrace long system_call_exception(long r3, long r4, long r5,
{
syscall_fn f;
+ regs->orig_gpr3 = r3;
+
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
+ CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
+ user_exit_irqoff();
+
trace_hardirqs_off(); /* finish reconciling */
- if (IS_ENABLED(CONFIG_PPC_BOOK3S))
+ if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
BUG_ON(!(regs->msr & MSR_RI));
BUG_ON(!(regs->msr & MSR_PR));
BUG_ON(!FULL_REGS(regs));
- BUG_ON(regs->softe != IRQS_ENABLED);
+ BUG_ON(arch_irq_disabled_regs(regs));
#ifdef CONFIG_PPC_PKEY
if (mmu_has_feature(MMU_FTR_PKEY)) {
@@ -59,19 +69,15 @@ notrace long system_call_exception(long r3, long r4, long r5,
isync();
} else
#endif
+#ifdef CONFIG_PPC64
kuap_check_amr();
+#endif
- account_cpu_user_entry();
+ booke_restore_dbcr0();
-#ifdef CONFIG_PPC_SPLPAR
- if (IS_ENABLED(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) &&
- firmware_has_feature(FW_FEATURE_SPLPAR)) {
- struct lppaca *lp = local_paca->lppaca_ptr;
+ account_cpu_user_entry();
- if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx)))
- accumulate_stolen_time();
- }
-#endif
+ account_stolen_time();
/*
* This is not required for the syscall exit path, but makes the
@@ -79,12 +85,12 @@ notrace long system_call_exception(long r3, long r4, long r5,
* frame, or if the unwinder was taught the first stack frame always
* returns to user with IRQS_ENABLED, this store could be avoided!
*/
- regs->softe = IRQS_ENABLED;
+ irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
local_irq_enable();
if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
- if (unlikely(regs->trap == 0x7ff0)) {
+ if (unlikely(trap_is_unsupported_scv(regs))) {
/* Unsupported scv vector */
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
return regs->gpr[3];
@@ -107,7 +113,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
r8 = regs->gpr[8];
} else if (unlikely(r0 >= NR_syscalls)) {
- if (unlikely(regs->trap == 0x7ff0)) {
+ if (unlikely(trap_is_unsupported_scv(regs))) {
/* Unsupported scv vector */
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
return regs->gpr[3];
@@ -118,7 +124,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
/* May be faster to do array_index_nospec? */
barrier_nospec();
- if (unlikely(is_32bit_task())) {
+ if (unlikely(is_compat_task())) {
f = (void *)compat_sys_call_table[r0];
r3 &= 0x00000000ffffffffULL;
@@ -138,8 +144,12 @@ notrace long system_call_exception(long r3, long r4, long r5,
/*
* local irqs must be disabled. Returns false if the caller must re-enable
* them, check for new work, and try again.
+ *
+ * This should be called with local irqs disabled, but if they were previously
+ * enabled when the interrupt handler returns (indicating a process-context /
+ * synchronous interrupt) then irqs_enabled should be true.
*/
-static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri)
+static notrace inline bool __prep_irq_for_enabled_exit(bool clear_ri)
{
/* This must be done with RI=1 because tracing may touch vmaps */
trace_hardirqs_on();
@@ -149,6 +159,7 @@ static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri)
__hard_EE_RI_disable();
else
__hard_irq_disable();
+#ifdef CONFIG_PPC64
if (unlikely(lazy_irq_pending_nocheck())) {
/* Took an interrupt, may have more exit work to do. */
if (clear_ri)
@@ -160,10 +171,63 @@ static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri)
}
local_paca->irq_happened = 0;
irq_soft_mask_set(IRQS_ENABLED);
-
+#endif
return true;
}
+static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri, bool irqs_enabled)
+{
+ if (__prep_irq_for_enabled_exit(clear_ri))
+ return true;
+
+ /*
+ * Must replay pending soft-masked interrupts now. Don't just
+ * local_irq_enabe(); local_irq_disable(); because if we are
+ * returning from an asynchronous interrupt here, another one
+ * might hit after irqs are enabled, and it would exit via this
+ * same path allowing another to fire, and so on unbounded.
+ *
+ * If interrupts were enabled when this interrupt exited,
+ * indicating a process context (synchronous) interrupt,
+ * local_irq_enable/disable can be used, which will enable
+ * interrupts rather than keeping them masked (unclear how
+ * much benefit this is over just replaying for all cases,
+ * because we immediately disable again, so all we're really
+ * doing is allowing hard interrupts to execute directly for
+ * a very small time, rather than being masked and replayed).
+ */
+ if (irqs_enabled) {
+ local_irq_enable();
+ local_irq_disable();
+ } else {
+ replay_soft_interrupts();
+ }
+
+ return false;
+}
+
+static notrace void booke_load_dbcr0(void)
+{
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+ unsigned long dbcr0 = current->thread.debug.dbcr0;
+
+ if (likely(!(dbcr0 & DBCR0_IDM)))
+ return;
+
+ /*
+ * Check to see if the dbcr0 register is set up to debug.
+ * Use the internal debug mode bit to do this.
+ */
+ mtmsr(mfmsr() & ~MSR_DE);
+ if (IS_ENABLED(CONFIG_PPC32)) {
+ isync();
+ global_dbcr0[smp_processor_id()] = mfspr(SPRN_DBCR0);
+ }
+ mtspr(SPRN_DBCR0, dbcr0);
+ mtspr(SPRN_DBSR, -1);
+#endif
+}
+
/*
* This should be called after a syscall returns, with r3 the return value
* from the syscall. If this function returns non-zero, the system call
@@ -177,20 +241,24 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
struct pt_regs *regs,
long scv)
{
- unsigned long *ti_flagsp = &current_thread_info()->flags;
unsigned long ti_flags;
unsigned long ret = 0;
+ bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;
+ CT_WARN_ON(ct_state() == CONTEXT_USER);
+
+#ifdef CONFIG_PPC64
kuap_check_amr();
+#endif
regs->result = r3;
/* Check whether the syscall is issued inside a restartable sequence */
rseq_syscall(regs);
- ti_flags = *ti_flagsp;
+ ti_flags = current_thread_info()->flags;
- if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && !scv) {
+ if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
r3 = -r3;
regs->ccr |= 0x10000000; /* Set SO bit in CR */
@@ -202,7 +270,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
ret = _TIF_RESTOREALL;
else
regs->gpr[3] = r3;
- clear_bits(_TIF_PERSYSCALL_MASK, ti_flagsp);
+ clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
} else {
regs->gpr[3] = r3;
}
@@ -212,9 +280,10 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
ret |= _TIF_RESTOREALL;
}
-again:
local_irq_disable();
- ti_flags = READ_ONCE(*ti_flagsp);
+
+again:
+ ti_flags = READ_ONCE(current_thread_info()->flags);
while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
local_irq_enable();
if (ti_flags & _TIF_NEED_RESCHED) {
@@ -230,7 +299,7 @@ again:
do_notify_resume(regs, ti_flags);
}
local_irq_disable();
- ti_flags = READ_ONCE(*ti_flagsp);
+ ti_flags = READ_ONCE(current_thread_info()->flags);
}
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
@@ -257,9 +326,13 @@ again:
}
}
+ user_enter_irqoff();
+
/* scv need not set RI=0 because SRRs are not used */
- if (unlikely(!prep_irq_for_enabled_exit(!scv))) {
+ if (unlikely(!__prep_irq_for_enabled_exit(is_not_scv))) {
+ user_exit_irqoff();
local_irq_enable();
+ local_irq_disable();
goto again;
}
@@ -267,9 +340,11 @@ again:
local_paca->tm_scratch = regs->msr;
#endif
+ booke_load_dbcr0();
+
account_cpu_user_exit();
-#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */
+#ifdef CONFIG_PPC_BOOK3S_64 /* BOOK3E and ppc32 not using this */
/*
* We do this at the end so that we do context switch with KERNEL AMR
*/
@@ -278,33 +353,32 @@ again:
return ret;
}
-#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */
+#ifndef CONFIG_PPC_BOOK3E_64 /* BOOK3E not yet using this */
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
{
-#ifdef CONFIG_PPC_BOOK3E
- struct thread_struct *ts = &current->thread;
-#endif
- unsigned long *ti_flagsp = &current_thread_info()->flags;
unsigned long ti_flags;
unsigned long flags;
unsigned long ret = 0;
- if (IS_ENABLED(CONFIG_PPC_BOOK3S))
+ if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
BUG_ON(!(regs->msr & MSR_RI));
BUG_ON(!(regs->msr & MSR_PR));
BUG_ON(!FULL_REGS(regs));
- BUG_ON(regs->softe != IRQS_ENABLED);
+ BUG_ON(arch_irq_disabled_regs(regs));
+ CT_WARN_ON(ct_state() == CONTEXT_USER);
/*
* We don't need to restore AMR on the way back to userspace for KUAP.
* AMR can only have been unlocked if we interrupted the kernel.
*/
+#ifdef CONFIG_PPC64
kuap_check_amr();
+#endif
local_irq_save(flags);
again:
- ti_flags = READ_ONCE(*ti_flagsp);
+ ti_flags = READ_ONCE(current_thread_info()->flags);
while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
local_irq_enable(); /* returning to user: may enable */
if (ti_flags & _TIF_NEED_RESCHED) {
@@ -315,7 +389,7 @@ again:
do_notify_resume(regs, ti_flags);
}
local_irq_disable();
- ti_flags = READ_ONCE(*ti_flagsp);
+ ti_flags = READ_ONCE(current_thread_info()->flags);
}
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
@@ -336,23 +410,16 @@ again:
}
}
- if (unlikely(!prep_irq_for_enabled_exit(true))) {
+ user_enter_irqoff();
+
+ if (unlikely(!__prep_irq_for_enabled_exit(true))) {
+ user_exit_irqoff();
local_irq_enable();
local_irq_disable();
goto again;
}
-#ifdef CONFIG_PPC_BOOK3E
- if (unlikely(ts->debug.dbcr0 & DBCR0_IDM)) {
- /*
- * Check to see if the dbcr0 register is set up to debug.
- * Use the internal debug mode bit to do this.
- */
- mtmsr(mfmsr() & ~MSR_DE);
- mtspr(SPRN_DBCR0, ts->debug.dbcr0);
- mtspr(SPRN_DBSR, -1);
- }
-#endif
+ booke_load_dbcr0();
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
local_paca->tm_scratch = regs->msr;
@@ -363,7 +430,9 @@ again:
/*
* We do this at the end so that we do context switch with KERNEL AMR
*/
+#ifdef CONFIG_PPC64
kuap_user_restore(regs);
+#endif
return ret;
}
@@ -372,56 +441,56 @@ void preempt_schedule_irq(void);
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)
{
- unsigned long *ti_flagsp = &current_thread_info()->flags;
unsigned long flags;
unsigned long ret = 0;
+#ifdef CONFIG_PPC64
unsigned long amr;
+#endif
- if (IS_ENABLED(CONFIG_PPC_BOOK3S) && unlikely(!(regs->msr & MSR_RI)))
+ if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x) &&
+ unlikely(!(regs->msr & MSR_RI)))
unrecoverable_exception(regs);
BUG_ON(regs->msr & MSR_PR);
BUG_ON(!FULL_REGS(regs));
+ /*
+ * CT_WARN_ON comes here via program_check_exception,
+ * so avoid recursion.
+ */
+ if (TRAP(regs) != 0x700)
+ CT_WARN_ON(ct_state() == CONTEXT_USER);
+#ifdef CONFIG_PPC64
amr = kuap_get_and_check_amr();
+#endif
- if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
- clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
+ if (unlikely(current_thread_info()->flags & _TIF_EMULATE_STACK_STORE)) {
+ clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
ret = 1;
}
local_irq_save(flags);
- if (regs->softe == IRQS_ENABLED) {
+ if (!arch_irq_disabled_regs(regs)) {
/* Returning to a kernel context with local irqs enabled. */
WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
if (IS_ENABLED(CONFIG_PREEMPT)) {
/* Return to preemptible kernel context */
- if (unlikely(*ti_flagsp & _TIF_NEED_RESCHED)) {
+ if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED)) {
if (preempt_count() == 0)
preempt_schedule_irq();
}
}
- if (unlikely(!prep_irq_for_enabled_exit(true))) {
- /*
- * Can't local_irq_restore to replay if we were in
- * interrupt context. Must replay directly.
- */
- if (irqs_disabled_flags(flags)) {
- replay_soft_interrupts();
- } else {
- local_irq_restore(flags);
- local_irq_save(flags);
- }
- /* Took an interrupt, may have more exit work to do. */
+ if (unlikely(!prep_irq_for_enabled_exit(true, !irqs_disabled_flags(flags))))
goto again;
- }
} else {
/* Returning to a kernel context with local irqs disabled. */
__hard_EE_RI_disable();
+#ifdef CONFIG_PPC64
if (regs->msr & MSR_EE)
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+#endif
}
@@ -434,7 +503,9 @@ again:
* which would cause Read-After-Write stalls. Hence, we take the AMR
* value from the check above.
*/
+#ifdef CONFIG_PPC64
kuap_kernel_restore(regs, amr);
+#endif
return ret;
}
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 5b69a6a72a0e..c00214a4355c 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -25,6 +25,7 @@
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
+#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
@@ -38,6 +39,47 @@
#define DBG(...)
+#ifdef CONFIG_IOMMU_DEBUGFS
+static int iommu_debugfs_weight_get(void *data, u64 *val)
+{
+ struct iommu_table *tbl = data;
+ *val = bitmap_weight(tbl->it_map, tbl->it_size);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n");
+
+static void iommu_debugfs_add(struct iommu_table *tbl)
+{
+ char name[10];
+ struct dentry *liobn_entry;
+
+ sprintf(name, "%08lx", tbl->it_index);
+ liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir);
+
+ debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
+ debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
+ debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
+ debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
+ debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
+ debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
+ debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
+}
+
+static void iommu_debugfs_del(struct iommu_table *tbl)
+{
+ char name[10];
+ struct dentry *liobn_entry;
+
+ sprintf(name, "%08lx", tbl->it_index);
+ liobn_entry = debugfs_lookup(name, iommu_debugfs_dir);
+ if (liobn_entry)
+ debugfs_remove(liobn_entry);
+}
+#else
+static void iommu_debugfs_add(struct iommu_table *tbl){}
+static void iommu_debugfs_del(struct iommu_table *tbl){}
+#endif
+
static int novmerge;
static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
@@ -725,6 +767,8 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
welcomed = 1;
}
+ iommu_debugfs_add(tbl);
+
return tbl;
}
@@ -744,6 +788,8 @@ static void iommu_table_free(struct kref *kref)
return;
}
+ iommu_debugfs_del(tbl);
+
iommu_table_release_pages(tbl);
/* verify that table contains no entries */
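Note on the debugfs hooks added above: they use the kernel's simple-attribute helpers. DEFINE_DEBUGFS_ATTRIBUTE() wraps a get() callback in read-side file_operations, and debugfs_create_file_unsafe() is the variant intended to pair with it, since the attribute code handles the file-lifetime protection itself. A rough standalone sketch of the same pattern, with illustrative names that are not part of the patch:

#include <linux/debugfs.h>

struct my_ctx {
	unsigned long count;
};

/* get() callback: report the current value when the file is read */
static int my_count_get(void *data, u64 *val)
{
	struct my_ctx *ctx = data;

	*val = ctx->count;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(my_count_fops, my_count_get, NULL, "%llu\n");

static void my_debugfs_add(struct my_ctx *ctx, struct dentry *parent)
{
	/* read-only file, backed by the attribute defined above */
	debugfs_create_file_unsafe("count", 0400, parent, ctx, &my_count_fops);
}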
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 6b1eca53e36c..d71fd10a1dd4 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -54,6 +54,7 @@
#include <linux/pgtable.h>
#include <linux/uaccess.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/cache.h>
@@ -65,6 +66,7 @@
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>
#include <asm/hw_irq.h>
+#include <asm/softirq_stack.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
@@ -180,13 +182,18 @@ void notrace restore_interrupts(void)
void replay_soft_interrupts(void)
{
+ struct pt_regs regs;
+
/*
- * We use local_paca rather than get_paca() to avoid all
- * the debug_smp_processor_id() business in this low level
- * function
+ * Be careful here, calling these interrupt handlers can cause
+ * softirqs to be raised, which they may run when calling irq_exit,
+ * which will cause local_irq_enable() to be run, which can then
+ * recurse into this function. Don't keep any state across
+ * interrupt handler calls which may change underneath us.
+ *
+ * We use local_paca rather than get_paca() to avoid all the
+ * debug_smp_processor_id() business in this low level function.
*/
- unsigned char happened = local_paca->irq_happened;
- struct pt_regs regs;
ppc_save_regs(&regs);
regs.softe = IRQS_ENABLED;
@@ -209,7 +216,7 @@ again:
* This is a higher priority interrupt than the others, so
* replay it first.
*/
- if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_HMI)) {
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
local_paca->irq_happened &= ~PACA_IRQ_HMI;
regs.trap = 0xe60;
handle_hmi_exception(&regs);
@@ -217,7 +224,7 @@ again:
hard_irq_disable();
}
- if (happened & PACA_IRQ_DEC) {
+ if (local_paca->irq_happened & PACA_IRQ_DEC) {
local_paca->irq_happened &= ~PACA_IRQ_DEC;
regs.trap = 0x900;
timer_interrupt(&regs);
@@ -225,7 +232,7 @@ again:
hard_irq_disable();
}
- if (happened & PACA_IRQ_EE) {
+ if (local_paca->irq_happened & PACA_IRQ_EE) {
local_paca->irq_happened &= ~PACA_IRQ_EE;
regs.trap = 0x500;
do_IRQ(&regs);
@@ -233,7 +240,7 @@ again:
hard_irq_disable();
}
- if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (happened & PACA_IRQ_DBELL)) {
+ if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
local_paca->irq_happened &= ~PACA_IRQ_DBELL;
if (IS_ENABLED(CONFIG_PPC_BOOK3E))
regs.trap = 0x280;
@@ -245,7 +252,7 @@ again:
}
/* Book3E does not support soft-masking PMI interrupts */
- if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_PMI)) {
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
local_paca->irq_happened &= ~PACA_IRQ_PMI;
regs.trap = 0xf00;
performance_monitor_exception(&regs);
@@ -253,8 +260,7 @@ again:
hard_irq_disable();
}
- happened = local_paca->irq_happened;
- if (happened & ~PACA_IRQ_HARD_DIS) {
+ if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) {
/*
* We are responding to the next interrupt, so interrupt-off
* latencies should be reset here.
@@ -265,6 +271,31 @@ again:
}
}
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
+static inline void replay_soft_interrupts_irqrestore(void)
+{
+ unsigned long kuap_state = get_kuap();
+
+ /*
+ * Check if anything calls local_irq_enable/restore() when KUAP is
+ * disabled (user access enabled). We handle that case here by saving
+ * and re-locking AMR but we shouldn't get here in the first place,
+ * hence the warning.
+ */
+ kuap_check_amr();
+
+ if (kuap_state != AMR_KUAP_BLOCKED)
+ set_kuap(AMR_KUAP_BLOCKED);
+
+ replay_soft_interrupts();
+
+ if (kuap_state != AMR_KUAP_BLOCKED)
+ set_kuap(kuap_state);
+}
+#else
+#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
+#endif
+
notrace void arch_local_irq_restore(unsigned long mask)
{
unsigned char irq_happened;
@@ -328,7 +359,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
irq_soft_mask_set(IRQS_ALL_DISABLED);
trace_hardirqs_off();
- replay_soft_interrupts();
+ replay_soft_interrupts_irqrestore();
local_paca->irq_happened = 0;
trace_hardirqs_on();
@@ -640,8 +671,6 @@ void __do_irq(struct pt_regs *regs)
{
unsigned int irq;
- irq_enter();
-
trace_irq_entry(regs);
/*
@@ -661,11 +690,9 @@ void __do_irq(struct pt_regs *regs)
generic_handle_irq(irq);
trace_irq_exit(regs);
-
- irq_exit();
}
-void do_IRQ(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
{
struct pt_regs *old_regs = set_irq_regs(regs);
void *cursp, *irqsp, *sirqsp;
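Several handlers in this patch (do_IRQ, timer_interrupt, TAUException, handle_hmi_exception) lose their open-coded irq_enter()/irq_exit() calls because the DEFINE_INTERRUPT_HANDLER_ASYNC() wrapper from asm/interrupt.h now performs that entry/exit accounting in one place. A much simplified sketch of the idea follows; this is not the real macro, which also deals with soft-mask and context-tracking state:

#include <linux/hardirq.h>	/* irq_enter() / irq_exit() */
#include <linux/ptrace.h>	/* struct pt_regs */

/* Illustrative only; the real wrappers live in arch/powerpc/include/asm/interrupt.h. */
#define EXAMPLE_ASYNC_HANDLER(func)					\
	static void ____##func(struct pt_regs *regs);			\
									\
	void func(struct pt_regs *regs)					\
	{								\
		irq_enter();		/* common async entry work */	\
		____##func(regs);					\
		irq_exit();		/* common async exit work */	\
	}								\
									\
	static void ____##func(struct pt_regs *regs)

EXAMPLE_ASYNC_HANDLER(example_async_irq)
{
	/* handler body runs with the bookkeeping already done */
}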
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 9f3e133b57b7..11f0cae086ed 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -17,22 +17,15 @@
#include <linux/irq_work.h>
#include <linux/extable.h>
#include <linux/ftrace.h>
+#include <linux/memblock.h>
+#include <asm/interrupt.h>
#include <asm/machdep.h>
#include <asm/mce.h>
#include <asm/nmi.h>
+#include <asm/asm-prototypes.h>
-static DEFINE_PER_CPU(int, mce_nest_count);
-static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);
-
-/* Queue for delayed MCE events. */
-static DEFINE_PER_CPU(int, mce_queue_count);
-static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);
-
-/* Queue for delayed MCE UE events. */
-static DEFINE_PER_CPU(int, mce_ue_count);
-static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
- mce_ue_event_queue);
+#include "setup.h"
static void machine_check_process_queued_event(struct irq_work *work);
static void machine_check_ue_irq_work(struct irq_work *work);
@@ -103,9 +96,10 @@ void save_mce_event(struct pt_regs *regs, long handled,
struct mce_error_info *mce_err,
uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
- int index = __this_cpu_inc_return(mce_nest_count) - 1;
- struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
+ int index = local_paca->mce_info->mce_nest_count++;
+ struct machine_check_event *mce;
+ mce = &local_paca->mce_info->mce_event[index];
/*
* Return if we don't have enough space to log mce event.
* mce_nest_count may go beyond MAX_MC_EVT but that's ok,
@@ -191,7 +185,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
*/
int get_mce_event(struct machine_check_event *mce, bool release)
{
- int index = __this_cpu_read(mce_nest_count) - 1;
+ int index = local_paca->mce_info->mce_nest_count - 1;
struct machine_check_event *mc_evt;
int ret = 0;
@@ -201,7 +195,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
/* Check if we have MCE info to process. */
if (index < MAX_MC_EVT) {
- mc_evt = this_cpu_ptr(&mce_event[index]);
+ mc_evt = &local_paca->mce_info->mce_event[index];
/* Copy the event structure and release the original */
if (mce)
*mce = *mc_evt;
@@ -211,7 +205,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
}
/* Decrement the count to free the slot. */
if (release)
- __this_cpu_dec(mce_nest_count);
+ local_paca->mce_info->mce_nest_count--;
return ret;
}
@@ -233,13 +227,14 @@ static void machine_check_ue_event(struct machine_check_event *evt)
{
int index;
- index = __this_cpu_inc_return(mce_ue_count) - 1;
+ index = local_paca->mce_info->mce_ue_count++;
/* If queue is full, just return for now. */
if (index >= MAX_MC_EVT) {
- __this_cpu_dec(mce_ue_count);
+ local_paca->mce_info->mce_ue_count--;
return;
}
- memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));
+ memcpy(&local_paca->mce_info->mce_ue_event_queue[index],
+ evt, sizeof(*evt));
/* Queue work to process this event later. */
irq_work_queue(&mce_ue_event_irq_work);
@@ -256,13 +251,14 @@ void machine_check_queue_event(void)
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
return;
- index = __this_cpu_inc_return(mce_queue_count) - 1;
+ index = local_paca->mce_info->mce_queue_count++;
/* If queue is full, just return for now. */
if (index >= MAX_MC_EVT) {
- __this_cpu_dec(mce_queue_count);
+ local_paca->mce_info->mce_queue_count--;
return;
}
- memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));
+ memcpy(&local_paca->mce_info->mce_event_queue[index],
+ &evt, sizeof(evt));
/* Queue irq work to process this event later. */
irq_work_queue(&mce_event_process_work);
@@ -289,9 +285,9 @@ static void machine_process_ue_event(struct work_struct *work)
int index;
struct machine_check_event *evt;
- while (__this_cpu_read(mce_ue_count) > 0) {
- index = __this_cpu_read(mce_ue_count) - 1;
- evt = this_cpu_ptr(&mce_ue_event_queue[index]);
+ while (local_paca->mce_info->mce_ue_count > 0) {
+ index = local_paca->mce_info->mce_ue_count - 1;
+ evt = &local_paca->mce_info->mce_ue_event_queue[index];
blocking_notifier_call_chain(&mce_notifier_list, 0, evt);
#ifdef CONFIG_MEMORY_FAILURE
/*
@@ -304,7 +300,7 @@ static void machine_process_ue_event(struct work_struct *work)
*/
if (evt->error_type == MCE_ERROR_TYPE_UE) {
if (evt->u.ue_error.ignore_event) {
- __this_cpu_dec(mce_ue_count);
+ local_paca->mce_info->mce_ue_count--;
continue;
}
@@ -320,7 +316,7 @@ static void machine_process_ue_event(struct work_struct *work)
"was generated\n");
}
#endif
- __this_cpu_dec(mce_ue_count);
+ local_paca->mce_info->mce_ue_count--;
}
}
/*
@@ -338,17 +334,17 @@ static void machine_check_process_queued_event(struct irq_work *work)
* For now just print it to console.
* TODO: log this error event to FSP or nvram.
*/
- while (__this_cpu_read(mce_queue_count) > 0) {
- index = __this_cpu_read(mce_queue_count) - 1;
- evt = this_cpu_ptr(&mce_event_queue[index]);
+ while (local_paca->mce_info->mce_queue_count > 0) {
+ index = local_paca->mce_info->mce_queue_count - 1;
+ evt = &local_paca->mce_info->mce_event_queue[index];
if (evt->error_type == MCE_ERROR_TYPE_UE &&
evt->u.ue_error.ignore_event) {
- __this_cpu_dec(mce_queue_count);
+ local_paca->mce_info->mce_queue_count--;
continue;
}
machine_check_print_event_info(evt, false, false);
- __this_cpu_dec(mce_queue_count);
+ local_paca->mce_info->mce_queue_count--;
}
}
@@ -588,15 +584,9 @@ EXPORT_SYMBOL_GPL(machine_check_print_event_info);
*
* regs->nip and regs->msr contains srr0 and ssr1.
*/
-long notrace machine_check_early(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early)
{
long handled = 0;
- u8 ftrace_enabled = this_cpu_get_ftrace_enabled();
-
- this_cpu_set_ftrace_enabled(0);
- /* Do not use nmi_enter/exit for pseries hpte guest */
- if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
- nmi_enter();
hv_nmi_check_nonrecoverable(regs);
@@ -606,11 +596,6 @@ long notrace machine_check_early(struct pt_regs *regs)
if (ppc_md.machine_check_early)
handled = ppc_md.machine_check_early(regs);
- if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
- nmi_exit();
-
- this_cpu_set_ftrace_enabled(ftrace_enabled);
-
return handled;
}
@@ -722,7 +707,7 @@ long hmi_handle_debugtrig(struct pt_regs *regs)
/*
* Return values:
*/
-long hmi_exception_realmode(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode)
{
int ret;
@@ -741,3 +726,24 @@ long hmi_exception_realmode(struct pt_regs *regs)
return 1;
}
+
+void __init mce_init(void)
+{
+ struct mce_info *mce_info;
+ u64 limit;
+ int i;
+
+ limit = min(ppc64_bolted_size(), ppc64_rma_size);
+ for_each_possible_cpu(i) {
+ mce_info = memblock_alloc_try_nid(sizeof(*mce_info),
+ __alignof__(*mce_info),
+ MEMBLOCK_LOW_LIMIT,
+ limit, cpu_to_node(i));
+ if (!mce_info)
+ goto err;
+ paca_ptrs[i]->mce_info = mce_info;
+ }
+ return;
+err:
+ panic("Failed to allocate memory for MCE event data\n");
+}
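mce_init() above gives each CPU its mce_info through the paca instead of per-CPU variables, and caps the allocation at min(ppc64_bolted_size(), ppc64_rma_size). A plausible reading, which is an inference rather than something the patch states, is that the early machine-check path may run in real mode or with only bolted translations available, so the event buffers must live in memory reachable under those constraints. The bounded memblock allocation idiom used here, shown as a small sketch with hypothetical names:

#include <linux/memblock.h>

/* Sketch: zeroed, node-preferred allocation kept below a physical limit. */
static void * __init alloc_below_limit(size_t size, phys_addr_t limit, int nid)
{
	return memblock_alloc_try_nid(size, __alignof__(long),
				      MEMBLOCK_LOW_LIMIT,	/* min_addr */
				      limit,			/* max_addr */
				      nid);
}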
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index 69bfe96884e2..7f7cdbeacd1a 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -142,29 +142,10 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
}
/*
- * emulate_step() requires insn to be emulated as
- * second parameter. Load register 'r4' with the
- * instruction.
- */
-void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
-{
- /* addis r4,0,(insn)@h */
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_INST_ADDIS | ___PPC_RT(4) |
- ((val >> 16) & 0xffff)));
- addr++;
-
- /* ori r4,r4,(insn)@l */
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_INST_ORI | ___PPC_RA(4) |
- ___PPC_RS(4) | (val & 0xffff)));
-}
-
-/*
* Generate instructions to load provided immediate 64-bit value
* to register 'reg' and patch these instructions at 'addr'.
*/
-void patch_imm64_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
+static void patch_imm64_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
/* lis reg,(op)@highest */
patch_instruction((struct ppc_inst *)addr,
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 2b555997b295..001e90cd8948 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1699,3 +1699,13 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
+
+
+static int __init discover_phbs(void)
+{
+ if (ppc_md.discover_phbs)
+ ppc_md.discover_phbs();
+
+ return 0;
+}
+core_initcall(discover_phbs);
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index e99b7c547d7e..61571ae23953 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -443,46 +443,6 @@ void *pci_traverse_device_nodes(struct device_node *start,
}
EXPORT_SYMBOL_GPL(pci_traverse_device_nodes);
-static struct pci_dn *pci_dn_next_one(struct pci_dn *root,
- struct pci_dn *pdn)
-{
- struct list_head *next = pdn->child_list.next;
-
- if (next != &pdn->child_list)
- return list_entry(next, struct pci_dn, list);
-
- while (1) {
- if (pdn == root)
- return NULL;
-
- next = pdn->list.next;
- if (next != &pdn->parent->child_list)
- break;
-
- pdn = pdn->parent;
- }
-
- return list_entry(next, struct pci_dn, list);
-}
-
-void *traverse_pci_dn(struct pci_dn *root,
- void *(*fn)(struct pci_dn *, void *),
- void *data)
-{
- struct pci_dn *pdn = root;
- void *ret;
-
- /* Only scan the child nodes */
- for (pdn = pci_dn_next_one(root, pdn); pdn;
- pdn = pci_dn_next_one(root, pdn)) {
- ret = fn(pdn, data);
- if (ret)
- return ret;
- }
-
- return NULL;
-}
-
static void *add_pdn(struct device_node *dn, void *data)
{
struct pci_controller *hose = data;
@@ -521,28 +481,6 @@ void pci_devs_phb_init_dynamic(struct pci_controller *phb)
pci_traverse_device_nodes(dn, add_pdn, phb);
}
-/**
- * pci_devs_phb_init - Initialize phbs and pci devs under them.
- *
- * This routine walks over all phb's (pci-host bridges) on the
- * system, and sets up assorted pci-related structures
- * (including pci info in the device node structs) for each
- * pci device found underneath. This routine runs once,
- * early in the boot sequence.
- */
-static int __init pci_devs_phb_init(void)
-{
- struct pci_controller *phb, *tmp;
-
- /* This must be done first so the device nodes have valid pci info! */
- list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
- pci_devs_phb_init_dynamic(phb);
-
- return 0;
-}
-
-core_initcall(pci_devs_phb_init);
-
static void pci_dev_pdn_setup(struct pci_dev *pdev)
{
struct pci_dn *pdn;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index a66f435dabbf..3231c2df9e26 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -41,6 +41,7 @@
#include <linux/pkeys.h>
#include <linux/seq_buf.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
@@ -659,11 +660,10 @@ static void do_break_handler(struct pt_regs *regs)
}
}
-void do_break (struct pt_regs *regs, unsigned long address,
- unsigned long error_code)
+DEFINE_INTERRUPT_HANDLER(do_break)
{
current->thread.trap_nr = TRAP_HWBKPT;
- if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
+ if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, regs->dsisr,
11, SIGSEGV) == NOTIFY_STOP)
return;
@@ -681,7 +681,7 @@ void do_break (struct pt_regs *regs, unsigned long address,
do_break_handler(regs);
/* Deliver the signal to userspace */
- force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address);
+ force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)regs->dar);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
@@ -1670,7 +1670,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
/* Copy registers */
sp -= sizeof(struct pt_regs);
childregs = (struct pt_regs *) sp;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
/* kernel thread */
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gpr[1] = sp + sizeof(struct pt_regs);
@@ -2047,6 +2047,9 @@ static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
unsigned long stack_page;
unsigned long cpu = task_cpu(p);
+ if (!paca_ptrs)
+ return 0;
+
stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
return 1;
@@ -2176,7 +2179,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack,
* See if this is an exception frame.
* We look for the "regshere" marker in the current frame.
*/
- if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
+ if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS)
&& stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
struct pt_regs *regs = (struct pt_regs *)
(sp + STACK_FRAME_OVERHEAD);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index ae3c41730367..9a4797d1d40d 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -707,7 +707,7 @@ static void __init save_fscr_to_task(void)
init_task.thread.fscr = mfspr(SPRN_FSCR);
}
#else
-static inline void save_fscr_to_task(void) {};
+static inline void save_fscr_to_task(void) {}
#endif
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index e9d4eb6144e1..ccf77b985c8f 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1331,14 +1331,10 @@ static void __init prom_check_platform_support(void)
if (prop_len > sizeof(vec))
prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
prop_len);
- prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
- &vec, sizeof(vec));
- for (i = 0; i < sizeof(vec); i += 2) {
- prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
- , vec[i]
- , vec[i + 1]);
- prom_parse_platform_support(vec[i], vec[i + 1],
- &supported);
+ prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
+ for (i = 0; i < prop_len; i += 2) {
+ prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
+ prom_parse_platform_support(vec[i], vec[i + 1], &supported);
}
}
diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c
index 3d44b73adb83..4f3d4ff3728c 100644
--- a/arch/powerpc/kernel/ptrace/ptrace.c
+++ b/arch/powerpc/kernel/ptrace/ptrace.c
@@ -262,8 +262,6 @@ long do_syscall_trace_enter(struct pt_regs *regs)
{
u32 flags;
- user_exit();
-
flags = READ_ONCE(current_thread_info()->flags) &
(_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);
@@ -340,8 +338,6 @@ void do_syscall_trace_leave(struct pt_regs *regs)
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
-
- user_enter();
}
void __init pt_regs_check(void);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 71f38e9248be..bee984b1887b 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -64,6 +64,7 @@
#include <asm/mmu_context.h>
#include <asm/cpu_has_feature.h>
#include <asm/kasan.h>
+#include <asm/mce.h>
#include "setup.h"
@@ -237,18 +238,17 @@ static int show_cpuinfo(struct seq_file *m, void *v)
maj = (pvr >> 8) & 0xFF;
min = pvr & 0xFF;
- seq_printf(m, "processor\t: %lu\n", cpu_id);
- seq_printf(m, "cpu\t\t: ");
+ seq_printf(m, "processor\t: %lu\ncpu\t\t: ", cpu_id);
if (cur_cpu_spec->pvr_mask && cur_cpu_spec->cpu_name)
- seq_printf(m, "%s", cur_cpu_spec->cpu_name);
+ seq_puts(m, cur_cpu_spec->cpu_name);
else
seq_printf(m, "unknown (%08x)", pvr);
if (cpu_has_feature(CPU_FTR_ALTIVEC))
- seq_printf(m, ", altivec supported");
+ seq_puts(m, ", altivec supported");
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
#ifdef CONFIG_TAU
if (cpu_has_feature(CPU_FTR_TAU)) {
@@ -327,7 +327,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "bogomips\t: %lu.%02lu\n", loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
/* If this is the last cpu, print the summary */
if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
@@ -938,6 +938,7 @@ void __init setup_arch(char **cmdline_p)
exc_lvl_early_init();
emergency_stack_init();
+ mce_init();
smp_release_cpus();
initmem_init();
diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
index 2dd0d9cb5a20..84058bbc8fe9 100644
--- a/arch/powerpc/kernel/setup.h
+++ b/arch/powerpc/kernel/setup.h
@@ -14,31 +14,31 @@ void irqstack_early_init(void);
#ifdef CONFIG_PPC32
void setup_power_save(void);
#else
-static inline void setup_power_save(void) { };
+static inline void setup_power_save(void) { }
#endif
#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
void check_smt_enabled(void);
#else
-static inline void check_smt_enabled(void) { };
+static inline void check_smt_enabled(void) { }
#endif
#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void setup_tlb_core_data(void);
#else
-static inline void setup_tlb_core_data(void) { };
+static inline void setup_tlb_core_data(void) { }
#endif
#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_BOOKE) || defined(CONFIG_40x)
void exc_lvl_early_init(void);
#else
-static inline void exc_lvl_early_init(void) { };
+static inline void exc_lvl_early_init(void) { }
#endif
#if defined(CONFIG_PPC64) || defined(CONFIG_VMAP_STACK)
void emergency_stack_init(void);
#else
-static inline void emergency_stack_init(void) { };
+static inline void emergency_stack_init(void) { }
#endif
#ifdef CONFIG_PPC64
@@ -55,7 +55,7 @@ extern unsigned long spr_default_dscr;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
void kvm_cma_reserve(void);
#else
-static inline void kvm_cma_reserve(void) { };
+static inline void kvm_cma_reserve(void) { }
#endif
#ifdef CONFIG_TAU
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index c28e949cc222..560ed8b975e7 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -67,6 +67,7 @@
#include <asm/kup.h>
#include <asm/early_ioremap.h>
#include <asm/pgalloc.h>
+#include <asm/asm-prototypes.h>
#include "setup.h"
@@ -258,7 +259,7 @@ static void cpu_ready_for_interrupts(void)
unsigned long spr_default_dscr = 0;
-void __init record_spr_defaults(void)
+static void __init record_spr_defaults(void)
{
if (early_cpu_has_feature(CPU_FTR_DSCR))
spr_default_dscr = mfspr(SPRN_DSCR);
@@ -1008,7 +1009,7 @@ void rfi_flush_enable(bool enable)
rfi_flush = enable;
}
-void entry_flush_enable(bool enable)
+static void entry_flush_enable(bool enable)
{
if (enable) {
do_entry_flush_fixups(enabled_flush_types);
@@ -1020,7 +1021,7 @@ void entry_flush_enable(bool enable)
entry_flush = enable;
}
-void uaccess_flush_enable(bool enable)
+static void uaccess_flush_enable(bool enable)
{
if (enable) {
do_uaccess_flush_fixups(enabled_flush_types);
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 53782aa60ade..9ded046edb0e 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -282,8 +282,6 @@ static void do_signal(struct task_struct *tsk)
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
- user_exit();
-
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
@@ -299,8 +297,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
tracehook_notify_resume(regs);
rseq_handle_notify_resume(NULL, regs);
}
-
- user_enter();
}
static unsigned long get_tm_stackpointer(struct task_struct *tsk)
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 934cbdf6dd10..75ee918a120a 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -929,8 +929,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
regs->gpr[3] = ksig->sig;
regs->gpr[4] = (unsigned long) sc;
regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
- /* enter the signal handler in big-endian mode */
+ /* enter the signal handler in native-endian mode */
regs->msr &= ~MSR_LE;
+ regs->msr |= (MSR_KERNEL & MSR_LE);
return 0;
failed:
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 9e2246e80efd..5a4d59a1070d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -34,6 +34,7 @@
#include <linux/random.h>
#include <linux/stackprotector.h>
#include <linux/pgtable.h>
+#include <linux/clockchips.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
@@ -576,7 +577,7 @@ void tick_broadcast(const struct cpumask *mask)
#endif
#ifdef CONFIG_DEBUGGER
-void debugger_ipi_callback(struct pt_regs *regs)
+static void debugger_ipi_callback(struct pt_regs *regs)
{
debugger_ipi(regs);
}
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index d36c6391eaf5..16ff0399a257 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -59,57 +59,64 @@ unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
/*
* long long munging:
* The 32 bit ABI passes long longs in an odd even register pair.
+ * High and low parts are swapped depending on endian mode,
+ * so define a macro (similar to mips linux32) to handle that.
*/
+#ifdef __LITTLE_ENDIAN__
+#define merge_64(low, high) ((u64)high << 32) | low
+#else
+#define merge_64(high, low) ((u64)high << 32) | low
+#endif
compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count,
- u32 reg6, u32 poshi, u32 poslo)
+ u32 reg6, u32 pos1, u32 pos2)
{
- return ksys_pread64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
+ return ksys_pread64(fd, ubuf, count, merge_64(pos1, pos2));
}
compat_ssize_t compat_sys_pwrite64(unsigned int fd, const char __user *ubuf, compat_size_t count,
- u32 reg6, u32 poshi, u32 poslo)
+ u32 reg6, u32 pos1, u32 pos2)
{
- return ksys_pwrite64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
+ return ksys_pwrite64(fd, ubuf, count, merge_64(pos1, pos2));
}
-compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offhi, u32 offlo, u32 count)
+compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offset1, u32 offset2, u32 count)
{
- return ksys_readahead(fd, ((loff_t)offhi << 32) | offlo, count);
+ return ksys_readahead(fd, merge_64(offset1, offset2), count);
}
asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4,
- unsigned long high, unsigned long low)
+ unsigned long len1, unsigned long len2)
{
- return ksys_truncate(path, (high << 32) | low);
+ return ksys_truncate(path, merge_64(len1, len2));
}
-asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
- u32 lenhi, u32 lenlo)
+asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2,
+ u32 len1, u32 len2)
{
- return ksys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
- ((loff_t)lenhi << 32) | lenlo);
+ return ksys_fallocate(fd, mode, ((loff_t)offset1 << 32) | offset2,
+ merge_64(len1, len2));
}
-asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long high,
- unsigned long low)
+asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1,
+ unsigned long len2)
{
- return ksys_ftruncate(fd, (high << 32) | low);
+ return ksys_ftruncate(fd, merge_64(len1, len2));
}
-long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
+long ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2,
size_t len, int advice)
{
- return ksys_fadvise64_64(fd, (u64)offset_high << 32 | offset_low, len,
+ return ksys_fadvise64_64(fd, merge_64(offset1, offset2), len,
advice);
}
asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,
- unsigned offset_hi, unsigned offset_lo,
- unsigned nbytes_hi, unsigned nbytes_lo)
+ unsigned offset1, unsigned offset2,
+ unsigned nbytes1, unsigned nbytes2)
{
- loff_t offset = ((loff_t)offset_hi << 32) | offset_lo;
- loff_t nbytes = ((loff_t)nbytes_hi << 32) | nbytes_lo;
+ loff_t offset = merge_64(offset1, offset2);
+ loff_t nbytes = merge_64(nbytes1, nbytes2);
return ksys_sync_file_range(fd, offset, nbytes, flags);
}
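The merge_64() macro introduced above encodes the 32-bit ABI rule for passing 64-bit values in a register pair: the first register holds the low word on little-endian and the high word on big-endian, so the macro simply names its parameters in the order the ABI delivers them. The same logic written out as an illustrative helper, not part of the patch:

#include <linux/types.h>

/* Reassemble a 64-bit offset from two 32-bit syscall arguments. */
static inline u64 example_merge(u32 first, u32 second)
{
#ifdef __LITTLE_ENDIAN__
	return ((u64)second << 32) | first;	/* first = low, second = high */
#else
	return ((u64)first << 32) | second;	/* first = high, second = low */
#endif
}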
diff --git a/arch/powerpc/kernel/syscalls/Makefile b/arch/powerpc/kernel/syscalls/Makefile
index 27b48954808d..9e3be295dbba 100644
--- a/arch/powerpc/kernel/syscalls/Makefile
+++ b/arch/powerpc/kernel/syscalls/Makefile
@@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm
_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
-syscall := $(srctree)/$(src)/syscall.tbl
+syscall := $(src)/syscall.tbl
syshdr := $(srctree)/$(src)/syscallhdr.sh
systbl := $(srctree)/$(src)/syscalltbl.sh
@@ -22,31 +22,31 @@ quiet_cmd_systbl = SYSTBL $@
'$(systbl_offset_$(basetarget))'
syshdr_abis_unistd_32 := common,nospu,32
-$(uapi)/unistd_32.h: $(syscall) $(syshdr)
+$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
syshdr_abis_unistd_64 := common,nospu,64
-$(uapi)/unistd_64.h: $(syscall) $(syshdr)
+$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
systbl_abis_syscall_table_32 := common,nospu,32
systbl_abi_syscall_table_32 := 32
-$(kapi)/syscall_table_32.h: $(syscall) $(systbl)
+$(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
systbl_abis_syscall_table_64 := common,nospu,64
systbl_abi_syscall_table_64 := 64
-$(kapi)/syscall_table_64.h: $(syscall) $(systbl)
+$(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
systbl_abis_syscall_table_c32 := common,nospu,32
systbl_abi_syscall_table_c32 := c32
-$(kapi)/syscall_table_c32.h: $(syscall) $(systbl)
+$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
systbl_abis_syscall_table_spu := common,spu
systbl_abi_syscall_table_spu := spu
-$(kapi)/syscall_table_spu.h: $(syscall) $(systbl)
+$(kapi)/syscall_table_spu.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
uapisyshdr-y += unistd_32.h unistd_64.h
@@ -55,9 +55,10 @@ kapisyshdr-y += syscall_table_32.h \
syscall_table_c32.h \
syscall_table_spu.h
-targets += $(uapisyshdr-y) $(kapisyshdr-y)
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
PHONY += all
-all: $(addprefix $(uapi)/,$(uapisyshdr-y))
-all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+all: $(uapisyshdr-y) $(kapisyshdr-y)
@:
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index f744eb5cba88..0b2480cf3e47 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -9,9 +9,7 @@
#
0 nospu restart_syscall sys_restart_syscall
1 nospu exit sys_exit
-2 32 fork ppc_fork sys_fork
-2 64 fork sys_fork
-2 spu fork sys_ni_syscall
+2 nospu fork sys_fork
3 common read sys_read
4 common write sys_write
5 common open sys_open compat_sys_open
@@ -160,9 +158,7 @@
119 32 sigreturn sys_sigreturn compat_sys_sigreturn
119 64 sigreturn sys_ni_syscall
119 spu sigreturn sys_ni_syscall
-120 32 clone ppc_clone sys_clone
-120 64 clone sys_clone
-120 spu clone sys_ni_syscall
+120 nospu clone sys_clone
121 common setdomainname sys_setdomainname
122 common uname sys_newuname
123 common modify_ldt sys_ni_syscall
@@ -244,9 +240,7 @@
186 spu sendfile sys_sendfile64
187 common getpmsg sys_ni_syscall
188 common putpmsg sys_ni_syscall
-189 32 vfork ppc_vfork sys_vfork
-189 64 vfork sys_vfork
-189 spu vfork sys_ni_syscall
+189 nospu vfork sys_vfork
190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
191 common readahead sys_readahead compat_sys_readahead
192 32 mmap2 sys_mmap2 compat_sys_mmap2
@@ -322,9 +316,7 @@
248 32 clock_nanosleep sys_clock_nanosleep_time32
248 64 clock_nanosleep sys_clock_nanosleep
248 spu clock_nanosleep sys_clock_nanosleep
-249 32 swapcontext ppc_swapcontext compat_sys_swapcontext
-249 64 swapcontext sys_swapcontext
-249 spu swapcontext sys_ni_syscall
+249 nospu swapcontext sys_swapcontext compat_sys_swapcontext
250 common tgkill sys_tgkill
251 32 utimes sys_utimes_time32
251 64 utimes sys_utimes
@@ -522,12 +514,11 @@
432 common fsmount sys_fsmount
433 common fspick sys_fspick
434 common pidfd_open sys_pidfd_open
-435 32 clone3 ppc_clone3 sys_clone3
-435 64 clone3 sys_clone3
-435 spu clone3 sys_ni_syscall
+435 nospu clone3 sys_clone3
436 common close_range sys_close_range
437 common openat2 sys_openat2
438 common pidfd_getfd sys_pidfd_getfd
439 common faccessat2 sys_faccessat2
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
index 0b4694b8d248..6c31af7f4fa8 100644
--- a/arch/powerpc/kernel/tau_6xx.c
+++ b/arch/powerpc/kernel/tau_6xx.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/reg.h>
#include <asm/nvram.h>
@@ -100,16 +101,13 @@ static void TAUupdate(int cpu)
* with interrupts disabled
*/
-void TAUException(struct pt_regs * regs)
+DEFINE_INTERRUPT_HANDLER_ASYNC(TAUException)
{
int cpu = smp_processor_id();
- irq_enter();
tau[cpu].interrupts++;
TAUupdate(cpu);
-
- irq_exit();
}
#endif /* CONFIG_TAU_INT */
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 67feb3524460..b67d93a609a2 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -53,9 +53,11 @@
#include <linux/of_clk.h>
#include <linux/suspend.h>
#include <linux/sched/cputime.h>
+#include <linux/sched/clock.h>
#include <linux/processor.h>
#include <asm/trace.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/nvram.h>
#include <asm/cache.h>
@@ -570,7 +572,7 @@ void arch_irq_work_raise(void)
* timer_interrupt - gets called when the decrementer overflows,
* with interrupts disabled.
*/
-void timer_interrupt(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
{
struct clock_event_device *evt = this_cpu_ptr(&decrementers);
u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
@@ -609,7 +611,7 @@ void timer_interrupt(struct pt_regs *regs)
#endif
old_regs = set_irq_regs(regs);
- irq_enter();
+
trace_timer_interrupt_entry(regs);
if (test_irq_work_pending()) {
@@ -634,7 +636,7 @@ void timer_interrupt(struct pt_regs *regs)
}
trace_timer_interrupt_exit(regs);
- irq_exit();
+
set_irq_regs(old_regs);
}
EXPORT_SYMBOL(timer_interrupt);
@@ -1030,6 +1032,7 @@ void __init time_init(void)
tick_setup_hrtimer_broadcast();
of_clk_init(NULL);
+ enable_sched_clock_irqtime();
}
/*
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 3ec7b443fe6b..1583fd1c6010 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -41,6 +41,7 @@
#include <asm/emulated_ops.h>
#include <linux/uaccess.h>
#include <asm/debugfs.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
@@ -342,8 +343,8 @@ static bool exception_common(int signr, struct pt_regs *regs, int code,
show_signal_msg(signr, regs, code, addr);
- if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
- local_irq_enable();
+ if (arch_irqs_disabled())
+ interrupt_cond_local_irq_enable(regs);
current->thread.trap_nr = code;
@@ -430,16 +431,10 @@ nonrecoverable:
regs->msr &= ~MSR_RI;
#endif
}
-
-void system_reset_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
{
unsigned long hsrr0, hsrr1;
bool saved_hsrrs = false;
- u8 ftrace_enabled = this_cpu_get_ftrace_enabled();
-
- this_cpu_set_ftrace_enabled(0);
-
- nmi_enter();
/*
* System reset can interrupt code where HSRRs are live and MSR[RI]=1.
@@ -503,19 +498,20 @@ out:
die("Unrecoverable nested System Reset", regs, SIGABRT);
#endif
/* Must die if the interrupt is not recoverable */
- if (!(regs->msr & MSR_RI))
+ if (!(regs->msr & MSR_RI)) {
+ /* For the reason explained in die_mce, nmi_exit before die */
+ nmi_exit();
die("Unrecoverable System Reset", regs, SIGABRT);
+ }
if (saved_hsrrs) {
mtspr(SPRN_HSRR0, hsrr0);
mtspr(SPRN_HSRR1, hsrr1);
}
- nmi_exit();
-
- this_cpu_set_ftrace_enabled(ftrace_enabled);
-
/* What should we do here? We could issue a shutdown or hard reset. */
+
+ return 0;
}
/*
@@ -788,23 +784,33 @@ int machine_check_generic(struct pt_regs *regs)
}
#endif /* everything else */
-void machine_check_exception(struct pt_regs *regs)
+void die_mce(const char *str, struct pt_regs *regs, long err)
{
- int recover = 0;
-
/*
- * BOOK3S_64 does not call this handler as a non-maskable interrupt
- * (it uses its own early real-mode handler to handle the MCE proper
- * and then raises irq_work to call this handler when interrupts are
- * enabled).
- *
- * This is silly. The BOOK3S_64 should just call a different function
- * rather than expecting semantics to magically change. Something
- * like 'non_nmi_machine_check_exception()', perhaps?
+ * The machine check wants to kill the interrupted context, but
+ * do_exit() checks for in_interrupt() and panics in that case, so
+ * exit the irq/nmi before calling die.
*/
- const bool nmi = !IS_ENABLED(CONFIG_PPC_BOOK3S_64);
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ irq_exit();
+ else
+ nmi_exit();
+ die(str, regs, err);
+}
- if (nmi) nmi_enter();
+/*
+ * BOOK3S_64 does not call this handler as a non-maskable interrupt
+ * (it uses its own early real-mode handler to handle the MCE proper
+ * and then raises irq_work to call this handler when interrupts are
+ * enabled).
+ */
+#ifdef CONFIG_PPC_BOOK3S_64
+DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception)
+#else
+DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
+#endif
+{
+ int recover = 0;
__this_cpu_inc(irq_stat.mce_exceptions);
@@ -830,21 +836,21 @@ void machine_check_exception(struct pt_regs *regs)
if (check_io_access(regs))
goto bail;
- if (nmi) nmi_exit();
-
- die("Machine check", regs, SIGBUS);
+ die_mce("Machine check", regs, SIGBUS);
+bail:
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
- die("Unrecoverable Machine check", regs, SIGBUS);
+ die_mce("Unrecoverable Machine check", regs, SIGBUS);
+#ifdef CONFIG_PPC_BOOK3S_64
return;
-
-bail:
- if (nmi) nmi_exit();
+#else
+ return 0;
+#endif
}
-void SMIException(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */
{
die("System Management Interrupt", regs, SIGABRT);
}
@@ -1030,12 +1036,11 @@ static void p9_hmi_special_emu(struct pt_regs *regs)
}
#endif /* CONFIG_VSX */
-void handle_hmi_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_ASYNC(handle_hmi_exception)
{
struct pt_regs *old_regs;
old_regs = set_irq_regs(regs);
- irq_enter();
#ifdef CONFIG_VSX
/* Real mode flagged P9 special emu is needed */
@@ -1055,46 +1060,42 @@ void handle_hmi_exception(struct pt_regs *regs)
if (ppc_md.handle_hmi_exception)
ppc_md.handle_hmi_exception(regs);
- irq_exit();
set_irq_regs(old_regs);
}
-void unknown_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(unknown_exception)
{
- enum ctx_state prev_state = exception_enter();
-
printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
regs->nip, regs->msr, regs->trap);
_exception(SIGTRAP, regs, TRAP_UNK, 0);
-
- exception_exit(prev_state);
}
-void instruction_breakpoint_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception)
{
- enum ctx_state prev_state = exception_enter();
+ printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
+ regs->nip, regs->msr, regs->trap);
+ _exception(SIGTRAP, regs, TRAP_UNK, 0);
+}
+
+DEFINE_INTERRUPT_HANDLER(instruction_breakpoint_exception)
+{
if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
5, SIGTRAP) == NOTIFY_STOP)
- goto bail;
+ return;
if (debugger_iabr_match(regs))
- goto bail;
+ return;
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
-
-bail:
- exception_exit(prev_state);
}
-void RunModeException(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(RunModeException)
{
_exception(SIGTRAP, regs, TRAP_UNK, 0);
}
-void single_step_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(single_step_exception)
{
- enum ctx_state prev_state = exception_enter();
-
clear_single_step(regs);
clear_br_trace(regs);
@@ -1103,16 +1104,12 @@ void single_step_exception(struct pt_regs *regs)
if (notify_die(DIE_SSTEP, "single_step", regs, 5,
5, SIGTRAP) == NOTIFY_STOP)
- goto bail;
+ return;
if (debugger_sstep(regs))
- goto bail;
+ return;
_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
-
-bail:
- exception_exit(prev_state);
}
-NOKPROBE_SYMBOL(single_step_exception);
/*
* After we have successfully emulated an instruction, we have to
@@ -1436,9 +1433,8 @@ static int emulate_math(struct pt_regs *regs)
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif
-void program_check_exception(struct pt_regs *regs)
+static void do_program_check(struct pt_regs *regs)
{
- enum ctx_state prev_state = exception_enter();
unsigned int reason = get_reason(regs);
/* We can now get here via a FP Unavailable exception if the core
@@ -1447,22 +1443,22 @@ void program_check_exception(struct pt_regs *regs)
if (reason & REASON_FP) {
/* IEEE FP exception */
parse_fpe(regs);
- goto bail;
+ return;
}
if (reason & REASON_TRAP) {
unsigned long bugaddr;
/* Debugger is first in line to stop recursive faults in
* rcu_lock, notify_die, or atomic_notifier_call_chain */
if (debugger_bpt(regs))
- goto bail;
+ return;
if (kprobe_handler(regs))
- goto bail;
+ return;
/* trap exception */
if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
== NOTIFY_STOP)
- goto bail;
+ return;
bugaddr = regs->nip;
/*
@@ -1474,10 +1470,10 @@ void program_check_exception(struct pt_regs *regs)
if (!(regs->msr & MSR_PR) && /* not user-mode */
report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
regs->nip += 4;
- goto bail;
+ return;
}
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
- goto bail;
+ return;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (reason & REASON_TM) {
@@ -1498,7 +1494,7 @@ void program_check_exception(struct pt_regs *regs)
*/
if (user_mode(regs)) {
_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
- goto bail;
+ return;
} else {
printk(KERN_EMERG "Unexpected TM Bad Thing exception "
"at %lx (msr 0x%lx) tm_scratch=%llx\n",
@@ -1518,9 +1514,7 @@ void program_check_exception(struct pt_regs *regs)
if (!user_mode(regs))
goto sigill;
- /* We restore the interrupt state now */
- if (!arch_irq_disabled_regs(regs))
- local_irq_enable();
+ interrupt_cond_local_irq_enable(regs);
/* (reason & REASON_ILLEGAL) would be the obvious thing here,
* but there seems to be a hardware bug on the 405GP (RevD)
@@ -1531,7 +1525,7 @@ void program_check_exception(struct pt_regs *regs)
* pattern to occurrences etc. -dgibson 31/Mar/2003
*/
if (!emulate_math(regs))
- goto bail;
+ return;
/* Try to emulate it if we should. */
if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
@@ -1539,10 +1533,10 @@ void program_check_exception(struct pt_regs *regs)
case 0:
regs->nip += 4;
emulate_single_step(regs);
- goto bail;
+ return;
case -EFAULT:
_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
- goto bail;
+ return;
}
}
@@ -1552,34 +1546,31 @@ sigill:
else
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
-bail:
- exception_exit(prev_state);
}
-NOKPROBE_SYMBOL(program_check_exception);
+
+DEFINE_INTERRUPT_HANDLER(program_check_exception)
+{
+ do_program_check(regs);
+}
/*
* This occurs when running in hypervisor mode on POWER6 or later
* and an illegal instruction is encountered.
*/
-void emulation_assist_interrupt(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(emulation_assist_interrupt)
{
regs->msr |= REASON_ILLEGAL;
- program_check_exception(regs);
+ do_program_check(regs);
}
-NOKPROBE_SYMBOL(emulation_assist_interrupt);
-void alignment_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(alignment_exception)
{
- enum ctx_state prev_state = exception_enter();
int sig, code, fixed = 0;
unsigned long reason;
- /* We restore the interrupt state now */
- if (!arch_irq_disabled_regs(regs))
- local_irq_enable();
+ interrupt_cond_local_irq_enable(regs);
reason = get_reason(regs);
-
if (reason & REASON_BOUNDARY) {
sig = SIGBUS;
code = BUS_ADRALN;
@@ -1587,7 +1578,7 @@ void alignment_exception(struct pt_regs *regs)
}
if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
- goto bail;
+ return;
/* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
@@ -1597,7 +1588,7 @@ void alignment_exception(struct pt_regs *regs)
/* skip over emulated instruction */
regs->nip += inst_length(reason);
emulate_single_step(regs);
- goto bail;
+ return;
}
/* Operand address was bad */
@@ -1612,13 +1603,10 @@ bad:
if (user_mode(regs))
_exception(sig, regs, code, regs->dar);
else
- bad_page_fault(regs, regs->dar, sig);
-
-bail:
- exception_exit(prev_state);
+ bad_page_fault(regs, sig);
}
-void StackOverflow(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(StackOverflow)
{
pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n",
current->comm, task_pid_nr(current), regs->gpr[1]);
@@ -1627,46 +1615,33 @@ void StackOverflow(struct pt_regs *regs)
panic("kernel stack overflow");
}
-void stack_overflow_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(stack_overflow_exception)
{
- enum ctx_state prev_state = exception_enter();
-
die("Kernel stack overflow", regs, SIGSEGV);
-
- exception_exit(prev_state);
}
-void kernel_fp_unavailable_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception)
{
- enum ctx_state prev_state = exception_enter();
-
printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
-
- exception_exit(prev_state);
}
-void altivec_unavailable_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(altivec_unavailable_exception)
{
- enum ctx_state prev_state = exception_enter();
-
if (user_mode(regs)) {
/* A user program has executed an altivec instruction,
but this kernel doesn't support altivec. */
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
- goto bail;
+ return;
}
printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
-
-bail:
- exception_exit(prev_state);
}
-void vsx_unavailable_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(vsx_unavailable_exception)
{
if (user_mode(regs)) {
/* A user program has executed an vsx instruction,
@@ -1697,7 +1672,7 @@ static void tm_unavailable(struct pt_regs *regs)
die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
}
-void facility_unavailable_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(facility_unavailable_exception)
{
static char *facility_strings[] = {
[FSCR_FP_LG] = "FPU",
@@ -1737,9 +1712,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
die("Unexpected facility unavailable exception", regs, SIGABRT);
}
- /* We restore the interrupt state now */
- if (!arch_irq_disabled_regs(regs))
- local_irq_enable();
+ interrupt_cond_local_irq_enable(regs);
if (status == FSCR_DSCR_LG) {
/*
@@ -1817,7 +1790,7 @@ out:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-void fp_unavailable_tm(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(fp_unavailable_tm)
{
/* Note: This does not handle any kind of FP laziness. */
@@ -1850,7 +1823,7 @@ void fp_unavailable_tm(struct pt_regs *regs)
tm_recheckpoint(&current->thread);
}
-void altivec_unavailable_tm(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(altivec_unavailable_tm)
{
/* See the comments in fp_unavailable_tm(). This function operates
* the same way.
@@ -1865,7 +1838,7 @@ void altivec_unavailable_tm(struct pt_regs *regs)
current->thread.used_vr = 1;
}
-void vsx_unavailable_tm(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(vsx_unavailable_tm)
{
/* See the comments in fp_unavailable_tm(). This works similarly,
* though we're loading both FP and VEC registers in here.
@@ -1890,11 +1863,40 @@ void vsx_unavailable_tm(struct pt_regs *regs)
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-void performance_monitor_exception(struct pt_regs *regs)
+#ifdef CONFIG_PPC64
+DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
+DEFINE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi)
{
__this_cpu_inc(irq_stat.pmu_irqs);
perf_irq(regs);
+
+ return 0;
+}
+#endif
+
+DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
+DEFINE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async)
+{
+ __this_cpu_inc(irq_stat.pmu_irqs);
+
+ perf_irq(regs);
+}
+
+DEFINE_INTERRUPT_HANDLER_RAW(performance_monitor_exception)
+{
+ /*
+ * On 64-bit, if perf interrupts hit in a local_irq_disable
+ * (soft-masked) region, we consider them as NMIs. This is required to
+ * prevent hash faults on user addresses when reading callchains (and
+ * looks better from an irq tracing perspective).
+ */
+ if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
+ performance_monitor_exception_nmi(regs);
+ else
+ performance_monitor_exception_async(regs);
+
+ return 0;
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -1957,8 +1959,10 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}
-void DebugException(struct pt_regs *regs, unsigned long debug_status)
+DEFINE_INTERRUPT_HANDLER(DebugException)
{
+ unsigned long debug_status = regs->dsisr;
+
current->thread.debug.dbsr = debug_status;
/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
@@ -2024,11 +2028,10 @@ void DebugException(struct pt_regs *regs, unsigned long debug_status)
} else
handle_debug(regs, debug_status);
}
-NOKPROBE_SYMBOL(DebugException);
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_ALTIVEC
-void altivec_assist_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(altivec_assist_exception)
{
int err;
@@ -2062,9 +2065,10 @@ void altivec_assist_exception(struct pt_regs *regs)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_FSL_BOOKE
-void CacheLockingException(struct pt_regs *regs, unsigned long address,
- unsigned long error_code)
+DEFINE_INTERRUPT_HANDLER(CacheLockingException)
{
+ unsigned long error_code = regs->dsisr;
+
/* We treat cache locking instructions from the user
* as priv ops, in the future we could try to do
* something smarter
@@ -2076,7 +2080,7 @@ void CacheLockingException(struct pt_regs *regs, unsigned long address,
#endif /* CONFIG_FSL_BOOKE */
#ifdef CONFIG_SPE
-void SPEFloatingPointException(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException)
{
extern int do_spe_mathemu(struct pt_regs *regs);
unsigned long spefscr;
@@ -2084,9 +2088,7 @@ void SPEFloatingPointException(struct pt_regs *regs)
int code = FPE_FLTUNK;
int err;
- /* We restore the interrupt state now */
- if (!arch_irq_disabled_regs(regs))
- local_irq_enable();
+ interrupt_cond_local_irq_enable(regs);
flush_spe_to_thread(current);
@@ -2128,14 +2130,12 @@ void SPEFloatingPointException(struct pt_regs *regs)
return;
}
-void SPEFloatingPointRoundException(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException)
{
extern int speround_handler(struct pt_regs *regs);
int err;
- /* We restore the interrupt state now */
- if (!arch_irq_disabled_regs(regs))
- local_irq_enable();
+ interrupt_cond_local_irq_enable(regs);
preempt_disable();
if (regs->msr & MSR_SPE)
@@ -2170,13 +2170,12 @@ void SPEFloatingPointRoundException(struct pt_regs *regs)
* in the MSR is 0. This indicates that SRR0/1 are live, and that
* we therefore lost state by taking this exception.
*/
-void unrecoverable_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(unrecoverable_exception)
{
pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
regs->trap, regs->nip, regs->msr);
die("Unrecoverable exception", regs, SIGABRT);
}
-NOKPROBE_SYMBOL(unrecoverable_exception);
#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
@@ -2190,7 +2189,7 @@ void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
return;
}
-void WatchdogException(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(WatchdogException) /* XXX NMI? async? */
{
printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
WatchdogHandler(regs);
@@ -2201,13 +2200,12 @@ void WatchdogException(struct pt_regs *regs)
* We enter here if we discover during exception entry that we are
* running in supervisor mode with a userspace value in the stack pointer.
*/
-void kernel_bad_stack(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(kernel_bad_stack)
{
printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
regs->gpr[1], regs->nip);
die("Bad kernel stack pointer", regs, SIGABRT);
}
-NOKPROBE_SYMBOL(kernel_bad_stack);
void __init trap_init(void)
{
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
index 9cb6f524854b..7d9a6fee0e3d 100644
--- a/arch/powerpc/kernel/vdso32/Makefile
+++ b/arch/powerpc/kernel/vdso32/Makefile
@@ -30,7 +30,7 @@ CC32FLAGS += -m32
KBUILD_CFLAGS := $(filter-out -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc,$(KBUILD_CFLAGS))
endif
-targets := $(obj-vdso32) vdso32.so.dbg
+targets := $(obj-vdso32) vdso32.so.dbg vgettimeofday.o
obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
GCOV_PROFILE := n
@@ -46,9 +46,6 @@ obj-y += vdso32_wrapper.o
targets += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C -Upowerpc
-# Force dependency (incbin is bad)
-$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so.dbg
-
# link rule for the .so file, .lds has to be first
$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday.o FORCE
$(call if_changed,vdso32ld_and_check)
diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32_wrapper.S
index 3f5ef035b0a9..3f5ef035b0a9 100644
--- a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
+++ b/arch/powerpc/kernel/vdso32_wrapper.S
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index bf363ff37152..2813e3f98db6 100644
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -17,7 +17,7 @@ endif
# Build rules
-targets := $(obj-vdso64) vdso64.so.dbg
+targets := $(obj-vdso64) vdso64.so.dbg vgettimeofday.o
obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
GCOV_PROFILE := n
@@ -29,15 +29,9 @@ ccflags-y := -shared -fno-common -fno-builtin -nostdlib \
-Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both
asflags-y := -D__VDSO64__ -s
-obj-y += vdso64_wrapper.o
targets += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
-$(obj)/vgettimeofday.o: %.o: %.c FORCE
-
-# Force dependency (incbin is bad)
-$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so.dbg
-
# link rule for the .so file, .lds has to be first
$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(obj)/vgettimeofday.o FORCE
$(call if_changed,vdso64ld_and_check)
diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S
index bbf68cd01088..2d4067561293 100644
--- a/arch/powerpc/kernel/vdso64/sigtramp.S
+++ b/arch/powerpc/kernel/vdso64/sigtramp.S
@@ -15,11 +15,20 @@
.text
+/*
+ * __kernel_start_sigtramp_rt64 and __kernel_sigtramp_rt64 together
+ * are one function split in two parts. The kernel jumps to the former
+ * and the signal handler indirectly (by blr) returns to the latter.
+ * __kernel_sigtramp_rt64 needs to point to the return address so
+ * glibc can correctly identify the trampoline stack frame.
+ */
.balign 8
.balign IFETCH_ALIGN_BYTES
-V_FUNCTION_BEGIN(__kernel_sigtramp_rt64)
+V_FUNCTION_BEGIN(__kernel_start_sigtramp_rt64)
.Lsigrt_start:
bctrl /* call the handler */
+V_FUNCTION_END(__kernel_start_sigtramp_rt64)
+V_FUNCTION_BEGIN(__kernel_sigtramp_rt64)
addi r1, r1, __SIGNAL_FRAMESIZE
li r0,__NR_rt_sigreturn
sc
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 6164d1a1ba11..2f3c359cacd3 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -131,4 +131,4 @@ VERSION
/*
* Make the sigreturn code visible to the kernel.
*/
-VDSO_sigtramp_rt64 = __kernel_sigtramp_rt64;
+VDSO_sigtramp_rt64 = __kernel_start_sigtramp_rt64;
diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64_wrapper.S
index 1d56d81fe3b3..1d56d81fe3b3 100644
--- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
+++ b/arch/powerpc/kernel/vdso64_wrapper.S
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index af3c15a1d41e..c9a8f4781a10 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -26,7 +26,9 @@
#include <linux/delay.h>
#include <linux/smp.h>
+#include <asm/interrupt.h>
#include <asm/paca.h>
+#include <asm/nmi.h>
/*
* The powerpc watchdog ensures that each CPU is able to service timers.
@@ -247,16 +249,17 @@ static void watchdog_timer_interrupt(int cpu)
watchdog_smp_panic(cpu, tb);
}
-void soft_nmi_interrupt(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
{
unsigned long flags;
int cpu = raw_smp_processor_id();
u64 tb;
- if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
- return;
+ /* should only arrive from kernel, with irqs disabled */
+ WARN_ON_ONCE(!arch_irq_disabled_regs(regs));
- nmi_enter();
+ if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
+ return 0;
__this_cpu_inc(irq_stat.soft_nmi_irqs);
@@ -265,7 +268,7 @@ void soft_nmi_interrupt(struct pt_regs *regs)
wd_smp_lock(&flags);
if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) {
wd_smp_unlock(&flags);
- goto out;
+ return 0;
}
set_cpu_stuck(cpu, tb);
@@ -289,8 +292,7 @@ void soft_nmi_interrupt(struct pt_regs *regs)
if (wd_panic_timeout_tb < 0x7fffffff)
mtspr(SPRN_DEC, wd_panic_timeout_tb);
-out:
- nmi_exit();
+ return 0;
}
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c
index d0e459bb2f05..9842e33533df 100644
--- a/arch/powerpc/kexec/elf_64.c
+++ b/arch/powerpc/kexec/elf_64.c
@@ -102,7 +102,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
pr_debug("Loaded initrd at 0x%lx\n", initrd_load_addr);
}
- fdt_size = fdt_totalsize(initial_boot_params) * 2;
+ fdt_size = kexec_fdt_totalsize_ppc64(image);
fdt = kmalloc(fdt_size, GFP_KERNEL);
if (!fdt) {
pr_err("Not enough memory for the device tree.\n");
diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
index c69bcf9b547a..02b9e4d0dc40 100644
--- a/arch/powerpc/kexec/file_load_64.c
+++ b/arch/powerpc/kexec/file_load_64.c
@@ -21,6 +21,7 @@
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <asm/setup.h>
#include <asm/drmem.h>
#include <asm/kexec_ranges.h>
#include <asm/crashdump-ppc64.h>
@@ -926,6 +927,40 @@ out:
}
/**
+ * kexec_fdt_totalsize_ppc64 - Return the estimated size needed to setup FDT
+ * for kexec/kdump kernel.
+ * @image: kexec image being loaded.
+ *
+ * Returns the estimated size needed for kexec/kdump kernel FDT.
+ */
+unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image)
+{
+ unsigned int fdt_size;
+ u64 usm_entries;
+
+ /*
+	 * The estimate below more than accounts for a typical kexec case, where
+	 * the additional space is needed to accommodate things like the kexec
+	 * cmdline, a chosen node with properties for the initrd start & end
+	 * addresses, and a property to indicate a kexec boot.
+ */
+ fdt_size = fdt_totalsize(initial_boot_params) + (2 * COMMAND_LINE_SIZE);
+ if (image->type != KEXEC_TYPE_CRASH)
+ return fdt_size;
+
+ /*
+	 * For the kdump kernel, also account for the linux,usable-memory and
+	 * linux,drconf-usable-memory properties. Get an approximate count of
+	 * the usable memory entries and use it for the FDT size estimation.
+ */
+ usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) +
+ (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
+ fdt_size += (unsigned int)(usm_entries * sizeof(u64));
+
+ return fdt_size;
+}
+
+/**
* setup_new_fdt_ppc64 - Update the flattened device-tree of the kernel
* being loaded.
* @image: kexec image being loaded.
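As a rough, purely illustrative sanity check of the estimate above (all numbers made up): with memblock_end_of_DRAM() at 1 TiB, a 256 MiB LMB size and a 512 MiB crashk_res, usm_entries = 1 TiB / 256 MiB + 2 * (512 MiB / 256 MiB) = 4096 + 4 = 4100, so the kdump case adds roughly 4100 * sizeof(u64) ≈ 32 KiB on top of the base fdt_totalsize(initial_boot_params) + 2 * COMMAND_LINE_SIZE.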
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 549591d9aaa2..e45644657d49 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -54,6 +54,7 @@ config KVM_BOOK3S_32
select KVM
select KVM_BOOK3S_32_HANDLER
select KVM_BOOK3S_PR_POSSIBLE
+ select PPC_FPU
help
Support running unmodified book3s_32 guest kernels
in virtual machines on book3s_32 host processors.
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 38ea396a23d6..bb6773594cf8 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -27,6 +27,7 @@
#include <asm/cputable.h>
#include <asm/pte-walk.h>
+#include "book3s.h"
#include "trace_hv.h"
//#define DEBUG_RESIZE_HPT 1
@@ -590,7 +591,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
} else {
/* Call KVM generic code to do the slow-path check */
pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
- writing, &write_ok);
+ writing, &write_ok, NULL);
if (is_error_noslot_pfn(pfn))
return -EFAULT;
page = NULL;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index bb35490400e9..e603de7ade52 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -822,7 +822,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
/* Call KVM generic code to do the slow-path check */
pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
- writing, upgrade_p);
+ writing, upgrade_p, NULL);
if (is_error_noslot_pfn(pfn))
return -EFAULT;
page = NULL;
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index b08cc15f31c7..fdb57be71aa6 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -61,10 +61,6 @@
#define SPRN_GQR6 918
#define SPRN_GQR7 919
-/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
- * function pointers, so let's just disable the define. */
-#undef mfsrin
-
enum priv_level {
PRIV_PROBLEM = 0,
PRIV_SUPER = 1,
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 6f612d240392..13bad6bf4c95 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -53,6 +53,7 @@
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
@@ -134,7 +135,7 @@ static inline bool nesting_enabled(struct kvm *kvm)
}
/* If set, the threads on each CPU core have to be in the same MMU mode */
-static bool no_mixing_hpt_and_radix;
+static bool no_mixing_hpt_and_radix __read_mostly;
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
@@ -782,8 +783,24 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
return H_UNSUPPORTED_FLAG_START;
if (value2 & DABRX_HYP)
return H_P4;
- vcpu->arch.dawr = value1;
- vcpu->arch.dawrx = value2;
+ vcpu->arch.dawr0 = value1;
+ vcpu->arch.dawrx0 = value2;
+ return H_SUCCESS;
+ case H_SET_MODE_RESOURCE_SET_DAWR1:
+ if (!kvmppc_power8_compatible(vcpu))
+ return H_P2;
+ if (!ppc_breakpoint_available())
+ return H_P2;
+ if (!cpu_has_feature(CPU_FTR_DAWR1))
+ return H_P2;
+ if (!vcpu->kvm->arch.dawr1_enabled)
+ return H_FUNCTION;
+ if (mflags)
+ return H_UNSUPPORTED_FLAG_START;
+ if (value2 & DABRX_HYP)
+ return H_P4;
+ vcpu->arch.dawr1 = value1;
+ vcpu->arch.dawrx1 = value2;
return H_SUCCESS;
case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
/* KVM does not support mflags=2 (AIL=2) */
@@ -1759,10 +1776,16 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.vcore->vtb);
break;
case KVM_REG_PPC_DAWR:
- *val = get_reg_val(id, vcpu->arch.dawr);
+ *val = get_reg_val(id, vcpu->arch.dawr0);
break;
case KVM_REG_PPC_DAWRX:
- *val = get_reg_val(id, vcpu->arch.dawrx);
+ *val = get_reg_val(id, vcpu->arch.dawrx0);
+ break;
+ case KVM_REG_PPC_DAWR1:
+ *val = get_reg_val(id, vcpu->arch.dawr1);
+ break;
+ case KVM_REG_PPC_DAWRX1:
+ *val = get_reg_val(id, vcpu->arch.dawrx1);
break;
case KVM_REG_PPC_CIABR:
*val = get_reg_val(id, vcpu->arch.ciabr);
@@ -1991,10 +2014,16 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.vcore->vtb = set_reg_val(id, *val);
break;
case KVM_REG_PPC_DAWR:
- vcpu->arch.dawr = set_reg_val(id, *val);
+ vcpu->arch.dawr0 = set_reg_val(id, *val);
break;
case KVM_REG_PPC_DAWRX:
- vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
+ vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP;
+ break;
+ case KVM_REG_PPC_DAWR1:
+ vcpu->arch.dawr1 = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_DAWRX1:
+ vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP;
break;
case KVM_REG_PPC_CIABR:
vcpu->arch.ciabr = set_reg_val(id, *val);
@@ -2862,11 +2891,6 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm)
return false;
- /* Some POWER9 chips require all threads to be in the same MMU mode */
- if (no_mixing_hpt_and_radix &&
- kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm))
- return false;
-
if (n_threads < cip->max_subcore_threads)
n_threads = cip->max_subcore_threads;
if (!subcore_config_ok(cip->n_subcores + 1, n_threads))
@@ -2905,6 +2929,9 @@ static void prepare_threads(struct kvmppc_vcore *vc)
for_each_runnable_thread(i, vcpu, vc) {
if (signal_pending(vcpu->arch.run_task))
vcpu->arch.ret = -EINTR;
+ else if (no_mixing_hpt_and_radix &&
+ kvm_is_radix(vc->kvm) != radix_enabled())
+ vcpu->arch.ret = -EINVAL;
else if (vcpu->arch.vpa.update_pending ||
vcpu->arch.slb_shadow.update_pending ||
vcpu->arch.dtl.update_pending)
@@ -3110,7 +3137,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
int controlled_threads;
int trap;
bool is_power8;
- bool hpt_on_radix;
/*
* Remove from the list any threads that have a signal pending
@@ -3143,11 +3169,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
* this is a HPT guest on a radix host machine where the
* CPU threads may not be in different MMU modes.
*/
- hpt_on_radix = no_mixing_hpt_and_radix && radix_enabled() &&
- !kvm_is_radix(vc->kvm);
- if (((controlled_threads > 1) &&
- ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) ||
- (hpt_on_radix && vc->kvm->arch.threads_indep)) {
+ if ((controlled_threads > 1) &&
+ ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
for_each_runnable_thread(i, vcpu, vc) {
vcpu->arch.ret = -EBUSY;
kvmppc_remove_runnable(vc, vcpu);
@@ -3215,7 +3238,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S)
&& !cpu_has_feature(CPU_FTR_ARCH_300);
- if (split > 1 || hpt_on_radix) {
+ if (split > 1) {
sip = &split_info;
memset(&split_info, 0, sizeof(split_info));
for (sub = 0; sub < core_info.n_subcores; ++sub)
@@ -3237,13 +3260,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
split_info.subcore_size = subcore_size;
} else {
split_info.subcore_size = 1;
- if (hpt_on_radix) {
- /* Use the split_info for LPCR/LPIDR changes */
- split_info.lpcr_req = vc->lpcr;
- split_info.lpidr_req = vc->kvm->arch.lpid;
- split_info.host_lpcr = vc->kvm->arch.host_lpcr;
- split_info.do_set = 1;
- }
}
/* order writes to split_info before kvm_split_mode pointer */
@@ -3253,7 +3269,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
for (thr = 0; thr < controlled_threads; ++thr) {
struct paca_struct *paca = paca_ptrs[pcpu + thr];
- paca->kvm_hstate.tid = thr;
paca->kvm_hstate.napping = 0;
paca->kvm_hstate.kvm_split_mode = sip;
}
@@ -3327,10 +3342,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
* When doing micro-threading, poke the inactive threads as well.
* This gets them to the nap instruction after kvm_do_nap,
* which reduces the time taken to unsplit later.
- * For POWER9 HPT guest on radix host, we need all the secondary
- * threads woken up so they can do the LPCR/LPIDR change.
*/
- if (cmd_bit || hpt_on_radix) {
+ if (cmd_bit) {
split_info.do_nap = 1; /* ask secondaries to nap when done */
for (thr = 1; thr < threads_per_subcore; ++thr)
if (!(active & (1 << thr)))
@@ -3391,24 +3404,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
cpu_relax();
++loops;
}
- } else if (hpt_on_radix) {
- /* Wait for all threads to have seen final sync */
- for (thr = 1; thr < controlled_threads; ++thr) {
- struct paca_struct *paca = paca_ptrs[pcpu + thr];
-
- while (paca->kvm_hstate.kvm_split_mode) {
- HMT_low();
- barrier();
- }
- HMT_medium();
- }
+ split_info.do_nap = 0;
}
- split_info.do_nap = 0;
kvmppc_set_host_core(pcpu);
+ guest_exit_irqoff();
+
local_irq_enable();
- guest_exit();
/* Let secondaries go back to the offline loop */
for (i = 0; i < controlled_threads; ++i) {
@@ -3449,10 +3452,17 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
int trap;
unsigned long host_hfscr = mfspr(SPRN_HFSCR);
unsigned long host_ciabr = mfspr(SPRN_CIABR);
- unsigned long host_dawr = mfspr(SPRN_DAWR0);
- unsigned long host_dawrx = mfspr(SPRN_DAWRX0);
+ unsigned long host_dawr0 = mfspr(SPRN_DAWR0);
+ unsigned long host_dawrx0 = mfspr(SPRN_DAWRX0);
unsigned long host_psscr = mfspr(SPRN_PSSCR);
unsigned long host_pidr = mfspr(SPRN_PID);
+ unsigned long host_dawr1 = 0;
+ unsigned long host_dawrx1 = 0;
+
+ if (cpu_has_feature(CPU_FTR_DAWR1)) {
+ host_dawr1 = mfspr(SPRN_DAWR1);
+ host_dawrx1 = mfspr(SPRN_DAWRX1);
+ }
/*
* P8 and P9 suppress the HDEC exception when LPCR[HDICE] = 0,
@@ -3489,8 +3499,12 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
mtspr(SPRN_SPURR, vcpu->arch.spurr);
if (dawr_enabled()) {
- mtspr(SPRN_DAWR0, vcpu->arch.dawr);
- mtspr(SPRN_DAWRX0, vcpu->arch.dawrx);
+ mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
+ mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
+ if (cpu_has_feature(CPU_FTR_DAWR1)) {
+ mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
+ mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
+ }
}
mtspr(SPRN_CIABR, vcpu->arch.ciabr);
mtspr(SPRN_IC, vcpu->arch.ic);
@@ -3542,8 +3556,12 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
(local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
mtspr(SPRN_HFSCR, host_hfscr);
mtspr(SPRN_CIABR, host_ciabr);
- mtspr(SPRN_DAWR0, host_dawr);
- mtspr(SPRN_DAWRX0, host_dawrx);
+ mtspr(SPRN_DAWR0, host_dawr0);
+ mtspr(SPRN_DAWRX0, host_dawrx0);
+ if (cpu_has_feature(CPU_FTR_DAWR1)) {
+ mtspr(SPRN_DAWR1, host_dawr1);
+ mtspr(SPRN_DAWRX1, host_dawrx1);
+ }
mtspr(SPRN_PID, host_pidr);
/*
@@ -3595,6 +3613,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
unsigned long host_tidr = mfspr(SPRN_TIDR);
unsigned long host_iamr = mfspr(SPRN_IAMR);
unsigned long host_amr = mfspr(SPRN_AMR);
+ unsigned long host_fscr = mfspr(SPRN_FSCR);
s64 dec;
u64 tb;
int trap, save_pmu;
@@ -3735,6 +3754,9 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
if (host_amr != vcpu->arch.amr)
mtspr(SPRN_AMR, host_amr);
+ if (host_fscr != vcpu->arch.fscr)
+ mtspr(SPRN_FSCR, host_fscr);
+
msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
store_fp_state(&vcpu->arch.fp);
#ifdef CONFIG_ALTIVEC
@@ -4173,7 +4195,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
kvmppc_clear_host_core(pcpu);
- local_paca->kvm_hstate.tid = 0;
local_paca->kvm_hstate.napping = 0;
local_paca->kvm_hstate.kvm_split_mode = NULL;
kvmppc_start_thread(vcpu, vc);
@@ -4217,8 +4238,9 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
kvmppc_set_host_core(pcpu);
+ guest_exit_irqoff();
+
local_irq_enable();
- guest_exit();
cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest);
@@ -4358,15 +4380,11 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
do {
/*
- * The early POWER9 chips that can't mix radix and HPT threads
- * on the same core also need the workaround for the problem
- * where the TLB would prefetch entries in the guest exit path
- * for radix guests using the guest PIDR value and LPID 0.
- * The workaround is in the old path (kvmppc_run_vcpu())
- * but not the new path (kvmhv_run_single_vcpu()).
+ * The TLB prefetch bug fixup is only in the kvmppc_run_vcpu
+ * path, which also handles hash and dependent threads mode.
*/
if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
- !no_mixing_hpt_and_radix)
+ !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
vcpu->arch.vcore->lpcr);
else
@@ -5599,6 +5617,26 @@ out:
return ret;
}
+static int kvmhv_enable_dawr1(struct kvm *kvm)
+{
+ if (!cpu_has_feature(CPU_FTR_DAWR1))
+ return -ENODEV;
+
+ /* kvm == NULL means the caller is testing if the capability exists */
+ if (kvm)
+ kvm->arch.dawr1_enabled = true;
+ return 0;
+}
+
+static bool kvmppc_hash_v3_possible(void)
+{
+ if (radix_enabled() && no_mixing_hpt_and_radix)
+ return false;
+
+ return cpu_has_feature(CPU_FTR_ARCH_300) &&
+ cpu_has_feature(CPU_FTR_HVMODE);
+}
+
static struct kvmppc_ops kvm_ops_hv = {
.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
@@ -5642,6 +5680,8 @@ static struct kvmppc_ops kvm_ops_hv = {
.store_to_eaddr = kvmhv_store_to_eaddr,
.enable_svm = kvmhv_enable_svm,
.svm_off = kvmhv_svm_off,
+ .enable_dawr1 = kvmhv_enable_dawr1,
+ .hash_v3_possible = kvmppc_hash_v3_possible,
};
static int kvm_init_subcore_bitmap(void)
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 8053efdf7ea7..158d309b42a3 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -17,6 +17,7 @@
#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
+#include <asm/interrupt.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
@@ -277,8 +278,7 @@ void kvmhv_commence_exit(int trap)
struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
int ptid = local_paca->kvm_hstate.ptid;
struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
- int me, ee, i, t;
- int cpu0;
+ int me, ee, i;
/* Set our bit in the threads-exiting-guest map in the 0xff00
bits of vcore->entry_exit_map */
@@ -320,22 +320,6 @@ void kvmhv_commence_exit(int trap)
if ((ee >> 8) == 0)
kvmhv_interrupt_vcore(vc, ee);
}
-
- /*
- * On POWER9 when running a HPT guest on a radix host (sip != NULL),
- * we have to interrupt inactive CPU threads to get them to
- * restore the host LPCR value.
- */
- if (sip->lpcr_req) {
- if (cmpxchg(&sip->do_restore, 0, 1) == 0) {
- vc = local_paca->kvm_hstate.kvm_vcore;
- cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid;
- for (t = 1; t < threads_per_core; ++t) {
- if (sip->napped[t])
- kvmhv_rm_send_ipi(cpu0 + t);
- }
- }
- }
}
struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
@@ -667,95 +651,6 @@ void kvmppc_bad_interrupt(struct pt_regs *regs)
panic("Bad KVM trap");
}
-/*
- * Functions used to switch LPCR HR and UPRT bits on all threads
- * when entering and exiting HPT guests on a radix host.
- */
-
-#define PHASE_REALMODE 1 /* in real mode */
-#define PHASE_SET_LPCR 2 /* have set LPCR */
-#define PHASE_OUT_OF_GUEST 4 /* have finished executing in guest */
-#define PHASE_RESET_LPCR 8 /* have reset LPCR to host value */
-
-#define ALL(p) (((p) << 24) | ((p) << 16) | ((p) << 8) | (p))
-
-static void wait_for_sync(struct kvm_split_mode *sip, int phase)
-{
- int thr = local_paca->kvm_hstate.tid;
-
- sip->lpcr_sync.phase[thr] |= phase;
- phase = ALL(phase);
- while ((sip->lpcr_sync.allphases & phase) != phase) {
- HMT_low();
- barrier();
- }
- HMT_medium();
-}
-
-void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip)
-{
- int num_sets;
- unsigned long rb, set;
-
- /* wait for every other thread to get to real mode */
- wait_for_sync(sip, PHASE_REALMODE);
-
- /* Set LPCR and LPIDR */
- mtspr(SPRN_LPCR, sip->lpcr_req);
- mtspr(SPRN_LPID, sip->lpidr_req);
- isync();
-
- /*
- * P10 will flush all the congruence class with a single tlbiel
- */
- if (cpu_has_feature(CPU_FTR_ARCH_31))
- num_sets = 1;
- else
- num_sets = POWER9_TLB_SETS_RADIX;
-
- /* Invalidate the TLB on thread 0 */
- if (local_paca->kvm_hstate.tid == 0) {
- sip->do_set = 0;
- asm volatile("ptesync" : : : "memory");
- for (set = 0; set < num_sets; ++set) {
- rb = TLBIEL_INVAL_SET_LPID +
- (set << TLBIEL_INVAL_SET_SHIFT);
- asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : :
- "r" (rb), "r" (0));
- }
- asm volatile("ptesync" : : : "memory");
- }
-
- /* indicate that we have done so and wait for others */
- wait_for_sync(sip, PHASE_SET_LPCR);
- /* order read of sip->lpcr_sync.allphases vs. sip->do_set */
- smp_rmb();
-}
-
-/*
- * Called when a thread that has been in the guest needs
- * to reload the host LPCR value - but only on POWER9 when
- * running a HPT guest on a radix host.
- */
-void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
-{
- /* we're out of the guest... */
- wait_for_sync(sip, PHASE_OUT_OF_GUEST);
-
- mtspr(SPRN_LPID, 0);
- mtspr(SPRN_LPCR, sip->host_lpcr);
- isync();
-
- if (local_paca->kvm_hstate.tid == 0) {
- sip->do_restore = 0;
- smp_wmb(); /* order store of do_restore vs. phase */
- }
-
- wait_for_sync(sip, PHASE_RESET_LPCR);
- smp_mb();
- local_paca->kvm_hstate.kvm_split_mode = NULL;
-}
-
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
vcpu->arch.ceded = 0;
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 33b58549a9aa..0cd0e7aad588 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -33,8 +33,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
hr->dpdes = vc->dpdes;
hr->hfscr = vcpu->arch.hfscr;
hr->tb_offset = vc->tb_offset;
- hr->dawr0 = vcpu->arch.dawr;
- hr->dawrx0 = vcpu->arch.dawrx;
+ hr->dawr0 = vcpu->arch.dawr0;
+ hr->dawrx0 = vcpu->arch.dawrx0;
hr->ciabr = vcpu->arch.ciabr;
hr->purr = vcpu->arch.purr;
hr->spurr = vcpu->arch.spurr;
@@ -49,6 +49,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
hr->pidr = vcpu->arch.pid;
hr->cfar = vcpu->arch.cfar;
hr->ppr = vcpu->arch.ppr;
+ hr->dawr1 = vcpu->arch.dawr1;
+ hr->dawrx1 = vcpu->arch.dawrx1;
}
static void byteswap_pt_regs(struct pt_regs *regs)
@@ -91,6 +93,8 @@ static void byteswap_hv_regs(struct hv_guest_state *hr)
hr->pidr = swab64(hr->pidr);
hr->cfar = swab64(hr->cfar);
hr->ppr = swab64(hr->ppr);
+ hr->dawr1 = swab64(hr->dawr1);
+ hr->dawrx1 = swab64(hr->dawrx1);
}
static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
@@ -138,6 +142,7 @@ static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
/* Don't let data address watchpoint match in hypervisor state */
hr->dawrx0 &= ~DAWRX_HYP;
+ hr->dawrx1 &= ~DAWRX_HYP;
/* Don't let completed instruction address breakpt match in HV state */
if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
@@ -151,8 +156,8 @@ static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
vc->pcr = hr->pcr | PCR_MASK;
vc->dpdes = hr->dpdes;
vcpu->arch.hfscr = hr->hfscr;
- vcpu->arch.dawr = hr->dawr0;
- vcpu->arch.dawrx = hr->dawrx0;
+ vcpu->arch.dawr0 = hr->dawr0;
+ vcpu->arch.dawrx0 = hr->dawrx0;
vcpu->arch.ciabr = hr->ciabr;
vcpu->arch.purr = hr->purr;
vcpu->arch.spurr = hr->spurr;
@@ -167,6 +172,8 @@ static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
vcpu->arch.pid = hr->pidr;
vcpu->arch.cfar = hr->cfar;
vcpu->arch.ppr = hr->ppr;
+ vcpu->arch.dawr1 = hr->dawr1;
+ vcpu->arch.dawrx1 = hr->dawrx1;
}
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
@@ -215,12 +222,51 @@ static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
}
}
+static int kvmhv_read_guest_state_and_regs(struct kvm_vcpu *vcpu,
+ struct hv_guest_state *l2_hv,
+ struct pt_regs *l2_regs,
+ u64 hv_ptr, u64 regs_ptr)
+{
+ int size;
+
+ if (kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv->version,
+ sizeof(l2_hv->version)))
+ return -1;
+
+ if (kvmppc_need_byteswap(vcpu))
+ l2_hv->version = swab64(l2_hv->version);
+
+ size = hv_guest_state_size(l2_hv->version);
+ if (size < 0)
+ return -1;
+
+ return kvm_vcpu_read_guest(vcpu, hv_ptr, l2_hv, size) ||
+ kvm_vcpu_read_guest(vcpu, regs_ptr, l2_regs,
+ sizeof(struct pt_regs));
+}
+
+static int kvmhv_write_guest_state_and_regs(struct kvm_vcpu *vcpu,
+ struct hv_guest_state *l2_hv,
+ struct pt_regs *l2_regs,
+ u64 hv_ptr, u64 regs_ptr)
+{
+ int size;
+
+ size = hv_guest_state_size(l2_hv->version);
+ if (size < 0)
+ return -1;
+
+ return kvm_vcpu_write_guest(vcpu, hv_ptr, l2_hv, size) ||
+ kvm_vcpu_write_guest(vcpu, regs_ptr, l2_regs,
+ sizeof(struct pt_regs));
+}
+
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
{
long int err, r;
struct kvm_nested_guest *l2;
struct pt_regs l2_regs, saved_l1_regs;
- struct hv_guest_state l2_hv, saved_l1_hv;
+ struct hv_guest_state l2_hv = {0}, saved_l1_hv;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
u64 hv_ptr, regs_ptr;
u64 hdec_exp;
@@ -235,17 +281,15 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
hv_ptr = kvmppc_get_gpr(vcpu, 4);
regs_ptr = kvmppc_get_gpr(vcpu, 5);
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv,
- sizeof(struct hv_guest_state)) ||
- kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs,
- sizeof(struct pt_regs));
+ err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
+ hv_ptr, regs_ptr);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (err)
return H_PARAMETER;
if (kvmppc_need_byteswap(vcpu))
byteswap_hv_regs(&l2_hv);
- if (l2_hv.version != HV_GUEST_STATE_VERSION)
+ if (l2_hv.version > HV_GUEST_STATE_VERSION)
return H_P2;
if (kvmppc_need_byteswap(vcpu))
@@ -325,10 +369,8 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
byteswap_pt_regs(&l2_regs);
}
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv,
- sizeof(struct hv_guest_state)) ||
- kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs,
- sizeof(struct pt_regs));
+ err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
+ hv_ptr, regs_ptr);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (err)
return H_AUTHORITY;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index cd9995ee8441..5e634db4809b 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -52,11 +52,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define STACK_SLOT_PID (SFS-32)
#define STACK_SLOT_IAMR (SFS-40)
#define STACK_SLOT_CIABR (SFS-48)
-#define STACK_SLOT_DAWR (SFS-56)
-#define STACK_SLOT_DAWRX (SFS-64)
+#define STACK_SLOT_DAWR0 (SFS-56)
+#define STACK_SLOT_DAWRX0 (SFS-64)
#define STACK_SLOT_HFSCR (SFS-72)
#define STACK_SLOT_AMR (SFS-80)
#define STACK_SLOT_UAMOR (SFS-88)
+#define STACK_SLOT_DAWR1 (SFS-96)
+#define STACK_SLOT_DAWRX1 (SFS-104)
/* the following is used by the P9 short path */
#define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */
@@ -85,19 +87,6 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
RFI_TO_KERNEL
kvmppc_call_hv_entry:
-BEGIN_FTR_SECTION
- /* On P9, do LPCR setting, if necessary */
- ld r3, HSTATE_SPLIT_MODE(r13)
- cmpdi r3, 0
- beq 46f
- lwz r4, KVM_SPLIT_DO_SET(r3)
- cmpwi r4, 0
- beq 46f
- bl kvmhv_p9_set_lpcr
- nop
-46:
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-
ld r4, HSTATE_KVM_VCPU(r13)
bl kvmppc_hv_entry
@@ -361,11 +350,11 @@ kvm_secondary_got_guest:
LOAD_REG_ADDR(r6, decrementer_max)
ld r6, 0(r6)
mtspr SPRN_HDEC, r6
+BEGIN_FTR_SECTION
/* and set per-LPAR registers, if doing dynamic micro-threading */
ld r6, HSTATE_SPLIT_MODE(r13)
cmpdi r6, 0
beq 63f
-BEGIN_FTR_SECTION
ld r0, KVM_SPLIT_RPR(r6)
mtspr SPRN_RPR, r0
ld r0, KVM_SPLIT_PMMAR(r6)
@@ -373,16 +362,7 @@ BEGIN_FTR_SECTION
ld r0, KVM_SPLIT_LDBAR(r6)
mtspr SPRN_LDBAR, r0
isync
-FTR_SECTION_ELSE
- /* On P9 we use the split_info for coordinating LPCR changes */
- lwz r4, KVM_SPLIT_DO_SET(r6)
- cmpwi r4, 0
- beq 1f
- mr r3, r6
- bl kvmhv_p9_set_lpcr
- nop
-1:
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
63:
/* Order load of vcpu after load of vcore */
lwsync
@@ -452,19 +432,15 @@ kvm_no_guest:
mtcr r5
blr
-53: HMT_LOW
+53:
+BEGIN_FTR_SECTION
+ HMT_LOW
ld r5, HSTATE_KVM_VCORE(r13)
cmpdi r5, 0
bne 60f
ld r3, HSTATE_SPLIT_MODE(r13)
cmpdi r3, 0
beq kvm_no_guest
- lwz r0, KVM_SPLIT_DO_SET(r3)
- cmpwi r0, 0
- bne kvmhv_do_set
- lwz r0, KVM_SPLIT_DO_RESTORE(r3)
- cmpwi r0, 0
- bne kvmhv_do_restore
lbz r0, KVM_SPLIT_DO_NAP(r3)
cmpwi r0, 0
beq kvm_no_guest
@@ -472,24 +448,19 @@ kvm_no_guest:
b kvm_unsplit_nap
60: HMT_MEDIUM
b kvm_secondary_got_guest
+FTR_SECTION_ELSE
+ HMT_LOW
+ ld r5, HSTATE_KVM_VCORE(r13)
+ cmpdi r5, 0
+ beq kvm_no_guest
+ HMT_MEDIUM
+ b kvm_secondary_got_guest
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
54: li r0, KVM_HWTHREAD_IN_KVM
stb r0, HSTATE_HWTHREAD_STATE(r13)
b kvm_no_guest
-kvmhv_do_set:
- /* Set LPCR, LPIDR etc. on P9 */
- HMT_MEDIUM
- bl kvmhv_p9_set_lpcr
- nop
- b kvm_no_guest
-
-kvmhv_do_restore:
- HMT_MEDIUM
- bl kvmhv_p9_restore_lpcr
- nop
- b kvm_no_guest
-
/*
* Here the primary thread is trying to return the core to
* whole-core mode, so we need to nap.
@@ -527,7 +498,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/* Set kvm_split_mode.napped[tid] = 1 */
ld r3, HSTATE_SPLIT_MODE(r13)
li r0, 1
- lbz r4, HSTATE_TID(r13)
+ lhz r4, PACAPACAINDEX(r13)
+ clrldi r4, r4, 61 /* micro-threading => P8 => 8 threads/core */
addi r4, r4, KVM_SPLIT_NAPPED
stbx r0, r3, r4
/* Check the do_nap flag again after setting napped[] */
@@ -711,10 +683,16 @@ BEGIN_FTR_SECTION
mfspr r7, SPRN_DAWRX0
mfspr r8, SPRN_IAMR
std r5, STACK_SLOT_CIABR(r1)
- std r6, STACK_SLOT_DAWR(r1)
- std r7, STACK_SLOT_DAWRX(r1)
+ std r6, STACK_SLOT_DAWR0(r1)
+ std r7, STACK_SLOT_DAWRX0(r1)
std r8, STACK_SLOT_IAMR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+BEGIN_FTR_SECTION
+ mfspr r6, SPRN_DAWR1
+ mfspr r7, SPRN_DAWRX1
+ std r6, STACK_SLOT_DAWR1(r1)
+ std r7, STACK_SLOT_DAWRX1(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1)
mfspr r5, SPRN_AMR
std r5, STACK_SLOT_AMR(r1)
@@ -801,10 +779,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
lbz r5, 0(r5)
cmpdi r5, 0
beq 1f
- ld r5, VCPU_DAWR(r4)
- ld r6, VCPU_DAWRX(r4)
+ ld r5, VCPU_DAWR0(r4)
+ ld r6, VCPU_DAWRX0(r4)
mtspr SPRN_DAWR0, r5
mtspr SPRN_DAWRX0, r6
+BEGIN_FTR_SECTION
+ ld r5, VCPU_DAWR1(r4)
+ ld r6, VCPU_DAWRX1(r4)
+ mtspr SPRN_DAWR1, r5
+ mtspr SPRN_DAWRX1, r6
+END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
1:
ld r7, VCPU_CIABR(r4)
ld r8, VCPU_TAR(r4)
@@ -918,15 +902,19 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
cmpdi r3, 512 /* 1 microsecond */
blt hdec_soon
- /* For hash guest, clear out and reload the SLB */
ld r6, VCPU_KVM(r4)
lbz r0, KVM_RADIX(r6)
cmpwi r0, 0
bne 9f
+
+ /* For hash guest, clear out and reload the SLB */
+BEGIN_MMU_FTR_SECTION
+ /* Radix host won't have populated the SLB, so no need to clear */
li r6, 0
slbmte r6, r6
- slbia
+ PPC_SLBIA(6)
ptesync
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
lwz r5,VCPU_SLB_MAX(r4)
@@ -1187,6 +1175,20 @@ EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9)
mr r4, r3
b fast_guest_entry_c
guest_exit_short_path:
+ /*
+ * Malicious or buggy radix guests may have inserted SLB entries
+ * (only 0..3 because radix always runs with UPRT=1), so these must
+ * be cleared here to avoid side-channels. slbmte is used rather
+ * than slbia, as it won't clear cached translations.
+ */
+ li r0,0
+ slbmte r0,r0
+ li r4,1
+ slbmte r0,r4
+ li r4,2
+ slbmte r0,r4
+ li r4,3
+ slbmte r0,r4
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
@@ -1499,7 +1501,7 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
lbz r0, KVM_RADIX(r5)
li r5, 0
cmpwi r0, 0
- bne 3f /* for radix, save 0 entries */
+ bne 0f /* for radix, save 0 entries */
lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
mtctr r0
li r6,0
@@ -1518,13 +1520,13 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
/* Finally clear out the SLB */
li r0,0
slbmte r0,r0
- slbia
+ PPC_SLBIA(6)
ptesync
-3: stw r5,VCPU_SLB_MAX(r9)
+ stw r5,VCPU_SLB_MAX(r9)
/* load host SLB entries */
BEGIN_MMU_FTR_SECTION
- b 0f
+ b guest_bypass
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
ld r8,PACA_SLBSHADOWPTR(r13)
@@ -1538,7 +1540,21 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
slbmte r6,r5
1: addi r8,r8,16
.endr
-0:
+ b guest_bypass
+
+0: /*
+ * Sanitise radix guest SLB, see guest_exit_short_path comment.
+ * We clear vcpu->arch.slb_max to match earlier behaviour.
+ */
+ li r0,0
+ stw r0,VCPU_SLB_MAX(r9)
+ slbmte r0,r0
+ li r4,1
+ slbmte r0,r4
+ li r4,2
+ slbmte r0,r4
+ li r4,3
+ slbmte r0,r4
guest_bypass:
stw r12, STACK_SLOT_TRAP(r1)
@@ -1759,8 +1775,8 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
/* Restore host values of some registers */
BEGIN_FTR_SECTION
ld r5, STACK_SLOT_CIABR(r1)
- ld r6, STACK_SLOT_DAWR(r1)
- ld r7, STACK_SLOT_DAWRX(r1)
+ ld r6, STACK_SLOT_DAWR0(r1)
+ ld r7, STACK_SLOT_DAWRX0(r1)
mtspr SPRN_CIABR, r5
/*
* If the DAWR doesn't work, it's ok to write these here as
@@ -1770,6 +1786,12 @@ BEGIN_FTR_SECTION
mtspr SPRN_DAWRX0, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
+ ld r6, STACK_SLOT_DAWR1(r1)
+ ld r7, STACK_SLOT_DAWRX1(r1)
+ mtspr SPRN_DAWR1, r6
+ mtspr SPRN_DAWRX1, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1)
+BEGIN_FTR_SECTION
ld r5, STACK_SLOT_TID(r1)
ld r6, STACK_SLOT_PSSCR(r1)
ld r7, STACK_SLOT_PID(r1)
@@ -1938,24 +1960,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
19: lis r8,0x7fff /* MAX_INT@h */
mtspr SPRN_HDEC,r8
-16:
-BEGIN_FTR_SECTION
- /* On POWER9 with HPT-on-radix we need to wait for all other threads */
- ld r3, HSTATE_SPLIT_MODE(r13)
- cmpdi r3, 0
- beq 47f
- lwz r8, KVM_SPLIT_DO_RESTORE(r3)
- cmpwi r8, 0
- beq 47f
- bl kvmhv_p9_restore_lpcr
- nop
- b 48f
-47:
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
- ld r8,KVM_HOST_LPCR(r4)
+16: ld r8,KVM_HOST_LPCR(r4)
mtspr SPRN_LPCR,r8
isync
-48:
+
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/* Finish timing, if we have a vcpu */
ld r4, HSTATE_KVM_VCPU(r13)
@@ -2574,8 +2582,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
rlwimi r5, r4, 2, DAWRX_WT
clrrdi r4, r4, 3
- std r4, VCPU_DAWR(r3)
- std r5, VCPU_DAWRX(r3)
+ std r4, VCPU_DAWR0(r3)
+ std r5, VCPU_DAWRX0(r3)
/*
* If came in through the real mode hcall handler then it is necessary
* to write the registers since the return path won't. Otherwise it is
@@ -2779,8 +2787,10 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
beq kvm_end_cede
cmpwi r0, NAPPING_NOVCPU
beq kvm_novcpu_wakeup
+BEGIN_FTR_SECTION
cmpwi r0, NAPPING_UNSPLIT
beq kvm_unsplit_wakeup
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
twi 31,0,0 /* Nap state must not be zero */
33: mr r4, r3
@@ -3343,13 +3353,18 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_IAMR, r0
mtspr SPRN_CIABR, r0
mtspr SPRN_DAWRX0, r0
+BEGIN_FTR_SECTION
+ mtspr SPRN_DAWRX1, r0
+END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
+
+ /* Clear hash and radix guest SLB, see guest_exit_short_path comment. */
+ slbmte r0, r0
+ PPC_SLBIA(6)
BEGIN_MMU_FTR_SECTION
b 4f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
- slbmte r0, r0
- slbia
ptesync
ld r8, PACA_SLBSHADOWPTR(r13)
.rept SLB_NUM_BOLTED
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 30dfeac731c6..e7219b6f5f9a 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -1813,9 +1813,9 @@ int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
return -EINVAL;
if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
- state->asserted = 1;
+ state->asserted = true;
else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
- state->asserted = 0;
+ state->asserted = false;
return 0;
}
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 288a9820ec01..7d5fe43f85c4 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -20,6 +20,7 @@
#include <asm/cputable.h>
#include <linux/uaccess.h>
+#include <asm/interrupt.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
@@ -698,7 +699,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
r = 1;
- };
+ }
return r;
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index cf52d26f49cd..a2a68a958fa0 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -611,8 +611,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = !!(hv_enabled && radix_enabled());
break;
case KVM_CAP_PPC_MMU_HASH_V3:
- r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
- cpu_has_feature(CPU_FTR_HVMODE));
+ r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
+ kvmppc_hv_ops->hash_v3_possible());
break;
case KVM_CAP_PPC_NESTED_HV:
r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
@@ -678,6 +678,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = hv_enabled && kvmppc_hv_ops->enable_svm &&
!kvmppc_hv_ops->enable_svm(NULL);
break;
+ case KVM_CAP_PPC_DAWR1:
+ r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
+ !kvmppc_hv_ops->enable_dawr1(NULL));
+ break;
#endif
default:
r = 0;
@@ -1518,7 +1522,7 @@ int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
return emulated;
}
-int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
+static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
union kvmppc_one_reg reg;
int vmx_offset = 0;
@@ -1536,7 +1540,7 @@ int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
return result;
}
-int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
+static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
union kvmppc_one_reg reg;
int vmx_offset = 0;
@@ -1554,7 +1558,7 @@ int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
return result;
}
-int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
+static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
union kvmppc_one_reg reg;
int vmx_offset = 0;
@@ -1572,7 +1576,7 @@ int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
return result;
}
-int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
+static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
union kvmppc_one_reg reg;
int vmx_offset = 0;
@@ -2187,6 +2191,12 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
break;
r = kvm->arch.kvm_ops->enable_svm(kvm);
break;
+ case KVM_CAP_PPC_DAWR1:
+ r = -EINVAL;
+ if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
+ break;
+ r = kvm->arch.kvm_ops->enable_dawr1(kvm);
+ break;
#endif
default:
r = -EINVAL;
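A minimal userspace sketch (not part of this patch; fd handling and error paths are assumptions) of how a VMM could probe and opt in to the KVM_CAP_PPC_DAWR1 capability wired up above, using the standard KVM_CHECK_EXTENSION / KVM_ENABLE_CAP ioctls on the VM fd:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int vm_enable_dawr1(int vm_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_DAWR1 };

	/* Returns > 0 only when HV KVM is in use and the host has CPU_FTR_DAWR1. */
	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_DAWR1) <= 0)
		return -1;

	/* Lets the guest use H_SET_MODE_RESOURCE_SET_DAWR1 and the DAWR1 regs. */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}

Once enabled, the second watchpoint state would be migrated like the first, via the new KVM_REG_PPC_DAWR1 / KVM_REG_PPC_DAWRX1 one-reg IDs handled in kvmppc_get_one_reg_hv() and kvmppc_set_one_reg_hv() above.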
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 69a91b571845..d4efc182662a 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -31,7 +31,7 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
# 64-bit linker creates .sfpr on demand for final link (vmlinux),
# so it is only needed for modules, and only for older linkers which
# do not support --save-restore-funcs
-ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
+ifeq ($(call ld-ifversion, -lt, 22500, y),y)
extra-$(CONFIG_PPC64) += crtsavres.o
endif
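(The new threshold appears to assume ld-version.sh's three-part encoding, major * 10000 + minor * 100 + patchlevel, under which binutils 2.25.0 is reported as 2 * 10000 + 25 * 100 + 0 = 22500; the old 225000000 value corresponded to the previous, larger encoding.)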
diff --git a/arch/powerpc/lib/pmem.c b/arch/powerpc/lib/pmem.c
index 1550e0d2513a..eb2919ddf9b9 100644
--- a/arch/powerpc/lib/pmem.c
+++ b/arch/powerpc/lib/pmem.c
@@ -6,6 +6,7 @@
#include <linux/string.h>
#include <linux/export.h>
#include <linux/uaccess.h>
+#include <linux/libnvdimm.h>
#include <asm/cacheflush.h>
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index bf7a7d62ae8b..bb5c20d4ca91 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -818,13 +818,15 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
break;
if (rev) {
/* reverse 32 bytes */
- buf.d[0] = byterev_8(reg->d[3]);
- buf.d[1] = byterev_8(reg->d[2]);
- buf.d[2] = byterev_8(reg->d[1]);
- buf.d[3] = byterev_8(reg->d[0]);
- reg = &buf;
+ union vsx_reg buf32[2];
+ buf32[0].d[0] = byterev_8(reg[1].d[1]);
+ buf32[0].d[1] = byterev_8(reg[1].d[0]);
+ buf32[1].d[0] = byterev_8(reg[0].d[1]);
+ buf32[1].d[1] = byterev_8(reg[0].d[0]);
+ memcpy(mem, buf32, size);
+ } else {
+ memcpy(mem, reg, size);
}
- memcpy(mem, reg, size);
break;
case 16:
/* stxv, stxvx, stxvl, stxvll */
@@ -1304,9 +1306,11 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
if ((word & 0xfe2) == 2)
op->type = SYSCALL;
else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
- (word & 0xfe3) == 1)
+ (word & 0xfe3) == 1) { /* scv */
op->type = SYSCALL_VECTORED_0;
- else
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ } else
op->type = UNKNOWN;
return 0;
#endif
@@ -1410,7 +1414,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#ifdef __powerpc64__
case 1:
if (!cpu_has_feature(CPU_FTR_ARCH_31))
- return -1;
+ goto unknown_opcode;
prefix_r = GET_PREFIX_R(word);
ra = GET_PREFIX_RA(suffix);
@@ -1443,8 +1447,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#ifdef __powerpc64__
case 4:
+ /*
+	 * There are very many instructions with this primary opcode
+	 * introduced in the ISA as early as v2.03. However, the ones
+	 * we currently emulate were all introduced with ISA 3.0.
+ */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
switch (word & 0x3f) {
case 48: /* maddhd */
@@ -1470,7 +1479,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
* There are other instructions from ISA 3.0 with the same
* primary opcode which do not have emulation support yet.
*/
- return -1;
+ goto unknown_opcode;
#endif
case 7: /* mulli */
@@ -1530,6 +1539,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 19:
if (((word >> 1) & 0x1f) == 2) {
/* addpcis */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
imm = (short) (word & 0xffc1); /* d0 + d2 fields */
imm |= (word >> 15) & 0x3e; /* d1 field */
op->val = regs->nip + (imm << 16) + 4;
@@ -1842,7 +1853,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#ifdef __powerpc64__
case 265: /* modud */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
op->val = regs->gpr[ra] % regs->gpr[rb];
goto compute_done;
#endif
@@ -1852,7 +1863,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 267: /* moduw */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
op->val = (unsigned int) regs->gpr[ra] %
(unsigned int) regs->gpr[rb];
goto compute_done;
@@ -1889,7 +1900,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#endif
case 755: /* darn */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
switch (ra & 0x3) {
case 0:
/* 32-bit conditioned */
@@ -1907,18 +1918,18 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
goto compute_done;
}
- return -1;
+ goto unknown_opcode;
#ifdef __powerpc64__
case 777: /* modsd */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
op->val = (long int) regs->gpr[ra] %
(long int) regs->gpr[rb];
goto compute_done;
#endif
case 779: /* modsw */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
op->val = (int) regs->gpr[ra] %
(int) regs->gpr[rb];
goto compute_done;
@@ -1995,14 +2006,14 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#endif
case 538: /* cnttzw */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
val = (unsigned int) regs->gpr[rd];
op->val = (val ? __builtin_ctz(val) : 32);
goto logical_done;
#ifdef __powerpc64__
case 570: /* cnttzd */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
val = regs->gpr[rd];
op->val = (val ? __builtin_ctzl(val) : 64);
goto logical_done;
@@ -2112,7 +2123,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 890: /* extswsli with sh_5 = 0 */
case 891: /* extswsli with sh_5 = 1 */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
op->type = COMPUTE + SETREG;
sh = rb | ((word & 2) << 4);
val = (signed int) regs->gpr[rd];
@@ -2439,6 +2450,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
case 268: /* lxvx */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 16);
op->element_size = 16;
@@ -2448,6 +2461,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 269: /* lxvl */
case 301: { /* lxvll */
int nb;
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->reg = rd | ((word & 1) << 5);
op->ea = ra ? regs->gpr[ra] : 0;
nb = regs->gpr[rb] & 0xff;
@@ -2468,13 +2483,15 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 333: /* lxvpx */
if (!cpu_has_feature(CPU_FTR_ARCH_31))
- return -1;
+ goto unknown_opcode;
op->reg = VSX_REGISTER_XTP(rd);
op->type = MKOP(LOAD_VSX, 0, 32);
op->element_size = 32;
break;
case 364: /* lxvwsx */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 4);
op->element_size = 4;
@@ -2482,6 +2499,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
case 396: /* stxvx */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 16);
op->element_size = 16;
@@ -2491,6 +2510,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 397: /* stxvl */
case 429: { /* stxvll */
int nb;
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->reg = rd | ((word & 1) << 5);
op->ea = ra ? regs->gpr[ra] : 0;
nb = regs->gpr[rb] & 0xff;
@@ -2504,7 +2525,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
}
case 461: /* stxvpx */
if (!cpu_has_feature(CPU_FTR_ARCH_31))
- return -1;
+ goto unknown_opcode;
op->reg = VSX_REGISTER_XTP(rd);
op->type = MKOP(STORE_VSX, 0, 32);
op->element_size = 32;
@@ -2542,6 +2563,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
case 781: /* lxsibzx */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 1);
op->element_size = 8;
@@ -2549,6 +2572,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
case 812: /* lxvh8x */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 16);
op->element_size = 2;
@@ -2556,6 +2581,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
case 813: /* lxsihzx */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 2);
op->element_size = 8;
@@ -2569,6 +2596,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
case 876: /* lxvb16x */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 16);
op->element_size = 1;
@@ -2582,6 +2611,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
case 909: /* stxsibx */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 1);
op->element_size = 8;
@@ -2589,6 +2620,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
case 940: /* stxvh8x */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 16);
op->element_size = 2;
@@ -2596,6 +2629,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
case 941: /* stxsihx */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 2);
op->element_size = 8;
@@ -2609,6 +2644,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
case 1004: /* stxvb16x */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 16);
op->element_size = 1;
@@ -2717,12 +2754,16 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
op->type = MKOP(LOAD_FP, 0, 16);
break;
case 2: /* lxsd */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->reg = rd + 32;
op->type = MKOP(LOAD_VSX, 0, 8);
op->element_size = 8;
op->vsx_flags = VSX_CHECK_VEC;
break;
case 3: /* lxssp */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->reg = rd + 32;
op->type = MKOP(LOAD_VSX, 0, 4);
op->element_size = 8;
@@ -2752,7 +2793,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#ifdef CONFIG_VSX
case 6:
if (!cpu_has_feature(CPU_FTR_ARCH_31))
- return -1;
+ goto unknown_opcode;
op->ea = dqform_ea(word, regs);
op->reg = VSX_REGISTER_XTP(rd);
op->element_size = 32;
@@ -2775,6 +2816,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
case 1: /* lxv */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->ea = dqform_ea(word, regs);
if (word & 8)
op->reg = rd + 32;
@@ -2785,6 +2828,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 2: /* stxsd with LSB of DS field = 0 */
case 6: /* stxsd with LSB of DS field = 1 */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->ea = dsform_ea(word, regs);
op->reg = rd + 32;
op->type = MKOP(STORE_VSX, 0, 8);
@@ -2794,6 +2839,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 3: /* stxssp with LSB of DS field = 0 */
case 7: /* stxssp with LSB of DS field = 1 */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->ea = dsform_ea(word, regs);
op->reg = rd + 32;
op->type = MKOP(STORE_VSX, 0, 4);
@@ -2802,6 +2849,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
case 5: /* stxv */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->ea = dqform_ea(word, regs);
if (word & 8)
op->reg = rd + 32;
@@ -2831,7 +2880,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
case 1: /* Prefixed instructions */
if (!cpu_has_feature(CPU_FTR_ARCH_31))
- return -1;
+ goto unknown_opcode;
prefix_r = GET_PREFIX_R(word);
ra = GET_PREFIX_RA(suffix);
@@ -2970,6 +3019,20 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
}
+ if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
+ switch (GETTYPE(op->type)) {
+ case LOAD:
+ if (ra == rd)
+ goto unknown_opcode;
+ fallthrough;
+ case STORE:
+ case LOAD_FP:
+ case STORE_FP:
+ if (ra == 0)
+ goto unknown_opcode;
+ }
+ }
+
#ifdef CONFIG_VSX
if ((GETTYPE(op->type) == LOAD_VSX ||
GETTYPE(op->type) == STORE_VSX) &&
@@ -2980,6 +3043,10 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 0;
+ unknown_opcode:
+ op->type = UNKNOWN;
+ return 0;
+
logical_done:
if (word & 1)
set_cr0(regs, op);
diff --git a/arch/powerpc/mm/book3s32/Makefile b/arch/powerpc/mm/book3s32/Makefile
index 3f972db17761..446d9de88ce4 100644
--- a/arch/powerpc/mm/book3s32/Makefile
+++ b/arch/powerpc/mm/book3s32/Makefile
@@ -6,4 +6,6 @@ ifdef CONFIG_KASAN
CFLAGS_mmu.o += -DDISABLE_BRANCH_PROFILING
endif
-obj-y += mmu.o hash_low.o mmu_context.o tlb.o nohash_low.o
+obj-y += mmu.o mmu_context.o
+obj-$(CONFIG_PPC_BOOK3S_603) += nohash_low.o
+obj-$(CONFIG_PPC_BOOK3S_604) += hash_low.o tlb.o
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 859e5bd603ac..d7eb266a3f7a 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -234,7 +234,7 @@ void mmu_mark_initmem_nx(void)
if (is_module_segment(i << 28))
continue;
- mtsrin(mfsrin(i << 28) | 0x10000000, i << 28);
+ mtsr(mfsr(i << 28) | 0x10000000, i << 28);
}
}
diff --git a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c b/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
index b5e9fff8c217..a688e1324ae5 100644
--- a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
@@ -16,10 +16,6 @@
unsigned int hpage_shift;
EXPORT_SYMBOL(hpage_shift);
-extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
- unsigned long pa, unsigned long rlags,
- unsigned long vflags, int psize, int ssize);
-
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
pte_t *ptep, unsigned long trap, unsigned long flags,
int ssize, unsigned int shift, unsigned int mmu_psize)
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 73b06adb6eeb..581b20a2feaf 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -38,6 +38,7 @@
#include <linux/pgtable.h>
#include <asm/debugfs.h>
+#include <asm/interrupt.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
@@ -1143,10 +1144,10 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
page = pte_page(pte);
/* page is dirty */
- if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
+ if (!test_bit(PG_dcache_clean, &page->flags) && !PageReserved(page)) {
if (trap == 0x400) {
flush_dcache_icache_page(page);
- set_bit(PG_arch_1, &page->flags);
+ set_bit(PG_dcache_clean, &page->flags);
} else
pp |= HPTE_R_N;
}
@@ -1288,7 +1289,6 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
unsigned long flags)
{
bool is_thp;
- enum ctx_state prev_state = exception_enter();
pgd_t *pgdir;
unsigned long vsid;
pte_t *ptep;
@@ -1490,7 +1490,6 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
DBG_LOW(" -> rc=%d\n", rc);
bail:
- exception_exit(prev_state);
return rc;
}
EXPORT_SYMBOL_GPL(hash_page_mm);
@@ -1512,16 +1511,22 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
}
EXPORT_SYMBOL_GPL(hash_page);
-int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr,
- unsigned long msr)
+DECLARE_INTERRUPT_HANDLER_RET(__do_hash_fault);
+DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
{
+ unsigned long ea = regs->dar;
+ unsigned long dsisr = regs->dsisr;
unsigned long access = _PAGE_PRESENT | _PAGE_READ;
unsigned long flags = 0;
- struct mm_struct *mm = current->mm;
- unsigned int region_id = get_region_id(ea);
+ struct mm_struct *mm;
+ unsigned int region_id;
+ long err;
+ region_id = get_region_id(ea);
if ((region_id == VMALLOC_REGION_ID) || (region_id == IO_REGION_ID))
mm = &init_mm;
+ else
+ mm = current->mm;
if (dsisr & DSISR_NOHPTE)
flags |= HPTE_NOHPTE_UPDATE;
@@ -1537,13 +1542,66 @@ int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr,
* 2) user space access kernel space.
*/
access |= _PAGE_PRIVILEGED;
- if ((msr & MSR_PR) || (region_id == USER_REGION_ID))
+ if (user_mode(regs) || (region_id == USER_REGION_ID))
access &= ~_PAGE_PRIVILEGED;
- if (trap == 0x400)
+ if (regs->trap == 0x400)
access |= _PAGE_EXEC;
- return hash_page_mm(mm, ea, access, trap, flags);
+ err = hash_page_mm(mm, ea, access, regs->trap, flags);
+ if (unlikely(err < 0)) {
+			// failed to insert a hash PTE due to a hypervisor error
+ if (user_mode(regs)) {
+ if (IS_ENABLED(CONFIG_PPC_SUBPAGE_PROT) && err == -2)
+ _exception(SIGSEGV, regs, SEGV_ACCERR, ea);
+ else
+ _exception(SIGBUS, regs, BUS_ADRERR, ea);
+ } else {
+ bad_page_fault(regs, SIGBUS);
+ }
+ err = 0;
+ }
+
+ return err;
+}
+
+/*
+ * The _RAW interrupt entry checks for the in_nmi() case before
+ * running the full handler.
+ */
+DEFINE_INTERRUPT_HANDLER_RAW(do_hash_fault)
+{
+ unsigned long dsisr = regs->dsisr;
+ long err;
+
+ if (unlikely(dsisr & (DSISR_BAD_FAULT_64S | DSISR_KEYFAULT)))
+ goto page_fault;
+
+ /*
+ * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
+ * don't call hash_page, just fail the fault. This is required to
+ * prevent re-entrancy problems in the hash code, namely perf
+ * interrupts hitting while something holds H_PAGE_BUSY, and taking a
+ * hash fault. See the comment in hash_preload().
+ *
+ * We come here as a result of a DSI at a point where we don't want
+ * to call hash_page, such as when we are accessing memory (possibly
+ * user memory) inside a PMU interrupt that occurred while interrupts
+ * were soft-disabled. We want to invoke the exception handler for
+ * the access, or panic if there isn't a handler.
+ */
+ if (unlikely(in_nmi())) {
+ do_bad_page_fault_segv(regs);
+ return 0;
+ }
+
+ err = __do_hash_fault(regs);
+ if (err) {
+page_fault:
+ err = hash__do_page_fault(regs);
+ }
+
+ return err;
}
#ifdef CONFIG_PPC_MM_SLICES
@@ -1843,27 +1901,6 @@ void flush_hash_range(unsigned long number, int local)
}
}
-/*
- * low_hash_fault is called when we the low level hash code failed
- * to instert a PTE due to an hypervisor error
- */
-void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
-{
- enum ctx_state prev_state = exception_enter();
-
- if (user_mode(regs)) {
-#ifdef CONFIG_PPC_SUBPAGE_PROT
- if (rc == -2)
- _exception(SIGSEGV, regs, SEGV_ACCERR, address);
- else
-#endif
- _exception(SIGBUS, regs, BUS_ADRERR, address);
- } else
- bad_page_fault(regs, address, SIGBUS);
-
- exception_exit(prev_state);
-}
-
long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
unsigned long pa, unsigned long rflags,
unsigned long vflags, int psize, int ssize)
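The hash_utils.c hunks above drop the explicit exception_enter()/exception_exit() bookkeeping and turn __hash_page() into __do_hash_fault()/do_hash_fault(), declared through the DECLARE/DEFINE_INTERRUPT_HANDLER_* macros, so each handler receives only a struct pt_regs * and reads DAR, DSISR and the trap value from it. As a rough picture of what such a wrapper macro does — a minimal sketch only; the helper names interrupt_enter_prepare()/interrupt_exit_prepare() and their exact signatures are assumptions here, with the real definitions presumably living in the <asm/interrupt.h> header the later hunks include:

/*
 * Illustrative sketch of a DEFINE_INTERRUPT_HANDLER_RET-style wrapper.
 * Not the actual powerpc macro: the enter/exit helpers are placeholders
 * for whatever irq/soft-mask reconciliation the architecture requires.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)                              \
static long ____##func(struct pt_regs *regs);                           \
                                                                         \
long func(struct pt_regs *regs)                                          \
{                                                                        \
        long ret;                                                        \
                                                                         \
        interrupt_enter_prepare(regs);  /* reconcile irq state (assumed) */ \
        ret = ____##func(regs);         /* the body written after the macro */ \
        interrupt_exit_prepare(regs);                                    \
                                                                         \
        return ret;                                                      \
}                                                                        \
                                                                         \
static long ____##func(struct pt_regs *regs)

With that shape, DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault) { ... } expands to a wrapped entry point followed by the open function body, which is why the converted handlers above no longer take ea/dsisr/msr parameters.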
diff --git a/arch/powerpc/mm/book3s64/internal.h b/arch/powerpc/mm/book3s64/internal.h
index c12d78ee42f5..5045048ce244 100644
--- a/arch/powerpc/mm/book3s64/internal.h
+++ b/arch/powerpc/mm/book3s64/internal.h
@@ -15,4 +15,6 @@ static inline bool stress_slb(void)
void slb_setup_new_exec(void);
+void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush);
+
#endif /* ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H */
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index 685d7bb3d26f..cd18e94d0843 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -129,7 +129,8 @@ good_exit:
mutex_lock(&mem_list_mutex);
- list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) {
+ list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next,
+ lockdep_is_held(&mem_list_mutex)) {
/* Overlap? */
if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
(ua < (mem2->ua +
@@ -289,6 +290,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
{
struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
+ rcu_read_lock();
list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
if ((mem->ua <= ua) &&
(ua + size <= mem->ua +
@@ -297,6 +299,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
break;
}
}
+ rcu_read_unlock();
return ret;
}
@@ -327,7 +330,8 @@ struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
mutex_lock(&mem_list_mutex);
- list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
+ list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next,
+ lockdep_is_held(&mem_list_mutex)) {
if ((mem->ua == ua) && (mem->entries == entries)) {
ret = mem;
++mem->used;
@@ -421,6 +425,7 @@ bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
struct mm_iommu_table_group_mem_t *mem;
unsigned long end;
+ rcu_read_lock();
list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
continue;
@@ -437,6 +442,7 @@ bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
return true;
}
}
+ rcu_read_unlock();
return false;
}
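The iommu_api.c hunks use both RCU list idioms: traversals done while holding mem_list_mutex now pass a lockdep expression as the optional fourth argument of list_for_each_entry_rcu(), while the genuinely lockless lookups gain explicit rcu_read_lock()/rcu_read_unlock() sections. A small self-contained sketch of the two patterns — the list, lock and item type below are invented for illustration:

#include <linux/mutex.h>
#include <linux/rculist.h>

static DEFINE_MUTEX(demo_lock);
static LIST_HEAD(demo_list);

struct demo_item {
        struct list_head next;
        int key;
};

/* Writer-side walk: protected by the mutex, so tell lockdep about it. */
static struct demo_item *demo_find_locked(int key)
{
        struct demo_item *it;

        lockdep_assert_held(&demo_lock);
        list_for_each_entry_rcu(it, &demo_list, next,
                                lockdep_is_held(&demo_lock)) {
                if (it->key == key)
                        return it;
        }
        return NULL;
}

/* Reader-side walk: no lock held, so an RCU read-side section is required. */
static struct demo_item *demo_find_lockless(int key)
{
        struct demo_item *it, *ret = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(it, &demo_list, next) {
                if (it->key == key) {
                        ret = it;
                        break;
                }
        }
        rcu_read_unlock();

        /*
         * A real caller must guarantee the object outlives the RCU section
         * (e.g. via a reference count), as the mm_iommu code does with its
         * own usage accounting.
         */
        return ret;
}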
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 5b3a3bae21aa..9ffa65074cb0 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -20,6 +20,8 @@
#include <mm/mmu_decl.h>
#include <trace/events/thp.h>
+#include "internal.h"
+
unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
@@ -79,10 +81,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
-static void do_nothing(void *unused)
+static void do_serialize(void *arg)
{
-
+ /* We've taken the IPI, so try to trim the mask while here */
+ if (radix_enabled()) {
+ struct mm_struct *mm = arg;
+ exit_lazy_flush_tlb(mm, false);
+ }
}
+
/*
* Serialize against find_current_mm_pte which does lock-less
* lookup in page tables with local interrupts disabled. For huge pages
@@ -96,7 +103,7 @@ static void do_nothing(void *unused)
void serialize_against_pte_lookup(struct mm_struct *mm)
{
smp_mb();
- smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
+ smp_call_function_many(mm_cpumask(mm), do_serialize, mm, 1);
}
/*
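serialize_against_pte_lookup() works because the lockless page-table walkers (find_current_mm_pte() callers) run with local interrupts disabled: a synchronous IPI to every CPU in mm_cpumask() cannot complete until any walk that began before the PTE update has finished. The change above simply reuses that unavoidable IPI to also trim mm_cpumask() when radix is enabled. A stripped-down sketch of the basic IPI-serialization idea — names are illustrative, not the powerpc implementation:

#include <linux/mm_types.h>
#include <linux/smp.h>

/*
 * Taking the IPI is the whole point: if this CPU can run the callback,
 * any lockless walker on it (which runs with interrupts disabled) has
 * already finished.
 */
static void serialize_cb(void *unused)
{
}

static void wait_for_lockless_walkers(struct mm_struct *mm)
{
        /* Order the preceding PTE update before the mm_cpumask() load. */
        smp_mb();
        /* wait=true: return only after every targeted CPU ran the callback. */
        smp_call_function_many(mm_cpumask(mm), serialize_cb, NULL, true);
}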
diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c
index f1c6f264ed91..15dcc5ad91c5 100644
--- a/arch/powerpc/mm/book3s64/pkeys.c
+++ b/arch/powerpc/mm/book3s64/pkeys.c
@@ -31,6 +31,7 @@ static u32 initial_allocation_mask __ro_after_init;
u64 default_amr __ro_after_init = ~0x0UL;
u64 default_iamr __ro_after_init = 0x5555555555555555UL;
u64 default_uamor __ro_after_init;
+EXPORT_SYMBOL(default_amr);
/*
* Key used to implement PROT_EXEC mmap. Denies READ/WRITE
* We pick key 2 because 0 is special key and 1 is reserved as per ISA.
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index fb66d154b26c..409e61210789 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -18,6 +18,8 @@
#include <asm/cputhreads.h>
#include <asm/plpar_wrappers.h>
+#include "internal.h"
+
#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2
@@ -627,15 +629,6 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmadd
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);
-static bool mm_is_singlethreaded(struct mm_struct *mm)
-{
- if (atomic_read(&mm->context.copros) > 0)
- return false;
- if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm)
- return true;
- return false;
-}
-
static bool mm_needs_flush_escalation(struct mm_struct *mm)
{
/*
@@ -648,21 +641,24 @@ static bool mm_needs_flush_escalation(struct mm_struct *mm)
return false;
}
-#ifdef CONFIG_SMP
-static void do_exit_flush_lazy_tlb(void *arg)
+/*
+ * If always_flush is true, then flush even if this CPU can't be removed
+ * from mm_cpumask.
+ */
+void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush)
{
- struct mm_struct *mm = arg;
unsigned long pid = mm->context.id;
+ int cpu = smp_processor_id();
/*
* A kthread could have done a mmget_not_zero() after the flushing CPU
- * checked mm_is_singlethreaded, and be in the process of
- * kthread_use_mm when interrupted here. In that case, current->mm will
- * be set to mm, because kthread_use_mm() setting ->mm and switching to
- * the mm is done with interrupts off.
+ * checked mm_cpumask, and be in the process of kthread_use_mm when
+ * interrupted here. In that case, current->mm will be set to mm,
+ * because kthread_use_mm() setting ->mm and switching to the mm is
+ * done with interrupts off.
*/
if (current->mm == mm)
- goto out_flush;
+ goto out;
if (current->active_mm == mm) {
WARN_ON_ONCE(current->mm != NULL);
@@ -673,11 +669,30 @@ static void do_exit_flush_lazy_tlb(void *arg)
mmdrop(mm);
}
- atomic_dec(&mm->context.active_cpus);
- cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));
+ /*
+ * This IPI may be initiated from any source including those not
+ * running the mm, so there may be a racing IPI that comes after
+ * this one which finds the cpumask already clear. Check and avoid
+ * underflowing the active_cpus count in that case. The race should
+ * not otherwise be a problem, but the TLB must be flushed because
+ * that's what the caller expects.
+ */
+ if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
+ atomic_dec(&mm->context.active_cpus);
+ cpumask_clear_cpu(cpu, mm_cpumask(mm));
+ always_flush = true;
+ }
-out_flush:
- _tlbiel_pid(pid, RIC_FLUSH_ALL);
+out:
+ if (always_flush)
+ _tlbiel_pid(pid, RIC_FLUSH_ALL);
+}
+
+#ifdef CONFIG_SMP
+static void do_exit_flush_lazy_tlb(void *arg)
+{
+ struct mm_struct *mm = arg;
+ exit_lazy_flush_tlb(mm, true);
}
static void exit_flush_lazy_tlbs(struct mm_struct *mm)
@@ -693,9 +708,110 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm)
(void *)mm, 1);
}
+#else /* CONFIG_SMP */
+static inline void exit_flush_lazy_tlbs(struct mm_struct *mm) { }
+#endif /* CONFIG_SMP */
+
+static DEFINE_PER_CPU(unsigned int, mm_cpumask_trim_clock);
+
+/*
+ * Interval between flushes at which we send out IPIs to check whether the
+ * mm_cpumask can be trimmed for the case where it's not a single-threaded
+ * process flushing its own mm. The intent is to reduce the cost of later
+ * flushes. Don't want this to be so low that it adds noticeable cost to TLB
+ * flushing, or so high that it doesn't help reduce global TLBIEs.
+ */
+static unsigned long tlb_mm_cpumask_trim_timer = 1073;
+
+static bool tick_and_test_trim_clock(void)
+{
+ if (__this_cpu_inc_return(mm_cpumask_trim_clock) ==
+ tlb_mm_cpumask_trim_timer) {
+ __this_cpu_write(mm_cpumask_trim_clock, 0);
+ return true;
+ }
+ return false;
+}
+
+enum tlb_flush_type {
+ FLUSH_TYPE_NONE,
+ FLUSH_TYPE_LOCAL,
+ FLUSH_TYPE_GLOBAL,
+};
+
+static enum tlb_flush_type flush_type_needed(struct mm_struct *mm, bool fullmm)
+{
+ int active_cpus = atomic_read(&mm->context.active_cpus);
+ int cpu = smp_processor_id();
+
+ if (active_cpus == 0)
+ return FLUSH_TYPE_NONE;
+ if (active_cpus == 1 && cpumask_test_cpu(cpu, mm_cpumask(mm))) {
+ if (current->mm != mm) {
+ /*
+ * Asynchronous flush sources may trim down to nothing
+ * if the process is not running, so occasionally try
+ * to trim.
+ */
+ if (tick_and_test_trim_clock()) {
+ exit_lazy_flush_tlb(mm, true);
+ return FLUSH_TYPE_NONE;
+ }
+ }
+ return FLUSH_TYPE_LOCAL;
+ }
+
+ /* Coprocessors require TLBIE to invalidate nMMU. */
+ if (atomic_read(&mm->context.copros) > 0)
+ return FLUSH_TYPE_GLOBAL;
+
+ /*
+ * In the fullmm case there's no point doing the exit_flush_lazy_tlbs
+ * because the mm is being taken down anyway, and a TLBIE tends to
+ * be faster than an IPI+TLBIEL.
+ */
+ if (fullmm)
+ return FLUSH_TYPE_GLOBAL;
+
+ /*
+ * If we are running the only thread of a single-threaded process,
+ * then we should almost always be able to trim off the rest of the
+ * CPU mask (except in the case of use_mm() races), so always try
+ * trimming the mask.
+ */
+ if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm) {
+ exit_flush_lazy_tlbs(mm);
+ /*
+ * A use_mm() race could prevent IPIs from being able to clear
+ * the cpumask here; however, those users are established
+ * after our first check (and so after the PTEs are removed),
+ * and the TLB still gets flushed by the IPI, so this CPU
+ * will only require a local flush.
+ */
+ return FLUSH_TYPE_LOCAL;
+ }
+
+ /*
+ * Occasionally try to trim down the cpumask. It's possible this can
+ * bring the mask to zero, which results in no flush.
+ */
+ if (tick_and_test_trim_clock()) {
+ exit_flush_lazy_tlbs(mm);
+ if (current->mm == mm)
+ return FLUSH_TYPE_LOCAL;
+ if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
+ exit_lazy_flush_tlb(mm, true);
+ return FLUSH_TYPE_NONE;
+ }
+
+ return FLUSH_TYPE_GLOBAL;
+}
+
+#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
unsigned long pid;
+ enum tlb_flush_type type;
pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
@@ -703,16 +819,15 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
preempt_disable();
/*
- * Order loads of mm_cpumask vs previous stores to clear ptes before
- * the invalidate. See barrier in switch_mm_irqs_off
+ * Order loads of mm_cpumask (in flush_type_needed) vs previous
+ * stores to clear ptes before the invalidate. See barrier in
+ * switch_mm_irqs_off
*/
smp_mb();
- if (!mm_is_thread_local(mm)) {
- if (unlikely(mm_is_singlethreaded(mm))) {
- exit_flush_lazy_tlbs(mm);
- goto local;
- }
-
+ type = flush_type_needed(mm, false);
+ if (type == FLUSH_TYPE_LOCAL) {
+ _tlbiel_pid(pid, RIC_FLUSH_TLB);
+ } else if (type == FLUSH_TYPE_GLOBAL) {
if (!mmu_has_feature(MMU_FTR_GTSE)) {
unsigned long tgt = H_RPTI_TARGET_CMMU;
@@ -728,9 +843,6 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
} else {
_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB);
}
- } else {
-local:
- _tlbiel_pid(pid, RIC_FLUSH_TLB);
}
preempt_enable();
}
@@ -739,6 +851,7 @@ EXPORT_SYMBOL(radix__flush_tlb_mm);
static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
{
unsigned long pid;
+ enum tlb_flush_type type;
pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
@@ -746,13 +859,10 @@ static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
preempt_disable();
smp_mb(); /* see radix__flush_tlb_mm */
- if (!mm_is_thread_local(mm)) {
- if (unlikely(mm_is_singlethreaded(mm))) {
- if (!fullmm) {
- exit_flush_lazy_tlbs(mm);
- goto local;
- }
- }
+ type = flush_type_needed(mm, fullmm);
+ if (type == FLUSH_TYPE_LOCAL) {
+ _tlbiel_pid(pid, RIC_FLUSH_ALL);
+ } else if (type == FLUSH_TYPE_GLOBAL) {
if (!mmu_has_feature(MMU_FTR_GTSE)) {
unsigned long tgt = H_RPTI_TARGET_CMMU;
unsigned long type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
@@ -766,9 +876,6 @@ static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
_tlbie_pid(pid, RIC_FLUSH_ALL);
else
_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL);
- } else {
-local:
- _tlbiel_pid(pid, RIC_FLUSH_ALL);
}
preempt_enable();
}
@@ -783,6 +890,7 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
int psize)
{
unsigned long pid;
+ enum tlb_flush_type type;
pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
@@ -790,11 +898,10 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
preempt_disable();
smp_mb(); /* see radix__flush_tlb_mm */
- if (!mm_is_thread_local(mm)) {
- if (unlikely(mm_is_singlethreaded(mm))) {
- exit_flush_lazy_tlbs(mm);
- goto local;
- }
+ type = flush_type_needed(mm, false);
+ if (type == FLUSH_TYPE_LOCAL) {
+ _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
+ } else if (type == FLUSH_TYPE_GLOBAL) {
if (!mmu_has_feature(MMU_FTR_GTSE)) {
unsigned long tgt, pg_sizes, size;
@@ -811,9 +918,6 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
else
_tlbiel_va_multicast(mm, vmaddr, pid, psize, RIC_FLUSH_TLB);
- } else {
-local:
- _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
}
preempt_enable();
}
@@ -828,8 +932,6 @@ void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
}
EXPORT_SYMBOL(radix__flush_tlb_page);
-#else /* CONFIG_SMP */
-static inline void exit_flush_lazy_tlbs(struct mm_struct *mm) { }
#endif /* CONFIG_SMP */
static void do_tlbiel_kernel(void *info)
@@ -893,7 +995,9 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
unsigned long page_size = 1UL << page_shift;
unsigned long nr_pages = (end - start) >> page_shift;
- bool local, full;
+ bool fullmm = (end == TLB_FLUSH_ALL);
+ bool flush_pid;
+ enum tlb_flush_type type;
pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
@@ -901,24 +1005,18 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
preempt_disable();
smp_mb(); /* see radix__flush_tlb_mm */
- if (!mm_is_thread_local(mm)) {
- if (unlikely(mm_is_singlethreaded(mm))) {
- if (end != TLB_FLUSH_ALL) {
- exit_flush_lazy_tlbs(mm);
- goto is_local;
- }
- }
- local = false;
- full = (end == TLB_FLUSH_ALL ||
- nr_pages > tlb_single_page_flush_ceiling);
- } else {
-is_local:
- local = true;
- full = (end == TLB_FLUSH_ALL ||
- nr_pages > tlb_local_single_page_flush_ceiling);
- }
+ type = flush_type_needed(mm, fullmm);
+ if (type == FLUSH_TYPE_NONE)
+ goto out;
+
+ if (fullmm)
+ flush_pid = true;
+ else if (type == FLUSH_TYPE_GLOBAL)
+ flush_pid = nr_pages > tlb_single_page_flush_ceiling;
+ else
+ flush_pid = nr_pages > tlb_local_single_page_flush_ceiling;
- if (!mmu_has_feature(MMU_FTR_GTSE) && !local) {
+ if (!mmu_has_feature(MMU_FTR_GTSE) && type == FLUSH_TYPE_GLOBAL) {
unsigned long tgt = H_RPTI_TARGET_CMMU;
unsigned long pg_sizes = psize_to_rpti_pgsize(mmu_virtual_psize);
@@ -928,8 +1026,8 @@ is_local:
tgt |= H_RPTI_TARGET_NMMU;
pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB, pg_sizes,
start, end);
- } else if (full) {
- if (local) {
+ } else if (flush_pid) {
+ if (type == FLUSH_TYPE_LOCAL) {
_tlbiel_pid(pid, RIC_FLUSH_TLB);
} else {
if (cputlb_use_tlbie()) {
@@ -952,7 +1050,7 @@ is_local:
hflush = true;
}
- if (local) {
+ if (type == FLUSH_TYPE_LOCAL) {
asm volatile("ptesync": : :"memory");
__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
if (hflush)
@@ -974,6 +1072,7 @@ is_local:
hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, false);
}
}
+out:
preempt_enable();
}
@@ -1085,32 +1184,30 @@ static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
unsigned int page_shift = mmu_psize_defs[psize].shift;
unsigned long page_size = 1UL << page_shift;
unsigned long nr_pages = (end - start) >> page_shift;
- bool local, full;
+ bool fullmm = (end == TLB_FLUSH_ALL);
+ bool flush_pid;
+ enum tlb_flush_type type;
pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
return;
+ fullmm = (end == TLB_FLUSH_ALL);
+
preempt_disable();
smp_mb(); /* see radix__flush_tlb_mm */
- if (!mm_is_thread_local(mm)) {
- if (unlikely(mm_is_singlethreaded(mm))) {
- if (end != TLB_FLUSH_ALL) {
- exit_flush_lazy_tlbs(mm);
- goto is_local;
- }
- }
- local = false;
- full = (end == TLB_FLUSH_ALL ||
- nr_pages > tlb_single_page_flush_ceiling);
- } else {
-is_local:
- local = true;
- full = (end == TLB_FLUSH_ALL ||
- nr_pages > tlb_local_single_page_flush_ceiling);
- }
+ type = flush_type_needed(mm, fullmm);
+ if (type == FLUSH_TYPE_NONE)
+ goto out;
+
+ if (fullmm)
+ flush_pid = true;
+ else if (type == FLUSH_TYPE_GLOBAL)
+ flush_pid = nr_pages > tlb_single_page_flush_ceiling;
+ else
+ flush_pid = nr_pages > tlb_local_single_page_flush_ceiling;
- if (!mmu_has_feature(MMU_FTR_GTSE) && !local) {
+ if (!mmu_has_feature(MMU_FTR_GTSE) && type == FLUSH_TYPE_GLOBAL) {
unsigned long tgt = H_RPTI_TARGET_CMMU;
unsigned long type = H_RPTI_TYPE_TLB;
unsigned long pg_sizes = psize_to_rpti_pgsize(psize);
@@ -1120,8 +1217,8 @@ is_local:
if (atomic_read(&mm->context.copros) > 0)
tgt |= H_RPTI_TARGET_NMMU;
pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end);
- } else if (full) {
- if (local) {
+ } else if (flush_pid) {
+ if (type == FLUSH_TYPE_LOCAL) {
_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
} else {
if (cputlb_use_tlbie()) {
@@ -1137,7 +1234,7 @@ is_local:
}
} else {
- if (local)
+ if (type == FLUSH_TYPE_LOCAL)
_tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
else if (cputlb_use_tlbie())
_tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
@@ -1145,6 +1242,7 @@ is_local:
_tlbiel_va_range_multicast(mm,
start, end, pid, page_size, psize, also_pwc);
}
+out:
preempt_enable();
}
@@ -1164,6 +1262,7 @@ static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
unsigned long pid, end;
+ enum tlb_flush_type type;
pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
@@ -1180,11 +1279,10 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
/* Otherwise first do the PWC, then iterate the pages. */
preempt_disable();
smp_mb(); /* see radix__flush_tlb_mm */
- if (!mm_is_thread_local(mm)) {
- if (unlikely(mm_is_singlethreaded(mm))) {
- exit_flush_lazy_tlbs(mm);
- goto local;
- }
+ type = flush_type_needed(mm, false);
+ if (type == FLUSH_TYPE_LOCAL) {
+ _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
+ } else if (type == FLUSH_TYPE_GLOBAL) {
if (!mmu_has_feature(MMU_FTR_GTSE)) {
unsigned long tgt, type, pg_sizes;
@@ -1202,9 +1300,6 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
else
_tlbiel_va_range_multicast(mm,
addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
- } else {
-local:
- _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
}
preempt_enable();
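A recurring piece of the radix_tlb.c rework is tick_and_test_trim_clock(): instead of probing and trimming mm_cpumask() on every flush, each CPU counts flushes and only attempts the (IPI-based, comparatively expensive) trim once every tlb_mm_cpumask_trim_timer flushes. The idea generalizes to rate-limiting any per-CPU maintenance step; a minimal sketch, with an arbitrary threshold:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, trim_clock);
static unsigned long trim_interval = 1073;      /* arbitrary, like the value above */

/*
 * Returns true roughly once per trim_interval calls on this CPU.
 * The caller must have preemption disabled (as the flush paths above do),
 * otherwise the __this_cpu_*() accessors are not safe to use.
 */
static bool should_try_trim(void)
{
        if (__this_cpu_inc_return(trim_clock) == trim_interval) {
                __this_cpu_write(trim_clock, 0);
                return true;
        }
        return false;
}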
diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c
index 584567970c11..c91bd85eb90e 100644
--- a/arch/powerpc/mm/book3s64/slb.c
+++ b/arch/powerpc/mm/book3s64/slb.c
@@ -10,6 +10,7 @@
*/
#include <asm/asm-prototypes.h>
+#include <asm/interrupt.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
@@ -813,8 +814,9 @@ static long slb_allocate_user(struct mm_struct *mm, unsigned long ea)
return slb_insert_entry(ea, context, flags, ssize, false);
}
-long do_slb_fault(struct pt_regs *regs, unsigned long ea)
+DEFINE_INTERRUPT_HANDLER_RAW(do_slb_fault)
{
+ unsigned long ea = regs->dar;
unsigned long id = get_region_id(ea);
/* IRQs are not reconciled here, so can't check irqs_disabled */
@@ -824,19 +826,21 @@ long do_slb_fault(struct pt_regs *regs, unsigned long ea)
return -EINVAL;
/*
- * SLB kernel faults must be very careful not to touch anything
- * that is not bolted. E.g., PACA and global variables are okay,
- * mm->context stuff is not.
- *
- * SLB user faults can access all of kernel memory, but must be
- * careful not to touch things like IRQ state because it is not
- * "reconciled" here. The difficulty is that we must use
- * fast_exception_return to return from kernel SLB faults without
- * looking at possible non-bolted memory. We could test user vs
- * kernel faults in the interrupt handler asm and do a full fault,
- * reconcile, ret_from_except for user faults which would make them
- * first class kernel code. But for performance it's probably nicer
- * if they go via fast_exception_return too.
+ * SLB kernel faults must be very careful not to touch anything that is
+ * not bolted. E.g., PACA and global variables are okay, mm->context
+ * stuff is not. SLB user faults may access all of memory (and induce
+ * one recursive SLB kernel fault), so the kernel fault must not
+ * trample on the user fault state at those points.
+ */
+
+ /*
+ * This is a raw interrupt handler, for performance, so that
+ * fast_interrupt_return can be used. The handler must not touch local
+ * irq state, or schedule. We could test for usermode and upgrade to a
+ * normal process context (synchronous) interrupt for those, which
+ * would make them first-class kernel code and able to be traced and
+ * instrumented. Performance would suffer a bit, but it would
+ * probably be a good tradeoff.
*/
if (id >= LINEAR_MAP_REGION_ID) {
long err;
@@ -865,13 +869,15 @@ long do_slb_fault(struct pt_regs *regs, unsigned long ea)
}
}
-void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err)
+DEFINE_INTERRUPT_HANDLER(do_bad_slb_fault)
{
+ int err = regs->result;
+
if (err == -EFAULT) {
if (user_mode(regs))
- _exception(SIGSEGV, regs, SEGV_BNDERR, ea);
+ _exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
else
- bad_page_fault(regs, ea, SIGSEGV);
+ bad_page_fault(regs, SIGSEGV);
} else if (err == -EINVAL) {
unrecoverable_exception(regs);
} else {
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 8961b44f350c..bb368257b55c 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -34,6 +34,7 @@
#include <linux/uaccess.h>
#include <asm/firmware.h>
+#include <asm/interrupt.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
@@ -377,18 +378,16 @@ static void sanity_check_fault(bool is_write, bool is_user,
/*
* For 600- and 800-family processors, the error_code parameter is DSISR
- * for a data fault, SRR1 for an instruction fault. For 400-family processors
- * the error_code parameter is ESR for a data fault, 0 for an instruction
- * fault.
- * For 64-bit processors, the error_code parameter is
- * - DSISR for a non-SLB data access fault,
- * - SRR1 & 0x08000000 for a non-SLB instruction access fault
- * - 0 any SLB fault.
+ * for a data fault, SRR1 for an instruction fault.
+ * For 400-family processors the error_code parameter is ESR for a data fault,
+ * 0 for an instruction fault.
+ * For 64-bit processors, the error_code parameter is DSISR for a data access
+ * fault, SRR1 & 0x08000000 for an instruction access fault.
*
* The return value is 0 if the fault was handled, or the signal
* number if this is a kernel fault that can't be handled here.
*/
-static int __do_page_fault(struct pt_regs *regs, unsigned long address,
+static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
struct vm_area_struct * vma;
@@ -435,9 +434,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
return bad_area_nosemaphore(regs, address);
}
- /* We restore the interrupt state now */
- if (!arch_irq_disabled_regs(regs))
- local_irq_enable();
+ interrupt_cond_local_irq_enable(regs);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
@@ -540,34 +537,51 @@ retry:
return 0;
}
-NOKPROBE_SYMBOL(__do_page_fault);
+NOKPROBE_SYMBOL(___do_page_fault);
-int do_page_fault(struct pt_regs *regs, unsigned long address,
- unsigned long error_code)
+static long __do_page_fault(struct pt_regs *regs)
{
const struct exception_table_entry *entry;
- enum ctx_state prev_state = exception_enter();
- int rc = __do_page_fault(regs, address, error_code);
- exception_exit(prev_state);
- if (likely(!rc))
- return 0;
+ long err;
+
+ err = ___do_page_fault(regs, regs->dar, regs->dsisr);
+ if (likely(!err))
+ return err;
entry = search_exception_tables(regs->nip);
- if (unlikely(!entry))
- return rc;
+ if (likely(entry)) {
+ instruction_pointer_set(regs, extable_fixup(entry));
+ return 0;
+ } else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
+ __bad_page_fault(regs, err);
+ return 0;
+ } else {
+ /* 32 and 64e handle the bad page fault in asm */
+ return err;
+ }
+}
+NOKPROBE_SYMBOL(__do_page_fault);
- instruction_pointer_set(regs, extable_fixup(entry));
+DEFINE_INTERRUPT_HANDLER_RET(do_page_fault)
+{
+ return __do_page_fault(regs);
+}
- return 0;
+#ifdef CONFIG_PPC_BOOK3S_64
+/* Same as do_page_fault but interrupt entry has already run in do_hash_fault */
+long hash__do_page_fault(struct pt_regs *regs)
+{
+ return __do_page_fault(regs);
}
-NOKPROBE_SYMBOL(do_page_fault);
+NOKPROBE_SYMBOL(hash__do_page_fault);
+#endif
/*
* bad_page_fault is called when we have a bad access from the kernel.
* It is called from the DSI and ISI handlers in head.S and from some
* of the procedures in traps.c.
*/
-void __bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
+void __bad_page_fault(struct pt_regs *regs, int sig)
{
int is_write = page_fault_is_write(regs->dsisr);
@@ -605,7 +619,7 @@ void __bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
die("Kernel access of bad area", regs, sig);
}
-void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
+void bad_page_fault(struct pt_regs *regs, int sig)
{
const struct exception_table_entry *entry;
@@ -614,5 +628,12 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
if (entry)
instruction_pointer_set(regs, extable_fixup(entry));
else
- __bad_page_fault(regs, address, sig);
+ __bad_page_fault(regs, sig);
}
+
+#ifdef CONFIG_PPC_BOOK3S_64
+DEFINE_INTERRUPT_HANDLER(do_bad_page_fault_segv)
+{
+ bad_page_fault(regs, SIGSEGV);
+}
+#endif
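In the reworked fault path, a kernel-mode fault that ___do_page_fault() cannot resolve is first checked against the exception tables: if the faulting instruction has a fixup entry (as user-access helpers do), execution resumes at the fixup stub instead of the kernel dying. A minimal sketch of that pattern using the helpers visible above — regs->nip and extable_fixup() are powerpc-specific, and the wrapper function itself is illustrative:

#include <linux/extable.h>
#include <asm/ptrace.h>

static long fixup_or_fail(struct pt_regs *regs, long err)
{
        const struct exception_table_entry *entry;

        /* Was the faulting instruction marked as having a recovery stub? */
        entry = search_exception_tables(regs->nip);
        if (entry) {
                /* Resume at the fixup code rather than re-executing the access. */
                instruction_pointer_set(regs, extable_fixup(entry));
                return 0;
        }

        /* No fixup: this is an unrecoverable kernel access. */
        return err;
}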
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8b3cc4d688e8..d142b76d507d 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -217,7 +217,7 @@ void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_p
}
}
-int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
+static int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
struct huge_bootmem_page *m;
if (nr_gpages == 0)
@@ -663,24 +663,6 @@ static int __init hugetlbpage_init(void)
arch_initcall(hugetlbpage_init);
-void flush_dcache_icache_hugepage(struct page *page)
-{
- int i;
- void *start;
-
- BUG_ON(!PageCompound(page));
-
- for (i = 0; i < compound_nr(page); i++) {
- if (!PageHighMem(page)) {
- __flush_dcache_icache(page_address(page+i));
- } else {
- start = kmap_atomic(page+i);
- __flush_dcache_icache(start);
- kunmap_atomic(start);
- }
- }
-}
-
void __init gigantic_hugetlb_cma_reserve(void)
{
unsigned long order = 0;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index afab328d0887..4e8ce6d85232 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -91,27 +91,6 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
return -ENODEV;
}
-#define FLUSH_CHUNK_SIZE SZ_1G
-/**
- * flush_dcache_range_chunked(): Write any modified data cache blocks out to
- * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE
- * Does not invalidate the corresponding instruction cache blocks.
- *
- * @start: the start address
- * @stop: the stop address (exclusive)
- * @chunk: the max size of the chunks
- */
-static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
- unsigned long chunk)
-{
- unsigned long i;
-
- for (i = start; i < stop; i += chunk) {
- flush_dcache_range(i, min(stop, i + chunk));
- cond_resched();
- }
-}
-
int __ref arch_create_linear_mapping(int nid, u64 start, u64 size,
struct mhp_params *params)
{
@@ -136,7 +115,6 @@ void __ref arch_remove_linear_mapping(u64 start, u64 size)
/* Remove htab bolted mappings for this section of memory */
start = (unsigned long)__va(start);
- flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);
mutex_lock(&linear_mapping_mutex);
ret = remove_section_mapping(start, start + size);
@@ -489,19 +467,35 @@ void flush_dcache_page(struct page *page)
if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
return;
/* avoid an atomic op if possible */
- if (test_bit(PG_arch_1, &page->flags))
- clear_bit(PG_arch_1, &page->flags);
+ if (test_bit(PG_dcache_clean, &page->flags))
+ clear_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
-void flush_dcache_icache_page(struct page *page)
+static void flush_dcache_icache_hugepage(struct page *page)
{
-#ifdef CONFIG_HUGETLB_PAGE
- if (PageCompound(page)) {
- flush_dcache_icache_hugepage(page);
- return;
+ int i;
+ void *start;
+
+ BUG_ON(!PageCompound(page));
+
+ for (i = 0; i < compound_nr(page); i++) {
+ if (!PageHighMem(page)) {
+ __flush_dcache_icache(page_address(page+i));
+ } else {
+ start = kmap_atomic(page+i);
+ __flush_dcache_icache(start);
+ kunmap_atomic(start);
+ }
}
-#endif
+}
+
+void flush_dcache_icache_page(struct page *page)
+{
+
+ if (PageCompound(page))
+ return flush_dcache_icache_hugepage(page);
+
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
/* On 8xx there is no need to kmap since highmem is not supported */
__flush_dcache_icache(page_address(page));
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 15555c95cebc..354611940118 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -26,6 +26,7 @@
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/hugetlb.h>
+#include <asm/pte-walk.h>
static inline int is_exec_fault(void)
{
@@ -81,9 +82,9 @@ static pte_t set_pte_filter_hash(pte_t pte)
struct page *pg = maybe_pte_to_page(pte);
if (!pg)
return pte;
- if (!test_bit(PG_arch_1, &pg->flags)) {
+ if (!test_bit(PG_dcache_clean, &pg->flags)) {
flush_dcache_icache_page(pg);
- set_bit(PG_arch_1, &pg->flags);
+ set_bit(PG_dcache_clean, &pg->flags);
}
}
return pte;
@@ -116,13 +117,13 @@ static inline pte_t set_pte_filter(pte_t pte)
return pte;
/* If the page clean, we move on */
- if (test_bit(PG_arch_1, &pg->flags))
+ if (test_bit(PG_dcache_clean, &pg->flags))
return pte;
/* If it's an exec fault, we flush the cache and make it clean */
if (is_exec_fault()) {
flush_dcache_icache_page(pg);
- set_bit(PG_arch_1, &pg->flags);
+ set_bit(PG_dcache_clean, &pg->flags);
return pte;
}
@@ -161,12 +162,12 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
goto bail;
/* If the page is already clean, we move on */
- if (test_bit(PG_arch_1, &pg->flags))
+ if (test_bit(PG_dcache_clean, &pg->flags))
goto bail;
- /* Clean the page and set PG_arch_1 */
+ /* Clean the page and set PG_dcache_clean */
flush_dcache_icache_page(pg);
- set_bit(PG_arch_1, &pg->flags);
+ set_bit(PG_dcache_clean, &pg->flags);
bail:
return pte_mkexec(pte);
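The PG_arch_1 -> PG_dcache_clean rename makes the lazy icache-coherency protocol explicit: the bit means "this page's D-cache has been flushed far enough that the I-cache cannot see stale data", flush_dcache_page() clears it when the page becomes dirty, and the fault/filter paths set it after flushing. A minimal sketch of the pattern — PG_dcache_clean is assumed to be an arch-provided alias for PG_arch_1, as the rename implies, and flush_dcache_icache_page() is the powerpc helper shown above:

#include <linux/mm.h>

static void make_page_coherent_for_exec(struct page *page)
{
        /* Already flushed since it was last dirtied? Then nothing to do. */
        if (test_bit(PG_dcache_clean, &page->flags))
                return;

        flush_dcache_icache_page(page);
        set_bit(PG_dcache_clean, &page->flags);
}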
diff --git a/arch/powerpc/mm/ptdump/segment_regs.c b/arch/powerpc/mm/ptdump/segment_regs.c
index dde2fe8de4b2..565048a0c9be 100644
--- a/arch/powerpc/mm/ptdump/segment_regs.c
+++ b/arch/powerpc/mm/ptdump/segment_regs.c
@@ -10,7 +10,7 @@
static void seg_show(struct seq_file *m, int i)
{
- u32 val = mfsrin(i << 28);
+ u32 val = mfsr(i << 28);
seq_printf(m, "0x%01x0000000-0x%01xfffffff ", i, i);
seq_printf(m, "Kern key %d ", (val >> 30) & 1);
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 022103c6a201..aaf1a887f653 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -683,10 +683,18 @@ emit_clear:
break;
/*
- * BPF_STX XADD (atomic_add)
+ * BPF_STX ATOMIC (atomic ops)
*/
- /* *(u32 *)(dst + off) += src */
- case BPF_STX | BPF_XADD | BPF_W:
+ case BPF_STX | BPF_ATOMIC | BPF_W:
+ if (insn->imm != BPF_ADD) {
+ pr_err_ratelimited(
+ "eBPF filter atomic op code %02x (@%d) unsupported\n",
+ code, i);
+ return -ENOTSUPP;
+ }
+
+ /* *(u32 *)(dst + off) += src */
+
/* Get EA into TMP_REG_1 */
EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
tmp_idx = ctx->idx * 4;
@@ -699,8 +707,15 @@ emit_clear:
/* we're done if this succeeded */
PPC_BCC_SHORT(COND_NE, tmp_idx);
break;
- /* *(u64 *)(dst + off) += src */
- case BPF_STX | BPF_XADD | BPF_DW:
+ case BPF_STX | BPF_ATOMIC | BPF_DW:
+ if (insn->imm != BPF_ADD) {
+ pr_err_ratelimited(
+ "eBPF filter atomic op code %02x (@%d) unsupported\n",
+ code, i);
+ return -ENOTSUPP;
+ }
+ /* *(u64 *)(dst + off) += src */
+
EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
tmp_idx = ctx->idx * 4;
EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
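With the BPF_STX | BPF_ATOMIC encoding, the specific atomic operation is carried in the instruction's imm field, so a JIT that previously only understood XADD has to dispatch on insn->imm and refuse anything it cannot emit, exactly as the hunks above do for everything other than BPF_ADD. A hedged sketch of that dispatch — the function is illustrative, and where it returns -EOPNOTSUPP the JIT above returns -ENOTSUPP:

#include <linux/bpf.h>
#include <linux/errno.h>

static int jit_atomic_op(const struct bpf_insn *insn)
{
        switch (insn->imm) {
        case BPF_ADD:
                /* Old XADD semantics: emit a larx/stcx. add loop here. */
                return 0;
        default:
                /* BPF_AND, BPF_OR, BPF_XOR, BPF_XCHG, BPF_CMPXCHG, ... */
                return -EOPNOTSUPP;
        }
}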
diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile
deleted file mode 100644
index bb2d94c8cbe6..000000000000
--- a/arch/powerpc/oprofile/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
-oprofile-$(CONFIG_OPROFILE_CELL) += op_model_cell.o \
- cell/spu_profiler.o cell/vma_map.o \
- cell/spu_task_sync.o
-oprofile-$(CONFIG_PPC_BOOK3S_64) += op_model_power4.o op_model_pa6t.o
-oprofile-$(CONFIG_FSL_EMB_PERFMON) += op_model_fsl_emb.o
-oprofile-$(CONFIG_PPC_BOOK3S_32) += op_model_7450.o
diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c
deleted file mode 100644
index 9db7ada79d10..000000000000
--- a/arch/powerpc/oprofile/backtrace.c
+++ /dev/null
@@ -1,120 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/**
- * Copyright (C) 2005 Brian Rogan <bcr6@cornell.edu>, IBM
- *
-**/
-
-#include <linux/time.h>
-#include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <asm/processor.h>
-#include <linux/uaccess.h>
-#include <linux/compat.h>
-#include <asm/oprofile_impl.h>
-
-#define STACK_SP(STACK) *(STACK)
-
-#define STACK_LR64(STACK) *((unsigned long *)(STACK) + 2)
-#define STACK_LR32(STACK) *((unsigned int *)(STACK) + 1)
-
-#ifdef CONFIG_PPC64
-#define STACK_LR(STACK) STACK_LR64(STACK)
-#else
-#define STACK_LR(STACK) STACK_LR32(STACK)
-#endif
-
-static unsigned int user_getsp32(unsigned int sp, int is_first)
-{
- unsigned int stack_frame[2];
- void __user *p = compat_ptr(sp);
-
- /*
- * The most likely reason for this is that we returned -EFAULT,
- * which means that we've done all that we can do from
- * interrupt context.
- */
- if (copy_from_user_nofault(stack_frame, (void __user *)p,
- sizeof(stack_frame)))
- return 0;
-
- if (!is_first)
- oprofile_add_trace(STACK_LR32(stack_frame));
-
- /*
- * We do not enforce increasing stack addresses here because
- * we may transition to a different stack, eg a signal handler.
- */
- return STACK_SP(stack_frame);
-}
-
-#ifdef CONFIG_PPC64
-static unsigned long user_getsp64(unsigned long sp, int is_first)
-{
- unsigned long stack_frame[3];
-
- if (copy_from_user_nofault(stack_frame, (void __user *)sp,
- sizeof(stack_frame)))
- return 0;
-
- if (!is_first)
- oprofile_add_trace(STACK_LR64(stack_frame));
-
- return STACK_SP(stack_frame);
-}
-#endif
-
-static unsigned long kernel_getsp(unsigned long sp, int is_first)
-{
- unsigned long *stack_frame = (unsigned long *)sp;
-
- if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
- return 0;
-
- if (!is_first)
- oprofile_add_trace(STACK_LR(stack_frame));
-
- /*
- * We do not enforce increasing stack addresses here because
- * we might be transitioning from an interrupt stack to a kernel
- * stack. validate_sp() is designed to understand this, so just
- * use it.
- */
- return STACK_SP(stack_frame);
-}
-
-void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
-{
- unsigned long sp = regs->gpr[1];
- int first_frame = 1;
-
- /* We ditch the top stackframe so need to loop through an extra time */
- depth += 1;
-
- if (!user_mode(regs)) {
- while (depth--) {
- sp = kernel_getsp(sp, first_frame);
- if (!sp)
- break;
- first_frame = 0;
- }
- } else {
-#ifdef CONFIG_PPC64
- if (!is_32bit_task()) {
- while (depth--) {
- sp = user_getsp64(sp, first_frame);
- if (!sp)
- break;
- first_frame = 0;
- }
- return;
- }
-#endif
-
- while (depth--) {
- sp = user_getsp32(sp, first_frame);
- if (!sp)
- break;
- first_frame = 0;
- }
- }
-}
diff --git a/arch/powerpc/oprofile/cell/pr_util.h b/arch/powerpc/oprofile/cell/pr_util.h
deleted file mode 100644
index e198efa9113a..000000000000
--- a/arch/powerpc/oprofile/cell/pr_util.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
- /*
- * Cell Broadband Engine OProfile Support
- *
- * (C) Copyright IBM Corporation 2006
- *
- * Author: Maynard Johnson <maynardj@us.ibm.com>
- */
-
-#ifndef PR_UTIL_H
-#define PR_UTIL_H
-
-#include <linux/cpumask.h>
-#include <linux/oprofile.h>
-#include <asm/cell-pmu.h>
-#include <asm/cell-regs.h>
-#include <asm/spu.h>
-
-/* Defines used for sync_start */
-#define SKIP_GENERIC_SYNC 0
-#define SYNC_START_ERROR -1
-#define DO_GENERIC_SYNC 1
-#define SPUS_PER_NODE 8
-#define DEFAULT_TIMER_EXPIRE (HZ / 10)
-
-extern struct delayed_work spu_work;
-extern int spu_prof_running;
-
-#define TRACE_ARRAY_SIZE 1024
-
-extern spinlock_t oprof_spu_smpl_arry_lck;
-
-struct spu_overlay_info { /* map of sections within an SPU overlay */
- unsigned int vma; /* SPU virtual memory address from elf */
- unsigned int size; /* size of section from elf */
- unsigned int offset; /* offset of section into elf file */
- unsigned int buf;
-};
-
-struct vma_to_fileoffset_map { /* map of sections within an SPU program */
- struct vma_to_fileoffset_map *next; /* list pointer */
- unsigned int vma; /* SPU virtual memory address from elf */
- unsigned int size; /* size of section from elf */
- unsigned int offset; /* offset of section into elf file */
- unsigned int guard_ptr;
- unsigned int guard_val;
- /*
- * The guard pointer is an entry in the _ovly_buf_table,
- * computed using ovly.buf as the index into the table. Since
- * ovly.buf values begin at '1' to reference the first (or 0th)
- * entry in the _ovly_buf_table, the computation subtracts 1
- * from ovly.buf.
- * The guard value is stored in the _ovly_buf_table entry and
- * is an index (starting at 1) back to the _ovly_table entry
- * that is pointing at this _ovly_buf_table entry. So, for
- * example, for an overlay scenario with one overlay segment
- * and two overlay sections:
- * - Section 1 points to the first entry of the
- * _ovly_buf_table, which contains a guard value
- * of '1', referencing the first (index=0) entry of
- * _ovly_table.
- * - Section 2 points to the second entry of the
- * _ovly_buf_table, which contains a guard value
- * of '2', referencing the second (index=1) entry of
- * _ovly_table.
- */
-
-};
-
-struct spu_buffer {
- int last_guard_val;
- int ctx_sw_seen;
- unsigned long *buff;
- unsigned int head, tail;
-};
-
-
-/* The three functions below are for maintaining and accessing
- * the vma-to-fileoffset map.
- */
-struct vma_to_fileoffset_map *create_vma_map(const struct spu *spu,
- unsigned long objectid);
-unsigned int vma_map_lookup(struct vma_to_fileoffset_map *map,
- unsigned int vma, const struct spu *aSpu,
- int *grd_val);
-void vma_map_free(struct vma_to_fileoffset_map *map);
-
-/*
- * Entry point for SPU profiling.
- * cycles_reset is the SPU_CYCLES count value specified by the user.
- */
-int start_spu_profiling_cycles(unsigned int cycles_reset);
-void start_spu_profiling_events(void);
-
-void stop_spu_profiling_cycles(void);
-void stop_spu_profiling_events(void);
-
-/* add the necessary profiling hooks */
-int spu_sync_start(void);
-
-/* remove the hooks */
-int spu_sync_stop(void);
-
-/* Record SPU program counter samples to the oprofile event buffer. */
-void spu_sync_buffer(int spu_num, unsigned int *samples,
- int num_samples);
-
-void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset);
-
-#endif /* PR_UTIL_H */
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
deleted file mode 100644
index cdf883445a9f..000000000000
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ /dev/null
@@ -1,248 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cell Broadband Engine OProfile Support
- *
- * (C) Copyright IBM Corporation 2006
- *
- * Authors: Maynard Johnson <maynardj@us.ibm.com>
- * Carl Love <carll@us.ibm.com>
- */
-
-#include <linux/hrtimer.h>
-#include <linux/smp.h>
-#include <linux/slab.h>
-#include <asm/cell-pmu.h>
-#include <asm/time.h>
-#include "pr_util.h"
-
-#define SCALE_SHIFT 14
-
-static u32 *samples;
-
-/* spu_prof_running is a flag used to indicate if spu profiling is enabled
- * or not. It is set by the routines start_spu_profiling_cycles() and
- * start_spu_profiling_events(). The flag is cleared by the routines
- * stop_spu_profiling_cycles() and stop_spu_profiling_events(). These
- * routines are called via global_start() and global_stop() which are called in
- * op_powerpc_start() and op_powerpc_stop(). These routines are called once
- * per system as a result of the user starting/stopping oprofile. Hence, only
- * one CPU per user at a time will be changing the value of spu_prof_running.
- * In general, OProfile does not protect against multiple users trying to run
- * OProfile at a time.
- */
-int spu_prof_running;
-static unsigned int profiling_interval;
-
-#define NUM_SPU_BITS_TRBUF 16
-#define SPUS_PER_TB_ENTRY 4
-
-#define SPU_PC_MASK 0xFFFF
-
-DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck);
-static unsigned long oprof_spu_smpl_arry_lck_flags;
-
-void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
-{
- unsigned long ns_per_cyc;
-
- if (!freq_khz)
- freq_khz = ppc_proc_freq/1000;
-
- /* To calculate a timeout in nanoseconds, the basic
- * formula is ns = cycles_reset * (NSEC_PER_SEC / cpu frequency).
- * To avoid floating point math, we use the scale math
- * technique as described in linux/jiffies.h. We use
- * a scale factor of SCALE_SHIFT, which provides 4 decimal places
- * of precision. This is close enough for the purpose at hand.
- *
- * The value of the timeout should be small enough that the hw
- * trace buffer will not get more than about 1/3 full for the
- * maximum user specified (the LFSR value) hw sampling frequency.
- * This is to ensure the trace buffer will never fill even if the
- * kernel thread scheduling varies under a heavy system load.
- */
-
- ns_per_cyc = (USEC_PER_SEC << SCALE_SHIFT)/freq_khz;
- profiling_interval = (ns_per_cyc * cycles_reset) >> SCALE_SHIFT;
-
-}
-
-/*
- * Extract SPU PC from trace buffer entry
- */
-static void spu_pc_extract(int cpu, int entry)
-{
- /* the trace buffer is 128 bits */
- u64 trace_buffer[2];
- u64 spu_mask;
- int spu;
-
- spu_mask = SPU_PC_MASK;
-
- /* Each SPU PC is 16 bits; hence, four spus in each of
- * the two 64-bit buffer entries that make up the
- * 128-bit trace_buffer entry. Process two 64-bit values
- * simultaneously.
- * trace[0] SPU PC contents are: 0 1 2 3
- * trace[1] SPU PC contents are: 4 5 6 7
- */
-
- cbe_read_trace_buffer(cpu, trace_buffer);
-
- for (spu = SPUS_PER_TB_ENTRY-1; spu >= 0; spu--) {
- /* spu PC trace entry is upper 16 bits of the
- * 18 bit SPU program counter
- */
- samples[spu * TRACE_ARRAY_SIZE + entry]
- = (spu_mask & trace_buffer[0]) << 2;
- samples[(spu + SPUS_PER_TB_ENTRY) * TRACE_ARRAY_SIZE + entry]
- = (spu_mask & trace_buffer[1]) << 2;
-
- trace_buffer[0] = trace_buffer[0] >> NUM_SPU_BITS_TRBUF;
- trace_buffer[1] = trace_buffer[1] >> NUM_SPU_BITS_TRBUF;
- }
-}
-
-static int cell_spu_pc_collection(int cpu)
-{
- u32 trace_addr;
- int entry;
-
- /* process the collected SPU PC for the node */
-
- entry = 0;
-
- trace_addr = cbe_read_pm(cpu, trace_address);
- while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
- /* there is data in the trace buffer to process */
- spu_pc_extract(cpu, entry);
-
- entry++;
-
- if (entry >= TRACE_ARRAY_SIZE)
- /* spu_samples is full */
- break;
-
- trace_addr = cbe_read_pm(cpu, trace_address);
- }
-
- return entry;
-}
-
-
-static enum hrtimer_restart profile_spus(struct hrtimer *timer)
-{
- ktime_t kt;
- int cpu, node, k, num_samples, spu_num;
-
- if (!spu_prof_running)
- goto stop;
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- node = cbe_cpu_to_node(cpu);
-
- /* There should only be one kernel thread at a time processing
- * the samples. In the very unlikely case that the processing
- * is taking a very long time and multiple kernel threads are
- * started to process the samples. Make sure only one kernel
- * thread is working on the samples array at a time. The
- * sample array must be loaded and then processed for a given
- * cpu. The sample array is not per cpu.
- */
- spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
- oprof_spu_smpl_arry_lck_flags);
- num_samples = cell_spu_pc_collection(cpu);
-
- if (num_samples == 0) {
- spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
- oprof_spu_smpl_arry_lck_flags);
- continue;
- }
-
- for (k = 0; k < SPUS_PER_NODE; k++) {
- spu_num = k + (node * SPUS_PER_NODE);
- spu_sync_buffer(spu_num,
- samples + (k * TRACE_ARRAY_SIZE),
- num_samples);
- }
-
- spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
- oprof_spu_smpl_arry_lck_flags);
-
- }
- smp_wmb(); /* insure spu event buffer updates are written */
- /* don't want events intermingled... */
-
- kt = profiling_interval;
- if (!spu_prof_running)
- goto stop;
- hrtimer_forward(timer, timer->base->get_time(), kt);
- return HRTIMER_RESTART;
-
- stop:
- printk(KERN_INFO "SPU_PROF: spu-prof timer ending\n");
- return HRTIMER_NORESTART;
-}
-
-static struct hrtimer timer;
-/*
- * Entry point for SPU cycle profiling.
- * NOTE: SPU profiling is done system-wide, not per-CPU.
- *
- * cycles_reset is the count value specified by the user when
- * setting up OProfile to count SPU_CYCLES.
- */
-int start_spu_profiling_cycles(unsigned int cycles_reset)
-{
- ktime_t kt;
-
- pr_debug("timer resolution: %lu\n", TICK_NSEC);
- kt = profiling_interval;
- hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_set_expires(&timer, kt);
- timer.function = profile_spus;
-
- /* Allocate arrays for collecting SPU PC samples */
- samples = kcalloc(SPUS_PER_NODE * TRACE_ARRAY_SIZE, sizeof(u32),
- GFP_KERNEL);
-
- if (!samples)
- return -ENOMEM;
-
- spu_prof_running = 1;
- hrtimer_start(&timer, kt, HRTIMER_MODE_REL);
- schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
-
- return 0;
-}
-
-/*
- * Entry point for SPU event profiling.
- * NOTE: SPU profiling is done system-wide, not per-CPU.
- *
- * cycles_reset is the count value specified by the user when
- * setting up OProfile to count SPU_CYCLES.
- */
-void start_spu_profiling_events(void)
-{
- spu_prof_running = 1;
- schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
-
- return;
-}
-
-void stop_spu_profiling_cycles(void)
-{
- spu_prof_running = 0;
- hrtimer_cancel(&timer);
- kfree(samples);
- pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n");
-}
-
-void stop_spu_profiling_events(void)
-{
- spu_prof_running = 0;
-}
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
deleted file mode 100644
index 489f993100d5..000000000000
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ /dev/null
@@ -1,657 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cell Broadband Engine OProfile Support
- *
- * (C) Copyright IBM Corporation 2006
- *
- * Author: Maynard Johnson <maynardj@us.ibm.com>
- */
-
-/* The purpose of this file is to handle SPU event task switching
- * and to record SPU context information into the OProfile
- * event buffer.
- *
- * Additionally, the spu_sync_buffer function is provided as a helper
- * for recoding actual SPU program counter samples to the event buffer.
- */
-#include <linux/dcookies.h>
-#include <linux/kref.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/numa.h>
-#include <linux/oprofile.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include "pr_util.h"
-
-#define RELEASE_ALL 9999
-
-static DEFINE_SPINLOCK(buffer_lock);
-static DEFINE_SPINLOCK(cache_lock);
-static int num_spu_nodes;
-static int spu_prof_num_nodes;
-
-struct spu_buffer spu_buff[MAX_NUMNODES * SPUS_PER_NODE];
-struct delayed_work spu_work;
-static unsigned max_spu_buff;
-
-static void spu_buff_add(unsigned long int value, int spu)
-{
- /* spu buff is a circular buffer. Add entries to the
- * head. Head is the index to store the next value.
- * The buffer is full when there is one available entry
- * in the queue, i.e. head and tail can't be equal.
- * That way we can tell the difference between the
- * buffer being full versus empty.
- *
- * ASSUMPTION: the buffer_lock is held when this function
- * is called to lock the buffer, head and tail.
- */
- int full = 1;
-
- if (spu_buff[spu].head >= spu_buff[spu].tail) {
- if ((spu_buff[spu].head - spu_buff[spu].tail)
- < (max_spu_buff - 1))
- full = 0;
-
- } else if (spu_buff[spu].tail > spu_buff[spu].head) {
- if ((spu_buff[spu].tail - spu_buff[spu].head)
- > 1)
- full = 0;
- }
-
- if (!full) {
- spu_buff[spu].buff[spu_buff[spu].head] = value;
- spu_buff[spu].head++;
-
- if (spu_buff[spu].head >= max_spu_buff)
- spu_buff[spu].head = 0;
- } else {
- /* From the user's perspective make the SPU buffer
- * size management/overflow look like we are using
- * per cpu buffers. The user uses the same
- * per cpu parameter to adjust the SPU buffer size.
- * Increment the sample_lost_overflow to inform
- * the user the buffer size needs to be increased.
- */
- oprofile_cpu_buffer_inc_smpl_lost();
- }
-}
-
-/* This function copies the per SPU buffers to the
- * OProfile kernel buffer.
- */
-static void sync_spu_buff(void)
-{
- int spu;
- unsigned long flags;
- int curr_head;
-
- for (spu = 0; spu < num_spu_nodes; spu++) {
- /* In case there was an issue and the buffer didn't
- * get created skip it.
- */
- if (spu_buff[spu].buff == NULL)
- continue;
-
- /* Hold the lock to make sure the head/tail
- * doesn't change while spu_buff_add() is
- * deciding if the buffer is full or not.
- * Being a little paranoid.
- */
- spin_lock_irqsave(&buffer_lock, flags);
- curr_head = spu_buff[spu].head;
- spin_unlock_irqrestore(&buffer_lock, flags);
-
- /* Transfer the current contents to the kernel buffer.
- * data can still be added to the head of the buffer.
- */
- oprofile_put_buff(spu_buff[spu].buff,
- spu_buff[spu].tail,
- curr_head, max_spu_buff);
-
- spin_lock_irqsave(&buffer_lock, flags);
- spu_buff[spu].tail = curr_head;
- spin_unlock_irqrestore(&buffer_lock, flags);
- }
-
-}
-
-static void wq_sync_spu_buff(struct work_struct *work)
-{
- /* move data from spu buffers to kernel buffer */
- sync_spu_buff();
-
- /* only reschedule if profiling is not done */
- if (spu_prof_running)
- schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
-}
-
-/* Container for caching information about an active SPU task. */
-struct cached_info {
- struct vma_to_fileoffset_map *map;
- struct spu *the_spu; /* needed to access pointer to local_store */
- struct kref cache_ref;
-};
-
-static struct cached_info *spu_info[MAX_NUMNODES * 8];
-
-static void destroy_cached_info(struct kref *kref)
-{
- struct cached_info *info;
-
- info = container_of(kref, struct cached_info, cache_ref);
- vma_map_free(info->map);
- kfree(info);
- module_put(THIS_MODULE);
-}
-
-/* Return the cached_info for the passed SPU number.
- * ATTENTION: Callers are responsible for obtaining the
- * cache_lock if needed prior to invoking this function.
- */
-static struct cached_info *get_cached_info(struct spu *the_spu, int spu_num)
-{
- struct kref *ref;
- struct cached_info *ret_info;
-
- if (spu_num >= num_spu_nodes) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Invalid index %d into spu info cache\n",
- __func__, __LINE__, spu_num);
- ret_info = NULL;
- goto out;
- }
- if (!spu_info[spu_num] && the_spu) {
- ref = spu_get_profile_private_kref(the_spu->ctx);
- if (ref) {
- spu_info[spu_num] = container_of(ref, struct cached_info, cache_ref);
- kref_get(&spu_info[spu_num]->cache_ref);
- }
- }
-
- ret_info = spu_info[spu_num];
- out:
- return ret_info;
-}
-
-
-/* Looks for cached info for the passed spu. If not found, the
- * cached info is created for the passed spu.
- * Returns 0 for success; otherwise, -1 for error.
- */
-static int
-prepare_cached_spu_info(struct spu *spu, unsigned long objectId)
-{
- unsigned long flags;
- struct vma_to_fileoffset_map *new_map;
- int retval = 0;
- struct cached_info *info;
-
- /* We won't bother getting cache_lock here since
- * don't do anything with the cached_info that's returned.
- */
- info = get_cached_info(spu, spu->number);
-
- if (info) {
- pr_debug("Found cached SPU info.\n");
- goto out;
- }
-
- /* Create cached_info and set spu_info[spu->number] to point to it.
- * spu->number is a system-wide value, not a per-node value.
- */
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: create vma_map failed\n",
- __func__, __LINE__);
- retval = -ENOMEM;
- goto err_alloc;
- }
- new_map = create_vma_map(spu, objectId);
- if (!new_map) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: create vma_map failed\n",
- __func__, __LINE__);
- retval = -ENOMEM;
- goto err_alloc;
- }
-
- pr_debug("Created vma_map\n");
- info->map = new_map;
- info->the_spu = spu;
- kref_init(&info->cache_ref);
- spin_lock_irqsave(&cache_lock, flags);
- spu_info[spu->number] = info;
- /* Increment count before passing off ref to SPUFS. */
- kref_get(&info->cache_ref);
-
- /* We increment the module refcount here since SPUFS is
- * responsible for the final destruction of the cached_info,
- * and it must be able to access the destroy_cached_info()
- * function defined in the OProfile module. We decrement
- * the module refcount in destroy_cached_info.
- */
- try_module_get(THIS_MODULE);
- spu_set_profile_private_kref(spu->ctx, &info->cache_ref,
- destroy_cached_info);
- spin_unlock_irqrestore(&cache_lock, flags);
- goto out;
-
-err_alloc:
- kfree(info);
-out:
- return retval;
-}
-
-/*
- * NOTE: The caller is responsible for locking the
- * cache_lock prior to calling this function.
- */
-static int release_cached_info(int spu_index)
-{
- int index, end;
-
- if (spu_index == RELEASE_ALL) {
- end = num_spu_nodes;
- index = 0;
- } else {
- if (spu_index >= num_spu_nodes) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: "
- "Invalid index %d into spu info cache\n",
- __func__, __LINE__, spu_index);
- goto out;
- }
- end = spu_index + 1;
- index = spu_index;
- }
- for (; index < end; index++) {
- if (spu_info[index]) {
- kref_put(&spu_info[index]->cache_ref,
- destroy_cached_info);
- spu_info[index] = NULL;
- }
- }
-
-out:
- return 0;
-}
-
-/* The source code for fast_get_dcookie was "borrowed"
- * from drivers/oprofile/buffer_sync.c.
- */
-
-/* Optimisation. We can manage without taking the dcookie sem
- * because we cannot reach this code without at least one
- * dcookie user still being registered (namely, the reader
- * of the event buffer).
- */
-static inline unsigned long fast_get_dcookie(const struct path *path)
-{
- unsigned long cookie;
-
- if (path->dentry->d_flags & DCACHE_COOKIE)
- return (unsigned long)path->dentry;
- get_dcookie(path, &cookie);
- return cookie;
-}
-
-/* Look up the dcookie for the task's mm->exe_file,
- * which corresponds loosely to "application name". Also, determine
- * the offset for the SPU ELF object. If computed offset is
- * non-zero, it implies an embedded SPU object; otherwise, it's a
- * separate SPU binary, in which case we retrieve it's dcookie.
- * For the embedded case, we must determine if SPU ELF is embedded
- * in the executable application or another file (i.e., shared lib).
- * If embedded in a shared lib, we must get the dcookie and return
- * that to the caller.
- */
-static unsigned long
-get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
- unsigned long *spu_bin_dcookie,
- unsigned long spu_ref)
-{
- unsigned long app_cookie = 0;
- unsigned int my_offset = 0;
- struct vm_area_struct *vma;
- struct file *exe_file;
- struct mm_struct *mm = spu->mm;
-
- if (!mm)
- goto out;
-
- exe_file = get_mm_exe_file(mm);
- if (exe_file) {
- app_cookie = fast_get_dcookie(&exe_file->f_path);
- pr_debug("got dcookie for %pD\n", exe_file);
- fput(exe_file);
- }
-
- mmap_read_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref)
- continue;
- my_offset = spu_ref - vma->vm_start;
- if (!vma->vm_file)
- goto fail_no_image_cookie;
-
- pr_debug("Found spu ELF at %X(object-id:%lx) for file %pD\n",
- my_offset, spu_ref, vma->vm_file);
- *offsetp = my_offset;
- break;
- }
-
- *spu_bin_dcookie = fast_get_dcookie(&vma->vm_file->f_path);
- pr_debug("got dcookie for %pD\n", vma->vm_file);
-
- mmap_read_unlock(mm);
-
-out:
- return app_cookie;
-
-fail_no_image_cookie:
- mmap_read_unlock(mm);
-
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Cannot find dcookie for SPU binary\n",
- __func__, __LINE__);
- goto out;
-}
-
-
-
-/* This function finds or creates cached context information for the
- * passed SPU and records SPU context information into the OProfile
- * event buffer.
- */
-static int process_context_switch(struct spu *spu, unsigned long objectId)
-{
- unsigned long flags;
- int retval;
- unsigned int offset = 0;
- unsigned long spu_cookie = 0, app_dcookie;
-
- retval = prepare_cached_spu_info(spu, objectId);
- if (retval)
- goto out;
-
- /* Get dcookie first because a mutex_lock is taken in that
- * code path, so interrupts must not be disabled.
- */
- app_dcookie = get_exec_dcookie_and_offset(spu, &offset, &spu_cookie, objectId);
- if (!app_dcookie || !spu_cookie) {
- retval = -ENOENT;
- goto out;
- }
-
- /* Record context info in event buffer */
- spin_lock_irqsave(&buffer_lock, flags);
- spu_buff_add(ESCAPE_CODE, spu->number);
- spu_buff_add(SPU_CTX_SWITCH_CODE, spu->number);
- spu_buff_add(spu->number, spu->number);
- spu_buff_add(spu->pid, spu->number);
- spu_buff_add(spu->tgid, spu->number);
- spu_buff_add(app_dcookie, spu->number);
- spu_buff_add(spu_cookie, spu->number);
- spu_buff_add(offset, spu->number);
-
- /* Set flag to indicate SPU PC data can now be written out. If
- * the SPU program counter data is seen before an SPU context
- * record is seen, the postprocessing will fail.
- */
- spu_buff[spu->number].ctx_sw_seen = 1;
-
- spin_unlock_irqrestore(&buffer_lock, flags);
-	smp_wmb(); /* ensure spu event buffer updates are written */
- /* don't want entries intermingled... */
-out:
- return retval;
-}
-
-/*
- * This function is invoked on either a bind_context or unbind_context.
- * If called for an unbind_context, the val arg is 0; otherwise,
- * it is the object-id value for the spu context.
- * The data arg is of type 'struct spu *'.
- */
-static int spu_active_notify(struct notifier_block *self, unsigned long val,
- void *data)
-{
- int retval;
- unsigned long flags;
- struct spu *the_spu = data;
-
- pr_debug("SPU event notification arrived\n");
- if (!val) {
- spin_lock_irqsave(&cache_lock, flags);
- retval = release_cached_info(the_spu->number);
- spin_unlock_irqrestore(&cache_lock, flags);
- } else {
- retval = process_context_switch(the_spu, val);
- }
- return retval;
-}
-
-static struct notifier_block spu_active = {
- .notifier_call = spu_active_notify,
-};
-
-static int number_of_online_nodes(void)
-{
- u32 cpu; u32 tmp;
- int nodes = 0;
- for_each_online_cpu(cpu) {
- tmp = cbe_cpu_to_node(cpu) + 1;
- if (tmp > nodes)
- nodes++;
- }
- return nodes;
-}
-
-static int oprofile_spu_buff_create(void)
-{
- int spu;
-
- max_spu_buff = oprofile_get_cpu_buffer_size();
-
- for (spu = 0; spu < num_spu_nodes; spu++) {
- /* create circular buffers to store the data in.
- * use locks to manage accessing the buffers
- */
- spu_buff[spu].head = 0;
- spu_buff[spu].tail = 0;
-
- /*
-		 * Create a buffer for each SPU.  We can't reliably
-		 * create a single buffer for all SPUs because there may
-		 * not be enough contiguous kernel memory.
- */
-
- spu_buff[spu].buff = kzalloc((max_spu_buff
- * sizeof(unsigned long)),
- GFP_KERNEL);
-
- if (!spu_buff[spu].buff) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: oprofile_spu_buff_create "
- "failed to allocate spu buffer %d.\n",
- __func__, __LINE__, spu);
-
- /* release the spu buffers that have been allocated */
- while (spu >= 0) {
- kfree(spu_buff[spu].buff);
- spu_buff[spu].buff = 0;
- spu--;
- }
- return -ENOMEM;
- }
- }
- return 0;
-}
-
-/* The main purpose of this function is to synchronize
- * OProfile with SPUFS by registering to be notified of
- * SPU task switches.
- *
- * NOTE: When profiling SPUs, we must ensure that only
- * spu_sync_start is invoked and not the generic sync_start
- * in drivers/oprofile/oprof.c. A return value of
- * SKIP_GENERIC_SYNC or SYNC_START_ERROR will
- * accomplish this.
- */
-int spu_sync_start(void)
-{
- int spu;
- int ret = SKIP_GENERIC_SYNC;
- int register_ret;
- unsigned long flags = 0;
-
- spu_prof_num_nodes = number_of_online_nodes();
- num_spu_nodes = spu_prof_num_nodes * 8;
- INIT_DELAYED_WORK(&spu_work, wq_sync_spu_buff);
-
- /* create buffer for storing the SPU data to put in
- * the kernel buffer.
- */
- ret = oprofile_spu_buff_create();
- if (ret)
- goto out;
-
- spin_lock_irqsave(&buffer_lock, flags);
- for (spu = 0; spu < num_spu_nodes; spu++) {
- spu_buff_add(ESCAPE_CODE, spu);
- spu_buff_add(SPU_PROFILING_CODE, spu);
- spu_buff_add(num_spu_nodes, spu);
- }
- spin_unlock_irqrestore(&buffer_lock, flags);
-
- for (spu = 0; spu < num_spu_nodes; spu++) {
- spu_buff[spu].ctx_sw_seen = 0;
- spu_buff[spu].last_guard_val = 0;
- }
-
- /* Register for SPU events */
- register_ret = spu_switch_event_register(&spu_active);
- if (register_ret) {
- ret = SYNC_START_ERROR;
- goto out;
- }
-
- pr_debug("spu_sync_start -- running.\n");
-out:
- return ret;
-}
-
-/* Record SPU program counter samples to the oprofile event buffer. */
-void spu_sync_buffer(int spu_num, unsigned int *samples,
- int num_samples)
-{
- unsigned long long file_offset;
- unsigned long flags;
- int i;
- struct vma_to_fileoffset_map *map;
- struct spu *the_spu;
- unsigned long long spu_num_ll = spu_num;
- unsigned long long spu_num_shifted = spu_num_ll << 32;
- struct cached_info *c_info;
-
- /* We need to obtain the cache_lock here because it's
- * possible that after getting the cached_info, the SPU job
- * corresponding to this cached_info may end, thus resulting
- * in the destruction of the cached_info.
- */
- spin_lock_irqsave(&cache_lock, flags);
- c_info = get_cached_info(NULL, spu_num);
- if (!c_info) {
- /* This legitimately happens when the SPU task ends before all
- * samples are recorded.
- * No big deal -- so we just drop a few samples.
- */
- pr_debug("SPU_PROF: No cached SPU context "
- "for SPU #%d. Dropping samples.\n", spu_num);
- goto out;
- }
-
- map = c_info->map;
- the_spu = c_info->the_spu;
- spin_lock(&buffer_lock);
- for (i = 0; i < num_samples; i++) {
- unsigned int sample = *(samples+i);
- int grd_val = 0;
- file_offset = 0;
- if (sample == 0)
- continue;
-		file_offset = vma_map_lookup(map, sample, the_spu, &grd_val);
-
- /* If overlays are used by this SPU application, the guard
- * value is non-zero, indicating which overlay section is in
-		 * use.  We need to discard samples taken during the time
-		 * period in which an overlay switch occurs (i.e., the
-		 * guard value changes).
-		 */
- if (grd_val && grd_val != spu_buff[spu_num].last_guard_val) {
- spu_buff[spu_num].last_guard_val = grd_val;
- /* Drop the rest of the samples. */
- break;
- }
-
- /* We must ensure that the SPU context switch has been written
- * out before samples for the SPU. Otherwise, the SPU context
- * information is not available and the postprocessing of the
- * SPU PC will fail with no available anonymous map information.
- */
- if (spu_buff[spu_num].ctx_sw_seen)
- spu_buff_add((file_offset | spu_num_shifted),
- spu_num);
- }
- spin_unlock(&buffer_lock);
-out:
- spin_unlock_irqrestore(&cache_lock, flags);
-}
-
-
-int spu_sync_stop(void)
-{
- unsigned long flags = 0;
- int ret;
- int k;
-
- ret = spu_switch_event_unregister(&spu_active);
-
- if (ret)
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: spu_switch_event_unregister " \
- "returned %d\n",
- __func__, __LINE__, ret);
-
- /* flush any remaining data in the per SPU buffers */
- sync_spu_buff();
-
- spin_lock_irqsave(&cache_lock, flags);
- ret = release_cached_info(RELEASE_ALL);
- spin_unlock_irqrestore(&cache_lock, flags);
-
-	/* remove the scheduled work queue item rather than waiting
- * for every queued entry to execute. Then flush pending
- * system wide buffer to event buffer.
- */
- cancel_delayed_work(&spu_work);
-
- for (k = 0; k < num_spu_nodes; k++) {
- spu_buff[k].ctx_sw_seen = 0;
-
- /*
-		 * spu_buff[k].buff will be NULL if there was a problem
- * allocating the buffer. Only delete if it exists.
- */
- kfree(spu_buff[k].buff);
- spu_buff[k].buff = 0;
- }
- pr_debug("spu_sync_stop -- done.\n");
- return ret;
-}
-
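
As an illustration of the sample format recorded by spu_sync_buffer() above (SPU number in the
upper 32 bits, vma-map file offset in the lower 32), here is a minimal standalone sketch of how a
post-processor could unpack such a word; the helper name is hypothetical and this is not kernel code:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: split a packed SPU sample into its parts, assuming
 * the layout used above: bits 63..32 = SPU number, bits 31..0 = file offset
 * (or the 0x10000000|vma flag value for unmapped addresses). */
static void unpack_spu_sample(uint64_t sample, uint32_t *spu, uint32_t *off)
{
	*spu = (uint32_t)(sample >> 32);
	*off = (uint32_t)(sample & 0xffffffffu);
}

int main(void)
{
	uint32_t spu, off;

	unpack_spu_sample(((uint64_t)3 << 32) | 0x1234, &spu, &off);
	printf("spu=%u offset=0x%x\n", spu, off);	/* prints spu=3 offset=0x1234 */
	return 0;
}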
diff --git a/arch/powerpc/oprofile/cell/vma_map.c b/arch/powerpc/oprofile/cell/vma_map.c
deleted file mode 100644
index 7c4b19cfde88..000000000000
--- a/arch/powerpc/oprofile/cell/vma_map.c
+++ /dev/null
@@ -1,279 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cell Broadband Engine OProfile Support
- *
- * (C) Copyright IBM Corporation 2006
- *
- * Author: Maynard Johnson <maynardj@us.ibm.com>
- */
-
-/* The code in this source file is responsible for generating
- * vma-to-fileOffset maps for both overlay and non-overlay SPU
- * applications.
- */
-
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
-#include <linux/elf.h>
-#include <linux/slab.h>
-#include "pr_util.h"
-
-
-void vma_map_free(struct vma_to_fileoffset_map *map)
-{
- while (map) {
- struct vma_to_fileoffset_map *next = map->next;
- kfree(map);
- map = next;
- }
-}
-
-unsigned int
-vma_map_lookup(struct vma_to_fileoffset_map *map, unsigned int vma,
- const struct spu *aSpu, int *grd_val)
-{
- /*
- * Default the offset to the physical address + a flag value.
- * Addresses of dynamically generated code can't be found in the vma
- * map. For those addresses the flagged value will be sent on to
- * the user space tools so they can be reported rather than just
- * thrown away.
- */
- u32 offset = 0x10000000 + vma;
- u32 ovly_grd;
-
- for (; map; map = map->next) {
- if (vma < map->vma || vma >= map->vma + map->size)
- continue;
-
- if (map->guard_ptr) {
- ovly_grd = *(u32 *)(aSpu->local_store + map->guard_ptr);
- if (ovly_grd != map->guard_val)
- continue;
- *grd_val = ovly_grd;
- }
- offset = vma - map->vma + map->offset;
- break;
- }
-
- return offset;
-}
-
-static struct vma_to_fileoffset_map *
-vma_map_add(struct vma_to_fileoffset_map *map, unsigned int vma,
- unsigned int size, unsigned int offset, unsigned int guard_ptr,
- unsigned int guard_val)
-{
- struct vma_to_fileoffset_map *new = kzalloc(sizeof(*new), GFP_KERNEL);
-
- if (!new) {
- printk(KERN_ERR "SPU_PROF: %s, line %d: malloc failed\n",
- __func__, __LINE__);
- vma_map_free(map);
- return NULL;
- }
-
- new->next = map;
- new->vma = vma;
- new->size = size;
- new->offset = offset;
- new->guard_ptr = guard_ptr;
- new->guard_val = guard_val;
-
- return new;
-}
-
-
-/* Parse SPE ELF header and generate a list of vma_maps.
- * A pointer to the first vma_map in the generated list
- * of vma_maps is returned. */
-struct vma_to_fileoffset_map *create_vma_map(const struct spu *aSpu,
- unsigned long __spu_elf_start)
-{
- static const unsigned char expected[EI_PAD] = {
- [EI_MAG0] = ELFMAG0,
- [EI_MAG1] = ELFMAG1,
- [EI_MAG2] = ELFMAG2,
- [EI_MAG3] = ELFMAG3,
- [EI_CLASS] = ELFCLASS32,
- [EI_DATA] = ELFDATA2MSB,
- [EI_VERSION] = EV_CURRENT,
- [EI_OSABI] = ELFOSABI_NONE
- };
-
- int grd_val;
- struct vma_to_fileoffset_map *map = NULL;
- void __user *spu_elf_start = (void __user *)__spu_elf_start;
- struct spu_overlay_info ovly;
- unsigned int overlay_tbl_offset = -1;
- Elf32_Phdr __user *phdr_start;
- Elf32_Shdr __user *shdr_start;
- Elf32_Ehdr ehdr;
- Elf32_Phdr phdr;
- Elf32_Shdr shdr, shdr_str;
- Elf32_Sym sym;
- int i, j;
- char name[32];
-
- unsigned int ovly_table_sym = 0;
- unsigned int ovly_buf_table_sym = 0;
- unsigned int ovly_table_end_sym = 0;
- unsigned int ovly_buf_table_end_sym = 0;
- struct spu_overlay_info __user *ovly_table;
- unsigned int n_ovlys;
-
- /* Get and validate ELF header. */
-
- if (copy_from_user(&ehdr, spu_elf_start, sizeof (ehdr)))
- goto fail;
-
- if (memcmp(ehdr.e_ident, expected, EI_PAD) != 0) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Unexpected e_ident parsing SPU ELF\n",
- __func__, __LINE__);
- goto fail;
- }
- if (ehdr.e_machine != EM_SPU) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Unexpected e_machine parsing SPU ELF\n",
- __func__, __LINE__);
- goto fail;
- }
- if (ehdr.e_type != ET_EXEC) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Unexpected e_type parsing SPU ELF\n",
- __func__, __LINE__);
- goto fail;
- }
- phdr_start = spu_elf_start + ehdr.e_phoff;
- shdr_start = spu_elf_start + ehdr.e_shoff;
-
- /* Traverse program headers. */
- for (i = 0; i < ehdr.e_phnum; i++) {
- if (copy_from_user(&phdr, phdr_start + i, sizeof(phdr)))
- goto fail;
-
- if (phdr.p_type != PT_LOAD)
- continue;
- if (phdr.p_flags & (1 << 27))
- continue;
-
- map = vma_map_add(map, phdr.p_vaddr, phdr.p_memsz,
- phdr.p_offset, 0, 0);
- if (!map)
- goto fail;
- }
-
- pr_debug("SPU_PROF: Created non-overlay maps\n");
- /* Traverse section table and search for overlay-related symbols. */
- for (i = 0; i < ehdr.e_shnum; i++) {
- if (copy_from_user(&shdr, shdr_start + i, sizeof(shdr)))
- goto fail;
-
- if (shdr.sh_type != SHT_SYMTAB)
- continue;
- if (shdr.sh_entsize != sizeof (sym))
- continue;
-
- if (copy_from_user(&shdr_str,
- shdr_start + shdr.sh_link,
- sizeof(shdr)))
- goto fail;
-
- if (shdr_str.sh_type != SHT_STRTAB)
- goto fail;
-
- for (j = 0; j < shdr.sh_size / sizeof (sym); j++) {
- if (copy_from_user(&sym, spu_elf_start +
- shdr.sh_offset +
- j * sizeof (sym),
- sizeof (sym)))
- goto fail;
-
- if (copy_from_user(name,
- spu_elf_start + shdr_str.sh_offset +
- sym.st_name,
- 20))
- goto fail;
-
- if (memcmp(name, "_ovly_table", 12) == 0)
- ovly_table_sym = sym.st_value;
- if (memcmp(name, "_ovly_buf_table", 16) == 0)
- ovly_buf_table_sym = sym.st_value;
- if (memcmp(name, "_ovly_table_end", 16) == 0)
- ovly_table_end_sym = sym.st_value;
- if (memcmp(name, "_ovly_buf_table_end", 20) == 0)
- ovly_buf_table_end_sym = sym.st_value;
- }
- }
-
- /* If we don't have overlays, we're done. */
- if (ovly_table_sym == 0 || ovly_buf_table_sym == 0
- || ovly_table_end_sym == 0 || ovly_buf_table_end_sym == 0) {
- pr_debug("SPU_PROF: No overlay table found\n");
- goto out;
- } else {
- pr_debug("SPU_PROF: Overlay table found\n");
- }
-
- /* The _ovly_table symbol represents a table with one entry
- * per overlay section. The _ovly_buf_table symbol represents
- * a table with one entry per overlay region.
- * The struct spu_overlay_info gives the structure of the _ovly_table
- * entries. The structure of _ovly_table_buf is simply one
- * u32 word per entry.
- */
- overlay_tbl_offset = vma_map_lookup(map, ovly_table_sym,
- aSpu, &grd_val);
- if (overlay_tbl_offset > 0x10000000) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Error finding SPU overlay table\n",
- __func__, __LINE__);
- goto fail;
- }
- ovly_table = spu_elf_start + overlay_tbl_offset;
-
- n_ovlys = (ovly_table_end_sym -
- ovly_table_sym) / sizeof (ovly);
-
- /* Traverse overlay table. */
- for (i = 0; i < n_ovlys; i++) {
- if (copy_from_user(&ovly, ovly_table + i, sizeof (ovly)))
- goto fail;
-
- /* The ovly.vma/size/offset arguments are analogous to the same
- * arguments used above for non-overlay maps. The final two
- * args are referred to as the guard pointer and the guard
- * value.
- * The guard pointer is an entry in the _ovly_buf_table,
- * computed using ovly.buf as the index into the table. Since
- * ovly.buf values begin at '1' to reference the first (or 0th)
- * entry in the _ovly_buf_table, the computation subtracts 1
- * from ovly.buf.
- * The guard value is stored in the _ovly_buf_table entry and
- * is an index (starting at 1) back to the _ovly_table entry
- * that is pointing at this _ovly_buf_table entry. So, for
- * example, for an overlay scenario with one overlay segment
- * and two overlay sections:
- * - Section 1 points to the first entry of the
- * _ovly_buf_table, which contains a guard value
- * of '1', referencing the first (index=0) entry of
- * _ovly_table.
- * - Section 2 points to the second entry of the
- * _ovly_buf_table, which contains a guard value
- * of '2', referencing the second (index=1) entry of
- * _ovly_table.
- */
- map = vma_map_add(map, ovly.vma, ovly.size, ovly.offset,
- ovly_buf_table_sym + (ovly.buf-1) * 4, i+1);
- if (!map)
- goto fail;
- }
- goto out;
-
- fail:
- map = NULL;
- out:
- return map;
-}
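
To make the _ovly_table/_ovly_buf_table relationship described in the comments above concrete,
here is a small self-contained sketch with made-up table contents; it only illustrates the
1-based indexing convention used by the guard values, not the kernel code itself:

#include <stdio.h>

/* Made-up example: two overlay sections sharing one overlay buffer. */
struct ovly_entry { unsigned vma, size, offset, buf; };

static const struct ovly_entry ovly_table[] = {
	{ 0x4000, 0x800, 0x10000, 1 },	/* section 1 loads into buffer 1 */
	{ 0x4000, 0x800, 0x10800, 1 },	/* section 2 loads into buffer 1 */
};
/* _ovly_buf_table: buffer 1 currently holds section 2 (1-based index) */
static const unsigned ovly_buf_table[] = { 2 };

int main(void)
{
	for (unsigned i = 0; i < 2; i++) {
		unsigned buf = ovly_table[i].buf;		/* 1-based buffer index */
		int resident = (ovly_buf_table[buf - 1] == i + 1);

		printf("section %u resident: %s\n", i + 1, resident ? "yes" : "no");
	}
	return 0;
}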
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
deleted file mode 100644
index 0fb528c2b3a1..000000000000
--- a/arch/powerpc/oprofile/common.c
+++ /dev/null
@@ -1,243 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * PPC 64 oprofile support:
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- * PPC 32 oprofile support: (based on PPC 64 support)
- * Copyright (C) Freescale Semiconductor, Inc 2004
- * Author: Andy Fleming
- *
- * Based on alpha version.
- */
-
-#include <linux/oprofile.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <asm/ptrace.h>
-#include <asm/pmc.h>
-#include <asm/cputable.h>
-#include <asm/oprofile_impl.h>
-#include <asm/firmware.h>
-
-static struct op_powerpc_model *model;
-
-static struct op_counter_config ctr[OP_MAX_COUNTER];
-static struct op_system_config sys;
-
-static int op_per_cpu_rc;
-
-static void op_handle_interrupt(struct pt_regs *regs)
-{
- model->handle_interrupt(regs, ctr);
-}
-
-static void op_powerpc_cpu_setup(void *dummy)
-{
- int ret;
-
- ret = model->cpu_setup(ctr);
-
- if (ret != 0)
- op_per_cpu_rc = ret;
-}
-
-static int op_powerpc_setup(void)
-{
- int err;
-
- op_per_cpu_rc = 0;
-
- /* Grab the hardware */
- err = reserve_pmc_hardware(op_handle_interrupt);
- if (err)
- return err;
-
- /* Pre-compute the values to stuff in the hardware registers. */
- op_per_cpu_rc = model->reg_setup(ctr, &sys, model->num_counters);
-
- if (op_per_cpu_rc)
- goto out;
-
- /* Configure the registers on all cpus. If an error occurs on one
- * of the cpus, op_per_cpu_rc will be set to the error */
- on_each_cpu(op_powerpc_cpu_setup, NULL, 1);
-
-out: if (op_per_cpu_rc) {
-		/* error on setup; release the performance counter hardware */
- release_pmc_hardware();
- }
-
- return op_per_cpu_rc;
-}
-
-static void op_powerpc_shutdown(void)
-{
- release_pmc_hardware();
-}
-
-static void op_powerpc_cpu_start(void *dummy)
-{
-	/* If any of the cpus returned an error, set the
- * global flag to the error so it can be returned
- * to the generic OProfile caller.
- */
- int ret;
-
- ret = model->start(ctr);
- if (ret != 0)
- op_per_cpu_rc = ret;
-}
-
-static int op_powerpc_start(void)
-{
- op_per_cpu_rc = 0;
-
- if (model->global_start)
- return model->global_start(ctr);
- if (model->start) {
- on_each_cpu(op_powerpc_cpu_start, NULL, 1);
- return op_per_cpu_rc;
- }
- return -EIO; /* No start function is defined for this
- power architecture */
-}
-
-static inline void op_powerpc_cpu_stop(void *dummy)
-{
- model->stop();
-}
-
-static void op_powerpc_stop(void)
-{
- if (model->stop)
- on_each_cpu(op_powerpc_cpu_stop, NULL, 1);
- if (model->global_stop)
- model->global_stop();
-}
-
-static int op_powerpc_create_files(struct dentry *root)
-{
- int i;
-
-#ifdef CONFIG_PPC64
- /*
- * There is one mmcr0, mmcr1 and mmcra for setting the events for
- * all of the counters.
- */
- oprofilefs_create_ulong(root, "mmcr0", &sys.mmcr0);
- oprofilefs_create_ulong(root, "mmcr1", &sys.mmcr1);
- oprofilefs_create_ulong(root, "mmcra", &sys.mmcra);
-#ifdef CONFIG_OPROFILE_CELL
- /* create a file the user tool can check to see what level of profiling
-	 * support exists with this kernel. Initialize bit mask to indicate
- * what support the kernel has:
- * bit 0 - Supports SPU event profiling in addition to PPU
- * event and cycles; and SPU cycle profiling
- * bits 1-31 - Currently unused.
- *
- * If the file does not exist, then the kernel only supports SPU
- * cycle profiling, PPU event and cycle profiling.
- */
- oprofilefs_create_ulong(root, "cell_support", &sys.cell_support);
- sys.cell_support = 0x1; /* Note, the user OProfile tool must check
- * that this bit is set before attempting to
-					 * use SPU event profiling.  Older kernels
- * will not have this file, hence the user
- * tool is not allowed to do SPU event
- * profiling on older kernels. Older kernels
- * will accept SPU events but collected data
- * is garbage.
- */
-#endif
-#endif
-
- for (i = 0; i < model->num_counters; ++i) {
- struct dentry *dir;
- char buf[4];
-
- snprintf(buf, sizeof buf, "%d", i);
- dir = oprofilefs_mkdir(root, buf);
-
- oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
- oprofilefs_create_ulong(dir, "event", &ctr[i].event);
- oprofilefs_create_ulong(dir, "count", &ctr[i].count);
-
- /*
- * Classic PowerPC doesn't support per-counter
- * control like this, but the options are
- * expected, so they remain. For Freescale
- * Book-E style performance monitors, we do
- * support them.
- */
- oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
- oprofilefs_create_ulong(dir, "user", &ctr[i].user);
-
- oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
- }
-
- oprofilefs_create_ulong(root, "enable_kernel", &sys.enable_kernel);
- oprofilefs_create_ulong(root, "enable_user", &sys.enable_user);
-
- /* Default to tracing both kernel and user */
- sys.enable_kernel = 1;
- sys.enable_user = 1;
-
- return 0;
-}
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- if (!cur_cpu_spec->oprofile_cpu_type)
- return -ENODEV;
-
- switch (cur_cpu_spec->oprofile_type) {
-#ifdef CONFIG_PPC_BOOK3S_64
-#ifdef CONFIG_OPROFILE_CELL
- case PPC_OPROFILE_CELL:
- if (firmware_has_feature(FW_FEATURE_LPAR))
- return -ENODEV;
- model = &op_model_cell;
- ops->sync_start = model->sync_start;
- ops->sync_stop = model->sync_stop;
- break;
-#endif
- case PPC_OPROFILE_POWER4:
- model = &op_model_power4;
- break;
- case PPC_OPROFILE_PA6T:
- model = &op_model_pa6t;
- break;
-#endif
-#ifdef CONFIG_PPC_BOOK3S_32
- case PPC_OPROFILE_G4:
- model = &op_model_7450;
- break;
-#endif
-#if defined(CONFIG_FSL_EMB_PERFMON)
- case PPC_OPROFILE_FSL_EMB:
- model = &op_model_fsl_emb;
- break;
-#endif
- default:
- return -ENODEV;
- }
-
- model->num_counters = cur_cpu_spec->num_pmcs;
-
- ops->cpu_type = cur_cpu_spec->oprofile_cpu_type;
- ops->create_files = op_powerpc_create_files;
- ops->setup = op_powerpc_setup;
- ops->shutdown = op_powerpc_shutdown;
- ops->start = op_powerpc_start;
- ops->stop = op_powerpc_stop;
- ops->backtrace = op_powerpc_backtrace;
-
- printk(KERN_DEBUG "oprofile: using %s performance monitoring.\n",
- ops->cpu_type);
-
- return 0;
-}
-
-void oprofile_arch_exit(void)
-{
-}
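
The file above wires the architecture hooks through a small vtable (struct op_powerpc_model)
selected once at init time. A minimal standalone sketch of that dispatch pattern, with invented
names and for illustration only:

#include <stdio.h>

/* Invented names; this only mirrors the vtable-dispatch pattern used above. */
struct pmu_model {
	const char *name;
	int  (*start)(void);
	void (*stop)(void);
};

static int demo_start(void) { puts("demo: counters started"); return 0; }
static void demo_stop(void)  { puts("demo: counters stopped"); }

static const struct pmu_model demo_model = {
	.name  = "demo",
	.start = demo_start,
	.stop  = demo_stop,
};

int main(void)
{
	const struct pmu_model *model = &demo_model;	/* chosen at init time */

	if (model->start() == 0)
		model->stop();
	return 0;
}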
diff --git a/arch/powerpc/oprofile/op_model_7450.c b/arch/powerpc/oprofile/op_model_7450.c
deleted file mode 100644
index 5ebc25188a72..000000000000
--- a/arch/powerpc/oprofile/op_model_7450.c
+++ /dev/null
@@ -1,207 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/powerpc/oprofile/op_model_7450.c
- *
- * Freescale 745x/744x oprofile support, based on fsl_booke support
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * Copyright (c) 2004 Freescale Semiconductor, Inc
- *
- * Author: Andy Fleming
- * Maintainer: Kumar Gala <galak@kernel.crashing.org>
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/cputable.h>
-#include <asm/page.h>
-#include <asm/pmc.h>
-#include <asm/oprofile_impl.h>
-
-static unsigned long reset_value[OP_MAX_COUNTER];
-
-static int oprofile_running;
-static u32 mmcr0_val, mmcr1_val, mmcr2_val, num_pmcs;
-
-#define MMCR0_PMC1_SHIFT 6
-#define MMCR0_PMC2_SHIFT 0
-#define MMCR1_PMC3_SHIFT 27
-#define MMCR1_PMC4_SHIFT 22
-#define MMCR1_PMC5_SHIFT 17
-#define MMCR1_PMC6_SHIFT 11
-
-#define mmcr0_event1(event) \
- ((event << MMCR0_PMC1_SHIFT) & MMCR0_PMC1SEL)
-#define mmcr0_event2(event) \
- ((event << MMCR0_PMC2_SHIFT) & MMCR0_PMC2SEL)
-
-#define mmcr1_event3(event) \
- ((event << MMCR1_PMC3_SHIFT) & MMCR1_PMC3SEL)
-#define mmcr1_event4(event) \
- ((event << MMCR1_PMC4_SHIFT) & MMCR1_PMC4SEL)
-#define mmcr1_event5(event) \
- ((event << MMCR1_PMC5_SHIFT) & MMCR1_PMC5SEL)
-#define mmcr1_event6(event) \
- ((event << MMCR1_PMC6_SHIFT) & MMCR1_PMC6SEL)
-
-#define MMCR0_INIT (MMCR0_FC | MMCR0_FCS | MMCR0_FCP | MMCR0_FCM1 | MMCR0_FCM0)
-
-/* Unfreezes the counters on this CPU, enables the interrupt,
- * enables the counters to trigger the interrupt, and sets the
- * counters to only count when the mark bit is not set.
- */
-static void pmc_start_ctrs(void)
-{
- u32 mmcr0 = mfspr(SPRN_MMCR0);
-
- mmcr0 &= ~(MMCR0_FC | MMCR0_FCM0);
- mmcr0 |= (MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);
-
- mtspr(SPRN_MMCR0, mmcr0);
-}
-
-/* Disables the counters on this CPU, and freezes them */
-static void pmc_stop_ctrs(void)
-{
- u32 mmcr0 = mfspr(SPRN_MMCR0);
-
- mmcr0 |= MMCR0_FC;
- mmcr0 &= ~(MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);
-
- mtspr(SPRN_MMCR0, mmcr0);
-}
-
-/* Configures the counters on this CPU based on the global
- * settings */
-static int fsl7450_cpu_setup(struct op_counter_config *ctr)
-{
- /* freeze all counters */
- pmc_stop_ctrs();
-
- mtspr(SPRN_MMCR0, mmcr0_val);
- mtspr(SPRN_MMCR1, mmcr1_val);
- if (num_pmcs > 4)
- mtspr(SPRN_MMCR2, mmcr2_val);
-
- return 0;
-}
-
-/* Configures the global settings for the counters on all CPUs. */
-static int fsl7450_reg_setup(struct op_counter_config *ctr,
- struct op_system_config *sys,
- int num_ctrs)
-{
- int i;
-
- num_pmcs = num_ctrs;
- /* Our counters count up, and "count" refers to
-	 * how many events occur before the next interrupt, and we interrupt
- * on overflow. So we calculate the starting value
- * which will give us "count" until overflow.
- * Then we set the events on the enabled counters */
- for (i = 0; i < num_ctrs; ++i)
- reset_value[i] = 0x80000000UL - ctr[i].count;
-
- /* Set events for Counters 1 & 2 */
- mmcr0_val = MMCR0_INIT | mmcr0_event1(ctr[0].event)
- | mmcr0_event2(ctr[1].event);
-
- /* Setup user/kernel bits */
- if (sys->enable_kernel)
- mmcr0_val &= ~(MMCR0_FCS);
-
- if (sys->enable_user)
- mmcr0_val &= ~(MMCR0_FCP);
-
- /* Set events for Counters 3-6 */
- mmcr1_val = mmcr1_event3(ctr[2].event)
- | mmcr1_event4(ctr[3].event);
- if (num_ctrs > 4)
- mmcr1_val |= mmcr1_event5(ctr[4].event)
- | mmcr1_event6(ctr[5].event);
-
- mmcr2_val = 0;
-
- return 0;
-}
-
-/* Sets the counters on this CPU to the chosen values, and starts them */
-static int fsl7450_start(struct op_counter_config *ctr)
-{
- int i;
-
- mtmsr(mfmsr() | MSR_PMM);
-
- for (i = 0; i < num_pmcs; ++i) {
- if (ctr[i].enabled)
- classic_ctr_write(i, reset_value[i]);
- else
- classic_ctr_write(i, 0);
- }
-
- /* Clear the freeze bit, and enable the interrupt.
- * The counters won't actually start until the rfi clears
- * the PMM bit */
- pmc_start_ctrs();
-
- oprofile_running = 1;
-
- return 0;
-}
-
-/* Stop the counters on this CPU */
-static void fsl7450_stop(void)
-{
- /* freeze counters */
- pmc_stop_ctrs();
-
- oprofile_running = 0;
-
- mb();
-}
-
-
-/* Handle the interrupt on this CPU, and log a sample for each
- * event that triggered the interrupt */
-static void fsl7450_handle_interrupt(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- unsigned long pc;
- int is_kernel;
- int val;
- int i;
-
- /* set the PMM bit (see comment below) */
- mtmsr(mfmsr() | MSR_PMM);
-
- pc = mfspr(SPRN_SIAR);
- is_kernel = is_kernel_addr(pc);
-
- for (i = 0; i < num_pmcs; ++i) {
- val = classic_ctr_read(i);
- if (val < 0) {
- if (oprofile_running && ctr[i].enabled) {
- oprofile_add_ext_sample(pc, regs, i, is_kernel);
- classic_ctr_write(i, reset_value[i]);
- } else {
- classic_ctr_write(i, 0);
- }
- }
- }
-
- /* The freeze bit was set by the interrupt. */
-	/* Clear the freeze bit, and re-enable the interrupt.
-	 * The counters won't actually start until the rfi clears
-	 * the PMM bit */
- pmc_start_ctrs();
-}
-
-struct op_powerpc_model op_model_7450 = {
- .reg_setup = fsl7450_reg_setup,
- .cpu_setup = fsl7450_cpu_setup,
- .start = fsl7450_start,
- .stop = fsl7450_stop,
- .handle_interrupt = fsl7450_handle_interrupt,
-};
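
A quick arithmetic check of the preset scheme used by fsl7450_reg_setup() above: loading a 32-bit
up-counter with 0x80000000 - N makes bit 31 become set after exactly N events, which is what the
interrupt handler detects via a negative signed read. A standalone sketch, not kernel code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t count = 100000;			/* desired events between interrupts */
	uint32_t preset = 0x80000000UL - count;		/* value written to the PMC */
	uint32_t after = preset + count;		/* counter once N events have elapsed */

	/* the handler above treats a negative signed read as "overflowed" */
	printf("preset=0x%08x after=0x%08x overflowed=%d\n",
	       preset, after, (int32_t)after < 0);
	return 0;
}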
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
deleted file mode 100644
index 7eb73070b7be..000000000000
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ /dev/null
@@ -1,1709 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cell Broadband Engine OProfile Support
- *
- * (C) Copyright IBM Corporation 2006
- *
- * Author: David Erb (djerb@us.ibm.com)
- * Modifications:
- * Carl Love <carll@us.ibm.com>
- * Maynard Johnson <maynardj@us.ibm.com>
- */
-
-#include <linux/cpufreq.h>
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/kthread.h>
-#include <linux/oprofile.h>
-#include <linux/percpu.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
-#include <linux/timer.h>
-#include <asm/cell-pmu.h>
-#include <asm/cputable.h>
-#include <asm/firmware.h>
-#include <asm/io.h>
-#include <asm/oprofile_impl.h>
-#include <asm/processor.h>
-#include <asm/prom.h>
-#include <asm/ptrace.h>
-#include <asm/reg.h>
-#include <asm/rtas.h>
-#include <asm/cell-regs.h>
-
-#include "../platforms/cell/interrupt.h"
-#include "cell/pr_util.h"
-
-#define PPU_PROFILING 0
-#define SPU_PROFILING_CYCLES 1
-#define SPU_PROFILING_EVENTS 2
-
-#define SPU_EVENT_NUM_START 4100
-#define SPU_EVENT_NUM_STOP 4399
-#define SPU_PROFILE_EVENT_ADDR 4363 /* spu, address trace, decimal */
-#define SPU_PROFILE_EVENT_ADDR_MASK_A 0x146 /* sub unit set to zero */
-#define SPU_PROFILE_EVENT_ADDR_MASK_B 0x186 /* sub unit set to zero */
-
-#define NUM_SPUS_PER_NODE 8
-#define SPU_CYCLES_EVENT_NUM 2 /* event number for SPU_CYCLES */
-
-#define PPU_CYCLES_EVENT_NUM 1 /* event number for CYCLES */
-#define PPU_CYCLES_GRP_NUM 1 /* special group number for identifying
- * PPU_CYCLES event
- */
-#define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */
-
-#define NUM_THREADS 2 /* number of physical threads in
- * physical processor
- */
-#define NUM_DEBUG_BUS_WORDS 4
-#define NUM_INPUT_BUS_WORDS 2
-
-#define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */
-
-/* Minimum HW interval timer setting to send value to trace buffer is 10 cycles.
- * To configure the counter to send a value every N cycles, set the counter to
- * 2^32 - 1 - N.
- */
-#define NUM_INTERVAL_CYC	(0xFFFFFFFF - 10)
-
-/*
- * spu_cycle_reset is the number of cycles between samples.
- * This variable is used for SPU profiling and should ONLY be set
- * at the beginning of cell_reg_setup; otherwise, it's read-only.
- */
-static unsigned int spu_cycle_reset;
-static unsigned int profiling_mode;
-static int spu_evnt_phys_spu_indx;
-
-struct pmc_cntrl_data {
- unsigned long vcntr;
- unsigned long evnts;
- unsigned long masks;
- unsigned long enabled;
-};
-
-/*
- * ibm,cbe-perftools rtas parameters
- */
-struct pm_signal {
- u16 cpu; /* Processor to modify */
- u16 sub_unit; /* hw subunit this applies to (if applicable)*/
- short int signal_group; /* Signal Group to Enable/Disable */
- u8 bus_word; /* Enable/Disable on this Trace/Trigger/Event
- * Bus Word(s) (bitmask)
- */
- u8 bit; /* Trigger/Event bit (if applicable) */
-};
-
-/*
- * rtas call arguments
- */
-enum {
- SUBFUNC_RESET = 1,
- SUBFUNC_ACTIVATE = 2,
- SUBFUNC_DEACTIVATE = 3,
-
- PASSTHRU_IGNORE = 0,
- PASSTHRU_ENABLE = 1,
- PASSTHRU_DISABLE = 2,
-};
-
-struct pm_cntrl {
- u16 enable;
- u16 stop_at_max;
- u16 trace_mode;
- u16 freeze;
- u16 count_mode;
- u16 spu_addr_trace;
- u8 trace_buf_ovflw;
-};
-
-static struct {
- u32 group_control;
- u32 debug_bus_control;
- struct pm_cntrl pm_cntrl;
- u32 pm07_cntrl[NR_PHYS_CTRS];
-} pm_regs;
-
-#define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
-#define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
-#define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)
-#define GET_POLARITY(x) ((x & 0x00000002) >> 1)
-#define GET_COUNT_CYCLES(x) (x & 0x00000001)
-#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
-
-static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
-static unsigned long spu_pm_cnt[MAX_NUMNODES * NUM_SPUS_PER_NODE];
-static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
-
-/*
- * The CELL profiling code makes rtas calls to setup the debug bus to
- * route the performance signals. Additionally, SPU profiling requires
- * a second rtas call to setup the hardware to capture the SPU PCs.
- * The EIO error value is returned if the token lookups or the rtas
- * call fail. The EIO error number is the best choice of the existing
- * error numbers.  The probability of an rtas-related error is very low, but
- * by returning EIO and printing additional information to dmesg, the user
- * will know that OProfile did not start, and dmesg will tell them why.
- * OProfile does not support returning errors on Stop. Not a huge issue
- * since failure to reset the debug bus or stop the SPU PC collection is
- * not a fatal issue.  Chances are if the Stop failed, Start doesn't work
- * either.
- */
-
-/*
- * Interpretation of hdw_thread:
- * 0 - even virtual cpus 0, 2, 4,...
- * 1 - odd virtual cpus 1, 3, 5, ...
- *
- * FIXME: this is strictly wrong, we need to clean this up in a number
- * of places. It works for now. -arnd
- */
-static u32 hdw_thread;
-
-static u32 virt_cntr_inter_mask;
-static struct timer_list timer_virt_cntr;
-static struct timer_list timer_spu_event_swap;
-
-/*
- * pm_signal needs to be global since it is initialized in
- * cell_reg_setup at the time when the necessary information
- * is available.
- */
-static struct pm_signal pm_signal[NR_PHYS_CTRS];
-static int pm_rtas_token; /* token for debug bus setup call */
-static int spu_rtas_token; /* token for SPU cycle profiling */
-
-static u32 reset_value[NR_PHYS_CTRS];
-static int num_counters;
-static int oprofile_running;
-static DEFINE_SPINLOCK(cntr_lock);
-
-static u32 ctr_enabled;
-
-static unsigned char input_bus[NUM_INPUT_BUS_WORDS];
-
-/*
- * Firmware interface functions
- */
-static int
-rtas_ibm_cbe_perftools(int subfunc, int passthru,
- void *address, unsigned long length)
-{
- u64 paddr = __pa(address);
-
- return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc,
- passthru, paddr >> 32, paddr & 0xffffffff, length);
-}
-
-static void pm_rtas_reset_signals(u32 node)
-{
- int ret;
- struct pm_signal pm_signal_local;
-
- /*
- * The debug bus is being set to the passthru disable state.
- * However, the FW still expects at least one legal signal routing
- * entry or it will return an error on the arguments. If we don't
- * supply a valid entry, we must ignore all return values. Ignoring
- * all return values means we might miss an error we should be
- * concerned about.
- */
-
- /* fw expects physical cpu #. */
- pm_signal_local.cpu = node;
- pm_signal_local.signal_group = 21;
- pm_signal_local.bus_word = 1;
- pm_signal_local.sub_unit = 0;
- pm_signal_local.bit = 0;
-
- ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
- &pm_signal_local,
- sizeof(struct pm_signal));
-
- if (unlikely(ret))
- /*
- * Not a fatal error. For Oprofile stop, the oprofile
- * functions do not support returning an error for
- * failure to stop OProfile.
- */
- printk(KERN_WARNING "%s: rtas returned: %d\n",
- __func__, ret);
-}
-
-static int pm_rtas_activate_signals(u32 node, u32 count)
-{
- int ret;
- int i, j;
- struct pm_signal pm_signal_local[NR_PHYS_CTRS];
-
- /*
- * There is no debug setup required for the cycles event.
- * Note that only events in the same group can be used.
- * Otherwise, there will be conflicts in correctly routing
- * the signals on the debug bus. It is the responsibility
- * of the OProfile user tool to check the events are in
- * the same group.
- */
- i = 0;
- for (j = 0; j < count; j++) {
- if (pm_signal[j].signal_group != PPU_CYCLES_GRP_NUM) {
-
- /* fw expects physical cpu # */
- pm_signal_local[i].cpu = node;
- pm_signal_local[i].signal_group
- = pm_signal[j].signal_group;
- pm_signal_local[i].bus_word = pm_signal[j].bus_word;
- pm_signal_local[i].sub_unit = pm_signal[j].sub_unit;
- pm_signal_local[i].bit = pm_signal[j].bit;
- i++;
- }
- }
-
- if (i != 0) {
- ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
- pm_signal_local,
- i * sizeof(struct pm_signal));
-
- if (unlikely(ret)) {
- printk(KERN_WARNING "%s: rtas returned: %d\n",
- __func__, ret);
- return -EIO;
- }
- }
-
- return 0;
-}
-
-/*
- * PM Signal functions
- */
-static void set_pm_event(u32 ctr, int event, u32 unit_mask)
-{
- struct pm_signal *p;
- u32 signal_bit;
- u32 bus_word, bus_type, count_cycles, polarity, input_control;
- int j, i;
-
- if (event == PPU_CYCLES_EVENT_NUM) {
- /* Special Event: Count all cpu cycles */
- pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
- p = &(pm_signal[ctr]);
- p->signal_group = PPU_CYCLES_GRP_NUM;
- p->bus_word = 1;
- p->sub_unit = 0;
- p->bit = 0;
- goto out;
- } else {
- pm_regs.pm07_cntrl[ctr] = 0;
- }
-
- bus_word = GET_BUS_WORD(unit_mask);
- bus_type = GET_BUS_TYPE(unit_mask);
- count_cycles = GET_COUNT_CYCLES(unit_mask);
- polarity = GET_POLARITY(unit_mask);
- input_control = GET_INPUT_CONTROL(unit_mask);
- signal_bit = (event % 100);
-
- p = &(pm_signal[ctr]);
-
- p->signal_group = event / 100;
- p->bus_word = bus_word;
- p->sub_unit = GET_SUB_UNIT(unit_mask);
-
- pm_regs.pm07_cntrl[ctr] = 0;
- pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
- pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
- pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);
-
- /*
- * Some of the islands signal selection is based on 64 bit words.
- * The debug bus words are 32 bits, the input words to the performance
- * counters are defined as 32 bits. Need to convert the 64 bit island
- * specification to the appropriate 32 input bit and bus word for the
- * performance counter event selection. See the CELL Performance
- * monitoring signals manual and the Perf cntr hardware descriptions
- * for the details.
- */
- if (input_control == 0) {
- if (signal_bit > 31) {
- signal_bit -= 32;
- if (bus_word == 0x3)
- bus_word = 0x2;
- else if (bus_word == 0xc)
- bus_word = 0x8;
- }
-
- if ((bus_type == 0) && p->signal_group >= 60)
- bus_type = 2;
- if ((bus_type == 1) && p->signal_group >= 50)
- bus_type = 0;
-
- pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit);
- } else {
- pm_regs.pm07_cntrl[ctr] = 0;
- p->bit = signal_bit;
- }
-
- for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
- if (bus_word & (1 << i)) {
- pm_regs.debug_bus_control |=
- (bus_type << (30 - (2 * i)));
-
- for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
- if (input_bus[j] == 0xff) {
- input_bus[j] = i;
- pm_regs.group_control |=
- (i << (30 - (2 * j)));
-
- break;
- }
- }
- }
- }
-out:
- ;
-}
-
-static void write_pm_cntrl(int cpu)
-{
- /*
- * Oprofile will use 32 bit counters, set bits 7:10 to 0
-	 * pm_regs.pm_cntrl is a global
- */
-
- u32 val = 0;
- if (pm_regs.pm_cntrl.enable == 1)
- val |= CBE_PM_ENABLE_PERF_MON;
-
- if (pm_regs.pm_cntrl.stop_at_max == 1)
- val |= CBE_PM_STOP_AT_MAX;
-
- if (pm_regs.pm_cntrl.trace_mode != 0)
- val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);
-
- if (pm_regs.pm_cntrl.trace_buf_ovflw == 1)
- val |= CBE_PM_TRACE_BUF_OVFLW(pm_regs.pm_cntrl.trace_buf_ovflw);
- if (pm_regs.pm_cntrl.freeze == 1)
- val |= CBE_PM_FREEZE_ALL_CTRS;
-
- val |= CBE_PM_SPU_ADDR_TRACE_SET(pm_regs.pm_cntrl.spu_addr_trace);
-
- /*
- * Routine set_count_mode must be called previously to set
- * the count mode based on the user selection of user and kernel.
- */
- val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode);
- cbe_write_pm(cpu, pm_control, val);
-}
-
-static inline void
-set_count_mode(u32 kernel, u32 user)
-{
- /*
- * The user must specify user and kernel if they want them. If
- * neither is specified, OProfile will count in hypervisor mode.
- * pm_regs.pm_cntrl is a global
- */
- if (kernel) {
- if (user)
- pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES;
- else
- pm_regs.pm_cntrl.count_mode =
- CBE_COUNT_SUPERVISOR_MODE;
- } else {
- if (user)
- pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE;
- else
- pm_regs.pm_cntrl.count_mode =
- CBE_COUNT_HYPERVISOR_MODE;
- }
-}
-
-static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
-{
-
- pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
- cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
-}
-
-/*
- * Oprofile is expected to collect data on all CPUs simultaneously.
- * However, there is one set of performance counters per node. There are
- * two hardware threads or virtual CPUs on each node. Hence, OProfile must
- * multiplex in time the performance counter collection on the two virtual
- * CPUs. The multiplexing of the performance counters is done by this
- * virtual counter routine.
- *
- * The pmc_values used below is defined as 'per-cpu' but its use is
- * more akin to 'per-node'. We need to store two sets of counter
- * values per node -- one for the previous run and one for the next.
- * The per-cpu[NR_PHYS_CTRS] gives us the storage we need. Each odd/even
- * pair of per-cpu arrays is used for storing the previous and next
- * pmc values for a given node.
- * NOTE: We use the per-cpu variable to improve cache performance.
- *
- * This routine will alternate loading the virtual counters for
- * virtual CPUs
- */
-static void cell_virtual_cntr(struct timer_list *unused)
-{
- int i, prev_hdw_thread, next_hdw_thread;
- u32 cpu;
- unsigned long flags;
-
- /*
-	 * Make sure that the interrupt handler and the virtual counter routine are
- * not both playing with the counters on the same node.
- */
-
- spin_lock_irqsave(&cntr_lock, flags);
-
- prev_hdw_thread = hdw_thread;
-
- /* switch the cpu handling the interrupts */
- hdw_thread = 1 ^ hdw_thread;
- next_hdw_thread = hdw_thread;
-
- pm_regs.group_control = 0;
- pm_regs.debug_bus_control = 0;
-
- for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
- input_bus[i] = 0xff;
-
- /*
- * There are some per thread events. Must do the
- * set event, for the thread that is being started
- */
- for (i = 0; i < num_counters; i++)
- set_pm_event(i,
- pmc_cntrl[next_hdw_thread][i].evnts,
- pmc_cntrl[next_hdw_thread][i].masks);
-
- /*
- * The following is done only once per each node, but
- * we need cpu #, not node #, to pass to the cbe_xxx functions.
- */
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- /*
- * stop counters, save counter values, restore counts
- * for previous thread
- */
- cbe_disable_pm(cpu);
- cbe_disable_pm_interrupts(cpu);
- for (i = 0; i < num_counters; i++) {
- per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
- = cbe_read_ctr(cpu, i);
-
- if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
- == 0xFFFFFFFF)
- /* If the cntr value is 0xffffffff, we must
- * reset that to 0xfffffff0 when the current
- * thread is restarted. This will generate a
- * new interrupt and make sure that we never
- * restore the counters to the max value. If
- * the counters were restored to the max value,
- * they do not increment and no interrupts are
- * generated. Hence no more samples will be
- * collected on that cpu.
- */
- cbe_write_ctr(cpu, i, 0xFFFFFFF0);
- else
- cbe_write_ctr(cpu, i,
- per_cpu(pmc_values,
- cpu +
- next_hdw_thread)[i]);
- }
-
- /*
- * Switch to the other thread. Change the interrupt
- * and control regs to be scheduled on the CPU
- * corresponding to the thread to execute.
- */
- for (i = 0; i < num_counters; i++) {
- if (pmc_cntrl[next_hdw_thread][i].enabled) {
- /*
- * There are some per thread events.
- * Must do the set event, enable_cntr
- * for each cpu.
- */
- enable_ctr(cpu, i,
- pm_regs.pm07_cntrl);
- } else {
- cbe_write_pm07_control(cpu, i, 0);
- }
- }
-
- /* Enable interrupts on the CPU thread that is starting */
- cbe_enable_pm_interrupts(cpu, next_hdw_thread,
- virt_cntr_inter_mask);
- cbe_enable_pm(cpu);
- }
-
- spin_unlock_irqrestore(&cntr_lock, flags);
-
- mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
-}
-
-static void start_virt_cntrs(void)
-{
- timer_setup(&timer_virt_cntr, cell_virtual_cntr, 0);
- timer_virt_cntr.expires = jiffies + HZ / 10;
- add_timer(&timer_virt_cntr);
-}
-
-static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
- struct op_system_config *sys, int num_ctrs)
-{
- spu_cycle_reset = ctr[0].count;
-
- /*
- * Each node will need to make the rtas call to start
- * and stop SPU profiling. Get the token once and store it.
- */
- spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
-
- if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
- printk(KERN_ERR
- "%s: rtas token ibm,cbe-spu-perftools unknown\n",
- __func__);
- return -EIO;
- }
- return 0;
-}
-
-/* Unfortunately, the hardware will only support event profiling
- * on one SPU per node at a time. Therefore, we must time slice
- * the profiling across all SPUs in the node. Note, we do this
- * in parallel for each node. The following routine is called
- * periodically, based on a kernel timer, to switch which SPU is
- * being monitored in a round-robin fashion.
- */
-static void spu_evnt_swap(struct timer_list *unused)
-{
- int node;
- int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
- unsigned long flags;
- int cpu;
- int ret;
- u32 interrupt_mask;
-
-
- /* enable interrupts on cntr 0 */
- interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0);
-
- hdw_thread = 0;
-
- /* Make sure spu event interrupt handler and spu event swap
- * don't access the counters simultaneously.
- */
- spin_lock_irqsave(&cntr_lock, flags);
-
- cur_spu_evnt_phys_spu_indx = spu_evnt_phys_spu_indx;
-
- if (++(spu_evnt_phys_spu_indx) == NUM_SPUS_PER_NODE)
- spu_evnt_phys_spu_indx = 0;
-
- pm_signal[0].sub_unit = spu_evnt_phys_spu_indx;
- pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
- pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
-
- /* switch the SPU being profiled on each node */
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- node = cbe_cpu_to_node(cpu);
- cur_phys_spu = (node * NUM_SPUS_PER_NODE)
- + cur_spu_evnt_phys_spu_indx;
- nxt_phys_spu = (node * NUM_SPUS_PER_NODE)
- + spu_evnt_phys_spu_indx;
-
- /*
- * stop counters, save counter values, restore counts
- * for previous physical SPU
- */
- cbe_disable_pm(cpu);
- cbe_disable_pm_interrupts(cpu);
-
- spu_pm_cnt[cur_phys_spu]
- = cbe_read_ctr(cpu, 0);
-
- /* restore previous count for the next spu to sample */
- /* NOTE, hardware issue, counter will not start if the
- * counter value is at max (0xFFFFFFFF).
- */
- if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF)
- cbe_write_ctr(cpu, 0, 0xFFFFFFF0);
- else
- cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]);
-
- pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
-
-		/* set up the debug bus to measure the one event and
-		 * the two events that route the next SPU's PC onto
-		 * the debug bus
- */
- ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3);
- if (ret)
- printk(KERN_ERR "%s: pm_rtas_activate_signals failed, "
- "SPU event swap\n", __func__);
-
- /* clear the trace buffer, don't want to take PC for
-		 * previous SPU */
- cbe_write_pm(cpu, trace_address, 0);
-
- enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
-
- /* Enable interrupts on the CPU thread that is starting */
- cbe_enable_pm_interrupts(cpu, hdw_thread,
- interrupt_mask);
- cbe_enable_pm(cpu);
- }
-
- spin_unlock_irqrestore(&cntr_lock, flags);
-
-	/* swap approximately every 0.04 seconds (HZ / 25) */
- mod_timer(&timer_spu_event_swap, jiffies + HZ / 25);
-}
-
-static void start_spu_event_swap(void)
-{
- timer_setup(&timer_spu_event_swap, spu_evnt_swap, 0);
- timer_spu_event_swap.expires = jiffies + HZ / 25;
- add_timer(&timer_spu_event_swap);
-}
-
-static int cell_reg_setup_spu_events(struct op_counter_config *ctr,
- struct op_system_config *sys, int num_ctrs)
-{
- int i;
-
- /* routine is called once for all nodes */
-
- spu_evnt_phys_spu_indx = 0;
- /*
- * For all events except PPU CYCLEs, each node will need to make
- * the rtas cbe-perftools call to setup and reset the debug bus.
- * Make the token lookup call once and store it in the global
- * variable pm_rtas_token.
- */
- pm_rtas_token = rtas_token("ibm,cbe-perftools");
-
- if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
- printk(KERN_ERR
- "%s: rtas token ibm,cbe-perftools unknown\n",
- __func__);
- return -EIO;
- }
-
- /* setup the pm_control register settings,
- * settings will be written per node by the
- * cell_cpu_setup() function.
- */
- pm_regs.pm_cntrl.trace_buf_ovflw = 1;
-
- /* Use the occurrence trace mode to have SPU PC saved
- * to the trace buffer. Occurrence data in trace buffer
- * is not used. Bit 2 must be set to store SPU addresses.
- */
- pm_regs.pm_cntrl.trace_mode = 2;
-
- pm_regs.pm_cntrl.spu_addr_trace = 0x1; /* using debug bus
- event 2 & 3 */
-
- /* setup the debug bus event array with the SPU PC routing events.
- * Note, pm_signal[0] will be filled in by set_pm_event() call below.
- */
- pm_signal[1].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
- pm_signal[1].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_A);
- pm_signal[1].bit = SPU_PROFILE_EVENT_ADDR % 100;
- pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
-
- pm_signal[2].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
- pm_signal[2].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_B);
- pm_signal[2].bit = SPU_PROFILE_EVENT_ADDR % 100;
- pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
-
- /* Set the user selected spu event to profile on,
- * note, only one SPU profiling event is supported
- */
- num_counters = 1; /* Only support one SPU event at a time */
- set_pm_event(0, ctr[0].event, ctr[0].unit_mask);
-
- reset_value[0] = 0xFFFFFFFF - ctr[0].count;
-
- /* global, used by cell_cpu_setup */
- ctr_enabled |= 1;
-
- /* Initialize the count for each SPU to the reset value */
- for (i=0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++)
- spu_pm_cnt[i] = reset_value[0];
-
- return 0;
-}
-
-static int cell_reg_setup_ppu(struct op_counter_config *ctr,
- struct op_system_config *sys, int num_ctrs)
-{
- /* routine is called once for all nodes */
- int i, j, cpu;
-
- num_counters = num_ctrs;
-
- if (unlikely(num_ctrs > NR_PHYS_CTRS)) {
- printk(KERN_ERR
- "%s: Oprofile, number of specified events " \
- "exceeds number of physical counters\n",
- __func__);
- return -EIO;
- }
-
- set_count_mode(sys->enable_kernel, sys->enable_user);
-
- /* Setup the thread 0 events */
- for (i = 0; i < num_ctrs; ++i) {
-
- pmc_cntrl[0][i].evnts = ctr[i].event;
- pmc_cntrl[0][i].masks = ctr[i].unit_mask;
- pmc_cntrl[0][i].enabled = ctr[i].enabled;
- pmc_cntrl[0][i].vcntr = i;
-
- for_each_possible_cpu(j)
- per_cpu(pmc_values, j)[i] = 0;
- }
-
- /*
- * Setup the thread 1 events, map the thread 0 event to the
- * equivalent thread 1 event.
- */
- for (i = 0; i < num_ctrs; ++i) {
- if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111))
- pmc_cntrl[1][i].evnts = ctr[i].event + 19;
- else if (ctr[i].event == 2203)
- pmc_cntrl[1][i].evnts = ctr[i].event;
- else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215))
- pmc_cntrl[1][i].evnts = ctr[i].event + 16;
- else
- pmc_cntrl[1][i].evnts = ctr[i].event;
-
- pmc_cntrl[1][i].masks = ctr[i].unit_mask;
- pmc_cntrl[1][i].enabled = ctr[i].enabled;
- pmc_cntrl[1][i].vcntr = i;
- }
-
- for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
- input_bus[i] = 0xff;
-
- /*
- * Our counters count up, and "count" refers to
-	 * how many events occur before the next interrupt, and we interrupt
- * on overflow. So we calculate the starting value
- * which will give us "count" until overflow.
- * Then we set the events on the enabled counters.
- */
- for (i = 0; i < num_counters; ++i) {
- /* start with virtual counter set 0 */
- if (pmc_cntrl[0][i].enabled) {
- /* Using 32bit counters, reset max - count */
- reset_value[i] = 0xFFFFFFFF - ctr[i].count;
- set_pm_event(i,
- pmc_cntrl[0][i].evnts,
- pmc_cntrl[0][i].masks);
-
- /* global, used by cell_cpu_setup */
- ctr_enabled |= (1 << i);
- }
- }
-
- /* initialize the previous counts for the virtual cntrs */
- for_each_online_cpu(cpu)
- for (i = 0; i < num_counters; ++i) {
- per_cpu(pmc_values, cpu)[i] = reset_value[i];
- }
-
- return 0;
-}
-
-
-/* This function is called once for all cpus combined */
-static int cell_reg_setup(struct op_counter_config *ctr,
- struct op_system_config *sys, int num_ctrs)
-{
- int ret=0;
- spu_cycle_reset = 0;
-
-	/* initialize the spu_addr_trace value; it will be reset if
- * doing spu event profiling.
- */
- pm_regs.group_control = 0;
- pm_regs.debug_bus_control = 0;
- pm_regs.pm_cntrl.stop_at_max = 1;
- pm_regs.pm_cntrl.trace_mode = 0;
- pm_regs.pm_cntrl.freeze = 1;
- pm_regs.pm_cntrl.trace_buf_ovflw = 0;
- pm_regs.pm_cntrl.spu_addr_trace = 0;
-
- /*
- * For all events except PPU CYCLEs, each node will need to make
- * the rtas cbe-perftools call to setup and reset the debug bus.
- * Make the token lookup call once and store it in the global
- * variable pm_rtas_token.
- */
- pm_rtas_token = rtas_token("ibm,cbe-perftools");
-
- if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
- printk(KERN_ERR
- "%s: rtas token ibm,cbe-perftools unknown\n",
- __func__);
- return -EIO;
- }
-
- if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
- profiling_mode = SPU_PROFILING_CYCLES;
- ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs);
- } else if ((ctr[0].event >= SPU_EVENT_NUM_START) &&
- (ctr[0].event <= SPU_EVENT_NUM_STOP)) {
- profiling_mode = SPU_PROFILING_EVENTS;
- spu_cycle_reset = ctr[0].count;
-
- /* for SPU event profiling, need to setup the
- * pm_signal array with the events to route the
- * SPU PC before making the FW call. Note, only
- * one SPU event for profiling can be specified
- * at a time.
- */
- cell_reg_setup_spu_events(ctr, sys, num_ctrs);
- } else {
- profiling_mode = PPU_PROFILING;
- ret = cell_reg_setup_ppu(ctr, sys, num_ctrs);
- }
-
- return ret;
-}
-
-
-
-/* This function is called once for each cpu */
-static int cell_cpu_setup(struct op_counter_config *cntr)
-{
- u32 cpu = smp_processor_id();
- u32 num_enabled = 0;
- int i;
- int ret;
-
- /* Cycle based SPU profiling does not use the performance
- * counters. The trace array is configured to collect
- * the data.
- */
- if (profiling_mode == SPU_PROFILING_CYCLES)
- return 0;
-
- /* There is one performance monitor per processor chip (i.e. node),
- * so we only need to perform this function once per node.
- */
- if (cbe_get_hw_thread_id(cpu))
- return 0;
-
- /* Stop all counters */
- cbe_disable_pm(cpu);
- cbe_disable_pm_interrupts(cpu);
-
- cbe_write_pm(cpu, pm_start_stop, 0);
- cbe_write_pm(cpu, group_control, pm_regs.group_control);
- cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
- write_pm_cntrl(cpu);
-
- for (i = 0; i < num_counters; ++i) {
- if (ctr_enabled & (1 << i)) {
- pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
- num_enabled++;
- }
- }
-
- /*
- * The pm_rtas_activate_signals will return -EIO if the FW
- * call failed.
- */
- if (profiling_mode == SPU_PROFILING_EVENTS) {
- /* For SPU event profiling also need to setup the
- * pm interval timer
- */
- ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
- num_enabled+2);
- /* store PC from debug bus to Trace buffer as often
- * as possible (every 10 cycles)
- */
- cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
- return ret;
- } else
- return pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
- num_enabled);
-}
-
-#define ENTRIES 303
-#define MAXLFSR 0xFFFFFF
-
-/* precomputed table of 24 bit LFSR values */
-static int initial_lfsr[] = {
- 8221349, 12579195, 5379618, 10097839, 7512963, 7519310, 3955098, 10753424,
- 15507573, 7458917, 285419, 2641121, 9780088, 3915503, 6668768, 1548716,
- 4885000, 8774424, 9650099, 2044357, 2304411, 9326253, 10332526, 4421547,
- 3440748, 10179459, 13332843, 10375561, 1313462, 8375100, 5198480, 6071392,
- 9341783, 1526887, 3985002, 1439429, 13923762, 7010104, 11969769, 4547026,
- 2040072, 4025602, 3437678, 7939992, 11444177, 4496094, 9803157, 10745556,
- 3671780, 4257846, 5662259, 13196905, 3237343, 12077182, 16222879, 7587769,
- 14706824, 2184640, 12591135, 10420257, 7406075, 3648978, 11042541, 15906893,
- 11914928, 4732944, 10695697, 12928164, 11980531, 4430912, 11939291, 2917017,
- 6119256, 4172004, 9373765, 8410071, 14788383, 5047459, 5474428, 1737756,
- 15967514, 13351758, 6691285, 8034329, 2856544, 14394753, 11310160, 12149558,
- 7487528, 7542781, 15668898, 12525138, 12790975, 3707933, 9106617, 1965401,
- 16219109, 12801644, 2443203, 4909502, 8762329, 3120803, 6360315, 9309720,
- 15164599, 10844842, 4456529, 6667610, 14924259, 884312, 6234963, 3326042,
- 15973422, 13919464, 5272099, 6414643, 3909029, 2764324, 5237926, 4774955,
- 10445906, 4955302, 5203726, 10798229, 11443419, 2303395, 333836, 9646934,
- 3464726, 4159182, 568492, 995747, 10318756, 13299332, 4836017, 8237783,
- 3878992, 2581665, 11394667, 5672745, 14412947, 3159169, 9094251, 16467278,
- 8671392, 15230076, 4843545, 7009238, 15504095, 1494895, 9627886, 14485051,
- 8304291, 252817, 12421642, 16085736, 4774072, 2456177, 4160695, 15409741,
- 4902868, 5793091, 13162925, 16039714, 782255, 11347835, 14884586, 366972,
- 16308990, 11913488, 13390465, 2958444, 10340278, 1177858, 1319431, 10426302,
- 2868597, 126119, 5784857, 5245324, 10903900, 16436004, 3389013, 1742384,
- 14674502, 10279218, 8536112, 10364279, 6877778, 14051163, 1025130, 6072469,
- 1988305, 8354440, 8216060, 16342977, 13112639, 3976679, 5913576, 8816697,
- 6879995, 14043764, 3339515, 9364420, 15808858, 12261651, 2141560, 5636398,
- 10345425, 10414756, 781725, 6155650, 4746914, 5078683, 7469001, 6799140,
- 10156444, 9667150, 10116470, 4133858, 2121972, 1124204, 1003577, 1611214,
- 14304602, 16221850, 13878465, 13577744, 3629235, 8772583, 10881308, 2410386,
- 7300044, 5378855, 9301235, 12755149, 4977682, 8083074, 10327581, 6395087,
- 9155434, 15501696, 7514362, 14520507, 15808945, 3244584, 4741962, 9658130,
- 14336147, 8654727, 7969093, 15759799, 14029445, 5038459, 9894848, 8659300,
- 13699287, 8834306, 10712885, 14753895, 10410465, 3373251, 309501, 9561475,
- 5526688, 14647426, 14209836, 5339224, 207299, 14069911, 8722990, 2290950,
- 3258216, 12505185, 6007317, 9218111, 14661019, 10537428, 11731949, 9027003,
- 6641507, 9490160, 200241, 9720425, 16277895, 10816638, 1554761, 10431375,
- 7467528, 6790302, 3429078, 14633753, 14428997, 11463204, 3576212, 2003426,
- 6123687, 820520, 9992513, 15784513, 5778891, 6428165, 8388607
-};
-
-/*
- * The hardware uses an LFSR counting sequence to determine when to capture
- * the SPU PCs. An LFSR sequence is like a pseudo-random number sequence
- * where each number occurs once in the sequence but the sequence is not in
- * numerical order. The SPU PC capture is done when the LFSR sequence reaches
- * the last value in the sequence. Hence the user specified value N
- * corresponds to the LFSR number that is N from the end of the sequence.
- *
- * To avoid the time to compute the LFSR, a lookup table is used. The 24 bit
- * LFSR sequence is broken into four ranges. The spacing of the precomputed
- * values is adjusted in each range so the error between the user specified
- * number (N) of events between samples and the actual number of events based
- * on the precomputed value will be less than about 6.2%. Note, if the user
- * specifies N < 2^16, the LFSR value that is 2^16 from the end will be used.
- * This is to prevent the loss of samples because the trace buffer is full.
- *
- * User specified N Step between Index in
- * precomputed values precomputed
- * table
- * 0 to 2^16-1 ---- 0
- * 2^16 to 2^16+2^19-1 2^12 1 to 128
- * 2^16+2^19 to 2^16+2^19+2^22-1 2^15 129 to 256
- * 2^16+2^19+2^22 to 2^24-1 2^18 257 to 302
- *
- *
- * For example, the LFSR values in the second range are computed for 2^16,
- * 2^16+2^12, ... , 2^19-2^16, 2^19 and stored in the table at indices
- * 1, 2,..., 127, 128.
- *
- * The 24 bit LFSR value for the nth number in the sequence can be
- * calculated using the following code:
- *
- * #define size 24
- * int calculate_lfsr(int n)
- * {
- * int i;
- * unsigned int newlfsr0;
- * unsigned int lfsr = 0xFFFFFF;
- * unsigned int howmany = n;
- *
- * for (i = 2; i < howmany + 2; i++) {
- * newlfsr0 = (((lfsr >> (size - 1 - 0)) & 1) ^
- * ((lfsr >> (size - 1 - 1)) & 1) ^
- * (((lfsr >> (size - 1 - 6)) & 1) ^
- * ((lfsr >> (size - 1 - 23)) & 1)));
- *
- * lfsr >>= 1;
- * lfsr = lfsr | (newlfsr0 << (size - 1));
- * }
- * return lfsr;
- * }
- */
-
-#define V2_16 (0x1 << 16)
-#define V2_19 (0x1 << 19)
-#define V2_22 (0x1 << 22)
-
-static int calculate_lfsr(int n)
-{
- /*
- * The ranges and steps are in powers of 2 so the calculations
- * can be done using shifts rather than divides.
- */
- int index;
-
- if ((n >> 16) == 0)
- index = 0;
- else if (((n - V2_16) >> 19) == 0)
- index = ((n - V2_16) >> 12) + 1;
- else if (((n - V2_16 - V2_19) >> 22) == 0)
- index = ((n - V2_16 - V2_19) >> 15 ) + 1 + 128;
- else if (((n - V2_16 - V2_19 - V2_22) >> 24) == 0)
- index = ((n - V2_16 - V2_19 - V2_22) >> 18 ) + 1 + 256;
- else
- index = ENTRIES-1;
-
- /* make sure index is valid */
- if ((index >= ENTRIES) || (index < 0))
- index = ENTRIES-1;
-
- return initial_lfsr[index];
-}
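As an illustration of the table lookup above, for a user-specified count of
n = 200000 the second range applies, so

    index = ((200000 - 65536) >> 12) + 1 = 32 + 1 = 33

and calculate_lfsr(200000) returns initial_lfsr[33], the precomputed 24-bit
LFSR value for 196608 (= 2^16 + 32 * 2^12) events between samples, which is
within the ~6.2% error bound described above.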
-
-static int pm_rtas_activate_spu_profiling(u32 node)
-{
- int ret, i;
- struct pm_signal pm_signal_local[NUM_SPUS_PER_NODE];
-
- /*
- * Set up the rtas call to configure the debug bus to
- * route the SPU PCs. Setup the pm_signal for each SPU
- */
- for (i = 0; i < ARRAY_SIZE(pm_signal_local); i++) {
- pm_signal_local[i].cpu = node;
- pm_signal_local[i].signal_group = 41;
- /* spu i on word (i/2) */
- pm_signal_local[i].bus_word = 1 << i / 2;
- /* spu i */
- pm_signal_local[i].sub_unit = i;
- pm_signal_local[i].bit = 63;
- }
-
- ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE,
- PASSTHRU_ENABLE, pm_signal_local,
- (ARRAY_SIZE(pm_signal_local)
- * sizeof(struct pm_signal)));
-
- if (unlikely(ret)) {
- printk(KERN_WARNING "%s: rtas returned: %d\n",
- __func__, ret);
- return -EIO;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_CPU_FREQ
-static int
-oprof_cpufreq_notify(struct notifier_block *nb, unsigned long val, void *data)
-{
- int ret = 0;
- struct cpufreq_freqs *frq = data;
- if ((val == CPUFREQ_PRECHANGE && frq->old < frq->new) ||
- (val == CPUFREQ_POSTCHANGE && frq->old > frq->new))
- set_spu_profiling_frequency(frq->new, spu_cycle_reset);
- return ret;
-}
-
-static struct notifier_block cpu_freq_notifier_block = {
- .notifier_call = oprof_cpufreq_notify
-};
-#endif
-
-/*
- * Note the generic OProfile stop calls do not support returning
- * an error on stop. Hence, we will not return an error if the FW
- * calls fail on stop. Failure to reset the debug bus is not an issue.
- * Failure to disable the SPU profiling is not an issue. The FW calls
- * to enable the performance counters and debug bus will work even if
- * the hardware was not cleanly reset.
- */
-static void cell_global_stop_spu_cycles(void)
-{
- int subfunc, rtn_value;
- unsigned int lfsr_value;
- int cpu;
-
- oprofile_running = 0;
- smp_wmb();
-
-#ifdef CONFIG_CPU_FREQ
- cpufreq_unregister_notifier(&cpu_freq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-#endif
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- subfunc = 3; /*
- * 2 - activate SPU tracing,
- * 3 - deactivate
- */
- lfsr_value = 0x8f100000;
-
- rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
- subfunc, cbe_cpu_to_node(cpu),
- lfsr_value);
-
- if (unlikely(rtn_value != 0)) {
- printk(KERN_ERR
- "%s: rtas call ibm,cbe-spu-perftools " \
- "failed, return = %d\n",
- __func__, rtn_value);
- }
-
- /* Deactivate the signals */
- pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
- }
-
- stop_spu_profiling_cycles();
-}
-
-static void cell_global_stop_spu_events(void)
-{
- int cpu;
- oprofile_running = 0;
-
- stop_spu_profiling_events();
- smp_wmb();
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- cbe_sync_irq(cbe_cpu_to_node(cpu));
- /* Stop the counters */
- cbe_disable_pm(cpu);
- cbe_write_pm07_control(cpu, 0, 0);
-
- /* Deactivate the signals */
- pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
-
- /* Deactivate interrupts */
- cbe_disable_pm_interrupts(cpu);
- }
- del_timer_sync(&timer_spu_event_swap);
-}
-
-static void cell_global_stop_ppu(void)
-{
- int cpu;
-
- /*
- * This routine will be called once for the system.
- * There is one performance monitor per node, so we
- * only need to perform this function once per node.
- */
- del_timer_sync(&timer_virt_cntr);
- oprofile_running = 0;
- smp_wmb();
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- cbe_sync_irq(cbe_cpu_to_node(cpu));
- /* Stop the counters */
- cbe_disable_pm(cpu);
-
- /* Deactivate the signals */
- pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
-
- /* Deactivate interrupts */
- cbe_disable_pm_interrupts(cpu);
- }
-}
-
-static void cell_global_stop(void)
-{
- if (profiling_mode == PPU_PROFILING)
- cell_global_stop_ppu();
- else if (profiling_mode == SPU_PROFILING_EVENTS)
- cell_global_stop_spu_events();
- else
- cell_global_stop_spu_cycles();
-}
-
-static int cell_global_start_spu_cycles(struct op_counter_config *ctr)
-{
- int subfunc;
- unsigned int lfsr_value;
- int cpu;
- int ret;
- int rtas_error;
- unsigned int cpu_khzfreq = 0;
-
- /* SPU cycle profiling is time-based and depends on the
- * cpu frequency, so if configured with the CPU_FREQ
- * option, we should detect frequency changes and react
- * accordingly.
- */
-#ifdef CONFIG_CPU_FREQ
- ret = cpufreq_register_notifier(&cpu_freq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- if (ret < 0)
- /* this is not a fatal error */
- printk(KERN_ERR "CPU freq change registration failed: %d\n",
- ret);
-
- else
- cpu_khzfreq = cpufreq_quick_get(smp_processor_id());
-#endif
-
- set_spu_profiling_frequency(cpu_khzfreq, spu_cycle_reset);
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- /*
- * Setup SPU cycle-based profiling.
- * Set perf_mon_control bit 0 to a zero before
- * enabling spu collection hardware.
- */
- cbe_write_pm(cpu, pm_control, 0);
-
- if (spu_cycle_reset > MAX_SPU_COUNT)
- /* use largest possible value */
- lfsr_value = calculate_lfsr(MAX_SPU_COUNT-1);
- else
- lfsr_value = calculate_lfsr(spu_cycle_reset);
-
- /* must use a non-zero value. Zero disables data collection. */
- if (lfsr_value == 0)
- lfsr_value = calculate_lfsr(1);
-
- lfsr_value = lfsr_value << 8; /* shift lfsr to correct
- * register location
- */
-
- /* debug bus setup */
- ret = pm_rtas_activate_spu_profiling(cbe_cpu_to_node(cpu));
-
- if (unlikely(ret)) {
- rtas_error = ret;
- goto out;
- }
-
-
- subfunc = 2; /* 2 - activate SPU tracing, 3 - deactivate */
-
- /* start profiling */
- ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
- cbe_cpu_to_node(cpu), lfsr_value);
-
- if (unlikely(ret != 0)) {
- printk(KERN_ERR
- "%s: rtas call ibm,cbe-spu-perftools failed, " \
- "return = %d\n", __func__, ret);
- rtas_error = -EIO;
- goto out;
- }
- }
-
- rtas_error = start_spu_profiling_cycles(spu_cycle_reset);
- if (rtas_error)
- goto out_stop;
-
- oprofile_running = 1;
- return 0;
-
-out_stop:
- cell_global_stop_spu_cycles(); /* clean up the PMU/debug bus */
-out:
- return rtas_error;
-}
-
-static int cell_global_start_spu_events(struct op_counter_config *ctr)
-{
- int cpu;
- u32 interrupt_mask = 0;
- int rtn = 0;
-
- hdw_thread = 0;
-
- /* SPU event profiling uses the performance counters to generate
- * an interrupt. The hardware is set up to store the SPU program
- * counter into the trace array. The occurrence mode is used to
- * enable storing data to the trace buffer. The bits are set
- * to send/store the SPU address in the trace buffer. The debug
- * bus must be set up to route the SPU program counter onto the
- * debug bus. The occurrence data in the trace buffer is not used.
- */
-
- /* This routine gets called once for the system.
- * There is one performance monitor per node, so we
- * only need to perform this function once per node.
- */
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- /*
- * Setup SPU event-based profiling.
- * Set perf_mon_control bit 0 to a zero before
- * enabling spu collection hardware.
- *
- * Only support one SPU event on one SPU per node.
- */
- if (ctr_enabled & 1) {
- cbe_write_ctr(cpu, 0, reset_value[0]);
- enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
- interrupt_mask |=
- CBE_PM_CTR_OVERFLOW_INTR(0);
- } else {
- /* Disable counter */
- cbe_write_pm07_control(cpu, 0, 0);
- }
-
- cbe_get_and_clear_pm_interrupts(cpu);
- cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
- cbe_enable_pm(cpu);
-
- /* clear the trace buffer */
- cbe_write_pm(cpu, trace_address, 0);
- }
-
- /* Start the timer to time slice collecting the event profile
- * on each of the SPUs. Note, can collect profile on one SPU
- * per node at a time.
- */
- start_spu_event_swap();
- start_spu_profiling_events();
- oprofile_running = 1;
- smp_wmb();
-
- return rtn;
-}
-
-static int cell_global_start_ppu(struct op_counter_config *ctr)
-{
- u32 cpu, i;
- u32 interrupt_mask = 0;
-
- /* This routine gets called once for the system.
- * There is one performance monitor per node, so we
- * only need to perform this function once per node.
- */
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- interrupt_mask = 0;
-
- for (i = 0; i < num_counters; ++i) {
- if (ctr_enabled & (1 << i)) {
- cbe_write_ctr(cpu, i, reset_value[i]);
- enable_ctr(cpu, i, pm_regs.pm07_cntrl);
- interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i);
- } else {
- /* Disable counter */
- cbe_write_pm07_control(cpu, i, 0);
- }
- }
-
- cbe_get_and_clear_pm_interrupts(cpu);
- cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
- cbe_enable_pm(cpu);
- }
-
- virt_cntr_inter_mask = interrupt_mask;
- oprofile_running = 1;
- smp_wmb();
-
- /*
- * NOTE: start_virt_cntrs will result in cell_virtual_cntr() being
- * executed which manipulates the PMU. We start the "virtual counter"
- * here so that we do not need to synchronize access to the PMU in
- * the above for-loop.
- */
- start_virt_cntrs();
-
- return 0;
-}
-
-static int cell_global_start(struct op_counter_config *ctr)
-{
- if (profiling_mode == SPU_PROFILING_CYCLES)
- return cell_global_start_spu_cycles(ctr);
- else if (profiling_mode == SPU_PROFILING_EVENTS)
- return cell_global_start_spu_events(ctr);
- else
- return cell_global_start_ppu(ctr);
-}
-
-
-/* The SPU interrupt handler
- *
- * SPU event profiling works as follows:
- * The pm_signal[0] holds the one SPU event to be measured. It is routed on
- * the debug bus using word 0 or 1. The value of pm_signal[1] and
- * pm_signal[2] contain the necessary events to route the SPU program
- * counter for the selected SPU onto the debug bus using words 2 and 3.
- * The pm_interval register is setup to write the SPU PC value into the
- * trace buffer at the maximum rate possible. The trace buffer is configured
- * to store the PCs, wrapping when it is full. The performance counter is
- * initialized to the max hardware count minus the number of events, N, between
- * samples. Once the N events have occurred, a HW counter overflow occurs
- * causing the generation of a HW counter interrupt which also stops the
- * writing of the SPU PC values to the trace buffer. Hence the last PC
- * written to the trace buffer is the SPU PC that we want. Unfortunately,
- * we have to read from the beginning of the trace buffer to get to the
- * last value written. We just hope the PPU has nothing better to do than
- * service this interrupt. The PC for the specific SPU being profiled is
- * extracted from the trace buffer processed and stored. The trace buffer
- * is cleared, interrupts are cleared, the counter is reset to max - N.
- * A kernel timer is used to periodically call the routine spu_evnt_swap()
- * to switch to the next physical SPU in the node to profile in round-robin
- * order. This way data is collected for all SPUs on the node. It does mean
- * that we need to use a relatively small value of N to ensure enough samples
- * are collected on each SPU, since each SPU is only being profiled 1/8 of the time.
- * It may also be necessary to use a longer sample collection period.
- */
-static void cell_handle_interrupt_spu(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- u32 cpu, cpu_tmp;
- u64 trace_entry;
- u32 interrupt_mask;
- u64 trace_buffer[2];
- u64 last_trace_buffer;
- u32 sample;
- u32 trace_addr;
- unsigned long sample_array_lock_flags;
- int spu_num;
- unsigned long flags;
-
- /* Make sure spu event interrupt handler and spu event swap
- * don't access the counters simultaneously.
- */
- cpu = smp_processor_id();
- spin_lock_irqsave(&cntr_lock, flags);
-
- cpu_tmp = cpu;
- cbe_disable_pm(cpu);
-
- interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
-
- sample = 0xABCDEF;
- trace_entry = 0xfedcba;
- last_trace_buffer = 0xdeadbeaf;
-
- if ((oprofile_running == 1) && (interrupt_mask != 0)) {
- /* disable writes to trace buff */
- cbe_write_pm(cpu, pm_interval, 0);
-
- /* only have one perf cntr being used, cntr 0 */
- if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0))
- && ctr[0].enabled)
- /* The SPU PC values will be read
- * from the trace buffer, reset counter
- */
-
- cbe_write_ctr(cpu, 0, reset_value[0]);
-
- trace_addr = cbe_read_pm(cpu, trace_address);
-
- while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
- /* There is data in the trace buffer to process.
- * Read the buffer until we get to the last
- * entry; this is the value we want.
- */
-
- cbe_read_trace_buffer(cpu, trace_buffer);
- trace_addr = cbe_read_pm(cpu, trace_address);
- }
-
- /* SPU Address 16 bit count format for 128 bit
- * HW trace buffer is used for the SPU PC storage
- * HDR bits 0:15
- * SPU Addr 0 bits 16:31
- * SPU Addr 1 bits 32:47
- * unused bits 48:127
- *
- * HDR: bit4 = 1 SPU Address 0 valid
- * HDR: bit5 = 1 SPU Address 1 valid
- * - unfortunately, the valid bits don't seem to work
- *
- * Note trace_buffer[0] holds bits 0:63 of the HW
- * trace buffer, trace_buffer[1] holds bits 64:127
- */
-
- trace_entry = trace_buffer[0]
- & 0x00000000FFFF0000;
-
- /* only the top 16 bits of the 18-bit SPU PC address
- * are stored in the trace buffer, hence shift right
- * by 16 - 2 = 14 bits */
- sample = trace_entry >> 14;
- last_trace_buffer = trace_buffer[0];
-
- spu_num = spu_evnt_phys_spu_indx
- + (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE);
-
- /* make sure only one process at a time is calling
- * spu_sync_buffer()
- */
- spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
- sample_array_lock_flags);
- spu_sync_buffer(spu_num, &sample, 1);
- spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
- sample_array_lock_flags);
-
- smp_wmb(); /* ensure SPU event buffer updates are written;
- * we don't want events intermingled... */
-
- /* The counters were frozen by the interrupt.
- * Reenable the interrupt and restart the counters.
- */
- cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
- cbe_enable_pm_interrupts(cpu, hdw_thread,
- virt_cntr_inter_mask);
-
- /* clear the trace buffer, re-enable writes to trace buff */
- cbe_write_pm(cpu, trace_address, 0);
- cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
-
- /* The writes to the various performance counters only writes
- * to a latch. The new values (interrupt setting bits, reset
- * counter value etc.) are not copied to the actual registers
- * until the performance monitor is enabled. In order to get
- * this to work as desired, the performance monitor needs to
- * be disabled while writing to the latches. This is a
- * HW design issue.
- */
- write_pm_cntrl(cpu);
- cbe_enable_pm(cpu);
- }
- spin_unlock_irqrestore(&cntr_lock, flags);
-}
-
-static void cell_handle_interrupt_ppu(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- u32 cpu;
- u64 pc;
- int is_kernel;
- unsigned long flags = 0;
- u32 interrupt_mask;
- int i;
-
- cpu = smp_processor_id();
-
- /*
- * Need to make sure the interrupt handler and the virt counter
- * routine are not running at the same time. See the
- * cell_virtual_cntr() routine for additional comments.
- */
- spin_lock_irqsave(&cntr_lock, flags);
-
- /*
- * Need to disable and reenable the performance counters
- * to get the desired behavior from the hardware. This
- * is hardware specific.
- */
-
- cbe_disable_pm(cpu);
-
- interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
-
- /*
- * If the interrupt mask has been cleared, then the virt cntr
- * has cleared the interrupt. When the thread that generated
- * the interrupt is restored, the data count will be restored to
- * 0xffffff0 to cause the interrupt to be regenerated.
- */
-
- if ((oprofile_running == 1) && (interrupt_mask != 0)) {
- pc = regs->nip;
- is_kernel = is_kernel_addr(pc);
-
- for (i = 0; i < num_counters; ++i) {
- if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
- && ctr[i].enabled) {
- oprofile_add_ext_sample(pc, regs, i, is_kernel);
- cbe_write_ctr(cpu, i, reset_value[i]);
- }
- }
-
- /*
- * The counters were frozen by the interrupt.
- * Reenable the interrupt and restart the counters.
- * If there was a race between the interrupt handler and
- * the virtual counter routine. The virtual counter
- * routine may have cleared the interrupts. Hence must
- * use the virt_cntr_inter_mask to re-enable the interrupts.
- */
- cbe_enable_pm_interrupts(cpu, hdw_thread,
- virt_cntr_inter_mask);
-
- /*
- * The writes to the various performance counters only writes
- * to a latch. The new values (interrupt setting bits, reset
- * counter value etc.) are not copied to the actual registers
- * until the performance monitor is enabled. In order to get
- * this to work as desired, the performance monitor needs to
- * be disabled while writing to the latches. This is a
- * HW design issue.
- */
- cbe_enable_pm(cpu);
- }
- spin_unlock_irqrestore(&cntr_lock, flags);
-}
-
-static void cell_handle_interrupt(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- if (profiling_mode == PPU_PROFILING)
- cell_handle_interrupt_ppu(regs, ctr);
- else
- cell_handle_interrupt_spu(regs, ctr);
-}
-
-/*
- * This function is called from the generic OProfile
- * driver. When profiling PPUs, we need to do the
- * generic sync start; otherwise, do spu_sync_start.
- */
-static int cell_sync_start(void)
-{
- if ((profiling_mode == SPU_PROFILING_CYCLES) ||
- (profiling_mode == SPU_PROFILING_EVENTS))
- return spu_sync_start();
- else
- return DO_GENERIC_SYNC;
-}
-
-static int cell_sync_stop(void)
-{
- if ((profiling_mode == SPU_PROFILING_CYCLES) ||
- (profiling_mode == SPU_PROFILING_EVENTS))
- return spu_sync_stop();
- else
- return 1;
-}
-
-struct op_powerpc_model op_model_cell = {
- .reg_setup = cell_reg_setup,
- .cpu_setup = cell_cpu_setup,
- .global_start = cell_global_start,
- .global_stop = cell_global_stop,
- .sync_start = cell_sync_start,
- .sync_stop = cell_sync_stop,
- .handle_interrupt = cell_handle_interrupt,
-};
diff --git a/arch/powerpc/oprofile/op_model_fsl_emb.c b/arch/powerpc/oprofile/op_model_fsl_emb.c
deleted file mode 100644
index 25dc6813ecee..000000000000
--- a/arch/powerpc/oprofile/op_model_fsl_emb.c
+++ /dev/null
@@ -1,380 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Freescale Embedded oprofile support, based on ppc64 oprofile support
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * Copyright (c) 2004, 2010 Freescale Semiconductor, Inc
- *
- * Author: Andy Fleming
- * Maintainer: Kumar Gala <galak@kernel.crashing.org>
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/cputable.h>
-#include <asm/reg_fsl_emb.h>
-#include <asm/page.h>
-#include <asm/pmc.h>
-#include <asm/oprofile_impl.h>
-
-static unsigned long reset_value[OP_MAX_COUNTER];
-
-static int num_counters;
-static int oprofile_running;
-
-static inline u32 get_pmlca(int ctr)
-{
- u32 pmlca;
-
- switch (ctr) {
- case 0:
- pmlca = mfpmr(PMRN_PMLCA0);
- break;
- case 1:
- pmlca = mfpmr(PMRN_PMLCA1);
- break;
- case 2:
- pmlca = mfpmr(PMRN_PMLCA2);
- break;
- case 3:
- pmlca = mfpmr(PMRN_PMLCA3);
- break;
- case 4:
- pmlca = mfpmr(PMRN_PMLCA4);
- break;
- case 5:
- pmlca = mfpmr(PMRN_PMLCA5);
- break;
- default:
- panic("Bad ctr number\n");
- }
-
- return pmlca;
-}
-
-static inline void set_pmlca(int ctr, u32 pmlca)
-{
- switch (ctr) {
- case 0:
- mtpmr(PMRN_PMLCA0, pmlca);
- break;
- case 1:
- mtpmr(PMRN_PMLCA1, pmlca);
- break;
- case 2:
- mtpmr(PMRN_PMLCA2, pmlca);
- break;
- case 3:
- mtpmr(PMRN_PMLCA3, pmlca);
- break;
- case 4:
- mtpmr(PMRN_PMLCA4, pmlca);
- break;
- case 5:
- mtpmr(PMRN_PMLCA5, pmlca);
- break;
- default:
- panic("Bad ctr number\n");
- }
-}
-
-static inline unsigned int ctr_read(unsigned int i)
-{
- switch(i) {
- case 0:
- return mfpmr(PMRN_PMC0);
- case 1:
- return mfpmr(PMRN_PMC1);
- case 2:
- return mfpmr(PMRN_PMC2);
- case 3:
- return mfpmr(PMRN_PMC3);
- case 4:
- return mfpmr(PMRN_PMC4);
- case 5:
- return mfpmr(PMRN_PMC5);
- default:
- return 0;
- }
-}
-
-static inline void ctr_write(unsigned int i, unsigned int val)
-{
- switch(i) {
- case 0:
- mtpmr(PMRN_PMC0, val);
- break;
- case 1:
- mtpmr(PMRN_PMC1, val);
- break;
- case 2:
- mtpmr(PMRN_PMC2, val);
- break;
- case 3:
- mtpmr(PMRN_PMC3, val);
- break;
- case 4:
- mtpmr(PMRN_PMC4, val);
- break;
- case 5:
- mtpmr(PMRN_PMC5, val);
- break;
- default:
- break;
- }
-}
-
-
-static void init_pmc_stop(int ctr)
-{
- u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
- PMLCA_FCM1 | PMLCA_FCM0);
- u32 pmlcb = 0;
-
- switch (ctr) {
- case 0:
- mtpmr(PMRN_PMLCA0, pmlca);
- mtpmr(PMRN_PMLCB0, pmlcb);
- break;
- case 1:
- mtpmr(PMRN_PMLCA1, pmlca);
- mtpmr(PMRN_PMLCB1, pmlcb);
- break;
- case 2:
- mtpmr(PMRN_PMLCA2, pmlca);
- mtpmr(PMRN_PMLCB2, pmlcb);
- break;
- case 3:
- mtpmr(PMRN_PMLCA3, pmlca);
- mtpmr(PMRN_PMLCB3, pmlcb);
- break;
- case 4:
- mtpmr(PMRN_PMLCA4, pmlca);
- mtpmr(PMRN_PMLCB4, pmlcb);
- break;
- case 5:
- mtpmr(PMRN_PMLCA5, pmlca);
- mtpmr(PMRN_PMLCB5, pmlcb);
- break;
- default:
- panic("Bad ctr number!\n");
- }
-}
-
-static void set_pmc_event(int ctr, int event)
-{
- u32 pmlca;
-
- pmlca = get_pmlca(ctr);
-
- pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
- ((event << PMLCA_EVENT_SHIFT) &
- PMLCA_EVENT_MASK);
-
- set_pmlca(ctr, pmlca);
-}
-
-static void set_pmc_user_kernel(int ctr, int user, int kernel)
-{
- u32 pmlca;
-
- pmlca = get_pmlca(ctr);
-
- if(user)
- pmlca &= ~PMLCA_FCU;
- else
- pmlca |= PMLCA_FCU;
-
- if(kernel)
- pmlca &= ~PMLCA_FCS;
- else
- pmlca |= PMLCA_FCS;
-
- set_pmlca(ctr, pmlca);
-}
-
-static void set_pmc_marked(int ctr, int mark0, int mark1)
-{
- u32 pmlca = get_pmlca(ctr);
-
- if(mark0)
- pmlca &= ~PMLCA_FCM0;
- else
- pmlca |= PMLCA_FCM0;
-
- if(mark1)
- pmlca &= ~PMLCA_FCM1;
- else
- pmlca |= PMLCA_FCM1;
-
- set_pmlca(ctr, pmlca);
-}
-
-static void pmc_start_ctr(int ctr, int enable)
-{
- u32 pmlca = get_pmlca(ctr);
-
- pmlca &= ~PMLCA_FC;
-
- if (enable)
- pmlca |= PMLCA_CE;
- else
- pmlca &= ~PMLCA_CE;
-
- set_pmlca(ctr, pmlca);
-}
-
-static void pmc_start_ctrs(int enable)
-{
- u32 pmgc0 = mfpmr(PMRN_PMGC0);
-
- pmgc0 &= ~PMGC0_FAC;
- pmgc0 |= PMGC0_FCECE;
-
- if (enable)
- pmgc0 |= PMGC0_PMIE;
- else
- pmgc0 &= ~PMGC0_PMIE;
-
- mtpmr(PMRN_PMGC0, pmgc0);
-}
-
-static void pmc_stop_ctrs(void)
-{
- u32 pmgc0 = mfpmr(PMRN_PMGC0);
-
- pmgc0 |= PMGC0_FAC;
-
- pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);
-
- mtpmr(PMRN_PMGC0, pmgc0);
-}
-
-static int fsl_emb_cpu_setup(struct op_counter_config *ctr)
-{
- int i;
-
- /* freeze all counters */
- pmc_stop_ctrs();
-
- for (i = 0;i < num_counters;i++) {
- init_pmc_stop(i);
-
- set_pmc_event(i, ctr[i].event);
-
- set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
- }
-
- return 0;
-}
-
-static int fsl_emb_reg_setup(struct op_counter_config *ctr,
- struct op_system_config *sys,
- int num_ctrs)
-{
- int i;
-
- num_counters = num_ctrs;
-
- /* Our counters count up, and "count" refers to
- * how many events occur before the next interrupt, and we interrupt
- * on overflow. So we calculate the starting value
- * which will give us "count" until overflow.
- * Then we set the events on the enabled counters */
- for (i = 0; i < num_counters; ++i)
- reset_value[i] = 0x80000000UL - ctr[i].count;
-
- return 0;
-}
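For instance, with a requested count of 100000 events per sample the starting
value works out to

    reset_value = 0x80000000 - 100000 = 0x7FFE7960

so the counter, which counts up from that value, overflows (its sign bit
becomes set) after exactly 100000 events, at which point the interrupt
handler below records a sample and rewrites the counter with reset_value.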
-
-static int fsl_emb_start(struct op_counter_config *ctr)
-{
- int i;
-
- mtmsr(mfmsr() | MSR_PMM);
-
- for (i = 0; i < num_counters; ++i) {
- if (ctr[i].enabled) {
- ctr_write(i, reset_value[i]);
- /* Set each enabled counter to only
- * count when the Mark bit is *not* set */
- set_pmc_marked(i, 1, 0);
- pmc_start_ctr(i, 1);
- } else {
- ctr_write(i, 0);
-
- /* Set the ctr to be stopped */
- pmc_start_ctr(i, 0);
- }
- }
-
- /* Clear the freeze bit, and enable the interrupt.
- * The counters won't actually start until the rfi clears
- * the PMM bit */
- pmc_start_ctrs(1);
-
- oprofile_running = 1;
-
- pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
- mfpmr(PMRN_PMGC0));
-
- return 0;
-}
-
-static void fsl_emb_stop(void)
-{
- /* freeze counters */
- pmc_stop_ctrs();
-
- oprofile_running = 0;
-
- pr_debug("stop on cpu %d, pmgc0 %x\n", smp_processor_id(),
- mfpmr(PMRN_PMGC0));
-
- mb();
-}
-
-
-static void fsl_emb_handle_interrupt(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- unsigned long pc;
- int is_kernel;
- int val;
- int i;
-
- pc = regs->nip;
- is_kernel = is_kernel_addr(pc);
-
- for (i = 0; i < num_counters; ++i) {
- val = ctr_read(i);
- if (val < 0) {
- if (oprofile_running && ctr[i].enabled) {
- oprofile_add_ext_sample(pc, regs, i, is_kernel);
- ctr_write(i, reset_value[i]);
- } else {
- ctr_write(i, 0);
- }
- }
- }
-
- /* The freeze bit was set by the interrupt. */
- /* Clear the freeze bit, and reenable the interrupt. The
- * counters won't actually start until the rfi clears the PMM
- * bit. The PMM bit should not be set until after the interrupt
- * is cleared to avoid it getting lost in some hypervisor
- * environments.
- */
- mtmsr(mfmsr() | MSR_PMM);
- pmc_start_ctrs(1);
-}
-
-struct op_powerpc_model op_model_fsl_emb = {
- .reg_setup = fsl_emb_reg_setup,
- .cpu_setup = fsl_emb_cpu_setup,
- .start = fsl_emb_start,
- .stop = fsl_emb_stop,
- .handle_interrupt = fsl_emb_handle_interrupt,
-};
diff --git a/arch/powerpc/oprofile/op_model_pa6t.c b/arch/powerpc/oprofile/op_model_pa6t.c
deleted file mode 100644
index d23061cf76bc..000000000000
--- a/arch/powerpc/oprofile/op_model_pa6t.c
+++ /dev/null
@@ -1,227 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2006-2007 PA Semi, Inc
- *
- * Author: Shashi Rao, PA Semi
- *
- * Maintained by: Olof Johansson <olof@lixom.net>
- *
- * Based on arch/powerpc/oprofile/op_model_power4.c
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <linux/percpu.h>
-#include <asm/processor.h>
-#include <asm/cputable.h>
-#include <asm/oprofile_impl.h>
-#include <asm/reg.h>
-
-static unsigned char oprofile_running;
-
-/* mmcr values are set in pa6t_reg_setup, used in pa6t_cpu_setup */
-static u64 mmcr0_val;
-static u64 mmcr1_val;
-
-/* inited in pa6t_reg_setup */
-static u64 reset_value[OP_MAX_COUNTER];
-
-static inline u64 ctr_read(unsigned int i)
-{
- switch (i) {
- case 0:
- return mfspr(SPRN_PA6T_PMC0);
- case 1:
- return mfspr(SPRN_PA6T_PMC1);
- case 2:
- return mfspr(SPRN_PA6T_PMC2);
- case 3:
- return mfspr(SPRN_PA6T_PMC3);
- case 4:
- return mfspr(SPRN_PA6T_PMC4);
- case 5:
- return mfspr(SPRN_PA6T_PMC5);
- default:
- printk(KERN_ERR "ctr_read called with bad arg %u\n", i);
- return 0;
- }
-}
-
-static inline void ctr_write(unsigned int i, u64 val)
-{
- switch (i) {
- case 0:
- mtspr(SPRN_PA6T_PMC0, val);
- break;
- case 1:
- mtspr(SPRN_PA6T_PMC1, val);
- break;
- case 2:
- mtspr(SPRN_PA6T_PMC2, val);
- break;
- case 3:
- mtspr(SPRN_PA6T_PMC3, val);
- break;
- case 4:
- mtspr(SPRN_PA6T_PMC4, val);
- break;
- case 5:
- mtspr(SPRN_PA6T_PMC5, val);
- break;
- default:
- printk(KERN_ERR "ctr_write called with bad arg %u\n", i);
- break;
- }
-}
-
-
-/* precompute the values to stuff in the hardware registers */
-static int pa6t_reg_setup(struct op_counter_config *ctr,
- struct op_system_config *sys,
- int num_ctrs)
-{
- int pmc;
-
- /*
- * adjust the mmcr0.en[0-5] and mmcr0.inten[0-5] values obtained from the
- * event_mappings file by turning off the counters that the user doesn't
- * care about
- *
- * setup user and kernel profiling
- */
- for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++)
- if (!ctr[pmc].enabled) {
- sys->mmcr0 &= ~(0x1UL << pmc);
- sys->mmcr0 &= ~(0x1UL << (pmc+12));
- pr_debug("turned off counter %u\n", pmc);
- }
-
- if (sys->enable_kernel)
- sys->mmcr0 |= PA6T_MMCR0_SUPEN | PA6T_MMCR0_HYPEN;
- else
- sys->mmcr0 &= ~(PA6T_MMCR0_SUPEN | PA6T_MMCR0_HYPEN);
-
- if (sys->enable_user)
- sys->mmcr0 |= PA6T_MMCR0_PREN;
- else
- sys->mmcr0 &= ~PA6T_MMCR0_PREN;
-
- /*
- * The performance counter event settings are given in the mmcr0 and
- * mmcr1 values passed from the user in the op_system_config
- * structure (sys variable).
- */
- mmcr0_val = sys->mmcr0;
- mmcr1_val = sys->mmcr1;
- pr_debug("mmcr0_val inited to %016lx\n", sys->mmcr0);
- pr_debug("mmcr1_val inited to %016lx\n", sys->mmcr1);
-
- for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++) {
- /* counters are 40 bit. Move to cputable at some point? */
- reset_value[pmc] = (0x1UL << 39) - ctr[pmc].count;
- pr_debug("reset_value for pmc%u inited to 0x%llx\n",
- pmc, reset_value[pmc]);
- }
-
- return 0;
-}
-
-/* configure registers on this cpu */
-static int pa6t_cpu_setup(struct op_counter_config *ctr)
-{
- u64 mmcr0 = mmcr0_val;
- u64 mmcr1 = mmcr1_val;
-
- /* Default is all PMCs off */
- mmcr0 &= ~(0x3FUL);
- mtspr(SPRN_PA6T_MMCR0, mmcr0);
-
- /* program selected programmable events in */
- mtspr(SPRN_PA6T_MMCR1, mmcr1);
-
- pr_debug("setup on cpu %d, mmcr0 %016lx\n", smp_processor_id(),
- mfspr(SPRN_PA6T_MMCR0));
- pr_debug("setup on cpu %d, mmcr1 %016lx\n", smp_processor_id(),
- mfspr(SPRN_PA6T_MMCR1));
-
- return 0;
-}
-
-static int pa6t_start(struct op_counter_config *ctr)
-{
- int i;
-
- /* Hold off event counting until rfid */
- u64 mmcr0 = mmcr0_val | PA6T_MMCR0_HANDDIS;
-
- for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
- if (ctr[i].enabled)
- ctr_write(i, reset_value[i]);
- else
- ctr_write(i, 0UL);
-
- mtspr(SPRN_PA6T_MMCR0, mmcr0);
-
- oprofile_running = 1;
-
- pr_debug("start on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);
-
- return 0;
-}
-
-static void pa6t_stop(void)
-{
- u64 mmcr0;
-
- /* freeze counters */
- mmcr0 = mfspr(SPRN_PA6T_MMCR0);
- mmcr0 |= PA6T_MMCR0_FCM0;
- mtspr(SPRN_PA6T_MMCR0, mmcr0);
-
- oprofile_running = 0;
-
- pr_debug("stop on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);
-}
-
-/* handle the perfmon overflow vector */
-static void pa6t_handle_interrupt(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- unsigned long pc = mfspr(SPRN_PA6T_SIAR);
- int is_kernel = is_kernel_addr(pc);
- u64 val;
- int i;
- u64 mmcr0;
-
- /* disable perfmon counting until rfid */
- mmcr0 = mfspr(SPRN_PA6T_MMCR0);
- mtspr(SPRN_PA6T_MMCR0, mmcr0 | PA6T_MMCR0_HANDDIS);
-
- /* Record samples. We've got one global bit for whether a sample
- * was taken, so add it for any counter that triggered overflow.
- */
- for (i = 0; i < cur_cpu_spec->num_pmcs; i++) {
- val = ctr_read(i);
- if (val & (0x1UL << 39)) { /* Overflow bit set */
- if (oprofile_running && ctr[i].enabled) {
- if (mmcr0 & PA6T_MMCR0_SIARLOG)
- oprofile_add_ext_sample(pc, regs, i, is_kernel);
- ctr_write(i, reset_value[i]);
- } else {
- ctr_write(i, 0UL);
- }
- }
- }
-
- /* Restore mmcr0 to a good known value since the PMI changes it */
- mmcr0 = mmcr0_val | PA6T_MMCR0_HANDDIS;
- mtspr(SPRN_PA6T_MMCR0, mmcr0);
-}
-
-struct op_powerpc_model op_model_pa6t = {
- .reg_setup = pa6t_reg_setup,
- .cpu_setup = pa6t_cpu_setup,
- .start = pa6t_start,
- .stop = pa6t_stop,
- .handle_interrupt = pa6t_handle_interrupt,
-};
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
deleted file mode 100644
index 2ae6b86ff97b..000000000000
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ /dev/null
@@ -1,438 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- * Added mmcra[slot] support:
- * Copyright (C) 2006-2007 Will Schmidt <willschm@us.ibm.com>, IBM
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/firmware.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/cputable.h>
-#include <asm/rtas.h>
-#include <asm/oprofile_impl.h>
-#include <asm/reg.h>
-
-#define dbg(args...)
-#define OPROFILE_PM_PMCSEL_MSK 0xffULL
-#define OPROFILE_PM_UNIT_SHIFT 60
-#define OPROFILE_PM_UNIT_MSK 0xfULL
-#define OPROFILE_MAX_PMC_NUM 3
-#define OPROFILE_PMSEL_FIELD_WIDTH 8
-#define OPROFILE_UNIT_FIELD_WIDTH 4
-#define MMCRA_SIAR_VALID_MASK 0x10000000ULL
-
-static unsigned long reset_value[OP_MAX_COUNTER];
-
-static int oprofile_running;
-static int use_slot_nums;
-
-/* mmcr values are set in power4_reg_setup, used in power4_cpu_setup */
-static u32 mmcr0_val;
-static u64 mmcr1_val;
-static u64 mmcra_val;
-static u32 cntr_marked_events;
-
-static int power7_marked_instr_event(u64 mmcr1)
-{
- u64 psel, unit;
- int pmc, cntr_marked_events = 0;
-
- /* Given the MMCR1 value, look at the field for each counter to
- * determine if it is a marked event. Code based on the function
- * power7_marked_instr_event() in file arch/powerpc/perf/power7-pmu.c.
- */
- for (pmc = 0; pmc < 4; pmc++) {
- psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK
- << (OPROFILE_MAX_PMC_NUM - pmc)
- * OPROFILE_PMSEL_FIELD_WIDTH);
- psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc)
- * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL;
- unit = mmcr1 & (OPROFILE_PM_UNIT_MSK
- << (OPROFILE_PM_UNIT_SHIFT
- - (pmc * OPROFILE_PMSEL_FIELD_WIDTH )));
- unit = unit >> (OPROFILE_PM_UNIT_SHIFT
- - (pmc * OPROFILE_PMSEL_FIELD_WIDTH));
-
- switch (psel >> 4) {
- case 2:
- cntr_marked_events |= (pmc == 1 || pmc == 3) << pmc;
- break;
- case 3:
- if (psel == 0x3c) {
- cntr_marked_events |= (pmc == 0) << pmc;
- break;
- }
-
- if (psel == 0x3e) {
- cntr_marked_events |= (pmc != 1) << pmc;
- break;
- }
-
- cntr_marked_events |= 1 << pmc;
- break;
- case 4:
- case 5:
- cntr_marked_events |= (unit == 0xd) << pmc;
- break;
- case 6:
- if (psel == 0x64)
- cntr_marked_events |= (pmc >= 2) << pmc;
- break;
- case 8:
- cntr_marked_events |= (unit == 0xd) << pmc;
- break;
- }
- }
- return cntr_marked_events;
-}
-
-static int power4_reg_setup(struct op_counter_config *ctr,
- struct op_system_config *sys,
- int num_ctrs)
-{
- int i;
-
- /*
- * The performance counter event settings are given in the mmcr0,
- * mmcr1 and mmcra values passed from the user in the
- * op_system_config structure (sys variable).
- */
- mmcr0_val = sys->mmcr0;
- mmcr1_val = sys->mmcr1;
- mmcra_val = sys->mmcra;
-
- /* Power 7+ and newer architectures:
- * Determine which counter events in the group (the group of events is
- * specified by the bit settings in the MMCR1 register) are marked
- * events for use in the interrupt handler. Do the calculation once
- * before OProfile starts. Information is used in the interrupt
- * handler. Starting with Power 7+ we only record the sample for
- * marked events if the SIAR valid bit is set. For non marked events
- * the sample is always recorded.
- */
- if (pvr_version_is(PVR_POWER7p))
- cntr_marked_events = power7_marked_instr_event(mmcr1_val);
- else
- cntr_marked_events = 0; /* For older processors, set the bit map
- * to zero so the sample will always
- * be recorded.
- */
-
- for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
- reset_value[i] = 0x80000000UL - ctr[i].count;
-
- /* setup user and kernel profiling */
- if (sys->enable_kernel)
- mmcr0_val &= ~MMCR0_KERNEL_DISABLE;
- else
- mmcr0_val |= MMCR0_KERNEL_DISABLE;
-
- if (sys->enable_user)
- mmcr0_val &= ~MMCR0_PROBLEM_DISABLE;
- else
- mmcr0_val |= MMCR0_PROBLEM_DISABLE;
-
- if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) ||
- pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) ||
- pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX) ||
- pvr_version_is(PVR_POWER5) || pvr_version_is(PVR_POWER5p))
- use_slot_nums = 1;
-
- return 0;
-}
-
-extern void ppc_enable_pmcs(void);
-
-/*
- * Older CPUs require the MMCRA sample bit to be always set, but newer
- * CPUs only want it set for some groups. Eventually we will remove all
- * knowledge of this bit in the kernel, oprofile userspace should be
- * setting it when required.
- *
- * In order to keep current installations working we force the bit for
- * those older CPUs. Once everyone has updated their oprofile userspace we
- * can remove this hack.
- */
-static inline int mmcra_must_set_sample(void)
-{
- if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) ||
- pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) ||
- pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX))
- return 1;
-
- return 0;
-}
-
-static int power4_cpu_setup(struct op_counter_config *ctr)
-{
- unsigned int mmcr0 = mmcr0_val;
- unsigned long mmcra = mmcra_val;
-
- ppc_enable_pmcs();
-
- /* set the freeze bit */
- mmcr0 |= MMCR0_FC;
- mtspr(SPRN_MMCR0, mmcr0);
-
- mmcr0 |= MMCR0_FCM1|MMCR0_PMXE|MMCR0_FCECE;
- mmcr0 |= MMCR0_PMC1CE|MMCR0_PMCjCE;
- mtspr(SPRN_MMCR0, mmcr0);
-
- mtspr(SPRN_MMCR1, mmcr1_val);
-
- if (mmcra_must_set_sample())
- mmcra |= MMCRA_SAMPLE_ENABLE;
- mtspr(SPRN_MMCRA, mmcra);
-
- dbg("setup on cpu %d, mmcr0 %lx\n", smp_processor_id(),
- mfspr(SPRN_MMCR0));
- dbg("setup on cpu %d, mmcr1 %lx\n", smp_processor_id(),
- mfspr(SPRN_MMCR1));
- dbg("setup on cpu %d, mmcra %lx\n", smp_processor_id(),
- mfspr(SPRN_MMCRA));
-
- return 0;
-}
-
-static int power4_start(struct op_counter_config *ctr)
-{
- int i;
- unsigned int mmcr0;
-
- /* set the PMM bit (see comment below) */
- mtmsr(mfmsr() | MSR_PMM);
-
- for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
- if (ctr[i].enabled) {
- classic_ctr_write(i, reset_value[i]);
- } else {
- classic_ctr_write(i, 0);
- }
- }
-
- mmcr0 = mfspr(SPRN_MMCR0);
-
- /*
- * We must clear the PMAO bit on some (GQ) chips. Just do it
- * all the time
- */
- mmcr0 &= ~MMCR0_PMAO;
-
- /*
- * now clear the freeze bit, counting will not start until we
- * rfid from this exception, because only at that point will
- * the PMM bit be cleared
- */
- mmcr0 &= ~MMCR0_FC;
- mtspr(SPRN_MMCR0, mmcr0);
-
- oprofile_running = 1;
-
- dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
- return 0;
-}
-
-static void power4_stop(void)
-{
- unsigned int mmcr0;
-
- /* freeze counters */
- mmcr0 = mfspr(SPRN_MMCR0);
- mmcr0 |= MMCR0_FC;
- mtspr(SPRN_MMCR0, mmcr0);
-
- oprofile_running = 0;
-
- dbg("stop on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
-
- mb();
-}
-
-/* Fake functions used by canonicalize_pc */
-static void __used hypervisor_bucket(void)
-{
-}
-
-static void __used rtas_bucket(void)
-{
-}
-
-static void __used kernel_unknown_bucket(void)
-{
-}
-
-/*
- * On GQ and newer the MMCRA stores the HV and PR bits at the time
- * the SIAR was sampled. We use that to work out if the SIAR was sampled in
- * the hypervisor, our exception vectors or RTAS.
- * If the MMCRA_SAMPLE_ENABLE bit is set, we can use the MMCRA[slot] bits
- * to more accurately identify the address of the sampled instruction. The
- * mmcra[slot] bits represent the slot number of a sampled instruction
- * within an instruction group. The slot will contain a value between 1
- * and 5 if MMCRA_SAMPLE_ENABLE is set, otherwise 0.
- */
-static unsigned long get_pc(struct pt_regs *regs)
-{
- unsigned long pc = mfspr(SPRN_SIAR);
- unsigned long mmcra;
- unsigned long slot;
-
- /* Can't do much about it */
- if (!cur_cpu_spec->oprofile_mmcra_sihv)
- return pc;
-
- mmcra = mfspr(SPRN_MMCRA);
-
- if (use_slot_nums && (mmcra & MMCRA_SAMPLE_ENABLE)) {
- slot = ((mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT);
- if (slot > 1)
- pc += 4 * (slot - 1);
- }
-
- /* Were we in the hypervisor? */
- if (firmware_has_feature(FW_FEATURE_LPAR) &&
- (mmcra & cur_cpu_spec->oprofile_mmcra_sihv))
- /* function descriptor madness */
- return *((unsigned long *)hypervisor_bucket);
-
- /* We were in userspace, nothing to do */
- if (mmcra & cur_cpu_spec->oprofile_mmcra_sipr)
- return pc;
-
-#ifdef CONFIG_PPC_RTAS
- /* Were we in RTAS? */
- if (pc >= rtas.base && pc < (rtas.base + rtas.size))
- /* function descriptor madness */
- return *((unsigned long *)rtas_bucket);
-#endif
-
- /* Were we in our exception vectors or SLB real mode miss handler? */
- if (pc < 0x1000000UL)
- return (unsigned long)__va(pc);
-
- /* Not sure where we were */
- if (!is_kernel_addr(pc))
- /* function descriptor madness */
- return *((unsigned long *)kernel_unknown_bucket);
-
- return pc;
-}
-
-static int get_kernel(unsigned long pc, unsigned long mmcra)
-{
- int is_kernel;
-
- if (!cur_cpu_spec->oprofile_mmcra_sihv) {
- is_kernel = is_kernel_addr(pc);
- } else {
- is_kernel = ((mmcra & cur_cpu_spec->oprofile_mmcra_sipr) == 0);
- }
-
- return is_kernel;
-}
-
-static bool pmc_overflow(unsigned long val)
-{
- if ((int)val < 0)
- return true;
-
- /*
- * Events on POWER7 can roll back if a speculative event doesn't
- * eventually complete. Unfortunately in some rare cases they will
- * raise a performance monitor exception. We need to catch this to
- * ensure we reset the PMC. In all cases the PMC will be 256 or less
- * cycles from overflow.
- *
- * We only do this if the first pass fails to find any overflowing
- * PMCs because a user might set a period of less than 256 and we
- * don't want to mistakenly reset them.
- */
- if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256))
- return true;
-
- return false;
-}
-
-static void power4_handle_interrupt(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- unsigned long pc;
- int is_kernel;
- int val;
- int i;
- unsigned int mmcr0;
- unsigned long mmcra;
- bool siar_valid = false;
-
- mmcra = mfspr(SPRN_MMCRA);
-
- pc = get_pc(regs);
- is_kernel = get_kernel(pc, mmcra);
-
- /* set the PMM bit (see comment below) */
- mtmsr(mfmsr() | MSR_PMM);
-
- /* Check that the SIAR valid bit in MMCRA is set to 1. */
- if ((mmcra & MMCRA_SIAR_VALID_MASK) == MMCRA_SIAR_VALID_MASK)
- siar_valid = true;
-
- for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
- val = classic_ctr_read(i);
- if (pmc_overflow(val)) {
- if (oprofile_running && ctr[i].enabled) {
- /* Power 7+ and newer architectures:
- * If the event is a marked event, then only
- * save the sample if the SIAR valid bit is
- * set. If the event is not marked, then
- * always save the sample.
- * Note, the Sample enable bit in the MMCRA
- * register must be set to 1 if the group
- * contains a marked event.
- */
- if ((siar_valid &&
- (cntr_marked_events & (1 << i)))
- || !(cntr_marked_events & (1 << i)))
- oprofile_add_ext_sample(pc, regs, i,
- is_kernel);
-
- classic_ctr_write(i, reset_value[i]);
- } else {
- classic_ctr_write(i, 0);
- }
- }
- }
-
- mmcr0 = mfspr(SPRN_MMCR0);
-
- /* reset the perfmon trigger */
- mmcr0 |= MMCR0_PMXE;
-
- /*
- * We must clear the PMAO bit on some (GQ) chips. Just do it
- * all the time
- */
- mmcr0 &= ~MMCR0_PMAO;
-
- /* Clear the appropriate bits in the MMCRA */
- mmcra &= ~cur_cpu_spec->oprofile_mmcra_clear;
- mtspr(SPRN_MMCRA, mmcra);
-
- /*
- * now clear the freeze bit, counting will not start until we
- * rfid from this exception, because only at that point will
- * the PMM bit be cleared
- */
- mmcr0 &= ~MMCR0_FC;
- mtspr(SPRN_MMCR0, mmcr0);
-}
-
-struct op_powerpc_model op_model_power4 = {
- .reg_setup = power4_reg_setup,
- .cpu_setup = power4_cpu_setup,
- .start = power4_start,
- .stop = power4_stop,
- .handle_interrupt = power4_handle_interrupt,
-};
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 28206b1fe172..6817331e22ff 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -54,6 +54,9 @@ struct cpu_hw_events {
struct perf_branch_stack bhrb_stack;
struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES];
u64 ic_init;
+
+ /* Store the PMC values */
+ unsigned long pmcs[MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
@@ -110,10 +113,6 @@ static inline void perf_read_regs(struct pt_regs *regs)
{
regs->result = 0;
}
-static inline int perf_intr_is_nmi(struct pt_regs *regs)
-{
- return 0;
-}
static inline int siar_valid(struct pt_regs *regs)
{
@@ -147,6 +146,17 @@ bool is_sier_available(void)
return false;
}
+/*
+ * Return PMC value corresponding to the
+ * index passed.
+ */
+unsigned long get_pmcs_ext_regs(int idx)
+{
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+
+ return cpuhw->pmcs[idx];
+}
+
static bool regs_use_siar(struct pt_regs *regs)
{
/*
@@ -354,15 +364,6 @@ static inline void perf_read_regs(struct pt_regs *regs)
}
/*
- * If interrupts were soft-disabled when a PMU interrupt occurs, treat
- * it as an NMI.
- */
-static inline int perf_intr_is_nmi(struct pt_regs *regs)
-{
- return (regs->softe & IRQS_DISABLED);
-}
-
-/*
* On processors like P7+ that have the SIAR-Valid bit, marked instructions
* must be sampled only if the SIAR-valid bit is set.
*
@@ -915,7 +916,7 @@ void perf_event_print_debug(void)
*/
static int power_check_constraints(struct cpu_hw_events *cpuhw,
u64 event_id[], unsigned int cflags[],
- int n_ev)
+ int n_ev, struct perf_event **event)
{
unsigned long mask, value, nv;
unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
@@ -938,7 +939,7 @@ static int power_check_constraints(struct cpu_hw_events *cpuhw,
event_id[i] = cpuhw->alternatives[i][0];
}
if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
- &cpuhw->avalues[i][0]))
+ &cpuhw->avalues[i][0], event[i]->attr.config1))
return -1;
}
value = mask = 0;
@@ -973,7 +974,8 @@ static int power_check_constraints(struct cpu_hw_events *cpuhw,
for (j = 1; j < n_alt[i]; ++j)
ppmu->get_constraint(cpuhw->alternatives[i][j],
&cpuhw->amasks[i][j],
- &cpuhw->avalues[i][j]);
+ &cpuhw->avalues[i][j],
+ event[i]->attr.config1);
}
/* enumerate all possibilities and see if any will work */
@@ -1391,7 +1393,7 @@ static void power_pmu_enable(struct pmu *pmu)
memset(&cpuhw->mmcr, 0, sizeof(cpuhw->mmcr));
if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
- &cpuhw->mmcr, cpuhw->event)) {
+ &cpuhw->mmcr, cpuhw->event, ppmu->flags)) {
/* shouldn't ever get here */
printk(KERN_ERR "oops compute_mmcr failed\n");
goto out;
@@ -1579,7 +1581,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
goto out;
- if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
+ if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1, cpuhw->event))
goto out;
event->hw.config = cpuhw->events[n0];
@@ -1789,7 +1791,7 @@ static int power_pmu_commit_txn(struct pmu *pmu)
n = cpuhw->n_events;
if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
return -EAGAIN;
- i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
+ i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n, cpuhw->event);
if (i < 0)
return -EAGAIN;
@@ -2027,7 +2029,7 @@ static int power_pmu_event_init(struct perf_event *event)
local_irq_save(irq_flags);
cpuhw = this_cpu_ptr(&cpu_hw_events);
- err = power_check_constraints(cpuhw, events, cflags, n + 1);
+ err = power_check_constraints(cpuhw, events, cflags, n + 1, ctrs);
if (has_branch_stack(event)) {
u64 bhrb_filter = -1;
@@ -2149,7 +2151,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
left += period;
if (left <= 0)
left = period;
- record = siar_valid(regs);
+
+ /*
+ * If the address is not requested in the sample via
+ * PERF_SAMPLE_IP, just record the sample irrespective
+ * of the SIAR valid check.
+ */
+ if (event->attr.sample_type & PERF_SAMPLE_IP)
+ record = siar_valid(regs);
+ else
+ record = 1;
+
event->hw.last_period = event->hw.sample_period;
}
if (left < 0x80000000LL)
@@ -2167,9 +2179,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
* MMCR2. Check attr.exclude_kernel and address to drop the sample in
* these cases.
*/
- if (event->attr.exclude_kernel && record)
- if (is_kernel_addr(mfspr(SPRN_SIAR)))
- record = 0;
+ if (event->attr.exclude_kernel &&
+ (event->attr.sample_type & PERF_SAMPLE_IP) &&
+ is_kernel_addr(mfspr(SPRN_SIAR)))
+ record = 0;
/*
* Finally record data if requested.
@@ -2195,7 +2208,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (event->attr.sample_type & PERF_SAMPLE_WEIGHT &&
ppmu->get_mem_weight)
- ppmu->get_mem_weight(&data.weight);
+ ppmu->get_mem_weight(&data.weight.full);
if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0);
@@ -2277,9 +2290,7 @@ static void __perf_event_interrupt(struct pt_regs *regs)
int i, j;
struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
struct perf_event *event;
- unsigned long val[8];
int found, active;
- int nmi;
if (cpuhw->n_limited)
freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
@@ -2287,26 +2298,14 @@ static void __perf_event_interrupt(struct pt_regs *regs)
perf_read_regs(regs);
- /*
- * If perf interrupts hit in a local_irq_disable (soft-masked) region,
- * we consider them as NMIs. This is required to prevent hash faults on
- * user addresses when reading callchains. See the NMI test in
- * do_hash_page.
- */
- nmi = perf_intr_is_nmi(regs);
- if (nmi)
- nmi_enter();
- else
- irq_enter();
-
/* Read all the PMCs since we'll need them a bunch of times */
for (i = 0; i < ppmu->n_counter; ++i)
- val[i] = read_pmc(i + 1);
+ cpuhw->pmcs[i] = read_pmc(i + 1);
/* Try to find what caused the IRQ */
found = 0;
for (i = 0; i < ppmu->n_counter; ++i) {
- if (!pmc_overflow(val[i]))
+ if (!pmc_overflow(cpuhw->pmcs[i]))
continue;
if (is_limited_pmc(i + 1))
continue; /* these won't generate IRQs */
@@ -2321,7 +2320,7 @@ static void __perf_event_interrupt(struct pt_regs *regs)
event = cpuhw->event[j];
if (event->hw.idx == (i + 1)) {
active = 1;
- record_and_restart(event, val[i], regs);
+ record_and_restart(event, cpuhw->pmcs[i], regs);
break;
}
}
@@ -2335,17 +2334,17 @@ static void __perf_event_interrupt(struct pt_regs *regs)
event = cpuhw->event[i];
if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
- if (pmc_overflow_power7(val[event->hw.idx - 1])) {
+ if (pmc_overflow_power7(cpuhw->pmcs[event->hw.idx - 1])) {
/* event has overflowed in a buggy way*/
found = 1;
record_and_restart(event,
- val[event->hw.idx - 1],
+ cpuhw->pmcs[event->hw.idx - 1],
regs);
}
}
}
- if (!found && !nmi && printk_ratelimit())
- printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
+ if (unlikely(!found) && !arch_irq_disabled_regs(regs))
+ printk_ratelimited(KERN_WARNING "Can't find PMC that caused IRQ\n");
/*
* Reset MMCR0 to its normal value. This will set PMXE and
@@ -2356,10 +2355,9 @@ static void __perf_event_interrupt(struct pt_regs *regs)
*/
write_mmcr0(cpuhw, cpuhw->mmcr.mmcr0);
- if (nmi)
- nmi_exit();
- else
- irq_exit();
+ /* Clear the cpuhw->pmcs */
+ memset(&cpuhw->pmcs, 0, sizeof(cpuhw->pmcs));
+
}
static void perf_event_interrupt(struct pt_regs *regs)
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index e0e7e276bfd2..ee721f420a7b 100644
--- a/arch/powerpc/perf/core-fsl-emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
@@ -31,19 +31,6 @@ static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
-/*
- * If interrupts were soft-disabled when a PMU interrupt occurs, treat
- * it as an NMI.
- */
-static inline int perf_intr_is_nmi(struct pt_regs *regs)
-{
-#ifdef __powerpc64__
- return (regs->softe & IRQS_DISABLED);
-#else
- return 0;
-#endif
-}
-
static void perf_event_interrupt(struct pt_regs *regs);
/*
@@ -659,13 +646,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
struct perf_event *event;
unsigned long val;
int found = 0;
- int nmi;
-
- nmi = perf_intr_is_nmi(regs);
- if (nmi)
- nmi_enter();
- else
- irq_enter();
for (i = 0; i < ppmu->n_counter; ++i) {
event = cpuhw->event[i];
@@ -690,11 +670,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
mtmsr(mfmsr() | MSR_PMM);
mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
isync();
-
- if (nmi)
- nmi_exit();
- else
- irq_exit();
}
void hw_perf_event_setup(int cpu)
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 6e7e820508df..e5eb33255066 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -764,6 +764,14 @@ static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
return ev_len;
}
+/*
+ * Return true in case of invalid or dummy events with names like RESERVED*
+ */
+static bool ignore_event(const char *name)
+{
+ return strncmp(name, "RESERVED", 8) == 0;
+}
+
#define MAX_4K (SIZE_MAX / 4096)
static int create_events_from_catalog(struct attribute ***events_,
@@ -894,6 +902,10 @@ static int create_events_from_catalog(struct attribute ***events_,
name = event_name(event, &nl);
+ if (ignore_event(name)) {
+ junk_events++;
+ continue;
+ }
if (event->event_group_record_len == 0) {
pr_devel("invalid event %zu (%.*s): group_record_len == 0, skipping\n",
event_idx, nl, name);
@@ -955,6 +967,9 @@ static int create_events_from_catalog(struct attribute ***events_,
continue;
name = event_name(event, &nl);
+ if (ignore_event(name))
+ continue;
+
nonce = event_uniq_add(&ev_uniq, name, nl, event->domain);
ct = event_data_to_attrs(event_idx, events + event_attr_ct,
event, nonce);
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 6ab5b272090a..e4f577da33d8 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -108,12 +108,57 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
*mmcra |= MMCRA_SDAR_MODE_TLB;
}
+static u64 p10_thresh_cmp_val(u64 value)
+{
+ int exp = 0;
+ u64 result = value;
+
+ if (!value)
+ return value;
+
+ /*
+ * In case of P10, the thresh_cmp value is not part of the raw event code
+ * and is provided via the attr.config1 parameter. To program the threshold in MMCRA,
+ * take an 18-bit number N and shift it right 2 places and increment
+ * the exponent E by 1 until the upper 10 bits of N are zero.
+ * Write E to the threshold exponent and write the lower 8 bits of N
+ * to the threshold mantissa.
+ * The max threshold that can be written is 261120.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ if (value > 261120)
+ value = 261120;
+ while ((64 - __builtin_clzl(value)) > 8) {
+ exp++;
+ value >>= 2;
+ }
+
+ /*
+ * Note that it is invalid to write a mantissa with the
+ * upper 2 bits of the mantissa being zero, unless the
+ * exponent is also zero.
+ */
+ if (!(value & 0xC0) && exp)
+ result = 0;
+ else
+ result = (exp << 8) | value;
+ }
+ return result;
+}
+
static u64 thresh_cmp_val(u64 value)
{
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ value = p10_thresh_cmp_val(value);
+
+ /*
+ * Since the location of the threshold compare bits in MMCRA
+ * is different for p8, use a different shift value.
+ */
if (cpu_has_feature(CPU_FTR_ARCH_300))
return value << p9_MMCRA_THR_CMP_SHIFT;
-
- return value << MMCRA_THR_CMP_SHIFT;
+ else
+ return value << MMCRA_THR_CMP_SHIFT;
}
static unsigned long combine_from_event(u64 event)
@@ -141,13 +186,13 @@ static bool is_thresh_cmp_valid(u64 event)
{
unsigned int cmp, exp;
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ return p10_thresh_cmp_val(event) != 0;
+
/*
* Check the mantissa upper two bits are not zero, unless the
* exponent is also zero. See the THRESH_CMP_MANTISSA doc.
- * Power10: thresh_cmp is replaced by l2_l3 event select.
*/
- if (cpu_has_feature(CPU_FTR_ARCH_31))
- return false;
cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
exp = cmp >> 7;
@@ -256,7 +301,7 @@ void isa207_get_mem_weight(u64 *weight)
*weight = mantissa << (2 * exp);
}
-int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
+int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, u64 event_config1)
{
unsigned int unit, pmc, cache, ebb;
unsigned long mask, value;
@@ -355,9 +400,11 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
}
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- if (event_is_threshold(event)) {
+ if (event_is_threshold(event) && is_thresh_cmp_valid(event_config1)) {
mask |= CNST_THRESH_CTL_SEL_MASK;
value |= CNST_THRESH_CTL_SEL_VAL(event >> EVENT_THRESH_SHIFT);
+ mask |= p10_CNST_THRESH_CMP_MASK;
+ value |= p10_CNST_THRESH_CMP_VAL(p10_thresh_cmp_val(event_config1));
}
} else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
if (event_is_threshold(event) && is_thresh_cmp_valid(event)) {
@@ -411,7 +458,7 @@ ebb_bhrb:
int isa207_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], struct mmcr_regs *mmcr,
- struct perf_event *pevents[])
+ struct perf_event *pevents[], u32 flags)
{
unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
unsigned long mmcr3;
@@ -504,6 +551,10 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
val = (event[i] >> EVENT_THR_CMP_SHIFT) &
EVENT_THR_CMP_MASK;
mmcra |= thresh_cmp_val(val);
+ } else if (flags & PPMU_HAS_ATTR_CONFIG1) {
+ val = (pevents[i]->attr.config1 >> p10_EVENT_THR_CMP_SHIFT) &
+ p10_EVENT_THR_CMP_MASK;
+ mmcra |= thresh_cmp_val(val);
}
}
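
The exponent/mantissa reduction done by p10_thresh_cmp_val() in the hunk above is easier to follow with a worked example. The sketch below is a standalone userspace re-implementation of the same arithmetic, not kernel code; the 261120 clamp and the 0xC0 mantissa check mirror the hunk, and the sample inputs are purely illustrative.

#include <stdio.h>

/* Standalone sketch of the P10 thresh_cmp encoding shown above. */
static unsigned long p10_thresh_cmp_encode(unsigned long value)
{
	int exp = 0;

	if (!value)
		return 0;
	if (value > 261120)		/* max encodable threshold */
		value = 261120;
	/* shift right by 2 and bump the exponent until only 8 bits remain */
	while ((64 - __builtin_clzl(value)) > 8) {
		exp++;
		value >>= 2;
	}
	/* mantissa must have one of its top two bits set unless exp == 0 */
	if (!(value & 0xC0) && exp)
		return 0;
	return ((unsigned long)exp << 8) | value;
}

int main(void)
{
	/* 261120 reduces to exponent 5, mantissa 0xFF -> 0x5ff */
	printf("%#lx\n", p10_thresh_cmp_encode(261120));
	/* 100 already fits in 8 bits, so it is kept as-is with exponent 0 */
	printf("%#lx\n", p10_thresh_cmp_encode(100));
	return 0;
}
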
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index 454b32c31440..1af0e8c97ac7 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -105,6 +105,10 @@
#define p10_EVENT_RADIX_SCOPE_QUAL_MASK 0x1
#define p10_MMCR1_RADIX_SCOPE_QUAL_SHIFT 45
+/* Event Threshold Compare bit constant for power10 in config1 attribute */
+#define p10_EVENT_THR_CMP_SHIFT 0
+#define p10_EVENT_THR_CMP_MASK 0x3FFFFull
+
#define p10_EVENT_VALID_MASK \
((p10_SDAR_MODE_MASK << p10_SDAR_MODE_SHIFT | \
(p10_EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \
@@ -124,8 +128,8 @@
* 60 56 52 48 44 40 36 32
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
* [ fab_match ] [ thresh_cmp ] [ thresh_ctl ] [ ]
- * |
- * thresh_sel -*
+ * | |
+ * [ thresh_cmp bits for p10] thresh_sel -*
*
* 28 24 20 16 12 8 4 0
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
@@ -152,6 +156,9 @@
#define CNST_THRESH_CTL_SEL_VAL(v) (((v) & 0x7ffull) << 32)
#define CNST_THRESH_CTL_SEL_MASK CNST_THRESH_CTL_SEL_VAL(0x7ff)
+#define p10_CNST_THRESH_CMP_VAL(v) (((v) & 0x7ffull) << 43)
+#define p10_CNST_THRESH_CMP_MASK p10_CNST_THRESH_CMP_VAL(0x7ff)
+
#define CNST_EBB_VAL(v) (((v) & EVENT_EBB_MASK) << 24)
#define CNST_EBB_MASK CNST_EBB_VAL(EVENT_EBB_MASK)
@@ -262,10 +269,10 @@
#define PH(a, b) (P(LVL, HIT) | P(a, b))
#define PM(a, b) (P(LVL, MISS) | P(a, b))
-int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp);
+int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, u64 event_config1);
int isa207_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], struct mmcr_regs *mmcr,
- struct perf_event *pevents[]);
+ struct perf_event *pevents[], u32 flags);
void isa207_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr);
int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags,
const unsigned int ev_alt[][MAX_ALT]);
diff --git a/arch/powerpc/perf/mpc7450-pmu.c b/arch/powerpc/perf/mpc7450-pmu.c
index 1919e9df9165..e39b15b79a83 100644
--- a/arch/powerpc/perf/mpc7450-pmu.c
+++ b/arch/powerpc/perf/mpc7450-pmu.c
@@ -148,7 +148,7 @@ static u32 classbits[N_CLASSES - 1][2] = {
};
static int mpc7450_get_constraint(u64 event, unsigned long *maskp,
- unsigned long *valp)
+ unsigned long *valp, u64 event_config1 __maybe_unused)
{
int pmc, class;
u32 mask, value;
@@ -258,7 +258,8 @@ static const u32 pmcsel_mask[N_COUNTER] = {
*/
static int mpc7450_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[],
struct mmcr_regs *mmcr,
- struct perf_event *pevents[])
+ struct perf_event *pevents[],
+ u32 flags __maybe_unused)
{
u8 event_index[N_CLASSES][N_COUNTER];
int n_classevent[N_CLASSES];
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index 6f681b105eec..b931eed482c9 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -75,6 +75,8 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
static u64 get_ext_regs_value(int idx)
{
switch (idx) {
+ case PERF_REG_POWERPC_PMC1 ... PERF_REG_POWERPC_PMC6:
+ return get_pmcs_ext_regs(idx - PERF_REG_POWERPC_PMC1);
case PERF_REG_POWERPC_MMCR0:
return mfspr(SPRN_MMCR0);
case PERF_REG_POWERPC_MMCR1:
@@ -95,13 +97,6 @@ static u64 get_ext_regs_value(int idx)
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
- u64 perf_reg_extended_max = PERF_REG_POWERPC_MAX;
-
- if (cpu_has_feature(CPU_FTR_ARCH_31))
- perf_reg_extended_max = PERF_REG_MAX_ISA_31;
- else if (cpu_has_feature(CPU_FTR_ARCH_300))
- perf_reg_extended_max = PERF_REG_MAX_ISA_300;
-
if (idx == PERF_REG_POWERPC_SIER &&
(IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
IS_ENABLED(CONFIG_PPC32) ||
@@ -113,14 +108,14 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
IS_ENABLED(CONFIG_PPC32)))
return 0;
- if (idx >= PERF_REG_POWERPC_MAX && idx < perf_reg_extended_max)
+ if (idx >= PERF_REG_POWERPC_MAX && idx < PERF_REG_EXTENDED_MAX)
return get_ext_regs_value(idx);
/*
* If the idx is referring to value beyond the
* supported registers, return 0 with a warning
*/
- if (WARN_ON_ONCE(idx >= perf_reg_extended_max))
+ if (WARN_ON_ONCE(idx >= PERF_REG_EXTENDED_MAX))
return 0;
return regs_get_register(regs, pt_regs_offset[idx]);
diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c
index 79e0206ca454..a901c1348cad 100644
--- a/arch/powerpc/perf/power10-pmu.c
+++ b/arch/powerpc/perf/power10-pmu.c
@@ -216,6 +216,7 @@ PMU_FORMAT_ATTR(invert_bit, "config:47");
PMU_FORMAT_ATTR(src_mask, "config:48-53");
PMU_FORMAT_ATTR(src_match, "config:54-59");
PMU_FORMAT_ATTR(radix_scope, "config:9");
+PMU_FORMAT_ATTR(thresh_cmp, "config1:0-17");
static struct attribute *power10_pmu_format_attr[] = {
&format_attr_event.attr,
@@ -236,6 +237,7 @@ static struct attribute *power10_pmu_format_attr[] = {
&format_attr_src_mask.attr,
&format_attr_src_match.attr,
&format_attr_radix_scope.attr,
+ &format_attr_thresh_cmp.attr,
NULL,
};
@@ -550,7 +552,7 @@ static struct power_pmu power10_pmu = {
.get_mem_weight = isa207_get_mem_weight,
.disable_pmc = isa207_disable_pmc,
.flags = PPMU_HAS_SIER | PPMU_ARCH_207S |
- PPMU_ARCH_31,
+ PPMU_ARCH_31 | PPMU_HAS_ATTR_CONFIG1,
.n_generic = ARRAY_SIZE(power10_generic_events),
.generic_events = power10_generic_events,
.cache_events = &power10_cache_events,
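
With PPMU_HAS_ATTR_CONFIG1 set and the thresh_cmp format exported as config1:0-17 above, userspace can pass the threshold compare value through perf_event_attr.config1 rather than the raw event code. The following is only a rough userspace sketch under that assumption; the raw event code used here is a made-up placeholder, not a real Power10 event.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x35340401000000ULL;	/* placeholder raw event code */
	attr.config1 = 1000;			/* thresh_cmp, config1 bits 0-17 */
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);
	return 0;
}
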
diff --git a/arch/powerpc/perf/power5+-pmu.c b/arch/powerpc/perf/power5+-pmu.c
index 3e64b4a1511f..18732267993a 100644
--- a/arch/powerpc/perf/power5+-pmu.c
+++ b/arch/powerpc/perf/power5+-pmu.c
@@ -132,7 +132,7 @@ static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
};
static int power5p_get_constraint(u64 event, unsigned long *maskp,
- unsigned long *valp)
+ unsigned long *valp, u64 event_config1 __maybe_unused)
{
int pmc, byte, unit, sh;
int bit, fmask;
@@ -451,7 +451,8 @@ static int power5p_marked_instr_event(u64 event)
static int power5p_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], struct mmcr_regs *mmcr,
- struct perf_event *pevents[])
+ struct perf_event *pevents[],
+ u32 flags __maybe_unused)
{
unsigned long mmcr1 = 0;
unsigned long mmcra = 0;
diff --git a/arch/powerpc/perf/power5-pmu.c b/arch/powerpc/perf/power5-pmu.c
index 017bb19b73fb..cb611c1e7abe 100644
--- a/arch/powerpc/perf/power5-pmu.c
+++ b/arch/powerpc/perf/power5-pmu.c
@@ -136,7 +136,7 @@ static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
};
static int power5_get_constraint(u64 event, unsigned long *maskp,
- unsigned long *valp)
+ unsigned long *valp, u64 event_config1 __maybe_unused)
{
int pmc, byte, unit, sh;
int bit, fmask;
@@ -382,7 +382,8 @@ static int power5_marked_instr_event(u64 event)
static int power5_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], struct mmcr_regs *mmcr,
- struct perf_event *pevents[])
+ struct perf_event *pevents[],
+ u32 flags __maybe_unused)
{
unsigned long mmcr1 = 0;
unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
diff --git a/arch/powerpc/perf/power6-pmu.c b/arch/powerpc/perf/power6-pmu.c
index 189974478e9f..69ef38216418 100644
--- a/arch/powerpc/perf/power6-pmu.c
+++ b/arch/powerpc/perf/power6-pmu.c
@@ -173,7 +173,8 @@ static int power6_marked_instr_event(u64 event)
* Assign PMC numbers and compute MMCR1 value for a set of events
*/
static int p6_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], struct mmcr_regs *mmcr, struct perf_event *pevents[])
+ unsigned int hwc[], struct mmcr_regs *mmcr, struct perf_event *pevents[],
+ u32 flags __maybe_unused)
{
unsigned long mmcr1 = 0;
unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
@@ -266,7 +267,7 @@ static int p6_compute_mmcr(u64 event[], int n_ev,
* 32-34 select field: nest (subunit) event selector
*/
static int p6_get_constraint(u64 event, unsigned long *maskp,
- unsigned long *valp)
+ unsigned long *valp, u64 event_config1 __maybe_unused)
{
int pmc, byte, sh, subunit;
unsigned long mask = 0, value = 0;
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index bacfab104a1a..894c17f9a762 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -81,7 +81,7 @@ enum {
*/
static int power7_get_constraint(u64 event, unsigned long *maskp,
- unsigned long *valp)
+ unsigned long *valp, u64 event_config1 __maybe_unused)
{
int pmc, sh, unit;
unsigned long mask = 0, value = 0;
@@ -245,7 +245,8 @@ static int power7_marked_instr_event(u64 event)
static int power7_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], struct mmcr_regs *mmcr,
- struct perf_event *pevents[])
+ struct perf_event *pevents[],
+ u32 flags __maybe_unused)
{
unsigned long mmcr1 = 0;
unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
diff --git a/arch/powerpc/perf/ppc970-pmu.c b/arch/powerpc/perf/ppc970-pmu.c
index 7d78df97f272..1f8263785286 100644
--- a/arch/powerpc/perf/ppc970-pmu.c
+++ b/arch/powerpc/perf/ppc970-pmu.c
@@ -190,7 +190,7 @@ static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
};
static int p970_get_constraint(u64 event, unsigned long *maskp,
- unsigned long *valp)
+ unsigned long *valp, u64 event_config1 __maybe_unused)
{
int pmc, byte, unit, sh, spcsel;
unsigned long mask = 0, value = 0;
@@ -256,7 +256,8 @@ static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[])
static int p970_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], struct mmcr_regs *mmcr,
- struct perf_event *pevents[])
+ struct perf_event *pevents[],
+ u32 flags __maybe_unused)
{
unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
unsigned int pmc, unit, byte, psel;
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 78ac6d67a935..7d41e9264510 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -206,17 +206,10 @@ config AKEBONO
select PPC4xx_HSTA_MSI
select I2C
select I2C_IBM_IIC
- select NETDEVICES
- select ETHERNET
- select NET_VENDOR_IBM
select IBM_EMAC_EMAC4 if IBM_EMAC
select USB if USB_SUPPORT
select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD
select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD
- select MMC_SDHCI
- select MMC_SDHCI_PLTFM
- select ATA
- select SATA_AHCI_PLATFORM
help
This option enables support for the IBM Akebono (476gtr) evaluation board
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads.c b/arch/powerpc/platforms/512x/mpc5121_ads.c
index 6303fbfc4e4f..9d030c2e0004 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads.c
@@ -24,21 +24,23 @@
static void __init mpc5121_ads_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
printk(KERN_INFO "MPC5121 ADS board from Freescale Semiconductor\n");
/*
* cpld regs are needed early
*/
mpc5121_ads_cpld_map();
+ mpc512x_setup_arch();
+}
+
+static void __init mpc5121_ads_setup_pci(void)
+{
#ifdef CONFIG_PCI
+ struct device_node *np;
+
for_each_compatible_node(np, "pci", "fsl,mpc5121-pci")
mpc83xx_add_bridge(np);
#endif
-
- mpc512x_setup_arch();
}
static void __init mpc5121_ads_init_IRQ(void)
@@ -64,6 +66,7 @@ define_machine(mpc5121_ads) {
.name = "MPC5121 ADS",
.probe = mpc5121_ads_probe,
.setup_arch = mpc5121_ads_setup_arch,
+ .discover_phbs = mpc5121_ads_setup_pci,
.init = mpc512x_init,
.init_IRQ = mpc5121_ads_init_IRQ,
.get_irq = ipic_get_irq,
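
From here on the same mechanical conversion repeats for most of the 32-bit platforms: PCI host bridge probing is moved out of .setup_arch and wired up through the machdep discover_phbs() callback that these hunks populate. A toy model of that split is sketched below; the struct and function names are hypothetical and only illustrate the shape of the change.

#include <stdio.h>

/* Toy model: board setup no longer probes PCI itself; a separate hook
 * (discover_phbs in the hunks below) is called later by generic code. */
struct machdep {
	void (*setup_arch)(void);
	void (*discover_phbs)(void);	/* may be NULL on PCI-less boards */
};

static void board_setup_arch(void)    { printf("board setup\n"); }
static void board_discover_phbs(void) { printf("scan PCI host bridges\n"); }

static const struct machdep board = {
	.setup_arch    = board_setup_arch,
	.discover_phbs = board_discover_phbs,
};

int main(void)
{
	board.setup_arch();
	if (board.discover_phbs)	/* generic code calls it only if set */
		board.discover_phbs();
	return 0;
}
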
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 4514a6f7458a..3b7d70d71692 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -185,8 +185,6 @@ static void __init efika_setup_arch(void)
/* Map important registers from the internal memory map */
mpc52xx_map_common_devices();
- efika_pcisetup();
-
#ifdef CONFIG_PM
mpc52xx_suspend.board_suspend_prepare = efika_suspend_prepare;
mpc52xx_pm_init();
@@ -218,6 +216,7 @@ define_machine(efika)
.name = EFIKA_PLATFORM_NAME,
.probe = efika_probe,
.setup_arch = efika_setup_arch,
+ .discover_phbs = efika_pcisetup,
.init = mpc52xx_declare_of_platform_devices,
.show_cpuinfo = efika_show_cpuinfo,
.init_IRQ = mpc52xx_init_irq,
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
index 3181aac08225..04cc97397095 100644
--- a/arch/powerpc/platforms/52xx/lite5200.c
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -165,8 +165,6 @@ static void __init lite5200_setup_arch(void)
mpc52xx_suspend.board_resume_finish = lite5200_resume_finish;
lite5200_pm_init();
#endif
-
- mpc52xx_setup_pci();
}
static const char * const board[] __initconst = {
@@ -187,6 +185,7 @@ define_machine(lite5200) {
.name = "lite5200",
.probe = lite5200_probe,
.setup_arch = lite5200_setup_arch,
+ .discover_phbs = mpc52xx_setup_pci,
.init = mpc52xx_declare_of_platform_devices,
.init_IRQ = mpc52xx_init_irq,
.get_irq = mpc52xx_get_irq,
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 07c5bc4ed0b5..efb8bdecbcc7 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -202,8 +202,6 @@ static void __init media5200_setup_arch(void)
/* Some mpc5200 & mpc5200b related configuration */
mpc5200_setup_xlb_arbiter();
- mpc52xx_setup_pci();
-
np = of_find_matching_node(NULL, mpc5200_gpio_ids);
gpio = of_iomap(np, 0);
of_node_put(np);
@@ -244,6 +242,7 @@ define_machine(media5200_platform) {
.name = "media5200-platform",
.probe = media5200_probe,
.setup_arch = media5200_setup_arch,
+ .discover_phbs = mpc52xx_setup_pci,
.init = mpc52xx_declare_of_platform_devices,
.init_IRQ = media5200_init_irq,
.get_irq = mpc52xx_get_irq,
diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c
index 2d01e9b2e779..b9f5675b0a1d 100644
--- a/arch/powerpc/platforms/52xx/mpc5200_simple.c
+++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c
@@ -40,8 +40,6 @@ static void __init mpc5200_simple_setup_arch(void)
/* Some mpc5200 & mpc5200b related configuration */
mpc5200_setup_xlb_arbiter();
-
- mpc52xx_setup_pci();
}
/* list of the supported boards */
@@ -73,6 +71,7 @@ define_machine(mpc5200_simple_platform) {
.name = "mpc5200-simple-platform",
.probe = mpc5200_simple_probe,
.setup_arch = mpc5200_simple_setup_arch,
+ .discover_phbs = mpc52xx_setup_pci,
.init = mpc52xx_declare_of_platform_devices,
.init_IRQ = mpc52xx_init_irq,
.get_irq = mpc52xx_get_irq,
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index 05e19470d523..b91ebebd9ff2 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -229,7 +229,7 @@ static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id)
int dma, write, poll_dma;
spin_lock_irqsave(&lpbfifo.lock, flags);
- ts = get_tbl();
+ ts = mftb();
req = lpbfifo.req;
if (!req) {
@@ -307,7 +307,7 @@ static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id)
if (irq != 0) /* don't increment on polled case */
req->irq_count++;
- req->irq_ticks += get_tbl() - ts;
+ req->irq_ticks += mftb() - ts;
spin_unlock_irqrestore(&lpbfifo.lock, flags);
/* Spinlock is released; it is now safe to call the callback */
@@ -330,7 +330,7 @@ static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id)
u32 ts;
spin_lock_irqsave(&lpbfifo.lock, flags);
- ts = get_tbl();
+ ts = mftb();
req = lpbfifo.req;
if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) {
@@ -361,7 +361,7 @@ static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id)
lpbfifo.req = NULL;
/* Release the lock before calling out to the callback. */
- req->irq_ticks += get_tbl() - ts;
+ req->irq_ticks += mftb() - ts;
spin_unlock_irqrestore(&lpbfifo.lock, flags);
if (req->callback)
diff --git a/arch/powerpc/platforms/82xx/mpc8272_ads.c b/arch/powerpc/platforms/82xx/mpc8272_ads.c
index 3fe1a6593280..0b5b9dec16d5 100644
--- a/arch/powerpc/platforms/82xx/mpc8272_ads.c
+++ b/arch/powerpc/platforms/82xx/mpc8272_ads.c
@@ -171,7 +171,6 @@ static void __init mpc8272_ads_setup_arch(void)
iounmap(bcsr);
init_ioports();
- pq2_init_pci();
if (ppc_md.progress)
ppc_md.progress("mpc8272_ads_setup_arch(), finish", 0);
@@ -205,6 +204,7 @@ define_machine(mpc8272_ads)
.name = "Freescale MPC8272 ADS",
.probe = mpc8272_ads_probe,
.setup_arch = mpc8272_ads_setup_arch,
+ .discover_phbs = pq2_init_pci,
.init_IRQ = mpc8272_ads_pic_init,
.get_irq = cpm2_get_irq,
.calibrate_decr = generic_calibrate_decr,
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 096cc0d59fd8..f82f75a6085c 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -123,20 +123,17 @@ int __init pq2ads_pci_init_irq(void)
np = of_find_compatible_node(NULL, NULL, "fsl,pq2ads-pci-pic");
if (!np) {
printk(KERN_ERR "No pci pic node in device tree.\n");
- of_node_put(np);
goto out;
}
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
printk(KERN_ERR "No interrupt in pci pic node.\n");
- of_node_put(np);
- goto out;
+ goto out_put_node;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
- of_node_put(np);
ret = -ENOMEM;
goto out_unmap_irq;
}
@@ -161,17 +158,17 @@ int __init pq2ads_pci_init_irq(void)
priv->host = host;
irq_set_handler_data(irq, priv);
irq_set_chained_handler(irq, pq2ads_pci_irq_demux);
-
- of_node_put(np);
- return 0;
+ ret = 0;
+ goto out_put_node;
out_unmap_regs:
iounmap(priv->regs);
out_free_kmalloc:
kfree(priv);
- of_node_put(np);
out_unmap_irq:
irq_dispose_mapping(irq);
+out_put_node:
+ of_node_put(np);
out:
return ret;
}
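
The pq2ads_pci_init_irq() rework above replaces the per-branch of_node_put() calls with a single out_put_node label, the usual kernel pattern for unwinding through one exit path. A minimal self-contained sketch of that pattern follows; the resource names are hypothetical stand-ins, not the driver's real ones.

#include <errno.h>
#include <stdlib.h>
#include <stdio.h>

/* Every failure (and the success path) funnels through one label that drops
 * the reference which is only needed during initialisation. */
static int init_example(void)
{
	char *node = malloc(16);	/* stands in for the of_node reference */
	char *priv;
	int ret = -ENODEV;

	if (!node)
		goto out;		/* nothing acquired yet */

	priv = malloc(16);		/* stands in for the driver state */
	if (!priv) {
		ret = -ENOMEM;
		goto out_put_node;	/* only undo what was acquired */
	}

	/* ... hook up priv; a real driver keeps it registered (leaked here
	 * for brevity) ... */
	ret = 0;

out_put_node:
	free(node);			/* the single place the reference is dropped */
out:
	return ret;
}

int main(void)
{
	printf("init_example() = %d\n", init_example());
	return 0;
}
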
diff --git a/arch/powerpc/platforms/82xx/pq2fads.c b/arch/powerpc/platforms/82xx/pq2fads.c
index a74082140718..ac9113d524af 100644
--- a/arch/powerpc/platforms/82xx/pq2fads.c
+++ b/arch/powerpc/platforms/82xx/pq2fads.c
@@ -150,8 +150,6 @@ static void __init pq2fads_setup_arch(void)
/* Enable external IRQs */
clrbits32(&cpm2_immr->im_siu_conf.siu_82xx.sc_siumcr, 0x0c000000);
- pq2_init_pci();
-
if (ppc_md.progress)
ppc_md.progress("pq2fads_setup_arch(), finish", 0);
}
@@ -184,6 +182,7 @@ define_machine(pq2fads)
.name = "Freescale PQ2FADS",
.probe = pq2fads_probe,
.setup_arch = pq2fads_setup_arch,
+ .discover_phbs = pq2_init_pci,
.init_IRQ = pq2fads_pic_init,
.get_irq = cpm2_get_irq,
.calibrate_decr = generic_calibrate_decr,
diff --git a/arch/powerpc/platforms/83xx/asp834x.c b/arch/powerpc/platforms/83xx/asp834x.c
index 28474876f41b..68061c2a57c1 100644
--- a/arch/powerpc/platforms/83xx/asp834x.c
+++ b/arch/powerpc/platforms/83xx/asp834x.c
@@ -44,6 +44,7 @@ define_machine(asp834x) {
.name = "ASP8347E",
.probe = asp834x_probe,
.setup_arch = asp834x_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index bcdc2c203ec9..108e1e4d2683 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -180,6 +180,7 @@ define_machine(mpc83xx_km) {
.name = "mpc83xx-km-platform",
.probe = mpc83xx_km_probe,
.setup_arch = mpc83xx_km_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
index a952e91db3ee..3285dabcf923 100644
--- a/arch/powerpc/platforms/83xx/misc.c
+++ b/arch/powerpc/platforms/83xx/misc.c
@@ -132,8 +132,6 @@ void __init mpc83xx_setup_arch(void)
setbat(-1, va, immrbase, immrsize, PAGE_KERNEL_NCG);
update_bats();
}
-
- mpc83xx_setup_pci();
}
int machine_check_83xx(struct pt_regs *regs)
diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
index 51426e88ec67..956d4389effa 100644
--- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
@@ -48,6 +48,7 @@ define_machine(mpc830x_rdb) {
.name = "MPC830x RDB",
.probe = mpc830x_rdb_probe,
.setup_arch = mpc830x_rdb_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index 5ccd57a48492..3b578f080e3b 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -48,6 +48,7 @@ define_machine(mpc831x_rdb) {
.name = "MPC831x RDB",
.probe = mpc831x_rdb_probe,
.setup_arch = mpc831x_rdb_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index 6fa5402ebf20..850d566ef900 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -101,6 +101,7 @@ define_machine(mpc832x_mds) {
.name = "MPC832x MDS",
.probe = mpc832x_sys_probe,
.setup_arch = mpc832x_sys_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 622c625d5ce4..b6133a237a70 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -219,6 +219,7 @@ define_machine(mpc832x_rdb) {
.name = "MPC832x RDB",
.probe = mpc832x_rdb_probe,
.setup_arch = mpc832x_rdb_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index ebfd139bca20..9630f3aa4d9c 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -70,6 +70,7 @@ define_machine(mpc834x_itx) {
.name = "MPC834x ITX",
.probe = mpc834x_itx_probe,
.setup_arch = mpc834x_itx_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c
index 356228e35279..6d91bdce0a18 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c
@@ -91,6 +91,7 @@ define_machine(mpc834x_mds) {
.name = "MPC834x MDS",
.probe = mpc834x_mds_probe,
.setup_arch = mpc834x_mds_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index 90d9cbfae659..da4cf52cb55b 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -201,6 +201,7 @@ define_machine(mpc836x_mds) {
.name = "MPC836x MDS",
.probe = mpc836x_mds_probe,
.setup_arch = mpc836x_mds_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
index b4aac2cde849..3427ad0d9d38 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
@@ -41,6 +41,7 @@ define_machine(mpc836x_rdk) {
.name = "MPC836x RDK",
.probe = mpc836x_rdk_probe,
.setup_arch = mpc836x_rdk_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index 9d3721c965be..f28d166ea7db 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -93,6 +93,7 @@ define_machine(mpc837x_mds) {
.name = "MPC837x MDS",
.probe = mpc837x_mds_probe,
.setup_arch = mpc837x_mds_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index 7c45f7ac2607..7fb7684c256b 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -73,6 +73,7 @@ define_machine(mpc837x_rdb) {
.name = "MPC837x RDB/WLAN",
.probe = mpc837x_rdb_probe,
.setup_arch = mpc837x_rdb_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index f37d04332fc7..a30d30588cf6 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -76,7 +76,7 @@ extern void mpc83xx_ipic_init_IRQ(void);
#ifdef CONFIG_PCI
extern void mpc83xx_setup_pci(void);
#else
-#define mpc83xx_setup_pci() do {} while (0)
+#define mpc83xx_setup_pci NULL
#endif
extern int mpc83xx_declare_of_platform_devices(void);
diff --git a/arch/powerpc/platforms/8xx/machine_check.c b/arch/powerpc/platforms/8xx/machine_check.c
index 88dedf38eccd..656365975895 100644
--- a/arch/powerpc/platforms/8xx/machine_check.c
+++ b/arch/powerpc/platforms/8xx/machine_check.c
@@ -26,7 +26,7 @@ int machine_check_8xx(struct pt_regs *regs)
* to deal with that than having a wart in the mcheck handler.
* -- BenH
*/
- bad_page_fault(regs, regs->dar, SIGBUS);
+ bad_page_fault(regs, SIGBUS);
return 1;
#else
return 0;
diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c
index f5d0bf999759..9d252c554f7f 100644
--- a/arch/powerpc/platforms/amigaone/setup.c
+++ b/arch/powerpc/platforms/amigaone/setup.c
@@ -66,6 +66,12 @@ static int __init amigaone_add_bridge(struct device_node *dev)
void __init amigaone_setup_arch(void)
{
+ if (ppc_md.progress)
+ ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0);
+}
+
+static void __init amigaone_discover_phbs(void)
+{
struct device_node *np;
int phb = -ENODEV;
@@ -74,9 +80,6 @@ void __init amigaone_setup_arch(void)
phb = amigaone_add_bridge(np);
BUG_ON(phb != 0);
-
- if (ppc_md.progress)
- ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0);
}
void __init amigaone_init_IRQ(void)
@@ -159,6 +162,7 @@ define_machine(amigaone) {
.name = "AmigaOne",
.probe = amigaone_probe,
.setup_arch = amigaone_setup_arch,
+ .discover_phbs = amigaone_discover_phbs,
.show_cpuinfo = amigaone_show_cpuinfo,
.init_IRQ = amigaone_init_IRQ,
.restart = amigaone_restart,
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index f2ff359041ee..e7c976bcadff 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -100,8 +100,3 @@ config CBE_CPUFREQ_SPU_GOVERNOR
the minimal possible frequency.
endmenu
-
-config OPROFILE_CELL
- def_bool y
- depends on PPC_CELL_NATIVE && (OPROFILE = m || OPROFILE = y) && SPU_BASE
-
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 10064a33ca96..7ea6692f67e2 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -19,7 +19,6 @@ spu-priv1-$(CONFIG_PPC_CELL_COMMON) += spu_priv1_mmio.o
spu-manage-$(CONFIG_PPC_CELL_COMMON) += spu_manage.o
obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
- spu_notify.o \
spu_syscalls.o \
$(spu-priv1-y) \
$(spu-manage-y) \
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index 9068edef71f7..5b9a7e9f144b 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -25,6 +25,7 @@
#include <asm/cpu_has_feature.h>
#include "pervasive.h"
+#include "ras.h"
static void cbe_power_save(void)
{
diff --git a/arch/powerpc/platforms/cell/pervasive.h b/arch/powerpc/platforms/cell/pervasive.h
index c6fccad6caee..0da74ab10716 100644
--- a/arch/powerpc/platforms/cell/pervasive.h
+++ b/arch/powerpc/platforms/cell/pervasive.h
@@ -13,9 +13,6 @@
#define PERVASIVE_H
extern void cbe_pervasive_init(void);
-extern void cbe_system_error_exception(struct pt_regs *regs);
-extern void cbe_maintenance_exception(struct pt_regs *regs);
-extern void cbe_thermal_exception(struct pt_regs *regs);
#ifdef CONFIG_PPC_IBM_CELL_RESETBUTTON
extern int cbe_sysreset_hack(void);
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 6ea480539419..4325c05bedd9 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -49,7 +49,7 @@ static void dump_fir(int cpu)
}
-void cbe_system_error_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(cbe_system_error_exception)
{
int cpu = smp_processor_id();
@@ -58,7 +58,7 @@ void cbe_system_error_exception(struct pt_regs *regs)
dump_stack();
}
-void cbe_maintenance_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(cbe_maintenance_exception)
{
int cpu = smp_processor_id();
@@ -70,7 +70,7 @@ void cbe_maintenance_exception(struct pt_regs *regs)
dump_stack();
}
-void cbe_thermal_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(cbe_thermal_exception)
{
int cpu = smp_processor_id();
diff --git a/arch/powerpc/platforms/cell/ras.h b/arch/powerpc/platforms/cell/ras.h
index 6c2e6bc0062e..226dbd48efad 100644
--- a/arch/powerpc/platforms/cell/ras.h
+++ b/arch/powerpc/platforms/cell/ras.h
@@ -2,9 +2,12 @@
#ifndef RAS_H
#define RAS_H
-extern void cbe_system_error_exception(struct pt_regs *regs);
-extern void cbe_maintenance_exception(struct pt_regs *regs);
-extern void cbe_thermal_exception(struct pt_regs *regs);
+#include <asm/interrupt.h>
+
+DECLARE_INTERRUPT_HANDLER(cbe_system_error_exception);
+DECLARE_INTERRUPT_HANDLER(cbe_maintenance_exception);
+DECLARE_INTERRUPT_HANDLER(cbe_thermal_exception);
+
extern void cbe_ras_init(void);
#endif /* RAS_H */
diff --git a/arch/powerpc/platforms/cell/spu_notify.c b/arch/powerpc/platforms/cell/spu_notify.c
deleted file mode 100644
index 67870abf3715..000000000000
--- a/arch/powerpc/platforms/cell/spu_notify.c
+++ /dev/null
@@ -1,55 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Move OProfile dependencies from spufs module to the kernel so it
- * can run on non-cell PPC.
- *
- * Copyright (C) IBM 2005
- */
-
-#undef DEBUG
-
-#include <linux/export.h>
-#include <linux/notifier.h>
-#include <asm/spu.h>
-#include "spufs/spufs.h"
-
-static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);
-
-void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
-{
- blocking_notifier_call_chain(&spu_switch_notifier,
- ctx ? ctx->object_id : 0, spu);
-}
-EXPORT_SYMBOL_GPL(spu_switch_notify);
-
-int spu_switch_event_register(struct notifier_block *n)
-{
- int ret;
- ret = blocking_notifier_chain_register(&spu_switch_notifier, n);
- if (!ret)
- notify_spus_active();
- return ret;
-}
-EXPORT_SYMBOL_GPL(spu_switch_event_register);
-
-int spu_switch_event_unregister(struct notifier_block *n)
-{
- return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
-}
-EXPORT_SYMBOL_GPL(spu_switch_event_unregister);
-
-void spu_set_profile_private_kref(struct spu_context *ctx,
- struct kref *prof_info_kref,
- void (* prof_info_release) (struct kref *kref))
-{
- ctx->prof_priv_kref = prof_info_kref;
- ctx->prof_priv_release = prof_info_release;
-}
-EXPORT_SYMBOL_GPL(spu_set_profile_private_kref);
-
-void *spu_get_profile_private_kref(struct spu_context *ctx)
-{
- return ctx->prof_priv_kref;
-}
-EXPORT_SYMBOL_GPL(spu_get_profile_private_kref);
-
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 25390569e24c..b83a3670bd74 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -91,14 +91,15 @@ out:
}
static int
-spufs_setattr(struct dentry *dentry, struct iattr *attr)
+spufs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
if ((attr->ia_valid & ATTR_SIZE) &&
(attr->ia_size != inode->i_size))
return -EINVAL;
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
return 0;
}
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 3f2380f40f99..ce52b87496d2 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -353,7 +353,6 @@ static int spu_process_callback(struct spu_context *ctx)
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
int ret;
- struct spu *spu;
u32 status;
if (mutex_lock_interruptible(&ctx->run_mutex))
@@ -386,13 +385,10 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
mutex_lock(&ctx->state_mutex);
break;
}
- spu = ctx->spu;
if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
&ctx->sched_flags))) {
- if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
- spu_switch_notify(spu, ctx);
+ if (!(status & SPU_STATUS_STOPPED_BY_STOP))
continue;
- }
}
spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index f18d5067cd0f..369206489895 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -72,7 +72,7 @@ static struct timer_list spuloadavg_timer;
#define DEF_SPU_TIMESLICE (100 * HZ / (1000 * SPUSCHED_TICK))
#define SCALE_PRIO(x, prio) \
- max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
+ max(x * (MAX_PRIO - prio) / (NICE_WIDTH / 2), MIN_SPU_TIMESLICE)
/*
* scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
@@ -181,9 +181,6 @@ void do_notify_spus_active(void)
/*
* Wake up the active spu_contexts.
- *
- * When the awakened processes see their "notify_active" flag is set,
- * they will call spu_switch_notify().
*/
for_each_online_node(node) {
struct spu *spu;
@@ -239,7 +236,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
spu_restore(&ctx->csa, spu);
spu->timestamp = jiffies;
- spu_switch_notify(spu, ctx);
ctx->state = SPU_STATE_RUNNABLE;
spuctx_switch_state(ctx, SPU_UTIL_USER);
@@ -440,7 +436,6 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
*/
atomic_dec_if_positive(&ctx->gang->aff_sched_count);
- spu_switch_notify(spu, NULL);
spu_unmap_mappings(ctx);
spu_save(&ctx->csa, spu);
spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 1ba4d884febf..afc1d6604d12 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -281,7 +281,6 @@ void spu_del_from_rq(struct spu_context *ctx);
int spu_activate(struct spu_context *ctx, unsigned long flags);
void spu_deactivate(struct spu_context *ctx);
void spu_yield(struct spu_context *ctx);
-void spu_switch_notify(struct spu *spu, struct spu_context *ctx);
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
u32 type, u32 val);
void spu_set_timeslice(struct spu_context *ctx);
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index b2c2bf35b76c..8c421dc78b28 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -314,6 +314,14 @@ chrp_find_bridges(void)
}
}
of_node_put(root);
+
+ /*
+ * "Temporary" fixes for PCI devices.
+ * -- Geert
+ */
+ hydra_init(); /* Mac I/O */
+
+ pci_create_OF_bus_map();
}
/* SL82C105 IDE Control/Status Register */
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index c45435aa5e36..3cfc382841e5 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -334,22 +334,11 @@ static void __init chrp_setup_arch(void)
/* On pegasos, enable the L2 cache if not already done by OF */
pegasos_set_l2cr();
- /* Lookup PCI host bridges */
- chrp_find_bridges();
-
- /*
- * Temporary fixes for PCI devices.
- * -- Geert
- */
- hydra_init(); /* Mac I/O */
-
/*
* Fix the Super I/O configuration
*/
sio_init();
- pci_create_OF_bus_map();
-
/*
* Print the banner, then scroll down so boot progress
* can be printed. -- Cort
@@ -582,6 +571,7 @@ define_machine(chrp) {
.name = "CHRP",
.probe = chrp_probe,
.setup_arch = chrp_setup_arch,
+ .discover_phbs = chrp_find_bridges,
.init = chrp_init2,
.show_cpuinfo = chrp_show_cpuinfo,
.init_IRQ = chrp_init_IRQ,
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index d8f2e2c737bb..53065d564161 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -108,15 +108,13 @@ static void holly_remap_bridge(void)
tsi108_write_reg(TSI108_PCI_P2O_BAR2, 0x0);
}
-static void __init holly_setup_arch(void)
+static void __init holly_init_pci(void)
{
struct device_node *np;
if (ppc_md.progress)
ppc_md.progress("holly_setup_arch():set_bridge", 0);
- tsi108_csr_vir_base = get_vir_csrbase();
-
/* setup PCI host bridge */
holly_remap_bridge();
@@ -127,6 +125,11 @@ static void __init holly_setup_arch(void)
ppc_md.pci_exclude_device = holly_exclude_device;
if (ppc_md.progress)
ppc_md.progress("tsi108: resources set", 0x100);
+}
+
+static void __init holly_setup_arch(void)
+{
+ tsi108_csr_vir_base = get_vir_csrbase();
printk(KERN_INFO "PPC750GX/CL Platform\n");
}
@@ -259,6 +262,7 @@ define_machine(holly){
.name = "PPC750 GX/CL TSI",
.probe = holly_probe,
.setup_arch = holly_setup_arch,
+ .discover_phbs = holly_init_pci,
.init_IRQ = holly_init_IRQ,
.show_cpuinfo = holly_show_cpuinfo,
.get_irq = mpic_get_irq,
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index f514d5d28cd4..eb8342e7f84e 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -64,14 +64,17 @@ static int __init linkstation_add_bridge(struct device_node *dev)
static void __init linkstation_setup_arch(void)
{
+ printk(KERN_INFO "BUFFALO Network Attached Storage Series\n");
+ printk(KERN_INFO "(C) 2002-2005 BUFFALO INC.\n");
+}
+
+static void __init linkstation_setup_pci(void)
+{
struct device_node *np;
/* Lookup PCI host bridges */
for_each_compatible_node(np, "pci", "mpc10x-pci")
linkstation_add_bridge(np);
-
- printk(KERN_INFO "BUFFALO Network Attached Storage Series\n");
- printk(KERN_INFO "(C) 2002-2005 BUFFALO INC.\n");
}
/*
@@ -153,6 +156,7 @@ define_machine(linkstation){
.name = "Buffalo Linkstation",
.probe = linkstation_probe,
.setup_arch = linkstation_setup_arch,
+ .discover_phbs = linkstation_setup_pci,
.init_IRQ = linkstation_init_IRQ,
.show_cpuinfo = linkstation_show_cpuinfo,
.get_irq = mpic_get_irq,
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index b95c3380d2b5..5565647dc879 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -58,16 +58,14 @@ int mpc7448_hpc2_exclude_device(struct pci_controller *hose,
return PCIBIOS_SUCCESSFUL;
}
-static void __init mpc7448_hpc2_setup_arch(void)
+static void __init mpc7448_hpc2_setup_pci(void)
{
+#ifdef CONFIG_PCI
struct device_node *np;
if (ppc_md.progress)
- ppc_md.progress("mpc7448_hpc2_setup_arch():set_bridge", 0);
-
- tsi108_csr_vir_base = get_vir_csrbase();
+ ppc_md.progress("mpc7448_hpc2_setup_pci():set_bridge", 0);
/* setup PCI host bridge */
-#ifdef CONFIG_PCI
for_each_compatible_node(np, "pci", "tsi108-pci")
tsi108_setup_pci(np, MPC7448HPC2_PCI_CFG_PHYS, 0);
@@ -75,6 +73,11 @@ static void __init mpc7448_hpc2_setup_arch(void)
if (ppc_md.progress)
ppc_md.progress("tsi108: resources set", 0x100);
#endif
+}
+
+static void __init mpc7448_hpc2_setup_arch(void)
+{
+ tsi108_csr_vir_base = get_vir_csrbase();
printk(KERN_INFO "MPC7448HPC2 (TAIGA) Platform\n");
printk(KERN_INFO
@@ -181,6 +184,7 @@ define_machine(mpc7448_hpc2){
.name = "MPC7448 HPC2",
.probe = mpc7448_hpc2_probe,
.setup_arch = mpc7448_hpc2_setup_arch,
+ .discover_phbs = mpc7448_hpc2_setup_pci,
.init_IRQ = mpc7448_hpc2_init_IRQ,
.show_cpuinfo = mpc7448_hpc2_show_cpuinfo,
.get_irq = mpic_get_irq,
diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c
index 1cd488daa0bf..c06a0490d157 100644
--- a/arch/powerpc/platforms/embedded6xx/mvme5100.c
+++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c
@@ -154,17 +154,19 @@ static const struct of_device_id mvme5100_of_bus_ids[] __initconst = {
*/
static void __init mvme5100_setup_arch(void)
{
- struct device_node *np;
-
if (ppc_md.progress)
ppc_md.progress("mvme5100_setup_arch()", 0);
- for_each_compatible_node(np, "pci", "hawk-pci")
- mvme5100_add_bridge(np);
-
restart = ioremap(BOARD_MODRST_REG, 4);
}
+static void __init mvme5100_setup_pci(void)
+{
+ struct device_node *np;
+
+ for_each_compatible_node(np, "pci", "hawk-pci")
+ mvme5100_add_bridge(np);
+}
static void mvme5100_show_cpuinfo(struct seq_file *m)
{
@@ -205,6 +207,7 @@ define_machine(mvme5100) {
.name = "MVME5100",
.probe = mvme5100_probe,
.setup_arch = mvme5100_setup_arch,
+ .discover_phbs = mvme5100_setup_pci,
.init_IRQ = mvme5100_pic_init,
.show_cpuinfo = mvme5100_show_cpuinfo,
.get_irq = mpic_get_irq,
diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c
index e346ddcef45e..e188b90f7016 100644
--- a/arch/powerpc/platforms/embedded6xx/storcenter.c
+++ b/arch/powerpc/platforms/embedded6xx/storcenter.c
@@ -66,13 +66,16 @@ static int __init storcenter_add_bridge(struct device_node *dev)
static void __init storcenter_setup_arch(void)
{
+ printk(KERN_INFO "IOMEGA StorCenter\n");
+}
+
+static void __init storcenter_setup_pci(void)
+{
struct device_node *np;
/* Lookup PCI host bridges */
for_each_compatible_node(np, "pci", "mpc10x-pci")
storcenter_add_bridge(np);
-
- printk(KERN_INFO "IOMEGA StorCenter\n");
}
/*
@@ -117,6 +120,7 @@ define_machine(storcenter){
.name = "IOMEGA StorCenter",
.probe = storcenter_probe,
.setup_arch = storcenter_setup_arch,
+ .discover_phbs = storcenter_setup_pci,
.init_IRQ = storcenter_init_IRQ,
.get_irq = mpic_get_irq,
.restart = storcenter_restart,
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index c86a66d5e998..a20b9576de22 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -536,6 +536,9 @@ static int __init maple_add_bridge(struct device_node *dev)
/* Check for legacy IOs */
isa_bridge_find_early(hose);
+ /* create pci_dn's for DT nodes under this PHB */
+ pci_devs_phb_init_dynamic(hose);
+
return 0;
}
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index f7e66a2005b4..4e9ad5bf3efb 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -179,9 +179,6 @@ static void __init maple_setup_arch(void)
#ifdef CONFIG_SMP
smp_ops = &maple_smp_ops;
#endif
- /* Lookup PCI hosts */
- maple_pci_init();
-
maple_use_rtas_reboot_and_halt_if_present();
printk(KERN_DEBUG "Using native/NAP idle loop\n");
@@ -351,6 +348,7 @@ define_machine(maple) {
.name = "Maple",
.probe = maple_probe,
.setup_arch = maple_setup_arch,
+ .discover_phbs = maple_pci_init,
.init_IRQ = maple_init_IRQ,
.pci_irq_fixup = maple_pci_irq_fixup,
.pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index b612474f8f8e..376797eb7894 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -144,8 +144,6 @@ static void __init pas_setup_arch(void)
/* Setup SMP callback */
smp_ops = &pas_smp_ops;
#endif
- /* Lookup PCI hosts */
- pas_pci_init();
/* Remap SDC register for doing reset */
/* XXXOJN This should maybe come out of the device tree */
@@ -446,6 +444,7 @@ define_machine(pasemi) {
.name = "PA Semi PWRficient",
.probe = pas_probe,
.setup_arch = pas_setup_arch,
+ .discover_phbs = pas_pci_init,
.init_IRQ = pas_init_IRQ,
.get_irq = mpic_get_irq,
.restart = pas_restart,
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index e35eaa9cf938..e9abe0f2e7f0 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -850,6 +850,10 @@ static int __init pmac_add_bridge(struct device_node *dev)
/* Fixup "bus-range" OF property */
fixup_bus_range(dev);
+ /* create pci_dn's for DT nodes under this PHB */
+ if (IS_ENABLED(CONFIG_PPC64))
+ pci_devs_phb_init_dynamic(hose);
+
return 0;
}
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 2e2cc0c75d87..86aee3f2483f 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -298,9 +298,6 @@ static void __init pmac_setup_arch(void)
of_node_put(ic);
}
- /* Lookup PCI hosts */
- pmac_pci_init();
-
#ifdef CONFIG_PPC32
ohare_init();
l2cr_init();
@@ -600,6 +597,7 @@ define_machine(powermac) {
.name = "PowerMac",
.probe = pmac_probe,
.setup_arch = pmac_setup_arch,
+ .discover_phbs = pmac_pci_init,
.show_cpuinfo = pmac_show_cpuinfo,
.init_IRQ = pmac_pic_init,
.get_irq = NULL, /* changed later */
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index e6f461812856..999997d9e9a9 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -14,6 +14,7 @@
#include <asm/asm-prototypes.h>
#include <asm/firmware.h>
+#include <asm/interrupt.h>
#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index 5fc9408bb0b3..019669eb21d2 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -19,6 +19,7 @@
#include <linux/numa.h>
#include <asm/machdep.h>
#include <asm/debugfs.h>
+#include <asm/cacheflush.h>
/* This enables us to keep track of the memory removed from each node. */
struct memtrace_entry {
@@ -51,6 +52,27 @@ static const struct file_operations memtrace_fops = {
.open = simple_open,
};
+#define FLUSH_CHUNK_SIZE SZ_1G
+/**
+ * flush_dcache_range_chunked(): Write any modified data cache blocks out to
+ * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE.
+ * Does not invalidate the corresponding instruction cache blocks.
+ *
+ * @start: the start address
+ * @stop: the stop address (exclusive)
+ * @chunk: the max size of the chunks
+ */
+static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
+ unsigned long chunk)
+{
+ unsigned long i;
+
+ for (i = start; i < stop; i += chunk) {
+ flush_dcache_range(i, min(stop, i + chunk));
+ cond_resched();
+ }
+}
+
static void memtrace_clear_range(unsigned long start_pfn,
unsigned long nr_pages)
{
@@ -62,6 +84,13 @@ static void memtrace_clear_range(unsigned long start_pfn,
cond_resched();
clear_page(__va(PFN_PHYS(pfn)));
}
+ /*
+ * Before we go ahead and use this range as a cache-inhibited
+ * range, flush the cache.
+ */
+ flush_dcache_range_chunked(PFN_PHYS(start_pfn),
+ PFN_PHYS(start_pfn + nr_pages),
+ FLUSH_CHUNK_SIZE);
}
static u64 memtrace_alloc_node(u32 nid, u64 size)
diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c
index 23571f0b555a..0d9ba70f7251 100644
--- a/arch/powerpc/platforms/powernv/opal-core.c
+++ b/arch/powerpc/platforms/powernv/opal-core.c
@@ -119,8 +119,8 @@ static void fill_prstatus(struct elf_prstatus *prstatus, int pir,
* As a PIR value could also be '0', add an offset of '100'
* to every PIR to avoid misinterpretations in GDB.
*/
- prstatus->pr_pid = cpu_to_be32(100 + pir);
- prstatus->pr_ppid = cpu_to_be32(1);
+ prstatus->common.pr_pid = cpu_to_be32(100 + pir);
+ prstatus->common.pr_ppid = cpu_to_be32(1);
/*
* Indicate SIGUSR1 for crash initiated from kernel.
@@ -130,7 +130,7 @@ static void fill_prstatus(struct elf_prstatus *prstatus, int pir,
short sig;
sig = kernel_initiated ? SIGUSR1 : SIGTERM;
- prstatus->pr_cursig = cpu_to_be16(sig);
+ prstatus->common.pr_cursig = cpu_to_be16(sig);
}
}
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index c61c3b62c8c6..303d7c775740 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -624,7 +624,7 @@ static int opal_recover_mce(struct pt_regs *regs,
*/
recovered = 0;
} else {
- die("Machine check", regs, SIGBUS);
+ die_mce("Machine check", regs, SIGBUS);
recovered = 1;
}
}
diff --git a/arch/powerpc/platforms/powernv/pci-cxl.c b/arch/powerpc/platforms/powernv/pci-cxl.c
index 8c739c94ed28..53172862d23b 100644
--- a/arch/powerpc/platforms/powernv/pci-cxl.c
+++ b/arch/powerpc/platforms/powernv/pci-cxl.c
@@ -150,25 +150,3 @@ int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
return 0;
}
EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
-
-#if IS_MODULE(CONFIG_CXL)
-static inline int get_cxl_module(void)
-{
- struct module *cxl_module;
-
- mutex_lock(&module_mutex);
-
- cxl_module = find_module("cxl");
- if (cxl_module)
- __module_get(cxl_module);
-
- mutex_unlock(&module_mutex);
-
- if (!cxl_module)
- return -ENODEV;
-
- return 0;
-}
-#else
-static inline int get_cxl_module(void) { return 0; }
-#endif
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
index 5218f5da2737..30551bbd7988 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
@@ -380,6 +380,8 @@ void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
/* Remove link to a group from table's list of attached groups */
found = false;
+
+ rcu_read_lock();
list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
if (tgl->table_group == table_group) {
list_del_rcu(&tgl->next);
@@ -388,6 +390,8 @@ void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
break;
}
}
+ rcu_read_unlock();
+
if (WARN_ON(!found))
return;
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index c4f72cdc9b51..f0f901683a2f 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2402,9 +2402,6 @@ static void pnv_pci_ioda_create_dbgfs(void)
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
phb = hose->private_data;
- /* Notify initialization of PHB done */
- phb->initialized = 1;
-
sprintf(name, "PCI%04x", hose->global_number);
phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
@@ -2601,17 +2598,8 @@ static resource_size_t pnv_pci_default_alignment(void)
*/
static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
{
- struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
struct pci_dn *pdn;
- /* The function is probably called while the PEs have
- * not be created yet. For example, resource reassignment
- * during PCI probe period. We just skip the check if
- * PEs isn't ready.
- */
- if (!phb->initialized)
- return true;
-
pdn = pci_get_pdn(dev);
if (!pdn || pdn->pe_number == IODA_INVALID_PE) {
pci_err(dev, "pci_enable_device() blocked, no PE assigned.\n");
@@ -2623,14 +2611,9 @@ static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
static bool pnv_ocapi_enable_device_hook(struct pci_dev *dev)
{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
struct pci_dn *pdn;
struct pnv_ioda_pe *pe;
- if (!phb->initialized)
- return true;
-
pdn = pci_get_pdn(dev);
if (!pdn)
return false;
@@ -2938,7 +2921,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb_id = be64_to_cpup(prop64);
pr_debug(" PHB-ID : 0x%016llx\n", phb_id);
- phb = memblock_alloc(sizeof(*phb), SMP_CACHE_BYTES);
+ phb = kzalloc(sizeof(*phb), GFP_KERNEL);
if (!phb)
panic("%s: Failed to allocate %zu bytes\n", __func__,
sizeof(*phb));
@@ -2987,7 +2970,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
else
phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE;
- phb->diag_data = memblock_alloc(phb->diag_data_size, SMP_CACHE_BYTES);
+ phb->diag_data = kzalloc(phb->diag_data_size, GFP_KERNEL);
if (!phb->diag_data)
panic("%s: Failed to allocate %u bytes\n", __func__,
phb->diag_data_size);
@@ -3049,9 +3032,10 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
}
pemap_off = size;
size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
- aux = memblock_alloc(size, SMP_CACHE_BYTES);
+ aux = kzalloc(size, GFP_KERNEL);
if (!aux)
panic("%s: Failed to allocate %lu bytes\n", __func__, size);
+
phb->ioda.pe_alloc = aux;
phb->ioda.m64_segmap = aux + m64map_off;
phb->ioda.m32_segmap = aux + m32map_off;
@@ -3178,6 +3162,9 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
/* Remove M64 resource if we can't configure it successfully */
if (!phb->init_m64 || phb->init_m64(phb))
hose->mem_resources[1].flags = 0;
+
+ /* create pci_dn's for DT nodes under this PHB */
+ pci_devs_phb_init_dynamic(hose);
}
void __init pnv_pci_init_ioda2_phb(struct device_node *np)
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 739a0b3b72e1..36d22920f5a3 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -119,7 +119,6 @@ struct pnv_phb {
int flags;
void __iomem *regs;
u64 regs_phys;
- int initialized;
spinlock_t lock;
#ifdef CONFIG_DEBUG_FS
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 4426a109ec2f..aadf932c4e61 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -180,9 +180,6 @@ static void __init pnv_setup_arch(void)
/* Initialize SMP */
pnv_smp_init();
- /* Setup PCI */
- pnv_pci_init();
-
/* Setup RTC and NVRAM callbacks */
if (firmware_has_feature(FW_FEATURE_OPAL))
opal_nvram_init();
@@ -547,6 +544,7 @@ define_machine(powernv) {
.init_IRQ = pnv_init_IRQ,
.show_cpuinfo = pnv_show_cpuinfo,
.get_proc_freq = pnv_get_proc_freq,
+ .discover_phbs = pnv_pci_init,
.progress = pnv_progress,
.machine_shutdown = pnv_shutdown,
.power_save = NULL,
diff --git a/arch/powerpc/platforms/powernv/subcore.h b/arch/powerpc/platforms/powernv/subcore.h
index c8f574d1c04a..77feee8436d4 100644
--- a/arch/powerpc/platforms/powernv/subcore.h
+++ b/arch/powerpc/platforms/powernv/subcore.h
@@ -15,7 +15,7 @@
void split_core_secondary_loop(u8 *state);
extern void update_subcore_sibling_mask(void);
#else
-static inline void update_subcore_sibling_mask(void) { };
+static inline void update_subcore_sibling_mask(void) { }
#endif /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/platforms/powernv/vas.c b/arch/powerpc/platforms/powernv/vas.c
index 598e4cd563fb..b65256a63e87 100644
--- a/arch/powerpc/platforms/powernv/vas.c
+++ b/arch/powerpc/platforms/powernv/vas.c
@@ -28,12 +28,10 @@ static DEFINE_PER_CPU(int, cpu_vas_id);
static int vas_irq_fault_window_setup(struct vas_instance *vinst)
{
- char devname[64];
int rc = 0;
- snprintf(devname, sizeof(devname), "vas-%d", vinst->vas_id);
rc = request_threaded_irq(vinst->virq, vas_fault_handler,
- vas_fault_thread_fn, 0, devname, vinst);
+ vas_fault_thread_fn, 0, vinst->name, vinst);
if (rc) {
pr_err("VAS[%d]: Request IRQ(%d) failed with %d\n",
@@ -80,6 +78,12 @@ static int init_vas_instance(struct platform_device *pdev)
if (!vinst)
return -ENOMEM;
+ vinst->name = kasprintf(GFP_KERNEL, "vas-%d", vasid);
+ if (!vinst->name) {
+ kfree(vinst);
+ return -ENOMEM;
+ }
+
INIT_LIST_HEAD(&vinst->node);
ida_init(&vinst->ida);
mutex_init(&vinst->mutex);
@@ -162,6 +166,7 @@ static int init_vas_instance(struct platform_device *pdev)
return 0;
free_vinst:
+ kfree(vinst->name);
kfree(vinst);
return -ENODEV;
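/*
 * Illustrative sketch, not part of the patch above: kasprintf() allocates the
 * formatted string with kmalloc(), so unlike the removed on-stack devname[]
 * it can outlive the caller's stack frame (the IRQ layer keeps a pointer to
 * the name), and it must be kfree()d on every error and teardown path. The
 * structure and function names below are hypothetical.
 */
struct foo_instance {
	int id;
	char *name;
};

static struct foo_instance *foo_create(int id)
{
	struct foo_instance *inst = kzalloc(sizeof(*inst), GFP_KERNEL);

	if (!inst)
		return NULL;

	inst->name = kasprintf(GFP_KERNEL, "foo-%d", id);	/* heap-allocated name */
	if (!inst->name) {
		kfree(inst);
		return NULL;
	}
	inst->id = id;
	return inst;
}

static void foo_destroy(struct foo_instance *inst)
{
	kfree(inst->name);	/* free the kasprintf() buffer first */
	kfree(inst);
}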
diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h
index 70f793e8f6cc..c7db3190baca 100644
--- a/arch/powerpc/platforms/powernv/vas.h
+++ b/arch/powerpc/platforms/powernv/vas.h
@@ -340,6 +340,7 @@ struct vas_instance {
struct vas_window *rxwin[VAS_COP_TYPE_MAX];
struct vas_window *windows[VAS_WINDOWS_PER_CHIP];
+ char *name;
char *dbgname;
struct dentry *dbgdir;
};
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 16e86ba8aa20..233503fcf8f0 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -127,7 +127,6 @@ void dlpar_free_cc_nodes(struct device_node *dn)
#define NEXT_PROPERTY 3
#define PREV_PARENT 4
#define MORE_MEMORY 5
-#define CALL_AGAIN -2
#define ERR_CFG_USE -9003
struct device_node *dlpar_configure_connector(__be32 drc_index,
@@ -168,6 +167,9 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
spin_unlock(&rtas_data_buf_lock);
+ if (rtas_busy_delay(rc))
+ continue;
+
switch (rc) {
case COMPLETE:
break;
@@ -216,9 +218,6 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
last_dn = last_dn->parent;
break;
- case CALL_AGAIN:
- break;
-
case MORE_MEMORY:
case ERR_CFG_USE:
default:
@@ -521,11 +520,8 @@ static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
int rc;
args = argbuf = kstrdup(buf, GFP_KERNEL);
- if (!argbuf) {
- pr_info("Could not allocate resources for DLPAR operation\n");
- kfree(argbuf);
+ if (!argbuf)
return -ENOMEM;
- }
/*
* Parse out the request from the user, this will be in the form:
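/*
 * Illustrative sketch, not part of the patch above: rtas_busy_delay() inspects
 * an RTAS return status, sleeps for the appropriate time if it is one of the
 * busy / extended-delay codes, and returns nonzero, so callers can replace
 * hand-rolled retry codes (such as the removed CALL_AGAIN) with a single
 * retry check. The token handling below is a hypothetical minimal example.
 */
static int call_rtas_with_retry(int token)
{
	int rc;

	do {
		rc = rtas_call(token, 0, 1, NULL);
	} while (rtas_busy_delay(rc));	/* sleeps and retries while RTAS is busy */

	return rc;
}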
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index cf024fa37bda..bc15200852b7 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -43,7 +43,7 @@ static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_pe;
-void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
+static void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
{
struct pci_dn *pdn = pci_get_pdn(pdev);
@@ -694,8 +694,7 @@ static int pseries_eeh_write_config(struct eeh_dev *edev, int where, int size, u
}
#ifdef CONFIG_PCI_IOV
-int pseries_send_allow_unfreeze(struct pci_dn *pdn,
- u16 *vf_pe_array, int cur_vfs)
+static int pseries_send_allow_unfreeze(struct pci_dn *pdn, u16 *vf_pe_array, int cur_vfs)
{
int rc;
int ibm_allow_unfreeze = rtas_token("ibm,open-sriov-allow-unfreeze");
diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
index 8c6e509f6967..a15ab33646b3 100644
--- a/arch/powerpc/platforms/pseries/ibmebus.c
+++ b/arch/powerpc/platforms/pseries/ibmebus.c
@@ -355,12 +355,12 @@ static int ibmebus_bus_device_probe(struct device *dev)
if (!drv->probe)
return error;
- of_dev_get(of_dev);
+ get_device(dev);
if (of_driver_match_device(dev, dev->driver))
error = drv->probe(of_dev);
if (error)
- of_dev_put(of_dev);
+ put_device(dev);
return error;
}
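/*
 * Illustrative sketch, not part of the patch above: get_device()/put_device()
 * operate directly on the struct device embedded in the bus device, so the
 * reference taken before probe is dropped on the same object when probe
 * fails, instead of going through the of_dev_get()/of_dev_put() wrappers.
 * The helper below is hypothetical.
 */
static int bus_probe_one(struct device *dev, int (*probe)(struct device *))
{
	int error;

	get_device(dev);		/* hold a reference across probe */
	error = probe(dev);
	if (error)
		put_device(dev);	/* drop it again if probe failed */

	return error;			/* on success, the reference is kept */
}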
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 72a4d4167849..1bffbd1c9a94 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -55,9 +55,8 @@ struct pe_map_bar_entry {
__be32 reserved; /* Reserved Space */
};
-int pseries_send_map_pe(struct pci_dev *pdev,
- u16 num_vfs,
- struct pe_map_bar_entry *vf_pe_array)
+static int pseries_send_map_pe(struct pci_dev *pdev, u16 num_vfs,
+ struct pe_map_bar_entry *vf_pe_array)
{
struct pci_dn *pdn;
int rc;
@@ -88,7 +87,7 @@ int pseries_send_map_pe(struct pci_dev *pdev,
return rc;
}
-void pseries_set_pe_num(struct pci_dev *pdev, u16 vf_index, __be16 pe_num)
+static void pseries_set_pe_num(struct pci_dev *pdev, u16 vf_index, __be16 pe_num)
{
struct pci_dn *pdn;
@@ -102,7 +101,7 @@ void pseries_set_pe_num(struct pci_dev *pdev, u16 vf_index, __be16 pe_num)
pdn->pe_num_map[vf_index]);
}
-int pseries_associate_pes(struct pci_dev *pdev, u16 num_vfs)
+static int pseries_associate_pes(struct pci_dev *pdev, u16 num_vfs)
{
struct pci_dn *pdn;
int i, rc, vf_index;
@@ -146,7 +145,7 @@ int pseries_associate_pes(struct pci_dev *pdev, u16 num_vfs)
return rc;
}
-int pseries_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
+static int pseries_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
struct pci_dn *pdn;
int rc;
@@ -189,14 +188,14 @@ int pseries_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
return rc;
}
-int pseries_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
+static int pseries_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
/* Allocate PCI data */
add_sriov_vf_pdns(pdev);
return pseries_pci_sriov_enable(pdev, num_vfs);
}
-int pseries_pcibios_sriov_disable(struct pci_dev *pdev)
+static int pseries_pcibios_sriov_disable(struct pci_dev *pdev)
{
struct pci_dn *pdn;
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 593840847cd3..4fe48c04c6c2 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -33,7 +33,7 @@ int smp_query_cpu_stopped(unsigned int pcpu);
#define QCSS_HARDWARE_ERROR -1
#define QCSS_HARDWARE_BUSY -2
#else
-static inline void smp_init_pseries(void) { };
+static inline void smp_init_pseries(void) { }
#endif
extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary);
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 149cec2212e6..f8b390a9d9fb 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -122,7 +122,7 @@ static inline u8 rtas_mc_error_sub_type(const struct pseries_mc_errorlog *mlog)
* devices or systems (e.g. hugepages) that have not been initialized at the
* subsys stage.
*/
-int __init init_ras_hotplug_IRQ(void)
+static int __init init_ras_hotplug_IRQ(void)
{
struct device_node *np;
@@ -315,12 +315,10 @@ static irqreturn_t ras_hotplug_interrupt(int irq, void *dev_id)
/* Handle environmental and power warning (EPOW) interrupts. */
static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
{
- int status;
int state;
int critical;
- status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
- &state);
+ rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
if (state > 3)
critical = 1; /* Time Critical */
@@ -329,12 +327,9 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
spin_lock(&ras_log_buf_lock);
- status = rtas_call(ras_check_exception_token, 6, 1, NULL,
- RTAS_VECTOR_EXTERNAL_INTERRUPT,
- virq_to_hw(irq),
- RTAS_EPOW_WARNING,
- critical, __pa(&ras_log_buf),
- rtas_get_error_log_max());
+ rtas_call(ras_check_exception_token, 6, 1, NULL, RTAS_VECTOR_EXTERNAL_INTERRUPT,
+ virq_to_hw(irq), RTAS_EPOW_WARNING, critical, __pa(&ras_log_buf),
+ rtas_get_error_log_max());
log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);
@@ -722,6 +717,7 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
struct pseries_errorlog *pseries_log;
struct pseries_mc_errorlog *mce_log = NULL;
int disposition = rtas_error_disposition(errp);
+ unsigned long msr;
u8 error_type;
if (!rtas_error_extended(errp))
@@ -747,9 +743,21 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
* SLB multihit is done by now.
*/
out:
- mtmsr(mfmsr() | MSR_IR | MSR_DR);
+ msr = mfmsr();
+ mtmsr(msr | MSR_IR | MSR_DR);
+
disposition = mce_handle_err_virtmode(regs, errp, mce_log,
disposition);
+
+ /*
+ * Queue irq work to log this rtas event later.
+ * irq_work_queue uses per-cpu variables, so do this in virt
+ * mode as well.
+ */
+ irq_work_queue(&mce_errlog_process_work);
+
+ mtmsr(msr);
+
return disposition;
}
@@ -813,7 +821,7 @@ static int recover_mce(struct pt_regs *regs, struct machine_check_event *evt)
*/
recovered = 0;
} else {
- die("Machine check", regs, SIGBUS);
+ die_mce("Machine check", regs, SIGBUS);
recovered = 1;
}
}
@@ -865,10 +873,8 @@ long pseries_machine_check_realmode(struct pt_regs *regs)
* virtual mode.
*/
disposition = mce_handle_error(regs, errp);
- fwnmi_release_errinfo();
- /* Queue irq work to log this rtas event later. */
- irq_work_queue(&mce_errlog_process_work);
+ fwnmi_release_errinfo();
if (disposition == RTAS_DISP_FULLY_RECOVERED)
return 1;
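/*
 * Illustrative sketch, not part of the patch above: the machine-check path
 * now saves the MSR, turns on instruction/data relocation (MSR_IR/MSR_DR) so
 * per-cpu data such as the irq_work queue can be touched safely, and restores
 * the original MSR before returning to the real-mode caller. A hypothetical
 * helper showing the same save/enable/restore shape:
 */
static void do_work_in_virt_mode(struct irq_work *work)
{
	unsigned long msr = mfmsr();

	mtmsr(msr | MSR_IR | MSR_DR);	/* switch to virtual mode */
	irq_work_queue(work);		/* irq_work uses per-cpu variables */
	mtmsr(msr);			/* back to the caller's (real) mode */
}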
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 090c13f6c881..46e1540abc22 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -463,7 +463,7 @@ void pseries_little_endian_exceptions(void)
}
#endif
-static void __init find_and_init_phbs(void)
+static void __init pSeries_discover_phbs(void)
{
struct device_node *node;
struct pci_controller *phb;
@@ -481,6 +481,9 @@ static void __init find_and_init_phbs(void)
pci_process_bridge_OF_ranges(phb, node, 0);
isa_bridge_find_early(phb);
phb->controller_ops = pseries_pci_controller_ops;
+
+ /* create pci_dn's for DT nodes under this PHB */
+ pci_devs_phb_init_dynamic(phb);
}
of_node_put(root);
@@ -607,8 +610,8 @@ enum get_iov_fw_value_index {
WDW_SIZE = 3 /* Get Window Size */
};
-resource_size_t pseries_get_iov_fw_value(struct pci_dev *dev, int resno,
- enum get_iov_fw_value_index value)
+static resource_size_t pseries_get_iov_fw_value(struct pci_dev *dev, int resno,
+ enum get_iov_fw_value_index value)
{
const int *indexes;
struct device_node *dn = pci_device_to_OF_node(dev);
@@ -643,7 +646,7 @@ resource_size_t pseries_get_iov_fw_value(struct pci_dev *dev, int resno,
return ret;
}
-void of_pci_set_vf_bar_size(struct pci_dev *dev, const int *indexes)
+static void of_pci_set_vf_bar_size(struct pci_dev *dev, const int *indexes)
{
struct resource *res;
resource_size_t base, size;
@@ -665,7 +668,7 @@ void of_pci_set_vf_bar_size(struct pci_dev *dev, const int *indexes)
}
}
-void of_pci_parse_iov_addrs(struct pci_dev *dev, const int *indexes)
+static void of_pci_parse_iov_addrs(struct pci_dev *dev, const int *indexes)
{
struct resource *res, *root, *conflict;
resource_size_t base, size;
@@ -786,7 +789,6 @@ static void __init pSeries_setup_arch(void)
/* Find and initialize PCI host bridges */
init_pci_config_tokens();
- find_and_init_phbs();
of_reconfig_notifier_register(&pci_dn_reconfig_nb);
pSeries_nvram_init();
@@ -1050,6 +1052,7 @@ define_machine(pseries) {
.init_IRQ = pseries_init_irq,
.show_cpuinfo = pSeries_show_cpuinfo,
.log_error = pSeries_log_error,
+ .discover_phbs = pSeries_discover_phbs,
.pcibios_fixup = pSeries_final_fixup,
.restart = rtas_restart,
.halt = rtas_halt,
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index dcd817ca2edf..3fe37495f63d 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1383,7 +1383,6 @@ static long check_bp_loc(unsigned long addr)
return 1;
}
-#ifndef CONFIG_PPC_8xx
static int find_free_data_bpt(void)
{
int i;
@@ -1395,7 +1394,6 @@ static int find_free_data_bpt(void)
printf("Couldn't find free breakpoint register\n");
return -1;
}
-#endif
static void print_data_bpts(void)
{
@@ -1435,7 +1433,6 @@ bpt_cmds(void)
cmd = inchar();
switch (cmd) {
-#ifndef CONFIG_PPC_8xx
static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n";
int mode;
case 'd': /* bd - hardware data breakpoint */
@@ -1497,7 +1494,6 @@ bpt_cmds(void)
force_enable_xmon();
}
break;
-#endif
case 'c':
if (!scanhex(&a)) {
@@ -3723,7 +3719,7 @@ void dump_segments(void)
printf("sr0-15 =");
for (i = 0; i < 16; ++i)
- printf(" %x", mfsrin(i << 28));
+ printf(" %x", mfsr(i << 28));
printf("\n");
}
#endif
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index e9e2c1f0a690..85d626b8ce5e 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -57,6 +57,7 @@ config RISCV
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if MMU && 64BIT
+ select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
select HAVE_ARCH_KGDB
select HAVE_ARCH_KGDB_QXFER_PKT
select HAVE_ARCH_MMAP_RND_BITS if MMU
@@ -67,14 +68,19 @@ config RISCV
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_EBPF_JIT if MMU
+ select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO if MMU && 64BIT
select HAVE_IRQ_TIME_ACCOUNTING
+ select HAVE_KPROBES
+ select HAVE_KPROBES_ON_FTRACE
+ select HAVE_KRETPROBES
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
@@ -143,7 +149,7 @@ config PAGE_OFFSET
default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
config ARCH_FLATMEM_ENABLE
- def_bool y
+ def_bool !NUMA
config ARCH_SPARSEMEM_ENABLE
def_bool y
@@ -156,6 +162,9 @@ config ARCH_SELECT_MEMORY_MODEL
config ARCH_WANT_GENERAL_HUGETLB
def_bool y
+config ARCH_SUPPORTS_UPROBES
+ def_bool y
+
config SYS_SUPPORTS_HUGETLBFS
depends on MMU
def_bool y
@@ -252,8 +261,10 @@ choice
default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
config MAXPHYSMEM_1GB
+ depends on 32BIT
bool "1GiB"
config MAXPHYSMEM_2GB
+ depends on 64BIT && CMODEL_MEDLOW
bool "2GiB"
config MAXPHYSMEM_128GB
depends on 64BIT && CMODEL_MEDANY
@@ -300,6 +311,36 @@ config TUNE_GENERIC
endchoice
+# Common NUMA Features
+config NUMA
+ bool "NUMA Memory Allocation and Scheduler Support"
+ depends on SMP
+ select GENERIC_ARCH_NUMA
+ select OF_NUMA
+ select ARCH_SUPPORTS_NUMA_BALANCING
+ help
+ Enable NUMA (Non-Uniform Memory Access) support.
+
+ The kernel will try to allocate memory used by a CPU on the
+ local memory of the CPU and add some more NUMA awareness to the kernel.
+
+config NODES_SHIFT
+ int "Maximum NUMA Nodes (as a power of 2)"
+ range 1 10
+ default "2"
+ depends on NEED_MULTIPLE_NODES
+ help
+ Specify the maximum number of NUMA Nodes available on the target
+ system. Increases memory reserved to accommodate various tables.
+
+config USE_PERCPU_NUMA_NODE_ID
+ def_bool y
+ depends on NUMA
+
+config NEED_PER_CPU_EMBED_FIRST_CHUNK
+ def_bool y
+ depends on NUMA
+
config RISCV_ISA_C
bool "Emit compressed instructions when building Linux"
default y
@@ -414,11 +455,17 @@ config EFI
allow the kernel to be booted as an EFI application. This
is only useful on systems that have UEFI firmware.
+config CC_HAVE_STACKPROTECTOR_TLS
+ def_bool $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=tp -mstack-protector-guard-offset=0)
+
+config STACKPROTECTOR_PER_TASK
+ def_bool y
+ depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS
+
endmenu
config BUILTIN_DTB
def_bool n
- depends on RISCV_M_MODE
depends on OF
menu "Power management options"
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index 3284d5c291be..7efcece8896c 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -22,30 +22,41 @@ config SOC_VIRT
help
This enables support for QEMU Virt Machine.
-config SOC_KENDRYTE
- bool "Kendryte K210 SoC"
+config SOC_CANAAN
+ bool "Canaan Kendryte K210 SoC"
depends on !MMU
select CLINT_TIMER if RISCV_M_MODE
select SERIAL_SIFIVE if TTY
select SERIAL_SIFIVE_CONSOLE if TTY
select SIFIVE_PLIC
+ select ARCH_HAS_RESET_CONTROLLER
+ select PINCTRL
help
- This enables support for Kendryte K210 SoC platform hardware.
+ This enables support for Canaan Kendryte K210 SoC platform hardware.
-config SOC_KENDRYTE_K210_DTB
- def_bool y
- depends on SOC_KENDRYTE_K210_DTB_BUILTIN
+if SOC_CANAAN
-config SOC_KENDRYTE_K210_DTB_BUILTIN
- bool "Builtin device tree for the Kendryte K210"
- depends on SOC_KENDRYTE
+config SOC_CANAAN_K210_DTB_BUILTIN
+ bool "Builtin device tree for the Canaan Kendryte K210"
+ depends on SOC_CANAAN
default y
select OF
select BUILTIN_DTB
- select SOC_KENDRYTE_K210_DTB
help
- Builds a device tree for the Kendryte K210 into the Linux image.
+ Build a device tree for the Kendryte K210 into the Linux image.
This option should be selected if no bootloader is being used.
If unsure, say Y.
+config SOC_CANAAN_K210_DTB_SOURCE
+ string "Source file for the Canaan Kendryte K210 builtin DTB"
+ depends on SOC_CANAAN
+ depends on SOC_CANAAN_K210_DTB_BUILTIN
+ default "k210_generic"
+ help
+ Base name (without suffix, relative to arch/riscv/boot/dts/canaan)
+ for the DTS file that will be used to produce the DTB linked into the
+ kernel.
+
+endif
+
endmenu
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 8c29e553ef7f..1368d943f1f3 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -12,6 +12,8 @@ OBJCOPYFLAGS := -O binary
LDFLAGS_vmlinux :=
ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
LDFLAGS_vmlinux := --no-relax
+ KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
+ CC_FLAGS_FTRACE := -fpatchable-function-entry=8
endif
ifeq ($(CONFIG_64BIT)$(CONFIG_CMODEL_MEDLOW),yy)
@@ -65,6 +67,16 @@ KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
# architectures. It's faster to have GCC emit only aligned accesses.
KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
+ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
+prepare: stack_protector_prepare
+stack_protector_prepare: prepare0
+ $(eval KBUILD_CFLAGS += -mstack-protector-guard=tls \
+ -mstack-protector-guard-reg=tp \
+ -mstack-protector-guard-offset=$(shell \
+ awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}' \
+ include/generated/asm-offsets.h))
+endif
+
# arch specific predefines for sparse
CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS)
@@ -83,7 +95,7 @@ PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
-ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_KENDRYTE),yy)
+ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN),yy)
KBUILD_IMAGE := $(boot)/loader.bin
else
KBUILD_IMAGE := $(boot)/Image.gz
diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile
index ca1f8cbd78c0..7ffd502e3e7b 100644
--- a/arch/riscv/boot/dts/Makefile
+++ b/arch/riscv/boot/dts/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
subdir-y += sifive
-subdir-y += kendryte
+subdir-$(CONFIG_SOC_CANAAN_K210_DTB_BUILTIN) += canaan
obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y))
diff --git a/arch/riscv/boot/dts/canaan/Makefile b/arch/riscv/boot/dts/canaan/Makefile
new file mode 100644
index 000000000000..9ee7156c0c31
--- /dev/null
+++ b/arch/riscv/boot/dts/canaan/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+ifneq ($(CONFIG_SOC_CANAAN_K210_DTB_SOURCE),"")
+dtb-y += $(strip $(shell echo $(CONFIG_SOC_CANAAN_K210_DTB_SOURCE))).dtb
+obj-$(CONFIG_SOC_CANAAN_K210_DTB_BUILTIN) += $(addsuffix .o, $(dtb-y))
+endif
diff --git a/arch/riscv/boot/dts/canaan/canaan_kd233.dts b/arch/riscv/boot/dts/canaan/canaan_kd233.dts
new file mode 100644
index 000000000000..039b92abf046
--- /dev/null
+++ b/arch/riscv/boot/dts/canaan/canaan_kd233.dts
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com>
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+/dts-v1/;
+
+#include "k210.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "Kendryte KD233";
+ compatible = "canaan,kendryte-kd233", "canaan,kendryte-k210";
+
+ chosen {
+ bootargs = "earlycon console=ttySIF0";
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ led0 {
+ gpios = <&gpio0 8 GPIO_ACTIVE_LOW>;
+ };
+
+ led1 {
+ gpios = <&gpio0 9 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ key0 {
+ label = "KEY0";
+ linux,code = <BTN_0>;
+ gpios = <&gpio0 10 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&fpioa {
+ pinctrl-0 = <&jtag_pinctrl>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ jtag_pinctrl: jtag-pinmux {
+ pinmux = <K210_FPIOA(0, K210_PCF_JTAG_TCLK)>,
+ <K210_FPIOA(1, K210_PCF_JTAG_TDI)>,
+ <K210_FPIOA(2, K210_PCF_JTAG_TMS)>,
+ <K210_FPIOA(3, K210_PCF_JTAG_TDO)>;
+ };
+
+ uarths_pinctrl: uarths-pinmux {
+ pinmux = <K210_FPIOA(4, K210_PCF_UARTHS_RX)>,
+ <K210_FPIOA(5, K210_PCF_UARTHS_TX)>;
+ };
+
+ spi0_pinctrl: spi0-pinmux {
+ pinmux = <K210_FPIOA(6, K210_PCF_GPIOHS20)>, /* cs */
+ <K210_FPIOA(7, K210_PCF_SPI0_SCLK)>, /* wr */
+ <K210_FPIOA(8, K210_PCF_GPIOHS21)>; /* dc */
+ };
+
+ dvp_pinctrl: dvp-pinmux {
+ pinmux = <K210_FPIOA(9, K210_PCF_SCCB_SCLK)>,
+ <K210_FPIOA(10, K210_PCF_SCCB_SDA)>,
+ <K210_FPIOA(11, K210_PCF_DVP_RST)>,
+ <K210_FPIOA(12, K210_PCF_DVP_VSYNC)>,
+ <K210_FPIOA(13, K210_PCF_DVP_PWDN)>,
+ <K210_FPIOA(14, K210_PCF_DVP_XCLK)>,
+ <K210_FPIOA(15, K210_PCF_DVP_PCLK)>,
+ <K210_FPIOA(17, K210_PCF_DVP_HSYNC)>;
+ };
+
+ gpiohs_pinctrl: gpiohs-pinmux {
+ pinmux = <K210_FPIOA(16, K210_PCF_GPIOHS0)>,
+ <K210_FPIOA(20, K210_PCF_GPIOHS4)>, /* Rot. dip sw line 8 */
+ <K210_FPIOA(21, K210_PCF_GPIOHS5)>, /* Rot. dip sw line 4 */
+ <K210_FPIOA(22, K210_PCF_GPIOHS6)>, /* Rot. dip sw line 2 */
+ <K210_FPIOA(23, K210_PCF_GPIOHS7)>, /* Rot. dip sw line 1 */
+ <K210_FPIOA(24, K210_PCF_GPIOHS8)>,
+ <K210_FPIOA(25, K210_PCF_GPIOHS9)>,
+ <K210_FPIOA(26, K210_PCF_GPIOHS10)>;
+ };
+
+ spi1_pinctrl: spi1-pinmux {
+ pinmux = <K210_FPIOA(29, K210_PCF_SPI1_SCLK)>,
+ <K210_FPIOA(30, K210_PCF_SPI1_D0)>,
+ <K210_FPIOA(31, K210_PCF_SPI1_D1)>,
+ <K210_FPIOA(32, K210_PCF_GPIOHS16)>; /* cs */
+ };
+
+ i2s0_pinctrl: i2s0-pinmux {
+ pinmux = <K210_FPIOA(33, K210_PCF_I2S0_IN_D0)>,
+ <K210_FPIOA(34, K210_PCF_I2S0_WS)>,
+ <K210_FPIOA(35, K210_PCF_I2S0_SCLK)>;
+ };
+};
+
+&uarths0 {
+ pinctrl-0 = <&uarths_pinctrl>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&gpio0 {
+ pinctrl-0 = <&gpiohs_pinctrl>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&i2s0 {
+ #sound-dai-cells = <1>;
+ pinctrl-0 = <&i2s0_pinctrl>;
+ pinctrl-names = "default";
+};
+
+&spi0 {
+ pinctrl-0 = <&spi0_pinctrl>;
+ pinctrl-names = "default";
+ num-cs = <1>;
+ cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
+
+ panel@0 {
+ compatible = "ilitek,ili9341";
+ reg = <0>;
+ dc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>;
+ spi-max-frequency = <15000000>;
+ status = "disabled";
+ };
+};
+
+&spi1 {
+ pinctrl-0 = <&spi1_pinctrl>;
+ pinctrl-names = "default";
+ num-cs = <1>;
+ cs-gpios = <&gpio0 16 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ slot@0 {
+ compatible = "mmc-spi-slot";
+ reg = <0>;
+ voltage-ranges = <3300 3300>;
+ spi-max-frequency = <25000000>;
+ broken-cd;
+ };
+};
diff --git a/arch/riscv/boot/dts/canaan/k210.dtsi b/arch/riscv/boot/dts/canaan/k210.dtsi
new file mode 100644
index 000000000000..5e8ca8142482
--- /dev/null
+++ b/arch/riscv/boot/dts/canaan/k210.dtsi
@@ -0,0 +1,459 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com>
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+#include <dt-bindings/clock/k210-clk.h>
+#include <dt-bindings/pinctrl/k210-fpioa.h>
+#include <dt-bindings/reset/k210-rst.h>
+
+/ {
+ /*
+ * Although the K210 is a 64-bit CPU, the address bus is only 32-bits
+ * wide, and the upper half of all addresses is ignored.
+ */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "canaan,kendryte-k210";
+
+ aliases {
+ serial0 = &uarths0;
+ serial1 = &uart1;
+ serial2 = &uart2;
+ serial3 = &uart3;
+ };
+
+ /*
+ * The K210 has an sv39 MMU following the privileged specification v1.9.
+ * Since this is a non-ratified draft specification, the kernel does not
+ * support it and K210 support is enabled only for the !MMU case.
+ * Be consistent with this by setting the CPUs' MMU type to "none".
+ */
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ timebase-frequency = <7800000>;
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "canaan,k210", "riscv";
+ reg = <0>;
+ riscv,isa = "rv64imafdc";
+ mmu-type = "riscv,none";
+ i-cache-block-size = <64>;
+ i-cache-size = <0x8000>;
+ d-cache-block-size = <64>;
+ d-cache-size = <0x8000>;
+ cpu0_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "riscv,cpu-intc";
+ };
+ };
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "canaan,k210", "riscv";
+ reg = <1>;
+ riscv,isa = "rv64imafdc";
+ mmu-type = "riscv,none";
+ i-cache-block-size = <64>;
+ i-cache-size = <0x8000>;
+ d-cache-block-size = <64>;
+ d-cache-size = <0x8000>;
+ cpu1_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "riscv,cpu-intc";
+ };
+ };
+ };
+
+ sram: memory@80000000 {
+ device_type = "memory";
+ compatible = "canaan,k210-sram";
+ reg = <0x80000000 0x400000>,
+ <0x80400000 0x200000>,
+ <0x80600000 0x200000>;
+ reg-names = "sram0", "sram1", "aisram";
+ clocks = <&sysclk K210_CLK_SRAM0>,
+ <&sysclk K210_CLK_SRAM1>,
+ <&sysclk K210_CLK_AI>;
+ clock-names = "sram0", "sram1", "aisram";
+ };
+
+ clocks {
+ in0: oscillator {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ };
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges;
+ interrupt-parent = <&plic0>;
+
+ rom0: nvmem@1000 {
+ reg = <0x1000 0x1000>;
+ read-only;
+ };
+
+ clint0: timer@2000000 {
+ compatible = "canaan,k210-clint", "sifive,clint0";
+ reg = <0x2000000 0xC000>;
+ interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7
+ &cpu1_intc 3 &cpu1_intc 7>;
+ };
+
+ plic0: interrupt-controller@c000000 {
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ compatible = "canaan,k210-plic", "sifive,plic-1.0.0";
+ reg = <0xC000000 0x4000000>;
+ interrupt-controller;
+ interrupts-extended = <&cpu0_intc 11 &cpu1_intc 11>;
+ riscv,ndev = <65>;
+ };
+
+ uarths0: serial@38000000 {
+ compatible = "canaan,k210-uarths", "sifive,uart0";
+ reg = <0x38000000 0x1000>;
+ interrupts = <33>;
+ clocks = <&sysclk K210_CLK_CPU>;
+ };
+
+ gpio0: gpio-controller@38001000 {
+ #interrupt-cells = <2>;
+ #gpio-cells = <2>;
+ compatible = "canaan,k210-gpiohs", "sifive,gpio0";
+ reg = <0x38001000 0x1000>;
+ interrupt-controller;
+ interrupts = <34 35 36 37 38 39 40 41
+ 42 43 44 45 46 47 48 49
+ 50 51 52 53 54 55 56 57
+ 58 59 60 61 62 63 64 65>;
+ gpio-controller;
+ ngpios = <32>;
+ };
+
+ dmac0: dma-controller@50000000 {
+ compatible = "snps,axi-dma-1.01a";
+ reg = <0x50000000 0x1000>;
+ interrupts = <27 28 29 30 31 32>;
+ #dma-cells = <1>;
+ clocks = <&sysclk K210_CLK_DMA>, <&sysclk K210_CLK_DMA>;
+ clock-names = "core-clk", "cfgr-clk";
+ resets = <&sysrst K210_RST_DMA>;
+ dma-channels = <6>;
+ snps,dma-masters = <2>;
+ snps,priority = <0 1 2 3 4 5>;
+ snps,data-width = <5>;
+ snps,block-size = <0x200000 0x200000 0x200000
+ 0x200000 0x200000 0x200000>;
+ snps,axi-max-burst-len = <256>;
+ };
+
+ apb0: bus@50200000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-pm-bus";
+ ranges;
+ clocks = <&sysclk K210_CLK_APB0>;
+
+ gpio1: gpio@50200000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x50200000 0x80>;
+ clocks = <&sysclk K210_CLK_APB0>,
+ <&sysclk K210_CLK_GPIO>;
+ clock-names = "bus", "db";
+ resets = <&sysrst K210_RST_GPIO>;
+
+ gpio1_0: gpio-port@0 {
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ compatible = "snps,dw-apb-gpio-port";
+ reg = <0>;
+ interrupt-controller;
+ interrupts = <23>;
+ gpio-controller;
+ ngpios = <8>;
+ };
+ };
+
+ uart1: serial@50210000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x50210000 0x100>;
+ interrupts = <11>;
+ clocks = <&sysclk K210_CLK_UART1>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "baudclk", "apb_pclk";
+ resets = <&sysrst K210_RST_UART1>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ dcd-override;
+ dsr-override;
+ cts-override;
+ ri-override;
+ };
+
+ uart2: serial@50220000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x50220000 0x100>;
+ interrupts = <12>;
+ clocks = <&sysclk K210_CLK_UART2>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "baudclk", "apb_pclk";
+ resets = <&sysrst K210_RST_UART2>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ dcd-override;
+ dsr-override;
+ cts-override;
+ ri-override;
+ };
+
+ uart3: serial@50230000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x50230000 0x100>;
+ interrupts = <13>;
+ clocks = <&sysclk K210_CLK_UART3>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "baudclk", "apb_pclk";
+ resets = <&sysrst K210_RST_UART3>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ dcd-override;
+ dsr-override;
+ cts-override;
+ ri-override;
+ };
+
+ spi2: spi@50240000 {
+ compatible = "canaan,k210-spi";
+ spi-slave;
+ reg = <0x50240000 0x100>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ interrupts = <3>;
+ clocks = <&sysclk K210_CLK_SPI2>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "ssi_clk", "pclk";
+ resets = <&sysrst K210_RST_SPI2>;
+ spi-max-frequency = <25000000>;
+ };
+
+ i2s0: i2s@50250000 {
+ compatible = "snps,designware-i2s";
+ reg = <0x50250000 0x200>;
+ interrupts = <5>;
+ clocks = <&sysclk K210_CLK_I2S0>;
+ clock-names = "i2sclk";
+ resets = <&sysrst K210_RST_I2S0>;
+ };
+
+ i2s1: i2s@50260000 {
+ compatible = "snps,designware-i2s";
+ reg = <0x50260000 0x200>;
+ interrupts = <6>;
+ clocks = <&sysclk K210_CLK_I2S1>;
+ clock-names = "i2sclk";
+ resets = <&sysrst K210_RST_I2S1>;
+ };
+
+ i2s2: i2s@50270000 {
+ compatible = "snps,designware-i2s";
+ reg = <0x50270000 0x200>;
+ interrupts = <7>;
+ clocks = <&sysclk K210_CLK_I2S2>;
+ clock-names = "i2sclk";
+ resets = <&sysrst K210_RST_I2S2>;
+ };
+
+ i2c0: i2c@50280000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x50280000 0x100>;
+ interrupts = <8>;
+ clocks = <&sysclk K210_CLK_I2C0>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "ref", "pclk";
+ resets = <&sysrst K210_RST_I2C0>;
+ };
+
+ i2c1: i2c@50290000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x50290000 0x100>;
+ interrupts = <9>;
+ clocks = <&sysclk K210_CLK_I2C1>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "ref", "pclk";
+ resets = <&sysrst K210_RST_I2C1>;
+ };
+
+ i2c2: i2c@502a0000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x502A0000 0x100>;
+ interrupts = <10>;
+ clocks = <&sysclk K210_CLK_I2C2>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "ref", "pclk";
+ resets = <&sysrst K210_RST_I2C2>;
+ };
+
+ fpioa: pinmux@502b0000 {
+ compatible = "canaan,k210-fpioa";
+ reg = <0x502B0000 0x100>;
+ clocks = <&sysclk K210_CLK_FPIOA>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "ref", "pclk";
+ resets = <&sysrst K210_RST_FPIOA>;
+ canaan,k210-sysctl-power = <&sysctl 108>;
+ };
+
+ timer0: timer@502d0000 {
+ compatible = "snps,dw-apb-timer";
+ reg = <0x502D0000 0x100>;
+ interrupts = <14 15>;
+ clocks = <&sysclk K210_CLK_TIMER0>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "timer", "pclk";
+ resets = <&sysrst K210_RST_TIMER0>;
+ };
+
+ timer1: timer@502e0000 {
+ compatible = "snps,dw-apb-timer";
+ reg = <0x502E0000 0x100>;
+ interrupts = <16 17>;
+ clocks = <&sysclk K210_CLK_TIMER1>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "timer", "pclk";
+ resets = <&sysrst K210_RST_TIMER1>;
+ };
+
+ timer2: timer@502f0000 {
+ compatible = "snps,dw-apb-timer";
+ reg = <0x502F0000 0x100>;
+ interrupts = <18 19>;
+ clocks = <&sysclk K210_CLK_TIMER2>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "timer", "pclk";
+ resets = <&sysrst K210_RST_TIMER2>;
+ };
+ };
+
+ apb1: bus@50400000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-pm-bus";
+ ranges;
+ clocks = <&sysclk K210_CLK_APB1>;
+
+ wdt0: watchdog@50400000 {
+ compatible = "snps,dw-wdt";
+ reg = <0x50400000 0x100>;
+ interrupts = <21>;
+ clocks = <&sysclk K210_CLK_WDT0>,
+ <&sysclk K210_CLK_APB1>;
+ clock-names = "tclk", "pclk";
+ resets = <&sysrst K210_RST_WDT0>;
+ };
+
+ wdt1: watchdog@50410000 {
+ compatible = "snps,dw-wdt";
+ reg = <0x50410000 0x100>;
+ interrupts = <22>;
+ clocks = <&sysclk K210_CLK_WDT1>,
+ <&sysclk K210_CLK_APB1>;
+ clock-names = "tclk", "pclk";
+ resets = <&sysrst K210_RST_WDT1>;
+ };
+
+ sysctl: syscon@50440000 {
+ compatible = "canaan,k210-sysctl",
+ "syscon", "simple-mfd";
+ reg = <0x50440000 0x100>;
+ clocks = <&sysclk K210_CLK_APB1>;
+ clock-names = "pclk";
+
+ sysclk: clock-controller {
+ #clock-cells = <1>;
+ compatible = "canaan,k210-clk";
+ clocks = <&in0>;
+ };
+
+ sysrst: reset-controller {
+ compatible = "canaan,k210-rst";
+ #reset-cells = <1>;
+ };
+
+ reboot: syscon-reboot {
+ compatible = "syscon-reboot";
+ regmap = <&sysctl>;
+ offset = <48>;
+ mask = <1>;
+ value = <1>;
+ };
+ };
+ };
+
+ apb2: bus@52000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-pm-bus";
+ ranges;
+ clocks = <&sysclk K210_CLK_APB2>;
+
+ spi0: spi@52000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "canaan,k210-spi";
+ reg = <0x52000000 0x100>;
+ interrupts = <1>;
+ clocks = <&sysclk K210_CLK_SPI0>,
+ <&sysclk K210_CLK_APB2>;
+ clock-names = "ssi_clk", "pclk";
+ resets = <&sysrst K210_RST_SPI0>;
+ reset-names = "spi";
+ spi-max-frequency = <25000000>;
+ num-cs = <4>;
+ reg-io-width = <4>;
+ };
+
+ spi1: spi@53000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "canaan,k210-spi";
+ reg = <0x53000000 0x100>;
+ interrupts = <2>;
+ clocks = <&sysclk K210_CLK_SPI1>,
+ <&sysclk K210_CLK_APB2>;
+ clock-names = "ssi_clk", "pclk";
+ resets = <&sysrst K210_RST_SPI1>;
+ reset-names = "spi";
+ spi-max-frequency = <25000000>;
+ num-cs = <4>;
+ reg-io-width = <4>;
+ };
+
+ spi3: spi@54000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwc-ssi-1.01a";
+ reg = <0x54000000 0x200>;
+ interrupts = <4>;
+ clocks = <&sysclk K210_CLK_SPI3>,
+ <&sysclk K210_CLK_APB2>;
+ clock-names = "ssi_clk", "pclk";
+ resets = <&sysrst K210_RST_SPI3>;
+ reset-names = "spi";
+ /* Could possibly go up to 200 MHz */
+ spi-max-frequency = <100000000>;
+ num-cs = <4>;
+ reg-io-width = <4>;
+ };
+ };
+ };
+};
diff --git a/arch/riscv/boot/dts/canaan/k210_generic.dts b/arch/riscv/boot/dts/canaan/k210_generic.dts
new file mode 100644
index 000000000000..396c8ca4d24d
--- /dev/null
+++ b/arch/riscv/boot/dts/canaan/k210_generic.dts
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com>
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+/dts-v1/;
+
+#include "k210.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "Kendryte K210 generic";
+ compatible = "canaan,kendryte-k210";
+
+ chosen {
+ bootargs = "earlycon console=ttySIF0";
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&fpioa {
+ pinctrl-0 = <&jtag_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ jtag_pins: jtag-pinmux {
+ pinmux = <K210_FPIOA(0, K210_PCF_JTAG_TCLK)>,
+ <K210_FPIOA(1, K210_PCF_JTAG_TDI)>,
+ <K210_FPIOA(2, K210_PCF_JTAG_TMS)>,
+ <K210_FPIOA(3, K210_PCF_JTAG_TDO)>;
+ };
+
+ uarths_pins: uarths-pinmux {
+ pinmux = <K210_FPIOA(4, K210_PCF_UARTHS_RX)>,
+ <K210_FPIOA(5, K210_PCF_UARTHS_TX)>;
+ };
+};
+
+&uarths0 {
+ pinctrl-0 = <&uarths_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
new file mode 100644
index 000000000000..0bcaf35045e7
--- /dev/null
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com>
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+/dts-v1/;
+
+#include "k210.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "SiPeed MAIX BiT";
+ compatible = "sipeed,maix-bit", "sipeed,maix-bitm",
+ "canaan,kendryte-k210";
+
+ chosen {
+ bootargs = "earlycon console=ttySIF0";
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ led0 {
+ color = <LED_COLOR_ID_GREEN>;
+ label = "green";
+ gpios = <&gpio1_0 4 GPIO_ACTIVE_LOW>;
+ };
+
+ led1 {
+ color = <LED_COLOR_ID_RED>;
+ label = "red";
+ gpios = <&gpio1_0 5 GPIO_ACTIVE_LOW>;
+ };
+
+ led2 {
+ color = <LED_COLOR_ID_BLUE>;
+ label = "blue";
+ gpios = <&gpio1_0 6 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ boot {
+ label = "BOOT";
+ linux,code = <BTN_0>;
+ gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&fpioa {
+ pinctrl-names = "default";
+ pinctrl-0 = <&jtag_pinctrl>;
+ status = "okay";
+
+ jtag_pinctrl: jtag-pinmux {
+ pinmux = <K210_FPIOA(0, K210_PCF_JTAG_TCLK)>,
+ <K210_FPIOA(1, K210_PCF_JTAG_TDI)>,
+ <K210_FPIOA(2, K210_PCF_JTAG_TMS)>,
+ <K210_FPIOA(3, K210_PCF_JTAG_TDO)>;
+ };
+
+ uarths_pinctrl: uarths-pinmux {
+ pinmux = <K210_FPIOA(4, K210_PCF_UARTHS_RX)>,
+ <K210_FPIOA(5, K210_PCF_UARTHS_TX)>;
+ };
+
+ gpio_pinctrl: gpio-pinmux {
+ pinmux = <K210_FPIOA(8, K210_PCF_GPIO0)>,
+ <K210_FPIOA(9, K210_PCF_GPIO1)>,
+ <K210_FPIOA(10, K210_PCF_GPIO2)>,
+ <K210_FPIOA(11, K210_PCF_GPIO3)>,
+ <K210_FPIOA(12, K210_PCF_GPIO4)>,
+ <K210_FPIOA(13, K210_PCF_GPIO5)>,
+ <K210_FPIOA(14, K210_PCF_GPIO6)>,
+ <K210_FPIOA(15, K210_PCF_GPIO7)>;
+ };
+
+ gpiohs_pinctrl: gpiohs-pinmux {
+ pinmux = <K210_FPIOA(16, K210_PCF_GPIOHS0)>,
+ <K210_FPIOA(17, K210_PCF_GPIOHS1)>,
+ <K210_FPIOA(21, K210_PCF_GPIOHS5)>,
+ <K210_FPIOA(22, K210_PCF_GPIOHS6)>,
+ <K210_FPIOA(23, K210_PCF_GPIOHS7)>,
+ <K210_FPIOA(24, K210_PCF_GPIOHS8)>,
+ <K210_FPIOA(25, K210_PCF_GPIOHS9)>,
+ <K210_FPIOA(32, K210_PCF_GPIOHS16)>,
+ <K210_FPIOA(33, K210_PCF_GPIOHS17)>,
+ <K210_FPIOA(34, K210_PCF_GPIOHS18)>,
+ <K210_FPIOA(35, K210_PCF_GPIOHS19)>;
+ };
+
+ i2s0_pinctrl: i2s0-pinmux {
+ pinmux = <K210_FPIOA(18, K210_PCF_I2S0_SCLK)>,
+ <K210_FPIOA(19, K210_PCF_I2S0_WS)>,
+ <K210_FPIOA(20, K210_PCF_I2S0_IN_D0)>;
+ };
+
+ dvp_pinctrl: dvp-pinmux {
+ pinmux = <K210_FPIOA(40, K210_PCF_SCCB_SDA)>,
+ <K210_FPIOA(41, K210_PCF_SCCB_SCLK)>,
+ <K210_FPIOA(42, K210_PCF_DVP_RST)>,
+ <K210_FPIOA(43, K210_PCF_DVP_VSYNC)>,
+ <K210_FPIOA(44, K210_PCF_DVP_PWDN)>,
+ <K210_FPIOA(45, K210_PCF_DVP_HSYNC)>,
+ <K210_FPIOA(46, K210_PCF_DVP_XCLK)>,
+ <K210_FPIOA(47, K210_PCF_DVP_PCLK)>;
+ };
+
+ spi0_pinctrl: spi0-pinmux {
+ pinmux = <K210_FPIOA(36, K210_PCF_GPIOHS20)>, /* cs */
+ <K210_FPIOA(37, K210_PCF_GPIOHS21)>, /* rst */
+ <K210_FPIOA(38, K210_PCF_GPIOHS22)>, /* dc */
+ <K210_FPIOA(39, K210_PCF_SPI0_SCLK)>; /* wr */
+ };
+
+ spi1_pinctrl: spi1-pinmux {
+ pinmux = <K210_FPIOA(26, K210_PCF_SPI1_D1)>,
+ <K210_FPIOA(27, K210_PCF_SPI1_SCLK)>,
+ <K210_FPIOA(28, K210_PCF_SPI1_D0)>,
+ <K210_FPIOA(29, K210_PCF_GPIOHS13)>; /* cs */
+ };
+
+ i2c1_pinctrl: i2c1-pinmux {
+ pinmux = <K210_FPIOA(30, K210_PCF_I2C1_SCLK)>,
+ <K210_FPIOA(31, K210_PCF_I2C1_SDA)>;
+ };
+};
+
+&uarths0 {
+ pinctrl-0 = <&uarths_pinctrl>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&gpio0 {
+ pinctrl-0 = <&gpiohs_pinctrl>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&gpio1 {
+ pinctrl-0 = <&gpio_pinctrl>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&i2s0 {
+ #sound-dai-cells = <1>;
+ pinctrl-0 = <&i2s0_pinctrl>;
+ pinctrl-names = "default";
+};
+
+&i2c1 {
+ pinctrl-0 = <&i2c1_pinctrl>;
+ pinctrl-names = "default";
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&spi0 {
+ pinctrl-0 = <&spi0_pinctrl>;
+ pinctrl-names = "default";
+ num-cs = <1>;
+ cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
+
+ panel@0 {
+ compatible = "sitronix,st7789v";
+ reg = <0>;
+ reset-gpios = <&gpio0 21 GPIO_ACTIVE_LOW>;
+ dc-gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>;
+ spi-max-frequency = <15000000>;
+ spi-cs-high;
+ status = "disabled";
+ };
+};
+
+&spi1 {
+ pinctrl-0 = <&spi1_pinctrl>;
+ pinctrl-names = "default";
+ num-cs = <1>;
+ cs-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ slot@0 {
+ compatible = "mmc-spi-slot";
+ reg = <0>;
+ voltage-ranges = <3300 3300>;
+ spi-max-frequency = <25000000>;
+ broken-cd;
+ };
+};
+
+&spi3 {
+ spi-flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+ m25p,fast-read;
+ broken-flash-reset;
+ };
+};
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
new file mode 100644
index 000000000000..ac8a03f5867a
--- /dev/null
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com>
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+/dts-v1/;
+
+#include "k210.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "SiPeed MAIX Dock";
+ compatible = "sipeed,maix-dock-m1", "sipeed,maix-dock-m1w",
+ "canaan,kendryte-k210";
+
+ chosen {
+ bootargs = "earlycon console=ttySIF0";
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ /*
+ * Note: the board wiring drawing documents green on
+ * gpio #4, red on gpio #5 and blue on gpio #6. However,
+ * the board is actually wired differently as defined here.
+ */
+ led0 {
+ color = <LED_COLOR_ID_BLUE>;
+ label = "blue";
+ gpios = <&gpio1_0 4 GPIO_ACTIVE_LOW>;
+ };
+
+ led1 {
+ color = <LED_COLOR_ID_GREEN>;
+ label = "green";
+ gpios = <&gpio1_0 5 GPIO_ACTIVE_LOW>;
+ };
+
+ led2 {
+ color = <LED_COLOR_ID_RED>;
+ label = "red";
+ gpios = <&gpio1_0 6 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ boot {
+ label = "BOOT";
+ linux,code = <BTN_0>;
+ gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&fpioa {
+ pinctrl-0 = <&jtag_pinctrl>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ jtag_pinctrl: jtag-pinmux {
+ pinmux = <K210_FPIOA(0, K210_PCF_JTAG_TCLK)>,
+ <K210_FPIOA(1, K210_PCF_JTAG_TDI)>,
+ <K210_FPIOA(2, K210_PCF_JTAG_TMS)>,
+ <K210_FPIOA(3, K210_PCF_JTAG_TDO)>;
+ };
+
+ uarths_pinctrl: uarths-pinmux {
+ pinmux = <K210_FPIOA(4, K210_PCF_UARTHS_RX)>,
+ <K210_FPIOA(5, K210_PCF_UARTHS_TX)>;
+ };
+
+ gpio_pinctrl: gpio-pinmux {
+ pinmux = <K210_FPIOA(8, K210_PCF_GPIO0)>,
+ <K210_FPIOA(11, K210_PCF_GPIO3)>,
+ <K210_FPIOA(12, K210_PCF_GPIO4)>,
+ <K210_FPIOA(13, K210_PCF_GPIO5)>,
+ <K210_FPIOA(14, K210_PCF_GPIO6)>,
+ <K210_FPIOA(15, K210_PCF_GPIO7)>;
+ };
+
+ gpiohs_pinctrl: gpiohs-pinmux {
+ pinmux = <K210_FPIOA(16, K210_PCF_GPIOHS0)>,
+ <K210_FPIOA(17, K210_PCF_GPIOHS1)>,
+ <K210_FPIOA(21, K210_PCF_GPIOHS5)>,
+ <K210_FPIOA(22, K210_PCF_GPIOHS6)>,
+ <K210_FPIOA(23, K210_PCF_GPIOHS7)>,
+ <K210_FPIOA(24, K210_PCF_GPIOHS8)>,
+ <K210_FPIOA(25, K210_PCF_GPIOHS9)>,
+ <K210_FPIOA(32, K210_PCF_GPIOHS16)>,
+ <K210_FPIOA(33, K210_PCF_GPIOHS17)>,
+ <K210_FPIOA(34, K210_PCF_GPIOHS18)>,
+ <K210_FPIOA(35, K210_PCF_GPIOHS19)>;
+ };
+
+ i2s0_pinctrl: i2s0-pinmux {
+ pinmux = <K210_FPIOA(18, K210_PCF_I2S0_SCLK)>,
+ <K210_FPIOA(19, K210_PCF_I2S0_WS)>,
+ <K210_FPIOA(20, K210_PCF_I2S0_IN_D0)>;
+ };
+
+ dvp_pinctrl: dvp-pinmux {
+ pinmux = <K210_FPIOA(40, K210_PCF_SCCB_SDA)>,
+ <K210_FPIOA(41, K210_PCF_SCCB_SCLK)>,
+ <K210_FPIOA(42, K210_PCF_DVP_RST)>,
+ <K210_FPIOA(43, K210_PCF_DVP_VSYNC)>,
+ <K210_FPIOA(44, K210_PCF_DVP_PWDN)>,
+ <K210_FPIOA(45, K210_PCF_DVP_HSYNC)>,
+ <K210_FPIOA(46, K210_PCF_DVP_XCLK)>,
+ <K210_FPIOA(47, K210_PCF_DVP_PCLK)>;
+ };
+
+ spi0_pinctrl: spi0-pinmux {
+ pinmux = <K210_FPIOA(36, K210_PCF_GPIOHS20)>, /* cs */
+ <K210_FPIOA(37, K210_PCF_GPIOHS21)>, /* rst */
+ <K210_FPIOA(38, K210_PCF_GPIOHS22)>, /* dc */
+ <K210_FPIOA(39, K210_PCF_SPI0_SCLK)>; /* wr */
+ };
+
+ spi1_pinctrl: spi1-pinmux {
+ pinmux = <K210_FPIOA(26, K210_PCF_SPI1_D1)>,
+ <K210_FPIOA(27, K210_PCF_SPI1_SCLK)>,
+ <K210_FPIOA(28, K210_PCF_SPI1_D0)>,
+ <K210_FPIOA(29, K210_PCF_GPIOHS13)>; /* cs */
+ };
+
+ i2c1_pinctrl: i2c1-pinmux {
+ pinmux = <K210_FPIOA(9, K210_PCF_I2C1_SCLK)>,
+ <K210_FPIOA(10, K210_PCF_I2C1_SDA)>;
+ };
+};
+
+&uarths0 {
+ pinctrl-0 = <&uarths_pinctrl>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&gpio0 {
+ pinctrl-0 = <&gpiohs_pinctrl>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&gpio1 {
+ pinctrl-0 = <&gpio_pinctrl>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&i2s0 {
+ #sound-dai-cells = <1>;
+ pinctrl-0 = <&i2s0_pinctrl>;
+ pinctrl-names = "default";
+};
+
+&i2c1 {
+ pinctrl-0 = <&i2c1_pinctrl>;
+ pinctrl-names = "default";
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&spi0 {
+ pinctrl-0 = <&spi0_pinctrl>;
+ pinctrl-names = "default";
+ num-cs = <1>;
+ cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
+
+ panel@0 {
+ compatible = "sitronix,st7789v";
+ reg = <0>;
+ reset-gpios = <&gpio0 21 GPIO_ACTIVE_LOW>;
+ dc-gpios = <&gpio0 22 0>;
+ spi-max-frequency = <15000000>;
+ status = "disabled";
+ };
+};
+
+&spi1 {
+ pinctrl-0 = <&spi1_pinctrl>;
+ pinctrl-names = "default";
+ num-cs = <1>;
+ cs-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ slot@0 {
+ compatible = "mmc-spi-slot";
+ reg = <0>;
+ voltage-ranges = <3300 3300>;
+ spi-max-frequency = <25000000>;
+ broken-cd;
+ };
+};
+
+&spi3 {
+ spi-flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+ m25p,fast-read;
+ broken-flash-reset;
+ };
+};
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
new file mode 100644
index 000000000000..623998194bc1
--- /dev/null
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com>
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+/dts-v1/;
+
+#include "k210.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "SiPeed MAIX GO";
+ compatible = "sipeed,maix-go", "canaan,kendryte-k210";
+
+ chosen {
+ bootargs = "earlycon console=ttySIF0";
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ led0 {
+ color = <LED_COLOR_ID_GREEN>;
+ label = "green";
+ gpios = <&gpio1_0 4 GPIO_ACTIVE_LOW>;
+ };
+
+ led1 {
+ color = <LED_COLOR_ID_RED>;
+ label = "red";
+ gpios = <&gpio1_0 5 GPIO_ACTIVE_LOW>;
+ };
+
+ led2 {
+ color = <LED_COLOR_ID_BLUE>;
+ label = "blue";
+ gpios = <&gpio1_0 6 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ up {
+ label = "UP";
+ linux,code = <BTN_1>;
+ gpios = <&gpio1_0 7 GPIO_ACTIVE_LOW>;
+ };
+
+ press {
+ label = "PRESS";
+ linux,code = <BTN_0>;
+ gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
+ };
+
+ down {
+ label = "DOWN";
+ linux,code = <BTN_2>;
+ gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&fpioa {
+ pinctrl-0 = <&jtag_pinctrl>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ jtag_pinctrl: jtag-pinmux {
+ pinmux = <K210_FPIOA(0, K210_PCF_JTAG_TCLK)>,
+ <K210_FPIOA(1, K210_PCF_JTAG_TDI)>,
+ <K210_FPIOA(2, K210_PCF_JTAG_TMS)>,
+ <K210_FPIOA(3, K210_PCF_JTAG_TDO)>;
+ };
+
+ uarths_pinctrl: uarths-pinmux {
+ pinmux = <K210_FPIOA(4, K210_PCF_UARTHS_RX)>,
+ <K210_FPIOA(5, K210_PCF_UARTHS_TX)>;
+ };
+
+ gpio_pinctrl: gpio-pinmux {
+ pinmux = <K210_FPIOA(8, K210_PCF_GPIO0)>,
+ <K210_FPIOA(9, K210_PCF_GPIO1)>,
+ <K210_FPIOA(10, K210_PCF_GPIO2)>,
+ <K210_FPIOA(11, K210_PCF_GPIO3)>,
+ <K210_FPIOA(12, K210_PCF_GPIO4)>,
+ <K210_FPIOA(13, K210_PCF_GPIO5)>,
+ <K210_FPIOA(14, K210_PCF_GPIO6)>,
+ <K210_FPIOA(15, K210_PCF_GPIO7)>;
+ };
+
+ gpiohs_pinctrl: gpiohs-pinmux {
+ pinmux = <K210_FPIOA(16, K210_PCF_GPIOHS0)>,
+ <K210_FPIOA(17, K210_PCF_GPIOHS1)>,
+ <K210_FPIOA(21, K210_PCF_GPIOHS5)>,
+ <K210_FPIOA(22, K210_PCF_GPIOHS6)>,
+ <K210_FPIOA(23, K210_PCF_GPIOHS7)>,
+ <K210_FPIOA(24, K210_PCF_GPIOHS8)>,
+ <K210_FPIOA(25, K210_PCF_GPIOHS9)>,
+ <K210_FPIOA(32, K210_PCF_GPIOHS16)>,
+ <K210_FPIOA(33, K210_PCF_GPIOHS17)>,
+ <K210_FPIOA(34, K210_PCF_GPIOHS18)>,
+ <K210_FPIOA(35, K210_PCF_GPIOHS19)>;
+ };
+
+ i2s0_pinctrl: i2s0-pinmux {
+ pinmux = <K210_FPIOA(18, K210_PCF_I2S0_SCLK)>,
+ <K210_FPIOA(19, K210_PCF_I2S0_WS)>,
+ <K210_FPIOA(20, K210_PCF_I2S0_IN_D0)>;
+ };
+
+ dvp_pinctrl: dvp-pinmux {
+ pinmux = <K210_FPIOA(40, K210_PCF_SCCB_SDA)>,
+ <K210_FPIOA(41, K210_PCF_SCCB_SCLK)>,
+ <K210_FPIOA(42, K210_PCF_DVP_RST)>,
+ <K210_FPIOA(43, K210_PCF_DVP_VSYNC)>,
+ <K210_FPIOA(44, K210_PCF_DVP_PWDN)>,
+ <K210_FPIOA(45, K210_PCF_DVP_HSYNC)>,
+ <K210_FPIOA(46, K210_PCF_DVP_XCLK)>,
+ <K210_FPIOA(47, K210_PCF_DVP_PCLK)>;
+ };
+
+ spi0_pinctrl: spi0-pinmux {
+ pinmux = <K210_FPIOA(36, K210_PCF_GPIOHS20)>, /* cs */
+ <K210_FPIOA(37, K210_PCF_GPIOHS21)>, /* rst */
+ <K210_FPIOA(38, K210_PCF_GPIOHS22)>, /* dc */
+ <K210_FPIOA(39, K210_PCF_SPI0_SCLK)>; /* wr */
+ };
+
+ spi1_pinctrl: spi1-pinmux {
+ pinmux = <K210_FPIOA(26, K210_PCF_SPI1_D1)>,
+ <K210_FPIOA(27, K210_PCF_SPI1_SCLK)>,
+ <K210_FPIOA(28, K210_PCF_SPI1_D0)>,
+ <K210_FPIOA(29, K210_PCF_GPIOHS13)>; /* cs */
+ };
+
+ i2c1_pinctrl: i2c1-pinmux {
+ pinmux = <K210_FPIOA(30, K210_PCF_I2C1_SCLK)>,
+ <K210_FPIOA(31, K210_PCF_I2C1_SDA)>;
+ };
+};
+
+&uarths0 {
+ pinctrl-0 = <&uarths_pinctrl>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&gpio0 {
+ pinctrl-0 = <&gpiohs_pinctrl>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&gpio1 {
+ pinctrl-0 = <&gpio_pinctrl>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&i2s0 {
+ #sound-dai-cells = <1>;
+ pinctrl-0 = <&i2s0_pinctrl>;
+ pinctrl-names = "default";
+};
+
+&i2c1 {
+ pinctrl-0 = <&i2c1_pinctrl>;
+ pinctrl-names = "default";
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&spi0 {
+ pinctrl-0 = <&spi0_pinctrl>;
+ pinctrl-names = "default";
+ num-cs = <1>;
+ cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
+
+ panel@0 {
+ compatible = "sitronix,st7789v";
+ reg = <0>;
+ reset-gpios = <&gpio0 21 GPIO_ACTIVE_LOW>;
+ dc-gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>;
+ spi-max-frequency = <15000000>;
+ status = "disabled";
+ };
+};
+
+&spi1 {
+ pinctrl-0 = <&spi1_pinctrl>;
+ pinctrl-names = "default";
+ num-cs = <1>;
+ cs-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ slot@0 {
+ compatible = "mmc-spi-slot";
+ reg = <0>;
+ voltage-ranges = <3300 3300>;
+ spi-max-frequency = <25000000>;
+ broken-cd;
+ };
+};
+
+&spi3 {
+ spi-flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+ m25p,fast-read;
+ broken-flash-reset;
+ };
+};
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
new file mode 100644
index 000000000000..cf605ba0d67e
--- /dev/null
+++ b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com>
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+/dts-v1/;
+
+#include "k210.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "SiPeed MAIXDUINO";
+ compatible = "sipeed,maixduino", "canaan,kendryte-k210";
+
+ chosen {
+ bootargs = "earlycon console=ttySIF0";
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ boot {
+ label = "BOOT";
+ linux,code = <BTN_0>;
+ gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ vcc_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
+
+&fpioa {
+ status = "okay";
+
+ uarths_pinctrl: uarths-pinmux {
+ pinmux = <K210_FPIOA(4, K210_PCF_UARTHS_RX)>, /* Header "0" */
+ <K210_FPIOA(5, K210_PCF_UARTHS_TX)>; /* Header "1" */
+ };
+
+ gpio_pinctrl: gpio-pinmux {
+ pinmux = <K210_FPIOA(8, K210_PCF_GPIO0)>,
+ <K210_FPIOA(9, K210_PCF_GPIO1)>;
+ };
+
+ gpiohs_pinctrl: gpiohs-pinmux {
+ pinmux = <K210_FPIOA(16, K210_PCF_GPIOHS0)>, /* BOOT */
+ <K210_FPIOA(21, K210_PCF_GPIOHS2)>, /* Header "2" */
+ <K210_FPIOA(22, K210_PCF_GPIOHS3)>, /* Header "3" */
+ <K210_FPIOA(23, K210_PCF_GPIOHS4)>, /* Header "4" */
+ <K210_FPIOA(24, K210_PCF_GPIOHS5)>, /* Header "5" */
+ <K210_FPIOA(32, K210_PCF_GPIOHS6)>, /* Header "6" */
+ <K210_FPIOA(15, K210_PCF_GPIOHS7)>, /* Header "7" */
+ <K210_FPIOA(14, K210_PCF_GPIOHS8)>, /* Header "8" */
+ <K210_FPIOA(13, K210_PCF_GPIOHS9)>, /* Header "9" */
+ <K210_FPIOA(12, K210_PCF_GPIOHS10)>, /* Header "10" */
+ <K210_FPIOA(11, K210_PCF_GPIOHS11)>, /* Header "11" */
+ <K210_FPIOA(10, K210_PCF_GPIOHS12)>, /* Header "12" */
+ <K210_FPIOA(3, K210_PCF_GPIOHS13)>; /* Header "13" */
+ };
+
+ i2s0_pinctrl: i2s0-pinmux {
+ pinmux = <K210_FPIOA(18, K210_PCF_I2S0_SCLK)>,
+ <K210_FPIOA(19, K210_PCF_I2S0_WS)>,
+ <K210_FPIOA(20, K210_PCF_I2S0_IN_D0)>;
+ };
+
+ spi1_pinctrl: spi1-pinmux {
+ pinmux = <K210_FPIOA(26, K210_PCF_SPI1_D1)>,
+ <K210_FPIOA(27, K210_PCF_SPI1_SCLK)>,
+ <K210_FPIOA(28, K210_PCF_SPI1_D0)>,
+ <K210_FPIOA(29, K210_PCF_GPIO2)>; /* cs */
+ };
+
+ i2c1_pinctrl: i2c1-pinmux {
+ pinmux = <K210_FPIOA(30, K210_PCF_I2C1_SCLK)>, /* Header "scl" */
+ <K210_FPIOA(31, K210_PCF_I2C1_SDA)>; /* Header "sda" */
+ };
+
+ i2s1_pinctrl: i2s1-pinmux {
+ pinmux = <K210_FPIOA(33, K210_PCF_I2S1_WS)>,
+ <K210_FPIOA(34, K210_PCF_I2S1_IN_D0)>,
+ <K210_FPIOA(35, K210_PCF_I2S1_SCLK)>;
+ };
+
+ spi0_pinctrl: spi0-pinmux {
+ pinmux = <K210_FPIOA(36, K210_PCF_GPIOHS20)>, /* cs */
+ <K210_FPIOA(37, K210_PCF_GPIOHS21)>, /* rst */
+ <K210_FPIOA(38, K210_PCF_GPIOHS22)>, /* dc */
+ <K210_FPIOA(39, K210_PCF_SPI0_SCLK)>; /* wr */
+ };
+
+ dvp_pinctrl: dvp-pinmux {
+ pinmux = <K210_FPIOA(40, K210_PCF_SCCB_SDA)>,
+ <K210_FPIOA(41, K210_PCF_SCCB_SCLK)>,
+ <K210_FPIOA(42, K210_PCF_DVP_RST)>,
+ <K210_FPIOA(43, K210_PCF_DVP_VSYNC)>,
+ <K210_FPIOA(44, K210_PCF_DVP_PWDN)>,
+ <K210_FPIOA(45, K210_PCF_DVP_HSYNC)>,
+ <K210_FPIOA(46, K210_PCF_DVP_XCLK)>,
+ <K210_FPIOA(47, K210_PCF_DVP_PCLK)>;
+ };
+};
+
+&uarths0 {
+ pinctrl-0 = <&uarths_pinctrl>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&gpio0 {
+ pinctrl-0 = <&gpiohs_pinctrl>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&gpio1 {
+ pinctrl-0 = <&gpio_pinctrl>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&i2s0 {
+ #sound-dai-cells = <1>;
+ pinctrl-0 = <&i2s0_pinctrl>;
+ pinctrl-names = "default";
+};
+
+&i2c1 {
+ pinctrl-0 = <&i2c1_pinctrl>;
+ pinctrl-names = "default";
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&spi0 {
+ pinctrl-0 = <&spi0_pinctrl>;
+ pinctrl-names = "default";
+ num-cs = <1>;
+ cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
+
+ panel@0 {
+ compatible = "sitronix,st7789v";
+ reg = <0>;
+ reset-gpios = <&gpio0 21 GPIO_ACTIVE_LOW>;
+ dc-gpios = <&gpio0 22 0>;
+ spi-max-frequency = <15000000>;
+ power-supply = <&vcc_3v3>;
+ };
+};
+
+&spi1 {
+ pinctrl-0 = <&spi1_pinctrl>;
+ pinctrl-names = "default";
+ num-cs = <1>;
+ cs-gpios = <&gpio1_0 2 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ slot@0 {
+ compatible = "mmc-spi-slot";
+ reg = <0>;
+ voltage-ranges = <3300 3300>;
+ spi-max-frequency = <25000000>;
+ broken-cd;
+ };
+};
+
+&spi3 {
+ spi-flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+ m25p,fast-read;
+ broken-flash-reset;
+ };
+};
diff --git a/arch/riscv/boot/dts/kendryte/Makefile b/arch/riscv/boot/dts/kendryte/Makefile
deleted file mode 100644
index 1a88e616f18e..000000000000
--- a/arch/riscv/boot/dts/kendryte/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_SOC_KENDRYTE_K210_DTB) += k210.dtb
-
-obj-$(CONFIG_SOC_KENDRYTE_K210_DTB_BUILTIN) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/riscv/boot/dts/kendryte/k210.dts b/arch/riscv/boot/dts/kendryte/k210.dts
deleted file mode 100644
index 0d1f28fce6b2..000000000000
--- a/arch/riscv/boot/dts/kendryte/k210.dts
+++ /dev/null
@@ -1,23 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2020 Western Digital Corporation or its affiliates.
- */
-
-/dts-v1/;
-
-#include "k210.dtsi"
-
-/ {
- model = "Kendryte K210 generic";
- compatible = "kendryte,k210";
-
- chosen {
- bootargs = "earlycon console=ttySIF0";
- stdout-path = "serial0";
- };
-};
-
-&uarths0 {
- status = "okay";
-};
-
diff --git a/arch/riscv/boot/dts/kendryte/k210.dtsi b/arch/riscv/boot/dts/kendryte/k210.dtsi
deleted file mode 100644
index d2d0ff645632..000000000000
--- a/arch/riscv/boot/dts/kendryte/k210.dtsi
+++ /dev/null
@@ -1,125 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2019 Sean Anderson <seanga2@gmail.com>
- * Copyright (C) 2020 Western Digital Corporation or its affiliates.
- */
-#include <dt-bindings/clock/k210-clk.h>
-
-/ {
- /*
- * Although the K210 is a 64-bit CPU, the address bus is only 32-bits
- * wide, and the upper half of all addresses is ignored.
- */
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "kendryte,k210";
-
- aliases {
- serial0 = &uarths0;
- };
-
- /*
- * The K210 has an sv39 MMU following the priviledge specification v1.9.
- * Since this is a non-ratified draft specification, the kernel does not
- * support it and the K210 support enabled only for the !MMU case.
- * Be consistent with this by setting the CPUs MMU type to "none".
- */
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
- timebase-frequency = <7800000>;
- cpu0: cpu@0 {
- device_type = "cpu";
- reg = <0>;
- compatible = "kendryte,k210", "sifive,rocket0", "riscv";
- riscv,isa = "rv64imafdc";
- mmu-type = "none";
- i-cache-size = <0x8000>;
- i-cache-block-size = <64>;
- d-cache-size = <0x8000>;
- d-cache-block-size = <64>;
- clocks = <&sysctl K210_CLK_CPU>;
- clock-frequency = <390000000>;
- cpu0_intc: interrupt-controller {
- #interrupt-cells = <1>;
- interrupt-controller;
- compatible = "riscv,cpu-intc";
- };
- };
- cpu1: cpu@1 {
- device_type = "cpu";
- reg = <1>;
- compatible = "kendryte,k210", "sifive,rocket0", "riscv";
- riscv,isa = "rv64imafdc";
- mmu-type = "none";
- i-cache-size = <0x8000>;
- i-cache-block-size = <64>;
- d-cache-size = <0x8000>;
- d-cache-block-size = <64>;
- clocks = <&sysctl K210_CLK_CPU>;
- clock-frequency = <390000000>;
- cpu1_intc: interrupt-controller {
- #interrupt-cells = <1>;
- interrupt-controller;
- compatible = "riscv,cpu-intc";
- };
- };
- };
-
- sram: memory@80000000 {
- device_type = "memory";
- reg = <0x80000000 0x400000>,
- <0x80400000 0x200000>,
- <0x80600000 0x200000>;
- reg-names = "sram0", "sram1", "aisram";
- };
-
- clocks {
- in0: oscillator {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <26000000>;
- };
- };
-
- soc {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "kendryte,k210-soc", "simple-bus";
- ranges;
- interrupt-parent = <&plic0>;
-
- sysctl: sysctl@50440000 {
- compatible = "kendryte,k210-sysctl", "simple-mfd";
- reg = <0x50440000 0x1000>;
- #clock-cells = <1>;
- };
-
- clint0: clint@2000000 {
- #interrupt-cells = <1>;
- compatible = "riscv,clint0";
- reg = <0x2000000 0xC000>;
- interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7
- &cpu1_intc 3 &cpu1_intc 7>;
- clocks = <&sysctl K210_CLK_ACLK>;
- };
-
- plic0: interrupt-controller@c000000 {
- #interrupt-cells = <1>;
- interrupt-controller;
- compatible = "kendryte,k210-plic0", "riscv,plic0";
- reg = <0xC000000 0x4000000>;
- interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 0xffffffff>,
- <&cpu1_intc 11>, <&cpu1_intc 0xffffffff>;
- riscv,ndev = <65>;
- riscv,max-priority = <7>;
- };
-
- uarths0: serial@38000000 {
- compatible = "kendryte,k210-uarths", "sifive,uart0";
- reg = <0x38000000 0x1000>;
- interrupts = <33>;
- clocks = <&sysctl K210_CLK_CPU>;
- };
- };
-};
diff --git a/arch/riscv/boot/dts/sifive/Makefile b/arch/riscv/boot/dts/sifive/Makefile
index 6d6189e6e4af..74c47fe9fc22 100644
--- a/arch/riscv/boot/dts/sifive/Makefile
+++ b/arch/riscv/boot/dts/sifive/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_SOC_SIFIVE) += hifive-unleashed-a00.dtb
+dtb-$(CONFIG_SOC_SIFIVE) += hifive-unleashed-a00.dtb \
+ hifive-unmatched-a00.dtb
diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
new file mode 100644
index 000000000000..eeb4f8c3e0e7
--- /dev/null
+++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2020 SiFive, Inc */
+
+/dts-v1/;
+
+#include <dt-bindings/clock/sifive-fu740-prci.h>
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ compatible = "sifive,fu740-c000", "sifive,fu740";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ ethernet0 = &eth0;
+ };
+
+ chosen {
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu0: cpu@0 {
+ compatible = "sifive,bullet0", "riscv";
+ device_type = "cpu";
+ i-cache-block-size = <64>;
+ i-cache-sets = <128>;
+ i-cache-size = <16384>;
+ next-level-cache = <&ccache>;
+ reg = <0x0>;
+ riscv,isa = "rv64imac";
+ status = "disabled";
+ cpu0_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+ cpu1: cpu@1 {
+ compatible = "sifive,bullet0", "riscv";
+ d-cache-block-size = <64>;
+ d-cache-sets = <64>;
+ d-cache-size = <32768>;
+ d-tlb-sets = <1>;
+ d-tlb-size = <40>;
+ device_type = "cpu";
+ i-cache-block-size = <64>;
+ i-cache-sets = <128>;
+ i-cache-size = <32768>;
+ i-tlb-sets = <1>;
+ i-tlb-size = <40>;
+ mmu-type = "riscv,sv39";
+ next-level-cache = <&ccache>;
+ reg = <0x1>;
+ riscv,isa = "rv64imafdc";
+ tlb-split;
+ cpu1_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+ cpu2: cpu@2 {
+ compatible = "sifive,bullet0", "riscv";
+ d-cache-block-size = <64>;
+ d-cache-sets = <64>;
+ d-cache-size = <32768>;
+ d-tlb-sets = <1>;
+ d-tlb-size = <40>;
+ device_type = "cpu";
+ i-cache-block-size = <64>;
+ i-cache-sets = <128>;
+ i-cache-size = <32768>;
+ i-tlb-sets = <1>;
+ i-tlb-size = <40>;
+ mmu-type = "riscv,sv39";
+ next-level-cache = <&ccache>;
+ reg = <0x2>;
+ riscv,isa = "rv64imafdc";
+ tlb-split;
+ cpu2_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+ cpu3: cpu@3 {
+ compatible = "sifive,bullet0", "riscv";
+ d-cache-block-size = <64>;
+ d-cache-sets = <64>;
+ d-cache-size = <32768>;
+ d-tlb-sets = <1>;
+ d-tlb-size = <40>;
+ device_type = "cpu";
+ i-cache-block-size = <64>;
+ i-cache-sets = <128>;
+ i-cache-size = <32768>;
+ i-tlb-sets = <1>;
+ i-tlb-size = <40>;
+ mmu-type = "riscv,sv39";
+ next-level-cache = <&ccache>;
+ reg = <0x3>;
+ riscv,isa = "rv64imafdc";
+ tlb-split;
+ cpu3_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+ cpu4: cpu@4 {
+ compatible = "sifive,bullet0", "riscv";
+ d-cache-block-size = <64>;
+ d-cache-sets = <64>;
+ d-cache-size = <32768>;
+ d-tlb-sets = <1>;
+ d-tlb-size = <40>;
+ device_type = "cpu";
+ i-cache-block-size = <64>;
+ i-cache-sets = <128>;
+ i-cache-size = <32768>;
+ i-tlb-sets = <1>;
+ i-tlb-size = <40>;
+ mmu-type = "riscv,sv39";
+ next-level-cache = <&ccache>;
+ reg = <0x4>;
+ riscv,isa = "rv64imafdc";
+ tlb-split;
+ cpu4_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+ };
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ compatible = "simple-bus";
+ ranges;
+ plic0: interrupt-controller@c000000 {
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ compatible = "sifive,fu540-c000-plic", "sifive,plic-1.0.0";
+ reg = <0x0 0xc000000 0x0 0x4000000>;
+ riscv,ndev = <69>;
+ interrupt-controller;
+ interrupts-extended = <
+ &cpu0_intc 0xffffffff
+ &cpu1_intc 0xffffffff &cpu1_intc 9
+ &cpu2_intc 0xffffffff &cpu2_intc 9
+ &cpu3_intc 0xffffffff &cpu3_intc 9
+ &cpu4_intc 0xffffffff &cpu4_intc 9>;
+ };
+ prci: clock-controller@10000000 {
+ compatible = "sifive,fu740-c000-prci";
+ reg = <0x0 0x10000000 0x0 0x1000>;
+ clocks = <&hfclk>, <&rtcclk>;
+ #clock-cells = <1>;
+ };
+ uart0: serial@10010000 {
+ compatible = "sifive,fu740-c000-uart", "sifive,uart0";
+ reg = <0x0 0x10010000 0x0 0x1000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <39>;
+ clocks = <&prci PRCI_CLK_PCLK>;
+ status = "disabled";
+ };
+ uart1: serial@10011000 {
+ compatible = "sifive,fu740-c000-uart", "sifive,uart0";
+ reg = <0x0 0x10011000 0x0 0x1000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <40>;
+ clocks = <&prci PRCI_CLK_PCLK>;
+ status = "disabled";
+ };
+ i2c0: i2c@10030000 {
+ compatible = "sifive,fu740-c000-i2c", "sifive,i2c0";
+ reg = <0x0 0x10030000 0x0 0x1000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <52>;
+ clocks = <&prci PRCI_CLK_PCLK>;
+ reg-shift = <2>;
+ reg-io-width = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ i2c1: i2c@10031000 {
+ compatible = "sifive,fu740-c000-i2c", "sifive,i2c0";
+ reg = <0x0 0x10031000 0x0 0x1000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <53>;
+ clocks = <&prci PRCI_CLK_PCLK>;
+ reg-shift = <2>;
+ reg-io-width = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ qspi0: spi@10040000 {
+ compatible = "sifive,fu740-c000-spi", "sifive,spi0";
+ reg = <0x0 0x10040000 0x0 0x1000>,
+ <0x0 0x20000000 0x0 0x10000000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <41>;
+ clocks = <&prci PRCI_CLK_PCLK>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ qspi1: spi@10041000 {
+ compatible = "sifive,fu740-c000-spi", "sifive,spi0";
+ reg = <0x0 0x10041000 0x0 0x1000>,
+ <0x0 0x30000000 0x0 0x10000000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <42>;
+ clocks = <&prci PRCI_CLK_PCLK>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ spi0: spi@10050000 {
+ compatible = "sifive,fu740-c000-spi", "sifive,spi0";
+ reg = <0x0 0x10050000 0x0 0x1000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <43>;
+ clocks = <&prci PRCI_CLK_PCLK>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ eth0: ethernet@10090000 {
+ compatible = "sifive,fu540-c000-gem";
+ interrupt-parent = <&plic0>;
+ interrupts = <55>;
+ reg = <0x0 0x10090000 0x0 0x2000>,
+ <0x0 0x100a0000 0x0 0x1000>;
+ local-mac-address = [00 00 00 00 00 00];
+ clock-names = "pclk", "hclk";
+ clocks = <&prci PRCI_CLK_GEMGXLPLL>,
+ <&prci PRCI_CLK_GEMGXLPLL>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ pwm0: pwm@10020000 {
+ compatible = "sifive,fu740-c000-pwm", "sifive,pwm0";
+ reg = <0x0 0x10020000 0x0 0x1000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <44>, <45>, <46>, <47>;
+ clocks = <&prci PRCI_CLK_PCLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+ pwm1: pwm@10021000 {
+ compatible = "sifive,fu740-c000-pwm", "sifive,pwm0";
+ reg = <0x0 0x10021000 0x0 0x1000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <48>, <49>, <50>, <51>;
+ clocks = <&prci PRCI_CLK_PCLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+ ccache: cache-controller@2010000 {
+ compatible = "sifive,fu740-c000-ccache", "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-sets = <2048>;
+ cache-size = <2097152>;
+ cache-unified;
+ interrupt-parent = <&plic0>;
+ interrupts = <19 20 21 22>;
+ reg = <0x0 0x2010000 0x0 0x1000>;
+ };
+ gpio: gpio@10060000 {
+ compatible = "sifive,fu740-c000-gpio", "sifive,gpio0";
+ interrupt-parent = <&plic0>;
+ interrupts = <23>, <24>, <25>, <26>, <27>, <28>, <29>,
+ <30>, <31>, <32>, <33>, <34>, <35>, <36>,
+ <37>, <38>;
+ reg = <0x0 0x10060000 0x0 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&prci PRCI_CLK_PCLK>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
index 24d75a146e02..60846e88ae4b 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
@@ -90,7 +90,6 @@
phy0: ethernet-phy@0 {
compatible = "ethernet-phy-id0007.0771";
reg = <0>;
- reset-gpios = <&gpio 12 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
new file mode 100644
index 000000000000..b1c3c596578f
--- /dev/null
+++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2020 SiFive, Inc */
+
+#include "fu740-c000.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/* Clock frequency (in Hz) of the PCB crystal for rtcclk */
+#define RTCCLK_FREQ 1000000
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ model = "SiFive HiFive Unmatched A00";
+ compatible = "sifive,hifive-unmatched-a00", "sifive,fu740-c000",
+ "sifive,fu740";
+
+ chosen {
+ stdout-path = "serial0";
+ };
+
+ cpus {
+ timebase-frequency = <RTCCLK_FREQ>;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x2 0x00000000>;
+ };
+
+ soc {
+ };
+
+ hfclk: hfclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <26000000>;
+ clock-output-names = "hfclk";
+ };
+
+ rtcclk: rtcclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <RTCCLK_FREQ>;
+ clock-output-names = "rtcclk";
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ temperature-sensor@4c {
+ compatible = "ti,tmp451";
+ reg = <0x4c>;
+ interrupt-parent = <&gpio>;
+ interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ pmic@58 {
+ compatible = "dlg,da9063";
+ reg = <0x58>;
+ interrupt-parent = <&gpio>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+
+ regulators {
+ vdd_bcore1: bcore1 {
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-min-microamp = <5000000>;
+ regulator-max-microamp = <5000000>;
+ regulator-always-on;
+ };
+
+ vdd_bcore2: bcore2 {
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-min-microamp = <5000000>;
+ regulator-max-microamp = <5000000>;
+ regulator-always-on;
+ };
+
+ vdd_bpro: bpro {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microamp = <2500000>;
+ regulator-max-microamp = <2500000>;
+ regulator-always-on;
+ };
+
+ vdd_bperi: bperi {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-min-microamp = <1500000>;
+ regulator-max-microamp = <1500000>;
+ regulator-always-on;
+ };
+
+ vdd_bmem: bmem {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-min-microamp = <3000000>;
+ regulator-max-microamp = <3000000>;
+ regulator-always-on;
+ };
+
+ vdd_bio: bio {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-min-microamp = <3000000>;
+ regulator-max-microamp = <3000000>;
+ regulator-always-on;
+ };
+
+ vdd_ldo1: ldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microamp = <100000>;
+ regulator-max-microamp = <100000>;
+ regulator-always-on;
+ };
+
+ vdd_ldo2: ldo2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microamp = <200000>;
+ regulator-max-microamp = <200000>;
+ regulator-always-on;
+ };
+
+ vdd_ldo3: ldo3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microamp = <200000>;
+ regulator-max-microamp = <200000>;
+ regulator-always-on;
+ };
+
+ vdd_ldo4: ldo4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microamp = <200000>;
+ regulator-max-microamp = <200000>;
+ regulator-always-on;
+ };
+
+ vdd_ldo5: ldo5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microamp = <100000>;
+ regulator-max-microamp = <100000>;
+ regulator-always-on;
+ };
+
+ vdd_ldo6: ldo6 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microamp = <200000>;
+ regulator-max-microamp = <200000>;
+ regulator-always-on;
+ };
+
+ vdd_ldo7: ldo7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microamp = <200000>;
+ regulator-max-microamp = <200000>;
+ regulator-always-on;
+ };
+
+ vdd_ldo8: ldo8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microamp = <200000>;
+ regulator-max-microamp = <200000>;
+ regulator-always-on;
+ };
+
+ vdd_ldo9: ldo9 {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-min-microamp = <200000>;
+ regulator-max-microamp = <200000>;
+ };
+
+ vdd_ldo10: ldo10 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-min-microamp = <300000>;
+ regulator-max-microamp = <300000>;
+ };
+
+ vdd_ldo11: ldo11 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-min-microamp = <300000>;
+ regulator-max-microamp = <300000>;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&qspi0 {
+ status = "okay";
+ flash@0 {
+ compatible = "issi,is25wp256", "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+ m25p,fast-read;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ };
+};
+
+&spi0 {
+ status = "okay";
+ mmc@0 {
+ compatible = "mmc-spi-slot";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ voltage-ranges = <3300 3300>;
+ disable-wp;
+ };
+};
+
+&eth0 {
+ status = "okay";
+ phy-mode = "gmii";
+ phy-handle = <&phy0>;
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+};
+
+&pwm0 {
+ status = "okay";
+};
+
+&pwm1 {
+ status = "okay";
+};
+
+&gpio {
+ status = "okay";
+};
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 8c3d1e451703..6c0625aa96c7 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -17,6 +17,7 @@ CONFIG_BPF_SYSCALL=y
CONFIG_SOC_SIFIVE=y
CONFIG_SOC_VIRT=y
CONFIG_SMP=y
+CONFIG_HOTPLUG_CPU=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig
index cd1df62b13c7..b16a2a12c82a 100644
--- a/arch/riscv/configs/nommu_k210_defconfig
+++ b/arch/riscv/configs/nommu_k210_defconfig
@@ -1,17 +1,19 @@
# CONFIG_CPU_ISOLATION is not set
-CONFIG_LOG_BUF_SHIFT=15
+CONFIG_LOG_BUF_SHIFT=13
CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_FORCE=y
+# CONFIG_RD_GZIP is not set
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
+# CONFIG_RD_ZSTD is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSFS_SYSCALL is not set
# CONFIG_FHANDLE is not set
# CONFIG_BASE_FULL is not set
+# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
# CONFIG_TIMERFD is not set
@@ -25,15 +27,17 @@ CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLOB=y
-# CONFIG_SLAB_MERGE_DEFAULT is not set
# CONFIG_MMU is not set
-CONFIG_SOC_KENDRYTE=y
+CONFIG_SOC_CANAAN=y
+CONFIG_SOC_CANAAN_K210_DTB_SOURCE="k210_generic"
CONFIG_MAXPHYSMEM_2GB=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_CMDLINE="earlycon console=ttySIF0"
CONFIG_CMDLINE_FORCE=y
-CONFIG_JUMP_LABEL=y
+# CONFIG_SECCOMP is not set
+# CONFIG_STACKPROTECTOR is not set
+# CONFIG_GCC_PLUGINS is not set
# CONFIG_BLOCK is not set
CONFIG_BINFMT_FLAT=y
# CONFIG_COREDUMP is not set
@@ -41,23 +45,47 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FW_LOADER is not set
# CONFIG_ALLOW_DEV_COREDUMP is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_LDISC_AUTOLOAD is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_DEVMEM is not set
+CONFIG_I2C=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_SPI=y
+# CONFIG_SPI_MEM is not set
+CONFIG_SPI_DESIGNWARE=y
+CONFIG_SPI_DW_MMIO=y
+# CONFIG_GPIO_CDEV_V1 is not set
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_SIFIVE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_USER=y
# CONFIG_VIRTIO_MENU is not set
+# CONFIG_VHOST_MENU is not set
+# CONFIG_SURFACE_PLATFORMS is not set
+# CONFIG_FILE_LOCKING is not set
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_LSM="[]"
CONFIG_PRINTK_TIME=y
+# CONFIG_SYMBOLIC_ERRNAME is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+# CONFIG_FRAME_POINTER is not set
# CONFIG_DEBUG_MISC is not set
CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig
new file mode 100644
index 000000000000..61f887f65419
--- /dev/null
+++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig
@@ -0,0 +1,92 @@
+# CONFIG_CPU_ISOLATION is not set
+CONFIG_LOG_BUF_SHIFT=13
+CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_SYSFS_SYSCALL is not set
+# CONFIG_FHANDLE is not set
+# CONFIG_BASE_FULL is not set
+# CONFIG_FUTEX is not set
+# CONFIG_EPOLL is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+# CONFIG_IO_URING is not set
+# CONFIG_ADVISE_SYSCALLS is not set
+# CONFIG_MEMBARRIER is not set
+# CONFIG_KALLSYMS is not set
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLOB=y
+# CONFIG_MMU is not set
+CONFIG_SOC_CANAAN=y
+CONFIG_SOC_CANAAN_K210_DTB_SOURCE="k210_generic"
+CONFIG_MAXPHYSMEM_2GB=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_CMDLINE="earlycon console=ttySIF0 rootdelay=2 root=/dev/mmcblk0p1 ro"
+CONFIG_CMDLINE_FORCE=y
+# CONFIG_SECCOMP is not set
+# CONFIG_STACKPROTECTOR is not set
+# CONFIG_GCC_PLUGINS is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_MQ_IOSCHED_DEADLINE is not set
+# CONFIG_MQ_IOSCHED_KYBER is not set
+CONFIG_BINFMT_FLAT=y
+# CONFIG_COREDUMP is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+# CONFIG_BLK_DEV is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_LDISC_AUTOLOAD is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_DEVMEM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_SPI=y
+# CONFIG_SPI_MEM is not set
+CONFIG_SPI_DESIGNWARE=y
+CONFIG_SPI_DW_MMIO=y
+# CONFIG_GPIO_CDEV_V1 is not set
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_SIFIVE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
+CONFIG_MMC_SPI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_USER=y
+# CONFIG_VIRTIO_MENU is not set
+# CONFIG_VHOST_MENU is not set
+# CONFIG_SURFACE_PLATFORMS is not set
+CONFIG_EXT2_FS=y
+# CONFIG_FILE_LOCKING is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_LSM="[]"
+CONFIG_PRINTK_TIME=y
+# CONFIG_SYMBOLIC_ERRNAME is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_DEBUG_MISC is not set
+CONFIG_PANIC_ON_OOPS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_FTRACE is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
index 2c2cda6cc1c5..8dd02b842fef 100644
--- a/arch/riscv/configs/rv32_defconfig
+++ b/arch/riscv/configs/rv32_defconfig
@@ -18,6 +18,7 @@ CONFIG_SOC_SIFIVE=y
CONFIG_SOC_VIRT=y
CONFIG_ARCH_RV32I=y
CONFIG_SMP=y
+CONFIG_HOTPLUG_CPU=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/riscv/include/asm/bug.h b/arch/riscv/include/asm/bug.h
index d6f1ec08d97b..d3804a2f9aad 100644
--- a/arch/riscv/include/asm/bug.h
+++ b/arch/riscv/include/asm/bug.h
@@ -85,6 +85,7 @@ do { \
struct pt_regs;
struct task_struct;
+void __show_regs(struct pt_regs *regs);
void die(struct pt_regs *regs, const char *str);
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr);
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index cec462e198ce..caadfc1d7487 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -41,10 +41,16 @@
#define SATP_PPN _AC(0x003FFFFF, UL)
#define SATP_MODE_32 _AC(0x80000000, UL)
#define SATP_MODE SATP_MODE_32
+#define SATP_ASID_BITS 9
+#define SATP_ASID_SHIFT 22
+#define SATP_ASID_MASK _AC(0x1FF, UL)
#else
#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL)
#define SATP_MODE_39 _AC(0x8000000000000000, UL)
#define SATP_MODE SATP_MODE_39
+#define SATP_ASID_BITS 16
+#define SATP_ASID_SHIFT 44
+#define SATP_ASID_MASK _AC(0xFFFF, UL)
#endif
/* Exception cause high bit - is an interrupt if set */
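Note: the SATP_ASID_* definitions above feed the ASID-based context handling added elsewhere in this series. As a rough illustration only (make_satp() below is a hypothetical helper, not part of this patch), a satp value for a 64-bit Sv39 kernel would be composed from these fields like this:

    #include <stdint.h>

    #define SATP_MODE_39    0x8000000000000000UL
    #define SATP_ASID_SHIFT 44
    #define SATP_ASID_MASK  0xFFFFUL
    #define SATP_PPN        0x00000FFFFFFFFFFFUL

    /* Combine mode, ASID and root page-table PPN into a satp value. */
    static inline uint64_t make_satp(uint64_t root_ppn, uint64_t asid)
    {
            return SATP_MODE_39 |
                   ((asid & SATP_ASID_MASK) << SATP_ASID_SHIFT) |
                   (root_ppn & SATP_PPN);
    }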
diff --git a/arch/riscv/include/asm/kasan.h b/arch/riscv/include/asm/kasan.h
index b04028c6218c..a2b3d9cdbc86 100644
--- a/arch/riscv/include/asm/kasan.h
+++ b/arch/riscv/include/asm/kasan.h
@@ -8,12 +8,28 @@
#ifdef CONFIG_KASAN
+/*
+ * The following comment was copied from arm64:
+ * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
+ * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
+ * where N = (1 << KASAN_SHADOW_SCALE_SHIFT).
+ *
+ * KASAN_SHADOW_OFFSET:
+ * This value is used to map an address to the corresponding shadow
+ * address by the following formula:
+ * shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
+ *
+ * (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) shadow addresses that lie in range
+ * [KASAN_SHADOW_OFFSET, KASAN_SHADOW_END) cover all 64-bits of virtual
+ * addresses. So KASAN_SHADOW_OFFSET should satisfy the following equation:
+ * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END -
+ * (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
+ */
#define KASAN_SHADOW_SCALE_SHIFT 3
-#define KASAN_SHADOW_SIZE (UL(1) << (38 - KASAN_SHADOW_SCALE_SHIFT))
-#define KASAN_SHADOW_START KERN_VIRT_START /* 2^64 - 2^38 */
+#define KASAN_SHADOW_SIZE (UL(1) << ((CONFIG_VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT))
+#define KASAN_SHADOW_START KERN_VIRT_START
#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
-
#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << \
(64 - KASAN_SHADOW_SCALE_SHIFT)))
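A minimal sketch of the shadow-mapping formula quoted in the comment above; kasan_mem_to_shadow() here is illustrative only (the kernel's helper uses the KASAN_SHADOW_OFFSET macro directly rather than taking the offset as a parameter):

    #include <stdint.h>

    #define KASAN_SHADOW_SCALE_SHIFT 3

    /* shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET */
    static inline uintptr_t kasan_mem_to_shadow(uintptr_t address, uintptr_t shadow_offset)
    {
            return (address >> KASAN_SHADOW_SCALE_SHIFT) + shadow_offset;
    }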
diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h
index 56a98ea30731..4647d38018f6 100644
--- a/arch/riscv/include/asm/kprobes.h
+++ b/arch/riscv/include/asm/kprobes.h
@@ -11,4 +11,44 @@
#include <asm-generic/kprobes.h>
+#ifdef CONFIG_KPROBES
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE 2
+
+#define flush_insn_slot(p) do { } while (0)
+#define kretprobe_blacklist_size 0
+
+#include <asm/probes.h>
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned int status;
+};
+
+/* Single step context for kprobe */
+struct kprobe_step_ctx {
+ unsigned long ss_pending;
+ unsigned long match_addr;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ unsigned long saved_status;
+ struct prev_kprobe prev_kprobe;
+ struct kprobe_step_ctx ss_ctx;
+};
+
+void arch_remove_kprobe(struct kprobe *p);
+int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr);
+bool kprobe_breakpoint_handler(struct pt_regs *regs);
+bool kprobe_single_step_handler(struct pt_regs *regs);
+void kretprobe_trampoline(void);
+void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
+
+#endif /* CONFIG_KPROBES */
#endif /* _ASM_RISCV_KPROBES_H */
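For context, the prev_kprobe slot in kprobe_ctlblk is the usual mechanism for surviving a reentrant probe hit. A sketch modelled on other architectures (not a verbatim copy of the RISC-V implementation) looks like:

    #include <linux/kprobes.h>

    /* Stash the currently active kprobe before handling a nested hit. */
    static void save_previous_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *cur)
    {
            kcb->prev_kprobe.kp = cur;
            kcb->prev_kprobe.status = kcb->kprobe_status;
    }

    /* Restore it once the nested probe has been handled. */
    static void restore_previous_kprobe(struct kprobe_ctlblk *kcb, struct kprobe **cur)
    {
            *cur = kcb->prev_kprobe.kp;
            kcb->kprobe_status = kcb->prev_kprobe.status;
    }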
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index dabcf2cfb3dc..0099dc116168 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -12,6 +12,8 @@
typedef struct {
#ifndef CONFIG_MMU
unsigned long end_brk;
+#else
+ atomic_long_t id;
#endif
void *vdso;
#ifdef CONFIG_SMP
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 250defa06f3a..b0659413a080 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -23,6 +23,16 @@ static inline void activate_mm(struct mm_struct *prev,
switch_mm(prev, next, NULL);
}
+#define init_new_context init_new_context
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+#ifdef CONFIG_MMU
+ atomic_long_set(&mm->context.id, 0);
+#endif
+ return 0;
+}
+
#include <asm-generic/mmu_context.h>
#endif /* _ASM_RISCV_MMU_CONTEXT_H */
diff --git a/arch/riscv/include/asm/mmzone.h b/arch/riscv/include/asm/mmzone.h
new file mode 100644
index 000000000000..fa17e01d9ab2
--- /dev/null
+++ b/arch/riscv/include/asm/mmzone.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMZONE_H
+#define __ASM_MMZONE_H
+
+#ifdef CONFIG_NUMA
+
+#include <asm/numa.h>
+
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid) (node_data[(nid)])
+
+#endif /* CONFIG_NUMA */
+#endif /* __ASM_MMZONE_H */
diff --git a/arch/riscv/include/asm/numa.h b/arch/riscv/include/asm/numa.h
new file mode 100644
index 000000000000..8c8cf4297cc3
--- /dev/null
+++ b/arch/riscv/include/asm/numa.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_NUMA_H
+#define __ASM_NUMA_H
+
+#include <asm/topology.h>
+#include <asm-generic/numa.h>
+
+#endif /* __ASM_NUMA_H */
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 2d50f76efe48..adc9d26f3d75 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -97,9 +97,6 @@ extern unsigned long pfn_base;
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
#endif /* CONFIG_MMU */
-extern unsigned long max_low_pfn;
-extern unsigned long min_low_pfn;
-
#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
#define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
@@ -135,7 +132,10 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#endif /* __ASSEMBLY__ */
-#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
+#define virt_addr_valid(vaddr) ({ \
+ unsigned long _addr = (unsigned long)vaddr; \
+ (unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
+})
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
diff --git a/arch/riscv/include/asm/pci.h b/arch/riscv/include/asm/pci.h
index 1c473a1bd986..658e112c3ce7 100644
--- a/arch/riscv/include/asm/pci.h
+++ b/arch/riscv/include/asm/pci.h
@@ -32,6 +32,20 @@ static inline int pci_proc_domain(struct pci_bus *bus)
/* always show the domain in /proc */
return 1;
}
+
+#ifdef CONFIG_NUMA
+
+static inline int pcibus_to_node(struct pci_bus *bus)
+{
+ return dev_to_node(&bus->dev);
+}
+#ifndef cpumask_of_pcibus
+#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
+ cpu_all_mask : \
+ cpumask_of_node(pcibus_to_node(bus)))
+#endif
+#endif /* CONFIG_NUMA */
+
#endif /* CONFIG_PCI */
#endif /* _ASM_RISCV_PCI_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 251e1db088fa..ebf817c1bdf4 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -186,6 +186,11 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+ return __pte(pmd_val(pmd));
+}
+
/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
@@ -289,6 +294,21 @@ static inline pte_t pte_mkhuge(pte_t pte)
return pte;
}
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * See the comment in include/asm-generic/pgtable.h
+ */
+static inline int pte_protnone(pte_t pte)
+{
+ return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+ return pte_protnone(pmd_pte(pmd));
+}
+#endif
+
/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
@@ -468,6 +488,7 @@ extern void *dtb_early_va;
extern uintptr_t dtb_early_pa;
void setup_bootmem(void);
void paging_init(void);
+void misc_mem_init(void);
#define FIRST_USER_ADDRESS 0
diff --git a/arch/riscv/include/asm/probes.h b/arch/riscv/include/asm/probes.h
new file mode 100644
index 000000000000..a787e6d537b9
--- /dev/null
+++ b/arch/riscv/include/asm/probes.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_PROBES_H
+#define _ASM_RISCV_PROBES_H
+
+typedef u32 probe_opcode_t;
+typedef bool (probes_handler_t) (u32 opcode, unsigned long addr, struct pt_regs *);
+
+/* architecture specific copy of original instruction */
+struct arch_probe_insn {
+ probe_opcode_t *insn;
+ probes_handler_t *handler;
+ /* restore address after simulation */
+ unsigned long restore;
+};
+
+#ifdef CONFIG_KPROBES
+typedef u32 kprobe_opcode_t;
+struct arch_specific_insn {
+ struct arch_probe_insn api;
+};
+#endif
+
+#endif /* _ASM_RISCV_PROBES_H */
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index bdddcd5c1b71..3a240037bde2 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -34,6 +34,7 @@ struct thread_struct {
unsigned long sp; /* Kernel mode stack */
unsigned long s[12]; /* s[0]: frame pointer */
struct __riscv_d_ext_state fstate;
+ unsigned long bad_cause;
};
#define INIT_THREAD { \
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index ee49f80c9533..cb4abb639e8d 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -8,6 +8,7 @@
#include <uapi/asm/ptrace.h>
#include <asm/csr.h>
+#include <linux/compiler.h>
#ifndef __ASSEMBLY__
@@ -60,6 +61,7 @@ struct pt_regs {
#define user_mode(regs) (((regs)->status & SR_PP) == 0)
+#define MAX_REG_OFFSET offsetof(struct pt_regs, orig_a0)
/* Helpers for working with the instruction pointer */
static inline unsigned long instruction_pointer(struct pt_regs *regs)
@@ -85,6 +87,12 @@ static inline void user_stack_pointer_set(struct pt_regs *regs,
regs->sp = val;
}
+/* Valid only for Kernel mode traps. */
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->sp;
+}
+
/* Helpers for working with the frame pointer */
static inline unsigned long frame_pointer(struct pt_regs *regs)
{
@@ -101,6 +109,33 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
return regs->a0;
}
+static inline void regs_set_return_value(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->a0 = val;
+}
+
+extern int regs_query_register_offset(const char *name);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n);
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which register value is gotten
+ * @offset: offset of the register.
+ *
+ * regs_get_register() returns the value of the register located at @offset within @regs.
+ * The @offset is the offset of the register in struct pt_regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_PTRACE_H */
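A hedged usage sketch of the new accessors; the consumer function below is hypothetical, while regs_query_register_offset() and regs_get_register() are the interfaces added by this hunk:

    #include <asm/ptrace.h>

    /* Hypothetical consumer: read a register by its ABI name, e.g. "a0" or "sp". */
    static unsigned long read_named_reg(struct pt_regs *regs, const char *name)
    {
            int offset = regs_query_register_offset(name);

            if (offset < 0)
                    return 0;
            return regs_get_register(regs, offset);
    }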
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 653edb25d495..99895d9c3bdd 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -89,7 +89,7 @@ struct sbiret {
long value;
};
-int sbi_init(void);
+void sbi_init(void);
struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4,
@@ -100,13 +100,13 @@ int sbi_console_getchar(void);
void sbi_set_timer(uint64_t stime_value);
void sbi_shutdown(void);
void sbi_clear_ipi(void);
-void sbi_send_ipi(const unsigned long *hart_mask);
-void sbi_remote_fence_i(const unsigned long *hart_mask);
-void sbi_remote_sfence_vma(const unsigned long *hart_mask,
+int sbi_send_ipi(const unsigned long *hart_mask);
+int sbi_remote_fence_i(const unsigned long *hart_mask);
+int sbi_remote_sfence_vma(const unsigned long *hart_mask,
unsigned long start,
unsigned long size);
-void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
+int sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
unsigned long start,
unsigned long size,
unsigned long asid);
@@ -147,11 +147,7 @@ static inline unsigned long sbi_minor_version(void)
int sbi_err_map_linux_errno(int err);
#else /* CONFIG_RISCV_SBI */
-/* stubs for code that is only reachable under IS_ENABLED(CONFIG_RISCV_SBI): */
-void sbi_set_timer(uint64_t stime_value);
-void sbi_clear_ipi(void);
-void sbi_send_ipi(const unsigned long *hart_mask);
-void sbi_remote_fence_i(const unsigned long *hart_mask);
-void sbi_init(void);
+static inline int sbi_remote_fence_i(const unsigned long *hart_mask) { return -1; }
+static inline void sbi_init(void) {}
#endif /* CONFIG_RISCV_SBI */
#endif /* _ASM_RISCV_SBI_H */
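Since the remote fence helpers now return an error code (and the !CONFIG_RISCV_SBI stub returns -1), callers can check the result. A hypothetical caller, not part of this patch, might look like:

    #include <linux/printk.h>
    #include <asm/sbi.h>

    static void remote_fence_i_checked(const unsigned long *hart_mask)
    {
            int ret = sbi_remote_fence_i(hart_mask);

            if (ret)
                    pr_warn("sbi_remote_fence_i() failed: %d\n", ret);
    }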
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index 211eb8244a45..6887b3d9f371 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -22,7 +22,7 @@ static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
-static inline void protect_kernel_text_data(void) {};
+static inline void protect_kernel_text_data(void) {}
static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
#endif
@@ -32,14 +32,14 @@ bool kernel_page_present(struct page *page);
#endif /* __ASSEMBLY__ */
-#ifdef CONFIG_ARCH_HAS_STRICT_KERNEL_RWX
+#ifdef CONFIG_STRICT_KERNEL_RWX
#ifdef CONFIG_64BIT
#define SECTION_ALIGN (1 << 21)
#else
#define SECTION_ALIGN (1 << 22)
#endif
-#else /* !CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
+#else /* !CONFIG_STRICT_KERNEL_RWX */
#define SECTION_ALIGN L1_CACHE_BYTES
-#endif /* CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
+#endif /* CONFIG_STRICT_KERNEL_RWX */
#endif /* _ASM_RISCV_SET_MEMORY_H */
diff --git a/arch/riscv/include/asm/soc.h b/arch/riscv/include/asm/soc.h
index 6c8363b1f327..f494066051a2 100644
--- a/arch/riscv/include/asm/soc.h
+++ b/arch/riscv/include/asm/soc.h
@@ -21,42 +21,4 @@ void soc_early_init(void);
extern unsigned long __soc_early_init_table_start;
extern unsigned long __soc_early_init_table_end;
-/*
- * Allows Linux to provide a device tree, which is necessary for SOCs that
- * don't provide a useful one on their own.
- */
-struct soc_builtin_dtb {
- unsigned long vendor_id;
- unsigned long arch_id;
- unsigned long imp_id;
- void *(*dtb_func)(void);
-};
-
-/*
- * The argument name must specify a valid DTS file name without the dts
- * extension.
- */
-#define SOC_BUILTIN_DTB_DECLARE(name, vendor, arch, impl) \
- extern void *__dtb_##name##_begin; \
- \
- static __init __used \
- void *__soc_builtin_dtb_f__##name(void) \
- { \
- return (void *)&__dtb_##name##_begin; \
- } \
- \
- static const struct soc_builtin_dtb __soc_builtin_dtb__##name \
- __used __section("__soc_builtin_dtb_table") = \
- { \
- .vendor_id = vendor, \
- .arch_id = arch, \
- .imp_id = impl, \
- .dtb_func = __soc_builtin_dtb_f__##name, \
- }
-
-extern unsigned long __soc_builtin_dtb_table_start;
-extern unsigned long __soc_builtin_dtb_table_end;
-
-void *soc_lookup_builtin_dtb(void);
-
#endif
diff --git a/arch/riscv/include/asm/stackprotector.h b/arch/riscv/include/asm/stackprotector.h
index 5962f8891f06..09093af46565 100644
--- a/arch/riscv/include/asm/stackprotector.h
+++ b/arch/riscv/include/asm/stackprotector.h
@@ -24,6 +24,7 @@ static __always_inline void boot_init_stack_canary(void)
canary &= CANARY_MASK;
current->stack_canary = canary;
- __stack_chk_guard = current->stack_canary;
+ if (!IS_ENABLED(CONFIG_STACKPROTECTOR_PER_TASK))
+ __stack_chk_guard = current->stack_canary;
}
#endif /* _ASM_RISCV_STACKPROTECTOR_H */
diff --git a/arch/riscv/include/asm/stacktrace.h b/arch/riscv/include/asm/stacktrace.h
index 470a65c4ccdc..3450c1912afd 100644
--- a/arch/riscv/include/asm/stacktrace.h
+++ b/arch/riscv/include/asm/stacktrace.h
@@ -13,5 +13,7 @@ struct stackframe {
extern void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
bool (*fn)(void *, unsigned long), void *arg);
+extern void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
+ const char *loglvl);
#endif /* _ASM_RISCV_STACKTRACE_H */
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 97bf5a1575d2..0e549a3089b3 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -75,6 +75,7 @@ struct thread_info {
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing */
#define TIF_SECCOMP 8 /* syscall secure computing */
#define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */
+#define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -84,10 +85,11 @@ struct thread_info {
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
+#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_WORK_MASK \
(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_SIGNAL)
+ _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
#define _TIF_SYSCALL_WORK \
(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT | \
diff --git a/arch/riscv/include/asm/uprobes.h b/arch/riscv/include/asm/uprobes.h
new file mode 100644
index 000000000000..f2183e00fdd2
--- /dev/null
+++ b/arch/riscv/include/asm/uprobes.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_RISCV_UPROBES_H
+#define _ASM_RISCV_UPROBES_H
+
+#include <asm/probes.h>
+#include <asm/patch.h>
+#include <asm/bug.h>
+
+#define MAX_UINSN_BYTES 8
+
+#ifdef CONFIG_RISCV_ISA_C
+#define UPROBE_SWBP_INSN __BUG_INSN_16
+#define UPROBE_SWBP_INSN_SIZE 2
+#else
+#define UPROBE_SWBP_INSN __BUG_INSN_32
+#define UPROBE_SWBP_INSN_SIZE 4
+#endif
+#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
+
+typedef u32 uprobe_opcode_t;
+
+struct arch_uprobe_task {
+ unsigned long saved_cause;
+};
+
+struct arch_uprobe {
+ union {
+ u8 insn[MAX_UINSN_BYTES];
+ u8 ixol[MAX_UINSN_BYTES];
+ };
+ struct arch_probe_insn api;
+ unsigned long insn_size;
+ bool simulate;
+};
+
+bool uprobe_breakpoint_handler(struct pt_regs *regs);
+bool uprobe_single_step_handler(struct pt_regs *regs);
+
+#endif /* _ASM_RISCV_UPROBES_H */
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index f6caf4d9ca15..3dc0abde988a 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -4,8 +4,9 @@
#
ifdef CONFIG_FTRACE
-CFLAGS_REMOVE_ftrace.o = -pg
-CFLAGS_REMOVE_patch.o = -pg
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
endif
extra-y += head.o
@@ -29,6 +30,7 @@ obj-y += riscv_ksyms.o
obj-y += stacktrace.o
obj-y += cacheinfo.o
obj-y += patch.o
+obj-y += probes/
obj-$(CONFIG_MMU) += vdso.o vdso/
obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index b79ffa3561fd..9ef33346853c 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -68,6 +68,9 @@ void asm_offsets(void)
OFFSET(TASK_THREAD_F30, task_struct, thread.fstate.f[30]);
OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]);
OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);
+#ifdef CONFIG_STACKPROTECTOR
+ OFFSET(TSK_STACK_CANARY, task_struct, stack_canary);
+#endif
DEFINE(PT_SIZE, sizeof(struct pt_regs));
OFFSET(PT_EPC, pt_regs, epc);
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 765b62434f30..7f1e5203de88 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -72,29 +72,56 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
return 0;
}
+/*
+ * Put 4 instructions (16 bytes) at the front of the function, within the
+ * patchable function entry nops' area.
+ *
+ * 0: REG_S ra, -SZREG(sp)
+ * 1: auipc ra, 0x?
+ * 2: jalr -?(ra)
+ * 3: REG_L ra, -SZREG(sp)
+ *
+ * So the opcodes are:
+ * 0: 0xfe113c23 (sd)/0xfe112e23 (sw)
+ * 1: 0x???????? -> auipc
+ * 2: 0x???????? -> jalr
+ * 3: 0xff813083 (ld)/0xffc12083 (lw)
+ */
+#if __riscv_xlen == 64
+#define INSN0 0xfe113c23
+#define INSN3 0xff813083
+#elif __riscv_xlen == 32
+#define INSN0 0xfe112e23
+#define INSN3 0xffc12083
+#endif
+
+#define FUNC_ENTRY_SIZE 16
+#define FUNC_ENTRY_JMP 4
+
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
- int ret = ftrace_check_current_call(rec->ip, NULL);
+ unsigned int call[4] = {INSN0, 0, 0, INSN3};
+ unsigned long target = addr;
+ unsigned long caller = rec->ip + FUNC_ENTRY_JMP;
- if (ret)
- return ret;
+ call[1] = to_auipc_insn((unsigned int)(target - caller));
+ call[2] = to_jalr_insn((unsigned int)(target - caller));
- return __ftrace_modify_call(rec->ip, addr, true);
+ if (patch_text_nosync((void *)rec->ip, call, FUNC_ENTRY_SIZE))
+ return -EPERM;
+
+ return 0;
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
- unsigned int call[2];
- int ret;
+ unsigned int nops[4] = {NOP4, NOP4, NOP4, NOP4};
- make_call(rec->ip, addr, call);
- ret = ftrace_check_current_call(rec->ip, call);
-
- if (ret)
- return ret;
+ if (patch_text_nosync((void *)rec->ip, nops, FUNC_ENTRY_SIZE))
+ return -EPERM;
- return __ftrace_modify_call(rec->ip, addr, false);
+ return 0;
}
@@ -139,15 +166,16 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
unsigned int call[2];
+ unsigned long caller = rec->ip + FUNC_ENTRY_JMP;
int ret;
- make_call(rec->ip, old_addr, call);
- ret = ftrace_check_current_call(rec->ip, call);
+ make_call(caller, old_addr, call);
+ ret = ftrace_check_current_call(caller, call);
if (ret)
return ret;
- return __ftrace_modify_call(rec->ip, addr, true);
+ return __ftrace_modify_call(caller, addr, true);
}
#endif
@@ -176,53 +204,30 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
+extern void ftrace_graph_regs_call(void);
int ftrace_enable_ftrace_graph_caller(void)
{
- unsigned int call[2];
- static int init_graph = 1;
int ret;
- make_call(&ftrace_graph_call, &ftrace_stub, call);
-
- /*
- * When enabling graph tracer for the first time, ftrace_graph_call
- * should contains a call to ftrace_stub. Once it has been disabled,
- * the 8-bytes at the position becomes NOPs.
- */
- if (init_graph) {
- ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
- call);
- init_graph = 0;
- } else {
- ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
- NULL);
- }
-
+ ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
+ (unsigned long)&prepare_ftrace_return, true);
if (ret)
return ret;
- return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
+ return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
(unsigned long)&prepare_ftrace_return, true);
}
int ftrace_disable_ftrace_graph_caller(void)
{
- unsigned int call[2];
int ret;
- make_call(&ftrace_graph_call, &prepare_ftrace_return, call);
-
- /*
- * This is to make sure that ftrace_enable_ftrace_graph_caller
- * did the right thing.
- */
- ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
- call);
-
+ ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
+ (unsigned long)&prepare_ftrace_return, false);
if (ret)
return ret;
- return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
+ return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
(unsigned long)&prepare_ftrace_return, false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
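The auipc/jalr pair patched into the function entry encodes a 32-bit PC-relative offset split across the two instructions. The sketch below illustrates the split that the kernel's to_auipc_insn()/to_jalr_insn() helpers must account for, since jalr sign-extends its 12-bit immediate; it is an illustration, not the kernel code itself:

    #include <stdint.h>

    /* Split a PC-relative offset into auipc (upper) and jalr (lower) immediates. */
    static void split_offset(int32_t offset, int32_t *hi20, int32_t *lo12)
    {
            *lo12 = offset & 0xfff;
            if (*lo12 & 0x800)              /* jalr sign-extends bit 11 ...        */
                    *lo12 -= 0x1000;        /* ... so round the upper part up      */
            *hi20 = offset - *lo12;         /* multiple of 0x1000; auipc imm << 12 */
    }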
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 16e9941900c4..f5a9bad86e58 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -260,7 +260,11 @@ clear_bss_done:
/* Initialize page tables and relocate to virtual addresses */
la sp, init_thread_union + THREAD_SIZE
+#ifdef CONFIG_BUILTIN_DTB
+ la a0, __dtb_start
+#else
mv a0, s1
+#endif /* CONFIG_BUILTIN_DTB */
call setup_vm
#ifdef CONFIG_MMU
la a0, early_pg_dir
diff --git a/arch/riscv/kernel/image-vars.h b/arch/riscv/kernel/image-vars.h
index 8c212efb37a6..71a76a623257 100644
--- a/arch/riscv/kernel/image-vars.h
+++ b/arch/riscv/kernel/image-vars.h
@@ -3,7 +3,7 @@
* Copyright (C) 2020 Western Digital Corporation or its affiliates.
* Linker script variables to be set after section resolution, as
* ld.lld does not like variables assigned before SECTIONS is processed.
- * Based on arch/arm64/kerne/image-vars.h
+ * Based on arch/arm64/kernel/image-vars.h
*/
#ifndef __RISCV_KERNEL_IMAGE_VARS_H
#define __RISCV_KERNEL_IMAGE_VARS_H
diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S
index 35a6ed76cb8b..d171eca623b6 100644
--- a/arch/riscv/kernel/mcount-dyn.S
+++ b/arch/riscv/kernel/mcount-dyn.S
@@ -13,224 +13,186 @@
.text
- .macro SAVE_ABI_STATE
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- addi sp, sp, -48
- sd s0, 32(sp)
- sd ra, 40(sp)
- addi s0, sp, 48
- sd t0, 24(sp)
- sd t1, 16(sp)
-#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
- sd t2, 8(sp)
-#endif
-#else
- addi sp, sp, -16
- sd s0, 0(sp)
- sd ra, 8(sp)
- addi s0, sp, 16
-#endif
+#define FENTRY_RA_OFFSET 12
+#define ABI_SIZE_ON_STACK 72
+#define ABI_A0 0
+#define ABI_A1 8
+#define ABI_A2 16
+#define ABI_A3 24
+#define ABI_A4 32
+#define ABI_A5 40
+#define ABI_A6 48
+#define ABI_A7 56
+#define ABI_RA 64
+
+ .macro SAVE_ABI
+ addi sp, sp, -SZREG
+ addi sp, sp, -ABI_SIZE_ON_STACK
+
+ REG_S a0, ABI_A0(sp)
+ REG_S a1, ABI_A1(sp)
+ REG_S a2, ABI_A2(sp)
+ REG_S a3, ABI_A3(sp)
+ REG_S a4, ABI_A4(sp)
+ REG_S a5, ABI_A5(sp)
+ REG_S a6, ABI_A6(sp)
+ REG_S a7, ABI_A7(sp)
+ REG_S ra, ABI_RA(sp)
.endm
- .macro RESTORE_ABI_STATE
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- ld s0, 32(sp)
- ld ra, 40(sp)
- addi sp, sp, 48
-#else
- ld ra, 8(sp)
- ld s0, 0(sp)
- addi sp, sp, 16
-#endif
+ .macro RESTORE_ABI
+ REG_L a0, ABI_A0(sp)
+ REG_L a1, ABI_A1(sp)
+ REG_L a2, ABI_A2(sp)
+ REG_L a3, ABI_A3(sp)
+ REG_L a4, ABI_A4(sp)
+ REG_L a5, ABI_A5(sp)
+ REG_L a6, ABI_A6(sp)
+ REG_L a7, ABI_A7(sp)
+ REG_L ra, ABI_RA(sp)
+
+ addi sp, sp, ABI_SIZE_ON_STACK
+ addi sp, sp, SZREG
.endm
- .macro RESTORE_GRAPH_ARGS
- ld a0, 24(sp)
- ld a1, 16(sp)
-#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
- ld a2, 8(sp)
-#endif
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ .macro SAVE_ALL
+ addi sp, sp, -SZREG
+ addi sp, sp, -PT_SIZE_ON_STACK
+
+ REG_S x1, PT_EPC(sp)
+ addi sp, sp, PT_SIZE_ON_STACK
+ REG_L x1, (sp)
+ addi sp, sp, -PT_SIZE_ON_STACK
+ REG_S x1, PT_RA(sp)
+ REG_L x1, PT_EPC(sp)
+
+ REG_S x2, PT_SP(sp)
+ REG_S x3, PT_GP(sp)
+ REG_S x4, PT_TP(sp)
+ REG_S x5, PT_T0(sp)
+ REG_S x6, PT_T1(sp)
+ REG_S x7, PT_T2(sp)
+ REG_S x8, PT_S0(sp)
+ REG_S x9, PT_S1(sp)
+ REG_S x10, PT_A0(sp)
+ REG_S x11, PT_A1(sp)
+ REG_S x12, PT_A2(sp)
+ REG_S x13, PT_A3(sp)
+ REG_S x14, PT_A4(sp)
+ REG_S x15, PT_A5(sp)
+ REG_S x16, PT_A6(sp)
+ REG_S x17, PT_A7(sp)
+ REG_S x18, PT_S2(sp)
+ REG_S x19, PT_S3(sp)
+ REG_S x20, PT_S4(sp)
+ REG_S x21, PT_S5(sp)
+ REG_S x22, PT_S6(sp)
+ REG_S x23, PT_S7(sp)
+ REG_S x24, PT_S8(sp)
+ REG_S x25, PT_S9(sp)
+ REG_S x26, PT_S10(sp)
+ REG_S x27, PT_S11(sp)
+ REG_S x28, PT_T3(sp)
+ REG_S x29, PT_T4(sp)
+ REG_S x30, PT_T5(sp)
+ REG_S x31, PT_T6(sp)
.endm
-ENTRY(ftrace_graph_caller)
- addi sp, sp, -16
- sd s0, 0(sp)
- sd ra, 8(sp)
- addi s0, sp, 16
-ftrace_graph_call:
- .global ftrace_graph_call
- /*
- * Calling ftrace_enable/disable_ftrace_graph_caller would overwrite the
- * call below. Check ftrace_modify_all_code for details.
- */
- call ftrace_stub
- ld ra, 8(sp)
- ld s0, 0(sp)
- addi sp, sp, 16
- ret
-ENDPROC(ftrace_graph_caller)
+ .macro RESTORE_ALL
+ REG_L x1, PT_RA(sp)
+ addi sp, sp, PT_SIZE_ON_STACK
+ REG_S x1, (sp)
+ addi sp, sp, -PT_SIZE_ON_STACK
+ REG_L x1, PT_EPC(sp)
+ REG_L x2, PT_SP(sp)
+ REG_L x3, PT_GP(sp)
+ REG_L x4, PT_TP(sp)
+ REG_L x5, PT_T0(sp)
+ REG_L x6, PT_T1(sp)
+ REG_L x7, PT_T2(sp)
+ REG_L x8, PT_S0(sp)
+ REG_L x9, PT_S1(sp)
+ REG_L x10, PT_A0(sp)
+ REG_L x11, PT_A1(sp)
+ REG_L x12, PT_A2(sp)
+ REG_L x13, PT_A3(sp)
+ REG_L x14, PT_A4(sp)
+ REG_L x15, PT_A5(sp)
+ REG_L x16, PT_A6(sp)
+ REG_L x17, PT_A7(sp)
+ REG_L x18, PT_S2(sp)
+ REG_L x19, PT_S3(sp)
+ REG_L x20, PT_S4(sp)
+ REG_L x21, PT_S5(sp)
+ REG_L x22, PT_S6(sp)
+ REG_L x23, PT_S7(sp)
+ REG_L x24, PT_S8(sp)
+ REG_L x25, PT_S9(sp)
+ REG_L x26, PT_S10(sp)
+ REG_L x27, PT_S11(sp)
+ REG_L x28, PT_T3(sp)
+ REG_L x29, PT_T4(sp)
+ REG_L x30, PT_T5(sp)
+ REG_L x31, PT_T6(sp)
+
+ addi sp, sp, PT_SIZE_ON_STACK
+ addi sp, sp, SZREG
+ .endm
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
ENTRY(ftrace_caller)
- /*
- * a0: the address in the caller when calling ftrace_caller
- * a1: the caller's return address
- * a2: the address of global variable function_trace_op
- */
- ld a1, -8(s0)
- addi a0, ra, -MCOUNT_INSN_SIZE
- la t5, function_trace_op
- ld a2, 0(t5)
+ SAVE_ABI
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- /*
- * the graph tracer (specifically, prepare_ftrace_return) needs these
- * arguments but for now the function tracer occupies the regs, so we
- * save them in temporary regs to recover later.
- */
- addi t0, s0, -8
- mv t1, a0
-#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
- ld t2, -16(s0)
-#endif
-#endif
+ addi a0, ra, -FENTRY_RA_OFFSET
+ la a1, function_trace_op
+ REG_L a2, 0(a1)
+ REG_L a1, ABI_SIZE_ON_STACK(sp)
+ mv a3, sp
- SAVE_ABI_STATE
ftrace_call:
.global ftrace_call
- /*
- * For the dynamic ftrace to work, here we should reserve at least
- * 8 bytes for a functional auipc-jalr pair. The following call
- * serves this purpose.
- *
- * Calling ftrace_update_ftrace_func would overwrite the nops below.
- * Check ftrace_modify_all_code for details.
- */
call ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- RESTORE_GRAPH_ARGS
- call ftrace_graph_caller
+ addi a0, sp, ABI_SIZE_ON_STACK
+ REG_L a1, ABI_RA(sp)
+ addi a1, a1, -FENTRY_RA_OFFSET
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+ mv a2, s0
#endif
-
- RESTORE_ABI_STATE
+ftrace_graph_call:
+ .global ftrace_graph_call
+ call ftrace_stub
+#endif
+ RESTORE_ABI
ret
ENDPROC(ftrace_caller)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- .macro SAVE_ALL
- addi sp, sp, -(PT_SIZE_ON_STACK+16)
- sd s0, (PT_SIZE_ON_STACK)(sp)
- sd ra, (PT_SIZE_ON_STACK+8)(sp)
- addi s0, sp, (PT_SIZE_ON_STACK+16)
-
- sd x1, PT_RA(sp)
- sd x2, PT_SP(sp)
- sd x3, PT_GP(sp)
- sd x4, PT_TP(sp)
- sd x5, PT_T0(sp)
- sd x6, PT_T1(sp)
- sd x7, PT_T2(sp)
- sd x8, PT_S0(sp)
- sd x9, PT_S1(sp)
- sd x10, PT_A0(sp)
- sd x11, PT_A1(sp)
- sd x12, PT_A2(sp)
- sd x13, PT_A3(sp)
- sd x14, PT_A4(sp)
- sd x15, PT_A5(sp)
- sd x16, PT_A6(sp)
- sd x17, PT_A7(sp)
- sd x18, PT_S2(sp)
- sd x19, PT_S3(sp)
- sd x20, PT_S4(sp)
- sd x21, PT_S5(sp)
- sd x22, PT_S6(sp)
- sd x23, PT_S7(sp)
- sd x24, PT_S8(sp)
- sd x25, PT_S9(sp)
- sd x26, PT_S10(sp)
- sd x27, PT_S11(sp)
- sd x28, PT_T3(sp)
- sd x29, PT_T4(sp)
- sd x30, PT_T5(sp)
- sd x31, PT_T6(sp)
- .endm
-
- .macro RESTORE_ALL
- ld x1, PT_RA(sp)
- ld x2, PT_SP(sp)
- ld x3, PT_GP(sp)
- ld x4, PT_TP(sp)
- ld x5, PT_T0(sp)
- ld x6, PT_T1(sp)
- ld x7, PT_T2(sp)
- ld x8, PT_S0(sp)
- ld x9, PT_S1(sp)
- ld x10, PT_A0(sp)
- ld x11, PT_A1(sp)
- ld x12, PT_A2(sp)
- ld x13, PT_A3(sp)
- ld x14, PT_A4(sp)
- ld x15, PT_A5(sp)
- ld x16, PT_A6(sp)
- ld x17, PT_A7(sp)
- ld x18, PT_S2(sp)
- ld x19, PT_S3(sp)
- ld x20, PT_S4(sp)
- ld x21, PT_S5(sp)
- ld x22, PT_S6(sp)
- ld x23, PT_S7(sp)
- ld x24, PT_S8(sp)
- ld x25, PT_S9(sp)
- ld x26, PT_S10(sp)
- ld x27, PT_S11(sp)
- ld x28, PT_T3(sp)
- ld x29, PT_T4(sp)
- ld x30, PT_T5(sp)
- ld x31, PT_T6(sp)
-
- ld s0, (PT_SIZE_ON_STACK)(sp)
- ld ra, (PT_SIZE_ON_STACK+8)(sp)
- addi sp, sp, (PT_SIZE_ON_STACK+16)
- .endm
-
- .macro RESTORE_GRAPH_REG_ARGS
- ld a0, PT_T0(sp)
- ld a1, PT_T1(sp)
-#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
- ld a2, PT_T2(sp)
-#endif
- .endm
-
-/*
- * Most of the contents are the same as ftrace_caller.
- */
ENTRY(ftrace_regs_caller)
- /*
- * a3: the address of all registers in the stack
- */
- ld a1, -8(s0)
- addi a0, ra, -MCOUNT_INSN_SIZE
- la t5, function_trace_op
- ld a2, 0(t5)
- addi a3, sp, -(PT_SIZE_ON_STACK+16)
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- addi t0, s0, -8
- mv t1, a0
-#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
- ld t2, -16(s0)
-#endif
-#endif
SAVE_ALL
+ addi a0, ra, -FENTRY_RA_OFFSET
+ la a1, function_trace_op
+ REG_L a2, 0(a1)
+ REG_L a1, PT_SIZE_ON_STACK(sp)
+ mv a3, sp
+
ftrace_regs_call:
.global ftrace_regs_call
call ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- RESTORE_GRAPH_REG_ARGS
- call ftrace_graph_caller
+ addi a0, sp, PT_RA
+ REG_L a1, PT_EPC(sp)
+ addi a1, a1, -FENTRY_RA_OFFSET
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+ mv a2, s0
+#endif
+ftrace_graph_regs_call:
+ .global ftrace_graph_regs_call
+ call ftrace_stub
#endif
RESTORE_ALL
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 3fe7a5296aa5..0b552873a577 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -20,7 +20,12 @@ struct patch_insn {
};
#ifdef CONFIG_MMU
-static void *patch_map(void *addr, int fixmap)
+/*
+ * fix_to_virt(, idx) needs a compile-time constant index (not a dynamic
+ * value held in a register such as a0), or BUILD_BUG_ON fires with
+ * "idx >= __end_of_fixed_addresses". So use '__always_inline' and
+ * 'const unsigned int fixmap' here.
+ */
+static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
{
uintptr_t uintaddr = (uintptr_t) addr;
struct page *page;
@@ -37,7 +42,6 @@ static void *patch_map(void *addr, int fixmap)
return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
(uintaddr & ~PAGE_MASK));
}
-NOKPROBE_SYMBOL(patch_map);
static void patch_unmap(int fixmap)
{
diff --git a/arch/riscv/kernel/probes/Makefile b/arch/riscv/kernel/probes/Makefile
new file mode 100644
index 000000000000..7f0840dcc31b
--- /dev/null
+++ b/arch/riscv/kernel/probes/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o
+obj-$(CONFIG_KPROBES) += kprobes_trampoline.o
+obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o
+obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o
+CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE)
diff --git a/arch/riscv/kernel/probes/decode-insn.c b/arch/riscv/kernel/probes/decode-insn.c
new file mode 100644
index 000000000000..0ed043acc882
--- /dev/null
+++ b/arch/riscv/kernel/probes/decode-insn.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <asm/sections.h>
+
+#include "decode-insn.h"
+#include "simulate-insn.h"
+
+/* Return:
+ * INSN_REJECTED     if the instruction cannot be kprobed,
+ * INSN_GOOD_NO_SLOT if the instruction is supported but doesn't use its slot.
+ */
+enum probe_insn __kprobes
+riscv_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api)
+{
+ probe_opcode_t insn = *addr;
+
+ /*
+ * Reject instructions list:
+ */
+ RISCV_INSN_REJECTED(system, insn);
+ RISCV_INSN_REJECTED(fence, insn);
+
+ /*
+ * Simulate instructions list:
+ * TODO: the REJECTED ones below need to be implemented
+ */
+#ifdef CONFIG_RISCV_ISA_C
+ RISCV_INSN_REJECTED(c_j, insn);
+ RISCV_INSN_REJECTED(c_jr, insn);
+ RISCV_INSN_REJECTED(c_jal, insn);
+ RISCV_INSN_REJECTED(c_jalr, insn);
+ RISCV_INSN_REJECTED(c_beqz, insn);
+ RISCV_INSN_REJECTED(c_bnez, insn);
+ RISCV_INSN_REJECTED(c_ebreak, insn);
+#endif
+
+ RISCV_INSN_REJECTED(auipc, insn);
+ RISCV_INSN_REJECTED(branch, insn);
+
+ RISCV_INSN_SET_SIMULATE(jal, insn);
+ RISCV_INSN_SET_SIMULATE(jalr, insn);
+
+ return INSN_GOOD;
+}
diff --git a/arch/riscv/kernel/probes/decode-insn.h b/arch/riscv/kernel/probes/decode-insn.h
new file mode 100644
index 000000000000..42269a7d676d
--- /dev/null
+++ b/arch/riscv/kernel/probes/decode-insn.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _RISCV_KERNEL_KPROBES_DECODE_INSN_H
+#define _RISCV_KERNEL_KPROBES_DECODE_INSN_H
+
+#include <asm/sections.h>
+#include <asm/kprobes.h>
+
+enum probe_insn {
+ INSN_REJECTED,
+ INSN_GOOD_NO_SLOT,
+ INSN_GOOD,
+};
+
+enum probe_insn __kprobes
+riscv_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *asi);
+
+#endif /* _RISCV_KERNEL_KPROBES_DECODE_INSN_H */
diff --git a/arch/riscv/kernel/probes/ftrace.c b/arch/riscv/kernel/probes/ftrace.c
new file mode 100644
index 000000000000..e6372490aa0b
--- /dev/null
+++ b/arch/riscv/kernel/probes/ftrace.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kprobes.h>
+
+/* Ftrace callback handler for kprobes -- called under preempt disabled */
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct ftrace_regs *regs)
+{
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+
+ p = get_kprobe((kprobe_opcode_t *)ip);
+ if (unlikely(!p) || kprobe_disabled(p))
+ return;
+
+ kcb = get_kprobe_ctlblk();
+ if (kprobe_running()) {
+ kprobes_inc_nmissed_count(p);
+ } else {
+ unsigned long orig_ip = instruction_pointer(&(regs->regs));
+
+ instruction_pointer_set(&(regs->regs), ip);
+
+ __this_cpu_write(current_kprobe, p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ if (!p->pre_handler || !p->pre_handler(p, &(regs->regs))) {
+ /*
+ * Emulate singlestep (and also recover regs->pc)
+ * as if there is a nop
+ */
+ instruction_pointer_set(&(regs->regs),
+ (unsigned long)p->addr + MCOUNT_INSN_SIZE);
+ if (unlikely(p->post_handler)) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, &(regs->regs), 0);
+ }
+ instruction_pointer_set(&(regs->regs), orig_ip);
+ }
+
+ /*
+ * If pre_handler returns !0, it changes regs->pc. We have to
+ * skip emulating post_handler.
+ */
+ __this_cpu_write(current_kprobe, NULL);
+ }
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+ p->ainsn.api.insn = NULL;
+ return 0;
+}
diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
new file mode 100644
index 000000000000..a2ec18662fee
--- /dev/null
+++ b/arch/riscv/kernel/probes/kprobes.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/kprobes.h>
+#include <linux/extable.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+#include <asm/ptrace.h>
+#include <linux/uaccess.h>
+#include <asm/sections.h>
+#include <asm/cacheflush.h>
+#include <asm/bug.h>
+#include <asm/patch.h>
+
+#include "decode-insn.h"
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
+
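+/*
+ * Copy the probed instruction into the out-of-line slot and plant a 32-bit
+ * ebreak right behind it, so that single-stepping the slot traps back into
+ * the kernel; ainsn.api.restore records where execution resumes in the
+ * original text.
+ */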
+static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
+{
+ unsigned long offset = GET_INSN_LENGTH(p->opcode);
+
+ p->ainsn.api.restore = (unsigned long)p->addr + offset;
+
+ patch_text(p->ainsn.api.insn, p->opcode);
+ patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset),
+ __BUG_INSN_32);
+}
+
+static void __kprobes arch_prepare_simulate(struct kprobe *p)
+{
+ p->ainsn.api.restore = 0;
+}
+
+static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (p->ainsn.api.handler)
+ p->ainsn.api.handler((u32)p->opcode,
+ (unsigned long)p->addr, regs);
+
+ post_kprobe_handler(kcb, regs);
+}
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ unsigned long probe_addr = (unsigned long)p->addr;
+
+ if (probe_addr & 0x1) {
+ pr_warn("Address not aligned.\n");
+
+ return -EINVAL;
+ }
+
+ /* copy instruction */
+ p->opcode = *p->addr;
+
+ /* decode instruction */
+ switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) {
+ case INSN_REJECTED: /* insn not supported */
+ return -EINVAL;
+
+ case INSN_GOOD_NO_SLOT: /* insn need simulation */
+ p->ainsn.api.insn = NULL;
+ break;
+
+ case INSN_GOOD: /* instruction uses slot */
+ p->ainsn.api.insn = get_insn_slot();
+ if (!p->ainsn.api.insn)
+ return -ENOMEM;
+ break;
+ }
+
+ /* prepare the instruction */
+ if (p->ainsn.api.insn)
+ arch_prepare_ss_slot(p);
+ else
+ arch_prepare_simulate(p);
+
+ return 0;
+}
+
+/* install breakpoint in text */
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ if ((p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
+ patch_text(p->addr, __BUG_INSN_32);
+ else
+ patch_text(p->addr, __BUG_INSN_16);
+}
+
+/* remove breakpoint from text */
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ patch_text(p->addr, p->opcode);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p)
+{
+ __this_cpu_write(current_kprobe, p);
+}
+
+/*
+ * Interrupts need to be disabled before single-step mode is set, and not
+ * reenabled until after single-step mode ends.
+ * Without disabling interrupts on the local CPU, an interrupt could occur
+ * between the exception return and the start of the out-of-line single
+ * step, which would result in wrongly single-stepping into the interrupt
+ * handler.
+ */
+static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ kcb->saved_status = regs->status;
+ regs->status &= ~SR_SPIE;
+}
+
+static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ regs->status = kcb->saved_status;
+}
+
+static void __kprobes
+set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p)
+{
+ unsigned long offset = GET_INSN_LENGTH(p->opcode);
+
+ kcb->ss_ctx.ss_pending = true;
+ kcb->ss_ctx.match_addr = addr + offset;
+}
+
+static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
+{
+ kcb->ss_ctx.ss_pending = false;
+ kcb->ss_ctx.match_addr = 0;
+}
+
+static void __kprobes setup_singlestep(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb, int reenter)
+{
+ unsigned long slot;
+
+ if (reenter) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_REENTER;
+ } else {
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ }
+
+ if (p->ainsn.api.insn) {
+ /* prepare for single stepping */
+ slot = (unsigned long)p->ainsn.api.insn;
+
+ set_ss_context(kcb, slot, p); /* mark pending ss */
+
+ /* IRQs and single stepping do not mix well. */
+ kprobes_save_local_irqflag(kcb, regs);
+
+ instruction_pointer_set(regs, slot);
+ } else {
+ /* insn simulation */
+ arch_simulate_insn(p, regs);
+ }
+}
+
+static int __kprobes reenter_kprobe(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SSDONE:
+ case KPROBE_HIT_ACTIVE:
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, regs, kcb, 1);
+ break;
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ pr_warn("Unrecoverable kprobe detected.\n");
+ dump_kprobe(p);
+ BUG();
+ break;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+
+ if (!cur)
+ return;
+
+ /* return addr restore if non-branching insn */
+ if (cur->ainsn.api.restore != 0)
+ regs->epc = cur->ainsn.api.restore;
+
+ /* restore back original saved kprobe variables and continue */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ return;
+ }
+
+ /* call post handler */
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (cur->post_handler) {
+		/* post_handler can hit a breakpoint and single step
+		 * again, so be prepared for a recursive exception.
+ */
+ cur->post_handler(cur, regs, 0);
+ }
+
+ reset_current_kprobe();
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single
+		 * stepped caused a page fault. We reset the current
+		 * kprobe, point the ip back to the probe address,
+		 * and allow the page fault handler to continue as for
+		 * a normal page fault.
+ */
+ regs->epc = (unsigned long) cur->addr;
+ if (!instruction_pointer(regs))
+ BUG();
+
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+		 * We increment the nmissed count for accounting; the
+		 * npre/npostfault counts could also be used to account
+		 * for these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page_fault, this could happen
+ * if handler tries to access user space by
+ * copy_from_user(), get_user() etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ /*
+ * In case the user-specified fault handler returned
+ * zero, try to fix up.
+ */
+ if (fixup_exception(regs))
+ return 1;
+ }
+ return 0;
+}
+
+bool __kprobes
+kprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ struct kprobe *p, *cur_kprobe;
+ struct kprobe_ctlblk *kcb;
+ unsigned long addr = instruction_pointer(regs);
+
+ kcb = get_kprobe_ctlblk();
+ cur_kprobe = kprobe_running();
+
+ p = get_kprobe((kprobe_opcode_t *) addr);
+
+ if (p) {
+ if (cur_kprobe) {
+ if (reenter_kprobe(p, regs, kcb))
+ return true;
+ } else {
+ /* Probe hit */
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ /*
+ * If we have no pre-handler or it returned 0, we
+ * continue with normal processing. If we have a
+ * pre-handler and it returned non-zero, it will
+			 * modify the execution path and there is no need for
+			 * single stepping. Let's just reset the current kprobe and exit.
+ *
+ * pre_handler can hit a breakpoint and can step thru
+ * before return.
+ */
+ if (!p->pre_handler || !p->pre_handler(p, regs))
+ setup_singlestep(p, regs, kcb, 0);
+ else
+ reset_current_kprobe();
+ }
+ return true;
+ }
+
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ * Return back to original instruction, and continue.
+ */
+ return false;
+}
+
+bool __kprobes
+kprobe_single_step_handler(struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if ((kcb->ss_ctx.ss_pending)
+ && (kcb->ss_ctx.match_addr == instruction_pointer(regs))) {
+ clear_ss_context(kcb); /* clear pending ss */
+
+ kprobes_restore_local_irqflag(kcb, regs);
+
+ post_kprobe_handler(kcb, regs);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
+ * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
+ */
+int __init arch_populate_kprobe_blacklist(void)
+{
+ int ret;
+
+ ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
+ (unsigned long)__irqentry_text_end);
+ return ret;
+}
+
+void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
+{
+ return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
+}
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *)regs->ra;
+ ri->fp = NULL;
+ regs->ra = (unsigned long) &kretprobe_trampoline;
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ return 0;
+}
+
+int __init arch_init_kprobes(void)
+{
+ return 0;
+}
diff --git a/arch/riscv/kernel/probes/kprobes_trampoline.S b/arch/riscv/kernel/probes/kprobes_trampoline.S
new file mode 100644
index 000000000000..6e85d021e2a2
--- /dev/null
+++ b/arch/riscv/kernel/probes/kprobes_trampoline.S
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Author: Patrick Stählin <me@packi.ch>
+ */
+#include <linux/linkage.h>
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+
+ .text
+ .altmacro
+
+ .macro save_all_base_regs
+ REG_S x1, PT_RA(sp)
+ REG_S x3, PT_GP(sp)
+ REG_S x4, PT_TP(sp)
+ REG_S x5, PT_T0(sp)
+ REG_S x6, PT_T1(sp)
+ REG_S x7, PT_T2(sp)
+ REG_S x8, PT_S0(sp)
+ REG_S x9, PT_S1(sp)
+ REG_S x10, PT_A0(sp)
+ REG_S x11, PT_A1(sp)
+ REG_S x12, PT_A2(sp)
+ REG_S x13, PT_A3(sp)
+ REG_S x14, PT_A4(sp)
+ REG_S x15, PT_A5(sp)
+ REG_S x16, PT_A6(sp)
+ REG_S x17, PT_A7(sp)
+ REG_S x18, PT_S2(sp)
+ REG_S x19, PT_S3(sp)
+ REG_S x20, PT_S4(sp)
+ REG_S x21, PT_S5(sp)
+ REG_S x22, PT_S6(sp)
+ REG_S x23, PT_S7(sp)
+ REG_S x24, PT_S8(sp)
+ REG_S x25, PT_S9(sp)
+ REG_S x26, PT_S10(sp)
+ REG_S x27, PT_S11(sp)
+ REG_S x28, PT_T3(sp)
+ REG_S x29, PT_T4(sp)
+ REG_S x30, PT_T5(sp)
+ REG_S x31, PT_T6(sp)
+ .endm
+
+ .macro restore_all_base_regs
+ REG_L x3, PT_GP(sp)
+ REG_L x4, PT_TP(sp)
+ REG_L x5, PT_T0(sp)
+ REG_L x6, PT_T1(sp)
+ REG_L x7, PT_T2(sp)
+ REG_L x8, PT_S0(sp)
+ REG_L x9, PT_S1(sp)
+ REG_L x10, PT_A0(sp)
+ REG_L x11, PT_A1(sp)
+ REG_L x12, PT_A2(sp)
+ REG_L x13, PT_A3(sp)
+ REG_L x14, PT_A4(sp)
+ REG_L x15, PT_A5(sp)
+ REG_L x16, PT_A6(sp)
+ REG_L x17, PT_A7(sp)
+ REG_L x18, PT_S2(sp)
+ REG_L x19, PT_S3(sp)
+ REG_L x20, PT_S4(sp)
+ REG_L x21, PT_S5(sp)
+ REG_L x22, PT_S6(sp)
+ REG_L x23, PT_S7(sp)
+ REG_L x24, PT_S8(sp)
+ REG_L x25, PT_S9(sp)
+ REG_L x26, PT_S10(sp)
+ REG_L x27, PT_S11(sp)
+ REG_L x28, PT_T3(sp)
+ REG_L x29, PT_T4(sp)
+ REG_L x30, PT_T5(sp)
+ REG_L x31, PT_T6(sp)
+ .endm
+
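+/*
+ * All kretprobe'd function returns are funnelled through this trampoline:
+ * it builds a pt_regs frame, asks trampoline_probe_handler() for the
+ * original return address, and returns there with the base registers
+ * restored.
+ */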
+ENTRY(kretprobe_trampoline)
+ addi sp, sp, -(PT_SIZE_ON_STACK)
+ save_all_base_regs
+
+ move a0, sp /* pt_regs */
+
+ call trampoline_probe_handler
+
+ /* use the result as the return-address */
+ move ra, a0
+
+ restore_all_base_regs
+ addi sp, sp, PT_SIZE_ON_STACK
+
+ ret
+ENDPROC(kretprobe_trampoline)
diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c
new file mode 100644
index 000000000000..2519ce26377d
--- /dev/null
+++ b/arch/riscv/kernel/probes/simulate-insn.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+
+#include "decode-insn.h"
+#include "simulate-insn.h"
+
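+/*
+ * struct pt_regs lays out epc followed by x1..x31 in order, so treating it
+ * as an array of unsigned long lets index N (1..31) address register xN.
+ * Index 0 is the zero register: reads yield 0 and writes are rejected.
+ */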
+static inline bool rv_insn_reg_get_val(struct pt_regs *regs, u32 index,
+ unsigned long *ptr)
+{
+ if (index == 0)
+ *ptr = 0;
+ else if (index <= 31)
+ *ptr = *((unsigned long *)regs + index);
+ else
+ return false;
+
+ return true;
+}
+
+static inline bool rv_insn_reg_set_val(struct pt_regs *regs, u32 index,
+ unsigned long val)
+{
+ if (index == 0)
+ return false;
+ else if (index <= 31)
+ *((unsigned long *)regs + index) = val;
+ else
+ return false;
+
+ return true;
+}
+
+bool __kprobes simulate_jal(u32 opcode, unsigned long addr, struct pt_regs *regs)
+{
+ /*
+ * 31 30 21 20 19 12 11 7 6 0
+ * imm [20] | imm[10:1] | imm[11] | imm[19:12] | rd | opcode
+ * 1 10 1 8 5 JAL/J
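+	 *
+	 * e.g. 0x008000ef is "jal ra, 8": rd = 1 (x1) and the immediate
+	 * reassembled below is 8.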
+ */
+ bool ret;
+ u32 imm;
+ u32 index = (opcode >> 7) & 0x1f;
+
+ ret = rv_insn_reg_set_val(regs, index, addr + 4);
+ if (!ret)
+ return ret;
+
+ imm = ((opcode >> 21) & 0x3ff) << 1;
+ imm |= ((opcode >> 20) & 0x1) << 11;
+ imm |= ((opcode >> 12) & 0xff) << 12;
+ imm |= ((opcode >> 31) & 0x1) << 20;
+
+ instruction_pointer_set(regs, addr + sign_extend32((imm), 20));
+
+ return ret;
+}
+
+bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *regs)
+{
+ /*
+ * 31 20 19 15 14 12 11 7 6 0
+	 * offset[11:0] | rs1 | 000 | rd | opcode
+ * 12 5 3 5 JALR/JR
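+	 *
+	 * e.g. 0x000300e7 is "jalr ra, 0(t1)": rd = 1, rs1 = 6 (t1) and
+	 * the offset is 0.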
+ */
+ bool ret;
+ unsigned long base_addr;
+ u32 imm = (opcode >> 20) & 0xfff;
+ u32 rd_index = (opcode >> 7) & 0x1f;
+ u32 rs1_index = (opcode >> 15) & 0x1f;
+
+ ret = rv_insn_reg_set_val(regs, rd_index, addr + 4);
+ if (!ret)
+ return ret;
+
+ ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr);
+ if (!ret)
+ return ret;
+
+ instruction_pointer_set(regs, (base_addr + sign_extend32((imm), 11))&~1);
+
+ return ret;
+}
diff --git a/arch/riscv/kernel/probes/simulate-insn.h b/arch/riscv/kernel/probes/simulate-insn.h
new file mode 100644
index 000000000000..cb6ff7dccb92
--- /dev/null
+++ b/arch/riscv/kernel/probes/simulate-insn.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _RISCV_KERNEL_PROBES_SIMULATE_INSN_H
+#define _RISCV_KERNEL_PROBES_SIMULATE_INSN_H
+
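+/*
+ * Each (mask, val) pair below identifies one instruction (or class of
+ * instructions): riscv_insn_is_<name>() tests whether an opcode matches,
+ * and a simulate_<name>() helper is declared alongside it.
+ */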
+#define __RISCV_INSN_FUNCS(name, mask, val) \
+static __always_inline bool riscv_insn_is_##name(probe_opcode_t code) \
+{ \
+ BUILD_BUG_ON(~(mask) & (val)); \
+ return (code & (mask)) == (val); \
+} \
+bool simulate_##name(u32 opcode, unsigned long addr, \
+ struct pt_regs *regs)
+
+#define RISCV_INSN_REJECTED(name, code) \
+ do { \
+ if (riscv_insn_is_##name(code)) { \
+ return INSN_REJECTED; \
+ } \
+ } while (0)
+
+__RISCV_INSN_FUNCS(system, 0x7f, 0x73);
+__RISCV_INSN_FUNCS(fence, 0x7f, 0x0f);
+
+#define RISCV_INSN_SET_SIMULATE(name, code) \
+ do { \
+ if (riscv_insn_is_##name(code)) { \
+ api->handler = simulate_##name; \
+ return INSN_GOOD_NO_SLOT; \
+ } \
+ } while (0)
+
+__RISCV_INSN_FUNCS(c_j, 0xe003, 0xa001);
+__RISCV_INSN_FUNCS(c_jr, 0xf007, 0x8002);
+__RISCV_INSN_FUNCS(c_jal, 0xe003, 0x2001);
+__RISCV_INSN_FUNCS(c_jalr, 0xf007, 0x9002);
+__RISCV_INSN_FUNCS(c_beqz, 0xe003, 0xc001);
+__RISCV_INSN_FUNCS(c_bnez, 0xe003, 0xe001);
+__RISCV_INSN_FUNCS(c_ebreak, 0xffff, 0x9002);
+
+__RISCV_INSN_FUNCS(auipc, 0x7f, 0x17);
+__RISCV_INSN_FUNCS(branch, 0x7f, 0x63);
+
+__RISCV_INSN_FUNCS(jal, 0x7f, 0x6f);
+__RISCV_INSN_FUNCS(jalr, 0x707f, 0x67);
+
+#endif /* _RISCV_KERNEL_PROBES_SIMULATE_INSN_H */
diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c
new file mode 100644
index 000000000000..7a057b5f0adc
--- /dev/null
+++ b/arch/riscv/kernel/probes/uprobes.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/highmem.h>
+#include <linux/ptrace.h>
+#include <linux/uprobes.h>
+
+#include "decode-insn.h"
+
+#define UPROBE_TRAP_NR UINT_MAX
+
+bool is_swbp_insn(uprobe_opcode_t *insn)
+{
+#ifdef CONFIG_RISCV_ISA_C
+ return (*insn & 0xffff) == UPROBE_SWBP_INSN;
+#else
+ return *insn == UPROBE_SWBP_INSN;
+#endif
+}
+
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ unsigned long addr)
+{
+ probe_opcode_t opcode;
+
+ opcode = *(probe_opcode_t *)(&auprobe->insn[0]);
+
+ auprobe->insn_size = GET_INSN_LENGTH(opcode);
+
+ switch (riscv_probe_decode_insn(&opcode, &auprobe->api)) {
+ case INSN_REJECTED:
+ return -EINVAL;
+
+ case INSN_GOOD_NO_SLOT:
+ auprobe->simulate = true;
+ break;
+
+ case INSN_GOOD:
+ auprobe->simulate = false;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ utask->autask.saved_cause = current->thread.bad_cause;
+ current->thread.bad_cause = UPROBE_TRAP_NR;
+
+ instruction_pointer_set(regs, utask->xol_vaddr);
+
+ regs->status &= ~SR_SPIE;
+
+ return 0;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR);
+
+ instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);
+
+ regs->status |= SR_SPIE;
+
+ return 0;
+}
+
+bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+{
+ if (t->thread.bad_cause != UPROBE_TRAP_NR)
+ return true;
+
+ return false;
+}
+
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ probe_opcode_t insn;
+ unsigned long addr;
+
+ if (!auprobe->simulate)
+ return false;
+
+ insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+ addr = instruction_pointer(regs);
+
+ if (auprobe->api.handler)
+ auprobe->api.handler(insn, addr, regs);
+
+ return true;
+}
+
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ /*
+	 * Task has received a fatal signal, so reset back to the probed
+	 * address.
+ */
+ instruction_pointer_set(regs, utask->vaddr);
+
+ regs->status &= ~SR_SPIE;
+}
+
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+ struct pt_regs *regs)
+{
+ if (ctx == RP_CHECK_CHAIN_CALL)
+ return regs->sp <= ret->stack;
+ else
+ return regs->sp < ret->stack;
+}
+
+unsigned long
+arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
+ struct pt_regs *regs)
+{
+ unsigned long ra;
+
+ ra = regs->ra;
+
+ regs->ra = trampoline_vaddr;
+
+ return ra;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ return NOTIFY_DONE;
+}
+
+bool uprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ if (uprobe_pre_sstep_notifier(regs))
+ return true;
+
+ return false;
+}
+
+bool uprobe_single_step_handler(struct pt_regs *regs)
+{
+ if (uprobe_post_sstep_notifier(regs))
+ return true;
+
+ return false;
+}
+
+void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ void *src, unsigned long len)
+{
+ /* Initialize the slot */
+ void *kaddr = kmap_atomic(page);
+ void *dst = kaddr + (vaddr & ~PAGE_MASK);
+
+ memcpy(dst, src, len);
+
+ /* Add ebreak behind opcode to simulate singlestep */
+ if (vaddr) {
+ dst += GET_INSN_LENGTH(*(probe_opcode_t *)src);
+ *(uprobe_opcode_t *)dst = __BUG_INSN_32;
+ }
+
+ kunmap_atomic(kaddr);
+
+ /*
+ * We probably need flush_icache_user_page() but it needs vma.
+	 * This should work on most architectures by default. If an
+	 * architecture needs to do something different it can define
+	 * its own version of the function.
+ */
+ flush_dcache_page(page);
+}
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index dd5f985b1f40..6f728e731bed 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -18,13 +18,14 @@
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/csr.h>
+#include <asm/stacktrace.h>
#include <asm/string.h>
#include <asm/switch_to.h>
#include <asm/thread_info.h>
register unsigned long gp_in_global __asm__("gp");
-#ifdef CONFIG_STACKPROTECTOR
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
@@ -39,11 +40,16 @@ void arch_cpu_idle(void)
raw_local_irq_enable();
}
-void show_regs(struct pt_regs *regs)
+void __show_regs(struct pt_regs *regs)
{
show_regs_print_info(KERN_DEFAULT);
- pr_cont("epc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
+ if (!user_mode(regs)) {
+ pr_cont("epc : %pS\n", (void *)regs->epc);
+ pr_cont(" ra : %pS\n", (void *)regs->ra);
+ }
+
+ pr_cont("epc : " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
regs->epc, regs->ra, regs->sp);
pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
regs->gp, regs->tp, regs->t0);
@@ -69,6 +75,12 @@ void show_regs(struct pt_regs *regs)
pr_cont("status: " REG_FMT " badaddr: " REG_FMT " cause: " REG_FMT "\n",
regs->status, regs->badaddr, regs->cause);
}
+void show_regs(struct pt_regs *regs)
+{
+ __show_regs(regs);
+ if (!user_mode(regs))
+ dump_backtrace(regs, NULL, KERN_DEFAULT);
+}
void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
@@ -112,7 +124,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
struct pt_regs *childregs = task_pt_regs(p);
/* p->thread holds context to be restored by __switch_to() */
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
/* Kernel thread */
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gp = gp_in_global;
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 2d6395f5ad54..1a85305720e8 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -114,6 +114,105 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
return &riscv_user_native_view;
}
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME(epc),
+ REG_OFFSET_NAME(ra),
+ REG_OFFSET_NAME(sp),
+ REG_OFFSET_NAME(gp),
+ REG_OFFSET_NAME(tp),
+ REG_OFFSET_NAME(t0),
+ REG_OFFSET_NAME(t1),
+ REG_OFFSET_NAME(t2),
+ REG_OFFSET_NAME(s0),
+ REG_OFFSET_NAME(s1),
+ REG_OFFSET_NAME(a0),
+ REG_OFFSET_NAME(a1),
+ REG_OFFSET_NAME(a2),
+ REG_OFFSET_NAME(a3),
+ REG_OFFSET_NAME(a4),
+ REG_OFFSET_NAME(a5),
+ REG_OFFSET_NAME(a6),
+ REG_OFFSET_NAME(a7),
+ REG_OFFSET_NAME(s2),
+ REG_OFFSET_NAME(s3),
+ REG_OFFSET_NAME(s4),
+ REG_OFFSET_NAME(s5),
+ REG_OFFSET_NAME(s6),
+ REG_OFFSET_NAME(s7),
+ REG_OFFSET_NAME(s8),
+ REG_OFFSET_NAME(s9),
+ REG_OFFSET_NAME(s10),
+ REG_OFFSET_NAME(s11),
+ REG_OFFSET_NAME(t3),
+ REG_OFFSET_NAME(t4),
+ REG_OFFSET_NAME(t5),
+ REG_OFFSET_NAME(t6),
+ REG_OFFSET_NAME(status),
+ REG_OFFSET_NAME(badaddr),
+ REG_OFFSET_NAME(cause),
+ REG_OFFSET_NAME(orig_a0),
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @addr: address which is checked.
+ *
+ * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true. If not, returns false.
+ */
+static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+ return (addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
+}
+
void ptrace_disable(struct task_struct *child)
{
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index 226ccce0f9e0..f4a7db3d309e 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -351,7 +351,7 @@ static int __sbi_rfence_v02(int fid, const unsigned long *hart_mask,
* sbi_set_timer() - Program the timer for next timer event.
* @stime_value: The value after which next timer event should fire.
*
- * Return: None
+ * Return: None.
*/
void sbi_set_timer(uint64_t stime_value)
{
@@ -362,11 +362,11 @@ void sbi_set_timer(uint64_t stime_value)
* sbi_send_ipi() - Send an IPI to any hart.
* @hart_mask: A cpu mask containing all the target harts.
*
- * Return: None
+ * Return: 0 on success, appropriate linux error code otherwise.
*/
-void sbi_send_ipi(const unsigned long *hart_mask)
+int sbi_send_ipi(const unsigned long *hart_mask)
{
- __sbi_send_ipi(hart_mask);
+ return __sbi_send_ipi(hart_mask);
}
EXPORT_SYMBOL(sbi_send_ipi);
@@ -374,12 +374,12 @@ EXPORT_SYMBOL(sbi_send_ipi);
* sbi_remote_fence_i() - Execute FENCE.I instruction on given remote harts.
* @hart_mask: A cpu mask containing all the target harts.
*
- * Return: None
+ * Return: 0 on success, appropriate linux error code otherwise.
*/
-void sbi_remote_fence_i(const unsigned long *hart_mask)
+int sbi_remote_fence_i(const unsigned long *hart_mask)
{
- __sbi_rfence(SBI_EXT_RFENCE_REMOTE_FENCE_I,
- hart_mask, 0, 0, 0, 0);
+ return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_FENCE_I,
+ hart_mask, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_fence_i);
@@ -390,14 +390,14 @@ EXPORT_SYMBOL(sbi_remote_fence_i);
* @start: Start of the virtual address
* @size: Total size of the virtual address range.
*
- * Return: None
+ * Return: 0 on success, appropriate linux error code otherwise.
*/
-void sbi_remote_sfence_vma(const unsigned long *hart_mask,
+int sbi_remote_sfence_vma(const unsigned long *hart_mask,
unsigned long start,
unsigned long size)
{
- __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
- hart_mask, start, size, 0, 0);
+ return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
+ hart_mask, start, size, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_sfence_vma);
@@ -410,15 +410,15 @@ EXPORT_SYMBOL(sbi_remote_sfence_vma);
* @size: Total size of the virtual address range.
* @asid: The value of address space identifier (ASID).
*
- * Return: None
+ * Return: 0 on success, appropriate linux error code otherwise.
*/
-void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
+int sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
unsigned long start,
unsigned long size,
unsigned long asid)
{
- __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
- hart_mask, start, size, asid, 0);
+ return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
+ hart_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_sfence_vma_asid);
@@ -560,7 +560,7 @@ static struct riscv_ipi_ops sbi_ipi_ops = {
.ipi_inject = sbi_send_cpumask_ipi
};
-int __init sbi_init(void)
+void __init sbi_init(void)
{
int ret;
@@ -600,6 +600,4 @@ int __init sbi_init(void)
}
riscv_set_ipi_ops(&sbi_ipi_ops);
-
- return 0;
}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 3fa3f26dde85..e85bacff1b50 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -216,8 +216,15 @@ static void __init init_resources(void)
static void __init parse_dtb(void)
{
/* Early scan of device tree from init memory */
- if (early_init_dt_scan(dtb_early_va))
+ if (early_init_dt_scan(dtb_early_va)) {
+ const char *name = of_flat_dt_get_machine_name();
+
+ if (name) {
+ pr_info("Machine model: %s\n", name);
+ dump_stack_set_arch_desc("%s (DT)", name);
+ }
return;
+ }
pr_err("No DTB passed to the kernel\n");
#ifdef CONFIG_CMDLINE_FORCE
@@ -252,9 +259,9 @@ void __init setup_arch(char **cmdline_p)
else
pr_err("No DTB found in kernel mappings\n");
#endif
+ misc_mem_init();
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- sbi_init();
+ sbi_init();
if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
protect_kernel_text_data();
@@ -275,13 +282,19 @@ void __init setup_arch(char **cmdline_p)
static int __init topology_init(void)
{
- int i;
+ int i, ret;
+
+ for_each_online_node(i)
+ register_one_node(i);
for_each_possible_cpu(i) {
struct cpu *cpu = &per_cpu(cpu_devices, i);
cpu->hotpluggable = cpu_has_hotplug(i);
- register_cpu(cpu, i);
+ ret = register_cpu(cpu, i);
+ if (unlikely(ret))
+ pr_warn("Warning: %s: register_cpu %d failed (%d)\n",
+ __func__, i, ret);
}
return 0;
@@ -293,6 +306,8 @@ void free_initmem(void)
unsigned long init_begin = (unsigned long)__init_begin;
unsigned long init_end = (unsigned long)__init_end;
- set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+ if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
+ set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+
free_initmem_default(POISON_FREE_INITMEM);
}
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 469aef8ed922..65942b3748b4 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -309,6 +309,9 @@ static void do_signal(struct pt_regs *regs)
asmlinkage __visible void do_notify_resume(struct pt_regs *regs,
unsigned long thread_info_flags)
{
+ if (thread_info_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
/* Handle pending signal delivery */
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
do_signal(regs);
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 96167d55ed98..5e276c25646f 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -27,6 +27,7 @@
#include <asm/cpu_ops.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
+#include <asm/numa.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/sbi.h>
@@ -45,13 +46,18 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
int cpuid;
int ret;
+ unsigned int curr_cpuid;
+
+ curr_cpuid = smp_processor_id();
+ numa_store_cpu_info(curr_cpuid);
+ numa_add_cpu(curr_cpuid);
/* This covers non-smp usecase mandated by "nosmp" option */
if (max_cpus == 0)
return;
for_each_possible_cpu(cpuid) {
- if (cpuid == smp_processor_id())
+ if (cpuid == curr_cpuid)
continue;
if (cpu_ops[cpuid]->cpu_prepare) {
ret = cpu_ops[cpuid]->cpu_prepare(cpuid);
@@ -59,6 +65,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
continue;
}
set_cpu_present(cpuid, true);
+ numa_store_cpu_info(cpuid);
}
}
@@ -79,6 +86,7 @@ void __init setup_smp(void)
if (hart == cpuid_to_hartid_map(0)) {
BUG_ON(found_boot_cpu);
found_boot_cpu = 1;
+ early_map_cpu_to_node(0, of_node_to_nid(dn));
continue;
}
if (cpuid >= NR_CPUS) {
@@ -88,6 +96,7 @@ void __init setup_smp(void)
}
cpuid_to_hartid_map(cpuid) = hart;
+ early_map_cpu_to_node(cpuid, of_node_to_nid(dn));
cpuid++;
}
@@ -153,6 +162,7 @@ asmlinkage __visible void smp_callin(void)
current->active_mm = mm;
notify_cpu_starting(curr_cpuid);
+ numa_add_cpu(curr_cpuid);
update_siblings_masks(curr_cpuid);
set_cpu_online(curr_cpuid, 1);
diff --git a/arch/riscv/kernel/soc.c b/arch/riscv/kernel/soc.c
index c7b0a73e382e..a0516172a33c 100644
--- a/arch/riscv/kernel/soc.c
+++ b/arch/riscv/kernel/soc.c
@@ -26,30 +26,3 @@ void __init soc_early_init(void)
}
}
}
-
-static bool soc_builtin_dtb_match(unsigned long vendor_id,
- unsigned long arch_id, unsigned long imp_id,
- const struct soc_builtin_dtb *entry)
-{
- return entry->vendor_id == vendor_id &&
- entry->arch_id == arch_id &&
- entry->imp_id == imp_id;
-}
-
-void * __init soc_lookup_builtin_dtb(void)
-{
- unsigned long vendor_id, arch_id, imp_id;
- const struct soc_builtin_dtb *s;
-
- __asm__ ("csrr %0, mvendorid" : "=r"(vendor_id));
- __asm__ ("csrr %0, marchid" : "=r"(arch_id));
- __asm__ ("csrr %0, mimpid" : "=r"(imp_id));
-
- for (s = (void *)&__soc_builtin_dtb_table_start;
- (void *)s < (void *)&__soc_builtin_dtb_table_end; s++) {
- if (soc_builtin_dtb_match(vendor_id, arch_id, imp_id, s))
- return s->dtb_func();
- }
-
- return NULL;
-}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index df5d2da7c40b..3f893c9d9d85 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -53,9 +53,15 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
/* Unwind stack frame */
frame = (struct stackframe *)fp - 1;
sp = fp;
- fp = frame->fp;
- pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
- (unsigned long *)(fp - 8));
+ if (regs && (regs->epc == pc) && (frame->fp & 0x7)) {
+ fp = frame->ra;
+ pc = regs->ra;
+ } else {
+ fp = frame->fp;
+ pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
+ (unsigned long *)(fp - 8));
+ }
+
}
}
@@ -100,10 +106,16 @@ static bool print_trace_address(void *arg, unsigned long pc)
return true;
}
+void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
+ const char *loglvl)
+{
+ pr_cont("%sCall Trace:\n", loglvl);
+ walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
+}
+
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
- pr_cont("Call Trace:\n");
- walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
+ dump_backtrace(NULL, task, loglvl);
}
static bool save_wchan(void *arg, unsigned long pc)
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index ad14f4466d92..3ed2c23601a0 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -12,10 +12,12 @@
#include <linux/signal.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
+#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
+#include <asm/bug.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
@@ -66,7 +68,7 @@ void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
tsk->comm, task_pid_nr(tsk), signo, code, addr);
print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
pr_cont("\n");
- show_regs(regs);
+ __show_regs(regs);
}
force_sig_fault(signo, code, (void __user *)addr);
@@ -75,6 +77,8 @@ void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
static void do_trap_error(struct pt_regs *regs, int signo, int code,
unsigned long addr, const char *str)
{
+ current->thread.bad_cause = regs->cause;
+
if (user_mode(regs)) {
do_trap(regs, signo, code, addr);
} else {
@@ -145,6 +149,22 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
asmlinkage __visible void do_trap_break(struct pt_regs *regs)
{
+#ifdef CONFIG_KPROBES
+ if (kprobe_single_step_handler(regs))
+ return;
+
+ if (kprobe_breakpoint_handler(regs))
+ return;
+#endif
+#ifdef CONFIG_UPROBES
+ if (uprobe_single_step_handler(regs))
+ return;
+
+ if (uprobe_breakpoint_handler(regs))
+ return;
+#endif
+ current->thread.bad_cause = regs->cause;
+
if (user_mode(regs))
force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
#ifdef CONFIG_KGDB
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 0cfd6da784f8..71a315e73cbe 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -32,9 +32,10 @@ CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
# Disable -pg to prevent insert call site
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
-# Disable gcov profiling for VDSO code
+# Disable profiling and instrumentation for VDSO code
GCOV_PROFILE := n
KCOV_INSTRUMENT := n
+KASAN_SANITIZE := n
# Force dependency
$(obj)/vdso.o: $(obj)/vdso.so
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index ac6171e9c19e..25d5c9664e57 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -5,3 +5,5 @@ lib-y += memset.o
lib-y += memmove.o
lib-$(CONFIG_MMU) += uaccess.o
lib-$(CONFIG_64BIT) += tishift.o
+
+obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/riscv/lib/error-inject.c b/arch/riscv/lib/error-inject.c
new file mode 100644
index 000000000000..d667ade2bc41
--- /dev/null
+++ b/arch/riscv/lib/error-inject.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/error-injection.h>
+#include <linux/kprobes.h>
+
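+/* Make the probed function return immediately by redirecting pc to ra. */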
+void override_function_with_return(struct pt_regs *regs)
+{
+ instruction_pointer_set(regs, regs->ra);
+}
+NOKPROBE_SYMBOL(override_function_with_return);
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index c0185e556ca5..7ebaef10ea1b 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -2,7 +2,8 @@
CFLAGS_init.o := -mcmodel=medany
ifdef CONFIG_FTRACE
-CFLAGS_REMOVE_init.o = -pg
+CFLAGS_REMOVE_init.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_cacheflush.o = $(CC_FLAGS_FTRACE)
endif
KCOV_INSTRUMENT_init.o := n
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 613ec81a8979..68aa312fc352 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -2,13 +2,273 @@
/*
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
*/
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
#include <linux/mm.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/static_key.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
+#ifdef CONFIG_MMU
+
+static DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
+
+static unsigned long asid_bits;
+static unsigned long num_asids;
+static unsigned long asid_mask;
+
+static atomic_long_t current_version;
+
+static DEFINE_RAW_SPINLOCK(context_lock);
+static cpumask_t context_tlb_flush_pending;
+static unsigned long *context_asid_map;
+
+static DEFINE_PER_CPU(atomic_long_t, active_context);
+static DEFINE_PER_CPU(unsigned long, reserved_context);
+
+static bool check_update_reserved_context(unsigned long cntx,
+ unsigned long newcntx)
+{
+ int cpu;
+ bool hit = false;
+
+ /*
+ * Iterate over the set of reserved CONTEXT looking for a match.
+ * If we find one, then we can update our mm to use new CONTEXT
+ * (i.e. the same CONTEXT in the current_version) but we can't
+ * exit the loop early, since we need to ensure that all copies
+ * of the old CONTEXT are updated to reflect the mm. Failure to do
+ * so could result in us missing the reserved CONTEXT in a future
+ * version.
+ */
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(reserved_context, cpu) == cntx) {
+ hit = true;
+ per_cpu(reserved_context, cpu) = newcntx;
+ }
+ }
+
+ return hit;
+}
+
+static void __flush_context(void)
+{
+ int i;
+ unsigned long cntx;
+
+ /* Must be called with context_lock held */
+ lockdep_assert_held(&context_lock);
+
+ /* Update the list of reserved ASIDs and the ASID bitmap. */
+ bitmap_clear(context_asid_map, 0, num_asids);
+
+ /* Mark already active ASIDs as used */
+ for_each_possible_cpu(i) {
+ cntx = atomic_long_xchg_relaxed(&per_cpu(active_context, i), 0);
+ /*
+ * If this CPU has already been through a rollover, but
+ * hasn't run another task in the meantime, we must preserve
+ * its reserved CONTEXT, as this is the only trace we have of
+ * the process it is still running.
+ */
+ if (cntx == 0)
+ cntx = per_cpu(reserved_context, i);
+
+ __set_bit(cntx & asid_mask, context_asid_map);
+ per_cpu(reserved_context, i) = cntx;
+ }
+
+ /* Mark ASID #0 as used because it is used at boot-time */
+ __set_bit(0, context_asid_map);
+
+ /* Queue a TLB invalidation for each CPU on next context-switch */
+ cpumask_setall(&context_tlb_flush_pending);
+}
+
+static unsigned long __new_context(struct mm_struct *mm)
+{
+ static u32 cur_idx = 1;
+ unsigned long cntx = atomic_long_read(&mm->context.id);
+ unsigned long asid, ver = atomic_long_read(&current_version);
+
+ /* Must be called with context_lock held */
+ lockdep_assert_held(&context_lock);
+
+ if (cntx != 0) {
+ unsigned long newcntx = ver | (cntx & asid_mask);
+
+ /*
+ * If our current CONTEXT was active during a rollover, we
+ * can continue to use it and this was just a false alarm.
+ */
+ if (check_update_reserved_context(cntx, newcntx))
+ return newcntx;
+
+ /*
+ * We had a valid CONTEXT in a previous life, so try to
+ * re-use it if possible.
+ */
+ if (!__test_and_set_bit(cntx & asid_mask, context_asid_map))
+ return newcntx;
+ }
+
+ /*
+ * Allocate a free ASID. If we can't find one then increment
+ * current_version and flush all ASIDs.
+ */
+ asid = find_next_zero_bit(context_asid_map, num_asids, cur_idx);
+ if (asid != num_asids)
+ goto set_asid;
+
+ /* We're out of ASIDs, so increment current_version */
+ ver = atomic_long_add_return_relaxed(num_asids, &current_version);
+
+ /* Flush everything */
+ __flush_context();
+
+ /* We have more ASIDs than CPUs, so this will always succeed */
+ asid = find_next_zero_bit(context_asid_map, num_asids, 1);
+
+set_asid:
+ __set_bit(asid, context_asid_map);
+ cur_idx = asid;
+ return asid | ver;
+}
+
+static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
+{
+ unsigned long flags;
+ bool need_flush_tlb = false;
+ unsigned long cntx, old_active_cntx;
+
+ cntx = atomic_long_read(&mm->context.id);
+
+ /*
+ * If our active_context is non-zero and the context matches the
+ * current_version, then we update the active_context entry with a
+ * relaxed cmpxchg.
+ *
+ * Following is how we handle racing with a concurrent rollover:
+ *
+ * - We get a zero back from the cmpxchg and end up waiting on the
+ * lock. Taking the lock synchronises with the rollover and so
+	 *   we are forced to see the updated version.
+ *
+	 * - We get a valid context back from the cmpxchg; then we continue
+	 *   using the old ASID because __flush_context() would have marked the
+	 *   ASID of active_context as used, and we will allocate a new context
+	 *   at the next context switch.
+ */
+ old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu));
+ if (old_active_cntx &&
+ ((cntx & ~asid_mask) == atomic_long_read(&current_version)) &&
+ atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu),
+ old_active_cntx, cntx))
+ goto switch_mm_fast;
+
+ raw_spin_lock_irqsave(&context_lock, flags);
+
+ /* Check that our ASID belongs to the current_version. */
+ cntx = atomic_long_read(&mm->context.id);
+ if ((cntx & ~asid_mask) != atomic_long_read(&current_version)) {
+ cntx = __new_context(mm);
+ atomic_long_set(&mm->context.id, cntx);
+ }
+
+ if (cpumask_test_and_clear_cpu(cpu, &context_tlb_flush_pending))
+ need_flush_tlb = true;
+
+ atomic_long_set(&per_cpu(active_context, cpu), cntx);
+
+ raw_spin_unlock_irqrestore(&context_lock, flags);
+
+switch_mm_fast:
+ csr_write(CSR_SATP, virt_to_pfn(mm->pgd) |
+ ((cntx & asid_mask) << SATP_ASID_SHIFT) |
+ SATP_MODE);
+
+ if (need_flush_tlb)
+ local_flush_tlb_all();
+}
+
+static void set_mm_noasid(struct mm_struct *mm)
+{
+ /* Switch the page table and blindly nuke entire local TLB */
+ csr_write(CSR_SATP, virt_to_pfn(mm->pgd) | SATP_MODE);
+ local_flush_tlb_all();
+}
+
+static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+{
+ if (static_branch_unlikely(&use_asid_allocator))
+ set_mm_asid(mm, cpu);
+ else
+ set_mm_noasid(mm);
+}
+
+static int asids_init(void)
+{
+ unsigned long old;
+
+ /* Figure-out number of ASID bits in HW */
+ old = csr_read(CSR_SATP);
+ asid_bits = old | (SATP_ASID_MASK << SATP_ASID_SHIFT);
+ csr_write(CSR_SATP, asid_bits);
+ asid_bits = (csr_read(CSR_SATP) >> SATP_ASID_SHIFT) & SATP_ASID_MASK;
+ asid_bits = fls_long(asid_bits);
+ csr_write(CSR_SATP, old);
+
+ /*
+	 * In the process of determining the number of ASID bits (above)
+	 * we polluted the TLB of the current hart, so do a TLB flush
+	 * to remove the unwanted TLB entries.
+ */
+ local_flush_tlb_all();
+
+ /* Pre-compute ASID details */
+ num_asids = 1 << asid_bits;
+ asid_mask = num_asids - 1;
+
+ /*
+	 * Use the ASID allocator only if the number of HW ASIDs is
+	 * more than twice the number of possible CPUs.
+ */
+ if (num_asids > (2 * num_possible_cpus())) {
+ atomic_long_set(&current_version, num_asids);
+
+ context_asid_map = kcalloc(BITS_TO_LONGS(num_asids),
+ sizeof(*context_asid_map), GFP_KERNEL);
+ if (!context_asid_map)
+ panic("Failed to allocate bitmap for %lu ASIDs\n",
+ num_asids);
+
+ __set_bit(0, context_asid_map);
+
+ static_branch_enable(&use_asid_allocator);
+
+ pr_info("ASID allocator using %lu bits (%lu entries)\n",
+ asid_bits, num_asids);
+ } else {
+ pr_info("ASID allocator disabled\n");
+ }
+
+ return 0;
+}
+early_initcall(asids_init);
+#else
+static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+{
+ /* Nothing to do here when there is no MMU */
+}
+#endif
+
/*
* When necessary, performs a deferred icache flush for the given MM context,
* on the local CPU. RISC-V has no direct mechanism for instruction cache
@@ -58,10 +318,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next));
-#ifdef CONFIG_MMU
- csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
- local_flush_tlb_all();
-#endif
+ set_mm(next, cpu);
flush_icache_deferred(next);
}
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 3c8b9e433c67..8f17519208c7 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -13,14 +13,30 @@
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
+#include <linux/kprobes.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>
#include "../kernel/head.h"
+static void die_kernel_fault(const char *msg, unsigned long addr,
+ struct pt_regs *regs)
+{
+ bust_spinlocks(1);
+
+ pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n", msg,
+ addr);
+
+ bust_spinlocks(0);
+ die(regs, "Oops");
+ do_exit(SIGKILL);
+}
+
static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
+ const char *msg;
+
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
@@ -29,12 +45,8 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- bust_spinlocks(1);
- pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
- (addr < PAGE_SIZE) ? "NULL pointer dereference" :
- "paging request", addr);
- die(regs, "Oops");
- do_exit(SIGKILL);
+ msg = (addr < PAGE_SIZE) ? "NULL pointer dereference" : "paging request";
+ die_kernel_fault(msg, addr, regs);
}
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
@@ -202,6 +214,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
tsk = current;
mm = tsk->mm;
+ if (kprobe_page_fault(regs, cause))
+ return;
+
/*
* Fault-in kernel-space virtual memory on-demand.
* The 'reference' page table is init_mm.pgd.
@@ -225,6 +240,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
* in an atomic region, then we must not take the fault.
*/
if (unlikely(faulthandler_disabled() || !mm)) {
+ tsk->thread.bad_cause = cause;
no_context(regs, addr);
return;
}
@@ -232,6 +248,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
+ if (!user_mode(regs) && addr < TASK_SIZE &&
+ unlikely(!(regs->status & SR_SUM)))
+ die_kernel_fault("access to user memory without uaccess routines",
+ addr, regs);
+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
if (cause == EXC_STORE_PAGE_FAULT)
@@ -242,16 +263,19 @@ retry:
mmap_read_lock(mm);
vma = find_vma(mm, addr);
if (unlikely(!vma)) {
+ tsk->thread.bad_cause = cause;
bad_area(regs, mm, code, addr);
return;
}
if (likely(vma->vm_start <= addr))
goto good_area;
if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
+ tsk->thread.bad_cause = cause;
bad_area(regs, mm, code, addr);
return;
}
if (unlikely(expand_stack(vma, addr))) {
+ tsk->thread.bad_cause = cause;
bad_area(regs, mm, code, addr);
return;
}
@@ -264,6 +288,7 @@ good_area:
code = SEGV_ACCERR;
if (unlikely(access_error(cause, vma))) {
+ tsk->thread.bad_cause = cause;
bad_area(regs, mm, code, addr);
return;
}
@@ -297,6 +322,7 @@ good_area:
mmap_read_unlock(mm);
if (unlikely(fault & VM_FAULT_ERROR)) {
+ tsk->thread.bad_cause = cause;
mm_fault_error(regs, addr, fault);
return;
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 7cd4993f4ff2..067583ab1bd7 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -21,6 +21,7 @@
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>
+#include <asm/numa.h>
#include "../kernel/head.h"
@@ -105,85 +106,19 @@ void __init mem_init(void)
print_vm_layout();
}
-#ifdef CONFIG_BLK_DEV_INITRD
-static void __init setup_initrd(void)
-{
- phys_addr_t start;
- unsigned long size;
-
- /* Ignore the virtul address computed during device tree parsing */
- initrd_start = initrd_end = 0;
-
- if (!phys_initrd_size)
- return;
- /*
- * Round the memory region to page boundaries as per free_initrd_mem()
- * This allows us to detect whether the pages overlapping the initrd
- * are in use, but more importantly, reserves the entire set of pages
- * as we don't want these pages allocated for other purposes.
- */
- start = round_down(phys_initrd_start, PAGE_SIZE);
- size = phys_initrd_size + (phys_initrd_start - start);
- size = round_up(size, PAGE_SIZE);
-
- if (!memblock_is_region_memory(start, size)) {
- pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
- (u64)start, size);
- goto disable;
- }
-
- if (memblock_is_region_reserved(start, size)) {
- pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
- (u64)start, size);
- goto disable;
- }
-
- memblock_reserve(start, size);
- /* Now convert initrd to virtual addresses */
- initrd_start = (unsigned long)__va(phys_initrd_start);
- initrd_end = initrd_start + phys_initrd_size;
- initrd_below_start_ok = 1;
-
- pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
- (void *)(initrd_start), size);
- return;
-disable:
- pr_cont(" - disabling initrd\n");
- initrd_start = 0;
- initrd_end = 0;
-}
-#endif /* CONFIG_BLK_DEV_INITRD */
-
void __init setup_bootmem(void)
{
- phys_addr_t mem_start = 0;
- phys_addr_t start, dram_end, end = 0;
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t vmlinux_start = __pa_symbol(&_start);
+ phys_addr_t dram_end = memblock_end_of_DRAM();
phys_addr_t max_mapped_addr = __pa(~(ulong)0);
- u64 i;
- /* Find the memory region containing the kernel */
- for_each_mem_range(i, &start, &end) {
- phys_addr_t size = end - start;
- if (!mem_start)
- mem_start = start;
- if (start <= vmlinux_start && vmlinux_end <= end)
- BUG_ON(size == 0);
- }
-
- /*
- * The maximal physical memory size is -PAGE_OFFSET.
- * Make sure that any memory beyond mem_start + (-PAGE_OFFSET) is removed
- * as it is unusable by kernel.
- */
+ /* The maximal physical memory size is -PAGE_OFFSET. */
memblock_enforce_memory_limit(-PAGE_OFFSET);
/* Reserve from the start of the kernel to the end of the kernel */
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
- dram_end = memblock_end_of_DRAM();
-
/*
* memblock allocator is not aware of the fact that last 4K bytes of
* the addressable memory can not be mapped because of IS_ERR_VALUE
@@ -196,22 +131,21 @@ void __init setup_bootmem(void)
max_pfn = PFN_DOWN(dram_end);
max_low_pfn = max_pfn;
dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
- set_max_mapnr(max_low_pfn);
-
-#ifdef CONFIG_BLK_DEV_INITRD
- setup_initrd();
-#endif /* CONFIG_BLK_DEV_INITRD */
+ set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
+ reserve_initrd_mem();
/*
- * Avoid using early_init_fdt_reserve_self() since __pa() does
+ * If DTB is built in, no need to reserve its memblock.
+ * Otherwise, do reserve it but avoid using
+ * early_init_fdt_reserve_self() since __pa() does
* not work for DTB pointers that are fixmap addresses
*/
- memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
+ if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
+ memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
early_init_fdt_scan_reserved_mem();
dma_contiguous_reserve(dma32_phys_limit);
memblock_allow_resize();
- memblock_dump_all();
}
#ifdef CONFIG_MMU
@@ -226,8 +160,6 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
-#define MAX_EARLY_MAPPING_SIZE SZ_128M
-
pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
@@ -302,13 +234,7 @@ static void __init create_pte_mapping(pte_t *ptep,
pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
-
-#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
-#define NUM_EARLY_PMDS 1UL
-#else
-#define NUM_EARLY_PMDS (1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
-#endif
-pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);
+pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
@@ -330,11 +256,9 @@ static pmd_t *get_pmd_virt_late(phys_addr_t pa)
static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
- uintptr_t pmd_num;
+ BUG_ON((va - PAGE_OFFSET) >> PGDIR_SHIFT);
- pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
- BUG_ON(pmd_num >= NUM_EARLY_PMDS);
- return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
+ return (uintptr_t)early_pmd;
}
static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
@@ -452,7 +376,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
uintptr_t va, pa, end_va;
uintptr_t load_pa = (uintptr_t)(&_start);
uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
- uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);
+ uintptr_t map_size;
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif
@@ -464,12 +388,11 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
* Enforce boot alignment requirements of RV32 and
* RV64 by only allowing PMD or PGD mappings.
*/
- BUG_ON(map_size == PAGE_SIZE);
+ map_size = PMD_SIZE;
/* Sanity check alignment and size */
BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
BUG_ON((load_pa % map_size) != 0);
- BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);
pt_ops.alloc_pte = alloc_pte_early;
pt_ops.get_pte_virt = get_pte_virt_early;
@@ -511,6 +434,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
/* Setup early PMD for DTB */
create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
(uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
+#ifndef CONFIG_BUILTIN_DTB
/* Create two consecutive PMD mappings for FDT early scan */
pa = dtb_pa & ~(PMD_SIZE - 1);
create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
@@ -518,7 +442,11 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
+#else /* CONFIG_BUILTIN_DTB */
+ dtb_early_va = __va(dtb_pa);
+#endif /* CONFIG_BUILTIN_DTB */
#else
+#ifndef CONFIG_BUILTIN_DTB
/* Create two consecutive PGD mappings for FDT early scan */
pa = dtb_pa & ~(PGDIR_SIZE - 1);
create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
@@ -526,6 +454,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
+#else /* CONFIG_BUILTIN_DTB */
+ dtb_early_va = __va(dtb_pa);
+#endif /* CONFIG_BUILTIN_DTB */
#endif
dtb_early_pa = dtb_pa;
@@ -616,15 +547,7 @@ static void __init setup_vm_final(void)
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
-#ifdef CONFIG_BUILTIN_DTB
- dtb_early_va = soc_lookup_builtin_dtb();
- if (!dtb_early_va) {
- /* Fallback to first available DTS */
- dtb_early_va = (void *) __dtb_start;
- }
-#else
dtb_early_va = (void *)dtb_pa;
-#endif
dtb_early_pa = dtb_pa;
}
@@ -665,9 +588,15 @@ void mark_rodata_ro(void)
void __init paging_init(void)
{
setup_vm_final();
- sparse_init();
setup_zero_page();
+}
+
+void __init misc_mem_init(void)
+{
+ arch_numa_init();
+ sparse_init();
zone_sizes_init();
+ memblock_dump_all();
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index a8a2ffd9114a..3fc18f469efb 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -9,6 +9,19 @@
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
+#include <asm/pgalloc.h>
+
+static __init void *early_alloc(size_t size, int node)
+{
+ void *ptr = memblock_alloc_try_nid(size, size,
+ __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
+
+ if (!ptr)
+ panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
+ __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
+
+ return ptr;
+}
extern pgd_t early_pg_dir[PTRS_PER_PGD];
asmlinkage void __init kasan_early_init(void)
@@ -47,40 +60,133 @@ asmlinkage void __init kasan_early_init(void)
local_flush_tlb_all();
}
-static void __init populate(void *start, void *end)
+static void kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
+{
+ phys_addr_t phys_addr;
+ pte_t *ptep, *base_pte;
+
+ if (pmd_none(*pmd))
+ base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
+ else
+ base_pte = (pte_t *)pmd_page_vaddr(*pmd);
+
+ ptep = base_pte + pte_index(vaddr);
+
+ do {
+ if (pte_none(*ptep)) {
+ phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+ set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
+ }
+ } while (ptep++, vaddr += PAGE_SIZE, vaddr != end);
+
+ set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
+}
+
+static void kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end)
+{
+ phys_addr_t phys_addr;
+ pmd_t *pmdp, *base_pmd;
+ unsigned long next;
+
+ base_pmd = (pmd_t *)pgd_page_vaddr(*pgd);
+ if (base_pmd == lm_alias(kasan_early_shadow_pmd))
+ base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
+
+ pmdp = base_pmd + pmd_index(vaddr);
+
+ do {
+ next = pmd_addr_end(vaddr, end);
+
+ if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
+ phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
+ if (phys_addr) {
+ set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
+ continue;
+ }
+ }
+
+ kasan_populate_pte(pmdp, vaddr, next);
+ } while (pmdp++, vaddr = next, vaddr != end);
+
+ /*
+ * Wait for the whole PGD to be populated before setting the PGD in
+ * the page table; otherwise, if we set the PGD before populating
+ * it entirely, memblock could allocate a page at a physical address
+ * where KASAN is not populated yet and we would get a page fault.
+ */
+ set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
+}
+
+static void kasan_populate_pgd(unsigned long vaddr, unsigned long end)
+{
+ phys_addr_t phys_addr;
+ pgd_t *pgdp = pgd_offset_k(vaddr);
+ unsigned long next;
+
+ do {
+ next = pgd_addr_end(vaddr, end);
+
+ /*
+ * pgdp can't be none since kasan_early_init initialized the whole KASAN
+ * shadow region with kasan_early_shadow_pmd: if this is still the case,
+ * that means we can try to allocate a hugepage as a replacement.
+ */
+ if (pgd_page_vaddr(*pgdp) == (unsigned long)lm_alias(kasan_early_shadow_pmd) &&
+ IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
+ phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
+ if (phys_addr) {
+ set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
+ continue;
+ }
+ }
+
+ kasan_populate_pmd(pgdp, vaddr, next);
+ } while (pgdp++, vaddr = next, vaddr != end);
+}
+
+static void __init kasan_populate(void *start, void *end)
{
- unsigned long i, offset;
unsigned long vaddr = (unsigned long)start & PAGE_MASK;
unsigned long vend = PAGE_ALIGN((unsigned long)end);
- unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
- unsigned long n_ptes =
- ((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
- unsigned long n_pmds =
- ((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;
-
- pte_t *pte =
- memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
- pmd_t *pmd =
- memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
- pgd_t *pgd = pgd_offset_k(vaddr);
-
- for (i = 0; i < n_pages; i++) {
- phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
- set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
- }
-
- for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
- set_pmd(&pmd[i],
- pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
- __pgprot(_PAGE_TABLE)));
- for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
- set_pgd(&pgd[i],
- pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
- __pgprot(_PAGE_TABLE)));
+ kasan_populate_pgd(vaddr, vend);
local_flush_tlb_all();
- memset(start, 0, end - start);
+ memset(start, KASAN_SHADOW_INIT, end - start);
+}
+
+void __init kasan_shallow_populate(void *start, void *end)
+{
+ unsigned long vaddr = (unsigned long)start & PAGE_MASK;
+ unsigned long vend = PAGE_ALIGN((unsigned long)end);
+ unsigned long pfn;
+ int index;
+ void *p;
+ pud_t *pud_dir, *pud_k;
+ pgd_t *pgd_dir, *pgd_k;
+ p4d_t *p4d_dir, *p4d_k;
+
+ while (vaddr < vend) {
+ index = pgd_index(vaddr);
+ pfn = csr_read(CSR_SATP) & SATP_PPN;
+ pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index;
+ pgd_k = init_mm.pgd + index;
+ pgd_dir = pgd_offset_k(vaddr);
+ set_pgd(pgd_dir, *pgd_k);
+
+ p4d_dir = p4d_offset(pgd_dir, vaddr);
+ p4d_k = p4d_offset(pgd_k, vaddr);
+
+ vaddr = (vaddr + PUD_SIZE) & PUD_MASK;
+ pud_dir = pud_offset(p4d_dir, vaddr);
+ pud_k = pud_offset(p4d_k, vaddr);
+
+ if (pud_present(*pud_dir)) {
+ p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+ pud_populate(&init_mm, pud_dir, p);
+ }
+ vaddr += PAGE_SIZE;
+ }
}
void __init kasan_init(void)
@@ -90,7 +196,15 @@ void __init kasan_init(void)
kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
(void *)kasan_mem_to_shadow((void *)
- VMALLOC_END));
+ VMEMMAP_END));
+ if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+ kasan_shallow_populate(
+ (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
+ (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+ else
+ kasan_populate_early_shadow(
+ (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
+ (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
for_each_mem_range(i, &_start, &_end) {
void *start = (void *)__va(_start);
@@ -99,7 +213,7 @@ void __init kasan_init(void)
if (start >= end)
break;
- populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
+ kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
};
for (i = 0; i < PTRS_PER_PTE; i++)
@@ -108,6 +222,6 @@ void __init kasan_init(void)
__pgprot(_PAGE_PRESENT | _PAGE_READ |
_PAGE_ACCESSED)));
- memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+ memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
init_task.kasan_depth = 0;
}
diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
index 579575f9cdae..81de865f4c7c 100644
--- a/arch/riscv/net/bpf_jit_comp32.c
+++ b/arch/riscv/net/bpf_jit_comp32.c
@@ -881,7 +881,7 @@ static int emit_store_r64(const s8 *dst, const s8 *src, s16 off,
const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
- if (mode == BPF_XADD && size != BPF_W)
+ if (mode == BPF_ATOMIC && size != BPF_W)
return -1;
emit_imm(RV_REG_T0, off, ctx);
@@ -899,7 +899,7 @@ static int emit_store_r64(const s8 *dst, const s8 *src, s16 off,
case BPF_MEM:
emit(rv_sw(RV_REG_T0, 0, lo(rs)), ctx);
break;
- case BPF_XADD:
+ case BPF_ATOMIC: /* Only BPF_ADD supported */
emit(rv_amoadd_w(RV_REG_ZERO, lo(rs), RV_REG_T0, 0, 0),
ctx);
break;
@@ -1260,7 +1260,6 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_STX | BPF_MEM | BPF_H:
case BPF_STX | BPF_MEM | BPF_W:
case BPF_STX | BPF_MEM | BPF_DW:
- case BPF_STX | BPF_XADD | BPF_W:
if (BPF_CLASS(code) == BPF_ST) {
emit_imm32(tmp2, imm, ctx);
src = tmp2;
@@ -1271,8 +1270,21 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
return -1;
break;
+ case BPF_STX | BPF_ATOMIC | BPF_W:
+ if (insn->imm != BPF_ADD) {
+ pr_info_once(
+ "bpf-jit: not supported: atomic operation %02x ***\n",
+ insn->imm);
+ return -EFAULT;
+ }
+
+ if (emit_store_r64(dst, src, off, ctx, BPF_SIZE(code),
+ BPF_MODE(code)))
+ return -1;
+ break;
+
/* No hardware support for 8-byte atomics in RV32. */
- case BPF_STX | BPF_XADD | BPF_DW:
+ case BPF_STX | BPF_ATOMIC | BPF_DW:
/* Fallthrough. */
notsupported:
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 8a56b5293117..b44ff52f84a6 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -1027,10 +1027,18 @@ out_be:
emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
emit_sd(RV_REG_T1, 0, rs, ctx);
break;
- /* STX XADD: lock *(u32 *)(dst + off) += src */
- case BPF_STX | BPF_XADD | BPF_W:
- /* STX XADD: lock *(u64 *)(dst + off) += src */
- case BPF_STX | BPF_XADD | BPF_DW:
+ case BPF_STX | BPF_ATOMIC | BPF_W:
+ case BPF_STX | BPF_ATOMIC | BPF_DW:
+ if (insn->imm != BPF_ADD) {
+ pr_err("bpf-jit: not supported: atomic operation %02x ***\n",
+ insn->imm);
+ return -EINVAL;
+ }
+
+ /* atomic_add: lock *(u32 *)(dst + off) += src
+ * atomic_add: lock *(u64 *)(dst + off) += src
+ */
+
if (off) {
if (is_12b_int(off)) {
emit_addi(RV_REG_T1, rd, off, ctx);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c72874f09741..c1ff874e6c2e 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -58,6 +58,7 @@ config S390
# Note: keep this list sorted alphabetically
#
imply IMA_SECURE_AND_OR_TRUSTED_BOOT
+ select ARCH_32BIT_USTAT_F_TINODE
select ARCH_BINFMT_ELF_STATE
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_WX
@@ -123,11 +124,13 @@ config S390
select GENERIC_ALLOCATOR
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES
+ select GENERIC_ENTRY
select GENERIC_FIND_FIRST_BIT
select GENERIC_GETTIMEOFDAY
select GENERIC_PTDUMP
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
+ select GENERIC_VDSO_TIME_NS
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL
@@ -174,7 +177,6 @@ config S390
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_NOP_MCOUNT
- select HAVE_OPROFILE
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -182,6 +184,7 @@ config S390
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE
select HAVE_RSEQ
+ select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
select HAVE_VIRT_CPU_ACCOUNTING_IDLE
@@ -426,7 +429,6 @@ config 64BIT
config COMPAT
def_bool y
prompt "Kernel support for 31 bit emulation"
- select COMPAT_BINFMT_ELF if BINFMT_ELF
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
select HAVE_UID16
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 6bfaceebbbc0..ef96c25fa921 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -6,10 +6,12 @@ config TRACE_IRQFLAGS_SUPPORT
config EARLY_PRINTK
def_bool y
-config DEBUG_USER_ASCE
- bool "Debug User ASCE"
+config DEBUG_ENTRY
+ bool "Debug low-level entry code"
+ depends on DEBUG_KERNEL
help
- Check on exit to user space that address space control
- elements are setup correctly.
+ This option enables sanity checks in s390 low-level entry code.
+ Some of these sanity checks may slow down kernel entries and
+ exits or otherwise impact performance.
If unsure, say N.
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 8db267d2a543..e443ed9947bd 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -134,9 +134,6 @@ core-y += arch/s390/
libs-y += arch/s390/lib/
drivers-y += drivers/s390/
-# must be linked after kernel
-drivers-$(CONFIG_OPROFILE) += arch/s390/oprofile/
-
boot := arch/s390/boot
syscalls := arch/s390/kernel/syscalls
tools := arch/s390/tools
diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
index a15c033f53ca..87641dd65ccf 100644
--- a/arch/s390/boot/uv.c
+++ b/arch/s390/boot/uv.c
@@ -35,7 +35,7 @@ void uv_query_info(void)
uv_info.guest_cpu_stor_len = uvcb.cpu_stor_len;
uv_info.max_sec_stor_addr = ALIGN(uvcb.max_guest_stor_addr, PAGE_SIZE);
uv_info.max_num_sec_conf = uvcb.max_num_sec_conf;
- uv_info.max_guest_cpus = uvcb.max_guest_cpus;
+ uv_info.max_guest_cpu_id = uvcb.max_guest_cpu_id;
}
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index c4f6ff98a612..02056b024091 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -40,6 +40,7 @@ CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_LIVEPATCH=y
+CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
@@ -57,7 +58,6 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_S390_UNWIND_SELFTEST=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_STATIC_KEYS_SELFTEST=y
@@ -71,7 +71,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
-CONFIG_UNUSED_SYMBOLS=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
@@ -177,13 +176,17 @@ CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=y
CONFIG_NFT_CT=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_OBJREF=m
+CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -275,6 +278,7 @@ CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_TABLES_IPV4=y
+CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
@@ -295,6 +299,7 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_TABLES_IPV6=y
+CONFIG_NFT_FIB_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -630,7 +635,6 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_TMPFS_INODE64=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
@@ -792,6 +796,8 @@ CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
CONFIG_SLUB_DEBUG_ON=y
CONFIG_SLUB_STATS=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_VM_VMACACHE=y
@@ -832,7 +838,6 @@ CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
CONFIG_FTRACE_STARTUP_TEST=y
# CONFIG_EVENT_TRACE_STARTUP_TEST is not set
-CONFIG_DEBUG_USER_ASCE=y
CONFIG_NOTIFIER_ERROR_INJECTION=m
CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y
@@ -856,3 +861,4 @@ CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BITOPS=m
CONFIG_TEST_BPF=m
+CONFIG_DEBUG_ENTRY=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 51135893cffe..bac721a501da 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -38,6 +38,7 @@ CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_LIVEPATCH=y
+CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
@@ -55,7 +56,6 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_S390_UNWIND_SELFTEST=m
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
# CONFIG_GCC_PLUGINS is not set
@@ -66,7 +66,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
-CONFIG_UNUSED_SYMBOLS=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
@@ -168,13 +167,17 @@ CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=y
CONFIG_NFT_CT=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_OBJREF=m
+CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -266,6 +269,7 @@ CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_TABLES_IPV4=y
+CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
@@ -286,6 +290,7 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_TABLES_IPV6=y
+CONFIG_NFT_FIB_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -618,7 +623,6 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_TMPFS_INODE64=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
@@ -780,7 +784,6 @@ CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
-CONFIG_DEBUG_USER_ASCE=y
CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 1ef211dae77a..acf982a2ae4c 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -3,11 +3,13 @@ CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
# CONFIG_CPU_ISOLATION is not set
# CONFIG_UTS_NS is not set
+# CONFIG_TIME_NS is not set
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_COMPAT_BRK is not set
+CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
# CONFIG_COMPAT is not set
CONFIG_NR_CPUS=2
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 73044634d342..54c7536f2482 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -21,6 +21,7 @@
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
@@ -1055,3 +1056,4 @@ MODULE_ALIAS_CRYPTO("aes-all");
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index f3caeb17c85b..a279b7d23a5e 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/delay.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
@@ -128,6 +129,9 @@ static inline int __paes_keyblob2pkey(struct key_blob *kb,
/* try three times in case of failure */
for (i = 0; i < 3; i++) {
+ if (i > 0 && ret == -EAGAIN && in_task())
+ if (msleep_interruptible(1000))
+ return -EINTR;
ret = pkey_keyblob2pkey(kb->key, kb->keylen, pk);
if (ret == 0)
break;
@@ -138,10 +142,12 @@ static inline int __paes_keyblob2pkey(struct key_blob *kb,
static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
{
+ int ret;
struct pkey_protkey pkey;
- if (__paes_keyblob2pkey(&ctx->kb, &pkey))
- return -EINVAL;
+ ret = __paes_keyblob2pkey(&ctx->kb, &pkey);
+ if (ret)
+ return ret;
spin_lock_bh(&ctx->pk_lock);
memcpy(&ctx->pk, &pkey, sizeof(pkey));
@@ -169,10 +175,12 @@ static void ecb_paes_exit(struct crypto_skcipher *tfm)
static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
{
+ int rc;
unsigned long fc;
- if (__paes_convert_key(ctx))
- return -EINVAL;
+ rc = __paes_convert_key(ctx);
+ if (rc)
+ return rc;
/* Pick the correct function code based on the protected key type */
fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
@@ -282,10 +290,12 @@ static void cbc_paes_exit(struct crypto_skcipher *tfm)
static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
{
+ int rc;
unsigned long fc;
- if (__paes_convert_key(ctx))
- return -EINVAL;
+ rc = __paes_convert_key(ctx);
+ if (rc)
+ return rc;
/* Pick the correct function code based on the protected key type */
fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 :
@@ -577,10 +587,12 @@ static void ctr_paes_exit(struct crypto_skcipher *tfm)
static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
{
+ int rc;
unsigned long fc;
- if (__paes_convert_key(ctx))
- return -EINVAL;
+ rc = __paes_convert_key(ctx);
+ if (rc)
+ return rc;
/* Pick the correct function code based on the protected key type */
fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 :
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index b2f219ec379c..234d791ca59d 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -414,7 +414,7 @@ static int __init prng_sha512_instantiate(void)
}
/* append the seed by 16 bytes of unique nonce */
- get_tod_clock_ext(seed + seedlen);
+ store_tod_clock_ext((union tod_clock *)(seed + seedlen));
seedlen += 16;
/* now initial seed of the prno drng */
diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c
index 3235e4d82f2d..6c43d2ba2079 100644
--- a/arch/s390/hypfs/hypfs_diag0c.c
+++ b/arch/s390/hypfs/hypfs_diag0c.c
@@ -84,7 +84,7 @@ static int dbfs_diag0c_create(void **data, void **data_free_ptr, size_t *size)
if (IS_ERR(diag0c_data))
return PTR_ERR(diag0c_data);
memset(&diag0c_data->hdr, 0, sizeof(diag0c_data->hdr));
- get_tod_clock_ext(diag0c_data->hdr.tod_ext);
+ store_tod_clock_ext((union tod_clock *)diag0c_data->hdr.tod_ext);
diag0c_data->hdr.len = count * sizeof(struct hypfs_diag0c_entry);
diag0c_data->hdr.version = DBFS_D0C_HDR_VERSION;
diag0c_data->hdr.count = count;
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index e1fcc03159ef..33f973ff9744 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -234,7 +234,7 @@ failed:
struct dbfs_d2fc_hdr {
u64 len; /* Length of d2fc buffer without header */
u16 version; /* Version of header */
- char tod_ext[STORE_CLOCK_EXT_SIZE]; /* TOD clock for d2fc */
+ union tod_clock tod_ext; /* TOD clock for d2fc */
u64 count; /* Number of VM guests in d2fc buffer */
char reserved[30];
} __attribute__ ((packed));
@@ -252,7 +252,7 @@ static int dbfs_diag2fc_create(void **data, void **data_free_ptr, size_t *size)
d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr));
if (IS_ERR(d2fc))
return PTR_ERR(d2fc);
- get_tod_clock_ext(d2fc->hdr.tod_ext);
+ store_tod_clock_ext(&d2fc->hdr.tod_ext);
d2fc->hdr.len = count * sizeof(struct diag2fc_data);
d2fc->hdr.version = DBFS_D2FC_HDR_VERSION;
d2fc->hdr.count = count;
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
index 1c8a38f762a3..d3880ca764ee 100644
--- a/arch/s390/include/asm/alternative.h
+++ b/arch/s390/include/asm/alternative.h
@@ -145,6 +145,22 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
asm_inline volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
altinstr2, facility2) ::: "memory")
+/* Alternative inline assembly with input. */
+#define alternative_input(oldinstr, newinstr, feature, input...) \
+ asm_inline volatile (ALTERNATIVE(oldinstr, newinstr, feature) \
+ : : input)
+
+/* Like alternative_input, but with a single output argument */
+#define alternative_io(oldinstr, altinstr, facility, output, input...) \
+ asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, facility) \
+ : output : input)
+
+/* Use this macro if more than one output parameter is needed. */
+#define ASM_OUTPUT2(a...) a
+
+/* Use this macro if clobbers are needed without inputs. */
+#define ASM_NO_INPUT_CLOBBER(clobber...) : clobber
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_ALTERNATIVE_H */
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index aea32dda3d14..837d1699b109 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -368,7 +368,7 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
#if IS_ENABLED(CONFIG_ZCRYPT)
void ap_bus_cfg_chg(void);
#else
-static inline void ap_bus_cfg_chg(void){};
+static inline void ap_bus_cfg_chg(void){}
#endif
#endif /* _ASM_S390_AP_H_ */
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 11c5952e1afa..5860ae790f2d 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -44,16 +44,6 @@ static inline int atomic_fetch_add(int i, atomic_t *v)
static inline void atomic_add(int i, atomic_t *v)
{
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
- /*
- * Order of conditions is important to circumvent gcc 10 bug:
- * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html
- */
- if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
- __atomic_add_const(i, &v->counter);
- return;
- }
-#endif
__atomic_add(i, &v->counter);
}
@@ -115,16 +105,6 @@ static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
static inline void atomic64_add(s64 i, atomic64_t *v)
{
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
- /*
- * Order of conditions is important to circumvent gcc 10 bug:
- * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html
- */
- if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
- __atomic64_add_const(i, (long *)&v->counter);
- return;
- }
-#endif
__atomic64_add(i, (long *)&v->counter);
}
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 431e208a5ea4..31121d32f81d 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -61,18 +61,6 @@ static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned lon
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long mask;
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
- if (__builtin_constant_p(nr)) {
- unsigned char *caddr = __bitops_byte(nr, ptr);
-
- asm volatile(
- "oi %0,%b1\n"
- : "+Q" (*caddr)
- : "i" (1 << (nr & 7))
- : "cc", "memory");
- return;
- }
-#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
__atomic64_or(mask, (long *)addr);
}
@@ -82,18 +70,6 @@ static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned l
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long mask;
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
- if (__builtin_constant_p(nr)) {
- unsigned char *caddr = __bitops_byte(nr, ptr);
-
- asm volatile(
- "ni %0,%b1\n"
- : "+Q" (*caddr)
- : "i" (~(1 << (nr & 7)))
- : "cc", "memory");
- return;
- }
-#endif
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
__atomic64_and(mask, (long *)addr);
}
@@ -104,18 +80,6 @@ static __always_inline void arch_change_bit(unsigned long nr,
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long mask;
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
- if (__builtin_constant_p(nr)) {
- unsigned char *caddr = __bitops_byte(nr, ptr);
-
- asm volatile(
- "xi %0,%b1\n"
- : "+Q" (*caddr)
- : "i" (1 << (nr & 7))
- : "cc", "memory");
- return;
- }
-#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
__atomic64_xor(mask, (long *)addr);
}
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index cb729d111e20..1d389847b588 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -35,4 +35,6 @@ u64 arch_cpu_idle_time(int cpu);
#define arch_idle_time(cpu) arch_cpu_idle_time(cpu)
+void account_idle_time_irq(void);
+
#endif /* _S390_CPUTIME_H */
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 5775fc22f410..66d51ad090ab 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -233,8 +233,7 @@ extern char elf_platform[];
do { \
set_personality(PER_LINUX | \
(current->personality & (~PER_MASK))); \
- current->thread.sys_call_table = \
- (unsigned long) &sys_call_table; \
+ current->thread.sys_call_table = sys_call_table; \
} while (0)
#else /* CONFIG_COMPAT */
#define SET_PERSONALITY(ex) \
@@ -245,11 +244,11 @@ do { \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
set_thread_flag(TIF_31BIT); \
current->thread.sys_call_table = \
- (unsigned long) &sys_call_table_emu; \
+ sys_call_table_emu; \
} else { \
clear_thread_flag(TIF_31BIT); \
current->thread.sys_call_table = \
- (unsigned long) &sys_call_table; \
+ sys_call_table; \
} \
} while (0)
#endif /* CONFIG_COMPAT */
diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
new file mode 100644
index 000000000000..75cebc80474e
--- /dev/null
+++ b/arch/s390/include/asm/entry-common.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ARCH_S390_ENTRY_COMMON_H
+#define ARCH_S390_ENTRY_COMMON_H
+
+#include <linux/sched.h>
+#include <linux/audit.h>
+#include <linux/tracehook.h>
+#include <linux/processor.h>
+#include <linux/uaccess.h>
+#include <asm/fpu/api.h>
+
+#define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_GUARDED_STORAGE | _TIF_PER_TRAP)
+
+void do_per_trap(struct pt_regs *regs);
+void do_syscall(struct pt_regs *regs);
+
+typedef void (*pgm_check_func)(struct pt_regs *regs);
+
+extern pgm_check_func pgm_check_table[128];
+
+#ifdef CONFIG_DEBUG_ENTRY
+static __always_inline void arch_check_user_regs(struct pt_regs *regs)
+{
+ debug_user_asce(0);
+}
+
+#define arch_check_user_regs arch_check_user_regs
+#endif /* CONFIG_DEBUG_ENTRY */
+
+static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+ if (ti_work & _TIF_PER_TRAP) {
+ clear_thread_flag(TIF_PER_TRAP);
+ do_per_trap(regs);
+ }
+
+ if (ti_work & _TIF_GUARDED_STORAGE)
+ gs_load_bc_cb(regs);
+}
+
+#define arch_exit_to_user_mode_work arch_exit_to_user_mode_work
+
+static __always_inline void arch_exit_to_user_mode(void)
+{
+ if (test_cpu_flag(CIF_FPU))
+ __load_fpu_regs();
+
+ if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
+ debug_user_asce(1);
+}
+
+#define arch_exit_to_user_mode arch_exit_to_user_mode
+
+static inline bool on_thread_stack(void)
+{
+ return !(((unsigned long)(current->stack) ^ current_stack_pointer()) & ~(THREAD_SIZE - 1));
+}
+
+#endif
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 68c476b20b57..91b5d714d28f 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -44,7 +44,7 @@ static inline int __test_facility(unsigned long nr, void *facilities)
}
/*
- * The test_facility function uses the bit odering where the MSB is bit 0.
+ * The test_facility function uses the bit ordering where the MSB is bit 0.
* That makes it easier to query facility bits with the bit number as
* documented in the Principles of Operation.
*/
diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
index 34a7ae68485c..a959b815a58b 100644
--- a/arch/s390/include/asm/fpu/api.h
+++ b/arch/s390/include/asm/fpu/api.h
@@ -47,6 +47,8 @@
#include <linux/preempt.h>
void save_fpu_regs(void);
+void load_fpu_regs(void);
+void __load_fpu_regs(void);
static inline int test_fp_ctl(u32 fpc)
{
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index dfbc3c6c0674..58668ffb5488 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -18,7 +18,6 @@
#define or_softirq_pending(x) (S390_lowcore.softirq_pending |= (x))
#define __ARCH_IRQ_STAT
-#define __ARCH_HAS_DO_SOFTIRQ
#define __ARCH_IRQ_EXIT_IRQS_DISABLED
static inline void ack_bad_irq(unsigned int irq)
diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
index 6d4226dcf42a..b04f6a794cdf 100644
--- a/arch/s390/include/asm/idle.h
+++ b/arch/s390/include/asm/idle.h
@@ -20,11 +20,13 @@ struct s390_idle_data {
unsigned long long clock_idle_exit;
unsigned long long timer_idle_enter;
unsigned long long timer_idle_exit;
+ unsigned long mt_cycles_enter[8];
};
extern struct device_attribute dev_attr_idle_count;
extern struct device_attribute dev_attr_idle_time_us;
-void psw_idle(struct s390_idle_data *, unsigned long);
+void psw_idle(struct s390_idle_data *data, unsigned long psw_mask);
+void psw_idle_exit(void);
#endif /* _S390_IDLE_H */
diff --git a/arch/s390/include/asm/irq_work.h b/arch/s390/include/asm/irq_work.h
new file mode 100644
index 000000000000..603783766d0a
--- /dev/null
+++ b/arch/s390/include/asm/irq_work.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_IRQ_WORK_H
+#define _ASM_S390_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return true;
+}
+
+void arch_irq_work_raise(void);
+
+#endif /* _ASM_S390_IRQ_WORK_H */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 74f9a036bab2..6bcfc5614bbc 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -28,7 +28,6 @@
#define KVM_S390_BSCA_CPU_SLOTS 64
#define KVM_S390_ESCA_CPU_SLOTS 248
#define KVM_MAX_VCPUS 255
-#define KVM_USER_MEM_SLOTS 32
/*
* These seem to be used for allocating ->chip in the routing table, which we
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 69ce9191eaf1..22bceeeba4bc 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -81,8 +81,8 @@ struct lowcore {
psw_t return_mcck_psw; /* 0x02a0 */
/* CPU accounting and timing values. */
- __u64 sync_enter_timer; /* 0x02b0 */
- __u64 async_enter_timer; /* 0x02b8 */
+ __u64 sys_enter_timer; /* 0x02b0 */
+ __u8 pad_0x02b8[0x02c0-0x02b8]; /* 0x02b8 */
__u64 mcck_enter_timer; /* 0x02c0 */
__u64 exit_timer; /* 0x02c8 */
__u64 user_timer; /* 0x02d0 */
@@ -107,16 +107,15 @@ struct lowcore {
__u64 async_stack; /* 0x0350 */
__u64 nodat_stack; /* 0x0358 */
__u64 restart_stack; /* 0x0360 */
-
+ __u64 mcck_stack; /* 0x0368 */
/* Restart function and parameter. */
- __u64 restart_fn; /* 0x0368 */
- __u64 restart_data; /* 0x0370 */
- __u64 restart_source; /* 0x0378 */
+ __u64 restart_fn; /* 0x0370 */
+ __u64 restart_data; /* 0x0378 */
+ __u64 restart_source; /* 0x0380 */
/* Address space pointer. */
- __u64 kernel_asce; /* 0x0380 */
- __u64 user_asce; /* 0x0388 */
- __u8 pad_0x0390[0x0398-0x0390]; /* 0x0390 */
+ __u64 kernel_asce; /* 0x0388 */
+ __u64 user_asce; /* 0x0390 */
/*
* The lpp and current_pid fields form a
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index 5afee80cff58..20e51c9ff240 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -99,6 +99,7 @@ int nmi_alloc_per_cpu(struct lowcore *lc);
void nmi_free_per_cpu(struct lowcore *lc);
void s390_handle_mcck(void);
+void __s390_handle_mcck(void);
int s390_do_machine_check(struct pt_regs *regs);
#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 212628932ddc..053fe8b8dec7 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -201,7 +201,7 @@ extern unsigned int s390_pci_no_rid;
Prototypes
----------------------------------------------------------------------------- */
/* Base stuff */
-int zpci_create_device(struct zpci_dev *);
+int zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
void zpci_remove_device(struct zpci_dev *zdev);
int zpci_enable_device(struct zpci_dev *);
int zpci_disable_device(struct zpci_dev *);
@@ -212,7 +212,7 @@ void zpci_remove_reserved_devices(void);
/* CLP */
int clp_setup_writeback_mio(void);
int clp_scan_pci_devices(void);
-int clp_add_pci_device(u32, u32, int);
+int clp_query_pci_fn(struct zpci_dev *zdev);
int clp_enable_fh(struct zpci_dev *, u8);
int clp_disable_fh(struct zpci_dev *);
int clp_get_state(u32 fid, enum zpci_state *state);
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index d1297d6bbdcf..6b187cd72251 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -135,7 +135,7 @@ static inline void pmd_populate(struct mm_struct *mm,
#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
#define pmd_pgtable(pmd) \
- (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
+ ((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
/*
* page table entry allocation/free routines.
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 794746a32806..29c7ecd5ad1d 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1219,8 +1219,8 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-#define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
-#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
+#define p4d_deref(pud) ((unsigned long)__va(p4d_val(pud) & _REGION_ENTRY_ORIGIN))
+#define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN))
static inline unsigned long pmd_deref(pmd_t pmd)
{
@@ -1229,12 +1229,12 @@ static inline unsigned long pmd_deref(pmd_t pmd)
origin_mask = _SEGMENT_ENTRY_ORIGIN;
if (pmd_large(pmd))
origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
- return pmd_val(pmd) & origin_mask;
+ return (unsigned long)__va(pmd_val(pmd) & origin_mask);
}
static inline unsigned long pmd_pfn(pmd_t pmd)
{
- return pmd_deref(pmd) >> PAGE_SHIFT;
+ return __pa(pmd_deref(pmd)) >> PAGE_SHIFT;
}
static inline unsigned long pud_deref(pud_t pud)
@@ -1244,12 +1244,12 @@ static inline unsigned long pud_deref(pud_t pud)
origin_mask = _REGION_ENTRY_ORIGIN;
if (pud_large(pud))
origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
- return pud_val(pud) & origin_mask;
+ return (unsigned long)__va(pud_val(pud) & origin_mask);
}
static inline unsigned long pud_pfn(pud_t pud)
{
- return pud_deref(pud) >> PAGE_SHIFT;
+ return __pa(pud_deref(pud)) >> PAGE_SHIFT;
}
/*
@@ -1329,7 +1329,7 @@ static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
}
#define gup_fast_permitted gup_fast_permitted
-#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
+#define pfn_pte(pfn, pgprot) mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -1636,7 +1636,7 @@ static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
}
#define pmdp_collapse_flush pmdp_collapse_flush
-#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
+#define pfn_pmd(pfn, pgprot) mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
static inline int pmd_trans_huge(pmd_t pmd)
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index 6ede29907fbf..b49e0492842c 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -131,9 +131,9 @@ static inline bool should_resched(int preempt_offset)
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#ifdef CONFIG_PREEMPTION
-extern asmlinkage void preempt_schedule(void);
+extern void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
-extern asmlinkage void preempt_schedule_notrace(void);
+extern void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPTION */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 2058a435add4..023a15dc25a3 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -38,6 +38,9 @@
#include <asm/runtime_instr.h>
#include <asm/fpu/types.h>
#include <asm/fpu/internal.h>
+#include <asm/irqflags.h>
+
+typedef long (*sys_call_ptr_t)(struct pt_regs *regs);
static inline void set_cpu_flag(int flag)
{
@@ -101,31 +104,32 @@ extern void __bpon(void);
*/
struct thread_struct {
unsigned int acrs[NUM_ACRS];
- unsigned long ksp; /* kernel stack pointer */
- unsigned long user_timer; /* task cputime in user space */
- unsigned long guest_timer; /* task cputime in kvm guest */
- unsigned long system_timer; /* task cputime in kernel space */
- unsigned long hardirq_timer; /* task cputime in hardirq context */
- unsigned long softirq_timer; /* task cputime in softirq context */
- unsigned long sys_call_table; /* system call table address */
- unsigned long gmap_addr; /* address of last gmap fault. */
- unsigned int gmap_write_flag; /* gmap fault write indication */
- unsigned int gmap_int_code; /* int code of last gmap fault */
- unsigned int gmap_pfault; /* signal of a pending guest pfault */
+ unsigned long ksp; /* kernel stack pointer */
+ unsigned long user_timer; /* task cputime in user space */
+ unsigned long guest_timer; /* task cputime in kvm guest */
+ unsigned long system_timer; /* task cputime in kernel space */
+ unsigned long hardirq_timer; /* task cputime in hardirq context */
+ unsigned long softirq_timer; /* task cputime in softirq context */
+ const sys_call_ptr_t *sys_call_table; /* system call table address */
+ unsigned long gmap_addr; /* address of last gmap fault. */
+ unsigned int gmap_write_flag; /* gmap fault write indication */
+ unsigned int gmap_int_code; /* int code of last gmap fault */
+ unsigned int gmap_pfault; /* signal of a pending guest pfault */
+
/* Per-thread information related to debugging */
- struct per_regs per_user; /* User specified PER registers */
- struct per_event per_event; /* Cause of the last PER trap */
- unsigned long per_flags; /* Flags to control debug behavior */
- unsigned int system_call; /* system call number in signal */
- unsigned long last_break; /* last breaking-event-address. */
- /* pfault_wait is used to block the process on a pfault event */
+ struct per_regs per_user; /* User specified PER registers */
+ struct per_event per_event; /* Cause of the last PER trap */
+ unsigned long per_flags; /* Flags to control debug behavior */
+ unsigned int system_call; /* system call number in signal */
+ unsigned long last_break; /* last breaking-event-address. */
+ /* pfault_wait is used to block the process on a pfault event */
unsigned long pfault_wait;
struct list_head list;
/* cpu runtime instrumentation */
struct runtime_instr_cb *ri_cb;
- struct gs_cb *gs_cb; /* Current guarded storage cb */
- struct gs_cb *gs_bc_cb; /* Broadcast guarded storage cb */
- unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
+ struct gs_cb *gs_cb; /* Current guarded storage cb */
+ struct gs_cb *gs_bc_cb; /* Broadcast guarded storage cb */
+ unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
/*
* Warning: 'fpu' is dynamically-sized. It *MUST* be at
* the end.
@@ -184,6 +188,7 @@ static inline void release_thread(struct task_struct *tsk) { }
/* Free guarded storage control block */
void guarded_storage_release(struct task_struct *tsk);
+void gs_load_bc_cb(struct pt_regs *regs);
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
@@ -324,6 +329,11 @@ extern void memcpy_absolute(void *, void *, size_t);
extern int s390_isolate_bp(void);
extern int s390_isolate_bp_guest(void);
+static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
+{
+ return arch_irqs_disabled_flags(regs->psw.mask);
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_PROCESSOR_H */
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 73ca7f7cac33..f828be78937f 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -11,13 +11,13 @@
#include <uapi/asm/ptrace.h>
#define PIF_SYSCALL 0 /* inside a system call */
-#define PIF_PER_TRAP 1 /* deliver sigtrap on return to user */
-#define PIF_SYSCALL_RESTART 2 /* restart the current system call */
+#define PIF_SYSCALL_RESTART 1 /* restart the current system call */
+#define PIF_SYSCALL_RET_SET 2 /* return value was set via ptrace */
#define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */
#define _PIF_SYSCALL BIT(PIF_SYSCALL)
-#define _PIF_PER_TRAP BIT(PIF_PER_TRAP)
#define _PIF_SYSCALL_RESTART BIT(PIF_SYSCALL_RESTART)
+#define _PIF_SYSCALL_RET_SET BIT(PIF_SYSCALL_RET_SET)
#define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT)
#ifndef __ASSEMBLY__
@@ -68,6 +68,9 @@ enum {
&(*(struct psw_bits *)(&(__psw))); \
}))
+#define PGM_INT_CODE_MASK 0x7f
+#define PGM_INT_CODE_PER 0x80
+
/*
* The pt_regs struct defines the way the registers are stored on
* the stack during a system call.
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 19e84c95d1e7..d9215c7106f0 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -250,17 +250,13 @@ struct slsb {
* struct qdio_outbuf_state - SBAL related asynchronous operation information
* (for communication with upper layer programs)
* (only required for use with completion queues)
- * @flags: flags indicating state of buffer
* @user: pointer to upper layer program's state information related to SBAL
* (stored in user1 data of QAOB)
*/
struct qdio_outbuf_state {
- u8 flags;
void *user;
};
-#define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01
-
#define CHSC_AC1_INITIATE_INPUTQ 0x80
@@ -315,6 +311,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
#define QDIO_ERROR_GET_BUF_STATE 0x0002
#define QDIO_ERROR_SET_BUF_STATE 0x0004
#define QDIO_ERROR_SLSB_STATE 0x0100
+#define QDIO_ERROR_SLSB_PENDING 0x0200
#define QDIO_ERROR_FATAL 0x00ff
#define QDIO_ERROR_TEMPORARY 0xff00
@@ -336,7 +333,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
* @no_output_qs: number of output queues
* @input_handler: handler to be called for input queues
* @output_handler: handler to be called for output queues
- * @irq_poll: Data IRQ polling handler (NULL when not supported)
+ * @irq_poll: Data IRQ polling handler
* @scan_threshold: # of in-use buffers that triggers scan on output queue
* @int_parm: interruption parameter
* @input_sbal_addr_array: per-queue array, each element points to 128 SBALs
diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h
index c00f7b031628..a7c3ccf681da 100644
--- a/arch/s390/include/asm/scsw.h
+++ b/arch/s390/include/asm/scsw.h
@@ -525,8 +525,7 @@ static inline int scsw_cmd_is_valid_pno(union scsw *scsw)
return (scsw->cmd.fctl != 0) &&
(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
(!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) ||
- ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
- (scsw->cmd.actl & SCSW_ACTL_SUSPENDED)));
+ (scsw->cmd.actl & SCSW_ACTL_SUSPENDED));
}
/**
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index d9d5de0f67ff..9107e3dab68c 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -14,8 +14,8 @@
#include <linux/err.h>
#include <asm/ptrace.h>
-extern const unsigned long sys_call_table[];
-extern const unsigned long sys_call_table_emu[];
+extern const sys_call_ptr_t sys_call_table[];
+extern const sys_call_ptr_t sys_call_table_emu[];
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
@@ -56,6 +56,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
+ set_pt_regs_flag(regs, PIF_SYSCALL_RET_SET);
regs->gprs[2] = error ? error : val;
}
@@ -97,4 +98,10 @@ static inline int syscall_get_arch(struct task_struct *task)
#endif
return AUDIT_ARCH_S390X;
}
+
+static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
+{
+ return false;
+}
+
#endif /* _ASM_SYSCALL_H */
diff --git a/arch/s390/include/asm/syscall_wrapper.h b/arch/s390/include/asm/syscall_wrapper.h
index 1320f4213d80..ad2c996e7e93 100644
--- a/arch/s390/include/asm/syscall_wrapper.h
+++ b/arch/s390/include/asm/syscall_wrapper.h
@@ -7,6 +7,33 @@
#ifndef _ASM_S390_SYSCALL_WRAPPER_H
#define _ASM_S390_SYSCALL_WRAPPER_H
+#define __SC_TYPE(t, a) t
+
+#define SYSCALL_PT_ARG6(regs, m, t1, t2, t3, t4, t5, t6)\
+ SYSCALL_PT_ARG5(regs, m, t1, t2, t3, t4, t5), \
+ m(t6, (regs->gprs[7]))
+
+#define SYSCALL_PT_ARG5(regs, m, t1, t2, t3, t4, t5) \
+ SYSCALL_PT_ARG4(regs, m, t1, t2, t3, t4), \
+ m(t5, (regs->gprs[6]))
+
+#define SYSCALL_PT_ARG4(regs, m, t1, t2, t3, t4) \
+ SYSCALL_PT_ARG3(regs, m, t1, t2, t3), \
+ m(t4, (regs->gprs[5]))
+
+#define SYSCALL_PT_ARG3(regs, m, t1, t2, t3) \
+ SYSCALL_PT_ARG2(regs, m, t1, t2), \
+ m(t3, (regs->gprs[4]))
+
+#define SYSCALL_PT_ARG2(regs, m, t1, t2) \
+ SYSCALL_PT_ARG1(regs, m, t1), \
+ m(t2, (regs->gprs[3]))
+
+#define SYSCALL_PT_ARG1(regs, m, t1) \
+ m(t1, (regs->orig_gpr2))
+
+#define SYSCALL_PT_ARGS(x, ...) SYSCALL_PT_ARG##x(__VA_ARGS__)
+
#ifdef CONFIG_COMPAT
#define __SC_COMPAT_TYPE(t, a) \
__typeof(__builtin_choose_expr(sizeof(t) > 4, 0L, (t)0)) a
@@ -29,14 +56,15 @@
(t)__ReS; \
})
-#define __S390_SYS_STUBx(x, name, ...) \
- asmlinkage long __s390_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__));\
- ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \
- asmlinkage long __s390_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
- { \
- long ret = __s390x_sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__));\
- __MAP(x,__SC_TEST,__VA_ARGS__); \
- return ret; \
+#define __S390_SYS_STUBx(x, name, ...) \
+ long __s390_sys##name(struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \
+ long __s390_sys##name(struct pt_regs *regs) \
+ { \
+ long ret = __do_sys##name(SYSCALL_PT_ARGS(x, regs, \
+ __SC_COMPAT_CAST, __MAP(x, __SC_TYPE, __VA_ARGS__))); \
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ return ret; \
}
/*
@@ -45,17 +73,17 @@
*/
#define COMPAT_SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
- asmlinkage long __s390_compat_sys_##sname(void); \
+ long __s390_compat_sys_##sname(void); \
ALLOW_ERROR_INJECTION(__s390_compat_sys_##sname, ERRNO); \
- asmlinkage long __s390_compat_sys_##sname(void)
+ long __s390_compat_sys_##sname(void)
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
- asmlinkage long __s390x_sys_##sname(void); \
+ long __s390x_sys_##sname(void); \
ALLOW_ERROR_INJECTION(__s390x_sys_##sname, ERRNO); \
- asmlinkage long __s390_sys_##sname(void) \
+ long __s390_sys_##sname(void) \
__attribute__((alias(__stringify(__s390x_sys_##sname)))); \
- asmlinkage long __s390x_sys_##sname(void)
+ long __s390x_sys_##sname(void)
#define COND_SYSCALL(name) \
cond_syscall(__s390x_sys_##name); \
@@ -65,23 +93,24 @@
SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers); \
SYSCALL_ALIAS(__s390_sys_##name, sys_ni_posix_timers)
-#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
- __diag_push(); \
- __diag_ignore(GCC, 8, "-Wattribute-alias", \
- "Type aliasing is used to sanitize syscall arguments");\
- asmlinkage long __s390_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
- asmlinkage long __s390_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
- __attribute__((alias(__stringify(__se_compat_sys##name)))); \
- ALLOW_ERROR_INJECTION(__s390_compat_sys##name, ERRNO); \
- static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
- asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
- asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
- { \
- long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
- __MAP(x,__SC_TEST,__VA_ARGS__); \
- return ret; \
- } \
- __diag_pop(); \
+#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wattribute-alias", \
+ "Type aliasing is used to sanitize syscall arguments"); \
+ long __s390_compat_sys##name(struct pt_regs *regs); \
+ long __s390_compat_sys##name(struct pt_regs *regs) \
+ __attribute__((alias(__stringify(__se_compat_sys##name)))); \
+ ALLOW_ERROR_INJECTION(__s390_compat_sys##name, ERRNO); \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ long __se_compat_sys##name(struct pt_regs *regs); \
+ long __se_compat_sys##name(struct pt_regs *regs) \
+ { \
+ long ret = __do_compat_sys##name(SYSCALL_PT_ARGS(x, regs, __SC_DELOUSE, \
+ __MAP(x, __SC_TYPE, __VA_ARGS__))); \
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ return ret; \
+ } \
+ __diag_pop(); \
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
/*
@@ -101,9 +130,9 @@
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
- asmlinkage long __s390x_sys_##sname(void); \
+ long __s390x_sys_##sname(void); \
ALLOW_ERROR_INJECTION(__s390x_sys_##sname, ERRNO); \
- asmlinkage long __s390x_sys_##sname(void)
+ long __s390x_sys_##sname(void)
#define COND_SYSCALL(name) \
cond_syscall(__s390x_sys_##name)
@@ -113,23 +142,24 @@
#endif /* CONFIG_COMPAT */
-#define __SYSCALL_DEFINEx(x, name, ...) \
- __diag_push(); \
- __diag_ignore(GCC, 8, "-Wattribute-alias", \
- "Type aliasing is used to sanitize syscall arguments");\
- asmlinkage long __s390x_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
- __attribute__((alias(__stringify(__se_sys##name)))); \
- ALLOW_ERROR_INJECTION(__s390x_sys##name, ERRNO); \
- long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
- static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
- __S390_SYS_STUBx(x, name, __VA_ARGS__) \
- asmlinkage long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
- { \
- long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
- __MAP(x,__SC_TEST,__VA_ARGS__); \
- return ret; \
- } \
- __diag_pop(); \
+#define __SYSCALL_DEFINEx(x, name, ...) \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wattribute-alias", \
+ "Type aliasing is used to sanitize syscall arguments"); \
+ long __s390x_sys##name(struct pt_regs *regs) \
+ __attribute__((alias(__stringify(__se_sys##name)))); \
+ ALLOW_ERROR_INJECTION(__s390x_sys##name, ERRNO); \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ long __se_sys##name(struct pt_regs *regs); \
+ __S390_SYS_STUBx(x, name, __VA_ARGS__) \
+ long __se_sys##name(struct pt_regs *regs) \
+ { \
+ long ret = __do_sys##name(SYSCALL_PT_ARGS(x, regs, \
+ __SC_CAST, __MAP(x, __SC_TYPE, __VA_ARGS__))); \
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ return ret; \
+ } \
+ __diag_pop(); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#endif /* _ASM_S390_SYSCALL_WRAPPER_H */
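
The SYSCALL_PT_ARGS machinery above lets every wrapper take a single struct pt_regs
pointer and pull its arguments out of orig_gpr2 and gprs[3..7]. As a rough
illustration of what the preprocessor produces (an editor's sketch, not patch
output; __SC_CAST comes from the generic <linux/syscalls.h>, and write() is used
only as an example), a three-argument stub expands to roughly:

long __se_sys_write(struct pt_regs *regs)
{
	/* SYSCALL_PT_ARGS(3, regs, __SC_CAST, unsigned int, const char __user *, size_t) */
	long ret = __do_sys_write((__force unsigned int)(regs->orig_gpr2),
				  (__force const char __user *)(regs->gprs[3]),
				  (__force size_t)(regs->gprs[4]));
	/* forward declarations, __MAP(x,__SC_TEST,...) and the __s390x_sys_write alias omitted */
	return ret;
}
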
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 3c5b1f909b6d..e6674796aa6f 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -36,6 +36,7 @@
*/
struct thread_info {
unsigned long flags; /* low level flags */
+ unsigned long syscall_work; /* SYSCALL_WORK_ flags */
};
/*
@@ -46,6 +47,8 @@ struct thread_info {
.flags = 0, \
}
+struct task_struct;
+
void arch_release_task_struct(struct task_struct *tsk);
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
@@ -68,6 +71,7 @@ void arch_setup_new_exec(void);
#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
+#define TIF_PER_TRAP 10 /* Need to handle PER trap on exit to usermode */
#define TIF_31BIT 16 /* 32bit process */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
@@ -91,6 +95,7 @@ void arch_setup_new_exec(void);
#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
#define _TIF_ISOLATE_BP BIT(TIF_ISOLATE_BP)
#define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST)
+#define _TIF_PER_TRAP BIT(TIF_PER_TRAP)
#define _TIF_31BIT BIT(TIF_31BIT)
#define _TIF_SINGLE_STEP BIT(TIF_SINGLE_STEP)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index c8e244ecdfde..c4e23e925665 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -19,6 +19,25 @@
extern u64 clock_comparator_max;
+union tod_clock {
+ __uint128_t val;
+ struct {
+ __uint128_t ei : 8; /* epoch index */
+ __uint128_t tod : 64; /* bits 0-63 of tod clock */
+ __uint128_t : 40;
+ __uint128_t pf : 16; /* programmable field */
+ };
+ struct {
+ __uint128_t eitod : 72; /* epoch index + bits 0-63 tod clock */
+ __uint128_t : 56;
+ };
+ struct {
+ __uint128_t us : 60; /* micro-seconds */
+ __uint128_t sus : 12; /* sub-microseconds */
+ __uint128_t : 56;
+ };
+} __packed;
+
/* Inline functions for clock register access. */
static inline int set_tod_clock(__u64 time)
{
@@ -32,18 +51,23 @@ static inline int set_tod_clock(__u64 time)
return cc;
}
-static inline int store_tod_clock(__u64 *time)
+static inline int store_tod_clock_ext_cc(union tod_clock *clk)
{
int cc;
asm volatile(
- " stck %1\n"
+ " stcke %1\n"
" ipm %0\n"
" srl %0,28\n"
- : "=d" (cc), "=Q" (*time) : : "cc");
+ : "=d" (cc), "=Q" (*clk) : : "cc");
return cc;
}
+static inline void store_tod_clock_ext(union tod_clock *tod)
+{
+ asm volatile("stcke %0" : "=Q" (*tod) : : "cc");
+}
+
static inline void set_clock_comparator(__u64 time)
{
asm volatile("sckc %0" : : "Q" (time));
@@ -144,23 +168,15 @@ static inline void local_tick_enable(unsigned long long comp)
}
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
-#define STORE_CLOCK_EXT_SIZE 16 /* stcke writes 16 bytes */
typedef unsigned long long cycles_t;
-static inline void get_tod_clock_ext(char *clk)
-{
- typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype;
-
- asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
-}
-
static inline unsigned long long get_tod_clock(void)
{
- char clk[STORE_CLOCK_EXT_SIZE];
+ union tod_clock clk;
- get_tod_clock_ext(clk);
- return *((unsigned long long *)&clk[1]);
+ store_tod_clock_ext(&clk);
+ return clk.tod;
}
static inline unsigned long long get_tod_clock_fast(void)
@@ -183,7 +199,7 @@ static inline cycles_t get_cycles(void)
int get_phys_clock(unsigned long *clock);
void init_cpu_timer(void);
-extern unsigned char tod_clock_base[16] __aligned(8);
+extern union tod_clock tod_clock_base;
/**
* get_clock_monotonic - returns current time in clock rate units
@@ -197,7 +213,7 @@ static inline unsigned long long get_tod_clock_monotonic(void)
unsigned long long tod;
preempt_disable_notrace();
- tod = get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
+ tod = get_tod_clock() - tod_clock_base.tod;
preempt_enable_notrace();
return tod;
}
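
The anonymous structs in union tod_clock are three overlapping views of the same
16-byte STCKE result (s390 allocates bit-fields from the most significant bit).
An editor's sketch of how the views line up and of the microsecond conversion
that debug_finish_entry() performs further down in this patch; the layout below
is the architected TOD format, not new code:

/*
 * bits 0..7    ei      epoch index
 * bits 8..71   tod     the classic 64-bit TOD clock (bit 51 ticks every microsecond)
 * bits 0..71   eitod   epoch index concatenated with the TOD clock
 * bits 0..59   us      microseconds, i.e. clk.us == clk.eitod >> 12
 * bits 60..71  sus     sub-microsecond fraction, i.e. clk.sus == clk.eitod & 0xfff
 */
union tod_clock clk;

store_tod_clock_ext(&clk);
/* microseconds since the Unix epoch, as debug_finish_entry() does below */
unsigned long usecs = clk.us - (TOD_UNIX_EPOCH >> 12);
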
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 954fa8ca6cbd..fe6407f0eb1b 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -66,7 +66,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
__tlb_adjust_range(tlb, address, PAGE_SIZE);
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
- tlb->cleared_ptes = 1;
+ tlb->cleared_pmds = 1;
/*
* page_table_free_rcu takes care of the allocation bit masks
* of the 2K table fragments in the 4K page table page,
@@ -110,7 +110,6 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
__tlb_adjust_range(tlb, address, PAGE_SIZE);
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
- tlb->cleared_p4ds = 1;
tlb_remove_table(tlb, p4d);
}
@@ -128,7 +127,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
return;
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
- tlb->cleared_puds = 1;
+ tlb->cleared_p4ds = 1;
tlb_remove_table(tlb, pud);
}
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index c6707885e7c2..4756d2937e54 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -18,7 +18,7 @@
#include <asm/extable.h>
#include <asm/facility.h>
-void debug_user_asce(void);
+void debug_user_asce(int exit);
static inline int __range_ok(unsigned long addr, unsigned long size)
{
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index 0325fc0469b7..7b98d4caee77 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -96,7 +96,7 @@ struct uv_cb_qui {
u32 max_num_sec_conf;
u64 max_guest_stor_addr;
u8 reserved88[158 - 136];
- u16 max_guest_cpus;
+ u16 max_guest_cpu_id;
u8 reserveda0[200 - 160];
} __packed __aligned(8);
@@ -273,7 +273,7 @@ struct uv_info {
unsigned long guest_cpu_stor_len;
unsigned long max_sec_stor_addr;
unsigned int max_num_sec_conf;
- unsigned short max_guest_cpus;
+ unsigned short max_guest_cpu_id;
};
extern struct uv_info uv_info;
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index f65590889054..b45e3dddd2c2 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -4,17 +4,18 @@
#include <vdso/datapage.h>
-/* Default link addresses for the vDSOs */
-#define VDSO32_LBASE 0
+/* Default link address for the vDSO */
#define VDSO64_LBASE 0
+#define __VVAR_PAGES 2
+
#define VDSO_VERSION_STRING LINUX_2.6.29
#ifndef __ASSEMBLY__
extern struct vdso_data *vdso_data;
-void vdso_getcpu_init(void);
+int vdso_getcpu_init(void);
#endif /* __ASSEMBLY__ */
#endif /* __S390_VDSO_H__ */
diff --git a/arch/s390/include/asm/vdso/gettimeofday.h b/arch/s390/include/asm/vdso/gettimeofday.h
index bf123065ad3b..ed89ef742530 100644
--- a/arch/s390/include/asm/vdso/gettimeofday.h
+++ b/arch/s390/include/asm/vdso/gettimeofday.h
@@ -24,13 +24,12 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
static inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_data *vd)
{
- const struct vdso_data *vdso = __arch_get_vdso_data();
u64 adj, now;
now = get_tod_clock();
- adj = vdso->arch_data.tod_steering_end - now;
+ adj = vd->arch_data.tod_steering_end - now;
if (unlikely((s64) adj > 0))
- now += (vdso->arch_data.tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
+ now += (vd->arch_data.tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
return now;
}
@@ -68,4 +67,11 @@ long clock_getres_fallback(clockid_t clkid, struct __kernel_timespec *ts)
return r2;
}
+#ifdef CONFIG_TIME_NS
+static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
+{
+ return _timens_data;
+}
+#endif
+
#endif
diff --git a/arch/s390/include/asm/vtime.h b/arch/s390/include/asm/vtime.h
index fac6a67988eb..fe17e448c0c5 100644
--- a/arch/s390/include/asm/vtime.h
+++ b/arch/s390/include/asm/vtime.h
@@ -4,4 +4,18 @@
#define __ARCH_HAS_VTIME_TASK_SWITCH
+static inline void update_timer_sys(void)
+{
+ S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer;
+ S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.sys_enter_timer;
+ S390_lowcore.last_update_timer = S390_lowcore.sys_enter_timer;
+}
+
+static inline void update_timer_mcck(void)
+{
+ S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer;
+ S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.mcck_enter_timer;
+ S390_lowcore.last_update_timer = S390_lowcore.mcck_enter_timer;
+}
+
#endif /* _S390_VTIME_H */
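
update_timer_sys() and update_timer_mcck() split the time since the last
accounting snapshot between system and user time. The subtraction order only
looks inverted because the s390 CPU timer counts down, so "earlier snapshot
minus later snapshot" is the elapsed time. A worked example with made-up
snapshot values (an editor's illustration, not part of the patch):

/*
 * CPU timer snapshots (decreasing over time):
 *   last_update_timer = 1000   taken at the previous accounting point
 *   exit_timer        =  900   stamped on the last exit to user space
 *   sys_enter_timer   =  600   stamped on this kernel entry
 *
 * update_timer_sys() then accounts
 *   system_timer += 1000 - 900 = 100   (kernel time up to the last exit)
 *   user_timer   +=  900 - 600 = 300   (user time until this entry)
 * and sets last_update_timer = 600 so the next interval starts here.
 */
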
diff --git a/arch/s390/include/uapi/asm/perf_cpum_cf_diag.h b/arch/s390/include/uapi/asm/perf_cpum_cf_diag.h
new file mode 100644
index 000000000000..3d8284b95f87
--- /dev/null
+++ b/arch/s390/include/uapi/asm/perf_cpum_cf_diag.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright IBM Corp. 2021
+ * Interface implementation for communication with the CPU Measurement
+ * counter facility device driver.
+ *
+ * Author(s): Thomas Richter <tmricht@linux.ibm.com>
+ *
+ * Define for ioctl() commands to communicate with the CPU Measurement
+ * counter facility device driver.
+ */
+
+#ifndef _PERF_CPUM_CF_DIAG_H
+#define _PERF_CPUM_CF_DIAG_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define S390_HWCTR_DEVICE "hwctr"
+#define S390_HWCTR_START_VERSION 1
+
+struct s390_ctrset_start { /* Set CPUs to operate on */
+ __u64 version; /* Version of interface */
+ __u64 data_bytes; /* # of bytes required */
+ __u64 cpumask_len; /* Length of CPU mask in bytes */
+ __u64 *cpumask; /* Pointer to CPU mask */
+ __u64 counter_sets; /* Bit mask of counter sets to get */
+};
+
+struct s390_ctrset_setdata { /* Counter set data */
+ __u32 set; /* Counter set number */
+ __u32 no_cnts; /* # of counters stored in cv[] */
+ __u64 cv[0]; /* Counter values (variable length) */
+};
+
+struct s390_ctrset_cpudata { /* Counter set data per CPU */
+ __u32 cpu_nr; /* CPU number */
+ __u32 no_sets; /* # of counters sets in data[] */
+ struct s390_ctrset_setdata data[0];
+};
+
+struct s390_ctrset_read { /* Structure to get all ctr sets */
+ __u64 no_cpus; /* Total # of CPUs data taken from */
+ struct s390_ctrset_cpudata data[0];
+};
+
+#define S390_HWCTR_MAGIC 'C' /* Random magic # for ioctls */
+#define S390_HWCTR_START _IOWR(S390_HWCTR_MAGIC, 1, struct s390_ctrset_start)
+#define S390_HWCTR_STOP _IO(S390_HWCTR_MAGIC, 2)
+#define S390_HWCTR_READ _IOWR(S390_HWCTR_MAGIC, 3, struct s390_ctrset_read)
+#endif
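
The new UAPI header only declares the ioctl ABI; the driver side is added
elsewhere in the series. A hedged sketch of the userspace sequence the comments
imply (start, read, stop), where the /dev node path, the counter-set mask and
the assumption that the driver fills in data_bytes on S390_HWCTR_START are all
guesses rather than anything taken from this hunk:

/* Editor's sketch, not part of the patch: exercise the hwctr ioctl interface. */
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/perf_cpum_cf_diag.h>

int read_counter_sets(void)
{
	struct s390_ctrset_start start;
	struct s390_ctrset_read *read_buf = NULL;
	__u64 cpumask = 1;				/* CPU 0 only, for illustration */
	int fd, rc;

	fd = open("/dev/" S390_HWCTR_DEVICE, O_RDWR);	/* assumed device node */
	if (fd < 0)
		return -1;

	memset(&start, 0, sizeof(start));
	start.version = S390_HWCTR_START_VERSION;
	start.cpumask_len = sizeof(cpumask);
	start.cpumask = &cpumask;
	start.counter_sets = 1;				/* hypothetical counter-set mask */
	rc = ioctl(fd, S390_HWCTR_START, &start);
	if (rc)
		goto out;

	/* assumption: the driver returns the required buffer size in data_bytes */
	read_buf = malloc(start.data_bytes);
	if (read_buf)
		rc = ioctl(fd, S390_HWCTR_READ, read_buf);

	ioctl(fd, S390_HWCTR_STOP);
	free(read_buf);
out:
	close(fd);
	return rc;
}
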
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index 543dd70e12c8..ad64d673b5e6 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -179,8 +179,9 @@
#define ACR_SIZE 4
-#define PTRACE_OLDSETOPTIONS 21
-
+#define PTRACE_OLDSETOPTIONS 21
+#define PTRACE_SYSEMU 31
+#define PTRACE_SYSEMU_SINGLESTEP 32
#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index dd73b7f07423..c97818a382f3 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -34,7 +34,7 @@ CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
-obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
+obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o
obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 79724d861dc9..15e637728a4b 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -26,26 +26,14 @@ int main(void)
BLANK();
/* thread struct offsets */
OFFSET(__THREAD_ksp, thread_struct, ksp);
- OFFSET(__THREAD_sysc_table, thread_struct, sys_call_table);
- OFFSET(__THREAD_last_break, thread_struct, last_break);
- OFFSET(__THREAD_FPU_fpc, thread_struct, fpu.fpc);
- OFFSET(__THREAD_FPU_regs, thread_struct, fpu.regs);
- OFFSET(__THREAD_per_cause, thread_struct, per_event.cause);
- OFFSET(__THREAD_per_address, thread_struct, per_event.address);
- OFFSET(__THREAD_per_paid, thread_struct, per_event.paid);
- OFFSET(__THREAD_trap_tdb, thread_struct, trap_tdb);
BLANK();
/* thread info offsets */
OFFSET(__TI_flags, task_struct, thread_info.flags);
BLANK();
/* pt_regs offsets */
- OFFSET(__PT_ARGS, pt_regs, args);
OFFSET(__PT_PSW, pt_regs, psw);
OFFSET(__PT_GPRS, pt_regs, gprs);
OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2);
- OFFSET(__PT_INT_CODE, pt_regs, int_code);
- OFFSET(__PT_INT_PARM, pt_regs, int_parm);
- OFFSET(__PT_INT_PARM_LONG, pt_regs, int_parm_long);
OFFSET(__PT_FLAGS, pt_regs, flags);
OFFSET(__PT_CR1, pt_regs, cr1);
DEFINE(__PT_SIZE, sizeof(struct pt_regs));
@@ -64,6 +52,7 @@ int main(void)
OFFSET(__CLOCK_IDLE_EXIT, s390_idle_data, clock_idle_exit);
OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter);
OFFSET(__TIMER_IDLE_EXIT, s390_idle_data, timer_idle_exit);
+ OFFSET(__MT_CYCLES_ENTER, s390_idle_data, mt_cycles_enter);
BLANK();
/* hardware defined lowcore locations 0x000 - 0x1ff */
OFFSET(__LC_EXT_PARAMS, lowcore, ext_params);
@@ -115,13 +104,9 @@ int main(void)
OFFSET(__LC_CPU_FLAGS, lowcore, cpu_flags);
OFFSET(__LC_RETURN_PSW, lowcore, return_psw);
OFFSET(__LC_RETURN_MCCK_PSW, lowcore, return_mcck_psw);
- OFFSET(__LC_SYNC_ENTER_TIMER, lowcore, sync_enter_timer);
- OFFSET(__LC_ASYNC_ENTER_TIMER, lowcore, async_enter_timer);
+ OFFSET(__LC_SYS_ENTER_TIMER, lowcore, sys_enter_timer);
OFFSET(__LC_MCCK_ENTER_TIMER, lowcore, mcck_enter_timer);
OFFSET(__LC_EXIT_TIMER, lowcore, exit_timer);
- OFFSET(__LC_USER_TIMER, lowcore, user_timer);
- OFFSET(__LC_SYSTEM_TIMER, lowcore, system_timer);
- OFFSET(__LC_STEAL_TIMER, lowcore, steal_timer);
OFFSET(__LC_LAST_UPDATE_TIMER, lowcore, last_update_timer);
OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock);
OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
@@ -133,6 +118,7 @@ int main(void)
OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
OFFSET(__LC_NODAT_STACK, lowcore, nodat_stack);
OFFSET(__LC_RESTART_STACK, lowcore, restart_stack);
+ OFFSET(__LC_MCCK_STACK, lowcore, mcck_stack);
OFFSET(__LC_RESTART_FN, lowcore, restart_fn);
OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
OFFSET(__LC_RESTART_SOURCE, lowcore, restart_source);
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 38d4bdbc34b9..1d0e17ec93eb 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -118,6 +118,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);
clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
+ clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART);
return 0;
}
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 205b2e2648aa..0e36dfc9ccd6 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -365,7 +365,7 @@ static void *fill_cpu_elf_notes(void *ptr, int cpu, struct save_area *sa)
memcpy(&nt_prstatus.pr_reg.gprs, sa->gprs, sizeof(sa->gprs));
memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw));
memcpy(&nt_prstatus.pr_reg.acrs, sa->acrs, sizeof(sa->acrs));
- nt_prstatus.pr_pid = cpu;
+ nt_prstatus.common.pr_pid = cpu;
/* Prepare fpregset (floating point) note */
memset(&nt_fpregset, 0, sizeof(nt_fpregset));
memcpy(&nt_fpregset.fpc, &sa->fpc, sizeof(sa->fpc));
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index b6619ae9a3e0..bb958d32bd81 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -829,11 +829,11 @@ static inline debug_entry_t *get_active_entry(debug_info_t *id)
static inline void debug_finish_entry(debug_info_t *id, debug_entry_t *active,
int level, int exception)
{
- unsigned char clk[STORE_CLOCK_EXT_SIZE];
unsigned long timestamp;
+ union tod_clock clk;
- get_tod_clock_ext(clk);
- timestamp = *(unsigned long *) &clk[0] >> 4;
+ store_tod_clock_ext(&clk);
+ timestamp = clk.us;
timestamp -= TOD_UNIX_EPOCH >> 12;
active->clock = timestamp;
active->cpu = smp_processor_id();
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index cc89763a4d3c..a361d2e70025 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -35,16 +35,16 @@
static void __init reset_tod_clock(void)
{
- u64 time;
+ union tod_clock clk;
- if (store_tod_clock(&time) == 0)
+ if (store_tod_clock_ext_cc(&clk) == 0)
return;
/* TOD clock not running. Set the clock to Unix Epoch. */
- if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
+ if (set_tod_clock(TOD_UNIX_EPOCH) || store_tod_clock_ext_cc(&clk))
disabled_wait();
- memset(tod_clock_base, 0, 16);
- *(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH;
+ memset(&tod_clock_base, 0, sizeof(tod_clock_base));
+ tod_clock_base.tod = TOD_UNIX_EPOCH;
S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
}
@@ -230,7 +230,7 @@ static __init void detect_machine_facilities(void)
}
if (test_facility(133))
S390_lowcore.machine_flags |= MACHINE_FLAG_GS;
- if (test_facility(139) && (tod_clock_base[1] & 0x80)) {
+ if (test_facility(139) && (tod_clock_base.tod >> 63)) {
/* Enabled signed clock comparator comparisons */
S390_lowcore.machine_flags |= MACHINE_FLAG_SCC;
clock_comparator_max = -1ULL >> 1;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index f1ba197b10c0..c10b9f31eef7 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -51,38 +51,8 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
-_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
- _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING | \
- _TIF_NOTIFY_SIGNAL)
-_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
- _TIF_SYSCALL_TRACEPOINT)
-_CIF_WORK = (_CIF_FPU)
-_PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
-
_LPP_OFFSET = __LC_LPP
- .macro TRACE_IRQS_ON
-#ifdef CONFIG_TRACE_IRQFLAGS
- basr %r2,%r0
- brasl %r14,trace_hardirqs_on_caller
-#endif
- .endm
-
- .macro TRACE_IRQS_OFF
-#ifdef CONFIG_TRACE_IRQFLAGS
- basr %r2,%r0
- brasl %r14,trace_hardirqs_off_caller
-#endif
- .endm
-
- .macro LOCKDEP_SYS_EXIT
-#ifdef CONFIG_LOCKDEP
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jz .+10
- brasl %r14,lockdep_sys_exit
-#endif
- .endm
-
.macro CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
@@ -91,12 +61,6 @@ _LPP_OFFSET = __LC_LPP
#endif
.endm
- .macro DEBUG_USER_ASCE
-#ifdef CONFIG_DEBUG_USER_ASCE
- brasl %r14,debug_user_asce
-#endif
- .endm
-
.macro CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
lgr %r14,%r15
@@ -106,6 +70,8 @@ _LPP_OFFSET = __LC_LPP
je \oklabel
clg %r14,__LC_ASYNC_STACK
je \oklabel
+ clg %r14,__LC_MCCK_STACK
+ je \oklabel
clg %r14,__LC_NODAT_STACK
je \oklabel
clg %r14,__LC_RESTART_STACK
@@ -117,113 +83,9 @@ _LPP_OFFSET = __LC_LPP
#endif
.endm
- .macro SWITCH_ASYNC savearea,timer,clock
- tmhh %r8,0x0001 # interrupting from user ?
- jnz 4f
-#if IS_ENABLED(CONFIG_KVM)
- lgr %r14,%r9
- larl %r13,.Lsie_gmap
- slgr %r14,%r13
- lghi %r13,.Lsie_done - .Lsie_gmap
- clgr %r14,%r13
- jhe 0f
- lghi %r11,\savearea # inside critical section, do cleanup
- brasl %r14,.Lcleanup_sie
-#endif
-0: larl %r13,.Lpsw_idle_exit
- cgr %r13,%r9
- jne 3f
-
- larl %r1,smp_cpu_mtid
- llgf %r1,0(%r1)
- ltgr %r1,%r1
- jz 2f # no SMT, skip mt_cycles calculation
- .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
- larl %r3,mt_cycles
- ag %r3,__LC_PERCPU_OFFSET
- la %r4,__SF_EMPTY+16(%r15)
-1: lg %r0,0(%r3)
- slg %r0,0(%r4)
- alg %r0,64(%r4)
- stg %r0,0(%r3)
- la %r3,8(%r3)
- la %r4,8(%r4)
- brct %r1,1b
-
-2: mvc __CLOCK_IDLE_EXIT(8,%r2), \clock
- mvc __TIMER_IDLE_EXIT(8,%r2), \timer
- # account system time going idle
- ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
-
- lg %r13,__LC_STEAL_TIMER
- alg %r13,__CLOCK_IDLE_ENTER(%r2)
- slg %r13,__LC_LAST_UPDATE_CLOCK
- stg %r13,__LC_STEAL_TIMER
-
- mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
-
- lg %r13,__LC_SYSTEM_TIMER
- alg %r13,__LC_LAST_UPDATE_TIMER
- slg %r13,__TIMER_IDLE_ENTER(%r2)
- stg %r13,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
-
- nihh %r8,0xfcfd # clear wait state and irq bits
-3: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
- slgr %r14,%r15
- srag %r14,%r14,STACK_SHIFT
- jnz 5f
- CHECK_STACK \savearea
- aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j 6f
-4: UPDATE_VTIME %r14,%r15,\timer
- BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
-5: lg %r15,__LC_ASYNC_STACK # load async stack
-6: la %r11,STACK_FRAME_OVERHEAD(%r15)
- .endm
-
- .macro UPDATE_VTIME w1,w2,enter_timer
- lg \w1,__LC_EXIT_TIMER
- lg \w2,__LC_LAST_UPDATE_TIMER
- slg \w1,\enter_timer
- slg \w2,__LC_EXIT_TIMER
- alg \w1,__LC_USER_TIMER
- alg \w2,__LC_SYSTEM_TIMER
- stg \w1,__LC_USER_TIMER
- stg \w2,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
- .endm
-
- .macro RESTORE_SM_CLEAR_PER
- stg %r8,__LC_RETURN_PSW
- ni __LC_RETURN_PSW,0xbf
- ssm __LC_RETURN_PSW
- .endm
-
- .macro ENABLE_INTS
- stosm __SF_EMPTY(%r15),3
- .endm
-
- .macro ENABLE_INTS_TRACE
- TRACE_IRQS_ON
- ENABLE_INTS
- .endm
-
- .macro DISABLE_INTS
- stnsm __SF_EMPTY(%r15),0xfc
- .endm
-
- .macro DISABLE_INTS_TRACE
- DISABLE_INTS
- TRACE_IRQS_OFF
- .endm
-
.macro STCK savearea
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
- .insn s,0xb27c0000,\savearea # store clock fast
-#else
- .insn s,0xb2050000,\savearea # store clock
-#endif
+ ALTERNATIVE ".insn s,0xb2050000,\savearea", \
+ ".insn s,0xb27c0000,\savearea", 25
.endm
/*
@@ -267,18 +129,17 @@ _LPP_OFFSET = __LC_LPP
"jnz .+8; .long 0xb2e8d000", 82
.endm
- GEN_BR_THUNK %r9
GEN_BR_THUNK %r14
- GEN_BR_THUNK %r14,%r11
+ GEN_BR_THUNK %r14,%r13
.section .kprobes.text, "ax"
.Ldummy:
/*
- * This nop exists only in order to avoid that __switch_to starts at
+ * This nop exists only in order to avoid that __bpon starts at
* the beginning of the kprobes text section. In that case we would
* have several symbols at the same address. E.g. objdump would take
* an arbitrary symbol name when disassembling this code.
- * With the added nop in between the __switch_to symbol is unique
+ * With the added nop in between the __bpon symbol is unique
* again.
*/
nop 0
@@ -327,10 +188,6 @@ ENTRY(sie64a)
stg %r3,__SF_SIE_SAVEAREA(%r15) # save guest register save area
xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
- TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ?
- jno .Lsie_load_guest_gprs
- brasl %r14,load_fpu_regs # load guest fp/vx regs
-.Lsie_load_guest_gprs:
lmg %r0,%r13,0(%r3) # load guest gprs 0-13
lg %r14,__LC_GMAP # get gmap pointer
ltgr %r14,%r14
@@ -357,7 +214,7 @@ ENTRY(sie64a)
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
-# See also .Lcleanup_sie
+# See also .Lcleanup_sie_mcck/.Lcleanup_sie_int
.Lrewind_pad6:
nopr 7
.Lrewind_pad4:
@@ -370,7 +227,6 @@ sie_exit:
stmg %r0,%r13,0(%r14) # save guest gprs 0-13
xgr %r0,%r0 # clear guest registers to
xgr %r1,%r1 # prevent speculative use
- xgr %r2,%r2
xgr %r3,%r3
xgr %r4,%r4
xgr %r5,%r5
@@ -397,249 +253,68 @@ EXPORT_SYMBOL(sie_exit)
*/
ENTRY(system_call)
- stpt __LC_SYNC_ENTER_TIMER
+ stpt __LC_SYS_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
BPOFF
- lg %r12,__LC_CURRENT
- lghi %r14,_PIF_SYSCALL
+ lghi %r14,0
.Lsysc_per:
lctlg %c1,%c1,__LC_KERNEL_ASCE
- lghi %r13,__TASK_thread
+ lg %r12,__LC_CURRENT
lg %r15,__LC_KERNEL_STACK
- la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
- UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
- BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
- mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
- mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
- stg %r14,__PT_FLAGS(%r11)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- ENABLE_INTS
-.Lsysc_do_svc:
+ stmg %r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
# clear user controlled register to prevent speculative use
xgr %r0,%r0
- # load address of system call table
- lg %r10,__THREAD_sysc_table(%r13,%r12)
- llgh %r8,__PT_INT_CODE+2(%r11)
- slag %r8,%r8,3 # shift and test for svc 0
- jnz .Lsysc_nr_ok
- # svc 0: system call number in %r1
- llgfr %r1,%r1 # clear high word in r1
- sth %r1,__PT_INT_CODE+2(%r11)
- cghi %r1,NR_syscalls
- jnl .Lsysc_nr_ok
- slag %r8,%r1,3
-.Lsysc_nr_ok:
- stg %r2,__PT_ORIG_GPR2(%r11)
- stg %r7,STACK_FRAME_OVERHEAD(%r15)
- lg %r9,0(%r8,%r10) # get system call add.
- TSTMSK __TI_flags(%r12),_TIF_TRACE
- jnz .Lsysc_tracesys
- BASR_EX %r14,%r9 # call sys_xxxx
- stg %r2,__PT_R2(%r11) # store return value
-
-.Lsysc_return:
-#ifdef CONFIG_DEBUG_RSEQ
- lgr %r2,%r11
- brasl %r14,rseq_syscall
-#endif
- LOCKDEP_SYS_EXIT
-.Lsysc_tif:
- DISABLE_INTS
- TSTMSK __PT_FLAGS(%r11),_PIF_WORK
- jnz .Lsysc_work
- TSTMSK __TI_flags(%r12),_TIF_WORK
- jnz .Lsysc_work # check for work
- DEBUG_USER_ASCE
+ xgr %r1,%r1
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
+ xgr %r8,%r8
+ xgr %r9,%r9
+ xgr %r10,%r10
+ xgr %r11,%r11
+ la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
+ lgr %r3,%r14
+ brasl %r14,__do_syscall
lctlg %c1,%c1,__LC_USER_ASCE
- BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
- TSTMSK __LC_CPU_FLAGS, _CIF_FPU
- jz .Lsysc_skip_fpu
- brasl %r14,load_fpu_regs
-.Lsysc_skip_fpu:
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+ mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
+ lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
stpt __LC_EXIT_TIMER
- lmg %r0,%r15,__PT_R0(%r11)
b __LC_RETURN_LPSWE
-
-#
-# One of the work bits is on. Find out which one.
-#
-.Lsysc_work:
- ENABLE_INTS
- TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
- jo .Lsysc_reschedule
- TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
- jo .Lsysc_syscall_restart
-#ifdef CONFIG_UPROBES
- TSTMSK __TI_flags(%r12),_TIF_UPROBE
- jo .Lsysc_uprobe_notify
-#endif
- TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE
- jo .Lsysc_guarded_storage
- TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP
- jo .Lsysc_singlestep
-#ifdef CONFIG_LIVEPATCH
- TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING
- jo .Lsysc_patch_pending # handle live patching just before
- # signals and possible syscall restart
-#endif
- TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
- jo .Lsysc_syscall_restart
- TSTMSK __TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)
- jnz .Lsysc_sigpending
- TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
- jo .Lsysc_notify_resume
- j .Lsysc_return
-
-#
-# _TIF_NEED_RESCHED is set, call schedule
-#
-.Lsysc_reschedule:
- larl %r14,.Lsysc_return
- jg schedule
-
-#
-# _TIF_SIGPENDING is set, call do_signal
-#
-.Lsysc_sigpending:
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,do_signal
- TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
- jno .Lsysc_return
-.Lsysc_do_syscall:
- lghi %r13,__TASK_thread
- lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
- lghi %r1,0 # svc 0 returns -ENOSYS
- j .Lsysc_do_svc
-
-#
-# _TIF_NOTIFY_RESUME is set, call do_notify_resume
-#
-.Lsysc_notify_resume:
- lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,.Lsysc_return
- jg do_notify_resume
-
-#
-# _TIF_UPROBE is set, call uprobe_notify_resume
-#
-#ifdef CONFIG_UPROBES
-.Lsysc_uprobe_notify:
- lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,.Lsysc_return
- jg uprobe_notify_resume
-#endif
-
-#
-# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
-#
-.Lsysc_guarded_storage:
- lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,.Lsysc_return
- jg gs_load_bc_cb
-#
-# _TIF_PATCH_PENDING is set, call klp_update_patch_state
-#
-#ifdef CONFIG_LIVEPATCH
-.Lsysc_patch_pending:
- lg %r2,__LC_CURRENT # pass pointer to task struct
- larl %r14,.Lsysc_return
- jg klp_update_patch_state
-#endif
-
-#
-# _PIF_PER_TRAP is set, call do_per_trap
-#
-.Lsysc_singlestep:
- ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
- lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,.Lsysc_return
- jg do_per_trap
-
-#
-# _PIF_SYSCALL_RESTART is set, repeat the current system call
-#
-.Lsysc_syscall_restart:
- ni __PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART
- lmg %r1,%r7,__PT_R1(%r11) # load svc arguments
- lg %r2,__PT_ORIG_GPR2(%r11)
- j .Lsysc_do_svc
-
-#
-# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
-# and after the system call
-#
-.Lsysc_tracesys:
- lgr %r2,%r11 # pass pointer to pt_regs
- la %r3,0
- llgh %r0,__PT_INT_CODE+2(%r11)
- stg %r0,__PT_R2(%r11)
- brasl %r14,do_syscall_trace_enter
- lghi %r0,NR_syscalls
- clgr %r0,%r2
- jnh .Lsysc_tracenogo
- sllg %r8,%r2,3
- lg %r9,0(%r8,%r10)
- lmg %r3,%r7,__PT_R3(%r11)
- stg %r7,STACK_FRAME_OVERHEAD(%r15)
- lg %r2,__PT_ORIG_GPR2(%r11)
- BASR_EX %r14,%r9 # call sys_xxx
- stg %r2,__PT_R2(%r11) # store return value
-.Lsysc_tracenogo:
- TSTMSK __TI_flags(%r12),_TIF_TRACE
- jz .Lsysc_return
- lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,.Lsysc_return
- jg do_syscall_trace_exit
ENDPROC(system_call)
#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
- lg %r12,__LC_CURRENT
- brasl %r14,schedule_tail
- tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
- jne .Lsysc_tracenogo
- # it's a kernel thread
- lmg %r9,%r10,__PT_R9(%r11) # load gprs
- la %r2,0(%r10)
- BASR_EX %r14,%r9
- j .Lsysc_tracenogo
+ lgr %r3,%r11
+ brasl %r14,__ret_from_fork
+ lctlg %c1,%c1,__LC_USER_ASCE
+ mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
+ lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
+ stpt __LC_EXIT_TIMER
+ b __LC_RETURN_LPSWE
ENDPROC(ret_from_fork)
-ENTRY(kernel_thread_starter)
- la %r2,0(%r10)
- BASR_EX %r14,%r9
- j .Lsysc_tracenogo
-ENDPROC(kernel_thread_starter)
-
/*
* Program check handler routine
*/
ENTRY(pgm_check_handler)
- stpt __LC_SYNC_ENTER_TIMER
+ stpt __LC_SYS_ENTER_TIMER
BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
- lg %r10,__LC_LAST_BREAK
- srag %r11,%r10,12
- jnz 0f
- /* if __LC_LAST_BREAK is < 4096, it contains one of
- * the lpswe addresses in lowcore. Set it to 1 (initial state)
- * to prevent leaking that address to userspace.
- */
- lghi %r10,1
-0: lg %r12,__LC_CURRENT
- lghi %r11,0
+ lg %r12,__LC_CURRENT
+ lghi %r10,0
lmg %r8,%r9,__LC_PGM_OLD_PSW
tmhh %r8,0x0001 # coming from user space?
jno .Lpgm_skip_asce
lctlg %c1,%c1,__LC_KERNEL_ASCE
- j 3f
+ j 3f # -> fault in user space
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
# cleanup critical section for program checks in sie64a
@@ -653,7 +328,7 @@ ENTRY(pgm_check_handler)
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
- lghi %r11,_PIF_GUEST_FAULT
+ lghi %r10,_PIF_GUEST_FAULT
#endif
1: tmhh %r8,0x4000 # PER bit set in old PSW ?
jnz 2f # -> enabled, can't be a double fault
@@ -661,109 +336,84 @@ ENTRY(pgm_check_handler)
jnz .Lpgm_svcper # -> single stepped svc
2: CHECK_STACK __LC_SAVE_AREA_SYNC
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- # CHECK_VMAP_STACK branches to stack_overflow or 5f
- CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f
-3: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
- BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ # CHECK_VMAP_STACK branches to stack_overflow or 4f
+ CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
+3: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
lg %r15,__LC_KERNEL_STACK
- lgr %r14,%r12
- aghi %r14,__TASK_thread # pointer to thread_struct
- lghi %r13,__LC_PGM_TDB
- tm __LC_PGM_ILC+2,0x02 # check for transaction abort
- jz 4f
- mvc __THREAD_trap_tdb(256,%r14),0(%r13)
-4: stg %r10,__THREAD_last_break(%r14)
-5: lgr %r13,%r11
- la %r11,STACK_FRAME_OVERHEAD(%r15)
+4: la %r11,STACK_FRAME_OVERHEAD(%r15)
+ stg %r10,__PT_FLAGS(%r11)
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+ stmg %r8,%r9,__PT_PSW(%r11)
+
# clear user controlled registers to prevent speculative use
xgr %r0,%r0
xgr %r1,%r1
- xgr %r2,%r2
xgr %r3,%r3
xgr %r4,%r4
xgr %r5,%r5
xgr %r6,%r6
xgr %r7,%r7
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
- stmg %r8,%r9,__PT_PSW(%r11)
- mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
- mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
- stg %r13,__PT_FLAGS(%r11)
- stg %r10,__PT_ARGS(%r11)
- tm __LC_PGM_ILC+3,0x80 # check for per exception
- jz 6f
- tmhh %r8,0x0001 # kernel per event ?
- jz .Lpgm_kprobe
- oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
- mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
- mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
- mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-6: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- RESTORE_SM_CLEAR_PER
- larl %r1,pgm_check_table
- llgh %r10,__PT_INT_CODE+2(%r11)
- nill %r10,0x007f
- sll %r10,3
- je .Lpgm_return
- lg %r9,0(%r10,%r1) # load address of handler routine
- lgr %r2,%r11 # pass pointer to pt_regs
- BASR_EX %r14,%r9 # branch to interrupt-handler
-.Lpgm_return:
- LOCKDEP_SYS_EXIT
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jno .Lpgm_restore
- TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
- jo .Lsysc_do_syscall
- j .Lsysc_tif
-.Lpgm_restore:
- DISABLE_INTS
- TSTMSK __LC_CPU_FLAGS, _CIF_FPU
- jz .Lpgm_skip_fpu
- brasl %r14,load_fpu_regs
-.Lpgm_skip_fpu:
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+ lgr %r2,%r11
+ brasl %r14,__do_pgm_check
+ tmhh %r8,0x0001 # returning to user space?
+ jno .Lpgm_exit_kernel
+ lctlg %c1,%c1,__LC_USER_ASCE
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
stpt __LC_EXIT_TIMER
- lmg %r0,%r15,__PT_R0(%r11)
+.Lpgm_exit_kernel:
+ mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
+ lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
b __LC_RETURN_LPSWE
#
-# PER event in supervisor state, must be kprobes
-#
-.Lpgm_kprobe:
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- RESTORE_SM_CLEAR_PER
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,do_per_trap
- j .Lpgm_return
-
-#
# single stepped system call
#
.Lpgm_svcper:
mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
larl %r14,.Lsysc_per
stg %r14,__LC_RETURN_PSW+8
- lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
+ lghi %r14,1
lpswe __LC_RETURN_PSW # branch to .Lsysc_per
ENDPROC(pgm_check_handler)
/*
- * IO interrupt handler routine
+ * Interrupt handler macro used for external and IO interrupts.
*/
-ENTRY(io_int_handler)
+.macro INT_HANDLER name,lc_old_psw,handler
+ENTRY(\name)
STCK __LC_INT_CLOCK
- stpt __LC_ASYNC_ENTER_TIMER
+ stpt __LC_SYS_ENTER_TIMER
BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r12,__LC_CURRENT
- lmg %r8,%r9,__LC_IO_OLD_PSW
- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK
+ lmg %r8,%r9,\lc_old_psw
+ tmhh %r8,0x0001 # interrupting from user ?
+ jnz 1f
+#if IS_ENABLED(CONFIG_KVM)
+ lgr %r14,%r9
+ larl %r13,.Lsie_gmap
+ slgr %r14,%r13
+ lghi %r13,.Lsie_done - .Lsie_gmap
+ clgr %r14,%r13
+ jhe 0f
+ brasl %r14,.Lcleanup_sie_int
+#endif
+0: CHECK_STACK __LC_SAVE_AREA_ASYNC
+ lgr %r11,%r15
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ stg %r11,__SF_BACKCHAIN(%r15)
+ j 2f
+1: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ lctlg %c1,%c1,__LC_KERNEL_ASCE
+ lg %r15,__LC_KERNEL_STACK
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+2: la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
# clear user controlled registers to prevent speculative use
xgr %r0,%r0
xgr %r1,%r1
- xgr %r2,%r2
xgr %r3,%r3
xgr %r4,%r4
xgr %r5,%r5
@@ -772,323 +422,49 @@ ENTRY(io_int_handler)
xgr %r10,%r10
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
- tm __PT_PSW+1(%r11),0x01 # coming from user space?
- jno .Lio_skip_asce
+ tm %r8,0x0001 # coming from user space?
+ jno 1f
lctlg %c1,%c1,__LC_KERNEL_ASCE
-.Lio_skip_asce:
- mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
- xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- TRACE_IRQS_OFF
-.Lio_loop:
- lgr %r2,%r11 # pass pointer to pt_regs
- lghi %r3,IO_INTERRUPT
- tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
- jz .Lio_call
- lghi %r3,THIN_INTERRUPT
-.Lio_call:
- brasl %r14,do_IRQ
- TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
- jz .Lio_return
- tpi 0
- jz .Lio_return
- mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
- j .Lio_loop
-.Lio_return:
- LOCKDEP_SYS_EXIT
- TSTMSK __TI_flags(%r12),_TIF_WORK
- jnz .Lio_work # there is work to do (signals etc.)
- TSTMSK __LC_CPU_FLAGS,_CIF_WORK
- jnz .Lio_work
-.Lio_restore:
- TRACE_IRQS_ON
+1: lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,\handler
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jno .Lio_exit_kernel
- DEBUG_USER_ASCE
+ tmhh %r8,0x0001 # returning to user ?
+ jno 2f
lctlg %c1,%c1,__LC_USER_ASCE
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
stpt __LC_EXIT_TIMER
-.Lio_exit_kernel:
- lmg %r0,%r15,__PT_R0(%r11)
+2: lmg %r0,%r15,__PT_R0(%r11)
b __LC_RETURN_LPSWE
-.Lio_done:
+ENDPROC(\name)
+.endm
-#
-# There is work todo, find out in which context we have been interrupted:
-# 1) if we return to user space we can do all _TIF_WORK work
-# 2) if we return to kernel code and kvm is enabled check if we need to
-# modify the psw to leave SIE
-# 3) if we return to kernel code and preemptive scheduling is enabled check
-# the preemption counter and if it is zero call preempt_schedule_irq
-# Before any work can be done, a switch to the kernel stack is required.
-#
-.Lio_work:
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jo .Lio_work_user # yes -> do resched & signal
-#ifdef CONFIG_PREEMPTION
- # check for preemptive scheduling
- icm %r0,15,__LC_PREEMPT_COUNT
- jnz .Lio_restore # preemption is disabled
- TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
- jno .Lio_restore
- # switch to kernel stack
- lg %r1,__PT_R15(%r11)
- aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
- la %r11,STACK_FRAME_OVERHEAD(%r1)
- lgr %r15,%r1
- brasl %r14,preempt_schedule_irq
- j .Lio_return
-#else
- j .Lio_restore
-#endif
-
-#
-# Need to do work before returning to userspace, switch to kernel stack
-#
-.Lio_work_user:
- lg %r1,__LC_KERNEL_STACK
- mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
- la %r11,STACK_FRAME_OVERHEAD(%r1)
- lgr %r15,%r1
-
-#
-# One of the work bits is on. Find out which one.
-#
- TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
- jo .Lio_reschedule
-#ifdef CONFIG_LIVEPATCH
- TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING
- jo .Lio_patch_pending
-#endif
- TSTMSK __TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)
- jnz .Lio_sigpending
- TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
- jo .Lio_notify_resume
- TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE
- jo .Lio_guarded_storage
- TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- jo .Lio_vxrs
- j .Lio_return
-
-#
-# CIF_FPU is set, restore floating-point controls and floating-point registers.
-#
-.Lio_vxrs:
- larl %r14,.Lio_return
- jg load_fpu_regs
-
-#
-# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
-#
-.Lio_guarded_storage:
- ENABLE_INTS_TRACE
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,gs_load_bc_cb
- DISABLE_INTS_TRACE
- j .Lio_return
-
-#
-# _TIF_NEED_RESCHED is set, call schedule
-#
-.Lio_reschedule:
- ENABLE_INTS_TRACE
- brasl %r14,schedule # call scheduler
- DISABLE_INTS_TRACE
- j .Lio_return
-
-#
-# _TIF_PATCH_PENDING is set, call klp_update_patch_state
-#
-#ifdef CONFIG_LIVEPATCH
-.Lio_patch_pending:
- lg %r2,__LC_CURRENT # pass pointer to task struct
- larl %r14,.Lio_return
- jg klp_update_patch_state
-#endif
-
-#
-# _TIF_SIGPENDING or is set, call do_signal
-#
-.Lio_sigpending:
- ENABLE_INTS_TRACE
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,do_signal
- DISABLE_INTS_TRACE
- j .Lio_return
-
-#
-# _TIF_NOTIFY_RESUME or is set, call do_notify_resume
-#
-.Lio_notify_resume:
- ENABLE_INTS_TRACE
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,do_notify_resume
- DISABLE_INTS_TRACE
- j .Lio_return
-ENDPROC(io_int_handler)
-
-/*
- * External interrupt handler routine
- */
-ENTRY(ext_int_handler)
- STCK __LC_INT_CLOCK
- stpt __LC_ASYNC_ENTER_TIMER
- BPOFF
- stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
- lg %r12,__LC_CURRENT
- lmg %r8,%r9,__LC_EXT_OLD_PSW
- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK
- stmg %r0,%r7,__PT_R0(%r11)
- # clear user controlled registers to prevent speculative use
- xgr %r0,%r0
- xgr %r1,%r1
- xgr %r2,%r2
- xgr %r3,%r3
- xgr %r4,%r4
- xgr %r5,%r5
- xgr %r6,%r6
- xgr %r7,%r7
- xgr %r10,%r10
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
- stmg %r8,%r9,__PT_PSW(%r11)
- tm __PT_PSW+1(%r11),0x01 # coming from user space?
- jno .Lext_skip_asce
- lctlg %c1,%c1,__LC_KERNEL_ASCE
-.Lext_skip_asce:
- lghi %r1,__LC_EXT_PARAMS2
- mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
- mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
- mvc __PT_INT_PARM_LONG(8,%r11),0(%r1)
- xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- TRACE_IRQS_OFF
- lgr %r2,%r11 # pass pointer to pt_regs
- lghi %r3,EXT_INTERRUPT
- brasl %r14,do_IRQ
- j .Lio_return
-ENDPROC(ext_int_handler)
+INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
+INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
/*
* Load idle PSW.
*/
ENTRY(psw_idle)
stg %r3,__SF_EMPTY(%r15)
- larl %r1,.Lpsw_idle_exit
+ larl %r1,psw_idle_exit
stg %r1,__SF_EMPTY+8(%r15)
larl %r1,smp_cpu_mtid
llgf %r1,0(%r1)
ltgr %r1,%r1
jz .Lpsw_idle_stcctm
- .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
+ .insn rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
.Lpsw_idle_stcctm:
oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
BPON
STCK __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
lpswe __SF_EMPTY(%r15)
-.Lpsw_idle_exit:
+.globl psw_idle_exit
+psw_idle_exit:
BR_EX %r14
ENDPROC(psw_idle)
/*
- * Store floating-point controls and floating-point or vector register
- * depending whether the vector facility is available. A critical section
- * cleanup assures that the registers are stored even if interrupted for
- * some other work. The CIF_FPU flag is set to trigger a lazy restore
- * of the register contents at return from io or a system call.
- */
-ENTRY(save_fpu_regs)
- stnsm __SF_EMPTY(%r15),0xfc
- lg %r2,__LC_CURRENT
- aghi %r2,__TASK_thread
- TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- jo .Lsave_fpu_regs_exit
- stfpc __THREAD_FPU_fpc(%r2)
- lg %r3,__THREAD_FPU_regs(%r2)
- TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
- jz .Lsave_fpu_regs_fp # no -> store FP regs
- VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
- VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3)
- j .Lsave_fpu_regs_done # -> set CIF_FPU flag
-.Lsave_fpu_regs_fp:
- std 0,0(%r3)
- std 1,8(%r3)
- std 2,16(%r3)
- std 3,24(%r3)
- std 4,32(%r3)
- std 5,40(%r3)
- std 6,48(%r3)
- std 7,56(%r3)
- std 8,64(%r3)
- std 9,72(%r3)
- std 10,80(%r3)
- std 11,88(%r3)
- std 12,96(%r3)
- std 13,104(%r3)
- std 14,112(%r3)
- std 15,120(%r3)
-.Lsave_fpu_regs_done:
- oi __LC_CPU_FLAGS+7,_CIF_FPU
-.Lsave_fpu_regs_exit:
- ssm __SF_EMPTY(%r15)
- BR_EX %r14
-.Lsave_fpu_regs_end:
-ENDPROC(save_fpu_regs)
-EXPORT_SYMBOL(save_fpu_regs)
-
-/*
- * Load floating-point controls and floating-point or vector registers.
- * A critical section cleanup assures that the register contents are
- * loaded even if interrupted for some other work.
- *
- * There are special calling conventions to fit into sysc and io return work:
- * %r15: <kernel stack>
- * The function requires:
- * %r4
- */
-load_fpu_regs:
- stnsm __SF_EMPTY(%r15),0xfc
- lg %r4,__LC_CURRENT
- aghi %r4,__TASK_thread
- TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- jno .Lload_fpu_regs_exit
- lfpc __THREAD_FPU_fpc(%r4)
- TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
- lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
- jz .Lload_fpu_regs_fp # -> no VX, load FP regs
- VLM %v0,%v15,0,%r4
- VLM %v16,%v31,256,%r4
- j .Lload_fpu_regs_done
-.Lload_fpu_regs_fp:
- ld 0,0(%r4)
- ld 1,8(%r4)
- ld 2,16(%r4)
- ld 3,24(%r4)
- ld 4,32(%r4)
- ld 5,40(%r4)
- ld 6,48(%r4)
- ld 7,56(%r4)
- ld 8,64(%r4)
- ld 9,72(%r4)
- ld 10,80(%r4)
- ld 11,88(%r4)
- ld 12,96(%r4)
- ld 13,104(%r4)
- ld 14,112(%r4)
- ld 15,120(%r4)
-.Lload_fpu_regs_done:
- ni __LC_CPU_FLAGS+7,255-_CIF_FPU
-.Lload_fpu_regs_exit:
- ssm __SF_EMPTY(%r15)
- BR_EX %r14
-.Lload_fpu_regs_end:
-ENDPROC(load_fpu_regs)
-
-/*
* Machine check handler routines
*/
ENTRY(mcck_int_handler)
@@ -1146,11 +522,8 @@ ENTRY(mcck_int_handler)
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
jo 3f
- la %r14,__LC_SYNC_ENTER_TIMER
- clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
- jl 0f
- la %r14,__LC_ASYNC_ENTER_TIMER
-0: clc 0(8,%r14),__LC_EXIT_TIMER
+ la %r14,__LC_SYS_ENTER_TIMER
+ clc 0(8,%r14),__LC_EXIT_TIMER
jl 1f
la %r14,__LC_EXIT_TIMER
1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
@@ -1165,14 +538,32 @@ ENTRY(mcck_int_handler)
TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
jno .Lmcck_panic
4: ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
- SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER,__LC_MCCK_CLOCK
+ tmhh %r8,0x0001 # interrupting from user ?
+ jnz .Lmcck_user
+#if IS_ENABLED(CONFIG_KVM)
+ lgr %r14,%r9
+ larl %r13,.Lsie_gmap
+ slgr %r14,%r13
+ lghi %r13,.Lsie_done - .Lsie_gmap
+ clgr %r14,%r13
+ jhe .Lmcck_stack
+ brasl %r14,.Lcleanup_sie_mcck
+#endif
+ j .Lmcck_stack
+.Lmcck_user:
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+.Lmcck_stack:
+ lg %r15,__LC_MCCK_STACK
.Lmcck_skip:
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
+ stctg %c1,%c1,__PT_CR1(%r11)
+ lctlg %c1,%c1,__LC_KERNEL_ASCE
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lghi %r14,__LC_GPREGS_SAVE_AREA+64
stmg %r0,%r7,__PT_R0(%r11)
# clear user controlled registers to prevent speculative use
xgr %r0,%r0
xgr %r1,%r1
- xgr %r2,%r2
xgr %r3,%r3
xgr %r4,%r4
xgr %r5,%r5
@@ -1181,9 +572,6 @@ ENTRY(mcck_int_handler)
xgr %r10,%r10
mvc __PT_R8(64,%r11),0(%r14)
stmg %r8,%r9,__PT_PSW(%r11)
- la %r14,4095
- mvc __PT_CR1(8,%r11),__LC_CREGS_SAVE_AREA-4095+8(%r14)
- lctlg %c1,%c1,__LC_KERNEL_ASCE
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
@@ -1195,9 +583,7 @@ ENTRY(mcck_int_handler)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
lgr %r15,%r1
- TRACE_IRQS_OFF
brasl %r14,s390_handle_mcck
- TRACE_IRQS_ON
.Lmcck_return:
lctlg %c1,%c1,__PT_CR1(%r11)
lmg %r0,%r10,__PT_R0(%r11)
@@ -1211,7 +597,6 @@ ENTRY(mcck_int_handler)
.Lmcck_panic:
lg %r15,__LC_NODAT_STACK
- la %r11,STACK_FRAME_OVERHEAD(%r15)
j .Lmcck_skip
ENDPROC(mcck_int_handler)
@@ -1264,21 +649,20 @@ ENDPROC(stack_overflow)
#endif
#if IS_ENABLED(CONFIG_KVM)
-.Lcleanup_sie:
- cghi %r11,__LC_SAVE_AREA_ASYNC #Is this in normal interrupt?
- je 1f
+.Lcleanup_sie_mcck:
larl %r13,.Lsie_entry
slgr %r9,%r13
larl %r13,.Lsie_skip
clgr %r9,%r13
- jh 1f
+ jh .Lcleanup_sie_int
oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
-1: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+.Lcleanup_sie_int:
+ BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_KERNEL_ASCE
larl %r9,sie_exit # skip forward to sie_exit
- BR_EX %r14,%r11
+ BR_EX %r14,%r13
#endif
.section .rodata, "a"
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index a16c33b32ab0..3d0c0ac5c20e 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -17,8 +17,9 @@ void io_int_handler(void);
void mcck_int_handler(void);
void restart_int_handler(void);
-asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
-asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
+void __ret_from_fork(struct task_struct *prev, struct pt_regs *regs);
+void __do_pgm_check(struct pt_regs *regs);
+void __do_syscall(struct pt_regs *regs, int per_trap);
void do_protection_exception(struct pt_regs *regs);
void do_dat_exception(struct pt_regs *regs);
@@ -48,9 +49,7 @@ void translation_exception(struct pt_regs *regs);
void vector_exception(struct pt_regs *regs);
void monitor_event_exception(struct pt_regs *regs);
-void do_per_trap(struct pt_regs *regs);
void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str);
-void syscall_trace(struct pt_regs *regs, int entryexit);
void kernel_stack_overflow(struct pt_regs * regs);
void do_signal(struct pt_regs *regs);
void handle_signal32(struct ksignal *ksig, sigset_t *oldset,
@@ -58,7 +57,8 @@ void handle_signal32(struct ksignal *ksig, sigset_t *oldset,
void do_notify_resume(struct pt_regs *regs);
void __init init_IRQ(void);
-void do_IRQ(struct pt_regs *regs, int irq);
+void do_io_irq(struct pt_regs *regs);
+void do_ext_irq(struct pt_regs *regs);
void do_restart(void);
void __init startup_init(void);
void die(struct pt_regs *regs, const char *str);
@@ -82,8 +82,6 @@ long sys_s390_sthyi(unsigned long function_code, void __user *buffer, u64 __user
DECLARE_PER_CPU(u64, mt_cycles[8]);
-void gs_load_bc_cb(struct pt_regs *regs);
-
unsigned long stack_alloc(void);
void stack_free(unsigned long stack);
diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c
index 0da378e2eb25..d864c9a325e2 100644
--- a/arch/s390/kernel/fpu.c
+++ b/arch/s390/kernel/fpu.c
@@ -175,3 +175,91 @@ void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
: "1", "cc");
}
EXPORT_SYMBOL(__kernel_fpu_end);
+
+void __load_fpu_regs(void)
+{
+ struct fpu *state = &current->thread.fpu;
+ unsigned long *regs = current->thread.fpu.regs;
+
+ asm volatile("lfpc %0" : : "Q" (state->fpc));
+ if (likely(MACHINE_HAS_VX)) {
+ asm volatile("lgr 1,%0\n"
+ "VLM 0,15,0,1\n"
+ "VLM 16,31,256,1\n"
+ :
+ : "d" (regs)
+ : "1", "cc", "memory");
+ } else {
+ asm volatile("ld 0,%0" : : "Q" (regs[0]));
+ asm volatile("ld 1,%0" : : "Q" (regs[1]));
+ asm volatile("ld 2,%0" : : "Q" (regs[2]));
+ asm volatile("ld 3,%0" : : "Q" (regs[3]));
+ asm volatile("ld 4,%0" : : "Q" (regs[4]));
+ asm volatile("ld 5,%0" : : "Q" (regs[5]));
+ asm volatile("ld 6,%0" : : "Q" (regs[6]));
+ asm volatile("ld 7,%0" : : "Q" (regs[7]));
+ asm volatile("ld 8,%0" : : "Q" (regs[8]));
+ asm volatile("ld 9,%0" : : "Q" (regs[9]));
+ asm volatile("ld 10,%0" : : "Q" (regs[10]));
+ asm volatile("ld 11,%0" : : "Q" (regs[11]));
+ asm volatile("ld 12,%0" : : "Q" (regs[12]));
+ asm volatile("ld 13,%0" : : "Q" (regs[13]));
+ asm volatile("ld 14,%0" : : "Q" (regs[14]));
+ asm volatile("ld 15,%0" : : "Q" (regs[15]));
+ }
+ clear_cpu_flag(CIF_FPU);
+}
+EXPORT_SYMBOL(__load_fpu_regs);
+
+void load_fpu_regs(void)
+{
+ raw_local_irq_disable();
+ __load_fpu_regs();
+ raw_local_irq_enable();
+}
+EXPORT_SYMBOL(load_fpu_regs);
+
+void save_fpu_regs(void)
+{
+ unsigned long flags, *regs;
+ struct fpu *state;
+
+ local_irq_save(flags);
+
+ if (test_cpu_flag(CIF_FPU))
+ goto out;
+
+ state = &current->thread.fpu;
+ regs = current->thread.fpu.regs;
+
+ asm volatile("stfpc %0" : "=Q" (state->fpc));
+ if (likely(MACHINE_HAS_VX)) {
+ asm volatile("lgr 1,%0\n"
+ "VSTM 0,15,0,1\n"
+ "VSTM 16,31,256,1\n"
+ :
+ : "d" (regs)
+ : "1", "cc", "memory");
+ } else {
+ asm volatile("std 0,%0" : "=Q" (regs[0]));
+ asm volatile("std 1,%0" : "=Q" (regs[1]));
+ asm volatile("std 2,%0" : "=Q" (regs[2]));
+ asm volatile("std 3,%0" : "=Q" (regs[3]));
+ asm volatile("std 4,%0" : "=Q" (regs[4]));
+ asm volatile("std 5,%0" : "=Q" (regs[5]));
+ asm volatile("std 6,%0" : "=Q" (regs[6]));
+ asm volatile("std 7,%0" : "=Q" (regs[7]));
+ asm volatile("std 8,%0" : "=Q" (regs[8]));
+ asm volatile("std 9,%0" : "=Q" (regs[9]));
+ asm volatile("std 10,%0" : "=Q" (regs[10]));
+ asm volatile("std 11,%0" : "=Q" (regs[11]));
+ asm volatile("std 12,%0" : "=Q" (regs[12]));
+ asm volatile("std 13,%0" : "=Q" (regs[13]));
+ asm volatile("std 14,%0" : "=Q" (regs[14]));
+ asm volatile("std 15,%0" : "=Q" (regs[15]));
+ }
+ set_cpu_flag(CIF_FPU);
+out:
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(save_fpu_regs);
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index a5d4d80d6ede..812073ea073e 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -14,12 +14,36 @@
#include <linux/cpu.h>
#include <linux/sched/cputime.h>
#include <trace/events/power.h>
+#include <asm/cpu_mf.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include "entry.h"
static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
+void account_idle_time_irq(void)
+{
+ struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
+ u64 cycles_new[8];
+ int i;
+
+ clear_cpu_flag(CIF_ENABLED_WAIT);
+ if (smp_cpu_mtid) {
+ stcctm(MT_DIAG, smp_cpu_mtid, cycles_new);
+ for (i = 0; i < smp_cpu_mtid; i++)
+ this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
+ }
+
+ idle->clock_idle_exit = S390_lowcore.int_clock;
+ idle->timer_idle_exit = S390_lowcore.sys_enter_timer;
+
+ S390_lowcore.steal_timer += idle->clock_idle_enter - S390_lowcore.last_update_clock;
+ S390_lowcore.last_update_clock = idle->clock_idle_exit;
+
+ S390_lowcore.system_timer += S390_lowcore.last_update_timer - idle->timer_idle_enter;
+ S390_lowcore.last_update_timer = idle->timer_idle_exit;
+}
+
void arch_cpu_idle(void)
{
struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index f8a8b9428ae2..601c21791338 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -21,12 +21,14 @@
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/irq.h>
+#include <linux/entry-common.h>
#include <asm/irq_regs.h>
#include <asm/cputime.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/stacktrace.h>
+#include <asm/softirq_stack.h>
#include "entry.h"
DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
@@ -95,19 +97,97 @@ static const struct irq_class irqclass_sub_desc[] = {
{.irq = CPU_RST, .name = "RST", .desc = "[CPU] CPU Restart"},
};
-void do_IRQ(struct pt_regs *regs, int irq)
+static void do_IRQ(struct pt_regs *regs, int irq)
{
- struct pt_regs *old_regs;
-
- old_regs = set_irq_regs(regs);
- irq_enter();
if (tod_after_eq(S390_lowcore.int_clock,
S390_lowcore.clock_comparator))
/* Serve timer interrupts first. */
clock_comparator_work();
generic_handle_irq(irq);
+}
+
+static int on_async_stack(void)
+{
+ unsigned long frame = current_frame_address();
+
+ return !!!((S390_lowcore.async_stack - frame) >> (PAGE_SHIFT + THREAD_SIZE_ORDER));
+}
+
+static void do_irq_async(struct pt_regs *regs, int irq)
+{
+ if (on_async_stack())
+ do_IRQ(regs, irq);
+ else
+ CALL_ON_STACK(do_IRQ, S390_lowcore.async_stack, 2, regs, irq);
+}
+
+static int irq_pending(struct pt_regs *regs)
+{
+ int cc;
+
+ asm volatile("tpi 0\n"
+ "ipm %0" : "=d" (cc) : : "cc");
+ return cc >> 28;
+}
+
+void noinstr do_io_irq(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ int from_idle;
+
+ irq_enter();
+
+ if (user_mode(regs))
+ update_timer_sys();
+
+ from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit;
+ if (from_idle)
+ account_idle_time_irq();
+
+ do {
+ memcpy(&regs->int_code, &S390_lowcore.subchannel_id, 12);
+ if (S390_lowcore.io_int_word & BIT(31))
+ do_irq_async(regs, THIN_INTERRUPT);
+ else
+ do_irq_async(regs, IO_INTERRUPT);
+ } while (MACHINE_IS_LPAR && irq_pending(regs));
+
+ irq_exit();
+ set_irq_regs(old_regs);
+ irqentry_exit(regs, state);
+
+ if (from_idle)
+ regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
+}
+
+void noinstr do_ext_irq(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ int from_idle;
+
+ irq_enter();
+
+ if (user_mode(regs))
+ update_timer_sys();
+
+ memcpy(&regs->int_code, &S390_lowcore.ext_cpu_addr, 4);
+ regs->int_parm = S390_lowcore.ext_params;
+ regs->int_parm_long = *(unsigned long *)S390_lowcore.ext_params2;
+
+ from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit;
+ if (from_idle)
+ account_idle_time_irq();
+
+ do_irq_async(regs, EXT_INTERRUPT);
+
irq_exit();
set_irq_regs(old_regs);
+ irqentry_exit(regs, state);
+
+ if (from_idle)
+ regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
}
static void show_msi_interrupt(struct seq_file *p, int irq)
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 86c8d5370e7f..11f8c296f60d 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -131,12 +131,11 @@ static notrace void s390_handle_damage(void)
NOKPROBE_SYMBOL(s390_handle_damage);
/*
- * Main machine check handler function. Will be called with interrupts enabled
- * or disabled and machine checks enabled or disabled.
+ * Main machine check handler function. Will be called with interrupts disabled
+ * and machine checks enabled.
*/
-void s390_handle_mcck(void)
+void __s390_handle_mcck(void)
{
- unsigned long flags;
struct mcck_struct mcck;
/*
@@ -144,12 +143,10 @@ void s390_handle_mcck(void)
* machine checks. Afterwards delete the old state and enable machine
* checks again.
*/
- local_irq_save(flags);
local_mcck_disable();
mcck = *this_cpu_ptr(&cpu_mcck);
memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
local_mcck_enable();
- local_irq_restore(flags);
if (mcck.channel_report)
crw_handle_channel_report();
@@ -181,8 +178,13 @@ void s390_handle_mcck(void)
do_exit(SIGSEGV);
}
}
-EXPORT_SYMBOL_GPL(s390_handle_mcck);
+void noinstr s390_handle_mcck(void)
+{
+ trace_hardirqs_off();
+ __s390_handle_mcck();
+ trace_hardirqs_on();
+}
/*
* returns 0 if all required registers are available
* returns 1 otherwise
@@ -344,6 +346,9 @@ int notrace s390_do_machine_check(struct pt_regs *regs)
int mcck_pending = 0;
nmi_enter();
+
+ if (user_mode(regs))
+ update_timer_mcck();
inc_irq_stat(NMI_NMI);
mci.val = S390_lowcore.mcck_interruption_code;
mcck = this_cpu_ptr(&cpu_mcck);
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
index e949ab832ed7..db4877bbb9aa 100644
--- a/arch/s390/kernel/perf_cpum_cf_diag.c
+++ b/arch/s390/kernel/perf_cpum_cf_diag.c
@@ -2,7 +2,7 @@
/*
* Performance event support for s390x - CPU-measurement Counter Sets
*
- * Copyright IBM Corp. 2019
+ * Copyright IBM Corp. 2019, 2021
* Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
* Thomas Richter <tmricht@linux.ibm.com>
*/
@@ -17,6 +17,8 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/processor.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
#include <asm/ctl_reg.h>
#include <asm/irq.h>
@@ -24,15 +26,20 @@
#include <asm/timex.h>
#include <asm/debug.h>
-#define CF_DIAG_CTRSET_DEF 0xfeef /* Counter set header mark */
+#include <asm/perf_cpum_cf_diag.h>
+#define CF_DIAG_CTRSET_DEF 0xfeef /* Counter set header mark */
+#define CF_DIAG_MIN_INTERVAL 60 /* Minimum counter set read */
+ /* interval in seconds */
+static unsigned long cf_diag_interval = CF_DIAG_MIN_INTERVAL;
static unsigned int cf_diag_cpu_speed;
static debug_info_t *cf_diag_dbg;
-struct cf_diag_csd { /* Counter set data per CPU */
+struct cf_diag_csd { /* Counter set data per CPU */
size_t used; /* Bytes used in data/start */
unsigned char start[PAGE_SIZE]; /* Counter set at event start */
unsigned char data[PAGE_SIZE]; /* Counter set at event delete */
+ unsigned int sets; /* # Counter set saved in data */
};
static DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd);
@@ -118,8 +125,8 @@ static void cf_diag_trailer(struct cf_trailer_entry *te)
if (te->cpu_speed)
te->speed = 1;
te->clock_base = 1; /* Save clock base */
- memcpy(&te->tod_base, &tod_clock_base[1], 8);
- store_tod_clock((__u64 *)&te->timestamp);
+ te->tod_base = tod_clock_base.tod;
+ te->timestamp = get_tod_clock_fast();
}
/*
@@ -178,18 +185,35 @@ static void cf_diag_disable(struct pmu *pmu)
/* Number of perf events counting hardware events */
static atomic_t cf_diag_events = ATOMIC_INIT(0);
+/* Used to avoid races in calling reserve/release_cpumf_hardware */
+static DEFINE_MUTEX(cf_diag_reserve_mutex);
/* Release the PMU if event is the last perf event */
static void cf_diag_perf_event_destroy(struct perf_event *event)
{
debug_sprintf_event(cf_diag_dbg, 5,
"%s event %p cpu %d cf_diag_events %d\n",
- __func__, event, event->cpu,
+ __func__, event, smp_processor_id(),
atomic_read(&cf_diag_events));
if (atomic_dec_return(&cf_diag_events) == 0)
__kernel_cpumcf_end();
}
+static int get_authctrsets(void)
+{
+ struct cpu_cf_events *cpuhw;
+ unsigned long auth = 0;
+ enum cpumf_ctr_set i;
+
+ cpuhw = &get_cpu_var(cpu_cf_events);
+ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
+ if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
+ auth |= cpumf_ctr_ctl[i];
+ }
+ put_cpu_var(cpu_cf_events);
+ return auth;
+}
+
/* Setup the event. Test for authorized counter sets and only include counter
* sets which are authorized at the time of the setup. Including unauthorized
* counter sets result in specification exception (and panic).
@@ -197,15 +221,12 @@ static void cf_diag_perf_event_destroy(struct perf_event *event)
static int __hw_perf_event_init(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
- struct cpu_cf_events *cpuhw;
- enum cpumf_ctr_set i;
int err = 0;
debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
event, event->cpu);
event->hw.config = attr->config;
- event->hw.config_base = 0;
/* Add all authorized counter sets to config_base. The
 * hardware init function is either called per-cpu or just once
@@ -215,11 +236,7 @@ static int __hw_perf_event_init(struct perf_event *event)
* Checking the authorization on any CPU is fine as the hardware
* applies the same authorization settings to all CPUs.
*/
- cpuhw = &get_cpu_var(cpu_cf_events);
- for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
- if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
- event->hw.config_base |= cpumf_ctr_ctl[i];
- put_cpu_var(cpu_cf_events);
+ event->hw.config_base = get_authctrsets();
/* No authorized counter sets, nothing to count/sample */
if (!event->hw.config_base) {
@@ -237,6 +254,25 @@ out:
return err;
}
+/* Return 0 if the CPU-measurement counter facility is currently free
+ * and an error otherwise.
+ */
+static int cf_diag_perf_event_inuse(void)
+{
+ int err = 0;
+
+ if (!atomic_inc_not_zero(&cf_diag_events)) {
+ mutex_lock(&cf_diag_reserve_mutex);
+ if (atomic_read(&cf_diag_events) == 0 &&
+ __kernel_cpumcf_begin())
+ err = -EBUSY;
+ else
+ err = atomic_inc_return(&cf_diag_events);
+ mutex_unlock(&cf_diag_reserve_mutex);
+ }
+ return err;
+}
+
static int cf_diag_event_init(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
@@ -264,13 +300,9 @@ static int cf_diag_event_init(struct perf_event *event)
}
/* Initialize for using the CPU-measurement counter facility */
- if (atomic_inc_return(&cf_diag_events) == 1) {
- if (__kernel_cpumcf_begin()) {
- atomic_dec(&cf_diag_events);
- err = -EBUSY;
- goto out;
- }
- }
+ err = cf_diag_perf_event_inuse();
+ if (err < 0)
+ goto out;
event->destroy = cf_diag_perf_event_destroy;
err = __hw_perf_event_init(event);
@@ -599,6 +631,8 @@ static void cf_diag_del(struct perf_event *event, int flags)
cpuhw->flags &= ~PMU_F_IN_USE;
}
+/* Default counter set events and format attribute groups */
+
CPUMF_EVENT_ATTR(CF_DIAG, CF_DIAG, PERF_EVENT_CPUM_CF_DIAG);
static struct attribute *cf_diag_events_attr[] = {
@@ -663,6 +697,452 @@ static void cf_diag_get_cpu_speed(void)
}
}
+/* Code to create device and file I/O operations */
+static atomic_t ctrset_opencnt = ATOMIC_INIT(0); /* Excl. access */
+
+static int cf_diag_open(struct inode *inode, struct file *file)
+{
+ int err = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (atomic_xchg(&ctrset_opencnt, 1))
+ return -EBUSY;
+
+ /* Avoid concurrent access with perf_event_open() system call */
+ mutex_lock(&cf_diag_reserve_mutex);
+ if (atomic_read(&cf_diag_events) || __kernel_cpumcf_begin())
+ err = -EBUSY;
+ mutex_unlock(&cf_diag_reserve_mutex);
+ if (err) {
+ atomic_set(&ctrset_opencnt, 0);
+ return err;
+ }
+ file->private_data = NULL;
+ debug_sprintf_event(cf_diag_dbg, 2, "%s\n", __func__);
+ /* nonseekable_open() never fails */
+ return nonseekable_open(inode, file);
+}
+
+/* Variables for ioctl() interface support */
+static DEFINE_MUTEX(cf_diag_ctrset_mutex);
+static struct cf_diag_ctrset {
+ unsigned long ctrset; /* Bit mask of counter set to read */
+ cpumask_t mask; /* CPU mask to read from */
+ time64_t lastread; /* Epoch counter set last read */
+} cf_diag_ctrset;
+
+static void cf_diag_ctrset_clear(void)
+{
+ cpumask_clear(&cf_diag_ctrset.mask);
+ cf_diag_ctrset.ctrset = 0;
+}
+
+static void cf_diag_release_cpu(void *p)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+
+ debug_sprintf_event(cf_diag_dbg, 3, "%s cpu %d\n", __func__,
+ smp_processor_id());
+ lcctl(0); /* Reset counter sets */
+ cpuhw->state = 0; /* Save state in CPU hardware state */
+}
+
+/* Release function is also called when the application gets terminated without
+ * doing a proper ioctl(..., S390_HWCTR_STOP, ...) command.
+ * Since only one application is allowed to open the device, simply stop all
+ * CPU counter sets.
+ */
+static int cf_diag_release(struct inode *inode, struct file *file)
+{
+ on_each_cpu(cf_diag_release_cpu, NULL, 1);
+ cf_diag_ctrset_clear();
+ atomic_set(&ctrset_opencnt, 0);
+ __kernel_cpumcf_end();
+ debug_sprintf_event(cf_diag_dbg, 2, "%s\n", __func__);
+ return 0;
+}
+
+struct cf_diag_call_on_cpu_parm { /* Parm struct for smp_call_on_cpu */
+ unsigned int sets; /* Counter set bit mask */
+ atomic_t cpus_ack; /* # CPUs successfully executed func */
+};
+
+static int cf_diag_all_copy(unsigned long arg, cpumask_t *mask)
+{
+ struct s390_ctrset_read __user *ctrset_read;
+ unsigned int cpu, cpus, rc;
+ void __user *uptr;
+
+ ctrset_read = (struct s390_ctrset_read __user *)arg;
+ uptr = ctrset_read->data;
+ for_each_cpu(cpu, mask) {
+ struct cf_diag_csd *csd = per_cpu_ptr(&cf_diag_csd, cpu);
+ struct s390_ctrset_cpudata __user *ctrset_cpudata;
+
+ ctrset_cpudata = uptr;
+ debug_sprintf_event(cf_diag_dbg, 5, "%s cpu %d used %zd\n",
+ __func__, cpu, csd->used);
+ rc = put_user(cpu, &ctrset_cpudata->cpu_nr);
+ rc |= put_user(csd->sets, &ctrset_cpudata->no_sets);
+ rc |= copy_to_user(ctrset_cpudata->data, csd->data, csd->used);
+ if (rc)
+ return -EFAULT;
+ uptr += sizeof(struct s390_ctrset_cpudata) + csd->used;
+ cond_resched();
+ }
+ cpus = cpumask_weight(mask);
+ if (put_user(cpus, &ctrset_read->no_cpus))
+ return -EFAULT;
+ debug_sprintf_event(cf_diag_dbg, 5, "%s copied %ld\n",
+ __func__, uptr - (void __user *)ctrset_read->data);
+ return 0;
+}
+
+static size_t cf_diag_cpuset_read(struct s390_ctrset_setdata *p, int ctrset,
+ int ctrset_size, size_t room)
+{
+ size_t need = 0;
+ int rc = -1;
+
+ need = sizeof(*p) + sizeof(u64) * ctrset_size;
+ debug_sprintf_event(cf_diag_dbg, 5,
+ "%s room %zd need %zd set %#x set_size %d\n",
+ __func__, room, need, ctrset, ctrset_size);
+ if (need <= room) {
+ p->set = cpumf_ctr_ctl[ctrset];
+ p->no_cnts = ctrset_size;
+ rc = ctr_stcctm(ctrset, ctrset_size, (u64 *)p->cv);
+ if (rc == 3) /* Nothing stored */
+ need = 0;
+ }
+ debug_sprintf_event(cf_diag_dbg, 5, "%s need %zd rc %d\n", __func__,
+ need, rc);
+ return need;
+}
+
+/* Read all counter sets. Since the perf_event_open() system call with
+ * event cpum_cf_diag/.../ is blocked when this interface is active, reuse
+ * the perf_event_open() data buffer to store the counter sets.
+ */
+static void cf_diag_cpu_read(void *parm)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
+ struct cf_diag_call_on_cpu_parm *p = parm;
+ int set, set_size;
+ size_t space;
+
+ debug_sprintf_event(cf_diag_dbg, 5,
+ "%s new %#x flags %#x state %#llx\n",
+ __func__, p->sets, cpuhw->flags,
+ cpuhw->state);
+ /* No data saved yet */
+ csd->used = 0;
+ csd->sets = 0;
+ memset(csd->data, 0, sizeof(csd->data));
+
+ /* Scan the counter sets */
+ for (set = CPUMF_CTR_SET_BASIC; set < CPUMF_CTR_SET_MAX; ++set) {
+ struct s390_ctrset_setdata *sp = (void *)csd->data + csd->used;
+
+ if (!(p->sets & cpumf_ctr_ctl[set]))
+ continue; /* Counter set not in list */
+ set_size = cf_diag_ctrset_size(set, &cpuhw->info);
+ space = sizeof(csd->data) - csd->used;
+ space = cf_diag_cpuset_read(sp, set, set_size, space);
+ if (space) {
+ csd->used += space;
+ csd->sets += 1;
+ }
+ debug_sprintf_event(cf_diag_dbg, 5, "%s sp %px space %zd\n",
+ __func__, sp, space);
+ }
+ debug_sprintf_event(cf_diag_dbg, 5, "%s sets %d used %zd\n", __func__,
+ csd->sets, csd->used);
+}
+
+static int cf_diag_all_read(unsigned long arg)
+{
+ struct cf_diag_call_on_cpu_parm p;
+ cpumask_var_t mask;
+ time64_t now;
+ int rc = 0;
+
+ debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+ now = ktime_get_seconds();
+ if (cf_diag_ctrset.lastread + cf_diag_interval > now) {
+ debug_sprintf_event(cf_diag_dbg, 5, "%s now %lld "
+ " lastread %lld\n", __func__, now,
+ cf_diag_ctrset.lastread);
+ rc = -EAGAIN;
+ goto out;
+ } else {
+ cf_diag_ctrset.lastread = now;
+ }
+ p.sets = cf_diag_ctrset.ctrset;
+ cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
+ on_each_cpu_mask(mask, cf_diag_cpu_read, &p, 1);
+ rc = cf_diag_all_copy(arg, mask);
+out:
+ free_cpumask_var(mask);
+ debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d\n", __func__, rc);
+ return rc;
+}
+
+/* Stop all counter sets via ioctl interface */
+static void cf_diag_ioctl_off(void *parm)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ struct cf_diag_call_on_cpu_parm *p = parm;
+ int rc;
+
+ debug_sprintf_event(cf_diag_dbg, 5,
+ "%s new %#x flags %#x state %#llx\n",
+ __func__, p->sets, cpuhw->flags,
+ cpuhw->state);
+
+ ctr_set_multiple_disable(&cpuhw->state, p->sets);
+ ctr_set_multiple_stop(&cpuhw->state, p->sets);
+ rc = lcctl(cpuhw->state); /* Stop counter sets */
+ if (!cpuhw->state)
+ cpuhw->flags &= ~PMU_F_IN_USE;
+ debug_sprintf_event(cf_diag_dbg, 5,
+ "%s rc %d flags %#x state %#llx\n", __func__,
+ rc, cpuhw->flags, cpuhw->state);
+}
+
+/* Start counter sets on particular CPU */
+static void cf_diag_ioctl_on(void *parm)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ struct cf_diag_call_on_cpu_parm *p = parm;
+ int rc;
+
+ debug_sprintf_event(cf_diag_dbg, 5,
+ "%s new %#x flags %#x state %#llx\n",
+ __func__, p->sets, cpuhw->flags,
+ cpuhw->state);
+
+ if (!(cpuhw->flags & PMU_F_IN_USE))
+ cpuhw->state = 0;
+ cpuhw->flags |= PMU_F_IN_USE;
+ rc = lcctl(cpuhw->state); /* Reset unused counter sets */
+ ctr_set_multiple_enable(&cpuhw->state, p->sets);
+ ctr_set_multiple_start(&cpuhw->state, p->sets);
+ rc |= lcctl(cpuhw->state); /* Start counter sets */
+ if (!rc)
+ atomic_inc(&p->cpus_ack);
+ debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d state %#llx\n",
+ __func__, rc, cpuhw->state);
+}
+
+static int cf_diag_all_stop(void)
+{
+ struct cf_diag_call_on_cpu_parm p = {
+ .sets = cf_diag_ctrset.ctrset,
+ };
+ cpumask_var_t mask;
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
+ on_each_cpu_mask(mask, cf_diag_ioctl_off, &p, 1);
+ free_cpumask_var(mask);
+ return 0;
+}
+
+static int cf_diag_all_start(void)
+{
+ struct cf_diag_call_on_cpu_parm p = {
+ .sets = cf_diag_ctrset.ctrset,
+ .cpus_ack = ATOMIC_INIT(0),
+ };
+ cpumask_var_t mask;
+ int rc = 0;
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
+ on_each_cpu_mask(mask, cf_diag_ioctl_on, &p, 1);
+ if (atomic_read(&p.cpus_ack) != cpumask_weight(mask)) {
+ on_each_cpu_mask(mask, cf_diag_ioctl_off, &p, 1);
+ rc = -EIO;
+ }
+ free_cpumask_var(mask);
+ return rc;
+}
+
+/* Return the maximum required space for all possible CPUs in case one
+ * CPU will be onlined during the START, READ, STOP cycles.
+ * To find out the size of the counter sets, any one CPU will do. They
+ * all have the same counter sets.
+ */
+static size_t cf_diag_needspace(unsigned int sets)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ size_t bytes = 0;
+ int i;
+
+ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
+ if (!(sets & cpumf_ctr_ctl[i]))
+ continue;
+ bytes += cf_diag_ctrset_size(i, &cpuhw->info) * sizeof(u64) +
+ sizeof(((struct s390_ctrset_setdata *)0)->set) +
+ sizeof(((struct s390_ctrset_setdata *)0)->no_cnts);
+ }
+ bytes = sizeof(((struct s390_ctrset_read *)0)->no_cpus) + nr_cpu_ids *
+ (bytes + sizeof(((struct s390_ctrset_cpudata *)0)->cpu_nr) +
+ sizeof(((struct s390_ctrset_cpudata *)0)->no_sets));
+ debug_sprintf_event(cf_diag_dbg, 5, "%s bytes %ld\n", __func__,
+ bytes);
+ return bytes;
+}
+
+static long cf_diag_ioctl_read(unsigned long arg)
+{
+ struct s390_ctrset_read read;
+ int ret = 0;
+
+ debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
+ if (copy_from_user(&read, (char __user *)arg, sizeof(read)))
+ return -EFAULT;
+ ret = cf_diag_all_read(arg);
+ debug_sprintf_event(cf_diag_dbg, 5, "%s ret %d\n", __func__, ret);
+ return ret;
+}
+
+static long cf_diag_ioctl_stop(void)
+{
+ int ret;
+
+ debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
+ ret = cf_diag_all_stop();
+ cf_diag_ctrset_clear();
+ debug_sprintf_event(cf_diag_dbg, 5, "%s ret %d\n", __func__, ret);
+ return ret;
+}
+
+static long cf_diag_ioctl_start(unsigned long arg)
+{
+ struct s390_ctrset_start __user *ustart;
+ struct s390_ctrset_start start;
+ void __user *umask;
+ unsigned int len;
+ int ret = 0;
+ size_t need;
+
+ if (cf_diag_ctrset.ctrset)
+ return -EBUSY;
+ ustart = (struct s390_ctrset_start __user *)arg;
+ if (copy_from_user(&start, ustart, sizeof(start)))
+ return -EFAULT;
+ if (start.version != S390_HWCTR_START_VERSION)
+ return -EINVAL;
+ if (start.counter_sets & ~(cpumf_ctr_ctl[CPUMF_CTR_SET_BASIC] |
+ cpumf_ctr_ctl[CPUMF_CTR_SET_USER] |
+ cpumf_ctr_ctl[CPUMF_CTR_SET_CRYPTO] |
+ cpumf_ctr_ctl[CPUMF_CTR_SET_EXT] |
+ cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG]))
+ return -EINVAL; /* Invalid counter set */
+ if (!start.counter_sets)
+ return -EINVAL; /* No counter set at all? */
+ cpumask_clear(&cf_diag_ctrset.mask);
+ len = min_t(u64, start.cpumask_len, cpumask_size());
+ umask = (void __user *)start.cpumask;
+ if (copy_from_user(&cf_diag_ctrset.mask, umask, len))
+ return -EFAULT;
+ if (cpumask_empty(&cf_diag_ctrset.mask))
+ return -EINVAL;
+ need = cf_diag_needspace(start.counter_sets);
+ if (put_user(need, &ustart->data_bytes))
+ ret = -EFAULT;
+ if (ret)
+ goto out;
+ cf_diag_ctrset.ctrset = start.counter_sets;
+ ret = cf_diag_all_start();
+out:
+ if (ret)
+ cf_diag_ctrset_clear();
+ debug_sprintf_event(cf_diag_dbg, 2, "%s sets %#lx need %ld ret %d\n",
+ __func__, cf_diag_ctrset.ctrset, need, ret);
+ return ret;
+}
+
+static long cf_diag_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ debug_sprintf_event(cf_diag_dbg, 2, "%s cmd %#x arg %lx\n", __func__,
+ cmd, arg);
+ get_online_cpus();
+ mutex_lock(&cf_diag_ctrset_mutex);
+ switch (cmd) {
+ case S390_HWCTR_START:
+ ret = cf_diag_ioctl_start(arg);
+ break;
+ case S390_HWCTR_STOP:
+ ret = cf_diag_ioctl_stop();
+ break;
+ case S390_HWCTR_READ:
+ ret = cf_diag_ioctl_read(arg);
+ break;
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+ mutex_unlock(&cf_diag_ctrset_mutex);
+ put_online_cpus();
+ debug_sprintf_event(cf_diag_dbg, 2, "%s ret %d\n", __func__, ret);
+ return ret;
+}
+
+static const struct file_operations cf_diag_fops = {
+ .owner = THIS_MODULE,
+ .open = cf_diag_open,
+ .release = cf_diag_release,
+ .unlocked_ioctl = cf_diag_ioctl,
+ .compat_ioctl = cf_diag_ioctl,
+ .llseek = no_llseek
+};
+
+static struct miscdevice cf_diag_dev = {
+ .name = S390_HWCTR_DEVICE,
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &cf_diag_fops,
+};
+
+static int cf_diag_online_cpu(unsigned int cpu)
+{
+ struct cf_diag_call_on_cpu_parm p;
+
+ mutex_lock(&cf_diag_ctrset_mutex);
+ if (!cf_diag_ctrset.ctrset)
+ goto out;
+ p.sets = cf_diag_ctrset.ctrset;
+ cf_diag_ioctl_on(&p);
+out:
+ mutex_unlock(&cf_diag_ctrset_mutex);
+ return 0;
+}
+
+static int cf_diag_offline_cpu(unsigned int cpu)
+{
+ struct cf_diag_call_on_cpu_parm p;
+
+ mutex_lock(&cf_diag_ctrset_mutex);
+ if (!cf_diag_ctrset.ctrset)
+ goto out;
+ p.sets = cf_diag_ctrset.ctrset;
+ cf_diag_ioctl_off(&p);
+out:
+ mutex_unlock(&cf_diag_ctrset_mutex);
+ return 0;
+}
+
/* Initialize the counter set PMU to generate complete counter set data as
* event raw data. This relies on the CPU Measurement Counter Facility device
* already being loaded and initialized.
@@ -685,21 +1165,43 @@ static int __init cf_diag_init(void)
return -ENOMEM;
}
+ rc = misc_register(&cf_diag_dev);
+ if (rc) {
+ pr_err("Registration of /dev/" S390_HWCTR_DEVICE
+ "failed rc=%d\n", rc);
+ goto out;
+ }
+
/* Setup s390dbf facility */
cf_diag_dbg = debug_register(KMSG_COMPONENT, 2, 1, 128);
if (!cf_diag_dbg) {
pr_err("Registration of s390dbf(cpum_cf_diag) failed\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_dbf;
}
debug_register_view(cf_diag_dbg, &debug_sprintf_view);
rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", -1);
if (rc) {
- debug_unregister_view(cf_diag_dbg, &debug_sprintf_view);
- debug_unregister(cf_diag_dbg);
pr_err("Registration of PMU(cpum_cf_diag) failed with rc=%i\n",
rc);
+ goto out_perf;
}
+ rc = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_S390_CFD_ONLINE,
+ "perf/s390/cfd:online",
+ cf_diag_online_cpu, cf_diag_offline_cpu);
+ if (!rc)
+ goto out;
+
+ pr_err("Registration of CPUHP_AP_PERF_S390_CFD_ONLINE failed rc=%i\n",
+ rc);
+ perf_pmu_unregister(&cf_diag);
+out_perf:
+ debug_unregister_view(cf_diag_dbg, &debug_sprintf_view);
+ debug_unregister(cf_diag_dbg);
+out_dbf:
+ misc_deregister(&cf_diag_dev);
+out:
return rc;
}
-arch_initcall(cf_diag_init);
+device_initcall(cf_diag_init);
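To make the new ioctl flow above concrete, a minimal user-space sketch of the START/READ/STOP cycle could look roughly like the following (illustrative only, not part of this patch; it assumes S390_HWCTR_DEVICE expands to "hwctr", takes the struct field names from the hunks above, and uses a placeholder value for the counter-set bit mask):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/perf_cpum_cf_diag.h>	/* S390_HWCTR_*, struct s390_ctrset_* */

int main(void)
{
	unsigned long cpumask[1] = { 1 };	/* bit 0: read CPU 0 only */
	struct s390_ctrset_start start;
	struct s390_ctrset_read *buf;
	int fd, rc;

	fd = open("/dev/hwctr", O_RDWR);	/* assumes S390_HWCTR_DEVICE == "hwctr" */
	if (fd < 0)
		return 1;

	memset(&start, 0, sizeof(start));
	start.version = S390_HWCTR_START_VERSION;
	start.counter_sets = 2;			/* placeholder: one counter-set bit */
	start.cpumask_len = sizeof(cpumask);
	start.cpumask = (unsigned long)cpumask;
	rc = ioctl(fd, S390_HWCTR_START, &start);
	if (rc)
		goto out;

	/* The START ioctl writes the worst-case buffer size to data_bytes. */
	buf = calloc(1, start.data_bytes);
	if (buf && !ioctl(fd, S390_HWCTR_READ, buf))
		printf("counter data returned for %u CPUs\n", buf->no_cpus);
	free(buf);

	ioctl(fd, S390_HWCTR_STOP, 0);
out:
	close(fd);
	return rc ? 1 : 0;
}

cf_diag_open() enforces CAP_SYS_ADMIN and exclusive access, so only one such reader can hold the device at a time, and perf_event_open() of the cpum_cf_diag event stays blocked while it does.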
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 19cd7b961c45..db62def4ef28 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1682,7 +1682,7 @@ static void aux_sdb_init(unsigned long sdb)
/* Save clock base */
te->clock_base = 1;
- memcpy(&te->progusage2, &tod_clock_base[1], 8);
+ te->progusage2 = tod_clock_base.tod;
}
/*
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index bc3ca54edfb4..e20bed1ed34a 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -29,6 +29,7 @@
#include <linux/random.h>
#include <linux/export.h>
#include <linux/init_task.h>
+#include <linux/entry-common.h>
#include <asm/cpu_mf.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -43,9 +44,22 @@
#include <asm/unwind.h>
#include "entry.h"
-asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
+void ret_from_fork(void) asm("ret_from_fork");
-extern void kernel_thread_starter(void);
+void __ret_from_fork(struct task_struct *prev, struct pt_regs *regs)
+{
+ void (*func)(void *arg);
+
+ schedule_tail(prev);
+
+ if (!user_mode(regs)) {
+ /* Kernel thread */
+ func = (void *)regs->gprs[9];
+ func((void *)regs->gprs[10]);
+ }
+ clear_pt_regs_flag(regs, PIF_SYSCALL);
+ syscall_exit_to_user_mode(regs);
+}
void flush_thread(void)
{
@@ -108,22 +122,24 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
p->thread.last_break = 1;
frame->sf.back_chain = 0;
+ frame->sf.gprs[5] = (unsigned long)frame + sizeof(struct stack_frame);
+ frame->sf.gprs[6] = (unsigned long)p;
/* new return point is ret_from_fork */
- frame->sf.gprs[8] = (unsigned long) ret_from_fork;
+ frame->sf.gprs[8] = (unsigned long)ret_from_fork;
/* fake return stack for resume(), don't go back to schedule */
- frame->sf.gprs[9] = (unsigned long) frame;
+ frame->sf.gprs[9] = (unsigned long)frame;
/* Store access registers to kernel stack of new process. */
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
/* kernel thread */
memset(&frame->childregs, 0, sizeof(struct pt_regs));
frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
frame->childregs.psw.addr =
- (unsigned long) kernel_thread_starter;
+ (unsigned long)__ret_from_fork;
frame->childregs.gprs[9] = new_stackp; /* function */
frame->childregs.gprs[10] = arg;
- frame->childregs.gprs[11] = (unsigned long) do_exit;
+ frame->childregs.gprs[11] = (unsigned long)do_exit;
frame->childregs.orig_gpr2 = -1;
return 0;
@@ -153,7 +169,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
return 0;
}
-asmlinkage void execve_tail(void)
+void execve_tail(void)
{
current->thread.fpu.fpc = 0;
asm volatile("sfpc %0" : : "d" (0));
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index a76dd27fb2e8..18b3416fd663 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -7,6 +7,7 @@
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
+#include "asm/ptrace.h"
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
@@ -37,9 +38,6 @@
#include "compat_ptrace.h"
#endif
-#define CREATE_TRACE_POINTS
-#include <trace/events/syscalls.h>
-
void update_cr_regs(struct task_struct *task)
{
struct pt_regs *regs = task_pt_regs(task);
@@ -140,7 +138,7 @@ void ptrace_disable(struct task_struct *task)
memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
- clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
+ clear_tsk_thread_flag(task, TIF_PER_TRAP);
task->thread.per_flags = 0;
}
@@ -322,25 +320,6 @@ static inline void __poke_user_per(struct task_struct *child,
child->thread.per_user.end = data;
}
-static void fixup_int_code(struct task_struct *child, addr_t data)
-{
- struct pt_regs *regs = task_pt_regs(child);
- int ilc = regs->int_code >> 16;
- u16 insn;
-
- if (ilc > 6)
- return;
-
- if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16),
- &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn))
- return;
-
- /* double check that tracee stopped on svc instruction */
- if ((insn >> 8) != 0xa)
- return;
-
- regs->int_code = 0x20000 | (data & 0xffff);
-}
/*
* Write a word to the user area of a process at location addr. This
* operation does have an additional problem compared to peek_user.
@@ -374,10 +353,12 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
}
if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
- addr == offsetof(struct user, regs.gprs[2]))
- fixup_int_code(child, data);
- *(addr_t *)((addr_t) &regs->psw + addr) = data;
+ addr == offsetof(struct user, regs.gprs[2])) {
+ struct pt_regs *regs = task_pt_regs(child);
+ regs->int_code = 0x20000 | (data & 0xffff);
+ }
+ *(addr_t *)((addr_t) &regs->psw + addr) = data;
} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
/*
* access registers are stored in the thread structure
@@ -742,10 +723,12 @@ static int __poke_user_compat(struct task_struct *child,
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
(__u64)(tmp & PSW32_ADDR_AMODE);
} else {
-
if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
- addr == offsetof(struct compat_user, regs.gprs[2]))
- fixup_int_code(child, data);
+ addr == offsetof(struct compat_user, regs.gprs[2])) {
+ struct pt_regs *regs = task_pt_regs(child);
+
+ regs->int_code = 0x20000 | (data & 0xffff);
+ }
/* gpr 0-15 */
*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
}
@@ -862,82 +845,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
#endif
-asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
-{
- unsigned long mask = -1UL;
- long ret = -1;
-
- if (is_compat_task())
- mask = 0xffffffff;
-
- /*
- * The sysc_tracesys code in entry.S stored the system
- * call number to gprs[2].
- */
- if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs)) {
- /*
- * Tracing decided this syscall should not happen. Skip
- * the system call and the system call restart handling.
- */
- goto skip;
- }
-
-#ifdef CONFIG_SECCOMP
- /* Do the secure computing check after ptrace. */
- if (unlikely(test_thread_flag(TIF_SECCOMP))) {
- struct seccomp_data sd;
-
- if (is_compat_task()) {
- sd.instruction_pointer = regs->psw.addr & 0x7fffffff;
- sd.arch = AUDIT_ARCH_S390;
- } else {
- sd.instruction_pointer = regs->psw.addr;
- sd.arch = AUDIT_ARCH_S390X;
- }
-
- sd.nr = regs->int_code & 0xffff;
- sd.args[0] = regs->orig_gpr2 & mask;
- sd.args[1] = regs->gprs[3] & mask;
- sd.args[2] = regs->gprs[4] & mask;
- sd.args[3] = regs->gprs[5] & mask;
- sd.args[4] = regs->gprs[6] & mask;
- sd.args[5] = regs->gprs[7] & mask;
-
- if (__secure_computing(&sd) == -1)
- goto skip;
- }
-#endif /* CONFIG_SECCOMP */
-
- if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_enter(regs, regs->int_code & 0xffff);
-
-
- audit_syscall_entry(regs->int_code & 0xffff, regs->orig_gpr2 & mask,
- regs->gprs[3] &mask, regs->gprs[4] &mask,
- regs->gprs[5] &mask);
-
- if ((signed long)regs->gprs[2] >= NR_syscalls) {
- regs->gprs[2] = -ENOSYS;
- ret = -ENOSYS;
- }
- return regs->gprs[2];
-skip:
- clear_pt_regs_flag(regs, PIF_SYSCALL);
- return ret;
-}
-
-asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
-{
- audit_syscall_exit(regs);
-
- if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_exit(regs, regs->gprs[2]);
-
- if (test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall_exit(regs, 0);
-}
-
/*
* user_regset definitions.
*/
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 1fbed91c73bc..60da976eee6f 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -338,7 +338,7 @@ int __init arch_early_irq_init(void)
return 0;
}
-static int __init async_stack_realloc(void)
+static int __init stack_realloc(void)
{
unsigned long old, new;
@@ -346,11 +346,18 @@ static int __init async_stack_realloc(void)
new = stack_alloc();
if (!new)
panic("Couldn't allocate async stack");
- S390_lowcore.async_stack = new + STACK_INIT_OFFSET;
+ WRITE_ONCE(S390_lowcore.async_stack, new + STACK_INIT_OFFSET);
free_pages(old, THREAD_SIZE_ORDER);
+
+ old = S390_lowcore.mcck_stack - STACK_INIT_OFFSET;
+ new = stack_alloc();
+ if (!new)
+ panic("Couldn't allocate machine check stack");
+ WRITE_ONCE(S390_lowcore.mcck_stack, new + STACK_INIT_OFFSET);
+ memblock_free(old, THREAD_SIZE);
return 0;
}
-early_initcall(async_stack_realloc);
+early_initcall(stack_realloc);
void __init arch_call_rest_init(void)
{
@@ -372,6 +379,7 @@ void __init arch_call_rest_init(void)
static void __init setup_lowcore_dat_off(void)
{
unsigned long int_psw_mask = PSW_KERNEL_BITS;
+ unsigned long mcck_stack;
struct lowcore *lc;
if (IS_ENABLED(CONFIG_KASAN))
@@ -411,8 +419,7 @@ static void __init setup_lowcore_dat_off(void)
memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
sizeof(lc->alt_stfle_fac_list));
nmi_alloc_boot_cpu(lc);
- lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
- lc->async_enter_timer = S390_lowcore.async_enter_timer;
+ lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
lc->exit_timer = S390_lowcore.exit_timer;
lc->user_timer = S390_lowcore.user_timer;
lc->system_timer = S390_lowcore.system_timer;
@@ -440,6 +447,12 @@ static void __init setup_lowcore_dat_off(void)
lc->restart_data = 0;
lc->restart_source = -1UL;
+ mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+ if (!mcck_stack)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, THREAD_SIZE, THREAD_SIZE);
+ lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
+
/* Setup absolute zero lowcore */
mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index b27b6c1f058d..90163e6184f5 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -16,6 +16,7 @@
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
+#include <linux/entry-common.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
@@ -170,6 +171,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
fpregs_load(&user_sregs.fpregs, &current->thread.fpu);
clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
+ clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART);
return 0;
}
@@ -459,7 +461,8 @@ static void handle_signal(struct ksignal *ksig, sigset_t *oldset,
* the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that.
*/
-void do_signal(struct pt_regs *regs)
+
+void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
{
struct ksignal ksig;
sigset_t *oldset = sigmask_to_save();
@@ -472,7 +475,7 @@ void do_signal(struct pt_regs *regs)
current->thread.system_call =
test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0;
- if (test_thread_flag(TIF_SIGPENDING) && get_signal(&ksig)) {
+ if (has_signal && get_signal(&ksig)) {
/* Whee! Actually deliver the signal. */
if (current->thread.system_call) {
regs->int_code = current->thread.system_call;
@@ -498,6 +501,7 @@ void do_signal(struct pt_regs *regs)
}
/* No longer in a system call */
clear_pt_regs_flag(regs, PIF_SYSCALL);
+ clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART);
rseq_signal_deliver(&ksig, regs);
if (is_compat_task())
handle_signal32(&ksig, oldset, regs);
@@ -508,6 +512,7 @@ void do_signal(struct pt_regs *regs)
/* No handlers present - check for system call restart */
clear_pt_regs_flag(regs, PIF_SYSCALL);
+ clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART);
if (current->thread.system_call) {
regs->int_code = current->thread.system_call;
switch (regs->gprs[2]) {
@@ -520,9 +525,9 @@ void do_signal(struct pt_regs *regs)
case -ERESTARTNOINTR:
/* Restart system call with magic TIF bit. */
regs->gprs[2] = regs->orig_gpr2;
- set_pt_regs_flag(regs, PIF_SYSCALL);
+ set_pt_regs_flag(regs, PIF_SYSCALL_RESTART);
if (test_thread_flag(TIF_SINGLE_STEP))
- clear_pt_regs_flag(regs, PIF_PER_TRAP);
+ clear_thread_flag(TIF_PER_TRAP);
break;
}
}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 27c763014114..58c8afa3da65 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -30,6 +30,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
+#include <linux/irq_work.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
@@ -62,6 +63,7 @@ enum {
ec_call_function_single,
ec_stop_cpu,
ec_mcck_pending,
+ ec_irq_work,
};
enum {
@@ -189,7 +191,7 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
- unsigned long async_stack, nodat_stack;
+ unsigned long async_stack, nodat_stack, mcck_stack;
struct lowcore *lc;
if (pcpu != &pcpu_devices[0]) {
@@ -202,13 +204,15 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
}
async_stack = stack_alloc();
- if (!async_stack)
- goto out;
+ mcck_stack = stack_alloc();
+ if (!async_stack || !mcck_stack)
+ goto out_stack;
lc = pcpu->lowcore;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
lc->async_stack = async_stack + STACK_INIT_OFFSET;
lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
+ lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
lc->spinlock_index = 0;
@@ -216,12 +220,13 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
if (nmi_alloc_per_cpu(lc))
- goto out_async;
+ goto out_stack;
lowcore_ptr[cpu] = lc;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
return 0;
-out_async:
+out_stack:
+ stack_free(mcck_stack);
stack_free(async_stack);
out:
if (pcpu != &pcpu_devices[0]) {
@@ -233,16 +238,18 @@ out:
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
- unsigned long async_stack, nodat_stack, lowcore;
+ unsigned long async_stack, nodat_stack, mcck_stack, lowcore;
nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
+ mcck_stack = pcpu->lowcore->mcck_stack - STACK_INIT_OFFSET;
lowcore = (unsigned long) pcpu->lowcore;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
lowcore_ptr[pcpu - pcpu_devices] = NULL;
nmi_free_per_cpu(pcpu->lowcore);
stack_free(async_stack);
+ stack_free(mcck_stack);
if (pcpu == &pcpu_devices[0])
return;
free_pages(nodat_stack, THREAD_SIZE_ORDER);
@@ -429,10 +436,12 @@ void notrace smp_yield_cpu(int cpu)
*/
void notrace smp_emergency_stop(void)
{
- cpumask_t cpumask;
+ static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
+ static cpumask_t cpumask;
u64 end;
int cpu;
+ arch_spin_lock(&lock);
cpumask_copy(&cpumask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &cpumask);
@@ -453,6 +462,7 @@ void notrace smp_emergency_stop(void)
break;
cpu_relax();
}
+ arch_spin_unlock(&lock);
}
NOKPROBE_SYMBOL(smp_emergency_stop);
@@ -499,7 +509,9 @@ static void smp_handle_ext_call(void)
if (test_bit(ec_call_function_single, &bits))
generic_smp_call_function_single_interrupt();
if (test_bit(ec_mcck_pending, &bits))
- s390_handle_mcck();
+ __s390_handle_mcck();
+ if (test_bit(ec_irq_work, &bits))
+ irq_work_run();
}
static void do_ext_call_interrupt(struct ext_code ext_code,
@@ -532,6 +544,13 @@ void smp_send_reschedule(int cpu)
pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+ pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_irq_work);
+}
+#endif
+
/*
* parameter area for the set/clear control bit callbacks
*/
@@ -770,11 +789,13 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
struct sclp_core_entry *core;
- cpumask_t avail;
+ static cpumask_t avail;
bool configured;
u16 core_id;
int nr, i;
+ get_online_cpus();
+ mutex_lock(&smp_cpu_state_mutex);
nr = 0;
cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
/*
@@ -795,6 +816,8 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
configured = i < info->configured;
nr += smp_add_core(&info->core[i], &avail, configured, early);
}
+ mutex_unlock(&smp_cpu_state_mutex);
+ put_online_cpus();
return nr;
}
@@ -842,9 +865,7 @@ void __init smp_detect_cpus(void)
pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
/* Add CPUs present at boot */
- get_online_cpus();
__smp_rescan_cpus(info, true);
- put_online_cpus();
memblock_free_early((unsigned long)info, sizeof(*info));
}
@@ -1173,11 +1194,7 @@ int __ref smp_rescan_cpus(void)
if (!info)
return -ENOMEM;
smp_get_core_info(info, 0);
- get_online_cpus();
- mutex_lock(&smp_cpu_state_mutex);
nr = __smp_rescan_cpus(info, false);
- mutex_unlock(&smp_cpu_state_mutex);
- put_online_cpus();
kfree(info);
if (nr)
topology_schedule_update();
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/syscall.c
index 202fa73ac167..bc8e650e377d 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/syscall.c
@@ -29,6 +29,13 @@
#include <linux/unistd.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>
+#include <linux/string.h>
+#include <linux/thread_info.h>
+#include <linux/entry-common.h>
+
+#include <asm/ptrace.h>
+#include <asm/vtime.h>
+
#include "entry.h"
/*
@@ -100,3 +107,62 @@ SYSCALL_DEFINE0(ni_syscall)
{
return -ENOSYS;
}
+
+void do_syscall(struct pt_regs *regs)
+{
+ unsigned long nr;
+
+ nr = regs->int_code & 0xffff;
+ if (!nr) {
+ nr = regs->gprs[1] & 0xffff;
+ regs->int_code &= ~0xffffUL;
+ regs->int_code |= nr;
+ }
+
+ regs->gprs[2] = nr;
+
+ nr = syscall_enter_from_user_mode_work(regs, nr);
+
+ /*
+ * In the s390 ptrace ABI, both the syscall number and the return value
+ * use gpr2. However, userspace puts the syscall number either in the
+ * svc instruction itself, or uses gpr1. To make at least skipping syscalls
+ * work, the ptrace code sets PIF_SYSCALL_RET_SET, which is checked here
+ * and if set, the syscall will be skipped.
+ */
+ if (!test_pt_regs_flag(regs, PIF_SYSCALL_RET_SET)) {
+ regs->gprs[2] = -ENOSYS;
+ if (likely(nr < NR_syscalls))
+ regs->gprs[2] = current->thread.sys_call_table[nr](regs);
+ } else {
+ clear_pt_regs_flag(regs, PIF_SYSCALL_RET_SET);
+ }
+ syscall_exit_to_user_mode_work(regs);
+}
+
+void noinstr __do_syscall(struct pt_regs *regs, int per_trap)
+{
+ enter_from_user_mode(regs);
+
+ memcpy(&regs->gprs[8], S390_lowcore.save_area_sync, 8 * sizeof(unsigned long));
+ memcpy(&regs->int_code, &S390_lowcore.svc_ilc, sizeof(regs->int_code));
+ regs->psw = S390_lowcore.svc_old_psw;
+
+ update_timer_sys();
+
+ local_irq_enable();
+ regs->orig_gpr2 = regs->gprs[2];
+
+ if (per_trap)
+ set_thread_flag(TIF_PER_TRAP);
+
+ for (;;) {
+ regs->flags = 0;
+ set_pt_regs_flag(regs, PIF_SYSCALL);
+ do_syscall(regs);
+ if (!test_pt_regs_flag(regs, PIF_SYSCALL_RESTART))
+ break;
+ local_irq_enable();
+ }
+ exit_to_user_mode();
+}
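To illustrate the two conventions do_syscall() above has to handle — the syscall number encoded in the svc instruction itself versus "svc 0" with the number passed in %r1 — a hedged user-space sketch follows (not part of this patch; it assumes __NR_getpid is 20 on s390x and uses glibc-style register bindings):

#include <stdio.h>
#include <sys/syscall.h>	/* __NR_getpid */
#include <unistd.h>

/* Number encoded in the instruction: int_code & 0xffff is already non-zero. */
static long getpid_svc_immediate(void)
{
	register long r2 asm("2");

	asm volatile("svc 20"		/* 20 == __NR_getpid on s390x (assumed) */
		     : "=d" (r2) : : "memory");
	return r2;
}

/* "svc 0": do_syscall() above falls back to gprs[1] for the number. */
static long getpid_svc_gpr1(void)
{
	register long r1 asm("1") = __NR_getpid;
	register long r2 asm("2");

	asm volatile("svc 0"
		     : "=d" (r2) : "d" (r1) : "memory");
	return r2;
}

int main(void)
{
	printf("%ld %ld %d\n", getpid_svc_immediate(), getpid_svc_gpr1(), getpid());
	return 0;
}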
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index d443423495e5..3abef2144dac 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -444,3 +444,4 @@
439 common faccessat2 sys_faccessat2 sys_faccessat2
440 common process_madvise sys_process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr sys_mount_setattr
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index c59cb44fbb7d..06bcfa636638 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -55,11 +55,7 @@
#include <asm/cio.h>
#include "entry.h"
-unsigned char tod_clock_base[16] __aligned(8) = {
- /* Force to data section. */
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
-};
+union tod_clock tod_clock_base __section(".data");
EXPORT_SYMBOL_GPL(tod_clock_base);
u64 clock_comparator_max = -1ULL;
@@ -86,7 +82,7 @@ void __init time_early_init(void)
struct ptff_qui qui;
/* Initialize TOD steering parameters */
- tod_steering_end = *(unsigned long long *) &tod_clock_base[1];
+ tod_steering_end = tod_clock_base.tod;
vdso_data->arch_data.tod_steering_end = tod_steering_end;
if (!test_facility(28))
@@ -113,18 +109,13 @@ unsigned long long notrace sched_clock(void)
}
NOKPROBE_SYMBOL(sched_clock);
-static void ext_to_timespec64(unsigned char *clk, struct timespec64 *xt)
+static void ext_to_timespec64(union tod_clock *clk, struct timespec64 *xt)
{
- unsigned long long high, low, rem, sec, nsec;
+ unsigned long rem, sec, nsec;
- /* Split extendnd TOD clock to micro-seconds and sub-micro-seconds */
- high = (*(unsigned long long *) clk) >> 4;
- low = (*(unsigned long long *)&clk[7]) << 4;
- /* Calculate seconds and nano-seconds */
- sec = high;
+ sec = clk->us;
rem = do_div(sec, 1000000);
- nsec = (((low >> 32) + (rem << 32)) * 1000) >> 32;
-
+ nsec = ((clk->sus + (rem << 12)) * 125) >> 9;
xt->tv_sec = sec;
xt->tv_nsec = nsec;
}
@@ -204,30 +195,26 @@ static void stp_reset(void);
void read_persistent_clock64(struct timespec64 *ts)
{
- unsigned char clk[STORE_CLOCK_EXT_SIZE];
- __u64 delta;
+ union tod_clock clk;
+ u64 delta;
delta = initial_leap_seconds + TOD_UNIX_EPOCH;
- get_tod_clock_ext(clk);
- *(__u64 *) &clk[1] -= delta;
- if (*(__u64 *) &clk[1] > delta)
- clk[0]--;
- ext_to_timespec64(clk, ts);
+ store_tod_clock_ext(&clk);
+ clk.eitod -= delta;
+ ext_to_timespec64(&clk, ts);
}
void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
struct timespec64 *boot_offset)
{
- unsigned char clk[STORE_CLOCK_EXT_SIZE];
struct timespec64 boot_time;
- __u64 delta;
+ union tod_clock clk;
+ u64 delta;
delta = initial_leap_seconds + TOD_UNIX_EPOCH;
- memcpy(clk, tod_clock_base, STORE_CLOCK_EXT_SIZE);
- *(__u64 *)&clk[1] -= delta;
- if (*(__u64 *)&clk[1] > delta)
- clk[0]--;
- ext_to_timespec64(clk, &boot_time);
+ clk = tod_clock_base;
+ clk.eitod -= delta;
+ ext_to_timespec64(&clk, &boot_time);
read_persistent_clock64(wall_time);
*boot_offset = timespec64_sub(*wall_time, boot_time);
@@ -381,10 +368,7 @@ static void clock_sync_global(unsigned long long delta)
struct ptff_qto qto;
/* Fixup the monotonic sched clock. */
- *(unsigned long long *) &tod_clock_base[1] += delta;
- if (*(unsigned long long *) &tod_clock_base[1] < delta)
- /* Epoch overflow */
- tod_clock_base[0]++;
+ tod_clock_base.eitod += delta;
/* Adjust TOD steering parameters. */
now = get_tod_clock();
adj = tod_steering_end - now;
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index ca47141a5be9..e7ce447651b9 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -62,16 +62,16 @@ static struct mask_info drawer_info;
struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
-static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
+static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int cpu)
{
- cpumask_t mask;
+ static cpumask_t mask;
cpumask_copy(&mask, cpumask_of(cpu));
switch (topology_mode) {
case TOPOLOGY_MODE_HW:
while (info) {
if (cpumask_test_cpu(cpu, &info->mask)) {
- mask = info->mask;
+ cpumask_copy(&mask, &info->mask);
break;
}
info = info->next;
@@ -89,23 +89,24 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
break;
}
cpumask_and(&mask, &mask, cpu_online_mask);
- return mask;
+ cpumask_copy(dst, &mask);
}
-static cpumask_t cpu_thread_map(unsigned int cpu)
+static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
{
- cpumask_t mask;
+ static cpumask_t mask;
int i;
cpumask_copy(&mask, cpumask_of(cpu));
if (topology_mode != TOPOLOGY_MODE_HW)
- return mask;
+ goto out;
cpu -= cpu % (smp_cpu_mtid + 1);
for (i = 0; i <= smp_cpu_mtid; i++)
if (cpu_present(cpu + i))
cpumask_set_cpu(cpu + i, &mask);
cpumask_and(&mask, &mask, cpu_online_mask);
- return mask;
+out:
+ cpumask_copy(dst, &mask);
}
#define TOPOLOGY_CORE_BITS 64
@@ -250,10 +251,10 @@ void update_cpu_masks(void)
for_each_possible_cpu(cpu) {
topo = &cpu_topology[cpu];
- topo->thread_mask = cpu_thread_map(cpu);
- topo->core_mask = cpu_group_map(&socket_info, cpu);
- topo->book_mask = cpu_group_map(&book_info, cpu);
- topo->drawer_mask = cpu_group_map(&drawer_info, cpu);
+ cpu_thread_map(&topo->thread_mask, cpu);
+ cpu_group_map(&topo->core_mask, &socket_info, cpu);
+ cpu_group_map(&topo->book_mask, &book_info, cpu);
+ cpu_group_map(&topo->drawer_mask, &drawer_info, cpu);
topo->booted_cores = 0;
if (topology_mode != TOPOLOGY_MODE_HW) {
id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 8d1e8a1a97df..db7dd59b570c 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -13,6 +13,8 @@
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'asm.s'.
*/
+#include "asm/irqflags.h"
+#include "asm/ptrace.h"
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/extable.h>
@@ -23,7 +25,9 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>
+#include <linux/entry-common.h>
#include <asm/fpu/api.h>
+#include <asm/vtime.h>
#include "entry.h"
static inline void __user *get_trap_ip(struct pt_regs *regs)
@@ -288,3 +292,64 @@ void __init trap_init(void)
local_mcck_enable();
test_monitor_call();
}
+
+void noinstr __do_pgm_check(struct pt_regs *regs)
+{
+ unsigned long last_break = S390_lowcore.breaking_event_addr;
+ unsigned int trapnr, syscall_redirect = 0;
+ irqentry_state_t state;
+
+ regs->int_code = *(u32 *)&S390_lowcore.pgm_ilc;
+ regs->int_parm_long = S390_lowcore.trans_exc_code;
+
+ state = irqentry_enter(regs);
+
+ if (user_mode(regs)) {
+ update_timer_sys();
+ if (last_break < 4096)
+ last_break = 1;
+ current->thread.last_break = last_break;
+ regs->args[0] = last_break;
+ }
+
+ if (S390_lowcore.pgm_code & 0x0200) {
+ /* transaction abort */
+ memcpy(&current->thread.trap_tdb, &S390_lowcore.pgm_tdb, 256);
+ }
+
+ if (S390_lowcore.pgm_code & PGM_INT_CODE_PER) {
+ if (user_mode(regs)) {
+ struct per_event *ev = &current->thread.per_event;
+
+ set_thread_flag(TIF_PER_TRAP);
+ ev->address = S390_lowcore.per_address;
+ ev->cause = *(u16 *)&S390_lowcore.per_code;
+ ev->paid = S390_lowcore.per_access_id;
+ } else {
+ /* PER event in kernel is kprobes */
+ __arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER);
+ do_per_trap(regs);
+ goto out;
+ }
+ }
+
+ if (!irqs_disabled_flags(regs->psw.mask))
+ trace_hardirqs_on();
+ __arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER);
+
+ trapnr = regs->int_code & PGM_INT_CODE_MASK;
+ if (trapnr)
+ pgm_check_table[trapnr](regs);
+ syscall_redirect = user_mode(regs) && test_pt_regs_flag(regs, PIF_SYSCALL);
+out:
+ local_irq_disable();
+ irqentry_exit(regs, state);
+
+ if (syscall_redirect) {
+ enter_from_user_mode(regs);
+ local_irq_enable();
+ regs->orig_gpr2 = regs->gprs[2];
+ do_syscall(regs);
+ exit_to_user_mode();
+ }
+}
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index 5007fac01bb5..bbf8622bbf5d 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -32,7 +32,7 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
return -EINVAL;
if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT)
return -EINVAL;
- clear_pt_regs_flag(regs, PIF_PER_TRAP);
+ clear_thread_flag(TIF_PER_TRAP);
auprobe->saved_per = psw_bits(regs->psw).per;
auprobe->saved_int_code = regs->int_code;
regs->int_code = UPROBE_TRAP_NR;
@@ -103,7 +103,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
/* fix per address */
current->thread.per_event.address = utask->vaddr;
/* trigger per event */
- set_pt_regs_flag(regs, PIF_PER_TRAP);
+ set_thread_flag(TIF_PER_TRAP);
}
return 0;
}
@@ -259,7 +259,7 @@ static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
return;
current->thread.per_event.address = regs->psw.addr;
current->thread.per_event.cause = PER_EVENT_STORE >> 16;
- set_pt_regs_flag(regs, PIF_PER_TRAP);
+ set_thread_flag(TIF_PER_TRAP);
}
/*
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 883bfed9f5c2..b2d2ad153067 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -368,7 +368,7 @@ static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return scnprintf(page, PAGE_SIZE, "%d\n",
- uv_info.max_guest_cpus);
+ uv_info.max_guest_cpu_id + 1);
}
static struct kobj_attribute uv_query_max_guest_cpus_attr =
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 8bc269c55fd3..8c4e07d533c8 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -6,186 +6,226 @@
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
-#include <linux/init.h>
+#include <linux/binfmts.h>
+#include <linux/compat.h>
+#include <linux/elf.h>
#include <linux/errno.h>
-#include <linux/sched.h>
+#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
#include <linux/slab.h>
-#include <linux/user.h>
-#include <linux/elf.h>
-#include <linux/security.h>
-#include <linux/memblock.h>
-#include <linux/compat.h>
-#include <linux/binfmts.h>
+#include <linux/smp.h>
+#include <linux/time_namespace.h>
#include <vdso/datapage.h>
-#include <asm/asm-offsets.h>
-#include <asm/processor.h>
-#include <asm/mmu.h>
-#include <asm/mmu_context.h>
-#include <asm/sections.h>
#include <asm/vdso.h>
-#include <asm/facility.h>
-#include <asm/timex.h>
-extern char vdso64_start, vdso64_end;
-static void *vdso64_kbase = &vdso64_start;
-static unsigned int vdso64_pages;
-static struct page **vdso64_pagelist;
+extern char vdso64_start[], vdso64_end[];
+static unsigned int vdso_pages;
+
+static struct vm_special_mapping vvar_mapping;
+
+static union {
+ struct vdso_data data[CS_BASES];
+ u8 page[PAGE_SIZE];
+} vdso_data_store __page_aligned_data;
+
+struct vdso_data *vdso_data = vdso_data_store.data;
+
+enum vvar_pages {
+ VVAR_DATA_PAGE_OFFSET,
+ VVAR_TIMENS_PAGE_OFFSET,
+ VVAR_NR_PAGES,
+};
-/*
- * Should the kernel map a VDSO page into processes and pass its
- * address down to glibc upon exec()?
- */
unsigned int __read_mostly vdso_enabled = 1;
-static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
- struct vm_area_struct *vma, struct vm_fault *vmf)
+static int __init vdso_setup(char *str)
+{
+ bool enabled;
+
+ if (!kstrtobool(str, &enabled))
+ vdso_enabled = enabled;
+ return 1;
+}
+__setup("vdso=", vdso_setup);
+
+#ifdef CONFIG_TIME_NS
+struct vdso_data *arch_get_vdso_data(void *vvar_page)
+{
+ return (struct vdso_data *)(vvar_page);
+}
+
+static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
- struct page **vdso_pagelist;
- unsigned long vdso_pages;
+ if (likely(vma->vm_mm == current->mm))
+ return current->nsproxy->time_ns->vvar_page;
+ /*
+ * VM_PFNMAP | VM_IO protect .fault() handler from being called
+ * through interfaces like /proc/$pid/mem or
+ * process_vm_{readv,writev}() as long as there's no .access()
+ * in special_mapping_vmops().
+ * For more details check_vma_flags() and __access_remote_vm()
+ */
+ WARN(1, "vvar_page accessed remotely");
+ return NULL;
+}
- vdso_pagelist = vdso64_pagelist;
- vdso_pages = vdso64_pages;
+/*
+ * The VVAR page layout depends on whether a task belongs to the root or
+ * non-root time namespace. Whenever a task changes its namespace, the VVAR
+ * page tables are cleared and then they will be re-faulted with a
+ * corresponding layout.
+ * See also the comment near timens_setup_vdso_data() for details.
+ */
+int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
+{
+ struct mm_struct *mm = task->mm;
+ struct vm_area_struct *vma;
- if (vmf->pgoff >= vdso_pages)
- return VM_FAULT_SIGBUS;
+ mmap_read_lock(mm);
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ unsigned long size = vma->vm_end - vma->vm_start;
- vmf->page = vdso_pagelist[vmf->pgoff];
- get_page(vmf->page);
+ if (!vma_is_special_mapping(vma, &vvar_mapping))
+ continue;
+ zap_page_range(vma, vma->vm_start, size);
+ break;
+ }
+ mmap_read_unlock(mm);
return 0;
}
+#else
+static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+#endif
+
+static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *timens_page = find_timens_vvar_page(vma);
+ unsigned long addr, pfn;
+ vm_fault_t err;
+
+ switch (vmf->pgoff) {
+ case VVAR_DATA_PAGE_OFFSET:
+ pfn = virt_to_pfn(vdso_data);
+ if (timens_page) {
+ /*
+ * Fault in VVAR page too, since it will be accessed
+ * to get clock data anyway.
+ */
+ addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
+ err = vmf_insert_pfn(vma, addr, pfn);
+ if (unlikely(err & VM_FAULT_ERROR))
+ return err;
+ pfn = page_to_pfn(timens_page);
+ }
+ break;
+#ifdef CONFIG_TIME_NS
+ case VVAR_TIMENS_PAGE_OFFSET:
+ /*
+ * If a task belongs to a time namespace then a namespace
+ * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
+ * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
+ * offset.
+ * See also the comment near timens_setup_vdso_data().
+ */
+ if (!timens_page)
+ return VM_FAULT_SIGBUS;
+ pfn = virt_to_pfn(vdso_data);
+ break;
+#endif /* CONFIG_TIME_NS */
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+ return vmf_insert_pfn(vma, vmf->address, pfn);
+}
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *vma)
{
current->mm->context.vdso_base = vma->vm_start;
-
return 0;
}
-static const struct vm_special_mapping vdso_mapping = {
+static struct vm_special_mapping vvar_mapping = {
+ .name = "[vvar]",
+ .fault = vvar_fault,
+};
+
+static struct vm_special_mapping vdso_mapping = {
.name = "[vdso]",
- .fault = vdso_fault,
.mremap = vdso_mremap,
};
-static int __init vdso_setup(char *str)
-{
- bool enabled;
-
- if (!kstrtobool(str, &enabled))
- vdso_enabled = enabled;
- return 1;
-}
-__setup("vdso=", vdso_setup);
-
-/*
- * The vdso data page
- */
-static union {
- struct vdso_data data;
- u8 page[PAGE_SIZE];
-} vdso_data_store __page_aligned_data;
-struct vdso_data *vdso_data = (struct vdso_data *)&vdso_data_store.data;
-
-void vdso_getcpu_init(void)
+int vdso_getcpu_init(void)
{
set_tod_programmable_field(smp_processor_id());
+ return 0;
}
+early_initcall(vdso_getcpu_init); /* Must be called before SMP init */
-/*
- * This is called from binfmt_elf, we create the special vma for the
- * vDSO and insert it into the mm struct tree
- */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
+ unsigned long vdso_text_len, vdso_mapping_len;
+ unsigned long vvar_start, vdso_text_start;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- unsigned long vdso_pages;
- unsigned long vdso_base;
int rc;
- if (!vdso_enabled)
- return 0;
-
- if (is_compat_task())
- return 0;
-
- vdso_pages = vdso64_pages;
- /*
- * vDSO has a problem and was disabled, just don't "enable" it for
- * the process
- */
- if (vdso_pages == 0)
+ BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
+ if (!vdso_enabled || is_compat_task())
return 0;
-
- /*
- * pick a base address for the vDSO in process space. We try to put
- * it at vdso_base which is the "natural" base for it, but we might
- * fail and end up putting it elsewhere.
- */
if (mmap_write_lock_killable(mm))
return -EINTR;
- vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
- if (IS_ERR_VALUE(vdso_base)) {
- rc = vdso_base;
- goto out_up;
- }
-
- /*
- * our vma flags don't have VM_WRITE so by default, the process
- * isn't allowed to write those pages.
- * gdb can break that with ptrace interface, and thus trigger COW
- * on those pages but it's then your responsibility to never do that
- * on the "data" page of the vDSO or you'll stop getting kernel
- * updates and your nice userland gettimeofday will be totally dead.
- * It's fine to use that for setting breakpoints in the vDSO code
- * pages though.
- */
- vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+ vdso_text_len = vdso_pages << PAGE_SHIFT;
+ vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;
+ vvar_start = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
+ rc = vvar_start;
+ if (IS_ERR_VALUE(vvar_start))
+ goto out;
+ vma = _install_special_mapping(mm, vvar_start, VVAR_NR_PAGES*PAGE_SIZE,
+ VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
+ VM_PFNMAP,
+ &vvar_mapping);
+ rc = PTR_ERR(vma);
+ if (IS_ERR(vma))
+ goto out;
+ vdso_text_start = vvar_start + VVAR_NR_PAGES * PAGE_SIZE;
+ /* VM_MAYWRITE for COW so gdb can set breakpoints */
+ vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
&vdso_mapping);
if (IS_ERR(vma)) {
+ do_munmap(mm, vvar_start, PAGE_SIZE, NULL);
rc = PTR_ERR(vma);
- goto out_up;
+ } else {
+ current->mm->context.vdso_base = vdso_text_start;
+ rc = 0;
}
-
- current->mm->context.vdso_base = vdso_base;
- rc = 0;
-
-out_up:
+out:
mmap_write_unlock(mm);
return rc;
}
static int __init vdso_init(void)
{
+ struct page **pages;
int i;
- vdso_getcpu_init();
- /* Calculate the size of the 64 bit vDSO */
- vdso64_pages = ((&vdso64_end - &vdso64_start
- + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
-
- /* Make sure pages are in the correct state */
- vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *),
- GFP_KERNEL);
- BUG_ON(vdso64_pagelist == NULL);
- for (i = 0; i < vdso64_pages - 1; i++) {
- struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
- get_page(pg);
- vdso64_pagelist[i] = pg;
+ vdso_pages = (vdso64_end - vdso64_start) >> PAGE_SHIFT;
+ pages = kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
+ if (!pages) {
+ vdso_enabled = 0;
+ return -ENOMEM;
}
- vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
- vdso64_pagelist[vdso64_pages] = NULL;
-
- get_page(virt_to_page(vdso_data));
-
+ for (i = 0; i < vdso_pages; i++)
+ pages[i] = virt_to_page(vdso64_start + i * PAGE_SIZE);
+ pages[vdso_pages] = NULL;
+ vdso_mapping.pages = pages;
return 0;
}
-early_initcall(vdso_init);
+arch_initcall(vdso_init);
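The vvar_fault() handler added above picks, per page offset, which physical page backs the VVAR area: for a task in a non-root time namespace the namespace page shadows the real vdso data page, and the real data page only becomes reachable at the timens offset. A minimal user-space sketch of that selection logic, with stand-in names rather than kernel code:

#include <stdio.h>

enum vvar_pages { VVAR_DATA_PAGE_OFFSET, VVAR_TIMENS_PAGE_OFFSET, VVAR_NR_PAGES };

static const char *data_page = "vdso_data";     /* stands in for the real vdso data page */
static const char *timens_page = "timens_data"; /* stands in for the namespace page */

/* Return the page that backs the given offset, or NULL to signal SIGBUS. */
static const char *select_vvar_page(unsigned long pgoff, int in_time_ns)
{
	switch (pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		/* Non-root time namespace: namespace data shadows the real data. */
		return in_time_ns ? timens_page : data_page;
	case VVAR_TIMENS_PAGE_OFFSET:
		/* The real data is only reachable here for namespaced tasks. */
		return in_time_ns ? data_page : NULL;
	default:
		return NULL;
	}
}

int main(void)
{
	for (int ns = 0; ns <= 1; ns++) {
		for (unsigned long off = 0; off < VVAR_NR_PAGES; off++) {
			const char *p = select_vvar_page(off, ns);
			printf("ns=%d pgoff=%lu -> %s\n", ns, off, p ? p : "SIGBUS");
		}
	}
	return 0;
}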
diff --git a/arch/s390/kernel/vdso64/getcpu.c b/arch/s390/kernel/vdso64/getcpu.c
index 5b2bc7494d5b..5c5d4a848b76 100644
--- a/arch/s390/kernel/vdso64/getcpu.c
+++ b/arch/s390/kernel/vdso64/getcpu.c
@@ -8,12 +8,12 @@
int __s390_vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
{
- __u16 todval[8];
+ union tod_clock clk;
/* CPU number is stored in the programmable field of the TOD clock */
- get_tod_clock_ext((char *)todval);
+ store_tod_clock_ext(&clk);
if (cpu)
- *cpu = todval[7];
+ *cpu = clk.pf;
/* NUMA node is always zero */
if (node)
*node = 0;
diff --git a/arch/s390/kernel/vdso64/vdso64.lds.S b/arch/s390/kernel/vdso64/vdso64.lds.S
index 7bde3909290f..518f1ea405f4 100644
--- a/arch/s390/kernel/vdso64/vdso64.lds.S
+++ b/arch/s390/kernel/vdso64/vdso64.lds.S
@@ -13,6 +13,10 @@ ENTRY(_start)
SECTIONS
{
+ PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
+#ifdef CONFIG_TIME_NS
+ PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
+#endif
. = VDSO64_LBASE + SIZEOF_HEADERS;
.hash : { *(.hash) } :text
@@ -94,9 +98,6 @@ SECTIONS
.debug_ranges 0 : { *(.debug_ranges) }
.gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
- . = ALIGN(PAGE_SIZE);
- PROVIDE(_vdso_data = .);
-
/DISCARD/ : {
*(.note.GNU-stack)
*(.branch_lt)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 5aaa2ca6a928..73c7afcc0527 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -13,7 +13,7 @@
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>
-
+#include <asm/alternative.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>
#include <asm/cpu_mf.h>
@@ -128,15 +128,13 @@ static int do_account_vtime(struct task_struct *tsk)
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
- asm volatile(
- " stpt %0\n" /* Store current cpu timer value */
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
- " stckf %1" /* Store current tod clock value */
-#else
- " stck %1" /* Store current tod clock value */
-#endif
- : "=Q" (S390_lowcore.last_update_timer),
- "=Q" (S390_lowcore.last_update_clock));
+ /* Use STORE CLOCK by default, STORE CLOCK FAST if available. */
+ alternative_io("stpt %0\n .insn s,0xb2050000,%1\n",
+ "stpt %0\n .insn s,0xb27c0000,%1\n",
+ 25,
+ ASM_OUTPUT2("=Q" (S390_lowcore.last_update_timer),
+ "=Q" (S390_lowcore.last_update_clock)),
+ ASM_NO_INPUT_CLOBBER("cc"));
clock = S390_lowcore.last_update_clock - clock;
timer -= S390_lowcore.last_update_timer;
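The alternative_io() change above swaps a compile-time #ifdef between STCK and STCKF for a run-time choice keyed on facility bit 25. The kernel patches the instruction in place; a rough user-space analogue of the same idea, selecting one of two implementations once at startup (all names below are made up, this is not the kernel alternatives API):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static uint64_t store_clock_slow(void) { return 1000; } /* stands in for STCK  */
static uint64_t store_clock_fast(void) { return 1001; } /* stands in for STCKF */

/* The "call site": starts out pointing at the conservative implementation. */
static uint64_t (*store_clock)(void) = store_clock_slow;

static bool facility_25_available(void) { return true; } /* hypothetical probe */

int main(void)
{
	if (facility_25_available())
		store_clock = store_clock_fast; /* "patch" the call site once */
	printf("clock value: %llu\n", (unsigned long long)store_clock());
	return 0;
}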
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index dbafd057ca6a..2f09e9d7dc95 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -45,6 +45,7 @@
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
+#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -164,12 +165,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
-struct kvm_s390_tod_clock_ext {
- __u8 epoch_idx;
- __u64 tod;
- __u8 reserved[7];
-} __packed;
-
/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
@@ -1165,17 +1160,17 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
static void kvm_s390_get_tod_clock(struct kvm *kvm,
struct kvm_s390_vm_tod_clock *gtod)
{
- struct kvm_s390_tod_clock_ext htod;
+ union tod_clock clk;
preempt_disable();
- get_tod_clock_ext((char *)&htod);
+ store_tod_clock_ext(&clk);
- gtod->tod = htod.tod + kvm->arch.epoch;
+ gtod->tod = clk.tod + kvm->arch.epoch;
gtod->epoch_idx = 0;
if (test_kvm_facility(kvm, 139)) {
- gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
- if (gtod->tod < htod.tod)
+ gtod->epoch_idx = clk.ei + kvm->arch.epdx;
+ if (gtod->tod < clk.tod)
gtod->epoch_idx += 1;
}
@@ -3866,18 +3861,18 @@ void kvm_s390_set_tod_clock(struct kvm *kvm,
const struct kvm_s390_vm_tod_clock *gtod)
{
struct kvm_vcpu *vcpu;
- struct kvm_s390_tod_clock_ext htod;
+ union tod_clock clk;
int i;
mutex_lock(&kvm->lock);
preempt_disable();
- get_tod_clock_ext((char *)&htod);
+ store_tod_clock_ext(&clk);
- kvm->arch.epoch = gtod->tod - htod.tod;
+ kvm->arch.epoch = gtod->tod - clk.tod;
kvm->arch.epdx = 0;
if (test_kvm_facility(kvm, 139)) {
- kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
+ kvm->arch.epdx = gtod->epoch_idx - clk.ei;
if (kvm->arch.epoch > gtod->tod)
kvm->arch.epdx -= 1;
}
@@ -4147,6 +4142,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
vcpu->run->s.regs.gprs,
sizeof(sie_page->pv_grregs));
}
+ if (test_cpu_flag(CIF_FPU))
+ load_fpu_regs();
exit_reason = sie64a(vcpu->arch.sie_block,
vcpu->run->s.regs.gprs);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
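The union tod_clock hunks above retire the ad-hoc kvm_s390_tod_clock_ext structure and the (char *) casts: callers now read named members (clk.tod, clk.ei, clk.pf) instead of poking into a byte buffer. A self-contained sketch of that overlay technique; the field sizes below are illustrative assumptions, not the authoritative s390 layout:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

union demo_tod_clock {
	uint8_t raw[16];          /* what STORE CLOCK EXTENDED writes */
	struct {
		uint8_t  ei;      /* epoch index (assumed position) */
		uint64_t tod;     /* time-of-day value */
		uint8_t  pad[5];
		uint16_t pf;      /* programmable field, holds the CPU id */
	} __attribute__((packed));
};

int main(void)
{
	union demo_tod_clock clk;

	memset(clk.raw, 0, sizeof(clk.raw));
	clk.pf = 3;               /* pretend CPU 3 stored its id */
	printf("cpu from programmable field: %u\n", clk.pf);
	printf("union size: %zu bytes\n", sizeof(clk));
	return 0;
}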
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index c5d0a58b2c29..bd803e091918 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -18,6 +18,7 @@
#include <asm/sclp.h>
#include <asm/nmi.h>
#include <asm/dis.h>
+#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -1028,6 +1029,8 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
*/
vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
barrier();
+ if (test_cpu_flag(CIF_FPU))
+ load_fpu_regs();
if (!kvm_s390_vcpu_sie_inhibited(vcpu))
rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
barrier();
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index e8f642446fed..2fece1fd210a 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -16,8 +16,8 @@
#include <asm/mmu_context.h>
#include <asm/facility.h>
-#ifdef CONFIG_DEBUG_USER_ASCE
-void debug_user_asce(void)
+#ifdef CONFIG_DEBUG_ENTRY
+void debug_user_asce(int exit)
{
unsigned long cr1, cr7;
@@ -25,12 +25,14 @@ void debug_user_asce(void)
__ctl_store(cr7, 7, 7);
if (cr1 == S390_lowcore.kernel_asce && cr7 == S390_lowcore.user_asce)
return;
- panic("incorrect ASCE on kernel exit\n"
+ panic("incorrect ASCE on kernel %s\n"
"cr1: %016lx cr7: %016lx\n"
"kernel: %016llx user: %016llx\n",
- cr1, cr7, S390_lowcore.kernel_asce, S390_lowcore.user_asce);
+ exit ? "exit" : "entry", cr1, cr7,
+ S390_lowcore.kernel_asce, S390_lowcore.user_asce);
+
}
-#endif /*CONFIG_DEBUG_USER_ASCE */
+#endif /*CONFIG_DEBUG_ENTRY */
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
static DEFINE_STATIC_KEY_FALSE(have_mvcos);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index b8210103de14..e30c7c781172 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -385,7 +385,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
* The instruction that caused the program check has
* been nullified. Don't signal single step via SIGTRAP.
*/
- clear_pt_regs_flag(regs, PIF_PER_TRAP);
+ clear_thread_flag(TIF_PER_TRAP);
if (kprobe_page_fault(regs, 14))
return 0;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 73a163065b95..0e76b2127dc6 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -297,6 +297,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
return -EINVAL;
+ VM_BUG_ON(!mhp_range_allowed(start, size, true));
rc = vmem_add_mapping(start, size);
if (rc)
return rc;
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 4e87c819ddea..781965f7210e 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -58,7 +58,7 @@ unsigned long *crst_table_alloc(struct mm_struct *mm)
if (!page)
return NULL;
arch_set_page_dat(page, 2);
- return (unsigned long *) page_to_phys(page);
+ return (unsigned long *) page_to_virt(page);
}
void crst_table_free(struct mm_struct *mm, unsigned long *table)
@@ -161,7 +161,7 @@ struct page *page_table_alloc_pgste(struct mm_struct *mm)
page = alloc_page(GFP_KERNEL);
if (page) {
- table = (u64 *)page_to_phys(page);
+ table = (u64 *)page_to_virt(page);
memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
}
@@ -194,7 +194,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
mask = atomic_read(&page->_refcount) >> 24;
mask = (mask | (mask >> 4)) & 3;
if (mask != 3) {
- table = (unsigned long *) page_to_phys(page);
+ table = (unsigned long *) page_to_virt(page);
bit = mask & 1; /* =1 -> second 2K */
if (bit)
table += PTRS_PER_PTE;
@@ -217,7 +217,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
}
arch_set_page_dat(page, 0);
/* Initialize page table */
- table = (unsigned long *) page_to_phys(page);
+ table = (unsigned long *) page_to_virt(page);
if (mm_alloc_pgste(mm)) {
/* Return 4K page table with PGSTEs */
atomic_xor_bits(&page->_refcount, 3 << 24);
@@ -239,10 +239,10 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
struct page *page;
unsigned int bit, mask;
- page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ page = virt_to_page(table);
if (!mm_alloc_pgste(mm)) {
/* Free 2K page table fragment of a 4K page */
- bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
+ bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
spin_lock_bh(&mm->context.lock);
mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
mask >>= 24;
@@ -269,14 +269,14 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
unsigned int bit, mask;
mm = tlb->mm;
- page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ page = virt_to_page(table);
if (mm_alloc_pgste(mm)) {
gmap_unlink(mm, table, vmaddr);
- table = (unsigned long *) (__pa(table) | 3);
+ table = (unsigned long *) ((unsigned long)table | 3);
tlb_remove_table(tlb, table);
return;
}
- bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
+ bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
spin_lock_bh(&mm->context.lock);
mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
mask >>= 24;
@@ -285,7 +285,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
else
list_del(&page->lru);
spin_unlock_bh(&mm->context.lock);
- table = (unsigned long *) (__pa(table) | (1U << bit));
+ table = (unsigned long *) ((unsigned long) table | (1U << bit));
tlb_remove_table(tlb, table);
}
@@ -293,7 +293,7 @@ void __tlb_remove_table(void *_table)
{
unsigned int mask = (unsigned long) _table & 3;
void *table = (void *)((unsigned long) _table ^ mask);
- struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ struct page *page = virt_to_page(table);
switch (mask) {
case 0: /* pmd, pud, or p4d */
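The pgalloc.c conversion above moves from physical to virtual addresses but keeps the long-standing trick of smuggling a small tag in the low bits of the table pointer before handing it to tlb_remove_table(), which works because page tables are well aligned. A plain user-space sketch of that low-bit tagging pattern, purely illustrative:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Pack a 2-bit tag into the (known-zero) low bits of an aligned pointer. */
static void *tag_ptr(void *p, unsigned int tag)
{
	return (void *)((uintptr_t)p | (tag & 3));
}

/* Recover the tag and the original pointer. */
static void *untag_ptr(void *p, unsigned int *tag)
{
	*tag = (uintptr_t)p & 3;
	return (void *)((uintptr_t)p & ~(uintptr_t)3);
}

int main(void)
{
	unsigned int tag;
	void *table = aligned_alloc(4096, 4096); /* page tables are page aligned */

	if (!table)
		return 1;
	void *tagged = tag_ptr(table, 2);
	void *clean = untag_ptr(tagged, &tag);
	printf("tag=%u same=%d\n", tag, clean == table);
	free(table);
	return 0;
}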
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 01f3a5f58e64..96897fab89dc 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -4,6 +4,7 @@
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
+#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
@@ -26,14 +27,14 @@ static void __ref *vmem_alloc_pages(unsigned int order)
if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
- return (void *) memblock_phys_alloc(size, size);
+ return memblock_alloc(size, size);
}
static void vmem_free_pages(unsigned long addr, int order)
{
/* We don't expect boot memory to be removed ever. */
if (!slab_is_available() ||
- WARN_ON_ONCE(PageReserved(phys_to_page(addr))))
+ WARN_ON_ONCE(PageReserved(virt_to_page(addr))))
return;
free_pages(addr, order);
}
@@ -56,7 +57,7 @@ pte_t __ref *vmem_pte_alloc(void)
if (slab_is_available())
pte = (pte_t *) page_table_alloc(&init_mm);
else
- pte = (pte_t *) memblock_phys_alloc(size, size);
+ pte = (pte_t *) memblock_alloc(size, size);
if (!pte)
return NULL;
memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
@@ -84,7 +85,7 @@ static void vmemmap_flush_unused_sub_pmd(void)
{
if (!unused_sub_pmd_start)
return;
- memset(__va(unused_sub_pmd_start), PAGE_UNUSED,
+ memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
unused_sub_pmd_start = 0;
}
@@ -97,7 +98,7 @@ static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
* getting removed (just in case the memmap never gets initialized,
* e.g., because the memory block never gets onlined).
*/
- memset(__va(start), 0, sizeof(struct page));
+ memset((void *)start, 0, sizeof(struct page));
}
static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
@@ -118,7 +119,7 @@ static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
- void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
+ unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
vmemmap_flush_unused_sub_pmd();
@@ -127,7 +128,7 @@ static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
if (!IS_ALIGNED(start, PMD_SIZE))
- memset(page, PAGE_UNUSED, start - __pa(page));
+ memset((void *)page, PAGE_UNUSED, start - page);
/*
* We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
* consecutive sections. Remember for the last added PMD the last
@@ -140,11 +141,11 @@ static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
/* Returns true if the PMD is completely unused and can be freed. */
static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
- void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
+ unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
vmemmap_flush_unused_sub_pmd();
- memset(__va(start), PAGE_UNUSED, end - start);
- return !memchr_inv(page, PAGE_UNUSED, PMD_SIZE);
+ memset((void *)start, PAGE_UNUSED, end - start);
+ return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
}
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
@@ -165,7 +166,7 @@ static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
if (pte_none(*pte))
continue;
if (!direct)
- vmem_free_pages(pfn_to_phys(pte_pfn(*pte)), 0);
+ vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
pte_clear(&init_mm, addr, pte);
} else if (pte_none(*pte)) {
if (!direct) {
@@ -175,7 +176,7 @@ static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
goto out;
pte_val(*pte) = __pa(new_page) | prot;
} else {
- pte_val(*pte) = addr | prot;
+ pte_val(*pte) = __pa(addr) | prot;
}
} else {
continue;
@@ -200,7 +201,7 @@ static void try_free_pte_table(pmd_t *pmd, unsigned long start)
if (!pte_none(*pte))
return;
}
- vmem_pte_free(__va(pmd_deref(*pmd)));
+ vmem_pte_free((unsigned long *) pmd_deref(*pmd));
pmd_clear(pmd);
}
@@ -241,7 +242,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
IS_ALIGNED(next, PMD_SIZE) &&
MACHINE_HAS_EDAT1 && addr && direct &&
!debug_pagealloc_enabled()) {
- pmd_val(*pmd) = addr | prot;
+ pmd_val(*pmd) = __pa(addr) | prot;
pages++;
continue;
} else if (!direct && MACHINE_HAS_EDAT1) {
@@ -337,7 +338,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
IS_ALIGNED(next, PUD_SIZE) &&
MACHINE_HAS_EDAT2 && addr && direct &&
!debug_pagealloc_enabled()) {
- pud_val(*pud) = addr | prot;
+ pud_val(*pud) = __pa(addr) | prot;
pages++;
continue;
}
@@ -532,11 +533,22 @@ void vmem_remove_mapping(unsigned long start, unsigned long size)
mutex_unlock(&vmem_mutex);
}
+struct range arch_get_mappable_range(void)
+{
+ struct range mhp_range;
+
+ mhp_range.start = 0;
+ mhp_range.end = VMEM_MAX_PHYS - 1;
+ return mhp_range;
+}
+
int vmem_add_mapping(unsigned long start, unsigned long size)
{
+ struct range range = arch_get_mappable_range();
int ret;
- if (start + size > VMEM_MAX_PHYS ||
+ if (start < range.start ||
+ start + size > range.end + 1 ||
start + size < start)
return -ERANGE;
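With arch_get_mappable_range() above, vmem_add_mapping() now validates a hot-added block against an architecture-provided range and still rejects address wrap-around. A stand-alone sketch of the same bounds check, using a made-up limit in place of VMEM_MAX_PHYS:

#include <stdio.h>
#include <stdbool.h>

struct range { unsigned long start, end; };

static struct range demo_mappable_range(void)
{
	struct range r = { .start = 0, .end = (1UL << 40) - 1 }; /* illustrative limit */
	return r;
}

static bool block_addable(unsigned long start, unsigned long size)
{
	struct range range = demo_mappable_range();

	if (start < range.start ||
	    start + size > range.end + 1 ||
	    start + size < start)   /* start + size wrapped around */
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", block_addable(0x1000, 0x1000));       /* inside the range: 1 */
	printf("%d\n", block_addable(~0UL - 0x100, 0x1000)); /* wraps around:    0 */
	return 0;
}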
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 0a4182792876..f973e2ead197 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1205,18 +1205,23 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
jit->seen |= SEEN_MEM;
break;
/*
- * BPF_STX XADD (atomic_add)
+ * BPF_ATOMIC
*/
- case BPF_STX | BPF_XADD | BPF_W: /* *(u32 *)(dst + off) += src */
- /* laal %w0,%src,off(%dst) */
- EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W0, src_reg,
- dst_reg, off);
- jit->seen |= SEEN_MEM;
- break;
- case BPF_STX | BPF_XADD | BPF_DW: /* *(u64 *)(dst + off) += src */
- /* laalg %w0,%src,off(%dst) */
- EMIT6_DISP_LH(0xeb000000, 0x00ea, REG_W0, src_reg,
- dst_reg, off);
+ case BPF_STX | BPF_ATOMIC | BPF_DW:
+ case BPF_STX | BPF_ATOMIC | BPF_W:
+ if (insn->imm != BPF_ADD) {
+ pr_err("Unknown atomic operation %02x\n", insn->imm);
+ return -1;
+ }
+
+ /* *(u32/u64 *)(dst + off) += src
+ *
+	 * BPF_W: laal %w0,%src,off(%dst)
+ * BPF_DW: laalg %w0,%src,off(%dst)
+ */
+ EMIT6_DISP_LH(0xeb000000,
+ BPF_SIZE(insn->code) == BPF_W ? 0x00fa : 0x00ea,
+ REG_W0, src_reg, dst_reg, off);
jit->seen |= SEEN_MEM;
break;
/*
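In the JIT hunk above, the operation moves out of the opcode (BPF_XADD) and into insn->imm (BPF_ATOMIC), so the code class and size alone no longer identify it and anything other than BPF_ADD is rejected. A minimal sketch of that dispatch shape, not the real JIT and with placeholder constants:

#include <stdio.h>
#include <stdint.h>

#define DEMO_BPF_W   0x00 /* values are illustrative, not the uapi constants */
#define DEMO_BPF_DW  0x18
#define DEMO_BPF_ADD 0x00

struct demo_insn { uint8_t size; int32_t imm; };

static int jit_atomic(const struct demo_insn *insn)
{
	/* The operation now lives in imm, so check it explicitly. */
	if (insn->imm != DEMO_BPF_ADD) {
		fprintf(stderr, "unknown atomic operation %02x\n",
			(unsigned int)insn->imm);
		return -1;
	}
	/* Pick the 32- or 64-bit add-and-fetch encoding. */
	printf("emit %s\n", insn->size == DEMO_BPF_W ? "laal" : "laalg");
	return 0;
}

int main(void)
{
	struct demo_insn add32 = { DEMO_BPF_W, DEMO_BPF_ADD };
	struct demo_insn unknown = { DEMO_BPF_DW, 0x40 };

	jit_atomic(&add32);   /* accepted: emits laal */
	jit_atomic(&unknown); /* rejected with an error message */
	return 0;
}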
diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile
deleted file mode 100644
index 36261f9d360b..000000000000
--- a/arch/s390/oprofile/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) init.o
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
deleted file mode 100644
index 7441857df51b..000000000000
--- a/arch/s390/oprofile/init.c
+++ /dev/null
@@ -1,37 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * S390 Version
- * Copyright IBM Corp. 2002, 2011
- * Author(s): Thomas Spatzier (tspat@de.ibm.com)
- * Author(s): Mahesh Salgaonkar (mahesh@linux.vnet.ibm.com)
- * Author(s): Heinz Graalfs (graalfs@linux.vnet.ibm.com)
- * Author(s): Andreas Krebbel (krebbel@linux.vnet.ibm.com)
- *
- * @remark Copyright 2002-2011 OProfile authors
- */
-
-#include <linux/oprofile.h>
-#include <linux/init.h>
-#include <asm/processor.h>
-#include <asm/unwind.h>
-
-static void s390_backtrace(struct pt_regs *regs, unsigned int depth)
-{
- struct unwind_state state;
-
- unwind_for_each_frame(&state, current, regs, 0) {
- if (depth-- == 0)
- break;
- oprofile_add_trace(state.ip);
- }
-}
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- ops->backtrace = s390_backtrace;
- return 0;
-}
-
-void oprofile_arch_exit(void)
-{
-}
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 41df8fcfddde..600881d894dd 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -695,43 +695,68 @@ void zpci_remove_device(struct zpci_dev *zdev)
}
}
-int zpci_create_device(struct zpci_dev *zdev)
+/**
+ * zpci_create_device() - Create a new zpci_dev and add it to the zbus
+ * @fid: Function ID of the device to be created
+ * @fh: Current Function Handle of the device to be created
+ * @state: Initial state after creation, either Standby or Configured
+ *
+ * Creates a new zpci device and adds it to its (possibly newly created) zbus
+ * as well as to zpci_list.
+ *
+ * Returns: 0 on success, an error value otherwise
+ */
+int zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
+ struct zpci_dev *zdev;
int rc;
- kref_init(&zdev->kref);
+ zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
+ zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
+ if (!zdev)
+ return -ENOMEM;
- spin_lock(&zpci_list_lock);
- list_add_tail(&zdev->entry, &zpci_list);
- spin_unlock(&zpci_list_lock);
+ /* FID and Function Handle are the static/dynamic identifiers */
+ zdev->fid = fid;
+ zdev->fh = fh;
- rc = zpci_init_iommu(zdev);
+ /* Query function properties and update zdev */
+ rc = clp_query_pci_fn(zdev);
if (rc)
- goto out;
+ goto error;
+ zdev->state = state;
+ kref_init(&zdev->kref);
mutex_init(&zdev->lock);
+
+ rc = zpci_init_iommu(zdev);
+ if (rc)
+ goto error;
+
if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
rc = zpci_enable_device(zdev);
if (rc)
- goto out_destroy_iommu;
+ goto error_destroy_iommu;
}
rc = zpci_bus_device_register(zdev, &pci_root_ops);
if (rc)
- goto out_disable;
+ goto error_disable;
+
+ spin_lock(&zpci_list_lock);
+ list_add_tail(&zdev->entry, &zpci_list);
+ spin_unlock(&zpci_list_lock);
return 0;
-out_disable:
+error_disable:
if (zdev->state == ZPCI_FN_STATE_ONLINE)
zpci_disable_device(zdev);
-
-out_destroy_iommu:
+error_destroy_iommu:
zpci_destroy_iommu(zdev);
-out:
- spin_lock(&zpci_list_lock);
- list_del(&zdev->entry);
- spin_unlock(&zpci_list_lock);
+error:
+ zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
+ kfree(zdev);
return rc;
}
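zpci_create_device() above now allocates and fully constructs the zdev itself, and only adds it to zpci_list once every step has succeeded, unwinding in reverse order on failure. A generic sketch of that constructor-with-unwind shape; every function below is a stand-in, not a kernel API:

#include <stdio.h>
#include <stdlib.h>

struct demo_dev { unsigned int fid, fh, state; };

static int demo_query(struct demo_dev *d)         { (void)d; return 0; }
static int demo_init_iommu(struct demo_dev *d)     { (void)d; return 0; }
static int demo_enable(struct demo_dev *d)         { (void)d; return 0; }
static int demo_register(struct demo_dev *d)       { (void)d; return -1; } /* force unwind */
static void demo_disable(struct demo_dev *d)       { (void)d; puts("disable"); }
static void demo_destroy_iommu(struct demo_dev *d) { (void)d; puts("destroy iommu"); }

static int demo_create(unsigned int fid, unsigned int fh, unsigned int state)
{
	struct demo_dev *d = calloc(1, sizeof(*d));
	int rc;

	if (!d)
		return -1;
	d->fid = fid;
	d->fh = fh;
	rc = demo_query(d);
	if (rc)
		goto error;
	d->state = state;
	rc = demo_init_iommu(d);
	if (rc)
		goto error;
	rc = demo_enable(d);
	if (rc)
		goto error_destroy_iommu;
	rc = demo_register(d);
	if (rc)
		goto error_disable;
	return 0; /* only now would the device become globally visible */

error_disable:
	demo_disable(d);
error_destroy_iommu:
	demo_destroy_iommu(d);
error:
	free(d);
	return rc;
}

int main(void)
{
	demo_create(0x100, 0x200, 1); /* fails in demo_register() and unwinds */
	return 0;
}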
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 153720d21ae7..d3331596ddbe 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -181,7 +181,7 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
return 0;
}
-static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
+int clp_query_pci_fn(struct zpci_dev *zdev)
{
struct clp_req_rsp_query_pci *rrb;
int rc;
@@ -194,7 +194,7 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
rrb->request.hdr.len = sizeof(rrb->request);
rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
rrb->response.hdr.len = sizeof(rrb->response);
- rrb->request.fh = fh;
+ rrb->request.fh = zdev->fh;
rc = clp_req(rrb, CLP_LPS_PCI);
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
@@ -212,40 +212,6 @@ out:
return rc;
}
-int clp_add_pci_device(u32 fid, u32 fh, int configured)
-{
- struct zpci_dev *zdev;
- int rc = -ENOMEM;
-
- zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
- zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
- if (!zdev)
- goto error;
-
- zdev->fh = fh;
- zdev->fid = fid;
-
- /* Query function properties and update zdev */
- rc = clp_query_pci_fn(zdev, fh);
- if (rc)
- goto error;
-
- if (configured)
- zdev->state = ZPCI_FN_STATE_CONFIGURED;
- else
- zdev->state = ZPCI_FN_STATE_STANDBY;
-
- rc = zpci_create_device(zdev);
- if (rc)
- goto error;
- return 0;
-
-error:
- zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
- kfree(zdev);
- return rc;
-}
-
static int clp_refresh_fh(u32 fid);
/*
* Enable/Disable a given PCI function and update its function handle if
@@ -408,7 +374,7 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
zdev = get_zdev_by_fid(entry->fid);
if (!zdev)
- clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
+ zpci_create_device(entry->fid, entry->fh, entry->config_state);
}
int clp_scan_pci_devices(void)
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 9a6bae503fe6..b4162da4e8a2 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -80,7 +80,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
enum zpci_state state;
int ret;
- if (zdev && zdev->zbus && zdev->zbus->bus)
+ if (zdev && zdev->zbus->bus)
pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
zpci_err("avail CCDF:\n");
@@ -89,7 +89,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
switch (ccdf->pec) {
case 0x0301: /* Reserved|Standby -> Configured */
if (!zdev) {
- ret = clp_add_pci_device(ccdf->fid, ccdf->fh, 1);
+ zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_CONFIGURED);
break;
}
/* the configuration request may be stale */
@@ -116,7 +116,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
break;
case 0x0302: /* Reserved -> Standby */
if (!zdev) {
- clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
+ zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY);
break;
}
zdev->fh = ccdf->fh;
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 18f2d10c3176..474617b88648 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -170,7 +170,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
if (!(vma->vm_flags & VM_WRITE))
goto out_unlock_mmap;
- ret = follow_pte(vma->vm_mm, mmio_addr, NULL, &ptep, NULL, &ptl);
+ ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
if (ret)
goto out_unlock_mmap;
@@ -311,7 +311,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
if (!(vma->vm_flags & VM_WRITE))
goto out_unlock_mmap;
- ret = follow_pte(vma->vm_mm, mmio_addr, NULL, &ptep, NULL, &ptl);
+ ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
if (ret)
goto out_unlock_mmap;
diff --git a/arch/s390/tools/opcodes.txt b/arch/s390/tools/opcodes.txt
index 46d8ed96cf06..0e207c46e8da 100644
--- a/arch/s390/tools/opcodes.txt
+++ b/arch/s390/tools/opcodes.txt
@@ -597,7 +597,7 @@ b9b3 cu42 RRE_RR
b9bd trtre RRF_U0RR
b9be srstu RRE_RR
b9bf trte RRF_U0RR
-b9c0 selhhhr RRF_RURR
+b9c0 selfhr RRF_RURR
b9c8 ahhhr RRF_R0RR2
b9c9 shhhr RRF_R0RR2
b9ca alhhhr RRF_R0RR2
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 52646f52f130..e798e55915c2 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -50,11 +50,11 @@ config SUPERH
select HAVE_MIXED_BREAKPOINTS_REGS
select HAVE_MOD_ARCH_SPECIFIC if DWARF_UNWINDER
select HAVE_NMI
- select HAVE_OPROFILE
select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_UID16
+ select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_FORCED_THREADING
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 2faebfd72eca..3bcbf52fb30e 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -170,7 +170,6 @@ cpuincdir-$(CONFIG_CPU_SH4) += cpu-sh4
cpuincdir-y += cpu-common # Must be last
drivers-y += arch/sh/drivers/
-drivers-$(CONFIG_OPROFILE) += arch/sh/oprofile/
cflags-y += $(foreach d, $(cpuincdir-y), -I $(srctree)/arch/sh/include/$(d)) \
$(foreach d, $(machdir-y), -I $(srctree)/arch/sh/include/$(d))
diff --git a/arch/sh/boards/mach-landisk/gio.c b/arch/sh/boards/mach-landisk/gio.c
index 1c0da99dfc60..ff2200fec29a 100644
--- a/arch/sh/boards/mach-landisk/gio.c
+++ b/arch/sh/boards/mach-landisk/gio.c
@@ -27,11 +27,10 @@ static int openCnt;
static int gio_open(struct inode *inode, struct file *filp)
{
- int minor;
+ int minor = iminor(inode);
int ret = -ENOENT;
preempt_disable();
- minor = MINOR(inode->i_rdev);
if (minor < DEVCOUNT) {
if (openCnt > 0) {
ret = -EALREADY;
@@ -46,9 +45,8 @@ static int gio_open(struct inode *inode, struct file *filp)
static int gio_close(struct inode *inode, struct file *filp)
{
- int minor;
+ int minor = iminor(inode);
- minor = MINOR(inode->i_rdev);
if (minor < DEVCOUNT) {
openCnt--;
}
diff --git a/arch/sh/configs/edosk7760_defconfig b/arch/sh/configs/edosk7760_defconfig
index 02ba62298576..d77f54e906fd 100644
--- a/arch/sh/configs/edosk7760_defconfig
+++ b/arch/sh/configs/edosk7760_defconfig
@@ -102,7 +102,6 @@ CONFIG_NLS_UTF8=y
CONFIG_PRINTK_TIME=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_MAGIC_SYSRQ=y
-CONFIG_UNUSED_SYMBOLS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/sh/configs/espt_defconfig b/arch/sh/configs/espt_defconfig
index 9a988c347e9d..2804cb760a76 100644
--- a/arch/sh/configs/espt_defconfig
+++ b/arch/sh/configs/espt_defconfig
@@ -7,7 +7,6 @@ CONFIG_UTS_NS=y
CONFIG_IPC_NS=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7763=y
diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig
index a24cf8cd2cea..4859cd30cfc4 100644
--- a/arch/sh/configs/migor_defconfig
+++ b/arch/sh/configs/migor_defconfig
@@ -6,7 +6,6 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7722=y
diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig
index e922659fdadb..f823cc6b18f9 100644
--- a/arch/sh/configs/r7780mp_defconfig
+++ b/arch/sh/configs/r7780mp_defconfig
@@ -7,7 +7,6 @@ CONFIG_LOG_BUF_SHIFT=14
# CONFIG_EPOLL is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig
index 5978866358ec..f96bc20d4b1a 100644
--- a/arch/sh/configs/r7785rp_defconfig
+++ b/arch/sh/configs/r7785rp_defconfig
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/sh/configs/rsk7201_defconfig b/arch/sh/configs/rsk7201_defconfig
index 841809b5c2dc..e41526120be1 100644
--- a/arch/sh/configs/rsk7201_defconfig
+++ b/arch/sh/configs/rsk7201_defconfig
@@ -12,7 +12,6 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_AIO is not set
CONFIG_SLOB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7201=y
diff --git a/arch/sh/configs/rsk7203_defconfig b/arch/sh/configs/rsk7203_defconfig
index 0055031664ad..6af08fa1ddf8 100644
--- a/arch/sh/configs/rsk7203_defconfig
+++ b/arch/sh/configs/rsk7203_defconfig
@@ -13,7 +13,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLOB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7203=y
diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig
index fc9c22152b08..96263a4912b7 100644
--- a/arch/sh/configs/rts7751r2d1_defconfig
+++ b/arch/sh/configs/rts7751r2d1_defconfig
@@ -3,7 +3,6 @@ CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7751R=y
diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig
index ff3fd6787fd6..92e586e6c974 100644
--- a/arch/sh/configs/rts7751r2dplus_defconfig
+++ b/arch/sh/configs/rts7751r2dplus_defconfig
@@ -3,7 +3,6 @@ CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7751R=y
diff --git a/arch/sh/configs/sdk7780_defconfig b/arch/sh/configs/sdk7780_defconfig
index d00376eb044f..6c719ab4332a 100644
--- a/arch/sh/configs/sdk7780_defconfig
+++ b/arch/sh/configs/sdk7780_defconfig
@@ -128,7 +128,6 @@ CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_UTF8=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_MAGIC_SYSRQ=y
-CONFIG_UNUSED_SYMBOLS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index 4a44cac640bc..f776a1d0d277 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -35,7 +35,6 @@ CONFIG_RD_LZO=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
index ff5bb4489922..315b04a8dd2f 100644
--- a/arch/sh/configs/se7206_defconfig
+++ b/arch/sh/configs/se7206_defconfig
@@ -23,7 +23,6 @@ CONFIG_KALLSYMS_ALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLOB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig
index 48b457d59e79..ff502683132e 100644
--- a/arch/sh/configs/sh03_defconfig
+++ b/arch/sh/configs/sh03_defconfig
@@ -5,7 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/sh/configs/sh7724_generic_defconfig b/arch/sh/configs/sh7724_generic_defconfig
index 9adee9010319..2c46c0004780 100644
--- a/arch/sh/configs/sh7724_generic_defconfig
+++ b/arch/sh/configs/sh7724_generic_defconfig
@@ -4,7 +4,6 @@ CONFIG_CGROUPS=y
# CONFIG_UID16 is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7724=y
CONFIG_NO_HZ=y
diff --git a/arch/sh/configs/sh7763rdp_defconfig b/arch/sh/configs/sh7763rdp_defconfig
index 26c5fd02c87a..8a6a446f9eb8 100644
--- a/arch/sh/configs/sh7763rdp_defconfig
+++ b/arch/sh/configs/sh7763rdp_defconfig
@@ -7,7 +7,6 @@ CONFIG_UTS_NS=y
CONFIG_IPC_NS=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7763=y
diff --git a/arch/sh/configs/sh7770_generic_defconfig b/arch/sh/configs/sh7770_generic_defconfig
index c17590f0df67..88193153e51b 100644
--- a/arch/sh/configs/sh7770_generic_defconfig
+++ b/arch/sh/configs/sh7770_generic_defconfig
@@ -4,7 +4,6 @@ CONFIG_CGROUPS=y
# CONFIG_UID16 is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7770=y
CONFIG_SH_PCLK_FREQ=41666666
diff --git a/arch/sh/configs/shx3_defconfig b/arch/sh/configs/shx3_defconfig
index dc2be2514b62..32ec6eb1eabc 100644
--- a/arch/sh/configs/shx3_defconfig
+++ b/arch/sh/configs/shx3_defconfig
@@ -22,7 +22,6 @@ CONFIG_PID_NS=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLOB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h
index 6d44c32ef047..839551ce398c 100644
--- a/arch/sh/include/asm/irq.h
+++ b/arch/sh/include/asm/irq.h
@@ -51,7 +51,6 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs);
#ifdef CONFIG_IRQSTACKS
extern void irq_ctx_init(int cpu);
extern void irq_ctx_exit(int cpu);
-# define __ARCH_HAS_DO_SOFTIRQ
#else
# define irq_ctx_init(cpu) do { } while (0)
# define irq_ctx_exit(cpu) do { } while (0)
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index ab5f790b0cd2..ef0f0827cf57 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -20,6 +20,7 @@
#include <linux/uaccess.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>
+#include <asm/softirq_stack.h>
atomic_t irq_err_count;
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 80a5d1c66a51..1aa508eb0823 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -114,7 +114,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
childregs = task_pt_regs(p);
p->thread.sp = (unsigned long) childregs;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
memset(childregs, 0, sizeof(struct pt_regs));
p->thread.pc = (unsigned long) ret_from_kernel_thread;
childregs->regs[4] = arg;
diff --git a/arch/sh/kernel/syscalls/Makefile b/arch/sh/kernel/syscalls/Makefile
index 659faefdcb1d..285aaba832d9 100644
--- a/arch/sh/kernel/syscalls/Makefile
+++ b/arch/sh/kernel/syscalls/Makefile
@@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm
_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
-syscall := $(srctree)/$(src)/syscall.tbl
+syscall := $(src)/syscall.tbl
syshdr := $(srctree)/$(src)/syscallhdr.sh
systbl := $(srctree)/$(src)/syscalltbl.sh
@@ -21,18 +21,19 @@ quiet_cmd_systbl = SYSTBL $@
'$(systbl_abi_$(basetarget))' \
'$(systbl_offset_$(basetarget))'
-$(uapi)/unistd_32.h: $(syscall) $(syshdr)
+$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-$(kapi)/syscall_table.h: $(syscall) $(systbl)
+$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
uapisyshdr-y += unistd_32.h
kapisyshdr-y += syscall_table.h
-targets += $(uapisyshdr-y) $(kapisyshdr-y)
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
PHONY += all
-all: $(addprefix $(uapi)/,$(uapisyshdr-y))
-all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+all: $(uapisyshdr-y) $(kapisyshdr-y)
@:
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index 9df40ac0ebc0..d08eebad6b7f 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -444,3 +444,4 @@
439 common faccessat2 sys_faccessat2
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
diff --git a/arch/sh/oprofile/Makefile b/arch/sh/oprofile/Makefile
deleted file mode 100644
index d478dd8dac0b..000000000000
--- a/arch/sh/oprofile/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-CFLAGS_common.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-ifeq ($(CONFIG_HW_PERF_EVENTS),y)
-DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
-endif
-
-oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
diff --git a/arch/sh/oprofile/backtrace.c b/arch/sh/oprofile/backtrace.c
deleted file mode 100644
index cc16cf86cd92..000000000000
--- a/arch/sh/oprofile/backtrace.c
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * SH specific backtracing code for oprofile
- *
- * Copyright 2007 STMicroelectronics Ltd.
- *
- * Author: Dave Peverley <dpeverley@mpc-data.co.uk>
- *
- * Based on ARM oprofile backtrace code by Richard Purdie and in turn, i386
- * oprofile backtrace code by John Levon, David Smith
- */
-#include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <linux/kallsyms.h>
-#include <linux/mm.h>
-#include <asm/unwinder.h>
-#include <asm/ptrace.h>
-#include <linux/uaccess.h>
-#include <asm/sections.h>
-#include <asm/stacktrace.h>
-
-static void backtrace_address(void *data, unsigned long addr, int reliable)
-{
- unsigned int *depth = data;
-
- if ((*depth)--)
- oprofile_add_trace(addr);
-}
-
-static struct stacktrace_ops backtrace_ops = {
- .address = backtrace_address,
-};
-
-/* Limit to stop backtracing too far. */
-static int backtrace_limit = 20;
-
-static unsigned long *
-user_backtrace(unsigned long *stackaddr, struct pt_regs *regs)
-{
- unsigned long buf_stack;
-
- /* Also check accessibility of address */
- if (!access_ok(stackaddr, sizeof(unsigned long)))
- return NULL;
-
- if (__copy_from_user_inatomic(&buf_stack, stackaddr, sizeof(unsigned long)))
- return NULL;
-
- /* Quick paranoia check */
- if (buf_stack & 3)
- return NULL;
-
- oprofile_add_trace(buf_stack);
-
- stackaddr++;
-
- return stackaddr;
-}
-
-void sh_backtrace(struct pt_regs * const regs, unsigned int depth)
-{
- unsigned long *stackaddr;
-
- /*
- * Paranoia - clip max depth as we could get lost in the weeds.
- */
- if (depth > backtrace_limit)
- depth = backtrace_limit;
-
- stackaddr = (unsigned long *)kernel_stack_pointer(regs);
- if (!user_mode(regs)) {
- if (depth)
- unwind_stack(NULL, regs, stackaddr,
- &backtrace_ops, &depth);
- return;
- }
-
- while (depth-- && (stackaddr != NULL))
- stackaddr = user_backtrace(stackaddr, regs);
-}
diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c
deleted file mode 100644
index e4dd5d5a1115..000000000000
--- a/arch/sh/oprofile/common.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * arch/sh/oprofile/init.c
- *
- * Copyright (C) 2003 - 2010 Paul Mundt
- *
- * Based on arch/mips/oprofile/common.c:
- *
- * Copyright (C) 2004, 2005 Ralf Baechle
- * Copyright (C) 2005 MIPS Technologies, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/kernel.h>
-#include <linux/oprofile.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/smp.h>
-#include <linux/perf_event.h>
-#include <linux/slab.h>
-#include <asm/processor.h>
-
-extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-#ifdef CONFIG_HW_PERF_EVENTS
-/*
- * This will need to be reworked when multiple PMUs are supported.
- */
-static char *sh_pmu_op_name;
-
-char *op_name_from_perf_id(void)
-{
- return sh_pmu_op_name;
-}
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- ops->backtrace = sh_backtrace;
-
- if (perf_num_counters() == 0)
- return -ENODEV;
-
- sh_pmu_op_name = kasprintf(GFP_KERNEL, "%s/%s",
- UTS_MACHINE, perf_pmu_name());
- if (unlikely(!sh_pmu_op_name))
- return -ENOMEM;
-
- return oprofile_perf_init(ops);
-}
-
-void oprofile_arch_exit(void)
-{
- oprofile_perf_exit();
- kfree(sh_pmu_op_name);
-}
-#else
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- ops->backtrace = sh_backtrace;
- return -ENODEV;
-}
-void oprofile_arch_exit(void) {}
-#endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index c9c34dc52b7d..164a5254c91c 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -20,7 +20,6 @@ config SPARC
select OF_PROMTREE
select HAVE_ASM_MODVERSIONS
select HAVE_IDE
- select HAVE_OPROFILE
select HAVE_ARCH_KGDB if !SMP || SPARC64
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_SECCOMP if SPARC64
@@ -97,6 +96,7 @@ config SPARC64
select ARCH_HAS_PTE_SPECIAL
select PCI_DOMAINS if PCI
select ARCH_HAS_GIGANTIC_PAGE
+ select HAVE_SOFTIRQ_ON_OWN_STACK
config ARCH_PROC_KCORE_TEXT
def_bool y
@@ -176,7 +176,7 @@ config SMP
Management" code will be disabled if you say Y here.
See also <file:Documentation/admin-guide/lockup-watchdogs.rst> and the SMP-HOWTO
- available at <http://www.tldp.org/docs.html#howto>.
+ available at <https://www.tldp.org/docs.html#howto>.
If you don't know what to do here, say N.
@@ -494,7 +494,6 @@ config COMPAT
bool
depends on SPARC64
default y
- select COMPAT_BINFMT_ELF
select HAVE_UID16
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 4a0919581697..bee99e65fe23 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -65,7 +65,6 @@ libs-y += arch/sparc/prom/
libs-y += arch/sparc/lib/
drivers-$(CONFIG_PM) += arch/sparc/power/
-drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
boot := arch/sparc/boot
diff --git a/arch/sparc/boot/piggyback.c b/arch/sparc/boot/piggyback.c
index a7a38fb4ece0..6d74064add0a 100644
--- a/arch/sparc/boot/piggyback.c
+++ b/arch/sparc/boot/piggyback.c
@@ -154,6 +154,10 @@ static off_t get_hdrs_offset(int kernelfd, const char *filename)
offset -= LOOKBACK;
/* skip a.out header */
offset += AOUT_TEXT_OFFSET;
+ if (offset < 0) {
+		errno = EINVAL;
+		die("Calculated a negative offset, probably elftoaout generated an invalid image. Did you use a recent elftoaout?");
+ }
if (lseek(kernelfd, offset, SEEK_SET) < 0)
die("lseek");
if (read(kernelfd, buffer, BUFSIZE) != BUFSIZE)
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index bde4d21a8ac8..148f44b33890 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -8,7 +8,6 @@ CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -66,9 +65,8 @@ CONFIG_CDROM_PKTCDVD=m
CONFIG_CDROM_PKTCDVD_WCACHE=y
CONFIG_ATA_OVER_ETH=m
CONFIG_SUNVDC=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_ALI15X3=y
+CONFIG_ATA=y
+CONFIG_PATA_ALI=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -236,3 +234,9 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRC16=m
CONFIG_LIBCRC32C=m
CONFIG_VCC=m
+CONFIG_ATA=y
+CONFIG_PATA_CMD64X=y
+CONFIG_HAPPYMEAL=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_DEVTMPFS=y
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 3688fdae50e4..aec20406145e 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -1,6 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-# User exported sparc header files
-
generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
generated-y += syscall_table_c32.h
diff --git a/arch/sparc/include/asm/backoff.h b/arch/sparc/include/asm/backoff.h
index 8625946d8d00..597a22953bc5 100644
--- a/arch/sparc/include/asm/backoff.h
+++ b/arch/sparc/include/asm/backoff.h
@@ -18,7 +18,7 @@
*
* When we spin, we try to use an operation that will cause the
* current cpu strand to block, and therefore make the core fully
- * available to any other other runnable strands. There are two
+ * available to any other runnable strands. There are two
* options, based upon cpu capabilities.
*
* On all cpus prior to SPARC-T4 we do three dummy reads of the
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index c73b5a3ab7b9..a53d744d4212 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -25,7 +25,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
return x;
}
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
/* Emulate cmpxchg() the same way we emulate atomics,
* by hashing the object address and indexing into an array
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 4d748e93b974..154df2cf19f4 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -93,7 +93,6 @@ void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
extern void *hardirq_stack[NR_CPUS];
extern void *softirq_stack[NR_CPUS];
-#define __ARCH_HAS_DO_SOFTIRQ
#define NO_IRQ 0xffffffff
diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h
index f94532f25db1..274217e7ed70 100644
--- a/arch/sparc/include/asm/mman.h
+++ b/arch/sparc/include/asm/mman.h
@@ -57,35 +57,39 @@ static inline int sparc_validate_prot(unsigned long prot, unsigned long addr)
{
if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_ADI))
return 0;
- if (prot & PROT_ADI) {
- if (!adi_capable())
- return 0;
+ return 1;
+}
- if (addr) {
- struct vm_area_struct *vma;
+#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)
+/* arch_validate_flags() - Ensure combination of flags is valid for a
+ * VMA.
+ */
+static inline bool arch_validate_flags(unsigned long vm_flags)
+{
+ /* If ADI is being enabled on this VMA, check for ADI
+ * capability on the platform and ensure VMA is suitable
+ * for ADI
+ */
+ if (vm_flags & VM_SPARC_ADI) {
+ if (!adi_capable())
+ return false;
- vma = find_vma(current->mm, addr);
- if (vma) {
- /* ADI can not be enabled on PFN
- * mapped pages
- */
- if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
- return 0;
+ /* ADI can not be enabled on PFN mapped pages */
+ if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+ return false;
- /* Mergeable pages can become unmergeable
- * if ADI is enabled on them even if they
- * have identical data on them. This can be
- * because ADI enabled pages with identical
- * data may still not have identical ADI
- * tags on them. Disallow ADI on mergeable
- * pages.
- */
- if (vma->vm_flags & VM_MERGEABLE)
- return 0;
- }
- }
+ /* Mergeable pages can become unmergeable
+ * if ADI is enabled on them even if they
+ * have identical data on them. This can be
+ * because ADI enabled pages with identical
+ * data may still not have identical ADI
+ * tags on them. Disallow ADI on mergeable
+ * pages.
+ */
+ if (vm_flags & VM_MERGEABLE)
+ return false;
}
- return 1;
+ return true;
}
#endif /* CONFIG_SPARC64 */
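The mman.h change above moves the ADI checks out of sparc_validate_prot(), which had to look up the VMA by address, into an arch_validate_flags() hook that decides from the VMA flags alone. A small sketch of such a flags-only check, with placeholder flag values:

#include <stdio.h>
#include <stdbool.h>

#define DEMO_VM_ADI       0x1 /* illustrative values, not the real vm_flags bits */
#define DEMO_VM_PFNMAP    0x2
#define DEMO_VM_MIXEDMAP  0x4
#define DEMO_VM_MERGEABLE 0x8

static bool demo_adi_capable(void) { return true; } /* hypothetical platform probe */

static bool demo_validate_flags(unsigned long vm_flags)
{
	if (vm_flags & DEMO_VM_ADI) {
		if (!demo_adi_capable())
			return false;
		/* ADI cannot be enabled on PFN-mapped or mergeable pages. */
		if (vm_flags & (DEMO_VM_PFNMAP | DEMO_VM_MIXEDMAP | DEMO_VM_MERGEABLE))
			return false;
	}
	return true;
}

int main(void)
{
	printf("%d\n", demo_validate_flags(DEMO_VM_ADI));                  /* 1 */
	printf("%d\n", demo_validate_flags(DEMO_VM_ADI | DEMO_VM_PFNMAP)); /* 0 */
	return 0;
}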
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
index 7708d015712b..6067925972d9 100644
--- a/arch/sparc/include/asm/pgtsrmmu.h
+++ b/arch/sparc/include/asm/pgtsrmmu.h
@@ -113,7 +113,7 @@ extern unsigned long last_valid_pfn;
extern void *srmmu_nocache_pool;
#define __nocache_pa(VADDR) (((unsigned long)VADDR) - SRMMU_NOCACHE_VADDR + __pa((unsigned long)srmmu_nocache_pool))
#define __nocache_va(PADDR) (__va((unsigned long)PADDR) - (unsigned long)srmmu_nocache_pool + SRMMU_NOCACHE_VADDR)
-#define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
+#define __nocache_fix(VADDR) ((__typeof__(VADDR))__va(__nocache_pa(VADDR)))
/* Accessing the MMU control register. */
unsigned int srmmu_get_mmureg(void);
diff --git a/arch/sparc/include/asm/signal.h b/arch/sparc/include/asm/signal.h
index 827b73a97f8a..28f81081e37d 100644
--- a/arch/sparc/include/asm/signal.h
+++ b/arch/sparc/include/asm/signal.h
@@ -9,18 +9,6 @@
#include <uapi/asm/signal.h>
#ifndef __ASSEMBLY__
-/*
- * DJHR
- * SA_STATIC_ALLOC is used for the sparc32 system to indicate that this
- * interrupt handler's irq structure should be statically allocated
- * by the request_irq routine.
- * The alternative is that arch/sparc/kernel/irq.c has carnal knowledge
- * of interrupt usage and that sucks. Also without a flag like this
- * it may be possible for the free_irq routine to attempt to free
- * statically allocated data.. which is NOT GOOD.
- *
- */
-#define SA_STATIC_ALLOC 0x8000
#define __ARCH_HAS_KA_RESTORER
#define __ARCH_HAS_SA_RESTORER
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 7fc82a233f49..3a9a0b0c7465 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -11,8 +11,8 @@
#include <asm/processor.h>
#include <asm/barrier.h>
-#include <asm/qrwlock.h>
#include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
#endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h
index e841cae544c2..779a5a0f0608 100644
--- a/arch/sparc/include/asm/tlb_64.h
+++ b/arch/sparc/include/asm/tlb_64.h
@@ -24,7 +24,6 @@ void flush_tlb_pending(void);
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#define tlb_flush(tlb) flush_tlb_pending()
/*
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index d58940280f8d..a269ad2fe6df 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -994,7 +994,7 @@ do_syscall:
andcc %l5, _TIF_SYSCALL_TRACE, %g0
mov %i4, %o4
bne linux_syscall_trace
- mov %i0, %l5
+ mov %i0, %l6
2:
call %l7
mov %i5, %o5
@@ -1003,16 +1003,15 @@ do_syscall:
st %o0, [%sp + STACKFRAME_SZ + PT_I0]
ret_sys_call:
- ld [%curptr + TI_FLAGS], %l6
+ ld [%curptr + TI_FLAGS], %l5
cmp %o0, -ERESTART_RESTARTBLOCK
ld [%sp + STACKFRAME_SZ + PT_PSR], %g3
set PSR_C, %g2
bgeu 1f
- andcc %l6, _TIF_SYSCALL_TRACE, %g0
+ andcc %l5, _TIF_SYSCALL_TRACE, %g0
/* System call success, clear Carry condition code. */
andn %g3, %g2, %g3
- clr %l6
st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
bne linux_syscall_trace2
ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
@@ -1027,7 +1026,6 @@ ret_sys_call:
sub %g0, %o0, %o0
or %g3, %g2, %g3
st %o0, [%sp + STACKFRAME_SZ + PT_I0]
- mov 1, %l6
st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
bne linux_syscall_trace2
ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 3ec9f1402aad..c8848bb681a1 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -42,6 +42,7 @@
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>
+#include <asm/softirq_stack.h>
#include "entry.h"
#include "cpumap.h"
diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c
index bd48575172c3..3a66e62eb2a0 100644
--- a/arch/sparc/kernel/led.c
+++ b/arch/sparc/kernel/led.c
@@ -50,6 +50,7 @@ static void led_blink(struct timer_list *unused)
add_timer(&led_blink_timer);
}
+#ifdef CONFIG_PROC_FS
static int led_proc_show(struct seq_file *m, void *v)
{
if (get_auxio() & AUXIO_LED)
@@ -111,6 +112,7 @@ static const struct proc_ops led_proc_ops = {
.proc_release = single_release,
.proc_write = led_proc_write,
};
+#endif
static struct proc_dir_entry *led;
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 5d45b6d766d6..9c2b720bfd20 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -552,9 +552,8 @@ static void pci_of_scan_bus(struct pci_pbm_info *pbm,
pci_info(bus, "scan_bus[%pOF] bus no %d\n",
node, bus->number);
- child = NULL;
prev_devfn = -1;
- while ((child = of_get_next_child(node, child)) != NULL) {
+ for_each_child_of_node(node, child) {
if (ofpci_verbose)
pci_info(bus, " * %pOF\n", child);
reg = of_get_property(child, "reg", &reglen);
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index a02363735915..b91e88058e0c 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -183,7 +183,7 @@ void exit_thread(struct task_struct *tsk)
#ifndef CONFIG_SMP
if (last_task_used_math == tsk) {
#else
- if (test_ti_thread_flag(task_thread_info(tsk), TIF_USEDFPU)) {
+ if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) {
#endif
/* Keep process from leaving FPU in a bogon state. */
put_psr(get_psr() | PSR_EF);
@@ -309,7 +309,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
ti->ksp = (unsigned long) new_stack;
p->thread.kregs = childregs;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
extern int nwindows;
unsigned long psr;
memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ);
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 6f8c7822fc06..7afd0a859a78 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -597,7 +597,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
sizeof(struct sparc_stackf));
t->fpsaved[0] = 0;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
memset(child_trap_frame, 0, child_stack_sz);
__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
(current_pt_regs()->tstate + 1) & TSTATE_CWP;
diff --git a/arch/sparc/kernel/rtrap_32.S b/arch/sparc/kernel/rtrap_32.S
index dca8ed810046..8931fe266346 100644
--- a/arch/sparc/kernel/rtrap_32.S
+++ b/arch/sparc/kernel/rtrap_32.S
@@ -75,7 +75,7 @@ signal_p:
ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr
mov %g2, %o2
- mov %l5, %o1
+ mov %l6, %o1
call do_notify_resume
add %sp, STACKFRAME_SZ, %o0 ! pt_regs ptr
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 11cf2281b581..02f3ad55dfe3 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -400,8 +400,8 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs,
else {
regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);
- /* mov __NR_sigreturn, %g1 */
- err |= __put_user(0x821020d8, &sf->insns[0]);
+ /* mov __NR_rt_sigreturn, %g1 */
+ err |= __put_user(0x82102065, &sf->insns[0]);
/* t 0x10 */
err |= __put_user(0x91d02010, &sf->insns[1]);
diff --git a/arch/sparc/kernel/syscalls/Makefile b/arch/sparc/kernel/syscalls/Makefile
index c22a21c39f30..283f64407b07 100644
--- a/arch/sparc/kernel/syscalls/Makefile
+++ b/arch/sparc/kernel/syscalls/Makefile
@@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm
_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
-syscall := $(srctree)/$(src)/syscall.tbl
+syscall := $(src)/syscall.tbl
syshdr := $(srctree)/$(src)/syscallhdr.sh
systbl := $(srctree)/$(src)/syscalltbl.sh
@@ -22,24 +22,24 @@ quiet_cmd_systbl = SYSTBL $@
'$(systbl_offset_$(basetarget))'
syshdr_abis_unistd_32 := common,32
-$(uapi)/unistd_32.h: $(syscall) $(syshdr)
+$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
syshdr_abis_unistd_64 := common,64
-$(uapi)/unistd_64.h: $(syscall) $(syshdr)
+$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
systbl_abis_syscall_table_32 := common,32
-$(kapi)/syscall_table_32.h: $(syscall) $(systbl)
+$(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
systbl_abis_syscall_table_64 := common,64
-$(kapi)/syscall_table_64.h: $(syscall) $(systbl)
+$(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
systbl_abis_syscall_table_c32 := common,32
systbl_abi_syscall_table_c32 := c32
-$(kapi)/syscall_table_c32.h: $(syscall) $(systbl)
+$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
uapisyshdr-y += unistd_32.h unistd_64.h
@@ -47,9 +47,10 @@ kapisyshdr-y += syscall_table_32.h \
syscall_table_64.h \
syscall_table_c32.h
-targets += $(uapisyshdr-y) $(kapisyshdr-y)
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
PHONY += all
-all: $(addprefix $(uapi)/,$(uapisyshdr-y))
-all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+all: $(uapisyshdr-y) $(kapisyshdr-y)
@:
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index 40d8c7cd8298..84403a99039c 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -487,3 +487,4 @@
439 common faccessat2 sys_faccessat2
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
index 7db5aabe9708..e27afd233bf5 100644
--- a/arch/sparc/kernel/viohs.c
+++ b/arch/sparc/kernel/viohs.c
@@ -428,7 +428,7 @@ static int process_dreg_info(struct vio_driver_state *vio,
struct vio_dring_register *pkt)
{
struct vio_dring_state *dr;
- int i, len;
+ int i;
viodbg(HS, "GOT DRING_REG INFO ident[%llx] "
"ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
@@ -482,9 +482,7 @@ static int process_dreg_info(struct vio_driver_state *vio,
pkt->num_descr, pkt->descr_size, pkt->options,
pkt->num_cookies);
- len = (sizeof(*pkt) +
- (dr->ncookies * sizeof(struct ldc_trans_cookie)));
- if (send_ctrl(vio, &pkt->tag, len) < 0)
+ if (send_ctrl(vio, &pkt->tag, struct_size(pkt, cookies, dr->ncookies)) < 0)
goto send_nack;
vio->dr_state |= VIO_DR_STATE_RXREG;
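struct_size(pkt, cookies, dr->ncookies) comes from <linux/overflow.h> and replaces the open-coded length with an overflow-checked equivalent of sizeof(*pkt) plus that many trailing cookie entries. A stand-alone sketch of the arithmetic, using simplified illustrative structures rather than the real vio definitions:

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the structures used in viohs.c. */
struct cookie_demo { unsigned long addr, size; };
struct dring_reg_demo {
	unsigned long long ident;
	unsigned int num_descr, descr_size;
	unsigned short options, num_cookies;
	struct cookie_demo cookies[];	/* flexible array member */
};

int main(void)
{
	unsigned int ncookies = 4;
	struct dring_reg_demo *pkt = NULL;

	/* What the removed code computed by hand ... */
	size_t open_coded = sizeof(struct dring_reg_demo) +
			    ncookies * sizeof(struct cookie_demo);
	/* ... and what struct_size(pkt, cookies, ncookies) expands to,
	 * minus the saturating overflow checks the kernel helper adds. */
	size_t helper_like = sizeof(*pkt) + ncookies * sizeof(pkt->cookies[0]);

	printf("%zu == %zu\n", open_coded, helper_like);
	return 0;
}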
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
index b89d42b29e34..f427f34b8b79 100644
--- a/arch/sparc/lib/memset.S
+++ b/arch/sparc/lib/memset.S
@@ -142,6 +142,7 @@ __bzero:
ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
13:
+ EXT(12b, 13b, 21f)
be 8f
andcc %o1, 4, %g0
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index eb2946b1df8a..6139c5700ccc 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -197,6 +197,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
size = memblock_phys_mem_size() - memblock_reserved_size();
*pages_avail = (size >> PAGE_SHIFT) - high_pages;
+ /* Only allow low memory to be allocated via memblock allocation */
+ memblock_set_current_limit(max_low_pfn << PAGE_SHIFT);
+
return max_pfn;
}
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index a03caa5f6628..a9aa6a92c7fe 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -351,7 +351,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
pte_t *ptep;
struct page *page;
- if ((ptep = pte_alloc_one_kernel(mm)) == 0)
+ if (!(ptep = pte_alloc_one_kernel(mm)))
return NULL;
page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
spin_lock(&mm->page_table_lock);
@@ -689,7 +689,7 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
pgdp = pgd_offset_k(start);
p4dp = p4d_offset(pgdp, start);
pudp = pud_offset(p4dp, start);
- if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
+ if (pud_none(*__nocache_fix(pudp))) {
pmdp = __srmmu_get_nocache(
SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL)
@@ -698,7 +698,7 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
pud_set(__nocache_fix(pudp), pmdp);
}
pmdp = pmd_offset(__nocache_fix(pudp), start);
- if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+ if (srmmu_pmd_none(*__nocache_fix(pmdp))) {
ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
if (ptep == NULL)
early_pgtable_allocfail("pte");
@@ -810,11 +810,11 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
p4dp = p4d_offset(pgdp, start);
pudp = pud_offset(p4dp, start);
if (what == 2) {
- *(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
+ *__nocache_fix(pgdp) = __pgd(probed);
start += PGDIR_SIZE;
continue;
}
- if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
+ if (pud_none(*__nocache_fix(pudp))) {
pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL)
@@ -822,13 +822,13 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
pud_set(__nocache_fix(pudp), pmdp);
}
- pmdp = pmd_offset(__nocache_fix(pgdp), start);
+ pmdp = pmd_offset(__nocache_fix(pudp), start);
if (what == 1) {
*(pmd_t *)__nocache_fix(pmdp) = __pmd(probed);
start += PMD_SIZE;
continue;
}
- if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+ if (srmmu_pmd_none(*__nocache_fix(pmdp))) {
ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
if (ptep == NULL)
early_pgtable_allocfail("pte");
@@ -836,7 +836,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
pmd_set(__nocache_fix(pmdp), ptep);
}
ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
- *(pte_t *)__nocache_fix(ptep) = __pte(probed);
+ *__nocache_fix(ptep) = __pte(probed);
start += PAGE_SIZE;
}
}
@@ -850,7 +850,7 @@ static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base
unsigned long big_pte;
big_pte = KERNEL_PTE(phys_base >> 4);
- *(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
+ *__nocache_fix(pgdp) = __pgd(big_pte);
}
/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
@@ -940,7 +940,7 @@ void __init srmmu_paging_init(void)
srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa(srmmu_context_table);
for (i = 0; i < num_contexts; i++)
- srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
+ srmmu_ctxd_set(__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
flush_cache_all();
srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 3364e2a00989..4b8d3c65d266 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1366,12 +1366,18 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
break;
}
- /* STX XADD: lock *(u32 *)(dst + off) += src */
- case BPF_STX | BPF_XADD | BPF_W: {
+ case BPF_STX | BPF_ATOMIC | BPF_W: {
const u8 tmp = bpf2sparc[TMP_REG_1];
const u8 tmp2 = bpf2sparc[TMP_REG_2];
const u8 tmp3 = bpf2sparc[TMP_REG_3];
+ if (insn->imm != BPF_ADD) {
+ pr_err_once("unknown atomic op %02x\n", insn->imm);
+ return -EINVAL;
+ }
+
+ /* lock *(u32 *)(dst + off) += src */
+
if (insn->dst_reg == BPF_REG_FP)
ctx->saw_frame_pointer = true;
@@ -1390,11 +1396,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
break;
}
/* STX XADD: lock *(u64 *)(dst + off) += src */
- case BPF_STX | BPF_XADD | BPF_DW: {
+ case BPF_STX | BPF_ATOMIC | BPF_DW: {
const u8 tmp = bpf2sparc[TMP_REG_1];
const u8 tmp2 = bpf2sparc[TMP_REG_2];
const u8 tmp3 = bpf2sparc[TMP_REG_3];
+ if (insn->imm != BPF_ADD) {
+ pr_err_once("unknown atomic op %02x\n", insn->imm);
+ return -EINVAL;
+ }
+
if (insn->dst_reg == BPF_REG_FP)
ctx->saw_frame_pointer = true;
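For context on the two hunks above: BPF_ATOMIC reuses the old BPF_XADD class value, and the requested operation now travels in insn->imm, so a JIT has to check it explicitly; an XADD-era instruction is simply BPF_ATOMIC with imm == BPF_ADD. The dispatch this backend performs, shown as a sketch (it mirrors the code added above and is not additional patch content):

/* Sketch only: dispatching the BPF_ATOMIC encoding. The operation is
 * carried in insn->imm, and this JIT implements only plain BPF_ADD. */
switch (insn->imm) {
case BPF_ADD:
	/* emit: lock *(u32/u64 *)(dst + off) += src */
	break;
default:
	pr_err_once("unknown atomic op %02x\n", insn->imm);
	return -EINVAL;
}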
diff --git a/arch/sparc/oprofile/Makefile b/arch/sparc/oprofile/Makefile
deleted file mode 100644
index fe906e403d3a..000000000000
--- a/arch/sparc/oprofile/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) init.o
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c
deleted file mode 100644
index 43730c9b1c86..000000000000
--- a/arch/sparc/oprofile/init.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * @file init.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/oprofile.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/param.h> /* for HZ */
-
-#ifdef CONFIG_SPARC64
-#include <linux/notifier.h>
-#include <linux/rcupdate.h>
-#include <linux/kdebug.h>
-#include <asm/nmi.h>
-
-static int profile_timer_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- struct die_args *args = data;
- int ret = NOTIFY_DONE;
-
- switch (val) {
- case DIE_NMI:
- oprofile_add_sample(args->regs, 0);
- ret = NOTIFY_STOP;
- break;
- default:
- break;
- }
- return ret;
-}
-
-static struct notifier_block profile_timer_exceptions_nb = {
- .notifier_call = profile_timer_exceptions_notify,
-};
-
-static int timer_start(void)
-{
- if (register_die_notifier(&profile_timer_exceptions_nb))
- return 1;
- nmi_adjust_hz(HZ);
- return 0;
-}
-
-
-static void timer_stop(void)
-{
- nmi_adjust_hz(1);
- unregister_die_notifier(&profile_timer_exceptions_nb);
- synchronize_rcu(); /* Allow already-started NMIs to complete. */
-}
-
-static int op_nmi_timer_init(struct oprofile_operations *ops)
-{
- if (atomic_read(&nmi_active) <= 0)
- return -ENODEV;
-
- ops->start = timer_start;
- ops->stop = timer_stop;
- ops->cpu_type = "timer";
- printk(KERN_INFO "oprofile: Using perfctr NMI timer interrupt.\n");
- return 0;
-}
-#endif
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- int ret = -ENODEV;
-
-#ifdef CONFIG_SPARC64
- ret = op_nmi_timer_init(ops);
- if (!ret)
- return ret;
-#endif
-
- return ret;
-}
-
-void oprofile_arch_exit(void)
-{
-}
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 34d302d1a07f..c3030db3325f 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -15,7 +15,6 @@ config UML
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_BUGVERBOSE
select NO_DMA
- select ARCH_HAS_SET_MEMORY
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select HAVE_GCC_PLUGINS
diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig
index 2e7b8e0e7194..03ba34b61115 100644
--- a/arch/um/drivers/Kconfig
+++ b/arch/um/drivers/Kconfig
@@ -323,7 +323,7 @@ config UML_NET_SLIRP
frames. In general, slirp allows the UML the same IP connectivity
to the outside world that the host user is permitted, and unlike
other transports, SLiRP works without the need of root level
- privleges, setuid binaries, or SLIP devices on the host. This
+ privileges, setuid binaries, or SLIP devices on the host. This
also means not every type of connection is possible, but most
situations can be accommodated with carefully crafted slirp
commands that can be passed along as part of the network device's
@@ -346,3 +346,14 @@ config VIRTIO_UML
help
This driver provides support for virtio based paravirtual device
drivers over vhost-user sockets.
+
+config UML_RTC
+ bool "UML RTC driver"
+ depends on RTC_CLASS
+ # there's no use in this if PM_SLEEP isn't enabled ...
+ depends on PM_SLEEP
+ help
+ When PM_SLEEP is configured, it may be desirable to wake up using
+ rtcwake, especially in time-travel mode. This driver enables that
+ by providing a fake RTC clock that causes a wakeup at the right
+ time.
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
index 2a249f619467..dcc64a02f81f 100644
--- a/arch/um/drivers/Makefile
+++ b/arch/um/drivers/Makefile
@@ -17,6 +17,7 @@ hostaudio-objs := hostaudio_kern.o
ubd-objs := ubd_kern.o ubd_user.o
port-objs := port_kern.o port_user.o
harddog-objs := harddog_kern.o harddog_user.o
+rtc-objs := rtc_kern.o rtc_user.o
LDFLAGS_pcap.o = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libpcap.a)
@@ -62,6 +63,7 @@ obj-$(CONFIG_UML_WATCHDOG) += harddog.o
obj-$(CONFIG_BLK_DEV_COW_COMMON) += cow_user.o
obj-$(CONFIG_UML_RANDOM) += random.o
obj-$(CONFIG_VIRTIO_UML) += virtio_uml.o
+obj-$(CONFIG_UML_RTC) += rtc.o
# pcap_user.o must be added explicitly.
USER_OBJS := fd.o null.o pty.o tty.o xterm.o slip_common.o pcap_user.o vde_user.o vector_user.o
diff --git a/arch/um/drivers/rtc.h b/arch/um/drivers/rtc.h
new file mode 100644
index 000000000000..95e41c7d35c4
--- /dev/null
+++ b/arch/um/drivers/rtc.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#ifndef __UM_RTC_H__
+#define __UM_RTC_H__
+
+int uml_rtc_start(bool timetravel);
+int uml_rtc_enable_alarm(unsigned long long delta_seconds);
+void uml_rtc_disable_alarm(void);
+void uml_rtc_stop(bool timetravel);
+void uml_rtc_send_timetravel_alarm(void);
+
+#endif /* __UM_RTC_H__ */
diff --git a/arch/um/drivers/rtc_kern.c b/arch/um/drivers/rtc_kern.c
new file mode 100644
index 000000000000..97ceb205cfe6
--- /dev/null
+++ b/arch/um/drivers/rtc_kern.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#include <linux/platform_device.h>
+#include <linux/time-internal.h>
+#include <linux/suspend.h>
+#include <linux/err.h>
+#include <linux/rtc.h>
+#include <kern_util.h>
+#include <irq_kern.h>
+#include <os.h>
+#include "rtc.h"
+
+static time64_t uml_rtc_alarm_time;
+static bool uml_rtc_alarm_enabled;
+static struct rtc_device *uml_rtc;
+static int uml_rtc_irq_fd, uml_rtc_irq;
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+
+static void uml_rtc_time_travel_alarm(struct time_travel_event *ev)
+{
+ uml_rtc_send_timetravel_alarm();
+}
+
+static struct time_travel_event uml_rtc_alarm_event = {
+ .fn = uml_rtc_time_travel_alarm,
+};
+#endif
+
+static int uml_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct timespec64 ts;
+
+ /* Use this to get correct time in time-travel mode */
+ read_persistent_clock64(&ts);
+ rtc_time64_to_tm(timespec64_to_ktime(ts) / NSEC_PER_SEC, tm);
+
+ return 0;
+}
+
+static int uml_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ rtc_time64_to_tm(uml_rtc_alarm_time, &alrm->time);
+ alrm->enabled = uml_rtc_alarm_enabled;
+
+ return 0;
+}
+
+static int uml_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
+{
+ unsigned long long secs;
+
+ if (!enable && !uml_rtc_alarm_enabled)
+ return 0;
+
+ uml_rtc_alarm_enabled = enable;
+
+ secs = uml_rtc_alarm_time - ktime_get_real_seconds();
+
+ if (time_travel_mode == TT_MODE_OFF) {
+ if (!enable) {
+ uml_rtc_disable_alarm();
+ return 0;
+ }
+
+ /* enable or update */
+ return uml_rtc_enable_alarm(secs);
+ } else {
+ time_travel_del_event(&uml_rtc_alarm_event);
+
+ if (enable)
+ time_travel_add_event_rel(&uml_rtc_alarm_event,
+ secs * NSEC_PER_SEC);
+ }
+
+ return 0;
+}
+
+static int uml_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ uml_rtc_alarm_irq_enable(dev, 0);
+ uml_rtc_alarm_time = rtc_tm_to_time64(&alrm->time);
+ uml_rtc_alarm_irq_enable(dev, alrm->enabled);
+
+ return 0;
+}
+
+static const struct rtc_class_ops uml_rtc_ops = {
+ .read_time = uml_rtc_read_time,
+ .read_alarm = uml_rtc_read_alarm,
+ .alarm_irq_enable = uml_rtc_alarm_irq_enable,
+ .set_alarm = uml_rtc_set_alarm,
+};
+
+static irqreturn_t uml_rtc_interrupt(int irq, void *data)
+{
+ unsigned long long c = 0;
+
+ /* alarm triggered, it's now off */
+ uml_rtc_alarm_enabled = false;
+
+ os_read_file(uml_rtc_irq_fd, &c, sizeof(c));
+ WARN_ON(c == 0);
+
+ pm_system_wakeup();
+ rtc_update_irq(uml_rtc, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static int uml_rtc_setup(void)
+{
+ int err;
+
+ err = uml_rtc_start(time_travel_mode != TT_MODE_OFF);
+ if (WARN(err < 0, "err = %d\n", err))
+ return err;
+
+ uml_rtc_irq_fd = err;
+
+ err = um_request_irq(UM_IRQ_ALLOC, uml_rtc_irq_fd, IRQ_READ,
+ uml_rtc_interrupt, 0, "rtc", NULL);
+ if (err < 0) {
+ uml_rtc_stop(time_travel_mode != TT_MODE_OFF);
+ return err;
+ }
+
+ irq_set_irq_wake(err, 1);
+
+ uml_rtc_irq = err;
+ return 0;
+}
+
+static void uml_rtc_cleanup(void)
+{
+ um_free_irq(uml_rtc_irq, NULL);
+ uml_rtc_stop(time_travel_mode != TT_MODE_OFF);
+}
+
+static int uml_rtc_probe(struct platform_device *pdev)
+{
+ int err;
+
+ err = uml_rtc_setup();
+ if (err)
+ return err;
+
+ uml_rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(uml_rtc)) {
+ err = PTR_ERR(uml_rtc);
+ goto cleanup;
+ }
+
+ uml_rtc->ops = &uml_rtc_ops;
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ err = devm_rtc_register_device(uml_rtc);
+ if (err)
+ goto cleanup;
+
+ return 0;
+cleanup:
+ uml_rtc_cleanup();
+ return err;
+}
+
+static int uml_rtc_remove(struct platform_device *pdev)
+{
+ device_init_wakeup(&pdev->dev, 0);
+ uml_rtc_cleanup();
+ return 0;
+}
+
+static struct platform_driver uml_rtc_driver = {
+ .probe = uml_rtc_probe,
+ .remove = uml_rtc_remove,
+ .driver = {
+ .name = "uml-rtc",
+ },
+};
+
+static int __init uml_rtc_init(void)
+{
+ struct platform_device *pdev;
+ int err;
+
+ err = platform_driver_register(&uml_rtc_driver);
+ if (err)
+ return err;
+
+ pdev = platform_device_alloc("uml-rtc", 0);
+ if (!pdev) {
+ err = -ENOMEM;
+ goto unregister;
+ }
+
+ err = platform_device_add(pdev);
+ if (err)
+ goto unregister;
+ return 0;
+
+unregister:
+ platform_device_put(pdev);
+ platform_driver_unregister(&uml_rtc_driver);
+ return err;
+}
+device_initcall(uml_rtc_init);
diff --git a/arch/um/drivers/rtc_user.c b/arch/um/drivers/rtc_user.c
new file mode 100644
index 000000000000..4016bc1d577e
--- /dev/null
+++ b/arch/um/drivers/rtc_user.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#include <os.h>
+#include <errno.h>
+#include <sched.h>
+#include <unistd.h>
+#include <kern_util.h>
+#include <sys/select.h>
+#include <stdio.h>
+#include <sys/timerfd.h>
+#include "rtc.h"
+
+static int uml_rtc_irq_fds[2];
+
+void uml_rtc_send_timetravel_alarm(void)
+{
+ unsigned long long c = 1;
+
+ CATCH_EINTR(write(uml_rtc_irq_fds[1], &c, sizeof(c)));
+}
+
+int uml_rtc_start(bool timetravel)
+{
+ int err;
+
+ if (timetravel) {
+ int err = os_pipe(uml_rtc_irq_fds, 1, 1);
+ if (err)
+ goto fail;
+ } else {
+ uml_rtc_irq_fds[0] = timerfd_create(CLOCK_REALTIME, TFD_CLOEXEC);
+ if (uml_rtc_irq_fds[0] < 0) {
+ err = -errno;
+ goto fail;
+ }
+
+ /* apparently timerfd won't send SIGIO, use workaround */
+ sigio_broken(uml_rtc_irq_fds[0]);
+ err = add_sigio_fd(uml_rtc_irq_fds[0]);
+ if (err < 0) {
+ close(uml_rtc_irq_fds[0]);
+ goto fail;
+ }
+ }
+
+ return uml_rtc_irq_fds[0];
+fail:
+ uml_rtc_stop(timetravel);
+ return err;
+}
+
+int uml_rtc_enable_alarm(unsigned long long delta_seconds)
+{
+ struct itimerspec it = {
+ .it_value = {
+ .tv_sec = delta_seconds,
+ },
+ };
+
+ if (timerfd_settime(uml_rtc_irq_fds[0], 0, &it, NULL))
+ return -errno;
+ return 0;
+}
+
+void uml_rtc_disable_alarm(void)
+{
+ uml_rtc_enable_alarm(0);
+}
+
+void uml_rtc_stop(bool timetravel)
+{
+ if (timetravel)
+ os_close_file(uml_rtc_irq_fds[1]);
+ else
+ ignore_sigio_fd(uml_rtc_irq_fds[0]);
+ os_close_file(uml_rtc_irq_fds[0]);
+}
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 13b1fe694b90..8e0b43cf089f 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -375,11 +375,11 @@ break_loop:
file = NULL;
backing_file = strsep(&str, ",:");
- if (*backing_file == '\0')
+ if (backing_file && *backing_file == '\0')
backing_file = NULL;
serial = strsep(&str, ",:");
- if (*serial == '\0')
+ if (serial && *serial == '\0')
serial = NULL;
if (backing_file && ubd_dev->no_cow) {
@@ -1241,7 +1241,7 @@ static int __init ubd_driver_init(void){
/* Letting ubd=sync be like using ubd#s= instead of ubd#= is
* enough. So use anyway the io thread. */
}
- stack = alloc_stack(0);
+ stack = alloc_stack(0, 0);
io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *),
&thread_fd);
if(io_pid < 0){
diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
index 27e92d3881ff..91ddf74ca888 100644
--- a/arch/um/drivers/virtio_uml.c
+++ b/arch/um/drivers/virtio_uml.c
@@ -55,16 +55,16 @@ struct virtio_uml_device {
u64 protocol_features;
u8 status;
u8 registered:1;
+ u8 suspended:1;
+
+ u8 config_changed_irq:1;
+ uint64_t vq_irq_vq_map;
};
struct virtio_uml_vq_info {
int kick_fd, call_fd;
char name[32];
-#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
- struct virtqueue *vq;
- vq_callback_t *callback;
- struct time_travel_event defer;
-#endif
+ bool suspended;
};
extern unsigned long long physmem_size, highmem;
@@ -97,6 +97,9 @@ static int full_read(int fd, void *buf, int len, bool abortable)
{
int rc;
+ if (!len)
+ return 0;
+
do {
rc = os_read_file(fd, buf, len);
if (rc > 0) {
@@ -347,9 +350,9 @@ static void vhost_user_reply(struct virtio_uml_device *vu_dev,
rc, size);
}
-static irqreturn_t vu_req_interrupt(int irq, void *data)
+static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev,
+ struct time_travel_event *ev)
{
- struct virtio_uml_device *vu_dev = data;
struct virtqueue *vq;
int response = 1;
struct {
@@ -367,14 +370,14 @@ static irqreturn_t vu_req_interrupt(int irq, void *data)
switch (msg.msg.header.request) {
case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
- virtio_config_changed(&vu_dev->vdev);
+ vu_dev->config_changed_irq = true;
response = 0;
break;
case VHOST_USER_SLAVE_VRING_CALL:
virtio_device_for_each_vq((&vu_dev->vdev), vq) {
if (vq->index == msg.msg.payload.vring_state.index) {
response = 0;
- vring_interrupt(0 /* ignored */, vq);
+ vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index);
break;
}
}
@@ -388,12 +391,45 @@ static irqreturn_t vu_req_interrupt(int irq, void *data)
msg.msg.header.request);
}
+ if (ev && !vu_dev->suspended)
+ time_travel_add_irq_event(ev);
+
if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
vhost_user_reply(vu_dev, &msg.msg, response);
return IRQ_HANDLED;
}
+static irqreturn_t vu_req_interrupt(int irq, void *data)
+{
+ struct virtio_uml_device *vu_dev = data;
+ irqreturn_t ret = IRQ_HANDLED;
+
+ if (!um_irq_timetravel_handler_used())
+ ret = vu_req_read_message(vu_dev, NULL);
+
+ if (vu_dev->vq_irq_vq_map) {
+ struct virtqueue *vq;
+
+ virtio_device_for_each_vq((&vu_dev->vdev), vq) {
+ if (vu_dev->vq_irq_vq_map & BIT_ULL(vq->index))
+ vring_interrupt(0 /* ignored */, vq);
+ }
+ vu_dev->vq_irq_vq_map = 0;
+ } else if (vu_dev->config_changed_irq) {
+ virtio_config_changed(&vu_dev->vdev);
+ vu_dev->config_changed_irq = false;
+ }
+
+ return ret;
+}
+
+static void vu_req_interrupt_comm_handler(int irq, int fd, void *data,
+ struct time_travel_event *ev)
+{
+ vu_req_read_message(data, ev);
+}
+
static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev)
{
int rc, req_fds[2];
@@ -404,9 +440,10 @@ static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev)
return rc;
vu_dev->req_fd = req_fds[0];
- rc = um_request_irq(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ,
- vu_req_interrupt, IRQF_SHARED,
- vu_dev->pdev->name, vu_dev);
+ rc = um_request_irq_tt(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ,
+ vu_req_interrupt, IRQF_SHARED,
+ vu_dev->pdev->name, vu_dev,
+ vu_req_interrupt_comm_handler);
if (rc < 0)
goto err_close;
@@ -722,6 +759,9 @@ static bool vu_notify(struct virtqueue *vq)
const uint64_t n = 1;
int rc;
+ if (info->suspended)
+ return true;
+
time_travel_propagate_time();
if (info->kick_fd < 0) {
@@ -875,23 +915,6 @@ out:
return rc;
}
-#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
-static void vu_defer_irq_handle(struct time_travel_event *d)
-{
- struct virtio_uml_vq_info *info;
-
- info = container_of(d, struct virtio_uml_vq_info, defer);
- info->callback(info->vq);
-}
-
-static void vu_defer_irq_callback(struct virtqueue *vq)
-{
- struct virtio_uml_vq_info *info = vq->priv;
-
- time_travel_add_irq_event(&info->defer);
-}
-#endif
-
static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
unsigned index, vq_callback_t *callback,
const char *name, bool ctx)
@@ -911,19 +934,6 @@ static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,
pdev->id, name);
-#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
- /*
- * When we get an interrupt, we must bounce it through the simulation
- * calendar (the simtime device), except for the simtime device itself
- * since that's part of the simulation control.
- */
- if (time_travel_mode == TT_MODE_EXTERNAL && callback) {
- info->callback = callback;
- callback = vu_defer_irq_callback;
- time_travel_set_event_fn(&info->defer, vu_defer_irq_handle);
- }
-#endif
-
vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
ctx, vu_notify, callback, info->name);
if (!vq) {
@@ -932,9 +942,6 @@ static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
}
vq->priv = info;
num = virtqueue_get_vring_size(vq);
-#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
- info->vq = vq;
-#endif
if (vu_dev->protocol_features &
BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) {
@@ -993,6 +1000,10 @@ static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
int i, queue_idx = 0, rc;
struct virtqueue *vq;
+ /* not supported for now */
+ if (WARN_ON(nvqs > 64))
+ return -EINVAL;
+
rc = vhost_user_set_mem_table(vu_dev);
if (rc)
return rc;
@@ -1084,6 +1095,7 @@ static void virtio_uml_release_dev(struct device *d)
}
os_close_file(vu_dev->sock);
+ kfree(vu_dev);
}
/* Platform device */
@@ -1097,7 +1109,7 @@ static int virtio_uml_probe(struct platform_device *pdev)
if (!pdata)
return -EINVAL;
- vu_dev = devm_kzalloc(&pdev->dev, sizeof(*vu_dev), GFP_KERNEL);
+ vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
if (!vu_dev)
return -ENOMEM;
@@ -1124,6 +1136,8 @@ static int virtio_uml_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vu_dev);
+ device_set_wakeup_capable(&vu_dev->vdev.dev, true);
+
rc = register_virtio_device(&vu_dev->vdev);
if (rc)
put_device(&vu_dev->vdev.dev);
@@ -1285,6 +1299,46 @@ static const struct of_device_id virtio_uml_match[] = {
};
MODULE_DEVICE_TABLE(of, virtio_uml_match);
+static int virtio_uml_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);
+ struct virtqueue *vq;
+
+ virtio_device_for_each_vq((&vu_dev->vdev), vq) {
+ struct virtio_uml_vq_info *info = vq->priv;
+
+ info->suspended = true;
+ vhost_user_set_vring_enable(vu_dev, vq->index, false);
+ }
+
+ if (!device_may_wakeup(&vu_dev->vdev.dev)) {
+ vu_dev->suspended = true;
+ return 0;
+ }
+
+ return irq_set_irq_wake(vu_dev->irq, 1);
+}
+
+static int virtio_uml_resume(struct platform_device *pdev)
+{
+ struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);
+ struct virtqueue *vq;
+
+ virtio_device_for_each_vq((&vu_dev->vdev), vq) {
+ struct virtio_uml_vq_info *info = vq->priv;
+
+ info->suspended = false;
+ vhost_user_set_vring_enable(vu_dev, vq->index, true);
+ }
+
+ vu_dev->suspended = false;
+
+ if (!device_may_wakeup(&vu_dev->vdev.dev))
+ return 0;
+
+ return irq_set_irq_wake(vu_dev->irq, 0);
+}
+
static struct platform_driver virtio_uml_driver = {
.probe = virtio_uml_probe,
.remove = virtio_uml_remove,
@@ -1292,6 +1346,8 @@ static struct platform_driver virtio_uml_driver = {
.name = "virtio-uml",
.of_match_table = virtio_uml_match,
},
+ .suspend = virtio_uml_suspend,
+ .resume = virtio_uml_resume,
};
static int __init virtio_uml_init(void)
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 1c63b260ecc4..d7492e5a1bbb 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -14,15 +14,16 @@ generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
generic-y += mmiowb.h
generic-y += module.lds.h
generic-y += param.h
generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
+generic-y += softirq_stack.h
generic-y += switch_to.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += word-at-a-time.h
generic-y += kprobes.h
+generic-y += mm_hooks.h
diff --git a/arch/um/include/asm/io.h b/arch/um/include/asm/io.h
index 96f77b5232aa..6ce18d343997 100644
--- a/arch/um/include/asm/io.h
+++ b/arch/um/include/asm/io.h
@@ -1,11 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_UM_IO_H
#define _ASM_UM_IO_H
+#include <linux/types.h>
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
- return (void __iomem *)(unsigned long)offset;
+ return NULL;
}
#define iounmap iounmap
diff --git a/arch/um/include/asm/irq.h b/arch/um/include/asm/irq.h
index 547bff7b3a89..3f5d3e8228fc 100644
--- a/arch/um/include/asm/irq.h
+++ b/arch/um/include/asm/irq.h
@@ -33,4 +33,5 @@
#define NR_IRQS 64
+#include <asm-generic/irq.h>
#endif
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index f8a100770691..68e2eb9cfb47 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -10,33 +10,9 @@
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
+#include <asm/mm_hooks.h>
#include <asm/mmu.h>
-extern void uml_setup_stubs(struct mm_struct *mm);
-/*
- * Needed since we do not use the asm-generic/mm_hooks.h:
- */
-static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
-{
- uml_setup_stubs(mm);
- return 0;
-}
-extern void arch_exit_mmap(struct mm_struct *mm);
-static inline void arch_unmap(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
-}
-static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
- bool write, bool execute, bool foreign)
-{
- /* by default, allow everything */
- return true;
-}
-
-/*
- * end asm-generic/mm_hooks.h functions
- */
-
extern void force_flush_all(void);
#define activate_mm activate_mm
@@ -47,9 +23,6 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
* when the new ->mm is used for the first time.
*/
__switch_mm(&new->context.id);
- mmap_write_lock_nested(new, SINGLE_DEPTH_NESTING);
- uml_setup_stubs(new);
- mmap_write_unlock(new);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 39376bb63abf..def376194dce 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -55,15 +55,12 @@ extern unsigned long end_iomem;
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC \
(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define __PAGE_KERNEL_RO \
- (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
/*
* The i386 can't do page protection for execute, and considers that the same
diff --git a/arch/um/include/asm/set_memory.h b/arch/um/include/asm/set_memory.h
deleted file mode 100644
index 24266c63720d..000000000000
--- a/arch/um/include/asm/set_memory.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/set_memory.h>
diff --git a/arch/um/include/linux/time-internal.h b/arch/um/include/linux/time-internal.h
index 68e45e950137..759956ab0108 100644
--- a/arch/um/include/linux/time-internal.h
+++ b/arch/um/include/linux/time-internal.h
@@ -7,6 +7,7 @@
#ifndef __TIMER_INTERNAL_H__
#define __TIMER_INTERNAL_H__
#include <linux/list.h>
+#include <asm/bug.h>
#define TIMER_MULTIPLIER 256
#define TIMER_MIN_DELTA 500
@@ -54,6 +55,9 @@ static inline void time_travel_wait_readable(int fd)
}
void time_travel_add_irq_event(struct time_travel_event *e);
+void time_travel_add_event_rel(struct time_travel_event *e,
+ unsigned long long delay_ns);
+bool time_travel_del_event(struct time_travel_event *e);
#else
struct time_travel_event {
};
@@ -74,6 +78,19 @@ static inline void time_travel_propagate_time(void)
static inline void time_travel_wait_readable(int fd)
{
}
+
+static inline void time_travel_add_irq_event(struct time_travel_event *e)
+{
+ WARN_ON(1);
+}
+
+/*
+ * not inlines so the data structure need not exist,
+ * cause linker failures
+ */
+extern void time_travel_not_configured(void);
+#define time_travel_add_event_rel(...) time_travel_not_configured()
+#define time_travel_del_event(...) time_travel_not_configured()
#endif /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
/*
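The "cause linker failures" comment in the hunk above refers to a common trick: the unconfigured case is routed to an extern function that is never defined, so accidental use still compiles but fails at link time. A stand-alone illustration (plain userspace C, hypothetical names; it intentionally does not link):

/* The guard: declared but never defined anywhere. */
extern void feature_not_configured(void);
#define feature_do_thing(...) feature_not_configured()

int main(void)
{
	feature_do_thing(1, 2);	/* compiles; linker: undefined reference */
	return 0;
}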
diff --git a/arch/um/include/shared/as-layout.h b/arch/um/include/shared/as-layout.h
index 5f286ef2721b..9a0bd648d872 100644
--- a/arch/um/include/shared/as-layout.h
+++ b/arch/um/include/shared/as-layout.h
@@ -20,18 +20,10 @@
* 'UL' and other type specifiers unilaterally. We
* use the following macros to deal with this.
*/
-
-#ifdef __ASSEMBLY__
-#define _UML_AC(X, Y) (Y)
-#else
-#define __UML_AC(X, Y) (X(Y))
-#define _UML_AC(X, Y) __UML_AC(X, Y)
-#endif
-
-#define STUB_START _UML_AC(, 0x100000)
-#define STUB_CODE _UML_AC((unsigned long), STUB_START)
-#define STUB_DATA _UML_AC((unsigned long), STUB_CODE + UM_KERN_PAGE_SIZE)
-#define STUB_END _UML_AC((unsigned long), STUB_DATA + UM_KERN_PAGE_SIZE)
+#define STUB_START stub_start
+#define STUB_CODE STUB_START
+#define STUB_DATA (STUB_CODE + UM_KERN_PAGE_SIZE)
+#define STUB_END (STUB_DATA + UM_KERN_PAGE_SIZE)
#ifndef __ASSEMBLY__
@@ -54,6 +46,7 @@ extern unsigned long long highmem;
extern unsigned long brk_start;
extern unsigned long host_task_size;
+extern unsigned long stub_start;
extern int linux_main(int argc, char **argv);
extern void uml_finishsetup(void);
diff --git a/arch/um/include/shared/common-offsets.h b/arch/um/include/shared/common-offsets.h
index 16a51a8c800f..edc90ab73734 100644
--- a/arch/um/include/shared/common-offsets.h
+++ b/arch/um/include/shared/common-offsets.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* for use by sys-$SUBARCH/kernel-offsets.c */
+#include <stub-data.h>
DEFINE(KERNEL_MADV_REMOVE, MADV_REMOVE);
@@ -43,3 +44,8 @@ DEFINE(UML_CONFIG_64BIT, CONFIG_64BIT);
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
DEFINE(UML_CONFIG_UML_TIME_TRAVEL_SUPPORT, CONFIG_UML_TIME_TRAVEL_SUPPORT);
#endif
+
+/* for stub */
+DEFINE(UML_STUB_FIELD_OFFSET, offsetof(struct stub_data, offset));
+DEFINE(UML_STUB_FIELD_CHILD_ERR, offsetof(struct stub_data, child_err));
+DEFINE(UML_STUB_FIELD_FD, offsetof(struct stub_data, fd));
diff --git a/arch/um/include/shared/irq_kern.h b/arch/um/include/shared/irq_kern.h
index 7807de593bda..f2dc817abb7c 100644
--- a/arch/um/include/shared/irq_kern.h
+++ b/arch/um/include/shared/irq_kern.h
@@ -7,6 +7,7 @@
#define __IRQ_KERN_H__
#include <linux/interrupt.h>
+#include <linux/time-internal.h>
#include <asm/ptrace.h>
#include "irq_user.h"
@@ -15,5 +16,64 @@
int um_request_irq(int irq, int fd, enum um_irq_type type,
irq_handler_t handler, unsigned long irqflags,
const char *devname, void *dev_id);
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+/**
+ * um_request_irq_tt - request an IRQ with timetravel handler
+ *
+ * @irq: the IRQ number, or %UM_IRQ_ALLOC
+ * @fd: The file descriptor to request an IRQ for
+ * @type: read or write
+ * @handler: the (generic style) IRQ handler
+ * @irqflags: Linux IRQ flags
+ * @devname: name for this to show
+ * @dev_id: data pointer to pass to the IRQ handler
+ * @timetravel_handler: the timetravel interrupt handler, invoked with the IRQ
+ * number, fd, dev_id and time-travel event pointer.
+ *
+ * Returns: The interrupt number assigned or a negative error.
+ *
+ * Note that the timetravel handler is invoked only if the time_travel_mode is
+ * %TT_MODE_EXTERNAL, and then it is invoked even while the system is suspended!
+ * This function must call time_travel_add_irq_event() for the event passed with
+ * an appropriate delay, before sending an ACK on the socket it was invoked for.
+ *
+ * If this was called while the system is suspended, then adding the event will
+ * cause the system to resume.
+ *
+ * Since this function will almost certainly have to handle the FD's condition,
+ * a read will consume the message, and after that it is up to the code using
+ * it to pass such a message to the @handler in whichever way it can.
+ *
+ * If time_travel_mode is not %TT_MODE_EXTERNAL the @timetravel_handler will
+ * not be invoked at all and the @handler must handle the FD becoming
+ * readable (or writable) instead. Use um_irq_timetravel_handler_used() to
+ * distinguish these cases.
+ *
+ * See virtio_uml.c for an example.
+ */
+int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
+ irq_handler_t handler, unsigned long irqflags,
+ const char *devname, void *dev_id,
+ void (*timetravel_handler)(int, int, void *,
+ struct time_travel_event *));
+#else
+static inline
+int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
+ irq_handler_t handler, unsigned long irqflags,
+ const char *devname, void *dev_id,
+ void (*timetravel_handler)(int, int, void *,
+ struct time_travel_event *))
+{
+ return um_request_irq(irq, fd, type, handler, irqflags,
+ devname, dev_id);
+}
+#endif
+
+static inline bool um_irq_timetravel_handler_used(void)
+{
+ return time_travel_mode == TT_MODE_EXTERNAL;
+}
+
void um_free_irq(int irq, void *dev_id);
#endif
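A minimal usage sketch of the new interface (the demo_* names are hypothetical; the call pattern mirrors the virtio_uml changes later in this patch):

static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
	/* Normal path: runs when no time-travel handler is in use,
	 * so the FD's message is read and handled here. */
	return IRQ_HANDLED;
}

static void demo_timetravel_handler(int irq, int fd, void *dev_id,
				    struct time_travel_event *ev)
{
	/* TT_MODE_EXTERNAL path: consume the message from @fd, then
	 * schedule delivery through the simulation calendar. */
	time_travel_add_irq_event(ev);
}

static int demo_attach(int fd, void *dev_id)
{
	return um_request_irq_tt(UM_IRQ_ALLOC, fd, IRQ_READ,
				 demo_interrupt, IRQF_SHARED,
				 "demo", dev_id, demo_timetravel_handler);
}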
diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
index d8c279e3312f..2888ec812f6e 100644
--- a/arch/um/include/shared/kern_util.h
+++ b/arch/um/include/shared/kern_util.h
@@ -19,7 +19,7 @@ extern int kmalloc_ok;
#define UML_ROUND_UP(addr) \
((((unsigned long) addr) + PAGE_SIZE - 1) & PAGE_MASK)
-extern unsigned long alloc_stack(int atomic);
+extern unsigned long alloc_stack(int order, int atomic);
extern void free_stack(unsigned long stack, int order);
struct pt_regs;
diff --git a/arch/um/include/shared/skas/mm_id.h b/arch/um/include/shared/skas/mm_id.h
index 4337b4ced095..e82e203f5f41 100644
--- a/arch/um/include/shared/skas/mm_id.h
+++ b/arch/um/include/shared/skas/mm_id.h
@@ -12,6 +12,7 @@ struct mm_id {
int pid;
} u;
unsigned long stack;
+ int kill;
};
#endif
diff --git a/arch/um/include/shared/skas/stub-data.h b/arch/um/include/shared/skas/stub-data.h
index 6b01d97a9386..5e3ade3fb38b 100644
--- a/arch/um/include/shared/skas/stub-data.h
+++ b/arch/um/include/shared/skas/stub-data.h
@@ -11,7 +11,7 @@
struct stub_data {
unsigned long offset;
int fd;
- long err;
+ long parent_err, child_err;
};
#endif
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index e8fd5d540b05..4d8498100341 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -26,9 +26,7 @@ void flush_thread(void)
arch_flush_thread(&current->thread.arch);
- ret = unmap(&current->mm->context.id, 0, STUB_START, 0, &data);
- ret = ret || unmap(&current->mm->context.id, STUB_END,
- host_task_size - STUB_END, 1, &data);
+ ret = unmap(&current->mm->context.id, 0, TASK_SIZE, 1, &data);
if (ret) {
printk(KERN_ERR "flush_thread - clearing address space failed, "
"err = %d\n", ret);
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 3741d2380060..82af5191e73d 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -20,7 +20,7 @@
#include <os.h>
#include <irq_user.h>
#include <irq_kern.h>
-#include <as-layout.h>
+#include <linux/time-internal.h>
extern void free_irqs(void);
@@ -38,6 +38,12 @@ struct irq_reg {
bool active;
bool pending;
bool wakeup;
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+ bool pending_on_resume;
+ void (*timetravel_handler)(int, int, void *,
+ struct time_travel_event *);
+ struct time_travel_event event;
+#endif
};
struct irq_entry {
@@ -51,6 +57,7 @@ struct irq_entry {
static DEFINE_SPINLOCK(irq_lock);
static LIST_HEAD(active_fds);
static DECLARE_BITMAP(irqs_allocated, NR_IRQS);
+static bool irqs_suspended;
static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
{
@@ -74,9 +81,65 @@ static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
}
}
-void sigio_handler_suspend(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+static void irq_event_handler(struct time_travel_event *ev)
{
- /* nothing */
+ struct irq_reg *reg = container_of(ev, struct irq_reg, event);
+
+ /* do nothing if suspended - just to cause a wakeup */
+ if (irqs_suspended)
+ return;
+
+ generic_handle_irq(reg->irq);
+}
+
+static bool irq_do_timetravel_handler(struct irq_entry *entry,
+ enum um_irq_type t)
+{
+ struct irq_reg *reg = &entry->reg[t];
+
+ if (!reg->timetravel_handler)
+ return false;
+
+ /* prevent nesting - we'll get it again later when we SIGIO ourselves */
+ if (reg->pending_on_resume)
+ return true;
+
+ reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);
+
+ if (!reg->event.pending)
+ return false;
+
+ if (irqs_suspended)
+ reg->pending_on_resume = true;
+ return true;
+}
+#else
+static bool irq_do_timetravel_handler(struct irq_entry *entry,
+ enum um_irq_type t)
+{
+ return false;
+}
+#endif
+
+static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
+ struct uml_pt_regs *regs)
+{
+ struct irq_reg *reg = &entry->reg[t];
+
+ if (!reg->events)
+ return;
+
+ if (os_epoll_triggered(idx, reg->events) <= 0)
+ return;
+
+ if (irq_do_timetravel_handler(entry, t))
+ return;
+
+ if (irqs_suspended)
+ return;
+
+ irq_io_loop(reg, regs);
}
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
@@ -84,6 +147,9 @@ void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
struct irq_entry *irq_entry;
int n, i;
+ if (irqs_suspended && !um_irq_timetravel_handler_used())
+ return;
+
while (1) {
/* This is now lockless - epoll keeps back-references to the irqs
* which have triggered it so there is no need to walk the irq
@@ -105,19 +171,13 @@ void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
irq_entry = os_epoll_get_data_pointer(i);
- for (t = 0; t < NUM_IRQ_TYPES; t++) {
- int events = irq_entry->reg[t].events;
-
- if (!events)
- continue;
-
- if (os_epoll_triggered(i, events) > 0)
- irq_io_loop(&irq_entry->reg[t], regs);
- }
+ for (t = 0; t < NUM_IRQ_TYPES; t++)
+ sigio_reg_handler(i, irq_entry, t, regs);
}
}
- free_irqs();
+ if (!irqs_suspended)
+ free_irqs();
}
static struct irq_entry *get_irq_entry_by_fd(int fd)
@@ -169,7 +229,9 @@ static void update_or_free_irq_entry(struct irq_entry *entry)
free_irq_entry(entry, false);
}
-static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id)
+static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
+ void (*timetravel_handler)(int, int, void *,
+ struct time_travel_event *))
{
struct irq_entry *irq_entry;
int err, events = os_event_mask(type);
@@ -206,6 +268,13 @@ static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id)
irq_entry->reg[type].active = true;
irq_entry->reg[type].events = events;
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+ if (um_irq_timetravel_handler_used()) {
+ irq_entry->reg[type].timetravel_handler = timetravel_handler;
+ irq_entry->reg[type].event.fn = irq_event_handler;
+ }
+#endif
+
WARN_ON(!update_irq_entry(irq_entry));
spin_unlock_irqrestore(&irq_lock, flags);
@@ -339,9 +408,12 @@ void um_free_irq(int irq, void *dev)
}
EXPORT_SYMBOL(um_free_irq);
-int um_request_irq(int irq, int fd, enum um_irq_type type,
- irq_handler_t handler, unsigned long irqflags,
- const char *devname, void *dev_id)
+static int
+_um_request_irq(int irq, int fd, enum um_irq_type type,
+ irq_handler_t handler, unsigned long irqflags,
+ const char *devname, void *dev_id,
+ void (*timetravel_handler)(int, int, void *,
+ struct time_travel_event *))
{
int err;
@@ -360,7 +432,7 @@ int um_request_irq(int irq, int fd, enum um_irq_type type,
return -ENOSPC;
if (fd != -1) {
- err = activate_fd(irq, fd, type, dev_id);
+ err = activate_fd(irq, fd, type, dev_id, timetravel_handler);
if (err)
goto error;
}
@@ -374,20 +446,41 @@ error:
clear_bit(irq, irqs_allocated);
return err;
}
+
+int um_request_irq(int irq, int fd, enum um_irq_type type,
+ irq_handler_t handler, unsigned long irqflags,
+ const char *devname, void *dev_id)
+{
+ return _um_request_irq(irq, fd, type, handler, irqflags,
+ devname, dev_id, NULL);
+}
EXPORT_SYMBOL(um_request_irq);
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
+ irq_handler_t handler, unsigned long irqflags,
+ const char *devname, void *dev_id,
+ void (*timetravel_handler)(int, int, void *,
+ struct time_travel_event *))
+{
+ return _um_request_irq(irq, fd, type, handler, irqflags,
+ devname, dev_id, timetravel_handler);
+}
+EXPORT_SYMBOL(um_request_irq_tt);
+#endif
+
#ifdef CONFIG_PM_SLEEP
void um_irqs_suspend(void)
{
struct irq_entry *entry;
unsigned long flags;
- sig_info[SIGIO] = sigio_handler_suspend;
+ irqs_suspended = true;
spin_lock_irqsave(&irq_lock, flags);
list_for_each_entry(entry, &active_fds, list) {
enum um_irq_type t;
- bool wake = false;
+ bool clear = true;
for (t = 0; t < NUM_IRQ_TYPES; t++) {
if (!entry->reg[t].events)
@@ -400,13 +493,17 @@ void um_irqs_suspend(void)
* any FDs that should be suspended.
*/
if (entry->reg[t].wakeup ||
- entry->reg[t].irq == SIGIO_WRITE_IRQ) {
- wake = true;
+ entry->reg[t].irq == SIGIO_WRITE_IRQ
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+ || entry->reg[t].timetravel_handler
+#endif
+ ) {
+ clear = false;
break;
}
}
- if (!wake) {
+ if (clear) {
entry->suspended = true;
os_clear_fd_async(entry->fd);
entry->sigio_workaround =
@@ -421,7 +518,31 @@ void um_irqs_resume(void)
struct irq_entry *entry;
unsigned long flags;
- spin_lock_irqsave(&irq_lock, flags);
+
+ local_irq_save(flags);
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+ /*
+ * We don't need to lock anything here since we're in resume
+ * and nothing else is running, but have disabled IRQs so we
+ * don't try anything else with the interrupt list from there.
+ */
+ list_for_each_entry(entry, &active_fds, list) {
+ enum um_irq_type t;
+
+ for (t = 0; t < NUM_IRQ_TYPES; t++) {
+ struct irq_reg *reg = &entry->reg[t];
+
+ if (reg->pending_on_resume) {
+ irq_enter();
+ generic_handle_irq(reg->irq);
+ irq_exit();
+ reg->pending_on_resume = false;
+ }
+ }
+ }
+#endif
+
+ spin_lock(&irq_lock);
list_for_each_entry(entry, &active_fds, list) {
if (entry->suspended) {
int err = os_set_fd_async(entry->fd);
@@ -437,7 +558,7 @@ void um_irqs_resume(void)
}
spin_unlock_irqrestore(&irq_lock, flags);
- sig_info[SIGIO] = sigio_handler;
+ irqs_suspended = false;
send_sigio_to_self();
}
diff --git a/arch/um/kernel/kmsg_dump.c b/arch/um/kernel/kmsg_dump.c
index e4abac6c9727..6516ef1f8274 100644
--- a/arch/um/kernel/kmsg_dump.c
+++ b/arch/um/kernel/kmsg_dump.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kmsg_dump.h>
#include <linux/console.h>
+#include <linux/string.h>
#include <shared/init.h>
#include <shared/kern.h>
#include <os.h>
@@ -16,8 +17,12 @@ static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
if (!console_trylock())
return;
- for_each_console(con)
- break;
+ for_each_console(con) {
+ if(strcmp(con->name, "tty") == 0 &&
+ (con->flags & (CON_ENABLED | CON_CONSDEV)) != 0) {
+ break;
+ }
+ }
console_unlock();
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 2a986ece5478..c5011064b5dd 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -32,7 +32,6 @@
#include <os.h>
#include <skas.h>
#include <linux/time-internal.h>
-#include <asm/set_memory.h>
/*
* This is a per-cpu array. A processor only modifies its entry and it only
@@ -63,18 +62,16 @@ void free_stack(unsigned long stack, int order)
free_pages(stack, order);
}
-unsigned long alloc_stack(int atomic)
+unsigned long alloc_stack(int order, int atomic)
{
- unsigned long addr;
+ unsigned long page;
gfp_t flags = GFP_KERNEL;
if (atomic)
flags = GFP_ATOMIC;
- addr = __get_free_pages(flags, 1);
+ page = __get_free_pages(flags, order);
- set_memory_ro(addr, 1);
-
- return addr + PAGE_SIZE;
+ return page;
}
static inline void set_current(struct task_struct *task)
@@ -160,7 +157,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
unsigned long arg, struct task_struct * p, unsigned long tls)
{
void (*handler)(void);
- int kthread = current->flags & PF_KTHREAD;
+ int kthread = current->flags & (PF_KTHREAD | PF_IO_WORKER);
int ret = 0;
p->thread = (struct thread_struct) INIT_THREAD;
diff --git a/arch/um/kernel/skas/clone.c b/arch/um/kernel/skas/clone.c
index bfb70c456b30..592cdb138441 100644
--- a/arch/um/kernel/skas/clone.c
+++ b/arch/um/kernel/skas/clone.c
@@ -24,29 +24,25 @@
void __attribute__ ((__section__ (".__syscall_stub")))
stub_clone_handler(void)
{
- struct stub_data *data = (struct stub_data *) STUB_DATA;
+ int stack;
+ struct stub_data *data = (void *) ((unsigned long)&stack & ~(UM_KERN_PAGE_SIZE - 1));
long err;
err = stub_syscall2(__NR_clone, CLONE_PARENT | CLONE_FILES | SIGCHLD,
- STUB_DATA + UM_KERN_PAGE_SIZE / 2 - sizeof(void *));
- if (err != 0)
- goto out;
+ (unsigned long)data + UM_KERN_PAGE_SIZE / 2 - sizeof(void *));
+ if (err) {
+ data->parent_err = err;
+ goto done;
+ }
err = stub_syscall4(__NR_ptrace, PTRACE_TRACEME, 0, 0, 0);
- if (err)
- goto out;
+ if (err) {
+ data->child_err = err;
+ goto done;
+ }
- remap_stack(data->fd, data->offset);
- goto done;
+ remap_stack_and_trap();
- out:
- /*
- * save current result.
- * Parent: pid;
- * child: retcode of mmap already saved and it jumps around this
- * assignment
- */
- data->err = err;
done:
trap_myself();
}
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index d9961163da66..125df465e8ea 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -14,47 +14,6 @@
#include <os.h>
#include <skas.h>
-static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
- unsigned long kernel)
-{
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- pgd = pgd_offset(mm, proc);
-
- p4d = p4d_alloc(mm, pgd, proc);
- if (!p4d)
- goto out;
-
- pud = pud_alloc(mm, p4d, proc);
- if (!pud)
- goto out_pud;
-
- pmd = pmd_alloc(mm, pud, proc);
- if (!pmd)
- goto out_pmd;
-
- pte = pte_alloc_map(mm, pmd, proc);
- if (!pte)
- goto out_pte;
-
- *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
- *pte = pte_mkread(*pte);
- return 0;
-
- out_pte:
- pmd_free(mm, pmd);
- out_pmd:
- pud_free(mm, pud);
- out_pud:
- p4d_free(mm, p4d);
- out:
- return -ENOMEM;
-}
-
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
struct mm_context *from_mm = NULL;
@@ -98,52 +57,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
return ret;
}
-void uml_setup_stubs(struct mm_struct *mm)
-{
- int err, ret;
-
- ret = init_stub_pte(mm, STUB_CODE,
- (unsigned long) __syscall_stub_start);
- if (ret)
- goto out;
-
- ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
- if (ret)
- goto out;
-
- mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start);
- mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);
-
- /* dup_mmap already holds mmap_lock */
- err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
- VM_READ | VM_MAYREAD | VM_EXEC |
- VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
- mm->context.stub_pages);
- if (err) {
- printk(KERN_ERR "install_special_mapping returned %d\n", err);
- goto out;
- }
- return;
-
-out:
- force_sigsegv(SIGSEGV);
-}
-
-void arch_exit_mmap(struct mm_struct *mm)
-{
- pte_t *pte;
-
- pte = virt_to_pte(mm, STUB_CODE);
- if (pte != NULL)
- pte_clear(mm, STUB_CODE, pte);
-
- pte = virt_to_pte(mm, STUB_DATA);
- if (pte == NULL)
- return;
-
- pte_clear(mm, STUB_DATA, pte);
-}
-
void destroy_context(struct mm_struct *mm)
{
struct mm_context *mmu = &mm->context;
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index f4db89b5b5a6..e0cdb9694fb8 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -278,6 +278,7 @@ static void __time_travel_add_event(struct time_travel_event *e,
{
struct time_travel_event *tmp;
bool inserted = false;
+ unsigned long flags;
if (e->pending)
return;
@@ -285,6 +286,7 @@ static void __time_travel_add_event(struct time_travel_event *e,
e->pending = true;
e->time = time;
+ local_irq_save(flags);
list_for_each_entry(tmp, &time_travel_events, list) {
/*
* Add the new entry before one with higher time,
@@ -307,6 +309,7 @@ static void __time_travel_add_event(struct time_travel_event *e,
tmp = time_travel_first_event();
time_travel_ext_update_request(tmp->time);
time_travel_next_event = tmp->time;
+ local_irq_restore(flags);
}
static void time_travel_add_event(struct time_travel_event *e,
@@ -318,6 +321,12 @@ static void time_travel_add_event(struct time_travel_event *e,
__time_travel_add_event(e, time);
}
+void time_travel_add_event_rel(struct time_travel_event *e,
+ unsigned long long delay_ns)
+{
+ time_travel_add_event(e, time_travel_time + delay_ns);
+}
+
void time_travel_periodic_timer(struct time_travel_event *e)
{
time_travel_add_event(&time_travel_timer_event,
@@ -381,12 +390,16 @@ static void time_travel_deliver_event(struct time_travel_event *e)
}
}
-static bool time_travel_del_event(struct time_travel_event *e)
+bool time_travel_del_event(struct time_travel_event *e)
{
+ unsigned long flags;
+
if (!e->pending)
return false;
+ local_irq_save(flags);
list_del(&e->list);
e->pending = false;
+ local_irq_restore(flags);
return true;
}
@@ -535,6 +548,31 @@ invalid_number:
return 1;
}
+
+static void time_travel_set_start(void)
+{
+ if (time_travel_start_set)
+ return;
+
+ switch (time_travel_mode) {
+ case TT_MODE_EXTERNAL:
+ time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
+ /* controller gave us the *current* time, so adjust by that */
+ time_travel_ext_get_time();
+ time_travel_start -= time_travel_time;
+ break;
+ case TT_MODE_INFCPU:
+ case TT_MODE_BASIC:
+ if (!time_travel_start_set)
+ time_travel_start = os_persistent_clock_emulation();
+ break;
+ case TT_MODE_OFF:
+ /* we just read the host clock with os_persistent_clock_emulation() */
+ break;
+ }
+
+ time_travel_start_set = true;
+}
#else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
#define time_travel_start_set 0
#define time_travel_start 0
@@ -553,11 +591,17 @@ static void time_travel_set_interval(unsigned long long interval)
{
}
+static inline void time_travel_set_start(void)
+{
+}
+
/* fail link if this actually gets used */
extern u64 time_travel_ext_req(u32 op, u64 time);
/* these are empty macros so the struct/fn need not exist */
#define time_travel_add_event(e, time) do { } while (0)
+/* externally not usable - redefine here so we can */
+#undef time_travel_del_event
#define time_travel_del_event(e) do { } while (0)
#endif
@@ -731,6 +775,8 @@ void read_persistent_clock64(struct timespec64 *ts)
{
long long nsecs;
+ time_travel_set_start();
+
if (time_travel_mode != TT_MODE_OFF)
nsecs = time_travel_start + time_travel_time;
else
@@ -742,25 +788,6 @@ void read_persistent_clock64(struct timespec64 *ts)
void __init time_init(void)
{
-#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
- switch (time_travel_mode) {
- case TT_MODE_EXTERNAL:
- time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
- /* controller gave us the *current* time, so adjust by that */
- time_travel_ext_get_time();
- time_travel_start -= time_travel_time;
- break;
- case TT_MODE_INFCPU:
- case TT_MODE_BASIC:
- if (!time_travel_start_set)
- time_travel_start = os_persistent_clock_emulation();
- break;
- case TT_MODE_OFF:
- /* we just read the host clock with os_persistent_clock_emulation() */
- break;
- }
-#endif
-
timer_set_signal_handler();
late_time_init = um_timer_setup;
}
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 437d1f1cc5ec..bc38f79ca3a3 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -162,9 +162,6 @@ static int add_munmap(unsigned long addr, unsigned long len,
struct host_vm_op *last;
int ret = 0;
- if ((addr >= STUB_START) && (addr < STUB_END))
- return -EINVAL;
-
if (hvc->index != 0) {
last = &hvc->ops[hvc->index - 1];
if ((last->type == MUNMAP) &&
@@ -226,9 +223,6 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
pte = pte_offset_kernel(pmd, addr);
do {
- if ((addr >= STUB_START) && (addr < STUB_END))
- continue;
-
r = pte_read(*pte);
w = pte_write(*pte);
x = pte_exec(*pte);
@@ -346,12 +340,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
/* This is not an else because ret is modified above */
if (ret) {
+ struct mm_id *mm_idp = &current->mm->context.id;
+
printk(KERN_ERR "fix_range_common: failed, killing current "
"process: %d\n", task_tgid_vnr(current));
- /* We are under mmap_lock, release it such that current can terminate */
- mmap_write_unlock(current->mm);
- force_sig(SIGKILL);
- do_signal(&current->thread.regs);
+ mm_idp->kill = 1;
}
}
@@ -472,6 +465,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
struct mm_id *mm_id;
address &= PAGE_MASK;
+
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto kill;
@@ -608,57 +602,3 @@ void force_flush_all(void)
vma = vma->vm_next;
}
}
-
-struct page_change_data {
- unsigned int set_mask, clear_mask;
-};
-
-static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
-{
- struct page_change_data *cdata = data;
- pte_t pte = READ_ONCE(*ptep);
-
- pte_clear_bits(pte, cdata->clear_mask);
- pte_set_bits(pte, cdata->set_mask);
-
- set_pte(ptep, pte);
- return 0;
-}
-
-static int change_memory(unsigned long start, unsigned long pages,
- unsigned int set_mask, unsigned int clear_mask)
-{
- unsigned long size = pages * PAGE_SIZE;
- struct page_change_data data;
- int ret;
-
- data.set_mask = set_mask;
- data.clear_mask = clear_mask;
-
- ret = apply_to_page_range(&init_mm, start, size, change_page_range,
- &data);
-
- flush_tlb_kernel_range(start, start + size);
-
- return ret;
-}
-
-int set_memory_ro(unsigned long addr, int numpages)
-{
- return change_memory(addr, numpages, 0, _PAGE_RW);
-}
-
-int set_memory_rw(unsigned long addr, int numpages)
-{
- return change_memory(addr, numpages, _PAGE_RW, 0);
-}
-
-int set_memory_nx(unsigned long addr, int numpages)
-{
- return -EOPNOTSUPP;
-}
-
-int set_memory_x(unsigned long addr, int numpages)
-{
- return -EOPNOTSUPP;
-}
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 31d356b1ffd8..74e07e748a9b 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -26,7 +26,8 @@
#include <mem_user.h>
#include <os.h>
-#define DEFAULT_COMMAND_LINE "root=98:0"
+#define DEFAULT_COMMAND_LINE_ROOT "root=98:0"
+#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty"
/* Changed in add_arg and setup_arch, which run before SMP is started */
static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 };
@@ -109,7 +110,8 @@ unsigned long end_vm;
int ncpus = 1;
/* Set in early boot */
-static int have_root __initdata = 0;
+static int have_root __initdata;
+static int have_console __initdata;
/* Set in uml_mem_setup and modified in linux_main */
long long physmem_size = 32 * 1024 * 1024;
@@ -161,6 +163,17 @@ __uml_setup("debug", no_skas_debug_setup,
" this flag is not needed to run gdb on UML in skas mode\n\n"
);
+static int __init uml_console_setup(char *line, int *add)
+{
+ have_console = 1;
+ return 0;
+}
+
+__uml_setup("console=", uml_console_setup,
+"console=<preferred console>\n"
+" Specify the preferred console output driver\n\n"
+);
+
static int __init Usage(char *line, int *add)
{
const char **p;
@@ -236,6 +249,7 @@ void uml_finishsetup(void)
}
/* Set during early boot */
+unsigned long stub_start;
unsigned long task_size;
EXPORT_SYMBOL(task_size);
@@ -264,9 +278,16 @@ int __init linux_main(int argc, char **argv)
add_arg(argv[i]);
}
if (have_root == 0)
- add_arg(DEFAULT_COMMAND_LINE);
+ add_arg(DEFAULT_COMMAND_LINE_ROOT);
+
+ if (have_console == 0)
+ add_arg(DEFAULT_COMMAND_LINE_CONSOLE);
host_task_size = os_get_top_address();
+ /* reserve two pages for the stubs */
+ host_task_size -= 2 * PAGE_SIZE;
+ stub_start = host_task_size;
+
/*
* TASK_SIZE needs to be PGDIR_SIZE aligned or else exit_mmap craps
* out
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index feb48d796e00..9fa6e4187d4f 100644
--- a/arch/um/os-Linux/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -45,7 +45,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
unsigned long stack, sp;
int pid, fds[2], ret, n;
- stack = alloc_stack(__cant_sleep());
+ stack = alloc_stack(0, __cant_sleep());
if (stack == 0)
return -ENOMEM;
@@ -116,7 +116,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
unsigned long stack, sp;
int pid, status, err;
- stack = alloc_stack(__cant_sleep());
+ stack = alloc_stack(0, __cant_sleep());
if (stack == 0)
return -ENOMEM;
diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c
index c546d16f8dfe..3b4975ee67e2 100644
--- a/arch/um/os-Linux/skas/mem.c
+++ b/arch/um/os-Linux/skas/mem.c
@@ -40,6 +40,8 @@ static int __init init_syscall_regs(void)
syscall_regs[REGS_IP_INDEX] = STUB_CODE +
((unsigned long) batch_syscall_stub -
(unsigned long) __syscall_stub_start);
+ syscall_regs[REGS_SP_INDEX] = STUB_DATA;
+
return 0;
}
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 0621d521208e..fba674fac8b7 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -28,6 +28,54 @@ int is_skas_winch(int pid, int fd, void *data)
return pid == getpgrp();
}
+static const char *ptrace_reg_name(int idx)
+{
+#define R(n) case HOST_##n: return #n
+
+ switch (idx) {
+#ifdef __x86_64__
+ R(BX);
+ R(CX);
+ R(DI);
+ R(SI);
+ R(DX);
+ R(BP);
+ R(AX);
+ R(R8);
+ R(R9);
+ R(R10);
+ R(R11);
+ R(R12);
+ R(R13);
+ R(R14);
+ R(R15);
+ R(ORIG_AX);
+ R(CS);
+ R(SS);
+ R(EFLAGS);
+#elif defined(__i386__)
+ R(IP);
+ R(SP);
+ R(EFLAGS);
+ R(AX);
+ R(BX);
+ R(CX);
+ R(DX);
+ R(SI);
+ R(DI);
+ R(BP);
+ R(CS);
+ R(SS);
+ R(DS);
+ R(FS);
+ R(ES);
+ R(GS);
+ R(ORIG_AX);
+#endif
+ }
+ return "";
+}
+
static int ptrace_dump_regs(int pid)
{
unsigned long regs[MAX_REG_NR];
@@ -37,8 +85,11 @@ static int ptrace_dump_regs(int pid)
return -errno;
printk(UM_KERN_ERR "Stub registers -\n");
- for (i = 0; i < ARRAY_SIZE(regs); i++)
- printk(UM_KERN_ERR "\t%d - %lx\n", i, regs[i]);
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ const char *regname = ptrace_reg_name(i);
+
+ printk(UM_KERN_ERR "\t%s\t(%2d): %lx\n", regname, i, regs[i]);
+ }
return 0;
}
@@ -200,10 +251,6 @@ static int userspace_tramp(void *stack)
signal(SIGTERM, SIG_DFL);
signal(SIGWINCH, SIG_IGN);
- /*
- * This has a pte, but it can't be mapped in with the usual
- * tlb_flush mechanism because this is part of that mechanism
- */
fd = phys_mapping(to_phys(__syscall_stub_start), &offset);
addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
@@ -249,6 +296,7 @@ static int userspace_tramp(void *stack)
}
int userspace_pid[NR_CPUS];
+int kill_userspace_mm[NR_CPUS];
/**
* start_userspace() - prepare a new userspace process
@@ -342,6 +390,8 @@ void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
interrupt_end();
while (1) {
+ if (kill_userspace_mm[0])
+ fatal_sigsegv();
/*
* This can legitimately fail if the process loads a
@@ -491,8 +541,14 @@ int copy_context_skas0(unsigned long new_stack, int pid)
* and child's mmap2 calls
*/
*data = ((struct stub_data) {
- .offset = MMAP_OFFSET(new_offset),
- .fd = new_fd
+ .offset = MMAP_OFFSET(new_offset),
+ .fd = new_fd,
+ .parent_err = -ESRCH,
+ .child_err = 0,
+ });
+
+ *child_data = ((struct stub_data) {
+ .child_err = -ESRCH,
});
err = ptrace_setregs(pid, thread_regs);
@@ -510,9 +566,6 @@ int copy_context_skas0(unsigned long new_stack, int pid)
return err;
}
- /* set a well known return code for detection of child write failure */
- child_data->err = 12345678;
-
/*
* Wait, until parent has finished its work: read child's pid from
* parent's stack, and check, if bad result.
@@ -527,7 +580,7 @@ int copy_context_skas0(unsigned long new_stack, int pid)
wait_stub_done(pid);
- pid = data->err;
+ pid = data->parent_err;
if (pid < 0) {
printk(UM_KERN_ERR "copy_context_skas0 - stub-parent reports "
"error %d\n", -pid);
@@ -539,10 +592,10 @@ int copy_context_skas0(unsigned long new_stack, int pid)
* child's stack and check it.
*/
wait_stub_done(pid);
- if (child_data->err != STUB_DATA) {
- printk(UM_KERN_ERR "copy_context_skas0 - stub-child reports "
- "error %ld\n", child_data->err);
- err = child_data->err;
+ if (child_data->child_err != STUB_DATA) {
+ printk(UM_KERN_ERR "copy_context_skas0 - stub-child %d reports "
+ "error %ld\n", pid, data->child_err);
+ err = data->child_err;
goto out_kill;
}
@@ -663,4 +716,5 @@ void reboot_skas(void)
void __switch_mm(struct mm_id *mm_idp)
{
userspace_pid[0] = mm_idp->u.pid;
+ kill_userspace_mm[0] = mm_idp->kill;
}
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
index a61cbf73a179..6c5041c5560b 100644
--- a/arch/um/os-Linux/time.c
+++ b/arch/um/os-Linux/time.c
@@ -104,5 +104,18 @@ long long os_nsecs(void)
*/
void os_idle_sleep(void)
{
- pause();
+ struct itimerspec its;
+ sigset_t set, old;
+
+ /* block SIGALRM while we analyze the timer state */
+ sigemptyset(&set);
+ sigaddset(&set, SIGALRM);
+ sigprocmask(SIG_BLOCK, &set, &old);
+
+ /* check the timer, and if it'll fire then wait for it */
+ timer_gettime(event_high_res_timer, &its);
+ if (its.it_value.tv_sec || its.it_value.tv_nsec)
+ sigsuspend(&old);
+ /* either way, restore the signal mask */
+ sigprocmask(SIG_UNBLOCK, &set, NULL);
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 21f851179ff0..2792879d398e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -32,6 +32,7 @@ config X86_64
select MODULES_USE_ELF_RELA
select NEED_DMA_MAP_STATE
select SWIOTLB
+ select ARCH_HAS_ELFCORE_COMPAT
config FORCE_DYNAMIC_FTRACE
def_bool y
@@ -96,6 +97,8 @@ config X86
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096
+ select ARCH_SUPPORTS_LTO_CLANG if X86_64
+ select ARCH_SUPPORTS_LTO_CLANG_THIN if X86_64
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
@@ -148,6 +151,7 @@ config X86
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if X86_64
select HAVE_ARCH_KASAN_VMALLOC if X86_64
+ select HAVE_ARCH_KFENCE
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
@@ -168,6 +172,7 @@ config X86
select HAVE_CONTEXT_TRACKING if X86_64
select HAVE_CONTEXT_TRACKING_OFFSTACK if HAVE_CONTEXT_TRACKING
select HAVE_C_RECORDMCOUNT
+ select HAVE_OBJTOOL_MCOUNT if STACK_VALIDATION
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
@@ -187,6 +192,7 @@ config X86
select HAVE_HW_BREAKPOINT
select HAVE_IDE
select HAVE_IOREMAP_PROT
+ select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
@@ -206,7 +212,6 @@ config X86
select HAVE_MOVE_PMD
select HAVE_MOVE_PUD
select HAVE_NMI
- select HAVE_OPROFILE
select HAVE_OPTPROBES
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
@@ -220,10 +225,12 @@ config X86
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
select HAVE_FUNCTION_ARG_ACCESS_API
+ select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
select HAVE_STACK_VALIDATION if X86_64
select HAVE_STATIC_CALL
select HAVE_STATIC_CALL_INLINE if HAVE_STACK_VALIDATION
+ select HAVE_PREEMPT_DYNAMIC
select HAVE_RSEQ
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UNSTABLE_SCHED_CLOCK
@@ -444,7 +451,7 @@ config X86_X2APIC
If you don't know what to do here, say N.
config X86_MPPARSE
- bool "Enable MPS table" if ACPI || SFI
+ bool "Enable MPS table" if ACPI
default y
depends on X86_LOCAL_APIC
help
@@ -603,7 +610,6 @@ config X86_INTEL_MID
depends on PCI
depends on X86_64 || (PCI_GOANY && X86_32)
depends on X86_IO_APIC
- select SFI
select I2C
select DW_APB_TIMER
select APB_TIMER
@@ -890,19 +896,7 @@ config HPET_TIMER
config HPET_EMULATE_RTC
def_bool y
- depends on HPET_TIMER && (RTC=y || RTC=m || RTC_DRV_CMOS=m || RTC_DRV_CMOS=y)
-
-config APB_TIMER
- def_bool y if X86_INTEL_MID
- prompt "Intel MID APB Timer Support" if X86_INTEL_MID
- select DW_APB_TIMER
- depends on X86_INTEL_MID && SFI
- help
- APB timer is the replacement for 8254, HPET on X86 MID platforms.
- The APBT provides a stable time base on SMP
- systems, unlike the TSC, but it is more expensive to access,
- as it is off-chip. APB timers are always running regardless of CPU
- C states, they are used as per CPU clockevent device when possible.
+ depends on HPET_TIMER && (RTC_DRV_CMOS=m || RTC_DRV_CMOS=y)
# Mark as expert because too many people got it wrong.
# The code disables itself when not needed.
@@ -1158,10 +1152,6 @@ config X86_MCE_INJECT
If you don't know what a machine check is and you don't do kernel
QA it is safe to say n.
-config X86_THERMAL_VECTOR
- def_bool y
- depends on X86_MCE_INTEL
-
source "arch/x86/events/Kconfig"
config X86_LEGACY_VM86
@@ -2469,8 +2459,6 @@ source "kernel/power/Kconfig"
source "drivers/acpi/Kconfig"
-source "drivers/sfi/Kconfig"
-
config X86_APM_BOOT
def_bool y
depends on APM
@@ -2657,7 +2645,7 @@ config PCI_DIRECT
config PCI_MMCONFIG
bool "Support mmconfig PCI config space access" if X86_64
default y
- depends on PCI && (ACPI || SFI || JAILHOUSE_GUEST)
+ depends on PCI && (ACPI || JAILHOUSE_GUEST)
depends on X86_64 || (PCI_GOANY || PCI_GOMMCONFIG)
config PCI_OLPC
@@ -2864,7 +2852,6 @@ config IA32_EMULATION
depends on X86_64
select ARCH_WANT_OLD_COMPAT_IPC
select BINFMT_ELF
- select COMPAT_BINFMT_ELF
select COMPAT_OLD_SIGACTION
help
Include code to run legacy 32-bit programs under a
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 7116da3980be..2d6d5a28c3bf 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -50,6 +50,9 @@ export BITS
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
+# Intel CET isn't enabled in the kernel
+KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+
ifeq ($(CONFIG_X86_32),y)
BITS := 32
UTS_MACHINE := i386
@@ -166,6 +169,11 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
endif
+ifdef CONFIG_LTO_CLANG
+KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
+ -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
+endif
+
# Workaround for a gcc prerelease that unfortunately was shipped in a suse release
KBUILD_CFLAGS += -Wno-sign-compare
#
@@ -229,9 +237,6 @@ core-y += arch/x86/
drivers-$(CONFIG_MATH_EMULATION) += arch/x86/math-emu/
drivers-$(CONFIG_PCI) += arch/x86/pci/
-# must be linked after kernel/
-drivers-$(CONFIG_OPROFILE) += arch/x86/oprofile/
-
# suspend and hibernation support
drivers-$(CONFIG_PM) += arch/x86/power/
@@ -292,16 +297,20 @@ archclean:
$(Q)$(MAKE) $(clean)=arch/x86/tools
define archhelp
- echo '* bzImage - Compressed kernel image (arch/x86/boot/bzImage)'
- echo ' install - Install kernel using'
- echo ' (your) ~/bin/$(INSTALLKERNEL) or'
- echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
- echo ' install to $$(INSTALL_PATH) and run lilo'
- echo ' fdimage - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)'
- echo ' fdimage144 - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)'
- echo ' fdimage288 - Create 2.8MB boot floppy image (arch/x86/boot/fdimage)'
- echo ' isoimage - Create a boot CD-ROM image (arch/x86/boot/image.iso)'
- echo ' bzdisk/fdimage*/isoimage also accept:'
- echo ' FDARGS="..." arguments for the booted kernel'
- echo ' FDINITRD=file initrd for the booted kernel'
+ echo '* bzImage - Compressed kernel image (arch/x86/boot/bzImage)'
+ echo ' install - Install kernel using (your) ~/bin/$(INSTALLKERNEL) or'
+ echo ' (distribution) /sbin/$(INSTALLKERNEL) or install to '
+ echo ' $$(INSTALL_PATH) and run lilo'
+ echo ''
+ echo ' fdimage - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)'
+ echo ' fdimage144 - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)'
+ echo ' fdimage288 - Create 2.8MB boot floppy image (arch/x86/boot/fdimage)'
+ echo ' isoimage - Create a boot CD-ROM image (arch/x86/boot/image.iso)'
+ echo ' bzdisk/fdimage*/isoimage also accept:'
+ echo ' FDARGS="..." arguments for the booted kernel'
+ echo ' FDINITRD=file initrd for the booted kernel'
+ echo ''
+ echo ' kvm_guest.config - Enable Kconfig items for running this kernel as a KVM guest'
+ echo ' xen.config - Enable Kconfig items for running this kernel as a Xen guest'
+
endef
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 78210793d357..9c9c4a888b1d 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -50,7 +50,6 @@ CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_BINFMT_MISC=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 9936528e1939..b60bd2d86034 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -48,7 +48,6 @@ CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_BINFMT_MISC=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index a31de0c6ccde..b28e36b7c96b 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -4,8 +4,6 @@
OBJECT_FILES_NON_STANDARD := y
-obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o
-
obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index d1436c37008b..4e3972570916 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -43,10 +43,6 @@
#ifdef __x86_64__
# constants in mergeable sections, linker can reorder and merge
-.section .rodata.cst16.gf128mul_x_ble_mask, "aM", @progbits, 16
-.align 16
-.Lgf128mul_x_ble_mask:
- .octa 0x00000000000000010000000000000087
.section .rodata.cst16.POLY, "aM", @progbits, 16
.align 16
POLY: .octa 0xC2000000000000000000000000000001
@@ -146,7 +142,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
#define CTR %xmm11
#define INC %xmm12
-#define GF128MUL_MASK %xmm10
+#define GF128MUL_MASK %xmm7
#ifdef __x86_64__
#define AREG %rax
@@ -2577,13 +2573,140 @@ SYM_FUNC_START(aesni_cbc_dec)
ret
SYM_FUNC_END(aesni_cbc_dec)
-#ifdef __x86_64__
+/*
+ * void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+ * size_t len, u8 *iv)
+ */
+SYM_FUNC_START(aesni_cts_cbc_enc)
+ FRAME_BEGIN
+#ifndef __x86_64__
+ pushl IVP
+ pushl LEN
+ pushl KEYP
+ pushl KLEN
+ movl (FRAME_OFFSET+20)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+24)(%esp), OUTP # dst
+ movl (FRAME_OFFSET+28)(%esp), INP # src
+ movl (FRAME_OFFSET+32)(%esp), LEN # len
+ movl (FRAME_OFFSET+36)(%esp), IVP # iv
+ lea .Lcts_permute_table, T1
+#else
+ lea .Lcts_permute_table(%rip), T1
+#endif
+ mov 480(KEYP), KLEN
+ movups (IVP), STATE
+ sub $16, LEN
+ mov T1, IVP
+ add $32, IVP
+ add LEN, T1
+ sub LEN, IVP
+ movups (T1), %xmm4
+ movups (IVP), %xmm5
+
+ movups (INP), IN1
+ add LEN, INP
+ movups (INP), IN2
+
+ pxor IN1, STATE
+ call _aesni_enc1
+
+ pshufb %xmm5, IN2
+ pxor STATE, IN2
+ pshufb %xmm4, STATE
+ add OUTP, LEN
+ movups STATE, (LEN)
+
+ movaps IN2, STATE
+ call _aesni_enc1
+ movups STATE, (OUTP)
+
+#ifndef __x86_64__
+ popl KLEN
+ popl KEYP
+ popl LEN
+ popl IVP
+#endif
+ FRAME_END
+ ret
+SYM_FUNC_END(aesni_cts_cbc_enc)
+
+/*
+ * void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+ * size_t len, u8 *iv)
+ */
+SYM_FUNC_START(aesni_cts_cbc_dec)
+ FRAME_BEGIN
+#ifndef __x86_64__
+ pushl IVP
+ pushl LEN
+ pushl KEYP
+ pushl KLEN
+ movl (FRAME_OFFSET+20)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+24)(%esp), OUTP # dst
+ movl (FRAME_OFFSET+28)(%esp), INP # src
+ movl (FRAME_OFFSET+32)(%esp), LEN # len
+ movl (FRAME_OFFSET+36)(%esp), IVP # iv
+ lea .Lcts_permute_table, T1
+#else
+ lea .Lcts_permute_table(%rip), T1
+#endif
+ mov 480(KEYP), KLEN
+ add $240, KEYP
+ movups (IVP), IV
+ sub $16, LEN
+ mov T1, IVP
+ add $32, IVP
+ add LEN, T1
+ sub LEN, IVP
+ movups (T1), %xmm4
+
+ movups (INP), STATE
+ add LEN, INP
+ movups (INP), IN1
+
+ call _aesni_dec1
+ movaps STATE, IN2
+ pshufb %xmm4, STATE
+ pxor IN1, STATE
+
+ add OUTP, LEN
+ movups STATE, (LEN)
+
+ movups (IVP), %xmm0
+ pshufb %xmm0, IN1
+ pblendvb IN2, IN1
+ movaps IN1, STATE
+ call _aesni_dec1
+
+ pxor IV, STATE
+ movups STATE, (OUTP)
+
+#ifndef __x86_64__
+ popl KLEN
+ popl KEYP
+ popl LEN
+ popl IVP
+#endif
+ FRAME_END
+ ret
+SYM_FUNC_END(aesni_cts_cbc_dec)
+
.pushsection .rodata
.align 16
+.Lcts_permute_table:
+ .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
+ .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
+ .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+ .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+ .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
+ .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
+#ifdef __x86_64__
.Lbswap_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+#endif
.popsection
+#ifdef __x86_64__
/*
* _aesni_inc_init: internal ABI
* setup registers used by _aesni_inc
@@ -2696,6 +2819,14 @@ SYM_FUNC_START(aesni_ctr_enc)
ret
SYM_FUNC_END(aesni_ctr_enc)
+#endif
+
+.section .rodata.cst16.gf128mul_x_ble_mask, "aM", @progbits, 16
+.align 16
+.Lgf128mul_x_ble_mask:
+ .octa 0x00000000000000010000000000000087
+.previous
+
/*
* _aesni_gf128mul_x_ble: internal ABI
* Multiply in GF(2^128) for XTS IVs
@@ -2708,120 +2839,325 @@ SYM_FUNC_END(aesni_ctr_enc)
* CTR: == temporary value
*/
#define _aesni_gf128mul_x_ble() \
- pshufd $0x13, IV, CTR; \
+ pshufd $0x13, IV, KEY; \
paddq IV, IV; \
- psrad $31, CTR; \
- pand GF128MUL_MASK, CTR; \
- pxor CTR, IV;
+ psrad $31, KEY; \
+ pand GF128MUL_MASK, KEY; \
+ pxor KEY, IV;
/*
- * void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *dst,
- * const u8 *src, bool enc, le128 *iv)
+ * void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst,
+ * const u8 *src, unsigned int len, le128 *iv)
*/
-SYM_FUNC_START(aesni_xts_crypt8)
+SYM_FUNC_START(aesni_xts_encrypt)
FRAME_BEGIN
- testb %cl, %cl
- movl $0, %ecx
- movl $240, %r10d
- leaq _aesni_enc4, %r11
- leaq _aesni_dec4, %rax
- cmovel %r10d, %ecx
- cmoveq %rax, %r11
-
+#ifndef __x86_64__
+ pushl IVP
+ pushl LEN
+ pushl KEYP
+ pushl KLEN
+ movl (FRAME_OFFSET+20)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+24)(%esp), OUTP # dst
+ movl (FRAME_OFFSET+28)(%esp), INP # src
+ movl (FRAME_OFFSET+32)(%esp), LEN # len
+ movl (FRAME_OFFSET+36)(%esp), IVP # iv
movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK
+#else
+ movdqa .Lgf128mul_x_ble_mask(%rip), GF128MUL_MASK
+#endif
movups (IVP), IV
mov 480(KEYP), KLEN
- addq %rcx, KEYP
+
+.Lxts_enc_loop4:
+ sub $64, LEN
+ jl .Lxts_enc_1x
movdqa IV, STATE1
- movdqu 0x00(INP), INC
- pxor INC, STATE1
+ movdqu 0x00(INP), IN
+ pxor IN, STATE1
movdqu IV, 0x00(OUTP)
_aesni_gf128mul_x_ble()
movdqa IV, STATE2
- movdqu 0x10(INP), INC
- pxor INC, STATE2
+ movdqu 0x10(INP), IN
+ pxor IN, STATE2
movdqu IV, 0x10(OUTP)
_aesni_gf128mul_x_ble()
movdqa IV, STATE3
- movdqu 0x20(INP), INC
- pxor INC, STATE3
+ movdqu 0x20(INP), IN
+ pxor IN, STATE3
movdqu IV, 0x20(OUTP)
_aesni_gf128mul_x_ble()
movdqa IV, STATE4
- movdqu 0x30(INP), INC
- pxor INC, STATE4
+ movdqu 0x30(INP), IN
+ pxor IN, STATE4
movdqu IV, 0x30(OUTP)
- CALL_NOSPEC r11
+ call _aesni_enc4
- movdqu 0x00(OUTP), INC
- pxor INC, STATE1
+ movdqu 0x00(OUTP), IN
+ pxor IN, STATE1
movdqu STATE1, 0x00(OUTP)
+ movdqu 0x10(OUTP), IN
+ pxor IN, STATE2
+ movdqu STATE2, 0x10(OUTP)
+
+ movdqu 0x20(OUTP), IN
+ pxor IN, STATE3
+ movdqu STATE3, 0x20(OUTP)
+
+ movdqu 0x30(OUTP), IN
+ pxor IN, STATE4
+ movdqu STATE4, 0x30(OUTP)
+
_aesni_gf128mul_x_ble()
- movdqa IV, STATE1
- movdqu 0x40(INP), INC
- pxor INC, STATE1
- movdqu IV, 0x40(OUTP)
- movdqu 0x10(OUTP), INC
- pxor INC, STATE2
- movdqu STATE2, 0x10(OUTP)
+ add $64, INP
+ add $64, OUTP
+ test LEN, LEN
+ jnz .Lxts_enc_loop4
+.Lxts_enc_ret_iv:
+ movups IV, (IVP)
+
+.Lxts_enc_ret:
+#ifndef __x86_64__
+ popl KLEN
+ popl KEYP
+ popl LEN
+ popl IVP
+#endif
+ FRAME_END
+ ret
+
+.Lxts_enc_1x:
+ add $64, LEN
+ jz .Lxts_enc_ret_iv
+ sub $16, LEN
+ jl .Lxts_enc_cts4
+
+.Lxts_enc_loop1:
+ movdqu (INP), STATE
+ pxor IV, STATE
+ call _aesni_enc1
+ pxor IV, STATE
_aesni_gf128mul_x_ble()
- movdqa IV, STATE2
- movdqu 0x50(INP), INC
- pxor INC, STATE2
- movdqu IV, 0x50(OUTP)
- movdqu 0x20(OUTP), INC
- pxor INC, STATE3
- movdqu STATE3, 0x20(OUTP)
+ test LEN, LEN
+ jz .Lxts_enc_out
+
+ add $16, INP
+ sub $16, LEN
+ jl .Lxts_enc_cts1
+
+ movdqu STATE, (OUTP)
+ add $16, OUTP
+ jmp .Lxts_enc_loop1
+
+.Lxts_enc_out:
+ movdqu STATE, (OUTP)
+ jmp .Lxts_enc_ret_iv
+
+.Lxts_enc_cts4:
+ movdqa STATE4, STATE
+ sub $16, OUTP
+
+.Lxts_enc_cts1:
+#ifndef __x86_64__
+ lea .Lcts_permute_table, T1
+#else
+ lea .Lcts_permute_table(%rip), T1
+#endif
+ add LEN, INP /* rewind input pointer */
+ add $16, LEN /* # bytes in final block */
+ movups (INP), IN1
+
+ mov T1, IVP
+ add $32, IVP
+ add LEN, T1
+ sub LEN, IVP
+ add OUTP, LEN
+
+ movups (T1), %xmm4
+ movaps STATE, IN2
+ pshufb %xmm4, STATE
+ movups STATE, (LEN)
+
+ movups (IVP), %xmm0
+ pshufb %xmm0, IN1
+ pblendvb IN2, IN1
+ movaps IN1, STATE
+
+ pxor IV, STATE
+ call _aesni_enc1
+ pxor IV, STATE
+
+ movups STATE, (OUTP)
+ jmp .Lxts_enc_ret
+SYM_FUNC_END(aesni_xts_encrypt)
+
+/*
+ * void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst,
+ * const u8 *src, unsigned int len, le128 *iv)
+ */
+SYM_FUNC_START(aesni_xts_decrypt)
+ FRAME_BEGIN
+#ifndef __x86_64__
+ pushl IVP
+ pushl LEN
+ pushl KEYP
+ pushl KLEN
+ movl (FRAME_OFFSET+20)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+24)(%esp), OUTP # dst
+ movl (FRAME_OFFSET+28)(%esp), INP # src
+ movl (FRAME_OFFSET+32)(%esp), LEN # len
+ movl (FRAME_OFFSET+36)(%esp), IVP # iv
+ movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK
+#else
+ movdqa .Lgf128mul_x_ble_mask(%rip), GF128MUL_MASK
+#endif
+ movups (IVP), IV
+
+ mov 480(KEYP), KLEN
+ add $240, KEYP
+
+ test $15, LEN
+ jz .Lxts_dec_loop4
+ sub $16, LEN
+
+.Lxts_dec_loop4:
+ sub $64, LEN
+ jl .Lxts_dec_1x
+
+ movdqa IV, STATE1
+ movdqu 0x00(INP), IN
+ pxor IN, STATE1
+ movdqu IV, 0x00(OUTP)
_aesni_gf128mul_x_ble()
- movdqa IV, STATE3
- movdqu 0x60(INP), INC
- pxor INC, STATE3
- movdqu IV, 0x60(OUTP)
+ movdqa IV, STATE2
+ movdqu 0x10(INP), IN
+ pxor IN, STATE2
+ movdqu IV, 0x10(OUTP)
- movdqu 0x30(OUTP), INC
- pxor INC, STATE4
- movdqu STATE4, 0x30(OUTP)
+ _aesni_gf128mul_x_ble()
+ movdqa IV, STATE3
+ movdqu 0x20(INP), IN
+ pxor IN, STATE3
+ movdqu IV, 0x20(OUTP)
_aesni_gf128mul_x_ble()
movdqa IV, STATE4
- movdqu 0x70(INP), INC
- pxor INC, STATE4
- movdqu IV, 0x70(OUTP)
+ movdqu 0x30(INP), IN
+ pxor IN, STATE4
+ movdqu IV, 0x30(OUTP)
- _aesni_gf128mul_x_ble()
- movups IV, (IVP)
+ call _aesni_dec4
+
+ movdqu 0x00(OUTP), IN
+ pxor IN, STATE1
+ movdqu STATE1, 0x00(OUTP)
+
+ movdqu 0x10(OUTP), IN
+ pxor IN, STATE2
+ movdqu STATE2, 0x10(OUTP)
- CALL_NOSPEC r11
+ movdqu 0x20(OUTP), IN
+ pxor IN, STATE3
+ movdqu STATE3, 0x20(OUTP)
- movdqu 0x40(OUTP), INC
- pxor INC, STATE1
- movdqu STATE1, 0x40(OUTP)
+ movdqu 0x30(OUTP), IN
+ pxor IN, STATE4
+ movdqu STATE4, 0x30(OUTP)
- movdqu 0x50(OUTP), INC
- pxor INC, STATE2
- movdqu STATE2, 0x50(OUTP)
+ _aesni_gf128mul_x_ble()
- movdqu 0x60(OUTP), INC
- pxor INC, STATE3
- movdqu STATE3, 0x60(OUTP)
+ add $64, INP
+ add $64, OUTP
+ test LEN, LEN
+ jnz .Lxts_dec_loop4
- movdqu 0x70(OUTP), INC
- pxor INC, STATE4
- movdqu STATE4, 0x70(OUTP)
+.Lxts_dec_ret_iv:
+ movups IV, (IVP)
+.Lxts_dec_ret:
+#ifndef __x86_64__
+ popl KLEN
+ popl KEYP
+ popl LEN
+ popl IVP
+#endif
FRAME_END
ret
-SYM_FUNC_END(aesni_xts_crypt8)
+.Lxts_dec_1x:
+ add $64, LEN
+ jz .Lxts_dec_ret_iv
+
+.Lxts_dec_loop1:
+ movdqu (INP), STATE
+
+ add $16, INP
+ sub $16, LEN
+ jl .Lxts_dec_cts1
+
+ pxor IV, STATE
+ call _aesni_dec1
+ pxor IV, STATE
+ _aesni_gf128mul_x_ble()
+
+ test LEN, LEN
+ jz .Lxts_dec_out
+
+ movdqu STATE, (OUTP)
+ add $16, OUTP
+ jmp .Lxts_dec_loop1
+
+.Lxts_dec_out:
+ movdqu STATE, (OUTP)
+ jmp .Lxts_dec_ret_iv
+
+.Lxts_dec_cts1:
+ movdqa IV, STATE4
+ _aesni_gf128mul_x_ble()
+
+ pxor IV, STATE
+ call _aesni_dec1
+ pxor IV, STATE
+
+#ifndef __x86_64__
+ lea .Lcts_permute_table, T1
+#else
+ lea .Lcts_permute_table(%rip), T1
#endif
+ add LEN, INP /* rewind input pointer */
+ add $16, LEN /* # bytes in final block */
+ movups (INP), IN1
+
+ mov T1, IVP
+ add $32, IVP
+ add LEN, T1
+ sub LEN, IVP
+ add OUTP, LEN
+
+ movups (T1), %xmm4
+ movaps STATE, IN2
+ pshufb %xmm4, STATE
+ movups STATE, (LEN)
+
+ movups (IVP), %xmm0
+ pshufb %xmm0, IN1
+ pblendvb IN2, IN1
+ movaps IN1, STATE
+
+ pxor STATE4, STATE
+ call _aesni_dec1
+ pxor STATE4, STATE
+
+ movups STATE, (OUTP)
+ jmp .Lxts_dec_ret
+SYM_FUNC_END(aesni_xts_decrypt)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index ad8a7188a2bf..2144e54a6c89 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -31,11 +31,10 @@
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
+#include <linux/jump_label.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
-#ifdef CONFIG_X86_64
-#include <asm/crypto/glue_helper.h>
-#endif
+#include <linux/static_call.h>
#define AESNI_ALIGN 16
@@ -93,62 +92,25 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
+asmlinkage void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
+asmlinkage void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096
+asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
+
+asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
+
#ifdef CONFIG_X86_64
-static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
- const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
-
-asmlinkage void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *out,
- const u8 *in, bool enc, le128 *iv);
-
-/* asmlinkage void aesni_gcm_enc()
- * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
- * struct gcm_context_data. May be uninitialized.
- * u8 *out, Ciphertext output. Encrypt in-place is allowed.
- * const u8 *in, Plaintext input
- * unsigned long plaintext_len, Length of data in bytes for encryption.
- * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
- * 16-byte aligned pointer.
- * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
- * const u8 *aad, Additional Authentication Data (AAD)
- * unsigned long aad_len, Length of AAD in bytes.
- * u8 *auth_tag, Authenticated Tag output.
- * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
- * Valid values are 16 (most likely), 12 or 8.
- */
-asmlinkage void aesni_gcm_enc(void *ctx,
- struct gcm_context_data *gdata, u8 *out,
- const u8 *in, unsigned long plaintext_len, u8 *iv,
- u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len);
-
-/* asmlinkage void aesni_gcm_dec()
- * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
- * struct gcm_context_data. May be uninitialized.
- * u8 *out, Plaintext output. Decrypt in-place is allowed.
- * const u8 *in, Ciphertext input
- * unsigned long ciphertext_len, Length of data in bytes for decryption.
- * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
- * 16-byte aligned pointer.
- * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
- * const u8 *aad, Additional Authentication Data (AAD)
- * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
- * to be 8 or 12 bytes
- * u8 *auth_tag, Authenticated Tag output.
- * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
- * Valid values are 16 (most likely), 12 or 8.
- */
-asmlinkage void aesni_gcm_dec(void *ctx,
- struct gcm_context_data *gdata, u8 *out,
- const u8 *in, unsigned long ciphertext_len, u8 *iv,
- u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len);
+DEFINE_STATIC_CALL(aesni_ctr_enc_tfm, aesni_ctr_enc);
/* Scatter / Gather routines, with args similar to above */
asmlinkage void aesni_gcm_init(void *ctx,
@@ -167,24 +129,6 @@ asmlinkage void aesni_gcm_finalize(void *ctx,
struct gcm_context_data *gdata,
u8 *auth_tag, unsigned long auth_tag_len);
-static const struct aesni_gcm_tfm_s {
- void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv,
- u8 *hash_subkey, const u8 *aad, unsigned long aad_len);
- void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
- const u8 *in, unsigned long plaintext_len);
- void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
- const u8 *in, unsigned long ciphertext_len);
- void (*finalize)(void *ctx, struct gcm_context_data *gdata,
- u8 *auth_tag, unsigned long auth_tag_len);
-} *aesni_gcm_tfm;
-
-static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
- .init = &aesni_gcm_init,
- .enc_update = &aesni_gcm_enc_update,
- .dec_update = &aesni_gcm_dec_update,
- .finalize = &aesni_gcm_finalize,
-};
-
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
@@ -214,25 +158,6 @@ asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
struct gcm_context_data *gdata,
u8 *auth_tag, unsigned long auth_tag_len);
-asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx,
- struct gcm_context_data *gdata, u8 *out,
- const u8 *in, unsigned long plaintext_len, u8 *iv,
- const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len);
-
-asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx,
- struct gcm_context_data *gdata, u8 *out,
- const u8 *in, unsigned long ciphertext_len, u8 *iv,
- const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len);
-
-static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
- .init = &aesni_gcm_init_avx_gen2,
- .enc_update = &aesni_gcm_enc_update_avx_gen2,
- .dec_update = &aesni_gcm_dec_update_avx_gen2,
- .finalize = &aesni_gcm_finalize_avx_gen2,
-};
-
/*
* asmlinkage void aesni_gcm_init_avx_gen4()
* gcm_data *my_ctx_data, context data
@@ -256,24 +181,8 @@ asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
struct gcm_context_data *gdata,
u8 *auth_tag, unsigned long auth_tag_len);
-asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx,
- struct gcm_context_data *gdata, u8 *out,
- const u8 *in, unsigned long plaintext_len, u8 *iv,
- const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len);
-
-asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx,
- struct gcm_context_data *gdata, u8 *out,
- const u8 *in, unsigned long ciphertext_len, u8 *iv,
- const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len);
-
-static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
- .init = &aesni_gcm_init_avx_gen4,
- .enc_update = &aesni_gcm_enc_update_avx_gen4,
- .dec_update = &aesni_gcm_dec_update_avx_gen4,
- .finalize = &aesni_gcm_finalize_avx_gen4,
-};
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx2);
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
@@ -374,16 +283,16 @@ static int ecb_encrypt(struct skcipher_request *req)
unsigned int nbytes;
int err;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = skcipher_walk_done(&walk, nbytes);
}
- kernel_fpu_end();
return err;
}
@@ -396,16 +305,16 @@ static int ecb_decrypt(struct skcipher_request *req)
unsigned int nbytes;
int err;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = skcipher_walk_done(&walk, nbytes);
}
- kernel_fpu_end();
return err;
}
@@ -418,16 +327,16 @@ static int cbc_encrypt(struct skcipher_request *req)
unsigned int nbytes;
int err;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = skcipher_walk_done(&walk, nbytes);
}
- kernel_fpu_end();
return err;
}
@@ -440,36 +349,133 @@ static int cbc_decrypt(struct skcipher_request *req)
unsigned int nbytes;
int err;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = skcipher_walk_done(&walk, nbytes);
}
- kernel_fpu_end();
return err;
}
-#ifdef CONFIG_X86_64
-static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
- struct skcipher_walk *walk)
+static int cts_cbc_encrypt(struct skcipher_request *req)
{
- u8 *ctrblk = walk->iv;
- u8 keystream[AES_BLOCK_SIZE];
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- unsigned int nbytes = walk->nbytes;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+ int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
+ struct scatterlist *src = req->src, *dst = req->dst;
+ struct scatterlist sg_src[2], sg_dst[2];
+ struct skcipher_request subreq;
+ struct skcipher_walk walk;
+ int err;
+
+ skcipher_request_set_tfm(&subreq, tfm);
+ skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
+ NULL, NULL);
+
+ if (req->cryptlen <= AES_BLOCK_SIZE) {
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+ cbc_blocks = 1;
+ }
+
+ if (cbc_blocks > 0) {
+ skcipher_request_set_crypt(&subreq, req->src, req->dst,
+ cbc_blocks * AES_BLOCK_SIZE,
+ req->iv);
+
+ err = cbc_encrypt(&subreq);
+ if (err)
+ return err;
+
+ if (req->cryptlen == AES_BLOCK_SIZE)
+ return 0;
+
+ dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(sg_dst, req->dst,
+ subreq.cryptlen);
+ }
+
+ /* handle ciphertext stealing */
+ skcipher_request_set_crypt(&subreq, src, dst,
+ req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
+ req->iv);
+
+ err = skcipher_walk_virt(&walk, &subreq, false);
+ if (err)
+ return err;
+
+ kernel_fpu_begin();
+ aesni_cts_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ walk.nbytes, walk.iv);
+ kernel_fpu_end();
+
+ return skcipher_walk_done(&walk, 0);
+}
+
+static int cts_cbc_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+ int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
+ struct scatterlist *src = req->src, *dst = req->dst;
+ struct scatterlist sg_src[2], sg_dst[2];
+ struct skcipher_request subreq;
+ struct skcipher_walk walk;
+ int err;
+
+ skcipher_request_set_tfm(&subreq, tfm);
+ skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
+ NULL, NULL);
+
+ if (req->cryptlen <= AES_BLOCK_SIZE) {
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+ cbc_blocks = 1;
+ }
+
+ if (cbc_blocks > 0) {
+ skcipher_request_set_crypt(&subreq, req->src, req->dst,
+ cbc_blocks * AES_BLOCK_SIZE,
+ req->iv);
+
+ err = cbc_decrypt(&subreq);
+ if (err)
+ return err;
+
+ if (req->cryptlen == AES_BLOCK_SIZE)
+ return 0;
+
+ dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(sg_dst, req->dst,
+ subreq.cryptlen);
+ }
+
+ /* handle ciphertext stealing */
+ skcipher_request_set_crypt(&subreq, src, dst,
+ req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
+ req->iv);
- aesni_enc(ctx, keystream, ctrblk);
- crypto_xor_cpy(dst, keystream, src, nbytes);
+ err = skcipher_walk_virt(&walk, &subreq, false);
+ if (err)
+ return err;
+
+ kernel_fpu_begin();
+ aesni_cts_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ walk.nbytes, walk.iv);
+ kernel_fpu_end();
- crypto_inc(ctrblk, AES_BLOCK_SIZE);
+ return skcipher_walk_done(&walk, 0);
}
+#ifdef CONFIG_X86_64
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv)
{
@@ -491,120 +497,36 @@ static int ctr_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+ u8 keystream[AES_BLOCK_SIZE];
struct skcipher_walk walk;
unsigned int nbytes;
int err;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
- kernel_fpu_begin();
- while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
- aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK, walk.iv);
- nbytes &= AES_BLOCK_SIZE - 1;
+ while ((nbytes = walk.nbytes) > 0) {
+ kernel_fpu_begin();
+ if (nbytes & AES_BLOCK_MASK)
+ static_call(aesni_ctr_enc_tfm)(ctx, walk.dst.virt.addr,
+ walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK,
+ walk.iv);
+ nbytes &= ~AES_BLOCK_MASK;
+
+ if (walk.nbytes == walk.total && nbytes > 0) {
+ aesni_enc(ctx, keystream, walk.iv);
+ crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes - nbytes,
+ walk.src.virt.addr + walk.nbytes - nbytes,
+ keystream, nbytes);
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+ nbytes = 0;
+ }
+ kernel_fpu_end();
err = skcipher_walk_done(&walk, nbytes);
}
- if (walk.nbytes) {
- ctr_crypt_final(ctx, &walk);
- err = skcipher_walk_done(&walk, 0);
- }
- kernel_fpu_end();
-
return err;
}
-static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err;
-
- err = xts_verify_key(tfm, key, keylen);
- if (err)
- return err;
-
- keylen /= 2;
-
- /* first half of xts-key is for crypt */
- err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
- key, keylen);
- if (err)
- return err;
-
- /* second half of xts-key is for tweak */
- return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
- key + keylen, keylen);
-}
-
-
-static void aesni_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
-{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_enc);
-}
-
-static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
-{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec);
-}
-
-static void aesni_xts_enc8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
-{
- aesni_xts_crypt8(ctx, dst, src, true, iv);
-}
-
-static void aesni_xts_dec8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
-{
- aesni_xts_crypt8(ctx, dst, src, false, iv);
-}
-
-static const struct common_glue_ctx aesni_enc_xts = {
- .num_funcs = 2,
- .fpu_blocks_limit = 1,
-
- .funcs = { {
- .num_blocks = 8,
- .fn_u = { .xts = aesni_xts_enc8 }
- }, {
- .num_blocks = 1,
- .fn_u = { .xts = aesni_xts_enc }
- } }
-};
-
-static const struct common_glue_ctx aesni_dec_xts = {
- .num_funcs = 2,
- .fpu_blocks_limit = 1,
-
- .funcs = { {
- .num_blocks = 8,
- .fn_u = { .xts = aesni_xts_dec8 }
- }, {
- .num_blocks = 1,
- .fn_u = { .xts = aesni_xts_dec }
- } }
-};
-
-static int xts_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return glue_xts_req_128bit(&aesni_enc_xts, req, aesni_enc,
- aes_ctx(ctx->raw_tweak_ctx),
- aes_ctx(ctx->raw_crypt_ctx),
- false);
-}
-
-static int xts_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return glue_xts_req_128bit(&aesni_dec_xts, req, aesni_enc,
- aes_ctx(ctx->raw_tweak_ctx),
- aes_ctx(ctx->raw_crypt_ctx),
- true);
-}
-
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
@@ -681,42 +603,35 @@ static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
unsigned int assoclen, u8 *hash_subkey,
- u8 *iv, void *aes_ctx)
+ u8 *iv, void *aes_ctx, u8 *auth_tag,
+ unsigned long auth_tag_len)
{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned long auth_tag_len = crypto_aead_authsize(tfm);
- const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
- struct gcm_context_data data AESNI_ALIGN_ATTR;
- struct scatter_walk dst_sg_walk = {};
+ u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
+ struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
unsigned long left = req->cryptlen;
- unsigned long len, srclen, dstlen;
struct scatter_walk assoc_sg_walk;
- struct scatter_walk src_sg_walk;
- struct scatterlist src_start[2];
- struct scatterlist dst_start[2];
- struct scatterlist *src_sg;
- struct scatterlist *dst_sg;
- u8 *src, *dst, *assoc;
+ struct skcipher_walk walk;
+ bool do_avx, do_avx2;
u8 *assocmem = NULL;
- u8 authTag[16];
+ u8 *assoc;
+ int err;
if (!enc)
left -= auth_tag_len;
- if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
- gcm_tfm = &aesni_gcm_tfm_avx_gen2;
- if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
- gcm_tfm = &aesni_gcm_tfm_sse;
+ do_avx = (left >= AVX_GEN2_OPTSIZE);
+ do_avx2 = (left >= AVX_GEN4_OPTSIZE);
/* Linearize assoc, if not already linear */
- if (req->src->length >= assoclen && req->src->length &&
- (!PageHighMem(sg_page(req->src)) ||
- req->src->offset + req->src->length <= PAGE_SIZE)) {
+ if (req->src->length >= assoclen && req->src->length) {
scatterwalk_start(&assoc_sg_walk, req->src);
assoc = scatterwalk_map(&assoc_sg_walk);
} else {
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+
/* assoc can be any length, so must be on heap */
- assocmem = kmalloc(assoclen, GFP_ATOMIC);
+ assocmem = kmalloc(assoclen, flags);
if (unlikely(!assocmem))
return -ENOMEM;
assoc = assocmem;
@@ -724,62 +639,15 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
}
- if (left) {
- src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
- scatterwalk_start(&src_sg_walk, src_sg);
- if (req->src != req->dst) {
- dst_sg = scatterwalk_ffwd(dst_start, req->dst,
- req->assoclen);
- scatterwalk_start(&dst_sg_walk, dst_sg);
- }
- }
-
kernel_fpu_begin();
- gcm_tfm->init(aes_ctx, &data, iv,
- hash_subkey, assoc, assoclen);
- if (req->src != req->dst) {
- while (left) {
- src = scatterwalk_map(&src_sg_walk);
- dst = scatterwalk_map(&dst_sg_walk);
- srclen = scatterwalk_clamp(&src_sg_walk, left);
- dstlen = scatterwalk_clamp(&dst_sg_walk, left);
- len = min(srclen, dstlen);
- if (len) {
- if (enc)
- gcm_tfm->enc_update(aes_ctx, &data,
- dst, src, len);
- else
- gcm_tfm->dec_update(aes_ctx, &data,
- dst, src, len);
- }
- left -= len;
-
- scatterwalk_unmap(src);
- scatterwalk_unmap(dst);
- scatterwalk_advance(&src_sg_walk, len);
- scatterwalk_advance(&dst_sg_walk, len);
- scatterwalk_done(&src_sg_walk, 0, left);
- scatterwalk_done(&dst_sg_walk, 1, left);
- }
- } else {
- while (left) {
- dst = src = scatterwalk_map(&src_sg_walk);
- len = scatterwalk_clamp(&src_sg_walk, left);
- if (len) {
- if (enc)
- gcm_tfm->enc_update(aes_ctx, &data,
- src, src, len);
- else
- gcm_tfm->dec_update(aes_ctx, &data,
- src, src, len);
- }
- left -= len;
- scatterwalk_unmap(src);
- scatterwalk_advance(&src_sg_walk, len);
- scatterwalk_done(&src_sg_walk, 1, left);
- }
- }
- gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len);
+ if (static_branch_likely(&gcm_use_avx2) && do_avx2)
+ aesni_gcm_init_avx_gen4(aes_ctx, data, iv, hash_subkey, assoc,
+ assoclen);
+ else if (static_branch_likely(&gcm_use_avx) && do_avx)
+ aesni_gcm_init_avx_gen2(aes_ctx, data, iv, hash_subkey, assoc,
+ assoclen);
+ else
+ aesni_gcm_init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
kernel_fpu_end();
if (!assocmem)
@@ -787,24 +655,58 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
else
kfree(assocmem);
- if (!enc) {
- u8 authTagMsg[16];
+ err = enc ? skcipher_walk_aead_encrypt(&walk, req, false)
+ : skcipher_walk_aead_decrypt(&walk, req, false);
- /* Copy out original authTag */
- scatterwalk_map_and_copy(authTagMsg, req->src,
- req->assoclen + req->cryptlen -
- auth_tag_len,
- auth_tag_len, 0);
+ while (walk.nbytes > 0) {
+ kernel_fpu_begin();
+ if (static_branch_likely(&gcm_use_avx2) && do_avx2) {
+ if (enc)
+ aesni_gcm_enc_update_avx_gen4(aes_ctx, data,
+ walk.dst.virt.addr,
+ walk.src.virt.addr,
+ walk.nbytes);
+ else
+ aesni_gcm_dec_update_avx_gen4(aes_ctx, data,
+ walk.dst.virt.addr,
+ walk.src.virt.addr,
+ walk.nbytes);
+ } else if (static_branch_likely(&gcm_use_avx) && do_avx) {
+ if (enc)
+ aesni_gcm_enc_update_avx_gen2(aes_ctx, data,
+ walk.dst.virt.addr,
+ walk.src.virt.addr,
+ walk.nbytes);
+ else
+ aesni_gcm_dec_update_avx_gen2(aes_ctx, data,
+ walk.dst.virt.addr,
+ walk.src.virt.addr,
+ walk.nbytes);
+ } else if (enc) {
+ aesni_gcm_enc_update(aes_ctx, data, walk.dst.virt.addr,
+ walk.src.virt.addr, walk.nbytes);
+ } else {
+ aesni_gcm_dec_update(aes_ctx, data, walk.dst.virt.addr,
+ walk.src.virt.addr, walk.nbytes);
+ }
+ kernel_fpu_end();
- /* Compare generated tag with passed in tag. */
- return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
- -EBADMSG : 0;
+ err = skcipher_walk_done(&walk, 0);
}
- /* Copy in the authTag */
- scatterwalk_map_and_copy(authTag, req->dst,
- req->assoclen + req->cryptlen,
- auth_tag_len, 1);
+ if (err)
+ return err;
+
+ kernel_fpu_begin();
+ if (static_branch_likely(&gcm_use_avx2) && do_avx2)
+ aesni_gcm_finalize_avx_gen4(aes_ctx, data, auth_tag,
+ auth_tag_len);
+ else if (static_branch_likely(&gcm_use_avx) && do_avx)
+ aesni_gcm_finalize_avx_gen2(aes_ctx, data, auth_tag,
+ auth_tag_len);
+ else
+ aesni_gcm_finalize(aes_ctx, data, auth_tag, auth_tag_len);
+ kernel_fpu_end();
return 0;
}
@@ -812,15 +714,47 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
- return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
- aes_ctx);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+ u8 auth_tag[16];
+ int err;
+
+ err = gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv, aes_ctx,
+ auth_tag, auth_tag_len);
+ if (err)
+ return err;
+
+ scatterwalk_map_and_copy(auth_tag, req->dst,
+ req->assoclen + req->cryptlen,
+ auth_tag_len, 1);
+ return 0;
}
static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
- return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
- aes_ctx);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+ u8 auth_tag_msg[16];
+ u8 auth_tag[16];
+ int err;
+
+ err = gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv, aes_ctx,
+ auth_tag, auth_tag_len);
+ if (err)
+ return err;
+
+ /* Copy out original auth_tag */
+ scatterwalk_map_and_copy(auth_tag_msg, req->src,
+ req->assoclen + req->cryptlen - auth_tag_len,
+ auth_tag_len, 0);
+
+ /* Compare generated tag with passed in tag. */
+ if (crypto_memneq(auth_tag_msg, auth_tag, auth_tag_len)) {
+ memzero_explicit(auth_tag, sizeof(auth_tag));
+ return -EBADMSG;
+ }
+ return 0;
}
static int helper_rfc4106_encrypt(struct aead_request *req)
@@ -828,7 +762,8 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
- u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+ u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
+ u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
unsigned int i;
__be32 counter = cpu_to_be32(1);
@@ -855,7 +790,8 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
- u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+ u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
+ u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
unsigned int i;
if (unlikely(req->assoclen != 16 && req->assoclen != 20))
@@ -877,6 +813,128 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
}
#endif
+static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int err;
+
+ err = xts_verify_key(tfm, key, keylen);
+ if (err)
+ return err;
+
+ keylen /= 2;
+
+ /* first half of xts-key is for crypt */
+ err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
+ key, keylen);
+ if (err)
+ return err;
+
+ /* second half of xts-key is for tweak */
+ return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
+ key + keylen, keylen);
+}
+
+static int xts_crypt(struct skcipher_request *req, bool encrypt)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int tail = req->cryptlen % AES_BLOCK_SIZE;
+ struct skcipher_request subreq;
+ struct skcipher_walk walk;
+ int err;
+
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
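+	/*
+	 * When the length is not block aligned, process all but the last
+	 * full block in the main loop; that block plus the partial tail
+	 * are handled with ciphertext stealing afterwards.
+	 */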
+ if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
+ int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
+
+ skcipher_walk_abort(&walk);
+
+ skcipher_request_set_tfm(&subreq, tfm);
+ skcipher_request_set_callback(&subreq,
+ skcipher_request_flags(req),
+ NULL, NULL);
+ skcipher_request_set_crypt(&subreq, req->src, req->dst,
+ blocks * AES_BLOCK_SIZE, req->iv);
+ req = &subreq;
+ err = skcipher_walk_virt(&walk, req, false);
+ } else {
+ tail = 0;
+ }
+
+ kernel_fpu_begin();
+
+ /* calculate first value of T */
+ aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv);
+
+ while (walk.nbytes > 0) {
+ int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes &= ~(AES_BLOCK_SIZE - 1);
+
+ if (encrypt)
+ aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
+ walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes, walk.iv);
+ else
+ aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
+ walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes, walk.iv);
+ kernel_fpu_end();
+
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+
+ if (walk.nbytes > 0)
+ kernel_fpu_begin();
+ }
+
+ if (unlikely(tail > 0 && !err)) {
+ struct scatterlist sg_src[2], sg_dst[2];
+ struct scatterlist *src, *dst;
+
+ dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
+
+ skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
+ req->iv);
+
+ err = skcipher_walk_virt(&walk, &subreq, false);
+ if (err)
+ return err;
+
+ kernel_fpu_begin();
+ if (encrypt)
+ aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
+ walk.dst.virt.addr, walk.src.virt.addr,
+ walk.nbytes, walk.iv);
+ else
+ aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
+ walk.dst.virt.addr, walk.src.virt.addr,
+ walk.nbytes, walk.iv);
+ kernel_fpu_end();
+
+ err = skcipher_walk_done(&walk, 0);
+ }
+ return err;
+}
+
+static int xts_encrypt(struct skcipher_request *req)
+{
+ return xts_crypt(req, true);
+}
+
+static int xts_decrypt(struct skcipher_request *req)
+{
+ return xts_crypt(req, false);
+}
+
static struct crypto_alg aesni_cipher_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-aesni",
@@ -928,6 +986,23 @@ static struct skcipher_alg aesni_skciphers[] = {
.setkey = aesni_skcipher_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
+ }, {
+ .base = {
+ .cra_name = "__cts(cbc(aes))",
+ .cra_driver_name = "__cts-cbc-aes-aesni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = CRYPTO_AES_CTX_SIZE,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .walksize = 2 * AES_BLOCK_SIZE,
+ .setkey = aesni_skcipher_setkey,
+ .encrypt = cts_cbc_encrypt,
+ .decrypt = cts_cbc_decrypt,
#ifdef CONFIG_X86_64
}, {
.base = {
@@ -946,6 +1021,7 @@ static struct skcipher_alg aesni_skciphers[] = {
.setkey = aesni_skcipher_setkey,
.encrypt = ctr_crypt,
.decrypt = ctr_crypt,
+#endif
}, {
.base = {
.cra_name = "__xts(aes)",
@@ -959,10 +1035,10 @@ static struct skcipher_alg aesni_skciphers[] = {
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
+ .walksize = 2 * AES_BLOCK_SIZE,
.setkey = xts_aesni_setkey,
.encrypt = xts_encrypt,
.decrypt = xts_decrypt,
-#endif
}
};
@@ -985,7 +1061,8 @@ static int generic_gcmaes_encrypt(struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
- u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+ u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
+ u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
__be32 counter = cpu_to_be32(1);
memcpy(iv, req->iv, 12);
@@ -1001,7 +1078,8 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
- u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+ u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
+ u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
memcpy(iv, req->iv, 12);
*((__be32 *)(iv+12)) = counter;
@@ -1066,19 +1144,18 @@ static int __init aesni_init(void)
#ifdef CONFIG_X86_64
if (boot_cpu_has(X86_FEATURE_AVX2)) {
pr_info("AVX2 version of gcm_enc/dec engaged.\n");
- aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
+ static_branch_enable(&gcm_use_avx);
+ static_branch_enable(&gcm_use_avx2);
} else
if (boot_cpu_has(X86_FEATURE_AVX)) {
pr_info("AVX version of gcm_enc/dec engaged.\n");
- aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
+ static_branch_enable(&gcm_use_avx);
} else {
pr_info("SSE version of gcm_enc/dec engaged.\n");
- aesni_gcm_tfm = &aesni_gcm_tfm_sse;
}
- aesni_ctr_enc_tfm = aesni_ctr_enc;
if (boot_cpu_has(X86_FEATURE_AVX)) {
/* optimize performance of ctr mode encryption transform */
- aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
+ static_call_update(aesni_ctr_enc_tfm, aesni_ctr_enc_avx_tfm);
pr_info("AES CTR mode by8 optimization enabled\n");
}
#endif
diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c
index c025a01cf708..a40365ab301e 100644
--- a/arch/x86/crypto/blake2s-glue.c
+++ b/arch/x86/crypto/blake2s-glue.c
@@ -58,138 +58,40 @@ void blake2s_compress_arch(struct blake2s_state *state,
}
EXPORT_SYMBOL(blake2s_compress_arch);
-static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen)
+static int crypto_blake2s_update_x86(struct shash_desc *desc,
+ const u8 *in, unsigned int inlen)
{
- struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm);
-
- if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE)
- return -EINVAL;
-
- memcpy(tctx->key, key, keylen);
- tctx->keylen = keylen;
-
- return 0;
+ return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch);
}
-static int crypto_blake2s_init(struct shash_desc *desc)
+static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out)
{
- struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct blake2s_state *state = shash_desc_ctx(desc);
- const int outlen = crypto_shash_digestsize(desc->tfm);
-
- if (tctx->keylen)
- blake2s_init_key(state, outlen, tctx->key, tctx->keylen);
- else
- blake2s_init(state, outlen);
-
- return 0;
+ return crypto_blake2s_final(desc, out, blake2s_compress_arch);
}
-static int crypto_blake2s_update(struct shash_desc *desc, const u8 *in,
- unsigned int inlen)
-{
- struct blake2s_state *state = shash_desc_ctx(desc);
- const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
-
- if (unlikely(!inlen))
- return 0;
- if (inlen > fill) {
- memcpy(state->buf + state->buflen, in, fill);
- blake2s_compress_arch(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
- state->buflen = 0;
- in += fill;
- inlen -= fill;
+#define BLAKE2S_ALG(name, driver_name, digest_size) \
+ { \
+ .base.cra_name = name, \
+ .base.cra_driver_name = driver_name, \
+ .base.cra_priority = 200, \
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
+ .base.cra_module = THIS_MODULE, \
+ .digestsize = digest_size, \
+ .setkey = crypto_blake2s_setkey, \
+ .init = crypto_blake2s_init, \
+ .update = crypto_blake2s_update_x86, \
+ .final = crypto_blake2s_final_x86, \
+ .descsize = sizeof(struct blake2s_state), \
}
- if (inlen > BLAKE2S_BLOCK_SIZE) {
- const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
- /* Hash one less (full) block than strictly possible */
- blake2s_compress_arch(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
- in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
- inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
- }
- memcpy(state->buf + state->buflen, in, inlen);
- state->buflen += inlen;
-
- return 0;
-}
-
-static int crypto_blake2s_final(struct shash_desc *desc, u8 *out)
-{
- struct blake2s_state *state = shash_desc_ctx(desc);
-
- blake2s_set_lastblock(state);
- memset(state->buf + state->buflen, 0,
- BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
- blake2s_compress_arch(state, state->buf, 1, state->buflen);
- cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
- memcpy(out, state->h, state->outlen);
- memzero_explicit(state, sizeof(*state));
-
- return 0;
-}
-static struct shash_alg blake2s_algs[] = {{
- .base.cra_name = "blake2s-128",
- .base.cra_driver_name = "blake2s-128-x86",
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
- .base.cra_priority = 200,
- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-
- .digestsize = BLAKE2S_128_HASH_SIZE,
- .setkey = crypto_blake2s_setkey,
- .init = crypto_blake2s_init,
- .update = crypto_blake2s_update,
- .final = crypto_blake2s_final,
- .descsize = sizeof(struct blake2s_state),
-}, {
- .base.cra_name = "blake2s-160",
- .base.cra_driver_name = "blake2s-160-x86",
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
- .base.cra_priority = 200,
- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-
- .digestsize = BLAKE2S_160_HASH_SIZE,
- .setkey = crypto_blake2s_setkey,
- .init = crypto_blake2s_init,
- .update = crypto_blake2s_update,
- .final = crypto_blake2s_final,
- .descsize = sizeof(struct blake2s_state),
-}, {
- .base.cra_name = "blake2s-224",
- .base.cra_driver_name = "blake2s-224-x86",
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
- .base.cra_priority = 200,
- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-
- .digestsize = BLAKE2S_224_HASH_SIZE,
- .setkey = crypto_blake2s_setkey,
- .init = crypto_blake2s_init,
- .update = crypto_blake2s_update,
- .final = crypto_blake2s_final,
- .descsize = sizeof(struct blake2s_state),
-}, {
- .base.cra_name = "blake2s-256",
- .base.cra_driver_name = "blake2s-256-x86",
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
- .base.cra_priority = 200,
- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-
- .digestsize = BLAKE2S_256_HASH_SIZE,
- .setkey = crypto_blake2s_setkey,
- .init = crypto_blake2s_init,
- .update = crypto_blake2s_update,
- .final = crypto_blake2s_final,
- .descsize = sizeof(struct blake2s_state),
-}};
+static struct shash_alg blake2s_algs[] = {
+ BLAKE2S_ALG("blake2s-128", "blake2s-128-x86", BLAKE2S_128_HASH_SIZE),
+ BLAKE2S_ALG("blake2s-160", "blake2s-160-x86", BLAKE2S_160_HASH_SIZE),
+ BLAKE2S_ALG("blake2s-224", "blake2s-224-x86", BLAKE2S_224_HASH_SIZE),
+ BLAKE2S_ALG("blake2s-256", "blake2s-256-x86", BLAKE2S_256_HASH_SIZE),
+};
static int __init blake2s_mod_init(void)
{
diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
index cedfdba69ce3..a880e0b1c255 100644
--- a/arch/x86/crypto/blowfish_glue.c
+++ b/arch/x86/crypto/blowfish_glue.c
@@ -6,8 +6,6 @@
*
* CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- * CTR part based on code (crypto/ctr.c) by:
- * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
*/
#include <crypto/algapi.h>
@@ -247,97 +245,6 @@ static int cbc_decrypt(struct skcipher_request *req)
return err;
}
-static void ctr_crypt_final(struct bf_ctx *ctx, struct skcipher_walk *walk)
-{
- u8 *ctrblk = walk->iv;
- u8 keystream[BF_BLOCK_SIZE];
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- unsigned int nbytes = walk->nbytes;
-
- blowfish_enc_blk(ctx, keystream, ctrblk);
- crypto_xor_cpy(dst, keystream, src, nbytes);
-
- crypto_inc(ctrblk, BF_BLOCK_SIZE);
-}
-
-static unsigned int __ctr_crypt(struct bf_ctx *ctx, struct skcipher_walk *walk)
-{
- unsigned int bsize = BF_BLOCK_SIZE;
- unsigned int nbytes = walk->nbytes;
- u64 *src = (u64 *)walk->src.virt.addr;
- u64 *dst = (u64 *)walk->dst.virt.addr;
- u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv);
- __be64 ctrblocks[4];
-
- /* Process four block batch */
- if (nbytes >= bsize * 4) {
- do {
- if (dst != src) {
- dst[0] = src[0];
- dst[1] = src[1];
- dst[2] = src[2];
- dst[3] = src[3];
- }
-
- /* create ctrblks for parallel encrypt */
- ctrblocks[0] = cpu_to_be64(ctrblk++);
- ctrblocks[1] = cpu_to_be64(ctrblk++);
- ctrblocks[2] = cpu_to_be64(ctrblk++);
- ctrblocks[3] = cpu_to_be64(ctrblk++);
-
- blowfish_enc_blk_xor_4way(ctx, (u8 *)dst,
- (u8 *)ctrblocks);
-
- src += 4;
- dst += 4;
- } while ((nbytes -= bsize * 4) >= bsize * 4);
-
- if (nbytes < bsize)
- goto done;
- }
-
- /* Handle leftovers */
- do {
- if (dst != src)
- *dst = *src;
-
- ctrblocks[0] = cpu_to_be64(ctrblk++);
-
- blowfish_enc_blk_xor(ctx, (u8 *)dst, (u8 *)ctrblocks);
-
- src += 1;
- dst += 1;
- } while ((nbytes -= bsize) >= bsize);
-
-done:
- *(__be64 *)walk->iv = cpu_to_be64(ctrblk);
- return nbytes;
-}
-
-static int ctr_crypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while ((nbytes = walk.nbytes) >= BF_BLOCK_SIZE) {
- nbytes = __ctr_crypt(ctx, &walk);
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- if (nbytes) {
- ctr_crypt_final(ctx, &walk);
- err = skcipher_walk_done(&walk, 0);
- }
-
- return err;
-}
-
static struct crypto_alg bf_cipher_alg = {
.cra_name = "blowfish",
.cra_driver_name = "blowfish-asm",
@@ -384,20 +291,6 @@ static struct skcipher_alg bf_skcipher_algs[] = {
.setkey = blowfish_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
- }, {
- .base.cra_name = "ctr(blowfish)",
- .base.cra_driver_name = "ctr-blowfish-asm",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct bf_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = BF_MIN_KEY_SIZE,
- .max_keysize = BF_MAX_KEY_SIZE,
- .ivsize = BF_BLOCK_SIZE,
- .chunksize = BF_BLOCK_SIZE,
- .setkey = blowfish_setkey_skcipher,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
},
};
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index ecc0a9a905c4..e2a0e0f4bf9d 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -17,7 +17,6 @@
#include <linux/linkage.h>
#include <asm/frame.h>
-#include <asm/nospec-branch.h>
#define CAMELLIA_TABLE_BYTE_LEN 272
@@ -589,14 +588,6 @@ SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
.long 0x80808080
.long 0x80808080
-/* For CTR-mode IV byteswap */
-.Lbswap128_mask:
- .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
-
-/* For XTS mode IV generation */
-.Lxts_gf128mul_and_shl1_mask:
- .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
-
/*
* pre-SubByte transform
*
@@ -998,292 +989,3 @@ SYM_FUNC_START(camellia_cbc_dec_16way)
FRAME_END
ret;
SYM_FUNC_END(camellia_cbc_dec_16way)
-
-#define inc_le128(x, minus_one, tmp) \
- vpcmpeqq minus_one, x, tmp; \
- vpsubq minus_one, x, x; \
- vpslldq $8, tmp, tmp; \
- vpsubq tmp, x, x;
-
-SYM_FUNC_START(camellia_ctr_16way)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst (16 blocks)
- * %rdx: src (16 blocks)
- * %rcx: iv (little endian, 128bit)
- */
- FRAME_BEGIN
-
- subq $(16 * 16), %rsp;
- movq %rsp, %rax;
-
- vmovdqa .Lbswap128_mask, %xmm14;
-
- /* load IV and byteswap */
- vmovdqu (%rcx), %xmm0;
- vpshufb %xmm14, %xmm0, %xmm15;
- vmovdqu %xmm15, 15 * 16(%rax);
-
- vpcmpeqd %xmm15, %xmm15, %xmm15;
- vpsrldq $8, %xmm15, %xmm15; /* low: -1, high: 0 */
-
- /* construct IVs */
- inc_le128(%xmm0, %xmm15, %xmm13);
- vpshufb %xmm14, %xmm0, %xmm13;
- vmovdqu %xmm13, 14 * 16(%rax);
- inc_le128(%xmm0, %xmm15, %xmm13);
- vpshufb %xmm14, %xmm0, %xmm13;
- vmovdqu %xmm13, 13 * 16(%rax);
- inc_le128(%xmm0, %xmm15, %xmm13);
- vpshufb %xmm14, %xmm0, %xmm12;
- inc_le128(%xmm0, %xmm15, %xmm13);
- vpshufb %xmm14, %xmm0, %xmm11;
- inc_le128(%xmm0, %xmm15, %xmm13);
- vpshufb %xmm14, %xmm0, %xmm10;
- inc_le128(%xmm0, %xmm15, %xmm13);
- vpshufb %xmm14, %xmm0, %xmm9;
- inc_le128(%xmm0, %xmm15, %xmm13);
- vpshufb %xmm14, %xmm0, %xmm8;
- inc_le128(%xmm0, %xmm15, %xmm13);
- vpshufb %xmm14, %xmm0, %xmm7;
- inc_le128(%xmm0, %xmm15, %xmm13);
- vpshufb %xmm14, %xmm0, %xmm6;
- inc_le128(%xmm0, %xmm15, %xmm13);
- vpshufb %xmm14, %xmm0, %xmm5;
- inc_le128(%xmm0, %xmm15, %xmm13);
- vpshufb %xmm14, %xmm0, %xmm4;
- inc_le128(%xmm0, %xmm15, %xmm13);
- vpshufb %xmm14, %xmm0, %xmm3;
- inc_le128(%xmm0, %xmm15, %xmm13);
- vpshufb %xmm14, %xmm0, %xmm2;
- inc_le128(%xmm0, %xmm15, %xmm13);
- vpshufb %xmm14, %xmm0, %xmm1;
- inc_le128(%xmm0, %xmm15, %xmm13);
- vmovdqa %xmm0, %xmm13;
- vpshufb %xmm14, %xmm0, %xmm0;
- inc_le128(%xmm13, %xmm15, %xmm14);
- vmovdqu %xmm13, (%rcx);
-
- /* inpack16_pre: */
- vmovq (key_table)(CTX), %xmm15;
- vpshufb .Lpack_bswap, %xmm15, %xmm15;
- vpxor %xmm0, %xmm15, %xmm0;
- vpxor %xmm1, %xmm15, %xmm1;
- vpxor %xmm2, %xmm15, %xmm2;
- vpxor %xmm3, %xmm15, %xmm3;
- vpxor %xmm4, %xmm15, %xmm4;
- vpxor %xmm5, %xmm15, %xmm5;
- vpxor %xmm6, %xmm15, %xmm6;
- vpxor %xmm7, %xmm15, %xmm7;
- vpxor %xmm8, %xmm15, %xmm8;
- vpxor %xmm9, %xmm15, %xmm9;
- vpxor %xmm10, %xmm15, %xmm10;
- vpxor %xmm11, %xmm15, %xmm11;
- vpxor %xmm12, %xmm15, %xmm12;
- vpxor 13 * 16(%rax), %xmm15, %xmm13;
- vpxor 14 * 16(%rax), %xmm15, %xmm14;
- vpxor 15 * 16(%rax), %xmm15, %xmm15;
-
- call __camellia_enc_blk16;
-
- addq $(16 * 16), %rsp;
-
- vpxor 0 * 16(%rdx), %xmm7, %xmm7;
- vpxor 1 * 16(%rdx), %xmm6, %xmm6;
- vpxor 2 * 16(%rdx), %xmm5, %xmm5;
- vpxor 3 * 16(%rdx), %xmm4, %xmm4;
- vpxor 4 * 16(%rdx), %xmm3, %xmm3;
- vpxor 5 * 16(%rdx), %xmm2, %xmm2;
- vpxor 6 * 16(%rdx), %xmm1, %xmm1;
- vpxor 7 * 16(%rdx), %xmm0, %xmm0;
- vpxor 8 * 16(%rdx), %xmm15, %xmm15;
- vpxor 9 * 16(%rdx), %xmm14, %xmm14;
- vpxor 10 * 16(%rdx), %xmm13, %xmm13;
- vpxor 11 * 16(%rdx), %xmm12, %xmm12;
- vpxor 12 * 16(%rdx), %xmm11, %xmm11;
- vpxor 13 * 16(%rdx), %xmm10, %xmm10;
- vpxor 14 * 16(%rdx), %xmm9, %xmm9;
- vpxor 15 * 16(%rdx), %xmm8, %xmm8;
- write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0,
- %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
- %xmm8, %rsi);
-
- FRAME_END
- ret;
-SYM_FUNC_END(camellia_ctr_16way)
-
-#define gf128mul_x_ble(iv, mask, tmp) \
- vpsrad $31, iv, tmp; \
- vpaddq iv, iv, iv; \
- vpshufd $0x13, tmp, tmp; \
- vpand mask, tmp, tmp; \
- vpxor tmp, iv, iv;
-
-.align 8
-SYM_FUNC_START_LOCAL(camellia_xts_crypt_16way)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst (16 blocks)
- * %rdx: src (16 blocks)
- * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
- * %r8: index for input whitening key
- * %r9: pointer to __camellia_enc_blk16 or __camellia_dec_blk16
- */
- FRAME_BEGIN
-
- subq $(16 * 16), %rsp;
- movq %rsp, %rax;
-
- vmovdqa .Lxts_gf128mul_and_shl1_mask, %xmm14;
-
- /* load IV */
- vmovdqu (%rcx), %xmm0;
- vpxor 0 * 16(%rdx), %xmm0, %xmm15;
- vmovdqu %xmm15, 15 * 16(%rax);
- vmovdqu %xmm0, 0 * 16(%rsi);
-
- /* construct IVs */
- gf128mul_x_ble(%xmm0, %xmm14, %xmm15);
- vpxor 1 * 16(%rdx), %xmm0, %xmm15;
- vmovdqu %xmm15, 14 * 16(%rax);
- vmovdqu %xmm0, 1 * 16(%rsi);
-
- gf128mul_x_ble(%xmm0, %xmm14, %xmm15);
- vpxor 2 * 16(%rdx), %xmm0, %xmm13;
- vmovdqu %xmm0, 2 * 16(%rsi);
-
- gf128mul_x_ble(%xmm0, %xmm14, %xmm15);
- vpxor 3 * 16(%rdx), %xmm0, %xmm12;
- vmovdqu %xmm0, 3 * 16(%rsi);
-
- gf128mul_x_ble(%xmm0, %xmm14, %xmm15);
- vpxor 4 * 16(%rdx), %xmm0, %xmm11;
- vmovdqu %xmm0, 4 * 16(%rsi);
-
- gf128mul_x_ble(%xmm0, %xmm14, %xmm15);
- vpxor 5 * 16(%rdx), %xmm0, %xmm10;
- vmovdqu %xmm0, 5 * 16(%rsi);
-
- gf128mul_x_ble(%xmm0, %xmm14, %xmm15);
- vpxor 6 * 16(%rdx), %xmm0, %xmm9;
- vmovdqu %xmm0, 6 * 16(%rsi);
-
- gf128mul_x_ble(%xmm0, %xmm14, %xmm15);
- vpxor 7 * 16(%rdx), %xmm0, %xmm8;
- vmovdqu %xmm0, 7 * 16(%rsi);
-
- gf128mul_x_ble(%xmm0, %xmm14, %xmm15);
- vpxor 8 * 16(%rdx), %xmm0, %xmm7;
- vmovdqu %xmm0, 8 * 16(%rsi);
-
- gf128mul_x_ble(%xmm0, %xmm14, %xmm15);
- vpxor 9 * 16(%rdx), %xmm0, %xmm6;
- vmovdqu %xmm0, 9 * 16(%rsi);
-
- gf128mul_x_ble(%xmm0, %xmm14, %xmm15);
- vpxor 10 * 16(%rdx), %xmm0, %xmm5;
- vmovdqu %xmm0, 10 * 16(%rsi);
-
- gf128mul_x_ble(%xmm0, %xmm14, %xmm15);
- vpxor 11 * 16(%rdx), %xmm0, %xmm4;
- vmovdqu %xmm0, 11 * 16(%rsi);
-
- gf128mul_x_ble(%xmm0, %xmm14, %xmm15);
- vpxor 12 * 16(%rdx), %xmm0, %xmm3;
- vmovdqu %xmm0, 12 * 16(%rsi);
-
- gf128mul_x_ble(%xmm0, %xmm14, %xmm15);
- vpxor 13 * 16(%rdx), %xmm0, %xmm2;
- vmovdqu %xmm0, 13 * 16(%rsi);
-
- gf128mul_x_ble(%xmm0, %xmm14, %xmm15);
- vpxor 14 * 16(%rdx), %xmm0, %xmm1;
- vmovdqu %xmm0, 14 * 16(%rsi);
-
- gf128mul_x_ble(%xmm0, %xmm14, %xmm15);
- vpxor 15 * 16(%rdx), %xmm0, %xmm15;
- vmovdqu %xmm15, 0 * 16(%rax);
- vmovdqu %xmm0, 15 * 16(%rsi);
-
- gf128mul_x_ble(%xmm0, %xmm14, %xmm15);
- vmovdqu %xmm0, (%rcx);
-
- /* inpack16_pre: */
- vmovq (key_table)(CTX, %r8, 8), %xmm15;
- vpshufb .Lpack_bswap, %xmm15, %xmm15;
- vpxor 0 * 16(%rax), %xmm15, %xmm0;
- vpxor %xmm1, %xmm15, %xmm1;
- vpxor %xmm2, %xmm15, %xmm2;
- vpxor %xmm3, %xmm15, %xmm3;
- vpxor %xmm4, %xmm15, %xmm4;
- vpxor %xmm5, %xmm15, %xmm5;
- vpxor %xmm6, %xmm15, %xmm6;
- vpxor %xmm7, %xmm15, %xmm7;
- vpxor %xmm8, %xmm15, %xmm8;
- vpxor %xmm9, %xmm15, %xmm9;
- vpxor %xmm10, %xmm15, %xmm10;
- vpxor %xmm11, %xmm15, %xmm11;
- vpxor %xmm12, %xmm15, %xmm12;
- vpxor %xmm13, %xmm15, %xmm13;
- vpxor 14 * 16(%rax), %xmm15, %xmm14;
- vpxor 15 * 16(%rax), %xmm15, %xmm15;
-
- CALL_NOSPEC r9;
-
- addq $(16 * 16), %rsp;
-
- vpxor 0 * 16(%rsi), %xmm7, %xmm7;
- vpxor 1 * 16(%rsi), %xmm6, %xmm6;
- vpxor 2 * 16(%rsi), %xmm5, %xmm5;
- vpxor 3 * 16(%rsi), %xmm4, %xmm4;
- vpxor 4 * 16(%rsi), %xmm3, %xmm3;
- vpxor 5 * 16(%rsi), %xmm2, %xmm2;
- vpxor 6 * 16(%rsi), %xmm1, %xmm1;
- vpxor 7 * 16(%rsi), %xmm0, %xmm0;
- vpxor 8 * 16(%rsi), %xmm15, %xmm15;
- vpxor 9 * 16(%rsi), %xmm14, %xmm14;
- vpxor 10 * 16(%rsi), %xmm13, %xmm13;
- vpxor 11 * 16(%rsi), %xmm12, %xmm12;
- vpxor 12 * 16(%rsi), %xmm11, %xmm11;
- vpxor 13 * 16(%rsi), %xmm10, %xmm10;
- vpxor 14 * 16(%rsi), %xmm9, %xmm9;
- vpxor 15 * 16(%rsi), %xmm8, %xmm8;
- write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0,
- %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
- %xmm8, %rsi);
-
- FRAME_END
- ret;
-SYM_FUNC_END(camellia_xts_crypt_16way)
-
-SYM_FUNC_START(camellia_xts_enc_16way)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst (16 blocks)
- * %rdx: src (16 blocks)
- * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
- */
- xorl %r8d, %r8d; /* input whitening key, 0 for enc */
-
- leaq __camellia_enc_blk16, %r9;
-
- jmp camellia_xts_crypt_16way;
-SYM_FUNC_END(camellia_xts_enc_16way)
-
-SYM_FUNC_START(camellia_xts_dec_16way)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst (16 blocks)
- * %rdx: src (16 blocks)
- * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
- */
-
- cmpl $16, key_length(CTX);
- movl $32, %r8d;
- movl $24, %eax;
- cmovel %eax, %r8d; /* input whitening key, last for dec */
-
- leaq __camellia_dec_blk16, %r9;
-
- jmp camellia_xts_crypt_16way;
-SYM_FUNC_END(camellia_xts_dec_16way)
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index 0907243c501c..782e9712a1ec 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -7,7 +7,6 @@
#include <linux/linkage.h>
#include <asm/frame.h>
-#include <asm/nospec-branch.h>
#define CAMELLIA_TABLE_BYTE_LEN 272
@@ -625,16 +624,6 @@ SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
.section .rodata.cst16, "aM", @progbits, 16
.align 16
-/* For CTR-mode IV byteswap */
-.Lbswap128_mask:
- .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
-
-/* For XTS mode */
-.Lxts_gf128mul_and_shl1_mask_0:
- .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
-.Lxts_gf128mul_and_shl1_mask_1:
- .byte 0x0e, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0
-
/*
* pre-SubByte transform
*
@@ -1061,343 +1050,3 @@ SYM_FUNC_START(camellia_cbc_dec_32way)
FRAME_END
ret;
SYM_FUNC_END(camellia_cbc_dec_32way)
-
-#define inc_le128(x, minus_one, tmp) \
- vpcmpeqq minus_one, x, tmp; \
- vpsubq minus_one, x, x; \
- vpslldq $8, tmp, tmp; \
- vpsubq tmp, x, x;
-
-#define add2_le128(x, minus_one, minus_two, tmp1, tmp2) \
- vpcmpeqq minus_one, x, tmp1; \
- vpcmpeqq minus_two, x, tmp2; \
- vpsubq minus_two, x, x; \
- vpor tmp2, tmp1, tmp1; \
- vpslldq $8, tmp1, tmp1; \
- vpsubq tmp1, x, x;
-
-SYM_FUNC_START(camellia_ctr_32way)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst (32 blocks)
- * %rdx: src (32 blocks)
- * %rcx: iv (little endian, 128bit)
- */
- FRAME_BEGIN
-
- vzeroupper;
-
- movq %rsp, %r10;
- cmpq %rsi, %rdx;
- je .Lctr_use_stack;
-
- /* dst can be used as temporary storage, src is not overwritten. */
- movq %rsi, %rax;
- jmp .Lctr_continue;
-
-.Lctr_use_stack:
- subq $(16 * 32), %rsp;
- movq %rsp, %rax;
-
-.Lctr_continue:
- vpcmpeqd %ymm15, %ymm15, %ymm15;
- vpsrldq $8, %ymm15, %ymm15; /* ab: -1:0 ; cd: -1:0 */
- vpaddq %ymm15, %ymm15, %ymm12; /* ab: -2:0 ; cd: -2:0 */
-
- /* load IV and byteswap */
- vmovdqu (%rcx), %xmm0;
- vmovdqa %xmm0, %xmm1;
- inc_le128(%xmm0, %xmm15, %xmm14);
- vbroadcasti128 .Lbswap128_mask, %ymm14;
- vinserti128 $1, %xmm0, %ymm1, %ymm0;
- vpshufb %ymm14, %ymm0, %ymm13;
- vmovdqu %ymm13, 15 * 32(%rax);
-
- /* construct IVs */
- add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13); /* ab:le2 ; cd:le3 */
- vpshufb %ymm14, %ymm0, %ymm13;
- vmovdqu %ymm13, 14 * 32(%rax);
- add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
- vpshufb %ymm14, %ymm0, %ymm13;
- vmovdqu %ymm13, 13 * 32(%rax);
- add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
- vpshufb %ymm14, %ymm0, %ymm13;
- vmovdqu %ymm13, 12 * 32(%rax);
- add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
- vpshufb %ymm14, %ymm0, %ymm13;
- vmovdqu %ymm13, 11 * 32(%rax);
- add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
- vpshufb %ymm14, %ymm0, %ymm10;
- add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
- vpshufb %ymm14, %ymm0, %ymm9;
- add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
- vpshufb %ymm14, %ymm0, %ymm8;
- add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
- vpshufb %ymm14, %ymm0, %ymm7;
- add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
- vpshufb %ymm14, %ymm0, %ymm6;
- add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
- vpshufb %ymm14, %ymm0, %ymm5;
- add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
- vpshufb %ymm14, %ymm0, %ymm4;
- add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
- vpshufb %ymm14, %ymm0, %ymm3;
- add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
- vpshufb %ymm14, %ymm0, %ymm2;
- add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
- vpshufb %ymm14, %ymm0, %ymm1;
- add2_le128(%ymm0, %ymm15, %ymm12, %ymm11, %ymm13);
- vextracti128 $1, %ymm0, %xmm13;
- vpshufb %ymm14, %ymm0, %ymm0;
- inc_le128(%xmm13, %xmm15, %xmm14);
- vmovdqu %xmm13, (%rcx);
-
- /* inpack32_pre: */
- vpbroadcastq (key_table)(CTX), %ymm15;
- vpshufb .Lpack_bswap, %ymm15, %ymm15;
- vpxor %ymm0, %ymm15, %ymm0;
- vpxor %ymm1, %ymm15, %ymm1;
- vpxor %ymm2, %ymm15, %ymm2;
- vpxor %ymm3, %ymm15, %ymm3;
- vpxor %ymm4, %ymm15, %ymm4;
- vpxor %ymm5, %ymm15, %ymm5;
- vpxor %ymm6, %ymm15, %ymm6;
- vpxor %ymm7, %ymm15, %ymm7;
- vpxor %ymm8, %ymm15, %ymm8;
- vpxor %ymm9, %ymm15, %ymm9;
- vpxor %ymm10, %ymm15, %ymm10;
- vpxor 11 * 32(%rax), %ymm15, %ymm11;
- vpxor 12 * 32(%rax), %ymm15, %ymm12;
- vpxor 13 * 32(%rax), %ymm15, %ymm13;
- vpxor 14 * 32(%rax), %ymm15, %ymm14;
- vpxor 15 * 32(%rax), %ymm15, %ymm15;
-
- call __camellia_enc_blk32;
-
- movq %r10, %rsp;
-
- vpxor 0 * 32(%rdx), %ymm7, %ymm7;
- vpxor 1 * 32(%rdx), %ymm6, %ymm6;
- vpxor 2 * 32(%rdx), %ymm5, %ymm5;
- vpxor 3 * 32(%rdx), %ymm4, %ymm4;
- vpxor 4 * 32(%rdx), %ymm3, %ymm3;
- vpxor 5 * 32(%rdx), %ymm2, %ymm2;
- vpxor 6 * 32(%rdx), %ymm1, %ymm1;
- vpxor 7 * 32(%rdx), %ymm0, %ymm0;
- vpxor 8 * 32(%rdx), %ymm15, %ymm15;
- vpxor 9 * 32(%rdx), %ymm14, %ymm14;
- vpxor 10 * 32(%rdx), %ymm13, %ymm13;
- vpxor 11 * 32(%rdx), %ymm12, %ymm12;
- vpxor 12 * 32(%rdx), %ymm11, %ymm11;
- vpxor 13 * 32(%rdx), %ymm10, %ymm10;
- vpxor 14 * 32(%rdx), %ymm9, %ymm9;
- vpxor 15 * 32(%rdx), %ymm8, %ymm8;
- write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0,
- %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9,
- %ymm8, %rsi);
-
- vzeroupper;
-
- FRAME_END
- ret;
-SYM_FUNC_END(camellia_ctr_32way)
-
-#define gf128mul_x_ble(iv, mask, tmp) \
- vpsrad $31, iv, tmp; \
- vpaddq iv, iv, iv; \
- vpshufd $0x13, tmp, tmp; \
- vpand mask, tmp, tmp; \
- vpxor tmp, iv, iv;
-
-#define gf128mul_x2_ble(iv, mask1, mask2, tmp0, tmp1) \
- vpsrad $31, iv, tmp0; \
- vpaddq iv, iv, tmp1; \
- vpsllq $2, iv, iv; \
- vpshufd $0x13, tmp0, tmp0; \
- vpsrad $31, tmp1, tmp1; \
- vpand mask2, tmp0, tmp0; \
- vpshufd $0x13, tmp1, tmp1; \
- vpxor tmp0, iv, iv; \
- vpand mask1, tmp1, tmp1; \
- vpxor tmp1, iv, iv;
-
-.align 8
-SYM_FUNC_START_LOCAL(camellia_xts_crypt_32way)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst (32 blocks)
- * %rdx: src (32 blocks)
- * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
- * %r8: index for input whitening key
- * %r9: pointer to __camellia_enc_blk32 or __camellia_dec_blk32
- */
- FRAME_BEGIN
-
- vzeroupper;
-
- subq $(16 * 32), %rsp;
- movq %rsp, %rax;
-
- vbroadcasti128 .Lxts_gf128mul_and_shl1_mask_0, %ymm12;
-
- /* load IV and construct second IV */
- vmovdqu (%rcx), %xmm0;
- vmovdqa %xmm0, %xmm15;
- gf128mul_x_ble(%xmm0, %xmm12, %xmm13);
- vbroadcasti128 .Lxts_gf128mul_and_shl1_mask_1, %ymm13;
- vinserti128 $1, %xmm0, %ymm15, %ymm0;
- vpxor 0 * 32(%rdx), %ymm0, %ymm15;
- vmovdqu %ymm15, 15 * 32(%rax);
- vmovdqu %ymm0, 0 * 32(%rsi);
-
- /* construct IVs */
- gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
- vpxor 1 * 32(%rdx), %ymm0, %ymm15;
- vmovdqu %ymm15, 14 * 32(%rax);
- vmovdqu %ymm0, 1 * 32(%rsi);
-
- gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
- vpxor 2 * 32(%rdx), %ymm0, %ymm15;
- vmovdqu %ymm15, 13 * 32(%rax);
- vmovdqu %ymm0, 2 * 32(%rsi);
-
- gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
- vpxor 3 * 32(%rdx), %ymm0, %ymm15;
- vmovdqu %ymm15, 12 * 32(%rax);
- vmovdqu %ymm0, 3 * 32(%rsi);
-
- gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
- vpxor 4 * 32(%rdx), %ymm0, %ymm11;
- vmovdqu %ymm0, 4 * 32(%rsi);
-
- gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
- vpxor 5 * 32(%rdx), %ymm0, %ymm10;
- vmovdqu %ymm0, 5 * 32(%rsi);
-
- gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
- vpxor 6 * 32(%rdx), %ymm0, %ymm9;
- vmovdqu %ymm0, 6 * 32(%rsi);
-
- gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
- vpxor 7 * 32(%rdx), %ymm0, %ymm8;
- vmovdqu %ymm0, 7 * 32(%rsi);
-
- gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
- vpxor 8 * 32(%rdx), %ymm0, %ymm7;
- vmovdqu %ymm0, 8 * 32(%rsi);
-
- gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
- vpxor 9 * 32(%rdx), %ymm0, %ymm6;
- vmovdqu %ymm0, 9 * 32(%rsi);
-
- gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
- vpxor 10 * 32(%rdx), %ymm0, %ymm5;
- vmovdqu %ymm0, 10 * 32(%rsi);
-
- gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
- vpxor 11 * 32(%rdx), %ymm0, %ymm4;
- vmovdqu %ymm0, 11 * 32(%rsi);
-
- gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
- vpxor 12 * 32(%rdx), %ymm0, %ymm3;
- vmovdqu %ymm0, 12 * 32(%rsi);
-
- gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
- vpxor 13 * 32(%rdx), %ymm0, %ymm2;
- vmovdqu %ymm0, 13 * 32(%rsi);
-
- gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
- vpxor 14 * 32(%rdx), %ymm0, %ymm1;
- vmovdqu %ymm0, 14 * 32(%rsi);
-
- gf128mul_x2_ble(%ymm0, %ymm12, %ymm13, %ymm14, %ymm15);
- vpxor 15 * 32(%rdx), %ymm0, %ymm15;
- vmovdqu %ymm15, 0 * 32(%rax);
- vmovdqu %ymm0, 15 * 32(%rsi);
-
- vextracti128 $1, %ymm0, %xmm0;
- gf128mul_x_ble(%xmm0, %xmm12, %xmm15);
- vmovdqu %xmm0, (%rcx);
-
- /* inpack32_pre: */
- vpbroadcastq (key_table)(CTX, %r8, 8), %ymm15;
- vpshufb .Lpack_bswap, %ymm15, %ymm15;
- vpxor 0 * 32(%rax), %ymm15, %ymm0;
- vpxor %ymm1, %ymm15, %ymm1;
- vpxor %ymm2, %ymm15, %ymm2;
- vpxor %ymm3, %ymm15, %ymm3;
- vpxor %ymm4, %ymm15, %ymm4;
- vpxor %ymm5, %ymm15, %ymm5;
- vpxor %ymm6, %ymm15, %ymm6;
- vpxor %ymm7, %ymm15, %ymm7;
- vpxor %ymm8, %ymm15, %ymm8;
- vpxor %ymm9, %ymm15, %ymm9;
- vpxor %ymm10, %ymm15, %ymm10;
- vpxor %ymm11, %ymm15, %ymm11;
- vpxor 12 * 32(%rax), %ymm15, %ymm12;
- vpxor 13 * 32(%rax), %ymm15, %ymm13;
- vpxor 14 * 32(%rax), %ymm15, %ymm14;
- vpxor 15 * 32(%rax), %ymm15, %ymm15;
-
- CALL_NOSPEC r9;
-
- addq $(16 * 32), %rsp;
-
- vpxor 0 * 32(%rsi), %ymm7, %ymm7;
- vpxor 1 * 32(%rsi), %ymm6, %ymm6;
- vpxor 2 * 32(%rsi), %ymm5, %ymm5;
- vpxor 3 * 32(%rsi), %ymm4, %ymm4;
- vpxor 4 * 32(%rsi), %ymm3, %ymm3;
- vpxor 5 * 32(%rsi), %ymm2, %ymm2;
- vpxor 6 * 32(%rsi), %ymm1, %ymm1;
- vpxor 7 * 32(%rsi), %ymm0, %ymm0;
- vpxor 8 * 32(%rsi), %ymm15, %ymm15;
- vpxor 9 * 32(%rsi), %ymm14, %ymm14;
- vpxor 10 * 32(%rsi), %ymm13, %ymm13;
- vpxor 11 * 32(%rsi), %ymm12, %ymm12;
- vpxor 12 * 32(%rsi), %ymm11, %ymm11;
- vpxor 13 * 32(%rsi), %ymm10, %ymm10;
- vpxor 14 * 32(%rsi), %ymm9, %ymm9;
- vpxor 15 * 32(%rsi), %ymm8, %ymm8;
- write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0,
- %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9,
- %ymm8, %rsi);
-
- vzeroupper;
-
- FRAME_END
- ret;
-SYM_FUNC_END(camellia_xts_crypt_32way)
-
-SYM_FUNC_START(camellia_xts_enc_32way)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst (32 blocks)
- * %rdx: src (32 blocks)
- * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
- */
-
- xorl %r8d, %r8d; /* input whitening key, 0 for enc */
-
- leaq __camellia_enc_blk32, %r9;
-
- jmp camellia_xts_crypt_32way;
-SYM_FUNC_END(camellia_xts_enc_32way)
-
-SYM_FUNC_START(camellia_xts_dec_32way)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst (32 blocks)
- * %rdx: src (32 blocks)
- * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
- */
-
- cmpl $16, key_length(CTX);
- movl $32, %r8d;
- movl $24, %eax;
- cmovel %eax, %r8d; /* input whitening key, last for dec */
-
- leaq __camellia_dec_blk32, %r9;
-
- jmp camellia_xts_crypt_32way;
-SYM_FUNC_END(camellia_xts_dec_32way)
diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/crypto/camellia.h
index f6d91861cb14..1dcea79e8f8e 100644
--- a/arch/x86/include/asm/crypto/camellia.h
+++ b/arch/x86/crypto/camellia.h
@@ -19,18 +19,10 @@ struct camellia_ctx {
u32 key_length;
};
-struct camellia_xts_ctx {
- struct camellia_ctx tweak_ctx;
- struct camellia_ctx crypt_ctx;
-};
-
extern int __camellia_setkey(struct camellia_ctx *cctx,
const unsigned char *key,
unsigned int key_len);
-extern int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen);
-
/* regular block cipher functions */
asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src,
bool xor);
@@ -46,13 +38,6 @@ asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src);
-asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
-asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
static inline void camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src)
{
@@ -78,14 +63,5 @@ static inline void camellia_enc_blk_xor_2way(const void *ctx, u8 *dst,
/* glue helpers */
extern void camellia_decrypt_cbc_2way(const void *ctx, u8 *dst, const u8 *src);
-extern void camellia_crypt_ctr(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-extern void camellia_crypt_ctr_2way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
-extern void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-extern void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
#endif /* ASM_X86_CAMELLIA_H */
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index ccda647422d6..e7e4d64e9577 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -5,16 +5,16 @@
* Copyright © 2013 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*/
-#include <asm/crypto/camellia.h>
-#include <asm/crypto/glue_helper.h>
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
-#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
+#include "camellia.h"
+#include "ecb_cbc_helpers.h"
+
#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
#define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32
@@ -23,121 +23,6 @@ asmlinkage void camellia_ecb_enc_32way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void camellia_ecb_dec_32way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void camellia_cbc_dec_32way(const void *ctx, u8 *dst, const u8 *src);
-asmlinkage void camellia_ctr_32way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
-asmlinkage void camellia_xts_enc_32way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-asmlinkage void camellia_xts_dec_32way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
-static const struct common_glue_ctx camellia_enc = {
- .num_funcs = 4,
- .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
- .fn_u = { .ecb = camellia_ecb_enc_32way }
- }, {
- .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .ecb = camellia_ecb_enc_16way }
- }, {
- .num_blocks = 2,
- .fn_u = { .ecb = camellia_enc_blk_2way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = camellia_enc_blk }
- } }
-};
-
-static const struct common_glue_ctx camellia_ctr = {
- .num_funcs = 4,
- .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
- .fn_u = { .ctr = camellia_ctr_32way }
- }, {
- .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .ctr = camellia_ctr_16way }
- }, {
- .num_blocks = 2,
- .fn_u = { .ctr = camellia_crypt_ctr_2way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ctr = camellia_crypt_ctr }
- } }
-};
-
-static const struct common_glue_ctx camellia_enc_xts = {
- .num_funcs = 3,
- .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
- .fn_u = { .xts = camellia_xts_enc_32way }
- }, {
- .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .xts = camellia_xts_enc_16way }
- }, {
- .num_blocks = 1,
- .fn_u = { .xts = camellia_xts_enc }
- } }
-};
-
-static const struct common_glue_ctx camellia_dec = {
- .num_funcs = 4,
- .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
- .fn_u = { .ecb = camellia_ecb_dec_32way }
- }, {
- .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .ecb = camellia_ecb_dec_16way }
- }, {
- .num_blocks = 2,
- .fn_u = { .ecb = camellia_dec_blk_2way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = camellia_dec_blk }
- } }
-};
-
-static const struct common_glue_ctx camellia_dec_cbc = {
- .num_funcs = 4,
- .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
- .fn_u = { .cbc = camellia_cbc_dec_32way }
- }, {
- .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .cbc = camellia_cbc_dec_16way }
- }, {
- .num_blocks = 2,
- .fn_u = { .cbc = camellia_decrypt_cbc_2way }
- }, {
- .num_blocks = 1,
- .fn_u = { .cbc = camellia_dec_blk }
- } }
-};
-
-static const struct common_glue_ctx camellia_dec_xts = {
- .num_funcs = 3,
- .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
- .fn_u = { .xts = camellia_xts_dec_32way }
- }, {
- .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .xts = camellia_xts_dec_16way }
- }, {
- .num_blocks = 1,
- .fn_u = { .xts = camellia_xts_dec }
- } }
-};
static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
@@ -147,45 +32,39 @@ static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&camellia_enc, req);
+ ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS);
+ ECB_BLOCK(CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, camellia_ecb_enc_32way);
+ ECB_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_ecb_enc_16way);
+ ECB_BLOCK(2, camellia_enc_blk_2way);
+ ECB_BLOCK(1, camellia_enc_blk);
+ ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&camellia_dec, req);
+ ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS);
+ ECB_BLOCK(CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, camellia_ecb_dec_32way);
+ ECB_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_ecb_dec_16way);
+ ECB_BLOCK(2, camellia_dec_blk_2way);
+ ECB_BLOCK(1, camellia_dec_blk);
+ ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req);
+ CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1);
+ CBC_ENC_BLOCK(camellia_enc_blk);
+ CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
-}
-
-static int ctr_crypt(struct skcipher_request *req)
-{
- return glue_ctr_req_128bit(&camellia_ctr, req);
-}
-
-static int xts_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk,
- &ctx->tweak_ctx, &ctx->crypt_ctx, false);
-}
-
-static int xts_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk,
- &ctx->tweak_ctx, &ctx->crypt_ctx, true);
+ CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS);
+ CBC_DEC_BLOCK(CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, camellia_cbc_dec_32way);
+ CBC_DEC_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_cbc_dec_16way);
+ CBC_DEC_BLOCK(2, camellia_decrypt_cbc_2way);
+ CBC_DEC_BLOCK(1, camellia_dec_blk);
+ CBC_WALK_END();
}
static struct skcipher_alg camellia_algs[] = {
@@ -216,35 +95,6 @@ static struct skcipher_alg camellia_algs[] = {
.setkey = camellia_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
- }, {
- .base.cra_name = "__ctr(camellia)",
- .base.cra_driver_name = "__ctr-camellia-aesni-avx2",
- .base.cra_priority = 500,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct camellia_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .chunksize = CAMELLIA_BLOCK_SIZE,
- .setkey = camellia_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- }, {
- .base.cra_name = "__xts(camellia)",
- .base.cra_driver_name = "__xts-camellia-aesni-avx2",
- .base.cra_priority = 500,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
- .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct camellia_xts_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = 2 * CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = 2 * CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = xts_camellia_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
},
};
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index 4e5de6ef206e..c7ccf63e741e 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -5,16 +5,16 @@
* Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*/
-#include <asm/crypto/camellia.h>
-#include <asm/crypto/glue_helper.h>
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
-#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
+#include "camellia.h"
+#include "ecb_cbc_helpers.h"
+
#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
/* 16-way parallel cipher functions (avx/aes-ni) */
@@ -27,120 +27,6 @@ EXPORT_SYMBOL_GPL(camellia_ecb_dec_16way);
asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src);
EXPORT_SYMBOL_GPL(camellia_cbc_dec_16way);
-asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-EXPORT_SYMBOL_GPL(camellia_ctr_16way);
-
-asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-EXPORT_SYMBOL_GPL(camellia_xts_enc_16way);
-
-asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-EXPORT_SYMBOL_GPL(camellia_xts_dec_16way);
-
-void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
-{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_enc_blk);
-}
-EXPORT_SYMBOL_GPL(camellia_xts_enc);
-
-void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
-{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_dec_blk);
-}
-EXPORT_SYMBOL_GPL(camellia_xts_dec);
-
-static const struct common_glue_ctx camellia_enc = {
- .num_funcs = 3,
- .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .ecb = camellia_ecb_enc_16way }
- }, {
- .num_blocks = 2,
- .fn_u = { .ecb = camellia_enc_blk_2way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = camellia_enc_blk }
- } }
-};
-
-static const struct common_glue_ctx camellia_ctr = {
- .num_funcs = 3,
- .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .ctr = camellia_ctr_16way }
- }, {
- .num_blocks = 2,
- .fn_u = { .ctr = camellia_crypt_ctr_2way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ctr = camellia_crypt_ctr }
- } }
-};
-
-static const struct common_glue_ctx camellia_enc_xts = {
- .num_funcs = 2,
- .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .xts = camellia_xts_enc_16way }
- }, {
- .num_blocks = 1,
- .fn_u = { .xts = camellia_xts_enc }
- } }
-};
-
-static const struct common_glue_ctx camellia_dec = {
- .num_funcs = 3,
- .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .ecb = camellia_ecb_dec_16way }
- }, {
- .num_blocks = 2,
- .fn_u = { .ecb = camellia_dec_blk_2way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = camellia_dec_blk }
- } }
-};
-
-static const struct common_glue_ctx camellia_dec_cbc = {
- .num_funcs = 3,
- .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .cbc = camellia_cbc_dec_16way }
- }, {
- .num_blocks = 2,
- .fn_u = { .cbc = camellia_decrypt_cbc_2way }
- }, {
- .num_blocks = 1,
- .fn_u = { .cbc = camellia_dec_blk }
- } }
-};
-
-static const struct common_glue_ctx camellia_dec_xts = {
- .num_funcs = 2,
- .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
- .fn_u = { .xts = camellia_xts_dec_16way }
- }, {
- .num_blocks = 1,
- .fn_u = { .xts = camellia_xts_dec }
- } }
-};
-
static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
@@ -149,65 +35,36 @@ static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&camellia_enc, req);
+ ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS);
+ ECB_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_ecb_enc_16way);
+ ECB_BLOCK(2, camellia_enc_blk_2way);
+ ECB_BLOCK(1, camellia_enc_blk);
+ ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&camellia_dec, req);
+ ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS);
+ ECB_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_ecb_dec_16way);
+ ECB_BLOCK(2, camellia_dec_blk_2way);
+ ECB_BLOCK(1, camellia_dec_blk);
+ ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req);
+ CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1);
+ CBC_ENC_BLOCK(camellia_enc_blk);
+ CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
-}
-
-static int ctr_crypt(struct skcipher_request *req)
-{
- return glue_ctr_req_128bit(&camellia_ctr, req);
-}
-
-int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err;
-
- err = xts_verify_key(tfm, key, keylen);
- if (err)
- return err;
-
- /* first half of xts-key is for crypt */
- err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2);
- if (err)
- return err;
-
- /* second half of xts-key is for tweak */
- return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
-}
-EXPORT_SYMBOL_GPL(xts_camellia_setkey);
-
-static int xts_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk,
- &ctx->tweak_ctx, &ctx->crypt_ctx, false);
-}
-
-static int xts_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk,
- &ctx->tweak_ctx, &ctx->crypt_ctx, true);
+ CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, CAMELLIA_AESNI_PARALLEL_BLOCKS);
+ CBC_DEC_BLOCK(CAMELLIA_AESNI_PARALLEL_BLOCKS, camellia_cbc_dec_16way);
+ CBC_DEC_BLOCK(2, camellia_decrypt_cbc_2way);
+ CBC_DEC_BLOCK(1, camellia_dec_blk);
+ CBC_WALK_END();
}
static struct skcipher_alg camellia_algs[] = {
@@ -238,36 +95,7 @@ static struct skcipher_alg camellia_algs[] = {
.setkey = camellia_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
- }, {
- .base.cra_name = "__ctr(camellia)",
- .base.cra_driver_name = "__ctr-camellia-aesni",
- .base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct camellia_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .chunksize = CAMELLIA_BLOCK_SIZE,
- .setkey = camellia_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- }, {
- .base.cra_name = "__xts(camellia)",
- .base.cra_driver_name = "__xts-camellia-aesni",
- .base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
- .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct camellia_xts_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = 2 * CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = 2 * CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = xts_camellia_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
+ }
};
static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)];
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index 242c056e5fa8..66c435ba9d3d 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -14,8 +14,9 @@
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/algapi.h>
-#include <asm/crypto/camellia.h>
-#include <asm/crypto/glue_helper.h>
+
+#include "camellia.h"
+#include "ecb_cbc_helpers.h"
/* regular block cipher functions */
asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src,
@@ -1262,129 +1263,47 @@ static int camellia_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
return camellia_setkey(&tfm->base, key, key_len);
}
-void camellia_decrypt_cbc_2way(const void *ctx, u8 *d, const u8 *s)
+void camellia_decrypt_cbc_2way(const void *ctx, u8 *dst, const u8 *src)
{
- u128 *dst = (u128 *)d;
- const u128 *src = (const u128 *)s;
- u128 iv = *src;
-
- camellia_dec_blk_2way(ctx, (u8 *)dst, (u8 *)src);
+ u8 buf[CAMELLIA_BLOCK_SIZE];
+ const u8 *iv = src;
- u128_xor(&dst[1], &dst[1], &iv);
+ if (dst == src)
+ iv = memcpy(buf, iv, sizeof(buf));
+ camellia_dec_blk_2way(ctx, dst, src);
+ crypto_xor(dst + CAMELLIA_BLOCK_SIZE, iv, CAMELLIA_BLOCK_SIZE);
}
EXPORT_SYMBOL_GPL(camellia_decrypt_cbc_2way);
-void camellia_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
-{
- be128 ctrblk;
- u128 *dst = (u128 *)d;
- const u128 *src = (const u128 *)s;
-
- if (dst != src)
- *dst = *src;
-
- le128_to_be128(&ctrblk, iv);
- le128_inc(iv);
-
- camellia_enc_blk_xor(ctx, (u8 *)dst, (u8 *)&ctrblk);
-}
-EXPORT_SYMBOL_GPL(camellia_crypt_ctr);
-
-void camellia_crypt_ctr_2way(const void *ctx, u8 *d, const u8 *s, le128 *iv)
-{
- be128 ctrblks[2];
- u128 *dst = (u128 *)d;
- const u128 *src = (const u128 *)s;
-
- if (dst != src) {
- dst[0] = src[0];
- dst[1] = src[1];
- }
-
- le128_to_be128(&ctrblks[0], iv);
- le128_inc(iv);
- le128_to_be128(&ctrblks[1], iv);
- le128_inc(iv);
-
- camellia_enc_blk_xor_2way(ctx, (u8 *)dst, (u8 *)ctrblks);
-}
-EXPORT_SYMBOL_GPL(camellia_crypt_ctr_2way);
-
-static const struct common_glue_ctx camellia_enc = {
- .num_funcs = 2,
- .fpu_blocks_limit = -1,
-
- .funcs = { {
- .num_blocks = 2,
- .fn_u = { .ecb = camellia_enc_blk_2way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = camellia_enc_blk }
- } }
-};
-
-static const struct common_glue_ctx camellia_ctr = {
- .num_funcs = 2,
- .fpu_blocks_limit = -1,
-
- .funcs = { {
- .num_blocks = 2,
- .fn_u = { .ctr = camellia_crypt_ctr_2way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ctr = camellia_crypt_ctr }
- } }
-};
-
-static const struct common_glue_ctx camellia_dec = {
- .num_funcs = 2,
- .fpu_blocks_limit = -1,
-
- .funcs = { {
- .num_blocks = 2,
- .fn_u = { .ecb = camellia_dec_blk_2way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = camellia_dec_blk }
- } }
-};
-
-static const struct common_glue_ctx camellia_dec_cbc = {
- .num_funcs = 2,
- .fpu_blocks_limit = -1,
-
- .funcs = { {
- .num_blocks = 2,
- .fn_u = { .cbc = camellia_decrypt_cbc_2way }
- }, {
- .num_blocks = 1,
- .fn_u = { .cbc = camellia_dec_blk }
- } }
-};
-
static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&camellia_enc, req);
+ ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1);
+ ECB_BLOCK(2, camellia_enc_blk_2way);
+ ECB_BLOCK(1, camellia_enc_blk);
+ ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&camellia_dec, req);
+ ECB_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1);
+ ECB_BLOCK(2, camellia_dec_blk_2way);
+ ECB_BLOCK(1, camellia_dec_blk);
+ ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req);
+ CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1);
+ CBC_ENC_BLOCK(camellia_enc_blk);
+ CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
-}
-
-static int ctr_crypt(struct skcipher_request *req)
-{
- return glue_ctr_req_128bit(&camellia_ctr, req);
+ CBC_WALK_START(req, CAMELLIA_BLOCK_SIZE, -1);
+ CBC_DEC_BLOCK(2, camellia_decrypt_cbc_2way);
+ CBC_DEC_BLOCK(1, camellia_dec_blk);
+ CBC_WALK_END();
}
static struct crypto_alg camellia_cipher_alg = {
@@ -1433,20 +1352,6 @@ static struct skcipher_alg camellia_skcipher_algs[] = {
.setkey = camellia_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
- }, {
- .base.cra_name = "ctr(camellia)",
- .base.cra_driver_name = "ctr-camellia-asm",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct camellia_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .chunksize = CAMELLIA_BLOCK_SIZE,
- .setkey = camellia_setkey_skcipher,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
}
};
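
[Editor's note: for reference, the ecb_encrypt()/ecb_decrypt() conversions above expand (roughly) into direct-call loops once the ECB_* macros from ecb_cbc_helpers.h, added later in this patch, are substituted. The following is an illustrative sketch, not the literal preprocessor output; fpu_blocks is -1 here, so no kernel_fpu_begin()/kernel_fpu_end() bracketing is generated.]

static int ecb_encrypt_sketch(struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct skcipher_walk walk;
	int err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes > 0) {
		unsigned int nbytes = walk.nbytes;
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;

		/* 2-way batches first, then single blocks */
		while (nbytes >= 2 * CAMELLIA_BLOCK_SIZE) {
			camellia_enc_blk_2way(ctx, dst, src);
			dst += 2 * CAMELLIA_BLOCK_SIZE;
			src += 2 * CAMELLIA_BLOCK_SIZE;
			nbytes -= 2 * CAMELLIA_BLOCK_SIZE;
		}
		while (nbytes >= CAMELLIA_BLOCK_SIZE) {
			camellia_enc_blk(ctx, dst, src);
			dst += CAMELLIA_BLOCK_SIZE;
			src += CAMELLIA_BLOCK_SIZE;
			nbytes -= CAMELLIA_BLOCK_SIZE;
		}
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}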
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index 384ccb00f9e1..3976a87f92ad 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -6,7 +6,6 @@
* <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
*/
-#include <asm/crypto/glue_helper.h>
#include <crypto/algapi.h>
#include <crypto/cast5.h>
#include <crypto/internal/simd.h>
@@ -15,6 +14,8 @@
#include <linux/module.h>
#include <linux/types.h>
+#include "ecb_cbc_helpers.h"
+
#define CAST5_PARALLEL_BLOCKS 16
asmlinkage void cast5_ecb_enc_16way(struct cast5_ctx *ctx, u8 *dst,
@@ -23,8 +24,6 @@ asmlinkage void cast5_ecb_dec_16way(struct cast5_ctx *ctx, u8 *dst,
const u8 *src);
asmlinkage void cast5_cbc_dec_16way(struct cast5_ctx *ctx, u8 *dst,
const u8 *src);
-asmlinkage void cast5_ctr_16way(struct cast5_ctx *ctx, u8 *dst, const u8 *src,
- __be64 *iv);
static int cast5_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
@@ -32,272 +31,35 @@ static int cast5_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
return cast5_setkey(&tfm->base, key, keylen);
}
-static inline bool cast5_fpu_begin(bool fpu_enabled, struct skcipher_walk *walk,
- unsigned int nbytes)
-{
- return glue_fpu_begin(CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS,
- walk, fpu_enabled, nbytes);
-}
-
-static inline void cast5_fpu_end(bool fpu_enabled)
-{
- return glue_fpu_end(fpu_enabled);
-}
-
-static int ecb_crypt(struct skcipher_request *req, bool enc)
-{
- bool fpu_enabled = false;
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- const unsigned int bsize = CAST5_BLOCK_SIZE;
- unsigned int nbytes;
- void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while ((nbytes = walk.nbytes)) {
- u8 *wsrc = walk.src.virt.addr;
- u8 *wdst = walk.dst.virt.addr;
-
- fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
-
- /* Process multi-block batch */
- if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
- fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
- do {
- fn(ctx, wdst, wsrc);
-
- wsrc += bsize * CAST5_PARALLEL_BLOCKS;
- wdst += bsize * CAST5_PARALLEL_BLOCKS;
- nbytes -= bsize * CAST5_PARALLEL_BLOCKS;
- } while (nbytes >= bsize * CAST5_PARALLEL_BLOCKS);
-
- if (nbytes < bsize)
- goto done;
- }
-
- fn = (enc) ? __cast5_encrypt : __cast5_decrypt;
-
- /* Handle leftovers */
- do {
- fn(ctx, wdst, wsrc);
-
- wsrc += bsize;
- wdst += bsize;
- nbytes -= bsize;
- } while (nbytes >= bsize);
-
-done:
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- cast5_fpu_end(fpu_enabled);
- return err;
-}
-
static int ecb_encrypt(struct skcipher_request *req)
{
- return ecb_crypt(req, true);
+ ECB_WALK_START(req, CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS);
+ ECB_BLOCK(CAST5_PARALLEL_BLOCKS, cast5_ecb_enc_16way);
+ ECB_BLOCK(1, __cast5_encrypt);
+ ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
- return ecb_crypt(req, false);
+ ECB_WALK_START(req, CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS);
+ ECB_BLOCK(CAST5_PARALLEL_BLOCKS, cast5_ecb_dec_16way);
+ ECB_BLOCK(1, __cast5_decrypt);
+ ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
- const unsigned int bsize = CAST5_BLOCK_SIZE;
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while ((nbytes = walk.nbytes)) {
- u64 *src = (u64 *)walk.src.virt.addr;
- u64 *dst = (u64 *)walk.dst.virt.addr;
- u64 *iv = (u64 *)walk.iv;
-
- do {
- *dst = *src ^ *iv;
- __cast5_encrypt(ctx, (u8 *)dst, (u8 *)dst);
- iv = dst;
- src++;
- dst++;
- nbytes -= bsize;
- } while (nbytes >= bsize);
-
- *(u64 *)walk.iv = *iv;
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- return err;
-}
-
-static unsigned int __cbc_decrypt(struct cast5_ctx *ctx,
- struct skcipher_walk *walk)
-{
- const unsigned int bsize = CAST5_BLOCK_SIZE;
- unsigned int nbytes = walk->nbytes;
- u64 *src = (u64 *)walk->src.virt.addr;
- u64 *dst = (u64 *)walk->dst.virt.addr;
- u64 last_iv;
-
- /* Start of the last block. */
- src += nbytes / bsize - 1;
- dst += nbytes / bsize - 1;
-
- last_iv = *src;
-
- /* Process multi-block batch */
- if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
- do {
- nbytes -= bsize * (CAST5_PARALLEL_BLOCKS - 1);
- src -= CAST5_PARALLEL_BLOCKS - 1;
- dst -= CAST5_PARALLEL_BLOCKS - 1;
-
- cast5_cbc_dec_16way(ctx, (u8 *)dst, (u8 *)src);
-
- nbytes -= bsize;
- if (nbytes < bsize)
- goto done;
-
- *dst ^= *(src - 1);
- src -= 1;
- dst -= 1;
- } while (nbytes >= bsize * CAST5_PARALLEL_BLOCKS);
- }
-
- /* Handle leftovers */
- for (;;) {
- __cast5_decrypt(ctx, (u8 *)dst, (u8 *)src);
-
- nbytes -= bsize;
- if (nbytes < bsize)
- break;
-
- *dst ^= *(src - 1);
- src -= 1;
- dst -= 1;
- }
-
-done:
- *dst ^= *(u64 *)walk->iv;
- *(u64 *)walk->iv = last_iv;
-
- return nbytes;
+ CBC_WALK_START(req, CAST5_BLOCK_SIZE, -1);
+ CBC_ENC_BLOCK(__cast5_encrypt);
+ CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
- bool fpu_enabled = false;
- struct skcipher_walk walk;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while ((nbytes = walk.nbytes)) {
- fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
- nbytes = __cbc_decrypt(ctx, &walk);
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- cast5_fpu_end(fpu_enabled);
- return err;
-}
-
-static void ctr_crypt_final(struct skcipher_walk *walk, struct cast5_ctx *ctx)
-{
- u8 *ctrblk = walk->iv;
- u8 keystream[CAST5_BLOCK_SIZE];
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- unsigned int nbytes = walk->nbytes;
-
- __cast5_encrypt(ctx, keystream, ctrblk);
- crypto_xor_cpy(dst, keystream, src, nbytes);
-
- crypto_inc(ctrblk, CAST5_BLOCK_SIZE);
-}
-
-static unsigned int __ctr_crypt(struct skcipher_walk *walk,
- struct cast5_ctx *ctx)
-{
- const unsigned int bsize = CAST5_BLOCK_SIZE;
- unsigned int nbytes = walk->nbytes;
- u64 *src = (u64 *)walk->src.virt.addr;
- u64 *dst = (u64 *)walk->dst.virt.addr;
-
- /* Process multi-block batch */
- if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
- do {
- cast5_ctr_16way(ctx, (u8 *)dst, (u8 *)src,
- (__be64 *)walk->iv);
-
- src += CAST5_PARALLEL_BLOCKS;
- dst += CAST5_PARALLEL_BLOCKS;
- nbytes -= bsize * CAST5_PARALLEL_BLOCKS;
- } while (nbytes >= bsize * CAST5_PARALLEL_BLOCKS);
-
- if (nbytes < bsize)
- goto done;
- }
-
- /* Handle leftovers */
- do {
- u64 ctrblk;
-
- if (dst != src)
- *dst = *src;
-
- ctrblk = *(u64 *)walk->iv;
- be64_add_cpu((__be64 *)walk->iv, 1);
-
- __cast5_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
- *dst ^= ctrblk;
-
- src += 1;
- dst += 1;
- nbytes -= bsize;
- } while (nbytes >= bsize);
-
-done:
- return nbytes;
-}
-
-static int ctr_crypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
- bool fpu_enabled = false;
- struct skcipher_walk walk;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
- fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
- nbytes = __ctr_crypt(&walk, ctx);
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- cast5_fpu_end(fpu_enabled);
-
- if (walk.nbytes) {
- ctr_crypt_final(&walk, ctx);
- err = skcipher_walk_done(&walk, 0);
- }
-
- return err;
+ CBC_WALK_START(req, CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS);
+ CBC_DEC_BLOCK(CAST5_PARALLEL_BLOCKS, cast5_cbc_dec_16way);
+ CBC_DEC_BLOCK(1, __cast5_decrypt);
+ CBC_WALK_END();
}
static struct skcipher_alg cast5_algs[] = {
@@ -328,21 +90,6 @@ static struct skcipher_alg cast5_algs[] = {
.setkey = cast5_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
- }, {
- .base.cra_name = "__ctr(cast5)",
- .base.cra_driver_name = "__ctr-cast5-avx",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct cast5_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = CAST5_MIN_KEY_SIZE,
- .max_keysize = CAST5_MAX_KEY_SIZE,
- .ivsize = CAST5_BLOCK_SIZE,
- .chunksize = CAST5_BLOCK_SIZE,
- .setkey = cast5_setkey_skcipher,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
}
};
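
[Editor's note: the helpers also take over the kernel_fpu_begin()/kernel_fpu_end() bracketing that the removed cast5_fpu_begin()/cast5_fpu_end() wrappers provided. A walk chunk only enters FPU context when it holds at least CAST5_PARALLEL_BLOCKS blocks, so short requests go straight to the scalar __cast5_encrypt()/__cast5_decrypt() without an FPU save/restore. A rough, illustrative sketch of the per-chunk gating the macros generate:]

	bool do_fpu = fpu_blocks != -1 &&
		      nbytes >= fpu_blocks * CAST5_BLOCK_SIZE;

	if (do_fpu)
		kernel_fpu_begin();

	/* 16-way AVX block loop, then scalar leftover loop */

	if (do_fpu)
		kernel_fpu_end();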
diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
index 932a3ce32a88..fbddcecc3e3f 100644
--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
@@ -212,8 +212,6 @@
.section .rodata.cst16, "aM", @progbits, 16
.align 16
-.Lxts_gf128mul_and_shl1_mask:
- .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
.Lbswap_mask:
.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
@@ -412,85 +410,3 @@ SYM_FUNC_START(cast6_cbc_dec_8way)
FRAME_END
ret;
SYM_FUNC_END(cast6_cbc_dec_8way)
-
-SYM_FUNC_START(cast6_ctr_8way)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst
- * %rdx: src
- * %rcx: iv (little endian, 128bit)
- */
- FRAME_BEGIN
- pushq %r12;
- pushq %r15
-
- movq %rdi, CTX;
- movq %rsi, %r11;
- movq %rdx, %r12;
-
- load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
- RD2, RX, RKR, RKM);
-
- call __cast6_enc_blk8;
-
- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
- popq %r15;
- popq %r12;
- FRAME_END
- ret;
-SYM_FUNC_END(cast6_ctr_8way)
-
-SYM_FUNC_START(cast6_xts_enc_8way)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst
- * %rdx: src
- * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
- */
- FRAME_BEGIN
- pushq %r15;
-
- movq %rdi, CTX
- movq %rsi, %r11;
-
- /* regs <= src, dst <= IVs, regs <= regs xor IVs */
- load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
- RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);
-
- call __cast6_enc_blk8;
-
- /* dst <= regs xor IVs(in dst) */
- store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
- popq %r15;
- FRAME_END
- ret;
-SYM_FUNC_END(cast6_xts_enc_8way)
-
-SYM_FUNC_START(cast6_xts_dec_8way)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst
- * %rdx: src
- * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
- */
- FRAME_BEGIN
- pushq %r15;
-
- movq %rdi, CTX
- movq %rsi, %r11;
-
- /* regs <= src, dst <= IVs, regs <= regs xor IVs */
- load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
- RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);
-
- call __cast6_dec_blk8;
-
- /* dst <= regs xor IVs(in dst) */
- store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
- popq %r15;
- FRAME_END
- ret;
-SYM_FUNC_END(cast6_xts_dec_8way)
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 48e0f37796fa..7e2aea372349 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -15,8 +15,8 @@
#include <crypto/algapi.h>
#include <crypto/cast6.h>
#include <crypto/internal/simd.h>
-#include <crypto/xts.h>
-#include <asm/crypto/glue_helper.h>
+
+#include "ecb_cbc_helpers.h"
#define CAST6_PARALLEL_BLOCKS 8
@@ -24,13 +24,6 @@ asmlinkage void cast6_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void cast6_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void cast6_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src);
-asmlinkage void cast6_ctr_8way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
-asmlinkage void cast6_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-asmlinkage void cast6_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
static int cast6_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
@@ -38,172 +31,35 @@ static int cast6_setkey_skcipher(struct crypto_skcipher *tfm,
return cast6_setkey(&tfm->base, key, keylen);
}
-static void cast6_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
-{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_encrypt);
-}
-
-static void cast6_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
-{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_decrypt);
-}
-
-static void cast6_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
-{
- be128 ctrblk;
- u128 *dst = (u128 *)d;
- const u128 *src = (const u128 *)s;
-
- le128_to_be128(&ctrblk, iv);
- le128_inc(iv);
-
- __cast6_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
- u128_xor(dst, src, (u128 *)&ctrblk);
-}
-
-static const struct common_glue_ctx cast6_enc = {
- .num_funcs = 2,
- .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAST6_PARALLEL_BLOCKS,
- .fn_u = { .ecb = cast6_ecb_enc_8way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = __cast6_encrypt }
- } }
-};
-
-static const struct common_glue_ctx cast6_ctr = {
- .num_funcs = 2,
- .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAST6_PARALLEL_BLOCKS,
- .fn_u = { .ctr = cast6_ctr_8way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ctr = cast6_crypt_ctr }
- } }
-};
-
-static const struct common_glue_ctx cast6_enc_xts = {
- .num_funcs = 2,
- .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAST6_PARALLEL_BLOCKS,
- .fn_u = { .xts = cast6_xts_enc_8way }
- }, {
- .num_blocks = 1,
- .fn_u = { .xts = cast6_xts_enc }
- } }
-};
-
-static const struct common_glue_ctx cast6_dec = {
- .num_funcs = 2,
- .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAST6_PARALLEL_BLOCKS,
- .fn_u = { .ecb = cast6_ecb_dec_8way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = __cast6_decrypt }
- } }
-};
-
-static const struct common_glue_ctx cast6_dec_cbc = {
- .num_funcs = 2,
- .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAST6_PARALLEL_BLOCKS,
- .fn_u = { .cbc = cast6_cbc_dec_8way }
- }, {
- .num_blocks = 1,
- .fn_u = { .cbc = __cast6_decrypt }
- } }
-};
-
-static const struct common_glue_ctx cast6_dec_xts = {
- .num_funcs = 2,
- .fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = CAST6_PARALLEL_BLOCKS,
- .fn_u = { .xts = cast6_xts_dec_8way }
- }, {
- .num_blocks = 1,
- .fn_u = { .xts = cast6_xts_dec }
- } }
-};
-
static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&cast6_enc, req);
+ ECB_WALK_START(req, CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS);
+ ECB_BLOCK(CAST6_PARALLEL_BLOCKS, cast6_ecb_enc_8way);
+ ECB_BLOCK(1, __cast6_encrypt);
+ ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&cast6_dec, req);
+ ECB_WALK_START(req, CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS);
+ ECB_BLOCK(CAST6_PARALLEL_BLOCKS, cast6_ecb_dec_8way);
+ ECB_BLOCK(1, __cast6_decrypt);
+ ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(__cast6_encrypt, req);
+ CBC_WALK_START(req, CAST6_BLOCK_SIZE, -1);
+ CBC_ENC_BLOCK(__cast6_encrypt);
+ CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_req_128bit(&cast6_dec_cbc, req);
-}
-
-static int ctr_crypt(struct skcipher_request *req)
-{
- return glue_ctr_req_128bit(&cast6_ctr, req);
-}
-
-struct cast6_xts_ctx {
- struct cast6_ctx tweak_ctx;
- struct cast6_ctx crypt_ctx;
-};
-
-static int xts_cast6_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err;
-
- err = xts_verify_key(tfm, key, keylen);
- if (err)
- return err;
-
- /* first half of xts-key is for crypt */
- err = __cast6_setkey(&ctx->crypt_ctx, key, keylen / 2);
- if (err)
- return err;
-
- /* second half of xts-key is for tweak */
- return __cast6_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
-}
-
-static int xts_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return glue_xts_req_128bit(&cast6_enc_xts, req, __cast6_encrypt,
- &ctx->tweak_ctx, &ctx->crypt_ctx, false);
-}
-
-static int xts_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return glue_xts_req_128bit(&cast6_dec_xts, req, __cast6_encrypt,
- &ctx->tweak_ctx, &ctx->crypt_ctx, true);
+ CBC_WALK_START(req, CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS);
+ CBC_DEC_BLOCK(CAST6_PARALLEL_BLOCKS, cast6_cbc_dec_8way);
+ CBC_DEC_BLOCK(1, __cast6_decrypt);
+ CBC_WALK_END();
}
static struct skcipher_alg cast6_algs[] = {
@@ -234,35 +90,6 @@ static struct skcipher_alg cast6_algs[] = {
.setkey = cast6_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
- }, {
- .base.cra_name = "__ctr(cast6)",
- .base.cra_driver_name = "__ctr-cast6-avx",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct cast6_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = CAST6_MIN_KEY_SIZE,
- .max_keysize = CAST6_MAX_KEY_SIZE,
- .ivsize = CAST6_BLOCK_SIZE,
- .chunksize = CAST6_BLOCK_SIZE,
- .setkey = cast6_setkey_skcipher,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- }, {
- .base.cra_name = "__xts(cast6)",
- .base.cra_driver_name = "__xts-cast6-avx",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
- .base.cra_blocksize = CAST6_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct cast6_xts_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = 2 * CAST6_MIN_KEY_SIZE,
- .max_keysize = 2 * CAST6_MAX_KEY_SIZE,
- .ivsize = CAST6_BLOCK_SIZE,
- .setkey = xts_cast6_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
},
};
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
index 89830e531350..e7cb68a3db3b 100644
--- a/arch/x86/crypto/des3_ede_glue.c
+++ b/arch/x86/crypto/des3_ede_glue.c
@@ -6,8 +6,6 @@
*
* CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- * CTR part based on code (crypto/ctr.c) by:
- * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
*/
#include <crypto/algapi.h>
@@ -253,94 +251,6 @@ static int cbc_decrypt(struct skcipher_request *req)
return err;
}
-static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx,
- struct skcipher_walk *walk)
-{
- u8 *ctrblk = walk->iv;
- u8 keystream[DES3_EDE_BLOCK_SIZE];
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- unsigned int nbytes = walk->nbytes;
-
- des3_ede_enc_blk(ctx, keystream, ctrblk);
- crypto_xor_cpy(dst, keystream, src, nbytes);
-
- crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE);
-}
-
-static unsigned int __ctr_crypt(struct des3_ede_x86_ctx *ctx,
- struct skcipher_walk *walk)
-{
- unsigned int bsize = DES3_EDE_BLOCK_SIZE;
- unsigned int nbytes = walk->nbytes;
- __be64 *src = (__be64 *)walk->src.virt.addr;
- __be64 *dst = (__be64 *)walk->dst.virt.addr;
- u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv);
- __be64 ctrblocks[3];
-
- /* Process four block batch */
- if (nbytes >= bsize * 3) {
- do {
- /* create ctrblks for parallel encrypt */
- ctrblocks[0] = cpu_to_be64(ctrblk++);
- ctrblocks[1] = cpu_to_be64(ctrblk++);
- ctrblocks[2] = cpu_to_be64(ctrblk++);
-
- des3_ede_enc_blk_3way(ctx, (u8 *)ctrblocks,
- (u8 *)ctrblocks);
-
- dst[0] = src[0] ^ ctrblocks[0];
- dst[1] = src[1] ^ ctrblocks[1];
- dst[2] = src[2] ^ ctrblocks[2];
-
- src += 3;
- dst += 3;
- } while ((nbytes -= bsize * 3) >= bsize * 3);
-
- if (nbytes < bsize)
- goto done;
- }
-
- /* Handle leftovers */
- do {
- ctrblocks[0] = cpu_to_be64(ctrblk++);
-
- des3_ede_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
-
- dst[0] = src[0] ^ ctrblocks[0];
-
- src += 1;
- dst += 1;
- } while ((nbytes -= bsize) >= bsize);
-
-done:
- *(__be64 *)walk->iv = cpu_to_be64(ctrblk);
- return nbytes;
-}
-
-static int ctr_crypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while ((nbytes = walk.nbytes) >= DES3_EDE_BLOCK_SIZE) {
- nbytes = __ctr_crypt(ctx, &walk);
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- if (nbytes) {
- ctr_crypt_final(ctx, &walk);
- err = skcipher_walk_done(&walk, 0);
- }
-
- return err;
-}
-
static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
@@ -428,20 +338,6 @@ static struct skcipher_alg des3_ede_skciphers[] = {
.setkey = des3_ede_x86_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
- }, {
- .base.cra_name = "ctr(des3_ede)",
- .base.cra_driver_name = "ctr-des3_ede-asm",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .chunksize = DES3_EDE_BLOCK_SIZE,
- .setkey = des3_ede_x86_setkey_skcipher,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
}
};
diff --git a/arch/x86/crypto/ecb_cbc_helpers.h b/arch/x86/crypto/ecb_cbc_helpers.h
new file mode 100644
index 000000000000..eaa15c7b29d6
--- /dev/null
+++ b/arch/x86/crypto/ecb_cbc_helpers.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _CRYPTO_ECB_CBC_HELPER_H
+#define _CRYPTO_ECB_CBC_HELPER_H
+
+#include <crypto/internal/skcipher.h>
+#include <asm/fpu/api.h>
+
+/*
+ * Mode helpers to instantiate parameterized skcipher ECB/CBC modes without
+ * having to rely on indirect calls and retpolines.
+ */
+
+#define ECB_WALK_START(req, bsize, fpu_blocks) do { \
+ void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); \
+ const int __bsize = (bsize); \
+ struct skcipher_walk walk; \
+ int err = skcipher_walk_virt(&walk, (req), false); \
+ while (walk.nbytes > 0) { \
+ unsigned int nbytes = walk.nbytes; \
+ bool do_fpu = (fpu_blocks) != -1 && \
+ nbytes >= (fpu_blocks) * __bsize; \
+ const u8 *src = walk.src.virt.addr; \
+ u8 *dst = walk.dst.virt.addr; \
+ u8 __maybe_unused buf[(bsize)]; \
+ if (do_fpu) kernel_fpu_begin()
+
+#define CBC_WALK_START(req, bsize, fpu_blocks) \
+ ECB_WALK_START(req, bsize, fpu_blocks)
+
+#define ECB_WALK_ADVANCE(blocks) do { \
+ dst += (blocks) * __bsize; \
+ src += (blocks) * __bsize; \
+ nbytes -= (blocks) * __bsize; \
+} while (0)
+
+#define ECB_BLOCK(blocks, func) do { \
+ while (nbytes >= (blocks) * __bsize) { \
+ (func)(ctx, dst, src); \
+ ECB_WALK_ADVANCE(blocks); \
+ } \
+} while (0)
+
+#define CBC_ENC_BLOCK(func) do { \
+ const u8 *__iv = walk.iv; \
+ while (nbytes >= __bsize) { \
+ crypto_xor_cpy(dst, src, __iv, __bsize); \
+ (func)(ctx, dst, dst); \
+ __iv = dst; \
+ ECB_WALK_ADVANCE(1); \
+ } \
+ memcpy(walk.iv, __iv, __bsize); \
+} while (0)
+
+#define CBC_DEC_BLOCK(blocks, func) do { \
+ while (nbytes >= (blocks) * __bsize) { \
+ const u8 *__iv = src + ((blocks) - 1) * __bsize; \
+ if (dst == src) \
+ __iv = memcpy(buf, __iv, __bsize); \
+ (func)(ctx, dst, src); \
+ crypto_xor(dst, walk.iv, __bsize); \
+ memcpy(walk.iv, __iv, __bsize); \
+ ECB_WALK_ADVANCE(blocks); \
+ } \
+} while (0)
+
+#define ECB_WALK_END() \
+ if (do_fpu) kernel_fpu_end(); \
+ err = skcipher_walk_done(&walk, nbytes); \
+ } \
+ return err; \
+} while (0)
+
+#define CBC_WALK_END() ECB_WALK_END()
+
+#endif
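
[Editor's note: one subtlety in CBC_DEC_BLOCK above: CBC decryption of block i needs ciphertext block i-1 as its IV, so decrypting in place (dst == src) would clobber the chaining value. The macro therefore snapshots the last ciphertext block of each batch into the on-stack buf before calling the block function; the first plaintext block of the batch is then XORed with the previous walk.iv (chaining inside the batch is handled by the n-way assembly routine), and the snapshot becomes the next IV. A minimal sketch of the same idea for a single block, using a hypothetical decrypt_one(ctx, dst, src) in place of a real cipher primitive:]

static void cbc_dec_one_inplace(void *ctx, u8 *block, u8 *iv,
				unsigned int bsize)
{
	u8 saved_ct[16];		/* assumes bsize <= 16 */

	memcpy(saved_ct, block, bsize);	/* keep ciphertext: next IV */
	decrypt_one(ctx, block, block);	/* hypothetical in-place decrypt */
	crypto_xor(block, iv, bsize);	/* P = D(C) xor previous IV */
	memcpy(iv, saved_ct, bsize);	/* chain forward */
}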
diff --git a/arch/x86/crypto/glue_helper-asm-avx.S b/arch/x86/crypto/glue_helper-asm-avx.S
index d08fc575ef7f..3da385271227 100644
--- a/arch/x86/crypto/glue_helper-asm-avx.S
+++ b/arch/x86/crypto/glue_helper-asm-avx.S
@@ -34,107 +34,3 @@
vpxor (5*16)(src), x6, x6; \
vpxor (6*16)(src), x7, x7; \
store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
-
-#define inc_le128(x, minus_one, tmp) \
- vpcmpeqq minus_one, x, tmp; \
- vpsubq minus_one, x, x; \
- vpslldq $8, tmp, tmp; \
- vpsubq tmp, x, x;
-
-#define load_ctr_8way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2) \
- vpcmpeqd t0, t0, t0; \
- vpsrldq $8, t0, t0; /* low: -1, high: 0 */ \
- vmovdqa bswap, t1; \
- \
- /* load IV and byteswap */ \
- vmovdqu (iv), x7; \
- vpshufb t1, x7, x0; \
- \
- /* construct IVs */ \
- inc_le128(x7, t0, t2); \
- vpshufb t1, x7, x1; \
- inc_le128(x7, t0, t2); \
- vpshufb t1, x7, x2; \
- inc_le128(x7, t0, t2); \
- vpshufb t1, x7, x3; \
- inc_le128(x7, t0, t2); \
- vpshufb t1, x7, x4; \
- inc_le128(x7, t0, t2); \
- vpshufb t1, x7, x5; \
- inc_le128(x7, t0, t2); \
- vpshufb t1, x7, x6; \
- inc_le128(x7, t0, t2); \
- vmovdqa x7, t2; \
- vpshufb t1, x7, x7; \
- inc_le128(t2, t0, t1); \
- vmovdqu t2, (iv);
-
-#define store_ctr_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \
- vpxor (0*16)(src), x0, x0; \
- vpxor (1*16)(src), x1, x1; \
- vpxor (2*16)(src), x2, x2; \
- vpxor (3*16)(src), x3, x3; \
- vpxor (4*16)(src), x4, x4; \
- vpxor (5*16)(src), x5, x5; \
- vpxor (6*16)(src), x6, x6; \
- vpxor (7*16)(src), x7, x7; \
- store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
-
-#define gf128mul_x_ble(iv, mask, tmp) \
- vpsrad $31, iv, tmp; \
- vpaddq iv, iv, iv; \
- vpshufd $0x13, tmp, tmp; \
- vpand mask, tmp, tmp; \
- vpxor tmp, iv, iv;
-
-#define load_xts_8way(iv, src, dst, x0, x1, x2, x3, x4, x5, x6, x7, tiv, t0, \
- t1, xts_gf128mul_and_shl1_mask) \
- vmovdqa xts_gf128mul_and_shl1_mask, t0; \
- \
- /* load IV */ \
- vmovdqu (iv), tiv; \
- vpxor (0*16)(src), tiv, x0; \
- vmovdqu tiv, (0*16)(dst); \
- \
- /* construct and store IVs, also xor with source */ \
- gf128mul_x_ble(tiv, t0, t1); \
- vpxor (1*16)(src), tiv, x1; \
- vmovdqu tiv, (1*16)(dst); \
- \
- gf128mul_x_ble(tiv, t0, t1); \
- vpxor (2*16)(src), tiv, x2; \
- vmovdqu tiv, (2*16)(dst); \
- \
- gf128mul_x_ble(tiv, t0, t1); \
- vpxor (3*16)(src), tiv, x3; \
- vmovdqu tiv, (3*16)(dst); \
- \
- gf128mul_x_ble(tiv, t0, t1); \
- vpxor (4*16)(src), tiv, x4; \
- vmovdqu tiv, (4*16)(dst); \
- \
- gf128mul_x_ble(tiv, t0, t1); \
- vpxor (5*16)(src), tiv, x5; \
- vmovdqu tiv, (5*16)(dst); \
- \
- gf128mul_x_ble(tiv, t0, t1); \
- vpxor (6*16)(src), tiv, x6; \
- vmovdqu tiv, (6*16)(dst); \
- \
- gf128mul_x_ble(tiv, t0, t1); \
- vpxor (7*16)(src), tiv, x7; \
- vmovdqu tiv, (7*16)(dst); \
- \
- gf128mul_x_ble(tiv, t0, t1); \
- vmovdqu tiv, (iv);
-
-#define store_xts_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
- vpxor (0*16)(dst), x0, x0; \
- vpxor (1*16)(dst), x1, x1; \
- vpxor (2*16)(dst), x2, x2; \
- vpxor (3*16)(dst), x3, x3; \
- vpxor (4*16)(dst), x4, x4; \
- vpxor (5*16)(dst), x5, x5; \
- vpxor (6*16)(dst), x6, x6; \
- vpxor (7*16)(dst), x7, x7; \
- store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
diff --git a/arch/x86/crypto/glue_helper-asm-avx2.S b/arch/x86/crypto/glue_helper-asm-avx2.S
index d84508c85c13..c77e9049431f 100644
--- a/arch/x86/crypto/glue_helper-asm-avx2.S
+++ b/arch/x86/crypto/glue_helper-asm-avx2.S
@@ -37,139 +37,3 @@
vpxor (5*32+16)(src), x6, x6; \
vpxor (6*32+16)(src), x7, x7; \
store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
-
-#define inc_le128(x, minus_one, tmp) \
- vpcmpeqq minus_one, x, tmp; \
- vpsubq minus_one, x, x; \
- vpslldq $8, tmp, tmp; \
- vpsubq tmp, x, x;
-
-#define add2_le128(x, minus_one, minus_two, tmp1, tmp2) \
- vpcmpeqq minus_one, x, tmp1; \
- vpcmpeqq minus_two, x, tmp2; \
- vpsubq minus_two, x, x; \
- vpor tmp2, tmp1, tmp1; \
- vpslldq $8, tmp1, tmp1; \
- vpsubq tmp1, x, x;
-
-#define load_ctr_16way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t0x, t1, \
- t1x, t2, t2x, t3, t3x, t4, t5) \
- vpcmpeqd t0, t0, t0; \
- vpsrldq $8, t0, t0; /* ab: -1:0 ; cd: -1:0 */ \
- vpaddq t0, t0, t4; /* ab: -2:0 ; cd: -2:0 */\
- \
- /* load IV and byteswap */ \
- vmovdqu (iv), t2x; \
- vmovdqa t2x, t3x; \
- inc_le128(t2x, t0x, t1x); \
- vbroadcasti128 bswap, t1; \
- vinserti128 $1, t2x, t3, t2; /* ab: le0 ; cd: le1 */ \
- vpshufb t1, t2, x0; \
- \
- /* construct IVs */ \
- add2_le128(t2, t0, t4, t3, t5); /* ab: le2 ; cd: le3 */ \
- vpshufb t1, t2, x1; \
- add2_le128(t2, t0, t4, t3, t5); \
- vpshufb t1, t2, x2; \
- add2_le128(t2, t0, t4, t3, t5); \
- vpshufb t1, t2, x3; \
- add2_le128(t2, t0, t4, t3, t5); \
- vpshufb t1, t2, x4; \
- add2_le128(t2, t0, t4, t3, t5); \
- vpshufb t1, t2, x5; \
- add2_le128(t2, t0, t4, t3, t5); \
- vpshufb t1, t2, x6; \
- add2_le128(t2, t0, t4, t3, t5); \
- vpshufb t1, t2, x7; \
- vextracti128 $1, t2, t2x; \
- inc_le128(t2x, t0x, t3x); \
- vmovdqu t2x, (iv);
-
-#define store_ctr_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \
- vpxor (0*32)(src), x0, x0; \
- vpxor (1*32)(src), x1, x1; \
- vpxor (2*32)(src), x2, x2; \
- vpxor (3*32)(src), x3, x3; \
- vpxor (4*32)(src), x4, x4; \
- vpxor (5*32)(src), x5, x5; \
- vpxor (6*32)(src), x6, x6; \
- vpxor (7*32)(src), x7, x7; \
- store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
-
-#define gf128mul_x_ble(iv, mask, tmp) \
- vpsrad $31, iv, tmp; \
- vpaddq iv, iv, iv; \
- vpshufd $0x13, tmp, tmp; \
- vpand mask, tmp, tmp; \
- vpxor tmp, iv, iv;
-
-#define gf128mul_x2_ble(iv, mask1, mask2, tmp0, tmp1) \
- vpsrad $31, iv, tmp0; \
- vpaddq iv, iv, tmp1; \
- vpsllq $2, iv, iv; \
- vpshufd $0x13, tmp0, tmp0; \
- vpsrad $31, tmp1, tmp1; \
- vpand mask2, tmp0, tmp0; \
- vpshufd $0x13, tmp1, tmp1; \
- vpxor tmp0, iv, iv; \
- vpand mask1, tmp1, tmp1; \
- vpxor tmp1, iv, iv;
-
-#define load_xts_16way(iv, src, dst, x0, x1, x2, x3, x4, x5, x6, x7, tiv, \
- tivx, t0, t0x, t1, t1x, t2, t2x, t3, \
- xts_gf128mul_and_shl1_mask_0, \
- xts_gf128mul_and_shl1_mask_1) \
- vbroadcasti128 xts_gf128mul_and_shl1_mask_0, t1; \
- \
- /* load IV and construct second IV */ \
- vmovdqu (iv), tivx; \
- vmovdqa tivx, t0x; \
- gf128mul_x_ble(tivx, t1x, t2x); \
- vbroadcasti128 xts_gf128mul_and_shl1_mask_1, t2; \
- vinserti128 $1, tivx, t0, tiv; \
- vpxor (0*32)(src), tiv, x0; \
- vmovdqu tiv, (0*32)(dst); \
- \
- /* construct and store IVs, also xor with source */ \
- gf128mul_x2_ble(tiv, t1, t2, t0, t3); \
- vpxor (1*32)(src), tiv, x1; \
- vmovdqu tiv, (1*32)(dst); \
- \
- gf128mul_x2_ble(tiv, t1, t2, t0, t3); \
- vpxor (2*32)(src), tiv, x2; \
- vmovdqu tiv, (2*32)(dst); \
- \
- gf128mul_x2_ble(tiv, t1, t2, t0, t3); \
- vpxor (3*32)(src), tiv, x3; \
- vmovdqu tiv, (3*32)(dst); \
- \
- gf128mul_x2_ble(tiv, t1, t2, t0, t3); \
- vpxor (4*32)(src), tiv, x4; \
- vmovdqu tiv, (4*32)(dst); \
- \
- gf128mul_x2_ble(tiv, t1, t2, t0, t3); \
- vpxor (5*32)(src), tiv, x5; \
- vmovdqu tiv, (5*32)(dst); \
- \
- gf128mul_x2_ble(tiv, t1, t2, t0, t3); \
- vpxor (6*32)(src), tiv, x6; \
- vmovdqu tiv, (6*32)(dst); \
- \
- gf128mul_x2_ble(tiv, t1, t2, t0, t3); \
- vpxor (7*32)(src), tiv, x7; \
- vmovdqu tiv, (7*32)(dst); \
- \
- vextracti128 $1, tiv, tivx; \
- gf128mul_x_ble(tivx, t1x, t2x); \
- vmovdqu tivx, (iv);
-
-#define store_xts_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
- vpxor (0*32)(dst), x0, x0; \
- vpxor (1*32)(dst), x1, x1; \
- vpxor (2*32)(dst), x2, x2; \
- vpxor (3*32)(dst), x3, x3; \
- vpxor (4*32)(dst), x4, x4; \
- vpxor (5*32)(dst), x5, x5; \
- vpxor (6*32)(dst), x6, x6; \
- vpxor (7*32)(dst), x7, x7; \
- store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
deleted file mode 100644
index d3d91a0abf88..000000000000
--- a/arch/x86/crypto/glue_helper.c
+++ /dev/null
@@ -1,381 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Shared glue code for 128bit block ciphers
- *
- * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
- *
- * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
- * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- * CTR part based on code (crypto/ctr.c) by:
- * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
- */
-
-#include <linux/module.h>
-#include <crypto/b128ops.h>
-#include <crypto/gf128mul.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/xts.h>
-#include <asm/crypto/glue_helper.h>
-
-int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
- struct skcipher_request *req)
-{
- void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
- const unsigned int bsize = 128 / 8;
- struct skcipher_walk walk;
- bool fpu_enabled = false;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while ((nbytes = walk.nbytes)) {
- const u8 *src = walk.src.virt.addr;
- u8 *dst = walk.dst.virt.addr;
- unsigned int func_bytes;
- unsigned int i;
-
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- &walk, fpu_enabled, nbytes);
- for (i = 0; i < gctx->num_funcs; i++) {
- func_bytes = bsize * gctx->funcs[i].num_blocks;
-
- if (nbytes < func_bytes)
- continue;
-
- /* Process multi-block batch */
- do {
- gctx->funcs[i].fn_u.ecb(ctx, dst, src);
- src += func_bytes;
- dst += func_bytes;
- nbytes -= func_bytes;
- } while (nbytes >= func_bytes);
-
- if (nbytes < bsize)
- break;
- }
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- glue_fpu_end(fpu_enabled);
- return err;
-}
-EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);
-
-int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
- struct skcipher_request *req)
-{
- void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
- const unsigned int bsize = 128 / 8;
- struct skcipher_walk walk;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while ((nbytes = walk.nbytes)) {
- const u128 *src = (u128 *)walk.src.virt.addr;
- u128 *dst = (u128 *)walk.dst.virt.addr;
- u128 *iv = (u128 *)walk.iv;
-
- do {
- u128_xor(dst, src, iv);
- fn(ctx, (u8 *)dst, (u8 *)dst);
- iv = dst;
- src++;
- dst++;
- nbytes -= bsize;
- } while (nbytes >= bsize);
-
- *(u128 *)walk.iv = *iv;
- err = skcipher_walk_done(&walk, nbytes);
- }
- return err;
-}
-EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit);
-
-int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
- struct skcipher_request *req)
-{
- void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
- const unsigned int bsize = 128 / 8;
- struct skcipher_walk walk;
- bool fpu_enabled = false;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while ((nbytes = walk.nbytes)) {
- const u128 *src = walk.src.virt.addr;
- u128 *dst = walk.dst.virt.addr;
- unsigned int func_bytes, num_blocks;
- unsigned int i;
- u128 last_iv;
-
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- &walk, fpu_enabled, nbytes);
- /* Start of the last block. */
- src += nbytes / bsize - 1;
- dst += nbytes / bsize - 1;
-
- last_iv = *src;
-
- for (i = 0; i < gctx->num_funcs; i++) {
- num_blocks = gctx->funcs[i].num_blocks;
- func_bytes = bsize * num_blocks;
-
- if (nbytes < func_bytes)
- continue;
-
- /* Process multi-block batch */
- do {
- src -= num_blocks - 1;
- dst -= num_blocks - 1;
-
- gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst,
- (const u8 *)src);
-
- nbytes -= func_bytes;
- if (nbytes < bsize)
- goto done;
-
- u128_xor(dst, dst, --src);
- dst--;
- } while (nbytes >= func_bytes);
- }
-done:
- u128_xor(dst, dst, (u128 *)walk.iv);
- *(u128 *)walk.iv = last_iv;
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- glue_fpu_end(fpu_enabled);
- return err;
-}
-EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);
-
-int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
- struct skcipher_request *req)
-{
- void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
- const unsigned int bsize = 128 / 8;
- struct skcipher_walk walk;
- bool fpu_enabled = false;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while ((nbytes = walk.nbytes) >= bsize) {
- const u128 *src = walk.src.virt.addr;
- u128 *dst = walk.dst.virt.addr;
- unsigned int func_bytes, num_blocks;
- unsigned int i;
- le128 ctrblk;
-
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- &walk, fpu_enabled, nbytes);
-
- be128_to_le128(&ctrblk, (be128 *)walk.iv);
-
- for (i = 0; i < gctx->num_funcs; i++) {
- num_blocks = gctx->funcs[i].num_blocks;
- func_bytes = bsize * num_blocks;
-
- if (nbytes < func_bytes)
- continue;
-
- /* Process multi-block batch */
- do {
- gctx->funcs[i].fn_u.ctr(ctx, (u8 *)dst,
- (const u8 *)src,
- &ctrblk);
- src += num_blocks;
- dst += num_blocks;
- nbytes -= func_bytes;
- } while (nbytes >= func_bytes);
-
- if (nbytes < bsize)
- break;
- }
-
- le128_to_be128((be128 *)walk.iv, &ctrblk);
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- glue_fpu_end(fpu_enabled);
-
- if (nbytes) {
- le128 ctrblk;
- u128 tmp;
-
- be128_to_le128(&ctrblk, (be128 *)walk.iv);
- memcpy(&tmp, walk.src.virt.addr, nbytes);
- gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, (u8 *)&tmp,
- (const u8 *)&tmp,
- &ctrblk);
- memcpy(walk.dst.virt.addr, &tmp, nbytes);
- le128_to_be128((be128 *)walk.iv, &ctrblk);
-
- err = skcipher_walk_done(&walk, 0);
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(glue_ctr_req_128bit);
-
-static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
- void *ctx,
- struct skcipher_walk *walk)
-{
- const unsigned int bsize = 128 / 8;
- unsigned int nbytes = walk->nbytes;
- u128 *src = walk->src.virt.addr;
- u128 *dst = walk->dst.virt.addr;
- unsigned int num_blocks, func_bytes;
- unsigned int i;
-
- /* Process multi-block batch */
- for (i = 0; i < gctx->num_funcs; i++) {
- num_blocks = gctx->funcs[i].num_blocks;
- func_bytes = bsize * num_blocks;
-
- if (nbytes >= func_bytes) {
- do {
- gctx->funcs[i].fn_u.xts(ctx, (u8 *)dst,
- (const u8 *)src,
- walk->iv);
-
- src += num_blocks;
- dst += num_blocks;
- nbytes -= func_bytes;
- } while (nbytes >= func_bytes);
-
- if (nbytes < bsize)
- goto done;
- }
- }
-
-done:
- return nbytes;
-}
-
-int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
- struct skcipher_request *req,
- common_glue_func_t tweak_fn, void *tweak_ctx,
- void *crypt_ctx, bool decrypt)
-{
- const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
- const unsigned int bsize = 128 / 8;
- struct skcipher_request subreq;
- struct skcipher_walk walk;
- bool fpu_enabled = false;
- unsigned int nbytes, tail;
- int err;
-
- if (req->cryptlen < XTS_BLOCK_SIZE)
- return -EINVAL;
-
- if (unlikely(cts)) {
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-
- tail = req->cryptlen % XTS_BLOCK_SIZE + XTS_BLOCK_SIZE;
-
- skcipher_request_set_tfm(&subreq, tfm);
- skcipher_request_set_callback(&subreq,
- crypto_skcipher_get_flags(tfm),
- NULL, NULL);
- skcipher_request_set_crypt(&subreq, req->src, req->dst,
- req->cryptlen - tail, req->iv);
- req = &subreq;
- }
-
- err = skcipher_walk_virt(&walk, req, false);
- nbytes = walk.nbytes;
- if (err)
- return err;
-
- /* set minimum length to bsize, for tweak_fn */
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- &walk, fpu_enabled,
- nbytes < bsize ? bsize : nbytes);
-
- /* calculate first value of T */
- tweak_fn(tweak_ctx, walk.iv, walk.iv);
-
- while (nbytes) {
- nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);
-
- err = skcipher_walk_done(&walk, nbytes);
- nbytes = walk.nbytes;
- }
-
- if (unlikely(cts)) {
- u8 *next_tweak, *final_tweak = req->iv;
- struct scatterlist *src, *dst;
- struct scatterlist s[2], d[2];
- le128 b[2];
-
- dst = src = scatterwalk_ffwd(s, req->src, req->cryptlen);
- if (req->dst != req->src)
- dst = scatterwalk_ffwd(d, req->dst, req->cryptlen);
-
- if (decrypt) {
- next_tweak = memcpy(b, req->iv, XTS_BLOCK_SIZE);
- gf128mul_x_ble(b, b);
- } else {
- next_tweak = req->iv;
- }
-
- skcipher_request_set_crypt(&subreq, src, dst, XTS_BLOCK_SIZE,
- next_tweak);
-
- err = skcipher_walk_virt(&walk, req, false) ?:
- skcipher_walk_done(&walk,
- __glue_xts_req_128bit(gctx, crypt_ctx, &walk));
- if (err)
- goto out;
-
- scatterwalk_map_and_copy(b, dst, 0, XTS_BLOCK_SIZE, 0);
- memcpy(b + 1, b, tail - XTS_BLOCK_SIZE);
- scatterwalk_map_and_copy(b, src, XTS_BLOCK_SIZE,
- tail - XTS_BLOCK_SIZE, 0);
- scatterwalk_map_and_copy(b, dst, 0, tail, 1);
-
- skcipher_request_set_crypt(&subreq, dst, dst, XTS_BLOCK_SIZE,
- final_tweak);
-
- err = skcipher_walk_virt(&walk, req, false) ?:
- skcipher_walk_done(&walk,
- __glue_xts_req_128bit(gctx, crypt_ctx, &walk));
- }
-
-out:
- glue_fpu_end(fpu_enabled);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(glue_xts_req_128bit);
-
-void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv, common_glue_func_t fn)
-{
- le128 ivblk = *iv;
-
- /* generate next IV */
- gf128mul_x_ble(iv, &ivblk);
-
- /* CC <- T xor C */
- u128_xor((u128 *)dst, (const u128 *)src, (u128 *)&ivblk);
-
- /* PP <- D(Key2,CC) */
- fn(ctx, dst, dst);
-
- /* P <- T xor PP */
- u128_xor((u128 *)dst, (u128 *)dst, (u128 *)&ivblk);
-}
-EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);
-
-MODULE_LICENSE("GPL");
diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
index ba9e4c1e7f5c..b7ee24df7fba 100644
--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
@@ -18,10 +18,6 @@
.align 16
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
-.section .rodata.cst16.xts_gf128mul_and_shl1_mask, "aM", @progbits, 16
-.align 16
-.Lxts_gf128mul_and_shl1_mask:
- .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
.text
@@ -715,67 +711,3 @@ SYM_FUNC_START(serpent_cbc_dec_8way_avx)
FRAME_END
ret;
SYM_FUNC_END(serpent_cbc_dec_8way_avx)
-
-SYM_FUNC_START(serpent_ctr_8way_avx)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst
- * %rdx: src
- * %rcx: iv (little endian, 128bit)
- */
- FRAME_BEGIN
-
- load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
- RD2, RK0, RK1, RK2);
-
- call __serpent_enc_blk8_avx;
-
- store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
- FRAME_END
- ret;
-SYM_FUNC_END(serpent_ctr_8way_avx)
-
-SYM_FUNC_START(serpent_xts_enc_8way_avx)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst
- * %rdx: src
- * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
- */
- FRAME_BEGIN
-
- /* regs <= src, dst <= IVs, regs <= regs xor IVs */
- load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
- RK0, RK1, RK2, .Lxts_gf128mul_and_shl1_mask);
-
- call __serpent_enc_blk8_avx;
-
- /* dst <= regs xor IVs(in dst) */
- store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
- FRAME_END
- ret;
-SYM_FUNC_END(serpent_xts_enc_8way_avx)
-
-SYM_FUNC_START(serpent_xts_dec_8way_avx)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst
- * %rdx: src
- * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
- */
- FRAME_BEGIN
-
- /* regs <= src, dst <= IVs, regs <= regs xor IVs */
- load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
- RK0, RK1, RK2, .Lxts_gf128mul_and_shl1_mask);
-
- call __serpent_dec_blk8_avx;
-
- /* dst <= regs xor IVs(in dst) */
- store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
-
- FRAME_END
- ret;
-SYM_FUNC_END(serpent_xts_dec_8way_avx)
diff --git a/arch/x86/crypto/serpent-avx.h b/arch/x86/crypto/serpent-avx.h
new file mode 100644
index 000000000000..23f3361a0e72
--- /dev/null
+++ b/arch/x86/crypto/serpent-avx.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_X86_SERPENT_AVX_H
+#define ASM_X86_SERPENT_AVX_H
+
+#include <crypto/b128ops.h>
+#include <crypto/serpent.h>
+#include <linux/types.h>
+
+struct crypto_skcipher;
+
+#define SERPENT_PARALLEL_BLOCKS 8
+
+asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst,
+ const u8 *src);
+
+asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst,
+ const u8 *src);
+
+#endif
diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
index c9648aeae705..9161b6e441f3 100644
--- a/arch/x86/crypto/serpent-avx2-asm_64.S
+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
@@ -20,16 +20,6 @@
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
-.section .rodata.cst16.xts_gf128mul_and_shl1_mask_0, "aM", @progbits, 16
-.align 16
-.Lxts_gf128mul_and_shl1_mask_0:
- .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
-
-.section .rodata.cst16.xts_gf128mul_and_shl1_mask_1, "aM", @progbits, 16
-.align 16
-.Lxts_gf128mul_and_shl1_mask_1:
- .byte 0x0e, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0
-
.text
#define CTX %rdi
@@ -734,80 +724,3 @@ SYM_FUNC_START(serpent_cbc_dec_16way)
FRAME_END
ret;
SYM_FUNC_END(serpent_cbc_dec_16way)
-
-SYM_FUNC_START(serpent_ctr_16way)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst (16 blocks)
- * %rdx: src (16 blocks)
- * %rcx: iv (little endian, 128bit)
- */
- FRAME_BEGIN
-
- vzeroupper;
-
- load_ctr_16way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
- RD2, RK0, RK0x, RK1, RK1x, RK2, RK2x, RK3, RK3x, RNOT,
- tp);
-
- call __serpent_enc_blk16;
-
- store_ctr_16way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
- vzeroupper;
-
- FRAME_END
- ret;
-SYM_FUNC_END(serpent_ctr_16way)
-
-SYM_FUNC_START(serpent_xts_enc_16way)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst (16 blocks)
- * %rdx: src (16 blocks)
- * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
- */
- FRAME_BEGIN
-
- vzeroupper;
-
- load_xts_16way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
- RD2, RK0, RK0x, RK1, RK1x, RK2, RK2x, RK3, RK3x, RNOT,
- .Lxts_gf128mul_and_shl1_mask_0,
- .Lxts_gf128mul_and_shl1_mask_1);
-
- call __serpent_enc_blk16;
-
- store_xts_16way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
- vzeroupper;
-
- FRAME_END
- ret;
-SYM_FUNC_END(serpent_xts_enc_16way)
-
-SYM_FUNC_START(serpent_xts_dec_16way)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst (16 blocks)
- * %rdx: src (16 blocks)
- * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
- */
- FRAME_BEGIN
-
- vzeroupper;
-
- load_xts_16way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
- RD2, RK0, RK0x, RK1, RK1x, RK2, RK2x, RK3, RK3x, RNOT,
- .Lxts_gf128mul_and_shl1_mask_0,
- .Lxts_gf128mul_and_shl1_mask_1);
-
- call __serpent_dec_blk16;
-
- store_xts_16way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
-
- vzeroupper;
-
- FRAME_END
- ret;
-SYM_FUNC_END(serpent_xts_dec_16way)
diff --git a/arch/x86/include/asm/crypto/serpent-sse2.h b/arch/x86/crypto/serpent-sse2.h
index 860ca248914b..860ca248914b 100644
--- a/arch/x86/include/asm/crypto/serpent-sse2.h
+++ b/arch/x86/crypto/serpent-sse2.h
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index f973ace44ad3..ccf0b5fa4933 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -12,9 +12,9 @@
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
-#include <crypto/xts.h>
-#include <asm/crypto/glue_helper.h>
-#include <asm/crypto/serpent-avx.h>
+
+#include "serpent-avx.h"
+#include "ecb_cbc_helpers.h"
#define SERPENT_AVX2_PARALLEL_BLOCKS 16
@@ -23,158 +23,44 @@ asmlinkage void serpent_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void serpent_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void serpent_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src);
-asmlinkage void serpent_ctr_16way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-asmlinkage void serpent_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-asmlinkage void serpent_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}
-static const struct common_glue_ctx serpent_enc = {
- .num_funcs = 3,
- .fpu_blocks_limit = 8,
-
- .funcs = { {
- .num_blocks = 16,
- .fn_u = { .ecb = serpent_ecb_enc_16way }
- }, {
- .num_blocks = 8,
- .fn_u = { .ecb = serpent_ecb_enc_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = __serpent_encrypt }
- } }
-};
-
-static const struct common_glue_ctx serpent_ctr = {
- .num_funcs = 3,
- .fpu_blocks_limit = 8,
-
- .funcs = { {
- .num_blocks = 16,
- .fn_u = { .ctr = serpent_ctr_16way }
- }, {
- .num_blocks = 8,
- .fn_u = { .ctr = serpent_ctr_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .ctr = __serpent_crypt_ctr }
- } }
-};
-
-static const struct common_glue_ctx serpent_enc_xts = {
- .num_funcs = 3,
- .fpu_blocks_limit = 8,
-
- .funcs = { {
- .num_blocks = 16,
- .fn_u = { .xts = serpent_xts_enc_16way }
- }, {
- .num_blocks = 8,
- .fn_u = { .xts = serpent_xts_enc_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .xts = serpent_xts_enc }
- } }
-};
-
-static const struct common_glue_ctx serpent_dec = {
- .num_funcs = 3,
- .fpu_blocks_limit = 8,
-
- .funcs = { {
- .num_blocks = 16,
- .fn_u = { .ecb = serpent_ecb_dec_16way }
- }, {
- .num_blocks = 8,
- .fn_u = { .ecb = serpent_ecb_dec_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = __serpent_decrypt }
- } }
-};
-
-static const struct common_glue_ctx serpent_dec_cbc = {
- .num_funcs = 3,
- .fpu_blocks_limit = 8,
-
- .funcs = { {
- .num_blocks = 16,
- .fn_u = { .cbc = serpent_cbc_dec_16way }
- }, {
- .num_blocks = 8,
- .fn_u = { .cbc = serpent_cbc_dec_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .cbc = __serpent_decrypt }
- } }
-};
-
-static const struct common_glue_ctx serpent_dec_xts = {
- .num_funcs = 3,
- .fpu_blocks_limit = 8,
-
- .funcs = { {
- .num_blocks = 16,
- .fn_u = { .xts = serpent_xts_dec_16way }
- }, {
- .num_blocks = 8,
- .fn_u = { .xts = serpent_xts_dec_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .xts = serpent_xts_dec }
- } }
-};
-
static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&serpent_enc, req);
+ ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ ECB_BLOCK(SERPENT_AVX2_PARALLEL_BLOCKS, serpent_ecb_enc_16way);
+ ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_enc_8way_avx);
+ ECB_BLOCK(1, __serpent_encrypt);
+ ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&serpent_dec, req);
+ ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ ECB_BLOCK(SERPENT_AVX2_PARALLEL_BLOCKS, serpent_ecb_dec_16way);
+ ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_dec_8way_avx);
+ ECB_BLOCK(1, __serpent_decrypt);
+ ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req);
+ CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1);
+ CBC_ENC_BLOCK(__serpent_encrypt);
+ CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
-}
-
-static int ctr_crypt(struct skcipher_request *req)
-{
- return glue_ctr_req_128bit(&serpent_ctr, req);
-}
-
-static int xts_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return glue_xts_req_128bit(&serpent_enc_xts, req,
- __serpent_encrypt, &ctx->tweak_ctx,
- &ctx->crypt_ctx, false);
-}
-
-static int xts_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return glue_xts_req_128bit(&serpent_dec_xts, req,
- __serpent_encrypt, &ctx->tweak_ctx,
- &ctx->crypt_ctx, true);
+ CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ CBC_DEC_BLOCK(SERPENT_AVX2_PARALLEL_BLOCKS, serpent_cbc_dec_16way);
+ CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_cbc_dec_8way_avx);
+ CBC_DEC_BLOCK(1, __serpent_decrypt);
+ CBC_WALK_END();
}
static struct skcipher_alg serpent_algs[] = {
@@ -205,35 +91,6 @@ static struct skcipher_alg serpent_algs[] = {
.setkey = serpent_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
- }, {
- .base.cra_name = "__ctr(serpent)",
- .base.cra_driver_name = "__ctr-serpent-avx2",
- .base.cra_priority = 600,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct serpent_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .chunksize = SERPENT_BLOCK_SIZE,
- .setkey = serpent_setkey_skcipher,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- }, {
- .base.cra_name = "__xts(serpent)",
- .base.cra_driver_name = "__xts-serpent-avx2",
- .base.cra_priority = 600,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
- .base.cra_blocksize = SERPENT_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct serpent_xts_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = 2 * SERPENT_MIN_KEY_SIZE,
- .max_keysize = 2 * SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = xts_serpent_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
},
};
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 7806d1cbe854..6c248e1ea4ef 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -15,9 +15,9 @@
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
-#include <crypto/xts.h>
-#include <asm/crypto/glue_helper.h>
-#include <asm/crypto/serpent-avx.h>
+
+#include "serpent-avx.h"
+#include "ecb_cbc_helpers.h"
/* 8-way parallel cipher functions */
asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst,
@@ -32,191 +32,41 @@ asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx);
-asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-EXPORT_SYMBOL_GPL(serpent_ctr_8way_avx);
-
-asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst,
- const u8 *src, le128 *iv);
-EXPORT_SYMBOL_GPL(serpent_xts_enc_8way_avx);
-
-asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst,
- const u8 *src, le128 *iv);
-EXPORT_SYMBOL_GPL(serpent_xts_dec_8way_avx);
-
-void __serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
-{
- be128 ctrblk;
- u128 *dst = (u128 *)d;
- const u128 *src = (const u128 *)s;
-
- le128_to_be128(&ctrblk, iv);
- le128_inc(iv);
-
- __serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
- u128_xor(dst, src, (u128 *)&ctrblk);
-}
-EXPORT_SYMBOL_GPL(__serpent_crypt_ctr);
-
-void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
-{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_encrypt);
-}
-EXPORT_SYMBOL_GPL(serpent_xts_enc);
-
-void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
-{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_decrypt);
-}
-EXPORT_SYMBOL_GPL(serpent_xts_dec);
-
static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}
-int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err;
-
- err = xts_verify_key(tfm, key, keylen);
- if (err)
- return err;
-
- /* first half of xts-key is for crypt */
- err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
- if (err)
- return err;
-
- /* second half of xts-key is for tweak */
- return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
-}
-EXPORT_SYMBOL_GPL(xts_serpent_setkey);
-
-static const struct common_glue_ctx serpent_enc = {
- .num_funcs = 2,
- .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = serpent_ecb_enc_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = __serpent_encrypt }
- } }
-};
-
-static const struct common_glue_ctx serpent_ctr = {
- .num_funcs = 2,
- .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ctr = serpent_ctr_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .ctr = __serpent_crypt_ctr }
- } }
-};
-
-static const struct common_glue_ctx serpent_enc_xts = {
- .num_funcs = 2,
- .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .xts = serpent_xts_enc_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .xts = serpent_xts_enc }
- } }
-};
-
-static const struct common_glue_ctx serpent_dec = {
- .num_funcs = 2,
- .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = serpent_ecb_dec_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = __serpent_decrypt }
- } }
-};
-
-static const struct common_glue_ctx serpent_dec_cbc = {
- .num_funcs = 2,
- .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .cbc = serpent_cbc_dec_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .cbc = __serpent_decrypt }
- } }
-};
-
-static const struct common_glue_ctx serpent_dec_xts = {
- .num_funcs = 2,
- .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .xts = serpent_xts_dec_8way_avx }
- }, {
- .num_blocks = 1,
- .fn_u = { .xts = serpent_xts_dec }
- } }
-};
-
static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&serpent_enc, req);
+ ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_enc_8way_avx);
+ ECB_BLOCK(1, __serpent_encrypt);
+ ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&serpent_dec, req);
+ ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_dec_8way_avx);
+ ECB_BLOCK(1, __serpent_decrypt);
+ ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req);
+ CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1);
+ CBC_ENC_BLOCK(__serpent_encrypt);
+ CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
-}
-
-static int ctr_crypt(struct skcipher_request *req)
-{
- return glue_ctr_req_128bit(&serpent_ctr, req);
-}
-
-static int xts_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return glue_xts_req_128bit(&serpent_enc_xts, req,
- __serpent_encrypt, &ctx->tweak_ctx,
- &ctx->crypt_ctx, false);
-}
-
-static int xts_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return glue_xts_req_128bit(&serpent_dec_xts, req,
- __serpent_encrypt, &ctx->tweak_ctx,
- &ctx->crypt_ctx, true);
+ CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_cbc_dec_8way_avx);
+ CBC_DEC_BLOCK(1, __serpent_decrypt);
+ CBC_WALK_END();
}
static struct skcipher_alg serpent_algs[] = {
@@ -247,35 +97,6 @@ static struct skcipher_alg serpent_algs[] = {
.setkey = serpent_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
- }, {
- .base.cra_name = "__ctr(serpent)",
- .base.cra_driver_name = "__ctr-serpent-avx",
- .base.cra_priority = 500,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct serpent_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .chunksize = SERPENT_BLOCK_SIZE,
- .setkey = serpent_setkey_skcipher,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- }, {
- .base.cra_name = "__xts(serpent)",
- .base.cra_driver_name = "__xts-serpent-avx",
- .base.cra_priority = 500,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
- .base.cra_blocksize = SERPENT_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct serpent_xts_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = 2 * SERPENT_MIN_KEY_SIZE,
- .max_keysize = 2 * SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = xts_serpent_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
},
};
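
The conversions above replace the common_glue_ctx dispatch tables with the ECB_WALK_START()/ECB_BLOCK()/CBC_* macros from the new ecb_cbc_helpers.h. As a rough illustration only (this is not the actual macro expansion, and the real helpers also take an FPU-threshold argument so that scalar tails can avoid kernel_fpu_begin()), the open-coded shape of such an ECB walk looks roughly like this:

#include <crypto/internal/skcipher.h>
#include <crypto/serpent.h>
#include <asm/fpu/api.h>

/*
 * Minimal sketch of the loop that ECB_WALK_START / ECB_BLOCK / ECB_WALK_END
 * encapsulate.  fn_8way and fn_one are placeholders for the 8-way assembly
 * routine and the single-block C fallback.
 */
static int ecb_crypt_sketch(struct skcipher_request *req,
			    void (*fn_8way)(const void *ctx, u8 *dst, const u8 *src),
			    void (*fn_one)(const void *ctx, u8 *dst, const u8 *src))
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;

		kernel_fpu_begin();
		while (nbytes >= 8 * SERPENT_BLOCK_SIZE) {
			fn_8way(ctx, dst, src);		/* 8 blocks per call */
			src += 8 * SERPENT_BLOCK_SIZE;
			dst += 8 * SERPENT_BLOCK_SIZE;
			nbytes -= 8 * SERPENT_BLOCK_SIZE;
		}
		while (nbytes >= SERPENT_BLOCK_SIZE) {
			fn_one(ctx, dst, src);		/* scalar tail blocks */
			src += SERPENT_BLOCK_SIZE;
			dst += SERPENT_BLOCK_SIZE;
			nbytes -= SERPENT_BLOCK_SIZE;
		}
		kernel_fpu_end();
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}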
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 4fed8d26b91a..d78f37e9b2cf 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -10,8 +10,6 @@
*
* CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- * CTR part based on code (crypto/ctr.c) by:
- * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
*/
#include <linux/module.h>
@@ -22,8 +20,9 @@
#include <crypto/b128ops.h>
#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
-#include <asm/crypto/serpent-sse2.h>
-#include <asm/crypto/glue_helper.h>
+
+#include "serpent-sse2.h"
+#include "ecb_cbc_helpers.h"
static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
@@ -31,130 +30,46 @@ static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}
-static void serpent_decrypt_cbc_xway(const void *ctx, u8 *d, const u8 *s)
-{
- u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
- u128 *dst = (u128 *)d;
- const u128 *src = (const u128 *)s;
- unsigned int j;
-
- for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
- ivs[j] = src[j];
-
- serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);
-
- for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
- u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
-}
-
-static void serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
-{
- be128 ctrblk;
- u128 *dst = (u128 *)d;
- const u128 *src = (const u128 *)s;
-
- le128_to_be128(&ctrblk, iv);
- le128_inc(iv);
-
- __serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
- u128_xor(dst, src, (u128 *)&ctrblk);
-}
-
-static void serpent_crypt_ctr_xway(const void *ctx, u8 *d, const u8 *s,
- le128 *iv)
+static void serpent_decrypt_cbc_xway(const void *ctx, u8 *dst, const u8 *src)
{
- be128 ctrblks[SERPENT_PARALLEL_BLOCKS];
- u128 *dst = (u128 *)d;
- const u128 *src = (const u128 *)s;
- unsigned int i;
-
- for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
- if (dst != src)
- dst[i] = src[i];
-
- le128_to_be128(&ctrblks[i], iv);
- le128_inc(iv);
- }
+ u8 buf[SERPENT_PARALLEL_BLOCKS - 1][SERPENT_BLOCK_SIZE];
+ const u8 *s = src;
- serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
+ if (dst == src)
+ s = memcpy(buf, src, sizeof(buf));
+ serpent_dec_blk_xway(ctx, dst, src);
+ crypto_xor(dst + SERPENT_BLOCK_SIZE, s, sizeof(buf));
}
-static const struct common_glue_ctx serpent_enc = {
- .num_funcs = 2,
- .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = serpent_enc_blk_xway }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = __serpent_encrypt }
- } }
-};
-
-static const struct common_glue_ctx serpent_ctr = {
- .num_funcs = 2,
- .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ctr = serpent_crypt_ctr_xway }
- }, {
- .num_blocks = 1,
- .fn_u = { .ctr = serpent_crypt_ctr }
- } }
-};
-
-static const struct common_glue_ctx serpent_dec = {
- .num_funcs = 2,
- .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = serpent_dec_blk_xway }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = __serpent_decrypt }
- } }
-};
-
-static const struct common_glue_ctx serpent_dec_cbc = {
- .num_funcs = 2,
- .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .cbc = serpent_decrypt_cbc_xway }
- }, {
- .num_blocks = 1,
- .fn_u = { .cbc = __serpent_decrypt }
- } }
-};
-
static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&serpent_enc, req);
+ ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_enc_blk_xway);
+ ECB_BLOCK(1, __serpent_encrypt);
+ ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&serpent_dec, req);
+ ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_dec_blk_xway);
+ ECB_BLOCK(1, __serpent_decrypt);
+ ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(__serpent_encrypt,
- req);
+ CBC_WALK_START(req, SERPENT_BLOCK_SIZE, -1);
+ CBC_ENC_BLOCK(__serpent_encrypt);
+ CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
-}
-
-static int ctr_crypt(struct skcipher_request *req)
-{
- return glue_ctr_req_128bit(&serpent_ctr, req);
+ CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
+ CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_decrypt_cbc_xway);
+ CBC_DEC_BLOCK(1, __serpent_decrypt);
+ CBC_WALK_END();
}
static struct skcipher_alg serpent_algs[] = {
@@ -185,21 +100,6 @@ static struct skcipher_alg serpent_algs[] = {
.setkey = serpent_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
- }, {
- .base.cra_name = "__ctr(serpent)",
- .base.cra_driver_name = "__ctr-serpent-sse2",
- .base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct serpent_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .chunksize = SERPENT_BLOCK_SIZE,
- .setkey = serpent_setkey_skcipher,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
},
};
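
The rewritten serpent_decrypt_cbc_xway() above snapshots the ciphertext into a stack buffer before decrypting when the operation is in place. A stand-alone sketch of that pattern, with N, BLOCK_SIZE and dec_nway() as illustrative placeholders (block 0 is chained with the request IV by the surrounding CBC walk, so only blocks 1..N-1 are handled here):

#include <crypto/algapi.h>
#include <linux/string.h>
#include <linux/types.h>

#define N		4
#define BLOCK_SIZE	16

/*
 * Blocks 1..N-1 must be XORed with the *ciphertext* of the previous block,
 * which would already have been overwritten if dst == src, hence the copy.
 */
static void cbc_dec_nway_sketch(const void *ctx, u8 *dst, const u8 *src,
				void (*dec_nway)(const void *ctx, u8 *dst,
						 const u8 *src))
{
	u8 iv_copy[(N - 1) * BLOCK_SIZE];
	const u8 *ivs = src;

	if (dst == src)
		ivs = memcpy(iv_copy, src, sizeof(iv_copy));

	dec_nway(ctx, dst, src);				/* decrypt all N blocks */
	crypto_xor(dst + BLOCK_SIZE, ivs, sizeof(iv_copy));	/* chain blocks 1..N-1 */
}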
diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
index a5151393bb2f..37e63b3c664e 100644
--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
@@ -19,11 +19,6 @@
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
-.section .rodata.cst16.xts_gf128mul_and_shl1_mask, "aM", @progbits, 16
-.align 16
-.Lxts_gf128mul_and_shl1_mask:
- .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
-
.text
/* structure of crypto context */
@@ -379,78 +374,3 @@ SYM_FUNC_START(twofish_cbc_dec_8way)
FRAME_END
ret;
SYM_FUNC_END(twofish_cbc_dec_8way)
-
-SYM_FUNC_START(twofish_ctr_8way)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst
- * %rdx: src
- * %rcx: iv (little endian, 128bit)
- */
- FRAME_BEGIN
-
- pushq %r12;
-
- movq %rsi, %r11;
- movq %rdx, %r12;
-
- load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
- RD2, RX0, RX1, RY0);
-
- call __twofish_enc_blk8;
-
- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
-
- popq %r12;
-
- FRAME_END
- ret;
-SYM_FUNC_END(twofish_ctr_8way)
-
-SYM_FUNC_START(twofish_xts_enc_8way)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst
- * %rdx: src
- * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
- */
- FRAME_BEGIN
-
- movq %rsi, %r11;
-
- /* regs <= src, dst <= IVs, regs <= regs xor IVs */
- load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
- RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask);
-
- call __twofish_enc_blk8;
-
- /* dst <= regs xor IVs(in dst) */
- store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
-
- FRAME_END
- ret;
-SYM_FUNC_END(twofish_xts_enc_8way)
-
-SYM_FUNC_START(twofish_xts_dec_8way)
- /* input:
- * %rdi: ctx, CTX
- * %rsi: dst
- * %rdx: src
- * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
- */
- FRAME_BEGIN
-
- movq %rsi, %r11;
-
- /* regs <= src, dst <= IVs, regs <= regs xor IVs */
- load_xts_8way(%rcx, %rdx, %rsi, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2,
- RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask);
-
- call __twofish_dec_blk8;
-
- /* dst <= regs xor IVs(in dst) */
- store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
- FRAME_END
- ret;
-SYM_FUNC_END(twofish_xts_dec_8way)
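
The XTS paths removed above advanced the tweak by multiplying it by α in GF(2¹²⁸); the 0x87 byte in the dropped .Lxts_gf128mul_and_shl1_mask table is the reduction constant for x^128 + x^7 + x^2 + x + 1. A scalar sketch of that tweak update, using an illustrative u64 pair rather than the kernel's le128 type:

#include <linux/types.h>

/* Illustrative little-endian limb layout, not the kernel's le128. */
struct tweak128 {
	u64 lo;		/* bits  0..63   */
	u64 hi;		/* bits 64..127  */
};

/* Multiply the tweak by α, i.e. shift left by one bit and reduce by 0x87. */
static void xts_tweak_mul_alpha(struct tweak128 *t)
{
	u64 carry = t->hi >> 63;		/* bit shifted out of the top */

	t->hi = (t->hi << 1) | (t->lo >> 63);
	t->lo = (t->lo << 1) ^ (carry ? 0x87 : 0);
}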
diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/crypto/twofish.h
index 2c377a8042e1..12df400e6d53 100644
--- a/arch/x86/include/asm/crypto/twofish.h
+++ b/arch/x86/crypto/twofish.h
@@ -17,9 +17,5 @@ asmlinkage void twofish_dec_blk_3way(const void *ctx, u8 *dst, const u8 *src);
/* helpers from twofish_x86_64-3way module */
extern void twofish_dec_blk_cbc_3way(const void *ctx, u8 *dst, const u8 *src);
-extern void twofish_enc_blk_ctr(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-extern void twofish_enc_blk_ctr_3way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
#endif /* ASM_X86_TWOFISH_H */
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index 2dbc8ce3730e..3eb3440b477a 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -15,9 +15,9 @@
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <crypto/twofish.h>
-#include <crypto/xts.h>
-#include <asm/crypto/glue_helper.h>
-#include <asm/crypto/twofish.h>
+
+#include "twofish.h"
+#include "ecb_cbc_helpers.h"
#define TWOFISH_PARALLEL_BLOCKS 8
@@ -26,13 +26,6 @@ asmlinkage void twofish_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void twofish_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src);
asmlinkage void twofish_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src);
-asmlinkage void twofish_ctr_8way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
-asmlinkage void twofish_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-asmlinkage void twofish_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
@@ -45,171 +38,38 @@ static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src)
__twofish_enc_blk_3way(ctx, dst, src, false);
}
-static void twofish_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
-{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_enc_blk);
-}
-
-static void twofish_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
-{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_dec_blk);
-}
-
-struct twofish_xts_ctx {
- struct twofish_ctx tweak_ctx;
- struct twofish_ctx crypt_ctx;
-};
-
-static int xts_twofish_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err;
-
- err = xts_verify_key(tfm, key, keylen);
- if (err)
- return err;
-
- /* first half of xts-key is for crypt */
- err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2);
- if (err)
- return err;
-
- /* second half of xts-key is for tweak */
- return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
-}
-
-static const struct common_glue_ctx twofish_enc = {
- .num_funcs = 3,
- .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = TWOFISH_PARALLEL_BLOCKS,
- .fn_u = { .ecb = twofish_ecb_enc_8way }
- }, {
- .num_blocks = 3,
- .fn_u = { .ecb = twofish_enc_blk_3way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = twofish_enc_blk }
- } }
-};
-
-static const struct common_glue_ctx twofish_ctr = {
- .num_funcs = 3,
- .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = TWOFISH_PARALLEL_BLOCKS,
- .fn_u = { .ctr = twofish_ctr_8way }
- }, {
- .num_blocks = 3,
- .fn_u = { .ctr = twofish_enc_blk_ctr_3way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ctr = twofish_enc_blk_ctr }
- } }
-};
-
-static const struct common_glue_ctx twofish_enc_xts = {
- .num_funcs = 2,
- .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = TWOFISH_PARALLEL_BLOCKS,
- .fn_u = { .xts = twofish_xts_enc_8way }
- }, {
- .num_blocks = 1,
- .fn_u = { .xts = twofish_xts_enc }
- } }
-};
-
-static const struct common_glue_ctx twofish_dec = {
- .num_funcs = 3,
- .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = TWOFISH_PARALLEL_BLOCKS,
- .fn_u = { .ecb = twofish_ecb_dec_8way }
- }, {
- .num_blocks = 3,
- .fn_u = { .ecb = twofish_dec_blk_3way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = twofish_dec_blk }
- } }
-};
-
-static const struct common_glue_ctx twofish_dec_cbc = {
- .num_funcs = 3,
- .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = TWOFISH_PARALLEL_BLOCKS,
- .fn_u = { .cbc = twofish_cbc_dec_8way }
- }, {
- .num_blocks = 3,
- .fn_u = { .cbc = twofish_dec_blk_cbc_3way }
- }, {
- .num_blocks = 1,
- .fn_u = { .cbc = twofish_dec_blk }
- } }
-};
-
-static const struct common_glue_ctx twofish_dec_xts = {
- .num_funcs = 2,
- .fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,
-
- .funcs = { {
- .num_blocks = TWOFISH_PARALLEL_BLOCKS,
- .fn_u = { .xts = twofish_xts_dec_8way }
- }, {
- .num_blocks = 1,
- .fn_u = { .xts = twofish_xts_dec }
- } }
-};
-
static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&twofish_enc, req);
+ ECB_WALK_START(req, TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS);
+ ECB_BLOCK(TWOFISH_PARALLEL_BLOCKS, twofish_ecb_enc_8way);
+ ECB_BLOCK(3, twofish_enc_blk_3way);
+ ECB_BLOCK(1, twofish_enc_blk);
+ ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&twofish_dec, req);
+ ECB_WALK_START(req, TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS);
+ ECB_BLOCK(TWOFISH_PARALLEL_BLOCKS, twofish_ecb_dec_8way);
+ ECB_BLOCK(3, twofish_dec_blk_3way);
+ ECB_BLOCK(1, twofish_dec_blk);
+ ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req);
+ CBC_WALK_START(req, TF_BLOCK_SIZE, -1);
+ CBC_ENC_BLOCK(twofish_enc_blk);
+ CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req);
-}
-
-static int ctr_crypt(struct skcipher_request *req)
-{
- return glue_ctr_req_128bit(&twofish_ctr, req);
-}
-
-static int xts_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return glue_xts_req_128bit(&twofish_enc_xts, req, twofish_enc_blk,
- &ctx->tweak_ctx, &ctx->crypt_ctx, false);
-}
-
-static int xts_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return glue_xts_req_128bit(&twofish_dec_xts, req, twofish_enc_blk,
- &ctx->tweak_ctx, &ctx->crypt_ctx, true);
+ CBC_WALK_START(req, TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS);
+ CBC_DEC_BLOCK(TWOFISH_PARALLEL_BLOCKS, twofish_cbc_dec_8way);
+ CBC_DEC_BLOCK(3, twofish_dec_blk_cbc_3way);
+ CBC_DEC_BLOCK(1, twofish_dec_blk);
+ CBC_WALK_END();
}
static struct skcipher_alg twofish_algs[] = {
@@ -240,35 +100,6 @@ static struct skcipher_alg twofish_algs[] = {
.setkey = twofish_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
- }, {
- .base.cra_name = "__ctr(twofish)",
- .base.cra_driver_name = "__ctr-twofish-avx",
- .base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct twofish_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .chunksize = TF_BLOCK_SIZE,
- .setkey = twofish_setkey_skcipher,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- }, {
- .base.cra_name = "__xts(twofish)",
- .base.cra_driver_name = "__xts-twofish-avx",
- .base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
- .base.cra_blocksize = TF_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct twofish_xts_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = 2 * TF_MIN_KEY_SIZE,
- .max_keysize = 2 * TF_MAX_KEY_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = xts_twofish_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
},
};
diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
index 768af6075479..03725696397c 100644
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -5,17 +5,16 @@
* Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*/
-#include <asm/crypto/glue_helper.h>
-#include <asm/crypto/twofish.h>
#include <crypto/algapi.h>
-#include <crypto/b128ops.h>
-#include <crypto/internal/skcipher.h>
#include <crypto/twofish.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
+#include "twofish.h"
+#include "ecb_cbc_helpers.h"
+
EXPORT_SYMBOL_GPL(__twofish_enc_blk_3way);
EXPORT_SYMBOL_GPL(twofish_dec_blk_3way);
@@ -30,143 +29,48 @@ static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src)
__twofish_enc_blk_3way(ctx, dst, src, false);
}
-static inline void twofish_enc_blk_xor_3way(const void *ctx, u8 *dst,
- const u8 *src)
-{
- __twofish_enc_blk_3way(ctx, dst, src, true);
-}
-
-void twofish_dec_blk_cbc_3way(const void *ctx, u8 *d, const u8 *s)
+void twofish_dec_blk_cbc_3way(const void *ctx, u8 *dst, const u8 *src)
{
- u128 ivs[2];
- u128 *dst = (u128 *)d;
- const u128 *src = (const u128 *)s;
-
- ivs[0] = src[0];
- ivs[1] = src[1];
+ u8 buf[2][TF_BLOCK_SIZE];
+ const u8 *s = src;
- twofish_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src);
+ if (dst == src)
+ s = memcpy(buf, src, sizeof(buf));
+ twofish_dec_blk_3way(ctx, dst, src);
+ crypto_xor(dst + TF_BLOCK_SIZE, s, sizeof(buf));
- u128_xor(&dst[1], &dst[1], &ivs[0]);
- u128_xor(&dst[2], &dst[2], &ivs[1]);
}
EXPORT_SYMBOL_GPL(twofish_dec_blk_cbc_3way);
-void twofish_enc_blk_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
-{
- be128 ctrblk;
- u128 *dst = (u128 *)d;
- const u128 *src = (const u128 *)s;
-
- if (dst != src)
- *dst = *src;
-
- le128_to_be128(&ctrblk, iv);
- le128_inc(iv);
-
- twofish_enc_blk(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
- u128_xor(dst, dst, (u128 *)&ctrblk);
-}
-EXPORT_SYMBOL_GPL(twofish_enc_blk_ctr);
-
-void twofish_enc_blk_ctr_3way(const void *ctx, u8 *d, const u8 *s, le128 *iv)
-{
- be128 ctrblks[3];
- u128 *dst = (u128 *)d;
- const u128 *src = (const u128 *)s;
-
- if (dst != src) {
- dst[0] = src[0];
- dst[1] = src[1];
- dst[2] = src[2];
- }
-
- le128_to_be128(&ctrblks[0], iv);
- le128_inc(iv);
- le128_to_be128(&ctrblks[1], iv);
- le128_inc(iv);
- le128_to_be128(&ctrblks[2], iv);
- le128_inc(iv);
-
- twofish_enc_blk_xor_3way(ctx, (u8 *)dst, (u8 *)ctrblks);
-}
-EXPORT_SYMBOL_GPL(twofish_enc_blk_ctr_3way);
-
-static const struct common_glue_ctx twofish_enc = {
- .num_funcs = 2,
- .fpu_blocks_limit = -1,
-
- .funcs = { {
- .num_blocks = 3,
- .fn_u = { .ecb = twofish_enc_blk_3way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = twofish_enc_blk }
- } }
-};
-
-static const struct common_glue_ctx twofish_ctr = {
- .num_funcs = 2,
- .fpu_blocks_limit = -1,
-
- .funcs = { {
- .num_blocks = 3,
- .fn_u = { .ctr = twofish_enc_blk_ctr_3way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ctr = twofish_enc_blk_ctr }
- } }
-};
-
-static const struct common_glue_ctx twofish_dec = {
- .num_funcs = 2,
- .fpu_blocks_limit = -1,
-
- .funcs = { {
- .num_blocks = 3,
- .fn_u = { .ecb = twofish_dec_blk_3way }
- }, {
- .num_blocks = 1,
- .fn_u = { .ecb = twofish_dec_blk }
- } }
-};
-
-static const struct common_glue_ctx twofish_dec_cbc = {
- .num_funcs = 2,
- .fpu_blocks_limit = -1,
-
- .funcs = { {
- .num_blocks = 3,
- .fn_u = { .cbc = twofish_dec_blk_cbc_3way }
- }, {
- .num_blocks = 1,
- .fn_u = { .cbc = twofish_dec_blk }
- } }
-};
-
static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&twofish_enc, req);
+ ECB_WALK_START(req, TF_BLOCK_SIZE, -1);
+ ECB_BLOCK(3, twofish_enc_blk_3way);
+ ECB_BLOCK(1, twofish_enc_blk);
+ ECB_WALK_END();
}
static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_ecb_req_128bit(&twofish_dec, req);
+ ECB_WALK_START(req, TF_BLOCK_SIZE, -1);
+ ECB_BLOCK(3, twofish_dec_blk_3way);
+ ECB_BLOCK(1, twofish_dec_blk);
+ ECB_WALK_END();
}
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req);
+ CBC_WALK_START(req, TF_BLOCK_SIZE, -1);
+ CBC_ENC_BLOCK(twofish_enc_blk);
+ CBC_WALK_END();
}
static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req);
-}
-
-static int ctr_crypt(struct skcipher_request *req)
-{
- return glue_ctr_req_128bit(&twofish_ctr, req);
+ CBC_WALK_START(req, TF_BLOCK_SIZE, -1);
+ CBC_DEC_BLOCK(3, twofish_dec_blk_cbc_3way);
+ CBC_DEC_BLOCK(1, twofish_dec_blk);
+ CBC_WALK_END();
}
static struct skcipher_alg tf_skciphers[] = {
@@ -195,20 +99,6 @@ static struct skcipher_alg tf_skciphers[] = {
.setkey = twofish_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
- }, {
- .base.cra_name = "ctr(twofish)",
- .base.cra_driver_name = "ctr-twofish-3way",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct twofish_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .chunksize = TF_BLOCK_SIZE,
- .setkey = twofish_setkey_skcipher,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
},
};
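
The removed twofish_enc_blk_ctr()/twofish_enc_blk_ctr_3way() (and the serpent equivalents earlier in this patch) implemented the classic CTR construction: encrypt a counter block, XOR the result into the data, increment the counter. A condensed sketch of one such step; unlike the removed code, which kept the counter little-endian and byte-swapped it per block, this keeps it big-endian and leans on the generic crypto_inc()/crypto_xor_cpy() helpers:

#include <crypto/algapi.h>
#include <crypto/b128ops.h>

/* encrypt_block() is a placeholder for the single-block cipher primitive. */
static void ctr_step_sketch(const void *ctx, u8 *dst, const u8 *src, be128 *ctr,
			    void (*encrypt_block)(const void *ctx, u8 *dst,
						  const u8 *src))
{
	be128 keystream = *ctr;

	encrypt_block(ctx, (u8 *)&keystream, (u8 *)&keystream);	/* keystream = E_k(ctr) */
	crypto_xor_cpy(dst, src, (u8 *)&keystream, sizeof(keystream));
	crypto_inc((u8 *)ctr, sizeof(*ctr));	/* 128-bit big-endian increment */
}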
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 0904f5676e4d..a2433ae8a65e 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -249,30 +249,23 @@ static __always_inline bool get_and_clear_inhcall(void) { return false; }
static __always_inline void restore_inhcall(bool inhcall) { }
#endif
-static void __xen_pv_evtchn_do_upcall(void)
+static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
{
- irq_enter_rcu();
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
inc_irq_stat(irq_hv_callback_count);
xen_hvm_evtchn_do_upcall();
- irq_exit_rcu();
+ set_irq_regs(old_regs);
}
__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
{
- struct pt_regs *old_regs;
+ irqentry_state_t state = irqentry_enter(regs);
bool inhcall;
- irqentry_state_t state;
- state = irqentry_enter(regs);
- old_regs = set_irq_regs(regs);
-
- instrumentation_begin();
- run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
- instrumentation_begin();
-
- set_irq_regs(old_regs);
+ run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
inhcall = get_and_clear_inhcall();
if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index cad08703c4ad..400908dff42e 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -46,14 +46,6 @@
.code64
.section .entry.text, "ax"
-#ifdef CONFIG_PARAVIRT_XXL
-SYM_CODE_START(native_usergs_sysret64)
- UNWIND_HINT_EMPTY
- swapgs
- sysretq
-SYM_CODE_END(native_usergs_sysret64)
-#endif /* CONFIG_PARAVIRT_XXL */
-
/*
* 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
*
@@ -123,7 +115,12 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
* Try to use SYSRET instead of IRET if we're returning to
* a completely clean 64-bit userspace context. If we're not,
* go to the slow exit path.
+ * In the Xen PV case we must use iret anyway.
*/
+
+ ALTERNATIVE "", "jmp swapgs_restore_regs_and_return_to_usermode", \
+ X86_FEATURE_XENPV
+
movq RCX(%rsp), %rcx
movq RIP(%rsp), %r11
@@ -215,7 +212,8 @@ syscall_return_via_sysret:
popq %rdi
popq %rsp
- USERGS_SYSRET64
+ swapgs
+ sysretq
SYM_CODE_END(entry_SYSCALL_64)
/*
@@ -669,7 +667,7 @@ native_irq_return_ldt:
*/
pushq %rdi /* Stash user RDI */
- SWAPGS /* to kernel GS */
+ swapgs /* to kernel GS */
SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */
movq PER_CPU_VAR(espfix_waddr), %rdi
@@ -699,7 +697,7 @@ native_irq_return_ldt:
orq PER_CPU_VAR(espfix_stack), %rax
SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
- SWAPGS /* to user GS */
+ swapgs /* to user GS */
popq %rdi /* Restore user RDI */
movq %rax, %rsp
@@ -756,47 +754,6 @@ SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
SYM_CODE_END(.Lbad_gs)
.previous
-/*
- * rdi: New stack pointer points to the top word of the stack
- * rsi: Function pointer
- * rdx: Function argument (can be NULL if none)
- */
-SYM_FUNC_START(asm_call_on_stack)
-SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL)
-SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
- /*
- * Save the frame pointer unconditionally. This allows the ORC
- * unwinder to handle the stack switch.
- */
- pushq %rbp
- mov %rsp, %rbp
-
- /*
- * The unwinder relies on the word at the top of the new stack
- * page linking back to the previous RSP.
- */
- mov %rsp, (%rdi)
- mov %rdi, %rsp
- /* Move the argument to the right place */
- mov %rdx, %rdi
-
-1:
- .pushsection .discard.instr_begin
- .long 1b - .
- .popsection
-
- CALL_NOSPEC rsi
-
-2:
- .pushsection .discard.instr_end
- .long 2b - .
- .popsection
-
- /* Restore the previous stack pointer from RBP. */
- leaveq
- ret
-SYM_FUNC_END(asm_call_on_stack)
-
#ifdef CONFIG_XEN_PV
/*
* A note on the "critical region" in our callback handler.
@@ -943,7 +900,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
ret
.Lparanoid_entry_swapgs:
- SWAPGS
+ swapgs
/*
* The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
@@ -1001,7 +958,7 @@ SYM_CODE_START_LOCAL(paranoid_exit)
jnz restore_regs_and_return_to_kernel
/* We are returning to a context with user GSBASE */
- SWAPGS_UNSAFE_STACK
+ swapgs
jmp restore_regs_and_return_to_kernel
SYM_CODE_END(paranoid_exit)
@@ -1426,7 +1383,7 @@ nmi_no_fsgsbase:
jnz nmi_restore
nmi_swapgs:
- SWAPGS_UNSAFE_STACK
+ swapgs
nmi_restore:
POP_REGS
diff --git a/arch/x86/entry/syscalls/Makefile b/arch/x86/entry/syscalls/Makefile
index 6fb9b57ed5ba..d8c4f6c9eadc 100644
--- a/arch/x86/entry/syscalls/Makefile
+++ b/arch/x86/entry/syscalls/Makefile
@@ -6,8 +6,8 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm
_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \
$(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')
-syscall32 := $(srctree)/$(src)/syscall_32.tbl
-syscall64 := $(srctree)/$(src)/syscall_64.tbl
+syscall32 := $(src)/syscall_32.tbl
+syscall64 := $(src)/syscall_64.tbl
syshdr := $(srctree)/$(src)/syscallhdr.sh
systbl := $(srctree)/$(src)/syscalltbl.sh
@@ -21,37 +21,37 @@ quiet_cmd_systbl = SYSTBL $@
cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@
quiet_cmd_hypercalls = HYPERCALLS $@
- cmd_hypercalls = $(CONFIG_SHELL) '$<' $@ $(filter-out $<,$^)
+ cmd_hypercalls = $(CONFIG_SHELL) '$<' $@ $(filter-out $<, $(real-prereqs))
syshdr_abi_unistd_32 := i386
-$(uapi)/unistd_32.h: $(syscall32) $(syshdr)
+$(uapi)/unistd_32.h: $(syscall32) $(syshdr) FORCE
$(call if_changed,syshdr)
syshdr_abi_unistd_32_ia32 := i386
syshdr_pfx_unistd_32_ia32 := ia32_
-$(out)/unistd_32_ia32.h: $(syscall32) $(syshdr)
+$(out)/unistd_32_ia32.h: $(syscall32) $(syshdr) FORCE
$(call if_changed,syshdr)
syshdr_abi_unistd_x32 := common,x32
syshdr_offset_unistd_x32 := __X32_SYSCALL_BIT
-$(uapi)/unistd_x32.h: $(syscall64) $(syshdr)
+$(uapi)/unistd_x32.h: $(syscall64) $(syshdr) FORCE
$(call if_changed,syshdr)
syshdr_abi_unistd_64 := common,64
-$(uapi)/unistd_64.h: $(syscall64) $(syshdr)
+$(uapi)/unistd_64.h: $(syscall64) $(syshdr) FORCE
$(call if_changed,syshdr)
syshdr_abi_unistd_64_x32 := x32
syshdr_pfx_unistd_64_x32 := x32_
-$(out)/unistd_64_x32.h: $(syscall64) $(syshdr)
+$(out)/unistd_64_x32.h: $(syscall64) $(syshdr) FORCE
$(call if_changed,syshdr)
-$(out)/syscalls_32.h: $(syscall32) $(systbl)
+$(out)/syscalls_32.h: $(syscall32) $(systbl) FORCE
$(call if_changed,systbl)
-$(out)/syscalls_64.h: $(syscall64) $(systbl)
+$(out)/syscalls_64.h: $(syscall64) $(systbl) FORCE
$(call if_changed,systbl)
-$(out)/xen-hypercalls.h: $(srctree)/scripts/xen-hypercalls.sh
+$(out)/xen-hypercalls.h: $(srctree)/scripts/xen-hypercalls.sh FORCE
$(call if_changed,hypercalls)
$(out)/xen-hypercalls.h: $(srctree)/include/xen/interface/xen*.h
@@ -62,9 +62,10 @@ syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h unistd_64_x32.h
syshdr-$(CONFIG_X86_64) += syscalls_64.h
syshdr-$(CONFIG_XEN) += xen-hypercalls.h
-targets += $(uapisyshdr-y) $(syshdr-y)
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+syshdr-y := $(addprefix $(out)/, $(syshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(syshdr-y))
PHONY += all
-all: $(addprefix $(uapi)/,$(uapisyshdr-y))
-all: $(addprefix $(out)/,$(syshdr-y))
+all: $(uapisyshdr-y) $(syshdr-y)
@:
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 874aeacde2dd..a1c9f496fca6 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -446,3 +446,4 @@
439 i386 faccessat2 sys_faccessat2
440 i386 process_madvise sys_process_madvise
441 i386 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 i386 mount_setattr sys_mount_setattr
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 78672124d28b..7bf01cbe582f 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -363,6 +363,7 @@
439 common faccessat2 sys_faccessat2
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
#
# Due to a historical design error, certain syscalls are numbered differently
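
Both tables above assign mount_setattr the number 442. A minimal user-space probe for that number (the degenerate arguments are intentional; the only outcome of interest is whether the kernel answers with ENOSYS):

#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_mount_setattr
#define __NR_mount_setattr 442	/* taken from the table entries in this patch */
#endif

int main(void)
{
	/* Empty path, NULL attr, zero size: anything but ENOSYS means the
	 * number is wired up and the kernel rejected the arguments instead. */
	long ret = syscall(__NR_mount_setattr, -1, "", 0, NULL, (size_t)0);

	if (ret == -1 && errno == ENOSYS)
		puts("mount_setattr: not wired up on this kernel");
	else
		puts("mount_setattr: syscall number is wired up");
	return 0;
}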
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index ccd32877a3c4..496b11ec469d 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -10,7 +10,7 @@
#include <asm/export.h>
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
- .macro THUNK name, func, put_ret_addr_in_rdi=0
+ .macro THUNK name, func
SYM_FUNC_START_NOALIGN(\name)
pushq %rbp
movq %rsp, %rbp
@@ -25,13 +25,8 @@ SYM_FUNC_START_NOALIGN(\name)
pushq %r10
pushq %r11
- .if \put_ret_addr_in_rdi
- /* 8(%rbp) is return addr on stack */
- movq 8(%rbp), %rdi
- .endif
-
call \func
- jmp .L_restore
+ jmp __thunk_restore
SYM_FUNC_END(\name)
_ASM_NOKPROBE(\name)
.endm
@@ -44,7 +39,7 @@ SYM_FUNC_END(\name)
#endif
#ifdef CONFIG_PREEMPTION
-SYM_CODE_START_LOCAL_NOALIGN(.L_restore)
+SYM_CODE_START_LOCAL_NOALIGN(__thunk_restore)
popq %r11
popq %r10
popq %r9
@@ -56,6 +51,6 @@ SYM_CODE_START_LOCAL_NOALIGN(.L_restore)
popq %rdi
popq %rbp
ret
- _ASM_NOKPROBE(.L_restore)
-SYM_CODE_END(.L_restore)
+ _ASM_NOKPROBE(__thunk_restore)
+SYM_CODE_END(__thunk_restore)
#endif
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 02e3e42f380b..05c4abc2fdfd 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -91,7 +91,7 @@ ifneq ($(RETPOLINE_VDSO_CFLAGS),)
endif
endif
-$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
+$(vobjs): KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO) $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
#
# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
@@ -150,6 +150,7 @@ KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
KBUILD_CFLAGS_32 += -fno-stack-protector
KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index e37de298a495..6ddeed3cd2ac 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -81,6 +81,8 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);
DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);
+DEFINE_STATIC_CALL_NULL(x86_pmu_guest_get_msrs, *x86_pmu.guest_get_msrs);
+
u64 __read_mostly hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -253,6 +255,8 @@ static bool check_hw_exists(void)
if (ret)
goto msr_fail;
for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
+ if (fixed_counter_disabled(i))
+ continue;
if (val & (0x03 << i*4)) {
bios_fail = 1;
val_fail = val;
@@ -665,6 +669,12 @@ void x86_pmu_disable_all(void)
}
}
+struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
+{
+ return static_call(x86_pmu_guest_get_msrs)(nr);
+}
+EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
+
/*
* There may be PMI landing after enabled=0. The PMI hitting could be before or
* after disable_all.
@@ -1523,6 +1533,8 @@ void perf_event_print_debug(void)
cpu, idx, prev_left);
}
for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+ if (fixed_counter_disabled(idx))
+ continue;
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -1923,6 +1935,8 @@ static void x86_pmu_static_call_update(void)
static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs);
static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases);
+
+ static_call_update(x86_pmu_guest_get_msrs, x86_pmu.guest_get_msrs);
}
static void _x86_pmu_read(struct perf_event *event)
@@ -1930,6 +1944,13 @@ static void _x86_pmu_read(struct perf_event *event)
x86_perf_event_update(event);
}
+static inline struct perf_guest_switch_msr *
+perf_guest_get_msrs_nop(int *nr)
+{
+ *nr = 0;
+ return NULL;
+}
+
static int __init init_hw_perf_events(void)
{
struct x86_pmu_quirk *quirk;
@@ -1995,12 +2016,17 @@ static int __init init_hw_perf_events(void)
pr_info("... generic registers: %d\n", x86_pmu.num_counters);
pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
pr_info("... max period: %016Lx\n", x86_pmu.max_period);
- pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
+ pr_info("... fixed-purpose events: %lu\n",
+ hweight64((((1ULL << x86_pmu.num_counters_fixed) - 1)
+ << INTEL_PMC_IDX_FIXED) & x86_pmu.intel_ctrl));
pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
if (!x86_pmu.read)
x86_pmu.read = _x86_pmu_read;
+ if (!x86_pmu.guest_get_msrs)
+ x86_pmu.guest_get_msrs = perf_guest_get_msrs_nop;
+
x86_pmu_static_call_update();
/*
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index d4569bfa83e3..5bac48d5c18e 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -275,6 +275,55 @@ static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
EVENT_EXTRA_END
};
+static struct extra_reg intel_spr_extra_regs[] __read_mostly = {
+ INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
+ INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
+ INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+ INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
+ EVENT_EXTRA_END
+};
+
+static struct event_constraint intel_spr_event_constraints[] = {
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+ FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* INST_RETIRED.PREC_DIST */
+ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
+ METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
+
+ INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
+ INTEL_EVENT_CONSTRAINT(0x3c, 0xff),
+ /*
+ * Generally event codes < 0x90 are restricted to counters 0-3.
+ * The 0x2E and 0x3C are exceptions, which have no restriction.
+ */
+ INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),
+
+ INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
+ INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
+ INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
+ INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
+ INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
+ INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
+ INTEL_EVENT_CONSTRAINT(0xce, 0x1),
+ INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
+ /*
+ * Generally event codes >= 0x90 are likely to have no restrictions.
+ * The exceptions are defined above.
+ */
+ INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),
+
+ EVENT_CONSTRAINT_END
+};
+
+
EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
@@ -314,11 +363,15 @@ EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
"4", "2");
-EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4");
-EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80");
-EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81");
-EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82");
-EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83");
+EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4");
+EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80");
+EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81");
+EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82");
+EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83");
+EVENT_ATTR_STR(topdown-heavy-ops, td_heavy_ops, "event=0x00,umask=0x84");
+EVENT_ATTR_STR(topdown-br-mispredict, td_br_mispredict, "event=0x00,umask=0x85");
+EVENT_ATTR_STR(topdown-fetch-lat, td_fetch_lat, "event=0x00,umask=0x86");
+EVENT_ATTR_STR(topdown-mem-bound, td_mem_bound, "event=0x00,umask=0x87");
static struct attribute *snb_events_attrs[] = {
EVENT_PTR(td_slots_issued),
@@ -384,6 +437,108 @@ static u64 intel_pmu_event_map(int hw_event)
return intel_perfmon_event_map[hw_event];
}
+static __initconst const u64 spr_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x81d0,
+ [ C(RESULT_MISS) ] = 0xe124,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x82d0,
+ },
+ },
+ [ C(L1I ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_MISS) ] = 0xe424,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x12a,
+ [ C(RESULT_MISS) ] = 0x12a,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x12a,
+ [ C(RESULT_MISS) ] = 0x12a,
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x81d0,
+ [ C(RESULT_MISS) ] = 0xe12,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x82d0,
+ [ C(RESULT_MISS) ] = 0xe13,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = 0xe11,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x4c4,
+ [ C(RESULT_MISS) ] = 0x4c5,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x12a,
+ [ C(RESULT_MISS) ] = 0x12a,
+ },
+ },
+};
+
+static __initconst const u64 spr_hw_cache_extra_regs
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x10001,
+ [ C(RESULT_MISS) ] = 0x3fbfc00001,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x3f3ffc0002,
+ [ C(RESULT_MISS) ] = 0x3f3fc00002,
+ },
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x10c000001,
+ [ C(RESULT_MISS) ] = 0x3fb3000001,
+ },
+ },
+};
+
/*
* Notes on the events:
* - data reads do not include code reads (comparable to earlier tables)
@@ -2134,18 +2289,6 @@ static void intel_tfa_pmu_enable_all(int added)
intel_pmu_enable_all(added);
}
-static void enable_counter_freeze(void)
-{
- update_debugctlmsr(get_debugctlmsr() |
- DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
-}
-
-static void disable_counter_freeze(void)
-{
- update_debugctlmsr(get_debugctlmsr() &
- ~DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
-}
-
static inline u64 intel_pmu_get_status(void)
{
u64 status;
@@ -2337,8 +2480,8 @@ static void __icl_update_topdown_event(struct perf_event *event,
}
}
-static void update_saved_topdown_regs(struct perf_event *event,
- u64 slots, u64 metrics)
+static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
+ u64 metrics, int metric_end)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_event *other;
@@ -2347,7 +2490,7 @@ static void update_saved_topdown_regs(struct perf_event *event,
event->hw.saved_slots = slots;
event->hw.saved_metric = metrics;
- for_each_set_bit(idx, cpuc->active_mask, INTEL_PMC_IDX_TD_BE_BOUND + 1) {
+ for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
if (!is_topdown_idx(idx))
continue;
other = cpuc->events[idx];
@@ -2362,7 +2505,8 @@ static void update_saved_topdown_regs(struct perf_event *event,
* The PERF_METRICS and Fixed counter 3 are read separately. The values may be
* modified by an NMI. PMU has to be disabled before calling this function.
*/
-static u64 icl_update_topdown_event(struct perf_event *event)
+
+static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_event *other;
@@ -2378,7 +2522,7 @@ static u64 icl_update_topdown_event(struct perf_event *event)
/* read PERF_METRICS */
rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
- for_each_set_bit(idx, cpuc->active_mask, INTEL_PMC_IDX_TD_BE_BOUND + 1) {
+ for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
if (!is_topdown_idx(idx))
continue;
other = cpuc->events[idx];
@@ -2404,7 +2548,7 @@ static u64 icl_update_topdown_event(struct perf_event *event)
* Don't need to reset the PERF_METRICS and Fixed counter 3.
* Because the values will be restored in next schedule in.
*/
- update_saved_topdown_regs(event, slots, metrics);
+ update_saved_topdown_regs(event, slots, metrics, metric_end);
reset = false;
}
@@ -2413,12 +2557,18 @@ static u64 icl_update_topdown_event(struct perf_event *event)
wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
wrmsrl(MSR_PERF_METRICS, 0);
if (event)
- update_saved_topdown_regs(event, 0, 0);
+ update_saved_topdown_regs(event, 0, 0, metric_end);
}
return slots;
}
+static u64 icl_update_topdown_event(struct perf_event *event)
+{
+ return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE +
+ x86_pmu.num_topdown_events - 1);
+}
+
static void intel_pmu_read_topdown_event(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -2573,8 +2723,11 @@ static void intel_pmu_reset(void)
wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
}
- for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
+ for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+ if (fixed_counter_disabled(idx))
+ continue;
wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+ }
if (ds)
ds->bts_index = ds->bts_buffer_base;
@@ -2709,95 +2862,6 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
return handled;
}
-static bool disable_counter_freezing = true;
-static int __init intel_perf_counter_freezing_setup(char *s)
-{
- bool res;
-
- if (kstrtobool(s, &res))
- return -EINVAL;
-
- disable_counter_freezing = !res;
- return 1;
-}
-__setup("perf_v4_pmi=", intel_perf_counter_freezing_setup);
-
-/*
- * Simplified handler for Arch Perfmon v4:
- * - We rely on counter freezing/unfreezing to enable/disable the PMU.
- * This is done automatically on PMU ack.
- * - Ack the PMU only after the APIC.
- */
-
-static int intel_pmu_handle_irq_v4(struct pt_regs *regs)
-{
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- int handled = 0;
- bool bts = false;
- u64 status;
- int pmu_enabled = cpuc->enabled;
- int loops = 0;
-
- /* PMU has been disabled because of counter freezing */
- cpuc->enabled = 0;
- if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
- bts = true;
- intel_bts_disable_local();
- handled = intel_pmu_drain_bts_buffer();
- handled += intel_bts_interrupt();
- }
- status = intel_pmu_get_status();
- if (!status)
- goto done;
-again:
- intel_pmu_lbr_read();
- if (++loops > 100) {
- static bool warned;
-
- if (!warned) {
- WARN(1, "perfevents: irq loop stuck!\n");
- perf_event_print_debug();
- warned = true;
- }
- intel_pmu_reset();
- goto done;
- }
-
-
- handled += handle_pmi_common(regs, status);
-done:
- /* Ack the PMI in the APIC */
- apic_write(APIC_LVTPC, APIC_DM_NMI);
-
- /*
- * The counters start counting immediately while ack the status.
- * Make it as close as possible to IRET. This avoids bogus
- * freezing on Skylake CPUs.
- */
- if (status) {
- intel_pmu_ack_status(status);
- } else {
- /*
- * CPU may issues two PMIs very close to each other.
- * When the PMI handler services the first one, the
- * GLOBAL_STATUS is already updated to reflect both.
- * When it IRETs, the second PMI is immediately
- * handled and it sees clear status. At the meantime,
- * there may be a third PMI, because the freezing bit
- * isn't set since the ack in first PMI handlers.
- * Double check if there is more work to be done.
- */
- status = intel_pmu_get_status();
- if (status)
- goto again;
- }
-
- if (bts)
- intel_bts_enable_local();
- cpuc->enabled = pmu_enabled;
- return handled;
-}
-
/*
* This handler is triggered by the local APIC, so the APIC IRQ handling
* rules apply:
@@ -3563,6 +3627,26 @@ static int core_pmu_hw_config(struct perf_event *event)
return intel_pmu_bts_config(event);
}
+#define INTEL_TD_METRIC_AVAILABLE_MAX (INTEL_TD_METRIC_RETIRING + \
+ ((x86_pmu.num_topdown_events - 1) << 8))
+
+static bool is_available_metric_event(struct perf_event *event)
+{
+ return is_metric_event(event) &&
+ event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
+}
+
+static inline bool is_mem_loads_event(struct perf_event *event)
+{
+ return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01);
+}
+
+static inline bool is_mem_loads_aux_event(struct perf_event *event)
+{
+ return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
+}
+
+
static int intel_pmu_hw_config(struct perf_event *event)
{
int ret = x86_pmu_hw_config(event);
@@ -3636,7 +3720,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
if (event->attr.config & X86_ALL_EVENT_FLAGS)
return -EINVAL;
- if (is_metric_event(event)) {
+ if (is_available_metric_event(event)) {
struct perf_event *leader = event->group_leader;
/* The metric events don't support sampling. */
@@ -3665,6 +3749,33 @@ static int intel_pmu_hw_config(struct perf_event *event)
}
}
+ /*
+ * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR
+ * doesn't function quite right. As a work-around it needs to always be
+ * co-scheduled with an auxiliary event X86_CONFIG(.event=0x03, .umask=0x82).
+ * The actual count of this second event is irrelevant; it just needs
+ * to be active to make the first event function correctly.
+ *
+ * In a group, the auxiliary event must be in front of the load latency
+ * event. This rule keeps the check simple, because perf cannot see
+ * the complete group at this point.
+ */
+ if (x86_pmu.flags & PMU_FL_MEM_LOADS_AUX &&
+ (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
+ is_mem_loads_event(event)) {
+ struct perf_event *leader = event->group_leader;
+ struct perf_event *sibling = NULL;
+
+ if (!is_mem_loads_aux_event(leader)) {
+ for_each_sibling_event(sibling, leader) {
+ if (is_mem_loads_aux_event(sibling))
+ break;
+ }
+ if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list))
+ return -ENODATA;
+ }
+ }
+
if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
return 0;
@@ -3680,26 +3791,6 @@ static int intel_pmu_hw_config(struct perf_event *event)
return 0;
}
-#ifdef CONFIG_RETPOLINE
-static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr);
-static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr);
-#endif
-
-struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
-{
-#ifdef CONFIG_RETPOLINE
- if (x86_pmu.guest_get_msrs == intel_guest_get_msrs)
- return intel_guest_get_msrs(nr);
- else if (x86_pmu.guest_get_msrs == core_guest_get_msrs)
- return core_guest_get_msrs(nr);
-#endif
- if (x86_pmu.guest_get_msrs)
- return x86_pmu.guest_get_msrs(nr);
- *nr = 0;
- return NULL;
-}
-EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
-
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -3865,6 +3956,29 @@ icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
}
static struct event_constraint *
+spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
+{
+ struct event_constraint *c;
+
+ c = icl_get_event_constraints(cpuc, idx, event);
+
+ /*
+ * The :ppp indicates the Precise Distribution (PDist) facility, which
+ * is only supported on the GP counter 0. If a :ppp event is not
+ * available on the GP counter 0, error out.
+ */
+ if (event->attr.precise_ip == 3) {
+ if (c->idxmsk64 & BIT_ULL(0))
+ return &counter0_constraint;
+
+ return &emptyconstraint;
+ }
+
+ return c;
+}
+
+static struct event_constraint *
glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{
@@ -3953,6 +4067,14 @@ static u64 nhm_limit_period(struct perf_event *event, u64 left)
return max(left, 32ULL);
}
+static u64 spr_limit_period(struct perf_event *event, u64 left)
+{
+ if (event->attr.precise_ip == 3)
+ return max(left, 128ULL);
+
+ return left;
+}
+
PMU_FORMAT_ATTR(event, "config:0-7" );
PMU_FORMAT_ATTR(umask, "config:8-15" );
PMU_FORMAT_ATTR(edge, "config:18" );
@@ -4094,9 +4216,6 @@ static void intel_pmu_cpu_starting(int cpu)
if (x86_pmu.version > 1)
flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
- if (x86_pmu.counter_freezing)
- enable_counter_freeze();
-
/* Disable perf metrics if any added CPU doesn't support it. */
if (x86_pmu.intel_cap.perf_metrics) {
union perf_capabilities perf_cap;
@@ -4167,9 +4286,6 @@ static void free_excl_cntrs(struct cpu_hw_events *cpuc)
static void intel_pmu_cpu_dying(int cpu)
{
fini_debug_store_on_cpu(cpu);
-
- if (x86_pmu.counter_freezing)
- disable_counter_freeze();
}
void intel_cpuc_finish(struct cpu_hw_events *cpuc)
@@ -4397,6 +4513,9 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c),
INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e),
@@ -4561,39 +4680,6 @@ static __init void intel_nehalem_quirk(void)
}
}
-static const struct x86_cpu_desc counter_freezing_ucodes[] = {
- INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 2, 0x0000000e),
- INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 9, 0x0000002e),
- INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 10, 0x00000008),
- INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_D, 1, 0x00000028),
- INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 1, 0x00000028),
- INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 8, 0x00000006),
- {}
-};
-
-static bool intel_counter_freezing_broken(void)
-{
- return !x86_cpu_has_min_microcode_rev(counter_freezing_ucodes);
-}
-
-static __init void intel_counter_freezing_quirk(void)
-{
- /* Check if it's already disabled */
- if (disable_counter_freezing)
- return;
-
- /*
- * If the system starts with the wrong ucode, leave the
- * counter-freezing feature permanently disabled.
- */
- if (intel_counter_freezing_broken()) {
- pr_info("PMU counter freezing disabled due to CPU errata,"
- "please upgrade microcode\n");
- x86_pmu.counter_freezing = false;
- x86_pmu.handle_irq = intel_pmu_handle_irq;
- }
-}
-
/*
* enable software workaround for errata:
* SNB: BJ122
@@ -4703,6 +4789,42 @@ static struct attribute *icl_tsx_events_attrs[] = {
NULL,
};
+
+EVENT_ATTR_STR(mem-stores, mem_st_spr, "event=0xcd,umask=0x2");
+EVENT_ATTR_STR(mem-loads-aux, mem_ld_aux, "event=0x03,umask=0x82");
+
+static struct attribute *spr_events_attrs[] = {
+ EVENT_PTR(mem_ld_hsw),
+ EVENT_PTR(mem_st_spr),
+ EVENT_PTR(mem_ld_aux),
+ NULL,
+};
+
+static struct attribute *spr_td_events_attrs[] = {
+ EVENT_PTR(slots),
+ EVENT_PTR(td_retiring),
+ EVENT_PTR(td_bad_spec),
+ EVENT_PTR(td_fe_bound),
+ EVENT_PTR(td_be_bound),
+ EVENT_PTR(td_heavy_ops),
+ EVENT_PTR(td_br_mispredict),
+ EVENT_PTR(td_fetch_lat),
+ EVENT_PTR(td_mem_bound),
+ NULL,
+};
+
+static struct attribute *spr_tsx_events_attrs[] = {
+ EVENT_PTR(tx_start),
+ EVENT_PTR(tx_abort),
+ EVENT_PTR(tx_commit),
+ EVENT_PTR(tx_capacity_read),
+ EVENT_PTR(tx_capacity_write),
+ EVENT_PTR(tx_conflict),
+ EVENT_PTR(cycles_t),
+ EVENT_PTR(cycles_ct),
+ NULL,
+};
+
static ssize_t freeze_on_smi_show(struct device *cdev,
struct device_attribute *attr,
char *buf)
@@ -4926,7 +5048,7 @@ __init int intel_pmu_init(void)
union cpuid10_eax eax;
union cpuid10_ebx ebx;
struct event_constraint *c;
- unsigned int unused;
+ unsigned int fixed_mask;
struct extra_reg *er;
bool pmem = false;
int version, i;
@@ -4948,7 +5070,7 @@ __init int intel_pmu_init(void)
* Check whether the Architectural PerfMon supports
* Branch Misses Retired hw_event or not.
*/
- cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
+ cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
return -ENODEV;
@@ -4972,15 +5094,15 @@ __init int intel_pmu_init(void)
* Quirk: v2 perfmon does not report fixed-purpose events, so
* assume at least 3 events, when not running in a hypervisor:
*/
- if (version > 1) {
+ if (version > 1 && version < 5) {
int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
x86_pmu.num_counters_fixed =
max((int)edx.split.num_counters_fixed, assume);
- }
- if (version >= 4)
- x86_pmu.counter_freezing = !disable_counter_freezing;
+ fixed_mask = (1L << x86_pmu.num_counters_fixed) - 1;
+ } else if (version >= 5)
+ x86_pmu.num_counters_fixed = fls(fixed_mask);
if (boot_cpu_has(X86_FEATURE_PDCM)) {
u64 capabilities;
@@ -5109,7 +5231,6 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_ATOM_GOLDMONT:
case INTEL_FAM6_ATOM_GOLDMONT_D:
- x86_add_quirk(intel_counter_freezing_quirk);
memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
@@ -5136,7 +5257,6 @@ __init int intel_pmu_init(void)
break;
case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
- x86_add_quirk(intel_counter_freezing_quirk);
memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
@@ -5483,12 +5603,50 @@ __init int intel_pmu_init(void)
x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
x86_pmu.lbr_pt_coexist = true;
intel_pmu_pebs_data_source_skl(pmem);
+ x86_pmu.num_topdown_events = 4;
x86_pmu.update_topdown_event = icl_update_topdown_event;
x86_pmu.set_topdown_event_period = icl_set_topdown_event_period;
pr_cont("Icelake events, ");
name = "icelake";
break;
+ case INTEL_FAM6_SAPPHIRERAPIDS_X:
+ pmem = true;
+ x86_pmu.late_ack = true;
+ memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+
+ x86_pmu.event_constraints = intel_spr_event_constraints;
+ x86_pmu.pebs_constraints = intel_spr_pebs_event_constraints;
+ x86_pmu.extra_regs = intel_spr_extra_regs;
+ x86_pmu.limit_period = spr_limit_period;
+ x86_pmu.pebs_aliases = NULL;
+ x86_pmu.pebs_prec_dist = true;
+ x86_pmu.pebs_block = true;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
+ x86_pmu.flags |= PMU_FL_PEBS_ALL;
+ x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
+ x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
+
+ x86_pmu.hw_config = hsw_hw_config;
+ x86_pmu.get_event_constraints = spr_get_event_constraints;
+ extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+ hsw_format_attr : nhm_format_attr;
+ extra_skl_attr = skl_format_attr;
+ mem_attr = spr_events_attrs;
+ td_attr = spr_td_events_attrs;
+ tsx_attr = spr_tsx_events_attrs;
+ x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
+ x86_pmu.lbr_pt_coexist = true;
+ intel_pmu_pebs_data_source_skl(pmem);
+ x86_pmu.num_topdown_events = 8;
+ x86_pmu.update_topdown_event = icl_update_topdown_event;
+ x86_pmu.set_topdown_event_period = icl_set_topdown_event_period;
+ pr_cont("Sapphire Rapids events, ");
+ name = "sapphire_rapids";
+ break;
+
default:
switch (x86_pmu.version) {
case 1:
@@ -5531,8 +5689,7 @@ __init int intel_pmu_init(void)
x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
}
- x86_pmu.intel_ctrl |=
- ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+ x86_pmu.intel_ctrl |= (u64)fixed_mask << INTEL_PMC_IDX_FIXED;
/* AnyThread may be deprecated on arch perfmon v5 or later */
if (x86_pmu.intel_cap.anythread_deprecated)
@@ -5549,13 +5706,22 @@ __init int intel_pmu_init(void)
* events to the generic counters.
*/
if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
+ /*
+	 * Disable topdown slots and metrics events
+	 * if the slots event is not in CPUID.
+ */
+ if (!(INTEL_PMC_MSK_FIXED_SLOTS & x86_pmu.intel_ctrl))
+ c->idxmsk64 = 0;
c->weight = hweight64(c->idxmsk64);
continue;
}
- if (c->cmask == FIXED_EVENT_FLAGS
- && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
- c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+ if (c->cmask == FIXED_EVENT_FLAGS) {
+		/* Disable fixed counters which are not in CPUID */
+ c->idxmsk64 &= x86_pmu.intel_ctrl;
+
+ if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
+ c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
}
c->idxmsk64 &=
~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
@@ -5601,13 +5767,6 @@ __init int intel_pmu_init(void)
pr_cont("full-width counters, ");
}
- /*
- * For arch perfmon 4 use counter freezing to avoid
- * several MSR accesses in the PMI.
- */
- if (x86_pmu.counter_freezing)
- x86_pmu.handle_irq = intel_pmu_handle_irq_v4;
-
if (x86_pmu.intel_cap.perf_metrics)
x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 67dbc91bccfe..7ebae1826403 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -36,7 +36,9 @@ union intel_x86_pebs_dse {
unsigned int ld_dse:4;
unsigned int ld_stlb_miss:1;
unsigned int ld_locked:1;
- unsigned int ld_reserved:26;
+ unsigned int ld_data_blk:1;
+ unsigned int ld_addr_blk:1;
+ unsigned int ld_reserved:24;
};
struct {
unsigned int st_l1d_hit:1;
@@ -45,6 +47,12 @@ union intel_x86_pebs_dse {
unsigned int st_locked:1;
unsigned int st_reserved2:26;
};
+ struct {
+ unsigned int st_lat_dse:4;
+ unsigned int st_lat_stlb_miss:1;
+ unsigned int st_lat_locked:1;
+ unsigned int ld_reserved3:26;
+ };
};
@@ -198,6 +206,63 @@ static u64 load_latency_data(u64 status)
if (dse.ld_locked)
val |= P(LOCK, LOCKED);
+ /*
+	 * Ice Lake and earlier models do not support block information.
+ */
+ if (!x86_pmu.pebs_block) {
+ val |= P(BLK, NA);
+ return val;
+ }
+ /*
+ * bit 6: load was blocked since its data could not be forwarded
+ * from a preceding store
+ */
+ if (dse.ld_data_blk)
+ val |= P(BLK, DATA);
+
+ /*
+ * bit 7: load was blocked due to potential address conflict with
+ * a preceding store
+ */
+ if (dse.ld_addr_blk)
+ val |= P(BLK, ADDR);
+
+ if (!dse.ld_data_blk && !dse.ld_addr_blk)
+ val |= P(BLK, NA);
+
+ return val;
+}
+
+static u64 store_latency_data(u64 status)
+{
+ union intel_x86_pebs_dse dse;
+ u64 val;
+
+ dse.val = status;
+
+ /*
+	 * use the mapping table for bits 0-3
+ */
+ val = pebs_data_source[dse.st_lat_dse];
+
+ /*
+ * bit 4: TLB access
+ * 0 = did not miss 2nd level TLB
+ * 1 = missed 2nd level TLB
+ */
+ if (dse.st_lat_stlb_miss)
+ val |= P(TLB, MISS) | P(TLB, L2);
+ else
+ val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);
+
+ /*
+ * bit 5: locked prefix
+ */
+ if (dse.st_lat_locked)
+ val |= P(LOCK, LOCKED);
+
+ val |= P(BLK, NA);
+
return val;
}
@@ -870,6 +935,28 @@ struct event_constraint intel_icl_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
+struct event_constraint intel_spr_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL),
+
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xfe),
+ INTEL_PLD_CONSTRAINT(0x1cd, 0xfe),
+ INTEL_PSD_CONSTRAINT(0x2cd, 0x1),
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf),
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf),
+
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf),
+
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf),
+
+ /*
+ * Everything else is handled by PMU_FL_PEBS_ALL, because we
+ * need the full constraints from the main table.
+ */
+
+ EVENT_CONSTRAINT_END
+};
+
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
struct event_constraint *c;
@@ -960,7 +1047,8 @@ static void adaptive_pebs_record_size_update(void)
}
#define PERF_PEBS_MEMINFO_TYPE (PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC | \
- PERF_SAMPLE_PHYS_ADDR | PERF_SAMPLE_WEIGHT | \
+ PERF_SAMPLE_PHYS_ADDR | \
+ PERF_SAMPLE_WEIGHT_TYPE | \
PERF_SAMPLE_TRANSACTION | \
PERF_SAMPLE_DATA_PAGE_SIZE)
@@ -987,7 +1075,7 @@ static u64 pebs_update_adaptive_cfg(struct perf_event *event)
gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
(attr->sample_regs_intr & PEBS_GP_REGS);
- tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT) &&
+ tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
((attr->config & INTEL_ARCH_EVENT_MASK) ==
x86_pmu.rtm_abort_event);
@@ -1331,6 +1419,8 @@ static u64 get_data_src(struct perf_event *event, u64 aux)
if (fl & PERF_X86_EVENT_PEBS_LDLAT)
val = load_latency_data(aux);
+ else if (fl & PERF_X86_EVENT_PEBS_STLAT)
+ val = store_latency_data(aux);
else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
val = precise_datala_hsw(event, aux);
else if (fst)
@@ -1369,8 +1459,8 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event,
/*
* Use latency for weight (only avail with PEBS-LL)
*/
- if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
- data->weight = pebs->lat;
+ if (fll && (sample_type & PERF_SAMPLE_WEIGHT_TYPE))
+ data->weight.full = pebs->lat;
/*
* data.data_src encodes the data source
@@ -1462,8 +1552,8 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event,
if (x86_pmu.intel_cap.pebs_format >= 2) {
/* Only set the TSX weight when no memory weight. */
- if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
- data->weight = intel_get_tsx_weight(pebs->tsx_tuning);
+ if ((sample_type & PERF_SAMPLE_WEIGHT_TYPE) && !fll)
+ data->weight.full = intel_get_tsx_weight(pebs->tsx_tuning);
if (sample_type & PERF_SAMPLE_TRANSACTION)
data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
@@ -1507,6 +1597,9 @@ static void adaptive_pebs_save_regs(struct pt_regs *regs,
#endif
}
+#define PEBS_LATENCY_MASK 0xffff
+#define PEBS_CACHE_LATENCY_OFFSET 32
+
/*
* With adaptive PEBS the layout depends on what fields are configured.
*/
@@ -1577,9 +1670,27 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
}
if (format_size & PEBS_DATACFG_MEMINFO) {
- if (sample_type & PERF_SAMPLE_WEIGHT)
- data->weight = meminfo->latency ?:
- intel_get_tsx_weight(meminfo->tsx_tuning);
+ if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
+ u64 weight = meminfo->latency;
+
+ if (x86_pmu.flags & PMU_FL_INSTR_LATENCY) {
+ data->weight.var2_w = weight & PEBS_LATENCY_MASK;
+ weight >>= PEBS_CACHE_LATENCY_OFFSET;
+ }
+
+ /*
+ * Although meminfo::latency is defined as a u64,
+ * only the lower 32 bits include the valid data
+ * in practice on Ice Lake and earlier platforms.
+ */
+ if (sample_type & PERF_SAMPLE_WEIGHT) {
+ data->weight.full = weight ?:
+ intel_get_tsx_weight(meminfo->tsx_tuning);
+ } else {
+ data->weight.var1_dw = (u32)(weight & PEBS_LATENCY_MASK) ?:
+ intel_get_tsx_weight(meminfo->tsx_tuning);
+ }
+ }
if (sample_type & PERF_SAMPLE_DATA_SRC)
data->data_src.val = get_data_src(event, meminfo->aux);
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 357258f82dc8..33c8180d5a87 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -31,21 +31,21 @@ struct event_constraint uncore_constraint_empty =
MODULE_LICENSE("GPL");
-int uncore_pcibus_to_physid(struct pci_bus *bus)
+int uncore_pcibus_to_dieid(struct pci_bus *bus)
{
struct pci2phy_map *map;
- int phys_id = -1;
+ int die_id = -1;
raw_spin_lock(&pci2phy_map_lock);
list_for_each_entry(map, &pci2phy_map_head, list) {
if (map->segment == pci_domain_nr(bus)) {
- phys_id = map->pbus_to_physid[bus->number];
+ die_id = map->pbus_to_dieid[bus->number];
break;
}
}
raw_spin_unlock(&pci2phy_map_lock);
- return phys_id;
+ return die_id;
}
static void uncore_free_pcibus_map(void)
@@ -86,7 +86,7 @@ lookup:
alloc = NULL;
map->segment = segment;
for (i = 0; i < 256; i++)
- map->pbus_to_physid[i] = -1;
+ map->pbus_to_dieid[i] = -1;
list_add_tail(&map->list, &pci2phy_map_head);
end:
@@ -332,7 +332,6 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
uncore_pmu_init_hrtimer(box);
box->cpu = -1;
- box->pci_phys_id = -1;
box->dieid = -1;
/* set default hrtimer timeout */
@@ -993,18 +992,11 @@ uncore_types_init(struct intel_uncore_type **types, bool setid)
/*
* Get the die information of a PCI device.
* @pdev: The PCI device.
- * @phys_id: The physical socket id which the device maps to.
* @die: The die id which the device maps to.
*/
-static int uncore_pci_get_dev_die_info(struct pci_dev *pdev,
- int *phys_id, int *die)
+static int uncore_pci_get_dev_die_info(struct pci_dev *pdev, int *die)
{
- *phys_id = uncore_pcibus_to_physid(pdev->bus);
- if (*phys_id < 0)
- return -ENODEV;
-
- *die = (topology_max_die_per_package() > 1) ? *phys_id :
- topology_phys_to_logical_pkg(*phys_id);
+ *die = uncore_pcibus_to_dieid(pdev->bus);
if (*die < 0)
return -EINVAL;
@@ -1046,13 +1038,12 @@ uncore_pci_find_dev_pmu(struct pci_dev *pdev, const struct pci_device_id *ids)
* @pdev: The PCI device.
* @type: The corresponding PMU type of the device.
* @pmu: The corresponding PMU of the device.
- * @phys_id: The physical socket id which the device maps to.
* @die: The die id which the device maps to.
*/
static int uncore_pci_pmu_register(struct pci_dev *pdev,
struct intel_uncore_type *type,
struct intel_uncore_pmu *pmu,
- int phys_id, int die)
+ int die)
{
struct intel_uncore_box *box;
int ret;
@@ -1070,7 +1061,6 @@ static int uncore_pci_pmu_register(struct pci_dev *pdev,
WARN_ON_ONCE(pmu->func_id != pdev->devfn);
atomic_inc(&box->refcnt);
- box->pci_phys_id = phys_id;
box->dieid = die;
box->pci_dev = pdev;
box->pmu = pmu;
@@ -1097,9 +1087,9 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
{
struct intel_uncore_type *type;
struct intel_uncore_pmu *pmu = NULL;
- int phys_id, die, ret;
+ int die, ret;
- ret = uncore_pci_get_dev_die_info(pdev, &phys_id, &die);
+ ret = uncore_pci_get_dev_die_info(pdev, &die);
if (ret)
return ret;
@@ -1132,7 +1122,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
}
- ret = uncore_pci_pmu_register(pdev, type, pmu, phys_id, die);
+ ret = uncore_pci_pmu_register(pdev, type, pmu, die);
pci_set_drvdata(pdev, pmu->boxes[die]);
@@ -1142,17 +1132,12 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
/*
* Unregister the PMU of a PCI device
* @pmu: The corresponding PMU is unregistered.
- * @phys_id: The physical socket id which the device maps to.
* @die: The die id which the device maps to.
*/
-static void uncore_pci_pmu_unregister(struct intel_uncore_pmu *pmu,
- int phys_id, int die)
+static void uncore_pci_pmu_unregister(struct intel_uncore_pmu *pmu, int die)
{
struct intel_uncore_box *box = pmu->boxes[die];
- if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
- return;
-
pmu->boxes[die] = NULL;
if (atomic_dec_return(&pmu->activeboxes) == 0)
uncore_pmu_unregister(pmu);
@@ -1164,9 +1149,9 @@ static void uncore_pci_remove(struct pci_dev *pdev)
{
struct intel_uncore_box *box;
struct intel_uncore_pmu *pmu;
- int i, phys_id, die;
+ int i, die;
- if (uncore_pci_get_dev_die_info(pdev, &phys_id, &die))
+ if (uncore_pci_get_dev_die_info(pdev, &die))
return;
box = pci_get_drvdata(pdev);
@@ -1185,7 +1170,7 @@ static void uncore_pci_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
- uncore_pci_pmu_unregister(pmu, phys_id, die);
+ uncore_pci_pmu_unregister(pmu, die);
}
static int uncore_bus_notify(struct notifier_block *nb,
@@ -1194,7 +1179,7 @@ static int uncore_bus_notify(struct notifier_block *nb,
struct device *dev = data;
struct pci_dev *pdev = to_pci_dev(dev);
struct intel_uncore_pmu *pmu;
- int phys_id, die;
+ int die;
/* Unregister the PMU when the device is going to be deleted. */
if (action != BUS_NOTIFY_DEL_DEVICE)
@@ -1204,10 +1189,10 @@ static int uncore_bus_notify(struct notifier_block *nb,
if (!pmu)
return NOTIFY_DONE;
- if (uncore_pci_get_dev_die_info(pdev, &phys_id, &die))
+ if (uncore_pci_get_dev_die_info(pdev, &die))
return NOTIFY_DONE;
- uncore_pci_pmu_unregister(pmu, phys_id, die);
+ uncore_pci_pmu_unregister(pmu, die);
return NOTIFY_OK;
}
@@ -1224,7 +1209,7 @@ static void uncore_pci_sub_driver_init(void)
struct pci_dev *pci_sub_dev;
bool notify = false;
unsigned int devfn;
- int phys_id, die;
+ int die;
while (ids && ids->vendor) {
pci_sub_dev = NULL;
@@ -1244,12 +1229,11 @@ static void uncore_pci_sub_driver_init(void)
if (!pmu)
continue;
- if (uncore_pci_get_dev_die_info(pci_sub_dev,
- &phys_id, &die))
+ if (uncore_pci_get_dev_die_info(pci_sub_dev, &die))
continue;
if (!uncore_pci_pmu_register(pci_sub_dev, type, pmu,
- phys_id, die))
+ die))
notify = true;
}
ids++;
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index 9efea154349d..a3c6e1643ad2 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -124,7 +124,6 @@ struct intel_uncore_extra_reg {
};
struct intel_uncore_box {
- int pci_phys_id;
int dieid; /* Logical die ID */
int n_active; /* number of active events */
int n_events;
@@ -173,11 +172,11 @@ struct freerunning_counters {
struct pci2phy_map {
struct list_head list;
int segment;
- int pbus_to_physid[256];
+ int pbus_to_dieid[256];
};
struct pci2phy_map *__find_pci2phy_map(int segment);
-int uncore_pcibus_to_physid(struct pci_bus *bus);
+int uncore_pcibus_to_dieid(struct pci_bus *bus);
ssize_t uncore_event_show(struct device *dev,
struct device_attribute *attr, char *buf);
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 098f893e2e22..51271288499e 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -657,7 +657,7 @@ int snb_pci2phy_map_init(int devid)
pci_dev_put(dev);
return -ENOMEM;
}
- map->pbus_to_physid[bus] = 0;
+ map->pbus_to_dieid[bus] = 0;
raw_spin_unlock(&pci2phy_map_lock);
pci_dev_put(dev);
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 7bdb1821215d..b79951d0707c 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1359,7 +1359,7 @@ static struct pci_driver snbep_uncore_pci_driver = {
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
struct pci_dev *ubox_dev = NULL;
- int i, bus, nodeid, segment;
+ int i, bus, nodeid, segment, die_id;
struct pci2phy_map *map;
int err = 0;
u32 config = 0;
@@ -1370,36 +1370,77 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
if (!ubox_dev)
break;
bus = ubox_dev->bus->number;
- /* get the Node ID of the local register */
- err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
- if (err)
- break;
- nodeid = config & NODE_ID_MASK;
- /* get the Node ID mapping */
- err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
- if (err)
- break;
+ /*
+ * The nodeid and idmap registers only contain enough
+ * information to handle 8 nodes. On systems with more
+ * than 8 nodes, we need to rely on NUMA information,
+ * filled in from BIOS supplied information, to determine
+ * the topology.
+ */
+ if (nr_node_ids <= 8) {
+ /* get the Node ID of the local register */
+ err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
+ if (err)
+ break;
+ nodeid = config & NODE_ID_MASK;
+ /* get the Node ID mapping */
+ err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
+ if (err)
+ break;
- segment = pci_domain_nr(ubox_dev->bus);
- raw_spin_lock(&pci2phy_map_lock);
- map = __find_pci2phy_map(segment);
- if (!map) {
+ segment = pci_domain_nr(ubox_dev->bus);
+ raw_spin_lock(&pci2phy_map_lock);
+ map = __find_pci2phy_map(segment);
+ if (!map) {
+ raw_spin_unlock(&pci2phy_map_lock);
+ err = -ENOMEM;
+ break;
+ }
+
+ /*
+ * every three bits in the Node ID mapping register maps
+ * to a particular node.
+ */
+ for (i = 0; i < 8; i++) {
+ if (nodeid == ((config >> (3 * i)) & 0x7)) {
+ if (topology_max_die_per_package() > 1)
+ die_id = i;
+ else
+ die_id = topology_phys_to_logical_pkg(i);
+ map->pbus_to_dieid[bus] = die_id;
+ break;
+ }
+ }
raw_spin_unlock(&pci2phy_map_lock);
- err = -ENOMEM;
- break;
- }
+ } else {
+ int node = pcibus_to_node(ubox_dev->bus);
+ int cpu;
+
+ segment = pci_domain_nr(ubox_dev->bus);
+ raw_spin_lock(&pci2phy_map_lock);
+ map = __find_pci2phy_map(segment);
+ if (!map) {
+ raw_spin_unlock(&pci2phy_map_lock);
+ err = -ENOMEM;
+ break;
+ }
- /*
- * every three bits in the Node ID mapping register maps
- * to a particular node.
- */
- for (i = 0; i < 8; i++) {
- if (nodeid == ((config >> (3 * i)) & 0x7)) {
- map->pbus_to_physid[bus] = i;
+ die_id = -1;
+ for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (c->initialized && cpu_to_node(cpu) == node) {
+ map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
+ break;
+ }
+ }
+ raw_spin_unlock(&pci2phy_map_lock);
+
+ if (WARN_ON_ONCE(die_id == -1)) {
+ err = -EINVAL;
break;
}
}
- raw_spin_unlock(&pci2phy_map_lock);
}
if (!err) {
@@ -1412,17 +1453,17 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
i = -1;
if (reverse) {
for (bus = 255; bus >= 0; bus--) {
- if (map->pbus_to_physid[bus] >= 0)
- i = map->pbus_to_physid[bus];
+ if (map->pbus_to_dieid[bus] >= 0)
+ i = map->pbus_to_dieid[bus];
else
- map->pbus_to_physid[bus] = i;
+ map->pbus_to_dieid[bus] = i;
}
} else {
for (bus = 0; bus <= 255; bus++) {
- if (map->pbus_to_physid[bus] >= 0)
- i = map->pbus_to_physid[bus];
+ if (map->pbus_to_dieid[bus] >= 0)
+ i = map->pbus_to_dieid[bus];
else
- map->pbus_to_physid[bus] = i;
+ map->pbus_to_dieid[bus] = i;
}
}
}
@@ -4646,19 +4687,14 @@ int snr_uncore_pci_init(void)
static struct pci_dev *snr_uncore_get_mc_dev(int id)
{
struct pci_dev *mc_dev = NULL;
- int phys_id, pkg;
+ int pkg;
while (1) {
mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
if (!mc_dev)
break;
- phys_id = uncore_pcibus_to_physid(mc_dev->bus);
- if (phys_id < 0)
- continue;
- pkg = topology_phys_to_logical_pkg(phys_id);
- if (pkg < 0)
- continue;
- else if (pkg == id)
+ pkg = uncore_pcibus_to_dieid(mc_dev->bus);
+ if (pkg == id)
break;
}
return mc_dev;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 7895cf4c59a7..53b2b5fc23bc 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -80,6 +80,7 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode)
#define PERF_X86_EVENT_PAIR 0x1000 /* Large Increment per Cycle */
#define PERF_X86_EVENT_LBR_SELECT 0x2000 /* Save/Restore MSR_LBR_SELECT */
#define PERF_X86_EVENT_TOPDOWN 0x4000 /* Count Topdown slots/metrics events */
+#define PERF_X86_EVENT_PEBS_STLAT 0x8000 /* st+stlat data address sampling */
static inline bool is_topdown_count(struct perf_event *event)
{
@@ -443,6 +444,10 @@ struct cpu_hw_events {
__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)
+#define INTEL_PSD_CONSTRAINT(c, n) \
+ __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
+ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_STLAT)
+
#define INTEL_PST_CONSTRAINT(c, n) \
__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)
@@ -682,8 +687,7 @@ struct x86_pmu {
/* PMI handler bits */
unsigned int late_ack :1,
- enabled_ack :1,
- counter_freezing :1;
+ enabled_ack :1;
/*
* sysfs attrs
*/
@@ -724,7 +728,8 @@ struct x86_pmu {
pebs_broken :1,
pebs_prec_dist :1,
pebs_no_tlb :1,
- pebs_no_isolation :1;
+ pebs_no_isolation :1,
+ pebs_block :1;
int pebs_record_size;
int pebs_buffer_size;
int max_pebs_events;
@@ -776,6 +781,7 @@ struct x86_pmu {
/*
* Intel perf metrics
*/
+ int num_topdown_events;
u64 (*update_topdown_event)(struct perf_event *event);
int (*set_topdown_event_period)(struct perf_event *event);
@@ -871,6 +877,8 @@ do { \
#define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */
#define PMU_FL_TFA 0x20 /* deal with TSX force abort */
#define PMU_FL_PAIR 0x40 /* merge counters for large incr. events */
+#define PMU_FL_INSTR_LATENCY 0x80 /* Support Instruction Latency in PEBS Memory Info Record */
+#define PMU_FL_MEM_LOADS_AUX 0x100 /* Require an auxiliary event for the complete memory info */
#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
@@ -1060,6 +1068,11 @@ ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page);
+static inline bool fixed_counter_disabled(int i)
+{
+ return !(x86_pmu.intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
+}
+
#ifdef CONFIG_CPU_SUP_AMD
int amd_pmu_init(void);
@@ -1157,6 +1170,8 @@ extern struct event_constraint intel_skl_pebs_event_constraints[];
extern struct event_constraint intel_icl_pebs_event_constraints[];
+extern struct event_constraint intel_spr_pebs_event_constraints[];
+
struct event_constraint *intel_pebs_constraints(struct perf_event *event);
void intel_pmu_pebs_add(struct perf_event *event);
diff --git a/arch/x86/events/probe.c b/arch/x86/events/probe.c
index 136a1e847254..600bf8d15c0c 100644
--- a/arch/x86/events/probe.c
+++ b/arch/x86/events/probe.c
@@ -28,6 +28,7 @@ perf_msr_probe(struct perf_msr *msr, int cnt, bool zero, void *data)
for (bit = 0; bit < cnt; bit++) {
if (!msr[bit].no_check) {
struct attribute_group *grp = msr[bit].grp;
+ u64 mask;
/* skip entry with no group */
if (!grp)
@@ -44,8 +45,12 @@ perf_msr_probe(struct perf_msr *msr, int cnt, bool zero, void *data)
/* Virt sucks; you cannot tell if a R/O MSR is present :/ */
if (rdmsrl_safe(msr[bit].msr, &val))
continue;
+
+ mask = msr[bit].mask;
+ if (!mask)
+ mask = ~0ULL;
/* Disable zero counters if requested. */
- if (!zero && !val)
+ if (!zero && !(val & mask))
continue;
grp->is_visible = NULL;
diff --git a/arch/x86/events/probe.h b/arch/x86/events/probe.h
index 4c8e0afc5fb5..261b9bda24e3 100644
--- a/arch/x86/events/probe.h
+++ b/arch/x86/events/probe.h
@@ -4,10 +4,11 @@
#include <linux/sysfs.h>
struct perf_msr {
- u64 msr;
- struct attribute_group *grp;
+ u64 msr;
+ struct attribute_group *grp;
bool (*test)(int idx, void *data);
- bool no_check;
+ bool no_check;
+ u64 mask;
};
unsigned long
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 7dbbeaacd995..f42a70496a24 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -454,16 +454,9 @@ static struct attribute *rapl_events_cores[] = {
NULL,
};
-static umode_t
-rapl_not_visible(struct kobject *kobj, struct attribute *attr, int i)
-{
- return 0;
-}
-
static struct attribute_group rapl_events_cores_group = {
.name = "events",
.attrs = rapl_events_cores,
- .is_visible = rapl_not_visible,
};
static struct attribute *rapl_events_pkg[] = {
@@ -476,7 +469,6 @@ static struct attribute *rapl_events_pkg[] = {
static struct attribute_group rapl_events_pkg_group = {
.name = "events",
.attrs = rapl_events_pkg,
- .is_visible = rapl_not_visible,
};
static struct attribute *rapl_events_ram[] = {
@@ -489,7 +481,6 @@ static struct attribute *rapl_events_ram[] = {
static struct attribute_group rapl_events_ram_group = {
.name = "events",
.attrs = rapl_events_ram,
- .is_visible = rapl_not_visible,
};
static struct attribute *rapl_events_gpu[] = {
@@ -502,7 +493,6 @@ static struct attribute *rapl_events_gpu[] = {
static struct attribute_group rapl_events_gpu_group = {
.name = "events",
.attrs = rapl_events_gpu,
- .is_visible = rapl_not_visible,
};
static struct attribute *rapl_events_psys[] = {
@@ -515,7 +505,6 @@ static struct attribute *rapl_events_psys[] = {
static struct attribute_group rapl_events_psys_group = {
.name = "events",
.attrs = rapl_events_psys,
- .is_visible = rapl_not_visible,
};
static bool test_msr(int idx, void *data)
@@ -523,12 +512,23 @@ static bool test_msr(int idx, void *data)
return test_bit(idx, (unsigned long *) data);
}
+/* Only the lower 32 bits of the MSR represent the energy counter */
+#define RAPL_MSR_MASK 0xFFFFFFFF
+
static struct perf_msr intel_rapl_msrs[] = {
- [PERF_RAPL_PP0] = { MSR_PP0_ENERGY_STATUS, &rapl_events_cores_group, test_msr },
- [PERF_RAPL_PKG] = { MSR_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr },
- [PERF_RAPL_RAM] = { MSR_DRAM_ENERGY_STATUS, &rapl_events_ram_group, test_msr },
- [PERF_RAPL_PP1] = { MSR_PP1_ENERGY_STATUS, &rapl_events_gpu_group, test_msr },
- [PERF_RAPL_PSYS] = { MSR_PLATFORM_ENERGY_STATUS, &rapl_events_psys_group, test_msr },
+ [PERF_RAPL_PP0] = { MSR_PP0_ENERGY_STATUS, &rapl_events_cores_group, test_msr, false, RAPL_MSR_MASK },
+ [PERF_RAPL_PKG] = { MSR_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr, false, RAPL_MSR_MASK },
+ [PERF_RAPL_RAM] = { MSR_DRAM_ENERGY_STATUS, &rapl_events_ram_group, test_msr, false, RAPL_MSR_MASK },
+ [PERF_RAPL_PP1] = { MSR_PP1_ENERGY_STATUS, &rapl_events_gpu_group, test_msr, false, RAPL_MSR_MASK },
+ [PERF_RAPL_PSYS] = { MSR_PLATFORM_ENERGY_STATUS, &rapl_events_psys_group, test_msr, false, RAPL_MSR_MASK },
+};
+
+static struct perf_msr intel_rapl_spr_msrs[] = {
+ [PERF_RAPL_PP0] = { MSR_PP0_ENERGY_STATUS, &rapl_events_cores_group, test_msr, false, RAPL_MSR_MASK },
+ [PERF_RAPL_PKG] = { MSR_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr, false, RAPL_MSR_MASK },
+ [PERF_RAPL_RAM] = { MSR_DRAM_ENERGY_STATUS, &rapl_events_ram_group, test_msr, false, RAPL_MSR_MASK },
+ [PERF_RAPL_PP1] = { MSR_PP1_ENERGY_STATUS, &rapl_events_gpu_group, test_msr, false, RAPL_MSR_MASK },
+ [PERF_RAPL_PSYS] = { MSR_PLATFORM_ENERGY_STATUS, &rapl_events_psys_group, test_msr, true, RAPL_MSR_MASK },
};
/*
@@ -761,7 +761,7 @@ static struct rapl_model model_spr = {
BIT(PERF_RAPL_PSYS),
.unit_quirk = RAPL_UNIT_QUIRK_INTEL_SPR,
.msr_power_unit = MSR_RAPL_POWER_UNIT,
- .rapl_msrs = intel_rapl_msrs,
+ .rapl_msrs = intel_rapl_spr_msrs,
};
static struct rapl_model model_amd_fam17h = {
diff --git a/arch/x86/hyperv/Makefile b/arch/x86/hyperv/Makefile
index 89b1f74d3225..48e2c51464e8 100644
--- a/arch/x86/hyperv/Makefile
+++ b/arch/x86/hyperv/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y := hv_init.o mmu.o nested.o
-obj-$(CONFIG_X86_64) += hv_apic.o
+obj-y := hv_init.o mmu.o nested.o irqdomain.o
+obj-$(CONFIG_X86_64) += hv_apic.o hv_proc.o
ifdef CONFIG_X86_64
obj-$(CONFIG_PARAVIRT_SPINLOCKS) += hv_spinlock.o
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 6375967a8244..b81047dec1da 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -10,6 +10,7 @@
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/types.h>
+#include <linux/bitfield.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/hypervisor.h>
@@ -26,8 +27,11 @@
#include <linux/cpuhotplug.h>
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
+#include <linux/highmem.h>
int hyperv_init_cpuhp;
+u64 hv_current_partition_id = ~0ull;
+EXPORT_SYMBOL_GPL(hv_current_partition_id);
void *hv_hypercall_pg;
EXPORT_SYMBOL_GPL(hv_hypercall_pg);
@@ -44,6 +48,9 @@ EXPORT_SYMBOL_GPL(hv_vp_assist_page);
void __percpu **hyperv_pcpu_input_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg);
+void __percpu **hyperv_pcpu_output_arg;
+EXPORT_SYMBOL_GPL(hyperv_pcpu_output_arg);
+
u32 hv_max_vp_index;
EXPORT_SYMBOL_GPL(hv_max_vp_index);
@@ -76,12 +83,19 @@ static int hv_cpu_init(unsigned int cpu)
void **input_arg;
struct page *pg;
- input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
/* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
- pg = alloc_page(irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
+ pg = alloc_pages(irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL, hv_root_partition ? 1 : 0);
if (unlikely(!pg))
return -ENOMEM;
+
+ input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
*input_arg = page_address(pg);
+ if (hv_root_partition) {
+ void **output_arg;
+
+ output_arg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
+ *output_arg = page_address(pg + 1);
+ }
hv_get_vp_index(msr_vp_index);
@@ -208,14 +222,23 @@ static int hv_cpu_die(unsigned int cpu)
unsigned int new_cpu;
unsigned long flags;
void **input_arg;
- void *input_pg = NULL;
+ void *pg;
local_irq_save(flags);
input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
- input_pg = *input_arg;
+ pg = *input_arg;
*input_arg = NULL;
+
+ if (hv_root_partition) {
+ void **output_arg;
+
+ output_arg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
+ *output_arg = NULL;
+ }
+
local_irq_restore(flags);
- free_page((unsigned long)input_pg);
+
+ free_pages((unsigned long)pg, hv_root_partition ? 1 : 0);
if (hv_vp_assist_page && hv_vp_assist_page[cpu])
wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, 0);
@@ -264,6 +287,9 @@ static int hv_suspend(void)
union hv_x64_msr_hypercall_contents hypercall_msr;
int ret;
+ if (hv_root_partition)
+ return -EPERM;
+
/*
* Reset the hypercall page as it is going to be invalidated
* across hibernation. Setting hv_hypercall_pg to NULL ensures
@@ -334,6 +360,24 @@ static void __init hv_stimer_setup_percpu_clockev(void)
old_setup_percpu_clockev();
}
+static void __init hv_get_partition_id(void)
+{
+ struct hv_get_partition_id *output_page;
+ u64 status;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);
+ status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output_page);
+ if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) {
+ /* No point in proceeding if this failed */
+ pr_err("Failed to get partition ID: %lld\n", status);
+ BUG();
+ }
+ hv_current_partition_id = output_page->partition_id;
+ local_irq_restore(flags);
+}
+
/*
* This function is to be invoked early in the boot sequence after the
* hypervisor has been detected.
@@ -368,6 +412,12 @@ void __init hyperv_init(void)
BUG_ON(hyperv_pcpu_input_arg == NULL);
+ /* Allocate the per-CPU state for output arg for root */
+ if (hv_root_partition) {
+ hyperv_pcpu_output_arg = alloc_percpu(void *);
+ BUG_ON(hyperv_pcpu_output_arg == NULL);
+ }
+
/* Allocate percpu VP index */
hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
GFP_KERNEL);
@@ -408,8 +458,35 @@ void __init hyperv_init(void)
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
hypercall_msr.enable = 1;
- hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
- wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+
+ if (hv_root_partition) {
+ struct page *pg;
+ void *src, *dst;
+
+ /*
+ * For the root partition, the hypervisor will set up its
+ * hypercall page. The hypervisor guarantees it will not show
+ * up in the root's address space. The root can't change the
+ * location of the hypercall page.
+ *
+ * Order is important here. We must enable the hypercall page
+ * so it is populated with code, then copy the code to an
+ * executable page.
+ */
+ wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+
+ pg = vmalloc_to_page(hv_hypercall_pg);
+ dst = kmap(pg);
+ src = memremap(hypercall_msr.guest_physical_address << PAGE_SHIFT, PAGE_SIZE,
+ MEMREMAP_WB);
+ BUG_ON(!(src && dst));
+ memcpy(dst, src, HV_HYP_PAGE_SIZE);
+ memunmap(src);
+ kunmap(pg);
+ } else {
+ hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
+ wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+ }
/*
* hyperv_init() is called before LAPIC is initialized: see
@@ -428,6 +505,21 @@ void __init hyperv_init(void)
register_syscore_ops(&hv_syscore_ops);
hyperv_init_cpuhp = cpuhp;
+
+ if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_ACCESS_PARTITION_ID)
+ hv_get_partition_id();
+
+ BUG_ON(hv_root_partition && hv_current_partition_id == ~0ull);
+
+#ifdef CONFIG_PCI_MSI
+ /*
+ * If we're running as root, we want to create our own PCI MSI domain.
+ * We can't set this in hv_pci_init because that would be too late.
+ */
+ if (hv_root_partition)
+ x86_init.irqs.create_pci_msi_domain = hv_create_pci_msi_domain;
+#endif
+
return;
remove_cpuhp_state:
@@ -552,6 +644,20 @@ EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized);
bool hv_is_hibernation_supported(void)
{
- return acpi_sleep_state_supported(ACPI_STATE_S4);
+ return !hv_root_partition && acpi_sleep_state_supported(ACPI_STATE_S4);
}
EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);
+
+enum hv_isolation_type hv_get_isolation_type(void)
+{
+ if (!(ms_hyperv.features_b & HV_ISOLATION))
+ return HV_ISOLATION_TYPE_NONE;
+ return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
+}
+EXPORT_SYMBOL_GPL(hv_get_isolation_type);
+
+bool hv_is_isolation_supported(void)
+{
+ return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
+}
+EXPORT_SYMBOL_GPL(hv_is_isolation_supported);
diff --git a/arch/x86/hyperv/hv_proc.c b/arch/x86/hyperv/hv_proc.c
new file mode 100644
index 000000000000..60461e598239
--- /dev/null
+++ b/arch/x86/hyperv/hv_proc.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/clockchips.h>
+#include <linux/acpi.h>
+#include <linux/hyperv.h>
+#include <linux/slab.h>
+#include <linux/cpuhotplug.h>
+#include <linux/minmax.h>
+#include <asm/hypervisor.h>
+#include <asm/mshyperv.h>
+#include <asm/apic.h>
+
+#include <asm/trace/hyperv.h>
+
+/*
+ * See struct hv_deposit_memory. The first u64 is partition ID, the rest
+ * are GPAs.
+ */
+#define HV_DEPOSIT_MAX (HV_HYP_PAGE_SIZE / sizeof(u64) - 1)
+
+/* Deposits exact number of pages. Must be called with interrupts enabled. */
+int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
+{
+ struct page **pages, *page;
+ int *counts;
+ int num_allocations;
+ int i, j, page_count;
+ int order;
+ u64 status;
+ int ret;
+ u64 base_pfn;
+ struct hv_deposit_memory *input_page;
+ unsigned long flags;
+
+ if (num_pages > HV_DEPOSIT_MAX)
+ return -E2BIG;
+ if (!num_pages)
+ return 0;
+
+ /* One buffer for page pointers and counts */
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ pages = page_address(page);
+
+ counts = kcalloc(HV_DEPOSIT_MAX, sizeof(int), GFP_KERNEL);
+ if (!counts) {
+ free_page((unsigned long)pages);
+ return -ENOMEM;
+ }
+
+ /* Allocate all the pages before disabling interrupts */
+ i = 0;
+
+ while (num_pages) {
+ /* Find highest order we can actually allocate */
+ order = 31 - __builtin_clz(num_pages);
+
+ while (1) {
+ pages[i] = alloc_pages_node(node, GFP_KERNEL, order);
+ if (pages[i])
+ break;
+ if (!order) {
+ ret = -ENOMEM;
+ num_allocations = i;
+ goto err_free_allocations;
+ }
+ --order;
+ }
+
+ split_page(pages[i], order);
+ counts[i] = 1 << order;
+ num_pages -= counts[i];
+ i++;
+ }
+ num_allocations = i;
+
+ local_irq_save(flags);
+
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ input_page->partition_id = partition_id;
+
+ /* Populate gpa_page_list - these will fit on the input page */
+ for (i = 0, page_count = 0; i < num_allocations; ++i) {
+ base_pfn = page_to_pfn(pages[i]);
+ for (j = 0; j < counts[i]; ++j, ++page_count)
+ input_page->gpa_page_list[page_count] = base_pfn + j;
+ }
+ status = hv_do_rep_hypercall(HVCALL_DEPOSIT_MEMORY,
+ page_count, 0, input_page, NULL);
+ local_irq_restore(flags);
+
+ if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) {
+ pr_err("Failed to deposit pages: %lld\n", status);
+ ret = status;
+ goto err_free_allocations;
+ }
+
+ ret = 0;
+ goto free_buf;
+
+err_free_allocations:
+ for (i = 0; i < num_allocations; ++i) {
+ base_pfn = page_to_pfn(pages[i]);
+ for (j = 0; j < counts[i]; ++j)
+ __free_page(pfn_to_page(base_pfn + j));
+ }
+
+free_buf:
+ free_page((unsigned long)pages);
+ kfree(counts);
+ return ret;
+}
+
+int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
+{
+ struct hv_add_logical_processor_in *input;
+ struct hv_add_logical_processor_out *output;
+ u64 status;
+ unsigned long flags;
+ int ret = 0;
+ int pxm = node_to_pxm(node);
+
+ /*
+ * When adding a logical processor, the hypervisor may return
+ * HV_STATUS_INSUFFICIENT_MEMORY. When that happens, we deposit more
+ * pages and retry.
+ */
+ do {
+ local_irq_save(flags);
+
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ /* We don't do anything with the output right now */
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ input->lp_index = lp_index;
+ input->apic_id = apic_id;
+ input->flags = 0;
+ input->proximity_domain_info.domain_id = pxm;
+ input->proximity_domain_info.flags.reserved = 0;
+ input->proximity_domain_info.flags.proximity_info_valid = 1;
+ input->proximity_domain_info.flags.proximity_preferred = 1;
+ status = hv_do_hypercall(HVCALL_ADD_LOGICAL_PROCESSOR,
+ input, output);
+ local_irq_restore(flags);
+
+ status &= HV_HYPERCALL_RESULT_MASK;
+
+ if (status != HV_STATUS_INSUFFICIENT_MEMORY) {
+ if (status != HV_STATUS_SUCCESS) {
+ pr_err("%s: cpu %u apic ID %u, %lld\n", __func__,
+ lp_index, apic_id, status);
+ ret = status;
+ }
+ break;
+ }
+ ret = hv_call_deposit_pages(node, hv_current_partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
+{
+ struct hv_create_vp *input;
+ u64 status;
+ unsigned long irq_flags;
+ int ret = 0;
+ int pxm = node_to_pxm(node);
+
+ /* Root VPs don't seem to need pages deposited */
+ if (partition_id != hv_current_partition_id) {
+ /* The value 90 is empirically determined. It may change. */
+ ret = hv_call_deposit_pages(node, partition_id, 90);
+ if (ret)
+ return ret;
+ }
+
+ do {
+ local_irq_save(irq_flags);
+
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ input->partition_id = partition_id;
+ input->vp_index = vp_index;
+ input->flags = flags;
+ input->subnode_type = HvSubnodeAny;
+ if (node != NUMA_NO_NODE) {
+ input->proximity_domain_info.domain_id = pxm;
+ input->proximity_domain_info.flags.reserved = 0;
+ input->proximity_domain_info.flags.proximity_info_valid = 1;
+ input->proximity_domain_info.flags.proximity_preferred = 1;
+ } else {
+ input->proximity_domain_info.as_uint64 = 0;
+ }
+ status = hv_do_hypercall(HVCALL_CREATE_VP, input, NULL);
+ local_irq_restore(irq_flags);
+
+ status &= HV_HYPERCALL_RESULT_MASK;
+
+ if (status != HV_STATUS_INSUFFICIENT_MEMORY) {
+ if (status != HV_STATUS_SUCCESS) {
+ pr_err("%s: vcpu %u, lp %u, %lld\n", __func__,
+ vp_index, flags, status);
+ ret = status;
+ }
+ break;
+ }
+ ret = hv_call_deposit_pages(node, partition_id, 1);
+
+ } while (!ret);
+
+ return ret;
+}
+
diff --git a/arch/x86/hyperv/irqdomain.c b/arch/x86/hyperv/irqdomain.c
new file mode 100644
index 000000000000..4421a8d92e23
--- /dev/null
+++ b/arch/x86/hyperv/irqdomain.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Irqdomain for Linux to run as the root partition on Microsoft Hypervisor.
+ *
+ * Authors:
+ * Sunil Muthuswamy <sunilmut@microsoft.com>
+ * Wei Liu <wei.liu@kernel.org>
+ */
+
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <asm/mshyperv.h>
+
+static int hv_map_interrupt(union hv_device_id device_id, bool level,
+ int cpu, int vector, struct hv_interrupt_entry *entry)
+{
+ struct hv_input_map_device_interrupt *input;
+ struct hv_output_map_device_interrupt *output;
+ struct hv_device_interrupt_descriptor *intr_desc;
+ unsigned long flags;
+ u64 status;
+ int nr_bank, var_size;
+
+ local_irq_save(flags);
+
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ intr_desc = &input->interrupt_descriptor;
+ memset(input, 0, sizeof(*input));
+ input->partition_id = hv_current_partition_id;
+ input->device_id = device_id.as_uint64;
+ intr_desc->interrupt_type = HV_X64_INTERRUPT_TYPE_FIXED;
+ intr_desc->vector_count = 1;
+ intr_desc->target.vector = vector;
+
+ if (level)
+ intr_desc->trigger_mode = HV_INTERRUPT_TRIGGER_MODE_LEVEL;
+ else
+ intr_desc->trigger_mode = HV_INTERRUPT_TRIGGER_MODE_EDGE;
+
+ intr_desc->target.vp_set.valid_bank_mask = 0;
+ intr_desc->target.vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+ nr_bank = cpumask_to_vpset(&(intr_desc->target.vp_set), cpumask_of(cpu));
+ if (nr_bank < 0) {
+ local_irq_restore(flags);
+ pr_err("%s: unable to generate VP set\n", __func__);
+ return EINVAL;
+ }
+ intr_desc->target.flags = HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
+
+ /*
+ * var-sized hypercall, var-size starts after vp_mask (thus
+ * vp_set.format does not count, but vp_set.valid_bank_mask
+ * does).
+ */
+ var_size = nr_bank + 1;
+
+ status = hv_do_rep_hypercall(HVCALL_MAP_DEVICE_INTERRUPT, 0, var_size,
+ input, output);
+ *entry = output->interrupt_entry;
+
+ local_irq_restore(flags);
+
+ if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
+ pr_err("%s: hypercall failed, status %lld\n", __func__, status);
+
+ return status & HV_HYPERCALL_RESULT_MASK;
+}
+
+static int hv_unmap_interrupt(u64 id, struct hv_interrupt_entry *old_entry)
+{
+ unsigned long flags;
+ struct hv_input_unmap_device_interrupt *input;
+ struct hv_interrupt_entry *intr_entry;
+ u64 status;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ memset(input, 0, sizeof(*input));
+ intr_entry = &input->interrupt_entry;
+ input->partition_id = hv_current_partition_id;
+ input->device_id = id;
+ *intr_entry = *old_entry;
+
+ status = hv_do_hypercall(HVCALL_UNMAP_DEVICE_INTERRUPT, input, NULL);
+ local_irq_restore(flags);
+
+ return status & HV_HYPERCALL_RESULT_MASK;
+}
+
+#ifdef CONFIG_PCI_MSI
+struct rid_data {
+ struct pci_dev *bridge;
+ u32 rid;
+};
+
+static int get_rid_cb(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct rid_data *rd = data;
+ u8 bus = PCI_BUS_NUM(rd->rid);
+
+ if (pdev->bus->number != bus || PCI_BUS_NUM(alias) != bus) {
+ rd->bridge = pdev;
+ rd->rid = alias;
+ }
+
+ return 0;
+}
+
+static union hv_device_id hv_build_pci_dev_id(struct pci_dev *dev)
+{
+ union hv_device_id dev_id;
+ struct rid_data data = {
+ .bridge = NULL,
+ .rid = PCI_DEVID(dev->bus->number, dev->devfn)
+ };
+
+ pci_for_each_dma_alias(dev, get_rid_cb, &data);
+
+ dev_id.as_uint64 = 0;
+ dev_id.device_type = HV_DEVICE_TYPE_PCI;
+ dev_id.pci.segment = pci_domain_nr(dev->bus);
+
+ dev_id.pci.bdf.bus = PCI_BUS_NUM(data.rid);
+ dev_id.pci.bdf.device = PCI_SLOT(data.rid);
+ dev_id.pci.bdf.function = PCI_FUNC(data.rid);
+ dev_id.pci.source_shadow = HV_SOURCE_SHADOW_NONE;
+
+ if (data.bridge) {
+ int pos;
+
+ /*
+ * Microsoft Hypervisor requires a bus range when the bridge is
+ * running in PCI-X mode.
+ *
+ * To distinguish conventional vs PCI-X bridge, we can check
+ * the bridge's PCI-X Secondary Status Register, Secondary Bus
+ * Mode and Frequency bits. See PCI Express to PCI/PCI-X Bridge
+ * Specification Revision 1.0 5.2.2.1.3.
+ *
+ * Value zero means it is in conventional mode, otherwise it is
+ * in PCI-X mode.
+ */
+
+ pos = pci_find_capability(data.bridge, PCI_CAP_ID_PCIX);
+ if (pos) {
+ u16 status;
+
+ pci_read_config_word(data.bridge, pos +
+ PCI_X_BRIDGE_SSTATUS, &status);
+
+ if (status & PCI_X_SSTATUS_FREQ) {
+ /* Non-zero, PCI-X mode */
+ u8 sec_bus, sub_bus;
+
+ dev_id.pci.source_shadow = HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE;
+
+ pci_read_config_byte(data.bridge, PCI_SECONDARY_BUS, &sec_bus);
+ dev_id.pci.shadow_bus_range.secondary_bus = sec_bus;
+ pci_read_config_byte(data.bridge, PCI_SUBORDINATE_BUS, &sub_bus);
+ dev_id.pci.shadow_bus_range.subordinate_bus = sub_bus;
+ }
+ }
+ }
+
+ return dev_id;
+}
+
+static int hv_map_msi_interrupt(struct pci_dev *dev, int cpu, int vector,
+ struct hv_interrupt_entry *entry)
+{
+ union hv_device_id device_id = hv_build_pci_dev_id(dev);
+
+ return hv_map_interrupt(device_id, false, cpu, vector, entry);
+}
+
+static inline void entry_to_msi_msg(struct hv_interrupt_entry *entry, struct msi_msg *msg)
+{
+ /* High address is always 0 */
+ msg->address_hi = 0;
+ msg->address_lo = entry->msi_entry.address.as_uint32;
+ msg->data = entry->msi_entry.data.as_uint32;
+}
+
+static int hv_unmap_msi_interrupt(struct pci_dev *dev, struct hv_interrupt_entry *old_entry);
+static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct msi_desc *msidesc;
+ struct pci_dev *dev;
+ struct hv_interrupt_entry out_entry, *stored_entry;
+ struct irq_cfg *cfg = irqd_cfg(data);
+ cpumask_t *affinity;
+ int cpu;
+ u64 status;
+
+ msidesc = irq_data_get_msi_desc(data);
+ dev = msi_desc_to_pci_dev(msidesc);
+
+ if (!cfg) {
+ pr_debug("%s: cfg is NULL", __func__);
+ return;
+ }
+
+ affinity = irq_data_get_effective_affinity_mask(data);
+ cpu = cpumask_first_and(affinity, cpu_online_mask);
+
+ if (data->chip_data) {
+ /*
+ * This interrupt is already mapped. Let's unmap first.
+ *
+ * We don't use retarget interrupt hypercalls here because
+		 * Microsoft Hypervisor doesn't allow root to change the vector
+ * or specify VPs outside of the set that is initially used
+ * during mapping.
+ */
+ stored_entry = data->chip_data;
+ data->chip_data = NULL;
+
+ status = hv_unmap_msi_interrupt(dev, stored_entry);
+
+ kfree(stored_entry);
+
+ if (status != HV_STATUS_SUCCESS) {
+ pr_debug("%s: failed to unmap, status %lld", __func__, status);
+ return;
+ }
+ }
+
+ stored_entry = kzalloc(sizeof(*stored_entry), GFP_ATOMIC);
+ if (!stored_entry) {
+ pr_debug("%s: failed to allocate chip data\n", __func__);
+ return;
+ }
+
+ status = hv_map_msi_interrupt(dev, cpu, cfg->vector, &out_entry);
+ if (status != HV_STATUS_SUCCESS) {
+ kfree(stored_entry);
+ return;
+ }
+
+ *stored_entry = out_entry;
+ data->chip_data = stored_entry;
+ entry_to_msi_msg(&out_entry, msg);
+
+ return;
+}
+
+static int hv_unmap_msi_interrupt(struct pci_dev *dev, struct hv_interrupt_entry *old_entry)
+{
+ return hv_unmap_interrupt(hv_build_pci_dev_id(dev).as_uint64, old_entry);
+}
+
+static void hv_teardown_msi_irq_common(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
+{
+ u64 status;
+ struct hv_interrupt_entry old_entry;
+ struct irq_desc *desc;
+ struct irq_data *data;
+ struct msi_msg msg;
+
+ desc = irq_to_desc(irq);
+ if (!desc) {
+ pr_debug("%s: no irq desc\n", __func__);
+ return;
+ }
+
+ data = &desc->irq_data;
+ if (!data) {
+ pr_debug("%s: no irq data\n", __func__);
+ return;
+ }
+
+ if (!data->chip_data) {
+		pr_debug("%s: no chip data!\n", __func__);
+ return;
+ }
+
+ old_entry = *(struct hv_interrupt_entry *)data->chip_data;
+ entry_to_msi_msg(&old_entry, &msg);
+
+ kfree(data->chip_data);
+ data->chip_data = NULL;
+
+ status = hv_unmap_msi_interrupt(dev, &old_entry);
+
+ if (status != HV_STATUS_SUCCESS) {
+ pr_err("%s: hypercall failed, status %lld\n", __func__, status);
+ return;
+ }
+}
+
+static void hv_msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
+{
+ int i;
+ struct msi_desc *entry;
+ struct pci_dev *pdev;
+
+ if (WARN_ON_ONCE(!dev_is_pci(dev)))
+ return;
+
+ pdev = to_pci_dev(dev);
+
+ for_each_pci_msi_entry(entry, pdev) {
+ if (entry->irq) {
+ for (i = 0; i < entry->nvec_used; i++) {
+ hv_teardown_msi_irq_common(pdev, entry, entry->irq + i);
+ irq_domain_free_irqs(entry->irq + i, 1);
+ }
+ }
+ }
+}
+
+/*
+ * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
+ * which implement the MSI or MSI-X Capability Structure.
+ */
+static struct irq_chip hv_pci_msi_controller = {
+ .name = "HV-PCI-MSI",
+ .irq_unmask = pci_msi_unmask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_compose_msi_msg = hv_irq_compose_msi_msg,
+ .irq_set_affinity = msi_domain_set_affinity,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static struct msi_domain_ops pci_msi_domain_ops = {
+ .domain_free_irqs = hv_msi_domain_free_irqs,
+ .msi_prepare = pci_msi_prepare,
+};
+
+static struct msi_domain_info hv_pci_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX,
+ .ops = &pci_msi_domain_ops,
+ .chip = &hv_pci_msi_controller,
+ .handler = handle_edge_irq,
+ .handler_name = "edge",
+};
+
+struct irq_domain * __init hv_create_pci_msi_domain(void)
+{
+ struct irq_domain *d = NULL;
+ struct fwnode_handle *fn;
+
+ fn = irq_domain_alloc_named_fwnode("HV-PCI-MSI");
+ if (fn)
+ d = pci_msi_create_irq_domain(fn, &hv_pci_msi_domain_info, x86_vector_domain);
+
+ /* No point in going further if we can't get an irq domain */
+ BUG_ON(!d);
+
+ return d;
+}
+
+#endif /* CONFIG_PCI_MSI */
+
+int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry)
+{
+ union hv_device_id device_id;
+
+ device_id.as_uint64 = 0;
+ device_id.device_type = HV_DEVICE_TYPE_IOAPIC;
+ device_id.ioapic.ioapic_id = (u8)ioapic_id;
+
+ return hv_unmap_interrupt(device_id.as_uint64, entry);
+}
+EXPORT_SYMBOL_GPL(hv_unmap_ioapic_interrupt);
+
+int hv_map_ioapic_interrupt(int ioapic_id, bool level, int cpu, int vector,
+ struct hv_interrupt_entry *entry)
+{
+ union hv_device_id device_id;
+
+ device_id.as_uint64 = 0;
+ device_id.device_type = HV_DEVICE_TYPE_IOAPIC;
+ device_id.ioapic.ioapic_id = (u8)ioapic_id;
+
+ return hv_map_interrupt(device_id, level, cpu, vector, entry);
+}
+EXPORT_SYMBOL_GPL(hv_map_ioapic_interrupt);
diff --git a/arch/x86/include/asm/acrn.h b/arch/x86/include/asm/acrn.h
new file mode 100644
index 000000000000..e003a01b7c67
--- /dev/null
+++ b/arch/x86/include/asm/acrn.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_ACRN_H
+#define _ASM_X86_ACRN_H
+
+/*
+ * This CPUID returns feature bitmaps in EAX.
+ * Guest VM uses this to detect the appropriate feature bit.
+ */
+#define ACRN_CPUID_FEATURES 0x40000001
+/* Bit 0 indicates whether guest VM is privileged */
+#define ACRN_FEATURE_PRIVILEGED_VM BIT(0)
+
+void acrn_setup_intr_handler(void (*handler)(void));
+void acrn_remove_intr_handler(void);
+
+static inline u32 acrn_cpuid_base(void)
+{
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return hypervisor_cpuid_base("ACRNACRNACRN", 0);
+
+ return 0;
+}
+
+/*
+ * Hypercalls for ACRN
+ *
+ * - VMCALL instruction is used to implement ACRN hypercalls.
+ * - ACRN hypercall ABI:
+ * - Hypercall number is passed in R8 register.
+ * - Up to 2 arguments are passed in RDI, RSI.
+ * - Return value will be placed in RAX.
+ *
+ * Because GCC doesn't support the R8 register as a direct register constraint, use a
+ * supported constraint as input with an explicit MOV to R8 at the beginning of the asm.
+ */
+static inline long acrn_hypercall0(unsigned long hcall_id)
+{
+ long result;
+
+ asm volatile("movl %1, %%r8d\n\t"
+ "vmcall\n\t"
+ : "=a" (result)
+ : "g" (hcall_id)
+ : "r8", "memory");
+
+ return result;
+}
+
+static inline long acrn_hypercall1(unsigned long hcall_id,
+ unsigned long param1)
+{
+ long result;
+
+ asm volatile("movl %1, %%r8d\n\t"
+ "vmcall\n\t"
+ : "=a" (result)
+ : "g" (hcall_id), "D" (param1)
+ : "r8", "memory");
+
+ return result;
+}
+
+static inline long acrn_hypercall2(unsigned long hcall_id,
+ unsigned long param1,
+ unsigned long param2)
+{
+ long result;
+
+ asm volatile("movl %1, %%r8d\n\t"
+ "vmcall\n\t"
+ : "=a" (result)
+ : "g" (hcall_id), "D" (param1), "S" (param2)
+ : "r8", "memory");
+
+ return result;
+}
+
+#endif /* _ASM_X86_ACRN_H */
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
deleted file mode 100644
index 87ce8e963215..000000000000
--- a/arch/x86/include/asm/apb_timer.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * apb_timer.h: Driver for Langwell APB timer based on Synopsis DesignWare
- *
- * (C) Copyright 2009 Intel Corporation
- * Author: Jacob Pan (jacob.jun.pan@intel.com)
- *
- * Note:
- */
-
-#ifndef ASM_X86_APBT_H
-#define ASM_X86_APBT_H
-#include <linux/sfi.h>
-
-#ifdef CONFIG_APB_TIMER
-
-/* default memory mapped register base */
-#define LNW_SCU_ADDR 0xFF100000
-#define LNW_EXT_TIMER_OFFSET 0x1B800
-#define APBT_DEFAULT_BASE (LNW_SCU_ADDR+LNW_EXT_TIMER_OFFSET)
-#define LNW_EXT_TIMER_PGOFFSET 0x800
-
-/* APBT clock speed range from PCLK to fabric base, 25-100MHz */
-#define APBT_MAX_FREQ 50000000
-#define APBT_MIN_FREQ 1000000
-#define APBT_MMAP_SIZE 1024
-
-extern void apbt_time_init(void);
-extern void apbt_setup_secondary_clock(void);
-
-extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
-extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
-extern int sfi_mtimer_num;
-
-#else /* CONFIG_APB_TIMER */
-
-static inline void apbt_time_init(void) { }
-
-#endif
-#endif /* ASM_X86_APBT_H */
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 34cb3c159481..412b51e059c8 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -197,16 +197,6 @@ static inline bool apic_needs_pit(void) { return true; }
#endif /* !CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_X2APIC
-/*
- * Make previous memory operations globally visible before
- * sending the IPI through x2apic wrmsr. We need a serializing instruction or
- * mfence for this.
- */
-static inline void x2apic_wrmsr_fence(void)
-{
- asm volatile("mfence" : : : "memory");
-}
-
static inline void native_apic_msr_write(u32 reg, u32 v)
{
if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 7f828fe49797..4819d5e5a335 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -84,4 +84,22 @@ do { \
#include <asm-generic/barrier.h>
+/*
+ * Make previous memory operations globally visible before
+ * a WRMSR.
+ *
+ * MFENCE makes writes visible, but only affects load/store
+ * instructions. WRMSR is unfortunately not a load/store
+ * instruction and is unaffected by MFENCE. The LFENCE ensures
+ * that the WRMSR is not reordered.
+ *
+ * Most WRMSRs are full serializing instructions themselves and
+ * do not require this barrier. This is only required for the
+ * IA32_TSC_DEADLINE and X2APIC MSRs.
+ */
+static inline void weak_wrmsr_fence(void)
+{
+ asm volatile("mfence; lfence" : : : "memory");
+}
+
#endif /* _ASM_X86_BARRIER_H */
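For illustration only (not part of the patch): a sketch of the intended use, where a caller orders prior stores before a non-serializing x2APIC MSR write. The function name example_x2apic_icr_write() is hypothetical; APIC_BASE_MSR and APIC_ICR come from <asm/apicdef.h> and the MSR index arithmetic follows the existing x2APIC convention.

#include <asm/apicdef.h>
#include <asm/barrier.h>
#include <asm/msr.h>

static void example_x2apic_icr_write(u32 low, u32 dest)
{
	/* Make prior memory operations globally visible before the WRMSR. */
	weak_wrmsr_fence();
	wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((u64)dest << 32) | low);
}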
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index f145e3326c6d..be09c7eac89f 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -159,17 +159,6 @@ struct compat_shmid64_ds {
compat_ulong_t __unused5;
};
-/*
- * The type of struct elf_prstatus.pr_reg in compatible core dumps.
- */
-typedef struct user_regs_struct compat_elf_gregset_t;
-
-/* Full regset -- prstatus on x32, otherwise on ia32 */
-#define PRSTATUS_SIZE(S, R) (R != sizeof(S.pr_reg) ? 144 : 296)
-#define SET_PR_FPVALID(S, V, R) \
- do { *(int *) (((void *) &((S)->pr_reg)) + R) = (V); } \
- while (0)
-
#ifdef CONFIG_X86_X32_ABI
#define COMPAT_USE_64BIT_TIME \
(!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 59bf91c57aa8..1728d4ce5730 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -30,6 +30,7 @@ enum cpuid_leafs
CPUID_7_ECX,
CPUID_8000_0007_EBX,
CPUID_7_EDX,
+ CPUID_8000_001F_EAX,
};
#ifdef CONFIG_X86_FEATURE_NAMES
@@ -88,8 +89,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \
REQUIRED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 19))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 20))
#define DISABLED_MASK_BIT_SET(feature_bit) \
( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
@@ -111,8 +113,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \
DISABLED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 19))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 20))
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 84b887825f12..cc96e26d69f7 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -13,7 +13,7 @@
/*
* Defines x86 CPU feature bits
*/
-#define NCAPINTS 19 /* N 32-bit words worth of info */
+#define NCAPINTS 20 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */
/*
@@ -96,7 +96,7 @@
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
-#define X86_FEATURE_SME_COHERENT ( 3*32+17) /* "" AMD hardware-enforced cache coherency */
+/* FREE! ( 3*32+17) */
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
@@ -201,7 +201,7 @@
#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
+/* FREE! ( 7*32+10) */
#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
@@ -211,7 +211,7 @@
#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
-#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
+/* FREE! ( 7*32+20) */
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
@@ -236,8 +236,6 @@
#define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
#define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
#define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
-#define X86_FEATURE_SEV_ES ( 8*32+20) /* AMD Secure Encrypted Virtualization - Encrypted State */
-#define X86_FEATURE_VM_PAGE_FLUSH ( 8*32+21) /* "" VM Page Flush MSR is supported */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
@@ -294,6 +292,7 @@
#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
+#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
@@ -337,6 +336,7 @@
#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
+#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* "" SVME addr check */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
@@ -385,6 +385,13 @@
#define X86_FEATURE_CORE_CAPABILITIES (18*32+30) /* "" IA32_CORE_CAPABILITIES MSR */
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
+/* AMD-defined memory encryption features, CPUID level 0x8000001f (EAX), word 19 */
+#define X86_FEATURE_SME (19*32+ 0) /* AMD Secure Memory Encryption */
+#define X86_FEATURE_SEV (19*32+ 1) /* AMD Secure Encrypted Virtualization */
+#define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* "" VM Page Flush MSR is supported */
+#define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
+#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
+
/*
* BUG word(s)
*/
diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h
deleted file mode 100644
index 777c0f63418c..000000000000
--- a/arch/x86/include/asm/crypto/glue_helper.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Shared glue code for 128bit block ciphers
- */
-
-#ifndef _CRYPTO_GLUE_HELPER_H
-#define _CRYPTO_GLUE_HELPER_H
-
-#include <crypto/internal/skcipher.h>
-#include <linux/kernel.h>
-#include <asm/fpu/api.h>
-#include <crypto/b128ops.h>
-
-typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src);
-typedef void (*common_glue_cbc_func_t)(const void *ctx, u8 *dst, const u8 *src);
-typedef void (*common_glue_ctr_func_t)(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-typedef void (*common_glue_xts_func_t)(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
-struct common_glue_func_entry {
- unsigned int num_blocks; /* number of blocks that @fn will process */
- union {
- common_glue_func_t ecb;
- common_glue_cbc_func_t cbc;
- common_glue_ctr_func_t ctr;
- common_glue_xts_func_t xts;
- } fn_u;
-};
-
-struct common_glue_ctx {
- unsigned int num_funcs;
- int fpu_blocks_limit; /* -1 means fpu not needed at all */
-
- /*
- * First funcs entry must have largest num_blocks and last funcs entry
- * must have num_blocks == 1!
- */
- struct common_glue_func_entry funcs[];
-};
-
-static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
- struct skcipher_walk *walk,
- bool fpu_enabled, unsigned int nbytes)
-{
- if (likely(fpu_blocks_limit < 0))
- return false;
-
- if (fpu_enabled)
- return true;
-
- /*
- * Vector-registers are only used when chunk to be processed is large
- * enough, so do not enable FPU until it is necessary.
- */
- if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
- return false;
-
- /* prevent sleeping if FPU is in use */
- skcipher_walk_atomise(walk);
-
- kernel_fpu_begin();
- return true;
-}
-
-static inline void glue_fpu_end(bool fpu_enabled)
-{
- if (fpu_enabled)
- kernel_fpu_end();
-}
-
-static inline void le128_to_be128(be128 *dst, const le128 *src)
-{
- dst->a = cpu_to_be64(le64_to_cpu(src->a));
- dst->b = cpu_to_be64(le64_to_cpu(src->b));
-}
-
-static inline void be128_to_le128(le128 *dst, const be128 *src)
-{
- dst->a = cpu_to_le64(be64_to_cpu(src->a));
- dst->b = cpu_to_le64(be64_to_cpu(src->b));
-}
-
-static inline void le128_inc(le128 *i)
-{
- u64 a = le64_to_cpu(i->a);
- u64 b = le64_to_cpu(i->b);
-
- b++;
- if (!b)
- a++;
-
- i->a = cpu_to_le64(a);
- i->b = cpu_to_le64(b);
-}
-
-extern int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
- struct skcipher_request *req);
-
-extern int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
- struct skcipher_request *req);
-
-extern int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
- struct skcipher_request *req);
-
-extern int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
- struct skcipher_request *req);
-
-extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
- struct skcipher_request *req,
- common_glue_func_t tweak_fn, void *tweak_ctx,
- void *crypt_ctx, bool decrypt);
-
-extern void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst,
- const u8 *src, le128 *iv,
- common_glue_func_t fn);
-
-#endif /* _CRYPTO_GLUE_HELPER_H */
diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h
deleted file mode 100644
index 251c2c89d7cf..000000000000
--- a/arch/x86/include/asm/crypto/serpent-avx.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_X86_SERPENT_AVX_H
-#define ASM_X86_SERPENT_AVX_H
-
-#include <crypto/b128ops.h>
-#include <crypto/serpent.h>
-#include <linux/types.h>
-
-struct crypto_skcipher;
-
-#define SERPENT_PARALLEL_BLOCKS 8
-
-struct serpent_xts_ctx {
- struct serpent_ctx tweak_ctx;
- struct serpent_ctx crypt_ctx;
-};
-
-asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst,
- const u8 *src);
-
-asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
-asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst,
- const u8 *src, le128 *iv);
-asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst,
- const u8 *src, le128 *iv);
-
-extern void __serpent_crypt_ctr(const void *ctx, u8 *dst, const u8 *src,
- le128 *iv);
-
-extern void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv);
-extern void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv);
-
-extern int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen);
-
-#endif
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 7947cb1782da..b7dd944dc867 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -91,6 +91,7 @@
DISABLE_ENQCMD)
#define DISABLED_MASK17 0
#define DISABLED_MASK18 0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
+#define DISABLED_MASK19 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
#endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index c98f78330b09..4d0b126835b8 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -12,6 +12,7 @@
#include <linux/pgtable.h>
extern unsigned long efi_fw_vendor, efi_config_table;
+extern unsigned long efi_mixed_mode_stack_pa;
/*
* We map the EFI regions needed for runtime services non-contiguously,
@@ -68,17 +69,33 @@ extern unsigned long efi_fw_vendor, efi_config_table;
#f " called with too many arguments (" #p ">" #n ")"); \
})
+static inline void efi_fpu_begin(void)
+{
+ /*
+ * The UEFI calling convention (UEFI spec 2.3.2 and 2.3.4) requires
+ * that FCW and MXCSR (64-bit) must be initialized prior to calling
+ * UEFI code. (Oddly the spec does not require that the FPU stack
+ * be empty.)
+ */
+ kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
+}
+
+static inline void efi_fpu_end(void)
+{
+ kernel_fpu_end();
+}
+
#ifdef CONFIG_X86_32
#define arch_efi_call_virt_setup() \
({ \
- kernel_fpu_begin(); \
+ efi_fpu_begin(); \
firmware_restrict_branch_speculation_start(); \
})
#define arch_efi_call_virt_teardown() \
({ \
firmware_restrict_branch_speculation_end(); \
- kernel_fpu_end(); \
+ efi_fpu_end(); \
})
#define arch_efi_call_virt(p, f, args...) p->f(args)
@@ -94,22 +111,12 @@ extern asmlinkage u64 __efi_call(void *fp, ...);
__efi_call(__VA_ARGS__); \
})
-/*
- * struct efi_scratch - Scratch space used while switching to/from efi_mm
- * @phys_stack: stack used during EFI Mixed Mode
- * @prev_mm: store/restore stolen mm_struct while switching to/from efi_mm
- */
-struct efi_scratch {
- u64 phys_stack;
- struct mm_struct *prev_mm;
-} __packed;
-
#define arch_efi_call_virt_setup() \
({ \
efi_sync_low_kernel_mappings(); \
- kernel_fpu_begin(); \
+ efi_fpu_begin(); \
firmware_restrict_branch_speculation_start(); \
- efi_switch_mm(&efi_mm); \
+ efi_enter_mm(); \
})
#define arch_efi_call_virt(p, f, args...) \
@@ -117,9 +124,9 @@ struct efi_scratch {
#define arch_efi_call_virt_teardown() \
({ \
- efi_switch_mm(efi_scratch.prev_mm); \
+ efi_leave_mm(); \
firmware_restrict_branch_speculation_end(); \
- kernel_fpu_end(); \
+ efi_fpu_end(); \
})
#ifdef CONFIG_KASAN
@@ -136,7 +143,6 @@ struct efi_scratch {
#endif /* CONFIG_X86_32 */
-extern struct efi_scratch efi_scratch;
extern int __init efi_memblock_x86_reserve_range(void);
extern void __init efi_print_memmap(void);
extern void __init efi_map_region(efi_memory_desc_t *md);
@@ -149,10 +155,12 @@ extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);
-extern void efi_switch_mm(struct mm_struct *mm);
-extern void efi_recover_from_page_fault(unsigned long phys_addr);
+extern void efi_crash_gracefully_on_page_fault(unsigned long phys_addr);
extern void efi_free_boot_services(void);
+void efi_enter_mm(void);
+void efi_leave_mm(void);
+
/* kexec external ABI */
struct efi_setup_data {
u64 fw_vendor;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 66bdfe838d61..9224d40cdefe 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -364,7 +364,7 @@ do { \
#define COMPAT_ARCH_DLINFO \
if (exec->e_machine == EM_X86_64) \
ARCH_DLINFO_X32; \
-else \
+else if (IS_ENABLED(CONFIG_IA32_EMULATION)) \
ARCH_DLINFO_IA32
#define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
diff --git a/arch/x86/include/asm/elfcore-compat.h b/arch/x86/include/asm/elfcore-compat.h
new file mode 100644
index 000000000000..f1b6c7a8d8fc
--- /dev/null
+++ b/arch/x86/include/asm/elfcore-compat.h
@@ -0,0 +1,31 @@
+#ifndef _ASM_X86_ELFCORE_COMPAT_H
+#define _ASM_X86_ELFCORE_COMPAT_H
+
+#include <asm/user32.h>
+
+/*
+ * On amd64 we have two 32bit ABIs - i386 and x32. The latter
+ * has bigger registers, so we use it for compat_elf_gregset_t.
+ * The former uses struct i386_elf_prstatus; PRSTATUS_SIZE and
+ * SET_PR_FPVALID choose the size and the location of ->pr_fpvalid
+ * for the layout actually in use.
+ */
+typedef struct user_regs_struct compat_elf_gregset_t;
+
+struct i386_elf_prstatus
+{
+ struct compat_elf_prstatus_common common;
+ struct user_regs_struct32 pr_reg;
+ compat_int_t pr_fpvalid;
+};
+
+#define PRSTATUS_SIZE \
+ (user_64bit_mode(task_pt_regs(current)) \
+ ? sizeof(struct compat_elf_prstatus) \
+ : sizeof(struct i386_elf_prstatus))
+#define SET_PR_FPVALID(S) \
+ (*(user_64bit_mode(task_pt_regs(current)) \
+ ? &(S)->pr_fpvalid \
+ : &((struct i386_elf_prstatus *)(S))->pr_fpvalid) = 1)
+
+#endif
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index 6fe54b2813c1..2b87b191b3b8 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -43,8 +43,6 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs)
}
#define arch_check_user_regs arch_check_user_regs
-#define ARCH_SYSCALL_EXIT_WORK (_TIF_SINGLESTEP)
-
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
unsigned long ti_work)
{
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 9f1a0a987e5e..d0dcefb5cc59 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -108,9 +108,6 @@ enum fixed_addresses {
#ifdef CONFIG_PARAVIRT_XXL
FIX_PARAVIRT_BOOTMAP,
#endif
-#ifdef CONFIG_X86_INTEL_MID
- FIX_LNW_VRTC,
-#endif
#ifdef CONFIG_ACPI_APEI_GHES
/* Used for GHES mapping from assorted contexts */
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index 67a4f1cb2aac..ed33a14188f6 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -32,7 +32,19 @@ extern void fpregs_mark_activate(void);
/* Code that is unaware of kernel_fpu_begin_mask() can use this */
static inline void kernel_fpu_begin(void)
{
+#ifdef CONFIG_X86_64
+ /*
+ * Any 64-bit code that uses 387 instructions must explicitly request
+ * KFPU_387.
+ */
+ kernel_fpu_begin_mask(KFPU_MXCSR);
+#else
+ /*
+ * 32-bit kernel code may use 387 operations as well as SSE2, etc,
+ * as long as it checks that the CPU has the required capability.
+ */
kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
+#endif
}
/*
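For illustration only (not part of the patch): per the comments added above, 64-bit kernel_fpu_begin() now initializes only MXCSR, so 64-bit code that executes 387/MMX instructions has to request KFPU_387 itself. The function name example_x87_user() is hypothetical.

#include <asm/fpu/api.h>

static void example_x87_user(void)
{
	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
	/* ... x87/MMX instructions are safe here ... */
	kernel_fpu_end();
}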
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 6bf42aed387e..e6cd3fee562b 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -21,7 +21,9 @@
#define HYPERV_CPUID_FEATURES 0x40000003
#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004
#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005
+#define HYPERV_CPUID_CPU_MANAGEMENT_FEATURES 0x40000007
#define HYPERV_CPUID_NESTED_FEATURES 0x4000000A
+#define HYPERV_CPUID_ISOLATION_CONFIG 0x4000000C
#define HYPERV_CPUID_VIRT_STACK_INTERFACE 0x40000081
#define HYPERV_VS_INTERFACE_EAX_SIGNATURE 0x31235356 /* "VS#1" */
@@ -111,6 +113,15 @@
#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14)
/*
+ * CPU management features identification.
+ * These are HYPERV_CPUID_CPU_MANAGEMENT_FEATURES.EAX bits.
+ */
+#define HV_X64_START_LOGICAL_PROCESSOR BIT(0)
+#define HV_X64_CREATE_ROOT_VIRTUAL_PROCESSOR BIT(1)
+#define HV_X64_PERFORMANCE_COUNTER_SYNC BIT(2)
+#define HV_X64_RESERVED_IDENTITY_BIT BIT(31)
+
+/*
* Virtual processor will never share a physical core with another virtual
* processor, except for virtual processors that are reported as sibling SMT
* threads.
@@ -122,6 +133,20 @@
#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18)
#define HV_X64_NESTED_MSR_BITMAP BIT(19)
+/* HYPERV_CPUID_ISOLATION_CONFIG.EAX bits. */
+#define HV_PARAVISOR_PRESENT BIT(0)
+
+/* HYPERV_CPUID_ISOLATION_CONFIG.EBX bits. */
+#define HV_ISOLATION_TYPE GENMASK(3, 0)
+#define HV_SHARED_GPA_BOUNDARY_ACTIVE BIT(5)
+#define HV_SHARED_GPA_BOUNDARY_BITS GENMASK(11, 6)
+
+enum hv_isolation_type {
+ HV_ISOLATION_TYPE_NONE = 0,
+ HV_ISOLATION_TYPE_VBS = 1,
+ HV_ISOLATION_TYPE_SNP = 2
+};
+
/* Hyper-V specific model specific registers (MSRs) */
/* MSR used to identify the guest OS. */
@@ -523,6 +548,19 @@ struct hv_partition_assist_pg {
u32 tlb_lock_count;
};
+enum hv_interrupt_type {
+ HV_X64_INTERRUPT_TYPE_FIXED = 0x0000,
+ HV_X64_INTERRUPT_TYPE_LOWESTPRIORITY = 0x0001,
+ HV_X64_INTERRUPT_TYPE_SMI = 0x0002,
+ HV_X64_INTERRUPT_TYPE_REMOTEREAD = 0x0003,
+ HV_X64_INTERRUPT_TYPE_NMI = 0x0004,
+ HV_X64_INTERRUPT_TYPE_INIT = 0x0005,
+ HV_X64_INTERRUPT_TYPE_SIPI = 0x0006,
+ HV_X64_INTERRUPT_TYPE_EXTINT = 0x0007,
+ HV_X64_INTERRUPT_TYPE_LOCALINT0 = 0x0008,
+ HV_X64_INTERRUPT_TYPE_LOCALINT1 = 0x0009,
+ HV_X64_INTERRUPT_TYPE_MAXIMUM = 0x000A,
+};
#include <asm-generic/hyperv-tlfs.h>
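For illustration only (not part of the patch): a sketch of how the new isolation leaf might be consumed, assuming the caller has already verified that leaf 0x4000000C is within the hypervisor's reported maximum. The function name example_hv_isolation_type() is hypothetical; FIELD_GET() from <linux/bitfield.h> extracts the type field defined above.

#include <linux/bitfield.h>
#include <asm/hyperv-tlfs.h>
#include <asm/processor.h>

static enum hv_isolation_type example_hv_isolation_type(void)
{
	u32 eax, ebx, ecx, edx;

	cpuid(HYPERV_CPUID_ISOLATION_CONFIG, &eax, &ebx, &ecx, &edx);

	/* EAX bit 0 reports a paravisor; EBX carries the isolation type. */
	return FIELD_GET(HV_ISOLATION_TYPE, ebx);
}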
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index 247a60a47331..5eb3bdf36a41 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -187,23 +187,22 @@ __visible noinstr void func(struct pt_regs *regs, unsigned long error_code)
* has to be done in the function body if necessary.
*/
#define DEFINE_IDTENTRY_IRQ(func) \
-static __always_inline void __##func(struct pt_regs *regs, u8 vector); \
+static void __##func(struct pt_regs *regs, u32 vector); \
\
__visible noinstr void func(struct pt_regs *regs, \
unsigned long error_code) \
{ \
irqentry_state_t state = irqentry_enter(regs); \
+ u32 vector = (u32)(u8)error_code; \
\
instrumentation_begin(); \
- irq_enter_rcu(); \
kvm_set_cpu_l1tf_flush_l1d(); \
- __##func (regs, (u8)error_code); \
- irq_exit_rcu(); \
+ run_irq_on_irqstack_cond(__##func, regs, vector); \
instrumentation_end(); \
irqentry_exit(regs, state); \
} \
\
-static __always_inline void __##func(struct pt_regs *regs, u8 vector)
+static noinline void __##func(struct pt_regs *regs, u32 vector)
/**
* DECLARE_IDTENTRY_SYSVEC - Declare functions for system vector entry points
@@ -237,10 +236,8 @@ __visible noinstr void func(struct pt_regs *regs) \
irqentry_state_t state = irqentry_enter(regs); \
\
instrumentation_begin(); \
- irq_enter_rcu(); \
kvm_set_cpu_l1tf_flush_l1d(); \
run_sysvec_on_irqstack_cond(__##func, regs); \
- irq_exit_rcu(); \
instrumentation_end(); \
irqentry_exit(regs, state); \
} \
@@ -585,6 +582,9 @@ DECLARE_IDTENTRY_MCE(X86_TRAP_MC, exc_machine_check);
#else
DECLARE_IDTENTRY_RAW(X86_TRAP_MC, exc_machine_check);
#endif
+#ifdef CONFIG_XEN_PV
+DECLARE_IDTENTRY_RAW(X86_TRAP_MC, xenpv_exc_machine_check);
+#endif
#endif
/* NMI */
@@ -605,6 +605,9 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_DB, xenpv_exc_debug);
/* #DF */
DECLARE_IDTENTRY_DF(X86_TRAP_DF, exc_double_fault);
+#ifdef CONFIG_XEN_PV
+DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_DF, xenpv_exc_double_fault);
+#endif
/* #VC */
#ifdef CONFIG_AMD_MEM_ENCRYPT
@@ -613,6 +616,7 @@ DECLARE_IDTENTRY_VC(X86_TRAP_VC, exc_vmm_communication);
#ifdef CONFIG_XEN_PV
DECLARE_IDTENTRY_XENCB(X86_TRAP_OTHER, exc_xen_hypervisor_callback);
+DECLARE_IDTENTRY_RAW(X86_TRAP_OTHER, exc_xen_unknown_trap);
#endif
/* Device interrupts common/spurious */
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index a8c3d284fa46..95a448fbb44c 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -7,9 +7,12 @@
* Copyright (C) IBM Corporation, 2009
*/
+#include <asm/byteorder.h>
/* insn_attr_t is defined in inat.h */
#include <asm/inat.h>
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
+
struct insn_field {
union {
insn_value_t value;
@@ -20,6 +23,48 @@ struct insn_field {
unsigned char nbytes;
};
+static inline void insn_field_set(struct insn_field *p, insn_value_t v,
+ unsigned char n)
+{
+ p->value = v;
+ p->nbytes = n;
+}
+
+static inline void insn_set_byte(struct insn_field *p, unsigned char n,
+ insn_byte_t v)
+{
+ p->bytes[n] = v;
+}
+
+#else
+
+struct insn_field {
+ insn_value_t value;
+ union {
+ insn_value_t little;
+ insn_byte_t bytes[4];
+ };
+ /* !0 if we've run insn_get_xxx() for this field */
+ unsigned char got;
+ unsigned char nbytes;
+};
+
+static inline void insn_field_set(struct insn_field *p, insn_value_t v,
+ unsigned char n)
+{
+ p->value = v;
+ p->little = __cpu_to_le32(v);
+ p->nbytes = n;
+}
+
+static inline void insn_set_byte(struct insn_field *p, unsigned char n,
+ insn_byte_t v)
+{
+ p->bytes[n] = v;
+ p->value = __le32_to_cpu(p->little);
+}
+#endif
+
struct insn {
struct insn_field prefixes; /*
* Prefixes
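For illustration only (not part of the patch): the point of the new helpers is that, on a big-endian host, the native-endian ->value and the little-endian byte view stay consistent, which direct assignments to the old union could not guarantee. The function name example_fill_immediate() is hypothetical.

#include <asm/insn.h>

static void example_fill_immediate(struct insn_field *imm)
{
	/* Sets ->value, ->nbytes and (on big-endian) the LE byte image. */
	insn_field_set(imm, 0x11223344, 4);

	/* Patches byte 0 and, on big-endian, recomputes ->value from it. */
	insn_set_byte(imm, 0, 0x55);
}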
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
index cf0e25f45422..c201083b34f6 100644
--- a/arch/x86/include/asm/intel-mid.h
+++ b/arch/x86/include/asm/intel-mid.h
@@ -1,15 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * intel-mid.h: Intel MID specific setup code
+ * Intel MID specific setup code
*
- * (C) Copyright 2009 Intel Corporation
+ * (C) Copyright 2009, 2021 Intel Corporation
*/
#ifndef _ASM_X86_INTEL_MID_H
#define _ASM_X86_INTEL_MID_H
-#include <linux/sfi.h>
#include <linux/pci.h>
-#include <linux/platform_device.h>
extern int intel_mid_pci_init(void);
extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
@@ -22,93 +20,18 @@ extern void intel_mid_pwr_power_off(void);
extern int intel_mid_pwr_get_lss_id(struct pci_dev *pdev);
-extern int get_gpio_by_name(const char *name);
-extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
-extern int __init sfi_parse_mtmr(struct sfi_table_header *table);
-extern int sfi_mrtc_num;
-extern struct sfi_rtc_table_entry sfi_mrtc_array[];
-
-/*
- * Here defines the array of devices platform data that IAFW would export
- * through SFI "DEVS" table, we use name and type to match the device and
- * its platform data.
- */
-struct devs_id {
- char name[SFI_NAME_LEN + 1];
- u8 type;
- u8 delay;
- u8 msic;
- void *(*get_platform_data)(void *info);
-};
-
-#define sfi_device(i) \
- static const struct devs_id *const __intel_mid_sfi_##i##_dev __used \
- __section(".x86_intel_mid_dev.init") = &i
-
-/**
-* struct mid_sd_board_info - template for SD device creation
-* @name: identifies the driver
-* @bus_num: board-specific identifier for a given SD controller
-* @max_clk: the maximum frequency device supports
-* @platform_data: the particular data stored there is driver-specific
-*/
-struct mid_sd_board_info {
- char name[SFI_NAME_LEN];
- int bus_num;
- unsigned short addr;
- u32 max_clk;
- void *platform_data;
-};
-
-/*
- * Medfield is the follow-up of Moorestown, it combines two chip solution into
- * one. Other than that it also added always-on and constant tsc and lapic
- * timers. Medfield is the platform name, and the chip name is called Penwell
- * we treat Medfield/Penwell as a variant of Moorestown. Penwell can be
- * identified via MSRs.
- */
-enum intel_mid_cpu_type {
- /* 1 was Moorestown */
- INTEL_MID_CPU_CHIP_PENWELL = 2,
- INTEL_MID_CPU_CHIP_CLOVERVIEW,
- INTEL_MID_CPU_CHIP_TANGIER,
-};
-
-extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
-
#ifdef CONFIG_X86_INTEL_MID
-static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
-{
- return __intel_mid_cpu_chip;
-}
-
-static inline bool intel_mid_has_msic(void)
-{
- return (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL);
-}
-
extern void intel_scu_devices_create(void);
extern void intel_scu_devices_destroy(void);
#else /* !CONFIG_X86_INTEL_MID */
-#define intel_mid_identify_cpu() 0
-#define intel_mid_has_msic() 0
-
static inline void intel_scu_devices_create(void) { }
static inline void intel_scu_devices_destroy(void) { }
#endif /* !CONFIG_X86_INTEL_MID */
-enum intel_mid_timer_options {
- INTEL_MID_TIMER_DEFAULT,
- INTEL_MID_TIMER_APBT_ONLY,
- INTEL_MID_TIMER_LAPIC_APBT,
-};
-
-extern enum intel_mid_timer_options intel_mid_timer_options;
-
/* Bus Select SoC Fuse value */
#define BSEL_SOC_FUSE_MASK 0x7
/* FSB 133MHz */
@@ -118,16 +41,4 @@ extern enum intel_mid_timer_options intel_mid_timer_options;
/* FSB 83MHz */
#define BSEL_SOC_FUSE_111 0x7
-#define SFI_MTMR_MAX_NUM 8
-#define SFI_MRTC_MAX 8
-
-/* VRTC timer */
-#define MRST_VRTC_MAP_SZ 1024
-/* #define MRST_VRTC_PGOFFSET 0xc00 */
-
-extern void intel_mid_rtc_init(void);
-
-/* The offset for the mapping of global gpio pin to irq */
-#define INTEL_MID_IRQ_OFFSET 0x100
-
#endif /* _ASM_X86_INTEL_MID_H */
diff --git a/arch/x86/include/asm/intel_mid_vrtc.h b/arch/x86/include/asm/intel_mid_vrtc.h
deleted file mode 100644
index 0b44b1abe4d9..000000000000
--- a/arch/x86/include/asm/intel_mid_vrtc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _INTEL_MID_VRTC_H
-#define _INTEL_MID_VRTC_H
-
-extern unsigned char vrtc_cmos_read(unsigned char reg);
-extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
-extern void vrtc_get_time(struct timespec64 *now);
-extern int vrtc_set_mmss(const struct timespec64 *now);
-
-#endif
diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h
index 11d457af68c5..8537f597d20a 100644
--- a/arch/x86/include/asm/intel_scu_ipc.h
+++ b/arch/x86/include/asm/intel_scu_ipc.h
@@ -65,6 +65,4 @@ static inline int intel_scu_ipc_dev_command(struct intel_scu_ipc_dev *scu, int c
inlen, out, outlen);
}
-#include <asm/intel_scu_ipc_legacy.h>
-
#endif
diff --git a/arch/x86/include/asm/intel_scu_ipc_legacy.h b/arch/x86/include/asm/intel_scu_ipc_legacy.h
deleted file mode 100644
index 4cf13fecb673..000000000000
--- a/arch/x86/include/asm/intel_scu_ipc_legacy.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_INTEL_SCU_IPC_LEGACY_H_
-#define _ASM_X86_INTEL_SCU_IPC_LEGACY_H_
-
-#include <linux/notifier.h>
-
-#define IPCMSG_INDIRECT_READ 0x02
-#define IPCMSG_INDIRECT_WRITE 0x05
-
-#define IPCMSG_COLD_OFF 0x80 /* Only for Tangier */
-
-#define IPCMSG_WARM_RESET 0xF0
-#define IPCMSG_COLD_RESET 0xF1
-#define IPCMSG_SOFT_RESET 0xF2
-#define IPCMSG_COLD_BOOT 0xF3
-
-#define IPCMSG_VRTC 0xFA /* Set vRTC device */
-/* Command id associated with message IPCMSG_VRTC */
-#define IPC_CMD_VRTC_SETTIME 1 /* Set time */
-#define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */
-
-/* Don't call these in new code - they will be removed eventually */
-
-/* Read single register */
-static inline int intel_scu_ipc_ioread8(u16 addr, u8 *data)
-{
- return intel_scu_ipc_dev_ioread8(NULL, addr, data);
-}
-
-/* Read a vector */
-static inline int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
-{
- return intel_scu_ipc_dev_readv(NULL, addr, data, len);
-}
-
-/* Write single register */
-static inline int intel_scu_ipc_iowrite8(u16 addr, u8 data)
-{
- return intel_scu_ipc_dev_iowrite8(NULL, addr, data);
-}
-
-/* Write a vector */
-static inline int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
-{
- return intel_scu_ipc_dev_writev(NULL, addr, data, len);
-}
-
-/* Update single register based on the mask */
-static inline int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask)
-{
- return intel_scu_ipc_dev_update(NULL, addr, data, mask);
-}
-
-/* Issue commands to the SCU with or without data */
-static inline int intel_scu_ipc_simple_command(int cmd, int sub)
-{
- return intel_scu_ipc_dev_simple_command(NULL, cmd, sub);
-}
-
-static inline int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
- u32 *out, int outlen)
-{
- /* New API takes both inlen and outlen as bytes so convert here */
- size_t inbytes = inlen * sizeof(u32);
- size_t outbytes = outlen * sizeof(u32);
-
- return intel_scu_ipc_dev_command_with_size(NULL, cmd, sub, in, inbytes,
- inlen, out, outbytes);
-}
-
-extern struct blocking_notifier_head intel_scu_notifier;
-
-static inline void intel_scu_notifier_add(struct notifier_block *nb)
-{
- blocking_notifier_chain_register(&intel_scu_notifier, nb);
-}
-
-static inline void intel_scu_notifier_remove(struct notifier_block *nb)
-{
- blocking_notifier_chain_unregister(&intel_scu_notifier, nb);
-}
-
-static inline int intel_scu_notifier_post(unsigned long v, void *p)
-{
- return blocking_notifier_call_chain(&intel_scu_notifier, v, p);
-}
-
-#define SCU_AVAILABLE 1
-#define SCU_DOWN 2
-
-#endif
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 528c8a71fe7f..768aa234cbb4 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -25,8 +25,6 @@ static inline int irq_canonicalize(int irq)
extern int irq_init_percpu_irqstack(unsigned int cpu);
-#define __ARCH_HAS_DO_SOFTIRQ
-
struct irq_desc;
extern void fixup_irqs(void);
@@ -40,8 +38,6 @@ extern void native_init_IRQ(void);
extern void __handle_irq(struct irq_desc *desc, struct pt_regs *regs);
-extern __visible void do_IRQ(struct pt_regs *regs, unsigned long vector);
-
extern void init_ISA_irqs(void);
extern void __init init_IRQ(void);
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
index 775816965c6a..9b2a0ff76c73 100644
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -7,100 +7,217 @@
#include <asm/processor.h>
#ifdef CONFIG_X86_64
-static __always_inline bool irqstack_active(void)
-{
- return __this_cpu_read(irq_count) != -1;
-}
-
-void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
-void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs),
- struct pt_regs *regs);
-void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
- struct irq_desc *desc);
-static __always_inline void __run_on_irqstack(void (*func)(void))
-{
- void *tos = __this_cpu_read(hardirq_stack_ptr);
-
- __this_cpu_add(irq_count, 1);
- asm_call_on_stack(tos - 8, func, NULL);
- __this_cpu_sub(irq_count, 1);
+/*
+ * Macro to inline switching to an interrupt stack and invoking function
+ * calls from there. The following rules apply:
+ *
+ * - Ordering:
+ *
+ * 1. Write the stack pointer into the top most place of the irq
+ * stack. This ensures that the various unwinders can link back to the
+ * original stack.
+ *
+ * 2. Switch the stack pointer to the top of the irq stack.
+ *
+ * 3. Invoke whatever needs to be done (@asm_call argument)
+ *
+ * 4. Pop the original stack pointer from the top of the irq stack
+ * which brings it back to the original stack where it left off.
+ *
+ * - Function invocation:
+ *
+ * To allow flexible usage of the macro, the actual function code including
+ * the store of the arguments in the call ABI registers is handed in via
+ * the @asm_call argument.
+ *
+ * - Local variables:
+ *
+ * @tos:
+ * The @tos variable holds a pointer to the top of the irq stack and
+ * _must_ be allocated in a non-callee saved register as this is a
+ * restriction coming from objtool.
+ *
+ * Note that @tos appears in both the input and the output constraints
+ * to ensure that the compiler does not assume that R11 is left
+ * untouched in case this macro is used in some place where the
+ * per-CPU interrupt stack pointer is used again afterwards.
+ *
+ * - Function arguments:
+ * The function argument(s), if any, have to be defined in register
+ * variables at the place where this is invoked. Storing the
+ * argument(s) in the proper register(s) is part of the @asm_call
+ *
+ * - Constraints:
+ *
+ * The constraints have to be done very carefully because the compiler
+ * does not know about the assembly call.
+ *
+ * output:
+ * As documented already above the @tos variable is required to be in
+ * the output constraints to make the compiler aware that R11 cannot be
+ * reused after the asm() statement.
+ *
+ * For builds with CONFIG_UNWIND_FRAME_POINTER ASM_CALL_CONSTRAINT is
+ * required as well as this prevents certain creative GCC variants from
+ * misplacing the ASM code.
+ *
+ * input:
+ * - func:
+ * Immediate, which tells the compiler that the function is referenced.
+ *
+ * - tos:
+ * Register. The actual register is defined by the variable declaration.
+ *
+ * - function arguments:
+ * The constraints are handed in via the 'argconstr' argument list. They
+ * describe the register arguments which are used in @asm_call.
+ *
+ * clobbers:
+ * Function calls can clobber anything except the callee-saved
+ * registers. Tell the compiler.
+ */
+#define call_on_irqstack(func, asm_call, argconstr...) \
+{ \
+ register void *tos asm("r11"); \
+ \
+ tos = ((void *)__this_cpu_read(hardirq_stack_ptr)); \
+ \
+ asm_inline volatile( \
+ "movq %%rsp, (%[tos]) \n" \
+ "movq %[tos], %%rsp \n" \
+ \
+ asm_call \
+ \
+ "popq %%rsp \n" \
+ \
+ : "+r" (tos), ASM_CALL_CONSTRAINT \
+ : [__func] "i" (func), [tos] "r" (tos) argconstr \
+ : "cc", "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", \
+ "memory" \
+ ); \
}
-static __always_inline void
-__run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
- struct pt_regs *regs)
-{
- void *tos = __this_cpu_read(hardirq_stack_ptr);
-
- __this_cpu_add(irq_count, 1);
- asm_call_sysvec_on_stack(tos - 8, func, regs);
- __this_cpu_sub(irq_count, 1);
+/* Macros to assert type correctness for run_*_on_irqstack macros */
+#define assert_function_type(func, proto) \
+ static_assert(__builtin_types_compatible_p(typeof(&func), proto))
+
+#define assert_arg_type(arg, proto) \
+ static_assert(__builtin_types_compatible_p(typeof(arg), proto))
+
+/*
+ * Macro to invoke system vector and device interrupt C handlers.
+ */
+#define call_on_irqstack_cond(func, regs, asm_call, constr, c_args...) \
+{ \
+ /* \
+ * User mode entry and interrupt on the irq stack do not \
+ * switch stacks. If from user mode the task stack is empty. \
+ */ \
+ if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) { \
+ irq_enter_rcu(); \
+ func(c_args); \
+ irq_exit_rcu(); \
+ } else { \
+ /* \
+ * Mark the irq stack inuse _before_ and unmark _after_ \
+ * switching stacks. Interrupts are disabled in both \
+ * places. Invoke the stack switch macro with the call \
+ * sequence which matches the above direct invocation. \
+ */ \
+ __this_cpu_write(hardirq_stack_inuse, true); \
+ call_on_irqstack(func, asm_call, constr); \
+ __this_cpu_write(hardirq_stack_inuse, false); \
+ } \
}
-static __always_inline void
-__run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
- struct irq_desc *desc)
-{
- void *tos = __this_cpu_read(hardirq_stack_ptr);
-
- __this_cpu_add(irq_count, 1);
- asm_call_irq_on_stack(tos - 8, func, desc);
- __this_cpu_sub(irq_count, 1);
+/*
+ * Function call sequence for __call_on_irqstack() for system vectors.
+ *
+ * Note that irq_enter_rcu() and irq_exit_rcu() do not use the input
+ * mechanism because these functions are global and cannot be optimized out
+ * when compiling a particular source file which uses one of these macros.
+ *
+ * The argument (regs) does not need to be pushed or stashed in a callee
+ * saved register to be safe vs. the irq_enter_rcu() call because the
+ * clobbers already prevent the compiler from storing it in a callee
+ * clobbered register. As the compiler has to preserve @regs for the final
+ * call to idtentry_exit() anyway, it's likely that it does not cause extra
+ * effort for this asm magic.
+ */
+#define ASM_CALL_SYSVEC \
+ "call irq_enter_rcu \n" \
+ "movq %[arg1], %%rdi \n" \
+ "call %P[__func] \n" \
+ "call irq_exit_rcu \n"
+
+#define SYSVEC_CONSTRAINTS , [arg1] "r" (regs)
+
+#define run_sysvec_on_irqstack_cond(func, regs) \
+{ \
+ assert_function_type(func, void (*)(struct pt_regs *)); \
+ assert_arg_type(regs, struct pt_regs *); \
+ \
+ call_on_irqstack_cond(func, regs, ASM_CALL_SYSVEC, \
+ SYSVEC_CONSTRAINTS, regs); \
}
-#else /* CONFIG_X86_64 */
-static inline bool irqstack_active(void) { return false; }
-static inline void __run_on_irqstack(void (*func)(void)) { }
-static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
- struct pt_regs *regs) { }
-static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
- struct irq_desc *desc) { }
-#endif /* !CONFIG_X86_64 */
-
-static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
-{
- if (IS_ENABLED(CONFIG_X86_32))
- return false;
- if (!regs)
- return !irqstack_active();
- return !user_mode(regs) && !irqstack_active();
+/*
+ * As in ASM_CALL_SYSVEC above the clobbers force the compiler to store
+ * @regs and @vector in callee saved registers.
+ */
+#define ASM_CALL_IRQ \
+ "call irq_enter_rcu \n" \
+ "movq %[arg1], %%rdi \n" \
+ "movl %[arg2], %%esi \n" \
+ "call %P[__func] \n" \
+ "call irq_exit_rcu \n"
+
+#define IRQ_CONSTRAINTS , [arg1] "r" (regs), [arg2] "r" (vector)
+
+#define run_irq_on_irqstack_cond(func, regs, vector) \
+{ \
+ assert_function_type(func, void (*)(struct pt_regs *, u32)); \
+ assert_arg_type(regs, struct pt_regs *); \
+ assert_arg_type(vector, u32); \
+ \
+ call_on_irqstack_cond(func, regs, ASM_CALL_IRQ, \
+ IRQ_CONSTRAINTS, regs, vector); \
}
-
-static __always_inline void run_on_irqstack_cond(void (*func)(void),
- struct pt_regs *regs)
-{
- lockdep_assert_irqs_disabled();
-
- if (irq_needs_irq_stack(regs))
- __run_on_irqstack(func);
- else
- func();
+#define ASM_CALL_SOFTIRQ \
+ "call %P[__func] \n"
+
+/*
+ * Macro to invoke __do_softirq on the irq stack. This is only called from
+ * task context when bottom halves are about to be reenabled and soft
+ * interrupts are pending to be processed. The interrupt stack cannot be in
+ * use here.
+ */
+#define do_softirq_own_stack() \
+{ \
+ __this_cpu_write(hardirq_stack_inuse, true); \
+ call_on_irqstack(__do_softirq, ASM_CALL_SOFTIRQ); \
+ __this_cpu_write(hardirq_stack_inuse, false); \
}
-static __always_inline void
-run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs),
- struct pt_regs *regs)
-{
- lockdep_assert_irqs_disabled();
-
- if (irq_needs_irq_stack(regs))
- __run_sysvec_on_irqstack(func, regs);
- else
- func(regs);
+#else /* CONFIG_X86_64 */
+/* System vector handlers always run on the stack they interrupted. */
+#define run_sysvec_on_irqstack_cond(func, regs) \
+{ \
+ irq_enter_rcu(); \
+ func(regs); \
+ irq_exit_rcu(); \
}
-static __always_inline void
-run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc,
- struct pt_regs *regs)
-{
- lockdep_assert_irqs_disabled();
-
- if (irq_needs_irq_stack(regs))
- __run_irq_on_irqstack(func, desc);
- else
- func(desc);
+/* Switches to the irq stack within func() */
+#define run_irq_on_irqstack_cond(func, regs, vector) \
+{ \
+ irq_enter_rcu(); \
+ func(regs, vector); \
+ irq_exit_rcu(); \
}
+#endif /* !CONFIG_X86_64 */
+
#endif
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 2dfc8d380dab..144d70ea4393 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -35,15 +35,6 @@ extern __always_inline unsigned long native_save_fl(void)
return flags;
}
-extern inline void native_restore_fl(unsigned long flags);
-extern inline void native_restore_fl(unsigned long flags)
-{
- asm volatile("push %0 ; popf"
- : /* no output */
- :"g" (flags)
- :"memory", "cc");
-}
-
static __always_inline void native_irq_disable(void)
{
asm volatile("cli": : :"memory");
@@ -79,11 +70,6 @@ static __always_inline unsigned long arch_local_save_flags(void)
return native_save_fl();
}
-static __always_inline void arch_local_irq_restore(unsigned long flags)
-{
- native_restore_fl(flags);
-}
-
static __always_inline void arch_local_irq_disable(void)
{
native_irq_disable();
@@ -131,25 +117,7 @@ static __always_inline unsigned long arch_local_irq_save(void)
#define SAVE_FLAGS(x) pushfq; popq %rax
#endif
-#define SWAPGS swapgs
-/*
- * Currently paravirt can't handle swapgs nicely when we
- * don't have a stack we can rely on (such as a user space
- * stack). So we either find a way around these or just fault
- * and emulate if a guest tries to call swapgs directly.
- *
- * Either way, this is a good way to document that we don't
- * have a reliable stack. x86_64 only.
- */
-#define SWAPGS_UNSAFE_STACK swapgs
-
#define INTERRUPT_RETURN jmp native_iret
-#define USERGS_SYSRET64 \
- swapgs; \
- sysretq;
-#define USERGS_SYSRET32 \
- swapgs; \
- sysretl
#else
#define INTERRUPT_RETURN iret
@@ -170,6 +138,20 @@ static __always_inline int arch_irqs_disabled(void)
return arch_irqs_disabled_flags(flags);
}
+
+static __always_inline void arch_local_irq_restore(unsigned long flags)
+{
+ if (!arch_irqs_disabled_flags(flags))
+ arch_local_irq_enable();
+}
+#else
+#ifdef CONFIG_X86_64
+#ifdef CONFIG_XEN_PV
+#define SWAPGS ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
+#else
+#define SWAPGS swapgs
+#endif
+#endif
#endif /* !__ASSEMBLY__ */
#endif
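For illustration only (not part of the patch): after this rework, arch_local_irq_restore() only ever re-enables interrupts (when the saved flags had IF set) and never disables them, which is all a correctly nested save/restore pair requires. The function name example_guarded_update() is hypothetical.

#include <asm/irqflags.h>

static void example_guarded_update(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* disable, remember previous IF */
	/* ... touch state that must not race with interrupts ... */
	arch_local_irq_restore(flags);	/* re-enables only if IF was set */
}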
diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h
new file mode 100644
index 000000000000..97bbb4a9083a
--- /dev/null
+++ b/arch/x86/include/asm/kfence.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * x86 KFENCE support.
+ *
+ * Copyright (C) 2020, Google LLC.
+ */
+
+#ifndef _ASM_X86_KFENCE_H
+#define _ASM_X86_KFENCE_H
+
+#include <linux/bug.h>
+#include <linux/kfence.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/set_memory.h>
+#include <asm/tlbflush.h>
+
+/* Force 4K pages for __kfence_pool. */
+static inline bool arch_kfence_init_pool(void)
+{
+ unsigned long addr;
+
+ for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
+ addr += PAGE_SIZE) {
+ unsigned int level;
+
+ if (!lookup_address(addr, &level))
+ return false;
+
+ if (level != PG_LEVEL_4K)
+ set_memory_4k(addr, 1);
+ }
+
+ return true;
+}
+
+/* Protect the given page and flush TLB. */
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ unsigned int level;
+ pte_t *pte = lookup_address(addr, &level);
+
+ if (WARN_ON(!pte || level != PG_LEVEL_4K))
+ return false;
+
+ /*
+ * We need to avoid IPIs, as we may get KFENCE allocations or faults
+ * with interrupts disabled. Therefore, the below is best-effort, and
+ * does not flush TLBs on all CPUs. We can tolerate some inaccuracy;
+ * lazy fault handling takes care of faults after the page is PRESENT.
+ */
+
+ if (protect)
+ set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+ else
+ set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+
+ /* Flush this CPU's TLB. */
+ flush_tlb_one_kernel(addr);
+ return true;
+}
+
+#endif /* _ASM_X86_KFENCE_H */
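For illustration only (not part of the patch): a sketch of how the generic KFENCE code is expected to drive the arch hook above when (un)protecting a guard page. The wrapper name example_toggle_guard_page() is made up.

#include <asm/kfence.h>

static void example_toggle_guard_page(unsigned long addr, bool protect)
{
	/*
	 * Clears or sets _PAGE_PRESENT on the 4K PTE and flushes only the
	 * local TLB entry; stale remote entries are handled lazily by the
	 * fault path, as the comment in kfence_protect_page() explains.
	 */
	WARN_ON_ONCE(!kfence_protect_page(addr, protect));
}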
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 991a7ad540c7..d20a3d6be36e 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -58,14 +58,17 @@ struct arch_specific_insn {
/* copy of the original instruction */
kprobe_opcode_t *insn;
/*
- * boostable = false: This instruction type is not boostable.
- * boostable = true: This instruction has been boosted: we have
+ * boostable = 0: This instruction type is not boostable.
+ * boostable = 1: This instruction has been boosted: we have
* added a relative jump after the instruction copy in insn,
* so no single-step and fixup are needed (unless there's
* a post_handler).
*/
- bool boostable;
- bool if_modifier;
+ unsigned boostable:1;
+ unsigned if_modifier:1;
+ unsigned is_call:1;
+ unsigned is_pushf:1;
+ unsigned is_abs_ip:1;
/* Number of bytes of text poked */
int tp_len;
};
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
new file mode 100644
index 000000000000..323641097f63
--- /dev/null
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(KVM_X86_OP) || !defined(KVM_X86_OP_NULL)
+BUILD_BUG_ON(1)
+#endif
+
+/*
+ * KVM_X86_OP() and KVM_X86_OP_NULL() are used to help generate
+ * "static_call()"s. They are also intended for use when defining
+ * the vmx/svm kvm_x86_ops. KVM_X86_OP() can be used for those
+ * functions that follow the [svm|vmx]_func_name convention.
+ * KVM_X86_OP_NULL() can leave a NULL definition for cases where there
+ * is no implementation, or where the supplied function name doesn't
+ * match the typical naming convention.
+ */
+KVM_X86_OP_NULL(hardware_enable)
+KVM_X86_OP_NULL(hardware_disable)
+KVM_X86_OP_NULL(hardware_unsetup)
+KVM_X86_OP_NULL(cpu_has_accelerated_tpr)
+KVM_X86_OP(has_emulated_msr)
+KVM_X86_OP(vcpu_after_set_cpuid)
+KVM_X86_OP(vm_init)
+KVM_X86_OP_NULL(vm_destroy)
+KVM_X86_OP(vcpu_create)
+KVM_X86_OP(vcpu_free)
+KVM_X86_OP(vcpu_reset)
+KVM_X86_OP(prepare_guest_switch)
+KVM_X86_OP(vcpu_load)
+KVM_X86_OP(vcpu_put)
+KVM_X86_OP(update_exception_bitmap)
+KVM_X86_OP(get_msr)
+KVM_X86_OP(set_msr)
+KVM_X86_OP(get_segment_base)
+KVM_X86_OP(get_segment)
+KVM_X86_OP(get_cpl)
+KVM_X86_OP(set_segment)
+KVM_X86_OP_NULL(get_cs_db_l_bits)
+KVM_X86_OP(set_cr0)
+KVM_X86_OP(is_valid_cr4)
+KVM_X86_OP(set_cr4)
+KVM_X86_OP(set_efer)
+KVM_X86_OP(get_idt)
+KVM_X86_OP(set_idt)
+KVM_X86_OP(get_gdt)
+KVM_X86_OP(set_gdt)
+KVM_X86_OP(sync_dirty_debug_regs)
+KVM_X86_OP(set_dr7)
+KVM_X86_OP(cache_reg)
+KVM_X86_OP(get_rflags)
+KVM_X86_OP(set_rflags)
+KVM_X86_OP(tlb_flush_all)
+KVM_X86_OP(tlb_flush_current)
+KVM_X86_OP_NULL(tlb_remote_flush)
+KVM_X86_OP_NULL(tlb_remote_flush_with_range)
+KVM_X86_OP(tlb_flush_gva)
+KVM_X86_OP(tlb_flush_guest)
+KVM_X86_OP(run)
+KVM_X86_OP_NULL(handle_exit)
+KVM_X86_OP_NULL(skip_emulated_instruction)
+KVM_X86_OP_NULL(update_emulated_instruction)
+KVM_X86_OP(set_interrupt_shadow)
+KVM_X86_OP(get_interrupt_shadow)
+KVM_X86_OP(patch_hypercall)
+KVM_X86_OP(set_irq)
+KVM_X86_OP(set_nmi)
+KVM_X86_OP(queue_exception)
+KVM_X86_OP(cancel_injection)
+KVM_X86_OP(interrupt_allowed)
+KVM_X86_OP(nmi_allowed)
+KVM_X86_OP(get_nmi_mask)
+KVM_X86_OP(set_nmi_mask)
+KVM_X86_OP(enable_nmi_window)
+KVM_X86_OP(enable_irq_window)
+KVM_X86_OP(update_cr8_intercept)
+KVM_X86_OP(check_apicv_inhibit_reasons)
+KVM_X86_OP_NULL(pre_update_apicv_exec_ctrl)
+KVM_X86_OP(refresh_apicv_exec_ctrl)
+KVM_X86_OP(hwapic_irr_update)
+KVM_X86_OP(hwapic_isr_update)
+KVM_X86_OP_NULL(guest_apic_has_interrupt)
+KVM_X86_OP(load_eoi_exitmap)
+KVM_X86_OP(set_virtual_apic_mode)
+KVM_X86_OP_NULL(set_apic_access_page_addr)
+KVM_X86_OP(deliver_posted_interrupt)
+KVM_X86_OP_NULL(sync_pir_to_irr)
+KVM_X86_OP(set_tss_addr)
+KVM_X86_OP(set_identity_map_addr)
+KVM_X86_OP(get_mt_mask)
+KVM_X86_OP(load_mmu_pgd)
+KVM_X86_OP_NULL(has_wbinvd_exit)
+KVM_X86_OP(write_l1_tsc_offset)
+KVM_X86_OP(get_exit_info)
+KVM_X86_OP(check_intercept)
+KVM_X86_OP(handle_exit_irqoff)
+KVM_X86_OP_NULL(request_immediate_exit)
+KVM_X86_OP(sched_in)
+KVM_X86_OP_NULL(update_cpu_dirty_logging)
+KVM_X86_OP_NULL(pre_block)
+KVM_X86_OP_NULL(post_block)
+KVM_X86_OP_NULL(vcpu_blocking)
+KVM_X86_OP_NULL(vcpu_unblocking)
+KVM_X86_OP_NULL(update_pi_irte)
+KVM_X86_OP_NULL(apicv_post_state_restore)
+KVM_X86_OP_NULL(dy_apicv_has_pending_interrupt)
+KVM_X86_OP_NULL(set_hv_timer)
+KVM_X86_OP_NULL(cancel_hv_timer)
+KVM_X86_OP(setup_mce)
+KVM_X86_OP(smi_allowed)
+KVM_X86_OP(pre_enter_smm)
+KVM_X86_OP(pre_leave_smm)
+KVM_X86_OP(enable_smi_window)
+KVM_X86_OP_NULL(mem_enc_op)
+KVM_X86_OP_NULL(mem_enc_reg_region)
+KVM_X86_OP_NULL(mem_enc_unreg_region)
+KVM_X86_OP(get_msr_feature)
+KVM_X86_OP(can_emulate_instruction)
+KVM_X86_OP(apic_init_signal_blocked)
+KVM_X86_OP_NULL(enable_direct_tlbflush)
+KVM_X86_OP_NULL(migrate_timers)
+KVM_X86_OP(msr_filter_changed)
+KVM_X86_OP_NULL(complete_emulated_msr)
+
+#undef KVM_X86_OP
+#undef KVM_X86_OP_NULL
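For illustration only (not part of the patch): with the KVM_X86_OP definition that the kvm_host.h hunk further below adds, including this header expands each line into a static-call declaration, roughly as follows.

/* KVM_X86_OP(get_cpl) becomes, after preprocessing: */
DECLARE_STATIC_CALL(kvm_x86_get_cpl, *(((struct kvm_x86_ops *)0)->get_cpl));

/*
 * Callers then switch from kvm_x86_ops.get_cpl(vcpu) to
 * static_call(kvm_x86_get_cpl)(vcpu), with kvm_ops_static_call_update()
 * pointing every key at the vendor (VMX/SVM) implementation at load time.
 */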
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3d6616f6f6ef..877a4025d8da 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -40,10 +40,8 @@
#define KVM_MAX_VCPUS 288
#define KVM_SOFT_MAX_VCPUS 240
#define KVM_MAX_VCPU_ID 1023
-#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
-#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#define KVM_HALT_POLL_NS_DEFAULT 200000
@@ -52,6 +50,9 @@
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
KVM_DIRTY_LOG_INITIALLY_SET)
+#define KVM_BUS_LOCK_DETECTION_VALID_MODE (KVM_BUS_LOCK_DETECTION_OFF | \
+ KVM_BUS_LOCK_DETECTION_EXIT)
+
/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS KVM_ARCH_REQ(1)
@@ -88,6 +89,8 @@
KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
#define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29)
+#define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
+ KVM_ARCH_REQ_FLAGS(30, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define CR0_RESERVED_BITS \
(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
@@ -200,9 +203,17 @@ enum x86_intercept_stage;
#define DR6_BS (1 << 14)
#define DR6_BT (1 << 15)
#define DR6_RTM (1 << 16)
-#define DR6_FIXED_1 0xfffe0ff0
-#define DR6_INIT 0xffff0ff0
+/*
+ * DR6_ACTIVE_LOW combines fixed-1 and active-low bits.
+ * We can regard all the bits in DR6_FIXED_1 as active_low bits;
+ * they will never be 0 for now, but when they are defined
+ * in the future it will require no code change.
+ *
+ * DR6_ACTIVE_LOW is also used as the init/reset value for DR6.
+ */
+#define DR6_ACTIVE_LOW 0xffff0ff0
#define DR6_VOLATILE 0x0001e00f
+#define DR6_FIXED_1 (DR6_ACTIVE_LOW & ~DR6_VOLATILE)
#define DR7_BP_EN_MASK 0x000000ff
#define DR7_GE (1 << 9)
@@ -337,6 +348,8 @@ struct kvm_mmu_root_info {
#define KVM_MMU_NUM_PREV_ROOTS 3
+#define KVM_HAVE_MMU_RWLOCK
+
struct kvm_mmu_page;
/*
@@ -358,8 +371,6 @@ struct kvm_mmu {
int (*sync_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp);
void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
- void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
- u64 *spte, const void *pte);
hpa_t root_hpa;
gpa_t root_pgd;
union kvm_mmu_role mmu_role;
@@ -510,6 +521,7 @@ struct kvm_vcpu_hv_synic {
/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
+ struct kvm_vcpu *vcpu;
u32 vp_index;
u64 hv_vapic;
s64 runtime_offset;
@@ -520,6 +532,21 @@ struct kvm_vcpu_hv {
cpumask_t tlb_flush;
};
+/* Xen HVM per vcpu emulation context */
+struct kvm_vcpu_xen {
+ u64 hypercall_rip;
+ u32 current_runstate;
+ bool vcpu_info_set;
+ bool vcpu_time_info_set;
+ bool runstate_set;
+ struct gfn_to_hva_cache vcpu_info_cache;
+ struct gfn_to_hva_cache vcpu_time_info_cache;
+ struct gfn_to_hva_cache runstate_cache;
+ u64 last_steal;
+ u64 runstate_entry_time;
+ u64 runstate_times[4];
+};
+
struct kvm_vcpu_arch {
/*
* rip and regs accesses must go through
@@ -640,7 +667,7 @@ struct kvm_vcpu_arch {
int cpuid_nent;
struct kvm_cpuid_entry2 *cpuid_entries;
- unsigned long cr3_lm_rsvd_bits;
+ u64 reserved_gpa_bits;
int maxphyaddr;
int max_tdp_level;
@@ -717,7 +744,9 @@ struct kvm_vcpu_arch {
/* used for guest single stepping over the given code position */
unsigned long singlestep_rip;
- struct kvm_vcpu_hv hyperv;
+ bool hyperv_enabled;
+ struct kvm_vcpu_hv *hyperv;
+ struct kvm_vcpu_xen xen;
cpumask_var_t wbinvd_dirty_mask;
@@ -888,6 +917,14 @@ struct msr_bitmap_range {
unsigned long *bitmap;
};
+/* Xen emulation context */
+struct kvm_xen {
+ bool long_mode;
+ bool shinfo_set;
+ u8 upcall_vector;
+ struct gfn_to_hva_cache shinfo_cache;
+};
+
enum kvm_irqchip_mode {
KVM_IRQCHIP_NONE,
KVM_IRQCHIP_KERNEL, /* created with KVM_CREATE_IRQCHIP */
@@ -908,9 +945,6 @@ struct kvm_arch {
unsigned int indirect_shadow_pages;
u8 mmu_valid_gen;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
- /*
- * Hash table of struct kvm_mmu_page.
- */
struct list_head active_mmu_pages;
struct list_head zapped_obsolete_pages;
struct list_head lpage_disallowed_mmu_pages;
@@ -967,6 +1001,7 @@ struct kvm_arch {
struct hlist_head mask_notifier_list;
struct kvm_hv hyperv;
+ struct kvm_xen xen;
#ifdef CONFIG_KVM_MMU_AUDIT
int audit_point;
@@ -977,6 +1012,7 @@ struct kvm_arch {
u32 bsp_vcpu_id;
u64 disabled_quirks;
+ int cpu_dirty_logging_count;
enum kvm_irqchip_mode irqchip_mode;
u8 nr_reserved_ioapic_pins;
@@ -998,9 +1034,12 @@ struct kvm_arch {
struct msr_bitmap_range ranges[16];
} msr_filter;
+ bool bus_lock_detection_enabled;
+
struct kvm_pmu_event_filter *pmu_event_filter;
struct task_struct *nx_lpage_recovery_thread;
+#ifdef CONFIG_X86_64
/*
* Whether the TDP MMU is enabled for this VM. This contains a
* snapshot of the TDP MMU module parameter from when the VM was
@@ -1026,12 +1065,25 @@ struct kvm_arch {
* tdp_mmu_page set and a root_count of 0.
*/
struct list_head tdp_mmu_pages;
+
+ /*
+ * Protects accesses to the following fields when the MMU lock
+ * is held in read mode:
+ * - tdp_mmu_pages (above)
+ * - the link field of struct kvm_mmu_pages used by the TDP MMU
+ * - lpage_disallowed_mmu_pages
+ * - the lpage_disallowed_link field of struct kvm_mmu_pages used
+ * by the TDP MMU
+ * It is acceptable, but not necessary, to acquire this lock when
+ * the thread holds the MMU lock in write mode.
+ */
+ spinlock_t tdp_mmu_pages_lock;
+#endif /* CONFIG_X86_64 */
};
struct kvm_vm_stat {
ulong mmu_shadow_zapped;
ulong mmu_pte_write;
- ulong mmu_pte_updated;
ulong mmu_pde_zapped;
ulong mmu_flooded;
ulong mmu_recycled;
@@ -1225,30 +1277,11 @@ struct kvm_x86_ops {
void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
/*
- * Arch-specific dirty logging hooks. These hooks are only supposed to
- * be valid if the specific arch has hardware-accelerated dirty logging
- * mechanism. Currently only for PML on VMX.
- *
- * - slot_enable_log_dirty:
- * called when enabling log dirty mode for the slot.
- * - slot_disable_log_dirty:
- * called when disabling log dirty mode for the slot.
- * also called when slot is created with log dirty disabled.
- * - flush_log_dirty:
- * called before reporting dirty_bitmap to userspace.
- * - enable_log_dirty_pt_masked:
- * called when reenabling log dirty for the GFNs in the mask after
- * corresponding bits are cleared in slot->dirty_bitmap.
+ * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer. A zero
+ * value indicates CPU dirty logging is unsupported or disabled.
*/
- void (*slot_enable_log_dirty)(struct kvm *kvm,
- struct kvm_memory_slot *slot);
- void (*slot_disable_log_dirty)(struct kvm *kvm,
- struct kvm_memory_slot *slot);
- void (*flush_log_dirty)(struct kvm *kvm);
- void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- gfn_t offset, unsigned long mask);
- int (*cpu_dirty_log_size)(void);
+ int cpu_dirty_log_size;
+ void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu);
/* pmu operations of sub-arch */
const struct kvm_pmu_ops *pmu_ops;
@@ -1340,6 +1373,19 @@ extern u64 __read_mostly host_efer;
extern bool __read_mostly allow_smaller_maxphyaddr;
extern struct kvm_x86_ops kvm_x86_ops;
+#define KVM_X86_OP(func) \
+ DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
+#define KVM_X86_OP_NULL KVM_X86_OP
+#include <asm/kvm-x86-ops.h>
+
+static inline void kvm_ops_static_call_update(void)
+{
+#define KVM_X86_OP(func) \
+ static_call_update(kvm_x86_##func, kvm_x86_ops.func);
+#define KVM_X86_OP_NULL KVM_X86_OP
+#include <asm/kvm-x86-ops.h>
+}
+
#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
@@ -1351,7 +1397,7 @@ void kvm_arch_free_vm(struct kvm *kvm);
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
if (kvm_x86_ops.tlb_remote_flush &&
- !kvm_x86_ops.tlb_remote_flush(kvm))
+ !static_call(kvm_x86_tlb_remote_flush)(kvm))
return 0;
else
return -ENOTSUPP;
@@ -1378,11 +1424,6 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
struct kvm_memory_slot *memslot);
-void kvm_mmu_slot_set_dirty(struct kvm *kvm,
- struct kvm_memory_slot *memslot);
-void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
@@ -1421,6 +1462,8 @@ extern u8 kvm_tsc_scaling_ratio_frac_bits;
extern u64 kvm_max_tsc_scaling_ratio;
/* 1ull << kvm_tsc_scaling_ratio_frac_bits */
extern u64 kvm_default_tsc_scaling_ratio;
+/* bus lock detection supported? */
+extern bool kvm_has_bus_lock_exit;
extern u64 kvm_mce_cap_supported;
@@ -1501,7 +1544,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
-int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
+void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
@@ -1552,7 +1595,6 @@ void kvm_inject_nmi(struct kvm_vcpu *vcpu);
void kvm_update_dr7(struct kvm_vcpu *vcpu);
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
-int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
@@ -1742,14 +1784,12 @@ static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
- if (kvm_x86_ops.vcpu_blocking)
- kvm_x86_ops.vcpu_blocking(vcpu);
+ static_call_cond(kvm_x86_vcpu_blocking)(vcpu);
}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
- if (kvm_x86_ops.vcpu_unblocking)
- kvm_x86_ops.vcpu_unblocking(vcpu);
+ static_call_cond(kvm_x86_vcpu_unblocking)(vcpu);
}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
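
For readers unfamiliar with the static_call() machinery this header now relies on, the following is a minimal sketch (kernel context, made-up names my_ops/do_flush) of the pattern: one trampoline per op, declared NULL, pointed at the current implementation once, then called directly by callers, with static_call_cond() turning a NULL target into a NOP as in the vcpu_blocking/unblocking hunks above:

	#include <linux/static_call.h>

	struct my_ops {
		void (*do_flush)(int cpu);
	};

	static void hw_do_flush(int cpu) { /* hardware-specific work */ }

	static struct my_ops my_ops = { .do_flush = hw_do_flush };

	/* One trampoline per op; starts out as a patched-out (NULL) call. */
	DEFINE_STATIC_CALL_NULL(my_do_flush, *((struct my_ops *)0)->do_flush);

	static void my_ops_update(void)
	{
		/* Retarget the trampoline, as kvm_ops_static_call_update()
		 * does for every kvm_x86_ops member. */
		static_call_update(my_do_flush, my_ops.do_flush);
	}

	static void caller(int cpu)
	{
		/* Direct call once patched; the _cond variant is a NOP if the
		 * target is still NULL. */
		static_call_cond(my_do_flush)(cpu);
	}
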
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 56cdeaac76a0..ddfb3cad8dff 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -289,28 +289,6 @@ extern void (*mce_threshold_vector)(void);
extern void (*deferred_error_int_vector)(void);
/*
- * Thermal handler
- */
-
-void intel_init_thermal(struct cpuinfo_x86 *c);
-
-/* Interrupt Handler for core thermal thresholds */
-extern int (*platform_thermal_notify)(__u64 msr_val);
-
-/* Interrupt Handler for package thermal thresholds */
-extern int (*platform_thermal_package_notify)(__u64 msr_val);
-
-/* Callback support of rate control, return true, if
- * callback has rate control */
-extern bool (*platform_thermal_package_rate_control)(void);
-
-#ifdef CONFIG_X86_THERMAL_VECTOR
-extern void mcheck_intel_therm_init(void);
-#else
-static inline void mcheck_intel_therm_init(void) { }
-#endif
-
-/*
* Used by APEI to report memory error via /dev/mcelog
*/
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 2b7cc5397f80..ab45a220fac4 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -127,14 +127,12 @@ static inline unsigned int x86_cpuid_family(void)
}
#ifdef CONFIG_MICROCODE
-int __init microcode_init(void);
extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
void reload_early_microcode(void);
extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
extern bool initrd_gone;
#else
-static inline int __init microcode_init(void) { return 0; };
static inline void __init load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { }
static inline void reload_early_microcode(void) { }
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 30f76b966857..ccf60a809a17 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -78,6 +78,13 @@ extern int hyperv_init_cpuhp;
extern void *hv_hypercall_pg;
extern void __percpu **hyperv_pcpu_input_arg;
+extern void __percpu **hyperv_pcpu_output_arg;
+
+extern u64 hv_current_partition_id;
+
+int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
+int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
+int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);
static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
@@ -239,6 +246,8 @@ int hyperv_fill_flush_guest_mapping_list(
struct hv_guest_mapping_flush_list *flush,
u64 start_gfn, u64 end_gfn);
+extern bool hv_root_partition;
+
#ifdef CONFIG_X86_64
void hv_apic_init(void);
void __init hv_init_spinlocks(void);
@@ -250,10 +259,16 @@ static inline void hv_apic_init(void) {}
static inline void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
struct msi_desc *msi_desc)
{
- msi_entry->address = msi_desc->msg.address_lo;
- msi_entry->data = msi_desc->msg.data;
+ msi_entry->address.as_uint32 = msi_desc->msg.address_lo;
+ msi_entry->data.as_uint32 = msi_desc->msg.data;
}
+struct irq_domain *hv_create_pci_msi_domain(void);
+
+int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
+ struct hv_interrupt_entry *entry);
+int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
+
#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 9d5d949e662e..1cb9c17a4cb4 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -9,7 +9,6 @@
#ifdef CONFIG_X86_LOCAL_APIC
-extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
extern int reserve_perfctr_nmi(unsigned int);
extern void release_perfctr_nmi(unsigned int);
extern int reserve_evntsel_nmi(unsigned int);
diff --git a/arch/x86/include/asm/orc_types.h b/arch/x86/include/asm/orc_types.h
index fdbffec4cfde..5a2baf28a1dc 100644
--- a/arch/x86/include/asm/orc_types.h
+++ b/arch/x86/include/asm/orc_types.h
@@ -40,6 +40,8 @@
#define ORC_REG_MAX 15
#ifndef __ASSEMBLY__
+#include <asm/byteorder.h>
+
/*
* This struct is more or less a vastly simplified version of the DWARF Call
* Frame Information standard. It contains only the necessary parts of DWARF
@@ -51,10 +53,18 @@
struct orc_entry {
s16 sp_offset;
s16 bp_offset;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned sp_reg:4;
unsigned bp_reg:4;
unsigned type:2;
unsigned end:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ unsigned bp_reg:4;
+ unsigned sp_reg:4;
+ unsigned unused:5;
+ unsigned end:1;
+ unsigned type:2;
+#endif
} __packed;
#endif /* __ASSEMBLY__ */
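
The reason orc_entry now needs separate little- and big-endian bit-field layouts is that the C ABI allocates bit-fields from opposite ends of the storage unit, so the same declaration produces different byte images. A small user-space demonstration (field names here are only for illustration):

	#include <stdio.h>
	#include <string.h>

	struct demo {
		unsigned a:4;
		unsigned b:4;
		unsigned c:2;
		unsigned pad:22;
	};

	int main(void)
	{
		struct demo d = { .a = 0x1, .b = 0x2, .c = 0x3 };
		unsigned char bytes[sizeof(d)];
		size_t i;

		memcpy(bytes, &d, sizeof(d));
		/* On x86-64 (little-endian bit-fields) this typically prints
		 * "21 03 00 00"; a big-endian ABI packs the same fields from
		 * the other end, hence the #ifdefs in orc_entry. */
		for (i = 0; i < sizeof(d); i++)
			printf("%02x ", bytes[i]);
		printf("\n");
		return 0;
	}
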
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 645bd1d0ee07..64297eabad63 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -66,7 +66,7 @@
* On Intel CPUs, if a SYSCALL instruction is at the highest canonical
* address, then that syscall will enter the kernel with a
* non-canonical return address, and SYSRET will explode dangerously.
- * We avoid this particular problem by preventing anything executable
+ * We avoid this particular problem by preventing anything
* from being mapped at the maximum canonical address.
*
* On AMD CPUs in the Ryzen family, there's a nasty bug in which the
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index f8dce11d2bc1..4abf110e2243 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -648,11 +648,6 @@ static inline notrace unsigned long arch_local_save_flags(void)
return PVOP_CALLEE0(unsigned long, irq.save_fl);
}
-static inline notrace void arch_local_irq_restore(unsigned long f)
-{
- PVOP_VCALLEE1(irq.restore_fl, f);
-}
-
static inline notrace void arch_local_irq_disable(void)
{
PVOP_VCALLEE0(irq.irq_disable);
@@ -776,31 +771,6 @@ extern void default_banner(void);
#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
-/*
- * If swapgs is used while the userspace stack is still current,
- * there's no way to call a pvop. The PV replacement *must* be
- * inlined, or the swapgs instruction must be trapped and emulated.
- */
-#define SWAPGS_UNSAFE_STACK \
- PARA_SITE(PARA_PATCH(PV_CPU_swapgs), swapgs)
-
-/*
- * Note: swapgs is very special, and in practise is either going to be
- * implemented with a single "swapgs" instruction or something very
- * special. Either way, we don't need to save any registers for
- * it.
- */
-#define SWAPGS \
- PARA_SITE(PARA_PATCH(PV_CPU_swapgs), \
- ANNOTATE_RETPOLINE_SAFE; \
- call PARA_INDIRECT(pv_ops+PV_CPU_swapgs); \
- )
-
-#define USERGS_SYSRET64 \
- PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64), \
- ANNOTATE_RETPOLINE_SAFE; \
- jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)
-
#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(clobbers) \
PARA_SITE(PARA_PATCH(PV_IRQ_save_fl), \
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index b6b02b7c19cc..de87087d3bde 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -156,20 +156,10 @@ struct pv_cpu_ops {
u64 (*read_pmc)(int counter);
- /*
- * Switch to usermode gs and return to 64-bit usermode using
- * sysret. Only used in 64-bit kernels to return to 64-bit
- * processes. Usermode register state, including %rsp, must
- * already be restored.
- */
- void (*usergs_sysret64)(void);
-
/* Normal iret. Jump to this with the standard iret stack
frame set up. */
void (*iret)(void);
- void (*swapgs)(void);
-
void (*start_context_switch)(struct task_struct *prev);
void (*end_context_switch)(struct task_struct *next);
#endif
@@ -178,16 +168,13 @@ struct pv_cpu_ops {
struct pv_irq_ops {
#ifdef CONFIG_PARAVIRT_XXL
/*
- * Get/set interrupt state. save_fl and restore_fl are only
- * expected to use X86_EFLAGS_IF; all other bits
- * returned from save_fl are undefined, and may be ignored by
- * restore_fl.
+ * Get/set interrupt state. save_fl is expected to use X86_EFLAGS_IF;
+ * all other bits returned from save_fl are undefined.
*
* NOTE: These functions callers expect the callee to preserve
* more registers than the standard C calling convention.
*/
struct paravirt_callee_save save_fl;
- struct paravirt_callee_save restore_fl;
struct paravirt_callee_save irq_disable;
struct paravirt_callee_save irq_enable;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index b9a7fd0a27e2..544f41a179fb 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -261,8 +261,12 @@ struct x86_pmu_capability {
#define INTEL_PMC_IDX_TD_BAD_SPEC (INTEL_PMC_IDX_METRIC_BASE + 1)
#define INTEL_PMC_IDX_TD_FE_BOUND (INTEL_PMC_IDX_METRIC_BASE + 2)
#define INTEL_PMC_IDX_TD_BE_BOUND (INTEL_PMC_IDX_METRIC_BASE + 3)
-#define INTEL_PMC_IDX_METRIC_END INTEL_PMC_IDX_TD_BE_BOUND
-#define INTEL_PMC_MSK_TOPDOWN ((0xfull << INTEL_PMC_IDX_METRIC_BASE) | \
+#define INTEL_PMC_IDX_TD_HEAVY_OPS (INTEL_PMC_IDX_METRIC_BASE + 4)
+#define INTEL_PMC_IDX_TD_BR_MISPREDICT (INTEL_PMC_IDX_METRIC_BASE + 5)
+#define INTEL_PMC_IDX_TD_FETCH_LAT (INTEL_PMC_IDX_METRIC_BASE + 6)
+#define INTEL_PMC_IDX_TD_MEM_BOUND (INTEL_PMC_IDX_METRIC_BASE + 7)
+#define INTEL_PMC_IDX_METRIC_END INTEL_PMC_IDX_TD_MEM_BOUND
+#define INTEL_PMC_MSK_TOPDOWN ((0xffull << INTEL_PMC_IDX_METRIC_BASE) | \
INTEL_PMC_MSK_FIXED_SLOTS)
/*
@@ -280,8 +284,14 @@ struct x86_pmu_capability {
#define INTEL_TD_METRIC_BAD_SPEC 0x8100 /* Bad speculation metric */
#define INTEL_TD_METRIC_FE_BOUND 0x8200 /* FE bound metric */
#define INTEL_TD_METRIC_BE_BOUND 0x8300 /* BE bound metric */
-#define INTEL_TD_METRIC_MAX INTEL_TD_METRIC_BE_BOUND
-#define INTEL_TD_METRIC_NUM 4
+/* Level 2 metrics */
+#define INTEL_TD_METRIC_HEAVY_OPS 0x8400 /* Heavy Operations metric */
+#define INTEL_TD_METRIC_BR_MISPREDICT 0x8500 /* Branch Mispredict metric */
+#define INTEL_TD_METRIC_FETCH_LAT 0x8600 /* Fetch Latency metric */
+#define INTEL_TD_METRIC_MEM_BOUND 0x8700 /* Memory bound metric */
+
+#define INTEL_TD_METRIC_MAX INTEL_TD_METRIC_MEM_BOUND
+#define INTEL_TD_METRIC_NUM 8
static inline bool is_metric_idx(int idx)
{
@@ -483,11 +493,7 @@ static inline void perf_check_microcode(void) { }
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern int x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
-static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
-{
- *nr = 0;
- return NULL;
-}
+struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
static inline int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
return -1;
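
The new level-2 top-down defines follow the encoding already implied by the existing ones: metric i (0..7) is pseudo-encoded as 0x8000 + 0x100 * i, so the last level-2 metric lands on 0x8700. A small user-space sketch of that arithmetic:

	#include <assert.h>
	#include <stdio.h>

	#define INTEL_TD_METRIC_RETIRING	0x8000
	#define INTEL_TD_METRIC_NUM		8

	static unsigned int td_metric_event(unsigned int idx)
	{
		return INTEL_TD_METRIC_RETIRING + 0x100 * idx;
	}

	int main(void)
	{
		unsigned int i;

		/* Memory bound, the last level-2 metric, encodes as 0x8700. */
		assert(td_metric_event(INTEL_TD_METRIC_NUM - 1) == 0x8700);
		for (i = 0; i < INTEL_TD_METRIC_NUM; i++)
			printf("metric %u -> %#x\n", i, td_metric_event(i));
		return 0;
	}
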
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 394757ee030a..f24d7ef8fffa 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -177,8 +177,6 @@ enum page_cache_mode {
#define __pgprot(x) ((pgprot_t) { (x) } )
#define __pg(x) __pgprot(x)
-#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
-
#define PAGE_NONE __pg( 0| 0| 0|___A| 0| 0| 0|___G)
#define PAGE_SHARED __pg(__PP|__RW|_USR|___A|__NX| 0| 0| 0)
#define PAGE_SHARED_EXEC __pg(__PP|__RW|_USR|___A| 0| 0| 0| 0)
diff --git a/arch/x86/include/asm/platform_sst_audio.h b/arch/x86/include/asm/platform_sst_audio.h
index 16b9f220bdeb..40f92270515b 100644
--- a/arch/x86/include/asm/platform_sst_audio.h
+++ b/arch/x86/include/asm/platform_sst_audio.h
@@ -10,8 +10,6 @@
#ifndef _PLATFORM_SST_AUDIO_H_
#define _PLATFORM_SST_AUDIO_H_
-#include <linux/sfi.h>
-
#define MAX_NUM_STREAMS_MRFLD 25
#define MAX_NUM_STREAMS MAX_NUM_STREAMS_MRFLD
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 69485ca13665..f8cb8af4de5c 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -5,6 +5,7 @@
#include <asm/rmwcc.h>
#include <asm/percpu.h>
#include <linux/thread_info.h>
+#include <linux/static_call_types.h>
DECLARE_PER_CPU(int, __preempt_count);
@@ -103,16 +104,45 @@ static __always_inline bool should_resched(int preempt_offset)
}
#ifdef CONFIG_PREEMPTION
- extern asmlinkage void preempt_schedule_thunk(void);
-# define __preempt_schedule() \
- asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT)
- extern asmlinkage void preempt_schedule(void);
- extern asmlinkage void preempt_schedule_notrace_thunk(void);
-# define __preempt_schedule_notrace() \
- asm volatile ("call preempt_schedule_notrace_thunk" : ASM_CALL_CONSTRAINT)
+extern asmlinkage void preempt_schedule(void);
+extern asmlinkage void preempt_schedule_thunk(void);
- extern asmlinkage void preempt_schedule_notrace(void);
-#endif
+#define __preempt_schedule_func preempt_schedule_thunk
+
+extern asmlinkage void preempt_schedule_notrace(void);
+extern asmlinkage void preempt_schedule_notrace_thunk(void);
+
+#define __preempt_schedule_notrace_func preempt_schedule_notrace_thunk
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+DECLARE_STATIC_CALL(preempt_schedule, __preempt_schedule_func);
+
+#define __preempt_schedule() \
+do { \
+ __STATIC_CALL_MOD_ADDRESSABLE(preempt_schedule); \
+ asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule) : ASM_CALL_CONSTRAINT); \
+} while (0)
+
+DECLARE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
+
+#define __preempt_schedule_notrace() \
+do { \
+ __STATIC_CALL_MOD_ADDRESSABLE(preempt_schedule_notrace); \
+ asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule_notrace) : ASM_CALL_CONSTRAINT); \
+} while (0)
+
+#else /* PREEMPT_DYNAMIC */
+
+#define __preempt_schedule() \
+ asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT);
+
+#define __preempt_schedule_notrace() \
+ asm volatile ("call preempt_schedule_notrace_thunk" : ASM_CALL_CONSTRAINT);
+
+#endif /* PREEMPT_DYNAMIC */
+
+#endif /* PREEMPTION */
#endif /* __ASM_PREEMPT_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c20a52b5534b..dc6d149bf851 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -426,8 +426,6 @@ struct irq_stack {
char stack[IRQ_STACK_SIZE];
} __aligned(IRQ_STACK_SIZE);
-DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
-
#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#else
@@ -454,7 +452,8 @@ static inline unsigned long cpu_kernelmode_gs_base(int cpu)
return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
}
-DECLARE_PER_CPU(unsigned int, irq_count);
+DECLARE_PER_CPU(void *, hardirq_stack_ptr);
+DECLARE_PER_CPU(bool, hardirq_stack_inuse);
extern asmlinkage void ignore_sysret(void);
/* Save actual FS/GS selectors and bases to current->thread */
@@ -473,9 +472,9 @@ struct stack_canary {
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
-/* Per CPU softirq stack pointer */
+DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
-#endif /* X86_64 */
+#endif /* !X86_64 */
extern unsigned int fpu_kernel_xstate_size;
extern unsigned int fpu_user_xstate_size;
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 3ff0d48469f2..b2d504f11937 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -101,6 +101,7 @@
#define REQUIRED_MASK16 0
#define REQUIRED_MASK17 0
#define REQUIRED_MASK18 0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
+#define REQUIRED_MASK19 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h
index 07603064df8f..d60ed0668a59 100644
--- a/arch/x86/include/asm/resctrl.h
+++ b/arch/x86/include/asm/resctrl.h
@@ -56,19 +56,22 @@ static void __resctrl_sched_in(void)
struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
u32 closid = state->default_closid;
u32 rmid = state->default_rmid;
+ u32 tmp;
/*
* If this task has a closid/rmid assigned, use it.
* Else use the closid/rmid assigned to this cpu.
*/
if (static_branch_likely(&rdt_alloc_enable_key)) {
- if (current->closid)
- closid = current->closid;
+ tmp = READ_ONCE(current->closid);
+ if (tmp)
+ closid = tmp;
}
if (static_branch_likely(&rdt_mon_enable_key)) {
- if (current->rmid)
- rmid = current->rmid;
+ tmp = READ_ONCE(current->rmid);
+ if (tmp)
+ rmid = tmp;
}
if (closid != state->cur_closid || rmid != state->cur_rmid) {
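
The motivation for the READ_ONCE() change above is that another CPU may update current->closid/rmid concurrently, so reading the field twice ("if (current->closid) closid = current->closid;") can observe two different values. A minimal sketch of the safe pattern (kernel context; my_task_field is a stand-in name):

	#include <linux/compiler.h>
	#include <linux/types.h>

	static u32 pick_id(u32 default_id, u32 *my_task_field)
	{
		u32 tmp = READ_ONCE(*my_task_field);	/* single, non-torn load */

		return tmp ? tmp : default_id;
	}
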
diff --git a/arch/x86/include/asm/softirq_stack.h b/arch/x86/include/asm/softirq_stack.h
new file mode 100644
index 000000000000..889d53d6a0e1
--- /dev/null
+++ b/arch/x86/include/asm/softirq_stack.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_SOFTIRQ_STACK_H
+#define _ASM_X86_SOFTIRQ_STACK_H
+
+#ifdef CONFIG_X86_64
+# include <asm/irq_stack.h>
+#else
+# include <asm-generic/softirq_stack.h>
+#endif
+
+#endif
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index cc177b4431ae..1d3cbaef4bb7 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -243,10 +243,10 @@ static inline void serialize(void)
}
/* The dst parameter must be 64-bytes aligned */
-static inline void movdir64b(void *dst, const void *src)
+static inline void movdir64b(void __iomem *dst, const void *src)
{
const struct { char _[64]; } *__src = src;
- struct { char _[64]; } *__dst = dst;
+ struct { char _[64]; } __iomem *__dst = dst;
/*
* MOVDIR64B %(rdx), rax.
@@ -286,7 +286,7 @@ static inline void movdir64b(void *dst, const void *src)
static inline int enqcmds(void __iomem *dst, const void *src)
{
const struct { char _[64]; } *__src = src;
- struct { char _[64]; } *__dst = dst;
+ struct { char _[64]; } __iomem *__dst = dst;
int zf;
/*
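
The __iomem annotations above reflect the intended usage: the destination of MOVDIR64B/ENQCMDS is an ioremapped device portal, while the 64-byte descriptor source lives in regular kernel memory. A hedged usage sketch (my_desc and submit_descriptor are made-up names):

	#include <linux/compiler.h>
	#include <linux/io.h>
	#include <linux/types.h>
	#include <asm/special_insns.h>

	struct my_desc { u8 bytes[64]; } __aligned(64);

	static int submit_descriptor(void __iomem *portal, struct my_desc *desc)
	{
		/* Non-zero return means the device did not accept the command;
		 * retry policy is left to the caller. */
		return enqcmds(portal, desc);
	}
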
diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h
index c37f11999d0c..cbb67b6030f9 100644
--- a/arch/x86/include/asm/static_call.h
+++ b/arch/x86/include/asm/static_call.h
@@ -37,4 +37,11 @@
#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \
__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; nop; nop; nop; nop")
+
+#define ARCH_ADD_TRAMP_KEY(name) \
+ asm(".pushsection .static_call_tramp_key, \"a\" \n" \
+ ".long " STATIC_CALL_TRAMP_STR(name) " - . \n" \
+ ".long " STATIC_CALL_KEY_STR(name) " - . \n" \
+ ".popsection \n")
+
#endif /* _ASM_STATIC_CALL_H */
diff --git a/arch/x86/include/asm/thermal.h b/arch/x86/include/asm/thermal.h
new file mode 100644
index 000000000000..ddbdefd5b94f
--- /dev/null
+++ b/arch/x86/include/asm/thermal.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_THERMAL_H
+#define _ASM_X86_THERMAL_H
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+void intel_init_thermal(struct cpuinfo_x86 *c);
+bool x86_thermal_enabled(void);
+void intel_thermal_interrupt(void);
+#else
+static inline void intel_init_thermal(struct cpuinfo_x86 *c) { }
+#endif
+
+#endif /* _ASM_X86_THERMAL_H */
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 820082bd6880..1bfe979bb9bc 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -4,7 +4,6 @@
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#define tlb_flush tlb_flush
static inline void tlb_flush(struct mmu_gather *tlb);
diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h
index 664d4610d700..8e574c0afef8 100644
--- a/arch/x86/include/asm/unwind_hints.h
+++ b/arch/x86/include/asm/unwind_hints.h
@@ -48,17 +48,8 @@
UNWIND_HINT_REGS base=\base offset=\offset partial=1
.endm
-.macro UNWIND_HINT_FUNC sp_offset=8
- UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=\sp_offset type=UNWIND_HINT_TYPE_CALL
-.endm
-
-/*
- * RET_OFFSET: Used on instructions that terminate a function; mostly RETURN
- * and sibling calls. On these, sp_offset denotes the expected offset from
- * initial_func_cfi.
- */
-.macro UNWIND_HINT_RET_OFFSET sp_offset=8
- UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_RET_OFFSET sp_offset=\sp_offset
+.macro UNWIND_HINT_FUNC
+ UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=8 type=UNWIND_HINT_TYPE_FUNC
.endm
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index 9aad0e0876fb..8757078d4442 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -30,16 +30,29 @@ static inline int cpu_has_vmx(void)
}
-/** Disable VMX on the current CPU
+/**
+ * cpu_vmxoff() - Disable VMX on the current CPU
*
- * vmxoff causes a undefined-opcode exception if vmxon was not run
- * on the CPU previously. Only call this function if you know VMX
- * is enabled.
+ * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
+ *
+ * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
+ * atomically track post-VMXON state, e.g. this may be called in NMI context.
+ * Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
+ * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
+ * magically in RM, VM86, compat mode, or at CPL>0.
*/
-static inline void cpu_vmxoff(void)
+static inline int cpu_vmxoff(void)
{
- asm volatile ("vmxoff");
+ asm_volatile_goto("1: vmxoff\n\t"
+ _ASM_EXTABLE(1b, %l[fault])
+ ::: "cc", "memory" : fault);
+
+ cr4_clear_bits(X86_CR4_VMXE);
+ return 0;
+
+fault:
cr4_clear_bits(X86_CR4_VMXE);
+ return -EIO;
}
static inline int cpu_vmx_enabled(void)
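
With cpu_vmxoff() now reporting whether VMXOFF faulted, a caller such as an emergency-reboot path can note the anomaly instead of dying. A hedged sketch (function name below is made up):

	#include <linux/printk.h>
	#include <asm/virtext.h>

	static void my_emergency_vmxoff(void)
	{
		/* A non-zero return means VMXOFF faulted, i.e. the CPU was not
		 * post-VMXON; CR4.VMXE has been cleared either way. */
		if (cpu_vmx_enabled() && cpu_vmxoff())
			pr_warn_once("VMXOFF faulted; CPU was not post-VMXON\n");
	}
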
diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index 26efbec94448..9e8ac5073ecb 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -36,7 +36,6 @@ struct vm86 {
unsigned long saved_sp0;
unsigned long flags;
- unsigned long screen_bitmap;
unsigned long cpu_type;
struct revectored_struct int_revectored;
struct revectored_struct int21_revectored;
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 38ca445a8429..358707f60d99 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -73,6 +73,7 @@
#define SECONDARY_EXEC_PT_USE_GPA VMCS_CONTROL_BIT(PT_USE_GPA)
#define SECONDARY_EXEC_TSC_SCALING VMCS_CONTROL_BIT(TSC_SCALING)
#define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE VMCS_CONTROL_BIT(USR_WAIT_PAUSE)
+#define SECONDARY_EXEC_BUS_LOCK_DETECTION VMCS_CONTROL_BIT(BUS_LOCK_DETECTION)
#define PIN_BASED_EXT_INTR_MASK VMCS_CONTROL_BIT(INTR_EXITING)
#define PIN_BASED_NMI_EXITING VMCS_CONTROL_BIT(NMI_EXITING)
diff --git a/arch/x86/include/asm/vmxfeatures.h b/arch/x86/include/asm/vmxfeatures.h
index 9915990fd8cf..d9a74681a77d 100644
--- a/arch/x86/include/asm/vmxfeatures.h
+++ b/arch/x86/include/asm/vmxfeatures.h
@@ -83,5 +83,6 @@
#define VMX_FEATURE_TSC_SCALING ( 2*32+ 25) /* Scale hardware TSC when read in guest */
#define VMX_FEATURE_USR_WAIT_PAUSE ( 2*32+ 26) /* Enable TPAUSE, UMONITOR, UMWAIT in guest */
#define VMX_FEATURE_ENCLV_EXITING ( 2*32+ 28) /* "" VM-Exit on ENCLV (leaf dependent) */
+#define VMX_FEATURE_BUS_LOCK_DETECTION ( 2*32+ 30) /* "" VM-Exit when bus lock caused */
#endif /* _ASM_X86_VMXFEATURES_H */
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index 9139b3e86316..baca0b00ef76 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -182,6 +182,9 @@ struct arch_shared_info {
unsigned long p2m_cr3; /* cr3 value of the p2m address space */
unsigned long p2m_vaddr; /* virtual address of the p2m list */
unsigned long p2m_generation; /* generation count of p2m mapping */
+#ifdef CONFIG_X86_32
+ uint32_t wc_sec_hi;
+#endif
};
#endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 1a162e559753..7068e4bb057d 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -87,6 +87,18 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
#endif
/*
+ * The maximum amount of extra memory compared to the base size. The
+ * main scaling factor is the size of struct page. At extreme ratios
+ * of base:extra, all the base memory can be filled with page
+ * structures for the extra memory, leaving no space for anything
+ * else.
+ *
+ * 10x seems like a reasonable balance between scaling flexibility and
+ * leaving a practically usable system.
+ */
+#define XEN_EXTRA_MEM_RATIO (10)
+
+/*
* Helper functions to write or read unsigned long values to/from
* memory, when the access may fault.
*/
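
The arithmetic behind the 10x cap, assuming a 64-byte struct page and 4 KiB pages (both assumptions, stated for illustration only): the page structs describing the extra memory consume 64/4096 of it, i.e. roughly 15.6% of the base size at the maximum 10:1 ratio.

	#include <stdio.h>

	int main(void)
	{
		const double page_size = 4096.0, struct_page = 64.0, ratio = 10.0;
		double overhead_vs_base = ratio * struct_page / page_size;

		printf("struct page overhead at %gx extra: %.1f%% of base RAM\n",
		       ratio, overhead_vs_base * 100.0);
		return 0;
	}
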
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 8e76d3701db3..5a3022c8af82 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -112,6 +112,7 @@ struct kvm_ioapic_state {
#define KVM_NR_IRQCHIPS 3
#define KVM_RUN_X86_SMM (1 << 0)
+#define KVM_RUN_X86_BUS_LOCK (1 << 1)
/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
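
On the userspace side, the new flag shows up in the run structure's flags field after KVM_RUN returns. A hedged VMM-side sketch, assuming the VMM opted in to bus-lock exits via the corresponding capability (error handling omitted):

	#include <linux/kvm.h>
	#include <stdio.h>
	#include <sys/ioctl.h>

	static void run_vcpu_once(int vcpu_fd, struct kvm_run *run)
	{
		ioctl(vcpu_fd, KVM_RUN, 0);

		if (run->flags & KVM_RUN_X86_BUS_LOCK)
			fprintf(stderr, "guest executed a bus-locking instruction\n");
	}
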
diff --git a/arch/x86/include/uapi/asm/vm86.h b/arch/x86/include/uapi/asm/vm86.h
index d2ee4e307ef8..18909b8050bc 100644
--- a/arch/x86/include/uapi/asm/vm86.h
+++ b/arch/x86/include/uapi/asm/vm86.h
@@ -97,7 +97,7 @@ struct revectored_struct {
struct vm86_struct {
struct vm86_regs regs;
unsigned long flags;
- unsigned long screen_bitmap;
+ unsigned long screen_bitmap; /* unused, preserved by vm86() */
unsigned long cpu_type;
struct revectored_struct int_revectored;
struct revectored_struct int21_revectored;
@@ -106,7 +106,7 @@ struct vm86_struct {
/*
* flags masks
*/
-#define VM86_SCREEN_BITMAP 0x0001
+#define VM86_SCREEN_BITMAP 0x0001 /* no longer supported */
struct vm86plus_info_struct {
unsigned long force_return_for_pic:1;
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index ada955c5ebb6..b8e650a985e3 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -89,6 +89,7 @@
#define EXIT_REASON_XRSTORS 64
#define EXIT_REASON_UMWAIT 67
#define EXIT_REASON_TPAUSE 68
+#define EXIT_REASON_BUS_LOCK 74
#define VMX_EXIT_REASONS \
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
@@ -150,7 +151,8 @@
{ EXIT_REASON_XSAVES, "XSAVES" }, \
{ EXIT_REASON_XRSTORS, "XRSTORS" }, \
{ EXIT_REASON_UMWAIT, "UMWAIT" }, \
- { EXIT_REASON_TPAUSE, "TPAUSE" }
+ { EXIT_REASON_TPAUSE, "TPAUSE" }, \
+ { EXIT_REASON_BUS_LOCK, "BUS_LOCK" }
#define VMX_EXIT_REASON_FLAGS \
{ VMX_EXIT_REASONS_FAILED_VMENTRY, "FAILED_VMENTRY" }
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 5eeb808eb024..2ddf08351f0b 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -116,7 +116,6 @@ obj-$(CONFIG_VM86) += vm86_32.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_HPET_TIMER) += hpet.o
-obj-$(CONFIG_APB_TIMER) += apb_timer.o
obj-$(CONFIG_AMD_NB) += amd_nb.o
obj-$(CONFIG_DEBUG_NMI_SELFTEST) += nmi_selftest.o
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index f1bb57b0e41e..cf340d85946a 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-OBJECT_FILES_NON_STANDARD_wakeup_$(BITS).o := y
obj-$(CONFIG_ACPI) += boot.o
obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_$(BITS).o
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 5d3a0b8fd379..56b6865afb2a 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0-only */
.text
#include <linux/linkage.h>
+#include <linux/objtool.h>
#include <asm/segment.h>
#include <asm/pgtable_types.h>
#include <asm/page_types.h>
#include <asm/msr.h>
#include <asm/asm-offsets.h>
#include <asm/frame.h>
+#include <asm/nospec-branch.h>
# Copyright 2003 Pavel Machek <pavel@suse.cz
@@ -39,6 +41,7 @@ SYM_FUNC_START(wakeup_long64)
movq saved_rbp, %rbp
movq saved_rip, %rax
+ ANNOTATE_RETPOLINE_SAFE
jmp *%rax
SYM_FUNC_END(wakeup_long64)
@@ -126,6 +129,7 @@ SYM_FUNC_START(do_suspend_lowlevel)
FRAME_END
jmp restore_processor_state
SYM_FUNC_END(do_suspend_lowlevel)
+STACK_FRAME_NON_STANDARD do_suspend_lowlevel
.data
saved_rbp: .quad 0
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
deleted file mode 100644
index 263eeaddb0aa..000000000000
--- a/arch/x86/kernel/apb_timer.c
+++ /dev/null
@@ -1,347 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * apb_timer.c: Driver for Langwell APB timers
- *
- * (C) Copyright 2009 Intel Corporation
- * Author: Jacob Pan (jacob.jun.pan@intel.com)
- *
- * Note:
- * Langwell is the south complex of Intel Moorestown MID platform. There are
- * eight external timers in total that can be used by the operating system.
- * The timer information, such as frequency and addresses, is provided to the
- * OS via SFI tables.
- * Timer interrupts are routed via FW/HW emulated IOAPIC independently via
- * individual redirection table entries (RTE).
- * Unlike HPET, there is no master counter, therefore one of the timers are
- * used as clocksource. The overall allocation looks like:
- * - timer 0 - NR_CPUs for per cpu timer
- * - one timer for clocksource
- * - one timer for watchdog driver.
- * It is also worth notice that APB timer does not support true one-shot mode,
- * free-running mode will be used here to emulate one-shot mode.
- * APB timer can also be used as broadcast timer along with per cpu local APIC
- * timer, but by default APB timer has higher rating than local APIC timers.
- */
-
-#include <linux/delay.h>
-#include <linux/dw_apb_timer.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/pm.h>
-#include <linux/sfi.h>
-#include <linux/interrupt.h>
-#include <linux/cpu.h>
-#include <linux/irq.h>
-
-#include <asm/fixmap.h>
-#include <asm/apb_timer.h>
-#include <asm/intel-mid.h>
-#include <asm/time.h>
-
-#define APBT_CLOCKEVENT_RATING 110
-#define APBT_CLOCKSOURCE_RATING 250
-
-#define APBT_CLOCKEVENT0_NUM (0)
-#define APBT_CLOCKSOURCE_NUM (2)
-
-static phys_addr_t apbt_address;
-static int apb_timer_block_enabled;
-static void __iomem *apbt_virt_address;
-
-/*
- * Common DW APB timer info
- */
-static unsigned long apbt_freq;
-
-struct apbt_dev {
- struct dw_apb_clock_event_device *timer;
- unsigned int num;
- int cpu;
- unsigned int irq;
- char name[10];
-};
-
-static struct dw_apb_clocksource *clocksource_apbt;
-
-static inline void __iomem *adev_virt_addr(struct apbt_dev *adev)
-{
- return apbt_virt_address + adev->num * APBTMRS_REG_SIZE;
-}
-
-static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev);
-
-#ifdef CONFIG_SMP
-static unsigned int apbt_num_timers_used;
-#endif
-
-static inline void apbt_set_mapping(void)
-{
- struct sfi_timer_table_entry *mtmr;
- int phy_cs_timer_id = 0;
-
- if (apbt_virt_address) {
- pr_debug("APBT base already mapped\n");
- return;
- }
- mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
- if (mtmr == NULL) {
- printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
- APBT_CLOCKEVENT0_NUM);
- return;
- }
- apbt_address = (phys_addr_t)mtmr->phys_addr;
- if (!apbt_address) {
- printk(KERN_WARNING "No timer base from SFI, use default\n");
- apbt_address = APBT_DEFAULT_BASE;
- }
- apbt_virt_address = ioremap(apbt_address, APBT_MMAP_SIZE);
- if (!apbt_virt_address) {
- pr_debug("Failed mapping APBT phy address at %lu\n",\
- (unsigned long)apbt_address);
- goto panic_noapbt;
- }
- apbt_freq = mtmr->freq_hz;
- sfi_free_mtmr(mtmr);
-
- /* Now figure out the physical timer id for clocksource device */
- mtmr = sfi_get_mtmr(APBT_CLOCKSOURCE_NUM);
- if (mtmr == NULL)
- goto panic_noapbt;
-
- /* Now figure out the physical timer id */
- pr_debug("Use timer %d for clocksource\n",
- (int)(mtmr->phys_addr & 0xff) / APBTMRS_REG_SIZE);
- phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff) /
- APBTMRS_REG_SIZE;
-
- clocksource_apbt = dw_apb_clocksource_init(APBT_CLOCKSOURCE_RATING,
- "apbt0", apbt_virt_address + phy_cs_timer_id *
- APBTMRS_REG_SIZE, apbt_freq);
- return;
-
-panic_noapbt:
- panic("Failed to setup APB system timer\n");
-
-}
-
-static inline void apbt_clear_mapping(void)
-{
- iounmap(apbt_virt_address);
- apbt_virt_address = NULL;
-}
-
-static int __init apbt_clockevent_register(void)
-{
- struct sfi_timer_table_entry *mtmr;
- struct apbt_dev *adev = this_cpu_ptr(&cpu_apbt_dev);
-
- mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
- if (mtmr == NULL) {
- printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
- APBT_CLOCKEVENT0_NUM);
- return -ENODEV;
- }
-
- adev->num = smp_processor_id();
- adev->timer = dw_apb_clockevent_init(smp_processor_id(), "apbt0",
- intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ?
- APBT_CLOCKEVENT_RATING - 100 : APBT_CLOCKEVENT_RATING,
- adev_virt_addr(adev), 0, apbt_freq);
- /* Firmware does EOI handling for us. */
- adev->timer->eoi = NULL;
-
- if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
- global_clock_event = &adev->timer->ced;
- printk(KERN_DEBUG "%s clockevent registered as global\n",
- global_clock_event->name);
- }
-
- dw_apb_clockevent_register(adev->timer);
-
- sfi_free_mtmr(mtmr);
- return 0;
-}
-
-#ifdef CONFIG_SMP
-
-static void apbt_setup_irq(struct apbt_dev *adev)
-{
- irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
- irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
-}
-
-/* Should be called with per cpu */
-void apbt_setup_secondary_clock(void)
-{
- struct apbt_dev *adev;
- int cpu;
-
- /* Don't register boot CPU clockevent */
- cpu = smp_processor_id();
- if (!cpu)
- return;
-
- adev = this_cpu_ptr(&cpu_apbt_dev);
- if (!adev->timer) {
- adev->timer = dw_apb_clockevent_init(cpu, adev->name,
- APBT_CLOCKEVENT_RATING, adev_virt_addr(adev),
- adev->irq, apbt_freq);
- adev->timer->eoi = NULL;
- } else {
- dw_apb_clockevent_resume(adev->timer);
- }
-
- printk(KERN_INFO "Registering CPU %d clockevent device %s, cpu %08x\n",
- cpu, adev->name, adev->cpu);
-
- apbt_setup_irq(adev);
- dw_apb_clockevent_register(adev->timer);
-
- return;
-}
-
-/*
- * this notify handler process CPU hotplug events. in case of S0i3, nonboot
- * cpus are disabled/enabled frequently, for performance reasons, we keep the
- * per cpu timer irq registered so that we do need to do free_irq/request_irq.
- *
- * TODO: it might be more reliable to directly disable percpu clockevent device
- * without the notifier chain. currently, cpu 0 may get interrupts from other
- * cpu timers during the offline process due to the ordering of notification.
- * the extra interrupt is harmless.
- */
-static int apbt_cpu_dead(unsigned int cpu)
-{
- struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu);
-
- dw_apb_clockevent_pause(adev->timer);
- if (system_state == SYSTEM_RUNNING) {
- pr_debug("skipping APBT CPU %u offline\n", cpu);
- } else {
- pr_debug("APBT clockevent for cpu %u offline\n", cpu);
- dw_apb_clockevent_stop(adev->timer);
- }
- return 0;
-}
-
-static __init int apbt_late_init(void)
-{
- if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
- !apb_timer_block_enabled)
- return 0;
- return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "x86/apb:dead", NULL,
- apbt_cpu_dead);
-}
-fs_initcall(apbt_late_init);
-#else
-
-void apbt_setup_secondary_clock(void) {}
-
-#endif /* CONFIG_SMP */
-
-static int apbt_clocksource_register(void)
-{
- u64 start, now;
- u64 t1;
-
- /* Start the counter, use timer 2 as source, timer 0/1 for event */
- dw_apb_clocksource_start(clocksource_apbt);
-
- /* Verify whether apbt counter works */
- t1 = dw_apb_clocksource_read(clocksource_apbt);
- start = rdtsc();
-
- /*
- * We don't know the TSC frequency yet, but waiting for
- * 200000 TSC cycles is safe:
- * 4 GHz == 50us
- * 1 GHz == 200us
- */
- do {
- rep_nop();
- now = rdtsc();
- } while ((now - start) < 200000UL);
-
- /* APBT is the only always on clocksource, it has to work! */
- if (t1 == dw_apb_clocksource_read(clocksource_apbt))
- panic("APBT counter not counting. APBT disabled\n");
-
- dw_apb_clocksource_register(clocksource_apbt);
-
- return 0;
-}
-
-/*
- * Early setup the APBT timer, only use timer 0 for booting then switch to
- * per CPU timer if possible.
- * returns 1 if per cpu apbt is setup
- * returns 0 if no per cpu apbt is chosen
- * panic if set up failed, this is the only platform timer on Moorestown.
- */
-void __init apbt_time_init(void)
-{
-#ifdef CONFIG_SMP
- int i;
- struct sfi_timer_table_entry *p_mtmr;
- struct apbt_dev *adev;
-#endif
-
- if (apb_timer_block_enabled)
- return;
- apbt_set_mapping();
- if (!apbt_virt_address)
- goto out_noapbt;
- /*
- * Read the frequency and check for a sane value, for ESL model
- * we extend the possible clock range to allow time scaling.
- */
-
- if (apbt_freq < APBT_MIN_FREQ || apbt_freq > APBT_MAX_FREQ) {
- pr_debug("APBT has invalid freq 0x%lx\n", apbt_freq);
- goto out_noapbt;
- }
- if (apbt_clocksource_register()) {
- pr_debug("APBT has failed to register clocksource\n");
- goto out_noapbt;
- }
- if (!apbt_clockevent_register())
- apb_timer_block_enabled = 1;
- else {
- pr_debug("APBT has failed to register clockevent\n");
- goto out_noapbt;
- }
-#ifdef CONFIG_SMP
- /* kernel cmdline disable apb timer, so we will use lapic timers */
- if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
- printk(KERN_INFO "apbt: disabled per cpu timer\n");
- return;
- }
- pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus());
- if (num_possible_cpus() <= sfi_mtimer_num)
- apbt_num_timers_used = num_possible_cpus();
- else
- apbt_num_timers_used = 1;
- pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used);
-
- /* here we set up per CPU timer data structure */
- for (i = 0; i < apbt_num_timers_used; i++) {
- adev = &per_cpu(cpu_apbt_dev, i);
- adev->num = i;
- adev->cpu = i;
- p_mtmr = sfi_get_mtmr(i);
- if (p_mtmr)
- adev->irq = p_mtmr->irq;
- else
- printk(KERN_ERR "Failed to get timer for cpu %d\n", i);
- snprintf(adev->name, sizeof(adev->name) - 1, "apbt%d", i);
- }
-#endif
-
- return;
-
-out_noapbt:
- apbt_clear_mapping();
- apb_timer_block_enabled = 0;
- panic("failed to enable APB timer\n");
-}
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 6bd20c0de8bc..bda4f2a36868 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -41,6 +41,7 @@
#include <asm/perf_event.h>
#include <asm/x86_init.h>
#include <linux/atomic.h>
+#include <asm/barrier.h>
#include <asm/mpspec.h>
#include <asm/i8259.h>
#include <asm/proto.h>
@@ -477,6 +478,9 @@ static int lapic_next_deadline(unsigned long delta,
{
u64 tsc;
+ /* This MSR is special and need a special fence: */
+ weak_wrmsr_fence();
+
tsc = rdtsc();
wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
return 0;
@@ -1743,6 +1747,7 @@ void apic_ap_setup(void)
#ifdef CONFIG_X86_X2APIC
int x2apic_mode;
+EXPORT_SYMBOL_GPL(x2apic_mode);
enum {
X2APIC_OFF,
@@ -2133,18 +2138,11 @@ void __init register_lapic_address(unsigned long address)
* Local APIC interrupts
*/
-/**
- * spurious_interrupt - Catch all for interrupts raised on unused vectors
- * @regs: Pointer to pt_regs on stack
- * @vector: The vector number
- *
- * This is invoked from ASM entry code to catch all interrupts which
- * trigger on an entry which is routed to the common_spurious idtentry
- * point.
- *
- * Also called from sysvec_spurious_apic_interrupt().
+/*
+ * Common handling code for spurious_interrupt and spurious_vector entry
+ * points below. No point in allowing the compiler to inline it twice.
*/
-DEFINE_IDTENTRY_IRQ(spurious_interrupt)
+static noinline void handle_spurious_interrupt(u8 vector)
{
u32 v;
@@ -2179,9 +2177,23 @@ out:
trace_spurious_apic_exit(vector);
}
+/**
+ * spurious_interrupt - Catch all for interrupts raised on unused vectors
+ * @regs: Pointer to pt_regs on stack
+ * @vector: The vector number
+ *
+ * This is invoked from ASM entry code to catch all interrupts which
+ * trigger on an entry which is routed to the common_spurious idtentry
+ * point.
+ */
+DEFINE_IDTENTRY_IRQ(spurious_interrupt)
+{
+ handle_spurious_interrupt(vector);
+}
+
DEFINE_IDTENTRY_SYSVEC(sysvec_spurious_apic_interrupt)
{
- __spurious_interrupt(regs, SPURIOUS_APIC_VECTOR);
+ handle_spurious_interrupt(SPURIOUS_APIC_VECTOR);
}
/*
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index e4ab4804b20d..c3b60c37c728 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -198,7 +198,7 @@ static int __init parse_noapic(char *str)
}
early_param("noapic", parse_noapic);
-/* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
+/* Will be called in mpparse/ACPI codes for saving IRQ info */
void mp_save_irq(struct mpc_intsrc *m)
{
int i;
@@ -2863,7 +2863,7 @@ int mp_register_ioapic(int id, u32 address, u32 gsi_base,
/*
* If mp_register_ioapic() is called during early boot stage when
- * walking ACPI/SFI/DT tables, it's too early to create irqdomain,
+ * walking ACPI/DT tables, it's too early to create irqdomain,
* we are still using bootmem allocator. So delay it to setup_IO_APIC().
*/
if (hotplug) {
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index df6adc5674c9..f4da9bb69a88 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -29,7 +29,8 @@ static void x2apic_send_IPI(int cpu, int vector)
{
u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
- x2apic_wrmsr_fence();
+ /* x2apic MSRs are special and need a special fence: */
+ weak_wrmsr_fence();
__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}
@@ -41,7 +42,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
unsigned long flags;
u32 dest;
- x2apic_wrmsr_fence();
+ /* x2apic MSRs are special and need a special fence: */
+ weak_wrmsr_fence();
local_irq_save(flags);
tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 0e4e81971567..6bde05a86b4e 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -43,7 +43,8 @@ static void x2apic_send_IPI(int cpu, int vector)
{
u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
- x2apic_wrmsr_fence();
+ /* x2apic MSRs are special and need a special fence: */
+ weak_wrmsr_fence();
__x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL);
}
@@ -54,7 +55,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
unsigned long this_cpu;
unsigned long flags;
- x2apic_wrmsr_fence();
+ /* x2apic MSRs are special and need a special fence: */
+ weak_wrmsr_fence();
local_irq_save(flags);
@@ -125,7 +127,8 @@ void __x2apic_send_IPI_shorthand(int vector, u32 which)
{
unsigned long cfg = __prepare_ICR(which, vector, 0);
- x2apic_wrmsr_fence();
+ /* x2apic MSRs are special and need a special fence: */
+ weak_wrmsr_fence();
native_x2apic_icr_write(cfg, 0);
}
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 828be792231e..b14533af7676 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -13,9 +13,6 @@ int main(void)
{
#ifdef CONFIG_PARAVIRT
#ifdef CONFIG_PARAVIRT_XXL
- OFFSET(PV_CPU_usergs_sysret64, paravirt_patch_template,
- cpu.usergs_sysret64);
- OFFSET(PV_CPU_swapgs, paravirt_patch_template, cpu.swapgs);
#ifdef CONFIG_DEBUG_ENTRY
OFFSET(PV_IRQ_save_fl, paravirt_patch_template, irq.save_fl);
#endif
diff --git a/arch/x86/kernel/cpu/acrn.c b/arch/x86/kernel/cpu/acrn.c
index 0b2c03943ac6..23f5f27b5a02 100644
--- a/arch/x86/kernel/cpu/acrn.c
+++ b/arch/x86/kernel/cpu/acrn.c
@@ -10,6 +10,8 @@
*/
#include <linux/interrupt.h>
+
+#include <asm/acrn.h>
#include <asm/apic.h>
#include <asm/cpufeatures.h>
#include <asm/desc.h>
@@ -19,7 +21,7 @@
static u32 __init acrn_detect(void)
{
- return hypervisor_cpuid_base("ACRNACRNACRN", 0);
+ return acrn_cpuid_base();
}
static void __init acrn_init_platform(void)
@@ -55,6 +57,18 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_acrn_hv_callback)
set_irq_regs(old_regs);
}
+void acrn_setup_intr_handler(void (*handler)(void))
+{
+ acrn_intr_handler = handler;
+}
+EXPORT_SYMBOL_GPL(acrn_setup_intr_handler);
+
+void acrn_remove_intr_handler(void)
+{
+ acrn_intr_handler = NULL;
+}
+EXPORT_SYMBOL_GPL(acrn_remove_intr_handler);
+
const __initconst struct hypervisor_x86 x86_hyper_acrn = {
.name = "ACRN",
.detect = acrn_detect,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 35ad8480c464..ab640abe26b6 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -960,6 +960,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
if (c->extended_cpuid_level >= 0x8000000a)
c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
+ if (c->extended_cpuid_level >= 0x8000001f)
+ c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);
+
init_scattered_cpuid_features(c);
init_speculation_control(c);
@@ -1739,8 +1742,8 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
-DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
-DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
+DEFINE_PER_CPU(void *, hardirq_stack_ptr);
+DEFINE_PER_CPU(bool, hardirq_stack_inuse);
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 59a1e3ce3f14..0e422a544835 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -24,6 +24,7 @@
#include <asm/traps.h>
#include <asm/resctrl.h>
#include <asm/numa.h>
+#include <asm/thermal.h>
#ifdef CONFIG_X86_64
#include <linux/topology.h>
@@ -719,6 +720,8 @@ static void init_intel(struct cpuinfo_x86 *c)
tsx_disable();
split_lock_init();
+
+ intel_init_thermal(c);
}
#ifdef CONFIG_X86_32
@@ -1159,6 +1162,7 @@ static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, 1),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 1),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 1),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 1),
{}
};
diff --git a/arch/x86/kernel/cpu/mce/Makefile b/arch/x86/kernel/cpu/mce/Makefile
index 9f020c994154..015856abdbb1 100644
--- a/arch/x86/kernel/cpu/mce/Makefile
+++ b/arch/x86/kernel/cpu/mce/Makefile
@@ -9,8 +9,6 @@ obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o
mce-inject-y := inject.o
obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o
-obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o
-
obj-$(CONFIG_ACPI_APEI) += apei.o
obj-$(CONFIG_X86_MCELOG_LEGACY) += dev-mcelog.o
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index e133ce1e562b..7962355436da 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -878,6 +878,12 @@ static atomic_t mce_executing;
static atomic_t mce_callin;
/*
+ * Track which CPUs entered the MCA broadcast synchronization and which not in
+ * order to print holdouts.
+ */
+static cpumask_t mce_missing_cpus = CPU_MASK_ALL;
+
+/*
* Check if a timeout waiting for other CPUs happened.
*/
static int mce_timed_out(u64 *t, const char *msg)
@@ -894,8 +900,12 @@ static int mce_timed_out(u64 *t, const char *msg)
if (!mca_cfg.monarch_timeout)
goto out;
if ((s64)*t < SPINUNIT) {
- if (mca_cfg.tolerant <= 1)
+ if (mca_cfg.tolerant <= 1) {
+ if (cpumask_and(&mce_missing_cpus, cpu_online_mask, &mce_missing_cpus))
+ pr_emerg("CPUs not responding to MCE broadcast (may include false positives): %*pbl\n",
+ cpumask_pr_args(&mce_missing_cpus));
mce_panic(msg, NULL, NULL);
+ }
cpu_missing = 1;
return 1;
}
@@ -1006,6 +1016,7 @@ static int mce_start(int *no_way_out)
* is updated before mce_callin.
*/
order = atomic_inc_return(&mce_callin);
+ cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus);
/*
* Wait for everyone.
@@ -1114,6 +1125,7 @@ static int mce_end(int order)
reset:
atomic_set(&global_nwo, 0);
atomic_set(&mce_callin, 0);
+ cpumask_setall(&mce_missing_cpus);
barrier();
/*
@@ -2178,7 +2190,6 @@ __setup("mce", mcheck_enable);
int __init mcheck_init(void)
{
- mcheck_intel_therm_init();
mce_register_decode_chain(&early_nb);
mce_register_decode_chain(&mce_uc_nb);
mce_register_decode_chain(&mce_default_nb);
@@ -2713,6 +2724,7 @@ static void mce_reset(void)
atomic_set(&mce_executing, 0);
atomic_set(&mce_callin, 0);
atomic_set(&global_nwo, 0);
+ cpumask_setall(&mce_missing_cpus);
}
static int fake_panic_get(void *data, u64 *val)
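
The holdout report above uses the "%*pbl" + cpumask_pr_args() idiom, which prints a cpumask as a compact CPU-list string such as "0-3,8". A minimal sketch of that idiom in isolation (kernel context, made-up function name):

	#include <linux/cpumask.h>
	#include <linux/printk.h>

	static void report_missing(const struct cpumask *missing)
	{
		pr_emerg("CPUs not checked in: %*pbl\n", cpumask_pr_args(missing));
	}
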
diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
index c2476fe0682e..e309476743b7 100644
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -531,7 +531,6 @@ static void intel_imc_init(struct cpuinfo_x86 *c)
void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
- intel_init_thermal(c);
intel_init_cmci();
intel_init_lmce();
intel_ppin_init(c);
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index ec6f0415bc6d..b935e1b5f115 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -830,7 +830,7 @@ static const struct attribute_group cpu_root_microcode_group = {
.attrs = cpu_root_microcode_attrs,
};
-int __init microcode_init(void)
+static int __init microcode_init(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
int error;
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 43b54bef5448..e88bc296afca 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -31,6 +31,11 @@
#include <asm/reboot.h>
#include <asm/nmi.h>
#include <clocksource/hyperv_timer.h>
+#include <asm/numa.h>
+
+/* Is Linux running as the root partition? */
+bool hv_root_partition;
+EXPORT_SYMBOL_GPL(hv_root_partition);
struct ms_hyperv_info ms_hyperv;
EXPORT_SYMBOL_GPL(ms_hyperv);
@@ -226,6 +231,32 @@ static void __init hv_smp_prepare_boot_cpu(void)
hv_init_spinlocks();
#endif
}
+
+static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
+{
+#ifdef CONFIG_X86_64
+ int i;
+ int ret;
+#endif
+
+ native_smp_prepare_cpus(max_cpus);
+
+#ifdef CONFIG_X86_64
+ for_each_present_cpu(i) {
+ if (i == 0)
+ continue;
+ ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i));
+ BUG_ON(ret);
+ }
+
+ for_each_present_cpu(i) {
+ if (i == 0)
+ continue;
+ ret = hv_call_create_vp(numa_cpu_node(i), hv_current_partition_id, i, i);
+ BUG_ON(ret);
+ }
+#endif
+}
#endif
static void __init ms_hyperv_init_platform(void)
@@ -243,6 +274,7 @@ static void __init ms_hyperv_init_platform(void)
* Extract the features and hints
*/
ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
+ ms_hyperv.features_b = cpuid_ebx(HYPERV_CPUID_FEATURES);
ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
@@ -256,6 +288,22 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.max_vp_index, ms_hyperv.max_lp_index);
/*
+ * Check CPU management privilege.
+ *
+ * To mirror what Windows does we should extract CPU management
+ * features and use the ReservedIdentityBit to detect if Linux is the
+ * root partition. But that requires negotiating CPU management
+ * interface (a process to be finalized).
+ *
+ * For now, use the privilege flag as the indicator for running as
+ * root.
+ */
+ if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_CPU_MANAGEMENT) {
+ hv_root_partition = true;
+ pr_info("Hyper-V: running as root partition\n");
+ }
+
+ /*
* Extract host information.
*/
if (cpuid_eax(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS) >=
@@ -277,6 +325,14 @@ static void __init ms_hyperv_init_platform(void)
x86_platform.calibrate_cpu = hv_get_tsc_khz;
}
+ if (ms_hyperv.features_b & HV_ISOLATION) {
+ ms_hyperv.isolation_config_a = cpuid_eax(HYPERV_CPUID_ISOLATION_CONFIG);
+ ms_hyperv.isolation_config_b = cpuid_ebx(HYPERV_CPUID_ISOLATION_CONFIG);
+
+ pr_info("Hyper-V: Isolation Config: Group A 0x%x, Group B 0x%x\n",
+ ms_hyperv.isolation_config_a, ms_hyperv.isolation_config_b);
+ }
+
if (ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED) {
ms_hyperv.nested_features =
cpuid_eax(HYPERV_CPUID_NESTED_FEATURES);
@@ -366,6 +422,8 @@ static void __init ms_hyperv_init_platform(void)
# ifdef CONFIG_SMP
smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu;
+ if (hv_root_partition)
+ smp_ops.smp_prepare_cpus = hv_smp_prepare_cpus;
# endif
/*
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 5bd011737272..9231640782fa 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -537,9 +537,9 @@ static void __init print_out_mtrr_range_state(void)
if (!size_base)
continue;
- size_base = to_size_factor(size_base, &size_factor),
+ size_base = to_size_factor(size_base, &size_factor);
start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
- start_base = to_size_factor(start_base, &start_factor),
+ start_base = to_size_factor(start_base, &start_factor);
type = range_state[i].type;
pr_debug("reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index a29997e6cf9e..b90f3f437765 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -3,7 +3,6 @@
* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
* because MTRRs can span up to 40 bits (36bits on most modern x86)
*/
-#define DEBUG
#include <linux/export.h>
#include <linux/init.h>
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 61eb26edc6d2..28c8a23aa42e 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -31,8 +31,6 @@
System Programming Guide; Section 9.11. (1997 edition - PPro).
*/
-#define DEBUG
-
#include <linux/types.h> /* FIXME: kvm_para.h needs this */
#include <linux/stop_machine.h>
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index a5ee607a3b89..3ef5868ac588 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -3,7 +3,7 @@
* local apic based NMI watchdog for various CPUs.
*
* This file also handles reservation of performance counters for coordination
- * with other users (like oprofile).
+ * with other users.
*
* Note that these events normally don't tick when the CPU idles. This means
* the frequency varies with CPU load.
@@ -105,15 +105,6 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
}
-/* checks for a bit availability (hack for oprofile) */
-int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
-{
- BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
- return !test_bit(counter, perfctr_nmi_owner);
-}
-EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
-
int reserve_perfctr_nmi(unsigned int msr)
{
unsigned int counter;
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index ee71c47844cb..c4d320d02fd5 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -572,6 +572,7 @@ union cpuid_0x10_x_edx {
void rdt_last_cmd_clear(void);
void rdt_last_cmd_puts(const char *s);
+__printf(1, 2)
void rdt_last_cmd_printf(const char *fmt, ...);
void rdt_ctrl_update(void *arg);
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 460f3e0df106..f9190adc52cb 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -563,11 +563,11 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
*/
if (rdtgrp->type == RDTCTRL_GROUP) {
- tsk->closid = rdtgrp->closid;
- tsk->rmid = rdtgrp->mon.rmid;
+ WRITE_ONCE(tsk->closid, rdtgrp->closid);
+ WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
} else if (rdtgrp->type == RDTMON_GROUP) {
if (rdtgrp->mon.parent->closid == tsk->closid) {
- tsk->rmid = rdtgrp->mon.rmid;
+ WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
} else {
rdt_last_cmd_puts("Can't move task to different control group\n");
return -EINVAL;
@@ -2310,22 +2310,18 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
for_each_process_thread(p, t) {
if (!from || is_closid_match(t, from) ||
is_rmid_match(t, from)) {
- t->closid = to->closid;
- t->rmid = to->mon.rmid;
+ WRITE_ONCE(t->closid, to->closid);
+ WRITE_ONCE(t->rmid, to->mon.rmid);
-#ifdef CONFIG_SMP
/*
- * This is safe on x86 w/o barriers as the ordering
- * of writing to task_cpu() and t->on_cpu is
- * reverse to the reading here. The detection is
- * inaccurate as tasks might move or schedule
- * before the smp function call takes place. In
- * such a case the function call is pointless, but
+ * If the task is on a CPU, set the CPU in the mask.
+ * The detection is inaccurate as tasks might move or
+ * schedule before the smp function call takes place.
+ * In such a case the function call is pointless, but
* there is no other side effect.
*/
- if (mask && t->on_cpu)
+ if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
cpumask_set_cpu(task_cpu(t), mask);
-#endif
}
}
read_unlock(&tasklist_lock);
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 236924930bf0..972ec3bfa9c0 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -40,11 +40,6 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
- { X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 },
- { X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 },
- { X86_FEATURE_SEV_ES, CPUID_EAX, 3, 0x8000001f, 0 },
- { X86_FEATURE_SME_COHERENT, CPUID_EAX, 10, 0x8000001f, 0 },
- { X86_FEATURE_VM_PAGE_FLUSH, CPUID_EAX, 2, 0x8000001f, 0 },
{ 0, 0, 0, 0, 0 }
};
diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c
index f2eac41bb4ff..8ce6d8371cfb 100644
--- a/arch/x86/kernel/cpu/sgx/driver.c
+++ b/arch/x86/kernel/cpu/sgx/driver.c
@@ -72,6 +72,9 @@ static int sgx_release(struct inode *inode, struct file *file)
synchronize_srcu(&encl->srcu);
mmu_notifier_unregister(&encl_mm->mmu_notifier, encl_mm->mm);
kfree(encl_mm);
+
+ /* 'encl_mm' is gone, put encl_mm->encl reference: */
+ kref_put(&encl->refcount, sgx_encl_release);
}
kref_put(&encl->refcount, sgx_encl_release);
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index ee50a5010277..7449ef33f081 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -141,7 +141,6 @@ static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
struct sgx_encl_page *entry;
unsigned long phys_addr;
struct sgx_encl *encl;
- unsigned long pfn;
vm_fault_t ret;
encl = vma->vm_private_data;
@@ -168,13 +167,6 @@ static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
phys_addr = sgx_get_epc_phys_addr(entry->epc_page);
- /* Check if another thread got here first to insert the PTE. */
- if (!follow_pfn(vma, addr, &pfn)) {
- mutex_unlock(&encl->lock);
-
- return VM_FAULT_NOPAGE;
- }
-
ret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr));
if (ret != VM_FAULT_NOPAGE) {
mutex_unlock(&encl->lock);
@@ -481,6 +473,9 @@ static void sgx_mmu_notifier_free(struct mmu_notifier *mn)
{
struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier);
+ /* 'encl_mm' is going away, put encl_mm->encl reference: */
+ kref_put(&encl_mm->encl->refcount, sgx_encl_release);
+
kfree(encl_mm);
}
@@ -534,6 +529,8 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
if (!encl_mm)
return -ENOMEM;
+ /* Grab a refcount for the encl_mm->encl reference: */
+ kref_get(&encl->refcount);
encl_mm->encl = encl;
encl_mm->mm = mm;
encl_mm->mmu_notifier.ops = &sgx_mmu_notifier_ops;
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index c519fc5f6948..8df81a3ed945 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -700,25 +700,27 @@ static bool __init sgx_page_cache_init(void)
return true;
}
-static void __init sgx_init(void)
+static int __init sgx_init(void)
{
int ret;
int i;
if (!cpu_feature_enabled(X86_FEATURE_SGX))
- return;
+ return -ENODEV;
if (!sgx_page_cache_init())
- return;
+ return -ENOMEM;
- if (!sgx_page_reclaimer_init())
+ if (!sgx_page_reclaimer_init()) {
+ ret = -ENOMEM;
goto err_page_cache;
+ }
ret = sgx_drv_init();
if (ret)
goto err_kthread;
- return;
+ return 0;
err_kthread:
kthread_stop(ksgxd_tsk);
@@ -728,6 +730,8 @@ err_page_cache:
vfree(sgx_epc_sections[i].pages);
memunmap(sgx_epc_sections[i].virt_addr);
}
+
+ return ret;
}
device_initcall(sgx_init);
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 1dd851397bd9..5601b95944fa 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -128,12 +128,21 @@ static __always_inline bool in_exception_stack(unsigned long *stack, struct stac
static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info)
{
- unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
- unsigned long *begin = end - (IRQ_STACK_SIZE / sizeof(long));
+ unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
+ unsigned long *begin;
/*
- * This is a software stack, so 'end' can be a valid stack pointer.
- * It just means the stack is empty.
+ * @end points directly to the topmost stack entry to avoid a -8
+ * adjustment in the stack switch hotpath. Adjust it back before
+ * calculating @begin.
+ */
+ end++;
+ begin = end - (IRQ_STACK_SIZE / sizeof(long));
+
+ /*
+ * Due to the switching logic RSP can never be == @end because the
+ * final operation is 'popq %rsp' which means after that RSP points
+ * to the original stack and not to @end.
*/
if (stack < begin || stack >= end)
return false;
@@ -143,8 +152,9 @@ static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info
info->end = end;
/*
- * The next stack pointer is the first thing pushed by the entry code
- * after switching to the irq stack.
+ * The next stack pointer is stored at the top of the irq stack
+ * before switching to the irq stack. Actual stack entries are all
+ * below that.
*/
info->next_sp = (unsigned long *)*(end - 1);
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 5d8047441a0a..683749b80ae2 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -167,14 +167,14 @@ void fpstate_sanitize_xstate(struct fpu *fpu)
fx->fop = 0;
fx->rip = 0;
fx->rdp = 0;
- memset(&fx->st_space[0], 0, 128);
+ memset(fx->st_space, 0, sizeof(fx->st_space));
}
/*
* SSE is in init state
*/
if (!(xfeatures & XFEATURE_MASK_SSE))
- memset(&fx->xmm_space[0], 0, 256);
+ memset(fx->xmm_space, 0, sizeof(fx->xmm_space));
/*
* First two features are FPU and SSE, which above we handled
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 0d54099c2a3a..7c273846c687 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -184,6 +184,7 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
* It is also used to copy the retq for trampolines.
*/
SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
+ UNWIND_HINT_FUNC
retq
SYM_FUNC_END(ftrace_epilogue)
@@ -276,7 +277,7 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
restore_mcount_regs 8
/* Restore flags */
popfq
- UNWIND_HINT_RET_OFFSET
+ UNWIND_HINT_FUNC
jmp ftrace_epilogue
SYM_FUNC_END(ftrace_regs_caller)
@@ -333,8 +334,7 @@ SYM_FUNC_START(ftrace_graph_caller)
retq
SYM_FUNC_END(ftrace_graph_caller)
-SYM_CODE_START(return_to_handler)
- UNWIND_HINT_EMPTY
+SYM_FUNC_START(return_to_handler)
subq $24, %rsp
/* Save the return values */
@@ -349,5 +349,5 @@ SYM_CODE_START(return_to_handler)
movq (%rsp), %rax
addq $24, %rsp
JMP_NOSPEC rdi
-SYM_CODE_END(return_to_handler)
+SYM_FUNC_END(return_to_handler)
#endif
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 03aa33b58165..668a4a6533d9 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -269,6 +269,20 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
CPU_ENTRY_AREA_TOTAL_SIZE))
return true;
+ /*
+ * When FSGSBASE is enabled, paranoid_entry() fetches the per-CPU
+ * GSBASE value via __per_cpu_offset or pcpu_unit_offsets.
+ */
+#ifdef CONFIG_SMP
+ if (within_area(addr, end, (unsigned long)__per_cpu_offset,
+ sizeof(unsigned long) * nr_cpu_ids))
+ return true;
+#else
+ if (within_area(addr, end, (unsigned long)&pcpu_unit_offsets,
+ sizeof(pcpu_unit_offsets)))
+ return true;
+#endif
+
for_each_possible_cpu(cpu) {
/* The original rw GDT is being used after load_direct_gdt() */
if (within_area(addr, end, (unsigned long)get_cpu_gdt_rw(cpu),
@@ -293,6 +307,14 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
(unsigned long)&per_cpu(cpu_tlbstate, cpu),
sizeof(struct tlb_state)))
return true;
+
+ /*
+ * When running as a guest (X86_FEATURE_HYPERVISOR), local_db_save()
+ * will read the per-CPU cpu_dr7 before clearing the DR7 register.
+ */
+ if (within_area(addr, end, (unsigned long)&per_cpu(cpu_dr7, cpu),
+ sizeof(cpu_dr7)))
+ return true;
}
return false;
@@ -491,15 +513,12 @@ static int hw_breakpoint_handler(struct die_args *args)
struct perf_event *bp;
unsigned long *dr6_p;
unsigned long dr6;
+ bool bpx;
/* The DR6 value is pointed by args->err */
dr6_p = (unsigned long *)ERR_PTR(args->err);
dr6 = *dr6_p;
- /* If it's a single step, TRAP bits are random */
- if (dr6 & DR_STEP)
- return NOTIFY_DONE;
-
/* Do an early return if no trap bits are set in DR6 */
if ((dr6 & DR_TRAP_BITS) == 0)
return NOTIFY_DONE;
@@ -509,28 +528,29 @@ static int hw_breakpoint_handler(struct die_args *args)
if (likely(!(dr6 & (DR_TRAP0 << i))))
continue;
+ bp = this_cpu_read(bp_per_reg[i]);
+ if (!bp)
+ continue;
+
+ bpx = bp->hw.info.type == X86_BREAKPOINT_EXECUTE;
+
/*
- * The counter may be concurrently released but that can only
- * occur from a call_rcu() path. We can then safely fetch
- * the breakpoint, use its callback, touch its counter
- * while we are in an rcu_read_lock() path.
+ * TF and data breakpoints are traps and can be merged, however
+ * instruction breakpoints are faults and will be raised
+ * separately.
+ *
+ * However, DR6 can indicate both TF and instruction
+ * breakpoints. In that case, take TF as it has precedence and
+ * delay the instruction breakpoint until the next exception.
*/
- rcu_read_lock();
+ if (bpx && (dr6 & DR_STEP))
+ continue;
- bp = this_cpu_read(bp_per_reg[i]);
/*
* Reset the 'i'th TRAP bit in dr6 to denote completion of
* exception handling
*/
(*dr6_p) &= ~(DR_TRAP0 << i);
- /*
- * bp can be NULL due to lazy debug register switching
- * or due to concurrent perf counter removing.
- */
- if (!bp) {
- rcu_read_unlock();
- break;
- }
perf_bp_event(bp, args->regs);
@@ -538,11 +558,10 @@ static int hw_breakpoint_handler(struct die_args *args)
* Set up resume flag to avoid breakpoint recursion when
* returning back to origin.
*/
- if (bp->hw.info.type == X86_BREAKPOINT_EXECUTE)
+ if (bpx)
args->regs->flags |= X86_EFLAGS_RF;
-
- rcu_read_unlock();
}
+
/*
* Further processing in do_debug() is needed for a) user-space
* breakpoints (to generate signals) and b) when the system has
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index c5dd50369e2f..58aa712973ac 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -21,6 +21,7 @@
#include <asm/hw_irq.h>
#include <asm/desc.h>
#include <asm/traps.h>
+#include <asm/thermal.h>
#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>
@@ -227,7 +228,7 @@ static __always_inline void handle_irq(struct irq_desc *desc,
struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_X86_64))
- run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
+ generic_handle_irq_desc(desc);
else
__handle_irq(desc, regs);
}
@@ -374,3 +375,23 @@ void fixup_irqs(void)
}
}
#endif
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+static void smp_thermal_vector(void)
+{
+ if (x86_thermal_enabled())
+ intel_thermal_interrupt();
+ else
+ pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
+ smp_processor_id());
+}
+
+DEFINE_IDTENTRY_SYSVEC(sysvec_thermal)
+{
+ trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
+ inc_irq_stat(irq_thermal_count);
+ smp_thermal_vector();
+ trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
+ ack_APIC_irq();
+}
+#endif
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 0b79efc87be5..044902d5a3c4 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -22,6 +22,7 @@
#include <asm/apic.h>
#include <asm/nospec-branch.h>
+#include <asm/softirq_stack.h>
#ifdef CONFIG_DEBUG_STACKOVERFLOW
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 440eed558558..1c0fb96b9e39 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -20,6 +20,7 @@
#include <linux/sched/task_stack.h>
#include <asm/cpu_entry_area.h>
+#include <asm/softirq_stack.h>
#include <asm/irq_stack.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
@@ -48,7 +49,8 @@ static int map_irq_stack(unsigned int cpu)
if (!va)
return -ENOMEM;
- per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
+ /* Store actual TOS to avoid adjustment in the hotpath */
+ per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
return 0;
}
#else
@@ -60,7 +62,8 @@ static int map_irq_stack(unsigned int cpu)
{
void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);
- per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
+ /* Store actual TOS to avoid adjustment in the hotpath */
+ per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
return 0;
}
#endif
@@ -71,8 +74,3 @@ int irq_init_percpu_irqstack(unsigned int cpu)
return 0;
return map_irq_stack(cpu);
}
-
-void do_softirq_own_stack(void)
-{
- run_on_irqstack_cond(__do_softirq, NULL);
-}
diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S
index 0db0375235b4..8ef35063964b 100644
--- a/arch/x86/kernel/irqflags.S
+++ b/arch/x86/kernel/irqflags.S
@@ -13,14 +13,3 @@ SYM_FUNC_START(native_save_fl)
ret
SYM_FUNC_END(native_save_fl)
EXPORT_SYMBOL(native_save_fl)
-
-/*
- * void native_restore_fl(unsigned long flags)
- * %eax/%rdi: flags
- */
-SYM_FUNC_START(native_restore_fl)
- push %_ASM_ARG1
- popf
- ret
-SYM_FUNC_END(native_restore_fl)
-EXPORT_SYMBOL(native_restore_fl)
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index a65e9e97857f..df776cdca327 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -133,26 +133,6 @@ void synthesize_relcall(void *dest, void *from, void *to)
NOKPROBE_SYMBOL(synthesize_relcall);
/*
- * Skip the prefixes of the instruction.
- */
-static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
-{
- insn_attr_t attr;
-
- attr = inat_get_opcode_attribute((insn_byte_t)*insn);
- while (inat_is_legacy_prefix(attr)) {
- insn++;
- attr = inat_get_opcode_attribute((insn_byte_t)*insn);
- }
-#ifdef CONFIG_X86_64
- if (inat_is_rex_prefix(attr))
- insn++;
-#endif
- return insn;
-}
-NOKPROBE_SYMBOL(skip_prefixes);
-
-/*
* Returns non-zero if INSN is boostable.
* RIP relative instructions are adjusted at copying time in 64 bits mode
*/
@@ -312,25 +292,6 @@ static int can_probe(unsigned long paddr)
}
/*
- * Returns non-zero if opcode modifies the interrupt flag.
- */
-static int is_IF_modifier(kprobe_opcode_t *insn)
-{
- /* Skip prefixes */
- insn = skip_prefixes(insn);
-
- switch (*insn) {
- case 0xfa: /* cli */
- case 0xfb: /* sti */
- case 0xcf: /* iret/iretd */
- case 0x9d: /* popf/popfd */
- return 1;
- }
-
- return 0;
-}
-
-/*
* Copy an instruction with recovering modified instruction by kprobes
* and adjust the displacement if the instruction uses the %rip-relative
* addressing mode. Note that since @real will be the final place of copied
@@ -411,9 +372,9 @@ static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p,
synthesize_reljump(buf + len, p->ainsn.insn + len,
p->addr + insn->length);
len += JMP32_INSN_SIZE;
- p->ainsn.boostable = true;
+ p->ainsn.boostable = 1;
} else {
- p->ainsn.boostable = false;
+ p->ainsn.boostable = 0;
}
return len;
@@ -450,6 +411,67 @@ void free_insn_page(void *page)
module_memfree(page);
}
+static void set_resume_flags(struct kprobe *p, struct insn *insn)
+{
+ insn_byte_t opcode = insn->opcode.bytes[0];
+
+ switch (opcode) {
+ case 0xfa: /* cli */
+ case 0xfb: /* sti */
+ case 0x9d: /* popf/popfd */
+ /* Check whether the instruction modifies Interrupt Flag or not */
+ p->ainsn.if_modifier = 1;
+ break;
+ case 0x9c: /* pushfl */
+ p->ainsn.is_pushf = 1;
+ break;
+ case 0xcf: /* iret */
+ p->ainsn.if_modifier = 1;
+ fallthrough;
+ case 0xc2: /* ret/lret */
+ case 0xc3:
+ case 0xca:
+ case 0xcb:
+ case 0xea: /* jmp absolute -- ip is correct */
+ /* ip is already adjusted, no more changes required */
+ p->ainsn.is_abs_ip = 1;
+ /* Without resume jump, this is boostable */
+ p->ainsn.boostable = 1;
+ break;
+ case 0xe8: /* call relative - Fix return addr */
+ p->ainsn.is_call = 1;
+ break;
+#ifdef CONFIG_X86_32
+ case 0x9a: /* call absolute -- same as call absolute, indirect */
+ p->ainsn.is_call = 1;
+ p->ainsn.is_abs_ip = 1;
+ break;
+#endif
+ case 0xff:
+ opcode = insn->opcode.bytes[1];
+ if ((opcode & 0x30) == 0x10) {
+ /*
+ * call absolute, indirect
+ * Fix return addr; ip is correct.
+ * But this is not boostable
+ */
+ p->ainsn.is_call = 1;
+ p->ainsn.is_abs_ip = 1;
+ break;
+ } else if (((opcode & 0x31) == 0x20) ||
+ ((opcode & 0x31) == 0x21)) {
+ /*
+ * jmp near and far, absolute indirect
+ * ip is correct.
+ */
+ p->ainsn.is_abs_ip = 1;
+ /* Without resume jump, this is boostable */
+ p->ainsn.boostable = 1;
+ }
+ break;
+ }
+}
+
static int arch_copy_kprobe(struct kprobe *p)
{
struct insn insn;
@@ -467,8 +489,8 @@ static int arch_copy_kprobe(struct kprobe *p)
*/
len = prepare_boost(buf, p, &insn);
- /* Check whether the instruction modifies Interrupt Flag or not */
- p->ainsn.if_modifier = is_IF_modifier(buf);
+ /* Analyze the opcode and set resume flags */
+ set_resume_flags(p, &insn);
/* Also, displacement change doesn't affect the first byte */
p->opcode = buf[0];
@@ -491,6 +513,9 @@ int arch_prepare_kprobe(struct kprobe *p)
if (!can_probe((unsigned long)p->addr))
return -EILSEQ;
+
+ memset(&p->ainsn, 0, sizeof(p->ainsn));
+
/* insn: must be on special executable page on x86. */
p->ainsn.insn = get_insn_slot();
if (!p->ainsn.insn)
@@ -806,11 +831,6 @@ NOKPROBE_SYMBOL(trampoline_handler);
* 2) If the single-stepped instruction was a call, the return address
* that is atop the stack is the address following the copied instruction.
* We need to make it the address following the original instruction.
- *
- * If this is the first time we've single-stepped the instruction at
- * this probepoint, and the instruction is boostable, boost it: add a
- * jump instruction after the copied instruction, that jumps to the next
- * instruction after the probepoint.
*/
static void resume_execution(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
@@ -818,60 +838,20 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
unsigned long *tos = stack_addr(regs);
unsigned long copy_ip = (unsigned long)p->ainsn.insn;
unsigned long orig_ip = (unsigned long)p->addr;
- kprobe_opcode_t *insn = p->ainsn.insn;
-
- /* Skip prefixes */
- insn = skip_prefixes(insn);
regs->flags &= ~X86_EFLAGS_TF;
- switch (*insn) {
- case 0x9c: /* pushfl */
+
+ /* Fixup the contents of top of stack */
+ if (p->ainsn.is_pushf) {
*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
*tos |= kcb->kprobe_old_flags;
- break;
- case 0xc2: /* iret/ret/lret */
- case 0xc3:
- case 0xca:
- case 0xcb:
- case 0xcf:
- case 0xea: /* jmp absolute -- ip is correct */
- /* ip is already adjusted, no more changes required */
- p->ainsn.boostable = true;
- goto no_change;
- case 0xe8: /* call relative - Fix return addr */
+ } else if (p->ainsn.is_call) {
*tos = orig_ip + (*tos - copy_ip);
- break;
-#ifdef CONFIG_X86_32
- case 0x9a: /* call absolute -- same as call absolute, indirect */
- *tos = orig_ip + (*tos - copy_ip);
- goto no_change;
-#endif
- case 0xff:
- if ((insn[1] & 0x30) == 0x10) {
- /*
- * call absolute, indirect
- * Fix return addr; ip is correct.
- * But this is not boostable
- */
- *tos = orig_ip + (*tos - copy_ip);
- goto no_change;
- } else if (((insn[1] & 0x31) == 0x20) ||
- ((insn[1] & 0x31) == 0x21)) {
- /*
- * jmp near and far, absolute indirect
- * ip is correct. And this is boostable
- */
- p->ainsn.boostable = true;
- goto no_change;
- }
- break;
- default:
- break;
}
- regs->ip += orig_ip - copy_ip;
+ if (!p->ainsn.is_abs_ip)
+ regs->ip += orig_ip - copy_ip;
-no_change:
restore_btf();
}
NOKPROBE_SYMBOL(resume_execution);
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index b8aee71840ae..aa15132228da 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -398,9 +398,15 @@ static void free_ldt_pgtables(struct mm_struct *mm)
if (!boot_cpu_has(X86_FEATURE_PTI))
return;
- tlb_gather_mmu(&tlb, mm, start, end);
+ /*
+ * Although free_pgd_range() is intended for freeing user
+ * page-tables, it also works out for kernel mappings on x86.
+ * We use tlb_gather_mmu_fullmm() to avoid confusing the
+ * range-tracking logic in __tlb_adjust_range().
+ */
+ tlb_gather_mmu_fullmm(&tlb, mm);
free_pgd_range(&tlb, start, end, start, end);
- tlb_finish_mmu(&tlb, start, end);
+ tlb_finish_mmu(&tlb);
#endif
}
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 34b153cbd4ac..5e9a34b5bd74 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -114,6 +114,7 @@ int apply_relocate(Elf32_Shdr *sechdrs,
*location += sym->st_value;
break;
case R_386_PC32:
+ case R_386_PLT32:
/* Add the value, subtract its position */
*location += sym->st_value - (uint32_t)location;
break;
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 8a67d1fa8dc5..ed8ac6bcbafb 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -182,6 +182,13 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
err = security_locked_down(LOCKDOWN_MSR);
if (err)
break;
+
+ err = filter_write(regs[1]);
+ if (err)
+ return err;
+
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+
err = wrmsr_safe_regs_on_cpu(cpu, regs);
if (err)
break;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 6c3407ba6ee9..c60222ab8ab9 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -135,8 +135,7 @@ unsigned paravirt_patch_default(u8 type, void *insn_buff,
else if (opfunc == _paravirt_ident_64)
ret = paravirt_patch_ident_64(insn_buff, len);
- else if (type == PARAVIRT_PATCH(cpu.iret) ||
- type == PARAVIRT_PATCH(cpu.usergs_sysret64))
+ else if (type == PARAVIRT_PATCH(cpu.iret))
/* If operation requires a jmp, then jmp */
ret = paravirt_patch_jmp(insn_buff, opfunc, addr, len);
#endif
@@ -170,7 +169,6 @@ static u64 native_steal_clock(int cpu)
/* These are in entry.S */
extern void native_iret(void);
-extern void native_usergs_sysret64(void);
static struct resource reserve_ioports = {
.start = 0,
@@ -310,9 +308,7 @@ struct paravirt_patch_template pv_ops = {
.cpu.load_sp0 = native_load_sp0,
- .cpu.usergs_sysret64 = native_usergs_sysret64,
.cpu.iret = native_iret,
- .cpu.swapgs = native_swapgs,
#ifdef CONFIG_X86_IOPL_IOPERM
.cpu.invalidate_io_bitmap = native_tss_invalidate_io_bitmap,
@@ -324,7 +320,6 @@ struct paravirt_patch_template pv_ops = {
/* Irq ops. */
.irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
- .irq.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
.irq.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
.irq.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
.irq.safe_halt = native_safe_halt,
diff --git a/arch/x86/kernel/paravirt_patch.c b/arch/x86/kernel/paravirt_patch.c
index ace6e334cb39..abd27ec67397 100644
--- a/arch/x86/kernel/paravirt_patch.c
+++ b/arch/x86/kernel/paravirt_patch.c
@@ -25,10 +25,7 @@ struct patch_xxl {
const unsigned char mmu_read_cr2[3];
const unsigned char mmu_read_cr3[3];
const unsigned char mmu_write_cr3[3];
- const unsigned char irq_restore_fl[2];
const unsigned char cpu_wbinvd[2];
- const unsigned char cpu_usergs_sysret64[6];
- const unsigned char cpu_swapgs[3];
const unsigned char mov64[3];
};
@@ -39,11 +36,7 @@ static const struct patch_xxl patch_data_xxl = {
.mmu_read_cr2 = { 0x0f, 0x20, 0xd0 }, // mov %cr2, %[re]ax
.mmu_read_cr3 = { 0x0f, 0x20, 0xd8 }, // mov %cr3, %[re]ax
.mmu_write_cr3 = { 0x0f, 0x22, 0xdf }, // mov %rdi, %cr3
- .irq_restore_fl = { 0x57, 0x9d }, // push %rdi; popfq
.cpu_wbinvd = { 0x0f, 0x09 }, // wbinvd
- .cpu_usergs_sysret64 = { 0x0f, 0x01, 0xf8,
- 0x48, 0x0f, 0x07 }, // swapgs; sysretq
- .cpu_swapgs = { 0x0f, 0x01, 0xf8 }, // swapgs
.mov64 = { 0x48, 0x89, 0xf8 }, // mov %rdi, %rax
};
@@ -76,7 +69,6 @@ unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
switch (type) {
#ifdef CONFIG_PARAVIRT_XXL
- PATCH_CASE(irq, restore_fl, xxl, insn_buff, len);
PATCH_CASE(irq, save_fl, xxl, insn_buff, len);
PATCH_CASE(irq, irq_enable, xxl, insn_buff, len);
PATCH_CASE(irq, irq_disable, xxl, insn_buff, len);
@@ -85,8 +77,6 @@ unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);
- PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len);
- PATCH_CASE(cpu, swapgs, xxl, insn_buff, len);
PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
#endif
diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
index 2e9006c1e240..42e92ec62973 100644
--- a/arch/x86/kernel/pci-iommu_table.c
+++ b/arch/x86/kernel/pci-iommu_table.c
@@ -4,9 +4,6 @@
#include <linux/string.h>
#include <linux/kallsyms.h>
-
-#define DEBUG 1
-
static struct iommu_table_entry * __init
find_dependents_of(struct iommu_table_entry *start,
struct iommu_table_entry *finish,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 145a7ac0c19a..9c214d7085a4 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -161,7 +161,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
#endif
/* Kernel thread ? */
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
memset(childregs, 0, sizeof(struct pt_regs));
kthread_frame_init(frame, sp, arg);
return 0;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ad582f9ac5a6..d08307df69ad 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -539,7 +539,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
int cpu = smp_processor_id();
WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
- this_cpu_read(irq_count) != -1);
+ this_cpu_read(hardirq_stack_inuse));
if (!test_thread_flag(TIF_NEED_FPU_LOAD))
switch_fpu_prepare(prev_fpu, cpu);
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index bedca011459c..87a4143aa7d7 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -704,6 +704,9 @@ void ptrace_disable(struct task_struct *child)
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif
+#ifdef CONFIG_X86_64
+static const struct user_regset_view user_x86_64_view; /* Initialized below. */
+#endif
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
@@ -711,6 +714,14 @@ long arch_ptrace(struct task_struct *child, long request,
int ret;
unsigned long __user *datap = (unsigned long __user *)data;
+#ifdef CONFIG_X86_64
+ /* This is native 64-bit ptrace() */
+ const struct user_regset_view *regset_view = &user_x86_64_view;
+#else
+ /* This is native 32-bit ptrace() */
+ const struct user_regset_view *regset_view = &user_x86_32_view;
+#endif
+
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
@@ -749,28 +760,28 @@ long arch_ptrace(struct task_struct *child, long request,
case PTRACE_GETREGS: /* Get all gp regs from the child. */
return copy_regset_to_user(child,
- task_user_regset_view(current),
+ regset_view,
REGSET_GENERAL,
0, sizeof(struct user_regs_struct),
datap);
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(child,
- task_user_regset_view(current),
+ regset_view,
REGSET_GENERAL,
0, sizeof(struct user_regs_struct),
datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */
return copy_regset_to_user(child,
- task_user_regset_view(current),
+ regset_view,
REGSET_FP,
0, sizeof(struct user_i387_struct),
datap);
case PTRACE_SETFPREGS: /* Set the child FPU state. */
return copy_regset_from_user(child,
- task_user_regset_view(current),
+ regset_view,
REGSET_FP,
0, sizeof(struct user_i387_struct),
datap);
@@ -1152,28 +1163,28 @@ static long x32_arch_ptrace(struct task_struct *child,
case PTRACE_GETREGS: /* Get all gp regs from the child. */
return copy_regset_to_user(child,
- task_user_regset_view(current),
+ &user_x86_64_view,
REGSET_GENERAL,
0, sizeof(struct user_regs_struct),
datap);
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(child,
- task_user_regset_view(current),
+ &user_x86_64_view,
REGSET_GENERAL,
0, sizeof(struct user_regs_struct),
datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */
return copy_regset_to_user(child,
- task_user_regset_view(current),
+ &user_x86_64_view,
REGSET_FP,
0, sizeof(struct user_i387_struct),
datap);
case PTRACE_SETFPREGS: /* Set the child FPU state. */
return copy_regset_from_user(child,
- task_user_regset_view(current),
+ &user_x86_64_view,
REGSET_FP,
0, sizeof(struct user_i387_struct),
datap);
@@ -1309,6 +1320,25 @@ void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
}
+/*
+ * This is used by the core dump code to decide which regset to dump. The
+ * core dump code writes out the resulting .e_machine and the corresponding
+ * regsets. This is suboptimal if the task is messing around with its CS.L
+ * field, but at worst the core dump will end up missing some information.
+ *
+ * Unfortunately, it is also used by the broken PTRACE_GETREGSET and
+ * PTRACE_SETREGSET APIs. These APIs look at the .regsets field but have
+ * no way to make sure that the e_machine they use matches the caller's
+ * expectations. The result is that the data format returned by
+ * PTRACE_GETREGSET depends on the returned CS field (and even the offset
+ * of the returned CS field depends on its value!) and the data format
+ * accepted by PTRACE_SETREGSET is determined by the old CS value. The
+ * upshot is that it is basically impossible to use these APIs correctly.
+ *
+ * The best way to fix it in the long run would probably be to add new
+ * improved ptrace() APIs to read and write registers reliably, possibly by
+ * allowing userspace to select the ELF e_machine variant that they expect.
+ */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index db115943e8bd..b29657b76e3f 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -477,6 +477,15 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
},
},
+ { /* PCIe Wifi card isn't detected after reboot otherwise */
+ .callback = set_pci_reboot,
+ .ident = "Zotac ZBOX CI327 nano",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "NA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZBOX-CI327NANO-GS-01"),
+ },
+ },
+
/* Sony */
{ /* Handle problems with rebooting on Sony VGN-Z540N */
.callback = set_bios_reboot,
@@ -538,31 +547,21 @@ static void emergency_vmx_disable_all(void)
local_irq_disable();
/*
- * We need to disable VMX on all CPUs before rebooting, otherwise
- * we risk hanging up the machine, because the CPU ignores INIT
- * signals when VMX is enabled.
- *
- * We can't take any locks and we may be on an inconsistent
- * state, so we use NMIs as IPIs to tell the other CPUs to disable
- * VMX and halt.
+ * Disable VMX on all CPUs before rebooting, otherwise we risk hanging
+ * the machine, because the CPU blocks INIT when it's in VMX root.
*
- * For safety, we will avoid running the nmi_shootdown_cpus()
- * stuff unnecessarily, but we don't have a way to check
- * if other CPUs have VMX enabled. So we will call it only if the
- * CPU we are running on has VMX enabled.
+ * We can't take any locks and we may be in an inconsistent state, so
+ * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt.
*
- * We will miss cases where VMX is not enabled on all CPUs. This
- * shouldn't do much harm because KVM always enable VMX on all
- * CPUs anyway. But we can miss it on the small window where KVM
- * is still enabling VMX.
+ * Do the NMI shootdown even if VMX is off on _this_ CPU, as that
+ * doesn't prevent a different CPU from being in VMX root operation.
*/
- if (cpu_has_vmx() && cpu_vmx_enabled()) {
- /* Disable VMX on this CPU. */
- cpu_vmxoff();
+ if (cpu_has_vmx()) {
+ /* Safely force _this_ CPU out of VMX root operation. */
+ __cpu_emergency_vmxoff();
- /* Halt and disable VMX on the other CPUs */
+ /* Halt and exit VMX root operation on the other CPUs. */
nmi_shootdown_cpus(vmxoff_nmi);
-
}
}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 3412c4595efd..d883176ef2ce 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -16,7 +16,6 @@
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/root_dev.h>
-#include <linux/sfi.h>
#include <linux/hugetlb.h>
#include <linux/tboot.h>
#include <linux/usb/xhci-dbgp.h>
@@ -661,6 +660,17 @@ static void __init trim_platform_memory_ranges(void)
static void __init trim_bios_range(void)
{
/*
+ * A special case is the first 4KB of memory;
+ * this is a BIOS-owned area, not kernel RAM, but generally
+ * not listed as such in the E820 table.
+ *
+ * This typically reserves additional memory (64KiB by default)
+ * since some BIOSes are known to corrupt low memory. See the
+ * Kconfig help text for X86_RESERVE_LOW.
+ */
+ e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
+
+ /*
* special case: Some BIOSes report the PC BIOS
* area (640Kb -> 1Mb) as RAM even though it is not.
* take them out.
@@ -717,15 +727,6 @@ early_param("reservelow", parse_reservelow);
static void __init trim_low_memory_range(void)
{
- /*
- * A special case is the first 4Kb of memory;
- * This is a BIOS owned area, not kernel ram, but generally
- * not listed as such in the E820 table.
- *
- * This typically reserves additional memory (64KiB by default)
- * since some BIOSes are known to corrupt low memory. See the
- * Kconfig help text for X86_RESERVE_LOW.
- */
memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
}
@@ -1183,7 +1184,6 @@ void __init setup_arch(char **cmdline_p)
* Read APIC and some other early information from ACPI tables.
*/
acpi_boot_init();
- sfi_init();
x86_dtb_init();
/*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 117e24fbfd8a..02813a7f3a7c 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1833,6 +1833,7 @@ void arch_set_max_freq_ratio(bool turbo_disabled)
arch_max_freq_ratio = turbo_disabled ? SCHED_CAPACITY_SCALE :
arch_turbo_freq_ratio;
}
+EXPORT_SYMBOL_GPL(arch_set_max_freq_ratio);
static bool turbo_disabled(void)
{
diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
index ca9a380d9c0b..9442c4136c38 100644
--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -11,14 +11,26 @@ enum insn_type {
RET = 3, /* tramp / site cond-tail-call */
};
+/*
+ * data16 data16 xorq %rax, %rax - a single 5 byte instruction that clears %rax
+ * The REX.W cancels the effect of any data16.
+ */
+static const u8 xor5rax[] = { 0x66, 0x66, 0x48, 0x31, 0xc0 };
+
static void __ref __static_call_transform(void *insn, enum insn_type type, void *func)
{
+ const void *emulate = NULL;
int size = CALL_INSN_SIZE;
const void *code;
switch (type) {
case CALL:
code = text_gen_insn(CALL_INSN_OPCODE, insn, func);
+ if (func == &__static_call_return0) {
+ emulate = code;
+ code = &xor5rax;
+ }
+
break;
case NOP:
@@ -41,7 +53,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, void
if (unlikely(system_state == SYSTEM_BOOTING))
return text_poke_early(insn, code, size);
- text_poke_bp(insn, code, size, NULL);
+ text_poke_bp(insn, code, size, emulate);
}
static void __static_call_validate(void *insn, bool tail)
@@ -54,7 +66,8 @@ static void __static_call_validate(void *insn, bool tail)
return;
} else {
if (opcode == CALL_INSN_OPCODE ||
- !memcmp(insn, ideal_nops[NOP_ATOMIC5], 5))
+ !memcmp(insn, ideal_nops[NOP_ATOMIC5], 5) ||
+ !memcmp(insn, xor5rax, 5))
return;
}
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 60d2c3798ba2..0f3c307b37b3 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -127,12 +127,17 @@ static int enable_single_step(struct task_struct *child)
regs->flags |= X86_EFLAGS_TF;
/*
- * Always set TIF_SINGLESTEP - this guarantees that
- * we single-step system calls etc.. This will also
+ * Always set TIF_SINGLESTEP. This will also
* cause us to set TF when returning to user mode.
*/
set_tsk_thread_flag(child, TIF_SINGLESTEP);
+ /*
+ * Ensure that a trap is triggered once stepping out of a system
+ * call prior to executing any user instruction.
+ */
+ set_task_syscall_work(child, SYSCALL_EXIT_TRAP);
+
oflags = regs->flags;
/* Set TF on the kernel stack.. */
@@ -230,6 +235,7 @@ void user_disable_single_step(struct task_struct *child)
/* Always clear TIF_SINGLESTEP... */
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+ clear_task_syscall_work(child, SYSCALL_EXIT_TRAP);
/* But touch TF only if it was set by us.. */
if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF))
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 504fa5425bce..660b78827638 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -90,14 +90,10 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
unsigned long, fd, unsigned long, off)
{
- long error;
- error = -EINVAL;
if (off & ~PAGE_MASK)
- goto out;
+ return -EINVAL;
- error = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
-out:
- return error;
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
static void find_start_end(unsigned long addr, unsigned long flags,
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index 73f800100066..2a1d47f47eee 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -471,7 +471,7 @@ bool unwind_next_frame(struct unwind_state *state)
break;
case ORC_REG_SP_INDIRECT:
- sp = state->sp + orc->sp_offset;
+ sp = state->sp;
indirect = true;
break;
@@ -521,6 +521,9 @@ bool unwind_next_frame(struct unwind_state *state)
if (indirect) {
if (!deref_stack_reg(state, sp, &sp))
goto err;
+
+ if (orc->sp_reg == ORC_REG_SP_INDIRECT)
+ sp += orc->sp_offset;
}
/* Find IP, SP and possibly regs: */
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 764573de3996..e5a7a10a0164 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -134,7 +134,11 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
unsafe_put_user(regs->ds, &user->regs.ds, Efault_end);
unsafe_put_user(regs->fs, &user->regs.fs, Efault_end);
unsafe_put_user(regs->gs, &user->regs.gs, Efault_end);
- unsafe_put_user(vm86->screen_bitmap, &user->screen_bitmap, Efault_end);
+
+ /*
+ * Don't write screen_bitmap in case some user had a value there
+ * and expected it to remain unchanged.
+ */
user_access_end();
@@ -160,49 +164,6 @@ Efault:
do_exit(SIGSEGV);
}
-static void mark_screen_rdonly(struct mm_struct *mm)
-{
- struct vm_area_struct *vma;
- spinlock_t *ptl;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- int i;
-
- mmap_write_lock(mm);
- pgd = pgd_offset(mm, 0xA0000);
- if (pgd_none_or_clear_bad(pgd))
- goto out;
- p4d = p4d_offset(pgd, 0xA0000);
- if (p4d_none_or_clear_bad(p4d))
- goto out;
- pud = pud_offset(p4d, 0xA0000);
- if (pud_none_or_clear_bad(pud))
- goto out;
- pmd = pmd_offset(pud, 0xA0000);
-
- if (pmd_trans_huge(*pmd)) {
- vma = find_vma(mm, 0xA0000);
- split_huge_pmd(vma, pmd, 0xA0000);
- }
- if (pmd_none_or_clear_bad(pmd))
- goto out;
- pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
- for (i = 0; i < 32; i++) {
- if (pte_present(*pte))
- set_pte(pte, pte_wrprotect(*pte));
- pte++;
- }
- pte_unmap_unlock(pte, ptl);
-out:
- mmap_write_unlock(mm);
- flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT, false);
-}
-
-
-
static int do_vm86_irq_handling(int subfunction, int irqnumber);
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus);
@@ -282,6 +243,15 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
offsetof(struct vm86_struct, int_revectored)))
return -EFAULT;
+
+ /* VM86_SCREEN_BITMAP had numerous bugs and appears to have no users. */
+ if (v.flags & VM86_SCREEN_BITMAP) {
+ char comm[TASK_COMM_LEN];
+
+ pr_info_once("vm86: '%s' uses VM86_SCREEN_BITMAP, which is no longer supported\n", get_task_comm(comm, current));
+ return -EINVAL;
+ }
+
memset(&vm86regs, 0, sizeof(vm86regs));
vm86regs.pt.bx = v.regs.ebx;
@@ -302,7 +272,6 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
vm86regs.gs = v.regs.gs;
vm86->flags = v.flags;
- vm86->screen_bitmap = v.screen_bitmap;
vm86->cpu_type = v.cpu_type;
if (copy_from_user(&vm86->int_revectored,
@@ -370,9 +339,6 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
update_task_stack(tsk);
preempt_enable();
- if (vm86->flags & VM86_SCREEN_BITMAP)
- mark_screen_rdonly(tsk->mm);
-
memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
return regs->ax;
}
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 7ac592664c52..a788d5120d4d 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -103,6 +103,15 @@ config KVM_AMD_SEV
Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
with Encrypted State (SEV-ES) on AMD processors.
+config KVM_XEN
+ bool "Support for Xen hypercall interface"
+ depends on KVM
+ help
+ Provides KVM support for hosting Xen HVM guests and
+ passing Xen hypercalls to userspace.
+
+ If in doubt, say "N".
+
config KVM_MMU_AUDIT
bool "Audit KVM MMU"
depends on KVM && TRACEPOINTS
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 4bd14ab01323..1b4766fe1de2 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -17,7 +17,9 @@ kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \
- mmu/spte.o mmu/tdp_iter.o mmu/tdp_mmu.o
+ mmu/spte.o
+kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
+kvm-$(CONFIG_KVM_XEN) += xen.o
kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 13036cf0b912..6bd2f8b830e4 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -173,16 +173,22 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_update_pv_runtime(vcpu);
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
- kvm_mmu_reset_context(vcpu);
+ vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
kvm_pmu_refresh(vcpu);
vcpu->arch.cr4_guest_rsvd_bits =
__cr4_reserved_bits(guest_cpuid_has, vcpu);
- vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
+ kvm_hv_set_cpuid(vcpu);
/* Invoke the vendor callback only after the above state is updated. */
- kvm_x86_ops.vcpu_after_set_cpuid(vcpu);
+ static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);
+
+ /*
+ * Except for the MMU, which needs to be reset after any vendor
+ * specific adjustments to the reserved GPA bits.
+ */
+ kvm_mmu_reset_context(vcpu);
}
static int is_efer_nx(void)
@@ -223,6 +229,16 @@ not_found:
return 36;
}
+/*
+ * This "raw" version returns the reserved GPA bits without any adjustments for
+ * encryption technologies that usurp bits. The raw mask should be used if and
+ * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs.
+ */
+u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
+{
+ return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
+}
+
/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid *cpuid,
@@ -321,7 +337,7 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
if (cpuid->nent < vcpu->arch.cpuid_nent)
goto out;
r = -EFAULT;
- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
+ if (copy_to_user(entries, vcpu->arch.cpuid_entries,
vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
goto out;
return 0;
@@ -392,7 +408,7 @@ void kvm_set_cpu_caps(void)
kvm_cpu_cap_mask(CPUID_7_0_EBX,
F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
- F(BMI2) | F(ERMS) | 0 /*INVPCID*/ | F(RTM) | 0 /*MPX*/ | F(RDSEED) |
+ F(BMI2) | F(ERMS) | F(INVPCID) | F(RTM) | 0 /*MPX*/ | F(RDSEED) |
F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | 0 /*INTEL_PT*/
@@ -434,7 +450,7 @@ void kvm_set_cpu_caps(void)
kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);
kvm_cpu_cap_mask(CPUID_7_1_EAX,
- F(AVX512_BF16)
+ F(AVX_VNNI) | F(AVX512_BF16)
);
kvm_cpu_cap_mask(CPUID_D_1_EAX,
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index dc921d76e42e..2a0c5064497f 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -30,15 +30,32 @@ bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
u32 *ecx, u32 *edx, bool exact_only);
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
+u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.maxphyaddr;
}
+static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+ return !(gpa & vcpu->arch.reserved_gpa_bits);
+}
+
static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
- return (gpa >= BIT_ULL(cpuid_maxphyaddr(vcpu)));
+ return !kvm_vcpu_is_legal_gpa(vcpu, gpa);
+}
+
+static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
+ gpa_t gpa, gpa_t alignment)
+{
+ return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
+}
+
+static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+ return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
}
struct cpuid_reg {
@@ -324,11 +341,6 @@ static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
kvm_cpu_cap_set(x86_feature);
}
-static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
-{
- return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
-}
-
static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
unsigned int kvm_feature)
{
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 56cae1ff9e3f..f7970ba6219f 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2506,12 +2506,12 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
val = GET_SMSTATE(u32, smstate, 0x7fcc);
- if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
+ if (ctxt->ops->set_dr(ctxt, 6, val))
return X86EMUL_UNHANDLEABLE;
val = GET_SMSTATE(u32, smstate, 0x7fc8);
- if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
+ if (ctxt->ops->set_dr(ctxt, 7, val))
return X86EMUL_UNHANDLEABLE;
selector = GET_SMSTATE(u32, smstate, 0x7fc4);
@@ -2564,14 +2564,14 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
- val = GET_SMSTATE(u32, smstate, 0x7f68);
+ val = GET_SMSTATE(u64, smstate, 0x7f68);
- if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
+ if (ctxt->ops->set_dr(ctxt, 6, val))
return X86EMUL_UNHANDLEABLE;
- val = GET_SMSTATE(u32, smstate, 0x7f60);
+ val = GET_SMSTATE(u64, smstate, 0x7f60);
- if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
+ if (ctxt->ops->set_dr(ctxt, 7, val))
return X86EMUL_UNHANDLEABLE;
cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
@@ -2879,6 +2879,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
(u32)msr_data;
+ if (efer & EFER_LMA)
+ ctxt->mode = X86EMUL_MODE_PROT64;
return X86EMUL_CONTINUE;
}
@@ -4327,7 +4329,7 @@ static int check_dr_read(struct x86_emulate_ctxt *ctxt)
ctxt->ops->get_dr(ctxt, 6, &dr6);
dr6 &= ~DR_TRAP_BITS;
- dr6 |= DR6_BD | DR6_RTM;
+ dr6 |= DR6_BD | DR6_ACTIVE_LOW;
ctxt->ops->set_dr(ctxt, 6, dr6);
return emulate_db(ctxt);
}
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 922c69dcca4d..58fa8c029867 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -23,6 +23,7 @@
#include "ioapic.h"
#include "cpuid.h"
#include "hyperv.h"
+#include "xen.h"
#include <linux/cpu.h>
#include <linux/kvm_host.h>
@@ -36,6 +37,9 @@
#include "trace.h"
#include "irq.h"
+/* "Hv#1" signature */
+#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
+
#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
@@ -128,7 +132,7 @@ static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
synic_update_vector(synic, vector);
/* Load SynIC vectors into EOI exit bitmap */
- kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
+ kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic));
return 0;
}
@@ -141,10 +145,10 @@ static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
return NULL;
vcpu = kvm_get_vcpu(kvm, vpidx);
- if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
+ if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx)
return vcpu;
kvm_for_each_vcpu(i, vcpu, kvm)
- if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
+ if (kvm_hv_get_vpindex(vcpu) == vpidx)
return vcpu;
return NULL;
}
@@ -155,17 +159,17 @@ static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
struct kvm_vcpu_hv_synic *synic;
vcpu = get_vcpu_by_vpidx(kvm, vpidx);
- if (!vcpu)
+ if (!vcpu || !to_hv_vcpu(vcpu))
return NULL;
- synic = vcpu_to_synic(vcpu);
+ synic = to_hv_synic(vcpu);
return (synic->active) ? synic : NULL;
}
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
struct kvm *kvm = vcpu->kvm;
- struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
- struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
+ struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
struct kvm_vcpu_hv_stimer *stimer;
int gsi, idx;
@@ -189,8 +193,8 @@ static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
- struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
- struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
+ struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
hv_vcpu->exit.u.synic.msr = msr;
@@ -204,7 +208,7 @@ static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
u32 msr, u64 data, bool host)
{
- struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
+ struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
int ret;
if (!synic->active && !host)
@@ -282,8 +286,7 @@ static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
{
- struct kvm *kvm = vcpu->kvm;
- struct kvm_hv *hv = &kvm->arch.hyperv;
+ struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
hv->hv_syndbg.control.status =
@@ -293,8 +296,8 @@ static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
{
- struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);
- struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
+ struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
hv_vcpu->exit.u.syndbg.msr = msr;
@@ -310,13 +313,13 @@ static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
- struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);
+ struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
return 1;
trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
- vcpu_to_hv_vcpu(vcpu)->vp_index, msr, data);
+ to_hv_vcpu(vcpu)->vp_index, msr, data);
switch (msr) {
case HV_X64_MSR_SYNDBG_CONTROL:
syndbg->control.control = data;
@@ -349,7 +352,7 @@ static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
- struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);
+ struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
return 1;
@@ -377,9 +380,7 @@ static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
break;
}
- trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id,
- vcpu_to_hv_vcpu(vcpu)->vp_index, msr,
- *pdata);
+ trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);
return 0;
}
@@ -421,7 +422,7 @@ static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
- struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
+ struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
struct kvm_lapic_irq irq;
int ret, vector;
@@ -457,7 +458,7 @@ int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
- struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
+ struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
int i;
trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);
@@ -514,7 +515,7 @@ static void synic_init(struct kvm_vcpu_hv_synic *synic)
static u64 get_time_ref_counter(struct kvm *kvm)
{
- struct kvm_hv *hv = &kvm->arch.hyperv;
+ struct kvm_hv *hv = to_kvm_hv(kvm);
struct kvm_vcpu *vcpu;
u64 tsc;
@@ -534,10 +535,10 @@ static u64 get_time_ref_counter(struct kvm *kvm)
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
bool vcpu_kick)
{
- struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
+ struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
set_bit(stimer->index,
- vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
+ to_hv_vcpu(vcpu)->stimer_pending_bitmap);
kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
if (vcpu_kick)
kvm_vcpu_kick(vcpu);
@@ -545,14 +546,14 @@ static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
- struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
+ struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
- trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
+ trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id,
stimer->index);
hrtimer_cancel(&stimer->timer);
clear_bit(stimer->index,
- vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
+ to_hv_vcpu(vcpu)->stimer_pending_bitmap);
stimer->msg_pending = false;
stimer->exp_time = 0;
}
@@ -562,7 +563,7 @@ static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
struct kvm_vcpu_hv_stimer *stimer;
stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
- trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
+ trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id,
stimer->index);
stimer_mark_pending(stimer, true);
@@ -579,7 +580,7 @@ static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
u64 time_now;
ktime_t ktime_now;
- time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
+ time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm);
ktime_now = ktime_get();
if (stimer->config.periodic) {
@@ -596,7 +597,7 @@ static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
stimer->exp_time = time_now + stimer->count;
trace_kvm_hv_stimer_start_periodic(
- stimer_to_vcpu(stimer)->vcpu_id,
+ hv_stimer_to_vcpu(stimer)->vcpu_id,
stimer->index,
time_now, stimer->exp_time);
@@ -618,7 +619,7 @@ static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
return 0;
}
- trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
+ trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id,
stimer->index,
time_now, stimer->count);
@@ -633,13 +634,13 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
{
union hv_stimer_config new_config = {.as_uint64 = config},
old_config = {.as_uint64 = stimer->config.as_uint64};
- struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
- struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
+ struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
+ struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
if (!synic->active && !host)
return 1;
- trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
+ trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id,
stimer->index, config, host);
stimer_cleanup(stimer);
@@ -657,13 +658,13 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
bool host)
{
- struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
- struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
+ struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
+ struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
if (!synic->active && !host)
return 1;
- trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
+ trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
stimer->index, count, host);
stimer_cleanup(stimer);
@@ -694,7 +695,7 @@ static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
struct hv_message *src_msg, bool no_retry)
{
- struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
+ struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
gfn_t msg_page_gfn;
struct hv_message_header hv_hdr;
@@ -750,7 +751,7 @@ static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
- struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
+ struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
struct hv_message *msg = &stimer->msg;
struct hv_timer_message_payload *payload =
(struct hv_timer_message_payload *)&msg->u.payload;
@@ -763,14 +764,14 @@ static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
payload->expiration_time = stimer->exp_time;
payload->delivery_time = get_time_ref_counter(vcpu->kvm);
- return synic_deliver_msg(vcpu_to_synic(vcpu),
+ return synic_deliver_msg(to_hv_synic(vcpu),
stimer->config.sintx, msg,
no_retry);
}
static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
{
- struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
+ struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
struct kvm_lapic_irq irq = {
.delivery_mode = APIC_DM_FIXED,
.vector = stimer->config.apic_vector
@@ -790,7 +791,7 @@ static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
r = stimer_send_msg(stimer);
else
r = stimer_notify_direct(stimer);
- trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
+ trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id,
stimer->index, direct, r);
if (!r) {
stimer->msg_pending = false;
@@ -801,11 +802,14 @@ static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
struct kvm_vcpu_hv_stimer *stimer;
u64 time_now, exp_time;
int i;
+ if (!hv_vcpu)
+ return;
+
for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
stimer = &hv_vcpu->stimer[i];
@@ -831,16 +835,27 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
int i;
+ if (!hv_vcpu)
+ return;
+
for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
stimer_cleanup(&hv_vcpu->stimer[i]);
+
+ kfree(hv_vcpu);
+ vcpu->arch.hyperv = NULL;
}
bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
{
- if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+
+ if (!hv_vcpu)
+ return false;
+
+ if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
return false;
return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}
@@ -880,28 +895,41 @@ static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
stimer_prepare_msg(stimer);
}
-void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
+static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
+ struct kvm_vcpu_hv *hv_vcpu;
int i;
+ hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
+ if (!hv_vcpu)
+ return -ENOMEM;
+
+ vcpu->arch.hyperv = hv_vcpu;
+ hv_vcpu->vcpu = vcpu;
+
synic_init(&hv_vcpu->synic);
bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
stimer_init(&hv_vcpu->stimer[i], i);
-}
-
-void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
-{
- struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
+
+ return 0;
}
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
- struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
+ struct kvm_vcpu_hv_synic *synic;
+ int r;
+
+ if (!to_hv_vcpu(vcpu)) {
+ r = kvm_hv_vcpu_init(vcpu);
+ if (r)
+ return r;
+ }
+
+ synic = to_hv_synic(vcpu);
/*
* Hyper-V SynIC auto EOI SINT's are
@@ -939,10 +967,9 @@ static bool kvm_hv_msr_partition_wide(u32 msr)
return r;
}
-static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
- u32 index, u64 *pdata)
+static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
{
- struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+ struct kvm_hv *hv = to_kvm_hv(kvm);
size_t size = ARRAY_SIZE(hv->hv_crash_param);
if (WARN_ON_ONCE(index >= size))
@@ -952,41 +979,26 @@ static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
return 0;
}
-static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
+static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
{
- struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+ struct kvm_hv *hv = to_kvm_hv(kvm);
*pdata = hv->hv_crash_ctl;
return 0;
}
-static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
+static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
{
- struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
-
- if (host)
- hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;
+ struct kvm_hv *hv = to_kvm_hv(kvm);
- if (!host && (data & HV_CRASH_CTL_CRASH_NOTIFY)) {
-
- vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
- hv->hv_crash_param[0],
- hv->hv_crash_param[1],
- hv->hv_crash_param[2],
- hv->hv_crash_param[3],
- hv->hv_crash_param[4]);
-
- /* Send notification about crash to user space */
- kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
- }
+ hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;
return 0;
}
-static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
- u32 index, u64 data)
+static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
{
- struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+ struct kvm_hv *hv = to_kvm_hv(kvm);
size_t size = ARRAY_SIZE(hv->hv_crash_param);
if (WARN_ON_ONCE(index >= size))
@@ -1068,7 +1080,7 @@ static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
void kvm_hv_setup_tsc_page(struct kvm *kvm,
struct pvclock_vcpu_time_info *hv_clock)
{
- struct kvm_hv *hv = &kvm->arch.hyperv;
+ struct kvm_hv *hv = to_kvm_hv(kvm);
u32 tsc_seq;
u64 gfn;
@@ -1078,7 +1090,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
return;
- mutex_lock(&kvm->arch.hyperv.hv_lock);
+ mutex_lock(&hv->hv_lock);
if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
goto out_unlock;
@@ -1122,14 +1134,14 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
kvm_write_guest(kvm, gfn_to_gpa(gfn),
&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
out_unlock:
- mutex_unlock(&kvm->arch.hyperv.hv_lock);
+ mutex_unlock(&hv->hv_lock);
}
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
bool host)
{
struct kvm *kvm = vcpu->kvm;
- struct kvm_hv *hv = &kvm->arch.hyperv;
+ struct kvm_hv *hv = to_kvm_hv(kvm);
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
@@ -1139,9 +1151,9 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
break;
case HV_X64_MSR_HYPERCALL: {
- u64 gfn;
- unsigned long addr;
- u8 instructions[4];
+ u8 instructions[9];
+ int i = 0;
+ u64 addr;
/* if guest os id is not set hypercall should remain disabled */
if (!hv->hv_guest_os_id)
@@ -1150,16 +1162,33 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
hv->hv_hypercall = data;
break;
}
- gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
- addr = gfn_to_hva(kvm, gfn);
- if (kvm_is_error_hva(addr))
- return 1;
- kvm_x86_ops.patch_hypercall(vcpu, instructions);
- ((unsigned char *)instructions)[3] = 0xc3; /* ret */
- if (__copy_to_user((void __user *)addr, instructions, 4))
+
+ /*
+ * If Xen and Hyper-V hypercalls are both enabled, disambiguate
+ * the same way Xen itself does, by setting bit 31 of EAX, which
+ * is RsvdZ in the 32-bit Hyper-V hypercall ABI and is simply
+ * clobbered on 64-bit.
+ */
+ if (kvm_xen_hypercall_enabled(kvm)) {
+ /* orl $0x80000000, %eax */
+ instructions[i++] = 0x0d;
+ instructions[i++] = 0x00;
+ instructions[i++] = 0x00;
+ instructions[i++] = 0x00;
+ instructions[i++] = 0x80;
+ }
+
+ /* vmcall/vmmcall */
+ static_call(kvm_x86_patch_hypercall)(vcpu, instructions + i);
+ i += 3;
+
+ /* ret */
+ ((unsigned char *)instructions)[i++] = 0xc3;
+
+ addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK;
+ if (kvm_vcpu_write_guest(vcpu, addr, instructions, i))
return 1;
hv->hv_hypercall = data;
- mark_page_dirty(kvm, gfn);
break;
}
case HV_X64_MSR_REFERENCE_TSC:
@@ -1168,11 +1197,25 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
break;
case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
- return kvm_hv_msr_set_crash_data(vcpu,
+ return kvm_hv_msr_set_crash_data(kvm,
msr - HV_X64_MSR_CRASH_P0,
data);
case HV_X64_MSR_CRASH_CTL:
- return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
+ if (host)
+ return kvm_hv_msr_set_crash_ctl(kvm, data);
+
+ if (data & HV_CRASH_CTL_CRASH_NOTIFY) {
+ vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
+ hv->hv_crash_param[0],
+ hv->hv_crash_param[1],
+ hv->hv_crash_param[2],
+ hv->hv_crash_param[3],
+ hv->hv_crash_param[4]);
+
+ /* Send notification about crash to user space */
+ kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
+ }
+ break;
case HV_X64_MSR_RESET:
if (data == 1) {
vcpu_debug(vcpu, "hyper-v reset requested\n");
@@ -1216,11 +1259,11 @@ static u64 current_task_runtime_100ns(void)
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
- struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
switch (msr) {
case HV_X64_MSR_VP_INDEX: {
- struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+ struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
int vcpu_idx = kvm_vcpu_get_idx(vcpu);
u32 new_vp_index = (u32)data;
@@ -1291,14 +1334,14 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
case HV_X64_MSR_SIMP:
case HV_X64_MSR_EOM:
case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
- return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
+ return synic_set_msr(to_hv_synic(vcpu), msr, data, host);
case HV_X64_MSR_STIMER0_CONFIG:
case HV_X64_MSR_STIMER1_CONFIG:
case HV_X64_MSR_STIMER2_CONFIG:
case HV_X64_MSR_STIMER3_CONFIG: {
int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
- return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
+ return stimer_set_config(to_hv_stimer(vcpu, timer_index),
data, host);
}
case HV_X64_MSR_STIMER0_COUNT:
@@ -1307,7 +1350,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
case HV_X64_MSR_STIMER3_COUNT: {
int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
- return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
+ return stimer_set_count(to_hv_stimer(vcpu, timer_index),
data, host);
}
case HV_X64_MSR_TSC_FREQUENCY:
@@ -1330,7 +1373,7 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
{
u64 data = 0;
struct kvm *kvm = vcpu->kvm;
- struct kvm_hv *hv = &kvm->arch.hyperv;
+ struct kvm_hv *hv = to_kvm_hv(kvm);
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
@@ -1346,11 +1389,11 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
data = hv->hv_tsc_page;
break;
case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
- return kvm_hv_msr_get_crash_data(vcpu,
+ return kvm_hv_msr_get_crash_data(kvm,
msr - HV_X64_MSR_CRASH_P0,
pdata);
case HV_X64_MSR_CRASH_CTL:
- return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
+ return kvm_hv_msr_get_crash_ctl(kvm, pdata);
case HV_X64_MSR_RESET:
data = 0;
break;
@@ -1379,7 +1422,7 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
bool host)
{
u64 data = 0;
- struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
switch (msr) {
case HV_X64_MSR_VP_INDEX:
@@ -1403,14 +1446,14 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
case HV_X64_MSR_SIMP:
case HV_X64_MSR_EOM:
case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
- return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
+ return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host);
case HV_X64_MSR_STIMER0_CONFIG:
case HV_X64_MSR_STIMER1_CONFIG:
case HV_X64_MSR_STIMER2_CONFIG:
case HV_X64_MSR_STIMER3_CONFIG: {
int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
- return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
+ return stimer_get_config(to_hv_stimer(vcpu, timer_index),
pdata);
}
case HV_X64_MSR_STIMER0_COUNT:
@@ -1419,7 +1462,7 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
case HV_X64_MSR_STIMER3_COUNT: {
int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
- return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
+ return stimer_get_count(to_hv_stimer(vcpu, timer_index),
pdata);
}
case HV_X64_MSR_TSC_FREQUENCY:
@@ -1438,12 +1481,22 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
+ struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
+
+ if (!host && !vcpu->arch.hyperv_enabled)
+ return 1;
+
+ if (!to_hv_vcpu(vcpu)) {
+ if (kvm_hv_vcpu_init(vcpu))
+ return 1;
+ }
+
if (kvm_hv_msr_partition_wide(msr)) {
int r;
- mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
+ mutex_lock(&hv->hv_lock);
r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
- mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
+ mutex_unlock(&hv->hv_lock);
return r;
} else
return kvm_hv_set_msr(vcpu, msr, data, host);
@@ -1451,12 +1504,22 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
+ struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
+
+ if (!host && !vcpu->arch.hyperv_enabled)
+ return 1;
+
+ if (!to_hv_vcpu(vcpu)) {
+ if (kvm_hv_vcpu_init(vcpu))
+ return 1;
+ }
+
if (kvm_hv_msr_partition_wide(msr)) {
int r;
- mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
+ mutex_lock(&hv->hv_lock);
r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
- mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
+ mutex_unlock(&hv->hv_lock);
return r;
} else
return kvm_hv_get_msr(vcpu, msr, pdata, host);
@@ -1466,7 +1529,7 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask(
struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
u64 *vp_bitmap, unsigned long *vcpu_bitmap)
{
- struct kvm_hv *hv = &kvm->arch.hyperv;
+ struct kvm_hv *hv = to_kvm_hv(kvm);
struct kvm_vcpu *vcpu;
int i, bank, sbank = 0;
@@ -1483,18 +1546,16 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask(
bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
kvm_for_each_vcpu(i, vcpu, kvm) {
- if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index,
- (unsigned long *)vp_bitmap))
+ if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
__set_bit(i, vcpu_bitmap);
}
return vcpu_bitmap;
}
-static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
- u16 rep_cnt, bool ex)
+static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool ex)
{
- struct kvm *kvm = current_vcpu->kvm;
- struct kvm_vcpu_hv *hv_vcpu = &current_vcpu->arch.hyperv;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
struct hv_tlb_flush_ex flush_ex;
struct hv_tlb_flush flush;
u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
@@ -1592,10 +1653,10 @@ static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
}
}
-static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa,
+static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, u64 ingpa, u64 outgpa,
bool ex, bool fast)
{
- struct kvm *kvm = current_vcpu->kvm;
+ struct kvm *kvm = vcpu->kvm;
struct hv_send_ipi_ex send_ipi_ex;
struct hv_send_ipi send_ipi;
u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
@@ -1666,9 +1727,20 @@ ret_success:
return HV_STATUS_SUCCESS;
}
-bool kvm_hv_hypercall_enabled(struct kvm *kvm)
+void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
{
- return READ_ONCE(kvm->arch.hyperv.hv_guest_os_id) != 0;
+ struct kvm_cpuid_entry2 *entry;
+
+ entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
+ if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX)
+ vcpu->arch.hyperv_enabled = true;
+ else
+ vcpu->arch.hyperv_enabled = false;
+}
+
+bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;
}
static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
@@ -1698,6 +1770,7 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
{
+ struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
struct eventfd_ctx *eventfd;
if (unlikely(!fast)) {
@@ -1726,7 +1799,7 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
rcu_read_lock();
- eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
+ eventfd = idr_find(&hv->conn_to_evt, param);
rcu_read_unlock();
if (!eventfd)
return HV_STATUS_INVALID_PORT_ID;
@@ -1745,7 +1818,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
* hypercall generates UD from non zero cpl and real mode
* per HYPER-V spec
*/
- if (kvm_x86_ops.get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
+ if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
@@ -1793,7 +1866,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
fallthrough; /* maybe userspace knows this conn_id */
case HVCALL_POST_MESSAGE:
/* don't bother userspace if it has no way to handle it */
- if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
+ if (unlikely(rep || !to_hv_synic(vcpu)->active)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
@@ -1855,7 +1928,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
}
fallthrough;
case HVCALL_RESET_DEBUG_SESSION: {
- struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);
+ struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
if (!kvm_hv_is_syndbg_enabled(vcpu)) {
ret = HV_STATUS_INVALID_HYPERCALL_CODE;
@@ -1885,23 +1958,26 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
void kvm_hv_init_vm(struct kvm *kvm)
{
- mutex_init(&kvm->arch.hyperv.hv_lock);
- idr_init(&kvm->arch.hyperv.conn_to_evt);
+ struct kvm_hv *hv = to_kvm_hv(kvm);
+
+ mutex_init(&hv->hv_lock);
+ idr_init(&hv->conn_to_evt);
}
void kvm_hv_destroy_vm(struct kvm *kvm)
{
+ struct kvm_hv *hv = to_kvm_hv(kvm);
struct eventfd_ctx *eventfd;
int i;
- idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i)
+ idr_for_each_entry(&hv->conn_to_evt, eventfd, i)
eventfd_ctx_put(eventfd);
- idr_destroy(&kvm->arch.hyperv.conn_to_evt);
+ idr_destroy(&hv->conn_to_evt);
}
static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
{
- struct kvm_hv *hv = &kvm->arch.hyperv;
+ struct kvm_hv *hv = to_kvm_hv(kvm);
struct eventfd_ctx *eventfd;
int ret;
@@ -1925,7 +2001,7 @@ static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
{
- struct kvm_hv *hv = &kvm->arch.hyperv;
+ struct kvm_hv *hv = to_kvm_hv(kvm);
struct eventfd_ctx *eventfd;
mutex_lock(&hv->hv_lock);
@@ -1997,8 +2073,7 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
break;
case HYPERV_CPUID_INTERFACE:
- memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
- ent->eax = signature[0];
+ ent->eax = HYPERV_CPUID_SIGNATURE_EAX;
break;
case HYPERV_CPUID_VERSION:
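Note on the HV_X64_MSR_HYPERCALL hunk above: with Xen hypercalls also enabled, the page now starts with a 5-byte orl $0x80000000, %eax prefix, followed by the vendor-patched vmcall/vmmcall and a final ret, for up to nine bytes. The standalone sketch below reproduces that byte layout; the vmcall encoding (0f 01 c1, or 0f 01 d9 for vmmcall on SVM) stands in for whatever kvm_x86_patch_hypercall() actually emits, and build_hypercall_page() is an invented name.

/*
 * Sketch of the hypercall page layout written by the handler above.
 * Assumes the VMX vmcall encoding for the vendor hook.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t build_hypercall_page(uint8_t *buf, bool xen_also_enabled)
{
	size_t i = 0;

	if (xen_also_enabled) {
		/* orl $0x80000000, %eax: tags the call as Hyper-V, not Xen */
		buf[i++] = 0x0d;
		buf[i++] = 0x00;
		buf[i++] = 0x00;
		buf[i++] = 0x00;
		buf[i++] = 0x80;
	}

	/* vmcall (VMX encoding) */
	buf[i++] = 0x0f;
	buf[i++] = 0x01;
	buf[i++] = 0xc1;

	/* ret */
	buf[i++] = 0xc3;

	return i;
}

int main(void)
{
	uint8_t page[9];
	size_t n = build_hypercall_page(page, true);

	for (size_t j = 0; j < n; j++)
		printf("%02x ", page[j]);
	printf("\n");	/* prints: 0d 00 00 00 80 0f 01 c1 c3 */
	return 0;
}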
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 6d7def2b0aad..e951af1fcb2c 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -50,38 +50,46 @@
/* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */
#define HV_X64_SYNDBG_OPTION_USE_HCALLS BIT(2)
-static inline struct kvm_vcpu_hv *vcpu_to_hv_vcpu(struct kvm_vcpu *vcpu)
+static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm)
{
- return &vcpu->arch.hyperv;
+ return &kvm->arch.hyperv;
}
-static inline struct kvm_vcpu *hv_vcpu_to_vcpu(struct kvm_vcpu_hv *hv_vcpu)
+static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu_arch *arch;
-
- arch = container_of(hv_vcpu, struct kvm_vcpu_arch, hyperv);
- return container_of(arch, struct kvm_vcpu, arch);
+ return vcpu->arch.hyperv;
}
-static inline struct kvm_vcpu_hv_synic *vcpu_to_synic(struct kvm_vcpu *vcpu)
+static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
{
- return &vcpu->arch.hyperv.synic;
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+
+ return &hv_vcpu->synic;
}
-static inline struct kvm_vcpu *synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
+static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
{
- return hv_vcpu_to_vcpu(container_of(synic, struct kvm_vcpu_hv, synic));
+ struct kvm_vcpu_hv *hv_vcpu = container_of(synic, struct kvm_vcpu_hv, synic);
+
+ return hv_vcpu->vcpu;
}
-static inline struct kvm_hv_syndbg *vcpu_to_hv_syndbg(struct kvm_vcpu *vcpu)
+static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu)
{
return &vcpu->kvm->arch.hyperv.hv_syndbg;
}
+static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+
+ return hv_vcpu ? hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu);
+}
+
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);
-bool kvm_hv_hypercall_enabled(struct kvm *kvm);
+bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu);
int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
void kvm_hv_irq_routing_update(struct kvm *kvm);
@@ -89,32 +97,35 @@ int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);
-void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
struct hv_vp_assist_page *assist_page);
-static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu,
- int timer_index)
+static inline struct kvm_vcpu_hv_stimer *to_hv_stimer(struct kvm_vcpu *vcpu,
+ int timer_index)
{
- return &vcpu_to_hv_vcpu(vcpu)->stimer[timer_index];
+ return &to_hv_vcpu(vcpu)->stimer[timer_index];
}
-static inline struct kvm_vcpu *stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer)
+static inline struct kvm_vcpu *hv_stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer)
{
struct kvm_vcpu_hv *hv_vcpu;
hv_vcpu = container_of(stimer - stimer->index, struct kvm_vcpu_hv,
stimer[0]);
- return hv_vcpu_to_vcpu(hv_vcpu);
+ return hv_vcpu->vcpu;
}
static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
- return !bitmap_empty(vcpu->arch.hyperv.stimer_pending_bitmap,
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+
+ if (!hv_vcpu)
+ return false;
+
+ return !bitmap_empty(hv_vcpu->stimer_pending_bitmap,
HV_SYNIC_STIMER_COUNT);
}
@@ -125,6 +136,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
+void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
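The accessor changes in hyperv.h assume the per-vCPU Hyper-V context is now allocated lazily: to_hv_vcpu() can return NULL until kvm_hv_vcpu_init() has run, and kvm_hv_get_vpindex() falls back to the vCPU index in that case. A minimal, self-contained sketch of that pattern follows, with simplified stand-in names rather than the kernel's structures.

/*
 * Lazy per-vCPU context: allocated on first use, accessors tolerate NULL.
 * All names here are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

struct hv_vcpu_ctx {
	unsigned int vp_index;
};

struct vcpu {
	unsigned int idx;
	struct hv_vcpu_ctx *hv;	/* NULL until the guest touches Hyper-V state */
};

static struct hv_vcpu_ctx *to_hv(struct vcpu *v)
{
	return v->hv;			/* callers must tolerate NULL */
}

static int hv_ctx_init(struct vcpu *v)
{
	struct hv_vcpu_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return -1;
	ctx->vp_index = v->idx;		/* default VP index mirrors the vcpu index */
	v->hv = ctx;
	return 0;
}

/* analogue of kvm_hv_get_vpindex(): safe with or without a context */
static unsigned int get_vp_index(struct vcpu *v)
{
	struct hv_vcpu_ctx *ctx = to_hv(v);

	return ctx ? ctx->vp_index : v->idx;
}

int main(void)
{
	struct vcpu v = { .idx = 3, .hv = NULL };

	printf("before init: vp_index=%u\n", get_vp_index(&v));
	if (hv_ctx_init(&v))		/* allocate on first Hyper-V MSR access */
		return 1;
	to_hv(&v)->vp_index = 7;	/* guest reassigns its VP index */
	printf("after init:  vp_index=%u\n", get_vp_index(&v));
	free(v.hv);
	return 0;
}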
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 814698e5b152..172b05343cfd 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -14,6 +14,7 @@
#include "irq.h"
#include "i8254.h"
#include "x86.h"
+#include "xen.h"
/*
* check if there are pending timer events
@@ -56,6 +57,9 @@ int kvm_cpu_has_extint(struct kvm_vcpu *v)
if (!lapic_in_kernel(v))
return v->arch.interrupt.injected;
+ if (kvm_xen_has_interrupt(v))
+ return 1;
+
if (!kvm_apic_accept_pic_intr(v))
return 0;
@@ -110,6 +114,9 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
if (!lapic_in_kernel(v))
return v->arch.interrupt.nr;
+ if (kvm_xen_has_interrupt(v))
+ return v->kvm->arch.xen.upcall_vector;
+
if (irqchip_split(v->kvm)) {
int vector = v->arch.pending_external_vector;
@@ -143,8 +150,7 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
__kvm_migrate_apic_timer(vcpu);
__kvm_migrate_pit_timer(vcpu);
- if (kvm_x86_ops.migrate_timers)
- kvm_x86_ops.migrate_timers(vcpu);
+ static_call_cond(kvm_x86_migrate_timers)(vcpu);
}
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
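The __kvm_migrate_timers() hunk replaces an explicit NULL test of an optional kvm_x86_ops callback with static_call_cond(), which behaves as a no-op when the op is absent. A plain function-pointer analogue of those semantics, with invented names and no jump-label patching:

#include <stdio.h>

struct vcpu { int id; };

struct vendor_ops {
	void (*migrate_timers)(struct vcpu *v);	/* optional */
	int  (*get_cpl)(struct vcpu *v);	/* mandatory */
};

/* stands in for static_call_cond(kvm_x86_migrate_timers)(vcpu) */
static void call_migrate_timers(const struct vendor_ops *ops, struct vcpu *v)
{
	if (ops->migrate_timers)
		ops->migrate_timers(v);
}

static int fake_get_cpl(struct vcpu *v)
{
	(void)v;
	return 0;
}

int main(void)
{
	struct vendor_ops ops = { .migrate_timers = NULL, .get_cpl = fake_get_cpl };
	struct vcpu v = { .id = 0 };

	call_migrate_timers(&ops, &v);		/* hook absent: silently skipped */
	printf("cpl=%d\n", ops.get_cpl(&v));	/* mandatory hook: always called */
	return 0;
}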
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index f15bc16de07c..2e11da2f5621 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -9,31 +9,6 @@
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
| X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)
-static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
- enum kvm_reg reg)
-{
- return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
-}
-
-static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
- enum kvm_reg reg)
-{
- return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
-}
-
-static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
- enum kvm_reg reg)
-{
- __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
-}
-
-static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
- enum kvm_reg reg)
-{
- __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
- __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
-}
-
#define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{ \
@@ -43,7 +18,6 @@ static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu, \
unsigned long val) \
{ \
vcpu->arch.regs[VCPU_REGS_##uname] = val; \
- kvm_register_mark_dirty(vcpu, VCPU_REGS_##uname); \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
@@ -63,13 +37,38 @@ BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif
+static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
+static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
+static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
+static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+ __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
return 0;
if (!kvm_register_is_available(vcpu, reg))
- kvm_x86_ops.cache_reg(vcpu, reg);
+ static_call(kvm_x86_cache_reg)(vcpu, reg);
return vcpu->arch.regs[reg];
}
@@ -109,7 +108,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
might_sleep(); /* on svm */
if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
- kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR);
+ static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);
return vcpu->arch.walk_mmu->pdptrs[index];
}
@@ -119,7 +118,7 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
!kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
- kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0);
+ static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);
return vcpu->arch.cr0 & mask;
}
@@ -133,14 +132,14 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
!kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
- kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4);
+ static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4);
return vcpu->arch.cr4 & mask;
}
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
- kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3);
+ static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3);
return vcpu->arch.cr3;
}
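The helpers in kvm_cache_regs.h rely on two per-vCPU bitmaps: a register is pulled from the vendor backend via the cache_reg hook only when its regs_avail bit is clear, and modified copies are flagged in regs_dirty for later write-back. A standalone sketch of that caching scheme, using invented names and a fake backend in place of VMCS/VMCB reads:

#include <stdint.h>
#include <stdio.h>

#define NR_REGS 16

struct cpu {
	uint64_t regs[NR_REGS];
	uint32_t avail;		/* bit set: cached copy is valid */
	uint32_t dirty;		/* bit set: cached copy must be written back */
};

/* stand-in for the vendor cache_reg() hook */
static void backend_fetch(struct cpu *c, int reg)
{
	c->regs[reg] = 0x1000u + reg;	/* pretend hardware-state read */
	c->avail |= 1u << reg;
}

static uint64_t reg_read(struct cpu *c, int reg)
{
	if (!(c->avail & (1u << reg)))
		backend_fetch(c, reg);
	return c->regs[reg];
}

static void reg_write(struct cpu *c, int reg, uint64_t val)
{
	c->regs[reg] = val;
	c->avail |= 1u << reg;
	c->dirty |= 1u << reg;	/* flush to hardware state before next entry */
}

int main(void)
{
	struct cpu c = { { 0 }, 0, 0 };

	printf("read : %#llx\n", (unsigned long long)reg_read(&c, 3));
	reg_write(&c, 3, 42);
	printf("dirty: %#x\n", c.dirty);	/* bit 3 set */
	return 0;
}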
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index 43c93ffa76ed..0d359115429a 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -205,7 +205,7 @@ struct x86_emulate_ops {
ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr);
int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val);
int (*cpl)(struct x86_emulate_ctxt *ctxt);
- int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
+ void (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
u64 (*get_smbase)(struct x86_emulate_ctxt *ctxt);
void (*set_smbase)(struct x86_emulate_ctxt *ctxt, u64 smbase);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 43cceadd073e..45d40bfacb7c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -91,8 +91,8 @@ static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
-struct static_key_deferred apic_hw_disabled __read_mostly;
-struct static_key_deferred apic_sw_disabled __read_mostly;
+__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
+__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);
static inline int apic_enabled(struct kvm_lapic *apic)
{
@@ -290,9 +290,9 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
if (enabled != apic->sw_enabled) {
apic->sw_enabled = enabled;
if (enabled)
- static_key_slow_dec_deferred(&apic_sw_disabled);
+ static_branch_slow_dec_deferred(&apic_sw_disabled);
else
- static_key_slow_inc(&apic_sw_disabled.key);
+ static_branch_inc(&apic_sw_disabled.key);
atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}
@@ -484,7 +484,7 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
if (unlikely(vcpu->arch.apicv_active)) {
/* need to update RVI */
kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
- kvm_x86_ops.hwapic_irr_update(vcpu,
+ static_call(kvm_x86_hwapic_irr_update)(vcpu,
apic_find_highest_irr(apic));
} else {
apic->irr_pending = false;
@@ -515,7 +515,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
* just set SVI.
*/
if (unlikely(vcpu->arch.apicv_active))
- kvm_x86_ops.hwapic_isr_update(vcpu, vec);
+ static_call(kvm_x86_hwapic_isr_update)(vcpu, vec);
else {
++apic->isr_count;
BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
@@ -563,8 +563,8 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
* and must be left alone.
*/
if (unlikely(vcpu->arch.apicv_active))
- kvm_x86_ops.hwapic_isr_update(vcpu,
- apic_find_highest_isr(apic));
+ static_call(kvm_x86_hwapic_isr_update)(vcpu,
+ apic_find_highest_isr(apic));
else {
--apic->isr_count;
BUG_ON(apic->isr_count < 0);
@@ -701,7 +701,7 @@ static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
int highest_irr;
if (apic->vcpu->arch.apicv_active)
- highest_irr = kvm_x86_ops.sync_pir_to_irr(apic->vcpu);
+ highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
else
highest_irr = apic_find_highest_irr(apic);
if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
@@ -1090,7 +1090,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
apic->regs + APIC_TMR);
}
- if (kvm_x86_ops.deliver_posted_interrupt(vcpu, vector)) {
+ if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) {
kvm_lapic_set_irr(vector, apic);
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
@@ -1245,7 +1245,8 @@ static int apic_set_eoi(struct kvm_lapic *apic)
apic_clear_isr(vector, apic);
apic_update_ppr(apic);
- if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
+ if (to_hv_vcpu(apic->vcpu) &&
+ test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
kvm_hv_synic_send_eoi(apic->vcpu, vector);
kvm_ioapic_send_eoi(apic, vector);
@@ -1814,7 +1815,7 @@ static void cancel_hv_timer(struct kvm_lapic *apic)
{
WARN_ON(preemptible());
WARN_ON(!apic->lapic_timer.hv_timer_in_use);
- kvm_x86_ops.cancel_hv_timer(apic->vcpu);
+ static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
apic->lapic_timer.hv_timer_in_use = false;
}
@@ -1831,7 +1832,7 @@ static bool start_hv_timer(struct kvm_lapic *apic)
if (!ktimer->tscdeadline)
return false;
- if (kvm_x86_ops.set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
+ if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
return false;
ktimer->hv_timer_in_use = true;
@@ -2175,10 +2176,10 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
hrtimer_cancel(&apic->lapic_timer.timer);
if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
- static_key_slow_dec_deferred(&apic_hw_disabled);
+ static_branch_slow_dec_deferred(&apic_hw_disabled);
if (!apic->sw_enabled)
- static_key_slow_dec_deferred(&apic_sw_disabled);
+ static_branch_slow_dec_deferred(&apic_sw_disabled);
if (apic->regs)
free_page((unsigned long)apic->regs);
@@ -2250,9 +2251,9 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
if (value & MSR_IA32_APICBASE_ENABLE) {
kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
- static_key_slow_dec_deferred(&apic_hw_disabled);
+ static_branch_slow_dec_deferred(&apic_hw_disabled);
} else {
- static_key_slow_inc(&apic_hw_disabled.key);
+ static_branch_inc(&apic_hw_disabled.key);
atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}
}
@@ -2261,7 +2262,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
- kvm_x86_ops.set_virtual_apic_mode(vcpu);
+ static_call(kvm_x86_set_virtual_apic_mode)(vcpu);
apic->base_address = apic->vcpu->arch.apic_base &
MSR_IA32_APICBASE_BASE;
@@ -2338,9 +2339,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu->arch.pv_eoi.msr_val = 0;
apic_update_ppr(apic);
if (vcpu->arch.apicv_active) {
- kvm_x86_ops.apicv_post_state_restore(vcpu);
- kvm_x86_ops.hwapic_irr_update(vcpu, -1);
- kvm_x86_ops.hwapic_isr_update(vcpu, -1);
+ static_call(kvm_x86_apicv_post_state_restore)(vcpu);
+ static_call(kvm_x86_hwapic_irr_update)(vcpu, -1);
+ static_call(kvm_x86_hwapic_isr_update)(vcpu, -1);
}
vcpu->arch.apic_arb_prio = 0;
@@ -2449,7 +2450,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
* thinking that APIC state has changed.
*/
vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
- static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
+ static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
return 0;
@@ -2512,7 +2513,7 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
*/
apic_clear_irr(vector, apic);
- if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
+ if (to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
/*
* For auto-EOI interrupts, there might be another pending
* interrupt above PPR, so check whether to raise another
@@ -2601,10 +2602,10 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
kvm_apic_update_apicv(vcpu);
apic->highest_isr_cache = -1;
if (vcpu->arch.apicv_active) {
- kvm_x86_ops.apicv_post_state_restore(vcpu);
- kvm_x86_ops.hwapic_irr_update(vcpu,
+ static_call(kvm_x86_apicv_post_state_restore)(vcpu);
+ static_call(kvm_x86_hwapic_irr_update)(vcpu,
apic_find_highest_irr(apic));
- kvm_x86_ops.hwapic_isr_update(vcpu,
+ static_call(kvm_x86_hwapic_isr_update)(vcpu,
apic_find_highest_isr(apic));
}
kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -2904,13 +2905,6 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
}
}
-void kvm_lapic_init(void)
-{
- /* do not patch jump label more than once per second */
- jump_label_rate_limit(&apic_hw_disabled, HZ);
- jump_label_rate_limit(&apic_sw_disabled, HZ);
-}
-
void kvm_lapic_exit(void)
{
static_key_deferred_flush(&apic_hw_disabled);
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 4fb86e3a9dd3..997c45a5963a 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -6,6 +6,8 @@
#include <linux/kvm_host.h>
+#include "hyperv.h"
+
#define KVM_APIC_INIT 0
#define KVM_APIC_SIPI 1
#define KVM_APIC_LVT_NUM 6
@@ -125,13 +127,7 @@ int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
-static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
-}
-
int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len);
-void kvm_lapic_init(void);
void kvm_lapic_exit(void);
#define VEC_POS(v) ((v) & (32 - 1))
@@ -172,29 +168,29 @@ static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 va
__kvm_lapic_set_reg(apic->regs, reg_off, val);
}
-extern struct static_key kvm_no_apic_vcpu;
+DECLARE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
static inline bool lapic_in_kernel(struct kvm_vcpu *vcpu)
{
- if (static_key_false(&kvm_no_apic_vcpu))
+ if (static_branch_unlikely(&kvm_has_noapic_vcpu))
return vcpu->arch.apic;
return true;
}
-extern struct static_key_deferred apic_hw_disabled;
+extern struct static_key_false_deferred apic_hw_disabled;
static inline int kvm_apic_hw_enabled(struct kvm_lapic *apic)
{
- if (static_key_false(&apic_hw_disabled.key))
+ if (static_branch_unlikely(&apic_hw_disabled.key))
return apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE;
return MSR_IA32_APICBASE_ENABLE;
}
-extern struct static_key_deferred apic_sw_disabled;
+extern struct static_key_false_deferred apic_sw_disabled;
static inline bool kvm_apic_sw_enabled(struct kvm_lapic *apic)
{
- if (static_key_false(&apic_sw_disabled.key))
+ if (static_branch_unlikely(&apic_sw_disabled.key))
return apic->sw_enabled;
return true;
}
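apic_hw_disabled/apic_sw_disabled are inverted, deferred static keys: they default to false, so kvm_apic_hw_enabled()/kvm_apic_sw_enabled() take a straight-line fast path while no APIC is disabled, and the deferred form rate-limits the disable transition to avoid frequent code patching. The plain-C sketch below models only the counting logic, not the jump-label patching; names and types are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

static int apic_hw_disabled_count;	/* static_branch_inc/_slow_dec_deferred analogue */

static bool apic_hw_disabled_key(void)
{
	return __builtin_expect(apic_hw_disabled_count > 0, 0);
}

struct lapic {
	bool hw_enabled;
};

static bool apic_hw_enabled(const struct lapic *apic)
{
	if (apic_hw_disabled_key())	/* unlikely: some vCPU disabled its APIC */
		return apic->hw_enabled;
	return true;			/* fast path: every APIC is hw-enabled */
}

int main(void)
{
	struct lapic a = { .hw_enabled = false };

	printf("%d\n", apic_hw_enabled(&a));	/* 1: key off, fast path wins */
	apic_hw_disabled_count++;		/* a vCPU cleared APICBASE.EN */
	printf("%d\n", apic_hw_enabled(&a));	/* 0: slow path checks this APIC */
	return 0;
}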
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 581925e476d6..c68bfc3e2402 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -44,8 +44,15 @@
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3
-static inline u64 rsvd_bits(int s, int e)
+static __always_inline u64 rsvd_bits(int s, int e)
{
+ BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);
+
+ if (__builtin_constant_p(e))
+ BUILD_BUG_ON(e > 63);
+ else
+ e &= 63;
+
if (e < s)
return 0;
@@ -95,7 +102,7 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
if (!VALID_PAGE(root_hpa))
return;
- kvm_x86_ops.load_mmu_pgd(vcpu, root_hpa | kvm_get_active_pcid(vcpu),
+ static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa | kvm_get_active_pcid(vcpu),
vcpu->arch.mmu->shadow_root_level);
}
@@ -145,7 +152,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
*
* TODO: introduce APIs to split these two cases.
*/
-static inline int is_writable_pte(unsigned long pte)
+static inline bool is_writable_pte(unsigned long pte)
{
return pte & PT_WRITABLE_MASK;
}
@@ -167,8 +174,8 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
unsigned pte_access, unsigned pte_pkey,
unsigned pfec)
{
- int cpl = kvm_x86_ops.get_cpl(vcpu);
- unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
+ int cpl = static_call(kvm_x86_get_cpl)(vcpu);
+ unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
/*
* If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1.
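rsvd_bits(s, e) yields a mask with bits s through e (inclusive) set; the mmu.h hunk above adds compile-time checks for constant arguments and clamps a non-constant e to 63. A standalone version with a runtime assert in place of BUILD_BUG_ON(); the shift-based formula here is one common construction and may differ cosmetically from the kernel's body:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t rsvd_bits(int s, int e)
{
	assert(e <= 63);
	if (e < s)
		return 0;
	/* (2 << (e - s)) - 1 is a run of e - s + 1 ones; shift it up to bit s */
	return ((2ULL << (e - s)) - 1) << s;
}

int main(void)
{
	/* bits 52..62 -> 0x7ff0000000000000 */
	printf("%#llx\n", (unsigned long long)rsvd_bits(52, 62));
	/* bits 12..51 */
	printf("%#llx\n", (unsigned long long)rsvd_bits(12, 51));
	return 0;
}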
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 6d16481aa29d..d75524bc8423 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -190,7 +190,7 @@ static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
int ret = -ENOTSUPP;
if (range && kvm_x86_ops.tlb_remote_flush_with_range)
- ret = kvm_x86_ops.tlb_remote_flush_with_range(kvm, range);
+ ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);
if (ret)
kvm_flush_remote_tlbs(kvm);
@@ -844,17 +844,17 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
int i, count = 0;
if (!rmap_head->val) {
- rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
+ rmap_printk("%p %llx 0->1\n", spte, *spte);
rmap_head->val = (unsigned long)spte;
} else if (!(rmap_head->val & 1)) {
- rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
+ rmap_printk("%p %llx 1->many\n", spte, *spte);
desc = mmu_alloc_pte_list_desc(vcpu);
desc->sptes[0] = (u64 *)rmap_head->val;
desc->sptes[1] = spte;
rmap_head->val = (unsigned long)desc | 1;
++count;
} else {
- rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
+ rmap_printk("%p %llx many->many\n", spte, *spte);
desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
while (desc->sptes[PTE_LIST_EXT-1]) {
count += PTE_LIST_EXT;
@@ -906,14 +906,14 @@ static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
pr_err("%s: %p 0->BUG\n", __func__, spte);
BUG();
} else if (!(rmap_head->val & 1)) {
- rmap_printk("%s: %p 1->0\n", __func__, spte);
+ rmap_printk("%p 1->0\n", spte);
if ((u64 *)rmap_head->val != spte) {
pr_err("%s: %p 1->BUG\n", __func__, spte);
BUG();
}
rmap_head->val = 0;
} else {
- rmap_printk("%s: %p many->many\n", __func__, spte);
+ rmap_printk("%p many->many\n", spte);
desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
prev_desc = NULL;
while (desc) {
@@ -1115,7 +1115,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
!(pt_protect && spte_can_locklessly_be_made_writable(spte)))
return false;
- rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
+ rmap_printk("spte %p %llx\n", sptep, *sptep);
if (pt_protect)
spte &= ~SPTE_MMU_WRITEABLE;
@@ -1142,7 +1142,7 @@ static bool spte_clear_dirty(u64 *sptep)
{
u64 spte = *sptep;
- rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
+ rmap_printk("spte %p %llx\n", sptep, *sptep);
MMU_WARN_ON(!spte_ad_enabled(spte));
spte &= ~shadow_dirty_mask;
@@ -1165,7 +1165,8 @@ static bool spte_wrprot_for_clear_dirty(u64 *sptep)
* - W bit on ad-disabled SPTEs.
* Returns true iff any D or W bits were cleared.
*/
-static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
+static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ struct kvm_memory_slot *slot)
{
u64 *sptep;
struct rmap_iterator iter;
@@ -1180,35 +1181,6 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
return flush;
}
-static bool spte_set_dirty(u64 *sptep)
-{
- u64 spte = *sptep;
-
- rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
-
- /*
- * Similar to the !kvm_x86_ops.slot_disable_log_dirty case,
- * do not bother adding back write access to pages marked
- * SPTE_AD_WRPROT_ONLY_MASK.
- */
- spte |= shadow_dirty_mask;
-
- return mmu_spte_update(sptep, spte);
-}
-
-static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
-{
- u64 *sptep;
- struct rmap_iterator iter;
- bool flush = false;
-
- for_each_rmap_spte(rmap_head, &iter, sptep)
- if (spte_ad_enabled(*sptep))
- flush |= spte_set_dirty(sptep);
-
- return flush;
-}
-
/**
* kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
* @kvm: kvm instance
@@ -1225,7 +1197,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
{
struct kvm_rmap_head *rmap_head;
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
slot->base_gfn + gfn_offset, mask, true);
while (mask) {
@@ -1248,25 +1220,24 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
*
* Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
*/
-void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- gfn_t gfn_offset, unsigned long mask)
+static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask)
{
struct kvm_rmap_head *rmap_head;
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
slot->base_gfn + gfn_offset, mask, false);
while (mask) {
rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
PG_LEVEL_4K, slot);
- __rmap_clear_dirty(kvm, rmap_head);
+ __rmap_clear_dirty(kvm, rmap_head, slot);
/* clear the first set bit */
mask &= mask - 1;
}
}
-EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);
/**
* kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
@@ -1282,19 +1253,15 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
- if (kvm_x86_ops.enable_log_dirty_pt_masked)
- kvm_x86_ops.enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
- mask);
+ if (kvm_x86_ops.cpu_dirty_log_size)
+ kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
else
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}
int kvm_cpu_dirty_log_size(void)
{
- if (kvm_x86_ops.cpu_dirty_log_size)
- return kvm_x86_ops.cpu_dirty_log_size();
-
- return 0;
+ return kvm_x86_ops.cpu_dirty_log_size;
}
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
@@ -1309,7 +1276,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
write_protected |= __rmap_write_protect(kvm, rmap_head, true);
}
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
write_protected |=
kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);
@@ -1324,14 +1291,15 @@ static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
}
-static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
+static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ struct kvm_memory_slot *slot)
{
u64 *sptep;
struct rmap_iterator iter;
bool flush = false;
while ((sptep = rmap_get_first(rmap_head, &iter))) {
- rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);
+ rmap_printk("spte %p %llx.\n", sptep, *sptep);
pte_list_remove(rmap_head, sptep);
flush = true;
@@ -1344,7 +1312,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
struct kvm_memory_slot *slot, gfn_t gfn, int level,
unsigned long data)
{
- return kvm_zap_rmapp(kvm, rmap_head);
+ return kvm_zap_rmapp(kvm, rmap_head, slot);
}
static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
@@ -1363,7 +1331,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
restart:
for_each_rmap_spte(rmap_head, &iter, sptep) {
- rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
+ rmap_printk("spte %p %llx gfn %llx (%d)\n",
sptep, *sptep, gfn, level);
need_flush = 1;
@@ -1456,16 +1424,17 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
slot_rmap_walk_okay(_iter_); \
slot_rmap_walk_next(_iter_))
-static int kvm_handle_hva_range(struct kvm *kvm,
- unsigned long start,
- unsigned long end,
- unsigned long data,
- int (*handler)(struct kvm *kvm,
- struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot,
- gfn_t gfn,
- int level,
- unsigned long data))
+static __always_inline int
+kvm_handle_hva_range(struct kvm *kvm,
+ unsigned long start,
+ unsigned long end,
+ unsigned long data,
+ int (*handler)(struct kvm *kvm,
+ struct kvm_rmap_head *rmap_head,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn,
+ int level,
+ unsigned long data))
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
@@ -1521,7 +1490,7 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
r = kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
r |= kvm_tdp_mmu_zap_hva_range(kvm, start, end);
return r;
@@ -1533,7 +1502,7 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
r = kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
r |= kvm_tdp_mmu_set_spte_hva(kvm, hva, &pte);
return r;
@@ -1588,7 +1557,7 @@ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
int young = false;
young = kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
young |= kvm_tdp_mmu_age_hva_range(kvm, start, end);
return young;
@@ -1599,7 +1568,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
int young = false;
young = kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
young |= kvm_tdp_mmu_test_age_hva(kvm, hva);
return young;
@@ -1723,13 +1692,6 @@ static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
return 0;
}
-static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *sp, u64 *spte,
- const void *pte)
-{
- WARN_ON(1);
-}
-
#define KVM_PAGE_ARRAY_NR 16
struct kvm_mmu_pages {
@@ -2016,9 +1978,9 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
flush |= kvm_sync_page(vcpu, sp, &invalid_list);
mmu_pages_clear_parents(&parents);
}
- if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {
+ if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
- cond_resched_lock(&vcpu->kvm->mmu_lock);
+ cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
flush = false;
}
}
@@ -2417,7 +2379,7 @@ static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
return 0;
restart:
- list_for_each_entry_safe(sp, tmp, &kvm->arch.active_mmu_pages, link) {
+ list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
/*
* Don't zap active root pages, the page itself can't be freed
* and zapping it will just force vCPUs to realloc and reload.
@@ -2470,7 +2432,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
*/
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
{
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
@@ -2481,7 +2443,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
}
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
@@ -2492,7 +2454,7 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
r = 0;
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
sp->role.word);
@@ -2500,11 +2462,25 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
}
kvm_mmu_commit_zap_page(kvm, &invalid_list);
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
+
+ return r;
+}
+
+static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
+{
+ gpa_t gpa;
+ int r;
+
+ if (vcpu->arch.mmu->direct_map)
+ return 0;
+
+ gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
+
+ r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
return r;
}
-EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
@@ -2758,11 +2734,18 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
if (sp->role.level > PG_LEVEL_4K)
return;
+ /*
+ * If addresses are being invalidated, skip prefetching to avoid
+ * accidentally prefetching those addresses.
+ */
+ if (unlikely(vcpu->kvm->mmu_notifier_count))
+ return;
+
__direct_pte_prefetch(vcpu, sp, sptep);
}
-static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn,
- kvm_pfn_t pfn, struct kvm_memory_slot *slot)
+static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
+ struct kvm_memory_slot *slot)
{
unsigned long hva;
pte_t *pte;
@@ -2781,19 +2764,36 @@ static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn,
*/
hva = __gfn_to_hva_memslot(slot, gfn);
- pte = lookup_address_in_mm(vcpu->kvm->mm, hva, &level);
+ pte = lookup_address_in_mm(kvm->mm, hva, &level);
if (unlikely(!pte))
return PG_LEVEL_4K;
return level;
}
+int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn, kvm_pfn_t pfn, int max_level)
+{
+ struct kvm_lpage_info *linfo;
+
+ max_level = min(max_level, max_huge_page_level);
+ for ( ; max_level > PG_LEVEL_4K; max_level--) {
+ linfo = lpage_info_slot(gfn, slot, max_level);
+ if (!linfo->disallow_lpage)
+ break;
+ }
+
+ if (max_level == PG_LEVEL_4K)
+ return PG_LEVEL_4K;
+
+ return host_pfn_mapping_level(kvm, gfn, pfn, slot);
+}
+
int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
int max_level, kvm_pfn_t *pfnp,
bool huge_page_disallowed, int *req_level)
{
struct kvm_memory_slot *slot;
- struct kvm_lpage_info *linfo;
kvm_pfn_t pfn = *pfnp;
kvm_pfn_t mask;
int level;
@@ -2810,17 +2810,7 @@ int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
if (!slot)
return PG_LEVEL_4K;
- max_level = min(max_level, max_huge_page_level);
- for ( ; max_level > PG_LEVEL_4K; max_level--) {
- linfo = lpage_info_slot(gfn, slot, max_level);
- if (!linfo->disallow_lpage)
- break;
- }
-
- if (max_level == PG_LEVEL_4K)
- return PG_LEVEL_4K;
-
- level = host_pfn_mapping_level(vcpu, gfn, pfn, slot);
+ level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
if (level == PG_LEVEL_4K)
return level;
@@ -3161,7 +3151,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
if (kvm_mmu_put_root(kvm, sp)) {
- if (sp->tdp_mmu_page)
+ if (is_tdp_mmu_page(sp))
kvm_tdp_mmu_free_root(kvm, sp);
else if (sp->role.invalid)
kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
@@ -3192,7 +3182,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
return;
}
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
@@ -3215,7 +3205,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
}
kvm_mmu_commit_zap_page(kvm, &invalid_list);
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
@@ -3236,16 +3226,16 @@ static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
{
struct kvm_mmu_page *sp;
- spin_lock(&vcpu->kvm->mmu_lock);
+ write_lock(&vcpu->kvm->mmu_lock);
if (make_mmu_pages_available(vcpu)) {
- spin_unlock(&vcpu->kvm->mmu_lock);
+ write_unlock(&vcpu->kvm->mmu_lock);
return INVALID_PAGE;
}
sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
++sp->root_count;
- spin_unlock(&vcpu->kvm->mmu_lock);
+ write_unlock(&vcpu->kvm->mmu_lock);
return __pa(sp->spt);
}
@@ -3255,7 +3245,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
hpa_t root;
unsigned i;
- if (vcpu->kvm->arch.tdp_mmu_enabled) {
+ if (is_tdp_mmu_enabled(vcpu->kvm)) {
root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
if (!VALID_PAGE(root))
@@ -3416,17 +3406,17 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
!smp_load_acquire(&sp->unsync_children))
return;
- spin_lock(&vcpu->kvm->mmu_lock);
+ write_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
mmu_sync_children(vcpu, sp);
kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
- spin_unlock(&vcpu->kvm->mmu_lock);
+ write_unlock(&vcpu->kvm->mmu_lock);
return;
}
- spin_lock(&vcpu->kvm->mmu_lock);
+ write_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
for (i = 0; i < 4; ++i) {
@@ -3440,9 +3430,8 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
}
kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
- spin_unlock(&vcpu->kvm->mmu_lock);
+ write_unlock(&vcpu->kvm->mmu_lock);
}
-EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
u32 access, struct x86_exception *exception)
@@ -3658,8 +3647,8 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
}
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
- gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
- bool *writable)
+ gpa_t cr2_or_gpa, kvm_pfn_t *pfn, hva_t *hva,
+ bool write, bool *writable)
{
struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
bool async;
@@ -3672,7 +3661,8 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
}
async = false;
- *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
+ *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async,
+ write, writable, hva);
if (!async)
return false; /* *pfn has correct page already */
@@ -3686,7 +3676,8 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
return true;
}
- *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
+ *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL,
+ write, writable, hva);
return false;
}
@@ -3699,6 +3690,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
gfn_t gfn = gpa >> PAGE_SHIFT;
unsigned long mmu_seq;
kvm_pfn_t pfn;
+ hva_t hva;
int r;
if (page_fault_handle_page_track(vcpu, error_code, gfn))
@@ -3717,15 +3709,21 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
- if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
+ if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, &hva,
+ write, &map_writable))
return RET_PF_RETRY;
if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
return r;
r = RET_PF_RETRY;
- spin_lock(&vcpu->kvm->mmu_lock);
- if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
+
+ if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+ read_lock(&vcpu->kvm->mmu_lock);
+ else
+ write_lock(&vcpu->kvm->mmu_lock);
+
+ if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva))
goto out_unlock;
r = make_mmu_pages_available(vcpu);
if (r)
@@ -3739,7 +3737,10 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
prefault, is_tdp);
out_unlock:
- spin_unlock(&vcpu->kvm->mmu_lock);
+ if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+ read_unlock(&vcpu->kvm->mmu_lock);
+ else
+ write_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
return r;
}
@@ -3813,7 +3814,6 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
context->gva_to_gpa = nonpaging_gva_to_gpa;
context->sync_page = nonpaging_sync_page;
context->invlpg = NULL;
- context->update_pte = nonpaging_update_pte;
context->root_level = 0;
context->shadow_root_level = PT32E_ROOT_LEVEL;
context->direct_map = true;
@@ -3984,20 +3984,27 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
static void
__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
struct rsvd_bits_validate *rsvd_check,
- int maxphyaddr, int level, bool nx, bool gbpages,
+ u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
bool pse, bool amd)
{
- u64 exb_bit_rsvd = 0;
u64 gbpages_bit_rsvd = 0;
u64 nonleaf_bit8_rsvd = 0;
+ u64 high_bits_rsvd;
rsvd_check->bad_mt_xwr = 0;
- if (!nx)
- exb_bit_rsvd = rsvd_bits(63, 63);
if (!gbpages)
gbpages_bit_rsvd = rsvd_bits(7, 7);
+ if (level == PT32E_ROOT_LEVEL)
+ high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
+ else
+ high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
+
+ /* Note, NX doesn't exist in PDPTEs; this is handled below. */
+ if (!nx)
+ high_bits_rsvd |= rsvd_bits(63, 63);
+
/*
* Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
* leaf entries) on AMD CPUs only.
@@ -4026,45 +4033,39 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
break;
case PT32E_ROOT_LEVEL:
- rsvd_check->rsvd_bits_mask[0][2] =
- rsvd_bits(maxphyaddr, 63) |
- rsvd_bits(5, 8) | rsvd_bits(1, 2); /* PDPTE */
- rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
- rsvd_bits(maxphyaddr, 62); /* PDE */
- rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
- rsvd_bits(maxphyaddr, 62); /* PTE */
- rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
- rsvd_bits(maxphyaddr, 62) |
- rsvd_bits(13, 20); /* large page */
+ rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
+ high_bits_rsvd |
+ rsvd_bits(5, 8) |
+ rsvd_bits(1, 2); /* PDPTE */
+ rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd; /* PDE */
+ rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; /* PTE */
+ rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
+ rsvd_bits(13, 20); /* large page */
rsvd_check->rsvd_bits_mask[1][0] =
rsvd_check->rsvd_bits_mask[0][0];
break;
case PT64_ROOT_5LEVEL:
- rsvd_check->rsvd_bits_mask[0][4] = exb_bit_rsvd |
- nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
- rsvd_bits(maxphyaddr, 51);
+ rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
+ nonleaf_bit8_rsvd |
+ rsvd_bits(7, 7);
rsvd_check->rsvd_bits_mask[1][4] =
rsvd_check->rsvd_bits_mask[0][4];
fallthrough;
case PT64_ROOT_4LEVEL:
- rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
- nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
- rsvd_bits(maxphyaddr, 51);
- rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
- gbpages_bit_rsvd |
- rsvd_bits(maxphyaddr, 51);
- rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
- rsvd_bits(maxphyaddr, 51);
- rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
- rsvd_bits(maxphyaddr, 51);
+ rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
+ nonleaf_bit8_rsvd |
+ rsvd_bits(7, 7);
+ rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
+ gbpages_bit_rsvd;
+ rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
+ rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
rsvd_check->rsvd_bits_mask[1][3] =
rsvd_check->rsvd_bits_mask[0][3];
- rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd |
- gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) |
- rsvd_bits(13, 29);
- rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
- rsvd_bits(maxphyaddr, 51) |
- rsvd_bits(13, 20); /* large page */
+ rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
+ gbpages_bit_rsvd |
+ rsvd_bits(13, 29);
+ rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
+ rsvd_bits(13, 20); /* large page */
rsvd_check->rsvd_bits_mask[1][0] =
rsvd_check->rsvd_bits_mask[0][0];
break;
@@ -4075,8 +4076,8 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{
__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
- cpuid_maxphyaddr(vcpu), context->root_level,
- context->nx,
+ vcpu->arch.reserved_gpa_bits,
+ context->root_level, context->nx,
guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
is_pse(vcpu),
guest_cpuid_is_amd_or_hygon(vcpu));
@@ -4084,27 +4085,22 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
static void
__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
- int maxphyaddr, bool execonly)
+ u64 pa_bits_rsvd, bool execonly)
{
+ u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
u64 bad_mt_xwr;
- rsvd_check->rsvd_bits_mask[0][4] =
- rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
- rsvd_check->rsvd_bits_mask[0][3] =
- rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
- rsvd_check->rsvd_bits_mask[0][2] =
- rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
- rsvd_check->rsvd_bits_mask[0][1] =
- rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
- rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);
+ rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
+ rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
+ rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6);
+ rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6);
+ rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
/* large page */
rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
- rsvd_check->rsvd_bits_mask[1][2] =
- rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29);
- rsvd_check->rsvd_bits_mask[1][1] =
- rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20);
+ rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29);
+ rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20);
rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */
@@ -4123,7 +4119,12 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
struct kvm_mmu *context, bool execonly)
{
__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
- cpuid_maxphyaddr(vcpu), execonly);
+ vcpu->arch.reserved_gpa_bits, execonly);
+}
+
+static inline u64 reserved_hpa_bits(void)
+{
+ return rsvd_bits(shadow_phys_bits, 63);
}
/*
@@ -4145,7 +4146,7 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
*/
shadow_zero_check = &context->shadow_zero_check;
__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
- shadow_phys_bits,
+ reserved_hpa_bits(),
context->shadow_root_level, uses_nx,
guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
is_pse(vcpu), true);
@@ -4182,14 +4183,13 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
if (boot_cpu_is_amd())
__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
- shadow_phys_bits,
+ reserved_hpa_bits(),
context->shadow_root_level, false,
boot_cpu_has(X86_FEATURE_GBPAGES),
true, true);
else
__reset_rsvds_bits_mask_ept(shadow_zero_check,
- shadow_phys_bits,
- false);
+ reserved_hpa_bits(), false);
if (!shadow_me_mask)
return;
@@ -4209,7 +4209,7 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
struct kvm_mmu *context, bool execonly)
{
__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
- shadow_phys_bits, execonly);
+ reserved_hpa_bits(), execonly);
}
#define BYTE_MASK(access) \
@@ -4395,7 +4395,6 @@ static void paging64_init_context_common(struct kvm_vcpu *vcpu,
context->gva_to_gpa = paging64_gva_to_gpa;
context->sync_page = paging64_sync_page;
context->invlpg = paging64_invlpg;
- context->update_pte = paging64_update_pte;
context->shadow_root_level = level;
context->direct_map = false;
}
@@ -4424,7 +4423,6 @@ static void paging32_init_context(struct kvm_vcpu *vcpu,
context->gva_to_gpa = paging32_gva_to_gpa;
context->sync_page = paging32_sync_page;
context->invlpg = paging32_invlpg;
- context->update_pte = paging32_update_pte;
context->shadow_root_level = PT32E_ROOT_LEVEL;
context->direct_map = false;
}
@@ -4506,7 +4504,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
context->page_fault = kvm_tdp_page_fault;
context->sync_page = nonpaging_sync_page;
context->invlpg = NULL;
- context->update_pte = nonpaging_update_pte;
context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
context->direct_map = true;
context->get_guest_pgd = get_cr3;
@@ -4678,7 +4675,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->gva_to_gpa = ept_gva_to_gpa;
context->sync_page = ept_sync_page;
context->invlpg = ept_invlpg;
- context->update_pte = ept_update_pte;
context->root_level = level;
context->direct_map = false;
context->mmu_role.as_u64 = new_role.as_u64;
@@ -4811,7 +4807,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
if (r)
goto out;
kvm_mmu_load_pgd(vcpu);
- kvm_x86_ops.tlb_flush_current(vcpu);
+ static_call(kvm_x86_tlb_flush_current)(vcpu);
out:
return r;
}
@@ -4826,19 +4822,6 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);
-static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *sp, u64 *spte,
- const void *new)
-{
- if (sp->role.level != PG_LEVEL_4K) {
- ++vcpu->kvm->stat.mmu_pde_zapped;
- return;
- }
-
- ++vcpu->kvm->stat.mmu_pte_updated;
- vcpu->arch.mmu->update_pte(vcpu, sp, spte, new);
-}
-
static bool need_remote_flush(u64 old, u64 new)
{
if (!is_shadow_present_pte(old))
@@ -4954,22 +4937,6 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
return spte;
}
-/*
- * Ignore various flags when determining if a SPTE can be immediately
- * overwritten for the current MMU.
- * - level: explicitly checked in mmu_pte_write_new_pte(), and will never
- * match the current MMU role, as MMU's level tracks the root level.
- * - access: updated based on the new guest PTE
- * - quadrant: handled by get_written_sptes()
- * - invalid: always false (loop only walks valid shadow pages)
- */
-static const union kvm_mmu_page_role role_ign = {
- .level = 0xf,
- .access = 0x7,
- .quadrant = 0x3,
- .invalid = 0x1,
-};
-
static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *new, int bytes,
struct kvm_page_track_notifier_node *node)
@@ -4999,7 +4966,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
*/
mmu_topup_memory_caches(vcpu, true);
- spin_lock(&vcpu->kvm->mmu_lock);
+ write_lock(&vcpu->kvm->mmu_lock);
gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
@@ -5020,14 +4987,10 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
local_flush = true;
while (npte--) {
- u32 base_role = vcpu->arch.mmu->mmu_role.base.word;
-
entry = *spte;
mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
- if (gentry &&
- !((sp->role.word ^ base_role) & ~role_ign.word) &&
- rmap_can_add(vcpu))
- mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
+ if (gentry && sp->role.level != PG_LEVEL_4K)
+ ++vcpu->kvm->stat.mmu_pde_zapped;
if (need_remote_flush(entry, *spte))
remote_flush = true;
++spte;
@@ -5035,24 +4998,8 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
}
kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
- spin_unlock(&vcpu->kvm->mmu_lock);
-}
-
-int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
-{
- gpa_t gpa;
- int r;
-
- if (vcpu->arch.mmu->direct_map)
- return 0;
-
- gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
-
- r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-
- return r;
+ write_unlock(&vcpu->kvm->mmu_lock);
}
-EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
void *insn, int insn_len)
@@ -5125,7 +5072,7 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
if (is_noncanonical_address(gva, vcpu))
return;
- kvm_x86_ops.tlb_flush_gva(vcpu, gva);
+ static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
}
if (!mmu->invlpg)
@@ -5152,7 +5099,6 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
mmu->invlpg(vcpu, gva, root_hpa);
}
}
-EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_gva);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
@@ -5182,7 +5128,7 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
}
if (tlb_flush)
- kvm_x86_ops.tlb_flush_gva(vcpu, gva);
+ static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
++vcpu->stat.invlpg;
@@ -5192,7 +5138,6 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
* for them.
*/
}
-EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva);
void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
int tdp_huge_page_level)
@@ -5217,7 +5162,8 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
EXPORT_SYMBOL_GPL(kvm_configure_mmu);
/* The return value indicates if tlb flush on all vcpus is needed. */
-typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
+typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ struct kvm_memory_slot *slot);
/* The caller should hold mmu-lock before calling this function. */
static __always_inline bool
@@ -5231,16 +5177,16 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
end_gfn, &iterator) {
if (iterator.rmap)
- flush |= fn(kvm, iterator.rmap);
+ flush |= fn(kvm, iterator.rmap, memslot);
- if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+ if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
if (flush && lock_flush_tlb) {
kvm_flush_remote_tlbs_with_address(kvm,
start_gfn,
iterator.gfn - start_gfn + 1);
flush = false;
}
- cond_resched_lock(&kvm->mmu_lock);
+ cond_resched_rwlock_write(&kvm->mmu_lock);
}
}
@@ -5265,22 +5211,6 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
}
static __always_inline bool
-slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
- slot_level_handler fn, bool lock_flush_tlb)
-{
- return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
- KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
-}
-
-static __always_inline bool
-slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
- slot_level_handler fn, bool lock_flush_tlb)
-{
- return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K + 1,
- KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
-}
-
-static __always_inline bool
slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
slot_level_handler fn, bool lock_flush_tlb)
{
@@ -5390,7 +5320,7 @@ restart:
* be in active use by the guest.
*/
if (batch >= BATCH_ZAP_PAGES &&
- cond_resched_lock(&kvm->mmu_lock)) {
+ cond_resched_rwlock_write(&kvm->mmu_lock)) {
batch = 0;
goto restart;
}
@@ -5423,7 +5353,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
{
lockdep_assert_held(&kvm->slots_lock);
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
trace_kvm_mmu_zap_all_fast(kvm);
/*
@@ -5447,10 +5377,10 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
kvm_zap_obsolete_pages(kvm);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
kvm_tdp_mmu_zap_all(kvm);
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
}
static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
@@ -5492,7 +5422,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
int i;
bool flush;
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
slots = __kvm_memslots(kvm, i);
kvm_for_each_memslot(memslot, slots) {
@@ -5510,17 +5440,18 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
}
}
- if (kvm->arch.tdp_mmu_enabled) {
+ if (is_tdp_mmu_enabled(kvm)) {
flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end);
if (flush)
kvm_flush_remote_tlbs(kvm);
}
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
}
static bool slot_rmap_write_protect(struct kvm *kvm,
- struct kvm_rmap_head *rmap_head)
+ struct kvm_rmap_head *rmap_head,
+ struct kvm_memory_slot *slot)
{
return __rmap_write_protect(kvm, rmap_head, false);
}
@@ -5531,12 +5462,12 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
{
bool flush;
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K);
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
/*
* We can flush all the TLBs out of the mmu lock without TLB
@@ -5554,7 +5485,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
}
static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
- struct kvm_rmap_head *rmap_head)
+ struct kvm_rmap_head *rmap_head,
+ struct kvm_memory_slot *slot)
{
u64 *sptep;
struct rmap_iterator iter;
@@ -5575,8 +5507,8 @@ restart:
* mapping if the indirect sp has level = 1.
*/
if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
- (kvm_is_zone_device_pfn(pfn) ||
- PageCompound(pfn_to_page(pfn)))) {
+ sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
+ pfn, PG_LEVEL_NUM)) {
pte_list_remove(rmap_head, sptep);
if (kvm_available_flush_tlb_with_range())
@@ -5596,13 +5528,14 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *memslot)
{
/* FIXME: const-ify all uses of struct kvm_memory_slot. */
- spin_lock(&kvm->mmu_lock);
- slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
- kvm_mmu_zap_collapsible_spte, true);
+ struct kvm_memory_slot *slot = (struct kvm_memory_slot *)memslot;
- if (kvm->arch.tdp_mmu_enabled)
- kvm_tdp_mmu_zap_collapsible_sptes(kvm, memslot);
- spin_unlock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
+ slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
+
+ if (is_tdp_mmu_enabled(kvm))
+ kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
+ write_unlock(&kvm->mmu_lock);
}
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
@@ -5625,11 +5558,11 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
{
bool flush;
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
/*
* It's also safe to flush TLBs out of mmu lock here as currently this
@@ -5640,40 +5573,6 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
if (flush)
kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}
-EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
-
-void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
-{
- bool flush;
-
- spin_lock(&kvm->mmu_lock);
- flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
- false);
- if (kvm->arch.tdp_mmu_enabled)
- flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_2M);
- spin_unlock(&kvm->mmu_lock);
-
- if (flush)
- kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
-}
-EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
-
-void kvm_mmu_slot_set_dirty(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
-{
- bool flush;
-
- spin_lock(&kvm->mmu_lock);
- flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
- if (kvm->arch.tdp_mmu_enabled)
- flush |= kvm_tdp_mmu_slot_set_dirty(kvm, memslot);
- spin_unlock(&kvm->mmu_lock);
-
- if (flush)
- kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
-}
-EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
void kvm_mmu_zap_all(struct kvm *kvm)
{
@@ -5681,23 +5580,23 @@ void kvm_mmu_zap_all(struct kvm *kvm)
LIST_HEAD(invalid_list);
int ign;
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
restart:
list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
if (WARN_ON(sp->role.invalid))
continue;
if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
goto restart;
- if (cond_resched_lock(&kvm->mmu_lock))
+ if (cond_resched_rwlock_write(&kvm->mmu_lock))
goto restart;
}
kvm_mmu_commit_zap_page(kvm, &invalid_list);
- if (kvm->arch.tdp_mmu_enabled)
+ if (is_tdp_mmu_enabled(kvm))
kvm_tdp_mmu_zap_all(kvm);
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
}
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
@@ -5757,7 +5656,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
continue;
idx = srcu_read_lock(&kvm->srcu);
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
if (kvm_has_zapped_obsolete_pages(kvm)) {
kvm_mmu_commit_zap_page(kvm,
@@ -5768,7 +5667,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);
unlock:
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
/*
@@ -5988,7 +5887,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
ulong to_zap;
rcu_idx = srcu_read_lock(&kvm->srcu);
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
@@ -6005,22 +5904,22 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
struct kvm_mmu_page,
lpage_disallowed_link);
WARN_ON_ONCE(!sp->lpage_disallowed);
- if (sp->tdp_mmu_page)
+ if (is_tdp_mmu_page(sp)) {
kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
- else {
+ } else {
kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
WARN_ON_ONCE(sp->lpage_disallowed);
}
- if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+ if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
kvm_mmu_commit_zap_page(kvm, &invalid_list);
- cond_resched_lock(&kvm->mmu_lock);
+ cond_resched_rwlock_write(&kvm->mmu_lock);
}
}
kvm_mmu_commit_zap_page(kvm, &invalid_list);
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, rcu_idx);
}
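
The mmu.c hunks above convert mmu_lock from a spinlock to an rwlock so that TDP MMU page faults can run under the lock in read mode while structural changes still take it for write. A minimal userspace analogue of that split, using pthreads and invented names rather than the kernel primitives:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long nr_mappings;

/* Fault path: many vCPU threads may hold the lock for read at once. */
static void handle_fault(void)
{
	pthread_rwlock_rdlock(&mmu_lock);
	/* read-mostly fault handling would go here */
	pthread_rwlock_unlock(&mmu_lock);
}

/* Zap path: structural changes still require exclusive (write) access. */
static void zap_all(void)
{
	pthread_rwlock_wrlock(&mmu_lock);
	nr_mappings = 0;
	pthread_rwlock_unlock(&mmu_lock);
}

int main(void)
{
	handle_fault();
	zap_all();
	printf("mappings after zap: %lu\n", nr_mappings);
	return 0;
}
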
diff --git a/arch/x86/kvm/mmu/mmu_audit.c b/arch/x86/kvm/mmu/mmu_audit.c
index c8d51a37e2ce..ced15fd58fde 100644
--- a/arch/x86/kvm/mmu/mmu_audit.c
+++ b/arch/x86/kvm/mmu/mmu_audit.c
@@ -234,7 +234,7 @@ static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
}
static bool mmu_audit;
-static struct static_key mmu_audit_key;
+static DEFINE_STATIC_KEY_FALSE(mmu_audit_key);
static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
@@ -250,7 +250,7 @@ static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
- if (static_key_false((&mmu_audit_key)))
+ if (static_branch_unlikely((&mmu_audit_key)))
__kvm_mmu_audit(vcpu, point);
}
@@ -259,7 +259,7 @@ static void mmu_audit_enable(void)
if (mmu_audit)
return;
- static_key_slow_inc(&mmu_audit_key);
+ static_branch_inc(&mmu_audit_key);
mmu_audit = true;
}
@@ -268,7 +268,7 @@ static void mmu_audit_disable(void)
if (!mmu_audit)
return;
- static_key_slow_dec(&mmu_audit_key);
+ static_branch_dec(&mmu_audit_key);
mmu_audit = false;
}
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index bfc6389edc28..ec4fc28b325a 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -12,7 +12,7 @@
extern bool dbg;
#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
-#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
+#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
@@ -56,7 +56,12 @@ struct kvm_mmu_page {
/* Number of writes since the last time traversal visited this page. */
atomic_t write_flooding_count;
+#ifdef CONFIG_X86_64
bool tdp_mmu_page;
+
+ /* Used for freeing the page asynchronously if it is a TDP MMU page. */
+ struct rcu_head rcu_head;
+#endif
};
extern struct kmem_cache *mmu_page_header_cache;
@@ -76,12 +81,15 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
/*
- * When using the EPT page-modification log, the GPAs in the log
- * would come from L2 rather than L1. Therefore, we need to rely
- * on write protection to record dirty pages. This also bypasses
- * PML, since writes now result in a vmexit.
+ * When using the EPT page-modification log, the GPAs in the CPU dirty
+ * log would come from L2 rather than L1. Therefore, we need to rely
+ * on write protection to record dirty pages, which bypasses PML, since
+ * writes now result in a vmexit. Note, the check on CPU dirty logging
+ * being enabled is mandatory as the bits used to denote WP-only SPTEs
+ * are reserved for NPT w/ PAE (32-bit KVM).
*/
- return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
+ return vcpu->arch.mmu == &vcpu->arch.guest_mmu &&
+ kvm_x86_ops.cpu_dirty_log_size;
}
bool is_nx_huge_page_enabled(void);
@@ -133,6 +141,8 @@ enum {
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1)
#define SET_SPTE_SPURIOUS BIT(2)
+int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn, kvm_pfn_t pfn, int max_level);
int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
int max_level, kvm_pfn_t *pfnp,
bool huge_page_disallowed, int *req_level);
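
The rmap_printk() change above folds the calling function's name into the format string so call sites no longer have to spell it out. A standalone sketch of the same macro trick, relying on the same GNU named-variadic extension the kernel macro uses:

#include <stdio.h>

static int dbg = 1;

/* Prepend the caller's function name, like the reworked rmap_printk(). */
#define rmap_dbg(fmt, args...) \
	do { if (dbg) printf("%s: " fmt, __func__, ## args); } while (0)

static void rmap_add(unsigned long gfn)
{
	rmap_dbg("gfn %lx\n", gfn);	/* prints "rmap_add: gfn 1234" */
}

int main(void)
{
	rmap_add(0x1234);
	return 0;
}
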
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index 8443a675715b..34bb0ec69bd8 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -184,9 +184,9 @@ kvm_page_track_register_notifier(struct kvm *kvm,
head = &kvm->arch.track_notifier_head;
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
hlist_add_head_rcu(&n->node, &head->track_notifier_list);
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
}
EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier);
@@ -202,9 +202,9 @@ kvm_page_track_unregister_notifier(struct kvm *kvm,
head = &kvm->arch.track_notifier_head;
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
hlist_del_rcu(&n->node);
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
synchronize_srcu(&head->track_srcu);
}
EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 50e268eb8e1a..55d7b473ac44 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -601,6 +601,13 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
if (sp->role.level > PG_LEVEL_4K)
return;
+ /*
+ * If addresses are being invalidated, skip prefetching to avoid
+ * accidentally prefetching those addresses.
+ */
+ if (unlikely(vcpu->kvm->mmu_notifier_count))
+ return;
+
if (sp->role.direct)
return __direct_pte_prefetch(vcpu, sp, sptep);
@@ -790,6 +797,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
struct guest_walker walker;
int r;
kvm_pfn_t pfn;
+ hva_t hva;
unsigned long mmu_seq;
bool map_writable, is_self_change_mapping;
int max_level;
@@ -840,8 +848,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
- if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
- &map_writable))
+ if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, &hva,
+ write_fault, &map_writable))
return RET_PF_RETRY;
if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
@@ -868,8 +876,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
}
r = RET_PF_RETRY;
- spin_lock(&vcpu->kvm->mmu_lock);
- if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
+ write_lock(&vcpu->kvm->mmu_lock);
+ if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva))
goto out_unlock;
kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
@@ -881,7 +889,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
out_unlock:
- spin_unlock(&vcpu->kvm->mmu_lock);
+ write_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
return r;
}
@@ -919,7 +927,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
return;
}
- spin_lock(&vcpu->kvm->mmu_lock);
+ write_lock(&vcpu->kvm->mmu_lock);
for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
level = iterator.level;
sptep = iterator.sptep;
@@ -954,7 +962,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
break;
}
- spin_unlock(&vcpu->kvm->mmu_lock);
+ write_unlock(&vcpu->kvm->mmu_lock);
}
/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
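
The retry checks added above follow the usual notifier pattern: sample a generation count before doing work outside the lock, then redo the work if an invalidation ran (or is still running) in the meantime. A hypothetical userspace rendition of just that rule, leaving out the hva-range filtering the real mmu_notifier_retry_hva() also performs:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct invalidate_state {
	_Atomic unsigned long seq;		/* bumped when an invalidation completes */
	_Atomic unsigned long in_progress;	/* nonzero while one is running */
};

static unsigned long retry_snapshot(struct invalidate_state *s)
{
	return atomic_load(&s->seq);
}

/* After doing work without the lock, decide whether it must be redone. */
static bool must_retry(struct invalidate_state *s, unsigned long snap)
{
	if (atomic_load(&s->in_progress))
		return true;
	return atomic_load(&s->seq) != snap;
}

int main(void)
{
	struct invalidate_state s = { 0, 0 };
	unsigned long snap = retry_snapshot(&s);

	atomic_fetch_add(&s.seq, 1);	/* an invalidation raced with us */
	printf("retry needed: %d\n", must_retry(&s, snap));
	return 0;
}
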
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index c51ad544f25b..ef55f0bc4ccf 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -120,7 +120,7 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
if (level > PG_LEVEL_4K)
spte |= PT_PAGE_SIZE_MASK;
if (tdp_enabled)
- spte |= kvm_x86_ops.get_mt_mask(vcpu, gfn,
+ spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
kvm_is_mmio_pfn(pfn));
if (host_writable)
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 2b3a30bd38b0..6de3950fd704 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -131,6 +131,25 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
#define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT PT64_SECOND_AVAIL_BITS_SHIFT
/*
+ * If a thread running without exclusive control of the MMU lock must perform a
+ * multi-part operation on an SPTE, it can set the SPTE to REMOVED_SPTE as a
+ * non-present intermediate value. Other threads which encounter this value
+ * should not modify the SPTE.
+ *
+ * This constant works because it is considered non-present on both AMD and
+ * Intel CPUs and does not create an L1TF vulnerability because the pfn section
+ * is zeroed out.
+ *
+ * Only used by the TDP MMU.
+ */
+#define REMOVED_SPTE (1ull << 59)
+
+static inline bool is_removed_spte(u64 spte)
+{
+ return spte == REMOVED_SPTE;
+}
+
+/*
* In some cases, we need to preserve the GFN of a non-present or reserved
* SPTE when we usurp the upper five bits of the physical address space to
* defend against L1TF, e.g. for MMIO SPTEs. To preserve the GFN, we'll
@@ -185,23 +204,19 @@ static inline bool is_access_track_spte(u64 spte)
return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
}
-static inline int is_shadow_present_pte(u64 pte)
+static inline bool is_shadow_present_pte(u64 pte)
{
- return (pte != 0) && !is_mmio_spte(pte);
+ return (pte != 0) && !is_mmio_spte(pte) && !is_removed_spte(pte);
}
-static inline int is_large_pte(u64 pte)
+static inline bool is_large_pte(u64 pte)
{
return pte & PT_PAGE_SIZE_MASK;
}
-static inline int is_last_spte(u64 pte, int level)
+static inline bool is_last_spte(u64 pte, int level)
{
- if (level == PG_LEVEL_4K)
- return 1;
- if (is_large_pte(pte))
- return 1;
- return 0;
+ return (level == PG_LEVEL_4K) || is_large_pte(pte);
}
static inline bool is_executable_pte(u64 spte)
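
The REMOVED_SPTE value introduced above lets a thread that lacks exclusive use of the MMU lock freeze an SPTE before a multi-part change. A userspace sketch of that freeze-with-sentinel idea using C11 atomics; the helper names and sentinel value here are illustrative only, not the kernel implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REMOVED_SENTINEL	((uint64_t)1 << 59)	/* non-present placeholder */

/*
 * Freeze a slot before a multi-part update. Fails if another thread
 * already owns the slot or if the slot changed under us, mirroring
 * tdp_mmu_set_spte_atomic() returning false.
 */
static bool freeze_slot(_Atomic uint64_t *slot, uint64_t expected)
{
	if (expected == REMOVED_SENTINEL)
		return false;
	return atomic_compare_exchange_strong(slot, &expected, REMOVED_SENTINEL);
}

/* Only the thread that froze the slot may publish the final value. */
static void publish_slot(_Atomic uint64_t *slot, uint64_t new_val)
{
	atomic_store(slot, new_val);
}

int main(void)
{
	_Atomic uint64_t slot = 42;

	if (freeze_slot(&slot, 42))
		publish_slot(&slot, 0);	/* e.g. tear the entry down */
	printf("slot: %llu\n", (unsigned long long)atomic_load(&slot));
	return 0;
}
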
diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c
index 87b7e16911db..e5f148106e20 100644
--- a/arch/x86/kvm/mmu/tdp_iter.c
+++ b/arch/x86/kvm/mmu/tdp_iter.c
@@ -12,7 +12,7 @@ static void tdp_iter_refresh_sptep(struct tdp_iter *iter)
{
iter->sptep = iter->pt_path[iter->level - 1] +
SHADOW_PT_INDEX(iter->gfn << PAGE_SHIFT, iter->level);
- iter->old_spte = READ_ONCE(*iter->sptep);
+ iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep));
}
static gfn_t round_gfn_for_level(gfn_t gfn, int level)
@@ -22,21 +22,22 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level)
/*
* Sets a TDP iterator to walk a pre-order traversal of the paging structure
- * rooted at root_pt, starting with the walk to translate goal_gfn.
+ * rooted at root_pt, starting with the walk to translate next_last_level_gfn.
*/
void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
- int min_level, gfn_t goal_gfn)
+ int min_level, gfn_t next_last_level_gfn)
{
WARN_ON(root_level < 1);
WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);
- iter->goal_gfn = goal_gfn;
+ iter->next_last_level_gfn = next_last_level_gfn;
+ iter->yielded_gfn = iter->next_last_level_gfn;
iter->root_level = root_level;
iter->min_level = min_level;
iter->level = root_level;
- iter->pt_path[iter->level - 1] = root_pt;
+ iter->pt_path[iter->level - 1] = (tdp_ptep_t)root_pt;
- iter->gfn = round_gfn_for_level(iter->goal_gfn, iter->level);
+ iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
tdp_iter_refresh_sptep(iter);
iter->valid = true;
@@ -47,7 +48,7 @@ void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
* address of the child page table referenced by the SPTE. Returns null if
* there is no such entry.
*/
-u64 *spte_to_child_pt(u64 spte, int level)
+tdp_ptep_t spte_to_child_pt(u64 spte, int level)
{
/*
* There's no child entry if this entry isn't present or is a
@@ -56,7 +57,7 @@ u64 *spte_to_child_pt(u64 spte, int level)
if (!is_shadow_present_pte(spte) || is_last_spte(spte, level))
return NULL;
- return __va(spte_to_pfn(spte) << PAGE_SHIFT);
+ return (tdp_ptep_t)__va(spte_to_pfn(spte) << PAGE_SHIFT);
}
/*
@@ -65,7 +66,7 @@ u64 *spte_to_child_pt(u64 spte, int level)
*/
static bool try_step_down(struct tdp_iter *iter)
{
- u64 *child_pt;
+ tdp_ptep_t child_pt;
if (iter->level == iter->min_level)
return false;
@@ -74,7 +75,7 @@ static bool try_step_down(struct tdp_iter *iter)
* Reread the SPTE before stepping down to avoid traversing into page
* tables that are no longer linked from this entry.
*/
- iter->old_spte = READ_ONCE(*iter->sptep);
+ iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep));
child_pt = spte_to_child_pt(iter->old_spte, iter->level);
if (!child_pt)
@@ -82,7 +83,7 @@ static bool try_step_down(struct tdp_iter *iter)
iter->level--;
iter->pt_path[iter->level - 1] = child_pt;
- iter->gfn = round_gfn_for_level(iter->goal_gfn, iter->level);
+ iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
tdp_iter_refresh_sptep(iter);
return true;
@@ -106,9 +107,9 @@ static bool try_step_side(struct tdp_iter *iter)
return false;
iter->gfn += KVM_PAGES_PER_HPAGE(iter->level);
- iter->goal_gfn = iter->gfn;
+ iter->next_last_level_gfn = iter->gfn;
iter->sptep++;
- iter->old_spte = READ_ONCE(*iter->sptep);
+ iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep));
return true;
}
@@ -158,24 +159,7 @@ void tdp_iter_next(struct tdp_iter *iter)
iter->valid = false;
}
-/*
- * Restart the walk over the paging structure from the root, starting from the
- * highest gfn the iterator had previously reached. Assumes that the entire
- * paging structure, except the root page, may have been completely torn down
- * and rebuilt.
- */
-void tdp_iter_refresh_walk(struct tdp_iter *iter)
-{
- gfn_t goal_gfn = iter->goal_gfn;
-
- if (iter->gfn > goal_gfn)
- goal_gfn = iter->gfn;
-
- tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
- iter->root_level, iter->min_level, goal_gfn);
-}
-
-u64 *tdp_iter_root_pt(struct tdp_iter *iter)
+tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter)
{
return iter->pt_path[iter->root_level - 1];
}
diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h
index 47170d0dc98e..4cc177d75c4a 100644
--- a/arch/x86/kvm/mmu/tdp_iter.h
+++ b/arch/x86/kvm/mmu/tdp_iter.h
@@ -7,6 +7,8 @@
#include "mmu.h"
+typedef u64 __rcu *tdp_ptep_t;
+
/*
* A TDP iterator performs a pre-order walk over a TDP paging structure.
*/
@@ -15,11 +17,17 @@ struct tdp_iter {
* The iterator will traverse the paging structure towards the mapping
* for this GFN.
*/
- gfn_t goal_gfn;
+ gfn_t next_last_level_gfn;
+ /*
+ * The next_last_level_gfn at the time when the thread last
+ * yielded. Only yielding when the next_last_level_gfn !=
+ * yielded_gfn helps ensure forward progress.
+ */
+ gfn_t yielded_gfn;
/* Pointers to the page tables traversed to reach the current SPTE */
- u64 *pt_path[PT64_ROOT_MAX_LEVEL];
+ tdp_ptep_t pt_path[PT64_ROOT_MAX_LEVEL];
/* A pointer to the current SPTE */
- u64 *sptep;
+ tdp_ptep_t sptep;
/* The lowest GFN mapped by the current SPTE */
gfn_t gfn;
/* The level of the root page given to the iterator */
@@ -49,12 +57,11 @@ struct tdp_iter {
#define for_each_tdp_pte(iter, root, root_level, start, end) \
for_each_tdp_pte_min_level(iter, root, root_level, PG_LEVEL_4K, start, end)
-u64 *spte_to_child_pt(u64 pte, int level);
+tdp_ptep_t spte_to_child_pt(u64 pte, int level);
void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
- int min_level, gfn_t goal_gfn);
+ int min_level, gfn_t next_last_level_gfn);
void tdp_iter_next(struct tdp_iter *iter);
-void tdp_iter_refresh_walk(struct tdp_iter *iter);
-u64 *tdp_iter_root_pt(struct tdp_iter *iter);
+tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter);
#endif /* __KVM_X86_MMU_TDP_ITER_H */
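
The yielded_gfn field added above exists so the walk only yields once it has advanced past the point where it last yielded, which guarantees forward progress under contention. A stripped-down, hypothetical illustration of that rule:

#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

struct walk {
	unsigned long next_gfn;		/* next position the walk will visit */
	unsigned long yielded_gfn;	/* position recorded at the last yield */
};

/* Yield only if the walk advanced since the last yield, so a contended
 * walk cannot spin forever yielding at a single position. */
static bool maybe_yield(struct walk *w, bool contended)
{
	if (!contended || w->next_gfn == w->yielded_gfn)
		return false;

	w->yielded_gfn = w->next_gfn;
	sched_yield();
	return true;
}

int main(void)
{
	struct walk w = { .next_gfn = 0, .yielded_gfn = 0 };

	for (unsigned long gfn = 1; gfn <= 8; gfn++) {
		w.next_gfn = gfn;
		maybe_yield(&w, true);
	}
	printf("last yield recorded at gfn %lu\n", w.yielded_gfn);
	return 0;
}
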
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 2ef8615f9dba..c926c6b899a1 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -7,32 +7,23 @@
#include "tdp_mmu.h"
#include "spte.h"
+#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>
-#ifdef CONFIG_X86_64
static bool __read_mostly tdp_mmu_enabled = false;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
-#endif
-
-static bool is_tdp_mmu_enabled(void)
-{
-#ifdef CONFIG_X86_64
- return tdp_enabled && READ_ONCE(tdp_mmu_enabled);
-#else
- return false;
-#endif /* CONFIG_X86_64 */
-}
/* Initializes the TDP MMU for the VM, if enabled. */
void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
- if (!is_tdp_mmu_enabled())
+ if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
return;
/* This should not be changed for the lifetime of the VM. */
kvm->arch.tdp_mmu_enabled = true;
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
+ spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
}
@@ -42,6 +33,12 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
return;
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
+
+ /*
+ * Ensure that all the outstanding RCU callbacks to free shadow pages
+ * can run before the VM is torn down.
+ */
+ rcu_barrier();
}
static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
@@ -53,7 +50,7 @@ static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
struct kvm_mmu_page *root)
{
- lockdep_assert_held(&kvm->mmu_lock);
+ lockdep_assert_held_write(&kvm->mmu_lock);
if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
return false;
@@ -88,22 +85,6 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
#define for_each_tdp_mmu_root(_kvm, _root) \
list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
-bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
-{
- struct kvm_mmu_page *sp;
-
- if (!kvm->arch.tdp_mmu_enabled)
- return false;
- if (WARN_ON(!VALID_PAGE(hpa)))
- return false;
-
- sp = to_shadow_page(hpa);
- if (WARN_ON(!sp))
- return false;
-
- return sp->tdp_mmu_page && sp->root_count;
-}
-
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end, bool can_yield);
@@ -111,7 +92,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
- lockdep_assert_held(&kvm->mmu_lock);
+ lockdep_assert_held_write(&kvm->mmu_lock);
WARN_ON(root->root_count);
WARN_ON(!root->tdp_mmu_page);
@@ -164,13 +145,13 @@ static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu)
role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
/* Check for an existing root before allocating a new one. */
for_each_tdp_mmu_root(kvm, root) {
if (root->role.word == role.word) {
kvm_mmu_get_root(kvm, root);
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
return root;
}
}
@@ -180,7 +161,7 @@ static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu)
list_add(&root->link, &kvm->arch.tdp_mmu_roots);
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
return root;
}
@@ -196,8 +177,31 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
return __pa(root->spt);
}
+static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
+{
+ free_page((unsigned long)sp->spt);
+ kmem_cache_free(mmu_page_header_cache, sp);
+}
+
+/*
+ * This is called through call_rcu in order to free TDP page table memory
+ * safely with respect to other kernel threads that may be operating on
+ * the memory.
+ * Because TDP MMU page table memory is only accessed within an RCU read-side
+ * critical section and only freed after a grace period, lockless walkers
+ * cannot touch the memory after it has been freed.
+ */
+static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
+{
+ struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
+ rcu_head);
+
+ tdp_mmu_free_sp(sp);
+}
+
static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
- u64 old_spte, u64 new_spte, int level);
+ u64 old_spte, u64 new_spte, int level,
+ bool shared);
static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
@@ -235,6 +239,128 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
}
/**
+ * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
+ *
+ * @kvm: kvm instance
+ * @sp: the new page
+ * @shared: This operation may not be running under the exclusive use of
+ * the MMU lock and the operation must synchronize with other
+ * threads that might be adding or removing pages.
+ * @account_nx: This page replaces an NX large page and should be marked for
+ * eventual reclaim.
+ */
+static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+ bool shared, bool account_nx)
+{
+ if (shared)
+ spin_lock(&kvm->arch.tdp_mmu_pages_lock);
+ else
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
+ list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
+ if (account_nx)
+ account_huge_nx_page(kvm, sp);
+
+ if (shared)
+ spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
+}
+
+/**
+ * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
+ *
+ * @kvm: kvm instance
+ * @sp: the page to be removed
+ * @shared: This operation may not be running under the exclusive use of
+ * the MMU lock and the operation must synchronize with other
+ * threads that might be adding or removing pages.
+ */
+static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+ bool shared)
+{
+ if (shared)
+ spin_lock(&kvm->arch.tdp_mmu_pages_lock);
+ else
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
+ list_del(&sp->link);
+ if (sp->lpage_disallowed)
+ unaccount_huge_nx_page(kvm, sp);
+
+ if (shared)
+ spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
+}
+
+/**
+ * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
+ *
+ * @kvm: kvm instance
+ * @pt: the page removed from the paging structure
+ * @shared: This operation may not be running under the exclusive use
+ * of the MMU lock and the operation must synchronize with other
+ * threads that might be modifying SPTEs.
+ *
+ * Given a page table that has been removed from the TDP paging structure,
+ * iterates through the page table to clear SPTEs and free child page tables.
+ */
+static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,
+ bool shared)
+{
+ struct kvm_mmu_page *sp = sptep_to_sp(pt);
+ int level = sp->role.level;
+ gfn_t base_gfn = sp->gfn;
+ u64 old_child_spte;
+ u64 *sptep;
+ gfn_t gfn;
+ int i;
+
+ trace_kvm_mmu_prepare_zap_page(sp);
+
+ tdp_mmu_unlink_page(kvm, sp, shared);
+
+ for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
+ sptep = pt + i;
+ gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));
+
+ if (shared) {
+ /*
+ * Set the SPTE to a nonpresent value that other
+ * threads will not overwrite. If the SPTE was
+ * already marked as removed, another thread
+ * handling a page fault may be about to overwrite
+ * it, so keep exchanging until the value replaced
+ * is something other than the removed SPTE value.
+ */
+ for (;;) {
+ old_child_spte = xchg(sptep, REMOVED_SPTE);
+ if (!is_removed_spte(old_child_spte))
+ break;
+ cpu_relax();
+ }
+ } else {
+ old_child_spte = READ_ONCE(*sptep);
+
+ /*
+ * Marking the SPTE as a removed SPTE is not
+ * strictly necessary here as the MMU lock will
+ * stop other threads from concurrently modifying
+ * this SPTE. Using the removed SPTE value keeps
+ * the two branches consistent and simplifies
+ * the function.
+ */
+ WRITE_ONCE(*sptep, REMOVED_SPTE);
+ }
+ handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
+ old_child_spte, REMOVED_SPTE, level - 1,
+ shared);
+ }
+
+ kvm_flush_remote_tlbs_with_address(kvm, gfn,
+ KVM_PAGES_PER_HPAGE(level));
+
+ call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
+}
+
+/**
* handle_changed_spte - handle bookkeeping associated with an SPTE change
* @kvm: kvm instance
* @as_id: the address space of the paging structure the SPTE was a part of
@@ -242,22 +368,22 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
* @old_spte: The value of the SPTE before the change
* @new_spte: The value of the SPTE after the change
* @level: the level of the PT the SPTE is part of in the paging structure
+ * @shared: This operation may not be running under the exclusive use of
+ * the MMU lock and the operation must synchronize with other
+ * threads that might be modifying SPTEs.
*
* Handle bookkeeping that might result from the modification of a SPTE.
* This function must be called for all TDP SPTE modifications.
*/
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
- u64 old_spte, u64 new_spte, int level)
+ u64 old_spte, u64 new_spte, int level,
+ bool shared)
{
bool was_present = is_shadow_present_pte(old_spte);
bool is_present = is_shadow_present_pte(new_spte);
bool was_leaf = was_present && is_last_spte(old_spte, level);
bool is_leaf = is_present && is_last_spte(new_spte, level);
bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
- u64 *pt;
- struct kvm_mmu_page *sp;
- u64 old_child_spte;
- int i;
WARN_ON(level > PT64_ROOT_MAX_LEVEL);
WARN_ON(level < PG_LEVEL_4K);
@@ -298,15 +424,19 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
*/
if (!was_present && !is_present) {
/*
- * If this change does not involve a MMIO SPTE, it is
- * unexpected. Log the change, though it should not impact the
- * guest since both the former and current SPTEs are nonpresent.
+ * If this change does not involve a MMIO SPTE or removed SPTE,
+ * it is unexpected. Log the change, though it should not
+ * impact the guest since both the former and current SPTEs
+ * are nonpresent.
*/
- if (WARN_ON(!is_mmio_spte(old_spte) && !is_mmio_spte(new_spte)))
+ if (WARN_ON(!is_mmio_spte(old_spte) &&
+ !is_mmio_spte(new_spte) &&
+ !is_removed_spte(new_spte)))
pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
"should not be replaced with another,\n"
"different nonpresent SPTE, unless one or both\n"
- "are MMIO SPTEs.\n"
+ "are MMIO SPTEs, or the new SPTE is\n"
+ "a temporary removed SPTE.\n"
"as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
as_id, gfn, old_spte, new_spte, level);
return;
@@ -321,54 +451,127 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
* Recursively handle child PTs if the change removed a subtree from
* the paging structure.
*/
- if (was_present && !was_leaf && (pfn_changed || !is_present)) {
- pt = spte_to_child_pt(old_spte, level);
- sp = sptep_to_sp(pt);
+ if (was_present && !was_leaf && (pfn_changed || !is_present))
+ handle_removed_tdp_mmu_page(kvm,
+ spte_to_child_pt(old_spte, level), shared);
+}
- trace_kvm_mmu_prepare_zap_page(sp);
+static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
+ u64 old_spte, u64 new_spte, int level,
+ bool shared)
+{
+ __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
+ shared);
+ handle_changed_spte_acc_track(old_spte, new_spte, level);
+ handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
+ new_spte, level);
+}
- list_del(&sp->link);
+/*
+ * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically and handle the
+ * associated bookkeeping
+ *
+ * @kvm: kvm instance
+ * @iter: a tdp_iter instance currently on the SPTE that should be set
+ * @new_spte: The value the SPTE should be set to
+ * Returns: true if the SPTE was set, false if it was not. If false is returned,
+ * this function will have no side-effects.
+ */
+static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
+ struct tdp_iter *iter,
+ u64 new_spte)
+{
+ u64 *root_pt = tdp_iter_root_pt(iter);
+ struct kvm_mmu_page *root = sptep_to_sp(root_pt);
+ int as_id = kvm_mmu_page_as_id(root);
- if (sp->lpage_disallowed)
- unaccount_huge_nx_page(kvm, sp);
+ lockdep_assert_held_read(&kvm->mmu_lock);
- for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
- old_child_spte = READ_ONCE(*(pt + i));
- WRITE_ONCE(*(pt + i), 0);
- handle_changed_spte(kvm, as_id,
- gfn + (i * KVM_PAGES_PER_HPAGE(level - 1)),
- old_child_spte, 0, level - 1);
- }
+ /*
+ * Do not change removed SPTEs. Only the thread that froze the SPTE
+ * may modify it.
+ */
+ if (iter->old_spte == REMOVED_SPTE)
+ return false;
- kvm_flush_remote_tlbs_with_address(kvm, gfn,
- KVM_PAGES_PER_HPAGE(level));
+ if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
+ new_spte) != iter->old_spte)
+ return false;
- free_page((unsigned long)pt);
- kmem_cache_free(mmu_page_header_cache, sp);
- }
+ handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
+ iter->level, true);
+
+ return true;
}
-static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
- u64 old_spte, u64 new_spte, int level)
+static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
+ struct tdp_iter *iter)
{
- __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level);
- handle_changed_spte_acc_track(old_spte, new_spte, level);
- handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
- new_spte, level);
+ /*
+ * Freeze the SPTE by setting it to a special,
+ * non-present value. This will stop other threads from
+ * immediately installing a present entry in its place
+ * before the TLBs are flushed.
+ */
+ if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
+ return false;
+
+ kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
+ KVM_PAGES_PER_HPAGE(iter->level));
+
+ /*
+ * No other thread can overwrite the removed SPTE as they
+ * must either wait on the MMU lock or use
+ * tdp_mmu_set_spte_atomic which will not overwrite the
+ * special removed SPTE value. No bookkeeping is needed
+ * here since the SPTE is going from non-present
+ * to non-present.
+ */
+ WRITE_ONCE(*iter->sptep, 0);
+
+ return true;
}
+
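The zap path above freezes a live SPTE with a sentinel non-present value, flushes remote TLBs, and only then clears the entry. A minimal userspace sketch of that compare-and-exchange protocol (the sentinel value and names below are stand-ins, not KVM's actual REMOVED_SPTE encoding):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REMOVED_SENTINEL 0x5a0ULL /* stand-in for REMOVED_SPTE */

/* Refuse to touch a frozen entry; otherwise update it atomically. */
static bool set_entry_atomic(uint64_t *entry, uint64_t old, uint64_t new)
{
	if (old == REMOVED_SENTINEL)
		return false; /* another thread froze this entry */

	return __atomic_compare_exchange_n(entry, &old, new, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	uint64_t entry = 0x1234;

	if (set_entry_atomic(&entry, 0x1234, REMOVED_SENTINEL)) {
		/* a remote TLB flush would happen here */
		entry = 0; /* only the freezing thread may write the final value */
	}

	printf("entry = %#llx\n", (unsigned long long)entry);
	return 0;
}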
+/*
+ * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
+ * @kvm: kvm instance
+ * @iter: a tdp_iter instance currently on the SPTE that should be set
+ * @new_spte: The value the SPTE should be set to
+ * @record_acc_track: Notify the MM subsystem of changes to the accessed state
+ * of the page. Should be set unless handling an MMU
+ * notifier for access tracking. Leaving record_acc_track
+ * unset in that case prevents page accesses from being
+ * double counted.
+ * @record_dirty_log: Record the page as dirty in the dirty bitmap if
+ * appropriate for the change being made. Should be set
+ * unless performing certain dirty logging operations.
+ * Leaving record_dirty_log unset in that case prevents page
+ * writes from being double counted.
+ */
static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
u64 new_spte, bool record_acc_track,
bool record_dirty_log)
{
- u64 *root_pt = tdp_iter_root_pt(iter);
+ tdp_ptep_t root_pt = tdp_iter_root_pt(iter);
struct kvm_mmu_page *root = sptep_to_sp(root_pt);
int as_id = kvm_mmu_page_as_id(root);
- WRITE_ONCE(*iter->sptep, new_spte);
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
+ /*
+ * No thread should be using this function to set SPTEs to the
+ * temporary removed SPTE value.
+ * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
+ * should be used. If operating under the MMU lock in write mode, the
+ * use of the removed SPTE should not be necessary.
+ */
+ WARN_ON(iter->old_spte == REMOVED_SPTE);
+
+ WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
__handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
- iter->level);
+ iter->level, false);
if (record_acc_track)
handle_changed_spte_acc_track(iter->old_spte, new_spte,
iter->level);
@@ -413,27 +616,46 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
_mmu->shadow_root_level, _start, _end)
/*
- * Flush the TLB if the process should drop kvm->mmu_lock.
- * Return whether the caller still needs to flush the tlb.
+ * Yield if the MMU lock is contended or this thread needs to return control
+ * to the scheduler.
+ *
+ * If this function should yield and flush is set, it will perform a remote
+ * TLB flush before yielding.
+ *
+ * If this function yields, it will also reset the tdp_iter's walk over the
+ * paging structure and the calling function should skip to the next
+ * iteration to allow the iterator to continue its traversal from the
+ * paging structure root.
+ *
+ * Return true if this function yielded and the iterator's traversal was reset.
+ * Return false if a yield was not needed.
*/
-static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
+static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
+ struct tdp_iter *iter, bool flush)
{
- if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
- kvm_flush_remote_tlbs(kvm);
- cond_resched_lock(&kvm->mmu_lock);
- tdp_iter_refresh_walk(iter);
+ /* Ensure forward progress has been made before yielding. */
+ if (iter->next_last_level_gfn == iter->yielded_gfn)
return false;
- } else {
+
+ if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
+ rcu_read_unlock();
+
+ if (flush)
+ kvm_flush_remote_tlbs(kvm);
+
+ cond_resched_rwlock_write(&kvm->mmu_lock);
+ rcu_read_lock();
+
+ WARN_ON(iter->gfn > iter->next_last_level_gfn);
+
+ tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
+ iter->root_level, iter->min_level,
+ iter->next_last_level_gfn);
+
return true;
}
-}
-static void tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
-{
- if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
- cond_resched_lock(&kvm->mmu_lock);
- tdp_iter_refresh_walk(iter);
- }
+ return false;
}
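Callers use this helper in a common shape; the snippet below is condensed from the zap_gfn_range hunk that follows and is only meant to show how the true/false return interacts with the flush bookkeeping:

	rcu_read_lock();

	tdp_root_for_each_pte(iter, root, start, end) {
		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
			/* The helper already flushed and reset the walk. */
			flush_needed = false;
			continue;
		}

		/* ... process the SPTE at iter ... */
	}

	rcu_read_unlock();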
/*
@@ -453,7 +675,15 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
struct tdp_iter iter;
bool flush_needed = false;
+ rcu_read_lock();
+
tdp_root_for_each_pte(iter, root, start, end) {
+ if (can_yield &&
+ tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
+ flush_needed = false;
+ continue;
+ }
+
if (!is_shadow_present_pte(iter.old_spte))
continue;
@@ -468,12 +698,10 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
continue;
tdp_mmu_set_spte(kvm, &iter, 0);
-
- if (can_yield)
- flush_needed = tdp_mmu_iter_flush_cond_resched(kvm, &iter);
- else
- flush_needed = true;
+ flush_needed = true;
}
+
+ rcu_read_unlock();
return flush_needed;
}
@@ -517,21 +745,18 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
int ret = 0;
int make_spte_ret = 0;
- if (unlikely(is_noslot_pfn(pfn))) {
+ if (unlikely(is_noslot_pfn(pfn)))
new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
- trace_mark_mmio_spte(iter->sptep, iter->gfn, new_spte);
- } else {
+ else
make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
pfn, iter->old_spte, prefault, true,
map_writable, !shadow_accessed_mask,
&new_spte);
- trace_kvm_mmu_set_spte(iter->level, iter->gfn, iter->sptep);
- }
if (new_spte == iter->old_spte)
ret = RET_PF_SPURIOUS;
- else
- tdp_mmu_set_spte(vcpu->kvm, iter, new_spte);
+ else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
+ return RET_PF_RETRY;
/*
* If the page fault was caused by a write but the page is write
@@ -545,10 +770,16 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
}
/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
- if (unlikely(is_mmio_spte(new_spte)))
+ if (unlikely(is_mmio_spte(new_spte))) {
+ trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
+ new_spte);
ret = RET_PF_EMULATE;
+ } else
+ trace_kvm_mmu_set_spte(iter->level, iter->gfn,
+ rcu_dereference(iter->sptep));
- trace_kvm_mmu_set_spte(iter->level, iter->gfn, iter->sptep);
+ trace_kvm_mmu_set_spte(iter->level, iter->gfn,
+ rcu_dereference(iter->sptep));
if (!prefault)
vcpu->stat.pf_fixed++;
@@ -586,6 +817,9 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
huge_page_disallowed, &req_level);
trace_kvm_mmu_spte_requested(gpa, level, pfn);
+
+ rcu_read_lock();
+
tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
if (nx_huge_page_workaround_enabled)
disallowed_hugepage_adjust(iter.old_spte, gfn,
@@ -601,49 +835,61 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
*/
if (is_shadow_present_pte(iter.old_spte) &&
is_large_pte(iter.old_spte)) {
- tdp_mmu_set_spte(vcpu->kvm, &iter, 0);
-
- kvm_flush_remote_tlbs_with_address(vcpu->kvm, iter.gfn,
- KVM_PAGES_PER_HPAGE(iter.level));
+ if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
+ break;
/*
* The iter must explicitly re-read the spte here
* because the new value informs the !present
* path below.
*/
- iter.old_spte = READ_ONCE(*iter.sptep);
+ iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
}
if (!is_shadow_present_pte(iter.old_spte)) {
sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
- list_add(&sp->link, &vcpu->kvm->arch.tdp_mmu_pages);
child_pt = sp->spt;
- clear_page(child_pt);
+
new_spte = make_nonleaf_spte(child_pt,
!shadow_accessed_mask);
- trace_kvm_mmu_get_page(sp, true);
- if (huge_page_disallowed && req_level >= iter.level)
- account_huge_nx_page(vcpu->kvm, sp);
-
- tdp_mmu_set_spte(vcpu->kvm, &iter, new_spte);
+ if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter,
+ new_spte)) {
+ tdp_mmu_link_page(vcpu->kvm, sp, true,
+ huge_page_disallowed &&
+ req_level >= iter.level);
+
+ trace_kvm_mmu_get_page(sp, true);
+ } else {
+ tdp_mmu_free_sp(sp);
+ break;
+ }
}
}
- if (WARN_ON(iter.level != level))
+ if (iter.level != level) {
+ rcu_read_unlock();
return RET_PF_RETRY;
+ }
ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
pfn, prefault);
+ rcu_read_unlock();
return ret;
}
-static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
- unsigned long end, unsigned long data,
- int (*handler)(struct kvm *kvm, struct kvm_memory_slot *slot,
- struct kvm_mmu_page *root, gfn_t start,
- gfn_t end, unsigned long data))
+static __always_inline int
+kvm_tdp_mmu_handle_hva_range(struct kvm *kvm,
+ unsigned long start,
+ unsigned long end,
+ unsigned long data,
+ int (*handler)(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ struct kvm_mmu_page *root,
+ gfn_t start,
+ gfn_t end,
+ unsigned long data))
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
@@ -705,6 +951,8 @@ static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot,
int young = 0;
u64 new_spte = 0;
+ rcu_read_lock();
+
tdp_root_for_each_leaf_pte(iter, root, start, end) {
/*
* If we have a non-accessed entry we don't need to change the
@@ -736,6 +984,8 @@ static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot,
trace_kvm_age_page(iter.gfn, iter.level, slot, young);
}
+ rcu_read_unlock();
+
return young;
}
@@ -781,6 +1031,8 @@ static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot,
u64 new_spte;
int need_flush = 0;
+ rcu_read_lock();
+
WARN_ON(pte_huge(*ptep));
new_pfn = pte_pfn(*ptep);
@@ -809,6 +1061,8 @@ static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot,
if (need_flush)
kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
+ rcu_read_unlock();
+
return 0;
}
@@ -832,21 +1086,27 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
u64 new_spte;
bool spte_set = false;
+ rcu_read_lock();
+
BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
min_level, start, end) {
+ if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+ continue;
+
if (!is_shadow_present_pte(iter.old_spte) ||
- !is_last_spte(iter.old_spte, iter.level))
+ !is_last_spte(iter.old_spte, iter.level) ||
+ !(iter.old_spte & PT_WRITABLE_MASK))
continue;
new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
spte_set = true;
-
- tdp_mmu_iter_cond_resched(kvm, &iter);
}
+
+ rcu_read_unlock();
return spte_set;
}
@@ -888,7 +1148,12 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
u64 new_spte;
bool spte_set = false;
+ rcu_read_lock();
+
tdp_root_for_each_leaf_pte(iter, root, start, end) {
+ if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+ continue;
+
if (spte_ad_need_write_protect(iter.old_spte)) {
if (is_writable_pte(iter.old_spte))
new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
@@ -903,9 +1168,9 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
spte_set = true;
-
- tdp_mmu_iter_cond_resched(kvm, &iter);
}
+
+ rcu_read_unlock();
return spte_set;
}
@@ -947,6 +1212,8 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
struct tdp_iter iter;
u64 new_spte;
+ rcu_read_lock();
+
tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
gfn + BITS_PER_LONG) {
if (!mask)
@@ -956,6 +1223,8 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
!(mask & (1UL << (iter.gfn - gfn))))
continue;
+ mask &= ~(1UL << (iter.gfn - gfn));
+
if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
if (is_writable_pte(iter.old_spte))
new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
@@ -969,9 +1238,9 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
}
tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
-
- mask &= ~(1UL << (iter.gfn - gfn));
}
+
+ rcu_read_unlock();
}
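The loop above clears each mask bit as the corresponding gfn is handled and relies on the earlier "if (!mask) break" check to stop as soon as every bit has been consumed. The same bookkeeping in a tiny, self-contained userspace sketch (values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base_gfn = 0x1000;
	uint64_t mask = (1ULL << 0) | (1ULL << 2) | (1ULL << 63);

	/* Walk gfns covered by the mask; clear each bit once it is handled. */
	for (uint64_t gfn = base_gfn; mask; gfn++) {
		if (!(mask & (1ULL << (gfn - base_gfn))))
			continue;

		mask &= ~(1ULL << (gfn - base_gfn));
		printf("clear dirty bit for gfn %#llx\n", (unsigned long long)gfn);
	}
	return 0;
}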
/*
@@ -989,7 +1258,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_mmu_page *root;
int root_as_id;
- lockdep_assert_held(&kvm->mmu_lock);
+ lockdep_assert_held_write(&kvm->mmu_lock);
for_each_tdp_mmu_root(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
@@ -1000,81 +1269,43 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
}
/*
- * Set the dirty status of all the SPTEs mapping GFNs in the memslot. This is
- * only used for PML, and so will involve setting the dirty bit on each SPTE.
- * Returns true if an SPTE has been changed and the TLBs need to be flushed.
- */
-static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
- gfn_t start, gfn_t end)
-{
- struct tdp_iter iter;
- u64 new_spte;
- bool spte_set = false;
-
- tdp_root_for_each_pte(iter, root, start, end) {
- if (!is_shadow_present_pte(iter.old_spte))
- continue;
-
- new_spte = iter.old_spte | shadow_dirty_mask;
-
- tdp_mmu_set_spte(kvm, &iter, new_spte);
- spte_set = true;
-
- tdp_mmu_iter_cond_resched(kvm, &iter);
- }
-
- return spte_set;
-}
-
-/*
- * Set the dirty status of all the SPTEs mapping GFNs in the memslot. This is
- * only used for PML, and so will involve setting the dirty bit on each SPTE.
- * Returns true if an SPTE has been changed and the TLBs need to be flushed.
- */
-bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
-{
- struct kvm_mmu_page *root;
- int root_as_id;
- bool spte_set = false;
-
- for_each_tdp_mmu_root_yield_safe(kvm, root) {
- root_as_id = kvm_mmu_page_as_id(root);
- if (root_as_id != slot->as_id)
- continue;
-
- spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
- slot->base_gfn + slot->npages);
- }
- return spte_set;
-}
-
-/*
- * Clear non-leaf entries (and free associated page tables) which could
- * be replaced by large mappings, for GFNs within the slot.
+ * Clear leaf entries which could be replaced by large mappings, for
+ * GFNs within the slot.
*/
static void zap_collapsible_spte_range(struct kvm *kvm,
struct kvm_mmu_page *root,
- gfn_t start, gfn_t end)
+ struct kvm_memory_slot *slot)
{
+ gfn_t start = slot->base_gfn;
+ gfn_t end = start + slot->npages;
struct tdp_iter iter;
kvm_pfn_t pfn;
bool spte_set = false;
+ rcu_read_lock();
+
tdp_root_for_each_pte(iter, root, start, end) {
+ if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) {
+ spte_set = false;
+ continue;
+ }
+
if (!is_shadow_present_pte(iter.old_spte) ||
- is_last_spte(iter.old_spte, iter.level))
+ !is_last_spte(iter.old_spte, iter.level))
continue;
pfn = spte_to_pfn(iter.old_spte);
if (kvm_is_reserved_pfn(pfn) ||
- !PageTransCompoundMap(pfn_to_page(pfn)))
+ iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
+ pfn, PG_LEVEL_NUM))
continue;
tdp_mmu_set_spte(kvm, &iter, 0);
- spte_set = tdp_mmu_iter_flush_cond_resched(kvm, &iter);
+ spte_set = true;
}
+ rcu_read_unlock();
if (spte_set)
kvm_flush_remote_tlbs(kvm);
}
@@ -1084,7 +1315,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
* be replaced by large mappings, for GFNs within the slot.
*/
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
- const struct kvm_memory_slot *slot)
+ struct kvm_memory_slot *slot)
{
struct kvm_mmu_page *root;
int root_as_id;
@@ -1094,8 +1325,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
if (root_as_id != slot->as_id)
continue;
- zap_collapsible_spte_range(kvm, root, slot->base_gfn,
- slot->base_gfn + slot->npages);
+ zap_collapsible_spte_range(kvm, root, slot);
}
}
@@ -1111,6 +1341,8 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
u64 new_spte;
bool spte_set = false;
+ rcu_read_lock();
+
tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
if (!is_writable_pte(iter.old_spte))
break;
@@ -1122,6 +1354,8 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
spte_set = true;
}
+ rcu_read_unlock();
+
return spte_set;
}
@@ -1137,7 +1371,7 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
int root_as_id;
bool spte_set = false;
- lockdep_assert_held(&kvm->mmu_lock);
+ lockdep_assert_held_write(&kvm->mmu_lock);
for_each_tdp_mmu_root(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
@@ -1162,10 +1396,14 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
*root_level = vcpu->arch.mmu->shadow_root_level;
+ rcu_read_lock();
+
tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
leaf = iter.level;
sptes[leaf] = iter.old_spte;
}
+ rcu_read_unlock();
+
return leaf;
}
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index cbbdbadd1526..3b761c111bff 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -5,10 +5,6 @@
#include <linux/kvm_host.h>
-void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
-void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
-
-bool is_tdp_mmu_root(struct kvm *kvm, hpa_t root);
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
@@ -37,9 +33,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn, unsigned long mask,
bool wrprot);
-bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
- const struct kvm_memory_slot *slot);
+ struct kvm_memory_slot *slot);
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn);
@@ -47,4 +42,32 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
int *root_level);
+#ifdef CONFIG_X86_64
+void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
+static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
+static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
+#else
+static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
+static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
+static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
+static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
+#endif
+
+static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
+{
+ struct kvm_mmu_page *sp;
+
+ if (!is_tdp_mmu_enabled(kvm))
+ return false;
+ if (WARN_ON(!VALID_PAGE(hpa)))
+ return false;
+
+ sp = to_shadow_page(hpa);
+ if (WARN_ON(!sp))
+ return false;
+
+ return is_tdp_mmu_page(sp) && sp->root_count;
+}
+
#endif /* __KVM_X86_MMU_TDP_MMU_H */
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index f472fdb6ae7e..a8502e02f479 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -75,7 +75,7 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
/* variable MTRRs */
WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));
- mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
+ mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
if ((msr & 1) == 0) {
/* MTRR base */
if (!valid_mtrr_type(data & 0xff))
@@ -351,14 +351,14 @@ static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
if (var_mtrr_range_is_valid(cur))
list_del(&mtrr_state->var_ranges[index].node);
- /* Extend the mask with all 1 bits to the left, since those
- * bits must implicitly be 0. The bits are then cleared
- * when reading them.
+ /*
+ * Set all illegal GPA bits in the mask, since those bits must
+ * implicitly be 0. The bits are then cleared when reading them.
*/
if (!is_mtrr_mask)
cur->base = data;
else
- cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));
+ cur->mask = data | kvm_vcpu_reserved_gpa_bits_raw(vcpu);
/* add it to the list if it's enabled. */
if (var_mtrr_range_is_valid(cur)) {
@@ -426,7 +426,7 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
else
*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
- *pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
+ *pdata &= ~kvm_vcpu_reserved_gpa_bits_raw(vcpu);
}
return 0;
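The mtrr.c hunks above swap open-coded cpuid_maxphyaddr() shifts for kvm_vcpu_reserved_gpa_bits_raw(), which conceptually produces the same "all bits at and above MAXPHYADDR" mask (the raw variant, unlike vcpu->arch.reserved_gpa_bits touched later in this diff, is not adjusted for the SEV C-bit). The mask arithmetic in isolation, with an example MAXPHYADDR of 48:

#include <stdint.h>
#include <stdio.h>

/* Reserved-GPA mask for a given MAXPHYADDR: all bits [maxphyaddr, 63] set. */
static uint64_t reserved_gpa_bits(unsigned int maxphyaddr)
{
	return ~0ULL << maxphyaddr;
}

int main(void)
{
	unsigned int maxphyaddr = 48; /* example value from CPUID */
	uint64_t mask = reserved_gpa_bits(maxphyaddr);

	printf("mask  = %#018llx\n", (unsigned long long)mask);  /* 0xffff000000000000 */
	printf("valid = %#018llx\n", (unsigned long long)~mask); /* low 48 bits */
	return 0;
}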
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 67741d2a0308..827886c12c16 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -373,7 +373,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
return 1;
if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
- (kvm_x86_ops.get_cpl(vcpu) != 0) &&
+ (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
(kvm_read_cr0(vcpu) & X86_CR0_PE))
return 1;
@@ -383,8 +383,11 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
- if (lapic_in_kernel(vcpu))
+ if (lapic_in_kernel(vcpu)) {
+ if (kvm_x86_ops.pmu_ops->deliver_pmi)
+ kvm_x86_ops.pmu_ops->deliver_pmi(vcpu);
kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
+ }
}
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
@@ -473,6 +476,9 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
pmc_stop_counter(pmc);
}
+ if (kvm_x86_ops.pmu_ops->cleanup)
+ kvm_x86_ops.pmu_ops->cleanup(vcpu);
+
bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 067fef51760c..7b30bc967af3 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -39,6 +39,8 @@ struct kvm_pmu_ops {
void (*refresh)(struct kvm_vcpu *vcpu);
void (*init)(struct kvm_vcpu *vcpu);
void (*reset)(struct kvm_vcpu *vcpu);
+ void (*deliver_pmi)(struct kvm_vcpu *vcpu);
+ void (*cleanup)(struct kvm_vcpu *vcpu);
};
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 0ef84d57b72e..78bdcfac4e40 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -298,6 +298,23 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
return 0;
}
+static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
+ u32 icrl, u32 icrh)
+{
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ bool m = kvm_apic_match_dest(vcpu, source,
+ icrl & APIC_SHORT_MASK,
+ GET_APIC_DEST_FIELD(icrh),
+ icrl & APIC_DEST_MASK);
+
+ if (m && !avic_vcpu_is_running(vcpu))
+ kvm_vcpu_wake_up(vcpu);
+ }
+}
+
int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
{
u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
@@ -324,28 +341,14 @@ int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
kvm_lapic_reg_write(apic, APIC_ICR, icrl);
break;
- case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
- int i;
- struct kvm_vcpu *vcpu;
- struct kvm *kvm = svm->vcpu.kvm;
- struct kvm_lapic *apic = svm->vcpu.arch.apic;
-
+ case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING:
/*
* At this point, we expect that the AVIC HW has already
* set the appropriate IRR bits on the valid target
* vcpus. So, we just need to kick the appropriate vcpu.
*/
- kvm_for_each_vcpu(i, vcpu, kvm) {
- bool m = kvm_apic_match_dest(vcpu, apic,
- icrl & APIC_SHORT_MASK,
- GET_APIC_DEST_FIELD(icrh),
- icrl & APIC_DEST_MASK);
-
- if (m && !avic_vcpu_is_running(vcpu))
- kvm_vcpu_wake_up(vcpu);
- }
+ avic_kick_target_vcpus(svm->vcpu.kvm, apic, icrl, icrh);
break;
- }
case AVIC_IPI_FAILURE_INVALID_TARGET:
WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
index, svm->vcpu.vcpu_id, icrh, icrl);
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index cb4c6ee10029..35891d9a1099 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -51,6 +51,23 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
nested_svm_vmexit(svm);
}
+static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ WARN_ON(!is_guest_mode(vcpu));
+
+ if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
+ !svm->nested.nested_run_pending) {
+ svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
+ svm->vmcb->control.exit_code_hi = 0;
+ svm->vmcb->control.exit_info_1 = fault->error_code;
+ svm->vmcb->control.exit_info_2 = fault->address;
+ nested_svm_vmexit(svm);
+ } else {
+ kvm_inject_page_fault(vcpu, fault);
+ }
+}
+
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -58,7 +75,7 @@ static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
u64 pdpte;
int ret;
- ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
+ ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
offset_in_page(cr3) + index * 8, 8);
if (ret)
return 0;
@@ -200,6 +217,9 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (WARN_ON(!is_guest_mode(vcpu)))
+ return true;
+
if (!nested_svm_vmrun_msrpm(svm)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror =
@@ -228,6 +248,7 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
bool vmcb12_lma;
if ((vmcb12->save.efer & EFER_SVME) == 0)
@@ -241,18 +262,10 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
- if (!vmcb12_lma) {
- if (vmcb12->save.cr4 & X86_CR4_PAE) {
- if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
- return false;
- } else {
- if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
- return false;
- }
- } else {
+ if (vmcb12_lma) {
if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
!(vmcb12->save.cr0 & X86_CR0_PE) ||
- (vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
+ kvm_vcpu_is_illegal_gpa(vcpu, vmcb12->save.cr3))
return false;
}
if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
@@ -349,7 +362,7 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm)
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
bool nested_npt)
{
- if (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63))
+ if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
return -EINVAL;
if (!nested_npt && is_pae_paging(vcpu) &&
@@ -396,7 +409,7 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
svm->vmcb->save.rsp = vmcb12->save.rsp;
svm->vmcb->save.rip = vmcb12->save.rip;
svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
- svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_FIXED_1 | DR6_RTM;
+ svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
svm->vmcb->save.cpl = vmcb12->save.cpl;
}
@@ -440,16 +453,33 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
{
int ret;
+ trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
+ vmcb12->save.rip,
+ vmcb12->control.int_ctl,
+ vmcb12->control.event_inj,
+ vmcb12->control.nested_ctl);
+
+ trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
+ vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
+ vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
+ vmcb12->control.intercepts[INTERCEPT_WORD3],
+ vmcb12->control.intercepts[INTERCEPT_WORD4],
+ vmcb12->control.intercepts[INTERCEPT_WORD5]);
+
+
svm->nested.vmcb12_gpa = vmcb12_gpa;
load_nested_vmcb_control(svm, &vmcb12->control);
- nested_prepare_vmcb_save(svm, vmcb12);
nested_prepare_vmcb_control(svm);
+ nested_prepare_vmcb_save(svm, vmcb12);
ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
nested_npt_enabled(svm));
if (ret)
return ret;
+ if (!npt_enabled)
+ svm->vcpu.arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
+
svm_set_gif(svm, true);
return 0;
@@ -493,18 +523,6 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
goto out;
}
- trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
- vmcb12->save.rip,
- vmcb12->control.int_ctl,
- vmcb12->control.event_inj,
- vmcb12->control.nested_ctl);
-
- trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
- vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
- vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
- vmcb12->control.intercepts[INTERCEPT_WORD3],
- vmcb12->control.intercepts[INTERCEPT_WORD4],
- vmcb12->control.intercepts[INTERCEPT_WORD5]);
/* Clear internal status */
kvm_clear_exception_queue(&svm->vcpu);
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index c8ffdbc81709..874ea309279f 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -22,6 +22,7 @@
#include "x86.h"
#include "svm.h"
+#include "svm_ops.h"
#include "cpuid.h"
#include "trace.h"
@@ -342,6 +343,8 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
unsigned long first, last;
int ret;
+ lockdep_assert_held(&kvm->lock);
+
if (ulen == 0 || uaddr + ulen < uaddr)
return ERR_PTR(-EINVAL);
@@ -1039,6 +1042,74 @@ e_unpin_memory:
return ret;
}
+static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ void __user *report = (void __user *)(uintptr_t)argp->data;
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_attestation_report *data;
+ struct kvm_sev_attestation_report params;
+ void __user *p;
+ void *blob = NULL;
+ int ret;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
+ if (!data)
+ return -ENOMEM;
+
+ /* User wants to query the blob length */
+ if (!params.len)
+ goto cmd;
+
+ p = (void __user *)(uintptr_t)params.uaddr;
+ if (p) {
+ if (params.len > SEV_FW_BLOB_MAX_SIZE) {
+ ret = -EINVAL;
+ goto e_free;
+ }
+
+ ret = -ENOMEM;
+ blob = kmalloc(params.len, GFP_KERNEL);
+ if (!blob)
+ goto e_free;
+
+ data->address = __psp_pa(blob);
+ data->len = params.len;
+ memcpy(data->mnonce, params.mnonce, sizeof(params.mnonce));
+ }
+cmd:
+ data->handle = sev->handle;
+ ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, data, &argp->error);
+ /*
+ * If we query the session length, FW responded with expected data.
+ */
+ if (!params.len)
+ goto done;
+
+ if (ret)
+ goto e_free_blob;
+
+ if (blob) {
+ if (copy_to_user(p, blob, params.len))
+ ret = -EFAULT;
+ }
+
+done:
+ params.len = data->len;
+ if (copy_to_user(report, &params, sizeof(params)))
+ ret = -EFAULT;
+e_free_blob:
+ kfree(blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
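Userspace is expected to drive the new KVM_SEV_GET_ATTESTATION_REPORT command through the KVM_MEMORY_ENCRYPT_OP VM ioctl, typically twice: once with len == 0 to learn the report size, then again with a buffer. A rough sketch, assuming the uapi additions from this series (struct kvm_sev_attestation_report and the new command id) are available in <linux/kvm.h> and SEV is already active on the VM:

#include <string.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: query the report length, then fetch the report itself. */
void *get_attestation_report(int vm_fd, int sev_fd,
			     const unsigned char mnonce[16], __u32 *len)
{
	struct kvm_sev_attestation_report report = {};
	struct kvm_sev_cmd cmd = {
		.id = KVM_SEV_GET_ATTESTATION_REPORT,
		.data = (unsigned long)&report,
		.sev_fd = sev_fd,
	};
	void *blob;

	memcpy(report.mnonce, mnonce, sizeof(report.mnonce));

	/* First call with len == 0: firmware reports the required size. */
	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
	if (!report.len)
		return NULL;

	blob = malloc(report.len);
	if (!blob)
		return NULL;

	report.uaddr = (unsigned long)blob;
	if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd)) {
		free(blob);
		return NULL;
	}

	*len = report.len;
	return blob;
}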
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
struct kvm_sev_cmd sev_cmd;
@@ -1089,6 +1160,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
case KVM_SEV_LAUNCH_SECRET:
r = sev_launch_secret(kvm, &sev_cmd);
break;
+ case KVM_SEV_GET_ATTESTATION_REPORT:
+ r = sev_get_attestation_report(kvm, &sev_cmd);
+ break;
default:
r = -EINVAL;
goto out;
@@ -1119,12 +1193,20 @@ int svm_register_enc_region(struct kvm *kvm,
if (!region)
return -ENOMEM;
+ mutex_lock(&kvm->lock);
region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
if (IS_ERR(region->pages)) {
ret = PTR_ERR(region->pages);
+ mutex_unlock(&kvm->lock);
goto e_free;
}
+ region->uaddr = range->addr;
+ region->size = range->size;
+
+ list_add_tail(&region->list, &sev->regions_list);
+ mutex_unlock(&kvm->lock);
+
/*
* The guest may change the memory encryption attribute from C=0 -> C=1
* or vice versa for this memory range. Let's make sure caches are
@@ -1133,13 +1215,6 @@ int svm_register_enc_region(struct kvm *kvm,
*/
sev_clflush_pages(region->pages, region->npages);
- region->uaddr = range->addr;
- region->size = range->size;
-
- mutex_lock(&kvm->lock);
- list_add_tail(&region->list, &sev->regions_list);
- mutex_unlock(&kvm->lock);
-
return ret;
e_free:
@@ -1415,16 +1490,13 @@ static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
* to be returned:
* GPRs RAX, RBX, RCX, RDX
*
- * Copy their values to the GHCB if they are dirty.
+ * Copy their values, even if they may not have been written during the
+ * VM-Exit. It's the guest's responsibility to not consume random data.
*/
- if (kvm_register_is_dirty(vcpu, VCPU_REGS_RAX))
- ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
- if (kvm_register_is_dirty(vcpu, VCPU_REGS_RBX))
- ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
- if (kvm_register_is_dirty(vcpu, VCPU_REGS_RCX))
- ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
- if (kvm_register_is_dirty(vcpu, VCPU_REGS_RDX))
- ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
+ ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
+ ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
+ ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
+ ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
}
static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
@@ -1994,29 +2066,17 @@ void sev_es_create_vcpu(struct vcpu_svm *svm)
sev_enc_bit));
}
-void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
+void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu)
{
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
struct vmcb_save_area *hostsa;
- unsigned int i;
/*
* As an SEV-ES guest, hardware will restore the host state on VMEXIT,
* of which one step is to perform a VMLOAD. Since hardware does not
* perform a VMSAVE on VMRUN, the host savearea must be updated.
*/
- asm volatile(__ex("vmsave %0") : : "a" (__sme_page_pa(sd->save_area)) : "memory");
-
- /*
- * Certain MSRs are restored on VMEXIT, only save ones that aren't
- * restored.
- */
- for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) {
- if (host_save_user_msrs[i].sev_es_restored)
- continue;
-
- rdmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
- }
+ vmsave(__sme_page_pa(sd->save_area));
/* XCR0 is restored on VMEXIT, save the current host value */
hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
@@ -2029,22 +2089,6 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
hostsa->xss = host_xss;
}
-void sev_es_vcpu_put(struct vcpu_svm *svm)
-{
- unsigned int i;
-
- /*
- * Certain MSRs are restored on VMEXIT and were saved with vmsave in
- * sev_es_vcpu_load() above. Only restore ones that weren't.
- */
- for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) {
- if (host_save_user_msrs[i].sev_es_restored)
- continue;
-
- wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
- }
-}
-
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{
struct vcpu_svm *svm = to_svm(vcpu);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 7ef171790d02..baee91c1e936 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -41,6 +41,7 @@
#include "trace.h"
#include "svm.h"
+#include "svm_ops.h"
#define __ex(x) __kvm_handle_fault_on_reboot(x)
@@ -200,9 +201,9 @@ module_param(sev_es, int, 0444);
bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);
-static u8 rsm_ins_bytes[] = "\x0f\xaa";
+static bool svm_gp_erratum_intercept = true;
-static void svm_complete_interrupts(struct vcpu_svm *svm);
+static u8 rsm_ins_bytes[] = "\x0f\xaa";
static unsigned long iopm_base;
@@ -246,21 +247,6 @@ u32 svm_msrpm_offset(u32 msr)
#define MAX_INST_SIZE 15
-static inline void clgi(void)
-{
- asm volatile (__ex("clgi"));
-}
-
-static inline void stgi(void)
-{
- asm volatile (__ex("stgi"));
-}
-
-static inline void invlpga(unsigned long addr, u32 asid)
-{
- asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
-}
-
static int get_max_npt_level(void)
{
#ifdef CONFIG_X86_64
@@ -288,6 +274,9 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
if (!(efer & EFER_SVME)) {
svm_leave_nested(svm);
svm_set_gif(svm, true);
+ /* #GP intercept is still needed for vmware backdoor */
+ if (!enable_vmware_backdoor)
+ clr_exception_intercept(svm, GP_VECTOR);
/*
* Free the nested guest state, unless we are in SMM.
@@ -304,6 +293,9 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
vcpu->arch.efer = old_efer;
return ret;
}
+
+ if (svm_gp_erratum_intercept)
+ set_exception_intercept(svm, GP_VECTOR);
}
}
@@ -454,6 +446,11 @@ static int has_svm(void)
return 0;
}
+ if (sev_active()) {
+ pr_info("KVM is unsupported when running as an SEV guest\n");
+ return 0;
+ }
+
return 1;
}
@@ -920,15 +917,15 @@ static __init void svm_set_cpu_caps(void)
if (npt_enabled)
kvm_cpu_cap_set(X86_FEATURE_NPT);
+
+ /* Nested VM can receive #VMEXIT instead of triggering #GP */
+ kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
}
/* CPUID 0x80000008 */
if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
boot_cpu_has(X86_FEATURE_AMD_SSBD))
kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
-
- /* Enable INVPCID feature */
- kvm_cpu_cap_check_and_set(X86_FEATURE_INVPCID);
}
static __init int svm_hardware_setup(void)
@@ -1027,6 +1024,9 @@ static __init int svm_hardware_setup(void)
}
}
+ if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK))
+ svm_gp_erratum_intercept = false;
+
if (vgif) {
if (!boot_cpu_has(X86_FEATURE_VGIF))
vgif = false;
@@ -1100,12 +1100,12 @@ static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
static void svm_check_invpcid(struct vcpu_svm *svm)
{
/*
- * Intercept INVPCID instruction only if shadow page table is
- * enabled. Interception is not required with nested page table
- * enabled.
+ * Intercept INVPCID if shadow paging is enabled to sync/free shadow
+ * roots, or if INVPCID is disabled in the guest to inject #UD.
*/
if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) {
- if (!npt_enabled)
+ if (!npt_enabled ||
+ !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID))
svm_set_intercept(svm, INTERCEPT_INVPCID);
else
svm_clr_intercept(svm, INTERCEPT_INVPCID);
@@ -1200,9 +1200,10 @@ static void init_vmcb(struct vcpu_svm *svm)
init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
+ svm_set_cr4(&svm->vcpu, 0);
svm_set_efer(&svm->vcpu, 0);
save->dr6 = 0xffff0ff0;
- kvm_set_rflags(&svm->vcpu, 2);
+ kvm_set_rflags(&svm->vcpu, X86_EFLAGS_FIXED);
save->rip = 0x0000fff0;
svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
@@ -1361,6 +1362,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
svm->vmsa = page_address(vmsa_page);
svm->asid_generation = 0;
+ svm->guest_state_loaded = false;
init_vmcb(svm);
svm_init_osvw(vcpu);
@@ -1408,30 +1410,31 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
}
-static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
- int i;
+ struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
+ unsigned int i;
- if (unlikely(cpu != vcpu->cpu)) {
- svm->asid_generation = 0;
- vmcb_mark_all_dirty(svm->vmcb);
- }
+ if (svm->guest_state_loaded)
+ return;
+
+ /*
+ * Certain MSRs are restored on VMEXIT (sev-es), or vmload of host save
+ * area (non-sev-es). Save ones that aren't so we can restore them
+ * individually later.
+ */
+ for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+ rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
+ /*
+ * Save additional host state that will be restored on VMEXIT (sev-es)
+ * or subsequent vmload of host save area.
+ */
if (sev_es_guest(svm->vcpu.kvm)) {
- sev_es_vcpu_load(svm, cpu);
+ sev_es_prepare_guest_switch(svm, vcpu->cpu);
} else {
-#ifdef CONFIG_X86_64
- rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
-#endif
- savesegment(fs, svm->host.fs);
- savesegment(gs, svm->host.gs);
- svm->host.ldt = kvm_read_ldt();
-
- for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
- rdmsrl(host_save_user_msrs[i].index,
- svm->host_user_msrs[i]);
+ vmsave(__sme_page_pa(sd->save_area));
}
if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
@@ -1441,10 +1444,42 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
}
}
+
/* This assumes that the kernel never uses MSR_TSC_AUX */
if (static_cpu_has(X86_FEATURE_RDTSCP))
wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
+ svm->guest_state_loaded = true;
+}
+
+static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ unsigned int i;
+
+ if (!svm->guest_state_loaded)
+ return;
+
+ /*
+ * Certain MSRs are restored on VMEXIT (sev-es), or vmload of host save
+ * area (non-sev-es). Restore the ones that weren't.
+ */
+ for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+ wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
+
+ svm->guest_state_loaded = false;
+}
+
+static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+
+ if (unlikely(cpu != vcpu->cpu)) {
+ svm->asid_generation = 0;
+ vmcb_mark_all_dirty(svm->vmcb);
+ }
+
if (sd->current_vmcb != svm->vmcb) {
sd->current_vmcb = svm->vmcb;
indirect_branch_prediction_barrier();
@@ -1454,30 +1489,10 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
- struct vcpu_svm *svm = to_svm(vcpu);
- int i;
-
avic_vcpu_put(vcpu);
+ svm_prepare_host_switch(vcpu);
++vcpu->stat.host_state_reload;
- if (sev_es_guest(svm->vcpu.kvm)) {
- sev_es_vcpu_put(svm);
- } else {
- kvm_load_ldt(svm->host.ldt);
-#ifdef CONFIG_X86_64
- loadsegment(fs, svm->host.fs);
- wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
- load_gs_index(svm->host.gs);
-#else
-#ifdef CONFIG_X86_32_LAZY_GS
- loadsegment(gs, svm->host.gs);
-#endif
-#endif
-
- for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
- wrmsrl(host_save_user_msrs[i].index,
- svm->host_user_msrs[i]);
- }
}
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
@@ -1810,7 +1825,7 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
}
-static void update_exception_bitmap(struct kvm_vcpu *vcpu)
+static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1860,7 +1875,7 @@ static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
get_debugreg(vcpu->arch.db[2], 2);
get_debugreg(vcpu->arch.db[3], 3);
/*
- * We cannot reset svm->vmcb->save.dr6 to DR6_FIXED_1|DR6_RTM here,
+ * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here,
* because db_interception might need it. We can do it before vmentry.
*/
vcpu->arch.dr6 = svm->vmcb->save.dr6;
@@ -1911,7 +1926,7 @@ static int db_interception(struct vcpu_svm *svm)
if (!(svm->vcpu.guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
!svm->nmi_singlestep) {
- u32 payload = (svm->vmcb->save.dr6 ^ DR6_RTM) & ~DR6_FIXED_1;
+ u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW;
kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload);
return 1;
}
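DR6_FIXED_1 and DR6_RTM are folded into DR6_ACTIVE_LOW throughout this diff. Assuming DR6_ACTIVE_LOW is the value an idle DR6 reads as (0xffff0ff0, i.e. the fixed-1 bits plus the active-low RTM bit), the XOR above turns the raw register into a positive-logic #DB payload; a worked example:

#include <stdint.h>
#include <stdio.h>

#define DR6_ACTIVE_LOW 0xffff0ff0ULL /* assumed definition from kvm_host.h */

int main(void)
{
	/* Guest hit hardware breakpoint 0: DR6.B0 set, everything else idle. */
	uint64_t dr6 = DR6_ACTIVE_LOW | 0x1;

	/* Payload as delivered with the #DB exception: only B0 remains set. */
	printf("payload = %#llx\n", (unsigned long long)(dr6 ^ DR6_ACTIVE_LOW));
	return 0;
}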
@@ -1957,24 +1972,6 @@ static int ac_interception(struct vcpu_svm *svm)
return 1;
}
-static int gp_interception(struct vcpu_svm *svm)
-{
- struct kvm_vcpu *vcpu = &svm->vcpu;
- u32 error_code = svm->vmcb->control.exit_info_1;
-
- WARN_ON_ONCE(!enable_vmware_backdoor);
-
- /*
- * VMware backdoor emulation on #GP interception only handles IN{S},
- * OUT{S}, and RDPMC, none of which generate a non-zero error code.
- */
- if (error_code) {
- kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
- return 1;
- }
- return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
-}
-
static bool is_erratum_383(void)
{
int err, i;
@@ -2173,6 +2170,107 @@ static int vmrun_interception(struct vcpu_svm *svm)
return nested_svm_vmrun(svm);
}
+enum {
+ NONE_SVM_INSTR,
+ SVM_INSTR_VMRUN,
+ SVM_INSTR_VMLOAD,
+ SVM_INSTR_VMSAVE,
+};
+
+/* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */
+static int svm_instr_opcode(struct kvm_vcpu *vcpu)
+{
+ struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
+
+ if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
+ return NONE_SVM_INSTR;
+
+ switch (ctxt->modrm) {
+ case 0xd8: /* VMRUN */
+ return SVM_INSTR_VMRUN;
+ case 0xda: /* VMLOAD */
+ return SVM_INSTR_VMLOAD;
+ case 0xdb: /* VMSAVE */
+ return SVM_INSTR_VMSAVE;
+ default:
+ break;
+ }
+
+ return NONE_SVM_INSTR;
+}
+
+static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
+{
+ const int guest_mode_exit_codes[] = {
+ [SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN,
+ [SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD,
+ [SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE,
+ };
+ int (*const svm_instr_handlers[])(struct vcpu_svm *svm) = {
+ [SVM_INSTR_VMRUN] = vmrun_interception,
+ [SVM_INSTR_VMLOAD] = vmload_interception,
+ [SVM_INSTR_VMSAVE] = vmsave_interception,
+ };
+ struct vcpu_svm *svm = to_svm(vcpu);
+ int ret;
+
+ if (is_guest_mode(vcpu)) {
+ svm->vmcb->control.exit_code = guest_mode_exit_codes[opcode];
+ svm->vmcb->control.exit_info_1 = 0;
+ svm->vmcb->control.exit_info_2 = 0;
+
+ /* Returns '1' or -errno on failure, '0' on success. */
+ ret = nested_svm_vmexit(svm);
+ if (ret)
+ return ret;
+ return 1;
+ }
+ return svm_instr_handlers[opcode](svm);
+}
+
+/*
+ * #GP handling code. Note that #GP can be triggered under the following two
+ * cases:
+ * 1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on
+ * some AMD CPUs when the EAX of these instructions is in the reserved memory
+ * regions (e.g. SMM memory on host).
+ * 2) VMware backdoor
+ */
+static int gp_interception(struct vcpu_svm *svm)
+{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+ u32 error_code = svm->vmcb->control.exit_info_1;
+ int opcode;
+
+ /* Both #GP cases have zero error_code */
+ if (error_code)
+ goto reinject;
+
+ /* Decode the instruction for usage later */
+ if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
+ goto reinject;
+
+ opcode = svm_instr_opcode(vcpu);
+
+ if (opcode == NONE_SVM_INSTR) {
+ if (!enable_vmware_backdoor)
+ goto reinject;
+
+ /*
+ * VMware backdoor emulation on #GP interception only handles
+ * IN{S}, OUT{S}, and RDPMC.
+ */
+ if (!is_guest_mode(vcpu))
+ return kvm_emulate_instruction(vcpu,
+ EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
+ } else
+ return emulate_svm_instr(vcpu, opcode);
+
+reinject:
+ kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+ return 1;
+}
+
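svm_instr_opcode() keys off the two-byte 0F 01 opcode plus the ModRM byte supplied by KVM's emulator context. The same mapping over raw instruction bytes, as a standalone illustration:

#include <stdio.h>

enum { NONE_SVM_INSTR, SVM_INSTR_VMRUN, SVM_INSTR_VMLOAD, SVM_INSTR_VMSAVE };

/* 0F 01 D8 = VMRUN, 0F 01 DA = VMLOAD, 0F 01 DB = VMSAVE */
static int classify_svm_instr(const unsigned char *insn)
{
	if (insn[0] != 0x0f || insn[1] != 0x01)
		return NONE_SVM_INSTR;

	switch (insn[2]) {
	case 0xd8: return SVM_INSTR_VMRUN;
	case 0xda: return SVM_INSTR_VMLOAD;
	case 0xdb: return SVM_INSTR_VMSAVE;
	default:   return NONE_SVM_INSTR;
	}
}

int main(void)
{
	const unsigned char vmrun[]  = { 0x0f, 0x01, 0xd8 };
	const unsigned char vmload[] = { 0x0f, 0x01, 0xda };

	printf("%d %d\n", classify_svm_instr(vmrun), classify_svm_instr(vmload));
	return 0;
}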
void svm_set_gif(struct vcpu_svm *svm, bool value)
{
if (value) {
@@ -2260,11 +2358,8 @@ static int xsetbv_interception(struct vcpu_svm *svm)
u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
u32 index = kvm_rcx_read(&svm->vcpu);
- if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
- return kvm_skip_emulated_instruction(&svm->vcpu);
- }
-
- return 1;
+ int err = kvm_set_xcr(&svm->vcpu, index, new_bv);
+ return kvm_complete_insn_gp(&svm->vcpu, err);
}
static int rdpru_interception(struct vcpu_svm *svm)
@@ -2525,6 +2620,7 @@ static int dr_interception(struct vcpu_svm *svm)
{
int reg, dr;
unsigned long val;
+ int err = 0;
if (svm->vcpu.guest_debug == 0) {
/*
@@ -2542,20 +2638,16 @@ static int dr_interception(struct vcpu_svm *svm)
reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
-
- if (dr >= 16) { /* mov to DRn */
- if (!kvm_require_dr(&svm->vcpu, dr - 16))
- return 1;
+ if (dr >= 16) { /* mov to DRn */
+ dr -= 16;
val = kvm_register_read(&svm->vcpu, reg);
- kvm_set_dr(&svm->vcpu, dr - 16, val);
+ err = kvm_set_dr(&svm->vcpu, dr, val);
} else {
- if (!kvm_require_dr(&svm->vcpu, dr))
- return 1;
kvm_get_dr(&svm->vcpu, dr, &val);
kvm_register_write(&svm->vcpu, reg, val);
}
- return kvm_skip_emulated_instruction(&svm->vcpu);
+ return kvm_complete_insn_gp(&svm->vcpu, err);
}
static int cr8_write_interception(struct vcpu_svm *svm)
@@ -3349,7 +3441,7 @@ static void svm_set_irq(struct kvm_vcpu *vcpu)
SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}
-static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3474,7 +3566,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
return !svm_interrupt_blocked(vcpu);
}
-static void enable_irq_window(struct kvm_vcpu *vcpu)
+static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3498,7 +3590,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
}
}
-static void enable_nmi_window(struct kvm_vcpu *vcpu)
+static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3555,10 +3647,6 @@ static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
invlpga(gva, svm->vmcb->control.asid);
}
-static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
-{
-}
-
static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3703,16 +3791,11 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
if (sev_es_guest(svm->vcpu.kvm)) {
__svm_sev_es_vcpu_run(svm->vmcb_pa);
} else {
+ struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
+
__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
-#ifdef CONFIG_X86_64
- native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
-#else
- loadsegment(fs, svm->host.fs);
-#ifndef CONFIG_X86_32_LAZY_GS
- loadsegment(gs, svm->host.gs);
-#endif
-#endif
+ vmload(__sme_page_pa(sd->save_area));
}
/*
@@ -3739,6 +3822,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ trace_kvm_entry(vcpu);
+
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
@@ -3776,7 +3861,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
svm_set_dr6(svm, vcpu->arch.dr6);
else
- svm_set_dr6(svm, DR6_FIXED_1 | DR6_RTM);
+ svm_set_dr6(svm, DR6_ACTIVE_LOW);
clgi();
kvm_load_guest_xsave_state(vcpu);
@@ -3971,7 +4056,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
if (sev_guest(vcpu->kvm)) {
best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0);
if (best)
- vcpu->arch.cr3_lm_rsvd_bits &= ~(1UL << (best->ebx & 0x3f));
+ vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
}
if (!kvm_vcpu_apicv_active(vcpu))
@@ -4278,7 +4363,7 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
return ret;
}
-static void enable_smi_window(struct kvm_vcpu *vcpu)
+static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -4432,7 +4517,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.vcpu_blocking = svm_vcpu_blocking,
.vcpu_unblocking = svm_vcpu_unblocking,
- .update_exception_bitmap = update_exception_bitmap,
+ .update_exception_bitmap = svm_update_exception_bitmap,
.get_msr_feature = svm_get_msr_feature,
.get_msr = svm_get_msr,
.set_msr = svm_set_msr,
@@ -4475,9 +4560,9 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.nmi_allowed = svm_nmi_allowed,
.get_nmi_mask = svm_get_nmi_mask,
.set_nmi_mask = svm_set_nmi_mask,
- .enable_nmi_window = enable_nmi_window,
- .enable_irq_window = enable_irq_window,
- .update_cr8_intercept = update_cr8_intercept,
+ .enable_nmi_window = svm_enable_nmi_window,
+ .enable_irq_window = svm_enable_irq_window,
+ .update_cr8_intercept = svm_update_cr8_intercept,
.set_virtual_apic_mode = svm_set_virtual_apic_mode,
.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
.check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons,
@@ -4520,7 +4605,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.smi_allowed = svm_smi_allowed,
.pre_enter_smm = svm_pre_enter_smm,
.pre_leave_smm = svm_pre_leave_smm,
- .enable_smi_window = enable_smi_window,
+ .enable_smi_window = svm_enable_smi_window,
.mem_enc_op = svm_mem_enc_op,
.mem_enc_reg_region = svm_register_enc_region,
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 0fe874ae5498..39e071fdab0c 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -23,22 +23,8 @@
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
-static const struct svm_host_save_msrs {
- u32 index; /* Index of the MSR */
- bool sev_es_restored; /* True if MSR is restored on SEV-ES VMEXIT */
-} host_save_user_msrs[] = {
-#ifdef CONFIG_X86_64
- { .index = MSR_STAR, .sev_es_restored = true },
- { .index = MSR_LSTAR, .sev_es_restored = true },
- { .index = MSR_CSTAR, .sev_es_restored = true },
- { .index = MSR_SYSCALL_MASK, .sev_es_restored = true },
- { .index = MSR_KERNEL_GS_BASE, .sev_es_restored = true },
- { .index = MSR_FS_BASE, .sev_es_restored = true },
-#endif
- { .index = MSR_IA32_SYSENTER_CS, .sev_es_restored = true },
- { .index = MSR_IA32_SYSENTER_ESP, .sev_es_restored = true },
- { .index = MSR_IA32_SYSENTER_EIP, .sev_es_restored = true },
- { .index = MSR_TSC_AUX, .sev_es_restored = false },
+static const u32 host_save_user_msrs[] = {
+ MSR_TSC_AUX,
};
#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
@@ -130,12 +116,6 @@ struct vcpu_svm {
u64 next_rip;
u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
- struct {
- u16 fs;
- u16 gs;
- u16 ldt;
- u64 gs_base;
- } host;
u64 spec_ctrl;
/*
@@ -192,6 +172,8 @@ struct vcpu_svm {
u64 ghcb_sa_len;
bool ghcb_sa_sync;
bool ghcb_sa_free;
+
+ bool guest_state_loaded;
};
struct svm_cpu_data {
@@ -403,9 +385,6 @@ static inline bool gif_set(struct vcpu_svm *svm)
}
/* svm.c */
-#define MSR_CR3_LEGACY_RESERVED_MASK 0xfe7U
-#define MSR_CR3_LEGACY_PAE_RESERVED_MASK 0x7U
-#define MSR_CR3_LONG_MBZ_MASK 0xfff0000000000000U
#define MSR_INVALID 0xffffffffU
extern int sev;
@@ -590,9 +569,8 @@ int sev_handle_vmgexit(struct vcpu_svm *svm);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);
-void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
-void sev_es_vcpu_put(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
+void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
/* vmenter.S */
diff --git a/arch/x86/kvm/svm/svm_ops.h b/arch/x86/kvm/svm/svm_ops.h
new file mode 100644
index 000000000000..8170f2a5a16f
--- /dev/null
+++ b/arch/x86/kvm/svm/svm_ops.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_SVM_OPS_H
+#define __KVM_X86_SVM_OPS_H
+
+#include <linux/compiler_types.h>
+
+#include <asm/kvm_host.h>
+
+#define svm_asm(insn, clobber...) \
+do { \
+ asm_volatile_goto("1: " __stringify(insn) "\n\t" \
+ _ASM_EXTABLE(1b, %l[fault]) \
+ ::: clobber : fault); \
+ return; \
+fault: \
+ kvm_spurious_fault(); \
+} while (0)
+
+#define svm_asm1(insn, op1, clobber...) \
+do { \
+ asm_volatile_goto("1: " __stringify(insn) " %0\n\t" \
+ _ASM_EXTABLE(1b, %l[fault]) \
+ :: op1 : clobber : fault); \
+ return; \
+fault: \
+ kvm_spurious_fault(); \
+} while (0)
+
+#define svm_asm2(insn, op1, op2, clobber...) \
+do { \
+ asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t" \
+ _ASM_EXTABLE(1b, %l[fault]) \
+ :: op1, op2 : clobber : fault); \
+ return; \
+fault: \
+ kvm_spurious_fault(); \
+} while (0)
+
+static inline void clgi(void)
+{
+ svm_asm(clgi);
+}
+
+static inline void stgi(void)
+{
+ svm_asm(stgi);
+}
+
+static inline void invlpga(unsigned long addr, u32 asid)
+{
+ svm_asm2(invlpga, "c"(asid), "a"(addr));
+}
+
+/*
+ * Despite being a physical address, the portion of rAX that is consumed by
+ * VMSAVE, VMLOAD, etc... is still controlled by the effective address size,
+ * hence 'unsigned long' instead of 'hpa_t'.
+ */
+static inline void vmsave(unsigned long pa)
+{
+ svm_asm1(vmsave, "a" (pa), "memory");
+}
+
+static inline void vmload(unsigned long pa)
+{
+ svm_asm1(vmload, "a" (pa), "memory");
+}
+
+#endif /* __KVM_X86_SVM_OPS_H */
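For reference, svm_asm1(vmsave, "a" (pa), "memory") expands to roughly the function below (hand-expanded here for illustration): if the instruction faults, the exception-table entry redirects execution to the fault label and kvm_spurious_fault() runs instead of the host oopsing.

static inline void vmsave_expanded(unsigned long pa)
{
	asm_volatile_goto("1: vmsave %0\n\t"
			  _ASM_EXTABLE(1b, %l[fault])
			  :: "a" (pa) : "memory" : fault);
	return;
fault:
	kvm_spurious_fault();
}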
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 2de30c20bc26..a61c015870e3 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -93,6 +93,42 @@ TRACE_EVENT(kvm_hv_hypercall,
);
/*
+ * Tracepoint for Xen hypercall.
+ */
+TRACE_EVENT(kvm_xen_hypercall,
+ TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long a4,
+ unsigned long a5),
+ TP_ARGS(nr, a0, a1, a2, a3, a4, a5),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, nr)
+ __field(unsigned long, a0)
+ __field(unsigned long, a1)
+ __field(unsigned long, a2)
+ __field(unsigned long, a3)
+ __field(unsigned long, a4)
+ __field(unsigned long, a5)
+ ),
+
+ TP_fast_assign(
+ __entry->nr = nr;
+ __entry->a0 = a0;
+ __entry->a1 = a1;
+ __entry->a2 = a2;
+ __entry->a3 = a3;
+ __entry->a4 = a4;
+ __entry->a5 = a5;
+ ),
+
+ TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 0x%lx",
+ __entry->nr, __entry->a0, __entry->a1, __entry->a2,
+ __entry->a3, __entry->a4, __entry->a5)
+);
+
+
+
+/*
* Tracepoint for PIO.
*/
@@ -256,7 +292,7 @@ TRACE_EVENT(name, \
__entry->guest_rip = kvm_rip_read(vcpu); \
__entry->isa = isa; \
__entry->vcpu_id = vcpu->vcpu_id; \
- kvm_x86_ops.get_exit_info(vcpu, &__entry->info1, \
+ static_call(kvm_x86_get_exit_info)(vcpu, &__entry->info1, \
&__entry->info2, \
&__entry->intr_info, \
&__entry->error_code); \
@@ -738,7 +774,7 @@ TRACE_EVENT(kvm_emulate_insn,
),
TP_fast_assign(
- __entry->csbase = kvm_x86_ops.get_segment_base(vcpu, VCPU_SREG_CS);
+ __entry->csbase = static_call(kvm_x86_get_segment_base)(vcpu, VCPU_SREG_CS);
__entry->len = vcpu->arch.emulate_ctxt->fetch.ptr
- vcpu->arch.emulate_ctxt->fetch.data;
__entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len;
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 3a1861403d73..d1d77985e889 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -19,6 +19,9 @@ extern int __read_mostly pt_mode;
#define PT_MODE_HOST_GUEST 1
#define PMU_CAP_FW_WRITES (1ULL << 13)
+#define PMU_CAP_LBR_FMT 0x3f
+
+#define DEBUGCTLMSR_LBR_MASK (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI)
struct nested_vmx_msrs {
/*
@@ -262,6 +265,12 @@ static inline bool cpu_has_vmx_tsc_scaling(void)
SECONDARY_EXEC_TSC_SCALING;
}
+static inline bool cpu_has_vmx_bus_lock_detection(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_BUS_LOCK_DETECTION;
+}
+
static inline bool cpu_has_vmx_apicv(void)
{
return cpu_has_vmx_apic_register_virt() &&
@@ -371,11 +380,28 @@ static inline bool vmx_pt_mode_is_host_guest(void)
static inline u64 vmx_get_perf_capabilities(void)
{
+ u64 perf_cap = 0;
+
+ if (boot_cpu_has(X86_FEATURE_PDCM))
+ rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap);
+
+ perf_cap &= PMU_CAP_LBR_FMT;
+
/*
* Since counters are virtualized, KVM would support full
* width counting unconditionally, even if the host lacks it.
*/
- return PMU_CAP_FW_WRITES;
+ return PMU_CAP_FW_WRITES | perf_cap;
+}
+
+static inline u64 vmx_supported_debugctl(void)
+{
+ u64 debugctl = 0;
+
+ if (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT)
+ debugctl |= DEBUGCTLMSR_LBR_MASK;
+
+ return debugctl;
}
#endif /* __KVM_X86_VMX_CAPS_H */
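
A quick worked example of the two helpers above, with an assumed (hypothetical) host MSR value:

/*
 * Worked example: IA32_PERF_CAPABILITIES = 0x2005 on the host
 *   perf_cap & PMU_CAP_LBR_FMT       = 0x05   (LBR format present)
 *   vmx_get_perf_capabilities()      = PMU_CAP_FW_WRITES | 0x05
 *   vmx_supported_debugctl()         = DEBUGCTLMSR_LBR |
 *                                      DEBUGCTLMSR_FREEZE_LBRS_ON_PMI
 * With PDCM absent or an LBR format of 0, both LBR bits stay clear.
 */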
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 0fbb46990dfc..bcca0b80e0d0 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -12,6 +12,7 @@
#include "nested.h"
#include "pmu.h"
#include "trace.h"
+#include "vmx.h"
#include "x86.h"
static bool __read_mostly enable_shadow_vmcs = 1;
@@ -411,8 +412,8 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit
if (nr == DB_VECTOR) {
if (!has_payload) {
payload = vcpu->arch.dr6;
- payload &= ~(DR6_FIXED_1 | DR6_BT);
- payload ^= DR6_RTM;
+ payload &= ~DR6_BT;
+ payload ^= DR6_ACTIVE_LOW;
}
*exit_qual = payload;
} else
@@ -744,8 +745,7 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
(CC(!nested_cpu_has_vid(vmcs12)) ||
CC(!nested_exit_intr_ack_set(vcpu)) ||
CC((vmcs12->posted_intr_nv & 0xff00)) ||
- CC((vmcs12->posted_intr_desc_addr & 0x3f)) ||
- CC((vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))))
+ CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64))))
return -EINVAL;
/* tpr shadow is needed by all apicv features. */
@@ -758,13 +758,11 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
u32 count, u64 addr)
{
- int maxphyaddr;
-
if (count == 0)
return 0;
- maxphyaddr = cpuid_maxphyaddr(vcpu);
- if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
- (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
+
+ if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) ||
+ !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1)))
return -EINVAL;
return 0;
@@ -1062,14 +1060,6 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
}
}
-static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
-{
- unsigned long invalid_mask;
-
- invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
- return (val & invalid_mask) == 0;
-}
-
/*
* Returns true if the MMU needs to be sync'd on nested VM-Enter/VM-Exit.
* tl;dr: the MMU needs a sync if L0 is using shadow paging and L1 didn't
@@ -1121,7 +1111,7 @@ static bool nested_vmx_transition_mmu_sync(struct kvm_vcpu *vcpu)
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
enum vm_entry_failure_code *entry_failure_code)
{
- if (CC(!nested_cr3_valid(vcpu, cr3))) {
+ if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) {
*entry_failure_code = ENTRY_FAIL_DEFAULT;
return -EINVAL;
}
@@ -2177,15 +2167,13 @@ static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
/*
- * The PML address never changes, so it is constant in vmcs02.
- * Conceptually we want to copy the PML index from vmcs01 here,
- * and then back to vmcs01 on nested vmexit. But since we flush
- * the log and reset GUEST_PML_INDEX on each vmexit, the PML
- * index is also effectively constant in vmcs02.
+ * PML is emulated for L2, but never enabled in hardware as the MMU
+ * handles A/D emulation. Disabling PML for L2 also avoids having to
+ * deal with filtering out L2 GPAs from the buffer.
*/
if (enable_pml) {
- vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
- vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+ vmcs_write64(PML_ADDRESS, 0);
+ vmcs_write16(GUEST_PML_INDEX, -1);
}
if (cpu_has_vmx_encls_vmexit())
@@ -2220,7 +2208,7 @@ static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
- u32 exec_control, vmcs12_exec_ctrl;
+ u32 exec_control;
u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
@@ -2294,11 +2282,11 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_ENABLE_VMFUNC);
if (nested_cpu_has(vmcs12,
- CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
- vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
- ~SECONDARY_EXEC_ENABLE_PML;
- exec_control |= vmcs12_exec_ctrl;
- }
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
+ exec_control |= vmcs12->secondary_vm_exec_control;
+
+ /* PML is emulated and never enabled in hardware for L2. */
+ exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
/* VMCS shadowing for L2 is emulated for now */
exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
@@ -2532,7 +2520,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
* bitwise-or of what L1 wants to trap for L2, and what we want to
* trap. Note that CR0.TS also needs updating - we do this later.
*/
- update_exception_bitmap(vcpu);
+ vmx_update_exception_bitmap(vcpu);
vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
@@ -2635,7 +2623,6 @@ static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- int maxphyaddr = cpuid_maxphyaddr(vcpu);
/* Check for memory type validity */
switch (new_eptp & VMX_EPTP_MT_MASK) {
@@ -2666,7 +2653,7 @@ static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
}
/* Reserved bits should not be set */
- if (CC(new_eptp >> maxphyaddr || ((new_eptp >> 7) & 0x1f)))
+ if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f)))
return false;
/* AD, if set, should be supported */
@@ -2850,7 +2837,7 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
- CC(!nested_cr3_valid(vcpu, vmcs12->host_cr3)))
+ CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3)))
return -EINVAL;
if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
@@ -3057,35 +3044,8 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
vmx->loaded_vmcs->host_state.cr4 = cr4;
}
- asm(
- "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
- "cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
- "je 1f \n\t"
- __ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
- "mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
- "1: \n\t"
- "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
-
- /* Check if vmlaunch or vmresume is needed */
- "cmpb $0, %c[launched](%[loaded_vmcs])\n\t"
-
- /*
- * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
- * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
- * Valid. vmx_vmenter() directly "returns" RFLAGS, and so the
- * results of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
- */
- "call vmx_vmenter\n\t"
-
- CC_SET(be)
- : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
- : [HOST_RSP]"r"((unsigned long)HOST_RSP),
- [loaded_vmcs]"r"(vmx->loaded_vmcs),
- [launched]"i"(offsetof(struct loaded_vmcs, launched)),
- [host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
- [wordsize]"i"(sizeof(ulong))
- : "memory"
- );
+ vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
+ vmx->loaded_vmcs->launched);
if (vmx->msr_autoload.host.nr)
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
@@ -3124,13 +3084,9 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
return 0;
}
-static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct kvm_host_map *map;
- struct page *page;
- u64 hpa;
/*
* hv_evmcs may end up being not mapped after migration (when
@@ -3153,6 +3109,17 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
}
}
+ return true;
+}
+
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_host_map *map;
+ struct page *page;
+ u64 hpa;
+
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
/*
* Translate L1 physical address to host physical
@@ -3221,6 +3188,18 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
else
exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
+
+ return true;
+}
+
+static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
+{
+ if (!nested_get_evmcs_page(vcpu))
+ return false;
+
+ if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
+ return false;
+
return true;
}
@@ -3311,7 +3290,11 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
enum vm_entry_failure_code entry_failure_code;
bool evaluate_pending_interrupts;
- u32 exit_reason, failed_index;
+ union vmx_exit_reason exit_reason = {
+ .basic = EXIT_REASON_INVALID_STATE,
+ .failed_vmentry = 1,
+ };
+ u32 failed_index;
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
@@ -3363,7 +3346,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
if (nested_vmx_check_guest_state(vcpu, vmcs12,
&entry_failure_code)) {
- exit_reason = EXIT_REASON_INVALID_STATE;
+ exit_reason.basic = EXIT_REASON_INVALID_STATE;
vmcs12->exit_qualification = entry_failure_code;
goto vmentry_fail_vmexit;
}
@@ -3374,7 +3357,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
vcpu->arch.tsc_offset += vmcs12->tsc_offset;
if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) {
- exit_reason = EXIT_REASON_INVALID_STATE;
+ exit_reason.basic = EXIT_REASON_INVALID_STATE;
vmcs12->exit_qualification = entry_failure_code;
goto vmentry_fail_vmexit_guest_mode;
}
@@ -3384,7 +3367,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
vmcs12->vm_entry_msr_load_addr,
vmcs12->vm_entry_msr_load_count);
if (failed_index) {
- exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
+ exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
vmcs12->exit_qualification = failed_index;
goto vmentry_fail_vmexit_guest_mode;
}
@@ -3452,7 +3435,7 @@ vmentry_fail_vmexit:
return NVMX_VMENTRY_VMEXIT;
load_vmcs12_host_state(vcpu, vmcs12);
- vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
+ vmcs12->vm_exit_reason = exit_reason.full;
if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
vmx->nested.need_vmcs12_to_shadow_sync = true;
return NVMX_VMENTRY_VMEXIT;
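
The union vmx_exit_reason type used throughout these hunks is defined in vmx.h, which is not shown in this diff. Its approximate layout, following the SDM encoding of the VM-exit reason field, is sketched below; treat the field names other than basic, bus_lock_detected, failed_vmentry and full as assumptions.

union vmx_exit_reason {
	struct {
		u32 basic		: 16;
		u32 reserved16_25	: 10;
		u32 bus_lock_detected	:  1;
		u32 enclave_mode	:  1;
		u32 smi_pending_mtf	:  1;
		u32 smi_from_vmx_root	:  1;
		u32 reserved30		:  1;
		u32 failed_vmentry	:  1;
	};
	u32 full;
};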
@@ -4215,9 +4198,6 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &ignored))
nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
- if (!enable_ept)
- vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
-
nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);
vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
@@ -4510,6 +4490,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
vmx_set_virtual_apic_mode(vcpu);
}
+ if (vmx->nested.update_vmcs01_cpu_dirty_logging) {
+ vmx->nested.update_vmcs01_cpu_dirty_logging = false;
+ vmx_update_cpu_dirty_logging(vcpu);
+ }
+
/* Unpin physical memory we referred to in vmcs02 */
if (vmx->nested.apic_access_page) {
kvm_release_page_clean(vmx->nested.apic_access_page);
@@ -5540,7 +5525,12 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
return kvm_skip_emulated_instruction(vcpu);
fail:
- nested_vmx_vmexit(vcpu, vmx->exit_reason,
+ /*
+ * This is effectively a reflected VM-Exit, as opposed to a synthesized
+ * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode
+ * EXIT_REASON_VMFUNC as the exit reason.
+ */
+ nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
vmx_get_intr_info(vcpu),
vmx_get_exit_qual(vcpu));
return 1;
@@ -5608,7 +5598,8 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
* MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
*/
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12, u32 exit_reason)
+ struct vmcs12 *vmcs12,
+ union vmx_exit_reason exit_reason)
{
u32 msr_index = kvm_rcx_read(vcpu);
gpa_t bitmap;
@@ -5622,7 +5613,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
* First we need to figure out which of the four to use:
*/
bitmap = vmcs12->msr_bitmap;
- if (exit_reason == EXIT_REASON_MSR_WRITE)
+ if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
bitmap += 2048;
if (msr_index >= 0xc0000000) {
msr_index -= 0xc0000000;
@@ -5759,11 +5750,12 @@ static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
* Return true if L0 wants to handle an exit from L2 regardless of whether or not
* L1 wants the exit. Only call this when in is_guest_mode (L2).
*/
-static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
+static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
+ union vmx_exit_reason exit_reason)
{
u32 intr_info;
- switch ((u16)exit_reason) {
+ switch ((u16)exit_reason.basic) {
case EXIT_REASON_EXCEPTION_NMI:
intr_info = vmx_get_intr_info(vcpu);
if (is_nmi(intr_info))
@@ -5801,7 +5793,10 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
case EXIT_REASON_PREEMPTION_TIMER:
return true;
case EXIT_REASON_PML_FULL:
- /* We emulate PML support to L1. */
+ /*
+ * PML is emulated for an L1 VMM and should never be enabled in
+ * vmcs02, always "handle" PML_FULL by exiting to userspace.
+ */
return true;
case EXIT_REASON_VMFUNC:
/* VM functions are emulated through L2->L0 vmexits. */
@@ -5819,12 +5814,13 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
* Return 1 if L1 wants to intercept an exit from L2. Only call this when in
* is_guest_mode (L2).
*/
-static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
+static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
+ union vmx_exit_reason exit_reason)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
u32 intr_info;
- switch ((u16)exit_reason) {
+ switch ((u16)exit_reason.basic) {
case EXIT_REASON_EXCEPTION_NMI:
intr_info = vmx_get_intr_info(vcpu);
if (is_nmi(intr_info))
@@ -5943,7 +5939,7 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 exit_reason = vmx->exit_reason;
+ union vmx_exit_reason exit_reason = vmx->exit_reason;
unsigned long exit_qual;
u32 exit_intr_info;
@@ -5962,7 +5958,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
goto reflect_vmexit;
}
- trace_kvm_nested_vmexit(exit_reason, vcpu, KVM_ISA_VMX);
+ trace_kvm_nested_vmexit(exit_reason.full, vcpu, KVM_ISA_VMX);
/* If L0 (KVM) wants the exit, it trumps L1's desires. */
if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
@@ -5988,7 +5984,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
exit_qual = vmx_get_exit_qual(vcpu);
reflect_vmexit:
- nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info, exit_qual);
+ nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual);
return true;
}
@@ -6077,11 +6073,14 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
if (is_guest_mode(vcpu)) {
sync_vmcs02_to_vmcs12(vcpu, vmcs12);
sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
- } else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
- if (vmx->nested.hv_evmcs)
- copy_enlightened_to_vmcs12(vmx);
- else if (enable_shadow_vmcs)
- copy_shadow_to_vmcs12(vmx);
+ } else {
+ copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
+ if (!vmx->nested.need_vmcs12_to_shadow_sync) {
+ if (vmx->nested.hv_evmcs)
+ copy_enlightened_to_vmcs12(vmx);
+ else if (enable_shadow_vmcs)
+ copy_shadow_to_vmcs12(vmx);
+ }
}
BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
@@ -6602,7 +6601,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
.hv_timer_pending = nested_vmx_preemption_timer_pending,
.get_state = vmx_get_nested_state,
.set_state = vmx_set_nested_state,
- .get_nested_state_pages = nested_get_vmcs12_pages,
+ .get_nested_state_pages = vmx_get_nested_state_pages,
.write_log_dirty = nested_vmx_write_pml_buffer,
.enable_evmcs = nested_enable_evmcs,
.get_evmcs_version = nested_get_evmcs_version,
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index a886a47daebd..9efc1a6b8693 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -29,7 +29,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = {
[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
- [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
+ [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};
/* mapping between fixed pmc index and intel_arch_events array */
@@ -152,12 +152,17 @@ static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
return &counters[array_index_nospec(idx, num_counters)];
}
-static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
+static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
{
if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
- return false;
+ return 0;
- return vcpu->arch.perf_capabilities & PMU_CAP_FW_WRITES;
+ return vcpu->arch.perf_capabilities;
+}
+
+static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
+{
+ return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
}
static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
@@ -168,6 +173,41 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}
+bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
+{
+ /*
+ * As a first step, a guest can only enable the LBR feature if its
+ * cpu model is the same as the host's, because the LBR registers
+ * are passed through to the guest and they are model specific.
+ */
+ return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
+}
+
+bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
+{
+ struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
+
+ return lbr->nr && (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_LBR_FMT);
+}
+
+static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
+{
+ struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
+ bool ret = false;
+
+ if (!intel_pmu_lbr_is_enabled(vcpu))
+ return ret;
+
+ ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
+ (index >= records->from && index < records->from + records->nr) ||
+ (index >= records->to && index < records->to + records->nr);
+
+ if (!ret && records->info)
+ ret = (index >= records->info && index < records->info + records->nr);
+
+ return ret;
+}
+
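As a concrete illustration of the range check above (the LBR geometry here is an assumption for the example, not taken from the patch):

/*
 * On a CPU whose perf code reports 32 LBR entries with from/to bases at
 * MSR_LBR_NHM_FROM (0x680) and MSR_LBR_NHM_TO (0x6c0), the accepted
 * indices are:
 *   MSR_LBR_SELECT, MSR_LBR_TOS,
 *   0x680 ... 0x69f   (32 "from" records),
 *   0x6c0 ... 0x6df   (32 "to" records),
 * plus 32 MSRs starting at records->info when the info MSRs exist.
 */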
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -183,7 +223,8 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
default:
ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
- get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr);
+ get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
+ intel_pmu_is_valid_lbr_msr(vcpu, msr);
break;
}
@@ -202,6 +243,111 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
return pmc;
}
+static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
+{
+ struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+
+ if (lbr_desc->event) {
+ perf_event_release_kernel(lbr_desc->event);
+ lbr_desc->event = NULL;
+ vcpu_to_pmu(vcpu)->event_count--;
+ }
+}
+
+int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
+{
+ struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ struct perf_event *event;
+
+ /*
+ * The perf_event_attr is constructed with the minimum required fields:
+ * - set 'pinned = true' to make it task pinned so that if another
+ * cpu pinned event reclaims LBR, the event->oncpu will be set to -1;
+ * - set '.exclude_host = true' to record guest branches behavior;
+ *
+ * - set '.config = INTEL_FIXED_VLBR_EVENT' to indicate that host perf
+ * should schedule the event without a real HW counter, using a fake
+ * one instead; check is_guest_lbr_event() and __intel_get_event_constraints();
+ *
+ * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and
+ * 'branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
+ * PERF_SAMPLE_BRANCH_USER' to configure it as an LBR callstack
+ * event, which helps KVM save/restore guest LBR records
+ * during host context switches and reduces quite a lot of overhead;
+ * check branch_user_callstack() and intel_pmu_lbr_sched_task();
+ */
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_RAW,
+ .size = sizeof(attr),
+ .config = INTEL_FIXED_VLBR_EVENT,
+ .sample_type = PERF_SAMPLE_BRANCH_STACK,
+ .pinned = true,
+ .exclude_host = true,
+ .branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
+ PERF_SAMPLE_BRANCH_USER,
+ };
+
+ if (unlikely(lbr_desc->event)) {
+ __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
+ return 0;
+ }
+
+ event = perf_event_create_kernel_counter(&attr, -1,
+ current, NULL, NULL);
+ if (IS_ERR(event)) {
+ pr_debug_ratelimited("%s: failed %ld\n",
+ __func__, PTR_ERR(event));
+ return PTR_ERR(event);
+ }
+ lbr_desc->event = event;
+ pmu->event_count++;
+ __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
+ return 0;
+}
+
+/*
+ * It's safe to access LBR msrs from the guest when they have not
+ * been passed through, since the host will restore or reset the
+ * LBR msr records when the guest LBR event is scheduled in.
+ */
+static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
+ struct msr_data *msr_info, bool read)
+{
+ struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+ u32 index = msr_info->index;
+
+ if (!intel_pmu_is_valid_lbr_msr(vcpu, index))
+ return false;
+
+ if (!lbr_desc->event && intel_pmu_create_guest_lbr_event(vcpu) < 0)
+ goto dummy;
+
+ /*
+ * Disable irqs to ensure the LBR feature doesn't get reclaimed by the
+ * host while the value is read from the msr, which also prevents the
+ * host LBR value from leaking to the guest. If LBR has been reclaimed,
+ * return 0 on guest reads.
+ */
+ local_irq_disable();
+ if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) {
+ if (read)
+ rdmsrl(index, msr_info->data);
+ else
+ wrmsrl(index, msr_info->data);
+ __set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
+ local_irq_enable();
+ return true;
+ }
+ clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
+ local_irq_enable();
+
+dummy:
+ if (read)
+ msr_info->data = 0;
+ return true;
+}
+
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -236,7 +382,8 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
msr_info->data = pmc->eventsel;
return 0;
- }
+ } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true))
+ return 0;
}
return 1;
@@ -307,7 +454,8 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
reprogram_gp_counter(pmc, data);
return 0;
}
- }
+ } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
+ return 0;
}
return 1;
@@ -316,6 +464,8 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+
struct x86_pmu_capability x86_pmu;
struct kvm_cpuid_entry2 *entry;
union cpuid10_eax eax;
@@ -327,7 +477,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
pmu->version = 0;
pmu->reserved_bits = 0xffffffff00200000ull;
- vcpu->arch.perf_capabilities = 0;
entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
if (!entry)
@@ -340,12 +489,12 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
return;
perf_get_x86_pmu_capability(&x86_pmu);
- if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
- vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
x86_pmu.num_counters_gp);
+ eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
+ eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
pmu->available_event_types = ~entry->ebx &
((1ull << eax.split.mask_length) - 1);
@@ -355,6 +504,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->nr_arch_fixed_counters =
min_t(int, edx.split.num_counters_fixed,
x86_pmu.num_counters_fixed);
+ edx.split.bit_width_fixed = min_t(int,
+ edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
pmu->counter_bitmask[KVM_PMC_FIXED] =
((u64)1 << edx.split.bit_width_fixed) - 1;
}
@@ -381,12 +532,21 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
nested_vmx_pmu_entry_exit_ctls_update(vcpu);
+
+ if (intel_pmu_lbr_is_compatible(vcpu))
+ x86_perf_get_lbr(&lbr_desc->records);
+ else
+ lbr_desc->records.nr = 0;
+
+ if (lbr_desc->records.nr)
+ bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
}
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
int i;
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
pmu->gp_counters[i].type = KVM_PMC_GP;
@@ -401,6 +561,11 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
pmu->fixed_counters[i].current_config = 0;
}
+
+ vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
+ lbr_desc->records.nr = 0;
+ lbr_desc->event = NULL;
+ lbr_desc->msr_passthrough = false;
}
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
@@ -425,6 +590,119 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu)
pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
pmu->global_ovf_ctrl = 0;
+
+ intel_pmu_release_guest_lbr_event(vcpu);
+}
+
+/*
+ * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4.
+ *
+ * If Freeze_LBR_On_PMI = 1, the LBR is frozen on PMI and
+ * KVM emulates this by clearing the LBR bit (bit 0) in IA32_DEBUGCTL.
+ *
+ * The guest needs to re-enable LBR to resume recording branches.
+ */
+static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
+{
+ u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL);
+
+ if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
+ data &= ~DEBUGCTLMSR_LBR;
+ vmcs_write64(GUEST_IA32_DEBUGCTL, data);
+ }
+}
+
+static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
+{
+ u8 version = vcpu_to_pmu(vcpu)->version;
+
+ if (!intel_pmu_lbr_is_enabled(vcpu))
+ return;
+
+ if (version > 1 && version < 4)
+ intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu);
+}
+
+static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
+{
+ struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
+ int i;
+
+ for (i = 0; i < lbr->nr; i++) {
+ vmx_set_intercept_for_msr(vcpu, lbr->from + i, MSR_TYPE_RW, set);
+ vmx_set_intercept_for_msr(vcpu, lbr->to + i, MSR_TYPE_RW, set);
+ if (lbr->info)
+ vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
+ }
+
+ vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
+ vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
+}
+
+static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
+{
+ struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+
+ if (!lbr_desc->msr_passthrough)
+ return;
+
+ vmx_update_intercept_for_lbr_msrs(vcpu, true);
+ lbr_desc->msr_passthrough = false;
+}
+
+static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
+{
+ struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+
+ if (lbr_desc->msr_passthrough)
+ return;
+
+ vmx_update_intercept_for_lbr_msrs(vcpu, false);
+ lbr_desc->msr_passthrough = true;
+}
+
+/*
+ * Higher priority host perf events (e.g. cpu pinned) could reclaim the
+ * pmu resources (e.g. LBR) that were assigned to the guest. This is
+ * usually done via ipi calls (more details in perf_install_in_context).
+ *
+ * Before entering non-root mode (with irqs disabled here), double-check
+ * that the pmu features exposed to the guest have not been reclaimed
+ * by higher priority host events. Otherwise, disallow the vcpu's access
+ * to the reclaimed features.
+ */
+void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+
+ if (!lbr_desc->event) {
+ vmx_disable_lbr_msrs_passthrough(vcpu);
+ if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
+ goto warn;
+ if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
+ goto warn;
+ return;
+ }
+
+ if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) {
+ vmx_disable_lbr_msrs_passthrough(vcpu);
+ __clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
+ goto warn;
+ } else
+ vmx_enable_lbr_msrs_passthrough(vcpu);
+
+ return;
+
+warn:
+ pr_warn_ratelimited("kvm: vcpu-%d: fail to passthrough LBR.\n",
+ vcpu->vcpu_id);
+}
+
+static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
+{
+ if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
+ intel_pmu_release_guest_lbr_event(vcpu);
}
struct kvm_pmu_ops intel_pmu_ops = {
@@ -441,4 +719,6 @@ struct kvm_pmu_ops intel_pmu_ops = {
.refresh = intel_pmu_refresh,
.init = intel_pmu_init,
.reset = intel_pmu_reset,
+ .deliver_pmi = intel_pmu_deliver_pmi,
+ .cleanup = intel_pmu_cleanup,
};
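
The two new callbacks are driven from the common x86 PMU code, which is outside this diff; a rough sketch of the expected dispatch is below (the exact call sites are an assumption here).

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops.pmu_ops->deliver_pmi)
		kvm_x86_ops.pmu_ops->deliver_pmi(vcpu);
	/* ...followed by the existing PMI delivery through the local APIC */
}

void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	/* invoked when the vCPU's PMU has been idle for a while */
	if (kvm_x86_ops.pmu_ops->cleanup)
		kvm_x86_ops.pmu_ops->cleanup(vcpu);
}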
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index f02962dcc72c..4831bc44ce66 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -54,7 +54,7 @@ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
dest = cpu_physical_id(cpu);
- if (x2apic_enabled())
+ if (x2apic_mode)
new.ndst = dest;
else
new.ndst = (dest << 8) & 0xFF00;
@@ -104,7 +104,7 @@ static void __pi_post_block(struct kvm_vcpu *vcpu)
dest = cpu_physical_id(vcpu->cpu);
- if (x2apic_enabled())
+ if (x2apic_mode)
new.ndst = dest;
else
new.ndst = (dest << 8) & 0xFF00;
@@ -174,7 +174,7 @@ int pi_pre_block(struct kvm_vcpu *vcpu)
*/
dest = cpu_physical_id(vcpu->pre_pcpu);
- if (x2apic_enabled())
+ if (x2apic_mode)
new.ndst = dest;
else
new.ndst = (dest << 8) & 0xFF00;
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index e85aa5faa22d..3a6461694fc2 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -44,7 +44,7 @@
* they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump
* to vmx_vmexit.
*/
-SYM_FUNC_START(vmx_vmenter)
+SYM_FUNC_START_LOCAL(vmx_vmenter)
/* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
je 2f
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 2af05d3b0590..50810d471462 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -50,6 +50,7 @@
#include "capabilities.h"
#include "cpuid.h"
#include "evmcs.h"
+#include "hyperv.h"
#include "irq.h"
#include "kvm_cache_regs.h"
#include "lapic.h"
@@ -552,7 +553,7 @@ static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
{
struct hv_enlightened_vmcs *evmcs;
struct hv_partition_assist_pg **p_hv_pa_pg =
- &vcpu->kvm->arch.hyperv.hv_pa_pg;
+ &to_kvm_hv(vcpu->kvm)->hv_pa_pg;
/*
* Synthetic VM-Exit is not enabled in current code and so all
* evmcs in a single VM share the same assist page.
@@ -658,6 +659,14 @@ static bool is_valid_passthrough_msr(u32 msr)
case MSR_IA32_RTIT_CR3_MATCH:
case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
/* PT MSRs. These are handled in pt_update_intercept_for_msr() */
+ case MSR_LBR_SELECT:
+ case MSR_LBR_TOS:
+ case MSR_LBR_INFO_0 ... MSR_LBR_INFO_0 + 31:
+ case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 31:
+ case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31:
+ case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
+ case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
+ /* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
return true;
}
@@ -806,7 +815,7 @@ static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
return *p;
}
-void update_exception_bitmap(struct kvm_vcpu *vcpu)
+void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
{
u32 eb;
@@ -1102,7 +1111,7 @@ static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
static inline bool pt_output_base_valid(struct kvm_vcpu *vcpu, u64 base)
{
/* The base must be 128-byte aligned and a legal physical address. */
- return !kvm_vcpu_is_illegal_gpa(vcpu, base) && !(base & 0x7f);
+ return kvm_vcpu_is_legal_aligned_gpa(vcpu, base, 128);
}
static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range)
@@ -1577,7 +1586,7 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
* i.e. we end up advancing IP with some random value.
*/
if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
- to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) {
+ to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
orig_rip = kvm_rip_read(vcpu);
rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
#ifdef CONFIG_X86_64
@@ -1924,6 +1933,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
!guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
return 1;
goto find_uret_msr;
+ case MSR_IA32_DEBUGCTLMSR:
+ msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
+ break;
default:
find_uret_msr:
msr = vmx_find_uret_msr(vmx, msr_info->index);
@@ -1947,6 +1959,16 @@ static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
return (unsigned long)data;
}
+static u64 vcpu_supported_debugctl(struct kvm_vcpu *vcpu)
+{
+ u64 debugctl = vmx_supported_debugctl();
+
+ if (!intel_pmu_lbr_is_enabled(vcpu))
+ debugctl &= ~DEBUGCTLMSR_LBR_MASK;
+
+ return debugctl;
+}
+
/*
* Writes msr value into the appropriate "register".
* Returns 0 on success, non-0 otherwise.
@@ -1997,14 +2019,29 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
- case MSR_IA32_DEBUGCTLMSR:
+ case MSR_IA32_DEBUGCTLMSR: {
+ u64 invalid = data & ~vcpu_supported_debugctl(vcpu);
+ if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) {
+ if (report_ignored_msrs)
+ vcpu_unimpl(vcpu, "%s: BTF|LBR in IA32_DEBUGCTLMSR 0x%llx, nop\n",
+ __func__, data);
+ data &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
+ invalid &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
+ }
+
+ if (invalid)
+ return 1;
+
if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls &
VM_EXIT_SAVE_DEBUG_CONTROLS)
get_vmcs12(vcpu)->guest_ia32_debugctl = data;
- ret = kvm_set_msr_common(vcpu, msr_info);
- break;
-
+ vmcs_write64(GUEST_IA32_DEBUGCTL, data);
+ if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event &&
+ (data & DEBUGCTLMSR_LBR))
+ intel_pmu_create_guest_lbr_event(vcpu);
+ return 0;
+ }
case MSR_IA32_BNDCFGS:
if (!kvm_mpx_supported() ||
(!msr_info->host_initiated &&
@@ -2196,6 +2233,18 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if ((data >> 32) != 0)
return 1;
goto find_uret_msr;
+ case MSR_IA32_PERF_CAPABILITIES:
+ if (data && !vcpu_to_pmu(vcpu)->version)
+ return 1;
+ if (data & PMU_CAP_LBR_FMT) {
+ if ((data & PMU_CAP_LBR_FMT) !=
+ (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT))
+ return 1;
+ if (!intel_pmu_lbr_is_compatible(vcpu))
+ return 1;
+ }
+ ret = kvm_set_msr_common(vcpu, msr_info);
+ break;
default:
find_uret_msr:
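
A short worked example of the IA32_DEBUGCTLMSR handling added above (the guest configuration is assumed for illustration):

/*
 * A guest without the LBR feature writes
 * IA32_DEBUGCTL = DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF (0x3):
 *   vcpu_supported_debugctl() == 0  ->  invalid = 0x3
 *   BTF|LBR are dropped with a one-line "nop" warning, invalid becomes 0,
 *   and the write completes with data = 0.
 * Any other unsupported bit still makes the WRMSR fault (return 1).
 */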
@@ -2265,7 +2314,6 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer)
u64 msr;
cr4_set_bits(X86_CR4_VMXE);
- intel_pt_handle_vmx(1);
asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t"
_ASM_EXTABLE(1b, %l[fault])
@@ -2276,7 +2324,6 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer)
fault:
WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
- intel_pt_handle_vmx(0);
cr4_clear_bits(X86_CR4_VMXE);
return -EFAULT;
@@ -2299,9 +2346,13 @@ static int hardware_enable(void)
!hv_get_vp_assist_page(cpu))
return -EFAULT;
+ intel_pt_handle_vmx(1);
+
r = kvm_cpu_vmxon(phys_addr);
- if (r)
+ if (r) {
+ intel_pt_handle_vmx(0);
return r;
+ }
if (enable_ept)
ept_sync_global();
@@ -2319,22 +2370,14 @@ static void vmclear_local_loaded_vmcss(void)
__loaded_vmcs_clear(v);
}
-
-/* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
- * tricks.
- */
-static void kvm_cpu_vmxoff(void)
-{
- asm volatile (__ex("vmxoff"));
-
- intel_pt_handle_vmx(0);
- cr4_clear_bits(X86_CR4_VMXE);
-}
-
static void hardware_disable(void)
{
vmclear_local_loaded_vmcss();
- kvm_cpu_vmxoff();
+
+ if (cpu_vmxoff())
+ kvm_spurious_fault();
+
+ intel_pt_handle_vmx(0);
}
/*
@@ -2428,7 +2471,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
SECONDARY_EXEC_PT_USE_GPA |
SECONDARY_EXEC_PT_CONCEAL_VMX |
- SECONDARY_EXEC_ENABLE_VMFUNC;
+ SECONDARY_EXEC_ENABLE_VMFUNC |
+ SECONDARY_EXEC_BUS_LOCK_DETECTION;
if (cpu_has_sgx())
opt2 |= SECONDARY_EXEC_ENCLS_EXITING;
if (adjust_vmx_controls(min2, opt2,
@@ -2739,7 +2783,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
- update_exception_bitmap(vcpu);
+ vmx_update_exception_bitmap(vcpu);
fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
@@ -2819,7 +2863,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
vmcs_writel(GUEST_RFLAGS, flags);
vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
- update_exception_bitmap(vcpu);
+ vmx_update_exception_bitmap(vcpu);
fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
@@ -3774,7 +3818,7 @@ static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
vmx_set_msr_bitmap_write(msr_bitmap, msr);
}
-static __always_inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu,
+void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu,
u32 msr, int type, bool value)
{
if (value)
@@ -4233,7 +4277,12 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
*/
exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
- if (!enable_pml)
+ /*
+ * PML is enabled/disabled when dirty logging of memsmlots changes, but
+ * it needs to be set here when dirty logging is already active, e.g.
+ * if this vCPU was created after dirty logging was enabled.
+ */
+ if (!vcpu->kvm->arch.cpu_dirty_logging_count)
exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
if (cpu_has_vmx_xsaves()) {
@@ -4251,24 +4300,17 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
}
vmx_adjust_sec_exec_feature(vmx, &exec_control, rdtscp, RDTSCP);
-
- /*
- * Expose INVPCID if and only if PCID is also exposed to the guest.
- * INVPCID takes a #UD when it's disabled in the VMCS, but a #GP or #PF
- * if CR4.PCIDE=0. Enumerating CPUID.INVPCID=1 would lead to incorrect
- * behavior from the guest perspective (it would expect #GP or #PF).
- */
- if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
- guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID);
vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
-
vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdseed, RDSEED);
vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG,
ENABLE_USR_WAIT_PAUSE, false);
+ if (!vcpu->kvm->arch.bus_lock_detection_enabled)
+ exec_control &= ~SECONDARY_EXEC_BUS_LOCK_DETECTION;
+
vmx->secondary_exec_control = exec_control;
}
@@ -4467,23 +4509,23 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vmx_set_cr4(vcpu, 0);
vmx_set_efer(vcpu, 0);
- update_exception_bitmap(vcpu);
+ vmx_update_exception_bitmap(vcpu);
vpid_sync_context(vmx->vpid);
if (init_event)
vmx_clear_hlt(vcpu);
}
-static void enable_irq_window(struct kvm_vcpu *vcpu)
+static void vmx_enable_irq_window(struct kvm_vcpu *vcpu)
{
exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
}
-static void enable_nmi_window(struct kvm_vcpu *vcpu)
+static void vmx_enable_nmi_window(struct kvm_vcpu *vcpu)
{
if (!enable_vnmi ||
vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
- enable_irq_window(vcpu);
+ vmx_enable_irq_window(vcpu);
return;
}
@@ -4824,7 +4866,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
return 1;
}
- kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
+ kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
fallthrough;
case BP_VECTOR:
@@ -5049,6 +5091,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification;
int dr, dr7, reg;
+ int err = 1;
exit_qualification = vmx_get_exit_qual(vcpu);
dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
@@ -5057,9 +5100,9 @@ static int handle_dr(struct kvm_vcpu *vcpu)
if (!kvm_require_dr(vcpu, dr))
return 1;
- /* Do not handle if the CPL > 0, will trigger GP on re-entry */
- if (!kvm_require_cpl(vcpu, 0))
- return 1;
+ if (kvm_x86_ops.get_cpl(vcpu) > 0)
+ goto out;
+
dr7 = vmcs_readl(GUEST_DR7);
if (dr7 & DR7_GD) {
/*
@@ -5068,7 +5111,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
* guest debugging itself.
*/
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
- vcpu->run->debug.arch.dr6 = DR6_BD | DR6_RTM | DR6_FIXED_1;
+ vcpu->run->debug.arch.dr6 = DR6_BD | DR6_ACTIVE_LOW;
vcpu->run->debug.arch.dr7 = dr7;
vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
vcpu->run->debug.arch.exception = DB_VECTOR;
@@ -5096,14 +5139,15 @@ static int handle_dr(struct kvm_vcpu *vcpu)
if (exit_qualification & TYPE_MOV_FROM_DR) {
unsigned long val;
- if (kvm_get_dr(vcpu, dr, &val))
- return 1;
+ kvm_get_dr(vcpu, dr, &val);
kvm_register_write(vcpu, reg, val);
- } else
- if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
- return 1;
+ err = 0;
+ } else {
+ err = kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg));
+ }
- return kvm_skip_emulated_instruction(vcpu);
+out:
+ return kvm_complete_insn_gp(vcpu, err);
}
static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
@@ -5177,9 +5221,8 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu)
u64 new_bv = kvm_read_edx_eax(vcpu);
u32 index = kvm_rcx_read(vcpu);
- if (kvm_set_xcr(vcpu, index, new_bv) == 0)
- return kvm_skip_emulated_instruction(vcpu);
- return 1;
+ int err = kvm_set_xcr(vcpu, index, new_bv);
+ return kvm_complete_insn_gp(vcpu, err);
}
static int handle_apic_access(struct kvm_vcpu *vcpu)
@@ -5600,6 +5643,13 @@ static int handle_encls(struct kvm_vcpu *vcpu)
return 1;
}
+static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu)
+{
+ vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
+ vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
+ return 0;
+}
+
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -5656,6 +5706,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_VMFUNC] = handle_vmx_instruction,
[EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer,
[EXIT_REASON_ENCLS] = handle_encls,
+ [EXIT_REASON_BUS_LOCK] = handle_bus_lock_vmexit,
};
static const int kvm_vmx_max_exit_handlers =
@@ -5667,7 +5718,7 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
struct vcpu_vmx *vmx = to_vmx(vcpu);
*info1 = vmx_get_exit_qual(vcpu);
- if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
+ if (!(vmx->exit_reason.failed_vmentry)) {
*info2 = vmx->idt_vectoring_info;
*intr_info = vmx_get_intr_info(vcpu);
if (is_exception_with_error_code(*intr_info))
@@ -5720,24 +5771,6 @@ static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
}
-/*
- * Flush all vcpus' PML buffer and update logged GPAs to dirty_bitmap.
- * Called before reporting dirty_bitmap to userspace.
- */
-static void kvm_flush_pml_buffers(struct kvm *kvm)
-{
- int i;
- struct kvm_vcpu *vcpu;
- /*
- * We only need to kick vcpu out of guest mode here, as PML buffer
- * is flushed at beginning of all VMEXITs, and it's obvious that only
- * vcpus running in guest are possible to have unflushed GPAs in PML
- * buffer.
- */
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_vcpu_kick(vcpu);
-}
-
static void vmx_dump_sel(char *name, uint32_t sel)
{
pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
@@ -5908,20 +5941,22 @@ void dump_vmcs(void)
* The guest has exited. See if we can fix it or if we need userspace
* assistance.
*/
-static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
+static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 exit_reason = vmx->exit_reason;
+ union vmx_exit_reason exit_reason = vmx->exit_reason;
u32 vectoring_info = vmx->idt_vectoring_info;
+ u16 exit_handler_index;
/*
* Flush the PML buffer of logged GPAs; this keeps dirty_bitmap more
* up to date. Another benefit is that in kvm_vm_ioctl_get_dirty_log,
* before querying dirty_bitmap, we only need to kick all vcpus out of
* guest mode, as once a vcpu is in root mode the PML buffer must have been
- * flushed already.
+ * flushed already. Note, PML is never enabled in hardware while
+ * running L2.
*/
- if (enable_pml)
+ if (enable_pml && !is_guest_mode(vcpu))
vmx_flush_pml_buffer(vcpu);
/*
@@ -5938,6 +5973,13 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
if (is_guest_mode(vcpu)) {
/*
+ * PML is never enabled when running L2, bail immediately if a
+ * PML full exit occurs as something is horribly wrong.
+ */
+ if (exit_reason.basic == EXIT_REASON_PML_FULL)
+ goto unexpected_vmexit;
+
+ /*
* The host physical addresses of some pages of guest memory
* are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
* Page). The CPU may write to these pages via their host
@@ -5954,11 +5996,11 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
return 1;
}
- if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
+ if (exit_reason.failed_vmentry) {
dump_vmcs();
vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
vcpu->run->fail_entry.hardware_entry_failure_reason
- = exit_reason;
+ = exit_reason.full;
vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
return 0;
}
@@ -5980,18 +6022,18 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
* will cause infinite loop.
*/
if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
- (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
- exit_reason != EXIT_REASON_EPT_VIOLATION &&
- exit_reason != EXIT_REASON_PML_FULL &&
- exit_reason != EXIT_REASON_APIC_ACCESS &&
- exit_reason != EXIT_REASON_TASK_SWITCH)) {
+ (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
+ exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
+ exit_reason.basic != EXIT_REASON_PML_FULL &&
+ exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
+ exit_reason.basic != EXIT_REASON_TASK_SWITCH)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
vcpu->run->internal.ndata = 3;
vcpu->run->internal.data[0] = vectoring_info;
- vcpu->run->internal.data[1] = exit_reason;
+ vcpu->run->internal.data[1] = exit_reason.full;
vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
- if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
+ if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
vcpu->run->internal.ndata++;
vcpu->run->internal.data[3] =
vmcs_read64(GUEST_PHYSICAL_ADDRESS);
@@ -6023,42 +6065,62 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
if (exit_fastpath != EXIT_FASTPATH_NONE)
return 1;
- if (exit_reason >= kvm_vmx_max_exit_handlers)
+ if (exit_reason.basic >= kvm_vmx_max_exit_handlers)
goto unexpected_vmexit;
#ifdef CONFIG_RETPOLINE
- if (exit_reason == EXIT_REASON_MSR_WRITE)
+ if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
return kvm_emulate_wrmsr(vcpu);
- else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER)
+ else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
return handle_preemption_timer(vcpu);
- else if (exit_reason == EXIT_REASON_INTERRUPT_WINDOW)
+ else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
return handle_interrupt_window(vcpu);
- else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
+ else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
return handle_external_interrupt(vcpu);
- else if (exit_reason == EXIT_REASON_HLT)
+ else if (exit_reason.basic == EXIT_REASON_HLT)
return kvm_emulate_halt(vcpu);
- else if (exit_reason == EXIT_REASON_EPT_MISCONFIG)
+ else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
return handle_ept_misconfig(vcpu);
#endif
- exit_reason = array_index_nospec(exit_reason,
- kvm_vmx_max_exit_handlers);
- if (!kvm_vmx_exit_handlers[exit_reason])
+ exit_handler_index = array_index_nospec((u16)exit_reason.basic,
+ kvm_vmx_max_exit_handlers);
+ if (!kvm_vmx_exit_handlers[exit_handler_index])
goto unexpected_vmexit;
- return kvm_vmx_exit_handlers[exit_reason](vcpu);
+ return kvm_vmx_exit_handlers[exit_handler_index](vcpu);
unexpected_vmexit:
- vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+ vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
+ exit_reason.full);
dump_vmcs();
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror =
KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
vcpu->run->internal.ndata = 2;
- vcpu->run->internal.data[0] = exit_reason;
+ vcpu->run->internal.data[0] = exit_reason.full;
vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
return 0;
}
+static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
+{
+ int ret = __vmx_handle_exit(vcpu, exit_fastpath);
+
+ /*
+ * Even when the current exit reason is handled by KVM internally, we
+ * still need to exit to user space when a bus lock is detected, to
+ * inform userspace that there is a bus lock in the guest.
+ */
+ if (to_vmx(vcpu)->exit_reason.bus_lock_detected) {
+ if (ret > 0)
+ vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
+
+ vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
+ return 0;
+ }
+ return ret;
+}
+
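
From the VMM side, the new bus-lock exit surfaces through the standard KVM run loop; a minimal sketch of how userspace might consume it is below (the throttling policy and descriptor names are assumptions, not part of KVM's ABI).

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <unistd.h>

static void run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
		if (run->exit_reason == KVM_EXIT_X86_BUS_LOCK ||
		    (run->flags & KVM_RUN_X86_BUS_LOCK)) {
			usleep(1000);	/* assumed rate-limit before re-entry */
			continue;
		}
		/* dispatch other exit reasons here */
	}
}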
/*
* Software based L1D cache flush which is used when microcode providing
* the cache control MSR is not loaded.
@@ -6129,7 +6191,7 @@ static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
: "eax", "ebx", "ecx", "edx");
}
-static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+static void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
int tpr_threshold;
@@ -6373,9 +6435,9 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (vmx->exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
+ if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
handle_external_interrupt_irqoff(vcpu);
- else if (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)
+ else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
handle_exception_nmi_irqoff(vmx);
}
@@ -6567,7 +6629,7 @@ void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
- switch (to_vmx(vcpu)->exit_reason) {
+ switch (to_vmx(vcpu)->exit_reason.basic) {
case EXIT_REASON_MSR_WRITE:
return handle_fastpath_set_msr_irqoff(vcpu);
case EXIT_REASON_PREEMPTION_TIMER:
@@ -6577,8 +6639,6 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
}
}
-bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
-
static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
struct vcpu_vmx *vmx)
{
@@ -6638,11 +6698,9 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
- fastpath_t exit_fastpath;
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long cr3, cr4;
-reenter_guest:
/* Record the guest's net vcpu time for enforced NMI injections. */
if (unlikely(!enable_vnmi &&
vmx->loaded_vmcs->soft_vnmi_blocked))
@@ -6653,6 +6711,8 @@ reenter_guest:
if (vmx->emulation_required)
return EXIT_FASTPATH_NONE;
+ trace_kvm_entry(vcpu);
+
if (vmx->ple_window_dirty) {
vmx->ple_window_dirty = false;
vmcs_write32(PLE_WINDOW, vmx->ple_window);
@@ -6694,6 +6754,8 @@ reenter_guest:
pt_guest_enter(vmx);
atomic_switch_perf_msrs(vmx);
+ if (intel_pmu_lbr_is_enabled(vcpu))
+ vmx_passthrough_lbr_msrs(vcpu);
if (enable_preemption_timer)
vmx_update_hv_timer(vcpu);
@@ -6732,12 +6794,12 @@ reenter_guest:
x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
/* All fields are clean at this point */
- if (static_branch_unlikely(&enable_evmcs))
+ if (static_branch_unlikely(&enable_evmcs)) {
current_evmcs->hv_clean_fields |=
HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
- if (static_branch_unlikely(&enable_evmcs))
- current_evmcs->hv_vp_id = vcpu->arch.hyperv.vp_index;
+ current_evmcs->hv_vp_id = kvm_hv_get_vpindex(vcpu);
+ }
/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
if (vmx->host_debugctlmsr)
@@ -6766,21 +6828,23 @@ reenter_guest:
vmx->idt_vectoring_info = 0;
if (unlikely(vmx->fail)) {
- vmx->exit_reason = 0xdead;
+ vmx->exit_reason.full = 0xdead;
return EXIT_FASTPATH_NONE;
}
- vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
- if (unlikely((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY))
+ vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
+ if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
kvm_machine_check();
- trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
+ if (likely(!vmx->exit_reason.failed_vmentry))
+ vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
- if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
+ trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX);
+
+ if (unlikely(vmx->exit_reason.failed_vmentry))
return EXIT_FASTPATH_NONE;
vmx->loaded_vmcs->launched = 1;
- vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
vmx_recover_nmi_blocking(vmx);
vmx_complete_interrupts(vmx);
@@ -6788,22 +6852,7 @@ reenter_guest:
if (is_guest_mode(vcpu))
return EXIT_FASTPATH_NONE;
- exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
- if (exit_fastpath == EXIT_FASTPATH_REENTER_GUEST) {
- if (!kvm_vcpu_exit_request(vcpu)) {
- /*
- * FIXME: this goto should be a loop in vcpu_enter_guest,
- * but it would incur the cost of a retpoline for now.
- * Revisit once static calls are available.
- */
- if (vcpu->arch.apicv_active)
- vmx_sync_pir_to_irr(vcpu);
- goto reenter_guest;
- }
- exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
- }
-
- return exit_fastpath;
+ return vmx_exit_handlers_fastpath(vcpu);
}
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
@@ -6858,11 +6907,20 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
switch (index) {
case MSR_IA32_TSX_CTRL:
/*
- * No need to pass TSX_CTRL_CPUID_CLEAR through, so
- * let's avoid changing CPUID bits under the host
- * kernel's feet.
+ * TSX_CTRL_CPUID_CLEAR is handled in the CPUID
+ * interception. Keep the host value unchanged to avoid
+ * changing CPUID bits under the host kernel's feet.
+ *
+ * hle=0, rtm=0, tsx_ctrl=1 can be found with some
+ * combinations of new kernel and old userspace. If
+ * those guests run on a tsx=off host, do allow guests
+ * to use TSX_CTRL, but do not change the value on the
+ * host so that TSX remains always disabled.
*/
- vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+ if (boot_cpu_has(X86_FEATURE_RTM))
+ vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+ else
+ vmx->guest_uret_msrs[j].mask = 0;
break;
default:
vmx->guest_uret_msrs[j].mask = -1ull;
@@ -7245,7 +7303,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
set_cr4_guest_host_mask(vmx);
/* Refresh #PF interception to account for MAXPHYADDR changes. */
- update_exception_bitmap(vcpu);
+ vmx_update_exception_bitmap(vcpu);
}
static __init void vmx_set_cpu_caps(void)
@@ -7259,8 +7317,8 @@ static __init void vmx_set_cpu_caps(void)
/* CPUID 0x7 */
if (kvm_mpx_supported())
kvm_cpu_cap_check_and_set(X86_FEATURE_MPX);
- if (cpu_has_vmx_invpcid())
- kvm_cpu_cap_check_and_set(X86_FEATURE_INVPCID);
+ if (!cpu_has_vmx_invpcid())
+ kvm_cpu_cap_clear(X86_FEATURE_INVPCID);
if (vmx_pt_mode_is_host_guest())
kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT);
@@ -7438,30 +7496,24 @@ static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
shrink_ple_window(vcpu);
}
-static void vmx_slot_enable_log_dirty(struct kvm *kvm,
- struct kvm_memory_slot *slot)
-{
- if (!kvm_dirty_log_manual_protect_and_init_set(kvm))
- kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
- kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
-}
-
-static void vmx_slot_disable_log_dirty(struct kvm *kvm,
- struct kvm_memory_slot *slot)
+void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
{
- kvm_mmu_slot_set_dirty(kvm, slot);
-}
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
-static void vmx_flush_log_dirty(struct kvm *kvm)
-{
- kvm_flush_pml_buffers(kvm);
-}
+ if (is_guest_mode(vcpu)) {
+ vmx->nested.update_vmcs01_cpu_dirty_logging = true;
+ return;
+ }
-static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- gfn_t offset, unsigned long mask)
-{
- kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
+ /*
+ * Note, cpu_dirty_logging_count can be changed concurrently with this
+ * code, but in that case another update request will be made and so
+ * the guest will never run with a stale PML value.
+ */
+ if (vcpu->kvm->arch.cpu_dirty_logging_count)
+ secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML);
+ else
+ secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
}
static int vmx_pre_block(struct kvm_vcpu *vcpu)
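The hook above is driven from common x86 code: when dirty logging is toggled on a memslot, the generic side is expected to bump kvm->arch.cpu_dirty_logging_count and ask each vCPU to re-evaluate PML before its next VM-entry. A minimal sketch of the consuming side (the request name and call site below are an assumption for illustration, not copied from this hunk):

	/* Sketch only: assumed to be serviced in vcpu_enter_guest() before VM-entry. */
	if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu))
		static_call(kvm_x86_update_cpu_dirty_logging)(vcpu);
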
@@ -7535,7 +7587,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
return 0;
}
-static void enable_smi_window(struct kvm_vcpu *vcpu)
+static void vmx_enable_smi_window(struct kvm_vcpu *vcpu)
{
/* RSM will cause a vmexit anyway. */
}
@@ -7571,11 +7623,6 @@ static bool vmx_check_apicv_inhibit_reasons(ulong bit)
return supported & BIT(bit);
}
-static int vmx_cpu_dirty_log_size(void)
-{
- return enable_pml ? PML_ENTITY_NUM : 0;
-}
-
static struct kvm_x86_ops vmx_x86_ops __initdata = {
.hardware_unsetup = hardware_unsetup,
@@ -7595,7 +7642,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.vcpu_load = vmx_vcpu_load,
.vcpu_put = vmx_vcpu_put,
- .update_exception_bitmap = update_exception_bitmap,
+ .update_exception_bitmap = vmx_update_exception_bitmap,
.get_msr_feature = vmx_get_msr_feature,
.get_msr = vmx_get_msr,
.set_msr = vmx_set_msr,
@@ -7638,9 +7685,9 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.nmi_allowed = vmx_nmi_allowed,
.get_nmi_mask = vmx_get_nmi_mask,
.set_nmi_mask = vmx_set_nmi_mask,
- .enable_nmi_window = enable_nmi_window,
- .enable_irq_window = enable_irq_window,
- .update_cr8_intercept = update_cr8_intercept,
+ .enable_nmi_window = vmx_enable_nmi_window,
+ .enable_irq_window = vmx_enable_irq_window,
+ .update_cr8_intercept = vmx_update_cr8_intercept,
.set_virtual_apic_mode = vmx_set_virtual_apic_mode,
.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
@@ -7675,10 +7722,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.sched_in = vmx_sched_in,
- .slot_enable_log_dirty = vmx_slot_enable_log_dirty,
- .slot_disable_log_dirty = vmx_slot_disable_log_dirty,
- .flush_log_dirty = vmx_flush_log_dirty,
- .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
+ .cpu_dirty_log_size = PML_ENTITY_NUM,
+ .update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
.pre_block = vmx_pre_block,
.post_block = vmx_post_block,
@@ -7698,7 +7743,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.smi_allowed = vmx_smi_allowed,
.pre_enter_smm = vmx_pre_enter_smm,
.pre_leave_smm = vmx_pre_leave_smm,
- .enable_smi_window = enable_smi_window,
+ .enable_smi_window = vmx_enable_smi_window,
.can_emulate_instruction = vmx_can_emulate_instruction,
.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
@@ -7706,7 +7751,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.msr_filter_changed = vmx_msr_filter_changed,
.complete_emulated_msr = kvm_complete_insn_gp,
- .cpu_dirty_log_size = vmx_cpu_dirty_log_size,
.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
};
@@ -7799,6 +7843,8 @@ static __init int hardware_setup(void)
kvm_tsc_scaling_ratio_frac_bits = 48;
}
+ kvm_has_bus_lock_exit = cpu_has_vmx_bus_lock_detection();
+
set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
if (enable_ept)
@@ -7821,13 +7867,8 @@ static __init int hardware_setup(void)
if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
enable_pml = 0;
- if (!enable_pml) {
- vmx_x86_ops.slot_enable_log_dirty = NULL;
- vmx_x86_ops.slot_disable_log_dirty = NULL;
- vmx_x86_ops.flush_log_dirty = NULL;
- vmx_x86_ops.enable_log_dirty_pt_masked = NULL;
- vmx_x86_ops.cpu_dirty_log_size = NULL;
- }
+ if (!enable_pml)
+ vmx_x86_ops.cpu_dirty_log_size = 0;
if (!cpu_has_vmx_preemption_timer())
enable_preemption_timer = false;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 9d3a557949ac..89da5e1251f1 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -70,6 +70,54 @@ struct pt_desc {
struct pt_ctx guest;
};
+union vmx_exit_reason {
+ struct {
+ u32 basic : 16;
+ u32 reserved16 : 1;
+ u32 reserved17 : 1;
+ u32 reserved18 : 1;
+ u32 reserved19 : 1;
+ u32 reserved20 : 1;
+ u32 reserved21 : 1;
+ u32 reserved22 : 1;
+ u32 reserved23 : 1;
+ u32 reserved24 : 1;
+ u32 reserved25 : 1;
+ u32 bus_lock_detected : 1;
+ u32 enclave_mode : 1;
+ u32 smi_pending_mtf : 1;
+ u32 smi_from_vmx_root : 1;
+ u32 reserved30 : 1;
+ u32 failed_vmentry : 1;
+ };
+ u32 full;
+};
+
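Taken together, the union replaces open-coded masking of the raw VM_EXIT_REASON word with named fields; a minimal sketch of the call-site change this enables (mirroring the vmx.c hunk above, with vmcs_read32() assumed from the surrounding context):

	union vmx_exit_reason reason;

	reason.full = vmcs_read32(VM_EXIT_REASON);

	if (reason.basic == EXIT_REASON_MCE_DURING_VMENTRY)   /* was: (u16)exit_reason == ... */
		kvm_machine_check();
	if (reason.failed_vmentry)                             /* was: exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY */
		return EXIT_FASTPATH_NONE;
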
+#define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc)
+#define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records)
+
+bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu);
+bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);
+
+int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
+void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);
+
+struct lbr_desc {
+ /* Basic info about guest LBR records. */
+ struct x86_pmu_lbr records;
+
+ /*
+ * Emulate the LBR feature by passing the LBR registers through to the
+ * guest when the per-vcpu guest LBR event is scheduled on the current pcpu.
+ *
+ * The records may be inaccurate if the host reclaims the LBR.
+ */
+ struct perf_event *event;
+
+ /* True if LBRs are marked as not intercepted in the MSR bitmap */
+ bool msr_passthrough;
+};
+
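A hedged sketch of what "passthrough" means for the guest LBR stack: clear (or restore) the MSR-bitmap intercepts for every from/to MSR described by the records, using the vmx_set_intercept_for_msr() helper exported further down. The loop, the struct x86_pmu_lbr field names (nr/from/to) and the intercept polarity are assumptions for illustration, not the patch's exact implementation:

	static void example_lbr_msrs_passthrough(struct kvm_vcpu *vcpu, bool grant)
	{
		struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
		int i;

		for (i = 0; i < lbr->nr; i++) {
			/* Assumed polarity: value == true re-enables interception. */
			vmx_set_intercept_for_msr(vcpu, lbr->from + i, MSR_TYPE_RW, !grant);
			vmx_set_intercept_for_msr(vcpu, lbr->to + i, MSR_TYPE_RW, !grant);
		}
	}
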
/*
* The nested_vmx structure is part of vcpu_vmx, and holds information we need
* for correct emulation of VMX (i.e., nested VMX) on this vcpu.
@@ -117,6 +165,7 @@ struct nested_vmx {
bool change_vmcs01_virtual_apic_mode;
bool reload_vmcs01_apic_access_page;
+ bool update_vmcs01_cpu_dirty_logging;
/*
* Enlightened VMCS has been enabled. It does not mean that L1 has to
@@ -244,7 +293,7 @@ struct vcpu_vmx {
int vpid;
bool emulation_required;
- u32 exit_reason;
+ union vmx_exit_reason exit_reason;
/* Posted interrupt descriptor */
struct pi_desc pi_desc;
@@ -279,6 +328,7 @@ struct vcpu_vmx {
u64 ept_pointer;
struct pt_desc pt_desc;
+ struct lbr_desc lbr_desc;
/* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS 13
@@ -329,7 +379,7 @@ void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa,
int root_level);
-void update_exception_bitmap(struct kvm_vcpu *vcpu);
+void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
@@ -339,8 +389,12 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
+bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
+void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu,
+ u32 msr, int type, bool value);
+void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
static inline u8 vmx_get_rvi(void)
{
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9a8969a6dd06..2a20ce60152e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -29,6 +29,7 @@
#include "pmu.h"
#include "hyperv.h"
#include "lapic.h"
+#include "xen.h"
#include <linux/clocksource.h>
#include <linux/interrupt.h>
@@ -105,6 +106,7 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
+static void process_smi(struct kvm_vcpu *vcpu);
static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
@@ -113,11 +115,21 @@ static int sync_regs(struct kvm_vcpu *vcpu);
struct kvm_x86_ops kvm_x86_ops __read_mostly;
EXPORT_SYMBOL_GPL(kvm_x86_ops);
+#define KVM_X86_OP(func) \
+ DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \
+ *(((struct kvm_x86_ops *)0)->func));
+#define KVM_X86_OP_NULL KVM_X86_OP
+#include <asm/kvm-x86-ops.h>
+EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
+EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);
+EXPORT_STATIC_CALL_GPL(kvm_x86_tlb_flush_current);
+
static bool __read_mostly ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
-static bool __read_mostly report_ignored_msrs = true;
+bool __read_mostly report_ignored_msrs = true;
module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
+EXPORT_SYMBOL_GPL(report_ignored_msrs);
unsigned int min_timer_period_us = 200;
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
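For context, the macro above stamps out one static-call key per kvm_x86_ops member by including the asm/kvm-x86-ops.h list; call sites then switch from a retpoline-bound indirect call to a patched direct call. A schematic sketch of the transformation (the static_call_update() step is an assumption about where the keys get pointed at the vendor ops, not shown in this hunk):

	/* Expansion for one op: */
	DEFINE_STATIC_CALL_NULL(kvm_x86_get_cpl, *(((struct kvm_x86_ops *)0)->get_cpl));

	/* Call-site conversion: */
	cpl = kvm_x86_ops.get_cpl(vcpu);            /* before: indirect call */
	cpl = static_call(kvm_x86_get_cpl)(vcpu);   /* after: patched direct call */

	/* Presumably wired up once during hardware setup: */
	static_call_update(kvm_x86_get_cpl, kvm_x86_ops.get_cpl);
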
@@ -135,6 +147,8 @@ u64 __read_mostly kvm_max_tsc_scaling_ratio;
EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);
u64 __read_mostly kvm_default_tsc_scaling_ratio;
EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);
+bool __read_mostly kvm_has_bus_lock_exit;
+EXPORT_SYMBOL_GPL(kvm_has_bus_lock_exit);
/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 __read_mostly tsc_tolerance_ppm = 250;
@@ -233,7 +247,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
VM_STAT("mmu_shadow_zapped", mmu_shadow_zapped),
VM_STAT("mmu_pte_write", mmu_pte_write),
- VM_STAT("mmu_pte_updated", mmu_pte_updated),
VM_STAT("mmu_pde_zapped", mmu_pde_zapped),
VM_STAT("mmu_flooded", mmu_flooded),
VM_STAT("mmu_recycled", mmu_recycled),
@@ -394,7 +407,7 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
- u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff |
+ u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff |
(guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
@@ -483,19 +496,24 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
*/
vcpu->arch.dr6 &= ~DR_TRAP_BITS;
/*
- * DR6.RTM is set by all #DB exceptions that don't clear it.
+ * To reflect the #DB exception payload in the guest's dr6, three
+ * kinds of bits need to be considered: active-low bits, FIXED_1
+ * bits, and active-high bits (e.g. DR6_BD, DR6_BS and DR6_BT).
+ * DR6_ACTIVE_LOW contains the FIXED_1 and active-low bits.
+ * In the target guest dr6:
+ * FIXED_1 bits should always be set.
+ * Active-low bits should be cleared if set to 1 in the payload.
+ * Active-high bits should be set if set to 1 in the payload.
+ *
+ * Note, the payload is compatible with the pending debug
+ * exceptions/exit qualification under VMX: bits that are
+ * active-low in DR6 are active-high in the payload, so they
+ * need to be flipped before being merged into DR6.
*/
- vcpu->arch.dr6 |= DR6_RTM;
+ vcpu->arch.dr6 |= DR6_ACTIVE_LOW;
vcpu->arch.dr6 |= payload;
- /*
- * Bit 16 should be set in the payload whenever the #DB
- * exception should clear DR6.RTM. This makes the payload
- * compatible with the pending debug exceptions under VMX.
- * Though not currently documented in the SDM, this also
- * makes the payload compatible with the exit qualification
- * for #DB exceptions under VMX.
- */
- vcpu->arch.dr6 ^= payload & DR6_RTM;
+ vcpu->arch.dr6 ^= payload & DR6_ACTIVE_LOW;
/*
* The #DB payload is defined as compatible with the 'pending
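A short worked example of the recipe above, assuming the usual bit layout (DR6_ACTIVE_LOW = 0xffff0ff0, DR6_BS = bit 14, DR6_RTM = bit 16) and starting from a cleared dr6 purely for illustration:

	payload = DR6_BS | DR6_RTM;      /* single-step #DB that also clears DR6.RTM */
	dr6  = 0;
	dr6 |= DR6_ACTIVE_LOW;           /* FIXED_1 set, RTM (active-low) set        */
	dr6 |= payload;                  /* BS (active-high) becomes set             */
	dr6 ^= payload & DR6_ACTIVE_LOW; /* RTM flipped back to 0: "clear RTM" honored */
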
@@ -691,7 +709,7 @@ EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
*/
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
- if (kvm_x86_ops.get_cpl(vcpu) <= required_cpl)
+ if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl)
return true;
kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
return false;
@@ -741,8 +759,7 @@ static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
- return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) |
- rsvd_bits(1, 2);
+ return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
}
/*
@@ -851,7 +868,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
if (!is_pae(vcpu))
return 1;
- kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+ static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
if (cs_l)
return 1;
}
@@ -864,7 +881,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
return 1;
- kvm_x86_ops.set_cr0(vcpu, cr0);
+ static_call(kvm_x86_set_cr0)(vcpu, cr0);
kvm_post_set_cr0(vcpu, old_cr0, cr0);
@@ -969,12 +986,10 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
- if (kvm_x86_ops.get_cpl(vcpu) != 0 ||
- __kvm_set_xcr(vcpu, index, xcr)) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
- return 0;
+ if (static_call(kvm_x86_get_cpl)(vcpu) == 0)
+ return __kvm_set_xcr(vcpu, index, xcr);
+
+ return 1;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);
@@ -986,7 +1001,7 @@ bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
return false;
- return kvm_x86_ops.is_valid_cr4(vcpu, cr4);
+ return static_call(kvm_x86_is_valid_cr4)(vcpu, cr4);
}
EXPORT_SYMBOL_GPL(kvm_is_valid_cr4);
@@ -1030,7 +1045,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
return 1;
}
- kvm_x86_ops.set_cr4(vcpu, cr4);
+ static_call(kvm_x86_set_cr4)(vcpu, cr4);
kvm_post_set_cr4(vcpu, old_cr4, cr4);
@@ -1058,8 +1073,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
return 0;
}
- if (is_long_mode(vcpu) &&
- (cr3 & vcpu->arch.cr3_lm_rsvd_bits))
+ if (is_long_mode(vcpu) && kvm_vcpu_is_illegal_gpa(vcpu, cr3))
return 1;
else if (is_pae_paging(vcpu) &&
!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
@@ -1113,7 +1127,7 @@ void kvm_update_dr7(struct kvm_vcpu *vcpu)
dr7 = vcpu->arch.guest_debug_dr7;
else
dr7 = vcpu->arch.dr7;
- kvm_x86_ops.set_dr7(vcpu, dr7);
+ static_call(kvm_x86_set_dr7)(vcpu, dr7);
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
if (dr7 & DR7_BP_EN_MASK)
vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
@@ -1129,7 +1143,7 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
return fixed;
}
-static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
+int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
size_t size = ARRAY_SIZE(vcpu->arch.db);
@@ -1142,13 +1156,13 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
case 4:
case 6:
if (!kvm_dr6_valid(val))
- return -1; /* #GP */
+ return 1; /* #GP */
vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
break;
case 5:
default: /* 7 */
if (!kvm_dr7_valid(val))
- return -1; /* #GP */
+ return 1; /* #GP */
vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
kvm_update_dr7(vcpu);
break;
@@ -1156,18 +1170,9 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
return 0;
}
-
-int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
-{
- if (__kvm_set_dr(vcpu, dr, val)) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
- return 0;
-}
EXPORT_SYMBOL_GPL(kvm_set_dr);
-int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
+void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
size_t size = ARRAY_SIZE(vcpu->arch.db);
@@ -1184,7 +1189,6 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
*val = vcpu->arch.dr7;
break;
}
- return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);
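With this change kvm_get_dr() can no longer fail and kvm_set_dr() returns a non-zero value instead of injecting #GP itself, leaving fault injection to the caller. A hedged caller sketch (illustrative, not a function from this patch):

	static int example_emulate_mov_to_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
	{
		if (kvm_set_dr(vcpu, dr, val)) {
			kvm_inject_gp(vcpu, 0);
			return 1;
		}
		return kvm_skip_emulated_instruction(vcpu);
	}
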
@@ -1393,16 +1397,24 @@ static u64 kvm_get_arch_capabilities(void)
if (!boot_cpu_has_bug(X86_BUG_MDS))
data |= ARCH_CAP_MDS_NO;
- /*
- * On TAA affected systems:
- * - nothing to do if TSX is disabled on the host.
- * - we emulate TSX_CTRL if present on the host.
- * This lets the guest use VERW to clear CPU buffers.
- */
- if (!boot_cpu_has(X86_FEATURE_RTM))
- data &= ~(ARCH_CAP_TAA_NO | ARCH_CAP_TSX_CTRL_MSR);
- else if (!boot_cpu_has_bug(X86_BUG_TAA))
+ if (!boot_cpu_has(X86_FEATURE_RTM)) {
+ /*
+ * If RTM=0 because the kernel has disabled TSX, the host might
+ * have TAA_NO or TSX_CTRL. Clear TAA_NO (the guest sees RTM=0
+ * and therefore knows that there cannot be TAA) but keep
+ * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
+ * and we want to allow migrating those guests to tsx=off hosts.
+ */
+ data &= ~ARCH_CAP_TAA_NO;
+ } else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
data |= ARCH_CAP_TAA_NO;
+ } else {
+ /*
+ * Nothing to do here; we emulate TSX_CTRL if present on the
+ * host so the guest can choose between disabling TSX or
+ * using VERW to clear CPU buffers.
+ */
+ }
return data;
}
@@ -1417,7 +1429,7 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
rdmsrl_safe(msr->index, &msr->data);
break;
default:
- return kvm_x86_ops.get_msr_feature(msr);
+ return static_call(kvm_x86_get_msr_feature)(msr);
}
return 0;
}
@@ -1493,7 +1505,7 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
efer &= ~EFER_LMA;
efer |= vcpu->arch.efer & EFER_LMA;
- r = kvm_x86_ops.set_efer(vcpu, efer);
+ r = static_call(kvm_x86_set_efer)(vcpu, efer);
if (r) {
WARN_ON(r > 0);
return r;
@@ -1590,7 +1602,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
msr.index = index;
msr.host_initiated = host_initiated;
- return kvm_x86_ops.set_msr(vcpu, &msr);
+ return static_call(kvm_x86_set_msr)(vcpu, &msr);
}
static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
@@ -1623,7 +1635,7 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
msr.index = index;
msr.host_initiated = host_initiated;
- ret = kvm_x86_ops.get_msr(vcpu, &msr);
+ ret = static_call(kvm_x86_get_msr)(vcpu, &msr);
if (!ret)
*data = msr.data;
return ret;
@@ -1664,12 +1676,12 @@ static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32);
}
- return kvm_x86_ops.complete_emulated_msr(vcpu, err);
+ return static_call(kvm_x86_complete_emulated_msr)(vcpu, err);
}
static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu)
{
- return kvm_x86_ops.complete_emulated_msr(vcpu, vcpu->run->msr.error);
+ return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error);
}
static u64 kvm_msr_reason(int r)
@@ -1741,7 +1753,7 @@ int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
trace_kvm_msr_read_ex(ecx);
}
- return kvm_x86_ops.complete_emulated_msr(vcpu, r);
+ return static_call(kvm_x86_complete_emulated_msr)(vcpu, r);
}
EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr);
@@ -1767,16 +1779,16 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
else
trace_kvm_msr_write_ex(ecx, data);
- return kvm_x86_ops.complete_emulated_msr(vcpu, r);
+ return static_call(kvm_x86_complete_emulated_msr)(vcpu, r);
}
EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
-bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
{
+ xfer_to_guest_mode_prepare();
return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
xfer_to_guest_mode_work_pending();
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_exit_request);
/*
* The fast path for frequent and performance sensitive wrmsr emulation,
@@ -1926,15 +1938,14 @@ static s64 get_kvmclock_base_ns(void)
}
#endif
-static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
+void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs)
{
int version;
int r;
struct pvclock_wall_clock wc;
+ u32 wc_sec_hi;
u64 wall_nsec;
- kvm->arch.wall_clock = wall_clock;
-
if (!wall_clock)
return;
@@ -1963,6 +1974,12 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
+ if (sec_hi_ofs) {
+ wc_sec_hi = wall_nsec >> 32;
+ kvm_write_guest(kvm, wall_clock + sec_hi_ofs,
+ &wc_sec_hi, sizeof(wc_sec_hi));
+ }
+
version++;
kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}
@@ -2199,7 +2216,7 @@ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
vcpu->arch.l1_tsc_offset = offset;
- vcpu->arch.tsc_offset = kvm_x86_ops.write_l1_tsc_offset(vcpu, offset);
+ vcpu->arch.tsc_offset = static_call(kvm_x86_write_l1_tsc_offset)(vcpu, offset);
}
static inline bool kvm_check_tsc_unstable(void)
@@ -2582,13 +2599,15 @@ u64 get_kvmclock_ns(struct kvm *kvm)
return ret;
}
-static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
+static void kvm_setup_pvclock_page(struct kvm_vcpu *v,
+ struct gfn_to_hva_cache *cache,
+ unsigned int offset)
{
struct kvm_vcpu_arch *vcpu = &v->arch;
struct pvclock_vcpu_time_info guest_hv_clock;
- if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
- &guest_hv_clock, sizeof(guest_hv_clock))))
+ if (unlikely(kvm_read_guest_offset_cached(v->kvm, cache,
+ &guest_hv_clock, offset, sizeof(guest_hv_clock))))
return;
/* This VCPU is paused, but it's legal for a guest to read another
@@ -2611,9 +2630,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
++guest_hv_clock.version; /* first time write, random junk */
vcpu->hv_clock.version = guest_hv_clock.version + 1;
- kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
- &vcpu->hv_clock,
- sizeof(vcpu->hv_clock.version));
+ kvm_write_guest_offset_cached(v->kvm, cache,
+ &vcpu->hv_clock, offset,
+ sizeof(vcpu->hv_clock.version));
smp_wmb();
@@ -2627,16 +2646,16 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
- kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
- &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
+ kvm_write_guest_offset_cached(v->kvm, cache,
+ &vcpu->hv_clock, offset,
+ sizeof(vcpu->hv_clock));
smp_wmb();
vcpu->hv_clock.version++;
- kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
- &vcpu->hv_clock,
- sizeof(vcpu->hv_clock.version));
+ kvm_write_guest_offset_cached(v->kvm, cache,
+ &vcpu->hv_clock, offset,
+ sizeof(vcpu->hv_clock.version));
}
static int kvm_guest_time_update(struct kvm_vcpu *v)
@@ -2723,7 +2742,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
if (vcpu->pv_time_enabled)
- kvm_setup_pvclock_page(v);
+ kvm_setup_pvclock_page(v, &vcpu->pv_time, 0);
+ if (vcpu->xen.vcpu_info_set)
+ kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_info_cache,
+ offsetof(struct compat_vcpu_info, time));
+ if (vcpu->xen.vcpu_time_info_set)
+ kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
if (v == kvm_get_vcpu(v->kvm, 0))
kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
return 0;
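The writer above keeps the usual pvclock version protocol: version is made odd before the body is written and bumped back to an even value afterwards, with write barriers in between. A guest-side sketch of the matching read loop (illustrative only; real guests use the pvclock helpers):

	static void example_read_pvclock(const volatile struct pvclock_vcpu_time_info *src,
					 struct pvclock_vcpu_time_info *dst)
	{
		u32 version;

		do {
			version = src->version;
			smp_rmb();
			*dst = *(const struct pvclock_vcpu_time_info *)src;
			smp_rmb();
		} while ((version & 1) || version != src->version);
	}
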
@@ -2848,32 +2872,6 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 0;
}
-static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
-{
- struct kvm *kvm = vcpu->kvm;
- int lm = is_long_mode(vcpu);
- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
- u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
- : kvm->arch.xen_hvm_config.blob_size_32;
- u32 page_num = data & ~PAGE_MASK;
- u64 page_addr = data & PAGE_MASK;
- u8 *page;
-
- if (page_num >= blob_size)
- return 1;
-
- page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
- if (IS_ERR(page))
- return PTR_ERR(page);
-
- if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
- kfree(page);
- return 1;
- }
- return 0;
-}
-
static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
{
u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
@@ -2945,13 +2943,13 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
{
++vcpu->stat.tlb_flush;
- kvm_x86_ops.tlb_flush_all(vcpu);
+ static_call(kvm_x86_tlb_flush_all)(vcpu);
}
static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
{
++vcpu->stat.tlb_flush;
- kvm_x86_ops.tlb_flush_guest(vcpu);
+ static_call(kvm_x86_tlb_flush_guest)(vcpu);
}
static void record_steal_time(struct kvm_vcpu *vcpu)
@@ -2959,6 +2957,11 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
struct kvm_host_map map;
struct kvm_steal_time *st;
+ if (kvm_xen_msr_enabled(vcpu->kvm)) {
+ kvm_xen_runstate_set_running(vcpu);
+ return;
+ }
+
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
@@ -3007,6 +3010,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
u32 msr = msr_info->index;
u64 data = msr_info->data;
+ if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr)
+ return kvm_xen_write_hypercall_page(vcpu, data);
+
switch (msr) {
case MSR_AMD64_NB_CFG:
case MSR_IA32_UCODE_WRITE:
@@ -3063,18 +3069,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
}
break;
- case MSR_IA32_DEBUGCTLMSR:
- if (!data) {
- /* We support the non-activated case already */
- break;
- } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
- /* Values other than LBR and BTF are vendor-specific,
- thus reserved and should throw a #GP */
- return 1;
- } else if (report_ignored_msrs)
- vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
- __func__, data);
- break;
case 0x200 ... 0x2ff:
return kvm_mtrr_set_msr(vcpu, msr, data);
case MSR_IA32_APICBASE:
@@ -3143,13 +3137,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
return 1;
- kvm_write_wall_clock(vcpu->kvm, data);
+ vcpu->kvm->arch.wall_clock = data;
+ kvm_write_wall_clock(vcpu->kvm, data, 0);
break;
case MSR_KVM_WALL_CLOCK:
if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
return 1;
- kvm_write_wall_clock(vcpu->kvm, data);
+ vcpu->kvm->arch.wall_clock = data;
+ kvm_write_wall_clock(vcpu->kvm, data, 0);
break;
case MSR_KVM_SYSTEM_TIME_NEW:
if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
@@ -3294,8 +3290,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu->arch.msr_misc_features_enables = data;
break;
default:
- if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
- return xen_hvm_config(vcpu, data);
if (kvm_pmu_is_valid_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr_info);
return KVM_MSR_RET_INVALID;
@@ -3347,7 +3341,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
switch (msr_info->index) {
case MSR_IA32_PLATFORM_ID:
case MSR_IA32_EBL_CR_POWERON:
- case MSR_IA32_DEBUGCTLMSR:
case MSR_IA32_LASTBRANCHFROMIP:
case MSR_IA32_LASTBRANCHTOIP:
case MSR_IA32_LASTINTFROMIP:
@@ -3729,7 +3722,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_PIT2:
case KVM_CAP_PIT_STATE2:
case KVM_CAP_SET_IDENTITY_MAP_ADDR:
- case KVM_CAP_XEN_HVM:
case KVM_CAP_VCPU_EVENTS:
case KVM_CAP_HYPERV:
case KVM_CAP_HYPERV_VAPIC:
@@ -3769,6 +3761,15 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
r = 1;
break;
+#ifdef CONFIG_KVM_XEN
+ case KVM_CAP_XEN_HVM:
+ r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR |
+ KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
+ KVM_XEN_HVM_CONFIG_SHARED_INFO;
+ if (sched_info_on())
+ r |= KVM_XEN_HVM_CONFIG_RUNSTATE;
+ break;
+#endif
case KVM_CAP_SYNC_REGS:
r = KVM_SYNC_X86_VALID_FIELDS;
break;
@@ -3790,10 +3791,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
* fringe case that is not enabled except via specific settings
* of the module parameters.
*/
- r = kvm_x86_ops.has_emulated_msr(kvm, MSR_IA32_SMBASE);
+ r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE);
break;
case KVM_CAP_VAPIC:
- r = !kvm_x86_ops.cpu_has_accelerated_tpr();
+ r = !static_call(kvm_x86_cpu_has_accelerated_tpr)();
break;
case KVM_CAP_NR_VCPUS:
r = KVM_SOFT_MAX_VCPUS;
@@ -3835,6 +3836,13 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_STEAL_TIME:
r = sched_info_on();
break;
+ case KVM_CAP_X86_BUS_LOCK_EXIT:
+ if (kvm_has_bus_lock_exit)
+ r = KVM_BUS_LOCK_DETECTION_OFF |
+ KVM_BUS_LOCK_DETECTION_EXIT;
+ else
+ r = 0;
+ break;
default:
break;
}
@@ -3952,14 +3960,14 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
/* Address WBINVD may be executed by guest */
if (need_emulate_wbinvd(vcpu)) {
- if (kvm_x86_ops.has_wbinvd_exit())
+ if (static_call(kvm_x86_has_wbinvd_exit)())
cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
smp_call_function_single(vcpu->cpu,
wbinvd_ipi, NULL, 1);
}
- kvm_x86_ops.vcpu_load(vcpu, cpu);
+ static_call(kvm_x86_vcpu_load)(vcpu, cpu);
/* Save host pkru register if supported */
vcpu->arch.host_pkru = read_pkru();
@@ -4005,6 +4013,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
{
struct kvm_host_map map;
struct kvm_steal_time *st;
+ int idx;
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
@@ -4012,9 +4021,15 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
if (vcpu->arch.st.preempted)
return;
+ /*
+ * Take the srcu lock as memslots will be accessed to check the gfn
+ * cache generation against the memslots generation.
+ */
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
&vcpu->arch.st.cache, true))
- return;
+ goto out;
st = map.hva +
offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
@@ -4022,33 +4037,22 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
+
+out:
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
- int idx;
-
if (vcpu->preempted && !vcpu->arch.guest_state_protected)
- vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu);
+ vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
- /*
- * Disable page faults because we're in atomic context here.
- * kvm_write_guest_offset_cached() would call might_fault()
- * that relies on pagefault_disable() to tell if there's a
- * bug. NOTE: the write to guest memory may not go through if
- * during postcopy live migration or if there's heavy guest
- * paging.
- */
- pagefault_disable();
- /*
- * kvm_memslots() will be called by
- * kvm_write_guest_offset_cached() so take the srcu lock.
- */
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- kvm_steal_time_set_preempted(vcpu);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
- pagefault_enable();
- kvm_x86_ops.vcpu_put(vcpu);
+ if (kvm_xen_msr_enabled(vcpu->kvm))
+ kvm_xen_runstate_set_preempted(vcpu);
+ else
+ kvm_steal_time_set_preempted(vcpu);
+
+ static_call(kvm_x86_vcpu_put)(vcpu);
vcpu->arch.last_host_tsc = rdtsc();
/*
* If userspace has set any breakpoints or watchpoints, dr6 is restored
@@ -4062,7 +4066,7 @@ static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{
if (vcpu->arch.apicv_active)
- kvm_x86_ops.sync_pir_to_irr(vcpu);
+ static_call(kvm_x86_sync_pir_to_irr)(vcpu);
return kvm_apic_get_state(vcpu, s);
}
@@ -4172,7 +4176,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
for (bank = 0; bank < bank_num; bank++)
vcpu->arch.mce_banks[bank*4] = ~(u64)0;
- kvm_x86_ops.setup_mce(vcpu);
+ static_call(kvm_x86_setup_mce)(vcpu);
out:
return r;
}
@@ -4230,6 +4234,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
{
process_nmi(vcpu);
+ if (kvm_check_request(KVM_REQ_SMI, vcpu))
+ process_smi(vcpu);
+
/*
* In guest mode, payload delivery should be deferred,
* so that the L1 hypervisor can intercept #PF before
@@ -4276,11 +4283,11 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
events->interrupt.nr = vcpu->arch.interrupt.nr;
events->interrupt.soft = 0;
- events->interrupt.shadow = kvm_x86_ops.get_interrupt_shadow(vcpu);
+ events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
events->nmi.injected = vcpu->arch.nmi_injected;
events->nmi.pending = vcpu->arch.nmi_pending != 0;
- events->nmi.masked = kvm_x86_ops.get_nmi_mask(vcpu);
+ events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu);
events->nmi.pad = 0;
events->sipi_vector = 0; /* never valid when reporting to user space */
@@ -4347,13 +4354,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.interrupt.nr = events->interrupt.nr;
vcpu->arch.interrupt.soft = events->interrupt.soft;
if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
- kvm_x86_ops.set_interrupt_shadow(vcpu,
- events->interrupt.shadow);
+ static_call(kvm_x86_set_interrupt_shadow)(vcpu,
+ events->interrupt.shadow);
vcpu->arch.nmi_injected = events->nmi.injected;
if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
vcpu->arch.nmi_pending = events->nmi.pending;
- kvm_x86_ops.set_nmi_mask(vcpu, events->nmi.masked);
+ static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked);
if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
lapic_in_kernel(vcpu))
@@ -4409,9 +4416,9 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
if (dbgregs->flags)
return -EINVAL;
- if (dbgregs->dr6 & ~0xffffffffull)
+ if (!kvm_dr6_valid(dbgregs->dr6))
return -EINVAL;
- if (dbgregs->dr7 & ~0xffffffffull)
+ if (!kvm_dr7_valid(dbgregs->dr7))
return -EINVAL;
memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
@@ -4648,7 +4655,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
if (!kvm_x86_ops.enable_direct_tlbflush)
return -ENOTTY;
- return kvm_x86_ops.enable_direct_tlbflush(vcpu);
+ return static_call(kvm_x86_enable_direct_tlbflush)(vcpu);
case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
vcpu->arch.pv_cpuid.enforce = cap->args[0];
@@ -5019,6 +5026,28 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
case KVM_GET_SUPPORTED_HV_CPUID:
r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp);
break;
+#ifdef CONFIG_KVM_XEN
+ case KVM_XEN_VCPU_GET_ATTR: {
+ struct kvm_xen_vcpu_attr xva;
+
+ r = -EFAULT;
+ if (copy_from_user(&xva, argp, sizeof(xva)))
+ goto out;
+ r = kvm_xen_vcpu_get_attr(vcpu, &xva);
+ if (!r && copy_to_user(argp, &xva, sizeof(xva)))
+ r = -EFAULT;
+ break;
+ }
+ case KVM_XEN_VCPU_SET_ATTR: {
+ struct kvm_xen_vcpu_attr xva;
+
+ r = -EFAULT;
+ if (copy_from_user(&xva, argp, sizeof(xva)))
+ goto out;
+ r = kvm_xen_vcpu_set_attr(vcpu, &xva);
+ break;
+ }
+#endif
default:
r = -EINVAL;
}
@@ -5040,14 +5069,14 @@ static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
if (addr > (unsigned int)(-3 * PAGE_SIZE))
return -EINVAL;
- ret = kvm_x86_ops.set_tss_addr(kvm, addr);
+ ret = static_call(kvm_x86_set_tss_addr)(kvm, addr);
return ret;
}
static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
u64 ident_addr)
{
- return kvm_x86_ops.set_identity_map_addr(kvm, ident_addr);
+ return static_call(kvm_x86_set_identity_map_addr)(kvm, ident_addr);
}
static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
@@ -5201,11 +5230,18 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
+
/*
- * Flush potentially hardware-cached dirty pages to dirty_bitmap.
+ * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called
+ * before reporting dirty_bitmap to userspace. KVM flushes the buffers
+ * on all VM-Exits, thus we only need to kick running vCPUs to force a
+ * VM-Exit.
*/
- if (kvm_x86_ops.flush_log_dirty)
- kvm_x86_ops.flush_log_dirty(kvm);
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_vcpu_kick(vcpu);
}
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
@@ -5295,6 +5331,20 @@ split_irqchip_unlock:
kvm->arch.user_space_msr_mask = cap->args[0];
r = 0;
break;
+ case KVM_CAP_X86_BUS_LOCK_EXIT:
+ r = -EINVAL;
+ if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE)
+ break;
+
+ if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) &&
+ (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT))
+ break;
+
+ if (kvm_has_bus_lock_exit &&
+ cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)
+ kvm->arch.bus_lock_detection_enabled = true;
+ r = 0;
+ break;
default:
r = -EINVAL;
break;
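From userspace, the new capability is consumed with the standard KVM_CHECK_EXTENSION / KVM_ENABLE_CAP pair on the VM fd; a minimal sketch (vm_fd assumed to be an open VM file descriptor):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static void example_enable_bus_lock_exit(int vm_fd)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_X86_BUS_LOCK_EXIT,
			.args[0] = KVM_BUS_LOCK_DETECTION_EXIT,
		};

		if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_X86_BUS_LOCK_EXIT) &
		    KVM_BUS_LOCK_DETECTION_EXIT)
			ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}
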
@@ -5619,18 +5669,36 @@ set_pit2_out:
kvm->arch.bsp_vcpu_id = arg;
mutex_unlock(&kvm->lock);
break;
+#ifdef CONFIG_KVM_XEN
case KVM_XEN_HVM_CONFIG: {
struct kvm_xen_hvm_config xhc;
r = -EFAULT;
if (copy_from_user(&xhc, argp, sizeof(xhc)))
goto out;
- r = -EINVAL;
- if (xhc.flags)
+ r = kvm_xen_hvm_config(kvm, &xhc);
+ break;
+ }
+ case KVM_XEN_HVM_GET_ATTR: {
+ struct kvm_xen_hvm_attr xha;
+
+ r = -EFAULT;
+ if (copy_from_user(&xha, argp, sizeof(xha)))
goto out;
- memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
- r = 0;
+ r = kvm_xen_hvm_get_attr(kvm, &xha);
+ if (!r && copy_to_user(argp, &xha, sizeof(xha)))
+ r = -EFAULT;
+ break;
+ }
+ case KVM_XEN_HVM_SET_ATTR: {
+ struct kvm_xen_hvm_attr xha;
+
+ r = -EFAULT;
+ if (copy_from_user(&xha, argp, sizeof(xha)))
+ goto out;
+ r = kvm_xen_hvm_set_attr(kvm, &xha);
break;
}
+#endif
case KVM_SET_CLOCK: {
struct kvm_clock_data user_ns;
u64 now_ns;
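The reworked KVM_XEN_HVM_CONFIG path also accepts the new flags advertised by KVM_CAP_XEN_HVM instead of requiring a hypercall blob. A hedged userspace sketch (the MSR index is an example value, not mandated by the patch):

	struct kvm_xen_hvm_config xhc = {
		.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
		.msr   = 0x40000200,	/* example hypercall-page MSR index */
	};

	ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &xhc);
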
@@ -5673,7 +5741,7 @@ set_pit2_out:
case KVM_MEMORY_ENCRYPT_OP: {
r = -ENOTTY;
if (kvm_x86_ops.mem_enc_op)
- r = kvm_x86_ops.mem_enc_op(kvm, argp);
+ r = static_call(kvm_x86_mem_enc_op)(kvm, argp);
break;
}
case KVM_MEMORY_ENCRYPT_REG_REGION: {
@@ -5685,7 +5753,7 @@ set_pit2_out:
r = -ENOTTY;
if (kvm_x86_ops.mem_enc_reg_region)
- r = kvm_x86_ops.mem_enc_reg_region(kvm, &region);
+ r = static_call(kvm_x86_mem_enc_reg_region)(kvm, &region);
break;
}
case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
@@ -5697,7 +5765,7 @@ set_pit2_out:
r = -ENOTTY;
if (kvm_x86_ops.mem_enc_unreg_region)
- r = kvm_x86_ops.mem_enc_unreg_region(kvm, &region);
+ r = static_call(kvm_x86_mem_enc_unreg_region)(kvm, &region);
break;
}
case KVM_HYPERV_EVENTFD: {
@@ -5799,7 +5867,7 @@ static void kvm_init_msr_list(void)
}
for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
- if (!kvm_x86_ops.has_emulated_msr(NULL, emulated_msrs_all[i]))
+ if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i]))
continue;
emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
@@ -5862,13 +5930,13 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
static void kvm_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
- kvm_x86_ops.set_segment(vcpu, var, seg);
+ static_call(kvm_x86_set_segment)(vcpu, var, seg);
}
void kvm_get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
- kvm_x86_ops.get_segment(vcpu, var, seg);
+ static_call(kvm_x86_get_segment)(vcpu, var, seg);
}
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
@@ -5888,14 +5956,14 @@ gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
- u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
- u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
access |= PFERR_FETCH_MASK;
return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}
@@ -5903,7 +5971,7 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
- u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
access |= PFERR_WRITE_MASK;
return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}
@@ -5952,7 +6020,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
struct x86_exception *exception)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
- u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
unsigned offset;
int ret;
@@ -5977,7 +6045,7 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception)
{
- u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
/*
* FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
@@ -5998,7 +6066,7 @@ static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
u32 access = 0;
- if (!system && kvm_x86_ops.get_cpl(vcpu) == 3)
+ if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3)
access |= PFERR_USER_MASK;
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
@@ -6051,7 +6119,7 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
u32 access = PFERR_WRITE_MASK;
- if (!system && kvm_x86_ops.get_cpl(vcpu) == 3)
+ if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3)
access |= PFERR_USER_MASK;
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
@@ -6076,7 +6144,7 @@ int handle_ud(struct kvm_vcpu *vcpu)
char sig[5]; /* ud2; .ascii "kvm" */
struct x86_exception e;
- if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, NULL, 0)))
+ if (unlikely(!static_call(kvm_x86_can_emulate_instruction)(vcpu, NULL, 0)))
return 1;
if (force_emulation_prefix &&
@@ -6110,7 +6178,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
gpa_t *gpa, struct x86_exception *exception,
bool write)
{
- u32 access = ((kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
+ u32 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
| (write ? PFERR_WRITE_MASK : 0);
/*
@@ -6518,7 +6586,7 @@ static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
- return kvm_x86_ops.get_segment_base(vcpu, seg);
+ return static_call(kvm_x86_get_segment_base)(vcpu, seg);
}
static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
@@ -6531,7 +6599,7 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
if (!need_emulate_wbinvd(vcpu))
return X86EMUL_CONTINUE;
- if (kvm_x86_ops.has_wbinvd_exit()) {
+ if (static_call(kvm_x86_has_wbinvd_exit)()) {
int cpu = get_cpu();
cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
@@ -6558,17 +6626,17 @@ static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
}
-static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
- unsigned long *dest)
+static void emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
+ unsigned long *dest)
{
- return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
+ kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
}
static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
unsigned long value)
{
- return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
+ return kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
}
static u64 mk_cr_64(u64 curr_cr, u32 new_val)
@@ -6636,27 +6704,27 @@ static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
{
- return kvm_x86_ops.get_cpl(emul_to_vcpu(ctxt));
+ return static_call(kvm_x86_get_cpl)(emul_to_vcpu(ctxt));
}
static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
- kvm_x86_ops.get_gdt(emul_to_vcpu(ctxt), dt);
+ static_call(kvm_x86_get_gdt)(emul_to_vcpu(ctxt), dt);
}
static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
- kvm_x86_ops.get_idt(emul_to_vcpu(ctxt), dt);
+ static_call(kvm_x86_get_idt)(emul_to_vcpu(ctxt), dt);
}
static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
- kvm_x86_ops.set_gdt(emul_to_vcpu(ctxt), dt);
+ static_call(kvm_x86_set_gdt)(emul_to_vcpu(ctxt), dt);
}
static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
- kvm_x86_ops.set_idt(emul_to_vcpu(ctxt), dt);
+ static_call(kvm_x86_set_idt)(emul_to_vcpu(ctxt), dt);
}
static unsigned long emulator_get_cached_segment_base(
@@ -6798,7 +6866,7 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
struct x86_instruction_info *info,
enum x86_intercept_stage stage)
{
- return kvm_x86_ops.check_intercept(emul_to_vcpu(ctxt), info, stage,
+ return static_call(kvm_x86_check_intercept)(emul_to_vcpu(ctxt), info, stage,
&ctxt->exception);
}
@@ -6836,7 +6904,7 @@ static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulon
static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
{
- kvm_x86_ops.set_nmi_mask(emul_to_vcpu(ctxt), masked);
+ static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked);
}
static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
@@ -6852,7 +6920,7 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla
static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
const char *smstate)
{
- return kvm_x86_ops.pre_leave_smm(emul_to_vcpu(ctxt), smstate);
+ return static_call(kvm_x86_pre_leave_smm)(emul_to_vcpu(ctxt), smstate);
}
static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
@@ -6914,7 +6982,7 @@ static const struct x86_emulate_ops emulate_ops = {
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
{
- u32 int_shadow = kvm_x86_ops.get_interrupt_shadow(vcpu);
+ u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
/*
* an sti; sti; sequence only disable interrupts for the first
* instruction. So, if the last instruction, be it emulated or
@@ -6925,7 +6993,7 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
if (int_shadow & mask)
mask = 0;
if (unlikely(int_shadow || mask)) {
- kvm_x86_ops.set_interrupt_shadow(vcpu, mask);
+ static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask);
if (!mask)
kvm_make_request(KVM_REQ_EVENT, vcpu);
}
@@ -6967,7 +7035,7 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
int cs_db, cs_l;
- kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+ static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
ctxt->gpa_available = false;
ctxt->eflags = kvm_get_rflags(vcpu);
@@ -7028,7 +7096,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
kvm_queue_exception(vcpu, UD_VECTOR);
- if (!is_guest_mode(vcpu) && kvm_x86_ops.get_cpl(vcpu) == 0) {
+ if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
@@ -7088,9 +7156,9 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
if (vcpu->arch.mmu->direct_map) {
unsigned int indirect_shadow_pages;
- spin_lock(&vcpu->kvm->mmu_lock);
+ write_lock(&vcpu->kvm->mmu_lock);
indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
- spin_unlock(&vcpu->kvm->mmu_lock);
+ write_unlock(&vcpu->kvm->mmu_lock);
if (indirect_shadow_pages)
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
@@ -7197,7 +7265,7 @@ static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
struct kvm_run *kvm_run = vcpu->run;
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
- kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
+ kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW;
kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
kvm_run->debug.arch.exception = DB_VECTOR;
kvm_run->exit_reason = KVM_EXIT_DEBUG;
@@ -7209,10 +7277,10 @@ static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
- unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
+ unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
int r;
- r = kvm_x86_ops.skip_emulated_instruction(vcpu);
+ r = static_call(kvm_x86_skip_emulated_instruction)(vcpu);
if (unlikely(!r))
return 0;
@@ -7241,7 +7309,7 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
vcpu->arch.eff_db);
if (dr6 != 0) {
- kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
+ kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
kvm_run->debug.arch.pc = eip;
kvm_run->debug.arch.exception = DB_VECTOR;
kvm_run->exit_reason = KVM_EXIT_DEBUG;
@@ -7298,6 +7366,42 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
return false;
}
+/*
+ * Decode the instruction to be emulated. Returns EMULATION_OK on success.
+ */
+int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
+ void *insn, int insn_len)
+{
+ int r = EMULATION_OK;
+ struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
+
+ init_emulate_ctxt(vcpu);
+
+ /*
+ * We will reenter on the same instruction since we do not set
+ * complete_userspace_io. This does not handle watchpoints yet;
+ * those would be handled in the emulate_ops.
+ */
+ if (!(emulation_type & EMULTYPE_SKIP) &&
+ kvm_vcpu_check_breakpoint(vcpu, &r))
+ return r;
+
+ ctxt->interruptibility = 0;
+ ctxt->have_exception = false;
+ ctxt->exception.vector = -1;
+ ctxt->perm_ok = false;
+
+ ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
+
+ r = x86_decode_insn(ctxt, insn, insn_len);
+
+ trace_kvm_emulate_insn_start(vcpu);
+ ++vcpu->stat.insn_emulation;
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction);
+
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int emulation_type, void *insn, int insn_len)
{
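Splitting the decode step out lets a caller inspect the decoded instruction before committing to emulation; a hedged sketch of how a vendor exit handler might use the pair (illustrative, not code from this patch):

	static int example_decode_then_emulate(struct kvm_vcpu *vcpu)
	{
		if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
			return 1;

		/* ... inspect vcpu->arch.emulate_ctxt (opcode, operands) here ... */

		return x86_emulate_instruction(vcpu, 0, EMULTYPE_NO_DECODE, NULL, 0);
	}
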
@@ -7306,7 +7410,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
bool writeback = true;
bool write_fault_to_spt;
- if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, insn, insn_len)))
+ if (unlikely(!static_call(kvm_x86_can_emulate_instruction)(vcpu, insn, insn_len)))
return 1;
vcpu->arch.l1tf_flush_l1d = true;
@@ -7317,32 +7421,12 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
*/
write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
vcpu->arch.write_fault_to_shadow_pgtable = false;
- kvm_clear_exception_queue(vcpu);
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
- init_emulate_ctxt(vcpu);
-
- /*
- * We will reenter on the same instruction since
- * we do not set complete_userspace_io. This does not
- * handle watchpoints yet, those would be handled in
- * the emulate_ops.
- */
- if (!(emulation_type & EMULTYPE_SKIP) &&
- kvm_vcpu_check_breakpoint(vcpu, &r))
- return r;
-
- ctxt->interruptibility = 0;
- ctxt->have_exception = false;
- ctxt->exception.vector = -1;
- ctxt->perm_ok = false;
-
- ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
-
- r = x86_decode_insn(ctxt, insn, insn_len);
+ kvm_clear_exception_queue(vcpu);
- trace_kvm_emulate_insn_start(vcpu);
- ++vcpu->stat.insn_emulation;
+ r = x86_decode_emulated_instruction(vcpu, emulation_type,
+ insn, insn_len);
if (r != EMULATION_OK) {
if ((emulation_type & EMULTYPE_TRAP_UD) ||
(emulation_type & EMULTYPE_TRAP_UD_FORCED)) {
@@ -7449,7 +7533,7 @@ restart:
r = 1;
if (writeback) {
- unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
+ unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
if (!ctxt->have_exception ||
@@ -7458,7 +7542,7 @@ restart:
if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
r = kvm_vcpu_do_singlestep(vcpu);
if (kvm_x86_ops.update_emulated_instruction)
- kvm_x86_ops.update_emulated_instruction(vcpu);
+ static_call(kvm_x86_update_emulated_instruction)(vcpu);
__kvm_set_rflags(vcpu, ctxt->eflags);
}
@@ -7787,7 +7871,7 @@ static int kvm_is_user_mode(void)
int user_mode = 3;
if (__this_cpu_read(current_vcpu))
- user_mode = kvm_x86_ops.get_cpl(__this_cpu_read(current_vcpu));
+ user_mode = static_call(kvm_x86_get_cpl)(__this_cpu_read(current_vcpu));
return user_mode != 0;
}
@@ -7932,7 +8016,6 @@ int kvm_arch_init(void *opaque)
supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0;
}
- kvm_lapic_init();
if (pi_inject_timer == -1)
pi_inject_timer = housekeeping_enabled(HK_FLAG_TIMER);
#ifdef CONFIG_X86_64
@@ -7974,6 +8057,10 @@ void kvm_arch_exit(void)
kvm_mmu_module_exit();
free_percpu(user_return_msrs);
kmem_cache_destroy(x86_fpu_cache);
+#ifdef CONFIG_KVM_XEN
+ static_key_deferred_flush(&kvm_xen_enabled);
+ WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
+#endif
}
static int __kvm_vcpu_halt(struct kvm_vcpu *vcpu, int state, int reason)
@@ -8025,7 +8112,7 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK)
return -KVM_EOPNOTSUPP;
- if (kvm_get_walltime_and_clockread(&ts, &cycle) == false)
+ if (!kvm_get_walltime_and_clockread(&ts, &cycle))
return -KVM_EOPNOTSUPP;
clock_pairing.sec = ts.tv_sec;
@@ -8101,7 +8188,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
unsigned long nr, a0, a1, a2, a3, ret;
int op_64_bit;
- if (kvm_hv_hypercall_enabled(vcpu->kvm))
+ if (kvm_xen_hypercall_enabled(vcpu->kvm))
+ return kvm_xen_hypercall(vcpu);
+
+ if (kvm_hv_hypercall_enabled(vcpu))
return kvm_hv_hypercall(vcpu);
nr = kvm_rax_read(vcpu);
@@ -8121,7 +8211,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
a3 &= 0xFFFFFFFF;
}
- if (kvm_x86_ops.get_cpl(vcpu) != 0) {
+ if (static_call(kvm_x86_get_cpl)(vcpu) != 0) {
ret = -KVM_EPERM;
goto out;
}
@@ -8178,7 +8268,7 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
char instruction[3];
unsigned long rip = kvm_rip_read(vcpu);
- kvm_x86_ops.patch_hypercall(vcpu, instruction);
+ static_call(kvm_x86_patch_hypercall)(vcpu, instruction);
return emulator_write_emulated(ctxt, rip, instruction, 3,
&ctxt->exception);
@@ -8202,12 +8292,14 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
kvm_run->if_flag = !vcpu->arch.guest_state_protected
&& (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
- kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
kvm_run->cr8 = kvm_get_cr8(vcpu);
kvm_run->apic_base = kvm_get_apic_base(vcpu);
kvm_run->ready_for_interrupt_injection =
pic_in_kernel(vcpu->kvm) ||
kvm_vcpu_ready_for_interrupt_injection(vcpu);
+
+ if (is_smm(vcpu))
+ kvm_run->flags |= KVM_RUN_X86_SMM;
}
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
@@ -8233,7 +8325,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
tpr = kvm_lapic_get_cr8(vcpu);
- kvm_x86_ops.update_cr8_intercept(vcpu, tpr, max_irr);
+ static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr);
}
static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
@@ -8244,7 +8336,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
/* try to reinject previous events if any */
if (vcpu->arch.exception.injected) {
- kvm_x86_ops.queue_exception(vcpu);
+ static_call(kvm_x86_queue_exception)(vcpu);
can_inject = false;
}
/*
@@ -8263,10 +8355,10 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
*/
else if (!vcpu->arch.exception.pending) {
if (vcpu->arch.nmi_injected) {
- kvm_x86_ops.set_nmi(vcpu);
+ static_call(kvm_x86_set_nmi)(vcpu);
can_inject = false;
} else if (vcpu->arch.interrupt.injected) {
- kvm_x86_ops.set_irq(vcpu);
+ static_call(kvm_x86_set_irq)(vcpu);
can_inject = false;
}
}
@@ -8307,7 +8399,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
}
}
- kvm_x86_ops.queue_exception(vcpu);
+ static_call(kvm_x86_queue_exception)(vcpu);
can_inject = false;
}
@@ -8323,7 +8415,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
* The kvm_x86_ops hooks communicate this by returning -EBUSY.
*/
if (vcpu->arch.smi_pending) {
- r = can_inject ? kvm_x86_ops.smi_allowed(vcpu, true) : -EBUSY;
+ r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY;
if (r < 0)
goto busy;
if (r) {
@@ -8332,35 +8424,35 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
enter_smm(vcpu);
can_inject = false;
} else
- kvm_x86_ops.enable_smi_window(vcpu);
+ static_call(kvm_x86_enable_smi_window)(vcpu);
}
if (vcpu->arch.nmi_pending) {
- r = can_inject ? kvm_x86_ops.nmi_allowed(vcpu, true) : -EBUSY;
+ r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY;
if (r < 0)
goto busy;
if (r) {
--vcpu->arch.nmi_pending;
vcpu->arch.nmi_injected = true;
- kvm_x86_ops.set_nmi(vcpu);
+ static_call(kvm_x86_set_nmi)(vcpu);
can_inject = false;
- WARN_ON(kvm_x86_ops.nmi_allowed(vcpu, true) < 0);
+ WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0);
}
if (vcpu->arch.nmi_pending)
- kvm_x86_ops.enable_nmi_window(vcpu);
+ static_call(kvm_x86_enable_nmi_window)(vcpu);
}
if (kvm_cpu_has_injectable_intr(vcpu)) {
- r = can_inject ? kvm_x86_ops.interrupt_allowed(vcpu, true) : -EBUSY;
+ r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY;
if (r < 0)
goto busy;
if (r) {
kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false);
- kvm_x86_ops.set_irq(vcpu);
- WARN_ON(kvm_x86_ops.interrupt_allowed(vcpu, true) < 0);
+ static_call(kvm_x86_set_irq)(vcpu);
+ WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0);
}
if (kvm_cpu_has_injectable_intr(vcpu))
- kvm_x86_ops.enable_irq_window(vcpu);
+ static_call(kvm_x86_enable_irq_window)(vcpu);
}
if (is_guest_mode(vcpu) &&
@@ -8385,7 +8477,7 @@ static void process_nmi(struct kvm_vcpu *vcpu)
* If an NMI is already in progress, limit further NMIs to just one.
* Otherwise, allow two (and we'll inject the first one immediately).
*/
- if (kvm_x86_ops.get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
+ if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected)
limit = 1;
vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
@@ -8475,11 +8567,11 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
put_smstate(u32, buf, 0x7f7c, seg.limit);
put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
- kvm_x86_ops.get_gdt(vcpu, &dt);
+ static_call(kvm_x86_get_gdt)(vcpu, &dt);
put_smstate(u32, buf, 0x7f74, dt.address);
put_smstate(u32, buf, 0x7f70, dt.size);
- kvm_x86_ops.get_idt(vcpu, &dt);
+ static_call(kvm_x86_get_idt)(vcpu, &dt);
put_smstate(u32, buf, 0x7f58, dt.address);
put_smstate(u32, buf, 0x7f54, dt.size);
@@ -8529,7 +8621,7 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
put_smstate(u32, buf, 0x7e94, seg.limit);
put_smstate(u64, buf, 0x7e98, seg.base);
- kvm_x86_ops.get_idt(vcpu, &dt);
+ static_call(kvm_x86_get_idt)(vcpu, &dt);
put_smstate(u32, buf, 0x7e84, dt.size);
put_smstate(u64, buf, 0x7e88, dt.address);
@@ -8539,7 +8631,7 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
put_smstate(u32, buf, 0x7e74, seg.limit);
put_smstate(u64, buf, 0x7e78, seg.base);
- kvm_x86_ops.get_gdt(vcpu, &dt);
+ static_call(kvm_x86_get_gdt)(vcpu, &dt);
put_smstate(u32, buf, 0x7e64, dt.size);
put_smstate(u64, buf, 0x7e68, dt.address);
@@ -8569,30 +8661,30 @@ static void enter_smm(struct kvm_vcpu *vcpu)
* vCPU state (e.g. leave guest mode) after we've saved the state into
* the SMM state-save area.
*/
- kvm_x86_ops.pre_enter_smm(vcpu, buf);
+ static_call(kvm_x86_pre_enter_smm)(vcpu, buf);
vcpu->arch.hflags |= HF_SMM_MASK;
kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
- if (kvm_x86_ops.get_nmi_mask(vcpu))
+ if (static_call(kvm_x86_get_nmi_mask)(vcpu))
vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
else
- kvm_x86_ops.set_nmi_mask(vcpu, true);
+ static_call(kvm_x86_set_nmi_mask)(vcpu, true);
kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
kvm_rip_write(vcpu, 0x8000);
cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
- kvm_x86_ops.set_cr0(vcpu, cr0);
+ static_call(kvm_x86_set_cr0)(vcpu, cr0);
vcpu->arch.cr0 = cr0;
- kvm_x86_ops.set_cr4(vcpu, 0);
+ static_call(kvm_x86_set_cr4)(vcpu, 0);
/* Undocumented: IDT limit is set to zero on entry to SMM. */
dt.address = dt.size = 0;
- kvm_x86_ops.set_idt(vcpu, &dt);
+ static_call(kvm_x86_set_idt)(vcpu, &dt);
- __kvm_set_dr(vcpu, 7, DR7_FIXED_1);
+ kvm_set_dr(vcpu, 7, DR7_FIXED_1);
cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
cs.base = vcpu->arch.smbase;
@@ -8621,7 +8713,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
#ifdef CONFIG_X86_64
if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
- kvm_x86_ops.set_efer(vcpu, 0);
+ static_call(kvm_x86_set_efer)(vcpu, 0);
#endif
kvm_update_cpuid_runtime(vcpu);
@@ -8659,7 +8751,7 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm);
kvm_apic_update_apicv(vcpu);
- kvm_x86_ops.refresh_apicv_exec_ctrl(vcpu);
+ static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
@@ -8676,7 +8768,7 @@ void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
unsigned long old, new, expected;
if (!kvm_x86_ops.check_apicv_inhibit_reasons ||
- !kvm_x86_ops.check_apicv_inhibit_reasons(bit))
+ !static_call(kvm_x86_check_apicv_inhibit_reasons)(bit))
return;
old = READ_ONCE(kvm->arch.apicv_inhibit_reasons);
@@ -8696,7 +8788,7 @@ void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
trace_kvm_apicv_update_request(activate, bit);
if (kvm_x86_ops.pre_update_apicv_exec_ctrl)
- kvm_x86_ops.pre_update_apicv_exec_ctrl(kvm, activate);
+ static_call(kvm_x86_pre_update_apicv_exec_ctrl)(kvm, activate);
/*
* Sending request to update APICV for all other vcpus,
@@ -8722,7 +8814,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
else {
if (vcpu->arch.apicv_active)
- kvm_x86_ops.sync_pir_to_irr(vcpu);
+ static_call(kvm_x86_sync_pir_to_irr)(vcpu);
if (ioapic_in_kernel(vcpu->kvm))
kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
}
@@ -8740,9 +8832,12 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
if (!kvm_apic_hw_enabled(vcpu->arch.apic))
return;
- bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
- vcpu_to_synic(vcpu)->vec_bitmap, 256);
- kvm_x86_ops.load_eoi_exitmap(vcpu, eoi_exit_bitmap);
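+	/*
+	 * SynIC vectors are only merged in when the vCPU actually has a
+	 * Hyper-V context; the context is created on demand and may not
+	 * exist.
+	 */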
+ if (to_hv_vcpu(vcpu))
+ bitmap_or((ulong *)eoi_exit_bitmap,
+ vcpu->arch.ioapic_handled_vectors,
+ to_hv_synic(vcpu)->vec_bitmap, 256);
+
+ static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
}
void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
@@ -8767,7 +8862,7 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
if (!kvm_x86_ops.set_apic_access_page_addr)
return;
- kvm_x86_ops.set_apic_access_page_addr(vcpu);
+ static_call(kvm_x86_set_apic_access_page_addr)(vcpu);
}
void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
@@ -8802,9 +8897,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_request_pending(vcpu)) {
if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
- if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
- ;
- else if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
+ if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
r = 0;
goto out;
}
@@ -8894,8 +8987,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto out;
}
if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+
vcpu->run->exit_reason = KVM_EXIT_HYPERV;
- vcpu->run->hyperv = vcpu->arch.hyperv.exit;
+ vcpu->run->hyperv = hv_vcpu->exit;
r = 0;
goto out;
}
@@ -8912,10 +9007,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
kvm_check_async_pf_completion(vcpu);
if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu))
- kvm_x86_ops.msr_filter_changed(vcpu);
+ static_call(kvm_x86_msr_filter_changed)(vcpu);
+
+ if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu))
+ static_call(kvm_x86_update_cpu_dirty_logging)(vcpu);
}
- if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
+ if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||
+ kvm_xen_has_interrupt(vcpu)) {
++vcpu->stat.req_event;
kvm_apic_accept_events(vcpu);
if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
@@ -8925,7 +9024,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
inject_pending_event(vcpu, &req_immediate_exit);
if (req_int_win)
- kvm_x86_ops.enable_irq_window(vcpu);
+ static_call(kvm_x86_enable_irq_window)(vcpu);
if (kvm_lapic_enabled(vcpu)) {
update_cr8_intercept(vcpu);
@@ -8940,7 +9039,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
preempt_disable();
- kvm_x86_ops.prepare_guest_switch(vcpu);
+ static_call(kvm_x86_prepare_guest_switch)(vcpu);
/*
* Disable IRQs before setting IN_GUEST_MODE. Posted interrupt
@@ -8971,7 +9070,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* notified with kvm_vcpu_kick.
*/
if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
- kvm_x86_ops.sync_pir_to_irr(vcpu);
+ static_call(kvm_x86_sync_pir_to_irr)(vcpu);
if (kvm_vcpu_exit_request(vcpu)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -8985,11 +9084,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (req_immediate_exit) {
kvm_make_request(KVM_REQ_EVENT, vcpu);
- kvm_x86_ops.request_immediate_exit(vcpu);
+ static_call(kvm_x86_request_immediate_exit)(vcpu);
}
- trace_kvm_entry(vcpu);
-
fpregs_assert_state_consistent();
if (test_thread_flag(TIF_NEED_FPU_LOAD))
switch_fpu_return();
@@ -9004,7 +9101,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
}
- exit_fastpath = kvm_x86_ops.run(vcpu);
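+	/*
+	 * EXIT_FASTPATH_REENTER_GUEST means the exit was fully handled in
+	 * the fastpath, so re-enter the guest directly unless a new exit
+	 * request is pending; keep posted interrupts synced while looping.
+	 */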
+ for (;;) {
+ exit_fastpath = static_call(kvm_x86_run)(vcpu);
+ if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
+ break;
+
+ if (unlikely(kvm_vcpu_exit_request(vcpu))) {
+ exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
+ break;
+ }
+
+ if (vcpu->arch.apicv_active)
+ static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+ }
/*
* Do this here before restoring debug registers on the host. And
@@ -9014,7 +9123,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
*/
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
- kvm_x86_ops.sync_dirty_debug_regs(vcpu);
+ static_call(kvm_x86_sync_dirty_debug_regs)(vcpu);
kvm_update_dr0123(vcpu);
kvm_update_dr7(vcpu);
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
@@ -9036,7 +9145,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
- kvm_x86_ops.handle_exit_irqoff(vcpu);
+ static_call(kvm_x86_handle_exit_irqoff)(vcpu);
/*
* Consume any pending interrupts, including the possible source of
@@ -9078,13 +9187,13 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (vcpu->arch.apic_attention)
kvm_lapic_sync_from_vapic(vcpu);
- r = kvm_x86_ops.handle_exit(vcpu, exit_fastpath);
+ r = static_call(kvm_x86_handle_exit)(vcpu, exit_fastpath);
return r;
cancel_injection:
if (req_immediate_exit)
kvm_make_request(KVM_REQ_EVENT, vcpu);
- kvm_x86_ops.cancel_injection(vcpu);
+ static_call(kvm_x86_cancel_injection)(vcpu);
if (unlikely(vcpu->arch.apic_attention))
kvm_lapic_sync_from_vapic(vcpu);
out:
@@ -9094,13 +9203,13 @@ out:
static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
if (!kvm_arch_vcpu_runnable(vcpu) &&
- (!kvm_x86_ops.pre_block || kvm_x86_ops.pre_block(vcpu) == 0)) {
+ (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_block(vcpu);
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
if (kvm_x86_ops.post_block)
- kvm_x86_ops.post_block(vcpu);
+ static_call(kvm_x86_post_block)(vcpu);
if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
return 1;
@@ -9321,6 +9430,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
vcpu_load(vcpu);
kvm_sigset_activate(vcpu);
+ kvm_run->flags = 0;
kvm_load_guest_fpu(vcpu);
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
@@ -9495,10 +9605,10 @@ static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
- kvm_x86_ops.get_idt(vcpu, &dt);
+ static_call(kvm_x86_get_idt)(vcpu, &dt);
sregs->idt.limit = dt.size;
sregs->idt.base = dt.address;
- kvm_x86_ops.get_gdt(vcpu, &dt);
+ static_call(kvm_x86_get_gdt)(vcpu, &dt);
sregs->gdt.limit = dt.size;
sregs->gdt.base = dt.address;
@@ -9616,6 +9726,8 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
*/
if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
return false;
+ if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3))
+ return false;
} else {
/*
* Not in 64-bit mode: EFER.LMA is clear and the code
@@ -9649,10 +9761,10 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
dt.size = sregs->idt.limit;
dt.address = sregs->idt.base;
- kvm_x86_ops.set_idt(vcpu, &dt);
+ static_call(kvm_x86_set_idt)(vcpu, &dt);
dt.size = sregs->gdt.limit;
dt.address = sregs->gdt.base;
- kvm_x86_ops.set_gdt(vcpu, &dt);
+ static_call(kvm_x86_set_gdt)(vcpu, &dt);
vcpu->arch.cr2 = sregs->cr2;
mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
@@ -9662,14 +9774,14 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
kvm_set_cr8(vcpu, sregs->cr8);
mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
- kvm_x86_ops.set_efer(vcpu, sregs->efer);
+ static_call(kvm_x86_set_efer)(vcpu, sregs->efer);
mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
- kvm_x86_ops.set_cr0(vcpu, sregs->cr0);
+ static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0);
vcpu->arch.cr0 = sregs->cr0;
mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
- kvm_x86_ops.set_cr4(vcpu, sregs->cr4);
+ static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4);
idx = srcu_read_lock(&vcpu->kvm->srcu);
if (is_pae_paging(vcpu)) {
@@ -9777,7 +9889,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
*/
kvm_set_rflags(vcpu, rflags);
- kvm_x86_ops.update_exception_bitmap(vcpu);
+ static_call(kvm_x86_update_exception_bitmap)(vcpu);
r = 0;
@@ -9955,7 +10067,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (kvm_apicv_activated(vcpu->kvm))
vcpu->arch.apicv_active = true;
} else
- static_key_slow_inc(&kvm_no_apic_vcpu);
+ static_branch_inc(&kvm_has_noapic_vcpu);
r = -ENOMEM;
@@ -9993,6 +10105,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
fx_init(vcpu);
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+ vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
@@ -10002,9 +10115,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.pending_external_vector = -1;
vcpu->arch.preempted_in_kernel = false;
- kvm_hv_vcpu_init(vcpu);
-
- r = kvm_x86_ops.vcpu_create(vcpu);
+ r = static_call(kvm_x86_vcpu_create)(vcpu);
if (r)
goto free_guest_fpu;
@@ -10040,8 +10151,6 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
- kvm_hv_vcpu_postcreate(vcpu);
-
if (mutex_lock_killable(&vcpu->mutex))
return;
vcpu_load(vcpu);
@@ -10067,7 +10176,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
kvmclock_reset(vcpu);
- kvm_x86_ops.vcpu_free(vcpu);
+ static_call(kvm_x86_vcpu_free)(vcpu);
kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
@@ -10084,7 +10193,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
free_page((unsigned long)vcpu->arch.pio_data);
kvfree(vcpu->arch.cpuid_entries);
if (!lapic_in_kernel(vcpu))
- static_key_slow_dec(&kvm_no_apic_vcpu);
+ static_branch_dec(&kvm_has_noapic_vcpu);
}
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -10103,7 +10212,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
kvm_update_dr0123(vcpu);
- vcpu->arch.dr6 = DR6_INIT;
+ vcpu->arch.dr6 = DR6_ACTIVE_LOW;
vcpu->arch.dr7 = DR7_FIXED_1;
kvm_update_dr7(vcpu);
@@ -10156,7 +10265,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu->arch.ia32_xss = 0;
- kvm_x86_ops.vcpu_reset(vcpu, init_event);
+ static_call(kvm_x86_vcpu_reset)(vcpu, init_event);
}
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
@@ -10182,7 +10291,7 @@ int kvm_arch_hardware_enable(void)
bool stable, backwards_tsc = false;
kvm_user_return_msr_cpu_online();
- ret = kvm_x86_ops.hardware_enable();
+ ret = static_call(kvm_x86_hardware_enable)();
if (ret != 0)
return ret;
@@ -10264,7 +10373,7 @@ int kvm_arch_hardware_enable(void)
void kvm_arch_hardware_disable(void)
{
- kvm_x86_ops.hardware_disable();
+ static_call(kvm_x86_hardware_disable)();
drop_user_return_notifiers();
}
@@ -10283,6 +10392,7 @@ int kvm_arch_hardware_setup(void *opaque)
return r;
memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
+ kvm_ops_static_call_update();
if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
supported_xss = 0;
@@ -10311,7 +10421,7 @@ int kvm_arch_hardware_setup(void *opaque)
void kvm_arch_hardware_unsetup(void)
{
- kvm_x86_ops.hardware_unsetup();
+ static_call(kvm_x86_hardware_unsetup)();
}
int kvm_arch_check_processor_compat(void *opaque)
@@ -10339,8 +10449,8 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
}
-struct static_key kvm_no_apic_vcpu __read_mostly;
-EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
+__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
+EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);
void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
@@ -10351,12 +10461,12 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
pmu->need_cleanup = true;
kvm_make_request(KVM_REQ_PMU, vcpu);
}
- kvm_x86_ops.sched_in(vcpu, cpu);
+ static_call(kvm_x86_sched_in)(vcpu, cpu);
}
void kvm_arch_free_vm(struct kvm *kvm)
{
- kfree(kvm->arch.hyperv.hv_pa_pg);
+ kfree(to_kvm_hv(kvm)->hv_pa_pg);
vfree(kvm);
}
@@ -10395,7 +10505,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_page_track_init(kvm);
kvm_mmu_init_vm(kvm);
- return kvm_x86_ops.vm_init(kvm);
+ return static_call(kvm_x86_vm_init)(kvm);
}
int kvm_arch_post_init_vm(struct kvm *kvm)
@@ -10494,7 +10604,7 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
return 0;
old_npages = slot->npages;
- hva = 0;
+ hva = slot->userspace_addr;
}
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
@@ -10540,8 +10650,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
__x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
mutex_unlock(&kvm->slots_lock);
}
- if (kvm_x86_ops.vm_destroy)
- kvm_x86_ops.vm_destroy(kvm);
+ static_call_cond(kvm_x86_vm_destroy)(kvm);
for (i = 0; i < kvm->arch.msr_filter.count; i++)
kfree(kvm->arch.msr_filter.ranges[i].bitmap);
kvm_pic_destroy(kvm);
@@ -10551,6 +10660,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
kvm_mmu_uninit_vm(kvm);
kvm_page_track_cleanup(kvm);
+ kvm_xen_destroy_vm(kvm);
kvm_hv_destroy_vm(kvm);
}
@@ -10669,75 +10779,96 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
return 0;
}
+
+static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
+{
+ struct kvm_arch *ka = &kvm->arch;
+
+ if (!kvm_x86_ops.cpu_dirty_log_size)
+ return;
+
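+	/*
+	 * cpu_dirty_logging_count tracks how many memslots have dirty
+	 * logging enabled; vCPUs only need to update their CPU-based
+	 * dirty-logging state when the count transitions to or from zero.
+	 */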
+ if ((enable && ++ka->cpu_dirty_logging_count == 1) ||
+ (!enable && --ka->cpu_dirty_logging_count == 0))
+ kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING);
+
+ WARN_ON_ONCE(ka->cpu_dirty_logging_count < 0);
+}
+
static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
+ bool log_dirty_pages = new->flags & KVM_MEM_LOG_DIRTY_PAGES;
+
/*
- * Nothing to do for RO slots or CREATE/MOVE/DELETE of a slot.
- * See comments below.
+ * Update CPU dirty logging if dirty logging is being toggled. This
+ * applies to all operations.
*/
- if ((change != KVM_MR_FLAGS_ONLY) || (new->flags & KVM_MEM_READONLY))
- return;
+ if ((old->flags ^ new->flags) & KVM_MEM_LOG_DIRTY_PAGES)
+ kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages);
/*
- * Dirty logging tracks sptes in 4k granularity, meaning that large
- * sptes have to be split. If live migration is successful, the guest
- * in the source machine will be destroyed and large sptes will be
- * created in the destination. However, if the guest continues to run
- * in the source machine (for example if live migration fails), small
- * sptes will remain around and cause bad performance.
- *
- * Scan sptes if dirty logging has been stopped, dropping those
- * which can be collapsed into a single large-page spte. Later
- * page faults will create the large-page sptes.
+ * Nothing more to do for RO slots (which can't be dirtied and can't be
+ * made writable) or CREATE/MOVE/DELETE of a slot.
*
- * There is no need to do this in any of the following cases:
+ * For a memslot with dirty logging disabled:
* CREATE: No dirty mappings will already exist.
* MOVE/DELETE: The old mappings will already have been cleaned up by
* kvm_arch_flush_shadow_memslot()
+ *
+ * For a memslot with dirty logging enabled:
+ * CREATE: No shadow pages exist, thus nothing to write-protect
+ * and no dirty bits to clear.
+ * MOVE/DELETE: The old mappings will already have been cleaned up by
+ * kvm_arch_flush_shadow_memslot().
*/
- if ((old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
- !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
- kvm_mmu_zap_collapsible_sptes(kvm, new);
+ if ((change != KVM_MR_FLAGS_ONLY) || (new->flags & KVM_MEM_READONLY))
+ return;
/*
- * Enable or disable dirty logging for the slot.
- *
- * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of the old
- * slot have been zapped so no dirty logging updates are needed for
- * the old slot.
- * For KVM_MR_CREATE and KVM_MR_MOVE, once the new slot is visible
- * any mappings that might be created in it will consume the
- * properties of the new slot and do not need to be updated here.
- *
- * When PML is enabled, the kvm_x86_ops dirty logging hooks are
- * called to enable/disable dirty logging.
- *
- * When disabling dirty logging with PML enabled, the D-bit is set
- * for sptes in the slot in order to prevent unnecessary GPA
- * logging in the PML buffer (and potential PML buffer full VMEXIT).
- * This guarantees leaving PML enabled for the guest's lifetime
- * won't have any additional overhead from PML when the guest is
- * running with dirty logging disabled.
- *
- * When enabling dirty logging, large sptes are write-protected
- * so they can be split on first write. New large sptes cannot
- * be created for this slot until the end of the logging.
- * See the comments in fast_page_fault().
- * For small sptes, nothing is done if the dirty log is in the
- * initial-all-set state. Otherwise, depending on whether pml
- * is enabled the D-bit or the W-bit will be cleared.
+ * READONLY and non-flags changes were filtered out above, and the only
+ * other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty
+ * logging isn't being toggled on or off.
*/
- if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
- if (kvm_x86_ops.slot_enable_log_dirty) {
- kvm_x86_ops.slot_enable_log_dirty(kvm, new);
- } else {
- int level =
- kvm_dirty_log_manual_protect_and_init_set(kvm) ?
- PG_LEVEL_2M : PG_LEVEL_4K;
+ if (WARN_ON_ONCE(!((old->flags ^ new->flags) & KVM_MEM_LOG_DIRTY_PAGES)))
+ return;
+ if (!log_dirty_pages) {
+ /*
+ * Dirty logging tracks sptes in 4k granularity, meaning that
+ * large sptes have to be split. If live migration succeeds,
+ * the guest in the source machine will be destroyed and large
+ * sptes will be created in the destination. However, if the
+ * guest continues to run in the source machine (for example if
+ * live migration fails), small sptes will remain around and
+ * cause bad performance.
+ *
+ * Scan sptes if dirty logging has been stopped, dropping those
+ * which can be collapsed into a single large-page spte. Later
+ * page faults will create the large-page sptes.
+ */
+ kvm_mmu_zap_collapsible_sptes(kvm, new);
+ } else {
+ /* By default, write-protect everything to log writes. */
+ int level = PG_LEVEL_4K;
+
+ if (kvm_x86_ops.cpu_dirty_log_size) {
+ /*
+ * Clear all dirty bits, unless pages are treated as
+ * dirty from the get-go.
+ */
+ if (!kvm_dirty_log_manual_protect_and_init_set(kvm))
+ kvm_mmu_slot_leaf_clear_dirty(kvm, new);
+
+ /*
+ * Write-protect large pages on write so that dirty
+ * logging happens at 4k granularity. No need to
+ * write-protect small SPTEs since write accesses are
+ * logged by the CPU via dirty bits.
+ */
+ level = PG_LEVEL_2M;
+ } else if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
/*
* If we're with initial-all-set, we don't need
* to write protect any small page because
@@ -10746,11 +10877,9 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
* so that the page split can happen lazily on
* the first write to the huge page.
*/
- kvm_mmu_slot_remove_write_access(kvm, new, level);
+ level = PG_LEVEL_2M;
}
- } else {
- if (kvm_x86_ops.slot_disable_log_dirty)
- kvm_x86_ops.slot_disable_log_dirty(kvm, new);
+ kvm_mmu_slot_remove_write_access(kvm, new, level);
}
}
@@ -10789,7 +10918,7 @@ static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
return (is_guest_mode(vcpu) &&
kvm_x86_ops.guest_apic_has_interrupt &&
- kvm_x86_ops.guest_apic_has_interrupt(vcpu));
+ static_call(kvm_x86_guest_apic_has_interrupt)(vcpu));
}
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
@@ -10808,12 +10937,12 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
(vcpu->arch.nmi_pending &&
- kvm_x86_ops.nmi_allowed(vcpu, false)))
+ static_call(kvm_x86_nmi_allowed)(vcpu, false)))
return true;
if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
(vcpu->arch.smi_pending &&
- kvm_x86_ops.smi_allowed(vcpu, false)))
+ static_call(kvm_x86_smi_allowed)(vcpu, false)))
return true;
if (kvm_arch_interrupt_allowed(vcpu) &&
@@ -10847,7 +10976,7 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
kvm_test_request(KVM_REQ_EVENT, vcpu))
return true;
- if (vcpu->arch.apicv_active && kvm_x86_ops.dy_apicv_has_pending_interrupt(vcpu))
+ if (vcpu->arch.apicv_active && static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu))
return true;
return false;
@@ -10865,7 +10994,7 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
- return kvm_x86_ops.interrupt_allowed(vcpu, false);
+ return static_call(kvm_x86_interrupt_allowed)(vcpu, false);
}
unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
@@ -10891,7 +11020,7 @@ unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
unsigned long rflags;
- rflags = kvm_x86_ops.get_rflags(vcpu);
+ rflags = static_call(kvm_x86_get_rflags)(vcpu);
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
rflags &= ~X86_EFLAGS_TF;
return rflags;
@@ -10903,7 +11032,7 @@ static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
rflags |= X86_EFLAGS_TF;
- kvm_x86_ops.set_rflags(vcpu, rflags);
+ static_call(kvm_x86_set_rflags)(vcpu, rflags);
}
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
@@ -11033,7 +11162,7 @@ static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
return false;
if (!kvm_pv_async_pf_enabled(vcpu) ||
- (vcpu->arch.apf.send_user_only && kvm_x86_ops.get_cpl(vcpu) == 0))
+ (vcpu->arch.apf.send_user_only && static_call(kvm_x86_get_cpl)(vcpu) == 0))
return false;
return true;
@@ -11178,7 +11307,7 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
irqfd->producer = prod;
kvm_arch_start_assignment(irqfd->kvm);
- ret = kvm_x86_ops.update_pi_irte(irqfd->kvm,
+ ret = static_call(kvm_x86_update_pi_irte)(irqfd->kvm,
prod->irq, irqfd->gsi, 1);
if (ret)
@@ -11203,7 +11332,7 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
* when the irq is masked/disabled or the consumer side (KVM
	 * in this case) doesn't want to receive the interrupts.
*/
- ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
+ ret = static_call(kvm_x86_update_pi_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0);
if (ret)
printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
" fails: %d\n", irqfd->consumer.token, ret);
@@ -11214,7 +11343,7 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set)
{
- return kvm_x86_ops.update_pi_irte(kvm, host_irq, guest_irq, set);
+ return static_call(kvm_x86_update_pi_irte)(kvm, host_irq, guest_irq, set);
}
bool kvm_vector_hashing_enabled(void)
@@ -11556,6 +11685,7 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
}
EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index c5ee0f5ce0f1..39eb04887141 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -98,7 +98,7 @@ static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
if (!is_long_mode(vcpu))
return false;
- kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+ static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
return cs_l;
}
@@ -129,7 +129,7 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
{
++vcpu->stat.tlb_flush;
- kvm_x86_ops.tlb_flush_current(vcpu);
+ static_call(kvm_x86_tlb_flush_current)(vcpu);
}
static inline int is_pae(struct kvm_vcpu *vcpu)
@@ -244,9 +244,10 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
{
- return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu);
+ return is_smm(vcpu) || static_call(kvm_x86_apic_init_signal_blocked)(vcpu);
}
+void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs);
void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
@@ -273,6 +274,8 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
int page_num);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
+int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
+ void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
@@ -296,6 +299,8 @@ extern int pi_inject_timer;
extern struct static_key kvm_no_apic_vcpu;
+extern bool report_ignored_msrs;
+
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
@@ -391,7 +396,6 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
-bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
@@ -425,6 +429,8 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
__reserved_bits |= X86_CR4_UMIP; \
if (!__cpu_has(__c, X86_FEATURE_VMX)) \
__reserved_bits |= X86_CR4_VMXE; \
+ if (!__cpu_has(__c, X86_FEATURE_PCID)) \
+ __reserved_bits |= X86_CR4_PCIDE; \
__reserved_bits; \
})
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
new file mode 100644
index 000000000000..ae17250e1efe
--- /dev/null
+++ b/arch/x86/kvm/xen.c
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
+ * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * KVM Xen emulation
+ */
+
+#include "x86.h"
+#include "xen.h"
+#include "hyperv.h"
+
+#include <linux/kvm_host.h>
+#include <linux/sched/stat.h>
+
+#include <trace/events/kvm.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/vcpu.h>
+
+#include "trace.h"
+
+DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);
+
+static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
+{
+ gpa_t gpa = gfn_to_gpa(gfn);
+ int wc_ofs, sec_hi_ofs;
+ int ret;
+ int idx = srcu_read_lock(&kvm->srcu);
+
+ ret = kvm_gfn_to_hva_cache_init(kvm, &kvm->arch.xen.shinfo_cache,
+ gpa, PAGE_SIZE);
+ if (ret)
+ goto out;
+
+ kvm->arch.xen.shinfo_set = true;
+
+ /* Paranoia checks on the 32-bit struct layout */
+ BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
+ BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
+ BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
+
+ /* 32-bit location by default */
+ wc_ofs = offsetof(struct compat_shared_info, wc);
+ sec_hi_ofs = offsetof(struct compat_shared_info, arch.wc_sec_hi);
+
+#ifdef CONFIG_X86_64
+ /* Paranoia checks on the 64-bit struct layout */
+ BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
+ BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);
+
+ if (kvm->arch.xen.long_mode) {
+ wc_ofs = offsetof(struct shared_info, wc);
+ sec_hi_ofs = offsetof(struct shared_info, wc_sec_hi);
+ }
+#endif
+
+ kvm_write_wall_clock(kvm, gpa + wc_ofs, sec_hi_ofs - wc_ofs);
+ kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);
+
+out:
+ srcu_read_unlock(&kvm->srcu, idx);
+ return ret;
+}
+
+static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
+{
+ struct kvm_vcpu_xen *vx = &v->arch.xen;
+ u64 now = get_kvmclock_ns(v->kvm);
+ u64 delta_ns = now - vx->runstate_entry_time;
+ u64 run_delay = current->sched_info.run_delay;
+
+ if (unlikely(!vx->runstate_entry_time))
+ vx->current_runstate = RUNSTATE_offline;
+
+ /*
+ * Time waiting for the scheduler isn't "stolen" if the
+ * vCPU wasn't running anyway.
+ */
+ if (vx->current_runstate == RUNSTATE_running) {
+ u64 steal_ns = run_delay - vx->last_steal;
+
+ delta_ns -= steal_ns;
+
+ vx->runstate_times[RUNSTATE_runnable] += steal_ns;
+ }
+ vx->last_steal = run_delay;
+
+ vx->runstate_times[vx->current_runstate] += delta_ns;
+ vx->current_runstate = state;
+ vx->runstate_entry_time = now;
+}
+
+void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
+{
+ struct kvm_vcpu_xen *vx = &v->arch.xen;
+ uint64_t state_entry_time;
+ unsigned int offset;
+
+ kvm_xen_update_runstate(v, state);
+
+ if (!vx->runstate_set)
+ return;
+
+ BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
+
+ offset = offsetof(struct compat_vcpu_runstate_info, state_entry_time);
+#ifdef CONFIG_X86_64
+ /*
+ * The only difference is alignment of uint64_t in 32-bit.
+ * So the first field 'state' is accessed directly using
+ * offsetof() (where its offset happens to be zero), while the
+	 * remaining fields, which are all uint64_t, start at 'offset',
+ * which we tweak here by adding 4.
+ */
+ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
+ offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
+ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
+ offsetof(struct compat_vcpu_runstate_info, time) + 4);
+
+ if (v->kvm->arch.xen.long_mode)
+ offset = offsetof(struct vcpu_runstate_info, state_entry_time);
+#endif
+ /*
+ * First write the updated state_entry_time at the appropriate
+ * location determined by 'offset'.
+ */
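+	/*
+	 * XEN_RUNSTATE_UPDATE tells the guest that the runstate times are
+	 * being updated and may be inconsistent until the bit is cleared
+	 * again at the end of this function.
+	 */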
+ state_entry_time = vx->runstate_entry_time;
+ state_entry_time |= XEN_RUNSTATE_UPDATE;
+
+ BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->state_entry_time) !=
+ sizeof(state_entry_time));
+ BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state_entry_time) !=
+ sizeof(state_entry_time));
+
+ if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
+ &state_entry_time, offset,
+ sizeof(state_entry_time)))
+ return;
+ smp_wmb();
+
+ /*
+ * Next, write the new runstate. This is in the *same* place
+ * for 32-bit and 64-bit guests, asserted here for paranoia.
+ */
+ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
+ offsetof(struct compat_vcpu_runstate_info, state));
+ BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->state) !=
+ sizeof(vx->current_runstate));
+ BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state) !=
+ sizeof(vx->current_runstate));
+
+ if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
+ &vx->current_runstate,
+ offsetof(struct vcpu_runstate_info, state),
+ sizeof(vx->current_runstate)))
+ return;
+
+ /*
+ * Write the actual runstate times immediately after the
+ * runstate_entry_time.
+ */
+ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
+ offsetof(struct vcpu_runstate_info, time) - sizeof(u64));
+ BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
+ offsetof(struct compat_vcpu_runstate_info, time) - sizeof(u64));
+ BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) !=
+ sizeof(((struct compat_vcpu_runstate_info *)0)->time));
+ BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) !=
+ sizeof(vx->runstate_times));
+
+ if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
+ &vx->runstate_times[0],
+ offset + sizeof(u64),
+ sizeof(vx->runstate_times)))
+ return;
+
+ smp_wmb();
+
+ /*
+ * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
+ * runstate_entry_time field.
+ */
+
+ state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+ if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
+ &state_entry_time, offset,
+ sizeof(state_entry_time)))
+ return;
+}
+
+int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
+{
+ u8 rc = 0;
+
+ /*
+ * If the global upcall vector (HVMIRQ_callback_vector) is set and
+ * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
+ */
+ struct gfn_to_hva_cache *ghc = &v->arch.xen.vcpu_info_cache;
+ struct kvm_memslots *slots = kvm_memslots(v->kvm);
+ unsigned int offset = offsetof(struct vcpu_info, evtchn_upcall_pending);
+
+ /* No need for compat handling here */
+ BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
+ offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
+ BUILD_BUG_ON(sizeof(rc) !=
+ sizeof(((struct vcpu_info *)0)->evtchn_upcall_pending));
+ BUILD_BUG_ON(sizeof(rc) !=
+ sizeof(((struct compat_vcpu_info *)0)->evtchn_upcall_pending));
+
+ /*
+ * For efficiency, this mirrors the checks for using the valid
+ * cache in kvm_read_guest_offset_cached(), but just uses
+	 * __get_user() instead, falling back to the slow path otherwise.
+ */
+ if (likely(slots->generation == ghc->generation &&
+ !kvm_is_error_hva(ghc->hva) && ghc->memslot)) {
+ /* Fast path */
+ __get_user(rc, (u8 __user *)ghc->hva + offset);
+ } else {
+ /* Slow path */
+ kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset,
+ sizeof(rc));
+ }
+
+ return rc;
+}
+
+int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
+{
+ int r = -ENOENT;
+
+ mutex_lock(&kvm->lock);
+
+ switch (data->type) {
+ case KVM_XEN_ATTR_TYPE_LONG_MODE:
+ if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
+ r = -EINVAL;
+ } else {
+ kvm->arch.xen.long_mode = !!data->u.long_mode;
+ r = 0;
+ }
+ break;
+
+ case KVM_XEN_ATTR_TYPE_SHARED_INFO:
+ if (data->u.shared_info.gfn == GPA_INVALID) {
+ kvm->arch.xen.shinfo_set = false;
+ r = 0;
+ break;
+ }
+ r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
+ break;
+
+
+ case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
+ if (data->u.vector && data->u.vector < 0x10)
+ r = -EINVAL;
+ else {
+ kvm->arch.xen.upcall_vector = data->u.vector;
+ r = 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_unlock(&kvm->lock);
+ return r;
+}
+
+int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
+{
+ int r = -ENOENT;
+
+ mutex_lock(&kvm->lock);
+
+ switch (data->type) {
+ case KVM_XEN_ATTR_TYPE_LONG_MODE:
+ data->u.long_mode = kvm->arch.xen.long_mode;
+ r = 0;
+ break;
+
+ case KVM_XEN_ATTR_TYPE_SHARED_INFO:
+ if (kvm->arch.xen.shinfo_set)
+ data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
+ else
+ data->u.shared_info.gfn = GPA_INVALID;
+ r = 0;
+ break;
+
+ case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
+ data->u.vector = kvm->arch.xen.upcall_vector;
+ r = 0;
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_unlock(&kvm->lock);
+ return r;
+}
+
+int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
+{
+ int idx, r = -ENOENT;
+
+ mutex_lock(&vcpu->kvm->lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ switch (data->type) {
+ case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
+ /* No compat necessary here. */
+ BUILD_BUG_ON(sizeof(struct vcpu_info) !=
+ sizeof(struct compat_vcpu_info));
+ BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
+ offsetof(struct compat_vcpu_info, time));
+
+ if (data->u.gpa == GPA_INVALID) {
+ vcpu->arch.xen.vcpu_info_set = false;
+ r = 0;
+ break;
+ }
+
+ r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.xen.vcpu_info_cache,
+ data->u.gpa,
+ sizeof(struct vcpu_info));
+ if (!r) {
+ vcpu->arch.xen.vcpu_info_set = true;
+ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+ }
+ break;
+
+ case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
+ if (data->u.gpa == GPA_INVALID) {
+ vcpu->arch.xen.vcpu_time_info_set = false;
+ r = 0;
+ break;
+ }
+
+ r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.xen.vcpu_time_info_cache,
+ data->u.gpa,
+ sizeof(struct pvclock_vcpu_time_info));
+ if (!r) {
+ vcpu->arch.xen.vcpu_time_info_set = true;
+ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+ }
+ break;
+
+ case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
+ if (!sched_info_on()) {
+ r = -EOPNOTSUPP;
+ break;
+ }
+ if (data->u.gpa == GPA_INVALID) {
+ vcpu->arch.xen.runstate_set = false;
+ r = 0;
+ break;
+ }
+
+ r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.xen.runstate_cache,
+ data->u.gpa,
+ sizeof(struct vcpu_runstate_info));
+ if (!r) {
+ vcpu->arch.xen.runstate_set = true;
+ }
+ break;
+
+ case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
+ if (!sched_info_on()) {
+ r = -EOPNOTSUPP;
+ break;
+ }
+ if (data->u.runstate.state > RUNSTATE_offline) {
+ r = -EINVAL;
+ break;
+ }
+
+ kvm_xen_update_runstate(vcpu, data->u.runstate.state);
+ r = 0;
+ break;
+
+ case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
+ if (!sched_info_on()) {
+ r = -EOPNOTSUPP;
+ break;
+ }
+ if (data->u.runstate.state > RUNSTATE_offline) {
+ r = -EINVAL;
+ break;
+ }
+ if (data->u.runstate.state_entry_time !=
+ (data->u.runstate.time_running +
+ data->u.runstate.time_runnable +
+ data->u.runstate.time_blocked +
+ data->u.runstate.time_offline)) {
+ r = -EINVAL;
+ break;
+ }
+ if (get_kvmclock_ns(vcpu->kvm) <
+ data->u.runstate.state_entry_time) {
+ r = -EINVAL;
+ break;
+ }
+
+ vcpu->arch.xen.current_runstate = data->u.runstate.state;
+ vcpu->arch.xen.runstate_entry_time =
+ data->u.runstate.state_entry_time;
+ vcpu->arch.xen.runstate_times[RUNSTATE_running] =
+ data->u.runstate.time_running;
+ vcpu->arch.xen.runstate_times[RUNSTATE_runnable] =
+ data->u.runstate.time_runnable;
+ vcpu->arch.xen.runstate_times[RUNSTATE_blocked] =
+ data->u.runstate.time_blocked;
+ vcpu->arch.xen.runstate_times[RUNSTATE_offline] =
+ data->u.runstate.time_offline;
+ vcpu->arch.xen.last_steal = current->sched_info.run_delay;
+ r = 0;
+ break;
+
+ case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
+ if (!sched_info_on()) {
+ r = -EOPNOTSUPP;
+ break;
+ }
+ if (data->u.runstate.state > RUNSTATE_offline &&
+ data->u.runstate.state != (u64)-1) {
+ r = -EINVAL;
+ break;
+ }
+ /* The adjustment must add up */
+ if (data->u.runstate.state_entry_time !=
+ (data->u.runstate.time_running +
+ data->u.runstate.time_runnable +
+ data->u.runstate.time_blocked +
+ data->u.runstate.time_offline)) {
+ r = -EINVAL;
+ break;
+ }
+
+ if (get_kvmclock_ns(vcpu->kvm) <
+ (vcpu->arch.xen.runstate_entry_time +
+ data->u.runstate.state_entry_time)) {
+ r = -EINVAL;
+ break;
+ }
+
+ vcpu->arch.xen.runstate_entry_time +=
+ data->u.runstate.state_entry_time;
+ vcpu->arch.xen.runstate_times[RUNSTATE_running] +=
+ data->u.runstate.time_running;
+ vcpu->arch.xen.runstate_times[RUNSTATE_runnable] +=
+ data->u.runstate.time_runnable;
+ vcpu->arch.xen.runstate_times[RUNSTATE_blocked] +=
+ data->u.runstate.time_blocked;
+ vcpu->arch.xen.runstate_times[RUNSTATE_offline] +=
+ data->u.runstate.time_offline;
+
+ if (data->u.runstate.state <= RUNSTATE_offline)
+ kvm_xen_update_runstate(vcpu, data->u.runstate.state);
+ r = 0;
+ break;
+
+ default:
+ break;
+ }
+
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ mutex_unlock(&vcpu->kvm->lock);
+ return r;
+}
+
+int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
+{
+ int r = -ENOENT;
+
+ mutex_lock(&vcpu->kvm->lock);
+
+ switch (data->type) {
+ case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
+ if (vcpu->arch.xen.vcpu_info_set)
+ data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
+ else
+ data->u.gpa = GPA_INVALID;
+ r = 0;
+ break;
+
+ case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
+ if (vcpu->arch.xen.vcpu_time_info_set)
+ data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
+ else
+ data->u.gpa = GPA_INVALID;
+ r = 0;
+ break;
+
+ case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
+ if (!sched_info_on()) {
+ r = -EOPNOTSUPP;
+ break;
+ }
+ if (vcpu->arch.xen.runstate_set) {
+ data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
+ r = 0;
+ }
+ break;
+
+ case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
+ if (!sched_info_on()) {
+ r = -EOPNOTSUPP;
+ break;
+ }
+ data->u.runstate.state = vcpu->arch.xen.current_runstate;
+ r = 0;
+ break;
+
+ case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
+ if (!sched_info_on()) {
+ r = -EOPNOTSUPP;
+ break;
+ }
+ data->u.runstate.state = vcpu->arch.xen.current_runstate;
+ data->u.runstate.state_entry_time =
+ vcpu->arch.xen.runstate_entry_time;
+ data->u.runstate.time_running =
+ vcpu->arch.xen.runstate_times[RUNSTATE_running];
+ data->u.runstate.time_runnable =
+ vcpu->arch.xen.runstate_times[RUNSTATE_runnable];
+ data->u.runstate.time_blocked =
+ vcpu->arch.xen.runstate_times[RUNSTATE_blocked];
+ data->u.runstate.time_offline =
+ vcpu->arch.xen.runstate_times[RUNSTATE_offline];
+ r = 0;
+ break;
+
+ case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
+ r = -EINVAL;
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_unlock(&vcpu->kvm->lock);
+ return r;
+}
+
+int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
+{
+ struct kvm *kvm = vcpu->kvm;
+ u32 page_num = data & ~PAGE_MASK;
+ u64 page_addr = data & PAGE_MASK;
+ bool lm = is_long_mode(vcpu);
+
+ /* Latch long_mode for shared_info pages etc. */
+ vcpu->kvm->arch.xen.long_mode = lm;
+
+ /*
+ * If Xen hypercall intercept is enabled, fill the hypercall
+ * page with VMCALL/VMMCALL instructions since that's what
+ * we catch. Else the VMM has provided the hypercall pages
+ * with instructions of its own choosing, so use those.
+ */
+ if (kvm_xen_hypercall_enabled(kvm)) {
+ u8 instructions[32];
+ int i;
+
+ if (page_num)
+ return 1;
+
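+		/*
+		 * Each 32-byte slot i of the hypercall page is filled with:
+		 * mov $i, %eax (5 bytes), vmcall/vmmcall (3 bytes at offset
+		 * 5), ret at offset 8, then int3 padding to 32 bytes.
+		 */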
+ /* mov imm32, %eax */
+ instructions[0] = 0xb8;
+
+ /* vmcall / vmmcall */
+ kvm_x86_ops.patch_hypercall(vcpu, instructions + 5);
+
+ /* ret */
+ instructions[8] = 0xc3;
+
+ /* int3 to pad */
+ memset(instructions + 9, 0xcc, sizeof(instructions) - 9);
+
+ for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
+ *(u32 *)&instructions[1] = i;
+ if (kvm_vcpu_write_guest(vcpu,
+ page_addr + (i * sizeof(instructions)),
+ instructions, sizeof(instructions)))
+ return 1;
+ }
+ } else {
+ /*
+ * Note, truncation is a non-issue as 'lm' is guaranteed to be
+ * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
+ */
+ hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
+ : kvm->arch.xen_hvm_config.blob_addr_32;
+ u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
+ : kvm->arch.xen_hvm_config.blob_size_32;
+ u8 *page;
+
+ if (page_num >= blob_size)
+ return 1;
+
+ blob_addr += page_num * PAGE_SIZE;
+
+ page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
+ kfree(page);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
+{
+ if (xhc->flags & ~KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL)
+ return -EINVAL;
+
+ /*
+ * With hypercall interception the kernel generates its own
+ * hypercall page so it must not be provided.
+ */
+ if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
+ (xhc->blob_addr_32 || xhc->blob_addr_64 ||
+ xhc->blob_size_32 || xhc->blob_size_64))
+ return -EINVAL;
+
+ mutex_lock(&kvm->lock);
+
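+	/*
+	 * kvm_xen_enabled is a deferred static key: it keeps the Xen MSR
+	 * and hypercall checks cheap while unused, and rate-limits the
+	 * disable side so toggling doesn't thrash text patching.
+	 */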
+ if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
+ static_branch_inc(&kvm_xen_enabled.key);
+ else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
+ static_branch_slow_dec_deferred(&kvm_xen_enabled);
+
+ memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));
+
+ mutex_unlock(&kvm->lock);
+ return 0;
+}
+
+void kvm_xen_destroy_vm(struct kvm *kvm)
+{
+ if (kvm->arch.xen_hvm_config.msr)
+ static_branch_slow_dec_deferred(&kvm_xen_enabled);
+}
+
+static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
+{
+ kvm_rax_write(vcpu, result);
+ return kvm_skip_emulated_instruction(vcpu);
+}
+
+static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+
+ if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
+ return 1;
+
+ return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
+}
+
+int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
+{
+ bool longmode;
+ u64 input, params[6];
+
+ input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);
+
+ /* Hyper-V hypercalls get bit 31 set in EAX */
+ if ((input & 0x80000000) &&
+ kvm_hv_hypercall_enabled(vcpu))
+ return kvm_hv_hypercall(vcpu);
+
+ longmode = is_64_bit_mode(vcpu);
+ if (!longmode) {
+ params[0] = (u32)kvm_rbx_read(vcpu);
+ params[1] = (u32)kvm_rcx_read(vcpu);
+ params[2] = (u32)kvm_rdx_read(vcpu);
+ params[3] = (u32)kvm_rsi_read(vcpu);
+ params[4] = (u32)kvm_rdi_read(vcpu);
+ params[5] = (u32)kvm_rbp_read(vcpu);
+ }
+#ifdef CONFIG_X86_64
+ else {
+ params[0] = (u64)kvm_rdi_read(vcpu);
+ params[1] = (u64)kvm_rsi_read(vcpu);
+ params[2] = (u64)kvm_rdx_read(vcpu);
+ params[3] = (u64)kvm_r10_read(vcpu);
+ params[4] = (u64)kvm_r8_read(vcpu);
+ params[5] = (u64)kvm_r9_read(vcpu);
+ }
+#endif
+ trace_kvm_xen_hypercall(input, params[0], params[1], params[2],
+ params[3], params[4], params[5]);
+
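+	/*
+	 * Forward the hypercall to userspace via KVM_EXIT_XEN; the result
+	 * filled in by the VMM is propagated back to the guest's RAX by
+	 * kvm_xen_hypercall_complete_userspace() on the next KVM_RUN.
+	 */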
+ vcpu->run->exit_reason = KVM_EXIT_XEN;
+ vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
+ vcpu->run->xen.u.hcall.longmode = longmode;
+ vcpu->run->xen.u.hcall.cpl = kvm_x86_ops.get_cpl(vcpu);
+ vcpu->run->xen.u.hcall.input = input;
+ vcpu->run->xen.u.hcall.params[0] = params[0];
+ vcpu->run->xen.u.hcall.params[1] = params[1];
+ vcpu->run->xen.u.hcall.params[2] = params[2];
+ vcpu->run->xen.u.hcall.params[3] = params[3];
+ vcpu->run->xen.u.hcall.params[4] = params[4];
+ vcpu->run->xen.u.hcall.params[5] = params[5];
+ vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
+ vcpu->arch.complete_userspace_io =
+ kvm_xen_hypercall_complete_userspace;
+
+ return 0;
+}
diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h
new file mode 100644
index 000000000000..463a7844a8ca
--- /dev/null
+++ b/arch/x86/kvm/xen.h
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
+ * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * KVM Xen emulation
+ */
+
+#ifndef __ARCH_X86_KVM_XEN_H__
+#define __ARCH_X86_KVM_XEN_H__
+
+#ifdef CONFIG_KVM_XEN
+#include <linux/jump_label_ratelimit.h>
+
+extern struct static_key_false_deferred kvm_xen_enabled;
+
+int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
+int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
+int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
+int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
+int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
+int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
+void kvm_xen_destroy_vm(struct kvm *kvm);
+
+static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
+{
+ return static_branch_unlikely(&kvm_xen_enabled.key) &&
+ kvm->arch.xen_hvm_config.msr;
+}
+
+static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
+{
+ return static_branch_unlikely(&kvm_xen_enabled.key) &&
+ (kvm->arch.xen_hvm_config.flags &
+ KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
+}
+
+static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ if (static_branch_unlikely(&kvm_xen_enabled.key) &&
+ vcpu->arch.xen.vcpu_info_set && vcpu->kvm->arch.xen.upcall_vector)
+ return __kvm_xen_has_interrupt(vcpu);
+
+ return 0;
+}
+#else
+static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
+{
+ return 1;
+}
+
+static inline void kvm_xen_destroy_vm(struct kvm *kvm)
+{
+}
+
+static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
+{
+ return false;
+}
+
+static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
+{
+ return false;
+}
+
+static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+#endif
+
+int kvm_xen_hypercall(struct kvm_vcpu *vcpu);
+
+#include <asm/pvclock-abi.h>
+#include <asm/xen/interface.h>
+#include <xen/interface/vcpu.h>
+
+void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);
+
+static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
+{
+ kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
+}
+
+static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
+{
+ /*
+ * If the vCPU wasn't preempted but took a normal exit for
+ * some reason (hypercalls, I/O, etc.), that is accounted as
+ * still RUNSTATE_running, as the VMM is still operating on
+ * behalf of the vCPU. Only if the VMM does actually block
+ * does it need to enter RUNSTATE_blocked.
+ */
+ if (vcpu->preempted)
+ kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
+}
+
+/* 32-bit compatibility definitions, also used natively in 32-bit build */
+struct compat_arch_vcpu_info {
+ unsigned int cr2;
+ unsigned int pad[5];
+};
+
+struct compat_vcpu_info {
+ uint8_t evtchn_upcall_pending;
+ uint8_t evtchn_upcall_mask;
+ uint16_t pad;
+ uint32_t evtchn_pending_sel;
+ struct compat_arch_vcpu_info arch;
+ struct pvclock_vcpu_time_info time;
+}; /* 64 bytes (x86) */
+
+struct compat_arch_shared_info {
+ unsigned int max_pfn;
+ unsigned int pfn_to_mfn_frame_list_list;
+ unsigned int nmi_reason;
+ unsigned int p2m_cr3;
+ unsigned int p2m_vaddr;
+ unsigned int p2m_generation;
+ uint32_t wc_sec_hi;
+};
+
+struct compat_shared_info {
+ struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
+ uint32_t evtchn_pending[32];
+ uint32_t evtchn_mask[32];
+ struct pvclock_wall_clock wc;
+ struct compat_arch_shared_info arch;
+};
+
+struct compat_vcpu_runstate_info {
+ int state;
+ uint64_t state_entry_time;
+ uint64_t time[4];
+} __attribute__((packed));
+
+#endif /* __ARCH_X86_KVM_XEN_H__ */
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 404279563891..435630a6ec97 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -5,6 +5,7 @@
* Copyright (C) IBM Corporation, 2002, 2004, 2009
*/
+#include <linux/kernel.h>
#ifdef __KERNEL__
#include <linux/string.h>
#else
@@ -15,15 +16,28 @@
#include <asm/emulate_prefix.h>
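+/*
+ * Instruction bytes are always little-endian; convert them to the host's
+ * byte order so the decoder also works when built for big-endian hosts.
+ */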
+#define leXX_to_cpu(t, r) \
+({ \
+ __typeof__(t) v; \
+ switch (sizeof(t)) { \
+ case 4: v = le32_to_cpu(r); break; \
+ case 2: v = le16_to_cpu(r); break; \
+ case 1: v = r; break; \
+ default: \
+ BUILD_BUG(); break; \
+ } \
+ v; \
+})
+
/* Verify next sizeof(t) bytes can be on the same instruction */
#define validate_next(t, insn, n) \
((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
#define __get_next(t, insn) \
- ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
+ ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
#define __peek_nbyte_next(t, insn, n) \
- ({ t r = *(t*)((insn)->next_byte + n); r; })
+ ({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); })
#define get_next(t, insn) \
({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
@@ -147,9 +161,9 @@ found:
b = insn->prefixes.bytes[3];
for (i = 0; i < nb; i++)
if (prefixes->bytes[i] == lb)
- prefixes->bytes[i] = b;
+ insn_set_byte(prefixes, i, b);
}
- insn->prefixes.bytes[3] = lb;
+ insn_set_byte(&insn->prefixes, 3, lb);
}
/* Decode REX prefix */
@@ -157,8 +171,7 @@ found:
b = peek_next(insn_byte_t, insn);
attr = inat_get_opcode_attribute(b);
if (inat_is_rex_prefix(attr)) {
- insn->rex_prefix.value = b;
- insn->rex_prefix.nbytes = 1;
+ insn_field_set(&insn->rex_prefix, b, 1);
insn->next_byte++;
if (X86_REX_W(b))
/* REX.W overrides opnd_size */
@@ -181,13 +194,13 @@ found:
if (X86_MODRM_MOD(b2) != 3)
goto vex_end;
}
- insn->vex_prefix.bytes[0] = b;
- insn->vex_prefix.bytes[1] = b2;
+ insn_set_byte(&insn->vex_prefix, 0, b);
+ insn_set_byte(&insn->vex_prefix, 1, b2);
if (inat_is_evex_prefix(attr)) {
b2 = peek_nbyte_next(insn_byte_t, insn, 2);
- insn->vex_prefix.bytes[2] = b2;
+ insn_set_byte(&insn->vex_prefix, 2, b2);
b2 = peek_nbyte_next(insn_byte_t, insn, 3);
- insn->vex_prefix.bytes[3] = b2;
+ insn_set_byte(&insn->vex_prefix, 3, b2);
insn->vex_prefix.nbytes = 4;
insn->next_byte += 4;
if (insn->x86_64 && X86_VEX_W(b2))
@@ -195,7 +208,7 @@ found:
insn->opnd_bytes = 8;
} else if (inat_is_vex3_prefix(attr)) {
b2 = peek_nbyte_next(insn_byte_t, insn, 2);
- insn->vex_prefix.bytes[2] = b2;
+ insn_set_byte(&insn->vex_prefix, 2, b2);
insn->vex_prefix.nbytes = 3;
insn->next_byte += 3;
if (insn->x86_64 && X86_VEX_W(b2))
@@ -207,7 +220,7 @@ found:
* Makes it easier to decode vex.W, vex.vvvv,
* vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
*/
- insn->vex_prefix.bytes[2] = b2 & 0x7f;
+ insn_set_byte(&insn->vex_prefix, 2, b2 & 0x7f);
insn->vex_prefix.nbytes = 2;
insn->next_byte += 2;
}
@@ -243,7 +256,7 @@ void insn_get_opcode(struct insn *insn)
/* Get first opcode */
op = get_next(insn_byte_t, insn);
- opcode->bytes[0] = op;
+ insn_set_byte(opcode, 0, op);
opcode->nbytes = 1;
/* Check if there is VEX prefix or not */
@@ -295,8 +308,7 @@ void insn_get_modrm(struct insn *insn)
if (inat_has_modrm(insn->attr)) {
mod = get_next(insn_byte_t, insn);
- modrm->value = mod;
- modrm->nbytes = 1;
+ insn_field_set(modrm, mod, 1);
if (inat_is_group(insn->attr)) {
pfx_id = insn_last_prefix_id(insn);
insn->attr = inat_get_group_attribute(mod, pfx_id,
@@ -334,7 +346,7 @@ int insn_rip_relative(struct insn *insn)
* For rip-relative instructions, the mod field (top 2 bits)
* is zero and the r/m field (bottom 3 bits) is 0x5.
*/
- return (modrm->nbytes && (modrm->value & 0xc7) == 0x5);
+ return (modrm->nbytes && (modrm->bytes[0] & 0xc7) == 0x5);
}
/**
@@ -353,11 +365,11 @@ void insn_get_sib(struct insn *insn)
if (!insn->modrm.got)
insn_get_modrm(insn);
if (insn->modrm.nbytes) {
- modrm = (insn_byte_t)insn->modrm.value;
+ modrm = insn->modrm.bytes[0];
if (insn->addr_bytes != 2 &&
X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
- insn->sib.value = get_next(insn_byte_t, insn);
- insn->sib.nbytes = 1;
+ insn_field_set(&insn->sib,
+ get_next(insn_byte_t, insn), 1);
}
}
insn->sib.got = 1;
@@ -407,19 +419,18 @@ void insn_get_displacement(struct insn *insn)
if (mod == 3)
goto out;
if (mod == 1) {
- insn->displacement.value = get_next(signed char, insn);
- insn->displacement.nbytes = 1;
+ insn_field_set(&insn->displacement,
+ get_next(signed char, insn), 1);
} else if (insn->addr_bytes == 2) {
if ((mod == 0 && rm == 6) || mod == 2) {
- insn->displacement.value =
- get_next(short, insn);
- insn->displacement.nbytes = 2;
+ insn_field_set(&insn->displacement,
+ get_next(short, insn), 2);
}
} else {
if ((mod == 0 && rm == 5) || mod == 2 ||
(mod == 0 && base == 5)) {
- insn->displacement.value = get_next(int, insn);
- insn->displacement.nbytes = 4;
+ insn_field_set(&insn->displacement,
+ get_next(int, insn), 4);
}
}
}
@@ -435,18 +446,14 @@ static int __get_moffset(struct insn *insn)
{
switch (insn->addr_bytes) {
case 2:
- insn->moffset1.value = get_next(short, insn);
- insn->moffset1.nbytes = 2;
+ insn_field_set(&insn->moffset1, get_next(short, insn), 2);
break;
case 4:
- insn->moffset1.value = get_next(int, insn);
- insn->moffset1.nbytes = 4;
+ insn_field_set(&insn->moffset1, get_next(int, insn), 4);
break;
case 8:
- insn->moffset1.value = get_next(int, insn);
- insn->moffset1.nbytes = 4;
- insn->moffset2.value = get_next(int, insn);
- insn->moffset2.nbytes = 4;
+ insn_field_set(&insn->moffset1, get_next(int, insn), 4);
+ insn_field_set(&insn->moffset2, get_next(int, insn), 4);
break;
default: /* opnd_bytes must be modified manually */
goto err_out;
@@ -464,13 +471,11 @@ static int __get_immv32(struct insn *insn)
{
switch (insn->opnd_bytes) {
case 2:
- insn->immediate.value = get_next(short, insn);
- insn->immediate.nbytes = 2;
+ insn_field_set(&insn->immediate, get_next(short, insn), 2);
break;
case 4:
case 8:
- insn->immediate.value = get_next(int, insn);
- insn->immediate.nbytes = 4;
+ insn_field_set(&insn->immediate, get_next(int, insn), 4);
break;
default: /* opnd_bytes must be modified manually */
goto err_out;
@@ -487,18 +492,15 @@ static int __get_immv(struct insn *insn)
{
switch (insn->opnd_bytes) {
case 2:
- insn->immediate1.value = get_next(short, insn);
- insn->immediate1.nbytes = 2;
+ insn_field_set(&insn->immediate1, get_next(short, insn), 2);
break;
case 4:
- insn->immediate1.value = get_next(int, insn);
+ insn_field_set(&insn->immediate1, get_next(int, insn), 4);
insn->immediate1.nbytes = 4;
break;
case 8:
- insn->immediate1.value = get_next(int, insn);
- insn->immediate1.nbytes = 4;
- insn->immediate2.value = get_next(int, insn);
- insn->immediate2.nbytes = 4;
+ insn_field_set(&insn->immediate1, get_next(int, insn), 4);
+ insn_field_set(&insn->immediate2, get_next(int, insn), 4);
break;
default: /* opnd_bytes must be modified manually */
goto err_out;
@@ -515,12 +517,10 @@ static int __get_immptr(struct insn *insn)
{
switch (insn->opnd_bytes) {
case 2:
- insn->immediate1.value = get_next(short, insn);
- insn->immediate1.nbytes = 2;
+ insn_field_set(&insn->immediate1, get_next(short, insn), 2);
break;
case 4:
- insn->immediate1.value = get_next(int, insn);
- insn->immediate1.nbytes = 4;
+ insn_field_set(&insn->immediate1, get_next(int, insn), 4);
break;
case 8:
/* ptr16:64 is not exist (no segment) */
@@ -528,8 +528,7 @@ static int __get_immptr(struct insn *insn)
default: /* opnd_bytes must be modified manually */
goto err_out;
}
- insn->immediate2.value = get_next(unsigned short, insn);
- insn->immediate2.nbytes = 2;
+ insn_field_set(&insn->immediate2, get_next(unsigned short, insn), 2);
insn->immediate1.got = insn->immediate2.got = 1;
return 1;
@@ -565,22 +564,17 @@ void insn_get_immediate(struct insn *insn)
switch (inat_immediate_size(insn->attr)) {
case INAT_IMM_BYTE:
- insn->immediate.value = get_next(signed char, insn);
- insn->immediate.nbytes = 1;
+ insn_field_set(&insn->immediate, get_next(signed char, insn), 1);
break;
case INAT_IMM_WORD:
- insn->immediate.value = get_next(short, insn);
- insn->immediate.nbytes = 2;
+ insn_field_set(&insn->immediate, get_next(short, insn), 2);
break;
case INAT_IMM_DWORD:
- insn->immediate.value = get_next(int, insn);
- insn->immediate.nbytes = 4;
+ insn_field_set(&insn->immediate, get_next(int, insn), 4);
break;
case INAT_IMM_QWORD:
- insn->immediate1.value = get_next(int, insn);
- insn->immediate1.nbytes = 4;
- insn->immediate2.value = get_next(int, insn);
- insn->immediate2.nbytes = 4;
+ insn_field_set(&insn->immediate1, get_next(int, insn), 4);
+ insn_field_set(&insn->immediate2, get_next(int, insn), 4);
break;
case INAT_IMM_PTR:
if (!__get_immptr(insn))
@@ -599,8 +593,7 @@ void insn_get_immediate(struct insn *insn)
goto err_out;
}
if (inat_has_second_immediate(insn->attr)) {
- insn->immediate2.value = get_next(signed char, insn);
- insn->immediate2.nbytes = 1;
+ insn_field_set(&insn->immediate2, get_next(signed char, insn), 1);
}
done:
insn->immediate.got = 1;
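The leXX_to_cpu() dispatch added above makes the shared decoder usable on big-endian hosts: instruction bytes are always little-endian, so multi-byte fields read through __get_next()/__peek_nbyte_next() need a byte swap there. A rough sketch of what __get_next(short, insn) now boils down to, assuming the usual le16_to_cpu() behaviour (identity on x86, swap on big-endian builds):

/* Illustration only: the post-change behaviour of __get_next(short, insn). */
static short example_get_next_short(struct insn *insn)
{
	short r = *(short *)insn->next_byte;	/* raw little-endian bytes */

	insn->next_byte += sizeof(short);
	return le16_to_cpu(r);			/* no-op on x86, swap on BE hosts */
}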
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index b4c43a9b1483..f6fb1d218dcc 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -28,7 +28,7 @@ SYM_FUNC_START_NOALIGN(__x86_retpoline_\reg)
jmp .Lspec_trap_\@
.Ldo_rop_\@:
mov %\reg, (%_ASM_SP)
- UNWIND_HINT_RET_OFFSET
+ UNWIND_HINT_FUNC
ret
SYM_FUNC_END(__x86_retpoline_\reg)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index f1f1b5a0956a..a73347e2cdfc 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -9,6 +9,7 @@
#include <linux/kdebug.h> /* oops_begin/end, ... */
#include <linux/extable.h> /* search_exception_tables */
#include <linux/memblock.h> /* max_low_pfn */
+#include <linux/kfence.h> /* kfence_handle_page_fault */
#include <linux/kprobes.h> /* NOKPROBE_SYMBOL, ... */
#include <linux/mmiotrace.h> /* kmmio_handler, ... */
#include <linux/perf_event.h> /* perf_sw_event */
@@ -16,7 +17,7 @@
#include <linux/prefetch.h> /* prefetchw */
#include <linux/context_tracking.h> /* exception_enter(), ... */
#include <linux/uaccess.h> /* faulthandler_disabled() */
-#include <linux/efi.h> /* efi_recover_from_page_fault()*/
+#include <linux/efi.h> /* efi_crash_gracefully_on_page_fault()*/
#include <linux/mm_types.h>
#include <asm/cpufeature.h> /* boot_cpu_has, ... */
@@ -25,7 +26,7 @@
#include <asm/vsyscall.h> /* emulate_vsyscall */
#include <asm/vm86.h> /* struct vm86 */
#include <asm/mmu_context.h> /* vma_pkey() */
-#include <asm/efi.h> /* efi_recover_from_page_fault()*/
+#include <asm/efi.h> /* efi_crash_gracefully_on_page_fault()*/
#include <asm/desc.h> /* store_idt(), ... */
#include <asm/cpu_entry_area.h> /* exception stack */
#include <asm/pgtable_areas.h> /* VMALLOC_START, ... */
@@ -54,7 +55,7 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr)
* 32-bit mode:
*
* Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
- * Check that here and ignore it.
+ * Check that here and ignore it. This is AMD erratum #91.
*
* 64-bit mode:
*
@@ -83,11 +84,7 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
#ifdef CONFIG_X86_64
case 0x40:
/*
- * In AMD64 long mode 0x40..0x4F are valid REX prefixes
- * Need to figure out under what instruction mode the
- * instruction was issued. Could check the LDT for lm,
- * but for now it's good enough to assume that long
- * mode only uses well known segments or kernel.
+ * In 64-bit mode 0x40..0x4F are valid REX prefixes
*/
return (!user_mode(regs) || user_64bit_mode(regs));
#endif
@@ -110,6 +107,15 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
}
}
+static bool is_amd_k8_pre_npt(void)
+{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+
+ return unlikely(IS_ENABLED(CONFIG_CPU_SUP_AMD) &&
+ c->x86_vendor == X86_VENDOR_AMD &&
+ c->x86 == 0xf && c->x86_model < 0x40);
+}
+
static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
@@ -117,6 +123,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
unsigned char *instr;
int prefetch = 0;
+ /* Erratum #91 affects AMD K8, pre-NPT CPUs */
+ if (!is_amd_k8_pre_npt())
+ return 0;
+
/*
* If it was a exec (instruction fetch) fault on NX page, then
* do not ignore the fault:
@@ -127,20 +137,31 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
instr = (void *)convert_ip_to_linear(current, regs);
max_instr = instr + 15;
- if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
- return 0;
+ /*
+ * This code has historically always bailed out if IP points to a
+ * not-present page (e.g. due to a race). No one has ever
+ * complained about this.
+ */
+ pagefault_disable();
while (instr < max_instr) {
unsigned char opcode;
- if (get_kernel_nofault(opcode, instr))
- break;
+ if (user_mode(regs)) {
+ if (get_user(opcode, instr))
+ break;
+ } else {
+ if (get_kernel_nofault(opcode, instr))
+ break;
+ }
instr++;
if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
break;
}
+
+ pagefault_enable();
return prefetch;
}
@@ -262,25 +283,6 @@ void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
}
}
-/*
- * Did it hit the DOS screen memory VA from vm86 mode?
- */
-static inline void
-check_v8086_mode(struct pt_regs *regs, unsigned long address,
- struct task_struct *tsk)
-{
-#ifdef CONFIG_VM86
- unsigned long bit;
-
- if (!v8086_mode(regs) || !tsk->thread.vm86)
- return;
-
- bit = (address - 0xA0000) >> PAGE_SHIFT;
- if (bit < 32)
- tsk->thread.vm86->screen_bitmap |= 1 << bit;
-#endif
-}
-
static bool low_pfn(unsigned long pfn)
{
return pfn < max_low_pfn;
@@ -335,15 +337,6 @@ KERN_ERR
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif
-/*
- * No vm86 mode in 64-bit mode:
- */
-static inline void
-check_v8086_mode(struct pt_regs *regs, unsigned long address,
- struct task_struct *tsk)
-{
-}
-
static int bad_address(void *p)
{
unsigned long dummy;
@@ -427,6 +420,9 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
|| boot_cpu_data.x86 != 0xf)
return 0;
+ if (user_mode(regs))
+ return 0;
+
if (address != regs->ip)
return 0;
@@ -462,10 +458,12 @@ static int is_errata100(struct pt_regs *regs, unsigned long address)
}
/* Pentium F0 0F C7 C8 bug workaround: */
-static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
+static int is_f00f_bug(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
- if (boot_cpu_has_bug(X86_BUG_F00F) && idt_is_f00f_address(address)) {
+ if (boot_cpu_has_bug(X86_BUG_F00F) && !(error_code & X86_PF_USER) &&
+ idt_is_f00f_address(address)) {
handle_invalid_op(regs);
return 1;
}
@@ -630,53 +628,20 @@ static void set_signal_archinfo(unsigned long address,
}
static noinline void
-no_context(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, int signal, int si_code)
+page_fault_oops(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
{
- struct task_struct *tsk = current;
unsigned long flags;
int sig;
if (user_mode(regs)) {
/*
- * This is an implicit supervisor-mode access from user
- * mode. Bypass all the kernel-mode recovery code and just
- * OOPS.
+ * Implicit kernel access from user mode? Skip the stack
+ * overflow and EFI special cases.
*/
goto oops;
}
- /* Are we prepared to handle this kernel fault? */
- if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
- /*
- * Any interrupt that takes a fault gets the fixup. This makes
- * the below recursive fault logic only apply to a faults from
- * task context.
- */
- if (in_interrupt())
- return;
-
- /*
- * Per the above we're !in_interrupt(), aka. task context.
- *
- * In this case we need to make sure we're not recursively
- * faulting through the emulate_vsyscall() logic.
- */
- if (current->thread.sig_on_uaccess_err && signal) {
- sanitize_error_code(address, &error_code);
-
- set_signal_archinfo(address, error_code);
-
- /* XXX: hwpoison faults will set the wrong code. */
- force_sig_fault(signal, si_code, (void __user *)address);
- }
-
- /*
- * Barring that, we can do the fixup and be happy.
- */
- return;
- }
-
#ifdef CONFIG_VMAP_STACK
/*
* Stack overflow? During boot, we can fault near the initial
@@ -684,8 +649,8 @@ no_context(struct pt_regs *regs, unsigned long error_code,
* that we're in vmalloc space to avoid this.
*/
if (is_vmalloc_addr((void *)address) &&
- (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
- address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
+ (((unsigned long)current->stack - 1 - address < PAGE_SIZE) ||
+ address - ((unsigned long)current->stack + THREAD_SIZE) < PAGE_SIZE)) {
unsigned long stack = __this_cpu_ist_top_va(DF) - sizeof(void *);
/*
* We're likely to be running with very little stack space
@@ -709,29 +674,18 @@ no_context(struct pt_regs *regs, unsigned long error_code,
#endif
/*
- * 32-bit:
- *
- * Valid to do another page fault here, because if this fault
- * had been triggered by is_prefetch fixup_exception would have
- * handled it.
- *
- * 64-bit:
- *
- * Hall of shame of CPU/BIOS bugs.
+ * Buggy firmware could access regions which might page fault. If
+ * this happens, EFI has a special OOPS path that will try to
+ * avoid hanging the system.
*/
- if (is_prefetch(regs, error_code, address))
- return;
+ if (IS_ENABLED(CONFIG_EFI))
+ efi_crash_gracefully_on_page_fault(address);
- if (is_errata93(regs, address))
+ /* Only not-present faults should be handled by KFENCE. */
+ if (!(error_code & X86_PF_PROT) &&
+ kfence_handle_page_fault(address, error_code & X86_PF_WRITE, regs))
return;
- /*
- * Buggy firmware could access regions which might page fault, try to
- * recover from such faults.
- */
- if (IS_ENABLED(CONFIG_EFI))
- efi_recover_from_page_fault(address);
-
oops:
/*
* Oops. The kernel tried to access some bad page. We'll have to
@@ -741,7 +695,7 @@ oops:
show_fault_oops(regs, error_code, address);
- if (task_stack_end_corrupted(tsk))
+ if (task_stack_end_corrupted(current))
printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
sig = SIGKILL;
@@ -754,6 +708,53 @@ oops:
oops_end(flags, regs, sig);
}
+static noinline void
+kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address, int signal, int si_code)
+{
+ WARN_ON_ONCE(user_mode(regs));
+
+ /* Are we prepared to handle this kernel fault? */
+ if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
+ /*
+ * Any interrupt that takes a fault gets the fixup. This makes
+ * the below recursive fault logic only apply to a faults from
+ * task context.
+ */
+ if (in_interrupt())
+ return;
+
+ /*
+ * Per the above we're !in_interrupt(), aka. task context.
+ *
+ * In this case we need to make sure we're not recursively
+ * faulting through the emulate_vsyscall() logic.
+ */
+ if (current->thread.sig_on_uaccess_err && signal) {
+ sanitize_error_code(address, &error_code);
+
+ set_signal_archinfo(address, error_code);
+
+ /* XXX: hwpoison faults will set the wrong code. */
+ force_sig_fault(signal, si_code, (void __user *)address);
+ }
+
+ /*
+ * Barring that, we can do the fixup and be happy.
+ */
+ return;
+ }
+
+ /*
+ * AMD erratum #91 manifests as a spurious page fault on a PREFETCH
+ * instruction.
+ */
+ if (is_prefetch(regs, error_code, address))
+ return;
+
+ page_fault_oops(regs, error_code, address);
+}
+
/*
* Print out info about fatal segfaults, if the show_unhandled_signals
* sysctl is set:
@@ -796,47 +797,49 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
{
struct task_struct *tsk = current;
- /* User mode accesses just cause a SIGSEGV */
- if (user_mode(regs) && (error_code & X86_PF_USER)) {
- /*
- * It's possible to have interrupts off here:
- */
- local_irq_enable();
-
- /*
- * Valid to do another page fault here because this one came
- * from user space:
- */
- if (is_prefetch(regs, error_code, address))
- return;
+ if (!user_mode(regs)) {
+ kernelmode_fixup_or_oops(regs, error_code, address, pkey, si_code);
+ return;
+ }
- if (is_errata100(regs, address))
- return;
+ if (!(error_code & X86_PF_USER)) {
+ /* Implicit user access to kernel memory -- just oops */
+ page_fault_oops(regs, error_code, address);
+ return;
+ }
- sanitize_error_code(address, &error_code);
+ /*
+ * User mode accesses just cause a SIGSEGV.
+ * It's possible to have interrupts off here:
+ */
+ local_irq_enable();
- if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
- return;
+ /*
+ * Valid to do another page fault here because this one came
+ * from user space:
+ */
+ if (is_prefetch(regs, error_code, address))
+ return;
- if (likely(show_unhandled_signals))
- show_signal_msg(regs, error_code, address, tsk);
+ if (is_errata100(regs, address))
+ return;
- set_signal_archinfo(address, error_code);
+ sanitize_error_code(address, &error_code);
- if (si_code == SEGV_PKUERR)
- force_sig_pkuerr((void __user *)address, pkey);
+ if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
+ return;
- force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+ if (likely(show_unhandled_signals))
+ show_signal_msg(regs, error_code, address, tsk);
- local_irq_disable();
+ set_signal_archinfo(address, error_code);
- return;
- }
+ if (si_code == SEGV_PKUERR)
+ force_sig_pkuerr((void __user *)address, pkey);
- if (is_f00f_bug(regs, address))
- return;
+ force_sig_fault(SIGSEGV, si_code, (void __user *)address);
- no_context(regs, error_code, address, SIGSEGV, si_code);
+ local_irq_disable();
}
static noinline void
@@ -926,8 +929,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
vm_fault_t fault)
{
/* Kernel mode? Handle exceptions or die: */
- if (!(error_code & X86_PF_USER)) {
- no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
+ if (!user_mode(regs)) {
+ kernelmode_fixup_or_oops(regs, error_code, address, SIGBUS, BUS_ADRERR);
return;
}
@@ -961,40 +964,6 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
}
-static noinline void
-mm_fault_error(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, vm_fault_t fault)
-{
- if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
- no_context(regs, error_code, address, 0, 0);
- return;
- }
-
- if (fault & VM_FAULT_OOM) {
- /* Kernel mode? Handle exceptions or die: */
- if (!(error_code & X86_PF_USER)) {
- no_context(regs, error_code, address,
- SIGSEGV, SEGV_MAPERR);
- return;
- }
-
- /*
- * We ran out of memory, call the OOM killer, and return the
- * userspace (which will retry the fault, or kill us if we got
- * oom-killed):
- */
- pagefault_out_of_memory();
- } else {
- if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
- VM_FAULT_HWPOISON_LARGE))
- do_sigbus(regs, error_code, address, fault);
- else if (fault & VM_FAULT_SIGSEGV)
- bad_area_nosemaphore(regs, error_code, address);
- else
- BUG();
- }
-}
-
static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
{
if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
@@ -1209,6 +1178,9 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
}
#endif
+ if (is_f00f_bug(regs, hw_error_code, address))
+ return;
+
/* Was the fault spurious, caused by lazy TLB invalidation? */
if (spurious_kernel_fault(hw_error_code, address))
return;
@@ -1229,10 +1201,17 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
}
NOKPROBE_SYMBOL(do_kern_addr_fault);
-/* Handle faults in the user portion of the address space */
+/*
+ * Handle faults in the user portion of the address space. Nothing in here
+ * should check X86_PF_USER without a specific justification: for almost
+ * all purposes, we should treat a normal kernel access to user memory
+ * (e.g. get_user(), put_user(), etc.) the same as the WRUSS instruction.
+ * The one exception is AC flag handling, which is, per the x86
+ * architecture, special for WRUSS.
+ */
static inline
void do_user_addr_fault(struct pt_regs *regs,
- unsigned long hw_error_code,
+ unsigned long error_code,
unsigned long address)
{
struct vm_area_struct *vma;
@@ -1244,6 +1223,21 @@ void do_user_addr_fault(struct pt_regs *regs,
tsk = current;
mm = tsk->mm;
+ if (unlikely((error_code & (X86_PF_USER | X86_PF_INSTR)) == X86_PF_INSTR)) {
+ /*
+ * Whoops, this is kernel mode code trying to execute from
+ * user memory. Unless this is AMD erratum #93, which
+ * corrupts RIP such that it looks like a user address,
+ * this is unrecoverable. Don't even try to look up the
+ * VMA or look for extable entries.
+ */
+ if (is_errata93(regs, address))
+ return;
+
+ page_fault_oops(regs, error_code, address);
+ return;
+ }
+
/* kprobes don't want to hook the spurious faults: */
if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF)))
return;
@@ -1252,8 +1246,8 @@ void do_user_addr_fault(struct pt_regs *regs,
* Reserved bits are never expected to be set on
* entries in the user portion of the page tables.
*/
- if (unlikely(hw_error_code & X86_PF_RSVD))
- pgtable_bad(regs, hw_error_code, address);
+ if (unlikely(error_code & X86_PF_RSVD))
+ pgtable_bad(regs, error_code, address);
/*
* If SMAP is on, check for invalid kernel (supervisor) access to user
@@ -1263,10 +1257,13 @@ void do_user_addr_fault(struct pt_regs *regs,
* enforcement appears to be consistent with the USER bit.
*/
if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
- !(hw_error_code & X86_PF_USER) &&
- !(regs->flags & X86_EFLAGS_AC)))
- {
- bad_area_nosemaphore(regs, hw_error_code, address);
+ !(error_code & X86_PF_USER) &&
+ !(regs->flags & X86_EFLAGS_AC))) {
+ /*
+ * No extable entry here. This was a kernel access to an
+ * invalid pointer. get_kernel_nofault() will not get here.
+ */
+ page_fault_oops(regs, error_code, address);
return;
}
@@ -1275,7 +1272,7 @@ void do_user_addr_fault(struct pt_regs *regs,
* in a region with pagefaults disabled then we must not take the fault
*/
if (unlikely(faulthandler_disabled() || !mm)) {
- bad_area_nosemaphore(regs, hw_error_code, address);
+ bad_area_nosemaphore(regs, error_code, address);
return;
}
@@ -1296,9 +1293,9 @@ void do_user_addr_fault(struct pt_regs *regs,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- if (hw_error_code & X86_PF_WRITE)
+ if (error_code & X86_PF_WRITE)
flags |= FAULT_FLAG_WRITE;
- if (hw_error_code & X86_PF_INSTR)
+ if (error_code & X86_PF_INSTR)
flags |= FAULT_FLAG_INSTRUCTION;
#ifdef CONFIG_X86_64
@@ -1314,7 +1311,7 @@ void do_user_addr_fault(struct pt_regs *regs,
* to consider the PF_PK bit.
*/
if (is_vsyscall_vaddr(address)) {
- if (emulate_vsyscall(hw_error_code, regs, address))
+ if (emulate_vsyscall(error_code, regs, address))
return;
}
#endif
@@ -1337,7 +1334,7 @@ void do_user_addr_fault(struct pt_regs *regs,
* Fault from code in kernel from
* which we do not expect faults.
*/
- bad_area_nosemaphore(regs, hw_error_code, address);
+ bad_area_nosemaphore(regs, error_code, address);
return;
}
retry:
@@ -1353,17 +1350,17 @@ retry:
vma = find_vma(mm, address);
if (unlikely(!vma)) {
- bad_area(regs, hw_error_code, address);
+ bad_area(regs, error_code, address);
return;
}
if (likely(vma->vm_start <= address))
goto good_area;
if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
- bad_area(regs, hw_error_code, address);
+ bad_area(regs, error_code, address);
return;
}
if (unlikely(expand_stack(vma, address))) {
- bad_area(regs, hw_error_code, address);
+ bad_area(regs, error_code, address);
return;
}
@@ -1372,8 +1369,8 @@ retry:
* we can handle it..
*/
good_area:
- if (unlikely(access_error(hw_error_code, vma))) {
- bad_area_access_error(regs, hw_error_code, address, vma);
+ if (unlikely(access_error(error_code, vma))) {
+ bad_area_access_error(regs, error_code, address, vma);
return;
}
@@ -1392,11 +1389,14 @@ good_area:
*/
fault = handle_mm_fault(vma, address, flags, regs);
- /* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
+ /*
+ * Quick path to respond to signals. The core mm code
+ * has unlocked the mm for us if we get here.
+ */
if (!user_mode(regs))
- no_context(regs, hw_error_code, address, SIGBUS,
- BUS_ADRERR);
+ kernelmode_fixup_or_oops(regs, error_code, address,
+ SIGBUS, BUS_ADRERR);
return;
}
@@ -1412,12 +1412,37 @@ good_area:
}
mmap_read_unlock(mm);
- if (unlikely(fault & VM_FAULT_ERROR)) {
- mm_fault_error(regs, hw_error_code, address, fault);
+ if (likely(!(fault & VM_FAULT_ERROR)))
+ return;
+
+ if (fatal_signal_pending(current) && !user_mode(regs)) {
+ kernelmode_fixup_or_oops(regs, error_code, address, 0, 0);
return;
}
- check_v8086_mode(regs, address, tsk);
+ if (fault & VM_FAULT_OOM) {
+ /* Kernel mode? Handle exceptions or die: */
+ if (!user_mode(regs)) {
+ kernelmode_fixup_or_oops(regs, error_code, address,
+ SIGSEGV, SEGV_MAPERR);
+ return;
+ }
+
+ /*
+ * We ran out of memory, call the OOM killer, and return the
+ * userspace (which will retry the fault, or kill us if we got
+ * oom-killed):
+ */
+ pagefault_out_of_memory();
+ } else {
+ if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+ VM_FAULT_HWPOISON_LARGE))
+ do_sigbus(regs, error_code, address, fault);
+ else if (fault & VM_FAULT_SIGSEGV)
+ bad_area_nosemaphore(regs, error_code, address);
+ else
+ BUG();
+ }
}
NOKPROBE_SYMBOL(do_user_addr_fault);
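Several of the reworked fault paths above test individual bits of the hardware page-fault error code (X86_PF_USER, X86_PF_INSTR, X86_PF_RSVD, ...). For reference, a small sketch of those tests using the architectural bit positions; the values mirror the kernel's X86_PF_* constants, and the helper name is made up for illustration:

/* Page-fault (#PF, vector 14) error code bits used in the paths above. */
#define PF_PROT		(1UL << 0)	/* 0: not-present page, 1: protection violation */
#define PF_WRITE	(1UL << 1)	/* the faulting access was a write */
#define PF_USER		(1UL << 2)	/* the fault originated in user (CPL 3) mode */
#define PF_RSVD		(1UL << 3)	/* a reserved bit was set in a paging entry */
#define PF_INSTR	(1UL << 4)	/* the fault was an instruction fetch */

/* Kernel-mode instruction fetch from user memory: INSTR set, USER clear. */
static inline int kernel_exec_of_user_memory(unsigned long error_code)
{
	return (error_code & (PF_USER | PF_INSTR)) == PF_INSTR;
}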
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index e26f5c5c6565..dd694fb93916 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -157,16 +157,25 @@ __ref void *alloc_low_pages(unsigned int num)
}
/*
- * By default need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS.
- * With KASLR memory randomization, depending on the machine e820 memory
- * and the PUD alignment. We may need twice more pages when KASLR memory
+ * By default we need to be able to allocate page tables below the PGD, firstly
+ * for the 0-ISA_END_ADDRESS range and secondly for the initial PMD_SIZE mapping.
+ * With KASLR memory randomization enabled, twice that many pages may be
+ * needed, depending on the machine's e820 map and the PUD alignment.
*/
+
+#ifndef CONFIG_X86_5LEVEL
+#define INIT_PGD_PAGE_TABLES 3
+#else
+#define INIT_PGD_PAGE_TABLES 4
+#endif
+
#ifndef CONFIG_RANDOMIZE_MEMORY
-#define INIT_PGD_PAGE_COUNT 6
+#define INIT_PGD_PAGE_COUNT (2 * INIT_PGD_PAGE_TABLES)
#else
-#define INIT_PGD_PAGE_COUNT 12
+#define INIT_PGD_PAGE_COUNT (4 * INIT_PGD_PAGE_TABLES)
#endif
+
#define INIT_PGT_BUF_SIZE (INIT_PGD_PAGE_COUNT * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
void __init early_alloc_pgt_buf(void)
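Spelling the new constants out for the common configurations (a worked example; assumes 4 KiB pages):

/*
 * Worked example of the early page-table buffer size after this change:
 *
 *   INIT_PGD_PAGE_TABLES = 3 with 4-level paging (pud, pmd, pte per mapping)
 *                          4 with 5-level paging (p4d, pud, pmd, pte)
 *
 *   !CONFIG_RANDOMIZE_MEMORY: INIT_PGD_PAGE_COUNT = 2 * 3 = 6  -> 24 KiB
 *    CONFIG_RANDOMIZE_MEMORY: INIT_PGD_PAGE_COUNT = 4 * 3 = 12 -> 48 KiB
 *
 * and 8 pages (32 KiB) / 16 pages (64 KiB) respectively with
 * CONFIG_X86_5LEVEL, since INIT_PGT_BUF_SIZE is simply
 * INIT_PGD_PAGE_COUNT * PAGE_SIZE.
 */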
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index c79e5736ab2b..4b01f7dbaf30 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -382,6 +382,7 @@ bool sev_active(void)
{
return sev_status & MSR_AMD64_SEV_ENABLED;
}
+EXPORT_SYMBOL_GPL(sev_active);
/* Needs to be called from non-instrumentable code */
bool noinstr sev_es_active(void)
@@ -474,9 +475,10 @@ void __init mem_encrypt_init(void)
swiotlb_update_mem_attributes();
/*
- * With SEV, we need to unroll the rep string I/O instructions.
+ * With SEV, we need to unroll the rep string I/O instructions,
+ * but SEV-ES supports them through the #VC handler.
*/
- if (sev_active())
+ if (sev_active() && !sev_es_active())
static_branch_enable(&sev_enable_key);
print_mem_encrypt_feature_info();
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index bd7aff5c51f7..cd768dafca9e 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -10,8 +10,6 @@
#define pr_fmt(fmt) "mmiotrace: " fmt
-#define DEBUG 1
-
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index 8f665c352bf0..ca311aaa67b8 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -1164,12 +1164,14 @@ static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
+ kfree(v);
++*pos;
return memtype_get_idx(*pos);
}
static void memtype_seq_stop(struct seq_file *seq, void *v)
{
+ kfree(v);
}
static int memtype_seq_show(struct seq_file *seq, void *v)
@@ -1181,8 +1183,6 @@ static int memtype_seq_show(struct seq_file *seq, void *v)
entry_print->end,
cattr_name(entry_print->type));
- kfree(entry_print);
-
return 0;
}
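The memtype change above fixes a leak by moving the kfree() of each snapshot entry out of ->show() and into ->next()/->stop(). A minimal sketch of the seq_file iterator contract this relies on, with a hypothetical foo_get_idx() allocator standing in for memtype_get_idx():

#include <linux/seq_file.h>
#include <linux/slab.h>

/* Hypothetical allocator standing in for memtype_get_idx(). */
static void *foo_get_idx(loff_t pos);

/*
 * Every pointer handed out by ->start()/->next() must be freed by the
 * *next* ->next() call or by ->stop(): ->show() is not guaranteed to run
 * for every element (the buffer may fill up mid-walk), so freeing there
 * leaks entries.
 */
static void *foo_start(struct seq_file *seq, loff_t *pos)
{
	return foo_get_idx(*pos);		/* kmalloc'ed snapshot or NULL */
}

static void *foo_next(struct seq_file *seq, void *v, loff_t *pos)
{
	kfree(v);				/* release the previous element */
	++*pos;
	return foo_get_idx(*pos);
}

static void foo_stop(struct seq_file *seq, void *v)
{
	kfree(v);				/* release the last (or only) element */
}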
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 796506dcfc42..79e7a0ec1da5 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -205,6 +205,18 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
+/* Some 1-byte opcodes for binary ALU operations */
+static u8 simple_alu_opcodes[] = {
+ [BPF_ADD] = 0x01,
+ [BPF_SUB] = 0x29,
+ [BPF_AND] = 0x21,
+ [BPF_OR] = 0x09,
+ [BPF_XOR] = 0x31,
+ [BPF_LSH] = 0xE0,
+ [BPF_RSH] = 0xE8,
+ [BPF_ARSH] = 0xF8,
+};
+
static void jit_fill_hole(void *area, unsigned int size)
{
/* Fill whole space with INT3 instructions */
@@ -681,6 +693,42 @@ static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
*pprog = prog;
}
+/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
+static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
+{
+ u8 *prog = *pprog;
+ int cnt = 0;
+
+ if (is_imm8(off)) {
+ /* 1-byte signed displacement.
+ *
+ * If off == 0 we could skip this and save one extra byte, but the
+ * special case of x86 R13, which always needs an offset, is not
+ * worth the hassle.
+ */
+ EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
+ } else {
+ /* 4-byte signed displacement */
+ EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
+ }
+ *pprog = prog;
+}
+
+/*
+ * Emit a REX byte if it will be necessary to address these registers
+ */
+static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
+{
+ u8 *prog = *pprog;
+ int cnt = 0;
+
+ if (is64)
+ EMIT1(add_2mod(0x48, dst_reg, src_reg));
+ else if (is_ereg(dst_reg) || is_ereg(src_reg))
+ EMIT1(add_2mod(0x40, dst_reg, src_reg));
+ *pprog = prog;
+}
+
/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
@@ -708,15 +756,7 @@ static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
break;
}
- /*
- * If insn->off == 0 we can save one extra byte, but
- * special case of x86 R13 which always needs an offset
- * is not worth the hassle
- */
- if (is_imm8(off))
- EMIT2(add_2reg(0x40, src_reg, dst_reg), off);
- else
- EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), off);
+ emit_insn_suffix(&prog, src_reg, dst_reg, off);
*pprog = prog;
}
@@ -751,11 +791,51 @@ static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
break;
}
- if (is_imm8(off))
- EMIT2(add_2reg(0x40, dst_reg, src_reg), off);
- else
- EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), off);
+ emit_insn_suffix(&prog, dst_reg, src_reg, off);
+ *pprog = prog;
+}
+
+static int emit_atomic(u8 **pprog, u8 atomic_op,
+ u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
+{
+ u8 *prog = *pprog;
+ int cnt = 0;
+
+ EMIT1(0xF0); /* lock prefix */
+
+ maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
+
+ /* emit opcode */
+ switch (atomic_op) {
+ case BPF_ADD:
+ case BPF_SUB:
+ case BPF_AND:
+ case BPF_OR:
+ case BPF_XOR:
+ /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
+ EMIT1(simple_alu_opcodes[atomic_op]);
+ break;
+ case BPF_ADD | BPF_FETCH:
+ /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
+ EMIT2(0x0F, 0xC1);
+ break;
+ case BPF_XCHG:
+ /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
+ EMIT1(0x87);
+ break;
+ case BPF_CMPXCHG:
+ /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
+ EMIT2(0x0F, 0xB1);
+ break;
+ default:
+ pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
+ return -EFAULT;
+ }
+
+ emit_insn_suffix(&prog, dst_reg, src_reg, off);
+
*pprog = prog;
+ return 0;
}
static bool ex_handler_bpf(const struct exception_table_entry *x,
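emit_insn_suffix() above centralizes the ModR/M-plus-displacement tail that used to be open-coded after every load/store opcode: mod=01 with an 8-bit displacement when the offset fits in imm8, otherwise mod=10 with a 32-bit displacement. A standalone sketch of that choice using plain buffers instead of the JIT's EMIT macros; it deliberately ignores the REX byte (handled by maybe_emit_mod()) and the R13/RBP special case noted in the comment, and assumes a little-endian host:

#include <stdint.h>
#include <string.h>

/*
 * Sketch of the ModR/M + displacement encoding chosen by emit_insn_suffix():
 * mod=01 (0x40) selects a disp8, mod=10 (0x80) a disp32; rm is the pointer
 * register, reg the value register. Returns the number of bytes written.
 */
static int encode_mem_operand(uint8_t *buf, uint8_t rm, uint8_t reg, int32_t off)
{
	rm  &= 7;
	reg &= 7;

	if (off >= -128 && off <= 127) {
		buf[0] = 0x40 | (reg << 3) | rm;	/* mod=01, disp8 */
		buf[1] = (uint8_t)off;
		return 2;
	}
	buf[0] = 0x80 | (reg << 3) | rm;		/* mod=10, disp32 */
	memcpy(&buf[1], &off, sizeof(off));		/* little-endian disp32 */
	return 5;
}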
@@ -789,8 +869,31 @@ static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
}
}
+static int emit_nops(u8 **pprog, int len)
+{
+ u8 *prog = *pprog;
+ int i, noplen, cnt = 0;
+
+ while (len > 0) {
+ noplen = len;
+
+ if (noplen > ASM_NOP_MAX)
+ noplen = ASM_NOP_MAX;
+
+ for (i = 0; i < noplen; i++)
+ EMIT1(ideal_nops[noplen][i]);
+ len -= noplen;
+ }
+
+ *pprog = prog;
+
+ return cnt;
+}
+
+#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
+
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
- int oldproglen, struct jit_context *ctx)
+ int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
struct bpf_insn *insn = bpf_prog->insnsi;
@@ -800,8 +903,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
bool seen_exit = false;
u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
int i, cnt = 0, excnt = 0;
- int proglen = 0;
+ int ilen, proglen = 0;
u8 *prog = temp;
+ int err;
detect_reg_usage(insn, insn_cnt, callee_regs_used,
&tail_call_seen);
@@ -813,17 +917,24 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
bpf_prog_was_classic(bpf_prog), tail_call_reachable,
bpf_prog->aux->func_idx != 0);
push_callee_regs(&prog, callee_regs_used);
- addrs[0] = prog - temp;
+
+ ilen = prog - temp;
+ if (image)
+ memcpy(image + proglen, temp, ilen);
+ proglen += ilen;
+ addrs[0] = proglen;
+ prog = temp;
for (i = 1; i <= insn_cnt; i++, insn++) {
const s32 imm32 = insn->imm;
u32 dst_reg = insn->dst_reg;
u32 src_reg = insn->src_reg;
u8 b2 = 0, b3 = 0;
+ u8 *start_of_ldx;
s64 jmp_offset;
u8 jmp_cond;
- int ilen;
u8 *func;
+ int nops;
switch (insn->code) {
/* ALU */
@@ -837,17 +948,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_ALU64 | BPF_AND | BPF_X:
case BPF_ALU64 | BPF_OR | BPF_X:
case BPF_ALU64 | BPF_XOR | BPF_X:
- switch (BPF_OP(insn->code)) {
- case BPF_ADD: b2 = 0x01; break;
- case BPF_SUB: b2 = 0x29; break;
- case BPF_AND: b2 = 0x21; break;
- case BPF_OR: b2 = 0x09; break;
- case BPF_XOR: b2 = 0x31; break;
- }
- if (BPF_CLASS(insn->code) == BPF_ALU64)
- EMIT1(add_2mod(0x48, dst_reg, src_reg));
- else if (is_ereg(dst_reg) || is_ereg(src_reg))
- EMIT1(add_2mod(0x40, dst_reg, src_reg));
+ maybe_emit_mod(&prog, dst_reg, src_reg,
+ BPF_CLASS(insn->code) == BPF_ALU64);
+ b2 = simple_alu_opcodes[BPF_OP(insn->code)];
EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
break;
@@ -1027,12 +1130,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
else if (is_ereg(dst_reg))
EMIT1(add_1mod(0x40, dst_reg));
- switch (BPF_OP(insn->code)) {
- case BPF_LSH: b3 = 0xE0; break;
- case BPF_RSH: b3 = 0xE8; break;
- case BPF_ARSH: b3 = 0xF8; break;
- }
-
+ b3 = simple_alu_opcodes[BPF_OP(insn->code)];
if (imm32 == 1)
EMIT2(0xD1, add_1reg(b3, dst_reg));
else
@@ -1066,11 +1164,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
else if (is_ereg(dst_reg))
EMIT1(add_1mod(0x40, dst_reg));
- switch (BPF_OP(insn->code)) {
- case BPF_LSH: b3 = 0xE0; break;
- case BPF_RSH: b3 = 0xE8; break;
- case BPF_ARSH: b3 = 0xF8; break;
- }
+ b3 = simple_alu_opcodes[BPF_OP(insn->code)];
EMIT2(0xD3, add_1reg(b3, dst_reg));
if (src_reg != BPF_REG_4)
@@ -1185,12 +1279,30 @@ st: if (is_imm8(insn->off))
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
case BPF_LDX | BPF_MEM | BPF_DW:
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
+ /* test src_reg, src_reg */
+ maybe_emit_mod(&prog, src_reg, src_reg, true); /* always 1 byte */
+ EMIT2(0x85, add_2reg(0xC0, src_reg, src_reg));
+ /* jne start_of_ldx */
+ EMIT2(X86_JNE, 0);
+ /* xor dst_reg, dst_reg */
+ emit_mov_imm32(&prog, false, dst_reg, 0);
+ /* jmp byte_after_ldx */
+ EMIT2(0xEB, 0);
+
+ /* populate jmp_offset for JNE above */
+ temp[4] = prog - temp - 5 /* sizeof(test + jne) */;
+ start_of_ldx = prog;
+ }
emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
struct exception_table_entry *ex;
u8 *_insn = image + proglen;
s64 delta;
+ /* populate jmp_offset for JMP above */
+ start_of_ldx[-1] = prog - start_of_ldx;
+
if (!bpf_prog->aux->extable)
break;
@@ -1230,21 +1342,56 @@ st: if (is_imm8(insn->off))
}
break;
- /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
- case BPF_STX | BPF_XADD | BPF_W:
- /* Emit 'lock add dword ptr [rax + off], eax' */
- if (is_ereg(dst_reg) || is_ereg(src_reg))
- EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
- else
- EMIT2(0xF0, 0x01);
- goto xadd;
- case BPF_STX | BPF_XADD | BPF_DW:
- EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
-xadd: if (is_imm8(insn->off))
- EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
- else
- EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
- insn->off);
+ case BPF_STX | BPF_ATOMIC | BPF_W:
+ case BPF_STX | BPF_ATOMIC | BPF_DW:
+ if (insn->imm == (BPF_AND | BPF_FETCH) ||
+ insn->imm == (BPF_OR | BPF_FETCH) ||
+ insn->imm == (BPF_XOR | BPF_FETCH)) {
+ u8 *branch_target;
+ bool is64 = BPF_SIZE(insn->code) == BPF_DW;
+
+ /*
+ * Can't be implemented with a single x86 insn.
+ * Need to do a CMPXCHG loop.
+ */
+
+ /* Will need RAX as a CMPXCHG operand so save R0 */
+ emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
+ branch_target = prog;
+ /* Load old value */
+ emit_ldx(&prog, BPF_SIZE(insn->code),
+ BPF_REG_0, dst_reg, insn->off);
+ /*
+ * Perform the (commutative) operation locally,
+ * put the result in the AUX_REG.
+ */
+ emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
+ maybe_emit_mod(&prog, AUX_REG, src_reg, is64);
+ EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
+ add_2reg(0xC0, AUX_REG, src_reg));
+ /* Attempt to swap in new value */
+ err = emit_atomic(&prog, BPF_CMPXCHG,
+ dst_reg, AUX_REG, insn->off,
+ BPF_SIZE(insn->code));
+ if (WARN_ON(err))
+ return err;
+ /*
+ * ZF tells us whether we won the race. If it's
+ * cleared we need to try again.
+ */
+ EMIT2(X86_JNE, -(prog - branch_target) - 2);
+ /* Return the pre-modification value */
+ emit_mov_reg(&prog, is64, src_reg, BPF_REG_0);
+ /* Restore R0 after clobbering RAX */
+ emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
+ break;
+
+ }
+
+ err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
+ insn->off, BPF_SIZE(insn->code));
+ if (err)
+ return err;
break;
/* call */
@@ -1295,20 +1442,16 @@ xadd: if (is_imm8(insn->off))
case BPF_JMP32 | BPF_JSGE | BPF_X:
case BPF_JMP32 | BPF_JSLE | BPF_X:
/* cmp dst_reg, src_reg */
- if (BPF_CLASS(insn->code) == BPF_JMP)
- EMIT1(add_2mod(0x48, dst_reg, src_reg));
- else if (is_ereg(dst_reg) || is_ereg(src_reg))
- EMIT1(add_2mod(0x40, dst_reg, src_reg));
+ maybe_emit_mod(&prog, dst_reg, src_reg,
+ BPF_CLASS(insn->code) == BPF_JMP);
EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
goto emit_cond_jmp;
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP32 | BPF_JSET | BPF_X:
/* test dst_reg, src_reg */
- if (BPF_CLASS(insn->code) == BPF_JMP)
- EMIT1(add_2mod(0x48, dst_reg, src_reg));
- else if (is_ereg(dst_reg) || is_ereg(src_reg))
- EMIT1(add_2mod(0x40, dst_reg, src_reg));
+ maybe_emit_mod(&prog, dst_reg, src_reg,
+ BPF_CLASS(insn->code) == BPF_JMP);
EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
goto emit_cond_jmp;
@@ -1344,10 +1487,8 @@ xadd: if (is_imm8(insn->off))
case BPF_JMP32 | BPF_JSLE | BPF_K:
/* test dst_reg, dst_reg to save one extra byte */
if (imm32 == 0) {
- if (BPF_CLASS(insn->code) == BPF_JMP)
- EMIT1(add_2mod(0x48, dst_reg, dst_reg));
- else if (is_ereg(dst_reg))
- EMIT1(add_2mod(0x40, dst_reg, dst_reg));
+ maybe_emit_mod(&prog, dst_reg, dst_reg,
+ BPF_CLASS(insn->code) == BPF_JMP);
EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
goto emit_cond_jmp;
}
@@ -1409,6 +1550,30 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */
}
jmp_offset = addrs[i + insn->off] - addrs[i];
if (is_imm8(jmp_offset)) {
+ if (jmp_padding) {
+ /* To keep the jmp_offset valid, the extra bytes are
+ * padded before the jump insn, so we subtract the
+ * 2 bytes of the jmp_cond insn from INSN_SZ_DIFF.
+ *
+ * If the previous pass already emits an imm8
+ * jmp_cond, then this BPF insn won't shrink, so
+ * "nops" is 0.
+ *
+ * On the other hand, if the previous pass emits an
+ * imm32 jmp_cond, the extra 4 bytes(*) are padded to
+ * keep the image from shrinking further.
+ *
+ * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
+ * is 2 bytes, so the size difference is 4 bytes.
+ */
+ nops = INSN_SZ_DIFF - 2;
+ if (nops != 0 && nops != 4) {
+ pr_err("unexpected jmp_cond padding: %d bytes\n",
+ nops);
+ return -EFAULT;
+ }
+ cnt += emit_nops(&prog, nops);
+ }
EMIT2(jmp_cond, jmp_offset);
} else if (is_simm32(jmp_offset)) {
EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
@@ -1431,11 +1596,55 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */
else
jmp_offset = addrs[i + insn->off] - addrs[i];
- if (!jmp_offset)
- /* Optimize out nop jumps */
+ if (!jmp_offset) {
+ /*
+ * If jmp_padding is enabled, the extra nops will
+ * be inserted. Otherwise, optimize out nop jumps.
+ */
+ if (jmp_padding) {
+ /* There are 3 possible conditions.
+ * (1) This BPF_JA is already optimized out in
+ * the previous run, so there is no need
+ * to pad any extra byte (0 byte).
+ * (2) The previous pass emits an imm8 jmp,
+ * so we pad 2 bytes to match the previous
+ * insn size.
+ * (3) Similarly, the previous pass emits an
+ * imm32 jmp, and 5 bytes are padded.
+ */
+ nops = INSN_SZ_DIFF;
+ if (nops != 0 && nops != 2 && nops != 5) {
+ pr_err("unexpected nop jump padding: %d bytes\n",
+ nops);
+ return -EFAULT;
+ }
+ cnt += emit_nops(&prog, nops);
+ }
break;
+ }
emit_jmp:
if (is_imm8(jmp_offset)) {
+ if (jmp_padding) {
+ /* To avoid breaking jmp_offset, the extra bytes
+ * are padded before the actual jmp insn, so
+ * 2 bytes are subtracted from INSN_SZ_DIFF.
+ *
+ * If the previous pass already emits an imm8
+ * jmp, there is nothing to pad (0 byte).
+ *
+ * If it emitted an imm32 jmp (5 bytes) previously
+ * and now emits an imm8 jmp (2 bytes), then we pad
+ * (5 - 2 = 3) bytes to stop the image from
+ * shrinking further.
+ */
+ nops = INSN_SZ_DIFF - 2;
+ if (nops != 0 && nops != 3) {
+ pr_err("unexpected jump padding: %d bytes\n",
+ nops);
+ return -EFAULT;
+ }
+ cnt += emit_nops(&prog, INSN_SZ_DIFF - 2);
+ }
EMIT2(0xEB, jmp_offset);
} else if (is_simm32(jmp_offset)) {
EMIT1_off32(0xE9, jmp_offset);
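A concrete instance of the padding arithmetic described in the comments above (byte counts follow the usual x86 encodings: imm8 jcc and imm8 jmp are 2 bytes, imm32 jcc is 6 bytes, imm32 jmp is 5 bytes):

/*
 * Worked example of the jump padding:
 *
 *   pass N:   the branch target is far away      -> imm32 jcc, 6 bytes
 *   pass N+1: other insns shrank, target is near -> imm8  jcc, 2 bytes
 *
 * Without padding the image would shrink by 4 bytes and every later entry
 * in addrs[] would be wrong for code already emitted.  With jmp_padding
 * enabled, INSN_SZ_DIFF - 2 = 4 NOP bytes are emitted in front of the
 * 2-byte jcc, so the instruction still occupies 6 bytes and addrs[] stays
 * valid.  The unconditional-jmp case is the same with 5 - 2 = 3 NOP bytes,
 * and a fully optimized-out BPF_JA pads 0, 2 or 5 bytes depending on what
 * the previous pass emitted.
 */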
@@ -1531,17 +1740,25 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
struct bpf_prog *p, int stack_size, bool mod_ret)
{
u8 *prog = *pprog;
+ u8 *jmp_insn;
int cnt = 0;
- if (p->aux->sleepable) {
- if (emit_call(&prog, __bpf_prog_enter_sleepable, prog))
- return -EINVAL;
- } else {
- if (emit_call(&prog, __bpf_prog_enter, prog))
+ /* arg1: mov rdi, progs[i] */
+ emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
+ if (emit_call(&prog,
+ p->aux->sleepable ? __bpf_prog_enter_sleepable :
+ __bpf_prog_enter, prog))
return -EINVAL;
- /* remember prog start time returned by __bpf_prog_enter */
- emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
- }
+ /* remember prog start time returned by __bpf_prog_enter */
+ emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
+
+ /* if (__bpf_prog_enter*(prog) == 0)
+ * goto skip_exec_of_prog;
+ */
+ EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
+ /* emit 2 nops that will be replaced with JE insn */
+ jmp_insn = prog;
+ emit_nops(&prog, 2);
/* arg1: lea rdi, [rbp - stack_size] */
EMIT4(0x48, 0x8D, 0x7D, -stack_size);
@@ -1561,43 +1778,23 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
if (mod_ret)
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
- if (p->aux->sleepable) {
- if (emit_call(&prog, __bpf_prog_exit_sleepable, prog))
+ /* replace 2 nops with JE insn, since jmp target is known */
+ jmp_insn[0] = X86_JE;
+ jmp_insn[1] = prog - jmp_insn - 2;
+
+ /* arg1: mov rdi, progs[i] */
+ emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
+ /* arg2: mov rsi, rbx <- start time in nsec */
+ emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
+ if (emit_call(&prog,
+ p->aux->sleepable ? __bpf_prog_exit_sleepable :
+ __bpf_prog_exit, prog))
return -EINVAL;
- } else {
- /* arg1: mov rdi, progs[i] */
- emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32,
- (u32) (long) p);
- /* arg2: mov rsi, rbx <- start time in nsec */
- emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
- if (emit_call(&prog, __bpf_prog_exit, prog))
- return -EINVAL;
- }
*pprog = prog;
return 0;
}
-static void emit_nops(u8 **pprog, unsigned int len)
-{
- unsigned int i, noplen;
- u8 *prog = *pprog;
- int cnt = 0;
-
- while (len > 0) {
- noplen = len;
-
- if (noplen > ASM_NOP_MAX)
- noplen = ASM_NOP_MAX;
-
- for (i = 0; i < noplen; i++)
- EMIT1(ideal_nops[noplen][i]);
- len -= noplen;
- }
-
- *pprog = prog;
-}
-
static void emit_align(u8 **pprog, u32 align)
{
u8 *target, *prog = *pprog;
@@ -1972,6 +2169,9 @@ struct x64_jit_data {
struct jit_context ctx;
};
+#define MAX_PASSES 20
+#define PADDING_PASSES (MAX_PASSES - 5)
+
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
struct bpf_binary_header *header = NULL;
@@ -1981,6 +2181,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
struct jit_context ctx = {};
bool tmp_blinded = false;
bool extra_pass = false;
+ bool padding = false;
u8 *image = NULL;
int *addrs;
int pass;
@@ -2017,6 +2218,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
image = jit_data->image;
header = jit_data->header;
extra_pass = true;
+ padding = true;
goto skip_init_addrs;
}
addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
@@ -2042,8 +2244,10 @@ skip_init_addrs:
* may converge on the last pass. In such case do one more
* pass to emit the final image.
*/
- for (pass = 0; pass < 20 || image; pass++) {
- proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
+ for (pass = 0; pass < MAX_PASSES || image; pass++) {
+ if (!padding && pass >= PADDING_PASSES)
+ padding = true;
+ proglen = do_jit(prog, addrs, image, oldproglen, &ctx, padding);
if (proglen <= 0) {
out_image:
image = NULL;
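The BPF_AND/BPF_OR/BPF_XOR | BPF_FETCH case above cannot be expressed as one lock-prefixed x86 instruction (there is no fetch-and-AND/OR/XOR), which is why the JIT emits a CMPXCHG retry loop. Roughly the C equivalent of the emitted sequence, sketched here with C11 atomics on a hypothetical 64-bit operand:

#include <stdint.h>
#include <stdatomic.h>

/*
 * C-level equivalent of the emitted CMPXCHG loop for
 * src = atomic_fetch_or(dst + off, src) and friends.
 */
static uint64_t fetch_or_u64(_Atomic uint64_t *addr, uint64_t val)
{
	uint64_t old = atomic_load(addr);		/* "load old value" */

	/* lock cmpxchg retries until no other CPU raced with us (ZF set). */
	while (!atomic_compare_exchange_weak(addr, &old, old | val))
		;					/* old is refreshed on failure */

	return old;					/* pre-modification value */
}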
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 96fde03aa987..d17b67c69f89 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -2243,10 +2243,8 @@ emit_jmp:
return -EFAULT;
}
break;
- /* STX XADD: lock *(u32 *)(dst + off) += src */
- case BPF_STX | BPF_XADD | BPF_W:
- /* STX XADD: lock *(u64 *)(dst + off) += src */
- case BPF_STX | BPF_XADD | BPF_DW:
+ case BPF_STX | BPF_ATOMIC | BPF_W:
+ case BPF_STX | BPF_ATOMIC | BPF_DW:
goto notyet;
case BPF_JMP | BPF_EXIT:
if (seen_exit) {
diff --git a/arch/x86/oprofile/Makefile b/arch/x86/oprofile/Makefile
deleted file mode 100644
index 4d49b5a27025..000000000000
--- a/arch/x86/oprofile/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o nmi_timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
-oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_amd.o \
- op_model_ppro.o op_model_p4.o
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
deleted file mode 100644
index 1d8391fcca68..000000000000
--- a/arch/x86/oprofile/backtrace.c
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * @file backtrace.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon
- * @author David Smith
- */
-
-#include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/compat.h>
-#include <linux/uaccess.h>
-
-#include <asm/ptrace.h>
-#include <asm/stacktrace.h>
-#include <asm/unwind.h>
-
-#ifdef CONFIG_COMPAT
-static struct stack_frame_ia32 *
-dump_user_backtrace_32(struct stack_frame_ia32 *head)
-{
- /* Also check accessibility of one struct frame_head beyond: */
- struct stack_frame_ia32 bufhead[2];
- struct stack_frame_ia32 *fp;
- unsigned long bytes;
-
- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
- if (bytes != 0)
- return NULL;
-
- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
-
- oprofile_add_trace(bufhead[0].return_address);
-
- /* frame pointers should strictly progress back up the stack
- * (towards higher addresses) */
- if (head >= fp)
- return NULL;
-
- return fp;
-}
-
-static inline int
-x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
-{
- struct stack_frame_ia32 *head;
-
- /* User process is IA32 */
- if (!current || user_64bit_mode(regs))
- return 0;
-
- head = (struct stack_frame_ia32 *) regs->bp;
- while (depth-- && head)
- head = dump_user_backtrace_32(head);
-
- return 1;
-}
-
-#else
-static inline int
-x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
-{
- return 0;
-}
-#endif /* CONFIG_COMPAT */
-
-static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
-{
- /* Also check accessibility of one struct frame_head beyond: */
- struct stack_frame bufhead[2];
- unsigned long bytes;
-
- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
- if (bytes != 0)
- return NULL;
-
- oprofile_add_trace(bufhead[0].return_address);
-
- /* frame pointers should strictly progress back up the stack
- * (towards higher addresses) */
- if (head >= bufhead[0].next_frame)
- return NULL;
-
- return bufhead[0].next_frame;
-}
-
-void
-x86_backtrace(struct pt_regs * const regs, unsigned int depth)
-{
- struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
-
- if (!user_mode(regs)) {
- struct unwind_state state;
- unsigned long addr;
-
- if (!depth)
- return;
-
- oprofile_add_trace(regs->ip);
-
- if (!--depth)
- return;
-
- for (unwind_start(&state, current, regs, NULL);
- !unwind_done(&state); unwind_next_frame(&state)) {
- addr = unwind_get_return_address(&state);
- if (!addr)
- break;
-
- oprofile_add_trace(addr);
-
- if (!--depth)
- break;
- }
-
- return;
- }
-
- if (x86_backtrace_32(regs, depth))
- return;
-
- while (depth-- && head)
- head = dump_user_backtrace(head);
-}
diff --git a/arch/x86/oprofile/init.c b/arch/x86/oprofile/init.c
deleted file mode 100644
index 9e138d00ad36..000000000000
--- a/arch/x86/oprofile/init.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * @file init.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/oprofile.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-
-/*
- * We support CPUs that have performance counters like the Pentium Pro
- * with the NMI mode driver.
- */
-
-#ifdef CONFIG_X86_LOCAL_APIC
-extern int op_nmi_init(struct oprofile_operations *ops);
-extern void op_nmi_exit(void);
-#else
-static int op_nmi_init(struct oprofile_operations *ops) { return -ENODEV; }
-static void op_nmi_exit(void) { }
-#endif
-
-extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- ops->backtrace = x86_backtrace;
- return op_nmi_init(ops);
-}
-
-void oprofile_arch_exit(void)
-{
- op_nmi_exit();
-}
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
deleted file mode 100644
index a7a7677265b6..000000000000
--- a/arch/x86/oprofile/nmi_int.c
+++ /dev/null
@@ -1,780 +0,0 @@
-/**
- * @file nmi_int.c
- *
- * @remark Copyright 2002-2009 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- * @author Robert Richter <robert.richter@amd.com>
- * @author Barry Kasindorf <barry.kasindorf@amd.com>
- * @author Jason Yeh <jason.yeh@amd.com>
- * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
- */
-
-#include <linux/init.h>
-#include <linux/notifier.h>
-#include <linux/smp.h>
-#include <linux/oprofile.h>
-#include <linux/syscore_ops.h>
-#include <linux/slab.h>
-#include <linux/moduleparam.h>
-#include <linux/kdebug.h>
-#include <linux/cpu.h>
-#include <asm/nmi.h>
-#include <asm/msr.h>
-#include <asm/apic.h>
-
-#include "op_counter.h"
-#include "op_x86_model.h"
-
-static struct op_x86_model_spec *model;
-static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
-static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
-
-/* must be protected with get_online_cpus()/put_online_cpus(): */
-static int nmi_enabled;
-static int ctr_running;
-
-struct op_counter_config counter_config[OP_MAX_COUNTER];
-
-/* common functions */
-
-u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
- struct op_counter_config *counter_config)
-{
- u64 val = 0;
- u16 event = (u16)counter_config->event;
-
- val |= ARCH_PERFMON_EVENTSEL_INT;
- val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
- val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
- val |= (counter_config->unit_mask & 0xFF) << 8;
- counter_config->extra &= (ARCH_PERFMON_EVENTSEL_INV |
- ARCH_PERFMON_EVENTSEL_EDGE |
- ARCH_PERFMON_EVENTSEL_CMASK);
- val |= counter_config->extra;
- event &= model->event_mask ? model->event_mask : 0xFF;
- val |= event & 0xFF;
- val |= (u64)(event & 0x0F00) << 24;
-
- return val;
-}
-
-
-static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
-{
- if (ctr_running)
- model->check_ctrs(regs, this_cpu_ptr(&cpu_msrs));
- else if (!nmi_enabled)
- return NMI_DONE;
- else
- model->stop(this_cpu_ptr(&cpu_msrs));
- return NMI_HANDLED;
-}
-
-static void nmi_cpu_save_registers(struct op_msrs *msrs)
-{
- struct op_msr *counters = msrs->counters;
- struct op_msr *controls = msrs->controls;
- unsigned int i;
-
- for (i = 0; i < model->num_counters; ++i) {
- if (counters[i].addr)
- rdmsrl(counters[i].addr, counters[i].saved);
- }
-
- for (i = 0; i < model->num_controls; ++i) {
- if (controls[i].addr)
- rdmsrl(controls[i].addr, controls[i].saved);
- }
-}
-
-static void nmi_cpu_start(void *dummy)
-{
- struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
- if (!msrs->controls)
- WARN_ON_ONCE(1);
- else
- model->start(msrs);
-}
-
-static int nmi_start(void)
-{
- get_online_cpus();
- ctr_running = 1;
- /* make ctr_running visible to the nmi handler: */
- smp_mb();
- on_each_cpu(nmi_cpu_start, NULL, 1);
- put_online_cpus();
- return 0;
-}
-
-static void nmi_cpu_stop(void *dummy)
-{
- struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
- if (!msrs->controls)
- WARN_ON_ONCE(1);
- else
- model->stop(msrs);
-}
-
-static void nmi_stop(void)
-{
- get_online_cpus();
- on_each_cpu(nmi_cpu_stop, NULL, 1);
- ctr_running = 0;
- put_online_cpus();
-}
-
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-
-static DEFINE_PER_CPU(int, switch_index);
-
-static inline int has_mux(void)
-{
- return !!model->switch_ctrl;
-}
-
-inline int op_x86_phys_to_virt(int phys)
-{
- return __this_cpu_read(switch_index) + phys;
-}
-
-inline int op_x86_virt_to_phys(int virt)
-{
- return virt % model->num_counters;
-}
-
-static void nmi_shutdown_mux(void)
-{
- int i;
-
- if (!has_mux())
- return;
-
- for_each_possible_cpu(i) {
- kfree(per_cpu(cpu_msrs, i).multiplex);
- per_cpu(cpu_msrs, i).multiplex = NULL;
- per_cpu(switch_index, i) = 0;
- }
-}
-
-static int nmi_setup_mux(void)
-{
- size_t multiplex_size =
- sizeof(struct op_msr) * model->num_virt_counters;
- int i;
-
- if (!has_mux())
- return 1;
-
- for_each_possible_cpu(i) {
- per_cpu(cpu_msrs, i).multiplex =
- kzalloc(multiplex_size, GFP_KERNEL);
- if (!per_cpu(cpu_msrs, i).multiplex)
- return 0;
- }
-
- return 1;
-}
-
-static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
-{
- int i;
- struct op_msr *multiplex = msrs->multiplex;
-
- if (!has_mux())
- return;
-
- for (i = 0; i < model->num_virt_counters; ++i) {
- if (counter_config[i].enabled) {
- multiplex[i].saved = -(u64)counter_config[i].count;
- } else {
- multiplex[i].saved = 0;
- }
- }
-
- per_cpu(switch_index, cpu) = 0;
-}
-
-static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
-{
- struct op_msr *counters = msrs->counters;
- struct op_msr *multiplex = msrs->multiplex;
- int i;
-
- for (i = 0; i < model->num_counters; ++i) {
- int virt = op_x86_phys_to_virt(i);
- if (counters[i].addr)
- rdmsrl(counters[i].addr, multiplex[virt].saved);
- }
-}
-
-static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
-{
- struct op_msr *counters = msrs->counters;
- struct op_msr *multiplex = msrs->multiplex;
- int i;
-
- for (i = 0; i < model->num_counters; ++i) {
- int virt = op_x86_phys_to_virt(i);
- if (counters[i].addr)
- wrmsrl(counters[i].addr, multiplex[virt].saved);
- }
-}
-
-static void nmi_cpu_switch(void *dummy)
-{
- int cpu = smp_processor_id();
- int si = per_cpu(switch_index, cpu);
- struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
-
- nmi_cpu_stop(NULL);
- nmi_cpu_save_mpx_registers(msrs);
-
- /* move to next set */
- si += model->num_counters;
- if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
- per_cpu(switch_index, cpu) = 0;
- else
- per_cpu(switch_index, cpu) = si;
-
- model->switch_ctrl(model, msrs);
- nmi_cpu_restore_mpx_registers(msrs);
-
- nmi_cpu_start(NULL);
-}
-
-
-/*
- * Quick check to see if multiplexing is necessary.
- * The check should be sufficient since counters are used
- * in order.
- */
-static int nmi_multiplex_on(void)
-{
- return counter_config[model->num_counters].count ? 0 : -EINVAL;
-}
-
-static int nmi_switch_event(void)
-{
- if (!has_mux())
- return -ENOSYS; /* not implemented */
- if (nmi_multiplex_on() < 0)
- return -EINVAL; /* not necessary */
-
- get_online_cpus();
- if (ctr_running)
- on_each_cpu(nmi_cpu_switch, NULL, 1);
- put_online_cpus();
-
- return 0;
-}
-
-static inline void mux_init(struct oprofile_operations *ops)
-{
- if (has_mux())
- ops->switch_events = nmi_switch_event;
-}
-
-static void mux_clone(int cpu)
-{
- if (!has_mux())
- return;
-
- memcpy(per_cpu(cpu_msrs, cpu).multiplex,
- per_cpu(cpu_msrs, 0).multiplex,
- sizeof(struct op_msr) * model->num_virt_counters);
-}
-
-#else
-
-inline int op_x86_phys_to_virt(int phys) { return phys; }
-inline int op_x86_virt_to_phys(int virt) { return virt; }
-static inline void nmi_shutdown_mux(void) { }
-static inline int nmi_setup_mux(void) { return 1; }
-static inline void
-nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
-static inline void mux_init(struct oprofile_operations *ops) { }
-static void mux_clone(int cpu) { }
-
-#endif
-
-static void free_msrs(void)
-{
- int i;
- for_each_possible_cpu(i) {
- kfree(per_cpu(cpu_msrs, i).counters);
- per_cpu(cpu_msrs, i).counters = NULL;
- kfree(per_cpu(cpu_msrs, i).controls);
- per_cpu(cpu_msrs, i).controls = NULL;
- }
- nmi_shutdown_mux();
-}
-
-static int allocate_msrs(void)
-{
- size_t controls_size = sizeof(struct op_msr) * model->num_controls;
- size_t counters_size = sizeof(struct op_msr) * model->num_counters;
-
- int i;
- for_each_possible_cpu(i) {
- per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
- GFP_KERNEL);
- if (!per_cpu(cpu_msrs, i).counters)
- goto fail;
- per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
- GFP_KERNEL);
- if (!per_cpu(cpu_msrs, i).controls)
- goto fail;
- }
-
- if (!nmi_setup_mux())
- goto fail;
-
- return 1;
-
-fail:
- free_msrs();
- return 0;
-}
-
-static void nmi_cpu_setup(void)
-{
- int cpu = smp_processor_id();
- struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
-
- nmi_cpu_save_registers(msrs);
- raw_spin_lock(&oprofilefs_lock);
- model->setup_ctrs(model, msrs);
- nmi_cpu_setup_mux(cpu, msrs);
- raw_spin_unlock(&oprofilefs_lock);
- per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
- apic_write(APIC_LVTPC, APIC_DM_NMI);
-}
-
-static void nmi_cpu_restore_registers(struct op_msrs *msrs)
-{
- struct op_msr *counters = msrs->counters;
- struct op_msr *controls = msrs->controls;
- unsigned int i;
-
- for (i = 0; i < model->num_controls; ++i) {
- if (controls[i].addr)
- wrmsrl(controls[i].addr, controls[i].saved);
- }
-
- for (i = 0; i < model->num_counters; ++i) {
- if (counters[i].addr)
- wrmsrl(counters[i].addr, counters[i].saved);
- }
-}
-
-static void nmi_cpu_shutdown(void)
-{
- unsigned int v;
- int cpu = smp_processor_id();
- struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
-
- /* restoring APIC_LVTPC can trigger an APIC error because the delivery
- * mode and vector nr combination can be illegal. That's by design: on
- * power-on the APIC LVT contains a zero vector nr, which is legal only
- * for NMI delivery mode. So inhibit APIC errors before restoring LVTPC.
- */
- v = apic_read(APIC_LVTERR);
- apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
- apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
- apic_write(APIC_LVTERR, v);
- nmi_cpu_restore_registers(msrs);
-}
-
-static int nmi_cpu_online(unsigned int cpu)
-{
- local_irq_disable();
- if (nmi_enabled)
- nmi_cpu_setup();
- if (ctr_running)
- nmi_cpu_start(NULL);
- local_irq_enable();
- return 0;
-}
-
-static int nmi_cpu_down_prep(unsigned int cpu)
-{
- local_irq_disable();
- if (ctr_running)
- nmi_cpu_stop(NULL);
- if (nmi_enabled)
- nmi_cpu_shutdown();
- local_irq_enable();
- return 0;
-}
-
-static int nmi_create_files(struct dentry *root)
-{
- unsigned int i;
-
- for (i = 0; i < model->num_virt_counters; ++i) {
- struct dentry *dir;
- char buf[4];
-
- /* quick little hack to _not_ expose a counter if it is not
- * available for use. This should protect the userspace app.
- * NOTE: assumes 1:1 mapping here (that counters are organized
- * sequentially in their struct assignment).
- */
- if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
- continue;
-
- snprintf(buf, sizeof(buf), "%d", i);
- dir = oprofilefs_mkdir(root, buf);
- oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
- oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
- oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
- oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
- oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
- oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
- oprofilefs_create_ulong(dir, "extra", &counter_config[i].extra);
- }
-
- return 0;
-}
-
-static enum cpuhp_state cpuhp_nmi_online;
-
-static int nmi_setup(void)
-{
- int err = 0;
- int cpu;
-
- if (!allocate_msrs())
- return -ENOMEM;
-
- /* We need to serialize save and setup for HT because the subsets
- * of MSRs are distinct for the save and setup operations
- */
-
- /* Assume saved/restored counters are the same on all CPUs */
- err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
- if (err)
- goto fail;
-
- for_each_possible_cpu(cpu) {
- if (!IS_ENABLED(CONFIG_SMP) || !cpu)
- continue;
-
- memcpy(per_cpu(cpu_msrs, cpu).counters,
- per_cpu(cpu_msrs, 0).counters,
- sizeof(struct op_msr) * model->num_counters);
-
- memcpy(per_cpu(cpu_msrs, cpu).controls,
- per_cpu(cpu_msrs, 0).controls,
- sizeof(struct op_msr) * model->num_controls);
-
- mux_clone(cpu);
- }
-
- nmi_enabled = 0;
- ctr_running = 0;
- /* make variables visible to the nmi handler: */
- smp_mb();
- err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
- 0, "oprofile");
- if (err)
- goto fail;
-
- nmi_enabled = 1;
- /* make nmi_enabled visible to the nmi handler: */
- smp_mb();
- err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/oprofile:online",
- nmi_cpu_online, nmi_cpu_down_prep);
- if (err < 0)
- goto fail_nmi;
- cpuhp_nmi_online = err;
- return 0;
-fail_nmi:
- unregister_nmi_handler(NMI_LOCAL, "oprofile");
-fail:
- free_msrs();
- return err;
-}
-
-static void nmi_shutdown(void)
-{
- struct op_msrs *msrs;
-
- cpuhp_remove_state(cpuhp_nmi_online);
- nmi_enabled = 0;
- ctr_running = 0;
-
- /* make variables visible to the nmi handler: */
- smp_mb();
- unregister_nmi_handler(NMI_LOCAL, "oprofile");
- msrs = &get_cpu_var(cpu_msrs);
- model->shutdown(msrs);
- free_msrs();
- put_cpu_var(cpu_msrs);
-}
-
-#ifdef CONFIG_PM
-
-static int nmi_suspend(void)
-{
- /* Only one CPU left, just stop that one */
- if (nmi_enabled == 1)
- nmi_cpu_stop(NULL);
- return 0;
-}
-
-static void nmi_resume(void)
-{
- if (nmi_enabled == 1)
- nmi_cpu_start(NULL);
-}
-
-static struct syscore_ops oprofile_syscore_ops = {
- .resume = nmi_resume,
- .suspend = nmi_suspend,
-};
-
-static void __init init_suspend_resume(void)
-{
- register_syscore_ops(&oprofile_syscore_ops);
-}
-
-static void exit_suspend_resume(void)
-{
- unregister_syscore_ops(&oprofile_syscore_ops);
-}
-
-#else
-
-static inline void init_suspend_resume(void) { }
-static inline void exit_suspend_resume(void) { }
-
-#endif /* CONFIG_PM */
-
-static int __init p4_init(char **cpu_type)
-{
- __u8 cpu_model = boot_cpu_data.x86_model;
-
- if (cpu_model > 6 || cpu_model == 5)
- return 0;
-
-#ifndef CONFIG_SMP
- *cpu_type = "i386/p4";
- model = &op_p4_spec;
- return 1;
-#else
- switch (smp_num_siblings) {
- case 1:
- *cpu_type = "i386/p4";
- model = &op_p4_spec;
- return 1;
-
- case 2:
- *cpu_type = "i386/p4-ht";
- model = &op_p4_ht2_spec;
- return 1;
- }
-#endif
-
- printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
- printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
- return 0;
-}
-
-enum __force_cpu_type {
- reserved = 0, /* do not force */
- timer,
- arch_perfmon,
-};
-
-static int force_cpu_type;
-
-static int set_cpu_type(const char *str, const struct kernel_param *kp)
-{
- if (!strcmp(str, "timer")) {
- force_cpu_type = timer;
- printk(KERN_INFO "oprofile: forcing NMI timer mode\n");
- } else if (!strcmp(str, "arch_perfmon")) {
- force_cpu_type = arch_perfmon;
- printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
- } else {
- force_cpu_type = 0;
- }
-
- return 0;
-}
-module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
-
-static int __init ppro_init(char **cpu_type)
-{
- __u8 cpu_model = boot_cpu_data.x86_model;
- struct op_x86_model_spec *spec = &op_ppro_spec; /* default */
-
- if (force_cpu_type == arch_perfmon && boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
- return 0;
-
- /*
- * Documentation on identifying Intel processors by CPU family
- * and model can be found in the Intel Software Developer's
- * Manuals (SDM):
- *
- * http://www.intel.com/products/processor/manuals/
- *
- * As of May 2010 the documentation for this was in the:
- * "Intel 64 and IA-32 Architectures Software Developer's
- * Manual Volume 3B: System Programming Guide", "Table B-1
- * CPUID Signature Values of DisplayFamily_DisplayModel".
- */
- switch (cpu_model) {
- case 0 ... 2:
- *cpu_type = "i386/ppro";
- break;
- case 3 ... 5:
- *cpu_type = "i386/pii";
- break;
- case 6 ... 8:
- case 10 ... 11:
- *cpu_type = "i386/piii";
- break;
- case 9:
- case 13:
- *cpu_type = "i386/p6_mobile";
- break;
- case 14:
- *cpu_type = "i386/core";
- break;
- case 0x0f:
- case 0x16:
- case 0x17:
- case 0x1d:
- *cpu_type = "i386/core_2";
- break;
- case 0x1a:
- case 0x1e:
- case 0x2e:
- spec = &op_arch_perfmon_spec;
- *cpu_type = "i386/core_i7";
- break;
- case 0x1c:
- *cpu_type = "i386/atom";
- break;
- default:
- /* Unknown */
- return 0;
- }
-
- model = spec;
- return 1;
-}
-
-int __init op_nmi_init(struct oprofile_operations *ops)
-{
- __u8 vendor = boot_cpu_data.x86_vendor;
- __u8 family = boot_cpu_data.x86;
- char *cpu_type = NULL;
- int ret = 0;
-
- if (!boot_cpu_has(X86_FEATURE_APIC))
- return -ENODEV;
-
- if (force_cpu_type == timer)
- return -ENODEV;
-
- switch (vendor) {
- case X86_VENDOR_AMD:
- /* Needs to be at least an Athlon (or hammer in 32bit mode) */
-
- switch (family) {
- case 6:
- cpu_type = "i386/athlon";
- break;
- case 0xf:
- /*
- * Actually it could be i386/hammer too, but
- * give user space a consistent name.
- */
- cpu_type = "x86-64/hammer";
- break;
- case 0x10:
- cpu_type = "x86-64/family10";
- break;
- case 0x11:
- cpu_type = "x86-64/family11h";
- break;
- case 0x12:
- cpu_type = "x86-64/family12h";
- break;
- case 0x14:
- cpu_type = "x86-64/family14h";
- break;
- case 0x15:
- cpu_type = "x86-64/family15h";
- break;
- default:
- return -ENODEV;
- }
- model = &op_amd_spec;
- break;
-
- case X86_VENDOR_INTEL:
- switch (family) {
- /* Pentium IV */
- case 0xf:
- p4_init(&cpu_type);
- break;
-
- /* A P6-class processor */
- case 6:
- ppro_init(&cpu_type);
- break;
-
- default:
- break;
- }
-
- if (cpu_type)
- break;
-
- if (!boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
- return -ENODEV;
-
- /* use arch perfmon as fallback */
- cpu_type = "i386/arch_perfmon";
- model = &op_arch_perfmon_spec;
- break;
-
- default:
- return -ENODEV;
- }
-
- /* default values, can be overwritten by model */
- ops->create_files = nmi_create_files;
- ops->setup = nmi_setup;
- ops->shutdown = nmi_shutdown;
- ops->start = nmi_start;
- ops->stop = nmi_stop;
- ops->cpu_type = cpu_type;
-
- if (model->init)
- ret = model->init(ops);
- if (ret)
- return ret;
-
- if (!model->num_virt_counters)
- model->num_virt_counters = model->num_counters;
-
- mux_init(ops);
-
- init_suspend_resume();
-
- printk(KERN_INFO "oprofile: using NMI interrupt.\n");
- return 0;
-}
-
-void op_nmi_exit(void)
-{
- exit_suspend_resume();
-}
diff --git a/arch/x86/oprofile/op_counter.h b/arch/x86/oprofile/op_counter.h
deleted file mode 100644
index 0b7b7b179cbe..000000000000
--- a/arch/x86/oprofile/op_counter.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * @file op_counter.h
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon
- */
-
-#ifndef OP_COUNTER_H
-#define OP_COUNTER_H
-
-#define OP_MAX_COUNTER 32
-
-/* Per-perfctr configuration as set via
- * oprofilefs.
- */
-struct op_counter_config {
- unsigned long count;
- unsigned long enabled;
- unsigned long event;
- unsigned long kernel;
- unsigned long user;
- unsigned long unit_mask;
- unsigned long extra;
-};
-
-extern struct op_counter_config counter_config[];
-
-#endif /* OP_COUNTER_H */
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
deleted file mode 100644
index 660a83c8287b..000000000000
--- a/arch/x86/oprofile/op_model_amd.c
+++ /dev/null
@@ -1,542 +0,0 @@
-/*
- * @file op_model_amd.c
- * athlon / K7 / K8 / Family 10h model-specific MSR operations
- *
- * @remark Copyright 2002-2009 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon
- * @author Philippe Elie
- * @author Graydon Hoare
- * @author Robert Richter <robert.richter@amd.com>
- * @author Barry Kasindorf <barry.kasindorf@amd.com>
- * @author Jason Yeh <jason.yeh@amd.com>
- * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
- */
-
-#include <linux/oprofile.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/percpu.h>
-
-#include <asm/ptrace.h>
-#include <asm/msr.h>
-#include <asm/nmi.h>
-#include <asm/apic.h>
-#include <asm/processor.h>
-
-#include "op_x86_model.h"
-#include "op_counter.h"
-
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-#define NUM_VIRT_COUNTERS 32
-#else
-#define NUM_VIRT_COUNTERS 0
-#endif
-
-#define OP_EVENT_MASK 0x0FFF
-#define OP_CTR_OVERFLOW (1ULL<<31)
-
-#define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21))
-
-static int num_counters;
-static unsigned long reset_value[OP_MAX_COUNTER];
-
-#define IBS_FETCH_SIZE 6
-#define IBS_OP_SIZE 12
-
-static u32 ibs_caps;
-
-struct ibs_config {
- unsigned long op_enabled;
- unsigned long fetch_enabled;
- unsigned long max_cnt_fetch;
- unsigned long max_cnt_op;
- unsigned long rand_en;
- unsigned long dispatched_ops;
- unsigned long branch_target;
-};
-
-struct ibs_state {
- u64 ibs_op_ctl;
- int branch_target;
- unsigned long sample_size;
-};
-
-static struct ibs_config ibs_config;
-static struct ibs_state ibs_state;
-
-/*
- * IBS randomization macros
- */
-#define IBS_RANDOM_BITS 12
-#define IBS_RANDOM_MASK ((1ULL << IBS_RANDOM_BITS) - 1)
-#define IBS_RANDOM_MAXCNT_OFFSET (1ULL << (IBS_RANDOM_BITS - 5))
-
-/*
- * 16-bit Linear Feedback Shift Register (LFSR)
- *
- * 16 14 13 11
- * Feedback polynomial = X + X + X + X + 1
- */
-static unsigned int lfsr_random(void)
-{
- static unsigned int lfsr_value = 0xF00D;
- unsigned int bit;
-
- /* Compute next bit to shift in */
- bit = ((lfsr_value >> 0) ^
- (lfsr_value >> 2) ^
- (lfsr_value >> 3) ^
- (lfsr_value >> 5)) & 0x0001;
-
- /* Advance to next register value */
- lfsr_value = (lfsr_value >> 1) | (bit << 15);
-
- return lfsr_value;
-}
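(Aside, not from the patch: this is a plain 16-bit Fibonacci LFSR shifting right, with taps at bits 0, 2, 3 and 5 of the register, matching the x^16, x^14, x^13 and x^11 terms of the polynomial quoted above. A self-contained sketch of the same update step, assuming only standard C:)

#include <stdio.h>

/* Same update step as lfsr_random() above, without the static state. */
static unsigned int lfsr_step(unsigned int v)
{
	unsigned int bit = ((v >> 0) ^ (v >> 2) ^ (v >> 3) ^ (v >> 5)) & 0x0001;

	return ((v >> 1) | (bit << 15)) & 0xFFFF;
}

int main(void)
{
	unsigned int v = 0xF00D;	/* same seed as the kernel code */
	int i;

	for (i = 0; i < 4; i++) {
		v = lfsr_step(v);
		printf("step %d: 0x%04X\n", i + 1, v);
	}
	return 0;
}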
-
-/*
- * IBS software randomization
- *
- * The IBS periodic op counter is randomized in software. The lower 12
- * bits of the 20 bit counter are randomized. IbsOpCurCnt is
- * initialized with a 12 bit random value.
- */
-static inline u64 op_amd_randomize_ibs_op(u64 val)
-{
- unsigned int random = lfsr_random();
-
- if (!(ibs_caps & IBS_CAPS_RDWROPCNT))
- /*
- * Work around hardware that cannot write to IbsOpCurCnt.
- *
- * Randomize the lower 8 bits of the 16 bit
- * IbsOpMaxCnt [15:0] value in the range of -128 to
- * +127 by adding/subtracting an offset to the
- * maximum count (IbsOpMaxCnt).
- *
- * To avoid over or underflows and protect upper bits
- * starting at bit 16, the initial value for
- * IbsOpMaxCnt must fit in the range from 0x0081 to
- * 0xff80.
- */
- val += (s8)(random >> 4);
- else
- val |= (u64)(random & IBS_RANDOM_MASK) << 32;
-
- return val;
-}
-
-static inline void
-op_amd_handle_ibs(struct pt_regs * const regs,
- struct op_msrs const * const msrs)
-{
- u64 val, ctl;
- struct op_entry entry;
-
- if (!ibs_caps)
- return;
-
- if (ibs_config.fetch_enabled) {
- rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
- if (ctl & IBS_FETCH_VAL) {
- rdmsrl(MSR_AMD64_IBSFETCHLINAD, val);
- oprofile_write_reserve(&entry, regs, val,
- IBS_FETCH_CODE, IBS_FETCH_SIZE);
- oprofile_add_data64(&entry, val);
- oprofile_add_data64(&entry, ctl);
- rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val);
- oprofile_add_data64(&entry, val);
- oprofile_write_commit(&entry);
-
- /* reenable the IRQ */
- ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT);
- ctl |= IBS_FETCH_ENABLE;
- wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
- }
- }
-
- if (ibs_config.op_enabled) {
- rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
- if (ctl & IBS_OP_VAL) {
- rdmsrl(MSR_AMD64_IBSOPRIP, val);
- oprofile_write_reserve(&entry, regs, val, IBS_OP_CODE,
- ibs_state.sample_size);
- oprofile_add_data64(&entry, val);
- rdmsrl(MSR_AMD64_IBSOPDATA, val);
- oprofile_add_data64(&entry, val);
- rdmsrl(MSR_AMD64_IBSOPDATA2, val);
- oprofile_add_data64(&entry, val);
- rdmsrl(MSR_AMD64_IBSOPDATA3, val);
- oprofile_add_data64(&entry, val);
- rdmsrl(MSR_AMD64_IBSDCLINAD, val);
- oprofile_add_data64(&entry, val);
- rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
- oprofile_add_data64(&entry, val);
- if (ibs_state.branch_target) {
- rdmsrl(MSR_AMD64_IBSBRTARGET, val);
- oprofile_add_data(&entry, (unsigned long)val);
- }
- oprofile_write_commit(&entry);
-
- /* reenable the IRQ */
- ctl = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
- wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
- }
- }
-}
-
-static inline void op_amd_start_ibs(void)
-{
- u64 val;
-
- if (!ibs_caps)
- return;
-
- memset(&ibs_state, 0, sizeof(ibs_state));
-
- /*
- * Note: since the max count settings may be out of range, we
- * write back the actually used values so that userland can
- * read them.
- */
-
- if (ibs_config.fetch_enabled) {
- val = ibs_config.max_cnt_fetch >> 4;
- val = min(val, IBS_FETCH_MAX_CNT);
- ibs_config.max_cnt_fetch = val << 4;
- val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
- val |= IBS_FETCH_ENABLE;
- wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
- }
-
- if (ibs_config.op_enabled) {
- val = ibs_config.max_cnt_op >> 4;
- if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) {
- /*
- * IbsOpCurCnt not supported. See
- * op_amd_randomize_ibs_op() for details.
- */
- val = clamp(val, 0x0081ULL, 0xFF80ULL);
- ibs_config.max_cnt_op = val << 4;
- } else {
- /*
- * The start value is randomized with a
- * positive offset; we need to compensate for it
- * with half of the randomized range. Also
- * avoid underflows.
- */
- val += IBS_RANDOM_MAXCNT_OFFSET;
- if (ibs_caps & IBS_CAPS_OPCNTEXT)
- val = min(val, IBS_OP_MAX_CNT_EXT);
- else
- val = min(val, IBS_OP_MAX_CNT);
- ibs_config.max_cnt_op =
- (val - IBS_RANDOM_MAXCNT_OFFSET) << 4;
- }
- val = ((val & ~IBS_OP_MAX_CNT) << 4) | (val & IBS_OP_MAX_CNT);
- val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
- val |= IBS_OP_ENABLE;
- ibs_state.ibs_op_ctl = val;
- ibs_state.sample_size = IBS_OP_SIZE;
- if (ibs_config.branch_target) {
- ibs_state.branch_target = 1;
- ibs_state.sample_size++;
- }
- val = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
- wrmsrl(MSR_AMD64_IBSOPCTL, val);
- }
-}
-
-static void op_amd_stop_ibs(void)
-{
- if (!ibs_caps)
- return;
-
- if (ibs_config.fetch_enabled)
- /* clear max count and enable */
- wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);
-
- if (ibs_config.op_enabled)
- /* clear max count and enable */
- wrmsrl(MSR_AMD64_IBSOPCTL, 0);
-}
-
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-
-static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
- struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- /* enable active counters */
- for (i = 0; i < num_counters; ++i) {
- int virt = op_x86_phys_to_virt(i);
- if (!reset_value[virt])
- continue;
- rdmsrl(msrs->controls[i].addr, val);
- val &= model->reserved;
- val |= op_x86_get_ctrl(model, &counter_config[virt]);
- wrmsrl(msrs->controls[i].addr, val);
- }
-}
-
-#endif
-
-/* functions for op_amd_spec */
-
-static void op_amd_shutdown(struct op_msrs const * const msrs)
-{
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (!msrs->counters[i].addr)
- continue;
- release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
- release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
- }
-}
-
-static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
-{
- int i;
-
- for (i = 0; i < num_counters; i++) {
- if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
- goto fail;
- if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
- release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
- goto fail;
- }
- /* both registers must be reserved */
- if (num_counters == AMD64_NUM_COUNTERS_CORE) {
- msrs->counters[i].addr = MSR_F15H_PERF_CTR + (i << 1);
- msrs->controls[i].addr = MSR_F15H_PERF_CTL + (i << 1);
- } else {
- msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
- msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
- }
- continue;
- fail:
- if (!counter_config[i].enabled)
- continue;
- op_x86_warn_reserved(i);
- op_amd_shutdown(msrs);
- return -EBUSY;
- }
-
- return 0;
-}
-
-static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
- struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- /* setup reset_value */
- for (i = 0; i < OP_MAX_COUNTER; ++i) {
- if (counter_config[i].enabled
- && msrs->counters[op_x86_virt_to_phys(i)].addr)
- reset_value[i] = counter_config[i].count;
- else
- reset_value[i] = 0;
- }
-
- /* clear all counters */
- for (i = 0; i < num_counters; ++i) {
- if (!msrs->controls[i].addr)
- continue;
- rdmsrl(msrs->controls[i].addr, val);
- if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
- op_x86_warn_in_use(i);
- val &= model->reserved;
- wrmsrl(msrs->controls[i].addr, val);
- /*
- * avoid a false detection of ctr overflows in NMI
- * handler
- */
- wrmsrl(msrs->counters[i].addr, -1LL);
- }
-
- /* enable active counters */
- for (i = 0; i < num_counters; ++i) {
- int virt = op_x86_phys_to_virt(i);
- if (!reset_value[virt])
- continue;
-
- /* setup counter registers */
- wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
-
- /* setup control registers */
- rdmsrl(msrs->controls[i].addr, val);
- val &= model->reserved;
- val |= op_x86_get_ctrl(model, &counter_config[virt]);
- wrmsrl(msrs->controls[i].addr, val);
- }
-}
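(A short contextual note, not in the original file: the perfctrs count upward and raise the NMI when they wrap past zero, so programming -(u64)count is the usual trick to make the overflow arrive after exactly `count` events. Illustrative arithmetic only:)

/* A count of 100000 events (0x186A0) is programmed as its two's complement. */
unsigned long long count = 100000;
unsigned long long start = 0ULL - count;	/* 0xFFFFFFFFFFFE7960 */
/* after 100000 increments the counter wraps past zero and the NMI fires */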
-
-static int op_amd_check_ctrs(struct pt_regs * const regs,
- struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- int virt = op_x86_phys_to_virt(i);
- if (!reset_value[virt])
- continue;
- rdmsrl(msrs->counters[i].addr, val);
- /* bit is clear if overflowed: */
- if (val & OP_CTR_OVERFLOW)
- continue;
- oprofile_add_sample(regs, virt);
- wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
- }
-
- op_amd_handle_ibs(regs, msrs);
-
- /* See op_model_ppro.c */
- return 1;
-}
-
-static void op_amd_start(struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (!reset_value[op_x86_phys_to_virt(i)])
- continue;
- rdmsrl(msrs->controls[i].addr, val);
- val |= ARCH_PERFMON_EVENTSEL_ENABLE;
- wrmsrl(msrs->controls[i].addr, val);
- }
-
- op_amd_start_ibs();
-}
-
-static void op_amd_stop(struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- /*
- * Subtle: stop on all counters to avoid race with setting our
- * pm callback
- */
- for (i = 0; i < num_counters; ++i) {
- if (!reset_value[op_x86_phys_to_virt(i)])
- continue;
- rdmsrl(msrs->controls[i].addr, val);
- val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
- wrmsrl(msrs->controls[i].addr, val);
- }
-
- op_amd_stop_ibs();
-}
-
-/*
- * check and reserve APIC extended interrupt LVT offset for IBS if
- * available
- */
-
-static void init_ibs(void)
-{
- ibs_caps = get_ibs_caps();
-
- if (!ibs_caps)
- return;
-
- printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
-}
-
-static int (*create_arch_files)(struct dentry *root);
-
-static int setup_ibs_files(struct dentry *root)
-{
- struct dentry *dir;
- int ret = 0;
-
- /* architecture specific files */
- if (create_arch_files)
- ret = create_arch_files(root);
-
- if (ret)
- return ret;
-
- if (!ibs_caps)
- return ret;
-
- /* model specific files */
-
- /* setup some reasonable defaults */
- memset(&ibs_config, 0, sizeof(ibs_config));
- ibs_config.max_cnt_fetch = 250000;
- ibs_config.max_cnt_op = 250000;
-
- if (ibs_caps & IBS_CAPS_FETCHSAM) {
- dir = oprofilefs_mkdir(root, "ibs_fetch");
- oprofilefs_create_ulong(dir, "enable",
- &ibs_config.fetch_enabled);
- oprofilefs_create_ulong(dir, "max_count",
- &ibs_config.max_cnt_fetch);
- oprofilefs_create_ulong(dir, "rand_enable",
- &ibs_config.rand_en);
- }
-
- if (ibs_caps & IBS_CAPS_OPSAM) {
- dir = oprofilefs_mkdir(root, "ibs_op");
- oprofilefs_create_ulong(dir, "enable",
- &ibs_config.op_enabled);
- oprofilefs_create_ulong(dir, "max_count",
- &ibs_config.max_cnt_op);
- if (ibs_caps & IBS_CAPS_OPCNT)
- oprofilefs_create_ulong(dir, "dispatched_ops",
- &ibs_config.dispatched_ops);
- if (ibs_caps & IBS_CAPS_BRNTRGT)
- oprofilefs_create_ulong(dir, "branch_target",
- &ibs_config.branch_target);
- }
-
- return 0;
-}
-
-struct op_x86_model_spec op_amd_spec;
-
-static int op_amd_init(struct oprofile_operations *ops)
-{
- init_ibs();
- create_arch_files = ops->create_files;
- ops->create_files = setup_ibs_files;
-
- if (boot_cpu_data.x86 == 0x15) {
- num_counters = AMD64_NUM_COUNTERS_CORE;
- } else {
- num_counters = AMD64_NUM_COUNTERS;
- }
-
- op_amd_spec.num_counters = num_counters;
- op_amd_spec.num_controls = num_counters;
- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
-
- return 0;
-}
-
-struct op_x86_model_spec op_amd_spec = {
- /* num_counters/num_controls filled in at runtime */
- .reserved = MSR_AMD_EVENTSEL_RESERVED,
- .event_mask = OP_EVENT_MASK,
- .init = op_amd_init,
- .fill_in_addresses = &op_amd_fill_in_addresses,
- .setup_ctrs = &op_amd_setup_ctrs,
- .check_ctrs = &op_amd_check_ctrs,
- .start = &op_amd_start,
- .stop = &op_amd_stop,
- .shutdown = &op_amd_shutdown,
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
- .switch_ctrl = &op_mux_switch_ctrl,
-#endif
-};
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
deleted file mode 100644
index ad1d91f475ab..000000000000
--- a/arch/x86/oprofile/op_model_p4.c
+++ /dev/null
@@ -1,723 +0,0 @@
-/**
- * @file op_model_p4.c
- * P4 model-specific MSR operations
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Graydon Hoare
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <linux/ptrace.h>
-#include <asm/nmi.h>
-#include <asm/msr.h>
-#include <asm/fixmap.h>
-#include <asm/apic.h>
-
-
-#include "op_x86_model.h"
-#include "op_counter.h"
-
-#define NUM_EVENTS 39
-
-#define NUM_COUNTERS_NON_HT 8
-#define NUM_ESCRS_NON_HT 45
-#define NUM_CCCRS_NON_HT 18
-#define NUM_CONTROLS_NON_HT (NUM_ESCRS_NON_HT + NUM_CCCRS_NON_HT)
-
-#define NUM_COUNTERS_HT2 4
-#define NUM_ESCRS_HT2 23
-#define NUM_CCCRS_HT2 9
-#define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2)
-
-#define OP_CTR_OVERFLOW (1ULL<<31)
-
-static unsigned int num_counters = NUM_COUNTERS_NON_HT;
-static unsigned int num_controls = NUM_CONTROLS_NON_HT;
-
-/* this has to be checked dynamically since the
- hyper-threadedness of a chip is discovered at
- kernel boot-time. */
-static inline void setup_num_counters(void)
-{
-#ifdef CONFIG_SMP
- if (smp_num_siblings == 2) {
- num_counters = NUM_COUNTERS_HT2;
- num_controls = NUM_CONTROLS_HT2;
- }
-#endif
-}
-
-static inline int addr_increment(void)
-{
-#ifdef CONFIG_SMP
- return smp_num_siblings == 2 ? 2 : 1;
-#else
- return 1;
-#endif
-}
-
-
-/* tables to simulate simplified hardware view of p4 registers */
-struct p4_counter_binding {
- int virt_counter;
- int counter_address;
- int cccr_address;
-};
-
-struct p4_event_binding {
- int escr_select; /* value to put in CCCR */
- int event_select; /* value to put in ESCR */
- struct {
- int virt_counter; /* for this counter... */
- int escr_address; /* use this ESCR */
- } bindings[2];
-};
-
-/* nb: these CTR_* defines are a duplicate of defines in
- event/i386.p4*events. */
-
-
-#define CTR_BPU_0 (1 << 0)
-#define CTR_MS_0 (1 << 1)
-#define CTR_FLAME_0 (1 << 2)
-#define CTR_IQ_4 (1 << 3)
-#define CTR_BPU_2 (1 << 4)
-#define CTR_MS_2 (1 << 5)
-#define CTR_FLAME_2 (1 << 6)
-#define CTR_IQ_5 (1 << 7)
-
-static struct p4_counter_binding p4_counters[NUM_COUNTERS_NON_HT] = {
- { CTR_BPU_0, MSR_P4_BPU_PERFCTR0, MSR_P4_BPU_CCCR0 },
- { CTR_MS_0, MSR_P4_MS_PERFCTR0, MSR_P4_MS_CCCR0 },
- { CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0 },
- { CTR_IQ_4, MSR_P4_IQ_PERFCTR4, MSR_P4_IQ_CCCR4 },
- { CTR_BPU_2, MSR_P4_BPU_PERFCTR2, MSR_P4_BPU_CCCR2 },
- { CTR_MS_2, MSR_P4_MS_PERFCTR2, MSR_P4_MS_CCCR2 },
- { CTR_FLAME_2, MSR_P4_FLAME_PERFCTR2, MSR_P4_FLAME_CCCR2 },
- { CTR_IQ_5, MSR_P4_IQ_PERFCTR5, MSR_P4_IQ_CCCR5 }
-};
-
-#define NUM_UNUSED_CCCRS (NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT)
-
-/* p4 event codes in libop/op_event.h are indices into this table. */
-
-static struct p4_event_binding p4_events[NUM_EVENTS] = {
-
- { /* BRANCH_RETIRED */
- 0x05, 0x06,
- { {CTR_IQ_4, MSR_P4_CRU_ESCR2},
- {CTR_IQ_5, MSR_P4_CRU_ESCR3} }
- },
-
- { /* MISPRED_BRANCH_RETIRED */
- 0x04, 0x03,
- { { CTR_IQ_4, MSR_P4_CRU_ESCR0},
- { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
- },
-
- { /* TC_DELIVER_MODE */
- 0x01, 0x01,
- { { CTR_MS_0, MSR_P4_TC_ESCR0},
- { CTR_MS_2, MSR_P4_TC_ESCR1} }
- },
-
- { /* BPU_FETCH_REQUEST */
- 0x00, 0x03,
- { { CTR_BPU_0, MSR_P4_BPU_ESCR0},
- { CTR_BPU_2, MSR_P4_BPU_ESCR1} }
- },
-
- { /* ITLB_REFERENCE */
- 0x03, 0x18,
- { { CTR_BPU_0, MSR_P4_ITLB_ESCR0},
- { CTR_BPU_2, MSR_P4_ITLB_ESCR1} }
- },
-
- { /* MEMORY_CANCEL */
- 0x05, 0x02,
- { { CTR_FLAME_0, MSR_P4_DAC_ESCR0},
- { CTR_FLAME_2, MSR_P4_DAC_ESCR1} }
- },
-
- { /* MEMORY_COMPLETE */
- 0x02, 0x08,
- { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
- { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }
- },
-
- { /* LOAD_PORT_REPLAY */
- 0x02, 0x04,
- { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
- { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }
- },
-
- { /* STORE_PORT_REPLAY */
- 0x02, 0x05,
- { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
- { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }
- },
-
- { /* MOB_LOAD_REPLAY */
- 0x02, 0x03,
- { { CTR_BPU_0, MSR_P4_MOB_ESCR0},
- { CTR_BPU_2, MSR_P4_MOB_ESCR1} }
- },
-
- { /* PAGE_WALK_TYPE */
- 0x04, 0x01,
- { { CTR_BPU_0, MSR_P4_PMH_ESCR0},
- { CTR_BPU_2, MSR_P4_PMH_ESCR1} }
- },
-
- { /* BSQ_CACHE_REFERENCE */
- 0x07, 0x0c,
- { { CTR_BPU_0, MSR_P4_BSU_ESCR0},
- { CTR_BPU_2, MSR_P4_BSU_ESCR1} }
- },
-
- { /* IOQ_ALLOCATION */
- 0x06, 0x03,
- { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
- { 0, 0 } }
- },
-
- { /* IOQ_ACTIVE_ENTRIES */
- 0x06, 0x1a,
- { { CTR_BPU_2, MSR_P4_FSB_ESCR1},
- { 0, 0 } }
- },
-
- { /* FSB_DATA_ACTIVITY */
- 0x06, 0x17,
- { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
- { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
- },
-
- { /* BSQ_ALLOCATION */
- 0x07, 0x05,
- { { CTR_BPU_0, MSR_P4_BSU_ESCR0},
- { 0, 0 } }
- },
-
- { /* BSQ_ACTIVE_ENTRIES */
- 0x07, 0x06,
- { { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */},
- { 0, 0 } }
- },
-
- { /* X87_ASSIST */
- 0x05, 0x03,
- { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
- { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
- },
-
- { /* SSE_INPUT_ASSIST */
- 0x01, 0x34,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* PACKED_SP_UOP */
- 0x01, 0x08,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* PACKED_DP_UOP */
- 0x01, 0x0c,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* SCALAR_SP_UOP */
- 0x01, 0x0a,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* SCALAR_DP_UOP */
- 0x01, 0x0e,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* 64BIT_MMX_UOP */
- 0x01, 0x02,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* 128BIT_MMX_UOP */
- 0x01, 0x1a,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* X87_FP_UOP */
- 0x01, 0x04,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* X87_SIMD_MOVES_UOP */
- 0x01, 0x2e,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* MACHINE_CLEAR */
- 0x05, 0x02,
- { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
- { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
- },
-
- { /* GLOBAL_POWER_EVENTS */
- 0x06, 0x13 /* older manual says 0x05, newer 0x13 */,
- { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
- { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
- },
-
- { /* TC_MS_XFER */
- 0x00, 0x05,
- { { CTR_MS_0, MSR_P4_MS_ESCR0},
- { CTR_MS_2, MSR_P4_MS_ESCR1} }
- },
-
- { /* UOP_QUEUE_WRITES */
- 0x00, 0x09,
- { { CTR_MS_0, MSR_P4_MS_ESCR0},
- { CTR_MS_2, MSR_P4_MS_ESCR1} }
- },
-
- { /* FRONT_END_EVENT */
- 0x05, 0x08,
- { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
- { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
- },
-
- { /* EXECUTION_EVENT */
- 0x05, 0x0c,
- { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
- { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
- },
-
- { /* REPLAY_EVENT */
- 0x05, 0x09,
- { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
- { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
- },
-
- { /* INSTR_RETIRED */
- 0x04, 0x02,
- { { CTR_IQ_4, MSR_P4_CRU_ESCR0},
- { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
- },
-
- { /* UOPS_RETIRED */
- 0x04, 0x01,
- { { CTR_IQ_4, MSR_P4_CRU_ESCR0},
- { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
- },
-
- { /* UOP_TYPE */
- 0x02, 0x02,
- { { CTR_IQ_4, MSR_P4_RAT_ESCR0},
- { CTR_IQ_5, MSR_P4_RAT_ESCR1} }
- },
-
- { /* RETIRED_MISPRED_BRANCH_TYPE */
- 0x02, 0x05,
- { { CTR_MS_0, MSR_P4_TBPU_ESCR0},
- { CTR_MS_2, MSR_P4_TBPU_ESCR1} }
- },
-
- { /* RETIRED_BRANCH_TYPE */
- 0x02, 0x04,
- { { CTR_MS_0, MSR_P4_TBPU_ESCR0},
- { CTR_MS_2, MSR_P4_TBPU_ESCR1} }
- }
-};
-
-
-#define MISC_PMC_ENABLED_P(x) ((x) & 1 << 7)
-
-#define ESCR_RESERVED_BITS 0x80000003
-#define ESCR_CLEAR(escr) ((escr) &= ESCR_RESERVED_BITS)
-#define ESCR_SET_USR_0(escr, usr) ((escr) |= (((usr) & 1) << 2))
-#define ESCR_SET_OS_0(escr, os) ((escr) |= (((os) & 1) << 3))
-#define ESCR_SET_USR_1(escr, usr) ((escr) |= (((usr) & 1)))
-#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1))
-#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25))
-#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9))
-
-#define CCCR_RESERVED_BITS 0x38030FFF
-#define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS)
-#define CCCR_SET_REQUIRED_BITS(cccr) ((cccr) |= 0x00030000)
-#define CCCR_SET_ESCR_SELECT(cccr, sel) ((cccr) |= (((sel) & 0x07) << 13))
-#define CCCR_SET_PMI_OVF_0(cccr) ((cccr) |= (1<<26))
-#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27))
-#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12))
-#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12))
-#define CCCR_OVF_P(cccr) ((cccr) & (1U<<31))
-#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31)))
-
-
-/* this assigns a "stagger" to the current CPU, which is used throughout
- the code in this module as an extra array offset, to select the "even"
- or "odd" part of all the divided resources. */
-static unsigned int get_stagger(void)
-{
-#ifdef CONFIG_SMP
- int cpu = smp_processor_id();
- return cpu != cpumask_first(this_cpu_cpumask_var_ptr(cpu_sibling_map));
-#endif
- return 0;
-}
-
-
-/* finally, mediate access to a real hardware counter
- by passing a "virtual" counter number to this macro,
- along with your stagger setting. */
-#define VIRT_CTR(stagger, i) ((i) + ((num_counters) * (stagger)))
-
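(Purely illustrative, not part of the removed code: with NUM_COUNTERS_HT2 == 4, the stagger simply offsets the odd sibling's counters past the even sibling's entries in the p4_counters[] table. A hypothetical walk-through of the mapping:)

#define DEMO_NUM_COUNTERS	4
#define DEMO_VIRT_CTR(stagger, i)	((i) + (DEMO_NUM_COUNTERS * (stagger)))
/* stagger 0 (even sibling): i = 0..3 -> p4_counters[] slots 0..3 */
/* stagger 1 (odd sibling):  i = 0..3 -> p4_counters[] slots 4..7 */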
-static unsigned long reset_value[NUM_COUNTERS_NON_HT];
-
-static void p4_shutdown(struct op_msrs const * const msrs)
-{
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (msrs->counters[i].addr)
- release_perfctr_nmi(msrs->counters[i].addr);
- }
- /*
- * some of the control registers are specially reserved in
- * conjunction with the counter registers (hence the starting offset).
- * This saves a few bits.
- */
- for (i = num_counters; i < num_controls; ++i) {
- if (msrs->controls[i].addr)
- release_evntsel_nmi(msrs->controls[i].addr);
- }
-}
-
-static int p4_fill_in_addresses(struct op_msrs * const msrs)
-{
- unsigned int i;
- unsigned int addr, cccraddr, stag;
-
- setup_num_counters();
- stag = get_stagger();
-
- /* the counter & cccr registers we pay attention to */
- for (i = 0; i < num_counters; ++i) {
- addr = p4_counters[VIRT_CTR(stag, i)].counter_address;
- cccraddr = p4_counters[VIRT_CTR(stag, i)].cccr_address;
- if (reserve_perfctr_nmi(addr)) {
- msrs->counters[i].addr = addr;
- msrs->controls[i].addr = cccraddr;
- }
- }
-
- /* 43 ESCR registers in three or four discontiguous groups */
- for (addr = MSR_P4_BSU_ESCR0 + stag;
- addr < MSR_P4_IQ_ESCR0; ++i, addr += addr_increment()) {
- if (reserve_evntsel_nmi(addr))
- msrs->controls[i].addr = addr;
- }
-
- /* no IQ_ESCR0/1 on some models, so we save BSU_ESCR0/1 a second time
- * to avoid a special case in nmi_{save|restore}_registers() */
- if (boot_cpu_data.x86_model >= 0x3) {
- for (addr = MSR_P4_BSU_ESCR0 + stag;
- addr <= MSR_P4_BSU_ESCR1; ++i, addr += addr_increment()) {
- if (reserve_evntsel_nmi(addr))
- msrs->controls[i].addr = addr;
- }
- } else {
- for (addr = MSR_P4_IQ_ESCR0 + stag;
- addr <= MSR_P4_IQ_ESCR1; ++i, addr += addr_increment()) {
- if (reserve_evntsel_nmi(addr))
- msrs->controls[i].addr = addr;
- }
- }
-
- for (addr = MSR_P4_RAT_ESCR0 + stag;
- addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) {
- if (reserve_evntsel_nmi(addr))
- msrs->controls[i].addr = addr;
- }
-
- for (addr = MSR_P4_MS_ESCR0 + stag;
- addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) {
- if (reserve_evntsel_nmi(addr))
- msrs->controls[i].addr = addr;
- }
-
- for (addr = MSR_P4_IX_ESCR0 + stag;
- addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) {
- if (reserve_evntsel_nmi(addr))
- msrs->controls[i].addr = addr;
- }
-
- /* there are 2 remaining non-contiguously located ESCRs */
-
- if (num_counters == NUM_COUNTERS_NON_HT) {
- /* standard non-HT CPUs handle both remaining ESCRs */
- if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5))
- msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
- if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR4))
- msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;
-
- } else if (stag == 0) {
- /* HT CPUs give the first remainder to the even thread, as
- the 32nd control register */
- if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR4))
- msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;
-
- } else {
- /* and two copies of the second to the odd thread,
- for the 22nd and 23rd control registers */
- if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5)) {
- msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
- msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
- }
- }
-
- for (i = 0; i < num_counters; ++i) {
- if (!counter_config[i].enabled)
- continue;
- if (msrs->controls[i].addr)
- continue;
- op_x86_warn_reserved(i);
- p4_shutdown(msrs);
- return -EBUSY;
- }
-
- return 0;
-}
-
-
-static void pmc_setup_one_p4_counter(unsigned int ctr)
-{
- int i;
- int const maxbind = 2;
- unsigned int cccr = 0;
- unsigned int escr = 0;
- unsigned int high = 0;
- unsigned int counter_bit;
- struct p4_event_binding *ev = NULL;
- unsigned int stag;
-
- stag = get_stagger();
-
- /* convert from counter *number* to counter *bit* */
- counter_bit = 1 << VIRT_CTR(stag, ctr);
-
- /* find our event binding structure. */
- if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) {
- printk(KERN_ERR
- "oprofile: P4 event code 0x%lx out of range\n",
- counter_config[ctr].event);
- return;
- }
-
- ev = &(p4_events[counter_config[ctr].event - 1]);
-
- for (i = 0; i < maxbind; i++) {
- if (ev->bindings[i].virt_counter & counter_bit) {
-
- /* modify ESCR */
- rdmsr(ev->bindings[i].escr_address, escr, high);
- ESCR_CLEAR(escr);
- if (stag == 0) {
- ESCR_SET_USR_0(escr, counter_config[ctr].user);
- ESCR_SET_OS_0(escr, counter_config[ctr].kernel);
- } else {
- ESCR_SET_USR_1(escr, counter_config[ctr].user);
- ESCR_SET_OS_1(escr, counter_config[ctr].kernel);
- }
- ESCR_SET_EVENT_SELECT(escr, ev->event_select);
- ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask);
- wrmsr(ev->bindings[i].escr_address, escr, high);
-
- /* modify CCCR */
- rdmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address,
- cccr, high);
- CCCR_CLEAR(cccr);
- CCCR_SET_REQUIRED_BITS(cccr);
- CCCR_SET_ESCR_SELECT(cccr, ev->escr_select);
- if (stag == 0)
- CCCR_SET_PMI_OVF_0(cccr);
- else
- CCCR_SET_PMI_OVF_1(cccr);
- wrmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address,
- cccr, high);
- return;
- }
- }
-
- printk(KERN_ERR
- "oprofile: P4 event code 0x%lx no binding, stag %d ctr %d\n",
- counter_config[ctr].event, stag, ctr);
-}
-
-
-static void p4_setup_ctrs(struct op_x86_model_spec const *model,
- struct op_msrs const * const msrs)
-{
- unsigned int i;
- unsigned int low, high;
- unsigned int stag;
-
- stag = get_stagger();
-
- rdmsr(MSR_IA32_MISC_ENABLE, low, high);
- if (!MISC_PMC_ENABLED_P(low)) {
- printk(KERN_ERR "oprofile: P4 PMC not available\n");
- return;
- }
-
- /* clear the cccrs we will use */
- for (i = 0; i < num_counters; i++) {
- if (unlikely(!msrs->controls[i].addr))
- continue;
- rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
- CCCR_CLEAR(low);
- CCCR_SET_REQUIRED_BITS(low);
- wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
- }
-
- /* clear all escrs (including those outside our concern) */
- for (i = num_counters; i < num_controls; i++) {
- if (unlikely(!msrs->controls[i].addr))
- continue;
- wrmsr(msrs->controls[i].addr, 0, 0);
- }
-
- /* setup all counters */
- for (i = 0; i < num_counters; ++i) {
- if (counter_config[i].enabled && msrs->controls[i].addr) {
- reset_value[i] = counter_config[i].count;
- pmc_setup_one_p4_counter(i);
- wrmsrl(p4_counters[VIRT_CTR(stag, i)].counter_address,
- -(u64)counter_config[i].count);
- } else {
- reset_value[i] = 0;
- }
- }
-}
-
-
-static int p4_check_ctrs(struct pt_regs * const regs,
- struct op_msrs const * const msrs)
-{
- unsigned long ctr, low, high, stag, real;
- int i;
-
- stag = get_stagger();
-
- for (i = 0; i < num_counters; ++i) {
-
- if (!reset_value[i])
- continue;
-
- /*
- * there is some eccentricity in the hardware which
- * requires that we perform 2 extra corrections:
- *
- * - check both the CCCR:OVF flag for overflow and the
- * counter high bit for un-flagged overflows.
- *
- * - write the counter back twice to ensure it gets
- * updated properly.
- *
- * the former seems to be related to extra NMIs happening
- * during the current NMI; the latter is reported as errata
- * N15 in intel doc 249199-029, pentium 4 specification
- * update, though their suggested work-around does not
- * appear to solve the problem.
- */
-
- real = VIRT_CTR(stag, i);
-
- rdmsr(p4_counters[real].cccr_address, low, high);
- rdmsr(p4_counters[real].counter_address, ctr, high);
- if (CCCR_OVF_P(low) || !(ctr & OP_CTR_OVERFLOW)) {
- oprofile_add_sample(regs, i);
- wrmsrl(p4_counters[real].counter_address,
- -(u64)reset_value[i]);
- CCCR_CLEAR_OVF(low);
- wrmsr(p4_counters[real].cccr_address, low, high);
- wrmsrl(p4_counters[real].counter_address,
- -(u64)reset_value[i]);
- }
- }
-
- /* P4 quirk: you have to re-unmask the apic vector */
- apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
-
- /* See op_model_ppro.c */
- return 1;
-}
-
-
-static void p4_start(struct op_msrs const * const msrs)
-{
- unsigned int low, high, stag;
- int i;
-
- stag = get_stagger();
-
- for (i = 0; i < num_counters; ++i) {
- if (!reset_value[i])
- continue;
- rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
- CCCR_SET_ENABLE(low);
- wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
- }
-}
-
-
-static void p4_stop(struct op_msrs const * const msrs)
-{
- unsigned int low, high, stag;
- int i;
-
- stag = get_stagger();
-
- for (i = 0; i < num_counters; ++i) {
- if (!reset_value[i])
- continue;
- rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
- CCCR_SET_DISABLE(low);
- wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
- }
-}
-
-#ifdef CONFIG_SMP
-struct op_x86_model_spec op_p4_ht2_spec = {
- .num_counters = NUM_COUNTERS_HT2,
- .num_controls = NUM_CONTROLS_HT2,
- .fill_in_addresses = &p4_fill_in_addresses,
- .setup_ctrs = &p4_setup_ctrs,
- .check_ctrs = &p4_check_ctrs,
- .start = &p4_start,
- .stop = &p4_stop,
- .shutdown = &p4_shutdown
-};
-#endif
-
-struct op_x86_model_spec op_p4_spec = {
- .num_counters = NUM_COUNTERS_NON_HT,
- .num_controls = NUM_CONTROLS_NON_HT,
- .fill_in_addresses = &p4_fill_in_addresses,
- .setup_ctrs = &p4_setup_ctrs,
- .check_ctrs = &p4_check_ctrs,
- .start = &p4_start,
- .stop = &p4_stop,
- .shutdown = &p4_shutdown
-};
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
deleted file mode 100644
index 7913b6921959..000000000000
--- a/arch/x86/oprofile/op_model_ppro.c
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * @file op_model_ppro.h
- * Family 6 perfmon and architectural perfmon MSR operations
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Copyright 2008 Intel Corporation
- * @remark Read the file COPYING
- *
- * @author John Levon
- * @author Philippe Elie
- * @author Graydon Hoare
- * @author Andi Kleen
- * @author Robert Richter <robert.richter@amd.com>
- */
-
-#include <linux/oprofile.h>
-#include <linux/slab.h>
-#include <asm/ptrace.h>
-#include <asm/msr.h>
-#include <asm/apic.h>
-#include <asm/nmi.h>
-
-#include "op_x86_model.h"
-#include "op_counter.h"
-
-static int num_counters = 2;
-static int counter_width = 32;
-
-#define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21))
-
-static u64 reset_value[OP_MAX_COUNTER];
-
-static void ppro_shutdown(struct op_msrs const * const msrs)
-{
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (!msrs->counters[i].addr)
- continue;
- release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
- release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
- }
-}
-
-static int ppro_fill_in_addresses(struct op_msrs * const msrs)
-{
- int i;
-
- for (i = 0; i < num_counters; i++) {
- if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
- goto fail;
- if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
- release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
- goto fail;
- }
- /* both registers must be reserved */
- msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
- msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
- continue;
- fail:
- if (!counter_config[i].enabled)
- continue;
- op_x86_warn_reserved(i);
- ppro_shutdown(msrs);
- return -EBUSY;
- }
-
- return 0;
-}
-
-
-static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
- struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- if (boot_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
- union cpuid10_eax eax;
- eax.full = cpuid_eax(0xa);
-
- /*
- * For Core2 (family 6, model 15), don't reset the
- * counter width:
- */
- if (!(eax.split.version_id == 0 &&
- __this_cpu_read(cpu_info.x86) == 6 &&
- __this_cpu_read(cpu_info.x86_model) == 15)) {
-
- if (counter_width < eax.split.bit_width)
- counter_width = eax.split.bit_width;
- }
- }
-
- /* clear all counters */
- for (i = 0; i < num_counters; ++i) {
- if (!msrs->controls[i].addr)
- continue;
- rdmsrl(msrs->controls[i].addr, val);
- if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
- op_x86_warn_in_use(i);
- val &= model->reserved;
- wrmsrl(msrs->controls[i].addr, val);
- /*
- * avoid a false detection of ctr overflows in the NMI
- * handler
- */
- wrmsrl(msrs->counters[i].addr, -1LL);
- }
-
- /* enable active counters */
- for (i = 0; i < num_counters; ++i) {
- if (counter_config[i].enabled && msrs->counters[i].addr) {
- reset_value[i] = counter_config[i].count;
- wrmsrl(msrs->counters[i].addr, -reset_value[i]);
- rdmsrl(msrs->controls[i].addr, val);
- val &= model->reserved;
- val |= op_x86_get_ctrl(model, &counter_config[i]);
- wrmsrl(msrs->controls[i].addr, val);
- } else {
- reset_value[i] = 0;
- }
- }
-}
-
-
-static int ppro_check_ctrs(struct pt_regs * const regs,
- struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (!reset_value[i])
- continue;
- rdmsrl(msrs->counters[i].addr, val);
- if (val & (1ULL << (counter_width - 1)))
- continue;
- oprofile_add_sample(regs, i);
- wrmsrl(msrs->counters[i].addr, -reset_value[i]);
- }
-
- /* Only P6-based Pentium M needs to re-unmask the APIC vector, but it
- * doesn't hurt other P6 variants */
- apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
-
- /* We can't work out if we really handled an interrupt. We
- * might have caught a *second* counter just after it overflowed;
- * the interrupt for this counter then arrives
- * and we don't find a counter that's overflowed, so we
- * would return 0 and get dazed + confused. Instead we always
- * assume we found an overflow. This sucks.
- */
- return 1;
-}
-
-
-static void ppro_start(struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (reset_value[i]) {
- rdmsrl(msrs->controls[i].addr, val);
- val |= ARCH_PERFMON_EVENTSEL_ENABLE;
- wrmsrl(msrs->controls[i].addr, val);
- }
- }
-}
-
-
-static void ppro_stop(struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (!reset_value[i])
- continue;
- rdmsrl(msrs->controls[i].addr, val);
- val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
- wrmsrl(msrs->controls[i].addr, val);
- }
-}
-
-struct op_x86_model_spec op_ppro_spec = {
- .num_counters = 2,
- .num_controls = 2,
- .reserved = MSR_PPRO_EVENTSEL_RESERVED,
- .fill_in_addresses = &ppro_fill_in_addresses,
- .setup_ctrs = &ppro_setup_ctrs,
- .check_ctrs = &ppro_check_ctrs,
- .start = &ppro_start,
- .stop = &ppro_stop,
- .shutdown = &ppro_shutdown
-};
-
-/*
- * Architectural performance monitoring.
- *
- * Newer Intel CPUs (Core1+) have support for architectural
- * events described in CPUID 0xA. See the IA32 SDM Vol3b.18 for details.
- * The advantage of this is that it can be done without knowing about
- * the specific CPU.
- */
-
-static void arch_perfmon_setup_counters(void)
-{
- union cpuid10_eax eax;
-
- eax.full = cpuid_eax(0xa);
-
- /* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
- if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model == 15) {
- eax.split.version_id = 2;
- eax.split.num_counters = 2;
- eax.split.bit_width = 40;
- }
-
- num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
-
- op_arch_perfmon_spec.num_counters = num_counters;
- op_arch_perfmon_spec.num_controls = num_counters;
-}
-
-static int arch_perfmon_init(struct oprofile_operations *ignore)
-{
- arch_perfmon_setup_counters();
- return 0;
-}
-
-struct op_x86_model_spec op_arch_perfmon_spec = {
- .reserved = MSR_PPRO_EVENTSEL_RESERVED,
- .init = &arch_perfmon_init,
- /* num_counters/num_controls filled in at runtime */
- .fill_in_addresses = &ppro_fill_in_addresses,
- /* user space does the cpuid check for available events */
- .setup_ctrs = &ppro_setup_ctrs,
- .check_ctrs = &ppro_check_ctrs,
- .start = &ppro_start,
- .stop = &ppro_stop,
- .shutdown = &ppro_shutdown
-};
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
deleted file mode 100644
index 276cf79b5d24..000000000000
--- a/arch/x86/oprofile/op_x86_model.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * @file op_x86_model.h
- * interface to x86 model-specific MSR operations
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Graydon Hoare
- * @author Robert Richter <robert.richter@amd.com>
- */
-
-#ifndef OP_X86_MODEL_H
-#define OP_X86_MODEL_H
-
-#include <asm/types.h>
-#include <asm/perf_event.h>
-
-struct op_msr {
- unsigned long addr;
- u64 saved;
-};
-
-struct op_msrs {
- struct op_msr *counters;
- struct op_msr *controls;
- struct op_msr *multiplex;
-};
-
-struct pt_regs;
-
-struct oprofile_operations;
-
-/* The model vtable abstracts the differences between
- * various x86 CPU models' perfctr support.
- */
-struct op_x86_model_spec {
- unsigned int num_counters;
- unsigned int num_controls;
- unsigned int num_virt_counters;
- u64 reserved;
- u16 event_mask;
- int (*init)(struct oprofile_operations *ops);
- int (*fill_in_addresses)(struct op_msrs * const msrs);
- void (*setup_ctrs)(struct op_x86_model_spec const *model,
- struct op_msrs const * const msrs);
- int (*check_ctrs)(struct pt_regs * const regs,
- struct op_msrs const * const msrs);
- void (*start)(struct op_msrs const * const msrs);
- void (*stop)(struct op_msrs const * const msrs);
- void (*shutdown)(struct op_msrs const * const msrs);
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
- void (*switch_ctrl)(struct op_x86_model_spec const *model,
- struct op_msrs const * const msrs);
-#endif
-};
-
-struct op_counter_config;
-
-static inline void op_x86_warn_in_use(int counter)
-{
- /*
- * The warning indicates an already running counter. If
- * oprofile doesn't collect data, then try using a different
- * performance counter on your platform to monitor the desired
- * event. Delete counter #%d from the desired event by editing
- * the /usr/share/oprofile/%s/<cpu>/events file. If the event
- * cannot be monitored by any other counter, contact your
- * hardware or BIOS vendor.
- */
- pr_warn("oprofile: counter #%d on cpu #%d may already be used\n",
- counter, smp_processor_id());
-}
-
-static inline void op_x86_warn_reserved(int counter)
-{
- pr_warn("oprofile: counter #%d is already reserved\n", counter);
-}
-
-extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
- struct op_counter_config *counter_config);
-extern int op_x86_phys_to_virt(int phys);
-extern int op_x86_virt_to_phys(int virt);
-
-extern struct op_x86_model_spec op_ppro_spec;
-extern struct op_x86_model_spec op_p4_spec;
-extern struct op_x86_model_spec op_p4_ht2_spec;
-extern struct op_x86_model_spec op_amd_spec;
-extern struct op_x86_model_spec op_arch_perfmon_spec;
-
-#endif /* OP_X86_MODEL_H */
diff --git a/arch/x86/pci/init.c b/arch/x86/pci/init.c
index 00bfa1ebad6c..0bb3b8b44e4e 100644
--- a/arch/x86/pci/init.c
+++ b/arch/x86/pci/init.c
@@ -9,16 +9,23 @@
in the right sequence from here. */
static __init int pci_arch_init(void)
{
- int type;
-
- x86_create_pci_msi_domain();
+ int type, pcbios = 1;
type = pci_direct_probe();
if (!(pci_probe & PCI_PROBE_NOEARLY))
pci_mmcfg_early_init();
- if (x86_init.pci.arch_init && !x86_init.pci.arch_init())
+ if (x86_init.pci.arch_init)
+ pcbios = x86_init.pci.arch_init();
+
+ /*
+ * Must happen after x86_init.pci.arch_init(). Xen sets up the
+ * x86_init.irqs.create_pci_msi_domain there.
+ */
+ x86_create_pci_msi_domain();
+
+ if (!pcbios)
return 0;
pci_pcbios_init();
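The reordering above only works because x86_create_pci_msi_domain() now runs after the platform's arch_init hook; Xen installs its MSI-domain constructor from that hook, so calling the domain setup first would silently fall back to the native domain. A minimal user-space sketch of the same ordering dependency follows; it is illustrative only, the function names merely mimic the kernel hooks, and none of it is part of the patch.

#include <stdio.h>

/* Hook that a platform's arch_init (e.g. Xen's) may install. */
static void (*create_pci_msi_domain_hook)(void);

static void xen_msi_domain(void)    { puts("using Xen-specific MSI domain"); }
static void native_msi_domain(void) { puts("using native MSI domain"); }

static int xen_arch_init(void)
{
	create_pci_msi_domain_hook = xen_msi_domain;	/* installed here */
	return 0;					/* no PCBIOS fallback needed */
}

static void x86_create_pci_msi_domain(void)
{
	/* Uses whatever hook is installed at call time - hence the ordering. */
	if (create_pci_msi_domain_hook)
		create_pci_msi_domain_hook();
	else
		native_msi_domain();
}

int main(void)
{
	/* Swapping these two calls would pick the native domain even on Xen. */
	xen_arch_init();
	x86_create_pci_msi_domain();
	return 0;
}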
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 95e2e6bd8d8c..8edd62206604 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -28,10 +28,12 @@
#include <linux/io.h>
#include <linux/smp.h>
+#include <asm/cpu_device_id.h>
#include <asm/segment.h>
#include <asm/pci_x86.h>
#include <asm/hw_irq.h>
#include <asm/io_apic.h>
+#include <asm/intel-family.h>
#include <asm/intel-mid.h>
#include <asm/acpi.h>
@@ -140,6 +142,7 @@ static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
* type1_access_ok - check whether to use type 1
* @bus: bus number
* @devfn: device & function in question
+ * @reg: configuration register offset
*
* If the bus is on a Lincroft chip and it exists, or is not on a Lincroft at
all, then we can go ahead with any reads & writes. If it's on a Lincroft,
@@ -212,10 +215,17 @@ static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
where, size, value);
}
+static const struct x86_cpu_id intel_mid_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, NULL),
+ {}
+};
+
static int intel_mid_pci_irq_enable(struct pci_dev *dev)
{
+ const struct x86_cpu_id *id;
struct irq_alloc_info info;
bool polarity_low;
+ u16 model = 0;
int ret;
u8 gsi;
@@ -228,8 +238,12 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
return ret;
}
- switch (intel_mid_identify_cpu()) {
- case INTEL_MID_CPU_CHIP_TANGIER:
+ id = x86_match_cpu(intel_mid_cpu_ids);
+ if (id)
+ model = id->model;
+
+ switch (model) {
+ case INTEL_FAM6_ATOM_SILVERMONT_MID:
polarity_low = false;
/* Special treatment for IRQ0 */
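Replacing the intel_mid_identify_cpu() switch with an x86_cpu_id table keeps the model check data-driven. A kernel-style sketch of how the same table could also carry per-model data through driver_data follows (the removed platform_bt.c code elsewhere in this diff uses the same idiom); everything except the X86_MATCH_INTEL_FAM6_MODEL()/x86_match_cpu() helpers is a hypothetical name, and none of this is part of the patch.

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

/* Hypothetical per-model quirk record, carried via driver_data. */
struct mid_pci_quirks {
	bool polarity_low;
};

static const struct mid_pci_quirks tng_pci_quirks = {
	.polarity_low = false,
};

static const struct x86_cpu_id mid_quirk_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &tng_pci_quirks),
	{}
};

static const struct mid_pci_quirks *mid_get_pci_quirks(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(mid_quirk_ids);

	/* NULL when running on anything outside the table. */
	return id ? (const struct mid_pci_quirks *)id->driver_data : NULL;
}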
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 234998f196d4..de6bf0e7e8f8 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -11,9 +11,9 @@
* themselves.
*/
+#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/init.h>
-#include <linux/sfi_acpi.h>
#include <linux/bitmap.h>
#include <linux/dmi.h>
#include <linux/slab.h>
@@ -665,7 +665,7 @@ void __init pci_mmcfg_early_init(void)
if (pci_mmcfg_check_hostbridge())
known_bridge = 1;
else
- acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
+ acpi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
__pci_mmcfg_init(1);
set_apei_filter();
@@ -683,7 +683,7 @@ void __init pci_mmcfg_late_init(void)
/* MMCONFIG hasn't been enabled yet, try again */
if (pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF) {
- acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
+ acpi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
__pci_mmcfg_init(0);
}
}
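With SFI gone, the MCFG table is parsed through the plain ACPI helper. As a reminder of its shape, a hypothetical handler is sketched below; pci_parse_mcfg is the real callback used in the hunks above, and this stub is illustrative only, not part of the patch.

#include <linux/acpi.h>

/* Hypothetical handler: acpi_table_parse() finds the named table once
 * and passes its header here. */
static int __init example_mcfg_handler(struct acpi_table_header *header)
{
	pr_info("MCFG table, length %u\n", header->length);
	return 0;
}

static void __init example_parse_mcfg(void)
{
	acpi_table_parse(ACPI_SIG_MCFG, example_mcfg_handler);
}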
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
index d0e835470d01..3ed03a2552d0 100644
--- a/arch/x86/platform/Makefile
+++ b/arch/x86/platform/Makefile
@@ -4,13 +4,11 @@ obj-y += atom/
obj-y += ce4100/
obj-y += efi/
obj-y += geode/
-obj-y += goldfish/
obj-y += iris/
obj-y += intel/
obj-y += intel-mid/
obj-y += intel-quark/
obj-y += olpc/
obj-y += scx200/
-obj-y += sfi/
obj-y += ts5500/
obj-y += uv/
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index e1e8d4e3a213..1b82d77019b1 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -54,10 +54,7 @@
* 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
*/
static u64 efi_va = EFI_VA_START;
-
-struct efi_scratch efi_scratch;
-
-EXPORT_SYMBOL_GPL(efi_mm);
+static struct mm_struct *efi_prev_mm;
/*
* We need our own copy of the higher levels of the page tables
@@ -115,31 +112,12 @@ void efi_sync_low_kernel_mappings(void)
pud_t *pud_k, *pud_efi;
pgd_t *efi_pgd = efi_mm.pgd;
- /*
- * We can share all PGD entries apart from the one entry that
- * covers the EFI runtime mapping space.
- *
- * Make sure the EFI runtime region mappings are guaranteed to
- * only span a single PGD entry and that the entry also maps
- * other important kernel regions.
- */
- MAYBE_BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
- MAYBE_BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
- (EFI_VA_END & PGDIR_MASK));
-
pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
pgd_k = pgd_offset_k(PAGE_OFFSET);
num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
- /*
- * As with PGDs, we share all P4D entries apart from the one entry
- * that covers the EFI runtime mapping space.
- */
- BUILD_BUG_ON(p4d_index(EFI_VA_END) != p4d_index(MODULES_END));
- BUILD_BUG_ON((EFI_VA_START & P4D_MASK) != (EFI_VA_END & P4D_MASK));
-
pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
pgd_k = pgd_offset_k(EFI_VA_END);
p4d_efi = p4d_offset(pgd_efi, 0);
@@ -256,7 +234,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
return 1;
}
- efi_scratch.phys_stack = page_to_phys(page + 1); /* stack grows down */
+ efi_mixed_mode_stack_pa = page_to_phys(page + 1); /* stack grows down */
npages = (_etext - _text) >> PAGE_SHIFT;
text = __pa(_text);
@@ -481,11 +459,17 @@ void __init efi_dump_pagetable(void)
* can not change under us.
* It should be ensured that there are no concurrent calls to this function.
*/
-void efi_switch_mm(struct mm_struct *mm)
+void efi_enter_mm(void)
+{
+ efi_prev_mm = current->active_mm;
+ current->active_mm = &efi_mm;
+ switch_mm(efi_prev_mm, &efi_mm, NULL);
+}
+
+void efi_leave_mm(void)
{
- efi_scratch.prev_mm = current->active_mm;
- current->active_mm = mm;
- switch_mm(efi_scratch.prev_mm, mm, NULL);
+ current->active_mm = efi_prev_mm;
+ switch_mm(&efi_mm, efi_prev_mm, NULL);
}
static DEFINE_SPINLOCK(efi_runtime_lock);
@@ -549,12 +533,12 @@ efi_thunk_set_virtual_address_map(unsigned long memory_map_size,
efi_sync_low_kernel_mappings();
local_irq_save(flags);
- efi_switch_mm(&efi_mm);
+ efi_enter_mm();
status = __efi_thunk(set_virtual_address_map, memory_map_size,
descriptor_size, descriptor_version, virtual_map);
- efi_switch_mm(efi_scratch.prev_mm);
+ efi_leave_mm();
local_irq_restore(flags);
return status;
@@ -848,9 +832,9 @@ efi_set_virtual_address_map(unsigned long memory_map_size,
descriptor_size,
descriptor_version,
virtual_map);
- efi_switch_mm(&efi_mm);
+ efi_enter_mm();
- kernel_fpu_begin();
+ efi_fpu_begin();
/* Disable interrupts around EFI calls: */
local_irq_save(flags);
@@ -859,12 +843,12 @@ efi_set_virtual_address_map(unsigned long memory_map_size,
descriptor_version, virtual_map);
local_irq_restore(flags);
- kernel_fpu_end();
+ efi_fpu_end();
/* grab the virtually remapped EFI runtime services table pointer */
efi.runtime = READ_ONCE(systab->runtime);
- efi_switch_mm(efi_scratch.prev_mm);
+ efi_leave_mm();
return status;
}
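The efi_enter_mm()/efi_leave_mm() pair replaces the single efi_switch_mm() call, so callers no longer thread the previous mm through efi_scratch. A sketch of the resulting call pattern follows; efi_do_runtime_call() is a hypothetical placeholder, the service invocation itself is elided, and none of this is part of the patch.

static void efi_do_runtime_call(void)
{
	unsigned long flags;

	local_irq_save(flags);
	efi_enter_mm();		/* efi_mm becomes the active mm */
	/* ... invoke the EFI runtime service here ... */
	efi_leave_mm();		/* restore whatever mm was active before */
	local_irq_restore(flags);
}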
diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S
index 26f0da238c1c..fd3dd1708eba 100644
--- a/arch/x86/platform/efi/efi_thunk_64.S
+++ b/arch/x86/platform/efi/efi_thunk_64.S
@@ -33,7 +33,7 @@ SYM_CODE_START(__efi64_thunk)
* Switch to 1:1 mapped 32-bit stack pointer.
*/
movq %rsp, %rax
- movq efi_scratch(%rip), %rsp
+ movq efi_mixed_mode_stack_pa(%rip), %rsp
push %rax
/*
@@ -70,3 +70,7 @@ SYM_CODE_START(__efi64_thunk)
pushl %ebp
lret
SYM_CODE_END(__efi64_thunk)
+
+ .bss
+ .balign 8
+SYM_DATA(efi_mixed_mode_stack_pa, .quad 0)
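The stack-pointer variable now lives next to the thunk that consumes it. On the C side (seen in the efi_64.c hunk earlier) it is reached through an ordinary extern declaration; a sketch of that pairing follows, with the declared type and the helper name assumed for illustration rather than taken from the patch.

/* Assumed declaration; in the tree it would live in an EFI header. */
extern unsigned long efi_mixed_mode_stack_pa;

static void efi_set_mixed_mode_stack(struct page *page)
{
	/* The mixed-mode stack grows down, so point past the allocation,
	 * mirroring the assignment in efi_setup_page_tables() above. */
	efi_mixed_mode_stack_pa = page_to_phys(page + 1);
}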
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 5a40fe411ebd..67d93a243c35 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -687,15 +687,25 @@ int efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
* @return: Returns if the page fault is not handled. This function
* will never return if the page fault is handled successfully.
*/
-void efi_recover_from_page_fault(unsigned long phys_addr)
+void efi_crash_gracefully_on_page_fault(unsigned long phys_addr)
{
if (!IS_ENABLED(CONFIG_X86_64))
return;
/*
+ * If we get an interrupt/NMI while processing an EFI runtime service
+ * then this is a regular OOPS, not an EFI failure.
+ */
+ if (in_interrupt())
+ return;
+
+ /*
* Make sure that an efi runtime service caused the page fault.
+ * READ_ONCE() because we might be OOPSing in a different thread,
+ * and we don't want to trip KTSAN while trying to OOPS.
*/
- if (efi_rts_work.efi_rts_id == EFI_NONE)
+ if (READ_ONCE(efi_rts_work.efi_rts_id) == EFI_NONE ||
+ current_work() != &efi_rts_work.work)
return;
/*
@@ -747,6 +757,4 @@ void efi_recover_from_page_fault(unsigned long phys_addr)
set_current_state(TASK_IDLE);
schedule();
}
-
- return;
}
diff --git a/arch/x86/platform/geode/alix.c b/arch/x86/platform/geode/alix.c
index c33f744b5388..b39bf3b5e108 100644
--- a/arch/x86/platform/geode/alix.c
+++ b/arch/x86/platform/geode/alix.c
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
+#include <linux/gpio/machine.h>
#include <linux/dmi.h>
#include <asm/geode.h>
@@ -69,21 +70,15 @@ static struct platform_device alix_buttons_dev = {
static struct gpio_led alix_leds[] = {
{
.name = "alix:1",
- .gpio = 6,
.default_trigger = "default-on",
- .active_low = 1,
},
{
.name = "alix:2",
- .gpio = 25,
.default_trigger = "default-off",
- .active_low = 1,
},
{
.name = "alix:3",
- .gpio = 27,
.default_trigger = "default-off",
- .active_low = 1,
},
};
@@ -92,6 +87,17 @@ static struct gpio_led_platform_data alix_leds_data = {
.leds = alix_leds,
};
+static struct gpiod_lookup_table alix_leds_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ /* The Geode GPIOs should be on the CS5535 companion chip */
+ GPIO_LOOKUP_IDX("cs5535-gpio", 6, NULL, 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("cs5535-gpio", 25, NULL, 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("cs5535-gpio", 27, NULL, 2, GPIO_ACTIVE_LOW),
+ { }
+ },
+};
+
static struct platform_device alix_leds_dev = {
.name = "leds-gpio",
.id = -1,
@@ -106,6 +112,7 @@ static struct platform_device *alix_devs[] __initdata = {
static void __init register_alix(void)
{
/* Setup LED control through leds-gpio driver */
+ gpiod_add_lookup_table(&alix_leds_gpio_table);
platform_add_devices(alix_devs, ARRAY_SIZE(alix_devs));
}
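With the lookup table registered against dev_id "leds-gpio", the board code no longer hard-codes GPIO numbers; gpiolib resolves each LED line by index when the consumer driver asks for it. A hypothetical consumer-side sketch follows (the real consumer is the existing leds-gpio driver; example_probe() and this code are illustrative, not part of the patch).

#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct gpio_desc *led;

	/* Index 0 resolves to "cs5535-gpio" pin 6, active low, per the table. */
	led = devm_gpiod_get_index(&pdev->dev, NULL, 0, GPIOD_OUT_LOW);
	if (IS_ERR(led))
		return PTR_ERR(led);

	/* Logical "on"; the active-low polarity is handled by gpiolib. */
	gpiod_set_value(led, 1);
	return 0;
}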
diff --git a/arch/x86/platform/geode/geos.c b/arch/x86/platform/geode/geos.c
index 73a3f49b4eb6..d263528c90bb 100644
--- a/arch/x86/platform/geode/geos.c
+++ b/arch/x86/platform/geode/geos.c
@@ -20,6 +20,7 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
+#include <linux/gpio/machine.h>
#include <linux/dmi.h>
#include <asm/geode.h>
@@ -53,21 +54,15 @@ static struct platform_device geos_buttons_dev = {
static struct gpio_led geos_leds[] = {
{
.name = "geos:1",
- .gpio = 6,
.default_trigger = "default-on",
- .active_low = 1,
},
{
.name = "geos:2",
- .gpio = 25,
.default_trigger = "default-off",
- .active_low = 1,
},
{
.name = "geos:3",
- .gpio = 27,
.default_trigger = "default-off",
- .active_low = 1,
},
};
@@ -76,6 +71,17 @@ static struct gpio_led_platform_data geos_leds_data = {
.leds = geos_leds,
};
+static struct gpiod_lookup_table geos_leds_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ /* The Geode GPIOs should be on the CS5535 companion chip */
+ GPIO_LOOKUP_IDX("cs5535-gpio", 6, NULL, 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("cs5535-gpio", 25, NULL, 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("cs5535-gpio", 27, NULL, 2, GPIO_ACTIVE_LOW),
+ { }
+ },
+};
+
static struct platform_device geos_leds_dev = {
.name = "leds-gpio",
.id = -1,
@@ -90,6 +96,7 @@ static struct platform_device *geos_devs[] __initdata = {
static void __init register_geos(void)
{
/* Setup LED control through leds-gpio driver */
+ gpiod_add_lookup_table(&geos_leds_gpio_table);
platform_add_devices(geos_devs, ARRAY_SIZE(geos_devs));
}
diff --git a/arch/x86/platform/geode/net5501.c b/arch/x86/platform/geode/net5501.c
index 163e1b545517..558384acd777 100644
--- a/arch/x86/platform/geode/net5501.c
+++ b/arch/x86/platform/geode/net5501.c
@@ -20,6 +20,7 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
+#include <linux/gpio/machine.h>
#include <asm/geode.h>
@@ -55,9 +56,7 @@ static struct platform_device net5501_buttons_dev = {
static struct gpio_led net5501_leds[] = {
{
.name = "net5501:1",
- .gpio = 6,
.default_trigger = "default-on",
- .active_low = 0,
},
};
@@ -66,6 +65,15 @@ static struct gpio_led_platform_data net5501_leds_data = {
.leds = net5501_leds,
};
+static struct gpiod_lookup_table net5501_leds_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ /* The Geode GPIOs should be on the CS5535 companion chip */
+ GPIO_LOOKUP_IDX("cs5535-gpio", 6, NULL, 0, GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
static struct platform_device net5501_leds_dev = {
.name = "leds-gpio",
.id = -1,
@@ -80,6 +88,7 @@ static struct platform_device *net5501_devs[] __initdata = {
static void __init register_net5501(void)
{
/* Setup LED control through leds-gpio driver */
+ gpiod_add_lookup_table(&net5501_leds_gpio_table);
platform_add_devices(net5501_devs, ARRAY_SIZE(net5501_devs));
}
diff --git a/arch/x86/platform/goldfish/Makefile b/arch/x86/platform/goldfish/Makefile
deleted file mode 100644
index 072c395379ac..000000000000
--- a/arch/x86/platform/goldfish/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_GOLDFISH) += goldfish.o
diff --git a/arch/x86/platform/goldfish/goldfish.c b/arch/x86/platform/goldfish/goldfish.c
deleted file mode 100644
index 6b6f8b4360dd..000000000000
--- a/arch/x86/platform/goldfish/goldfish.c
+++ /dev/null
@@ -1,54 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2007 Google, Inc.
- * Copyright (C) 2011 Intel, Inc.
- * Copyright (C) 2013 Intel, Inc.
- */
-
-#include <linux/kernel.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-
-/*
- * Where in virtual device memory the IO devices (timers, system controllers
- * and so on)
- */
-
-#define GOLDFISH_PDEV_BUS_BASE (0xff001000)
-#define GOLDFISH_PDEV_BUS_END (0xff7fffff)
-#define GOLDFISH_PDEV_BUS_IRQ (4)
-
-#define GOLDFISH_TTY_BASE (0x2000)
-
-static struct resource goldfish_pdev_bus_resources[] = {
- {
- .start = GOLDFISH_PDEV_BUS_BASE,
- .end = GOLDFISH_PDEV_BUS_END,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = GOLDFISH_PDEV_BUS_IRQ,
- .end = GOLDFISH_PDEV_BUS_IRQ,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-static bool goldfish_enable __initdata;
-
-static int __init goldfish_setup(char *str)
-{
- goldfish_enable = true;
- return 0;
-}
-__setup("goldfish", goldfish_setup);
-
-static int __init goldfish_init(void)
-{
- if (!goldfish_enable)
- return -ENODEV;
-
- platform_device_register_simple("goldfish_pdev_bus", -1,
- goldfish_pdev_bus_resources, 2);
- return 0;
-}
-device_initcall(goldfish_init);
diff --git a/arch/x86/platform/intel-mid/Makefile b/arch/x86/platform/intel-mid/Makefile
index cc2549f0ccb1..ddfc08783fb8 100644
--- a/arch/x86/platform/intel-mid/Makefile
+++ b/arch/x86/platform/intel-mid/Makefile
@@ -1,7 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o pwr.o
-
-# SFI specific code
-ifdef CONFIG_X86_INTEL_MID
-obj-$(CONFIG_SFI) += sfi.o device_libs/
-endif
+obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o pwr.o
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
deleted file mode 100644
index 480fed21cc7d..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ /dev/null
@@ -1,33 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Family-Level Interface Shim (FLIS)
-obj-$(subst m,y,$(CONFIG_PINCTRL_MERRIFIELD)) += platform_mrfld_pinctrl.o
-# SDHCI Devices
-obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += platform_mrfld_sd.o
-# WiFi + BT
-obj-$(subst m,y,$(CONFIG_BRCMFMAC_SDIO)) += platform_bcm43xx.o
-obj-$(subst m,y,$(CONFIG_BT_HCIUART_BCM)) += platform_bt.o
-# IPC Devices
-obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic.o
-obj-$(subst m,y,$(CONFIG_SND_MFLD_MACHINE)) += platform_msic_audio.o
-obj-$(subst m,y,$(CONFIG_GPIO_MSIC)) += platform_msic_gpio.o
-obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic_ocd.o
-obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic_battery.o
-obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o
-obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o
-# SPI Devices
-obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_mrfld_spidev.o
-# I2C Devices
-obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o
-obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o
-obj-$(subst m,y,$(CONFIG_MPU3050_I2C)) += platform_mpu3050.o
-obj-$(subst m,y,$(CONFIG_INPUT_BMA150)) += platform_bma023.o
-obj-$(subst m,y,$(CONFIG_DRM_MEDFIELD)) += platform_tc35876x.o
-# I2C GPIO Expanders
-obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_max7315.o
-obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
-obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
-# MISC Devices
-obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
-obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_mrfld_power_btn.o
-obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o
-obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c
deleted file mode 100644
index 564c47c53f3a..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * platform_bcm43xx.c: bcm43xx platform data initialization file
- *
- * (C) Copyright 2016 Intel Corporation
- * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- */
-
-#include <linux/gpio/machine.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/machine.h>
-#include <linux/regulator/fixed.h>
-#include <linux/sfi.h>
-
-#include <asm/intel-mid.h>
-
-#define WLAN_SFI_GPIO_IRQ_NAME "WLAN-interrupt"
-#define WLAN_SFI_GPIO_ENABLE_NAME "WLAN-enable"
-
-#define WLAN_DEV_NAME "0000:00:01.3"
-
-static struct regulator_consumer_supply bcm43xx_vmmc_supply = {
- .dev_name = WLAN_DEV_NAME,
- .supply = "vmmc",
-};
-
-static struct regulator_init_data bcm43xx_vmmc_data = {
- .constraints = {
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = &bcm43xx_vmmc_supply,
-};
-
-static struct fixed_voltage_config bcm43xx_vmmc = {
- .supply_name = "bcm43xx-vmmc-regulator",
- /*
- * Announce 2.0V here to be compatible with SDIO specification. The
- * real voltage and signaling are still 1.8V.
- */
- .microvolts = 2000000, /* 1.8V */
- .startup_delay = 250 * 1000, /* 250ms */
- .enabled_at_boot = 0, /* disabled at boot */
- .init_data = &bcm43xx_vmmc_data,
-};
-
-static struct platform_device bcm43xx_vmmc_regulator = {
- .name = "reg-fixed-voltage",
- .id = PLATFORM_DEVID_AUTO,
- .dev = {
- .platform_data = &bcm43xx_vmmc,
- },
-};
-
-static struct gpiod_lookup_table bcm43xx_vmmc_gpio_table = {
- .dev_id = "reg-fixed-voltage.0",
- .table = {
- GPIO_LOOKUP("0000:00:0c.0", -1, NULL, GPIO_ACTIVE_LOW),
- {}
- },
-};
-
-static int __init bcm43xx_regulator_register(void)
-{
- struct gpiod_lookup_table *table = &bcm43xx_vmmc_gpio_table;
- struct gpiod_lookup *lookup = table->table;
- int ret;
-
- lookup[0].chip_hwnum = get_gpio_by_name(WLAN_SFI_GPIO_ENABLE_NAME);
- gpiod_add_lookup_table(table);
-
- ret = platform_device_register(&bcm43xx_vmmc_regulator);
- if (ret) {
- pr_err("%s: vmmc regulator register failed\n", __func__);
- return ret;
- }
-
- return 0;
-}
-
-static void __init *bcm43xx_platform_data(void *info)
-{
- int ret;
-
- ret = bcm43xx_regulator_register();
- if (ret)
- return NULL;
-
- pr_info("Using generic wifi platform data\n");
-
- /* For now it's empty */
- return NULL;
-}
-
-static const struct devs_id bcm43xx_clk_vmmc_dev_id __initconst = {
- .name = "bcm43xx_clk_vmmc",
- .type = SFI_DEV_TYPE_SD,
- .get_platform_data = &bcm43xx_platform_data,
-};
-
-sfi_device(bcm43xx_clk_vmmc_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bma023.c b/arch/x86/platform/intel-mid/device_libs/platform_bma023.c
deleted file mode 100644
index 32912a17f68e..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_bma023.c
+++ /dev/null
@@ -1,16 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * platform_bma023.c: bma023 platform data initialization file
- *
- * (C) Copyright 2013 Intel Corporation
- */
-
-#include <asm/intel-mid.h>
-
-static const struct devs_id bma023_dev_id __initconst = {
- .name = "bma023",
- .type = SFI_DEV_TYPE_I2C,
- .delay = 1,
-};
-
-sfi_device(bma023_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
deleted file mode 100644
index 31dda18bb370..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Bluetooth platform data initialization file
- *
- * (C) Copyright 2017 Intel Corporation
- * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- */
-
-#include <linux/gpio/machine.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-
-#include <asm/cpu_device_id.h>
-#include <asm/intel-family.h>
-#include <asm/intel-mid.h>
-
-struct bt_sfi_data {
- struct device *dev;
- const char *name;
- int (*setup)(struct bt_sfi_data *ddata);
-};
-
-static struct gpiod_lookup_table tng_bt_sfi_gpio_table = {
- .dev_id = "hci_bcm",
- .table = {
- GPIO_LOOKUP("0000:00:0c.0", -1, "device-wakeup", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("0000:00:0c.0", -1, "shutdown", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("0000:00:0c.0", -1, "host-wakeup", GPIO_ACTIVE_HIGH),
- { },
- },
-};
-
-#define TNG_BT_SFI_GPIO_DEVICE_WAKEUP "bt_wakeup"
-#define TNG_BT_SFI_GPIO_SHUTDOWN "BT-reset"
-#define TNG_BT_SFI_GPIO_HOST_WAKEUP "bt_uart_enable"
-
-static int __init tng_bt_sfi_setup(struct bt_sfi_data *ddata)
-{
- struct gpiod_lookup_table *table = &tng_bt_sfi_gpio_table;
- struct gpiod_lookup *lookup = table->table;
- struct pci_dev *pdev;
-
- /* Connected to /dev/ttyS0 */
- pdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(4, 1));
- if (!pdev)
- return -ENODEV;
-
- ddata->dev = &pdev->dev;
- ddata->name = table->dev_id;
-
- lookup[0].chip_hwnum = get_gpio_by_name(TNG_BT_SFI_GPIO_DEVICE_WAKEUP);
- lookup[1].chip_hwnum = get_gpio_by_name(TNG_BT_SFI_GPIO_SHUTDOWN);
- lookup[2].chip_hwnum = get_gpio_by_name(TNG_BT_SFI_GPIO_HOST_WAKEUP);
-
- gpiod_add_lookup_table(table);
- return 0;
-}
-
-static struct bt_sfi_data tng_bt_sfi_data __initdata = {
- .setup = tng_bt_sfi_setup,
-};
-
-static const struct x86_cpu_id bt_sfi_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &tng_bt_sfi_data),
- {}
-};
-
-static int __init bt_sfi_init(void)
-{
- struct platform_device_info info;
- struct platform_device *pdev;
- const struct x86_cpu_id *id;
- struct bt_sfi_data *ddata;
- int ret;
-
- id = x86_match_cpu(bt_sfi_cpu_ids);
- if (!id)
- return -ENODEV;
-
- ddata = (struct bt_sfi_data *)id->driver_data;
- if (!ddata)
- return -ENODEV;
-
- ret = ddata->setup(ddata);
- if (ret)
- return ret;
-
- memset(&info, 0, sizeof(info));
- info.fwnode = ddata->dev->fwnode;
- info.parent = ddata->dev;
- info.name = ddata->name,
- info.id = PLATFORM_DEVID_NONE,
-
- pdev = platform_device_register_full(&info);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
-
- dev_info(ddata->dev, "Registered Bluetooth device: %s\n", ddata->name);
- return 0;
-}
-device_initcall(bt_sfi_init);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c
deleted file mode 100644
index a2508582a0b1..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c
+++ /dev/null
@@ -1,39 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * platform_emc1403.c: emc1403 platform data initialization file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- */
-
-#include <linux/init.h>
-#include <linux/gpio.h>
-#include <linux/i2c.h>
-#include <asm/intel-mid.h>
-
-static void __init *emc1403_platform_data(void *info)
-{
- static short intr2nd_pdata;
- struct i2c_board_info *i2c_info = info;
- int intr = get_gpio_by_name("thermal_int");
- int intr2nd = get_gpio_by_name("thermal_alert");
-
- if (intr < 0)
- return NULL;
- if (intr2nd < 0)
- return NULL;
-
- i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
- intr2nd_pdata = intr2nd + INTEL_MID_IRQ_OFFSET;
-
- return &intr2nd_pdata;
-}
-
-static const struct devs_id emc1403_dev_id __initconst = {
- .name = "emc1403",
- .type = SFI_DEV_TYPE_I2C,
- .delay = 1,
- .get_platform_data = &emc1403_platform_data,
-};
-
-sfi_device(emc1403_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
deleted file mode 100644
index d9435d2196a4..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
+++ /dev/null
@@ -1,81 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * platform_gpio_keys.c: gpio_keys platform data initialization file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- */
-
-#include <linux/input.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/gpio_keys.h>
-#include <linux/platform_device.h>
-#include <asm/intel-mid.h>
-
-#define DEVICE_NAME "gpio-keys"
-
-/*
- * we will search these buttons in SFI GPIO table (by name)
- * and register them dynamically. Please add all possible
- * buttons here, we will shrink them if no GPIO found.
- */
-static struct gpio_keys_button gpio_button[] = {
- {KEY_POWER, -1, 1, "power_btn", EV_KEY, 0, 3000},
- {KEY_PROG1, -1, 1, "prog_btn1", EV_KEY, 0, 20},
- {KEY_PROG2, -1, 1, "prog_btn2", EV_KEY, 0, 20},
- {SW_LID, -1, 1, "lid_switch", EV_SW, 0, 20},
- {KEY_VOLUMEUP, -1, 1, "vol_up", EV_KEY, 0, 20},
- {KEY_VOLUMEDOWN, -1, 1, "vol_down", EV_KEY, 0, 20},
- {KEY_MUTE, -1, 1, "mute_enable", EV_KEY, 0, 20},
- {KEY_VOLUMEUP, -1, 1, "volume_up", EV_KEY, 0, 20},
- {KEY_VOLUMEDOWN, -1, 1, "volume_down", EV_KEY, 0, 20},
- {KEY_CAMERA, -1, 1, "camera_full", EV_KEY, 0, 20},
- {KEY_CAMERA_FOCUS, -1, 1, "camera_half", EV_KEY, 0, 20},
- {SW_KEYPAD_SLIDE, -1, 1, "MagSw1", EV_SW, 0, 20},
- {SW_KEYPAD_SLIDE, -1, 1, "MagSw2", EV_SW, 0, 20},
-};
-
-static struct gpio_keys_platform_data gpio_keys = {
- .buttons = gpio_button,
- .rep = 1,
- .nbuttons = -1, /* will fill it after search */
-};
-
-static struct platform_device pb_device = {
- .name = DEVICE_NAME,
- .id = -1,
- .dev = {
- .platform_data = &gpio_keys,
- },
-};
-
-/*
- * Shrink the non-existent buttons, register the gpio button
- * device if there is some
- */
-static int __init pb_keys_init(void)
-{
- struct gpio_keys_button *gb = gpio_button;
- int i, good = 0;
-
- for (i = 0; i < ARRAY_SIZE(gpio_button); i++) {
- gb[i].gpio = get_gpio_by_name(gb[i].desc);
- pr_debug("info[%2d]: name = %s, gpio = %d\n", i, gb[i].desc,
- gb[i].gpio);
- if (gb[i].gpio < 0)
- continue;
-
- if (i != good)
- gb[good] = gb[i];
- good++;
- }
-
- if (good) {
- gpio_keys.nbuttons = good;
- return platform_device_register(&pb_device);
- }
- return 0;
-}
-late_initcall(pb_keys_init);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c
deleted file mode 100644
index a4485cd638c6..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c
+++ /dev/null
@@ -1,37 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * platform_lis331.c: lis331 platform data initialization file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- */
-
-#include <linux/i2c.h>
-#include <linux/gpio.h>
-#include <asm/intel-mid.h>
-
-static void __init *lis331dl_platform_data(void *info)
-{
- static short intr2nd_pdata;
- struct i2c_board_info *i2c_info = info;
- int intr = get_gpio_by_name("accel_int");
- int intr2nd = get_gpio_by_name("accel_2");
-
- if (intr < 0)
- return NULL;
- if (intr2nd < 0)
- return NULL;
-
- i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
- intr2nd_pdata = intr2nd + INTEL_MID_IRQ_OFFSET;
-
- return &intr2nd_pdata;
-}
-
-static const struct devs_id lis331dl_dev_id __initconst = {
- .name = "i2c_accel",
- .type = SFI_DEV_TYPE_I2C,
- .get_platform_data = &lis331dl_platform_data,
-};
-
-sfi_device(lis331dl_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
deleted file mode 100644
index e9287c3184da..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
+++ /dev/null
@@ -1,77 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * platform_max7315.c: max7315 platform data initialization file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- */
-
-#include <linux/init.h>
-#include <linux/gpio.h>
-#include <linux/i2c.h>
-#include <linux/platform_data/pca953x.h>
-#include <asm/intel-mid.h>
-
-#define MAX7315_NUM 2
-
-static void __init *max7315_platform_data(void *info)
-{
- static struct pca953x_platform_data max7315_pdata[MAX7315_NUM];
- static int nr;
- struct pca953x_platform_data *max7315 = &max7315_pdata[nr];
- struct i2c_board_info *i2c_info = info;
- int gpio_base, intr;
- char base_pin_name[SFI_NAME_LEN + 1];
- char intr_pin_name[SFI_NAME_LEN + 1];
-
- if (nr == MAX7315_NUM) {
- pr_err("too many max7315s, we only support %d\n",
- MAX7315_NUM);
- return NULL;
- }
- /* we have several max7315 on the board, we only need to load several
- * instances of the same pca953x driver to cover them
- */
- strcpy(i2c_info->type, "max7315");
- if (nr++) {
- snprintf(base_pin_name, sizeof(base_pin_name),
- "max7315_%d_base", nr);
- snprintf(intr_pin_name, sizeof(intr_pin_name),
- "max7315_%d_int", nr);
- } else {
- strcpy(base_pin_name, "max7315_base");
- strcpy(intr_pin_name, "max7315_int");
- }
-
- gpio_base = get_gpio_by_name(base_pin_name);
- intr = get_gpio_by_name(intr_pin_name);
-
- if (gpio_base < 0)
- return NULL;
- max7315->gpio_base = gpio_base;
- if (intr != -1) {
- i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
- max7315->irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
- } else {
- i2c_info->irq = -1;
- max7315->irq_base = -1;
- }
- return max7315;
-}
-
-static const struct devs_id max7315_dev_id __initconst = {
- .name = "i2c_max7315",
- .type = SFI_DEV_TYPE_I2C,
- .delay = 1,
- .get_platform_data = &max7315_platform_data,
-};
-
-static const struct devs_id max7315_2_dev_id __initconst = {
- .name = "i2c_max7315_2",
- .type = SFI_DEV_TYPE_I2C,
- .delay = 1,
- .get_platform_data = &max7315_platform_data,
-};
-
-sfi_device(max7315_dev_id);
-sfi_device(max7315_2_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c
deleted file mode 100644
index 28a182713934..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * platform_mpu3050.c: mpu3050 platform data initialization file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- */
-
-#include <linux/gpio.h>
-#include <linux/i2c.h>
-#include <asm/intel-mid.h>
-
-static void *mpu3050_platform_data(void *info)
-{
- struct i2c_board_info *i2c_info = info;
- int intr = get_gpio_by_name("mpu3050_int");
-
- if (intr < 0)
- return NULL;
-
- i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
- return NULL;
-}
-
-static const struct devs_id mpu3050_dev_id __initconst = {
- .name = "mpu3050",
- .type = SFI_DEV_TYPE_I2C,
- .delay = 1,
- .get_platform_data = &mpu3050_platform_data,
-};
-
-sfi_device(mpu3050_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_pinctrl.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_pinctrl.c
deleted file mode 100644
index 605e1f94ad89..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_pinctrl.c
+++ /dev/null
@@ -1,39 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel Merrifield FLIS platform device initialization file
- *
- * Copyright (C) 2016, Intel Corporation
- *
- * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-
-#include <asm/intel-mid.h>
-
-#define FLIS_BASE_ADDR 0xff0c0000
-#define FLIS_LENGTH 0x8000
-
-static struct resource mrfld_pinctrl_mmio_resource = {
- .start = FLIS_BASE_ADDR,
- .end = FLIS_BASE_ADDR + FLIS_LENGTH - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct platform_device mrfld_pinctrl_device = {
- .name = "pinctrl-merrifield",
- .id = PLATFORM_DEVID_NONE,
- .resource = &mrfld_pinctrl_mmio_resource,
- .num_resources = 1,
-};
-
-static int __init mrfld_pinctrl_init(void)
-{
- if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
- return platform_device_register(&mrfld_pinctrl_device);
-
- return -ENODEV;
-}
-arch_initcall(mrfld_pinctrl_init);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c
deleted file mode 100644
index ec2afb41b34a..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c
+++ /dev/null
@@ -1,78 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel Merrifield power button support
- *
- * (C) Copyright 2017 Intel Corporation
- *
- * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/sfi.h>
-
-#include <asm/intel-mid.h>
-#include <asm/intel_scu_ipc.h>
-
-static struct resource mrfld_power_btn_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device mrfld_power_btn_dev = {
- .name = "msic_power_btn",
- .id = PLATFORM_DEVID_NONE,
- .num_resources = ARRAY_SIZE(mrfld_power_btn_resources),
- .resource = mrfld_power_btn_resources,
-};
-
-static int mrfld_power_btn_scu_status_change(struct notifier_block *nb,
- unsigned long code, void *data)
-{
- if (code == SCU_DOWN) {
- platform_device_unregister(&mrfld_power_btn_dev);
- return 0;
- }
-
- return platform_device_register(&mrfld_power_btn_dev);
-}
-
-static struct notifier_block mrfld_power_btn_scu_notifier = {
- .notifier_call = mrfld_power_btn_scu_status_change,
-};
-
-static int __init register_mrfld_power_btn(void)
-{
- if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
- return -ENODEV;
-
- /*
- * We need to be sure that the SCU IPC is ready before
- * PMIC power button device can be registered:
- */
- intel_scu_notifier_add(&mrfld_power_btn_scu_notifier);
-
- return 0;
-}
-arch_initcall(register_mrfld_power_btn);
-
-static void __init *mrfld_power_btn_platform_data(void *info)
-{
- struct resource *res = mrfld_power_btn_resources;
- struct sfi_device_table_entry *pentry = info;
-
- res->start = res->end = pentry->irq;
- return NULL;
-}
-
-static const struct devs_id mrfld_power_btn_dev_id __initconst = {
- .name = "bcove_power_btn",
- .type = SFI_DEV_TYPE_IPC,
- .delay = 1,
- .msic = 1,
- .get_platform_data = &mrfld_power_btn_platform_data,
-};
-
-sfi_device(mrfld_power_btn_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_rtc.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_rtc.c
deleted file mode 100644
index 40e9808a9634..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_rtc.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel Merrifield legacy RTC initialization file
- *
- * (C) Copyright 2017 Intel Corporation
- *
- * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- */
-
-#include <linux/init.h>
-
-#include <asm/hw_irq.h>
-#include <asm/intel-mid.h>
-#include <asm/io_apic.h>
-#include <asm/time.h>
-#include <asm/x86_init.h>
-
-static int __init mrfld_legacy_rtc_alloc_irq(void)
-{
- struct irq_alloc_info info;
- int ret;
-
- if (!x86_platform.legacy.rtc)
- return -ENODEV;
-
- ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 1, 0);
- ret = mp_map_gsi_to_irq(RTC_IRQ, IOAPIC_MAP_ALLOC, &info);
- if (ret < 0) {
- pr_info("Failed to allocate RTC interrupt. Disabling RTC\n");
- x86_platform.legacy.rtc = 0;
- return ret;
- }
-
- return 0;
-}
-
-static int __init mrfld_legacy_rtc_init(void)
-{
- if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
- return -ENODEV;
-
- return mrfld_legacy_rtc_alloc_irq();
-}
-arch_initcall(mrfld_legacy_rtc_init);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_sd.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_sd.c
deleted file mode 100644
index fe3b7ff975f3..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_sd.c
+++ /dev/null
@@ -1,43 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * SDHCI platform data initialisation file
- *
- * (C) Copyright 2016 Intel Corporation
- * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- */
-
-#include <linux/init.h>
-#include <linux/pci.h>
-
-#include <linux/mmc/sdhci-pci-data.h>
-
-#include <asm/intel-mid.h>
-
-#define INTEL_MRFLD_SD 2
-#define INTEL_MRFLD_SD_CD_GPIO 77
-
-static struct sdhci_pci_data mrfld_sdhci_pci_data = {
- .rst_n_gpio = -EINVAL,
- .cd_gpio = INTEL_MRFLD_SD_CD_GPIO,
-};
-
-static struct sdhci_pci_data *
-mrfld_sdhci_pci_get_data(struct pci_dev *pdev, int slotno)
-{
- unsigned int func = PCI_FUNC(pdev->devfn);
-
- if (func == INTEL_MRFLD_SD)
- return &mrfld_sdhci_pci_data;
-
- return NULL;
-}
-
-static int __init mrfld_sd_init(void)
-{
- if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
- return -ENODEV;
-
- sdhci_pci_get_data = mrfld_sdhci_pci_get_data;
- return 0;
-}
-arch_initcall(mrfld_sd_init);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
deleted file mode 100644
index b828f4fd40be..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
+++ /dev/null
@@ -1,50 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * spidev platform data initialization file
- *
- * (C) Copyright 2014, 2016 Intel Corporation
- * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- * Dan O'Donovan <dan@emutex.com>
- */
-
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/sfi.h>
-#include <linux/spi/pxa2xx_spi.h>
-#include <linux/spi/spi.h>
-
-#include <asm/intel-mid.h>
-
-#define MRFLD_SPI_DEFAULT_DMA_BURST 8
-#define MRFLD_SPI_DEFAULT_TIMEOUT 500
-
-/* GPIO pin for spidev chipselect */
-#define MRFLD_SPIDEV_GPIO_CS 111
-
-static struct pxa2xx_spi_chip spidev_spi_chip = {
- .dma_burst_size = MRFLD_SPI_DEFAULT_DMA_BURST,
- .timeout = MRFLD_SPI_DEFAULT_TIMEOUT,
- .gpio_cs = MRFLD_SPIDEV_GPIO_CS,
-};
-
-static void __init *spidev_platform_data(void *info)
-{
- struct spi_board_info *spi_info = info;
-
- if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
- return ERR_PTR(-ENODEV);
-
- spi_info->mode = SPI_MODE_0;
- spi_info->controller_data = &spidev_spi_chip;
-
- return NULL;
-}
-
-static const struct devs_id spidev_dev_id __initconst = {
- .name = "spidev",
- .type = SFI_DEV_TYPE_SPI,
- .delay = 0,
- .get_platform_data = &spidev_platform_data,
-};
-
-sfi_device(spidev_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic.c b/arch/x86/platform/intel-mid/device_libs/platform_msic.c
deleted file mode 100644
index b17783d0d4e7..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_msic.c
+++ /dev/null
@@ -1,83 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * platform_msic.c: MSIC platform data initialization file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/scatterlist.h>
-#include <linux/init.h>
-#include <linux/sfi.h>
-#include <linux/mfd/intel_msic.h>
-#include <asm/intel_scu_ipc.h>
-#include <asm/intel-mid.h>
-#include "platform_msic.h"
-
-struct intel_msic_platform_data msic_pdata;
-
-static struct resource msic_resources[] = {
- {
- .start = INTEL_MSIC_IRQ_PHYS_BASE,
- .end = INTEL_MSIC_IRQ_PHYS_BASE + 64 - 1,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device msic_device = {
- .name = "intel_msic",
- .id = -1,
- .dev = {
- .platform_data = &msic_pdata,
- },
- .num_resources = ARRAY_SIZE(msic_resources),
- .resource = msic_resources,
-};
-
-static int msic_scu_status_change(struct notifier_block *nb,
- unsigned long code, void *data)
-{
- if (code == SCU_DOWN) {
- platform_device_unregister(&msic_device);
- return 0;
- }
-
- return platform_device_register(&msic_device);
-}
-
-static int __init msic_init(void)
-{
- static struct notifier_block msic_scu_notifier = {
- .notifier_call = msic_scu_status_change,
- };
-
- /*
- * We need to be sure that the SCU IPC is ready before MSIC device
- * can be registered.
- */
- if (intel_mid_has_msic())
- intel_scu_notifier_add(&msic_scu_notifier);
-
- return 0;
-}
-arch_initcall(msic_init);
-
-/*
- * msic_generic_platform_data - sets generic platform data for the block
- * @info: pointer to the SFI device table entry for this block
- * @block: MSIC block
- *
- * Function sets IRQ number from the SFI table entry for given device to
- * the MSIC platform data.
- */
-void *msic_generic_platform_data(void *info, enum intel_msic_block block)
-{
- struct sfi_device_table_entry *entry = info;
-
- BUG_ON(block < 0 || block >= INTEL_MSIC_BLOCK_LAST);
- msic_pdata.irq[block] = entry->irq;
-
- return NULL;
-}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic.h b/arch/x86/platform/intel-mid/device_libs/platform_msic.h
deleted file mode 100644
index 91deb2e65b0e..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_msic.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * platform_msic.h: MSIC platform data header file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- */
-#ifndef _PLATFORM_MSIC_H_
-#define _PLATFORM_MSIC_H_
-
-extern struct intel_msic_platform_data msic_pdata;
-
-void *msic_generic_platform_data(void *info, enum intel_msic_block block);
-
-#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c
deleted file mode 100644
index e765da78ad8c..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c
+++ /dev/null
@@ -1,42 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * platform_msic_audio.c: MSIC audio platform data initialization file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/scatterlist.h>
-#include <linux/init.h>
-#include <linux/sfi.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/intel_msic.h>
-#include <asm/intel-mid.h>
-
-#include "platform_msic.h"
-
-static void *msic_audio_platform_data(void *info)
-{
- struct platform_device *pdev;
-
- pdev = platform_device_register_simple("sst-platform", -1, NULL, 0);
-
- if (IS_ERR(pdev)) {
- pr_err("failed to create audio platform device\n");
- return NULL;
- }
-
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_AUDIO);
-}
-
-static const struct devs_id msic_audio_dev_id __initconst = {
- .name = "msic_audio",
- .type = SFI_DEV_TYPE_IPC,
- .delay = 1,
- .msic = 1,
- .get_platform_data = &msic_audio_platform_data,
-};
-
-sfi_device(msic_audio_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c
deleted file mode 100644
index f461f84903f8..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * platform_msic_battery.c: MSIC battery platform data initialization file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/scatterlist.h>
-#include <linux/init.h>
-#include <linux/sfi.h>
-#include <linux/mfd/intel_msic.h>
-#include <asm/intel-mid.h>
-
-#include "platform_msic.h"
-
-static void __init *msic_battery_platform_data(void *info)
-{
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_BATTERY);
-}
-
-static const struct devs_id msic_battery_dev_id __initconst = {
- .name = "msic_battery",
- .type = SFI_DEV_TYPE_IPC,
- .delay = 1,
- .msic = 1,
- .get_platform_data = &msic_battery_platform_data,
-};
-
-sfi_device(msic_battery_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c
deleted file mode 100644
index 71a7d6db3878..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c
+++ /dev/null
@@ -1,43 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * platform_msic_gpio.c: MSIC GPIO platform data initialization file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/scatterlist.h>
-#include <linux/sfi.h>
-#include <linux/init.h>
-#include <linux/gpio.h>
-#include <linux/mfd/intel_msic.h>
-#include <asm/intel-mid.h>
-
-#include "platform_msic.h"
-
-static void __init *msic_gpio_platform_data(void *info)
-{
- static struct intel_msic_gpio_pdata msic_gpio_pdata;
-
- int gpio = get_gpio_by_name("msic_gpio_base");
-
- if (gpio < 0)
- return NULL;
-
- msic_gpio_pdata.gpio_base = gpio;
- msic_pdata.gpio = &msic_gpio_pdata;
-
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_GPIO);
-}
-
-static const struct devs_id msic_gpio_dev_id __initconst = {
- .name = "msic_gpio",
- .type = SFI_DEV_TYPE_IPC,
- .delay = 1,
- .msic = 1,
- .get_platform_data = &msic_gpio_platform_data,
-};
-
-sfi_device(msic_gpio_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c
deleted file mode 100644
index 558c0d974430..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * platform_msic_ocd.c: MSIC OCD platform data initialization file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/scatterlist.h>
-#include <linux/sfi.h>
-#include <linux/init.h>
-#include <linux/gpio.h>
-#include <linux/mfd/intel_msic.h>
-#include <asm/intel-mid.h>
-
-#include "platform_msic.h"
-
-static void __init *msic_ocd_platform_data(void *info)
-{
- static struct intel_msic_ocd_pdata msic_ocd_pdata;
- int gpio;
-
- gpio = get_gpio_by_name("ocd_gpio");
-
- if (gpio < 0)
- return NULL;
-
- msic_ocd_pdata.gpio = gpio;
- msic_pdata.ocd = &msic_ocd_pdata;
-
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_OCD);
-}
-
-static const struct devs_id msic_ocd_dev_id __initconst = {
- .name = "msic_ocd",
- .type = SFI_DEV_TYPE_IPC,
- .delay = 1,
- .msic = 1,
- .get_platform_data = &msic_ocd_platform_data,
-};
-
-sfi_device(msic_ocd_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c
deleted file mode 100644
index 3d3de2d59726..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c
+++ /dev/null
@@ -1,31 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * platform_msic_power_btn.c: MSIC power btn platform data initialization file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- */
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/scatterlist.h>
-#include <linux/sfi.h>
-#include <linux/init.h>
-#include <linux/mfd/intel_msic.h>
-#include <asm/intel-mid.h>
-
-#include "platform_msic.h"
-
-static void __init *msic_power_btn_platform_data(void *info)
-{
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_POWER_BTN);
-}
-
-static const struct devs_id msic_power_btn_dev_id __initconst = {
- .name = "msic_power_btn",
- .type = SFI_DEV_TYPE_IPC,
- .delay = 1,
- .msic = 1,
- .get_platform_data = &msic_power_btn_platform_data,
-};
-
-sfi_device(msic_power_btn_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c
deleted file mode 100644
index 4858da1d78c6..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * platform_msic_thermal.c: msic_thermal platform data initialization file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- */
-
-#include <linux/input.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/intel_msic.h>
-#include <asm/intel-mid.h>
-
-#include "platform_msic.h"
-
-static void __init *msic_thermal_platform_data(void *info)
-{
- return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_THERMAL);
-}
-
-static const struct devs_id msic_thermal_dev_id __initconst = {
- .name = "msic_thermal",
- .type = SFI_DEV_TYPE_IPC,
- .delay = 1,
- .msic = 1,
- .get_platform_data = &msic_thermal_platform_data,
-};
-
-sfi_device(msic_thermal_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c b/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c
deleted file mode 100644
index 5609d8da3978..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c
+++ /dev/null
@@ -1,95 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * PCAL9555a platform data initialization file
- *
- * Copyright (C) 2016, Intel Corporation
- *
- * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- * Dan O'Donovan <dan@emutex.com>
- */
-
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/platform_data/pca953x.h>
-#include <linux/sfi.h>
-
-#include <asm/intel-mid.h>
-
-#define PCAL9555A_NUM 4
-
-static struct pca953x_platform_data pcal9555a_pdata[PCAL9555A_NUM];
-static int nr;
-
-static void __init *pcal9555a_platform_data(void *info)
-{
- struct i2c_board_info *i2c_info = info;
- char *type = i2c_info->type;
- struct pca953x_platform_data *pcal9555a;
- char base_pin_name[SFI_NAME_LEN + 1];
- char intr_pin_name[SFI_NAME_LEN + 1];
- int gpio_base, intr;
-
- snprintf(base_pin_name, sizeof(base_pin_name), "%s_base", type);
- snprintf(intr_pin_name, sizeof(intr_pin_name), "%s_int", type);
-
- gpio_base = get_gpio_by_name(base_pin_name);
- intr = get_gpio_by_name(intr_pin_name);
-
- /* Check if the SFI record is valid */
- if (gpio_base == -1)
- return NULL;
-
- if (nr >= PCAL9555A_NUM) {
- pr_err("%s: Too many instances, only %d supported\n", __func__,
- PCAL9555A_NUM);
- return NULL;
- }
-
- pcal9555a = &pcal9555a_pdata[nr++];
- pcal9555a->gpio_base = gpio_base;
-
- if (intr >= 0) {
- i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
- pcal9555a->irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
- } else {
- i2c_info->irq = -1;
- pcal9555a->irq_base = -1;
- }
-
- strcpy(type, "pcal9555a");
- return pcal9555a;
-}
-
-static const struct devs_id pcal9555a_1_dev_id __initconst = {
- .name = "pcal9555a-1",
- .type = SFI_DEV_TYPE_I2C,
- .delay = 1,
- .get_platform_data = &pcal9555a_platform_data,
-};
-
-static const struct devs_id pcal9555a_2_dev_id __initconst = {
- .name = "pcal9555a-2",
- .type = SFI_DEV_TYPE_I2C,
- .delay = 1,
- .get_platform_data = &pcal9555a_platform_data,
-};
-
-static const struct devs_id pcal9555a_3_dev_id __initconst = {
- .name = "pcal9555a-3",
- .type = SFI_DEV_TYPE_I2C,
- .delay = 1,
- .get_platform_data = &pcal9555a_platform_data,
-};
-
-static const struct devs_id pcal9555a_4_dev_id __initconst = {
- .name = "pcal9555a-4",
- .type = SFI_DEV_TYPE_I2C,
- .delay = 1,
- .get_platform_data = &pcal9555a_platform_data,
-};
-
-sfi_device(pcal9555a_1_dev_id);
-sfi_device(pcal9555a_2_dev_id);
-sfi_device(pcal9555a_3_dev_id);
-sfi_device(pcal9555a_4_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c
deleted file mode 100644
index 139738bbdd36..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c
+++ /dev/null
@@ -1,42 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * platform_tc35876x.c: tc35876x platform data initialization file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- */
-
-#include <linux/gpio/machine.h>
-#include <asm/intel-mid.h>
-
-static struct gpiod_lookup_table tc35876x_gpio_table = {
- .dev_id = "i2c_disp_brig",
- .table = {
- GPIO_LOOKUP("0000:00:0c.0", -1, "bridge-reset", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("0000:00:0c.0", -1, "bl-en", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("0000:00:0c.0", -1, "vadd", GPIO_ACTIVE_HIGH),
- { },
- },
-};
-
-/*tc35876x DSI_LVDS bridge chip and panel platform data*/
-static void *tc35876x_platform_data(void *data)
-{
- struct gpiod_lookup_table *table = &tc35876x_gpio_table;
- struct gpiod_lookup *lookup = table->table;
-
- lookup[0].chip_hwnum = get_gpio_by_name("LCMB_RXEN");
- lookup[1].chip_hwnum = get_gpio_by_name("6S6P_BL_EN");
- lookup[2].chip_hwnum = get_gpio_by_name("EN_VREG_LCD_V3P3");
- gpiod_add_lookup_table(table);
-
- return NULL;
-}
-
-static const struct devs_id tc35876x_dev_id __initconst = {
- .name = "i2c_disp_brig",
- .type = SFI_DEV_TYPE_I2C,
- .get_platform_data = &tc35876x_platform_data,
-};
-
-sfi_device(tc35876x_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c
deleted file mode 100644
index e689d8f61059..000000000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c
+++ /dev/null
@@ -1,53 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * platform_tca6416.c: tca6416 platform data initialization file
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- */
-
-#include <linux/platform_data/pca953x.h>
-#include <linux/i2c.h>
-#include <linux/gpio.h>
-#include <asm/intel-mid.h>
-
-#define TCA6416_NAME "tca6416"
-#define TCA6416_BASE "tca6416_base"
-#define TCA6416_INTR "tca6416_int"
-
-static void *tca6416_platform_data(void *info)
-{
- static struct pca953x_platform_data tca6416;
- struct i2c_board_info *i2c_info = info;
- int gpio_base, intr;
- char base_pin_name[SFI_NAME_LEN + 1];
- char intr_pin_name[SFI_NAME_LEN + 1];
-
- strcpy(i2c_info->type, TCA6416_NAME);
- strcpy(base_pin_name, TCA6416_BASE);
- strcpy(intr_pin_name, TCA6416_INTR);
-
- gpio_base = get_gpio_by_name(base_pin_name);
- intr = get_gpio_by_name(intr_pin_name);
-
- if (gpio_base < 0)
- return NULL;
- tca6416.gpio_base = gpio_base;
- if (intr >= 0) {
- i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
- tca6416.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
- } else {
- i2c_info->irq = -1;
- tca6416.irq_base = -1;
- }
- return &tca6416;
-}
-
-static const struct devs_id tca6416_dev_id __initconst = {
- .name = "tca6416",
- .type = SFI_DEV_TYPE_I2C,
- .delay = 1,
- .get_platform_data = &tca6416_platform_data,
-};
-
-sfi_device(tca6416_dev_id);
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 780728161f7d..f4592dc7a1c1 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * intel-mid.c: Intel MID platform setup code
+ * Intel MID platform setup code
*
- * (C) Copyright 2008, 2012 Intel Corporation
+ * (C) Copyright 2008, 2012, 2021 Intel Corporation
* Author: Jacob Pan (jacob.jun.pan@intel.com)
* Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
*/
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/regulator/machine.h>
#include <linux/scatterlist.h>
-#include <linux/sfi.h>
#include <linux/irq.h>
#include <linux/export.h>
#include <linux/notifier.h>
@@ -25,38 +24,13 @@
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/intel-mid.h>
-#include <asm/intel_mid_vrtc.h>
#include <asm/io.h>
#include <asm/i8259.h>
#include <asm/intel_scu_ipc.h>
-#include <asm/apb_timer.h>
#include <asm/reboot.h>
-/*
- * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock,
- * cmdline option x86_intel_mid_timer can be used to override the configuration
- * to prefer one or the other.
- * at runtime, there are basically three timer configurations:
- * 1. per cpu apbt clock only
- * 2. per cpu always-on lapic clocks only, this is Penwell/Medfield only
- * 3. per cpu lapic clock (C3STOP) and one apbt clock, with broadcast.
- *
- * by default (without cmdline option), platform code first detects cpu type
- * to see if we are on lincroft or penwell, then set up both lapic or apbt
- * clocks accordingly.
- * i.e. by default, medfield uses configuration #2, moorestown uses #1.
- * config #3 is supported but not recommended on medfield.
- *
- * rating and feature summary:
- * lapic (with C3STOP) --------- 100
- * apbt (always-on) ------------ 110
- * lapic (always-on,ARAT) ------ 150
- */
-
-enum intel_mid_timer_options intel_mid_timer_options;
-
-enum intel_mid_cpu_type __intel_mid_cpu_chip;
-EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
+#define IPCMSG_COLD_OFF 0x80 /* Only for Tangier */
+#define IPCMSG_COLD_RESET 0xF1
static void intel_mid_power_off(void)
{
@@ -64,69 +38,32 @@ static void intel_mid_power_off(void)
intel_mid_pwr_power_off();
/* Only for Tangier, the rest will ignore this command */
- intel_scu_ipc_simple_command(IPCMSG_COLD_OFF, 1);
+ intel_scu_ipc_dev_simple_command(NULL, IPCMSG_COLD_OFF, 1);
};
static void intel_mid_reboot(void)
{
- intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
-}
-
-static void __init intel_mid_setup_bp_timer(void)
-{
- apbt_time_init();
- setup_boot_APIC_clock();
+ intel_scu_ipc_dev_simple_command(NULL, IPCMSG_COLD_RESET, 0);
}
static void __init intel_mid_time_init(void)
{
- sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
-
- switch (intel_mid_timer_options) {
- case INTEL_MID_TIMER_APBT_ONLY:
- break;
- case INTEL_MID_TIMER_LAPIC_APBT:
- /* Use apbt and local apic */
- x86_init.timers.setup_percpu_clockev = intel_mid_setup_bp_timer;
- x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
- return;
- default:
- if (!boot_cpu_has(X86_FEATURE_ARAT))
- break;
- /* Lapic only, no apbt */
- x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
- x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
- return;
- }
-
- x86_init.timers.setup_percpu_clockev = apbt_time_init;
+ /* Lapic only, no apbt */
+ x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
+ x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
}
static void intel_mid_arch_setup(void)
{
- if (boot_cpu_data.x86 != 6) {
- pr_err("Unknown Intel MID CPU (%d:%d), default to Penwell\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
- __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL;
- goto out;
- }
-
switch (boot_cpu_data.x86_model) {
- case 0x35:
- __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_CLOVERVIEW;
- break;
case 0x3C:
case 0x4A:
- __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_TANGIER;
x86_platform.legacy.rtc = 1;
break;
- case 0x27:
default:
- __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL;
break;
}
-out:
/*
* Intel MID platforms are using explicitly defined regulators.
*
@@ -159,14 +96,11 @@ void __init x86_intel_mid_early_setup(void)
x86_init.timers.timer_init = intel_mid_time_init;
x86_init.timers.setup_percpu_clockev = x86_init_noop;
- x86_init.timers.wallclock_init = intel_mid_rtc_init;
x86_init.irqs.pre_vector_init = x86_init_noop;
x86_init.oem.arch_setup = intel_mid_arch_setup;
- x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
-
x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
x86_init.pci.arch_init = intel_mid_pci_init;
@@ -188,25 +122,3 @@ void __init x86_intel_mid_early_setup(void)
x86_init.mpparse.get_smp_config = x86_init_uint_noop;
set_bit(MP_BUS_ISA, mp_bus_not_pci);
}
-
-/*
- * if user does not want to use per CPU apb timer, just give it a lower rating
- * than local apic timer and skip the late per cpu timer init.
- */
-static inline int __init setup_x86_intel_mid_timer(char *arg)
-{
- if (!arg)
- return -EINVAL;
-
- if (strcmp("apbt_only", arg) == 0)
- intel_mid_timer_options = INTEL_MID_TIMER_APBT_ONLY;
- else if (strcmp("lapic_and_apbt", arg) == 0)
- intel_mid_timer_options = INTEL_MID_TIMER_LAPIC_APBT;
- else {
- pr_warn("X86 INTEL_MID timer option %s not recognised use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n",
- arg);
- return -EINVAL;
- }
- return 0;
-}
-__setup("x86_intel_mid_timer=", setup_x86_intel_mid_timer);
diff --git a/arch/x86/platform/intel-mid/intel_mid_vrtc.c b/arch/x86/platform/intel-mid/intel_mid_vrtc.c
deleted file mode 100644
index 2226da4f437a..000000000000
--- a/arch/x86/platform/intel-mid/intel_mid_vrtc.c
+++ /dev/null
@@ -1,173 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * intel_mid_vrtc.c: Driver for virtual RTC device on Intel MID platform
- *
- * (C) Copyright 2009 Intel Corporation
- *
- * Note:
- * VRTC is emulated by system controller firmware, the real HW
- * RTC is located in the PMIC device. SCU FW shadows PMIC RTC
- * in a memory mapped IO space that is visible to the host IA
- * processor.
- *
- * This driver is based on RTC CMOS driver.
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/sfi.h>
-#include <linux/platform_device.h>
-#include <linux/mc146818rtc.h>
-
-#include <asm/intel-mid.h>
-#include <asm/intel_mid_vrtc.h>
-#include <asm/time.h>
-#include <asm/fixmap.h>
-
-static unsigned char __iomem *vrtc_virt_base;
-
-unsigned char vrtc_cmos_read(unsigned char reg)
-{
- unsigned char retval;
-
- /* vRTC's registers range from 0x0 to 0xD */
- if (reg > 0xd || !vrtc_virt_base)
- return 0xff;
-
- lock_cmos_prefix(reg);
- retval = __raw_readb(vrtc_virt_base + (reg << 2));
- lock_cmos_suffix(reg);
- return retval;
-}
-EXPORT_SYMBOL_GPL(vrtc_cmos_read);
-
-void vrtc_cmos_write(unsigned char val, unsigned char reg)
-{
- if (reg > 0xd || !vrtc_virt_base)
- return;
-
- lock_cmos_prefix(reg);
- __raw_writeb(val, vrtc_virt_base + (reg << 2));
- lock_cmos_suffix(reg);
-}
-EXPORT_SYMBOL_GPL(vrtc_cmos_write);
-
-void vrtc_get_time(struct timespec64 *now)
-{
- u8 sec, min, hour, mday, mon;
- unsigned long flags;
- u32 year;
-
- spin_lock_irqsave(&rtc_lock, flags);
-
- while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP))
- cpu_relax();
-
- sec = vrtc_cmos_read(RTC_SECONDS);
- min = vrtc_cmos_read(RTC_MINUTES);
- hour = vrtc_cmos_read(RTC_HOURS);
- mday = vrtc_cmos_read(RTC_DAY_OF_MONTH);
- mon = vrtc_cmos_read(RTC_MONTH);
- year = vrtc_cmos_read(RTC_YEAR);
-
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- /* vRTC YEAR reg contains the offset to 1972 */
- year += 1972;
-
- pr_info("vRTC: sec: %d min: %d hour: %d day: %d "
- "mon: %d year: %d\n", sec, min, hour, mday, mon, year);
-
- now->tv_sec = mktime64(year, mon, mday, hour, min, sec);
- now->tv_nsec = 0;
-}
-
-int vrtc_set_mmss(const struct timespec64 *now)
-{
- unsigned long flags;
- struct rtc_time tm;
- int year;
- int retval = 0;
-
- rtc_time64_to_tm(now->tv_sec, &tm);
- if (!rtc_valid_tm(&tm) && tm.tm_year >= 72) {
- /*
- * tm.year is the number of years since 1900, and the
- * vrtc need the years since 1972.
- */
- year = tm.tm_year - 72;
- spin_lock_irqsave(&rtc_lock, flags);
- vrtc_cmos_write(year, RTC_YEAR);
- vrtc_cmos_write(tm.tm_mon, RTC_MONTH);
- vrtc_cmos_write(tm.tm_mday, RTC_DAY_OF_MONTH);
- vrtc_cmos_write(tm.tm_hour, RTC_HOURS);
- vrtc_cmos_write(tm.tm_min, RTC_MINUTES);
- vrtc_cmos_write(tm.tm_sec, RTC_SECONDS);
- spin_unlock_irqrestore(&rtc_lock, flags);
- } else {
- pr_err("%s: Invalid vRTC value: write of %llx to vRTC failed\n",
- __func__, (s64)now->tv_sec);
- retval = -EINVAL;
- }
- return retval;
-}
-
-void __init intel_mid_rtc_init(void)
-{
- unsigned long vrtc_paddr;
-
- sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc);
-
- vrtc_paddr = sfi_mrtc_array[0].phys_addr;
- if (!sfi_mrtc_num || !vrtc_paddr)
- return;
-
- vrtc_virt_base = (void __iomem *)set_fixmap_offset_nocache(FIX_LNW_VRTC,
- vrtc_paddr);
- x86_platform.get_wallclock = vrtc_get_time;
- x86_platform.set_wallclock = vrtc_set_mmss;
-}
-
-/*
- * The Moorestown platform has a memory mapped virtual RTC device that emulates
- * the programming interface of the RTC.
- */
-
-static struct resource vrtc_resources[] = {
- [0] = {
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .flags = IORESOURCE_IRQ,
- }
-};
-
-static struct platform_device vrtc_device = {
- .name = "rtc_mrst",
- .id = -1,
- .resource = vrtc_resources,
- .num_resources = ARRAY_SIZE(vrtc_resources),
-};
-
-/* Register the RTC device if appropriate */
-static int __init intel_mid_device_create(void)
-{
- /* No Moorestown, no device */
- if (!intel_mid_identify_cpu())
- return -ENODEV;
- /* No timer, no device */
- if (!sfi_mrtc_num)
- return -ENODEV;
-
- /* iomem resource */
- vrtc_resources[0].start = sfi_mrtc_array[0].phys_addr;
- vrtc_resources[0].end = sfi_mrtc_array[0].phys_addr +
- MRST_VRTC_MAP_SZ;
- /* irq resource */
- vrtc_resources[1].start = sfi_mrtc_array[0].irq;
- vrtc_resources[1].end = sfi_mrtc_array[0].irq;
-
- return platform_device_register(&vrtc_device);
-}
-device_initcall(intel_mid_device_create);
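
For context on the file removed above: the vRTC is a memory-mapped shadow of the PMIC RTC maintained by SCU firmware, with CMOS-style registers 0x0 through 0xD laid out at a 4-byte stride and the YEAR register stored as an offset from 1972. A condensed sketch of the read path that the deleted vrtc_cmos_read() implemented (illustrative; the real helper also took the CMOS lock and used __raw_readb()):

	/* Illustrative only: read one shadowed vRTC register. */
	static u8 vrtc_read_sketch(void __iomem *vrtc_base, u8 reg)
	{
		if (reg > 0xd || !vrtc_base)		/* valid registers: 0x0..0xD */
			return 0xff;
		return readb(vrtc_base + (reg << 2));	/* 4-byte stride per register */
	}
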
diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c
deleted file mode 100644
index 30bd5714a3d4..000000000000
--- a/arch/x86/platform/intel-mid/sfi.c
+++ /dev/null
@@ -1,543 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * intel_mid_sfi.c: Intel MID SFI initialization code
- *
- * (C) Copyright 2013 Intel Corporation
- * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/scatterlist.h>
-#include <linux/sfi.h>
-#include <linux/spi/spi.h>
-#include <linux/i2c.h>
-#include <linux/skbuff.h>
-#include <linux/gpio.h>
-#include <linux/gpio_keys.h>
-#include <linux/input.h>
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-#include <linux/export.h>
-#include <linux/notifier.h>
-#include <linux/mmc/core.h>
-#include <linux/mmc/card.h>
-#include <linux/blkdev.h>
-
-#include <asm/setup.h>
-#include <asm/mpspec_def.h>
-#include <asm/hw_irq.h>
-#include <asm/apic.h>
-#include <asm/io_apic.h>
-#include <asm/intel-mid.h>
-#include <asm/intel_mid_vrtc.h>
-#include <asm/io.h>
-#include <asm/i8259.h>
-#include <asm/intel_scu_ipc.h>
-#include <asm/apb_timer.h>
-#include <asm/reboot.h>
-
-#define SFI_SIG_OEM0 "OEM0"
-#define MAX_IPCDEVS 24
-#define MAX_SCU_SPI 24
-#define MAX_SCU_I2C 24
-
-static struct platform_device *ipc_devs[MAX_IPCDEVS];
-static struct spi_board_info *spi_devs[MAX_SCU_SPI];
-static struct i2c_board_info *i2c_devs[MAX_SCU_I2C];
-static struct sfi_gpio_table_entry *gpio_table;
-static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
-static int ipc_next_dev;
-static int spi_next_dev;
-static int i2c_next_dev;
-static int i2c_bus[MAX_SCU_I2C];
-static int gpio_num_entry;
-static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
-int sfi_mrtc_num;
-int sfi_mtimer_num;
-
-struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
-EXPORT_SYMBOL_GPL(sfi_mrtc_array);
-
-struct blocking_notifier_head intel_scu_notifier =
- BLOCKING_NOTIFIER_INIT(intel_scu_notifier);
-EXPORT_SYMBOL_GPL(intel_scu_notifier);
-
-#define intel_mid_sfi_get_pdata(dev, priv) \
- ((dev)->get_platform_data ? (dev)->get_platform_data(priv) : NULL)
-
-/* parse all the mtimer info to a static mtimer array */
-int __init sfi_parse_mtmr(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb;
- struct sfi_timer_table_entry *pentry;
- struct mpc_intsrc mp_irq;
- int totallen;
-
- sb = (struct sfi_table_simple *)table;
- if (!sfi_mtimer_num) {
- sfi_mtimer_num = SFI_GET_NUM_ENTRIES(sb,
- struct sfi_timer_table_entry);
- pentry = (struct sfi_timer_table_entry *) sb->pentry;
- totallen = sfi_mtimer_num * sizeof(*pentry);
- memcpy(sfi_mtimer_array, pentry, totallen);
- }
-
- pr_debug("SFI MTIMER info (num = %d):\n", sfi_mtimer_num);
- pentry = sfi_mtimer_array;
- for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) {
- pr_debug("timer[%d]: paddr = 0x%08x, freq = %dHz, irq = %d\n",
- totallen, (u32)pentry->phys_addr,
- pentry->freq_hz, pentry->irq);
- mp_irq.type = MP_INTSRC;
- mp_irq.irqtype = mp_INT;
- mp_irq.irqflag = MP_IRQTRIG_EDGE | MP_IRQPOL_ACTIVE_HIGH;
- mp_irq.srcbus = MP_BUS_ISA;
- mp_irq.srcbusirq = pentry->irq; /* IRQ */
- mp_irq.dstapic = MP_APIC_ALL;
- mp_irq.dstirq = pentry->irq;
- mp_save_irq(&mp_irq);
- mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC, NULL);
- }
-
- return 0;
-}
-
-struct sfi_timer_table_entry *sfi_get_mtmr(int hint)
-{
- int i;
- if (hint < sfi_mtimer_num) {
- if (!sfi_mtimer_usage[hint]) {
- pr_debug("hint taken for timer %d irq %d\n",
- hint, sfi_mtimer_array[hint].irq);
- sfi_mtimer_usage[hint] = 1;
- return &sfi_mtimer_array[hint];
- }
- }
- /* take the first timer available */
- for (i = 0; i < sfi_mtimer_num;) {
- if (!sfi_mtimer_usage[i]) {
- sfi_mtimer_usage[i] = 1;
- return &sfi_mtimer_array[i];
- }
- i++;
- }
- return NULL;
-}
-
-void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr)
-{
- int i;
- for (i = 0; i < sfi_mtimer_num;) {
- if (mtmr->irq == sfi_mtimer_array[i].irq) {
- sfi_mtimer_usage[i] = 0;
- return;
- }
- i++;
- }
-}
-
-/* parse all the mrtc info to a global mrtc array */
-int __init sfi_parse_mrtc(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb;
- struct sfi_rtc_table_entry *pentry;
- struct mpc_intsrc mp_irq;
-
- int totallen;
-
- sb = (struct sfi_table_simple *)table;
- if (!sfi_mrtc_num) {
- sfi_mrtc_num = SFI_GET_NUM_ENTRIES(sb,
- struct sfi_rtc_table_entry);
- pentry = (struct sfi_rtc_table_entry *)sb->pentry;
- totallen = sfi_mrtc_num * sizeof(*pentry);
- memcpy(sfi_mrtc_array, pentry, totallen);
- }
-
- pr_debug("SFI RTC info (num = %d):\n", sfi_mrtc_num);
- pentry = sfi_mrtc_array;
- for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
- pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
- totallen, (u32)pentry->phys_addr, pentry->irq);
- mp_irq.type = MP_INTSRC;
- mp_irq.irqtype = mp_INT;
- mp_irq.irqflag = MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW;
- mp_irq.srcbus = MP_BUS_ISA;
- mp_irq.srcbusirq = pentry->irq; /* IRQ */
- mp_irq.dstapic = MP_APIC_ALL;
- mp_irq.dstirq = pentry->irq;
- mp_save_irq(&mp_irq);
- mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC, NULL);
- }
- return 0;
-}
-
-
-/*
- * Parsing GPIO table first, since the DEVS table will need this table
- * to map the pin name to the actual pin.
- */
-static int __init sfi_parse_gpio(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb;
- struct sfi_gpio_table_entry *pentry;
- int num, i;
-
- if (gpio_table)
- return 0;
- sb = (struct sfi_table_simple *)table;
- num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry);
- pentry = (struct sfi_gpio_table_entry *)sb->pentry;
-
- gpio_table = kmemdup(pentry, num * sizeof(*pentry), GFP_KERNEL);
- if (!gpio_table)
- return -1;
- gpio_num_entry = num;
-
- pr_debug("GPIO pin info:\n");
- for (i = 0; i < num; i++, pentry++)
- pr_debug("info[%2d]: controller = %16.16s, pin_name = %16.16s,"
- " pin = %d\n", i,
- pentry->controller_name,
- pentry->pin_name,
- pentry->pin_no);
- return 0;
-}
-
-int get_gpio_by_name(const char *name)
-{
- struct sfi_gpio_table_entry *pentry = gpio_table;
- int i;
-
- if (!pentry)
- return -1;
- for (i = 0; i < gpio_num_entry; i++, pentry++) {
- if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN))
- return pentry->pin_no;
- }
- return -EINVAL;
-}
-
-static void __init intel_scu_ipc_device_register(struct platform_device *pdev)
-{
- if (ipc_next_dev == MAX_IPCDEVS)
- pr_err("too many SCU IPC devices");
- else
- ipc_devs[ipc_next_dev++] = pdev;
-}
-
-static void __init intel_scu_spi_device_register(struct spi_board_info *sdev)
-{
- struct spi_board_info *new_dev;
-
- if (spi_next_dev == MAX_SCU_SPI) {
- pr_err("too many SCU SPI devices");
- return;
- }
-
- new_dev = kzalloc(sizeof(*sdev), GFP_KERNEL);
- if (!new_dev) {
- pr_err("failed to alloc mem for delayed spi dev %s\n",
- sdev->modalias);
- return;
- }
- *new_dev = *sdev;
-
- spi_devs[spi_next_dev++] = new_dev;
-}
-
-static void __init intel_scu_i2c_device_register(int bus,
- struct i2c_board_info *idev)
-{
- struct i2c_board_info *new_dev;
-
- if (i2c_next_dev == MAX_SCU_I2C) {
- pr_err("too many SCU I2C devices");
- return;
- }
-
- new_dev = kzalloc(sizeof(*idev), GFP_KERNEL);
- if (!new_dev) {
- pr_err("failed to alloc mem for delayed i2c dev %s\n",
- idev->type);
- return;
- }
- *new_dev = *idev;
-
- i2c_bus[i2c_next_dev] = bus;
- i2c_devs[i2c_next_dev++] = new_dev;
-}
-
-/* Called by IPC driver */
-void intel_scu_devices_create(void)
-{
- int i;
-
- for (i = 0; i < ipc_next_dev; i++)
- platform_device_add(ipc_devs[i]);
-
- for (i = 0; i < spi_next_dev; i++)
- spi_register_board_info(spi_devs[i], 1);
-
- for (i = 0; i < i2c_next_dev; i++) {
- struct i2c_adapter *adapter;
- struct i2c_client *client;
-
- adapter = i2c_get_adapter(i2c_bus[i]);
- if (adapter) {
- client = i2c_new_client_device(adapter, i2c_devs[i]);
- if (IS_ERR(client))
- pr_err("can't create i2c device %s\n",
- i2c_devs[i]->type);
- } else
- i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1);
- }
- intel_scu_notifier_post(SCU_AVAILABLE, NULL);
-}
-EXPORT_SYMBOL_GPL(intel_scu_devices_create);
-
-/* Called by IPC driver */
-void intel_scu_devices_destroy(void)
-{
- int i;
-
- intel_scu_notifier_post(SCU_DOWN, NULL);
-
- for (i = 0; i < ipc_next_dev; i++)
- platform_device_del(ipc_devs[i]);
-}
-EXPORT_SYMBOL_GPL(intel_scu_devices_destroy);
-
-static void __init install_irq_resource(struct platform_device *pdev, int irq)
-{
- /* Single threaded */
- static struct resource res __initdata = {
- .name = "IRQ",
- .flags = IORESOURCE_IRQ,
- };
- res.start = irq;
- platform_device_add_resources(pdev, &res, 1);
-}
-
-static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *pentry,
- struct devs_id *dev)
-{
- struct platform_device *pdev;
- void *pdata = NULL;
-
- pr_debug("IPC bus, name = %16.16s, irq = 0x%2x\n",
- pentry->name, pentry->irq);
-
- /*
- * We need to call platform init of IPC devices to fill misc_pdata
- * structure. It will be used in msic_init for initialization.
- */
- pdata = intel_mid_sfi_get_pdata(dev, pentry);
- if (IS_ERR(pdata))
- return;
-
- /*
- * On Medfield the platform device creation is handled by the MSIC
- * MFD driver so we don't need to do it here.
- */
- if (dev->msic && intel_mid_has_msic())
- return;
-
- pdev = platform_device_alloc(pentry->name, 0);
- if (pdev == NULL) {
- pr_err("out of memory for SFI platform device '%s'.\n",
- pentry->name);
- return;
- }
- install_irq_resource(pdev, pentry->irq);
-
- pdev->dev.platform_data = pdata;
- if (dev->delay)
- intel_scu_ipc_device_register(pdev);
- else
- platform_device_add(pdev);
-}
-
-static void __init sfi_handle_spi_dev(struct sfi_device_table_entry *pentry,
- struct devs_id *dev)
-{
- struct spi_board_info spi_info;
- void *pdata = NULL;
-
- memset(&spi_info, 0, sizeof(spi_info));
- strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
- spi_info.irq = ((pentry->irq == (u8)0xff) ? 0 : pentry->irq);
- spi_info.bus_num = pentry->host_num;
- spi_info.chip_select = pentry->addr;
- spi_info.max_speed_hz = pentry->max_freq;
- pr_debug("SPI bus=%d, name=%16.16s, irq=0x%2x, max_freq=%d, cs=%d\n",
- spi_info.bus_num,
- spi_info.modalias,
- spi_info.irq,
- spi_info.max_speed_hz,
- spi_info.chip_select);
-
- pdata = intel_mid_sfi_get_pdata(dev, &spi_info);
- if (IS_ERR(pdata))
- return;
-
- spi_info.platform_data = pdata;
- if (dev->delay)
- intel_scu_spi_device_register(&spi_info);
- else
- spi_register_board_info(&spi_info, 1);
-}
-
-static void __init sfi_handle_i2c_dev(struct sfi_device_table_entry *pentry,
- struct devs_id *dev)
-{
- struct i2c_board_info i2c_info;
- void *pdata = NULL;
-
- memset(&i2c_info, 0, sizeof(i2c_info));
- strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
- i2c_info.irq = ((pentry->irq == (u8)0xff) ? 0 : pentry->irq);
- i2c_info.addr = pentry->addr;
- pr_debug("I2C bus = %d, name = %16.16s, irq = 0x%2x, addr = 0x%x\n",
- pentry->host_num,
- i2c_info.type,
- i2c_info.irq,
- i2c_info.addr);
- pdata = intel_mid_sfi_get_pdata(dev, &i2c_info);
- i2c_info.platform_data = pdata;
- if (IS_ERR(pdata))
- return;
-
- if (dev->delay)
- intel_scu_i2c_device_register(pentry->host_num, &i2c_info);
- else
- i2c_register_board_info(pentry->host_num, &i2c_info, 1);
-}
-
-static void __init sfi_handle_sd_dev(struct sfi_device_table_entry *pentry,
- struct devs_id *dev)
-{
- struct mid_sd_board_info sd_info;
- void *pdata;
-
- memset(&sd_info, 0, sizeof(sd_info));
- strncpy(sd_info.name, pentry->name, SFI_NAME_LEN);
- sd_info.bus_num = pentry->host_num;
- sd_info.max_clk = pentry->max_freq;
- sd_info.addr = pentry->addr;
- pr_debug("SD bus = %d, name = %16.16s, max_clk = %d, addr = 0x%x\n",
- sd_info.bus_num,
- sd_info.name,
- sd_info.max_clk,
- sd_info.addr);
- pdata = intel_mid_sfi_get_pdata(dev, &sd_info);
- if (IS_ERR(pdata))
- return;
-
- /* Nothing we can do with this for now */
- sd_info.platform_data = pdata;
-
- pr_debug("Successfully registered %16.16s", sd_info.name);
-}
-
-extern struct devs_id *const __x86_intel_mid_dev_start[],
- *const __x86_intel_mid_dev_end[];
-
-static struct devs_id __init *get_device_id(u8 type, char *name)
-{
- struct devs_id *const *dev_table;
-
- for (dev_table = __x86_intel_mid_dev_start;
- dev_table < __x86_intel_mid_dev_end; dev_table++) {
- struct devs_id *dev = *dev_table;
- if (dev->type == type &&
- !strncmp(dev->name, name, SFI_NAME_LEN)) {
- return dev;
- }
- }
-
- return NULL;
-}
-
-static int __init sfi_parse_devs(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb;
- struct sfi_device_table_entry *pentry;
- struct devs_id *dev = NULL;
- int num, i, ret;
- int polarity;
- struct irq_alloc_info info;
-
- sb = (struct sfi_table_simple *)table;
- num = SFI_GET_NUM_ENTRIES(sb, struct sfi_device_table_entry);
- pentry = (struct sfi_device_table_entry *)sb->pentry;
-
- for (i = 0; i < num; i++, pentry++) {
- int irq = pentry->irq;
-
- if (irq != (u8)0xff) { /* native RTE case */
- /* these SPI2 devices are not exposed to system as PCI
- * devices, but they have separate RTE entry in IOAPIC
- * so we have to enable them one by one here
- */
- if (intel_mid_identify_cpu() ==
- INTEL_MID_CPU_CHIP_TANGIER) {
- if (!strncmp(pentry->name, "r69001-ts-i2c", 13))
- /* active low */
- polarity = 1;
- else if (!strncmp(pentry->name,
- "synaptics_3202", 14))
- /* active low */
- polarity = 1;
- else if (irq == 41)
- /* fast_int_1 */
- polarity = 1;
- else
- /* active high */
- polarity = 0;
- } else {
- /* PNW and CLV go with active low */
- polarity = 1;
- }
-
- ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 1, polarity);
- ret = mp_map_gsi_to_irq(irq, IOAPIC_MAP_ALLOC, &info);
- WARN_ON(ret < 0);
- }
-
- dev = get_device_id(pentry->type, pentry->name);
-
- if (!dev)
- continue;
-
- switch (pentry->type) {
- case SFI_DEV_TYPE_IPC:
- sfi_handle_ipc_dev(pentry, dev);
- break;
- case SFI_DEV_TYPE_SPI:
- sfi_handle_spi_dev(pentry, dev);
- break;
- case SFI_DEV_TYPE_I2C:
- sfi_handle_i2c_dev(pentry, dev);
- break;
- case SFI_DEV_TYPE_SD:
- sfi_handle_sd_dev(pentry, dev);
- break;
- case SFI_DEV_TYPE_UART:
- case SFI_DEV_TYPE_HSI:
- default:
- break;
- }
- }
- return 0;
-}
-
-static int __init intel_mid_platform_init(void)
-{
- sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, sfi_parse_gpio);
- sfi_table_parse(SFI_SIG_DEVS, NULL, NULL, sfi_parse_devs);
- return 0;
-}
-arch_initcall(intel_mid_platform_init);
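
The deleted sfi.c enumerated firmware-described devices by matching each SFI DEVS entry against a devs_id table that the device_libs/ files contributed, through the sfi_device() macro, into a dedicated linker section (__x86_intel_mid_dev_start .. __x86_intel_mid_dev_end), and then invoking the matching entry's get_platform_data() hook. A stripped-down sketch of that lookup, using the names from the removed code (illustrative only):

	/* Illustrative only: condensed get_device_id() plus the pdata callback. */
	static void *lookup_pdata_sketch(u8 type, const char *name, void *info)
	{
		struct devs_id *const *p;

		for (p = __x86_intel_mid_dev_start; p < __x86_intel_mid_dev_end; p++) {
			struct devs_id *dev = *p;

			if (dev->type == type && !strncmp(dev->name, name, SFI_NAME_LEN))
				return dev->get_platform_data ?
				       dev->get_platform_data(info) : NULL;
		}
		return NULL;
	}
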
diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
index 43b4d864817e..d2ccadc247e6 100644
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -16,6 +16,7 @@
#include <asm/boot.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
+#include <asm/nospec-branch.h>
#include <xen/interface/elfnote.h>
__HEAD
@@ -105,6 +106,7 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
/* startup_64 expects boot_params in %rsi. */
mov $_pa(pvh_bootparams), %rsi
mov $_pa(startup_64), %rax
+ ANNOTATE_RETPOLINE_SAFE
jmp *%rax
#else /* CONFIG_X86_64 */
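
This hunk, like the hibernate_asm_64.S changes further down, prefixes a deliberate indirect jump with ANNOTATE_RETPOLINE_SAFE so objtool accepts it when retpolines are enabled. The same annotation is used from C through inline assembly; a minimal sketch, assuming the macro provided by <asm/nospec-branch.h> (illustrative only, not part of the patch):

	#include <asm/nospec-branch.h>

	/* Illustrative only: mark an intentional indirect branch as retpoline-safe. */
	static void indirect_jump_sketch(void *target)
	{
		asm volatile(ANNOTATE_RETPOLINE_SAFE
			     "jmp *%0" : : "r" (target) : "memory");
		/* control does not return here */
	}
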
diff --git a/arch/x86/platform/sfi/sfi.c b/arch/x86/platform/sfi/sfi.c
deleted file mode 100644
index 6259563760f9..000000000000
--- a/arch/x86/platform/sfi/sfi.c
+++ /dev/null
@@ -1,100 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * sfi.c - x86 architecture SFI support.
- *
- * Copyright (c) 2009, Intel Corporation.
- */
-
-#define KMSG_COMPONENT "SFI"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/acpi.h>
-#include <linux/init.h>
-#include <linux/sfi.h>
-#include <linux/io.h>
-
-#include <asm/irqdomain.h>
-#include <asm/io_apic.h>
-#include <asm/mpspec.h>
-#include <asm/setup.h>
-#include <asm/apic.h>
-
-#ifdef CONFIG_X86_LOCAL_APIC
-static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
-
-/* All CPUs enumerated by SFI must be present and enabled */
-static void __init mp_sfi_register_lapic(u8 id)
-{
- if (MAX_LOCAL_APIC - id <= 0) {
- pr_warn("Processor #%d invalid (max %d)\n", id, MAX_LOCAL_APIC);
- return;
- }
-
- pr_info("registering lapic[%d]\n", id);
-
- generic_processor_info(id, GET_APIC_VERSION(apic_read(APIC_LVR)));
-}
-
-static int __init sfi_parse_cpus(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb;
- struct sfi_cpu_table_entry *pentry;
- int i;
- int cpu_num;
-
- sb = (struct sfi_table_simple *)table;
- cpu_num = SFI_GET_NUM_ENTRIES(sb, struct sfi_cpu_table_entry);
- pentry = (struct sfi_cpu_table_entry *)sb->pentry;
-
- for (i = 0; i < cpu_num; i++) {
- mp_sfi_register_lapic(pentry->apic_id);
- pentry++;
- }
-
- smp_found_config = 1;
- return 0;
-}
-#endif /* CONFIG_X86_LOCAL_APIC */
-
-#ifdef CONFIG_X86_IO_APIC
-
-static int __init sfi_parse_ioapic(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb;
- struct sfi_apic_table_entry *pentry;
- int i, num;
- struct ioapic_domain_cfg cfg = {
- .type = IOAPIC_DOMAIN_STRICT,
- .ops = &mp_ioapic_irqdomain_ops,
- };
-
- sb = (struct sfi_table_simple *)table;
- num = SFI_GET_NUM_ENTRIES(sb, struct sfi_apic_table_entry);
- pentry = (struct sfi_apic_table_entry *)sb->pentry;
-
- for (i = 0; i < num; i++) {
- mp_register_ioapic(i, pentry->phys_addr, gsi_top, &cfg);
- pentry++;
- }
-
- WARN(pic_mode, KERN_WARNING
- "SFI: pic_mod shouldn't be 1 when IOAPIC table is present\n");
- pic_mode = 0;
- return 0;
-}
-#endif /* CONFIG_X86_IO_APIC */
-
-/*
- * sfi_platform_init(): register lapics & io-apics
- */
-int __init sfi_platform_init(void)
-{
-#ifdef CONFIG_X86_LOCAL_APIC
- register_lapic_address(sfi_lapic_addr);
- sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, sfi_parse_cpus);
-#endif
-#ifdef CONFIG_X86_IO_APIC
- sfi_table_parse(SFI_SIG_APIC, NULL, NULL, sfi_parse_ioapic);
-#endif
- return 0;
-}
diff --git a/arch/x86/power/Makefile b/arch/x86/power/Makefile
index 6907b523e856..379777572bc9 100644
--- a/arch/x86/power/Makefile
+++ b/arch/x86/power/Makefile
@@ -1,9 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
-OBJECT_FILES_NON_STANDARD_hibernate_asm_$(BITS).o := y
# __restore_processor_state() restores %gs after S3 resume and so should not
# itself be stack-protected
CFLAGS_cpu.o := -fno-stack-protector
+# Clang may incorrectly inline functions with stack protector enabled into
+# __restore_processor_state(): https://bugs.llvm.org/show_bug.cgi?id=47479
+CFLAGS_REMOVE_cpu.o := $(CC_FLAGS_LTO)
+
obj-$(CONFIG_PM_SLEEP) += cpu.o
obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o hibernate.o
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 7918b8415f13..d9bed596d849 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -21,6 +21,53 @@
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/frame.h>
+#include <asm/nospec-branch.h>
+
+ /* code below belongs to the image kernel */
+ .align PAGE_SIZE
+SYM_FUNC_START(restore_registers)
+ /* go back to the original page tables */
+ movq %r9, %cr3
+
+ /* Flush TLB, including "global" things (vmalloc) */
+ movq mmu_cr4_features(%rip), %rax
+ movq %rax, %rdx
+ andq $~(X86_CR4_PGE), %rdx
+ movq %rdx, %cr4; # turn off PGE
+ movq %cr3, %rcx; # flush TLB
+ movq %rcx, %cr3
+ movq %rax, %cr4; # turn PGE back on
+
+ /* We don't restore %rax, it must be 0 anyway */
+ movq $saved_context, %rax
+ movq pt_regs_sp(%rax), %rsp
+ movq pt_regs_bp(%rax), %rbp
+ movq pt_regs_si(%rax), %rsi
+ movq pt_regs_di(%rax), %rdi
+ movq pt_regs_bx(%rax), %rbx
+ movq pt_regs_cx(%rax), %rcx
+ movq pt_regs_dx(%rax), %rdx
+ movq pt_regs_r8(%rax), %r8
+ movq pt_regs_r9(%rax), %r9
+ movq pt_regs_r10(%rax), %r10
+ movq pt_regs_r11(%rax), %r11
+ movq pt_regs_r12(%rax), %r12
+ movq pt_regs_r13(%rax), %r13
+ movq pt_regs_r14(%rax), %r14
+ movq pt_regs_r15(%rax), %r15
+ pushq pt_regs_flags(%rax)
+ popfq
+
+ /* Saved in save_processor_state. */
+ lgdt saved_context_gdt_desc(%rax)
+
+ xorl %eax, %eax
+
+ /* tell the hibernation core that we've just restored the memory */
+ movq %rax, in_suspend(%rip)
+
+ ret
+SYM_FUNC_END(restore_registers)
SYM_FUNC_START(swsusp_arch_suspend)
movq $saved_context, %rax
@@ -52,7 +99,7 @@ SYM_FUNC_START(swsusp_arch_suspend)
ret
SYM_FUNC_END(swsusp_arch_suspend)
-SYM_CODE_START(restore_image)
+SYM_FUNC_START(restore_image)
/* prepare to jump to the image kernel */
movq restore_jump_address(%rip), %r8
movq restore_cr3(%rip), %r9
@@ -66,11 +113,12 @@ SYM_CODE_START(restore_image)
/* jump to relocated restore code */
movq relocated_restore_code(%rip), %rcx
+ ANNOTATE_RETPOLINE_SAFE
jmpq *%rcx
-SYM_CODE_END(restore_image)
+SYM_FUNC_END(restore_image)
/* code below has been relocated to a safe page */
-SYM_CODE_START(core_restore_code)
+SYM_FUNC_START(core_restore_code)
/* switch to temporary page tables */
movq %rax, %cr3
/* flush TLB */
@@ -97,51 +145,6 @@ SYM_CODE_START(core_restore_code)
.Ldone:
/* jump to the restore_registers address from the image header */
+ ANNOTATE_RETPOLINE_SAFE
jmpq *%r8
-SYM_CODE_END(core_restore_code)
-
- /* code below belongs to the image kernel */
- .align PAGE_SIZE
-SYM_FUNC_START(restore_registers)
- /* go back to the original page tables */
- movq %r9, %cr3
-
- /* Flush TLB, including "global" things (vmalloc) */
- movq mmu_cr4_features(%rip), %rax
- movq %rax, %rdx
- andq $~(X86_CR4_PGE), %rdx
- movq %rdx, %cr4; # turn off PGE
- movq %cr3, %rcx; # flush TLB
- movq %rcx, %cr3
- movq %rax, %cr4; # turn PGE back on
-
- /* We don't restore %rax, it must be 0 anyway */
- movq $saved_context, %rax
- movq pt_regs_sp(%rax), %rsp
- movq pt_regs_bp(%rax), %rbp
- movq pt_regs_si(%rax), %rsi
- movq pt_regs_di(%rax), %rdi
- movq pt_regs_bx(%rax), %rbx
- movq pt_regs_cx(%rax), %rcx
- movq pt_regs_dx(%rax), %rdx
- movq pt_regs_r8(%rax), %r8
- movq pt_regs_r9(%rax), %r9
- movq pt_regs_r10(%rax), %r10
- movq pt_regs_r11(%rax), %r11
- movq pt_regs_r12(%rax), %r12
- movq pt_regs_r13(%rax), %r13
- movq pt_regs_r14(%rax), %r14
- movq pt_regs_r15(%rax), %r15
- pushq pt_regs_flags(%rax)
- popfq
-
- /* Saved in save_processor_state. */
- lgdt saved_context_gdt_desc(%rax)
-
- xorl %eax, %eax
-
- /* tell the hibernation core that we've just restored the memory */
- movq %rax, in_suspend(%rip)
-
- ret
-SYM_FUNC_END(restore_registers)
+SYM_FUNC_END(core_restore_code)
diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index 55b1ab378974..bddfc9a46645 100644
--- a/arch/x86/tools/Makefile
+++ b/arch/x86/tools/Makefile
@@ -29,14 +29,14 @@ posttest: $(obj)/insn_decoder_test vmlinux $(obj)/insn_sanity
hostprogs += insn_decoder_test insn_sanity
# -I needed for generated C source and C source which in the kernel tree.
-HOSTCFLAGS_insn_decoder_test.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/uapi/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/uapi/
+HOSTCFLAGS_insn_decoder_test.o := -Wall -I$(srctree)/tools/arch/x86/lib/ -I$(srctree)/tools/arch/x86/include/ -I$(objtree)/arch/x86/lib/
-HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/
+HOSTCFLAGS_insn_sanity.o := -Wall -I$(srctree)/tools/arch/x86/lib/ -I$(srctree)/tools/arch/x86/include/ -I$(objtree)/arch/x86/lib/
# Dependencies are also needed.
-$(obj)/insn_decoder_test.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
+$(obj)/insn_decoder_test.o: $(srctree)/tools/arch/x86/lib/insn.c $(srctree)/tools/arch/x86/lib/inat.c $(srctree)/tools/arch/x86/include/asm/inat_types.h $(srctree)/tools/arch/x86/include/asm/inat.h $(srctree)/tools/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
-$(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
+$(obj)/insn_sanity.o: $(srctree)/tools/arch/x86/lib/insn.c $(srctree)/tools/arch/x86/lib/inat.c $(srctree)/tools/arch/x86/include/asm/inat_types.h $(srctree)/tools/arch/x86/include/asm/inat.h $(srctree)/tools/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
hostprogs += relocs
diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c
index 185ceba9d289..c6a0000ae635 100644
--- a/arch/x86/tools/insn_sanity.c
+++ b/arch/x86/tools/insn_sanity.c
@@ -14,10 +14,6 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
-
-#define unlikely(cond) (cond)
-#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0]))
-
#include <asm/insn.h>
#include <inat.c>
#include <insn.c>
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index ce7188cbdae5..04c5a44b9682 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -61,8 +61,8 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
"(__iommu_table|__apicdrivers|__smp_locks)(|_end)|"
"__(start|end)_pci_.*|"
"__(start|end)_builtin_fw|"
- "__(start|stop)___ksymtab(|_gpl|_unused|_unused_gpl|_gpl_future)|"
- "__(start|stop)___kcrctab(|_gpl|_unused|_unused_gpl|_gpl_future)|"
+ "__(start|stop)___ksymtab(|_gpl)|"
+ "__(start|stop)___kcrctab(|_gpl)|"
"__(start|stop)___param|"
"__(start|stop)___modver|"
"__(start|stop)___bug_table|"
@@ -867,9 +867,11 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
case R_386_PC32:
case R_386_PC16:
case R_386_PC8:
+ case R_386_PLT32:
/*
- * NONE can be ignored and PC relative relocations don't
- * need to be adjusted.
+ * NONE can be ignored and PC relative relocations don't need
+ * to be adjusted. Because sym must be defined, R_386_PLT32 can
+ * be treated the same way as R_386_PC32.
*/
break;
@@ -910,9 +912,11 @@ static int do_reloc_real(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
case R_386_PC32:
case R_386_PC16:
case R_386_PC8:
+ case R_386_PLT32:
/*
- * NONE can be ignored and PC relative relocations don't
- * need to be adjusted.
+ * NONE can be ignored and PC relative relocations don't need
+ * to be adjusted. Because sym must be defined, R_386_PLT32 can
+ * be treated the same way as R_386_PC32.
*/
break;
diff --git a/arch/x86/um/os-Linux/task_size.c b/arch/x86/um/os-Linux/task_size.c
index e62174638f00..1dc9adc20b1c 100644
--- a/arch/x86/um/os-Linux/task_size.c
+++ b/arch/x86/um/os-Linux/task_size.c
@@ -145,7 +145,7 @@ out:
unsigned long os_get_top_address(void)
{
/* The old value of CONFIG_TOP_ADDR */
- return 0x7fc0000000;
+ return 0x7fc0002000;
}
#endif
diff --git a/arch/x86/um/shared/sysdep/stub_32.h b/arch/x86/um/shared/sysdep/stub_32.h
index 51fd256c75f0..c3891c1ada26 100644
--- a/arch/x86/um/shared/sysdep/stub_32.h
+++ b/arch/x86/um/shared/sysdep/stub_32.h
@@ -7,8 +7,8 @@
#define __SYSDEP_STUB_H
#include <asm/ptrace.h>
+#include <generated/asm-offsets.h>
-#define STUB_SYSCALL_RET EAX
#define STUB_MMAP_NR __NR_mmap2
#define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT)
@@ -77,17 +77,28 @@ static inline void trap_myself(void)
__asm("int3");
}
-static inline void remap_stack(int fd, unsigned long offset)
+static void inline remap_stack_and_trap(void)
{
- __asm__ volatile ("movl %%eax,%%ebp ; movl %0,%%eax ; int $0x80 ;"
- "movl %7, %%ebx ; movl %%eax, (%%ebx)"
- : : "g" (STUB_MMAP_NR), "b" (STUB_DATA),
- "c" (UM_KERN_PAGE_SIZE),
- "d" (PROT_READ | PROT_WRITE),
- "S" (MAP_FIXED | MAP_SHARED), "D" (fd),
- "a" (offset),
- "i" (&((struct stub_data *) STUB_DATA)->err)
- : "memory");
+ __asm__ volatile (
+ "movl %%esp,%%ebx ;"
+ "andl %0,%%ebx ;"
+ "movl %1,%%eax ;"
+ "movl %%ebx,%%edi ; addl %2,%%edi ; movl (%%edi),%%edi ;"
+ "movl %%ebx,%%ebp ; addl %3,%%ebp ; movl (%%ebp),%%ebp ;"
+ "int $0x80 ;"
+ "addl %4,%%ebx ; movl %%eax, (%%ebx) ;"
+ "int $3"
+ : :
+ "g" (~(UM_KERN_PAGE_SIZE - 1)),
+ "g" (STUB_MMAP_NR),
+ "g" (UML_STUB_FIELD_FD),
+ "g" (UML_STUB_FIELD_OFFSET),
+ "g" (UML_STUB_FIELD_CHILD_ERR),
+ "c" (UM_KERN_PAGE_SIZE),
+ "d" (PROT_READ | PROT_WRITE),
+ "S" (MAP_FIXED | MAP_SHARED)
+ :
+ "memory");
}
#endif
diff --git a/arch/x86/um/shared/sysdep/stub_64.h b/arch/x86/um/shared/sysdep/stub_64.h
index 994df93c5ed3..6e2626b77a2e 100644
--- a/arch/x86/um/shared/sysdep/stub_64.h
+++ b/arch/x86/um/shared/sysdep/stub_64.h
@@ -7,8 +7,8 @@
#define __SYSDEP_STUB_H
#include <sysdep/ptrace_user.h>
+#include <generated/asm-offsets.h>
-#define STUB_SYSCALL_RET PT_INDEX(RAX)
#define STUB_MMAP_NR __NR_mmap
#define MMAP_OFFSET(o) (o)
@@ -82,18 +82,30 @@ static inline void trap_myself(void)
__asm("int3");
}
-static inline void remap_stack(long fd, unsigned long offset)
+static inline void remap_stack_and_trap(void)
{
- __asm__ volatile ("movq %4,%%r10 ; movq %5,%%r8 ; "
- "movq %6, %%r9; " __syscall "; movq %7, %%rbx ; "
- "movq %%rax, (%%rbx)":
- : "a" (STUB_MMAP_NR), "D" (STUB_DATA),
- "S" (UM_KERN_PAGE_SIZE),
- "d" (PROT_READ | PROT_WRITE),
- "g" (MAP_FIXED | MAP_SHARED), "g" (fd),
- "g" (offset),
- "i" (&((struct stub_data *) STUB_DATA)->err)
- : __syscall_clobber, "r10", "r8", "r9" );
+ __asm__ volatile (
+ "movq %0,%%rax ;"
+ "movq %%rsp,%%rdi ;"
+ "andq %1,%%rdi ;"
+ "movq %2,%%r10 ;"
+ "movq %%rdi,%%r8 ; addq %3,%%r8 ; movq (%%r8),%%r8 ;"
+ "movq %%rdi,%%r9 ; addq %4,%%r9 ; movq (%%r9),%%r9 ;"
+ __syscall ";"
+ "movq %%rsp,%%rdi ; andq %1,%%rdi ;"
+ "addq %5,%%rdi ; movq %%rax, (%%rdi) ;"
+ "int3"
+ : :
+ "g" (STUB_MMAP_NR),
+ "g" (~(UM_KERN_PAGE_SIZE - 1)),
+ "g" (MAP_FIXED | MAP_SHARED),
+ "g" (UML_STUB_FIELD_FD),
+ "g" (UML_STUB_FIELD_OFFSET),
+ "g" (UML_STUB_FIELD_CHILD_ERR),
+ "S" (UM_KERN_PAGE_SIZE),
+ "d" (PROT_READ | PROT_WRITE)
+ :
+ __syscall_clobber, "r10", "r8", "r9");
}
#endif
diff --git a/arch/x86/um/stub_32.S b/arch/x86/um/stub_32.S
index a193e88536a9..8291899e6aaf 100644
--- a/arch/x86/um/stub_32.S
+++ b/arch/x86/um/stub_32.S
@@ -5,21 +5,22 @@
.globl batch_syscall_stub
batch_syscall_stub:
- /* load pointer to first operation */
- mov $(STUB_DATA+8), %esp
-
+ /* %esp comes in as "top of page" */
+ mov %esp, %ecx
+ /* %esp has pointer to first operation */
+ add $8, %esp
again:
/* load length of additional data */
mov 0x0(%esp), %eax
/* if(length == 0) : end of list */
/* write possible 0 to header */
- mov %eax, STUB_DATA+4
+ mov %eax, 0x4(%ecx)
cmpl $0, %eax
jz done
/* save current pointer */
- mov %esp, STUB_DATA+4
+ mov %esp, 0x4(%ecx)
/* skip additional data */
add %eax, %esp
@@ -38,6 +39,10 @@ again:
/* execute syscall */
int $0x80
+ /* restore top of page pointer in %ecx */
+ mov %esp, %ecx
+ andl $(~UM_KERN_PAGE_SIZE) + 1, %ecx
+
/* check return value */
pop %ebx
cmp %ebx, %eax
@@ -45,7 +50,7 @@ again:
done:
/* save return value */
- mov %eax, STUB_DATA
+ mov %eax, (%ecx)
/* stop */
int3
diff --git a/arch/x86/um/stub_64.S b/arch/x86/um/stub_64.S
index 8a95c5b2eaf9..f3404640197a 100644
--- a/arch/x86/um/stub_64.S
+++ b/arch/x86/um/stub_64.S
@@ -4,9 +4,8 @@
.section .__syscall_stub, "ax"
.globl batch_syscall_stub
batch_syscall_stub:
- mov $(STUB_DATA), %rbx
- /* load pointer to first operation */
- mov %rbx, %rsp
+ /* %rsp has the pointer to first operation */
+ mov %rsp, %rbx
add $0x10, %rsp
again:
/* load length of additional data */
diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c
index 27361cbb7ca9..21836eaf1725 100644
--- a/arch/x86/um/stub_segv.c
+++ b/arch/x86/um/stub_segv.c
@@ -11,10 +11,11 @@
void __attribute__ ((__section__ (".__syscall_stub")))
stub_segv_handler(int sig, siginfo_t *info, void *p)
{
+ int stack;
ucontext_t *uc = p;
+ struct faultinfo *f = (void *)(((unsigned long)&stack) & ~(UM_KERN_PAGE_SIZE - 1));
- GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
- &uc->uc_mcontext);
+ GET_FAULTINFO_FROM_MC(*f, &uc->uc_mcontext);
trap_myself();
}
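
The stub_segv.c change above recovers the stub data page from the address of a local variable: the stub runs on a stack placed inside that page, so masking any on-stack address with ~(UM_KERN_PAGE_SIZE - 1) yields the page base holding the fault information, replacing the old fixed STUB_DATA constant. The same trick written out as a small helper (illustrative only):

	/* Illustrative only: derive the stub data page base from the stack. */
	static unsigned long stub_page_base_sketch(void)
	{
		int anchor;	/* any local variable sits on the stub stack */

		return (unsigned long)&anchor & ~(UM_KERN_PAGE_SIZE - 1);
	}
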
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index fc5c5ba4aacb..40b5779fce21 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-OBJECT_FILES_NON_STANDARD_xen-asm.o := y
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 4409306364dc..dc0a337f985b 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -567,10 +567,16 @@ void noist_exc_debug(struct pt_regs *regs);
DEFINE_IDTENTRY_RAW(xenpv_exc_nmi)
{
- /* On Xen PV, NMI doesn't use IST. The C part is the sane as native. */
+ /* On Xen PV, NMI doesn't use IST. The C part is the same as native. */
exc_nmi(regs);
}
+DEFINE_IDTENTRY_RAW_ERRORCODE(xenpv_exc_double_fault)
+{
+ /* On Xen PV, DF doesn't use IST. The C part is the same as native. */
+ exc_double_fault(regs, error_code);
+}
+
DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
{
/*
@@ -583,6 +589,27 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
exc_debug(regs);
}
+DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
+{
+ /* This should never happen and there is no way to handle it. */
+ pr_err("Unknown trap in Xen PV mode.");
+ BUG();
+}
+
+#ifdef CONFIG_X86_MCE
+DEFINE_IDTENTRY_RAW(xenpv_exc_machine_check)
+{
+ /*
+ * There's no IST on Xen PV, but we still need to dispatch
+ * to the correct handler.
+ */
+ if (user_mode(regs))
+ noist_exc_machine_check(regs);
+ else
+ exc_machine_check(regs);
+}
+#endif
+
struct trap_array_entry {
void (*orig)(void);
void (*xen)(void);
@@ -601,9 +628,9 @@ struct trap_array_entry {
static struct trap_array_entry trap_array[] = {
TRAP_ENTRY_REDIR(exc_debug, true ),
- TRAP_ENTRY(exc_double_fault, true ),
+ TRAP_ENTRY_REDIR(exc_double_fault, true ),
#ifdef CONFIG_X86_MCE
- TRAP_ENTRY(exc_machine_check, true ),
+ TRAP_ENTRY_REDIR(exc_machine_check, true ),
#endif
TRAP_ENTRY_REDIR(exc_nmi, true ),
TRAP_ENTRY(exc_int3, false ),
@@ -631,6 +658,7 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
{
unsigned int nr;
bool ist_okay = false;
+ bool found = false;
/*
* Replace trap handler addresses by Xen specific ones.
@@ -645,6 +673,7 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
if (*addr == entry->orig) {
*addr = entry->xen;
ist_okay = entry->ist_okay;
+ found = true;
break;
}
}
@@ -655,9 +684,13 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
nr = (*addr - (void *)early_idt_handler_array[0]) /
EARLY_IDT_HANDLER_SIZE;
*addr = (void *)xen_early_idt_handler_array[nr];
+ found = true;
}
- if (WARN_ON(ist != 0 && !ist_okay))
+ if (!found)
+ *addr = (void *)xen_asm_exc_xen_unknown_trap;
+
+ if (WARN_ON(found && ist != 0 && !ist_okay))
return false;
return true;
@@ -1002,8 +1035,6 @@ void __init xen_setup_vcpu_info_placement(void)
*/
if (xen_have_vcpu_info_placement) {
pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
- pv_ops.irq.restore_fl =
- __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
pv_ops.irq.irq_disable =
__PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
pv_ops.irq.irq_enable =
@@ -1040,7 +1071,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
.read_pmc = xen_read_pmc,
.iret = xen_iret,
- .usergs_sysret64 = xen_sysret64,
.load_tr_desc = paravirt_nop,
.set_ldt = xen_set_ldt,
@@ -1065,9 +1095,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
#endif
.io_delay = xen_io_delay,
- /* Xen takes care of %gs when switching to usermode for us */
- .swapgs = paravirt_nop,
-
.start_context_switch = paravirt_start_context_switch,
.end_context_switch = xen_end_context_switch,
};
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 850c93f346c7..dfa091d79c2e 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -42,28 +42,6 @@ asmlinkage __visible unsigned long xen_save_fl(void)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
-__visible void xen_restore_fl(unsigned long flags)
-{
- struct vcpu_info *vcpu;
-
- /* convert from IF type flag */
- flags = !(flags & X86_EFLAGS_IF);
-
- /* See xen_irq_enable() for why preemption must be disabled. */
- preempt_disable();
- vcpu = this_cpu_read(xen_vcpu);
- vcpu->evtchn_upcall_mask = flags;
-
- if (flags == 0) {
- barrier(); /* unmask then check (avoid races) */
- if (unlikely(vcpu->evtchn_upcall_pending))
- xen_force_evtchn_callback();
- preempt_enable();
- } else
- preempt_enable_no_resched();
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
-
asmlinkage __visible void xen_irq_disable(void)
{
/* There's a one instruction preempt window here. We need to
@@ -118,7 +96,6 @@ static void xen_halt(void)
static const struct pv_irq_ops xen_irq_ops __initconst = {
.save_fl = PV_CALLEE_SAVE(xen_save_fl),
- .restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 3301875dd196..a3cc33091f46 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -416,6 +416,9 @@ void __init xen_vmalloc_p2m_tree(void)
xen_p2m_last_pfn = xen_max_p2m_pfn;
p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
+ if (!p2m_limit && IS_ENABLED(CONFIG_XEN_UNPOPULATED_ALLOC))
+ p2m_limit = xen_start_info->nr_pages * XEN_EXTRA_MEM_RATIO;
+
vm.flags = VM_ALLOC;
vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
PMD_SIZE * PMDS_PER_MID_PAGE);
@@ -652,10 +655,9 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
pte_t *ptep;
unsigned int level;
- if (unlikely(pfn >= xen_p2m_size)) {
- BUG_ON(mfn != INVALID_P2M_ENTRY);
- return true;
- }
+ /* Only invalid entries allowed above the highest p2m covered frame. */
+ if (unlikely(pfn >= xen_p2m_size))
+ return mfn == INVALID_P2M_ENTRY;
/*
* The interface requires atomic updates on p2m elements.
@@ -710,9 +712,12 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
for (i = 0; i < count; i++) {
unsigned long mfn, pfn;
+ struct gnttab_unmap_grant_ref unmap[2];
+ int rc;
/* Do not add to override if the map failed. */
- if (map_ops[i].status)
+ if (map_ops[i].status != GNTST_okay ||
+ (kmap_ops && kmap_ops[i].status != GNTST_okay))
continue;
if (map_ops[i].flags & GNTMAP_contains_pte) {
@@ -726,10 +731,46 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");
- if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
- ret = -ENOMEM;
- goto out;
+ if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
+ continue;
+
+ /*
+ * Signal an error for this slot. This in turn requires
+ * immediate unmapping.
+ */
+ map_ops[i].status = GNTST_general_error;
+ unmap[0].host_addr = map_ops[i].host_addr,
+ unmap[0].handle = map_ops[i].handle;
+ map_ops[i].handle = ~0;
+ if (map_ops[i].flags & GNTMAP_device_map)
+ unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr;
+ else
+ unmap[0].dev_bus_addr = 0;
+
+ if (kmap_ops) {
+ kmap_ops[i].status = GNTST_general_error;
+ unmap[1].host_addr = kmap_ops[i].host_addr,
+ unmap[1].handle = kmap_ops[i].handle;
+ kmap_ops[i].handle = ~0;
+ if (kmap_ops[i].flags & GNTMAP_device_map)
+ unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr;
+ else
+ unmap[1].dev_bus_addr = 0;
}
+
+ /*
+ * Pre-populate both status fields, to be recognizable in
+ * the log message below.
+ */
+ unmap[0].status = 1;
+ unmap[1].status = 1;
+
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
+ unmap, 1 + !!kmap_ops);
+ if (rc || unmap[0].status != GNTST_okay ||
+ unmap[1].status != GNTST_okay)
+ pr_err_once("gnttab unmap failed: rc=%d st0=%d st1=%d\n",
+ rc, unmap[0].status, unmap[1].status);
}
out:
@@ -750,17 +791,15 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
unsigned long pfn = page_to_pfn(pages[i]);
- if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
+ if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT))
+ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ else
ret = -EINVAL;
- goto out;
- }
-
- set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
if (kunmap_ops)
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
- kunmap_ops, count);
-out:
+ kunmap_ops, count) ?: ret;
+
return ret;
}
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
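
One consequence of the p2m.c sizing hunk above: when no p2m_limit override is given and XEN_UNPOPULATED_ALLOC is enabled, the p2m tree is pre-sized for xen_start_info->nr_pages * XEN_EXTRA_MEM_RATIO frames, so memory populated after boot still has room for its p2m entries. As a rough worked example (assuming 4 KiB pages and the 10x ratio described by the EXTRA_MEM_RATIO comment removed from setup.c below), a domain started with 2 GiB has nr_pages = 524288, so the tree would be dimensioned for about 5.2 million frames, roughly 20 GiB of eventually populated memory.
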
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 7eab14d56369..1a3b75652fa4 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -59,18 +59,6 @@ static struct {
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
-/*
- * The maximum amount of extra memory compared to the base size. The
- * main scaling factor is the size of struct page. At extreme ratios
- * of base:extra, all the base memory can be filled with page
- * structures for the extra memory, leaving no space for anything
- * else.
- *
- * 10x seems like a reasonable balance between scaling flexibility and
- * leaving a practically usable system.
- */
-#define EXTRA_MEM_RATIO (10)
-
static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
static void __init xen_parse_512gb(void)
@@ -790,20 +778,13 @@ char * __init xen_memory_setup(void)
extra_pages += max_pages - max_pfn;
/*
- * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
- * factor the base size. On non-highmem systems, the base
- * size is the full initial memory allocation; on highmem it
- * is limited to the max size of lowmem, so that it doesn't
- * get completely filled.
+ * Clamp the amount of extra memory to a XEN_EXTRA_MEM_RATIO
+ * factor the base size.
*
* Make sure we have no memory above max_pages, as this area
* isn't handled by the p2m management.
- *
- * In principle there could be a problem in lowmem systems if
- * the initial memory is also very large with respect to
- * lowmem, but we won't try to deal with that here.
*/
- extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+ extra_pages = min3(XEN_EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
extra_pages, max_pages - max_pfn);
i = 0;
addr = xen_e820_table.entries[0].addr;
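
To make the clamp in the hunk above concrete: extra_pages becomes the minimum of three terms, XEN_EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), the extra memory the hypervisor actually offers, and max_pages - max_pfn. Assuming 4 KiB pages and the 10x ratio, a domain booted with 4 GiB (max_pfn = 1048576) is capped by the first term at 10485760 extra frames, about 40 GiB, and whichever of the three terms is smallest wins.
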
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 1cb0e84b9161..1e626444712b 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -14,6 +14,7 @@
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>
+#include <asm/unwind_hints.h>
#include <xen/interface/xen.h>
@@ -72,34 +73,6 @@ SYM_FUNC_START(xen_save_fl_direct)
ret
SYM_FUNC_END(xen_save_fl_direct)
-
-/*
- * In principle the caller should be passing us a value return from
- * xen_save_fl_direct, but for robustness sake we test only the
- * X86_EFLAGS_IF flag rather than the whole byte. After setting the
- * interrupt mask state, it checks for unmasked pending events and
- * enters the hypervisor to get them delivered if so.
- */
-SYM_FUNC_START(xen_restore_fl_direct)
- FRAME_BEGIN
- testw $X86_EFLAGS_IF, %di
- setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
- /*
- * Preempt here doesn't matter because that will deal with any
- * pending interrupts. The pending check may end up being run
- * on the wrong CPU, but that doesn't hurt.
- */
-
- /* check for unmasked and pending */
- cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
- jnz 1f
- call check_events
-1:
- FRAME_END
- ret
-SYM_FUNC_END(xen_restore_fl_direct)
-
-
/*
* Force an event check by making a hypercall, but preserve regs
* before making the call.
@@ -146,6 +119,7 @@ SYM_FUNC_END(xen_read_cr2_direct);
.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
+ UNWIND_HINT_EMPTY
pop %rcx
pop %r11
jmp \name
@@ -161,7 +135,7 @@ xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
-xen_pv_trap asm_exc_double_fault
+xen_pv_trap asm_xenpv_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
@@ -172,18 +146,20 @@ xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_MCE
-xen_pv_trap asm_exc_machine_check
+xen_pv_trap asm_xenpv_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
+xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback
__INIT
SYM_CODE_START(xen_early_idt_handler_array)
i = 0
.rept NUM_EXCEPTION_VECTORS
+ UNWIND_HINT_EMPTY
pop %rcx
pop %r11
jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
@@ -210,30 +186,11 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
* rsp->rax }
*/
SYM_CODE_START(xen_iret)
+ UNWIND_HINT_EMPTY
pushq $0
jmp hypercall_iret
SYM_CODE_END(xen_iret)
-SYM_CODE_START(xen_sysret64)
- /*
- * We're already on the usermode stack at this point, but
- * still with the kernel gs, so we can easily switch back.
- *
- * tss.sp2 is scratch space.
- */
- movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
- movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
-
- pushq $__USER_DS
- pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
- pushq %r11
- pushq $__USER_CS
- pushq %rcx
-
- pushq $VGCF_in_syscall
- jmp hypercall_iret
-SYM_CODE_END(xen_sysret64)
-
/*
* Xen handles syscall callbacks much like ordinary exceptions, which
* means we have:
@@ -250,7 +207,8 @@ SYM_CODE_END(xen_sysret64)
*/
/* Normal 64-bit system call target */
-SYM_FUNC_START(xen_syscall_target)
+SYM_CODE_START(xen_syscall_target)
+ UNWIND_HINT_EMPTY
popq %rcx
popq %r11
@@ -263,12 +221,13 @@ SYM_FUNC_START(xen_syscall_target)
movq $__USER_CS, 1*8(%rsp)
jmp entry_SYSCALL_64_after_hwframe
-SYM_FUNC_END(xen_syscall_target)
+SYM_CODE_END(xen_syscall_target)
#ifdef CONFIG_IA32_EMULATION
/* 32-bit compat syscall target */
-SYM_FUNC_START(xen_syscall32_target)
+SYM_CODE_START(xen_syscall32_target)
+ UNWIND_HINT_EMPTY
popq %rcx
popq %r11
@@ -281,10 +240,11 @@ SYM_FUNC_START(xen_syscall32_target)
movq $__USER32_CS, 1*8(%rsp)
jmp entry_SYSCALL_compat_after_hwframe
-SYM_FUNC_END(xen_syscall32_target)
+SYM_CODE_END(xen_syscall32_target)
/* 32-bit compat sysenter target */
-SYM_FUNC_START(xen_sysenter_target)
+SYM_CODE_START(xen_sysenter_target)
+ UNWIND_HINT_EMPTY
/*
* NB: Xen is polite and clears TF from EFLAGS for us. This means
* that we don't need to guard against single step exceptions here.
@@ -301,17 +261,18 @@ SYM_FUNC_START(xen_sysenter_target)
movq $__USER32_CS, 1*8(%rsp)
jmp entry_SYSENTER_compat_after_hwframe
-SYM_FUNC_END(xen_sysenter_target)
+SYM_CODE_END(xen_sysenter_target)
#else /* !CONFIG_IA32_EMULATION */
-SYM_FUNC_START_ALIAS(xen_syscall32_target)
-SYM_FUNC_START(xen_sysenter_target)
+SYM_CODE_START(xen_syscall32_target)
+SYM_CODE_START(xen_sysenter_target)
+ UNWIND_HINT_EMPTY
lea 16(%rsp), %rsp /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
pushq $0
jmp hypercall_iret
-SYM_FUNC_END(xen_sysenter_target)
-SYM_FUNC_END_ALIAS(xen_syscall32_target)
+SYM_CODE_END(xen_sysenter_target)
+SYM_CODE_END(xen_syscall32_target)
#endif /* CONFIG_IA32_EMULATION */
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 2d7c8f34f56c..cb6538ae2fe0 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -68,8 +68,9 @@ SYM_CODE_END(asm_cpu_bringup_and_idle)
.balign PAGE_SIZE
SYM_CODE_START(hypercall_page)
.rept (PAGE_SIZE / 32)
- UNWIND_HINT_EMPTY
- .skip 32
+ UNWIND_HINT_FUNC
+ .skip 31, 0x90
+ ret
.endr
#define HYPERCALL(n) \
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 9546c3384c75..8d7ec49a35fb 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -131,15 +131,12 @@ static inline void __init xen_efi_init(struct boot_params *boot_params)
__visible void xen_irq_enable_direct(void);
__visible void xen_irq_disable_direct(void);
__visible unsigned long xen_save_fl_direct(void);
-__visible void xen_restore_fl_direct(unsigned long);
__visible unsigned long xen_read_cr2(void);
__visible unsigned long xen_read_cr2_direct(void);
/* These are not functions, and cannot be called normally */
__visible void xen_iret(void);
-__visible void xen_sysret32(void);
-__visible void xen_sysret64(void);
extern int xen_panic_handler_init(void);
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 37ce1489364e..9ad6b7b82707 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -32,7 +32,6 @@ config XTENSA
select HAVE_FUTEX_CMPXCHG if !MMU
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
- select HAVE_OPROFILE
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_STACKPROTECTOR
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 67a7d151d1e7..cf0940708702 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -83,7 +83,6 @@ core-y += $(buildvar) $(buildplf)
core-y += arch/xtensa/boot/dts/
libs-y += arch/xtensa/lib/ $(LIBGCC)
-drivers-$(CONFIG_OPROFILE) += arch/xtensa/oprofile/
boot := arch/xtensa/boot
diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig
index eeb4c5383c83..3be62da8089b 100644
--- a/arch/xtensa/configs/audio_kc705_defconfig
+++ b/arch/xtensa/configs/audio_kc705_defconfig
@@ -18,7 +18,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_XTENSA_VARIANT_CUSTOM=y
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
index 412f611033cc..e9d6b6f6eca1 100644
--- a/arch/xtensa/configs/generic_kc705_defconfig
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -18,7 +18,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_XTENSA_VARIANT_DC233C=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index 4f1c7998b030..a47c85638ec1 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -18,7 +18,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_XTENSA_VARIANT_CUSTOM=y
diff --git a/arch/xtensa/configs/xip_kc705_defconfig b/arch/xtensa/configs/xip_kc705_defconfig
index f9e85c082afc..4f1ff9531f6a 100644
--- a/arch/xtensa/configs/xip_kc705_defconfig
+++ b/arch/xtensa/configs/xip_kc705_defconfig
@@ -31,7 +31,6 @@ CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0
CONFIG_USE_OF=y
CONFIG_BUILTIN_DTB_SOURCE="kc705"
# CONFIG_PARSE_BOOTPARAM is not set
-CONFIG_OPROFILE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_COMPACTION is not set
CONFIG_NET=y
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h
index 584b0de6f2ca..41c449ece2d8 100644
--- a/arch/xtensa/include/asm/spinlock.h
+++ b/arch/xtensa/include/asm/spinlock.h
@@ -12,8 +12,8 @@
#define _XTENSA_SPINLOCK_H
#include <asm/barrier.h>
-#include <asm/qrwlock.h>
#include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
#define smp_mb__after_spinlock() smp_mb()
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 397a7de56377..9534ef515d74 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -217,7 +217,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
p->thread.sp = (unsigned long)childregs;
- if (!(p->flags & PF_KTHREAD)) {
+ if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
struct pt_regs *regs = current_pt_regs();
unsigned long usp = usp_thread_fn ?
usp_thread_fn : regs->areg[1];
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
index c822abb93d20..7f7755cd28f0 100644
--- a/arch/xtensa/kernel/stacktrace.c
+++ b/arch/xtensa/kernel/stacktrace.c
@@ -16,7 +16,7 @@
#include <asm/traps.h>
#include <linux/uaccess.h>
-#if IS_ENABLED(CONFIG_OPROFILE) || IS_ENABLED(CONFIG_PERF_EVENTS)
+#if IS_ENABLED(CONFIG_PERF_EVENTS)
/* Address of common_exception_return, used to check the
* transition from kernel to user space.
diff --git a/arch/xtensa/kernel/syscalls/Makefile b/arch/xtensa/kernel/syscalls/Makefile
index 659faefdcb1d..285aaba832d9 100644
--- a/arch/xtensa/kernel/syscalls/Makefile
+++ b/arch/xtensa/kernel/syscalls/Makefile
@@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm
_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
-syscall := $(srctree)/$(src)/syscall.tbl
+syscall := $(src)/syscall.tbl
syshdr := $(srctree)/$(src)/syscallhdr.sh
systbl := $(srctree)/$(src)/syscalltbl.sh
@@ -21,18 +21,19 @@ quiet_cmd_systbl = SYSTBL $@
'$(systbl_abi_$(basetarget))' \
'$(systbl_offset_$(basetarget))'
-$(uapi)/unistd_32.h: $(syscall) $(syshdr)
+$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-$(kapi)/syscall_table.h: $(syscall) $(systbl)
+$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
uapisyshdr-y += unistd_32.h
kapisyshdr-y += syscall_table.h
-targets += $(uapisyshdr-y) $(kapisyshdr-y)
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
PHONY += all
-all: $(addprefix $(uapi)/,$(uapisyshdr-y))
-all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+all: $(uapisyshdr-y) $(kapisyshdr-y)
@:
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 46116a28eeed..365a9b849224 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -412,3 +412,4 @@
439 common faccessat2 sys_faccessat2
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
diff --git a/arch/xtensa/oprofile/Makefile b/arch/xtensa/oprofile/Makefile
deleted file mode 100644
index f559b9ffbb3f..000000000000
--- a/arch/xtensa/oprofile/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
diff --git a/arch/xtensa/oprofile/backtrace.c b/arch/xtensa/oprofile/backtrace.c
deleted file mode 100644
index 8f952034e161..000000000000
--- a/arch/xtensa/oprofile/backtrace.c
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * @file backtrace.c
- *
- * @remark Copyright 2008 Tensilica Inc.
- * Copyright (C) 2015 Cadence Design Systems Inc.
- * @remark Read the file COPYING
- *
- */
-
-#include <linux/oprofile.h>
-#include <asm/ptrace.h>
-#include <asm/stacktrace.h>
-
-static int xtensa_backtrace_cb(struct stackframe *frame, void *data)
-{
- oprofile_add_trace(frame->pc);
- return 0;
-}
-
-void xtensa_backtrace(struct pt_regs * const regs, unsigned int depth)
-{
- if (user_mode(regs))
- xtensa_backtrace_user(regs, depth, xtensa_backtrace_cb, NULL);
- else
- xtensa_backtrace_kernel(regs, depth, xtensa_backtrace_cb,
- xtensa_backtrace_cb, NULL);
-}
diff --git a/arch/xtensa/oprofile/init.c b/arch/xtensa/oprofile/init.c
deleted file mode 100644
index a67eea379766..000000000000
--- a/arch/xtensa/oprofile/init.c
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * @file init.c
- *
- * @remark Copyright 2008 Tensilica Inc.
- * @remark Read the file COPYING
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/oprofile.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-
-extern void xtensa_backtrace(struct pt_regs *const regs, unsigned int depth);
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- ops->backtrace = xtensa_backtrace;
- return -ENODEV;
-}
-
-
-void oprofile_arch_exit(void)
-{
-}
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 3447556d276d..fc09be7b1347 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -103,7 +103,7 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
static blk_qc_t simdisk_submit_bio(struct bio *bio)
{
- struct simdisk *dev = bio->bi_disk->private_data;
+ struct simdisk *dev = bio->bi_bdev->bd_disk->private_data;
struct bio_vec bvec;
struct bvec_iter iter;
sector_t sector = bio->bi_iter.bi_sector;
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 9e4eb0fc1c16..95586137194e 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -125,6 +125,8 @@
#include <linux/delay.h>
#include <linux/backing-dev.h>
+#include <trace/events/block.h>
+
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
@@ -158,10 +160,9 @@ BFQ_BFQQ_FNS(in_large_burst);
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
-BFQ_BFQQ_FNS(has_waker);
#undef BFQ_BFQQ_FNS \
-/* Expiration time of sync (0) and async (1) requests, in ns. */
+/* Expiration time of async (0) and sync (1) requests, in ns. */
static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
/* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
@@ -1024,9 +1025,16 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
else
bfq_clear_bfqq_IO_bound(bfqq);
+ bfqq->last_serv_time_ns = bic->saved_last_serv_time_ns;
+ bfqq->inject_limit = bic->saved_inject_limit;
+ bfqq->decrease_time_jif = bic->saved_decrease_time_jif;
+
bfqq->entity.new_weight = bic->saved_weight;
bfqq->ttime = bic->saved_ttime;
+ bfqq->io_start_time = bic->saved_io_start_time;
+ bfqq->tot_idle_time = bic->saved_tot_idle_time;
bfqq->wr_coeff = bic->saved_wr_coeff;
+ bfqq->service_from_wr = bic->saved_service_from_wr;
bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
@@ -1647,6 +1655,8 @@ static bool bfq_bfqq_higher_class_or_weight(struct bfq_queue *bfqq,
return bfqq_weight > in_serv_weight;
}
+static bool bfq_better_to_idle(struct bfq_queue *bfqq);
+
static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
int old_wr_coeff,
@@ -1671,15 +1681,19 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
* - it is sync,
* - it does not belong to a large burst,
* - it has been idle for enough time or is soft real-time,
- * - is linked to a bfq_io_cq (it is not shared in any sense).
+ * - is linked to a bfq_io_cq (it is not shared in any sense),
+ * - has a default weight (otherwise we assume the user wanted
+ * to control its weight explicitly)
*/
in_burst = bfq_bfqq_in_large_burst(bfqq);
soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
!BFQQ_TOTALLY_SEEKY(bfqq) &&
!in_burst &&
time_is_before_jiffies(bfqq->soft_rt_next_start) &&
- bfqq->dispatched == 0;
- *interactive = !in_burst && idle_for_long_time;
+ bfqq->dispatched == 0 &&
+ bfqq->entity.new_weight == 40;
+ *interactive = !in_burst && idle_for_long_time &&
+ bfqq->entity.new_weight == 40;
wr_or_deserves_wr = bfqd->low_latency &&
(bfqq->wr_coeff > 1 ||
(bfq_bfqq_sync(bfqq) &&
@@ -1717,17 +1731,6 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
bfq_clear_bfqq_just_created(bfqq);
-
- if (!bfq_bfqq_IO_bound(bfqq)) {
- if (arrived_in_time) {
- bfqq->requests_within_timer++;
- if (bfqq->requests_within_timer >=
- bfqd->bfq_requests_within_timer)
- bfq_mark_bfqq_IO_bound(bfqq);
- } else
- bfqq->requests_within_timer = 0;
- }
-
if (bfqd->low_latency) {
if (unlikely(time_is_after_jiffies(bfqq->split_time)))
/* wraparound */
@@ -1755,10 +1758,10 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
bfq_add_bfqq_busy(bfqd, bfqq);
/*
- * Expire in-service queue only if preemption may be needed
- * for guarantees. In particular, we care only about two
- * cases. The first is that bfqq has to recover a service
- * hole, as explained in the comments on
+ * Expire in-service queue if preemption may be needed for
+ * guarantees or throughput. As for guarantees, we care
+ * explicitly about two cases. The first is that bfqq has to
+ * recover a service hole, as explained in the comments on
* bfq_bfqq_update_budg_for_activation(), i.e., that
* bfqq_wants_to_preempt is true. However, if bfqq does not
* carry time-critical I/O, then bfqq's bandwidth is less
@@ -1785,11 +1788,23 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
* timestamps of the in-service queue would need to be
* updated, and this operation is quite costly (see the
* comments on bfq_bfqq_update_budg_for_activation()).
+ *
+ * As for throughput, we ask bfq_better_to_idle() whether we
+ * still need to plug I/O dispatching. If bfq_better_to_idle()
+ * says no, then plugging is not needed any longer, either to
+	 * boost throughput or to preserve service guarantees. Then
+ * the best option is to stop plugging I/O, as not doing so
+ * would certainly lower throughput. We may end up in this
+ * case if: (1) upon a dispatch attempt, we detected that it
+ * was better to plug I/O dispatch, and to wait for a new
+ * request to arrive for the currently in-service queue, but
+ * (2) this switch of bfqq to busy changes the scenario.
*/
if (bfqd->in_service_queue &&
((bfqq_wants_to_preempt &&
bfqq->wr_coeff >= bfqd->in_service_queue->wr_coeff) ||
- bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue)) &&
+ bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue) ||
+ !bfq_better_to_idle(bfqd->in_service_queue)) &&
next_queue_may_preempt(bfqd))
bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
false, BFQQE_PREEMPTED);
@@ -1861,6 +1876,138 @@ static void bfq_reset_inject_limit(struct bfq_data *bfqd,
bfqq->decrease_time_jif = jiffies;
}
+static void bfq_update_io_intensity(struct bfq_queue *bfqq, u64 now_ns)
+{
+ u64 tot_io_time = now_ns - bfqq->io_start_time;
+
+ if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfqq->dispatched == 0)
+ bfqq->tot_idle_time +=
+ now_ns - bfqq->ttime.last_end_request;
+
+ if (unlikely(bfq_bfqq_just_created(bfqq)))
+ return;
+
+ /*
+ * Must be busy for at least about 80% of the time to be
+ * considered I/O bound.
+ */
+ if (bfqq->tot_idle_time * 5 > tot_io_time)
+ bfq_clear_bfqq_IO_bound(bfqq);
+ else
+ bfq_mark_bfqq_IO_bound(bfqq);
+
+ /*
+ * Keep an observation window of at most 200 ms in the past
+ * from now.
+ */
+ if (tot_io_time > 200 * NSEC_PER_MSEC) {
+ bfqq->io_start_time = now_ns - (tot_io_time>>1);
+ bfqq->tot_idle_time >>= 1;
+ }
+}
+
+/*
+ * Detect whether bfqq's I/O seems synchronized with that of some
+ * other queue, i.e., whether bfqq, after remaining empty, happens to
+ * receive new I/O only right after some I/O request of the other
+ * queue has been completed. We call waker queue the other queue, and
+ * we assume, for simplicity, that bfqq may have at most one waker
+ * queue.
+ *
+ * A remarkable throughput boost can be reached by unconditionally
+ * injecting the I/O of the waker queue, every time a new
+ * bfq_dispatch_request happens to be invoked while I/O is being
+ * plugged for bfqq. In addition to boosting throughput, this
+ * unblocks bfqq's I/O, thereby improving bandwidth and latency for
+ * bfqq. Note that these same results may be achieved with the general
+ * injection mechanism, but less effectively. For details on this
+ * aspect, see the comments on the choice of the queue for injection
+ * in bfq_select_queue().
+ *
+ * Turning back to the detection of a waker queue, a queue Q is deemed
+ * as a waker queue for bfqq if, for three consecutive times, bfqq
+ * happens to become non empty right after a request of Q has been
+ * completed. In particular, on the first time, Q is tentatively set
+ * as a candidate waker queue, while on the third consecutive time
+ * that Q is detected, the field waker_bfqq is set to Q, to confirm
+ * that Q is a waker queue for bfqq. These detection steps are
+ * performed only if bfqq has a long think time, so as to make it more
+ * likely that bfqq's I/O is actually being blocked by a
+ * synchronization. This last filter, plus the above three-times
+ * requirement, make false positives less likely.
+ *
+ * NOTE
+ *
+ * The sooner a waker queue is detected, the sooner throughput can be
+ * boosted by injecting I/O from the waker queue. Fortunately,
+ * detection is likely to be actually fast, for the following
+ * reasons. While blocked by synchronization, bfqq has a long think
+ * time. This implies that bfqq's inject limit is at least equal to 1
+ * (see the comments in bfq_update_inject_limit()). So, thanks to
+ * injection, the waker queue is likely to be served during the very
+ * first I/O-plugging time interval for bfqq. This triggers the first
+ * step of the detection mechanism. Thanks again to injection, the
+ * candidate waker queue is then likely to be confirmed no later than
+ * during the next I/O-plugging interval for bfqq.
+ *
+ * ISSUE
+ *
+ * On queue merging all waker information is lost.
+ */
+static void bfq_check_waker(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ u64 now_ns)
+{
+ if (!bfqd->last_completed_rq_bfqq ||
+ bfqd->last_completed_rq_bfqq == bfqq ||
+ bfq_bfqq_has_short_ttime(bfqq) ||
+ now_ns - bfqd->last_completion >= 4 * NSEC_PER_MSEC ||
+ bfqd->last_completed_rq_bfqq == bfqq->waker_bfqq)
+ return;
+
+ if (bfqd->last_completed_rq_bfqq !=
+ bfqq->tentative_waker_bfqq) {
+ /*
+ * First synchronization detected with a
+ * candidate waker queue, or with a different
+ * candidate waker queue from the current one.
+ */
+ bfqq->tentative_waker_bfqq =
+ bfqd->last_completed_rq_bfqq;
+ bfqq->num_waker_detections = 1;
+ } else /* Same tentative waker queue detected again */
+ bfqq->num_waker_detections++;
+
+ if (bfqq->num_waker_detections == 3) {
+ bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq;
+ bfqq->tentative_waker_bfqq = NULL;
+
+ /*
+ * If the waker queue disappears, then
+ * bfqq->waker_bfqq must be reset. To
+ * this goal, we maintain in each
+ * waker queue a list, woken_list, of
+ * all the queues that reference the
+ * waker queue through their
+ * waker_bfqq pointer. When the waker
+ * queue exits, the waker_bfqq pointer
+ * of all the queues in the woken_list
+ * is reset.
+ *
+ * In addition, if bfqq is already in
+ * the woken_list of a waker queue,
+ * then, before being inserted into
+ * the woken_list of a new waker
+ * queue, bfqq must be removed from
+ * the woken_list of the old waker
+ * queue.
+ */
+ if (!hlist_unhashed(&bfqq->woken_list_node))
+ hlist_del_init(&bfqq->woken_list_node);
+ hlist_add_head(&bfqq->woken_list_node,
+ &bfqd->last_completed_rq_bfqq->woken_list);
+ }
+}
+
static void bfq_add_request(struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
@@ -1868,117 +2015,14 @@ static void bfq_add_request(struct request *rq)
struct request *next_rq, *prev;
unsigned int old_wr_coeff = bfqq->wr_coeff;
bool interactive = false;
+ u64 now_ns = ktime_get_ns();
bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
bfqq->queued[rq_is_sync(rq)]++;
bfqd->queued++;
if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_sync(bfqq)) {
- /*
- * Detect whether bfqq's I/O seems synchronized with
- * that of some other queue, i.e., whether bfqq, after
- * remaining empty, happens to receive new I/O only
- * right after some I/O request of the other queue has
- * been completed. We call waker queue the other
- * queue, and we assume, for simplicity, that bfqq may
- * have at most one waker queue.
- *
- * A remarkable throughput boost can be reached by
- * unconditionally injecting the I/O of the waker
- * queue, every time a new bfq_dispatch_request
- * happens to be invoked while I/O is being plugged
- * for bfqq. In addition to boosting throughput, this
- * unblocks bfqq's I/O, thereby improving bandwidth
- * and latency for bfqq. Note that these same results
- * may be achieved with the general injection
- * mechanism, but less effectively. For details on
- * this aspect, see the comments on the choice of the
- * queue for injection in bfq_select_queue().
- *
- * Turning back to the detection of a waker queue, a
- * queue Q is deemed as a waker queue for bfqq if, for
- * two consecutive times, bfqq happens to become non
- * empty right after a request of Q has been
- * completed. In particular, on the first time, Q is
- * tentatively set as a candidate waker queue, while
- * on the second time, the flag
- * bfq_bfqq_has_waker(bfqq) is set to confirm that Q
- * is a waker queue for bfqq. These detection steps
- * are performed only if bfqq has a long think time,
- * so as to make it more likely that bfqq's I/O is
- * actually being blocked by a synchronization. This
- * last filter, plus the above two-times requirement,
- * make false positives less likely.
- *
- * NOTE
- *
- * The sooner a waker queue is detected, the sooner
- * throughput can be boosted by injecting I/O from the
- * waker queue. Fortunately, detection is likely to be
- * actually fast, for the following reasons. While
- * blocked by synchronization, bfqq has a long think
- * time. This implies that bfqq's inject limit is at
- * least equal to 1 (see the comments in
- * bfq_update_inject_limit()). So, thanks to
- * injection, the waker queue is likely to be served
- * during the very first I/O-plugging time interval
- * for bfqq. This triggers the first step of the
- * detection mechanism. Thanks again to injection, the
- * candidate waker queue is then likely to be
- * confirmed no later than during the next
- * I/O-plugging interval for bfqq.
- */
- if (bfqd->last_completed_rq_bfqq &&
- !bfq_bfqq_has_short_ttime(bfqq) &&
- ktime_get_ns() - bfqd->last_completion <
- 200 * NSEC_PER_USEC) {
- if (bfqd->last_completed_rq_bfqq != bfqq &&
- bfqd->last_completed_rq_bfqq !=
- bfqq->waker_bfqq) {
- /*
- * First synchronization detected with
- * a candidate waker queue, or with a
- * different candidate waker queue
- * from the current one.
- */
- bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq;
-
- /*
- * If the waker queue disappears, then
- * bfqq->waker_bfqq must be reset. To
- * this goal, we maintain in each
- * waker queue a list, woken_list, of
- * all the queues that reference the
- * waker queue through their
- * waker_bfqq pointer. When the waker
- * queue exits, the waker_bfqq pointer
- * of all the queues in the woken_list
- * is reset.
- *
- * In addition, if bfqq is already in
- * the woken_list of a waker queue,
- * then, before being inserted into
- * the woken_list of a new waker
- * queue, bfqq must be removed from
- * the woken_list of the old waker
- * queue.
- */
- if (!hlist_unhashed(&bfqq->woken_list_node))
- hlist_del_init(&bfqq->woken_list_node);
- hlist_add_head(&bfqq->woken_list_node,
- &bfqd->last_completed_rq_bfqq->woken_list);
-
- bfq_clear_bfqq_has_waker(bfqq);
- } else if (bfqd->last_completed_rq_bfqq ==
- bfqq->waker_bfqq &&
- !bfq_bfqq_has_waker(bfqq)) {
- /*
- * synchronization with waker_bfqq
- * seen for the second time
- */
- bfq_mark_bfqq_has_waker(bfqq);
- }
- }
+ bfq_check_waker(bfqd, bfqq, now_ns);
/*
* Periodically reset inject limit, to make sure that
@@ -2047,6 +2091,9 @@ static void bfq_add_request(struct request *rq)
}
}
+ if (bfq_bfqq_sync(bfqq))
+ bfq_update_io_intensity(bfqq, now_ns);
+
elv_rb_add(&bfqq->sort_list, rq);
/*
@@ -2352,6 +2399,24 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
/* Must be called with bfqq != NULL */
static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
{
+ /*
+ * If bfqq has been enjoying interactive weight-raising, then
+ * reset soft_rt_next_start. We do it for the following
+ * reason. bfqq may have been conveying the I/O needed to load
+ * a soft real-time application. Such an application actually
+ * exhibits a soft real-time I/O pattern after it finishes
+ * loading, and finally starts doing its job. But, if bfqq has
+ * been receiving a lot of bandwidth so far (likely to happen
+ * on a fast device), then soft_rt_next_start now contains a
+	 * high value. So, without this reset, bfqq would be
+	 * prevented from possibly being considered as soft_rt for a
+ * very long time.
+ */
+
+ if (bfqq->wr_cur_max_time !=
+ bfqq->bfqd->bfq_wr_rt_max_time)
+ bfqq->soft_rt_next_start = jiffies;
+
if (bfq_bfqq_busy(bfqq))
bfqq->bfqd->wr_busy_queues--;
bfqq->wr_coeff = 1;
@@ -2686,10 +2751,16 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
if (!bic)
return;
+ bic->saved_last_serv_time_ns = bfqq->last_serv_time_ns;
+ bic->saved_inject_limit = bfqq->inject_limit;
+ bic->saved_decrease_time_jif = bfqq->decrease_time_jif;
+
bic->saved_weight = bfqq->entity.orig_weight;
bic->saved_ttime = bfqq->ttime;
bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
+ bic->saved_io_start_time = bfqq->io_start_time;
+ bic->saved_tot_idle_time = bfqq->tot_idle_time;
bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
if (unlikely(bfq_bfqq_just_created(bfqq) &&
@@ -2712,6 +2783,7 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
bic->saved_wr_coeff = bfqq->wr_coeff;
bic->saved_wr_start_at_switch_to_srt =
bfqq->wr_start_at_switch_to_srt;
+ bic->saved_service_from_wr = bfqq->service_from_wr;
bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
}
@@ -2937,6 +3009,7 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
}
bfqd->in_service_queue = bfqq;
+ bfqd->in_serv_last_pos = 0;
}
/*
@@ -3442,20 +3515,38 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
* order until all the requests already queued in the device have been
* served. The last sub-condition commented above somewhat mitigates
* this problem for weight-raised queues.
+ *
+ * However, as an additional mitigation for this problem, we preserve
+ * plugging for a special symmetric case that may suddenly turn into
+ * asymmetric: the case where only bfqq is busy. In this case, not
+ * expiring bfqq does not cause any harm to any other queues in terms
+ * of service guarantees. In contrast, it avoids the following unlucky
+ * sequence of events: (1) bfqq is expired, (2) a new queue with a
+ * lower weight than bfqq becomes busy (or more queues), (3) the new
+ * queue is served until a new request arrives for bfqq, (4) when bfqq
+ * is finally served, there are so many requests of the new queue in
+ * the drive that the pending requests for bfqq take a lot of time to
+	 * be served. In particular, event (2) may cause even already
+ * dispatched requests of bfqq to be delayed, inside the drive. So, to
+ * avoid this series of events, the scenario is preventively declared
+	 * as asymmetric also if bfqq is the only busy queue.
*/
static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
+ int tot_busy_queues = bfq_tot_busy_queues(bfqd);
+
/* No point in idling for bfqq if it won't get requests any longer */
if (unlikely(!bfqq_process_refs(bfqq)))
return false;
return (bfqq->wr_coeff > 1 &&
(bfqd->wr_busy_queues <
- bfq_tot_busy_queues(bfqd) ||
+ tot_busy_queues ||
bfqd->rq_in_driver >=
bfqq->dispatched + 4)) ||
- bfq_asymmetric_scenario(bfqd, bfqq);
+ bfq_asymmetric_scenario(bfqd, bfqq) ||
+ tot_busy_queues == 1;
}
static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -3939,10 +4030,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
bfq_bfqq_charge_time(bfqd, bfqq, delta);
- if (reason == BFQQE_TOO_IDLE &&
- entity->service <= 2 * entity->budget / 10)
- bfq_clear_bfqq_IO_bound(bfqq);
-
if (bfqd->low_latency && bfqq->wr_coeff == 1)
bfqq->last_wr_start_finish = jiffies;
@@ -3952,30 +4039,15 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
* If we get here, and there are no outstanding
* requests, then the request pattern is isochronous
* (see the comments on the function
- * bfq_bfqq_softrt_next_start()). Thus we can compute
- * soft_rt_next_start. And we do it, unless bfqq is in
- * interactive weight raising. We do not do it in the
- * latter subcase, for the following reason. bfqq may
- * be conveying the I/O needed to load a soft
- * real-time application. Such an application will
- * actually exhibit a soft real-time I/O pattern after
- * it finally starts doing its job. But, if
- * soft_rt_next_start is computed here for an
- * interactive bfqq, and bfqq had received a lot of
- * service before remaining with no outstanding
- * request (likely to happen on a fast device), then
- * soft_rt_next_start would be assigned such a high
- * value that, for a very long time, bfqq would be
- * prevented from being possibly considered as soft
- * real time.
+ * bfq_bfqq_softrt_next_start()). Therefore we can
+ * compute soft_rt_next_start.
*
* If, instead, the queue still has outstanding
* requests, then we have to wait for the completion
* of all the outstanding requests to discover whether
* the request pattern is actually isochronous.
*/
- if (bfqq->dispatched == 0 &&
- bfqq->wr_coeff != bfqd->bfq_wr_coeff)
+ if (bfqq->dispatched == 0)
bfqq->soft_rt_next_start =
bfq_bfqq_softrt_next_start(bfqd, bfqq);
else if (bfqq->dispatched > 0) {
@@ -4497,9 +4569,9 @@ check_queue:
bfq_serv_to_charge(async_bfqq->next_rq, async_bfqq) <=
bfq_bfqq_budget_left(async_bfqq))
bfqq = bfqq->bic->bfqq[0];
- else if (bfq_bfqq_has_waker(bfqq) &&
+ else if (bfqq->waker_bfqq &&
bfq_bfqq_busy(bfqq->waker_bfqq) &&
- bfqq->next_rq &&
+ bfqq->waker_bfqq->next_rq &&
bfq_serv_to_charge(bfqq->waker_bfqq->next_rq,
bfqq->waker_bfqq) <=
bfq_bfqq_budget_left(bfqq->waker_bfqq)
@@ -4559,9 +4631,21 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfqq->wr_cur_max_time)) {
if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
- bfq_wr_duration(bfqd)))
+ bfq_wr_duration(bfqd))) {
+ /*
+ * Either in interactive weight
+ * raising, or in soft_rt weight
+ * raising with the
+ * interactive-weight-raising period
+ * elapsed (so no switch back to
+ * interactive weight raising).
+ */
bfq_bfqq_end_wr(bfqq);
- else {
+ } else { /*
+ * soft_rt finishing while still in
+ * interactive period, switch back to
+ * interactive weight raising
+ */
switch_back_to_interactive_wr(bfqq, bfqd);
bfqq->entity.prio_changed = 1;
}
@@ -4640,9 +4724,6 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
{
struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
- if (!atomic_read(&hctx->elevator_queued))
- return false;
-
/*
* Avoiding lock: a race on bfqd->busy_queues should cause at
* most a call to dispatch for nothing
@@ -4892,7 +4973,6 @@ void bfq_put_queue(struct bfq_queue *bfqq)
hlist_for_each_entry_safe(item, n, &bfqq->woken_list,
woken_list_node) {
item->waker_bfqq = NULL;
- bfq_clear_bfqq_has_waker(item);
hlist_del_init(&item->woken_list_node);
}
@@ -5012,6 +5092,8 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
}
bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
+ bfq_log_bfqq(bfqd, bfqq, "new_ioprio %d new_weight %d",
+ bfqq->new_ioprio, bfqq->entity.new_weight);
bfqq->entity.prio_changed = 1;
}
@@ -5049,6 +5131,8 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct bfq_io_cq *bic, pid_t pid, int is_sync)
{
+ u64 now_ns = ktime_get_ns();
+
RB_CLEAR_NODE(&bfqq->entity.rb_node);
INIT_LIST_HEAD(&bfqq->fifo);
INIT_HLIST_NODE(&bfqq->burst_list_node);
@@ -5076,7 +5160,9 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfq_clear_bfqq_sync(bfqq);
/* set end request to minus infinity from now */
- bfqq->ttime.last_end_request = ktime_get_ns() + 1;
+ bfqq->ttime.last_end_request = now_ns + 1;
+
+ bfqq->io_start_time = now_ns;
bfq_mark_bfqq_IO_bound(bfqq);
@@ -5194,11 +5280,19 @@ static void bfq_update_io_thinktime(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
struct bfq_ttime *ttime = &bfqq->ttime;
- u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
+ u64 elapsed;
+ /*
+ * We are really interested in how long it takes for the queue to
+ * become busy when there is no outstanding IO for this queue. So
+	 * ignore cases when the bfq queue already has IO queued.
+ */
+ if (bfqq->dispatched || bfq_bfqq_busy(bfqq))
+ return;
+ elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
- ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8;
+ ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
ttime->ttime_samples);
@@ -5213,8 +5307,26 @@ bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
if (bfqq->wr_coeff > 1 &&
bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
- BFQQ_TOTALLY_SEEKY(bfqq))
- bfq_bfqq_end_wr(bfqq);
+ BFQQ_TOTALLY_SEEKY(bfqq)) {
+ if (time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
+ bfq_wr_duration(bfqd))) {
+ /*
+ * In soft_rt weight raising with the
+ * interactive-weight-raising period
+ * elapsed (so no switch back to
+ * interactive weight raising).
+ */
+ bfq_bfqq_end_wr(bfqq);
+ } else { /*
+ * stopping soft_rt weight raising
+ * while still in interactive period,
+ * switch back to interactive weight
+ * raising
+ */
+ switch_back_to_interactive_wr(bfqq, bfqd);
+ bfqq->entity.prio_changed = 1;
+ }
+ }
}
static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
@@ -5238,12 +5350,13 @@ static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
return;
/* Think time is infinite if no process is linked to
- * bfqq. Otherwise check average think time to
- * decide whether to mark as has_short_ttime
+ * bfqq. Otherwise check average think time to decide whether
+ * to mark as has_short_ttime. To this goal, compare average
+ * think time with half the I/O-plugging timeout.
*/
if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
(bfq_sample_valid(bfqq->ttime.ttime_samples) &&
- bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
+ bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle>>1))
has_short_ttime = false;
state_changed = has_short_ttime != bfq_bfqq_has_short_ttime(bfqq);
@@ -5510,7 +5623,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
spin_unlock_irq(&bfqd->lock);
- blk_mq_sched_request_inserted(rq);
+ trace_block_rq_insert(rq);
spin_lock_irq(&bfqd->lock);
bfqq = bfq_init_rq(rq);
@@ -5557,7 +5670,6 @@ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
rq = list_first_entry(list, struct request, queuelist);
list_del_init(&rq->queuelist);
bfq_insert_request(hctx, rq, at_head);
- atomic_inc(&hctx->elevator_queued);
}
}
@@ -5925,7 +6037,6 @@ static void bfq_finish_requeue_request(struct request *rq)
bfq_completed_request(bfqq, bfqd);
bfq_finish_requeue_request_body(bfqq);
- atomic_dec(&rq->mq_hctx->elevator_queued);
spin_unlock_irqrestore(&bfqd->lock, flags);
} else {
@@ -6332,13 +6443,13 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
* limit 'something'.
*/
/* no more than 50% of tags for async I/O */
- bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
+ bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
/*
* no more than 75% of tags for sync writes (25% extra tags
* w.r.t. async I/O, to prevent async I/O from starving sync
* writes)
*/
- bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);
+ bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
/*
* In-word depths in case some bfq_queue is being weight-
@@ -6348,9 +6459,9 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
* shortage.
*/
/* no more than ~18% of tags for async I/O */
- bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
+ bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
/* no more than ~37% of tags for sync writes (~20% extra tags) */
- bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);
+ bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)
@@ -6489,8 +6600,6 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
bfqd->bfq_slice_idle = bfq_slice_idle;
bfqd->bfq_timeout = bfq_timeout;
- bfqd->bfq_requests_within_timer = 120;
-
bfqd->bfq_large_burst_thresh = 8;
bfqd->bfq_burst_interval = msecs_to_jiffies(180);
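To make the tag-depth limits in the bfq_update_depths() hunk above concrete: the patched code derives each per-word budget from sb.shift (bits per sbitmap word) rather than from the total depth, keeping async I/O to 50% of a word, sync writes to 75%, and the weight-raised cases to roughly 18% and 37%. The stand-alone sketch below is user-space C, not kernel code; the shift value of 6 and the helper names are only illustrative assumptions, chosen to mirror the arithmetic of the patch.

/*
 * Stand-alone illustration (not kernel code) of the per-word tag
 * budgets the patched bfq_update_depths() computes from sb.shift.
 * With shift = 6 (64 tags per sbitmap word) this prints 32 48 12 24,
 * i.e. 50%, 75%, ~18% and ~37% of a word.
 */
#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int shift = 6;            /* example sbitmap word shift */
	unsigned int word = 1U << shift;   /* tags per word */
	unsigned int depths[2][2];

	depths[0][0] = max_u(word >> 1, 1U);        /* async, no wr:  50% */
	depths[0][1] = max_u((word * 3) >> 2, 1U);  /* sync writes:   75% */
	depths[1][0] = max_u((word * 3) >> 4, 1U);  /* async, wr:    ~18% */
	depths[1][1] = max_u((word * 6) >> 4, 1U);  /* sync, wr:     ~37% */

	printf("%u %u %u %u\n",
	       depths[0][0], depths[0][1], depths[1][0], depths[1][1]);
	return 0;
}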
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 703895224562..b8e793c34ff1 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -291,6 +291,11 @@ struct bfq_queue {
/* associated @bfq_ttime struct */
struct bfq_ttime ttime;
+ /* when bfqq started to do I/O within the last observation window */
+ u64 io_start_time;
+ /* how long bfqq has remained empty during the last observ. window */
+ u64 tot_idle_time;
+
/* bit vector: a 1 for each seeky requests in history */
u32 seek_history;
@@ -371,6 +376,11 @@ struct bfq_queue {
* bfq_select_queue().
*/
struct bfq_queue *waker_bfqq;
+ /* pointer to the curr. tentative waker queue, see bfq_check_waker() */
+ struct bfq_queue *tentative_waker_bfqq;
+ /* number of times the same tentative waker has been detected */
+ unsigned int num_waker_detections;
+
/* node for woken_list, see below */
struct hlist_node woken_list_node;
/*
@@ -407,6 +417,9 @@ struct bfq_io_cq {
*/
bool saved_IO_bound;
+ u64 saved_io_start_time;
+ u64 saved_tot_idle_time;
+
/*
* Same purpose as the previous fields for the value of the
* field keeping the queue's belonging to a large burst
@@ -432,9 +445,15 @@ struct bfq_io_cq {
*/
unsigned long saved_wr_coeff;
unsigned long saved_last_wr_start_finish;
+ unsigned long saved_service_from_wr;
unsigned long saved_wr_start_at_switch_to_srt;
unsigned int saved_wr_cur_max_time;
struct bfq_ttime saved_ttime;
+
+ /* Save also injection state */
+ u64 saved_last_serv_time_ns;
+ unsigned int saved_inject_limit;
+ unsigned long saved_decrease_time_jif;
};
/**
@@ -642,14 +661,6 @@ struct bfq_data {
unsigned int bfq_timeout;
/*
- * Number of consecutive requests that must be issued within
- * the idle time slice to set again idling to a queue which
- * was marked as non-I/O-bound (see the definition of the
- * IO_bound flag for further details).
- */
- unsigned int bfq_requests_within_timer;
-
- /*
* Force device idling whenever needed to provide accurate
* service guarantees, without caring about throughput
* issues. CAVEAT: this may even increase latencies, in case
@@ -770,7 +781,6 @@ enum bfqq_state_flags {
*/
BFQQF_coop, /* bfqq is shared */
BFQQF_split_coop, /* shared bfqq will be split */
- BFQQF_has_waker /* bfqq has a waker queue */
};
#define BFQ_BFQQ_FNS(name) \
@@ -790,7 +800,6 @@ BFQ_BFQQ_FNS(in_large_burst);
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
-BFQ_BFQQ_FNS(has_waker);
#undef BFQ_BFQQ_FNS
/* Expiration reasons. */
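The tentative_waker_bfqq and num_waker_detections fields added above replace the old has_waker flag with a small counter-based state machine: a candidate waker is only confirmed after three consecutive detections, as implemented by bfq_check_waker() in the bfq-iosched.c hunk. The fragment below is a user-space mock of that confirmation logic, included only to illustrate the state transitions; the mock_* names are invented for the example and the time-window and think-time filters of the real code are omitted.

/*
 * User-space mock of the three-detection waker confirmation added by
 * this patch (simplified; not the kernel implementation).
 */
#include <stdio.h>
#include <stddef.h>

struct mock_queue {
	const char *name;
	const char *tentative_waker;       /* candidate seen so far */
	const char *waker;                 /* confirmed after 3 detections */
	unsigned int num_waker_detections;
};

static void mock_check_waker(struct mock_queue *q, const char *last_completer)
{
	if (!last_completer || last_completer == q->name ||
	    last_completer == q->waker)
		return;

	if (last_completer != q->tentative_waker) {
		/* first detection, or a different candidate: restart */
		q->tentative_waker = last_completer;
		q->num_waker_detections = 1;
	} else {
		q->num_waker_detections++;
	}

	if (q->num_waker_detections == 3) {
		q->waker = last_completer;  /* confirmed waker */
		q->tentative_waker = NULL;
	}
}

int main(void)
{
	struct mock_queue q = { .name = "bfqq" };
	const char *other = "Q";

	for (int i = 0; i < 3; i++)
		mock_check_waker(&q, other);

	printf("waker = %s\n", q.waker ? q.waker : "(none)");
	return 0;
}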
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 26776bdbdf36..070e34a7feb1 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -137,9 +137,6 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
sd->next_in_service = next_in_service;
- if (!next_in_service)
- return parent_sched_may_change;
-
return parent_sched_may_change;
}
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 9ffd7e289554..dfa652122a2d 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -14,8 +14,6 @@
#include <linux/slab.h>
#include "blk.h"
-#define BIP_INLINE_VECS 4
-
static struct kmem_cache *bip_slab;
static struct workqueue_struct *kintegrityd_wq;
@@ -30,7 +28,7 @@ static void __bio_integrity_free(struct bio_set *bs,
if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
if (bip->bip_vec)
bvec_free(&bs->bvec_integrity_pool, bip->bip_vec,
- bip->bip_slab);
+ bip->bip_max_vcnt);
mempool_free(bip, &bs->bio_integrity_pool);
} else {
kfree(bip);
@@ -63,7 +61,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
inline_vecs = nr_vecs;
} else {
bip = mempool_alloc(&bs->bio_integrity_pool, gfp_mask);
- inline_vecs = BIP_INLINE_VECS;
+ inline_vecs = BIO_INLINE_VECS;
}
if (unlikely(!bip))
@@ -72,14 +70,11 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
memset(bip, 0, sizeof(*bip));
if (nr_vecs > inline_vecs) {
- unsigned long idx = 0;
-
- bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx,
- &bs->bvec_integrity_pool);
+ bip->bip_max_vcnt = nr_vecs;
+ bip->bip_vec = bvec_alloc(&bs->bvec_integrity_pool,
+ &bip->bip_max_vcnt, gfp_mask);
if (!bip->bip_vec)
goto err;
- bip->bip_max_vcnt = bvec_nr_vecs(idx);
- bip->bip_slab = idx;
} else {
bip->bip_vec = bip->bip_inline_vecs;
bip->bip_max_vcnt = inline_vecs;
@@ -140,7 +135,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
iv = bip->bip_vec + bip->bip_vcnt;
if (bip->bip_vcnt &&
- bvec_gap_to_prev(bio->bi_disk->queue,
+ bvec_gap_to_prev(bio->bi_bdev->bd_disk->queue,
&bip->bip_vec[bip->bip_vcnt - 1], offset))
return 0;
@@ -162,7 +157,7 @@ EXPORT_SYMBOL(bio_integrity_add_page);
static blk_status_t bio_integrity_process(struct bio *bio,
struct bvec_iter *proc_iter, integrity_processing_fn *proc_fn)
{
- struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
struct blk_integrity_iter iter;
struct bvec_iter bviter;
struct bio_vec bv;
@@ -171,7 +166,7 @@ static blk_status_t bio_integrity_process(struct bio *bio,
void *prot_buf = page_address(bip->bip_vec->bv_page) +
bip->bip_vec->bv_offset;
- iter.disk_name = bio->bi_disk->disk_name;
+ iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
iter.interval = 1 << bi->interval_exp;
iter.seed = proc_iter->bi_sector;
iter.prot_buf = prot_buf;
@@ -208,8 +203,8 @@ static blk_status_t bio_integrity_process(struct bio *bio,
bool bio_integrity_prep(struct bio *bio)
{
struct bio_integrity_payload *bip;
- struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
- struct request_queue *q = bio->bi_disk->queue;
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+ struct request_queue *q = bio->bi_bdev->bd_disk->queue;
void *buf;
unsigned long start, end;
unsigned int len, nr_pages;
@@ -329,7 +324,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
struct bio_integrity_payload *bip =
container_of(work, struct bio_integrity_payload, bip_work);
struct bio *bio = bip->bip_bio;
- struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
/*
* At the moment verify is called bio's iterator was advanced
@@ -355,7 +350,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
*/
bool __bio_integrity_endio(struct bio *bio)
{
- struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
struct bio_integrity_payload *bip = bio_integrity(bio);
if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
@@ -381,7 +376,7 @@ bool __bio_integrity_endio(struct bio *bio)
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
- struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
bip->bip_iter.bi_sector += bytes_done >> 9;
@@ -397,7 +392,7 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
void bio_integrity_trim(struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
- struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
}
@@ -470,6 +465,6 @@ void __init bio_integrity_init(void)
bip_slab = kmem_cache_create("bio_integrity_payload",
sizeof(struct bio_integrity_payload) +
- sizeof(struct bio_vec) * BIP_INLINE_VECS,
+ sizeof(struct bio_vec) * BIO_INLINE_VECS,
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}
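The bio-integrity change above follows the reworked bvec allocator introduced in the bio.c hunks that come next: bvec_alloc() now takes the vector count by pointer and may round it up to the slab bucket it actually allocates from, and the caller stores the upgraded count (here bip_max_vcnt) and hands the same value back to bvec_free(). The snippet below is only a user-space illustration of that rounding under the bucket sizes used by this patch (4 inline vecs, then 16/64/128/BIO_MAX_PAGES); the MOCK_* names and the 256 placeholder are assumptions for the example, not the kernel API.

/*
 * User-space illustration (not the kernel API) of how the reworked
 * bvec allocator rounds a requested vector count up to its slab
 * buckets, so the caller can pass the upgraded count back when freeing.
 */
#include <stdio.h>

#define MOCK_BIO_INLINE_VECS	4	/* inline vecs, as in this patch */
#define MOCK_BIO_MAX_PAGES	256	/* placeholder for BIO_MAX_PAGES */

static unsigned short mock_bucket(unsigned short nr_vecs)
{
	if (nr_vecs <= MOCK_BIO_INLINE_VECS)
		return MOCK_BIO_INLINE_VECS;	/* served from the bio itself */
	if (nr_vecs <= 16)
		return 16;
	if (nr_vecs <= 64)
		return 64;
	if (nr_vecs <= 128)
		return 128;
	return MOCK_BIO_MAX_PAGES;		/* mempool-backed maximum */
}

int main(void)
{
	unsigned short requested = 10;
	unsigned short granted = mock_bucket(requested);

	/*
	 * A caller would store 'granted' (e.g. in bip_max_vcnt) and pass
	 * it back when freeing, mirroring bvec_free(pool, bv, nr_vecs).
	 */
	printf("requested %u, bucket %u\n", requested, granted);
	return 0;
}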
diff --git a/block/bio.c b/block/bio.c
index 1f2cc1fbe283..a1c4d2900c7a 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -19,27 +19,40 @@
#include <linux/highmem.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>
+#include <linux/xarray.h>
#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"
-/*
- * Test patch to inline a certain number of bi_io_vec's inside the bio
- * itself, to shrink a bio data allocation from two mempool calls to one
- */
-#define BIO_INLINE_VECS 4
-
-/*
- * if you change this list, also change bvec_alloc or things will
- * break badly! cannot be bigger than what you can fit into an
- * unsigned short
- */
-#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
-static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
- BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
+static struct biovec_slab {
+ int nr_vecs;
+ char *name;
+ struct kmem_cache *slab;
+} bvec_slabs[] __read_mostly = {
+ { .nr_vecs = 16, .name = "biovec-16" },
+ { .nr_vecs = 64, .name = "biovec-64" },
+ { .nr_vecs = 128, .name = "biovec-128" },
+ { .nr_vecs = BIO_MAX_PAGES, .name = "biovec-max" },
};
-#undef BV
+
+static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
+{
+ switch (nr_vecs) {
+ /* smaller bios use inline vecs */
+ case 5 ... 16:
+ return &bvec_slabs[0];
+ case 17 ... 64:
+ return &bvec_slabs[1];
+ case 65 ... 128:
+ return &bvec_slabs[2];
+ case 129 ... BIO_MAX_PAGES:
+ return &bvec_slabs[3];
+ default:
+ BUG();
+ return NULL;
+ }
+}
/*
* fs_bio_set is the bio_set containing bio and iovec memory pools used by
@@ -58,178 +71,133 @@ struct bio_slab {
char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
-static struct bio_slab *bio_slabs;
-static unsigned int bio_slab_nr, bio_slab_max;
+static DEFINE_XARRAY(bio_slabs);
-static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
+static struct bio_slab *create_bio_slab(unsigned int size)
{
- unsigned int sz = sizeof(struct bio) + extra_size;
- struct kmem_cache *slab = NULL;
- struct bio_slab *bslab, *new_bio_slabs;
- unsigned int new_bio_slab_max;
- unsigned int i, entry = -1;
+ struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);
- mutex_lock(&bio_slab_lock);
+ if (!bslab)
+ return NULL;
- i = 0;
- while (i < bio_slab_nr) {
- bslab = &bio_slabs[i];
+ snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
+ bslab->slab = kmem_cache_create(bslab->name, size,
+ ARCH_KMALLOC_MINALIGN, SLAB_HWCACHE_ALIGN, NULL);
+ if (!bslab->slab)
+ goto fail_alloc_slab;
- if (!bslab->slab && entry == -1)
- entry = i;
- else if (bslab->slab_size == sz) {
- slab = bslab->slab;
- bslab->slab_ref++;
- break;
- }
- i++;
- }
+ bslab->slab_ref = 1;
+ bslab->slab_size = size;
- if (slab)
- goto out_unlock;
-
- if (bio_slab_nr == bio_slab_max && entry == -1) {
- new_bio_slab_max = bio_slab_max << 1;
- new_bio_slabs = krealloc(bio_slabs,
- new_bio_slab_max * sizeof(struct bio_slab),
- GFP_KERNEL);
- if (!new_bio_slabs)
- goto out_unlock;
- bio_slab_max = new_bio_slab_max;
- bio_slabs = new_bio_slabs;
- }
- if (entry == -1)
- entry = bio_slab_nr++;
+ if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
+ return bslab;
- bslab = &bio_slabs[entry];
+ kmem_cache_destroy(bslab->slab);
- snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
- slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!slab)
- goto out_unlock;
+fail_alloc_slab:
+ kfree(bslab);
+ return NULL;
+}
- bslab->slab = slab;
- bslab->slab_ref = 1;
- bslab->slab_size = sz;
-out_unlock:
+static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
+{
+ return bs->front_pad + sizeof(struct bio) + bs->back_pad;
+}
+
+static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
+{
+ unsigned int size = bs_bio_slab_size(bs);
+ struct bio_slab *bslab;
+
+ mutex_lock(&bio_slab_lock);
+ bslab = xa_load(&bio_slabs, size);
+ if (bslab)
+ bslab->slab_ref++;
+ else
+ bslab = create_bio_slab(size);
mutex_unlock(&bio_slab_lock);
- return slab;
+
+ if (bslab)
+ return bslab->slab;
+ return NULL;
}
static void bio_put_slab(struct bio_set *bs)
{
struct bio_slab *bslab = NULL;
- unsigned int i;
+ unsigned int slab_size = bs_bio_slab_size(bs);
mutex_lock(&bio_slab_lock);
- for (i = 0; i < bio_slab_nr; i++) {
- if (bs->bio_slab == bio_slabs[i].slab) {
- bslab = &bio_slabs[i];
- break;
- }
- }
-
+ bslab = xa_load(&bio_slabs, slab_size);
if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
goto out;
+ WARN_ON_ONCE(bslab->slab != bs->bio_slab);
+
WARN_ON(!bslab->slab_ref);
if (--bslab->slab_ref)
goto out;
+ xa_erase(&bio_slabs, slab_size);
+
kmem_cache_destroy(bslab->slab);
- bslab->slab = NULL;
+ kfree(bslab);
out:
mutex_unlock(&bio_slab_lock);
}
-unsigned int bvec_nr_vecs(unsigned short idx)
-{
- return bvec_slabs[--idx].nr_vecs;
-}
-
-void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
+void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
{
- if (!idx)
- return;
- idx--;
+ BIO_BUG_ON(nr_vecs > BIO_MAX_PAGES);
- BIO_BUG_ON(idx >= BVEC_POOL_NR);
-
- if (idx == BVEC_POOL_MAX) {
+ if (nr_vecs == BIO_MAX_PAGES)
mempool_free(bv, pool);
- } else {
- struct biovec_slab *bvs = bvec_slabs + idx;
+ else if (nr_vecs > BIO_INLINE_VECS)
+ kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
+}
- kmem_cache_free(bvs->slab, bv);
- }
+/*
+ * Make the first allocation restricted and don't dump info on allocation
+ * failures, since we'll fall back to the mempool in case of failure.
+ */
+static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
+{
+ return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
+ __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
}
-struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
- mempool_t *pool)
+struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
+ gfp_t gfp_mask)
{
- struct bio_vec *bvl;
+ struct biovec_slab *bvs = biovec_slab(*nr_vecs);
- /*
- * see comment near bvec_array define!
- */
- switch (nr) {
- case 1:
- *idx = 0;
- break;
- case 2 ... 4:
- *idx = 1;
- break;
- case 5 ... 16:
- *idx = 2;
- break;
- case 17 ... 64:
- *idx = 3;
- break;
- case 65 ... 128:
- *idx = 4;
- break;
- case 129 ... BIO_MAX_PAGES:
- *idx = 5;
- break;
- default:
+ if (WARN_ON_ONCE(!bvs))
return NULL;
- }
/*
- * idx now points to the pool we want to allocate from. only the
- * 1-vec entry pool is mempool backed.
+ * Upgrade the nr_vecs request to take full advantage of the allocation.
+ * We also rely on this in the bvec_free path.
*/
- if (*idx == BVEC_POOL_MAX) {
-fallback:
- bvl = mempool_alloc(pool, gfp_mask);
- } else {
- struct biovec_slab *bvs = bvec_slabs + *idx;
- gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
+ *nr_vecs = bvs->nr_vecs;
- /*
- * Make this allocation restricted and don't dump info on
- * allocation failures, since we'll fallback to the mempool
- * in case of failure.
- */
- __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
+ /*
+ * Try a slab allocation first for all smaller allocations. If that
+ * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
+ * The mempool is sized to handle up to BIO_MAX_PAGES entries.
+ */
+ if (*nr_vecs < BIO_MAX_PAGES) {
+ struct bio_vec *bvl;
- /*
- * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
- * is set, retry with the 1-entry mempool
- */
- bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
- if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
- *idx = BVEC_POOL_MAX;
- goto fallback;
- }
+ bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
+ if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
+ return bvl;
+ *nr_vecs = BIO_MAX_PAGES;
}
- (*idx)++;
- return bvl;
+ return mempool_alloc(pool, gfp_mask);
}
void bio_uninit(struct bio *bio)
@@ -255,7 +223,7 @@ static void bio_free(struct bio *bio)
bio_uninit(bio);
if (bs) {
- bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
+ bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
/*
* If we have front padding, adjust the bio pointer before freeing
@@ -299,12 +267,8 @@ EXPORT_SYMBOL(bio_init);
*/
void bio_reset(struct bio *bio)
{
- unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
-
bio_uninit(bio);
-
memset(bio, 0, BIO_RESET_BYTES);
- bio->bi_flags = flags;
atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);
@@ -405,122 +369,97 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
* @nr_iovecs: number of iovecs to pre-allocate
* @bs: the bio_set to allocate from.
*
- * Description:
- * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
- * backed by the @bs's mempool.
+ * Allocate a bio from the mempools in @bs.
*
- * When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
- * always be able to allocate a bio. This is due to the mempool guarantees.
- * To make this work, callers must never allocate more than 1 bio at a time
- * from this pool. Callers that need to allocate more than 1 bio must always
- * submit the previously allocated bio for IO before attempting to allocate
- * a new one. Failure to do so can cause deadlocks under memory pressure.
+ * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
+ * allocate a bio. This is due to the mempool guarantees. To make this work,
+ * callers must never allocate more than 1 bio at a time from the general pool.
+ * Callers that need to allocate more than 1 bio must always submit the
+ * previously allocated bio for IO before attempting to allocate a new one.
+ * Failure to do so can cause deadlocks under memory pressure.
*
- * Note that when running under submit_bio_noacct() (i.e. any block
- * driver), bios are not submitted until after you return - see the code in
- * submit_bio_noacct() that converts recursion into iteration, to prevent
- * stack overflows.
+ * Note that when running under submit_bio_noacct() (i.e. any block driver),
+ * bios are not submitted until after you return - see the code in
+ * submit_bio_noacct() that converts recursion into iteration, to prevent
+ * stack overflows.
*
- * This would normally mean allocating multiple bios under
- * submit_bio_noacct() would be susceptible to deadlocks, but we have
- * deadlock avoidance code that resubmits any blocked bios from a rescuer
- * thread.
+ * This would normally mean allocating multiple bios under submit_bio_noacct()
+ * would be susceptible to deadlocks, but we have
+ * deadlock avoidance code that resubmits any blocked bios from a rescuer
+ * thread.
*
- * However, we do not guarantee forward progress for allocations from other
- * mempools. Doing multiple allocations from the same mempool under
- * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
- * for per bio allocations.
+ * However, we do not guarantee forward progress for allocations from other
+ * mempools. Doing multiple allocations from the same mempool under
+ * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
+ * for per bio allocations.
*
- * RETURNS:
- * Pointer to new bio on success, NULL on failure.
+ * Returns: Pointer to new bio on success, NULL on failure.
*/
-struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
+struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs,
struct bio_set *bs)
{
gfp_t saved_gfp = gfp_mask;
- unsigned front_pad;
- unsigned inline_vecs;
- struct bio_vec *bvl = NULL;
struct bio *bio;
void *p;
- if (!bs) {
- if (nr_iovecs > UIO_MAXIOV)
- return NULL;
-
- p = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
- front_pad = 0;
- inline_vecs = nr_iovecs;
- } else {
- /* should not use nobvec bioset for nr_iovecs > 0 */
- if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
- nr_iovecs > 0))
- return NULL;
- /*
- * submit_bio_noacct() converts recursion to iteration; this
- * means if we're running beneath it, any bios we allocate and
- * submit will not be submitted (and thus freed) until after we
- * return.
- *
- * This exposes us to a potential deadlock if we allocate
- * multiple bios from the same bio_set() while running
- * underneath submit_bio_noacct(). If we were to allocate
- * multiple bios (say a stacking block driver that was splitting
- * bios), we would deadlock if we exhausted the mempool's
- * reserve.
- *
- * We solve this, and guarantee forward progress, with a rescuer
- * workqueue per bio_set. If we go to allocate and there are
- * bios on current->bio_list, we first try the allocation
- * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
- * bios we would be blocking to the rescuer workqueue before
- * we retry with the original gfp_flags.
- */
-
- if (current->bio_list &&
- (!bio_list_empty(&current->bio_list[0]) ||
- !bio_list_empty(&current->bio_list[1])) &&
- bs->rescue_workqueue)
- gfp_mask &= ~__GFP_DIRECT_RECLAIM;
+ /* should not use nobvec bioset for nr_iovecs > 0 */
+ if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_iovecs > 0))
+ return NULL;
+ /*
+ * submit_bio_noacct() converts recursion to iteration; this means if
+ * we're running beneath it, any bios we allocate and submit will not be
+ * submitted (and thus freed) until after we return.
+ *
+ * This exposes us to a potential deadlock if we allocate multiple bios
+ * from the same bio_set() while running underneath submit_bio_noacct().
+ * If we were to allocate multiple bios (say a stacking block driver
+ * that was splitting bios), we would deadlock if we exhausted the
+ * mempool's reserve.
+ *
+ * We solve this, and guarantee forward progress, with a rescuer
+ * workqueue per bio_set. If we go to allocate and there are bios on
+ * current->bio_list, we first try the allocation without
+ * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
+ * blocking to the rescuer workqueue before we retry with the original
+ * gfp_flags.
+ */
+ if (current->bio_list &&
+ (!bio_list_empty(&current->bio_list[0]) ||
+ !bio_list_empty(&current->bio_list[1])) &&
+ bs->rescue_workqueue)
+ gfp_mask &= ~__GFP_DIRECT_RECLAIM;
+
+ p = mempool_alloc(&bs->bio_pool, gfp_mask);
+ if (!p && gfp_mask != saved_gfp) {
+ punt_bios_to_rescuer(bs);
+ gfp_mask = saved_gfp;
p = mempool_alloc(&bs->bio_pool, gfp_mask);
- if (!p && gfp_mask != saved_gfp) {
- punt_bios_to_rescuer(bs);
- gfp_mask = saved_gfp;
- p = mempool_alloc(&bs->bio_pool, gfp_mask);
- }
-
- front_pad = bs->front_pad;
- inline_vecs = BIO_INLINE_VECS;
}
-
if (unlikely(!p))
return NULL;
- bio = p + front_pad;
- bio_init(bio, NULL, 0);
-
- if (nr_iovecs > inline_vecs) {
- unsigned long idx = 0;
+ bio = p + bs->front_pad;
+ if (nr_iovecs > BIO_INLINE_VECS) {
+ struct bio_vec *bvl = NULL;
- bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
+ bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
if (!bvl && gfp_mask != saved_gfp) {
punt_bios_to_rescuer(bs);
gfp_mask = saved_gfp;
- bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
+ bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
}
-
if (unlikely(!bvl))
goto err_free;
- bio->bi_flags |= idx << BVEC_POOL_OFFSET;
+ bio_init(bio, bvl, nr_iovecs);
} else if (nr_iovecs) {
- bvl = bio->bi_inline_vecs;
+ bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
+ } else {
+ bio_init(bio, NULL, 0);
}
bio->bi_pool = bs;
- bio->bi_max_vecs = nr_iovecs;
- bio->bi_io_vec = bvl;
return bio;
err_free:
@@ -529,6 +468,31 @@ err_free:
}
EXPORT_SYMBOL(bio_alloc_bioset);
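[Editorial sketch, not part of this patch: how a stacking driver might follow the one-bio-at-a-time rule from the kernel-doc above. The bio_set, function names and flush example are invented for illustration.]

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio_set my_bio_set;	/* hypothetical driver-private pool */

static int my_driver_init(void)
{
	/* BIOSET_NEED_BVECS so the pool can also back bio_vec allocations. */
	return bioset_init(&my_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
}

static void my_issue_flush_below(struct bio *parent, struct block_device *lower)
{
	/*
	 * GFP_NOIO includes __GFP_DIRECT_RECLAIM, so the mempool guarantees
	 * this succeeds, as long as we never hold more than one un-submitted
	 * bio from this pool at a time.
	 */
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 0, &my_bio_set);

	bio_set_dev(bio, lower);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	bio_chain(bio, parent);
	submit_bio(bio);	/* submit before allocating the next bio from the pool */
}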
+/**
+ * bio_kmalloc - kmalloc a bio for I/O
+ * @gfp_mask: the GFP_* mask given to the slab allocator
+ * @nr_iovecs: number of iovecs to pre-allocate
+ *
+ * Use kmalloc to allocate and initialize a bio.
+ *
+ * Returns: Pointer to new bio on success, NULL on failure.
+ */
+struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs)
+{
+ struct bio *bio;
+
+ if (nr_iovecs > UIO_MAXIOV)
+ return NULL;
+
+ bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
+ if (unlikely(!bio))
+ return NULL;
+ bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
+ bio->bi_pool = NULL;
+ return bio;
+}
+EXPORT_SYMBOL(bio_kmalloc);
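[Editorial sketch: by contrast, bio_kmalloc() has no mempool behind it, so the NULL return must be handled. The helper name below is invented.]

#include <linux/bio.h>

/* Illustrative only: a throw-away bio with nr_vecs inline vectors. */
static struct bio *my_alloc_scratch_bio(unsigned short nr_vecs)
{
	struct bio *bio = bio_kmalloc(GFP_KERNEL, nr_vecs);

	if (!bio)	/* plain kmalloc can fail; there is no rescuer workqueue here */
		return NULL;
	/* The caller fills in bi_bdev, bi_opf and the pages, then submits. */
	return bio;
}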
+
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
{
unsigned long flags;
@@ -607,16 +571,7 @@ void bio_truncate(struct bio *bio, unsigned new_size)
*/
void guard_bio_eod(struct bio *bio)
{
- sector_t maxsector;
- struct block_device *part;
-
- rcu_read_lock();
- part = __disk_get_part(bio->bi_disk, bio->bi_partno);
- if (part)
- maxsector = bdev_nr_sectors(part);
- else
- maxsector = get_capacity(bio->bi_disk);
- rcu_read_unlock();
+ sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
if (!maxsector)
return;
@@ -673,17 +628,18 @@ EXPORT_SYMBOL(bio_put);
*/
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
- BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
+ WARN_ON_ONCE(bio->bi_pool && bio->bi_max_vecs);
/*
- * most users will be overriding ->bi_disk with a new target,
+ * most users will be overriding ->bi_bdev with a new target,
* so we don't set nor calculate new physical/hw segment counts here
*/
- bio->bi_disk = bio_src->bi_disk;
- bio->bi_partno = bio_src->bi_partno;
+ bio->bi_bdev = bio_src->bi_bdev;
bio_set_flag(bio, BIO_CLONED);
if (bio_flagged(bio_src, BIO_THROTTLED))
bio_set_flag(bio, BIO_THROTTLED);
+ if (bio_flagged(bio_src, BIO_REMAPPED))
+ bio_set_flag(bio, BIO_REMAPPED);
bio->bi_opf = bio_src->bi_opf;
bio->bi_ioprio = bio_src->bi_ioprio;
bio->bi_write_hint = bio_src->bi_write_hint;
@@ -730,7 +686,7 @@ EXPORT_SYMBOL(bio_clone_fast);
const char *bio_devname(struct bio *bio, char *buf)
{
- return disk_name(bio->bi_disk, bio->bi_partno, buf);
+ return bdevname(bio->bi_bdev, buf);
}
EXPORT_SYMBOL(bio_devname);
@@ -852,6 +808,39 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio,
EXPORT_SYMBOL(bio_add_pc_page);
/**
+ * bio_add_zone_append_page - attempt to add page to zone-append bio
+ * @bio: destination bio
+ * @page: page to add
+ * @len: vec entry length
+ * @offset: vec entry offset
+ *
+ * Attempt to add a page to the bio_vec maplist of a bio that will be submitted
+ * for a zone-append request. This can fail for a number of reasons, such as the
+ * bio being full, the target block device not being a zoned block device, or
+ * other limitations of the target block device. The target block device must
+ * allow bios up to PAGE_SIZE, so it is always possible to add a single page
+ * to an empty bio.
+ *
+ * Returns: number of bytes added to the bio, or 0 in case of a failure.
+ */
+int bio_add_zone_append_page(struct bio *bio, struct page *page,
+ unsigned int len, unsigned int offset)
+{
+ struct request_queue *q = bio->bi_bdev->bd_disk->queue;
+ bool same_page = false;
+
+ if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
+ return 0;
+
+ if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
+ return 0;
+
+ return bio_add_hw_page(q, bio, page, len, offset,
+ queue_max_zone_append_sectors(q), &same_page);
+}
+EXPORT_SYMBOL_GPL(bio_add_zone_append_page);
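[Editorial sketch of a caller, assuming the zone's start sector is already known; my_zone_append() and its error handling are illustrative, not taken from this patch.]

#include <linux/bio.h>
#include <linux/blkdev.h>

static int my_zone_append(struct block_device *bdev, struct page *page,
			  unsigned int len, sector_t zone_start_sector)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int ret;

	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_ZONE_APPEND;
	/* For zone append, bi_sector names the target zone, not the final LBA. */
	bio->bi_iter.bi_sector = zone_start_sector;

	if (!bio_add_zone_append_page(bio, page, len, 0)) {
		bio_put(bio);
		return -EIO;
	}
	ret = submit_bio_wait(bio);
	/* On success bi_sector now holds the sector the data actually landed on. */
	bio_put(bio);
	return ret;
}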
+
+/**
* __bio_try_merge_page - try appending data to an existing bvec.
* @bio: destination bio
* @page: start page to add
@@ -960,21 +949,18 @@ void bio_release_pages(struct bio *bio, bool mark_dirty)
}
EXPORT_SYMBOL_GPL(bio_release_pages);
-static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
+static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
{
- const struct bio_vec *bv = iter->bvec;
- unsigned int len;
- size_t size;
-
- if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
- return -EINVAL;
-
- len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
- size = bio_add_page(bio, bv->bv_page, len,
- bv->bv_offset + iter->iov_offset);
- if (unlikely(size != len))
- return -EINVAL;
- iov_iter_advance(iter, size);
+ WARN_ON_ONCE(bio->bi_max_vecs);
+
+ bio->bi_vcnt = iter->nr_segs;
+ bio->bi_io_vec = (struct bio_vec *)iter->bvec;
+ bio->bi_iter.bi_bvec_done = iter->iov_offset;
+ bio->bi_iter.bi_size = iter->count;
+ bio_set_flag(bio, BIO_NO_PAGE_REF);
+ bio_set_flag(bio, BIO_CLONED);
+
+ iov_iter_advance(iter, iter->count);
return 0;
}
@@ -1037,7 +1023,7 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
{
unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
- struct request_queue *q = bio->bi_disk->queue;
+ struct request_queue *q = bio->bi_bdev->bd_disk->queue;
unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
struct page **pages = (struct page **)bv;
@@ -1088,41 +1074,40 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
* This takes either an iterator pointing to user memory, or one pointing to
* kernel pages (BVEC iterator). If we're adding user pages, we pin them and
* map them into the kernel. On IO completion, the caller should put those
- * pages. If we're adding kernel pages, and the caller told us it's safe to
- * do so, we just have to add the pages to the bio directly. We don't grab an
- * extra reference to those pages (the user should already have that), and we
- * don't put the page on IO completion. The caller needs to check if the bio is
- * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
- * released.
+ * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
+ * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
+ * to ensure the bvecs and pages stay referenced until the submitted I/O is
+ * completed by a call to ->ki_complete() or returns with an error other than
+ * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
+ * on IO completion. If it isn't, then pages should be released.
*
* The function tries, but does not guarantee, to pin as many pages as
* fit into the bio, or are requested in @iter, whatever is smaller. If
* MM encounters an error pinning the requested pages, it stops. Error
* is returned only if 0 pages could be pinned.
+ *
+ * It's intended for direct IO, so it does not do PSI tracking; the caller is
+ * responsible for setting BIO_WORKINGSET if necessary.
*/
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
- const bool is_bvec = iov_iter_is_bvec(iter);
- int ret;
+ int ret = 0;
- if (WARN_ON_ONCE(bio->bi_vcnt))
- return -EINVAL;
+ if (iov_iter_is_bvec(iter)) {
+ if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
+ return -EINVAL;
+ return bio_iov_bvec_set(bio, iter);
+ }
do {
- if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
- if (WARN_ON_ONCE(is_bvec))
- return -EINVAL;
+ if (bio_op(bio) == REQ_OP_ZONE_APPEND)
ret = __bio_iov_append_get_pages(bio, iter);
- } else {
- if (is_bvec)
- ret = __bio_iov_bvec_add_pages(bio, iter);
- else
- ret = __bio_iov_iter_get_pages(bio, iter);
- }
+ else
+ ret = __bio_iov_iter_get_pages(bio, iter);
} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
- if (is_bvec)
- bio_set_flag(bio, BIO_NO_PAGE_REF);
+ /* don't account direct I/O as memory stall */
+ bio_clear_flag(bio, BIO_WORKINGSET);
return bio->bi_vcnt ? 0 : ret;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
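[Editorial sketch of the direct-I/O usage implied above; the function names are made up, and bio_release_pages() already skips BIO_NO_PAGE_REF bios itself, so the explicit check is only for illustration.]

#include <linux/bio.h>
#include <linux/uio.h>

/* Illustrative completion for a bio filled via bio_iov_iter_get_pages(). */
static void my_dio_end_io(struct bio *bio)
{
	/*
	 * Pinned user pages are released here; bvec-backed bios keep the
	 * caller's references and are flagged BIO_NO_PAGE_REF.
	 */
	if (!bio_flagged(bio, BIO_NO_PAGE_REF))
		bio_release_pages(bio, bio_data_dir(bio) == READ);
	bio_put(bio);
}

static int my_dio_fill(struct bio *bio, struct iov_iter *iter)
{
	int ret = bio_iov_iter_get_pages(bio, iter);

	if (ret)
		return ret;
	bio->bi_end_io = my_dio_end_io;
	submit_bio(bio);
	return 0;
}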
@@ -1145,7 +1130,8 @@ static void submit_bio_wait_endio(struct bio *bio)
*/
int submit_bio_wait(struct bio *bio)
{
- DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
+ DECLARE_COMPLETION_ONSTACK_MAP(done,
+ bio->bi_bdev->bd_disk->lockdep_map);
unsigned long hang_check;
bio->bi_private = &done;
@@ -1422,8 +1408,8 @@ again:
if (!bio_integrity_endio(bio))
return;
- if (bio->bi_disk)
- rq_qos_done_bio(bio->bi_disk->queue, bio);
+ if (bio->bi_bdev)
+ rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);
/*
* Need to have a real endio function for chained bios, otherwise
@@ -1438,8 +1424,8 @@ again:
goto again;
}
- if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
- trace_block_bio_complete(bio->bi_disk->queue, bio);
+ if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
+ trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio);
bio_clear_flag(bio, BIO_TRACE_COMPLETION);
}
@@ -1526,7 +1512,7 @@ EXPORT_SYMBOL_GPL(bio_trim);
*/
int biovec_init_pool(mempool_t *pool, int pool_entries)
{
- struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
+ struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;
return mempool_init_slab_pool(pool, pool_entries, bp->slab);
}
@@ -1579,15 +1565,17 @@ int bioset_init(struct bio_set *bs,
unsigned int front_pad,
int flags)
{
- unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
-
bs->front_pad = front_pad;
+ if (flags & BIOSET_NEED_BVECS)
+ bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
+ else
+ bs->back_pad = 0;
spin_lock_init(&bs->rescue_lock);
bio_list_init(&bs->rescue_list);
INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
- bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
+ bs->bio_slab = bio_find_or_create_slab(bs);
if (!bs->bio_slab)
return -ENOMEM;
@@ -1630,39 +1618,19 @@ int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
}
EXPORT_SYMBOL(bioset_init_from_src);
-static void __init biovec_init_slabs(void)
+static int __init init_bio(void)
{
int i;
- for (i = 0; i < BVEC_POOL_NR; i++) {
- int size;
- struct biovec_slab *bvs = bvec_slabs + i;
+ bio_integrity_init();
- if (bvs->nr_vecs <= BIO_INLINE_VECS) {
- bvs->slab = NULL;
- continue;
- }
+ for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
+ struct biovec_slab *bvs = bvec_slabs + i;
- size = bvs->nr_vecs * sizeof(struct bio_vec);
- bvs->slab = kmem_cache_create(bvs->name, size, 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ bvs->slab = kmem_cache_create(bvs->name,
+ bvs->nr_vecs * sizeof(struct bio_vec), 0,
+ SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
}
-}
-
-static int __init init_bio(void)
-{
- bio_slab_max = 2;
- bio_slab_nr = 0;
- bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
- GFP_KERNEL);
-
- BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
-
- if (!bio_slabs)
- panic("bio: can't allocate bios\n");
-
- bio_integrity_init();
- biovec_init_slabs();
if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
panic("bio: can't allocate bios\n");
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 031114d454a6..a317c03d40f6 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -32,8 +32,6 @@
#include <linux/psi.h>
#include "blk.h"
-#define MAX_KEY_LEN 100
-
/*
* blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
* blkcg_pol_register_mutex nests outside of it and synchronizes entire
@@ -1016,6 +1014,8 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css)
*/
void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
+ might_sleep();
+
spin_lock_irq(&blkcg->lock);
while (!hlist_empty(&blkcg->blkg_list)) {
@@ -1023,14 +1023,20 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
struct blkcg_gq, blkcg_node);
struct request_queue *q = blkg->q;
- if (spin_trylock(&q->queue_lock)) {
- blkg_destroy(blkg);
- spin_unlock(&q->queue_lock);
- } else {
+ if (need_resched() || !spin_trylock(&q->queue_lock)) {
+ /*
+ * Given that the system can accumulate a huge number
+ * of blkgs in pathological cases, check to see if we
+ * need to reschedule to avoid a softlockup.
+ */
spin_unlock_irq(&blkcg->lock);
- cpu_relax();
+ cond_resched();
spin_lock_irq(&blkcg->lock);
+ continue;
}
+
+ blkg_destroy(blkg);
+ spin_unlock(&q->queue_lock);
}
spin_unlock_irq(&blkcg->lock);
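[Editorial note, not from the patch: the need_resched()/trylock back-off used above generalizes to any long list walked under a spinlock. A self-contained sketch with hypothetical types:]

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/list.h>

struct my_item {			/* hypothetical */
	struct list_head node;
	spinlock_t inner;
};

static void my_destroy_item(struct my_item *item);	/* removes item from the list */

static void my_destroy_all(spinlock_t *outer, struct list_head *head)
{
	spin_lock_irq(outer);
	while (!list_empty(head)) {
		struct my_item *item =
			list_first_entry(head, struct my_item, node);

		if (need_resched() || !spin_trylock(&item->inner)) {
			/* Back off: let others run, then take the outer lock again. */
			spin_unlock_irq(outer);
			cond_resched();
			spin_lock_irq(outer);
			continue;
		}
		my_destroy_item(item);
		spin_unlock(&item->inner);
	}
	spin_unlock_irq(outer);
}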
@@ -1757,12 +1763,15 @@ void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
if (unlikely(current->flags & PF_KTHREAD))
return;
- if (!blk_get_queue(q))
- return;
+ if (current->throttle_queue != q) {
+ if (!blk_get_queue(q))
+ return;
+
+ if (current->throttle_queue)
+ blk_put_queue(current->throttle_queue);
+ current->throttle_queue = q;
+ }
- if (current->throttle_queue)
- blk_put_queue(current->throttle_queue);
- current->throttle_queue = q;
if (use_memdelay)
current->use_memdelay = use_memdelay;
set_notify_resume(current);
@@ -1800,7 +1809,8 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
struct blkcg_gq *blkg, *ret_blkg = NULL;
rcu_read_lock();
- blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_disk->queue);
+ blkg = blkg_lookup_create(css_to_blkcg(css),
+ bio->bi_bdev->bd_disk->queue);
while (blkg) {
if (blkg_tryget(blkg)) {
ret_blkg = blkg;
@@ -1836,8 +1846,8 @@ void bio_associate_blkg_from_css(struct bio *bio,
if (css && css->parent) {
bio->bi_blkg = blkg_tryget_closest(bio, css);
} else {
- blkg_get(bio->bi_disk->queue->root_blkg);
- bio->bi_blkg = bio->bi_disk->queue->root_blkg;
+ blkg_get(bio->bi_bdev->bd_disk->queue->root_blkg);
+ bio->bi_blkg = bio->bi_bdev->bd_disk->queue->root_blkg;
}
}
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
diff --git a/block/blk-core.c b/block/blk-core.c
index 7663a9b94b80..fc60ff208497 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -59,6 +59,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
DEFINE_IDA(blk_queue_ida);
@@ -476,7 +477,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
static inline int bio_queue_enter(struct bio *bio)
{
- struct request_queue *q = bio->bi_disk->queue;
+ struct request_queue *q = bio->bi_bdev->bd_disk->queue;
bool nowait = bio->bi_opf & REQ_NOWAIT;
int ret;
@@ -531,7 +532,7 @@ struct request_queue *blk_alloc_queue(int node_id)
if (q->id < 0)
goto fail_q;
- ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+ ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
if (ret)
goto fail_id;
@@ -692,11 +693,9 @@ static inline bool should_fail_request(struct block_device *part,
#endif /* CONFIG_FAIL_MAKE_REQUEST */
-static inline bool bio_check_ro(struct bio *bio, struct block_device *part)
+static inline bool bio_check_ro(struct bio *bio)
{
- const int op = bio_op(bio);
-
- if (part->bd_read_only && op_is_write(op)) {
+ if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
char b[BDEVNAME_SIZE];
if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
@@ -704,7 +703,7 @@ static inline bool bio_check_ro(struct bio *bio, struct block_device *part)
WARN_ONCE(1,
"Trying to write to read-only block-device %s (partno %d)\n",
- bio_devname(bio, b), part->bd_partno);
+ bio_devname(bio, b), bio->bi_bdev->bd_partno);
/* Older lvm-tools actually trigger this */
return false;
}
@@ -714,7 +713,7 @@ static inline bool bio_check_ro(struct bio *bio, struct block_device *part)
static noinline int should_fail_bio(struct bio *bio)
{
- if (should_fail_request(bio->bi_disk->part0, bio->bi_iter.bi_size))
+ if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
return -EIO;
return 0;
}
@@ -725,8 +724,9 @@ ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
* This may well happen - the kernel calls bread() without checking the size of
* the device, e.g., when mounting a file system.
*/
-static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
+static inline int bio_check_eod(struct bio *bio)
{
+ sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
unsigned int nr_sectors = bio_sectors(bio);
if (nr_sectors && maxsector &&
@@ -741,33 +741,20 @@ static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
/*
* Remap block n of partition p to block n+start(p) of the disk.
*/
-static inline int blk_partition_remap(struct bio *bio)
+static int blk_partition_remap(struct bio *bio)
{
- struct block_device *p;
- int ret = -EIO;
+ struct block_device *p = bio->bi_bdev;
- rcu_read_lock();
- p = __disk_get_part(bio->bi_disk, bio->bi_partno);
- if (unlikely(!p))
- goto out;
if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
- goto out;
- if (unlikely(bio_check_ro(bio, p)))
- goto out;
-
+ return -EIO;
if (bio_sectors(bio)) {
- if (bio_check_eod(bio, bdev_nr_sectors(p)))
- goto out;
bio->bi_iter.bi_sector += p->bd_start_sect;
trace_block_bio_remap(bio, p->bd_dev,
bio->bi_iter.bi_sector -
p->bd_start_sect);
}
- bio->bi_partno = 0;
- ret = 0;
-out:
- rcu_read_unlock();
- return ret;
+ bio_set_flag(bio, BIO_REMAPPED);
+ return 0;
}
/*
@@ -807,7 +794,8 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
static noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
- struct request_queue *q = bio->bi_disk->queue;
+ struct block_device *bdev = bio->bi_bdev;
+ struct request_queue *q = bdev->bd_disk->queue;
blk_status_t status = BLK_STS_IOERR;
struct blk_plug *plug;
@@ -826,14 +814,12 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
if (should_fail_bio(bio))
goto end_io;
-
- if (bio->bi_partno) {
- if (unlikely(blk_partition_remap(bio)))
- goto end_io;
- } else {
- if (unlikely(bio_check_ro(bio, bio->bi_disk->part0)))
+ if (unlikely(bio_check_ro(bio)))
+ goto end_io;
+ if (!bio_flagged(bio, BIO_REMAPPED)) {
+ if (unlikely(bio_check_eod(bio)))
goto end_io;
- if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
+ if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
goto end_io;
}
@@ -926,7 +912,7 @@ end_io:
static blk_qc_t __submit_bio(struct bio *bio)
{
- struct gendisk *disk = bio->bi_disk;
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
blk_qc_t ret = BLK_QC_T_NONE;
if (blk_crypto_bio_prep(&bio)) {
@@ -968,7 +954,7 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
current->bio_list = bio_list_on_stack;
do {
- struct request_queue *q = bio->bi_disk->queue;
+ struct request_queue *q = bio->bi_bdev->bd_disk->queue;
struct bio_list lower, same;
if (unlikely(bio_queue_enter(bio) != 0))
@@ -989,7 +975,7 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
bio_list_init(&lower);
bio_list_init(&same);
while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
- if (q == bio->bi_disk->queue)
+ if (q == bio->bi_bdev->bd_disk->queue)
bio_list_add(&same, bio);
else
bio_list_add(&lower, bio);
@@ -1014,7 +1000,7 @@ static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
current->bio_list = bio_list;
do {
- struct gendisk *disk = bio->bi_disk;
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
if (unlikely(bio_queue_enter(bio) != 0))
continue;
@@ -1057,7 +1043,7 @@ blk_qc_t submit_bio_noacct(struct bio *bio)
return BLK_QC_T_NONE;
}
- if (!bio->bi_disk->fops->submit_bio)
+ if (!bio->bi_bdev->bd_disk->fops->submit_bio)
return __submit_bio_noacct_mq(bio);
return __submit_bio_noacct(bio);
}
@@ -1069,7 +1055,7 @@ EXPORT_SYMBOL(submit_bio_noacct);
*
* submit_bio() is used to submit I/O requests to block devices. It is passed a
* fully set up &struct bio that describes the I/O that needs to be done. The
- * bio will be send to the device described by the bi_disk and bi_partno fields.
+ * bio will be sent to the device described by the bi_bdev field.
*
* The success/failure status of the request, along with notification of
* completion, is delivered asynchronously through the ->bi_end_io() callback
@@ -1089,7 +1075,8 @@ blk_qc_t submit_bio(struct bio *bio)
unsigned int count;
if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
- count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
+ count = queue_logical_block_size(
+ bio->bi_bdev->bd_disk->queue) >> 9;
else
count = bio_sectors(bio);
@@ -1313,7 +1300,11 @@ void blk_account_io_start(struct request *rq)
if (!blk_do_io_stat(rq))
return;
- rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+ /* passthrough requests can hold bios that do not have ->bi_bdev set */
+ if (rq->bio && rq->bio->bi_bdev)
+ rq->part = rq->bio->bi_bdev;
+ else
+ rq->part = rq->rq_disk->part0;
part_stat_lock();
update_io_ticks(rq->part, jiffies, false);
@@ -1336,14 +1327,17 @@ static unsigned long __part_start_io_acct(struct block_device *part,
return now;
}
-unsigned long part_start_io_acct(struct gendisk *disk, struct block_device **part,
- struct bio *bio)
+/**
+ * bio_start_io_acct - start I/O accounting for bio based drivers
+ * @bio: bio to start account for
+ *
+ * Returns the start time that should be passed back to bio_end_io_acct().
+ */
+unsigned long bio_start_io_acct(struct bio *bio)
{
- *part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector);
-
- return __part_start_io_acct(*part, bio_sectors(bio), bio_op(bio));
+ return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), bio_op(bio));
}
-EXPORT_SYMBOL_GPL(part_start_io_acct);
+EXPORT_SYMBOL_GPL(bio_start_io_acct);
unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
unsigned int op)
@@ -1366,12 +1360,12 @@ static void __part_end_io_acct(struct block_device *part, unsigned int op,
part_stat_unlock();
}
-void part_end_io_acct(struct block_device *part, struct bio *bio,
- unsigned long start_time)
+void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
+ struct block_device *orig_bdev)
{
- __part_end_io_acct(part, bio_op(bio), start_time);
+ __part_end_io_acct(orig_bdev, bio_op(bio), start_time);
}
-EXPORT_SYMBOL_GPL(part_end_io_acct);
+EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
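[Editorial sketch: a bio-based driver that does not remap pairs bio_start_io_acct() with the inline bio_end_io_acct() wrapper, which is assumed here to pass bio->bi_bdev as the original device; the driver function is invented.]

#include <linux/blkdev.h>

/* Illustrative bio-based ->submit_bio path with I/O accounting. */
static blk_qc_t my_submit_bio(struct bio *bio)
{
	unsigned long start = bio_start_io_acct(bio);

	/* ... perform or queue the actual work ... */

	bio_end_io_acct(bio, start);	/* bio_end_io_acct_remapped() if remapped */
	bio_endio(bio);
	return BLK_QC_T_NONE;
}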
void disk_end_io_acct(struct gendisk *disk, unsigned int op,
unsigned long start_time)
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index c162b754efbd..c176b7af56a7 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -80,6 +80,7 @@ static struct blk_crypto_keyslot {
static struct blk_keyslot_manager blk_crypto_ksm;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
+static struct bio_set crypto_bio_split;
/*
* This is the key we set when evicting a keyslot. This *should* be the all 0's
@@ -164,10 +165,12 @@ static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
struct bio_vec bv;
struct bio *bio;
- bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), NULL);
+ bio = bio_kmalloc(GFP_NOIO, bio_segments(bio_src));
if (!bio)
return NULL;
- bio->bi_disk = bio_src->bi_disk;
+ bio->bi_bdev = bio_src->bi_bdev;
+ if (bio_flagged(bio_src, BIO_REMAPPED))
+ bio_set_flag(bio, BIO_REMAPPED);
bio->bi_opf = bio_src->bi_opf;
bio->bi_ioprio = bio_src->bi_ioprio;
bio->bi_write_hint = bio_src->bi_write_hint;
@@ -222,7 +225,8 @@ static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
if (num_sectors < bio_sectors(bio)) {
struct bio *split_bio;
- split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
+ split_bio = bio_split(bio, num_sectors, GFP_NOIO,
+ &crypto_bio_split);
if (!split_bio) {
bio->bi_status = BLK_STS_RESOURCE;
return false;
@@ -536,9 +540,13 @@ static int blk_crypto_fallback_init(void)
prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
- err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
+ err = bioset_init(&crypto_bio_split, 64, 0, 0);
if (err)
goto out;
+
+ err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
+ if (err)
+ goto fail_free_bioset;
err = -ENOMEM;
blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
@@ -589,6 +597,8 @@ fail_free_wq:
destroy_workqueue(blk_crypto_wq);
fail_free_ksm:
blk_ksm_destroy(&blk_crypto_ksm);
+fail_free_bioset:
+ bioset_exit(&crypto_bio_split);
out:
return err;
}
diff --git a/block/blk-crypto.c b/block/blk-crypto.c
index 5da43f0973b4..c5bdaafffa29 100644
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -280,7 +280,7 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
* Success if device supports the encryption context, or if we succeeded
* in falling back to the crypto API.
*/
- if (blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
+ if (blk_ksm_crypto_cfg_supported(bio->bi_bdev->bd_disk->queue->ksm,
&bc_key->crypto_cfg))
return true;
@@ -409,3 +409,4 @@ int blk_crypto_evict_key(struct request_queue *q,
*/
return blk_crypto_fallback_evict_key(key);
}
+EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 85324d53d072..beae70a0e5e5 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -31,8 +31,7 @@ static void blk_end_sync_rq(struct request *rq, blk_status_t error)
}
/**
- * blk_execute_rq_nowait - insert a request into queue for execution
- * @q: queue to insert the request in
+ * blk_execute_rq_nowait - insert a request into the I/O scheduler for execution
* @bd_disk: matching gendisk
* @rq: request to insert
* @at_head: insert request at head or tail of queue
@@ -45,9 +44,8 @@ static void blk_end_sync_rq(struct request *rq, blk_status_t error)
* Note:
* This function will invoke @done directly if the queue is dead.
*/
-void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
- struct request *rq, int at_head,
- rq_end_io_fn *done)
+void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
+ int at_head, rq_end_io_fn *done)
{
WARN_ON(irqs_disabled());
WARN_ON(!blk_rq_is_passthrough(rq));
@@ -67,7 +65,6 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
/**
* blk_execute_rq - insert a request into queue for execution
- * @q: queue to insert the request in
* @bd_disk: matching gendisk
* @rq: request to insert
* @at_head: insert request at head or tail of queue
@@ -76,14 +73,13 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
* Insert a fully prepared request at the back of the I/O scheduler queue
* for execution and wait for completion.
*/
-void blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
- struct request *rq, int at_head)
+void blk_execute_rq(struct gendisk *bd_disk, struct request *rq, int at_head)
{
DECLARE_COMPLETION_ONSTACK(wait);
unsigned long hang_check;
rq->end_io_data = &wait;
- blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
+ blk_execute_rq_nowait(bd_disk, rq, at_head, blk_end_sync_rq);
/* Prevent hang_check timer from firing at us during very long I/O */
hang_check = sysctl_hung_task_timeout_secs;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 76c1624cb06c..7942ca6ed321 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -432,23 +432,18 @@ void blk_insert_flush(struct request *rq)
/**
* blkdev_issue_flush - queue a flush
* @bdev: blockdev to issue flush for
- * @gfp_mask: memory allocation flags (for bio_alloc)
*
* Description:
* Issue a flush for the block device in question.
*/
-int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
+int blkdev_issue_flush(struct block_device *bdev)
{
- struct bio *bio;
- int ret = 0;
+ struct bio bio;
- bio = bio_alloc(gfp_mask, 0);
- bio_set_dev(bio, bdev);
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
-
- ret = submit_bio_wait(bio);
- bio_put(bio);
- return ret;
+ bio_init(&bio, NULL, 0);
+ bio_set_dev(&bio, bdev);
+ bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+ return submit_bio_wait(&bio);
}
EXPORT_SYMBOL(blkdev_issue_flush);
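[Editorial sketch: callers simply drop the gfp argument after this change; e.g. a filesystem's fsync path might now read (names invented):]

#include <linux/blkdev.h>

/* Illustrative: flush the backing device after metadata has been written. */
static int my_fs_flush(struct block_device *bdev)
{
	return blkdev_issue_flush(bdev);	/* gfp_mask no longer needed */
}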
diff --git a/block/blk-map.c b/block/blk-map.c
index 21630dccac62..369e204d14d0 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -150,9 +150,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
bmd->is_our_pages = !map_data;
bmd->is_null_mapped = (map_data && map_data->null_mapped);
- nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
- if (nr_pages > BIO_MAX_PAGES)
- nr_pages = BIO_MAX_PAGES;
+ nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
ret = -ENOMEM;
bio = bio_kmalloc(gfp_mask, nr_pages);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 808768f6b174..ffb4aa0ea68b 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -298,14 +298,13 @@ split:
* Split a bio into two bios, chain the two bios, submit the second half and
* store a pointer to the first half in *@bio. If the second bio is still too
* big it will be split by a recursive call to this function. Since this
- * function may allocate a new bio from @bio->bi_disk->queue->bio_split, it is
- * the responsibility of the caller to ensure that
- * @bio->bi_disk->queue->bio_split is only released after processing of the
- * split bio has finished.
+ * function may allocate a new bio from q->bio_split, it is the responsibility
+ * of the caller to ensure that q->bio_split is only released after processing
+ * of the split bio has finished.
*/
void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
{
- struct request_queue *q = (*bio)->bi_disk->queue;
+ struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue;
struct bio *split = NULL;
switch (bio_op(*bio)) {
@@ -358,9 +357,9 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
*
* Split a bio into two bios, chains the two bios, submit the second half and
* store a pointer to the first half in *@bio. Since this function may allocate
- * a new bio from @bio->bi_disk->queue->bio_split, it is the responsibility of
- * the caller to ensure that @bio->bi_disk->queue->bio_split is only released
- * after processing of the split bio has finished.
+ * a new bio from q->bio_split, it is the responsibility of the caller to ensure
+ * that q->bio_split is only released after processing of the split bio has
+ * finished.
*/
void blk_queue_split(struct bio **bio)
{
@@ -866,7 +865,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
return false;
/* must be same device */
- if (rq->rq_disk != bio->bi_disk)
+ if (rq->rq_disk != bio->bi_bdev->bd_disk)
return false;
/* only merge integrity protected bio into ditto rq */
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 4de03da9a624..9ebb344e2585 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -292,7 +292,6 @@ static const char *const cmd_flag_name[] = {
#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
- RQF_NAME(SORTED),
RQF_NAME(STARTED),
RQF_NAME(SOFTBARRIER),
RQF_NAME(FLUSH_SEQ),
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index deff4e826e23..e1e997af89a0 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -384,14 +384,7 @@ bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
-void blk_mq_sched_request_inserted(struct request *rq)
-{
- trace_block_rq_insert(rq);
-}
-EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
-
static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
- bool has_sched,
struct request *rq)
{
/*
@@ -408,9 +401,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
return true;
- if (has_sched)
- rq->rq_flags |= RQF_SORTED;
-
return false;
}
@@ -424,7 +414,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));
- if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
+ if (blk_mq_sched_bypass_insert(hctx, rq)) {
/*
* Firstly normal IO request is inserted to scheduler queue or
* sw queue, meantime we add flush request to dispatch queue(
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 0476360f05f1..5b18ab915c65 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -7,7 +7,6 @@
void blk_mq_sched_assign_ioc(struct request *rq);
-void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs, struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f285a9123a8b..d4d7c1caa439 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -41,7 +41,7 @@
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
@@ -567,80 +567,29 @@ void blk_mq_end_request(struct request *rq, blk_status_t error)
}
EXPORT_SYMBOL(blk_mq_end_request);
-/*
- * Softirq action handler - move entries to local list and loop over them
- * while passing them to the queue registered handler.
- */
-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
+static void blk_complete_reqs(struct llist_head *list)
{
- struct list_head *cpu_list, local_list;
-
- local_irq_disable();
- cpu_list = this_cpu_ptr(&blk_cpu_done);
- list_replace_init(cpu_list, &local_list);
- local_irq_enable();
-
- while (!list_empty(&local_list)) {
- struct request *rq;
+ struct llist_node *entry = llist_reverse_order(llist_del_all(list));
+ struct request *rq, *next;
- rq = list_entry(local_list.next, struct request, ipi_list);
- list_del_init(&rq->ipi_list);
+ llist_for_each_entry_safe(rq, next, entry, ipi_list)
rq->q->mq_ops->complete(rq);
- }
}
-static void blk_mq_trigger_softirq(struct request *rq)
+static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
- struct list_head *list;
- unsigned long flags;
-
- local_irq_save(flags);
- list = this_cpu_ptr(&blk_cpu_done);
- list_add_tail(&rq->ipi_list, list);
-
- /*
- * If the list only contains our just added request, signal a raise of
- * the softirq. If there are already entries there, someone already
- * raised the irq but it hasn't run yet.
- */
- if (list->next == &rq->ipi_list)
- raise_softirq_irqoff(BLOCK_SOFTIRQ);
- local_irq_restore(flags);
+ blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}
static int blk_softirq_cpu_dead(unsigned int cpu)
{
- /*
- * If a CPU goes away, splice its entries to the current CPU
- * and trigger a run of the softirq
- */
- local_irq_disable();
- list_splice_init(&per_cpu(blk_cpu_done, cpu),
- this_cpu_ptr(&blk_cpu_done));
- raise_softirq_irqoff(BLOCK_SOFTIRQ);
- local_irq_enable();
-
+ blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
return 0;
}
-
static void __blk_mq_complete_request_remote(void *data)
{
- struct request *rq = data;
-
- /*
- * For most of single queue controllers, there is only one irq vector
- * for handling I/O completion, and the only irq's affinity is set
- * to all possible CPUs. On most of ARCHs, this affinity means the irq
- * is handled on one specific CPU.
- *
- * So complete I/O requests in softirq context in case of single queue
- * devices to avoid degrading I/O performance due to irqsoff latency.
- */
- if (rq->q->nr_hw_queues == 1)
- blk_mq_trigger_softirq(rq);
- else
- rq->q->mq_ops->complete(rq);
+ __raise_softirq_irqoff(BLOCK_SOFTIRQ);
}
static inline bool blk_mq_complete_need_ipi(struct request *rq)
@@ -669,6 +618,30 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
return cpu_online(rq->mq_ctx->cpu);
}
+static void blk_mq_complete_send_ipi(struct request *rq)
+{
+ struct llist_head *list;
+ unsigned int cpu;
+
+ cpu = rq->mq_ctx->cpu;
+ list = &per_cpu(blk_cpu_done, cpu);
+ if (llist_add(&rq->ipi_list, list)) {
+ INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
+ smp_call_function_single_async(cpu, &rq->csd);
+ }
+}
+
+static void blk_mq_raise_softirq(struct request *rq)
+{
+ struct llist_head *list;
+
+ preempt_disable();
+ list = this_cpu_ptr(&blk_cpu_done);
+ if (llist_add(&rq->ipi_list, list))
+ raise_softirq(BLOCK_SOFTIRQ);
+ preempt_enable();
+}
+
bool blk_mq_complete_request_remote(struct request *rq)
{
WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
@@ -681,15 +654,15 @@ bool blk_mq_complete_request_remote(struct request *rq)
return false;
if (blk_mq_complete_need_ipi(rq)) {
- INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
- smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
- } else {
- if (rq->q->nr_hw_queues > 1)
- return false;
- blk_mq_trigger_softirq(rq);
+ blk_mq_complete_send_ipi(rq);
+ return true;
}
- return true;
+ if (rq->q->nr_hw_queues == 1) {
+ blk_mq_raise_softirq(rq);
+ return true;
+ }
+ return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
@@ -1646,6 +1619,42 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);
+/*
+ * Is the request queue handled by an IO scheduler that does not respect
+ * hardware queues when dispatching?
+ */
+static bool blk_mq_has_sqsched(struct request_queue *q)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e && e->type->ops.dispatch_request &&
+ !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
+ return true;
+ return false;
+}
+
+/*
+ * Return the preferred queue to dispatch from (if any) for a non-mq aware IO
+ * scheduler.
+ */
+static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ /*
+ * If the IO scheduler does not respect hardware queues when
+ * dispatching, we just don't bother with multiple HW queues and
+ * dispatch from hctx for the current CPU since running multiple queues
+ * just causes lock contention inside the scheduler and pointless cache
+ * bouncing.
+ */
+ hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
+ raw_smp_processor_id());
+ if (!blk_mq_hctx_stopped(hctx))
+ return hctx;
+ return NULL;
+}
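[Editorial sketch: an IO scheduler opts out of this single-hctx dispatch path by advertising ELEVATOR_F_MQ_AWARE. The elevator_type below is trimmed and hypothetical; the ops callback is a placeholder.]

#include <linux/elevator.h>
#include <linux/module.h>

static struct request *my_dispatch_request(struct blk_mq_hw_ctx *hctx);	/* placeholder */

static struct elevator_type my_mq_sched = {
	.ops = {
		.dispatch_request	= my_dispatch_request,
	},
	.elevator_features	= ELEVATOR_F_MQ_AWARE,	/* per-hctx dispatch is respected */
	.elevator_name		= "my-mq-sched",
	.elevator_owner		= THIS_MODULE,
};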
+
/**
* blk_mq_run_hw_queues - Run all hardware queues in a request queue.
* @q: Pointer to the request queue to run.
@@ -1653,14 +1662,23 @@ EXPORT_SYMBOL(blk_mq_run_hw_queue);
*/
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
- struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_hw_ctx *hctx, *sq_hctx;
int i;
+ sq_hctx = NULL;
+ if (blk_mq_has_sqsched(q))
+ sq_hctx = blk_mq_get_sq_hctx(q);
queue_for_each_hw_ctx(q, hctx, i) {
if (blk_mq_hctx_stopped(hctx))
continue;
-
- blk_mq_run_hw_queue(hctx, async);
+ /*
+ * Dispatch from this hctx either if there's no hctx preferred
+ * by IO scheduler or if it has requests that bypass the
+ * scheduler.
+ */
+ if (!sq_hctx || sq_hctx == hctx ||
+ !list_empty_careful(&hctx->dispatch))
+ blk_mq_run_hw_queue(hctx, async);
}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);
@@ -1672,14 +1690,23 @@ EXPORT_SYMBOL(blk_mq_run_hw_queues);
*/
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
{
- struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_hw_ctx *hctx, *sq_hctx;
int i;
+ sq_hctx = NULL;
+ if (blk_mq_has_sqsched(q))
+ sq_hctx = blk_mq_get_sq_hctx(q);
queue_for_each_hw_ctx(q, hctx, i) {
if (blk_mq_hctx_stopped(hctx))
continue;
-
- blk_mq_delay_run_hw_queue(hctx, msecs);
+ /*
+ * Dispatch from this hctx either if there's no hctx preferred
+ * by IO scheduler or if it has requests that bypass the
+ * scheduler.
+ */
+ if (!sq_hctx || sq_hctx == hctx ||
+ !list_empty_careful(&hctx->dispatch))
+ blk_mq_delay_run_hw_queue(hctx, msecs);
}
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
@@ -2128,7 +2155,7 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
*/
blk_qc_t blk_mq_submit_bio(struct bio *bio)
{
- struct request_queue *q = bio->bi_disk->queue;
+ struct request_queue *q = bio->bi_bdev->bd_disk->queue;
const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = op_is_flush(bio->bi_opf);
struct blk_mq_alloc_data data = {
@@ -2653,7 +2680,6 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
goto free_hctx;
atomic_set(&hctx->nr_active, 0);
- atomic_set(&hctx->elevator_queued, 0);
if (node == NUMA_NO_NODE)
node = set->numa_node;
hctx->numa_node = node;
@@ -3904,7 +3930,7 @@ static int __init blk_mq_init(void)
int i;
for_each_possible_cpu(i)
- INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+ init_llist_head(&per_cpu(blk_cpu_done, i));
open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
diff --git a/block/blk-mq.h b/block/blk-mq.h
index c1458d9502f1..3616453ca28c 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -304,7 +304,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
struct request_queue *q = hctx->queue;
struct blk_mq_tag_set *set = q->tag_set;
- if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &q->queue_flags))
+ if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
return true;
users = atomic_read(&set->active_queues_shared_sbitmap);
} else {
diff --git a/block/blk-pm.h b/block/blk-pm.h
index a2283cc9f716..8a5a0d4b357f 100644
--- a/block/blk-pm.h
+++ b/block/blk-pm.h
@@ -21,31 +21,6 @@ static inline void blk_pm_mark_last_busy(struct request *rq)
if (rq->q->dev && !(rq->rq_flags & RQF_PM))
pm_runtime_mark_last_busy(rq->q->dev);
}
-
-static inline void blk_pm_requeue_request(struct request *rq)
-{
- lockdep_assert_held(&rq->q->queue_lock);
-
- if (rq->q->dev && !(rq->rq_flags & RQF_PM))
- rq->q->nr_pending--;
-}
-
-static inline void blk_pm_add_request(struct request_queue *q,
- struct request *rq)
-{
- lockdep_assert_held(&q->queue_lock);
-
- if (q->dev && !(rq->rq_flags & RQF_PM))
- q->nr_pending++;
-}
-
-static inline void blk_pm_put_request(struct request *rq)
-{
- lockdep_assert_held(&rq->q->queue_lock);
-
- if (rq->q->dev && !(rq->rq_flags & RQF_PM))
- --rq->q->nr_pending;
-}
#else
static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
{
@@ -55,19 +30,6 @@ static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
static inline void blk_pm_mark_last_busy(struct request *rq)
{
}
-
-static inline void blk_pm_requeue_request(struct request *rq)
-{
-}
-
-static inline void blk_pm_add_request(struct request_queue *q,
- struct request *rq)
-{
-}
-
-static inline void blk_pm_put_request(struct request *rq)
-{
-}
#endif
#endif /* _BLOCK_BLK_PM_H_ */
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 43990b1d148b..b4aa2f37fab6 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -60,6 +60,7 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->io_opt = 0;
lim->misaligned = 0;
lim->zoned = BLK_ZONED_NONE;
+ lim->zone_write_granularity = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);
@@ -367,6 +368,28 @@ void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
EXPORT_SYMBOL(blk_queue_physical_block_size);
/**
+ * blk_queue_zone_write_granularity - set zone write granularity for the queue
+ * @q: the request queue for the zoned device
+ * @size: the zone write granularity size, in bytes
+ *
+ * Description:
+ * This should be set to the smallest possible size that allows writing in
+ * sequential zones of a zoned block device.
+ */
+void blk_queue_zone_write_granularity(struct request_queue *q,
+ unsigned int size)
+{
+ if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
+ return;
+
+ q->limits.zone_write_granularity = size;
+
+ if (q->limits.zone_write_granularity < q->limits.logical_block_size)
+ q->limits.zone_write_granularity = q->limits.logical_block_size;
+}
+EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);
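[Editorial sketch: a zoned driver would typically call this from its zone-revalidation or setup path; the 4 KiB write unit and function name below are made up.]

#include <linux/blkdev.h>

/* Illustrative: report a 4 KiB minimum sequential-write unit for a zoned disk. */
static void my_zoned_setup(struct gendisk *disk)
{
	blk_queue_set_zoned(disk, BLK_ZONED_HM);
	/* The helper raises this to the logical block size if set smaller. */
	blk_queue_zone_write_granularity(disk->queue, 4096);
}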
+
+/**
* blk_queue_alignment_offset - set physical block alignment offset
* @q: the request queue for the device
* @offset: alignment offset in bytes
@@ -481,6 +504,14 @@ void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
}
EXPORT_SYMBOL(blk_queue_io_opt);
+static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
+{
+ sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
+ if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
+ sectors = PAGE_SIZE >> SECTOR_SHIFT;
+ return sectors;
+}
+
/**
* blk_stack_limits - adjust queue_limits for stacked devices
* @t: the stacking driver limits (top device)
@@ -607,6 +638,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
ret = -1;
}
+ t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
+ t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
+ t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
+
/* Discard alignment and granularity */
if (b->discard_granularity) {
alignment = queue_limit_discard_alignment(b, start);
@@ -631,6 +666,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->discard_granularity;
}
+ t->zone_write_granularity = max(t->zone_write_granularity,
+ b->zone_write_granularity);
t->zoned = max(t->zoned, b->zoned);
return ret;
}
@@ -847,6 +884,8 @@ EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
*/
void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
{
+ struct request_queue *q = disk->queue;
+
switch (model) {
case BLK_ZONED_HM:
/*
@@ -865,7 +904,7 @@ void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
* we do nothing special as far as the block layer is concerned.
*/
if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
- disk_has_partitions(disk))
+ !xa_empty(&disk->part_tbl))
model = BLK_ZONED_NONE;
break;
case BLK_ZONED_NONE:
@@ -875,7 +914,17 @@ void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
break;
}
- disk->queue->limits.zoned = model;
+ q->limits.zoned = model;
+ if (model != BLK_ZONED_NONE) {
+ /*
+ * Set the zone write granularity to the device logical block
+ * size by default. The driver can change this value if needed.
+ */
+ blk_queue_zone_write_granularity(q,
+ queue_logical_block_size(q));
+ } else {
+ blk_queue_clear_zone_settings(q);
+ }
}
EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index b513f1683af0..0f4f0c8a7825 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -219,6 +219,12 @@ static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}
+static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
+ char *page)
+{
+ return queue_var_show(queue_zone_write_granularity(q), page);
+}
+
static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
unsigned long long max_sectors = q->limits.max_zone_append_sectors;
@@ -428,10 +434,13 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
if (ret < 0)
return ret;
- if (poll_on)
+ if (poll_on) {
blk_queue_flag_set(QUEUE_FLAG_POLL, q);
- else
+ } else {
+ blk_mq_freeze_queue(q);
blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
+ blk_mq_unfreeze_queue(q);
+ }
return ret;
}
@@ -585,6 +594,7 @@ QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
+QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");
QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
@@ -639,6 +649,7 @@ static struct attribute *queue_attrs[] = {
&queue_write_same_max_entry.attr,
&queue_write_zeroes_max_entry.attr,
&queue_zone_append_max_entry.attr,
+ &queue_zone_write_granularity_entry.attr,
&queue_nonrot_entry.attr,
&queue_zoned_entry.attr,
&queue_nr_zones_entry.attr,
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index d52cac9f3a7c..b1b22d863bdf 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2178,7 +2178,7 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
bool blk_throtl_bio(struct bio *bio)
{
- struct request_queue *q = bio->bi_disk->queue;
+ struct request_queue *q = bio->bi_bdev->bd_disk->queue;
struct blkcg_gq *blkg = bio->bi_blkg;
struct throtl_qnode *qn = NULL;
struct throtl_grp *tg = blkg_to_tg(blkg);
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 0321ca83e73f..42aed0160f86 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -518,7 +518,7 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
}
-static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
+static inline bool wbt_should_throttle(struct bio *bio)
{
switch (bio_op(bio)) {
case REQ_OP_WRITE:
@@ -545,7 +545,7 @@ static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
if (bio_op(bio) == REQ_OP_READ) {
flags = WBT_READ;
- } else if (wbt_should_throttle(rwb, bio)) {
+ } else if (wbt_should_throttle(bio)) {
if (current_is_kswapd())
flags |= WBT_KSWAPD;
if (bio_op(bio) == REQ_OP_DISCARD)
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 7a68b6e4300c..833978c02e60 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -549,3 +549,20 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
+
+void blk_queue_clear_zone_settings(struct request_queue *q)
+{
+ blk_mq_freeze_queue(q);
+
+ blk_queue_free_zone_bitmaps(q);
+ blk_queue_flag_clear(QUEUE_FLAG_ZONE_RESETALL, q);
+ q->required_elevator_features &= ~ELEVATOR_F_ZBD_SEQ_WRITE;
+ q->nr_zones = 0;
+ q->max_open_zones = 0;
+ q->max_active_zones = 0;
+ q->limits.chunk_sectors = 0;
+ q->limits.zone_write_granularity = 0;
+ q->limits.max_zone_append_sectors = 0;
+
+ blk_mq_unfreeze_queue(q);
+}
diff --git a/block/blk.h b/block/blk.h
index 7550364c326c..3b53e44b967e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -55,6 +55,11 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
void blk_freeze_queue(struct request_queue *q);
+#define BIO_INLINE_VECS 4
+struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
+ gfp_t gfp_mask);
+void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);
+
static inline bool biovec_phys_mergeable(struct request_queue *q,
struct bio_vec *vec1, struct bio_vec *vec2)
{
@@ -202,8 +207,6 @@ static inline void elevator_exit(struct request_queue *q,
__elevator_exit(q, e);
}
-struct block_device *__disk_get_part(struct gendisk *disk, int partno);
-
ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
@@ -331,12 +334,12 @@ struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);
#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
+void blk_queue_clear_zone_settings(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
+static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
#endif
-struct block_device *disk_map_sector_rcu(struct gendisk *disk, sector_t sector);
-
int blk_alloc_devt(struct block_device *part, dev_t *devt);
void blk_free_devt(dev_t devt);
char *disk_name(struct gendisk *hd, int partno, char *buf);
@@ -349,7 +352,6 @@ int bdev_add_partition(struct block_device *bdev, int partno,
int bdev_del_partition(struct block_device *bdev, int partno);
int bdev_resize_partition(struct block_device *bdev, int partno,
sector_t start, sector_t length);
-int disk_expand_part_tbl(struct gendisk *disk, int target);
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
struct page *page, unsigned int len, unsigned int offset,
diff --git a/block/bounce.c b/block/bounce.c
index d3f51acd6e3b..87983a35079c 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -214,8 +214,7 @@ static void bounce_end_io_read_isa(struct bio *bio)
__bounce_end_io_read(bio, &isa_page_pool);
}
-static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
- struct bio_set *bs)
+static struct bio *bounce_clone_bio(struct bio *bio_src)
{
struct bvec_iter iter;
struct bio_vec bv;
@@ -242,11 +241,15 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
* asking for trouble and would force extra work on
* __bio_clone_fast() anyways.
*/
-
- bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
- if (!bio)
- return NULL;
- bio->bi_disk = bio_src->bi_disk;
+ if (bio_is_passthrough(bio_src))
+ bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL,
+ bio_segments(bio_src));
+ else
+ bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src),
+ &bounce_bio_set);
+ bio->bi_bdev = bio_src->bi_bdev;
+ if (bio_flagged(bio_src, BIO_REMAPPED))
+ bio_set_flag(bio, BIO_REMAPPED);
bio->bi_opf = bio_src->bi_opf;
bio->bi_ioprio = bio_src->bi_ioprio;
bio->bi_write_hint = bio_src->bi_write_hint;
@@ -267,11 +270,11 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
break;
}
- if (bio_crypt_clone(bio, bio_src, gfp_mask) < 0)
+ if (bio_crypt_clone(bio, bio_src, GFP_NOIO) < 0)
goto err_put;
if (bio_integrity(bio_src) &&
- bio_integrity_clone(bio, bio_src, gfp_mask) < 0)
+ bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0)
goto err_put;
bio_clone_blkg_association(bio, bio_src);
@@ -294,7 +297,6 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
unsigned i = 0;
bool bounce = false;
int sectors = 0;
- bool passthrough = bio_is_passthrough(*bio_orig);
bio_for_each_segment(from, *bio_orig, iter) {
if (i++ < BIO_MAX_PAGES)
@@ -305,14 +307,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
if (!bounce)
return;
- if (!passthrough && sectors < bio_sectors(*bio_orig)) {
+ if (!bio_is_passthrough(*bio_orig) &&
+ sectors < bio_sectors(*bio_orig)) {
bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
bio_chain(bio, *bio_orig);
submit_bio_noacct(*bio_orig);
*bio_orig = bio;
}
- bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL :
- &bounce_bio_set);
+ bio = bounce_clone_bio(*bio_orig);
/*
* Bvec table can't be updated by bio_for_each_segment_all(),
diff --git a/block/bsg.c b/block/bsg.c
index d7bae94b64d9..bd10922d5cbb 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -157,8 +157,10 @@ static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
return PTR_ERR(rq);
ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
- if (ret)
+ if (ret) {
+ blk_put_request(rq);
return ret;
+ }
rq->timeout = msecs_to_jiffies(hdr.timeout);
if (!rq->timeout)
@@ -181,7 +183,7 @@ static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
bio = rq->bio;
- blk_execute_rq(q, NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
+ blk_execute_rq(NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr);
blk_rq_unmap_user(bio);
diff --git a/block/genhd.c b/block/genhd.c
index 419548e92d82..c55e8f0fced1 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -73,7 +73,7 @@ bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
return false;
pr_info("%s: detected capacity change from %lld to %lld\n",
- disk->disk_name, size, capacity);
+ disk->disk_name, capacity, size);
/*
* Historically we did not send a uevent for changes to/from an empty
@@ -161,15 +161,6 @@ static void part_in_flight_rw(struct block_device *part,
inflight[1] = 0;
}
-struct block_device *__disk_get_part(struct gendisk *disk, int partno)
-{
- struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);
-
- if (unlikely(partno < 0 || partno >= ptbl->len))
- return NULL;
- return rcu_dereference(ptbl->part[partno]);
-}
-
/**
* disk_part_iter_init - initialize partition iterator
* @piter: iterator to initialize
@@ -184,26 +175,14 @@ struct block_device *__disk_get_part(struct gendisk *disk, int partno)
void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk,
unsigned int flags)
{
- struct disk_part_tbl *ptbl;
-
- rcu_read_lock();
- ptbl = rcu_dereference(disk->part_tbl);
-
piter->disk = disk;
piter->part = NULL;
-
- if (flags & DISK_PITER_REVERSE)
- piter->idx = ptbl->len - 1;
- else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))
+ if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))
piter->idx = 0;
else
piter->idx = 1;
-
piter->flags = flags;
-
- rcu_read_unlock();
}
-EXPORT_SYMBOL_GPL(disk_part_iter_init);
/**
* disk_part_iter_next - proceed iterator to the next partition and return it
@@ -216,57 +195,30 @@ EXPORT_SYMBOL_GPL(disk_part_iter_init);
*/
struct block_device *disk_part_iter_next(struct disk_part_iter *piter)
{
- struct disk_part_tbl *ptbl;
- int inc, end;
+ struct block_device *part;
+ unsigned long idx;
/* put the last partition */
disk_part_iter_exit(piter);
- /* get part_tbl */
rcu_read_lock();
- ptbl = rcu_dereference(piter->disk->part_tbl);
-
- /* determine iteration parameters */
- if (piter->flags & DISK_PITER_REVERSE) {
- inc = -1;
- if (piter->flags & (DISK_PITER_INCL_PART0 |
- DISK_PITER_INCL_EMPTY_PART0))
- end = -1;
- else
- end = 0;
- } else {
- inc = 1;
- end = ptbl->len;
- }
-
- /* iterate to the next partition */
- for (; piter->idx != end; piter->idx += inc) {
- struct block_device *part;
-
- part = rcu_dereference(ptbl->part[piter->idx]);
- if (!part)
- continue;
- piter->part = bdgrab(part);
- if (!piter->part)
- continue;
+ xa_for_each_start(&piter->disk->part_tbl, idx, part, piter->idx) {
if (!bdev_nr_sectors(part) &&
!(piter->flags & DISK_PITER_INCL_EMPTY) &&
!(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
- piter->idx == 0)) {
- bdput(piter->part);
- piter->part = NULL;
+ piter->idx == 0))
continue;
- }
- piter->idx += inc;
+ piter->part = bdgrab(part);
+ if (!piter->part)
+ continue;
+ piter->idx = idx + 1;
break;
}
-
rcu_read_unlock();
return piter->part;
}
-EXPORT_SYMBOL_GPL(disk_part_iter_next);
/**
* disk_part_iter_exit - finish up partition iteration
@@ -283,91 +235,6 @@ void disk_part_iter_exit(struct disk_part_iter *piter)
bdput(piter->part);
piter->part = NULL;
}
-EXPORT_SYMBOL_GPL(disk_part_iter_exit);
-
-static inline int sector_in_part(struct block_device *part, sector_t sector)
-{
- return part->bd_start_sect <= sector &&
- sector < part->bd_start_sect + bdev_nr_sectors(part);
-}
-
-/**
- * disk_map_sector_rcu - map sector to partition
- * @disk: gendisk of interest
- * @sector: sector to map
- *
- * Find out which partition @sector maps to on @disk. This is
- * primarily used for stats accounting.
- *
- * CONTEXT:
- * RCU read locked.
- *
- * RETURNS:
- * Found partition on success, part0 is returned if no partition matches
- * or the matched partition is being deleted.
- */
-struct block_device *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
-{
- struct disk_part_tbl *ptbl;
- struct block_device *part;
- int i;
-
- rcu_read_lock();
- ptbl = rcu_dereference(disk->part_tbl);
-
- part = rcu_dereference(ptbl->last_lookup);
- if (part && sector_in_part(part, sector))
- goto out_unlock;
-
- for (i = 1; i < ptbl->len; i++) {
- part = rcu_dereference(ptbl->part[i]);
- if (part && sector_in_part(part, sector)) {
- rcu_assign_pointer(ptbl->last_lookup, part);
- goto out_unlock;
- }
- }
-
- part = disk->part0;
-out_unlock:
- rcu_read_unlock();
- return part;
-}
-
-/**
- * disk_has_partitions
- * @disk: gendisk of interest
- *
- * Walk through the partition table and check if valid partition exists.
- *
- * CONTEXT:
- * Don't care.
- *
- * RETURNS:
- * True if the gendisk has at least one valid non-zero size partition.
- * Otherwise false.
- */
-bool disk_has_partitions(struct gendisk *disk)
-{
- struct disk_part_tbl *ptbl;
- int i;
- bool ret = false;
-
- rcu_read_lock();
- ptbl = rcu_dereference(disk->part_tbl);
-
- /* Iterate partitions skipping the whole device at index 0 */
- for (i = 1; i < ptbl->len; i++) {
- if (rcu_dereference(ptbl->part[i])) {
- ret = true;
- break;
- }
- }
-
- rcu_read_unlock();
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(disk_has_partitions);
/*
* Can be deleted altogether. Later.
@@ -603,6 +470,18 @@ static char *bdevt_str(dev_t devt, char *buf)
return buf;
}
+void disk_uevent(struct gendisk *disk, enum kobject_action action)
+{
+ struct disk_part_iter piter;
+ struct block_device *part;
+
+ disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY_PART0);
+ while ((part = disk_part_iter_next(&piter)))
+ kobject_uevent(bdev_kobj(part), action);
+ disk_part_iter_exit(&piter);
+}
+EXPORT_SYMBOL_GPL(disk_uevent);
+
static void disk_scan_partitions(struct gendisk *disk)
{
struct block_device *bdev;
@@ -620,8 +499,6 @@ static void register_disk(struct device *parent, struct gendisk *disk,
const struct attribute_group **groups)
{
struct device *ddev = disk_to_dev(disk);
- struct disk_part_iter piter;
- struct block_device *part;
int err;
ddev->parent = parent;
@@ -664,15 +541,9 @@ static void register_disk(struct device *parent, struct gendisk *disk,
disk_scan_partitions(disk);
- /* announce disk after possible partitions are created */
+ /* announce the disk and partitions after all partitions are created */
dev_set_uevent_suppress(ddev, 0);
- kobject_uevent(&ddev->kobj, KOBJ_ADD);
-
- /* announce possible partitions */
- disk_part_iter_init(&piter, disk, 0);
- while ((part = disk_part_iter_next(&piter)))
- kobject_uevent(bdev_kobj(part), KOBJ_ADD);
- disk_part_iter_exit(&piter);
+ disk_uevent(disk, KOBJ_ADD);
if (disk->queue->backing_dev_info->dev) {
err = sysfs_create_link(&ddev->kobj,
@@ -828,8 +699,7 @@ void del_gendisk(struct gendisk *disk)
down_write(&bdev_lookup_sem);
/* invalidate stuff */
- disk_part_iter_init(&piter, disk,
- DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
+ disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
while ((part = disk_part_iter_next(&piter))) {
invalidate_partition(part);
delete_partition(part);
@@ -928,7 +798,7 @@ struct block_device *bdget_disk(struct gendisk *disk, int partno)
struct block_device *bdev = NULL;
rcu_read_lock();
- bdev = __disk_get_part(disk, partno);
+ bdev = xa_load(&disk->part_tbl, partno);
if (bdev && !bdgrab(bdev))
bdev = NULL;
rcu_read_unlock();
@@ -1319,83 +1189,6 @@ static const struct attribute_group *disk_attr_groups[] = {
};
/**
- * disk_replace_part_tbl - replace disk->part_tbl in RCU-safe way
- * @disk: disk to replace part_tbl for
- * @new_ptbl: new part_tbl to install
- *
- * Replace disk->part_tbl with @new_ptbl in RCU-safe way. The
- * original ptbl is freed using RCU callback.
- *
- * LOCKING:
- * Matching bd_mutex locked or the caller is the only user of @disk.
- */
-static void disk_replace_part_tbl(struct gendisk *disk,
- struct disk_part_tbl *new_ptbl)
-{
- struct disk_part_tbl *old_ptbl =
- rcu_dereference_protected(disk->part_tbl, 1);
-
- rcu_assign_pointer(disk->part_tbl, new_ptbl);
-
- if (old_ptbl) {
- rcu_assign_pointer(old_ptbl->last_lookup, NULL);
- kfree_rcu(old_ptbl, rcu_head);
- }
-}
-
-/**
- * disk_expand_part_tbl - expand disk->part_tbl
- * @disk: disk to expand part_tbl for
- * @partno: expand such that this partno can fit in
- *
- * Expand disk->part_tbl such that @partno can fit in. disk->part_tbl
- * uses RCU to allow unlocked dereferencing for stats and other stuff.
- *
- * LOCKING:
- * Matching bd_mutex locked or the caller is the only user of @disk.
- * Might sleep.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int disk_expand_part_tbl(struct gendisk *disk, int partno)
-{
- struct disk_part_tbl *old_ptbl =
- rcu_dereference_protected(disk->part_tbl, 1);
- struct disk_part_tbl *new_ptbl;
- int len = old_ptbl ? old_ptbl->len : 0;
- int i, target;
-
- /*
- * check for int overflow, since we can get here from blkpg_ioctl()
- * with a user passed 'partno'.
- */
- target = partno + 1;
- if (target < 0)
- return -EINVAL;
-
- /* disk_max_parts() is zero during initialization, ignore if so */
- if (disk_max_parts(disk) && target > disk_max_parts(disk))
- return -EINVAL;
-
- if (target <= len)
- return 0;
-
- new_ptbl = kzalloc_node(struct_size(new_ptbl, part, target), GFP_KERNEL,
- disk->node_id);
- if (!new_ptbl)
- return -ENOMEM;
-
- new_ptbl->len = target;
-
- for (i = 0; i < len; i++)
- rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]);
-
- disk_replace_part_tbl(disk, new_ptbl);
- return 0;
-}
-
-/**
* disk_release - releases all allocated resources of the gendisk
* @dev: the device representing this disk
*
@@ -1418,7 +1211,7 @@ static void disk_release(struct device *dev)
blk_free_devt(dev->devt);
disk_release_events(disk);
kfree(disk->random);
- disk_replace_part_tbl(disk, NULL);
+ xa_destroy(&disk->part_tbl);
bdput(disk->part0);
if (disk->queue)
blk_put_queue(disk->queue);
@@ -1571,7 +1364,6 @@ dev_t blk_lookup_devt(const char *name, int partno)
struct gendisk *__alloc_disk_node(int minors, int node_id)
{
struct gendisk *disk;
- struct disk_part_tbl *ptbl;
if (minors > DISK_MAX_PARTS) {
printk(KERN_ERR
@@ -1589,11 +1381,9 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
goto out_free_disk;
disk->node_id = node_id;
- if (disk_expand_part_tbl(disk, 0))
- goto out_bdput;
-
- ptbl = rcu_dereference_protected(disk->part_tbl, 1);
- rcu_assign_pointer(ptbl->part[0], disk->part0);
+ xa_init(&disk->part_tbl);
+ if (xa_insert(&disk->part_tbl, 0, disk->part0, GFP_KERNEL))
+ goto out_destroy_part_tbl;
disk->minors = minors;
rand_initialize_disk(disk);
@@ -1602,7 +1392,8 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
device_initialize(disk_to_dev(disk));
return disk;
-out_bdput:
+out_destroy_part_tbl:
+ xa_destroy(&disk->part_tbl);
bdput(disk->part0);
out_free_disk:
kfree(disk);
@@ -1637,31 +1428,32 @@ static void set_disk_ro_uevent(struct gendisk *gd, int ro)
kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
}
-void set_disk_ro(struct gendisk *disk, int flag)
+/**
+ * set_disk_ro - set a gendisk read-only
+ * @disk: gendisk to operate on
+ * @read_only: %true to set the disk read-only, %false to set it read/write
+ *
+ * This function is used to indicate whether a given disk device should have its
+ * read-only flag set. set_disk_ro() is typically used by device drivers to
+ * indicate whether the underlying physical device is write-protected.
+ */
+void set_disk_ro(struct gendisk *disk, bool read_only)
{
- struct disk_part_iter piter;
- struct block_device *part;
-
- if (disk->part0->bd_read_only != flag) {
- set_disk_ro_uevent(disk, flag);
- disk->part0->bd_read_only = flag;
+ if (read_only) {
+ if (test_and_set_bit(GD_READ_ONLY, &disk->state))
+ return;
+ } else {
+ if (!test_and_clear_bit(GD_READ_ONLY, &disk->state))
+ return;
}
-
- disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
- while ((part = disk_part_iter_next(&piter)))
- part->bd_read_only = flag;
- disk_part_iter_exit(&piter);
+ set_disk_ro_uevent(disk, read_only);
}
-
EXPORT_SYMBOL(set_disk_ro);
int bdev_read_only(struct block_device *bdev)
{
- if (!bdev)
- return 0;
- return bdev->bd_read_only;
+ return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
}
-
EXPORT_SYMBOL(bdev_read_only);
/*
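(The genhd.c hunks above replace the RCU-managed struct disk_part_tbl with an xarray keyed by partition number. A minimal sketch of that xarray pattern in plain kernel C follows; the table and function names here are illustrative only, not part of the patch.)

#include <linux/xarray.h>
#include <linux/gfp.h>

/* Illustrative stand-in for disk->part_tbl. */
static DEFINE_XARRAY(example_part_tbl);

static int example_add_part(unsigned int partno, void *part)
{
	/* Fails with -EBUSY if the slot is already populated, which is how
	 * add_partition() now detects an existing partition. */
	return xa_insert(&example_part_tbl, partno, part, GFP_KERNEL);
}

static void *example_lookup_part(unsigned int partno)
{
	/* Lock-free lookup; bdget_disk() still takes its own reference on
	 * the result with bdgrab() under rcu_read_lock(). */
	return xa_load(&example_part_tbl, partno);
}

static void example_remove_part(unsigned int partno)
{
	/* Replaces the old RCU pointer clearing in delete_partition(). */
	xa_erase(&example_part_tbl, partno);
}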
diff --git a/block/ioctl.c b/block/ioctl.c
index d61d652078f4..ff241e663c01 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -81,20 +81,27 @@ static int compat_blkpg_ioctl(struct block_device *bdev,
}
#endif
-static int blkdev_reread_part(struct block_device *bdev)
+static int blkdev_reread_part(struct block_device *bdev, fmode_t mode)
{
- int ret;
+ struct block_device *tmp;
if (!disk_part_scan_enabled(bdev->bd_disk) || bdev_is_partition(bdev))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- mutex_lock(&bdev->bd_mutex);
- ret = bdev_disk_changed(bdev, false);
- mutex_unlock(&bdev->bd_mutex);
+ /*
+ * Reopen the device to revalidate the driver state and force a
+ * partition rescan.
+ */
+ mode &= ~FMODE_EXCL;
+ set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
- return ret;
+ tmp = blkdev_get_by_dev(bdev->bd_dev, mode, NULL);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ blkdev_put(tmp, mode);
+ return 0;
}
static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
@@ -498,7 +505,7 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0;
case BLKRRPART:
- return blkdev_reread_part(bdev);
+ return blkdev_reread_part(bdev, mode);
case BLKTRACESTART:
case BLKTRACESTOP:
case BLKTRACETEARDOWN:
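(With the ioctl.c change above, BLKRRPART rescans partitions by reopening the device with GD_NEED_PART_SCAN set rather than calling bdev_disk_changed() directly. A small userspace sketch that exercises that path; the device path is a placeholder and CAP_SYS_ADMIN is required.)

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	/* Placeholder device node; substitute a real whole-disk device. */
	int fd = open("/dev/sdX", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Ask the kernel to re-read this disk's partition table. */
	if (ioctl(fd, BLKRRPART) < 0) {
		perror("BLKRRPART");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}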
diff --git a/block/keyslot-manager.c b/block/keyslot-manager.c
index 86f8195d8039..2c4a55bea6ca 100644
--- a/block/keyslot-manager.c
+++ b/block/keyslot-manager.c
@@ -29,6 +29,7 @@
#define pr_fmt(fmt) "blk-crypto: " fmt
#include <linux/keyslot-manager.h>
+#include <linux/device.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
@@ -62,6 +63,11 @@ static inline void blk_ksm_hw_exit(struct blk_keyslot_manager *ksm)
pm_runtime_put_sync(ksm->dev);
}
+static inline bool blk_ksm_is_passthrough(struct blk_keyslot_manager *ksm)
+{
+ return ksm->num_slots == 0;
+}
+
/**
* blk_ksm_init() - Initialize a keyslot manager
* @ksm: The keyslot_manager to initialize.
@@ -127,6 +133,34 @@ err_destroy_ksm:
}
EXPORT_SYMBOL_GPL(blk_ksm_init);
+static void blk_ksm_destroy_callback(void *ksm)
+{
+ blk_ksm_destroy(ksm);
+}
+
+/**
+ * devm_blk_ksm_init() - Resource-managed blk_ksm_init()
+ * @dev: The device which owns the blk_keyslot_manager.
+ * @ksm: The blk_keyslot_manager to initialize.
+ * @num_slots: The number of key slots to manage.
+ *
+ * Like blk_ksm_init(), but causes blk_ksm_destroy() to be called automatically
+ * on driver detach.
+ *
+ * Return: 0 on success, or else a negative error code.
+ */
+int devm_blk_ksm_init(struct device *dev, struct blk_keyslot_manager *ksm,
+ unsigned int num_slots)
+{
+ int err = blk_ksm_init(ksm, num_slots);
+
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(dev, blk_ksm_destroy_callback, ksm);
+}
+EXPORT_SYMBOL_GPL(devm_blk_ksm_init);
+
static inline struct hlist_head *
blk_ksm_hash_bucket_for_key(struct blk_keyslot_manager *ksm,
const struct blk_crypto_key *key)
@@ -205,6 +239,10 @@ blk_status_t blk_ksm_get_slot_for_key(struct blk_keyslot_manager *ksm,
int err;
*slot_ptr = NULL;
+
+ if (blk_ksm_is_passthrough(ksm))
+ return BLK_STS_OK;
+
down_read(&ksm->lock);
slot = blk_ksm_find_and_grab_keyslot(ksm, key);
up_read(&ksm->lock);
@@ -325,6 +363,16 @@ int blk_ksm_evict_key(struct blk_keyslot_manager *ksm,
struct blk_ksm_keyslot *slot;
int err = 0;
+ if (blk_ksm_is_passthrough(ksm)) {
+ if (ksm->ksm_ll_ops.keyslot_evict) {
+ blk_ksm_hw_enter(ksm);
+ err = ksm->ksm_ll_ops.keyslot_evict(ksm, key, -1);
+ blk_ksm_hw_exit(ksm);
+ return err;
+ }
+ return 0;
+ }
+
blk_ksm_hw_enter(ksm);
slot = blk_ksm_find_keyslot(ksm, key);
if (!slot)
@@ -360,6 +408,9 @@ void blk_ksm_reprogram_all_keys(struct blk_keyslot_manager *ksm)
{
unsigned int slot;
+ if (blk_ksm_is_passthrough(ksm))
+ return;
+
/* This is for device initialization, so don't resume the device */
down_write(&ksm->lock);
for (slot = 0; slot < ksm->num_slots; slot++) {
@@ -401,3 +452,127 @@ void blk_ksm_unregister(struct request_queue *q)
{
q->ksm = NULL;
}
+
+/**
+ * blk_ksm_intersect_modes() - restrict supported modes by child device
+ * @parent: The keyslot manager for parent device
+ * @child: The keyslot manager for child device, or NULL
+ *
+ * Clear any crypto mode support bits in @parent that aren't set in @child.
+ * If @child is NULL, then all parent bits are cleared.
+ *
+ * Only use this when setting up the keyslot manager for a layered device,
+ * before it's been exposed yet.
+ */
+void blk_ksm_intersect_modes(struct blk_keyslot_manager *parent,
+ const struct blk_keyslot_manager *child)
+{
+ if (child) {
+ unsigned int i;
+
+ parent->max_dun_bytes_supported =
+ min(parent->max_dun_bytes_supported,
+ child->max_dun_bytes_supported);
+ for (i = 0; i < ARRAY_SIZE(child->crypto_modes_supported);
+ i++) {
+ parent->crypto_modes_supported[i] &=
+ child->crypto_modes_supported[i];
+ }
+ } else {
+ parent->max_dun_bytes_supported = 0;
+ memset(parent->crypto_modes_supported, 0,
+ sizeof(parent->crypto_modes_supported));
+ }
+}
+EXPORT_SYMBOL_GPL(blk_ksm_intersect_modes);
+
+/**
+ * blk_ksm_is_superset() - Check if a KSM supports a superset of crypto modes
+ * and DUN bytes that another KSM supports. Here,
+ * "superset" refers to the mathematical meaning of the
+ * word - i.e. if two KSMs have the *same* capabilities,
+ * they *are* considered supersets of each other.
+ * @ksm_superset: The KSM that we want to verify is a superset
+ * @ksm_subset: The KSM that we want to verify is a subset
+ *
+ * Return: True if @ksm_superset supports a superset of the crypto modes and DUN
+ * bytes that @ksm_subset supports.
+ */
+bool blk_ksm_is_superset(struct blk_keyslot_manager *ksm_superset,
+ struct blk_keyslot_manager *ksm_subset)
+{
+ int i;
+
+ if (!ksm_subset)
+ return true;
+
+ if (!ksm_superset)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(ksm_superset->crypto_modes_supported); i++) {
+ if (ksm_subset->crypto_modes_supported[i] &
+ (~ksm_superset->crypto_modes_supported[i])) {
+ return false;
+ }
+ }
+
+ if (ksm_subset->max_dun_bytes_supported >
+ ksm_superset->max_dun_bytes_supported) {
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(blk_ksm_is_superset);
+
+/**
+ * blk_ksm_update_capabilities() - Update the restrictions of a KSM to those of
+ * another KSM
+ * @target_ksm: The KSM whose restrictions to update.
+ * @reference_ksm: The KSM whose restrictions this function will update
+ * @target_ksm's restrictions to match.
+ *
+ * Blk-crypto requires that crypto capabilities that were
+ * advertised when a bio was created continue to be supported by the
+ * device until that bio is ended. This in turn means that a device cannot
+ * shrink its advertised crypto capabilities without any explicit
+ * synchronization with upper layers. So if there's no such explicit
+ * synchronization, @reference_ksm must support all the crypto capabilities that
+ * @target_ksm does
+ * (i.e. we need blk_ksm_is_superset(@reference_ksm, @target_ksm) == true).
+ *
+ * Note also that as long as the crypto capabilities are being expanded, the
+ * order of updates becoming visible is not important because it's alright
+ * for blk-crypto to see stale values - they only cause blk-crypto to
+ * believe that a crypto capability isn't supported when it actually is (which
+ * might result in blk-crypto-fallback being used if available, or the bio being
+ * failed).
+ */
+void blk_ksm_update_capabilities(struct blk_keyslot_manager *target_ksm,
+ struct blk_keyslot_manager *reference_ksm)
+{
+ memcpy(target_ksm->crypto_modes_supported,
+ reference_ksm->crypto_modes_supported,
+ sizeof(target_ksm->crypto_modes_supported));
+
+ target_ksm->max_dun_bytes_supported =
+ reference_ksm->max_dun_bytes_supported;
+}
+EXPORT_SYMBOL_GPL(blk_ksm_update_capabilities);
+
+/**
+ * blk_ksm_init_passthrough() - Init a passthrough keyslot manager
+ * @ksm: The keyslot manager to init
+ *
+ * Initialize a passthrough keyslot manager.
+ * Called by e.g. storage drivers to set up a keyslot manager in their
+ * request_queue, when the storage driver wants to manage its keys by itself.
+ * This is useful for inline encryption hardware that doesn't have the concept
+ * of keyslots, and for layered devices.
+ */
+void blk_ksm_init_passthrough(struct blk_keyslot_manager *ksm)
+{
+ memset(ksm, 0, sizeof(*ksm));
+ init_rwsem(&ksm->lock);
+}
+EXPORT_SYMBOL_GPL(blk_ksm_init_passthrough);
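(The passthrough helpers added above are intended for layered drivers. Below is a minimal sketch, assuming a hypothetical stacked driver with underlying request queues under[0..count-1] and CONFIG_BLK_INLINE_ENCRYPTION enabled so each queue has a ->ksm pointer, of combining blk_ksm_init_passthrough() and blk_ksm_intersect_modes() so the stacked queue only advertises capabilities that every child supports.)

#include <linux/keyslot-manager.h>
#include <linux/blkdev.h>
#include <linux/string.h>
#include <linux/kernel.h>

/* Hypothetical helper for a stacked block driver. */
static void example_stacked_ksm_setup(struct blk_keyslot_manager *ksm,
				      struct request_queue **under,
				      unsigned int count)
{
	unsigned int i;

	/* No keyslots of our own; keys are passed through to the children. */
	blk_ksm_init_passthrough(ksm);

	/* Start from "everything supported" ... */
	ksm->max_dun_bytes_supported = UINT_MAX;
	memset(ksm->crypto_modes_supported, 0xff,
	       sizeof(ksm->crypto_modes_supported));

	/* ... then drop anything some underlying queue cannot do.
	 * A NULL child ksm clears all capabilities, per the helper's doc. */
	for (i = 0; i < count; i++)
		blk_ksm_intersect_modes(ksm, under[i]->ksm);
}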
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index dc89199bc8c6..33d34d69cade 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -13,6 +13,8 @@
#include <linux/module.h>
#include <linux/sbitmap.h>
+#include <trace/events/block.h>
+
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
@@ -353,19 +355,9 @@ static void kyber_timer_fn(struct timer_list *t)
}
}
-static unsigned int kyber_sched_tags_shift(struct request_queue *q)
-{
- /*
- * All of the hardware queues have the same depth, so we can just grab
- * the shift of the first one.
- */
- return q->queue_hw_ctx[0]->sched_tags->bitmap_tags->sb.shift;
-}
-
static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
{
struct kyber_queue_data *kqd;
- unsigned int shift;
int ret = -ENOMEM;
int i;
@@ -400,9 +392,6 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
kqd->latency_targets[i] = kyber_latency_targets[i];
}
- shift = kyber_sched_tags_shift(q);
- kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
-
return kqd;
err_buckets:
@@ -458,9 +447,19 @@ static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
INIT_LIST_HEAD(&kcq->rq_list[i]);
}
-static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
{
struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
+ struct blk_mq_tags *tags = hctx->sched_tags;
+ unsigned int shift = tags->bitmap_tags->sb.shift;
+
+ kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
+
+ sbitmap_queue_min_shallow_depth(tags->bitmap_tags, kqd->async_depth);
+}
+
+static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
struct kyber_hctx_data *khd;
int i;
@@ -502,8 +501,7 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
khd->batching = 0;
hctx->sched_data = khd;
- sbitmap_queue_min_shallow_depth(hctx->sched_tags->bitmap_tags,
- kqd->async_depth);
+ kyber_depth_updated(hctx);
return 0;
@@ -602,7 +600,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
list_move_tail(&rq->queuelist, head);
sbitmap_set_bit(&khd->kcq_map[sched_domain],
rq->mq_ctx->index_hw[hctx->type]);
- blk_mq_sched_request_inserted(rq);
+ trace_block_rq_insert(rq);
spin_unlock(&kcq->lock);
}
}
@@ -1022,6 +1020,7 @@ static struct elevator_type kyber_sched = {
.completed_request = kyber_completed_request,
.dispatch_request = kyber_dispatch_request,
.has_work = kyber_has_work,
+ .depth_updated = kyber_depth_updated,
},
#ifdef CONFIG_BLK_DEBUG_FS
.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
@@ -1029,6 +1028,7 @@ static struct elevator_type kyber_sched = {
#endif
.elevator_attrs = kyber_sched_attrs,
.elevator_name = "kyber",
+ .elevator_features = ELEVATOR_F_MQ_AWARE,
.elevator_owner = THIS_MODULE,
};
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 800ac902809b..f3631a287466 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -18,6 +18,8 @@
#include <linux/rbtree.h>
#include <linux/sbitmap.h>
+#include <trace/events/block.h>
+
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
@@ -386,8 +388,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
spin_lock(&dd->lock);
rq = __dd_dispatch_request(dd);
spin_unlock(&dd->lock);
- if (rq)
- atomic_dec(&rq->mq_hctx->elevator_queued);
return rq;
}
@@ -498,7 +498,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
if (blk_mq_sched_try_insert_merge(q, rq))
return;
- blk_mq_sched_request_inserted(rq);
+ trace_block_rq_insert(rq);
if (at_head || blk_rq_is_passthrough(rq)) {
if (at_head)
@@ -535,7 +535,6 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
rq = list_first_entry(list, struct request, queuelist);
list_del_init(&rq->queuelist);
dd_insert_request(hctx, rq, at_head);
- atomic_inc(&hctx->elevator_queued);
}
spin_unlock(&dd->lock);
}
@@ -582,9 +581,6 @@ static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
struct deadline_data *dd = hctx->queue->elevator->elevator_data;
- if (!atomic_read(&hctx->elevator_queued))
- return false;
-
return !list_empty_careful(&dd->dispatch) ||
!list_empty_careful(&dd->fifo_list[0]) ||
!list_empty_careful(&dd->fifo_list[1]);
diff --git a/block/partitions/core.c b/block/partitions/core.c
index e7d776db803b..1a7558917c47 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -195,7 +195,7 @@ static ssize_t part_start_show(struct device *dev,
static ssize_t part_ro_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_read_only);
+ return sprintf(buf, "%d\n", bdev_read_only(dev_to_bdev(dev)));
}
static ssize_t part_alignment_offset_show(struct device *dev,
@@ -287,13 +287,7 @@ struct device_type part_type = {
*/
void delete_partition(struct block_device *part)
{
- struct gendisk *disk = part->bd_disk;
- struct disk_part_tbl *ptbl =
- rcu_dereference_protected(disk->part_tbl, 1);
-
- rcu_assign_pointer(ptbl->part[part->bd_partno], NULL);
- rcu_assign_pointer(ptbl->last_lookup, NULL);
-
+ xa_erase(&part->bd_disk->part_tbl, part->bd_partno);
kobject_put(part->bd_holder_dir);
device_del(&part->bd_device);
@@ -325,7 +319,6 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
struct device *ddev = disk_to_dev(disk);
struct device *pdev;
struct block_device *bdev;
- struct disk_part_tbl *ptbl;
const char *dname;
int err;
@@ -341,18 +334,13 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
case BLK_ZONED_HA:
pr_info("%s: disabling host aware zoned block device support due to partitions\n",
disk->disk_name);
- disk->queue->limits.zoned = BLK_ZONED_NONE;
+ blk_queue_set_zoned(disk, BLK_ZONED_NONE);
break;
case BLK_ZONED_NONE:
break;
}
- err = disk_expand_part_tbl(disk, partno);
- if (err)
- return ERR_PTR(err);
- ptbl = rcu_dereference_protected(disk->part_tbl, 1);
-
- if (ptbl->part[partno])
+ if (xa_load(&disk->part_tbl, partno))
return ERR_PTR(-EBUSY);
bdev = bdev_alloc(disk, partno);
@@ -361,7 +349,6 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
bdev->bd_start_sect = start;
bdev_set_nr_sectors(bdev, len);
- bdev->bd_read_only = get_disk_ro(disk);
if (info) {
err = -ENOMEM;
@@ -384,7 +371,7 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
err = blk_alloc_devt(bdev, &devt);
if (err)
- goto out_bdput;
+ goto out_put;
pdev->devt = devt;
/* delay uevent until 'holders' subdir is created */
@@ -406,8 +393,10 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
}
/* everything is up and running, commence */
+ err = xa_insert(&disk->part_tbl, partno, bdev, GFP_KERNEL);
+ if (err)
+ goto out_del;
bdev_add(bdev, devt);
- rcu_assign_pointer(ptbl->part[partno], bdev);
/* suppress uevent if the disk suppresses it */
if (!dev_get_uevent_suppress(ddev))
@@ -613,7 +602,7 @@ static bool blk_add_partition(struct gendisk *disk, struct block_device *bdev,
int blk_add_partitions(struct gendisk *disk, struct block_device *bdev)
{
struct parsed_partitions *state;
- int ret = -EAGAIN, p, highest;
+ int ret = -EAGAIN, p;
if (!disk_part_scan_enabled(disk))
return 0;
@@ -661,15 +650,6 @@ int blk_add_partitions(struct gendisk *disk, struct block_device *bdev)
/* tell userspace that the media / partition table may have changed */
kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
- /*
- * Detect the highest partition number and preallocate disk->part_tbl.
- * This is an optimization and not strictly necessary.
- */
- for (p = 1, highest = 0; p < state->limit; p++)
- if (state->parts[p].size)
- highest = p;
- disk_expand_part_tbl(disk, highest);
-
for (p = 1; p < state->limit; p++)
if (!blk_add_partition(disk, bdev, state, p))
goto out_free_state;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index c9f009cc0446..6599bac0a78c 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -357,7 +357,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
* (if he doesn't check that is his problem).
* N.B. a non-zero SCSI status is _not_ necessarily an error.
*/
- blk_execute_rq(q, bd_disk, rq, at_head);
+ blk_execute_rq(bd_disk, rq, at_head);
hdr->duration = jiffies_to_msecs(jiffies - start_time);
@@ -493,7 +493,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
goto error;
}
- blk_execute_rq(q, disk, rq, 0);
+ blk_execute_rq(disk, rq, 0);
err = req->result & 0xff; /* only 8 bit SCSI status */
if (err) {
@@ -532,7 +532,7 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
scsi_req(rq)->cmd[0] = cmd;
scsi_req(rq)->cmd[4] = data;
scsi_req(rq)->cmd_len = 6;
- blk_execute_rq(q, bd_disk, rq, 0);
+ blk_execute_rq(bd_disk, rq, 0);
err = scsi_req(rq)->result ? -EIO : 0;
blk_put_request(rq);
diff --git a/certs/blacklist.c b/certs/blacklist.c
index 6514f9ebc943..bffe4c6f4a9e 100644
--- a/certs/blacklist.c
+++ b/certs/blacklist.c
@@ -14,6 +14,7 @@
#include <linux/ctype.h>
#include <linux/err.h>
#include <linux/seq_file.h>
+#include <linux/uidgid.h>
#include <keys/system_keyring.h>
#include "blacklist.h"
@@ -37,7 +38,7 @@ static int blacklist_vet_description(const char *desc)
found_colon:
desc++;
for (; *desc; desc++) {
- if (!isxdigit(*desc))
+ if (!isxdigit(*desc) || isupper(*desc))
return -EINVAL;
n++;
}
@@ -78,7 +79,7 @@ static struct key_type key_type_blacklist = {
/**
* mark_hash_blacklisted - Add a hash to the system blacklist
- * @hash - The hash as a hex string with a type prefix (eg. "tbs:23aa429783")
+ * @hash: The hash as a hex string with a type prefix (eg. "tbs:23aa429783")
*/
int mark_hash_blacklisted(const char *hash)
{
@@ -156,13 +157,12 @@ static int __init blacklist_init(void)
blacklist_keyring =
keyring_alloc(".blacklist",
- KUIDT_INIT(0), KGIDT_INIT(0),
- current_cred(),
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
(KEY_POS_ALL & ~KEY_POS_SETATTR) |
KEY_USR_VIEW | KEY_USR_READ |
KEY_USR_SEARCH,
KEY_ALLOC_NOT_IN_QUOTA |
- KEY_FLAG_KEEP,
+ KEY_ALLOC_SET_KEEP,
NULL, NULL);
if (IS_ERR(blacklist_keyring))
panic("Can't allocate system blacklist keyring\n");
diff --git a/certs/system_keyring.c b/certs/system_keyring.c
index 798291177186..4b693da488f1 100644
--- a/certs/system_keyring.c
+++ b/certs/system_keyring.c
@@ -11,6 +11,7 @@
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/uidgid.h>
#include <linux/verification.h>
#include <keys/asymmetric-type.h>
#include <keys/system_keyring.h>
@@ -98,7 +99,7 @@ static __init int system_trusted_keyring_init(void)
builtin_trusted_keys =
keyring_alloc(".builtin_trusted_keys",
- KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
((KEY_POS_ALL & ~KEY_POS_SETATTR) |
KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH),
KEY_ALLOC_NOT_IN_QUOTA,
@@ -109,7 +110,7 @@ static __init int system_trusted_keyring_init(void)
#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
secondary_trusted_keys =
keyring_alloc(".secondary_trusted_keys",
- KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
((KEY_POS_ALL & ~KEY_POS_SETATTR) |
KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH |
KEY_USR_WRITE),
diff --git a/crypto/Kconfig b/crypto/Kconfig
index a367fcfeb5d4..15c9c28d9f53 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -210,11 +210,6 @@ config CRYPTO_SIMD
tristate
select CRYPTO_CRYPTD
-config CRYPTO_GLUE_HELPER_X86
- tristate
- depends on X86
- select CRYPTO_SKCIPHER
-
config CRYPTO_ENGINE
tristate
@@ -822,19 +817,6 @@ config CRYPTO_MICHAEL_MIC
should not be used for other purposes because of the weakness
of the algorithm.
-config CRYPTO_RMD128
- tristate "RIPEMD-128 digest algorithm"
- select CRYPTO_HASH
- help
- RIPEMD-128 (ISO/IEC 10118-3:2004).
-
- RIPEMD-128 is a 128-bit cryptographic hash function. It should only
- be used as a secure replacement for RIPEMD. For other use cases,
- RIPEMD-160 should be used.
-
- Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
-
config CRYPTO_RMD160
tristate "RIPEMD-160 digest algorithm"
select CRYPTO_HASH
@@ -852,30 +834,6 @@ config CRYPTO_RMD160
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
-config CRYPTO_RMD256
- tristate "RIPEMD-256 digest algorithm"
- select CRYPTO_HASH
- help
- RIPEMD-256 is an optional extension of RIPEMD-128 with a
- 256 bit hash. It is intended for applications that require
- longer hash-results, without needing a larger security level
- (than RIPEMD-128).
-
- Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
-
-config CRYPTO_RMD320
- tristate "RIPEMD-320 digest algorithm"
- select CRYPTO_HASH
- help
- RIPEMD-320 is an optional extension of RIPEMD-160 with a
- 320 bit hash. It is intended for applications that require
- longer hash-results, without needing a larger security level
- (than RIPEMD-160).
-
- Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
-
config CRYPTO_SHA1
tristate "SHA1 digest algorithm"
select CRYPTO_HASH
@@ -1051,19 +1009,6 @@ config CRYPTO_STREEBOG
https://tc26.ru/upload/iblock/fed/feddbb4d26b685903faa2ba11aea43f6.pdf
https://tools.ietf.org/html/rfc6986
-config CRYPTO_TGR192
- tristate "Tiger digest algorithms"
- select CRYPTO_HASH
- help
- Tiger hash algorithm 192, 160 and 128-bit hashes
-
- Tiger is a hash function optimized for 64-bit processors while
- still having decent performance on 32-bit processors.
- Tiger was developed by Ross Anderson and Eli Biham.
-
- See also:
- <https://www.cs.technion.ac.il/~biham/Reports/Tiger/>.
-
config CRYPTO_WP512
tristate "Whirlpool digest algorithms"
select CRYPTO_HASH
@@ -1133,7 +1078,6 @@ config CRYPTO_AES_NI_INTEL
select CRYPTO_LIB_AES
select CRYPTO_ALGAPI
select CRYPTO_SKCIPHER
- select CRYPTO_GLUE_HELPER_X86 if 64BIT
select CRYPTO_SIMD
help
Use Intel AES-NI instructions for AES algorithm.
@@ -1256,6 +1200,7 @@ config CRYPTO_BLOWFISH_X86_64
depends on X86 && 64BIT
select CRYPTO_SKCIPHER
select CRYPTO_BLOWFISH_COMMON
+ imply CRYPTO_CTR
help
Blowfish cipher algorithm (x86_64), by Bruce Schneier.
@@ -1286,7 +1231,7 @@ config CRYPTO_CAMELLIA_X86_64
depends on X86 && 64BIT
depends on CRYPTO
select CRYPTO_SKCIPHER
- select CRYPTO_GLUE_HELPER_X86
+ imply CRYPTO_CTR
help
Camellia cipher algorithm module (x86_64).
@@ -1304,9 +1249,8 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
depends on CRYPTO
select CRYPTO_SKCIPHER
select CRYPTO_CAMELLIA_X86_64
- select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SIMD
- select CRYPTO_XTS
+ imply CRYPTO_XTS
help
Camellia cipher algorithm module (x86_64/AES-NI/AVX).
@@ -1372,6 +1316,7 @@ config CRYPTO_CAST5_AVX_X86_64
select CRYPTO_CAST5
select CRYPTO_CAST_COMMON
select CRYPTO_SIMD
+ imply CRYPTO_CTR
help
The CAST5 encryption algorithm (synonymous with CAST-128) is
described in RFC2144.
@@ -1393,9 +1338,9 @@ config CRYPTO_CAST6_AVX_X86_64
select CRYPTO_SKCIPHER
select CRYPTO_CAST6
select CRYPTO_CAST_COMMON
- select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SIMD
- select CRYPTO_XTS
+ imply CRYPTO_XTS
+ imply CRYPTO_CTR
help
The CAST6 encryption algorithm (synonymous with CAST-256) is
described in RFC2612.
@@ -1425,6 +1370,7 @@ config CRYPTO_DES3_EDE_X86_64
depends on X86 && 64BIT
select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
+ imply CRYPTO_CTR
help
Triple DES EDE (FIPS 46-3) algorithm.
@@ -1454,18 +1400,6 @@ config CRYPTO_KHAZAD
See also:
<http://www.larc.usp.br/~pbarreto/KhazadPage.html>
-config CRYPTO_SALSA20
- tristate "Salsa20 stream cipher algorithm"
- select CRYPTO_SKCIPHER
- help
- Salsa20 stream cipher algorithm.
-
- Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
- Stream Cipher Project. See <https://www.ecrypt.eu.org/stream/>
-
- The Salsa20 stream cipher algorithm is designed by Daniel J.
- Bernstein <djb@cr.yp.to>. See <https://cr.yp.to/snuffle.html>
-
config CRYPTO_CHACHA20
tristate "ChaCha stream cipher algorithms"
select CRYPTO_LIB_CHACHA_GENERIC
@@ -1526,8 +1460,7 @@ config CRYPTO_SERPENT
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
Keys are allowed to be from 0 to 256 bits in length, in steps
- of 8 bits. Also includes the 'Tnepres' algorithm, a reversed
- variant of Serpent for compatibility with old kerneli.org code.
+ of 8 bits.
See also:
<https://www.cl.cam.ac.uk/~rja14/serpent.html>
@@ -1536,9 +1469,9 @@ config CRYPTO_SERPENT_SSE2_X86_64
tristate "Serpent cipher algorithm (x86_64/SSE2)"
depends on X86 && 64BIT
select CRYPTO_SKCIPHER
- select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_SIMD
+ imply CRYPTO_CTR
help
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
@@ -1555,9 +1488,9 @@ config CRYPTO_SERPENT_SSE2_586
tristate "Serpent cipher algorithm (i586/SSE2)"
depends on X86 && !64BIT
select CRYPTO_SKCIPHER
- select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_SIMD
+ imply CRYPTO_CTR
help
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
@@ -1574,10 +1507,10 @@ config CRYPTO_SERPENT_AVX_X86_64
tristate "Serpent cipher algorithm (x86_64/AVX)"
depends on X86 && 64BIT
select CRYPTO_SKCIPHER
- select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
select CRYPTO_SIMD
- select CRYPTO_XTS
+ imply CRYPTO_XTS
+ imply CRYPTO_CTR
help
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
@@ -1675,6 +1608,7 @@ config CRYPTO_TWOFISH_586
depends on (X86 || UML_X86) && !64BIT
select CRYPTO_ALGAPI
select CRYPTO_TWOFISH_COMMON
+ imply CRYPTO_CTR
help
Twofish cipher algorithm.
@@ -1691,6 +1625,7 @@ config CRYPTO_TWOFISH_X86_64
depends on (X86 || UML_X86) && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_TWOFISH_COMMON
+ imply CRYPTO_CTR
help
Twofish cipher algorithm (x86_64).
@@ -1708,7 +1643,6 @@ config CRYPTO_TWOFISH_X86_64_3WAY
select CRYPTO_SKCIPHER
select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64
- select CRYPTO_GLUE_HELPER_X86
help
Twofish cipher algorithm (x86_64, 3-way parallel).
@@ -1727,11 +1661,11 @@ config CRYPTO_TWOFISH_AVX_X86_64
tristate "Twofish cipher algorithm (x86_64/AVX)"
depends on X86 && 64BIT
select CRYPTO_SKCIPHER
- select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SIMD
select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64
select CRYPTO_TWOFISH_X86_64_3WAY
+ imply CRYPTO_XTS
help
Twofish cipher algorithm (x86_64/AVX).
diff --git a/crypto/Makefile b/crypto/Makefile
index b279483fba50..cf23affb1678 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -67,9 +67,7 @@ obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
obj-$(CONFIG_CRYPTO_MD5) += md5.o
-obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o
obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
-obj-$(CONFIG_CRYPTO_RMD256) += rmd256.o
obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o
obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
@@ -79,7 +77,6 @@ obj-$(CONFIG_CRYPTO_SM3) += sm3_generic.o
obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
-obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b_generic.o
obj-$(CONFIG_CRYPTO_BLAKE2S) += blake2s_generic.o
obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
@@ -141,7 +138,6 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o
-obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o
obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index ce4d5725342c..84450130cb6b 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -32,6 +32,7 @@
#include <crypto/b128ops.h>
#include <crypto/chacha.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/internal/skcipher.h>
@@ -616,3 +617,4 @@ MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("adiantum");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index c475c1129ff2..3f512efaba3a 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -7,6 +7,7 @@
* (C) Neil Horman <nhorman@tuxdriver.com>
*/
+#include <crypto/internal/cipher.h>
#include <crypto/internal/rng.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -470,3 +471,4 @@ subsys_initcall(prng_mod_init);
module_exit(prng_mod_fini);
MODULE_ALIAS_CRYPTO("stdrng");
MODULE_ALIAS_CRYPTO("ansi_cprng");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index 33e77d846caa..ad8af3d70ac0 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -152,7 +152,8 @@ EXPORT_SYMBOL_GPL(asymmetric_key_generate_id);
/**
* asymmetric_key_id_same - Return true if two asymmetric keys IDs are the same.
- * @kid_1, @kid_2: The key IDs to compare
+ * @kid1: The key ID to compare
+ * @kid2: The key ID to compare
*/
bool asymmetric_key_id_same(const struct asymmetric_key_id *kid1,
const struct asymmetric_key_id *kid2)
@@ -168,7 +169,8 @@ EXPORT_SYMBOL_GPL(asymmetric_key_id_same);
/**
* asymmetric_key_id_partial - Return true if two asymmetric keys IDs
* partially match
- * @kid_1, @kid_2: The key IDs to compare
+ * @kid1: The key ID to compare
+ * @kid2: The key ID to compare
*/
bool asymmetric_key_id_partial(const struct asymmetric_key_id *kid1,
const struct asymmetric_key_id *kid2)
diff --git a/crypto/asymmetric_keys/pkcs7_parser.h b/crypto/asymmetric_keys/pkcs7_parser.h
index 6565fdc2d4ca..e17f7ce4fb43 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.h
+++ b/crypto/asymmetric_keys/pkcs7_parser.h
@@ -41,10 +41,9 @@ struct pkcs7_signed_info {
*
* This contains the generated digest of _either_ the Content Data or
* the Authenticated Attributes [RFC2315 9.3]. If the latter, one of
- * the attributes contains the digest of the the Content Data within
- * it.
+ * the attributes contains the digest of the Content Data within it.
*
- * THis also contains the issuing cert serial number and issuer's name
+ * This also contains the issuing cert serial number and issuer's name
* [PKCS#7 or CMS ver 1] or issuing cert's SKID [CMS ver 3].
*/
struct public_key_signature *sig;
diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
index 61af3c4d82cc..b531df2013c4 100644
--- a/crypto/asymmetric_keys/pkcs7_trust.c
+++ b/crypto/asymmetric_keys/pkcs7_trust.c
@@ -16,7 +16,7 @@
#include <crypto/public_key.h>
#include "pkcs7_parser.h"
-/**
+/*
* Check the trust on one PKCS#7 SignedInfo block.
*/
static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index ce49820caa97..0b4d07aa8811 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -141,11 +141,10 @@ int pkcs7_get_digest(struct pkcs7_message *pkcs7, const u8 **buf, u32 *len,
*buf = sinfo->sig->digest;
*len = sinfo->sig->digest_size;
- for (i = 0; i < HASH_ALGO__LAST; i++)
- if (!strcmp(hash_algo_name[i], sinfo->sig->hash_algo)) {
- *hash_algo = i;
- break;
- }
+ i = match_string(hash_algo_name, HASH_ALGO__LAST,
+ sinfo->sig->hash_algo);
+ if (i >= 0)
+ *hash_algo = i;
return 0;
}
diff --git a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c
index 77ebebada29c..84cefe3b3585 100644
--- a/crypto/asymmetric_keys/restrict.c
+++ b/crypto/asymmetric_keys/restrict.c
@@ -244,9 +244,10 @@ int restrict_link_by_key_or_keyring(struct key *dest_keyring,
* @payload: The payload of the new key.
* @trusted: A key or ring of keys that can be used to vouch for the new cert.
*
- * Check the new certificate only against the key or keys passed in the data
- * parameter. If one of those is the signing key and validates the new
- * certificate, then mark the new certificate as being ok to link.
+ * Check the new certificate against the key or keys passed in the data
+ * parameter and against the keys already linked to the destination keyring. If
+ * one of those is the signing key and validates the new certificate, then mark
+ * the new certificate as being ok to link.
*
* Returns 0 if the new certificate was accepted, -ENOKEY if we
* couldn't find a matching parent certificate in the trusted list,
diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c
index a2ffe60e06d3..6704c0355889 100644
--- a/crypto/blake2b_generic.c
+++ b/crypto/blake2b_generic.c
@@ -1,55 +1,27 @@
// SPDX-License-Identifier: (GPL-2.0-only OR Apache-2.0)
/*
- * BLAKE2b reference source code package - reference C implementations
+ * Generic implementation of the BLAKE2b digest algorithm. Based on the BLAKE2b
+ * reference implementation, but it has been heavily modified for use in the
+ * kernel. The reference implementation was:
*
- * Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
- * terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
- * your option. The terms of these licenses can be found at:
+ * Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under
+ * the terms of the CC0, the OpenSSL Licence, or the Apache Public License
+ * 2.0, at your option. The terms of these licenses can be found at:
*
- * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- * - OpenSSL license : https://www.openssl.org/source/license.html
- * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
+ * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
+ * - OpenSSL license : https://www.openssl.org/source/license.html
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
*
- * More information about the BLAKE2 hash function can be found at
- * https://blake2.net.
- *
- * Note: the original sources have been modified for inclusion in linux kernel
- * in terms of coding style, using generic helpers and simplifications of error
- * handling.
+ * More information about BLAKE2 can be found at https://blake2.net.
*/
#include <asm/unaligned.h>
#include <linux/module.h>
-#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
+#include <crypto/internal/blake2b.h>
#include <crypto/internal/hash.h>
-#define BLAKE2B_160_DIGEST_SIZE (160 / 8)
-#define BLAKE2B_256_DIGEST_SIZE (256 / 8)
-#define BLAKE2B_384_DIGEST_SIZE (384 / 8)
-#define BLAKE2B_512_DIGEST_SIZE (512 / 8)
-
-enum blake2b_constant {
- BLAKE2B_BLOCKBYTES = 128,
- BLAKE2B_KEYBYTES = 64,
-};
-
-struct blake2b_state {
- u64 h[8];
- u64 t[2];
- u64 f[2];
- u8 buf[BLAKE2B_BLOCKBYTES];
- size_t buflen;
-};
-
-static const u64 blake2b_IV[8] = {
- 0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL,
- 0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL,
- 0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL,
- 0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL
-};
-
static const u8 blake2b_sigma[12][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
@@ -95,8 +67,8 @@ static void blake2b_increment_counter(struct blake2b_state *S, const u64 inc)
G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \
} while (0)
-static void blake2b_compress(struct blake2b_state *S,
- const u8 block[BLAKE2B_BLOCKBYTES])
+static void blake2b_compress_one_generic(struct blake2b_state *S,
+ const u8 block[BLAKE2B_BLOCK_SIZE])
{
u64 m[16];
u64 v[16];
@@ -108,14 +80,14 @@ static void blake2b_compress(struct blake2b_state *S,
for (i = 0; i < 8; ++i)
v[i] = S->h[i];
- v[ 8] = blake2b_IV[0];
- v[ 9] = blake2b_IV[1];
- v[10] = blake2b_IV[2];
- v[11] = blake2b_IV[3];
- v[12] = blake2b_IV[4] ^ S->t[0];
- v[13] = blake2b_IV[5] ^ S->t[1];
- v[14] = blake2b_IV[6] ^ S->f[0];
- v[15] = blake2b_IV[7] ^ S->f[1];
+ v[ 8] = BLAKE2B_IV0;
+ v[ 9] = BLAKE2B_IV1;
+ v[10] = BLAKE2B_IV2;
+ v[11] = BLAKE2B_IV3;
+ v[12] = BLAKE2B_IV4 ^ S->t[0];
+ v[13] = BLAKE2B_IV5 ^ S->t[1];
+ v[14] = BLAKE2B_IV6 ^ S->f[0];
+ v[15] = BLAKE2B_IV7 ^ S->f[1];
ROUND(0);
ROUND(1);
@@ -139,159 +111,54 @@ static void blake2b_compress(struct blake2b_state *S,
#undef G
#undef ROUND
-struct blake2b_tfm_ctx {
- u8 key[BLAKE2B_KEYBYTES];
- unsigned int keylen;
-};
-
-static int blake2b_setkey(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen)
+void blake2b_compress_generic(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc)
{
- struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);
-
- if (keylen == 0 || keylen > BLAKE2B_KEYBYTES)
- return -EINVAL;
-
- memcpy(tctx->key, key, keylen);
- tctx->keylen = keylen;
-
- return 0;
+ do {
+ blake2b_increment_counter(state, inc);
+ blake2b_compress_one_generic(state, block);
+ block += BLAKE2B_BLOCK_SIZE;
+ } while (--nblocks);
}
+EXPORT_SYMBOL(blake2b_compress_generic);
-static int blake2b_init(struct shash_desc *desc)
+static int crypto_blake2b_update_generic(struct shash_desc *desc,
+ const u8 *in, unsigned int inlen)
{
- struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct blake2b_state *state = shash_desc_ctx(desc);
- const int digestsize = crypto_shash_digestsize(desc->tfm);
-
- memset(state, 0, sizeof(*state));
- memcpy(state->h, blake2b_IV, sizeof(state->h));
-
- /* Parameter block is all zeros except index 0, no xor for 1..7 */
- state->h[0] ^= 0x01010000 | tctx->keylen << 8 | digestsize;
-
- if (tctx->keylen) {
- /*
- * Prefill the buffer with the key, next call to _update or
- * _final will process it
- */
- memcpy(state->buf, tctx->key, tctx->keylen);
- state->buflen = BLAKE2B_BLOCKBYTES;
- }
- return 0;
+ return crypto_blake2b_update(desc, in, inlen, blake2b_compress_generic);
}
-static int blake2b_update(struct shash_desc *desc, const u8 *in,
- unsigned int inlen)
+static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out)
{
- struct blake2b_state *state = shash_desc_ctx(desc);
- const size_t left = state->buflen;
- const size_t fill = BLAKE2B_BLOCKBYTES - left;
-
- if (!inlen)
- return 0;
-
- if (inlen > fill) {
- state->buflen = 0;
- /* Fill buffer */
- memcpy(state->buf + left, in, fill);
- blake2b_increment_counter(state, BLAKE2B_BLOCKBYTES);
- /* Compress */
- blake2b_compress(state, state->buf);
- in += fill;
- inlen -= fill;
- while (inlen > BLAKE2B_BLOCKBYTES) {
- blake2b_increment_counter(state, BLAKE2B_BLOCKBYTES);
- blake2b_compress(state, in);
- in += BLAKE2B_BLOCKBYTES;
- inlen -= BLAKE2B_BLOCKBYTES;
- }
- }
- memcpy(state->buf + state->buflen, in, inlen);
- state->buflen += inlen;
-
- return 0;
+ return crypto_blake2b_final(desc, out, blake2b_compress_generic);
}
-static int blake2b_final(struct shash_desc *desc, u8 *out)
-{
- struct blake2b_state *state = shash_desc_ctx(desc);
- const int digestsize = crypto_shash_digestsize(desc->tfm);
- size_t i;
-
- blake2b_increment_counter(state, state->buflen);
- /* Set last block */
- state->f[0] = (u64)-1;
- /* Padding */
- memset(state->buf + state->buflen, 0, BLAKE2B_BLOCKBYTES - state->buflen);
- blake2b_compress(state, state->buf);
-
- /* Avoid temporary buffer and switch the internal output to LE order */
- for (i = 0; i < ARRAY_SIZE(state->h); i++)
- __cpu_to_le64s(&state->h[i]);
-
- memcpy(out, state->h, digestsize);
- return 0;
-}
+#define BLAKE2B_ALG(name, driver_name, digest_size) \
+ { \
+ .base.cra_name = name, \
+ .base.cra_driver_name = driver_name, \
+ .base.cra_priority = 100, \
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
+ .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \
+ .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \
+ .base.cra_module = THIS_MODULE, \
+ .digestsize = digest_size, \
+ .setkey = crypto_blake2b_setkey, \
+ .init = crypto_blake2b_init, \
+ .update = crypto_blake2b_update_generic, \
+ .final = crypto_blake2b_final_generic, \
+ .descsize = sizeof(struct blake2b_state), \
+ }
static struct shash_alg blake2b_algs[] = {
- {
- .base.cra_name = "blake2b-160",
- .base.cra_driver_name = "blake2b-160-generic",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_blocksize = BLAKE2B_BLOCKBYTES,
- .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx),
- .base.cra_module = THIS_MODULE,
- .digestsize = BLAKE2B_160_DIGEST_SIZE,
- .setkey = blake2b_setkey,
- .init = blake2b_init,
- .update = blake2b_update,
- .final = blake2b_final,
- .descsize = sizeof(struct blake2b_state),
- }, {
- .base.cra_name = "blake2b-256",
- .base.cra_driver_name = "blake2b-256-generic",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_blocksize = BLAKE2B_BLOCKBYTES,
- .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx),
- .base.cra_module = THIS_MODULE,
- .digestsize = BLAKE2B_256_DIGEST_SIZE,
- .setkey = blake2b_setkey,
- .init = blake2b_init,
- .update = blake2b_update,
- .final = blake2b_final,
- .descsize = sizeof(struct blake2b_state),
- }, {
- .base.cra_name = "blake2b-384",
- .base.cra_driver_name = "blake2b-384-generic",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_blocksize = BLAKE2B_BLOCKBYTES,
- .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx),
- .base.cra_module = THIS_MODULE,
- .digestsize = BLAKE2B_384_DIGEST_SIZE,
- .setkey = blake2b_setkey,
- .init = blake2b_init,
- .update = blake2b_update,
- .final = blake2b_final,
- .descsize = sizeof(struct blake2b_state),
- }, {
- .base.cra_name = "blake2b-512",
- .base.cra_driver_name = "blake2b-512-generic",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_blocksize = BLAKE2B_BLOCKBYTES,
- .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx),
- .base.cra_module = THIS_MODULE,
- .digestsize = BLAKE2B_512_DIGEST_SIZE,
- .setkey = blake2b_setkey,
- .init = blake2b_init,
- .update = blake2b_update,
- .final = blake2b_final,
- .descsize = sizeof(struct blake2b_state),
- }
+ BLAKE2B_ALG("blake2b-160", "blake2b-160-generic",
+ BLAKE2B_160_HASH_SIZE),
+ BLAKE2B_ALG("blake2b-256", "blake2b-256-generic",
+ BLAKE2B_256_HASH_SIZE),
+ BLAKE2B_ALG("blake2b-384", "blake2b-384-generic",
+ BLAKE2B_384_HASH_SIZE),
+ BLAKE2B_ALG("blake2b-512", "blake2b-512-generic",
+ BLAKE2B_512_HASH_SIZE),
};
static int __init blake2b_mod_init(void)
diff --git a/crypto/blake2s_generic.c b/crypto/blake2s_generic.c
index 005783ff45ad..72fe480f9bd6 100644
--- a/crypto/blake2s_generic.c
+++ b/crypto/blake2s_generic.c
@@ -1,149 +1,55 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
+ * shash interface to the generic implementation of BLAKE2s
+ *
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/
#include <crypto/internal/blake2s.h>
-#include <crypto/internal/simd.h>
#include <crypto/internal/hash.h>
#include <linux/types.h>
-#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
-static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen)
+static int crypto_blake2s_update_generic(struct shash_desc *desc,
+ const u8 *in, unsigned int inlen)
{
- struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm);
-
- if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE)
- return -EINVAL;
-
- memcpy(tctx->key, key, keylen);
- tctx->keylen = keylen;
-
- return 0;
+ return crypto_blake2s_update(desc, in, inlen, blake2s_compress_generic);
}
-static int crypto_blake2s_init(struct shash_desc *desc)
+static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out)
{
- struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct blake2s_state *state = shash_desc_ctx(desc);
- const int outlen = crypto_shash_digestsize(desc->tfm);
-
- if (tctx->keylen)
- blake2s_init_key(state, outlen, tctx->key, tctx->keylen);
- else
- blake2s_init(state, outlen);
-
- return 0;
+ return crypto_blake2s_final(desc, out, blake2s_compress_generic);
}
-static int crypto_blake2s_update(struct shash_desc *desc, const u8 *in,
- unsigned int inlen)
-{
- struct blake2s_state *state = shash_desc_ctx(desc);
- const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
-
- if (unlikely(!inlen))
- return 0;
- if (inlen > fill) {
- memcpy(state->buf + state->buflen, in, fill);
- blake2s_compress_generic(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
- state->buflen = 0;
- in += fill;
- inlen -= fill;
- }
- if (inlen > BLAKE2S_BLOCK_SIZE) {
- const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
- /* Hash one less (full) block than strictly possible */
- blake2s_compress_generic(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
- in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
- inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+#define BLAKE2S_ALG(name, driver_name, digest_size) \
+ { \
+ .base.cra_name = name, \
+ .base.cra_driver_name = driver_name, \
+ .base.cra_priority = 100, \
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
+ .base.cra_module = THIS_MODULE, \
+ .digestsize = digest_size, \
+ .setkey = crypto_blake2s_setkey, \
+ .init = crypto_blake2s_init, \
+ .update = crypto_blake2s_update_generic, \
+ .final = crypto_blake2s_final_generic, \
+ .descsize = sizeof(struct blake2s_state), \
}
- memcpy(state->buf + state->buflen, in, inlen);
- state->buflen += inlen;
-
- return 0;
-}
-
-static int crypto_blake2s_final(struct shash_desc *desc, u8 *out)
-{
- struct blake2s_state *state = shash_desc_ctx(desc);
-
- blake2s_set_lastblock(state);
- memset(state->buf + state->buflen, 0,
- BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
- blake2s_compress_generic(state, state->buf, 1, state->buflen);
- cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
- memcpy(out, state->h, state->outlen);
- memzero_explicit(state, sizeof(*state));
-
- return 0;
-}
-
-static struct shash_alg blake2s_algs[] = {{
- .base.cra_name = "blake2s-128",
- .base.cra_driver_name = "blake2s-128-generic",
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
- .base.cra_priority = 200,
- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-
- .digestsize = BLAKE2S_128_HASH_SIZE,
- .setkey = crypto_blake2s_setkey,
- .init = crypto_blake2s_init,
- .update = crypto_blake2s_update,
- .final = crypto_blake2s_final,
- .descsize = sizeof(struct blake2s_state),
-}, {
- .base.cra_name = "blake2s-160",
- .base.cra_driver_name = "blake2s-160-generic",
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
- .base.cra_priority = 200,
- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-
- .digestsize = BLAKE2S_160_HASH_SIZE,
- .setkey = crypto_blake2s_setkey,
- .init = crypto_blake2s_init,
- .update = crypto_blake2s_update,
- .final = crypto_blake2s_final,
- .descsize = sizeof(struct blake2s_state),
-}, {
- .base.cra_name = "blake2s-224",
- .base.cra_driver_name = "blake2s-224-generic",
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
- .base.cra_priority = 200,
- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-
- .digestsize = BLAKE2S_224_HASH_SIZE,
- .setkey = crypto_blake2s_setkey,
- .init = crypto_blake2s_init,
- .update = crypto_blake2s_update,
- .final = crypto_blake2s_final,
- .descsize = sizeof(struct blake2s_state),
-}, {
- .base.cra_name = "blake2s-256",
- .base.cra_driver_name = "blake2s-256-generic",
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx),
- .base.cra_priority = 200,
- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
- .digestsize = BLAKE2S_256_HASH_SIZE,
- .setkey = crypto_blake2s_setkey,
- .init = crypto_blake2s_init,
- .update = crypto_blake2s_update,
- .final = crypto_blake2s_final,
- .descsize = sizeof(struct blake2s_state),
-}};
+static struct shash_alg blake2s_algs[] = {
+ BLAKE2S_ALG("blake2s-128", "blake2s-128-generic",
+ BLAKE2S_128_HASH_SIZE),
+ BLAKE2S_ALG("blake2s-160", "blake2s-160-generic",
+ BLAKE2S_160_HASH_SIZE),
+ BLAKE2S_ALG("blake2s-224", "blake2s-224-generic",
+ BLAKE2S_224_HASH_SIZE),
+ BLAKE2S_ALG("blake2s-256", "blake2s-256-generic",
+ BLAKE2S_256_HASH_SIZE),
+};
static int __init blake2s_mod_init(void)
{
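The rewritten update/final paths above delegate to crypto_blake2s_update() and crypto_blake2s_final(), which take the compression function as an argument. A hedged sketch of how an architecture-specific driver could reuse the same helpers, where blake2s_compress_arch() is a hypothetical name for an optimised compressor:

/*
 * Sketch only: an arch driver sharing the library update/final helpers.
 * blake2s_compress_arch() is a hypothetical optimised compression routine;
 * the generic file above passes blake2s_compress_generic instead.
 */
static int crypto_blake2s_update_arch(struct shash_desc *desc,
				      const u8 *in, unsigned int inlen)
{
	return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch);
}

static int crypto_blake2s_final_arch(struct shash_desc *desc, u8 *out)
{
	return crypto_blake2s_final(desc, out, blake2s_compress_arch);
}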
diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
index c3c2041fe0c5..003b52c6880e 100644
--- a/crypto/blowfish_generic.c
+++ b/crypto/blowfish_generic.c
@@ -14,7 +14,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include <linux/crypto.h>
#include <linux/types.h>
#include <crypto/blowfish.h>
@@ -36,12 +36,10 @@
static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *in_blk = (const __be32 *)src;
- __be32 *const out_blk = (__be32 *)dst;
const u32 *P = ctx->p;
const u32 *S = ctx->s;
- u32 yl = be32_to_cpu(in_blk[0]);
- u32 yr = be32_to_cpu(in_blk[1]);
+ u32 yl = get_unaligned_be32(src);
+ u32 yr = get_unaligned_be32(src + 4);
ROUND(yr, yl, 0);
ROUND(yl, yr, 1);
@@ -63,19 +61,17 @@ static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
yl ^= P[16];
yr ^= P[17];
- out_blk[0] = cpu_to_be32(yr);
- out_blk[1] = cpu_to_be32(yl);
+ put_unaligned_be32(yr, dst);
+ put_unaligned_be32(yl, dst + 4);
}
static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *in_blk = (const __be32 *)src;
- __be32 *const out_blk = (__be32 *)dst;
const u32 *P = ctx->p;
const u32 *S = ctx->s;
- u32 yl = be32_to_cpu(in_blk[0]);
- u32 yr = be32_to_cpu(in_blk[1]);
+ u32 yl = get_unaligned_be32(src);
+ u32 yr = get_unaligned_be32(src + 4);
ROUND(yr, yl, 17);
ROUND(yl, yr, 16);
@@ -97,8 +93,8 @@ static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
yl ^= P[1];
yr ^= P[0];
- out_blk[0] = cpu_to_be32(yr);
- out_blk[1] = cpu_to_be32(yl);
+ put_unaligned_be32(yr, dst);
+ put_unaligned_be32(yl, dst + 4);
}
static struct crypto_alg alg = {
@@ -108,7 +104,6 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = BF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bf_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = BF_MIN_KEY_SIZE,
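Dropping .cra_alignmask is what the switch to <asm/unaligned.h> buys: the cipher no longer casts its I/O buffers to __be32 *, so the crypto API no longer has to realign data on its behalf. A minimal sketch of the old and new load patterns:

/*
 * Sketch only: old vs. new 32-bit big-endian loads.  The cast-based form
 * needs 4-byte aligned buffers (hence the old cra_alignmask = 3); the
 * get_unaligned_be32() form works for any alignment.
 */
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/types.h>

static inline u32 load_be32_aligned(const u8 *src)
{
	return be32_to_cpu(*(const __be32 *)src);	/* old pattern */
}

static inline u32 load_be32_any(const u8 *src)
{
	return get_unaligned_be32(src);			/* new pattern */
}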
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index 0b9f409f7370..fd1a88af9e77 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -9,14 +9,6 @@
* https://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html
*/
-/*
- *
- * NOTE --- NOTE --- NOTE --- NOTE
- * This implementation assumes that all memory addresses passed
- * as parameters are four-byte aligned.
- *
- */
-
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -994,16 +986,14 @@ camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key,
static void camellia_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm);
- const __be32 *src = (const __be32 *)in;
- __be32 *dst = (__be32 *)out;
unsigned int max;
u32 tmp[4];
- tmp[0] = be32_to_cpu(src[0]);
- tmp[1] = be32_to_cpu(src[1]);
- tmp[2] = be32_to_cpu(src[2]);
- tmp[3] = be32_to_cpu(src[3]);
+ tmp[0] = get_unaligned_be32(in);
+ tmp[1] = get_unaligned_be32(in + 4);
+ tmp[2] = get_unaligned_be32(in + 8);
+ tmp[3] = get_unaligned_be32(in + 12);
if (cctx->key_length == 16)
max = 24;
@@ -1013,25 +1003,23 @@ static void camellia_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
camellia_do_encrypt(cctx->key_table, tmp, max);
/* do_encrypt returns 0,1 swapped with 2,3 */
- dst[0] = cpu_to_be32(tmp[2]);
- dst[1] = cpu_to_be32(tmp[3]);
- dst[2] = cpu_to_be32(tmp[0]);
- dst[3] = cpu_to_be32(tmp[1]);
+ put_unaligned_be32(tmp[2], out);
+ put_unaligned_be32(tmp[3], out + 4);
+ put_unaligned_be32(tmp[0], out + 8);
+ put_unaligned_be32(tmp[1], out + 12);
}
static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm);
- const __be32 *src = (const __be32 *)in;
- __be32 *dst = (__be32 *)out;
unsigned int max;
u32 tmp[4];
- tmp[0] = be32_to_cpu(src[0]);
- tmp[1] = be32_to_cpu(src[1]);
- tmp[2] = be32_to_cpu(src[2]);
- tmp[3] = be32_to_cpu(src[3]);
+ tmp[0] = get_unaligned_be32(in);
+ tmp[1] = get_unaligned_be32(in + 4);
+ tmp[2] = get_unaligned_be32(in + 8);
+ tmp[3] = get_unaligned_be32(in + 12);
if (cctx->key_length == 16)
max = 24;
@@ -1041,10 +1029,10 @@ static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
camellia_do_decrypt(cctx->key_table, tmp, max);
/* do_decrypt returns 0,1 swapped with 2,3 */
- dst[0] = cpu_to_be32(tmp[2]);
- dst[1] = cpu_to_be32(tmp[3]);
- dst[2] = cpu_to_be32(tmp[0]);
- dst[3] = cpu_to_be32(tmp[1]);
+ put_unaligned_be32(tmp[2], out);
+ put_unaligned_be32(tmp[3], out + 4);
+ put_unaligned_be32(tmp[0], out + 8);
+ put_unaligned_be32(tmp[1], out + 12);
}
static struct crypto_alg camellia_alg = {
@@ -1054,7 +1042,6 @@ static struct crypto_alg camellia_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index 4095085d4e51..0257c14cefc2 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -13,7 +13,7 @@
*/
-#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/module.h>
@@ -302,8 +302,6 @@ static const u32 sb8[256] = {
void __cast5_encrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf)
{
- const __be32 *src = (const __be32 *)inbuf;
- __be32 *dst = (__be32 *)outbuf;
u32 l, r, t;
u32 I; /* used by the Fx macros */
u32 *Km;
@@ -315,8 +313,8 @@ void __cast5_encrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf)
/* (L0,R0) <-- (m1...m64). (Split the plaintext into left and
* right 32-bit halves L0 = m1...m32 and R0 = m33...m64.)
*/
- l = be32_to_cpu(src[0]);
- r = be32_to_cpu(src[1]);
+ l = get_unaligned_be32(inbuf);
+ r = get_unaligned_be32(inbuf + 4);
/* (16 rounds) for i from 1 to 16, compute Li and Ri as follows:
* Li = Ri-1;
@@ -347,8 +345,8 @@ void __cast5_encrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf)
/* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and
* concatenate to form the ciphertext.) */
- dst[0] = cpu_to_be32(r);
- dst[1] = cpu_to_be32(l);
+ put_unaligned_be32(r, outbuf);
+ put_unaligned_be32(l, outbuf + 4);
}
EXPORT_SYMBOL_GPL(__cast5_encrypt);
@@ -359,8 +357,6 @@ static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
void __cast5_decrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf)
{
- const __be32 *src = (const __be32 *)inbuf;
- __be32 *dst = (__be32 *)outbuf;
u32 l, r, t;
u32 I;
u32 *Km;
@@ -369,8 +365,8 @@ void __cast5_decrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf)
Km = c->Km;
Kr = c->Kr;
- l = be32_to_cpu(src[0]);
- r = be32_to_cpu(src[1]);
+ l = get_unaligned_be32(inbuf);
+ r = get_unaligned_be32(inbuf + 4);
if (!(c->rr)) {
t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]);
@@ -391,8 +387,8 @@ void __cast5_decrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf)
t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
- dst[0] = cpu_to_be32(r);
- dst[1] = cpu_to_be32(l);
+ put_unaligned_be32(r, outbuf);
+ put_unaligned_be32(l, outbuf + 4);
}
EXPORT_SYMBOL_GPL(__cast5_decrypt);
@@ -513,7 +509,6 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAST5_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast5_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index c77ff6c8a2b2..75346380aa0b 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -10,7 +10,7 @@
*/
-#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/module.h>
@@ -172,16 +172,14 @@ static inline void QBAR(u32 *block, const u8 *Kr, const u32 *Km)
void __cast6_encrypt(const void *ctx, u8 *outbuf, const u8 *inbuf)
{
const struct cast6_ctx *c = ctx;
- const __be32 *src = (const __be32 *)inbuf;
- __be32 *dst = (__be32 *)outbuf;
u32 block[4];
const u32 *Km;
const u8 *Kr;
- block[0] = be32_to_cpu(src[0]);
- block[1] = be32_to_cpu(src[1]);
- block[2] = be32_to_cpu(src[2]);
- block[3] = be32_to_cpu(src[3]);
+ block[0] = get_unaligned_be32(inbuf);
+ block[1] = get_unaligned_be32(inbuf + 4);
+ block[2] = get_unaligned_be32(inbuf + 8);
+ block[3] = get_unaligned_be32(inbuf + 12);
Km = c->Km[0]; Kr = c->Kr[0]; Q(block, Kr, Km);
Km = c->Km[1]; Kr = c->Kr[1]; Q(block, Kr, Km);
@@ -196,10 +194,10 @@ void __cast6_encrypt(const void *ctx, u8 *outbuf, const u8 *inbuf)
Km = c->Km[10]; Kr = c->Kr[10]; QBAR(block, Kr, Km);
Km = c->Km[11]; Kr = c->Kr[11]; QBAR(block, Kr, Km);
- dst[0] = cpu_to_be32(block[0]);
- dst[1] = cpu_to_be32(block[1]);
- dst[2] = cpu_to_be32(block[2]);
- dst[3] = cpu_to_be32(block[3]);
+ put_unaligned_be32(block[0], outbuf);
+ put_unaligned_be32(block[1], outbuf + 4);
+ put_unaligned_be32(block[2], outbuf + 8);
+ put_unaligned_be32(block[3], outbuf + 12);
}
EXPORT_SYMBOL_GPL(__cast6_encrypt);
@@ -211,16 +209,14 @@ static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
void __cast6_decrypt(const void *ctx, u8 *outbuf, const u8 *inbuf)
{
const struct cast6_ctx *c = ctx;
- const __be32 *src = (const __be32 *)inbuf;
- __be32 *dst = (__be32 *)outbuf;
u32 block[4];
const u32 *Km;
const u8 *Kr;
- block[0] = be32_to_cpu(src[0]);
- block[1] = be32_to_cpu(src[1]);
- block[2] = be32_to_cpu(src[2]);
- block[3] = be32_to_cpu(src[3]);
+ block[0] = get_unaligned_be32(inbuf);
+ block[1] = get_unaligned_be32(inbuf + 4);
+ block[2] = get_unaligned_be32(inbuf + 8);
+ block[3] = get_unaligned_be32(inbuf + 12);
Km = c->Km[11]; Kr = c->Kr[11]; Q(block, Kr, Km);
Km = c->Km[10]; Kr = c->Kr[10]; Q(block, Kr, Km);
@@ -235,10 +231,10 @@ void __cast6_decrypt(const void *ctx, u8 *outbuf, const u8 *inbuf)
Km = c->Km[1]; Kr = c->Kr[1]; QBAR(block, Kr, Km);
Km = c->Km[0]; Kr = c->Kr[0]; QBAR(block, Kr, Km);
- dst[0] = cpu_to_be32(block[0]);
- dst[1] = cpu_to_be32(block[1]);
- dst[2] = cpu_to_be32(block[2]);
- dst[3] = cpu_to_be32(block[3]);
+ put_unaligned_be32(block[0], outbuf);
+ put_unaligned_be32(block[1], outbuf + 4);
+ put_unaligned_be32(block[2], outbuf + 8);
+ put_unaligned_be32(block[3], outbuf + 12);
}
EXPORT_SYMBOL_GPL(__cast6_decrypt);
@@ -254,7 +250,6 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAST6_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast6_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
diff --git a/crypto/cbc.c b/crypto/cbc.c
index 0d9509dff891..6c03e96b945f 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -6,6 +6,7 @@
*/
#include <crypto/algapi.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 494d70901186..6b815ece51c6 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -6,6 +6,7 @@
*/
#include <crypto/internal/aead.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
@@ -954,3 +955,4 @@ MODULE_ALIAS_CRYPTO("ccm_base");
MODULE_ALIAS_CRYPTO("rfc4309");
MODULE_ALIAS_CRYPTO("ccm");
MODULE_ALIAS_CRYPTO("cbcmac");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/crypto/cfb.c b/crypto/cfb.c
index 4e5219bbcd19..0d664dfb47bc 100644
--- a/crypto/cfb.c
+++ b/crypto/cfb.c
@@ -20,6 +20,7 @@
*/
#include <crypto/algapi.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -250,3 +251,4 @@ module_exit(crypto_cfb_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CFB block cipher mode of operation");
MODULE_ALIAS_CRYPTO("cfb");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/crypto/cipher.c b/crypto/cipher.c
index fd78150deb1c..b47141ed4a9f 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -9,6 +9,7 @@
*/
#include <crypto/algapi.h>
+#include <crypto/internal/cipher.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
@@ -53,7 +54,7 @@ int crypto_cipher_setkey(struct crypto_cipher *tfm,
return cia->cia_setkey(crypto_cipher_tfm(tfm), key, keylen);
}
-EXPORT_SYMBOL_GPL(crypto_cipher_setkey);
+EXPORT_SYMBOL_NS_GPL(crypto_cipher_setkey, CRYPTO_INTERNAL);
static inline void cipher_crypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src, bool enc)
@@ -81,11 +82,11 @@ void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
{
cipher_crypt_one(tfm, dst, src, true);
}
-EXPORT_SYMBOL_GPL(crypto_cipher_encrypt_one);
+EXPORT_SYMBOL_NS_GPL(crypto_cipher_encrypt_one, CRYPTO_INTERNAL);
void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src)
{
cipher_crypt_one(tfm, dst, src, false);
}
-EXPORT_SYMBOL_GPL(crypto_cipher_decrypt_one);
+EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, CRYPTO_INTERNAL);
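Because crypto_cipher_setkey() and the encrypt/decrypt-one helpers now live in the CRYPTO_INTERNAL symbol namespace, every module that keeps calling them has to import that namespace, which is why the mode and template files below each gain MODULE_IMPORT_NS(CRYPTO_INTERNAL). A hedged sketch of what such a consumer looks like:

/*
 * Sketch only: a module using the single-block cipher API after the move to
 * the CRYPTO_INTERNAL namespace.  Without the MODULE_IMPORT_NS() line,
 * modpost is expected to reject the use of the namespaced symbols.
 */
#include <crypto/internal/cipher.h>
#include <linux/module.h>

static void demo_encrypt_one(struct crypto_cipher *tfm, u8 *dst, const u8 *src)
{
	crypto_cipher_encrypt_one(tfm, dst, src);
}

MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_LICENSE("GPL");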
diff --git a/crypto/cmac.c b/crypto/cmac.c
index df36be1efb81..f4a5d3bfb376 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -11,6 +11,7 @@
* Author: Kazunori Miyazawa <miyazawa@linux-ipv6.org>
*/
+#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <linux/err.h>
#include <linux/kernel.h>
@@ -313,3 +314,4 @@ module_exit(crypto_cmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CMAC keyed hash algorithm");
MODULE_ALIAS_CRYPTO("cmac");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/crypto/ctr.c b/crypto/ctr.c
index c39fcffba27f..23c698b22013 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -7,6 +7,7 @@
#include <crypto/algapi.h>
#include <crypto/ctr.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -358,3 +359,4 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CTR block cipher mode of operation");
MODULE_ALIAS_CRYPTO("rfc3686");
MODULE_ALIAS_CRYPTO("ctr");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 3132967a1749..1b4587e0ddad 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -98,6 +98,7 @@
*/
#include <crypto/drbg.h>
+#include <crypto/internal/cipher.h>
#include <linux/kernel.h>
/***************************************************************
@@ -2161,3 +2162,4 @@ MODULE_DESCRIPTION("NIST SP800-90A Deterministic Random Bit Generator (DRBG) "
CRYPTO_DRBG_HMAC_STRING
CRYPTO_DRBG_CTR_STRING);
MODULE_ALIAS_CRYPTO("stdrng");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/crypto/ecb.c b/crypto/ecb.c
index 69a687cbdf21..71fbb0543d64 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -6,6 +6,7 @@
*/
#include <crypto/algapi.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c
index 66fcb2ea8154..fca63b559f65 100644
--- a/crypto/ecdh_helper.c
+++ b/crypto/ecdh_helper.c
@@ -67,6 +67,9 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len,
if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH)
return -EINVAL;
+ if (unlikely(len < secret.len))
+ return -EINVAL;
+
ptr = ecdh_unpack_data(&params->curve_id, ptr, sizeof(params->curve_id));
ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
if (secret.len != crypto_ecdh_key_len(params))
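The new check guards against a secret header whose encoded length exceeds the buffer the caller actually passed in, so the subsequent ecdh_unpack_data() calls cannot read past the end of buf. A hedged sketch of the general decode pattern, with hypothetical names:

/*
 * Sketch only, hypothetical types/names: validate the caller-supplied length
 * against the self-describing header before unpacking any fields.
 */
#include <linux/errno.h>
#include <linux/string.h>

struct demo_header {
	unsigned short type;
	unsigned short claimed_len;	/* total encoded length, incl. header */
};

static int demo_decode(const unsigned char *buf, unsigned int len)
{
	struct demo_header hdr;

	if (len < sizeof(hdr))		/* header itself must fit */
		return -EINVAL;
	memcpy(&hdr, buf, sizeof(hdr));
	if (len < hdr.claimed_len)	/* claimed payload must fit too */
		return -EINVAL;
	/* only now walk buf using offsets derived from hdr */
	return 0;
}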
diff --git a/crypto/essiv.c b/crypto/essiv.c
index d012be23d496..8bcc5bdcb2a9 100644
--- a/crypto/essiv.c
+++ b/crypto/essiv.c
@@ -30,6 +30,7 @@
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
@@ -643,3 +644,4 @@ module_exit(essiv_module_exit);
MODULE_DESCRIPTION("ESSIV skcipher/aead wrapper for block encryption");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("essiv");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index 58f935315cf8..c36ea0c8be98 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -396,7 +396,6 @@ static struct crypto_alg fcrypt_alg = {
.cra_blocksize = 8,
.cra_ctxsize = sizeof(struct fcrypt_ctx),
.cra_module = THIS_MODULE,
- .cra_alignmask = 3,
.cra_u = { .cipher = {
.cia_min_keysize = 8,
.cia_max_keysize = 8,
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
index 0355cce21b1e..3517773bc7f7 100644
--- a/crypto/keywrap.c
+++ b/crypto/keywrap.c
@@ -85,6 +85,7 @@
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
struct crypto_kw_block {
@@ -316,3 +317,4 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("Key Wrapping (RFC3394 / NIST SP800-38F)");
MODULE_ALIAS_CRYPTO("kw");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index 63350c4ad461..f4c31049601c 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -7,7 +7,7 @@
* Copyright (c) 2004 Jouni Malinen <j@w1.fi>
*/
#include <crypto/internal/hash.h>
-#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
@@ -19,7 +19,7 @@ struct michael_mic_ctx {
};
struct michael_mic_desc_ctx {
- u8 pending[4];
+ __le32 pending;
size_t pending_len;
u32 l, r;
@@ -60,13 +60,12 @@ static int michael_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);
- const __le32 *src;
if (mctx->pending_len) {
int flen = 4 - mctx->pending_len;
if (flen > len)
flen = len;
- memcpy(&mctx->pending[mctx->pending_len], data, flen);
+ memcpy((u8 *)&mctx->pending + mctx->pending_len, data, flen);
mctx->pending_len += flen;
data += flen;
len -= flen;
@@ -74,23 +73,21 @@ static int michael_update(struct shash_desc *desc, const u8 *data,
if (mctx->pending_len < 4)
return 0;
- src = (const __le32 *)mctx->pending;
- mctx->l ^= le32_to_cpup(src);
+ mctx->l ^= le32_to_cpu(mctx->pending);
michael_block(mctx->l, mctx->r);
mctx->pending_len = 0;
}
- src = (const __le32 *)data;
-
while (len >= 4) {
- mctx->l ^= le32_to_cpup(src++);
+ mctx->l ^= get_unaligned_le32(data);
michael_block(mctx->l, mctx->r);
+ data += 4;
len -= 4;
}
if (len > 0) {
mctx->pending_len = len;
- memcpy(mctx->pending, src, len);
+ memcpy(&mctx->pending, data, len);
}
return 0;
@@ -100,8 +97,7 @@ static int michael_update(struct shash_desc *desc, const u8 *data,
static int michael_final(struct shash_desc *desc, u8 *out)
{
struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);
- u8 *data = mctx->pending;
- __le32 *dst = (__le32 *)out;
+ u8 *data = (u8 *)&mctx->pending;
/* Last block and padding (0x5a, 4..7 x 0) */
switch (mctx->pending_len) {
@@ -123,8 +119,8 @@ static int michael_final(struct shash_desc *desc, u8 *out)
/* l ^= 0; */
michael_block(mctx->l, mctx->r);
- dst[0] = cpu_to_le32(mctx->l);
- dst[1] = cpu_to_le32(mctx->r);
+ put_unaligned_le32(mctx->l, out);
+ put_unaligned_le32(mctx->r, out + 4);
return 0;
}
@@ -135,13 +131,11 @@ static int michael_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct michael_mic_ctx *mctx = crypto_shash_ctx(tfm);
- const __le32 *data = (const __le32 *)key;
-
if (keylen != 8)
return -EINVAL;
- mctx->l = le32_to_cpu(data[0]);
- mctx->r = le32_to_cpu(data[1]);
+ mctx->l = get_unaligned_le32(key);
+ mctx->r = get_unaligned_le32(key + 4);
return 0;
}
@@ -156,7 +150,6 @@ static struct shash_alg alg = {
.cra_name = "michael_mic",
.cra_driver_name = "michael_mic-generic",
.cra_blocksize = 8,
- .cra_alignmask = 3,
.cra_ctxsize = sizeof(struct michael_mic_ctx),
.cra_module = THIS_MODULE,
}
diff --git a/crypto/ofb.c b/crypto/ofb.c
index 2ec68e3f2c55..b630fdecceee 100644
--- a/crypto/ofb.c
+++ b/crypto/ofb.c
@@ -8,6 +8,7 @@
*/
#include <crypto/algapi.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -102,3 +103,4 @@ module_exit(crypto_ofb_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("OFB block cipher mode of operation");
MODULE_ALIAS_CRYPTO("ofb");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index ae921fb74dc9..7030f59e46b6 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -10,6 +10,7 @@
*/
#include <crypto/algapi.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -191,3 +192,4 @@ module_exit(crypto_pcbc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PCBC block cipher mode of operation");
MODULE_ALIAS_CRYPTO("pcbc");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/crypto/ripemd.h b/crypto/ripemd.h
index 93edbf52197d..b977785e2a62 100644
--- a/crypto/ripemd.h
+++ b/crypto/ripemd.h
@@ -6,29 +6,15 @@
#ifndef _CRYPTO_RMD_H
#define _CRYPTO_RMD_H
-#define RMD128_DIGEST_SIZE 16
-#define RMD128_BLOCK_SIZE 64
-
#define RMD160_DIGEST_SIZE 20
#define RMD160_BLOCK_SIZE 64
-#define RMD256_DIGEST_SIZE 32
-#define RMD256_BLOCK_SIZE 64
-
-#define RMD320_DIGEST_SIZE 40
-#define RMD320_BLOCK_SIZE 64
-
/* initial values */
#define RMD_H0 0x67452301UL
#define RMD_H1 0xefcdab89UL
#define RMD_H2 0x98badcfeUL
#define RMD_H3 0x10325476UL
#define RMD_H4 0xc3d2e1f0UL
-#define RMD_H5 0x76543210UL
-#define RMD_H6 0xfedcba98UL
-#define RMD_H7 0x89abcdefUL
-#define RMD_H8 0x01234567UL
-#define RMD_H9 0x3c2d1e0fUL
/* constants */
#define RMD_K1 0x00000000UL
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
deleted file mode 100644
index 29308fb97e7e..000000000000
--- a/crypto/rmd128.c
+++ /dev/null
@@ -1,323 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cryptographic API.
- *
- * RIPEMD-128 - RACE Integrity Primitives Evaluation Message Digest.
- *
- * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
- *
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
- */
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-#include "ripemd.h"
-
-struct rmd128_ctx {
- u64 byte_count;
- u32 state[4];
- __le32 buffer[16];
-};
-
-#define K1 RMD_K1
-#define K2 RMD_K2
-#define K3 RMD_K3
-#define K4 RMD_K4
-#define KK1 RMD_K6
-#define KK2 RMD_K7
-#define KK3 RMD_K8
-#define KK4 RMD_K1
-
-#define F1(x, y, z) (x ^ y ^ z) /* XOR */
-#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
-#define F3(x, y, z) ((x | ~y) ^ z)
-#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
-
-#define ROUND(a, b, c, d, f, k, x, s) { \
- (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
- (a) = rol32((a), (s)); \
-}
-
-static void rmd128_transform(u32 *state, const __le32 *in)
-{
- u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd;
-
- /* Initialize left lane */
- aa = state[0];
- bb = state[1];
- cc = state[2];
- dd = state[3];
-
- /* Initialize right lane */
- aaa = state[0];
- bbb = state[1];
- ccc = state[2];
- ddd = state[3];
-
- /* round 1: left lane */
- ROUND(aa, bb, cc, dd, F1, K1, in[0], 11);
- ROUND(dd, aa, bb, cc, F1, K1, in[1], 14);
- ROUND(cc, dd, aa, bb, F1, K1, in[2], 15);
- ROUND(bb, cc, dd, aa, F1, K1, in[3], 12);
- ROUND(aa, bb, cc, dd, F1, K1, in[4], 5);
- ROUND(dd, aa, bb, cc, F1, K1, in[5], 8);
- ROUND(cc, dd, aa, bb, F1, K1, in[6], 7);
- ROUND(bb, cc, dd, aa, F1, K1, in[7], 9);
- ROUND(aa, bb, cc, dd, F1, K1, in[8], 11);
- ROUND(dd, aa, bb, cc, F1, K1, in[9], 13);
- ROUND(cc, dd, aa, bb, F1, K1, in[10], 14);
- ROUND(bb, cc, dd, aa, F1, K1, in[11], 15);
- ROUND(aa, bb, cc, dd, F1, K1, in[12], 6);
- ROUND(dd, aa, bb, cc, F1, K1, in[13], 7);
- ROUND(cc, dd, aa, bb, F1, K1, in[14], 9);
- ROUND(bb, cc, dd, aa, F1, K1, in[15], 8);
-
- /* round 2: left lane */
- ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
- ROUND(dd, aa, bb, cc, F2, K2, in[4], 6);
- ROUND(cc, dd, aa, bb, F2, K2, in[13], 8);
- ROUND(bb, cc, dd, aa, F2, K2, in[1], 13);
- ROUND(aa, bb, cc, dd, F2, K2, in[10], 11);
- ROUND(dd, aa, bb, cc, F2, K2, in[6], 9);
- ROUND(cc, dd, aa, bb, F2, K2, in[15], 7);
- ROUND(bb, cc, dd, aa, F2, K2, in[3], 15);
- ROUND(aa, bb, cc, dd, F2, K2, in[12], 7);
- ROUND(dd, aa, bb, cc, F2, K2, in[0], 12);
- ROUND(cc, dd, aa, bb, F2, K2, in[9], 15);
- ROUND(bb, cc, dd, aa, F2, K2, in[5], 9);
- ROUND(aa, bb, cc, dd, F2, K2, in[2], 11);
- ROUND(dd, aa, bb, cc, F2, K2, in[14], 7);
- ROUND(cc, dd, aa, bb, F2, K2, in[11], 13);
- ROUND(bb, cc, dd, aa, F2, K2, in[8], 12);
-
- /* round 3: left lane */
- ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
- ROUND(dd, aa, bb, cc, F3, K3, in[10], 13);
- ROUND(cc, dd, aa, bb, F3, K3, in[14], 6);
- ROUND(bb, cc, dd, aa, F3, K3, in[4], 7);
- ROUND(aa, bb, cc, dd, F3, K3, in[9], 14);
- ROUND(dd, aa, bb, cc, F3, K3, in[15], 9);
- ROUND(cc, dd, aa, bb, F3, K3, in[8], 13);
- ROUND(bb, cc, dd, aa, F3, K3, in[1], 15);
- ROUND(aa, bb, cc, dd, F3, K3, in[2], 14);
- ROUND(dd, aa, bb, cc, F3, K3, in[7], 8);
- ROUND(cc, dd, aa, bb, F3, K3, in[0], 13);
- ROUND(bb, cc, dd, aa, F3, K3, in[6], 6);
- ROUND(aa, bb, cc, dd, F3, K3, in[13], 5);
- ROUND(dd, aa, bb, cc, F3, K3, in[11], 12);
- ROUND(cc, dd, aa, bb, F3, K3, in[5], 7);
- ROUND(bb, cc, dd, aa, F3, K3, in[12], 5);
-
- /* round 4: left lane */
- ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
- ROUND(dd, aa, bb, cc, F4, K4, in[9], 12);
- ROUND(cc, dd, aa, bb, F4, K4, in[11], 14);
- ROUND(bb, cc, dd, aa, F4, K4, in[10], 15);
- ROUND(aa, bb, cc, dd, F4, K4, in[0], 14);
- ROUND(dd, aa, bb, cc, F4, K4, in[8], 15);
- ROUND(cc, dd, aa, bb, F4, K4, in[12], 9);
- ROUND(bb, cc, dd, aa, F4, K4, in[4], 8);
- ROUND(aa, bb, cc, dd, F4, K4, in[13], 9);
- ROUND(dd, aa, bb, cc, F4, K4, in[3], 14);
- ROUND(cc, dd, aa, bb, F4, K4, in[7], 5);
- ROUND(bb, cc, dd, aa, F4, K4, in[15], 6);
- ROUND(aa, bb, cc, dd, F4, K4, in[14], 8);
- ROUND(dd, aa, bb, cc, F4, K4, in[5], 6);
- ROUND(cc, dd, aa, bb, F4, K4, in[6], 5);
- ROUND(bb, cc, dd, aa, F4, K4, in[2], 12);
-
- /* round 1: right lane */
- ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8);
- ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9);
- ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9);
- ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11);
- ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13);
- ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15);
- ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15);
- ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5);
- ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7);
- ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7);
- ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8);
- ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11);
- ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14);
- ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14);
- ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12);
- ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);
-
- /* round 2: right lane */
- ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9);
- ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13);
- ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15);
- ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7);
- ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12);
- ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8);
- ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9);
- ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11);
- ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7);
- ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7);
- ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12);
- ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7);
- ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6);
- ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15);
- ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13);
- ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);
-
- /* round 3: right lane */
- ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9);
- ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7);
- ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15);
- ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11);
- ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8);
- ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6);
- ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6);
- ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14);
- ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12);
- ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13);
- ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5);
- ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14);
- ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13);
- ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13);
- ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7);
- ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);
-
- /* round 4: right lane */
- ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15);
- ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5);
- ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8);
- ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11);
- ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14);
- ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14);
- ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6);
- ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14);
- ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6);
- ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9);
- ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12);
- ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9);
- ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12);
- ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5);
- ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15);
- ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);
-
- /* combine results */
- ddd += cc + state[1]; /* final result for state[0] */
- state[1] = state[2] + dd + aaa;
- state[2] = state[3] + aa + bbb;
- state[3] = state[0] + bb + ccc;
- state[0] = ddd;
-}
-
-static int rmd128_init(struct shash_desc *desc)
-{
- struct rmd128_ctx *rctx = shash_desc_ctx(desc);
-
- rctx->byte_count = 0;
-
- rctx->state[0] = RMD_H0;
- rctx->state[1] = RMD_H1;
- rctx->state[2] = RMD_H2;
- rctx->state[3] = RMD_H3;
-
- memset(rctx->buffer, 0, sizeof(rctx->buffer));
-
- return 0;
-}
-
-static int rmd128_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct rmd128_ctx *rctx = shash_desc_ctx(desc);
- const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
-
- rctx->byte_count += len;
-
- /* Enough space in buffer? If so copy and we're done */
- if (avail > len) {
- memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
- data, len);
- goto out;
- }
-
- memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
- data, avail);
-
- rmd128_transform(rctx->state, rctx->buffer);
- data += avail;
- len -= avail;
-
- while (len >= sizeof(rctx->buffer)) {
- memcpy(rctx->buffer, data, sizeof(rctx->buffer));
- rmd128_transform(rctx->state, rctx->buffer);
- data += sizeof(rctx->buffer);
- len -= sizeof(rctx->buffer);
- }
-
- memcpy(rctx->buffer, data, len);
-
-out:
- return 0;
-}
-
-/* Add padding and return the message digest. */
-static int rmd128_final(struct shash_desc *desc, u8 *out)
-{
- struct rmd128_ctx *rctx = shash_desc_ctx(desc);
- u32 i, index, padlen;
- __le64 bits;
- __le32 *dst = (__le32 *)out;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_le64(rctx->byte_count << 3);
-
- /* Pad out to 56 mod 64 */
- index = rctx->byte_count & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- rmd128_update(desc, padding, padlen);
-
- /* Append length */
- rmd128_update(desc, (const u8 *)&bits, sizeof(bits));
-
- /* Store state in digest */
- for (i = 0; i < 4; i++)
- dst[i] = cpu_to_le32p(&rctx->state[i]);
-
- /* Wipe context */
- memset(rctx, 0, sizeof(*rctx));
-
- return 0;
-}
-
-static struct shash_alg alg = {
- .digestsize = RMD128_DIGEST_SIZE,
- .init = rmd128_init,
- .update = rmd128_update,
- .final = rmd128_final,
- .descsize = sizeof(struct rmd128_ctx),
- .base = {
- .cra_name = "rmd128",
- .cra_driver_name = "rmd128-generic",
- .cra_blocksize = RMD128_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static int __init rmd128_mod_init(void)
-{
- return crypto_register_shash(&alg);
-}
-
-static void __exit rmd128_mod_fini(void)
-{
- crypto_unregister_shash(&alg);
-}
-
-subsys_initcall(rmd128_mod_init);
-module_exit(rmd128_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
-MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
-MODULE_ALIAS_CRYPTO("rmd128");
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
deleted file mode 100644
index 3c730e9de5fd..000000000000
--- a/crypto/rmd256.c
+++ /dev/null
@@ -1,342 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cryptographic API.
- *
- * RIPEMD-256 - RACE Integrity Primitives Evaluation Message Digest.
- *
- * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
- *
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
- */
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-#include "ripemd.h"
-
-struct rmd256_ctx {
- u64 byte_count;
- u32 state[8];
- __le32 buffer[16];
-};
-
-#define K1 RMD_K1
-#define K2 RMD_K2
-#define K3 RMD_K3
-#define K4 RMD_K4
-#define KK1 RMD_K6
-#define KK2 RMD_K7
-#define KK3 RMD_K8
-#define KK4 RMD_K1
-
-#define F1(x, y, z) (x ^ y ^ z) /* XOR */
-#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
-#define F3(x, y, z) ((x | ~y) ^ z)
-#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
-
-#define ROUND(a, b, c, d, f, k, x, s) { \
- (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
- (a) = rol32((a), (s)); \
-}
-
-static void rmd256_transform(u32 *state, const __le32 *in)
-{
- u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd;
-
- /* Initialize left lane */
- aa = state[0];
- bb = state[1];
- cc = state[2];
- dd = state[3];
-
- /* Initialize right lane */
- aaa = state[4];
- bbb = state[5];
- ccc = state[6];
- ddd = state[7];
-
- /* round 1: left lane */
- ROUND(aa, bb, cc, dd, F1, K1, in[0], 11);
- ROUND(dd, aa, bb, cc, F1, K1, in[1], 14);
- ROUND(cc, dd, aa, bb, F1, K1, in[2], 15);
- ROUND(bb, cc, dd, aa, F1, K1, in[3], 12);
- ROUND(aa, bb, cc, dd, F1, K1, in[4], 5);
- ROUND(dd, aa, bb, cc, F1, K1, in[5], 8);
- ROUND(cc, dd, aa, bb, F1, K1, in[6], 7);
- ROUND(bb, cc, dd, aa, F1, K1, in[7], 9);
- ROUND(aa, bb, cc, dd, F1, K1, in[8], 11);
- ROUND(dd, aa, bb, cc, F1, K1, in[9], 13);
- ROUND(cc, dd, aa, bb, F1, K1, in[10], 14);
- ROUND(bb, cc, dd, aa, F1, K1, in[11], 15);
- ROUND(aa, bb, cc, dd, F1, K1, in[12], 6);
- ROUND(dd, aa, bb, cc, F1, K1, in[13], 7);
- ROUND(cc, dd, aa, bb, F1, K1, in[14], 9);
- ROUND(bb, cc, dd, aa, F1, K1, in[15], 8);
-
- /* round 1: right lane */
- ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8);
- ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9);
- ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9);
- ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11);
- ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13);
- ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15);
- ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15);
- ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5);
- ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7);
- ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7);
- ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8);
- ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11);
- ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14);
- ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14);
- ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12);
- ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);
-
- /* Swap contents of "a" registers */
- swap(aa, aaa);
-
- /* round 2: left lane */
- ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
- ROUND(dd, aa, bb, cc, F2, K2, in[4], 6);
- ROUND(cc, dd, aa, bb, F2, K2, in[13], 8);
- ROUND(bb, cc, dd, aa, F2, K2, in[1], 13);
- ROUND(aa, bb, cc, dd, F2, K2, in[10], 11);
- ROUND(dd, aa, bb, cc, F2, K2, in[6], 9);
- ROUND(cc, dd, aa, bb, F2, K2, in[15], 7);
- ROUND(bb, cc, dd, aa, F2, K2, in[3], 15);
- ROUND(aa, bb, cc, dd, F2, K2, in[12], 7);
- ROUND(dd, aa, bb, cc, F2, K2, in[0], 12);
- ROUND(cc, dd, aa, bb, F2, K2, in[9], 15);
- ROUND(bb, cc, dd, aa, F2, K2, in[5], 9);
- ROUND(aa, bb, cc, dd, F2, K2, in[2], 11);
- ROUND(dd, aa, bb, cc, F2, K2, in[14], 7);
- ROUND(cc, dd, aa, bb, F2, K2, in[11], 13);
- ROUND(bb, cc, dd, aa, F2, K2, in[8], 12);
-
- /* round 2: right lane */
- ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9);
- ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13);
- ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15);
- ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7);
- ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12);
- ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8);
- ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9);
- ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11);
- ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7);
- ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7);
- ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12);
- ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7);
- ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6);
- ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15);
- ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13);
- ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);
-
- /* Swap contents of "b" registers */
- swap(bb, bbb);
-
- /* round 3: left lane */
- ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
- ROUND(dd, aa, bb, cc, F3, K3, in[10], 13);
- ROUND(cc, dd, aa, bb, F3, K3, in[14], 6);
- ROUND(bb, cc, dd, aa, F3, K3, in[4], 7);
- ROUND(aa, bb, cc, dd, F3, K3, in[9], 14);
- ROUND(dd, aa, bb, cc, F3, K3, in[15], 9);
- ROUND(cc, dd, aa, bb, F3, K3, in[8], 13);
- ROUND(bb, cc, dd, aa, F3, K3, in[1], 15);
- ROUND(aa, bb, cc, dd, F3, K3, in[2], 14);
- ROUND(dd, aa, bb, cc, F3, K3, in[7], 8);
- ROUND(cc, dd, aa, bb, F3, K3, in[0], 13);
- ROUND(bb, cc, dd, aa, F3, K3, in[6], 6);
- ROUND(aa, bb, cc, dd, F3, K3, in[13], 5);
- ROUND(dd, aa, bb, cc, F3, K3, in[11], 12);
- ROUND(cc, dd, aa, bb, F3, K3, in[5], 7);
- ROUND(bb, cc, dd, aa, F3, K3, in[12], 5);
-
- /* round 3: right lane */
- ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9);
- ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7);
- ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15);
- ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11);
- ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8);
- ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6);
- ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6);
- ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14);
- ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12);
- ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13);
- ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5);
- ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14);
- ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13);
- ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13);
- ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7);
- ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);
-
- /* Swap contents of "c" registers */
- swap(cc, ccc);
-
- /* round 4: left lane */
- ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
- ROUND(dd, aa, bb, cc, F4, K4, in[9], 12);
- ROUND(cc, dd, aa, bb, F4, K4, in[11], 14);
- ROUND(bb, cc, dd, aa, F4, K4, in[10], 15);
- ROUND(aa, bb, cc, dd, F4, K4, in[0], 14);
- ROUND(dd, aa, bb, cc, F4, K4, in[8], 15);
- ROUND(cc, dd, aa, bb, F4, K4, in[12], 9);
- ROUND(bb, cc, dd, aa, F4, K4, in[4], 8);
- ROUND(aa, bb, cc, dd, F4, K4, in[13], 9);
- ROUND(dd, aa, bb, cc, F4, K4, in[3], 14);
- ROUND(cc, dd, aa, bb, F4, K4, in[7], 5);
- ROUND(bb, cc, dd, aa, F4, K4, in[15], 6);
- ROUND(aa, bb, cc, dd, F4, K4, in[14], 8);
- ROUND(dd, aa, bb, cc, F4, K4, in[5], 6);
- ROUND(cc, dd, aa, bb, F4, K4, in[6], 5);
- ROUND(bb, cc, dd, aa, F4, K4, in[2], 12);
-
- /* round 4: right lane */
- ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15);
- ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5);
- ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8);
- ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11);
- ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14);
- ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14);
- ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6);
- ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14);
- ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6);
- ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9);
- ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12);
- ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9);
- ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12);
- ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5);
- ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15);
- ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);
-
- /* Swap contents of "d" registers */
- swap(dd, ddd);
-
- /* combine results */
- state[0] += aa;
- state[1] += bb;
- state[2] += cc;
- state[3] += dd;
- state[4] += aaa;
- state[5] += bbb;
- state[6] += ccc;
- state[7] += ddd;
-}
-
-static int rmd256_init(struct shash_desc *desc)
-{
- struct rmd256_ctx *rctx = shash_desc_ctx(desc);
-
- rctx->byte_count = 0;
-
- rctx->state[0] = RMD_H0;
- rctx->state[1] = RMD_H1;
- rctx->state[2] = RMD_H2;
- rctx->state[3] = RMD_H3;
- rctx->state[4] = RMD_H5;
- rctx->state[5] = RMD_H6;
- rctx->state[6] = RMD_H7;
- rctx->state[7] = RMD_H8;
-
- memset(rctx->buffer, 0, sizeof(rctx->buffer));
-
- return 0;
-}
-
-static int rmd256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct rmd256_ctx *rctx = shash_desc_ctx(desc);
- const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
-
- rctx->byte_count += len;
-
- /* Enough space in buffer? If so copy and we're done */
- if (avail > len) {
- memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
- data, len);
- goto out;
- }
-
- memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
- data, avail);
-
- rmd256_transform(rctx->state, rctx->buffer);
- data += avail;
- len -= avail;
-
- while (len >= sizeof(rctx->buffer)) {
- memcpy(rctx->buffer, data, sizeof(rctx->buffer));
- rmd256_transform(rctx->state, rctx->buffer);
- data += sizeof(rctx->buffer);
- len -= sizeof(rctx->buffer);
- }
-
- memcpy(rctx->buffer, data, len);
-
-out:
- return 0;
-}
-
-/* Add padding and return the message digest. */
-static int rmd256_final(struct shash_desc *desc, u8 *out)
-{
- struct rmd256_ctx *rctx = shash_desc_ctx(desc);
- u32 i, index, padlen;
- __le64 bits;
- __le32 *dst = (__le32 *)out;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_le64(rctx->byte_count << 3);
-
- /* Pad out to 56 mod 64 */
- index = rctx->byte_count & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- rmd256_update(desc, padding, padlen);
-
- /* Append length */
- rmd256_update(desc, (const u8 *)&bits, sizeof(bits));
-
- /* Store state in digest */
- for (i = 0; i < 8; i++)
- dst[i] = cpu_to_le32p(&rctx->state[i]);
-
- /* Wipe context */
- memset(rctx, 0, sizeof(*rctx));
-
- return 0;
-}
-
-static struct shash_alg alg = {
- .digestsize = RMD256_DIGEST_SIZE,
- .init = rmd256_init,
- .update = rmd256_update,
- .final = rmd256_final,
- .descsize = sizeof(struct rmd256_ctx),
- .base = {
- .cra_name = "rmd256",
- .cra_driver_name = "rmd256-generic",
- .cra_blocksize = RMD256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static int __init rmd256_mod_init(void)
-{
- return crypto_register_shash(&alg);
-}
-
-static void __exit rmd256_mod_fini(void)
-{
- crypto_unregister_shash(&alg);
-}
-
-subsys_initcall(rmd256_mod_init);
-module_exit(rmd256_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
-MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
-MODULE_ALIAS_CRYPTO("rmd256");
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
deleted file mode 100644
index c919ad6c4705..000000000000
--- a/crypto/rmd320.c
+++ /dev/null
@@ -1,391 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cryptographic API.
- *
- * RIPEMD-320 - RACE Integrity Primitives Evaluation Message Digest.
- *
- * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
- *
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
- */
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-#include "ripemd.h"
-
-struct rmd320_ctx {
- u64 byte_count;
- u32 state[10];
- __le32 buffer[16];
-};
-
-#define K1 RMD_K1
-#define K2 RMD_K2
-#define K3 RMD_K3
-#define K4 RMD_K4
-#define K5 RMD_K5
-#define KK1 RMD_K6
-#define KK2 RMD_K7
-#define KK3 RMD_K8
-#define KK4 RMD_K9
-#define KK5 RMD_K1
-
-#define F1(x, y, z) (x ^ y ^ z) /* XOR */
-#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
-#define F3(x, y, z) ((x | ~y) ^ z)
-#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
-#define F5(x, y, z) (x ^ (y | ~z))
-
-#define ROUND(a, b, c, d, e, f, k, x, s) { \
- (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
- (a) = rol32((a), (s)) + (e); \
- (c) = rol32((c), 10); \
-}
-
-static void rmd320_transform(u32 *state, const __le32 *in)
-{
- u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee;
-
- /* Initialize left lane */
- aa = state[0];
- bb = state[1];
- cc = state[2];
- dd = state[3];
- ee = state[4];
-
- /* Initialize right lane */
- aaa = state[5];
- bbb = state[6];
- ccc = state[7];
- ddd = state[8];
- eee = state[9];
-
- /* round 1: left lane */
- ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11);
- ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14);
- ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15);
- ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12);
- ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5);
- ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8);
- ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7);
- ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9);
- ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11);
- ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13);
- ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14);
- ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15);
- ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6);
- ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7);
- ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9);
- ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8);
-
- /* round 1: right lane */
- ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8);
- ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9);
- ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9);
- ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11);
- ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13);
- ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15);
- ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15);
- ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5);
- ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7);
- ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7);
- ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8);
- ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11);
- ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14);
- ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[10], 14);
- ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12);
- ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6);
-
- /* Swap contents of "a" registers */
- swap(aa, aaa);
-
- /* round 2: left lane" */
- ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7);
- ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6);
- ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8);
- ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13);
- ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11);
- ROUND(ee, aa, bb, cc, dd, F2, K2, in[6], 9);
- ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7);
- ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15);
- ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7);
- ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12);
- ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15);
- ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9);
- ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11);
- ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7);
- ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13);
- ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12);
-
- /* round 2: right lane */
- ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9);
- ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13);
- ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15);
- ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7);
- ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12);
- ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8);
- ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9);
- ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[10], 11);
- ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7);
- ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7);
- ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12);
- ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7);
- ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6);
- ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15);
- ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13);
- ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11);
-
- /* Swap contents of "b" registers */
- swap(bb, bbb);
-
- /* round 3: left lane" */
- ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11);
- ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13);
- ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6);
- ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7);
- ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14);
- ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9);
- ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13);
- ROUND(bb, cc, dd, ee, aa, F3, K3, in[1], 15);
- ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14);
- ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8);
- ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13);
- ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6);
- ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5);
- ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12);
- ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7);
- ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5);
-
- /* round 3: right lane */
- ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9);
- ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7);
- ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15);
- ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11);
- ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8);
- ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6);
- ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6);
- ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14);
- ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12);
- ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13);
- ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5);
- ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14);
- ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13);
- ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13);
- ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7);
- ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5);
-
- /* Swap contents of "c" registers */
- swap(cc, ccc);
-
- /* round 4: left lane" */
- ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11);
- ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12);
- ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14);
- ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15);
- ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14);
- ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 15);
- ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9);
- ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8);
- ROUND(ee, aa, bb, cc, dd, F4, K4, in[13], 9);
- ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14);
- ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5);
- ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6);
- ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8);
- ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6);
- ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5);
- ROUND(cc, dd, ee, aa, bb, F4, K4, in[2], 12);
-
- /* round 4: right lane */
- ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15);
- ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5);
- ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8);
- ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[1], 11);
- ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14);
- ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14);
- ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6);
- ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14);
- ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 6);
- ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9);
- ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12);
- ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9);
- ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12);
- ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5);
- ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15);
- ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8);
-
- /* Swap contents of "d" registers */
- swap(dd, ddd);
-
- /* round 5: left lane" */
- ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9);
- ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15);
- ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5);
- ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11);
- ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6);
- ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8);
- ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13);
- ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12);
- ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5);
- ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12);
- ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13);
- ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14);
- ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11);
- ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8);
- ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5);
- ROUND(bb, cc, dd, ee, aa, F5, K5, in[13], 6);
-
- /* round 5: right lane */
- ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8);
- ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5);
- ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12);
- ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9);
- ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12);
- ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5);
- ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14);
- ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6);
- ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8);
- ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13);
- ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6);
- ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5);
- ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15);
- ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13);
- ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11);
- ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
-
- /* Swap contents of "e" registers */
- swap(ee, eee);
-
- /* combine results */
- state[0] += aa;
- state[1] += bb;
- state[2] += cc;
- state[3] += dd;
- state[4] += ee;
- state[5] += aaa;
- state[6] += bbb;
- state[7] += ccc;
- state[8] += ddd;
- state[9] += eee;
-}
-
-static int rmd320_init(struct shash_desc *desc)
-{
- struct rmd320_ctx *rctx = shash_desc_ctx(desc);
-
- rctx->byte_count = 0;
-
- rctx->state[0] = RMD_H0;
- rctx->state[1] = RMD_H1;
- rctx->state[2] = RMD_H2;
- rctx->state[3] = RMD_H3;
- rctx->state[4] = RMD_H4;
- rctx->state[5] = RMD_H5;
- rctx->state[6] = RMD_H6;
- rctx->state[7] = RMD_H7;
- rctx->state[8] = RMD_H8;
- rctx->state[9] = RMD_H9;
-
- memset(rctx->buffer, 0, sizeof(rctx->buffer));
-
- return 0;
-}
-
-static int rmd320_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct rmd320_ctx *rctx = shash_desc_ctx(desc);
- const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
-
- rctx->byte_count += len;
-
- /* Enough space in buffer? If so copy and we're done */
- if (avail > len) {
- memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
- data, len);
- goto out;
- }
-
- memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
- data, avail);
-
- rmd320_transform(rctx->state, rctx->buffer);
- data += avail;
- len -= avail;
-
- while (len >= sizeof(rctx->buffer)) {
- memcpy(rctx->buffer, data, sizeof(rctx->buffer));
- rmd320_transform(rctx->state, rctx->buffer);
- data += sizeof(rctx->buffer);
- len -= sizeof(rctx->buffer);
- }
-
- memcpy(rctx->buffer, data, len);
-
-out:
- return 0;
-}
-
-/* Add padding and return the message digest. */
-static int rmd320_final(struct shash_desc *desc, u8 *out)
-{
- struct rmd320_ctx *rctx = shash_desc_ctx(desc);
- u32 i, index, padlen;
- __le64 bits;
- __le32 *dst = (__le32 *)out;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_le64(rctx->byte_count << 3);
-
- /* Pad out to 56 mod 64 */
- index = rctx->byte_count & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- rmd320_update(desc, padding, padlen);
-
- /* Append length */
- rmd320_update(desc, (const u8 *)&bits, sizeof(bits));
-
- /* Store state in digest */
- for (i = 0; i < 10; i++)
- dst[i] = cpu_to_le32p(&rctx->state[i]);
-
- /* Wipe context */
- memset(rctx, 0, sizeof(*rctx));
-
- return 0;
-}
-
-static struct shash_alg alg = {
- .digestsize = RMD320_DIGEST_SIZE,
- .init = rmd320_init,
- .update = rmd320_update,
- .final = rmd320_final,
- .descsize = sizeof(struct rmd320_ctx),
- .base = {
- .cra_name = "rmd320",
- .cra_driver_name = "rmd320-generic",
- .cra_blocksize = RMD320_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static int __init rmd320_mod_init(void)
-{
- return crypto_register_shash(&alg);
-}
-
-static void __exit rmd320_mod_fini(void)
-{
- crypto_unregister_shash(&alg);
-}
-
-subsys_initcall(rmd320_mod_init);
-module_exit(rmd320_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
-MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
-MODULE_ALIAS_CRYPTO("rmd320");
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
deleted file mode 100644
index 3418869dabef..000000000000
--- a/crypto/salsa20_generic.c
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Salsa20: Salsa20 stream cipher algorithm
- *
- * Copyright (c) 2007 Tan Swee Heng <thesweeheng@gmail.com>
- *
- * Derived from:
- * - salsa20.c: Public domain C code by Daniel J. Bernstein <djb@cr.yp.to>
- *
- * Salsa20 is a stream cipher candidate in eSTREAM, the ECRYPT Stream
- * Cipher Project. It is designed by Daniel J. Bernstein <djb@cr.yp.to>.
- * More information about eSTREAM and Salsa20 can be found here:
- * https://www.ecrypt.eu.org/stream/
- * https://cr.yp.to/snuffle.html
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <asm/unaligned.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/module.h>
-
-#define SALSA20_IV_SIZE 8
-#define SALSA20_MIN_KEY_SIZE 16
-#define SALSA20_MAX_KEY_SIZE 32
-#define SALSA20_BLOCK_SIZE 64
-
-struct salsa20_ctx {
- u32 initial_state[16];
-};
-
-static void salsa20_block(u32 *state, __le32 *stream)
-{
- u32 x[16];
- int i;
-
- memcpy(x, state, sizeof(x));
-
- for (i = 0; i < 20; i += 2) {
- x[ 4] ^= rol32((x[ 0] + x[12]), 7);
- x[ 8] ^= rol32((x[ 4] + x[ 0]), 9);
- x[12] ^= rol32((x[ 8] + x[ 4]), 13);
- x[ 0] ^= rol32((x[12] + x[ 8]), 18);
- x[ 9] ^= rol32((x[ 5] + x[ 1]), 7);
- x[13] ^= rol32((x[ 9] + x[ 5]), 9);
- x[ 1] ^= rol32((x[13] + x[ 9]), 13);
- x[ 5] ^= rol32((x[ 1] + x[13]), 18);
- x[14] ^= rol32((x[10] + x[ 6]), 7);
- x[ 2] ^= rol32((x[14] + x[10]), 9);
- x[ 6] ^= rol32((x[ 2] + x[14]), 13);
- x[10] ^= rol32((x[ 6] + x[ 2]), 18);
- x[ 3] ^= rol32((x[15] + x[11]), 7);
- x[ 7] ^= rol32((x[ 3] + x[15]), 9);
- x[11] ^= rol32((x[ 7] + x[ 3]), 13);
- x[15] ^= rol32((x[11] + x[ 7]), 18);
- x[ 1] ^= rol32((x[ 0] + x[ 3]), 7);
- x[ 2] ^= rol32((x[ 1] + x[ 0]), 9);
- x[ 3] ^= rol32((x[ 2] + x[ 1]), 13);
- x[ 0] ^= rol32((x[ 3] + x[ 2]), 18);
- x[ 6] ^= rol32((x[ 5] + x[ 4]), 7);
- x[ 7] ^= rol32((x[ 6] + x[ 5]), 9);
- x[ 4] ^= rol32((x[ 7] + x[ 6]), 13);
- x[ 5] ^= rol32((x[ 4] + x[ 7]), 18);
- x[11] ^= rol32((x[10] + x[ 9]), 7);
- x[ 8] ^= rol32((x[11] + x[10]), 9);
- x[ 9] ^= rol32((x[ 8] + x[11]), 13);
- x[10] ^= rol32((x[ 9] + x[ 8]), 18);
- x[12] ^= rol32((x[15] + x[14]), 7);
- x[13] ^= rol32((x[12] + x[15]), 9);
- x[14] ^= rol32((x[13] + x[12]), 13);
- x[15] ^= rol32((x[14] + x[13]), 18);
- }
-
- for (i = 0; i < 16; i++)
- stream[i] = cpu_to_le32(x[i] + state[i]);
-
- if (++state[8] == 0)
- state[9]++;
-}
-
-static void salsa20_docrypt(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes)
-{
- __le32 stream[SALSA20_BLOCK_SIZE / sizeof(__le32)];
-
- while (bytes >= SALSA20_BLOCK_SIZE) {
- salsa20_block(state, stream);
- crypto_xor_cpy(dst, src, (const u8 *)stream,
- SALSA20_BLOCK_SIZE);
- bytes -= SALSA20_BLOCK_SIZE;
- dst += SALSA20_BLOCK_SIZE;
- src += SALSA20_BLOCK_SIZE;
- }
- if (bytes) {
- salsa20_block(state, stream);
- crypto_xor_cpy(dst, src, (const u8 *)stream, bytes);
- }
-}
-
-static void salsa20_init(u32 *state, const struct salsa20_ctx *ctx,
- const u8 *iv)
-{
- memcpy(state, ctx->initial_state, sizeof(ctx->initial_state));
- state[6] = get_unaligned_le32(iv + 0);
- state[7] = get_unaligned_le32(iv + 4);
-}
-
-static int salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- static const char sigma[16] = "expand 32-byte k";
- static const char tau[16] = "expand 16-byte k";
- struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm);
- const char *constants;
-
- if (keysize != SALSA20_MIN_KEY_SIZE &&
- keysize != SALSA20_MAX_KEY_SIZE)
- return -EINVAL;
-
- ctx->initial_state[1] = get_unaligned_le32(key + 0);
- ctx->initial_state[2] = get_unaligned_le32(key + 4);
- ctx->initial_state[3] = get_unaligned_le32(key + 8);
- ctx->initial_state[4] = get_unaligned_le32(key + 12);
- if (keysize == 32) { /* recommended */
- key += 16;
- constants = sigma;
- } else { /* keysize == 16 */
- constants = tau;
- }
- ctx->initial_state[11] = get_unaligned_le32(key + 0);
- ctx->initial_state[12] = get_unaligned_le32(key + 4);
- ctx->initial_state[13] = get_unaligned_le32(key + 8);
- ctx->initial_state[14] = get_unaligned_le32(key + 12);
- ctx->initial_state[0] = get_unaligned_le32(constants + 0);
- ctx->initial_state[5] = get_unaligned_le32(constants + 4);
- ctx->initial_state[10] = get_unaligned_le32(constants + 8);
- ctx->initial_state[15] = get_unaligned_le32(constants + 12);
-
- /* space for the nonce; it will be overridden for each request */
- ctx->initial_state[6] = 0;
- ctx->initial_state[7] = 0;
-
- /* initial block number */
- ctx->initial_state[8] = 0;
- ctx->initial_state[9] = 0;
-
- return 0;
-}
-
-static int salsa20_crypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- salsa20_init(state, ctx, req->iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, walk.stride);
-
- salsa20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes);
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
-
- return err;
-}
-
-static struct skcipher_alg alg = {
- .base.cra_name = "salsa20",
- .base.cra_driver_name = "salsa20-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct salsa20_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = SALSA20_MIN_KEY_SIZE,
- .max_keysize = SALSA20_MAX_KEY_SIZE,
- .ivsize = SALSA20_IV_SIZE,
- .chunksize = SALSA20_BLOCK_SIZE,
- .setkey = salsa20_setkey,
- .encrypt = salsa20_crypt,
- .decrypt = salsa20_crypt,
-};
-
-static int __init salsa20_generic_mod_init(void)
-{
- return crypto_register_skcipher(&alg);
-}
-
-static void __exit salsa20_generic_mod_fini(void)
-{
- crypto_unregister_skcipher(&alg);
-}
-
-subsys_initcall(salsa20_generic_mod_init);
-module_exit(salsa20_generic_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
-MODULE_ALIAS_CRYPTO("salsa20");
-MODULE_ALIAS_CRYPTO("salsa20-generic");
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index 492c1d0bfe06..236c87547a17 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -5,17 +5,12 @@
* Serpent Cipher Algorithm.
*
* Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no>
- * 2003 Herbert Valerio Riedel <hvr@gnu.org>
- *
- * Added tnepres support:
- * Ruben Jesus Garcia Hernandez <ruben@ugr.es>, 18.10.2004
- * Based on code by hvr
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
-#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include <linux/crypto.h>
#include <linux/types.h>
#include <crypto/serpent.h>
@@ -453,19 +448,12 @@ void __serpent_encrypt(const void *c, u8 *dst, const u8 *src)
{
const struct serpent_ctx *ctx = c;
const u32 *k = ctx->expkey;
- const __le32 *s = (const __le32 *)src;
- __le32 *d = (__le32 *)dst;
u32 r0, r1, r2, r3, r4;
-/*
- * Note: The conversions between u8* and u32* might cause trouble
- * on architectures with stricter alignment rules than x86
- */
-
- r0 = le32_to_cpu(s[0]);
- r1 = le32_to_cpu(s[1]);
- r2 = le32_to_cpu(s[2]);
- r3 = le32_to_cpu(s[3]);
+ r0 = get_unaligned_le32(src);
+ r1 = get_unaligned_le32(src + 4);
+ r2 = get_unaligned_le32(src + 8);
+ r3 = get_unaligned_le32(src + 12);
K(r0, r1, r2, r3, 0);
S0(r0, r1, r2, r3, r4); LK(r2, r1, r3, r0, r4, 1);
@@ -501,10 +489,10 @@ void __serpent_encrypt(const void *c, u8 *dst, const u8 *src)
S6(r0, r1, r3, r2, r4); LK(r3, r4, r1, r2, r0, 31);
S7(r3, r4, r1, r2, r0); K(r0, r1, r2, r3, 32);
- d[0] = cpu_to_le32(r0);
- d[1] = cpu_to_le32(r1);
- d[2] = cpu_to_le32(r2);
- d[3] = cpu_to_le32(r3);
+ put_unaligned_le32(r0, dst);
+ put_unaligned_le32(r1, dst + 4);
+ put_unaligned_le32(r2, dst + 8);
+ put_unaligned_le32(r3, dst + 12);
}
EXPORT_SYMBOL_GPL(__serpent_encrypt);
@@ -519,14 +507,12 @@ void __serpent_decrypt(const void *c, u8 *dst, const u8 *src)
{
const struct serpent_ctx *ctx = c;
const u32 *k = ctx->expkey;
- const __le32 *s = (const __le32 *)src;
- __le32 *d = (__le32 *)dst;
u32 r0, r1, r2, r3, r4;
- r0 = le32_to_cpu(s[0]);
- r1 = le32_to_cpu(s[1]);
- r2 = le32_to_cpu(s[2]);
- r3 = le32_to_cpu(s[3]);
+ r0 = get_unaligned_le32(src);
+ r1 = get_unaligned_le32(src + 4);
+ r2 = get_unaligned_le32(src + 8);
+ r3 = get_unaligned_le32(src + 12);
K(r0, r1, r2, r3, 32);
SI7(r0, r1, r2, r3, r4); KL(r1, r3, r0, r4, r2, 31);
@@ -562,10 +548,10 @@ void __serpent_decrypt(const void *c, u8 *dst, const u8 *src)
SI1(r3, r1, r2, r0, r4); KL(r4, r1, r2, r0, r3, 1);
SI0(r4, r1, r2, r0, r3); K(r2, r3, r1, r4, 0);
- d[0] = cpu_to_le32(r2);
- d[1] = cpu_to_le32(r3);
- d[2] = cpu_to_le32(r1);
- d[3] = cpu_to_le32(r4);
+ put_unaligned_le32(r2, dst);
+ put_unaligned_le32(r3, dst + 4);
+ put_unaligned_le32(r1, dst + 8);
+ put_unaligned_le32(r4, dst + 12);
}
EXPORT_SYMBOL_GPL(__serpent_decrypt);
@@ -576,66 +562,13 @@ static void serpent_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
__serpent_decrypt(ctx, dst, src);
}
-static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- u8 rev_key[SERPENT_MAX_KEY_SIZE];
- int i;
-
- for (i = 0; i < keylen; ++i)
- rev_key[keylen - i - 1] = key[i];
-
- return serpent_setkey(tfm, rev_key, keylen);
-}
-
-static void tnepres_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
- const u32 * const s = (const u32 * const)src;
- u32 * const d = (u32 * const)dst;
-
- u32 rs[4], rd[4];
-
- rs[0] = swab32(s[3]);
- rs[1] = swab32(s[2]);
- rs[2] = swab32(s[1]);
- rs[3] = swab32(s[0]);
-
- serpent_encrypt(tfm, (u8 *)rd, (u8 *)rs);
-
- d[0] = swab32(rd[3]);
- d[1] = swab32(rd[2]);
- d[2] = swab32(rd[1]);
- d[3] = swab32(rd[0]);
-}
-
-static void tnepres_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
- const u32 * const s = (const u32 * const)src;
- u32 * const d = (u32 * const)dst;
-
- u32 rs[4], rd[4];
-
- rs[0] = swab32(s[3]);
- rs[1] = swab32(s[2]);
- rs[2] = swab32(s[1]);
- rs[3] = swab32(s[0]);
-
- serpent_decrypt(tfm, (u8 *)rd, (u8 *)rs);
-
- d[0] = swab32(rd[3]);
- d[1] = swab32(rd[2]);
- d[2] = swab32(rd[1]);
- d[3] = swab32(rd[0]);
-}
-
-static struct crypto_alg srp_algs[2] = { {
+static struct crypto_alg srp_alg = {
.cra_name = "serpent",
.cra_driver_name = "serpent-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = SERPENT_MIN_KEY_SIZE,
@@ -643,38 +576,23 @@ static struct crypto_alg srp_algs[2] = { {
.cia_setkey = serpent_setkey,
.cia_encrypt = serpent_encrypt,
.cia_decrypt = serpent_decrypt } }
-}, {
- .cra_name = "tnepres",
- .cra_driver_name = "tnepres-generic",
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 3,
- .cra_module = THIS_MODULE,
- .cra_u = { .cipher = {
- .cia_min_keysize = SERPENT_MIN_KEY_SIZE,
- .cia_max_keysize = SERPENT_MAX_KEY_SIZE,
- .cia_setkey = tnepres_setkey,
- .cia_encrypt = tnepres_encrypt,
- .cia_decrypt = tnepres_decrypt } }
-} };
+};
static int __init serpent_mod_init(void)
{
- return crypto_register_algs(srp_algs, ARRAY_SIZE(srp_algs));
+ return crypto_register_alg(&srp_alg);
}
static void __exit serpent_mod_fini(void)
{
- crypto_unregister_algs(srp_algs, ARRAY_SIZE(srp_algs));
+ crypto_unregister_alg(&srp_alg);
}
subsys_initcall(serpent_mod_init);
module_exit(serpent_mod_fini);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm");
+MODULE_DESCRIPTION("Serpent Cipher Algorithm");
MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>");
-MODULE_ALIAS_CRYPTO("tnepres");
MODULE_ALIAS_CRYPTO("serpent");
MODULE_ALIAS_CRYPTO("serpent-generic");
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index b4dae640de9f..a15376245416 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -10,6 +10,7 @@
*/
#include <crypto/internal/aead.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
@@ -490,12 +491,6 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
-void skcipher_walk_atomise(struct skcipher_walk *walk)
-{
- walk->flags &= ~SKCIPHER_WALK_SLEEP;
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_atomise);
-
int skcipher_walk_async(struct skcipher_walk *walk,
struct skcipher_request *req)
{
@@ -986,3 +981,4 @@ EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
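With the new include of <crypto/internal/cipher.h>, skcipher.c uses symbols exported in the CRYPTO_INTERNAL symbol namespace, so the matching MODULE_IMPORT_NS() line is required for the module to resolve those exports at load time. A hedged sketch of the same pattern for any module in that situation (demo_init/demo_exit are illustrative names, not part of the patch):

#include <crypto/internal/cipher.h>
#include <linux/module.h>

/* Importing the namespace is what permits use of the internal cipher API. */
MODULE_IMPORT_NS(CRYPTO_INTERNAL);

static int __init demo_init(void)
{
	/*
	 * crypto_cipher_encrypt_one() and the other helpers from the
	 * internal header could be called here; this sketch allocates nothing.
	 */
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Sketch: importing the CRYPTO_INTERNAL symbol namespace");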
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index a647bb298fbc..6b7c158dc508 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -70,8 +70,8 @@ static const char *check[] = {
"des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3",
"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
- "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
- "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
+ "khazad", "wp512", "wp384", "wp256", "xeta", "fcrypt",
+ "camellia", "seed", "rmd160",
"lzo", "lzo-rle", "cts", "sha3-224", "sha3-256", "sha3-384",
"sha3-512", "streebog256", "streebog512",
NULL
@@ -199,8 +199,8 @@ static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc,
goto out;
}
- pr_cont("%d operations in %d seconds (%ld bytes)\n",
- bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ pr_cont("%d operations in %d seconds (%llu bytes)\n",
+ bcount * num_mb, secs, (u64)bcount * blen * num_mb);
out:
kfree(rc);
@@ -471,8 +471,8 @@ static int test_aead_jiffies(struct aead_request *req, int enc,
return ret;
}
- printk("%d operations in %d seconds (%ld bytes)\n",
- bcount, secs, (long)bcount * blen);
+ pr_cont("%d operations in %d seconds (%llu bytes)\n",
+ bcount, secs, (u64)bcount * blen);
return 0;
}
@@ -764,8 +764,8 @@ static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen,
goto out;
}
- pr_cont("%d operations in %d seconds (%ld bytes)\n",
- bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ pr_cont("%d operations in %d seconds (%llu bytes)\n",
+ bcount * num_mb, secs, (u64)bcount * blen * num_mb);
out:
kfree(rc);
@@ -1201,8 +1201,8 @@ static int test_mb_acipher_jiffies(struct test_mb_skcipher_data *data, int enc,
goto out;
}
- pr_cont("%d operations in %d seconds (%ld bytes)\n",
- bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ pr_cont("%d operations in %d seconds (%llu bytes)\n",
+ bcount * num_mb, secs, (u64)bcount * blen * num_mb);
out:
kfree(rc);
@@ -1441,8 +1441,8 @@ static int test_acipher_jiffies(struct skcipher_request *req, int enc,
return ret;
}
- pr_cont("%d operations in %d seconds (%ld bytes)\n",
- bcount, secs, (long)bcount * blen);
+ pr_cont("%d operations in %d seconds (%llu bytes)\n",
+ bcount, secs, (u64)bcount * blen);
return 0;
}
@@ -1806,27 +1806,11 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret += tcrypt_test("wp256");
break;
- case 25:
- ret += tcrypt_test("ecb(tnepres)");
- break;
-
case 26:
ret += tcrypt_test("ecb(anubis)");
ret += tcrypt_test("cbc(anubis)");
break;
- case 27:
- ret += tcrypt_test("tgr192");
- break;
-
- case 28:
- ret += tcrypt_test("tgr160");
- break;
-
- case 29:
- ret += tcrypt_test("tgr128");
- break;
-
case 30:
ret += tcrypt_test("ecb(xeta)");
break;
@@ -1847,10 +1831,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret += tcrypt_test("sha224");
break;
- case 34:
- ret += tcrypt_test("salsa20");
- break;
-
case 35:
ret += tcrypt_test("gcm(aes)");
break;
@@ -1867,22 +1847,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret += tcrypt_test("cts(cbc(aes))");
break;
- case 39:
- ret += tcrypt_test("rmd128");
- break;
-
case 40:
ret += tcrypt_test("rmd160");
break;
- case 41:
- ret += tcrypt_test("rmd256");
- break;
-
- case 42:
- ret += tcrypt_test("rmd320");
- break;
-
case 43:
ret += tcrypt_test("ecb(seed)");
break;
@@ -1955,10 +1923,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret += tcrypt_test("xcbc(aes)");
break;
- case 107:
- ret += tcrypt_test("hmac(rmd128)");
- break;
-
case 108:
ret += tcrypt_test("hmac(rmd160)");
break;
@@ -2181,11 +2145,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
speed_template_32_48_64);
break;
- case 206:
- test_cipher_speed("salsa20", ENCRYPT, sec, NULL, 0,
- speed_template_16_32);
- break;
-
case 207:
test_cipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
@@ -2393,38 +2352,14 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_hash_speed("wp512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
- case 310:
- test_hash_speed("tgr128", sec, generic_hash_speed_template);
- if (mode > 300 && mode < 400) break;
- fallthrough;
- case 311:
- test_hash_speed("tgr160", sec, generic_hash_speed_template);
- if (mode > 300 && mode < 400) break;
- fallthrough;
- case 312:
- test_hash_speed("tgr192", sec, generic_hash_speed_template);
- if (mode > 300 && mode < 400) break;
- fallthrough;
case 313:
test_hash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
- case 314:
- test_hash_speed("rmd128", sec, generic_hash_speed_template);
- if (mode > 300 && mode < 400) break;
- fallthrough;
case 315:
test_hash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
- case 316:
- test_hash_speed("rmd256", sec, generic_hash_speed_template);
- if (mode > 300 && mode < 400) break;
- fallthrough;
- case 317:
- test_hash_speed("rmd320", sec, generic_hash_speed_template);
- if (mode > 300 && mode < 400) break;
- fallthrough;
case 318:
klen = 16;
test_hash_speed("ghash", sec, generic_hash_speed_template);
@@ -2517,38 +2452,14 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_ahash_speed("wp512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
- case 410:
- test_ahash_speed("tgr128", sec, generic_hash_speed_template);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 411:
- test_ahash_speed("tgr160", sec, generic_hash_speed_template);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 412:
- test_ahash_speed("tgr192", sec, generic_hash_speed_template);
- if (mode > 400 && mode < 500) break;
- fallthrough;
case 413:
test_ahash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
- case 414:
- test_ahash_speed("rmd128", sec, generic_hash_speed_template);
- if (mode > 400 && mode < 500) break;
- fallthrough;
case 415:
test_ahash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
- case 416:
- test_ahash_speed("rmd256", sec, generic_hash_speed_template);
- if (mode > 400 && mode < 500) break;
- fallthrough;
- case 417:
- test_ahash_speed("rmd320", sec, generic_hash_speed_template);
- if (mode > 400 && mode < 500) break;
- fallthrough;
case 418:
test_ahash_speed("sha3-224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
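The format-string changes in tcrypt.c above exist because bcount * blen * num_mb can exceed what a 32-bit long holds on 32-bit builds, and printing a widened value with %ld would also be a format mismatch; casting to u64 and printing with %llu keeps the byte counts exact everywhere. A standalone user-space illustration of the difference (not taken from the kernel sources):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int bcount = 200000, blen = 4096, num_mb = 8;

	/* Wraps when long is 32 bits: 200000 * 4096 * 8 is well above 2^31. */
	long as_long = (long)bcount * blen * num_mb;

	/* Widening before the multiply keeps the product exact on any target. */
	uint64_t as_u64 = (uint64_t)bcount * blen * num_mb;

	printf("long: %ld, u64: %llu\n", as_long, (unsigned long long)as_u64);
	return 0;
}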
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 321e38eef51b..93359999c94b 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -33,10 +33,13 @@
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/acompress.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
#include "internal.h"
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+
static bool notests;
module_param(notests, bool, 0644);
MODULE_PARM_DESC(notests, "disable crypto self-tests");
@@ -4874,12 +4877,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(tea_tv_template)
}
}, {
- .alg = "ecb(tnepres)",
- .test = alg_test_skcipher,
- .suite = {
- .cipher = __VECS(tnepres_tv_template)
- }
- }, {
.alg = "ecb(twofish)",
.test = alg_test_skcipher,
.suite = {
@@ -4955,12 +4952,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(hmac_md5_tv_template)
}
}, {
- .alg = "hmac(rmd128)",
- .test = alg_test_hash,
- .suite = {
- .hash = __VECS(hmac_rmd128_tv_template)
- }
- }, {
.alg = "hmac(rmd160)",
.test = alg_test_hash,
.suite = {
@@ -5273,30 +5264,12 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
- .alg = "rmd128",
- .test = alg_test_hash,
- .suite = {
- .hash = __VECS(rmd128_tv_template)
- }
- }, {
.alg = "rmd160",
.test = alg_test_hash,
.suite = {
.hash = __VECS(rmd160_tv_template)
}
}, {
- .alg = "rmd256",
- .test = alg_test_hash,
- .suite = {
- .hash = __VECS(rmd256_tv_template)
- }
- }, {
- .alg = "rmd320",
- .test = alg_test_hash,
- .suite = {
- .hash = __VECS(rmd320_tv_template)
- }
- }, {
.alg = "rsa",
.test = alg_test_akcipher,
.fips_allowed = 1,
@@ -5304,12 +5277,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.akcipher = __VECS(rsa_tv_template)
}
}, {
- .alg = "salsa20",
- .test = alg_test_skcipher,
- .suite = {
- .cipher = __VECS(salsa20_stream_tv_template)
- }
- }, {
.alg = "sha1",
.test = alg_test_hash,
.fips_allowed = 1,
@@ -5397,24 +5364,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(streebog512_tv_template)
}
}, {
- .alg = "tgr128",
- .test = alg_test_hash,
- .suite = {
- .hash = __VECS(tgr128_tv_template)
- }
- }, {
- .alg = "tgr160",
- .test = alg_test_hash,
- .suite = {
- .hash = __VECS(tgr160_tv_template)
- }
- }, {
- .alg = "tgr192",
- .test = alg_test_hash,
- .suite = {
- .hash = __VECS(tgr192_tv_template)
- }
- }, {
.alg = "vmac64(aes)",
.test = alg_test_hash,
.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 8c83811c0e35..ced56ea0c9b4 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -3141,66 +3141,6 @@ static const struct hash_testvec md5_tv_template[] = {
};
/*
- * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
- */
-static const struct hash_testvec rmd128_tv_template[] = {
- {
- .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
- "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
- }, {
- .plaintext = "a",
- .psize = 1,
- .digest = "\x86\xbe\x7a\xfa\x33\x9d\x0f\xc7"
- "\xcf\xc7\x85\xe7\x2f\x57\x8d\x33",
- }, {
- .plaintext = "abc",
- .psize = 3,
- .digest = "\xc1\x4a\x12\x19\x9c\x66\xe4\xba"
- "\x84\x63\x6b\x0f\x69\x14\x4c\x77",
- }, {
- .plaintext = "message digest",
- .psize = 14,
- .digest = "\x9e\x32\x7b\x3d\x6e\x52\x30\x62"
- "\xaf\xc1\x13\x2d\x7d\xf9\xd1\xb8",
- }, {
- .plaintext = "abcdefghijklmnopqrstuvwxyz",
- .psize = 26,
- .digest = "\xfd\x2a\xa6\x07\xf7\x1d\xc8\xf5"
- "\x10\x71\x49\x22\xb3\x71\x83\x4e",
- }, {
- .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
- "fghijklmnopqrstuvwxyz0123456789",
- .psize = 62,
- .digest = "\xd1\xe9\x59\xeb\x17\x9c\x91\x1f"
- "\xae\xa4\x62\x4c\x60\xc5\xc7\x02",
- }, {
- .plaintext = "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890",
- .psize = 80,
- .digest = "\x3f\x45\xef\x19\x47\x32\xc2\xdb"
- "\xb2\xc4\xa2\xc7\x69\x79\x5f\xa3",
- }, {
- .plaintext = "abcdbcdecdefdefgefghfghighij"
- "hijkijkljklmklmnlmnomnopnopq",
- .psize = 56,
- .digest = "\xa1\xaa\x06\x89\xd0\xfa\xfa\x2d"
- "\xdc\x22\xe8\x8b\x49\x13\x3a\x06",
- }, {
- .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi"
- "jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr"
- "lmnopqrsmnopqrstnopqrstu",
- .psize = 112,
- .digest = "\xd4\xec\xc9\x13\xe1\xdf\x77\x6b"
- "\xf4\x8d\xe9\xd5\x5b\x1f\x25\x46",
- }, {
- .plaintext = "abcdbcdecdefdefgefghfghighijhijk",
- .psize = 32,
- .digest = "\x13\xfc\x13\xe8\xef\xff\x34\x7d"
- "\xe1\x93\xff\x46\xdb\xac\xcf\xd4",
- }
-};
-
-/*
* RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
*/
static const struct hash_testvec rmd160_tv_template[] = {
@@ -3260,134 +3200,6 @@ static const struct hash_testvec rmd160_tv_template[] = {
}
};
-/*
- * RIPEMD-256 test vectors
- */
-static const struct hash_testvec rmd256_tv_template[] = {
- {
- .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
- "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
- "\x2d\x97\x74\xfb\x1e\x5d\x02\x63"
- "\x80\xae\x01\x68\xe3\xc5\x52\x2d",
- }, {
- .plaintext = "a",
- .psize = 1,
- .digest = "\xf9\x33\x3e\x45\xd8\x57\xf5\xd9"
- "\x0a\x91\xba\xb7\x0a\x1e\xba\x0c"
- "\xfb\x1b\xe4\xb0\x78\x3c\x9a\xcf"
- "\xcd\x88\x3a\x91\x34\x69\x29\x25",
- }, {
- .plaintext = "abc",
- .psize = 3,
- .digest = "\xaf\xbd\x6e\x22\x8b\x9d\x8c\xbb"
- "\xce\xf5\xca\x2d\x03\xe6\xdb\xa1"
- "\x0a\xc0\xbc\x7d\xcb\xe4\x68\x0e"
- "\x1e\x42\xd2\xe9\x75\x45\x9b\x65",
- }, {
- .plaintext = "message digest",
- .psize = 14,
- .digest = "\x87\xe9\x71\x75\x9a\x1c\xe4\x7a"
- "\x51\x4d\x5c\x91\x4c\x39\x2c\x90"
- "\x18\xc7\xc4\x6b\xc1\x44\x65\x55"
- "\x4a\xfc\xdf\x54\xa5\x07\x0c\x0e",
- }, {
- .plaintext = "abcdefghijklmnopqrstuvwxyz",
- .psize = 26,
- .digest = "\x64\x9d\x30\x34\x75\x1e\xa2\x16"
- "\x77\x6b\xf9\xa1\x8a\xcc\x81\xbc"
- "\x78\x96\x11\x8a\x51\x97\x96\x87"
- "\x82\xdd\x1f\xd9\x7d\x8d\x51\x33",
- }, {
- .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
- "fghijklmnopqrstuvwxyz0123456789",
- .psize = 62,
- .digest = "\x57\x40\xa4\x08\xac\x16\xb7\x20"
- "\xb8\x44\x24\xae\x93\x1c\xbb\x1f"
- "\xe3\x63\xd1\xd0\xbf\x40\x17\xf1"
- "\xa8\x9f\x7e\xa6\xde\x77\xa0\xb8",
- }, {
- .plaintext = "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890",
- .psize = 80,
- .digest = "\x06\xfd\xcc\x7a\x40\x95\x48\xaa"
- "\xf9\x13\x68\xc0\x6a\x62\x75\xb5"
- "\x53\xe3\xf0\x99\xbf\x0e\xa4\xed"
- "\xfd\x67\x78\xdf\x89\xa8\x90\xdd",
- }, {
- .plaintext = "abcdbcdecdefdefgefghfghighij"
- "hijkijkljklmklmnlmnomnopnopq",
- .psize = 56,
- .digest = "\x38\x43\x04\x55\x83\xaa\xc6\xc8"
- "\xc8\xd9\x12\x85\x73\xe7\xa9\x80"
- "\x9a\xfb\x2a\x0f\x34\xcc\xc3\x6e"
- "\xa9\xe7\x2f\x16\xf6\x36\x8e\x3f",
- }
-};
-
-/*
- * RIPEMD-320 test vectors
- */
-static const struct hash_testvec rmd320_tv_template[] = {
- {
- .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
- "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
- "\xeb\xc6\x1e\x85\x57\x17\x7d\x70\x5a\x0e"
- "\xc8\x80\x15\x1c\x3a\x32\xa0\x08\x99\xb8",
- }, {
- .plaintext = "a",
- .psize = 1,
- .digest = "\xce\x78\x85\x06\x38\xf9\x26\x58\xa5\xa5"
- "\x85\x09\x75\x79\x92\x6d\xda\x66\x7a\x57"
- "\x16\x56\x2c\xfc\xf6\xfb\xe7\x7f\x63\x54"
- "\x2f\x99\xb0\x47\x05\xd6\x97\x0d\xff\x5d",
- }, {
- .plaintext = "abc",
- .psize = 3,
- .digest = "\xde\x4c\x01\xb3\x05\x4f\x89\x30\xa7\x9d"
- "\x09\xae\x73\x8e\x92\x30\x1e\x5a\x17\x08"
- "\x5b\xef\xfd\xc1\xb8\xd1\x16\x71\x3e\x74"
- "\xf8\x2f\xa9\x42\xd6\x4c\xdb\xc4\x68\x2d",
- }, {
- .plaintext = "message digest",
- .psize = 14,
- .digest = "\x3a\x8e\x28\x50\x2e\xd4\x5d\x42\x2f\x68"
- "\x84\x4f\x9d\xd3\x16\xe7\xb9\x85\x33\xfa"
- "\x3f\x2a\x91\xd2\x9f\x84\xd4\x25\xc8\x8d"
- "\x6b\x4e\xff\x72\x7d\xf6\x6a\x7c\x01\x97",
- }, {
- .plaintext = "abcdefghijklmnopqrstuvwxyz",
- .psize = 26,
- .digest = "\xca\xbd\xb1\x81\x0b\x92\x47\x0a\x20\x93"
- "\xaa\x6b\xce\x05\x95\x2c\x28\x34\x8c\xf4"
- "\x3f\xf6\x08\x41\x97\x51\x66\xbb\x40\xed"
- "\x23\x40\x04\xb8\x82\x44\x63\xe6\xb0\x09",
- }, {
- .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
- "fghijklmnopqrstuvwxyz0123456789",
- .psize = 62,
- .digest = "\xed\x54\x49\x40\xc8\x6d\x67\xf2\x50\xd2"
- "\x32\xc3\x0b\x7b\x3e\x57\x70\xe0\xc6\x0c"
- "\x8c\xb9\xa4\xca\xfe\x3b\x11\x38\x8a\xf9"
- "\x92\x0e\x1b\x99\x23\x0b\x84\x3c\x86\xa4",
- }, {
- .plaintext = "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890",
- .psize = 80,
- .digest = "\x55\x78\x88\xaf\x5f\x6d\x8e\xd6\x2a\xb6"
- "\x69\x45\xc6\xd2\xa0\xa4\x7e\xcd\x53\x41"
- "\xe9\x15\xeb\x8f\xea\x1d\x05\x24\x95\x5f"
- "\x82\x5d\xc7\x17\xe4\xa0\x08\xab\x2d\x42",
- }, {
- .plaintext = "abcdbcdecdefdefgefghfghighij"
- "hijkijkljklmklmnlmnomnopnopq",
- .psize = 56,
- .digest = "\xd0\x34\xa7\x95\x0c\xf7\x22\x02\x1b\xa4"
- "\xb8\x4d\xf7\x69\xa5\xde\x20\x60\xe2\x59"
- "\xdf\x4c\x9b\xb4\xa4\x26\x8c\x0e\x93\x5b"
- "\xbc\x74\x70\xa9\x69\xc9\xd0\x72\xa1\xac",
- }
-};
-
static const struct hash_testvec crct10dif_tv_template[] = {
{
.plaintext = "abc",
@@ -5138,132 +4950,6 @@ static const struct hash_testvec wp256_tv_template[] = {
},
};
-/*
- * TIGER test vectors from Tiger website
- */
-static const struct hash_testvec tgr192_tv_template[] = {
- {
- .plaintext = "",
- .psize = 0,
- .digest = "\x24\xf0\x13\x0c\x63\xac\x93\x32"
- "\x16\x16\x6e\x76\xb1\xbb\x92\x5f"
- "\xf3\x73\xde\x2d\x49\x58\x4e\x7a",
- }, {
- .plaintext = "abc",
- .psize = 3,
- .digest = "\xf2\x58\xc1\xe8\x84\x14\xab\x2a"
- "\x52\x7a\xb5\x41\xff\xc5\xb8\xbf"
- "\x93\x5f\x7b\x95\x1c\x13\x29\x51",
- }, {
- .plaintext = "Tiger",
- .psize = 5,
- .digest = "\x9f\x00\xf5\x99\x07\x23\x00\xdd"
- "\x27\x6a\xbb\x38\xc8\xeb\x6d\xec"
- "\x37\x79\x0c\x11\x6f\x9d\x2b\xdf",
- }, {
- .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-",
- .psize = 64,
- .digest = "\x87\xfb\x2a\x90\x83\x85\x1c\xf7"
- "\x47\x0d\x2c\xf8\x10\xe6\xdf\x9e"
- "\xb5\x86\x44\x50\x34\xa5\xa3\x86",
- }, {
- .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ=abcdefghijklmnopqrstuvwxyz+0123456789",
- .psize = 64,
- .digest = "\x46\x7d\xb8\x08\x63\xeb\xce\x48"
- "\x8d\xf1\xcd\x12\x61\x65\x5d\xe9"
- "\x57\x89\x65\x65\x97\x5f\x91\x97",
- }, {
- .plaintext = "Tiger - A Fast New Hash Function, "
- "by Ross Anderson and Eli Biham, "
- "proceedings of Fast Software Encryption 3, "
- "Cambridge, 1996.",
- .psize = 125,
- .digest = "\x3d\x9a\xeb\x03\xd1\xbd\x1a\x63"
- "\x57\xb2\x77\x4d\xfd\x6d\x5b\x24"
- "\xdd\x68\x15\x1d\x50\x39\x74\xfc",
- },
-};
-
-static const struct hash_testvec tgr160_tv_template[] = {
- {
- .plaintext = "",
- .psize = 0,
- .digest = "\x24\xf0\x13\x0c\x63\xac\x93\x32"
- "\x16\x16\x6e\x76\xb1\xbb\x92\x5f"
- "\xf3\x73\xde\x2d",
- }, {
- .plaintext = "abc",
- .psize = 3,
- .digest = "\xf2\x58\xc1\xe8\x84\x14\xab\x2a"
- "\x52\x7a\xb5\x41\xff\xc5\xb8\xbf"
- "\x93\x5f\x7b\x95",
- }, {
- .plaintext = "Tiger",
- .psize = 5,
- .digest = "\x9f\x00\xf5\x99\x07\x23\x00\xdd"
- "\x27\x6a\xbb\x38\xc8\xeb\x6d\xec"
- "\x37\x79\x0c\x11",
- }, {
- .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-",
- .psize = 64,
- .digest = "\x87\xfb\x2a\x90\x83\x85\x1c\xf7"
- "\x47\x0d\x2c\xf8\x10\xe6\xdf\x9e"
- "\xb5\x86\x44\x50",
- }, {
- .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ=abcdefghijklmnopqrstuvwxyz+0123456789",
- .psize = 64,
- .digest = "\x46\x7d\xb8\x08\x63\xeb\xce\x48"
- "\x8d\xf1\xcd\x12\x61\x65\x5d\xe9"
- "\x57\x89\x65\x65",
- }, {
- .plaintext = "Tiger - A Fast New Hash Function, "
- "by Ross Anderson and Eli Biham, "
- "proceedings of Fast Software Encryption 3, "
- "Cambridge, 1996.",
- .psize = 125,
- .digest = "\x3d\x9a\xeb\x03\xd1\xbd\x1a\x63"
- "\x57\xb2\x77\x4d\xfd\x6d\x5b\x24"
- "\xdd\x68\x15\x1d",
- },
-};
-
-static const struct hash_testvec tgr128_tv_template[] = {
- {
- .plaintext = "",
- .psize = 0,
- .digest = "\x24\xf0\x13\x0c\x63\xac\x93\x32"
- "\x16\x16\x6e\x76\xb1\xbb\x92\x5f",
- }, {
- .plaintext = "abc",
- .psize = 3,
- .digest = "\xf2\x58\xc1\xe8\x84\x14\xab\x2a"
- "\x52\x7a\xb5\x41\xff\xc5\xb8\xbf",
- }, {
- .plaintext = "Tiger",
- .psize = 5,
- .digest = "\x9f\x00\xf5\x99\x07\x23\x00\xdd"
- "\x27\x6a\xbb\x38\xc8\xeb\x6d\xec",
- }, {
- .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-",
- .psize = 64,
- .digest = "\x87\xfb\x2a\x90\x83\x85\x1c\xf7"
- "\x47\x0d\x2c\xf8\x10\xe6\xdf\x9e",
- }, {
- .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ=abcdefghijklmnopqrstuvwxyz+0123456789",
- .psize = 64,
- .digest = "\x46\x7d\xb8\x08\x63\xeb\xce\x48"
- "\x8d\xf1\xcd\x12\x61\x65\x5d\xe9",
- }, {
- .plaintext = "Tiger - A Fast New Hash Function, "
- "by Ross Anderson and Eli Biham, "
- "proceedings of Fast Software Encryption 3, "
- "Cambridge, 1996.",
- .psize = 125,
- .digest = "\x3d\x9a\xeb\x03\xd1\xbd\x1a\x63"
- "\x57\xb2\x77\x4d\xfd\x6d\x5b\x24",
- },
-};
-
static const struct hash_testvec ghash_tv_template[] =
{
{
@@ -5453,83 +5139,6 @@ static const struct hash_testvec hmac_md5_tv_template[] =
};
/*
- * HMAC-RIPEMD128 test vectors from RFC2286
- */
-static const struct hash_testvec hmac_rmd128_tv_template[] = {
- {
- .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
- .ksize = 16,
- .plaintext = "Hi There",
- .psize = 8,
- .digest = "\xfb\xf6\x1f\x94\x92\xaa\x4b\xbf"
- "\x81\xc1\x72\xe8\x4e\x07\x34\xdb",
- }, {
- .key = "Jefe",
- .ksize = 4,
- .plaintext = "what do ya want for nothing?",
- .psize = 28,
- .digest = "\x87\x5f\x82\x88\x62\xb6\xb3\x34"
- "\xb4\x27\xc5\x5f\x9f\x7f\xf0\x9b",
- }, {
- .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
- .ksize = 16,
- .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
- "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
- "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
- "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
- .psize = 50,
- .digest = "\x09\xf0\xb2\x84\x6d\x2f\x54\x3d"
- "\xa3\x63\xcb\xec\x8d\x62\xa3\x8d",
- }, {
- .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
- "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
- "\x11\x12\x13\x14\x15\x16\x17\x18\x19",
- .ksize = 25,
- .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
- "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
- "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
- "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
- .psize = 50,
- .digest = "\xbd\xbb\xd7\xcf\x03\xe4\x4b\x5a"
- "\xa6\x0a\xf8\x15\xbe\x4d\x22\x94",
- }, {
- .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c",
- .ksize = 16,
- .plaintext = "Test With Truncation",
- .psize = 20,
- .digest = "\xe7\x98\x08\xf2\x4b\x25\xfd\x03"
- "\x1c\x15\x5f\x0d\x55\x1d\x9a\x3a",
- }, {
- .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
- "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
- "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
- "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
- "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
- "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
- "\xaa\xaa",
- .ksize = 80,
- .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
- .psize = 54,
- .digest = "\xdc\x73\x29\x28\xde\x98\x10\x4a"
- "\x1f\x59\xd3\x73\xc1\x50\xac\xbb",
- }, {
- .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
- "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
- "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
- "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
- "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
- "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
- "\xaa\xaa",
- .ksize = 80,
- .plaintext = "Test Using Larger Than Block-Size Key and Larger Than One "
- "Block-Size Data",
- .psize = 73,
- .digest = "\x5c\x6b\xec\x96\x79\x3e\x16\xd4"
- "\x06\x90\xc2\x37\x63\x5f\x30\xc5",
- },
-};
-
-/*
* HMAC-RIPEMD160 test vectors from RFC2286
*/
static const struct hash_testvec hmac_rmd160_tv_template[] = {
@@ -11806,85 +11415,6 @@ static const struct cipher_testvec serpent_tv_template[] = {
},
};
-static const struct cipher_testvec tnepres_tv_template[] = {
- { /* KeySize=0 */
- .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ctext = "\x41\xcc\x6b\x31\x59\x31\x45\x97"
- "\x6d\x6f\xbb\x38\x4b\x37\x21\x28",
- .len = 16,
- },
- { /* KeySize=128, PT=0, I=1 */
- .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .klen = 16,
- .ctext = "\x49\xaf\xbf\xad\x9d\x5a\x34\x05"
- "\x2c\xd8\xff\xa5\x98\x6b\xd2\xdd",
- .len = 16,
- }, { /* KeySize=128 */
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .klen = 16,
- .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ctext = "\xea\xf4\xd7\xfc\xd8\x01\x34\x47"
- "\x81\x45\x0b\xfa\x0c\xd6\xad\x6e",
- .len = 16,
- }, { /* KeySize=128, I=121 */
- .key = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80",
- .klen = 16,
- .ptext = zeroed_string,
- .ctext = "\x3d\xda\xbf\xc0\x06\xda\xab\x06"
- "\x46\x2a\xf4\xef\x81\x54\x4e\x26",
- .len = 16,
- }, { /* KeySize=192, PT=0, I=1 */
- .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .klen = 24,
- .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ctext = "\xe7\x8e\x54\x02\xc7\x19\x55\x68"
- "\xac\x36\x78\xf7\xa3\xf6\x0c\x66",
- .len = 16,
- }, { /* KeySize=256, PT=0, I=1 */
- .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .klen = 32,
- .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ctext = "\xab\xed\x96\xe7\x66\xbf\x28\xcb"
- "\xc0\xeb\xd2\x1a\x82\xef\x08\x19",
- .len = 16,
- }, { /* KeySize=256, I=257 */
- .key = "\x1f\x1e\x1d\x1c\x1b\x1a\x19\x18"
- "\x17\x16\x15\x14\x13\x12\x11\x10"
- "\x0f\x0e\x0d\x0c\x0b\x0a\x09\x08"
- "\x07\x06\x05\x04\x03\x02\x01\x00",
- .klen = 32,
- .ptext = "\x0f\x0e\x0d\x0c\x0b\x0a\x09\x08"
- "\x07\x06\x05\x04\x03\x02\x01\x00",
- .ctext = "\x5c\xe7\x1c\x70\xd2\x88\x2e\x5b"
- "\xb8\x32\xe4\x33\xf8\x9f\x26\xde",
- .len = 16,
- }, { /* KeySize=256 */
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
- .klen = 32,
- .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .ctext = "\x64\xa9\x1a\x37\xed\x9f\xe7\x49"
- "\xa8\x4e\x76\xd6\xf5\x0d\x78\xee",
- .len = 16,
- }
-};
-
static const struct cipher_testvec serpent_cbc_tv_template[] = {
{ /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
@@ -24800,1168 +24330,6 @@ static const struct cipher_testvec seed_tv_template[] = {
}
};
-static const struct cipher_testvec salsa20_stream_tv_template[] = {
- /*
- * Testvectors from verified.test-vectors submitted to ECRYPT.
- * They are truncated to size 39, 64, 111, 129 to test a variety
- * of input length.
- */
- { /* Set 3, vector 0 */
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
- .klen = 16,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00",
- .ctext = "\x2D\xD5\xC3\xF7\xBA\x2B\x20\xF7"
- "\x68\x02\x41\x0C\x68\x86\x88\x89"
- "\x5A\xD8\xC1\xBD\x4E\xA6\xC9\xB1"
- "\x40\xFB\x9B\x90\xE2\x10\x49\xBF"
- "\x58\x3F\x52\x79\x70\xEB\xC1",
- .len = 39,
- }, { /* Set 5, vector 0 */
- .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .klen = 16,
- .iv = "\x80\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ctext = "\xB6\x6C\x1E\x44\x46\xDD\x95\x57"
- "\xE5\x78\xE2\x23\xB0\xB7\x68\x01"
- "\x7B\x23\xB2\x67\xBB\x02\x34\xAE"
- "\x46\x26\xBF\x44\x3F\x21\x97\x76"
- "\x43\x6F\xB1\x9F\xD0\xE8\x86\x6F"
- "\xCD\x0D\xE9\xA9\x53\x8F\x4A\x09"
- "\xCA\x9A\xC0\x73\x2E\x30\xBC\xF9"
- "\x8E\x4F\x13\xE4\xB9\xE2\x01\xD9",
- .len = 64,
- }, { /* Set 3, vector 27 */
- .key = "\x1B\x1C\x1D\x1E\x1F\x20\x21\x22"
- "\x23\x24\x25\x26\x27\x28\x29\x2A"
- "\x2B\x2C\x2D\x2E\x2F\x30\x31\x32"
- "\x33\x34\x35\x36\x37\x38\x39\x3A",
- .klen = 32,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00",
- .ctext = "\xAE\x39\x50\x8E\xAC\x9A\xEC\xE7"
- "\xBF\x97\xBB\x20\xB9\xDE\xE4\x1F"
- "\x87\xD9\x47\xF8\x28\x91\x35\x98"
- "\xDB\x72\xCC\x23\x29\x48\x56\x5E"
- "\x83\x7E\x0B\xF3\x7D\x5D\x38\x7B"
- "\x2D\x71\x02\xB4\x3B\xB5\xD8\x23"
- "\xB0\x4A\xDF\x3C\xEC\xB6\xD9\x3B"
- "\x9B\xA7\x52\xBE\xC5\xD4\x50\x59"
- "\x15\x14\xB4\x0E\x40\xE6\x53\xD1"
- "\x83\x9C\x5B\xA0\x92\x29\x6B\x5E"
- "\x96\x5B\x1E\x2F\xD3\xAC\xC1\x92"
- "\xB1\x41\x3F\x19\x2F\xC4\x3B\xC6"
- "\x95\x46\x45\x54\xE9\x75\x03\x08"
- "\x44\xAF\xE5\x8A\x81\x12\x09",
- .len = 111,
- }, { /* Set 5, vector 27 */
- .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .klen = 32,
- .iv = "\x00\x00\x00\x10\x00\x00\x00\x00",
- .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00",
- .ctext = "\xD2\xDB\x1A\x5C\xF1\xC1\xAC\xDB"
- "\xE8\x1A\x7A\x43\x40\xEF\x53\x43"
- "\x5E\x7F\x4B\x1A\x50\x52\x3F\x8D"
- "\x28\x3D\xCF\x85\x1D\x69\x6E\x60"
- "\xF2\xDE\x74\x56\x18\x1B\x84\x10"
- "\xD4\x62\xBA\x60\x50\xF0\x61\xF2"
- "\x1C\x78\x7F\xC1\x24\x34\xAF\x58"
- "\xBF\x2C\x59\xCA\x90\x77\xF3\xB0"
- "\x5B\x4A\xDF\x89\xCE\x2C\x2F\xFC"
- "\x67\xF0\xE3\x45\xE8\xB3\xB3\x75"
- "\xA0\x95\x71\xA1\x29\x39\x94\xCA"
- "\x45\x2F\xBD\xCB\x10\xB6\xBE\x9F"
- "\x8E\xF9\xB2\x01\x0A\x5A\x0A\xB7"
- "\x6B\x9D\x70\x8E\x4B\xD6\x2F\xCD"
- "\x2E\x40\x48\x75\xE9\xE2\x21\x45"
- "\x0B\xC9\xB6\xB5\x66\xBC\x9A\x59"
- "\x5A",
- .len = 129,
- }, { /* large test vector generated using Crypto++ */
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
- .klen = 32,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext =
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- "\x00\x03\x06\x09\x0c\x0f\x12\x15"
- "\x18\x1b\x1e\x21\x24\x27\x2a\x2d"
- "\x30\x33\x36\x39\x3c\x3f\x42\x45"
- "\x48\x4b\x4e\x51\x54\x57\x5a\x5d"
- "\x60\x63\x66\x69\x6c\x6f\x72\x75"
- "\x78\x7b\x7e\x81\x84\x87\x8a\x8d"
- "\x90\x93\x96\x99\x9c\x9f\xa2\xa5"
- "\xa8\xab\xae\xb1\xb4\xb7\xba\xbd"
- "\xc0\xc3\xc6\xc9\xcc\xcf\xd2\xd5"
- "\xd8\xdb\xde\xe1\xe4\xe7\xea\xed"
- "\xf0\xf3\xf6\xf9\xfc\xff\x02\x05"
- "\x08\x0b\x0e\x11\x14\x17\x1a\x1d"
- "\x20\x23\x26\x29\x2c\x2f\x32\x35"
- "\x38\x3b\x3e\x41\x44\x47\x4a\x4d"
- "\x50\x53\x56\x59\x5c\x5f\x62\x65"
- "\x68\x6b\x6e\x71\x74\x77\x7a\x7d"
- "\x80\x83\x86\x89\x8c\x8f\x92\x95"
- "\x98\x9b\x9e\xa1\xa4\xa7\xaa\xad"
- "\xb0\xb3\xb6\xb9\xbc\xbf\xc2\xc5"
- "\xc8\xcb\xce\xd1\xd4\xd7\xda\xdd"
- "\xe0\xe3\xe6\xe9\xec\xef\xf2\xf5"
- "\xf8\xfb\xfe\x01\x04\x07\x0a\x0d"
- "\x10\x13\x16\x19\x1c\x1f\x22\x25"
- "\x28\x2b\x2e\x31\x34\x37\x3a\x3d"
- "\x40\x43\x46\x49\x4c\x4f\x52\x55"
- "\x58\x5b\x5e\x61\x64\x67\x6a\x6d"
- "\x70\x73\x76\x79\x7c\x7f\x82\x85"
- "\x88\x8b\x8e\x91\x94\x97\x9a\x9d"
- "\xa0\xa3\xa6\xa9\xac\xaf\xb2\xb5"
- "\xb8\xbb\xbe\xc1\xc4\xc7\xca\xcd"
- "\xd0\xd3\xd6\xd9\xdc\xdf\xe2\xe5"
- "\xe8\xeb\xee\xf1\xf4\xf7\xfa\xfd"
- "\x00\x05\x0a\x0f\x14\x19\x1e\x23"
- "\x28\x2d\x32\x37\x3c\x41\x46\x4b"
- "\x50\x55\x5a\x5f\x64\x69\x6e\x73"
- "\x78\x7d\x82\x87\x8c\x91\x96\x9b"
- "\xa0\xa5\xaa\xaf\xb4\xb9\xbe\xc3"
- "\xc8\xcd\xd2\xd7\xdc\xe1\xe6\xeb"
- "\xf0\xf5\xfa\xff\x04\x09\x0e\x13"
- "\x18\x1d\x22\x27\x2c\x31\x36\x3b"
- "\x40\x45\x4a\x4f\x54\x59\x5e\x63"
- "\x68\x6d\x72\x77\x7c\x81\x86\x8b"
- "\x90\x95\x9a\x9f\xa4\xa9\xae\xb3"
- "\xb8\xbd\xc2\xc7\xcc\xd1\xd6\xdb"
- "\xe0\xe5\xea\xef\xf4\xf9\xfe\x03"
- "\x08\x0d\x12\x17\x1c\x21\x26\x2b"
- "\x30\x35\x3a\x3f\x44\x49\x4e\x53"
- "\x58\x5d\x62\x67\x6c\x71\x76\x7b"
- "\x80\x85\x8a\x8f\x94\x99\x9e\xa3"
- "\xa8\xad\xb2\xb7\xbc\xc1\xc6\xcb"
- "\xd0\xd5\xda\xdf\xe4\xe9\xee\xf3"
- "\xf8\xfd\x02\x07\x0c\x11\x16\x1b"
- "\x20\x25\x2a\x2f\x34\x39\x3e\x43"
- "\x48\x4d\x52\x57\x5c\x61\x66\x6b"
- "\x70\x75\x7a\x7f\x84\x89\x8e\x93"
- "\x98\x9d\xa2\xa7\xac\xb1\xb6\xbb"
- "\xc0\xc5\xca\xcf\xd4\xd9\xde\xe3"
- "\xe8\xed\xf2\xf7\xfc\x01\x06\x0b"
- "\x10\x15\x1a\x1f\x24\x29\x2e\x33"
- "\x38\x3d\x42\x47\x4c\x51\x56\x5b"
- "\x60\x65\x6a\x6f\x74\x79\x7e\x83"
- "\x88\x8d\x92\x97\x9c\xa1\xa6\xab"
- "\xb0\xb5\xba\xbf\xc4\xc9\xce\xd3"
- "\xd8\xdd\xe2\xe7\xec\xf1\xf6\xfb"
- "\x00\x07\x0e\x15\x1c\x23\x2a\x31"
- "\x38\x3f\x46\x4d\x54\x5b\x62\x69"
- "\x70\x77\x7e\x85\x8c\x93\x9a\xa1"
- "\xa8\xaf\xb6\xbd\xc4\xcb\xd2\xd9"
- "\xe0\xe7\xee\xf5\xfc\x03\x0a\x11"
- "\x18\x1f\x26\x2d\x34\x3b\x42\x49"
- "\x50\x57\x5e\x65\x6c\x73\x7a\x81"
- "\x88\x8f\x96\x9d\xa4\xab\xb2\xb9"
- "\xc0\xc7\xce\xd5\xdc\xe3\xea\xf1"
- "\xf8\xff\x06\x0d\x14\x1b\x22\x29"
- "\x30\x37\x3e\x45\x4c\x53\x5a\x61"
- "\x68\x6f\x76\x7d\x84\x8b\x92\x99"
- "\xa0\xa7\xae\xb5\xbc\xc3\xca\xd1"
- "\xd8\xdf\xe6\xed\xf4\xfb\x02\x09"
- "\x10\x17\x1e\x25\x2c\x33\x3a\x41"
- "\x48\x4f\x56\x5d\x64\x6b\x72\x79"
- "\x80\x87\x8e\x95\x9c\xa3\xaa\xb1"
- "\xb8\xbf\xc6\xcd\xd4\xdb\xe2\xe9"
- "\xf0\xf7\xfe\x05\x0c\x13\x1a\x21"
- "\x28\x2f\x36\x3d\x44\x4b\x52\x59"
- "\x60\x67\x6e\x75\x7c\x83\x8a\x91"
- "\x98\x9f\xa6\xad\xb4\xbb\xc2\xc9"
- "\xd0\xd7\xde\xe5\xec\xf3\xfa\x01"
- "\x08\x0f\x16\x1d\x24\x2b\x32\x39"
- "\x40\x47\x4e\x55\x5c\x63\x6a\x71"
- "\x78\x7f\x86\x8d\x94\x9b\xa2\xa9"
- "\xb0\xb7\xbe\xc5\xcc\xd3\xda\xe1"
- "\xe8\xef\xf6\xfd\x04\x0b\x12\x19"
- "\x20\x27\x2e\x35\x3c\x43\x4a\x51"
- "\x58\x5f\x66\x6d\x74\x7b\x82\x89"
- "\x90\x97\x9e\xa5\xac\xb3\xba\xc1"
- "\xc8\xcf\xd6\xdd\xe4\xeb\xf2\xf9"
- "\x00\x09\x12\x1b\x24\x2d\x36\x3f"
- "\x48\x51\x5a\x63\x6c\x75\x7e\x87"
- "\x90\x99\xa2\xab\xb4\xbd\xc6\xcf"
- "\xd8\xe1\xea\xf3\xfc\x05\x0e\x17"
- "\x20\x29\x32\x3b\x44\x4d\x56\x5f"
- "\x68\x71\x7a\x83\x8c\x95\x9e\xa7"
- "\xb0\xb9\xc2\xcb\xd4\xdd\xe6\xef"
- "\xf8\x01\x0a\x13\x1c\x25\x2e\x37"
- "\x40\x49\x52\x5b\x64\x6d\x76\x7f"
- "\x88\x91\x9a\xa3\xac\xb5\xbe\xc7"
- "\xd0\xd9\xe2\xeb\xf4\xfd\x06\x0f"
- "\x18\x21\x2a\x33\x3c\x45\x4e\x57"
- "\x60\x69\x72\x7b\x84\x8d\x96\x9f"
- "\xa8\xb1\xba\xc3\xcc\xd5\xde\xe7"
- "\xf0\xf9\x02\x0b\x14\x1d\x26\x2f"
- "\x38\x41\x4a\x53\x5c\x65\x6e\x77"
- "\x80\x89\x92\x9b\xa4\xad\xb6\xbf"
- "\xc8\xd1\xda\xe3\xec\xf5\xfe\x07"
- "\x10\x19\x22\x2b\x34\x3d\x46\x4f"
- "\x58\x61\x6a\x73\x7c\x85\x8e\x97"
- "\xa0\xa9\xb2\xbb\xc4\xcd\xd6\xdf"
- "\xe8\xf1\xfa\x03\x0c\x15\x1e\x27"
- "\x30\x39\x42\x4b\x54\x5d\x66\x6f"
- "\x78\x81\x8a\x93\x9c\xa5\xae\xb7"
- "\xc0\xc9\xd2\xdb\xe4\xed\xf6\xff"
- "\x08\x11\x1a\x23\x2c\x35\x3e\x47"
- "\x50\x59\x62\x6b\x74\x7d\x86\x8f"
- "\x98\xa1\xaa\xb3\xbc\xc5\xce\xd7"
- "\xe0\xe9\xf2\xfb\x04\x0d\x16\x1f"
- "\x28\x31\x3a\x43\x4c\x55\x5e\x67"
- "\x70\x79\x82\x8b\x94\x9d\xa6\xaf"
- "\xb8\xc1\xca\xd3\xdc\xe5\xee\xf7"
- "\x00\x0b\x16\x21\x2c\x37\x42\x4d"
- "\x58\x63\x6e\x79\x84\x8f\x9a\xa5"
- "\xb0\xbb\xc6\xd1\xdc\xe7\xf2\xfd"
- "\x08\x13\x1e\x29\x34\x3f\x4a\x55"
- "\x60\x6b\x76\x81\x8c\x97\xa2\xad"
- "\xb8\xc3\xce\xd9\xe4\xef\xfa\x05"
- "\x10\x1b\x26\x31\x3c\x47\x52\x5d"
- "\x68\x73\x7e\x89\x94\x9f\xaa\xb5"
- "\xc0\xcb\xd6\xe1\xec\xf7\x02\x0d"
- "\x18\x23\x2e\x39\x44\x4f\x5a\x65"
- "\x70\x7b\x86\x91\x9c\xa7\xb2\xbd"
- "\xc8\xd3\xde\xe9\xf4\xff\x0a\x15"
- "\x20\x2b\x36\x41\x4c\x57\x62\x6d"
- "\x78\x83\x8e\x99\xa4\xaf\xba\xc5"
- "\xd0\xdb\xe6\xf1\xfc\x07\x12\x1d"
- "\x28\x33\x3e\x49\x54\x5f\x6a\x75"
- "\x80\x8b\x96\xa1\xac\xb7\xc2\xcd"
- "\xd8\xe3\xee\xf9\x04\x0f\x1a\x25"
- "\x30\x3b\x46\x51\x5c\x67\x72\x7d"
- "\x88\x93\x9e\xa9\xb4\xbf\xca\xd5"
- "\xe0\xeb\xf6\x01\x0c\x17\x22\x2d"
- "\x38\x43\x4e\x59\x64\x6f\x7a\x85"
- "\x90\x9b\xa6\xb1\xbc\xc7\xd2\xdd"
- "\xe8\xf3\xfe\x09\x14\x1f\x2a\x35"
- "\x40\x4b\x56\x61\x6c\x77\x82\x8d"
- "\x98\xa3\xae\xb9\xc4\xcf\xda\xe5"
- "\xf0\xfb\x06\x11\x1c\x27\x32\x3d"
- "\x48\x53\x5e\x69\x74\x7f\x8a\x95"
- "\xa0\xab\xb6\xc1\xcc\xd7\xe2\xed"
- "\xf8\x03\x0e\x19\x24\x2f\x3a\x45"
- "\x50\x5b\x66\x71\x7c\x87\x92\x9d"
- "\xa8\xb3\xbe\xc9\xd4\xdf\xea\xf5"
- "\x00\x0d\x1a\x27\x34\x41\x4e\x5b"
- "\x68\x75\x82\x8f\x9c\xa9\xb6\xc3"
- "\xd0\xdd\xea\xf7\x04\x11\x1e\x2b"
- "\x38\x45\x52\x5f\x6c\x79\x86\x93"
- "\xa0\xad\xba\xc7\xd4\xe1\xee\xfb"
- "\x08\x15\x22\x2f\x3c\x49\x56\x63"
- "\x70\x7d\x8a\x97\xa4\xb1\xbe\xcb"
- "\xd8\xe5\xf2\xff\x0c\x19\x26\x33"
- "\x40\x4d\x5a\x67\x74\x81\x8e\x9b"
- "\xa8\xb5\xc2\xcf\xdc\xe9\xf6\x03"
- "\x10\x1d\x2a\x37\x44\x51\x5e\x6b"
- "\x78\x85\x92\x9f\xac\xb9\xc6\xd3"
- "\xe0\xed\xfa\x07\x14\x21\x2e\x3b"
- "\x48\x55\x62\x6f\x7c\x89\x96\xa3"
- "\xb0\xbd\xca\xd7\xe4\xf1\xfe\x0b"
- "\x18\x25\x32\x3f\x4c\x59\x66\x73"
- "\x80\x8d\x9a\xa7\xb4\xc1\xce\xdb"
- "\xe8\xf5\x02\x0f\x1c\x29\x36\x43"
- "\x50\x5d\x6a\x77\x84\x91\x9e\xab"
- "\xb8\xc5\xd2\xdf\xec\xf9\x06\x13"
- "\x20\x2d\x3a\x47\x54\x61\x6e\x7b"
- "\x88\x95\xa2\xaf\xbc\xc9\xd6\xe3"
- "\xf0\xfd\x0a\x17\x24\x31\x3e\x4b"
- "\x58\x65\x72\x7f\x8c\x99\xa6\xb3"
- "\xc0\xcd\xda\xe7\xf4\x01\x0e\x1b"
- "\x28\x35\x42\x4f\x5c\x69\x76\x83"
- "\x90\x9d\xaa\xb7\xc4\xd1\xde\xeb"
- "\xf8\x05\x12\x1f\x2c\x39\x46\x53"
- "\x60\x6d\x7a\x87\x94\xa1\xae\xbb"
- "\xc8\xd5\xe2\xef\xfc\x09\x16\x23"
- "\x30\x3d\x4a\x57\x64\x71\x7e\x8b"
- "\x98\xa5\xb2\xbf\xcc\xd9\xe6\xf3"
- "\x00\x0f\x1e\x2d\x3c\x4b\x5a\x69"
- "\x78\x87\x96\xa5\xb4\xc3\xd2\xe1"
- "\xf0\xff\x0e\x1d\x2c\x3b\x4a\x59"
- "\x68\x77\x86\x95\xa4\xb3\xc2\xd1"
- "\xe0\xef\xfe\x0d\x1c\x2b\x3a\x49"
- "\x58\x67\x76\x85\x94\xa3\xb2\xc1"
- "\xd0\xdf\xee\xfd\x0c\x1b\x2a\x39"
- "\x48\x57\x66\x75\x84\x93\xa2\xb1"
- "\xc0\xcf\xde\xed\xfc\x0b\x1a\x29"
- "\x38\x47\x56\x65\x74\x83\x92\xa1"
- "\xb0\xbf\xce\xdd\xec\xfb\x0a\x19"
- "\x28\x37\x46\x55\x64\x73\x82\x91"
- "\xa0\xaf\xbe\xcd\xdc\xeb\xfa\x09"
- "\x18\x27\x36\x45\x54\x63\x72\x81"
- "\x90\x9f\xae\xbd\xcc\xdb\xea\xf9"
- "\x08\x17\x26\x35\x44\x53\x62\x71"
- "\x80\x8f\x9e\xad\xbc\xcb\xda\xe9"
- "\xf8\x07\x16\x25\x34\x43\x52\x61"
- "\x70\x7f\x8e\x9d\xac\xbb\xca\xd9"
- "\xe8\xf7\x06\x15\x24\x33\x42\x51"
- "\x60\x6f\x7e\x8d\x9c\xab\xba\xc9"
- "\xd8\xe7\xf6\x05\x14\x23\x32\x41"
- "\x50\x5f\x6e\x7d\x8c\x9b\xaa\xb9"
- "\xc8\xd7\xe6\xf5\x04\x13\x22\x31"
- "\x40\x4f\x5e\x6d\x7c\x8b\x9a\xa9"
- "\xb8\xc7\xd6\xe5\xf4\x03\x12\x21"
- "\x30\x3f\x4e\x5d\x6c\x7b\x8a\x99"
- "\xa8\xb7\xc6\xd5\xe4\xf3\x02\x11"
- "\x20\x2f\x3e\x4d\x5c\x6b\x7a\x89"
- "\x98\xa7\xb6\xc5\xd4\xe3\xf2\x01"
- "\x10\x1f\x2e\x3d\x4c\x5b\x6a\x79"
- "\x88\x97\xa6\xb5\xc4\xd3\xe2\xf1"
- "\x00\x11\x22\x33\x44\x55\x66\x77"
- "\x88\x99\xaa\xbb\xcc\xdd\xee\xff"
- "\x10\x21\x32\x43\x54\x65\x76\x87"
- "\x98\xa9\xba\xcb\xdc\xed\xfe\x0f"
- "\x20\x31\x42\x53\x64\x75\x86\x97"
- "\xa8\xb9\xca\xdb\xec\xfd\x0e\x1f"
- "\x30\x41\x52\x63\x74\x85\x96\xa7"
- "\xb8\xc9\xda\xeb\xfc\x0d\x1e\x2f"
- "\x40\x51\x62\x73\x84\x95\xa6\xb7"
- "\xc8\xd9\xea\xfb\x0c\x1d\x2e\x3f"
- "\x50\x61\x72\x83\x94\xa5\xb6\xc7"
- "\xd8\xe9\xfa\x0b\x1c\x2d\x3e\x4f"
- "\x60\x71\x82\x93\xa4\xb5\xc6\xd7"
- "\xe8\xf9\x0a\x1b\x2c\x3d\x4e\x5f"
- "\x70\x81\x92\xa3\xb4\xc5\xd6\xe7"
- "\xf8\x09\x1a\x2b\x3c\x4d\x5e\x6f"
- "\x80\x91\xa2\xb3\xc4\xd5\xe6\xf7"
- "\x08\x19\x2a\x3b\x4c\x5d\x6e\x7f"
- "\x90\xa1\xb2\xc3\xd4\xe5\xf6\x07"
- "\x18\x29\x3a\x4b\x5c\x6d\x7e\x8f"
- "\xa0\xb1\xc2\xd3\xe4\xf5\x06\x17"
- "\x28\x39\x4a\x5b\x6c\x7d\x8e\x9f"
- "\xb0\xc1\xd2\xe3\xf4\x05\x16\x27"
- "\x38\x49\x5a\x6b\x7c\x8d\x9e\xaf"
- "\xc0\xd1\xe2\xf3\x04\x15\x26\x37"
- "\x48\x59\x6a\x7b\x8c\x9d\xae\xbf"
- "\xd0\xe1\xf2\x03\x14\x25\x36\x47"
- "\x58\x69\x7a\x8b\x9c\xad\xbe\xcf"
- "\xe0\xf1\x02\x13\x24\x35\x46\x57"
- "\x68\x79\x8a\x9b\xac\xbd\xce\xdf"
- "\xf0\x01\x12\x23\x34\x45\x56\x67"
- "\x78\x89\x9a\xab\xbc\xcd\xde\xef"
- "\x00\x13\x26\x39\x4c\x5f\x72\x85"
- "\x98\xab\xbe\xd1\xe4\xf7\x0a\x1d"
- "\x30\x43\x56\x69\x7c\x8f\xa2\xb5"
- "\xc8\xdb\xee\x01\x14\x27\x3a\x4d"
- "\x60\x73\x86\x99\xac\xbf\xd2\xe5"
- "\xf8\x0b\x1e\x31\x44\x57\x6a\x7d"
- "\x90\xa3\xb6\xc9\xdc\xef\x02\x15"
- "\x28\x3b\x4e\x61\x74\x87\x9a\xad"
- "\xc0\xd3\xe6\xf9\x0c\x1f\x32\x45"
- "\x58\x6b\x7e\x91\xa4\xb7\xca\xdd"
- "\xf0\x03\x16\x29\x3c\x4f\x62\x75"
- "\x88\x9b\xae\xc1\xd4\xe7\xfa\x0d"
- "\x20\x33\x46\x59\x6c\x7f\x92\xa5"
- "\xb8\xcb\xde\xf1\x04\x17\x2a\x3d"
- "\x50\x63\x76\x89\x9c\xaf\xc2\xd5"
- "\xe8\xfb\x0e\x21\x34\x47\x5a\x6d"
- "\x80\x93\xa6\xb9\xcc\xdf\xf2\x05"
- "\x18\x2b\x3e\x51\x64\x77\x8a\x9d"
- "\xb0\xc3\xd6\xe9\xfc\x0f\x22\x35"
- "\x48\x5b\x6e\x81\x94\xa7\xba\xcd"
- "\xe0\xf3\x06\x19\x2c\x3f\x52\x65"
- "\x78\x8b\x9e\xb1\xc4\xd7\xea\xfd"
- "\x10\x23\x36\x49\x5c\x6f\x82\x95"
- "\xa8\xbb\xce\xe1\xf4\x07\x1a\x2d"
- "\x40\x53\x66\x79\x8c\x9f\xb2\xc5"
- "\xd8\xeb\xfe\x11\x24\x37\x4a\x5d"
- "\x70\x83\x96\xa9\xbc\xcf\xe2\xf5"
- "\x08\x1b\x2e\x41\x54\x67\x7a\x8d"
- "\xa0\xb3\xc6\xd9\xec\xff\x12\x25"
- "\x38\x4b\x5e\x71\x84\x97\xaa\xbd"
- "\xd0\xe3\xf6\x09\x1c\x2f\x42\x55"
- "\x68\x7b\x8e\xa1\xb4\xc7\xda\xed"
- "\x00\x15\x2a\x3f\x54\x69\x7e\x93"
- "\xa8\xbd\xd2\xe7\xfc\x11\x26\x3b"
- "\x50\x65\x7a\x8f\xa4\xb9\xce\xe3"
- "\xf8\x0d\x22\x37\x4c\x61\x76\x8b"
- "\xa0\xb5\xca\xdf\xf4\x09\x1e\x33"
- "\x48\x5d\x72\x87\x9c\xb1\xc6\xdb"
- "\xf0\x05\x1a\x2f\x44\x59\x6e\x83"
- "\x98\xad\xc2\xd7\xec\x01\x16\x2b"
- "\x40\x55\x6a\x7f\x94\xa9\xbe\xd3"
- "\xe8\xfd\x12\x27\x3c\x51\x66\x7b"
- "\x90\xa5\xba\xcf\xe4\xf9\x0e\x23"
- "\x38\x4d\x62\x77\x8c\xa1\xb6\xcb"
- "\xe0\xf5\x0a\x1f\x34\x49\x5e\x73"
- "\x88\x9d\xb2\xc7\xdc\xf1\x06\x1b"
- "\x30\x45\x5a\x6f\x84\x99\xae\xc3"
- "\xd8\xed\x02\x17\x2c\x41\x56\x6b"
- "\x80\x95\xaa\xbf\xd4\xe9\xfe\x13"
- "\x28\x3d\x52\x67\x7c\x91\xa6\xbb"
- "\xd0\xe5\xfa\x0f\x24\x39\x4e\x63"
- "\x78\x8d\xa2\xb7\xcc\xe1\xf6\x0b"
- "\x20\x35\x4a\x5f\x74\x89\x9e\xb3"
- "\xc8\xdd\xf2\x07\x1c\x31\x46\x5b"
- "\x70\x85\x9a\xaf\xc4\xd9\xee\x03"
- "\x18\x2d\x42\x57\x6c\x81\x96\xab"
- "\xc0\xd5\xea\xff\x14\x29\x3e\x53"
- "\x68\x7d\x92\xa7\xbc\xd1\xe6\xfb"
- "\x10\x25\x3a\x4f\x64\x79\x8e\xa3"
- "\xb8\xcd\xe2\xf7\x0c\x21\x36\x4b"
- "\x60\x75\x8a\x9f\xb4\xc9\xde\xf3"
- "\x08\x1d\x32\x47\x5c\x71\x86\x9b"
- "\xb0\xc5\xda\xef\x04\x19\x2e\x43"
- "\x58\x6d\x82\x97\xac\xc1\xd6\xeb"
- "\x00\x17\x2e\x45\x5c\x73\x8a\xa1"
- "\xb8\xcf\xe6\xfd\x14\x2b\x42\x59"
- "\x70\x87\x9e\xb5\xcc\xe3\xfa\x11"
- "\x28\x3f\x56\x6d\x84\x9b\xb2\xc9"
- "\xe0\xf7\x0e\x25\x3c\x53\x6a\x81"
- "\x98\xaf\xc6\xdd\xf4\x0b\x22\x39"
- "\x50\x67\x7e\x95\xac\xc3\xda\xf1"
- "\x08\x1f\x36\x4d\x64\x7b\x92\xa9"
- "\xc0\xd7\xee\x05\x1c\x33\x4a\x61"
- "\x78\x8f\xa6\xbd\xd4\xeb\x02\x19"
- "\x30\x47\x5e\x75\x8c\xa3\xba\xd1"
- "\xe8\xff\x16\x2d\x44\x5b\x72\x89"
- "\xa0\xb7\xce\xe5\xfc\x13\x2a\x41"
- "\x58\x6f\x86\x9d\xb4\xcb\xe2\xf9"
- "\x10\x27\x3e\x55\x6c\x83\x9a\xb1"
- "\xc8\xdf\xf6\x0d\x24\x3b\x52\x69"
- "\x80\x97\xae\xc5\xdc\xf3\x0a\x21"
- "\x38\x4f\x66\x7d\x94\xab\xc2\xd9"
- "\xf0\x07\x1e\x35\x4c\x63\x7a\x91"
- "\xa8\xbf\xd6\xed\x04\x1b\x32\x49"
- "\x60\x77\x8e\xa5\xbc\xd3\xea\x01"
- "\x18\x2f\x46\x5d\x74\x8b\xa2\xb9"
- "\xd0\xe7\xfe\x15\x2c\x43\x5a\x71"
- "\x88\x9f\xb6\xcd\xe4\xfb\x12\x29"
- "\x40\x57\x6e\x85\x9c\xb3\xca\xe1"
- "\xf8\x0f\x26\x3d\x54\x6b\x82\x99"
- "\xb0\xc7\xde\xf5\x0c\x23\x3a\x51"
- "\x68\x7f\x96\xad\xc4\xdb\xf2\x09"
- "\x20\x37\x4e\x65\x7c\x93\xaa\xc1"
- "\xd8\xef\x06\x1d\x34\x4b\x62\x79"
- "\x90\xa7\xbe\xd5\xec\x03\x1a\x31"
- "\x48\x5f\x76\x8d\xa4\xbb\xd2\xe9"
- "\x00\x19\x32\x4b\x64\x7d\x96\xaf"
- "\xc8\xe1\xfa\x13\x2c\x45\x5e\x77"
- "\x90\xa9\xc2\xdb\xf4\x0d\x26\x3f"
- "\x58\x71\x8a\xa3\xbc\xd5\xee\x07"
- "\x20\x39\x52\x6b\x84\x9d\xb6\xcf"
- "\xe8\x01\x1a\x33\x4c\x65\x7e\x97"
- "\xb0\xc9\xe2\xfb\x14\x2d\x46\x5f"
- "\x78\x91\xaa\xc3\xdc\xf5\x0e\x27"
- "\x40\x59\x72\x8b\xa4\xbd\xd6\xef"
- "\x08\x21\x3a\x53\x6c\x85\x9e\xb7"
- "\xd0\xe9\x02\x1b\x34\x4d\x66\x7f"
- "\x98\xb1\xca\xe3\xfc\x15\x2e\x47"
- "\x60\x79\x92\xab\xc4\xdd\xf6\x0f"
- "\x28\x41\x5a\x73\x8c\xa5\xbe\xd7"
- "\xf0\x09\x22\x3b\x54\x6d\x86\x9f"
- "\xb8\xd1\xea\x03\x1c\x35\x4e\x67"
- "\x80\x99\xb2\xcb\xe4\xfd\x16\x2f"
- "\x48\x61\x7a\x93\xac\xc5\xde\xf7"
- "\x10\x29\x42\x5b\x74\x8d\xa6\xbf"
- "\xd8\xf1\x0a\x23\x3c\x55\x6e\x87"
- "\xa0\xb9\xd2\xeb\x04\x1d\x36\x4f"
- "\x68\x81\x9a\xb3\xcc\xe5\xfe\x17"
- "\x30\x49\x62\x7b\x94\xad\xc6\xdf"
- "\xf8\x11\x2a\x43\x5c\x75\x8e\xa7"
- "\xc0\xd9\xf2\x0b\x24\x3d\x56\x6f"
- "\x88\xa1\xba\xd3\xec\x05\x1e\x37"
- "\x50\x69\x82\x9b\xb4\xcd\xe6\xff"
- "\x18\x31\x4a\x63\x7c\x95\xae\xc7"
- "\xe0\xf9\x12\x2b\x44\x5d\x76\x8f"
- "\xa8\xc1\xda\xf3\x0c\x25\x3e\x57"
- "\x70\x89\xa2\xbb\xd4\xed\x06\x1f"
- "\x38\x51\x6a\x83\x9c\xb5\xce\xe7"
- "\x00\x1b\x36\x51\x6c\x87\xa2\xbd"
- "\xd8\xf3\x0e\x29\x44\x5f\x7a\x95"
- "\xb0\xcb\xe6\x01\x1c\x37\x52\x6d"
- "\x88\xa3\xbe\xd9\xf4\x0f\x2a\x45"
- "\x60\x7b\x96\xb1\xcc\xe7\x02\x1d"
- "\x38\x53\x6e\x89\xa4\xbf\xda\xf5"
- "\x10\x2b\x46\x61\x7c\x97\xb2\xcd"
- "\xe8\x03\x1e\x39\x54\x6f\x8a\xa5"
- "\xc0\xdb\xf6\x11\x2c\x47\x62\x7d"
- "\x98\xb3\xce\xe9\x04\x1f\x3a\x55"
- "\x70\x8b\xa6\xc1\xdc\xf7\x12\x2d"
- "\x48\x63\x7e\x99\xb4\xcf\xea\x05"
- "\x20\x3b\x56\x71\x8c\xa7\xc2\xdd"
- "\xf8\x13\x2e\x49\x64\x7f\x9a\xb5"
- "\xd0\xeb\x06\x21\x3c\x57\x72\x8d"
- "\xa8\xc3\xde\xf9\x14\x2f\x4a\x65"
- "\x80\x9b\xb6\xd1\xec\x07\x22\x3d"
- "\x58\x73\x8e\xa9\xc4\xdf\xfa\x15"
- "\x30\x4b\x66\x81\x9c\xb7\xd2\xed"
- "\x08\x23\x3e\x59\x74\x8f\xaa\xc5"
- "\xe0\xfb\x16\x31\x4c\x67\x82\x9d"
- "\xb8\xd3\xee\x09\x24\x3f\x5a\x75"
- "\x90\xab\xc6\xe1\xfc\x17\x32\x4d"
- "\x68\x83\x9e\xb9\xd4\xef\x0a\x25"
- "\x40\x5b\x76\x91\xac\xc7\xe2\xfd"
- "\x18\x33\x4e\x69\x84\x9f\xba\xd5"
- "\xf0\x0b\x26\x41\x5c\x77\x92\xad"
- "\xc8\xe3\xfe\x19\x34\x4f\x6a\x85"
- "\xa0\xbb\xd6\xf1\x0c\x27\x42\x5d"
- "\x78\x93\xae\xc9\xe4\xff\x1a\x35"
- "\x50\x6b\x86\xa1\xbc\xd7\xf2\x0d"
- "\x28\x43\x5e\x79\x94\xaf\xca\xe5"
- "\x00\x1d\x3a\x57\x74\x91\xae\xcb"
- "\xe8\x05\x22\x3f\x5c\x79\x96\xb3"
- "\xd0\xed\x0a\x27\x44\x61\x7e\x9b"
- "\xb8\xd5\xf2\x0f\x2c\x49\x66\x83"
- "\xa0\xbd\xda\xf7\x14\x31\x4e\x6b"
- "\x88\xa5\xc2\xdf\xfc\x19\x36\x53"
- "\x70\x8d\xaa\xc7\xe4\x01\x1e\x3b"
- "\x58\x75\x92\xaf\xcc\xe9\x06\x23"
- "\x40\x5d\x7a\x97\xb4\xd1\xee\x0b"
- "\x28\x45\x62\x7f\x9c\xb9\xd6\xf3"
- "\x10\x2d\x4a\x67\x84\xa1\xbe\xdb"
- "\xf8\x15\x32\x4f\x6c\x89\xa6\xc3"
- "\xe0\xfd\x1a\x37\x54\x71\x8e\xab"
- "\xc8\xe5\x02\x1f\x3c\x59\x76\x93"
- "\xb0\xcd\xea\x07\x24\x41\x5e\x7b"
- "\x98\xb5\xd2\xef\x0c\x29\x46\x63"
- "\x80\x9d\xba\xd7\xf4\x11\x2e\x4b"
- "\x68\x85\xa2\xbf\xdc\xf9\x16\x33"
- "\x50\x6d\x8a\xa7\xc4\xe1\xfe\x1b"
- "\x38\x55\x72\x8f\xac\xc9\xe6\x03"
- "\x20\x3d\x5a\x77\x94\xb1\xce\xeb"
- "\x08\x25\x42\x5f\x7c\x99\xb6\xd3"
- "\xf0\x0d\x2a\x47\x64\x81\x9e\xbb"
- "\xd8\xf5\x12\x2f\x4c\x69\x86\xa3"
- "\xc0\xdd\xfa\x17\x34\x51\x6e\x8b"
- "\xa8\xc5\xe2\xff\x1c\x39\x56\x73"
- "\x90\xad\xca\xe7\x04\x21\x3e\x5b"
- "\x78\x95\xb2\xcf\xec\x09\x26\x43"
- "\x60\x7d\x9a\xb7\xd4\xf1\x0e\x2b"
- "\x48\x65\x82\x9f\xbc\xd9\xf6\x13"
- "\x30\x4d\x6a\x87\xa4\xc1\xde\xfb"
- "\x18\x35\x52\x6f\x8c\xa9\xc6\xe3"
- "\x00\x1f\x3e\x5d\x7c\x9b\xba\xd9"
- "\xf8\x17\x36\x55\x74\x93\xb2\xd1"
- "\xf0\x0f\x2e\x4d\x6c\x8b\xaa\xc9"
- "\xe8\x07\x26\x45\x64\x83\xa2\xc1"
- "\xe0\xff\x1e\x3d\x5c\x7b\x9a\xb9"
- "\xd8\xf7\x16\x35\x54\x73\x92\xb1"
- "\xd0\xef\x0e\x2d\x4c\x6b\x8a\xa9"
- "\xc8\xe7\x06\x25\x44\x63\x82\xa1"
- "\xc0\xdf\xfe\x1d\x3c\x5b\x7a\x99"
- "\xb8\xd7\xf6\x15\x34\x53\x72\x91"
- "\xb0\xcf\xee\x0d\x2c\x4b\x6a\x89"
- "\xa8\xc7\xe6\x05\x24\x43\x62\x81"
- "\xa0\xbf\xde\xfd\x1c\x3b\x5a\x79"
- "\x98\xb7\xd6\xf5\x14\x33\x52\x71"
- "\x90\xaf\xce\xed\x0c\x2b\x4a\x69"
- "\x88\xa7\xc6\xe5\x04\x23\x42\x61"
- "\x80\x9f\xbe\xdd\xfc\x1b\x3a\x59"
- "\x78\x97\xb6\xd5\xf4\x13\x32\x51"
- "\x70\x8f\xae\xcd\xec\x0b\x2a\x49"
- "\x68\x87\xa6\xc5\xe4\x03\x22\x41"
- "\x60\x7f\x9e\xbd\xdc\xfb\x1a\x39"
- "\x58\x77\x96\xb5\xd4\xf3\x12\x31"
- "\x50\x6f\x8e\xad\xcc\xeb\x0a\x29"
- "\x48\x67\x86\xa5\xc4\xe3\x02\x21"
- "\x40\x5f\x7e\x9d\xbc\xdb\xfa\x19"
- "\x38\x57\x76\x95\xb4\xd3\xf2\x11"
- "\x30\x4f\x6e\x8d\xac\xcb\xea\x09"
- "\x28\x47\x66\x85\xa4\xc3\xe2\x01"
- "\x20\x3f\x5e\x7d\x9c\xbb\xda\xf9"
- "\x18\x37\x56\x75\x94\xb3\xd2\xf1"
- "\x10\x2f\x4e\x6d\x8c\xab\xca\xe9"
- "\x08\x27\x46\x65\x84\xa3\xc2\xe1"
- "\x00\x21\x42\x63",
- .ctext =
- "\xb5\x81\xf5\x64\x18\x73\xe3\xf0"
- "\x4c\x13\xf2\x77\x18\x60\x65\x5e"
- "\x29\x01\xce\x98\x55\x53\xf9\x0c"
- "\x2a\x08\xd5\x09\xb3\x57\x55\x56"
- "\xc5\xe9\x56\x90\xcb\x6a\xa3\xc0"
- "\xff\xc4\x79\xb4\xd2\x97\x5d\xc4"
- "\x43\xd1\xfe\x94\x7b\x88\x06\x5a"
- "\xb2\x9e\x2c\xfc\x44\x03\xb7\x90"
- "\xa0\xc1\xba\x6a\x33\xb8\xc7\xb2"
- "\x9d\xe1\x12\x4f\xc0\x64\xd4\x01"
- "\xfe\x8c\x7a\x66\xf7\xe6\x5a\x91"
- "\xbb\xde\x56\x86\xab\x65\x21\x30"
- "\x00\x84\x65\x24\xa5\x7d\x85\xb4"
- "\xe3\x17\xed\x3a\xb7\x6f\xb4\x0b"
- "\x0b\xaf\x15\xae\x5a\x8f\xf2\x0c"
- "\x2f\x27\xf4\x09\xd8\xd2\x96\xb7"
- "\x71\xf2\xc5\x99\x4d\x7e\x7f\x75"
- "\x77\x89\x30\x8b\x59\xdb\xa2\xb2"
- "\xa0\xf3\x19\x39\x2b\xc5\x7e\x3f"
- "\x4f\xd9\xd3\x56\x28\x97\x44\xdc"
- "\xc0\x8b\x77\x24\xd9\x52\xe7\xc5"
- "\xaf\xf6\x7d\x59\xb2\x44\x05\x1d"
- "\xb1\xb0\x11\xa5\x0f\xec\x33\xe1"
- "\x6d\x1b\x4e\x1f\xff\x57\x91\xb4"
- "\x5b\x9a\x96\xc5\x53\xbc\xae\x20"
- "\x3c\xbb\x14\xe2\xe8\x22\x33\xc1"
- "\x5e\x76\x9e\x46\x99\xf6\x2a\x15"
- "\xc6\x97\x02\xa0\x66\x43\xd1\xa6"
- "\x31\xa6\x9f\xfb\xf4\xd3\x69\xe5"
- "\xcd\x76\x95\xb8\x7a\x82\x7f\x21"
- "\x45\xff\x3f\xce\x55\xf6\x95\x10"
- "\x08\x77\x10\x43\xc6\xf3\x09\xe5"
- "\x68\xe7\x3c\xad\x00\x52\x45\x0d"
- "\xfe\x2d\xc6\xc2\x94\x8c\x12\x1d"
- "\xe6\x25\xae\x98\x12\x8e\x19\x9c"
- "\x81\x68\xb1\x11\xf6\x69\xda\xe3"
- "\x62\x08\x18\x7a\x25\x49\x28\xac"
- "\xba\x71\x12\x0b\xe4\xa2\xe5\xc7"
- "\x5d\x8e\xec\x49\x40\x21\xbf\x5a"
- "\x98\xf3\x02\x68\x55\x03\x7f\x8a"
- "\xe5\x94\x0c\x32\x5c\x07\x82\x63"
- "\xaf\x6f\x91\x40\x84\x8e\x52\x25"
- "\xd0\xb0\x29\x53\x05\xe2\x50\x7a"
- "\x34\xeb\xc9\x46\x20\xa8\x3d\xde"
- "\x7f\x16\x5f\x36\xc5\x2e\xdc\xd1"
- "\x15\x47\xc7\x50\x40\x6d\x91\xc5"
- "\xe7\x93\x95\x1a\xd3\x57\xbc\x52"
- "\x33\xee\x14\x19\x22\x52\x89\xa7"
- "\x4a\x25\x56\x77\x4b\xca\xcf\x0a"
- "\xe1\xf5\x35\x85\x30\x7e\x59\x4a"
- "\xbd\x14\x5b\xdf\xe3\x46\xcb\xac"
- "\x1f\x6c\x96\x0e\xf4\x81\xd1\x99"
- "\xca\x88\x63\x3d\x02\x58\x6b\xa9"
- "\xe5\x9f\xb3\x00\xb2\x54\xc6\x74"
- "\x1c\xbf\x46\xab\x97\xcc\xf8\x54"
- "\x04\x07\x08\x52\xe6\xc0\xda\x93"
- "\x74\x7d\x93\x99\x5d\x78\x68\xa6"
- "\x2e\x6b\xd3\x6a\x69\xcc\x12\x6b"
- "\xd4\xc7\xa5\xc6\xe7\xf6\x03\x04"
- "\x5d\xcd\x61\x5e\x17\x40\xdc\xd1"
- "\x5c\xf5\x08\xdf\x5c\x90\x85\xa4"
- "\xaf\xf6\x78\xbb\x0d\xf1\xf4\xa4"
- "\x54\x26\x72\x9e\x61\xfa\x86\xcf"
- "\xe8\x9e\xa1\xe0\xc7\x48\x23\xae"
- "\x5a\x90\xae\x75\x0a\x74\x18\x89"
- "\x05\xb1\x92\xb2\x7f\xd0\x1b\xa6"
- "\x62\x07\x25\x01\xc7\xc2\x4f\xf9"
- "\xe8\xfe\x63\x95\x80\x07\xb4\x26"
- "\xcc\xd1\x26\xb6\xc4\x3f\x9e\xcb"
- "\x8e\x3b\x2e\x44\x16\xd3\x10\x9a"
- "\x95\x08\xeb\xc8\xcb\xeb\xbf\x6f"
- "\x0b\xcd\x1f\xc8\xca\x86\xaa\xec"
- "\x33\xe6\x69\xf4\x45\x25\x86\x3a"
- "\x22\x94\x4f\x00\x23\x6a\x44\xc2"
- "\x49\x97\x33\xab\x36\x14\x0a\x70"
- "\x24\xc3\xbe\x04\x3b\x79\xa0\xf9"
- "\xb8\xe7\x76\x29\x22\x83\xd7\xf2"
- "\x94\xf4\x41\x49\xba\x5f\x7b\x07"
- "\xb5\xfb\xdb\x03\x1a\x9f\xb6\x4c"
- "\xc2\x2e\x37\x40\x49\xc3\x38\x16"
- "\xe2\x4f\x77\x82\xb0\x68\x4c\x71"
- "\x1d\x57\x61\x9c\xd9\x4e\x54\x99"
- "\x47\x13\x28\x73\x3c\xbb\x00\x90"
- "\xf3\x4d\xc9\x0e\xfd\xe7\xb1\x71"
- "\xd3\x15\x79\xbf\xcc\x26\x2f\xbd"
- "\xad\x6c\x50\x69\x6c\x3e\x6d\x80"
- "\x9a\xea\x78\xaf\x19\xb2\x0d\x4d"
- "\xad\x04\x07\xae\x22\x90\x4a\x93"
- "\x32\x0e\x36\x9b\x1b\x46\xba\x3b"
- "\xb4\xac\xc6\xd1\xa2\x31\x53\x3b"
- "\x2a\x3d\x45\xfe\x03\x61\x10\x85"
- "\x17\x69\xa6\x78\xcc\x6c\x87\x49"
- "\x53\xf9\x80\x10\xde\x80\xa2\x41"
- "\x6a\xc3\x32\x02\xad\x6d\x3c\x56"
- "\x00\x71\x51\x06\xa7\xbd\xfb\xef"
- "\x3c\xb5\x9f\xfc\x48\x7d\x53\x7c"
- "\x66\xb0\x49\x23\xc4\x47\x10\x0e"
- "\xe5\x6c\x74\x13\xe6\xc5\x3f\xaa"
- "\xde\xff\x07\x44\xdd\x56\x1b\xad"
- "\x09\x77\xfb\x5b\x12\xb8\x0d\x38"
- "\x17\x37\x35\x7b\x9b\xbc\xfe\xd4"
- "\x7e\x8b\xda\x7e\x5b\x04\xa7\x22"
- "\xa7\x31\xa1\x20\x86\xc7\x1b\x99"
- "\xdb\xd1\x89\xf4\x94\xa3\x53\x69"
- "\x8d\xe7\xe8\x74\x11\x8d\x74\xd6"
- "\x07\x37\x91\x9f\xfd\x67\x50\x3a"
- "\xc9\xe1\xf4\x36\xd5\xa0\x47\xd1"
- "\xf9\xe5\x39\xa3\x31\xac\x07\x36"
- "\x23\xf8\x66\x18\x14\x28\x34\x0f"
- "\xb8\xd0\xe7\x29\xb3\x04\x4b\x55"
- "\x01\x41\xb2\x75\x8d\xcb\x96\x85"
- "\x3a\xfb\xab\x2b\x9e\xfa\x58\x20"
- "\x44\x1f\xc0\x14\x22\x75\x61\xe8"
- "\xaa\x19\xcf\xf1\x82\x56\xf4\xd7"
- "\x78\x7b\x3d\x5f\xb3\x9e\x0b\x8a"
- "\x57\x50\xdb\x17\x41\x65\x4d\xa3"
- "\x02\xc9\x9c\x9c\x53\xfb\x39\x39"
- "\x9b\x1d\x72\x24\xda\xb7\x39\xbe"
- "\x13\x3b\xfa\x29\xda\x9e\x54\x64"
- "\x6e\xba\xd8\xa1\xcb\xb3\x36\xfa"
- "\xcb\x47\x85\xe9\x61\x38\xbc\xbe"
- "\xc5\x00\x38\x2a\x54\xf7\xc4\xb9"
- "\xb3\xd3\x7b\xa0\xa0\xf8\x72\x7f"
- "\x8c\x8e\x82\x0e\xc6\x1c\x75\x9d"
- "\xca\x8e\x61\x87\xde\xad\x80\xd2"
- "\xf5\xf9\x80\xef\x15\x75\xaf\xf5"
- "\x80\xfb\xff\x6d\x1e\x25\xb7\x40"
- "\x61\x6a\x39\x5a\x6a\xb5\x31\xab"
- "\x97\x8a\x19\x89\x44\x40\xc0\xa6"
- "\xb4\x4e\x30\x32\x7b\x13\xe7\x67"
- "\xa9\x8b\x57\x04\xc2\x01\xa6\xf4"
- "\x28\x99\xad\x2c\x76\xa3\x78\xc2"
- "\x4a\xe6\xca\x5c\x50\x6a\xc1\xb0"
- "\x62\x4b\x10\x8e\x7c\x17\x43\xb3"
- "\x17\x66\x1c\x3e\x8d\x69\xf0\x5a"
- "\x71\xf5\x97\xdc\xd1\x45\xdd\x28"
- "\xf3\x5d\xdf\x53\x7b\x11\xe5\xbc"
- "\x4c\xdb\x1b\x51\x6b\xe9\xfb\x3d"
- "\xc1\xc3\x2c\xb9\x71\xf5\xb6\xb2"
- "\x13\x36\x79\x80\x53\xe8\xd3\xa6"
- "\x0a\xaf\xfd\x56\x97\xf7\x40\x8e"
- "\x45\xce\xf8\xb0\x9e\x5c\x33\x82"
- "\xb0\x44\x56\xfc\x05\x09\xe9\x2a"
- "\xac\x26\x80\x14\x1d\xc8\x3a\x35"
- "\x4c\x82\x97\xfd\x76\xb7\xa9\x0a"
- "\x35\x58\x79\x8e\x0f\x66\xea\xaf"
- "\x51\x6c\x09\xa9\x6e\x9b\xcb\x9a"
- "\x31\x47\xa0\x2f\x7c\x71\xb4\x4a"
- "\x11\xaa\x8c\x66\xc5\x64\xe6\x3a"
- "\x54\xda\x24\x6a\xc4\x41\x65\x46"
- "\x82\xa0\x0a\x0f\x5f\xfb\x25\xd0"
- "\x2c\x91\xa7\xee\xc4\x81\x07\x86"
- "\x75\x5e\x33\x69\x97\xe4\x2c\xa8"
- "\x9d\x9f\x0b\x6a\xbe\xad\x98\xda"
- "\x6d\x94\x41\xda\x2c\x1e\x89\xc4"
- "\xc2\xaf\x1e\x00\x05\x0b\x83\x60"
- "\xbd\x43\xea\x15\x23\x7f\xb9\xac"
- "\xee\x4f\x2c\xaf\x2a\xf3\xdf\xd0"
- "\xf3\x19\x31\xbb\x4a\x74\x84\x17"
- "\x52\x32\x2c\x7d\x61\xe4\xcb\xeb"
- "\x80\x38\x15\x52\xcb\x6f\xea\xe5"
- "\x73\x9c\xd9\x24\x69\xc6\x95\x32"
- "\x21\xc8\x11\xe4\xdc\x36\xd7\x93"
- "\x38\x66\xfb\xb2\x7f\x3a\xb9\xaf"
- "\x31\xdd\x93\x75\x78\x8a\x2c\x94"
- "\x87\x1a\x58\xec\x9e\x7d\x4d\xba"
- "\xe1\xe5\x4d\xfc\xbc\xa4\x2a\x14"
- "\xef\xcc\xa7\xec\xab\x43\x09\x18"
- "\xd3\xab\x68\xd1\x07\x99\x44\x47"
- "\xd6\x83\x85\x3b\x30\xea\xa9\x6b"
- "\x63\xea\xc4\x07\xfb\x43\x2f\xa4"
- "\xaa\xb0\xab\x03\x89\xce\x3f\x8c"
- "\x02\x7c\x86\x54\xbc\x88\xaf\x75"
- "\xd2\xdc\x63\x17\xd3\x26\xf6\x96"
- "\xa9\x3c\xf1\x61\x8c\x11\x18\xcc"
- "\xd6\xea\x5b\xe2\xcd\xf0\xf1\xb2"
- "\xe5\x35\x90\x1f\x85\x4c\x76\x5b"
- "\x66\xce\x44\xa4\x32\x9f\xe6\x7b"
- "\x71\x6e\x9f\x58\x15\x67\x72\x87"
- "\x64\x8e\x3a\x44\x45\xd4\x76\xfa"
- "\xc2\xf6\xef\x85\x05\x18\x7a\x9b"
- "\xba\x41\x54\xac\xf0\xfc\x59\x12"
- "\x3f\xdf\xa0\xe5\x8a\x65\xfd\x3a"
- "\x62\x8d\x83\x2c\x03\xbe\x05\x76"
- "\x2e\x53\x49\x97\x94\x33\xae\x40"
- "\x81\x15\xdb\x6e\xad\xaa\xf5\x4b"
- "\xe3\x98\x70\xdf\xe0\x7c\xcd\xdb"
- "\x02\xd4\x7d\x2f\xc1\xe6\xb4\xf3"
- "\xd7\x0d\x7a\xd9\x23\x9e\x87\x2d"
- "\xce\x87\xad\xcc\x72\x05\x00\x29"
- "\xdc\x73\x7f\x64\xc1\x15\x0e\xc2"
- "\xdf\xa7\x5f\xeb\x41\xa1\xcd\xef"
- "\x5c\x50\x79\x2a\x56\x56\x71\x8c"
- "\xac\xc0\x79\x50\x69\xca\x59\x32"
- "\x65\xf2\x54\xe4\x52\x38\x76\xd1"
- "\x5e\xde\x26\x9e\xfb\x75\x2e\x11"
- "\xb5\x10\xf4\x17\x73\xf5\x89\xc7"
- "\x4f\x43\x5c\x8e\x7c\xb9\x05\x52"
- "\x24\x40\x99\xfe\x9b\x85\x0b\x6c"
- "\x22\x3e\x8b\xae\x86\xa1\xd2\x79"
- "\x05\x68\x6b\xab\xe3\x41\x49\xed"
- "\x15\xa1\x8d\x40\x2d\x61\xdf\x1a"
- "\x59\xc9\x26\x8b\xef\x30\x4c\x88"
- "\x4b\x10\xf8\x8d\xa6\x92\x9f\x4b"
- "\xf3\xc4\x53\x0b\x89\x5d\x28\x92"
- "\xcf\x78\xb2\xc0\x5d\xed\x7e\xfc"
- "\xc0\x12\x23\x5f\x5a\x78\x86\x43"
- "\x6e\x27\xf7\x5a\xa7\x6a\xed\x19"
- "\x04\xf0\xb3\x12\xd1\xbd\x0e\x89"
- "\x6e\xbc\x96\xa8\xd8\x49\x39\x9f"
- "\x7e\x67\xf0\x2e\x3e\x01\xa9\xba"
- "\xec\x8b\x62\x8e\xcb\x4a\x70\x43"
- "\xc7\xc2\xc4\xca\x82\x03\x73\xe9"
- "\x11\xdf\xcf\x54\xea\xc9\xb0\x95"
- "\x51\xc0\x13\x3d\x92\x05\xfa\xf4"
- "\xa9\x34\xc8\xce\x6c\x3d\x54\xcc"
- "\xc4\xaf\xf1\xdc\x11\x44\x26\xa2"
- "\xaf\xf1\x85\x75\x7d\x03\x61\x68"
- "\x4e\x78\xc6\x92\x7d\x86\x7d\x77"
- "\xdc\x71\x72\xdb\xc6\xae\xa1\xcb"
- "\x70\x9a\x0b\x19\xbe\x4a\x6c\x2a"
- "\xe2\xba\x6c\x64\x9a\x13\x28\xdf"
- "\x85\x75\xe6\x43\xf6\x87\x08\x68"
- "\x6e\xba\x6e\x79\x9f\x04\xbc\x23"
- "\x50\xf6\x33\x5c\x1f\x24\x25\xbe"
- "\x33\x47\x80\x45\x56\xa3\xa7\xd7"
- "\x7a\xb1\x34\x0b\x90\x3c\x9c\xad"
- "\x44\x5f\x9e\x0e\x9d\xd4\xbd\x93"
- "\x5e\xfa\x3c\xe0\xb0\xd9\xed\xf3"
- "\xd6\x2e\xff\x24\xd8\x71\x6c\xed"
- "\xaf\x55\xeb\x22\xac\x93\x68\x32"
- "\x05\x5b\x47\xdd\xc6\x4a\xcb\xc7"
- "\x10\xe1\x3c\x92\x1a\xf3\x23\x78"
- "\x2b\xa1\xd2\x80\xf4\x12\xb1\x20"
- "\x8f\xff\x26\x35\xdd\xfb\xc7\x4e"
- "\x78\xf1\x2d\x50\x12\x77\xa8\x60"
- "\x7c\x0f\xf5\x16\x2f\x63\x70\x2a"
- "\xc0\x96\x80\x4e\x0a\xb4\x93\x35"
- "\x5d\x1d\x3f\x56\xf7\x2f\xbb\x90"
- "\x11\x16\x8f\xa2\xec\x47\xbe\xac"
- "\x56\x01\x26\x56\xb1\x8c\xb2\x10"
- "\xf9\x1a\xca\xf5\xd1\xb7\x39\x20"
- "\x63\xf1\x69\x20\x4f\x13\x12\x1f"
- "\x5b\x65\xfc\x98\xf7\xc4\x7a\xbe"
- "\xf7\x26\x4d\x2b\x84\x7b\x42\xad"
- "\xd8\x7a\x0a\xb4\xd8\x74\xbf\xc1"
- "\xf0\x6e\xb4\x29\xa3\xbb\xca\x46"
- "\x67\x70\x6a\x2d\xce\x0e\xa2\x8a"
- "\xa9\x87\xbf\x05\xc4\xc1\x04\xa3"
- "\xab\xd4\x45\x43\x8c\xb6\x02\xb0"
- "\x41\xc8\xfc\x44\x3d\x59\xaa\x2e"
- "\x44\x21\x2a\x8d\x88\x9d\x57\xf4"
- "\xa0\x02\x77\xb8\xa6\xa0\xe6\x75"
- "\x5c\x82\x65\x3e\x03\x5c\x29\x8f"
- "\x38\x55\xab\x33\x26\xef\x9f\x43"
- "\x52\xfd\x68\xaf\x36\xb4\xbb\x9a"
- "\x58\x09\x09\x1b\xc3\x65\x46\x46"
- "\x1d\xa7\x94\x18\x23\x50\x2c\xca"
- "\x2c\x55\x19\x97\x01\x9d\x93\x3b"
- "\x63\x86\xf2\x03\x67\x45\xd2\x72"
- "\x28\x52\x6c\xf4\xe3\x1c\xb5\x11"
- "\x13\xf1\xeb\x21\xc7\xd9\x56\x82"
- "\x2b\x82\x39\xbd\x69\x54\xed\x62"
- "\xc3\xe2\xde\x73\xd4\x6a\x12\xae"
- "\x13\x21\x7f\x4b\x5b\xfc\xbf\xe8"
- "\x2b\xbe\x56\xba\x68\x8b\x9a\xb1"
- "\x6e\xfa\xbf\x7e\x5a\x4b\xf1\xac"
- "\x98\x65\x85\xd1\x93\x53\xd3\x7b"
- "\x09\xdd\x4b\x10\x6d\x84\xb0\x13"
- "\x65\xbd\xcf\x52\x09\xc4\x85\xe2"
- "\x84\x74\x15\x65\xb7\xf7\x51\xaf"
- "\x55\xad\xa4\xd1\x22\x54\x70\x94"
- "\xa0\x1c\x90\x41\xfd\x99\xd7\x5a"
- "\x31\xef\xaa\x25\xd0\x7f\x4f\xea"
- "\x1d\x55\x42\xe5\x49\xb0\xd0\x46"
- "\x62\x36\x43\xb2\x82\x15\x75\x50"
- "\xa4\x72\xeb\x54\x27\x1f\x8a\xe4"
- "\x7d\xe9\x66\xc5\xf1\x53\xa4\xd1"
- "\x0c\xeb\xb8\xf8\xbc\xd4\xe2\xe7"
- "\xe1\xf8\x4b\xcb\xa9\xa1\xaf\x15"
- "\x83\xcb\x72\xd0\x33\x79\x00\x2d"
- "\x9f\xd7\xf1\x2e\x1e\x10\xe4\x45"
- "\xc0\x75\x3a\x39\xea\x68\xf7\x5d"
- "\x1b\x73\x8f\xe9\x8e\x0f\x72\x47"
- "\xae\x35\x0a\x31\x7a\x14\x4d\x4a"
- "\x6f\x47\xf7\x7e\x91\x6e\x74\x8b"
- "\x26\x47\xf9\xc3\xf9\xde\x70\xf5"
- "\x61\xab\xa9\x27\x9f\x82\xe4\x9c"
- "\x89\x91\x3f\x2e\x6a\xfd\xb5\x49"
- "\xe9\xfd\x59\x14\x36\x49\x40\x6d"
- "\x32\xd8\x85\x42\xf3\xa5\xdf\x0c"
- "\xa8\x27\xd7\x54\xe2\x63\x2f\xf2"
- "\x7e\x8b\x8b\xe7\xf1\x9a\x95\x35"
- "\x43\xdc\x3a\xe4\xb6\xf4\xd0\xdf"
- "\x9c\xcb\x94\xf3\x21\xa0\x77\x50"
- "\xe2\xc6\xc4\xc6\x5f\x09\x64\x5b"
- "\x92\x90\xd8\xe1\xd1\xed\x4b\x42"
- "\xd7\x37\xaf\x65\x3d\x11\x39\xb6"
- "\x24\x8a\x60\xae\xd6\x1e\xbf\x0e"
- "\x0d\xd7\xdc\x96\x0e\x65\x75\x4e"
- "\x29\x06\x9d\xa4\x51\x3a\x10\x63"
- "\x8f\x17\x07\xd5\x8e\x3c\xf4\x28"
- "\x00\x5a\x5b\x05\x19\xd8\xc0\x6c"
- "\xe5\x15\xe4\x9c\x9d\x71\x9d\x5e"
- "\x94\x29\x1a\xa7\x80\xfa\x0e\x33"
- "\x03\xdd\xb7\x3e\x9a\xa9\x26\x18"
- "\x37\xa9\x64\x08\x4d\x94\x5a\x88"
- "\xca\x35\xce\x81\x02\xe3\x1f\x1b"
- "\x89\x1a\x77\x85\xe3\x41\x6d\x32"
- "\x42\x19\x23\x7d\xc8\x73\xee\x25"
- "\x85\x0d\xf8\x31\x25\x79\x1b\x6f"
- "\x79\x25\xd2\xd8\xd4\x23\xfd\xf7"
- "\x82\x36\x6a\x0c\x46\x22\x15\xe9"
- "\xff\x72\x41\x91\x91\x7d\x3a\xb7"
- "\xdd\x65\x99\x70\xf6\x8d\x84\xf8"
- "\x67\x15\x20\x11\xd6\xb2\x55\x7b"
- "\xdb\x87\xee\xef\x55\x89\x2a\x59"
- "\x2b\x07\x8f\x43\x8a\x59\x3c\x01"
- "\x8b\x65\x54\xa1\x66\xd5\x38\xbd"
- "\xc6\x30\xa9\xcc\x49\xb6\xa8\x1b"
- "\xb8\xc0\x0e\xe3\x45\x28\xe2\xff"
- "\x41\x9f\x7e\x7c\xd1\xae\x9e\x25"
- "\x3f\x4c\x7c\x7c\xf4\xa8\x26\x4d"
- "\x5c\xfd\x4b\x27\x18\xf9\x61\x76"
- "\x48\xba\x0c\x6b\xa9\x4d\xfc\xf5"
- "\x3b\x35\x7e\x2f\x4a\xa9\xc2\x9a"
- "\xae\xab\x86\x09\x89\xc9\xc2\x40"
- "\x39\x2c\x81\xb3\xb8\x17\x67\xc2"
- "\x0d\x32\x4a\x3a\x67\x81\xd7\x1a"
- "\x34\x52\xc5\xdb\x0a\xf5\x63\x39"
- "\xea\x1f\xe1\x7c\xa1\x9e\xc1\x35"
- "\xe3\xb1\x18\x45\x67\xf9\x22\x38"
- "\x95\xd9\x34\x34\x86\xc6\x41\x94"
- "\x15\xf9\x5b\x41\xa6\x87\x8b\xf8"
- "\xd5\xe1\x1b\xe2\x5b\xf3\x86\x10"
- "\xff\xe6\xae\x69\x76\xbc\x0d\xb4"
- "\x09\x90\x0c\xa2\x65\x0c\xad\x74"
- "\xf5\xd7\xff\xda\xc1\xce\x85\xbe"
- "\x00\xa7\xff\x4d\x2f\x65\xd3\x8c"
- "\x86\x2d\x05\xe8\xed\x3e\x6b\x8b"
- "\x0f\x3d\x83\x8c\xf1\x1d\x5b\x96"
- "\x2e\xb1\x9c\xc2\x98\xe1\x70\xb9"
- "\xba\x5c\x8a\x43\xd6\x34\xa7\x2d"
- "\xc9\x92\xae\xf2\xa5\x7b\x05\x49"
- "\xa7\x33\x34\x86\xca\xe4\x96\x23"
- "\x76\x5b\xf2\xc6\xf1\x51\x28\x42"
- "\x7b\xcc\x76\x8f\xfa\xa2\xad\x31"
- "\xd4\xd6\x7a\x6d\x25\x25\x54\xe4"
- "\x3f\x50\x59\xe1\x5c\x05\xb7\x27"
- "\x48\xbf\x07\xec\x1b\x13\xbe\x2b"
- "\xa1\x57\x2b\xd5\xab\xd7\xd0\x4c"
- "\x1e\xcb\x71\x9b\xc5\x90\x85\xd3"
- "\xde\x59\xec\x71\xeb\x89\xbb\xd0"
- "\x09\x50\xe1\x16\x3f\xfd\x1c\x34"
- "\xc3\x1c\xa1\x10\x77\x53\x98\xef"
- "\xf2\xfd\xa5\x01\x59\xc2\x9b\x26"
- "\xc7\x42\xd9\x49\xda\x58\x2b\x6e"
- "\x9f\x53\x19\x76\x7e\xd9\xc9\x0e"
- "\x68\xc8\x7f\x51\x22\x42\xef\x49"
- "\xa4\x55\xb6\x36\xac\x09\xc7\x31"
- "\x88\x15\x4b\x2e\x8f\x3a\x08\xf7"
- "\xd8\xf7\xa8\xc5\xa9\x33\xa6\x45"
- "\xe4\xc4\x94\x76\xf3\x0d\x8f\x7e"
- "\xc8\xf6\xbc\x23\x0a\xb6\x4c\xd3"
- "\x6a\xcd\x36\xc2\x90\x5c\x5c\x3c"
- "\x65\x7b\xc2\xd6\xcc\xe6\x0d\x87"
- "\x73\x2e\x71\x79\x16\x06\x63\x28"
- "\x09\x15\xd8\x89\x38\x38\x3d\xb5"
- "\x42\x1c\x08\x24\xf7\x2a\xd2\x9d"
- "\xc8\xca\xef\xf9\x27\xd8\x07\x86"
- "\xf7\x43\x0b\x55\x15\x3f\x9f\x83"
- "\xef\xdc\x49\x9d\x2a\xc1\x54\x62"
- "\xbd\x9b\x66\x55\x9f\xb7\x12\xf3"
- "\x1b\x4d\x9d\x2a\x5c\xed\x87\x75"
- "\x87\x26\xec\x61\x2c\xb4\x0f\x89"
- "\xb0\xfb\x2e\x68\x5d\x15\xc7\x8d"
- "\x2e\xc0\xd9\xec\xaf\x4f\xd2\x25"
- "\x29\xe8\xd2\x26\x2b\x67\xe9\xfc"
- "\x2b\xa8\x67\x96\x12\x1f\x5b\x96"
- "\xc6\x14\x53\xaf\x44\xea\xd6\xe2"
- "\x94\x98\xe4\x12\x93\x4c\x92\xe0"
- "\x18\xa5\x8d\x2d\xe4\x71\x3c\x47"
- "\x4c\xf7\xe6\x47\x9e\xc0\x68\xdf"
- "\xd4\xf5\x5a\x74\xb1\x2b\x29\x03"
- "\x19\x07\xaf\x90\x62\x5c\x68\x98"
- "\x48\x16\x11\x02\x9d\xee\xb4\x9b"
- "\xe5\x42\x7f\x08\xfd\x16\x32\x0b"
- "\xd0\xb3\xfa\x2b\xb7\x99\xf9\x29"
- "\xcd\x20\x45\x9f\xb3\x1a\x5d\xa2"
- "\xaf\x4d\xe0\xbd\x42\x0d\xbc\x74"
- "\x99\x9c\x8e\x53\x1a\xb4\x3e\xbd"
- "\xa2\x9a\x2d\xf7\xf8\x39\x0f\x67"
- "\x63\xfc\x6b\xc0\xaf\xb3\x4b\x4f"
- "\x55\xc4\xcf\xa7\xc8\x04\x11\x3e"
- "\x14\x32\xbb\x1b\x38\x77\xd6\x7f"
- "\x54\x4c\xdf\x75\xf3\x07\x2d\x33"
- "\x9b\xa8\x20\xe1\x7b\x12\xb5\xf3"
- "\xef\x2f\xce\x72\xe5\x24\x60\xc1"
- "\x30\xe2\xab\xa1\x8e\x11\x09\xa8"
- "\x21\x33\x44\xfe\x7f\x35\x32\x93"
- "\x39\xa7\xad\x8b\x79\x06\xb2\xcb"
- "\x4e\xa9\x5f\xc7\xba\x74\x29\xec"
- "\x93\xa0\x4e\x54\x93\xc0\xbc\x55"
- "\x64\xf0\x48\xe5\x57\x99\xee\x75"
- "\xd6\x79\x0f\x66\xb7\xc6\x57\x76"
- "\xf7\xb7\xf3\x9c\xc5\x60\xe8\x7f"
- "\x83\x76\xd6\x0e\xaa\xe6\x90\x39"
- "\x1d\xa6\x32\x6a\x34\xe3\x55\xf8"
- "\x58\xa0\x58\x7d\x33\xe0\x22\x39"
- "\x44\x64\x87\x86\x5a\x2f\xa7\x7e"
- "\x0f\x38\xea\xb0\x30\xcc\x61\xa5"
- "\x6a\x32\xae\x1e\xf7\xe9\xd0\xa9"
- "\x0c\x32\x4b\xb5\x49\x28\xab\x85"
- "\x2f\x8e\x01\x36\x38\x52\xd0\xba"
- "\xd6\x02\x78\xf8\x0e\x3e\x9c\x8b"
- "\x6b\x45\x99\x3f\x5c\xfe\x58\xf1"
- "\x5c\x94\x04\xe1\xf5\x18\x6d\x51"
- "\xb2\x5d\x18\x20\xb6\xc2\x9a\x42"
- "\x1d\xb3\xab\x3c\xb6\x3a\x13\x03"
- "\xb2\x46\x82\x4f\xfc\x64\xbc\x4f"
- "\xca\xfa\x9c\xc0\xd5\xa7\xbd\x11"
- "\xb7\xe4\x5a\xf6\x6f\x4d\x4d\x54"
- "\xea\xa4\x98\x66\xd4\x22\x3b\xd3"
- "\x8f\x34\x47\xd9\x7c\xf4\x72\x3b"
- "\x4d\x02\x77\xf6\xd6\xdd\x08\x0a"
- "\x81\xe1\x86\x89\x3e\x56\x10\x3c"
- "\xba\xd7\x81\x8c\x08\xbc\x8b\xe2"
- "\x53\xec\xa7\x89\xee\xc8\x56\xb5"
- "\x36\x2c\xb2\x03\xba\x99\xdd\x7c"
- "\x48\xa0\xb0\xbc\x91\x33\xe9\xa8"
- "\xcb\xcd\xcf\x59\x5f\x1f\x15\xe2"
- "\x56\xf5\x4e\x01\x35\x27\x45\x77"
- "\x47\xc8\xbc\xcb\x7e\x39\xc1\x97"
- "\x28\xd3\x84\xfc\x2c\x3e\xc8\xad"
- "\x9c\xf8\x8a\x61\x9c\x28\xaa\xc5"
- "\x99\x20\x43\x85\x9d\xa5\xe2\x8b"
- "\xb8\xae\xeb\xd0\x32\x0d\x52\x78"
- "\x09\x56\x3f\xc7\xd8\x7e\x26\xfc"
- "\x37\xfb\x6f\x04\xfc\xfa\x92\x10"
- "\xac\xf8\x3e\x21\xdc\x8c\x21\x16"
- "\x7d\x67\x6e\xf6\xcd\xda\xb6\x98"
- "\x23\xab\x23\x3c\xb2\x10\xa0\x53"
- "\x5a\x56\x9f\xc5\xd0\xff\xbb\xe4"
- "\x98\x3c\x69\x1e\xdb\x38\x8f\x7e"
- "\x0f\xd2\x98\x88\x81\x8b\x45\x67"
- "\xea\x33\xf1\xeb\xe9\x97\x55\x2e"
- "\xd9\xaa\xeb\x5a\xec\xda\xe1\x68"
- "\xa8\x9d\x3c\x84\x7c\x05\x3d\x62"
- "\x87\x8f\x03\x21\x28\x95\x0c\x89"
- "\x25\x22\x4a\xb0\x93\xa9\x50\xa2"
- "\x2f\x57\x6e\x18\x42\x19\x54\x0c"
- "\x55\x67\xc6\x11\x49\xf4\x5c\xd2"
- "\xe9\x3d\xdd\x8b\x48\x71\x21\x00"
- "\xc3\x9a\x6c\x85\x74\x28\x83\x4a"
- "\x1b\x31\x05\xe1\x06\x92\xe7\xda"
- "\x85\x73\x78\x45\x20\x7f\xae\x13"
- "\x7c\x33\x06\x22\xf4\x83\xf9\x35"
- "\x3f\x6c\x71\xa8\x4e\x48\xbe\x9b"
- "\xce\x8a\xba\xda\xbe\x28\x08\xf7"
- "\xe2\x14\x8c\x71\xea\x72\xf9\x33"
- "\xf2\x88\x3f\xd7\xbb\x69\x6c\x29"
- "\x19\xdc\x84\xce\x1f\x12\x4f\xc8"
- "\xaf\xa5\x04\xba\x5a\xab\xb0\xd9"
- "\x14\x1f\x6c\x68\x98\x39\x89\x7a"
- "\xd9\xd8\x2f\xdf\xa8\x47\x4a\x25"
- "\xe2\xfb\x33\xf4\x59\x78\xe1\x68"
- "\x85\xcf\xfe\x59\x20\xd4\x05\x1d"
- "\x80\x99\xae\xbc\xca\xae\x0f\x2f"
- "\x65\x43\x34\x8e\x7e\xac\xd3\x93"
- "\x2f\xac\x6d\x14\x3d\x02\x07\x70"
- "\x9d\xa4\xf3\x1b\x5c\x36\xfc\x01"
- "\x73\x34\x85\x0c\x6c\xd6\xf1\xbd"
- "\x3f\xdf\xee\xf5\xd9\xba\x56\xef"
- "\xf4\x9b\x6b\xee\x9f\x5a\x78\x6d"
- "\x32\x19\xf4\xf7\xf8\x4c\x69\x0b"
- "\x4b\xbc\xbb\xb7\xf2\x85\xaf\x70"
- "\x75\x24\x6c\x54\xa7\x0e\x4d\x1d"
- "\x01\xbf\x08\xac\xcf\x7f\x2c\xe3"
- "\x14\x89\x5e\x70\x5a\x99\x92\xcd"
- "\x01\x84\xc8\xd2\xab\xe5\x4f\x58"
- "\xe7\x0f\x2f\x0e\xff\x68\xea\xfd"
- "\x15\xb3\x17\xe6\xb0\xe7\x85\xd8"
- "\x23\x2e\x05\xc7\xc9\xc4\x46\x1f"
- "\xe1\x9e\x49\x20\x23\x24\x4d\x7e"
- "\x29\x65\xff\xf4\xb6\xfd\x1a\x85"
- "\xc4\x16\xec\xfc\xea\x7b\xd6\x2c"
- "\x43\xf8\xb7\xbf\x79\xc0\x85\xcd"
- "\xef\xe1\x98\xd3\xa5\xf7\x90\x8c"
- "\xe9\x7f\x80\x6b\xd2\xac\x4c\x30"
- "\xa7\xc6\x61\x6c\xd2\xf9\x2c\xff"
- "\x30\xbc\x22\x81\x7d\x93\x12\xe4"
- "\x0a\xcd\xaf\xdd\xe8\xab\x0a\x1e"
- "\x13\xa4\x27\xc3\x5f\xf7\x4b\xbb"
- "\x37\x09\x4b\x91\x6f\x92\x4f\xaf"
- "\x52\xee\xdf\xef\x09\x6f\xf7\x5c"
- "\x6e\x12\x17\x72\x63\x57\xc7\xba"
- "\x3b\x6b\x38\x32\x73\x1b\x9c\x80"
- "\xc1\x7a\xc6\xcf\xcd\x35\xc0\x6b"
- "\x31\x1a\x6b\xe9\xd8\x2c\x29\x3f"
- "\x96\xfb\xb6\xcd\x13\x91\x3b\xc2"
- "\xd2\xa3\x31\x8d\xa4\xcd\x57\xcd"
- "\x13\x3d\x64\xfd\x06\xce\xe6\xdc"
- "\x0c\x24\x43\x31\x40\x57\xf1\x72"
- "\x17\xe3\x3a\x63\x6d\x35\xcf\x5d"
- "\x97\x40\x59\xdd\xf7\x3c\x02\xf7"
- "\x1c\x7e\x05\xbb\xa9\x0d\x01\xb1"
- "\x8e\xc0\x30\xa9\x53\x24\xc9\x89"
- "\x84\x6d\xaa\xd0\xcd\x91\xc2\x4d"
- "\x91\xb0\x89\xe2\xbf\x83\x44\xaa"
- "\x28\x72\x23\xa0\xc2\xad\xad\x1c"
- "\xfc\x3f\x09\x7a\x0b\xdc\xc5\x1b"
- "\x87\x13\xc6\x5b\x59\x8d\xf2\xc8"
- "\xaf\xdf\x11\x95",
- .len = 4100,
- },
-};
-
static const struct cipher_testvec chacha20_tv_template[] = {
{ /* RFC7539 A.2. Test Vector #1 */
.key = "\x00\x00\x00\x00\x00\x00\x00\x00"
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
deleted file mode 100644
index aa29c529b44e..000000000000
--- a/crypto/tgr192.c
+++ /dev/null
@@ -1,682 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cryptographic API.
- *
- * Tiger hashing Algorithm
- *
- * Copyright (C) 1998 Free Software Foundation, Inc.
- *
- * The Tiger algorithm was developed by Ross Anderson and Eli Biham.
- * It was optimized for 64-bit processors while still delivering
- * decent performance on 32 and 16-bit processors.
- *
- * This version is derived from the GnuPG implementation and the
- * Tiger-Perl interface written by Rafael Sevilla
- *
- * Adapted for Linux Kernel Crypto by Aaron Grothe
- * ajgrothe@yahoo.com, February 22, 2005
- */
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-#include <asm/unaligned.h>
-
-#define TGR192_DIGEST_SIZE 24
-#define TGR160_DIGEST_SIZE 20
-#define TGR128_DIGEST_SIZE 16
-
-#define TGR192_BLOCK_SIZE 64
-
-struct tgr192_ctx {
- u64 a, b, c;
- u8 hash[64];
- int count;
- u32 nblocks;
-};
-
-static const u64 sbox1[256] = {
- 0x02aab17cf7e90c5eULL, 0xac424b03e243a8ecULL, 0x72cd5be30dd5fcd3ULL,
- 0x6d019b93f6f97f3aULL, 0xcd9978ffd21f9193ULL, 0x7573a1c9708029e2ULL,
- 0xb164326b922a83c3ULL, 0x46883eee04915870ULL, 0xeaace3057103ece6ULL,
- 0xc54169b808a3535cULL, 0x4ce754918ddec47cULL, 0x0aa2f4dfdc0df40cULL,
- 0x10b76f18a74dbefaULL, 0xc6ccb6235ad1ab6aULL, 0x13726121572fe2ffULL,
- 0x1a488c6f199d921eULL, 0x4bc9f9f4da0007caULL, 0x26f5e6f6e85241c7ULL,
- 0x859079dbea5947b6ULL, 0x4f1885c5c99e8c92ULL, 0xd78e761ea96f864bULL,
- 0x8e36428c52b5c17dULL, 0x69cf6827373063c1ULL, 0xb607c93d9bb4c56eULL,
- 0x7d820e760e76b5eaULL, 0x645c9cc6f07fdc42ULL, 0xbf38a078243342e0ULL,
- 0x5f6b343c9d2e7d04ULL, 0xf2c28aeb600b0ec6ULL, 0x6c0ed85f7254bcacULL,
- 0x71592281a4db4fe5ULL, 0x1967fa69ce0fed9fULL, 0xfd5293f8b96545dbULL,
- 0xc879e9d7f2a7600bULL, 0x860248920193194eULL, 0xa4f9533b2d9cc0b3ULL,
- 0x9053836c15957613ULL, 0xdb6dcf8afc357bf1ULL, 0x18beea7a7a370f57ULL,
- 0x037117ca50b99066ULL, 0x6ab30a9774424a35ULL, 0xf4e92f02e325249bULL,
- 0x7739db07061ccae1ULL, 0xd8f3b49ceca42a05ULL, 0xbd56be3f51382f73ULL,
- 0x45faed5843b0bb28ULL, 0x1c813d5c11bf1f83ULL, 0x8af0e4b6d75fa169ULL,
- 0x33ee18a487ad9999ULL, 0x3c26e8eab1c94410ULL, 0xb510102bc0a822f9ULL,
- 0x141eef310ce6123bULL, 0xfc65b90059ddb154ULL, 0xe0158640c5e0e607ULL,
- 0x884e079826c3a3cfULL, 0x930d0d9523c535fdULL, 0x35638d754e9a2b00ULL,
- 0x4085fccf40469dd5ULL, 0xc4b17ad28be23a4cULL, 0xcab2f0fc6a3e6a2eULL,
- 0x2860971a6b943fcdULL, 0x3dde6ee212e30446ULL, 0x6222f32ae01765aeULL,
- 0x5d550bb5478308feULL, 0xa9efa98da0eda22aULL, 0xc351a71686c40da7ULL,
- 0x1105586d9c867c84ULL, 0xdcffee85fda22853ULL, 0xccfbd0262c5eef76ULL,
- 0xbaf294cb8990d201ULL, 0xe69464f52afad975ULL, 0x94b013afdf133e14ULL,
- 0x06a7d1a32823c958ULL, 0x6f95fe5130f61119ULL, 0xd92ab34e462c06c0ULL,
- 0xed7bde33887c71d2ULL, 0x79746d6e6518393eULL, 0x5ba419385d713329ULL,
- 0x7c1ba6b948a97564ULL, 0x31987c197bfdac67ULL, 0xde6c23c44b053d02ULL,
- 0x581c49fed002d64dULL, 0xdd474d6338261571ULL, 0xaa4546c3e473d062ULL,
- 0x928fce349455f860ULL, 0x48161bbacaab94d9ULL, 0x63912430770e6f68ULL,
- 0x6ec8a5e602c6641cULL, 0x87282515337ddd2bULL, 0x2cda6b42034b701bULL,
- 0xb03d37c181cb096dULL, 0xe108438266c71c6fULL, 0x2b3180c7eb51b255ULL,
- 0xdf92b82f96c08bbcULL, 0x5c68c8c0a632f3baULL, 0x5504cc861c3d0556ULL,
- 0xabbfa4e55fb26b8fULL, 0x41848b0ab3baceb4ULL, 0xb334a273aa445d32ULL,
- 0xbca696f0a85ad881ULL, 0x24f6ec65b528d56cULL, 0x0ce1512e90f4524aULL,
- 0x4e9dd79d5506d35aULL, 0x258905fac6ce9779ULL, 0x2019295b3e109b33ULL,
- 0xf8a9478b73a054ccULL, 0x2924f2f934417eb0ULL, 0x3993357d536d1bc4ULL,
- 0x38a81ac21db6ff8bULL, 0x47c4fbf17d6016bfULL, 0x1e0faadd7667e3f5ULL,
- 0x7abcff62938beb96ULL, 0xa78dad948fc179c9ULL, 0x8f1f98b72911e50dULL,
- 0x61e48eae27121a91ULL, 0x4d62f7ad31859808ULL, 0xeceba345ef5ceaebULL,
- 0xf5ceb25ebc9684ceULL, 0xf633e20cb7f76221ULL, 0xa32cdf06ab8293e4ULL,
- 0x985a202ca5ee2ca4ULL, 0xcf0b8447cc8a8fb1ULL, 0x9f765244979859a3ULL,
- 0xa8d516b1a1240017ULL, 0x0bd7ba3ebb5dc726ULL, 0xe54bca55b86adb39ULL,
- 0x1d7a3afd6c478063ULL, 0x519ec608e7669eddULL, 0x0e5715a2d149aa23ULL,
- 0x177d4571848ff194ULL, 0xeeb55f3241014c22ULL, 0x0f5e5ca13a6e2ec2ULL,
- 0x8029927b75f5c361ULL, 0xad139fabc3d6e436ULL, 0x0d5df1a94ccf402fULL,
- 0x3e8bd948bea5dfc8ULL, 0xa5a0d357bd3ff77eULL, 0xa2d12e251f74f645ULL,
- 0x66fd9e525e81a082ULL, 0x2e0c90ce7f687a49ULL, 0xc2e8bcbeba973bc5ULL,
- 0x000001bce509745fULL, 0x423777bbe6dab3d6ULL, 0xd1661c7eaef06eb5ULL,
- 0xa1781f354daacfd8ULL, 0x2d11284a2b16affcULL, 0xf1fc4f67fa891d1fULL,
- 0x73ecc25dcb920adaULL, 0xae610c22c2a12651ULL, 0x96e0a810d356b78aULL,
- 0x5a9a381f2fe7870fULL, 0xd5ad62ede94e5530ULL, 0xd225e5e8368d1427ULL,
- 0x65977b70c7af4631ULL, 0x99f889b2de39d74fULL, 0x233f30bf54e1d143ULL,
- 0x9a9675d3d9a63c97ULL, 0x5470554ff334f9a8ULL, 0x166acb744a4f5688ULL,
- 0x70c74caab2e4aeadULL, 0xf0d091646f294d12ULL, 0x57b82a89684031d1ULL,
- 0xefd95a5a61be0b6bULL, 0x2fbd12e969f2f29aULL, 0x9bd37013feff9fe8ULL,
- 0x3f9b0404d6085a06ULL, 0x4940c1f3166cfe15ULL, 0x09542c4dcdf3defbULL,
- 0xb4c5218385cd5ce3ULL, 0xc935b7dc4462a641ULL, 0x3417f8a68ed3b63fULL,
- 0xb80959295b215b40ULL, 0xf99cdaef3b8c8572ULL, 0x018c0614f8fcb95dULL,
- 0x1b14accd1a3acdf3ULL, 0x84d471f200bb732dULL, 0xc1a3110e95e8da16ULL,
- 0x430a7220bf1a82b8ULL, 0xb77e090d39df210eULL, 0x5ef4bd9f3cd05e9dULL,
- 0x9d4ff6da7e57a444ULL, 0xda1d60e183d4a5f8ULL, 0xb287c38417998e47ULL,
- 0xfe3edc121bb31886ULL, 0xc7fe3ccc980ccbefULL, 0xe46fb590189bfd03ULL,
- 0x3732fd469a4c57dcULL, 0x7ef700a07cf1ad65ULL, 0x59c64468a31d8859ULL,
- 0x762fb0b4d45b61f6ULL, 0x155baed099047718ULL, 0x68755e4c3d50baa6ULL,
- 0xe9214e7f22d8b4dfULL, 0x2addbf532eac95f4ULL, 0x32ae3909b4bd0109ULL,
- 0x834df537b08e3450ULL, 0xfa209da84220728dULL, 0x9e691d9b9efe23f7ULL,
- 0x0446d288c4ae8d7fULL, 0x7b4cc524e169785bULL, 0x21d87f0135ca1385ULL,
- 0xcebb400f137b8aa5ULL, 0x272e2b66580796beULL, 0x3612264125c2b0deULL,
- 0x057702bdad1efbb2ULL, 0xd4babb8eacf84be9ULL, 0x91583139641bc67bULL,
- 0x8bdc2de08036e024ULL, 0x603c8156f49f68edULL, 0xf7d236f7dbef5111ULL,
- 0x9727c4598ad21e80ULL, 0xa08a0896670a5fd7ULL, 0xcb4a8f4309eba9cbULL,
- 0x81af564b0f7036a1ULL, 0xc0b99aa778199abdULL, 0x959f1ec83fc8e952ULL,
- 0x8c505077794a81b9ULL, 0x3acaaf8f056338f0ULL, 0x07b43f50627a6778ULL,
- 0x4a44ab49f5eccc77ULL, 0x3bc3d6e4b679ee98ULL, 0x9cc0d4d1cf14108cULL,
- 0x4406c00b206bc8a0ULL, 0x82a18854c8d72d89ULL, 0x67e366b35c3c432cULL,
- 0xb923dd61102b37f2ULL, 0x56ab2779d884271dULL, 0xbe83e1b0ff1525afULL,
- 0xfb7c65d4217e49a9ULL, 0x6bdbe0e76d48e7d4ULL, 0x08df828745d9179eULL,
- 0x22ea6a9add53bd34ULL, 0xe36e141c5622200aULL, 0x7f805d1b8cb750eeULL,
- 0xafe5c7a59f58e837ULL, 0xe27f996a4fb1c23cULL, 0xd3867dfb0775f0d0ULL,
- 0xd0e673de6e88891aULL, 0x123aeb9eafb86c25ULL, 0x30f1d5d5c145b895ULL,
- 0xbb434a2dee7269e7ULL, 0x78cb67ecf931fa38ULL, 0xf33b0372323bbf9cULL,
- 0x52d66336fb279c74ULL, 0x505f33ac0afb4eaaULL, 0xe8a5cd99a2cce187ULL,
- 0x534974801e2d30bbULL, 0x8d2d5711d5876d90ULL, 0x1f1a412891bc038eULL,
- 0xd6e2e71d82e56648ULL, 0x74036c3a497732b7ULL, 0x89b67ed96361f5abULL,
- 0xffed95d8f1ea02a2ULL, 0xe72b3bd61464d43dULL, 0xa6300f170bdc4820ULL,
- 0xebc18760ed78a77aULL
-};
-
-static const u64 sbox2[256] = {
- 0xe6a6be5a05a12138ULL, 0xb5a122a5b4f87c98ULL, 0x563c6089140b6990ULL,
- 0x4c46cb2e391f5dd5ULL, 0xd932addbc9b79434ULL, 0x08ea70e42015aff5ULL,
- 0xd765a6673e478cf1ULL, 0xc4fb757eab278d99ULL, 0xdf11c6862d6e0692ULL,
- 0xddeb84f10d7f3b16ULL, 0x6f2ef604a665ea04ULL, 0x4a8e0f0ff0e0dfb3ULL,
- 0xa5edeef83dbcba51ULL, 0xfc4f0a2a0ea4371eULL, 0xe83e1da85cb38429ULL,
- 0xdc8ff882ba1b1ce2ULL, 0xcd45505e8353e80dULL, 0x18d19a00d4db0717ULL,
- 0x34a0cfeda5f38101ULL, 0x0be77e518887caf2ULL, 0x1e341438b3c45136ULL,
- 0xe05797f49089ccf9ULL, 0xffd23f9df2591d14ULL, 0x543dda228595c5cdULL,
- 0x661f81fd99052a33ULL, 0x8736e641db0f7b76ULL, 0x15227725418e5307ULL,
- 0xe25f7f46162eb2faULL, 0x48a8b2126c13d9feULL, 0xafdc541792e76eeaULL,
- 0x03d912bfc6d1898fULL, 0x31b1aafa1b83f51bULL, 0xf1ac2796e42ab7d9ULL,
- 0x40a3a7d7fcd2ebacULL, 0x1056136d0afbbcc5ULL, 0x7889e1dd9a6d0c85ULL,
- 0xd33525782a7974aaULL, 0xa7e25d09078ac09bULL, 0xbd4138b3eac6edd0ULL,
- 0x920abfbe71eb9e70ULL, 0xa2a5d0f54fc2625cULL, 0xc054e36b0b1290a3ULL,
- 0xf6dd59ff62fe932bULL, 0x3537354511a8ac7dULL, 0xca845e9172fadcd4ULL,
- 0x84f82b60329d20dcULL, 0x79c62ce1cd672f18ULL, 0x8b09a2add124642cULL,
- 0xd0c1e96a19d9e726ULL, 0x5a786a9b4ba9500cULL, 0x0e020336634c43f3ULL,
- 0xc17b474aeb66d822ULL, 0x6a731ae3ec9baac2ULL, 0x8226667ae0840258ULL,
- 0x67d4567691caeca5ULL, 0x1d94155c4875adb5ULL, 0x6d00fd985b813fdfULL,
- 0x51286efcb774cd06ULL, 0x5e8834471fa744afULL, 0xf72ca0aee761ae2eULL,
- 0xbe40e4cdaee8e09aULL, 0xe9970bbb5118f665ULL, 0x726e4beb33df1964ULL,
- 0x703b000729199762ULL, 0x4631d816f5ef30a7ULL, 0xb880b5b51504a6beULL,
- 0x641793c37ed84b6cULL, 0x7b21ed77f6e97d96ULL, 0x776306312ef96b73ULL,
- 0xae528948e86ff3f4ULL, 0x53dbd7f286a3f8f8ULL, 0x16cadce74cfc1063ULL,
- 0x005c19bdfa52c6ddULL, 0x68868f5d64d46ad3ULL, 0x3a9d512ccf1e186aULL,
- 0x367e62c2385660aeULL, 0xe359e7ea77dcb1d7ULL, 0x526c0773749abe6eULL,
- 0x735ae5f9d09f734bULL, 0x493fc7cc8a558ba8ULL, 0xb0b9c1533041ab45ULL,
- 0x321958ba470a59bdULL, 0x852db00b5f46c393ULL, 0x91209b2bd336b0e5ULL,
- 0x6e604f7d659ef19fULL, 0xb99a8ae2782ccb24ULL, 0xccf52ab6c814c4c7ULL,
- 0x4727d9afbe11727bULL, 0x7e950d0c0121b34dULL, 0x756f435670ad471fULL,
- 0xf5add442615a6849ULL, 0x4e87e09980b9957aULL, 0x2acfa1df50aee355ULL,
- 0xd898263afd2fd556ULL, 0xc8f4924dd80c8fd6ULL, 0xcf99ca3d754a173aULL,
- 0xfe477bacaf91bf3cULL, 0xed5371f6d690c12dULL, 0x831a5c285e687094ULL,
- 0xc5d3c90a3708a0a4ULL, 0x0f7f903717d06580ULL, 0x19f9bb13b8fdf27fULL,
- 0xb1bd6f1b4d502843ULL, 0x1c761ba38fff4012ULL, 0x0d1530c4e2e21f3bULL,
- 0x8943ce69a7372c8aULL, 0xe5184e11feb5ce66ULL, 0x618bdb80bd736621ULL,
- 0x7d29bad68b574d0bULL, 0x81bb613e25e6fe5bULL, 0x071c9c10bc07913fULL,
- 0xc7beeb7909ac2d97ULL, 0xc3e58d353bc5d757ULL, 0xeb017892f38f61e8ULL,
- 0xd4effb9c9b1cc21aULL, 0x99727d26f494f7abULL, 0xa3e063a2956b3e03ULL,
- 0x9d4a8b9a4aa09c30ULL, 0x3f6ab7d500090fb4ULL, 0x9cc0f2a057268ac0ULL,
- 0x3dee9d2dedbf42d1ULL, 0x330f49c87960a972ULL, 0xc6b2720287421b41ULL,
- 0x0ac59ec07c00369cULL, 0xef4eac49cb353425ULL, 0xf450244eef0129d8ULL,
- 0x8acc46e5caf4deb6ULL, 0x2ffeab63989263f7ULL, 0x8f7cb9fe5d7a4578ULL,
- 0x5bd8f7644e634635ULL, 0x427a7315bf2dc900ULL, 0x17d0c4aa2125261cULL,
- 0x3992486c93518e50ULL, 0xb4cbfee0a2d7d4c3ULL, 0x7c75d6202c5ddd8dULL,
- 0xdbc295d8e35b6c61ULL, 0x60b369d302032b19ULL, 0xce42685fdce44132ULL,
- 0x06f3ddb9ddf65610ULL, 0x8ea4d21db5e148f0ULL, 0x20b0fce62fcd496fULL,
- 0x2c1b912358b0ee31ULL, 0xb28317b818f5a308ULL, 0xa89c1e189ca6d2cfULL,
- 0x0c6b18576aaadbc8ULL, 0xb65deaa91299fae3ULL, 0xfb2b794b7f1027e7ULL,
- 0x04e4317f443b5bebULL, 0x4b852d325939d0a6ULL, 0xd5ae6beefb207ffcULL,
- 0x309682b281c7d374ULL, 0xbae309a194c3b475ULL, 0x8cc3f97b13b49f05ULL,
- 0x98a9422ff8293967ULL, 0x244b16b01076ff7cULL, 0xf8bf571c663d67eeULL,
- 0x1f0d6758eee30da1ULL, 0xc9b611d97adeb9b7ULL, 0xb7afd5887b6c57a2ULL,
- 0x6290ae846b984fe1ULL, 0x94df4cdeacc1a5fdULL, 0x058a5bd1c5483affULL,
- 0x63166cc142ba3c37ULL, 0x8db8526eb2f76f40ULL, 0xe10880036f0d6d4eULL,
- 0x9e0523c9971d311dULL, 0x45ec2824cc7cd691ULL, 0x575b8359e62382c9ULL,
- 0xfa9e400dc4889995ULL, 0xd1823ecb45721568ULL, 0xdafd983b8206082fULL,
- 0xaa7d29082386a8cbULL, 0x269fcd4403b87588ULL, 0x1b91f5f728bdd1e0ULL,
- 0xe4669f39040201f6ULL, 0x7a1d7c218cf04adeULL, 0x65623c29d79ce5ceULL,
- 0x2368449096c00bb1ULL, 0xab9bf1879da503baULL, 0xbc23ecb1a458058eULL,
- 0x9a58df01bb401eccULL, 0xa070e868a85f143dULL, 0x4ff188307df2239eULL,
- 0x14d565b41a641183ULL, 0xee13337452701602ULL, 0x950e3dcf3f285e09ULL,
- 0x59930254b9c80953ULL, 0x3bf299408930da6dULL, 0xa955943f53691387ULL,
- 0xa15edecaa9cb8784ULL, 0x29142127352be9a0ULL, 0x76f0371fff4e7afbULL,
- 0x0239f450274f2228ULL, 0xbb073af01d5e868bULL, 0xbfc80571c10e96c1ULL,
- 0xd267088568222e23ULL, 0x9671a3d48e80b5b0ULL, 0x55b5d38ae193bb81ULL,
- 0x693ae2d0a18b04b8ULL, 0x5c48b4ecadd5335fULL, 0xfd743b194916a1caULL,
- 0x2577018134be98c4ULL, 0xe77987e83c54a4adULL, 0x28e11014da33e1b9ULL,
- 0x270cc59e226aa213ULL, 0x71495f756d1a5f60ULL, 0x9be853fb60afef77ULL,
- 0xadc786a7f7443dbfULL, 0x0904456173b29a82ULL, 0x58bc7a66c232bd5eULL,
- 0xf306558c673ac8b2ULL, 0x41f639c6b6c9772aULL, 0x216defe99fda35daULL,
- 0x11640cc71c7be615ULL, 0x93c43694565c5527ULL, 0xea038e6246777839ULL,
- 0xf9abf3ce5a3e2469ULL, 0x741e768d0fd312d2ULL, 0x0144b883ced652c6ULL,
- 0xc20b5a5ba33f8552ULL, 0x1ae69633c3435a9dULL, 0x97a28ca4088cfdecULL,
- 0x8824a43c1e96f420ULL, 0x37612fa66eeea746ULL, 0x6b4cb165f9cf0e5aULL,
- 0x43aa1c06a0abfb4aULL, 0x7f4dc26ff162796bULL, 0x6cbacc8e54ed9b0fULL,
- 0xa6b7ffefd2bb253eULL, 0x2e25bc95b0a29d4fULL, 0x86d6a58bdef1388cULL,
- 0xded74ac576b6f054ULL, 0x8030bdbc2b45805dULL, 0x3c81af70e94d9289ULL,
- 0x3eff6dda9e3100dbULL, 0xb38dc39fdfcc8847ULL, 0x123885528d17b87eULL,
- 0xf2da0ed240b1b642ULL, 0x44cefadcd54bf9a9ULL, 0x1312200e433c7ee6ULL,
- 0x9ffcc84f3a78c748ULL, 0xf0cd1f72248576bbULL, 0xec6974053638cfe4ULL,
- 0x2ba7b67c0cec4e4cULL, 0xac2f4df3e5ce32edULL, 0xcb33d14326ea4c11ULL,
- 0xa4e9044cc77e58bcULL, 0x5f513293d934fcefULL, 0x5dc9645506e55444ULL,
- 0x50de418f317de40aULL, 0x388cb31a69dde259ULL, 0x2db4a83455820a86ULL,
- 0x9010a91e84711ae9ULL, 0x4df7f0b7b1498371ULL, 0xd62a2eabc0977179ULL,
- 0x22fac097aa8d5c0eULL
-};
-
-static const u64 sbox3[256] = {
- 0xf49fcc2ff1daf39bULL, 0x487fd5c66ff29281ULL, 0xe8a30667fcdca83fULL,
- 0x2c9b4be3d2fcce63ULL, 0xda3ff74b93fbbbc2ULL, 0x2fa165d2fe70ba66ULL,
- 0xa103e279970e93d4ULL, 0xbecdec77b0e45e71ULL, 0xcfb41e723985e497ULL,
- 0xb70aaa025ef75017ULL, 0xd42309f03840b8e0ULL, 0x8efc1ad035898579ULL,
- 0x96c6920be2b2abc5ULL, 0x66af4163375a9172ULL, 0x2174abdcca7127fbULL,
- 0xb33ccea64a72ff41ULL, 0xf04a4933083066a5ULL, 0x8d970acdd7289af5ULL,
- 0x8f96e8e031c8c25eULL, 0xf3fec02276875d47ULL, 0xec7bf310056190ddULL,
- 0xf5adb0aebb0f1491ULL, 0x9b50f8850fd58892ULL, 0x4975488358b74de8ULL,
- 0xa3354ff691531c61ULL, 0x0702bbe481d2c6eeULL, 0x89fb24057deded98ULL,
- 0xac3075138596e902ULL, 0x1d2d3580172772edULL, 0xeb738fc28e6bc30dULL,
- 0x5854ef8f63044326ULL, 0x9e5c52325add3bbeULL, 0x90aa53cf325c4623ULL,
- 0xc1d24d51349dd067ULL, 0x2051cfeea69ea624ULL, 0x13220f0a862e7e4fULL,
- 0xce39399404e04864ULL, 0xd9c42ca47086fcb7ULL, 0x685ad2238a03e7ccULL,
- 0x066484b2ab2ff1dbULL, 0xfe9d5d70efbf79ecULL, 0x5b13b9dd9c481854ULL,
- 0x15f0d475ed1509adULL, 0x0bebcd060ec79851ULL, 0xd58c6791183ab7f8ULL,
- 0xd1187c5052f3eee4ULL, 0xc95d1192e54e82ffULL, 0x86eea14cb9ac6ca2ULL,
- 0x3485beb153677d5dULL, 0xdd191d781f8c492aULL, 0xf60866baa784ebf9ULL,
- 0x518f643ba2d08c74ULL, 0x8852e956e1087c22ULL, 0xa768cb8dc410ae8dULL,
- 0x38047726bfec8e1aULL, 0xa67738b4cd3b45aaULL, 0xad16691cec0dde19ULL,
- 0xc6d4319380462e07ULL, 0xc5a5876d0ba61938ULL, 0x16b9fa1fa58fd840ULL,
- 0x188ab1173ca74f18ULL, 0xabda2f98c99c021fULL, 0x3e0580ab134ae816ULL,
- 0x5f3b05b773645abbULL, 0x2501a2be5575f2f6ULL, 0x1b2f74004e7e8ba9ULL,
- 0x1cd7580371e8d953ULL, 0x7f6ed89562764e30ULL, 0xb15926ff596f003dULL,
- 0x9f65293da8c5d6b9ULL, 0x6ecef04dd690f84cULL, 0x4782275fff33af88ULL,
- 0xe41433083f820801ULL, 0xfd0dfe409a1af9b5ULL, 0x4325a3342cdb396bULL,
- 0x8ae77e62b301b252ULL, 0xc36f9e9f6655615aULL, 0x85455a2d92d32c09ULL,
- 0xf2c7dea949477485ULL, 0x63cfb4c133a39ebaULL, 0x83b040cc6ebc5462ULL,
- 0x3b9454c8fdb326b0ULL, 0x56f56a9e87ffd78cULL, 0x2dc2940d99f42bc6ULL,
- 0x98f7df096b096e2dULL, 0x19a6e01e3ad852bfULL, 0x42a99ccbdbd4b40bULL,
- 0xa59998af45e9c559ULL, 0x366295e807d93186ULL, 0x6b48181bfaa1f773ULL,
- 0x1fec57e2157a0a1dULL, 0x4667446af6201ad5ULL, 0xe615ebcacfb0f075ULL,
- 0xb8f31f4f68290778ULL, 0x22713ed6ce22d11eULL, 0x3057c1a72ec3c93bULL,
- 0xcb46acc37c3f1f2fULL, 0xdbb893fd02aaf50eULL, 0x331fd92e600b9fcfULL,
- 0xa498f96148ea3ad6ULL, 0xa8d8426e8b6a83eaULL, 0xa089b274b7735cdcULL,
- 0x87f6b3731e524a11ULL, 0x118808e5cbc96749ULL, 0x9906e4c7b19bd394ULL,
- 0xafed7f7e9b24a20cULL, 0x6509eadeeb3644a7ULL, 0x6c1ef1d3e8ef0edeULL,
- 0xb9c97d43e9798fb4ULL, 0xa2f2d784740c28a3ULL, 0x7b8496476197566fULL,
- 0x7a5be3e6b65f069dULL, 0xf96330ed78be6f10ULL, 0xeee60de77a076a15ULL,
- 0x2b4bee4aa08b9bd0ULL, 0x6a56a63ec7b8894eULL, 0x02121359ba34fef4ULL,
- 0x4cbf99f8283703fcULL, 0x398071350caf30c8ULL, 0xd0a77a89f017687aULL,
- 0xf1c1a9eb9e423569ULL, 0x8c7976282dee8199ULL, 0x5d1737a5dd1f7abdULL,
- 0x4f53433c09a9fa80ULL, 0xfa8b0c53df7ca1d9ULL, 0x3fd9dcbc886ccb77ULL,
- 0xc040917ca91b4720ULL, 0x7dd00142f9d1dcdfULL, 0x8476fc1d4f387b58ULL,
- 0x23f8e7c5f3316503ULL, 0x032a2244e7e37339ULL, 0x5c87a5d750f5a74bULL,
- 0x082b4cc43698992eULL, 0xdf917becb858f63cULL, 0x3270b8fc5bf86ddaULL,
- 0x10ae72bb29b5dd76ULL, 0x576ac94e7700362bULL, 0x1ad112dac61efb8fULL,
- 0x691bc30ec5faa427ULL, 0xff246311cc327143ULL, 0x3142368e30e53206ULL,
- 0x71380e31e02ca396ULL, 0x958d5c960aad76f1ULL, 0xf8d6f430c16da536ULL,
- 0xc8ffd13f1be7e1d2ULL, 0x7578ae66004ddbe1ULL, 0x05833f01067be646ULL,
- 0xbb34b5ad3bfe586dULL, 0x095f34c9a12b97f0ULL, 0x247ab64525d60ca8ULL,
- 0xdcdbc6f3017477d1ULL, 0x4a2e14d4decad24dULL, 0xbdb5e6d9be0a1eebULL,
- 0x2a7e70f7794301abULL, 0xdef42d8a270540fdULL, 0x01078ec0a34c22c1ULL,
- 0xe5de511af4c16387ULL, 0x7ebb3a52bd9a330aULL, 0x77697857aa7d6435ULL,
- 0x004e831603ae4c32ULL, 0xe7a21020ad78e312ULL, 0x9d41a70c6ab420f2ULL,
- 0x28e06c18ea1141e6ULL, 0xd2b28cbd984f6b28ULL, 0x26b75f6c446e9d83ULL,
- 0xba47568c4d418d7fULL, 0xd80badbfe6183d8eULL, 0x0e206d7f5f166044ULL,
- 0xe258a43911cbca3eULL, 0x723a1746b21dc0bcULL, 0xc7caa854f5d7cdd3ULL,
- 0x7cac32883d261d9cULL, 0x7690c26423ba942cULL, 0x17e55524478042b8ULL,
- 0xe0be477656a2389fULL, 0x4d289b5e67ab2da0ULL, 0x44862b9c8fbbfd31ULL,
- 0xb47cc8049d141365ULL, 0x822c1b362b91c793ULL, 0x4eb14655fb13dfd8ULL,
- 0x1ecbba0714e2a97bULL, 0x6143459d5cde5f14ULL, 0x53a8fbf1d5f0ac89ULL,
- 0x97ea04d81c5e5b00ULL, 0x622181a8d4fdb3f3ULL, 0xe9bcd341572a1208ULL,
- 0x1411258643cce58aULL, 0x9144c5fea4c6e0a4ULL, 0x0d33d06565cf620fULL,
- 0x54a48d489f219ca1ULL, 0xc43e5eac6d63c821ULL, 0xa9728b3a72770dafULL,
- 0xd7934e7b20df87efULL, 0xe35503b61a3e86e5ULL, 0xcae321fbc819d504ULL,
- 0x129a50b3ac60bfa6ULL, 0xcd5e68ea7e9fb6c3ULL, 0xb01c90199483b1c7ULL,
- 0x3de93cd5c295376cULL, 0xaed52edf2ab9ad13ULL, 0x2e60f512c0a07884ULL,
- 0xbc3d86a3e36210c9ULL, 0x35269d9b163951ceULL, 0x0c7d6e2ad0cdb5faULL,
- 0x59e86297d87f5733ULL, 0x298ef221898db0e7ULL, 0x55000029d1a5aa7eULL,
- 0x8bc08ae1b5061b45ULL, 0xc2c31c2b6c92703aULL, 0x94cc596baf25ef42ULL,
- 0x0a1d73db22540456ULL, 0x04b6a0f9d9c4179aULL, 0xeffdafa2ae3d3c60ULL,
- 0xf7c8075bb49496c4ULL, 0x9cc5c7141d1cd4e3ULL, 0x78bd1638218e5534ULL,
- 0xb2f11568f850246aULL, 0xedfabcfa9502bc29ULL, 0x796ce5f2da23051bULL,
- 0xaae128b0dc93537cULL, 0x3a493da0ee4b29aeULL, 0xb5df6b2c416895d7ULL,
- 0xfcabbd25122d7f37ULL, 0x70810b58105dc4b1ULL, 0xe10fdd37f7882a90ULL,
- 0x524dcab5518a3f5cULL, 0x3c9e85878451255bULL, 0x4029828119bd34e2ULL,
- 0x74a05b6f5d3ceccbULL, 0xb610021542e13ecaULL, 0x0ff979d12f59e2acULL,
- 0x6037da27e4f9cc50ULL, 0x5e92975a0df1847dULL, 0xd66de190d3e623feULL,
- 0x5032d6b87b568048ULL, 0x9a36b7ce8235216eULL, 0x80272a7a24f64b4aULL,
- 0x93efed8b8c6916f7ULL, 0x37ddbff44cce1555ULL, 0x4b95db5d4b99bd25ULL,
- 0x92d3fda169812fc0ULL, 0xfb1a4a9a90660bb6ULL, 0x730c196946a4b9b2ULL,
- 0x81e289aa7f49da68ULL, 0x64669a0f83b1a05fULL, 0x27b3ff7d9644f48bULL,
- 0xcc6b615c8db675b3ULL, 0x674f20b9bcebbe95ULL, 0x6f31238275655982ULL,
- 0x5ae488713e45cf05ULL, 0xbf619f9954c21157ULL, 0xeabac46040a8eae9ULL,
- 0x454c6fe9f2c0c1cdULL, 0x419cf6496412691cULL, 0xd3dc3bef265b0f70ULL,
- 0x6d0e60f5c3578a9eULL
-};
-
-static const u64 sbox4[256] = {
- 0x5b0e608526323c55ULL, 0x1a46c1a9fa1b59f5ULL, 0xa9e245a17c4c8ffaULL,
- 0x65ca5159db2955d7ULL, 0x05db0a76ce35afc2ULL, 0x81eac77ea9113d45ULL,
- 0x528ef88ab6ac0a0dULL, 0xa09ea253597be3ffULL, 0x430ddfb3ac48cd56ULL,
- 0xc4b3a67af45ce46fULL, 0x4ececfd8fbe2d05eULL, 0x3ef56f10b39935f0ULL,
- 0x0b22d6829cd619c6ULL, 0x17fd460a74df2069ULL, 0x6cf8cc8e8510ed40ULL,
- 0xd6c824bf3a6ecaa7ULL, 0x61243d581a817049ULL, 0x048bacb6bbc163a2ULL,
- 0xd9a38ac27d44cc32ULL, 0x7fddff5baaf410abULL, 0xad6d495aa804824bULL,
- 0xe1a6a74f2d8c9f94ULL, 0xd4f7851235dee8e3ULL, 0xfd4b7f886540d893ULL,
- 0x247c20042aa4bfdaULL, 0x096ea1c517d1327cULL, 0xd56966b4361a6685ULL,
- 0x277da5c31221057dULL, 0x94d59893a43acff7ULL, 0x64f0c51ccdc02281ULL,
- 0x3d33bcc4ff6189dbULL, 0xe005cb184ce66af1ULL, 0xff5ccd1d1db99beaULL,
- 0xb0b854a7fe42980fULL, 0x7bd46a6a718d4b9fULL, 0xd10fa8cc22a5fd8cULL,
- 0xd31484952be4bd31ULL, 0xc7fa975fcb243847ULL, 0x4886ed1e5846c407ULL,
- 0x28cddb791eb70b04ULL, 0xc2b00be2f573417fULL, 0x5c9590452180f877ULL,
- 0x7a6bddfff370eb00ULL, 0xce509e38d6d9d6a4ULL, 0xebeb0f00647fa702ULL,
- 0x1dcc06cf76606f06ULL, 0xe4d9f28ba286ff0aULL, 0xd85a305dc918c262ULL,
- 0x475b1d8732225f54ULL, 0x2d4fb51668ccb5feULL, 0xa679b9d9d72bba20ULL,
- 0x53841c0d912d43a5ULL, 0x3b7eaa48bf12a4e8ULL, 0x781e0e47f22f1ddfULL,
- 0xeff20ce60ab50973ULL, 0x20d261d19dffb742ULL, 0x16a12b03062a2e39ULL,
- 0x1960eb2239650495ULL, 0x251c16fed50eb8b8ULL, 0x9ac0c330f826016eULL,
- 0xed152665953e7671ULL, 0x02d63194a6369570ULL, 0x5074f08394b1c987ULL,
- 0x70ba598c90b25ce1ULL, 0x794a15810b9742f6ULL, 0x0d5925e9fcaf8c6cULL,
- 0x3067716cd868744eULL, 0x910ab077e8d7731bULL, 0x6a61bbdb5ac42f61ULL,
- 0x93513efbf0851567ULL, 0xf494724b9e83e9d5ULL, 0xe887e1985c09648dULL,
- 0x34b1d3c675370cfdULL, 0xdc35e433bc0d255dULL, 0xd0aab84234131be0ULL,
- 0x08042a50b48b7eafULL, 0x9997c4ee44a3ab35ULL, 0x829a7b49201799d0ULL,
- 0x263b8307b7c54441ULL, 0x752f95f4fd6a6ca6ULL, 0x927217402c08c6e5ULL,
- 0x2a8ab754a795d9eeULL, 0xa442f7552f72943dULL, 0x2c31334e19781208ULL,
- 0x4fa98d7ceaee6291ULL, 0x55c3862f665db309ULL, 0xbd0610175d53b1f3ULL,
- 0x46fe6cb840413f27ULL, 0x3fe03792df0cfa59ULL, 0xcfe700372eb85e8fULL,
- 0xa7be29e7adbce118ULL, 0xe544ee5cde8431ddULL, 0x8a781b1b41f1873eULL,
- 0xa5c94c78a0d2f0e7ULL, 0x39412e2877b60728ULL, 0xa1265ef3afc9a62cULL,
- 0xbcc2770c6a2506c5ULL, 0x3ab66dd5dce1ce12ULL, 0xe65499d04a675b37ULL,
- 0x7d8f523481bfd216ULL, 0x0f6f64fcec15f389ULL, 0x74efbe618b5b13c8ULL,
- 0xacdc82b714273e1dULL, 0xdd40bfe003199d17ULL, 0x37e99257e7e061f8ULL,
- 0xfa52626904775aaaULL, 0x8bbbf63a463d56f9ULL, 0xf0013f1543a26e64ULL,
- 0xa8307e9f879ec898ULL, 0xcc4c27a4150177ccULL, 0x1b432f2cca1d3348ULL,
- 0xde1d1f8f9f6fa013ULL, 0x606602a047a7ddd6ULL, 0xd237ab64cc1cb2c7ULL,
- 0x9b938e7225fcd1d3ULL, 0xec4e03708e0ff476ULL, 0xfeb2fbda3d03c12dULL,
- 0xae0bced2ee43889aULL, 0x22cb8923ebfb4f43ULL, 0x69360d013cf7396dULL,
- 0x855e3602d2d4e022ULL, 0x073805bad01f784cULL, 0x33e17a133852f546ULL,
- 0xdf4874058ac7b638ULL, 0xba92b29c678aa14aULL, 0x0ce89fc76cfaadcdULL,
- 0x5f9d4e0908339e34ULL, 0xf1afe9291f5923b9ULL, 0x6e3480f60f4a265fULL,
- 0xeebf3a2ab29b841cULL, 0xe21938a88f91b4adULL, 0x57dfeff845c6d3c3ULL,
- 0x2f006b0bf62caaf2ULL, 0x62f479ef6f75ee78ULL, 0x11a55ad41c8916a9ULL,
- 0xf229d29084fed453ULL, 0x42f1c27b16b000e6ULL, 0x2b1f76749823c074ULL,
- 0x4b76eca3c2745360ULL, 0x8c98f463b91691bdULL, 0x14bcc93cf1ade66aULL,
- 0x8885213e6d458397ULL, 0x8e177df0274d4711ULL, 0xb49b73b5503f2951ULL,
- 0x10168168c3f96b6bULL, 0x0e3d963b63cab0aeULL, 0x8dfc4b5655a1db14ULL,
- 0xf789f1356e14de5cULL, 0x683e68af4e51dac1ULL, 0xc9a84f9d8d4b0fd9ULL,
- 0x3691e03f52a0f9d1ULL, 0x5ed86e46e1878e80ULL, 0x3c711a0e99d07150ULL,
- 0x5a0865b20c4e9310ULL, 0x56fbfc1fe4f0682eULL, 0xea8d5de3105edf9bULL,
- 0x71abfdb12379187aULL, 0x2eb99de1bee77b9cULL, 0x21ecc0ea33cf4523ULL,
- 0x59a4d7521805c7a1ULL, 0x3896f5eb56ae7c72ULL, 0xaa638f3db18f75dcULL,
- 0x9f39358dabe9808eULL, 0xb7defa91c00b72acULL, 0x6b5541fd62492d92ULL,
- 0x6dc6dee8f92e4d5bULL, 0x353f57abc4beea7eULL, 0x735769d6da5690ceULL,
- 0x0a234aa642391484ULL, 0xf6f9508028f80d9dULL, 0xb8e319a27ab3f215ULL,
- 0x31ad9c1151341a4dULL, 0x773c22a57bef5805ULL, 0x45c7561a07968633ULL,
- 0xf913da9e249dbe36ULL, 0xda652d9b78a64c68ULL, 0x4c27a97f3bc334efULL,
- 0x76621220e66b17f4ULL, 0x967743899acd7d0bULL, 0xf3ee5bcae0ed6782ULL,
- 0x409f753600c879fcULL, 0x06d09a39b5926db6ULL, 0x6f83aeb0317ac588ULL,
- 0x01e6ca4a86381f21ULL, 0x66ff3462d19f3025ULL, 0x72207c24ddfd3bfbULL,
- 0x4af6b6d3e2ece2ebULL, 0x9c994dbec7ea08deULL, 0x49ace597b09a8bc4ULL,
- 0xb38c4766cf0797baULL, 0x131b9373c57c2a75ULL, 0xb1822cce61931e58ULL,
- 0x9d7555b909ba1c0cULL, 0x127fafdd937d11d2ULL, 0x29da3badc66d92e4ULL,
- 0xa2c1d57154c2ecbcULL, 0x58c5134d82f6fe24ULL, 0x1c3ae3515b62274fULL,
- 0xe907c82e01cb8126ULL, 0xf8ed091913e37fcbULL, 0x3249d8f9c80046c9ULL,
- 0x80cf9bede388fb63ULL, 0x1881539a116cf19eULL, 0x5103f3f76bd52457ULL,
- 0x15b7e6f5ae47f7a8ULL, 0xdbd7c6ded47e9ccfULL, 0x44e55c410228bb1aULL,
- 0xb647d4255edb4e99ULL, 0x5d11882bb8aafc30ULL, 0xf5098bbb29d3212aULL,
- 0x8fb5ea14e90296b3ULL, 0x677b942157dd025aULL, 0xfb58e7c0a390acb5ULL,
- 0x89d3674c83bd4a01ULL, 0x9e2da4df4bf3b93bULL, 0xfcc41e328cab4829ULL,
- 0x03f38c96ba582c52ULL, 0xcad1bdbd7fd85db2ULL, 0xbbb442c16082ae83ULL,
- 0xb95fe86ba5da9ab0ULL, 0xb22e04673771a93fULL, 0x845358c9493152d8ULL,
- 0xbe2a488697b4541eULL, 0x95a2dc2dd38e6966ULL, 0xc02c11ac923c852bULL,
- 0x2388b1990df2a87bULL, 0x7c8008fa1b4f37beULL, 0x1f70d0c84d54e503ULL,
- 0x5490adec7ece57d4ULL, 0x002b3c27d9063a3aULL, 0x7eaea3848030a2bfULL,
- 0xc602326ded2003c0ULL, 0x83a7287d69a94086ULL, 0xc57a5fcb30f57a8aULL,
- 0xb56844e479ebe779ULL, 0xa373b40f05dcbce9ULL, 0xd71a786e88570ee2ULL,
- 0x879cbacdbde8f6a0ULL, 0x976ad1bcc164a32fULL, 0xab21e25e9666d78bULL,
- 0x901063aae5e5c33cULL, 0x9818b34448698d90ULL, 0xe36487ae3e1e8abbULL,
- 0xafbdf931893bdcb4ULL, 0x6345a0dc5fbbd519ULL, 0x8628fe269b9465caULL,
- 0x1e5d01603f9c51ecULL, 0x4de44006a15049b7ULL, 0xbf6c70e5f776cbb1ULL,
- 0x411218f2ef552bedULL, 0xcb0c0708705a36a3ULL, 0xe74d14754f986044ULL,
- 0xcd56d9430ea8280eULL, 0xc12591d7535f5065ULL, 0xc83223f1720aef96ULL,
- 0xc3a0396f7363a51fULL
-};
-
-
-static void tgr192_round(u64 * ra, u64 * rb, u64 * rc, u64 x, int mul)
-{
- u64 a = *ra;
- u64 b = *rb;
- u64 c = *rc;
-
- c ^= x;
- a -= sbox1[c & 0xff] ^ sbox2[(c >> 16) & 0xff]
- ^ sbox3[(c >> 32) & 0xff] ^ sbox4[(c >> 48) & 0xff];
- b += sbox4[(c >> 8) & 0xff] ^ sbox3[(c >> 24) & 0xff]
- ^ sbox2[(c >> 40) & 0xff] ^ sbox1[(c >> 56) & 0xff];
- b *= mul;
-
- *ra = a;
- *rb = b;
- *rc = c;
-}
-
-
-static void tgr192_pass(u64 * ra, u64 * rb, u64 * rc, u64 * x, int mul)
-{
- u64 a = *ra;
- u64 b = *rb;
- u64 c = *rc;
-
- tgr192_round(&a, &b, &c, x[0], mul);
- tgr192_round(&b, &c, &a, x[1], mul);
- tgr192_round(&c, &a, &b, x[2], mul);
- tgr192_round(&a, &b, &c, x[3], mul);
- tgr192_round(&b, &c, &a, x[4], mul);
- tgr192_round(&c, &a, &b, x[5], mul);
- tgr192_round(&a, &b, &c, x[6], mul);
- tgr192_round(&b, &c, &a, x[7], mul);
-
- *ra = a;
- *rb = b;
- *rc = c;
-}
-
-
-static void tgr192_key_schedule(u64 * x)
-{
- x[0] -= x[7] ^ 0xa5a5a5a5a5a5a5a5ULL;
- x[1] ^= x[0];
- x[2] += x[1];
- x[3] -= x[2] ^ ((~x[1]) << 19);
- x[4] ^= x[3];
- x[5] += x[4];
- x[6] -= x[5] ^ ((~x[4]) >> 23);
- x[7] ^= x[6];
- x[0] += x[7];
- x[1] -= x[0] ^ ((~x[7]) << 19);
- x[2] ^= x[1];
- x[3] += x[2];
- x[4] -= x[3] ^ ((~x[2]) >> 23);
- x[5] ^= x[4];
- x[6] += x[5];
- x[7] -= x[6] ^ 0x0123456789abcdefULL;
-}
-
-
-/****************
- * Transform the message DATA which consists of 64 bytes (8 64-bit words)
- */
-
-static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data)
-{
- u64 a, b, c, aa, bb, cc;
- u64 x[8];
- int i;
-
- for (i = 0; i < 8; i++)
- x[i] = get_unaligned_le64(data + i * sizeof(__le64));
-
- /* save */
- a = aa = tctx->a;
- b = bb = tctx->b;
- c = cc = tctx->c;
-
- tgr192_pass(&a, &b, &c, x, 5);
- tgr192_key_schedule(x);
- tgr192_pass(&c, &a, &b, x, 7);
- tgr192_key_schedule(x);
- tgr192_pass(&b, &c, &a, x, 9);
-
-
- /* feedforward */
- a ^= aa;
- b -= bb;
- c += cc;
- /* store */
- tctx->a = a;
- tctx->b = b;
- tctx->c = c;
-}
-
-static int tgr192_init(struct shash_desc *desc)
-{
- struct tgr192_ctx *tctx = shash_desc_ctx(desc);
-
- tctx->a = 0x0123456789abcdefULL;
- tctx->b = 0xfedcba9876543210ULL;
- tctx->c = 0xf096a5b4c3b2e187ULL;
- tctx->nblocks = 0;
- tctx->count = 0;
-
- return 0;
-}
-
-
-/* Update the message digest with the contents
- * of INBUF with length INLEN. */
-static int tgr192_update(struct shash_desc *desc, const u8 *inbuf,
- unsigned int len)
-{
- struct tgr192_ctx *tctx = shash_desc_ctx(desc);
-
- if (tctx->count == 64) { /* flush the buffer */
- tgr192_transform(tctx, tctx->hash);
- tctx->count = 0;
- tctx->nblocks++;
- }
- if (!inbuf) {
- return 0;
- }
- if (tctx->count) {
- for (; len && tctx->count < 64; len--) {
- tctx->hash[tctx->count++] = *inbuf++;
- }
- tgr192_update(desc, NULL, 0);
- if (!len) {
- return 0;
- }
-
- }
-
- while (len >= 64) {
- tgr192_transform(tctx, inbuf);
- tctx->count = 0;
- tctx->nblocks++;
- len -= 64;
- inbuf += 64;
- }
- for (; len && tctx->count < 64; len--) {
- tctx->hash[tctx->count++] = *inbuf++;
- }
-
- return 0;
-}
-
-
-
-/* The routine terminates the computation */
-static int tgr192_final(struct shash_desc *desc, u8 * out)
-{
- struct tgr192_ctx *tctx = shash_desc_ctx(desc);
- __be64 *dst = (__be64 *)out;
- __be64 *be64p;
- __le32 *le32p;
- u32 t, msb, lsb;
-
- tgr192_update(desc, NULL, 0); /* flush */
-
- msb = 0;
- t = tctx->nblocks;
- if ((lsb = t << 6) < t) { /* multiply by 64 to make a byte count */
- msb++;
- }
- msb += t >> 26;
- t = lsb;
- if ((lsb = t + tctx->count) < t) { /* add the count */
- msb++;
- }
- t = lsb;
- if ((lsb = t << 3) < t) { /* multiply by 8 to make a bit count */
- msb++;
- }
- msb += t >> 29;
-
- if (tctx->count < 56) { /* enough room */
- tctx->hash[tctx->count++] = 0x01; /* pad */
- while (tctx->count < 56) {
- tctx->hash[tctx->count++] = 0; /* pad */
- }
- } else { /* need one extra block */
- tctx->hash[tctx->count++] = 0x01; /* pad character */
- while (tctx->count < 64) {
- tctx->hash[tctx->count++] = 0;
- }
- tgr192_update(desc, NULL, 0); /* flush */
- memset(tctx->hash, 0, 56); /* fill next block with zeroes */
- }
- /* append the 64 bit count */
- le32p = (__le32 *)&tctx->hash[56];
- le32p[0] = cpu_to_le32(lsb);
- le32p[1] = cpu_to_le32(msb);
-
- tgr192_transform(tctx, tctx->hash);
-
- be64p = (__be64 *)tctx->hash;
- dst[0] = be64p[0] = cpu_to_be64(tctx->a);
- dst[1] = be64p[1] = cpu_to_be64(tctx->b);
- dst[2] = be64p[2] = cpu_to_be64(tctx->c);
-
- return 0;
-}
-
-static int tgr160_final(struct shash_desc *desc, u8 * out)
-{
- u8 D[64];
-
- tgr192_final(desc, D);
- memcpy(out, D, TGR160_DIGEST_SIZE);
- memzero_explicit(D, TGR192_DIGEST_SIZE);
-
- return 0;
-}
-
-static int tgr128_final(struct shash_desc *desc, u8 * out)
-{
- u8 D[64];
-
- tgr192_final(desc, D);
- memcpy(out, D, TGR128_DIGEST_SIZE);
- memzero_explicit(D, TGR192_DIGEST_SIZE);
-
- return 0;
-}
-
-static struct shash_alg tgr_algs[3] = { {
- .digestsize = TGR192_DIGEST_SIZE,
- .init = tgr192_init,
- .update = tgr192_update,
- .final = tgr192_final,
- .descsize = sizeof(struct tgr192_ctx),
- .base = {
- .cra_name = "tgr192",
- .cra_driver_name = "tgr192-generic",
- .cra_blocksize = TGR192_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = TGR160_DIGEST_SIZE,
- .init = tgr192_init,
- .update = tgr192_update,
- .final = tgr160_final,
- .descsize = sizeof(struct tgr192_ctx),
- .base = {
- .cra_name = "tgr160",
- .cra_driver_name = "tgr160-generic",
- .cra_blocksize = TGR192_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = TGR128_DIGEST_SIZE,
- .init = tgr192_init,
- .update = tgr192_update,
- .final = tgr128_final,
- .descsize = sizeof(struct tgr192_ctx),
- .base = {
- .cra_name = "tgr128",
- .cra_driver_name = "tgr128-generic",
- .cra_blocksize = TGR192_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int __init tgr192_mod_init(void)
-{
- return crypto_register_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
-}
-
-static void __exit tgr192_mod_fini(void)
-{
- crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
-}
-
-MODULE_ALIAS_CRYPTO("tgr192");
-MODULE_ALIAS_CRYPTO("tgr160");
-MODULE_ALIAS_CRYPTO("tgr128");
-
-subsys_initcall(tgr192_mod_init);
-module_exit(tgr192_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Tiger Message Digest Algorithm");
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index 4f7c033224f9..86b2f067a416 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -24,7 +24,7 @@
* Third Edition.
*/
-#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include <crypto/twofish.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -83,11 +83,11 @@
* whitening subkey number m. */
#define INPACK(n, x, m) \
- x = le32_to_cpu(src[n]) ^ ctx->w[m]
+ x = get_unaligned_le32(in + (n) * 4) ^ ctx->w[m]
#define OUTUNPACK(n, x, m) \
x ^= ctx->w[m]; \
- dst[n] = cpu_to_le32(x)
+ put_unaligned_le32(x, out + (n) * 4)
@@ -95,8 +95,6 @@
static void twofish_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *src = (const __le32 *)in;
- __le32 *dst = (__le32 *)out;
/* The four 32-bit chunks of the text. */
u32 a, b, c, d;
@@ -132,8 +130,6 @@ static void twofish_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
static void twofish_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *src = (const __le32 *)in;
- __le32 *dst = (__le32 *)out;
/* The four 32-bit chunks of the text. */
u32 a, b, c, d;
@@ -172,7 +168,6 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = TF_MIN_KEY_SIZE,
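The twofish_generic.c hunks above drop the cast-based __le32 loads/stores (and the cra_alignmask) in favour of get_unaligned_le32()/put_unaligned_le32(), so the cipher no longer relies on the crypto API aligning its buffers. A minimal sketch of the same pattern, using only <asm/unaligned.h>; the function and variable names here are illustrative, not taken from the patch:

/* Sketch of the alignment-safe load/store pattern used in the hunks above. */
#include <asm/unaligned.h>
#include <linux/types.h>

static void demo_block_xor(u8 *out, const u8 *in, const u32 *key)
{
	u32 a, b;

	/* Safe even when in/out are not 4-byte aligned. */
	a = get_unaligned_le32(in)     ^ key[0];
	b = get_unaligned_le32(in + 4) ^ key[1];

	put_unaligned_le32(a, out);
	put_unaligned_le32(b, out + 4);
}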
diff --git a/crypto/vmac.c b/crypto/vmac.c
index 9b565d1040d6..4633b2dda1e0 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -36,6 +36,7 @@
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
/*
@@ -693,3 +694,4 @@ module_exit(vmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
MODULE_ALIAS_CRYPTO("vmac64");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index af3b7eb5d7c7..6074c5c1da49 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -6,6 +6,7 @@
* Kazunori Miyazawa <miyazawa@linux-ipv6.org>
*/
+#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <linux/err.h>
#include <linux/kernel.h>
@@ -272,3 +273,4 @@ module_exit(crypto_xcbc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XCBC keyed hash algorithm");
MODULE_ALIAS_CRYPTO("xcbc");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/crypto/xor.c b/crypto/xor.c
index 8f899f898ec9..8e72e5d5db0d 100644
--- a/crypto/xor.c
+++ b/crypto/xor.c
@@ -95,7 +95,7 @@ do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
for (i = 0; i < 3; i++) {
start = ktime_get();
for (j = 0; j < REPS; j++) {
- mb(); /* prevent loop optimzation */
+ mb(); /* prevent loop optimization */
tmpl->do_2(BENCH_SIZE, b1, b2);
mb();
}
diff --git a/crypto/xts.c b/crypto/xts.c
index ad45b009774b..6c12f30dbdd6 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -7,6 +7,7 @@
* Based on ecb.c
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*/
+#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
@@ -464,3 +465,4 @@ module_exit(xts_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
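The xts.c change, like the vmac.c and xcbc.c ones above, pulls in <crypto/internal/cipher.h> and imports the CRYPTO_INTERNAL symbol namespace, which is what users of the bare single-block cipher API now need. A hedged, out-of-tree sketch of the pattern (module and function names are hypothetical, not from this series):

/* Illustrative only: shows the namespace import required by the change above. */
#include <crypto/internal/cipher.h>
#include <linux/module.h>

static int __init demo_cipher_user_init(void)
{
	/* crypto_alloc_cipher()/crypto_cipher_encrypt_one() calls would go here. */
	return 0;
}

static void __exit demo_cipher_user_exit(void)
{
}

module_init(demo_cipher_user_init);
module_exit(demo_cipher_user_exit);

MODULE_LICENSE("GPL");
/* Without this, modpost warns that CRYPTO_INTERNAL symbols are used
 * but not imported. */
MODULE_IMPORT_NS(CRYPTO_INTERNAL);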
diff --git a/drivers/Kconfig b/drivers/Kconfig
index dcecc9f6e33f..62c753a73651 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -6,6 +6,7 @@ menu "Device Drivers"
source "drivers/amba/Kconfig"
source "drivers/eisa/Kconfig"
source "drivers/pci/Kconfig"
+source "drivers/cxl/Kconfig"
source "drivers/pcmcia/Kconfig"
source "drivers/rapidio/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index fd11b9ac4cc3..6fba7daba591 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -27,7 +27,7 @@ obj-y += idle/
obj-y += char/ipmi/
obj-$(CONFIG_ACPI) += acpi/
-obj-$(CONFIG_SFI) += sfi/
+
# PnP must come after ACPI since it will eventually need to check if acpi
# was used and do nothing if so
obj-$(CONFIG_PNP) += pnp/
@@ -73,6 +73,7 @@ obj-$(CONFIG_NVM) += lightnvm/
obj-y += base/ block/ misc/ mfd/ nfc/
obj-$(CONFIG_LIBNVDIMM) += nvdimm/
obj-$(CONFIG_DAX) += dax/
+obj-$(CONFIG_CXL_BUS) += cxl/
obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
obj-$(CONFIG_NUBUS) += nubus/
obj-y += macintosh/
diff --git a/drivers/accessibility/speakup/serialio.c b/drivers/accessibility/speakup/serialio.c
index 403b01d66367..53580bdc5baa 100644
--- a/drivers/accessibility/speakup/serialio.c
+++ b/drivers/accessibility/speakup/serialio.c
@@ -27,11 +27,11 @@ static const struct old_serial_port *serstate;
static int timeouts;
static int spk_serial_out(struct spk_synth *in_synth, const char ch);
-static void spk_serial_send_xchar(char ch);
-static void spk_serial_tiocmset(unsigned int set, unsigned int clear);
-static unsigned char spk_serial_in(void);
-static unsigned char spk_serial_in_nowait(void);
-static void spk_serial_flush_buffer(void);
+static void spk_serial_send_xchar(struct spk_synth *in_synth, char ch);
+static void spk_serial_tiocmset(struct spk_synth *in_synth, unsigned int set, unsigned int clear);
+static unsigned char spk_serial_in(struct spk_synth *in_synth);
+static unsigned char spk_serial_in_nowait(struct spk_synth *in_synth);
+static void spk_serial_flush_buffer(struct spk_synth *in_synth);
static int spk_serial_wait_for_xmitr(struct spk_synth *in_synth);
struct spk_io_ops spk_serial_io_ops = {
@@ -150,7 +150,7 @@ static void start_serial_interrupt(int irq)
outb(1, speakup_info.port_tts + UART_FCR); /* Turn FIFO On */
}
-static void spk_serial_send_xchar(char ch)
+static void spk_serial_send_xchar(struct spk_synth *synth, char ch)
{
int timeout = SPK_XMITR_TIMEOUT;
@@ -162,7 +162,7 @@ static void spk_serial_send_xchar(char ch)
outb(ch, speakup_info.port_tts);
}
-static void spk_serial_tiocmset(unsigned int set, unsigned int clear)
+static void spk_serial_tiocmset(struct spk_synth *in_synth, unsigned int set, unsigned int clear)
{
int old = inb(speakup_info.port_tts + UART_MCR);
@@ -251,7 +251,7 @@ static int spk_serial_wait_for_xmitr(struct spk_synth *in_synth)
return 1;
}
-static unsigned char spk_serial_in(void)
+static unsigned char spk_serial_in(struct spk_synth *in_synth)
{
int tmout = SPK_SERIAL_TIMEOUT;
@@ -265,7 +265,7 @@ static unsigned char spk_serial_in(void)
return inb_p(speakup_info.port_tts + UART_RX);
}
-static unsigned char spk_serial_in_nowait(void)
+static unsigned char spk_serial_in_nowait(struct spk_synth *in_synth)
{
unsigned char lsr;
@@ -275,7 +275,7 @@ static unsigned char spk_serial_in_nowait(void)
return inb_p(speakup_info.port_tts + UART_RX);
}
-static void spk_serial_flush_buffer(void)
+static void spk_serial_flush_buffer(struct spk_synth *in_synth)
{
/* TODO: flush the UART 16550 buffer */
}
@@ -307,7 +307,7 @@ const char *spk_serial_synth_immediate(struct spk_synth *synth,
}
EXPORT_SYMBOL_GPL(spk_serial_synth_immediate);
-void spk_serial_release(void)
+void spk_serial_release(struct spk_synth *synth)
{
spk_stop_serial_interrupt();
if (speakup_info.port_tts == 0)
diff --git a/drivers/accessibility/speakup/speakup_acntpc.c b/drivers/accessibility/speakup/speakup_acntpc.c
index c94328a5bd4a..c1ec087dca13 100644
--- a/drivers/accessibility/speakup/speakup_acntpc.c
+++ b/drivers/accessibility/speakup/speakup_acntpc.c
@@ -25,7 +25,7 @@
#define PROCSPEECH '\r'
static int synth_probe(struct spk_synth *synth);
-static void accent_release(void);
+static void accent_release(struct spk_synth *synth);
static const char *synth_immediate(struct spk_synth *synth, const char *buf);
static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
@@ -294,7 +294,7 @@ static int synth_probe(struct spk_synth *synth)
return 0;
}
-static void accent_release(void)
+static void accent_release(struct spk_synth *synth)
{
spk_stop_serial_interrupt();
if (speakup_info.port_tts)
diff --git a/drivers/accessibility/speakup/speakup_apollo.c b/drivers/accessibility/speakup/speakup_apollo.c
index 0877b4044c28..cd63581b2e99 100644
--- a/drivers/accessibility/speakup/speakup_apollo.c
+++ b/drivers/accessibility/speakup/speakup_apollo.c
@@ -163,8 +163,8 @@ static void do_catch_up(struct spk_synth *synth)
full_time_val = full_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (!synth->io_ops->synth_out(synth, ch)) {
- synth->io_ops->tiocmset(0, UART_MCR_RTS);
- synth->io_ops->tiocmset(UART_MCR_RTS, 0);
+ synth->io_ops->tiocmset(synth, 0, UART_MCR_RTS);
+ synth->io_ops->tiocmset(synth, UART_MCR_RTS, 0);
schedule_timeout(msecs_to_jiffies(full_time_val));
continue;
}
diff --git a/drivers/accessibility/speakup/speakup_audptr.c b/drivers/accessibility/speakup/speakup_audptr.c
index e6a6a9665d8f..e89fd72579e6 100644
--- a/drivers/accessibility/speakup/speakup_audptr.c
+++ b/drivers/accessibility/speakup/speakup_audptr.c
@@ -119,8 +119,8 @@ static struct spk_synth synth_audptr = {
static void synth_flush(struct spk_synth *synth)
{
- synth->io_ops->flush_buffer();
- synth->io_ops->send_xchar(SYNTH_CLEAR);
+ synth->io_ops->flush_buffer(synth);
+ synth->io_ops->send_xchar(synth, SYNTH_CLEAR);
synth->io_ops->synth_out(synth, PROCSPEECH);
}
@@ -130,11 +130,11 @@ static void synth_version(struct spk_synth *synth)
char synth_id[40] = "";
synth->synth_immediate(synth, "\x05[Q]");
- synth_id[test] = synth->io_ops->synth_in();
+ synth_id[test] = synth->io_ops->synth_in(synth);
if (synth_id[test] == 'A') {
do {
/* read version string from synth */
- synth_id[++test] = synth->io_ops->synth_in();
+ synth_id[++test] = synth->io_ops->synth_in(synth);
} while (synth_id[test] != '\n' && test < 32);
synth_id[++test] = 0x00;
}
diff --git a/drivers/accessibility/speakup/speakup_decext.c b/drivers/accessibility/speakup/speakup_decext.c
index 7408eb29cf38..092cfd08a9e1 100644
--- a/drivers/accessibility/speakup/speakup_decext.c
+++ b/drivers/accessibility/speakup/speakup_decext.c
@@ -218,7 +218,7 @@ static void do_catch_up(struct spk_synth *synth)
static void synth_flush(struct spk_synth *synth)
{
in_escape = 0;
- synth->io_ops->flush_buffer();
+ synth->io_ops->flush_buffer(synth);
synth->synth_immediate(synth, "\033P;10z\033\\");
}
diff --git a/drivers/accessibility/speakup/speakup_decpc.c b/drivers/accessibility/speakup/speakup_decpc.c
index 96f24c848cc5..dec314dee214 100644
--- a/drivers/accessibility/speakup/speakup_decpc.c
+++ b/drivers/accessibility/speakup/speakup_decpc.c
@@ -125,7 +125,7 @@ enum { PRIMARY_DIC = 0, USER_DIC, COMMAND_DIC, ABBREV_DIC };
#define SYNTH_IO_EXTENT 8
static int synth_probe(struct spk_synth *synth);
-static void dtpc_release(void);
+static void dtpc_release(struct spk_synth *synth);
static const char *synth_immediate(struct spk_synth *synth, const char *buf);
static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
@@ -474,7 +474,7 @@ static int synth_probe(struct spk_synth *synth)
return 0;
}
-static void dtpc_release(void)
+static void dtpc_release(struct spk_synth *synth)
{
spk_stop_serial_interrupt();
if (speakup_info.port_tts)
diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c
index ab6d61e80b1c..580ec796816b 100644
--- a/drivers/accessibility/speakup/speakup_dectlk.c
+++ b/drivers/accessibility/speakup/speakup_dectlk.c
@@ -78,6 +78,8 @@ static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
+static struct kobj_attribute flush_time_attribute =
+ __ATTR(flush_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
@@ -99,6 +101,7 @@ static struct attribute *synth_attrs[] = {
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
+ &flush_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
@@ -118,6 +121,7 @@ static struct spk_synth synth_dectlk = {
.trigger = 50,
.jiffies = 50,
.full = 40000,
+ .flush_time = 4000,
.dev_name = SYNTH_DEFAULT_DEV,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
@@ -200,18 +204,23 @@ static void do_catch_up(struct spk_synth *synth)
static u_char last = '\0';
unsigned long flags;
unsigned long jiff_max;
- unsigned long timeout = msecs_to_jiffies(4000);
+ unsigned long timeout;
DEFINE_WAIT(wait);
struct var_t *jiffy_delta;
struct var_t *delay_time;
+ struct var_t *flush_time;
int jiffy_delta_val;
int delay_time_val;
+ int timeout_val;
jiffy_delta = spk_get_var(JIFFY);
delay_time = spk_get_var(DELAY);
+ flush_time = spk_get_var(FLUSH);
spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
+ timeout_val = flush_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ timeout = msecs_to_jiffies(timeout_val);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
@@ -289,7 +298,7 @@ static void synth_flush(struct spk_synth *synth)
synth->io_ops->synth_out(synth, ']');
in_escape = 0;
is_flushing = 1;
- synth->io_ops->flush_buffer();
+ synth->io_ops->flush_buffer(synth);
synth->io_ops->synth_out(synth, SYNTH_CLEAR);
}
diff --git a/drivers/accessibility/speakup/speakup_dtlk.c b/drivers/accessibility/speakup/speakup_dtlk.c
index dbebed0eeeec..92838d3ae9eb 100644
--- a/drivers/accessibility/speakup/speakup_dtlk.c
+++ b/drivers/accessibility/speakup/speakup_dtlk.c
@@ -24,7 +24,7 @@
#define PROCSPEECH 0x00
static int synth_probe(struct spk_synth *synth);
-static void dtlk_release(void);
+static void dtlk_release(struct spk_synth *synth);
static const char *synth_immediate(struct spk_synth *synth, const char *buf);
static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
@@ -365,7 +365,7 @@ static int synth_probe(struct spk_synth *synth)
return 0;
}
-static void dtlk_release(void)
+static void dtlk_release(struct spk_synth *synth)
{
spk_stop_serial_interrupt();
if (speakup_info.port_tts)
diff --git a/drivers/accessibility/speakup/speakup_keypc.c b/drivers/accessibility/speakup/speakup_keypc.c
index 414827e888fc..311f4aa0be22 100644
--- a/drivers/accessibility/speakup/speakup_keypc.c
+++ b/drivers/accessibility/speakup/speakup_keypc.c
@@ -24,7 +24,7 @@
#define SYNTH_CLEAR 0x03
static int synth_probe(struct spk_synth *synth);
-static void keynote_release(void);
+static void keynote_release(struct spk_synth *synth);
static const char *synth_immediate(struct spk_synth *synth, const char *buf);
static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
@@ -295,7 +295,7 @@ static int synth_probe(struct spk_synth *synth)
return 0;
}
-static void keynote_release(void)
+static void keynote_release(struct spk_synth *synth)
{
spk_stop_serial_interrupt();
if (synth_port)
diff --git a/drivers/accessibility/speakup/speakup_ltlk.c b/drivers/accessibility/speakup/speakup_ltlk.c
index 3c59519a871f..3e59b387d0c4 100644
--- a/drivers/accessibility/speakup/speakup_ltlk.c
+++ b/drivers/accessibility/speakup/speakup_ltlk.c
@@ -132,7 +132,7 @@ static void synth_interrogate(struct spk_synth *synth)
synth->synth_immediate(synth, "\x18\x01?");
for (i = 0; i < 50; i++) {
- buf[i] = synth->io_ops->synth_in();
+ buf[i] = synth->io_ops->synth_in(synth);
if (i > 2 && buf[i] == 0x7f)
break;
}
diff --git a/drivers/accessibility/speakup/speakup_soft.c b/drivers/accessibility/speakup/speakup_soft.c
index 9a7029539f35..c3f97c572fb6 100644
--- a/drivers/accessibility/speakup/speakup_soft.c
+++ b/drivers/accessibility/speakup/speakup_soft.c
@@ -24,7 +24,7 @@
#define CLEAR_SYNTH 0x18
static int softsynth_probe(struct spk_synth *synth);
-static void softsynth_release(void);
+static void softsynth_release(struct spk_synth *synth);
static int softsynth_is_alive(struct spk_synth *synth);
static unsigned char get_index(struct spk_synth *synth);
@@ -402,7 +402,7 @@ static int softsynth_probe(struct spk_synth *synth)
return 0;
}
-static void softsynth_release(void)
+static void softsynth_release(struct spk_synth *synth)
{
misc_deregister(&synth_device);
misc_deregister(&synthu_device);
diff --git a/drivers/accessibility/speakup/speakup_spkout.c b/drivers/accessibility/speakup/speakup_spkout.c
index 6e933bf1de2e..bd3d8dc300ff 100644
--- a/drivers/accessibility/speakup/speakup_spkout.c
+++ b/drivers/accessibility/speakup/speakup_spkout.c
@@ -117,8 +117,8 @@ static struct spk_synth synth_spkout = {
static void synth_flush(struct spk_synth *synth)
{
- synth->io_ops->flush_buffer();
- synth->io_ops->send_xchar(SYNTH_CLEAR);
+ synth->io_ops->flush_buffer(synth);
+ synth->io_ops->send_xchar(synth, SYNTH_CLEAR);
}
module_param_named(ser, synth_spkout.ser, int, 0444);
diff --git a/drivers/accessibility/speakup/spk_priv.h b/drivers/accessibility/speakup/spk_priv.h
index 0f4bcbe5ddb9..9da57ead17cb 100644
--- a/drivers/accessibility/speakup/spk_priv.h
+++ b/drivers/accessibility/speakup/spk_priv.h
@@ -34,8 +34,8 @@
const struct old_serial_port *spk_serial_init(int index);
void spk_stop_serial_interrupt(void);
-void spk_serial_release(void);
-void spk_ttyio_release(void);
+void spk_serial_release(struct spk_synth *synth);
+void spk_ttyio_release(struct spk_synth *synth);
void spk_ttyio_register_ldisc(void);
void spk_ttyio_unregister_ldisc(void);
diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c
index 6284aff434a1..9af1d4c124d3 100644
--- a/drivers/accessibility/speakup/spk_ttyio.c
+++ b/drivers/accessibility/speakup/spk_ttyio.c
@@ -12,14 +12,15 @@ struct spk_ldisc_data {
char buf;
struct completion completion;
bool buf_free;
+ struct spk_synth *synth;
};
-static struct spk_synth *spk_ttyio_synth;
-static struct tty_struct *speakup_tty;
-/* mutex to protect against speakup_tty disappearing from underneath us while
- * we are using it. this can happen when the device physically unplugged,
- * while in use. it also serialises access to speakup_tty.
+/*
+ * This allows to catch within spk_ttyio_ldisc_open whether it is getting set
+ * on for a speakup-driven device.
*/
+static struct tty_struct *speakup_tty;
+/* This mutex serializes the use of such global speakup_tty variable */
static DEFINE_MUTEX(speakup_tty_mutex);
static int ser_to_dev(int ser, dev_t *dev_no)
@@ -67,22 +68,20 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty)
static void spk_ttyio_ldisc_close(struct tty_struct *tty)
{
- mutex_lock(&speakup_tty_mutex);
- kfree(speakup_tty->disc_data);
- speakup_tty = NULL;
- mutex_unlock(&speakup_tty_mutex);
+ kfree(tty->disc_data);
}
static int spk_ttyio_receive_buf2(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct spk_ldisc_data *ldisc_data = tty->disc_data;
+ struct spk_synth *synth = ldisc_data->synth;
- if (spk_ttyio_synth->read_buff_add) {
+ if (synth->read_buff_add) {
int i;
for (i = 0; i < count; i++)
- spk_ttyio_synth->read_buff_add(cp[i]);
+ synth->read_buff_add(cp[i]);
return count;
}
@@ -114,11 +113,11 @@ static struct tty_ldisc_ops spk_ttyio_ldisc_ops = {
static int spk_ttyio_out(struct spk_synth *in_synth, const char ch);
static int spk_ttyio_out_unicode(struct spk_synth *in_synth, u16 ch);
-static void spk_ttyio_send_xchar(char ch);
-static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear);
-static unsigned char spk_ttyio_in(void);
-static unsigned char spk_ttyio_in_nowait(void);
-static void spk_ttyio_flush_buffer(void);
+static void spk_ttyio_send_xchar(struct spk_synth *in_synth, char ch);
+static void spk_ttyio_tiocmset(struct spk_synth *in_synth, unsigned int set, unsigned int clear);
+static unsigned char spk_ttyio_in(struct spk_synth *in_synth);
+static unsigned char spk_ttyio_in_nowait(struct spk_synth *in_synth);
+static void spk_ttyio_flush_buffer(struct spk_synth *in_synth);
static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth);
struct spk_io_ops spk_ttyio_ops = {
@@ -152,7 +151,7 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
if (ret)
return ret;
- tty = tty_kopen(dev);
+ tty = tty_kopen_exclusive(dev);
if (IS_ERR(tty))
return PTR_ERR(tty);
@@ -187,13 +186,17 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
mutex_lock(&speakup_tty_mutex);
speakup_tty = tty;
ret = tty_set_ldisc(tty, N_SPEAKUP);
- if (ret)
- speakup_tty = NULL;
+ speakup_tty = NULL;
mutex_unlock(&speakup_tty_mutex);
- if (!ret)
+ if (!ret) {
/* Success */
+ struct spk_ldisc_data *ldisc_data = tty->disc_data;
+
+ ldisc_data->synth = synth;
+ synth->dev = tty;
return 0;
+ }
pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
@@ -221,29 +224,30 @@ void spk_ttyio_unregister_ldisc(void)
static int spk_ttyio_out(struct spk_synth *in_synth, const char ch)
{
- mutex_lock(&speakup_tty_mutex);
- if (in_synth->alive && speakup_tty && speakup_tty->ops->write) {
- int ret = speakup_tty->ops->write(speakup_tty, &ch, 1);
-
- mutex_unlock(&speakup_tty_mutex);
- if (ret == 0)
- /* No room */
- return 0;
- if (ret < 0) {
- pr_warn("%s: I/O error, deactivating speakup\n",
- in_synth->long_name);
- /* No synth any more, so nobody will restart TTYs,
- * and we thus need to do it ourselves. Now that there
- * is no synth we can let application flood anyway
- */
- in_synth->alive = 0;
- speakup_start_ttys();
- return 0;
- }
+ struct tty_struct *tty = in_synth->dev;
+ int ret;
+
+ if (!in_synth->alive || !tty->ops->write)
+ return 0;
+
+ ret = tty->ops->write(tty, &ch, 1);
+
+ if (ret == 0)
+ /* No room */
+ return 0;
+
+ if (ret > 0)
+ /* Success */
return 1;
- }
- mutex_unlock(&speakup_tty_mutex);
+ pr_warn("%s: I/O error, deactivating speakup\n",
+ in_synth->long_name);
+ /* No synth any more, so nobody will restart TTYs,
+ * and we thus need to do it ourselves. Now that there
+ * is no synth we can let application flood anyway
+ */
+ in_synth->alive = 0;
+ speakup_start_ttys();
return 0;
}
@@ -264,47 +268,20 @@ static int spk_ttyio_out_unicode(struct spk_synth *in_synth, u16 ch)
return ret;
}
-static int check_tty(struct tty_struct *tty)
-{
- if (!tty) {
- pr_warn("%s: I/O error, deactivating speakup\n",
- spk_ttyio_synth->long_name);
- /* No synth any more, so nobody will restart TTYs, and we thus
- * need to do it ourselves. Now that there is no synth we can
- * let application flood anyway
- */
- spk_ttyio_synth->alive = 0;
- speakup_start_ttys();
- return 1;
- }
-
- return 0;
-}
-
-static void spk_ttyio_send_xchar(char ch)
+static void spk_ttyio_send_xchar(struct spk_synth *in_synth, char ch)
{
- mutex_lock(&speakup_tty_mutex);
- if (check_tty(speakup_tty)) {
- mutex_unlock(&speakup_tty_mutex);
- return;
- }
+ struct tty_struct *tty = in_synth->dev;
- if (speakup_tty->ops->send_xchar)
- speakup_tty->ops->send_xchar(speakup_tty, ch);
- mutex_unlock(&speakup_tty_mutex);
+ if (tty->ops->send_xchar)
+ tty->ops->send_xchar(tty, ch);
}
-static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear)
+static void spk_ttyio_tiocmset(struct spk_synth *in_synth, unsigned int set, unsigned int clear)
{
- mutex_lock(&speakup_tty_mutex);
- if (check_tty(speakup_tty)) {
- mutex_unlock(&speakup_tty_mutex);
- return;
- }
+ struct tty_struct *tty = in_synth->dev;
- if (speakup_tty->ops->tiocmset)
- speakup_tty->ops->tiocmset(speakup_tty, set, clear);
- mutex_unlock(&speakup_tty_mutex);
+ if (tty->ops->tiocmset)
+ tty->ops->tiocmset(tty, set, clear);
}
static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth)
@@ -312,9 +289,10 @@ static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth)
return 1;
}
-static unsigned char ttyio_in(int timeout)
+static unsigned char ttyio_in(struct spk_synth *in_synth, int timeout)
{
- struct spk_ldisc_data *ldisc_data = speakup_tty->disc_data;
+ struct tty_struct *tty = in_synth->dev;
+ struct spk_ldisc_data *ldisc_data = tty->disc_data;
char rv;
if (!timeout) {
@@ -334,35 +312,29 @@ static unsigned char ttyio_in(int timeout)
mb();
ldisc_data->buf_free = true;
/* Let TTY push more characters */
- tty_schedule_flip(speakup_tty->port);
+ tty_schedule_flip(tty->port);
return rv;
}
-static unsigned char spk_ttyio_in(void)
+static unsigned char spk_ttyio_in(struct spk_synth *in_synth)
{
- return ttyio_in(SPK_SYNTH_TIMEOUT);
+ return ttyio_in(in_synth, SPK_SYNTH_TIMEOUT);
}
-static unsigned char spk_ttyio_in_nowait(void)
+static unsigned char spk_ttyio_in_nowait(struct spk_synth *in_synth)
{
- u8 rv = ttyio_in(0);
+ u8 rv = ttyio_in(in_synth, 0);
return (rv == 0xff) ? 0 : rv;
}
-static void spk_ttyio_flush_buffer(void)
+static void spk_ttyio_flush_buffer(struct spk_synth *in_synth)
{
- mutex_lock(&speakup_tty_mutex);
- if (check_tty(speakup_tty)) {
- mutex_unlock(&speakup_tty_mutex);
- return;
- }
+ struct tty_struct *tty = in_synth->dev;
- if (speakup_tty->ops->flush_buffer)
- speakup_tty->ops->flush_buffer(speakup_tty);
-
- mutex_unlock(&speakup_tty_mutex);
+ if (tty->ops->flush_buffer)
+ tty->ops->flush_buffer(tty);
}
int spk_ttyio_synth_probe(struct spk_synth *synth)
@@ -373,37 +345,38 @@ int spk_ttyio_synth_probe(struct spk_synth *synth)
return rv;
synth->alive = 1;
- spk_ttyio_synth = synth;
return 0;
}
EXPORT_SYMBOL_GPL(spk_ttyio_synth_probe);
-void spk_ttyio_release(void)
+void spk_ttyio_release(struct spk_synth *in_synth)
{
- if (!speakup_tty)
- return;
+ struct tty_struct *tty = in_synth->dev;
- tty_lock(speakup_tty);
+ tty_lock(tty);
- if (speakup_tty->ops->close)
- speakup_tty->ops->close(speakup_tty, NULL);
+ if (tty->ops->close)
+ tty->ops->close(tty, NULL);
+
+ tty_ldisc_flush(tty);
+ tty_unlock(tty);
+ tty_kclose(tty);
- tty_ldisc_flush(speakup_tty);
- tty_unlock(speakup_tty);
- tty_kclose(speakup_tty);
+ in_synth->dev = NULL;
}
EXPORT_SYMBOL_GPL(spk_ttyio_release);
-const char *spk_ttyio_synth_immediate(struct spk_synth *synth, const char *buff)
+const char *spk_ttyio_synth_immediate(struct spk_synth *in_synth, const char *buff)
{
+ struct tty_struct *tty = in_synth->dev;
u_char ch;
while ((ch = *buff)) {
if (ch == '\n')
- ch = synth->procspeech;
- if (tty_write_room(speakup_tty) < 1 ||
- !synth->io_ops->synth_out(synth, ch))
+ ch = in_synth->procspeech;
+ if (tty_write_room(tty) < 1 ||
+ !in_synth->io_ops->synth_out(in_synth, ch))
return buff;
buff++;
}
diff --git a/drivers/accessibility/speakup/spk_types.h b/drivers/accessibility/speakup/spk_types.h
index 91fca3033a45..6a96ad94bc3f 100644
--- a/drivers/accessibility/speakup/spk_types.h
+++ b/drivers/accessibility/speakup/spk_types.h
@@ -48,7 +48,7 @@ enum var_id_t {
ATTRIB_BLEEP, BLEEPS,
RATE, PITCH, VOL, TONE, PUNCT, VOICE, FREQUENCY, LANG,
DIRECT, PAUSE,
- CAPS_START, CAPS_STOP, CHARTAB, INFLECTION,
+ CAPS_START, CAPS_STOP, CHARTAB, INFLECTION, FLUSH,
MAXVARS
};
@@ -157,11 +157,11 @@ struct spk_synth;
struct spk_io_ops {
int (*synth_out)(struct spk_synth *synth, const char ch);
int (*synth_out_unicode)(struct spk_synth *synth, u16 ch);
- void (*send_xchar)(char ch);
- void (*tiocmset)(unsigned int set, unsigned int clear);
- unsigned char (*synth_in)(void);
- unsigned char (*synth_in_nowait)(void);
- void (*flush_buffer)(void);
+ void (*send_xchar)(struct spk_synth *synth, char ch);
+ void (*tiocmset)(struct spk_synth *synth, unsigned int set, unsigned int clear);
+ unsigned char (*synth_in)(struct spk_synth *synth);
+ unsigned char (*synth_in_nowait)(struct spk_synth *synth);
+ void (*flush_buffer)(struct spk_synth *synth);
int (*wait_for_xmitr)(struct spk_synth *synth);
};
@@ -178,6 +178,7 @@ struct spk_synth {
int trigger;
int jiffies;
int full;
+ int flush_time;
int ser;
char *dev_name;
short flags;
@@ -188,7 +189,7 @@ struct spk_synth {
int *default_vol;
struct spk_io_ops *io_ops;
int (*probe)(struct spk_synth *synth);
- void (*release)(void);
+ void (*release)(struct spk_synth *synth);
const char *(*synth_immediate)(struct spk_synth *synth,
const char *buff);
void (*catch_up)(struct spk_synth *synth);
@@ -200,6 +201,8 @@ struct spk_synth {
struct synth_indexing indexing;
int alive;
struct attribute_group attributes;
+
+ void *dev;
};
/*
diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c
index ac47dbac7207..2b8699673bac 100644
--- a/drivers/accessibility/speakup/synth.c
+++ b/drivers/accessibility/speakup/synth.c
@@ -137,14 +137,14 @@ EXPORT_SYMBOL_GPL(spk_do_catch_up_unicode);
void spk_synth_flush(struct spk_synth *synth)
{
- synth->io_ops->flush_buffer();
+ synth->io_ops->flush_buffer(synth);
synth->io_ops->synth_out(synth, synth->clear);
}
EXPORT_SYMBOL_GPL(spk_synth_flush);
unsigned char spk_synth_get_index(struct spk_synth *synth)
{
- return synth->io_ops->synth_in_nowait();
+ return synth->io_ops->synth_in_nowait(synth);
}
EXPORT_SYMBOL_GPL(spk_synth_get_index);
@@ -348,6 +348,7 @@ struct var_t synth_time_vars[] = {
{ TRIGGER, .u.n = {NULL, 20, 10, 2000, 0, 0, NULL } },
{ JIFFY, .u.n = {NULL, 50, 20, 200, 0, 0, NULL } },
{ FULL, .u.n = {NULL, 400, 200, 60000, 0, 0, NULL } },
+ { FLUSH, .u.n = {NULL, 4000, 100, 4000, 0, 0, NULL } },
V_LAST_VAR
};
@@ -408,6 +409,8 @@ static int do_synth_init(struct spk_synth *in_synth)
synth_time_vars[2].u.n.default_val = synth->jiffies;
synth_time_vars[3].u.n.value =
synth_time_vars[3].u.n.default_val = synth->full;
+ synth_time_vars[4].u.n.value =
+ synth_time_vars[4].u.n.default_val = synth->flush_time;
synth_printf("%s", synth->init);
for (var = synth->vars;
(var->var_id >= 0) && (var->var_id < MAXVARS); var++)
@@ -440,7 +443,7 @@ void synth_release(void)
sysfs_remove_group(speakup_kobj, &synth->attributes);
for (var = synth->vars; var->var_id != MAXVARS; var++)
speakup_unregister_var(var->var_id);
- synth->release();
+ synth->release(synth);
synth = NULL;
}
diff --git a/drivers/accessibility/speakup/varhandlers.c b/drivers/accessibility/speakup/varhandlers.c
index d7f6bec7ff06..067c0da97dcb 100644
--- a/drivers/accessibility/speakup/varhandlers.c
+++ b/drivers/accessibility/speakup/varhandlers.c
@@ -23,6 +23,7 @@ static struct st_var_header var_headers[] = {
{ "trigger_time", TRIGGER, VAR_TIME, NULL, NULL },
{ "jiffy_delta", JIFFY, VAR_TIME, NULL, NULL },
{ "full_time", FULL, VAR_TIME, NULL, NULL },
+ { "flush_time", FLUSH, VAR_TIME, NULL, NULL },
{ "spell_delay", SPELL_DELAY, VAR_NUM, &spk_spell_delay, NULL },
{ "bleeps", BLEEPS, VAR_NUM, &spk_bleeps, NULL },
{ "attrib_bleep", ATTRIB_BLEEP, VAR_NUM, &spk_attrib_bleep, NULL },
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ebcf534514be..eedec61e3476 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -87,6 +87,14 @@ config ACPI_SPCR_TABLE
This table provides information about the configuration of the
earlycon console.
+config ACPI_FPDT
+ bool "ACPI Firmware Performance Data Table (FPDT) support"
+ depends on X86_64
+ help
+ Enable support for the Firmware Performance Data Table (FPDT).
+ This table provides information on the timing of the system
+ boot, S3 suspend and S3 resume firmware code paths.
+
config ACPI_LPIT
bool
depends on X86_64
@@ -326,6 +334,9 @@ config ACPI_THERMAL
To compile this driver as a module, choose M here:
the module will be called thermal.
+config ACPI_PLATFORM_PROFILE
+ tristate
+
config ACPI_CUSTOM_DSDT_FILE
string "Custom DSDT Table file to include"
default ""
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 076894a3330f..700b41adf2db 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -57,6 +57,7 @@ acpi-$(CONFIG_X86) += x86/utils.o
acpi-$(CONFIG_X86) += x86/s2idle.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-y += acpi_lpat.o
+acpi-$(CONFIG_ACPI_FPDT) += acpi_fpdt.o
acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o
@@ -79,6 +80,7 @@ obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o
obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
obj-$(CONFIG_ACPI) += container.o
obj-$(CONFIG_ACPI_THERMAL) += thermal.o
+obj-$(CONFIG_ACPI_PLATFORM_PROFILE) += platform_profile.o
obj-$(CONFIG_ACPI_NFIT) += nfit/
obj-$(CONFIG_ACPI_NUMA) += numa/
obj-$(CONFIG_ACPI) += acpi_memhotplug.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 46a64e9fa716..b41180330cc1 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -6,6 +6,8 @@
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*/
+#define pr_fmt(fmt) "ACPI: AC: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -18,8 +20,6 @@
#include <linux/acpi.h>
#include <acpi/battery.h>
-#define PREFIX "ACPI: "
-
#define ACPI_AC_CLASS "ac_adapter"
#define ACPI_AC_DEVICE_NAME "AC Adapter"
#define ACPI_AC_FILE_STATE "state"
@@ -28,9 +28,6 @@
#define ACPI_AC_STATUS_ONLINE 0x01
#define ACPI_AC_STATUS_UNKNOWN 0xFF
-#define _COMPONENT ACPI_AC_COMPONENT
-ACPI_MODULE_NAME("ac");
-
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI AC Adapter Driver");
MODULE_LICENSE("GPL");
@@ -102,8 +99,9 @@ static int acpi_ac_get_state(struct acpi_ac *ac)
status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL,
&ac->state);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Error reading AC Adapter state"));
+ acpi_handle_info(ac->device->handle,
+ "Error reading AC Adapter state: %s\n",
+ acpi_format_exception(status));
ac->state = ACPI_AC_STATUS_UNKNOWN;
return -ENODEV;
}
@@ -153,8 +151,8 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
switch (event) {
default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Unsupported event [0x%x]\n", event));
+ acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
+ event);
fallthrough;
case ACPI_AC_NOTIFY_STATUS:
case ACPI_NOTIFY_BUS_CHECK:
@@ -278,9 +276,8 @@ static int acpi_ac_add(struct acpi_device *device)
goto end;
}
- printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
- acpi_device_name(device), acpi_device_bid(device),
- ac->state ? "on-line" : "off-line");
+ pr_info("%s [%s] (%s)\n", acpi_device_name(device),
+ acpi_device_bid(device), ac->state ? "on-line" : "off-line");
ac->battery_nb.notifier_call = acpi_ac_battery_notify;
register_acpi_notifier(&ac->battery_nb);
@@ -348,7 +345,7 @@ static int __init acpi_ac_init(void)
for (i = 0; i < ARRAY_SIZE(acpi_ac_blacklist); i++)
if (acpi_dev_present(acpi_ac_blacklist[i].hid, "1",
acpi_ac_blacklist[i].hrv)) {
- pr_info(PREFIX "AC: found native %s PMIC, not loading\n",
+ pr_info("found native %s PMIC, not loading\n",
acpi_ac_blacklist[i].hid);
return -ENODEV;
}
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index cf91f49101ea..3a14859dbb75 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -268,7 +268,12 @@ static int __init acpi_configfs_init(void)
acpi_table_group = configfs_register_default_group(root, "table",
&acpi_tables_type);
- return PTR_ERR_OR_ZERO(acpi_table_group);
+ if (IS_ERR(acpi_table_group)) {
+ configfs_unregister_subsystem(&acpi_configfs);
+ return PTR_ERR(acpi_table_group);
+ }
+
+ return 0;
}
module_init(acpi_configfs_init);
diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c
new file mode 100644
index 000000000000..a89a806a7a2a
--- /dev/null
+++ b/drivers/acpi/acpi_fpdt.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * FPDT support for exporting boot and suspend/resume performance data
+ *
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "ACPI FPDT: " fmt
+
+#include <linux/acpi.h>
+
+/*
+ * FPDT contains ACPI table header and a number of fpdt_subtable_entries.
+ * Each fpdt_subtable_entry points to a subtable: FBPT or S3PT.
+ * Each FPDT subtable (FBPT/S3PT) is composed of a fpdt_subtable_header
+ * and a number of fpdt performance records.
+ * Each FPDT performance record is composed of a fpdt_record_header and
+ * performance data fields, for boot or suspend or resume phase.
+ */
+enum fpdt_subtable_type {
+ SUBTABLE_FBPT,
+ SUBTABLE_S3PT,
+};
+
+struct fpdt_subtable_entry {
+ u16 type; /* refer to enum fpdt_subtable_type */
+ u8 length;
+ u8 revision;
+ u32 reserved;
+ u64 address; /* physical address of the S3PT/FBPT table */
+};
+
+struct fpdt_subtable_header {
+ u32 signature;
+ u32 length;
+};
+
+enum fpdt_record_type {
+ RECORD_S3_RESUME,
+ RECORD_S3_SUSPEND,
+ RECORD_BOOT,
+};
+
+struct fpdt_record_header {
+ u16 type; /* refer to enum fpdt_record_type */
+ u8 length;
+ u8 revision;
+};
+
+struct resume_performance_record {
+ struct fpdt_record_header header;
+ u32 resume_count;
+ u64 resume_prev;
+ u64 resume_avg;
+} __attribute__((packed));
+
+struct boot_performance_record {
+ struct fpdt_record_header header;
+ u32 reserved;
+ u64 firmware_start;
+ u64 bootloader_load;
+ u64 bootloader_launch;
+ u64 exitbootservice_start;
+ u64 exitbootservice_end;
+} __attribute__((packed));
+
+struct suspend_performance_record {
+ struct fpdt_record_header header;
+ u64 suspend_start;
+ u64 suspend_end;
+} __attribute__((packed));
+
+
+static struct resume_performance_record *record_resume;
+static struct suspend_performance_record *record_suspend;
+static struct boot_performance_record *record_boot;
+
+#define FPDT_ATTR(phase, name) \
+static ssize_t name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "%llu\n", record_##phase->name); \
+} \
+static struct kobj_attribute name##_attr = \
+__ATTR(name##_ns, 0444, name##_show, NULL)
+
+FPDT_ATTR(resume, resume_prev);
+FPDT_ATTR(resume, resume_avg);
+FPDT_ATTR(suspend, suspend_start);
+FPDT_ATTR(suspend, suspend_end);
+FPDT_ATTR(boot, firmware_start);
+FPDT_ATTR(boot, bootloader_load);
+FPDT_ATTR(boot, bootloader_launch);
+FPDT_ATTR(boot, exitbootservice_start);
+FPDT_ATTR(boot, exitbootservice_end);
+
+static ssize_t resume_count_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", record_resume->resume_count);
+}
+
+static struct kobj_attribute resume_count_attr =
+__ATTR_RO(resume_count);
+
+static struct attribute *resume_attrs[] = {
+ &resume_count_attr.attr,
+ &resume_prev_attr.attr,
+ &resume_avg_attr.attr,
+ NULL
+};
+
+static const struct attribute_group resume_attr_group = {
+ .attrs = resume_attrs,
+ .name = "resume",
+};
+
+static struct attribute *suspend_attrs[] = {
+ &suspend_start_attr.attr,
+ &suspend_end_attr.attr,
+ NULL
+};
+
+static const struct attribute_group suspend_attr_group = {
+ .attrs = suspend_attrs,
+ .name = "suspend",
+};
+
+static struct attribute *boot_attrs[] = {
+ &firmware_start_attr.attr,
+ &bootloader_load_attr.attr,
+ &bootloader_launch_attr.attr,
+ &exitbootservice_start_attr.attr,
+ &exitbootservice_end_attr.attr,
+ NULL
+};
+
+static const struct attribute_group boot_attr_group = {
+ .attrs = boot_attrs,
+ .name = "boot",
+};
+
+static struct kobject *fpdt_kobj;
+
+static int fpdt_process_subtable(u64 address, u32 subtable_type)
+{
+ struct fpdt_subtable_header *subtable_header;
+ struct fpdt_record_header *record_header;
+ char *signature = (subtable_type == SUBTABLE_FBPT ? "FBPT" : "S3PT");
+ u32 length, offset;
+ int result;
+
+ subtable_header = acpi_os_map_memory(address, sizeof(*subtable_header));
+ if (!subtable_header)
+ return -ENOMEM;
+
+ if (strncmp((char *)&subtable_header->signature, signature, 4)) {
+ pr_info(FW_BUG "subtable signature and type mismatch!\n");
+ return -EINVAL;
+ }
+
+ length = subtable_header->length;
+ acpi_os_unmap_memory(subtable_header, sizeof(*subtable_header));
+
+ subtable_header = acpi_os_map_memory(address, length);
+ if (!subtable_header)
+ return -ENOMEM;
+
+ offset = sizeof(*subtable_header);
+ while (offset < length) {
+ record_header = (void *)subtable_header + offset;
+ offset += record_header->length;
+
+ switch (record_header->type) {
+ case RECORD_S3_RESUME:
+ if (subtable_type != SUBTABLE_S3PT) {
+ pr_err(FW_BUG "Invalid record %d for subtable %s\n",
+ record_header->type, signature);
+ return -EINVAL;
+ }
+ if (record_resume) {
+ pr_err("Duplicate resume performance record found.\n");
+ continue;
+ }
+ record_resume = (struct resume_performance_record *)record_header;
+ result = sysfs_create_group(fpdt_kobj, &resume_attr_group);
+ if (result)
+ return result;
+ break;
+ case RECORD_S3_SUSPEND:
+ if (subtable_type != SUBTABLE_S3PT) {
+ pr_err(FW_BUG "Invalid %d for subtable %s\n",
+ record_header->type, signature);
+ continue;
+ }
+ if (record_suspend) {
+ pr_err("Duplicate suspend performance record found.\n");
+ continue;
+ }
+ record_suspend = (struct suspend_performance_record *)record_header;
+ result = sysfs_create_group(fpdt_kobj, &suspend_attr_group);
+ if (result)
+ return result;
+ break;
+ case RECORD_BOOT:
+ if (subtable_type != SUBTABLE_FBPT) {
+ pr_err(FW_BUG "Invalid %d for subtable %s\n",
+ record_header->type, signature);
+ return -EINVAL;
+ }
+ if (record_boot) {
+ pr_err("Duplicate boot performance record found.\n");
+ continue;
+ }
+ record_boot = (struct boot_performance_record *)record_header;
+ result = sysfs_create_group(fpdt_kobj, &boot_attr_group);
+ if (result)
+ return result;
+ break;
+
+ default:
+ pr_err(FW_BUG "Invalid record %d found.\n", record_header->type);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int __init acpi_init_fpdt(void)
+{
+ acpi_status status;
+ struct acpi_table_header *header;
+ struct fpdt_subtable_entry *subtable;
+ u32 offset = sizeof(*header);
+
+ status = acpi_get_table(ACPI_SIG_FPDT, 0, &header);
+
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj);
+ if (!fpdt_kobj)
+ return -ENOMEM;
+
+ while (offset < header->length) {
+ subtable = (void *)header + offset;
+ switch (subtable->type) {
+ case SUBTABLE_FBPT:
+ case SUBTABLE_S3PT:
+ fpdt_process_subtable(subtable->address,
+ subtable->type);
+ break;
+ default:
+ pr_info(FW_BUG "Invalid subtable type %d found.\n",
+ subtable->type);
+ break;
+ }
+ offset += sizeof(*subtable);
+ }
+ return 0;
+}
+
+fs_initcall(acpi_init_fpdt);
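Taken together, acpi_init_fpdt() above exposes the firmware timestamps as read-only attributes under the acpi kobject, i.e. /sys/firmware/acpi/fpdt/{boot,suspend,resume}/<name>_ns, with the attribute names coming from the FPDT_ATTR() definitions. A small userspace sketch of reading one of them; which files exist depends on the records the firmware actually provides, and error handling is trimmed:

/* Illustrative reader for one of the sysfs files created above. */
#include <stdio.h>

int main(void)
{
	unsigned long long ns = 0;
	FILE *f = fopen("/sys/firmware/acpi/fpdt/boot/firmware_start_ns", "r");

	if (!f)
		return 1;	/* FPDT table or boot record not present */
	if (fscanf(f, "%llu", &ns) == 1)
		printf("firmware_start: %llu ns\n", ns);
	fclose(f);
	return 0;
}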
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index b8745ce48a47..b84ab722feb4 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -261,7 +261,7 @@ static uint32_t acpi_pad_idle_cpus_num(void)
return ps_tsk_num;
}
-static ssize_t acpi_pad_rrtime_store(struct device *dev,
+static ssize_t rrtime_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
@@ -275,16 +275,14 @@ static ssize_t acpi_pad_rrtime_store(struct device *dev,
return count;
}
-static ssize_t acpi_pad_rrtime_show(struct device *dev,
+static ssize_t rrtime_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", round_robin_time);
}
-static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR,
- acpi_pad_rrtime_show,
- acpi_pad_rrtime_store);
+static DEVICE_ATTR_RW(rrtime);
-static ssize_t acpi_pad_idlepct_store(struct device *dev,
+static ssize_t idlepct_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
@@ -298,16 +296,14 @@ static ssize_t acpi_pad_idlepct_store(struct device *dev,
return count;
}
-static ssize_t acpi_pad_idlepct_show(struct device *dev,
+static ssize_t idlepct_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", idle_pct);
}
-static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR,
- acpi_pad_idlepct_show,
- acpi_pad_idlepct_store);
+static DEVICE_ATTR_RW(idlepct);
-static ssize_t acpi_pad_idlecpus_store(struct device *dev,
+static ssize_t idlecpus_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
@@ -319,16 +315,14 @@ static ssize_t acpi_pad_idlecpus_store(struct device *dev,
return count;
}
-static ssize_t acpi_pad_idlecpus_show(struct device *dev,
+static ssize_t idlecpus_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return cpumap_print_to_pagebuf(false, buf,
to_cpumask(pad_busy_cpus_bits));
}
-static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
- acpi_pad_idlecpus_show,
- acpi_pad_idlecpus_store);
+static DEVICE_ATTR_RW(idlecpus);
static int acpi_pad_add_sysfs(struct acpi_device *device)
{
diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
index 7d45cce0c3c1..e9b8e8305e23 100644
--- a/drivers/acpi/acpi_tad.c
+++ b/drivers/acpi/acpi_tad.c
@@ -237,7 +237,7 @@ static ssize_t time_show(struct device *dev, struct device_attribute *attr,
rt.tz, rt.daylight);
}
-static DEVICE_ATTR(time, S_IRUSR | S_IWUSR, time_show, time_store);
+static DEVICE_ATTR_RW(time);
static struct attribute *acpi_tad_time_attrs[] = {
&dev_attr_time.attr,
@@ -446,7 +446,7 @@ static ssize_t ac_alarm_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_alarm_read(dev, buf, ACPI_TAD_AC_TIMER);
}
-static DEVICE_ATTR(ac_alarm, S_IRUSR | S_IWUSR, ac_alarm_show, ac_alarm_store);
+static DEVICE_ATTR_RW(ac_alarm);
static ssize_t ac_policy_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -462,7 +462,7 @@ static ssize_t ac_policy_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_policy_read(dev, buf, ACPI_TAD_AC_TIMER);
}
-static DEVICE_ATTR(ac_policy, S_IRUSR | S_IWUSR, ac_policy_show, ac_policy_store);
+static DEVICE_ATTR_RW(ac_policy);
static ssize_t ac_status_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -478,7 +478,7 @@ static ssize_t ac_status_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_status_read(dev, buf, ACPI_TAD_AC_TIMER);
}
-static DEVICE_ATTR(ac_status, S_IRUSR | S_IWUSR, ac_status_show, ac_status_store);
+static DEVICE_ATTR_RW(ac_status);
static struct attribute *acpi_tad_attrs[] = {
&dev_attr_caps.attr,
@@ -505,7 +505,7 @@ static ssize_t dc_alarm_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_alarm_read(dev, buf, ACPI_TAD_DC_TIMER);
}
-static DEVICE_ATTR(dc_alarm, S_IRUSR | S_IWUSR, dc_alarm_show, dc_alarm_store);
+static DEVICE_ATTR_RW(dc_alarm);
static ssize_t dc_policy_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -521,7 +521,7 @@ static ssize_t dc_policy_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_policy_read(dev, buf, ACPI_TAD_DC_TIMER);
}
-static DEVICE_ATTR(dc_policy, S_IRUSR | S_IWUSR, dc_policy_show, dc_policy_store);
+static DEVICE_ATTR_RW(dc_policy);
static ssize_t dc_status_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -537,7 +537,7 @@ static ssize_t dc_status_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_status_read(dev, buf, ACPI_TAD_DC_TIMER);
}
-static DEVICE_ATTR(dc_status, S_IRUSR | S_IWUSR, dc_status_show, dc_status_store);
+static DEVICE_ATTR_RW(dc_status);
static struct attribute *acpi_tad_dc_attrs[] = {
&dev_attr_dc_alarm.attr,
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index a322a7bd286b..2ea1781290cc 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -7,6 +7,8 @@
* Copyright (C) 2006 Thomas Tuttle <linux-kernel@ttuttle.net>
*/
+#define pr_fmt(fmt) "ACPI: video: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -26,16 +28,11 @@
#include <acpi/video.h>
#include <linux/uaccess.h>
-#define PREFIX "ACPI: "
-
#define ACPI_VIDEO_BUS_NAME "Video Bus"
#define ACPI_VIDEO_DEVICE_NAME "Video Device"
#define MAX_NAME_LEN 20
-#define _COMPONENT ACPI_VIDEO_COMPONENT
-ACPI_MODULE_NAME("video");
-
MODULE_AUTHOR("Bruno Ducrot");
MODULE_DESCRIPTION("ACPI Video Driver");
MODULE_LICENSE("GPL");
@@ -326,11 +323,11 @@ acpi_video_device_lcd_query_levels(acpi_handle handle,
*levels = NULL;
status = acpi_evaluate_object(handle, "_BCL", NULL, &buffer);
- if (!ACPI_SUCCESS(status))
+ if (ACPI_FAILURE(status))
return status;
obj = (union acpi_object *)buffer.pointer;
if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
- printk(KERN_ERR PREFIX "Invalid _BCL data\n");
+ acpi_handle_info(handle, "Invalid _BCL data\n");
status = -EFAULT;
goto err;
}
@@ -354,7 +351,7 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
status = acpi_execute_simple_method(device->dev->handle,
"_BCM", level);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "Evaluating _BCM failed"));
+ acpi_handle_info(device->dev->handle, "_BCM evaluation failed\n");
return -EIO;
}
@@ -368,7 +365,7 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
return 0;
}
- ACPI_ERROR((AE_INFO, "Current brightness invalid"));
+ acpi_handle_info(device->dev->handle, "Current brightness invalid\n");
return -EINVAL;
}
@@ -622,9 +619,8 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
* BQC returned an invalid level.
* Stop using it.
*/
- ACPI_WARNING((AE_INFO,
- "%s returned an invalid level",
- buf));
+ acpi_handle_info(device->dev->handle,
+ "%s returned an invalid level", buf);
device->cap._BQC = device->cap._BCQ = 0;
} else {
/*
@@ -635,7 +631,8 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
* ACPI video backlight still works w/ buggy _BQC.
* http://bugzilla.kernel.org/show_bug.cgi?id=12233
*/
- ACPI_WARNING((AE_INFO, "Evaluating %s failed", buf));
+ acpi_handle_info(device->dev->handle,
+ "%s evaluation failed", buf);
device->cap._BQC = device->cap._BCQ = 0;
}
}
@@ -675,7 +672,7 @@ acpi_video_device_EDID(struct acpi_video_device *device,
if (obj && obj->type == ACPI_TYPE_BUFFER)
*edid = obj;
else {
- printk(KERN_ERR PREFIX "Invalid _DDC data\n");
+ acpi_handle_info(device->dev->handle, "Invalid _DDC data\n");
status = -EFAULT;
kfree(obj);
}
@@ -827,10 +824,9 @@ int acpi_video_get_levels(struct acpi_device *device,
int result = 0;
u32 value;
- if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device->handle,
- &obj))) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
- "LCD brightness level\n"));
+ if (ACPI_FAILURE(acpi_video_device_lcd_query_levels(device->handle, &obj))) {
+ acpi_handle_debug(device->handle,
+ "Could not query available LCD brightness level\n");
result = -ENODEV;
goto out;
}
@@ -842,7 +838,6 @@ int acpi_video_get_levels(struct acpi_device *device,
br = kzalloc(sizeof(*br), GFP_KERNEL);
if (!br) {
- printk(KERN_ERR "can't allocate memory\n");
result = -ENOMEM;
goto out;
}
@@ -863,7 +858,7 @@ int acpi_video_get_levels(struct acpi_device *device,
for (i = 0; i < obj->package.count; i++) {
o = (union acpi_object *)&obj->package.elements[i];
if (o->type != ACPI_TYPE_INTEGER) {
- printk(KERN_ERR PREFIX "Invalid data\n");
+ acpi_handle_info(device->handle, "Invalid data\n");
continue;
}
value = (u32) o->integer.value;
@@ -900,7 +895,8 @@ int acpi_video_get_levels(struct acpi_device *device,
br->levels[i] = br->levels[i - level_ac_battery];
count += level_ac_battery;
} else if (level_ac_battery > ACPI_VIDEO_FIRST_LEVEL)
- ACPI_ERROR((AE_INFO, "Too many duplicates in _BCL package"));
+ acpi_handle_info(device->handle,
+ "Too many duplicates in _BCL package");
/* Check if the _BCL package is in a reversed order */
if (max_level == br->levels[ACPI_VIDEO_FIRST_LEVEL]) {
@@ -910,8 +906,8 @@ int acpi_video_get_levels(struct acpi_device *device,
sizeof(br->levels[ACPI_VIDEO_FIRST_LEVEL]),
acpi_video_cmp_level, NULL);
} else if (max_level != br->levels[count - 1])
- ACPI_ERROR((AE_INFO,
- "Found unordered _BCL package"));
+ acpi_handle_info(device->handle,
+ "Found unordered _BCL package");
br->count = count;
*dev_br = br;
@@ -989,9 +985,9 @@ set_level:
if (result)
goto out_free_levels;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "found %d brightness levels\n",
- br->count - ACPI_VIDEO_FIRST_LEVEL));
+ acpi_handle_debug(device->dev->handle, "found %d brightness levels\n",
+ br->count - ACPI_VIDEO_FIRST_LEVEL);
+
return 0;
out_free_levels:
@@ -1023,7 +1019,8 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
if (acpi_has_method(device->dev->handle, "_BQC")) {
device->cap._BQC = 1;
} else if (acpi_has_method(device->dev->handle, "_BCQ")) {
- printk(KERN_WARNING FW_BUG "_BCQ is used instead of _BQC\n");
+ acpi_handle_info(device->dev->handle,
+ "_BCQ is used instead of _BQC\n");
device->cap._BCQ = 1;
}
@@ -1083,8 +1080,7 @@ static int acpi_video_bus_check(struct acpi_video_bus *video)
/* Does this device support video switching? */
if (video->cap._DOS || video->cap._DOD) {
if (!video->cap._DOS) {
- printk(KERN_WARNING FW_BUG
- "ACPI(%s) defines _DOD but not _DOS\n",
+ pr_info(FW_BUG "ACPI(%s) defines _DOD but not _DOS\n",
acpi_device_bid(video->device));
}
video->flags.multihead = 1;
@@ -1272,7 +1268,8 @@ acpi_video_device_bind(struct acpi_video_bus *video,
ids = &video->attached_array[i];
if (device->device_id == (ids->value.int_val & 0xffff)) {
ids->bind_info = device;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "device_bind %d\n", i));
+ acpi_handle_debug(video->device->handle, "%s: %d\n",
+ __func__, i);
}
}
}
@@ -1324,20 +1321,22 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
return AE_NOT_EXIST;
status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer);
- if (!ACPI_SUCCESS(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD"));
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_info(video->device->handle,
+ "_DOD evaluation failed: %s\n",
+ acpi_format_exception(status));
return status;
}
dod = buffer.pointer;
if (!dod || (dod->type != ACPI_TYPE_PACKAGE)) {
- ACPI_EXCEPTION((AE_INFO, status, "Invalid _DOD data"));
+ acpi_handle_info(video->device->handle, "Invalid _DOD data\n");
status = -EFAULT;
goto out;
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d video heads in _DOD\n",
- dod->package.count));
+ acpi_handle_debug(video->device->handle, "Found %d video heads in _DOD\n",
+ dod->package.count);
active_list = kcalloc(1 + dod->package.count,
sizeof(struct acpi_video_enumerated_device),
@@ -1352,15 +1351,18 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
obj = &dod->package.elements[i];
if (obj->type != ACPI_TYPE_INTEGER) {
- printk(KERN_ERR PREFIX
- "Invalid _DOD data in element %d\n", i);
+ acpi_handle_info(video->device->handle,
+ "Invalid _DOD data in element %d\n", i);
continue;
}
active_list[count].value.int_val = obj->integer.value;
active_list[count].bind_info = NULL;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "dod element[%d] = %d\n", i,
- (int)obj->integer.value));
+
+ acpi_handle_debug(video->device->handle,
+ "_DOD element[%d] = %d\n", i,
+ (int)obj->integer.value);
+
count++;
}
@@ -1451,7 +1453,8 @@ acpi_video_switch_brightness(struct work_struct *work)
out:
if (result)
- printk(KERN_ERR PREFIX "Failed to switch the brightness\n");
+ acpi_handle_info(device->dev->handle,
+ "Failed to switch brightness\n");
}
int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
@@ -1601,8 +1604,8 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
break;
default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Unsupported event [0x%x]\n", event));
+ acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
+ event);
break;
}
@@ -1675,8 +1678,7 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
keycode = KEY_DISPLAY_OFF;
break;
default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Unsupported event [0x%x]\n", event));
+ acpi_handle_debug(handle, "Unsupported event [0x%x]\n", event);
break;
}
@@ -1812,11 +1814,12 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
&device->cooling_dev->device.kobj,
"thermal_cooling");
if (result)
- printk(KERN_ERR PREFIX "Create sysfs link\n");
+ pr_info("sysfs link creation failed\n");
+
result = sysfs_create_link(&device->cooling_dev->device.kobj,
&device->dev->dev.kobj, "device");
if (result)
- printk(KERN_ERR PREFIX "Create sysfs link\n");
+ pr_info("Reverse sysfs link creation failed\n");
}
static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video)
@@ -2030,7 +2033,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
acpi_video_bus_match, NULL,
device, NULL);
if (status == AE_ALREADY_EXISTS) {
- printk(KERN_WARNING FW_BUG
+ pr_info(FW_BUG
"Duplicate ACPI video bus devices for the"
" same VGA controller, please try module "
"parameter \"video.allow_duplicates=1\""
@@ -2073,7 +2076,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
if (error)
goto err_put_video;
- printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n",
+ pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
video->flags.multihead ? "yes" : "no",
video->flags.rom ? "yes" : "no",
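
The acpi_video.c hunks above replace ACPICA-internal logging (ACPI_DEBUG_PRINT, ACPI_ERROR, ACPI_EXCEPTION) and raw printk() calls with the kernel's handle-aware acpi_handle_debug()/acpi_handle_info() helpers, falling back to pr_info() where no ACPI handle is in scope. A minimal sketch of the resulting pattern, assuming a caller that already has a struct acpi_device *adev; the function name and messages are illustrative only, not taken from the patch:

	#include <linux/acpi.h>
	#include <linux/printk.h>

	static void example_report_levels(struct acpi_device *adev, int count)
	{
		/* Prefixed with the device's ACPI namespace path and only
		 * emitted when dynamic debug (or DEBUG) enables it. */
		acpi_handle_debug(adev->handle, "found %d brightness levels\n", count);

		if (!count)
			acpi_handle_info(adev->handle, "no usable _BCL package\n");

		/* Plain pr_*() remains for messages with no handle at hand. */
		pr_info("ACPI video brightness setup complete\n");
	}
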
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 173447d50acf..725e2f65cdca 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -17,7 +17,7 @@
/* Common info for tool signons */
#define ACPICA_NAME "Intel ACPI Component Architecture"
-#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2020 Intel Corporation"
+#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2021 Intel Corporation"
#if ACPI_MACHINE_WIDTH == 64
#define ACPI_WIDTH " (64-bit version)"
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 94e18bb76556..be3826f46f88 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -3,7 +3,7 @@
*
* Name: accommon.h - Common include files for generation of ACPICA source
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h
index cf85d66da6e7..53b41c7a6119 100644
--- a/drivers/acpi/acpica/acconvert.h
+++ b/drivers/acpi/acpica/acconvert.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index f8a3abdfe250..3ccc7b2a76f1 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -3,7 +3,7 @@
*
* Name: acdebug.h - ACPI/AML debugger
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 7ba6e308f146..3170a24fe505 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -3,7 +3,7 @@
*
* Name: acdispat.h - dispatcher (parser to interpreter interface)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 79f292687bd6..82a75964343b 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -3,7 +3,7 @@
*
* Name: acevents.h - Event subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 2fee91f57b21..d41b810e367c 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -3,7 +3,7 @@
*
* Name: acglobal.h - Declarations for global variables
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 6ab92e28330d..810de0b4c125 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -3,7 +3,7 @@
*
* Name: achware.h -- hardware specific interfaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index a6d896cda2a5..816a16e1fc4c 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -3,7 +3,7 @@
*
* Name: acinterp.h - Interpreter subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index f83b98fa13ac..be57436182a1 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -3,7 +3,7 @@
*
* Name: aclocal.h - Internal data types used across the ACPI subsystem
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 168904ba3086..93bd2d19c156 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -3,7 +3,7 @@
*
* Name: acmacros.h - C macros for the entire subsystem.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 40f6a3c33a15..199aabac3790 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -3,7 +3,7 @@
*
* Name: acnamesp.h - Namespace subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 9f0219a8cb98..9db5ae0f79ea 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -3,7 +3,7 @@
*
* Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -284,6 +284,7 @@ struct acpi_object_addr_handler {
acpi_adr_space_handler handler;
struct acpi_namespace_node *node; /* Parent device */
void *context;
+ acpi_mutex context_mutex;
acpi_adr_space_setup setup;
union acpi_operand_object *region_list; /* Regions using this handler */
union acpi_operand_object *next;
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index 8825394be9ab..c3f12ee9fc6f 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -3,7 +3,7 @@
*
* Name: acopcode.h - AML opcode information for the AML parser and interpreter
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index bc00b85c0a8f..8e40e5909458 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -3,7 +3,7 @@
*
* Module Name: acparser.h - AML Parser subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 57ea2276790f..15cf904f0751 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -3,7 +3,7 @@
*
* Name: acpredef - Information table for ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 6de8a1650d3d..0cb975a3e01d 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -3,7 +3,7 @@
*
* Name: acresrc.h - Resource Manager function prototypes
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 4c900c108f3f..e3beb096c46d 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -3,7 +3,7 @@
*
* Name: acstruct.h - Internal structs
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 734624facda3..e2d0046799a2 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -3,7 +3,7 @@
*
* Name: actables.h - ACPI table management
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 7c89b470ec81..be6de7149e67 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -3,7 +3,7 @@
*
* Name: acutils.h -- prototypes for the common (subsystem-wide) procedures
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 1d541bbac4a3..d6b088c5001f 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -5,7 +5,7 @@
* Declarations and definitions contained herein are derived
* directly from the ACPI specification.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index e5234e001acf..a9d91a3c2994 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -3,7 +3,7 @@
*
* Module Name: amlresrc.h - AML resource descriptors
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
index f5fba14461a6..fd813c5d3952 100644
--- a/drivers/acpi/acpica/dbhistry.c
+++ b/drivers/acpi/acpica/dbhistry.c
@@ -3,7 +3,7 @@
*
* Module Name: dbhistry - debugger HISTORY command
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 2952856b8a67..b8a48923064f 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -473,7 +473,7 @@ char *acpi_db_get_next_token(char *string,
/* Remove any spaces at the beginning, ignore blank lines */
- while (*string && isspace(*string)) {
+ while (*string && isspace((int)*string)) {
string++;
}
@@ -571,7 +571,7 @@ char *acpi_db_get_next_token(char *string,
/* Find end of token */
- while (*string && !isspace(*string)) {
+ while (*string && !isspace((int)*string)) {
string++;
}
break;
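
The dbinput.c hunks above add explicit casts at the isspace() call sites. As background, the <ctype.h> classification functions are only defined for argument values representable as unsigned char (or EOF), which is why call sites that may see plain char values conventionally cast before the call. A standard-C sketch of the fully defensive idiom, not taken from the patch:

	#include <ctype.h>

	static int is_space_ch(char c)
	{
		/* Route through unsigned char so a negative char value can
		 * never reach isspace() as an out-of-range argument. */
		return isspace((unsigned char)c);
	}
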
diff --git a/drivers/acpi/acpica/dbobject.c b/drivers/acpi/acpica/dbobject.c
index 4b4c530a0654..95ab91b35f29 100644
--- a/drivers/acpi/acpica/dbobject.c
+++ b/drivers/acpi/acpica/dbobject.c
@@ -47,7 +47,7 @@ acpi_db_dump_method_info(acpi_status status, struct acpi_walk_state *walk_state)
/* Ignore control codes, they are not errors */
- if ((status & AE_CODE_MASK) == AE_CODE_CONTROL) {
+ if (ACPI_CNTL_EXCEPTION(status)) {
return;
}
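
Here, and again in dsdebug.c and psloop.c below, the open-coded control-code test is replaced by ACPI_CNTL_EXCEPTION(). Judging from the lines it replaces, the helper checks that a status value belongs to the AE_CTRL_* class, i.e. a control-flow signal rather than a genuine error; a sketch of that equivalence (the authoritative definition lives in the ACPICA exception headers):

	/* Equivalent to the open-coded test removed above */
	#define EXAMPLE_IS_CTRL_STATUS(status) \
		(((status) & AE_CODE_MASK) == AE_CODE_CONTROL)
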
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index ad17f62e51d9..6630d6536fb0 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -4,7 +4,7 @@
* Module Name: dsargs - Support for execution of dynamic arguments for static
* objects (regions, fields, buffer fields, etc.)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 4b5b6e859f62..a152f03135cd 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -4,7 +4,7 @@
* Module Name: dscontrol - Support for execution control opcodes -
* if/else/while/return
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -62,7 +62,7 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
}
}
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
case AML_IF_OP:
/*
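
The `/*lint -fallthrough */` annotations in dscontrol.c (and in dswexec.c, dswload.c, dswload2.c, exfldio.c, exresop.c, exstore.c and hwgpe.c below) are replaced by ACPI_FALLTHROUGH, so compilers that warn on implicit fall-through recognise the intent; the macro is expected to map to the compiler's fall-through attribute where one exists and to a no-op otherwise. A minimal sketch of its use, with placeholder case labels and helpers handle_first()/handle_second():

	switch (op) {
	case FIRST_OP:
		handle_first();
		ACPI_FALLTHROUGH;	/* deliberately also run the next case */
	case SECOND_OP:
		handle_second();
		break;
	}
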
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
index 63bc5f19fb82..b9b03d629930 100644
--- a/drivers/acpi/acpica/dsdebug.c
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: dsdebug - Parser/Interpreter interface - debugging
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -100,7 +100,7 @@ acpi_ds_dump_method_stack(acpi_status status,
/* Ignore control codes, they are not errors */
- if ((status & AE_CODE_MASK) == AE_CODE_CONTROL) {
+ if (ACPI_CNTL_EXCEPTION(status)) {
return_VOID;
}
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index fa768b3a989e..a16817767969 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -3,7 +3,7 @@
*
* Module Name: dsfield - Dispatcher field routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 9be2a309424c..ba6f882e83bc 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: dsinit - Object initialization namespace walk
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index cf67caff878a..8e011e59b9b4 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -3,7 +3,7 @@
*
* Module Name: dsmethod - Parser/Interpreter interface - control method parsing
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index c0a14a6a2c20..3c0c31157e7e 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -3,7 +3,7 @@
*
* Module Name: dsobject - Dispatcher object management routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index d9c26e720cb7..639635291ab7 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: dsopcode - Dispatcher support for regions and fields
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index d869568d55c2..e642d65bcc66 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -3,7 +3,7 @@
*
* Module Name: dspkginit - Completion of deferred package initialization
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 1d4f8c81028c..41ba7773fd10 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -4,7 +4,7 @@
* Module Name: dswexec - Dispatcher method execution callbacks;
* dispatch to interpreter.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -598,8 +598,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
break;
}
- /* Fall through */
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
case AML_INT_EVAL_SUBTREE_OP:
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 27069325b6de..a377638e44f9 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload - Dispatcher first pass namespace load callbacks
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -224,7 +224,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
break;
}
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
default:
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index edadbe146506..3625952c3957 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload2 - Dispatcher second pass namespace load callbacks
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -214,7 +214,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
break;
}
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
default:
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 9c397642fed7..9c123af08bc1 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -3,7 +3,7 @@
*
* Module Name: dswscope - Scope stack manipulation
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 809a0c0536b5..fbe2ba05c82a 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -3,7 +3,7 @@
*
* Module Name: dswstate - Dispatcher parse tree walk management routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 9efca54c51ac..35385148fedb 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -3,7 +3,7 @@
*
* Module Name: evevent - Fixed Event handling and dispatch
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 0ced84ae13e4..de4eea606ccd 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -3,7 +3,7 @@
*
* Module Name: evglock - Global Lock support
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 06b9c8dd11c9..c5a06882bdf6 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpe - General Purpose Event handling and dispatch
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index f5298be4273a..e5f8245c2d93 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeblk - GPE block creation and initialization.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 6d82d30d8f7b..b0724d6e6e80 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeinit - System GPE initialization and update
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 738873e876ca..2e74308d7725 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeutil - GPE utilities
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 5884eba047f7..c0cd7147a5a3 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -3,7 +3,7 @@
*
* Module Name: evhandler - Support for Address Space handlers
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -489,6 +489,13 @@ acpi_ev_install_space_handler(struct acpi_namespace_node *node,
/* Init handler obj */
+ status =
+ acpi_os_create_mutex(&handler_obj->address_space.context_mutex);
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_remove_reference(handler_obj);
+ goto unlock_and_exit;
+ }
+
handler_obj->address_space.space_id = (u8)space_id;
handler_obj->address_space.handler_flags = flags;
handler_obj->address_space.region_list = NULL;
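
The evhandler.c hunk above gives every installed address-space handler its own context_mutex (the field added to struct acpi_object_addr_handler in the acobject.h hunk earlier), created with acpi_os_create_mutex() at install time and released again when the handler is removed, in the evxfregn.c hunk further below. A condensed sketch of the install side, using the names from the patch:

	/* Create the per-handler mutex before the handler is made visible;
	 * on failure the half-initialised handler object is dropped. */
	status = acpi_os_create_mutex(&handler_obj->address_space.context_mutex);
	if (ACPI_FAILURE(status)) {
		acpi_ut_remove_reference(handler_obj);
		goto unlock_and_exit;
	}
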
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index ce1eda6beb84..f14ebcd610ab 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: evmisc - Miscellaneous event manager support functions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index a8a4c8c9b9ef..4ef43c8ef5e7 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -3,7 +3,7 @@
*
* Module Name: evregion - Operation Region support
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -112,6 +112,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
union acpi_operand_object *region_obj2;
void *region_context = NULL;
struct acpi_connection_info *context;
+ acpi_mutex context_mutex;
+ u8 context_locked;
acpi_physical_address address;
ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
@@ -136,6 +138,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
}
context = handler_desc->address_space.context;
+ context_mutex = handler_desc->address_space.context_mutex;
+ context_locked = FALSE;
/*
* It may be the case that the region has never been initialized.
@@ -204,6 +208,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
handler = handler_desc->address_space.handler;
address = (region_obj->region.address + region_offset);
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
+ &region_obj->region.handler->address_space, handler,
+ ACPI_FORMAT_UINT64(address),
+ acpi_ut_get_region_name(region_obj->region.
+ space_id)));
+
+ if (!(handler_desc->address_space.handler_flags &
+ ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+ /*
+ * For handlers other than the default (supplied) handlers, we must
+ * exit the interpreter because the handler *might* block -- we don't
+ * know what it will do, so we can't hold the lock on the interpreter.
+ */
+ acpi_ex_exit_interpreter();
+ }
+
/*
* Special handling for generic_serial_bus and general_purpose_io:
* There are three extra parameters that must be passed to the
@@ -212,48 +233,39 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
* 2) Length of the above buffer
* 3) Actual access length from the access_as() op
*
+ * Since we pass these extra parameters via the context, which is
+ * shared between threads, we must lock the context to avoid these
+ * parameters being changed from another thread before the handler
+ * has completed running.
+ *
* In addition, for general_purpose_io, the Address and bit_width fields
* are defined as follows:
* 1) Address is the pin number index of the field (bit offset from
* the previous Connection)
* 2) bit_width is the actual bit length of the field (number of pins)
*/
- if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) &&
+ if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS ||
+ region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
context && field_obj) {
- /* Get the Connection (resource_template) buffer */
+ status =
+ acpi_os_acquire_mutex(context_mutex, ACPI_WAIT_FOREVER);
+ if (ACPI_FAILURE(status)) {
+ goto re_enter_interpreter;
+ }
- context->connection = field_obj->field.resource_buffer;
- context->length = field_obj->field.resource_length;
- context->access_length = field_obj->field.access_length;
- }
- if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
- context && field_obj) {
+ context_locked = TRUE;
/* Get the Connection (resource_template) buffer */
context->connection = field_obj->field.resource_buffer;
context->length = field_obj->field.resource_length;
context->access_length = field_obj->field.access_length;
- address = field_obj->field.pin_number_index;
- bit_width = field_obj->field.bit_length;
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
- "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
- &region_obj->region.handler->address_space, handler,
- ACPI_FORMAT_UINT64(address),
- acpi_ut_get_region_name(region_obj->region.
- space_id)));
- if (!(handler_desc->address_space.handler_flags &
- ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
- /*
- * For handlers other than the default (supplied) handlers, we must
- * exit the interpreter because the handler *might* block -- we don't
- * know what it will do, so we can't hold the lock on the interpreter.
- */
- acpi_ex_exit_interpreter();
+ if (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) {
+ address = field_obj->field.pin_number_index;
+ bit_width = field_obj->field.bit_length;
+ }
}
/* Call the handler */
@@ -261,6 +273,10 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
status = handler(function, address, bit_width, value, context,
region_obj2->extra.region_context);
+ if (context_locked) {
+ acpi_os_release_mutex(context_mutex);
+ }
+
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
acpi_ut_get_region_name(region_obj->region.
@@ -277,6 +293,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
}
}
+re_enter_interpreter:
if (!(handler_desc->address_space.handler_flags &
ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
/*
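
Taken together, the evregion.c changes reorder acpi_ev_address_space_dispatch() so that the debug print and the interpreter exit (for non-default handlers) happen before the serial-bus special casing, and so that the GSBUS and GPIO cases share one block guarded by the new per-handler context_mutex: the struct acpi_connection_info context is shared by every field that uses the handler, so the Connection buffer and lengths copied into it must not be overwritten by another thread before the handler returns. A self-contained sketch of the locked copy-and-call sequence, assuming the caller has already resolved handler, context, context_mutex and field_obj as in the hunk above (the function name is illustrative):

	static acpi_status
	example_locked_dispatch(acpi_adr_space_handler handler,
				struct acpi_connection_info *context,
				acpi_mutex context_mutex,
				union acpi_operand_object *field_obj,
				acpi_physical_address address, u32 bit_width,
				u64 *value, void *region_context)
	{
		acpi_status status;

		status = acpi_os_acquire_mutex(context_mutex, ACPI_WAIT_FOREVER);
		if (ACPI_FAILURE(status))
			return status;

		/* Per-field parameters written while the context is locked */
		context->connection = field_obj->field.resource_buffer;
		context->length = field_obj->field.resource_length;
		context->access_length = field_obj->field.access_length;

		status = handler(ACPI_READ, address, bit_width, value,
				 context, region_context);

		acpi_os_release_mutex(context_mutex);
		return status;
	}
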
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 89be3ccdad53..984c172453bf 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -3,7 +3,7 @@
*
* Module Name: evrgnini- ACPI address_space (op_region) init
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index e4e012297eee..ff5cf5b0705a 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -3,7 +3,7 @@
*
* Module Name: evxface - External interfaces for ACPI events
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 1a15b0087379..5445a361c621 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 3be60673e461..a6d53cf86450 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index da97fd0c6b51..b1ff0a8f9c14 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -4,7 +4,7 @@
* Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
* Address Spaces.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -201,6 +201,8 @@ acpi_remove_address_space_handler(acpi_handle device,
/* Now we can delete the handler object */
+ acpi_os_release_mutex(handler_obj->address_space.
+ context_mutex);
acpi_ut_remove_reference(handler_obj);
goto unlock_and_exit;
}
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
index 43711412722f..2d220d470c60 100644
--- a/drivers/acpi/acpica/exconcat.c
+++ b/drivers/acpi/acpica/exconcat.c
@@ -3,7 +3,7 @@
*
* Module Name: exconcat - Concatenate-type AML operators
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 68efd704e2dc..0cd9b3738e76 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -3,7 +3,7 @@
*
* Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 50c7aad2e86d..6b7498371eb0 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -3,7 +3,7 @@
*
* Module Name: exconvrt - Object conversion routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index a17482428b46..80b52ad55775 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -3,7 +3,7 @@
*
* Module Name: excreate - Named object creation
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index a5223dcaee70..6a01e38b7d5a 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: exdebug - Support for stores to the AML Debug Object
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 47a4d9a40d6b..2aea44ecc37d 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -3,7 +3,7 @@
*
* Module Name: exdump - Interpreter debug output routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 3323a2ba6a31..32f03ee81785 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -3,7 +3,7 @@
*
* Module Name: exfield - AML execution - field_unit read/write
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index ade35ff1c7ba..bdc7a30d1217 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -3,7 +3,7 @@
*
* Module Name: exfldio - Aml Field I/O
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -434,7 +434,7 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
* region_field case and write the datum to the Operation Region
*/
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
case ACPI_TYPE_LOCAL_REGION_FIELD:
/*
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 717e3998fd77..ad19f914641b 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 9ff247cba571..6237ae8284b1 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -3,7 +3,7 @@
*
* Module Name: exmutex - ASL Mutex Acquire/Release functions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 74f8b0d0452b..5283603d078d 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -3,7 +3,7 @@
*
* Module Name: exnames - interpreter/scanner name load/execute
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index a46d685a3ffc..b639e930d642 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg1 - AML execution - opcodes with 1 argument
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 03241d18ac1d..10323ab186da 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg2 - AML execution - opcodes with 2 arguments
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index c8d0d75fc450..140aae009690 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg3 - AML execution - opcodes with 3 arguments
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 55d0fa056fe7..2cf9f37a0ba8 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg6 - AML execution - opcodes with 6 arguments
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 4a0f03157e08..d8c55dde191b 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -3,7 +3,7 @@
*
* Module Name: exprep - ACPI AML field prep utilities
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 4914dbc44517..82b713a9a193 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -3,7 +3,7 @@
*
* Module Name: exregion - ACPI default op_region (address space) handlers
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 3e4018678c09..d80b76455c50 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -3,7 +3,7 @@
*
* Module Name: exresnte - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 912a078c60a4..fa6a96242835 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -3,7 +3,7 @@
*
* Module Name: exresolv - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 4d1b22971d58..cbe2c88b1dc2 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -3,7 +3,7 @@
*
* Module Name: exresop - AML Interpreter operand/object resolution
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -198,7 +198,7 @@ acpi_ex_resolve_operands(u16 opcode,
target_op = AML_DEBUG_OP;
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
case ACPI_REFCLASS_ARG:
case ACPI_REFCLASS_LOCAL:
@@ -264,7 +264,7 @@ acpi_ex_resolve_operands(u16 opcode,
* Else not a string - fall through to the normal Reference
* case below
*/
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
case ARGI_REFERENCE: /* References: */
case ARGI_INTEGER_REF:
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
index 760bc7cef55a..8e8d95f7947b 100644
--- a/drivers/acpi/acpica/exserial.c
+++ b/drivers/acpi/acpica/exserial.c
@@ -3,7 +3,7 @@
*
* Module Name: exserial - field_unit support for serial address spaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 3adc0a29d890..12f4210ea085 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -3,7 +3,7 @@
*
* Module Name: exstore - AML Interpreter object store support
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -96,7 +96,7 @@ acpi_ex_store(union acpi_operand_object *source_desc,
return_ACPI_STATUS(AE_OK);
}
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
default:
@@ -422,7 +422,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
break;
}
- /* Fallthrough */
+ ACPI_FALLTHROUGH;
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_EVENT:
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index 8c34f4e2ab8f..08469d37e73e 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -4,7 +4,7 @@
* Module Name: exstoren - AML Interpreter object store support,
* Store to Node (namespace object)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index dc66696080a5..a82628683329 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -3,7 +3,7 @@
*
* Module Name: exstorob - AML object store support, store to object
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index f329b01672bb..1281c07112de 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -3,7 +3,7 @@
*
* Module Name: exsystem - Interface to OS services
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c
index 832a47885b99..8846f483fb02 100644
--- a/drivers/acpi/acpica/extrace.c
+++ b/drivers/acpi/acpica/extrace.c
@@ -3,7 +3,7 @@
*
* Module Name: extrace - Support for interpreter execution tracing
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 8fefa6feac2f..4d41a866f633 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -3,7 +3,7 @@
*
* Module Name: exutils - interpreter/scanner utilities
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 9b9aac27ff7e..96f55f079988 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -3,7 +3,7 @@
*
* Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index d9be5d0545d4..803402aefaeb 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -4,7 +4,7 @@
* Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
* extended FADT-V5 sleep registers.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index b13a4ed5bc63..0770aa176cd5 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: hwgpe - Low level GPE enable/disable/clear functions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -167,7 +167,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
return (AE_BAD_PARAMETER);
}
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
case ACPI_GPE_ENABLE:
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 317ae870336b..14baa13bf848 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -4,7 +4,7 @@
* Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
* original/legacy sleep/PM registers.
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 07473ddfa9a9..63deadde9f48 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -3,7 +3,7 @@
*
* Name: hwtimer.c - ACPI Power Management Timer Interface
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index b2ca7dfd3fc9..e15badf4077a 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -3,7 +3,7 @@
*
* Module Name: hwvalid - I/O request validation
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 134dbfadcd15..fb27aaad0dee 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -3,7 +3,7 @@
*
* Module Name: hwxface - Public ACPICA hardware interfaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index a4b66f4b2714..89b12afed564 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -3,7 +3,7 @@
*
* Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 6bbc7d350a16..c8a2747005c5 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -3,7 +3,7 @@
*
* Module Name: nsarguments - Validation of args for ACPI predefined methods
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index c86c82939ebb..597d0eed23c1 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -4,7 +4,7 @@
* Module Name: nsconvert - Object conversions for objects returned by
* predefined methods
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 994f0b556c60..2f66f3ed1810 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index b691fe20e384..d3dc6761bcdd 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index e16f6a0c2c3f..4db81f8ba29b 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: nsinit - namespace initialization
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 9ba17891edb6..7d77956ed790 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -3,7 +3,7 @@
*
* Module Name: nsload - namespace loading/expanding/contracting procedures
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 7e74a765e785..778f80e624be 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -3,7 +3,7 @@
*
* Module Name: nsparse - namespace interface to AML parser
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 167a1c2495ab..e4e5f32da7dc 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -3,7 +3,7 @@
*
* Module Name: nspredef - Validation of ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 1875b1cba202..6742b50836f7 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -3,7 +3,7 @@
*
* Module Name: nsprepkg - Validation of package objects for predefined names
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 90db2d85e7f5..499067daa22c 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -3,7 +3,7 @@
*
* Module Name: nsrepair - Repair for objects returned by predefined methods
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index d2c8d8279e7a..14b71b41e845 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -4,7 +4,7 @@
* Module Name: nsrepair2 - Repair for objects returned by specific
* predefined methods
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -495,8 +495,9 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
union acpi_operand_object **return_object_ptr)
{
union acpi_operand_object *return_object = *return_object_ptr;
- char *dest;
+ union acpi_operand_object *new_string;
char *source;
+ char *dest;
ACPI_FUNCTION_NAME(ns_repair_HID);
@@ -517,6 +518,13 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
return_ACPI_STATUS(AE_OK);
}
+ /* It is simplest to always create a new string object */
+
+ new_string = acpi_ut_create_string_object(return_object->string.length);
+ if (!new_string) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
/*
* Remove a leading asterisk if present. For some unknown reason, there
* are many machines in the field that contains IDs like this.
@@ -526,7 +534,7 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
source = return_object->string.pointer;
if (*source == '*') {
source++;
- return_object->string.length--;
+ new_string->string.length--;
ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
"%s: Removed invalid leading asterisk\n",
@@ -541,11 +549,12 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
* "NNNN####" where N is an uppercase letter or decimal digit, and
* # is a hex digit.
*/
- for (dest = return_object->string.pointer; *source; dest++, source++) {
+ for (dest = new_string->string.pointer; *source; dest++, source++) {
*dest = (char)toupper((int)*source);
}
- return_object->string.pointer[return_object->string.length] = 0;
+ acpi_ut_remove_reference(return_object);
+ *return_object_ptr = new_string;
return_ACPI_STATUS(AE_OK);
}
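
The nsrepair2.c change rewrites acpi_ns_repair_HID() to build its result in a freshly allocated string object instead of editing the object returned by the firmware in place: as the new comment says, always creating a new string is the simplest approach, and it also means that trimming a leading '*' only has to shorten the new object's recorded length. A condensed sketch of the copy step, using the names from the patch:

	new_string = acpi_ut_create_string_object(return_object->string.length);
	if (!new_string)
		return_ACPI_STATUS(AE_NO_MEMORY);

	/* Uppercase while copying; the new buffer was sized for the
	 * original length, so a trimmed leading '*' just shortens it. */
	for (dest = new_string->string.pointer; *source; dest++, source++)
		*dest = (char)toupper((int)*source);

	acpi_ut_remove_reference(return_object);	/* drop the old object */
	*return_object_ptr = new_string;
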
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index e66abdab8f31..83d0f276da4d 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -4,7 +4,7 @@
* Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
* parents and siblings and Scope manipulation
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index b7f3e8603ad8..915c2433463d 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: nswalk - Functions for walking the ACPI namespace
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 0e6aba81605b..03487546da5a 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -4,7 +4,7 @@
* Module Name: nsxfname - Public interfaces to the ACPI subsystem
* ACPI Namespace oriented interfaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 3b40db4ad9f3..b9ff535aa02e 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -3,7 +3,7 @@
*
* Module Name: psargs - Parse AML opcode arguments
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 3cf0687b9915..4b51dd939f29 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -3,7 +3,7 @@
*
* Module Name: psloop - Main AML parse loop
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -264,8 +264,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
ACPI_TO_POINTER
(TRUE));
if (ACPI_FAILURE(status)
- && ((status & AE_CODE_MASK) !=
- AE_CODE_CONTROL)) {
+ && !ACPI_CNTL_EXCEPTION(status)) {
if (status == AE_AML_NO_RETURN_VALUE) {
ACPI_EXCEPTION((AE_INFO, status,
"Invoked method did not return a value"));
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 2480c26c5171..e4420cd6d281 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -3,7 +3,7 @@
*
* Module Name: psobject - Support for parse objects
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 28af49263ebf..3e80eb1a5f35 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: psopcode - Parser/Interpreter opcode information table
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index ab9327f6a63c..476b00a121f3 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -3,7 +3,7 @@
*
* Module Name: psopinfo - AML opcode information functions and dispatch tables
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index bd3caf735be3..7eb7a81619a3 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -3,7 +3,7 @@
*
* Module Name: psparse - Parser top level AML parse routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
@@ -383,7 +383,7 @@ acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
default:
status = callback_status;
- if ((callback_status & AE_CODE_MASK) == AE_CODE_CONTROL) {
+ if (ACPI_CNTL_EXCEPTION(callback_status)) {
status = AE_OK;
}
break;
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index fceb311995e9..3f2eada44942 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -3,7 +3,7 @@
*
* Module Name: psscope - Parser scope stack management routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index c8aef0694864..ffb2a7bfc6d7 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -3,7 +3,7 @@
*
* Module Name: pstree - Parser op tree manipulation/traversal/search
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 00efae2f95ba..e6596051d548 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -3,7 +3,7 @@
*
* Module Name: psutils - Parser miscellaneous utilities (Parser only)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 0fe3adf6b0e5..7018a789debc 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: pswalk - Parser routines to walk parsed op tree(s)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 1bbfc8def388..fd0f28c7af1e 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -3,7 +3,7 @@
*
* Module Name: psxface - Parser external interfaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 523b1e9b98d4..ebbca109edcb 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -3,7 +3,7 @@
*
* Module Name: tbdata - Table manager data structure functions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 907edc5edba7..5174abfa8af9 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfadt - FADT table utilities
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 56d81e490a5c..2c2c2b1f5a28 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfind - find table
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 0bb15add2245..8d1e5b572493 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -3,7 +3,7 @@
*
* Module Name: tbinstal - ACPI table installation and removal
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 0b3494ad9a70..254823d494a2 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -3,7 +3,7 @@
*
* Module Name: tbprint - Table output utilities
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index dfe1ac3ae34a..4b9b329a5a92 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -3,7 +3,7 @@
*
* Module Name: tbutils - ACPI Table utilities
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 7490429ddbf6..e6f51fedaf1a 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxface - ACPI table-oriented external interfaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index bcba993d4dac..38623049b962 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfload - Table load/unload external interfaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 0edc6ef5d46d..9fec3df6c3ba 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfroot - Find the root ACPI table (RSDT)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index 99fa48722cf6..7001f4b113f1 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -3,7 +3,7 @@
*
* Module Name: utaddress - op_region address range check
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 303ab51b4fcf..796fd9b33a7d 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -3,7 +3,7 @@
*
* Module Name: utalloc - local memory allocation routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
index d78656d960e8..e1b55575d5fb 100644
--- a/drivers/acpi/acpica/utascii.c
+++ b/drivers/acpi/acpica/utascii.c
@@ -3,7 +3,7 @@
*
* Module Name: utascii - Utility ascii functions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index f2ec427f4e29..8ab90f78825b 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -3,7 +3,7 @@
*
* Module Name: utbuffer - Buffer dump routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 1b03a2747401..814145019f95 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -3,7 +3,7 @@
*
* Module Name: utcache - local cache allocation routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 41bdd0278dd8..d9877153f400 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -3,7 +3,7 @@
*
* Module Name: utcopy - Internal to external object translation utilities
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 0c8cb0612414..09245945f319 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: utdebug - Debug print/trace routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index ed9aedf604a1..bcd3871079d7 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -3,7 +3,7 @@
*
* Module Name: utdecode - Utility decoding routines (value-to-string)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 4c0d4e434196..624a26794d55 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -112,7 +112,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
gpe_block);
}
- /*lint -fallthrough */
+ ACPI_FALLTHROUGH;
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_THERMAL:
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 8180d1a458f5..d2503920c620 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -3,7 +3,7 @@
*
* Module Name: uteval - Object evaluation
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index e6dcbdc3fc6e..59a48371a7bc 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -3,7 +3,7 @@
*
* Module Name: utglobal - Global variables for the ACPI subsystem
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index 0e02f12513dc..b1e94c094f9a 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -3,7 +3,7 @@
*
* Module Name: uthex -- Hex/ASCII support functions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 3e68864ef242..08e9f316cbde 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -3,7 +3,7 @@
*
* Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index fdbc397c038d..7b606a1e6986 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utinit - Common ACPI subsystem initialization
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index 46be549539e7..923dd15e7a16 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -3,7 +3,7 @@
*
* Module Name: utlock - Reader/Writer lock interfaces
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index bbec04c291d2..84a210b49e3a 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -3,7 +3,7 @@
*
* Module Name: utobject - ACPI object create/delete/size/cache routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 0a01c08dad8a..7b8e8bf1e824 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -3,7 +3,7 @@
*
* Module Name: utosi - Support for the _OSI predefined control method
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index dd277f7e9f10..a6f87a88c30e 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -3,7 +3,7 @@
*
* Module Name: utpredef - support functions for predefined names
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 681c11f4af4e..e37d612e8db5 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -3,7 +3,7 @@
*
* Module Name: utprint - Formatted printing routines
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c
index 2d91003fcf26..199982a6fb16 100644
--- a/drivers/acpi/acpica/utstrsuppt.c
+++ b/drivers/acpi/acpica/utstrsuppt.c
@@ -104,7 +104,7 @@ acpi_status acpi_ut_convert_decimal_string(char *string, u64 *return_value_ptr)
* 1) Runtime: terminate with no error, per the ACPI spec
* 2) Compiler: return an error
*/
- if (!isdigit(*string)) {
+ if (!isdigit((int)*string)) {
#ifdef ACPI_ASL_COMPILER
status = AE_BAD_DECIMAL_CONSTANT;
#endif
@@ -158,7 +158,7 @@ acpi_status acpi_ut_convert_hex_string(char *string, u64 *return_value_ptr)
* 1) Runtime: terminate with no error, per the ACPI spec
* 2) Compiler: return an error
*/
- if (!isxdigit(*string)) {
+ if (!isxdigit((int)*string)) {
#ifdef ACPI_ASL_COMPILER
status = AE_BAD_HEX_CONSTANT;
#endif
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index d366be431a84..2ce85fcfeb5b 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -3,7 +3,7 @@
*
* Module Name: uttrack - Memory allocation tracking routines (debug only)
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index b8039954b0d1..090e44b6b6c7 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -3,7 +3,7 @@
*
* Module Name: utuuid -- UUID support functions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index ca7c9f0144ef..3285c1a92e40 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -3,7 +3,7 @@
*
* Module Name: utxface - External interfaces, miscellaneous utility functions
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 653e3bb20036..91016366de1d 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utxfinit - External interfaces for ACPICA initialization
*
- * Copyright (C) 2000 - 2020, Intel Corp.
+ * Copyright (C) 2000 - 2021, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 2e0b0fcad960..b9597216d021 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -688,7 +688,7 @@ static int __erst_read_from_storage(u64 record_id, u64 offset)
break;
if (erst_timedout(&timeout, SPIN_UNIT))
return -EIO;
- };
+ }
rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
if (rc)
return rc;
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 6e980fe16772..f220bb00e91b 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -49,6 +49,12 @@ static const int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = {
[ACPI_HEST_TYPE_IA32_DEFERRED_CHECK] = -1,
};
+static inline bool is_generic_error(struct acpi_hest_header *hest_hdr)
+{
+ return hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR ||
+ hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR_V2;
+}
+
static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
{
u16 hest_type = hest_hdr->type;
@@ -141,8 +147,7 @@ static int __init hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void
{
int *count = data;
- if (hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR ||
- hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR_V2)
+ if (is_generic_error(hest_hdr))
(*count)++;
return 0;
}
@@ -153,8 +158,7 @@ static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
struct ghes_arr *ghes_arr = data;
int rc, i;
- if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR &&
- hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR_V2)
+ if (!is_generic_error(hest_hdr))
return 0;
if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index d4eac6d7e9fb..2494138a6905 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1107,6 +1107,11 @@ static int nc_dma_get_range(struct device *dev, u64 *size)
ncomp = (struct acpi_iort_named_component *)node->node_data;
+ if (!ncomp->memory_address_limit) {
+ pr_warn(FW_BUG "Named component missing memory address limit\n");
+ return -EINVAL;
+ }
+
*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
1ULL<<ncomp->memory_address_limit;
@@ -1126,6 +1131,11 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
rc = (struct acpi_iort_root_complex *)node->node_data;
+ if (!rc->memory_address_limit) {
+ pr_warn(FW_BUG "Root complex missing memory address limit\n");
+ return -EINVAL;
+ }
+
*size = rc->memory_address_limit >= 64 ? U64_MAX :
1ULL<<rc->memory_address_limit;
@@ -1173,8 +1183,8 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
end = dmaaddr + size - 1;
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->bus_dma_limit = end;
- dev->coherent_dma_mask = mask;
- *dev->dma_mask = mask;
+ dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
+ *dev->dma_mask = min(*dev->dma_mask, mask);
}
*dma_addr = dmaaddr;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 08ee1c7b12e0..b822f77afba6 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -8,7 +8,7 @@
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt) "ACPI: battery: " fmt
#include <linux/async.h>
#include <linux/delay.h>
@@ -29,8 +29,6 @@
#include <acpi/battery.h>
-#define PREFIX "ACPI: "
-
#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
#define ACPI_BATTERY_CAPACITY_VALID(capacity) \
((capacity) != 0 && (capacity) != ACPI_BATTERY_VALUE_UNKNOWN)
@@ -44,10 +42,6 @@
#define ACPI_BATTERY_STATE_CHARGING 0x2
#define ACPI_BATTERY_STATE_CRITICAL 0x4
-#define _COMPONENT ACPI_BATTERY_COMPONENT
-
-ACPI_MODULE_NAME("battery");
-
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
MODULE_DESCRIPTION("ACPI Battery Driver");
@@ -466,7 +460,8 @@ static int extract_package(struct acpi_battery *battery,
static int acpi_battery_get_status(struct acpi_battery *battery)
{
if (acpi_bus_get_status(battery->device)) {
- ACPI_EXCEPTION((AE_INFO, AE_ERROR, "Evaluating _STA"));
+ acpi_handle_info(battery->device->handle,
+ "_STA evaluation failed\n");
return -ENODEV;
}
return 0;
@@ -535,8 +530,10 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
mutex_unlock(&battery->lock);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s",
- use_bix ? "_BIX":"_BIF"));
+ acpi_handle_info(battery->device->handle,
+ "%s evaluation failed: %s\n",
+ use_bix ?"_BIX":"_BIF",
+ acpi_format_exception(status));
} else {
result = extract_battery_info(use_bix,
battery,
@@ -573,7 +570,9 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
mutex_unlock(&battery->lock);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BST"));
+ acpi_handle_info(battery->device->handle,
+ "_BST evaluation failed: %s",
+ acpi_format_exception(status));
return -ENODEV;
}
@@ -590,7 +589,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
battery->rate_now != ACPI_BATTERY_VALUE_UNKNOWN &&
(s16)(battery->rate_now) < 0) {
battery->rate_now = abs((s16)battery->rate_now);
- pr_warn_once(FW_BUG "battery: (dis)charge rate invalid.\n");
+ pr_warn_once(FW_BUG "(dis)charge rate invalid.\n");
}
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
@@ -625,7 +624,9 @@ static int acpi_battery_set_alarm(struct acpi_battery *battery)
if (ACPI_FAILURE(status))
return -ENODEV;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Alarm set to %d\n", battery->alarm));
+ acpi_handle_debug(battery->device->handle, "Alarm set to %d\n",
+ battery->alarm);
+
return 0;
}
@@ -1201,8 +1202,7 @@ static int acpi_battery_add(struct acpi_device *device)
if (result)
goto fail;
- pr_info(PREFIX "%s Slot [%s] (battery %s)\n",
- ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
+ pr_info("Slot [%s] (battery %s)\n", acpi_device_bid(device),
device->status.battery_present ? "present" : "absent");
battery->pm_nb.notifier_call = battery_notify;
@@ -1282,8 +1282,7 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
if (battery_check_pmic) {
for (i = 0; i < ARRAY_SIZE(acpi_battery_blacklist); i++)
if (acpi_dev_present(acpi_battery_blacklist[i], "1", -1)) {
- pr_info(PREFIX ACPI_BATTERY_DEVICE_NAME
- ": found native %s PMIC, not loading\n",
+ pr_info("found native %s PMIC, not loading\n",
acpi_battery_blacklist[i]);
return;
}
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index 251f961c28cc..19bb7f870204 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -15,40 +15,40 @@
static void *bgrt_image;
static struct kobject *bgrt_kobj;
-static ssize_t show_version(struct device *dev,
+static ssize_t version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.version);
}
-static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
+static DEVICE_ATTR_RO(version);
-static ssize_t show_status(struct device *dev,
+static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.status);
}
-static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
+static DEVICE_ATTR_RO(status);
-static ssize_t show_type(struct device *dev,
+static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_type);
}
-static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
+static DEVICE_ATTR_RO(type);
-static ssize_t show_xoffset(struct device *dev,
+static ssize_t xoffset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_x);
}
-static DEVICE_ATTR(xoffset, S_IRUGO, show_xoffset, NULL);
+static DEVICE_ATTR_RO(xoffset);
-static ssize_t show_yoffset(struct device *dev,
+static ssize_t yoffset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_y);
}
-static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL);
+static DEVICE_ATTR_RO(yoffset);
static ssize_t image_read(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t off, size_t count)
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 1682f8b454a2..be7da23fad76 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -5,6 +5,8 @@
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
@@ -31,9 +33,6 @@
#include "internal.h"
-#define _COMPONENT ACPI_BUS_COMPONENT
-ACPI_MODULE_NAME("bus");
-
struct acpi_device *acpi_root;
struct proc_dir_entry *acpi_root_dir;
EXPORT_SYMBOL(acpi_root_dir);
@@ -47,8 +46,7 @@ static inline int set_copy_dsdt(const struct dmi_system_id *id)
#else
static int set_copy_dsdt(const struct dmi_system_id *id)
{
- printk(KERN_NOTICE "%s detected - "
- "force copy of DSDT to local memory\n", id->ident);
+ pr_notice("%s detected - force copy of DSDT to local memory\n", id->ident);
acpi_gbl_copy_dsdt_locally = 1;
return 0;
}
@@ -116,13 +114,11 @@ int acpi_bus_get_status(struct acpi_device *device)
acpi_set_device_status(device, sta);
if (device->status.functional && !device->status.present) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]: "
- "functional but not present;\n",
- device->pnp.bus_id, (u32)sta));
+ pr_debug("Device [%s] status [%08x]: functional but not present\n",
+ device->pnp.bus_id, (u32)sta);
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n",
- device->pnp.bus_id, (u32)sta));
+ pr_debug("Device [%s] status [%08x]\n", device->pnp.bus_id, (u32)sta);
return 0;
}
EXPORT_SYMBOL(acpi_bus_get_status);
@@ -281,10 +277,16 @@ bool osc_sb_apei_support_acked;
bool osc_pc_lpi_support_confirmed;
EXPORT_SYMBOL_GPL(osc_pc_lpi_support_confirmed);
+/*
+ * ACPI 6.4 Operating System Capabilities for USB.
+ */
+bool osc_sb_native_usb4_support_confirmed;
+EXPORT_SYMBOL_GPL(osc_sb_native_usb4_support_confirmed);
+
static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
-static void acpi_bus_osc_support(void)
+static void acpi_bus_osc_negotiate_platform_control(void)
{
- u32 capbuf[2];
+ u32 capbuf[2], *capbuf_ret;
struct acpi_osc_context context = {
.uuid_str = sb_uuid_str,
.rev = 1,
@@ -317,21 +319,109 @@ static void acpi_bus_osc_support(void)
if (IS_ENABLED(CONFIG_SCHED_MC_PRIO))
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_DIVERSE_HIGH_SUPPORT;
+ if (IS_ENABLED(CONFIG_USB4))
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_NATIVE_USB4_SUPPORT;
+
if (!ghes_disable)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
return;
- if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) {
- u32 *capbuf_ret = context.ret.pointer;
- if (context.ret.length > OSC_SUPPORT_DWORD) {
- osc_sb_apei_support_acked =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
- osc_pc_lpi_support_confirmed =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
- }
+
+ if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
+ return;
+
+ capbuf_ret = context.ret.pointer;
+ if (context.ret.length <= OSC_SUPPORT_DWORD) {
kfree(context.ret.pointer);
+ return;
+ }
+
+ /*
+ * Now run _OSC again with query flag clear and with the caps
+ * supported by both the OS and the platform.
+ */
+ capbuf[OSC_QUERY_DWORD] = 0;
+ capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD];
+ kfree(context.ret.pointer);
+
+ if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
+ return;
+
+ capbuf_ret = context.ret.pointer;
+ if (context.ret.length > OSC_SUPPORT_DWORD) {
+ osc_sb_apei_support_acked =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
+ osc_pc_lpi_support_confirmed =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
+ osc_sb_native_usb4_support_confirmed =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
}
- /* do we need to check other returned cap? Sounds no */
+
+ kfree(context.ret.pointer);
+}
+
+/*
+ * Native control of USB4 capabilities. If any of the tunneling bits is
+ * set it means OS is in control and we use software based connection
+ * manager.
+ */
+u32 osc_sb_native_usb4_control;
+EXPORT_SYMBOL_GPL(osc_sb_native_usb4_control);
+
+static void acpi_bus_decode_usb_osc(const char *msg, u32 bits)
+{
+ printk(KERN_INFO PREFIX "%s USB3%c DisplayPort%c PCIe%c XDomain%c\n", msg,
+ (bits & OSC_USB_USB3_TUNNELING) ? '+' : '-',
+ (bits & OSC_USB_DP_TUNNELING) ? '+' : '-',
+ (bits & OSC_USB_PCIE_TUNNELING) ? '+' : '-',
+ (bits & OSC_USB_XDOMAIN) ? '+' : '-');
+}
+
+static u8 sb_usb_uuid_str[] = "23A0D13A-26AB-486C-9C5F-0FFA525A575A";
+static void acpi_bus_osc_negotiate_usb_control(void)
+{
+ u32 capbuf[3];
+ struct acpi_osc_context context = {
+ .uuid_str = sb_usb_uuid_str,
+ .rev = 1,
+ .cap.length = sizeof(capbuf),
+ .cap.pointer = capbuf,
+ };
+ acpi_handle handle;
+ acpi_status status;
+ u32 control;
+
+ if (!osc_sb_native_usb4_support_confirmed)
+ return;
+
+ if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
+ return;
+
+ control = OSC_USB_USB3_TUNNELING | OSC_USB_DP_TUNNELING |
+ OSC_USB_PCIE_TUNNELING | OSC_USB_XDOMAIN;
+
+ capbuf[OSC_QUERY_DWORD] = 0;
+ capbuf[OSC_SUPPORT_DWORD] = 0;
+ capbuf[OSC_CONTROL_DWORD] = control;
+
+ status = acpi_run_osc(handle, &context);
+ if (ACPI_FAILURE(status))
+ return;
+
+ if (context.ret.length != sizeof(capbuf)) {
+ printk(KERN_INFO PREFIX "USB4 _OSC: returned invalid length buffer\n");
+ goto out_free;
+ }
+
+ osc_sb_native_usb4_control =
+ control & ((u32 *)context.ret.pointer)[OSC_CONTROL_DWORD];
+
+ acpi_bus_decode_usb_osc("USB4 _OSC: OS supports", control);
+ acpi_bus_decode_usb_osc("USB4 _OSC: OS controls",
+ osc_sb_native_usb4_control);
+
+out_free:
+ kfree(context.ret.pointer);
}
/* --------------------------------------------------------------------------
@@ -915,9 +1005,9 @@ static int acpi_device_probe(struct device *dev)
return ret;
acpi_dev->driver = acpi_drv;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Driver [%s] successfully bound to device [%s]\n",
- acpi_drv->name, acpi_dev->pnp.bus_id));
+
+ pr_debug("Driver [%s] successfully bound to device [%s]\n",
+ acpi_drv->name, acpi_dev->pnp.bus_id);
if (acpi_drv->ops.notify) {
ret = acpi_device_install_notify_handler(acpi_dev);
@@ -931,8 +1021,9 @@ static int acpi_device_probe(struct device *dev)
}
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n",
- acpi_drv->name, acpi_dev->pnp.bus_id));
+ pr_debug("Found driver [%s] for device [%s]\n", acpi_drv->name,
+ acpi_dev->pnp.bus_id);
+
get_device(dev);
return 0;
}
@@ -995,15 +1086,15 @@ static int __init acpi_bus_init_irq(void)
message = "platform specific model";
break;
default:
- printk(KERN_WARNING PREFIX "Unknown interrupt routing model\n");
+ pr_info("Unknown interrupt routing model\n");
return -ENODEV;
}
- printk(KERN_INFO PREFIX "Using %s for interrupt routing\n", message);
+ pr_info("Using %s for interrupt routing\n", message);
status = acpi_execute_simple_method(NULL, "\\_PIC", acpi_irq_model);
if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PIC"));
+ pr_info("_PIC evaluation failed: %s\n", acpi_format_exception(status));
return -ENODEV;
}
@@ -1027,7 +1118,7 @@ void __init acpi_early_init(void)
if (acpi_disabled)
return;
- printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION);
+ pr_info("Core revision %08x\n", ACPI_CA_VERSION);
/* enable workarounds, unless strict ACPI spec. compliance */
if (!acpi_strict)
@@ -1048,15 +1139,13 @@ void __init acpi_early_init(void)
status = acpi_reallocate_root_table();
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Unable to reallocate ACPI tables\n");
+ pr_err("Unable to reallocate ACPI tables\n");
goto error0;
}
status = acpi_initialize_subsystem();
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Unable to initialize the ACPI Interpreter\n");
+ pr_err("Unable to initialize the ACPI Interpreter\n");
goto error0;
}
@@ -1102,7 +1191,7 @@ void __init acpi_subsystem_init(void)
status = acpi_enable_subsystem(~ACPI_NO_ACPI_ENABLE);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX "Unable to enable ACPI\n");
+ pr_err("Unable to enable ACPI\n");
disable_acpi();
} else {
/*
@@ -1131,8 +1220,7 @@ static int __init acpi_bus_init(void)
status = acpi_load_tables();
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Unable to load the System Description Tables\n");
+ pr_err("Unable to load the System Description Tables\n");
goto error1;
}
@@ -1150,14 +1238,13 @@ static int __init acpi_bus_init(void)
status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Unable to start the ACPI Interpreter\n");
+ pr_err("Unable to start the ACPI Interpreter\n");
goto error1;
}
status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
+ pr_err("Unable to initialize ACPI objects\n");
goto error1;
}
@@ -1168,7 +1255,8 @@ static int __init acpi_bus_init(void)
* _OSC method may exist in module level code,
* so it must be run after ACPI_FULL_INITIALIZATION
*/
- acpi_bus_osc_support();
+ acpi_bus_osc_negotiate_platform_control();
+ acpi_bus_osc_negotiate_usb_control();
/*
* _PDC control method may load dynamic SSDT tables,
@@ -1186,7 +1274,7 @@ static int __init acpi_bus_init(void)
*/
acpi_ec_dsdt_probe();
- printk(KERN_INFO PREFIX "Interpreter enabled\n");
+ pr_info("Interpreter enabled\n");
/* Initialize sleep structures */
acpi_sleep_init();
@@ -1205,8 +1293,7 @@ static int __init acpi_bus_init(void)
acpi_install_notify_handler(ACPI_ROOT_OBJECT, ACPI_SYSTEM_NOTIFY,
&acpi_bus_notify, NULL);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Unable to register for device notifications\n");
+ pr_err("Unable to register for system notifications\n");
goto error1;
}
@@ -1233,13 +1320,13 @@ static int __init acpi_init(void)
int result;
if (acpi_disabled) {
- printk(KERN_INFO PREFIX "Interpreter disabled.\n");
+ pr_info("Interpreter disabled.\n");
return -ENODEV;
}
acpi_kobj = kobject_create_and_add("acpi", firmware_kobj);
if (!acpi_kobj) {
- printk(KERN_WARNING "%s: kset create error\n", __func__);
+ pr_debug("%s: kset create error\n", __func__);
acpi_kobj = NULL;
}
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 0d93a5ef4d07..85e5e0328a2e 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -21,8 +21,6 @@
#include <linux/dmi.h>
#include <acpi/button.h>
-#define PREFIX "ACPI: "
-
#define ACPI_BUTTON_CLASS "button"
#define ACPI_BUTTON_FILE_STATE "state"
#define ACPI_BUTTON_TYPE_UNKNOWN 0x00
@@ -54,9 +52,6 @@ static const char * const lid_init_state_str[] = {
[ACPI_BUTTON_LID_INIT_DISABLED] = "disabled",
};
-#define _COMPONENT ACPI_BUTTON_COMPONENT
-ACPI_MODULE_NAME("button");
-
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Button Driver");
MODULE_LICENSE("GPL");
@@ -285,7 +280,7 @@ static int acpi_button_add_fs(struct acpi_device *device)
return 0;
if (acpi_button_dir || acpi_lid_dir) {
- printk(KERN_ERR PREFIX "More than one Lid device found!\n");
+ pr_info("More than one Lid device found!\n");
return -EEXIST;
}
@@ -434,8 +429,8 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
}
break;
default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Unsupported event [0x%x]\n", event));
+ acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
+ event);
break;
}
}
@@ -523,7 +518,7 @@ static int acpi_button_add(struct acpi_device *device)
ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID);
input->open = acpi_lid_input_open;
} else {
- printk(KERN_ERR PREFIX "Unsupported hid [%s]\n", hid);
+ pr_info("Unsupported hid [%s]\n", hid);
error = -ENODEV;
goto err_free_input;
}
@@ -567,7 +562,7 @@ static int acpi_button_add(struct acpi_device *device)
}
device_init_wakeup(&device->dev, true);
- printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
+ pr_info("%s [%s]\n", name, acpi_device_bid(device));
return 0;
err_remove_fs:
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 75aaf94ae0a9..69057fcd2c04 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -233,8 +233,8 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
int ret = -EIO, i;
struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
- struct acpi_pcct_shared_memory *generic_comm_base =
- (struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ pcc_ss_data->pcc_comm_addr;
unsigned int time_delta;
/*
@@ -934,7 +934,7 @@ int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
int ret_val = 0;
- void __iomem *vaddr = 0;
+ void __iomem *vaddr = NULL;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
@@ -979,7 +979,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
int ret_val = 0;
- void __iomem *vaddr = 0;
+ void __iomem *vaddr = NULL;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 3586434d0ded..096153761ebc 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -10,6 +10,8 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#define pr_fmt(fmt) "ACPI: PM: " fmt
+
#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/mutex.h>
@@ -20,9 +22,6 @@
#include "internal.h"
-#define _COMPONENT ACPI_POWER_COMPONENT
-ACPI_MODULE_NAME("device_pm");
-
/**
* acpi_power_state_string - String representation of ACPI device power state.
* @state: ACPI device power state to return the string representation of.
@@ -130,8 +129,8 @@ int acpi_device_get_power(struct acpi_device *device, int *state)
*state = result;
out:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
- device->pnp.bus_id, acpi_power_state_string(*state)));
+ dev_dbg(&device->dev, "Device power state is %s\n",
+ acpi_power_state_string(*state));
return 0;
}
@@ -174,9 +173,8 @@ int acpi_device_set_power(struct acpi_device *device, int state)
/* There is a special case for D0 addressed below. */
if (state > ACPI_STATE_D0 && state == device->power.state) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] already in %s\n",
- device->pnp.bus_id,
- acpi_power_state_string(state)));
+ dev_dbg(&device->dev, "Device already in %s\n",
+ acpi_power_state_string(state));
return 0;
}
@@ -276,10 +274,8 @@ int acpi_device_set_power(struct acpi_device *device, int state)
acpi_power_state_string(target_state));
} else {
device->power.state = target_state;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device [%s] transitioned to %s\n",
- device->pnp.bus_id,
- acpi_power_state_string(target_state)));
+ dev_dbg(&device->dev, "Power state changed to %s\n",
+ acpi_power_state_string(target_state));
}
return result;
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 96869f1538b9..da4ff2a8b06a 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -251,20 +251,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev,
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
- len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
- sizeof(env->buf) - env->buflen);
- if (len < 0)
- return len;
-
- env->buflen += len;
- if (!adev->data.of_compatible)
- return 0;
-
- if (len > 0 && add_uevent_var(env, "MODALIAS="))
- return -ENOMEM;
-
- len = create_of_modalias(adev, &env->buf[env->buflen - 1],
- sizeof(env->buf) - env->buflen);
+ if (adev->data.of_compatible)
+ len = create_of_modalias(adev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
+ else
+ len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
if (len < 0)
return len;
@@ -333,11 +325,11 @@ int acpi_device_modalias(struct device *dev, char *buf, int size)
EXPORT_SYMBOL_GPL(acpi_device_modalias);
static ssize_t
-acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
+modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
}
-static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
+static DEVICE_ATTR_RO(modalias);
static ssize_t real_power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -366,8 +358,8 @@ static ssize_t power_state_show(struct device *dev,
static DEVICE_ATTR_RO(power_state);
static ssize_t
-acpi_eject_store(struct device *d, struct device_attribute *attr,
- const char *buf, size_t count)
+eject_store(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct acpi_device *acpi_device = to_acpi_device(d);
acpi_object_type not_used;
@@ -395,28 +387,28 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
}
-static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
+static DEVICE_ATTR_WO(eject);
static ssize_t
-acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf)
+hid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
}
-static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
+static DEVICE_ATTR_RO(hid);
-static ssize_t acpi_device_uid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t uid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
}
-static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL);
+static DEVICE_ATTR_RO(uid);
-static ssize_t acpi_device_adr_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t adr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
@@ -425,16 +417,16 @@ static ssize_t acpi_device_adr_show(struct device *dev,
else
return sprintf(buf, "0x%08llx\n", acpi_dev->pnp.bus_address);
}
-static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
+static DEVICE_ATTR_RO(adr);
-static ssize_t acpi_device_path_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t path_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return acpi_object_path(acpi_dev->handle, buf);
}
-static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
+static DEVICE_ATTR_RO(path);
/* sysfs file that shows description text from the ACPI _STR method */
static ssize_t description_show(struct device *dev,
@@ -463,8 +455,8 @@ static ssize_t description_show(struct device *dev,
static DEVICE_ATTR_RO(description);
static ssize_t
-acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
- char *buf) {
+sun_show(struct device *dev, struct device_attribute *attr,
+ char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_status status;
unsigned long long sun;
@@ -475,11 +467,11 @@ acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%llu\n", sun);
}
-static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
+static DEVICE_ATTR_RO(sun);
static ssize_t
-acpi_device_hrv_show(struct device *dev, struct device_attribute *attr,
- char *buf) {
+hrv_show(struct device *dev, struct device_attribute *attr,
+ char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_status status;
unsigned long long hrv;
@@ -490,7 +482,7 @@ acpi_device_hrv_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%llu\n", hrv);
}
-static DEVICE_ATTR(hrv, 0444, acpi_device_hrv_show, NULL);
+static DEVICE_ATTR_RO(hrv);
static ssize_t status_show(struct device *dev, struct device_attribute *attr,
char *buf) {
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 24e076f44d23..0937ceab052e 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -484,7 +484,7 @@ int dock_notify(struct acpi_device *adev, u32 event)
/*
* show_docked - read method for "docked" file in sysfs
*/
-static ssize_t show_docked(struct device *dev,
+static ssize_t docked_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dock_station *dock_station = dev->platform_data;
@@ -493,25 +493,25 @@ static ssize_t show_docked(struct device *dev,
acpi_bus_get_device(dock_station->handle, &adev);
return snprintf(buf, PAGE_SIZE, "%u\n", acpi_device_enumerated(adev));
}
-static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
+static DEVICE_ATTR_RO(docked);
/*
* show_flags - read method for flags file in sysfs
*/
-static ssize_t show_flags(struct device *dev,
+static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dock_station *dock_station = dev->platform_data;
return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
}
-static DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL);
+static DEVICE_ATTR_RO(flags);
/*
* write_undock - write method for "undock" file in sysfs
*/
-static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t undock_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int ret;
struct dock_station *dock_station = dev->platform_data;
@@ -525,13 +525,13 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
acpi_scan_lock_release();
return ret ? ret: count;
}
-static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
+static DEVICE_ATTR_WO(undock);
/*
* show_dock_uid - read method for "uid" file in sysfs
*/
-static ssize_t show_dock_uid(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t uid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
unsigned long long lbuf;
struct dock_station *dock_station = dev->platform_data;
@@ -542,10 +542,10 @@ static ssize_t show_dock_uid(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%llx\n", lbuf);
}
-static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
+static DEVICE_ATTR_RO(uid);
-static ssize_t show_dock_type(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct dock_station *dock_station = dev->platform_data;
char *type;
@@ -561,7 +561,7 @@ static ssize_t show_dock_type(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
-static DEVICE_ATTR(type, S_IRUGO, show_dock_type, NULL);
+static DEVICE_ATTR_RO(type);
static struct attribute *dock_attributes[] = {
&dev_attr_docked.attr,
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index b11b08a60684..8c5dde628405 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2269,40 +2269,24 @@ static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
- struct nfit_set_info_map {
- u64 region_offset;
- u32 serial_number;
- u32 pad;
- } mapping[0];
+ u64 region_offset;
+ u32 serial_number;
+ u32 pad;
};
struct nfit_set_info2 {
- struct nfit_set_info_map2 {
- u64 region_offset;
- u32 serial_number;
- u16 vendor_id;
- u16 manufacturing_date;
- u8 manufacturing_location;
- u8 reserved[31];
- } mapping[0];
+ u64 region_offset;
+ u32 serial_number;
+ u16 vendor_id;
+ u16 manufacturing_date;
+ u8 manufacturing_location;
+ u8 reserved[31];
};
-static size_t sizeof_nfit_set_info(int num_mappings)
-{
- return sizeof(struct nfit_set_info)
- + num_mappings * sizeof(struct nfit_set_info_map);
-}
-
-static size_t sizeof_nfit_set_info2(int num_mappings)
-{
- return sizeof(struct nfit_set_info2)
- + num_mappings * sizeof(struct nfit_set_info_map2);
-}
-
static int cmp_map_compat(const void *m0, const void *m1)
{
- const struct nfit_set_info_map *map0 = m0;
- const struct nfit_set_info_map *map1 = m1;
+ const struct nfit_set_info *map0 = m0;
+ const struct nfit_set_info *map1 = m1;
return memcmp(&map0->region_offset, &map1->region_offset,
sizeof(u64));
@@ -2310,8 +2294,8 @@ static int cmp_map_compat(const void *m0, const void *m1)
static int cmp_map(const void *m0, const void *m1)
{
- const struct nfit_set_info_map *map0 = m0;
- const struct nfit_set_info_map *map1 = m1;
+ const struct nfit_set_info *map0 = m0;
+ const struct nfit_set_info *map1 = m1;
if (map0->region_offset < map1->region_offset)
return -1;
@@ -2322,8 +2306,8 @@ static int cmp_map(const void *m0, const void *m1)
static int cmp_map2(const void *m0, const void *m1)
{
- const struct nfit_set_info_map2 *map0 = m0;
- const struct nfit_set_info_map2 *map1 = m1;
+ const struct nfit_set_info2 *map0 = m0;
+ const struct nfit_set_info2 *map1 = m1;
if (map0->region_offset < map1->region_offset)
return -1;
@@ -2361,22 +2345,22 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
return -ENOMEM;
import_guid(&nd_set->type_guid, spa->range_guid);
- info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
+ info = devm_kcalloc(dev, nr, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
+ info2 = devm_kcalloc(dev, nr, sizeof(*info2), GFP_KERNEL);
if (!info2)
return -ENOMEM;
for (i = 0; i < nr; i++) {
struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
- struct nfit_set_info_map *map = &info->mapping[i];
- struct nfit_set_info_map2 *map2 = &info2->mapping[i];
struct nvdimm *nvdimm = mapping->nvdimm;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
- spa->range_index, i);
+ struct nfit_set_info *map = &info[i];
+ struct nfit_set_info2 *map2 = &info2[i];
+ struct acpi_nfit_memory_map *memdev =
+ memdev_from_spa(acpi_desc, spa->range_index, i);
struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
if (!memdev || !nfit_mem->dcr) {
@@ -2395,23 +2379,20 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
}
/* v1.1 namespaces */
- sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
- cmp_map, NULL);
- nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
+ sort(info, nr, sizeof(*info), cmp_map, NULL);
+ nd_set->cookie1 = nd_fletcher64(info, sizeof(*info) * nr, 0);
/* v1.2 namespaces */
- sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
- cmp_map2, NULL);
- nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);
+ sort(info2, nr, sizeof(*info2), cmp_map2, NULL);
+ nd_set->cookie2 = nd_fletcher64(info2, sizeof(*info2) * nr, 0);
/* support v1.1 namespaces created with the wrong sort order */
- sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
- cmp_map_compat, NULL);
- nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
+ sort(info, nr, sizeof(*info), cmp_map_compat, NULL);
+ nd_set->altcookie = nd_fletcher64(info, sizeof(*info) * nr, 0);
/* record the result of the sort for the mapping position */
for (i = 0; i < nr; i++) {
- struct nfit_set_info_map2 *map2 = &info2->mapping[i];
+ struct nfit_set_info2 *map2 = &info2[i];
int j;
for (j = 0; j < nr; j++) {
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 0418febc5cf2..327e1b4eb6b0 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -9,6 +9,8 @@
* Author: Matthew Wilcox <willy@linux.intel.com>
*/
+#define pr_fmt(fmt) "ACPI: OSL: " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -37,6 +39,7 @@
#include "acpica/acnamesp.h"
#include "internal.h"
+/* Definitions for ACPI_DEBUG_PRINT() */
#define _COMPONENT ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");
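The remaining hunks in this file replace bare printk(KERN_* PREFIX ...) calls with pr_err()/pr_info()/pr_notice(), relying on the pr_fmt() definition added above to supply the "ACPI: OSL: " prefix automatically. A minimal sketch of the mechanism (example() and the phys argument are only for illustration):

/* pr_fmt() must be defined before printk.h (or any header that pulls it in). */
#define pr_fmt(fmt) "ACPI: OSL: " fmt

#include <linux/printk.h>
#include <linux/types.h>

static void example(u64 phys)
{
	/* Logs: "ACPI: OSL: Cannot map memory that high: 0x..." */
	pr_err("Cannot map memory that high: 0x%llx\n", phys);
}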
@@ -327,7 +330,7 @@ void __iomem __ref
acpi_size pg_sz;
if (phys > ULONG_MAX) {
- printk(KERN_ERR PREFIX "Cannot map memory that high\n");
+ pr_err("Cannot map memory that high: 0x%llx\n", phys);
return NULL;
}
@@ -528,13 +531,12 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
*new_val = NULL;
if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
- printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
- acpi_os_name);
+ pr_info("Overriding _OS definition to '%s'\n", acpi_os_name);
*new_val = acpi_os_name;
}
if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
- printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
+ pr_info("Overriding _REV return value to 5\n");
*new_val = (char *)5;
}
@@ -575,15 +577,14 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
return AE_ALREADY_ACQUIRED;
if (acpi_gsi_to_irq(gsi, &irq) < 0) {
- printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
- gsi);
+ pr_err("SCI (ACPI GSI %d) not registered\n", gsi);
return AE_OK;
}
acpi_irq_handler = handler;
acpi_irq_context = context;
if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
- printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
+ pr_err("SCI (IRQ%d) allocation failed\n", irq);
acpi_irq_handler = NULL;
return AE_NOT_ACQUIRED;
}
@@ -1071,7 +1072,7 @@ acpi_status acpi_os_execute(acpi_execute_type type,
if (type == OSL_DEBUGGER_MAIN_THREAD) {
ret = acpi_debugger_create_thread(function, context);
if (ret) {
- pr_err("Call to kthread_create() failed.\n");
+ pr_err("Kernel thread creation failed\n");
status = AE_ERROR;
}
goto out_thread;
@@ -1121,8 +1122,7 @@ acpi_status acpi_os_execute(acpi_execute_type type,
*/
ret = queue_work_on(0, queue, &dpc->work);
if (!ret) {
- printk(KERN_ERR PREFIX
- "Call to queue_work() failed.\n");
+ pr_err("Unable to queue work\n");
status = AE_ERROR;
}
err_workqueue:
@@ -1165,9 +1165,9 @@ acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
struct acpi_hp_work *hpw;
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Scheduling hotplug event (%p, %u) for deferred execution.\n",
- adev, src));
+ acpi_handle_debug(adev->handle,
+ "Scheduling hotplug event %u for deferred handling\n",
+ src);
hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
if (!hpw)
@@ -1355,7 +1355,7 @@ acpi_status acpi_os_signal(u32 function, void *info)
{
switch (function) {
case ACPI_SIGNAL_FATAL:
- printk(KERN_ERR PREFIX "Fatal opcode executed\n");
+ pr_err("Fatal opcode executed\n");
break;
case ACPI_SIGNAL_BREAKPOINT:
/*
@@ -1407,7 +1407,7 @@ __setup("acpi_os_name=", acpi_os_name_setup);
static int __init acpi_no_auto_serialize_setup(char *str)
{
acpi_gbl_auto_serialize_methods = FALSE;
- pr_info("ACPI: auto-serialization disabled\n");
+ pr_info("Auto-serialization disabled\n");
return 1;
}
@@ -1458,38 +1458,28 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
int acpi_check_resource_conflict(const struct resource *res)
{
acpi_adr_space_type space_id;
- acpi_size length;
- u8 warn = 0;
- int clash = 0;
if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
return 0;
- if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
- return 0;
if (res->flags & IORESOURCE_IO)
space_id = ACPI_ADR_SPACE_SYSTEM_IO;
- else
+ else if (res->flags & IORESOURCE_MEM)
space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
+ else
+ return 0;
+
+ if (!acpi_check_address_range(space_id, res->start, resource_size(res), 1))
+ return 0;
+
+ pr_info("Resource conflict; ACPI support missing from driver?\n");
+
+ if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
+ return -EBUSY;
+
+ if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
+ pr_notice("Resource conflict: System may be unstable or behave erratically\n");
- length = resource_size(res);
- if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
- warn = 1;
- clash = acpi_check_address_range(space_id, res->start, length, warn);
-
- if (clash) {
- if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
- if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
- printk(KERN_NOTICE "ACPI: This conflict may"
- " cause random problems and system"
- " instability\n");
- printk(KERN_INFO "ACPI: If an ACPI driver is available"
- " for this device, you should use it instead of"
- " the native driver\n");
- }
- if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
- return -EBUSY;
- }
return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);
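The rewrite above turns the nested conditionals into early returns while keeping the caller-visible contract: 0 when the range is free (or enforcement is off), -EBUSY when an ACPI operation region overlaps and enforcement is strict. A hedged sketch of a typical caller; the I/O port range is made up:

#include <linux/acpi.h>
#include <linux/ioport.h>

static int claim_example_region(void)
{
	/* Hypothetical port range used only for illustration. */
	struct resource res = DEFINE_RES_IO(0x295, 0x2);
	int ret;

	ret = acpi_check_resource_conflict(&res);
	if (ret)
		return ret;	/* -EBUSY: ACPI owns the range, back off */

	/* safe to request_region() and drive the hardware natively here */
	return 0;
}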
@@ -1722,7 +1712,7 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
static int __init acpi_no_static_ssdt_setup(char *s)
{
acpi_gbl_disable_ssdt_table_install = TRUE;
- pr_info("ACPI: static SSDT installation disabled\n");
+ pr_info("Static SSDT installation disabled\n");
return 0;
}
@@ -1731,8 +1721,7 @@ early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);
static int __init acpi_disable_return_repair(char *s)
{
- printk(KERN_NOTICE PREFIX
- "ACPI: Predefined validation mechanism disabled\n");
+ pr_notice("Predefined validation mechanism disabled\n");
acpi_gbl_disable_auto_repair = TRUE;
return 1;
@@ -1758,7 +1747,7 @@ acpi_status __init acpi_os_initialize(void)
void *rv;
rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
- pr_debug(PREFIX "%s: map reset_reg %s\n", __func__,
+ pr_debug("%s: Reset register mapping %s\n", __func__,
rv ? "successful" : "failed");
}
acpi_os_initialized = true;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 0bf072cef6cf..dcd593766a64 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -56,8 +56,6 @@ static struct acpi_scan_handler pci_root_handler = {
},
};
-static DEFINE_MUTEX(osc_lock);
-
/**
* acpi_is_root_bridge - determine whether an ACPI CA node is a PCI root bridge
* @handle: the ACPI CA node in question.
@@ -223,12 +221,7 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
{
- acpi_status status;
-
- mutex_lock(&osc_lock);
- status = acpi_pci_query_osc(root, flags, NULL);
- mutex_unlock(&osc_lock);
- return status;
+ return acpi_pci_query_osc(root, flags, NULL);
}
struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
@@ -353,10 +346,10 @@ EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
* _OSC bits the BIOS has granted control of, but its contents are meaningless
* on failure.
**/
-acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
+static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
{
struct acpi_pci_root *root;
- acpi_status status = AE_OK;
+ acpi_status status;
u32 ctrl, capbuf[3];
if (!mask)
@@ -370,18 +363,16 @@ acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
if (!root)
return AE_NOT_EXIST;
- mutex_lock(&osc_lock);
-
*mask = ctrl | root->osc_control_set;
/* No need to evaluate _OSC if the control was already granted. */
if ((root->osc_control_set & ctrl) == ctrl)
- goto out;
+ return AE_OK;
/* Need to check the available controls bits before requesting them. */
while (*mask) {
status = acpi_pci_query_osc(root, root->osc_support_set, mask);
if (ACPI_FAILURE(status))
- goto out;
+ return status;
if (ctrl == *mask)
break;
decode_osc_control(root, "platform does not support",
@@ -392,21 +383,19 @@ acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
if ((ctrl & req) != req) {
decode_osc_control(root, "not requesting control; platform does not support",
req & ~(ctrl));
- status = AE_SUPPORT;
- goto out;
+ return AE_SUPPORT;
}
capbuf[OSC_QUERY_DWORD] = 0;
capbuf[OSC_SUPPORT_DWORD] = root->osc_support_set;
capbuf[OSC_CONTROL_DWORD] = ctrl;
status = acpi_pci_run_osc(handle, capbuf, mask);
- if (ACPI_SUCCESS(status))
- root->osc_control_set = *mask;
-out:
- mutex_unlock(&osc_lock);
- return status;
+ if (ACPI_FAILURE(status))
+ return status;
+
+ root->osc_control_set = *mask;
+ return AE_OK;
}
-EXPORT_SYMBOL(acpi_pci_osc_control_set);
static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
bool is_pcie)
@@ -452,9 +441,8 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
if ((status == AE_NOT_FOUND) && !is_pcie)
return;
- dev_info(&device->dev, "_OSC failed (%s)%s\n",
- acpi_format_exception(status),
- pcie_aspm_support_enabled() ? "; disabling ASPM" : "");
+ dev_info(&device->dev, "_OSC: platform retains control of PCIe features (%s)\n",
+ acpi_format_exception(status));
return;
}
@@ -510,7 +498,7 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
} else {
decode_osc_control(root, "OS requested", requested);
decode_osc_control(root, "platform willing to grant", control);
- dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n",
+ dev_info(&device->dev, "_OSC: platform retains control of PCIe features (%s)\n",
acpi_format_exception(status));
/*
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
new file mode 100644
index 000000000000..dd2fbf38e414
--- /dev/null
+++ b/drivers/acpi/platform_profile.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/* Platform profile sysfs interface */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/platform_profile.h>
+#include <linux/sysfs.h>
+
+static struct platform_profile_handler *cur_profile;
+static DEFINE_MUTEX(profile_lock);
+
+static const char * const profile_names[] = {
+ [PLATFORM_PROFILE_LOW_POWER] = "low-power",
+ [PLATFORM_PROFILE_COOL] = "cool",
+ [PLATFORM_PROFILE_QUIET] = "quiet",
+ [PLATFORM_PROFILE_BALANCED] = "balanced",
+ [PLATFORM_PROFILE_BALANCED_PERFORMANCE] = "balanced-performance",
+ [PLATFORM_PROFILE_PERFORMANCE] = "performance",
+};
+static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST);
+
+static ssize_t platform_profile_choices_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int len = 0;
+ int err, i;
+
+ err = mutex_lock_interruptible(&profile_lock);
+ if (err)
+ return err;
+
+ if (!cur_profile) {
+ mutex_unlock(&profile_lock);
+ return -ENODEV;
+ }
+
+ for_each_set_bit(i, cur_profile->choices, PLATFORM_PROFILE_LAST) {
+ if (len == 0)
+ len += sysfs_emit_at(buf, len, "%s", profile_names[i]);
+ else
+ len += sysfs_emit_at(buf, len, " %s", profile_names[i]);
+ }
+ len += sysfs_emit_at(buf, len, "\n");
+ mutex_unlock(&profile_lock);
+ return len;
+}
+
+static ssize_t platform_profile_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum platform_profile_option profile = PLATFORM_PROFILE_BALANCED;
+ int err;
+
+ err = mutex_lock_interruptible(&profile_lock);
+ if (err)
+ return err;
+
+ if (!cur_profile) {
+ mutex_unlock(&profile_lock);
+ return -ENODEV;
+ }
+
+ err = cur_profile->profile_get(cur_profile, &profile);
+ mutex_unlock(&profile_lock);
+ if (err)
+ return err;
+
+ /* Check that profile is a valid index */
+ if (WARN_ON((profile < 0) || (profile >= ARRAY_SIZE(profile_names))))
+ return -EIO;
+
+ return sysfs_emit(buf, "%s\n", profile_names[profile]);
+}
+
+static ssize_t platform_profile_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err, i;
+
+ err = mutex_lock_interruptible(&profile_lock);
+ if (err)
+ return err;
+
+ if (!cur_profile) {
+ mutex_unlock(&profile_lock);
+ return -ENODEV;
+ }
+
+ /* Scan for a matching profile */
+ i = sysfs_match_string(profile_names, buf);
+ if (i < 0) {
+ mutex_unlock(&profile_lock);
+ return -EINVAL;
+ }
+
+ /* Check that platform supports this profile choice */
+ if (!test_bit(i, cur_profile->choices)) {
+ mutex_unlock(&profile_lock);
+ return -EOPNOTSUPP;
+ }
+
+ err = cur_profile->profile_set(cur_profile, i);
+ mutex_unlock(&profile_lock);
+ if (err)
+ return err;
+ return count;
+}
+
+static DEVICE_ATTR_RO(platform_profile_choices);
+static DEVICE_ATTR_RW(platform_profile);
+
+static struct attribute *platform_profile_attrs[] = {
+ &dev_attr_platform_profile_choices.attr,
+ &dev_attr_platform_profile.attr,
+ NULL
+};
+
+static const struct attribute_group platform_profile_group = {
+ .attrs = platform_profile_attrs
+};
+
+void platform_profile_notify(void)
+{
+ if (!cur_profile)
+ return;
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+}
+EXPORT_SYMBOL_GPL(platform_profile_notify);
+
+int platform_profile_register(struct platform_profile_handler *pprof)
+{
+ int err;
+
+ mutex_lock(&profile_lock);
+ /* We can only have one active profile */
+ if (cur_profile) {
+ mutex_unlock(&profile_lock);
+ return -EEXIST;
+ }
+
+ /* Sanity check that the profile handler fields are set */
+ if (!pprof || bitmap_empty(pprof->choices, PLATFORM_PROFILE_LAST) ||
+ !pprof->profile_set || !pprof->profile_get) {
+ mutex_unlock(&profile_lock);
+ return -EINVAL;
+ }
+
+ err = sysfs_create_group(acpi_kobj, &platform_profile_group);
+ if (err) {
+ mutex_unlock(&profile_lock);
+ return err;
+ }
+
+ cur_profile = pprof;
+ mutex_unlock(&profile_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(platform_profile_register);
+
+int platform_profile_remove(void)
+{
+ sysfs_remove_group(acpi_kobj, &platform_profile_group);
+
+ mutex_lock(&profile_lock);
+ cur_profile = NULL;
+ mutex_unlock(&profile_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(platform_profile_remove);
+
+MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
+MODULE_LICENSE("GPL");
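For context, a minimal sketch of how a platform driver could hook into this new interface; the demo_* names and callback bodies are hypothetical, while the handler fields, platform_profile_register() and the resulting /sys/firmware/acpi/platform_profile{,_choices} files come from the code above:

#include <linux/bitops.h>
#include <linux/platform_profile.h>

static int demo_profile_get(struct platform_profile_handler *pprof,
			    enum platform_profile_option *profile)
{
	*profile = PLATFORM_PROFILE_BALANCED;	/* would query firmware here */
	return 0;
}

static int demo_profile_set(struct platform_profile_handler *pprof,
			    enum platform_profile_option profile)
{
	return 0;				/* would program firmware here */
}

static struct platform_profile_handler demo_handler = {
	.profile_get = demo_profile_get,
	.profile_set = demo_profile_set,
};

static int demo_register_profiles(void)
{
	/* Advertise only the profiles the platform can actually honour. */
	set_bit(PLATFORM_PROFILE_LOW_POWER, demo_handler.choices);
	set_bit(PLATFORM_PROFILE_BALANCED, demo_handler.choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, demo_handler.choices);

	return platform_profile_register(&demo_handler);
}

Unregistering is the mirror image: platform_profile_remove() tears down the sysfs group and clears the active handler.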
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 189a0d4c6d06..9b608b55d2b2 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -21,6 +21,8 @@
* may be shared by multiple devices.
*/
+#define pr_fmt(fmt) "ACPI: PM: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -32,8 +34,6 @@
#include "sleep.h"
#include "internal.h"
-#define _COMPONENT ACPI_POWER_COMPONENT
-ACPI_MODULE_NAME("power");
#define ACPI_POWER_CLASS "power_resource"
#define ACPI_POWER_DEVICE_NAME "Power Resource"
#define ACPI_POWER_RESOURCE_STATE_OFF 0x00
@@ -181,9 +181,6 @@ static int acpi_power_get_state(acpi_handle handle, int *state)
{
acpi_status status = AE_OK;
unsigned long long sta = 0;
- char node_name[5];
- struct acpi_buffer buffer = { sizeof(node_name), node_name };
-
if (!handle || !state)
return -EINVAL;
@@ -195,11 +192,8 @@ static int acpi_power_get_state(acpi_handle handle, int *state)
*state = (sta & 0x01)?ACPI_POWER_RESOURCE_STATE_ON:
ACPI_POWER_RESOURCE_STATE_OFF;
- acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] is %s\n",
- node_name,
- *state ? "on" : "off"));
+ acpi_handle_debug(handle, "Power resource is %s\n",
+ *state ? "on" : "off");
return 0;
}
@@ -229,8 +223,7 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
break;
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource list is %s\n",
- cur_state ? "on" : "off"));
+ pr_debug("Power resource list is %s\n", cur_state ? "on" : "off");
*state = cur_state;
return 0;
@@ -357,8 +350,7 @@ static int __acpi_power_on(struct acpi_power_resource *resource)
if (ACPI_FAILURE(status))
return -ENODEV;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n",
- resource->name));
+ pr_debug("Power resource [%s] turned on\n", resource->name);
/*
* If there are other dependents on this power resource we need to
@@ -383,9 +375,7 @@ static int acpi_power_on_unlocked(struct acpi_power_resource *resource)
int result = 0;
if (resource->ref_count++) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Power resource [%s] already on\n",
- resource->name));
+ pr_debug("Power resource [%s] already on\n", resource->name);
} else {
result = __acpi_power_on(resource);
if (result)
@@ -413,8 +403,8 @@ static int __acpi_power_off(struct acpi_power_resource *resource)
if (ACPI_FAILURE(status))
return -ENODEV;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned off\n",
- resource->name));
+ pr_debug("Power resource [%s] turned off\n", resource->name);
+
return 0;
}
@@ -423,16 +413,12 @@ static int acpi_power_off_unlocked(struct acpi_power_resource *resource)
int result = 0;
if (!resource->ref_count) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Power resource [%s] already off\n",
- resource->name));
+ pr_debug("Power resource [%s] already off\n", resource->name);
return 0;
}
if (--resource->ref_count) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Power resource [%s] still in use\n",
- resource->name));
+ pr_debug("Power resource [%s] still in use\n", resource->name);
} else {
result = __acpi_power_off(resource);
if (result)
@@ -672,7 +658,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
if (ACPI_SUCCESS(status)) {
return 0;
} else if (status != AE_NOT_FOUND) {
- printk(KERN_ERR PREFIX "_DSW execution failed\n");
+ acpi_handle_info(dev->handle, "_DSW execution failed\n");
dev->wakeup.flags.valid = 0;
return -ENODEV;
}
@@ -680,7 +666,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
/* Execute _PSW */
status = acpi_execute_simple_method(dev->handle, "_PSW", enable);
if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
- printk(KERN_ERR PREFIX "_PSW execution failed\n");
+ acpi_handle_info(dev->handle, "_PSW execution failed\n");
dev->wakeup.flags.valid = 0;
return -ENODEV;
}
@@ -886,15 +872,16 @@ static void acpi_release_power_resource(struct device *dev)
kfree(resource);
}
-static ssize_t acpi_power_in_use_show(struct device *dev,
- struct device_attribute *attr,
- char *buf) {
+static ssize_t resource_in_use_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
struct acpi_power_resource *resource;
resource = to_power_resource(to_acpi_device(dev));
return sprintf(buf, "%u\n", !!resource->ref_count);
}
-static DEVICE_ATTR(resource_in_use, 0444, acpi_power_in_use_show, NULL);
+static DEVICE_ATTR_RO(resource_in_use);
static void acpi_power_sysfs_remove(struct acpi_device *device)
{
@@ -960,8 +947,8 @@ int acpi_add_power_resource(acpi_handle handle)
if (result)
goto err;
- printk(KERN_INFO PREFIX "%s [%s] (%s)\n", acpi_device_name(device),
- acpi_device_bid(device), state ? "on" : "off");
+ pr_info("%s [%s] (%s)\n", acpi_device_name(device),
+ acpi_device_bid(device), state ? "on" : "off");
device->flags.match_driver = true;
result = acpi_device_add(device, acpi_release_power_resource);
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 24e87b630573..e312ebaed8db 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -564,7 +564,7 @@ int acpi_node_prop_get(const struct fwnode_handle *fwnode,
/**
* acpi_data_get_property_array - return an ACPI array property with given name
- * @adev: ACPI data object to get the property from
+ * @data: ACPI data object to get the property from
* @name: Name of the property
* @type: Expected type of array elements
* @obj: Location to store a pointer to the property value (if not NULL)
@@ -787,9 +787,6 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
const union acpi_object *obj;
int ret;
- if (!val)
- return -EINVAL;
-
if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64) {
ret = acpi_data_get_property(data, propname, ACPI_TYPE_INTEGER, &obj);
if (ret)
@@ -799,28 +796,43 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
case DEV_PROP_U8:
if (obj->integer.value > U8_MAX)
return -EOVERFLOW;
- *(u8 *)val = obj->integer.value;
+
+ if (val)
+ *(u8 *)val = obj->integer.value;
+
break;
case DEV_PROP_U16:
if (obj->integer.value > U16_MAX)
return -EOVERFLOW;
- *(u16 *)val = obj->integer.value;
+
+ if (val)
+ *(u16 *)val = obj->integer.value;
+
break;
case DEV_PROP_U32:
if (obj->integer.value > U32_MAX)
return -EOVERFLOW;
- *(u32 *)val = obj->integer.value;
+
+ if (val)
+ *(u32 *)val = obj->integer.value;
+
break;
default:
- *(u64 *)val = obj->integer.value;
+ if (val)
+ *(u64 *)val = obj->integer.value;
+
break;
}
+
+ if (!val)
+ return 1;
} else if (proptype == DEV_PROP_STRING) {
ret = acpi_data_get_property(data, propname, ACPI_TYPE_STRING, &obj);
if (ret)
return ret;
- *(char **)val = obj->string.pointer;
+ if (val)
+ *(char **)val = obj->string.pointer;
return 1;
} else {
@@ -829,20 +841,6 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
return ret;
}
-int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
- enum dev_prop_type proptype, void *val)
-{
- int ret;
-
- if (!adev)
- return -EINVAL;
-
- ret = acpi_data_prop_read_single(&adev->data, propname, proptype, val);
- if (ret < 0 || proptype != ACPI_TYPE_STRING)
- return ret;
- return 0;
-}
-
static int acpi_copy_property_array_u8(const union acpi_object *items, u8 *val,
size_t nval)
{
@@ -928,10 +926,20 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
const union acpi_object *items;
int ret;
- if (val && nval == 1) {
+ if (nval == 1 || !val) {
ret = acpi_data_prop_read_single(data, propname, proptype, val);
- if (ret >= 0)
+ /*
+ * The overflow error means that the property is there and it is
+ * single-value, but its type does not match, so return.
+ */
+ if (ret >= 0 || ret == -EOVERFLOW)
return ret;
+
+ /*
+ * Reading this property as a single-value one failed, but its
+ * value may still be represented as one-element array, so
+ * continue.
+ */
}
ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj);
@@ -973,12 +981,6 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
return ret;
}
-int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname,
- enum dev_prop_type proptype, void *val, size_t nval)
-{
- return adev ? acpi_data_prop_read(&adev->data, propname, proptype, val, nval) : -EINVAL;
-}
-
/**
* acpi_node_prop_read - retrieve the value of an ACPI property with given name.
* @fwnode: Firmware node to get the property from.
@@ -991,9 +993,9 @@ int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname,
* of the property. Otherwise, read at most @nval values to the array at the
* location pointed to by @val.
*/
-int acpi_node_prop_read(const struct fwnode_handle *fwnode,
- const char *propname, enum dev_prop_type proptype,
- void *val, size_t nval)
+static int acpi_node_prop_read(const struct fwnode_handle *fwnode,
+ const char *propname, enum dev_prop_type proptype,
+ void *val, size_t nval)
{
return acpi_data_prop_read(acpi_device_data_of_node(fwnode),
propname, proptype, val, nval);
@@ -1210,8 +1212,7 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
/**
* acpi_graph_get_remote_endpoint - Parses and returns remote end of an endpoint
- * @fwnode: Endpoint firmware node pointing to a remote device
- * @endpoint: Firmware node of remote endpoint is filled here if not %NULL
+ * @__fwnode: Endpoint firmware node pointing to a remote device
*
* Returns the remote endpoint corresponding to @__fwnode. NULL on error.
*/
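The acpi_data_prop_read_single()/acpi_data_prop_read() changes above let the ACPI backend report success with a NULL destination, so a caller can ask for a property's element count (or mere presence) through the generic device-property API before allocating a buffer. A hedged caller sketch; the "vendor-channels" property name is made up:

#include <linux/device.h>
#include <linux/property.h>

static int count_example(struct device *dev)
{
	int n;

	/* With val == NULL and nval == 0 the return value is the element count. */
	n = device_property_read_u32_array(dev, "vendor-channels", NULL, 0);
	if (n < 0)
		return n;	/* property missing or not integer-valued */

	dev_info(dev, "vendor-channels has %d element(s)\n", n);
	return 0;
}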
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 1db063b02f63..a184529d8fa4 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -19,8 +19,6 @@
#include "internal.h"
-#define _COMPONENT ACPI_BUS_COMPONENT
-ACPI_MODULE_NAME("scan");
extern struct acpi_device *acpi_root;
#define ACPI_BUS_CLASS "system_bus"
@@ -265,8 +263,7 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
return error;
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Hot-removing device %s...\n", dev_name(&device->dev)));
+ acpi_handle_debug(handle, "Ejecting\n");
acpi_bus_trim(device);
@@ -578,29 +575,31 @@ static void acpi_scan_drop_device(acpi_handle handle, void *context)
mutex_unlock(&acpi_device_del_lock);
}
-static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
- void (*callback)(void *))
+static struct acpi_device *handle_to_device(acpi_handle handle,
+ void (*callback)(void *))
{
+ struct acpi_device *adev = NULL;
acpi_status status;
- if (!device)
- return -EINVAL;
-
- *device = NULL;
-
status = acpi_get_data_full(handle, acpi_scan_drop_device,
- (void **)device, callback);
- if (ACPI_FAILURE(status) || !*device) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
- handle));
- return -ENODEV;
+ (void **)&adev, callback);
+ if (ACPI_FAILURE(status) || !adev) {
+ acpi_handle_debug(handle, "No context!\n");
+ return NULL;
}
- return 0;
+ return adev;
}
int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
{
- return acpi_get_device_data(handle, device, NULL);
+ if (!device)
+ return -EINVAL;
+
+ *device = handle_to_device(handle, NULL);
+ if (!*device)
+ return -ENODEV;
+
+ return 0;
}
EXPORT_SYMBOL(acpi_bus_get_device);
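Both lookup helpers now funnel through handle_to_device(); the only difference is whether a reference is taken on the returned device. A short usage sketch (lookup_example() is illustrative):

#include <linux/acpi.h>

static void lookup_example(acpi_handle handle)
{
	struct acpi_device *adev;

	/* No reference taken; only safe while the device cannot go away. */
	if (!acpi_bus_get_device(handle, &adev))
		dev_dbg(&adev->dev, "found via acpi_bus_get_device()\n");

	/* Reference-counted variant; must be balanced with a put. */
	adev = acpi_bus_get_acpi_device(handle);
	if (adev) {
		dev_dbg(&adev->dev, "found via acpi_bus_get_acpi_device()\n");
		acpi_bus_put_acpi_device(adev);
	}
}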
@@ -612,10 +611,7 @@ static void get_acpi_device(void *dev)
struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle)
{
- struct acpi_device *adev = NULL;
-
- acpi_get_device_data(handle, &adev, get_acpi_device);
- return adev;
+ return handle_to_device(handle, get_acpi_device);
}
void acpi_bus_put_acpi_device(struct acpi_device *adev)
@@ -623,12 +619,23 @@ void acpi_bus_put_acpi_device(struct acpi_device *adev)
put_device(&adev->dev);
}
+static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
+{
+ struct acpi_device_bus_id *acpi_device_bus_id;
+
+ /* Find suitable bus_id and instance number in acpi_bus_id_list. */
+ list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
+ if (!strcmp(acpi_device_bus_id->bus_id, dev_id))
+ return acpi_device_bus_id;
+ }
+ return NULL;
+}
+
int acpi_device_add(struct acpi_device *device,
void (*release)(struct device *))
{
+ struct acpi_device_bus_id *acpi_device_bus_id;
int result;
- struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
- int found = 0;
if (device->handle) {
acpi_status status;
@@ -654,38 +661,26 @@ int acpi_device_add(struct acpi_device *device,
INIT_LIST_HEAD(&device->del_list);
mutex_init(&device->physical_node_lock);
- new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
- if (!new_bus_id) {
- pr_err(PREFIX "Memory allocation error\n");
- result = -ENOMEM;
- goto err_detach;
- }
-
mutex_lock(&acpi_device_lock);
- /*
- * Find suitable bus_id and instance number in acpi_bus_id_list
- * If failed, create one and link it into acpi_bus_id_list
- */
- list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
- if (!strcmp(acpi_device_bus_id->bus_id,
- acpi_device_hid(device))) {
- acpi_device_bus_id->instance_no++;
- found = 1;
- kfree(new_bus_id);
- break;
+
+ acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device));
+ if (acpi_device_bus_id) {
+ acpi_device_bus_id->instance_no++;
+ } else {
+ acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id),
+ GFP_KERNEL);
+ if (!acpi_device_bus_id) {
+ result = -ENOMEM;
+ goto err_unlock;
}
- }
- if (!found) {
- acpi_device_bus_id = new_bus_id;
acpi_device_bus_id->bus_id =
kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
if (!acpi_device_bus_id->bus_id) {
- pr_err(PREFIX "Memory allocation error for bus id\n");
+ kfree(acpi_device_bus_id);
result = -ENOMEM;
- goto err_free_new_bus_id;
+ goto err_unlock;
}
- acpi_device_bus_id->instance_no = 0;
list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
}
dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
@@ -695,10 +690,12 @@ int acpi_device_add(struct acpi_device *device,
if (device->wakeup.flags.valid)
list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
+
mutex_unlock(&acpi_device_lock);
if (device->parent)
device->dev.parent = &device->parent->dev;
+
device->dev.bus = &acpi_bus_type;
device->dev.release = release;
result = device_add(&device->dev);
@@ -714,20 +711,19 @@ int acpi_device_add(struct acpi_device *device,
return 0;
- err:
+err:
mutex_lock(&acpi_device_lock);
+
if (device->parent)
list_del(&device->node);
- list_del(&device->wakeup_list);
- err_free_new_bus_id:
- if (!found)
- kfree(new_bus_id);
+ list_del(&device->wakeup_list);
+err_unlock:
mutex_unlock(&acpi_device_lock);
- err_detach:
acpi_detach_data(device->handle, acpi_scan_drop_device);
+
return result;
}
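The rewritten path looks the bus id up under acpi_device_lock and allocates only when nothing matches, removing the speculative kzalloc()/kfree() of the old code. The same find-or-create shape in generic form; struct entry and the surrounding names are purely illustrative:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>

struct entry {
	const char *id;
	struct list_head node;
};

static LIST_HEAD(entry_list);
static DEFINE_MUTEX(list_lock);

static struct entry *find_entry(const char *id)
{
	struct entry *e;

	list_for_each_entry(e, &entry_list, node)
		if (!strcmp(e->id, id))
			return e;

	return NULL;
}

static struct entry *get_or_create_entry(const char *id)
{
	struct entry *e;

	mutex_lock(&list_lock);

	e = find_entry(id);
	if (e)
		goto out;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		goto out;

	e->id = kstrdup_const(id, GFP_KERNEL);
	if (!e->id) {
		kfree(e);
		e = NULL;
		goto out;
	}

	list_add_tail(&e->node, &entry_list);
out:
	mutex_unlock(&list_lock);
	return e;
}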
@@ -830,7 +826,8 @@ static int acpi_bus_extract_wakeup_device_power_package(struct acpi_device *dev)
/* _PRW */
status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW"));
+ acpi_handle_info(handle, "_PRW evaluation failed: %s\n",
+ acpi_format_exception(status));
return err;
}
@@ -935,7 +932,7 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
err = acpi_bus_extract_wakeup_device_power_package(device);
if (err) {
- dev_err(&device->dev, "_PRW evaluation error: %d\n", err);
+ dev_err(&device->dev, "Unable to extract wakeup power resources");
return;
}
@@ -1171,8 +1168,7 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
if (acpi_has_method(handle, "_BCM") &&
acpi_has_method(handle, "_BCL")) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight "
- "support\n"));
+ acpi_handle_debug(handle, "Found generic backlight support\n");
*cap |= ACPI_VIDEO_BACKLIGHT;
/* We have backlight support, no need to scan further */
return AE_CTRL_TERMINATE;
@@ -1663,17 +1659,15 @@ static int acpi_add_single_object(struct acpi_device **child,
acpi_handle handle, int type,
unsigned long long sta)
{
- int result;
- struct acpi_device *device;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_device_info *info = NULL;
+ struct acpi_device *device;
+ int result;
if (handle != ACPI_ROOT_OBJECT && type == ACPI_BUS_TYPE_DEVICE)
acpi_get_object_info(handle, &info);
device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
if (!device) {
- printk(KERN_ERR PREFIX "Memory allocation error\n");
kfree(info);
return -ENOMEM;
}
@@ -1700,11 +1694,11 @@ static int acpi_add_single_object(struct acpi_device **child,
acpi_power_add_remove_device(device, true);
acpi_device_add_finalize(device);
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Added %s [%s] parent %s\n",
- dev_name(&device->dev), (char *) buffer.pointer,
- device->parent ? dev_name(&device->parent->dev) : "(null)"));
- kfree(buffer.pointer);
+
+ acpi_handle_debug(handle, "Added as %s, parent %s\n",
+ dev_name(&device->dev), device->parent ?
+ dev_name(&device->parent->dev) : "(null)");
+
*child = device;
return 0;
}
@@ -2123,12 +2117,12 @@ void acpi_walk_dep_device_list(acpi_handle handle)
list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
if (dep->supplier == handle) {
acpi_bus_get_device(dep->consumer, &adev);
- if (!adev)
- continue;
- adev->dep_unmet--;
- if (!adev->dep_unmet)
- acpi_bus_attach(adev, true);
+ if (adev) {
+ adev->dep_unmet--;
+ if (!adev->dep_unmet)
+ acpi_bus_attach(adev, true);
+ }
list_del(&dep->node);
kfree(dep);
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index a5cc4f3bb1e3..8baf7644a0d0 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -52,19 +52,12 @@ static const struct acpi_dlayer acpi_debug_layers[] = {
ACPI_DEBUG_INIT(ACPI_COMPILER),
ACPI_DEBUG_INIT(ACPI_TOOLS),
- ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_AC_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT),
ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT),
ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT),
ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT),
ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT),
ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT),
ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT),
- ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT),
ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT),
};
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 12c0ece746f0..95105db642b9 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -13,6 +13,8 @@
* concepts of 'multiple limiters', upper/lower limits, etc.
*/
+#define pr_fmt(fmt) "ACPI: thermal: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dmi.h>
@@ -29,8 +31,6 @@
#include <linux/uaccess.h>
#include <linux/units.h>
-#define PREFIX "ACPI: "
-
#define ACPI_THERMAL_CLASS "thermal_zone"
#define ACPI_THERMAL_DEVICE_NAME "Thermal Zone"
#define ACPI_THERMAL_NOTIFY_TEMPERATURE 0x80
@@ -43,9 +43,6 @@
#define ACPI_THERMAL_MAX_ACTIVE 10
#define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65
-#define _COMPONENT ACPI_THERMAL_COMPONENT
-ACPI_MODULE_NAME("thermal");
-
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Thermal Zone Driver");
MODULE_LICENSE("GPL");
@@ -174,6 +171,8 @@ struct acpi_thermal {
struct thermal_zone_device *thermal_zone;
int kelvin_offset; /* in millidegrees */
struct work_struct thermal_check_work;
+ struct mutex thermal_check_lock;
+ refcount_t thermal_check_count;
};
/* --------------------------------------------------------------------------
@@ -195,8 +194,9 @@ static int acpi_thermal_get_temperature(struct acpi_thermal *tz)
return -ENODEV;
tz->temperature = tmp;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Temperature is %lu dK\n",
- tz->temperature));
+
+ acpi_handle_debug(tz->device->handle, "Temperature is %lu dK\n",
+ tz->temperature);
return 0;
}
@@ -214,8 +214,8 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
return -ENODEV;
tz->polling_frequency = tmp;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Polling frequency is %lu dS\n",
- tz->polling_frequency));
+ acpi_handle_debug(tz->device->handle, "Polling frequency is %lu dS\n",
+ tz->polling_frequency);
return 0;
}
@@ -252,12 +252,12 @@ static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode)
* 2.TODO: Devices listed in _PSL, _ALx, _TZD may change.
* We need to re-bind the cooling devices of a thermal zone when this occurs.
*/
-#define ACPI_THERMAL_TRIPS_EXCEPTION(flags, str) \
+#define ACPI_THERMAL_TRIPS_EXCEPTION(flags, tz, str) \
do { \
if (flags != ACPI_TRIPS_INIT) \
- ACPI_EXCEPTION((AE_INFO, AE_ERROR, \
+ acpi_handle_info(tz->device->handle, \
"ACPI thermal trip point %s changed\n" \
- "Please send acpidump to linux-acpi@vger.kernel.org", str)); \
+ "Please report to linux-acpi@vger.kernel.org\n", str); \
} while (0)
static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
@@ -281,17 +281,17 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
*/
if (ACPI_FAILURE(status)) {
tz->trips.critical.flags.valid = 0;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "No critical threshold\n"));
+ acpi_handle_debug(tz->device->handle,
+ "No critical threshold\n");
} else if (tmp <= 2732) {
- pr_warn(FW_BUG "Invalid critical threshold (%llu)\n",
+ pr_info(FW_BUG "Invalid critical threshold (%llu)\n",
tmp);
tz->trips.critical.flags.valid = 0;
} else {
tz->trips.critical.flags.valid = 1;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ acpi_handle_debug(tz->device->handle,
"Found critical threshold [%lu]\n",
- tz->trips.critical.temperature));
+ tz->trips.critical.temperature);
}
if (tz->trips.critical.flags.valid == 1) {
if (crt == -1) {
@@ -303,8 +303,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
* Allow override critical threshold
*/
if (crt_k > tz->trips.critical.temperature)
- pr_warn(PREFIX "Critical threshold %d C\n",
- crt);
+ pr_info("Critical threshold %d C\n", crt);
+
tz->trips.critical.temperature = crt_k;
}
}
@@ -316,14 +316,14 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
"_HOT", NULL, &tmp);
if (ACPI_FAILURE(status)) {
tz->trips.hot.flags.valid = 0;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "No hot threshold\n"));
+ acpi_handle_debug(tz->device->handle,
+ "No hot threshold\n");
} else {
tz->trips.hot.temperature = tmp;
tz->trips.hot.flags.valid = 1;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Found hot threshold [%lu]\n",
- tz->trips.hot.temperature));
+ acpi_handle_debug(tz->device->handle,
+ "Found hot threshold [%lu]\n",
+ tz->trips.hot.temperature);
}
}
@@ -376,7 +376,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
status = acpi_evaluate_reference(tz->device->handle, "_PSL",
NULL, &devices);
if (ACPI_FAILURE(status)) {
- pr_warn(PREFIX "Invalid passive threshold\n");
+ acpi_handle_info(tz->device->handle,
+ "Invalid passive threshold\n");
tz->trips.passive.flags.valid = 0;
}
else
@@ -386,12 +387,12 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
sizeof(struct acpi_handle_list))) {
memcpy(&tz->trips.passive.devices, &devices,
sizeof(struct acpi_handle_list));
- ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device");
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
}
if ((flag & ACPI_TRIPS_PASSIVE) || (flag & ACPI_TRIPS_DEVICES)) {
if (valid != tz->trips.passive.flags.valid)
- ACPI_THERMAL_TRIPS_EXCEPTION(flag, "state");
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state");
}
/* Active (optional) */
@@ -438,8 +439,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
status = acpi_evaluate_reference(tz->device->handle,
name, NULL, &devices);
if (ACPI_FAILURE(status)) {
- pr_warn(PREFIX "Invalid active%d threshold\n",
- i);
+ acpi_handle_info(tz->device->handle,
+ "Invalid active%d threshold\n", i);
tz->trips.active[i].flags.valid = 0;
}
else
@@ -449,12 +450,12 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
sizeof(struct acpi_handle_list))) {
memcpy(&tz->trips.active[i].devices, &devices,
sizeof(struct acpi_handle_list));
- ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device");
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
}
if ((flag & ACPI_TRIPS_ACTIVE) || (flag & ACPI_TRIPS_DEVICES))
if (valid != tz->trips.active[i].flags.valid)
- ACPI_THERMAL_TRIPS_EXCEPTION(flag, "state");
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state");
if (!tz->trips.active[i].flags.valid)
break;
@@ -467,7 +468,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if (ACPI_SUCCESS(status)
&& memcmp(&tz->devices, &devices, sizeof(devices))) {
tz->devices = devices;
- ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device");
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
}
@@ -495,14 +496,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
return 0;
}
-static void acpi_thermal_check(void *data)
-{
- struct acpi_thermal *tz = data;
-
- thermal_zone_device_update(tz->thermal_zone,
- THERMAL_EVENT_UNSPECIFIED);
-}
-
/* sys I/F for generic thermal sysfs support */
static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
@@ -677,27 +670,24 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
return 0;
}
-
-static int thermal_notify(struct thermal_zone_device *thermal, int trip,
- enum thermal_trip_type trip_type)
+static void acpi_thermal_zone_device_hot(struct thermal_zone_device *thermal)
{
- u8 type = 0;
struct acpi_thermal *tz = thermal->devdata;
- if (trip_type == THERMAL_TRIP_CRITICAL)
- type = ACPI_THERMAL_NOTIFY_CRITICAL;
- else if (trip_type == THERMAL_TRIP_HOT)
- type = ACPI_THERMAL_NOTIFY_HOT;
- else
- return 0;
-
acpi_bus_generate_netlink_event(tz->device->pnp.device_class,
- dev_name(&tz->device->dev), type, 1);
+ dev_name(&tz->device->dev),
+ ACPI_THERMAL_NOTIFY_HOT, 1);
+}
+
+static void acpi_thermal_zone_device_critical(struct thermal_zone_device *thermal)
+{
+ struct acpi_thermal *tz = thermal->devdata;
- if (trip_type == THERMAL_TRIP_CRITICAL && nocrt)
- return 1;
+ acpi_bus_generate_netlink_event(tz->device->pnp.device_class,
+ dev_name(&tz->device->dev),
+ ACPI_THERMAL_NOTIFY_CRITICAL, 1);
- return 0;
+ thermal_zone_device_critical(thermal);
}
static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
@@ -767,25 +757,6 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
}
}
- for (i = 0; i < tz->devices.count; i++) {
- handle = tz->devices.handles[i];
- status = acpi_bus_get_device(handle, &dev);
- if (ACPI_SUCCESS(status) && (dev == device)) {
- if (bind)
- result = thermal_zone_bind_cooling_device
- (thermal, THERMAL_TRIPS_NONE,
- cdev, THERMAL_NO_LIMIT,
- THERMAL_NO_LIMIT,
- THERMAL_WEIGHT_DEFAULT);
- else
- result = thermal_zone_unbind_cooling_device
- (thermal, THERMAL_TRIPS_NONE,
- cdev);
- if (result)
- goto failed;
- }
- }
-
failed:
return result;
}
@@ -812,7 +783,8 @@ static struct thermal_zone_device_ops acpi_thermal_zone_ops = {
.get_trip_temp = thermal_get_trip_temp,
.get_crit_temp = thermal_get_crit_temp,
.get_trend = thermal_get_trend,
- .notify = thermal_notify,
+ .hot = acpi_thermal_zone_device_hot,
+ .critical = acpi_thermal_zone_device_critical,
};
static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
@@ -900,6 +872,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
Driver Interface
-------------------------------------------------------------------------- */
+static void acpi_queue_thermal_check(struct acpi_thermal *tz)
+{
+ if (!work_pending(&tz->thermal_check_work))
+ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
+}
+
static void acpi_thermal_notify(struct acpi_device *device, u32 event)
{
struct acpi_thermal *tz = acpi_driver_data(device);
@@ -910,23 +888,23 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
switch (event) {
case ACPI_THERMAL_NOTIFY_TEMPERATURE:
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
break;
case ACPI_THERMAL_NOTIFY_THRESHOLDS:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
case ACPI_THERMAL_NOTIFY_DEVICES:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Unsupported event [0x%x]\n", event));
+ acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
+ event);
break;
}
}
@@ -1020,7 +998,25 @@ static void acpi_thermal_check_fn(struct work_struct *work)
{
struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
thermal_check_work);
- acpi_thermal_check(tz);
+
+ /*
+ * In general, it is not sufficient to check the pending bit, because
+ * subsequent instances of this function may be queued after one of them
+ * has started running (e.g. if _TMP sleeps). Avoid bailing out if just
+ * one of them is running, though, because it may have done the actual
+ * check some time ago, so allow at least one of them to block on the
+ * mutex while another one is running the update.
+ */
+ if (!refcount_dec_not_one(&tz->thermal_check_count))
+ return;
+
+ mutex_lock(&tz->thermal_check_lock);
+
+ thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED);
+
+ refcount_inc(&tz->thermal_check_count);
+
+ mutex_unlock(&tz->thermal_check_lock);
}
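The comment above documents why a plain work_pending() check is not enough: the count starts at 3 so that one instance can run the update, a second can wait on the mutex, and any further instances bail out immediately. The same throttle shown in isolation (throttled_check() and update() are illustrative names):

#include <linux/mutex.h>
#include <linux/refcount.h>

static refcount_t check_count = REFCOUNT_INIT(3);
static DEFINE_MUTEX(check_lock);

static void throttled_check(void (*update)(void))
{
	/* Refuse to queue up behind an already-running and an already-waiting
	 * instance; they will pick up the latest state anyway. */
	if (!refcount_dec_not_one(&check_count))
		return;

	mutex_lock(&check_lock);
	update();			/* the expensive, possibly sleeping, work */
	refcount_inc(&check_count);
	mutex_unlock(&check_lock);
}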
static int acpi_thermal_add(struct acpi_device *device)
@@ -1052,9 +1048,11 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
+ refcount_set(&tz->thermal_check_count, 3);
+ mutex_init(&tz->thermal_check_lock);
INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
- pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
+ pr_info("%s [%s] (%ld C)\n", acpi_device_name(device),
acpi_device_bid(device), deci_kelvin_to_celsius(tz->temperature));
goto end;
@@ -1117,7 +1115,7 @@ static int acpi_thermal_resume(struct device *dev)
tz->state.active |= tz->trips.active[i].flags.enabled;
}
- queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
+ acpi_queue_thermal_check(tz);
return AE_OK;
}
@@ -1126,24 +1124,24 @@ static int acpi_thermal_resume(struct device *dev)
static int thermal_act(const struct dmi_system_id *d) {
if (act == 0) {
- pr_notice(PREFIX "%s detected: "
- "disabling all active thermal trip points\n", d->ident);
+ pr_notice("%s detected: disabling all active thermal trip points\n",
+ d->ident);
act = -1;
}
return 0;
}
static int thermal_nocrt(const struct dmi_system_id *d) {
- pr_notice(PREFIX "%s detected: "
- "disabling all critical thermal trip point actions.\n", d->ident);
+ pr_notice("%s detected: disabling all critical thermal trip point actions.\n",
+ d->ident);
nocrt = 1;
return 0;
}
static int thermal_tzp(const struct dmi_system_id *d) {
if (tzp == 0) {
- pr_notice(PREFIX "%s detected: "
- "enabling thermal zone polling\n", d->ident);
+ pr_notice("%s detected: enabling thermal zone polling\n",
+ d->ident);
tzp = 300; /* 300 dS = 30 Seconds */
}
return 0;
@@ -1151,8 +1149,8 @@ static int thermal_tzp(const struct dmi_system_id *d) {
static int thermal_psv(const struct dmi_system_id *d) {
if (psv == 0) {
- pr_notice(PREFIX "%s detected: "
- "disabling all passive thermal trip points\n", d->ident);
+ pr_notice("%s detected: disabling all passive thermal trip points\n",
+ d->ident);
psv = -1;
}
return 0;
@@ -1205,7 +1203,7 @@ static int __init acpi_thermal_init(void)
dmi_check_system(thermal_dmi_table);
if (off) {
- pr_notice(PREFIX "thermal control disabled\n");
+ pr_notice("thermal control disabled\n");
return -ENODEV;
}
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index d5411a166685..682edd913b3b 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -6,6 +6,8 @@
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*/
+#define pr_fmt(fmt) "ACPI: utils: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -18,24 +20,12 @@
#include "internal.h"
#include "sleep.h"
-#define _COMPONENT ACPI_BUS_COMPONENT
-ACPI_MODULE_NAME("utils");
-
/* --------------------------------------------------------------------------
Object Evaluation Helpers
-------------------------------------------------------------------------- */
-static void
-acpi_util_eval_error(acpi_handle h, acpi_string p, acpi_status s)
+static void acpi_util_eval_error(acpi_handle h, acpi_string p, acpi_status s)
{
-#ifdef ACPI_DEBUG_OUTPUT
- char prefix[80] = {'\0'};
- struct acpi_buffer buffer = {sizeof(prefix), prefix};
- acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluate [%s.%s]: %s\n",
- (char *) prefix, p, acpi_format_exception(s)));
-#else
- return;
-#endif
+ acpi_handle_debug(h, "Evaluate [%s]: %s\n", p, acpi_format_exception(s));
}
acpi_status
@@ -53,25 +43,24 @@ acpi_extract_package(union acpi_object *package,
if (!package || (package->type != ACPI_TYPE_PACKAGE)
|| (package->package.count < 1)) {
- printk(KERN_WARNING PREFIX "Invalid package argument\n");
+ pr_debug("Invalid package argument\n");
return AE_BAD_PARAMETER;
}
if (!format || !format->pointer || (format->length < 1)) {
- printk(KERN_WARNING PREFIX "Invalid format argument\n");
+ pr_debug("Invalid format argument\n");
return AE_BAD_PARAMETER;
}
if (!buffer) {
- printk(KERN_WARNING PREFIX "Invalid buffer argument\n");
+ pr_debug("Invalid buffer argument\n");
return AE_BAD_PARAMETER;
}
format_count = (format->length / sizeof(char)) - 1;
if (format_count > package->package.count) {
- printk(KERN_WARNING PREFIX "Format specifies more objects [%d]"
- " than exist in package [%d].\n",
- format_count, package->package.count);
+ pr_debug("Format specifies more objects [%d] than present [%d]\n",
+ format_count, package->package.count);
return AE_BAD_DATA;
}
@@ -99,10 +88,8 @@ acpi_extract_package(union acpi_object *package,
tail_offset += sizeof(char *);
break;
default:
- printk(KERN_WARNING PREFIX "Invalid package element"
- " [%d]: got number, expecting"
- " [%c]\n",
- i, format_string[i]);
+ pr_debug("Invalid package element [%d]: got number, expected [%c]\n",
+ i, format_string[i]);
return AE_BAD_DATA;
}
break;
@@ -123,10 +110,8 @@ acpi_extract_package(union acpi_object *package,
tail_offset += sizeof(u8 *);
break;
default:
- printk(KERN_WARNING PREFIX "Invalid package element"
- " [%d] got string/buffer,"
- " expecting [%c]\n",
- i, format_string[i]);
+ pr_debug("Invalid package element [%d] got string/buffer, expected [%c]\n",
+ i, format_string[i]);
return AE_BAD_DATA;
}
break;
@@ -137,19 +122,15 @@ acpi_extract_package(union acpi_object *package,
tail_offset += sizeof(void *);
break;
default:
- printk(KERN_WARNING PREFIX "Invalid package element"
- " [%d] got reference,"
- " expecting [%c]\n",
- i, format_string[i]);
+ pr_debug("Invalid package element [%d] got reference, expected [%c]\n",
+ i, format_string[i]);
return AE_BAD_DATA;
}
break;
case ACPI_TYPE_PACKAGE:
default:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Found unsupported element at index=%d\n",
- i));
+ pr_debug("Unsupported element at index=%d\n", i);
/* TBD: handle nested packages... */
return AE_SUPPORT;
}
@@ -289,7 +270,7 @@ acpi_evaluate_integer(acpi_handle handle,
*data = element.integer.value;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Return value [%llu]\n", *data));
+ acpi_handle_debug(handle, "Return value [%llu]\n", *data);
return AE_OK;
}
@@ -363,8 +344,7 @@ acpi_evaluate_reference(acpi_handle handle,
/* Get the acpi_handle. */
list->handles[i] = element->reference.handle;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found reference [%p]\n",
- list->handles[i]));
+ acpi_handle_debug(list->handles[i], "Found in reference list\n");
}
end:
@@ -843,12 +823,13 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
EXPORT_SYMBOL(acpi_dev_present);
/**
- * acpi_dev_get_first_match_dev - Return the first match of ACPI device
+ * acpi_dev_get_next_match_dev - Return the next match of ACPI device
+ * @adev: Pointer to the previous acpi_device matching this @hid, @uid and @hrv
* @hid: Hardware ID of the device.
* @uid: Unique ID of the device, pass NULL to not check _UID
* @hrv: Hardware Revision of the device, pass -1 to not check _HRV
*
- * Return the first match of ACPI device if a matching device was present
+ * Return the next match of ACPI device if another matching device was present
* at the moment of invocation, or NULL otherwise.
*
* The caller is responsible to call put_device() on the returned device.
@@ -856,8 +837,9 @@ EXPORT_SYMBOL(acpi_dev_present);
* See additional information in acpi_dev_present() as well.
*/
struct acpi_device *
-acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
+acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv)
{
+ struct device *start = adev ? &adev->dev : NULL;
struct acpi_dev_match_info match = {};
struct device *dev;
@@ -865,9 +847,29 @@ acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
match.uid = uid;
match.hrv = hrv;
- dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
+ dev = bus_find_device(&acpi_bus_type, start, &match, acpi_dev_match_cb);
return dev ? to_acpi_device(dev) : NULL;
}
+EXPORT_SYMBOL(acpi_dev_get_next_match_dev);
+
+/**
+ * acpi_dev_get_first_match_dev - Return the first match of ACPI device
+ * @hid: Hardware ID of the device.
+ * @uid: Unique ID of the device, pass NULL to not check _UID
+ * @hrv: Hardware Revision of the device, pass -1 to not check _HRV
+ *
+ * Return the first match of ACPI device if a matching device was present
+ * at the moment of invocation, or NULL otherwise.
+ *
+ * The caller is responsible to call put_device() on the returned device.
+ *
+ * See additional information in acpi_dev_present() as well.
+ */
+struct acpi_device *
+acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
+{
+ return acpi_dev_get_next_match_dev(NULL, hid, uid, hrv);
+}
EXPORT_SYMBOL(acpi_dev_get_first_match_dev);
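acpi_dev_get_next_match_dev() makes it possible to walk every device sharing a _HID instead of stopping at the first hit. A hedged iteration sketch; the "ABCD0000" HID is made up, and each returned device is dropped with put_device() as required by the kernel-doc above:

#include <linux/acpi.h>

static void walk_matches_example(void)
{
	struct acpi_device *adev = NULL, *prev = NULL;

	while ((adev = acpi_dev_get_next_match_dev(prev, "ABCD0000", NULL, -1))) {
		if (prev)
			put_device(&prev->dev);

		dev_info(&adev->dev, "matching device found\n");
		prev = adev;
	}

	if (prev)
		put_device(&prev->dev);
}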
/*
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index ecc304149067..939ca220bf78 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -56,31 +56,28 @@ amba_lookup(const struct amba_id *table, struct amba_device *dev)
return NULL;
}
-static int amba_match(struct device *dev, struct device_driver *drv)
+static int amba_get_enable_pclk(struct amba_device *pcdev)
{
- struct amba_device *pcdev = to_amba_device(dev);
- struct amba_driver *pcdrv = to_amba_driver(drv);
+ int ret;
- /* When driver_override is set, only bind to the matching driver */
- if (pcdev->driver_override)
- return !strcmp(pcdev->driver_override, drv->name);
+ pcdev->pclk = clk_get(&pcdev->dev, "apb_pclk");
+ if (IS_ERR(pcdev->pclk))
+ return PTR_ERR(pcdev->pclk);
- return amba_lookup(pcdrv->id_table, pcdev) != NULL;
+ ret = clk_prepare_enable(pcdev->pclk);
+ if (ret)
+ clk_put(pcdev->pclk);
+
+ return ret;
}
-static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
+static void amba_put_disable_pclk(struct amba_device *pcdev)
{
- struct amba_device *pcdev = to_amba_device(dev);
- int retval = 0;
-
- retval = add_uevent_var(env, "AMBA_ID=%08x", pcdev->periphid);
- if (retval)
- return retval;
-
- retval = add_uevent_var(env, "MODALIAS=amba:d%08X", pcdev->periphid);
- return retval;
+ clk_disable_unprepare(pcdev->pclk);
+ clk_put(pcdev->pclk);
}
+
static ssize_t driver_override_show(struct device *_dev,
struct device_attribute *attr, char *buf)
{
@@ -152,102 +149,29 @@ static struct attribute *amba_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(amba_dev);
-#ifdef CONFIG_PM
-/*
- * Hooks to provide runtime PM of the pclk (bus clock). It is safe to
- * enable/disable the bus clock at runtime PM suspend/resume as this
- * does not result in loss of context.
- */
-static int amba_pm_runtime_suspend(struct device *dev)
+static int amba_match(struct device *dev, struct device_driver *drv)
{
struct amba_device *pcdev = to_amba_device(dev);
- int ret = pm_generic_runtime_suspend(dev);
+ struct amba_driver *pcdrv = to_amba_driver(drv);
- if (ret == 0 && dev->driver) {
- if (pm_runtime_is_irq_safe(dev))
- clk_disable(pcdev->pclk);
- else
- clk_disable_unprepare(pcdev->pclk);
- }
+ /* When driver_override is set, only bind to the matching driver */
+ if (pcdev->driver_override)
+ return !strcmp(pcdev->driver_override, drv->name);
- return ret;
+ return amba_lookup(pcdrv->id_table, pcdev) != NULL;
}
-static int amba_pm_runtime_resume(struct device *dev)
+static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct amba_device *pcdev = to_amba_device(dev);
- int ret;
-
- if (dev->driver) {
- if (pm_runtime_is_irq_safe(dev))
- ret = clk_enable(pcdev->pclk);
- else
- ret = clk_prepare_enable(pcdev->pclk);
- /* Failure is probably fatal to the system, but... */
- if (ret)
- return ret;
- }
-
- return pm_generic_runtime_resume(dev);
-}
-#endif /* CONFIG_PM */
-
-static const struct dev_pm_ops amba_pm = {
- .suspend = pm_generic_suspend,
- .resume = pm_generic_resume,
- .freeze = pm_generic_freeze,
- .thaw = pm_generic_thaw,
- .poweroff = pm_generic_poweroff,
- .restore = pm_generic_restore,
- SET_RUNTIME_PM_OPS(
- amba_pm_runtime_suspend,
- amba_pm_runtime_resume,
- NULL
- )
-};
-
-/*
- * Primecells are part of the Advanced Microcontroller Bus Architecture,
- * so we call the bus "amba".
- * DMA configuration for platform and AMBA bus is same. So here we reuse
- * platform's DMA config routine.
- */
-struct bus_type amba_bustype = {
- .name = "amba",
- .dev_groups = amba_dev_groups,
- .match = amba_match,
- .uevent = amba_uevent,
- .dma_configure = platform_dma_configure,
- .pm = &amba_pm,
-};
-EXPORT_SYMBOL_GPL(amba_bustype);
-
-static int __init amba_init(void)
-{
- return bus_register(&amba_bustype);
-}
-
-postcore_initcall(amba_init);
-
-static int amba_get_enable_pclk(struct amba_device *pcdev)
-{
- int ret;
-
- pcdev->pclk = clk_get(&pcdev->dev, "apb_pclk");
- if (IS_ERR(pcdev->pclk))
- return PTR_ERR(pcdev->pclk);
-
- ret = clk_prepare_enable(pcdev->pclk);
- if (ret)
- clk_put(pcdev->pclk);
+ int retval = 0;
- return ret;
-}
+ retval = add_uevent_var(env, "AMBA_ID=%08x", pcdev->periphid);
+ if (retval)
+ return retval;
-static void amba_put_disable_pclk(struct amba_device *pcdev)
-{
- clk_disable_unprepare(pcdev->pclk);
- clk_put(pcdev->pclk);
+ retval = add_uevent_var(env, "MODALIAS=amba:d%08X", pcdev->periphid);
+ return retval;
}
/*
@@ -299,10 +223,10 @@ static int amba_remove(struct device *dev)
{
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *drv = to_amba_driver(dev->driver);
- int ret;
pm_runtime_get_sync(dev);
- ret = drv->remove(pcdev);
+ if (drv->remove)
+ drv->remove(pcdev);
pm_runtime_put_noidle(dev);
/* Undo the runtime PM settings in amba_probe() */
@@ -313,15 +237,101 @@ static int amba_remove(struct device *dev)
amba_put_disable_pclk(pcdev);
dev_pm_domain_detach(dev, true);
- return ret;
+ return 0;
}
static void amba_shutdown(struct device *dev)
{
- struct amba_driver *drv = to_amba_driver(dev->driver);
- drv->shutdown(to_amba_device(dev));
+ struct amba_driver *drv;
+
+ if (!dev->driver)
+ return;
+
+ drv = to_amba_driver(dev->driver);
+ if (drv->shutdown)
+ drv->shutdown(to_amba_device(dev));
+}
+
+#ifdef CONFIG_PM
+/*
+ * Hooks to provide runtime PM of the pclk (bus clock). It is safe to
+ * enable/disable the bus clock at runtime PM suspend/resume as this
+ * does not result in loss of context.
+ */
+static int amba_pm_runtime_suspend(struct device *dev)
+{
+ struct amba_device *pcdev = to_amba_device(dev);
+ int ret = pm_generic_runtime_suspend(dev);
+
+ if (ret == 0 && dev->driver) {
+ if (pm_runtime_is_irq_safe(dev))
+ clk_disable(pcdev->pclk);
+ else
+ clk_disable_unprepare(pcdev->pclk);
+ }
+
+ return ret;
+}
+
+static int amba_pm_runtime_resume(struct device *dev)
+{
+ struct amba_device *pcdev = to_amba_device(dev);
+ int ret;
+
+ if (dev->driver) {
+ if (pm_runtime_is_irq_safe(dev))
+ ret = clk_enable(pcdev->pclk);
+ else
+ ret = clk_prepare_enable(pcdev->pclk);
+ /* Failure is probably fatal to the system, but... */
+ if (ret)
+ return ret;
+ }
+
+ return pm_generic_runtime_resume(dev);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops amba_pm = {
+ .suspend = pm_generic_suspend,
+ .resume = pm_generic_resume,
+ .freeze = pm_generic_freeze,
+ .thaw = pm_generic_thaw,
+ .poweroff = pm_generic_poweroff,
+ .restore = pm_generic_restore,
+ SET_RUNTIME_PM_OPS(
+ amba_pm_runtime_suspend,
+ amba_pm_runtime_resume,
+ NULL
+ )
+};
+
+/*
+ * Primecells are part of the Advanced Microcontroller Bus Architecture,
+ * so we call the bus "amba".
+ * DMA configuration for the platform and AMBA buses is the same, so we reuse
+ * the platform's DMA config routine here.
+ */
+struct bus_type amba_bustype = {
+ .name = "amba",
+ .dev_groups = amba_dev_groups,
+ .match = amba_match,
+ .uevent = amba_uevent,
+ .probe = amba_probe,
+ .remove = amba_remove,
+ .shutdown = amba_shutdown,
+ .dma_configure = platform_dma_configure,
+ .pm = &amba_pm,
+};
+EXPORT_SYMBOL_GPL(amba_bustype);
+
+static int __init amba_init(void)
+{
+ return bus_register(&amba_bustype);
}
+postcore_initcall(amba_init);
+
/**
* amba_driver_register - register an AMBA device driver
* @drv: amba device driver structure
@@ -332,12 +342,10 @@ static void amba_shutdown(struct device *dev)
*/
int amba_driver_register(struct amba_driver *drv)
{
- drv->drv.bus = &amba_bustype;
+ if (!drv->probe)
+ return -EINVAL;
-#define SETFN(fn) if (drv->fn) drv->drv.fn = amba_##fn
- SETFN(probe);
- SETFN(remove);
- SETFN(shutdown);
+ drv->drv.bus = &amba_bustype;
return driver_register(&drv->drv);
}
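With probe, remove and shutdown handling folded into amba_bustype itself, amba_driver_register() now refuses drivers that do not supply a probe callback, while remove and shutdown become optional. A minimal sketch of a driver under the new rules; the pl0xx_* names and the AMBA ID are made up for illustration and are not taken from this patch:

	#include <linux/amba/bus.h>
	#include <linux/module.h>

	static int pl0xx_probe(struct amba_device *adev, const struct amba_id *id)
	{
		/* the bus core has already enabled apb_pclk and set up runtime PM */
		return 0;
	}

	static const struct amba_id pl0xx_ids[] = {
		{ .id = 0x00041000, .mask = 0x000fffff },	/* hypothetical ID */
		{ 0, 0 },
	};
	MODULE_DEVICE_TABLE(amba, pl0xx_ids);

	static struct amba_driver pl0xx_driver = {
		.drv		= { .name = "pl0xx" },
		.id_table	= pl0xx_ids,
		.probe		= pl0xx_probe,
		/* .remove and .shutdown may now be left unset */
	};
	module_amba_driver(pl0xx_driver);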
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 7b4f154f07e6..e80ba93c62a9 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -355,7 +355,8 @@ static inline bool is_binderfs_control_device(const struct dentry *dentry)
return info->control_dentry == dentry;
}
-static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+static int binderfs_rename(struct user_namespace *mnt_userns,
+ struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
@@ -363,7 +364,8 @@ static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry,
is_binderfs_control_device(new_dentry))
return -EPERM;
- return simple_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
+ return simple_rename(&init_user_ns, old_dir, old_dentry, new_dir,
+ new_dentry, flags);
}
static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 49f7acbfcf01..5b32df5d33ad 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -377,6 +377,10 @@ static int __maybe_unused brcm_ahci_resume(struct device *dev)
if (ret)
return ret;
+ ret = ahci_platform_enable_regulators(hpriv);
+ if (ret)
+ goto out_disable_clks;
+
brcm_sata_init(priv);
brcm_sata_phys_enable(priv);
brcm_sata_alpm_init(hpriv);
@@ -406,6 +410,8 @@ out_disable_platform_phys:
ahci_platform_disable_phys(hpriv);
out_disable_phys:
brcm_sata_phys_disable(priv);
+ ahci_platform_disable_regulators(hpriv);
+out_disable_clks:
ahci_platform_disable_clks(hpriv);
return ret;
}
@@ -490,6 +496,10 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (ret)
goto out_reset;
+ ret = ahci_platform_enable_regulators(hpriv);
+ if (ret)
+ goto out_disable_clks;
+
/* Must be first so as to configure endianness including that
* of the standard AHCI register space.
*/
@@ -499,7 +509,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
if (!priv->port_mask) {
ret = -ENODEV;
- goto out_disable_clks;
+ goto out_disable_regulators;
}
/* Must be done before ahci_platform_enable_phys() */
@@ -524,6 +534,8 @@ out_disable_platform_phys:
ahci_platform_disable_phys(hpriv);
out_disable_phys:
brcm_sata_phys_disable(priv);
+out_disable_regulators:
+ ahci_platform_disable_regulators(hpriv);
out_disable_clks:
ahci_platform_disable_clks(hpriv);
out_reset:
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 08543aeb0093..498383cb6e29 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -202,14 +202,19 @@ static void pata_icside_set_dmamode(struct ata_port *ap, struct ata_device *adev
* Choose the IOMD cycle timing which ensure that the interface
* satisfies the measured active, recovery and cycle times.
*/
- if (t.active <= 50 && t.recover <= 375 && t.cycle <= 425)
- iomd_type = 'D', cycle = 187;
- else if (t.active <= 125 && t.recover <= 375 && t.cycle <= 500)
- iomd_type = 'C', cycle = 250;
- else if (t.active <= 200 && t.recover <= 550 && t.cycle <= 750)
- iomd_type = 'B', cycle = 437;
- else
- iomd_type = 'A', cycle = 562;
+ if (t.active <= 50 && t.recover <= 375 && t.cycle <= 425) {
+ iomd_type = 'D';
+ cycle = 187;
+ } else if (t.active <= 125 && t.recover <= 375 && t.cycle <= 500) {
+ iomd_type = 'C';
+ cycle = 250;
+ } else if (t.active <= 200 && t.recover <= 550 && t.cycle <= 750) {
+ iomd_type = 'B';
+ cycle = 437;
+ } else {
+ iomd_type = 'A';
+ cycle = 562;
+ }
ata_dev_info(adev, "timings: act %dns rec %dns cyc %dns (%c)\n",
t.active, t.recover, t.cycle, iomd_type);
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 5f0472c18bcb..0c13cac903de 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3743,16 +3743,7 @@ static int __init idt77252_init(void)
struct sk_buff *skb;
printk("%s: at %p\n", __func__, idt77252_init);
-
- if (sizeof(skb->cb) < sizeof(struct atm_skb_data) +
- sizeof(struct idt77252_skb_prv)) {
- printk(KERN_ERR "%s: skb->cb is too small (%lu < %lu)\n",
- __func__, (unsigned long) sizeof(skb->cb),
- (unsigned long) sizeof(struct atm_skb_data) +
- sizeof(struct idt77252_skb_prv));
- return -EIO;
- }
-
+ BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct idt77252_skb_prv) + sizeof(struct atm_skb_data));
return pci_register_driver(&idt77252_driver);
}
diff --git a/drivers/atm/idt77252.h b/drivers/atm/idt77252.h
index 9339197d701c..b059d31364dd 100644
--- a/drivers/atm/idt77252.h
+++ b/drivers/atm/idt77252.h
@@ -789,7 +789,7 @@ struct idt77252_skb_prv {
struct scqe tbd; /* Transmit Buffer Descriptor */
dma_addr_t paddr; /* DMA handle */
u32 pool; /* sb_pool handle */
-};
+} __packed;
#define IDT77252_PRV_TBD(skb) \
(((struct idt77252_skb_prv *)(ATM_SKB(skb)+1))->tbd)
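The idt77252 change replaces the runtime sizeof() check in module init, which used to fail probe with -EIO, by BUILD_BUG_ON(), so an skb->cb overflow is now caught at compile time. A generic, hedged sketch of the same pattern with a hypothetical private struct (my_skb_priv and my_init are not from this driver):

	#include <linux/build_bug.h>
	#include <linux/skbuff.h>

	struct my_skb_priv {			/* hypothetical per-skb state */
		dma_addr_t paddr;
		u32 pool;
	} __packed;

	static int __init my_init(void)
	{
		/* refuses to build if the private state cannot fit in skb->cb */
		BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) < sizeof(struct my_skb_priv));
		return 0;
	}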
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index a2b59b84bb88..1509cb74705a 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -507,6 +507,3 @@ config PANEL
depends on PARPORT
select AUXDISPLAY
select PARPORT_PANEL
-
-config CHARLCD
- tristate "Character LCD core support" if COMPILE_TEST
diff --git a/drivers/auxdisplay/cfag12864b.c b/drivers/auxdisplay/cfag12864b.c
index 7eebae7e322c..fd430e6866a1 100644
--- a/drivers/auxdisplay/cfag12864b.c
+++ b/drivers/auxdisplay/cfag12864b.c
@@ -5,7 +5,7 @@
* Description: cfag12864b LCD driver
* Depends: ks0108
*
- * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-31
*/
@@ -376,5 +376,5 @@ module_init(cfag12864b_init);
module_exit(cfag12864b_exit);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>");
+MODULE_AUTHOR("Miguel Ojeda <ojeda@kernel.org>");
MODULE_DESCRIPTION("cfag12864b LCD driver");
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 2002291ab338..d66821adf453 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -5,7 +5,7 @@
* Description: cfag12864b LCD framebuffer driver
* Depends: cfag12864b
*
- * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-31
*/
@@ -171,5 +171,5 @@ module_init(cfag12864bfb_init);
module_exit(cfag12864bfb_exit);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>");
+MODULE_AUTHOR("Miguel Ojeda <ojeda@kernel.org>");
MODULE_DESCRIPTION("cfag12864b LCD framebuffer driver");
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index d951d54b26f5..1e69cc6d21a0 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -117,8 +117,7 @@ static void ht16k33_fb_queue(struct ht16k33_priv *priv)
{
struct ht16k33_fbdev *fbdev = &priv->fbdev;
- schedule_delayed_work(&fbdev->work,
- msecs_to_jiffies(HZ / fbdev->refresh_rate));
+ schedule_delayed_work(&fbdev->work, HZ / fbdev->refresh_rate);
}
/*
@@ -402,11 +401,6 @@ static int ht16k33_probe(struct i2c_client *client,
return -EIO;
}
- if (client->irq <= 0) {
- dev_err(&client->dev, "No IRQ specified\n");
- return -EINVAL;
- }
-
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -459,9 +453,12 @@ static int ht16k33_probe(struct i2c_client *client,
if (err)
goto err_fbdev_info;
- err = ht16k33_keypad_probe(client, &priv->keypad);
- if (err)
- goto err_fbdev_unregister;
+ /* Keypad */
+ if (client->irq > 0) {
+ err = ht16k33_keypad_probe(client, &priv->keypad);
+ if (err)
+ goto err_fbdev_unregister;
+ }
/* Backlight */
memset(&bl_props, 0, sizeof(struct backlight_properties));
diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
index abfe3fa9e6f4..03c95ad4216c 100644
--- a/drivers/auxdisplay/ks0108.c
+++ b/drivers/auxdisplay/ks0108.c
@@ -5,7 +5,7 @@
* Description: ks0108 LCD Controller driver
* Depends: parport
*
- * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-31
*/
@@ -182,6 +182,6 @@ module_init(ks0108_init);
module_exit(ks0108_exit);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>");
+MODULE_AUTHOR("Miguel Ojeda <ojeda@kernel.org>");
MODULE_DESCRIPTION("ks0108 LCD Controller driver");
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 040be48ce046..ffcbe2bc460e 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -161,7 +161,7 @@ config HMEM_REPORTING
default n
depends on NUMA
help
- Enable reporting for heterogenous memory access attributes under
+ Enable reporting for heterogeneous memory access attributes under
their non-uniform memory nodes.
source "drivers/base/test/Kconfig"
@@ -213,4 +213,10 @@ config GENERIC_ARCH_TOPOLOGY
appropriate scaling, sysfs interface for reading capacity values at
runtime.
+config GENERIC_ARCH_NUMA
+ bool
+ help
+ Enable support for generic NUMA implementation. Currently, RISC-V
+ and ARM64 use it.
+
endmenu
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 5e7bf9669a81..8b93a7f291ec 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_PINCTRL) += pinctrl.o
obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += arch_topology.o
+obj-$(CONFIG_GENERIC_ARCH_NUMA) += arch_numa.o
obj-y += test/
diff --git a/arch/arm64/mm/numa.c b/drivers/base/arch_numa.c
index a8303bc6b62a..4cc4e117727d 100644
--- a/arch/arm64/mm/numa.c
+++ b/drivers/base/arch_numa.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/of.h>
-#include <asm/acpi.h>
#include <asm/sections.h>
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
@@ -356,11 +355,12 @@ static int __init numa_register_nodes(void)
/* Check that valid nid is set to memblks */
for_each_mem_region(mblk) {
int mblk_nid = memblock_get_region_node(mblk);
+ phys_addr_t start = mblk->base;
+ phys_addr_t end = mblk->base + mblk->size - 1;
if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) {
- pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
- mblk_nid, mblk->base,
- mblk->base + mblk->size - 1);
+ pr_warn("Warning: invalid memblk node %d [mem %pap-%pap]\n",
+ mblk_nid, &start, &end);
return -EINVAL;
}
}
@@ -428,14 +428,14 @@ out_free_distance:
static int __init dummy_numa_init(void)
{
phys_addr_t start = memblock_start_of_DRAM();
- phys_addr_t end = memblock_end_of_DRAM();
+ phys_addr_t end = memblock_end_of_DRAM() - 1;
int ret;
if (numa_off)
pr_info("NUMA disabled\n"); /* Forced off on command line. */
- pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n", start, end - 1);
+ pr_info("Faking a node at [mem %pap-%pap]\n", &start, &end);
- ret = numa_add_memblk(0, start, end);
+ ret = numa_add_memblk(0, start, end + 1);
if (ret) {
pr_err("NUMA init failed\n");
return ret;
@@ -445,16 +445,36 @@ static int __init dummy_numa_init(void)
return 0;
}
+#ifdef CONFIG_ACPI_NUMA
+static int __init arch_acpi_numa_init(void)
+{
+ int ret;
+
+ ret = acpi_numa_init();
+ if (ret) {
+ pr_info("Failed to initialise from firmware\n");
+ return ret;
+ }
+
+ return srat_disabled() ? -EINVAL : 0;
+}
+#else
+static int __init arch_acpi_numa_init(void)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
/**
- * arm64_numa_init() - Initialize NUMA
+ * arch_numa_init() - Initialize NUMA
*
* Try each configured NUMA initialization method until one succeeds. The
* last fallback is dummy single node config encompassing whole memory.
*/
-void __init arm64_numa_init(void)
+void __init arch_numa_init(void)
{
if (!numa_off) {
- if (!acpi_disabled && !numa_init(arm64_acpi_numa_init))
+ if (!acpi_disabled && !numa_init(arch_acpi_numa_init))
return;
if (acpi_disabled && !numa_init(of_numa_init))
return;
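Besides the move to drivers/base, the memory-range messages now use the %pap printk specifier for phys_addr_t instead of casting to a 64-bit integer; %pap must be given a pointer to the value, not the value itself. A minimal sketch of the idiom used above (assumes <linux/memblock.h> and <linux/printk.h>):

	static void __init report_dram_range(void)
	{
		phys_addr_t start = memblock_start_of_DRAM();
		phys_addr_t end = memblock_end_of_DRAM() - 1;

		/* note the & - %pap dereferences a phys_addr_t pointer */
		pr_info("DRAM: [mem %pap-%pap]\n", &start, &end);
	}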
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 8336535f1e11..d8b314e7d0fd 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -15,6 +15,7 @@
#include <linux/pm_runtime.h>
#include <linux/string.h>
#include <linux/auxiliary_bus.h>
+#include "base.h"
static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
const struct auxiliary_device *auxdev)
@@ -260,19 +261,11 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv)
}
EXPORT_SYMBOL_GPL(auxiliary_driver_unregister);
-static int __init auxiliary_bus_init(void)
+void __init auxiliary_bus_init(void)
{
- return bus_register(&auxiliary_bus_type);
+ WARN_ON(bus_register(&auxiliary_bus_type));
}
-static void __exit auxiliary_bus_exit(void)
-{
- bus_unregister(&auxiliary_bus_type);
-}
-
-module_init(auxiliary_bus_init);
-module_exit(auxiliary_bus_exit);
-
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Auxiliary Bus");
MODULE_AUTHOR("David Ertman <david.m.ertman@intel.com>");
diff --git a/drivers/base/base.h b/drivers/base/base.h
index f5600a83124f..52b3d7b75c27 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -119,6 +119,11 @@ static inline int hypervisor_init(void) { return 0; }
extern int platform_bus_init(void);
extern void cpu_dev_init(void);
extern void container_dev_init(void);
+#ifdef CONFIG_AUXILIARY_BUS
+extern void auxiliary_bus_init(void);
+#else
+static inline void auxiliary_bus_init(void) { }
+#endif
struct kobject *virtual_device_parent(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index a9c23ecebc7c..36d0c654ea61 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -633,7 +633,7 @@ int bus_add_driver(struct device_driver *drv)
error = driver_add_groups(drv, bus->drv_groups);
if (error) {
/* How the hell do we get out of this pickle? Give up */
- printk(KERN_ERR "%s: driver_create_groups(%s) failed\n",
+ printk(KERN_ERR "%s: driver_add_groups(%s) failed\n",
__func__, drv->name);
}
@@ -729,23 +729,6 @@ int device_reprobe(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_reprobe);
-/**
- * find_bus - locate bus by name.
- * @name: name of bus.
- *
- * Call kset_find_obj() to iterate over list of buses to
- * find a bus by name. Return bus if found.
- *
- * Note that kset_find_obj increments bus' reference count.
- */
-#if 0
-struct bus_type *find_bus(char *name)
-{
- struct kobject *k = kset_find_obj(bus_kset, name);
- return k ? to_bus(k) : NULL;
-}
-#endif /* 0 */
-
static int bus_add_groups(struct bus_type *bus,
const struct attribute_group **groups)
{
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 6eb4c7a904c5..f29839382f81 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -28,6 +28,7 @@
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sysfs.h>
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include "base.h"
#include "power/power.h"
@@ -148,6 +149,21 @@ void fwnode_links_purge(struct fwnode_handle *fwnode)
fwnode_links_purge_consumers(fwnode);
}
+static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *child;
+
+ /* Don't purge consumer links of an added child */
+ if (fwnode->dev)
+ return;
+
+ fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
+ fwnode_links_purge_consumers(fwnode);
+
+ fwnode_for_each_available_child_node(fwnode, child)
+ fw_devlink_purge_absent_suppliers(child);
+}
+
#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);
@@ -244,7 +260,8 @@ int device_is_dependent(struct device *dev, void *target)
return ret;
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+ if ((link->flags & ~DL_FLAG_INFERRED) ==
+ (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
continue;
if (link->consumer == target)
@@ -317,7 +334,8 @@ static int device_reorder_to_tail(struct device *dev, void *not_used)
device_for_each_child(dev, NULL, device_reorder_to_tail);
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+ if ((link->flags & ~DL_FLAG_INFERRED) ==
+ (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
continue;
device_reorder_to_tail(link->consumer, NULL);
}
@@ -565,7 +583,8 @@ postcore_initcall(devlink_class_init);
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
DL_FLAG_AUTOREMOVE_SUPPLIER | \
DL_FLAG_AUTOPROBE_CONSUMER | \
- DL_FLAG_SYNC_STATE_ONLY)
+ DL_FLAG_SYNC_STATE_ONLY | \
+ DL_FLAG_INFERRED)
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
@@ -634,7 +653,7 @@ struct device_link *device_link_add(struct device *consumer,
if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
(flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
(flags & DL_FLAG_SYNC_STATE_ONLY &&
- flags != DL_FLAG_SYNC_STATE_ONLY) ||
+ (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
(flags & DL_FLAG_AUTOPROBE_CONSUMER &&
flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER)))
@@ -690,6 +709,10 @@ struct device_link *device_link_add(struct device *consumer,
if (link->consumer != consumer)
continue;
+ if (link->flags & DL_FLAG_INFERRED &&
+ !(flags & DL_FLAG_INFERRED))
+ link->flags &= ~DL_FLAG_INFERRED;
+
if (flags & DL_FLAG_PM_RUNTIME) {
if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
pm_runtime_new_link(consumer);
@@ -949,6 +972,10 @@ int device_links_check_suppliers(struct device *dev)
mutex_lock(&fwnode_link_lock);
if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
!fw_devlink_is_permissive()) {
+ dev_dbg(dev, "probe deferral - wait for supplier %pfwP\n",
+ list_first_entry(&dev->fwnode->suppliers,
+ struct fwnode_link,
+ c_hook)->supplier);
mutex_unlock(&fwnode_link_lock);
return -EPROBE_DEFER;
}
@@ -963,6 +990,8 @@ int device_links_check_suppliers(struct device *dev)
if (link->status != DL_STATE_AVAILABLE &&
!(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
device_links_missing_supplier(dev);
+ dev_dbg(dev, "probe deferral - supplier %s not ready\n",
+ dev_name(link->supplier));
ret = -EPROBE_DEFER;
break;
}
@@ -1141,12 +1170,22 @@ void device_links_driver_bound(struct device *dev)
LIST_HEAD(sync_list);
/*
- * If a device probes successfully, it's expected to have created all
+ * If a device binds successfully, it's expected to have created all
* the device links it needs to or make new device links as it needs
- * them. So, it no longer needs to wait on any suppliers.
+ * them. So, fw_devlink no longer needs to create device links to any
+ * of the device's suppliers.
+ *
+ * Also, if a child firmware node of this bound device is not added as
+ * a device by now, assume it is never going to be added and make sure
+ * other devices don't defer probe indefinitely by waiting for such a
+ * child device.
*/
- if (dev->fwnode && dev->fwnode->dev == dev)
+ if (dev->fwnode && dev->fwnode->dev == dev) {
+ struct fwnode_handle *child;
fwnode_links_purge_suppliers(dev->fwnode);
+ fwnode_for_each_available_child_node(dev->fwnode, child)
+ fw_devlink_purge_absent_suppliers(child);
+ }
device_remove_file(dev, &dev_attr_waiting_for_supplier);
device_links_write_lock();
@@ -1457,7 +1496,14 @@ static void device_links_purge(struct device *dev)
device_links_write_unlock();
}
-static u32 fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
+#define FW_DEVLINK_FLAGS_PERMISSIVE (DL_FLAG_INFERRED | \
+ DL_FLAG_SYNC_STATE_ONLY)
+#define FW_DEVLINK_FLAGS_ON (DL_FLAG_INFERRED | \
+ DL_FLAG_AUTOPROBE_CONSUMER)
+#define FW_DEVLINK_FLAGS_RPM (FW_DEVLINK_FLAGS_ON | \
+ DL_FLAG_PM_RUNTIME)
+
+static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
static int __init fw_devlink_setup(char *arg)
{
if (!arg)
@@ -1466,17 +1512,23 @@ static int __init fw_devlink_setup(char *arg)
if (strcmp(arg, "off") == 0) {
fw_devlink_flags = 0;
} else if (strcmp(arg, "permissive") == 0) {
- fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
+ fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
} else if (strcmp(arg, "on") == 0) {
- fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER;
+ fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
} else if (strcmp(arg, "rpm") == 0) {
- fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER |
- DL_FLAG_PM_RUNTIME;
+ fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
}
return 0;
}
early_param("fw_devlink", fw_devlink_setup);
+static bool fw_devlink_strict;
+static int __init fw_devlink_strict_setup(char *arg)
+{
+ return strtobool(arg, &fw_devlink_strict);
+}
+early_param("fw_devlink.strict", fw_devlink_strict_setup);
+
u32 fw_devlink_get_flags(void)
{
return fw_devlink_flags;
@@ -1484,7 +1536,12 @@ u32 fw_devlink_get_flags(void)
static bool fw_devlink_is_permissive(void)
{
- return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY;
+ return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE;
+}
+
+bool fw_devlink_is_strict(void)
+{
+ return fw_devlink_strict && !fw_devlink_is_permissive();
}
static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
@@ -1507,6 +1564,53 @@ static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
}
/**
+ * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links
+ * @con: Device to check dependencies for.
+ * @sup: Device to check against.
+ *
+ * Check if @sup depends on @con or any device dependent on it (its child or
+ * its consumer etc). When such a cyclic dependency is found, convert all
+ * device links created solely by fw_devlink into SYNC_STATE_ONLY device links.
+ * This is the equivalent of doing fw_devlink=permissive just between the
+ * devices in the cycle. We need to do this because, at this point, fw_devlink
+ * can't tell which of these dependencies is not a real dependency.
+ *
+ * Return 1 if a cycle is found. Otherwise, return 0.
+ */
+static int fw_devlink_relax_cycle(struct device *con, void *sup)
+{
+ struct device_link *link;
+ int ret;
+
+ if (con == sup)
+ return 1;
+
+ ret = device_for_each_child(con, sup, fw_devlink_relax_cycle);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(link, &con->links.consumers, s_node) {
+ if ((link->flags & ~DL_FLAG_INFERRED) ==
+ (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+ continue;
+
+ if (!fw_devlink_relax_cycle(link->consumer, sup))
+ continue;
+
+ ret = 1;
+
+ if (!(link->flags & DL_FLAG_INFERRED))
+ continue;
+
+ pm_runtime_drop_link(link);
+ link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
+ dev_dbg(link->consumer, "Relaxing link with %s\n",
+ dev_name(link->supplier));
+ }
+ return ret;
+}
+
+/**
* fw_devlink_create_devlink - Create a device link from a consumer to fwnode
* @con - Consumer device for the device link
* @sup_handle - fwnode handle of supplier
@@ -1534,15 +1638,39 @@ static int fw_devlink_create_devlink(struct device *con,
sup_dev = get_dev_from_fwnode(sup_handle);
if (sup_dev) {
/*
+ * If it's one of those drivers that don't actually bind to
+ * their device using driver core, then don't wait on this
+ * supplier device indefinitely.
+ */
+ if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
+ sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
* If this fails, it is due to cycles in device links. Just
* give up on this link and treat it as invalid.
*/
- if (!device_link_add(con, sup_dev, flags))
+ if (!device_link_add(con, sup_dev, flags) &&
+ !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ dev_info(con, "Fixing up cyclic dependency with %s\n",
+ dev_name(sup_dev));
+ device_links_write_lock();
+ fw_devlink_relax_cycle(con, sup_dev);
+ device_links_write_unlock();
+ device_link_add(con, sup_dev,
+ FW_DEVLINK_FLAGS_PERMISSIVE);
ret = -EINVAL;
+ }
goto out;
}
+ /* Supplier that's already initialized without a struct device. */
+ if (sup_handle->flags & FWNODE_FLAG_INITIALIZED)
+ return -EINVAL;
+
/*
* DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports
* cycles. So cycle detection isn't necessary and shouldn't be
@@ -1631,7 +1759,7 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
con_dev = NULL;
} else {
own_link = false;
- dl_flags = DL_FLAG_SYNC_STATE_ONLY;
+ dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
}
}
@@ -1686,7 +1814,7 @@ static void __fw_devlink_link_to_suppliers(struct device *dev,
if (own_link)
dl_flags = fw_devlink_get_flags();
else
- dl_flags = DL_FLAG_SYNC_STATE_ONLY;
+ dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
int ret;
@@ -2603,6 +2731,11 @@ void device_initialize(struct device *dev)
INIT_LIST_HEAD(&dev->links.suppliers);
INIT_LIST_HEAD(&dev->links.defer_sync);
dev->links.status = DL_DEV_NO_DRIVER;
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+ dev->dma_coherent = dma_default_coherent;
+#endif
}
EXPORT_SYMBOL_GPL(device_initialize);
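The fw_devlink changes above bundle the link flags into named sets and add a separate strict switch parsed with strtobool(). For reference, the behaviour is selected on the kernel command line with exactly the values handled by the early_param callbacks in this hunk, for example:

	fw_devlink=permissive		(default: inferred SYNC_STATE_ONLY links)
	fw_devlink=on fw_devlink.strict=1
	fw_devlink=rpm			(as "on", plus runtime-PM links)
	fw_devlink=off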
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index eac184e6d657..653c8c6ac7a7 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -162,7 +162,7 @@ static int dev_mkdir(const char *name, umode_t mode)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mkdir(d_inode(path.dentry), dentry, mode);
+ err = vfs_mkdir(&init_user_ns, d_inode(path.dentry), dentry, mode);
if (!err)
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
@@ -212,7 +212,8 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mknod(d_inode(path.dentry), dentry, mode, dev->devt);
+ err = vfs_mknod(&init_user_ns, d_inode(path.dentry), dentry, mode,
+ dev->devt);
if (!err) {
struct iattr newattrs;
@@ -221,7 +222,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
newattrs.ia_gid = gid;
newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
inode_lock(d_inode(dentry));
- notify_change(dentry, &newattrs, NULL);
+ notify_change(&init_user_ns, dentry, &newattrs, NULL);
inode_unlock(d_inode(dentry));
/* mark as kernel-created inode */
@@ -242,7 +243,8 @@ static int dev_rmdir(const char *name)
return PTR_ERR(dentry);
if (d_really_is_positive(dentry)) {
if (d_inode(dentry)->i_private == &thread)
- err = vfs_rmdir(d_inode(parent.dentry), dentry);
+ err = vfs_rmdir(&init_user_ns, d_inode(parent.dentry),
+ dentry);
else
err = -EPERM;
} else {
@@ -328,9 +330,10 @@ static int handle_remove(const char *nodename, struct device *dev)
newattrs.ia_valid =
ATTR_UID|ATTR_GID|ATTR_MODE;
inode_lock(d_inode(dentry));
- notify_change(dentry, &newattrs, NULL);
+ notify_change(&init_user_ns, dentry, &newattrs, NULL);
inode_unlock(d_inode(dentry));
- err = vfs_unlink(d_inode(parent.dentry), dentry, NULL);
+ err = vfs_unlink(&init_user_ns, d_inode(parent.dentry),
+ dentry, NULL);
if (!err || err == -ENOENT)
deleted = 1;
}
diff --git a/drivers/base/init.c b/drivers/base/init.c
index 908e6520e804..a9f57c22fb9e 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -32,6 +32,7 @@ void __init driver_init(void)
*/
of_core_init();
platform_bus_init();
+ auxiliary_bus_init();
cpu_dev_init();
memory_dev_init();
container_dev_init();
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index 2772f5d1948a..aa4737667026 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -51,7 +51,7 @@ static int isa_bus_remove(struct device *dev)
struct isa_driver *isa_driver = dev->platform_data;
if (isa_driver && isa_driver->remove)
- return isa_driver->remove(dev, to_isa_dev(dev)->id);
+ isa_driver->remove(dev, to_isa_dev(dev)->id);
return 0;
}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index eef4ffb6122c..f35298425575 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -35,7 +35,7 @@ static const char *const online_type_to_str[] = {
[MMOP_ONLINE_MOVABLE] = "online_movable",
};
-int memhp_online_type_from_str(const char *str)
+int mhp_online_type_from_str(const char *str)
{
int i;
@@ -253,7 +253,7 @@ static int memory_subsys_offline(struct device *dev)
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- const int online_type = memhp_online_type_from_str(buf);
+ const int online_type = mhp_online_type_from_str(buf);
struct memory_block *mem = to_memory_block(dev);
int ret;
@@ -290,20 +290,20 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
}
/*
- * phys_device is a bad name for this. What I really want
- * is a way to differentiate between memory ranges that
- * are part of physical devices that constitute
- * a complete removable unit or fru.
- * i.e. do these ranges belong to the same physical device,
- * s.t. if I offline all of these sections I can then
- * remove the physical device?
+ * Legacy interface that we cannot remove: s390x exposes the storage increment
+ * covered by a memory block, allowing for identifying which memory blocks
+ * comprise a storage increment. Since a memory block spans complete
+ * storage increments nowadays, this interface is basically unused. Other
+ * architectures have never exposed a value other than 0.
*/
static ssize_t phys_device_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct memory_block *mem = to_memory_block(dev);
+ unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
- return sysfs_emit(buf, "%d\n", mem->phys_device);
+ return sysfs_emit(buf, "%d\n",
+ arch_get_memory_phys_device(start_pfn));
}
#ifdef CONFIG_MEMORY_HOTREMOVE
@@ -387,19 +387,19 @@ static ssize_t auto_online_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%s\n",
- online_type_to_str[memhp_default_online_type]);
+ online_type_to_str[mhp_default_online_type]);
}
static ssize_t auto_online_blocks_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- const int online_type = memhp_online_type_from_str(buf);
+ const int online_type = mhp_online_type_from_str(buf);
if (online_type < 0)
return -EINVAL;
- memhp_default_online_type = online_type;
+ mhp_default_online_type = online_type;
return count;
}
@@ -488,11 +488,7 @@ static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif
-/*
- * Note that phys_device is optional. It is here to allow for
- * differentiation between which *physical* devices each
- * section belongs to...
- */
+/* See phys_device_show(). */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
return 0;
@@ -574,7 +570,6 @@ int register_memory(struct memory_block *memory)
static int init_memory_block(unsigned long block_id, unsigned long state)
{
struct memory_block *mem;
- unsigned long start_pfn;
int ret = 0;
mem = find_memory_block_by_id(block_id);
@@ -588,8 +583,6 @@ static int init_memory_block(unsigned long block_id, unsigned long state)
mem->start_section_nr = block_id * sections_per_block;
mem->state = state;
- start_pfn = section_nr_to_pfn(mem->start_section_nr);
- mem->phys_device = arch_get_memory_phys_device(start_pfn);
mem->nid = NUMA_NO_NODE;
ret = register_memory(mem);
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 04f71c7bc3f8..f449dbb2c746 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -372,14 +372,19 @@ static ssize_t node_read_meminfo(struct device *dev,
struct pglist_data *pgdat = NODE_DATA(nid);
struct sysinfo i;
unsigned long sreclaimable, sunreclaimable;
+ unsigned long swapcached = 0;
si_meminfo_node(&i, nid);
sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
+#ifdef CONFIG_SWAP
+ swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE);
+#endif
len = sysfs_emit_at(buf, len,
"Node %d MemTotal: %8lu kB\n"
"Node %d MemFree: %8lu kB\n"
"Node %d MemUsed: %8lu kB\n"
+ "Node %d SwapCached: %8lu kB\n"
"Node %d Active: %8lu kB\n"
"Node %d Inactive: %8lu kB\n"
"Node %d Active(anon): %8lu kB\n"
@@ -391,6 +396,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(i.totalram),
nid, K(i.freeram),
nid, K(i.totalram - i.freeram),
+ nid, K(swapcached),
nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
node_page_state(pgdat, NR_ACTIVE_FILE)),
nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
@@ -461,16 +467,11 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(sunreclaimable)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
,
- nid, K(node_page_state(pgdat, NR_ANON_THPS) *
- HPAGE_PMD_NR),
- nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
- HPAGE_PMD_NR),
- nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
- HPAGE_PMD_NR),
- nid, K(node_page_state(pgdat, NR_FILE_THPS) *
- HPAGE_PMD_NR),
- nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED) *
- HPAGE_PMD_NR)
+ nid, K(node_page_state(pgdat, NR_ANON_THPS)),
+ nid, K(node_page_state(pgdat, NR_SHMEM_THPS)),
+ nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
+ nid, K(node_page_state(pgdat, NR_FILE_THPS)),
+ nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED))
#endif
);
len += hugetlb_report_node_meminfo(buf, len, nid);
@@ -519,10 +520,14 @@ static ssize_t node_read_vmstat(struct device *dev,
sum_zone_numa_state(nid, i));
#endif
- for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
- len += sysfs_emit_at(buf, len, "%s %lu\n",
- node_stat_name(i),
- node_page_state_pages(pgdat, i));
+ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
+ unsigned long pages = node_page_state_pages(pgdat, i);
+
+ if (vmstat_item_print_in_thp(i))
+ pages /= HPAGE_PMD_NR;
+ len += sysfs_emit_at(buf, len, "%s %lu\n", node_stat_name(i),
+ pages);
+ }
return len;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 8456d8384ac8..6e1f8e0b661c 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -573,7 +573,7 @@ static void platform_device_release(struct device *dev)
struct platform_object *pa = container_of(dev, struct platform_object,
pdev.dev);
- of_device_node_put(&pa->pdev.dev);
+ of_node_put(pa->pdev.dev.of_node);
kfree(pa->pdev.dev.platform_data);
kfree(pa->pdev.mfd_cell);
kfree(pa->pdev.resource);
@@ -1463,13 +1463,16 @@ static int platform_remove(struct device *_dev)
{
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
- int ret = 0;
- if (drv->remove)
- ret = drv->remove(dev);
+ if (drv->remove) {
+ int ret = drv->remove(dev);
+
+ if (ret)
+ dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
+ }
dev_pm_domain_detach(_dev, true);
- return ret;
+ return 0;
}
static void platform_shutdown(struct device *_dev)
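platform_remove() now always returns 0 and only warns when a driver's remove callback reports an error, a step toward making these callbacks void. In practice a remove callback should complete its teardown unconditionally; a hedged sketch with hypothetical foo_* names:

	static int foo_remove(struct platform_device *pdev)
	{
		struct foo_priv *priv = platform_get_drvdata(pdev);	/* hypothetical */

		foo_hw_stop(priv);	/* hypothetical teardown helper */
		return 0;		/* a non-zero value would only trigger the warning */
	}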
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index ced6863a16a5..84d5acb6301b 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -23,6 +23,7 @@
enum pce_status {
PCE_STATUS_NONE = 0,
PCE_STATUS_ACQUIRED,
+ PCE_STATUS_PREPARED,
PCE_STATUS_ENABLED,
PCE_STATUS_ERROR,
};
@@ -32,9 +33,113 @@ struct pm_clock_entry {
char *con_id;
struct clk *clk;
enum pce_status status;
+ bool enabled_when_prepared;
};
/**
+ * pm_clk_list_lock - ensure exclusive access for modifying the PM clock
+ * entry list.
+ * @psd: pm_subsys_data instance corresponding to the PM clock entry list
+ * and the clock_op_might_sleep count to be modified.
+ *
+ * Get exclusive access before modifying the PM clock entry list and the
+ * clock_op_might_sleep count to guard against concurrent modifications.
+ * This also protects against a concurrent clock_op_might_sleep and PM clock
+ * entry list usage in pm_clk_suspend()/pm_clk_resume() that may or may not
+ * happen in atomic context, hence both the mutex and the spinlock must be
+ * taken here.
+ */
+static void pm_clk_list_lock(struct pm_subsys_data *psd)
+ __acquires(&psd->lock)
+{
+ mutex_lock(&psd->clock_mutex);
+ spin_lock_irq(&psd->lock);
+}
+
+/**
+ * pm_clk_list_unlock - counterpart to pm_clk_list_lock().
+ * @psd: the same pm_subsys_data instance previously passed to
+ * pm_clk_list_lock().
+ */
+static void pm_clk_list_unlock(struct pm_subsys_data *psd)
+ __releases(&psd->lock)
+{
+ spin_unlock_irq(&psd->lock);
+ mutex_unlock(&psd->clock_mutex);
+}
+
+/**
+ * pm_clk_op_lock - ensure exclusive access for performing clock operations.
+ * @psd: pm_subsys_data instance corresponding to the PM clock entry list
+ * and the clock_op_might_sleep count being used.
+ * @flags: stored irq flags.
+ * @fn: string for the caller function's name.
+ *
+ * This is used by pm_clk_suspend() and pm_clk_resume() to guard
+ * against concurrent modifications to the clock entry list and the
+ * clock_op_might_sleep count. If clock_op_might_sleep is != 0 then
+ * only the mutex can be locked and those functions can only be used in
+ * non atomic context. If clock_op_might_sleep == 0 then these functions
+ * may be used in any context and only the spinlock can be locked.
+ * Returns -EINVAL if called in atomic context when clock ops might sleep.
+ */
+static int pm_clk_op_lock(struct pm_subsys_data *psd, unsigned long *flags,
+ const char *fn)
+ /* sparse annotations don't work here as exit state isn't static */
+{
+ bool atomic_context = in_atomic() || irqs_disabled();
+
+try_again:
+ spin_lock_irqsave(&psd->lock, *flags);
+ if (!psd->clock_op_might_sleep) {
+ /* the __release is there to work around sparse limitations */
+ __release(&psd->lock);
+ return 0;
+ }
+
+ /* bail out if in atomic context */
+ if (atomic_context) {
+		pr_err("%s: atomic context with clock_op_might_sleep = %d\n",
+ fn, psd->clock_op_might_sleep);
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ might_sleep();
+ return -EPERM;
+ }
+
+ /* we must switch to the mutex */
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ mutex_lock(&psd->clock_mutex);
+
+ /*
+ * There was a possibility for psd->clock_op_might_sleep
+ * to become 0 above. Keep the mutex only if not the case.
+ */
+ if (likely(psd->clock_op_might_sleep))
+ return 0;
+
+ mutex_unlock(&psd->clock_mutex);
+ goto try_again;
+}
+
+/**
+ * pm_clk_op_unlock - counterpart to pm_clk_op_lock().
+ * @psd: the same pm_subsys_data instance previously passed to
+ * pm_clk_op_lock().
+ * @flags: irq flags provided by pm_clk_op_lock().
+ */
+static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags)
+ /* sparse annotations don't work here as entry state isn't static */
+{
+ if (psd->clock_op_might_sleep) {
+ mutex_unlock(&psd->clock_mutex);
+ } else {
+ /* the __acquire is there to work around sparse limitations */
+ __acquire(&psd->lock);
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ }
+}
+
+/**
* pm_clk_enable - Enable a clock, reporting any errors
* @dev: The device for the given clock
* @ce: PM clock entry corresponding to the clock.
@@ -43,14 +148,21 @@ static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce
{
int ret;
- if (ce->status < PCE_STATUS_ERROR) {
+ switch (ce->status) {
+ case PCE_STATUS_ACQUIRED:
+ ret = clk_prepare_enable(ce->clk);
+ break;
+ case PCE_STATUS_PREPARED:
ret = clk_enable(ce->clk);
- if (!ret)
- ce->status = PCE_STATUS_ENABLED;
- else
- dev_err(dev, "%s: failed to enable clk %p, error %d\n",
- __func__, ce->clk, ret);
+ break;
+ default:
+ return;
}
+ if (!ret)
+ ce->status = PCE_STATUS_ENABLED;
+ else
+ dev_err(dev, "%s: failed to enable clk %p, error %d\n",
+ __func__, ce->clk, ret);
}
/**
@@ -64,17 +176,20 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
ce->clk = clk_get(dev, ce->con_id);
if (IS_ERR(ce->clk)) {
ce->status = PCE_STATUS_ERROR;
+ return;
+ } else if (clk_is_enabled_when_prepared(ce->clk)) {
+ /* we defer preparing the clock in that case */
+ ce->status = PCE_STATUS_ACQUIRED;
+ ce->enabled_when_prepared = true;
+ } else if (clk_prepare(ce->clk)) {
+ ce->status = PCE_STATUS_ERROR;
+ dev_err(dev, "clk_prepare() failed\n");
+ return;
} else {
- if (clk_prepare(ce->clk)) {
- ce->status = PCE_STATUS_ERROR;
- dev_err(dev, "clk_prepare() failed\n");
- } else {
- ce->status = PCE_STATUS_ACQUIRED;
- dev_dbg(dev,
- "Clock %pC con_id %s managed by runtime PM.\n",
- ce->clk, ce->con_id);
- }
+ ce->status = PCE_STATUS_PREPARED;
}
+ dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
+ ce->clk, ce->con_id);
}
static int __pm_clk_add(struct device *dev, const char *con_id,
@@ -106,9 +221,11 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
pm_clk_acquire(dev, ce);
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_add_tail(&ce->node, &psd->clock_list);
- spin_unlock_irq(&psd->lock);
+ if (ce->enabled_when_prepared)
+ psd->clock_op_might_sleep++;
+ pm_clk_list_unlock(psd);
return 0;
}
@@ -239,14 +356,20 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
if (!ce)
return;
- if (ce->status < PCE_STATUS_ERROR) {
- if (ce->status == PCE_STATUS_ENABLED)
- clk_disable(ce->clk);
-
- if (ce->status >= PCE_STATUS_ACQUIRED) {
- clk_unprepare(ce->clk);
+ switch (ce->status) {
+ case PCE_STATUS_ENABLED:
+ clk_disable(ce->clk);
+ fallthrough;
+ case PCE_STATUS_PREPARED:
+ clk_unprepare(ce->clk);
+ fallthrough;
+ case PCE_STATUS_ACQUIRED:
+ case PCE_STATUS_ERROR:
+ if (!IS_ERR(ce->clk))
clk_put(ce->clk);
- }
+ break;
+ default:
+ break;
}
kfree(ce->con_id);
@@ -269,7 +392,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
if (!psd)
return;
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_for_each_entry(ce, &psd->clock_list, node) {
if (!con_id && !ce->con_id)
@@ -280,12 +403,14 @@ void pm_clk_remove(struct device *dev, const char *con_id)
goto remove;
}
- spin_unlock_irq(&psd->lock);
+ pm_clk_list_unlock(psd);
return;
remove:
list_del(&ce->node);
- spin_unlock_irq(&psd->lock);
+ if (ce->enabled_when_prepared)
+ psd->clock_op_might_sleep--;
+ pm_clk_list_unlock(psd);
__pm_clk_remove(ce);
}
@@ -307,19 +432,21 @@ void pm_clk_remove_clk(struct device *dev, struct clk *clk)
if (!psd || !clk)
return;
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_for_each_entry(ce, &psd->clock_list, node) {
if (clk == ce->clk)
goto remove;
}
- spin_unlock_irq(&psd->lock);
+ pm_clk_list_unlock(psd);
return;
remove:
list_del(&ce->node);
- spin_unlock_irq(&psd->lock);
+ if (ce->enabled_when_prepared)
+ psd->clock_op_might_sleep--;
+ pm_clk_list_unlock(psd);
__pm_clk_remove(ce);
}
@@ -330,13 +457,16 @@ EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
* @dev: Device to initialize the list of PM clocks for.
*
* Initialize the lock and clock_list members of the device's pm_subsys_data
- * object.
+ * object, set the count of clocks that might sleep to 0.
*/
void pm_clk_init(struct device *dev)
{
struct pm_subsys_data *psd = dev_to_psd(dev);
- if (psd)
+ if (psd) {
INIT_LIST_HEAD(&psd->clock_list);
+ mutex_init(&psd->clock_mutex);
+ psd->clock_op_might_sleep = 0;
+ }
}
EXPORT_SYMBOL_GPL(pm_clk_init);
@@ -372,12 +502,13 @@ void pm_clk_destroy(struct device *dev)
INIT_LIST_HEAD(&list);
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
list_move(&ce->node, &list);
+ psd->clock_op_might_sleep = 0;
- spin_unlock_irq(&psd->lock);
+ pm_clk_list_unlock(psd);
dev_pm_put_subsys_data(dev);
@@ -397,23 +528,30 @@ int pm_clk_suspend(struct device *dev)
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
if (!psd)
return 0;
- spin_lock_irqsave(&psd->lock, flags);
+ ret = pm_clk_op_lock(psd, &flags, __func__);
+ if (ret)
+ return ret;
list_for_each_entry_reverse(ce, &psd->clock_list, node) {
- if (ce->status < PCE_STATUS_ERROR) {
- if (ce->status == PCE_STATUS_ENABLED)
+ if (ce->status == PCE_STATUS_ENABLED) {
+ if (ce->enabled_when_prepared) {
+ clk_disable_unprepare(ce->clk);
+ ce->status = PCE_STATUS_ACQUIRED;
+ } else {
clk_disable(ce->clk);
- ce->status = PCE_STATUS_ACQUIRED;
+ ce->status = PCE_STATUS_PREPARED;
+ }
}
}
- spin_unlock_irqrestore(&psd->lock, flags);
+ pm_clk_op_unlock(psd, &flags);
return 0;
}
@@ -428,18 +566,21 @@ int pm_clk_resume(struct device *dev)
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
if (!psd)
return 0;
- spin_lock_irqsave(&psd->lock, flags);
+ ret = pm_clk_op_lock(psd, &flags, __func__);
+ if (ret)
+ return ret;
list_for_each_entry(ce, &psd->clock_list, node)
__pm_clk_enable(dev, ce);
- spin_unlock_irqrestore(&psd->lock, flags);
+ pm_clk_op_unlock(psd, &flags);
return 0;
}
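The clock_ops.c rework exists so that clocks which have to be prepared in order to be enabled (clk_is_enabled_when_prepared()) can still be managed, at the price of pm_clk_suspend()/pm_clk_resume() becoming sleepable whenever such a clock is on the list. The consumer side is unchanged; a hedged sketch of a platform driver handing its clocks to the PM clock framework, where foo_* and the "bus" con_id are hypothetical (assumes <linux/pm_clock.h> and <linux/platform_device.h>):

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		ret = pm_clk_create(&pdev->dev);
		if (ret)
			return ret;

		ret = pm_clk_add(&pdev->dev, "bus");
		if (ret)
			pm_clk_destroy(&pdev->dev);

		return ret;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
	};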
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 9a14eedacb92..78c310d3179d 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -297,6 +297,18 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
return state;
}
+static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *parent,
+ unsigned int pstate)
+{
+ if (!parent->set_performance_state)
+ return pstate;
+
+ return dev_pm_opp_xlate_performance_state(genpd->opp_table,
+ parent->opp_table,
+ pstate);
+}
+
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
unsigned int state, int depth)
{
@@ -311,13 +323,8 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
list_for_each_entry(link, &genpd->child_links, child_node) {
parent = link->parent;
- if (!parent->set_performance_state)
- continue;
-
/* Find parent's performance state */
- ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
- parent->opp_table,
- state);
+ ret = genpd_xlate_performance_state(genpd, parent, state);
if (unlikely(ret < 0))
goto err;
@@ -339,9 +346,11 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
goto err;
}
- ret = genpd->set_performance_state(genpd, state);
- if (ret)
- goto err;
+ if (genpd->set_performance_state) {
+ ret = genpd->set_performance_state(genpd, state);
+ if (ret)
+ goto err;
+ }
genpd->performance_state = state;
return 0;
@@ -352,9 +361,6 @@ err:
child_node) {
parent = link->parent;
- if (!parent->set_performance_state)
- continue;
-
genpd_lock_nested(parent, depth + 1);
parent_state = link->prev_performance_state;
@@ -399,9 +405,6 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
if (!genpd)
return -ENODEV;
- if (unlikely(!genpd->set_performance_state))
- return -EINVAL;
-
if (WARN_ON(!dev->power.subsys_data ||
!dev->power.subsys_data->domain_data))
return -EINVAL;
@@ -423,6 +426,35 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
+/**
+ * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
+ *
+ * @dev: Device to handle
+ * @next: impending interrupt/wakeup for the device
+ *
+ * Allow devices to inform the PM framework of their next expected wakeup.
+ * It is assumed that the callers guarantee that the genpd is not detached
+ * while this routine runs, and that @dev is not runtime suspended
+ * (RPM_SUSPENDED).
+ * Devices are also expected to update next_wakeup at the end of their use
+ * case, but since they may not always be able to, a stale @next is simply
+ * ignored when the domain is powered off.
+ */
+void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
+{
+ struct generic_pm_domain_data *gpd_data;
+ struct generic_pm_domain *genpd;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return;
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ gpd_data->next_wakeup = next;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
+
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
@@ -934,8 +966,7 @@ static int genpd_runtime_resume(struct device *dev)
err_stop:
genpd_stop_dev(genpd, dev);
err_poweroff:
- if (!pm_runtime_is_irq_safe(dev) ||
- (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
+ if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
genpd_lock(genpd);
genpd_power_off(genpd, true, 0);
genpd_unlock(genpd);
@@ -1465,6 +1496,7 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
gpd_data->td.constraint_changed = true;
gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
+ gpd_data->next_wakeup = KTIME_MAX;
spin_lock_irq(&dev->power.lock);
@@ -2164,6 +2196,7 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
cp->node = of_node_get(np);
cp->data = data;
cp->xlate = xlate;
+ fwnode_dev_initialized(&np->fwnode, true);
mutex_lock(&of_genpd_mutex);
list_add(&cp->link, &of_genpd_providers);
@@ -2353,6 +2386,7 @@ void of_genpd_del_provider(struct device_node *np)
}
}
+ fwnode_dev_initialized(&cp->node->fwnode, false);
list_del(&cp->link);
of_node_put(cp->node);
kfree(cp);
@@ -2463,7 +2497,7 @@ int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
out:
mutex_unlock(&gpd_list_lock);
- return ret;
+ return ret == -ENOENT ? -EPROBE_DEFER : ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
@@ -2952,7 +2986,15 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
else
WARN_ON(1);
- seq_puts(s, p);
+ seq_printf(s, "%-25s ", p);
+}
+
+static void perf_status_str(struct seq_file *s, struct device *dev)
+{
+ struct generic_pm_domain_data *gpd_data;
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ seq_put_decimal_ull(s, "", gpd_data->performance_state);
}
static int genpd_summary_one(struct seq_file *s,
@@ -2980,7 +3022,7 @@ static int genpd_summary_one(struct seq_file *s,
else
snprintf(state, sizeof(state), "%s",
status_lookup[genpd->status]);
- seq_printf(s, "%-30s %-15s ", genpd->name, state);
+ seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state);
/*
* Modifications on the list require holding locks on both
@@ -2988,6 +3030,8 @@ static int genpd_summary_one(struct seq_file *s,
* Also genpd->name is immutable.
*/
list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ if (list_is_first(&link->parent_node, &genpd->parent_links))
+ seq_printf(s, "\n%48s", " ");
seq_printf(s, "%s", link->child->name);
if (!list_is_last(&link->parent_node, &genpd->parent_links))
seq_puts(s, ", ");
@@ -3002,6 +3046,7 @@ static int genpd_summary_one(struct seq_file *s,
seq_printf(s, "\n %-50s ", kobj_path);
rtpm_status_str(s, pm_data->dev);
+ perf_status_str(s, pm_data->dev);
kfree(kobj_path);
}
@@ -3017,9 +3062,9 @@ static int summary_show(struct seq_file *s, void *data)
struct generic_pm_domain *genpd;
int ret = 0;
- seq_puts(s, "domain status children\n");
+ seq_puts(s, "domain status children performance\n");
seq_puts(s, " /device runtime status\n");
- seq_puts(s, "----------------------------------------------------------------------\n");
+ seq_puts(s, "----------------------------------------------------------------------------------------------\n");
ret = mutex_lock_interruptible(&gpd_list_lock);
if (ret)
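dev_pm_genpd_set_next_wakeup() gives drivers with predictable wakeups a way to tell the domain governor how much idle time is actually available; the governor side that consumes the hint follows in domain_governor.c below. A minimal, hedged consumer sketch (the foo_* name and the 5 ms figure are made up):

	#include <linux/ktime.h>
	#include <linux/pm_domain.h>

	static void foo_arm_next_transfer(struct device *dev)
	{
		/* the next interrupt is expected in roughly 5 ms */
		ktime_t next = ktime_add_us(ktime_get(), 5000);

		dev_pm_genpd_set_next_wakeup(dev, next);
	}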
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 490ed7deb99a..c6c218758f0b 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -117,6 +117,55 @@ static bool default_suspend_ok(struct device *dev)
return td->cached_suspend_ok;
}
+static void update_domain_next_wakeup(struct generic_pm_domain *genpd, ktime_t now)
+{
+ ktime_t domain_wakeup = KTIME_MAX;
+ ktime_t next_wakeup;
+ struct pm_domain_data *pdd;
+ struct gpd_link *link;
+
+ if (!(genpd->flags & GENPD_FLAG_MIN_RESIDENCY))
+ return;
+
+ /*
+	 * Devices with a predictable wakeup pattern may specify their next
+	 * wakeup. Find the earliest next wakeup among all devices attached to
+	 * this domain and among all of its sub-domains. A device's next wakeup
+	 * may have gone stale by the time it is read here; stale values are
+	 * ignored so that the domain can still enter its optimal idle state.
+ */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ next_wakeup = to_gpd_data(pdd)->next_wakeup;
+ if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
+ if (ktime_before(next_wakeup, domain_wakeup))
+ domain_wakeup = next_wakeup;
+ }
+
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ next_wakeup = link->child->next_wakeup;
+ if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
+ if (ktime_before(next_wakeup, domain_wakeup))
+ domain_wakeup = next_wakeup;
+ }
+
+ genpd->next_wakeup = domain_wakeup;
+}
+
+static bool next_wakeup_allows_state(struct generic_pm_domain *genpd,
+ unsigned int state, ktime_t now)
+{
+ ktime_t domain_wakeup = genpd->next_wakeup;
+ s64 idle_time_ns, min_sleep_ns;
+
+ min_sleep_ns = genpd->states[state].power_off_latency_ns +
+ genpd->states[state].residency_ns;
+
+ idle_time_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
+
+ return idle_time_ns >= min_sleep_ns;
+}
+
static bool __default_power_down_ok(struct dev_pm_domain *pd,
unsigned int state)
{
@@ -201,16 +250,41 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
}
/**
- * default_power_down_ok - Default generic PM domain power off governor routine.
+ * _default_power_down_ok - Default generic PM domain power off governor routine.
* @pd: PM domain to check.
*
* This routine must be executed under the PM domain's lock.
*/
-static bool default_power_down_ok(struct dev_pm_domain *pd)
+static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
{
struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ int state_idx = genpd->state_count - 1;
struct gpd_link *link;
+ /*
+	 * Find the next wakeup of the devices that can determine their own
+	 * wakeup, and do so for every device down the hierarchy, to work out
+	 * when the domain would wake up. It is not worthwhile to sleep if the
+	 * state's residency cannot be met.
+ */
+ update_domain_next_wakeup(genpd, now);
+ if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (genpd->next_wakeup != KTIME_MAX)) {
+ /* Let's find out the deepest domain idle state, the devices prefer */
+ while (state_idx >= 0) {
+ if (next_wakeup_allows_state(genpd, state_idx, now)) {
+ genpd->max_off_time_changed = true;
+ break;
+ }
+ state_idx--;
+ }
+
+ if (state_idx < 0) {
+ state_idx = 0;
+ genpd->cached_power_down_ok = false;
+ goto done;
+ }
+ }
+
if (!genpd->max_off_time_changed) {
genpd->state_idx = genpd->cached_power_down_state_idx;
return genpd->cached_power_down_ok;
@@ -228,21 +302,30 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
genpd->max_off_time_ns = -1;
genpd->max_off_time_changed = false;
genpd->cached_power_down_ok = true;
- genpd->state_idx = genpd->state_count - 1;
- /* Find a state to power down to, starting from the deepest. */
- while (!__default_power_down_ok(pd, genpd->state_idx)) {
- if (genpd->state_idx == 0) {
+ /*
+ * Find a state to power down to, starting from the state
+ * determined by the next wakeup.
+ */
+ while (!__default_power_down_ok(pd, state_idx)) {
+ if (state_idx == 0) {
genpd->cached_power_down_ok = false;
break;
}
- genpd->state_idx--;
+ state_idx--;
}
+done:
+ genpd->state_idx = state_idx;
genpd->cached_power_down_state_idx = genpd->state_idx;
return genpd->cached_power_down_ok;
}
+static bool default_power_down_ok(struct dev_pm_domain *pd)
+{
+ return _default_power_down_ok(pd, ktime_get());
+}
+
static bool always_on_power_down_ok(struct dev_pm_domain *domain)
{
return false;
@@ -254,11 +337,12 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
struct generic_pm_domain *genpd = pd_to_genpd(pd);
struct cpuidle_device *dev;
ktime_t domain_wakeup, next_hrtimer;
+ ktime_t now = ktime_get();
s64 idle_duration_ns;
int cpu, i;
/* Validate dev PM QoS constraints. */
- if (!default_power_down_ok(pd))
+ if (!_default_power_down_ok(pd, now))
return false;
if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
@@ -280,7 +364,7 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
}
/* The minimum idle duration is from now - until the next wakeup. */
- idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get()));
+ idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
if (idle_duration_ns <= 0)
return false;
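
For illustration, a minimal sketch of how a consumer driver could feed this governor path, assuming the companion dev_pm_genpd_set_next_wakeup() helper from the same series; the driver and its period are hypothetical and this is not part of the patch:

	/*
	 * Hypothetical driver with a predictable wakeup pattern hinting its
	 * genpd governor (the domain must carry GENPD_FLAG_MIN_RESIDENCY).
	 */
	static void foo_arm_periodic_event(struct device *dev, u32 period_ms)
	{
		ktime_t next = ktime_add_ms(ktime_get(), period_ms);

		/* Stale hints are ignored by update_domain_next_wakeup(). */
		dev_pm_genpd_set_next_wakeup(dev, next);
	}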
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 46793276598d..f893c3c5af07 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -16,6 +16,7 @@
*/
#define pr_fmt(fmt) "PM: " fmt
+#define dev_fmt pr_fmt
#include <linux/device.h>
#include <linux/export.h>
@@ -449,8 +450,8 @@ static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
int error)
{
- pr_err("Device %s failed to %s%s: error %d\n",
- dev_name(dev), pm_verb(state.event), info, error);
+ dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
+ error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
@@ -1897,8 +1898,8 @@ int dpm_prepare(pm_message_t state)
error = 0;
continue;
}
- pr_info("Device %s not prepared for power transition: code %d\n",
- dev_name(dev), error);
+ dev_info(dev, "not prepared for power transition: code %d\n",
+ error);
put_device(dev);
break;
}
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index bfda153b1a41..18b82427d0cb 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -325,22 +325,22 @@ static void rpm_put_suppliers(struct device *dev)
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
- int retval, idx;
bool use_links = dev->power.links_count > 0;
+ bool get = false;
+ int retval, idx;
+ bool put;
if (dev->power.irq_safe) {
spin_unlock(&dev->power.lock);
+ } else if (!use_links) {
+ spin_unlock_irq(&dev->power.lock);
} else {
+ get = dev->power.runtime_status == RPM_RESUMING;
+
spin_unlock_irq(&dev->power.lock);
- /*
- * Resume suppliers if necessary.
- *
- * The device's runtime PM status cannot change until this
- * routine returns, so it is safe to read the status outside of
- * the lock.
- */
- if (use_links && dev->power.runtime_status == RPM_RESUMING) {
+ /* Resume suppliers if necessary. */
+ if (get) {
idx = device_links_read_lock();
retval = rpm_get_suppliers(dev);
@@ -355,24 +355,36 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
if (dev->power.irq_safe) {
spin_lock(&dev->power.lock);
- } else {
- /*
- * If the device is suspending and the callback has returned
- * success, drop the usage counters of the suppliers that have
- * been reference counted on its resume.
- *
- * Do that if resume fails too.
- */
- if (use_links
- && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
- || (dev->power.runtime_status == RPM_RESUMING && retval))) {
- idx = device_links_read_lock();
+ return retval;
+ }
- fail:
- rpm_put_suppliers(dev);
+ spin_lock_irq(&dev->power.lock);
- device_links_read_unlock(idx);
- }
+ if (!use_links)
+ return retval;
+
+ /*
+ * If the device is suspending and the callback has returned success,
+ * drop the usage counters of the suppliers that have been reference
+ * counted on its resume.
+ *
+ * Do that if the resume fails too.
+ */
+ put = dev->power.runtime_status == RPM_SUSPENDING && !retval;
+ if (put)
+ __update_runtime_status(dev, RPM_SUSPENDED);
+ else
+ put = get && retval;
+
+ if (put) {
+ spin_unlock_irq(&dev->power.lock);
+
+ idx = device_links_read_lock();
+
+fail:
+ rpm_put_suppliers(dev);
+
+ device_links_read_unlock(idx);
spin_lock_irq(&dev->power.lock);
}
@@ -1100,7 +1112,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
* suspending the device when both its runtime PM status is %RPM_ACTIVE and its
* runtime PM usage counter is not zero.
*
- * The caller is resposible for decrementing the runtime PM usage counter of
+ * The caller is responsible for decrementing the runtime PM usage counter of
* @dev after this function has returned a positive value for it.
*/
int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 35b95c6ac0c6..1421e9548857 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -837,9 +837,15 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put);
/**
* fwnode_device_is_available - check if a device is available for use
* @fwnode: Pointer to the fwnode of the device.
+ *
+ * For fwnode node types that don't implement the .device_is_available()
+ * operation, this function returns true.
*/
bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
+ if (!fwnode_has_op(fwnode, device_is_available))
+ return true;
+
return fwnode_call_bool_op(fwnode, device_is_available);
}
EXPORT_SYMBOL_GPL(fwnode_device_is_available);
@@ -1209,7 +1215,14 @@ fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
best_ep_id = fwnode_ep.id;
}
- return best_ep;
+ if (best_ep)
+ return best_ep;
+
+ if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
+ return fwnode_graph_get_endpoint_by_id(fwnode->secondary, port,
+ endpoint, flags);
+
+ return NULL;
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id);
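
With the fallback to fwnode->secondary above, a caller can keep a single endpoint lookup even when the graph is described by a software node attached as the secondary fwnode. A hedged sketch, with hypothetical port/endpoint numbers:

	struct fwnode_handle *ep;

	/* Port 0, endpoint 0 are examples only; flags 0 means exact match. */
	ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0, 0);
	if (!ep)
		return -ENODEV;
	/* ... parse the endpoint, then drop the reference ... */
	fwnode_handle_put(ep);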
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 7f4b3b62492c..f2469d3435ca 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -68,7 +68,7 @@ static int regcache_hw_init(struct regmap *map)
map->cache_bypass = cache_bypass;
if (ret == 0) {
map->reg_defaults_raw = tmp_buf;
- map->cache_free = 1;
+ map->cache_free = true;
} else {
kfree(tmp_buf);
}
diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c
index 8ce30650b97c..fe3ac26b66ad 100644
--- a/drivers/base/regmap/regmap-sdw-mbq.c
+++ b/drivers/base/regmap/regmap-sdw-mbq.c
@@ -15,11 +15,11 @@ static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int va
struct sdw_slave *slave = dev_to_sdw_dev(dev);
int ret;
- ret = sdw_write(slave, SDW_SDCA_MBQ_CTL(reg), (val >> 8) & 0xff);
+ ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg), (val >> 8) & 0xff);
if (ret < 0)
return ret;
- return sdw_write(slave, reg, val & 0xff);
+ return sdw_write_no_pm(slave, reg, val & 0xff);
}
static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
@@ -29,11 +29,11 @@ static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *va
int read0;
int read1;
- read0 = sdw_read(slave, reg);
+ read0 = sdw_read_no_pm(slave, reg);
if (read0 < 0)
return read0;
- read1 = sdw_read(slave, SDW_SDCA_MBQ_CTL(reg));
+ read1 = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
if (read1 < 0)
return read1;
@@ -98,4 +98,4 @@ struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
MODULE_DESCRIPTION("Regmap SoundWire MBQ Module");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
index c83be26434e7..966de8a136d9 100644
--- a/drivers/base/regmap/regmap-sdw.c
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -13,7 +13,7 @@ static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val)
struct device *dev = context;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
- return sdw_write(slave, reg, val);
+ return sdw_write_no_pm(slave, reg, val);
}
static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
@@ -22,7 +22,7 @@ static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
struct sdw_slave *slave = dev_to_sdw_dev(dev);
int read;
- read = sdw_read(slave, reg);
+ read = sdw_read_no_pm(slave, reg);
if (read < 0)
return read;
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 4a4b2008fbc2..37179a8b1ceb 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -24,6 +24,7 @@ struct swnode {
struct swnode *parent;
unsigned int allocated:1;
+ unsigned int managed:1;
};
static DEFINE_IDA(swnode_root_ids);
@@ -48,6 +49,19 @@ EXPORT_SYMBOL_GPL(is_software_node);
struct swnode, fwnode) : NULL; \
})
+static inline struct swnode *dev_to_swnode(struct device *dev)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ if (!fwnode)
+ return NULL;
+
+ if (!is_software_node(fwnode))
+ fwnode = fwnode->secondary;
+
+ return to_swnode(fwnode);
+}
+
static struct swnode *
software_node_to_swnode(const struct software_node *node)
{
@@ -443,14 +457,18 @@ software_node_get_next_child(const struct fwnode_handle *fwnode,
struct swnode *c = to_swnode(child);
if (!p || list_empty(&p->children) ||
- (c && list_is_last(&c->entry, &p->children)))
+ (c && list_is_last(&c->entry, &p->children))) {
+ fwnode_handle_put(child);
return NULL;
+ }
if (c)
c = list_next_entry(c, entry);
else
c = list_first_entry(&p->children, struct swnode, entry);
- return &c->fwnode;
+
+ fwnode_handle_put(child);
+ return fwnode_handle_get(&c->fwnode);
}
static struct fwnode_handle *
@@ -536,6 +554,115 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
return 0;
}
+static struct fwnode_handle *
+swnode_graph_find_next_port(const struct fwnode_handle *parent,
+ struct fwnode_handle *port)
+{
+ struct fwnode_handle *old = port;
+
+ while ((port = software_node_get_next_child(parent, old))) {
+ /*
+ * fwnode ports have naming style "port@", so we search for any
+ * children that follow that convention.
+ */
+ if (!strncmp(to_swnode(port)->node->name, "port@",
+ strlen("port@")))
+ return port;
+ old = port;
+ }
+
+ return NULL;
+}
+
+static struct fwnode_handle *
+software_node_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *endpoint)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+ struct fwnode_handle *parent;
+ struct fwnode_handle *port;
+
+ if (!swnode)
+ return NULL;
+
+ if (endpoint) {
+ port = software_node_get_parent(endpoint);
+ parent = software_node_get_parent(port);
+ } else {
+ parent = software_node_get_named_child_node(fwnode, "ports");
+ if (!parent)
+ parent = software_node_get(&swnode->fwnode);
+
+ port = swnode_graph_find_next_port(parent, NULL);
+ }
+
+ for (; port; port = swnode_graph_find_next_port(parent, port)) {
+ endpoint = software_node_get_next_child(port, endpoint);
+ if (endpoint) {
+ fwnode_handle_put(port);
+ break;
+ }
+ }
+
+ fwnode_handle_put(parent);
+
+ return endpoint;
+}
+
+static struct fwnode_handle *
+software_node_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+ const struct software_node_ref_args *ref;
+ const struct property_entry *prop;
+
+ if (!swnode)
+ return NULL;
+
+ prop = property_entry_get(swnode->node->properties, "remote-endpoint");
+ if (!prop || prop->type != DEV_PROP_REF || prop->is_inline)
+ return NULL;
+
+ ref = prop->pointer;
+
+ return software_node_get(software_node_fwnode(ref[0].node));
+}
+
+static struct fwnode_handle *
+software_node_graph_get_port_parent(struct fwnode_handle *fwnode)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+
+ swnode = swnode->parent;
+ if (swnode && !strcmp(swnode->node->name, "ports"))
+ swnode = swnode->parent;
+
+ return swnode ? software_node_get(&swnode->fwnode) : NULL;
+}
+
+static int
+software_node_graph_parse_endpoint(const struct fwnode_handle *fwnode,
+ struct fwnode_endpoint *endpoint)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+ const char *parent_name = swnode->parent->node->name;
+ int ret;
+
+ if (strlen("port@") >= strlen(parent_name) ||
+ strncmp(parent_name, "port@", strlen("port@")))
+ return -EINVAL;
+
+ /* Ports have naming style "port@n", so we need to select the n */
+ ret = kstrtou32(parent_name + strlen("port@"), 10, &endpoint->port);
+ if (ret)
+ return ret;
+
+ endpoint->id = swnode->id;
+ endpoint->local_fwnode = fwnode;
+
+ return 0;
+}
+
static const struct fwnode_operations software_node_ops = {
.get = software_node_get,
.put = software_node_put,
@@ -547,7 +674,11 @@ static const struct fwnode_operations software_node_ops = {
.get_parent = software_node_get_parent,
.get_next_child_node = software_node_get_next_child,
.get_named_child_node = software_node_get_named_child_node,
- .get_reference_args = software_node_get_reference_args
+ .get_reference_args = software_node_get_reference_args,
+ .graph_get_next_endpoint = software_node_graph_get_next_endpoint,
+ .graph_get_remote_endpoint = software_node_graph_get_remote_endpoint,
+ .graph_get_port_parent = software_node_graph_get_port_parent,
+ .graph_parse_endpoint = software_node_graph_parse_endpoint,
};
/* -------------------------------------------------------------------------- */
@@ -688,7 +819,11 @@ out_err:
* software_node_register_nodes - Register an array of software nodes
* @nodes: Zero terminated array of software nodes to be registered
*
- * Register multiple software nodes at once.
+ * Register multiple software nodes at once. If any node in the array
+ * has its .parent pointer set (which can only be to another software_node),
+ * then its parent **must** have been registered before it is; either outside
+ * of this function or by ordering the array such that parent comes before
+ * child.
*/
int software_node_register_nodes(const struct software_node *nodes)
{
@@ -696,14 +831,23 @@ int software_node_register_nodes(const struct software_node *nodes)
int i;
for (i = 0; nodes[i].name; i++) {
- ret = software_node_register(&nodes[i]);
- if (ret) {
- software_node_unregister_nodes(nodes);
- return ret;
+ const struct software_node *parent = nodes[i].parent;
+
+ if (parent && !software_node_to_swnode(parent)) {
+ ret = -EINVAL;
+ goto err_unregister_nodes;
}
+
+ ret = software_node_register(&nodes[i]);
+ if (ret)
+ goto err_unregister_nodes;
}
return 0;
+
+err_unregister_nodes:
+ software_node_unregister_nodes(nodes);
+ return ret;
}
EXPORT_SYMBOL_GPL(software_node_register_nodes);
@@ -711,18 +855,23 @@ EXPORT_SYMBOL_GPL(software_node_register_nodes);
* software_node_unregister_nodes - Unregister an array of software nodes
* @nodes: Zero terminated array of software nodes to be unregistered
*
- * Unregister multiple software nodes at once.
+ * Unregister multiple software nodes at once. If parent pointers are set up
+ * in any of the software nodes then the array **must** be ordered such that
+ * parents come before their children.
*
- * NOTE: Be careful using this call if the nodes had parent pointers set up in
- * them before registering. If so, it is wiser to remove the nodes
- * individually, in the correct order (child before parent) instead of relying
- * on the sequential order of the list of nodes in the array.
+ * NOTE: If you are uncertain whether the array is ordered such that
+ * parents will be unregistered before their children, it is wiser to
+ * remove the nodes individually, in the correct order (child before
+ * parent).
*/
void software_node_unregister_nodes(const struct software_node *nodes)
{
- int i;
+ unsigned int i = 0;
+
+ while (nodes[i].name)
+ i++;
- for (i = 0; nodes[i].name; i++)
+ while (i--)
software_node_unregister(&nodes[i]);
}
EXPORT_SYMBOL_GPL(software_node_unregister_nodes);
@@ -757,16 +906,23 @@ EXPORT_SYMBOL_GPL(software_node_register_node_group);
* software_node_unregister_node_group - Unregister a group of software nodes
* @node_group: NULL terminated array of software node pointers to be unregistered
*
- * Unregister multiple software nodes at once.
+ * Unregister multiple software nodes at once. The array will be unwound in
+ * reverse order (i.e. last entry first), so if any members of the array are
+ * children of another member, the children must appear later in the list to
+ * ensure they are unregistered first.
*/
-void software_node_unregister_node_group(const struct software_node **node_group)
+void software_node_unregister_node_group(
+ const struct software_node **node_group)
{
- unsigned int i;
+ unsigned int i = 0;
if (!node_group)
return;
- for (i = 0; node_group[i]; i++)
+ while (node_group[i])
+ i++;
+
+ while (i--)
software_node_unregister(node_group[i]);
}
EXPORT_SYMBOL_GPL(software_node_unregister_node_group);
@@ -843,22 +999,99 @@ void fwnode_remove_software_node(struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(fwnode_remove_software_node);
+/**
+ * device_add_software_node - Assign software node to a device
+ * @dev: The device the software node is meant for.
+ * @swnode: The software node.
+ *
+ * This function will register @swnode and make it the secondary firmware node
+ * pointer of @dev. If @dev has no primary node, then @swnode will become the primary
+ * node.
+ */
+int device_add_software_node(struct device *dev, const struct software_node *swnode)
+{
+ int ret;
+
+ /* Only one software node per device. */
+ if (dev_to_swnode(dev))
+ return -EBUSY;
+
+ ret = software_node_register(swnode);
+ if (ret)
+ return ret;
+
+ set_secondary_fwnode(dev, software_node_fwnode(swnode));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(device_add_software_node);
+
+/**
+ * device_remove_software_node - Remove device's software node
+ * @dev: The device with the software node.
+ *
+ * This function will unregister the software node of @dev.
+ */
+void device_remove_software_node(struct device *dev)
+{
+ struct swnode *swnode;
+
+ swnode = dev_to_swnode(dev);
+ if (!swnode)
+ return;
+
+ software_node_notify(dev, KOBJ_REMOVE);
+ set_secondary_fwnode(dev, NULL);
+ kobject_put(&swnode->kobj);
+}
+EXPORT_SYMBOL_GPL(device_remove_software_node);
+
+/**
+ * device_create_managed_software_node - Create a software node for a device
+ * @dev: The device the software node is assigned to.
+ * @properties: Device properties for the software node.
+ * @parent: Parent of the software node.
+ *
+ * Creates a software node as a managed resource for @dev, which means the
+ * lifetime of the newly created software node is tied to the lifetime of @dev.
+ * Software nodes created with this function should not be reused or shared
+ * because of that. The function takes a deep copy of @properties for the
+ * software node.
+ *
+ * Since the new software node is assigned directly to @dev, and since it should
+ * not be shared, it is not returned to the caller. The function returns 0 on
+ * success, and errno in case of an error.
+ */
+int device_create_managed_software_node(struct device *dev,
+ const struct property_entry *properties,
+ const struct software_node *parent)
+{
+ struct fwnode_handle *p = software_node_fwnode(parent);
+ struct fwnode_handle *fwnode;
+
+ if (parent && !p)
+ return -EINVAL;
+
+ fwnode = fwnode_create_software_node(properties, p);
+ if (IS_ERR(fwnode))
+ return PTR_ERR(fwnode);
+
+ to_swnode(fwnode)->managed = true;
+ set_secondary_fwnode(dev, fwnode);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(device_create_managed_software_node);
+
int software_node_notify(struct device *dev, unsigned long action)
{
- struct fwnode_handle *fwnode = dev_fwnode(dev);
struct swnode *swnode;
int ret;
- if (!fwnode)
- return 0;
-
- if (!is_software_node(fwnode))
- fwnode = fwnode->secondary;
- if (!is_software_node(fwnode))
+ swnode = dev_to_swnode(dev);
+ if (!swnode)
return 0;
- swnode = to_swnode(fwnode);
-
switch (action) {
case KOBJ_ADD:
ret = sysfs_create_link(&dev->kobj, &swnode->kobj,
@@ -878,6 +1111,11 @@ int software_node_notify(struct device *dev, unsigned long action)
sysfs_remove_link(&swnode->kobj, dev_name(dev));
sysfs_remove_link(&dev->kobj, "software_node");
kobject_put(&swnode->kobj);
+
+ if (swnode->managed) {
+ set_secondary_fwnode(dev, NULL);
+ kobject_put(&swnode->kobj);
+ }
break;
default:
break;
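
A short usage sketch for the helpers added above and the parent-ordering rule they document; every identifier below is hypothetical:

	#include <linux/platform_device.h>
	#include <linux/property.h>

	static const struct software_node acme_nodes[] = {
		{ .name = "acme-mfd" },
		{ .name = "acme-led", .parent = &acme_nodes[0] },	/* parent listed first */
		{ }							/* zero terminator */
	};

	static const struct property_entry acme_props[] = {
		PROPERTY_ENTRY_U32("acme,default-brightness", 128),
		{ }
	};

	static int acme_probe(struct platform_device *pdev)
	{
		int err;

		/* Parents must already be registered, or ordered before children. */
		err = software_node_register_nodes(acme_nodes);
		if (err)
			return err;

		/* Managed node: torn down with the device, never shared or reused. */
		return device_create_managed_software_node(&pdev->dev, acme_props, NULL);
	}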
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
index 3ca56367c84b..2f15fae8625f 100644
--- a/drivers/base/test/Makefile
+++ b/drivers/base/test/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
obj-$(CONFIG_KUNIT_DRIVER_PE_TEST) += property-entry-test.o
+CFLAGS_REMOVE_property-entry-test.o += -fplugin-arg-structleak_plugin-byref -fplugin-arg-structleak_plugin-byref-all
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 583b671b1d2d..fd236158f32d 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -66,6 +66,12 @@ config AMIGA_Z2RAM
To compile this driver as a module, choose M here: the
module will be called z2ram.
+config N64CART
+ bool "N64 cart support"
+ depends on MACH_NINTENDO64
+ help
+ Support for the N64 cart.
+
config CDROM
tristate
select BLK_SCSI_REQUEST
@@ -267,16 +273,6 @@ config BLK_DEV_NBD
If unsure, say N.
-config BLK_DEV_SKD
- tristate "STEC S1120 Block Driver"
- depends on PCI
- depends on 64BIT
- help
- Saying Y or M here will enable support for the
- STEC, Inc. S1120 PCIe SSD.
-
- Use device /dev/skd$N amd /dev/skd$Np$M.
-
config BLK_DEV_SX8
tristate "Promise SATA SX8 support"
depends on PCI
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index a3170859e01d..e3e3f1c79a82 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -17,12 +17,12 @@ obj-$(CONFIG_PS3_DISK) += ps3disk.o
obj-$(CONFIG_PS3_VRAM) += ps3vram.o
obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
+obj-$(CONFIG_N64CART) += n64cart.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
-obj-$(CONFIG_BLK_DEV_SKD) += skd.o
obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
@@ -43,5 +43,4 @@ obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/
-skd-y := skd_main.o
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index ac720bdcd983..ecd77897a761 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1046,7 +1046,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
__blk_mq_end_request(rq, err);
- /* cf. http://lkml.org/lkml/2006/10/31/28 */
+ /* cf. https://lore.kernel.org/lkml/20061031071040.GS14055@kernel.dk/ */
if (!fastfail)
blk_mq_run_hw_queues(q, true);
}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index c43a6ab4b1f3..18bf99906662 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -284,15 +284,11 @@ out:
static blk_qc_t brd_submit_bio(struct bio *bio)
{
- struct brd_device *brd = bio->bi_disk->private_data;
+ struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
+ sector_t sector = bio->bi_iter.bi_sector;
struct bio_vec bvec;
- sector_t sector;
struct bvec_iter iter;
- sector = bio->bi_iter.bi_sector;
- if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
- goto io_error;
-
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
int err;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 7227fc7ab8ed..72cf7603d51f 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -138,7 +138,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
op_flags |= REQ_FUA | REQ_PREFLUSH;
op_flags |= REQ_SYNC;
- bio = bio_alloc_drbd(GFP_NOIO);
+ bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
bio_set_dev(bio, bdev->md_bdev);
bio->bi_iter.bi_sector = sector;
err = -EIO;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index df53dca5d02c..c1f816f896a8 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -976,7 +976,7 @@ static void drbd_bm_endio(struct bio *bio)
static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
- struct bio *bio = bio_alloc_drbd(GFP_NOIO);
+ struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
struct drbd_device *device = ctx->device;
struct drbd_bitmap *b = device->bitmap;
struct page *page;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 8f879e5c2f67..7d9cc433b758 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1422,8 +1422,6 @@ extern mempool_t drbd_md_io_page_pool;
/* We also need to make sure we get a bio
* when we need it for housekeeping purposes */
extern struct bio_set drbd_md_io_bio_set;
-/* to allocate from that set */
-extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
/* And a bio_set for cloning */
extern struct bio_set drbd_io_bio_set;
@@ -1449,7 +1447,7 @@ extern void conn_free_crypto(struct drbd_connection *connection);
/* drbd_req */
extern void do_submit(struct work_struct *ws);
-extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
+extern void __drbd_make_request(struct drbd_device *, struct bio *);
extern blk_qc_t drbd_submit_bio(struct bio *bio);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);
@@ -1579,8 +1577,8 @@ static inline void drbd_submit_bio_noacct(struct drbd_device *device,
int fault_type, struct bio *bio)
{
__release(local);
- if (!bio->bi_disk) {
- drbd_err(device, "drbd_submit_bio_noacct: bio->bi_disk == NULL\n");
+ if (!bio->bi_bdev) {
+ drbd_err(device, "drbd_submit_bio_noacct: bio->bi_bdev == NULL\n");
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
return;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 1c8c18b2a25f..25cd8a2f729d 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -138,19 +138,6 @@ static const struct block_device_operations drbd_ops = {
.release = drbd_release,
};
-struct bio *bio_alloc_drbd(gfp_t gfp_mask)
-{
- struct bio *bio;
-
- if (!bioset_initialized(&drbd_md_io_bio_set))
- return bio_alloc(gfp_mask, 1);
-
- bio = bio_alloc_bioset(gfp_mask, 1, &drbd_md_io_bio_set);
- if (!bio)
- return NULL;
- return bio;
-}
-
#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
give tons of false positives. When this is a real functions sparse works.
@@ -2288,7 +2275,6 @@ static void do_retry(struct work_struct *ws)
list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
struct drbd_device *device = req->device;
struct bio *bio = req->master_bio;
- unsigned long start_jif = req->start_jif;
bool expected;
expected =
@@ -2323,7 +2309,7 @@ static void do_retry(struct work_struct *ws)
/* We are not just doing submit_bio_noacct(),
* as we want to keep the start_time information. */
inc_ap_bio(device);
- __drbd_make_request(device, bio, start_jif);
+ __drbd_make_request(device, bio);
}
}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 09c86ef3f0fd..c3f09a122f20 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -111,8 +111,10 @@ static struct page *page_chain_tail(struct page *page, int *len)
{
struct page *tmp;
int i = 1;
- while ((tmp = page_chain_next(page)))
- ++i, page = tmp;
+ while ((tmp = page_chain_next(page))) {
+ ++i;
+ page = tmp;
+ }
if (len)
*len = i;
return page;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 330f851cb8f0..9398c2c2cb2d 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -30,7 +30,10 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio
return NULL;
memset(req, 0, sizeof(*req));
- drbd_req_make_private_bio(req, bio_src);
+ req->private_bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set);
+ req->private_bio->bi_private = req;
+ req->private_bio->bi_end_io = drbd_request_endio;
+
req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
| (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
| (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_ZEROES : 0)
@@ -1188,7 +1191,7 @@ static void drbd_queue_write(struct drbd_device *device, struct drbd_request *re
* Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
*/
static struct drbd_request *
-drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
+drbd_request_prepare(struct drbd_device *device, struct bio *bio)
{
const int rw = bio_data_dir(bio);
struct drbd_request *req;
@@ -1416,9 +1419,9 @@ out:
complete_master_bio(device, &m);
}
-void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
+void __drbd_make_request(struct drbd_device *device, struct bio *bio)
{
- struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
+ struct drbd_request *req = drbd_request_prepare(device, bio);
if (IS_ERR_OR_NULL(req))
return;
drbd_send_and_submit(device, req);
@@ -1595,20 +1598,17 @@ void do_submit(struct work_struct *ws)
blk_qc_t drbd_submit_bio(struct bio *bio)
{
- struct drbd_device *device = bio->bi_disk->private_data;
- unsigned long start_jif;
+ struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
blk_queue_split(&bio);
- start_jif = jiffies;
-
/*
* what we "blindly" assume:
*/
D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));
inc_ap_bio(device);
- __drbd_make_request(device, bio, start_jif);
+ __drbd_make_request(device, bio);
return BLK_QC_T_NONE;
}
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 55bb0f8721fa..511f39a08de4 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -256,18 +256,6 @@ enum drbd_req_state_bits {
#define MR_WRITE 1
#define MR_READ 2
-static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
-{
- struct bio *bio;
- bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set);
-
- req->private_bio = bio;
-
- bio->bi_private = req;
- bio->bi_end_io = drbd_request_endio;
- bio->bi_next = NULL;
-}
-
/* Short lived temporary struct on the stack.
* We could squirrel the error to be returned into
* bio->bi_iter.bi_size, or similar. But that would be too ugly. */
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 02044ab7f767..64563bfdf0da 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -1523,8 +1523,11 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
drbd_al_begin_io(device, &req->i);
- drbd_req_make_private_bio(req, req->master_bio);
+ req->private_bio = bio_clone_fast(req->master_bio, GFP_NOIO,
+ &drbd_io_bio_set);
bio_set_dev(req->private_bio, device->ldev->backing_bdev);
+ req->private_bio->bi_private = req;
+ req->private_bio->bi_end_io = drbd_request_endio;
submit_bio_noacct(req->private_bio);
return 0;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index dfe1dfc901cc..0b71292d9d5a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4121,23 +4121,23 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (fdc_state[FDC(drive)].rawcmd == 1)
fdc_state[FDC(drive)].rawcmd = 2;
- if (!(mode & FMODE_NDELAY)) {
- if (mode & (FMODE_READ|FMODE_WRITE)) {
- drive_state[drive].last_checked = 0;
- clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
- &drive_state[drive].flags);
- if (bdev_check_media_change(bdev))
- floppy_revalidate(bdev->bd_disk);
- if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
- goto out;
- if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
- goto out;
- }
- res = -EROFS;
- if ((mode & FMODE_WRITE) &&
- !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
+ if (mode & (FMODE_READ|FMODE_WRITE)) {
+ drive_state[drive].last_checked = 0;
+ clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags);
+ if (bdev_check_media_change(bdev))
+ floppy_revalidate(bdev->bd_disk);
+ if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
+ goto out;
+ if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
goto out;
}
+
+ res = -EROFS;
+
+ if ((mode & FMODE_WRITE) &&
+ !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
+ goto out;
+
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
return 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index e5ff328f0917..a370cde3ddd4 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -663,7 +663,7 @@ static inline int is_loop_device(struct file *file)
{
struct inode *i = file->f_mapping->host;
- return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
+ return i && S_ISBLK(i->i_mode) && imajor(i) == LOOP_MAJOR;
}
static int loop_validate_file(struct file *file, struct block_device *bdev)
@@ -704,7 +704,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
int error;
bool partscan;
- error = mutex_lock_killable(&loop_ctl_mutex);
+ error = mutex_lock_killable(&lo->lo_mutex);
if (error)
return error;
error = -ENXIO;
@@ -743,9 +743,9 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue);
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
/*
- * We must drop file reference outside of loop_ctl_mutex as dropping
+ * We must drop file reference outside of lo_mutex as dropping
* the file ref can take bd_mutex which creates circular locking
* dependency.
*/
@@ -755,7 +755,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
return 0;
out_err:
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
if (file)
fput(file);
return error;
@@ -1092,7 +1092,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
goto out_putf;
}
- error = mutex_lock_killable(&loop_ctl_mutex);
+ error = mutex_lock_killable(&lo->lo_mutex);
if (error)
goto out_bdev;
@@ -1171,7 +1171,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
* put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
*/
bdgrab(bdev);
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
if (partscan)
loop_reread_partitions(lo, bdev);
if (!(mode & FMODE_EXCL))
@@ -1179,7 +1179,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
return 0;
out_unlock:
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
out_bdev:
if (!(mode & FMODE_EXCL))
bd_abort_claiming(bdev, loop_configure);
@@ -1200,7 +1200,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
bool partscan = false;
int lo_number;
- mutex_lock(&loop_ctl_mutex);
+ mutex_lock(&lo->lo_mutex);
if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
err = -ENXIO;
goto out_unlock;
@@ -1212,6 +1212,9 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
goto out_unlock;
}
+ if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
+ blk_queue_write_cache(lo->lo_queue, false, false);
+
/* freeze request queue during the transition */
blk_mq_freeze_queue(lo->lo_queue);
@@ -1253,7 +1256,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
lo_number = lo->lo_number;
loop_unprepare_queue(lo);
out_unlock:
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
if (partscan) {
/*
* bd_mutex has been held already in release path, so don't
@@ -1284,18 +1287,17 @@ out_unlock:
* protects us from all the other places trying to change the 'lo'
* device.
*/
- mutex_lock(&loop_ctl_mutex);
+ mutex_lock(&lo->lo_mutex);
lo->lo_flags = 0;
if (!part_shift)
lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
lo->lo_state = Lo_unbound;
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
/*
- * Need not hold loop_ctl_mutex to fput backing file.
- * Calling fput holding loop_ctl_mutex triggers a circular
- * lock dependency possibility warning as fput can take
- * bd_mutex which is usually taken before loop_ctl_mutex.
+ * Need not hold lo_mutex to fput backing file. Calling fput holding
+ * lo_mutex triggers a circular lock dependency possibility warning as
+ * fput can take bd_mutex which is usually taken before lo_mutex.
*/
if (filp)
fput(filp);
@@ -1306,11 +1308,11 @@ static int loop_clr_fd(struct loop_device *lo)
{
int err;
- err = mutex_lock_killable(&loop_ctl_mutex);
+ err = mutex_lock_killable(&lo->lo_mutex);
if (err)
return err;
if (lo->lo_state != Lo_bound) {
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
return -ENXIO;
}
/*
@@ -1325,11 +1327,11 @@ static int loop_clr_fd(struct loop_device *lo)
*/
if (atomic_read(&lo->lo_refcnt) > 1) {
lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
return 0;
}
lo->lo_state = Lo_rundown;
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
return __loop_clr_fd(lo, false);
}
@@ -1344,7 +1346,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
bool partscan = false;
bool size_changed = false;
- err = mutex_lock_killable(&loop_ctl_mutex);
+ err = mutex_lock_killable(&lo->lo_mutex);
if (err)
return err;
if (lo->lo_encrypt_key_size &&
@@ -1411,7 +1413,7 @@ out_unfreeze:
partscan = true;
}
out_unlock:
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
if (partscan)
loop_reread_partitions(lo, bdev);
@@ -1425,11 +1427,11 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
struct kstat stat;
int ret;
- ret = mutex_lock_killable(&loop_ctl_mutex);
+ ret = mutex_lock_killable(&lo->lo_mutex);
if (ret)
return ret;
if (lo->lo_state != Lo_bound) {
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
return -ENXIO;
}
@@ -1448,10 +1450,10 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
lo->lo_encrypt_key_size);
}
- /* Drop loop_ctl_mutex while we call into the filesystem. */
+ /* Drop lo_mutex while we call into the filesystem. */
path = lo->lo_backing_file->f_path;
path_get(&path);
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
if (!ret) {
info->lo_device = huge_encode_dev(stat.dev);
@@ -1637,7 +1639,7 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
{
int err;
- err = mutex_lock_killable(&loop_ctl_mutex);
+ err = mutex_lock_killable(&lo->lo_mutex);
if (err)
return err;
switch (cmd) {
@@ -1653,7 +1655,7 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
default:
err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
}
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
return err;
}
@@ -1879,27 +1881,33 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
struct loop_device *lo;
int err;
+ /*
+ * Take loop_ctl_mutex to protect the lo pointer from a race with
+ * loop_control_ioctl(LOOP_CTL_REMOVE); however, to reduce contention,
+ * release it prior to updating lo->lo_refcnt.
+ */
err = mutex_lock_killable(&loop_ctl_mutex);
if (err)
return err;
lo = bdev->bd_disk->private_data;
if (!lo) {
- err = -ENXIO;
- goto out;
+ mutex_unlock(&loop_ctl_mutex);
+ return -ENXIO;
}
-
- atomic_inc(&lo->lo_refcnt);
-out:
+ err = mutex_lock_killable(&lo->lo_mutex);
mutex_unlock(&loop_ctl_mutex);
- return err;
+ if (err)
+ return err;
+ atomic_inc(&lo->lo_refcnt);
+ mutex_unlock(&lo->lo_mutex);
+ return 0;
}
static void lo_release(struct gendisk *disk, fmode_t mode)
{
- struct loop_device *lo;
+ struct loop_device *lo = disk->private_data;
- mutex_lock(&loop_ctl_mutex);
- lo = disk->private_data;
+ mutex_lock(&lo->lo_mutex);
if (atomic_dec_return(&lo->lo_refcnt))
goto out_unlock;
@@ -1907,7 +1915,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
if (lo->lo_state != Lo_bound)
goto out_unlock;
lo->lo_state = Lo_rundown;
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
/*
* In autoclear mode, stop the loop thread
* and remove configuration after last close.
@@ -1924,7 +1932,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
}
out_unlock:
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
}
static const struct block_device_operations lo_fops = {
@@ -1963,10 +1971,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
struct loop_device *lo = ptr;
struct loop_func_table *xfer = data;
- mutex_lock(&loop_ctl_mutex);
+ mutex_lock(&lo->lo_mutex);
if (lo->lo_encryption == xfer)
loop_release_xfer(lo);
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_mutex);
return 0;
}
@@ -2152,6 +2160,7 @@ static int loop_add(struct loop_device **l, int i)
disk->flags |= GENHD_FL_NO_PART_SCAN;
disk->flags |= GENHD_FL_EXT_DEVT;
atomic_set(&lo->lo_refcnt, 0);
+ mutex_init(&lo->lo_mutex);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
disk->major = LOOP_MAJOR;
@@ -2182,6 +2191,7 @@ static void loop_remove(struct loop_device *lo)
blk_cleanup_queue(lo->lo_queue);
blk_mq_free_tag_set(&lo->tag_set);
put_disk(lo->lo_disk);
+ mutex_destroy(&lo->lo_mutex);
kfree(lo);
}
@@ -2261,15 +2271,21 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
ret = loop_lookup(&lo, parm);
if (ret < 0)
break;
+ ret = mutex_lock_killable(&lo->lo_mutex);
+ if (ret)
+ break;
if (lo->lo_state != Lo_unbound) {
ret = -EBUSY;
+ mutex_unlock(&lo->lo_mutex);
break;
}
if (atomic_read(&lo->lo_refcnt) > 0) {
ret = -EBUSY;
+ mutex_unlock(&lo->lo_mutex);
break;
}
lo->lo_disk->private_data = NULL;
+ mutex_unlock(&lo->lo_mutex);
idr_remove(&loop_index_idr, lo->lo_number);
loop_remove(lo);
break;
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index af75a5ee4094..a3c04f310672 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -62,6 +62,7 @@ struct loop_device {
struct request_queue *lo_queue;
struct blk_mq_tag_set tag_set;
struct gendisk *lo_disk;
+ struct mutex lo_mutex;
};
struct loop_cmd {
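
Summarizing the resulting locking for reference (illustrative, not part of the patch):

	/*
	 * loop_ctl_mutex - protects add/remove/lookup of loop devices only.
	 * lo->lo_mutex   - protects the per-device state (Lo_unbound, Lo_bound,
	 *                  Lo_rundown) and configuration.
	 *
	 * Nesting is always loop_ctl_mutex -> lo->lo_mutex, and fput() of the
	 * backing file is still done outside lo->lo_mutex to avoid the bd_mutex
	 * ordering issue noted in the comments above.
	 */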
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 53ac59d19ae5..3be0dbc674bd 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -1015,7 +1015,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
rq->timeout = timeout;
/* insert request and run queue */
- blk_execute_rq(rq->q, NULL, rq, true);
+ blk_execute_rq(NULL, rq, true);
if (int_cmd->status) {
dev_err(&dd->pdev->dev, "Internal command [%02X] failed %d\n",
@@ -3924,23 +3924,18 @@ static DEFINE_HANDLER(7);
static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
{
- int pos;
unsigned short pcie_dev_ctrl;
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (pos) {
- pci_read_config_word(pdev,
- pos + PCI_EXP_DEVCTL,
- &pcie_dev_ctrl);
- if (pcie_dev_ctrl & (1 << 11) ||
- pcie_dev_ctrl & (1 << 4)) {
+ if (pci_is_pcie(pdev)) {
+ pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &pcie_dev_ctrl);
+ if (pcie_dev_ctrl & PCI_EXP_DEVCTL_NOSNOOP_EN ||
+ pcie_dev_ctrl & PCI_EXP_DEVCTL_RELAX_EN) {
dev_info(&dd->pdev->dev,
"Disabling ERO/No-Snoop on bridge device %04x:%04x\n",
pdev->vendor, pdev->device);
pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN |
PCI_EXP_DEVCTL_RELAX_EN);
- pci_write_config_word(pdev,
- pos + PCI_EXP_DEVCTL,
+ pcie_capability_write_word(pdev, PCI_EXP_DEVCTL,
pcie_dev_ctrl);
}
}
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
new file mode 100644
index 000000000000..47bdf324e962
--- /dev/null
+++ b/drivers/block/n64cart.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for the N64 cart.
+ *
+ * Copyright (c) 2021 Lauri Kasanen
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitops.h>
+#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+enum {
+ PI_DRAM_REG = 0,
+ PI_CART_REG,
+ PI_READ_REG,
+ PI_WRITE_REG,
+ PI_STATUS_REG,
+};
+
+#define PI_STATUS_DMA_BUSY (1 << 0)
+#define PI_STATUS_IO_BUSY (1 << 1)
+
+#define CART_DOMAIN 0x10000000
+#define CART_MAX 0x1FFFFFFF
+
+#define MIN_ALIGNMENT 8
+
+static u32 __iomem *reg_base;
+
+static unsigned int start;
+module_param(start, uint, 0);
+MODULE_PARM_DESC(start, "Start address of the cart block data");
+
+static unsigned int size;
+module_param(size, uint, 0);
+MODULE_PARM_DESC(size, "Size of the cart block data, in bytes");
+
+static void n64cart_write_reg(const u8 reg, const u32 value)
+{
+ writel(value, reg_base + reg);
+}
+
+static u32 n64cart_read_reg(const u8 reg)
+{
+ return readl(reg_base + reg);
+}
+
+static void n64cart_wait_dma(void)
+{
+ while (n64cart_read_reg(PI_STATUS_REG) &
+ (PI_STATUS_DMA_BUSY | PI_STATUS_IO_BUSY))
+ cpu_relax();
+}
+
+/*
+ * Process a single bvec of a bio.
+ */
+static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos)
+{
+ dma_addr_t dma_addr;
+ const u32 bstart = pos + start;
+
+ /* Alignment check */
+ WARN_ON_ONCE((bv->bv_offset & (MIN_ALIGNMENT - 1)) ||
+ (bv->bv_len & (MIN_ALIGNMENT - 1)));
+
+ dma_addr = dma_map_bvec(dev, bv, DMA_FROM_DEVICE, 0);
+ if (dma_mapping_error(dev, dma_addr))
+ return false;
+
+ n64cart_wait_dma();
+
+ n64cart_write_reg(PI_DRAM_REG, dma_addr + bv->bv_offset);
+ n64cart_write_reg(PI_CART_REG, (bstart | CART_DOMAIN) & CART_MAX);
+ n64cart_write_reg(PI_WRITE_REG, bv->bv_len - 1);
+
+ n64cart_wait_dma();
+
+ dma_unmap_page(dev, dma_addr, bv->bv_len, DMA_FROM_DEVICE);
+ return true;
+}
+
+static blk_qc_t n64cart_submit_bio(struct bio *bio)
+{
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ struct device *dev = bio->bi_disk->private_data;
+ u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+
+ bio_for_each_segment(bvec, bio, iter) {
+ if (!n64cart_do_bvec(dev, &bvec, pos))
+ goto io_error;
+ pos += bvec.bv_len;
+ }
+
+ bio_endio(bio);
+ return BLK_QC_T_NONE;
+io_error:
+ bio_io_error(bio);
+ return BLK_QC_T_NONE;
+}
+
+static const struct block_device_operations n64cart_fops = {
+ .owner = THIS_MODULE,
+ .submit_bio = n64cart_submit_bio,
+};
+
+/*
+ * The target device is embedded and RAM-constrained. We save RAM
+ * by initializing in __init code that gets dropped late in boot.
+ * For the same reason there is no module or unloading support.
+ */
+static int __init n64cart_probe(struct platform_device *pdev)
+{
+ struct gendisk *disk;
+
+ if (!start || !size) {
+ pr_err("start or size not specified\n");
+ return -ENODEV;
+ }
+
+ if (size & 4095) {
+ pr_err("size must be a multiple of 4K\n");
+ return -ENODEV;
+ }
+
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (!reg_base)
+ return -EINVAL;
+
+ disk = alloc_disk(0);
+ if (!disk)
+ return -ENOMEM;
+
+ disk->queue = blk_alloc_queue(NUMA_NO_NODE);
+ if (!disk->queue)
+ return -ENOMEM;
+
+ disk->first_minor = 0;
+ disk->flags = GENHD_FL_NO_PART_SCAN | GENHD_FL_EXT_DEVT;
+ disk->fops = &n64cart_fops;
+ disk->private_data = &pdev->dev;
+ strcpy(disk->disk_name, "n64cart");
+
+ set_capacity(disk, size >> SECTOR_SHIFT);
+ set_disk_ro(disk, 1);
+
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
+ blk_queue_physical_block_size(disk->queue, 4096);
+ blk_queue_logical_block_size(disk->queue, 4096);
+
+ add_disk(disk);
+
+ pr_info("n64cart: %u kb disk\n", size / 1024);
+
+ return 0;
+}
+
+static struct platform_driver n64cart_driver = {
+ .driver = {
+ .name = "n64cart",
+ },
+};
+
+static int __init n64cart_init(void)
+{
+ return platform_driver_probe(&n64cart_driver, n64cart_probe);
+}
+
+module_init(n64cart_init);
+
+MODULE_AUTHOR("Lauri Kasanen <cand@gmx.com>");
+MODULE_DESCRIPTION("Driver for the N64 cart");
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 6727358e147d..4ff71b579cfc 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -78,8 +78,7 @@ struct link_dead_args {
#define NBD_RT_HAS_PID_FILE 3
#define NBD_RT_HAS_CONFIG_REF 4
#define NBD_RT_BOUND 5
-#define NBD_RT_DESTROY_ON_DISCONNECT 6
-#define NBD_RT_DISCONNECT_ON_CLOSE 7
+#define NBD_RT_DISCONNECT_ON_CLOSE 6
#define NBD_DESTROY_ON_DISCONNECT 0
#define NBD_DISCONNECT_REQUESTED 1
@@ -1022,6 +1021,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
if (!sock)
return err;
+ /*
+ * We need to make sure we don't get any errant requests while we're
+ * reallocating the ->socks array.
+ */
+ blk_mq_freeze_queue(nbd->disk->queue);
+
if (!netlink && !nbd->task_setup &&
!test_bit(NBD_RT_BOUND, &config->runtime_flags))
nbd->task_setup = current;
@@ -1060,10 +1065,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
nsock->cookie = 0;
socks[config->num_connections++] = nsock;
atomic_inc(&config->live_connections);
+ blk_mq_unfreeze_queue(nbd->disk->queue);
return 0;
put_socket:
+ blk_mq_unfreeze_queue(nbd->disk->queue);
sockfd_put(sock);
return err;
}
@@ -1521,17 +1528,7 @@ static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
return 0;
}
-static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
-{
- return single_open(file, nbd_dbg_tasks_show, inode->i_private);
-}
-
-static const struct file_operations nbd_dbg_tasks_ops = {
- .open = nbd_dbg_tasks_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks);
static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
@@ -1556,17 +1553,7 @@ static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
return 0;
}
-static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
-{
- return single_open(file, nbd_dbg_flags_show, inode->i_private);
-}
-
-static const struct file_operations nbd_dbg_flags_ops = {
- .open = nbd_dbg_flags_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(nbd_dbg_flags);
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
@@ -1584,11 +1571,11 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
}
config->dbg_dir = dir;
- debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
+ debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
- debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
+ debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
return 0;
}
@@ -1916,12 +1903,21 @@ again:
if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
- set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
- &config->runtime_flags);
- set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
- put_dev = true;
+ /*
+ * We have 1 ref to keep the device around, and then 1
+ * ref for our current operation here, which will be
+ * inherited by the config. If we already have
+ * DESTROY_ON_DISCONNECT set then we know we don't have
+ * that extra ref already held so we don't need the
+ * put_dev.
+ */
+ if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
+ &nbd->flags))
+ put_dev = true;
} else {
- clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
+ if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
+ &nbd->flags))
+ refcount_inc(&nbd->refs);
}
if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
@@ -2092,15 +2088,13 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
- if (!test_and_set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
- &config->runtime_flags))
+ if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
+ &nbd->flags))
put_dev = true;
- set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
} else {
- if (test_and_clear_bit(NBD_RT_DESTROY_ON_DISCONNECT,
- &config->runtime_flags))
+ if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
+ &nbd->flags))
refcount_inc(&nbd->refs);
- clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
}
if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
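
For reference, DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks) generates roughly the boilerplate it replaces (simplified sketch):

	static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
	{
		return single_open(file, nbd_dbg_tasks_show, inode->i_private);
	}

	static const struct file_operations nbd_dbg_tasks_fops = {
		.owner   = THIS_MODULE,
		.open    = nbd_dbg_tasks_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};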
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 5357c3a4a36f..d6c821d48090 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1420,7 +1420,7 @@ static blk_qc_t null_submit_bio(struct bio *bio)
{
sector_t sector = bio->bi_iter.bi_sector;
sector_t nr_sectors = bio_sectors(bio);
- struct nullb *nullb = bio->bi_disk->private_data;
+ struct nullb *nullb = bio->bi_bdev->bd_disk->private_data;
struct nullb_queue *nq = nullb_to_queue(nullb);
struct nullb_cmd *cmd;
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 148b871f263b..bfcab1c782b5 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -6,7 +6,10 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-#define MB_TO_SECTS(mb) (((sector_t)mb * SZ_1M) >> SECTOR_SHIFT)
+static inline sector_t mb_to_sects(unsigned long mb)
+{
+ return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
+}
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
@@ -77,12 +80,11 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
return -EINVAL;
}
- zone_capacity_sects = MB_TO_SECTS(dev->zone_capacity);
- dev_capacity_sects = MB_TO_SECTS(dev->size);
- dev->zone_size_sects = MB_TO_SECTS(dev->zone_size);
- dev->nr_zones = dev_capacity_sects >> ilog2(dev->zone_size_sects);
- if (dev_capacity_sects & (dev->zone_size_sects - 1))
- dev->nr_zones++;
+ zone_capacity_sects = mb_to_sects(dev->zone_capacity);
+ dev_capacity_sects = mb_to_sects(dev->size);
+ dev->zone_size_sects = mb_to_sects(dev->zone_size);
+ dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
+ >> ilog2(dev->zone_size_sects);
dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
GFP_KERNEL | __GFP_ZERO);
@@ -146,10 +148,6 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
sector += dev->zone_size_sects;
}
- q->limits.zoned = BLK_ZONED_HM;
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
- blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
-
return 0;
}
@@ -158,6 +156,10 @@ int null_register_zoned_dev(struct nullb *nullb)
struct nullb_device *dev = nullb->dev;
struct request_queue *q = nullb->q;
+ blk_queue_set_zoned(nullb->disk, BLK_ZONED_HM);
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
+ blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
+
if (queue_is_mq(q)) {
int ret = blk_revalidate_disk_zones(nullb->disk, NULL);
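
A worked example of the new nr_zones rounding, using hypothetical sizes (not from the patch):

	/*
	 * size = 2000 MB, zone_size = 256 MB:
	 *
	 *   dev_capacity_sects   = 2000 * SZ_1M >> SECTOR_SHIFT = 4096000
	 *   dev->zone_size_sects =  256 * SZ_1M >> SECTOR_SHIFT =  524288
	 *
	 *   round_up(4096000, 524288) = 4194304
	 *   ilog2(524288) = 19, so 4194304 >> 19 = 8 zones
	 *
	 * The trailing partial zone is still counted, matching the old
	 * "dev->nr_zones++" behaviour without the extra branch.
	 */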
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index a7af4f27b7c3..897acda20ac8 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -781,7 +781,7 @@ static int pd_special_command(struct pd_unit *disk,
req = blk_mq_rq_to_pdu(rq);
req->func = func;
- blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
+ blk_execute_rq(disk->gd, rq, 0);
blk_put_request(rq);
return 0;
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index b8bb8ec7538d..fc4b0f1aa86d 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -722,7 +722,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
if (cgc->quiet)
rq->rq_flags |= RQF_QUIET;
- blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
+ blk_execute_rq(pd->bdev->bd_disk, rq, 0);
if (scsi_req(rq)->result)
ret = -EIO;
out:
@@ -2374,7 +2374,7 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
blk_queue_split(&bio);
- pd = bio->bi_disk->queue->queuedata;
+ pd = bio->bi_bdev->bd_disk->queue->queuedata;
if (!pd) {
pr_err("%s incorrect request queue\n", bio_devname(bio, b));
goto end_io;
@@ -2418,7 +2418,7 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
split = bio;
}
- pkt_make_request_write(bio->bi_disk->queue, split);
+ pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
} while (split != bio);
return BLK_QC_T_NONE;
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index b71d28372ef3..1d738999fb69 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -581,7 +581,7 @@ out:
static blk_qc_t ps3vram_submit_bio(struct bio *bio)
{
- struct ps3_system_bus_device *dev = bio->bi_disk->private_data;
+ struct ps3_system_bus_device *dev = bio->bi_bdev->bd_disk->private_data;
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
int busy;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 59cfe71d0b3a..bbb88eb009e0 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -692,29 +692,10 @@ static void rbd_release(struct gendisk *disk, fmode_t mode)
put_device(&rbd_dev->dev);
}
-static int rbd_set_read_only(struct block_device *bdev, bool ro)
-{
- struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
-
- /*
- * Both images mapped read-only and snapshots can't be marked
- * read-write.
- */
- if (!ro) {
- if (rbd_is_ro(rbd_dev))
- return -EROFS;
-
- rbd_assert(!rbd_is_snap(rbd_dev));
- }
-
- return 0;
-}
-
static const struct block_device_operations rbd_bd_ops = {
.owner = THIS_MODULE,
.open = rbd_open,
.release = rbd_release,
- .set_read_only = rbd_set_read_only,
};
/*
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 63f549889f87..5ac1881396af 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -165,15 +165,17 @@ static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
{
struct rsxx_cardinfo *card = file_inode(fp)->i_private;
char *buf;
- ssize_t st;
+ int st;
buf = kzalloc(cnt, GFP_KERNEL);
if (!buf)
return -ENOMEM;
st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
- if (!st)
- st = copy_to_user(ubuf, buf, cnt);
+ if (!st) {
+ if (copy_to_user(ubuf, buf, cnt))
+ st = -EFAULT;
+ }
kfree(buf);
if (st)
return st;
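The rsxx_cram_read fix above hinges on copy_to_user() returning the number of bytes it could not copy rather than an errno: propagating that raw count to the caller would be wrong, so any nonzero return becomes -EFAULT (st is also narrowed to int, presumably to match rsxx_creg_read()'s return type). The pattern in isolation, as a hedged sketch with a hypothetical helper name:

	/* Sketch only: creg_copy_out() is hypothetical. copy_to_user()
	 * returns the number of uncopied bytes, so any shortfall means a
	 * faulted user buffer and must be reported as -EFAULT, never as a
	 * byte count. */
	static int creg_copy_out(void __user *ubuf, const void *buf, size_t cnt)
	{
		if (copy_to_user(ubuf, buf, cnt))
			return -EFAULT;
		return 0;
	}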
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index edacefff6e35..9a28322a8cd8 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -122,7 +122,7 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
static blk_qc_t rsxx_submit_bio(struct bio *bio)
{
- struct rsxx_cardinfo *card = bio->bi_disk->private_data;
+ struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data;
struct rsxx_bio_meta *bio_meta;
blk_status_t st = BLK_STS_IOERR;
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 1914f5488b22..0574f4495755 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -944,8 +944,7 @@ failed_dma_setup:
ctrl->done_wq = NULL;
}
- if (ctrl->trackers)
- vfree(ctrl->trackers);
+ vfree(ctrl->trackers);
if (ctrl->status.buf)
dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
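The dma.c cleanup above works because vfree(), like kfree(), is defined as a no-op when passed NULL, so the wrapping if (ctrl->trackers) test adds nothing; the dma_free_coherent() call that follows in the original keeps its own check. A sketch of the resulting teardown pattern:

	/* Sketch only: ctrl->trackers is the field from the hunk above.
	 * vfree(NULL) is harmless, so the unconditional call keeps the
	 * error/teardown path flat. */
	vfree(ctrl->trackers);
	ctrl->trackers = NULL;	/* optional: guards against a later double free */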
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
index 4861669e5786..6147977994ff 100644
--- a/drivers/block/rsxx/rsxx_priv.h
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -11,7 +11,6 @@
#ifndef __RSXX_PRIV_H__
#define __RSXX_PRIV_H__
-#include <linux/version.h>
#include <linux/semaphore.h>
#include <linux/fs.h>
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
deleted file mode 100644
index a962b4551bed..000000000000
--- a/drivers/block/skd_main.c
+++ /dev/null
@@ -1,3670 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST
- * was acquired by Western Digital in 2012.
- *
- * Copyright 2012 sTec, Inc.
- * Copyright (c) 2017 Western Digital Corporation or its affiliates.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/compiler.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
-#include <linux/time.h>
-#include <linux/hdreg.h>
-#include <linux/dma-mapping.h>
-#include <linux/completion.h>
-#include <linux/scatterlist.h>
-#include <linux/err.h>
-#include <linux/aer.h>
-#include <linux/wait.h>
-#include <linux/stringify.h>
-#include <scsi/scsi.h>
-#include <scsi/sg.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <asm/unaligned.h>
-
-#include "skd_s1120.h"
-
-static int skd_dbg_level;
-static int skd_isr_comp_limit = 4;
-
-#define SKD_ASSERT(expr) \
- do { \
- if (unlikely(!(expr))) { \
- pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
- # expr, __FILE__, __func__, __LINE__); \
- } \
- } while (0)
-
-#define DRV_NAME "skd"
-#define PFX DRV_NAME ": "
-
-MODULE_LICENSE("GPL");
-
-MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver");
-
-#define PCI_VENDOR_ID_STEC 0x1B39
-#define PCI_DEVICE_ID_S1120 0x0001
-
-#define SKD_FUA_NV (1 << 1)
-#define SKD_MINORS_PER_DEVICE 16
-
-#define SKD_MAX_QUEUE_DEPTH 200u
-
-#define SKD_PAUSE_TIMEOUT (5 * 1000)
-
-#define SKD_N_FITMSG_BYTES (512u)
-#define SKD_MAX_REQ_PER_MSG 14
-
-#define SKD_N_SPECIAL_FITMSG_BYTES (128u)
-
-/* SG elements are 32 bytes, so we can make this 4096 and still be under the
- * 128KB limit. That allows 4096*4K = 16M xfer size
- */
-#define SKD_N_SG_PER_REQ_DEFAULT 256u
-
-#define SKD_N_COMPLETION_ENTRY 256u
-#define SKD_N_READ_CAP_BYTES (8u)
-
-#define SKD_N_INTERNAL_BYTES (512u)
-
-#define SKD_SKCOMP_SIZE \
- ((sizeof(struct fit_completion_entry_v1) + \
- sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)
-
-/* 5 bits of uniqifier, 0xF800 */
-#define SKD_ID_TABLE_MASK (3u << 8u)
-#define SKD_ID_RW_REQUEST (0u << 8u)
-#define SKD_ID_INTERNAL (1u << 8u)
-#define SKD_ID_FIT_MSG (3u << 8u)
-#define SKD_ID_SLOT_MASK 0x00FFu
-#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
-
-#define SKD_N_MAX_SECTORS 2048u
-
-#define SKD_MAX_RETRIES 2u
-
-#define SKD_TIMER_SECONDS(seconds) (seconds)
-#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
-
-#define INQ_STD_NBYTES 36
-
-enum skd_drvr_state {
- SKD_DRVR_STATE_LOAD,
- SKD_DRVR_STATE_IDLE,
- SKD_DRVR_STATE_BUSY,
- SKD_DRVR_STATE_STARTING,
- SKD_DRVR_STATE_ONLINE,
- SKD_DRVR_STATE_PAUSING,
- SKD_DRVR_STATE_PAUSED,
- SKD_DRVR_STATE_RESTARTING,
- SKD_DRVR_STATE_RESUMING,
- SKD_DRVR_STATE_STOPPING,
- SKD_DRVR_STATE_FAULT,
- SKD_DRVR_STATE_DISAPPEARED,
- SKD_DRVR_STATE_PROTOCOL_MISMATCH,
- SKD_DRVR_STATE_BUSY_ERASE,
- SKD_DRVR_STATE_BUSY_SANITIZE,
- SKD_DRVR_STATE_BUSY_IMMINENT,
- SKD_DRVR_STATE_WAIT_BOOT,
- SKD_DRVR_STATE_SYNCING,
-};
-
-#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
-#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
-#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
-#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
-#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
-#define SKD_START_WAIT_SECONDS 90u
-
-enum skd_req_state {
- SKD_REQ_STATE_IDLE,
- SKD_REQ_STATE_SETUP,
- SKD_REQ_STATE_BUSY,
- SKD_REQ_STATE_COMPLETED,
- SKD_REQ_STATE_TIMEOUT,
-};
-
-enum skd_check_status_action {
- SKD_CHECK_STATUS_REPORT_GOOD,
- SKD_CHECK_STATUS_REPORT_SMART_ALERT,
- SKD_CHECK_STATUS_REQUEUE_REQUEST,
- SKD_CHECK_STATUS_REPORT_ERROR,
- SKD_CHECK_STATUS_BUSY_IMMINENT,
-};
-
-struct skd_msg_buf {
- struct fit_msg_hdr fmh;
- struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG];
-};
-
-struct skd_fitmsg_context {
- u32 id;
-
- u32 length;
-
- struct skd_msg_buf *msg_buf;
- dma_addr_t mb_dma_address;
-};
-
-struct skd_request_context {
- enum skd_req_state state;
-
- u16 id;
- u32 fitmsg_id;
-
- u8 flush_cmd;
-
- enum dma_data_direction data_dir;
- struct scatterlist *sg;
- u32 n_sg;
- u32 sg_byte_count;
-
- struct fit_sg_descriptor *sksg_list;
- dma_addr_t sksg_dma_address;
-
- struct fit_completion_entry_v1 completion;
-
- struct fit_comp_error_info err_info;
- int retries;
-
- blk_status_t status;
-};
-
-struct skd_special_context {
- struct skd_request_context req;
-
- void *data_buf;
- dma_addr_t db_dma_address;
-
- struct skd_msg_buf *msg_buf;
- dma_addr_t mb_dma_address;
-};
-
-typedef enum skd_irq_type {
- SKD_IRQ_LEGACY,
- SKD_IRQ_MSI,
- SKD_IRQ_MSIX
-} skd_irq_type_t;
-
-#define SKD_MAX_BARS 2
-
-struct skd_device {
- void __iomem *mem_map[SKD_MAX_BARS];
- resource_size_t mem_phys[SKD_MAX_BARS];
- u32 mem_size[SKD_MAX_BARS];
-
- struct skd_msix_entry *msix_entries;
-
- struct pci_dev *pdev;
- int pcie_error_reporting_is_enabled;
-
- spinlock_t lock;
- struct gendisk *disk;
- struct blk_mq_tag_set tag_set;
- struct request_queue *queue;
- struct skd_fitmsg_context *skmsg;
- struct device *class_dev;
- int gendisk_on;
- int sync_done;
-
- u32 devno;
- u32 major;
- char isr_name[30];
-
- enum skd_drvr_state state;
- u32 drive_state;
-
- u32 cur_max_queue_depth;
- u32 queue_low_water_mark;
- u32 dev_max_queue_depth;
-
- u32 num_fitmsg_context;
- u32 num_req_context;
-
- struct skd_fitmsg_context *skmsg_table;
-
- struct skd_special_context internal_skspcl;
- u32 read_cap_blocksize;
- u32 read_cap_last_lba;
- int read_cap_is_valid;
- int inquiry_is_valid;
- u8 inq_serial_num[13]; /*12 chars plus null term */
-
- u8 skcomp_cycle;
- u32 skcomp_ix;
- struct kmem_cache *msgbuf_cache;
- struct kmem_cache *sglist_cache;
- struct kmem_cache *databuf_cache;
- struct fit_completion_entry_v1 *skcomp_table;
- struct fit_comp_error_info *skerr_table;
- dma_addr_t cq_dma_address;
-
- wait_queue_head_t waitq;
-
- struct timer_list timer;
- u32 timer_countdown;
- u32 timer_substate;
-
- int sgs_per_request;
- u32 last_mtd;
-
- u32 proto_ver;
-
- int dbg_level;
- u32 connect_time_stamp;
- int connect_retries;
-#define SKD_MAX_CONNECT_RETRIES 16
- u32 drive_jiffies;
-
- u32 timo_slot;
-
- struct work_struct start_queue;
- struct work_struct completion_worker;
-};
-
-#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
-#define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
-#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
-
-static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
-{
- u32 val = readl(skdev->mem_map[1] + offset);
-
- if (unlikely(skdev->dbg_level >= 2))
- dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
- return val;
-}
-
-static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
- u32 offset)
-{
- writel(val, skdev->mem_map[1] + offset);
- if (unlikely(skdev->dbg_level >= 2))
- dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
-}
-
-static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
- u32 offset)
-{
- writeq(val, skdev->mem_map[1] + offset);
- if (unlikely(skdev->dbg_level >= 2))
- dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
- val);
-}
-
-
-#define SKD_IRQ_DEFAULT SKD_IRQ_MSIX
-static int skd_isr_type = SKD_IRQ_DEFAULT;
-
-module_param(skd_isr_type, int, 0444);
-MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
- " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
-
-#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
-static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
-
-module_param(skd_max_req_per_msg, int, 0444);
-MODULE_PARM_DESC(skd_max_req_per_msg,
- "Maximum SCSI requests packed in a single message."
- " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");
-
-#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
-#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
-static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
-
-module_param(skd_max_queue_depth, int, 0444);
-MODULE_PARM_DESC(skd_max_queue_depth,
- "Maximum SCSI requests issued to s1120."
- " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
-
-static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
-module_param(skd_sgs_per_request, int, 0444);
-MODULE_PARM_DESC(skd_sgs_per_request,
- "Maximum SG elements per block request."
- " (1-4096, default==256)");
-
-static int skd_max_pass_thru = 1;
-module_param(skd_max_pass_thru, int, 0444);
-MODULE_PARM_DESC(skd_max_pass_thru,
- "Maximum SCSI pass-thru at a time. IGNORED");
-
-module_param(skd_dbg_level, int, 0444);
-MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
-
-module_param(skd_isr_comp_limit, int, 0444);
-MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
-
-/* Major device number dynamically assigned. */
-static u32 skd_major;
-
-static void skd_destruct(struct skd_device *skdev);
-static const struct block_device_operations skd_blockdev_ops;
-static void skd_send_fitmsg(struct skd_device *skdev,
- struct skd_fitmsg_context *skmsg);
-static void skd_send_special_fitmsg(struct skd_device *skdev,
- struct skd_special_context *skspcl);
-static bool skd_preop_sg_list(struct skd_device *skdev,
- struct skd_request_context *skreq);
-static void skd_postop_sg_list(struct skd_device *skdev,
- struct skd_request_context *skreq);
-
-static void skd_restart_device(struct skd_device *skdev);
-static int skd_quiesce_dev(struct skd_device *skdev);
-static int skd_unquiesce_dev(struct skd_device *skdev);
-static void skd_disable_interrupts(struct skd_device *skdev);
-static void skd_isr_fwstate(struct skd_device *skdev);
-static void skd_recover_requests(struct skd_device *skdev);
-static void skd_soft_reset(struct skd_device *skdev);
-
-const char *skd_drive_state_to_str(int state);
-const char *skd_skdev_state_to_str(enum skd_drvr_state state);
-static void skd_log_skdev(struct skd_device *skdev, const char *event);
-static void skd_log_skreq(struct skd_device *skdev,
- struct skd_request_context *skreq, const char *event);
-
-/*
- *****************************************************************************
- * READ/WRITE REQUESTS
- *****************************************************************************
- */
-static bool skd_inc_in_flight(struct request *rq, void *data, bool reserved)
-{
- int *count = data;
-
- count++;
- return true;
-}
-
-static int skd_in_flight(struct skd_device *skdev)
-{
- int count = 0;
-
- blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);
-
- return count;
-}
-
-static void
-skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
- int data_dir, unsigned lba,
- unsigned count)
-{
- if (data_dir == READ)
- scsi_req->cdb[0] = READ_10;
- else
- scsi_req->cdb[0] = WRITE_10;
-
- scsi_req->cdb[1] = 0;
- scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
- scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
- scsi_req->cdb[4] = (lba & 0xff00) >> 8;
- scsi_req->cdb[5] = (lba & 0xff);
- scsi_req->cdb[6] = 0;
- scsi_req->cdb[7] = (count & 0xff00) >> 8;
- scsi_req->cdb[8] = count & 0xff;
- scsi_req->cdb[9] = 0;
-}
-
-static void
-skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
- struct skd_request_context *skreq)
-{
- skreq->flush_cmd = 1;
-
- scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
- scsi_req->cdb[1] = 0;
- scsi_req->cdb[2] = 0;
- scsi_req->cdb[3] = 0;
- scsi_req->cdb[4] = 0;
- scsi_req->cdb[5] = 0;
- scsi_req->cdb[6] = 0;
- scsi_req->cdb[7] = 0;
- scsi_req->cdb[8] = 0;
- scsi_req->cdb[9] = 0;
-}
-
-/*
- * Return true if and only if all pending requests should be failed.
- */
-static bool skd_fail_all(struct request_queue *q)
-{
- struct skd_device *skdev = q->queuedata;
-
- SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
-
- skd_log_skdev(skdev, "req_not_online");
- switch (skdev->state) {
- case SKD_DRVR_STATE_PAUSING:
- case SKD_DRVR_STATE_PAUSED:
- case SKD_DRVR_STATE_STARTING:
- case SKD_DRVR_STATE_RESTARTING:
- case SKD_DRVR_STATE_WAIT_BOOT:
- /* In case of starting, we haven't started the queue,
- * so we can't get here... but requests are
- * possibly hanging out waiting for us because we
- * reported the dev/skd0 already. They'll wait
- * forever if connect doesn't complete.
- * What to do??? delay dev/skd0 ??
- */
- case SKD_DRVR_STATE_BUSY:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- case SKD_DRVR_STATE_BUSY_ERASE:
- return false;
-
- case SKD_DRVR_STATE_BUSY_SANITIZE:
- case SKD_DRVR_STATE_STOPPING:
- case SKD_DRVR_STATE_SYNCING:
- case SKD_DRVR_STATE_FAULT:
- case SKD_DRVR_STATE_DISAPPEARED:
- default:
- return true;
- }
-}
-
-static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *mqd)
-{
- struct request *const req = mqd->rq;
- struct request_queue *const q = req->q;
- struct skd_device *skdev = q->queuedata;
- struct skd_fitmsg_context *skmsg;
- struct fit_msg_hdr *fmh;
- const u32 tag = blk_mq_unique_tag(req);
- struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req);
- struct skd_scsi_request *scsi_req;
- unsigned long flags = 0;
- const u32 lba = blk_rq_pos(req);
- const u32 count = blk_rq_sectors(req);
- const int data_dir = rq_data_dir(req);
-
- if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
- return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;
-
- if (!(req->rq_flags & RQF_DONTPREP)) {
- skreq->retries = 0;
- req->rq_flags |= RQF_DONTPREP;
- }
-
- blk_mq_start_request(req);
-
- WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n",
- tag, skd_max_queue_depth, q->nr_requests);
-
- SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
-
- dev_dbg(&skdev->pdev->dev,
- "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba,
- lba, count, count, data_dir);
-
- skreq->id = tag + SKD_ID_RW_REQUEST;
- skreq->flush_cmd = 0;
- skreq->n_sg = 0;
- skreq->sg_byte_count = 0;
-
- skreq->fitmsg_id = 0;
-
- skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
- if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
- dev_dbg(&skdev->pdev->dev, "error Out\n");
- skreq->status = BLK_STS_RESOURCE;
- blk_mq_complete_request(req);
- return BLK_STS_OK;
- }
-
- dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
- skreq->n_sg *
- sizeof(struct fit_sg_descriptor),
- DMA_TO_DEVICE);
-
- /* Either a FIT msg is in progress or we have to start one. */
- if (skd_max_req_per_msg == 1) {
- skmsg = NULL;
- } else {
- spin_lock_irqsave(&skdev->lock, flags);
- skmsg = skdev->skmsg;
- }
- if (!skmsg) {
- skmsg = &skdev->skmsg_table[tag];
- skdev->skmsg = skmsg;
-
- /* Initialize the FIT msg header */
- fmh = &skmsg->msg_buf->fmh;
- memset(fmh, 0, sizeof(*fmh));
- fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
- skmsg->length = sizeof(*fmh);
- } else {
- fmh = &skmsg->msg_buf->fmh;
- }
-
- skreq->fitmsg_id = skmsg->id;
-
- scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
- memset(scsi_req, 0, sizeof(*scsi_req));
-
- scsi_req->hdr.tag = skreq->id;
- scsi_req->hdr.sg_list_dma_address =
- cpu_to_be64(skreq->sksg_dma_address);
-
- if (req_op(req) == REQ_OP_FLUSH) {
- skd_prep_zerosize_flush_cdb(scsi_req, skreq);
- SKD_ASSERT(skreq->flush_cmd == 1);
- } else {
- skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
- }
-
- if (req->cmd_flags & REQ_FUA)
- scsi_req->cdb[1] |= SKD_FUA_NV;
-
- scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count);
-
- /* Complete resource allocations. */
- skreq->state = SKD_REQ_STATE_BUSY;
-
- skmsg->length += sizeof(struct skd_scsi_request);
- fmh->num_protocol_cmds_coalesced++;
-
- dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
- skd_in_flight(skdev));
-
- /*
- * If the FIT msg buffer is full send it.
- */
- if (skd_max_req_per_msg == 1) {
- skd_send_fitmsg(skdev, skmsg);
- } else {
- if (mqd->last ||
- fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
- skd_send_fitmsg(skdev, skmsg);
- skdev->skmsg = NULL;
- }
- spin_unlock_irqrestore(&skdev->lock, flags);
- }
-
- return BLK_STS_OK;
-}
-
-static enum blk_eh_timer_return skd_timed_out(struct request *req,
- bool reserved)
-{
- struct skd_device *skdev = req->q->queuedata;
-
- dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n",
- blk_mq_unique_tag(req));
-
- return BLK_EH_RESET_TIMER;
-}
-
-static void skd_complete_rq(struct request *req)
-{
- struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
-
- blk_mq_end_request(req, skreq->status);
-}
-
-static bool skd_preop_sg_list(struct skd_device *skdev,
- struct skd_request_context *skreq)
-{
- struct request *req = blk_mq_rq_from_pdu(skreq);
- struct scatterlist *sgl = &skreq->sg[0], *sg;
- int n_sg;
- int i;
-
- skreq->sg_byte_count = 0;
-
- WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
- skreq->data_dir != DMA_FROM_DEVICE);
-
- n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
- if (n_sg <= 0)
- return false;
-
- /*
- * Map scatterlist to PCI bus addresses.
- * Note PCI might change the number of entries.
- */
- n_sg = dma_map_sg(&skdev->pdev->dev, sgl, n_sg, skreq->data_dir);
- if (n_sg <= 0)
- return false;
-
- SKD_ASSERT(n_sg <= skdev->sgs_per_request);
-
- skreq->n_sg = n_sg;
-
- for_each_sg(sgl, sg, n_sg, i) {
- struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
- u32 cnt = sg_dma_len(sg);
- uint64_t dma_addr = sg_dma_address(sg);
-
- sgd->control = FIT_SGD_CONTROL_NOT_LAST;
- sgd->byte_count = cnt;
- skreq->sg_byte_count += cnt;
- sgd->host_side_addr = dma_addr;
- sgd->dev_side_addr = 0;
- }
-
- skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
- skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
-
- if (unlikely(skdev->dbg_level > 1)) {
- dev_dbg(&skdev->pdev->dev,
- "skreq=%x sksg_list=%p sksg_dma=%pad\n",
- skreq->id, skreq->sksg_list, &skreq->sksg_dma_address);
- for (i = 0; i < n_sg; i++) {
- struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
-
- dev_dbg(&skdev->pdev->dev,
- " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
- i, sgd->byte_count, sgd->control,
- sgd->host_side_addr, sgd->next_desc_ptr);
- }
- }
-
- return true;
-}
-
-static void skd_postop_sg_list(struct skd_device *skdev,
- struct skd_request_context *skreq)
-{
- /*
- * restore the next ptr for next IO request so we
- * don't have to set it every time.
- */
- skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
- skreq->sksg_dma_address +
- ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
- dma_unmap_sg(&skdev->pdev->dev, &skreq->sg[0], skreq->n_sg,
- skreq->data_dir);
-}
-
-/*
- *****************************************************************************
- * TIMER
- *****************************************************************************
- */
-
-static void skd_timer_tick_not_online(struct skd_device *skdev);
-
-static void skd_start_queue(struct work_struct *work)
-{
- struct skd_device *skdev = container_of(work, typeof(*skdev),
- start_queue);
-
- /*
- * Although it is safe to call blk_start_queue() from interrupt
- * context, blk_mq_start_hw_queues() must not be called from
- * interrupt context.
- */
- blk_mq_start_hw_queues(skdev->queue);
-}
-
-static void skd_timer_tick(struct timer_list *t)
-{
- struct skd_device *skdev = from_timer(skdev, t, timer);
- unsigned long reqflags;
- u32 state;
-
- if (skdev->state == SKD_DRVR_STATE_FAULT)
- /* The driver has declared fault, and we want it to
- * stay that way until driver is reloaded.
- */
- return;
-
- spin_lock_irqsave(&skdev->lock, reqflags);
-
- state = SKD_READL(skdev, FIT_STATUS);
- state &= FIT_SR_DRIVE_STATE_MASK;
- if (state != skdev->drive_state)
- skd_isr_fwstate(skdev);
-
- if (skdev->state != SKD_DRVR_STATE_ONLINE)
- skd_timer_tick_not_online(skdev);
-
- mod_timer(&skdev->timer, (jiffies + HZ));
-
- spin_unlock_irqrestore(&skdev->lock, reqflags);
-}
-
-static void skd_timer_tick_not_online(struct skd_device *skdev)
-{
- switch (skdev->state) {
- case SKD_DRVR_STATE_IDLE:
- case SKD_DRVR_STATE_LOAD:
- break;
- case SKD_DRVR_STATE_BUSY_SANITIZE:
- dev_dbg(&skdev->pdev->dev,
- "drive busy sanitize[%x], driver[%x]\n",
- skdev->drive_state, skdev->state);
- /* If we've been in sanitize for 3 seconds, we figure we're not
- * going to get anymore completions, so recover requests now
- */
- if (skdev->timer_countdown > 0) {
- skdev->timer_countdown--;
- return;
- }
- skd_recover_requests(skdev);
- break;
-
- case SKD_DRVR_STATE_BUSY:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- case SKD_DRVR_STATE_BUSY_ERASE:
- dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
- skdev->state, skdev->timer_countdown);
- if (skdev->timer_countdown > 0) {
- skdev->timer_countdown--;
- return;
- }
- dev_dbg(&skdev->pdev->dev,
- "busy[%x], timedout=%d, restarting device.",
- skdev->state, skdev->timer_countdown);
- skd_restart_device(skdev);
- break;
-
- case SKD_DRVR_STATE_WAIT_BOOT:
- case SKD_DRVR_STATE_STARTING:
- if (skdev->timer_countdown > 0) {
- skdev->timer_countdown--;
- return;
- }
- /* For now, we fault the drive. Could attempt resets to
- * revcover at some point. */
- skdev->state = SKD_DRVR_STATE_FAULT;
-
- dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
- skdev->drive_state);
-
- /*start the queue so we can respond with error to requests */
- /* wakeup anyone waiting for startup complete */
- schedule_work(&skdev->start_queue);
- skdev->gendisk_on = -1;
- wake_up_interruptible(&skdev->waitq);
- break;
-
- case SKD_DRVR_STATE_ONLINE:
- /* shouldn't get here. */
- break;
-
- case SKD_DRVR_STATE_PAUSING:
- case SKD_DRVR_STATE_PAUSED:
- break;
-
- case SKD_DRVR_STATE_RESTARTING:
- if (skdev->timer_countdown > 0) {
- skdev->timer_countdown--;
- return;
- }
- /* For now, we fault the drive. Could attempt resets to
- * revcover at some point. */
- skdev->state = SKD_DRVR_STATE_FAULT;
- dev_err(&skdev->pdev->dev,
- "DriveFault Reconnect Timeout (%x)\n",
- skdev->drive_state);
-
- /*
- * Recovering does two things:
- * 1. completes IO with error
- * 2. reclaims dma resources
- * When is it safe to recover requests?
- * - if the drive state is faulted
- * - if the state is still soft reset after out timeout
- * - if the drive registers are dead (state = FF)
- * If it is "unsafe", we still need to recover, so we will
- * disable pci bus mastering and disable our interrupts.
- */
-
- if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
- (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
- (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
- /* It never came out of soft reset. Try to
- * recover the requests and then let them
- * fail. This is to mitigate hung processes. */
- skd_recover_requests(skdev);
- else {
- dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
- skdev->drive_state);
- pci_disable_device(skdev->pdev);
- skd_disable_interrupts(skdev);
- skd_recover_requests(skdev);
- }
-
- /*start the queue so we can respond with error to requests */
- /* wakeup anyone waiting for startup complete */
- schedule_work(&skdev->start_queue);
- skdev->gendisk_on = -1;
- wake_up_interruptible(&skdev->waitq);
- break;
-
- case SKD_DRVR_STATE_RESUMING:
- case SKD_DRVR_STATE_STOPPING:
- case SKD_DRVR_STATE_SYNCING:
- case SKD_DRVR_STATE_FAULT:
- case SKD_DRVR_STATE_DISAPPEARED:
- default:
- break;
- }
-}
-
-static int skd_start_timer(struct skd_device *skdev)
-{
- int rc;
-
- timer_setup(&skdev->timer, skd_timer_tick, 0);
-
- rc = mod_timer(&skdev->timer, (jiffies + HZ));
- if (rc)
- dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
- return rc;
-}
-
-static void skd_kill_timer(struct skd_device *skdev)
-{
- del_timer_sync(&skdev->timer);
-}
-
-/*
- *****************************************************************************
- * INTERNAL REQUESTS -- generated by driver itself
- *****************************************************************************
- */
-
-static int skd_format_internal_skspcl(struct skd_device *skdev)
-{
- struct skd_special_context *skspcl = &skdev->internal_skspcl;
- struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
- struct fit_msg_hdr *fmh;
- uint64_t dma_address;
- struct skd_scsi_request *scsi;
-
- fmh = &skspcl->msg_buf->fmh;
- fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
- fmh->num_protocol_cmds_coalesced = 1;
-
- scsi = &skspcl->msg_buf->scsi[0];
- memset(scsi, 0, sizeof(*scsi));
- dma_address = skspcl->req.sksg_dma_address;
- scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
- skspcl->req.n_sg = 1;
- sgd->control = FIT_SGD_CONTROL_LAST;
- sgd->byte_count = 0;
- sgd->host_side_addr = skspcl->db_dma_address;
- sgd->dev_side_addr = 0;
- sgd->next_desc_ptr = 0LL;
-
- return 1;
-}
-
-#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
-
-static void skd_send_internal_skspcl(struct skd_device *skdev,
- struct skd_special_context *skspcl,
- u8 opcode)
-{
- struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
- struct skd_scsi_request *scsi;
- unsigned char *buf = skspcl->data_buf;
- int i;
-
- if (skspcl->req.state != SKD_REQ_STATE_IDLE)
- /*
- * A refresh is already in progress.
- * Just wait for it to finish.
- */
- return;
-
- skspcl->req.state = SKD_REQ_STATE_BUSY;
-
- scsi = &skspcl->msg_buf->scsi[0];
- scsi->hdr.tag = skspcl->req.id;
-
- memset(scsi->cdb, 0, sizeof(scsi->cdb));
-
- switch (opcode) {
- case TEST_UNIT_READY:
- scsi->cdb[0] = TEST_UNIT_READY;
- sgd->byte_count = 0;
- scsi->hdr.sg_list_len_bytes = 0;
- break;
-
- case READ_CAPACITY:
- scsi->cdb[0] = READ_CAPACITY;
- sgd->byte_count = SKD_N_READ_CAP_BYTES;
- scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
- break;
-
- case INQUIRY:
- scsi->cdb[0] = INQUIRY;
- scsi->cdb[1] = 0x01; /* evpd */
- scsi->cdb[2] = 0x80; /* serial number page */
- scsi->cdb[4] = 0x10;
- sgd->byte_count = 16;
- scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
- break;
-
- case SYNCHRONIZE_CACHE:
- scsi->cdb[0] = SYNCHRONIZE_CACHE;
- sgd->byte_count = 0;
- scsi->hdr.sg_list_len_bytes = 0;
- break;
-
- case WRITE_BUFFER:
- scsi->cdb[0] = WRITE_BUFFER;
- scsi->cdb[1] = 0x02;
- scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
- scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
- sgd->byte_count = WR_BUF_SIZE;
- scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
- /* fill incrementing byte pattern */
- for (i = 0; i < sgd->byte_count; i++)
- buf[i] = i & 0xFF;
- break;
-
- case READ_BUFFER:
- scsi->cdb[0] = READ_BUFFER;
- scsi->cdb[1] = 0x02;
- scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
- scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
- sgd->byte_count = WR_BUF_SIZE;
- scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
- memset(skspcl->data_buf, 0, sgd->byte_count);
- break;
-
- default:
- SKD_ASSERT("Don't know what to send");
- return;
-
- }
- skd_send_special_fitmsg(skdev, skspcl);
-}
-
-static void skd_refresh_device_data(struct skd_device *skdev)
-{
- struct skd_special_context *skspcl = &skdev->internal_skspcl;
-
- skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
-}
-
-static int skd_chk_read_buf(struct skd_device *skdev,
- struct skd_special_context *skspcl)
-{
- unsigned char *buf = skspcl->data_buf;
- int i;
-
- /* check for incrementing byte pattern */
- for (i = 0; i < WR_BUF_SIZE; i++)
- if (buf[i] != (i & 0xFF))
- return 1;
-
- return 0;
-}
-
-static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
- u8 code, u8 qual, u8 fruc)
-{
- /* If the check condition is of special interest, log a message */
- if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
- && (code == 0x04) && (qual == 0x06)) {
- dev_err(&skdev->pdev->dev,
- "*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
- key, code, qual, fruc);
- }
-}
-
-static void skd_complete_internal(struct skd_device *skdev,
- struct fit_completion_entry_v1 *skcomp,
- struct fit_comp_error_info *skerr,
- struct skd_special_context *skspcl)
-{
- u8 *buf = skspcl->data_buf;
- u8 status;
- int i;
- struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];
-
- lockdep_assert_held(&skdev->lock);
-
- SKD_ASSERT(skspcl == &skdev->internal_skspcl);
-
- dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);
-
- dma_sync_single_for_cpu(&skdev->pdev->dev,
- skspcl->db_dma_address,
- skspcl->req.sksg_list[0].byte_count,
- DMA_BIDIRECTIONAL);
-
- skspcl->req.completion = *skcomp;
- skspcl->req.state = SKD_REQ_STATE_IDLE;
-
- status = skspcl->req.completion.status;
-
- skd_log_check_status(skdev, status, skerr->key, skerr->code,
- skerr->qual, skerr->fruc);
-
- switch (scsi->cdb[0]) {
- case TEST_UNIT_READY:
- if (status == SAM_STAT_GOOD)
- skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
- else if ((status == SAM_STAT_CHECK_CONDITION) &&
- (skerr->key == MEDIUM_ERROR))
- skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
- else {
- if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- dev_dbg(&skdev->pdev->dev,
- "TUR failed, don't send anymore state 0x%x\n",
- skdev->state);
- return;
- }
- dev_dbg(&skdev->pdev->dev,
- "**** TUR failed, retry skerr\n");
- skd_send_internal_skspcl(skdev, skspcl,
- TEST_UNIT_READY);
- }
- break;
-
- case WRITE_BUFFER:
- if (status == SAM_STAT_GOOD)
- skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
- else {
- if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- dev_dbg(&skdev->pdev->dev,
- "write buffer failed, don't send anymore state 0x%x\n",
- skdev->state);
- return;
- }
- dev_dbg(&skdev->pdev->dev,
- "**** write buffer failed, retry skerr\n");
- skd_send_internal_skspcl(skdev, skspcl,
- TEST_UNIT_READY);
- }
- break;
-
- case READ_BUFFER:
- if (status == SAM_STAT_GOOD) {
- if (skd_chk_read_buf(skdev, skspcl) == 0)
- skd_send_internal_skspcl(skdev, skspcl,
- READ_CAPACITY);
- else {
- dev_err(&skdev->pdev->dev,
- "*** W/R Buffer mismatch %d ***\n",
- skdev->connect_retries);
- if (skdev->connect_retries <
- SKD_MAX_CONNECT_RETRIES) {
- skdev->connect_retries++;
- skd_soft_reset(skdev);
- } else {
- dev_err(&skdev->pdev->dev,
- "W/R Buffer Connect Error\n");
- return;
- }
- }
-
- } else {
- if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- dev_dbg(&skdev->pdev->dev,
- "read buffer failed, don't send anymore state 0x%x\n",
- skdev->state);
- return;
- }
- dev_dbg(&skdev->pdev->dev,
- "**** read buffer failed, retry skerr\n");
- skd_send_internal_skspcl(skdev, skspcl,
- TEST_UNIT_READY);
- }
- break;
-
- case READ_CAPACITY:
- skdev->read_cap_is_valid = 0;
- if (status == SAM_STAT_GOOD) {
- skdev->read_cap_last_lba =
- (buf[0] << 24) | (buf[1] << 16) |
- (buf[2] << 8) | buf[3];
- skdev->read_cap_blocksize =
- (buf[4] << 24) | (buf[5] << 16) |
- (buf[6] << 8) | buf[7];
-
- dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
- skdev->read_cap_last_lba,
- skdev->read_cap_blocksize);
-
- set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
-
- skdev->read_cap_is_valid = 1;
-
- skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
- } else if ((status == SAM_STAT_CHECK_CONDITION) &&
- (skerr->key == MEDIUM_ERROR)) {
- skdev->read_cap_last_lba = ~0;
- set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
- dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
- skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
- } else {
- dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
- skd_send_internal_skspcl(skdev, skspcl,
- TEST_UNIT_READY);
- }
- break;
-
- case INQUIRY:
- skdev->inquiry_is_valid = 0;
- if (status == SAM_STAT_GOOD) {
- skdev->inquiry_is_valid = 1;
-
- for (i = 0; i < 12; i++)
- skdev->inq_serial_num[i] = buf[i + 4];
- skdev->inq_serial_num[12] = 0;
- }
-
- if (skd_unquiesce_dev(skdev) < 0)
- dev_dbg(&skdev->pdev->dev, "**** failed, to ONLINE device\n");
- /* connection is complete */
- skdev->connect_retries = 0;
- break;
-
- case SYNCHRONIZE_CACHE:
- if (status == SAM_STAT_GOOD)
- skdev->sync_done = 1;
- else
- skdev->sync_done = -1;
- wake_up_interruptible(&skdev->waitq);
- break;
-
- default:
- SKD_ASSERT("we didn't send this");
- }
-}
-
-/*
- *****************************************************************************
- * FIT MESSAGES
- *****************************************************************************
- */
-
-static void skd_send_fitmsg(struct skd_device *skdev,
- struct skd_fitmsg_context *skmsg)
-{
- u64 qcmd;
-
- dev_dbg(&skdev->pdev->dev, "dma address %pad, busy=%d\n",
- &skmsg->mb_dma_address, skd_in_flight(skdev));
- dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);
-
- qcmd = skmsg->mb_dma_address;
- qcmd |= FIT_QCMD_QID_NORMAL;
-
- if (unlikely(skdev->dbg_level > 1)) {
- u8 *bp = (u8 *)skmsg->msg_buf;
- int i;
- for (i = 0; i < skmsg->length; i += 8) {
- dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
- &bp[i]);
- if (i == 0)
- i = 64 - 8;
- }
- }
-
- if (skmsg->length > 256)
- qcmd |= FIT_QCMD_MSGSIZE_512;
- else if (skmsg->length > 128)
- qcmd |= FIT_QCMD_MSGSIZE_256;
- else if (skmsg->length > 64)
- qcmd |= FIT_QCMD_MSGSIZE_128;
- else
- /*
- * This makes no sense because the FIT msg header is
- * 64 bytes. If the msg is only 64 bytes long it has
- * no payload.
- */
- qcmd |= FIT_QCMD_MSGSIZE_64;
-
- dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address,
- skmsg->length, DMA_TO_DEVICE);
-
- /* Make sure skd_msg_buf is written before the doorbell is triggered. */
- smp_wmb();
-
- SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
-}
-
-static void skd_send_special_fitmsg(struct skd_device *skdev,
- struct skd_special_context *skspcl)
-{
- u64 qcmd;
-
- WARN_ON_ONCE(skspcl->req.n_sg != 1);
-
- if (unlikely(skdev->dbg_level > 1)) {
- u8 *bp = (u8 *)skspcl->msg_buf;
- int i;
-
- for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
- dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
- &bp[i]);
- if (i == 0)
- i = 64 - 8;
- }
-
- dev_dbg(&skdev->pdev->dev,
- "skspcl=%p id=%04x sksg_list=%p sksg_dma=%pad\n",
- skspcl, skspcl->req.id, skspcl->req.sksg_list,
- &skspcl->req.sksg_dma_address);
- for (i = 0; i < skspcl->req.n_sg; i++) {
- struct fit_sg_descriptor *sgd =
- &skspcl->req.sksg_list[i];
-
- dev_dbg(&skdev->pdev->dev,
- " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
- i, sgd->byte_count, sgd->control,
- sgd->host_side_addr, sgd->next_desc_ptr);
- }
- }
-
- /*
- * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
- * and one 64-byte SSDI command.
- */
- qcmd = skspcl->mb_dma_address;
- qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
-
- dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address,
- SKD_N_SPECIAL_FITMSG_BYTES, DMA_TO_DEVICE);
- dma_sync_single_for_device(&skdev->pdev->dev,
- skspcl->req.sksg_dma_address,
- 1 * sizeof(struct fit_sg_descriptor),
- DMA_TO_DEVICE);
- dma_sync_single_for_device(&skdev->pdev->dev,
- skspcl->db_dma_address,
- skspcl->req.sksg_list[0].byte_count,
- DMA_BIDIRECTIONAL);
-
- /* Make sure skd_msg_buf is written before the doorbell is triggered. */
- smp_wmb();
-
- SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
-}
-
-/*
- *****************************************************************************
- * COMPLETION QUEUE
- *****************************************************************************
- */
-
-static void skd_complete_other(struct skd_device *skdev,
- struct fit_completion_entry_v1 *skcomp,
- struct fit_comp_error_info *skerr);
-
-struct sns_info {
- u8 type;
- u8 stat;
- u8 key;
- u8 asc;
- u8 ascq;
- u8 mask;
- enum skd_check_status_action action;
-};
-
-static struct sns_info skd_chkstat_table[] = {
- /* Good */
- { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
- SKD_CHECK_STATUS_REPORT_GOOD },
-
- /* Smart alerts */
- { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
- SKD_CHECK_STATUS_REPORT_SMART_ALERT },
- { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
- SKD_CHECK_STATUS_REPORT_SMART_ALERT },
- { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
- SKD_CHECK_STATUS_REPORT_SMART_ALERT },
-
- /* Retry (with limits) */
- { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
- SKD_CHECK_STATUS_REQUEUE_REQUEST },
- { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
- SKD_CHECK_STATUS_REQUEUE_REQUEST },
- { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
- SKD_CHECK_STATUS_REQUEUE_REQUEST },
- { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
- SKD_CHECK_STATUS_REQUEUE_REQUEST },
-
- /* Busy (or about to be) */
- { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
- SKD_CHECK_STATUS_BUSY_IMMINENT },
-};
-
-/*
- * Look up status and sense data to decide how to handle the error
- * from the device.
- * mask says which fields must match e.g., mask=0x18 means check
- * type and stat, ignore key, asc, ascq.
- */
-
-static enum skd_check_status_action
-skd_check_status(struct skd_device *skdev,
- u8 cmp_status, struct fit_comp_error_info *skerr)
-{
- int i;
-
- dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
- skerr->key, skerr->code, skerr->qual, skerr->fruc);
-
- dev_dbg(&skdev->pdev->dev,
- "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
- skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
- skerr->fruc);
-
- /* Does the info match an entry in the good category? */
- for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
- struct sns_info *sns = &skd_chkstat_table[i];
-
- if (sns->mask & 0x10)
- if (skerr->type != sns->type)
- continue;
-
- if (sns->mask & 0x08)
- if (cmp_status != sns->stat)
- continue;
-
- if (sns->mask & 0x04)
- if (skerr->key != sns->key)
- continue;
-
- if (sns->mask & 0x02)
- if (skerr->code != sns->asc)
- continue;
-
- if (sns->mask & 0x01)
- if (skerr->qual != sns->ascq)
- continue;
-
- if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
- dev_err(&skdev->pdev->dev,
- "SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
- skerr->key, skerr->code, skerr->qual);
- }
- return sns->action;
- }
-
- /* No other match, so nonzero status means error,
- * zero status means good
- */
- if (cmp_status) {
- dev_dbg(&skdev->pdev->dev, "status check: error\n");
- return SKD_CHECK_STATUS_REPORT_ERROR;
- }
-
- dev_dbg(&skdev->pdev->dev, "status check good default\n");
- return SKD_CHECK_STATUS_REPORT_GOOD;
-}
-
-static void skd_resolve_req_exception(struct skd_device *skdev,
- struct skd_request_context *skreq,
- struct request *req)
-{
- u8 cmp_status = skreq->completion.status;
-
- switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
- case SKD_CHECK_STATUS_REPORT_GOOD:
- case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
- skreq->status = BLK_STS_OK;
- if (likely(!blk_should_fake_timeout(req->q)))
- blk_mq_complete_request(req);
- break;
-
- case SKD_CHECK_STATUS_BUSY_IMMINENT:
- skd_log_skreq(skdev, skreq, "retry(busy)");
- blk_mq_requeue_request(req, true);
- dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
- skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
- skdev->timer_countdown = SKD_TIMER_MINUTES(20);
- skd_quiesce_dev(skdev);
- break;
-
- case SKD_CHECK_STATUS_REQUEUE_REQUEST:
- if (++skreq->retries < SKD_MAX_RETRIES) {
- skd_log_skreq(skdev, skreq, "retry");
- blk_mq_requeue_request(req, true);
- break;
- }
- fallthrough;
-
- case SKD_CHECK_STATUS_REPORT_ERROR:
- default:
- skreq->status = BLK_STS_IOERR;
- if (likely(!blk_should_fake_timeout(req->q)))
- blk_mq_complete_request(req);
- break;
- }
-}
-
-static void skd_release_skreq(struct skd_device *skdev,
- struct skd_request_context *skreq)
-{
- /*
- * Reclaim the skd_request_context
- */
- skreq->state = SKD_REQ_STATE_IDLE;
-}
-
-static int skd_isr_completion_posted(struct skd_device *skdev,
- int limit, int *enqueued)
-{
- struct fit_completion_entry_v1 *skcmp;
- struct fit_comp_error_info *skerr;
- u16 req_id;
- u32 tag;
- u16 hwq = 0;
- struct request *rq;
- struct skd_request_context *skreq;
- u16 cmp_cntxt;
- u8 cmp_status;
- u8 cmp_cycle;
- u32 cmp_bytes;
- int rc = 0;
- int processed = 0;
-
- lockdep_assert_held(&skdev->lock);
-
- for (;; ) {
- SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
-
- skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
- cmp_cycle = skcmp->cycle;
- cmp_cntxt = skcmp->tag;
- cmp_status = skcmp->status;
- cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
-
- skerr = &skdev->skerr_table[skdev->skcomp_ix];
-
- dev_dbg(&skdev->pdev->dev,
- "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
- skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
- cmp_cntxt, cmp_status, skd_in_flight(skdev),
- cmp_bytes, skdev->proto_ver);
-
- if (cmp_cycle != skdev->skcomp_cycle) {
- dev_dbg(&skdev->pdev->dev, "end of completions\n");
- break;
- }
- /*
- * Update the completion queue head index and possibly
- * the completion cycle count. 8-bit wrap-around.
- */
- skdev->skcomp_ix++;
- if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
- skdev->skcomp_ix = 0;
- skdev->skcomp_cycle++;
- }
-
- /*
- * The command context is a unique 32-bit ID. The low order
- * bits help locate the request. The request is usually a
- * r/w request (see skd_start() above) or a special request.
- */
- req_id = cmp_cntxt;
- tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
-
- /* Is this other than a r/w request? */
- if (tag >= skdev->num_req_context) {
- /*
- * This is not a completion for a r/w request.
- */
- WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq],
- tag));
- skd_complete_other(skdev, skcmp, skerr);
- continue;
- }
-
- rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);
- if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
- tag))
- continue;
- skreq = blk_mq_rq_to_pdu(rq);
-
- /*
- * Make sure the request ID for the slot matches.
- */
- if (skreq->id != req_id) {
- dev_err(&skdev->pdev->dev,
- "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
- req_id, skreq->id, cmp_cntxt);
-
- continue;
- }
-
- SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
-
- skreq->completion = *skcmp;
- if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
- skreq->err_info = *skerr;
- skd_log_check_status(skdev, cmp_status, skerr->key,
- skerr->code, skerr->qual,
- skerr->fruc);
- }
- /* Release DMA resources for the request. */
- if (skreq->n_sg > 0)
- skd_postop_sg_list(skdev, skreq);
-
- skd_release_skreq(skdev, skreq);
-
- /*
- * Capture the outcome and post it back to the native request.
- */
- if (likely(cmp_status == SAM_STAT_GOOD)) {
- skreq->status = BLK_STS_OK;
- if (likely(!blk_should_fake_timeout(rq->q)))
- blk_mq_complete_request(rq);
- } else {
- skd_resolve_req_exception(skdev, skreq, rq);
- }
-
- /* skd_isr_comp_limit equal zero means no limit */
- if (limit) {
- if (++processed >= limit) {
- rc = 1;
- break;
- }
- }
- }
-
- if (skdev->state == SKD_DRVR_STATE_PAUSING &&
- skd_in_flight(skdev) == 0) {
- skdev->state = SKD_DRVR_STATE_PAUSED;
- wake_up_interruptible(&skdev->waitq);
- }
-
- return rc;
-}
-
-static void skd_complete_other(struct skd_device *skdev,
- struct fit_completion_entry_v1 *skcomp,
- struct fit_comp_error_info *skerr)
-{
- u32 req_id = 0;
- u32 req_table;
- u32 req_slot;
- struct skd_special_context *skspcl;
-
- lockdep_assert_held(&skdev->lock);
-
- req_id = skcomp->tag;
- req_table = req_id & SKD_ID_TABLE_MASK;
- req_slot = req_id & SKD_ID_SLOT_MASK;
-
- dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
- req_id, req_slot);
-
- /*
- * Based on the request id, determine how to dispatch this completion.
- * This swich/case is finding the good cases and forwarding the
- * completion entry. Errors are reported below the switch.
- */
- switch (req_table) {
- case SKD_ID_RW_REQUEST:
- /*
- * The caller, skd_isr_completion_posted() above,
- * handles r/w requests. The only way we get here
- * is if the req_slot is out of bounds.
- */
- break;
-
- case SKD_ID_INTERNAL:
- if (req_slot == 0) {
- skspcl = &skdev->internal_skspcl;
- if (skspcl->req.id == req_id &&
- skspcl->req.state == SKD_REQ_STATE_BUSY) {
- skd_complete_internal(skdev,
- skcomp, skerr, skspcl);
- return;
- }
- }
- break;
-
- case SKD_ID_FIT_MSG:
- /*
- * These id's should never appear in a completion record.
- */
- break;
-
- default:
- /*
- * These id's should never appear anywhere;
- */
- break;
- }
-
- /*
- * If we get here it is a bad or stale id.
- */
-}
-
-static void skd_reset_skcomp(struct skd_device *skdev)
-{
- memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);
-
- skdev->skcomp_ix = 0;
- skdev->skcomp_cycle = 1;
-}
-
-/*
- *****************************************************************************
- * INTERRUPTS
- *****************************************************************************
- */
-static void skd_completion_worker(struct work_struct *work)
-{
- struct skd_device *skdev =
- container_of(work, struct skd_device, completion_worker);
- unsigned long flags;
- int flush_enqueued = 0;
-
- spin_lock_irqsave(&skdev->lock, flags);
-
- /*
- * pass in limit=0, which means no limit..
- * process everything in compq
- */
- skd_isr_completion_posted(skdev, 0, &flush_enqueued);
- schedule_work(&skdev->start_queue);
-
- spin_unlock_irqrestore(&skdev->lock, flags);
-}
-
-static void skd_isr_msg_from_dev(struct skd_device *skdev);
-
-static irqreturn_t
-skd_isr(int irq, void *ptr)
-{
- struct skd_device *skdev = ptr;
- u32 intstat;
- u32 ack;
- int rc = 0;
- int deferred = 0;
- int flush_enqueued = 0;
-
- spin_lock(&skdev->lock);
-
- for (;; ) {
- intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
-
- ack = FIT_INT_DEF_MASK;
- ack &= intstat;
-
- dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
- ack);
-
- /* As long as there is an int pending on device, keep
- * running loop. When none, get out, but if we've never
- * done any processing, call completion handler?
- */
- if (ack == 0) {
- /* No interrupts on device, but run the completion
- * processor anyway?
- */
- if (rc == 0)
- if (likely (skdev->state
- == SKD_DRVR_STATE_ONLINE))
- deferred = 1;
- break;
- }
-
- rc = IRQ_HANDLED;
-
- SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
-
- if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
- (skdev->state != SKD_DRVR_STATE_STOPPING))) {
- if (intstat & FIT_ISH_COMPLETION_POSTED) {
- /*
- * If we have already deferred completion
- * processing, don't bother running it again
- */
- if (deferred == 0)
- deferred =
- skd_isr_completion_posted(skdev,
- skd_isr_comp_limit, &flush_enqueued);
- }
-
- if (intstat & FIT_ISH_FW_STATE_CHANGE) {
- skd_isr_fwstate(skdev);
- if (skdev->state == SKD_DRVR_STATE_FAULT ||
- skdev->state ==
- SKD_DRVR_STATE_DISAPPEARED) {
- spin_unlock(&skdev->lock);
- return rc;
- }
- }
-
- if (intstat & FIT_ISH_MSG_FROM_DEV)
- skd_isr_msg_from_dev(skdev);
- }
- }
-
- if (unlikely(flush_enqueued))
- schedule_work(&skdev->start_queue);
-
- if (deferred)
- schedule_work(&skdev->completion_worker);
- else if (!flush_enqueued)
- schedule_work(&skdev->start_queue);
-
- spin_unlock(&skdev->lock);
-
- return rc;
-}
-
-static void skd_drive_fault(struct skd_device *skdev)
-{
- skdev->state = SKD_DRVR_STATE_FAULT;
- dev_err(&skdev->pdev->dev, "Drive FAULT\n");
-}
-
-static void skd_drive_disappeared(struct skd_device *skdev)
-{
- skdev->state = SKD_DRVR_STATE_DISAPPEARED;
- dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
-}
-
-static void skd_isr_fwstate(struct skd_device *skdev)
-{
- u32 sense;
- u32 state;
- u32 mtd;
- int prev_driver_state = skdev->state;
-
- sense = SKD_READL(skdev, FIT_STATUS);
- state = sense & FIT_SR_DRIVE_STATE_MASK;
-
- dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
- skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
- skd_drive_state_to_str(state), state);
-
- skdev->drive_state = state;
-
- switch (skdev->drive_state) {
- case FIT_SR_DRIVE_INIT:
- if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
- skd_disable_interrupts(skdev);
- break;
- }
- if (skdev->state == SKD_DRVR_STATE_RESTARTING)
- skd_recover_requests(skdev);
- if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
- skdev->timer_countdown = SKD_STARTING_TIMO;
- skdev->state = SKD_DRVR_STATE_STARTING;
- skd_soft_reset(skdev);
- break;
- }
- mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
-
- case FIT_SR_DRIVE_ONLINE:
- skdev->cur_max_queue_depth = skd_max_queue_depth;
- if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
- skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
-
- skdev->queue_low_water_mark =
- skdev->cur_max_queue_depth * 2 / 3 + 1;
- if (skdev->queue_low_water_mark < 1)
- skdev->queue_low_water_mark = 1;
- dev_info(&skdev->pdev->dev,
- "Queue depth limit=%d dev=%d lowat=%d\n",
- skdev->cur_max_queue_depth,
- skdev->dev_max_queue_depth,
- skdev->queue_low_water_mark);
-
- skd_refresh_device_data(skdev);
- break;
-
- case FIT_SR_DRIVE_BUSY:
- skdev->state = SKD_DRVR_STATE_BUSY;
- skdev->timer_countdown = SKD_BUSY_TIMO;
- skd_quiesce_dev(skdev);
- break;
- case FIT_SR_DRIVE_BUSY_SANITIZE:
- /* set timer for 3 seconds, we'll abort any unfinished
- * commands after that expires
- */
- skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
- skdev->timer_countdown = SKD_TIMER_SECONDS(3);
- schedule_work(&skdev->start_queue);
- break;
- case FIT_SR_DRIVE_BUSY_ERASE:
- skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
- skdev->timer_countdown = SKD_BUSY_TIMO;
- break;
- case FIT_SR_DRIVE_OFFLINE:
- skdev->state = SKD_DRVR_STATE_IDLE;
- break;
- case FIT_SR_DRIVE_SOFT_RESET:
- switch (skdev->state) {
- case SKD_DRVR_STATE_STARTING:
- case SKD_DRVR_STATE_RESTARTING:
- /* Expected by a caller of skd_soft_reset() */
- break;
- default:
- skdev->state = SKD_DRVR_STATE_RESTARTING;
- break;
- }
- break;
- case FIT_SR_DRIVE_FW_BOOTING:
- dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
- skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
- skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
- break;
-
- case FIT_SR_DRIVE_DEGRADED:
- case FIT_SR_PCIE_LINK_DOWN:
- case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
- break;
-
- case FIT_SR_DRIVE_FAULT:
- skd_drive_fault(skdev);
- skd_recover_requests(skdev);
- schedule_work(&skdev->start_queue);
- break;
-
- /* PCIe bus returned all Fs? */
- case 0xFF:
- dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
- sense);
- skd_drive_disappeared(skdev);
- skd_recover_requests(skdev);
- schedule_work(&skdev->start_queue);
- break;
- default:
- /*
- * Uknown FW State. Wait for a state we recognize.
- */
- break;
- }
- dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
- skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
- skd_skdev_state_to_str(skdev->state), skdev->state);
-}
-
-static bool skd_recover_request(struct request *req, void *data, bool reserved)
-{
- struct skd_device *const skdev = data;
- struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
-
- if (skreq->state != SKD_REQ_STATE_BUSY)
- return true;
-
- skd_log_skreq(skdev, skreq, "recover");
-
- /* Release DMA resources for the request. */
- if (skreq->n_sg > 0)
- skd_postop_sg_list(skdev, skreq);
-
- skreq->state = SKD_REQ_STATE_IDLE;
- skreq->status = BLK_STS_IOERR;
- blk_mq_complete_request(req);
- return true;
-}
-
-static void skd_recover_requests(struct skd_device *skdev)
-{
- blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);
-}
-
-static void skd_isr_msg_from_dev(struct skd_device *skdev)
-{
- u32 mfd;
- u32 mtd;
- u32 data;
-
- mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
-
- dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
- skdev->last_mtd);
-
- /* ignore any mtd that is an ack for something we didn't send */
- if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
- return;
-
- switch (FIT_MXD_TYPE(mfd)) {
- case FIT_MTD_FITFW_INIT:
- skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
-
- if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
- dev_err(&skdev->pdev->dev, "protocol mismatch\n");
- dev_err(&skdev->pdev->dev, " got=%d support=%d\n",
- skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
- dev_err(&skdev->pdev->dev, " please upgrade driver\n");
- skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
- skd_soft_reset(skdev);
- break;
- }
- mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
-
- case FIT_MTD_GET_CMDQ_DEPTH:
- skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
- mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
- SKD_N_COMPLETION_ENTRY);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
-
- case FIT_MTD_SET_COMPQ_DEPTH:
- SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
- mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
-
- case FIT_MTD_SET_COMPQ_ADDR:
- skd_reset_skcomp(skdev);
- mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
-
- case FIT_MTD_CMD_LOG_HOST_ID:
- /* hardware interface overflows in y2106 */
- skdev->connect_time_stamp = (u32)ktime_get_real_seconds();
- data = skdev->connect_time_stamp & 0xFFFF;
- mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
-
- case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
- skdev->drive_jiffies = FIT_MXD_DATA(mfd);
- data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
- mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
-
- case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
- skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
- mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
-
- dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
- skdev->connect_time_stamp, skdev->drive_jiffies);
- break;
-
- case FIT_MTD_ARM_QUEUE:
- skdev->last_mtd = 0;
- /*
- * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
- */
- break;
-
- default:
- break;
- }
-}
-
-static void skd_disable_interrupts(struct skd_device *skdev)
-{
- u32 sense;
-
- sense = SKD_READL(skdev, FIT_CONTROL);
- sense &= ~FIT_CR_ENABLE_INTERRUPTS;
- SKD_WRITEL(skdev, sense, FIT_CONTROL);
- dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
-
- /* Note that the 1s is written. A 1-bit means
- * disable, a 0 means enable.
- */
- SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
-}
-
-static void skd_enable_interrupts(struct skd_device *skdev)
-{
- u32 val;
-
- /* unmask interrupts first */
- val = FIT_ISH_FW_STATE_CHANGE +
- FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
-
-	/* Note that the complement of the mask is written. A 1 bit means
-	 * disable, a 0 means enable. */
- SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
- dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
-
- val = SKD_READL(skdev, FIT_CONTROL);
- val |= FIT_CR_ENABLE_INTERRUPTS;
- dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
- SKD_WRITEL(skdev, val, FIT_CONTROL);
-}
-
-/*
- *****************************************************************************
- * START, STOP, RESTART, QUIESCE, UNQUIESCE
- *****************************************************************************
- */
-
-static void skd_soft_reset(struct skd_device *skdev)
-{
- u32 val;
-
- val = SKD_READL(skdev, FIT_CONTROL);
- val |= (FIT_CR_SOFT_RESET);
- dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
- SKD_WRITEL(skdev, val, FIT_CONTROL);
-}
-
-static void skd_start_device(struct skd_device *skdev)
-{
- unsigned long flags;
- u32 sense;
- u32 state;
-
- spin_lock_irqsave(&skdev->lock, flags);
-
- /* ack all ghost interrupts */
- SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
-
- sense = SKD_READL(skdev, FIT_STATUS);
-
- dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
-
- state = sense & FIT_SR_DRIVE_STATE_MASK;
- skdev->drive_state = state;
- skdev->last_mtd = 0;
-
- skdev->state = SKD_DRVR_STATE_STARTING;
- skdev->timer_countdown = SKD_STARTING_TIMO;
-
- skd_enable_interrupts(skdev);
-
- switch (skdev->drive_state) {
- case FIT_SR_DRIVE_OFFLINE:
- dev_err(&skdev->pdev->dev, "Drive offline...\n");
- break;
-
- case FIT_SR_DRIVE_FW_BOOTING:
- dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
- skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
- skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
- break;
-
- case FIT_SR_DRIVE_BUSY_SANITIZE:
- dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
- skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
- skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
- break;
-
- case FIT_SR_DRIVE_BUSY_ERASE:
- dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
- skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
- skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
- break;
-
- case FIT_SR_DRIVE_INIT:
- case FIT_SR_DRIVE_ONLINE:
- skd_soft_reset(skdev);
- break;
-
- case FIT_SR_DRIVE_BUSY:
- dev_err(&skdev->pdev->dev, "Drive Busy...\n");
- skdev->state = SKD_DRVR_STATE_BUSY;
- skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
- break;
-
- case FIT_SR_DRIVE_SOFT_RESET:
- dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
- break;
-
- case FIT_SR_DRIVE_FAULT:
-		/* Fault state is bad: a soft reset won't clear it. A hard
-		 * reset might, but it is unclear whether the device supports
-		 * one. For now, just fault so the system doesn't hang.
-		 */
- skd_drive_fault(skdev);
-		/* start the queue so we can respond to requests with an error */
- dev_dbg(&skdev->pdev->dev, "starting queue\n");
- schedule_work(&skdev->start_queue);
- skdev->gendisk_on = -1;
- wake_up_interruptible(&skdev->waitq);
- break;
-
- case 0xFF:
- /* Most likely the device isn't there or isn't responding
- * to the BAR1 addresses. */
- skd_drive_disappeared(skdev);
-		/* start the queue so we can respond to requests with an error */
- dev_dbg(&skdev->pdev->dev,
- "starting queue to error-out reqs\n");
- schedule_work(&skdev->start_queue);
- skdev->gendisk_on = -1;
- wake_up_interruptible(&skdev->waitq);
- break;
-
- default:
- dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
- skdev->drive_state);
- break;
- }
-
- state = SKD_READL(skdev, FIT_CONTROL);
- dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
-
- state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
- dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
-
- state = SKD_READL(skdev, FIT_INT_MASK_HOST);
- dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
-
- state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
- dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
-
- state = SKD_READL(skdev, FIT_HW_VERSION);
- dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
-
- spin_unlock_irqrestore(&skdev->lock, flags);
-}
-
-static void skd_stop_device(struct skd_device *skdev)
-{
- unsigned long flags;
- struct skd_special_context *skspcl = &skdev->internal_skspcl;
- u32 dev_state;
- int i;
-
- spin_lock_irqsave(&skdev->lock, flags);
-
- if (skdev->state != SKD_DRVR_STATE_ONLINE) {
- dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
- goto stop_out;
- }
-
- if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
- dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
- goto stop_out;
- }
-
- skdev->state = SKD_DRVR_STATE_SYNCING;
- skdev->sync_done = 0;
-
- skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
-
- spin_unlock_irqrestore(&skdev->lock, flags);
-
- wait_event_interruptible_timeout(skdev->waitq,
- (skdev->sync_done), (10 * HZ));
-
- spin_lock_irqsave(&skdev->lock, flags);
-
- switch (skdev->sync_done) {
- case 0:
- dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
- break;
- case 1:
- dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
- break;
- default:
- dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
- }
-
-stop_out:
- skdev->state = SKD_DRVR_STATE_STOPPING;
- spin_unlock_irqrestore(&skdev->lock, flags);
-
- skd_kill_timer(skdev);
-
- spin_lock_irqsave(&skdev->lock, flags);
- skd_disable_interrupts(skdev);
-
- /* ensure all ints on device are cleared */
- /* soft reset the device to unload with a clean slate */
- SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
- SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
-
- spin_unlock_irqrestore(&skdev->lock, flags);
-
- /* poll every 100ms, 1 second timeout */
- for (i = 0; i < 10; i++) {
- dev_state =
- SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
- if (dev_state == FIT_SR_DRIVE_INIT)
- break;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(100));
- }
-
- if (dev_state != FIT_SR_DRIVE_INIT)
- dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
- dev_state);
-}
-
-/* assume spinlock is held */
-static void skd_restart_device(struct skd_device *skdev)
-{
- u32 state;
-
- /* ack all ghost interrupts */
- SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
-
- state = SKD_READL(skdev, FIT_STATUS);
-
- dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
-
- state &= FIT_SR_DRIVE_STATE_MASK;
- skdev->drive_state = state;
- skdev->last_mtd = 0;
-
- skdev->state = SKD_DRVR_STATE_RESTARTING;
- skdev->timer_countdown = SKD_RESTARTING_TIMO;
-
- skd_soft_reset(skdev);
-}
-
-/* assume spinlock is held */
-static int skd_quiesce_dev(struct skd_device *skdev)
-{
- int rc = 0;
-
- switch (skdev->state) {
- case SKD_DRVR_STATE_BUSY:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- dev_dbg(&skdev->pdev->dev, "stopping queue\n");
- blk_mq_stop_hw_queues(skdev->queue);
- break;
- case SKD_DRVR_STATE_ONLINE:
- case SKD_DRVR_STATE_STOPPING:
- case SKD_DRVR_STATE_SYNCING:
- case SKD_DRVR_STATE_PAUSING:
- case SKD_DRVR_STATE_PAUSED:
- case SKD_DRVR_STATE_STARTING:
- case SKD_DRVR_STATE_RESTARTING:
- case SKD_DRVR_STATE_RESUMING:
- default:
- rc = -EINVAL;
- dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
- skdev->state);
- }
- return rc;
-}
-
-/* assume spinlock is held */
-static int skd_unquiesce_dev(struct skd_device *skdev)
-{
- int prev_driver_state = skdev->state;
-
- skd_log_skdev(skdev, "unquiesce");
- if (skdev->state == SKD_DRVR_STATE_ONLINE) {
- dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
- return 0;
- }
- if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
- /*
-		 * If there has been a state change to anything other than
-		 * ONLINE, we will rely on a controller state change to come
-		 * back online and restart the queue.
-		 * The BUSY state means that the driver is ready to continue
-		 * normal processing but is waiting for the controller to
-		 * become available.
- */
- skdev->state = SKD_DRVR_STATE_BUSY;
- dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
- return 0;
- }
-
- /*
-	 * Drive has just come online; the driver is either in startup,
-	 * paused performing a task, or busy waiting for hardware.
- */
- switch (skdev->state) {
- case SKD_DRVR_STATE_PAUSED:
- case SKD_DRVR_STATE_BUSY:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- case SKD_DRVR_STATE_BUSY_ERASE:
- case SKD_DRVR_STATE_STARTING:
- case SKD_DRVR_STATE_RESTARTING:
- case SKD_DRVR_STATE_FAULT:
- case SKD_DRVR_STATE_IDLE:
- case SKD_DRVR_STATE_LOAD:
- skdev->state = SKD_DRVR_STATE_ONLINE;
- dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
- skd_skdev_state_to_str(prev_driver_state),
- prev_driver_state, skd_skdev_state_to_str(skdev->state),
- skdev->state);
- dev_dbg(&skdev->pdev->dev,
- "**** device ONLINE...starting block queue\n");
- dev_dbg(&skdev->pdev->dev, "starting queue\n");
- dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
- schedule_work(&skdev->start_queue);
- skdev->gendisk_on = 1;
- wake_up_interruptible(&skdev->waitq);
- break;
-
- case SKD_DRVR_STATE_DISAPPEARED:
- default:
- dev_dbg(&skdev->pdev->dev,
- "**** driver state %d, not implemented\n",
- skdev->state);
- return -EBUSY;
- }
- return 0;
-}
-
-/*
- *****************************************************************************
- * PCIe MSI/MSI-X INTERRUPT HANDLERS
- *****************************************************************************
- */
-
-static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
-{
- struct skd_device *skdev = skd_host_data;
- unsigned long flags;
-
- spin_lock_irqsave(&skdev->lock, flags);
- dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
- spin_unlock_irqrestore(&skdev->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
-{
- struct skd_device *skdev = skd_host_data;
- unsigned long flags;
-
- spin_lock_irqsave(&skdev->lock, flags);
- dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
- skd_isr_fwstate(skdev);
- spin_unlock_irqrestore(&skdev->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
-{
- struct skd_device *skdev = skd_host_data;
- unsigned long flags;
- int flush_enqueued = 0;
- int deferred;
-
- spin_lock_irqsave(&skdev->lock, flags);
- dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
- deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
- &flush_enqueued);
- if (flush_enqueued)
- schedule_work(&skdev->start_queue);
-
- if (deferred)
- schedule_work(&skdev->completion_worker);
- else if (!flush_enqueued)
- schedule_work(&skdev->start_queue);
-
- spin_unlock_irqrestore(&skdev->lock, flags);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
-{
- struct skd_device *skdev = skd_host_data;
- unsigned long flags;
-
- spin_lock_irqsave(&skdev->lock, flags);
- dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
- skd_isr_msg_from_dev(skdev);
- spin_unlock_irqrestore(&skdev->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
-{
- struct skd_device *skdev = skd_host_data;
- unsigned long flags;
-
- spin_lock_irqsave(&skdev->lock, flags);
- dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
- spin_unlock_irqrestore(&skdev->lock, flags);
- return IRQ_HANDLED;
-}
-
-/*
- *****************************************************************************
- * PCIe MSI/MSI-X SETUP
- *****************************************************************************
- */
-
-struct skd_msix_entry {
- char isr_name[30];
-};
-
-struct skd_init_msix_entry {
- const char *name;
- irq_handler_t handler;
-};
-
-#define SKD_MAX_MSIX_COUNT 13
-#define SKD_MIN_MSIX_COUNT 7
-#define SKD_BASE_MSIX_IRQ 4
-
-static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
- { "(DMA 0)", skd_reserved_isr },
- { "(DMA 1)", skd_reserved_isr },
- { "(DMA 2)", skd_reserved_isr },
- { "(DMA 3)", skd_reserved_isr },
- { "(State Change)", skd_statec_isr },
- { "(COMPL_Q)", skd_comp_q },
- { "(MSG)", skd_msg_isr },
- { "(Reserved)", skd_reserved_isr },
- { "(Reserved)", skd_reserved_isr },
- { "(Queue Full 0)", skd_qfull_isr },
- { "(Queue Full 1)", skd_qfull_isr },
- { "(Queue Full 2)", skd_qfull_isr },
- { "(Queue Full 3)", skd_qfull_isr },
-};
-
-static int skd_acquire_msix(struct skd_device *skdev)
-{
- int i, rc;
- struct pci_dev *pdev = skdev->pdev;
-
- rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
- PCI_IRQ_MSIX);
- if (rc < 0) {
- dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
- goto out;
- }
-
- skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
- sizeof(struct skd_msix_entry), GFP_KERNEL);
- if (!skdev->msix_entries) {
- rc = -ENOMEM;
- dev_err(&skdev->pdev->dev, "msix table allocation error\n");
- goto out;
- }
-
- /* Enable MSI-X vectors for the base queue */
- for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
- struct skd_msix_entry *qentry = &skdev->msix_entries[i];
-
- snprintf(qentry->isr_name, sizeof(qentry->isr_name),
- "%s%d-msix %s", DRV_NAME, skdev->devno,
- msix_entries[i].name);
-
- rc = devm_request_irq(&skdev->pdev->dev,
- pci_irq_vector(skdev->pdev, i),
- msix_entries[i].handler, 0,
- qentry->isr_name, skdev);
- if (rc) {
- dev_err(&skdev->pdev->dev,
- "Unable to register(%d) MSI-X handler %d: %s\n",
- rc, i, qentry->isr_name);
- goto msix_out;
- }
- }
-
- dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
- SKD_MAX_MSIX_COUNT);
- return 0;
-
-msix_out:
- while (--i >= 0)
- devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
-out:
- kfree(skdev->msix_entries);
- skdev->msix_entries = NULL;
- return rc;
-}
-
-static int skd_acquire_irq(struct skd_device *skdev)
-{
- struct pci_dev *pdev = skdev->pdev;
- unsigned int irq_flag = PCI_IRQ_LEGACY;
- int rc;
-
- if (skd_isr_type == SKD_IRQ_MSIX) {
- rc = skd_acquire_msix(skdev);
- if (!rc)
- return 0;
-
- dev_err(&skdev->pdev->dev,
- "failed to enable MSI-X, re-trying with MSI %d\n", rc);
- }
-
- snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
- skdev->devno);
-
- if (skd_isr_type != SKD_IRQ_LEGACY)
- irq_flag |= PCI_IRQ_MSI;
- rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
- if (rc < 0) {
- dev_err(&skdev->pdev->dev,
- "failed to allocate the MSI interrupt %d\n", rc);
- return rc;
- }
-
- rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
- pdev->msi_enabled ? 0 : IRQF_SHARED,
- skdev->isr_name, skdev);
- if (rc) {
- pci_free_irq_vectors(pdev);
- dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
- rc);
- return rc;
- }
-
- return 0;
-}
-
-static void skd_release_irq(struct skd_device *skdev)
-{
- struct pci_dev *pdev = skdev->pdev;
-
- if (skdev->msix_entries) {
- int i;
-
- for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
- devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
- skdev);
- }
-
- kfree(skdev->msix_entries);
- skdev->msix_entries = NULL;
- } else {
- devm_free_irq(&pdev->dev, pdev->irq, skdev);
- }
-
- pci_free_irq_vectors(pdev);
-}
-
-/*
- *****************************************************************************
- * CONSTRUCT
- *****************************************************************************
- */
-
-static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
- dma_addr_t *dma_handle, gfp_t gfp,
- enum dma_data_direction dir)
-{
- struct device *dev = &skdev->pdev->dev;
- void *buf;
-
- buf = kmem_cache_alloc(s, gfp);
- if (!buf)
- return NULL;
- *dma_handle = dma_map_single(dev, buf,
- kmem_cache_size(s), dir);
- if (dma_mapping_error(dev, *dma_handle)) {
- kmem_cache_free(s, buf);
- buf = NULL;
- }
- return buf;
-}
-
-static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s,
- void *vaddr, dma_addr_t dma_handle,
- enum dma_data_direction dir)
-{
- if (!vaddr)
- return;
-
- dma_unmap_single(&skdev->pdev->dev, dma_handle,
- kmem_cache_size(s), dir);
- kmem_cache_free(s, vaddr);
-}
-
-static int skd_cons_skcomp(struct skd_device *skdev)
-{
- int rc = 0;
- struct fit_completion_entry_v1 *skcomp;
-
- dev_dbg(&skdev->pdev->dev,
- "comp pci_alloc, total bytes %zd entries %d\n",
- SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
-
- skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
- &skdev->cq_dma_address, GFP_KERNEL);
-
- if (skcomp == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- skdev->skcomp_table = skcomp;
- skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
- sizeof(*skcomp) *
- SKD_N_COMPLETION_ENTRY);
-
-err_out:
- return rc;
-}
-
-static int skd_cons_skmsg(struct skd_device *skdev)
-{
- int rc = 0;
- u32 i;
-
- dev_dbg(&skdev->pdev->dev,
- "skmsg_table kcalloc, struct %lu, count %u total %lu\n",
- sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
- sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
-
- skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
- sizeof(struct skd_fitmsg_context),
- GFP_KERNEL);
- if (skdev->skmsg_table == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- for (i = 0; i < skdev->num_fitmsg_context; i++) {
- struct skd_fitmsg_context *skmsg;
-
- skmsg = &skdev->skmsg_table[i];
-
- skmsg->id = i + SKD_ID_FIT_MSG;
-
- skmsg->msg_buf = dma_alloc_coherent(&skdev->pdev->dev,
- SKD_N_FITMSG_BYTES,
- &skmsg->mb_dma_address,
- GFP_KERNEL);
- if (skmsg->msg_buf == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
- (FIT_QCMD_ALIGN - 1),
- "not aligned: msg_buf %p mb_dma_address %pad\n",
- skmsg->msg_buf, &skmsg->mb_dma_address);
- }
-
-err_out:
- return rc;
-}
-
-static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
- u32 n_sg,
- dma_addr_t *ret_dma_addr)
-{
- struct fit_sg_descriptor *sg_list;
-
- sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr,
- GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
-
- if (sg_list != NULL) {
- uint64_t dma_address = *ret_dma_addr;
- u32 i;
-
- for (i = 0; i < n_sg - 1; i++) {
- uint64_t ndp_off;
- ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
-
- sg_list[i].next_desc_ptr = dma_address + ndp_off;
- }
- sg_list[i].next_desc_ptr = 0LL;
- }
-
- return sg_list;
-}
-
-static void skd_free_sg_list(struct skd_device *skdev,
- struct fit_sg_descriptor *sg_list,
- dma_addr_t dma_addr)
-{
- if (WARN_ON_ONCE(!sg_list))
- return;
-
- skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr,
- DMA_TO_DEVICE);
-}
-
-static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq,
- unsigned int hctx_idx, unsigned int numa_node)
-{
- struct skd_device *skdev = set->driver_data;
- struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
-
- skreq->state = SKD_REQ_STATE_IDLE;
- skreq->sg = (void *)(skreq + 1);
- sg_init_table(skreq->sg, skd_sgs_per_request);
- skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
- &skreq->sksg_dma_address);
-
- return skreq->sksg_list ? 0 : -ENOMEM;
-}
-
-static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq,
- unsigned int hctx_idx)
-{
- struct skd_device *skdev = set->driver_data;
- struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
-
- skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address);
-}
-
-static int skd_cons_sksb(struct skd_device *skdev)
-{
- int rc = 0;
- struct skd_special_context *skspcl;
-
- skspcl = &skdev->internal_skspcl;
-
- skspcl->req.id = 0 + SKD_ID_INTERNAL;
- skspcl->req.state = SKD_REQ_STATE_IDLE;
-
- skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache,
- &skspcl->db_dma_address,
- GFP_DMA | __GFP_ZERO,
- DMA_BIDIRECTIONAL);
- if (skspcl->data_buf == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache,
- &skspcl->mb_dma_address,
- GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
- if (skspcl->msg_buf == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
- &skspcl->req.sksg_dma_address);
- if (skspcl->req.sksg_list == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- if (!skd_format_internal_skspcl(skdev)) {
- rc = -EINVAL;
- goto err_out;
- }
-
-err_out:
- return rc;
-}
-
-static const struct blk_mq_ops skd_mq_ops = {
- .queue_rq = skd_mq_queue_rq,
- .complete = skd_complete_rq,
- .timeout = skd_timed_out,
- .init_request = skd_init_request,
- .exit_request = skd_exit_request,
-};
-
-static int skd_cons_disk(struct skd_device *skdev)
-{
- int rc = 0;
- struct gendisk *disk;
- struct request_queue *q;
- unsigned long flags;
-
- disk = alloc_disk(SKD_MINORS_PER_DEVICE);
- if (!disk) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- skdev->disk = disk;
- sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
-
- disk->major = skdev->major;
- disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
- disk->fops = &skd_blockdev_ops;
- disk->private_data = skdev;
-
- memset(&skdev->tag_set, 0, sizeof(skdev->tag_set));
- skdev->tag_set.ops = &skd_mq_ops;
- skdev->tag_set.nr_hw_queues = 1;
- skdev->tag_set.queue_depth = skd_max_queue_depth;
- skdev->tag_set.cmd_size = sizeof(struct skd_request_context) +
- skdev->sgs_per_request * sizeof(struct scatterlist);
- skdev->tag_set.numa_node = NUMA_NO_NODE;
- skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
- skdev->tag_set.driver_data = skdev;
- rc = blk_mq_alloc_tag_set(&skdev->tag_set);
- if (rc)
- goto err_out;
- q = blk_mq_init_queue(&skdev->tag_set);
- if (IS_ERR(q)) {
- blk_mq_free_tag_set(&skdev->tag_set);
- rc = PTR_ERR(q);
- goto err_out;
- }
- q->queuedata = skdev;
-
- skdev->queue = q;
- disk->queue = q;
-
- blk_queue_write_cache(q, true, true);
- blk_queue_max_segments(q, skdev->sgs_per_request);
- blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
-
- /* set optimal I/O size to 8KB */
- blk_queue_io_opt(q, 8192);
-
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
-
- blk_queue_rq_timeout(q, 8 * HZ);
-
- spin_lock_irqsave(&skdev->lock, flags);
- dev_dbg(&skdev->pdev->dev, "stopping queue\n");
- blk_mq_stop_hw_queues(skdev->queue);
- spin_unlock_irqrestore(&skdev->lock, flags);
-
-err_out:
- return rc;
-}
-
-#define SKD_N_DEV_TABLE 16u
-static u32 skd_next_devno;
-
-static struct skd_device *skd_construct(struct pci_dev *pdev)
-{
- struct skd_device *skdev;
- int blk_major = skd_major;
- size_t size;
- int rc;
-
- skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
-
- if (!skdev) {
- dev_err(&pdev->dev, "memory alloc failure\n");
- return NULL;
- }
-
- skdev->state = SKD_DRVR_STATE_LOAD;
- skdev->pdev = pdev;
- skdev->devno = skd_next_devno++;
- skdev->major = blk_major;
- skdev->dev_max_queue_depth = 0;
-
- skdev->num_req_context = skd_max_queue_depth;
- skdev->num_fitmsg_context = skd_max_queue_depth;
- skdev->cur_max_queue_depth = 1;
- skdev->queue_low_water_mark = 1;
- skdev->proto_ver = 99;
- skdev->sgs_per_request = skd_sgs_per_request;
- skdev->dbg_level = skd_dbg_level;
-
- spin_lock_init(&skdev->lock);
-
- INIT_WORK(&skdev->start_queue, skd_start_queue);
- INIT_WORK(&skdev->completion_worker, skd_completion_worker);
-
- size = max(SKD_N_FITMSG_BYTES, SKD_N_SPECIAL_FITMSG_BYTES);
- skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!skdev->msgbuf_cache)
- goto err_out;
- WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size,
- "skd-msgbuf: %d < %zd\n",
- kmem_cache_size(skdev->msgbuf_cache), size);
- size = skd_sgs_per_request * sizeof(struct fit_sg_descriptor);
- skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!skdev->sglist_cache)
- goto err_out;
- WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size,
- "skd-sglist: %d < %zd\n",
- kmem_cache_size(skdev->sglist_cache), size);
- size = SKD_N_INTERNAL_BYTES;
- skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!skdev->databuf_cache)
- goto err_out;
- WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size,
- "skd-databuf: %d < %zd\n",
- kmem_cache_size(skdev->databuf_cache), size);
-
- dev_dbg(&skdev->pdev->dev, "skcomp\n");
- rc = skd_cons_skcomp(skdev);
- if (rc < 0)
- goto err_out;
-
- dev_dbg(&skdev->pdev->dev, "skmsg\n");
- rc = skd_cons_skmsg(skdev);
- if (rc < 0)
- goto err_out;
-
- dev_dbg(&skdev->pdev->dev, "sksb\n");
- rc = skd_cons_sksb(skdev);
- if (rc < 0)
- goto err_out;
-
- dev_dbg(&skdev->pdev->dev, "disk\n");
- rc = skd_cons_disk(skdev);
- if (rc < 0)
- goto err_out;
-
- dev_dbg(&skdev->pdev->dev, "VICTORY\n");
- return skdev;
-
-err_out:
- dev_dbg(&skdev->pdev->dev, "construct failed\n");
- skd_destruct(skdev);
- return NULL;
-}
-
-/*
- *****************************************************************************
- * DESTRUCT (FREE)
- *****************************************************************************
- */
-
-static void skd_free_skcomp(struct skd_device *skdev)
-{
- if (skdev->skcomp_table)
- dma_free_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
- skdev->skcomp_table, skdev->cq_dma_address);
-
- skdev->skcomp_table = NULL;
- skdev->cq_dma_address = 0;
-}
-
-static void skd_free_skmsg(struct skd_device *skdev)
-{
- u32 i;
-
- if (skdev->skmsg_table == NULL)
- return;
-
- for (i = 0; i < skdev->num_fitmsg_context; i++) {
- struct skd_fitmsg_context *skmsg;
-
- skmsg = &skdev->skmsg_table[i];
-
- if (skmsg->msg_buf != NULL) {
- dma_free_coherent(&skdev->pdev->dev, SKD_N_FITMSG_BYTES,
- skmsg->msg_buf,
- skmsg->mb_dma_address);
- }
- skmsg->msg_buf = NULL;
- skmsg->mb_dma_address = 0;
- }
-
- kfree(skdev->skmsg_table);
- skdev->skmsg_table = NULL;
-}
-
-static void skd_free_sksb(struct skd_device *skdev)
-{
- struct skd_special_context *skspcl = &skdev->internal_skspcl;
-
- skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf,
- skspcl->db_dma_address, DMA_BIDIRECTIONAL);
-
- skspcl->data_buf = NULL;
- skspcl->db_dma_address = 0;
-
- skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf,
- skspcl->mb_dma_address, DMA_TO_DEVICE);
-
- skspcl->msg_buf = NULL;
- skspcl->mb_dma_address = 0;
-
- skd_free_sg_list(skdev, skspcl->req.sksg_list,
- skspcl->req.sksg_dma_address);
-
- skspcl->req.sksg_list = NULL;
- skspcl->req.sksg_dma_address = 0;
-}
-
-static void skd_free_disk(struct skd_device *skdev)
-{
- struct gendisk *disk = skdev->disk;
-
- if (disk && (disk->flags & GENHD_FL_UP))
- del_gendisk(disk);
-
- if (skdev->queue) {
- blk_cleanup_queue(skdev->queue);
- skdev->queue = NULL;
- if (disk)
- disk->queue = NULL;
- }
-
- if (skdev->tag_set.tags)
- blk_mq_free_tag_set(&skdev->tag_set);
-
- put_disk(disk);
- skdev->disk = NULL;
-}
-
-static void skd_destruct(struct skd_device *skdev)
-{
- if (skdev == NULL)
- return;
-
- cancel_work_sync(&skdev->start_queue);
-
- dev_dbg(&skdev->pdev->dev, "disk\n");
- skd_free_disk(skdev);
-
- dev_dbg(&skdev->pdev->dev, "sksb\n");
- skd_free_sksb(skdev);
-
- dev_dbg(&skdev->pdev->dev, "skmsg\n");
- skd_free_skmsg(skdev);
-
- dev_dbg(&skdev->pdev->dev, "skcomp\n");
- skd_free_skcomp(skdev);
-
- kmem_cache_destroy(skdev->databuf_cache);
- kmem_cache_destroy(skdev->sglist_cache);
- kmem_cache_destroy(skdev->msgbuf_cache);
-
- dev_dbg(&skdev->pdev->dev, "skdev\n");
- kfree(skdev);
-}
-
-/*
- *****************************************************************************
- * BLOCK DEVICE (BDEV) GLUE
- *****************************************************************************
- */
-
-static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct skd_device *skdev;
- u64 capacity;
-
- skdev = bdev->bd_disk->private_data;
-
- dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
- bdev->bd_disk->disk_name, current->comm);
-
- if (skdev->read_cap_is_valid) {
- capacity = get_capacity(skdev->disk);
- geo->heads = 64;
- geo->sectors = 255;
- geo->cylinders = (capacity) / (255 * 64);
-
- return 0;
- }
- return -EIO;
-}
-
-static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
-{
- dev_dbg(&skdev->pdev->dev, "add_disk\n");
- device_add_disk(parent, skdev->disk, NULL);
- return 0;
-}
-
-static const struct block_device_operations skd_blockdev_ops = {
- .owner = THIS_MODULE,
- .getgeo = skd_bdev_getgeo,
-};
-
-/*
- *****************************************************************************
- * PCIe DRIVER GLUE
- *****************************************************************************
- */
-
-static const struct pci_device_id skd_pci_tbl[] = {
- { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
- { 0 } /* terminate list */
-};
-
-MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
-
-static char *skd_pci_info(struct skd_device *skdev, char *str)
-{
- int pcie_reg;
-
- strcpy(str, "PCIe (");
- pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
-
- if (pcie_reg) {
-
- char lwstr[6];
- uint16_t pcie_lstat, lspeed, lwidth;
-
- pcie_reg += 0x12;
- pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
- lspeed = pcie_lstat & (0xF);
- lwidth = (pcie_lstat & 0x3F0) >> 4;
-
- if (lspeed == 1)
- strcat(str, "2.5GT/s ");
- else if (lspeed == 2)
- strcat(str, "5.0GT/s ");
- else
- strcat(str, "<unknown> ");
- snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
- strcat(str, lwstr);
- }
- return str;
-}
-
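
skd_pci_info() above reads the PCIe Link Status word at capability offset +0x12 and decodes the link speed from bits 0-3 and the link width from bits 4-9. A minimal user-space sketch of the same decode, with a made-up sample value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t lstat = 0x0041;	/* hypothetical raw Link Status word */
	unsigned int speed = lstat & 0xF;		/* bits 0-3 */
	unsigned int width = (lstat & 0x3F0) >> 4;	/* bits 4-9 */

	/* prints "PCIe (2.5GT/s 4X)" for the sample value above */
	printf("PCIe (%s %uX)\n",
	       speed == 1 ? "2.5GT/s" : speed == 2 ? "5.0GT/s" : "<unknown>",
	       width);
	return 0;
}
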
-static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int i;
- int rc = 0;
- char pci_str[32];
- struct skd_device *skdev;
-
- dev_dbg(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
- pdev->device);
-
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
- rc = pci_request_regions(pdev, DRV_NAME);
- if (rc)
- goto err_out;
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev, "DMA mask error %d\n", rc);
- goto err_out_regions;
- }
-
- if (!skd_major) {
- rc = register_blkdev(0, DRV_NAME);
- if (rc < 0)
- goto err_out_regions;
- BUG_ON(!rc);
- skd_major = rc;
- }
-
- skdev = skd_construct(pdev);
- if (skdev == NULL) {
- rc = -ENOMEM;
- goto err_out_regions;
- }
-
- skd_pci_info(skdev, pci_str);
- dev_info(&pdev->dev, "%s 64bit\n", pci_str);
-
- pci_set_master(pdev);
- rc = pci_enable_pcie_error_reporting(pdev);
- if (rc) {
- dev_err(&pdev->dev,
- "bad enable of PCIe error reporting rc=%d\n", rc);
- skdev->pcie_error_reporting_is_enabled = 0;
- } else
- skdev->pcie_error_reporting_is_enabled = 1;
-
- pci_set_drvdata(pdev, skdev);
-
- for (i = 0; i < SKD_MAX_BARS; i++) {
- skdev->mem_phys[i] = pci_resource_start(pdev, i);
- skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
- skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
- skdev->mem_size[i]);
- if (!skdev->mem_map[i]) {
- dev_err(&pdev->dev,
- "Unable to map adapter memory!\n");
- rc = -ENODEV;
- goto err_out_iounmap;
- }
- dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
- skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
- skdev->mem_size[i]);
- }
-
- rc = skd_acquire_irq(skdev);
- if (rc) {
- dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
- goto err_out_iounmap;
- }
-
- rc = skd_start_timer(skdev);
- if (rc)
- goto err_out_timer;
-
- init_waitqueue_head(&skdev->waitq);
-
- skd_start_device(skdev);
-
- rc = wait_event_interruptible_timeout(skdev->waitq,
- (skdev->gendisk_on),
- (SKD_START_WAIT_SECONDS * HZ));
- if (skdev->gendisk_on > 0) {
- /* device came on-line after reset */
- skd_bdev_attach(&pdev->dev, skdev);
- rc = 0;
- } else {
-		/* We timed out; something is wrong with the device,
-		 * so don't add the disk structure. */
- dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
- rc);
-		/* if there was no other error, report the timeout as -ENXIO */
- if (!rc)
- rc = -ENXIO;
- goto err_out_timer;
- }
-
- return rc;
-
-err_out_timer:
- skd_stop_device(skdev);
- skd_release_irq(skdev);
-
-err_out_iounmap:
- for (i = 0; i < SKD_MAX_BARS; i++)
- if (skdev->mem_map[i])
- iounmap(skdev->mem_map[i]);
-
- if (skdev->pcie_error_reporting_is_enabled)
- pci_disable_pcie_error_reporting(pdev);
-
- skd_destruct(skdev);
-
-err_out_regions:
- pci_release_regions(pdev);
-
-err_out:
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
- return rc;
-}
-
-static void skd_pci_remove(struct pci_dev *pdev)
-{
- int i;
- struct skd_device *skdev;
-
- skdev = pci_get_drvdata(pdev);
- if (!skdev) {
- dev_err(&pdev->dev, "no device data for PCI\n");
- return;
- }
- skd_stop_device(skdev);
- skd_release_irq(skdev);
-
- for (i = 0; i < SKD_MAX_BARS; i++)
- if (skdev->mem_map[i])
- iounmap(skdev->mem_map[i]);
-
- if (skdev->pcie_error_reporting_is_enabled)
- pci_disable_pcie_error_reporting(pdev);
-
- skd_destruct(skdev);
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
-
- return;
-}
-
-static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- int i;
- struct skd_device *skdev;
-
- skdev = pci_get_drvdata(pdev);
- if (!skdev) {
- dev_err(&pdev->dev, "no device data for PCI\n");
- return -EIO;
- }
-
- skd_stop_device(skdev);
-
- skd_release_irq(skdev);
-
- for (i = 0; i < SKD_MAX_BARS; i++)
- if (skdev->mem_map[i])
- iounmap(skdev->mem_map[i]);
-
- if (skdev->pcie_error_reporting_is_enabled)
- pci_disable_pcie_error_reporting(pdev);
-
- pci_release_regions(pdev);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- return 0;
-}
-
-static int skd_pci_resume(struct pci_dev *pdev)
-{
- int i;
- int rc = 0;
- struct skd_device *skdev;
-
- skdev = pci_get_drvdata(pdev);
- if (!skdev) {
- dev_err(&pdev->dev, "no device data for PCI\n");
- return -1;
- }
-
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
-
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
- rc = pci_request_regions(pdev, DRV_NAME);
- if (rc)
- goto err_out;
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev, "DMA mask error %d\n", rc);
- goto err_out_regions;
- }
-
- pci_set_master(pdev);
- rc = pci_enable_pcie_error_reporting(pdev);
- if (rc) {
- dev_err(&pdev->dev,
- "bad enable of PCIe error reporting rc=%d\n", rc);
- skdev->pcie_error_reporting_is_enabled = 0;
- } else
- skdev->pcie_error_reporting_is_enabled = 1;
-
- for (i = 0; i < SKD_MAX_BARS; i++) {
-
- skdev->mem_phys[i] = pci_resource_start(pdev, i);
- skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
- skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
- skdev->mem_size[i]);
- if (!skdev->mem_map[i]) {
- dev_err(&pdev->dev, "Unable to map adapter memory!\n");
- rc = -ENODEV;
- goto err_out_iounmap;
- }
- dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
- skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
- skdev->mem_size[i]);
- }
- rc = skd_acquire_irq(skdev);
- if (rc) {
- dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
- goto err_out_iounmap;
- }
-
- rc = skd_start_timer(skdev);
- if (rc)
- goto err_out_timer;
-
- init_waitqueue_head(&skdev->waitq);
-
- skd_start_device(skdev);
-
- return rc;
-
-err_out_timer:
- skd_stop_device(skdev);
- skd_release_irq(skdev);
-
-err_out_iounmap:
- for (i = 0; i < SKD_MAX_BARS; i++)
- if (skdev->mem_map[i])
- iounmap(skdev->mem_map[i]);
-
- if (skdev->pcie_error_reporting_is_enabled)
- pci_disable_pcie_error_reporting(pdev);
-
-err_out_regions:
- pci_release_regions(pdev);
-
-err_out:
- pci_disable_device(pdev);
- return rc;
-}
-
-static void skd_pci_shutdown(struct pci_dev *pdev)
-{
- struct skd_device *skdev;
-
- dev_err(&pdev->dev, "%s called\n", __func__);
-
- skdev = pci_get_drvdata(pdev);
- if (!skdev) {
- dev_err(&pdev->dev, "no device data for PCI\n");
- return;
- }
-
- dev_err(&pdev->dev, "calling stop\n");
- skd_stop_device(skdev);
-}
-
-static struct pci_driver skd_driver = {
- .name = DRV_NAME,
- .id_table = skd_pci_tbl,
- .probe = skd_pci_probe,
- .remove = skd_pci_remove,
- .suspend = skd_pci_suspend,
- .resume = skd_pci_resume,
- .shutdown = skd_pci_shutdown,
-};
-
-/*
- *****************************************************************************
- * LOGGING SUPPORT
- *****************************************************************************
- */
-
-const char *skd_drive_state_to_str(int state)
-{
- switch (state) {
- case FIT_SR_DRIVE_OFFLINE:
- return "OFFLINE";
- case FIT_SR_DRIVE_INIT:
- return "INIT";
- case FIT_SR_DRIVE_ONLINE:
- return "ONLINE";
- case FIT_SR_DRIVE_BUSY:
- return "BUSY";
- case FIT_SR_DRIVE_FAULT:
- return "FAULT";
- case FIT_SR_DRIVE_DEGRADED:
- return "DEGRADED";
- case FIT_SR_PCIE_LINK_DOWN:
- return "INK_DOWN";
- case FIT_SR_DRIVE_SOFT_RESET:
- return "SOFT_RESET";
- case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
- return "NEED_FW";
- case FIT_SR_DRIVE_INIT_FAULT:
- return "INIT_FAULT";
- case FIT_SR_DRIVE_BUSY_SANITIZE:
- return "BUSY_SANITIZE";
- case FIT_SR_DRIVE_BUSY_ERASE:
- return "BUSY_ERASE";
- case FIT_SR_DRIVE_FW_BOOTING:
- return "FW_BOOTING";
- default:
- return "???";
- }
-}
-
-const char *skd_skdev_state_to_str(enum skd_drvr_state state)
-{
- switch (state) {
- case SKD_DRVR_STATE_LOAD:
- return "LOAD";
- case SKD_DRVR_STATE_IDLE:
- return "IDLE";
- case SKD_DRVR_STATE_BUSY:
- return "BUSY";
- case SKD_DRVR_STATE_STARTING:
- return "STARTING";
- case SKD_DRVR_STATE_ONLINE:
- return "ONLINE";
- case SKD_DRVR_STATE_PAUSING:
- return "PAUSING";
- case SKD_DRVR_STATE_PAUSED:
- return "PAUSED";
- case SKD_DRVR_STATE_RESTARTING:
- return "RESTARTING";
- case SKD_DRVR_STATE_RESUMING:
- return "RESUMING";
- case SKD_DRVR_STATE_STOPPING:
- return "STOPPING";
- case SKD_DRVR_STATE_SYNCING:
- return "SYNCING";
- case SKD_DRVR_STATE_FAULT:
- return "FAULT";
- case SKD_DRVR_STATE_DISAPPEARED:
- return "DISAPPEARED";
- case SKD_DRVR_STATE_BUSY_ERASE:
- return "BUSY_ERASE";
- case SKD_DRVR_STATE_BUSY_SANITIZE:
- return "BUSY_SANITIZE";
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- return "BUSY_IMMINENT";
- case SKD_DRVR_STATE_WAIT_BOOT:
- return "WAIT_BOOT";
-
- default:
- return "???";
- }
-}
-
-static const char *skd_skreq_state_to_str(enum skd_req_state state)
-{
- switch (state) {
- case SKD_REQ_STATE_IDLE:
- return "IDLE";
- case SKD_REQ_STATE_SETUP:
- return "SETUP";
- case SKD_REQ_STATE_BUSY:
- return "BUSY";
- case SKD_REQ_STATE_COMPLETED:
- return "COMPLETED";
- case SKD_REQ_STATE_TIMEOUT:
- return "TIMEOUT";
- default:
- return "???";
- }
-}
-
-static void skd_log_skdev(struct skd_device *skdev, const char *event)
-{
- dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
- dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n",
- skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
- skd_skdev_state_to_str(skdev->state), skdev->state);
- dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n",
- skd_in_flight(skdev), skdev->cur_max_queue_depth,
- skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
- dev_dbg(&skdev->pdev->dev, " cycle=%d cycle_ix=%d\n",
- skdev->skcomp_cycle, skdev->skcomp_ix);
-}
-
-static void skd_log_skreq(struct skd_device *skdev,
- struct skd_request_context *skreq, const char *event)
-{
- struct request *req = blk_mq_rq_from_pdu(skreq);
- u32 lba = blk_rq_pos(req);
- u32 count = blk_rq_sectors(req);
-
- dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
- dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
- skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
- skreq->fitmsg_id);
- dev_dbg(&skdev->pdev->dev, " sg_dir=%d n_sg=%d\n",
- skreq->data_dir, skreq->n_sg);
-
- dev_dbg(&skdev->pdev->dev,
- "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba,
- count, count, (int)rq_data_dir(req));
-}
-
-/*
- *****************************************************************************
- * MODULE GLUE
- *****************************************************************************
- */
-
-static int __init skd_init(void)
-{
- BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
- BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
- BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
- BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
- BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
- BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
- BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
- BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);
-
- switch (skd_isr_type) {
- case SKD_IRQ_LEGACY:
- case SKD_IRQ_MSI:
- case SKD_IRQ_MSIX:
- break;
- default:
- pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
- skd_isr_type, SKD_IRQ_DEFAULT);
- skd_isr_type = SKD_IRQ_DEFAULT;
- }
-
- if (skd_max_queue_depth < 1 ||
- skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
- pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
- skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
- skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
- }
-
- if (skd_max_req_per_msg < 1 ||
- skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
- pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
- skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
- skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
- }
-
- if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
- pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n",
- skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
- skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
- }
-
- if (skd_dbg_level < 0 || skd_dbg_level > 2) {
- pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
- skd_dbg_level, 0);
- skd_dbg_level = 0;
- }
-
- if (skd_isr_comp_limit < 0) {
- pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
- skd_isr_comp_limit, 0);
- skd_isr_comp_limit = 0;
- }
-
- return pci_register_driver(&skd_driver);
-}
-
-static void __exit skd_exit(void)
-{
- pci_unregister_driver(&skd_driver);
-
- if (skd_major)
- unregister_blkdev(skd_major, DRV_NAME);
-}
-
-module_init(skd_init);
-module_exit(skd_exit);
diff --git a/drivers/block/skd_s1120.h b/drivers/block/skd_s1120.h
deleted file mode 100644
index c30bb98c7cd2..000000000000
--- a/drivers/block/skd_s1120.h
+++ /dev/null
@@ -1,322 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2012 STEC, Inc.
- * Copyright (c) 2017 Western Digital Corporation or its affiliates.
- */
-
-
-#ifndef SKD_S1120_H
-#define SKD_S1120_H
-
-/*
- * Q-channel, 64-bit r/w
- */
-#define FIT_Q_COMMAND 0x400u
-#define FIT_QCMD_QID_MASK (0x3 << 1)
-#define FIT_QCMD_QID0 (0x0 << 1)
-#define FIT_QCMD_QID_NORMAL FIT_QCMD_QID0
-#define FIT_QCMD_QID1 (0x1 << 1)
-#define FIT_QCMD_QID2 (0x2 << 1)
-#define FIT_QCMD_QID3 (0x3 << 1)
-#define FIT_QCMD_FLUSH_QUEUE (0ull) /* add QID */
-#define FIT_QCMD_MSGSIZE_MASK (0x3 << 4)
-#define FIT_QCMD_MSGSIZE_64 (0x0 << 4)
-#define FIT_QCMD_MSGSIZE_128 (0x1 << 4)
-#define FIT_QCMD_MSGSIZE_256 (0x2 << 4)
-#define FIT_QCMD_MSGSIZE_512 (0x3 << 4)
-#define FIT_QCMD_ALIGN L1_CACHE_BYTES
-
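
The FIT_QCMD_ALIGN requirement (together with the alignment WARN in skd_cons_skmsg in the deleted driver above) suggests that the queue doorbell value is the FIT message's DMA address OR'd with the QID and message-size flags, all of which sit below the alignment boundary. A minimal sketch under that assumption; the names and values are hypothetical:

#include <assert.h>
#include <stdint.h>

#define QCMD_QID_NORMAL		(0x0u << 1)
#define QCMD_MSGSIZE_128	(0x1u << 4)
#define QCMD_ALIGN		64u	/* stand-in for L1_CACHE_BYTES */

int main(void)
{
	uint64_t msg_dma = 0x12340000;	/* hypothetical, QCMD_ALIGN-aligned */
	uint64_t doorbell = msg_dma | QCMD_QID_NORMAL | QCMD_MSGSIZE_128;

	assert((msg_dma & (QCMD_ALIGN - 1)) == 0);
	/* masking off the low flag bits recovers the original address */
	assert((doorbell & ~(uint64_t)(QCMD_ALIGN - 1)) == msg_dma);
	return 0;
}
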
-/*
- * Control, 32-bit r/w
- */
-#define FIT_CONTROL 0x500u
-#define FIT_CR_HARD_RESET (1u << 0u)
-#define FIT_CR_SOFT_RESET (1u << 1u)
-#define FIT_CR_DIS_TIMESTAMPS (1u << 6u)
-#define FIT_CR_ENABLE_INTERRUPTS (1u << 7u)
-
-/*
- * Status, 32-bit, r/o
- */
-#define FIT_STATUS 0x510u
-#define FIT_SR_DRIVE_STATE_MASK 0x000000FFu
-#define FIT_SR_SIGNATURE (0xFF << 8)
-#define FIT_SR_PIO_DMA (1 << 16)
-#define FIT_SR_DRIVE_OFFLINE 0x00
-#define FIT_SR_DRIVE_INIT 0x01
-/* #define FIT_SR_DRIVE_READY 0x02 */
-#define FIT_SR_DRIVE_ONLINE 0x03
-#define FIT_SR_DRIVE_BUSY 0x04
-#define FIT_SR_DRIVE_FAULT 0x05
-#define FIT_SR_DRIVE_DEGRADED 0x06
-#define FIT_SR_PCIE_LINK_DOWN 0x07
-#define FIT_SR_DRIVE_SOFT_RESET 0x08
-#define FIT_SR_DRIVE_INIT_FAULT 0x09
-#define FIT_SR_DRIVE_BUSY_SANITIZE 0x0A
-#define FIT_SR_DRIVE_BUSY_ERASE 0x0B
-#define FIT_SR_DRIVE_FW_BOOTING 0x0C
-#define FIT_SR_DRIVE_NEED_FW_DOWNLOAD 0xFE
-#define FIT_SR_DEVICE_MISSING 0xFF
-#define FIT_SR__RESERVED 0xFFFFFF00u
-
-/*
- * FIT_STATUS - Status register data definition
- */
-#define FIT_SR_STATE_MASK (0xFF << 0)
-#define FIT_SR_SIGNATURE (0xFF << 8)
-#define FIT_SR_PIO_DMA (1 << 16)
-
-/*
- * Interrupt status, 32-bit r/w1c (w1c ==> write 1 to clear)
- */
-#define FIT_INT_STATUS_HOST 0x520u
-#define FIT_ISH_FW_STATE_CHANGE (1u << 0u)
-#define FIT_ISH_COMPLETION_POSTED (1u << 1u)
-#define FIT_ISH_MSG_FROM_DEV (1u << 2u)
-#define FIT_ISH_UNDEFINED_3 (1u << 3u)
-#define FIT_ISH_UNDEFINED_4 (1u << 4u)
-#define FIT_ISH_Q0_FULL (1u << 5u)
-#define FIT_ISH_Q1_FULL (1u << 6u)
-#define FIT_ISH_Q2_FULL (1u << 7u)
-#define FIT_ISH_Q3_FULL (1u << 8u)
-#define FIT_ISH_QCMD_FIFO_OVERRUN (1u << 9u)
-#define FIT_ISH_BAD_EXP_ROM_READ (1u << 10u)
-
-#define FIT_INT_DEF_MASK \
- (FIT_ISH_FW_STATE_CHANGE | \
- FIT_ISH_COMPLETION_POSTED | \
- FIT_ISH_MSG_FROM_DEV | \
- FIT_ISH_Q0_FULL | \
- FIT_ISH_Q1_FULL | \
- FIT_ISH_Q2_FULL | \
- FIT_ISH_Q3_FULL | \
- FIT_ISH_QCMD_FIFO_OVERRUN | \
- FIT_ISH_BAD_EXP_ROM_READ)
-
-#define FIT_INT_QUEUE_FULL \
- (FIT_ISH_Q0_FULL | \
- FIT_ISH_Q1_FULL | \
- FIT_ISH_Q2_FULL | \
- FIT_ISH_Q3_FULL)
-
-#define MSI_MSG_NWL_ERROR_0 0x00000000
-#define MSI_MSG_NWL_ERROR_1 0x00000001
-#define MSI_MSG_NWL_ERROR_2 0x00000002
-#define MSI_MSG_NWL_ERROR_3 0x00000003
-#define MSI_MSG_STATE_CHANGE 0x00000004
-#define MSI_MSG_COMPLETION_POSTED 0x00000005
-#define MSI_MSG_MSG_FROM_DEV 0x00000006
-#define MSI_MSG_RESERVED_0 0x00000007
-#define MSI_MSG_RESERVED_1 0x00000008
-#define MSI_MSG_QUEUE_0_FULL 0x00000009
-#define MSI_MSG_QUEUE_1_FULL 0x0000000A
-#define MSI_MSG_QUEUE_2_FULL 0x0000000B
-#define MSI_MSG_QUEUE_3_FULL 0x0000000C
-
-#define FIT_INT_RESERVED_MASK \
- (FIT_ISH_UNDEFINED_3 | \
- FIT_ISH_UNDEFINED_4)
-
-/*
- * Interrupt mask, 32-bit r/w
- * Bit definitions are the same as FIT_INT_STATUS_HOST
- */
-#define FIT_INT_MASK_HOST 0x528u
-
-/*
- * Message to device, 32-bit r/w
- */
-#define FIT_MSG_TO_DEVICE 0x540u
-
-/*
- * Message from device, 32-bit, r/o
- */
-#define FIT_MSG_FROM_DEVICE 0x548u
-
-/*
- * 32-bit messages to/from device, composition/extraction macros
- */
-#define FIT_MXD_CONS(TYPE, PARAM, DATA) \
- ((((TYPE) & 0xFFu) << 24u) | \
- (((PARAM) & 0xFFu) << 16u) | \
- (((DATA) & 0xFFFFu) << 0u))
-#define FIT_MXD_TYPE(MXD) (((MXD) >> 24u) & 0xFFu)
-#define FIT_MXD_PARAM(MXD) (((MXD) >> 16u) & 0xFFu)
-#define FIT_MXD_DATA(MXD) (((MXD) >> 0u) & 0xFFFFu)
-
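
A quick round trip through the composition/extraction macros above, using the FIT_MTD_GET_CMDQ_DEPTH request that skd_isr_msg_from_dev sends during startup; illustrative only, with local macro copies:

#include <assert.h>
#include <stdint.h>

#define MXD_CONS(type, param, data) \
	((((type) & 0xFFu) << 24u) | \
	 (((param) & 0xFFu) << 16u) | \
	 (((data) & 0xFFFFu) << 0u))
#define MXD_TYPE(mxd)	(((mxd) >> 24u) & 0xFFu)
#define MXD_PARAM(mxd)	(((mxd) >> 16u) & 0xFFu)
#define MXD_DATA(mxd)	(((mxd) >> 0u) & 0xFFFFu)

int main(void)
{
	/* type 0x02 == FIT_MTD_GET_CMDQ_DEPTH, no parameter, no data */
	uint32_t mtd = MXD_CONS(0x02, 0, 0);

	assert(mtd == 0x02000000u);
	assert(MXD_TYPE(mtd) == 0x02);
	assert(MXD_PARAM(mtd) == 0 && MXD_DATA(mtd) == 0);
	return 0;
}
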
-/*
- * Types of messages to/from device
- */
-#define FIT_MTD_FITFW_INIT 0x01u
-#define FIT_MTD_GET_CMDQ_DEPTH 0x02u
-#define FIT_MTD_SET_COMPQ_DEPTH 0x03u
-#define FIT_MTD_SET_COMPQ_ADDR 0x04u
-#define FIT_MTD_ARM_QUEUE 0x05u
-#define FIT_MTD_CMD_LOG_HOST_ID 0x07u
-#define FIT_MTD_CMD_LOG_TIME_STAMP_LO 0x08u
-#define FIT_MTD_CMD_LOG_TIME_STAMP_HI 0x09u
-#define FIT_MFD_SMART_EXCEEDED 0x10u
-#define FIT_MFD_POWER_DOWN 0x11u
-#define FIT_MFD_OFFLINE 0x12u
-#define FIT_MFD_ONLINE 0x13u
-#define FIT_MFD_FW_RESTARTING 0x14u
-#define FIT_MFD_PM_ACTIVE 0x15u
-#define FIT_MFD_PM_STANDBY 0x16u
-#define FIT_MFD_PM_SLEEP 0x17u
-#define FIT_MFD_CMD_PROGRESS 0x18u
-
-#define FIT_MTD_DEBUG 0xFEu
-#define FIT_MFD_DEBUG 0xFFu
-
-#define FIT_MFD_MASK (0xFFu)
-#define FIT_MFD_DATA_MASK (0xFFu)
-#define FIT_MFD_MSG(x) (((x) >> 24) & FIT_MFD_MASK)
-#define FIT_MFD_DATA(x) ((x) & FIT_MFD_MASK)
-
-/*
- * Extra arg to FIT_MSG_TO_DEVICE, 64-bit r/w
- * Used to set completion queue address (FIT_MTD_SET_COMPQ_ADDR)
- * (was Response buffer in docs)
- */
-#define FIT_MSG_TO_DEVICE_ARG 0x580u
-
-/*
- * Hardware (ASIC) version, 32-bit r/o
- */
-#define FIT_HW_VERSION 0x588u
-
-/*
- * Scatter/gather list descriptor.
- * Each descriptor is 32 bytes long and must be aligned on a 32-byte boundary.
- * All fields are in little-endian order.
- */
-struct fit_sg_descriptor {
- uint32_t control;
- uint32_t byte_count;
- uint64_t host_side_addr;
- uint64_t dev_side_addr;
- uint64_t next_desc_ptr;
-};
-
-#define FIT_SGD_CONTROL_NOT_LAST 0x000u
-#define FIT_SGD_CONTROL_LAST 0x40Eu
-
-/*
- * Header at the beginning of a FIT message. The header
- * is followed by SSDI requests, each 64 bytes long.
- * A FIT message can be up to 512 bytes long and must start
- * on a 64-byte boundary.
- */
-struct fit_msg_hdr {
- uint8_t protocol_id;
- uint8_t num_protocol_cmds_coalesced;
- uint8_t _reserved[62];
-};
-
-#define FIT_PROTOCOL_ID_FIT 1
-#define FIT_PROTOCOL_ID_SSDI 2
-#define FIT_PROTOCOL_ID_SOFIT 3
-
-
-#define FIT_PROTOCOL_MINOR_VER(mtd_val) ((mtd_val >> 16) & 0xF)
-#define FIT_PROTOCOL_MAJOR_VER(mtd_val) ((mtd_val >> 20) & 0xF)
-
-/*
- * Format of a completion entry. The completion queue is circular
- * and must have at least as many entries as the maximum number
- * of commands that may be issued to the device.
- *
- * There are no head/tail pointers. The cycle value is used to
- * infer the presence of new completion records.
- * Initially the cycle in all entries is 0, the index is 0, and
- * the cycle value to expect is 1. When completions are added
- * their cycle values are set to 1. When the index wraps the
- * cycle value to expect is incremented.
- *
- * Command_context is opaque and taken verbatim from the SSDI command.
- * All other fields are big endian.
- */
-#define FIT_PROTOCOL_VERSION_0 0
-
-/*
- * Protocol major version 1 completion entry.
- * The major protocol version is found in bits
- * 20-23 of the FIT_MTD_FITFW_INIT response.
- */
-struct fit_completion_entry_v1 {
- __be32 num_returned_bytes;
- uint16_t tag;
- uint8_t status; /* SCSI status */
- uint8_t cycle;
-};
-#define FIT_PROTOCOL_VERSION_1 1
-#define FIT_PROTOCOL_VERSION_CURRENT FIT_PROTOCOL_VERSION_1
-
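
The cycle-based polling described above can be sketched host-side in a few lines; the struct and helper below are hypothetical simplifications, not the driver's actual completion path:

#include <stdint.h>

struct cmpl_entry {		/* simplified stand-in for the entry above */
	uint16_t tag;
	uint8_t status;
	uint8_t cycle;
};

/* Consume entries whose cycle field matches the expected value; bump the
 * expected value each time the ring index wraps. Returns entries consumed. */
static int poll_completions(struct cmpl_entry *ring, unsigned int nents,
			    unsigned int *ix, uint8_t *expect)
{
	int consumed = 0;

	while (ring[*ix].cycle == *expect) {
		/* ... hand ring[*ix].tag / ring[*ix].status to the caller ... */
		consumed++;
		if (++(*ix) == nents) {
			*ix = 0;
			(*expect)++;	/* next lap expects the next cycle value */
		}
	}
	return consumed;
}

int main(void)
{
	struct cmpl_entry ring[4] = { { 0 } };	/* all cycles start at 0 */
	unsigned int ix = 0;
	uint8_t expect = 1;			/* first lap expects cycle 1 */

	ring[0].cycle = 1;			/* device posts one completion */
	return poll_completions(ring, 4, &ix, &expect) == 1 ? 0 : 1;
}
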
-struct fit_comp_error_info {
- uint8_t type:7; /* 00: Bits0-6 indicates the type of sense data. */
- uint8_t valid:1; /* 00: Bit 7 := 1 ==> info field is valid. */
- uint8_t reserved0; /* 01: Obsolete field */
- uint8_t key:4; /* 02: Bits0-3 indicate the sense key. */
- uint8_t reserved2:1; /* 02: Reserved bit. */
- uint8_t bad_length:1; /* 02: Incorrect Length Indicator */
- uint8_t end_medium:1; /* 02: End of Medium */
- uint8_t file_mark:1; /* 02: Filemark */
- uint8_t info[4]; /* 03: */
- uint8_t reserved1; /* 07: Additional Sense Length */
- uint8_t cmd_spec[4]; /* 08: Command Specific Information */
- uint8_t code; /* 0C: Additional Sense Code */
- uint8_t qual; /* 0D: Additional Sense Code Qualifier */
- uint8_t fruc; /* 0E: Field Replaceable Unit Code */
- uint8_t sks_high:7; /* 0F: Sense Key Specific (MSB) */
- uint8_t sks_valid:1; /* 0F: Sense Key Specific Valid */
- uint16_t sks_low; /* 10: Sense Key Specific (LSW) */
- uint16_t reserved3; /* 12: Part of additional sense bytes (unused) */
- uint16_t uec; /* 14: Additional Sense Bytes */
- uint64_t per __packed; /* 16: Additional Sense Bytes */
- uint8_t reserved4[2]; /* 1E: Additional Sense Bytes (unused) */
-};
-
-
-/* Task management constants */
-#define SOFT_TASK_SIMPLE 0x00
-#define SOFT_TASK_HEAD_OF_QUEUE 0x01
-#define SOFT_TASK_ORDERED 0x02
-
-/* Version zero has the last 32 bits reserved;
- * version one uses them for sg_list_len_bytes.
- */
-struct skd_command_header {
- __be64 sg_list_dma_address;
- uint16_t tag;
- uint8_t attribute;
- uint8_t add_cdb_len; /* In 32 bit words */
- __be32 sg_list_len_bytes;
-};
-
-struct skd_scsi_request {
- struct skd_command_header hdr;
- unsigned char cdb[16];
-/* unsigned char _reserved[16]; */
-};
-
-struct driver_inquiry_data {
- uint8_t peripheral_device_type:5;
- uint8_t qualifier:3;
- uint8_t page_code;
- __be16 page_length;
- __be16 pcie_bus_number;
- uint8_t pcie_device_number;
- uint8_t pcie_function_number;
- uint8_t pcie_link_speed;
- uint8_t pcie_link_lanes;
- __be16 pcie_vendor_id;
- __be16 pcie_device_id;
- __be16 pcie_subsystem_vendor_id;
- __be16 pcie_subsystem_device_id;
- uint8_t reserved1[2];
- uint8_t reserved2[3];
- uint8_t driver_version_length;
- uint8_t driver_version[0x14];
-};
-
-#endif /* SKD_S1120_H */
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 4478eb7efee0..2cdf2771f8e8 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -539,7 +539,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
spin_unlock_irq(&host->lock);
DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL);
+ blk_execute_rq_nowait(NULL, rq, true, NULL);
return 0;
@@ -578,7 +578,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
crq->msg_bucket = (u32) rc;
DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL);
+ blk_execute_rq_nowait(NULL, rq, true, NULL);
return 0;
}
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 2b95d7b33b91..982732dbe82e 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -521,7 +521,7 @@ static int mm_check_plugged(struct cardinfo *card)
static blk_qc_t mm_submit_bio(struct bio *bio)
{
- struct cardinfo *card = bio->bi_disk->private_data;
+ struct cardinfo *card = bio->bi_bdev->bd_disk->private_data;
pr_debug("mm_make_request %llu %u\n",
(unsigned long long)bio->bi_iter.bi_sector,
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 145606dc52db..b9fa3ef5b57c 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -320,7 +320,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
if (err)
goto out;
- blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+ blk_execute_rq(vblk->disk, req, false);
err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
blk_put_request(req);
@@ -705,6 +705,7 @@ static int virtblk_probe(struct virtio_device *vdev)
u32 v, blk_size, max_size, sg_elems, opt_io_size;
u16 min_io_size;
u8 physical_block_exp, alignment_offset;
+ unsigned int queue_depth;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
@@ -756,16 +757,18 @@ static int virtblk_probe(struct virtio_device *vdev)
}
/* Default queue sizing is to fill the ring. */
- if (!virtblk_queue_depth) {
- virtblk_queue_depth = vblk->vqs[0].vq->num_free;
+ if (likely(!virtblk_queue_depth)) {
+ queue_depth = vblk->vqs[0].vq->num_free;
/* ... but without indirect descs, we use 2 descs per req */
if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
- virtblk_queue_depth /= 2;
+ queue_depth /= 2;
+ } else {
+ queue_depth = virtblk_queue_depth;
}
memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
vblk->tag_set.ops = &virtio_mq_ops;
- vblk->tag_set.queue_depth = virtblk_queue_depth;
+ vblk->tag_set.queue_depth = queue_depth;
vblk->tag_set.numa_node = NUMA_NO_NODE;
vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
vblk->tag_set.cmd_size =
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 9ebf53903d7b..1cdf09ff67b6 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -794,8 +794,13 @@ again:
pages[i]->persistent_gnt = persistent_gnt;
} else {
if (gnttab_page_cache_get(&ring->free_pages,
- &pages[i]->page))
- goto out_of_memory;
+ &pages[i]->page)) {
+ gnttab_page_cache_put(&ring->free_pages,
+ pages_to_gnt,
+ segs_to_map);
+ ret = -ENOMEM;
+ goto out;
+ }
addr = vaddr(pages[i]->page);
pages_to_gnt[segs_to_map] = pages[i]->page;
pages[i]->persistent_gnt = NULL;
@@ -811,10 +816,8 @@ again:
break;
}
- if (segs_to_map) {
+ if (segs_to_map)
ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
- BUG_ON(ret);
- }
/*
* Now swizzle the MFN in our domain with the MFN from the other domain
@@ -830,7 +833,7 @@ again:
gnttab_page_cache_put(&ring->free_pages,
&pages[seg_idx]->page, 1);
pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
- ret |= 1;
+ ret |= !ret;
goto next;
}
pages[seg_idx]->handle = map[new_map_idx].handle;
@@ -882,17 +885,18 @@ next:
}
segs_to_map = 0;
last_map = map_until;
- if (map_until != num)
+ if (!ret && map_until != num)
goto again;
- return ret;
-
-out_of_memory:
- pr_alert("%s: out of memory\n", __func__);
- gnttab_page_cache_put(&ring->free_pages, pages_to_gnt, segs_to_map);
- for (i = last_map; i < num; i++)
+out:
+ for (i = last_map; i < num; i++) {
+ /* Don't zap current batch's valid persistent grants. */
+		if (i >= last_map + segs_to_map)
+ pages[i]->persistent_gnt = NULL;
pages[i]->handle = BLKBACK_INVALID_HANDLE;
- return -ENOMEM;
+ }
+
+ return ret;
}
static int xen_blkbk_map_seg(struct pending_req *pending_req)
@@ -1322,9 +1326,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
pages[i]->page,
seg[i].nsec << 9,
seg[i].offset) == 0)) {
-
- int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
- bio = bio_alloc(GFP_KERNEL, nr_iovecs);
+ bio = bio_alloc(GFP_KERNEL, bio_max_segs(nseg - i));
if (unlikely(bio == NULL))
goto fail_put_bio;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 9860d4842f36..c2aaf690352c 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -245,7 +245,7 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
if (req_prod - rsp_prod > size)
goto fail;
- err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->domid,
+ err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->be->dev,
evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
if (err < 0)
goto fail;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 5265975b3fba..e1c6798889f4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -945,7 +945,8 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
if (info->feature_discard) {
blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
blk_queue_max_discard_sectors(rq, get_capacity(gd));
- rq->limits.discard_granularity = info->discard_granularity;
+ rq->limits.discard_granularity = info->discard_granularity ?:
+ info->physical_sector_size;
rq->limits.discard_alignment = info->discard_alignment;
if (info->feature_secdiscard)
blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
@@ -2179,19 +2180,12 @@ static void blkfront_closing(struct blkfront_info *info)
static void blkfront_setup_discard(struct blkfront_info *info)
{
- int err;
- unsigned int discard_granularity;
- unsigned int discard_alignment;
-
info->feature_discard = 1;
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "discard-granularity", "%u", &discard_granularity,
- "discard-alignment", "%u", &discard_alignment,
- NULL);
- if (!err) {
- info->discard_granularity = discard_granularity;
- info->discard_alignment = discard_alignment;
- }
+ info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
+ "discard-granularity",
+ 0);
+ info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
+ "discard-alignment", 0);
info->feature_secdiscard =
!!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
0);
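The discard-granularity line above relies on the GNU "a ?: b" form of the conditional operator: it evaluates to a when a is non-zero and to b otherwise, without evaluating a twice, so a zero granularity advertised by the backend falls back to the physical sector size. A tiny sketch of the same fallback (GNU C extension, accepted by GCC and Clang):

	/* Sketch only: "advertised ?: sector_size" picks advertised if non-zero. */
	static unsigned int effective_granularity(unsigned int advertised,
						  unsigned int sector_size)
	{
		return advertised ?: sector_size;	/* sector_size when advertised == 0 */
	}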
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e2933cb7a82a..a711a2e2a794 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -530,8 +530,7 @@ static ssize_t backing_dev_store(struct device *dev,
return len;
out:
- if (bitmap)
- kvfree(bitmap);
+ kvfree(bitmap);
if (bdev)
blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
@@ -1082,7 +1081,7 @@ static ssize_t mm_stat_show(struct device *dev,
zram->limit_pages << PAGE_SHIFT,
max_used << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.same_pages),
- pool_stats.pages_compacted,
+ atomic_long_read(&pool_stats.pages_compacted),
(u64)atomic64_read(&zram->stats.huge_pages),
(u64)atomic64_read(&zram->stats.huge_pages_since));
up_read(&zram->init_lock);
@@ -1596,7 +1595,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
*/
static blk_qc_t zram_submit_bio(struct bio *bio)
{
- struct zram *zram = bio->bi_disk->private_data;
+ struct zram *zram = bio->bi_bdev->bd_disk->private_data;
if (!valid_io_request(zram, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size)) {
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 41ff2071d7ef..88ce5f0ffc4b 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -437,38 +437,31 @@ int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver
tlv = (struct intel_tlv *)skb->data;
switch (tlv->type) {
case INTEL_TLV_CNVI_TOP:
- version->cnvi_top =
- __le32_to_cpu(get_unaligned_le32(tlv->val));
+ version->cnvi_top = get_unaligned_le32(tlv->val);
break;
case INTEL_TLV_CNVR_TOP:
- version->cnvr_top =
- __le32_to_cpu(get_unaligned_le32(tlv->val));
+ version->cnvr_top = get_unaligned_le32(tlv->val);
break;
case INTEL_TLV_CNVI_BT:
- version->cnvi_bt =
- __le32_to_cpu(get_unaligned_le32(tlv->val));
+ version->cnvi_bt = get_unaligned_le32(tlv->val);
break;
case INTEL_TLV_CNVR_BT:
- version->cnvr_bt =
- __le32_to_cpu(get_unaligned_le32(tlv->val));
+ version->cnvr_bt = get_unaligned_le32(tlv->val);
break;
case INTEL_TLV_DEV_REV_ID:
- version->dev_rev_id =
- __le16_to_cpu(get_unaligned_le16(tlv->val));
+ version->dev_rev_id = get_unaligned_le16(tlv->val);
break;
case INTEL_TLV_IMAGE_TYPE:
version->img_type = tlv->val[0];
break;
case INTEL_TLV_TIME_STAMP:
- version->timestamp =
- __le16_to_cpu(get_unaligned_le16(tlv->val));
+ version->timestamp = get_unaligned_le16(tlv->val);
break;
case INTEL_TLV_BUILD_TYPE:
version->build_type = tlv->val[0];
break;
case INTEL_TLV_BUILD_NUM:
- version->build_num =
- __le32_to_cpu(get_unaligned_le32(tlv->val));
+ version->build_num = get_unaligned_le32(tlv->val);
break;
case INTEL_TLV_SECURE_BOOT:
version->secure_boot = tlv->val[0];
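The btintel change above is not purely cosmetic: get_unaligned_le32()/get_unaligned_le16() already return host-endian values, so wrapping them in __le32_to_cpu()/__le16_to_cpu() was a no-op on little-endian hosts and a double conversion on big-endian ones. A standalone sketch of what the helper effectively does:

	#include <stdint.h>

	/* Assemble a host-endian value from little-endian bytes; no further
	 * byte swap is needed afterwards.
	 */
	static uint32_t le32_from_bytes(const uint8_t *p)
	{
		return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
		       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	}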
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 5f9f02795631..9872ef18f9fe 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -442,15 +442,15 @@ static int btmtksdio_rx_packet(struct btmtksdio_dev *bdev, u16 rx_size)
}
switch ((&pkts[i])->lsize) {
- case 1:
- dlen = skb->data[(&pkts[i])->loff];
- break;
- case 2:
- dlen = get_unaligned_le16(skb->data +
+ case 1:
+ dlen = skb->data[(&pkts[i])->loff];
+ break;
+ case 2:
+ dlen = get_unaligned_le16(skb->data +
(&pkts[i])->loff);
- break;
- default:
- goto err_kfree_skb;
+ break;
+ default:
+ goto err_kfree_skb;
}
pad_size = skb->len - (&pkts[i])->hlen - dlen;
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index f85a55add9be..25114f0d1319 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -94,6 +94,53 @@ out:
}
EXPORT_SYMBOL_GPL(qca_read_soc_version);
+static int qca_read_fw_build_info(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ struct edl_event_hdr *edl;
+ char cmd, build_label[QCA_FW_BUILD_VER_LEN];
+ int build_lbl_len, err = 0;
+
+ bt_dev_dbg(hdev, "QCA read fw build info");
+
+ cmd = EDL_GET_BUILD_INFO_CMD;
+ skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
+ &cmd, 0, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Reading QCA fw build info failed (%d)",
+ err);
+ return err;
+ }
+
+ edl = (struct edl_event_hdr *)(skb->data);
+ if (!edl) {
+ bt_dev_err(hdev, "QCA read fw build info with no header");
+ err = -EILSEQ;
+ goto out;
+ }
+
+ if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+ edl->rtype != EDL_GET_BUILD_INFO_CMD) {
+ bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
+ edl->rtype);
+ err = -EIO;
+ goto out;
+ }
+
+ build_lbl_len = edl->data[0];
+ if (build_lbl_len <= QCA_FW_BUILD_VER_LEN - 1) {
+ memcpy(build_label, edl->data + 1, build_lbl_len);
+ build_label[build_lbl_len] = '\0';
+ /* Only report the label when it fits; build_label is
+ * uninitialized otherwise.
+ */
+ hci_set_fw_info(hdev, "%s", build_label);
+ }
+
+out:
+ kfree_skb(skb);
+ return err;
+}
+
static int qca_send_reset(struct hci_dev *hdev)
{
struct sk_buff *skb;
@@ -517,6 +564,19 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
return err;
}
+ /* WCN399x supports the Microsoft vendor extension with 0xFD70 as the
+ * VsMsftOpCode.
+ */
+ switch (soc_type) {
+ case QCA_WCN3990:
+ case QCA_WCN3991:
+ case QCA_WCN3998:
+ hci_set_msft_opcode(hdev, 0xFD70);
+ break;
+ default:
+ break;
+ }
+
/* Perform HCI reset */
err = qca_send_reset(hdev);
if (err < 0) {
@@ -524,6 +584,13 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
return err;
}
+ if (soc_type == QCA_WCN3991) {
+ /* get fw build info */
+ err = qca_read_fw_build_info(hdev);
+ if (err < 0)
+ return err;
+ }
+
bt_dev_info(hdev, "QCA setup on UART is completed");
return 0;
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index e73b8f8775bd..b19add7675a4 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -11,6 +11,7 @@
#define EDL_PATCH_CMD_LEN (1)
#define EDL_PATCH_VER_REQ_CMD (0x19)
#define EDL_PATCH_TLV_REQ_CMD (0x1E)
+#define EDL_GET_BUILD_INFO_CMD (0x20)
#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
#define MAX_SIZE_PER_TLV_SEGMENT (243)
#define QCA_PRE_SHUTDOWN_CMD (0xFC08)
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 98d53764871f..2acb719e596f 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -142,12 +142,16 @@ static int btqcomsmd_probe(struct platform_device *pdev)
btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD",
btqcomsmd_cmd_callback, btq);
- if (IS_ERR(btq->cmd_channel))
- return PTR_ERR(btq->cmd_channel);
+ if (IS_ERR(btq->cmd_channel)) {
+ ret = PTR_ERR(btq->cmd_channel);
+ goto destroy_acl_channel;
+ }
hdev = hci_alloc_dev();
- if (!hdev)
- return -ENOMEM;
+ if (!hdev) {
+ ret = -ENOMEM;
+ goto destroy_cmd_channel;
+ }
hci_set_drvdata(hdev, btq);
btq->hdev = hdev;
@@ -161,14 +165,21 @@ static int btqcomsmd_probe(struct platform_device *pdev)
hdev->set_bdaddr = qca_set_bdaddr_rome;
ret = hci_register_dev(hdev);
- if (ret < 0) {
- hci_free_dev(hdev);
- return ret;
- }
+ if (ret < 0)
+ goto hci_free_dev;
platform_set_drvdata(pdev, btq);
return 0;
+
+hci_free_dev:
+ hci_free_dev(hdev);
+destroy_cmd_channel:
+ rpmsg_destroy_ept(btq->cmd_channel);
+destroy_acl_channel:
+ rpmsg_destroy_ept(btq->acl_channel);
+
+ return ret;
}
static int btqcomsmd_remove(struct platform_device *pdev)
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index a4f7cace66b0..e7fe5fb22753 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -38,6 +38,19 @@
.hci_ver = (hciv), \
.hci_bus = (bus)
+enum btrtl_chip_id {
+ CHIP_ID_8723A,
+ CHIP_ID_8723B,
+ CHIP_ID_8821A,
+ CHIP_ID_8761A,
+ CHIP_ID_8822B = 8,
+ CHIP_ID_8723D,
+ CHIP_ID_8821C,
+ CHIP_ID_8822C = 13,
+ CHIP_ID_8761B,
+ CHIP_ID_8852A = 18,
+};
+
struct id_table {
__u16 match_flags;
__u16 lmp_subver;
@@ -58,6 +71,7 @@ struct btrtl_device_info {
u8 *cfg_data;
int cfg_len;
bool drop_fw;
+ int project_id;
};
static const struct id_table ic_id_table[] = {
@@ -307,8 +321,10 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
/* Find project_id in table */
for (i = 0; i < ARRAY_SIZE(project_id_to_lmp_subver); i++) {
- if (project_id == project_id_to_lmp_subver[i].id)
+ if (project_id == project_id_to_lmp_subver[i].id) {
+ btrtl_dev->project_id = project_id;
break;
+ }
}
if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) {
@@ -658,6 +674,12 @@ out_free:
}
}
+ /* RTL8822CE supports the Microsoft vendor extension and uses 0xFCF0
+ * for VsMsftOpCode.
+ */
+ if (lmp_subver == RTL_ROM_LMP_8822B)
+ hci_set_msft_opcode(hdev, 0xFCF0);
+
return btrtl_dev;
err_free:
@@ -708,13 +730,28 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
ret = btrtl_download_firmware(hdev, btrtl_dev);
- btrtl_free(btrtl_dev);
-
/* Enable controller to do both LE scan and BR/EDR inquiry
* simultaneously.
*/
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ /* Enable central-peripheral role (able to create new connections with
+ * an existing connection in slave role).
+ */
+ /* Enable WBS support for specific Realtek devices. */
+ switch (btrtl_dev->project_id) {
+ case CHIP_ID_8822C:
+ case CHIP_ID_8852A:
+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ break;
+ default:
+ rtl_dev_dbg(hdev, "Central-peripheral role not enabled.");
+ rtl_dev_dbg(hdev, "WBS supported not enabled.");
+ break;
+ }
+
+ btrtl_free(btrtl_dev);
return ret;
}
EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 03b83aa91277..52683fd22e05 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -368,6 +368,8 @@ static const struct usb_device_id blacklist_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x8087, 0x0032), .driver_info = BTUSB_INTEL_NEWGEN |
BTUSB_WIDEBAND_SPEECH},
+ { USB_DEVICE(0x8087, 0x0033), .driver_info = BTUSB_INTEL_NEWGEN |
+ BTUSB_WIDEBAND_SPEECH},
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
@@ -506,7 +508,6 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
#define BTUSB_HW_RESET_ACTIVE 12
#define BTUSB_TX_WAIT_VND_EVT 13
#define BTUSB_WAKEUP_DISABLE 14
-#define BTUSB_USE_ALT1_FOR_WBS 15
struct btusb_data {
struct hci_dev *hdev;
@@ -1736,15 +1737,12 @@ static void btusb_work(struct work_struct *work)
new_alts = data->sco_num;
}
} else if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_TRANSP) {
- /* Check if Alt 6 is supported for Transparent audio */
- if (btusb_find_altsetting(data, 6)) {
- data->usb_alt6_packet_flow = true;
- new_alts = 6;
- } else if (test_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags)) {
- new_alts = 1;
- } else {
- bt_dev_err(hdev, "Device does not support ALT setting 6");
- }
+ /* Bluetooth USB spec recommends alt 6 (63 bytes), but
+ * many adapters do not support it. Alt 1 appears to
+ * work for all adapters that do not have alt 6, and
+ * which work with WBS at all.
+ */
+ new_alts = btusb_find_altsetting(data, 6) ? 6 : 1;
}
if (btusb_switch_alt_setting(hdev, new_alts) < 0)
@@ -1903,7 +1901,7 @@ static int btusb_setup_csr(struct hci_dev *hdev)
le16_to_cpu(rp->lmp_subver) == 0x1012 &&
le16_to_cpu(rp->hci_rev) == 0x0810 &&
le16_to_cpu(rp->hci_ver) == BLUETOOTH_VER_4_0) {
- bt_dev_warn(hdev, "CSR: detected a fake CSR dongle using a Barrot 8041a02 chip, this chip is very buggy and may have issues\n");
+ bt_dev_warn(hdev, "CSR: detected a fake CSR dongle using a Barrot 8041a02 chip, this chip is very buggy and may have issues");
pm_runtime_allow(&data->udev->dev);
@@ -1911,7 +1909,7 @@ static int btusb_setup_csr(struct hci_dev *hdev)
if (ret >= 0)
msleep(200);
else
- bt_dev_err(hdev, "Failed to suspend the device for Barrot 8041a02 receive-issue workaround\n");
+ bt_dev_err(hdev, "Failed to suspend the device for Barrot 8041a02 receive-issue workaround");
pm_runtime_forbid(&data->udev->dev);
@@ -2924,7 +2922,10 @@ finish:
* extension are using 0xFC1E for VsMsftOpCode.
*/
switch (ver.hw_variant) {
+ case 0x11: /* JfP */
case 0x12: /* ThP */
+ case 0x13: /* HrP */
+ case 0x14: /* CcP */
hci_set_msft_opcode(hdev, 0xFC1E);
break;
}
@@ -3127,6 +3128,12 @@ static int btusb_shutdown_intel_new(struct hci_dev *hdev)
#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
#define HCI_WMT_MAX_EVENT_SIZE 64
+/* For mt79xx ROM patch download */
+#define MTK_FW_ROM_PATCH_HEADER_SIZE 32
+#define MTK_FW_ROM_PATCH_GD_SIZE 64
+#define MTK_FW_ROM_PATCH_SEC_MAP_SIZE 64
+#define MTK_SEC_MAP_COMMON_SIZE 12
+#define MTK_SEC_MAP_NEED_SEND_SIZE 52
enum {
BTMTK_WMT_PATCH_DWNLD = 0x1,
@@ -3138,6 +3145,7 @@ enum {
enum {
BTMTK_WMT_INVALID,
BTMTK_WMT_PATCH_UNDONE,
+ BTMTK_WMT_PATCH_PROGRESS,
BTMTK_WMT_PATCH_DONE,
BTMTK_WMT_ON_UNDONE,
BTMTK_WMT_ON_DONE,
@@ -3153,7 +3161,7 @@ struct btmtk_wmt_hdr {
struct btmtk_hci_wmt_cmd {
struct btmtk_wmt_hdr hdr;
- u8 data[256];
+ u8 data[];
} __packed;
struct btmtk_hci_wmt_evt {
@@ -3182,6 +3190,40 @@ struct btmtk_hci_wmt_params {
u32 *status;
};
+struct btmtk_patch_header {
+ u8 datetime[16];
+ u8 platform[4];
+ __le16 hwver;
+ __le16 swver;
+ __le32 magicnum;
+} __packed;
+
+struct btmtk_global_desc {
+ __le32 patch_ver;
+ __le32 sub_sys;
+ __le32 feature_opt;
+ __le32 section_num;
+} __packed;
+
+struct btmtk_section_map {
+ __le32 sectype;
+ __le32 secoffset;
+ __le32 secsize;
+ union {
+ __le32 u4SecSpec[13];
+ struct {
+ __le32 dlAddr;
+ __le32 dlsize;
+ __le32 seckeyidx;
+ __le32 alignlen;
+ __le32 sectype;
+ __le32 dlmodecrctype;
+ __le32 crc;
+ __le32 reserved[6];
+ } bin_info_spec;
+ };
+} __packed;
+
static void btusb_mtk_wmt_recv(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
@@ -3199,7 +3241,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
if (!skb) {
hdev->stat.err_rx++;
- goto err_out;
+ return;
}
hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
@@ -3217,13 +3259,18 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
*/
if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) {
data->evt_skb = skb_clone(skb, GFP_ATOMIC);
- if (!data->evt_skb)
- goto err_out;
+ if (!data->evt_skb) {
+ kfree_skb(skb);
+ return;
+ }
}
err = hci_recv_frame(hdev, skb);
- if (err < 0)
- goto err_free_skb;
+ if (err < 0) {
+ kfree_skb(data->evt_skb);
+ data->evt_skb = NULL;
+ return;
+ }
if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT,
&data->flags)) {
@@ -3232,11 +3279,6 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
wake_up_bit(&data->flags,
BTUSB_TX_WAIT_VND_EVT);
}
-err_out:
- return;
-err_free_skb:
- kfree_skb(data->evt_skb);
- data->evt_skb = NULL;
return;
} else if (urb->status == -ENOENT) {
/* Avoid suspend failed when usb_kill_urb */
@@ -3252,7 +3294,7 @@ err_free_skb:
* to generate the event. Otherwise, the WMT event cannot return from
* the device successfully.
*/
- udelay(100);
+ udelay(500);
usb_anchor_urb(urb, &data->ctrl_anchor);
err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -3327,7 +3369,7 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
u32 hlen, status = BTMTK_WMT_INVALID;
struct btmtk_hci_wmt_evt *wmt_evt;
- struct btmtk_hci_wmt_cmd wc;
+ struct btmtk_hci_wmt_cmd *wc;
struct btmtk_wmt_hdr *hdr;
int err;
@@ -3341,20 +3383,24 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
if (hlen > 255)
return -EINVAL;
- hdr = (struct btmtk_wmt_hdr *)&wc;
+ wc = kzalloc(hlen, GFP_KERNEL);
+ if (!wc)
+ return -ENOMEM;
+
+ hdr = &wc->hdr;
hdr->dir = 1;
hdr->op = wmt_params->op;
hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
hdr->flag = wmt_params->flag;
- memcpy(wc.data, wmt_params->data, wmt_params->dlen);
+ memcpy(wc->data, wmt_params->data, wmt_params->dlen);
set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
+ err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
if (err < 0) {
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- return err;
+ goto err_free_wc;
}
/* The vendor specific WMT commands are all answered by a vendor
@@ -3371,13 +3417,14 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
if (err == -EINTR) {
bt_dev_err(hdev, "Execution of wmt command interrupted");
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- return err;
+ goto err_free_wc;
}
if (err) {
bt_dev_err(hdev, "Execution of wmt command timed out");
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
+ goto err_free_wc;
}
/* Parse and handle the return WMT event */
@@ -3405,6 +3452,14 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
else
status = BTMTK_WMT_ON_UNDONE;
break;
+ case BTMTK_WMT_PATCH_DWNLD:
+ if (wmt_evt->whdr.flag == 2)
+ status = BTMTK_WMT_PATCH_DONE;
+ else if (wmt_evt->whdr.flag == 1)
+ status = BTMTK_WMT_PATCH_PROGRESS;
+ else
+ status = BTMTK_WMT_PATCH_UNDONE;
+ break;
}
if (wmt_params->status)
@@ -3413,6 +3468,119 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
err_free_skb:
kfree_skb(data->evt_skb);
data->evt_skb = NULL;
+err_free_wc:
+ kfree(wc);
+ return err;
+}
+
+static int btusb_mtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ struct btmtk_global_desc *globaldesc = NULL;
+ struct btmtk_section_map *sectionmap;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ const u8 *fw_bin_ptr;
+ int err, dlen, i, status;
+ u8 flag, first_block, retry;
+ u32 section_num, dl_size, section_offset;
+ u8 cmd[64];
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
+ return err;
+ }
+
+ fw_ptr = fw->data;
+ fw_bin_ptr = fw_ptr;
+ globaldesc = (struct btmtk_global_desc *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE);
+ section_num = globaldesc->section_num;
+
+ for (i = 0; i < section_num; i++) {
+ first_block = 1;
+ fw_ptr = fw_bin_ptr;
+ sectionmap = (struct btmtk_section_map *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
+ MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i);
+
+ section_offset = sectionmap->secoffset;
+ dl_size = sectionmap->bin_info_spec.dlsize;
+
+ if (dl_size > 0) {
+ retry = 20;
+ while (retry > 0) {
+ cmd[0] = 0; /* 0 means legacy dl mode. */
+ memcpy(cmd + 1,
+ fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
+ MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i +
+ MTK_SEC_MAP_COMMON_SIZE,
+ MTK_SEC_MAP_NEED_SEND_SIZE + 1);
+
+ wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
+ wmt_params.status = &status;
+ wmt_params.flag = 0;
+ wmt_params.dlen = MTK_SEC_MAP_NEED_SEND_SIZE + 1;
+ wmt_params.data = &cmd;
+
+ err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+ err);
+ goto err_release_fw;
+ }
+
+ if (status == BTMTK_WMT_PATCH_UNDONE) {
+ break;
+ } else if (status == BTMTK_WMT_PATCH_PROGRESS) {
+ msleep(100);
+ retry--;
+ } else if (status == BTMTK_WMT_PATCH_DONE) {
+ goto next_section;
+ } else {
+ bt_dev_err(hdev, "Failed wmt patch dwnld status (%d)",
+ status);
+ goto err_release_fw;
+ }
+ }
+
+ fw_ptr += section_offset;
+ wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
+ wmt_params.status = NULL;
+
+ while (dl_size > 0) {
+ dlen = min_t(int, 250, dl_size);
+ if (first_block == 1) {
+ flag = 1;
+ first_block = 0;
+ } else if (dl_size - dlen <= 0) {
+ flag = 3;
+ } else {
+ flag = 2;
+ }
+
+ wmt_params.flag = flag;
+ wmt_params.dlen = dlen;
+ wmt_params.data = fw_ptr;
+
+ err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+ err);
+ goto err_release_fw;
+ }
+
+ dl_size -= dlen;
+ fw_ptr += dlen;
+ }
+ }
+next_section:
+ continue;
+ }
+ /* Wait a few moments for the firmware to finish activating */
+ usleep_range(100000, 120000);
+
+err_release_fw:
+ release_firmware(fw);
return err;
}
@@ -3465,7 +3633,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
while (fw_size > 0) {
dlen = min_t(int, 250, fw_size);
- /* Tell deivice the position in sequence */
+ /* Tell device the position in sequence */
if (fw_size - dlen <= 0)
flag = 3;
else if (fw_size < fw->size - 30)
@@ -3555,9 +3723,9 @@ err_free_buf:
return err;
}
-static int btusb_mtk_id_get(struct btusb_data *data, u32 *id)
+static int btusb_mtk_id_get(struct btusb_data *data, u32 reg, u32 *id)
{
- return btusb_mtk_reg_read(data, 0x80000008, id);
+ return btusb_mtk_reg_read(data, reg, id);
}
static int btusb_mtk_setup(struct hci_dev *hdev)
@@ -3571,16 +3739,31 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
const char *fwname;
int err, status;
u32 dev_id;
+ char fw_bin_name[64];
+ u32 fw_version;
u8 param;
calltime = ktime_get();
- err = btusb_mtk_id_get(data, &dev_id);
+ err = btusb_mtk_id_get(data, 0x80000008, &dev_id);
if (err < 0) {
bt_dev_err(hdev, "Failed to get device id (%d)", err);
return err;
}
+ if (!dev_id) {
+ err = btusb_mtk_id_get(data, 0x70010200, &dev_id);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get device id (%d)", err);
+ return err;
+ }
+ err = btusb_mtk_id_get(data, 0x80021004, &fw_version);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get fw version (%d)", err);
+ return err;
+ }
+ }
+
switch (dev_id) {
case 0x7663:
fwname = FIRMWARE_MT7663;
@@ -3588,8 +3771,28 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
case 0x7668:
fwname = FIRMWARE_MT7668;
break;
+ case 0x7961:
+ snprintf(fw_bin_name, sizeof(fw_bin_name),
+ "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
+ dev_id & 0xffff, (fw_version & 0xff) + 1);
+ err = btusb_mtk_setup_firmware_79xx(hdev, fw_bin_name);
+
+ /* Enable Bluetooth protocol */
+ param = 1;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+ goto done;
default:
- bt_dev_err(hdev, "Unsupported support hardware variant (%08x)",
+ bt_dev_err(hdev, "Unsupported hardware variant (%08x)",
dev_id);
return -ENODEV;
}
@@ -3665,6 +3868,7 @@ ignore_func_on:
}
kfree_skb(skb);
+done:
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
duration = (unsigned long long)ktime_to_ns(delta) >> 10;
@@ -3725,7 +3929,7 @@ static int marvell_config_oob_wake(struct hci_dev *hdev)
skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
if (!skb) {
- bt_dev_err(hdev, "%s: No memory\n", __func__);
+ bt_dev_err(hdev, "%s: No memory", __func__);
return -ENOMEM;
}
@@ -3734,7 +3938,7 @@ static int marvell_config_oob_wake(struct hci_dev *hdev)
ret = btusb_send_frame(hdev, skb);
if (ret) {
- bt_dev_err(hdev, "%s: configuration failed\n", __func__);
+ bt_dev_err(hdev, "%s: configuration failed", __func__);
kfree_skb(skb);
return ret;
}
@@ -4069,6 +4273,13 @@ static int btusb_setup_qca(struct hci_dev *hdev)
info = &qca_devices_table[i];
}
if (!info) {
+ /* If the rom_version is not matched in the qca_devices_table
+ * and the high ROM version is not zero, we assume this chip does
+ * not need to load the rampatch and nvm.
+ */
+ if (ver_rom & ~0xffffU)
+ return 0;
+
bt_dev_err(hdev, "don't support firmware rome 0x%x", ver_rom);
return -ENODEV;
}
@@ -4264,6 +4475,20 @@ static bool btusb_prevent_wake(struct hci_dev *hdev)
return !device_may_wakeup(&data->udev->dev);
}
+static int btusb_shutdown_qca(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "HCI reset during shutdown failed");
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -4523,6 +4748,7 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_QCA_WCN6855) {
data->setup_on_usb = btusb_setup_qca;
+ hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_wcn6855;
hdev->cmd_timeout = btusb_qca_cmd_timeout;
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
@@ -4548,10 +4774,6 @@ static int btusb_probe(struct usb_interface *intf,
* (DEVICE_REMOTE_WAKEUP)
*/
set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);
- if (btusb_find_altsetting(data, 1))
- set_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags);
- else
- bt_dev_err(hdev, "Device does not support ALT setting 1");
}
if (!reset)
@@ -4627,8 +4849,8 @@ static int btusb_probe(struct usb_interface *intf,
data->diag = NULL;
}
- if (enable_autosuspend)
- usb_enable_autosuspend(data->udev);
+ if (!enable_autosuspend)
+ usb_disable_autosuspend(data->udev);
err = hci_register_dev(hdev);
if (err < 0)
@@ -4688,6 +4910,9 @@ static void btusb_disconnect(struct usb_interface *intf)
gpiod_put(data->reset_gpio);
hci_free_dev(hdev);
+
+ if (!enable_autosuspend)
+ usb_enable_autosuspend(data->udev);
}
#ifdef CONFIG_PM
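The MT79xx loader added to btusb.c above walks the firmware image at fixed strides: a 32-byte patch header, a 64-byte global descriptor, then one 64-byte section map per section. A compressed restatement of that pointer arithmetic, as a sketch reusing the values of the defines introduced by the patch:

	#include <stddef.h>

	/* Byte offset of section map i within the MT79xx firmware image. */
	static size_t mt79xx_section_map_offset(unsigned int i)
	{
		const size_t header  = 32;	/* MTK_FW_ROM_PATCH_HEADER_SIZE */
		const size_t gd      = 64;	/* MTK_FW_ROM_PATCH_GD_SIZE */
		const size_t sec_map = 64;	/* MTK_FW_ROM_PATCH_SEC_MAP_SIZE */

		return header + gd + (size_t)i * sec_map;
	}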
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 8ea5ca8d71d6..3764ceb6fa0d 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -654,6 +654,7 @@ static const struct h4_recv_pkt bcm_recv_pkts[] = {
{ H4_RECV_ACL, .recv = hci_recv_frame },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
+ { H4_RECV_ISO, .recv = hci_recv_frame },
{ BCM_RECV_LM_DIAG, .recv = hci_recv_diag },
{ BCM_RECV_NULL, .recv = hci_recv_diag },
{ BCM_RECV_TYPE49, .recv = hci_recv_diag },
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 7be16a7f653b..27e96681d583 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -906,6 +906,11 @@ static int h5_btrtl_setup(struct h5 *h5)
/* Give the device some time before the hci-core sends it a reset */
usleep_range(10000, 20000);
+ /* Enable controller to do both LE scan and BR/EDR inquiry
+ * simultaneously.
+ */
+ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &h5->hu->hdev->quirks);
+
out_free:
btrtl_free(btrtl_dev);
@@ -1022,6 +1027,8 @@ static const struct of_device_id rtl_bluetooth_of_match[] = {
.data = (const void *)&rtl_vnd },
{ .compatible = "realtek,rtl8723bs-bt",
.data = (const void *)&rtl_vnd },
+ { .compatible = "realtek,rtl8723ds-bt",
+ .data = (const void *)&rtl_vnd },
#endif
{ },
};
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index f83d67eafc9f..637c5b8c2aa1 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -127,10 +127,9 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
goto no_schedule;
- if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
- set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
+ set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
+ if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state))
goto no_schedule;
- }
BT_DBG("");
@@ -174,10 +173,10 @@ restart:
kfree_skb(skb);
}
+ clear_bit(HCI_UART_SENDING, &hu->tx_state);
if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state))
goto restart;
- clear_bit(HCI_UART_SENDING, &hu->tx_state);
wake_up_bit(&hu->tx_state, HCI_UART_SENDING);
}
@@ -802,7 +801,8 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file *file,
* We don't provide read/write/poll interface for user space.
*/
static ssize_t hci_uart_tty_read(struct tty_struct *tty, struct file *file,
- unsigned char __user *buf, size_t nr)
+ unsigned char *buf, size_t nr,
+ void **cookie, unsigned long offset)
{
return 0;
}
@@ -819,29 +819,28 @@ static __poll_t hci_uart_tty_poll(struct tty_struct *tty,
return 0;
}
+static struct tty_ldisc_ops hci_uart_ldisc = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "n_hci",
+ .open = hci_uart_tty_open,
+ .close = hci_uart_tty_close,
+ .read = hci_uart_tty_read,
+ .write = hci_uart_tty_write,
+ .ioctl = hci_uart_tty_ioctl,
+ .compat_ioctl = hci_uart_tty_ioctl,
+ .poll = hci_uart_tty_poll,
+ .receive_buf = hci_uart_tty_receive,
+ .write_wakeup = hci_uart_tty_wakeup,
+};
+
static int __init hci_uart_init(void)
{
- static struct tty_ldisc_ops hci_uart_ldisc;
int err;
BT_INFO("HCI UART driver ver %s", VERSION);
/* Register the tty discipline */
-
- memset(&hci_uart_ldisc, 0, sizeof(hci_uart_ldisc));
- hci_uart_ldisc.magic = TTY_LDISC_MAGIC;
- hci_uart_ldisc.name = "n_hci";
- hci_uart_ldisc.open = hci_uart_tty_open;
- hci_uart_ldisc.close = hci_uart_tty_close;
- hci_uart_ldisc.read = hci_uart_tty_read;
- hci_uart_ldisc.write = hci_uart_tty_write;
- hci_uart_ldisc.ioctl = hci_uart_tty_ioctl;
- hci_uart_ldisc.compat_ioctl = hci_uart_tty_ioctl;
- hci_uart_ldisc.poll = hci_uart_tty_poll;
- hci_uart_ldisc.receive_buf = hci_uart_tty_receive;
- hci_uart_ldisc.write_wakeup = hci_uart_tty_wakeup;
- hci_uart_ldisc.owner = THIS_MODULE;
-
err = tty_register_ldisc(N_HCI, &hci_uart_ldisc);
if (err) {
BT_ERR("HCI line discipline registration failed. (%d)", err);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 4a963682c702..de36af63e182 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -50,7 +50,8 @@
#define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000
#define CMD_TRANS_TIMEOUT_MS 100
#define MEMDUMP_TIMEOUT_MS 8000
-#define IBS_DISABLE_SSR_TIMEOUT_MS (MEMDUMP_TIMEOUT_MS + 1000)
+#define IBS_DISABLE_SSR_TIMEOUT_MS \
+ (MEMDUMP_TIMEOUT_MS + FW_DOWNLOAD_TIMEOUT_MS)
#define FW_DOWNLOAD_TIMEOUT_MS 3000
/* susclk rate */
@@ -76,7 +77,8 @@ enum qca_flags {
QCA_MEMDUMP_COLLECTION,
QCA_HW_ERROR_EVENT,
QCA_SSR_TRIGGERED,
- QCA_BT_OFF
+ QCA_BT_OFF,
+ QCA_ROM_FW
};
enum qca_capabilities {
@@ -1024,7 +1026,9 @@ static void qca_controller_memdump(struct work_struct *work)
dump_size = __le32_to_cpu(dump->dump_size);
if (!(dump_size)) {
bt_dev_err(hu->hdev, "Rx invalid memdump size");
+ kfree(qca_memdump);
kfree_skb(skb);
+ qca->qca_memdump = NULL;
mutex_unlock(&qca->hci_memdump_lock);
return;
}
@@ -1661,6 +1665,7 @@ static int qca_setup(struct hci_uart *hu)
if (ret)
return ret;
+ clear_bit(QCA_ROM_FW, &qca->flags);
/* Patch downloading has to be done without IBS mode */
set_bit(QCA_IBS_DISABLED, &qca->flags);
@@ -1718,12 +1723,14 @@ retry:
hu->hdev->cmd_timeout = qca_cmd_timeout;
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
+ set_bit(QCA_ROM_FW, &qca->flags);
ret = 0;
} else if (ret == -EAGAIN) {
/*
* Userspace firmware loader will return -EAGAIN in case no
* patch/nvm-config is found, so run with original fw/config.
*/
+ set_bit(QCA_ROM_FW, &qca->flags);
ret = 0;
}
@@ -2100,17 +2107,29 @@ static int __maybe_unused qca_suspend(struct device *dev)
set_bit(QCA_SUSPENDING, &qca->flags);
- if (test_bit(QCA_BT_OFF, &qca->flags))
+ /* If the BT SoC is running with its default firmware, it does not
+ * support in-band sleep.
+ */
+ if (test_bit(QCA_ROM_FW, &qca->flags))
+ return 0;
+
+ /* During SSR, after memory dump collection the controller is
+ * powered off and then back on. If the controller is powered off
+ * during SSR, wait until SSR completes.
+ */
+ if (test_bit(QCA_BT_OFF, &qca->flags) &&
+ !test_bit(QCA_SSR_TRIGGERED, &qca->flags))
return 0;
- if (test_bit(QCA_IBS_DISABLED, &qca->flags)) {
+ if (test_bit(QCA_IBS_DISABLED, &qca->flags) ||
+ test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ?
IBS_DISABLE_SSR_TIMEOUT_MS :
FW_DOWNLOAD_TIMEOUT_MS;
/* QCA_IBS_DISABLED flag is set to true, During FW download
* and during memory dump collection. It is reset to false,
- * After FW download complete and after memory dump collections.
+ * after the FW download completes.
*/
wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED,
TASK_UNINTERRUPTIBLE, msecs_to_jiffies(wait_timeout));
@@ -2122,10 +2141,6 @@ static int __maybe_unused qca_suspend(struct device *dev)
}
}
- /* After memory dump collection, Controller is powered off.*/
- if (test_bit(QCA_BT_OFF, &qca->flags))
- return 0;
-
cancel_work_sync(&qca->ws_awake_device);
cancel_work_sync(&qca->ws_awake_rx);
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index ef96ad06fa54..9e03402ef1b3 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -83,9 +83,9 @@ static void hci_uart_write_work(struct work_struct *work)
hci_uart_tx_complete(hu, hci_skb_pkt_type(skb));
kfree_skb(skb);
}
- } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
- clear_bit(HCI_UART_SENDING, &hu->tx_state);
+ clear_bit(HCI_UART_SENDING, &hu->tx_state);
+ } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
}
/* ------- Interface to HCI layer ------ */
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 0c262c2aeaf2..e7f7eee6ee9a 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -80,7 +80,7 @@ config MOXTET
config HISILICON_LPC
bool "Support for ISA I/O space on HiSilicon Hip06/7"
- depends on (ARM64 && ARCH_HISI) || (COMPILE_TEST && !ALPHA && !HEXAGON && !PARISC && !C6X)
+ depends on (ARM64 && ARCH_HISI) || (COMPILE_TEST && !ALPHA && !HEXAGON && !PARISC)
depends on HAS_IOMEM
select INDIRECT_PIO if ARM64
help
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
index 845b6c43fef8..2344d560b144 100644
--- a/drivers/bus/arm-integrator-lm.c
+++ b/drivers/bus/arm-integrator-lm.c
@@ -54,6 +54,7 @@ static int integrator_lm_populate(int num, struct device *dev)
ret = of_platform_default_populate(child, NULL, dev);
if (ret) {
dev_err(dev, "failed to populate module\n");
+ of_node_put(child);
return ret;
}
}
diff --git a/drivers/bus/fsl-mc/Kconfig b/drivers/bus/fsl-mc/Kconfig
index c23c77c9b705..b1fd55901c50 100644
--- a/drivers/bus/fsl-mc/Kconfig
+++ b/drivers/bus/fsl-mc/Kconfig
@@ -14,3 +14,10 @@ config FSL_MC_BUS
architecture. The fsl-mc bus driver handles discovery of
DPAA2 objects (which are represented as Linux devices) and
binding objects to drivers.
+
+config FSL_MC_UAPI_SUPPORT
+ bool "Management Complex (MC) userspace support"
+ depends on FSL_MC_BUS
+ help
+ Provides userspace support for interrogating, creating, destroying or
+ configuring DPAA2 objects exported by the Management Complex.
diff --git a/drivers/bus/fsl-mc/Makefile b/drivers/bus/fsl-mc/Makefile
index 3c518c7e8374..4ae292a30e53 100644
--- a/drivers/bus/fsl-mc/Makefile
+++ b/drivers/bus/fsl-mc/Makefile
@@ -16,3 +16,6 @@ mc-bus-driver-objs := fsl-mc-bus.o \
fsl-mc-allocator.o \
fsl-mc-msi.o \
dpmcp.o
+
+# MC userspace support
+obj-$(CONFIG_FSL_MC_UAPI_SUPPORT) += fsl-mc-uapi.o
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 68488a7ad0d6..e3e2ae41c22b 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -237,8 +237,8 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
* populated before they can get allocation requests from probe callbacks
* of the device drivers for the non-allocatable devices.
*/
-static int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
- bool alloc_interrupts)
+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts)
{
int num_child_objects;
int dprc_get_obj_failures;
@@ -458,8 +458,9 @@ out:
/*
* Disable and clear interrupt for a given DPRC object
*/
-static int disable_dprc_irq(struct fsl_mc_device *mc_dev)
+int disable_dprc_irq(struct fsl_mc_device *mc_dev)
{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
int error;
struct fsl_mc_io *mc_io = mc_dev->mc_io;
@@ -496,9 +497,18 @@ static int disable_dprc_irq(struct fsl_mc_device *mc_dev)
return error;
}
+ mc_bus->irq_enabled = 0;
+
return 0;
}
+int get_dprc_irq_state(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+
+ return mc_bus->irq_enabled;
+}
+
static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
{
int error;
@@ -525,8 +535,9 @@ static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
return 0;
}
-static int enable_dprc_irq(struct fsl_mc_device *mc_dev)
+int enable_dprc_irq(struct fsl_mc_device *mc_dev)
{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
int error;
/*
@@ -554,6 +565,8 @@ static int enable_dprc_irq(struct fsl_mc_device *mc_dev)
return error;
}
+ mc_bus->irq_enabled = 1;
+
return 0;
}
@@ -603,6 +616,7 @@ int dprc_setup(struct fsl_mc_device *mc_dev)
struct irq_domain *mc_msi_domain;
bool mc_io_created = false;
bool msi_domain_set = false;
+ bool uapi_created = false;
u16 major_ver, minor_ver;
size_t region_size;
int error;
@@ -635,6 +649,11 @@ int dprc_setup(struct fsl_mc_device *mc_dev)
return error;
mc_io_created = true;
+ } else {
+ error = fsl_mc_uapi_create_device_file(mc_bus);
+ if (error < 0)
+ return -EPROBE_DEFER;
+ uapi_created = true;
}
mc_msi_domain = fsl_mc_find_msi_domain(&mc_dev->dev);
@@ -692,6 +711,9 @@ error_cleanup_msi_domain:
mc_dev->mc_io = NULL;
}
+ if (uapi_created)
+ fsl_mc_uapi_remove_device_file(mc_bus);
+
return error;
}
EXPORT_SYMBOL_GPL(dprc_setup);
@@ -763,6 +785,7 @@ static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
int dprc_cleanup(struct fsl_mc_device *mc_dev)
{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
int error;
/* this function should be called only for DPRCs, it
@@ -793,6 +816,8 @@ int dprc_cleanup(struct fsl_mc_device *mc_dev)
if (!fsl_mc_is_root_dprc(&mc_dev->dev)) {
fsl_destroy_mc_io(mc_dev->mc_io);
mc_dev->mc_io = NULL;
+ } else {
+ fsl_mc_uapi_remove_device_file(mc_bus);
}
return 0;
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index b8e6acdf932e..380ad1fdb745 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -41,7 +41,7 @@ struct fsl_mc {
struct fsl_mc_device *root_mc_bus_dev;
u8 num_translation_ranges;
struct fsl_mc_addr_translation_range *translation_ranges;
- void *fsl_mc_regs;
+ void __iomem *fsl_mc_regs;
};
/**
@@ -208,12 +208,108 @@ static struct attribute *fsl_mc_dev_attrs[] = {
ATTRIBUTE_GROUPS(fsl_mc_dev);
+static int scan_fsl_mc_bus(struct device *dev, void *data)
+{
+ struct fsl_mc_device *root_mc_dev;
+ struct fsl_mc_bus *root_mc_bus;
+
+ if (!fsl_mc_is_root_dprc(dev))
+ goto exit;
+
+ root_mc_dev = to_fsl_mc_device(dev);
+ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
+ mutex_lock(&root_mc_bus->scan_mutex);
+ dprc_scan_objects(root_mc_dev, false);
+ mutex_unlock(&root_mc_bus->scan_mutex);
+
+exit:
+ return 0;
+}
+
+static ssize_t rescan_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val)
+ bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus);
+
+ return count;
+}
+static BUS_ATTR_WO(rescan);
+
+static int fsl_mc_bus_set_autorescan(struct device *dev, void *data)
+{
+ struct fsl_mc_device *root_mc_dev;
+ unsigned long val;
+ char *buf = data;
+
+ if (!fsl_mc_is_root_dprc(dev))
+ goto exit;
+
+ root_mc_dev = to_fsl_mc_device(dev);
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val)
+ enable_dprc_irq(root_mc_dev);
+ else
+ disable_dprc_irq(root_mc_dev);
+
+exit:
+ return 0;
+}
+
+static int fsl_mc_bus_get_autorescan(struct device *dev, void *data)
+{
+ struct fsl_mc_device *root_mc_dev;
+ char *buf = data;
+
+ if (!fsl_mc_is_root_dprc(dev))
+ goto exit;
+
+ root_mc_dev = to_fsl_mc_device(dev);
+
+ sprintf(buf, "%d\n", get_dprc_irq_state(root_mc_dev));
+exit:
+ return 0;
+}
+
+static ssize_t autorescan_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ bus_for_each_dev(bus, NULL, (void *)buf, fsl_mc_bus_set_autorescan);
+
+ return count;
+}
+
+static ssize_t autorescan_show(struct bus_type *bus, char *buf)
+{
+ bus_for_each_dev(bus, NULL, (void *)buf, fsl_mc_bus_get_autorescan);
+ return strlen(buf);
+}
+
+static BUS_ATTR_RW(autorescan);
+
+static struct attribute *fsl_mc_bus_attrs[] = {
+ &bus_attr_rescan.attr,
+ &bus_attr_autorescan.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(fsl_mc_bus);
+
struct bus_type fsl_mc_bus_type = {
.name = "fsl-mc",
.match = fsl_mc_bus_match,
.uevent = fsl_mc_bus_uevent,
.dma_configure = fsl_mc_dma_configure,
.dev_groups = fsl_mc_dev_groups,
+ .bus_groups = fsl_mc_bus_groups,
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
@@ -292,6 +388,11 @@ struct device_type fsl_mc_bus_dpdmai_type = {
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type);
+struct device_type fsl_mc_bus_dpdbg_type = {
+ .name = "fsl_mc_bus_dpdbg"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdbg_type);
+
static struct device_type *fsl_mc_get_device_type(const char *type)
{
static const struct {
@@ -313,6 +414,7 @@ static struct device_type *fsl_mc_get_device_type(const char *type)
{ &fsl_mc_bus_dpaiop_type, "dpaiop" },
{ &fsl_mc_bus_dpci_type, "dpci" },
{ &fsl_mc_bus_dpdmai_type, "dpdmai" },
+ { &fsl_mc_bus_dpdbg_type, "dpdbg" },
{ NULL, NULL }
};
int i;
@@ -840,6 +942,15 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
endpoint_desc.id = endpoint2.id;
endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+ /*
+ * The firmware told us this device has an endpoint, so if the lookup
+ * returned NULL the endpoint simply has not been discovered by the
+ * fsl-mc bus yet. Differentiate this case by returning -EPROBE_DEFER.
+ */
+ if (!endpoint)
+ return ERR_PTR(-EPROBE_DEFER);
+
return endpoint;
}
EXPORT_SYMBOL_GPL(fsl_mc_get_endpoint);
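The new rescan and autorescan attributes above are regular sysfs bus attributes, so they should appear under /sys/bus/fsl-mc/ (path assumed from the bus name; it is not spelled out in the patch). A hypothetical userspace sketch that triggers a rescan:

	#include <fcntl.h>
	#include <unistd.h>

	/* Write a non-zero value to the (assumed) rescan attribute. */
	static int fsl_mc_trigger_rescan(void)
	{
		int fd = open("/sys/bus/fsl-mc/rescan", O_WRONLY);
		int ret = -1;

		if (fd < 0)
			return -1;
		if (write(fd, "1", 1) == 1)
			ret = 0;
		close(fd);
		return ret;
	}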
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index c932387641fa..1958fa065360 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -10,6 +10,8 @@
#include <linux/fsl/mc.h>
#include <linux/mutex.h>
+#include <linux/ioctl.h>
+#include <linux/miscdevice.h>
/*
* Data Path Management Complex (DPMNG) General API
@@ -543,6 +545,22 @@ struct fsl_mc_resource_pool {
};
/**
+ * struct fsl_mc_uapi - information associated with a device file
+ * @misc: struct miscdevice linked to the root dprc
+ * @device: newly created device in /dev
+ * @mutex: mutex lock to serialize the open/release operations
+ * @local_instance_in_use: local MC I/O instance in use or not
+ * @static_mc_io: pointer to the static MC I/O object
+ */
+struct fsl_mc_uapi {
+ struct miscdevice misc;
+ struct device *device;
+ struct mutex mutex; /* serialize open/release operations */
+ u32 local_instance_in_use;
+ struct fsl_mc_io *static_mc_io;
+};
+
+/**
* struct fsl_mc_bus - logical bus that corresponds to a physical DPRC
* @mc_dev: fsl-mc device for the bus device itself.
* @resource_pools: array of resource pools (one pool per resource type)
@@ -551,6 +569,7 @@ struct fsl_mc_resource_pool {
* @irq_resources: Pointer to array of IRQ objects for the IRQ pool
* @scan_mutex: Serializes bus scanning
* @dprc_attr: DPRC attributes
+ * @uapi_misc: struct that abstracts the interaction with userspace
*/
struct fsl_mc_bus {
struct fsl_mc_device mc_dev;
@@ -558,6 +577,8 @@ struct fsl_mc_bus {
struct fsl_mc_device_irq *irq_resources;
struct mutex scan_mutex; /* serializes bus scanning */
struct dprc_attributes dprc_attr;
+ struct fsl_mc_uapi uapi_misc;
+ int irq_enabled;
};
#define to_fsl_mc_bus(_mc_dev) \
@@ -574,6 +595,9 @@ int __init dprc_driver_init(void);
void dprc_driver_exit(void);
+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts);
+
int __init fsl_mc_allocator_driver_init(void);
void fsl_mc_allocator_driver_exit(void);
@@ -612,4 +636,29 @@ void fsl_mc_get_root_dprc(struct device *dev,
struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
struct fsl_mc_device *mc_bus_dev);
+u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd);
+
+#ifdef CONFIG_FSL_MC_UAPI_SUPPORT
+
+int fsl_mc_uapi_create_device_file(struct fsl_mc_bus *mc_bus);
+
+void fsl_mc_uapi_remove_device_file(struct fsl_mc_bus *mc_bus);
+
+#else
+
+static inline int fsl_mc_uapi_create_device_file(struct fsl_mc_bus *mc_bus)
+{
+ return 0;
+}
+
+static inline void fsl_mc_uapi_remove_device_file(struct fsl_mc_bus *mc_bus)
+{
+}
+
+#endif
+
+int disable_dprc_irq(struct fsl_mc_device *mc_dev);
+int enable_dprc_irq(struct fsl_mc_device *mc_dev);
+int get_dprc_irq_state(struct fsl_mc_device *mc_dev);
+
#endif /* _FSL_MC_PRIVATE_H_ */
diff --git a/drivers/bus/fsl-mc/fsl-mc-uapi.c b/drivers/bus/fsl-mc/fsl-mc-uapi.c
new file mode 100644
index 000000000000..9c4c1395fcdb
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-uapi.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Management Complex (MC) userspace support
+ *
+ * Copyright 2021 NXP
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+
+#include "fsl-mc-private.h"
+
+struct uapi_priv_data {
+ struct fsl_mc_uapi *uapi;
+ struct fsl_mc_io *mc_io;
+};
+
+struct fsl_mc_cmd_desc {
+ u16 cmdid_value;
+ u16 cmdid_mask;
+ int size;
+ bool token;
+ int flags;
+};
+
+#define FSL_MC_CHECK_MODULE_ID BIT(0)
+#define FSL_MC_CAP_NET_ADMIN_NEEDED BIT(1)
+
+enum fsl_mc_cmd_index {
+ DPDBG_DUMP = 0,
+ DPDBG_SET,
+ DPRC_GET_CONTAINER_ID,
+ DPRC_CREATE_CONT,
+ DPRC_DESTROY_CONT,
+ DPRC_ASSIGN,
+ DPRC_UNASSIGN,
+ DPRC_GET_OBJ_COUNT,
+ DPRC_GET_OBJ,
+ DPRC_GET_RES_COUNT,
+ DPRC_GET_RES_IDS,
+ DPRC_SET_OBJ_LABEL,
+ DPRC_SET_LOCKED,
+ DPRC_CONNECT,
+ DPRC_DISCONNECT,
+ DPRC_GET_POOL,
+ DPRC_GET_POOL_COUNT,
+ DPRC_GET_CONNECTION,
+ DPCI_GET_LINK_STATE,
+ DPCI_GET_PEER_ATTR,
+ DPAIOP_GET_SL_VERSION,
+ DPAIOP_GET_STATE,
+ DPMNG_GET_VERSION,
+ DPSECI_GET_TX_QUEUE,
+ DPMAC_GET_COUNTER,
+ DPMAC_GET_MAC_ADDR,
+ DPNI_SET_PRIM_MAC,
+ DPNI_GET_PRIM_MAC,
+ DPNI_GET_STATISTICS,
+ DPNI_GET_LINK_STATE,
+ DPNI_GET_MAX_FRAME_LENGTH,
+ DPSW_GET_TAILDROP,
+ DPSW_SET_TAILDROP,
+ DPSW_IF_GET_COUNTER,
+ DPSW_IF_GET_MAX_FRAME_LENGTH,
+ DPDMUX_GET_COUNTER,
+ DPDMUX_IF_GET_MAX_FRAME_LENGTH,
+ GET_ATTR,
+ GET_IRQ_MASK,
+ GET_IRQ_STATUS,
+ CLOSE,
+ OPEN,
+ GET_API_VERSION,
+ DESTROY,
+ CREATE,
+};
+
+static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
+ [DPDBG_DUMP] = {
+ .cmdid_value = 0x1300,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 28,
+ },
+ [DPDBG_SET] = {
+ .cmdid_value = 0x1400,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 28,
+ },
+ [DPRC_GET_CONTAINER_ID] = {
+ .cmdid_value = 0x8300,
+ .cmdid_mask = 0xFFF0,
+ .token = false,
+ .size = 8,
+ },
+ [DPRC_CREATE_CONT] = {
+ .cmdid_value = 0x1510,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_DESTROY_CONT] = {
+ .cmdid_value = 0x1520,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_ASSIGN] = {
+ .cmdid_value = 0x1570,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_UNASSIGN] = {
+ .cmdid_value = 0x1580,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_GET_OBJ_COUNT] = {
+ .cmdid_value = 0x1590,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 16,
+ },
+ [DPRC_GET_OBJ] = {
+ .cmdid_value = 0x15A0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ },
+ [DPRC_GET_RES_COUNT] = {
+ .cmdid_value = 0x15B0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 32,
+ },
+ [DPRC_GET_RES_IDS] = {
+ .cmdid_value = 0x15C0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ },
+ [DPRC_SET_OBJ_LABEL] = {
+ .cmdid_value = 0x1610,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 48,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_SET_LOCKED] = {
+ .cmdid_value = 0x16B0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 16,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_CONNECT] = {
+ .cmdid_value = 0x1670,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 56,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_DISCONNECT] = {
+ .cmdid_value = 0x1680,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 32,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_GET_POOL] = {
+ .cmdid_value = 0x1690,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ },
+ [DPRC_GET_POOL_COUNT] = {
+ .cmdid_value = 0x16A0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPRC_GET_CONNECTION] = {
+ .cmdid_value = 0x16C0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 32,
+ },
+
+ [DPCI_GET_LINK_STATE] = {
+ .cmdid_value = 0x0E10,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPCI_GET_PEER_ATTR] = {
+ .cmdid_value = 0x0E20,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPAIOP_GET_SL_VERSION] = {
+ .cmdid_value = 0x2820,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPAIOP_GET_STATE] = {
+ .cmdid_value = 0x2830,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPMNG_GET_VERSION] = {
+ .cmdid_value = 0x8310,
+ .cmdid_mask = 0xFFF0,
+ .token = false,
+ .size = 8,
+ },
+ [DPSECI_GET_TX_QUEUE] = {
+ .cmdid_value = 0x1970,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 14,
+ },
+ [DPMAC_GET_COUNTER] = {
+ .cmdid_value = 0x0c40,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 9,
+ },
+ [DPMAC_GET_MAC_ADDR] = {
+ .cmdid_value = 0x0c50,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPNI_SET_PRIM_MAC] = {
+ .cmdid_value = 0x2240,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 16,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPNI_GET_PRIM_MAC] = {
+ .cmdid_value = 0x2250,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPNI_GET_STATISTICS] = {
+ .cmdid_value = 0x25D0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 10,
+ },
+ [DPNI_GET_LINK_STATE] = {
+ .cmdid_value = 0x2150,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPNI_GET_MAX_FRAME_LENGTH] = {
+ .cmdid_value = 0x2170,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPSW_GET_TAILDROP] = {
+ .cmdid_value = 0x0A80,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 14,
+ },
+ [DPSW_SET_TAILDROP] = {
+ .cmdid_value = 0x0A90,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 24,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPSW_IF_GET_COUNTER] = {
+ .cmdid_value = 0x0340,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 11,
+ },
+ [DPSW_IF_GET_MAX_FRAME_LENGTH] = {
+ .cmdid_value = 0x0450,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 10,
+ },
+ [DPDMUX_GET_COUNTER] = {
+ .cmdid_value = 0x0b20,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 11,
+ },
+ [DPDMUX_IF_GET_MAX_FRAME_LENGTH] = {
+ .cmdid_value = 0x0a20,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 10,
+ },
+ [GET_ATTR] = {
+ .cmdid_value = 0x0040,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [GET_IRQ_MASK] = {
+ .cmdid_value = 0x0150,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 13,
+ },
+ [GET_IRQ_STATUS] = {
+ .cmdid_value = 0x0160,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 13,
+ },
+ [CLOSE] = {
+ .cmdid_value = 0x8000,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+
+ /* Common commands amongst all types of objects. Must be checked last. */
+ [OPEN] = {
+ .cmdid_value = 0x8000,
+ .cmdid_mask = 0xFC00,
+ .token = false,
+ .size = 12,
+ .flags = FSL_MC_CHECK_MODULE_ID,
+ },
+ [GET_API_VERSION] = {
+ .cmdid_value = 0xA000,
+ .cmdid_mask = 0xFC00,
+ .token = false,
+ .size = 8,
+ .flags = FSL_MC_CHECK_MODULE_ID,
+ },
+ [DESTROY] = {
+ .cmdid_value = 0x9800,
+ .cmdid_mask = 0xFC00,
+ .token = true,
+ .size = 12,
+ .flags = FSL_MC_CHECK_MODULE_ID | FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [CREATE] = {
+ .cmdid_value = 0x9000,
+ .cmdid_mask = 0xFC00,
+ .token = true,
+ .size = 64,
+ .flags = FSL_MC_CHECK_MODULE_ID | FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+};
+
+#define FSL_MC_NUM_ACCEPTED_CMDS ARRAY_SIZE(fsl_mc_accepted_cmds)
+
+#define FSL_MC_MAX_MODULE_ID 0x10
+
+static int fsl_mc_command_check(struct fsl_mc_device *mc_dev,
+ struct fsl_mc_command *mc_cmd)
+{
+ struct fsl_mc_cmd_desc *desc = NULL;
+ int mc_cmd_max_size, i;
+ bool token_provided;
+ u16 cmdid, module_id;
+ char *mc_cmd_end;
+ char sum = 0;
+
+ /* Check if this is an accepted MC command */
+ cmdid = mc_cmd_hdr_read_cmdid(mc_cmd);
+ for (i = 0; i < FSL_MC_NUM_ACCEPTED_CMDS; i++) {
+ desc = &fsl_mc_accepted_cmds[i];
+ if ((cmdid & desc->cmdid_mask) == desc->cmdid_value)
+ break;
+ }
+ if (i == FSL_MC_NUM_ACCEPTED_CMDS) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: cmdid not accepted\n", cmdid);
+ return -EACCES;
+ }
+
+ /* Check if the size of the command is honored. Anything beyond the
+ * last valid byte of the command should be zeroed.
+ */
+ mc_cmd_max_size = sizeof(*mc_cmd);
+ mc_cmd_end = ((char *)mc_cmd) + desc->size;
+ for (i = desc->size; i < mc_cmd_max_size; i++)
+ sum |= *mc_cmd_end++;
+ if (sum) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: garbage beyond max size of %d bytes!\n",
+ cmdid, desc->size);
+ return -EACCES;
+ }
+
+ /* Some MC commands request a token to be passed so that object
+ * identification is possible. Check if the token passed in the command
+ * is as expected.
+ */
+ token_provided = mc_cmd_hdr_read_token(mc_cmd) ? true : false;
+ if (token_provided != desc->token) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: token 0x%04x is invalid!\n",
+ cmdid, mc_cmd_hdr_read_token(mc_cmd));
+ return -EACCES;
+ }
+
+ /* If needed, check if the module ID passed is valid */
+ if (desc->flags & FSL_MC_CHECK_MODULE_ID) {
+ /* The module ID is represented by bits [4:9] from the cmdid */
+ module_id = (cmdid & GENMASK(9, 4)) >> 4;
+ if (module_id == 0 || module_id > FSL_MC_MAX_MODULE_ID) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: unknown module ID 0x%x\n",
+ cmdid, module_id);
+ return -EACCES;
+ }
+ }
+
+ /* Some commands alter how hardware resources are managed. For these
+ * commands, check for CAP_NET_ADMIN.
+ */
+ if (desc->flags & FSL_MC_CAP_NET_ADMIN_NEEDED) {
+ if (!capable(CAP_NET_ADMIN)) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: needs CAP_NET_ADMIN!\n",
+ cmdid);
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
+static int fsl_mc_uapi_send_command(struct fsl_mc_device *mc_dev, unsigned long arg,
+ struct fsl_mc_io *mc_io)
+{
+ struct fsl_mc_command mc_cmd;
+ int error;
+
+ error = copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd));
+ if (error)
+ return -EFAULT;
+
+ error = fsl_mc_command_check(mc_dev, &mc_cmd);
+ if (error)
+ return error;
+
+ error = mc_send_command(mc_io, &mc_cmd);
+ if (error)
+ return error;
+
+ error = copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd));
+ if (error)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int fsl_mc_uapi_dev_open(struct inode *inode, struct file *filep)
+{
+ struct fsl_mc_device *root_mc_device;
+ struct uapi_priv_data *priv_data;
+ struct fsl_mc_io *dynamic_mc_io;
+ struct fsl_mc_uapi *mc_uapi;
+ struct fsl_mc_bus *mc_bus;
+ int error;
+
+ priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
+ if (!priv_data)
+ return -ENOMEM;
+
+ mc_uapi = container_of(filep->private_data, struct fsl_mc_uapi, misc);
+ mc_bus = container_of(mc_uapi, struct fsl_mc_bus, uapi_misc);
+ root_mc_device = &mc_bus->mc_dev;
+
+ mutex_lock(&mc_uapi->mutex);
+
+ if (!mc_uapi->local_instance_in_use) {
+ priv_data->mc_io = mc_uapi->static_mc_io;
+ mc_uapi->local_instance_in_use = 1;
+ } else {
+ error = fsl_mc_portal_allocate(root_mc_device, 0,
+ &dynamic_mc_io);
+ if (error) {
+ dev_dbg(&root_mc_device->dev,
+ "Could not allocate MC portal\n");
+ goto error_portal_allocate;
+ }
+
+ priv_data->mc_io = dynamic_mc_io;
+ }
+ priv_data->uapi = mc_uapi;
+ filep->private_data = priv_data;
+
+ mutex_unlock(&mc_uapi->mutex);
+
+ return 0;
+
+error_portal_allocate:
+ mutex_unlock(&mc_uapi->mutex);
+ kfree(priv_data);
+
+ return error;
+}
+
+static int fsl_mc_uapi_dev_release(struct inode *inode, struct file *filep)
+{
+ struct uapi_priv_data *priv_data;
+ struct fsl_mc_uapi *mc_uapi;
+ struct fsl_mc_io *mc_io;
+
+ priv_data = filep->private_data;
+ mc_uapi = priv_data->uapi;
+ mc_io = priv_data->mc_io;
+
+ mutex_lock(&mc_uapi->mutex);
+
+ if (mc_io == mc_uapi->static_mc_io)
+ mc_uapi->local_instance_in_use = 0;
+ else
+ fsl_mc_portal_free(mc_io);
+
+ kfree(filep->private_data);
+ filep->private_data = NULL;
+
+ mutex_unlock(&mc_uapi->mutex);
+
+ return 0;
+}
+
+static long fsl_mc_uapi_dev_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct uapi_priv_data *priv_data = file->private_data;
+ struct fsl_mc_device *root_mc_device;
+ struct fsl_mc_bus *mc_bus;
+ int error;
+
+ mc_bus = container_of(priv_data->uapi, struct fsl_mc_bus, uapi_misc);
+ root_mc_device = &mc_bus->mc_dev;
+
+ switch (cmd) {
+ case FSL_MC_SEND_MC_COMMAND:
+ error = fsl_mc_uapi_send_command(root_mc_device, arg, priv_data->mc_io);
+ break;
+ default:
+ dev_dbg(&root_mc_device->dev, "unexpected ioctl call number\n");
+ error = -EINVAL;
+ }
+
+ return error;
+}
+
+static const struct file_operations fsl_mc_uapi_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = fsl_mc_uapi_dev_open,
+ .release = fsl_mc_uapi_dev_release,
+ .unlocked_ioctl = fsl_mc_uapi_dev_ioctl,
+};
+
+int fsl_mc_uapi_create_device_file(struct fsl_mc_bus *mc_bus)
+{
+ struct fsl_mc_device *mc_dev = &mc_bus->mc_dev;
+ struct fsl_mc_uapi *mc_uapi = &mc_bus->uapi_misc;
+ int error;
+
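+ /* Expose the root DPRC as a misc character device named after it */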
+ mc_uapi->misc.minor = MISC_DYNAMIC_MINOR;
+ mc_uapi->misc.name = dev_name(&mc_dev->dev);
+ mc_uapi->misc.fops = &fsl_mc_uapi_dev_fops;
+
+ error = misc_register(&mc_uapi->misc);
+ if (error)
+ return error;
+
+ mc_uapi->static_mc_io = mc_bus->mc_dev.mc_io;
+
+ mutex_init(&mc_uapi->mutex);
+
+ return 0;
+}
+
+void fsl_mc_uapi_remove_device_file(struct fsl_mc_bus *mc_bus)
+{
+ misc_deregister(&mc_bus->uapi_misc.misc);
+}
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
index 85a0225db522..b291b35e3884 100644
--- a/drivers/bus/fsl-mc/mc-sys.c
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -35,7 +35,7 @@ static enum mc_cmd_status mc_cmd_hdr_read_status(struct fsl_mc_command *cmd)
return (enum mc_cmd_status)hdr->status;
}
-static u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd)
+u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd)
{
struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
u16 cmd_id = le16_to_cpu(hdr->cmd_id);
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index f0697f433c2f..be4eebb0971b 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -151,12 +151,17 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
int i, ret;
+ /* If the controller driver has set irq_flags, use them */
+ if (mhi_cntrl->irq_flags)
+ irq_flags = mhi_cntrl->irq_flags;
+
/* Setup BHI_INTVEC IRQ */
ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
mhi_intvec_threaded_handler,
- IRQF_SHARED | IRQF_NO_SUSPEND,
+ irq_flags,
"bhi", mhi_cntrl);
if (ret)
return ret;
@@ -174,7 +179,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
mhi_irq_handler,
- IRQF_SHARED | IRQF_NO_SUSPEND,
+ irq_flags,
"mhi", mhi_event);
if (ret) {
dev_err(dev, "Error requesting irq:%d for ev:%d\n",
@@ -552,6 +557,9 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
tre_ring = &mhi_chan->tre_ring;
chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+ if (!chan_ctxt->rbase) /* Already uninitialized */
+ return;
+
mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
tre_ring->pre_aligned, tre_ring->dma_handle);
vfree(buf_ring->base);
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index d34d7e90e38d..4e0131b94056 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -111,7 +111,14 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
dma_addr_t db;
db = ring->iommu_base + (ring->wp - ring->base);
+
+ /*
+ * Writes to the new ring element must be visible to the hardware
+ * before letting the hardware know there is a new element to fetch.
+ */
+ dma_wmb();
*ring->ctxt_wp = db;
+
mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
ring->db_addr, db);
}
@@ -135,6 +142,19 @@ enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
}
EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
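+/**
+ * mhi_soc_reset - Trigger a device reset, either through the
+ * controller-specific reset() callback when one is provided or by
+ * writing the generic MHI SoC reset request register.
+ * @mhi_cntrl: MHI controller
+ */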
+void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
+{
+ if (mhi_cntrl->reset) {
+ mhi_cntrl->reset(mhi_cntrl);
+ return;
+ }
+
+ /* Generic MHI SoC reset */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
+ MHI_SOC_RESET_REQ);
+}
+EXPORT_SYMBOL_GPL(mhi_soc_reset);
+
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info)
{
@@ -260,6 +280,18 @@ int mhi_destroy_device(struct device *dev, void *data)
return 0;
}
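+/**
+ * mhi_get_free_desc_count - Number of free TREs left on the channel's
+ * transfer ring, i.e. how many more buffers can currently be queued.
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA_TO_DEVICE selects the UL channel, anything else the DL channel
+ */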
+int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
+ enum dma_data_direction dir)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
+ mhi_dev->ul_chan : mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+ return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
+}
+EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);
+
void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
struct mhi_driver *mhi_drv;
@@ -947,118 +979,88 @@ static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
return (tmp == ring->rp);
}
-int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
- struct sk_buff *skb, size_t len, enum mhi_flags mflags)
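+/*
+ * Common helper behind mhi_queue_skb/dma/buf: checks PM state and ring
+ * space, generates the TRE, handles wake/resume and rings the channel
+ * doorbell, all under pm_lock with interrupts disabled.
+ */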
+static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+ enum dma_data_direction dir, enum mhi_flags mflags)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- struct mhi_buf_info buf_info = { };
+ unsigned long flags;
int ret;
- /* If MHI host pre-allocates buffers then client drivers cannot queue */
- if (mhi_chan->pre_alloc)
- return -EINVAL;
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
+ return -EIO;
- if (mhi_is_ring_full(mhi_cntrl, tre_ring))
- return -ENOMEM;
+ read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
- read_lock_bh(&mhi_cntrl->pm_lock);
- if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
- read_unlock_bh(&mhi_cntrl->pm_lock);
- return -EIO;
+ ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
+ if (unlikely(ret)) {
+ ret = -ENOMEM;
+ goto exit_unlock;
}
- /* we're in M3 or transitioning to M3 */
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
+ if (unlikely(ret))
+ goto exit_unlock;
+
+ /* trigger M3 exit if necessary */
if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
mhi_trigger_resume(mhi_cntrl);
- /* Toggle wake to exit out of M2 */
+ /* Assert dev_wake (to exit/prevent M1/M2) */
mhi_cntrl->wake_toggle(mhi_cntrl);
- buf_info.v_addr = skb->data;
- buf_info.cb_buf = skb;
- buf_info.len = len;
-
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
- if (unlikely(ret)) {
- read_unlock_bh(&mhi_cntrl->pm_lock);
- return ret;
- }
-
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
- read_lock_bh(&mhi_chan->lock);
- mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- read_unlock_bh(&mhi_chan->lock);
+ if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+ ret = -EIO;
+ goto exit_unlock;
}
- read_unlock_bh(&mhi_cntrl->pm_lock);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- return 0;
+exit_unlock:
+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(mhi_queue_skb);
-int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
- struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags)
{
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
- struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
struct mhi_buf_info buf_info = { };
- int ret;
-
- /* If MHI host pre-allocates buffers then client drivers cannot queue */
- if (mhi_chan->pre_alloc)
- return -EINVAL;
-
- if (mhi_is_ring_full(mhi_cntrl, tre_ring))
- return -ENOMEM;
- read_lock_bh(&mhi_cntrl->pm_lock);
- if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
- dev_err(dev, "MHI is not in activate state, PM state: %s\n",
- to_mhi_pm_state_str(mhi_cntrl->pm_state));
- read_unlock_bh(&mhi_cntrl->pm_lock);
+ buf_info.v_addr = skb->data;
+ buf_info.cb_buf = skb;
+ buf_info.len = len;
- return -EIO;
- }
+ if (unlikely(mhi_chan->pre_alloc))
+ return -EINVAL;
- /* we're in M3 or transitioning to M3 */
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
- mhi_trigger_resume(mhi_cntrl);
+ return mhi_queue(mhi_dev, &buf_info, dir, mflags);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_skb);
- /* Toggle wake to exit out of M2 */
- mhi_cntrl->wake_toggle(mhi_cntrl);
+int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+ mhi_dev->dl_chan;
+ struct mhi_buf_info buf_info = { };
buf_info.p_addr = mhi_buf->dma_addr;
buf_info.cb_buf = mhi_buf;
buf_info.pre_mapped = true;
buf_info.len = len;
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
- if (unlikely(ret)) {
- read_unlock_bh(&mhi_cntrl->pm_lock);
- return ret;
- }
-
- if (mhi_chan->dir == DMA_TO_DEVICE)
- atomic_inc(&mhi_cntrl->pending_pkts);
-
- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
- read_lock_bh(&mhi_chan->lock);
- mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- read_unlock_bh(&mhi_chan->lock);
- }
-
- read_unlock_bh(&mhi_cntrl->pm_lock);
+ if (unlikely(mhi_chan->pre_alloc))
+ return -EINVAL;
- return 0;
+ return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_dma);
@@ -1112,57 +1114,13 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
void *buf, size_t len, enum mhi_flags mflags)
{
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
- mhi_dev->dl_chan;
- struct mhi_ring *tre_ring;
struct mhi_buf_info buf_info = { };
- unsigned long flags;
- int ret;
-
- /*
- * this check here only as a guard, it's always
- * possible mhi can enter error while executing rest of function,
- * which is not fatal so we do not need to hold pm_lock
- */
- if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
- return -EIO;
-
- tre_ring = &mhi_chan->tre_ring;
- if (mhi_is_ring_full(mhi_cntrl, tre_ring))
- return -ENOMEM;
buf_info.v_addr = buf;
buf_info.cb_buf = buf;
buf_info.len = len;
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
- if (unlikely(ret))
- return ret;
-
- read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
- /* we're in M3 or transitioning to M3 */
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
- mhi_trigger_resume(mhi_cntrl);
-
- /* Toggle wake to exit out of M2 */
- mhi_cntrl->wake_toggle(mhi_cntrl);
-
- if (mhi_chan->dir == DMA_TO_DEVICE)
- atomic_inc(&mhi_cntrl->pending_pkts);
-
- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
- unsigned long flags;
-
- read_lock_irqsave(&mhi_chan->lock, flags);
- mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- read_unlock_irqrestore(&mhi_chan->lock, flags);
- }
-
- read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
-
- return 0;
+ return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
index f5bee76ea061..20673a4b4a3c 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -8,13 +8,21 @@
* Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
*/
+#include <linux/aer.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
#define MHI_PCI_DEFAULT_BAR_NUM 0
+#define MHI_POST_RESET_DELAY_MS 500
+
+#define HEALTH_CHECK_PERIOD (HZ * 2)
+
/**
* struct mhi_pci_dev_info - MHI PCI device specific information
* @config: MHI controller configuration
@@ -76,6 +84,36 @@ struct mhi_pci_dev_info {
.offload_channel = false, \
}
+#define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_TO_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_ENABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = true, \
+ }
+
+#define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_FROM_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_ENABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = true, \
+ }
+
#define MHI_EVENT_CONFIG_DATA(ev_ring) \
{ \
.num_elements = 128, \
@@ -91,8 +129,8 @@ struct mhi_pci_dev_info {
#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, ch_num) \
{ \
- .num_elements = 128, \
- .irq_moderation_ms = 5, \
+ .num_elements = 2048, \
+ .irq_moderation_ms = 1, \
.irq = (ev_ring) + 1, \
.priority = 1, \
.mode = MHI_DB_BRST_DISABLE, \
@@ -104,27 +142,31 @@ struct mhi_pci_dev_info {
}
static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0),
- MHI_CHANNEL_CONFIG_UL(100, "IP_HW0", 128, 1),
- MHI_CHANNEL_CONFIG_DL(101, "IP_HW0", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3),
};
-static const struct mhi_event_config modem_qcom_v1_mhi_events[] = {
+static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
/* first ring is control+data ring */
MHI_EVENT_CONFIG_CTRL(0),
+ /* DIAG dedicated event ring */
+ MHI_EVENT_CONFIG_DATA(1),
/* Hardware channels request dedicated hardware event rings */
- MHI_EVENT_CONFIG_HW_DATA(1, 100),
- MHI_EVENT_CONFIG_HW_DATA(2, 101)
+ MHI_EVENT_CONFIG_HW_DATA(2, 100),
+ MHI_EVENT_CONFIG_HW_DATA(3, 101)
};
-static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
+static struct mhi_controller_config modem_qcom_v1_mhiv_config = {
.max_channels = 128,
- .timeout_ms = 5000,
+ .timeout_ms = 8000,
.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
.ch_cfg = modem_qcom_v1_mhi_channels,
.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
@@ -147,6 +189,18 @@ static const struct pci_device_id mhi_pci_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
+enum mhi_pci_device_status {
+ MHI_PCI_DEV_STARTED,
+};
+
+struct mhi_pci_device {
+ struct mhi_controller mhi_cntrl;
+ struct pci_saved_state *pci_state;
+ struct work_struct recovery_work;
+ struct timer_list health_check_timer;
+ unsigned long status;
+};
+
static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
void __iomem *addr, u32 *out)
{
@@ -163,7 +217,31 @@ static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
enum mhi_callback cb)
{
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+
/* Nothing to do for now */
+ switch (cb) {
+ case MHI_CB_FATAL_ERROR:
+ case MHI_CB_SYS_ERROR:
+ dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
+ break;
+ default:
+ break;
+ }
+}
+
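+/*
+ * A failed config space read, or a vendor ID of all-ones or zero,
+ * means the device is no longer reachable on the bus.
+ */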
+static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
+{
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+ u16 vendor = 0;
+
+ if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
+ return false;
+
+ if (vendor == (u16) ~0 || vendor == 0)
+ return false;
+
+ return true;
}
static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
@@ -227,8 +305,12 @@ static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
}
if (nr_vectors < mhi_cntrl->nr_irqs) {
- dev_warn(&pdev->dev, "Not enough MSI vectors (%d/%d), use shared MSI\n",
- nr_vectors, mhi_cntrl_config->num_events);
+ dev_warn(&pdev->dev, "using shared MSI\n");
+
+ /* Patch the MSI vectors to use only one (shared) */
+ for (i = 0; i < mhi_cntrl_config->num_events; i++)
+ mhi_cntrl_config->event_cfg[i].irq = 0;
+ mhi_cntrl->nr_irqs = 1;
}
irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
@@ -257,20 +339,89 @@ static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
/* no PM for now */
}
+static void mhi_pci_recovery_work(struct work_struct *work)
+{
+ struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
+ recovery_work);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+ int err;
+
+ dev_warn(&pdev->dev, "device recovery started\n");
+
+ del_timer(&mhi_pdev->health_check_timer);
+
+ /* Clean up MHI state */
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, false);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ }
+
+ /* Check if we can recover without full reset */
+ pci_set_power_state(pdev, PCI_D0);
+ pci_load_saved_state(pdev, mhi_pdev->pci_state);
+ pci_restore_state(pdev);
+
+ if (!mhi_pci_is_alive(mhi_cntrl))
+ goto err_try_reset;
+
+ err = mhi_prepare_for_power_up(mhi_cntrl);
+ if (err)
+ goto err_try_reset;
+
+ err = mhi_sync_power_up(mhi_cntrl);
+ if (err)
+ goto err_unprepare;
+
+ dev_dbg(&pdev->dev, "Recovery completed\n");
+
+ set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ return;
+
+err_unprepare:
+ mhi_unprepare_after_power_down(mhi_cntrl);
+err_try_reset:
+ if (pci_reset_function(pdev))
+ dev_err(&pdev->dev, "Recovery failed\n");
+}
+
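+/*
+ * Periodic liveness check: if the device stops responding on the PCI
+ * bus, kick the recovery worker; otherwise re-arm the timer.
+ */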
+static void health_check(struct timer_list *t)
+{
+ struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ if (!mhi_pci_is_alive(mhi_cntrl)) {
+ dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
+ queue_work(system_long_wq, &mhi_pdev->recovery_work);
+ return;
+ }
+
+ /* reschedule in two seconds */
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+}
+
static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
const struct mhi_controller_config *mhi_cntrl_config;
+ struct mhi_pci_device *mhi_pdev;
struct mhi_controller *mhi_cntrl;
int err;
dev_dbg(&pdev->dev, "MHI PCI device found: %s\n", info->name);
- mhi_cntrl = mhi_alloc_controller();
- if (!mhi_cntrl)
+ /* mhi_pdev.mhi_cntrl must be zero-initialized */
+ mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
+ if (!mhi_pdev)
return -ENOMEM;
+ INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
+ timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
+
mhi_cntrl_config = info->config;
+ mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
mhi_cntrl->cntrl_dev = &pdev->dev;
mhi_cntrl->iova_start = 0;
mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
@@ -285,17 +436,23 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
if (err)
- goto err_release;
+ return err;
err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
if (err)
- goto err_release;
+ return err;
+
+ pci_set_drvdata(pdev, mhi_pdev);
+
+ /* Keep the saved PCI config space at hand so it can be restored after a sudden PCI error */
+ pci_save_state(pdev);
+ mhi_pdev->pci_state = pci_store_saved_state(pdev);
- pci_set_drvdata(pdev, mhi_cntrl);
+ pci_enable_pcie_error_reporting(pdev);
err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
if (err)
- goto err_release;
+ return err;
/* MHI bus does not power up the controller by default */
err = mhi_prepare_for_power_up(mhi_cntrl);
@@ -310,33 +467,209 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_unprepare;
}
+ set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+
+ /* start health check */
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
return 0;
err_unprepare:
mhi_unprepare_after_power_down(mhi_cntrl);
err_unregister:
mhi_unregister_controller(mhi_cntrl);
-err_release:
- mhi_free_controller(mhi_cntrl);
return err;
}
static void mhi_pci_remove(struct pci_dev *pdev)
{
- struct mhi_controller *mhi_cntrl = pci_get_drvdata(pdev);
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ del_timer(&mhi_pdev->health_check_timer);
+ cancel_work_sync(&mhi_pdev->recovery_work);
+
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, true);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ }
- mhi_power_down(mhi_cntrl, true);
- mhi_unprepare_after_power_down(mhi_cntrl);
mhi_unregister_controller(mhi_cntrl);
- mhi_free_controller(mhi_cntrl);
}
+static void mhi_pci_reset_prepare(struct pci_dev *pdev)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ dev_info(&pdev->dev, "reset\n");
+
+ del_timer(&mhi_pdev->health_check_timer);
+
+ /* Clean up MHI state */
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, false);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ }
+
+ /* cause internal device reset */
+ mhi_soc_reset(mhi_cntrl);
+
+ /* Make sure the device reset has actually completed */
+ msleep(MHI_POST_RESET_DELAY_MS);
+}
+
+static void mhi_pci_reset_done(struct pci_dev *pdev)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ int err;
+
+ /* Restore initial known working PCI state */
+ pci_load_saved_state(pdev, mhi_pdev->pci_state);
+ pci_restore_state(pdev);
+
+ /* Is the device status available? */
+ if (!mhi_pci_is_alive(mhi_cntrl)) {
+ dev_err(&pdev->dev, "reset failed\n");
+ return;
+ }
+
+ err = mhi_prepare_for_power_up(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to prepare MHI controller\n");
+ return;
+ }
+
+ err = mhi_sync_power_up(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to power up MHI controller\n");
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ return;
+ }
+
+ set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+}
+
+static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ dev_err(&pdev->dev, "PCI error detected, state = %u\n", state);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ /* Clean up MHI state */
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, false);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ } else {
+ /* Nothing to do */
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev)
+{
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void mhi_pci_io_resume(struct pci_dev *pdev)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+
+ dev_err(&pdev->dev, "PCI slot reset done\n");
+
+ queue_work(system_long_wq, &mhi_pdev->recovery_work);
+}
+
+static const struct pci_error_handlers mhi_pci_err_handler = {
+ .error_detected = mhi_pci_error_detected,
+ .slot_reset = mhi_pci_slot_reset,
+ .resume = mhi_pci_io_resume,
+ .reset_prepare = mhi_pci_reset_prepare,
+ .reset_done = mhi_pci_reset_done,
+};
+
+static int __maybe_unused mhi_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ del_timer(&mhi_pdev->health_check_timer);
+ cancel_work_sync(&mhi_pdev->recovery_work);
+
+ /* Transition to M3 state */
+ mhi_pm_suspend(mhi_cntrl);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int __maybe_unused mhi_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_set_master(pdev);
+
+ err = pci_enable_device(pdev);
+ if (err)
+ goto err_recovery;
+
+ /* Exit M3, transition to M0 state */
+ err = mhi_pm_resume(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to resume device: %d\n", err);
+ goto err_recovery;
+ }
+
+ /* Resume health check */
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
+ return 0;
+
+err_recovery:
+ /* The device may have lost power or crashed; try to recover it */
+ queue_work(system_long_wq, &mhi_pdev->recovery_work);
+
+ return err;
+}
+
+static const struct dev_pm_ops mhi_pci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
+};
+
static struct pci_driver mhi_pci_driver = {
.name = "mhi-pci-generic",
.id_table = mhi_pci_id_table,
.probe = mhi_pci_probe,
- .remove = mhi_pci_remove
+ .remove = mhi_pci_remove,
+ .err_handler = &mhi_pci_err_handler,
+ .driver.pm = &mhi_pci_pm_ops
};
module_pci_driver(mhi_pci_driver);
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 2519ceede64b..dd9e7343a5e3 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -1111,7 +1111,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
mbus->sdramwins_base = ioremap(sdramwins_phys_base, sdramwins_size);
if (!mbus->sdramwins_base) {
- iounmap(mbus_state.mbuswins_base);
+ iounmap(mbus->mbuswins_base);
return -ENOMEM;
}
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index c5eb46cbf388..01a3d0cd08ed 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -16,6 +16,7 @@
static int simple_pm_bus_probe(struct platform_device *pdev)
{
+ const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -23,7 +24,7 @@ static int simple_pm_bus_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
if (np)
- of_platform_populate(np, NULL, NULL, &pdev->dev);
+ of_platform_populate(np, NULL, lookup, &pdev->dev);
return 0;
}
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 1bb00a959c67..d46db132d085 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -45,6 +45,8 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -126,6 +128,7 @@ struct sunxi_rsb {
struct completion complete;
struct mutex lock;
unsigned int status;
+ u32 clk_freq;
};
/* bus / slave device related functions */
@@ -170,7 +173,9 @@ static int sunxi_rsb_device_remove(struct device *dev)
{
const struct sunxi_rsb_driver *drv = to_sunxi_rsb_driver(dev->driver);
- return drv->remove(to_sunxi_rsb_device(dev));
+ drv->remove(to_sunxi_rsb_device(dev));
+
+ return 0;
}
static struct bus_type sunxi_rsb_bus = {
@@ -335,6 +340,10 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
return -EINVAL;
}
+ ret = pm_runtime_resume_and_get(rsb->dev);
+ if (ret)
+ return ret;
+
mutex_lock(&rsb->lock);
writel(addr, rsb->regs + RSB_ADDR);
@@ -350,6 +359,9 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
unlock:
mutex_unlock(&rsb->lock);
+ pm_runtime_mark_last_busy(rsb->dev);
+ pm_runtime_put_autosuspend(rsb->dev);
+
return ret;
}
@@ -377,6 +389,10 @@ static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
return -EINVAL;
}
+ ret = pm_runtime_resume_and_get(rsb->dev);
+ if (ret)
+ return ret;
+
mutex_lock(&rsb->lock);
writel(addr, rsb->regs + RSB_ADDR);
@@ -387,6 +403,9 @@ static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
mutex_unlock(&rsb->lock);
+ pm_runtime_mark_last_busy(rsb->dev);
+ pm_runtime_put_autosuspend(rsb->dev);
+
return ret;
}
@@ -614,11 +633,100 @@ static int of_rsb_register_devices(struct sunxi_rsb *rsb)
return 0;
}
-static const struct of_device_id sunxi_rsb_of_match_table[] = {
- { .compatible = "allwinner,sun8i-a23-rsb" },
- {}
-};
-MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table);
+static int sunxi_rsb_hw_init(struct sunxi_rsb *rsb)
+{
+ struct device *dev = rsb->dev;
+ unsigned long p_clk_freq;
+ u32 clk_delay, reg;
+ int clk_div, ret;
+
+ ret = clk_prepare_enable(rsb->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(rsb->rstc);
+ if (ret) {
+ dev_err(dev, "failed to deassert reset line: %d\n", ret);
+ goto err_clk_disable;
+ }
+
+ /* reset the controller */
+ writel(RSB_CTRL_SOFT_RST, rsb->regs + RSB_CTRL);
+ readl_poll_timeout(rsb->regs + RSB_CTRL, reg,
+ !(reg & RSB_CTRL_SOFT_RST), 1000, 100000);
+
+ /*
+ * Clock frequency and delay calculation code is from
+ * Allwinner U-boot sources.
+ *
+ * From A83 user manual:
+ * bus clock frequency = parent clock frequency / (2 * (divider + 1))
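+ *
+ * For example, with a (hypothetical) 24 MHz parent clock and the
+ * default 3 MHz bus clock, clk_div = 24000000 / 3000000 / 2 = 4, the
+ * register is programmed with clk_div - 1 = 3, and the resulting bus
+ * clock is 24000000 / (2 * (3 + 1)) = 3 MHz.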
+ */
+ p_clk_freq = clk_get_rate(rsb->clk);
+ clk_div = p_clk_freq / rsb->clk_freq / 2;
+ if (!clk_div)
+ clk_div = 1;
+ else if (clk_div > RSB_CCR_MAX_CLK_DIV + 1)
+ clk_div = RSB_CCR_MAX_CLK_DIV + 1;
+
+ clk_delay = clk_div >> 1;
+ if (!clk_delay)
+ clk_delay = 1;
+
+ dev_info(dev, "RSB running at %lu Hz\n", p_clk_freq / clk_div / 2);
+ writel(RSB_CCR_SDA_OUT_DELAY(clk_delay) | RSB_CCR_CLK_DIV(clk_div - 1),
+ rsb->regs + RSB_CCR);
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(rsb->clk);
+
+ return ret;
+}
+
+static void sunxi_rsb_hw_exit(struct sunxi_rsb *rsb)
+{
+ /* Keep the clock and PM reference counts consistent. */
+ if (pm_runtime_status_suspended(rsb->dev))
+ pm_runtime_resume(rsb->dev);
+ reset_control_assert(rsb->rstc);
+ clk_disable_unprepare(rsb->clk);
+}
+
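+/*
+ * Runtime PM only gates the bus clock; system sleep additionally
+ * asserts the reset line and reprograms the controller on resume via
+ * sunxi_rsb_hw_exit()/sunxi_rsb_hw_init().
+ */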
+static int __maybe_unused sunxi_rsb_runtime_suspend(struct device *dev)
+{
+ struct sunxi_rsb *rsb = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(rsb->clk);
+
+ return 0;
+}
+
+static int __maybe_unused sunxi_rsb_runtime_resume(struct device *dev)
+{
+ struct sunxi_rsb *rsb = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(rsb->clk);
+}
+
+static int __maybe_unused sunxi_rsb_suspend(struct device *dev)
+{
+ struct sunxi_rsb *rsb = dev_get_drvdata(dev);
+
+ sunxi_rsb_hw_exit(rsb);
+
+ return 0;
+}
+
+static int __maybe_unused sunxi_rsb_resume(struct device *dev)
+{
+ struct sunxi_rsb *rsb = dev_get_drvdata(dev);
+
+ return sunxi_rsb_hw_init(rsb);
+}
static int sunxi_rsb_probe(struct platform_device *pdev)
{
@@ -626,10 +734,8 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct resource *r;
struct sunxi_rsb *rsb;
- unsigned long p_clk_freq;
- u32 clk_delay, clk_freq = 3000000;
- int clk_div, irq, ret;
- u32 reg;
+ u32 clk_freq = 3000000;
+ int irq, ret;
of_property_read_u32(np, "clock-frequency", &clk_freq);
if (clk_freq > RSB_MAX_FREQ) {
@@ -644,6 +750,7 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
return -ENOMEM;
rsb->dev = dev;
+ rsb->clk_freq = clk_freq;
platform_set_drvdata(pdev, rsb);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rsb->regs = devm_ioremap_resource(dev, r);
@@ -661,79 +768,41 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
return ret;
}
- ret = clk_prepare_enable(rsb->clk);
- if (ret) {
- dev_err(dev, "failed to enable clk: %d\n", ret);
- return ret;
- }
-
- p_clk_freq = clk_get_rate(rsb->clk);
-
rsb->rstc = devm_reset_control_get(dev, NULL);
if (IS_ERR(rsb->rstc)) {
ret = PTR_ERR(rsb->rstc);
dev_err(dev, "failed to retrieve reset controller: %d\n", ret);
- goto err_clk_disable;
- }
-
- ret = reset_control_deassert(rsb->rstc);
- if (ret) {
- dev_err(dev, "failed to deassert reset line: %d\n", ret);
- goto err_clk_disable;
+ return ret;
}
init_completion(&rsb->complete);
mutex_init(&rsb->lock);
- /* reset the controller */
- writel(RSB_CTRL_SOFT_RST, rsb->regs + RSB_CTRL);
- readl_poll_timeout(rsb->regs + RSB_CTRL, reg,
- !(reg & RSB_CTRL_SOFT_RST), 1000, 100000);
-
- /*
- * Clock frequency and delay calculation code is from
- * Allwinner U-boot sources.
- *
- * From A83 user manual:
- * bus clock frequency = parent clock frequency / (2 * (divider + 1))
- */
- clk_div = p_clk_freq / clk_freq / 2;
- if (!clk_div)
- clk_div = 1;
- else if (clk_div > RSB_CCR_MAX_CLK_DIV + 1)
- clk_div = RSB_CCR_MAX_CLK_DIV + 1;
-
- clk_delay = clk_div >> 1;
- if (!clk_delay)
- clk_delay = 1;
-
- dev_info(dev, "RSB running at %lu Hz\n", p_clk_freq / clk_div / 2);
- writel(RSB_CCR_SDA_OUT_DELAY(clk_delay) | RSB_CCR_CLK_DIV(clk_div - 1),
- rsb->regs + RSB_CCR);
-
ret = devm_request_irq(dev, irq, sunxi_rsb_irq, 0, RSB_CTRL_NAME, rsb);
if (ret) {
dev_err(dev, "can't register interrupt handler irq %d: %d\n",
irq, ret);
- goto err_reset_assert;
+ return ret;
}
+ ret = sunxi_rsb_hw_init(rsb);
+ if (ret)
+ return ret;
+
/* initialize all devices on the bus into RSB mode */
ret = sunxi_rsb_init_device_mode(rsb);
if (ret)
dev_warn(dev, "Initialize device mode failed: %d\n", ret);
+ pm_suspend_ignore_children(dev, true);
+ pm_runtime_set_active(dev);
+ pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+
of_rsb_register_devices(rsb);
return 0;
-
-err_reset_assert:
- reset_control_assert(rsb->rstc);
-
-err_clk_disable:
- clk_disable_unprepare(rsb->clk);
-
- return ret;
}
static int sunxi_rsb_remove(struct platform_device *pdev)
@@ -741,18 +810,40 @@ static int sunxi_rsb_remove(struct platform_device *pdev)
struct sunxi_rsb *rsb = platform_get_drvdata(pdev);
device_for_each_child(rsb->dev, NULL, sunxi_rsb_remove_devices);
- reset_control_assert(rsb->rstc);
- clk_disable_unprepare(rsb->clk);
+ pm_runtime_disable(&pdev->dev);
+ sunxi_rsb_hw_exit(rsb);
return 0;
}
+static void sunxi_rsb_shutdown(struct platform_device *pdev)
+{
+ struct sunxi_rsb *rsb = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ sunxi_rsb_hw_exit(rsb);
+}
+
+static const struct dev_pm_ops sunxi_rsb_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(sunxi_rsb_runtime_suspend,
+ sunxi_rsb_runtime_resume, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sunxi_rsb_suspend, sunxi_rsb_resume)
+};
+
+static const struct of_device_id sunxi_rsb_of_match_table[] = {
+ { .compatible = "allwinner,sun8i-a23-rsb" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table);
+
static struct platform_driver sunxi_rsb_driver = {
.probe = sunxi_rsb_probe,
.remove = sunxi_rsb_remove,
+ .shutdown = sunxi_rsb_shutdown,
.driver = {
.name = RSB_CTRL_NAME,
.of_match_table = sunxi_rsb_of_match_table,
+ .pm = &sunxi_rsb_dev_pm_ops,
},
};
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 8f0e52a71493..90ad34c6ef8e 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2214,7 +2214,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
rq->timeout = 60 * HZ;
bio = rq->bio;
- blk_execute_rq(q, cdi->disk, rq, 0);
+ blk_execute_rq(cdi->disk, rq, 0);
if (scsi_req(rq)->result) {
struct scsi_sense_hdr sshdr;
diff --git a/drivers/char/hw_random/ingenic-trng.c b/drivers/char/hw_random/ingenic-trng.c
index 954a8411d67d..0eb80f786f4d 100644
--- a/drivers/char/hw_random/ingenic-trng.c
+++ b/drivers/char/hw_random/ingenic-trng.c
@@ -113,13 +113,17 @@ static int ingenic_trng_probe(struct platform_device *pdev)
ret = hwrng_register(&trng->rng);
if (ret) {
dev_err(&pdev->dev, "Failed to register hwrng\n");
- return ret;
+ goto err_unprepare_clk;
}
platform_set_drvdata(pdev, trng);
dev_info(&pdev->dev, "Ingenic DTRNG driver registered\n");
return 0;
+
+err_unprepare_clk:
+ clk_disable_unprepare(trng->clk);
+ return ret;
}
static int ingenic_trng_remove(struct platform_device *pdev)
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index 01583faf9893..a43743887db1 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -28,7 +28,6 @@
#define RNG_CTRL_OFFSET 0x00
#define RNG_CTRL_RNG_RBGEN_MASK 0x00001FFF
#define RNG_CTRL_RNG_RBGEN_ENABLE 0x00000001
-#define RNG_CTRL_RNG_RBGEN_DISABLE 0x00000000
#define RNG_SOFT_RESET_OFFSET 0x04
#define RNG_SOFT_RESET 0x00000001
@@ -54,15 +53,24 @@ struct iproc_rng200_dev {
#define to_rng_priv(rng) container_of(rng, struct iproc_rng200_dev, rng)
-static void iproc_rng200_restart(void __iomem *rng_base)
+static void iproc_rng200_enable_set(void __iomem *rng_base, bool enable)
{
- uint32_t val;
+ u32 val;
- /* Disable RBG */
val = ioread32(rng_base + RNG_CTRL_OFFSET);
val &= ~RNG_CTRL_RNG_RBGEN_MASK;
- val |= RNG_CTRL_RNG_RBGEN_DISABLE;
+
+ if (enable)
+ val |= RNG_CTRL_RNG_RBGEN_ENABLE;
+
iowrite32(val, rng_base + RNG_CTRL_OFFSET);
+}
+
+static void iproc_rng200_restart(void __iomem *rng_base)
+{
+ uint32_t val;
+
+ iproc_rng200_enable_set(rng_base, false);
/* Clear all interrupt status */
iowrite32(0xFFFFFFFFUL, rng_base + RNG_INT_STATUS_OFFSET);
@@ -84,11 +92,7 @@ static void iproc_rng200_restart(void __iomem *rng_base)
val &= ~RBG_SOFT_RESET;
iowrite32(val, rng_base + RBG_SOFT_RESET_OFFSET);
- /* Enable RBG */
- val = ioread32(rng_base + RNG_CTRL_OFFSET);
- val &= ~RNG_CTRL_RNG_RBGEN_MASK;
- val |= RNG_CTRL_RNG_RBGEN_ENABLE;
- iowrite32(val, rng_base + RNG_CTRL_OFFSET);
+ iproc_rng200_enable_set(rng_base, true);
}
static int iproc_rng200_read(struct hwrng *rng, void *buf, size_t max,
@@ -155,13 +159,8 @@ static int iproc_rng200_read(struct hwrng *rng, void *buf, size_t max,
static int iproc_rng200_init(struct hwrng *rng)
{
struct iproc_rng200_dev *priv = to_rng_priv(rng);
- uint32_t val;
- /* Setup RNG. */
- val = ioread32(priv->base + RNG_CTRL_OFFSET);
- val &= ~RNG_CTRL_RNG_RBGEN_MASK;
- val |= RNG_CTRL_RNG_RBGEN_ENABLE;
- iowrite32(val, priv->base + RNG_CTRL_OFFSET);
+ iproc_rng200_enable_set(priv->base, true);
return 0;
}
@@ -169,13 +168,8 @@ static int iproc_rng200_init(struct hwrng *rng)
static void iproc_rng200_cleanup(struct hwrng *rng)
{
struct iproc_rng200_dev *priv = to_rng_priv(rng);
- uint32_t val;
- /* Disable RNG hardware */
- val = ioread32(priv->base + RNG_CTRL_OFFSET);
- val &= ~RNG_CTRL_RNG_RBGEN_MASK;
- val |= RNG_CTRL_RNG_RBGEN_DISABLE;
- iowrite32(val, priv->base + RNG_CTRL_OFFSET);
+ iproc_rng200_enable_set(priv->base, false);
}
static int iproc_rng200_probe(struct platform_device *pdev)
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index b0ded41eb865..67947a19aa22 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -69,11 +69,10 @@ out_clk:
return ret;
}
-static int nmk_rng_remove(struct amba_device *dev)
+static void nmk_rng_remove(struct amba_device *dev)
{
amba_release_regions(dev);
clk_disable(rng_clk);
- return 0;
}
static const struct amba_id nmk_rng_ids[] = {
diff --git a/drivers/char/hw_random/optee-rng.c b/drivers/char/hw_random/optee-rng.c
index a99d82949981..135a82590923 100644
--- a/drivers/char/hw_random/optee-rng.c
+++ b/drivers/char/hw_random/optee-rng.c
@@ -243,7 +243,7 @@ static int optee_rng_probe(struct device *dev)
if (err)
goto out_sess;
- err = hwrng_register(&pvt_data.optee_rng);
+ err = devm_hwrng_register(dev, &pvt_data.optee_rng);
if (err) {
dev_err(dev, "hwrng registration failed (%d)\n", err);
goto out_sess;
@@ -263,7 +263,6 @@ out_ctx:
static int optee_rng_remove(struct device *dev)
{
- hwrng_unregister(&pvt_data.optee_rng);
tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
tee_client_close_context(pvt_data.ctx);
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index e262445fed5f..8ea1fc831eb7 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -69,7 +69,7 @@ static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
*/
if (retval > 0)
usleep_range(period_us,
- period_us + min(1, period_us / 100));
+ period_us + max(1, period_us / 100));
*(u32 *)data = readl(priv->io_base);
retval += sizeof(u32);
@@ -169,7 +169,7 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
priv->present = 1;
complete(&priv->completion);
- err = hwrng_register(&priv->rng_ops);
+ err = devm_hwrng_register(&pdev->dev, &priv->rng_ops);
if (err) {
dev_err(&pdev->dev, "problem registering\n");
return err;
@@ -185,7 +185,6 @@ static int timeriomem_rng_remove(struct platform_device *pdev)
{
struct timeriomem_rng_private *priv = platform_get_drvdata(pdev);
- hwrng_unregister(&priv->rng_ops);
hrtimer_cancel(&priv->timer);
return 0;
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 382b28f1cf2f..49b8f22fdcf0 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -137,7 +137,7 @@ static ssize_t ipmb_write(struct file *file, const char __user *buf,
{
struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
u8 rq_sa, netf_rq_lun, msg_len;
- union i2c_smbus_data data;
+ struct i2c_client *temp_client;
u8 msg[MAX_MSG_LEN];
ssize_t ret;
@@ -160,21 +160,21 @@ static ssize_t ipmb_write(struct file *file, const char __user *buf,
}
/*
- * subtract rq_sa and netf_rq_lun from the length of the msg passed to
- * i2c_smbus_xfer
+ * subtract rq_sa and netf_rq_lun from the length of the msg. Fill in a
+ * temporary i2c client; note that using one this way is an exception for IPMB.
*/
msg_len = msg[IPMB_MSG_LEN_IDX] - SMBUS_MSG_HEADER_LENGTH;
- if (msg_len > I2C_SMBUS_BLOCK_MAX)
- msg_len = I2C_SMBUS_BLOCK_MAX;
+ temp_client = kmemdup(ipmb_dev->client, sizeof(*temp_client), GFP_KERNEL);
+ if (!temp_client)
+ return -ENOMEM;
+
+ temp_client->addr = rq_sa;
- data.block[0] = msg_len;
- memcpy(&data.block[1], msg + SMBUS_MSG_IDX_OFFSET, msg_len);
- ret = i2c_smbus_xfer(ipmb_dev->client->adapter, rq_sa,
- ipmb_dev->client->flags,
- I2C_SMBUS_WRITE, netf_rq_lun,
- I2C_SMBUS_BLOCK_DATA, &data);
+ ret = i2c_smbus_write_block_data(temp_client, netf_rq_lun, msg_len,
+ msg + SMBUS_MSG_IDX_OFFSET);
+ kfree(temp_client);
- return ret ? : count;
+ return ret < 0 ? ret : count;
}
static __poll_t ipmb_poll(struct file *file, poll_table *wait)
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 94c2b556cf97..869b9f5e8e03 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -31,9 +31,6 @@
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>
-#include <linux/pseudo_fs.h>
-#include <uapi/linux/magic.h>
-#include <linux/mount.h>
#ifdef CONFIG_IA64
# include <linux/efi.h>
@@ -294,13 +291,6 @@ static int uncached_access(struct file *file, phys_addr_t addr)
* attribute aliases.
*/
return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
-#elif defined(CONFIG_MIPS)
- {
- extern int __uncached_access(struct file *file,
- unsigned long addr);
-
- return __uncached_access(file, addr);
- }
#else
/*
* Accessing memory above the top the kernel knows about or through a
@@ -836,42 +826,6 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
return ret;
}
-static struct inode *devmem_inode;
-
-#ifdef CONFIG_IO_STRICT_DEVMEM
-void revoke_devmem(struct resource *res)
-{
- /* pairs with smp_store_release() in devmem_init_inode() */
- struct inode *inode = smp_load_acquire(&devmem_inode);
-
- /*
- * Check that the initialization has completed. Losing the race
- * is ok because it means drivers are claiming resources before
- * the fs_initcall level of init and prevent /dev/mem from
- * establishing mappings.
- */
- if (!inode)
- return;
-
- /*
- * The expectation is that the driver has successfully marked
- * the resource busy by this point, so devmem_is_allowed()
- * should start returning false, however for performance this
- * does not iterate the entire resource range.
- */
- if (devmem_is_allowed(PHYS_PFN(res->start)) &&
- devmem_is_allowed(PHYS_PFN(res->end))) {
- /*
- * *cringe* iomem=relaxed says "go ahead, what's the
- * worst that can happen?"
- */
- return;
- }
-
- unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
-}
-#endif
-
static int open_port(struct inode *inode, struct file *filp)
{
int rc;
@@ -891,8 +845,7 @@ static int open_port(struct inode *inode, struct file *filp)
* revocations when drivers want to take over a /dev/mem mapped
* range.
*/
- inode->i_mapping = devmem_inode->i_mapping;
- filp->f_mapping = inode->i_mapping;
+ filp->f_mapping = iomem_get_mapping();
return 0;
}
@@ -1024,48 +977,6 @@ static char *mem_devnode(struct device *dev, umode_t *mode)
static struct class *mem_class;
-static int devmem_fs_init_fs_context(struct fs_context *fc)
-{
- return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type devmem_fs_type = {
- .name = "devmem",
- .owner = THIS_MODULE,
- .init_fs_context = devmem_fs_init_fs_context,
- .kill_sb = kill_anon_super,
-};
-
-static int devmem_init_inode(void)
-{
- static struct vfsmount *devmem_vfs_mount;
- static int devmem_fs_cnt;
- struct inode *inode;
- int rc;
-
- rc = simple_pin_fs(&devmem_fs_type, &devmem_vfs_mount, &devmem_fs_cnt);
- if (rc < 0) {
- pr_err("Cannot mount /dev/mem pseudo filesystem: %d\n", rc);
- return rc;
- }
-
- inode = alloc_anon_inode(devmem_vfs_mount->mnt_sb);
- if (IS_ERR(inode)) {
- rc = PTR_ERR(inode);
- pr_err("Cannot allocate inode for /dev/mem: %d\n", rc);
- simple_release_fs(&devmem_vfs_mount, &devmem_fs_cnt);
- return rc;
- }
-
- /*
- * Publish /dev/mem initialized.
- * Pairs with smp_load_acquire() in revoke_devmem().
- */
- smp_store_release(&devmem_inode, inode);
-
- return 0;
-}
-
static int __init chr_dev_init(void)
{
int minor;
@@ -1087,8 +998,6 @@ static int __init chr_dev_init(void)
*/
if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
continue;
- if ((minor == DEVMEM_MINOR) && devmem_init_inode() != 0)
- continue;
device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
NULL, devlist[minor].name);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index e342daa73d1b..2be8d9a8eec5 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2494,8 +2494,6 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
__FILE__, __LINE__, tty->driver->name, port->count);
- port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
-
spin_lock_irqsave(&info->netlock, flags);
if (info->netcount) {
retval = -EBUSY;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 5f3b8ac9d97b..0fe9e200e4c8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1261,8 +1261,6 @@ void add_interrupt_randomness(int irq, int irq_flags)
cycles_t cycles = random_get_entropy();
__u32 c_high, j_high;
__u64 ip;
- unsigned long seed;
- int credit = 0;
if (cycles == 0)
cycles = get_reg(fast_pool, regs);
@@ -1298,23 +1296,12 @@ void add_interrupt_randomness(int irq, int irq_flags)
fast_pool->last = now;
__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
-
- /*
- * If we have architectural seed generator, produce a seed and
- * add it to the pool. For the sake of paranoia don't let the
- * architectural seed generator dominate the input from the
- * interrupt noise.
- */
- if (arch_get_random_seed_long(&seed)) {
- __mix_pool_bytes(r, &seed, sizeof(seed));
- credit = 1;
- }
spin_unlock(&r->lock);
fast_pool->count = 0;
/* award one bit for the contents of the fast pool */
- credit_entropy_bits(r, credit + 1);
+ credit_entropy_bits(r, 1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
@@ -1972,7 +1959,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (crng_init < 2)
return -ENODATA;
- crng_reseed(&primary_crng, NULL);
+ crng_reseed(&primary_crng, &input_pool);
crng_global_init_time = jiffies - 1;
return 0;
default:
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index a18c314da211..4308f9ca7a43 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -86,6 +86,16 @@ config TCG_TIS_SYNQUACER
To compile this driver as a module, choose M here;
the module will be called tpm_tis_synquacer.
+config TCG_TIS_I2C_CR50
+ tristate "TPM Interface Specification 2.0 Interface (I2C - CR50)"
+ depends on I2C
+ select TCG_CR50
+ help
+ This is a driver for the Google cr50 I2C TPM interface which is a
+ custom microcontroller and requires a custom i2c protocol interface
+ to handle the limitations of the hardware. To compile this driver
+ as a module, choose M here; the module will be called tcg_tis_i2c_cr50.
+
config TCG_TIS_I2C_ATMEL
tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)"
depends on I2C
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 84db4fb3a9c9..66d39ea6bd10 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -27,6 +27,8 @@ obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
tpm_tis_spi-y := tpm_tis_spi_main.o
tpm_tis_spi-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
+obj-$(CONFIG_TCG_TIS_I2C_CR50) += tpm_tis_i2c_cr50.o
+
obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c
index 2c96977ad080..8aa9057601d6 100644
--- a/drivers/char/tpm/eventlog/tpm1.c
+++ b/drivers/char/tpm/eventlog/tpm1.c
@@ -210,6 +210,7 @@ static int get_event_name(char *dest, struct tcpa_event *event,
default:
break;
}
+ break;
default:
break;
}
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 1784530b8387..c08cbb306636 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -20,7 +20,6 @@
#include "tpm-dev.h"
static struct workqueue_struct *tpm_dev_wq;
-static DEFINE_MUTEX(tpm_dev_wq_lock);
static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
u8 *buf, size_t bufsiz)
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index e2ff0b273a0f..63f03cfb8e6a 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -337,11 +337,190 @@ static const struct attribute_group tpm2_dev_group = {
.attrs = tpm2_dev_attrs,
};
+struct tpm_pcr_attr {
+ int alg_id;
+ int pcr;
+ struct device_attribute attr;
+};
+
+#define to_tpm_pcr_attr(a) container_of(a, struct tpm_pcr_attr, attr)
+
+static ssize_t pcr_value_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_pcr_attr *ha = to_tpm_pcr_attr(attr);
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ struct tpm_digest digest;
+ int i;
+ int digest_size = 0;
+ int rc;
+ char *str = buf;
+
+ for (i = 0; i < chip->nr_allocated_banks; i++)
+ if (ha->alg_id == chip->allocated_banks[i].alg_id)
+ digest_size = chip->allocated_banks[i].digest_size;
+ /* should never happen */
+ if (!digest_size)
+ return -EINVAL;
+
+ digest.alg_id = ha->alg_id;
+ rc = tpm_pcr_read(chip, ha->pcr, &digest);
+ if (rc)
+ return rc;
+ for (i = 0; i < digest_size; i++)
+ str += sprintf(str, "%02X", digest.digest[i]);
+ str += sprintf(str, "\n");
+
+ return str - buf;
+}
+
+/*
+ * The following set of defines represents all the magic to build
+ * the per hash attribute groups for displaying each bank of PCRs.
+ * The only slight problem with this approach is that every PCR is
+ * hard coded to be present, so you don't know whether a PCR is missing
+ * until a cat of its file returns -EINVAL
+ *
+ * Also note you must ignore checkpatch warnings in this macro
+ * code. This is deep macro magic that checkpatch.pl doesn't
+ * understand.
+ */
+
+/* Note, this must match TPM2_PLATFORM_PCR which is fixed at 24. */
+#define _TPM_HELPER(_alg, _hash, F) \
+ F(_alg, _hash, 0) \
+ F(_alg, _hash, 1) \
+ F(_alg, _hash, 2) \
+ F(_alg, _hash, 3) \
+ F(_alg, _hash, 4) \
+ F(_alg, _hash, 5) \
+ F(_alg, _hash, 6) \
+ F(_alg, _hash, 7) \
+ F(_alg, _hash, 8) \
+ F(_alg, _hash, 9) \
+ F(_alg, _hash, 10) \
+ F(_alg, _hash, 11) \
+ F(_alg, _hash, 12) \
+ F(_alg, _hash, 13) \
+ F(_alg, _hash, 14) \
+ F(_alg, _hash, 15) \
+ F(_alg, _hash, 16) \
+ F(_alg, _hash, 17) \
+ F(_alg, _hash, 18) \
+ F(_alg, _hash, 19) \
+ F(_alg, _hash, 20) \
+ F(_alg, _hash, 21) \
+ F(_alg, _hash, 22) \
+ F(_alg, _hash, 23)
+
+/* ignore checkpatch warning about trailing ; in macro. */
+#define PCR_ATTR(_alg, _hash, _pcr) \
+ static struct tpm_pcr_attr dev_attr_pcr_##_hash##_##_pcr = { \
+ .alg_id = _alg, \
+ .pcr = _pcr, \
+ .attr = { \
+ .attr = { \
+ .name = __stringify(_pcr), \
+ .mode = 0444 \
+ }, \
+ .show = pcr_value_show \
+ } \
+ };
+
+#define PCR_ATTRS(_alg, _hash) \
+ _TPM_HELPER(_alg, _hash, PCR_ATTR)
+
+/* ignore checkpatch warning about trailing , in macro. */
+#define PCR_ATTR_VAL(_alg, _hash, _pcr) \
+ &dev_attr_pcr_##_hash##_##_pcr.attr.attr,
+
+#define PCR_ATTR_GROUP_ARRAY(_alg, _hash) \
+ static struct attribute *pcr_group_attrs_##_hash[] = { \
+ _TPM_HELPER(_alg, _hash, PCR_ATTR_VAL) \
+ NULL \
+ }
+
+#define PCR_ATTR_GROUP(_alg, _hash) \
+ static struct attribute_group pcr_group_##_hash = { \
+ .name = "pcr-" __stringify(_hash), \
+ .attrs = pcr_group_attrs_##_hash \
+ }
+
+#define PCR_ATTR_BUILD(_alg, _hash) \
+ PCR_ATTRS(_alg, _hash) \
+ PCR_ATTR_GROUP_ARRAY(_alg, _hash); \
+ PCR_ATTR_GROUP(_alg, _hash)
+/*
+ * End of macro structure to build an attribute group containing 24
+ * PCR value files for each supported hash algorithm
+ */
+
+/*
+ * The next set of macros implements the cleverness for each hash to
+ * build a static attribute group called pcr_group_<hash> which can be
+ * added to chip->groups[].
+ *
+ * The first argument is the TPM algorithm id and the second is the
+ * hash used as both the suffix and the group name. Note: the group
+ * name is a directory in the top level tpm class with the name
+ * pcr-<hash>, so it must not clash with any other names already
+ * in the sysfs directory.
+ */
+PCR_ATTR_BUILD(TPM_ALG_SHA1, sha1);
+PCR_ATTR_BUILD(TPM_ALG_SHA256, sha256);
+PCR_ATTR_BUILD(TPM_ALG_SHA384, sha384);
+PCR_ATTR_BUILD(TPM_ALG_SHA512, sha512);
+PCR_ATTR_BUILD(TPM_ALG_SM3_256, sm3);
+
+
void tpm_sysfs_add_device(struct tpm_chip *chip)
{
+ int i;
+
WARN_ON(chip->groups_cnt != 0);
+
if (chip->flags & TPM_CHIP_FLAG_TPM2)
chip->groups[chip->groups_cnt++] = &tpm2_dev_group;
else
chip->groups[chip->groups_cnt++] = &tpm1_dev_group;
+
+ /* add one group for each bank hash */
+ for (i = 0; i < chip->nr_allocated_banks; i++) {
+ switch (chip->allocated_banks[i].alg_id) {
+ case TPM_ALG_SHA1:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sha1;
+ break;
+ case TPM_ALG_SHA256:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sha256;
+ break;
+ case TPM_ALG_SHA384:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sha384;
+ break;
+ case TPM_ALG_SHA512:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sha512;
+ break;
+ case TPM_ALG_SM3_256:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sm3;
+ break;
+ default:
+ /*
+ * If this triggers, send a patch to add both a
+ * PCR_ATTR_BUILD() macro above for the
+ * missing algorithm as well as an additional
+ * case in this switch statement.
+ */
+ dev_err(&chip->dev,
+ "TPM with unsupported bank algorithm 0x%04x",
+ chip->allocated_banks[i].alg_id);
+ break;
+ }
+ }
+
+ /*
+ * This will only trigger if someone has added an additional
+ * hash to the tpm_algorithms enum without incrementing
+ * TPM_MAX_HASHES.
+ */
+ WARN_ON(chip->groups_cnt > TPM_MAX_HASHES + 1);
}
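
As an illustrative aside (not part of the patch): one round of the macro chain above, PCR_ATTR(TPM_ALG_SHA256, sha256, 0), expands to the single read-only attribute sketched below, and PCR_ATTR_GROUP_ARRAY()/PCR_ATTR_GROUP() then collect the 24 such attributes into the pcr-sha256 group that tpm_sysfs_add_device() installs. The struct tpm_pcr_attr and pcr_value_show() are assumed to be the ones defined earlier in tpm-sysfs.c.

static struct tpm_pcr_attr dev_attr_pcr_sha256_0 = {
	.alg_id = TPM_ALG_SHA256,
	.pcr = 0,
	.attr = {
		.attr = {
			.name = "0",	/* file name: .../pcr-sha256/0 */
			.mode = 0444
		},
		.show = pcr_value_show
	}
};

Reading /sys/class/tpm/tpm0/pcr-sha256/0 (path shown for a typical first chip) then lands in pcr_value_show() for that bank and PCR index.
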
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 947d1db0a5cc..283f78211c3a 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -164,8 +164,6 @@ extern const struct file_operations tpmrm_fops;
extern struct idr dev_nums_idr;
ssize_t tpm_transmit(struct tpm_chip *chip, u8 *buf, size_t bufsiz);
-ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf,
- size_t min_rsp_body_length, const char *desc);
int tpm_get_timeouts(struct tpm_chip *);
int tpm_auto_startup(struct tpm_chip *chip);
@@ -194,8 +192,6 @@ static inline void tpm_msleep(unsigned int delay_msec)
int tpm_chip_start(struct tpm_chip *chip);
void tpm_chip_stop(struct tpm_chip *chip);
struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);
-__must_check int tpm_try_get_ops(struct tpm_chip *chip);
-void tpm_put_ops(struct tpm_chip *chip);
struct tpm_chip *tpm_chip_alloc(struct device *dev,
const struct tpm_class_ops *ops);
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index b2dab941cb7f..40018a73b3cb 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -358,7 +358,7 @@ static struct attribute *ppi_attrs[] = {
&dev_attr_tcg_operations.attr,
&dev_attr_vs_operations.attr, NULL,
};
-static struct attribute_group ppi_attr_grp = {
+static const struct attribute_group ppi_attr_grp = {
.name = "ppi",
.attrs = ppi_attrs
};
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 92c51c6cfd1b..a2e0395cbe61 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -125,7 +125,8 @@ static bool check_locality(struct tpm_chip *chip, int l)
if (rc < 0)
return false;
- if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
+ if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID
+ | TPM_ACCESS_REQUEST_USE)) ==
(TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
priv->locality = l;
return true;
@@ -134,58 +135,13 @@ static bool check_locality(struct tpm_chip *chip, int l)
return false;
}
-static bool locality_inactive(struct tpm_chip *chip, int l)
-{
- struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
- int rc;
- u8 access;
-
- rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
- if (rc < 0)
- return false;
-
- if ((access & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
- == TPM_ACCESS_VALID)
- return true;
-
- return false;
-}
-
static int release_locality(struct tpm_chip *chip, int l)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
- unsigned long stop, timeout;
- long rc;
tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
- stop = jiffies + chip->timeout_a;
-
- if (chip->flags & TPM_CHIP_FLAG_IRQ) {
-again:
- timeout = stop - jiffies;
- if ((long)timeout <= 0)
- return -1;
-
- rc = wait_event_interruptible_timeout(priv->int_queue,
- (locality_inactive(chip, l)),
- timeout);
-
- if (rc > 0)
- return 0;
-
- if (rc == -ERESTARTSYS && freezing(current)) {
- clear_thread_flag(TIF_SIGPENDING);
- goto again;
- }
- } else {
- do {
- if (locality_inactive(chip, l))
- return 0;
- tpm_msleep(TPM_TIMEOUT);
- } while (time_before(jiffies, stop));
- }
- return -1;
+ return 0;
}
static int request_locality(struct tpm_chip *chip, int l)
@@ -751,12 +707,22 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
const char *desc = "attempting to generate an interrupt";
u32 cap2;
cap_t cap;
+ int ret;
+ /* TPM 2.0 */
if (chip->flags & TPM_CHIP_FLAG_TPM2)
return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
- else
- return tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc,
- 0);
+
+ /* TPM 1.2 */
+ ret = request_locality(chip, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
+
+ release_locality(chip, 0);
+
+ return ret;
}
/* Register the IRQ and issue a command that will cause an interrupt. If an
@@ -1063,11 +1029,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
init_waitqueue_head(&priv->read_queue);
init_waitqueue_head(&priv->int_queue);
if (irq != -1) {
- /* Before doing irq testing issue a command to the TPM in polling mode
+ /*
+ * Before doing irq testing issue a command to the TPM in polling mode
* to make sure it works. May as well use that command to set the
* proper timeouts for the driver.
*/
- if (tpm_get_timeouts(chip)) {
+
+ rc = request_locality(chip, 0);
+ if (rc < 0)
+ goto out_err;
+
+ rc = tpm_get_timeouts(chip);
+
+ release_locality(chip, 0);
+
+ if (rc) {
dev_err(dev, "Could not get TPM timeouts and durations\n");
rc = -ENODEV;
goto out_err;
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
new file mode 100644
index 000000000000..ec9a65e7887d
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -0,0 +1,790 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Based on Infineon TPM driver by Peter Huewe.
+ *
+ * cr50 is a firmware for H1 secure modules that requires special
+ * handling for the I2C interface.
+ *
+ * - Use an interrupt for transaction status instead of hardcoded delays.
+ * - Must use write+wait+read read protocol.
+ * - All 4 bytes of status register must be read/written at once.
+ * - Burst count max is 63 bytes, and burst count behaves slightly differently
+ * than on other I2C TPMs.
+ * - When reading from FIFO the full burstcnt must be read instead of just
+ * reading header and determining the remainder.
+ */
+
+#include <linux/acpi.h>
+#include <linux/completion.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include "tpm_tis_core.h"
+
+#define TPM_CR50_MAX_BUFSIZE 64
+#define TPM_CR50_TIMEOUT_SHORT_MS 2 /* Short timeout during transactions */
+#define TPM_CR50_TIMEOUT_NOIRQ_MS 20 /* Timeout for TPM ready without IRQ */
+#define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID reg value */
+#define TPM_CR50_I2C_MAX_RETRIES 3 /* Max retries due to I2C errors */
+#define TPM_CR50_I2C_RETRY_DELAY_LO 55 /* Min usecs between retries on I2C */
+#define TPM_CR50_I2C_RETRY_DELAY_HI 65 /* Max usecs between retries on I2C */
+
+#define TPM_I2C_ACCESS(l) (0x0000 | ((l) << 4))
+#define TPM_I2C_STS(l) (0x0001 | ((l) << 4))
+#define TPM_I2C_DATA_FIFO(l) (0x0005 | ((l) << 4))
+#define TPM_I2C_DID_VID(l) (0x0006 | ((l) << 4))
+
+/**
+ * struct tpm_i2c_cr50_priv_data - Driver private data.
+ * @irq: Irq number used for this chip.
+ * If irq <= 0, then a fixed timeout is used instead of waiting for irq.
+ * @tpm_ready: Struct used by irq handler to signal R/W readiness.
+ * @buf: Buffer used for i2c writes, with i2c address prepended to content.
+ *
+ * Private driver struct used by kernel threads and interrupt context.
+ */
+struct tpm_i2c_cr50_priv_data {
+ int irq;
+ struct completion tpm_ready;
+ u8 buf[TPM_CR50_MAX_BUFSIZE];
+};
+
+/**
+ * tpm_cr50_i2c_int_handler() - cr50 interrupt handler.
+ * @dummy: Unused parameter.
+ * @tpm_info: TPM chip information.
+ *
+ * The cr50 interrupt handler signals waiting threads that the
+ * interrupt has been asserted. It does not do any interrupt triggered
+ * processing but is instead used to avoid fixed delays.
+ *
+ * Return:
+ * IRQ_HANDLED signifies irq was handled by this device.
+ */
+static irqreturn_t tpm_cr50_i2c_int_handler(int dummy, void *tpm_info)
+{
+ struct tpm_chip *chip = tpm_info;
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ complete(&priv->tpm_ready);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * tpm_cr50_i2c_wait_tpm_ready() - Wait for tpm to signal ready.
+ * @chip: A TPM chip.
+ *
+ * Wait for completion interrupt if available, otherwise use a fixed
+ * delay for the TPM to be ready.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_wait_tpm_ready(struct tpm_chip *chip)
+{
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ /* Use a safe fixed delay if interrupt is not supported */
+ if (priv->irq <= 0) {
+ msleep(TPM_CR50_TIMEOUT_NOIRQ_MS);
+ return 0;
+ }
+
+ /* Wait for interrupt to indicate TPM is ready to respond */
+ if (!wait_for_completion_timeout(&priv->tpm_ready,
+ msecs_to_jiffies(chip->timeout_a))) {
+ dev_warn(&chip->dev, "Timeout waiting for TPM ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * tpm_cr50_i2c_enable_tpm_irq() - Enable TPM irq.
+ * @chip: A TPM chip.
+ */
+static void tpm_cr50_i2c_enable_tpm_irq(struct tpm_chip *chip)
+{
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ if (priv->irq > 0) {
+ reinit_completion(&priv->tpm_ready);
+ enable_irq(priv->irq);
+ }
+}
+
+/**
+ * tpm_cr50_i2c_disable_tpm_irq() - Disable TPM irq.
+ * @chip: A TPM chip.
+ */
+static void tpm_cr50_i2c_disable_tpm_irq(struct tpm_chip *chip)
+{
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ if (priv->irq > 0)
+ disable_irq(priv->irq);
+}
+
+/**
+ * tpm_cr50_i2c_transfer_message() - Transfer a message over i2c.
+ * @dev: Device information.
+ * @adapter: I2C adapter.
+ * @msg: Message to transfer.
+ *
+ * Call unlocked i2c transfer routine with the provided parameters and
+ * retry in case of bus errors.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_transfer_message(struct device *dev,
+ struct i2c_adapter *adapter,
+ struct i2c_msg *msg)
+{
+ unsigned int try;
+ int rc;
+
+ for (try = 0; try < TPM_CR50_I2C_MAX_RETRIES; try++) {
+ rc = __i2c_transfer(adapter, msg, 1);
+ if (rc == 1)
+ return 0; /* Successfully transferred the message */
+ if (try)
+ dev_warn(dev, "i2c transfer failed (attempt %d/%d): %d\n",
+ try + 1, TPM_CR50_I2C_MAX_RETRIES, rc);
+ usleep_range(TPM_CR50_I2C_RETRY_DELAY_LO, TPM_CR50_I2C_RETRY_DELAY_HI);
+ }
+
+ /* No i2c message transferred */
+ return -EIO;
+}
+
+/**
+ * tpm_cr50_i2c_read() - Read from TPM register.
+ * @chip: A TPM chip.
+ * @addr: Register address to read from.
+ * @buffer: Read destination, provided by caller.
+ * @len: Number of bytes to read.
+ *
+ * Sends the register address byte to the TPM, then waits until TPM
+ * is ready via interrupt signal or timeout expiration, then 'len'
+ * bytes are read from TPM response into the provided 'buffer'.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_read(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t len)
+{
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ struct i2c_msg msg_reg_addr = {
+ .addr = client->addr,
+ .len = 1,
+ .buf = &addr
+ };
+ struct i2c_msg msg_response = {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buffer
+ };
+ int rc;
+
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ /* Prepare for completion interrupt */
+ tpm_cr50_i2c_enable_tpm_irq(chip);
+
+ /* Send the register address byte to the TPM */
+ rc = tpm_cr50_i2c_transfer_message(&chip->dev, client->adapter, &msg_reg_addr);
+ if (rc < 0)
+ goto out;
+
+ /* Wait for TPM to be ready with response data */
+ rc = tpm_cr50_i2c_wait_tpm_ready(chip);
+ if (rc < 0)
+ goto out;
+
+ /* Read response data from the TPM */
+ rc = tpm_cr50_i2c_transfer_message(&chip->dev, client->adapter, &msg_response);
+
+out:
+ tpm_cr50_i2c_disable_tpm_irq(chip);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+/**
+ * tpm_cr50_i2c_write()- Write to TPM register.
+ * @chip: A TPM chip.
+ * @addr: Register address to write to.
+ * @buffer: Data to write.
+ * @len: Number of bytes to write.
+ *
+ * The provided address is prepended to the data in 'buffer', the
+ * combined address+data is sent to the TPM, then the driver waits
+ * for the TPM to indicate it is done writing.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer,
+ size_t len)
+{
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .len = len + 1,
+ .buf = priv->buf
+ };
+ int rc;
+
+ if (len > TPM_CR50_MAX_BUFSIZE - 1)
+ return -EINVAL;
+
+ /* Prepend the 'register address' to the buffer */
+ priv->buf[0] = addr;
+ memcpy(priv->buf + 1, buffer, len);
+
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ /* Prepare for completion interrupt */
+ tpm_cr50_i2c_enable_tpm_irq(chip);
+
+ /* Send write request buffer with address */
+ rc = tpm_cr50_i2c_transfer_message(&chip->dev, client->adapter, &msg);
+ if (rc < 0)
+ goto out;
+
+ /* Wait for TPM to be ready, ignore timeout */
+ tpm_cr50_i2c_wait_tpm_ready(chip);
+
+out:
+ tpm_cr50_i2c_disable_tpm_irq(chip);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+/**
+ * tpm_cr50_check_locality() - Verify TPM locality 0 is active.
+ * @chip: A TPM chip.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_check_locality(struct tpm_chip *chip)
+{
+ u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY;
+ u8 buf;
+ int rc;
+
+ rc = tpm_cr50_i2c_read(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf));
+ if (rc < 0)
+ return rc;
+
+ if ((buf & mask) == mask)
+ return 0;
+
+ return -EIO;
+}
+
+/**
+ * tpm_cr50_release_locality() - Release TPM locality.
+ * @chip: A TPM chip.
+ * @force: Flag to force release if set.
+ */
+static void tpm_cr50_release_locality(struct tpm_chip *chip, bool force)
+{
+ u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_REQUEST_PENDING;
+ u8 addr = TPM_I2C_ACCESS(0);
+ u8 buf;
+
+ if (tpm_cr50_i2c_read(chip, addr, &buf, sizeof(buf)) < 0)
+ return;
+
+ if (force || (buf & mask) == mask) {
+ buf = TPM_ACCESS_ACTIVE_LOCALITY;
+ tpm_cr50_i2c_write(chip, addr, &buf, sizeof(buf));
+ }
+}
+
+/**
+ * tpm_cr50_request_locality() - Request TPM locality 0.
+ * @chip: A TPM chip.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_request_locality(struct tpm_chip *chip)
+{
+ u8 buf = TPM_ACCESS_REQUEST_USE;
+ unsigned long stop;
+ int rc;
+
+ if (!tpm_cr50_check_locality(chip))
+ return 0;
+
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf));
+ if (rc < 0)
+ return rc;
+
+ stop = jiffies + chip->timeout_a;
+ do {
+ if (!tpm_cr50_check_locality(chip))
+ return 0;
+
+ msleep(TPM_CR50_TIMEOUT_SHORT_MS);
+ } while (time_before(jiffies, stop));
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * tpm_cr50_i2c_tis_status() - Read cr50 tis status.
+ * @chip: A TPM chip.
+ *
+ * cr50 requires all 4 bytes of status register to be read.
+ *
+ * Return:
+ * TPM status byte.
+ */
+static u8 tpm_cr50_i2c_tis_status(struct tpm_chip *chip)
+{
+ u8 buf[4];
+
+ if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0)
+ return 0;
+
+ return buf[0];
+}
+
+/**
+ * tpm_cr50_i2c_tis_set_ready() - Set status register to ready.
+ * @chip: A TPM chip.
+ *
+ * cr50 requires all 4 bytes of status register to be written.
+ */
+static void tpm_cr50_i2c_tis_set_ready(struct tpm_chip *chip)
+{
+ u8 buf[4] = { TPM_STS_COMMAND_READY };
+
+ tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), buf, sizeof(buf));
+ msleep(TPM_CR50_TIMEOUT_SHORT_MS);
+}
+
+/**
+ * tpm_cr50_i2c_get_burst_and_status() - Get burst count and status.
+ * @chip: A TPM chip.
+ * @mask: Status mask.
+ * @burst: Return value for burst.
+ * @status: Return value for status.
+ *
+ * cr50 uses bytes 3:2 of status register for burst count and
+ * all 4 bytes must be read.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_get_burst_and_status(struct tpm_chip *chip, u8 mask,
+ size_t *burst, u32 *status)
+{
+ unsigned long stop;
+ u8 buf[4];
+
+ *status = 0;
+
+ /* wait for burstcount */
+ stop = jiffies + chip->timeout_b;
+
+ do {
+ if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0) {
+ msleep(TPM_CR50_TIMEOUT_SHORT_MS);
+ continue;
+ }
+
+ *status = *buf;
+ *burst = le16_to_cpup((__le16 *)(buf + 1));
+
+ if ((*status & mask) == mask &&
+ *burst > 0 && *burst <= TPM_CR50_MAX_BUFSIZE - 1)
+ return 0;
+
+ msleep(TPM_CR50_TIMEOUT_SHORT_MS);
+ } while (time_before(jiffies, stop));
+
+ dev_err(&chip->dev, "Timeout reading burst and status\n");
+ return -ETIMEDOUT;
+}
+
+/**
+ * tpm_cr50_i2c_tis_recv() - TPM reception callback.
+ * @chip: A TPM chip.
+ * @buf: Reception buffer.
+ * @buf_len: Buffer length to read.
+ *
+ * Return:
+ * - >= 0: Number of read bytes.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
+{
+
+ u8 mask = TPM_STS_VALID | TPM_STS_DATA_AVAIL;
+ size_t burstcnt, cur, len, expected;
+ u8 addr = TPM_I2C_DATA_FIFO(0);
+ u32 status;
+ int rc;
+
+ if (buf_len < TPM_HEADER_SIZE)
+ return -EINVAL;
+
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, mask, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+
+ if (burstcnt > buf_len || burstcnt < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev,
+ "Unexpected burstcnt: %zu (max=%zu, min=%d)\n",
+ burstcnt, buf_len, TPM_HEADER_SIZE);
+ rc = -EIO;
+ goto out_err;
+ }
+
+ /* Read first chunk of burstcnt bytes */
+ rc = tpm_cr50_i2c_read(chip, addr, buf, burstcnt);
+ if (rc < 0) {
+ dev_err(&chip->dev, "Read of first chunk failed\n");
+ goto out_err;
+ }
+
+ /* Determine expected data in the return buffer */
+ expected = be32_to_cpup((__be32 *)(buf + 2));
+ if (expected > buf_len) {
+ dev_err(&chip->dev, "Buffer too small to receive i2c data\n");
+ rc = -E2BIG;
+ goto out_err;
+ }
+
+ /* Now read the rest of the data */
+ cur = burstcnt;
+ while (cur < expected) {
+ /* Read updated burst count and check status */
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, mask, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+
+ len = min_t(size_t, burstcnt, expected - cur);
+ rc = tpm_cr50_i2c_read(chip, addr, buf + cur, len);
+ if (rc < 0) {
+ dev_err(&chip->dev, "Read failed\n");
+ goto out_err;
+ }
+
+ cur += len;
+ }
+
+ /* Ensure TPM is done reading data */
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, TPM_STS_VALID, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+ if (status & TPM_STS_DATA_AVAIL) {
+ dev_err(&chip->dev, "Data still available\n");
+ rc = -EIO;
+ goto out_err;
+ }
+
+ tpm_cr50_release_locality(chip, false);
+ return cur;
+
+out_err:
+ /* Abort current transaction if still pending */
+ if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)
+ tpm_cr50_i2c_tis_set_ready(chip);
+
+ tpm_cr50_release_locality(chip, false);
+ return rc;
+}
+
+/**
+ * tpm_cr50_i2c_tis_send() - TPM transmission callback.
+ * @chip: A TPM chip.
+ * @buf: Buffer to send.
+ * @len: Buffer length.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ size_t burstcnt, limit, sent = 0;
+ u8 tpm_go[4] = { TPM_STS_GO };
+ unsigned long stop;
+ u32 status;
+ int rc;
+
+ rc = tpm_cr50_request_locality(chip);
+ if (rc < 0)
+ return rc;
+
+ /* Wait until TPM is ready for a command */
+ stop = jiffies + chip->timeout_b;
+ while (!(tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)) {
+ if (time_after(jiffies, stop)) {
+ rc = -ETIMEDOUT;
+ goto out_err;
+ }
+
+ tpm_cr50_i2c_tis_set_ready(chip);
+ }
+
+ while (len > 0) {
+ u8 mask = TPM_STS_VALID;
+
+ /* Wait for data if this is not the first chunk */
+ if (sent > 0)
+ mask |= TPM_STS_DATA_EXPECT;
+
+ /* Read burst count and check status */
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, mask, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+
+ /*
+ * Use burstcnt - 1 to account for the address byte
+ * that is inserted by tpm_cr50_i2c_write()
+ */
+ limit = min_t(size_t, burstcnt - 1, len);
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_DATA_FIFO(0), &buf[sent], limit);
+ if (rc < 0) {
+ dev_err(&chip->dev, "Write failed\n");
+ goto out_err;
+ }
+
+ sent += limit;
+ len -= limit;
+ }
+
+ /* Ensure TPM is not expecting more data */
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, TPM_STS_VALID, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+ if (status & TPM_STS_DATA_EXPECT) {
+ dev_err(&chip->dev, "Data still expected\n");
+ rc = -EIO;
+ goto out_err;
+ }
+
+ /* Start the TPM command */
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), tpm_go,
+ sizeof(tpm_go));
+ if (rc < 0) {
+ dev_err(&chip->dev, "Start command failed\n");
+ goto out_err;
+ }
+ return 0;
+
+out_err:
+ /* Abort current transaction if still pending */
+ if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)
+ tpm_cr50_i2c_tis_set_ready(chip);
+
+ tpm_cr50_release_locality(chip, false);
+ return rc;
+}
+
+/**
+ * tpm_cr50_i2c_req_canceled() - Callback to notify a request cancel.
+ * @chip: A TPM chip.
+ * @status: Status given by the cancel callback.
+ *
+ * Return:
+ * True if command is ready, False otherwise.
+ */
+static bool tpm_cr50_i2c_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return status == TPM_STS_COMMAND_READY;
+}
+
+static const struct tpm_class_ops cr50_i2c = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = &tpm_cr50_i2c_tis_status,
+ .recv = &tpm_cr50_i2c_tis_recv,
+ .send = &tpm_cr50_i2c_tis_send,
+ .cancel = &tpm_cr50_i2c_tis_set_ready,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = &tpm_cr50_i2c_req_canceled,
+};
+
+static const struct i2c_device_id cr50_i2c_table[] = {
+ {"cr50_i2c", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, cr50_i2c_table);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cr50_i2c_acpi_id[] = {
+ { "GOOG0005", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, cr50_i2c_acpi_id);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_cr50_i2c_match[] = {
+ { .compatible = "google,cr50", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_cr50_i2c_match);
+#endif
+
+/**
+ * tpm_cr50_i2c_probe() - Driver probe function.
+ * @client: I2C client information.
+ * @id: I2C device id.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tpm_i2c_cr50_priv_data *priv;
+ struct device *dev = &client->dev;
+ struct tpm_chip *chip;
+ u32 vendor;
+ u8 buf[4];
+ int rc;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ chip = tpmm_chip_alloc(dev, &cr50_i2c);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* cr50 is a TPM 2.0 chip */
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;
+
+ /* Default timeouts */
+ chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+ chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
+ dev_set_drvdata(&chip->dev, priv);
+ init_completion(&priv->tpm_ready);
+
+ if (client->irq > 0) {
+ rc = devm_request_irq(dev, client->irq, tpm_cr50_i2c_int_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev->driver->name, chip);
+ if (rc < 0) {
+ dev_err(dev, "Failed to probe IRQ %d\n", client->irq);
+ return rc;
+ }
+
+ disable_irq(client->irq);
+ priv->irq = client->irq;
+ } else {
+ dev_warn(dev, "No IRQ, will use %ums delay for TPM ready\n",
+ TPM_CR50_TIMEOUT_NOIRQ_MS);
+ }
+
+ rc = tpm_cr50_request_locality(chip);
+ if (rc < 0) {
+ dev_err(dev, "Could not request locality\n");
+ return rc;
+ }
+
+ /* Read four bytes from DID_VID register */
+ rc = tpm_cr50_i2c_read(chip, TPM_I2C_DID_VID(0), buf, sizeof(buf));
+ if (rc < 0) {
+ dev_err(dev, "Could not read vendor id\n");
+ tpm_cr50_release_locality(chip, true);
+ return rc;
+ }
+
+ vendor = le32_to_cpup((__le32 *)buf);
+ if (vendor != TPM_CR50_I2C_DID_VID) {
+ dev_err(dev, "Vendor ID did not match! ID was %08x\n", vendor);
+ tpm_cr50_release_locality(chip, true);
+ return -ENODEV;
+ }
+
+ dev_info(dev, "cr50 TPM 2.0 (i2c 0x%02x irq %d id 0x%x)\n",
+ client->addr, client->irq, vendor >> 16);
+
+ return tpm_chip_register(chip);
+}
+
+/**
+ * tpm_cr50_i2c_remove() - Driver remove function.
+ * @client: I2C client information.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+ struct device *dev = &client->dev;
+
+ if (!chip) {
+ dev_err(dev, "Could not get client data at remove\n");
+ return -ENODEV;
+ }
+
+ tpm_chip_unregister(chip);
+ tpm_cr50_release_locality(chip, true);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume);
+
+static struct i2c_driver cr50_i2c_driver = {
+ .id_table = cr50_i2c_table,
+ .probe = tpm_cr50_i2c_probe,
+ .remove = tpm_cr50_i2c_remove,
+ .driver = {
+ .name = "cr50_i2c",
+ .pm = &cr50_i2c_pm,
+ .acpi_match_table = ACPI_PTR(cr50_i2c_acpi_id),
+ .of_match_table = of_match_ptr(of_cr50_i2c_match),
+ },
+};
+
+module_i2c_driver(cr50_i2c_driver);
+
+MODULE_DESCRIPTION("cr50 TPM I2C Driver");
+MODULE_LICENSE("GPL");
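
One detail of the receive path worth spelling out (a sketch only, not part of the patch): the first burst read by tpm_cr50_i2c_tis_recv() always starts with the standard 10-byte TPM response header, and be32_to_cpup((__be32 *)(buf + 2)) simply extracts its big-endian length field to learn how many bytes in total the TPM wants to return. A hypothetical struct with that layout, for illustration:

struct tpm_response_header {
	__be16 tag;		/* offset 0 */
	__be32 length;		/* offset 2: total response size, header included */
	__be32 return_code;	/* offset 6 */
} __packed;

The driver keeps reading burst-sized chunks until 'cur' reaches this length, then verifies that TPM_STS_DATA_AVAIL has dropped.
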
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 85856cff506c..a588d56502d4 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -247,7 +247,8 @@ config CLK_TWL6040
config COMMON_CLK_AXI_CLKGEN
tristate "AXI clkgen driver"
- depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
+ depends on HAS_IOMEM || COMPILE_TEST
+ depends on OF
help
Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx
FPGAs. It is commonly used in Analog Devices' reference designs.
@@ -368,6 +369,13 @@ config COMMON_CLK_FIXED_MMIO
help
Support for Memory Mapped IO Fixed clocks
+config COMMON_CLK_K210
+ bool "Clock driver for the Canaan Kendryte K210 SoC"
+ depends on OF && RISCV && SOC_CANAAN
+ default SOC_CANAAN
+ help
+ Support for the Canaan Kendryte K210 RISC-V SoC clocks.
+
source "drivers/clk/actions/Kconfig"
source "drivers/clk/analogbits/Kconfig"
source "drivers/clk/baikal-t1/Kconfig"
@@ -379,6 +387,7 @@ source "drivers/clk/ingenic/Kconfig"
source "drivers/clk/keystone/Kconfig"
source "drivers/clk/mediatek/Kconfig"
source "drivers/clk/meson/Kconfig"
+source "drivers/clk/mstar/Kconfig"
source "drivers/clk/mvebu/Kconfig"
source "drivers/clk/qcom/Kconfig"
source "drivers/clk/renesas/Kconfig"
@@ -392,6 +401,7 @@ source "drivers/clk/tegra/Kconfig"
source "drivers/clk/ti/Kconfig"
source "drivers/clk/uniphier/Kconfig"
source "drivers/clk/x86/Kconfig"
+source "drivers/clk/xilinx/Kconfig"
source "drivers/clk/zynqmp/Kconfig"
endif
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index dbdc590e7de3..b22ae4f81e0b 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_COMMON_CLK_CDCE706) += clk-cdce706.o
obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
-obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
obj-$(CONFIG_ARCH_SPARX5) += clk-sparx5.o
obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o
obj-$(CONFIG_COMMON_CLK_FSL_FLEXSPI) += clk-fsl-flexspi.o
@@ -37,6 +36,7 @@ obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o
obj-$(CONFIG_MACH_ASPEED_G6) += clk-ast2600.o
obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o
+obj-$(CONFIG_COMMON_CLK_K210) += clk-k210.o
obj-$(CONFIG_COMMON_CLK_LOCHNAGAR) += clk-lochnagar.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
obj-$(CONFIG_COMMON_CLK_MAX9485) += clk-max9485.o
@@ -63,9 +63,7 @@ obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
obj-$(CONFIG_COMMON_CLK_STM32F) += clk-stm32f4.o
obj-$(CONFIG_COMMON_CLK_STM32H7) += clk-stm32h7.o
obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o
-obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
-obj-$(CONFIG_ARCH_U300) += clk-u300.o
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_COMMON_CLK_VC5) += clk-versaclock5.o
obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
@@ -95,6 +93,7 @@ obj-$(CONFIG_MACH_PIC32) += microchip/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_ARCH_MMP) += mmp/
endif
+obj-y += mstar/
obj-y += mvebu/
obj-$(CONFIG_ARCH_MXS) += mxs/
obj-$(CONFIG_COMMON_CLK_NXP) += nxp/
@@ -105,7 +104,6 @@ obj-y += renesas/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
obj-$(CONFIG_CLK_SIFIVE) += sifive/
-obj-$(CONFIG_ARCH_SIRF) += sirf/
obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
obj-$(CONFIG_ARCH_AGILEX) += socfpga/
obj-$(CONFIG_ARCH_STRATIX10) += socfpga/
@@ -122,6 +120,6 @@ obj-y += versatile/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_X86) += x86/
endif
-obj-$(CONFIG_ARCH_ZX) += zte/
+obj-y += xilinx/
obj-$(CONFIG_ARCH_ZYNQ) += zynq/
obj-$(CONFIG_COMMON_CLK_ZYNQMP) += zynqmp/
diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c
index 0fad1009f315..428a6f4b9ebc 100644
--- a/drivers/clk/at91/at91rm9200.c
+++ b/drivers/clk/at91/at91rm9200.c
@@ -215,5 +215,4 @@ err_free:
* deferring properly. Once this is fixed, this can be switched to a platform
* driver.
*/
-CLK_OF_DECLARE_DRIVER(at91rm9200_pmc, "atmel,at91rm9200-pmc",
- at91rm9200_pmc_setup);
+CLK_OF_DECLARE(at91rm9200_pmc, "atmel,at91rm9200-pmc", at91rm9200_pmc_setup);
diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c
index ceb5495f723a..b29843bea278 100644
--- a/drivers/clk/at91/at91sam9260.c
+++ b/drivers/clk/at91/at91sam9260.c
@@ -491,26 +491,26 @@ static void __init at91sam9260_pmc_setup(struct device_node *np)
{
at91sam926x_pmc_setup(np, &at91sam9260_data);
}
-CLK_OF_DECLARE_DRIVER(at91sam9260_pmc, "atmel,at91sam9260-pmc",
- at91sam9260_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9260_pmc, "atmel,at91sam9260-pmc", at91sam9260_pmc_setup);
static void __init at91sam9261_pmc_setup(struct device_node *np)
{
at91sam926x_pmc_setup(np, &at91sam9261_data);
}
-CLK_OF_DECLARE_DRIVER(at91sam9261_pmc, "atmel,at91sam9261-pmc",
- at91sam9261_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9261_pmc, "atmel,at91sam9261-pmc", at91sam9261_pmc_setup);
static void __init at91sam9263_pmc_setup(struct device_node *np)
{
at91sam926x_pmc_setup(np, &at91sam9263_data);
}
-CLK_OF_DECLARE_DRIVER(at91sam9263_pmc, "atmel,at91sam9263-pmc",
- at91sam9263_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9263_pmc, "atmel,at91sam9263-pmc", at91sam9263_pmc_setup);
static void __init at91sam9g20_pmc_setup(struct device_node *np)
{
at91sam926x_pmc_setup(np, &at91sam9g20_data);
}
-CLK_OF_DECLARE_DRIVER(at91sam9g20_pmc, "atmel,at91sam9g20-pmc",
- at91sam9g20_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9g20_pmc, "atmel,at91sam9g20-pmc", at91sam9g20_pmc_setup);
diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c
index 0214333dedd3..15da0dfe3ef2 100644
--- a/drivers/clk/at91/at91sam9g45.c
+++ b/drivers/clk/at91/at91sam9g45.c
@@ -228,5 +228,4 @@ err_free:
* The TCB is used as the clocksource so its clock is needed early. This means
* this can't be a platform driver.
*/
-CLK_OF_DECLARE_DRIVER(at91sam9g45_pmc, "atmel,at91sam9g45-pmc",
- at91sam9g45_pmc_setup);
+CLK_OF_DECLARE(at91sam9g45_pmc, "atmel,at91sam9g45-pmc", at91sam9g45_pmc_setup);
diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c
index f9db5316a7f1..7fe435f4b46b 100644
--- a/drivers/clk/at91/at91sam9n12.c
+++ b/drivers/clk/at91/at91sam9n12.c
@@ -255,5 +255,4 @@ err_free:
* The TCB is used as the clocksource so its clock is needed early. This means
* this can't be a platform driver.
*/
-CLK_OF_DECLARE_DRIVER(at91sam9n12_pmc, "atmel,at91sam9n12-pmc",
- at91sam9n12_pmc_setup);
+CLK_OF_DECLARE(at91sam9n12_pmc, "atmel,at91sam9n12-pmc", at91sam9n12_pmc_setup);
diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c
index 66736e03cfef..ecbabf5162bd 100644
--- a/drivers/clk/at91/at91sam9rl.c
+++ b/drivers/clk/at91/at91sam9rl.c
@@ -186,4 +186,5 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
err_free:
kfree(at91sam9rl_pmc);
}
-CLK_OF_DECLARE_DRIVER(at91sam9rl_pmc, "atmel,at91sam9rl-pmc", at91sam9rl_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9rl_pmc, "atmel,at91sam9rl-pmc", at91sam9rl_pmc_setup);
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 79b9d3667228..5cce48c64ea2 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -302,33 +302,33 @@ static void __init at91sam9g15_pmc_setup(struct device_node *np)
{
at91sam9x5_pmc_setup(np, at91sam9g15_periphck, true);
}
-CLK_OF_DECLARE_DRIVER(at91sam9g15_pmc, "atmel,at91sam9g15-pmc",
- at91sam9g15_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9g15_pmc, "atmel,at91sam9g15-pmc", at91sam9g15_pmc_setup);
static void __init at91sam9g25_pmc_setup(struct device_node *np)
{
at91sam9x5_pmc_setup(np, at91sam9g25_periphck, false);
}
-CLK_OF_DECLARE_DRIVER(at91sam9g25_pmc, "atmel,at91sam9g25-pmc",
- at91sam9g25_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9g25_pmc, "atmel,at91sam9g25-pmc", at91sam9g25_pmc_setup);
static void __init at91sam9g35_pmc_setup(struct device_node *np)
{
at91sam9x5_pmc_setup(np, at91sam9g35_periphck, true);
}
-CLK_OF_DECLARE_DRIVER(at91sam9g35_pmc, "atmel,at91sam9g35-pmc",
- at91sam9g35_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9g35_pmc, "atmel,at91sam9g35-pmc", at91sam9g35_pmc_setup);
static void __init at91sam9x25_pmc_setup(struct device_node *np)
{
at91sam9x5_pmc_setup(np, at91sam9x25_periphck, false);
}
-CLK_OF_DECLARE_DRIVER(at91sam9x25_pmc, "atmel,at91sam9x25-pmc",
- at91sam9x25_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9x25_pmc, "atmel,at91sam9x25-pmc", at91sam9x25_pmc_setup);
static void __init at91sam9x35_pmc_setup(struct device_node *np)
{
at91sam9x5_pmc_setup(np, at91sam9x35_periphck, true);
}
-CLK_OF_DECLARE_DRIVER(at91sam9x35_pmc, "atmel,at91sam9x35-pmc",
- at91sam9x35_pmc_setup);
+
+CLK_OF_DECLARE(at91sam9x35_pmc, "atmel,at91sam9x35-pmc", at91sam9x35_pmc_setup);
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index 9a5cbc7cd55a..3d1f78176c3e 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -372,4 +372,5 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
err_free:
kfree(sama5d2_pmc);
}
-CLK_OF_DECLARE_DRIVER(sama5d2_pmc, "atmel,sama5d2-pmc", sama5d2_pmc_setup);
+
+CLK_OF_DECLARE(sama5d2_pmc, "atmel,sama5d2-pmc", sama5d2_pmc_setup);
diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c
index 87009ee8effc..d376257807d2 100644
--- a/drivers/clk/at91/sama5d3.c
+++ b/drivers/clk/at91/sama5d3.c
@@ -255,4 +255,4 @@ err_free:
* The TCB is used as the clocksource so its clock is needed early. This means
* this can't be a platform driver.
*/
-CLK_OF_DECLARE_DRIVER(sama5d3_pmc, "atmel,sama5d3-pmc", sama5d3_pmc_setup);
+CLK_OF_DECLARE(sama5d3_pmc, "atmel,sama5d3-pmc", sama5d3_pmc_setup);
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index 57fff790188b..5cbaac68da44 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -286,4 +286,5 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
err_free:
kfree(sama5d4_pmc);
}
-CLK_OF_DECLARE_DRIVER(sama5d4_pmc, "atmel,sama5d4-pmc", sama5d4_pmc_setup);
+
+CLK_OF_DECLARE(sama5d4_pmc, "atmel,sama5d4-pmc", sama5d4_pmc_setup);
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index 274441e2ddb2..33da30f99c79 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -704,7 +704,7 @@ static const struct clk_ops iproc_clk_ops = {
.set_rate = iproc_clk_set_rate,
};
-/**
+/*
* Some PLLs require the PLL SW override bit to be set before changes can be
* applied to the PLL
*/
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index 177368cac6dd..a55b37fc2c8b 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -17,7 +17,8 @@
#define ASPEED_G6_NUM_CLKS 71
-#define ASPEED_G6_SILICON_REV 0x004
+#define ASPEED_G6_SILICON_REV 0x014
+#define CHIP_REVISION_ID GENMASK(23, 16)
#define ASPEED_G6_RESET_CTRL 0x040
#define ASPEED_G6_RESET_CTRL2 0x050
@@ -190,18 +191,34 @@ static struct clk_hw *ast2600_calc_pll(const char *name, u32 val)
static struct clk_hw *ast2600_calc_apll(const char *name, u32 val)
{
unsigned int mult, div;
+ u32 chip_id = readl(scu_g6_base + ASPEED_G6_SILICON_REV);
- if (val & BIT(20)) {
- /* Pass through mode */
- mult = div = 1;
+ if (((chip_id & CHIP_REVISION_ID) >> 16) >= 2) {
+ if (val & BIT(24)) {
+ /* Pass through mode */
+ mult = div = 1;
+ } else {
+ /* F = 25Mhz * [(m + 1) / (n + 1)] / (p + 1) */
+ u32 m = val & 0x1fff;
+ u32 n = (val >> 13) & 0x3f;
+ u32 p = (val >> 19) & 0xf;
+
+ mult = (m + 1);
+ div = (n + 1) * (p + 1);
+ }
} else {
- /* F = 25Mhz * (2-od) * [(m + 2) / (n + 1)] */
- u32 m = (val >> 5) & 0x3f;
- u32 od = (val >> 4) & 0x1;
- u32 n = val & 0xf;
+ if (val & BIT(20)) {
+ /* Pass through mode */
+ mult = div = 1;
+ } else {
+ /* F = 25Mhz * (2-od) * [(m + 2) / (n + 1)] */
+ u32 m = (val >> 5) & 0x3f;
+ u32 od = (val >> 4) & 0x1;
+ u32 n = val & 0xf;
- mult = (2 - od) * (m + 2);
- div = n + 1;
+ mult = (2 - od) * (m + 2);
+ div = n + 1;
+ }
}
return clk_hw_register_fixed_factor(NULL, name, "clkin", 0,
mult, div);
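
A quick worked example for the new formula used above when the reported chip revision is 2 or higher (register field values chosen purely for illustration): m = 95, n = 1, p = 1 gives 25 MHz * (95 + 1) / ((1 + 1) * (1 + 1)) = 600 MHz, i.e. the fixed-factor clock is registered with mult = 96 and div = 4. On older silicon the same register would instead be decoded with the original (2 - od) * (m + 2) / (n + 1) formula, which is why the function reads CHIP_REVISION_ID first.
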
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index ad86e031ba3e..ac6ff736ac8f 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -108,6 +108,13 @@ static uint32_t axi_clkgen_lookup_lock(unsigned int m)
return 0x1f1f00fa;
}
+static const struct axi_clkgen_limits axi_clkgen_zynqmp_default_limits = {
+ .fpfd_min = 10000,
+ .fpfd_max = 450000,
+ .fvco_min = 800000,
+ .fvco_max = 1600000,
+};
+
static const struct axi_clkgen_limits axi_clkgen_zynq_default_limits = {
.fpfd_min = 10000,
.fpfd_max = 300000,
@@ -503,7 +510,6 @@ static int axi_clkgen_probe(struct platform_device *pdev)
struct clk_init_data init;
const char *parent_names[2];
const char *clk_name;
- struct resource *mem;
unsigned int i;
int ret;
@@ -515,8 +521,7 @@ static int axi_clkgen_probe(struct platform_device *pdev)
if (!axi_clkgen)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- axi_clkgen->base = devm_ioremap_resource(&pdev->dev, mem);
+ axi_clkgen->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(axi_clkgen->base))
return PTR_ERR(axi_clkgen->base);
@@ -561,6 +566,10 @@ static int axi_clkgen_remove(struct platform_device *pdev)
static const struct of_device_id axi_clkgen_ids[] = {
{
+ .compatible = "adi,zynqmp-axi-clkgen-2.00.a",
+ .data = &axi_clkgen_zynqmp_default_limits,
+ },
+ {
.compatible = "adi,axi-clkgen-2.00.a",
.data = &axi_clkgen_zynq_default_limits,
},
diff --git a/drivers/clk/clk-bd718x7.c b/drivers/clk/clk-bd718x7.c
index b52e8d6f660c..17d90e09f1c0 100644
--- a/drivers/clk/clk-bd718x7.c
+++ b/drivers/clk/clk-bd718x7.c
@@ -31,12 +31,12 @@ struct bd718xx_clk {
u8 reg;
u8 mask;
struct platform_device *pdev;
- struct rohm_regmap_dev *mfd;
+ struct regmap *regmap;
};
static int bd71837_clk_set(struct bd718xx_clk *c, unsigned int status)
{
- return regmap_update_bits(c->mfd->regmap, c->reg, c->mask, status);
+ return regmap_update_bits(c->regmap, c->reg, c->mask, status);
}
static void bd71837_clk_disable(struct clk_hw *hw)
@@ -62,7 +62,7 @@ static int bd71837_clk_is_enabled(struct clk_hw *hw)
int rval;
struct bd718xx_clk *c = container_of(hw, struct bd718xx_clk, hw);
- rval = regmap_read(c->mfd->regmap, c->reg, &enabled);
+ rval = regmap_read(c->regmap, c->reg, &enabled);
if (rval)
return rval;
@@ -82,7 +82,6 @@ static int bd71837_clk_probe(struct platform_device *pdev)
int rval = -ENOMEM;
const char *parent_clk;
struct device *parent = pdev->dev.parent;
- struct rohm_regmap_dev *mfd = dev_get_drvdata(parent);
struct clk_init_data init = {
.name = "bd718xx-32k-out",
.ops = &bd71837_clk_ops,
@@ -93,6 +92,10 @@ static int bd71837_clk_probe(struct platform_device *pdev)
if (!c)
return -ENOMEM;
+ c->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!c->regmap)
+ return -ENODEV;
+
init.num_parents = 1;
parent_clk = of_clk_get_parent_name(parent->of_node, 0);
@@ -119,7 +122,6 @@ static int bd71837_clk_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Unknown clk chip\n");
return -EINVAL;
}
- c->mfd = mfd;
c->pdev = pdev;
c->hw.init = &init;
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index c499799693cc..344997203f0e 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -494,8 +494,13 @@ struct clk_hw *__clk_hw_register_divider(struct device *dev,
else
init.ops = &clk_divider_ops;
init.flags = flags;
- init.parent_names = (parent_name ? &parent_name: NULL);
- init.num_parents = (parent_name ? 1 : 0);
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.parent_hws = parent_hw ? &parent_hw : NULL;
+ init.parent_data = parent_data;
+ if (parent_name || parent_hw || parent_data)
+ init.num_parents = 1;
+ else
+ init.num_parents = 0;
/* struct clk_divider assignments */
div->reg = reg;
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c
deleted file mode 100644
index 85beaacb4088..000000000000
--- a/drivers/clk/clk-efm32gg.c
+++ /dev/null
@@ -1,84 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2013 Pengutronix
- * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- */
-#include <linux/io.h>
-#include <linux/clk-provider.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-
-#include <dt-bindings/clock/efm32-cmu.h>
-
-#define CMU_HFPERCLKEN0 0x44
-#define CMU_MAX_CLKS 37
-
-static struct clk_hw_onecell_data *clk_data;
-
-static void __init efm32gg_cmu_init(struct device_node *np)
-{
- int i;
- void __iomem *base;
- struct clk_hw **hws;
-
- clk_data = kzalloc(struct_size(clk_data, hws, CMU_MAX_CLKS),
- GFP_KERNEL);
-
- if (!clk_data)
- return;
-
- hws = clk_data->hws;
-
- for (i = 0; i < CMU_MAX_CLKS; ++i)
- hws[i] = ERR_PTR(-ENOENT);
-
- base = of_iomap(np, 0);
- if (!base) {
- pr_warn("Failed to map address range for efm32gg,cmu node\n");
- return;
- }
-
- hws[clk_HFXO] = clk_hw_register_fixed_rate(NULL, "HFXO", NULL, 0,
- 48000000);
-
- hws[clk_HFPERCLKUSART0] = clk_hw_register_gate(NULL, "HFPERCLK.USART0",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 0, 0, NULL);
- hws[clk_HFPERCLKUSART1] = clk_hw_register_gate(NULL, "HFPERCLK.USART1",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 1, 0, NULL);
- hws[clk_HFPERCLKUSART2] = clk_hw_register_gate(NULL, "HFPERCLK.USART2",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 2, 0, NULL);
- hws[clk_HFPERCLKUART0] = clk_hw_register_gate(NULL, "HFPERCLK.UART0",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 3, 0, NULL);
- hws[clk_HFPERCLKUART1] = clk_hw_register_gate(NULL, "HFPERCLK.UART1",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 4, 0, NULL);
- hws[clk_HFPERCLKTIMER0] = clk_hw_register_gate(NULL, "HFPERCLK.TIMER0",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 5, 0, NULL);
- hws[clk_HFPERCLKTIMER1] = clk_hw_register_gate(NULL, "HFPERCLK.TIMER1",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 6, 0, NULL);
- hws[clk_HFPERCLKTIMER2] = clk_hw_register_gate(NULL, "HFPERCLK.TIMER2",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 7, 0, NULL);
- hws[clk_HFPERCLKTIMER3] = clk_hw_register_gate(NULL, "HFPERCLK.TIMER3",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 8, 0, NULL);
- hws[clk_HFPERCLKACMP0] = clk_hw_register_gate(NULL, "HFPERCLK.ACMP0",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 9, 0, NULL);
- hws[clk_HFPERCLKACMP1] = clk_hw_register_gate(NULL, "HFPERCLK.ACMP1",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 10, 0, NULL);
- hws[clk_HFPERCLKI2C0] = clk_hw_register_gate(NULL, "HFPERCLK.I2C0",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 11, 0, NULL);
- hws[clk_HFPERCLKI2C1] = clk_hw_register_gate(NULL, "HFPERCLK.I2C1",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 12, 0, NULL);
- hws[clk_HFPERCLKGPIO] = clk_hw_register_gate(NULL, "HFPERCLK.GPIO",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 13, 0, NULL);
- hws[clk_HFPERCLKVCMP] = clk_hw_register_gate(NULL, "HFPERCLK.VCMP",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 14, 0, NULL);
- hws[clk_HFPERCLKPRS] = clk_hw_register_gate(NULL, "HFPERCLK.PRS",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 15, 0, NULL);
- hws[clk_HFPERCLKADC0] = clk_hw_register_gate(NULL, "HFPERCLK.ADC0",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 16, 0, NULL);
- hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0",
- "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL);
-
- of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
-}
-CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 910e6e74ae90..4f7bf3929d6d 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -64,10 +64,16 @@ const struct clk_ops clk_fixed_factor_ops = {
};
EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
+static void devm_clk_hw_register_fixed_factor_release(struct device *dev, void *res)
+{
+ clk_hw_unregister_fixed_factor(&((struct clk_fixed_factor *)res)->hw);
+}
+
static struct clk_hw *
__clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
const char *name, const char *parent_name, int index,
- unsigned long flags, unsigned int mult, unsigned int div)
+ unsigned long flags, unsigned int mult, unsigned int div,
+ bool devm)
{
struct clk_fixed_factor *fix;
struct clk_init_data init = { };
@@ -75,7 +81,15 @@ __clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
struct clk_hw *hw;
int ret;
- fix = kmalloc(sizeof(*fix), GFP_KERNEL);
+ /* You can't use devm without a dev */
+ if (devm && !dev)
+ return ERR_PTR(-EINVAL);
+
+ if (devm)
+ fix = devres_alloc(devm_clk_hw_register_fixed_factor_release,
+ sizeof(*fix), GFP_KERNEL);
+ else
+ fix = kmalloc(sizeof(*fix), GFP_KERNEL);
if (!fix)
return ERR_PTR(-ENOMEM);
@@ -99,9 +113,13 @@ __clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
else
ret = of_clk_hw_register(np, hw);
if (ret) {
- kfree(fix);
+ if (devm)
+ devres_free(fix);
+ else
+ kfree(fix);
hw = ERR_PTR(ret);
- }
+ } else if (devm)
+ devres_add(dev, fix);
return hw;
}
@@ -111,7 +129,7 @@ struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
unsigned int mult, unsigned int div)
{
return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, -1,
- flags, mult, div);
+ flags, mult, div, false);
}
EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor);
@@ -153,6 +171,15 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_fixed_factor);
+struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div)
+{
+ return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, -1,
+ flags, mult, div, true);
+}
+EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor);
+
#ifdef CONFIG_OF
static const struct of_device_id set_rate_parent_matches[] = {
{ .compatible = "allwinner,sun4i-a10-pll3-2x-clk" },
@@ -185,7 +212,7 @@ static struct clk_hw *_of_fixed_factor_clk_setup(struct device_node *node)
flags |= CLK_SET_RATE_PARENT;
hw = __clk_hw_register_fixed_factor(NULL, node, clk_name, NULL, 0,
- flags, mult, div);
+ flags, mult, div, false);
if (IS_ERR(hw)) {
/*
* Clear OF_POPULATED flag so that clock registration can be
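
As a usage sketch for the devm_clk_hw_register_fixed_factor() helper introduced above (driver and clock names here are made up, and the snippet assumes linux/clk-provider.h and linux/platform_device.h), a platform driver can now register a fixed-factor clock from probe and rely on devres to unregister it on unbind:

static int foo_clk_probe(struct platform_device *pdev)
{
	struct clk_hw *hw;

	/* "foo_bus" runs at parent rate / 2; dropped automatically on detach */
	hw = devm_clk_hw_register_fixed_factor(&pdev->dev, "foo_bus",
					       "foo_parent", 0, 1, 2);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get, hw);
}
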
diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c
index 51f26619b6a2..5225d17d6b3f 100644
--- a/drivers/clk/clk-fixed-mmio.c
+++ b/drivers/clk/clk-fixed-mmio.c
@@ -55,7 +55,7 @@ static void __init of_fixed_mmio_clk_setup(struct device_node *node)
}
CLK_OF_DECLARE(fixed_mmio_clk, "fixed-mmio-clock", of_fixed_mmio_clk_setup);
-/**
+/*
* This is not executed when of_fixed_mmio_clk_setup succeeded.
*/
static int of_fixed_mmio_clk_probe(struct platform_device *pdev)
diff --git a/drivers/clk/clk-k210.c b/drivers/clk/clk-k210.c
new file mode 100644
index 000000000000..6c84abf5b2e3
--- /dev/null
+++ b/drivers/clk/clk-k210.c
@@ -0,0 +1,1007 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com>
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ */
+#define pr_fmt(fmt) "k210-clk: " fmt
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_clk.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/clk-provider.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <soc/canaan/k210-sysctl.h>
+
+#include <dt-bindings/clock/k210-clk.h>
+
+struct k210_sysclk;
+
+struct k210_clk {
+ int id;
+ struct k210_sysclk *ksc;
+ struct clk_hw hw;
+};
+
+struct k210_clk_cfg {
+ const char *name;
+ u8 gate_reg;
+ u8 gate_bit;
+ u8 div_reg;
+ u8 div_shift;
+ u8 div_width;
+ u8 div_type;
+ u8 mux_reg;
+ u8 mux_bit;
+};
+
+enum k210_clk_div_type {
+ K210_DIV_NONE,
+ K210_DIV_ONE_BASED,
+ K210_DIV_DOUBLE_ONE_BASED,
+ K210_DIV_POWER_OF_TWO,
+};
+
+#define K210_GATE(_reg, _bit) \
+ .gate_reg = (_reg), \
+ .gate_bit = (_bit)
+
+#define K210_DIV(_reg, _shift, _width, _type) \
+ .div_reg = (_reg), \
+ .div_shift = (_shift), \
+ .div_width = (_width), \
+ .div_type = (_type)
+
+#define K210_MUX(_reg, _bit) \
+ .mux_reg = (_reg), \
+ .mux_bit = (_bit)
+
+static struct k210_clk_cfg k210_clk_cfgs[K210_NUM_CLKS] = {
+ /* Gated clocks, no mux, no divider */
+ [K210_CLK_CPU] = {
+ .name = "cpu",
+ K210_GATE(K210_SYSCTL_EN_CENT, 0)
+ },
+ [K210_CLK_DMA] = {
+ .name = "dma",
+ K210_GATE(K210_SYSCTL_EN_PERI, 1)
+ },
+ [K210_CLK_FFT] = {
+ .name = "fft",
+ K210_GATE(K210_SYSCTL_EN_PERI, 4)
+ },
+ [K210_CLK_GPIO] = {
+ .name = "gpio",
+ K210_GATE(K210_SYSCTL_EN_PERI, 5)
+ },
+ [K210_CLK_UART1] = {
+ .name = "uart1",
+ K210_GATE(K210_SYSCTL_EN_PERI, 16)
+ },
+ [K210_CLK_UART2] = {
+ .name = "uart2",
+ K210_GATE(K210_SYSCTL_EN_PERI, 17)
+ },
+ [K210_CLK_UART3] = {
+ .name = "uart3",
+ K210_GATE(K210_SYSCTL_EN_PERI, 18)
+ },
+ [K210_CLK_FPIOA] = {
+ .name = "fpioa",
+ K210_GATE(K210_SYSCTL_EN_PERI, 20)
+ },
+ [K210_CLK_SHA] = {
+ .name = "sha",
+ K210_GATE(K210_SYSCTL_EN_PERI, 26)
+ },
+ [K210_CLK_AES] = {
+ .name = "aes",
+ K210_GATE(K210_SYSCTL_EN_PERI, 19)
+ },
+ [K210_CLK_OTP] = {
+ .name = "otp",
+ K210_GATE(K210_SYSCTL_EN_PERI, 27)
+ },
+ [K210_CLK_RTC] = {
+ .name = "rtc",
+ K210_GATE(K210_SYSCTL_EN_PERI, 29)
+ },
+
+ /* Gated divider clocks */
+ [K210_CLK_SRAM0] = {
+ .name = "sram0",
+ K210_GATE(K210_SYSCTL_EN_CENT, 1),
+ K210_DIV(K210_SYSCTL_THR0, 0, 4, K210_DIV_ONE_BASED)
+ },
+ [K210_CLK_SRAM1] = {
+ .name = "sram1",
+ K210_GATE(K210_SYSCTL_EN_CENT, 2),
+ K210_DIV(K210_SYSCTL_THR0, 4, 4, K210_DIV_ONE_BASED)
+ },
+ [K210_CLK_ROM] = {
+ .name = "rom",
+ K210_GATE(K210_SYSCTL_EN_PERI, 0),
+ K210_DIV(K210_SYSCTL_THR0, 16, 4, K210_DIV_ONE_BASED)
+ },
+ [K210_CLK_DVP] = {
+ .name = "dvp",
+ K210_GATE(K210_SYSCTL_EN_PERI, 3),
+ K210_DIV(K210_SYSCTL_THR0, 12, 4, K210_DIV_ONE_BASED)
+ },
+ [K210_CLK_APB0] = {
+ .name = "apb0",
+ K210_GATE(K210_SYSCTL_EN_CENT, 3),
+ K210_DIV(K210_SYSCTL_SEL0, 3, 3, K210_DIV_ONE_BASED)
+ },
+ [K210_CLK_APB1] = {
+ .name = "apb1",
+ K210_GATE(K210_SYSCTL_EN_CENT, 4),
+ K210_DIV(K210_SYSCTL_SEL0, 6, 3, K210_DIV_ONE_BASED)
+ },
+ [K210_CLK_APB2] = {
+ .name = "apb2",
+ K210_GATE(K210_SYSCTL_EN_CENT, 5),
+ K210_DIV(K210_SYSCTL_SEL0, 9, 3, K210_DIV_ONE_BASED)
+ },
+ [K210_CLK_AI] = {
+ .name = "ai",
+ K210_GATE(K210_SYSCTL_EN_PERI, 2),
+ K210_DIV(K210_SYSCTL_THR0, 8, 4, K210_DIV_ONE_BASED)
+ },
+ [K210_CLK_SPI0] = {
+ .name = "spi0",
+ K210_GATE(K210_SYSCTL_EN_PERI, 6),
+ K210_DIV(K210_SYSCTL_THR1, 0, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_SPI1] = {
+ .name = "spi1",
+ K210_GATE(K210_SYSCTL_EN_PERI, 7),
+ K210_DIV(K210_SYSCTL_THR1, 8, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_SPI2] = {
+ .name = "spi2",
+ K210_GATE(K210_SYSCTL_EN_PERI, 8),
+ K210_DIV(K210_SYSCTL_THR1, 16, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_I2C0] = {
+ .name = "i2c0",
+ K210_GATE(K210_SYSCTL_EN_PERI, 13),
+ K210_DIV(K210_SYSCTL_THR5, 8, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_I2C1] = {
+ .name = "i2c1",
+ K210_GATE(K210_SYSCTL_EN_PERI, 14),
+ K210_DIV(K210_SYSCTL_THR5, 16, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_I2C2] = {
+ .name = "i2c2",
+ K210_GATE(K210_SYSCTL_EN_PERI, 15),
+ K210_DIV(K210_SYSCTL_THR5, 24, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_WDT0] = {
+ .name = "wdt0",
+ K210_GATE(K210_SYSCTL_EN_PERI, 24),
+ K210_DIV(K210_SYSCTL_THR6, 0, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_WDT1] = {
+ .name = "wdt1",
+ K210_GATE(K210_SYSCTL_EN_PERI, 25),
+ K210_DIV(K210_SYSCTL_THR6, 8, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_I2S0] = {
+ .name = "i2s0",
+ K210_GATE(K210_SYSCTL_EN_PERI, 10),
+ K210_DIV(K210_SYSCTL_THR3, 0, 16, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_I2S1] = {
+ .name = "i2s1",
+ K210_GATE(K210_SYSCTL_EN_PERI, 11),
+ K210_DIV(K210_SYSCTL_THR3, 16, 16, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_I2S2] = {
+ .name = "i2s2",
+ K210_GATE(K210_SYSCTL_EN_PERI, 12),
+ K210_DIV(K210_SYSCTL_THR4, 0, 16, K210_DIV_DOUBLE_ONE_BASED)
+ },
+
+ /* Divider clocks, no gate, no mux */
+ [K210_CLK_I2S0_M] = {
+ .name = "i2s0_m",
+ K210_DIV(K210_SYSCTL_THR4, 16, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_I2S1_M] = {
+ .name = "i2s1_m",
+ K210_DIV(K210_SYSCTL_THR4, 24, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+ [K210_CLK_I2S2_M] = {
+ .name = "i2s2_m",
+ K210_DIV(K210_SYSCTL_THR4, 0, 8, K210_DIV_DOUBLE_ONE_BASED)
+ },
+
+ /* Muxed gated divider clocks */
+ [K210_CLK_SPI3] = {
+ .name = "spi3",
+ K210_GATE(K210_SYSCTL_EN_PERI, 9),
+ K210_DIV(K210_SYSCTL_THR1, 24, 8, K210_DIV_DOUBLE_ONE_BASED),
+ K210_MUX(K210_SYSCTL_SEL0, 12)
+ },
+ [K210_CLK_TIMER0] = {
+ .name = "timer0",
+ K210_GATE(K210_SYSCTL_EN_PERI, 21),
+ K210_DIV(K210_SYSCTL_THR2, 0, 8, K210_DIV_DOUBLE_ONE_BASED),
+ K210_MUX(K210_SYSCTL_SEL0, 13)
+ },
+ [K210_CLK_TIMER1] = {
+ .name = "timer1",
+ K210_GATE(K210_SYSCTL_EN_PERI, 22),
+ K210_DIV(K210_SYSCTL_THR2, 8, 8, K210_DIV_DOUBLE_ONE_BASED),
+ K210_MUX(K210_SYSCTL_SEL0, 14)
+ },
+ [K210_CLK_TIMER2] = {
+ .name = "timer2",
+ K210_GATE(K210_SYSCTL_EN_PERI, 23),
+ K210_DIV(K210_SYSCTL_THR2, 16, 8, K210_DIV_DOUBLE_ONE_BASED),
+ K210_MUX(K210_SYSCTL_SEL0, 15)
+ },
+};
+
+/*
+ * PLL control register bits.
+ */
+#define K210_PLL_CLKR GENMASK(3, 0)
+#define K210_PLL_CLKF GENMASK(9, 4)
+#define K210_PLL_CLKOD GENMASK(13, 10)
+#define K210_PLL_BWADJ GENMASK(19, 14)
+#define K210_PLL_RESET (1 << 20)
+#define K210_PLL_PWRD (1 << 21)
+#define K210_PLL_INTFB (1 << 22)
+#define K210_PLL_BYPASS (1 << 23)
+#define K210_PLL_TEST (1 << 24)
+#define K210_PLL_EN (1 << 25)
+#define K210_PLL_SEL GENMASK(27, 26) /* PLL2 only */
+
+/*
+ * PLL lock register bits.
+ */
+#define K210_PLL_LOCK 0
+#define K210_PLL_CLEAR_SLIP 2
+#define K210_PLL_TEST_OUT 3
+
+/*
+ * Clock selector register bits.
+ */
+#define K210_ACLK_SEL BIT(0)
+#define K210_ACLK_DIV GENMASK(2, 1)
+
+/*
+ * PLLs.
+ */
+enum k210_pll_id {
+ K210_PLL0, K210_PLL1, K210_PLL2, K210_PLL_NUM
+};
+
+struct k210_pll {
+ enum k210_pll_id id;
+ struct k210_sysclk *ksc;
+ void __iomem *base;
+ void __iomem *reg;
+ void __iomem *lock;
+ u8 lock_shift;
+ u8 lock_width;
+ struct clk_hw hw;
+};
+#define to_k210_pll(_hw) container_of(_hw, struct k210_pll, hw)
+
+/*
+ * PLLs configuration: by default PLL0 runs at 780 MHz and PLL1 at 299 MHz.
+ * The first 2 SRAM banks depend on ACLK/CPU clock which is by default PLL0
+ * rate divided by 2. Set PLL1 to 390 MHz so that the third SRAM bank has the
+ * same clock as the first 2.
+ */
+struct k210_pll_cfg {
+ u32 reg;
+ u8 lock_shift;
+ u8 lock_width;
+ u32 r;
+ u32 f;
+ u32 od;
+ u32 bwadj;
+};
+
+static struct k210_pll_cfg k210_plls_cfg[] = {
+ { K210_SYSCTL_PLL0, 0, 2, 0, 59, 1, 59 }, /* 780 MHz */
+ { K210_SYSCTL_PLL1, 8, 1, 0, 59, 3, 59 }, /* 390 MHz */
+ { K210_SYSCTL_PLL2, 16, 1, 0, 22, 1, 22 }, /* 299 MHz */
+};
+
+/**
+ * struct k210_sysclk - sysclk driver data
+ * @regs: system controller registers start address
+ * @clk_lock: clock setting spinlock
+ * @plls: SoC PLLs descriptors
+ * @aclk: ACLK clock
+ * @clks: All other clocks
+ */
+struct k210_sysclk {
+ void __iomem *regs;
+ spinlock_t clk_lock;
+ struct k210_pll plls[K210_PLL_NUM];
+ struct clk_hw aclk;
+ struct k210_clk clks[K210_NUM_CLKS];
+};
+
+#define to_k210_sysclk(_hw) container_of(_hw, struct k210_sysclk, aclk)
+
+/*
+ * Set ACLK parent selector: 0 for IN0, 1 for PLL0.
+ */
+static void k210_aclk_set_selector(void __iomem *regs, u8 sel)
+{
+ u32 reg = readl(regs + K210_SYSCTL_SEL0);
+
+ if (sel)
+ reg |= K210_ACLK_SEL;
+ else
+ reg &= ~K210_ACLK_SEL;
+ writel(reg, regs + K210_SYSCTL_SEL0);
+}
+
+static void k210_init_pll(void __iomem *regs, enum k210_pll_id pllid,
+ struct k210_pll *pll)
+{
+ pll->id = pllid;
+ pll->reg = regs + k210_plls_cfg[pllid].reg;
+ pll->lock = regs + K210_SYSCTL_PLL_LOCK;
+ pll->lock_shift = k210_plls_cfg[pllid].lock_shift;
+ pll->lock_width = k210_plls_cfg[pllid].lock_width;
+}
+
+static void k210_pll_wait_for_lock(struct k210_pll *pll)
+{
+ u32 reg, mask = GENMASK(pll->lock_shift + pll->lock_width - 1,
+ pll->lock_shift);
+
+ while (true) {
+ reg = readl(pll->lock);
+ if ((reg & mask) == mask)
+ break;
+
+ reg |= BIT(pll->lock_shift + K210_PLL_CLEAR_SLIP);
+ writel(reg, pll->lock);
+ }
+}
+
+static bool k210_pll_hw_is_enabled(struct k210_pll *pll)
+{
+ u32 reg = readl(pll->reg);
+ u32 mask = K210_PLL_PWRD | K210_PLL_EN;
+
+ if (reg & K210_PLL_RESET)
+ return false;
+
+ return (reg & mask) == mask;
+}
+
+static void k210_pll_enable_hw(void __iomem *regs, struct k210_pll *pll)
+{
+ struct k210_pll_cfg *pll_cfg = &k210_plls_cfg[pll->id];
+ u32 reg;
+
+ if (k210_pll_hw_is_enabled(pll))
+ return;
+
+ /*
+ * For PLL0, we need to re-parent ACLK to IN0 to keep the CPU cores and
+ * SRAM running.
+ */
+ if (pll->id == K210_PLL0)
+ k210_aclk_set_selector(regs, 0);
+
+ /* Set PLL factors */
+ reg = readl(pll->reg);
+ reg &= ~GENMASK(19, 0);
+ reg |= FIELD_PREP(K210_PLL_CLKR, pll_cfg->r);
+ reg |= FIELD_PREP(K210_PLL_CLKF, pll_cfg->f);
+ reg |= FIELD_PREP(K210_PLL_CLKOD, pll_cfg->od);
+ reg |= FIELD_PREP(K210_PLL_BWADJ, pll_cfg->bwadj);
+ reg |= K210_PLL_PWRD;
+ writel(reg, pll->reg);
+
+ /*
+ * Reset the PLL: ensure reset is low before asserting it.
+ * The magic NOPs come from the Kendryte reference SDK.
+ */
+ reg &= ~K210_PLL_RESET;
+ writel(reg, pll->reg);
+ reg |= K210_PLL_RESET;
+ writel(reg, pll->reg);
+ nop();
+ nop();
+ reg &= ~K210_PLL_RESET;
+ writel(reg, pll->reg);
+
+ k210_pll_wait_for_lock(pll);
+
+ reg &= ~K210_PLL_BYPASS;
+ reg |= K210_PLL_EN;
+ writel(reg, pll->reg);
+
+ if (pll->id == K210_PLL0)
+ k210_aclk_set_selector(regs, 1);
+}
+
+static int k210_pll_enable(struct clk_hw *hw)
+{
+ struct k210_pll *pll = to_k210_pll(hw);
+ struct k210_sysclk *ksc = pll->ksc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ksc->clk_lock, flags);
+
+ k210_pll_enable_hw(ksc->regs, pll);
+
+ spin_unlock_irqrestore(&ksc->clk_lock, flags);
+
+ return 0;
+}
+
+static void k210_pll_disable(struct clk_hw *hw)
+{
+ struct k210_pll *pll = to_k210_pll(hw);
+ struct k210_sysclk *ksc = pll->ksc;
+ unsigned long flags;
+ u32 reg;
+
+ /*
+ * Bypassing before powering off is important so child clocks do not
+ * stop working. This is especially important for pll0, the indirect
+ * parent of the cpu clock.
+ */
+ spin_lock_irqsave(&ksc->clk_lock, flags);
+ reg = readl(pll->reg);
+ reg |= K210_PLL_BYPASS;
+ writel(reg, pll->reg);
+
+ reg &= ~K210_PLL_PWRD;
+ reg &= ~K210_PLL_EN;
+ writel(reg, pll->reg);
+ spin_unlock_irqrestore(&ksc->clk_lock, flags);
+}
+
+static int k210_pll_is_enabled(struct clk_hw *hw)
+{
+ return k210_pll_hw_is_enabled(to_k210_pll(hw));
+}
+
+static unsigned long k210_pll_get_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct k210_pll *pll = to_k210_pll(hw);
+ u32 reg = readl(pll->reg);
+ u32 r, f, od;
+
+ if (reg & K210_PLL_BYPASS)
+ return parent_rate;
+
+ if (!(reg & K210_PLL_PWRD))
+ return 0;
+
+ r = FIELD_GET(K210_PLL_CLKR, reg) + 1;
+ f = FIELD_GET(K210_PLL_CLKF, reg) + 1;
+ od = FIELD_GET(K210_PLL_CLKOD, reg) + 1;
+
+ return (u64)parent_rate * f / (r * od);
+}
+
+static const struct clk_ops k210_pll_ops = {
+ .enable = k210_pll_enable,
+ .disable = k210_pll_disable,
+ .is_enabled = k210_pll_is_enabled,
+ .recalc_rate = k210_pll_get_rate,
+};
+
+static int k210_pll2_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct k210_pll *pll = to_k210_pll(hw);
+ struct k210_sysclk *ksc = pll->ksc;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&ksc->clk_lock, flags);
+
+ reg = readl(pll->reg);
+ reg &= ~K210_PLL_SEL;
+ reg |= FIELD_PREP(K210_PLL_SEL, index);
+ writel(reg, pll->reg);
+
+ spin_unlock_irqrestore(&ksc->clk_lock, flags);
+
+ return 0;
+}
+
+static u8 k210_pll2_get_parent(struct clk_hw *hw)
+{
+ struct k210_pll *pll = to_k210_pll(hw);
+ u32 reg = readl(pll->reg);
+
+ return FIELD_GET(K210_PLL_SEL, reg);
+}
+
+static const struct clk_ops k210_pll2_ops = {
+ .enable = k210_pll_enable,
+ .disable = k210_pll_disable,
+ .is_enabled = k210_pll_is_enabled,
+ .recalc_rate = k210_pll_get_rate,
+ .set_parent = k210_pll2_set_parent,
+ .get_parent = k210_pll2_get_parent,
+};
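+
+/*
+ * PLL2 is the only PLL with a selectable parent: the two-bit K210_PLL_SEL
+ * field picks in0 (0), PLL0 (1) or PLL1 (2), matching the parent_data
+ * order used in k210_register_pll() below.
+ */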
+
+static int __init k210_register_pll(struct device_node *np,
+ struct k210_sysclk *ksc,
+ enum k210_pll_id pllid, const char *name,
+ int num_parents, const struct clk_ops *ops)
+{
+ struct k210_pll *pll = &ksc->plls[pllid];
+ struct clk_init_data init = {};
+ const struct clk_parent_data parent_data[] = {
+ { /* .index = 0 for in0 */ },
+ { .hw = &ksc->plls[K210_PLL0].hw },
+ { .hw = &ksc->plls[K210_PLL1].hw },
+ };
+
+ init.name = name;
+ init.parent_data = parent_data;
+ init.num_parents = num_parents;
+ init.ops = ops;
+
+ pll->hw.init = &init;
+ pll->ksc = ksc;
+
+ return of_clk_hw_register(np, &pll->hw);
+}
+
+static int __init k210_register_plls(struct device_node *np,
+ struct k210_sysclk *ksc)
+{
+ int i, ret;
+
+ for (i = 0; i < K210_PLL_NUM; i++)
+ k210_init_pll(ksc->regs, i, &ksc->plls[i]);
+
+ /* PLL0 and PLL1 only have IN0 as parent */
+ ret = k210_register_pll(np, ksc, K210_PLL0, "pll0", 1, &k210_pll_ops);
+ if (ret) {
+ pr_err("%pOFP: register PLL0 failed\n", np);
+ return ret;
+ }
+ ret = k210_register_pll(np, ksc, K210_PLL1, "pll1", 1, &k210_pll_ops);
+ if (ret) {
+ pr_err("%pOFP: register PLL1 failed\n", np);
+ return ret;
+ }
+
+ /* PLL2 has IN0, PLL0 and PLL1 as parents */
+ ret = k210_register_pll(np, ksc, K210_PLL2, "pll2", 3, &k210_pll2_ops);
+ if (ret) {
+ pr_err("%pOFP: register PLL2 failed\n", np);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int k210_aclk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct k210_sysclk *ksc = to_k210_sysclk(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ksc->clk_lock, flags);
+
+ k210_aclk_set_selector(ksc->regs, index);
+
+ spin_unlock_irqrestore(&ksc->clk_lock, flags);
+
+ return 0;
+}
+
+static u8 k210_aclk_get_parent(struct clk_hw *hw)
+{
+ struct k210_sysclk *ksc = to_k210_sysclk(hw);
+ u32 sel;
+
+ sel = readl(ksc->regs + K210_SYSCTL_SEL0) & K210_ACLK_SEL;
+
+ return sel ? 1 : 0;
+}
+
+static unsigned long k210_aclk_get_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct k210_sysclk *ksc = to_k210_sysclk(hw);
+ u32 reg = readl(ksc->regs + K210_SYSCTL_SEL0);
+ unsigned int shift;
+
+ if (!(reg & K210_ACLK_SEL))
+ return parent_rate;
+
+ shift = FIELD_GET(K210_ACLK_DIV, reg);
+
+ return parent_rate / (2UL << shift);
+}
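+
+/*
+ * When ACLK is sourced from PLL0, the two-bit K210_ACLK_DIV field selects a
+ * divider of 2 << div, i.e. 2, 4, 8 or 16. With the default divide-by-2
+ * setting (see the PLL configuration comment above) this gives a 390 MHz
+ * ACLK/CPU clock out of the 780 MHz PLL0.
+ */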
+
+static const struct clk_ops k210_aclk_ops = {
+ .set_parent = k210_aclk_set_parent,
+ .get_parent = k210_aclk_get_parent,
+ .recalc_rate = k210_aclk_get_rate,
+};
+
+/*
+ * ACLK has IN0 and PLL0 as parents.
+ */
+static int __init k210_register_aclk(struct device_node *np,
+ struct k210_sysclk *ksc)
+{
+ struct clk_init_data init = {};
+ const struct clk_parent_data parent_data[] = {
+ { /* .index = 0 for in0 */ },
+ { .hw = &ksc->plls[K210_PLL0].hw },
+ };
+ int ret;
+
+ init.name = "aclk";
+ init.parent_data = parent_data;
+ init.num_parents = 2;
+ init.ops = &k210_aclk_ops;
+ ksc->aclk.init = &init;
+
+ ret = of_clk_hw_register(np, &ksc->aclk);
+ if (ret) {
+ pr_err("%pOFP: register aclk failed\n", np);
+ return ret;
+ }
+
+ return 0;
+}
+
+#define to_k210_clk(_hw) container_of(_hw, struct k210_clk, hw)
+
+static int k210_clk_enable(struct clk_hw *hw)
+{
+ struct k210_clk *kclk = to_k210_clk(hw);
+ struct k210_sysclk *ksc = kclk->ksc;
+ struct k210_clk_cfg *cfg = &k210_clk_cfgs[kclk->id];
+ unsigned long flags;
+ u32 reg;
+
+ if (!cfg->gate_reg)
+ return 0;
+
+ spin_lock_irqsave(&ksc->clk_lock, flags);
+ reg = readl(ksc->regs + cfg->gate_reg);
+ reg |= BIT(cfg->gate_bit);
+ writel(reg, ksc->regs + cfg->gate_reg);
+ spin_unlock_irqrestore(&ksc->clk_lock, flags);
+
+ return 0;
+}
+
+static void k210_clk_disable(struct clk_hw *hw)
+{
+ struct k210_clk *kclk = to_k210_clk(hw);
+ struct k210_sysclk *ksc = kclk->ksc;
+ struct k210_clk_cfg *cfg = &k210_clk_cfgs[kclk->id];
+ unsigned long flags;
+ u32 reg;
+
+ if (!cfg->gate_reg)
+ return;
+
+ spin_lock_irqsave(&ksc->clk_lock, flags);
+ reg = readl(ksc->regs + cfg->gate_reg);
+ reg &= ~BIT(cfg->gate_bit);
+ writel(reg, ksc->regs + cfg->gate_reg);
+ spin_unlock_irqrestore(&ksc->clk_lock, flags);
+}
+
+static int k210_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct k210_clk *kclk = to_k210_clk(hw);
+ struct k210_sysclk *ksc = kclk->ksc;
+ struct k210_clk_cfg *cfg = &k210_clk_cfgs[kclk->id];
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&ksc->clk_lock, flags);
+ reg = readl(ksc->regs + cfg->mux_reg);
+ if (index)
+ reg |= BIT(cfg->mux_bit);
+ else
+ reg &= ~BIT(cfg->mux_bit);
+ writel(reg, ksc->regs + cfg->mux_reg);
+ spin_unlock_irqrestore(&ksc->clk_lock, flags);
+
+ return 0;
+}
+
+static u8 k210_clk_get_parent(struct clk_hw *hw)
+{
+ struct k210_clk *kclk = to_k210_clk(hw);
+ struct k210_sysclk *ksc = kclk->ksc;
+ struct k210_clk_cfg *cfg = &k210_clk_cfgs[kclk->id];
+ unsigned long flags;
+ u32 reg, idx;
+
+ spin_lock_irqsave(&ksc->clk_lock, flags);
+ reg = readl(ksc->regs + cfg->mux_reg);
+ idx = (reg & BIT(cfg->mux_bit)) ? 1 : 0;
+ spin_unlock_irqrestore(&ksc->clk_lock, flags);
+
+ return idx;
+}
+
+static unsigned long k210_clk_get_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct k210_clk *kclk = to_k210_clk(hw);
+ struct k210_sysclk *ksc = kclk->ksc;
+ struct k210_clk_cfg *cfg = &k210_clk_cfgs[kclk->id];
+ u32 reg, div_val;
+
+ if (!cfg->div_reg)
+ return parent_rate;
+
+ reg = readl(ksc->regs + cfg->div_reg);
+ div_val = (reg >> cfg->div_shift) & GENMASK(cfg->div_width - 1, 0);
+
+ switch (cfg->div_type) {
+ case K210_DIV_ONE_BASED:
+ return parent_rate / (div_val + 1);
+ case K210_DIV_DOUBLE_ONE_BASED:
+ return parent_rate / ((div_val + 1) * 2);
+ case K210_DIV_POWER_OF_TWO:
+ return parent_rate / (2UL << div_val);
+ case K210_DIV_NONE:
+ default:
+ return 0;
+ }
+}
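+
+/*
+ * Divider encoding examples: a raw field value of 3 divides the parent rate
+ * by 4 (K210_DIV_ONE_BASED), by 8 (K210_DIV_DOUBLE_ONE_BASED), or by 16
+ * (K210_DIV_POWER_OF_TWO).
+ */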
+
+static const struct clk_ops k210_clk_mux_ops = {
+ .enable = k210_clk_enable,
+ .disable = k210_clk_disable,
+ .set_parent = k210_clk_set_parent,
+ .get_parent = k210_clk_get_parent,
+ .recalc_rate = k210_clk_get_rate,
+};
+
+static const struct clk_ops k210_clk_ops = {
+ .enable = k210_clk_enable,
+ .disable = k210_clk_disable,
+ .recalc_rate = k210_clk_get_rate,
+};
+
+static void __init k210_register_clk(struct device_node *np,
+ struct k210_sysclk *ksc, int id,
+ const struct clk_parent_data *parent_data,
+ int num_parents, unsigned long flags)
+{
+ struct k210_clk *kclk = &ksc->clks[id];
+ struct clk_init_data init = {};
+ int ret;
+
+ init.name = k210_clk_cfgs[id].name;
+ init.flags = flags;
+ init.parent_data = parent_data;
+ init.num_parents = num_parents;
+ if (num_parents > 1)
+ init.ops = &k210_clk_mux_ops;
+ else
+ init.ops = &k210_clk_ops;
+
+ kclk->id = id;
+ kclk->ksc = ksc;
+ kclk->hw.init = &init;
+
+ ret = of_clk_hw_register(np, &kclk->hw);
+ if (ret) {
+ pr_err("%pOFP: register clock %s failed\n",
+ np, k210_clk_cfgs[id].name);
+ kclk->id = -1;
+ }
+}
+
+/*
+ * All muxed clocks have IN0 and PLL0 as parents.
+ */
+static inline void __init k210_register_mux_clk(struct device_node *np,
+ struct k210_sysclk *ksc, int id)
+{
+ const struct clk_parent_data parent_data[2] = {
+ { /* .index = 0 for in0 */ },
+ { .hw = &ksc->plls[K210_PLL0].hw }
+ };
+
+ k210_register_clk(np, ksc, id, parent_data, 2, 0);
+}
+
+static inline void __init k210_register_in0_child(struct device_node *np,
+ struct k210_sysclk *ksc, int id)
+{
+ const struct clk_parent_data parent_data = {
+ /* .index = 0 for in0 */
+ };
+
+ k210_register_clk(np, ksc, id, &parent_data, 1, 0);
+}
+
+static inline void __init k210_register_pll_child(struct device_node *np,
+ struct k210_sysclk *ksc, int id,
+ enum k210_pll_id pllid,
+ unsigned long flags)
+{
+ const struct clk_parent_data parent_data = {
+ .hw = &ksc->plls[pllid].hw,
+ };
+
+ k210_register_clk(np, ksc, id, &parent_data, 1, flags);
+}
+
+static inline void __init k210_register_aclk_child(struct device_node *np,
+ struct k210_sysclk *ksc, int id,
+ unsigned long flags)
+{
+ const struct clk_parent_data parent_data = {
+ .hw = &ksc->aclk,
+ };
+
+ k210_register_clk(np, ksc, id, &parent_data, 1, flags);
+}
+
+static inline void __init k210_register_clk_child(struct device_node *np,
+ struct k210_sysclk *ksc, int id,
+ int parent_id)
+{
+ const struct clk_parent_data parent_data = {
+ .hw = &ksc->clks[parent_id].hw,
+ };
+
+ k210_register_clk(np, ksc, id, &parent_data, 1, 0);
+}
+
+static struct clk_hw *k210_clk_hw_onecell_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct k210_sysclk *ksc = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= K210_NUM_CLKS)
+ return ERR_PTR(-EINVAL);
+
+ return &ksc->clks[idx].hw;
+}
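+
+/*
+ * Consumers reference these clocks by index through this single-cell
+ * provider, e.g. "clocks = <&sysclk K210_CLK_SPI3>;" in a board device
+ * tree, where "sysclk" stands for whatever label the clock controller
+ * node carries.
+ */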
+
+static void __init k210_clk_init(struct device_node *np)
+{
+ struct device_node *sysctl_np;
+ struct k210_sysclk *ksc;
+ int i, ret;
+
+ ksc = kzalloc(sizeof(*ksc), GFP_KERNEL);
+ if (!ksc)
+ return;
+
+ spin_lock_init(&ksc->clk_lock);
+ sysctl_np = of_get_parent(np);
+ ksc->regs = of_iomap(sysctl_np, 0);
+ of_node_put(sysctl_np);
+ if (!ksc->regs) {
+ pr_err("%pOFP: failed to map registers\n", np);
+ return;
+ }
+
+ ret = k210_register_plls(np, ksc);
+ if (ret)
+ return;
+
+ ret = k210_register_aclk(np, ksc);
+ if (ret)
+ return;
+
+ /*
+ * Critical clocks: there are no consumers of the SRAM clocks,
+ * including the AI clock for the third SRAM bank. The CPU clock
+ * is only referenced by the uarths serial device and so would be
+ * disabled if the serial console is disabled to switch to another
+ * console. Mark all these clocks as critical so that they are never
+ * disabled by the core clock management.
+ */
+ k210_register_aclk_child(np, ksc, K210_CLK_CPU, CLK_IS_CRITICAL);
+ k210_register_aclk_child(np, ksc, K210_CLK_SRAM0, CLK_IS_CRITICAL);
+ k210_register_aclk_child(np, ksc, K210_CLK_SRAM1, CLK_IS_CRITICAL);
+ k210_register_pll_child(np, ksc, K210_CLK_AI, K210_PLL1,
+ CLK_IS_CRITICAL);
+
+ /* Clocks with aclk as source */
+ k210_register_aclk_child(np, ksc, K210_CLK_DMA, 0);
+ k210_register_aclk_child(np, ksc, K210_CLK_FFT, 0);
+ k210_register_aclk_child(np, ksc, K210_CLK_ROM, 0);
+ k210_register_aclk_child(np, ksc, K210_CLK_DVP, 0);
+ k210_register_aclk_child(np, ksc, K210_CLK_APB0, 0);
+ k210_register_aclk_child(np, ksc, K210_CLK_APB1, 0);
+ k210_register_aclk_child(np, ksc, K210_CLK_APB2, 0);
+
+ /* Clocks with PLL0 as source */
+ k210_register_pll_child(np, ksc, K210_CLK_SPI0, K210_PLL0, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_SPI1, K210_PLL0, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_SPI2, K210_PLL0, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_I2C0, K210_PLL0, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_I2C1, K210_PLL0, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_I2C2, K210_PLL0, 0);
+
+ /* Clocks with PLL2 as source */
+ k210_register_pll_child(np, ksc, K210_CLK_I2S0, K210_PLL2, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_I2S1, K210_PLL2, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_I2S2, K210_PLL2, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_I2S0_M, K210_PLL2, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_I2S1_M, K210_PLL2, 0);
+ k210_register_pll_child(np, ksc, K210_CLK_I2S2_M, K210_PLL2, 0);
+
+ /* Clocks with IN0 as source */
+ k210_register_in0_child(np, ksc, K210_CLK_WDT0);
+ k210_register_in0_child(np, ksc, K210_CLK_WDT1);
+ k210_register_in0_child(np, ksc, K210_CLK_RTC);
+
+ /* Clocks with APB0 as source */
+ k210_register_clk_child(np, ksc, K210_CLK_GPIO, K210_CLK_APB0);
+ k210_register_clk_child(np, ksc, K210_CLK_UART1, K210_CLK_APB0);
+ k210_register_clk_child(np, ksc, K210_CLK_UART2, K210_CLK_APB0);
+ k210_register_clk_child(np, ksc, K210_CLK_UART3, K210_CLK_APB0);
+ k210_register_clk_child(np, ksc, K210_CLK_FPIOA, K210_CLK_APB0);
+ k210_register_clk_child(np, ksc, K210_CLK_SHA, K210_CLK_APB0);
+
+ /* Clocks with APB1 as source */
+ k210_register_clk_child(np, ksc, K210_CLK_AES, K210_CLK_APB1);
+ k210_register_clk_child(np, ksc, K210_CLK_OTP, K210_CLK_APB1);
+
+ /* Mux clocks with in0 or pll0 as source */
+ k210_register_mux_clk(np, ksc, K210_CLK_SPI3);
+ k210_register_mux_clk(np, ksc, K210_CLK_TIMER0);
+ k210_register_mux_clk(np, ksc, K210_CLK_TIMER1);
+ k210_register_mux_clk(np, ksc, K210_CLK_TIMER2);
+
+ /* Check for registration errors */
+ for (i = 0; i < K210_NUM_CLKS; i++) {
+ if (ksc->clks[i].id != i)
+ return;
+ }
+
+ ret = of_clk_add_hw_provider(np, k210_clk_hw_onecell_get, ksc);
+ if (ret) {
+ pr_err("%pOFP: add clock provider failed %d\n", np, ret);
+ return;
+ }
+
+ pr_info("%pOFP: CPU running at %lu MHz\n",
+ np, clk_hw_get_rate(&ksc->clks[K210_CLK_CPU].hw) / 1000000);
+}
+
+CLK_OF_DECLARE(k210_clk, "canaan,k210-clk", k210_clk_init);
+
+/*
+ * Enable PLL1 to be able to use the AI SRAM.
+ */
+void __init k210_clk_early_init(void __iomem *regs)
+{
+ struct k210_pll pll1;
+
+ /* Make sure ACLK selector is set to PLL0 */
+ k210_aclk_set_selector(regs, 1);
+
+ /* Startup PLL1 to enable the aisram bank for general memory use */
+ k210_init_pll(regs, K210_PLL1, &pll1);
+ k210_pll_enable_hw(regs, &pll1);
+}
diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
index 27a86b7a34db..e677bb5a784b 100644
--- a/drivers/clk/clk-npcm7xx.c
+++ b/drivers/clk/clk-npcm7xx.c
@@ -361,13 +361,6 @@ static const struct npcm7xx_clk_mux_data npcm7xx_muxes[] __initconst = {
dvcssel_mux_parents, ARRAY_SIZE(dvcssel_mux_parents), 0, -1},
};
-/* fixed ratio dividers (no register): */
-static const struct npcm7xx_clk_div_fixed_data npcm7xx_divs_fx[] __initconst = {
- { 1, 2, NPCM7XX_CLK_S_MC, NPCM7XX_CLK_S_MC_MUX, 0, NPCM7XX_CLK_MC},
- { 1, 2, NPCM7XX_CLK_S_PLL1_DIV2, NPCM7XX_CLK_S_PLL1, 0, -1},
- { 1, 2, NPCM7XX_CLK_S_PLL2_DIV2, NPCM7XX_CLK_S_PLL2, 0, -1},
-};
-
/* configurable dividers: */
static const struct npcm7xx_clk_div_data npcm7xx_divs[] __initconst = {
{NPCM7XX_CLKDIV1, 28, 3, NPCM7XX_CLK_S_ADC,
@@ -435,107 +428,6 @@ static const struct npcm7xx_clk_div_data npcm7xx_divs[] __initconst = {
};
-static const struct npcm7xx_clk_gate_data npcm7xx_gates[] __initconst = {
- {NPCM7XX_CLKEN1, 31, "smb1-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN1, 30, "smb0-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN1, 29, "smb7-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN1, 28, "smb6-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN1, 27, "adc-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN1, 26, "wdt-gate", NPCM7XX_CLK_S_TIMER, 0},
- {NPCM7XX_CLKEN1, 25, "usbdev3-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 24, "usbdev6-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 23, "usbdev5-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 22, "usbdev4-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 21, "emc2-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 20, "timer5_9-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN1, 19, "timer0_4-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN1, 18, "pwmm0-gate", NPCM7XX_CLK_S_APB3, 0},
- {NPCM7XX_CLKEN1, 17, "huart-gate", NPCM7XX_CLK_S_UART, 0},
- {NPCM7XX_CLKEN1, 16, "smb5-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN1, 15, "smb4-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN1, 14, "smb3-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN1, 13, "smb2-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN1, 12, "mc-gate", NPCM7XX_CLK_S_MC, 0},
- {NPCM7XX_CLKEN1, 11, "uart01-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN1, 10, "aes-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 9, "peci-gate", NPCM7XX_CLK_S_APB3, 0},
- {NPCM7XX_CLKEN1, 8, "usbdev2-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 7, "uart23-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN1, 6, "emc1-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 5, "usbdev1-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 4, "shm-gate", NPCM7XX_CLK_S_AHB, 0},
- /* bit 3 is reserved */
- {NPCM7XX_CLKEN1, 2, "kcs-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN1, 1, "spi3-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN1, 0, "spi0-gate", NPCM7XX_CLK_S_AHB, 0},
-
- {NPCM7XX_CLKEN2, 31, "cp-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 30, "tock-gate", NPCM7XX_CLK_S_TOCK, 0},
- /* bit 29 is reserved */
- {NPCM7XX_CLKEN2, 28, "gmac1-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 27, "usbif-gate", NPCM7XX_CLK_S_USBIF, 0},
- {NPCM7XX_CLKEN2, 26, "usbhost-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 25, "gmac2-gate", NPCM7XX_CLK_S_AHB, 0},
- /* bit 24 is reserved */
- {NPCM7XX_CLKEN2, 23, "pspi2-gate", NPCM7XX_CLK_S_APB5, 0},
- {NPCM7XX_CLKEN2, 22, "pspi1-gate", NPCM7XX_CLK_S_APB5, 0},
- {NPCM7XX_CLKEN2, 21, "3des-gate", NPCM7XX_CLK_S_AHB, 0},
- /* bit 20 is reserved */
- {NPCM7XX_CLKEN2, 19, "siox2-gate", NPCM7XX_CLK_S_APB3, 0},
- {NPCM7XX_CLKEN2, 18, "siox1-gate", NPCM7XX_CLK_S_APB3, 0},
- /* bit 17 is reserved */
- {NPCM7XX_CLKEN2, 16, "fuse-gate", NPCM7XX_CLK_S_APB4, 0},
- /* bit 15 is reserved */
- {NPCM7XX_CLKEN2, 14, "vcd-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 13, "ece-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 12, "vdma-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 11, "ahbpcibrg-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 10, "gfxsys-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN2, 9, "sdhc-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 8, "mmc-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN2, 7, "mft7-gate", NPCM7XX_CLK_S_APB4, 0},
- {NPCM7XX_CLKEN2, 6, "mft6-gate", NPCM7XX_CLK_S_APB4, 0},
- {NPCM7XX_CLKEN2, 5, "mft5-gate", NPCM7XX_CLK_S_APB4, 0},
- {NPCM7XX_CLKEN2, 4, "mft4-gate", NPCM7XX_CLK_S_APB4, 0},
- {NPCM7XX_CLKEN2, 3, "mft3-gate", NPCM7XX_CLK_S_APB4, 0},
- {NPCM7XX_CLKEN2, 2, "mft2-gate", NPCM7XX_CLK_S_APB4, 0},
- {NPCM7XX_CLKEN2, 1, "mft1-gate", NPCM7XX_CLK_S_APB4, 0},
- {NPCM7XX_CLKEN2, 0, "mft0-gate", NPCM7XX_CLK_S_APB4, 0},
-
- {NPCM7XX_CLKEN3, 31, "gpiom7-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 30, "gpiom6-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 29, "gpiom5-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 28, "gpiom4-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 27, "gpiom3-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 26, "gpiom2-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 25, "gpiom1-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 24, "gpiom0-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 23, "espi-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN3, 22, "smb11-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN3, 21, "smb10-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN3, 20, "smb9-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN3, 19, "smb8-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN3, 18, "smb15-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN3, 17, "rng-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 16, "timer10_14-gate", NPCM7XX_CLK_S_APB1, 0},
- {NPCM7XX_CLKEN3, 15, "pcirc-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN3, 14, "sececc-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN3, 13, "sha-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN3, 12, "smb14-gate", NPCM7XX_CLK_S_APB2, 0},
- /* bit 11 is reserved */
- /* bit 10 is reserved */
- {NPCM7XX_CLKEN3, 9, "pcimbx-gate", NPCM7XX_CLK_S_AHB, 0},
- /* bit 8 is reserved */
- {NPCM7XX_CLKEN3, 7, "usbdev9-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN3, 6, "usbdev8-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN3, 5, "usbdev7-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN3, 4, "usbdev0-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN3, 3, "smb13-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN3, 2, "spix-gate", NPCM7XX_CLK_S_AHB, 0},
- {NPCM7XX_CLKEN3, 1, "smb12-gate", NPCM7XX_CLK_S_APB2, 0},
- {NPCM7XX_CLKEN3, 0, "pwmm1-gate", NPCM7XX_CLK_S_APB3, 0},
-};
-
static DEFINE_SPINLOCK(npcm7xx_clk_lock);
static void __init npcm7xx_clk_init(struct device_node *clk_np)
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 70aa521e7e7f..88898b97a443 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2013 Freescale Semiconductor, Inc.
+ * Copyright 2021 NXP
*
* clock driver for Freescale QorIQ SoCs.
*/
@@ -564,7 +565,9 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 1, 1, 1, -1
},
- .pll_mask = 0x3f,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2) | BIT(CGA_PLL3) |
+ BIT(CGB_PLL1) | BIT(CGB_PLL2),
.flags = CG_PLL_8BIT,
},
{
@@ -580,7 +583,9 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 1, 1, 1, -1
},
- .pll_mask = 0x3f,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2) | BIT(CGA_PLL3) |
+ BIT(CGB_PLL1) | BIT(CGB_PLL2),
.flags = CG_PLL_8BIT,
},
{
@@ -591,7 +596,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, -1
},
- .pll_mask = 0x03,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
},
{
.compat = "fsl,ls1028a-clockgen",
@@ -605,7 +611,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 0, 0, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_VER3 | CG_LITTLE_ENDIAN,
},
{
@@ -620,7 +627,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_PLL_8BIT,
},
{
@@ -635,7 +643,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_PLL_8BIT,
},
{
@@ -649,7 +658,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_VER3 | CG_LITTLE_ENDIAN,
},
{
@@ -660,7 +670,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, -1
},
- .pll_mask = 0x03,
+ .pll_mask = BIT(PLATFORM_PLL) | BIT(CGA_PLL1),
},
{
.compat = "fsl,ls2080a-clockgen",
@@ -670,7 +680,9 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 1, 1, -1
},
- .pll_mask = 0x37,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2) |
+ BIT(CGB_PLL1) | BIT(CGB_PLL2),
.flags = CG_VER3 | CG_LITTLE_ENDIAN,
},
{
@@ -681,7 +693,9 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 0, 0, 1, 1, 1, 1, -1
},
- .pll_mask = 0x37,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2) |
+ BIT(CGB_PLL1) | BIT(CGB_PLL2),
.flags = CG_VER3 | CG_LITTLE_ENDIAN,
},
{
@@ -694,7 +708,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 1, 1, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
},
{
.compat = "fsl,p3041-clockgen",
@@ -706,7 +721,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 1, 1, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
},
{
.compat = "fsl,p4080-clockgen",
@@ -718,7 +734,9 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 0, 0, 1, 1, 1, 1, -1
},
- .pll_mask = 0x1f,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2) |
+ BIT(CGA_PLL3) | BIT(CGA_PLL4),
},
{
.compat = "fsl,p5020-clockgen",
@@ -730,7 +748,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 1, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
},
{
.compat = "fsl,p5040-clockgen",
@@ -742,7 +761,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 1, 1, -1
},
- .pll_mask = 0x0f,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2) | BIT(CGA_PLL3),
},
{
.compat = "fsl,t1023-clockgen",
@@ -757,7 +777,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, -1
},
- .pll_mask = 0x03,
+ .pll_mask = BIT(PLATFORM_PLL) | BIT(CGA_PLL1),
.flags = CG_PLL_8BIT,
},
{
@@ -770,7 +790,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 0, 0, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_PLL_8BIT,
},
{
@@ -786,7 +807,8 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, -1
},
- .pll_mask = 0x07,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2),
.flags = CG_PLL_8BIT,
},
{
@@ -802,7 +824,9 @@ static const struct clockgen_chipinfo chipinfo[] = {
.cmux_to_group = {
0, 0, 1, -1
},
- .pll_mask = 0x3f,
+ .pll_mask = BIT(PLATFORM_PLL) |
+ BIT(CGA_PLL1) | BIT(CGA_PLL2) | BIT(CGA_PLL3) |
+ BIT(CGB_PLL1) | BIT(CGB_PLL2),
.flags = CG_PLL_8BIT,
},
{},
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index 34b25609f55f..eea50121718a 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2010, 2011 Ericsson AB.
* Copyright (C) 2011 Guenter Roeck.
- * Copyright (C) 2011 - 2013 Xilinx Inc.
+ * Copyright (C) 2011 - 2021 Xilinx Inc.
*
* Author: Guenter Roeck <guenter.roeck@ericsson.com>
* Sören Brinkmann <soren.brinkmann@xilinx.com>
@@ -123,14 +123,18 @@ static int si570_get_divs(struct clk_si570 *data, u64 *rfreq,
* si570_get_defaults() - Get default values
* @data: Driver data structure
* @fout: Factory frequency output
+ * @skip_recall: If true, don't recall NVM into RAM
* Returns 0 on success, negative errno otherwise.
*/
-static int si570_get_defaults(struct clk_si570 *data, u64 fout)
+static int si570_get_defaults(struct clk_si570 *data, u64 fout,
+ bool skip_recall)
{
int err;
u64 fdco;
- regmap_write(data->regmap, SI570_REG_CONTROL, SI570_CNTRL_RECALL);
+ if (!skip_recall)
+ regmap_write(data->regmap, SI570_REG_CONTROL,
+ SI570_CNTRL_RECALL);
err = si570_get_divs(data, &data->rfreq, &data->n1, &data->hs_div);
if (err)
@@ -400,6 +404,7 @@ static int si570_probe(struct i2c_client *client,
struct clk_si570 *data;
struct clk_init_data init;
u32 initial_fout, factory_fout, stability;
+ bool skip_recall;
int err;
enum clk_si570_variant variant = id->driver_data;
@@ -441,6 +446,9 @@ static int si570_probe(struct i2c_client *client,
return err;
}
+ skip_recall = of_property_read_bool(client->dev.of_node,
+ "silabs,skip-recall");
+
data->regmap = devm_regmap_init_i2c(client, &si570_regmap_config);
if (IS_ERR(data->regmap)) {
dev_err(&client->dev, "failed to allocate register map\n");
@@ -448,7 +456,7 @@ static int si570_probe(struct i2c_client *client,
}
i2c_set_clientdata(client, data);
- err = si570_get_defaults(data, factory_fout);
+ err = si570_get_defaults(data, factory_fout, skip_recall);
if (err)
return err;
diff --git a/drivers/clk/clk-tango4.c b/drivers/clk/clk-tango4.c
deleted file mode 100644
index fe12a43f7a40..000000000000
--- a/drivers/clk/clk-tango4.c
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/clk-provider.h>
-#include <linux/of_address.h>
-#include <linux/init.h>
-#include <linux/io.h>
-
-#define CLK_COUNT 4 /* cpu_clk, sys_clk, usb_clk, sdio_clk */
-static struct clk *clks[CLK_COUNT];
-static struct clk_onecell_data clk_data = { clks, CLK_COUNT };
-
-#define SYSCLK_DIV 0x20
-#define CPUCLK_DIV 0x24
-#define DIV_BYPASS BIT(23)
-
-/*** CLKGEN_PLL ***/
-#define extract_pll_n(val) ((val >> 0) & ((1u << 7) - 1))
-#define extract_pll_k(val) ((val >> 13) & ((1u << 3) - 1))
-#define extract_pll_m(val) ((val >> 16) & ((1u << 3) - 1))
-#define extract_pll_isel(val) ((val >> 24) & ((1u << 3) - 1))
-
-static void __init make_pll(int idx, const char *parent, void __iomem *base)
-{
- char name[8];
- u32 val, mul, div;
-
- sprintf(name, "pll%d", idx);
- val = readl(base + idx * 8);
- mul = extract_pll_n(val) + 1;
- div = (extract_pll_m(val) + 1) << extract_pll_k(val);
- clk_register_fixed_factor(NULL, name, parent, 0, mul, div);
- if (extract_pll_isel(val) != 1)
- panic("%s: input not set to XTAL_IN\n", name);
-}
-
-static void __init make_cd(int idx, void __iomem *base)
-{
- char name[8];
- u32 val, mul, div;
-
- sprintf(name, "cd%d", idx);
- val = readl(base + idx * 8);
- mul = 1 << 27;
- div = (2 << 27) + val;
- clk_register_fixed_factor(NULL, name, "pll2", 0, mul, div);
- if (val > 0xf0000000)
- panic("%s: unsupported divider %x\n", name, val);
-}
-
-static void __init tango4_clkgen_setup(struct device_node *np)
-{
- struct clk **pp = clk_data.clks;
- void __iomem *base = of_iomap(np, 0);
- const char *parent = of_clk_get_parent_name(np, 0);
-
- if (!base)
- panic("%pOFn: invalid address\n", np);
-
- if (readl(base + CPUCLK_DIV) & DIV_BYPASS)
- panic("%pOFn: unsupported cpuclk setup\n", np);
-
- if (readl(base + SYSCLK_DIV) & DIV_BYPASS)
- panic("%pOFn: unsupported sysclk setup\n", np);
-
- writel(0x100, base + CPUCLK_DIV); /* disable frequency ramping */
-
- make_pll(0, parent, base);
- make_pll(1, parent, base);
- make_pll(2, parent, base);
- make_cd(2, base + 0x80);
- make_cd(6, base + 0x80);
-
- pp[0] = clk_register_divider(NULL, "cpu_clk", "pll0", 0,
- base + CPUCLK_DIV, 8, 8, CLK_DIVIDER_ONE_BASED, NULL);
- pp[1] = clk_register_fixed_factor(NULL, "sys_clk", "pll1", 0, 1, 4);
- pp[2] = clk_register_fixed_factor(NULL, "usb_clk", "cd2", 0, 1, 2);
- pp[3] = clk_register_fixed_factor(NULL, "sdio_clk", "cd6", 0, 1, 2);
-
- if (IS_ERR(pp[0]) || IS_ERR(pp[1]) || IS_ERR(pp[2]) || IS_ERR(pp[3]))
- panic("%pOFn: clk registration failed\n", np);
-
- if (of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data))
- panic("%pOFn: clk provider registration failed\n", np);
-}
-CLK_OF_DECLARE(tango4_clkgen, "sigma,tango4-clkgen", tango4_clkgen_setup);
diff --git a/drivers/clk/clk-u300.c b/drivers/clk/clk-u300.c
deleted file mode 100644
index e228c07c4c6e..000000000000
--- a/drivers/clk/clk-u300.c
+++ /dev/null
@@ -1,1199 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * U300 clock implementation
- * Copyright (C) 2007-2012 ST-Ericsson AB
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- */
-#include <linux/clkdev.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/clk-provider.h>
-#include <linux/spinlock.h>
-#include <linux/of.h>
-#include <linux/platform_data/clk-u300.h>
-
-/* APP side SYSCON registers */
-/* CLK Control Register 16bit (R/W) */
-#define U300_SYSCON_CCR (0x0000)
-#define U300_SYSCON_CCR_I2S1_USE_VCXO (0x0040)
-#define U300_SYSCON_CCR_I2S0_USE_VCXO (0x0020)
-#define U300_SYSCON_CCR_TURN_VCXO_ON (0x0008)
-#define U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK (0x0007)
-#define U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER (0x04)
-#define U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW (0x03)
-#define U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE (0x02)
-#define U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH (0x01)
-#define U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST (0x00)
-/* CLK Status Register 16bit (R/W) */
-#define U300_SYSCON_CSR (0x0004)
-#define U300_SYSCON_CSR_PLL208_LOCK_IND (0x0002)
-#define U300_SYSCON_CSR_PLL13_LOCK_IND (0x0001)
-/* Reset lines for SLOW devices 16bit (R/W) */
-#define U300_SYSCON_RSR (0x0014)
-#define U300_SYSCON_RSR_PPM_RESET_EN (0x0200)
-#define U300_SYSCON_RSR_ACC_TMR_RESET_EN (0x0100)
-#define U300_SYSCON_RSR_APP_TMR_RESET_EN (0x0080)
-#define U300_SYSCON_RSR_RTC_RESET_EN (0x0040)
-#define U300_SYSCON_RSR_KEYPAD_RESET_EN (0x0020)
-#define U300_SYSCON_RSR_GPIO_RESET_EN (0x0010)
-#define U300_SYSCON_RSR_EH_RESET_EN (0x0008)
-#define U300_SYSCON_RSR_BTR_RESET_EN (0x0004)
-#define U300_SYSCON_RSR_UART_RESET_EN (0x0002)
-#define U300_SYSCON_RSR_SLOW_BRIDGE_RESET_EN (0x0001)
-/* Reset lines for FAST devices 16bit (R/W) */
-#define U300_SYSCON_RFR (0x0018)
-#define U300_SYSCON_RFR_UART1_RESET_ENABLE (0x0080)
-#define U300_SYSCON_RFR_SPI_RESET_ENABLE (0x0040)
-#define U300_SYSCON_RFR_MMC_RESET_ENABLE (0x0020)
-#define U300_SYSCON_RFR_PCM_I2S1_RESET_ENABLE (0x0010)
-#define U300_SYSCON_RFR_PCM_I2S0_RESET_ENABLE (0x0008)
-#define U300_SYSCON_RFR_I2C1_RESET_ENABLE (0x0004)
-#define U300_SYSCON_RFR_I2C0_RESET_ENABLE (0x0002)
-#define U300_SYSCON_RFR_FAST_BRIDGE_RESET_ENABLE (0x0001)
-/* Reset lines for the rest of the peripherals 16bit (R/W) */
-#define U300_SYSCON_RRR (0x001c)
-#define U300_SYSCON_RRR_CDS_RESET_EN (0x4000)
-#define U300_SYSCON_RRR_ISP_RESET_EN (0x2000)
-#define U300_SYSCON_RRR_INTCON_RESET_EN (0x1000)
-#define U300_SYSCON_RRR_MSPRO_RESET_EN (0x0800)
-#define U300_SYSCON_RRR_XGAM_RESET_EN (0x0100)
-#define U300_SYSCON_RRR_XGAM_VC_SYNC_RESET_EN (0x0080)
-#define U300_SYSCON_RRR_NANDIF_RESET_EN (0x0040)
-#define U300_SYSCON_RRR_EMIF_RESET_EN (0x0020)
-#define U300_SYSCON_RRR_DMAC_RESET_EN (0x0010)
-#define U300_SYSCON_RRR_CPU_RESET_EN (0x0008)
-#define U300_SYSCON_RRR_APEX_RESET_EN (0x0004)
-#define U300_SYSCON_RRR_AHB_RESET_EN (0x0002)
-#define U300_SYSCON_RRR_AAIF_RESET_EN (0x0001)
-/* Clock enable for SLOW peripherals 16bit (R/W) */
-#define U300_SYSCON_CESR (0x0020)
-#define U300_SYSCON_CESR_PPM_CLK_EN (0x0200)
-#define U300_SYSCON_CESR_ACC_TMR_CLK_EN (0x0100)
-#define U300_SYSCON_CESR_APP_TMR_CLK_EN (0x0080)
-#define U300_SYSCON_CESR_KEYPAD_CLK_EN (0x0040)
-#define U300_SYSCON_CESR_GPIO_CLK_EN (0x0010)
-#define U300_SYSCON_CESR_EH_CLK_EN (0x0008)
-#define U300_SYSCON_CESR_BTR_CLK_EN (0x0004)
-#define U300_SYSCON_CESR_UART_CLK_EN (0x0002)
-#define U300_SYSCON_CESR_SLOW_BRIDGE_CLK_EN (0x0001)
-/* Clock enable for FAST peripherals 16bit (R/W) */
-#define U300_SYSCON_CEFR (0x0024)
-#define U300_SYSCON_CEFR_UART1_CLK_EN (0x0200)
-#define U300_SYSCON_CEFR_I2S1_CORE_CLK_EN (0x0100)
-#define U300_SYSCON_CEFR_I2S0_CORE_CLK_EN (0x0080)
-#define U300_SYSCON_CEFR_SPI_CLK_EN (0x0040)
-#define U300_SYSCON_CEFR_MMC_CLK_EN (0x0020)
-#define U300_SYSCON_CEFR_I2S1_CLK_EN (0x0010)
-#define U300_SYSCON_CEFR_I2S0_CLK_EN (0x0008)
-#define U300_SYSCON_CEFR_I2C1_CLK_EN (0x0004)
-#define U300_SYSCON_CEFR_I2C0_CLK_EN (0x0002)
-#define U300_SYSCON_CEFR_FAST_BRIDGE_CLK_EN (0x0001)
-/* Clock enable for the rest of the peripherals 16bit (R/W) */
-#define U300_SYSCON_CERR (0x0028)
-#define U300_SYSCON_CERR_CDS_CLK_EN (0x2000)
-#define U300_SYSCON_CERR_ISP_CLK_EN (0x1000)
-#define U300_SYSCON_CERR_MSPRO_CLK_EN (0x0800)
-#define U300_SYSCON_CERR_AHB_SUBSYS_BRIDGE_CLK_EN (0x0400)
-#define U300_SYSCON_CERR_SEMI_CLK_EN (0x0200)
-#define U300_SYSCON_CERR_XGAM_CLK_EN (0x0100)
-#define U300_SYSCON_CERR_VIDEO_ENC_CLK_EN (0x0080)
-#define U300_SYSCON_CERR_NANDIF_CLK_EN (0x0040)
-#define U300_SYSCON_CERR_EMIF_CLK_EN (0x0020)
-#define U300_SYSCON_CERR_DMAC_CLK_EN (0x0010)
-#define U300_SYSCON_CERR_CPU_CLK_EN (0x0008)
-#define U300_SYSCON_CERR_APEX_CLK_EN (0x0004)
-#define U300_SYSCON_CERR_AHB_CLK_EN (0x0002)
-#define U300_SYSCON_CERR_AAIF_CLK_EN (0x0001)
-/* Single block clock enable 16bit (-/W) */
-#define U300_SYSCON_SBCER (0x002c)
-#define U300_SYSCON_SBCER_PPM_CLK_EN (0x0009)
-#define U300_SYSCON_SBCER_ACC_TMR_CLK_EN (0x0008)
-#define U300_SYSCON_SBCER_APP_TMR_CLK_EN (0x0007)
-#define U300_SYSCON_SBCER_KEYPAD_CLK_EN (0x0006)
-#define U300_SYSCON_SBCER_GPIO_CLK_EN (0x0004)
-#define U300_SYSCON_SBCER_EH_CLK_EN (0x0003)
-#define U300_SYSCON_SBCER_BTR_CLK_EN (0x0002)
-#define U300_SYSCON_SBCER_UART_CLK_EN (0x0001)
-#define U300_SYSCON_SBCER_SLOW_BRIDGE_CLK_EN (0x0000)
-#define U300_SYSCON_SBCER_UART1_CLK_EN (0x0019)
-#define U300_SYSCON_SBCER_I2S1_CORE_CLK_EN (0x0018)
-#define U300_SYSCON_SBCER_I2S0_CORE_CLK_EN (0x0017)
-#define U300_SYSCON_SBCER_SPI_CLK_EN (0x0016)
-#define U300_SYSCON_SBCER_MMC_CLK_EN (0x0015)
-#define U300_SYSCON_SBCER_I2S1_CLK_EN (0x0014)
-#define U300_SYSCON_SBCER_I2S0_CLK_EN (0x0013)
-#define U300_SYSCON_SBCER_I2C1_CLK_EN (0x0012)
-#define U300_SYSCON_SBCER_I2C0_CLK_EN (0x0011)
-#define U300_SYSCON_SBCER_FAST_BRIDGE_CLK_EN (0x0010)
-#define U300_SYSCON_SBCER_CDS_CLK_EN (0x002D)
-#define U300_SYSCON_SBCER_ISP_CLK_EN (0x002C)
-#define U300_SYSCON_SBCER_MSPRO_CLK_EN (0x002B)
-#define U300_SYSCON_SBCER_AHB_SUBSYS_BRIDGE_CLK_EN (0x002A)
-#define U300_SYSCON_SBCER_SEMI_CLK_EN (0x0029)
-#define U300_SYSCON_SBCER_XGAM_CLK_EN (0x0028)
-#define U300_SYSCON_SBCER_VIDEO_ENC_CLK_EN (0x0027)
-#define U300_SYSCON_SBCER_NANDIF_CLK_EN (0x0026)
-#define U300_SYSCON_SBCER_EMIF_CLK_EN (0x0025)
-#define U300_SYSCON_SBCER_DMAC_CLK_EN (0x0024)
-#define U300_SYSCON_SBCER_CPU_CLK_EN (0x0023)
-#define U300_SYSCON_SBCER_APEX_CLK_EN (0x0022)
-#define U300_SYSCON_SBCER_AHB_CLK_EN (0x0021)
-#define U300_SYSCON_SBCER_AAIF_CLK_EN (0x0020)
-/* Single block clock disable 16bit (-/W) */
-#define U300_SYSCON_SBCDR (0x0030)
-/* Same values as above for SBCER */
-/* Clock force SLOW peripherals 16bit (R/W) */
-#define U300_SYSCON_CFSR (0x003c)
-#define U300_SYSCON_CFSR_PPM_CLK_FORCE_EN (0x0200)
-#define U300_SYSCON_CFSR_ACC_TMR_CLK_FORCE_EN (0x0100)
-#define U300_SYSCON_CFSR_APP_TMR_CLK_FORCE_EN (0x0080)
-#define U300_SYSCON_CFSR_KEYPAD_CLK_FORCE_EN (0x0020)
-#define U300_SYSCON_CFSR_GPIO_CLK_FORCE_EN (0x0010)
-#define U300_SYSCON_CFSR_EH_CLK_FORCE_EN (0x0008)
-#define U300_SYSCON_CFSR_BTR_CLK_FORCE_EN (0x0004)
-#define U300_SYSCON_CFSR_UART_CLK_FORCE_EN (0x0002)
-#define U300_SYSCON_CFSR_SLOW_BRIDGE_CLK_FORCE_EN (0x0001)
-/* Clock force FAST peripherals 16bit (R/W) */
-#define U300_SYSCON_CFFR (0x40)
-/* Values not defined. Define if you want to use them. */
-/* Clock force the rest of the peripherals 16bit (R/W) */
-#define U300_SYSCON_CFRR (0x44)
-#define U300_SYSCON_CFRR_CDS_CLK_FORCE_EN (0x2000)
-#define U300_SYSCON_CFRR_ISP_CLK_FORCE_EN (0x1000)
-#define U300_SYSCON_CFRR_MSPRO_CLK_FORCE_EN (0x0800)
-#define U300_SYSCON_CFRR_AHB_SUBSYS_BRIDGE_CLK_FORCE_EN (0x0400)
-#define U300_SYSCON_CFRR_SEMI_CLK_FORCE_EN (0x0200)
-#define U300_SYSCON_CFRR_XGAM_CLK_FORCE_EN (0x0100)
-#define U300_SYSCON_CFRR_VIDEO_ENC_CLK_FORCE_EN (0x0080)
-#define U300_SYSCON_CFRR_NANDIF_CLK_FORCE_EN (0x0040)
-#define U300_SYSCON_CFRR_EMIF_CLK_FORCE_EN (0x0020)
-#define U300_SYSCON_CFRR_DMAC_CLK_FORCE_EN (0x0010)
-#define U300_SYSCON_CFRR_CPU_CLK_FORCE_EN (0x0008)
-#define U300_SYSCON_CFRR_APEX_CLK_FORCE_EN (0x0004)
-#define U300_SYSCON_CFRR_AHB_CLK_FORCE_EN (0x0002)
-#define U300_SYSCON_CFRR_AAIF_CLK_FORCE_EN (0x0001)
-/* PLL208 Frequency Control 16bit (R/W) */
-#define U300_SYSCON_PFCR (0x48)
-#define U300_SYSCON_PFCR_DPLL_MULT_NUM (0x000F)
-/* Power Management Control 16bit (R/W) */
-#define U300_SYSCON_PMCR (0x50)
-#define U300_SYSCON_PMCR_DCON_ENABLE (0x0002)
-#define U300_SYSCON_PMCR_PWR_MGNT_ENABLE (0x0001)
-/* Reset Out 16bit (R/W) */
-#define U300_SYSCON_RCR (0x6c)
-#define U300_SYSCON_RCR_RESOUT0_RST_N_DISABLE (0x0001)
-/* EMIF Slew Rate Control 16bit (R/W) */
-#define U300_SYSCON_SRCLR (0x70)
-#define U300_SYSCON_SRCLR_MASK (0x03FF)
-#define U300_SYSCON_SRCLR_VALUE (0x03FF)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_5_B (0x0200)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_5_A (0x0100)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_4_B (0x0080)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_4_A (0x0040)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_3_B (0x0020)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_3_A (0x0010)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_2_B (0x0008)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_2_A (0x0004)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_1_B (0x0002)
-#define U300_SYSCON_SRCLR_EMIF_1_SLRC_1_A (0x0001)
-/* EMIF Clock Control Register 16bit (R/W) */
-#define U300_SYSCON_ECCR (0x0078)
-#define U300_SYSCON_ECCR_MASK (0x000F)
-#define U300_SYSCON_ECCR_EMIF_1_STATIC_CLK_EN_N_DISABLE (0x0008)
-#define U300_SYSCON_ECCR_EMIF_1_RET_OUT_CLK_EN_N_DISABLE (0x0004)
-#define U300_SYSCON_ECCR_EMIF_MEMCLK_RET_EN_N_DISABLE (0x0002)
-#define U300_SYSCON_ECCR_EMIF_SDRCLK_RET_EN_N_DISABLE (0x0001)
-/* MMC/MSPRO frequency divider register 0 16bit (R/W) */
-#define U300_SYSCON_MMF0R (0x90)
-#define U300_SYSCON_MMF0R_MASK (0x00FF)
-#define U300_SYSCON_MMF0R_FREQ_0_HIGH_MASK (0x00F0)
-#define U300_SYSCON_MMF0R_FREQ_0_LOW_MASK (0x000F)
-/* MMC/MSPRO frequency divider register 1 16bit (R/W) */
-#define U300_SYSCON_MMF1R (0x94)
-#define U300_SYSCON_MMF1R_MASK (0x00FF)
-#define U300_SYSCON_MMF1R_FREQ_1_HIGH_MASK (0x00F0)
-#define U300_SYSCON_MMF1R_FREQ_1_LOW_MASK (0x000F)
-/* Clock control for the MMC and MSPRO blocks 16bit (R/W) */
-#define U300_SYSCON_MMCR (0x9C)
-#define U300_SYSCON_MMCR_MASK (0x0003)
-#define U300_SYSCON_MMCR_MMC_FB_CLK_SEL_ENABLE (0x0002)
-#define U300_SYSCON_MMCR_MSPRO_FREQSEL_ENABLE (0x0001)
-/* SYS_0_CLK_CONTROL first clock control 16bit (R/W) */
-#define U300_SYSCON_S0CCR (0x120)
-#define U300_SYSCON_S0CCR_FIELD_MASK (0x43FF)
-#define U300_SYSCON_S0CCR_CLOCK_REQ (0x4000)
-#define U300_SYSCON_S0CCR_CLOCK_REQ_MONITOR (0x2000)
-#define U300_SYSCON_S0CCR_CLOCK_INV (0x0200)
-#define U300_SYSCON_S0CCR_CLOCK_FREQ_MASK (0x01E0)
-#define U300_SYSCON_S0CCR_CLOCK_SELECT_MASK (0x001E)
-#define U300_SYSCON_S0CCR_CLOCK_ENABLE (0x0001)
-#define U300_SYSCON_S0CCR_SEL_MCLK (0x8 << 1)
-#define U300_SYSCON_S0CCR_SEL_ACC_FSM_CLK (0xA << 1)
-#define U300_SYSCON_S0CCR_SEL_PLL60_48_CLK (0xC << 1)
-#define U300_SYSCON_S0CCR_SEL_PLL60_60_CLK (0xD << 1)
-#define U300_SYSCON_S0CCR_SEL_ACC_PLL208_CLK (0xE << 1)
-#define U300_SYSCON_S0CCR_SEL_APP_PLL13_CLK (0x0 << 1)
-#define U300_SYSCON_S0CCR_SEL_APP_FSM_CLK (0x2 << 1)
-#define U300_SYSCON_S0CCR_SEL_RTC_CLK (0x4 << 1)
-#define U300_SYSCON_S0CCR_SEL_APP_PLL208_CLK (0x6 << 1)
-/* SYS_1_CLK_CONTROL second clock control 16 bit (R/W) */
-#define U300_SYSCON_S1CCR (0x124)
-#define U300_SYSCON_S1CCR_FIELD_MASK (0x43FF)
-#define U300_SYSCON_S1CCR_CLOCK_REQ (0x4000)
-#define U300_SYSCON_S1CCR_CLOCK_REQ_MONITOR (0x2000)
-#define U300_SYSCON_S1CCR_CLOCK_INV (0x0200)
-#define U300_SYSCON_S1CCR_CLOCK_FREQ_MASK (0x01E0)
-#define U300_SYSCON_S1CCR_CLOCK_SELECT_MASK (0x001E)
-#define U300_SYSCON_S1CCR_CLOCK_ENABLE (0x0001)
-#define U300_SYSCON_S1CCR_SEL_MCLK (0x8 << 1)
-#define U300_SYSCON_S1CCR_SEL_ACC_FSM_CLK (0xA << 1)
-#define U300_SYSCON_S1CCR_SEL_PLL60_48_CLK (0xC << 1)
-#define U300_SYSCON_S1CCR_SEL_PLL60_60_CLK (0xD << 1)
-#define U300_SYSCON_S1CCR_SEL_ACC_PLL208_CLK (0xE << 1)
-#define U300_SYSCON_S1CCR_SEL_ACC_PLL13_CLK (0x0 << 1)
-#define U300_SYSCON_S1CCR_SEL_APP_FSM_CLK (0x2 << 1)
-#define U300_SYSCON_S1CCR_SEL_RTC_CLK (0x4 << 1)
-#define U300_SYSCON_S1CCR_SEL_APP_PLL208_CLK (0x6 << 1)
-/* SYS_2_CLK_CONTROL third clock control 16 bit (R/W) */
-#define U300_SYSCON_S2CCR (0x128)
-#define U300_SYSCON_S2CCR_FIELD_MASK (0xC3FF)
-#define U300_SYSCON_S2CCR_CLK_STEAL (0x8000)
-#define U300_SYSCON_S2CCR_CLOCK_REQ (0x4000)
-#define U300_SYSCON_S2CCR_CLOCK_REQ_MONITOR (0x2000)
-#define U300_SYSCON_S2CCR_CLOCK_INV (0x0200)
-#define U300_SYSCON_S2CCR_CLOCK_FREQ_MASK (0x01E0)
-#define U300_SYSCON_S2CCR_CLOCK_SELECT_MASK (0x001E)
-#define U300_SYSCON_S2CCR_CLOCK_ENABLE (0x0001)
-#define U300_SYSCON_S2CCR_SEL_MCLK (0x8 << 1)
-#define U300_SYSCON_S2CCR_SEL_ACC_FSM_CLK (0xA << 1)
-#define U300_SYSCON_S2CCR_SEL_PLL60_48_CLK (0xC << 1)
-#define U300_SYSCON_S2CCR_SEL_PLL60_60_CLK (0xD << 1)
-#define U300_SYSCON_S2CCR_SEL_ACC_PLL208_CLK (0xE << 1)
-#define U300_SYSCON_S2CCR_SEL_ACC_PLL13_CLK (0x0 << 1)
-#define U300_SYSCON_S2CCR_SEL_APP_FSM_CLK (0x2 << 1)
-#define U300_SYSCON_S2CCR_SEL_RTC_CLK (0x4 << 1)
-#define U300_SYSCON_S2CCR_SEL_APP_PLL208_CLK (0x6 << 1)
-/* SC_PLL_IRQ_CONTROL 16bit (R/W) */
-#define U300_SYSCON_PICR (0x0130)
-#define U300_SYSCON_PICR_MASK (0x00FF)
-#define U300_SYSCON_PICR_FORCE_PLL208_LOCK_LOW_ENABLE (0x0080)
-#define U300_SYSCON_PICR_FORCE_PLL208_LOCK_HIGH_ENABLE (0x0040)
-#define U300_SYSCON_PICR_FORCE_PLL13_LOCK_LOW_ENABLE (0x0020)
-#define U300_SYSCON_PICR_FORCE_PLL13_LOCK_HIGH_ENABLE (0x0010)
-#define U300_SYSCON_PICR_IRQMASK_PLL13_UNLOCK_ENABLE (0x0008)
-#define U300_SYSCON_PICR_IRQMASK_PLL13_LOCK_ENABLE (0x0004)
-#define U300_SYSCON_PICR_IRQMASK_PLL208_UNLOCK_ENABLE (0x0002)
-#define U300_SYSCON_PICR_IRQMASK_PLL208_LOCK_ENABLE (0x0001)
-/* SC_PLL_IRQ_STATUS 16 bit (R/-) */
-#define U300_SYSCON_PISR (0x0134)
-#define U300_SYSCON_PISR_MASK (0x000F)
-#define U300_SYSCON_PISR_PLL13_UNLOCK_IND (0x0008)
-#define U300_SYSCON_PISR_PLL13_LOCK_IND (0x0004)
-#define U300_SYSCON_PISR_PLL208_UNLOCK_IND (0x0002)
-#define U300_SYSCON_PISR_PLL208_LOCK_IND (0x0001)
-/* SC_PLL_IRQ_CLEAR 16 bit (-/W) */
-#define U300_SYSCON_PICLR (0x0138)
-#define U300_SYSCON_PICLR_MASK (0x000F)
-#define U300_SYSCON_PICLR_RWMASK (0x0000)
-#define U300_SYSCON_PICLR_PLL13_UNLOCK_SC (0x0008)
-#define U300_SYSCON_PICLR_PLL13_LOCK_SC (0x0004)
-#define U300_SYSCON_PICLR_PLL208_UNLOCK_SC (0x0002)
-#define U300_SYSCON_PICLR_PLL208_LOCK_SC (0x0001)
-/* Clock activity observability register 0 */
-#define U300_SYSCON_C0OAR (0x140)
-#define U300_SYSCON_C0OAR_MASK (0xFFFF)
-#define U300_SYSCON_C0OAR_VALUE (0xFFFF)
-#define U300_SYSCON_C0OAR_BT_H_CLK (0x8000)
-#define U300_SYSCON_C0OAR_ASPB_P_CLK (0x4000)
-#define U300_SYSCON_C0OAR_APP_SEMI_H_CLK (0x2000)
-#define U300_SYSCON_C0OAR_APP_SEMI_CLK (0x1000)
-#define U300_SYSCON_C0OAR_APP_MMC_MSPRO_CLK (0x0800)
-#define U300_SYSCON_C0OAR_APP_I2S1_CLK (0x0400)
-#define U300_SYSCON_C0OAR_APP_I2S0_CLK (0x0200)
-#define U300_SYSCON_C0OAR_APP_CPU_CLK (0x0100)
-#define U300_SYSCON_C0OAR_APP_52_CLK (0x0080)
-#define U300_SYSCON_C0OAR_APP_208_CLK (0x0040)
-#define U300_SYSCON_C0OAR_APP_104_CLK (0x0020)
-#define U300_SYSCON_C0OAR_APEX_CLK (0x0010)
-#define U300_SYSCON_C0OAR_AHPB_M_H_CLK (0x0008)
-#define U300_SYSCON_C0OAR_AHB_CLK (0x0004)
-#define U300_SYSCON_C0OAR_AFPB_P_CLK (0x0002)
-#define U300_SYSCON_C0OAR_AAIF_CLK (0x0001)
-/* Clock activity observability register 1 */
-#define U300_SYSCON_C1OAR (0x144)
-#define U300_SYSCON_C1OAR_MASK (0x3FFE)
-#define U300_SYSCON_C1OAR_VALUE (0x3FFE)
-#define U300_SYSCON_C1OAR_NFIF_F_CLK (0x2000)
-#define U300_SYSCON_C1OAR_MSPRO_CLK (0x1000)
-#define U300_SYSCON_C1OAR_MMC_P_CLK (0x0800)
-#define U300_SYSCON_C1OAR_MMC_CLK (0x0400)
-#define U300_SYSCON_C1OAR_KP_P_CLK (0x0200)
-#define U300_SYSCON_C1OAR_I2C1_P_CLK (0x0100)
-#define U300_SYSCON_C1OAR_I2C0_P_CLK (0x0080)
-#define U300_SYSCON_C1OAR_GPIO_CLK (0x0040)
-#define U300_SYSCON_C1OAR_EMIF_MPMC_CLK (0x0020)
-#define U300_SYSCON_C1OAR_EMIF_H_CLK (0x0010)
-#define U300_SYSCON_C1OAR_EVHIST_CLK (0x0008)
-#define U300_SYSCON_C1OAR_PPM_CLK (0x0004)
-#define U300_SYSCON_C1OAR_DMA_CLK (0x0002)
-/* Clock activity observability register 2 */
-#define U300_SYSCON_C2OAR (0x148)
-#define U300_SYSCON_C2OAR_MASK (0x0FFF)
-#define U300_SYSCON_C2OAR_VALUE (0x0FFF)
-#define U300_SYSCON_C2OAR_XGAM_CDI_CLK (0x0800)
-#define U300_SYSCON_C2OAR_XGAM_CLK (0x0400)
-#define U300_SYSCON_C2OAR_VC_H_CLK (0x0200)
-#define U300_SYSCON_C2OAR_VC_CLK (0x0100)
-#define U300_SYSCON_C2OAR_UA_P_CLK (0x0080)
-#define U300_SYSCON_C2OAR_TMR1_CLK (0x0040)
-#define U300_SYSCON_C2OAR_TMR0_CLK (0x0020)
-#define U300_SYSCON_C2OAR_SPI_P_CLK (0x0010)
-#define U300_SYSCON_C2OAR_PCM_I2S1_CORE_CLK (0x0008)
-#define U300_SYSCON_C2OAR_PCM_I2S1_CLK (0x0004)
-#define U300_SYSCON_C2OAR_PCM_I2S0_CORE_CLK (0x0002)
-#define U300_SYSCON_C2OAR_PCM_I2S0_CLK (0x0001)
-
-
-/*
- * The clocking hierarchy currently looks like this.
- * NOTE: the idea is NOT to show how the clocks are routed on the chip!
- * The ideas is to show dependencies, so a clock higher up in the
- * hierarchy has to be on in order for another clock to be on. Now,
- * both CPU and DMA can actually be on top of the hierarchy, and that
- * is not modeled currently. Instead we have the backbone AMBA bus on
- * top. This bus cannot be programmed in any way but conceptually it
- * needs to be active for the bridges and devices to transport data.
- *
- * Please be aware that a few clocks are hw controlled, which mean that
- * the hw itself can turn on/off or change the rate of the clock when
- * needed!
- *
- * AMBA bus
- * |
- * +- CPU
- * +- FSMC NANDIF NAND Flash interface
- * +- SEMI Shared Memory interface
- * +- ISP Image Signal Processor (U335 only)
- * +- CDS (U335 only)
- * +- DMA Direct Memory Access Controller
- * +- AAIF APP/ACC Interface (Mobile Scalable Link, MSL)
- * +- APEX
- * +- VIDEO_ENC AVE2/3 Video Encoder
- * +- XGAM Graphics Accelerator Controller
- * +- AHB
- * |
- * +- ahb:0 AHB Bridge
- * | |
- * | +- ahb:1 INTCON Interrupt controller
- * | +- ahb:3 MSPRO Memory Stick Pro controller
- * | +- ahb:4 EMIF External Memory interface
- * |
- * +- fast:0 FAST bridge
- * | |
- * | +- fast:1 MMCSD MMC/SD card reader controller
- * | +- fast:2 I2S0 PCM I2S channel 0 controller
- * | +- fast:3 I2S1 PCM I2S channel 1 controller
- * | +- fast:4 I2C0 I2C channel 0 controller
- * | +- fast:5 I2C1 I2C channel 1 controller
- * | +- fast:6 SPI SPI controller
- * | +- fast:7 UART1 Secondary UART (U335 only)
- * |
- * +- slow:0 SLOW bridge
- * |
- * +- slow:1 SYSCON (not possible to control)
- * +- slow:2 WDOG Watchdog
- * +- slow:3 UART0 primary UART
- * +- slow:4 TIMER_APP Application timer - used in Linux
- * +- slow:5 KEYPAD controller
- * +- slow:6 GPIO controller
- * +- slow:7 RTC controller
- * +- slow:8 BT Bus Tracer (not used currently)
- * +- slow:9 EH Event Handler (not used currently)
- * +- slow:a TIMER_ACC Access style timer (not used currently)
- * +- slow:b PPM (U335 only, what is that?)
- */
-
-/* Global syscon virtual base */
-static void __iomem *syscon_vbase;
-
-/**
- * struct clk_syscon - U300 syscon clock
- * @hw: corresponding clock hardware entry
- * @hw_ctrld: whether this clock is hardware controlled (for refcount etc)
- * and does not need any magic pokes to be enabled/disabled
- * @reset: state holder, whether this block's reset line is asserted or not
- * @res_reg: reset line enable/disable flag register
- * @res_bit: bit for resetting or taking this consumer out of reset
- * @en_reg: clock line enable/disable flag register
- * @en_bit: bit for enabling/disabling this consumer clock line
- * @clk_val: magic value to poke in the register to enable/disable
- * this one clock
- */
-struct clk_syscon {
- struct clk_hw hw;
- bool hw_ctrld;
- bool reset;
- void __iomem *res_reg;
- u8 res_bit;
- void __iomem *en_reg;
- u8 en_bit;
- u16 clk_val;
-};
-
-#define to_syscon(_hw) container_of(_hw, struct clk_syscon, hw)
-
-static DEFINE_SPINLOCK(syscon_resetreg_lock);
-
-/*
- * Reset control functions. We remember if a block has been
- * taken out of reset and don't remove the reset assertion again
- * and vice versa. Currently we only remove resets so the
- * enablement function is defined out.
- */
-static void syscon_block_reset_enable(struct clk_syscon *sclk)
-{
- unsigned long iflags;
- u16 val;
-
- /* Not all blocks support resetting */
- if (!sclk->res_reg)
- return;
- spin_lock_irqsave(&syscon_resetreg_lock, iflags);
- val = readw(sclk->res_reg);
- val |= BIT(sclk->res_bit);
- writew(val, sclk->res_reg);
- spin_unlock_irqrestore(&syscon_resetreg_lock, iflags);
- sclk->reset = true;
-}
-
-static void syscon_block_reset_disable(struct clk_syscon *sclk)
-{
- unsigned long iflags;
- u16 val;
-
- /* Not all blocks support resetting */
- if (!sclk->res_reg)
- return;
- spin_lock_irqsave(&syscon_resetreg_lock, iflags);
- val = readw(sclk->res_reg);
- val &= ~BIT(sclk->res_bit);
- writew(val, sclk->res_reg);
- spin_unlock_irqrestore(&syscon_resetreg_lock, iflags);
- sclk->reset = false;
-}
-
-static int syscon_clk_prepare(struct clk_hw *hw)
-{
- struct clk_syscon *sclk = to_syscon(hw);
-
- /* If the block is in reset, bring it out */
- if (sclk->reset)
- syscon_block_reset_disable(sclk);
- return 0;
-}
-
-static void syscon_clk_unprepare(struct clk_hw *hw)
-{
- struct clk_syscon *sclk = to_syscon(hw);
-
- /* Please don't force the console into reset */
- if (sclk->clk_val == U300_SYSCON_SBCER_UART_CLK_EN)
- return;
- /* When unpreparing, force block into reset */
- if (!sclk->reset)
- syscon_block_reset_enable(sclk);
-}
-
-static int syscon_clk_enable(struct clk_hw *hw)
-{
- struct clk_syscon *sclk = to_syscon(hw);
-
- /* Don't touch the hardware controlled clocks */
- if (sclk->hw_ctrld)
- return 0;
- /* These cannot be controlled */
- if (sclk->clk_val == 0xFFFFU)
- return 0;
-
- writew(sclk->clk_val, syscon_vbase + U300_SYSCON_SBCER);
- return 0;
-}
-
-static void syscon_clk_disable(struct clk_hw *hw)
-{
- struct clk_syscon *sclk = to_syscon(hw);
-
- /* Don't touch the hardware controlled clocks */
- if (sclk->hw_ctrld)
- return;
- if (sclk->clk_val == 0xFFFFU)
- return;
- /* Please don't disable the console port */
- if (sclk->clk_val == U300_SYSCON_SBCER_UART_CLK_EN)
- return;
-
- writew(sclk->clk_val, syscon_vbase + U300_SYSCON_SBCDR);
-}
-
-static int syscon_clk_is_enabled(struct clk_hw *hw)
-{
- struct clk_syscon *sclk = to_syscon(hw);
- u16 val;
-
- /* If no enable register defined, it's always-on */
- if (!sclk->en_reg)
- return 1;
-
- val = readw(sclk->en_reg);
- val &= BIT(sclk->en_bit);
-
- return val ? 1 : 0;
-}
-
-static u16 syscon_get_perf(void)
-{
- u16 val;
-
- val = readw(syscon_vbase + U300_SYSCON_CCR);
- val &= U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK;
- return val;
-}
-
-static unsigned long
-syscon_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_syscon *sclk = to_syscon(hw);
- u16 perf = syscon_get_perf();
-
- switch (sclk->clk_val) {
- case U300_SYSCON_SBCER_FAST_BRIDGE_CLK_EN:
- case U300_SYSCON_SBCER_I2C0_CLK_EN:
- case U300_SYSCON_SBCER_I2C1_CLK_EN:
- case U300_SYSCON_SBCER_MMC_CLK_EN:
- case U300_SYSCON_SBCER_SPI_CLK_EN:
- /* The FAST clocks have one progression */
- switch (perf) {
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
- return 13000000;
- default:
- return parent_rate; /* 26 MHz */
- }
- case U300_SYSCON_SBCER_DMAC_CLK_EN:
- case U300_SYSCON_SBCER_NANDIF_CLK_EN:
- case U300_SYSCON_SBCER_XGAM_CLK_EN:
- /* AMBA interconnect peripherals */
- switch (perf) {
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
- return 6500000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
- return 26000000;
- default:
- return parent_rate; /* 52 MHz */
- }
- case U300_SYSCON_SBCER_SEMI_CLK_EN:
- case U300_SYSCON_SBCER_EMIF_CLK_EN:
- /* EMIF speeds */
- switch (perf) {
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
- return 13000000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
- return 52000000;
- default:
- return 104000000;
- }
- case U300_SYSCON_SBCER_CPU_CLK_EN:
- /* And the fast CPU clock */
- switch (perf) {
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
- return 13000000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
- return 52000000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH:
- return 104000000;
- default:
- return parent_rate; /* 208 MHz */
- }
- default:
- /*
- * The SLOW clocks and default just inherit the rate of
- * their parent (typically PLL13 13 MHz).
- */
- return parent_rate;
- }
-}
-
-static long
-syscon_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clk_syscon *sclk = to_syscon(hw);
-
- if (sclk->clk_val != U300_SYSCON_SBCER_CPU_CLK_EN)
- return *prate;
- /* We really only support setting the rate of the CPU clock */
- if (rate <= 13000000)
- return 13000000;
- if (rate <= 52000000)
- return 52000000;
- if (rate <= 104000000)
- return 104000000;
- return 208000000;
-}
-
-static int syscon_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_syscon *sclk = to_syscon(hw);
- u16 val;
-
- /* We only support setting the rate of the CPU clock */
- if (sclk->clk_val != U300_SYSCON_SBCER_CPU_CLK_EN)
- return -EINVAL;
- switch (rate) {
- case 13000000:
- val = U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER;
- break;
- case 52000000:
- val = U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE;
- break;
- case 104000000:
- val = U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH;
- break;
- case 208000000:
- val = U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST;
- break;
- default:
- return -EINVAL;
- }
- val |= readw(syscon_vbase + U300_SYSCON_CCR) &
- ~U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK ;
- writew(val, syscon_vbase + U300_SYSCON_CCR);
- return 0;
-}
-
-static const struct clk_ops syscon_clk_ops = {
- .prepare = syscon_clk_prepare,
- .unprepare = syscon_clk_unprepare,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .is_enabled = syscon_clk_is_enabled,
- .recalc_rate = syscon_clk_recalc_rate,
- .round_rate = syscon_clk_round_rate,
- .set_rate = syscon_clk_set_rate,
-};
-
-static struct clk_hw * __init
-syscon_clk_register(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
- bool hw_ctrld,
- void __iomem *res_reg, u8 res_bit,
- void __iomem *en_reg, u8 en_bit,
- u16 clk_val)
-{
- struct clk_hw *hw;
- struct clk_syscon *sclk;
- struct clk_init_data init;
- int ret;
-
- sclk = kzalloc(sizeof(*sclk), GFP_KERNEL);
- if (!sclk)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &syscon_clk_ops;
- init.flags = flags;
- init.parent_names = (parent_name ? &parent_name : NULL);
- init.num_parents = (parent_name ? 1 : 0);
- sclk->hw.init = &init;
- sclk->hw_ctrld = hw_ctrld;
- /* Assume the block is in reset at registration */
- sclk->reset = true;
- sclk->res_reg = res_reg;
- sclk->res_bit = res_bit;
- sclk->en_reg = en_reg;
- sclk->en_bit = en_bit;
- sclk->clk_val = clk_val;
-
- hw = &sclk->hw;
- ret = clk_hw_register(dev, hw);
- if (ret) {
- kfree(sclk);
- hw = ERR_PTR(ret);
- }
-
- return hw;
-}
-
-#define U300_CLK_TYPE_SLOW 0
-#define U300_CLK_TYPE_FAST 1
-#define U300_CLK_TYPE_REST 2
-
-/**
- * struct u300_clock - defines the bits and pieces for a certain clock
- * @type: the clock type, slow fast or rest
- * @id: the bit in the slow/fast/rest register for this clock
- * @hw_ctrld: whether the clock is hardware controlled
- * @clk_val: a value to poke in the one-write enable/disable registers
- */
-struct u300_clock {
- u8 type;
- u8 id;
- bool hw_ctrld;
- u16 clk_val;
-};
-
-static struct u300_clock const u300_clk_lookup[] __initconst = {
- {
- .type = U300_CLK_TYPE_REST,
- .id = 3,
- .hw_ctrld = true,
- .clk_val = U300_SYSCON_SBCER_CPU_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_REST,
- .id = 4,
- .hw_ctrld = true,
- .clk_val = U300_SYSCON_SBCER_DMAC_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_REST,
- .id = 5,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_EMIF_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_REST,
- .id = 6,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_NANDIF_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_REST,
- .id = 8,
- .hw_ctrld = true,
- .clk_val = U300_SYSCON_SBCER_XGAM_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_REST,
- .id = 9,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_SEMI_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_REST,
- .id = 10,
- .hw_ctrld = true,
- .clk_val = U300_SYSCON_SBCER_AHB_SUBSYS_BRIDGE_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_REST,
- .id = 12,
- .hw_ctrld = false,
- /* INTCON: cannot be enabled, just taken out of reset */
- .clk_val = 0xFFFFU,
- },
- {
- .type = U300_CLK_TYPE_FAST,
- .id = 0,
- .hw_ctrld = true,
- .clk_val = U300_SYSCON_SBCER_FAST_BRIDGE_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_FAST,
- .id = 1,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_I2C0_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_FAST,
- .id = 2,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_I2C1_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_FAST,
- .id = 5,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_MMC_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_FAST,
- .id = 6,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_SPI_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_SLOW,
- .id = 0,
- .hw_ctrld = true,
- .clk_val = U300_SYSCON_SBCER_SLOW_BRIDGE_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_SLOW,
- .id = 1,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_UART_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_SLOW,
- .id = 4,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_GPIO_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_SLOW,
- .id = 6,
- .hw_ctrld = true,
- /* No clock enable register bit */
- .clk_val = 0xFFFFU,
- },
- {
- .type = U300_CLK_TYPE_SLOW,
- .id = 7,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_APP_TMR_CLK_EN,
- },
- {
- .type = U300_CLK_TYPE_SLOW,
- .id = 8,
- .hw_ctrld = false,
- .clk_val = U300_SYSCON_SBCER_ACC_TMR_CLK_EN,
- },
-};
-
-static void __init of_u300_syscon_clk_init(struct device_node *np)
-{
- struct clk_hw *hw = ERR_PTR(-EINVAL);
- const char *clk_name = np->name;
- const char *parent_name;
- void __iomem *res_reg;
- void __iomem *en_reg;
- u32 clk_type;
- u32 clk_id;
- int i;
-
- if (of_property_read_u32(np, "clock-type", &clk_type)) {
- pr_err("%s: syscon clock \"%s\" missing clock-type property\n",
- __func__, clk_name);
- return;
- }
- if (of_property_read_u32(np, "clock-id", &clk_id)) {
- pr_err("%s: syscon clock \"%s\" missing clock-id property\n",
- __func__, clk_name);
- return;
- }
- parent_name = of_clk_get_parent_name(np, 0);
-
- switch (clk_type) {
- case U300_CLK_TYPE_SLOW:
- res_reg = syscon_vbase + U300_SYSCON_RSR;
- en_reg = syscon_vbase + U300_SYSCON_CESR;
- break;
- case U300_CLK_TYPE_FAST:
- res_reg = syscon_vbase + U300_SYSCON_RFR;
- en_reg = syscon_vbase + U300_SYSCON_CEFR;
- break;
- case U300_CLK_TYPE_REST:
- res_reg = syscon_vbase + U300_SYSCON_RRR;
- en_reg = syscon_vbase + U300_SYSCON_CERR;
- break;
- default:
- pr_err("unknown clock type %x specified\n", clk_type);
- return;
- }
-
- for (i = 0; i < ARRAY_SIZE(u300_clk_lookup); i++) {
- const struct u300_clock *u3clk = &u300_clk_lookup[i];
-
- if (u3clk->type == clk_type && u3clk->id == clk_id)
- hw = syscon_clk_register(NULL, clk_name, parent_name,
- 0, u3clk->hw_ctrld,
- res_reg, u3clk->id,
- en_reg, u3clk->id,
- u3clk->clk_val);
- }
-
- if (!IS_ERR(hw)) {
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-
- /*
-		 * A few system clocks have no corresponding device node in
-		 * the device tree, so for now register clkdev lookups for
-		 * these three clocks here.
- */
- if (clk_type == U300_CLK_TYPE_REST && clk_id == 5)
- clk_hw_register_clkdev(hw, NULL, "pl172");
- if (clk_type == U300_CLK_TYPE_REST && clk_id == 9)
- clk_hw_register_clkdev(hw, NULL, "semi");
- if (clk_type == U300_CLK_TYPE_REST && clk_id == 12)
- clk_hw_register_clkdev(hw, NULL, "intcon");
- }
-}
-
-/**
- * struct clk_mclk - U300 MCLK clock (MMC/SD clock)
- * @hw: corresponding clock hardware entry
- * @is_mspro: if this is the memory stick clock rather than MMC/SD
- */
-struct clk_mclk {
- struct clk_hw hw;
- bool is_mspro;
-};
-
-#define to_mclk(_hw) container_of(_hw, struct clk_mclk, hw)
-
-static int mclk_clk_prepare(struct clk_hw *hw)
-{
- struct clk_mclk *mclk = to_mclk(hw);
- u16 val;
-
- /* The MMC and MSPRO clocks need some special set-up */
- if (!mclk->is_mspro) {
- /* Set default MMC clock divisor to 18.9 MHz */
- writew(0x0054U, syscon_vbase + U300_SYSCON_MMF0R);
- val = readw(syscon_vbase + U300_SYSCON_MMCR);
- /* Disable the MMC feedback clock */
- val &= ~U300_SYSCON_MMCR_MMC_FB_CLK_SEL_ENABLE;
- /* Disable MSPRO frequency */
- val &= ~U300_SYSCON_MMCR_MSPRO_FREQSEL_ENABLE;
- writew(val, syscon_vbase + U300_SYSCON_MMCR);
- } else {
- val = readw(syscon_vbase + U300_SYSCON_MMCR);
- /* Disable the MMC feedback clock */
- val &= ~U300_SYSCON_MMCR_MMC_FB_CLK_SEL_ENABLE;
- /* Enable MSPRO frequency */
- val |= U300_SYSCON_MMCR_MSPRO_FREQSEL_ENABLE;
- writew(val, syscon_vbase + U300_SYSCON_MMCR);
- }
-
- return 0;
-}
-
-static unsigned long
-mclk_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- u16 perf = syscon_get_perf();
-
- switch (perf) {
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
- /*
- * Here, the 208 MHz PLL gets shut down and the always
- * on 13 MHz PLL used for RTC etc kicks into use
- * instead.
- */
- return 13000000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST:
- {
- /*
- * This clock is under program control. The register is
- * divided in two nybbles, bit 7-4 gives cycles-1 to count
- * high, bit 3-0 gives cycles-1 to count low. Distribute
- * these with no more than 1 cycle difference between
- * low and high and add low and high to get the actual
- * divisor. The base PLL is 208 MHz. Writing 0x00 will
- * divide by 1 and 1 so the highest frequency possible
- * is 104 MHz.
- *
- * e.g. 0x54 =>
- * f = 208 / ((5+1) + (4+1)) = 208 / 11 = 18.9 MHz
- */
- u16 val = readw(syscon_vbase + U300_SYSCON_MMF0R) &
- U300_SYSCON_MMF0R_MASK;
- switch (val) {
- case 0x0054:
- return 18900000;
- case 0x0044:
- return 20800000;
- case 0x0043:
- return 23100000;
- case 0x0033:
- return 26000000;
- case 0x0032:
- return 29700000;
- case 0x0022:
- return 34700000;
- case 0x0021:
- return 41600000;
- case 0x0011:
- return 52000000;
- case 0x0000:
- return 104000000;
- default:
- break;
- }
- }
- default:
- break;
- }
- return parent_rate;
-}
-
-static long
-mclk_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- if (rate <= 18900000)
- return 18900000;
- if (rate <= 20800000)
- return 20800000;
- if (rate <= 23100000)
- return 23100000;
- if (rate <= 26000000)
- return 26000000;
- if (rate <= 29700000)
- return 29700000;
- if (rate <= 34700000)
- return 34700000;
- if (rate <= 41600000)
- return 41600000;
- /* Highest rate */
- return 52000000;
-}
-
-static int mclk_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- u16 val;
- u16 reg;
-
- switch (rate) {
- case 18900000:
- val = 0x0054;
- break;
- case 20800000:
- val = 0x0044;
- break;
- case 23100000:
- val = 0x0043;
- break;
- case 26000000:
- val = 0x0033;
- break;
- case 29700000:
- val = 0x0032;
- break;
- case 34700000:
- val = 0x0022;
- break;
- case 41600000:
- val = 0x0021;
- break;
- case 52000000:
- val = 0x0011;
- break;
- case 104000000:
- val = 0x0000;
- break;
- default:
- return -EINVAL;
- }
-
- reg = readw(syscon_vbase + U300_SYSCON_MMF0R) &
- ~U300_SYSCON_MMF0R_MASK;
- writew(reg | val, syscon_vbase + U300_SYSCON_MMF0R);
- return 0;
-}
-
-static const struct clk_ops mclk_ops = {
- .prepare = mclk_clk_prepare,
- .recalc_rate = mclk_clk_recalc_rate,
- .round_rate = mclk_clk_round_rate,
- .set_rate = mclk_clk_set_rate,
-};
-
-static struct clk_hw * __init
-mclk_clk_register(struct device *dev, const char *name,
- const char *parent_name, bool is_mspro)
-{
- struct clk_hw *hw;
- struct clk_mclk *mclk;
- struct clk_init_data init;
- int ret;
-
- mclk = kzalloc(sizeof(*mclk), GFP_KERNEL);
- if (!mclk)
- return ERR_PTR(-ENOMEM);
-
-	init.name = name;
- init.ops = &mclk_ops;
- init.flags = 0;
- init.parent_names = (parent_name ? &parent_name : NULL);
- init.num_parents = (parent_name ? 1 : 0);
- mclk->hw.init = &init;
- mclk->is_mspro = is_mspro;
-
- hw = &mclk->hw;
- ret = clk_hw_register(dev, hw);
- if (ret) {
- kfree(mclk);
- hw = ERR_PTR(ret);
- }
-
- return hw;
-}
-
-static void __init of_u300_syscon_mclk_init(struct device_node *np)
-{
- struct clk_hw *hw;
- const char *clk_name = np->name;
- const char *parent_name;
-
- parent_name = of_clk_get_parent_name(np, 0);
- hw = mclk_clk_register(NULL, clk_name, parent_name, false);
- if (!IS_ERR(hw))
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-
-static const struct of_device_id u300_clk_match[] __initconst = {
- {
- .compatible = "fixed-clock",
- .data = of_fixed_clk_setup,
- },
- {
- .compatible = "fixed-factor-clock",
- .data = of_fixed_factor_clk_setup,
- },
- {
- .compatible = "stericsson,u300-syscon-clk",
- .data = of_u300_syscon_clk_init,
- },
- {
- .compatible = "stericsson,u300-syscon-mclk",
- .data = of_u300_syscon_mclk_init,
- },
- {}
-};
-
-
-void __init u300_clk_init(void __iomem *base)
-{
- u16 val;
-
- syscon_vbase = base;
-
- /* Set system to run at PLL208, max performance, a known state. */
- val = readw(syscon_vbase + U300_SYSCON_CCR);
- val &= ~U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK;
- writew(val, syscon_vbase + U300_SYSCON_CCR);
- /* Wait for the PLL208 to lock if not locked in yet */
- while (!(readw(syscon_vbase + U300_SYSCON_CSR) &
- U300_SYSCON_CSR_PLL208_LOCK_IND));
-
- /* Power management enable */
- val = readw(syscon_vbase + U300_SYSCON_PMCR);
- val |= U300_SYSCON_PMCR_PWR_MGNT_ENABLE;
- writew(val, syscon_vbase + U300_SYSCON_PMCR);
-
- of_clk_init(u300_clk_match);
-}
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 43db67337bc0..344cd6c61188 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -759,6 +759,63 @@ static int vc5_update_power(struct device_node *np_output,
return 0;
}
+static int vc5_map_cap_value(u32 femtofarads)
+{
+ int mapped_value;
+
+ /*
+ * The datasheet explicitly states 9000 - 25000 with 0.5pF
+ * steps, but the Programmer's guide shows the steps are 0.430pF.
+	 * After getting feedback from Renesas, the 0.5pF steps were the
+	 * goal, but 0.430pF (430 femtofarads) is the actual step size.
+	 * Because of this, the actual range goes up to 22760 instead of 25000.
+ */
+ if (femtofarads < 9000 || femtofarads > 22760)
+ return -EINVAL;
+
+ /*
+ * The Programmer's guide shows XTAL[5:0] but in reality,
+ * XTAL[0] and XTAL[1] are both LSB which makes the math
+	 * strange. With clarification from Renesas, setting the
+	 * value is simpler if XTAL[0] is ignored.
+ */
+ mapped_value = DIV_ROUND_CLOSEST(femtofarads - 9000, 430);
+
+ /*
+ * Since the calculation ignores XTAL[0], there is one
+ * special case where mapped_value = 32. In reality, this means
+ * the real mapped value should be 111111b. In other cases,
+ * the mapped_value needs to be shifted 1 to the left.
+ */
+ if (mapped_value > 31)
+ mapped_value = 0x3f;
+ else
+ mapped_value <<= 1;
+
+ return mapped_value;
+}
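For a concrete feel of the mapping implemented above, a purely illustrative worked example (the 12000 fF input is an assumption, not a value from the patch):

	DIV_ROUND_CLOSEST(12000 - 9000, 430) = 7	/* 7 <= 31, so no special case */
	7 << 1                               = 0x0e	/* XTAL[0] ignored */

vc5_update_cap_load() below then shifts this by two more bits before the regmap_update_bits() call, so bits [7:2] of VC5_XTAL_X1_LOAD_CAP and VC5_XTAL_X2_LOAD_CAP end up holding 0x0e (a register value of 0x38 within the unmasked field).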
+static int vc5_update_cap_load(struct device_node *node, struct vc5_driver_data *vc5)
+{
+ u32 value;
+ int mapped_value;
+
+ if (!of_property_read_u32(node, "idt,xtal-load-femtofarads", &value)) {
+ mapped_value = vc5_map_cap_value(value);
+ if (mapped_value < 0)
+ return mapped_value;
+
+ /*
+ * The mapped_value is really the high 6 bits of
+ * VC5_XTAL_X1_LOAD_CAP and VC5_XTAL_X2_LOAD_CAP, so
+ * shift the value 2 places.
+ */
+ regmap_update_bits(vc5->regmap, VC5_XTAL_X1_LOAD_CAP, ~0x03, mapped_value << 2);
+ regmap_update_bits(vc5->regmap, VC5_XTAL_X2_LOAD_CAP, ~0x03, mapped_value << 2);
+ }
+
+ return 0;
+}
+
static int vc5_update_slew(struct device_node *np_output,
struct vc5_out_data *clk_out)
{
@@ -884,6 +941,13 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
return -EINVAL;
}
+ /* Configure Optional Loading Capacitance for external XTAL */
+ if (!(vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL)) {
+ ret = vc5_update_cap_load(client->dev.of_node, vc5);
+ if (ret)
+ goto err_clk_register;
+ }
+
init.name = kasprintf(GFP_KERNEL, "%pOFn.mux", client->dev.of_node);
init.ops = &vc5_mux_ops;
init.flags = 0;
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 3fd53057c01f..857217cbcef8 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -206,17 +206,16 @@ static void xgene_pcppllclk_init(struct device_node *np)
* @hw: handle between common and hardware-specific interfaces
* @reg: register containing the fractional scale multiplier (scaler)
* @shift: shift to the unit bit field
+ * @mask: mask to the unit bit field
* @denom: 1/denominator unit
* @lock: register lock
- * Flags:
- * XGENE_CLK_PMD_SCALE_INVERTED - By default the scaler is the value read
+ * @flags: XGENE_CLK_PMD_SCALE_INVERTED - By default the scaler is the value read
* from the register plus one. For example,
* 0 for (0 + 1) / denom,
* 1 for (1 + 1) / denom and etc.
* If this flag is set, it is
* 0 for (denom - 0) / denom,
* 1 for (denom - 1) / denom and etc.
- *
*/
struct xgene_clk_pmd {
struct clk_hw hw;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 8c1d04db990d..5052541a0986 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1164,6 +1164,27 @@ int clk_enable(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_enable);
+/**
+ * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
+ * @clk: clock source
+ *
+ * Returns true if clk_prepare() implicitly enables the clock, effectively
+ * making clk_enable()/clk_disable() no-ops, false otherwise.
+ *
+ * This is of interest mainly to power management code where actually
+ * disabling the clock also requires unpreparing it to have any material
+ * effect.
+ *
+ * Regardless of the value returned here, the caller must always invoke
+ * clk_enable() or clk_prepare_enable() and their counterparts so that the
+ * usage counts stay correct.
+ */
+bool clk_is_enabled_when_prepared(struct clk *clk)
+{
+ return clk && !(clk->core->ops->enable && clk->core->ops->disable);
+}
+EXPORT_SYMBOL_GPL(clk_is_enabled_when_prepared);
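A minimal, hypothetical consumer sketch of the helper above; the driver, its private struct and the suspend hook are invented for illustration and are not part of this patch:

	#include <linux/clk.h>
	#include <linux/device.h>

	/* "foo_priv" and foo_suspend() are assumptions, for illustration only. */
	struct foo_priv {
		struct clk *clk;
	};

	static int foo_suspend(struct device *dev)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);

		clk_disable(priv->clk);

		/*
		 * When prepare is what really gates this clock, clk_disable()
		 * alone saves nothing; unprepare it as well (and re-prepare it
		 * on resume).
		 */
		if (clk_is_enabled_when_prepared(priv->clk))
			clk_unprepare(priv->clk);

		return 0;
	}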
+
static int clk_core_prepare_enable(struct clk_core *core)
{
int ret;
@@ -4555,6 +4576,8 @@ int of_clk_add_provider(struct device_node *np,
if (ret < 0)
of_clk_del_provider(np);
+ fwnode_dev_initialized(&np->fwnode, true);
+
return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);
@@ -4672,6 +4695,7 @@ void of_clk_del_provider(struct device_node *np)
list_for_each_entry(cp, &of_clk_providers, link) {
if (cp->node == np) {
list_del(&cp->link);
+ fwnode_dev_initialized(&np->fwnode, false);
of_node_put(cp->node);
kfree(cp);
break;
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 3061896503f3..47d9ec3abd2f 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -6,8 +6,6 @@ config MXC_CLK
config MXC_CLK_SCU
tristate
- depends on ARCH_MXC
- depends on IMX_SCU && HAVE_ARM_SMCCC
config CLK_IMX1
def_bool SOC_IMX1
diff --git a/drivers/clk/imx/clk-imx31.c b/drivers/clk/imx/clk-imx31.c
index 7b13fb57d842..c44e18c6f63f 100644
--- a/drivers/clk/imx/clk-imx31.c
+++ b/drivers/clk/imx/clk-imx31.c
@@ -51,16 +51,6 @@ enum mx31_clks {
static struct clk *clk[clk_max];
static struct clk_onecell_data clk_data;
-static struct clk ** const uart_clks[] __initconst = {
- &clk[ipg],
- &clk[uart1_gate],
- &clk[uart2_gate],
- &clk[uart3_gate],
- &clk[uart4_gate],
- &clk[uart5_gate],
- NULL
-};
-
static void __init _mx31_clocks_init(void __iomem *base, unsigned long fref)
{
clk[dummy] = imx_clk_fixed("dummy", 0);
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index b2ff187cedab..521d6136d22c 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -338,10 +338,10 @@ static void init_ldb_clks(struct device_node *np, void __iomem *ccm_base)
of_assigned_ldb_sels(np, &sel[0][3], &sel[1][3]);
for (i = 0; i < 2; i++) {
- /* Warn if a glitch might have been introduced already */
+ /* Print a notice if a glitch might have been introduced already */
if (sel[i][0] != 3) {
- pr_warn("ccm: ldb_di%d_sel already changed from reset value: %d\n",
- i, sel[i][0]);
+ pr_notice("ccm: possible glitch: ldb_di%d_sel already changed from reset value: %d\n",
+ i, sel[i][0]);
}
if (sel[i][0] == sel[i][3])
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index 2f9361946a0e..29eab05c9068 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -6,6 +6,7 @@
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/clk/imx.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 7c905861af5d..6a01eec36dd0 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -288,6 +288,11 @@ static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "dummy", "
static const char *imx8mm_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m", "sys_pll2_166m",
"sys_pll3_out", "audio_pll1_out", "video_pll1_out", "osc_32k", };
+static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "dummy", "dummy", "gpu_pll_out", "vpu_pll_out",
+ "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3",
+ "dummy", "dummy", "osc_24m", "dummy", "osc_32k"};
+
static struct clk_hw_onecell_data *clk_hw_data;
static struct clk_hw **hws;
@@ -410,6 +415,13 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
hws[IMX8MM_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
hws[IMX8MM_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
+ hws[IMX8MM_CLK_CLKOUT1_SEL] = imx_clk_hw_mux("clkout1_sel", base + 0x128, 4, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
+ hws[IMX8MM_CLK_CLKOUT1_DIV] = imx_clk_hw_divider("clkout1_div", "clkout1_sel", base + 0x128, 0, 4);
+ hws[IMX8MM_CLK_CLKOUT1] = imx_clk_hw_gate("clkout1", "clkout1_div", base + 0x128, 8);
+ hws[IMX8MM_CLK_CLKOUT2_SEL] = imx_clk_hw_mux("clkout2_sel", base + 0x128, 20, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
+ hws[IMX8MM_CLK_CLKOUT2_DIV] = imx_clk_hw_divider("clkout2_div", "clkout2_sel", base + 0x128, 16, 4);
+ hws[IMX8MM_CLK_CLKOUT2] = imx_clk_hw_gate("clkout2", "clkout2_div", base + 0x128, 24);
+
np = dev->of_node;
base = devm_platform_ioremap_resource(pdev, 0);
if (WARN_ON(IS_ERR(base)))
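For readers without the i.MX8MM reference manual, the six CLKO registrations a few lines above imply the following field layout in the register at base + 0x128; the macro names here are invented purely for illustration:

	#define ANAMIX_PLL_MNIT_CTL	0x128		/* shared by both CLKO outputs   */
	#define CLKO1_DIV		GENMASK(3, 0)	/* imx_clk_hw_divider(.., 0, 4)  */
	#define CLKO1_SEL		GENMASK(7, 4)	/* imx_clk_hw_mux(.., 4, 4)      */
	#define CLKO1_CKE		BIT(8)		/* imx_clk_hw_gate(.., 8)        */
	#define CLKO2_DIV		GENMASK(19, 16)	/* imx_clk_hw_divider(.., 16, 4) */
	#define CLKO2_SEL		GENMASK(23, 20)	/* imx_clk_hw_mux(.., 20, 4)     */
	#define CLKO2_CKE		BIT(24)		/* imx_clk_hw_gate(.., 24)       */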
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 3c21db942d5b..324c5fd0aa04 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -281,6 +281,11 @@ static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sy
"sys_pll2_166m", "sys_pll3_out", "audio_pll1_out",
"video_pll1_out", "osc_32k", };
+static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ "dummy", "dummy", "gpu_pll_out", "dummy",
+ "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3",
+ "dummy", "dummy", "osc_24m", "dummy", "osc_32k"};
+
static struct clk_hw_onecell_data *clk_hw_data;
static struct clk_hw **hws;
@@ -405,6 +410,13 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
hws[IMX8MN_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
+ hws[IMX8MN_CLK_CLKOUT1_SEL] = imx_clk_hw_mux("clkout1_sel", base + 0x128, 4, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
+ hws[IMX8MN_CLK_CLKOUT1_DIV] = imx_clk_hw_divider("clkout1_div", "clkout1_sel", base + 0x128, 0, 4);
+ hws[IMX8MN_CLK_CLKOUT1] = imx_clk_hw_gate("clkout1", "clkout1_div", base + 0x128, 8);
+ hws[IMX8MN_CLK_CLKOUT2_SEL] = imx_clk_hw_mux("clkout2_sel", base + 0x128, 20, 4, clkout_sels, ARRAY_SIZE(clkout_sels));
+ hws[IMX8MN_CLK_CLKOUT2_DIV] = imx_clk_hw_divider("clkout2_div", "clkout2_sel", base + 0x128, 16, 4);
+ hws[IMX8MN_CLK_CLKOUT2] = imx_clk_hw_gate("clkout2", "clkout2_div", base + 0x128, 24);
+
np = dev->of_node;
base = devm_platform_ioremap_resource(pdev, 0);
if (WARN_ON(IS_ERR(base))) {
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index 779ea69e639c..4dd4ae9d022b 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -270,6 +270,14 @@ static const char * const imx8mq_clko1_sels[] = {"osc_25m", "sys1_pll_800m", "os
static const char * const imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_400m", "sys2_pll_166m",
"sys3_pll_out", "audio_pll1_out", "video_pll1_out", "ckil", };
+static const char * const pllout_monitor_sels[] = {"osc_25m", "osc_27m", "dummy", "dummy", "ckil",
+ "audio_pll1_out_monitor", "audio_pll2_out_monitor",
+ "video_pll1_out_monitor", "gpu_pll_out_monitor",
+ "vpu_pll_out_monitor", "arm_pll_out_monitor",
+ "sys_pll1_out_monitor", "sys_pll2_out_monitor",
+ "sys_pll3_out_monitor", "dram_pll_out_monitor",
+ "video_pll2_out_monitor", };
+
static struct clk_hw_onecell_data *clk_hw_data;
static struct clk_hw **hws;
@@ -399,6 +407,20 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
hws[IMX8MQ_SYS2_PLL_500M] = imx_clk_hw_fixed_factor("sys2_pll_500m", "sys2_pll_500m_cg", 1, 2);
hws[IMX8MQ_SYS2_PLL_1000M] = imx_clk_hw_fixed_factor("sys2_pll_1000m", "sys2_pll_1000m_cg", 1, 1);
+ hws[IMX8MQ_CLK_MON_AUDIO_PLL1_DIV] = imx_clk_hw_divider("audio_pll1_out_monitor", "audio_pll1_bypass", base + 0x78, 0, 3);
+ hws[IMX8MQ_CLK_MON_AUDIO_PLL2_DIV] = imx_clk_hw_divider("audio_pll2_out_monitor", "audio_pll2_bypass", base + 0x78, 4, 3);
+ hws[IMX8MQ_CLK_MON_VIDEO_PLL1_DIV] = imx_clk_hw_divider("video_pll1_out_monitor", "video_pll1_bypass", base + 0x78, 8, 3);
+ hws[IMX8MQ_CLK_MON_GPU_PLL_DIV] = imx_clk_hw_divider("gpu_pll_out_monitor", "gpu_pll_bypass", base + 0x78, 12, 3);
+ hws[IMX8MQ_CLK_MON_VPU_PLL_DIV] = imx_clk_hw_divider("vpu_pll_out_monitor", "vpu_pll_bypass", base + 0x78, 16, 3);
+ hws[IMX8MQ_CLK_MON_ARM_PLL_DIV] = imx_clk_hw_divider("arm_pll_out_monitor", "arm_pll_bypass", base + 0x78, 20, 3);
+ hws[IMX8MQ_CLK_MON_SYS_PLL1_DIV] = imx_clk_hw_divider("sys_pll1_out_monitor", "sys1_pll_out", base + 0x7c, 0, 3);
+ hws[IMX8MQ_CLK_MON_SYS_PLL2_DIV] = imx_clk_hw_divider("sys_pll2_out_monitor", "sys2_pll_out", base + 0x7c, 4, 3);
+ hws[IMX8MQ_CLK_MON_SYS_PLL3_DIV] = imx_clk_hw_divider("sys_pll3_out_monitor", "sys3_pll_out", base + 0x7c, 8, 3);
+ hws[IMX8MQ_CLK_MON_DRAM_PLL_DIV] = imx_clk_hw_divider("dram_pll_out_monitor", "dram_pll_out", base + 0x7c, 12, 3);
+ hws[IMX8MQ_CLK_MON_VIDEO_PLL2_DIV] = imx_clk_hw_divider("video_pll2_out_monitor", "video2_pll_out", base + 0x7c, 16, 3);
+ hws[IMX8MQ_CLK_MON_SEL] = imx_clk_hw_mux("pllout_monitor_sel", base + 0x74, 0, 4, pllout_monitor_sels, ARRAY_SIZE(pllout_monitor_sels));
+ hws[IMX8MQ_CLK_MON_CLK2_OUT] = imx_clk_hw_gate("pllout_monitor_clk2", "pllout_monitor_sel", base + 0x74, 4);
+
np = dev->of_node;
base = devm_platform_ioremap_resource(pdev, 0);
if (WARN_ON(IS_ERR(base)))
diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
index 5b3d4ede7c7c..fbf1170c09ed 100644
--- a/drivers/clk/imx/clk-imx8qxp.c
+++ b/drivers/clk/imx/clk-imx8qxp.c
@@ -17,6 +17,14 @@
#include <dt-bindings/clock/imx8-clock.h>
#include <dt-bindings/firmware/imx/rsrc.h>
+static const char *dc0_sels[] = {
+ "clk_dummy",
+ "clk_dummy",
+ "dc0_pll0_clk",
+ "dc0_pll1_clk",
+ "dc0_bypass0_clk",
+};
+
static int imx8qxp_clk_probe(struct platform_device *pdev)
{
struct device_node *ccm_node = pdev->dev.of_node;
@@ -115,12 +123,26 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
clks[IMX_CONN_USB2_LPM_CLK] = imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC, clk_cells);
/* Display controller SS */
- clks[IMX_DC0_DISP0_CLK] = imx_clk_scu("dc0_disp0_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0, clk_cells);
- clks[IMX_DC0_DISP1_CLK] = imx_clk_scu("dc0_disp1_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1, clk_cells);
+ clks[IMX_DC0_DISP0_CLK] = imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0, clk_cells);
+ clks[IMX_DC0_DISP1_CLK] = imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1, clk_cells);
+ clks[IMX_DC0_PLL0_CLK] = imx_clk_scu("dc0_pll0_clk", IMX_SC_R_DC_0_PLL_0, IMX_SC_PM_CLK_PLL, clk_cells);
+ clks[IMX_DC0_PLL1_CLK] = imx_clk_scu("dc0_pll1_clk", IMX_SC_R_DC_0_PLL_1, IMX_SC_PM_CLK_PLL, clk_cells);
+ clks[IMX_DC0_BYPASS0_CLK] = imx_clk_scu("dc0_bypass0_clk", IMX_SC_R_DC_0_VIDEO0, IMX_SC_PM_CLK_BYPASS, clk_cells);
+ clks[IMX_DC0_BYPASS1_CLK] = imx_clk_scu("dc0_bypass1_clk", IMX_SC_R_DC_0_VIDEO1, IMX_SC_PM_CLK_BYPASS, clk_cells);
/* MIPI-LVDS SS */
+ clks[IMX_MIPI0_LVDS_PIXEL_CLK] = imx_clk_scu("mipi0_lvds_pixel_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC2, clk_cells);
+ clks[IMX_MIPI0_LVDS_BYPASS_CLK] = imx_clk_scu("mipi0_lvds_bypass_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_BYPASS, clk_cells);
+ clks[IMX_MIPI0_LVDS_PHY_CLK] = imx_clk_scu("mipi0_lvds_phy_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC3, clk_cells);
clks[IMX_MIPI0_I2C0_CLK] = imx_clk_scu("mipi0_i2c0_clk", IMX_SC_R_MIPI_0_I2C_0, IMX_SC_PM_CLK_MISC2, clk_cells);
clks[IMX_MIPI0_I2C1_CLK] = imx_clk_scu("mipi0_i2c1_clk", IMX_SC_R_MIPI_0_I2C_1, IMX_SC_PM_CLK_MISC2, clk_cells);
+ clks[IMX_MIPI0_PWM0_CLK] = imx_clk_scu("mipi0_pwm0_clk", IMX_SC_R_MIPI_0_PWM_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_MIPI1_LVDS_PIXEL_CLK] = imx_clk_scu("mipi1_lvds_pixel_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC2, clk_cells);
+ clks[IMX_MIPI1_LVDS_BYPASS_CLK] = imx_clk_scu("mipi1_lvds_bypass_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_BYPASS, clk_cells);
+ clks[IMX_MIPI1_LVDS_PHY_CLK] = imx_clk_scu("mipi1_lvds_phy_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC3, clk_cells);
+ clks[IMX_MIPI1_I2C0_CLK] = imx_clk_scu("mipi1_i2c0_clk", IMX_SC_R_MIPI_1_I2C_0, IMX_SC_PM_CLK_MISC2, clk_cells);
+ clks[IMX_MIPI1_I2C1_CLK] = imx_clk_scu("mipi1_i2c1_clk", IMX_SC_R_MIPI_1_I2C_1, IMX_SC_PM_CLK_MISC2, clk_cells);
+ clks[IMX_MIPI1_PWM0_CLK] = imx_clk_scu("mipi1_pwm0_clk", IMX_SC_R_MIPI_1_PWM_0, IMX_SC_PM_CLK_PER, clk_cells);
/* MIPI CSI SS */
clks[IMX_CSI0_CORE_CLK] = imx_clk_scu("mipi_csi0_core_clk", IMX_SC_R_CSI_0, IMX_SC_PM_CLK_PER, clk_cells);
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index ce8475098b31..886e2d9fced5 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -426,66 +426,77 @@ config COMMON_CLK_MT8183
config COMMON_CLK_MT8183_AUDIOSYS
bool "Clock driver for MediaTek MT8183 audiosys"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 audiosys clocks.
config COMMON_CLK_MT8183_CAMSYS
bool "Clock driver for MediaTek MT8183 camsys"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 camsys clocks.
config COMMON_CLK_MT8183_IMGSYS
bool "Clock driver for MediaTek MT8183 imgsys"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 imgsys clocks.
config COMMON_CLK_MT8183_IPU_CORE0
bool "Clock driver for MediaTek MT8183 ipu_core0"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 ipu_core0 clocks.
config COMMON_CLK_MT8183_IPU_CORE1
bool "Clock driver for MediaTek MT8183 ipu_core1"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 ipu_core1 clocks.
config COMMON_CLK_MT8183_IPU_ADL
bool "Clock driver for MediaTek MT8183 ipu_adl"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 ipu_adl clocks.
config COMMON_CLK_MT8183_IPU_CONN
bool "Clock driver for MediaTek MT8183 ipu_conn"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 ipu_conn clocks.
config COMMON_CLK_MT8183_MFGCFG
bool "Clock driver for MediaTek MT8183 mfgcfg"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 mfgcfg clocks.
config COMMON_CLK_MT8183_MMSYS
bool "Clock driver for MediaTek MT8183 mmsys"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 mmsys clocks.
config COMMON_CLK_MT8183_VDECSYS
bool "Clock driver for MediaTek MT8183 vdecsys"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 vdecsys clocks.
config COMMON_CLK_MT8183_VENCSYS
bool "Clock driver for MediaTek MT8183 vencsys"
depends on COMMON_CLK_MT8183
+ default COMMON_CLK_MT8183
help
This driver supports MediaTek MT8183 vencsys clocks.
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index dcc1352bf13c..b0c61709bacc 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -17,29 +17,36 @@ static inline struct mtk_clk_mux *to_mtk_clk_mux(struct clk_hw *hw)
return container_of(hw, struct mtk_clk_mux, hw);
}
-static int mtk_clk_mux_enable(struct clk_hw *hw)
+static int mtk_clk_mux_enable_setclr(struct clk_hw *hw)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
- u32 mask = BIT(mux->data->gate_shift);
-
- return regmap_update_bits(mux->regmap, mux->data->mux_ofs,
- mask, ~mask);
-}
+ unsigned long flags = 0;
-static void mtk_clk_mux_disable(struct clk_hw *hw)
-{
- struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
- u32 mask = BIT(mux->data->gate_shift);
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+ else
+ __acquire(mux->lock);
- regmap_update_bits(mux->regmap, mux->data->mux_ofs, mask, mask);
-}
+ regmap_write(mux->regmap, mux->data->clr_ofs,
+ BIT(mux->data->gate_shift));
+
+ /*
+ * If the parent has been changed when the clock was disabled, it will
+ * not be effective yet. Set the update bit to ensure the mux gets
+ * updated.
+ */
+ if (mux->reparent && mux->data->upd_shift >= 0) {
+ regmap_write(mux->regmap, mux->data->upd_ofs,
+ BIT(mux->data->upd_shift));
+ mux->reparent = false;
+ }
-static int mtk_clk_mux_enable_setclr(struct clk_hw *hw)
-{
- struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+ else
+ __release(mux->lock);
- return regmap_write(mux->regmap, mux->data->clr_ofs,
- BIT(mux->data->gate_shift));
+ return 0;
}
static void mtk_clk_mux_disable_setclr(struct clk_hw *hw)
@@ -72,28 +79,6 @@ static u8 mtk_clk_mux_get_parent(struct clk_hw *hw)
return val;
}
-static int mtk_clk_mux_set_parent_lock(struct clk_hw *hw, u8 index)
-{
- struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
- u32 mask = GENMASK(mux->data->mux_width - 1, 0);
- unsigned long flags = 0;
-
- if (mux->lock)
- spin_lock_irqsave(mux->lock, flags);
- else
- __acquire(mux->lock);
-
- regmap_update_bits(mux->regmap, mux->data->mux_ofs, mask,
- index << mux->data->mux_shift);
-
- if (mux->lock)
- spin_unlock_irqrestore(mux->lock, flags);
- else
- __release(mux->lock);
-
- return 0;
-}
-
static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
@@ -116,9 +101,11 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
regmap_write(mux->regmap, mux->data->set_ofs,
index << mux->data->mux_shift);
- if (mux->data->upd_shift >= 0)
+ if (mux->data->upd_shift >= 0) {
regmap_write(mux->regmap, mux->data->upd_ofs,
BIT(mux->data->upd_shift));
+ mux->reparent = true;
+ }
}
if (mux->lock)
@@ -129,25 +116,7 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
return 0;
}
-const struct clk_ops mtk_mux_ops = {
- .get_parent = mtk_clk_mux_get_parent,
- .set_parent = mtk_clk_mux_set_parent_lock,
-};
-
-const struct clk_ops mtk_mux_clr_set_upd_ops = {
- .get_parent = mtk_clk_mux_get_parent,
- .set_parent = mtk_clk_mux_set_parent_setclr_lock,
-};
-
-const struct clk_ops mtk_mux_gate_ops = {
- .enable = mtk_clk_mux_enable,
- .disable = mtk_clk_mux_disable,
- .is_enabled = mtk_clk_mux_is_enabled,
- .get_parent = mtk_clk_mux_get_parent,
- .set_parent = mtk_clk_mux_set_parent_lock,
-};
-
-const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
+static const struct clk_ops mtk_mux_ops = {
.enable = mtk_clk_mux_enable_setclr,
.disable = mtk_clk_mux_disable_setclr,
.is_enabled = mtk_clk_mux_is_enabled,
@@ -171,7 +140,7 @@ static struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
init.flags = mux->flags | CLK_SET_RATE_PARENT;
init.parent_names = mux->parent_names;
init.num_parents = mux->num_parents;
- init.ops = mux->ops;
+ init.ops = &mtk_mux_ops;
clk_mux->regmap = regmap;
clk_mux->data = mux;
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
index 8e2f927dd2ff..f1946161ade1 100644
--- a/drivers/clk/mediatek/clk-mux.h
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -14,6 +14,7 @@ struct mtk_clk_mux {
struct regmap *regmap;
const struct mtk_mux *data;
spinlock_t *lock;
+ bool reparent;
};
struct mtk_mux {
@@ -32,19 +33,12 @@ struct mtk_mux {
u8 gate_shift;
s8 upd_shift;
- const struct clk_ops *ops;
-
signed char num_parents;
};
-extern const struct clk_ops mtk_mux_ops;
-extern const struct clk_ops mtk_mux_clr_set_upd_ops;
-extern const struct clk_ops mtk_mux_gate_ops;
-extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
-
#define GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
_mux_set_ofs, _mux_clr_ofs, _shift, _width, \
- _gate, _upd_ofs, _upd, _flags, _ops) { \
+ _gate, _upd_ofs, _upd, _flags) { \
.id = _id, \
.name = _name, \
.mux_ofs = _mux_ofs, \
@@ -58,7 +52,6 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
.parent_names = _parents, \
.num_parents = ARRAY_SIZE(_parents), \
.flags = _flags, \
- .ops = &_ops, \
}
#define MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
@@ -66,8 +59,7 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
_gate, _upd_ofs, _upd, _flags) \
GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
_mux_set_ofs, _mux_clr_ofs, _shift, _width, \
- _gate, _upd_ofs, _upd, _flags, \
- mtk_mux_gate_clr_set_upd_ops)
+ _gate, _upd_ofs, _upd, _flags) \
#define MUX_GATE_CLR_SET_UPD(_id, _name, _parents, _mux_ofs, \
_mux_set_ofs, _mux_clr_ofs, _shift, _width, \
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 0e44695b8772..2ad3801398dc 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -1879,7 +1879,6 @@ static MESON_GATE(axg_mmc_pclk, HHI_GCLK_MPEG2, 11);
static MESON_GATE(axg_vpu_intr, HHI_GCLK_MPEG2, 25);
static MESON_GATE(axg_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
static MESON_GATE(axg_gic, HHI_GCLK_MPEG2, 30);
-static MESON_GATE(axg_mipi_enable, HHI_MIPI_CNTL0, 29);
/* Always On (AO) domain gates */
@@ -1974,7 +1973,6 @@ static struct clk_hw_onecell_data axg_hw_onecell_data = {
[CLKID_PCIE_REF] = &axg_pcie_ref.hw,
[CLKID_PCIE_CML_EN0] = &axg_pcie_cml_en0.hw,
[CLKID_PCIE_CML_EN1] = &axg_pcie_cml_en1.hw,
- [CLKID_MIPI_ENABLE] = &axg_mipi_enable.hw,
[CLKID_GEN_CLK_SEL] = &axg_gen_clk_sel.hw,
[CLKID_GEN_CLK_DIV] = &axg_gen_clk_div.hw,
[CLKID_GEN_CLK] = &axg_gen_clk.hw,
@@ -2115,7 +2113,6 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
&axg_pcie_ref,
&axg_pcie_cml_en0,
&axg_pcie_cml_en1,
- &axg_mipi_enable,
&axg_gen_clk_sel,
&axg_gen_clk_div,
&axg_gen_clk,
diff --git a/drivers/clk/meson/axg.h b/drivers/clk/meson/axg.h
index 481b307ea3cb..23ea87964af2 100644
--- a/drivers/clk/meson/axg.h
+++ b/drivers/clk/meson/axg.h
@@ -16,7 +16,6 @@
* Register offsets from the data sheet must be multiplied by 4 before
* adding them to the base address to get the right value.
*/
-#define HHI_MIPI_CNTL0 0x00
#define HHI_GP0_PLL_CNTL 0x40
#define HHI_GP0_PLL_CNTL2 0x44
#define HHI_GP0_PLL_CNTL3 0x48
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index b17a13e9337c..49f27fe53213 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -365,13 +365,14 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
- unsigned int enabled, m, n, frac = 0, ret;
+ unsigned int enabled, m, n, frac = 0;
unsigned long old_rate;
+ int ret;
if (parent_rate == 0 || rate == 0)
return -EINVAL;
- old_rate = rate;
+ old_rate = clk_hw_get_rate(hw);
ret = meson_clk_get_pll_settings(rate, parent_rate, &m, &n, pll);
if (ret)
@@ -393,7 +394,8 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
if (!enabled)
return 0;
- if (meson_clk_pll_enable(hw)) {
+ ret = meson_clk_pll_enable(hw);
+ if (ret) {
pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
__func__, old_rate);
/*
@@ -405,7 +407,7 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
meson_clk_pll_set_rate(hw, old_rate, parent_rate);
}
- return 0;
+ return ret;
}
/*
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 862f0756b50f..a844d35b553a 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -52,15 +52,6 @@ static const struct pll_params_table sys_pll_params_table[] = {
{ /* sentinel */ },
};
-static struct clk_fixed_rate meson8b_xtal = {
- .fixed_rate = 24000000,
- .hw.init = &(struct clk_init_data){
- .name = "xtal",
- .num_parents = 0,
- .ops = &clk_fixed_rate_ops,
- },
-};
-
static struct clk_regmap meson8b_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
@@ -2715,7 +2706,6 @@ static MESON_GATE(meson8b_ao_iface, HHI_GCLK_AO, 3);
static struct clk_hw_onecell_data meson8_hw_onecell_data = {
.hws = {
- [CLKID_XTAL] = &meson8b_xtal.hw,
[CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
[CLKID_PLL_VID] = &meson8b_vid_pll.hw,
[CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
@@ -2922,7 +2912,6 @@ static struct clk_hw_onecell_data meson8_hw_onecell_data = {
static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
.hws = {
- [CLKID_XTAL] = &meson8b_xtal.hw,
[CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
[CLKID_PLL_VID] = &meson8b_vid_pll.hw,
[CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
@@ -3140,7 +3129,6 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
static struct clk_hw_onecell_data meson8m2_hw_onecell_data = {
.hws = {
- [CLKID_XTAL] = &meson8b_xtal.hw,
[CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
[CLKID_PLL_VID] = &meson8b_vid_pll.hw,
[CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
@@ -3725,36 +3713,19 @@ static struct meson8b_nb_data meson8b_cpu_nb_data = {
.nb.notifier_call = meson8b_cpu_clk_notifier_cb,
};
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
-
static void __init meson8b_clkc_init_common(struct device_node *np,
struct clk_hw_onecell_data *clk_hw_onecell_data)
{
struct meson8b_clk_reset *rstc;
const char *notifier_clk_name;
struct clk *notifier_clk;
- void __iomem *clk_base;
struct regmap *map;
int i, ret;
map = syscon_node_to_regmap(of_get_parent(np));
if (IS_ERR(map)) {
- pr_info("failed to get HHI regmap - Trying obsolete regs\n");
-
- /* Generic clocks, PLLs and some of the reset-bits */
- clk_base = of_iomap(np, 1);
- if (!clk_base) {
- pr_err("%s: Unable to map clk base\n", __func__);
- return;
- }
-
- map = regmap_init_mmio(NULL, clk_base, &clkc_regmap_config);
- if (IS_ERR(map))
- return;
+		pr_err("failed to get HHI regmap\n");
+ return;
}
rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
@@ -3778,16 +3749,10 @@ static void __init meson8b_clkc_init_common(struct device_node *np,
meson8b_clk_regmaps[i]->map = map;
/*
- * always skip CLKID_UNUSED and also skip XTAL if the .dtb provides the
- * XTAL clock as input.
+ * register all clks and start with the first used ID (which is
+ * CLKID_PLL_FIXED)
*/
- if (!IS_ERR(of_clk_get_by_name(np, "xtal")))
- i = CLKID_PLL_FIXED;
- else
- i = CLKID_XTAL;
-
- /* register all clks */
- for (; i < CLK_NR_CLKS; i++) {
+ for (i = CLKID_PLL_FIXED; i < CLK_NR_CLKS; i++) {
/* array might be sparse */
if (!clk_hw_onecell_data->hws[i])
continue;
diff --git a/drivers/clk/mmp/clk-audio.c b/drivers/clk/mmp/clk-audio.c
index eea69d498bd2..7aa7f4a9564f 100644
--- a/drivers/clk/mmp/clk-audio.c
+++ b/drivers/clk/mmp/clk-audio.c
@@ -392,7 +392,8 @@ static int mmp2_audio_clk_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int mmp2_audio_clk_suspend(struct device *dev)
{
struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
@@ -404,7 +405,7 @@ static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
+static int mmp2_audio_clk_resume(struct device *dev)
{
struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
@@ -415,6 +416,7 @@ static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
return 0;
}
+#endif
static const struct dev_pm_ops mmp2_audio_clk_pm_ops = {
SET_RUNTIME_PM_OPS(mmp2_audio_clk_suspend, mmp2_audio_clk_resume, NULL)
diff --git a/drivers/clk/mstar/Kconfig b/drivers/clk/mstar/Kconfig
new file mode 100644
index 000000000000..de37e1bce2d2
--- /dev/null
+++ b/drivers/clk/mstar/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MSTAR_MSC313_MPLL
+ bool "MStar MPLL driver"
+ depends on ARCH_MSTARV7 || COMPILE_TEST
+ default ARCH_MSTARV7
+ select REGMAP_MMIO
+ help
+ Support for the MPLL PLL and dividers block present on
+ MStar/Sigmastar SoCs.
diff --git a/drivers/clk/mstar/Makefile b/drivers/clk/mstar/Makefile
new file mode 100644
index 000000000000..f8dcd25ede1d
--- /dev/null
+++ b/drivers/clk/mstar/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for mstar specific clk
+#
+
+obj-$(CONFIG_MSTAR_MSC313_MPLL) += clk-msc313-mpll.o
diff --git a/drivers/clk/mstar/clk-msc313-mpll.c b/drivers/clk/mstar/clk-msc313-mpll.c
new file mode 100644
index 000000000000..61beb4e87525
--- /dev/null
+++ b/drivers/clk/mstar/clk-msc313-mpll.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MStar MSC313 MPLL driver
+ *
+ * Copyright (C) 2020 Daniel Palmer <daniel@thingy.jp>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#define REG_CONFIG1 0x8
+#define REG_CONFIG2 0xc
+
+static const struct regmap_config msc313_mpll_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_stride = 4,
+};
+
+static const struct reg_field config1_loop_div_first = REG_FIELD(REG_CONFIG1, 8, 9);
+static const struct reg_field config1_input_div_first = REG_FIELD(REG_CONFIG1, 4, 5);
+static const struct reg_field config2_output_div_first = REG_FIELD(REG_CONFIG2, 12, 13);
+static const struct reg_field config2_loop_div_second = REG_FIELD(REG_CONFIG2, 0, 7);
+
+static const unsigned int output_dividers[] = {
+ 2, 3, 4, 5, 6, 7, 10
+};
+
+#define NUMOUTPUTS (ARRAY_SIZE(output_dividers) + 1)
+
+struct msc313_mpll {
+ struct clk_hw clk_hw;
+ struct regmap_field *input_div;
+ struct regmap_field *loop_div_first;
+ struct regmap_field *loop_div_second;
+ struct regmap_field *output_div;
+ struct clk_hw_onecell_data *clk_data;
+};
+
+#define to_mpll(_hw) container_of(_hw, struct msc313_mpll, clk_hw)
+
+static unsigned long msc313_mpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct msc313_mpll *mpll = to_mpll(hw);
+ unsigned int input_div, output_div, loop_first, loop_second;
+ unsigned long output_rate;
+
+ regmap_field_read(mpll->input_div, &input_div);
+ regmap_field_read(mpll->output_div, &output_div);
+ regmap_field_read(mpll->loop_div_first, &loop_first);
+ regmap_field_read(mpll->loop_div_second, &loop_second);
+
+ output_rate = parent_rate / (1 << input_div);
+ output_rate *= (1 << loop_first) * max(loop_second, 1U);
+ output_rate /= max(output_div, 1U);
+
+ return output_rate;
+}
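To make the arithmetic above concrete, a purely illustrative example with assumed register field values (not taken from a real MSC313 register dump) and an assumed 24 MHz parent crystal:

	input_div = 1, loop_div_first = 1, loop_div_second = 36, output_div = 2

	24 MHz / (1 << 1)       = 12 MHz
	12 MHz * (1 << 1) * 36  = 864 MHz
	864 MHz / 2             = 432 MHz

The fixed-factor children registered in the probe function below would then come out at 216 MHz (div_2), 144 MHz (div_3), 108 MHz (div_4) and so on.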
+
+static const struct clk_ops msc313_mpll_ops = {
+ .recalc_rate = msc313_mpll_recalc_rate,
+};
+
+static const struct clk_parent_data mpll_parent = {
+ .index = 0,
+};
+
+static int msc313_mpll_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct msc313_mpll *mpll;
+ struct clk_init_data clk_init = { };
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ char *outputname;
+ struct clk_hw *divhw;
+ int ret, i;
+
+ mpll = devm_kzalloc(dev, sizeof(*mpll), GFP_KERNEL);
+ if (!mpll)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &msc313_mpll_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ mpll->input_div = devm_regmap_field_alloc(dev, regmap, config1_input_div_first);
+ if (IS_ERR(mpll->input_div))
+ return PTR_ERR(mpll->input_div);
+ mpll->output_div = devm_regmap_field_alloc(dev, regmap, config2_output_div_first);
+ if (IS_ERR(mpll->output_div))
+ return PTR_ERR(mpll->output_div);
+ mpll->loop_div_first = devm_regmap_field_alloc(dev, regmap, config1_loop_div_first);
+ if (IS_ERR(mpll->loop_div_first))
+ return PTR_ERR(mpll->loop_div_first);
+ mpll->loop_div_second = devm_regmap_field_alloc(dev, regmap, config2_loop_div_second);
+ if (IS_ERR(mpll->loop_div_second))
+ return PTR_ERR(mpll->loop_div_second);
+
+ mpll->clk_data = devm_kzalloc(dev, struct_size(mpll->clk_data, hws,
+ ARRAY_SIZE(output_dividers)), GFP_KERNEL);
+ if (!mpll->clk_data)
+ return -ENOMEM;
+
+ clk_init.name = dev_name(dev);
+ clk_init.ops = &msc313_mpll_ops;
+ clk_init.parent_data = &mpll_parent;
+ clk_init.num_parents = 1;
+ mpll->clk_hw.init = &clk_init;
+
+ ret = devm_clk_hw_register(dev, &mpll->clk_hw);
+ if (ret)
+ return ret;
+
+ mpll->clk_data->num = NUMOUTPUTS;
+ mpll->clk_data->hws[0] = &mpll->clk_hw;
+
+ for (i = 0; i < ARRAY_SIZE(output_dividers); i++) {
+ outputname = devm_kasprintf(dev, GFP_KERNEL, "%s_div_%u",
+ clk_init.name, output_dividers[i]);
+ if (!outputname)
+ return -ENOMEM;
+ divhw = devm_clk_hw_register_fixed_factor(dev, outputname,
+ clk_init.name, 0, 1, output_dividers[i]);
+ if (IS_ERR(divhw))
+ return PTR_ERR(divhw);
+ mpll->clk_data->hws[i + 1] = divhw;
+ }
+
+ platform_set_drvdata(pdev, mpll);
+
+ return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
+ mpll->clk_data);
+}
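Given the registrations above, the onecell provider returned here exposes eight clocks; with a device named, say, "1f206580.mpll" (a hypothetical example), consumers would see:

	index 0 -> "1f206580.mpll"		(the MPLL itself, msc313_mpll_ops)
	index 1 -> "1f206580.mpll_div_2"	(fixed factor 1/2)
	index 2 -> "1f206580.mpll_div_3"
	index 3 -> "1f206580.mpll_div_4"
	index 4 -> "1f206580.mpll_div_5"
	index 5 -> "1f206580.mpll_div_6"
	index 6 -> "1f206580.mpll_div_7"
	index 7 -> "1f206580.mpll_div_10"

Device-tree consumers select one of these by index through of_clk_hw_onecell_get().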
+
+static const struct of_device_id msc313_mpll_of_match[] = {
+ { .compatible = "mstar,msc313-mpll", },
+ {}
+};
+
+static struct platform_driver msc313_mpll_driver = {
+ .driver = {
+ .name = "mstar-msc313-mpll",
+ .of_match_table = msc313_mpll_of_match,
+ },
+ .probe = msc313_mpll_probe,
+};
+builtin_platform_driver(msc313_mpll_driver);
diff --git a/drivers/clk/mvebu/ap-cpu-clk.c b/drivers/clk/mvebu/ap-cpu-clk.c
index b4259b60dcfd..08ba59ec3fb1 100644
--- a/drivers/clk/mvebu/ap-cpu-clk.c
+++ b/drivers/clk/mvebu/ap-cpu-clk.c
@@ -30,7 +30,7 @@
#define APN806_MAX_DIVIDER 32
-/**
+/*
* struct cpu_dfs_regs: CPU DFS register mapping
* @divider_reg: full integer ratio from PLL frequency to CPU clock frequency
* @force_reg: request to force new ratio regardless of relation to other clocks
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index d32bb12cd8d0..45646b867cdb 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -28,6 +28,14 @@ config QCOM_A53PLL
Say Y if you want to support higher CPU frequencies on MSM8916
devices.
+config QCOM_A7PLL
+ tristate "SDX55 A7 PLL"
+ help
+ Support for the A7 PLL on SDX55 devices. It provides the CPU with
+ frequencies above 1GHz.
+ Say Y if you want to support higher CPU frequencies on SDX55
+ devices.
+
config QCOM_CLK_APCS_MSM8916
tristate "MSM8916 APCS Clock Controller"
depends on QCOM_APCS_IPC || COMPILE_TEST
@@ -46,6 +54,15 @@ config QCOM_CLK_APCC_MSM8996
Say Y if you want to support CPU clock scaling using CPUfreq
drivers for dynamic power management.
+config QCOM_CLK_APCS_SDX55
+ tristate "SDX55 APCS Clock Controller"
+ depends on QCOM_APCS_IPC || COMPILE_TEST
+ help
+	  Support for the APCS Clock Controller on the SDX55 platform. The
+	  APCS manages the mux and divider that feed the CPUs.
+ Say Y if you want to support CPU frequency scaling on devices
+ such as SDX55.
+
config QCOM_CLK_RPM
tristate "RPM based Clock Controller"
depends on MFD_QCOM_RPM
@@ -317,6 +334,24 @@ config SC_GCC_7180
Say Y if you want to use peripheral devices such as UART, SPI,
I2C, USB, UFS, SDCC, etc.
+config SC_GCC_7280
+ tristate "SC7280 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on SC7280 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, UFS, SDCC, PCIe etc.
+
+config SC_GCC_8180X
+ tristate "SC8180X Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on SC8180X devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, UFS, SDCC, etc.
+
config SC_LPASS_CORECC_7180
tristate "SC7180 LPASS Core Clock Controller"
select SC_GCC_7180
@@ -366,6 +401,24 @@ config SDM_GCC_660
Say Y if you want to use peripheral devices such as UART, SPI,
i2C, USB, UFS, SDDC, PCIe, etc.
+config SDM_MMCC_660
+ tristate "SDM660 Multimedia Clock Controller"
+ select SDM_GCC_660
+ select QCOM_GDSC
+ help
+ Support for the multimedia clock controller on SDM660 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config SDM_GPUCC_660
+ tristate "SDM660 Graphics Clock Controller"
+ select SDM_GCC_660
+ select QCOM_GDSC
+ help
+ Support for the graphics clock controller on SDM630/636/660 devices.
+ Say Y if you want to support graphics controller devices and
+	  functionality such as 3D graphics.
+
config QCS_TURING_404
tristate "QCS404 Turing Clock Controller"
help
@@ -454,6 +507,14 @@ config SM_GCC_8250
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_8350
+ tristate "SM8350 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SM8350 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_GPUCC_8150
tristate "SM8150 Graphics Clock Controller"
select SM_GCC_8150
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 9e5e0e3cb7b4..c8291312e723 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -44,8 +44,10 @@ obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
obj-$(CONFIG_MSM_MMCC_8998) += mmcc-msm8998.o
obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
+obj-$(CONFIG_QCOM_A7PLL) += a7-pll.o
obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
obj-$(CONFIG_QCOM_CLK_APCC_MSM8996) += clk-cpu-8996.o
+obj-$(CONFIG_QCOM_CLK_APCS_SDX55) += apcs-sdx55.o
obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
@@ -55,6 +57,8 @@ obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
obj-$(CONFIG_SC_CAMCC_7180) += camcc-sc7180.o
obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
+obj-$(CONFIG_SC_GCC_7280) += gcc-sc7280.o
+obj-$(CONFIG_SC_GCC_8180X) += gcc-sc8180x.o
obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
obj-$(CONFIG_SC_LPASS_CORECC_7180) += lpasscorecc-sc7180.o
obj-$(CONFIG_SC_MSS_7180) += mss-sc7180.o
@@ -62,6 +66,8 @@ obj-$(CONFIG_SC_VIDEOCC_7180) += videocc-sc7180.o
obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o
obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
obj-$(CONFIG_SDM_GCC_660) += gcc-sdm660.o
+obj-$(CONFIG_SDM_MMCC_660) += mmcc-sdm660.o
+obj-$(CONFIG_SDM_GPUCC_660) += gpucc-sdm660.o
obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
obj-$(CONFIG_SDM_GPUCC_845) += gpucc-sdm845.o
obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o
@@ -70,6 +76,7 @@ obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o
obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
+obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o
obj-$(CONFIG_SM_GPUCC_8150) += gpucc-sm8150.o
obj-$(CONFIG_SM_GPUCC_8250) += gpucc-sm8250.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
diff --git a/drivers/clk/qcom/a7-pll.c b/drivers/clk/qcom/a7-pll.c
new file mode 100644
index 000000000000..e171d3caf2cf
--- /dev/null
+++ b/drivers/clk/qcom/a7-pll.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm A7 PLL driver
+ *
+ * Copyright (c) 2020, Linaro Limited
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "clk-alpha-pll.h"
+
+#define LUCID_PLL_OFF_L_VAL 0x04
+
+static const struct pll_vco lucid_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll a7pll = {
+ .offset = 0x100,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "a7pll",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config a7pll_config = {
+ .l = 0x39,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x2261,
+ .config_ctl_hi1_val = 0x029A699C,
+ .user_ctl_val = 0x1,
+ .user_ctl_hi_val = 0x805,
+};
+
+static const struct regmap_config a7pll_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1000,
+ .fast_io = true,
+};
+
+static int qcom_a7pll_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ void __iomem *base;
+ u32 l_val;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &a7pll_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* Configure PLL only if the l_val is zero */
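+ /* A nonzero L value presumably means the boot firmware already set it up. */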
+ regmap_read(regmap, a7pll.offset + LUCID_PLL_OFF_L_VAL, &l_val);
+ if (!l_val)
+ clk_lucid_pll_configure(&a7pll, regmap, &a7pll_config);
+
+ ret = devm_clk_register_regmap(dev, &a7pll.clkr);
+ if (ret)
+ return ret;
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &a7pll.clkr.hw);
+}
+
+static const struct of_device_id qcom_a7pll_match_table[] = {
+ { .compatible = "qcom,sdx55-a7pll" },
+ { }
+};
+
+static struct platform_driver qcom_a7pll_driver = {
+ .probe = qcom_a7pll_probe,
+ .driver = {
+ .name = "qcom-a7pll",
+ .of_match_table = qcom_a7pll_match_table,
+ },
+};
+module_platform_driver(qcom_a7pll_driver);
+
+MODULE_DESCRIPTION("Qualcomm A7 PLL Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/apcs-sdx55.c b/drivers/clk/qcom/apcs-sdx55.c
new file mode 100644
index 000000000000..d0edabebf9c2
--- /dev/null
+++ b/drivers/clk/qcom/apcs-sdx55.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm SDX55 APCS clock controller driver
+ *
+ * Copyright (c) 2020, Linaro Limited
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpu.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "clk-regmap.h"
+#include "clk-regmap-mux-div.h"
+
+static const u32 apcs_mux_clk_parent_map[] = { 0, 1, 5 };
+
+static const struct clk_parent_data pdata[] = {
+ { .fw_name = "ref" },
+ { .fw_name = "aux" },
+ { .fw_name = "pll" },
+};
+
+/*
+ * We use the notifier function for switching to a temporary safe configuration
+ * (mux and divider), while the A7 PLL is reconfigured.
+ */
+static int a7cc_notifier_cb(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ int ret = 0;
+ struct clk_regmap_mux_div *md = container_of(nb,
+ struct clk_regmap_mux_div,
+ clk_nb);
+ if (event == PRE_RATE_CHANGE)
+ /* Set the mux and divider to a safe frequency (400 MHz) */
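+ /* (source value 1 selects the "aux" parent per apcs_mux_clk_parent_map and pdata) */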
+ ret = mux_div_set_src_div(md, 1, 2);
+
+ return notifier_from_errno(ret);
+}
+
+static int qcom_apcs_sdx55_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+ struct device *cpu_dev;
+ struct clk_regmap_mux_div *a7cc;
+ struct regmap *regmap;
+ struct clk_init_data init = { };
+ int ret;
+
+ regmap = dev_get_regmap(parent, NULL);
+ if (!regmap) {
+ dev_err_probe(dev, -ENODEV, "Failed to get parent regmap\n");
+ return -ENODEV;
+ }
+
+ a7cc = devm_kzalloc(dev, sizeof(*a7cc), GFP_KERNEL);
+ if (!a7cc)
+ return -ENOMEM;
+
+ init.name = "a7mux";
+ init.parent_data = pdata;
+ init.num_parents = ARRAY_SIZE(pdata);
+ init.ops = &clk_regmap_mux_div_ops;
+
+ a7cc->clkr.hw.init = &init;
+ a7cc->clkr.regmap = regmap;
+ a7cc->reg_offset = 0x8;
+ a7cc->hid_width = 5;
+ a7cc->hid_shift = 0;
+ a7cc->src_width = 3;
+ a7cc->src_shift = 8;
+ a7cc->parent_map = apcs_mux_clk_parent_map;
+
+ a7cc->pclk = devm_clk_get(parent, "pll");
+ if (IS_ERR(a7cc->pclk)) {
+ ret = PTR_ERR(a7cc->pclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err_probe(dev, ret, "Failed to get PLL clk\n");
+ return ret;
+ }
+
+ a7cc->clk_nb.notifier_call = a7cc_notifier_cb;
+ ret = clk_notifier_register(a7cc->pclk, &a7cc->clk_nb);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to register clock notifier\n");
+ return ret;
+ }
+
+ ret = devm_clk_register_regmap(dev, &a7cc->clkr);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to register regmap clock\n");
+ goto err;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &a7cc->clkr.hw);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to add clock provider\n");
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, a7cc);
+
+ /*
+ * Attach the power domain to cpudev. Since there is no dedicated driver
+ * for CPUs and the SDX55 platform lacks a hardware-specific CPUFreq
+ * driver, there seems to be no better place to do this. So do it here!
+ */
+ cpu_dev = get_cpu_device(0);
+ dev_pm_domain_attach(cpu_dev, true);
+
+ return 0;
+
+err:
+ clk_notifier_unregister(a7cc->pclk, &a7cc->clk_nb);
+ return ret;
+}
+
+static int qcom_apcs_sdx55_clk_remove(struct platform_device *pdev)
+{
+ struct device *cpu_dev = get_cpu_device(0);
+ struct clk_regmap_mux_div *a7cc = platform_get_drvdata(pdev);
+
+ clk_notifier_unregister(a7cc->pclk, &a7cc->clk_nb);
+ dev_pm_domain_detach(cpu_dev, true);
+
+ return 0;
+}
+
+static struct platform_driver qcom_apcs_sdx55_clk_driver = {
+ .probe = qcom_apcs_sdx55_clk_probe,
+ .remove = qcom_apcs_sdx55_clk_remove,
+ .driver = {
+ .name = "qcom-sdx55-acps-clk",
+ },
+};
+module_platform_driver(qcom_apcs_sdx55_clk_driver);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm SDX55 APCS clock driver");
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 21c357c26ec4..c6eb99169ddc 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -156,6 +156,12 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
/* LUCID PLL specific settings and offsets */
#define LUCID_PCAL_DONE BIT(27)
+/* LUCID 5LPE PLL specific settings and offsets */
+#define LUCID_5LPE_PCAL_DONE BIT(11)
+#define LUCID_5LPE_ALPHA_PLL_ACK_LATCH BIT(13)
+#define LUCID_5LPE_PLL_LATCH_INPUT BIT(14)
+#define LUCID_5LPE_ENABLE_VOTE_RUN BIT(21)
+
#define pll_alpha_width(p) \
((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? \
ALPHA_REG_BITWIDTH : ALPHA_REG_16BIT_WIDTH)
@@ -777,15 +783,15 @@ static long alpha_pll_huayra_round_rate(struct clk_hw *hw, unsigned long rate,
static int trion_pll_is_enabled(struct clk_alpha_pll *pll,
struct regmap *regmap)
{
- u32 mode_regval, opmode_regval;
+ u32 mode_val, opmode_val;
int ret;
- ret = regmap_read(regmap, PLL_MODE(pll), &mode_regval);
- ret |= regmap_read(regmap, PLL_OPMODE(pll), &opmode_regval);
+ ret = regmap_read(regmap, PLL_MODE(pll), &mode_val);
+ ret |= regmap_read(regmap, PLL_OPMODE(pll), &opmode_val);
if (ret)
return 0;
- return ((opmode_regval & PLL_RUN) && (mode_regval & PLL_OUTCTRL));
+ return ((opmode_val & PLL_RUN) && (mode_val & PLL_OUTCTRL));
}
static int clk_trion_pll_is_enabled(struct clk_hw *hw)
@@ -1445,12 +1451,12 @@ EXPORT_SYMBOL_GPL(clk_trion_pll_configure);
static int __alpha_pll_trion_prepare(struct clk_hw *hw, u32 pcal_done)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
- u32 regval;
+ u32 val;
int ret;
/* Return early if calibration is not needed. */
- regmap_read(pll->clkr.regmap, PLL_STATUS(pll), &regval);
- if (regval & pcal_done)
+ regmap_read(pll->clkr.regmap, PLL_STATUS(pll), &val);
+ if (val & pcal_done)
return 0;
/* On/off to calibrate */
@@ -1471,12 +1477,12 @@ static int alpha_pll_lucid_prepare(struct clk_hw *hw)
return __alpha_pll_trion_prepare(hw, LUCID_PCAL_DONE);
}
-static int alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long prate)
+static int __alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate, u32 latch_bit, u32 latch_ack)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
unsigned long rrate;
- u32 regval, l, alpha_width = pll_alpha_width(pll);
+ u32 val, l, alpha_width = pll_alpha_width(pll);
u64 a;
int ret;
@@ -1490,22 +1496,20 @@ static int alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate,
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
/* Latch the PLL input */
- ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
- PLL_UPDATE, PLL_UPDATE);
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), latch_bit, latch_bit);
if (ret)
return ret;
/* Wait for 2 reference cycles before checking the ACK bit. */
udelay(1);
- regmap_read(pll->clkr.regmap, PLL_MODE(pll), &regval);
- if (!(regval & ALPHA_PLL_ACK_LATCH)) {
+ regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (!(val & latch_ack)) {
pr_err("Lucid PLL latch failed. Output may be unstable!\n");
return -EINVAL;
}
/* Return the latch input to 0 */
- ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
- PLL_UPDATE, 0);
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), latch_bit, 0);
if (ret)
return ret;
@@ -1520,6 +1524,12 @@ static int alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static int alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ return __alpha_pll_trion_set_rate(hw, rate, prate, PLL_UPDATE, ALPHA_PLL_ACK_LATCH);
+}
+
const struct clk_ops clk_alpha_pll_trion_ops = {
.prepare = alpha_pll_trion_prepare,
.enable = clk_trion_pll_enable,
@@ -1600,3 +1610,170 @@ const struct clk_ops clk_alpha_pll_agera_ops = {
.set_rate = clk_alpha_pll_agera_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops);
+
+static int alpha_pll_lucid_5lpe_enable(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+ int ret;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &val);
+ if (ret)
+ return ret;
+
+ /* If in FSM mode, just vote for it */
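+ /* (only the vote bit is toggled here; hardware presumably sequences the PLL) */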
+ if (val & LUCID_5LPE_ENABLE_VOTE_RUN) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ return wait_for_pll_enable_lock(pll);
+ }
+
+ /* Check if PLL is already enabled, return if enabled */
+ ret = trion_pll_is_enabled(pll, pll->clkr.regmap);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ regmap_write(pll->clkr.regmap, PLL_OPMODE(pll), PLL_RUN);
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+
+ /* Enable the PLL outputs */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll), PLL_OUT_MASK, PLL_OUT_MASK);
+ if (ret)
+ return ret;
+
+ /* Enable the global PLL outputs */
+ return regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_OUTCTRL, PLL_OUTCTRL);
+}
+
+static void alpha_pll_lucid_5lpe_disable(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val;
+ int ret;
+
+ ret = regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &val);
+ if (ret)
+ return;
+
+ /* If in FSM mode, just unvote it */
+ if (val & LUCID_5LPE_ENABLE_VOTE_RUN) {
+ clk_disable_regmap(hw);
+ return;
+ }
+
+ /* Disable the global PLL output */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ if (ret)
+ return;
+
+ /* Disable the PLL outputs */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll), PLL_OUT_MASK, 0);
+ if (ret)
+ return;
+
+ /* Place the PLL mode in STANDBY */
+ regmap_write(pll->clkr.regmap, PLL_OPMODE(pll), PLL_STANDBY);
+}
+
+/*
+ * The Lucid 5LPE PLL requires a power-on self-calibration which happens
+ * when the PLL comes out of reset. Calibrate here if it has not completed.
+ */
+static int alpha_pll_lucid_5lpe_prepare(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct clk_hw *p;
+ u32 val = 0;
+ int ret;
+
+ /* Return early if calibration is not needed. */
+ regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (val & LUCID_5LPE_PCAL_DONE)
+ return 0;
+
+ p = clk_hw_get_parent(hw);
+ if (!p)
+ return -EINVAL;
+
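+ /* Briefly enable and then disable the PLL so the power-on calibration can run. */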
+ ret = alpha_pll_lucid_5lpe_enable(hw);
+ if (ret)
+ return ret;
+
+ alpha_pll_lucid_5lpe_disable(hw);
+
+ return 0;
+}
+
+static int alpha_pll_lucid_5lpe_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ return __alpha_pll_trion_set_rate(hw, rate, prate,
+ LUCID_5LPE_PLL_LATCH_INPUT,
+ LUCID_5LPE_ALPHA_PLL_ACK_LATCH);
+}
+
+static int clk_lucid_5lpe_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ int i, val = 0, div, ret;
+ u32 mask;
+
+ /*
+ * If the PLL is in FSM mode, treat the set_rate callback as a
+ * no-op.
+ */
+ ret = regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &val);
+ if (ret)
+ return ret;
+
+ if (val & LUCID_5LPE_ENABLE_VOTE_RUN)
+ return 0;
+
+ div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
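+ /* Find the post_div_table entry that yields this integer divider. */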
+ for (i = 0; i < pll->num_post_div; i++) {
+ if (pll->post_div_table[i].div == div) {
+ val = pll->post_div_table[i].val;
+ break;
+ }
+ }
+
+ mask = GENMASK(pll->width + pll->post_div_shift - 1, pll->post_div_shift);
+ return regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ mask, val << pll->post_div_shift);
+}
+
+const struct clk_ops clk_alpha_pll_lucid_5lpe_ops = {
+ .prepare = alpha_pll_lucid_5lpe_prepare,
+ .enable = alpha_pll_lucid_5lpe_enable,
+ .disable = alpha_pll_lucid_5lpe_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = clk_trion_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = alpha_pll_lucid_5lpe_set_rate,
+};
+EXPORT_SYMBOL(clk_alpha_pll_lucid_5lpe_ops);
+
+const struct clk_ops clk_alpha_pll_fixed_lucid_5lpe_ops = {
+ .enable = alpha_pll_lucid_5lpe_enable,
+ .disable = alpha_pll_lucid_5lpe_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = clk_trion_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+};
+EXPORT_SYMBOL(clk_alpha_pll_fixed_lucid_5lpe_ops);
+
+const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops = {
+ .recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .set_rate = clk_lucid_5lpe_pll_postdiv_set_rate,
+};
+EXPORT_SYMBOL(clk_alpha_pll_postdiv_lucid_5lpe_ops);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 0ea30d2f3da1..6943e933be0f 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -144,6 +144,10 @@ extern const struct clk_ops clk_alpha_pll_lucid_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_ops;
extern const struct clk_ops clk_alpha_pll_agera_ops;
+extern const struct clk_ops clk_alpha_pll_lucid_5lpe_ops;
+extern const struct clk_ops clk_alpha_pll_fixed_lucid_5lpe_ops;
+extern const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops;
+
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 86d2b8b90173..99efcc7f8d88 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -153,6 +153,15 @@ struct clk_rcg2 {
#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
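+/*
+ * struct clk_rcg2_gfx3d - RCG with a fixed source pre-divider plus the
+ * parent PLL hws used by the GFX3D rate-switching code in clk-rcg2.c.
+ */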
+struct clk_rcg2_gfx3d {
+ u8 div;
+ struct clk_rcg2 rcg;
+ struct clk_hw **hws;
+};
+
+#define to_clk_rcg2_gfx3d(_hw) \
+ container_of(to_clk_rcg2(_hw), struct clk_rcg2_gfx3d, rcg)
+
extern const struct clk_ops clk_rcg2_ops;
extern const struct clk_ops clk_rcg2_floor_ops;
extern const struct clk_ops clk_edp_pixel_ops;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 59a5a0f261f3..42f13a2d1cc1 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -728,40 +728,51 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_rate_request parent_req = { };
- struct clk_hw *p2, *p8, *p9, *xo;
- unsigned long p9_rate;
+ struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
+ struct clk_hw *xo, *p0, *p1, *p2;
+ unsigned long request, p0_rate;
int ret;
+ p0 = cgfx->hws[0];
+ p1 = cgfx->hws[1];
+ p2 = cgfx->hws[2];
+ /*
+ * This function ping-pongs the RCG between PLLs: without at least
+ * one fixed PLL and two variable ones it cannot work correctly.
+ */
+ if (WARN_ON(!p0 || !p1 || !p2))
+ return -EINVAL;
+
xo = clk_hw_get_parent_by_index(hw, 0);
if (req->rate == clk_hw_get_rate(xo)) {
req->best_parent_hw = xo;
return 0;
}
- p9 = clk_hw_get_parent_by_index(hw, 2);
- p2 = clk_hw_get_parent_by_index(hw, 3);
- p8 = clk_hw_get_parent_by_index(hw, 4);
+ request = req->rate;
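+ /*
+ * The RCG divides its source by cgfx->div, so request div times the
+ * target rate from the parent; it is divided back down below.
+ */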
+ if (cgfx->div > 1)
+ parent_req.rate = request = request * cgfx->div;
- /* PLL9 is a fixed rate PLL */
- p9_rate = clk_hw_get_rate(p9);
+ /* This has to be a fixed rate PLL */
+ p0_rate = clk_hw_get_rate(p0);
- parent_req.rate = req->rate = min(req->rate, p9_rate);
- if (req->rate == p9_rate) {
- req->rate = req->best_parent_rate = p9_rate;
- req->best_parent_hw = p9;
+ if (request == p0_rate) {
+ req->rate = req->best_parent_rate = p0_rate;
+ req->best_parent_hw = p0;
return 0;
}
- if (req->best_parent_hw == p9) {
+ if (req->best_parent_hw == p0) {
/* Are we going back to a previously used rate? */
- if (clk_hw_get_rate(p8) == req->rate)
- req->best_parent_hw = p8;
- else
+ if (clk_hw_get_rate(p2) == request)
req->best_parent_hw = p2;
- } else if (req->best_parent_hw == p8) {
- req->best_parent_hw = p2;
+ else
+ req->best_parent_hw = p1;
+ } else if (req->best_parent_hw == p2) {
+ req->best_parent_hw = p1;
} else {
- req->best_parent_hw = p8;
+ req->best_parent_hw = p2;
}
ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
@@ -769,6 +780,8 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
return ret;
req->rate = req->best_parent_rate = parent_req.rate;
+ if (cgfx->div > 1)
+ req->rate /= cgfx->div;
return 0;
}
@@ -776,12 +789,16 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate, u8 index)
{
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
+ struct clk_rcg2 *rcg = &cgfx->rcg;
u32 cfg;
int ret;
- /* Just mux it, we don't use the division or m/n hardware */
cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+ /* On some targets, the GFX3D RCG may need to divide PLL frequency */
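+ /* (the CFG divider field uses the usual RCG encoding of 2 * div - 1) */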
+ if (cgfx->div > 1)
+ cfg |= ((2 * cgfx->div) - 1) << CFG_SRC_DIV_SHIFT;
+
ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/clk-regmap.c b/drivers/clk/qcom/clk-regmap.c
index ce80db27ccf2..92ac4e0d7dbe 100644
--- a/drivers/clk/qcom/clk-regmap.c
+++ b/drivers/clk/qcom/clk-regmap.c
@@ -87,6 +87,7 @@ EXPORT_SYMBOL_GPL(clk_disable_regmap);
/**
* devm_clk_register_regmap - register a clk_regmap clock
*
+ * @dev: reference to the caller's device
* @rclk: clk to operate on
*
* Clocks that use regmap for their register I/O should register their
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index f71d228fd6bd..a18811c38018 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -73,62 +73,6 @@
}, \
}
-#define DEFINE_CLK_RPM_PXO_BRANCH(_platform, _name, _active, r_id, r) \
- static struct clk_rpm _platform##_##_active; \
- static struct clk_rpm _platform##_##_name = { \
- .rpm_clk_id = (r_id), \
- .active_only = true, \
- .peer = &_platform##_##_active, \
- .rate = (r), \
- .branch = true, \
- .hw.init = &(struct clk_init_data){ \
- .ops = &clk_rpm_branch_ops, \
- .name = #_name, \
- .parent_names = (const char *[]){ "pxo_board" }, \
- .num_parents = 1, \
- }, \
- }; \
- static struct clk_rpm _platform##_##_active = { \
- .rpm_clk_id = (r_id), \
- .peer = &_platform##_##_name, \
- .rate = (r), \
- .branch = true, \
- .hw.init = &(struct clk_init_data){ \
- .ops = &clk_rpm_branch_ops, \
- .name = #_active, \
- .parent_names = (const char *[]){ "pxo_board" }, \
- .num_parents = 1, \
- }, \
- }
-
-#define DEFINE_CLK_RPM_CXO_BRANCH(_platform, _name, _active, r_id, r) \
- static struct clk_rpm _platform##_##_active; \
- static struct clk_rpm _platform##_##_name = { \
- .rpm_clk_id = (r_id), \
- .peer = &_platform##_##_active, \
- .rate = (r), \
- .branch = true, \
- .hw.init = &(struct clk_init_data){ \
- .ops = &clk_rpm_branch_ops, \
- .name = #_name, \
- .parent_names = (const char *[]){ "cxo_board" }, \
- .num_parents = 1, \
- }, \
- }; \
- static struct clk_rpm _platform##_##_active = { \
- .rpm_clk_id = (r_id), \
- .active_only = true, \
- .peer = &_platform##_##_name, \
- .rate = (r), \
- .branch = true, \
- .hw.init = &(struct clk_init_data){ \
- .ops = &clk_rpm_branch_ops, \
- .name = #_active, \
- .parent_names = (const char *[]){ "cxo_board" }, \
- .num_parents = 1, \
- }, \
- }
-
#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw)
struct rpm_cc;
@@ -450,13 +394,6 @@ static const struct clk_ops clk_rpm_ops = {
.recalc_rate = clk_rpm_recalc_rate,
};
-static const struct clk_ops clk_rpm_branch_ops = {
- .prepare = clk_rpm_prepare,
- .unprepare = clk_rpm_unprepare,
- .round_rate = clk_rpm_round_rate,
- .recalc_rate = clk_rpm_recalc_rate,
-};
-
/* MSM8660/APQ8060 */
DEFINE_CLK_RPM(msm8660, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(msm8660, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 6a2a13c5058e..91dc390a583b 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -348,6 +348,10 @@ DEFINE_CLK_RPMH_VRM(sdm845, rf_clk1, rf_clk1_ao, "rfclka1", 1);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk2, rf_clk2_ao, "rfclka2", 1);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk3, rf_clk3_ao, "rfclka3", 1);
DEFINE_CLK_RPMH_VRM(sm8150, rf_clk3, rf_clk3_ao, "rfclka3", 1);
+DEFINE_CLK_RPMH_VRM(sc8180x, rf_clk1, rf_clk1_ao, "rfclkd1", 1);
+DEFINE_CLK_RPMH_VRM(sc8180x, rf_clk2, rf_clk2_ao, "rfclkd2", 1);
+DEFINE_CLK_RPMH_VRM(sc8180x, rf_clk3, rf_clk3_ao, "rfclkd3", 1);
+DEFINE_CLK_RPMH_VRM(sc8180x, rf_clk4, rf_clk4_ao, "rfclkd4", 1);
DEFINE_CLK_RPMH_BCM(sdm845, ipa, "IP0");
DEFINE_CLK_RPMH_BCM(sdm845, ce, "CE0");
@@ -431,6 +435,26 @@ static const struct clk_rpmh_desc clk_rpmh_sc7180 = {
.num_clks = ARRAY_SIZE(sc7180_rpmh_clocks),
};
+static struct clk_hw *sc8180x_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &sdm845_ln_bb_clk3.hw,
+ [RPMH_LN_BB_CLK3_A] = &sdm845_ln_bb_clk3_ao.hw,
+ [RPMH_RF_CLK1] = &sc8180x_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sc8180x_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &sc8180x_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &sc8180x_rf_clk2_ao.hw,
+ [RPMH_RF_CLK3] = &sc8180x_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &sc8180x_rf_clk3_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sc8180x = {
+ .clks = sc8180x_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sc8180x_rpmh_clocks),
+};
+
DEFINE_CLK_RPMH_VRM(sm8250, ln_bb_clk1, ln_bb_clk1_ao, "lnbclka1", 2);
static struct clk_hw *sm8250_rpmh_clocks[] = {
@@ -486,6 +510,27 @@ static const struct clk_rpmh_desc clk_rpmh_sm8350 = {
.num_clks = ARRAY_SIZE(sm8350_rpmh_clocks),
};
+static struct clk_hw *sc7280_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
+ [RPMH_RF_CLK4] = &sm8350_rf_clk4.hw,
+ [RPMH_RF_CLK4_A] = &sm8350_rf_clk4_ao.hw,
+ [RPMH_IPA_CLK] = &sdm845_ipa.hw,
+ [RPMH_PKA_CLK] = &sm8350_pka.hw,
+ [RPMH_HWKM_CLK] = &sm8350_hwkm.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sc7280 = {
+ .clks = sc7280_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sc7280_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -570,11 +615,13 @@ static int clk_rpmh_probe(struct platform_device *pdev)
static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
+ { .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x},
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
{ .compatible = "qcom,sdx55-rpmh-clk", .data = &clk_rpmh_sdx55},
{ .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150},
{ .compatible = "qcom,sm8250-rpmh-clk", .data = &clk_rpmh_sm8250},
{ .compatible = "qcom,sm8350-rpmh-clk", .data = &clk_rpmh_sm8350},
+ { .compatible = "qcom,sc7280-rpmh-clk", .data = &clk_rpmh_sc7280},
{ }
};
MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index ef5137fd50f3..8abad4032de7 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -1276,16 +1276,15 @@ static int clk_cpu_div_set_rate(struct clk_hw *hw, unsigned long rate,
struct clk_fepll *pll = to_clk_fepll(hw);
const struct freq_tbl *f;
u32 mask;
- int ret;
f = qcom_find_freq(pll->freq_tbl, rate);
if (!f)
return -EINVAL;
mask = (BIT(pll->cdiv.width) - 1) << pll->cdiv.shift;
- ret = regmap_update_bits(pll->cdiv.clkr.regmap,
- pll->cdiv.reg, mask,
- f->pre_div << pll->cdiv.shift);
+ regmap_update_bits(pll->cdiv.clkr.regmap,
+ pll->cdiv.reg, mask,
+ f->pre_div << pll->cdiv.shift);
/*
* There is no status bit which can be checked for successful CPU
* divider update operation so using delay for the same.
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 9d7016bcd680..050c91af888e 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -135,7 +135,7 @@ static struct pll_vco fabia_vco[] = {
static struct clk_alpha_pll gpll0 = {
.offset = 0x0,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@@ -145,58 +145,58 @@ static struct clk_alpha_pll gpll0 = {
.name = "gpll0",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
- .ops = &clk_alpha_pll_ops,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll0_out_even = {
.offset = 0x0,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_even",
.parent_names = (const char *[]){ "gpll0" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll0_out_main = {
.offset = 0x0,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_main",
.parent_names = (const char *[]){ "gpll0" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll0_out_odd = {
.offset = 0x0,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_odd",
.parent_names = (const char *[]){ "gpll0" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll0_out_test = {
.offset = 0x0,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_test",
.parent_names = (const char *[]){ "gpll0" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll gpll1 = {
.offset = 0x1000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@@ -206,58 +206,58 @@ static struct clk_alpha_pll gpll1 = {
.name = "gpll1",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
- .ops = &clk_alpha_pll_ops,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll1_out_even = {
.offset = 0x1000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_even",
.parent_names = (const char *[]){ "gpll1" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll1_out_main = {
.offset = 0x1000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_main",
.parent_names = (const char *[]){ "gpll1" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll1_out_odd = {
.offset = 0x1000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_odd",
.parent_names = (const char *[]){ "gpll1" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll1_out_test = {
.offset = 0x1000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_test",
.parent_names = (const char *[]){ "gpll1" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll gpll2 = {
.offset = 0x2000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@@ -267,58 +267,58 @@ static struct clk_alpha_pll gpll2 = {
.name = "gpll2",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
- .ops = &clk_alpha_pll_ops,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll2_out_even = {
.offset = 0x2000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_even",
.parent_names = (const char *[]){ "gpll2" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll2_out_main = {
.offset = 0x2000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_main",
.parent_names = (const char *[]){ "gpll2" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll2_out_odd = {
.offset = 0x2000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_odd",
.parent_names = (const char *[]){ "gpll2" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll2_out_test = {
.offset = 0x2000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_test",
.parent_names = (const char *[]){ "gpll2" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll gpll3 = {
.offset = 0x3000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@@ -328,58 +328,58 @@ static struct clk_alpha_pll gpll3 = {
.name = "gpll3",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
- .ops = &clk_alpha_pll_ops,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll3_out_even = {
.offset = 0x3000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_even",
.parent_names = (const char *[]){ "gpll3" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll3_out_main = {
.offset = 0x3000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_main",
.parent_names = (const char *[]){ "gpll3" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll3_out_odd = {
.offset = 0x3000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_odd",
.parent_names = (const char *[]){ "gpll3" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll3_out_test = {
.offset = 0x3000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_test",
.parent_names = (const char *[]){ "gpll3" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll gpll4 = {
.offset = 0x77000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@@ -389,52 +389,52 @@ static struct clk_alpha_pll gpll4 = {
.name = "gpll4",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
- .ops = &clk_alpha_pll_ops,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll4_out_even = {
.offset = 0x77000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_even",
.parent_names = (const char *[]){ "gpll4" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll4_out_main = {
.offset = 0x77000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_main",
.parent_names = (const char *[]){ "gpll4" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll4_out_odd = {
.offset = 0x77000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_odd",
.parent_names = (const char *[]){ "gpll4" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll4_out_test = {
.offset = 0x77000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_test",
.parent_names = (const char *[]){ "gpll4" },
.num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ops,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
@@ -1341,6 +1341,22 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
},
};
+static struct clk_branch gcc_mmss_gpll0_clk = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_gpll0_clk",
+ .parent_names = (const char *[]){
+ "gpll0_out_main",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_mss_gpll0_div_clk_src = {
.halt_check = BRANCH_HALT_DELAY,
.clkr = {
@@ -2065,6 +2081,12 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_gpu_cfg_ahb_clk",
.ops = &clk_branch2_ops,
+ /*
+ * The GPU IOMMU depends on this clock and the hypervisor
+ * will crash the SoC if it goes down, due to secure
+ * context protection.
+ */
+ .flags = CLK_IS_CRITICAL,
},
},
};
@@ -2144,6 +2166,25 @@ static struct clk_branch gcc_hmss_trig_clk = {
},
};
+static struct freq_tbl ftbl_hmss_gpll0_clk_src[] = {
+ F( 300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F( 600000000, P_GPLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 hmss_gpll0_clk_src = {
+ .cmd_rcgr = 0x4805c,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_hmss_gpll0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "hmss_gpll0_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_names_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
.halt_reg = 0x9004,
.halt_check = BRANCH_HALT,
@@ -2944,6 +2985,8 @@ static struct clk_regmap *gcc_msm8998_clocks[] = {
[GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
[GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
[GCC_MSS_MNOC_BIMC_AXI_CLK] = &gcc_mss_mnoc_bimc_axi_clk.clkr,
+ [GCC_MMSS_GPLL0_CLK] = &gcc_mmss_gpll0_clk.clkr,
+ [HMSS_GPLL0_CLK_SRC] = &hmss_gpll0_clk_src.clkr,
};
static struct gdsc *gcc_msm8998_gdscs[] = {
diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
index d82d725ac231..88e896abb663 100644
--- a/drivers/clk/qcom/gcc-sc7180.c
+++ b/drivers/clk/qcom/gcc-sc7180.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -891,21 +891,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
},
};
-static struct clk_branch gcc_camera_ahb_clk = {
- .halt_reg = 0xb008,
- .halt_check = BRANCH_HALT,
- .hwcg_reg = 0xb008,
- .hwcg_bit = 1,
- .clkr = {
- .enable_reg = 0xb008,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_camera_ahb_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_camera_hf_axi_clk = {
.halt_reg = 0xb020,
.halt_check = BRANCH_HALT,
@@ -934,19 +919,6 @@ static struct clk_branch gcc_camera_throttle_hf_axi_clk = {
},
};
-static struct clk_branch gcc_camera_xo_clk = {
- .halt_reg = 0xb02c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xb02c,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_camera_xo_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_ce1_ahb_clk = {
.halt_reg = 0x4100c,
.halt_check = BRANCH_HALT_VOTED,
@@ -1111,19 +1083,6 @@ static struct clk_branch gcc_disp_throttle_hf_axi_clk = {
},
};
-static struct clk_branch gcc_disp_xo_clk = {
- .halt_reg = 0xb030,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xb030,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_disp_xo_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_gp1_clk = {
.halt_reg = 0x64000,
.halt_check = BRANCH_HALT,
@@ -2174,19 +2133,6 @@ static struct clk_branch gcc_video_throttle_axi_clk = {
},
};
-static struct clk_branch gcc_video_xo_clk = {
- .halt_reg = 0xb028,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xb028,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_video_xo_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_mss_cfg_ahb_clk = {
.halt_reg = 0x8a000,
.halt_check = BRANCH_HALT,
@@ -2317,10 +2263,8 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
[GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
[GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
- [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
[GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
[GCC_CAMERA_THROTTLE_HF_AXI_CLK] = &gcc_camera_throttle_hf_axi_clk.clkr,
- [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
[GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
[GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
[GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
@@ -2333,7 +2277,6 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
[GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
[GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
[GCC_DISP_THROTTLE_HF_AXI_CLK] = &gcc_disp_throttle_hf_axi_clk.clkr,
- [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
[GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
@@ -2429,7 +2372,6 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
[GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
[GCC_VIDEO_GPLL0_DIV_CLK_SRC] = &gcc_video_gpll0_div_clk_src.clkr,
[GCC_VIDEO_THROTTLE_AXI_CLK] = &gcc_video_throttle_axi_clk.clkr,
- [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
[GPLL0] = &gpll0.clkr,
[GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
[GPLL6] = &gpll6.clkr,
@@ -2519,12 +2461,16 @@ static int gcc_sc7180_probe(struct platform_device *pdev)
/*
* Keep the clocks always-ON
- * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_DISP_AHB_CLK
- * GCC_GPU_CFG_AHB_CLK
+ * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK,
+ * GCC_DISP_AHB_CLK, GCC_GPU_CFG_AHB_CLK, GCC_CAMERA_XO_CLK,
+ * GCC_VIDEO_XO_CLK, GCC_DISP_XO_CLK
*/
regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
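+ /*
+ * 0xb008, 0xb02c, 0xb028 and 0xb030 are the camera AHB, camera XO,
+ * video XO and display XO branch registers removed above.
+ */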
+ regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b02c, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b028, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b030, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
new file mode 100644
index 000000000000..22736c16ed16
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sc7280.c
@@ -0,0 +1,3603 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-sc7280.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL0_OUT_ODD,
+ P_GCC_GPLL10_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_PCIE_0_PIPE_CLK,
+ P_PCIE_1_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_GCC_MSS_GPLL0_MAIN_DIV_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll0_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_odd[] = {
+ { 0x3, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_gcc_gpll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll0_out_odd",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll10 = {
+ .offset = 0x1e000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll10",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x1c000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_gpll0_main_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_gpll0_main_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct clk_parent_data gcc_parent_data_0_ao[] = {
+ { .fw_name = "bi_tcxo_ao" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_ODD, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_odd.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_ODD, 3 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_odd.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_PCIE_0_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .fw_name = "pcie_0_pipe_clk", .name = "pcie_0_pipe_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_PCIE_1_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .fw_name = "pcie_1_pipe_clk", .name = "pcie_1_pipe_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_ODD, 3 },
+ { P_GCC_GPLL10_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_odd.clkr.hw },
+ { .hw = &gcc_gpll10.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_ODD, 3 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll0_out_odd.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .fw_name = "ufs_phy_rx_symbol_0_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .fw_name = "ufs_phy_rx_symbol_1_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .fw_name = "ufs_phy_tx_symbol_0_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .fw_name = "usb3_phy_wrapper_gcc_usb30_pipe_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_14[] = {
+ { .fw_name = "usb3_phy_wrapper_gcc_usb30_pipe_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_15[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_MSS_GPLL0_MAIN_DIV_CLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_15[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_mss_gpll0_main_div_clk_src.clkr.hw },
+};
+
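+/*
+ * The pipe/symbol clock muxes below can fall back to bi_tcxo (index 2),
+ * presumably so they keep running while the corresponding PHY clock is off.
+ */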
+static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0x6b054,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_6,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
+ .reg = 0x8d054,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_7,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x77058,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_10,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x770c8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_11,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x77048,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_12,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0xf060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_13,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_13,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_13),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = {
+ .reg = 0x9e060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_14,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_14,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_14),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x4800c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0_ao),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b058,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x6b03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x8d058,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x8d03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_EVEN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qspi_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qspi_core_clk_src = {
+ .cmd_rcgr = 0x4b00c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qspi_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
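+/* QUPv3 serial engine clock sources (wrappers 0 and 1) */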
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s2_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x17270,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x173a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x174d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17600,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x17730,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x17860,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x18270,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x183a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x184d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18600,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x18730,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x18860,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GCC_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(192000000, P_GCC_GPLL10_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GCC_GPLL10_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x7500c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x7502c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .flags = CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x7706c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_ODD, 1, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_sec_master_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_EVEN, 5, 0, 0),
+ F(120000000, P_GCC_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0x9e020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_usb30_sec_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x9e038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0x9e064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sec_ctrl_clk_src[] = {
+ F(4800000, P_BI_TCXO, 4, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sec_ctrl_clk_src = {
+ .cmd_rcgr = 0x3d02c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_sec_ctrl_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sec_ctrl_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
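+/* Read-only post-dividers for the CPUSS AHB and USB mock UTMI clocks */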
+static struct clk_regmap_div gcc_cpuss_ahb_postdiv_clk_src = {
+ .reg = 0x48024,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_cpuss_ahb_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0xf050,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_sec_mock_utmi_postdiv_clk_src = {
+ .reg = 0x9e050,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
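+/* Branch (gate) clocks */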
+static struct clk_branch gcc_pcie_clkref_en = {
+ .halt_reg = 0x8c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_edp_clkref_en = {
+ .halt_reg = 0x8c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_edp_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_0_axi_clk = {
+ .halt_reg = 0x6b080,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x6b080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_1_axi_clk = {
+ .halt_reg = 0x8d084,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x8d084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = {
+ .halt_reg = 0x90010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x90010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_center_sf_axi_clk = {
+ .halt_reg = 0x8d088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_center_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0xf080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xf080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xf080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0x9e080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9e080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9e080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x2601c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2601c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0xf07c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xf07c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xf07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0x9e07c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9e07c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9e07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* For CPUSS functionality the AHB clock needs to be left enabled */
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x48000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x71154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_pcie_sf_clk = {
+ .halt_reg = 0x8d080,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x8d080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_pcie_sf_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x2700c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_sf_axi_clk = {
+ .halt_reg = 0x27014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x27014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x27014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_en = {
+ .halt_reg = 0x8c014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_iref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_phy_rchng_clk = {
+ .halt_reg = 0x6b038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie0_phy_rchng_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_phy_rchng_clk = {
+ .halt_reg = 0x8d038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie1_phy_rchng_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x8d028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x8d024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x8d01c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x8d030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(30),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x8d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x8d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_throttle_core_clk = {
+ .halt_reg = 0x90018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x90018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x28008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x28008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x28008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_cnoc_periph_ahb_clk = {
+ .halt_reg = 0x4b004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x4b004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_cnoc_periph_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_core_clk = {
+ .halt_reg = 0x4b008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qspi_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x23008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x23000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x1713c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x1726c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x1739c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x174cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x175fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x1772c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x1785c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x23140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x23138,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x1813c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x1826c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x1839c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x184cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x185fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x1872c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x1885c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x75004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x75004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x75008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x75008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x75024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* For CPUSS functionality the AHB clock needs to be left enabled */
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x48178,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x48178,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_throttle_pcie_ahb_clk = {
+ .halt_reg = 0x9001c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9001c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_throttle_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_titan_nrt_throttle_core_clk = {
+ .halt_reg = 0x26024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_titan_nrt_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_titan_rt_throttle_core_clk = {
+ .halt_reg = 0x26018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_titan_rt_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_1_clkref_en = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_1_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x7709c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7709c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7709c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x77020,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770b8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x7705c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7705c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7705c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw =
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0x9e010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9e010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0x9e01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9e01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw =
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0x9e018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9e018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0xf05c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0xf05c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xf05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_lpass_clk = {
+ .halt_reg = 0x47020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x47020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_lpass_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x8a000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_offline_axi_clk = {
+ .halt_reg = 0x8a004,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x8a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_offline_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .halt_reg = 0x8a154,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x8a154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_memnoc_axi_clk = {
+ .halt_reg = 0x8a158,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a158,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_memnoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_mss_q6ss_boot_clk_src = {
+ .reg = 0x8a2a4,
+ .shift = 0,
+ .width = 1,
+ .parent_map = gcc_parent_map_15,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6ss_boot_clk_src",
+ .parent_data = gcc_parent_data_15,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_15),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0x9e054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9e054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0x9e058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9e058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+ .halt_reg = 0x9e05c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9e05c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9e05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x2800c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2800c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_mvp_throttle_core_clk = {
+ .halt_reg = 0x28010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x28010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x28010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_mvp_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wpss_ahb_clk = {
+ .halt_reg = 0x9d154,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9d154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wpss_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wpss_ahb_bdg_mst_clk = {
+ .halt_reg = 0x9d158,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9d158,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wpss_ahb_bdg_mst_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_wpss_rscp_clk = {
+ .halt_reg = 0x9d16c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9d16c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wpss_rscp_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gcc_pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .pd = {
+ .name = "gcc_pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gcc_pcie_1_gdsc = {
+ .gdscr = 0x8d004,
+ .pd = {
+ .name = "gcc_pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gcc_ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "gcc_ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gcc_usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "gcc_usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gcc_usb30_sec_gdsc = {
+ .gdscr = 0x9e004,
+ .pd = {
+ .name = "gcc_usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x7d050,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
+ .gdscr = 0x7d058,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc = {
+ .gdscr = 0x7d054,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu0_gdsc = {
+ .gdscr = 0x7d05c,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu1_gdsc = {
+ .gdscr = 0x7d060,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct clk_regmap *gcc_sc7280_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_0_AXI_CLK] = &gcc_aggre_noc_pcie_0_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_1_AXI_CLK] = &gcc_aggre_noc_pcie_1_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_AHB_POSTDIV_CLK_SRC] = &gcc_cpuss_ahb_postdiv_clk_src.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_CLK] = &gcc_ddrss_pcie_sf_clk.clkr,
+ [GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_SF_AXI_CLK] = &gcc_disp_sf_axi_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL0_OUT_ODD] = &gcc_gpll0_out_odd.clkr,
+ [GCC_GPLL1] = &gcc_gpll1.clkr,
+ [GCC_GPLL10] = &gcc_gpll10.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_EN] = &gcc_gpu_iref_en.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_PCIE0_PHY_RCHNG_CLK] = &gcc_pcie0_phy_rchng_clk.clkr,
+ [GCC_PCIE1_PHY_RCHNG_CLK] = &gcc_pcie1_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_THROTTLE_CORE_CLK] = &gcc_pcie_throttle_core_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QSPI_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_cnoc_periph_ahb_clk.clkr,
+ [GCC_QSPI_CORE_CLK] = &gcc_qspi_core_clk.clkr,
+ [GCC_QSPI_CORE_CLK_SRC] = &gcc_qspi_core_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_THROTTLE_PCIE_AHB_CLK] = &gcc_throttle_pcie_ahb_clk.clkr,
+ [GCC_TITAN_NRT_THROTTLE_CORE_CLK] =
+ &gcc_titan_nrt_throttle_core_clk.clkr,
+ [GCC_TITAN_RT_THROTTLE_CORE_CLK] = &gcc_titan_rt_throttle_core_clk.clkr,
+ [GCC_UFS_1_CLKREF_EN] = &gcc_ufs_1_clkref_en.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] =
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] =
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] =
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC] =
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb3_sec_phy_pipe_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_MVP_THROTTLE_CORE_CLK] =
+ &gcc_video_mvp_throttle_core_clk.clkr,
+ [GCC_CFG_NOC_LPASS_CLK] = &gcc_cfg_noc_lpass_clk.clkr,
+ [GCC_MSS_GPLL0_MAIN_DIV_CLK_SRC] = &gcc_mss_gpll0_main_div_clk_src.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_OFFLINE_AXI_CLK] = &gcc_mss_offline_axi_clk.clkr,
+ [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr,
+ [GCC_MSS_Q6SS_BOOT_CLK_SRC] = &gcc_mss_q6ss_boot_clk_src.clkr,
+ [GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_CENTER_SF_AXI_CLK] =
+ &gcc_aggre_noc_pcie_center_sf_axi_clk.clkr,
+ [GCC_PCIE_CLKREF_EN] = &gcc_pcie_clkref_en.clkr,
+ [GCC_EDP_CLKREF_EN] = &gcc_edp_clkref_en.clkr,
+ [GCC_SEC_CTRL_CLK_SRC] = &gcc_sec_ctrl_clk_src.clkr,
+ [GCC_WPSS_AHB_CLK] = &gcc_wpss_ahb_clk.clkr,
+ [GCC_WPSS_AHB_BDG_MST_CLK] = &gcc_wpss_ahb_bdg_mst_clk.clkr,
+ [GCC_WPSS_RSCP_CLK] = &gcc_wpss_rscp_clk.clkr,
+};
+
+static struct gdsc *gcc_sc7280_gdscs[] = {
+ [GCC_PCIE_0_GDSC] = &gcc_pcie_0_gdsc,
+ [GCC_PCIE_1_GDSC] = &gcc_pcie_1_gdsc,
+ [GCC_UFS_PHY_GDSC] = &gcc_ufs_phy_gdsc,
+ [GCC_USB30_PRIM_GDSC] = &gcc_usb30_prim_gdsc,
+ [GCC_USB30_SEC_GDSC] = &gcc_usb30_sec_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU0_GDSC] = &hlos1_vote_turing_mmu_tbu0_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU1_GDSC] = &hlos1_vote_turing_mmu_tbu1_gdsc,
+};
+
+static const struct qcom_reset_map gcc_sc7280_resets[] = {
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_1_BCR] = { 0x8d000 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC1_BCR] = { 0x75000 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB30_SEC_BCR] = { 0x9e000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
+};
+
+static const struct regmap_config gcc_sc7280_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9f128,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sc7280_desc = {
+ .config = &gcc_sc7280_regmap_config,
+ .clks = gcc_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sc7280_clocks),
+ .resets = gcc_sc7280_resets,
+ .num_resets = ARRAY_SIZE(gcc_sc7280_resets),
+ .gdscs = gcc_sc7280_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sc7280_gdscs),
+};
+
+static const struct of_device_id gcc_sc7280_match_table[] = {
+ { .compatible = "qcom,gcc-sc7280" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sc7280_match_table);
+
+static int gcc_sc7280_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sc7280_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Keep the clocks always-ON
+ * GCC_CAMERA_AHB_CLK/XO_CLK, GCC_DISP_AHB_CLK/XO_CLK
+ * GCC_VIDEO_AHB_CLK/XO_CLK, GCC_GPU_CFG_AHB_CLK
+ */
+ regmap_update_bits(regmap, 0x26004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x26028, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x27004, BIT(0), BIT(0));
+	regmap_update_bits(regmap, 0x2701c, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x28004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x28014, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ return qcom_cc_really_probe(pdev, &gcc_sc7280_desc, regmap);
+}
+
+static struct platform_driver gcc_sc7280_driver = {
+ .probe = gcc_sc7280_probe,
+ .driver = {
+ .name = "gcc-sc7280",
+ .of_match_table = gcc_sc7280_match_table,
+ },
+};
+
+static int __init gcc_sc7280_init(void)
+{
+ return platform_driver_register(&gcc_sc7280_driver);
+}
+subsys_initcall(gcc_sc7280_init);
+
+static void __exit gcc_sc7280_exit(void)
+{
+ platform_driver_unregister(&gcc_sc7280_driver);
+}
+module_exit(gcc_sc7280_exit);
+
+MODULE_DESCRIPTION("QTI GCC SC7280 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-sc8180x.c b/drivers/clk/qcom/gcc-sc8180x.c
new file mode 100644
index 000000000000..90525ae1bb3a
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sc8180x.c
@@ -0,0 +1,4629 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021, Linaro Ltd.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-sc8180x.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_AUD_REF_CLK,
+ P_BI_TCXO,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL1_OUT_MAIN,
+ P_GPLL2_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL5_OUT_MAIN,
+ P_GPLL7_OUT_MAIN,
+ P_GPLL9_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static struct pll_vco trion_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_trion_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_trion_even[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_trion_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_trion_even),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_trion_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_trion_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll7 = {
+ .offset = 0x1a000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll7",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_trion_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_1[] = {
+ { .fw_name = "bi_tcxo", },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "sleep_clk", },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parents_2[] = {
+ { .fw_name = "bi_tcxo", },
+ { .fw_name = "sleep_clk", },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL2_OUT_MAIN, 2 },
+ { P_GPLL5_OUT_MAIN, 3 },
+ { P_GPLL1_OUT_MAIN, 4 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_3[] = {
+ { .fw_name = "bi_tcxo", },
+ { .hw = &gpll0.clkr.hw },
+ { .name = "gpll2" },
+ { .name = "gpll5" },
+ { .hw = &gpll1.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parents_4[] = {
+ { .fw_name = "bi_tcxo", },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parents_5[] = {
+ { .fw_name = "bi_tcxo", },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL7_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_6[] = {
+ { .fw_name = "bi_tcxo", },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL9_OUT_MAIN, 2 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_7[] = {
+ { .fw_name = "bi_tcxo", },
+ { .hw = &gpll0.clkr.hw },
+	{ .name = "gpll9" },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_AUD_REF_CLK, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_8[] = {
+ { .fw_name = "bi_tcxo", },
+ { .hw = &gpll0.clkr.hw },
+ { .name = "aud_ref_clk" },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x48014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(125000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_ptp_clk_src = {
+ .cmd_rcgr = 0x6038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_emac_ptp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_ptp_clk_src",
+ .parent_data = gcc_parents_6,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_rgmii_clk_src[] = {
+ F(2500000, P_BI_TCXO, 1, 25, 192),
+ F(5000000, P_BI_TCXO, 1, 25, 96),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(125000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_rgmii_clk_src = {
+ .cmd_rcgr = 0x601c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_emac_rgmii_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_rgmii_clk_src",
+ .parent_data = gcc_parents_6,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp4_clk_src = {
+ .cmd_rcgr = 0xbe004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp4_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp5_clk_src = {
+ .cmd_rcgr = 0xbf004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp5_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_npu_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_EVEN, 5, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(403000000, P_GPLL4_OUT_MAIN, 2, 0, 0),
+ F(533000000, P_GPLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_npu_axi_clk_src = {
+ .cmd_rcgr = 0x4d014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_npu_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_axi_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = 7,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b02c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x8d02c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2_aux_clk_src = {
+ .cmd_rcgr = 0x9d02c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_aux_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3_aux_clk_src = {
+ .cmd_rcgr = 0xa302c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_3_aux_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
+ .cmd_rcgr = 0x6f014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qspi_1_core_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qspi_1_core_clk_src = {
+ .cmd_rcgr = 0x4a00c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qspi_1_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_1_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qspi_core_clk_src = {
+ .cmd_rcgr = 0x4b008,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qspi_1_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ F(128000000, P_GPLL0_OUT_MAIN, 1, 16, 75),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x173a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x174d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x17608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x17868,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x17998,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x183a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x184d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x18608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0x1e148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0x1e278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
+ .cmd_rcgr = 0x1e3a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
+ .cmd_rcgr = 0x1e4d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0x1e608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0x1e738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parents_7,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parents_5,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_tsif_ref_clk_src[] = {
+ F(105495, P_BI_TCXO, 2, 1, 91),
+ { }
+};
+
+static struct clk_rcg2 gcc_tsif_ref_clk_src = {
+ .cmd_rcgr = 0x36010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_tsif_ref_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk_src",
+ .parent_data = gcc_parents_8,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_2_axi_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_2_axi_clk_src = {
+ .cmd_rcgr = 0xa2020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_2_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_axi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_card_2_ice_core_clk_src = {
+ .cmd_rcgr = 0xa2060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_2_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_ice_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_2_phy_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_2_phy_aux_clk_src = {
+ .cmd_rcgr = 0xa2094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_ufs_card_2_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_phy_aux_clk_src",
+ .parent_data = gcc_parents_4,
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_card_2_unipro_core_clk_src = {
+ .cmd_rcgr = 0xa2078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_2_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_unipro_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_axi_clk_src = {
+ .cmd_rcgr = 0x75020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_ice_core_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_ice_core_clk_src = {
+ .cmd_rcgr = 0x75060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = {
+ .cmd_rcgr = 0x75094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_ufs_card_2_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk_src",
+ .parent_data = gcc_parents_4,
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_unipro_core_clk_src = {
+ .cmd_rcgr = 0x75078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_2_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x77094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parents_4,
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_2_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mp_master_clk_src[] = {
+ F(33333333, P_GPLL0_OUT_EVEN, 9, 0, 0),
+ F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_mp_master_clk_src = {
+ .cmd_rcgr = 0xa601c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mp_master_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mp_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_EVEN, 15, 0, 0),
+ F(40000000, P_GPLL0_OUT_EVEN, 7.5, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_mp_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xa6034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mp_mock_utmi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0x1001c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x10034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_mp_phy_aux_clk_src = {
+ .cmd_rcgr = 0xa6068,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_card_2_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_mp_phy_aux_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_card_2_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0x10060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_card_2_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = {
+ .halt_reg = 0x90018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_card_axi_clk = {
+ .halt_reg = 0x750c0,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x750c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x750c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_card_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_axi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
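+/*
+ * The *_hw_ctl_clk branches toggle BIT(1) (the hardware clock gating
+ * control bit) of the same CBCR as their parent branch, using
+ * clk_branch_simple_ops so no halt status is polled.
+ */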
+static struct clk_branch gcc_aggre_ufs_card_axi_hw_ctl_clk = {
+ .halt_reg = 0x750c0,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x750c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x750c0,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_card_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_aggre_ufs_card_axi_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770c0,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x770c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x770c0,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x770c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770c0,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_aggre_ufs_phy_axi_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_mp_axi_clk = {
+ .halt_reg = 0xa6084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_mp_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_mp_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0xf07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0x1007c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1007c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_sec_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
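+/*
+ * BRANCH_HALT_VOTED branches are enabled through the shared vote registers
+ * (0x52004, 0x5200c, 0x52014); dropping this vote does not guarantee the
+ * clock stops, since other masters may still be voting for it.
+ */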
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0xb034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_mp_axi_clk = {
+ .halt_reg = 0xa609c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa609c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_mp_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_mp_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0xf078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0x10078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_sec_master_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* For CPUSS functionality the AHB clock needs to be left enabled */
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_cpuss_ahb_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+ .halt_reg = 0x48008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0xb038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_sf_axi_clk = {
+ .halt_reg = 0xb03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_axi_clk = {
+ .halt_reg = 0x6010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_ptp_clk = {
+ .halt_reg = 0x6034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_ptp_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_emac_ptp_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_rgmii_clk = {
+ .halt_reg = 0x6018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_rgmii_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_emac_rgmii_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_slv_ahb_clk = {
+ .halt_reg = 0x6014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x6014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp1_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp2_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp3_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp4_clk = {
+ .halt_reg = 0xbe000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xbe000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp4_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp4_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp5_clk = {
+ .halt_reg = 0xbf000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xbf000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp5_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp5_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
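+/*
+ * These branches hand GPLL0 (and its even divider) to the GPU clock
+ * controller; they have no halt register, so only a delay-based check is
+ * used after enabling.
+ */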
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &gpll0_out_even.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_at_clk = {
+ .halt_reg = 0x4d010,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x4d010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_at_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_axi_clk = {
+ .halt_reg = 0x4d008,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x4d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_npu_axi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &gpll0_out_even.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_trig_clk = {
+ .halt_reg = 0x4d00c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x4d00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_trig_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_phy_refgen_clk = {
+ .halt_reg = 0x6f02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie0_phy_refgen_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_phy_refgen_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_phy_refgen_clk = {
+ .halt_reg = 0x6f030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie1_phy_refgen_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_phy_refgen_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_phy_refgen_clk = {
+ .halt_reg = 0x6f034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie2_phy_refgen_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_phy_refgen_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_phy_refgen_clk = {
+ .halt_reg = 0x6f038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie3_phy_refgen_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_phy_refgen_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_0_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_clk = {
+ .halt_reg = 0x8c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
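+/*
+ * The PCIe pipe clocks are sourced from the PCIe PHY, which may not be up
+ * when the branch is toggled, so the halt bit is skipped.
+ */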
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x8d020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_1_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x8d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_clkref_clk = {
+ .halt_reg = 0x8c02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x8d018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x8d024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(30),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x8d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x8d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_aux_clk = {
+ .halt_reg = 0x9d020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_2_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
+ .halt_reg = 0x9d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_clkref_clk = {
+ .halt_reg = 0x8c014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
+ .halt_reg = 0x9d018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_pipe_clk = {
+ .halt_reg = 0x9d024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_axi_clk = {
+ .halt_reg = 0x9d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_q2a_axi_clk = {
+ .halt_reg = 0x9d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_aux_clk = {
+ .halt_reg = 0xa3020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_3_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_3_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_cfg_ahb_clk = {
+ .halt_reg = 0xa301c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa301c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_3_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_clkref_clk = {
+ .halt_reg = 0x8c018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_3_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_mstr_axi_clk = {
+ .halt_reg = 0xa3018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_3_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_pipe_clk = {
+ .halt_reg = 0xa3024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_3_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_slv_axi_clk = {
+ .halt_reg = 0xa3014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa3014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_3_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_slv_q2a_axi_clk = {
+ .halt_reg = 0xa3010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_3_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_aux_clk = {
+ .halt_reg = 0x6f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_0_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pdm2_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0xb018,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0xb010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0xb014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_1_cnoc_periph_ahb_clk = {
+ .halt_reg = 0x4a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_1_cnoc_periph_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_1_core_clk = {
+ .halt_reg = 0x4a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_1_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qspi_1_core_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_cnoc_periph_ahb_clk = {
+ .halt_reg = 0x4b000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_cnoc_periph_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_core_clk = {
+ .halt_reg = 0x4b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qspi_core_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x17144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x17274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x173a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x174d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x17604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x17734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x17864,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x17994,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap0_s7_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x183a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x184d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x18604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0x1e144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap2_s0_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0x1e274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap2_s1_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0x1e3a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap2_s2_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0x1e4d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap2_s3_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0x1e604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap2_s4_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0x1e734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_qupv3_wrap2_s5_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0x1e008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52014,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_sdcc2_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_sdcc4_apps_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* For CPUSS functionality the SYS NOC clock needs to be left enabled */
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x4819c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_cpuss_ahb_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_inactivity_timers_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_inactivity_timers_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+ .halt_reg = 0x36008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_tsif_ref_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_2_ahb_clk = {
+ .halt_reg = 0xa2014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xa2014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa2014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_2_axi_clk = {
+ .halt_reg = 0xa2010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xa2010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa2010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_2_axi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_2_ice_core_clk = {
+ .halt_reg = 0xa205c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xa205c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa205c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_ice_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_2_ice_core_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_2_phy_aux_clk = {
+ .halt_reg = 0xa2090,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xa2090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa2090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_phy_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_2_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_2_rx_symbol_0_clk = {
+ .halt_reg = 0xa201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_2_rx_symbol_1_clk = {
+ .halt_reg = 0xa20ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa20ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_2_tx_symbol_0_clk = {
+ .halt_reg = 0xa2018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa2018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_2_unipro_core_clk = {
+ .halt_reg = 0xa2058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xa2058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa2058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_2_unipro_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_2_unipro_core_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ahb_clk = {
+ .halt_reg = 0x75014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_axi_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_axi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_axi_hw_ctl_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_axi_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ice_core_clk = {
+ .halt_reg = 0x7505c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7505c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7505c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_ice_core_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x7505c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7505c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7505c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_ice_core_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_phy_aux_clk = {
+ .halt_reg = 0x75090,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x75090,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75090,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_phy_aux_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = {
+ .halt_reg = 0x7501c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = {
+ .halt_reg = 0x750ac,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x750ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = {
+ .halt_reg = 0x75018,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x75018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_unipro_core_clk = {
+ .halt_reg = 0x75058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_unipro_core_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x75058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75058,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_card_unipro_core_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_axi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_axi_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x7705c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7705c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7705c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x7705c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7705c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7705c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_ice_core_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x77090,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x77090,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77090,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_phy_aux_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770ac,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x770ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x77058,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77058,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_ufs_phy_unipro_core_clk.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_master_clk = {
+ .halt_reg = 0xa6010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mp_master_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_mp_master_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_mock_utmi_clk = {
+ .halt_reg = 0xa6018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mp_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_mp_mock_utmi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_sleep_clk = {
+ .halt_reg = 0xa6014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mp_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_prim_master_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0x10010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_sec_master_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0x10018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0x10014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_aux_clk = {
+ .halt_reg = 0xa6050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_mp_phy_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb3_mp_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_com_aux_clk = {
+ .halt_reg = 0xa6054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_mp_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb3_mp_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_pipe_0_clk = {
+ .halt_reg = 0xa6058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xa6058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_mp_phy_pipe_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_pipe_1_clk = {
+ .halt_reg = 0xa605c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xa605c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_mp_phy_pipe_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x8c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0xf058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_clkref_clk = {
+ .halt_reg = 0x8c028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0x10050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0x10054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+ .halt_reg = 0x10058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x10058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0xb024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axic_clk = {
+ .halt_reg = 0xb02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axic_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc usb30_sec_gdsc = {
+ .gdscr = 0x10004,
+ .pd = {
+ .name = "usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc emac_gdsc = {
+ .gdscr = 0x6004,
+ .pd = {
+ .name = "emac_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ufs_card_gdsc = {
+ .gdscr = 0x75004,
+ .pd = {
+ .name = "ufs_card_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x8d004,
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc pcie_2_gdsc = {
+ .gdscr = 0x9d004,
+ .pd = {
+ .name = "pcie_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ufs_card_2_gdsc = {
+ .gdscr = 0xa2004,
+ .pd = {
+ .name = "ufs_card_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc pcie_3_gdsc = {
+ .gdscr = 0xa3004,
+ .pd = {
+ .name = "pcie_3_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc usb30_mp_gdsc = {
+ .gdscr = 0xa6004,
+ .pd = {
+ .name = "usb30_mp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *gcc_sc8180x_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
+ [GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
+ [GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_card_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_USB3_MP_AXI_CLK] = &gcc_aggre_usb3_mp_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_MP_AXI_CLK] = &gcc_cfg_noc_usb3_mp_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_SF_AXI_CLK] = &gcc_disp_sf_axi_clk.clkr,
+ [GCC_EMAC_AXI_CLK] = &gcc_emac_axi_clk.clkr,
+ [GCC_EMAC_PTP_CLK] = &gcc_emac_ptp_clk.clkr,
+ [GCC_EMAC_PTP_CLK_SRC] = &gcc_emac_ptp_clk_src.clkr,
+ [GCC_EMAC_RGMII_CLK] = &gcc_emac_rgmii_clk.clkr,
+ [GCC_EMAC_RGMII_CLK_SRC] = &gcc_emac_rgmii_clk_src.clkr,
+ [GCC_EMAC_SLV_AHB_CLK] = &gcc_emac_slv_ahb_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GP4_CLK] = &gcc_gp4_clk.clkr,
+ [GCC_GP4_CLK_SRC] = &gcc_gp4_clk_src.clkr,
+ [GCC_GP5_CLK] = &gcc_gp5_clk.clkr,
+ [GCC_GP5_CLK_SRC] = &gcc_gp5_clk_src.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_NPU_AT_CLK] = &gcc_npu_at_clk.clkr,
+ [GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr,
+ [GCC_NPU_AXI_CLK_SRC] = &gcc_npu_axi_clk_src.clkr,
+ [GCC_NPU_GPLL0_CLK_SRC] = &gcc_npu_gpll0_clk_src.clkr,
+ [GCC_NPU_GPLL0_DIV_CLK_SRC] = &gcc_npu_gpll0_div_clk_src.clkr,
+ [GCC_NPU_TRIG_CLK] = &gcc_npu_trig_clk.clkr,
+ [GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr,
+ [GCC_PCIE1_PHY_REFGEN_CLK] = &gcc_pcie1_phy_refgen_clk.clkr,
+ [GCC_PCIE2_PHY_REFGEN_CLK] = &gcc_pcie2_phy_refgen_clk.clkr,
+ [GCC_PCIE3_PHY_REFGEN_CLK] = &gcc_pcie3_phy_refgen_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_CLKREF_CLK] = &gcc_pcie_1_clkref_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK] = &gcc_pcie_2_aux_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK_SRC] = &gcc_pcie_2_aux_clk_src.clkr,
+ [GCC_PCIE_2_CFG_AHB_CLK] = &gcc_pcie_2_cfg_ahb_clk.clkr,
+ [GCC_PCIE_2_CLKREF_CLK] = &gcc_pcie_2_clkref_clk.clkr,
+ [GCC_PCIE_2_MSTR_AXI_CLK] = &gcc_pcie_2_mstr_axi_clk.clkr,
+ [GCC_PCIE_2_PIPE_CLK] = &gcc_pcie_2_pipe_clk.clkr,
+ [GCC_PCIE_2_SLV_AXI_CLK] = &gcc_pcie_2_slv_axi_clk.clkr,
+ [GCC_PCIE_2_SLV_Q2A_AXI_CLK] = &gcc_pcie_2_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_3_AUX_CLK] = &gcc_pcie_3_aux_clk.clkr,
+ [GCC_PCIE_3_AUX_CLK_SRC] = &gcc_pcie_3_aux_clk_src.clkr,
+ [GCC_PCIE_3_CFG_AHB_CLK] = &gcc_pcie_3_cfg_ahb_clk.clkr,
+ [GCC_PCIE_3_CLKREF_CLK] = &gcc_pcie_3_clkref_clk.clkr,
+ [GCC_PCIE_3_MSTR_AXI_CLK] = &gcc_pcie_3_mstr_axi_clk.clkr,
+ [GCC_PCIE_3_PIPE_CLK] = &gcc_pcie_3_pipe_clk.clkr,
+ [GCC_PCIE_3_SLV_AXI_CLK] = &gcc_pcie_3_slv_axi_clk.clkr,
+ [GCC_PCIE_3_SLV_Q2A_AXI_CLK] = &gcc_pcie_3_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QSPI_1_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_1_cnoc_periph_ahb_clk.clkr,
+ [GCC_QSPI_1_CORE_CLK] = &gcc_qspi_1_core_clk.clkr,
+ [GCC_QSPI_1_CORE_CLK_SRC] = &gcc_qspi_1_core_clk_src.clkr,
+ [GCC_QSPI_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_cnoc_periph_ahb_clk.clkr,
+ [GCC_QSPI_CORE_CLK] = &gcc_qspi_core_clk.clkr,
+ [GCC_QSPI_CORE_CLK_SRC] = &gcc_qspi_core_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+ [GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_TSIF_REF_CLK_SRC] = &gcc_tsif_ref_clk_src.clkr,
+ [GCC_UFS_CARD_2_AHB_CLK] = &gcc_ufs_card_2_ahb_clk.clkr,
+ [GCC_UFS_CARD_2_AXI_CLK] = &gcc_ufs_card_2_axi_clk.clkr,
+ [GCC_UFS_CARD_2_AXI_CLK_SRC] = &gcc_ufs_card_2_axi_clk_src.clkr,
+ [GCC_UFS_CARD_2_ICE_CORE_CLK] = &gcc_ufs_card_2_ice_core_clk.clkr,
+ [GCC_UFS_CARD_2_ICE_CORE_CLK_SRC] = &gcc_ufs_card_2_ice_core_clk_src.clkr,
+ [GCC_UFS_CARD_2_PHY_AUX_CLK] = &gcc_ufs_card_2_phy_aux_clk.clkr,
+ [GCC_UFS_CARD_2_PHY_AUX_CLK_SRC] = &gcc_ufs_card_2_phy_aux_clk_src.clkr,
+ [GCC_UFS_CARD_2_RX_SYMBOL_0_CLK] = &gcc_ufs_card_2_rx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_2_RX_SYMBOL_1_CLK] = &gcc_ufs_card_2_rx_symbol_1_clk.clkr,
+ [GCC_UFS_CARD_2_TX_SYMBOL_0_CLK] = &gcc_ufs_card_2_tx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_2_UNIPRO_CORE_CLK] = &gcc_ufs_card_2_unipro_core_clk.clkr,
+ [GCC_UFS_CARD_2_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_card_2_unipro_core_clk_src.clkr,
+ [GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr,
+ [GCC_UFS_CARD_AXI_HW_CTL_CLK] = &gcc_ufs_card_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr,
+ [GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_card_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr,
+ [GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_card_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_card_unipro_core_clk_src.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_card_unipro_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_phy_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr,
+ [GCC_USB30_MP_MASTER_CLK] = &gcc_usb30_mp_master_clk.clkr,
+ [GCC_USB30_MP_MASTER_CLK_SRC] = &gcc_usb30_mp_master_clk_src.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_CLK] = &gcc_usb30_mp_mock_utmi_clk.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_CLK_SRC] = &gcc_usb30_mp_mock_utmi_clk_src.clkr,
+ [GCC_USB30_MP_SLEEP_CLK] = &gcc_usb30_mp_sleep_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] = &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB3_MP_PHY_AUX_CLK] = &gcc_usb3_mp_phy_aux_clk.clkr,
+ [GCC_USB3_MP_PHY_AUX_CLK_SRC] = &gcc_usb3_mp_phy_aux_clk_src.clkr,
+ [GCC_USB3_MP_PHY_COM_AUX_CLK] = &gcc_usb3_mp_phy_com_aux_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_0_CLK] = &gcc_usb3_mp_phy_pipe_0_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_1_CLK] = &gcc_usb3_mp_phy_pipe_1_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_CLKREF_CLK] = &gcc_usb3_sec_clkref_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+ [GCC_VIDEO_AXIC_CLK] = &gcc_video_axic_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL1] = &gpll1.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL7] = &gpll7.clkr,
+};
+
+static const struct qcom_reset_map gcc_sc8180x_resets[] = {
+ [GCC_EMAC_BCR] = { 0x6000 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_MMSS_BCR] = { 0xb000 },
+ [GCC_NPU_BCR] = { 0x4d000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_1_BCR] = { 0x8d000 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+ [GCC_PCIE_2_BCR] = { 0x9d000 },
+ [GCC_PCIE_2_PHY_BCR] = { 0xa701c },
+ [GCC_PCIE_3_BCR] = { 0xa3000 },
+ [GCC_PCIE_3_PHY_BCR] = { 0xa801c },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_PRNG_BCR] = { 0x34000 },
+ [GCC_QSPI_1_BCR] = { 0x4a000 },
+ [GCC_QSPI_BCR] = { 0x24008 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x17000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0x1e000 },
+ [GCC_QUSB2PHY_5_BCR] = { 0x12010 },
+ [GCC_QUSB2PHY_MP0_BCR] = { 0x12008 },
+ [GCC_QUSB2PHY_MP1_BCR] = { 0x1200c },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_USB3_PHY_PRIM_SP0_BCR] = { 0x50000 },
+ [GCC_USB3_PHY_PRIM_SP1_BCR] = { 0x50004 },
+ [GCC_USB3_DP_PHY_PRIM_SP0_BCR] = { 0x50010 },
+ [GCC_USB3_DP_PHY_PRIM_SP1_BCR] = { 0x50014 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x50018 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x5001c },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50020 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_TSIF_BCR] = { 0x36000 },
+ [GCC_UFS_CARD_2_BCR] = { 0xa2000 },
+ [GCC_UFS_CARD_BCR] = { 0x75000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_MP_BCR] = { 0xa6000 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB30_SEC_BCR] = { 0x10000 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+ [GCC_VIDEO_AXIC_CLK_BCR] = { 0xb02c, 2 },
+ [GCC_VIDEO_AXI0_CLK_BCR] = { 0xb024, 2 },
+ [GCC_VIDEO_AXI1_CLK_BCR] = { 0xb028, 2 },
+};
+
+static struct gdsc *gcc_sc8180x_gdscs[] = {
+ [EMAC_GDSC] = &emac_gdsc,
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [PCIE_2_GDSC] = &pcie_2_gdsc,
+ [PCIE_3_GDSC] = &pcie_3_gdsc,
+ [UFS_CARD_GDSC] = &ufs_card_gdsc,
+ [UFS_CARD_2_GDSC] = &ufs_card_2_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_MP_GDSC] = &usb30_mp_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB30_SEC_GDSC] = &usb30_sec_gdsc,
+};
+
+static const struct regmap_config gcc_sc8180x_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xc0004,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sc8180x_desc = {
+ .config = &gcc_sc8180x_regmap_config,
+ .clks = gcc_sc8180x_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sc8180x_clocks),
+ .resets = gcc_sc8180x_resets,
+ .num_resets = ARRAY_SIZE(gcc_sc8180x_resets),
+ .gdscs = gcc_sc8180x_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sc8180x_gdscs),
+};
+
+static const struct of_device_id gcc_sc8180x_match_table[] = {
+ { .compatible = "qcom,gcc-sc8180x" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sc8180x_match_table);
+
+static int gcc_sc8180x_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_sc8180x_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Enable the following always-on clocks:
+ * GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK, GCC_DISP_AHB_CLK,
+ * GCC_VIDEO_XO_CLK, GCC_CAMERA_XO_CLK, GCC_DISP_XO_CLK,
+ * GCC_CPUSS_GNOC_CLK, GCC_CPUSS_DVM_BUS_CLK, GCC_NPU_CFG_AHB_CLK and
+ * GCC_GPU_CFG_AHB_CLK
+ */
+ regmap_update_bits(regmap, 0xb004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0xb008, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0xb00c, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0xb040, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0xb044, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0xb048, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x48190, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x4d004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+
+ /* Disable the GPLL0 active input to NPU and GPU via MISC registers */
+ regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+
+ return qcom_cc_really_probe(pdev, &gcc_sc8180x_desc, regmap);
+}
+
+static struct platform_driver gcc_sc8180x_driver = {
+ .probe = gcc_sc8180x_probe,
+ .driver = {
+ .name = "gcc-sc8180x",
+ .of_match_table = gcc_sc8180x_match_table,
+ },
+};
+
+static int __init gcc_sc8180x_init(void)
+{
+ return platform_driver_register(&gcc_sc8180x_driver);
+}
+core_initcall(gcc_sc8180x_init);
+
+static void __exit gcc_sc8180x_exit(void)
+{
+ platform_driver_unregister(&gcc_sc8180x_driver);
+}
+module_exit(gcc_sc8180x_exit);
+
+MODULE_DESCRIPTION("QTI GCC SC8180x driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index 31258795e7b8..6394257ca8c0 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -1571,6 +1571,7 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_gpu_cfg_ahb_clk",
.ops = &clk_branch2_ops,
+ .flags = CLK_IS_CRITICAL,
},
},
};
@@ -1684,6 +1685,12 @@ static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_mmss_noc_cfg_ahb_clk",
.ops = &clk_branch2_ops,
+ /*
+ * Any access to mmss depends on this clock.
+ * Gating this clock has been shown to crash the system
+ * when mmssnoc_axi_rpm_clk is inited in rpmcc.
+ */
+ .flags = CLK_IS_CRITICAL,
},
},
};
diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c
index 6cb6617b8d88..ab594a0f0c40 100644
--- a/drivers/clk/qcom/gcc-sm8250.c
+++ b/drivers/clk/qcom/gcc-sm8250.c
@@ -722,7 +722,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
.name = "gcc_sdcc2_apps_clk_src",
.parent_data = gcc_parent_data_4,
.num_parents = 5,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -745,7 +745,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
.name = "gcc_sdcc4_apps_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = 3,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-sm8350.c b/drivers/clk/qcom/gcc-sm8350.c
new file mode 100644
index 000000000000..1c23b9f84900
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sm8350.c
@@ -0,0 +1,3890 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021, Linaro Limited
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-sm8350.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_PCIE_0_PIPE_CLK,
+ P_PCIE_1_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_UFS_CARD_RX_SYMBOL_0_CLK,
+ P_UFS_CARD_RX_SYMBOL_1_CLK,
+ P_UFS_CARD_TX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll0_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_5lpe_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ .name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x1c000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ .name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "sleep_clk" },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_PCIE_0_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .fw_name = "pcie_0_pipe_clk", },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_PCIE_1_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .fw_name = "pcie_1_pipe_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_UFS_CARD_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .fw_name = "ufs_card_rx_symbol_0_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_UFS_CARD_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .fw_name = "ufs_card_rx_symbol_1_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_UFS_CARD_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .fw_name = "ufs_card_tx_symbol_0_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .fw_name = "ufs_phy_rx_symbol_0_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .fw_name = "ufs_phy_rx_symbol_1_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .fw_name = "ufs_phy_tx_symbol_0_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 1 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .fw_name = "usb3_phy_wrapper_gcc_usb30_pipe_clk" },
+ { .fw_name = "core_bi_pll_test_se" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 1 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_14[] = {
+ { .fw_name = "usb3_uni_phy_sec_gcc_usb30_pipe_clk" },
+ { .fw_name = "core_bi_pll_test_se" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0x6b054,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
+ .reg = 0x8d054,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_5,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_card_rx_symbol_0_clk_src = {
+ .reg = 0x75058,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_7,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_card_rx_symbol_1_clk_src = {
+ .reg = 0x750c8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_8,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_card_tx_symbol_0_clk_src = {
+ .reg = 0x75048,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_9,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x77058,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_10,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x770c8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_11,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x77048,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_12,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0xf060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_13,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_13,
+ .num_parents = 3,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = {
+ .reg = 0x10060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_14,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_14,
+ .num_parents = 3,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b058,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x6b03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x8d058,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x8d03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x17270,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x173a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x174d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17600,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x17730,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x17860,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x18270,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x183a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x184d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18600,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0x1e010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0x1e140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
+ .cmd_rcgr = 0x1e270,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
+ .cmd_rcgr = 0x1e3a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0x1e4d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0x1e600,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
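+/*
+ * The SDCC apps clocks use the "floor" RCG ops so a requested rate is rounded
+ * down to the nearest table entry, never up - the card must not be driven
+ * faster than the rate it negotiated.
+ */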
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_axi_clk_src = {
+ .cmd_rcgr = 0x75024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_ice_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_ice_core_clk_src = {
+ .cmd_rcgr = 0x7506c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_phy_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = {
+ .cmd_rcgr = 0x750a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_card_unipro_core_clk_src = {
+ .cmd_rcgr = 0x75084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x7706c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0x10020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x10038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0x10064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
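+/*
+ * The mock UTMI post-dividers use read-only divider ops: Linux only reports
+ * the divider value already programmed in hardware (presumably by the boot
+ * chain) and never rewrites it.
+ */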
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0xf050,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_sec_mock_utmi_postdiv_clk_src = {
+ .reg = 0x10050,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
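+/*
+ * halt_check quick reference (the behaviour itself lives in the shared qcom
+ * clk-branch code; this summary is added for readability):
+ * BRANCH_HALT       - poll the halt bit in halt_reg after toggling the branch.
+ * BRANCH_HALT_VOTED - the branch is enabled through a shared vote register
+ *                     (enable_reg/enable_mask), so halt status is only checked
+ *                     while this master's vote is asserted.
+ * BRANCH_HALT_SKIP  - no halt polling at all (clock driven externally).
+ * BRANCH_HALT_DELAY - no usable status bit; just wait a short fixed delay.
+ */
+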
+/* External clock, so skip the halt check */
+static struct clk_branch gcc_aggre_noc_pcie_0_axi_clk = {
+ .halt_reg = 0x6b080,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so skip the halt check */
+static struct clk_branch gcc_aggre_noc_pcie_1_axi_clk = {
+ .halt_reg = 0x8d084,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
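+/*
+ * Branches that also set hwcg_reg/hwcg_bit support hardware clock gating:
+ * with that bit set the block can gate its own clock when idle, independent
+ * of the software enable bit.
+ */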
+static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = {
+ .halt_reg = 0x9000c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9000c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_card_axi_clk = {
+ .halt_reg = 0x750cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x750cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x750cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_card_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_card_axi_hw_ctl_clk = {
+ .halt_reg = 0x750cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x750cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x750cc,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_card_axi_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x770cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770cc,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0xf080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xf080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xf080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0x10080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x10080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so skip the halt check */
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so skip the halt check */
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x26014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0xf07c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xf07c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xf07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0x1007c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1007c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1007c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so skip the halt check */
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x71154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+/* External clock, so skip the halt check */
+static struct clk_branch gcc_ddrss_pcie_sf_tbu_clk = {
+ .halt_reg = 0x8d080,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x8d080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_pcie_sf_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so skip the halt check */
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x2700c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so skip the halt check */
+static struct clk_branch gcc_disp_sf_axi_clk = {
+ .halt_reg = 0x27014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x27014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x27014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_en = {
+ .halt_reg = 0x8c014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_iref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_phy_rchng_clk = {
+ .halt_reg = 0x6b038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie0_phy_rchng_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_phy_rchng_clk = {
+ .halt_reg = 0x8d038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie1_phy_rchng_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_en = {
+ .halt_reg = 0x8c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so skip the halt check */
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x6b01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so skip the halt check */
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x8d028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x8d024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_clkref_en = {
+ .halt_reg = 0x8c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so skip the halt check */
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x8d01c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x8d01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clock, so skip the halt check */
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x8d030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(30),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x8d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x8d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x27008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x28008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x28008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x28008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x2800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2800c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x23008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x23000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x1713c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x1726c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x1739c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x174cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x175fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x1772c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x1785c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x23140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x23138,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x1813c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x1826c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x1839c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x184cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x185fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0x23278,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0x23270,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0x1e00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0x1e13c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0x1e26c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0x1e39c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0x1e4cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0x1e5fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0x1e008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_throttle_pcie_ahb_clk = {
+ .halt_reg = 0x9044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_throttle_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_1_clkref_en = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_1_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ahb_clk = {
+ .halt_reg = 0x75018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_axi_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_axi_hw_ctl_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ice_core_clk = {
+ .halt_reg = 0x75064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x75064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75064,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_phy_aux_clk = {
+ .halt_reg = 0x7509c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7509c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7509c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x7509c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7509c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7509c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = {
+ .halt_reg = 0x75020,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x75020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = {
+ .halt_reg = 0x750b8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x750b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = {
+ .halt_reg = 0x7501c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_tx_symbol_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_unipro_core_clk = {
+ .halt_reg = 0x7505c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7505c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7505c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x7505c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7505c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7505c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x77064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77064,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x7709c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7709c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7709c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x7709c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7709c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7709c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x77020,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770b8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x7705c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7705c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7705c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x7705c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7705c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7705c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk__force_mem_core_on = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk__force_mem_core_on",
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw =
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0x10010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk__force_mem_core_on = {
+ .halt_reg = 0x10010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10010,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk__force_mem_core_on",
+ .ops = &clk_branch_simple_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0x1001c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1001c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw =
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0x10018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0xf05c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0xf05c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xf05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_clkref_en = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0x10054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0x10058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Clock ON depends on external parent clock, so don't poll */
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+ .halt_reg = 0x1005c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1005c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clocks, so use BRANCH_HALT_SKIP */
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x28010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x28010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x28010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* External clocks, so use BRANCH_HALT_SKIP */
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x28018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x28018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x28018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x8d004,
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_card_gdsc = {
+ .gdscr = 0x75004,
+ .pd = {
+ .name = "ufs_card_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_sec_gdsc = {
+ .gdscr = 0x10004,
+ .pd = {
+ .name = "usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x7d050,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
+ .gdscr = 0x7d058,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc = {
+ .gdscr = 0x7d054,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc = {
+ .gdscr = 0x7d06c,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct clk_regmap *gcc_sm8350_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_0_AXI_CLK] = &gcc_aggre_noc_pcie_0_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_1_AXI_CLK] = &gcc_aggre_noc_pcie_1_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
+ [GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
+ [GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_card_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_TBU_CLK] = &gcc_ddrss_pcie_sf_tbu_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_SF_AXI_CLK] = &gcc_disp_sf_axi_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_EN] = &gcc_gpu_iref_en.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_PCIE0_PHY_RCHNG_CLK] = &gcc_pcie0_phy_rchng_clk.clkr,
+ [GCC_PCIE1_PHY_RCHNG_CLK] = &gcc_pcie1_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_CLKREF_EN] = &gcc_pcie_0_clkref_en.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_CLKREF_EN] = &gcc_pcie_1_clkref_en.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_THROTTLE_PCIE_AHB_CLK] = &gcc_throttle_pcie_ahb_clk.clkr,
+ [GCC_UFS_1_CLKREF_EN] = &gcc_ufs_1_clkref_en.clkr,
+ [GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr,
+ [GCC_UFS_CARD_AXI_HW_CTL_CLK] = &gcc_ufs_card_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr,
+ [GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_card_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr,
+ [GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_card_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_card_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_card_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_card_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_card_unipro_core_clk_src.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_card_unipro_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_phy_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK__FORCE_MEM_CORE_ON] =
+ &gcc_usb30_prim_master_clk__force_mem_core_on.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK__FORCE_MEM_CORE_ON] =
+ &gcc_usb30_sec_master_clk__force_mem_core_on.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] = &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB3_SEC_CLKREF_EN] = &gcc_usb3_sec_clkref_en.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb3_sec_phy_pipe_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
+
+static struct gdsc *gcc_sm8350_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [UFS_CARD_GDSC] = &ufs_card_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB30_SEC_GDSC] = &usb30_sec_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc,
+};
+
+static const struct qcom_reset_map gcc_sm8350_resets[] = {
+ [GCC_CAMERA_BCR] = { 0x26000 },
+ [GCC_DISPLAY_BCR] = { 0x27000 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_MMSS_BCR] = { 0xb000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x6c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x6c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x6c028 },
+ [GCC_PCIE_1_BCR] = { 0x8d000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x8e014 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x8e020 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x8e000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x6f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x17000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0x1e000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_UFS_CARD_BCR] = { 0x75000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB30_SEC_BCR] = { 0x10000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x28010, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0x28018, 2 },
+ [GCC_VIDEO_BCR] = { 0x28000 },
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+};
+
+static const struct regmap_config gcc_sm8350_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9c100,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sm8350_desc = {
+ .config = &gcc_sm8350_regmap_config,
+ .clks = gcc_sm8350_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sm8350_clocks),
+ .resets = gcc_sm8350_resets,
+ .num_resets = ARRAY_SIZE(gcc_sm8350_resets),
+ .gdscs = gcc_sm8350_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm8350_gdscs),
+};
+
+static const struct of_device_id gcc_sm8350_match_table[] = {
+ { .compatible = "qcom,gcc-sm8350" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sm8350_match_table);
+
+static int gcc_sm8350_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sm8350_desc);
+ if (IS_ERR(regmap)) {
+ dev_err(&pdev->dev, "Failed to map gcc registers\n");
+ return PTR_ERR(regmap);
+ }
+
+ /*
+ * Keep the critical clock always-On
+	 * Keep the critical clocks always on:
+ * GCC_GPU_CFG_AHB_CLK, GCC_VIDEO_AHB_CLK, GCC_VIDEO_XO_CLK
+ */
+ regmap_update_bits(regmap, 0x26004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x26018, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x27004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x2701c, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x28004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x28020, BIT(0), BIT(0));
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+	/* FORCE_MEM_CORE_ON for the UFS PHY ICE core clock */
+ regmap_update_bits(regmap, gcc_ufs_phy_ice_core_clk.halt_reg, BIT(14), BIT(14));
+
+ return qcom_cc_really_probe(pdev, &gcc_sm8350_desc, regmap);
+}
+
+static struct platform_driver gcc_sm8350_driver = {
+ .probe = gcc_sm8350_probe,
+ .driver = {
+ .name = "sm8350-gcc",
+ .of_match_table = gcc_sm8350_match_table,
+ },
+};
+
+static int __init gcc_sm8350_init(void)
+{
+ return platform_driver_register(&gcc_sm8350_driver);
+}
+subsys_initcall(gcc_sm8350_init);
+
+static void __exit gcc_sm8350_exit(void)
+{
+ platform_driver_unregister(&gcc_sm8350_driver);
+}
+module_exit(gcc_sm8350_exit);
+
+MODULE_DESCRIPTION("QTI GCC SM8350 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index af26e0695b86..51ed640e527b 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -183,7 +183,10 @@ static inline int gdsc_assert_reset(struct gdsc *sc)
static inline void gdsc_force_mem_on(struct gdsc *sc)
{
int i;
- u32 mask = RETAIN_MEM | RETAIN_PERIPH;
+ u32 mask = RETAIN_MEM;
+
+ if (!(sc->flags & NO_RET_PERIPH))
+ mask |= RETAIN_PERIPH;
for (i = 0; i < sc->cxc_count; i++)
regmap_update_bits(sc->regmap, sc->cxcs[i], mask, mask);
@@ -192,7 +195,10 @@ static inline void gdsc_force_mem_on(struct gdsc *sc)
static inline void gdsc_clear_mem_on(struct gdsc *sc)
{
int i;
- u32 mask = RETAIN_MEM | RETAIN_PERIPH;
+ u32 mask = RETAIN_MEM;
+
+ if (!(sc->flags & NO_RET_PERIPH))
+ mask |= RETAIN_PERIPH;
for (i = 0; i < sc->cxc_count; i++)
regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0);
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index bd537438c793..5bb396b344d1 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -42,7 +42,7 @@ struct gdsc {
#define PWRSTS_ON BIT(2)
#define PWRSTS_OFF_ON (PWRSTS_OFF | PWRSTS_ON)
#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON)
- const u8 flags;
+ const u16 flags;
#define VOTABLE BIT(0)
#define CLAMP_IO BIT(1)
#define HW_CTRL BIT(2)
@@ -51,6 +51,7 @@ struct gdsc {
#define POLL_CFG_GDSCR BIT(5)
#define ALWAYS_ON BIT(6)
#define RETAIN_FF_ENABLE BIT(7)
+#define NO_RET_PERIPH BIT(8)
struct reset_controller_dev *rcdev;
unsigned int *resets;
unsigned int reset_count;
diff --git a/drivers/clk/qcom/gpucc-msm8998.c b/drivers/clk/qcom/gpucc-msm8998.c
index 9b3923af02a1..fedfffaf0a8d 100644
--- a/drivers/clk/qcom/gpucc-msm8998.c
+++ b/drivers/clk/qcom/gpucc-msm8998.c
@@ -50,6 +50,11 @@ static struct clk_branch gpucc_cxo_clk = {
},
};
+static struct pll_vco fabia_vco[] = {
+ { 249600000, 2000000000, 0 },
+ { 125000000, 1000000000, 1 },
+};
+
static const struct clk_div_table post_div_table_fabia_even[] = {
{ 0x0, 1 },
{ 0x1, 2 },
@@ -61,11 +66,13 @@ static const struct clk_div_table post_div_table_fabia_even[] = {
static struct clk_alpha_pll gpupll0 = {
.offset = 0x0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "gpupll0",
.parent_hws = (const struct clk_hw *[]){ &gpucc_cxo_clk.clkr.hw },
.num_parents = 1,
- .ops = &clk_alpha_pll_fixed_fabia_ops,
+ .ops = &clk_alpha_pll_fabia_ops,
},
};
@@ -80,6 +87,7 @@ static struct clk_alpha_pll_postdiv gpupll0_out_even = {
.name = "gpupll0_out_even",
.parent_hws = (const struct clk_hw *[]){ &gpupll0.clkr.hw },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
@@ -253,12 +261,16 @@ static struct gdsc gpu_cx_gdsc = {
static struct gdsc gpu_gx_gdsc = {
.gdscr = 0x1094,
.clamp_io_ctrl = 0x130,
+ .resets = (unsigned int []){ GPU_GX_BCR },
+ .reset_count = 1,
+ .cxcs = (unsigned int []){ 0x1098 },
+ .cxc_count = 1,
.pd = {
.name = "gpu_gx",
},
.parent = &gpu_cx_gdsc.pd,
- .pwrsts = PWRSTS_OFF_ON,
- .flags = CLAMP_IO | AON_RESET,
+ .pwrsts = PWRSTS_OFF_ON | PWRSTS_RET,
+ .flags = CLAMP_IO | SW_RESET | AON_RESET | NO_RET_PERIPH,
};
static struct clk_regmap *gpucc_msm8998_clocks[] = {
diff --git a/drivers/clk/qcom/gpucc-sdm660.c b/drivers/clk/qcom/gpucc-sdm660.c
new file mode 100644
index 000000000000..1ebcceb3a50d
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sdm660.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020, AngeloGioacchino Del Regno
+ * <angelogioacchino.delregno@somainline.org>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <dt-bindings/clock/qcom,gpucc-sdm660.h>
+
+#include "clk-alpha-pll.h"
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_GPU_XO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_PLL0_PLL_OUT_MAIN,
+ P_GPU_PLL1_PLL_OUT_MAIN,
+};
+
+static struct clk_branch gpucc_cxo_clk = {
+ .halt_reg = 0x1020,
+ .clkr = {
+ .enable_reg = 0x1020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_cxo_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo"
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_IS_CRITICAL,
+ },
+ },
+};
+
+static struct pll_vco gpu_vco[] = {
+ { 1000000000, 2000000000, 0 },
+ { 500000000, 1000000000, 2 },
+ { 250000000, 500000000, 3 },
+};
+
+static struct clk_alpha_pll gpu_pll0_pll_out_main = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = gpu_vco,
+ .num_vco = ARRAY_SIZE(gpu_vco),
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_pll0_pll_out_main",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpucc_cxo_clk.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+};
+
+static struct clk_alpha_pll gpu_pll1_pll_out_main = {
+ .offset = 0x40,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = gpu_vco,
+ .num_vco = ARRAY_SIZE(gpu_vco),
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_pll1_pll_out_main",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpucc_cxo_clk.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+};
+
+static const struct parent_map gpucc_parent_map_1[] = {
+ { P_GPU_XO, 0 },
+ { P_GPU_PLL0_PLL_OUT_MAIN, 1 },
+ { P_GPU_PLL1_PLL_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gpucc_parent_data_1[] = {
+ { .hw = &gpucc_cxo_clk.clkr.hw },
+ { .hw = &gpu_pll0_pll_out_main.clkr.hw },
+ { .hw = &gpu_pll1_pll_out_main.clkr.hw },
+ { .fw_name = "gcc_gpu_gpll0_clk", .name = "gcc_gpu_gpll0_clk" },
+};
+
+static struct clk_rcg2_gfx3d gfx3d_clk_src = {
+ .div = 2,
+ .rcg = {
+ .cmd_rcgr = 0x1070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpucc_parent_map_1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gfx3d_clk_src",
+ .parent_data = gpucc_parent_data_1,
+ .num_parents = 4,
+ .ops = &clk_gfx3d_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ },
+ },
+ .hws = (struct clk_hw*[]){
+ &gpucc_cxo_clk.clkr.hw,
+ &gpu_pll0_pll_out_main.clkr.hw,
+ &gpu_pll1_pll_out_main.clkr.hw,
+ }
+};
+
+static struct clk_branch gpucc_gfx3d_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1098,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_gfx3d_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gfx3d_clk_src.rcg.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct parent_map gpucc_parent_map_0[] = {
+ { P_GPU_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpucc_parent_data_0[] = {
+ { .hw = &gpucc_cxo_clk.clkr.hw },
+ { .fw_name = "gcc_gpu_gpll0_clk", .name = "gcc_gpu_gpll0_clk" },
+ { .fw_name = "gcc_gpu_gpll0_div_clk", .name = "gcc_gpu_gpll0_div_clk" },
+};
+
+static const struct freq_tbl ftbl_rbbmtimer_clk_src[] = {
+ F(19200000, P_GPU_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbbmtimer_clk_src = {
+ .cmd_rcgr = 0x10b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpucc_parent_map_0,
+ .freq_tbl = ftbl_rbbmtimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbbmtimer_clk_src",
+ .parent_data = gpucc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_rbcpr_clk_src[] = {
+ F(19200000, P_GPU_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN_DIV, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbcpr_clk_src = {
+ .cmd_rcgr = 0x1030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpucc_parent_map_0,
+ .freq_tbl = ftbl_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbcpr_clk_src",
+ .parent_data = gpucc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gpucc_rbbmtimer_clk = {
+ .halt_reg = 0x10d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_rbbmtimer_clk",
+ .parent_names = (const char *[]){
+ "rbbmtimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpucc_rbcpr_clk = {
+ .halt_reg = 0x1054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_rbcpr_clk",
+ .parent_names = (const char *[]){
+ "rbcpr_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x1004,
+ .gds_hw_ctrl = 0x1008,
+ .pd = {
+ .name = "gpu_cx",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x1094,
+ .clamp_io_ctrl = 0x130,
+ .resets = (unsigned int []){ GPU_GX_BCR },
+ .reset_count = 1,
+ .cxcs = (unsigned int []){ 0x1098 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "gpu_gx",
+ },
+ .parent = &gpu_cx_gdsc.pd,
+ .pwrsts = PWRSTS_OFF | PWRSTS_ON | PWRSTS_RET,
+ .flags = CLAMP_IO | SW_RESET | AON_RESET | NO_RET_PERIPH,
+};
+
+static struct gdsc *gpucc_sdm660_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct qcom_reset_map gpucc_sdm660_resets[] = {
+ [GPU_CX_BCR] = { 0x1000 },
+ [RBCPR_BCR] = { 0x1050 },
+ [GPU_GX_BCR] = { 0x1090 },
+	[SPDM_BCR] = { 0x10e0 },
+};
+
+static struct clk_regmap *gpucc_sdm660_clocks[] = {
+ [GPUCC_CXO_CLK] = &gpucc_cxo_clk.clkr,
+ [GPU_PLL0_PLL] = &gpu_pll0_pll_out_main.clkr,
+ [GPU_PLL1_PLL] = &gpu_pll1_pll_out_main.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.rcg.clkr,
+ [RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
+ [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
+ [GPUCC_RBCPR_CLK] = &gpucc_rbcpr_clk.clkr,
+ [GPUCC_GFX3D_CLK] = &gpucc_gfx3d_clk.clkr,
+ [GPUCC_RBBMTIMER_CLK] = &gpucc_rbbmtimer_clk.clkr,
+};
+
+static const struct regmap_config gpucc_660_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9034,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpucc_sdm660_desc = {
+ .config = &gpucc_660_regmap_config,
+ .clks = gpucc_sdm660_clocks,
+ .num_clks = ARRAY_SIZE(gpucc_sdm660_clocks),
+ .resets = gpucc_sdm660_resets,
+ .num_resets = ARRAY_SIZE(gpucc_sdm660_resets),
+ .gdscs = gpucc_sdm660_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpucc_sdm660_gdscs),
+};
+
+static const struct of_device_id gpucc_sdm660_match_table[] = {
+ { .compatible = "qcom,gpucc-sdm660" },
+ { .compatible = "qcom,gpucc-sdm630" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpucc_sdm660_match_table);
+
+static int gpucc_sdm660_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ struct alpha_pll_config gpu_pll_config = {
+ .config_ctl_val = 0x4001055b,
+ .alpha = 0xaaaaab00,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = 0x3 << 20,
+ .main_output_mask = 0x1,
+ };
+
+ regmap = qcom_cc_map(pdev, &gpucc_sdm660_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* 800MHz configuration for GPU PLL0 */
+ gpu_pll_config.l = 0x29;
+ gpu_pll_config.alpha_hi = 0xaa;
+ clk_alpha_pll_configure(&gpu_pll0_pll_out_main, regmap, &gpu_pll_config);
+
+ /* 740MHz configuration for GPU PLL1 */
+ gpu_pll_config.l = 0x26;
+ gpu_pll_config.alpha_hi = 0x8a;
+ clk_alpha_pll_configure(&gpu_pll1_pll_out_main, regmap, &gpu_pll_config);
+
+ return qcom_cc_really_probe(pdev, &gpucc_sdm660_desc, regmap);
+}
+
+static struct platform_driver gpucc_sdm660_driver = {
+ .probe = gpucc_sdm660_probe,
+ .driver = {
+ .name = "gpucc-sdm660",
+ .of_match_table = gpucc_sdm660_match_table,
+ },
+};
+module_platform_driver(gpucc_sdm660_driver);
+
+MODULE_DESCRIPTION("Qualcomm SDM630/SDM660 GPUCC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/lpass-gfm-sm8250.c b/drivers/clk/qcom/lpass-gfm-sm8250.c
index d366c7c2abc7..f5e31e692b9b 100644
--- a/drivers/clk/qcom/lpass-gfm-sm8250.c
+++ b/drivers/clk/qcom/lpass-gfm-sm8250.c
@@ -33,14 +33,13 @@ struct clk_gfm {
void __iomem *gfm_mux;
};
-#define GFM_MASK BIT(1)
#define to_clk_gfm(_hw) container_of(_hw, struct clk_gfm, hw)
static u8 clk_gfm_get_parent(struct clk_hw *hw)
{
struct clk_gfm *clk = to_clk_gfm(hw);
- return readl(clk->gfm_mux) & GFM_MASK;
+ return readl(clk->gfm_mux) & clk->mux_mask;
}
static int clk_gfm_set_parent(struct clk_hw *hw, u8 index)
@@ -51,9 +50,10 @@ static int clk_gfm_set_parent(struct clk_hw *hw, u8 index)
val = readl(clk->gfm_mux);
if (index)
- val |= GFM_MASK;
+ val |= clk->mux_mask;
else
- val &= ~GFM_MASK;
+ val &= ~clk->mux_mask;
+
writel(val, clk->gfm_mux);
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index 015426262d08..a1552b6771bc 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -74,22 +74,6 @@ static const char * const mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
"dsi1pll",
};
-static const struct parent_map mmcc_xo_mmpll0_1_2_gpll0_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL1, 2 },
- { P_GPLL0, 5 },
- { P_MMPLL2, 3 }
-};
-
-static const char * const mmcc_xo_mmpll0_1_2_gpll0[] = {
- "xo",
- "mmpll0_vote",
- "mmpll1_vote",
- "mmss_gpll0_vote",
- "mmpll2",
-};
-
static const struct parent_map mmcc_xo_mmpll0_1_3_gpll0_map[] = {
{ P_XO, 0 },
{ P_MMPLL0, 1 },
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 3b3aac07fb2d..24843e4f2599 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -528,16 +528,23 @@ static struct clk_rcg2 maxi_clk_src = {
},
};
-static struct clk_rcg2 gfx3d_clk_src = {
- .cmd_rcgr = 0x4000,
- .hid_width = 5,
- .parent_map = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gfx3d_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0,
- .num_parents = 6,
- .ops = &clk_gfx3d_ops,
- .flags = CLK_SET_RATE_PARENT,
+static struct clk_rcg2_gfx3d gfx3d_clk_src = {
+ .rcg = {
+ .cmd_rcgr = 0x4000,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gfx3d_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0,
+ .num_parents = 6,
+ .ops = &clk_gfx3d_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+ .hws = (struct clk_hw*[]) {
+ &mmpll9.clkr.hw,
+ &mmpll2.clkr.hw,
+ &mmpll8.clkr.hw
},
};
@@ -3089,7 +3096,7 @@ static struct clk_regmap *mmcc_msm8996_clocks[] = {
[AHB_CLK_SRC] = &ahb_clk_src.clkr,
[AXI_CLK_SRC] = &axi_clk_src.clkr,
[MAXI_CLK_SRC] = &maxi_clk_src.clkr,
- [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.rcg.clkr,
[RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
[ISENSE_CLK_SRC] = &isense_clk_src.clkr,
[RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
index dd68983fe22e..467dadccde02 100644
--- a/drivers/clk/qcom/mmcc-msm8998.c
+++ b/drivers/clk/qcom/mmcc-msm8998.c
@@ -1211,6 +1211,8 @@ static struct clk_rcg2 vfe1_clk_src = {
static struct clk_branch misc_ahb_clk = {
.halt_reg = 0x328,
+ .hwcg_reg = 0x328,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x328,
.enable_mask = BIT(0),
@@ -1241,6 +1243,8 @@ static struct clk_branch video_core_clk = {
static struct clk_branch video_ahb_clk = {
.halt_reg = 0x1030,
+ .hwcg_reg = 0x1030,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x1030,
.enable_mask = BIT(0),
@@ -1315,6 +1319,8 @@ static struct clk_branch video_subcore1_clk = {
static struct clk_branch mdss_ahb_clk = {
.halt_reg = 0x2308,
+ .hwcg_reg = 0x2308,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0x2308,
.enable_mask = BIT(0),
@@ -2496,6 +2502,8 @@ static struct clk_branch mnoc_ahb_clk = {
static struct clk_branch bimc_smmu_ahb_clk = {
.halt_reg = 0xe004,
+ .hwcg_reg = 0xe004,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0xe004,
.enable_mask = BIT(0),
@@ -2511,6 +2519,8 @@ static struct clk_branch bimc_smmu_ahb_clk = {
static struct clk_branch bimc_smmu_axi_clk = {
.halt_reg = 0xe008,
+ .hwcg_reg = 0xe008,
+ .hwcg_bit = 1,
.clkr = {
.enable_reg = 0xe008,
.enable_mask = BIT(0),
@@ -2653,7 +2663,7 @@ static struct gdsc bimc_smmu_gdsc = {
.name = "bimc_smmu",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = HW_CTRL,
+ .flags = HW_CTRL | ALWAYS_ON,
};
static struct clk_regmap *mmcc_msm8998_clocks[] = {
diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c
new file mode 100644
index 000000000000..941993bc610d
--- /dev/null
+++ b/drivers/clk/qcom/mmcc-sdm660.c
@@ -0,0 +1,2864 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020, Martin Botka <martin.botka@somainline.org>
+ * Copyright (c) 2020, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/clk.h>
+
+#include <dt-bindings/clock/qcom,mmcc-sdm660.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-alpha-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "gdsc.h"
+
+enum {
+ P_XO,
+ P_DSI0PLL_BYTE,
+ P_DSI0PLL,
+ P_DSI1PLL_BYTE,
+ P_DSI1PLL,
+ P_GPLL0,
+ P_GPLL0_DIV,
+ P_MMPLL0,
+ P_MMPLL10,
+ P_MMPLL3,
+ P_MMPLL4,
+ P_MMPLL5,
+ P_MMPLL6,
+ P_MMPLL7,
+ P_MMPLL8,
+ P_SLEEP_CLK,
+ P_DP_PHY_PLL_LINK_CLK,
+ P_DP_PHY_PLL_VCO_DIV,
+};
+
+static const struct parent_map mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL4, 2 },
+ { P_MMPLL7, 3 },
+ { P_MMPLL8, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+};
+
+/* Votable PLLs */
+static struct clk_alpha_pll mmpll0 = {
+ .offset = 0xc000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x1f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll mmpll6 = {
+ .offset = 0xf0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x1f0,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll6",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* APSS controlled PLLs */
+static struct pll_vco vco[] = {
+ { 1000000000, 2000000000, 0 },
+ { 750000000, 1500000000, 1 },
+ { 500000000, 1000000000, 2 },
+ { 250000000, 500000000, 3 },
+};
+
+static struct pll_vco mmpll3_vco[] = {
+ { 750000000, 1500000000, 1 },
+};
+
+static const struct alpha_pll_config mmpll10_config = {
+ .l = 0x1e,
+ .config_ctl_val = 0x00004289,
+ .main_output_mask = 0x1,
+};
+
+static struct clk_alpha_pll mmpll10 = {
+ .offset = 0x190,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll10",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config mmpll3_config = {
+ .l = 0x2e,
+ .config_ctl_val = 0x4001055b,
+ .vco_val = 0x1 << 20,
+ .vco_mask = 0x3 << 20,
+ .main_output_mask = 0x1,
+};
+
+static struct clk_alpha_pll mmpll3 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = mmpll3_vco,
+ .num_vco = ARRAY_SIZE(mmpll3_vco),
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll3",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config mmpll4_config = {
+ .l = 0x28,
+ .config_ctl_val = 0x4001055b,
+ .vco_val = 0x2 << 20,
+ .vco_mask = 0x3 << 20,
+ .main_output_mask = 0x1,
+};
+
+static struct clk_alpha_pll mmpll4 = {
+ .offset = 0x50,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = vco,
+ .num_vco = ARRAY_SIZE(vco),
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config mmpll5_config = {
+ .l = 0x2a,
+ .config_ctl_val = 0x4001055b,
+ .alpha_hi = 0xf8,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = 0x3 << 20,
+ .main_output_mask = 0x1,
+};
+
+static struct clk_alpha_pll mmpll5 = {
+ .offset = 0xa0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = vco,
+ .num_vco = ARRAY_SIZE(vco),
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll5",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config mmpll7_config = {
+ .l = 0x32,
+ .config_ctl_val = 0x4001055b,
+ .vco_val = 0x2 << 20,
+ .vco_mask = 0x3 << 20,
+ .main_output_mask = 0x1,
+};
+
+static struct clk_alpha_pll mmpll7 = {
+ .offset = 0x140,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = vco,
+ .num_vco = ARRAY_SIZE(vco),
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll7",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config mmpll8_config = {
+ .l = 0x30,
+ .alpha_hi = 0x70,
+ .alpha_en_mask = BIT(24),
+ .config_ctl_val = 0x4001055b,
+ .vco_val = 0x2 << 20,
+ .vco_mask = 0x3 << 20,
+ .main_output_mask = 0x1,
+};
+
+static struct clk_alpha_pll mmpll8 = {
+ .offset = 0x1c0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = vco,
+ .num_vco = ARRAY_SIZE(vco),
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll8",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_parent_data mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+ { .hw = &mmpll7.clkr.hw },
+ { .hw = &mmpll8.clkr.hw },
+ { .fw_name = "gpll0" },
+ { .fw_name = "gpll0_div" },
+};
+
+static const struct parent_map mmcc_xo_dsibyte_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 1 },
+ { P_DSI1PLL_BYTE, 2 },
+};
+
+static const struct clk_parent_data mmcc_xo_dsibyte[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pllbyte" },
+ { .fw_name = "dsi1pllbyte" },
+};
+
+static const struct parent_map mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL4, 2 },
+ { P_MMPLL7, 3 },
+ { P_MMPLL10, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+};
+
+static const struct clk_parent_data mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+ { .hw = &mmpll7.clkr.hw },
+ { .hw = &mmpll10.clkr.hw },
+ { .fw_name = "gpll0" },
+ { .fw_name = "gpll0_div" },
+};
+
+static const struct parent_map mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL4, 1 },
+ { P_MMPLL7, 2 },
+ { P_MMPLL10, 3 },
+ { P_SLEEP_CLK, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+};
+
+static const struct clk_parent_data mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll4.clkr.hw },
+ { .hw = &mmpll7.clkr.hw },
+ { .hw = &mmpll10.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .fw_name = "gpll0" },
+ { .fw_name = "gpll0_div" },
+};
+
+static const struct parent_map mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL7, 2 },
+ { P_MMPLL10, 3 },
+ { P_SLEEP_CLK, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+};
+
+static const struct clk_parent_data mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll7.clkr.hw },
+ { .hw = &mmpll10.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .fw_name = "gpll0" },
+ { .fw_name = "gpll0_div" },
+};
+
+static const struct parent_map mmcc_xo_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+};
+
+static const struct clk_parent_data mmcc_xo_gpll0_gpll0_div[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "gpll0" },
+ { .fw_name = "gpll0_div" },
+};
+
+static const struct parent_map mmcc_xo_dplink_dpvco_map[] = {
+ { P_XO, 0 },
+ { P_DP_PHY_PLL_LINK_CLK, 1 },
+ { P_DP_PHY_PLL_VCO_DIV, 2 },
+};
+
+static const struct clk_parent_data mmcc_xo_dplink_dpvco[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dp_link_2x_clk_divsel_five" },
+ { .fw_name = "dp_vco_divided_clk_src_mux" },
+};
+
+static const struct parent_map mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL5, 2 },
+ { P_MMPLL7, 3 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+};
+
+static const struct clk_parent_data mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll5.clkr.hw },
+ { .hw = &mmpll7.clkr.hw },
+ { .fw_name = "gpll0" },
+ { .fw_name = "gpll0_div" },
+};
+
+static const struct parent_map mmcc_xo_dsi0pll_dsi1pll_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 1 },
+ { P_DSI1PLL, 2 },
+};
+
+static const struct clk_parent_data mmcc_xo_dsi0pll_dsi1pll[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pll" },
+ { .fw_name = "dsi1pll" },
+};
+
+static const struct parent_map mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL4, 2 },
+ { P_MMPLL7, 3 },
+ { P_MMPLL10, 4 },
+ { P_MMPLL6, 5 },
+ { P_GPLL0, 6 },
+};
+
+static const struct clk_parent_data mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+ { .hw = &mmpll7.clkr.hw },
+ { .hw = &mmpll10.clkr.hw },
+ { .hw = &mmpll6.clkr.hw },
+ { .fw_name = "gpll0" },
+};
+
+static const struct parent_map mmcc_xo_mmpll0_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 },
+};
+
+static const struct clk_parent_data mmcc_xo_mmpll0_gpll0_gpll0_div[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll0.clkr.hw },
+ { .fw_name = "gpll0" },
+ { .fw_name = "gpll0_div" },
+};
+
+static const struct parent_map mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_mmpll6_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL4, 2 },
+ { P_MMPLL7, 3 },
+ { P_MMPLL10, 4 },
+ { P_GPLL0, 5 },
+ { P_MMPLL6, 6 },
+};
+
+static const struct clk_parent_data mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_mmpll6[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+ { .hw = &mmpll7.clkr.hw },
+ { .hw = &mmpll10.clkr.hw },
+ { .fw_name = "gpll0" },
+ { .hw = &mmpll6.clkr.hw },
+};
+
+static const struct parent_map mmcc_xo_mmpll0_mmpll8_mmpll3_mmpll6_gpll0_mmpll7_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL8, 2 },
+ { P_MMPLL3, 3 },
+ { P_MMPLL6, 4 },
+ { P_GPLL0, 5 },
+ { P_MMPLL7, 6 },
+};
+
+static const struct clk_parent_data mmcc_xo_mmpll0_mmpll8_mmpll3_mmpll6_gpll0_mmpll7[] = {
+ { .fw_name = "xo" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll8.clkr.hw },
+ { .hw = &mmpll3.clkr.hw },
+ { .hw = &mmpll6.clkr.hw },
+ { .fw_name = "gpll0" },
+ { .hw = &mmpll7.clkr.hw },
+};
+
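+/*
+ * Frequency tables: as used throughout the qcom clock drivers, each
+ * F(rate, source, pre_div, m, n) entry names the parent and divider
+ * settings used to reach "rate"; half-integer pre-dividers such as 7.5 are
+ * valid for these RCGs, and m/n of 0/0 means the M/N counter is unused.
+ */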
+static const struct freq_tbl ftbl_ahb_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(40000000, P_GPLL0_DIV, 7.5, 0, 0),
+ F(80800000, P_MMPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ahb_clk_src = {
+ .cmd_rcgr = 0x5000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ahb_clk_src",
+ .parent_data = mmcc_xo_mmpll0_gpll0_gpll0_div,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 byte0_clk_src = {
+ .cmd_rcgr = 0x2120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte0_clk_src",
+ .parent_data = mmcc_xo_dsibyte,
+ .num_parents = 3,
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_rcg2 byte1_clk_src = {
+ .cmd_rcgr = 0x2140,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte1_clk_src",
+ .parent_data = mmcc_xo_dsibyte,
+ .num_parents = 3,
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
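+/*
+ * Worked example: the 100 MHz and 200 MHz entries imply gpll0 = 600 MHz and
+ * gpll0_div = 300 MHz, so the 13043478 Hz entry below is 300 MHz * 1/23 with
+ * a pre-divider of 1.
+ */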
+static const struct freq_tbl ftbl_camss_gp0_clk_src[] = {
+ F(10000, P_XO, 16, 1, 120),
+ F(24000, P_XO, 16, 1, 50),
+ F(6000000, P_GPLL0_DIV, 10, 1, 5),
+ F(12000000, P_GPLL0_DIV, 10, 2, 5),
+ F(13043478, P_GPLL0_DIV, 1, 1, 23),
+ F(24000000, P_GPLL0_DIV, 1, 2, 25),
+ F(50000000, P_GPLL0_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_DIV, 3, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_gp0_clk_src = {
+ .cmd_rcgr = 0x3420,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_camss_gp0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp0_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camss_gp1_clk_src = {
+ .cmd_rcgr = 0x3450,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_camss_gp0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp1_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cci_clk_src[] = {
+ F(37500000, P_GPLL0_DIV, 8, 0, 0),
+ F(50000000, P_GPLL0_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cci_clk_src = {
+ .cmd_rcgr = 0x3300,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_cci_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cci_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cpp_clk_src[] = {
+ F(120000000, P_GPLL0, 5, 0, 0),
+ F(256000000, P_MMPLL4, 3, 0, 0),
+ F(384000000, P_MMPLL4, 2, 0, 0),
+ F(480000000, P_MMPLL7, 2, 0, 0),
+ F(540000000, P_MMPLL6, 2, 0, 0),
+ F(576000000, P_MMPLL10, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+ .cmd_rcgr = 0x3640,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_mmpll6_map,
+ .freq_tbl = ftbl_cpp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cpp_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_mmpll6,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi0_clk_src[] = {
+ F(100000000, P_GPLL0_DIV, 3, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(310000000, P_MMPLL8, 3, 0, 0),
+ F(404000000, P_MMPLL0, 2, 0, 0),
+ F(465000000, P_MMPLL8, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+ .cmd_rcgr = 0x3090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi0phytimer_clk_src[] = {
+ F(100000000, P_GPLL0_DIV, 3, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(269333333, P_MMPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x3000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0phytimer_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+ .cmd_rcgr = 0x3100,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x3030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1phytimer_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi2_clk_src = {
+ .cmd_rcgr = 0x3160,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi2_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x3060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi2phytimer_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi3_clk_src = {
+ .cmd_rcgr = 0x31c0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi3_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csiphy_clk_src[] = {
+ F(100000000, P_GPLL0_DIV, 3, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(269333333, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL7, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csiphy_clk_src = {
+ .cmd_rcgr = 0x3800,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csiphy_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csiphy_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_dp_aux_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 dp_aux_clk_src = {
+ .cmd_rcgr = 0x2260,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_dp_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "dp_aux_clk_src",
+ .parent_data = mmcc_xo_gpll0_gpll0_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
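+/*
+ * The three entries below imply a VCO-divided input of 405, 675 and 1350 MHz
+ * (rate x pre-div 4), i.e. presumably one entry per RBR/HBR/HBR2 DP link
+ * rate.
+ */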
+static const struct freq_tbl ftbl_dp_crypto_clk_src[] = {
+ F(101250000, P_DP_PHY_PLL_VCO_DIV, 4, 0, 0),
+ F(168750000, P_DP_PHY_PLL_VCO_DIV, 4, 0, 0),
+ F(337500000, P_DP_PHY_PLL_VCO_DIV, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 dp_crypto_clk_src = {
+ .cmd_rcgr = 0x2220,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dplink_dpvco_map,
+ .freq_tbl = ftbl_dp_crypto_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "dp_crypto_clk_src",
+ .parent_data = mmcc_xo_dplink_dpvco,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_dp_gtc_clk_src[] = {
+ F(40000000, P_GPLL0_DIV, 7.5, 0, 0),
+ F(60000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 dp_gtc_clk_src = {
+ .cmd_rcgr = 0x2280,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_dp_gtc_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "dp_gtc_clk_src",
+ .parent_data = mmcc_xo_gpll0_gpll0_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_dp_link_clk_src[] = {
+ F(162000000, P_DP_PHY_PLL_LINK_CLK, 2, 0, 0),
+ F(270000000, P_DP_PHY_PLL_LINK_CLK, 2, 0, 0),
+ F(540000000, P_DP_PHY_PLL_LINK_CLK, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 dp_link_clk_src = {
+ .cmd_rcgr = 0x2200,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dplink_dpvco_map,
+ .freq_tbl = ftbl_dp_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "dp_link_clk_src",
+ .parent_data = mmcc_xo_dplink_dpvco,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_rcg2 dp_pixel_clk_src = {
+ .cmd_rcgr = 0x2240,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dplink_dpvco_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "dp_pixel_clk_src",
+ .parent_data = mmcc_xo_dplink_dpvco,
+ .num_parents = 3,
+ .ops = &clk_dp_ops,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+ .cmd_rcgr = 0x2160,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc0_clk_src",
+ .parent_data = mmcc_xo_dsibyte,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 esc1_clk_src = {
+ .cmd_rcgr = 0x2180,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc1_clk_src",
+ .parent_data = mmcc_xo_dsibyte,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_jpeg0_clk_src[] = {
+ F(66666667, P_GPLL0_DIV, 4.5, 0, 0),
+ F(133333333, P_GPLL0, 4.5, 0, 0),
+ F(219428571, P_MMPLL4, 3.5, 0, 0),
+ F(320000000, P_MMPLL7, 3, 0, 0),
+ F(480000000, P_MMPLL7, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 jpeg0_clk_src = {
+ .cmd_rcgr = 0x3500,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_jpeg0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg0_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mclk0_clk_src[] = {
+ F(4800000, P_XO, 4, 0, 0),
+ F(6000000, P_GPLL0_DIV, 10, 1, 5),
+ F(8000000, P_GPLL0_DIV, 1, 2, 75),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16666667, P_GPLL0_DIV, 2, 1, 9),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_MMPLL10, 1, 1, 24),
+ F(33333333, P_GPLL0_DIV, 1, 1, 9),
+ F(48000000, P_GPLL0, 1, 2, 25),
+ F(66666667, P_GPLL0, 1, 1, 9),
+ { }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+ .cmd_rcgr = 0x3360,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk0_clk_src",
+ .parent_data = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+ .cmd_rcgr = 0x3390,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk1_clk_src",
+ .parent_data = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk2_clk_src = {
+ .cmd_rcgr = 0x33c0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk2_clk_src",
+ .parent_data = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk3_clk_src = {
+ .cmd_rcgr = 0x33f0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk3_clk_src",
+ .parent_data = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mdp_clk_src[] = {
+ F(100000000, P_GPLL0_DIV, 3, 0, 0),
+ F(150000000, P_GPLL0_DIV, 2, 0, 0),
+ F(171428571, P_GPLL0, 3.5, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(275000000, P_MMPLL5, 3, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ F(330000000, P_MMPLL5, 2.5, 0, 0),
+ F(412500000, P_MMPLL5, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+ .cmd_rcgr = 0x2040,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mdp_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+ .cmd_rcgr = 0x2000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi0pll_dsi1pll_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk0_clk_src",
+ .parent_data = mmcc_xo_dsi0pll_dsi1pll,
+ .num_parents = 3,
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_rcg2 pclk1_clk_src = {
+ .cmd_rcgr = 0x2020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_dsi0pll_dsi1pll_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk1_clk_src",
+ .parent_data = mmcc_xo_dsi0pll_dsi1pll,
+ .num_parents = 3,
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static const struct freq_tbl ftbl_rot_clk_src[] = {
+ F(171428571, P_GPLL0, 3.5, 0, 0),
+ F(275000000, P_MMPLL5, 3, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ F(330000000, P_MMPLL5, 2.5, 0, 0),
+ F(412500000, P_MMPLL5, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rot_clk_src = {
+ .cmd_rcgr = 0x21a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_rot_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rot_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_vfe0_clk_src[] = {
+ F(120000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(256000000, P_MMPLL4, 3, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ F(404000000, P_MMPLL0, 2, 0, 0),
+ F(480000000, P_MMPLL7, 2, 0, 0),
+ F(540000000, P_MMPLL6, 2, 0, 0),
+ F(576000000, P_MMPLL10, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+ .cmd_rcgr = 0x3600,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0_map,
+ .freq_tbl = ftbl_vfe0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe0_clk_src",
+ .parent_data = mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 vfe1_clk_src = {
+ .cmd_rcgr = 0x3620,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0_map,
+ .freq_tbl = ftbl_vfe0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe1_clk_src",
+ .parent_data = mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_core_clk_src[] = {
+ F(133333333, P_GPLL0, 4.5, 0, 0),
+ F(269333333, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL7, 3, 0, 0),
+ F(404000000, P_MMPLL0, 2, 0, 0),
+ F(441600000, P_MMPLL3, 2, 0, 0),
+ F(518400000, P_MMPLL3, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_core_clk_src = {
+ .cmd_rcgr = 0x1000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll8_mmpll3_mmpll6_gpll0_mmpll7_map,
+ .freq_tbl = ftbl_video_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "video_core_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll8_mmpll3_mmpll6_gpll0_mmpll7,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_IS_CRITICAL,
+ },
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+ .cmd_rcgr = 0x2080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_dp_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vsync_clk_src",
+ .parent_data = mmcc_xo_gpll0_gpll0_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch bimc_smmu_ahb_clk = {
+ .halt_reg = 0xe004,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0xe004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "bimc_smmu_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch bimc_smmu_axi_clk = {
+ .halt_reg = 0xe008,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0xe008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "bimc_smmu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_ahb_clk = {
+ .halt_reg = 0x348c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x348c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x348c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cci_ahb_clk = {
+ .halt_reg = 0x3348,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3348,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cci_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cci_clk = {
+ .halt_reg = 0x3344,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3344,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cci_clk",
+ .parent_hws = (const struct clk_hw *[]){ &cci_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cpp_ahb_clk = {
+ .halt_reg = 0x36b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cpp_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cpp_axi_clk = {
+ .halt_reg = 0x36c4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cpp_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cpp_clk = {
+ .halt_reg = 0x36b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cpp_clk",
+ .parent_hws = (const struct clk_hw *[]){ &cpp_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cpp_vbif_ahb_clk = {
+ .halt_reg = 0x36c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cpp_vbif_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0_ahb_clk = {
+ .halt_reg = 0x30bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x30bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0_clk = {
+ .halt_reg = 0x30b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x30b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0phytimer_clk = {
+ .halt_reg = 0x3024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi0phytimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0pix_clk = {
+ .halt_reg = 0x30e4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x30e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0pix_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi0rdi_clk = {
+ .halt_reg = 0x30d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x30d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi0rdi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1_ahb_clk = {
+ .halt_reg = 0x3128,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3128,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1_clk = {
+ .halt_reg = 0x3124,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3124,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1phytimer_clk = {
+ .halt_reg = 0x3054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi1phytimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1pix_clk = {
+ .halt_reg = 0x3154,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1pix_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi1rdi_clk = {
+ .halt_reg = 0x3144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3144,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi1rdi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2_ahb_clk = {
+ .halt_reg = 0x3188,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3188,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2_clk = {
+ .halt_reg = 0x3184,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3184,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2phytimer_clk = {
+ .halt_reg = 0x3084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi2phytimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2pix_clk = {
+ .halt_reg = 0x31b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2pix_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi2rdi_clk = {
+ .halt_reg = 0x31a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi2rdi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3_ahb_clk = {
+ .halt_reg = 0x31e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3_clk = {
+ .halt_reg = 0x31e4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3pix_clk = {
+ .halt_reg = 0x3214,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3214,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3pix_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi3rdi_clk = {
+ .halt_reg = 0x3204,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3204,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi3rdi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csi3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi_vfe0_clk = {
+ .halt_reg = 0x3704,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3704,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi_vfe0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csi_vfe1_clk = {
+ .halt_reg = 0x3714,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3714,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csi_vfe1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csiphy0_clk = {
+ .halt_reg = 0x3740,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3740,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csiphy0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csiphy1_clk = {
+ .halt_reg = 0x3744,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3744,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csiphy1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_csiphy2_clk = {
+ .halt_reg = 0x3748,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3748,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_csiphy2_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cphy_csid0_clk = {
+ .halt_reg = 0x3730,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3730,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cphy_csid0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &camss_csiphy0_clk.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cphy_csid1_clk = {
+ .halt_reg = 0x3734,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3734,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cphy_csid1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &camss_csiphy1_clk.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cphy_csid2_clk = {
+ .halt_reg = 0x3738,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3738,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cphy_csid2_clk",
+ .parent_hws = (const struct clk_hw *[]){ &camss_csiphy2_clk.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_cphy_csid3_clk = {
+ .halt_reg = 0x373c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x373c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_cphy_csid3_clk",
+ .parent_hws = (const struct clk_hw *[]){ &csiphy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_gp0_clk = {
+ .halt_reg = 0x3444,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3444,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_gp0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &camss_gp0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_gp1_clk = {
+ .halt_reg = 0x3474,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3474,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_gp1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &camss_gp1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_ispif_ahb_clk = {
+ .halt_reg = 0x3224,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3224,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_ispif_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg0_clk = {
+ .halt_reg = 0x35a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x35a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &jpeg0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_ahb_clk = {
+ .halt_reg = 0x35b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x35b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_jpeg_axi_clk = {
+ .halt_reg = 0x35b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x35b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_jpeg_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch throttle_camss_axi_clk = {
+ .halt_reg = 0x3c3c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3c3c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "throttle_camss_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk0_clk = {
+ .halt_reg = 0x3384,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3384,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mclk0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk1_clk = {
+ .halt_reg = 0x33b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mclk1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk2_clk = {
+ .halt_reg = 0x33e4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk2_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mclk2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_mclk3_clk = {
+ .halt_reg = 0x3414,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3414,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_mclk3_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mclk3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_micro_ahb_clk = {
+ .halt_reg = 0x3494,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3494,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_micro_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_top_ahb_clk = {
+ .halt_reg = 0x3484,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3484,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_top_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe0_ahb_clk = {
+ .halt_reg = 0x3668,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3668,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe0_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe0_clk = {
+ .halt_reg = 0x36a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe0_stream_clk = {
+ .halt_reg = 0x3720,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3720,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe0_stream_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe1_ahb_clk = {
+ .halt_reg = 0x3678,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3678,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe1_clk = {
+ .halt_reg = 0x36ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe1_stream_clk = {
+ .halt_reg = 0x3724,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3724,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe1_stream_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vfe1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vbif_ahb_clk = {
+ .halt_reg = 0x36b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vbif_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camss_vfe_vbif_axi_clk = {
+ .halt_reg = 0x36bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camss_vfe_vbif_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch csiphy_ahb2crif_clk = {
+ .halt_reg = 0x374c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x374c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x374c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "csiphy_ahb2crif_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_ahb_clk = {
+ .halt_reg = 0x2308,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x8a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2308,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .flags = CLK_SET_RATE_PARENT,
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_axi_clk_src[] = {
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(171428571, P_GPLL0, 3.5, 0, 0),
+ F(240000000, P_GPLL0, 2.5, 0, 0),
+ F(323200000, P_MMPLL0, 2.5, 0, 0),
+ F(406000000, P_MMPLL0, 2, 0, 0),
+ { }
+};
+
+/* This RCG is read-only (RO) to Linux */
+static struct clk_rcg2 axi_clk_src = {
+ .cmd_rcgr = 0xd000,
+ .hid_width = 5,
+ .parent_map = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "axi_clk_src",
+ .parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
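+/*
+ * mdss_axi_clk only gates the branch fed by the read-only axi_clk_src above;
+ * the AXI rate itself is presumably managed by whichever firmware owns that
+ * RCG.
+ */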
+static struct clk_branch mdss_axi_clk = {
+ .halt_reg = 0x2310,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2310,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch throttle_mdss_axi_clk = {
+ .halt_reg = 0x246c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x246c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x246c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "throttle_mdss_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte0_clk = {
+ .halt_reg = 0x233c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x233c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &byte0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div mdss_byte0_intf_div_clk = {
+ .reg = 0x237c,
+ .shift = 0,
+ .width = 2,
+ /*
+ * NOTE: The divider ops do not work for div-3. The current assumption is
+ * that div-3 is not a recommended setting for this divider.
+ */
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte0_intf_div_clk",
+ .parent_hws = (const struct clk_hw *[]){ &byte0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte0_intf_clk = {
+ .halt_reg = 0x2374,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2374,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mdss_byte0_intf_div_clk.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte1_clk = {
+ .halt_reg = 0x2340,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2340,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &byte1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div mdss_byte1_intf_div_clk = {
+ .reg = 0x2380,
+ .shift = 0,
+ .width = 2,
+ /*
+ * NOTE: The divider ops do not work for div-3. The current assumption is
+ * that div-3 is not a recommended setting for this divider.
+ */
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte1_intf_div_clk",
+ .parent_hws = (const struct clk_hw *[]){ &byte1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte1_intf_clk = {
+ .halt_reg = 0x2378,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2378,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mdss_byte1_intf_div_clk.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_dp_aux_clk = {
+ .halt_reg = 0x2364,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2364,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_dp_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){ &dp_aux_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_dp_crypto_clk = {
+ .halt_reg = 0x235c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x235c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_dp_crypto_clk",
+ .parent_hws = (const struct clk_hw *[]){ &dp_crypto_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_dp_gtc_clk = {
+ .halt_reg = 0x2368,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2368,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_dp_gtc_clk",
+ .parent_hws = (const struct clk_hw *[]){ &dp_gtc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_dp_link_clk = {
+ .halt_reg = 0x2354,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2354,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_dp_link_clk",
+ .parent_hws = (const struct clk_hw *[]){ &dp_link_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Reset state of MDSS_DP_LINK_INTF_DIV is 0x3 (div-4) */
+static struct clk_branch mdss_dp_link_intf_clk = {
+ .halt_reg = 0x2358,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2358,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_dp_link_intf_clk",
+ .parent_hws = (const struct clk_hw *[]){ &dp_link_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_dp_pixel_clk = {
+ .halt_reg = 0x2360,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2360,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_dp_pixel_clk",
+ .parent_hws = (const struct clk_hw *[]){ &dp_pixel_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_esc0_clk = {
+ .halt_reg = 0x2344,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2344,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &esc0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_esc1_clk = {
+ .halt_reg = 0x2348,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2348,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &esc1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_hdmi_dp_ahb_clk = {
+ .halt_reg = 0x230c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x230c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_hdmi_dp_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_mdp_clk = {
+ .halt_reg = 0x231c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x231c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw *[]){ &mdp_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_pclk0_clk = {
+ .halt_reg = 0x2314,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2314,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &pclk0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_pclk1_clk = {
+ .halt_reg = 0x2318,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2318,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &pclk1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_rot_clk = {
+ .halt_reg = 0x2350,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2350,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_rot_clk",
+ .parent_hws = (const struct clk_hw *[]){ &rot_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_vsync_clk = {
+ .halt_reg = 0x2328,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2328,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw *[]){ &vsync_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mnoc_ahb_clk = {
+ .halt_reg = 0x5024,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x5024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mnoc_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch misc_ahb_clk = {
+ .halt_reg = 0x328,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x328,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x328,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "misc_ahb_clk",
+ /*
+ * mnoc_ahb_clk is a dependency that must be enabled
+ * before this branch is enabled.
+ */
+ .parent_hws = (const struct clk_hw *[]){ &mnoc_ahb_clk.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch misc_cxo_clk = {
+ .halt_reg = 0x324,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x324,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "misc_cxo_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch snoc_dvm_axi_clk = {
+ .halt_reg = 0xe040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "snoc_dvm_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_ahb_clk = {
+ .halt_reg = 0x1030,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_axi_clk = {
+ .halt_reg = 0x1034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch throttle_video_axi_clk = {
+ .halt_reg = 0x118c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x118c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x118c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "throttle_video_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_core_clk = {
+ .halt_reg = 0x1028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_core_clk",
+ .parent_hws = (const struct clk_hw *[]){ &video_core_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_subcore0_clk = {
+ .halt_reg = 0x1048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_subcore0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &video_core_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
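+/*
+ * GDSCs (globally distributed switch controllers) implement the power
+ * domains of the multimedia subsystem; a child domain points at its parent
+ * via .parent so the genpd hierarchy mirrors the hardware (e.g. venus_core0
+ * inside venus).
+ */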
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x1024,
+ .pd = {
+ .name = "venus",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core0_gdsc = {
+ .gdscr = 0x1040,
+ .pd = {
+ .name = "venus_core0",
+ },
+ .parent = &venus_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x2304,
+ .pd = {
+ .name = "mdss",
+ },
+ .cxcs = (unsigned int []){ 0x2040 },
+ .cxc_count = 1,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_top_gdsc = {
+ .gdscr = 0x34a0,
+ .pd = {
+ .name = "camss_top",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_vfe0_gdsc = {
+ .gdscr = 0x3664,
+ .pd = {
+ .name = "camss_vfe0",
+ },
+ .parent = &camss_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_vfe1_gdsc = {
+ .gdscr = 0x3674,
+ .pd = {
+ .name = "camss_vfe1_gdsc",
+ },
+ .parent = &camss_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_cpp_gdsc = {
+ .gdscr = 0x36d4,
+ .pd = {
+ .name = "camss_cpp",
+ },
+ .parent = &camss_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+/* This GDSC seems to hang the whole multimedia subsystem, so leave it unregistered:
+static struct gdsc bimc_smmu_gdsc = {
+ .gdscr = 0xe020,
+ .gds_hw_ctrl = 0xe024,
+ .pd = {
+ .name = "bimc_smmu",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &bimc_smmu_gdsc.pd,
+ .flags = HW_CTRL,
+};
+*/
+
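+/*
+ * All clocks exported by this controller, indexed by the identifiers that
+ * consumers use in the devicetree binding (presumably provided by the
+ * matching dt-bindings header).
+ */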
+static struct clk_regmap *mmcc_660_clocks[] = {
+ [AHB_CLK_SRC] = &ahb_clk_src.clkr,
+ [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+ [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+ [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+ [CCI_CLK_SRC] = &cci_clk_src.clkr,
+ [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+ [CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+ [CSI2PHYTIMER_CLK_SRC] = &csi2phytimer_clk_src.clkr,
+ [CSI3_CLK_SRC] = &csi3_clk_src.clkr,
+ [CSIPHY_CLK_SRC] = &csiphy_clk_src.clkr,
+ [DP_AUX_CLK_SRC] = &dp_aux_clk_src.clkr,
+ [DP_CRYPTO_CLK_SRC] = &dp_crypto_clk_src.clkr,
+ [DP_GTC_CLK_SRC] = &dp_gtc_clk_src.clkr,
+ [DP_LINK_CLK_SRC] = &dp_link_clk_src.clkr,
+ [DP_PIXEL_CLK_SRC] = &dp_pixel_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+ [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+ [MCLK3_CLK_SRC] = &mclk3_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [MMPLL0_PLL] = &mmpll0.clkr,
+ [MMPLL10_PLL] = &mmpll10.clkr,
+ [MMPLL3_PLL] = &mmpll3.clkr,
+ [MMPLL4_PLL] = &mmpll4.clkr,
+ [MMPLL5_PLL] = &mmpll5.clkr,
+ [MMPLL6_PLL] = &mmpll6.clkr,
+ [MMPLL7_PLL] = &mmpll7.clkr,
+ [MMPLL8_PLL] = &mmpll8.clkr,
+ [BIMC_SMMU_AHB_CLK] = &bimc_smmu_ahb_clk.clkr,
+ [BIMC_SMMU_AXI_CLK] = &bimc_smmu_axi_clk.clkr,
+ [CAMSS_AHB_CLK] = &camss_ahb_clk.clkr,
+ [CAMSS_CCI_AHB_CLK] = &camss_cci_ahb_clk.clkr,
+ [CAMSS_CCI_CLK] = &camss_cci_clk.clkr,
+ [CAMSS_CPHY_CSID0_CLK] = &camss_cphy_csid0_clk.clkr,
+ [CAMSS_CPHY_CSID1_CLK] = &camss_cphy_csid1_clk.clkr,
+ [CAMSS_CPHY_CSID2_CLK] = &camss_cphy_csid2_clk.clkr,
+ [CAMSS_CPHY_CSID3_CLK] = &camss_cphy_csid3_clk.clkr,
+ [CAMSS_CPP_AHB_CLK] = &camss_cpp_ahb_clk.clkr,
+ [CAMSS_CPP_AXI_CLK] = &camss_cpp_axi_clk.clkr,
+ [CAMSS_CPP_CLK] = &camss_cpp_clk.clkr,
+ [CAMSS_CPP_VBIF_AHB_CLK] = &camss_cpp_vbif_ahb_clk.clkr,
+ [CAMSS_CSI0_AHB_CLK] = &camss_csi0_ahb_clk.clkr,
+ [CAMSS_CSI0_CLK] = &camss_csi0_clk.clkr,
+ [CAMSS_CSI0PHYTIMER_CLK] = &camss_csi0phytimer_clk.clkr,
+ [CAMSS_CSI0PIX_CLK] = &camss_csi0pix_clk.clkr,
+ [CAMSS_CSI0RDI_CLK] = &camss_csi0rdi_clk.clkr,
+ [CAMSS_CSI1_AHB_CLK] = &camss_csi1_ahb_clk.clkr,
+ [CAMSS_CSI1_CLK] = &camss_csi1_clk.clkr,
+ [CAMSS_CSI1PHYTIMER_CLK] = &camss_csi1phytimer_clk.clkr,
+ [CAMSS_CSI1PIX_CLK] = &camss_csi1pix_clk.clkr,
+ [CAMSS_CSI1RDI_CLK] = &camss_csi1rdi_clk.clkr,
+ [CAMSS_CSI2_AHB_CLK] = &camss_csi2_ahb_clk.clkr,
+ [CAMSS_CSI2_CLK] = &camss_csi2_clk.clkr,
+ [CAMSS_CSI2PHYTIMER_CLK] = &camss_csi2phytimer_clk.clkr,
+ [CAMSS_CSI2PIX_CLK] = &camss_csi2pix_clk.clkr,
+ [CAMSS_CSI2RDI_CLK] = &camss_csi2rdi_clk.clkr,
+ [CAMSS_CSI3_AHB_CLK] = &camss_csi3_ahb_clk.clkr,
+ [CAMSS_CSI3_CLK] = &camss_csi3_clk.clkr,
+ [CAMSS_CSI3PIX_CLK] = &camss_csi3pix_clk.clkr,
+ [CAMSS_CSI3RDI_CLK] = &camss_csi3rdi_clk.clkr,
+ [CAMSS_CSI_VFE0_CLK] = &camss_csi_vfe0_clk.clkr,
+ [CAMSS_CSI_VFE1_CLK] = &camss_csi_vfe1_clk.clkr,
+ [CAMSS_CSIPHY0_CLK] = &camss_csiphy0_clk.clkr,
+ [CAMSS_CSIPHY1_CLK] = &camss_csiphy1_clk.clkr,
+ [CAMSS_CSIPHY2_CLK] = &camss_csiphy2_clk.clkr,
+ [CAMSS_GP0_CLK] = &camss_gp0_clk.clkr,
+ [CAMSS_GP1_CLK] = &camss_gp1_clk.clkr,
+ [CAMSS_ISPIF_AHB_CLK] = &camss_ispif_ahb_clk.clkr,
+ [CAMSS_JPEG0_CLK] = &camss_jpeg0_clk.clkr,
+ [CAMSS_JPEG_AHB_CLK] = &camss_jpeg_ahb_clk.clkr,
+ [CAMSS_JPEG_AXI_CLK] = &camss_jpeg_axi_clk.clkr,
+ [CAMSS_MCLK0_CLK] = &camss_mclk0_clk.clkr,
+ [CAMSS_MCLK1_CLK] = &camss_mclk1_clk.clkr,
+ [CAMSS_MCLK2_CLK] = &camss_mclk2_clk.clkr,
+ [CAMSS_MCLK3_CLK] = &camss_mclk3_clk.clkr,
+ [CAMSS_MICRO_AHB_CLK] = &camss_micro_ahb_clk.clkr,
+ [CAMSS_TOP_AHB_CLK] = &camss_top_ahb_clk.clkr,
+ [CAMSS_VFE0_AHB_CLK] = &camss_vfe0_ahb_clk.clkr,
+ [CAMSS_VFE0_CLK] = &camss_vfe0_clk.clkr,
+ [CAMSS_VFE0_STREAM_CLK] = &camss_vfe0_stream_clk.clkr,
+ [CAMSS_VFE1_AHB_CLK] = &camss_vfe1_ahb_clk.clkr,
+ [CAMSS_VFE1_CLK] = &camss_vfe1_clk.clkr,
+ [CAMSS_VFE1_STREAM_CLK] = &camss_vfe1_stream_clk.clkr,
+ [CAMSS_VFE_VBIF_AHB_CLK] = &camss_vfe_vbif_ahb_clk.clkr,
+ [CAMSS_VFE_VBIF_AXI_CLK] = &camss_vfe_vbif_axi_clk.clkr,
+ [CSIPHY_AHB2CRIF_CLK] = &csiphy_ahb2crif_clk.clkr,
+ [MDSS_AHB_CLK] = &mdss_ahb_clk.clkr,
+ [MDSS_AXI_CLK] = &mdss_axi_clk.clkr,
+ [MDSS_BYTE0_CLK] = &mdss_byte0_clk.clkr,
+ [MDSS_BYTE0_INTF_CLK] = &mdss_byte0_intf_clk.clkr,
+ [MDSS_BYTE0_INTF_DIV_CLK] = &mdss_byte0_intf_div_clk.clkr,
+ [MDSS_BYTE1_CLK] = &mdss_byte1_clk.clkr,
+ [MDSS_BYTE1_INTF_CLK] = &mdss_byte1_intf_clk.clkr,
+ [MDSS_DP_AUX_CLK] = &mdss_dp_aux_clk.clkr,
+ [MDSS_DP_CRYPTO_CLK] = &mdss_dp_crypto_clk.clkr,
+ [MDSS_DP_GTC_CLK] = &mdss_dp_gtc_clk.clkr,
+ [MDSS_DP_LINK_CLK] = &mdss_dp_link_clk.clkr,
+ [MDSS_DP_LINK_INTF_CLK] = &mdss_dp_link_intf_clk.clkr,
+ [MDSS_DP_PIXEL_CLK] = &mdss_dp_pixel_clk.clkr,
+ [MDSS_ESC0_CLK] = &mdss_esc0_clk.clkr,
+ [MDSS_ESC1_CLK] = &mdss_esc1_clk.clkr,
+ [MDSS_HDMI_DP_AHB_CLK] = &mdss_hdmi_dp_ahb_clk.clkr,
+ [MDSS_MDP_CLK] = &mdss_mdp_clk.clkr,
+ [MDSS_PCLK0_CLK] = &mdss_pclk0_clk.clkr,
+ [MDSS_PCLK1_CLK] = &mdss_pclk1_clk.clkr,
+ [MDSS_ROT_CLK] = &mdss_rot_clk.clkr,
+ [MDSS_VSYNC_CLK] = &mdss_vsync_clk.clkr,
+ [MISC_AHB_CLK] = &misc_ahb_clk.clkr,
+ [MISC_CXO_CLK] = &misc_cxo_clk.clkr,
+ [MNOC_AHB_CLK] = &mnoc_ahb_clk.clkr,
+ [SNOC_DVM_AXI_CLK] = &snoc_dvm_axi_clk.clkr,
+ [THROTTLE_CAMSS_AXI_CLK] = &throttle_camss_axi_clk.clkr,
+ [THROTTLE_MDSS_AXI_CLK] = &throttle_mdss_axi_clk.clkr,
+ [THROTTLE_VIDEO_AXI_CLK] = &throttle_video_axi_clk.clkr,
+ [VIDEO_AHB_CLK] = &video_ahb_clk.clkr,
+ [VIDEO_AXI_CLK] = &video_axi_clk.clkr,
+ [VIDEO_CORE_CLK] = &video_core_clk.clkr,
+ [VIDEO_SUBCORE0_CLK] = &video_subcore0_clk.clkr,
+ [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+ [ROT_CLK_SRC] = &rot_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+ [VIDEO_CORE_CLK_SRC] = &video_core_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [MDSS_BYTE1_INTF_DIV_CLK] = &mdss_byte1_intf_div_clk.clkr,
+ [AXI_CLK_SRC] = &axi_clk_src.clkr,
+};
+
+static struct gdsc *mmcc_sdm660_gdscs[] = {
+ [VENUS_GDSC] = &venus_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [CAMSS_TOP_GDSC] = &camss_top_gdsc,
+ [CAMSS_VFE0_GDSC] = &camss_vfe0_gdsc,
+ [CAMSS_VFE1_GDSC] = &camss_vfe1_gdsc,
+ [CAMSS_CPP_GDSC] = &camss_cpp_gdsc,
+};
+
+static const struct qcom_reset_map mmcc_660_resets[] = {
+ [CAMSS_MICRO_BCR] = { 0x3490 },
+};
+
+static const struct regmap_config mmcc_660_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x40000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc mmcc_660_desc = {
+ .config = &mmcc_660_regmap_config,
+ .clks = mmcc_660_clocks,
+ .num_clks = ARRAY_SIZE(mmcc_660_clocks),
+ .resets = mmcc_660_resets,
+ .num_resets = ARRAY_SIZE(mmcc_660_resets),
+ .gdscs = mmcc_sdm660_gdscs,
+ .num_gdscs = ARRAY_SIZE(mmcc_sdm660_gdscs),
+};
+
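+/* The SDM630 entry carries non-NULL match data, checked in mmcc_660_probe() */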
+static const struct of_device_id mmcc_660_match_table[] = {
+ { .compatible = "qcom,mmcc-sdm660" },
+ { .compatible = "qcom,mmcc-sdm630", .data = (void *)1UL },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mmcc_660_match_table);
+
+static void sdm630_clock_override(void)
+{
+ /* SDM630 has only one DSI */
+ mmcc_660_desc.clks[BYTE1_CLK_SRC] = NULL;
+ mmcc_660_desc.clks[MDSS_BYTE1_CLK] = NULL;
+ mmcc_660_desc.clks[MDSS_BYTE1_INTF_DIV_CLK] = NULL;
+ mmcc_660_desc.clks[MDSS_BYTE1_INTF_CLK] = NULL;
+ mmcc_660_desc.clks[ESC1_CLK_SRC] = NULL;
+ mmcc_660_desc.clks[MDSS_ESC1_CLK] = NULL;
+ mmcc_660_desc.clks[PCLK1_CLK_SRC] = NULL;
+ mmcc_660_desc.clks[MDSS_PCLK1_CLK] = NULL;
+}
+
+static int mmcc_660_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+ struct regmap *regmap;
+ bool is_sdm630;
+
+ id = of_match_device(mmcc_660_match_table, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+ is_sdm630 = !!(id->data);
+
+ regmap = qcom_cc_map(pdev, &mmcc_660_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ if (is_sdm630)
+ sdm630_clock_override();
+
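+ /* Program the multimedia alpha PLLs before registering the clock tree */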
+ clk_alpha_pll_configure(&mmpll3, regmap, &mmpll3_config);
+ clk_alpha_pll_configure(&mmpll4, regmap, &mmpll4_config);
+ clk_alpha_pll_configure(&mmpll5, regmap, &mmpll5_config);
+ clk_alpha_pll_configure(&mmpll7, regmap, &mmpll7_config);
+ clk_alpha_pll_configure(&mmpll8, regmap, &mmpll8_config);
+ clk_alpha_pll_configure(&mmpll10, regmap, &mmpll10_config);
+
+ return qcom_cc_really_probe(pdev, &mmcc_660_desc, regmap);
+}
+
+static struct platform_driver mmcc_660_driver = {
+ .probe = mmcc_660_probe,
+ .driver = {
+ .name = "mmcc-sdm660",
+ .of_match_table = mmcc_660_match_table,
+ },
+};
+module_platform_driver(mmcc_660_driver);
+
+MODULE_DESCRIPTION("Qualcomm SDM630/SDM660 MMCC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/videocc-sm8250.c b/drivers/clk/qcom/videocc-sm8250.c
index 2797c61f5938..b0efadc19634 100644
--- a/drivers/clk/qcom/videocc-sm8250.c
+++ b/drivers/clk/qcom/videocc-sm8250.c
@@ -169,6 +169,21 @@ static struct clk_regmap_div video_cc_mvs0c_div2_div_clk_src = {
},
};
+static struct clk_regmap_div video_cc_mvs0_div_clk_src = {
+ .reg = 0xd54,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "video_cc_mvs0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
static struct clk_regmap_div video_cc_mvs1c_div2_div_clk_src = {
.reg = 0xcf4,
.shift = 0,
@@ -202,6 +217,24 @@ static struct clk_branch video_cc_mvs0c_clk = {
},
};
+static struct clk_branch video_cc_mvs0_clk = {
+ .halt_reg = 0xd34,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xd34,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_mvs0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_mvs0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch video_cc_mvs1_div2_clk = {
.halt_reg = 0xdf4,
.halt_check = BRANCH_HALT_VOTED,
@@ -245,6 +278,7 @@ static struct gdsc mvs0c_gdsc = {
},
.flags = 0,
.pwrsts = PWRSTS_OFF_ON,
+ .supply = "mmcx",
};
static struct gdsc mvs1c_gdsc = {
@@ -254,6 +288,7 @@ static struct gdsc mvs1c_gdsc = {
},
.flags = 0,
.pwrsts = PWRSTS_OFF_ON,
+ .supply = "mmcx",
};
static struct gdsc mvs0_gdsc = {
@@ -263,6 +298,7 @@ static struct gdsc mvs0_gdsc = {
},
.flags = HW_CTRL,
.pwrsts = PWRSTS_OFF_ON,
+ .supply = "mmcx",
};
static struct gdsc mvs1_gdsc = {
@@ -272,10 +308,13 @@ static struct gdsc mvs1_gdsc = {
},
.flags = HW_CTRL,
.pwrsts = PWRSTS_OFF_ON,
+ .supply = "mmcx",
};
static struct clk_regmap *video_cc_sm8250_clocks[] = {
+ [VIDEO_CC_MVS0_CLK] = &video_cc_mvs0_clk.clkr,
[VIDEO_CC_MVS0_CLK_SRC] = &video_cc_mvs0_clk_src.clkr,
+ [VIDEO_CC_MVS0_DIV_CLK_SRC] = &video_cc_mvs0_div_clk_src.clkr,
[VIDEO_CC_MVS0C_CLK] = &video_cc_mvs0c_clk.clkr,
[VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC] = &video_cc_mvs0c_div2_div_clk_src.clkr,
[VIDEO_CC_MVS1_CLK_SRC] = &video_cc_mvs1_clk_src.clkr,
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 18915d668a30..607e64a17d72 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -148,6 +148,7 @@ config CLK_R8A77995
config CLK_R8A779A0
bool "R-Car V3U clock support" if COMPILE_TEST
+ select CLK_RCAR_CPG_LIB
select CLK_RENESAS_CPG_MSSR
config CLK_R9A06G032
@@ -162,12 +163,16 @@ config CLK_SH73A0
# Family
+config CLK_RCAR_CPG_LIB
+ bool "CPG/MSSR library functions" if COMPILE_TEST
+
config CLK_RCAR_GEN2_CPG
bool "R-Car Gen2 CPG clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSSR
config CLK_RCAR_GEN3_CPG
bool "R-Car Gen3 and RZ/G2 CPG clock support" if COMPILE_TEST
+ select CLK_RCAR_CPG_LIB
select CLK_RENESAS_CPG_MSSR
config CLK_RCAR_USB2_CLOCK_SEL
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index c803912ef2ce..ef0d2bba92bf 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_CLK_R9A06G032) += r9a06g032-clocks.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
# Family
+obj-$(CONFIG_CLK_RCAR_CPG_LIB) += rcar-cpg-lib.o
obj-$(CONFIG_CLK_RCAR_GEN2_CPG) += rcar-gen2-cpg.o
obj-$(CONFIG_CLK_RCAR_GEN3_CPG) += rcar-gen3-cpg.o
obj-$(CONFIG_CLK_RCAR_USB2_CLOCK_SEL) += rcar-usb2-clock-sel.o
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index 2cd6e3876fbd..41593c126faf 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -128,6 +128,11 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
static struct mssr_mod_clk r8a7796_mod_clks[] __initdata = {
DEF_MOD("fdp1-0", 119, R8A7796_CLK_S0D1),
+ DEF_MOD("tmu4", 121, R8A7796_CLK_S0D6),
+ DEF_MOD("tmu3", 122, R8A7796_CLK_S3D2),
+ DEF_MOD("tmu2", 123, R8A7796_CLK_S3D2),
+ DEF_MOD("tmu1", 124, R8A7796_CLK_S3D2),
+ DEF_MOD("tmu0", 125, R8A7796_CLK_CP),
DEF_MOD("scif5", 202, R8A7796_CLK_S3D4),
DEF_MOD("scif4", 203, R8A7796_CLK_S3D4),
DEF_MOD("scif3", 204, R8A7796_CLK_S3D4),
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
index 2b55a06ac5cf..46a157732759 100644
--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -123,6 +123,11 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = {
static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
DEF_MOD("fdp1-0", 119, R8A77965_CLK_S0D1),
+ DEF_MOD("tmu4", 121, R8A77965_CLK_S0D6),
+ DEF_MOD("tmu3", 122, R8A77965_CLK_S3D2),
+ DEF_MOD("tmu2", 123, R8A77965_CLK_S3D2),
+ DEF_MOD("tmu1", 124, R8A77965_CLK_S3D2),
+ DEF_MOD("tmu0", 125, R8A77965_CLK_CP),
DEF_MOD("scif5", 202, R8A77965_CLK_S3D4),
DEF_MOD("scif4", 203, R8A77965_CLK_S3D4),
DEF_MOD("scif3", 204, R8A77965_CLK_S3D4),
diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c
index 2b97ab61d044..2d172f80b34c 100644
--- a/drivers/clk/renesas/r8a77990-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c
@@ -124,6 +124,11 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
+ DEF_MOD("tmu4", 121, R8A77990_CLK_S0D6C),
+ DEF_MOD("tmu3", 122, R8A77990_CLK_S3D2C),
+ DEF_MOD("tmu2", 123, R8A77990_CLK_S3D2C),
+ DEF_MOD("tmu1", 124, R8A77990_CLK_S3D2C),
+ DEF_MOD("tmu0", 125, R8A77990_CLK_CP),
DEF_MOD("scif5", 202, R8A77990_CLK_S3D4C),
DEF_MOD("scif4", 203, R8A77990_CLK_S3D4C),
DEF_MOD("scif3", 204, R8A77990_CLK_S3D4C),
diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
index 5b4691117b47..9cfd00cf4e69 100644
--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
@@ -111,6 +111,11 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
+ DEF_MOD("tmu4", 121, R8A77995_CLK_S1D4C),
+ DEF_MOD("tmu3", 122, R8A77995_CLK_S3D2C),
+ DEF_MOD("tmu2", 123, R8A77995_CLK_S3D2C),
+ DEF_MOD("tmu1", 124, R8A77995_CLK_S3D2C),
+ DEF_MOD("tmu0", 125, R8A77995_CLK_CP),
DEF_MOD("scif5", 202, R8A77995_CLK_S3D4C),
DEF_MOD("scif4", 203, R8A77995_CLK_S3D4C),
DEF_MOD("scif3", 204, R8A77995_CLK_S3D4C),
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index aa5389b04d74..f23fe9d5e5e1 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -25,6 +25,7 @@
#include <dt-bindings/clock/r8a779a0-cpg-mssr.h>
+#include "rcar-cpg-lib.h"
#include "renesas-cpg-mssr.h"
enum rcar_r8a779a0_clk_types {
@@ -32,6 +33,7 @@ enum rcar_r8a779a0_clk_types {
CLK_TYPE_R8A779A0_PLL1,
CLK_TYPE_R8A779A0_PLL2X_3X, /* PLL[23][01] */
CLK_TYPE_R8A779A0_PLL5,
+ CLK_TYPE_R8A779A0_SD,
CLK_TYPE_R8A779A0_MDSEL, /* Select parent/divider using mode pin */
CLK_TYPE_R8A779A0_OSC, /* OSC EXTAL predivider and fixed divider */
};
@@ -69,7 +71,6 @@ enum clk_ids {
CLK_PLL5_DIV2,
CLK_PLL5_DIV4,
CLK_S1,
- CLK_S2,
CLK_S3,
CLK_SDSRC,
CLK_RPCSRC,
@@ -83,6 +84,9 @@ enum clk_ids {
DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_PLL2X_3X, CLK_MAIN, \
.offset = _offset)
+#define DEF_SD(_name, _id, _parent, _offset) \
+ DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_SD, _parent, .offset = _offset)
+
#define DEF_MDSEL(_name, _id, _md, _parent0, _div0, _parent1, _div1) \
DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_MDSEL, \
(_parent0) << 16 | (_parent1), \
@@ -114,6 +118,7 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_FIXED(".pll5_div4", CLK_PLL5_DIV4, CLK_PLL5_DIV2, 2, 1),
DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 2, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL5_DIV4, 1, 1),
DEF_RATE(".oco", CLK_OCO, 32768),
/* Core Clock Outputs */
@@ -137,7 +142,10 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_FIXED("icu", R8A779A0_CLK_ICU, CLK_PLL5_DIV4, 2, 1),
DEF_FIXED("icud2", R8A779A0_CLK_ICUD2, CLK_PLL5_DIV4, 4, 1),
DEF_FIXED("vcbus", R8A779A0_CLK_VCBUS, CLK_PLL5_DIV4, 1, 1),
- DEF_FIXED("cbfusa", R8A779A0_CLK_CBFUSA, CLK_MAIN, 2, 1),
+ DEF_FIXED("cbfusa", R8A779A0_CLK_CBFUSA, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cp", R8A779A0_CLK_CP, CLK_EXTAL, 2, 1),
+
+ DEF_SD("sd0", R8A779A0_CLK_SD0, CLK_SDSRC, 0x870),
DEF_DIV6P1("mso", R8A779A0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
@@ -148,14 +156,42 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
+ DEF_MOD("avb0", 211, R8A779A0_CLK_S3D2),
+ DEF_MOD("avb1", 212, R8A779A0_CLK_S3D2),
+ DEF_MOD("avb2", 213, R8A779A0_CLK_S3D2),
+ DEF_MOD("avb3", 214, R8A779A0_CLK_S3D2),
+ DEF_MOD("avb4", 215, R8A779A0_CLK_S3D2),
+ DEF_MOD("avb5", 216, R8A779A0_CLK_S3D2),
DEF_MOD("csi40", 331, R8A779A0_CLK_CSI0),
DEF_MOD("csi41", 400, R8A779A0_CLK_CSI0),
DEF_MOD("csi42", 401, R8A779A0_CLK_CSI0),
DEF_MOD("csi43", 402, R8A779A0_CLK_CSI0),
+ DEF_MOD("fcpvd0", 508, R8A779A0_CLK_S3D1),
+ DEF_MOD("fcpvd1", 509, R8A779A0_CLK_S3D1),
+ DEF_MOD("hscif0", 514, R8A779A0_CLK_S1D2),
+ DEF_MOD("hscif1", 515, R8A779A0_CLK_S1D2),
+ DEF_MOD("hscif2", 516, R8A779A0_CLK_S1D2),
+ DEF_MOD("hscif3", 517, R8A779A0_CLK_S1D2),
+ DEF_MOD("i2c0", 518, R8A779A0_CLK_S1D4),
+ DEF_MOD("i2c1", 519, R8A779A0_CLK_S1D4),
+ DEF_MOD("i2c2", 520, R8A779A0_CLK_S1D4),
+ DEF_MOD("i2c3", 521, R8A779A0_CLK_S1D4),
+ DEF_MOD("i2c4", 522, R8A779A0_CLK_S1D4),
+ DEF_MOD("i2c5", 523, R8A779A0_CLK_S1D4),
+ DEF_MOD("i2c6", 524, R8A779A0_CLK_S1D4),
+ DEF_MOD("msi0", 618, R8A779A0_CLK_MSO),
+ DEF_MOD("msi1", 619, R8A779A0_CLK_MSO),
+ DEF_MOD("msi2", 620, R8A779A0_CLK_MSO),
+ DEF_MOD("msi3", 621, R8A779A0_CLK_MSO),
+ DEF_MOD("msi4", 622, R8A779A0_CLK_MSO),
+ DEF_MOD("msi5", 623, R8A779A0_CLK_MSO),
DEF_MOD("scif0", 702, R8A779A0_CLK_S1D8),
DEF_MOD("scif1", 703, R8A779A0_CLK_S1D8),
DEF_MOD("scif3", 704, R8A779A0_CLK_S1D8),
DEF_MOD("scif4", 705, R8A779A0_CLK_S1D8),
+ DEF_MOD("sdhi0", 706, R8A779A0_CLK_SD0),
+ DEF_MOD("sydm1", 709, R8A779A0_CLK_S1D2),
+ DEF_MOD("sydm2", 710, R8A779A0_CLK_S1D2),
DEF_MOD("vin00", 730, R8A779A0_CLK_S1D1),
DEF_MOD("vin01", 731, R8A779A0_CLK_S1D1),
DEF_MOD("vin02", 800, R8A779A0_CLK_S1D1),
@@ -188,10 +224,19 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
DEF_MOD("vin35", 827, R8A779A0_CLK_S1D1),
DEF_MOD("vin36", 828, R8A779A0_CLK_S1D1),
DEF_MOD("vin37", 829, R8A779A0_CLK_S1D1),
+ DEF_MOD("vspd0", 830, R8A779A0_CLK_S3D1),
+ DEF_MOD("vspd1", 831, R8A779A0_CLK_S3D1),
+ DEF_MOD("rwdt", 907, R8A779A0_CLK_R),
+ DEF_MOD("pfc0", 915, R8A779A0_CLK_CP),
+ DEF_MOD("pfc1", 916, R8A779A0_CLK_CP),
+ DEF_MOD("pfc2", 917, R8A779A0_CLK_CP),
+ DEF_MOD("pfc3", 918, R8A779A0_CLK_CP),
+ DEF_MOD("vspx0", 1028, R8A779A0_CLK_S1D1),
+ DEF_MOD("vspx1", 1029, R8A779A0_CLK_S1D1),
+ DEF_MOD("vspx2", 1030, R8A779A0_CLK_S1D1),
+ DEF_MOD("vspx3", 1031, R8A779A0_CLK_S1D1),
};
-static spinlock_t cpg_lock;
-
static const struct rcar_r8a779a0_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;
@@ -230,6 +275,12 @@ static struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
div = cpg_pll_config->pll5_div;
break;
+ case CLK_TYPE_R8A779A0_SD:
+ return cpg_sd_clk_register(core->name, base, core->offset,
+ __clk_get_name(parent), notifiers,
+ false);
+ break;
+
case CLK_TYPE_R8A779A0_MDSEL:
/*
* Clock selectable between two parents and two fixed dividers
@@ -261,6 +312,10 @@ static struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
__clk_get_name(parent), 0, mult, div);
}
+static const unsigned int r8a779a0_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(907), /* RWDT */
+};
+
/*
* CPG Clock Data
*/
@@ -311,6 +366,10 @@ const struct cpg_mssr_info r8a779a0_cpg_mssr_info __initconst = {
.num_mod_clks = ARRAY_SIZE(r8a779a0_mod_clks),
.num_hw_mod_clks = 15 * 32,
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a779a0_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a779a0_crit_mod_clks),
+
/* Callbacks */
.init = r8a779a0_cpg_mssr_init,
.cpg_clk_register = rcar_r8a779a0_cpg_clk_register,
diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c
new file mode 100644
index 000000000000..7e7e5d1341d5
--- /dev/null
+++ b/drivers/clk/renesas/rcar-cpg-lib.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R-Car Gen3 Clock Pulse Generator Library
+ *
+ * Copyright (C) 2015-2018 Glider bvba
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ *
+ * Based on clk-rcar-gen3.c
+ *
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#include "rcar-cpg-lib.h"
+
+spinlock_t cpg_lock;
+
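+/* Locked read-modify-write helper for CPG registers shared between users */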
+void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&cpg_lock, flags);
+ val = readl(reg);
+ val &= ~clear;
+ val |= set;
+ writel(val, reg);
+ spin_unlock_irqrestore(&cpg_lock, flags);
+};
+
+static int cpg_simple_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct cpg_simple_notifier *csn =
+ container_of(nb, struct cpg_simple_notifier, nb);
+
+ switch (action) {
+ case PM_EVENT_SUSPEND:
+ csn->saved = readl(csn->reg);
+ return NOTIFY_OK;
+
+ case PM_EVENT_RESUME:
+ writel(csn->saved, csn->reg);
+ return NOTIFY_OK;
+ }
+ return NOTIFY_DONE;
+}
+
+void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
+ struct cpg_simple_notifier *csn)
+{
+ csn->nb.notifier_call = cpg_simple_notifier_call;
+ raw_notifier_chain_register(notifiers, &csn->nb);
+}
+
+/*
+ * SDn Clock
+ */
+#define CPG_SD_STP_HCK BIT(9)
+#define CPG_SD_STP_CK BIT(8)
+
+#define CPG_SD_STP_MASK (CPG_SD_STP_HCK | CPG_SD_STP_CK)
+#define CPG_SD_FC_MASK (0x7 << 2 | 0x3 << 0)
+
+#define CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) \
+{ \
+ .val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
+ ((sd_srcfc) << 2) | \
+ ((sd_fc) << 0), \
+ .div = (sd_div), \
+}
+
+struct sd_div_table {
+ u32 val;
+ unsigned int div;
+};
+
+struct sd_clock {
+ struct clk_hw hw;
+ const struct sd_div_table *div_table;
+ struct cpg_simple_notifier csn;
+ unsigned int div_num;
+ unsigned int cur_div_idx;
+};
+
+/* SDn divider
+ *           sd_srcfc   sd_fc   div
+ * stp_hck   (div)      (div)     = sd_srcfc x sd_fc
+ *---------------------------------------------------------
+ *    0       0 (1)      1 (4)     4 : SDR104 / HS200 / HS400 (8 TAP)
+ *    0       1 (2)      1 (4)     8 : SDR50
+ *    1       2 (4)      1 (4)    16 : HS / SDR25
+ *    1       3 (8)      1 (4)    32 : NS / SDR12
+ *    1       4 (16)     1 (4)    64
+ *    0       0 (1)      0 (2)     2
+ *    0       1 (2)      0 (2)     4 : SDR104 / HS200 / HS400 (4 TAP)
+ *    1       2 (4)      0 (2)     8
+ *    1       3 (8)      0 (2)    16
+ *    1       4 (16)     0 (2)    32
+ *
+ * NOTE: There is a quirk option to ignore the first row of the dividers
+ * table when searching for suitable settings. This is because HS400 on
+ * early ES versions of H3 and M3-W requires a specific setting to work.
+ */
+static const struct sd_div_table cpg_sd_div_table[] = {
+/* CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) */
+ CPG_SD_DIV_TABLE_DATA(0, 0, 1, 4),
+ CPG_SD_DIV_TABLE_DATA(0, 1, 1, 8),
+ CPG_SD_DIV_TABLE_DATA(1, 2, 1, 16),
+ CPG_SD_DIV_TABLE_DATA(1, 3, 1, 32),
+ CPG_SD_DIV_TABLE_DATA(1, 4, 1, 64),
+ CPG_SD_DIV_TABLE_DATA(0, 0, 0, 2),
+ CPG_SD_DIV_TABLE_DATA(0, 1, 0, 4),
+ CPG_SD_DIV_TABLE_DATA(1, 2, 0, 8),
+ CPG_SD_DIV_TABLE_DATA(1, 3, 0, 16),
+ CPG_SD_DIV_TABLE_DATA(1, 4, 0, 32),
+};
+
+#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw)
+
+static int cpg_sd_clock_enable(struct clk_hw *hw)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+
+ cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK,
+ clock->div_table[clock->cur_div_idx].val &
+ CPG_SD_STP_MASK);
+
+ return 0;
+}
+
+static void cpg_sd_clock_disable(struct clk_hw *hw)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+
+ cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK);
+}
+
+static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+
+ return !(readl(clock->csn.reg) & CPG_SD_STP_MASK);
+}
+
+static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+
+ return DIV_ROUND_CLOSEST(parent_rate,
+ clock->div_table[clock->cur_div_idx].div);
+}
+
+static int cpg_sd_clock_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned long best_rate = ULONG_MAX, diff_min = ULONG_MAX;
+ struct sd_clock *clock = to_sd_clock(hw);
+ unsigned long calc_rate, diff;
+ unsigned int i;
+
+ for (i = 0; i < clock->div_num; i++) {
+ calc_rate = DIV_ROUND_CLOSEST(req->best_parent_rate,
+ clock->div_table[i].div);
+ if (calc_rate < req->min_rate || calc_rate > req->max_rate)
+ continue;
+
+ diff = calc_rate > req->rate ? calc_rate - req->rate
+ : req->rate - calc_rate;
+ if (diff < diff_min) {
+ best_rate = calc_rate;
+ diff_min = diff;
+ }
+ }
+
+ if (best_rate == ULONG_MAX)
+ return -EINVAL;
+
+ req->rate = best_rate;
+ return 0;
+}
+
+static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+ unsigned int i;
+
+ for (i = 0; i < clock->div_num; i++)
+ if (rate == DIV_ROUND_CLOSEST(parent_rate,
+ clock->div_table[i].div))
+ break;
+
+ if (i >= clock->div_num)
+ return -EINVAL;
+
+ clock->cur_div_idx = i;
+
+ cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK | CPG_SD_FC_MASK,
+ clock->div_table[i].val &
+ (CPG_SD_STP_MASK | CPG_SD_FC_MASK));
+
+ return 0;
+}
+
+static const struct clk_ops cpg_sd_clock_ops = {
+ .enable = cpg_sd_clock_enable,
+ .disable = cpg_sd_clock_disable,
+ .is_enabled = cpg_sd_clock_is_enabled,
+ .recalc_rate = cpg_sd_clock_recalc_rate,
+ .determine_rate = cpg_sd_clock_determine_rate,
+ .set_rate = cpg_sd_clock_set_rate,
+};
+
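+/*
+ * Register an SDn clock; when skip_first is set, the first divider table
+ * entry is ignored to accommodate the HS400 quirk described above.
+ */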
+struct clk * __init cpg_sd_clk_register(const char *name,
+ void __iomem *base, unsigned int offset, const char *parent_name,
+ struct raw_notifier_head *notifiers, bool skip_first)
+{
+ struct clk_init_data init;
+ struct sd_clock *clock;
+ struct clk *clk;
+ u32 val;
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &cpg_sd_clock_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clock->csn.reg = base + offset;
+ clock->hw.init = &init;
+ clock->div_table = cpg_sd_div_table;
+ clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
+
+ if (skip_first) {
+ clock->div_table++;
+ clock->div_num--;
+ }
+
+ val = readl(clock->csn.reg) & ~CPG_SD_FC_MASK;
+ val |= CPG_SD_STP_MASK | (clock->div_table[0].val & CPG_SD_FC_MASK);
+ writel(val, clock->csn.reg);
+
+ clk = clk_register(NULL, &clock->hw);
+ if (IS_ERR(clk))
+ goto free_clock;
+
+ cpg_simple_notifier_register(notifiers, &clock->csn);
+ return clk;
+
+free_clock:
+ kfree(clock);
+ return clk;
+}
diff --git a/drivers/clk/renesas/rcar-cpg-lib.h b/drivers/clk/renesas/rcar-cpg-lib.h
new file mode 100644
index 000000000000..d00c91b116ca
--- /dev/null
+++ b/drivers/clk/renesas/rcar-cpg-lib.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * R-Car Gen3 Clock Pulse Generator Library
+ *
+ * Copyright (C) 2015-2018 Glider bvba
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ *
+ * Based on clk-rcar-gen3.c
+ *
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
+#ifndef __CLK_RENESAS_RCAR_CPG_LIB_H__
+#define __CLK_RENESAS_RCAR_CPG_LIB_H__
+
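+/* Serializes read-modify-write accesses to shared CPG registers */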
+extern spinlock_t cpg_lock;
+
+struct cpg_simple_notifier {
+ struct notifier_block nb;
+ void __iomem *reg;
+ u32 saved;
+};
+
+void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
+ struct cpg_simple_notifier *csn);
+
+void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set);
+
+struct clk * __init cpg_sd_clk_register(const char *name,
+ void __iomem *base, unsigned int offset, const char *parent_name,
+ struct raw_notifier_head *notifiers, bool skip_first);
+
+#endif
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 063b61151488..17826599e9dd 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -23,6 +23,7 @@
#include <linux/sys_soc.h>
#include "renesas-cpg-mssr.h"
+#include "rcar-cpg-lib.h"
#include "rcar-gen3-cpg.h"
#define CPG_PLL0CR 0x00d8
@@ -31,52 +32,6 @@
#define CPG_RCKCR_CKSEL BIT(15) /* RCLK Clock Source Select */
-static spinlock_t cpg_lock;
-
-static void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set)
-{
- unsigned long flags;
- u32 val;
-
- spin_lock_irqsave(&cpg_lock, flags);
- val = readl(reg);
- val &= ~clear;
- val |= set;
- writel(val, reg);
- spin_unlock_irqrestore(&cpg_lock, flags);
-};
-
-struct cpg_simple_notifier {
- struct notifier_block nb;
- void __iomem *reg;
- u32 saved;
-};
-
-static int cpg_simple_notifier_call(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct cpg_simple_notifier *csn =
- container_of(nb, struct cpg_simple_notifier, nb);
-
- switch (action) {
- case PM_EVENT_SUSPEND:
- csn->saved = readl(csn->reg);
- return NOTIFY_OK;
-
- case PM_EVENT_RESUME:
- writel(csn->saved, csn->reg);
- return NOTIFY_OK;
- }
- return NOTIFY_DONE;
-}
-
-static void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
- struct cpg_simple_notifier *csn)
-{
- csn->nb.notifier_call = cpg_simple_notifier_call;
- raw_notifier_chain_register(notifiers, &csn->nb);
-}
-
/*
* Z Clock & Z2 Clock
*
@@ -215,217 +170,6 @@ static struct clk * __init cpg_z_clk_register(const char *name,
return clk;
}
-/*
- * SDn Clock
- */
-#define CPG_SD_STP_HCK BIT(9)
-#define CPG_SD_STP_CK BIT(8)
-
-#define CPG_SD_STP_MASK (CPG_SD_STP_HCK | CPG_SD_STP_CK)
-#define CPG_SD_FC_MASK (0x7 << 2 | 0x3 << 0)
-
-#define CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) \
-{ \
- .val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
- ((sd_srcfc) << 2) | \
- ((sd_fc) << 0), \
- .div = (sd_div), \
-}
-
-struct sd_div_table {
- u32 val;
- unsigned int div;
-};
-
-struct sd_clock {
- struct clk_hw hw;
- const struct sd_div_table *div_table;
- struct cpg_simple_notifier csn;
- unsigned int div_num;
- unsigned int cur_div_idx;
-};
-
-/* SDn divider
- * sd_srcfc sd_fc div
- * stp_hck (div) (div) = sd_srcfc x sd_fc
- *---------------------------------------------------------
- * 0 0 (1) 1 (4) 4 : SDR104 / HS200 / HS400 (8 TAP)
- * 0 1 (2) 1 (4) 8 : SDR50
- * 1 2 (4) 1 (4) 16 : HS / SDR25
- * 1 3 (8) 1 (4) 32 : NS / SDR12
- * 1 4 (16) 1 (4) 64
- * 0 0 (1) 0 (2) 2
- * 0 1 (2) 0 (2) 4 : SDR104 / HS200 / HS400 (4 TAP)
- * 1 2 (4) 0 (2) 8
- * 1 3 (8) 0 (2) 16
- * 1 4 (16) 0 (2) 32
- *
- * NOTE: There is a quirk option to ignore the first row of the dividers
- * table when searching for suitable settings. This is because HS400 on
- * early ES versions of H3 and M3-W requires a specific setting to work.
- */
-static const struct sd_div_table cpg_sd_div_table[] = {
-/* CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) */
- CPG_SD_DIV_TABLE_DATA(0, 0, 1, 4),
- CPG_SD_DIV_TABLE_DATA(0, 1, 1, 8),
- CPG_SD_DIV_TABLE_DATA(1, 2, 1, 16),
- CPG_SD_DIV_TABLE_DATA(1, 3, 1, 32),
- CPG_SD_DIV_TABLE_DATA(1, 4, 1, 64),
- CPG_SD_DIV_TABLE_DATA(0, 0, 0, 2),
- CPG_SD_DIV_TABLE_DATA(0, 1, 0, 4),
- CPG_SD_DIV_TABLE_DATA(1, 2, 0, 8),
- CPG_SD_DIV_TABLE_DATA(1, 3, 0, 16),
- CPG_SD_DIV_TABLE_DATA(1, 4, 0, 32),
-};
-
-#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw)
-
-static int cpg_sd_clock_enable(struct clk_hw *hw)
-{
- struct sd_clock *clock = to_sd_clock(hw);
-
- cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK,
- clock->div_table[clock->cur_div_idx].val &
- CPG_SD_STP_MASK);
-
- return 0;
-}
-
-static void cpg_sd_clock_disable(struct clk_hw *hw)
-{
- struct sd_clock *clock = to_sd_clock(hw);
-
- cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK);
-}
-
-static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
-{
- struct sd_clock *clock = to_sd_clock(hw);
-
- return !(readl(clock->csn.reg) & CPG_SD_STP_MASK);
-}
-
-static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct sd_clock *clock = to_sd_clock(hw);
-
- return DIV_ROUND_CLOSEST(parent_rate,
- clock->div_table[clock->cur_div_idx].div);
-}
-
-static int cpg_sd_clock_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- unsigned long best_rate = ULONG_MAX, diff_min = ULONG_MAX;
- struct sd_clock *clock = to_sd_clock(hw);
- unsigned long calc_rate, diff;
- unsigned int i;
-
- for (i = 0; i < clock->div_num; i++) {
- calc_rate = DIV_ROUND_CLOSEST(req->best_parent_rate,
- clock->div_table[i].div);
- if (calc_rate < req->min_rate || calc_rate > req->max_rate)
- continue;
-
- diff = calc_rate > req->rate ? calc_rate - req->rate
- : req->rate - calc_rate;
- if (diff < diff_min) {
- best_rate = calc_rate;
- diff_min = diff;
- }
- }
-
- if (best_rate == ULONG_MAX)
- return -EINVAL;
-
- req->rate = best_rate;
- return 0;
-}
-
-static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct sd_clock *clock = to_sd_clock(hw);
- unsigned int i;
-
- for (i = 0; i < clock->div_num; i++)
- if (rate == DIV_ROUND_CLOSEST(parent_rate,
- clock->div_table[i].div))
- break;
-
- if (i >= clock->div_num)
- return -EINVAL;
-
- clock->cur_div_idx = i;
-
- cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK | CPG_SD_FC_MASK,
- clock->div_table[i].val &
- (CPG_SD_STP_MASK | CPG_SD_FC_MASK));
-
- return 0;
-}
-
-static const struct clk_ops cpg_sd_clock_ops = {
- .enable = cpg_sd_clock_enable,
- .disable = cpg_sd_clock_disable,
- .is_enabled = cpg_sd_clock_is_enabled,
- .recalc_rate = cpg_sd_clock_recalc_rate,
- .determine_rate = cpg_sd_clock_determine_rate,
- .set_rate = cpg_sd_clock_set_rate,
-};
-
-static u32 cpg_quirks __initdata;
-
-#define PLL_ERRATA BIT(0) /* Missing PLL0/2/4 post-divider */
-#define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */
-#define SD_SKIP_FIRST BIT(2) /* Skip first clock in SD table */
-
-static struct clk * __init cpg_sd_clk_register(const char *name,
- void __iomem *base, unsigned int offset, const char *parent_name,
- struct raw_notifier_head *notifiers)
-{
- struct clk_init_data init;
- struct sd_clock *clock;
- struct clk *clk;
- u32 val;
-
- clock = kzalloc(sizeof(*clock), GFP_KERNEL);
- if (!clock)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &cpg_sd_clock_ops;
- init.flags = CLK_SET_RATE_PARENT;
- init.parent_names = &parent_name;
- init.num_parents = 1;
-
- clock->csn.reg = base + offset;
- clock->hw.init = &init;
- clock->div_table = cpg_sd_div_table;
- clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
-
- if (cpg_quirks & SD_SKIP_FIRST) {
- clock->div_table++;
- clock->div_num--;
- }
-
- val = readl(clock->csn.reg) & ~CPG_SD_FC_MASK;
- val |= CPG_SD_STP_MASK | (clock->div_table[0].val & CPG_SD_FC_MASK);
- writel(val, clock->csn.reg);
-
- clk = clk_register(NULL, &clock->hw);
- if (IS_ERR(clk))
- goto free_clock;
-
- cpg_simple_notifier_register(notifiers, &clock->csn);
- return clk;
-
-free_clock:
- kfree(clock);
- return clk;
-}
-
struct rpc_clock {
struct clk_divider div;
struct clk_gate gate;
@@ -518,6 +262,12 @@ static struct clk * __init cpg_rpcd2_clk_register(const char *name,
static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;
+static u32 cpg_quirks __initdata;
+
+#define PLL_ERRATA BIT(0) /* Missing PLL0/2/4 post-divider */
+#define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */
+#define SD_SKIP_FIRST BIT(2) /* Skip first clock in SD table */
+
static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
{
@@ -613,7 +363,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
case CLK_TYPE_GEN3_SD:
return cpg_sd_clk_register(core->name, base, core->offset,
- __clk_get_name(parent), notifiers);
+ __clk_get_name(parent), notifiers,
+ cpg_quirks & SD_SKIP_FIRST);
case CLK_TYPE_GEN3_R:
if (cpg_quirks & RCKCR_CKSEL) {
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 1c3215dc4877..bffbc3d2faf5 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -136,8 +136,8 @@ static const u16 srstclr_for_v3u[] = {
* @control_regs: Pointer to control registers array
* @reset_regs: Pointer to reset registers array
* @reset_clear_regs: Pointer to reset clearing registers array
- * @smstpcr_saved[].mask: Mask of SMSTPCR[] bits under our control
- * @smstpcr_saved[].val: Saved values of SMSTPCR[]
+ * @smstpcr_saved: [].mask: Mask of SMSTPCR[] bits under our control
+ * [].val: Saved values of SMSTPCR[]
* @clks: Array containing all Core and Module Clocks
*/
struct cpg_mssr_priv {
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index 0dc478a19451..fa9027fb1920 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -51,10 +51,6 @@
*/
struct rockchip_cpuclk {
struct clk_hw hw;
-
- struct clk_mux cpu_mux;
- const struct clk_ops *cpu_mux_ops;
-
struct clk *alt_parent;
void __iomem *reg_base;
struct notifier_block clk_nb;
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
index ccd5c270c213..64f7faad2148 100644
--- a/drivers/clk/rockchip/clk-half-divider.c
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -145,7 +145,7 @@ static const struct clk_ops clk_half_divider_ops = {
.set_rate = clk_half_divider_set_rate,
};
-/**
+/*
* Register a clock branch.
* Most clock branches have a form like
*
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 4c6c9167ef50..fe937bcdb487 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -97,7 +97,7 @@ static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll)
return ret;
}
-/**
+/*
* PLL used in RK3036
*/
@@ -358,7 +358,7 @@ static const struct clk_ops rockchip_rk3036_pll_clk_ops = {
.init = rockchip_rk3036_pll_init,
};
-/**
+/*
* PLL used in RK3066, RK3188 and RK3288
*/
@@ -577,7 +577,7 @@ static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
.init = rockchip_rk3066_pll_init,
};
-/**
+/*
* PLL used in RK3399
*/
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 55443349439b..9a0dab9448db 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -474,7 +474,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
RK3368_CLKSEL_CON(21), 15, 1, MFLAGS,
RK3368_CLKGATE_CON(4), 5, GFLAGS),
- COMPOSITE_NOGATE(0, "sclk_vip_out", mux_vip_out_p, 0,
+ COMPOSITE_NOGATE(SCLK_VIP_OUT, "sclk_vip_out", mux_vip_out_p, 0,
RK3368_CLKSEL_CON(21), 14, 1, MFLAGS, 8, 5, DFLAGS),
COMPOSITE_NODIV(SCLK_EDP_24M, "sclk_edp_24m", mux_edp_24m_p, 0,
@@ -818,8 +818,8 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
* pclk_vio gates
* pclk_vio comes from the exactly same source as hclk_vio
*/
- GATE(0, "pclk_dphyrx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS),
- GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS),
+ GATE(PCLK_DPHYRX, "pclk_dphyrx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 11, GFLAGS),
+ GATE(PCLK_DPHYTX0, "pclk_dphytx0", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 10, GFLAGS),
/* pclk_pd_pmu gates */
GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 5, GFLAGS),
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 336481bc6cc7..049e5e0b64f6 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -24,7 +24,7 @@
#include <linux/rational.h>
#include "clk.h"
-/**
+/*
* Register a clock branch.
* Most clock branches have a form like
*
@@ -170,7 +170,7 @@ static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
return notifier_from_errno(ret);
}
-/**
+/*
* fractional divider must set that denominator is 20 times larger than
* numerator to generate precise clock frequency.
*/
diff --git a/drivers/clk/sifive/fu540-prci.h b/drivers/clk/sifive/fu540-prci.h
index c8271efa7bdc..c220677dc010 100644
--- a/drivers/clk/sifive/fu540-prci.h
+++ b/drivers/clk/sifive/fu540-prci.h
@@ -13,9 +13,4 @@
extern struct __prci_clock __prci_init_clocks_fu540[NUM_CLOCK_FU540];
-static const struct prci_clk_desc prci_clk_fu540 = {
- .clks = __prci_init_clocks_fu540,
- .num_clks = ARRAY_SIZE(__prci_init_clocks_fu540),
-};
-
#endif /* __SIFIVE_CLK_FU540_PRCI_H */
diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c
index c78b042750e2..1490b01ce629 100644
--- a/drivers/clk/sifive/sifive-prci.c
+++ b/drivers/clk/sifive/sifive-prci.c
@@ -12,6 +12,11 @@
#include "fu540-prci.h"
#include "fu740-prci.h"
+static const struct prci_clk_desc prci_clk_fu540 = {
+ .clks = __prci_init_clocks_fu540,
+ .num_clks = ARRAY_SIZE(__prci_init_clocks_fu540),
+};
+
/*
* Private functions
*/
diff --git a/drivers/clk/sirf/Makefile b/drivers/clk/sirf/Makefile
deleted file mode 100644
index 0ff61f87cddb..000000000000
--- a/drivers/clk/sirf/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for sirf specific clk
-#
-
-obj-$(CONFIG_ARCH_SIRF) += clk-prima2.o clk-atlas6.o clk-atlas7.o
diff --git a/drivers/clk/sirf/atlas6.h b/drivers/clk/sirf/atlas6.h
deleted file mode 100644
index cb871e30a175..000000000000
--- a/drivers/clk/sirf/atlas6.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#define SIRFSOC_CLKC_CLK_EN0 0x0000
-#define SIRFSOC_CLKC_CLK_EN1 0x0004
-#define SIRFSOC_CLKC_REF_CFG 0x0020
-#define SIRFSOC_CLKC_CPU_CFG 0x0024
-#define SIRFSOC_CLKC_MEM_CFG 0x0028
-#define SIRFSOC_CLKC_MEMDIV_CFG 0x002C
-#define SIRFSOC_CLKC_SYS_CFG 0x0030
-#define SIRFSOC_CLKC_IO_CFG 0x0034
-#define SIRFSOC_CLKC_DSP_CFG 0x0038
-#define SIRFSOC_CLKC_GFX_CFG 0x003c
-#define SIRFSOC_CLKC_MM_CFG 0x0040
-#define SIRFSOC_CLKC_GFX2D_CFG 0x0040
-#define SIRFSOC_CLKC_LCD_CFG 0x0044
-#define SIRFSOC_CLKC_MMC01_CFG 0x0048
-#define SIRFSOC_CLKC_MMC23_CFG 0x004C
-#define SIRFSOC_CLKC_MMC45_CFG 0x0050
-#define SIRFSOC_CLKC_NAND_CFG 0x0054
-#define SIRFSOC_CLKC_NANDDIV_CFG 0x0058
-#define SIRFSOC_CLKC_PLL1_CFG0 0x0080
-#define SIRFSOC_CLKC_PLL2_CFG0 0x0084
-#define SIRFSOC_CLKC_PLL3_CFG0 0x0088
-#define SIRFSOC_CLKC_PLL1_CFG1 0x008c
-#define SIRFSOC_CLKC_PLL2_CFG1 0x0090
-#define SIRFSOC_CLKC_PLL3_CFG1 0x0094
-#define SIRFSOC_CLKC_PLL1_CFG2 0x0098
-#define SIRFSOC_CLKC_PLL2_CFG2 0x009c
-#define SIRFSOC_CLKC_PLL3_CFG2 0x00A0
-#define SIRFSOC_USBPHY_PLL_CTRL 0x0008
-#define SIRFSOC_USBPHY_PLL_POWERDOWN BIT(1)
-#define SIRFSOC_USBPHY_PLL_BYPASS BIT(2)
-#define SIRFSOC_USBPHY_PLL_LOCK BIT(3)
diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c
deleted file mode 100644
index b95483bb6a5e..000000000000
--- a/drivers/clk/sirf/clk-atlas6.c
+++ /dev/null
@@ -1,150 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Clock tree for CSR SiRFatlasVI
- *
- * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
- * company.
- */
-
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
-
-#include "atlas6.h"
-#include "clk-common.c"
-
-static struct clk_dmn clk_mmc01 = {
- .regofs = SIRFSOC_CLKC_MMC01_CFG,
- .enable_bit = 59,
- .hw = {
- .init = &clk_mmc01_init,
- },
-};
-
-static struct clk_dmn clk_mmc23 = {
- .regofs = SIRFSOC_CLKC_MMC23_CFG,
- .enable_bit = 60,
- .hw = {
- .init = &clk_mmc23_init,
- },
-};
-
-static struct clk_dmn clk_mmc45 = {
- .regofs = SIRFSOC_CLKC_MMC45_CFG,
- .enable_bit = 61,
- .hw = {
- .init = &clk_mmc45_init,
- },
-};
-
-static const struct clk_init_data clk_nand_init = {
- .name = "nand",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static struct clk_dmn clk_nand = {
- .regofs = SIRFSOC_CLKC_NAND_CFG,
- .enable_bit = 34,
- .hw = {
- .init = &clk_nand_init,
- },
-};
-
-enum atlas6_clk_index {
- /* 0 1 2 3 4 5 6 7 8 9 */
- rtc, osc, pll1, pll2, pll3, mem, sys, security, dsp, gps,
- mf, io, cpu, uart0, uart1, uart2, tsc, i2c0, i2c1, spi0,
- spi1, pwmc, efuse, pulse, dmac0, dmac1, nand, audio, usp0, usp1,
- usp2, vip, gfx, gfx2d, lcd, vpp, mmc01, mmc23, mmc45, usbpll,
- usb0, usb1, cphif, maxclk,
-};
-
-static __initdata struct clk_hw *atlas6_clk_hw_array[maxclk] = {
- NULL, /* dummy */
- NULL,
- &clk_pll1.hw,
- &clk_pll2.hw,
- &clk_pll3.hw,
- &clk_mem.hw,
- &clk_sys.hw,
- &clk_security.hw,
- &clk_dsp.hw,
- &clk_gps.hw,
- &clk_mf.hw,
- &clk_io.hw,
- &clk_cpu.hw,
- &clk_uart0.hw,
- &clk_uart1.hw,
- &clk_uart2.hw,
- &clk_tsc.hw,
- &clk_i2c0.hw,
- &clk_i2c1.hw,
- &clk_spi0.hw,
- &clk_spi1.hw,
- &clk_pwmc.hw,
- &clk_efuse.hw,
- &clk_pulse.hw,
- &clk_dmac0.hw,
- &clk_dmac1.hw,
- &clk_nand.hw,
- &clk_audio.hw,
- &clk_usp0.hw,
- &clk_usp1.hw,
- &clk_usp2.hw,
- &clk_vip.hw,
- &clk_gfx.hw,
- &clk_gfx2d.hw,
- &clk_lcd.hw,
- &clk_vpp.hw,
- &clk_mmc01.hw,
- &clk_mmc23.hw,
- &clk_mmc45.hw,
- &usb_pll_clk_hw,
- &clk_usb0.hw,
- &clk_usb1.hw,
- &clk_cphif.hw,
-};
-
-static struct clk *atlas6_clks[maxclk];
-
-static void __init atlas6_clk_init(struct device_node *np)
-{
- struct device_node *rscnp;
- int i;
-
- rscnp = of_find_compatible_node(NULL, NULL, "sirf,prima2-rsc");
- sirfsoc_rsc_vbase = of_iomap(rscnp, 0);
- if (!sirfsoc_rsc_vbase)
- panic("unable to map rsc registers\n");
- of_node_put(rscnp);
-
- sirfsoc_clk_vbase = of_iomap(np, 0);
- if (!sirfsoc_clk_vbase)
- panic("unable to map clkc registers\n");
-
- /* These are always available (RTC and 26MHz OSC)*/
- atlas6_clks[rtc] = clk_register_fixed_rate(NULL, "rtc", NULL, 0, 32768);
- atlas6_clks[osc] = clk_register_fixed_rate(NULL, "osc", NULL, 0,
- 26000000);
-
- for (i = pll1; i < maxclk; i++) {
- atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]);
- BUG_ON(IS_ERR(atlas6_clks[i]));
- }
- clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu");
- clk_register_clkdev(atlas6_clks[io], NULL, "io");
- clk_register_clkdev(atlas6_clks[mem], NULL, "mem");
- clk_register_clkdev(atlas6_clks[mem], NULL, "osc");
-
- clk_data.clks = atlas6_clks;
- clk_data.clk_num = maxclk;
-
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
-}
-CLK_OF_DECLARE(atlas6_clk, "sirf,atlas6-clkc", atlas6_clk_init);
diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c
deleted file mode 100644
index 3f57fefd13bb..000000000000
--- a/drivers/clk/sirf/clk-atlas7.c
+++ /dev/null
@@ -1,1682 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Clock tree for CSR SiRFAtlas7
- *
- * Copyright (c) 2014 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/bitops.h>
-#include <linux/io.h>
-#include <linux/clk-provider.h>
-#include <linux/delay.h>
-#include <linux/of_address.h>
-#include <linux/reset-controller.h>
-#include <linux/slab.h>
-
-#define SIRFSOC_CLKC_MEMPLL_AB_FREQ 0x0000
-#define SIRFSOC_CLKC_MEMPLL_AB_SSC 0x0004
-#define SIRFSOC_CLKC_MEMPLL_AB_CTRL0 0x0008
-#define SIRFSOC_CLKC_MEMPLL_AB_CTRL1 0x000c
-#define SIRFSOC_CLKC_MEMPLL_AB_STATUS 0x0010
-#define SIRFSOC_CLKC_MEMPLL_AB_SSRAM_ADDR 0x0014
-#define SIRFSOC_CLKC_MEMPLL_AB_SSRAM_DATA 0x0018
-
-#define SIRFSOC_CLKC_CPUPLL_AB_FREQ 0x001c
-#define SIRFSOC_CLKC_CPUPLL_AB_SSC 0x0020
-#define SIRFSOC_CLKC_CPUPLL_AB_CTRL0 0x0024
-#define SIRFSOC_CLKC_CPUPLL_AB_CTRL1 0x0028
-#define SIRFSOC_CLKC_CPUPLL_AB_STATUS 0x002c
-
-#define SIRFSOC_CLKC_SYS0PLL_AB_FREQ 0x0030
-#define SIRFSOC_CLKC_SYS0PLL_AB_SSC 0x0034
-#define SIRFSOC_CLKC_SYS0PLL_AB_CTRL0 0x0038
-#define SIRFSOC_CLKC_SYS0PLL_AB_CTRL1 0x003c
-#define SIRFSOC_CLKC_SYS0PLL_AB_STATUS 0x0040
-
-#define SIRFSOC_CLKC_SYS1PLL_AB_FREQ 0x0044
-#define SIRFSOC_CLKC_SYS1PLL_AB_SSC 0x0048
-#define SIRFSOC_CLKC_SYS1PLL_AB_CTRL0 0x004c
-#define SIRFSOC_CLKC_SYS1PLL_AB_CTRL1 0x0050
-#define SIRFSOC_CLKC_SYS1PLL_AB_STATUS 0x0054
-
-#define SIRFSOC_CLKC_SYS2PLL_AB_FREQ 0x0058
-#define SIRFSOC_CLKC_SYS2PLL_AB_SSC 0x005c
-#define SIRFSOC_CLKC_SYS2PLL_AB_CTRL0 0x0060
-#define SIRFSOC_CLKC_SYS2PLL_AB_CTRL1 0x0064
-#define SIRFSOC_CLKC_SYS2PLL_AB_STATUS 0x0068
-
-#define SIRFSOC_CLKC_SYS3PLL_AB_FREQ 0x006c
-#define SIRFSOC_CLKC_SYS3PLL_AB_SSC 0x0070
-#define SIRFSOC_CLKC_SYS3PLL_AB_CTRL0 0x0074
-#define SIRFSOC_CLKC_SYS3PLL_AB_CTRL1 0x0078
-#define SIRFSOC_CLKC_SYS3PLL_AB_STATUS 0x007c
-
-#define SIRFSOC_ABPLL_CTRL0_SSEN 0x00001000
-#define SIRFSOC_ABPLL_CTRL0_BYPASS 0x00000010
-#define SIRFSOC_ABPLL_CTRL0_RESET 0x00000001
-
-#define SIRFSOC_CLKC_AUDIO_DTO_INC 0x0088
-#define SIRFSOC_CLKC_DISP0_DTO_INC 0x008c
-#define SIRFSOC_CLKC_DISP1_DTO_INC 0x0090
-
-#define SIRFSOC_CLKC_AUDIO_DTO_SRC 0x0094
-#define SIRFSOC_CLKC_AUDIO_DTO_ENA 0x0098
-#define SIRFSOC_CLKC_AUDIO_DTO_DROFF 0x009c
-
-#define SIRFSOC_CLKC_DISP0_DTO_SRC 0x00a0
-#define SIRFSOC_CLKC_DISP0_DTO_ENA 0x00a4
-#define SIRFSOC_CLKC_DISP0_DTO_DROFF 0x00a8
-
-#define SIRFSOC_CLKC_DISP1_DTO_SRC 0x00ac
-#define SIRFSOC_CLKC_DISP1_DTO_ENA 0x00b0
-#define SIRFSOC_CLKC_DISP1_DTO_DROFF 0x00b4
-
-#define SIRFSOC_CLKC_I2S_CLK_SEL 0x00b8
-#define SIRFSOC_CLKC_I2S_SEL_STAT 0x00bc
-
-#define SIRFSOC_CLKC_USBPHY_CLKDIV_CFG 0x00c0
-#define SIRFSOC_CLKC_USBPHY_CLKDIV_ENA 0x00c4
-#define SIRFSOC_CLKC_USBPHY_CLK_SEL 0x00c8
-#define SIRFSOC_CLKC_USBPHY_CLK_SEL_STAT 0x00cc
-
-#define SIRFSOC_CLKC_BTSS_CLKDIV_CFG 0x00d0
-#define SIRFSOC_CLKC_BTSS_CLKDIV_ENA 0x00d4
-#define SIRFSOC_CLKC_BTSS_CLK_SEL 0x00d8
-#define SIRFSOC_CLKC_BTSS_CLK_SEL_STAT 0x00dc
-
-#define SIRFSOC_CLKC_RGMII_CLKDIV_CFG 0x00e0
-#define SIRFSOC_CLKC_RGMII_CLKDIV_ENA 0x00e4
-#define SIRFSOC_CLKC_RGMII_CLK_SEL 0x00e8
-#define SIRFSOC_CLKC_RGMII_CLK_SEL_STAT 0x00ec
-
-#define SIRFSOC_CLKC_CPU_CLKDIV_CFG 0x00f0
-#define SIRFSOC_CLKC_CPU_CLKDIV_ENA 0x00f4
-#define SIRFSOC_CLKC_CPU_CLK_SEL 0x00f8
-#define SIRFSOC_CLKC_CPU_CLK_SEL_STAT 0x00fc
-
-#define SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG 0x0100
-#define SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA 0x0104
-#define SIRFSOC_CLKC_SDPHY01_CLK_SEL 0x0108
-#define SIRFSOC_CLKC_SDPHY01_CLK_SEL_STAT 0x010c
-
-#define SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG 0x0110
-#define SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA 0x0114
-#define SIRFSOC_CLKC_SDPHY23_CLK_SEL 0x0118
-#define SIRFSOC_CLKC_SDPHY23_CLK_SEL_STAT 0x011c
-
-#define SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG 0x0120
-#define SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA 0x0124
-#define SIRFSOC_CLKC_SDPHY45_CLK_SEL 0x0128
-#define SIRFSOC_CLKC_SDPHY45_CLK_SEL_STAT 0x012c
-
-#define SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG 0x0130
-#define SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA 0x0134
-#define SIRFSOC_CLKC_SDPHY67_CLK_SEL 0x0138
-#define SIRFSOC_CLKC_SDPHY67_CLK_SEL_STAT 0x013c
-
-#define SIRFSOC_CLKC_CAN_CLKDIV_CFG 0x0140
-#define SIRFSOC_CLKC_CAN_CLKDIV_ENA 0x0144
-#define SIRFSOC_CLKC_CAN_CLK_SEL 0x0148
-#define SIRFSOC_CLKC_CAN_CLK_SEL_STAT 0x014c
-
-#define SIRFSOC_CLKC_DEINT_CLKDIV_CFG 0x0150
-#define SIRFSOC_CLKC_DEINT_CLKDIV_ENA 0x0154
-#define SIRFSOC_CLKC_DEINT_CLK_SEL 0x0158
-#define SIRFSOC_CLKC_DEINT_CLK_SEL_STAT 0x015c
-
-#define SIRFSOC_CLKC_NAND_CLKDIV_CFG 0x0160
-#define SIRFSOC_CLKC_NAND_CLKDIV_ENA 0x0164
-#define SIRFSOC_CLKC_NAND_CLK_SEL 0x0168
-#define SIRFSOC_CLKC_NAND_CLK_SEL_STAT 0x016c
-
-#define SIRFSOC_CLKC_DISP0_CLKDIV_CFG 0x0170
-#define SIRFSOC_CLKC_DISP0_CLKDIV_ENA 0x0174
-#define SIRFSOC_CLKC_DISP0_CLK_SEL 0x0178
-#define SIRFSOC_CLKC_DISP0_CLK_SEL_STAT 0x017c
-
-#define SIRFSOC_CLKC_DISP1_CLKDIV_CFG 0x0180
-#define SIRFSOC_CLKC_DISP1_CLKDIV_ENA 0x0184
-#define SIRFSOC_CLKC_DISP1_CLK_SEL 0x0188
-#define SIRFSOC_CLKC_DISP1_CLK_SEL_STAT 0x018c
-
-#define SIRFSOC_CLKC_GPU_CLKDIV_CFG 0x0190
-#define SIRFSOC_CLKC_GPU_CLKDIV_ENA 0x0194
-#define SIRFSOC_CLKC_GPU_CLK_SEL 0x0198
-#define SIRFSOC_CLKC_GPU_CLK_SEL_STAT 0x019c
-
-#define SIRFSOC_CLKC_GNSS_CLKDIV_CFG 0x01a0
-#define SIRFSOC_CLKC_GNSS_CLKDIV_ENA 0x01a4
-#define SIRFSOC_CLKC_GNSS_CLK_SEL 0x01a8
-#define SIRFSOC_CLKC_GNSS_CLK_SEL_STAT 0x01ac
-
-#define SIRFSOC_CLKC_SHARED_DIVIDER_CFG0 0x01b0
-#define SIRFSOC_CLKC_SHARED_DIVIDER_CFG1 0x01b4
-#define SIRFSOC_CLKC_SHARED_DIVIDER_ENA 0x01b8
-
-#define SIRFSOC_CLKC_SYS_CLK_SEL 0x01bc
-#define SIRFSOC_CLKC_SYS_CLK_SEL_STAT 0x01c0
-#define SIRFSOC_CLKC_IO_CLK_SEL 0x01c4
-#define SIRFSOC_CLKC_IO_CLK_SEL_STAT 0x01c8
-#define SIRFSOC_CLKC_G2D_CLK_SEL 0x01cc
-#define SIRFSOC_CLKC_G2D_CLK_SEL_STAT 0x01d0
-#define SIRFSOC_CLKC_JPENC_CLK_SEL 0x01d4
-#define SIRFSOC_CLKC_JPENC_CLK_SEL_STAT 0x01d8
-#define SIRFSOC_CLKC_VDEC_CLK_SEL 0x01dc
-#define SIRFSOC_CLKC_VDEC_CLK_SEL_STAT 0x01e0
-#define SIRFSOC_CLKC_GMAC_CLK_SEL 0x01e4
-#define SIRFSOC_CLKC_GMAC_CLK_SEL_STAT 0x01e8
-#define SIRFSOC_CLKC_USB_CLK_SEL 0x01ec
-#define SIRFSOC_CLKC_USB_CLK_SEL_STAT 0x01f0
-#define SIRFSOC_CLKC_KAS_CLK_SEL 0x01f4
-#define SIRFSOC_CLKC_KAS_CLK_SEL_STAT 0x01f8
-#define SIRFSOC_CLKC_SEC_CLK_SEL 0x01fc
-#define SIRFSOC_CLKC_SEC_CLK_SEL_STAT 0x0200
-#define SIRFSOC_CLKC_SDR_CLK_SEL 0x0204
-#define SIRFSOC_CLKC_SDR_CLK_SEL_STAT 0x0208
-#define SIRFSOC_CLKC_VIP_CLK_SEL 0x020c
-#define SIRFSOC_CLKC_VIP_CLK_SEL_STAT 0x0210
-#define SIRFSOC_CLKC_NOCD_CLK_SEL 0x0214
-#define SIRFSOC_CLKC_NOCD_CLK_SEL_STAT 0x0218
-#define SIRFSOC_CLKC_NOCR_CLK_SEL 0x021c
-#define SIRFSOC_CLKC_NOCR_CLK_SEL_STAT 0x0220
-#define SIRFSOC_CLKC_TPIU_CLK_SEL 0x0224
-#define SIRFSOC_CLKC_TPIU_CLK_SEL_STAT 0x0228
-
-#define SIRFSOC_CLKC_ROOT_CLK_EN0_SET 0x022c
-#define SIRFSOC_CLKC_ROOT_CLK_EN0_CLR 0x0230
-#define SIRFSOC_CLKC_ROOT_CLK_EN0_STAT 0x0234
-#define SIRFSOC_CLKC_ROOT_CLK_EN1_SET 0x0238
-#define SIRFSOC_CLKC_ROOT_CLK_EN1_CLR 0x023c
-#define SIRFSOC_CLKC_ROOT_CLK_EN1_STAT 0x0240
-
-#define SIRFSOC_CLKC_LEAF_CLK_EN0_SET 0x0244
-#define SIRFSOC_CLKC_LEAF_CLK_EN0_CLR 0x0248
-#define SIRFSOC_CLKC_LEAF_CLK_EN0_STAT 0x024c
-
-#define SIRFSOC_CLKC_RSTC_A7_SW_RST 0x0308
-
-#define SIRFSOC_CLKC_LEAF_CLK_EN1_SET 0x04a0
-#define SIRFSOC_CLKC_LEAF_CLK_EN2_SET 0x04b8
-#define SIRFSOC_CLKC_LEAF_CLK_EN3_SET 0x04d0
-#define SIRFSOC_CLKC_LEAF_CLK_EN4_SET 0x04e8
-#define SIRFSOC_CLKC_LEAF_CLK_EN5_SET 0x0500
-#define SIRFSOC_CLKC_LEAF_CLK_EN6_SET 0x0518
-#define SIRFSOC_CLKC_LEAF_CLK_EN7_SET 0x0530
-#define SIRFSOC_CLKC_LEAF_CLK_EN8_SET 0x0548
-
-#define SIRFSOC_NOC_CLK_IDLEREQ_SET 0x02D0
-#define SIRFSOC_NOC_CLK_IDLEREQ_CLR 0x02D4
-#define SIRFSOC_NOC_CLK_SLVRDY_SET 0x02E8
-#define SIRFSOC_NOC_CLK_SLVRDY_CLR 0x02EC
-#define SIRFSOC_NOC_CLK_IDLE_STATUS 0x02F4
-
-struct clk_pll {
- struct clk_hw hw;
- u16 regofs; /* register offset */
-};
-#define to_pllclk(_hw) container_of(_hw, struct clk_pll, hw)
-
-struct clk_dto {
- struct clk_hw hw;
- u16 inc_offset; /* dto increment offset */
- u16 src_offset; /* dto src offset */
-};
-#define to_dtoclk(_hw) container_of(_hw, struct clk_dto, hw)
-
-enum clk_unit_type {
- CLK_UNIT_NOC_OTHER,
- CLK_UNIT_NOC_CLOCK,
- CLK_UNIT_NOC_SOCKET,
-};
-
-struct clk_unit {
- struct clk_hw hw;
- u16 regofs;
- u16 bit;
- u32 type;
- u8 idle_bit;
- spinlock_t *lock;
-};
-#define to_unitclk(_hw) container_of(_hw, struct clk_unit, hw)
-
-struct atlas7_div_init_data {
- const char *div_name;
- const char *parent_name;
- const char *gate_name;
- unsigned long flags;
- u8 divider_flags;
- u8 gate_flags;
- u32 div_offset;
- u8 shift;
- u8 width;
- u32 gate_offset;
- u8 gate_bit;
- spinlock_t *lock;
-};
-
-struct atlas7_mux_init_data {
- const char *mux_name;
- const char * const *parent_names;
- u8 parent_num;
- unsigned long flags;
- u8 mux_flags;
- u32 mux_offset;
- u8 shift;
- u8 width;
-};
-
-struct atlas7_unit_init_data {
- u32 index;
- const char *unit_name;
- const char *parent_name;
- unsigned long flags;
- u32 regofs;
- u8 bit;
- u32 type;
- u8 idle_bit;
- spinlock_t *lock;
-};
-
-struct atlas7_reset_desc {
- const char *name;
- u32 clk_ofs;
- u8 clk_bit;
- u32 rst_ofs;
- u8 rst_bit;
- spinlock_t *lock;
-};
-
-static void __iomem *sirfsoc_clk_vbase;
-static struct clk_onecell_data clk_data;
-
-static const struct clk_div_table pll_div_table[] = {
- { .val = 0, .div = 1 },
- { .val = 1, .div = 2 },
- { .val = 2, .div = 4 },
- { .val = 3, .div = 8 },
- { .val = 4, .div = 16 },
- { .val = 5, .div = 32 },
-};
-
-static DEFINE_SPINLOCK(cpupll_ctrl1_lock);
-static DEFINE_SPINLOCK(mempll_ctrl1_lock);
-static DEFINE_SPINLOCK(sys0pll_ctrl1_lock);
-static DEFINE_SPINLOCK(sys1pll_ctrl1_lock);
-static DEFINE_SPINLOCK(sys2pll_ctrl1_lock);
-static DEFINE_SPINLOCK(sys3pll_ctrl1_lock);
-static DEFINE_SPINLOCK(usbphy_div_lock);
-static DEFINE_SPINLOCK(btss_div_lock);
-static DEFINE_SPINLOCK(rgmii_div_lock);
-static DEFINE_SPINLOCK(cpu_div_lock);
-static DEFINE_SPINLOCK(sdphy01_div_lock);
-static DEFINE_SPINLOCK(sdphy23_div_lock);
-static DEFINE_SPINLOCK(sdphy45_div_lock);
-static DEFINE_SPINLOCK(sdphy67_div_lock);
-static DEFINE_SPINLOCK(can_div_lock);
-static DEFINE_SPINLOCK(deint_div_lock);
-static DEFINE_SPINLOCK(nand_div_lock);
-static DEFINE_SPINLOCK(disp0_div_lock);
-static DEFINE_SPINLOCK(disp1_div_lock);
-static DEFINE_SPINLOCK(gpu_div_lock);
-static DEFINE_SPINLOCK(gnss_div_lock);
-/* gate register shared */
-static DEFINE_SPINLOCK(share_div_lock);
-static DEFINE_SPINLOCK(root0_gate_lock);
-static DEFINE_SPINLOCK(root1_gate_lock);
-static DEFINE_SPINLOCK(leaf0_gate_lock);
-static DEFINE_SPINLOCK(leaf1_gate_lock);
-static DEFINE_SPINLOCK(leaf2_gate_lock);
-static DEFINE_SPINLOCK(leaf3_gate_lock);
-static DEFINE_SPINLOCK(leaf4_gate_lock);
-static DEFINE_SPINLOCK(leaf5_gate_lock);
-static DEFINE_SPINLOCK(leaf6_gate_lock);
-static DEFINE_SPINLOCK(leaf7_gate_lock);
-static DEFINE_SPINLOCK(leaf8_gate_lock);
-
-static inline unsigned long clkc_readl(unsigned reg)
-{
- return readl(sirfsoc_clk_vbase + reg);
-}
-
-static inline void clkc_writel(u32 val, unsigned reg)
-{
- writel(val, sirfsoc_clk_vbase + reg);
-}
-
-/*
- * ABPLL
- * integer mode: Fvco = Fin * 2 * NF / NR
- * Spread Spectrum mode: Fvco = Fin * SSN / NR
- * SSN = 2^24 / (256 * ((ssdiv >> ssdepth) << ssdepth) + (ssmod << ssdepth))
- */
-static unsigned long pll_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- unsigned long fin = parent_rate;
- struct clk_pll *clk = to_pllclk(hw);
- u64 rate;
- u32 regctrl0 = clkc_readl(clk->regofs + SIRFSOC_CLKC_MEMPLL_AB_CTRL0 -
- SIRFSOC_CLKC_MEMPLL_AB_FREQ);
- u32 regfreq = clkc_readl(clk->regofs);
- u32 regssc = clkc_readl(clk->regofs + SIRFSOC_CLKC_MEMPLL_AB_SSC -
- SIRFSOC_CLKC_MEMPLL_AB_FREQ);
- u32 nr = (regfreq >> 16 & (BIT(3) - 1)) + 1;
- u32 nf = (regfreq & (BIT(9) - 1)) + 1;
- u32 ssdiv = regssc >> 8 & (BIT(12) - 1);
- u32 ssdepth = regssc >> 20 & (BIT(2) - 1);
- u32 ssmod = regssc & (BIT(8) - 1);
-
- if (regctrl0 & SIRFSOC_ABPLL_CTRL0_BYPASS)
- return fin;
-
- if (regctrl0 & SIRFSOC_ABPLL_CTRL0_SSEN) {
- rate = fin;
- rate *= 1 << 24;
- do_div(rate, nr);
- do_div(rate, (256 * ((ssdiv >> ssdepth) << ssdepth)
- + (ssmod << ssdepth)));
- } else {
- rate = 2 * fin;
- rate *= nf;
- do_div(rate, nr);
- }
- return rate;
-}
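As a quick illustration of the integer-mode formula above (all field values below are hypothetical, not taken from a datasheet), the arithmetic works out as follows:

/*
 * Hypothetical example for pll_clk_recalc_rate(), integer mode,
 * with bypass and spread spectrum disabled:
 *   fin = 26 MHz, regfreq[18:16] = 0  ->  NR = 1
 *                 regfreq[8:0]  = 49  ->  NF = 50
 *   Fvco = fin * 2 * NF / NR = 26 MHz * 2 * 50 / 1 = 2600 MHz
 * so the callback would return 2600000000.
 */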
-
-static const struct clk_ops ab_pll_ops = {
- .recalc_rate = pll_clk_recalc_rate,
-};
-
-static const char * const pll_clk_parents[] = {
- "xin",
-};
-
-static const struct clk_init_data clk_cpupll_init = {
- .name = "cpupll_vco",
- .ops = &ab_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static struct clk_pll clk_cpupll = {
- .regofs = SIRFSOC_CLKC_CPUPLL_AB_FREQ,
- .hw = {
- .init = &clk_cpupll_init,
- },
-};
-
-static const struct clk_init_data clk_mempll_init = {
- .name = "mempll_vco",
- .ops = &ab_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static struct clk_pll clk_mempll = {
- .regofs = SIRFSOC_CLKC_MEMPLL_AB_FREQ,
- .hw = {
- .init = &clk_mempll_init,
- },
-};
-
-static const struct clk_init_data clk_sys0pll_init = {
- .name = "sys0pll_vco",
- .ops = &ab_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static struct clk_pll clk_sys0pll = {
- .regofs = SIRFSOC_CLKC_SYS0PLL_AB_FREQ,
- .hw = {
- .init = &clk_sys0pll_init,
- },
-};
-
-static const struct clk_init_data clk_sys1pll_init = {
- .name = "sys1pll_vco",
- .ops = &ab_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static struct clk_pll clk_sys1pll = {
- .regofs = SIRFSOC_CLKC_SYS1PLL_AB_FREQ,
- .hw = {
- .init = &clk_sys1pll_init,
- },
-};
-
-static const struct clk_init_data clk_sys2pll_init = {
- .name = "sys2pll_vco",
- .ops = &ab_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static struct clk_pll clk_sys2pll = {
- .regofs = SIRFSOC_CLKC_SYS2PLL_AB_FREQ,
- .hw = {
- .init = &clk_sys2pll_init,
- },
-};
-
-static const struct clk_init_data clk_sys3pll_init = {
- .name = "sys3pll_vco",
- .ops = &ab_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static struct clk_pll clk_sys3pll = {
- .regofs = SIRFSOC_CLKC_SYS3PLL_AB_FREQ,
- .hw = {
- .init = &clk_sys3pll_init,
- },
-};
-
-/*
- * DTO in clkc; double resolution mode is enabled by default.
- * double resolution mode: fout = fin * finc / 2^29
- * normal mode: fout = fin * finc / 2^28
- */
-#define DTO_RESL_DOUBLE (1ULL << 29)
-#define DTO_RESL_NORMAL (1ULL << 28)
-
-static int dto_clk_is_enabled(struct clk_hw *hw)
-{
- struct clk_dto *clk = to_dtoclk(hw);
- int reg;
-
- reg = clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_ENA - SIRFSOC_CLKC_AUDIO_DTO_SRC;
-
- return !!(clkc_readl(reg) & BIT(0));
-}
-
-static int dto_clk_enable(struct clk_hw *hw)
-{
- u32 val, reg;
- struct clk_dto *clk = to_dtoclk(hw);
-
- reg = clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_ENA - SIRFSOC_CLKC_AUDIO_DTO_SRC;
-
- val = clkc_readl(reg) | BIT(0);
- clkc_writel(val, reg);
- return 0;
-}
-
-static void dto_clk_disable(struct clk_hw *hw)
-{
- u32 val, reg;
- struct clk_dto *clk = to_dtoclk(hw);
-
- reg = clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_ENA - SIRFSOC_CLKC_AUDIO_DTO_SRC;
-
- val = clkc_readl(reg) & ~BIT(0);
- clkc_writel(val, reg);
-}
-
-static unsigned long dto_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- u64 rate = parent_rate;
- struct clk_dto *clk = to_dtoclk(hw);
- u32 finc = clkc_readl(clk->inc_offset);
- u32 droff = clkc_readl(clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_DROFF - SIRFSOC_CLKC_AUDIO_DTO_SRC);
-
- rate *= finc;
- if (droff & BIT(0))
- /* Double resolution off */
- do_div(rate, DTO_RESL_NORMAL);
- else
- do_div(rate, DTO_RESL_DOUBLE);
-
- return rate;
-}
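A short, hypothetical worked example of the double-resolution formula shows how dto_clk_set_rate() and dto_clk_recalc_rate() relate (the rates are made up for illustration):

/*
 * Hypothetical numbers: parent_rate = 500 MHz, requested rate = 12.5 MHz.
 * dto_clk_set_rate() clears DROFF (double resolution on) and programs
 *   finc = rate * 2^29 / parent_rate
 *        = 12500000 * 536870912 / 500000000 = 13421772 (truncated)
 * dto_clk_recalc_rate() then reads back
 *   fout = parent_rate * finc / 2^29 ~= 12499999 Hz,
 * the small error being the DTO's fixed-point quantization step.
 */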
-
-static long dto_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- u64 dividend = rate * DTO_RESL_DOUBLE;
-
- do_div(dividend, *parent_rate);
- dividend *= *parent_rate;
- do_div(dividend, DTO_RESL_DOUBLE);
-
- return dividend;
-}
-
-static int dto_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- u64 dividend = rate * DTO_RESL_DOUBLE;
- struct clk_dto *clk = to_dtoclk(hw);
-
- do_div(dividend, parent_rate);
- clkc_writel(0, clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_DROFF - SIRFSOC_CLKC_AUDIO_DTO_SRC);
- clkc_writel(dividend, clk->inc_offset);
-
- return 0;
-}
-
-static u8 dto_clk_get_parent(struct clk_hw *hw)
-{
- struct clk_dto *clk = to_dtoclk(hw);
-
- return clkc_readl(clk->src_offset);
-}
-
-/*
- * the DTO needs CLK_SET_PARENT_GATE
- */
-static int dto_clk_set_parent(struct clk_hw *hw, u8 index)
-{
- struct clk_dto *clk = to_dtoclk(hw);
-
- clkc_writel(index, clk->src_offset);
- return 0;
-}
-
-static const struct clk_ops dto_ops = {
- .is_enabled = dto_clk_is_enabled,
- .enable = dto_clk_enable,
- .disable = dto_clk_disable,
- .recalc_rate = dto_clk_recalc_rate,
- .round_rate = dto_clk_round_rate,
- .set_rate = dto_clk_set_rate,
- .get_parent = dto_clk_get_parent,
- .set_parent = dto_clk_set_parent,
-};
-
-/* the DTO parent clocks are the sysNpll clk1 outputs */
-static const char * const audiodto_clk_parents[] = {
- "sys0pll_clk1",
- "sys1pll_clk1",
- "sys3pll_clk1",
-};
-
-static const struct clk_init_data clk_audiodto_init = {
- .name = "audio_dto",
- .ops = &dto_ops,
- .parent_names = audiodto_clk_parents,
- .num_parents = ARRAY_SIZE(audiodto_clk_parents),
-};
-
-static struct clk_dto clk_audio_dto = {
- .inc_offset = SIRFSOC_CLKC_AUDIO_DTO_INC,
- .src_offset = SIRFSOC_CLKC_AUDIO_DTO_SRC,
- .hw = {
- .init = &clk_audiodto_init,
- },
-};
-
-static const char * const disp0dto_clk_parents[] = {
- "sys0pll_clk1",
- "sys1pll_clk1",
- "sys3pll_clk1",
-};
-
-static const struct clk_init_data clk_disp0dto_init = {
- .name = "disp0_dto",
- .ops = &dto_ops,
- .parent_names = disp0dto_clk_parents,
- .num_parents = ARRAY_SIZE(disp0dto_clk_parents),
-};
-
-static struct clk_dto clk_disp0_dto = {
- .inc_offset = SIRFSOC_CLKC_DISP0_DTO_INC,
- .src_offset = SIRFSOC_CLKC_DISP0_DTO_SRC,
- .hw = {
- .init = &clk_disp0dto_init,
- },
-};
-
-static const char * const disp1dto_clk_parents[] = {
- "sys0pll_clk1",
- "sys1pll_clk1",
- "sys3pll_clk1",
-};
-
-static const struct clk_init_data clk_disp1dto_init = {
- .name = "disp1_dto",
- .ops = &dto_ops,
- .parent_names = disp1dto_clk_parents,
- .num_parents = ARRAY_SIZE(disp1dto_clk_parents),
-};
-
-static struct clk_dto clk_disp1_dto = {
- .inc_offset = SIRFSOC_CLKC_DISP1_DTO_INC,
- .src_offset = SIRFSOC_CLKC_DISP1_DTO_SRC,
- .hw = {
- .init = &clk_disp1dto_init,
- },
-};
-
-static struct atlas7_div_init_data divider_list[] __initdata = {
- /* div_name, parent_name, gate_name, flags, divider_flags, gate_flags, div_offset, shift, width, gate_offset, gate_bit, lock */
- { "sys0pll_qa1", "sys0pll_fixdiv", "sys0pll_a1", 0, 0, 0, SIRFSOC_CLKC_USBPHY_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_USBPHY_CLKDIV_ENA, 0, &usbphy_div_lock },
- { "sys1pll_qa1", "sys1pll_fixdiv", "sys1pll_a1", 0, 0, 0, SIRFSOC_CLKC_USBPHY_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_USBPHY_CLKDIV_ENA, 4, &usbphy_div_lock },
- { "sys2pll_qa1", "sys2pll_fixdiv", "sys2pll_a1", 0, 0, 0, SIRFSOC_CLKC_USBPHY_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_USBPHY_CLKDIV_ENA, 8, &usbphy_div_lock },
- { "sys3pll_qa1", "sys3pll_fixdiv", "sys3pll_a1", 0, 0, 0, SIRFSOC_CLKC_USBPHY_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_USBPHY_CLKDIV_ENA, 12, &usbphy_div_lock },
- { "sys0pll_qa2", "sys0pll_fixdiv", "sys0pll_a2", 0, 0, 0, SIRFSOC_CLKC_BTSS_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_BTSS_CLKDIV_ENA, 0, &btss_div_lock },
- { "sys1pll_qa2", "sys1pll_fixdiv", "sys1pll_a2", 0, 0, 0, SIRFSOC_CLKC_BTSS_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_BTSS_CLKDIV_ENA, 4, &btss_div_lock },
- { "sys2pll_qa2", "sys2pll_fixdiv", "sys2pll_a2", 0, 0, 0, SIRFSOC_CLKC_BTSS_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_BTSS_CLKDIV_ENA, 8, &btss_div_lock },
- { "sys3pll_qa2", "sys3pll_fixdiv", "sys3pll_a2", 0, 0, 0, SIRFSOC_CLKC_BTSS_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_BTSS_CLKDIV_ENA, 12, &btss_div_lock },
- { "sys0pll_qa3", "sys0pll_fixdiv", "sys0pll_a3", 0, 0, 0, SIRFSOC_CLKC_RGMII_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_RGMII_CLKDIV_ENA, 0, &rgmii_div_lock },
- { "sys1pll_qa3", "sys1pll_fixdiv", "sys1pll_a3", 0, 0, 0, SIRFSOC_CLKC_RGMII_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_RGMII_CLKDIV_ENA, 4, &rgmii_div_lock },
- { "sys2pll_qa3", "sys2pll_fixdiv", "sys2pll_a3", 0, 0, 0, SIRFSOC_CLKC_RGMII_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_RGMII_CLKDIV_ENA, 8, &rgmii_div_lock },
- { "sys3pll_qa3", "sys3pll_fixdiv", "sys3pll_a3", 0, 0, 0, SIRFSOC_CLKC_RGMII_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_RGMII_CLKDIV_ENA, 12, &rgmii_div_lock },
- { "sys0pll_qa4", "sys0pll_fixdiv", "sys0pll_a4", 0, 0, 0, SIRFSOC_CLKC_CPU_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_CPU_CLKDIV_ENA, 0, &cpu_div_lock },
- { "sys1pll_qa4", "sys1pll_fixdiv", "sys1pll_a4", 0, 0, CLK_IGNORE_UNUSED, SIRFSOC_CLKC_CPU_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_CPU_CLKDIV_ENA, 4, &cpu_div_lock },
- { "sys0pll_qa5", "sys0pll_fixdiv", "sys0pll_a5", 0, 0, 0, SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA, 0, &sdphy01_div_lock },
- { "sys1pll_qa5", "sys1pll_fixdiv", "sys1pll_a5", 0, 0, 0, SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA, 4, &sdphy01_div_lock },
- { "sys2pll_qa5", "sys2pll_fixdiv", "sys2pll_a5", 0, 0, 0, SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA, 8, &sdphy01_div_lock },
- { "sys3pll_qa5", "sys3pll_fixdiv", "sys3pll_a5", 0, 0, 0, SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA, 12, &sdphy01_div_lock },
- { "sys0pll_qa6", "sys0pll_fixdiv", "sys0pll_a6", 0, 0, 0, SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA, 0, &sdphy23_div_lock },
- { "sys1pll_qa6", "sys1pll_fixdiv", "sys1pll_a6", 0, 0, 0, SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA, 4, &sdphy23_div_lock },
- { "sys2pll_qa6", "sys2pll_fixdiv", "sys2pll_a6", 0, 0, 0, SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA, 8, &sdphy23_div_lock },
- { "sys3pll_qa6", "sys3pll_fixdiv", "sys3pll_a6", 0, 0, 0, SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA, 12, &sdphy23_div_lock },
- { "sys0pll_qa7", "sys0pll_fixdiv", "sys0pll_a7", 0, 0, 0, SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA, 0, &sdphy45_div_lock },
- { "sys1pll_qa7", "sys1pll_fixdiv", "sys1pll_a7", 0, 0, 0, SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA, 4, &sdphy45_div_lock },
- { "sys2pll_qa7", "sys2pll_fixdiv", "sys2pll_a7", 0, 0, 0, SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA, 8, &sdphy45_div_lock },
- { "sys3pll_qa7", "sys3pll_fixdiv", "sys3pll_a7", 0, 0, 0, SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA, 12, &sdphy45_div_lock },
- { "sys0pll_qa8", "sys0pll_fixdiv", "sys0pll_a8", 0, 0, 0, SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA, 0, &sdphy67_div_lock },
- { "sys1pll_qa8", "sys1pll_fixdiv", "sys1pll_a8", 0, 0, 0, SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA, 4, &sdphy67_div_lock },
- { "sys2pll_qa8", "sys2pll_fixdiv", "sys2pll_a8", 0, 0, 0, SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA, 8, &sdphy67_div_lock },
- { "sys3pll_qa8", "sys3pll_fixdiv", "sys3pll_a8", 0, 0, 0, SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA, 12, &sdphy67_div_lock },
- { "sys0pll_qa9", "sys0pll_fixdiv", "sys0pll_a9", 0, 0, 0, SIRFSOC_CLKC_CAN_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_CAN_CLKDIV_ENA, 0, &can_div_lock },
- { "sys1pll_qa9", "sys1pll_fixdiv", "sys1pll_a9", 0, 0, 0, SIRFSOC_CLKC_CAN_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_CAN_CLKDIV_ENA, 4, &can_div_lock },
- { "sys2pll_qa9", "sys2pll_fixdiv", "sys2pll_a9", 0, 0, 0, SIRFSOC_CLKC_CAN_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_CAN_CLKDIV_ENA, 8, &can_div_lock },
- { "sys3pll_qa9", "sys3pll_fixdiv", "sys3pll_a9", 0, 0, 0, SIRFSOC_CLKC_CAN_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_CAN_CLKDIV_ENA, 12, &can_div_lock },
- { "sys0pll_qa10", "sys0pll_fixdiv", "sys0pll_a10", 0, 0, 0, SIRFSOC_CLKC_DEINT_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_DEINT_CLKDIV_ENA, 0, &deint_div_lock },
- { "sys1pll_qa10", "sys1pll_fixdiv", "sys1pll_a10", 0, 0, 0, SIRFSOC_CLKC_DEINT_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_DEINT_CLKDIV_ENA, 4, &deint_div_lock },
- { "sys2pll_qa10", "sys2pll_fixdiv", "sys2pll_a10", 0, 0, 0, SIRFSOC_CLKC_DEINT_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_DEINT_CLKDIV_ENA, 8, &deint_div_lock },
- { "sys3pll_qa10", "sys3pll_fixdiv", "sys3pll_a10", 0, 0, 0, SIRFSOC_CLKC_DEINT_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_DEINT_CLKDIV_ENA, 12, &deint_div_lock },
- { "sys0pll_qa11", "sys0pll_fixdiv", "sys0pll_a11", 0, 0, 0, SIRFSOC_CLKC_NAND_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_NAND_CLKDIV_ENA, 0, &nand_div_lock },
- { "sys1pll_qa11", "sys1pll_fixdiv", "sys1pll_a11", 0, 0, 0, SIRFSOC_CLKC_NAND_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_NAND_CLKDIV_ENA, 4, &nand_div_lock },
- { "sys2pll_qa11", "sys2pll_fixdiv", "sys2pll_a11", 0, 0, 0, SIRFSOC_CLKC_NAND_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_NAND_CLKDIV_ENA, 8, &nand_div_lock },
- { "sys3pll_qa11", "sys3pll_fixdiv", "sys3pll_a11", 0, 0, 0, SIRFSOC_CLKC_NAND_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_NAND_CLKDIV_ENA, 12, &nand_div_lock },
- { "sys0pll_qa12", "sys0pll_fixdiv", "sys0pll_a12", 0, 0, 0, SIRFSOC_CLKC_DISP0_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_DISP0_CLKDIV_ENA, 0, &disp0_div_lock },
- { "sys1pll_qa12", "sys1pll_fixdiv", "sys1pll_a12", 0, 0, 0, SIRFSOC_CLKC_DISP0_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_DISP0_CLKDIV_ENA, 4, &disp0_div_lock },
- { "sys2pll_qa12", "sys2pll_fixdiv", "sys2pll_a12", 0, 0, 0, SIRFSOC_CLKC_DISP0_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_DISP0_CLKDIV_ENA, 8, &disp0_div_lock },
- { "sys3pll_qa12", "sys3pll_fixdiv", "sys3pll_a12", 0, 0, 0, SIRFSOC_CLKC_DISP0_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_DISP0_CLKDIV_ENA, 12, &disp0_div_lock },
- { "sys0pll_qa13", "sys0pll_fixdiv", "sys0pll_a13", 0, 0, 0, SIRFSOC_CLKC_DISP1_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_DISP1_CLKDIV_ENA, 0, &disp1_div_lock },
- { "sys1pll_qa13", "sys1pll_fixdiv", "sys1pll_a13", 0, 0, 0, SIRFSOC_CLKC_DISP1_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_DISP1_CLKDIV_ENA, 4, &disp1_div_lock },
- { "sys2pll_qa13", "sys2pll_fixdiv", "sys2pll_a13", 0, 0, 0, SIRFSOC_CLKC_DISP1_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_DISP1_CLKDIV_ENA, 8, &disp1_div_lock },
- { "sys3pll_qa13", "sys3pll_fixdiv", "sys3pll_a13", 0, 0, 0, SIRFSOC_CLKC_DISP1_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_DISP1_CLKDIV_ENA, 12, &disp1_div_lock },
- { "sys0pll_qa14", "sys0pll_fixdiv", "sys0pll_a14", 0, 0, 0, SIRFSOC_CLKC_GPU_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_GPU_CLKDIV_ENA, 0, &gpu_div_lock },
- { "sys1pll_qa14", "sys1pll_fixdiv", "sys1pll_a14", 0, 0, 0, SIRFSOC_CLKC_GPU_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_GPU_CLKDIV_ENA, 4, &gpu_div_lock },
- { "sys2pll_qa14", "sys2pll_fixdiv", "sys2pll_a14", 0, 0, 0, SIRFSOC_CLKC_GPU_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_GPU_CLKDIV_ENA, 8, &gpu_div_lock },
- { "sys3pll_qa14", "sys3pll_fixdiv", "sys3pll_a14", 0, 0, 0, SIRFSOC_CLKC_GPU_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_GPU_CLKDIV_ENA, 12, &gpu_div_lock },
- { "sys0pll_qa15", "sys0pll_fixdiv", "sys0pll_a15", 0, 0, 0, SIRFSOC_CLKC_GNSS_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_GNSS_CLKDIV_ENA, 0, &gnss_div_lock },
- { "sys1pll_qa15", "sys1pll_fixdiv", "sys1pll_a15", 0, 0, 0, SIRFSOC_CLKC_GNSS_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_GNSS_CLKDIV_ENA, 4, &gnss_div_lock },
- { "sys2pll_qa15", "sys2pll_fixdiv", "sys2pll_a15", 0, 0, 0, SIRFSOC_CLKC_GNSS_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_GNSS_CLKDIV_ENA, 8, &gnss_div_lock },
- { "sys3pll_qa15", "sys3pll_fixdiv", "sys3pll_a15", 0, 0, 0, SIRFSOC_CLKC_GNSS_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_GNSS_CLKDIV_ENA, 12, &gnss_div_lock },
- { "sys1pll_qa18", "sys1pll_fixdiv", "sys1pll_a18", 0, 0, 0, SIRFSOC_CLKC_SHARED_DIVIDER_CFG0, 24, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 12, &share_div_lock },
- { "sys1pll_qa19", "sys1pll_fixdiv", "sys1pll_a19", 0, 0, CLK_IGNORE_UNUSED, SIRFSOC_CLKC_SHARED_DIVIDER_CFG0, 16, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 8, &share_div_lock },
- { "sys1pll_qa20", "sys1pll_fixdiv", "sys1pll_a20", 0, 0, 0, SIRFSOC_CLKC_SHARED_DIVIDER_CFG0, 8, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 4, &share_div_lock },
- { "sys2pll_qa20", "sys2pll_fixdiv", "sys2pll_a20", 0, 0, 0, SIRFSOC_CLKC_SHARED_DIVIDER_CFG0, 0, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 0, &share_div_lock },
- { "sys1pll_qa17", "sys1pll_fixdiv", "sys1pll_a17", 0, 0, CLK_IGNORE_UNUSED, SIRFSOC_CLKC_SHARED_DIVIDER_CFG1, 8, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 20, &share_div_lock },
- { "sys0pll_qa20", "sys0pll_fixdiv", "sys0pll_a20", 0, 0, 0, SIRFSOC_CLKC_SHARED_DIVIDER_CFG1, 0, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 16, &share_div_lock },
-};
-
-static const char * const i2s_clk_parents[] = {
- "xin",
- "xinw",
- "audio_dto",
- /* "pwm_i2s01" */
-};
-
-static const char * const usbphy_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a1",
- "sys1pll_a1",
- "sys2pll_a1",
- "sys3pll_a1",
-};
-
-static const char * const btss_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a2",
- "sys1pll_a2",
- "sys2pll_a2",
- "sys3pll_a2",
-};
-
-static const char * const rgmii_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a3",
- "sys1pll_a3",
- "sys2pll_a3",
- "sys3pll_a3",
-};
-
-static const char * const cpu_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a4",
- "sys1pll_a4",
- "cpupll_clk1",
-};
-
-static const char * const sdphy01_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a5",
- "sys1pll_a5",
- "sys2pll_a5",
- "sys3pll_a5",
-};
-
-static const char * const sdphy23_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a6",
- "sys1pll_a6",
- "sys2pll_a6",
- "sys3pll_a6",
-};
-
-static const char * const sdphy45_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a7",
- "sys1pll_a7",
- "sys2pll_a7",
- "sys3pll_a7",
-};
-
-static const char * const sdphy67_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a8",
- "sys1pll_a8",
- "sys2pll_a8",
- "sys3pll_a8",
-};
-
-static const char * const can_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a9",
- "sys1pll_a9",
- "sys2pll_a9",
- "sys3pll_a9",
-};
-
-static const char * const deint_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a10",
- "sys1pll_a10",
- "sys2pll_a10",
- "sys3pll_a10",
-};
-
-static const char * const nand_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a11",
- "sys1pll_a11",
- "sys2pll_a11",
- "sys3pll_a11",
-};
-
-static const char * const disp0_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a12",
- "sys1pll_a12",
- "sys2pll_a12",
- "sys3pll_a12",
- "disp0_dto",
-};
-
-static const char * const disp1_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a13",
- "sys1pll_a13",
- "sys2pll_a13",
- "sys3pll_a13",
- "disp1_dto",
-};
-
-static const char * const gpu_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a14",
- "sys1pll_a14",
- "sys2pll_a14",
- "sys3pll_a14",
-};
-
-static const char * const gnss_clk_parents[] = {
- "xin",
- "xinw",
- "sys0pll_a15",
- "sys1pll_a15",
- "sys2pll_a15",
- "sys3pll_a15",
-};
-
-static const char * const sys_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const io_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const g2d_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const jpenc_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const vdec_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const gmac_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const usb_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const kas_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const sec_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const sdr_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const vip_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const nocd_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const nocr_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static const char * const tpiu_clk_parents[] = {
- "xin",
- "xinw",
- "sys2pll_a20",
- "sys1pll_a20",
- "sys1pll_a19",
- "sys1pll_a18",
- "sys0pll_a20",
- "sys1pll_a17",
-};
-
-static struct atlas7_mux_init_data mux_list[] __initdata = {
- /* mux_name, parent_names, parent_num, flags, mux_flags, mux_offset, shift, width */
- { "i2s_mux", i2s_clk_parents, ARRAY_SIZE(i2s_clk_parents), 0, 0, SIRFSOC_CLKC_I2S_CLK_SEL, 0, 2 },
- { "usbphy_mux", usbphy_clk_parents, ARRAY_SIZE(usbphy_clk_parents), 0, 0, SIRFSOC_CLKC_I2S_CLK_SEL, 0, 3 },
- { "btss_mux", btss_clk_parents, ARRAY_SIZE(btss_clk_parents), 0, 0, SIRFSOC_CLKC_BTSS_CLK_SEL, 0, 3 },
- { "rgmii_mux", rgmii_clk_parents, ARRAY_SIZE(rgmii_clk_parents), 0, 0, SIRFSOC_CLKC_RGMII_CLK_SEL, 0, 3 },
- { "cpu_mux", cpu_clk_parents, ARRAY_SIZE(cpu_clk_parents), 0, 0, SIRFSOC_CLKC_CPU_CLK_SEL, 0, 3 },
- { "sdphy01_mux", sdphy01_clk_parents, ARRAY_SIZE(sdphy01_clk_parents), 0, 0, SIRFSOC_CLKC_SDPHY01_CLK_SEL, 0, 3 },
- { "sdphy23_mux", sdphy23_clk_parents, ARRAY_SIZE(sdphy23_clk_parents), 0, 0, SIRFSOC_CLKC_SDPHY23_CLK_SEL, 0, 3 },
- { "sdphy45_mux", sdphy45_clk_parents, ARRAY_SIZE(sdphy45_clk_parents), 0, 0, SIRFSOC_CLKC_SDPHY45_CLK_SEL, 0, 3 },
- { "sdphy67_mux", sdphy67_clk_parents, ARRAY_SIZE(sdphy67_clk_parents), 0, 0, SIRFSOC_CLKC_SDPHY67_CLK_SEL, 0, 3 },
- { "can_mux", can_clk_parents, ARRAY_SIZE(can_clk_parents), 0, 0, SIRFSOC_CLKC_CAN_CLK_SEL, 0, 3 },
- { "deint_mux", deint_clk_parents, ARRAY_SIZE(deint_clk_parents), 0, 0, SIRFSOC_CLKC_DEINT_CLK_SEL, 0, 3 },
- { "nand_mux", nand_clk_parents, ARRAY_SIZE(nand_clk_parents), 0, 0, SIRFSOC_CLKC_NAND_CLK_SEL, 0, 3 },
- { "disp0_mux", disp0_clk_parents, ARRAY_SIZE(disp0_clk_parents), 0, 0, SIRFSOC_CLKC_DISP0_CLK_SEL, 0, 3 },
- { "disp1_mux", disp1_clk_parents, ARRAY_SIZE(disp1_clk_parents), 0, 0, SIRFSOC_CLKC_DISP1_CLK_SEL, 0, 3 },
- { "gpu_mux", gpu_clk_parents, ARRAY_SIZE(gpu_clk_parents), 0, 0, SIRFSOC_CLKC_GPU_CLK_SEL, 0, 3 },
- { "gnss_mux", gnss_clk_parents, ARRAY_SIZE(gnss_clk_parents), 0, 0, SIRFSOC_CLKC_GNSS_CLK_SEL, 0, 3 },
- { "sys_mux", sys_clk_parents, ARRAY_SIZE(sys_clk_parents), 0, 0, SIRFSOC_CLKC_SYS_CLK_SEL, 0, 3 },
- { "io_mux", io_clk_parents, ARRAY_SIZE(io_clk_parents), 0, 0, SIRFSOC_CLKC_IO_CLK_SEL, 0, 3 },
- { "g2d_mux", g2d_clk_parents, ARRAY_SIZE(g2d_clk_parents), 0, 0, SIRFSOC_CLKC_G2D_CLK_SEL, 0, 3 },
- { "jpenc_mux", jpenc_clk_parents, ARRAY_SIZE(jpenc_clk_parents), 0, 0, SIRFSOC_CLKC_JPENC_CLK_SEL, 0, 3 },
- { "vdec_mux", vdec_clk_parents, ARRAY_SIZE(vdec_clk_parents), 0, 0, SIRFSOC_CLKC_VDEC_CLK_SEL, 0, 3 },
- { "gmac_mux", gmac_clk_parents, ARRAY_SIZE(gmac_clk_parents), 0, 0, SIRFSOC_CLKC_GMAC_CLK_SEL, 0, 3 },
- { "usb_mux", usb_clk_parents, ARRAY_SIZE(usb_clk_parents), 0, 0, SIRFSOC_CLKC_USB_CLK_SEL, 0, 3 },
- { "kas_mux", kas_clk_parents, ARRAY_SIZE(kas_clk_parents), 0, 0, SIRFSOC_CLKC_KAS_CLK_SEL, 0, 3 },
- { "sec_mux", sec_clk_parents, ARRAY_SIZE(sec_clk_parents), 0, 0, SIRFSOC_CLKC_SEC_CLK_SEL, 0, 3 },
- { "sdr_mux", sdr_clk_parents, ARRAY_SIZE(sdr_clk_parents), 0, 0, SIRFSOC_CLKC_SDR_CLK_SEL, 0, 3 },
- { "vip_mux", vip_clk_parents, ARRAY_SIZE(vip_clk_parents), 0, 0, SIRFSOC_CLKC_VIP_CLK_SEL, 0, 3 },
- { "nocd_mux", nocd_clk_parents, ARRAY_SIZE(nocd_clk_parents), 0, 0, SIRFSOC_CLKC_NOCD_CLK_SEL, 0, 3 },
- { "nocr_mux", nocr_clk_parents, ARRAY_SIZE(nocr_clk_parents), 0, 0, SIRFSOC_CLKC_NOCR_CLK_SEL, 0, 3 },
- { "tpiu_mux", tpiu_clk_parents, ARRAY_SIZE(tpiu_clk_parents), 0, 0, SIRFSOC_CLKC_TPIU_CLK_SEL, 0, 3 },
-};
-
- /* new units must be added at the tail of the list */
-static struct atlas7_unit_init_data unit_list[] __initdata = {
- /* index, unit_name, parent_name, flags, regofs, bit, type, idle_bit, lock */
- { 0, "audmscm_kas", "kas_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 0, 0, 0, &root0_gate_lock },
- { 1, "gnssm_gnss", "gnss_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 1, 0, 0, &root0_gate_lock },
- { 2, "gpum_gpu", "gpu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 2, 0, 0, &root0_gate_lock },
- { 3, "mediam_g2d", "g2d_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 3, 0, 0, &root0_gate_lock },
- { 4, "mediam_jpenc", "jpenc_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 4, 0, 0, &root0_gate_lock },
- { 5, "vdifm_disp0", "disp0_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 5, 0, 0, &root0_gate_lock },
- { 6, "vdifm_disp1", "disp1_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 6, 0, 0, &root0_gate_lock },
- { 7, "audmscm_i2s", "i2s_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 8, 0, 0, &root0_gate_lock },
- { 8, "audmscm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 11, 0, 0, &root0_gate_lock },
- { 9, "vdifm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 12, 0, 0, &root0_gate_lock },
- { 10, "gnssm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 13, 0, 0, &root0_gate_lock },
- { 11, "mediam_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 14, 0, 0, &root0_gate_lock },
- { 12, "btm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 17, 0, 0, &root0_gate_lock },
- { 13, "mediam_sdphy01", "sdphy01_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 18, 0, 0, &root0_gate_lock },
- { 14, "vdifm_sdphy23", "sdphy23_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 19, 0, 0, &root0_gate_lock },
- { 15, "vdifm_sdphy45", "sdphy45_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 20, 0, 0, &root0_gate_lock },
- { 16, "vdifm_sdphy67", "sdphy67_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 21, 0, 0, &root0_gate_lock },
- { 17, "audmscm_xin", "xin", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 22, 0, 0, &root0_gate_lock },
- { 18, "mediam_nand", "nand_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 27, 0, 0, &root0_gate_lock },
- { 19, "gnssm_sec", "sec_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 28, 0, 0, &root0_gate_lock },
- { 20, "cpum_cpu", "cpu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 29, 0, 0, &root0_gate_lock },
- { 21, "gnssm_xin", "xin", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 30, 0, 0, &root0_gate_lock },
- { 22, "vdifm_vip", "vip_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 31, 0, 0, &root0_gate_lock },
- { 23, "btm_btss", "btss_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 0, 0, 0, &root1_gate_lock },
- { 24, "mediam_usbphy", "usbphy_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 1, 0, 0, &root1_gate_lock },
- { 25, "rtcm_kas", "kas_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 2, 0, 0, &root1_gate_lock },
- { 26, "audmscm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 3, 0, 0, &root1_gate_lock },
- { 27, "vdifm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 4, 0, 0, &root1_gate_lock },
- { 28, "gnssm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 5, 0, 0, &root1_gate_lock },
- { 29, "mediam_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 6, 0, 0, &root1_gate_lock },
- { 30, "cpum_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 8, 0, 0, &root1_gate_lock },
- { 31, "gpum_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 9, 0, 0, &root1_gate_lock },
- { 32, "audmscm_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 11, 0, 0, &root1_gate_lock },
- { 33, "vdifm_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 12, 0, 0, &root1_gate_lock },
- { 34, "gnssm_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 13, 0, 0, &root1_gate_lock },
- { 35, "mediam_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 14, 0, 0, &root1_gate_lock },
- { 36, "ddrm_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 15, 0, 0, &root1_gate_lock },
- { 37, "cpum_tpiu", "tpiu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 16, 0, 0, &root1_gate_lock },
- { 38, "gpum_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 17, 0, 0, &root1_gate_lock },
- { 39, "gnssm_rgmii", "rgmii_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 20, 0, 0, &root1_gate_lock },
- { 40, "mediam_vdec", "vdec_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 21, 0, 0, &root1_gate_lock },
- { 41, "gpum_sdr", "sdr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 22, 0, 0, &root1_gate_lock },
- { 42, "vdifm_deint", "deint_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 23, 0, 0, &root1_gate_lock },
- { 43, "gnssm_can", "can_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 26, 0, 0, &root1_gate_lock },
- { 44, "mediam_usb", "usb_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 28, 0, 0, &root1_gate_lock },
- { 45, "gnssm_gmac", "gmac_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 29, 0, 0, &root1_gate_lock },
- { 46, "cvd_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 0, CLK_UNIT_NOC_CLOCK, 4, &leaf1_gate_lock },
- { 47, "timer_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 1, 0, 0, &leaf1_gate_lock },
- { 48, "pulse_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 2, 0, 0, &leaf1_gate_lock },
- { 49, "tsc_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 3, 0, 0, &leaf1_gate_lock },
- { 50, "tsc_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 21, 0, 0, &leaf1_gate_lock },
- { 51, "ioctop_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 4, 0, 0, &leaf1_gate_lock },
- { 52, "rsc_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 5, 0, 0, &leaf1_gate_lock },
- { 53, "dvm_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 6, CLK_UNIT_NOC_SOCKET, 7, &leaf1_gate_lock },
- { 54, "lvds_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 7, CLK_UNIT_NOC_SOCKET, 8, &leaf1_gate_lock },
- { 55, "kas_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 8, CLK_UNIT_NOC_CLOCK, 2, &leaf1_gate_lock },
- { 56, "ac97_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 9, 0, 0, &leaf1_gate_lock },
- { 57, "usp0_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 10, CLK_UNIT_NOC_SOCKET, 4, &leaf1_gate_lock },
- { 58, "usp1_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 11, CLK_UNIT_NOC_SOCKET, 5, &leaf1_gate_lock },
- { 59, "usp2_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 12, CLK_UNIT_NOC_SOCKET, 6, &leaf1_gate_lock },
- { 60, "dmac2_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 13, CLK_UNIT_NOC_SOCKET, 1, &leaf1_gate_lock },
- { 61, "dmac3_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 14, CLK_UNIT_NOC_SOCKET, 2, &leaf1_gate_lock },
- { 62, "audioif_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 15, CLK_UNIT_NOC_SOCKET, 0, &leaf1_gate_lock },
- { 63, "i2s1_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 17, CLK_UNIT_NOC_CLOCK, 2, &leaf1_gate_lock },
- { 64, "thaudmscm_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 22, 0, 0, &leaf1_gate_lock },
- { 65, "analogtest_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 23, 0, 0, &leaf1_gate_lock },
- { 66, "sys2pci_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 0, CLK_UNIT_NOC_CLOCK, 20, &leaf2_gate_lock },
- { 67, "pciarb_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 1, 0, 0, &leaf2_gate_lock },
- { 68, "pcicopy_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 2, 0, 0, &leaf2_gate_lock },
- { 69, "rom_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 3, 0, 0, &leaf2_gate_lock },
- { 70, "sdio23_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 4, 0, 0, &leaf2_gate_lock },
- { 71, "sdio45_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 5, 0, 0, &leaf2_gate_lock },
- { 72, "sdio67_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 6, 0, 0, &leaf2_gate_lock },
- { 73, "vip1_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 7, 0, 0, &leaf2_gate_lock },
- { 74, "vip1_vip", "vdifm_vip", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 16, CLK_UNIT_NOC_CLOCK, 21, &leaf2_gate_lock },
- { 75, "sdio23_sdphy23", "vdifm_sdphy23", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 8, 0, 0, &leaf2_gate_lock },
- { 76, "sdio45_sdphy45", "vdifm_sdphy45", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 9, 0, 0, &leaf2_gate_lock },
- { 77, "sdio67_sdphy67", "vdifm_sdphy67", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 10, 0, 0, &leaf2_gate_lock },
- { 78, "vpp0_disp0", "vdifm_disp0", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 11, CLK_UNIT_NOC_CLOCK, 22, &leaf2_gate_lock },
- { 79, "lcd0_disp0", "vdifm_disp0", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 12, CLK_UNIT_NOC_CLOCK, 18, &leaf2_gate_lock },
- { 80, "vpp1_disp1", "vdifm_disp1", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 13, CLK_UNIT_NOC_CLOCK, 23, &leaf2_gate_lock },
- { 81, "lcd1_disp1", "vdifm_disp1", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 14, CLK_UNIT_NOC_CLOCK, 19, &leaf2_gate_lock },
- { 82, "dcu_deint", "vdifm_deint", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 15, CLK_UNIT_NOC_CLOCK, 17, &leaf2_gate_lock },
- { 83, "vdifm_dapa_r_nocr", "vdifm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 17, 0, 0, &leaf2_gate_lock },
- { 84, "gpio1_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 18, 0, 0, &leaf2_gate_lock },
- { 85, "thvdifm_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 19, 0, 0, &leaf2_gate_lock },
- { 86, "gmac_rgmii", "gnssm_rgmii", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 0, 0, 0, &leaf3_gate_lock },
- { 87, "gmac_gmac", "gnssm_gmac", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 1, CLK_UNIT_NOC_CLOCK, 10, &leaf3_gate_lock },
- { 88, "uart1_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 2, CLK_UNIT_NOC_SOCKET, 14, &leaf3_gate_lock },
- { 89, "dmac0_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 3, CLK_UNIT_NOC_SOCKET, 11, &leaf3_gate_lock },
- { 90, "uart0_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 4, CLK_UNIT_NOC_SOCKET, 13, &leaf3_gate_lock },
- { 91, "uart2_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 5, CLK_UNIT_NOC_SOCKET, 15, &leaf3_gate_lock },
- { 92, "uart3_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 6, CLK_UNIT_NOC_SOCKET, 16, &leaf3_gate_lock },
- { 93, "uart4_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 7, CLK_UNIT_NOC_SOCKET, 17, &leaf3_gate_lock },
- { 94, "uart5_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 8, CLK_UNIT_NOC_SOCKET, 18, &leaf3_gate_lock },
- { 95, "spi1_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 9, CLK_UNIT_NOC_SOCKET, 12, &leaf3_gate_lock },
- { 96, "gnss_gnss", "gnssm_gnss", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 10, 0, 0, &leaf3_gate_lock },
- { 97, "canbus1_can", "gnssm_can", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 12, CLK_UNIT_NOC_CLOCK, 7, &leaf3_gate_lock },
- { 98, "ccsec_sec", "gnssm_sec", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 15, CLK_UNIT_NOC_CLOCK, 9, &leaf3_gate_lock },
- { 99, "ccpub_sec", "gnssm_sec", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 16, CLK_UNIT_NOC_CLOCK, 8, &leaf3_gate_lock },
- { 100, "gnssm_dapa_r_nocr", "gnssm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 13, 0, 0, &leaf3_gate_lock },
- { 101, "thgnssm_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 14, 0, 0, &leaf3_gate_lock },
- { 102, "media_vdec", "mediam_vdec", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 0, CLK_UNIT_NOC_CLOCK, 3, &leaf4_gate_lock },
- { 103, "media_jpenc", "mediam_jpenc", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 1, CLK_UNIT_NOC_CLOCK, 1, &leaf4_gate_lock },
- { 104, "g2d_g2d", "mediam_g2d", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 2, CLK_UNIT_NOC_CLOCK, 12, &leaf4_gate_lock },
- { 105, "i2c0_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 3, CLK_UNIT_NOC_SOCKET, 21, &leaf4_gate_lock },
- { 106, "i2c1_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 4, CLK_UNIT_NOC_SOCKET, 20, &leaf4_gate_lock },
- { 107, "gpio0_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 5, CLK_UNIT_NOC_SOCKET, 19, &leaf4_gate_lock },
- { 108, "nand_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 6, 0, 0, &leaf4_gate_lock },
- { 109, "sdio01_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 7, 0, 0, &leaf4_gate_lock },
- { 110, "sys2pci2_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 8, CLK_UNIT_NOC_CLOCK, 13, &leaf4_gate_lock },
- { 111, "sdio01_sdphy01", "mediam_sdphy01", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 9, 0, 0, &leaf4_gate_lock },
- { 112, "nand_nand", "mediam_nand", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 10, CLK_UNIT_NOC_CLOCK, 14, &leaf4_gate_lock },
- { 113, "usb0_usb", "mediam_usb", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 11, CLK_UNIT_NOC_CLOCK, 15, &leaf4_gate_lock },
- { 114, "usb1_usb", "mediam_usb", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 12, CLK_UNIT_NOC_CLOCK, 16, &leaf4_gate_lock },
- { 115, "usbphy0_usbphy", "mediam_usbphy", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 13, 0, 0, &leaf4_gate_lock },
- { 116, "usbphy1_usbphy", "mediam_usbphy", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 14, 0, 0, &leaf4_gate_lock },
- { 117, "thmediam_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 15, 0, 0, &leaf4_gate_lock },
- { 118, "memc_mem", "mempll_clk1", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 0, 0, 0, &leaf5_gate_lock },
- { 119, "dapa_mem", "mempll_clk1", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 1, 0, 0, &leaf5_gate_lock },
- { 120, "nocddrm_nocr", "ddrm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 2, 0, 0, &leaf5_gate_lock },
- { 121, "thddrm_nocr", "ddrm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 3, 0, 0, &leaf5_gate_lock },
- { 122, "spram1_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 0, CLK_UNIT_NOC_SOCKET, 9, &leaf6_gate_lock },
- { 123, "spram2_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 1, CLK_UNIT_NOC_SOCKET, 10, &leaf6_gate_lock },
- { 124, "coresight_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 2, 0, 0, &leaf6_gate_lock },
- { 125, "coresight_tpiu", "cpum_tpiu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 3, 0, 0, &leaf6_gate_lock },
- { 126, "graphic_gpu", "gpum_gpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 0, CLK_UNIT_NOC_CLOCK, 0, &leaf7_gate_lock },
- { 127, "vss_sdr", "gpum_sdr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 1, CLK_UNIT_NOC_CLOCK, 11, &leaf7_gate_lock },
- { 128, "thgpum_nocr", "gpum_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 2, 0, 0, &leaf7_gate_lock },
- { 129, "a7ca_btss", "btm_btss", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 1, 0, 0, &leaf8_gate_lock },
- { 130, "dmac4_io", "a7ca_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 2, 0, 0, &leaf8_gate_lock },
- { 131, "uart6_io", "dmac4_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 3, 0, 0, &leaf8_gate_lock },
- { 132, "usp3_io", "dmac4_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 4, 0, 0, &leaf8_gate_lock },
- { 133, "a7ca_io", "noc_btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 5, 0, 0, &leaf8_gate_lock },
- { 134, "noc_btm_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 6, 0, 0, &leaf8_gate_lock },
- { 135, "thbtm_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 7, 0, 0, &leaf8_gate_lock },
- { 136, "btslow", "xinw_fixdiv_btslow", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 25, 0, 0, &root1_gate_lock },
- { 137, "a7ca_btslow", "btslow", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 0, 0, 0, &leaf8_gate_lock },
- { 138, "pwm_io", "io_mux", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 0, 0, 0, &leaf0_gate_lock },
- { 139, "pwm_xin", "xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 1, 0, 0, &leaf0_gate_lock },
- { 140, "pwm_xinw", "xinw", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 2, 0, 0, &leaf0_gate_lock },
- { 141, "thcgum_sys", "sys_mux", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 3, 0, 0, &leaf0_gate_lock },
-};
-
-static struct clk *atlas7_clks[ARRAY_SIZE(unit_list) + ARRAY_SIZE(mux_list)];
-
-static int unit_clk_is_enabled(struct clk_hw *hw)
-{
- struct clk_unit *clk = to_unitclk(hw);
- u32 reg;
-
- reg = clk->regofs + SIRFSOC_CLKC_ROOT_CLK_EN0_STAT - SIRFSOC_CLKC_ROOT_CLK_EN0_SET;
-
- return !!(clkc_readl(reg) & BIT(clk->bit));
-}
-
-static int unit_clk_enable(struct clk_hw *hw)
-{
- u32 reg;
- struct clk_unit *clk = to_unitclk(hw);
- unsigned long flags;
-
- reg = clk->regofs;
-
- spin_lock_irqsave(clk->lock, flags);
- clkc_writel(BIT(clk->bit), reg);
- if (clk->type == CLK_UNIT_NOC_CLOCK)
- clkc_writel(BIT(clk->idle_bit), SIRFSOC_NOC_CLK_IDLEREQ_CLR);
- else if (clk->type == CLK_UNIT_NOC_SOCKET)
- clkc_writel(BIT(clk->idle_bit), SIRFSOC_NOC_CLK_SLVRDY_SET);
-
- spin_unlock_irqrestore(clk->lock, flags);
- return 0;
-}
-
-static void unit_clk_disable(struct clk_hw *hw)
-{
- u32 reg;
- u32 i = 0;
- struct clk_unit *clk = to_unitclk(hw);
- unsigned long flags;
-
- reg = clk->regofs + SIRFSOC_CLKC_ROOT_CLK_EN0_CLR - SIRFSOC_CLKC_ROOT_CLK_EN0_SET;
- spin_lock_irqsave(clk->lock, flags);
- if (clk->type == CLK_UNIT_NOC_CLOCK) {
- clkc_writel(BIT(clk->idle_bit), SIRFSOC_NOC_CLK_IDLEREQ_SET);
- while (!(clkc_readl(SIRFSOC_NOC_CLK_IDLE_STATUS) &
- BIT(clk->idle_bit)) && (i++ < 100)) {
- cpu_relax();
- udelay(10);
- }
-
- if (i == 100) {
- pr_err("unit NoC clock disconnect error: timeout\n");
- /* on timeout, undo the idlereq via the CLR register */
- clkc_writel(BIT(clk->idle_bit), SIRFSOC_NOC_CLK_IDLEREQ_CLR);
- goto err;
- }
-
- } else if (clk->type == CLK_UNIT_NOC_SOCKET)
- clkc_writel(BIT(clk->idle_bit), SIRFSOC_NOC_CLK_SLVRDY_CLR);
-
- clkc_writel(BIT(clk->bit), reg);
-err:
- spin_unlock_irqrestore(clk->lock, flags);
-}
-
-static const struct clk_ops unit_clk_ops = {
- .is_enabled = unit_clk_is_enabled,
- .enable = unit_clk_enable,
- .disable = unit_clk_disable,
-};
-
-static struct clk * __init
-atlas7_unit_clk_register(struct device *dev, const char *name,
- const char * const parent_name, unsigned long flags,
- u32 regofs, u8 bit, u32 type, u8 idle_bit, spinlock_t *lock)
-{
- struct clk *clk;
- struct clk_unit *unit;
- struct clk_init_data init;
-
- unit = kzalloc(sizeof(*unit), GFP_KERNEL);
- if (!unit)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.parent_names = &parent_name;
- init.num_parents = 1;
- init.ops = &unit_clk_ops;
- init.flags = flags;
-
- unit->hw.init = &init;
- unit->regofs = regofs;
- unit->bit = bit;
-
- unit->type = type;
- unit->idle_bit = idle_bit;
- unit->lock = lock;
-
- clk = clk_register(dev, &unit->hw);
- if (IS_ERR(clk))
- kfree(unit);
-
- return clk;
-}
-
-static struct atlas7_reset_desc atlas7_reset_unit[] = {
- { "PWM", 0x0244, 0, 0x0320, 0, &leaf0_gate_lock }, /* 0-5 */
- { "THCGUM", 0x0244, 3, 0x0320, 1, &leaf0_gate_lock },
- { "CVD", 0x04A0, 0, 0x032C, 0, &leaf1_gate_lock },
- { "TIMER", 0x04A0, 1, 0x032C, 1, &leaf1_gate_lock },
- { "PULSEC", 0x04A0, 2, 0x032C, 2, &leaf1_gate_lock },
- { "TSC", 0x04A0, 3, 0x032C, 3, &leaf1_gate_lock },
- { "IOCTOP", 0x04A0, 4, 0x032C, 4, &leaf1_gate_lock }, /* 6-10 */
- { "RSC", 0x04A0, 5, 0x032C, 5, &leaf1_gate_lock },
- { "DVM", 0x04A0, 6, 0x032C, 6, &leaf1_gate_lock },
- { "LVDS", 0x04A0, 7, 0x032C, 7, &leaf1_gate_lock },
- { "KAS", 0x04A0, 8, 0x032C, 8, &leaf1_gate_lock },
- { "AC97", 0x04A0, 9, 0x032C, 9, &leaf1_gate_lock }, /* 11-15 */
- { "USP0", 0x04A0, 10, 0x032C, 10, &leaf1_gate_lock },
- { "USP1", 0x04A0, 11, 0x032C, 11, &leaf1_gate_lock },
- { "USP2", 0x04A0, 12, 0x032C, 12, &leaf1_gate_lock },
- { "DMAC2", 0x04A0, 13, 0x032C, 13, &leaf1_gate_lock },
- { "DMAC3", 0x04A0, 14, 0x032C, 14, &leaf1_gate_lock }, /* 16-20 */
- { "AUDIO", 0x04A0, 15, 0x032C, 15, &leaf1_gate_lock },
- { "I2S1", 0x04A0, 17, 0x032C, 16, &leaf1_gate_lock },
- { "PMU_AUDIO", 0x04A0, 22, 0x032C, 17, &leaf1_gate_lock },
- { "THAUDMSCM", 0x04A0, 23, 0x032C, 18, &leaf1_gate_lock },
- { "SYS2PCI", 0x04B8, 0, 0x0338, 0, &leaf2_gate_lock }, /* 21-25 */
- { "PCIARB", 0x04B8, 1, 0x0338, 1, &leaf2_gate_lock },
- { "PCICOPY", 0x04B8, 2, 0x0338, 2, &leaf2_gate_lock },
- { "ROM", 0x04B8, 3, 0x0338, 3, &leaf2_gate_lock },
- { "SDIO23", 0x04B8, 4, 0x0338, 4, &leaf2_gate_lock },
- { "SDIO45", 0x04B8, 5, 0x0338, 5, &leaf2_gate_lock }, /* 26-30 */
- { "SDIO67", 0x04B8, 6, 0x0338, 6, &leaf2_gate_lock },
- { "VIP1", 0x04B8, 7, 0x0338, 7, &leaf2_gate_lock },
- { "VPP0", 0x04B8, 11, 0x0338, 8, &leaf2_gate_lock },
- { "LCD0", 0x04B8, 12, 0x0338, 9, &leaf2_gate_lock },
- { "VPP1", 0x04B8, 13, 0x0338, 10, &leaf2_gate_lock }, /* 31-35 */
- { "LCD1", 0x04B8, 14, 0x0338, 11, &leaf2_gate_lock },
- { "DCU", 0x04B8, 15, 0x0338, 12, &leaf2_gate_lock },
- { "GPIO", 0x04B8, 18, 0x0338, 13, &leaf2_gate_lock },
- { "DAPA_VDIFM", 0x04B8, 17, 0x0338, 15, &leaf2_gate_lock },
- { "THVDIFM", 0x04B8, 19, 0x0338, 16, &leaf2_gate_lock }, /* 36-40 */
- { "RGMII", 0x04D0, 0, 0x0344, 0, &leaf3_gate_lock },
- { "GMAC", 0x04D0, 1, 0x0344, 1, &leaf3_gate_lock },
- { "UART1", 0x04D0, 2, 0x0344, 2, &leaf3_gate_lock },
- { "DMAC0", 0x04D0, 3, 0x0344, 3, &leaf3_gate_lock },
- { "UART0", 0x04D0, 4, 0x0344, 4, &leaf3_gate_lock }, /* 41-45 */
- { "UART2", 0x04D0, 5, 0x0344, 5, &leaf3_gate_lock },
- { "UART3", 0x04D0, 6, 0x0344, 6, &leaf3_gate_lock },
- { "UART4", 0x04D0, 7, 0x0344, 7, &leaf3_gate_lock },
- { "UART5", 0x04D0, 8, 0x0344, 8, &leaf3_gate_lock },
- { "SPI1", 0x04D0, 9, 0x0344, 9, &leaf3_gate_lock }, /* 46-50 */
- { "GNSS_SYS_M0", 0x04D0, 10, 0x0344, 10, &leaf3_gate_lock },
- { "CANBUS1", 0x04D0, 12, 0x0344, 11, &leaf3_gate_lock },
- { "CCSEC", 0x04D0, 15, 0x0344, 12, &leaf3_gate_lock },
- { "CCPUB", 0x04D0, 16, 0x0344, 13, &leaf3_gate_lock },
- { "DAPA_GNSSM", 0x04D0, 13, 0x0344, 14, &leaf3_gate_lock }, /* 51-55 */
- { "THGNSSM", 0x04D0, 14, 0x0344, 15, &leaf3_gate_lock },
- { "VDEC", 0x04E8, 0, 0x0350, 0, &leaf4_gate_lock },
- { "JPENC", 0x04E8, 1, 0x0350, 1, &leaf4_gate_lock },
- { "G2D", 0x04E8, 2, 0x0350, 2, &leaf4_gate_lock },
- { "I2C0", 0x04E8, 3, 0x0350, 3, &leaf4_gate_lock }, /* 56-60 */
- { "I2C1", 0x04E8, 4, 0x0350, 4, &leaf4_gate_lock },
- { "GPIO0", 0x04E8, 5, 0x0350, 5, &leaf4_gate_lock },
- { "NAND", 0x04E8, 6, 0x0350, 6, &leaf4_gate_lock },
- { "SDIO01", 0x04E8, 7, 0x0350, 7, &leaf4_gate_lock },
- { "SYS2PCI2", 0x04E8, 8, 0x0350, 8, &leaf4_gate_lock }, /* 61-65 */
- { "USB0", 0x04E8, 11, 0x0350, 9, &leaf4_gate_lock },
- { "USB1", 0x04E8, 12, 0x0350, 10, &leaf4_gate_lock },
- { "THMEDIAM", 0x04E8, 15, 0x0350, 11, &leaf4_gate_lock },
- { "MEMC_DDRPHY", 0x0500, 0, 0x035C, 0, &leaf5_gate_lock },
- { "MEMC_UPCTL", 0x0500, 0, 0x035C, 1, &leaf5_gate_lock }, /* 66-70 */
- { "DAPA_MEM", 0x0500, 1, 0x035C, 2, &leaf5_gate_lock },
- { "MEMC_MEMDIV", 0x0500, 0, 0x035C, 3, &leaf5_gate_lock },
- { "THDDRM", 0x0500, 3, 0x035C, 4, &leaf5_gate_lock },
- { "CORESIGHT", 0x0518, 3, 0x0368, 13, &leaf6_gate_lock },
- { "THCPUM", 0x0518, 4, 0x0368, 17, &leaf6_gate_lock }, /* 71-75 */
- { "GRAPHIC", 0x0530, 0, 0x0374, 0, &leaf7_gate_lock },
- { "VSS_SDR", 0x0530, 1, 0x0374, 1, &leaf7_gate_lock },
- { "THGPUM", 0x0530, 2, 0x0374, 2, &leaf7_gate_lock },
- { "DMAC4", 0x0548, 2, 0x0380, 1, &leaf8_gate_lock },
- { "UART6", 0x0548, 3, 0x0380, 2, &leaf8_gate_lock }, /* 76- */
- { "USP3", 0x0548, 4, 0x0380, 3, &leaf8_gate_lock },
- { "THBTM", 0x0548, 5, 0x0380, 5, &leaf8_gate_lock },
- { "A7CA", 0x0548, 1, 0x0380, 0, &leaf8_gate_lock },
- { "A7CA_APB", 0x0548, 5, 0x0380, 4, &leaf8_gate_lock },
-};
-
-static int atlas7_reset_module(struct reset_controller_dev *rcdev,
- unsigned long reset_idx)
-{
- struct atlas7_reset_desc *reset = &atlas7_reset_unit[reset_idx];
- unsigned long flags;
-
- /*
- * HW-suggested unit reset sequence:
- * assert sw reset (0)
- * set sw clk_en to 1 if the clock was disabled before reset
- * delay 16 clocks
- * disable clock (sw clk_en = 0)
- * de-assert reset (1)
- * after this sequence, SW decides whether to restore the clock
- */
-
- spin_lock_irqsave(reset->lock, flags);
- /* is the clock currently enabled? */
- if (clkc_readl(reset->clk_ofs + 8) & (1 << reset->clk_bit)) {
- clkc_writel(1 << reset->rst_bit, reset->rst_ofs + 4);
- udelay(2);
- clkc_writel(1 << reset->clk_bit, reset->clk_ofs + 4);
- clkc_writel(1 << reset->rst_bit, reset->rst_ofs);
- /* restore clock enable */
- clkc_writel(1 << reset->clk_bit, reset->clk_ofs);
- } else {
- clkc_writel(1 << reset->rst_bit, reset->rst_ofs + 4);
- clkc_writel(1 << reset->clk_bit, reset->clk_ofs);
- udelay(2);
- clkc_writel(1 << reset->clk_bit, reset->clk_ofs + 4);
- clkc_writel(1 << reset->rst_bit, reset->rst_ofs);
- }
- spin_unlock_irqrestore(reset->lock, flags);
-
- return 0;
-}
-
-static const struct reset_control_ops atlas7_rst_ops = {
- .reset = atlas7_reset_module,
-};
-
-static struct reset_controller_dev atlas7_rst_ctlr = {
- .ops = &atlas7_rst_ops,
- .owner = THIS_MODULE,
- .of_reset_n_cells = 1,
-};
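Since atlas7_rst_ops only implements .reset, a consumer of this controller would go through the self-deasserting reset path of the reset framework; a minimal sketch (assuming the consumer's device tree node carries a "resets" phandle, and with <linux/reset.h> included) looks roughly like this:

	/* Sketch only: "dev" is the consumer's struct device. */
	struct reset_control *rst;
	int ret;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_reset(rst);	/* ends up in atlas7_reset_module() */
	if (ret)
		return ret;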
-
-static void __init atlas7_clk_init(struct device_node *np)
-{
- struct clk *clk;
- struct atlas7_div_init_data *div;
- struct atlas7_mux_init_data *mux;
- struct atlas7_unit_init_data *unit;
- int i;
- int ret;
-
- sirfsoc_clk_vbase = of_iomap(np, 0);
- if (!sirfsoc_clk_vbase)
- panic("unable to map clkc registers\n");
-
- of_node_put(np);
-
- clk = clk_register(NULL, &clk_cpupll.hw);
- BUG_ON(!clk);
- clk = clk_register(NULL, &clk_mempll.hw);
- BUG_ON(!clk);
- clk = clk_register(NULL, &clk_sys0pll.hw);
- BUG_ON(!clk);
- clk = clk_register(NULL, &clk_sys1pll.hw);
- BUG_ON(!clk);
- clk = clk_register(NULL, &clk_sys2pll.hw);
- BUG_ON(!clk);
- clk = clk_register(NULL, &clk_sys3pll.hw);
- BUG_ON(!clk);
-
- clk = clk_register_divider_table(NULL, "cpupll_div1", "cpupll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1, 0, 3, 0,
- pll_div_table, &cpupll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "cpupll_div2", "cpupll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1, 4, 3, 0,
- pll_div_table, &cpupll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "cpupll_div3", "cpupll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1, 8, 3, 0,
- pll_div_table, &cpupll_ctrl1_lock);
- BUG_ON(!clk);
-
- clk = clk_register_divider_table(NULL, "mempll_div1", "mempll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1, 0, 3, 0,
- pll_div_table, &mempll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "mempll_div2", "mempll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1, 4, 3, 0,
- pll_div_table, &mempll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "mempll_div3", "mempll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1, 8, 3, 0,
- pll_div_table, &mempll_ctrl1_lock);
- BUG_ON(!clk);
-
- clk = clk_register_divider_table(NULL, "sys0pll_div1", "sys0pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1, 0, 3, 0,
- pll_div_table, &sys0pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "sys0pll_div2", "sys0pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1, 4, 3, 0,
- pll_div_table, &sys0pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "sys0pll_div3", "sys0pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1, 8, 3, 0,
- pll_div_table, &sys0pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_fixed_factor(NULL, "sys0pll_fixdiv", "sys0pll_vco",
- CLK_SET_RATE_PARENT, 1, 2);
-
- clk = clk_register_divider_table(NULL, "sys1pll_div1", "sys1pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1, 0, 3, 0,
- pll_div_table, &sys1pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "sys1pll_div2", "sys1pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1, 4, 3, 0,
- pll_div_table, &sys1pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "sys1pll_div3", "sys1pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1, 8, 3, 0,
- pll_div_table, &sys1pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_fixed_factor(NULL, "sys1pll_fixdiv", "sys1pll_vco",
- CLK_SET_RATE_PARENT, 1, 2);
-
- clk = clk_register_divider_table(NULL, "sys2pll_div1", "sys2pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1, 0, 3, 0,
- pll_div_table, &sys2pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "sys2pll_div2", "sys2pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1, 4, 3, 0,
- pll_div_table, &sys2pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "sys2pll_div3", "sys2pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1, 8, 3, 0,
- pll_div_table, &sys2pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_fixed_factor(NULL, "sys2pll_fixdiv", "sys2pll_vco",
- CLK_SET_RATE_PARENT, 1, 2);
-
- clk = clk_register_divider_table(NULL, "sys3pll_div1", "sys3pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1, 0, 3, 0,
- pll_div_table, &sys3pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "sys3pll_div2", "sys3pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1, 4, 3, 0,
- pll_div_table, &sys3pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_divider_table(NULL, "sys3pll_div3", "sys3pll_vco", 0,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1, 8, 3, 0,
- pll_div_table, &sys3pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_fixed_factor(NULL, "sys3pll_fixdiv", "sys3pll_vco",
- CLK_SET_RATE_PARENT, 1, 2);
-
- BUG_ON(!clk);
- clk = clk_register_fixed_factor(NULL, "xinw_fixdiv_btslow", "xinw",
- CLK_SET_RATE_PARENT, 1, 4);
-
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "cpupll_clk1", "cpupll_div1",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1,
- 12, 0, &cpupll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "cpupll_clk2", "cpupll_div2",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1,
- 13, 0, &cpupll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "cpupll_clk3", "cpupll_div3",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1,
- 14, 0, &cpupll_ctrl1_lock);
- BUG_ON(!clk);
-
- clk = clk_register_gate(NULL, "mempll_clk1", "mempll_div1",
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
- sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1,
- 12, 0, &mempll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "mempll_clk2", "mempll_div2",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1,
- 13, 0, &mempll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "mempll_clk3", "mempll_div3",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1,
- 14, 0, &mempll_ctrl1_lock);
- BUG_ON(!clk);
-
- clk = clk_register_gate(NULL, "sys0pll_clk1", "sys0pll_div1",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1,
- 12, 0, &sys0pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "sys0pll_clk2", "sys0pll_div2",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1,
- 13, 0, &sys0pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "sys0pll_clk3", "sys0pll_div3",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1,
- 14, 0, &sys0pll_ctrl1_lock);
- BUG_ON(!clk);
-
- clk = clk_register_gate(NULL, "sys1pll_clk1", "sys1pll_div1",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1,
- 12, 0, &sys1pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "sys1pll_clk2", "sys1pll_div2",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1,
- 13, 0, &sys1pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "sys1pll_clk3", "sys1pll_div3",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1,
- 14, 0, &sys1pll_ctrl1_lock);
- BUG_ON(!clk);
-
- clk = clk_register_gate(NULL, "sys2pll_clk1", "sys2pll_div1",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1,
- 12, 0, &sys2pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "sys2pll_clk2", "sys2pll_div2",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1,
- 13, 0, &sys2pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "sys2pll_clk3", "sys2pll_div3",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1,
- 14, 0, &sys2pll_ctrl1_lock);
- BUG_ON(!clk);
-
- clk = clk_register_gate(NULL, "sys3pll_clk1", "sys3pll_div1",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1,
- 12, 0, &sys3pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "sys3pll_clk2", "sys3pll_div2",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1,
- 13, 0, &sys3pll_ctrl1_lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, "sys3pll_clk3", "sys3pll_div3",
- CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1,
- 14, 0, &sys3pll_ctrl1_lock);
- BUG_ON(!clk);
-
- clk = clk_register(NULL, &clk_audio_dto.hw);
- BUG_ON(!clk);
-
- clk = clk_register(NULL, &clk_disp0_dto.hw);
- BUG_ON(!clk);
-
- clk = clk_register(NULL, &clk_disp1_dto.hw);
- BUG_ON(!clk);
-
- for (i = 0; i < ARRAY_SIZE(divider_list); i++) {
- div = &divider_list[i];
- clk = clk_register_divider(NULL, div->div_name,
- div->parent_name, div->divider_flags, sirfsoc_clk_vbase + div->div_offset,
- div->shift, div->width, 0, div->lock);
- BUG_ON(!clk);
- clk = clk_register_gate(NULL, div->gate_name, div->div_name,
- div->gate_flags, sirfsoc_clk_vbase + div->gate_offset,
- div->gate_bit, 0, div->lock);
- BUG_ON(!clk);
- }
- /* ignore selector status register check */
- for (i = 0; i < ARRAY_SIZE(mux_list); i++) {
- mux = &mux_list[i];
- clk = clk_register_mux(NULL, mux->mux_name, mux->parent_names,
- mux->parent_num, mux->flags,
- sirfsoc_clk_vbase + mux->mux_offset,
- mux->shift, mux->width,
- mux->mux_flags, NULL);
- atlas7_clks[ARRAY_SIZE(unit_list) + i] = clk;
- BUG_ON(!clk);
- }
-
- for (i = 0; i < ARRAY_SIZE(unit_list); i++) {
- unit = &unit_list[i];
- atlas7_clks[i] = atlas7_unit_clk_register(NULL, unit->unit_name, unit->parent_name,
- unit->flags, unit->regofs, unit->bit, unit->type, unit->idle_bit, unit->lock);
- BUG_ON(!atlas7_clks[i]);
- }
-
- clk_data.clks = atlas7_clks;
- clk_data.clk_num = ARRAY_SIZE(unit_list) + ARRAY_SIZE(mux_list);
-
- ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- BUG_ON(ret);
-
- atlas7_rst_ctlr.of_node = np;
- atlas7_rst_ctlr.nr_resets = ARRAY_SIZE(atlas7_reset_unit);
- reset_controller_register(&atlas7_rst_ctlr);
-}
-CLK_OF_DECLARE(atlas7_clk, "sirf,atlas7-car", atlas7_clk_init);
diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
deleted file mode 100644
index dcf4e25a0216..000000000000
--- a/drivers/clk/sirf/clk-common.c
+++ /dev/null
@@ -1,1037 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * common clks module for all SiRF SoCs
- *
- * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
- * company.
- */
-
-#include <linux/clk.h>
-
-#define KHZ 1000
-#define MHZ (KHZ * KHZ)
-
-static void __iomem *sirfsoc_clk_vbase;
-static void __iomem *sirfsoc_rsc_vbase;
-static struct clk_onecell_data clk_data;
-
-/*
- * SiRFprimaII clock controller
- * - 2 oscillators: osc-26MHz, rtc-32.768KHz
- * - 3 standard configurable plls: pll1, pll2 & pll3
- * - 2 exclusive plls: usb phy pll and sata phy pll
- * - 8 clock domains: cpu/cpudiv, mem/memdiv, sys/io, dsp, graphic, multimedia,
- * display and sdphy.
- * Each clock domain can select its own clock source from five clock sources,
- * X_XIN, X_XINW, PLL1, PLL2 and PLL3. The domain clock is used as the source
- * clock of the group clock.
- * - dsp domain: gps, mf
- * - io domain: dmac, nand, audio, uart, i2c, spi, usp, pwm, pulse
- * - sys domain: security
- */
-
-struct clk_pll {
- struct clk_hw hw;
- unsigned short regofs; /* register offset */
-};
-
-#define to_pllclk(_hw) container_of(_hw, struct clk_pll, hw)
-
-struct clk_dmn {
- struct clk_hw hw;
- signed char enable_bit; /* enable bit: 0 ~ 63 */
- unsigned short regofs; /* register offset */
-};
-
-#define to_dmnclk(_hw) container_of(_hw, struct clk_dmn, hw)
-
-struct clk_std {
- struct clk_hw hw;
- signed char enable_bit; /* enable bit: 0 ~ 63 */
-};
-
-#define to_stdclk(_hw) container_of(_hw, struct clk_std, hw)
-
-static int std_clk_is_enabled(struct clk_hw *hw);
-static int std_clk_enable(struct clk_hw *hw);
-static void std_clk_disable(struct clk_hw *hw);
-
-static inline unsigned long clkc_readl(unsigned reg)
-{
- return readl(sirfsoc_clk_vbase + reg);
-}
-
-static inline void clkc_writel(u32 val, unsigned reg)
-{
- writel(val, sirfsoc_clk_vbase + reg);
-}
-
-/*
- * std pll
- */
-
-static unsigned long pll_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- unsigned long fin = parent_rate;
- struct clk_pll *clk = to_pllclk(hw);
- u32 regcfg2 = clk->regofs + SIRFSOC_CLKC_PLL1_CFG2 -
- SIRFSOC_CLKC_PLL1_CFG0;
-
- if (clkc_readl(regcfg2) & BIT(2)) {
- /* pll bypass mode */
- return fin;
- } else {
- /* fout = fin * nf / nr / od */
- u32 cfg0 = clkc_readl(clk->regofs);
- u32 nf = (cfg0 & (BIT(13) - 1)) + 1;
- u32 nr = ((cfg0 >> 13) & (BIT(6) - 1)) + 1;
- u32 od = ((cfg0 >> 19) & (BIT(4) - 1)) + 1;
- WARN_ON(fin % MHZ);
- return fin / MHZ * nf / nr / od * MHZ;
- }
-}
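
The recalc path above stores nf, nr and od in CFG0 as value-minus-one fields and divides the MHz count first to keep the intermediate product in range. A standalone sketch of that decode, assuming the same bit layout (nf in [12:0], nr in [18:13], od in [22:19]) and an invented 26 MHz oscillator; this is only the arithmetic, not the driver:

#include <stdint.h>
#include <stdio.h>

#define MHZ 1000000UL

static unsigned long pll_rate(unsigned long fin, uint32_t cfg0)
{
	uint32_t nf = (cfg0 & 0x1fff) + 1;		/* bits [12:0]  */
	uint32_t nr = ((cfg0 >> 13) & 0x3f) + 1;	/* bits [18:13] */
	uint32_t od = ((cfg0 >> 19) & 0xf) + 1;		/* bits [22:19] */

	/* divide the MHz count first, as the recalc above does */
	return fin / MHZ * nf / nr / od * MHZ;
}

int main(void)
{
	/* nf = 800, nr = 26, od = 1 with a 26 MHz osc -> 800 MHz */
	uint32_t cfg0 = (800 - 1) | ((26 - 1) << 13) | ((1 - 1) << 19);

	printf("%lu\n", pll_rate(26 * MHZ, cfg0));
	return 0;
}
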
-
-static long pll_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- unsigned long fin, nf, nr, od;
- u64 dividend;
-
- /*
- * fout = fin * nf / (nr * od);
- * set od = 1, nr = fin/MHz, so fout = nf * MHz
- */
- rate = rate - rate % MHZ;
-
- nf = rate / MHZ;
- if (nf > BIT(13))
- nf = BIT(13);
- if (nf < 1)
- nf = 1;
-
- fin = *parent_rate;
-
- nr = fin / MHZ;
- if (nr > BIT(6))
- nr = BIT(6);
- od = 1;
-
- dividend = (u64)fin * nf;
- do_div(dividend, nr * od);
-
- return (long)dividend;
-}
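
round_rate fixes od at 1 and picks nr = fin/MHz so the output always lands on a whole number of MHz. A minimal, self-contained sketch of the same rounding with the clamps from the function above; the 26 MHz input and 799.5 MHz request are purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define MHZ 1000000UL

static unsigned long pll_round(unsigned long fin, unsigned long rate)
{
	unsigned long nf, nr, od = 1;
	uint64_t dividend;

	rate -= rate % MHZ;			/* whole MHz steps only */
	nf = rate / MHZ;
	if (nf > (1UL << 13))
		nf = 1UL << 13;
	if (nf < 1)
		nf = 1;

	nr = fin / MHZ;				/* so that fin / nr == 1 MHz */
	if (nr > (1UL << 6))
		nr = 1UL << 6;

	dividend = (uint64_t)fin * nf;
	return (unsigned long)(dividend / (nr * od));
}

int main(void)
{
	/* 26 MHz osc, 799.5 MHz request: nf = 799, nr = 26 -> 799 MHz */
	printf("%lu\n", pll_round(26 * MHZ, 799500000UL));
	return 0;
}
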
-
-static int pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_pll *clk = to_pllclk(hw);
- unsigned long fin, nf, nr, od, reg;
-
- /*
- * fout = fin * nf / (nr * od);
- * set od = 1, nr = fin/MHz, so fout = nf * MHz
- */
-
- nf = rate / MHZ;
- if (unlikely((rate % MHZ) || nf > BIT(13) || nf < 1))
- return -EINVAL;
-
- fin = parent_rate;
- BUG_ON(fin < MHZ);
-
- nr = fin / MHZ;
- BUG_ON((fin % MHZ) || nr > BIT(6));
-
- od = 1;
-
- reg = (nf - 1) | ((nr - 1) << 13) | ((od - 1) << 19);
- clkc_writel(reg, clk->regofs);
-
- reg = clk->regofs + SIRFSOC_CLKC_PLL1_CFG1 - SIRFSOC_CLKC_PLL1_CFG0;
- clkc_writel((nf >> 1) - 1, reg);
-
- reg = clk->regofs + SIRFSOC_CLKC_PLL1_CFG2 - SIRFSOC_CLKC_PLL1_CFG0;
- while (!(clkc_readl(reg) & BIT(6)))
- cpu_relax();
-
- return 0;
-}
-
-static long cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
-	/*
-	 * The SiRF SoC has no cpu clock control,
-	 * so bypass to its parent pll.
-	 */
- struct clk_hw *parent_clk = clk_hw_get_parent(hw);
- struct clk_hw *pll_parent_clk = clk_hw_get_parent(parent_clk);
- unsigned long pll_parent_rate = clk_hw_get_rate(pll_parent_clk);
- return pll_clk_round_rate(parent_clk, rate, &pll_parent_rate);
-}
-
-static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
-	/*
-	 * The SiRF SoC has no cpu clock control,
-	 * so return the parent pll rate.
-	 */
- struct clk_hw *parent_clk = clk_hw_get_parent(hw);
- return clk_hw_get_rate(parent_clk);
-}
-
-static const struct clk_ops std_pll_ops = {
- .recalc_rate = pll_clk_recalc_rate,
- .round_rate = pll_clk_round_rate,
- .set_rate = pll_clk_set_rate,
-};
-
-static const char * const pll_clk_parents[] = {
- "osc",
-};
-
-static const struct clk_init_data clk_pll1_init = {
- .name = "pll1",
- .ops = &std_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static const struct clk_init_data clk_pll2_init = {
- .name = "pll2",
- .ops = &std_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static const struct clk_init_data clk_pll3_init = {
- .name = "pll3",
- .ops = &std_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static struct clk_pll clk_pll1 = {
- .regofs = SIRFSOC_CLKC_PLL1_CFG0,
- .hw = {
- .init = &clk_pll1_init,
- },
-};
-
-static struct clk_pll clk_pll2 = {
- .regofs = SIRFSOC_CLKC_PLL2_CFG0,
- .hw = {
- .init = &clk_pll2_init,
- },
-};
-
-static struct clk_pll clk_pll3 = {
- .regofs = SIRFSOC_CLKC_PLL3_CFG0,
- .hw = {
- .init = &clk_pll3_init,
- },
-};
-
-/*
- * usb uses a dedicated pll
- */
-
-static int usb_pll_clk_enable(struct clk_hw *hw)
-{
- u32 reg = readl(sirfsoc_rsc_vbase + SIRFSOC_USBPHY_PLL_CTRL);
- reg &= ~(SIRFSOC_USBPHY_PLL_POWERDOWN | SIRFSOC_USBPHY_PLL_BYPASS);
- writel(reg, sirfsoc_rsc_vbase + SIRFSOC_USBPHY_PLL_CTRL);
- while (!(readl(sirfsoc_rsc_vbase + SIRFSOC_USBPHY_PLL_CTRL) &
- SIRFSOC_USBPHY_PLL_LOCK))
- cpu_relax();
-
- return 0;
-}
-
-static void usb_pll_clk_disable(struct clk_hw *clk)
-{
- u32 reg = readl(sirfsoc_rsc_vbase + SIRFSOC_USBPHY_PLL_CTRL);
- reg |= (SIRFSOC_USBPHY_PLL_POWERDOWN | SIRFSOC_USBPHY_PLL_BYPASS);
- writel(reg, sirfsoc_rsc_vbase + SIRFSOC_USBPHY_PLL_CTRL);
-}
-
-static unsigned long usb_pll_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
-{
- u32 reg = readl(sirfsoc_rsc_vbase + SIRFSOC_USBPHY_PLL_CTRL);
- return (reg & SIRFSOC_USBPHY_PLL_BYPASS) ? parent_rate : 48*MHZ;
-}
-
-static const struct clk_ops usb_pll_ops = {
- .enable = usb_pll_clk_enable,
- .disable = usb_pll_clk_disable,
- .recalc_rate = usb_pll_clk_recalc_rate,
-};
-
-static const struct clk_init_data clk_usb_pll_init = {
- .name = "usb_pll",
- .ops = &usb_pll_ops,
- .parent_names = pll_clk_parents,
- .num_parents = ARRAY_SIZE(pll_clk_parents),
-};
-
-static struct clk_hw usb_pll_clk_hw = {
- .init = &clk_usb_pll_init,
-};
-
-/*
- * clock domains - cpu, mem, sys/io, dsp, gfx
- */
-
-static const char * const dmn_clk_parents[] = {
- "rtc",
- "osc",
- "pll1",
- "pll2",
- "pll3",
-};
-
-static u8 dmn_clk_get_parent(struct clk_hw *hw)
-{
- struct clk_dmn *clk = to_dmnclk(hw);
- u32 cfg = clkc_readl(clk->regofs);
- const char *name = clk_hw_get_name(hw);
-
- /* parent of io domain can only be pll3 */
- if (strcmp(name, "io") == 0)
- return 4;
-
- WARN_ON((cfg & (BIT(3) - 1)) > 4);
-
- return cfg & (BIT(3) - 1);
-}
-
-static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent)
-{
- struct clk_dmn *clk = to_dmnclk(hw);
- u32 cfg = clkc_readl(clk->regofs);
- const char *name = clk_hw_get_name(hw);
-
- /* parent of io domain can only be pll3 */
- if (strcmp(name, "io") == 0)
- return -EINVAL;
-
- cfg &= ~(BIT(3) - 1);
- clkc_writel(cfg | parent, clk->regofs);
- /* BIT(3) - switching status: 1 - busy, 0 - done */
- while (clkc_readl(clk->regofs) & BIT(3))
- cpu_relax();
-
- return 0;
-}
-
-static unsigned long dmn_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-
-{
- unsigned long fin = parent_rate;
- struct clk_dmn *clk = to_dmnclk(hw);
-
- u32 cfg = clkc_readl(clk->regofs);
-
- if (cfg & BIT(24)) {
- /* fcd bypass mode */
- return fin;
- } else {
- /*
- * wait count: bit[19:16], hold count: bit[23:20]
- */
- u32 wait = (cfg >> 16) & (BIT(4) - 1);
- u32 hold = (cfg >> 20) & (BIT(4) - 1);
-
- return fin / (wait + hold + 2);
- }
-}
-
-static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- unsigned long fin;
- unsigned ratio, wait, hold;
- const char *name = clk_hw_get_name(hw);
- unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
-
- fin = *parent_rate;
- ratio = fin / rate;
-
- if (ratio < 2)
- ratio = 2;
- if (ratio > BIT(bits + 1))
- ratio = BIT(bits + 1);
-
- wait = (ratio >> 1) - 1;
- hold = ratio - wait - 2;
-
- return fin / (wait + hold + 2);
-}
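
The effective domain divider is wait + hold + 2, which is simply the requested ratio after clamping (3 bits of range for "mem", 4 for the other domains). A small sketch of that computation, using an invented 800 MHz domain source:

#include <stdio.h>

static unsigned long dmn_round(unsigned long fin, unsigned long rate,
			       unsigned int bits /* 3 for "mem", else 4 */)
{
	unsigned int ratio, wait, hold;

	ratio = fin / rate;
	if (ratio < 2)
		ratio = 2;
	if (ratio > (1U << (bits + 1)))
		ratio = 1U << (bits + 1);

	wait = (ratio >> 1) - 1;
	hold = ratio - wait - 2;

	return fin / (wait + hold + 2);		/* == fin / ratio */
}

int main(void)
{
	/* 800 MHz source, 150 MHz request: ratio 5, wait 1, hold 2 -> 160 MHz */
	printf("%lu\n", dmn_round(800000000UL, 150000000UL, 4));
	return 0;
}
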
-
-static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_dmn *clk = to_dmnclk(hw);
- unsigned long fin;
- unsigned ratio, wait, hold, reg;
- const char *name = clk_hw_get_name(hw);
- unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
-
- fin = parent_rate;
- ratio = fin / rate;
-
- if (unlikely(ratio < 2 || ratio > BIT(bits + 1)))
- return -EINVAL;
-
- WARN_ON(fin % rate);
-
- wait = (ratio >> 1) - 1;
- hold = ratio - wait - 2;
-
- reg = clkc_readl(clk->regofs);
- reg &= ~(((BIT(bits) - 1) << 16) | ((BIT(bits) - 1) << 20));
- reg |= (wait << 16) | (hold << 20) | BIT(25);
- clkc_writel(reg, clk->regofs);
-
-	/* wait for the FCD setting to take effect */
- while (clkc_readl(clk->regofs) & BIT(25))
- cpu_relax();
-
- return 0;
-}
-
-static int cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- int ret1, ret2;
- struct clk *cur_parent;
-
- if (rate == clk_get_rate(clk_pll1.hw.clk)) {
- ret1 = clk_set_parent(hw->clk, clk_pll1.hw.clk);
- return ret1;
- }
-
- if (rate == clk_get_rate(clk_pll2.hw.clk)) {
- ret1 = clk_set_parent(hw->clk, clk_pll2.hw.clk);
- return ret1;
- }
-
- if (rate == clk_get_rate(clk_pll3.hw.clk)) {
- ret1 = clk_set_parent(hw->clk, clk_pll3.hw.clk);
- return ret1;
- }
-
- cur_parent = clk_get_parent(hw->clk);
-
- /* switch to tmp pll before setting parent clock's rate */
- if (cur_parent == clk_pll1.hw.clk) {
- ret1 = clk_set_parent(hw->clk, clk_pll2.hw.clk);
- BUG_ON(ret1);
- }
-
- ret2 = clk_set_rate(clk_pll1.hw.clk, rate);
-
- ret1 = clk_set_parent(hw->clk, clk_pll1.hw.clk);
-
- return ret2 ? ret2 : ret1;
-}
-
-static const struct clk_ops msi_ops = {
- .set_rate = dmn_clk_set_rate,
- .round_rate = dmn_clk_round_rate,
- .recalc_rate = dmn_clk_recalc_rate,
- .set_parent = dmn_clk_set_parent,
- .get_parent = dmn_clk_get_parent,
-};
-
-static const struct clk_init_data clk_mem_init = {
- .name = "mem",
- .ops = &msi_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static struct clk_dmn clk_mem = {
- .regofs = SIRFSOC_CLKC_MEM_CFG,
- .hw = {
- .init = &clk_mem_init,
- },
-};
-
-static const struct clk_init_data clk_sys_init = {
- .name = "sys",
- .ops = &msi_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
- .flags = CLK_SET_RATE_GATE,
-};
-
-static struct clk_dmn clk_sys = {
- .regofs = SIRFSOC_CLKC_SYS_CFG,
- .hw = {
- .init = &clk_sys_init,
- },
-};
-
-static const struct clk_init_data clk_io_init = {
- .name = "io",
- .ops = &msi_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static struct clk_dmn clk_io = {
- .regofs = SIRFSOC_CLKC_IO_CFG,
- .hw = {
- .init = &clk_io_init,
- },
-};
-
-static const struct clk_ops cpu_ops = {
- .set_parent = dmn_clk_set_parent,
- .get_parent = dmn_clk_get_parent,
- .set_rate = cpu_clk_set_rate,
- .round_rate = cpu_clk_round_rate,
- .recalc_rate = cpu_clk_recalc_rate,
-};
-
-static const struct clk_init_data clk_cpu_init = {
- .name = "cpu",
- .ops = &cpu_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
- .flags = CLK_SET_RATE_PARENT,
-};
-
-static struct clk_dmn clk_cpu = {
- .regofs = SIRFSOC_CLKC_CPU_CFG,
- .hw = {
- .init = &clk_cpu_init,
- },
-};
-
-static const struct clk_ops dmn_ops = {
- .is_enabled = std_clk_is_enabled,
- .enable = std_clk_enable,
- .disable = std_clk_disable,
- .set_rate = dmn_clk_set_rate,
- .round_rate = dmn_clk_round_rate,
- .recalc_rate = dmn_clk_recalc_rate,
- .set_parent = dmn_clk_set_parent,
- .get_parent = dmn_clk_get_parent,
-};
-
-/* dsp, gfx, mm, lcd and vpp domain */
-
-static const struct clk_init_data clk_dsp_init = {
- .name = "dsp",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static struct clk_dmn clk_dsp = {
- .regofs = SIRFSOC_CLKC_DSP_CFG,
- .enable_bit = 0,
- .hw = {
- .init = &clk_dsp_init,
- },
-};
-
-static const struct clk_init_data clk_gfx_init = {
- .name = "gfx",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static struct clk_dmn clk_gfx = {
- .regofs = SIRFSOC_CLKC_GFX_CFG,
- .enable_bit = 8,
- .hw = {
- .init = &clk_gfx_init,
- },
-};
-
-static const struct clk_init_data clk_mm_init = {
- .name = "mm",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static struct clk_dmn clk_mm = {
- .regofs = SIRFSOC_CLKC_MM_CFG,
- .enable_bit = 9,
- .hw = {
- .init = &clk_mm_init,
- },
-};
-
-/*
- * for atlas6, gfx2d holds the bit of prima2's clk_mm
- */
-#define clk_gfx2d clk_mm
-
-static const struct clk_init_data clk_lcd_init = {
- .name = "lcd",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static struct clk_dmn clk_lcd = {
- .regofs = SIRFSOC_CLKC_LCD_CFG,
- .enable_bit = 10,
- .hw = {
- .init = &clk_lcd_init,
- },
-};
-
-static const struct clk_init_data clk_vpp_init = {
- .name = "vpp",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static struct clk_dmn clk_vpp = {
- .regofs = SIRFSOC_CLKC_LCD_CFG,
- .enable_bit = 11,
- .hw = {
- .init = &clk_vpp_init,
- },
-};
-
-static const struct clk_init_data clk_mmc01_init = {
- .name = "mmc01",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static const struct clk_init_data clk_mmc23_init = {
- .name = "mmc23",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-static const struct clk_init_data clk_mmc45_init = {
- .name = "mmc45",
- .ops = &dmn_ops,
- .parent_names = dmn_clk_parents,
- .num_parents = ARRAY_SIZE(dmn_clk_parents),
-};
-
-/*
- * peripheral controllers in io domain
- */
-
-static int std_clk_is_enabled(struct clk_hw *hw)
-{
- u32 reg;
- int bit;
- struct clk_std *clk = to_stdclk(hw);
-
- bit = clk->enable_bit % 32;
- reg = clk->enable_bit / 32;
- reg = SIRFSOC_CLKC_CLK_EN0 + reg * sizeof(reg);
-
- return !!(clkc_readl(reg) & BIT(bit));
-}
-
-static int std_clk_enable(struct clk_hw *hw)
-{
- u32 val, reg;
- int bit;
- struct clk_std *clk = to_stdclk(hw);
-
- BUG_ON(clk->enable_bit < 0 || clk->enable_bit > 63);
-
- bit = clk->enable_bit % 32;
- reg = clk->enable_bit / 32;
- reg = SIRFSOC_CLKC_CLK_EN0 + reg * sizeof(reg);
-
- val = clkc_readl(reg) | BIT(bit);
- clkc_writel(val, reg);
- return 0;
-}
-
-static void std_clk_disable(struct clk_hw *hw)
-{
- u32 val, reg;
- int bit;
- struct clk_std *clk = to_stdclk(hw);
-
- BUG_ON(clk->enable_bit < 0 || clk->enable_bit > 63);
-
- bit = clk->enable_bit % 32;
- reg = clk->enable_bit / 32;
- reg = SIRFSOC_CLKC_CLK_EN0 + reg * sizeof(reg);
-
- val = clkc_readl(reg) & ~BIT(bit);
- clkc_writel(val, reg);
-}
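
The 0..63 enable bits used by the three std_clk_* helpers above are spread over two consecutive 32-bit enable registers, so bit 34, for instance, lands in CLK_EN1 at bit position 2. A tiny sketch of that mapping; the register names here stand in for the SIRFSOC_CLKC_CLK_EN* offsets:

#include <stdio.h>

#define CLK_EN0	0x0000	/* stands in for SIRFSOC_CLKC_CLK_EN0 */

static void map_enable_bit(int enable_bit, unsigned int *reg, int *bit)
{
	*bit = enable_bit % 32;				/* bit inside the word */
	*reg = CLK_EN0 + (enable_bit / 32) * 4;		/* which 32-bit word   */
}

int main(void)
{
	unsigned int reg;
	int bit;

	map_enable_bit(34, &reg, &bit);			/* e.g. the "nand" gate */
	printf("reg 0x%04x bit %d\n", reg, bit);	/* reg 0x0004 bit 2     */
	return 0;
}
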
-
-static const char * const std_clk_io_parents[] = {
- "io",
-};
-
-static const struct clk_ops ios_ops = {
- .is_enabled = std_clk_is_enabled,
- .enable = std_clk_enable,
- .disable = std_clk_disable,
-};
-
-static const struct clk_init_data clk_cphif_init = {
- .name = "cphif",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_cphif = {
- .enable_bit = 20,
- .hw = {
- .init = &clk_cphif_init,
- },
-};
-
-static const struct clk_init_data clk_dmac0_init = {
- .name = "dmac0",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_dmac0 = {
- .enable_bit = 32,
- .hw = {
- .init = &clk_dmac0_init,
- },
-};
-
-static const struct clk_init_data clk_dmac1_init = {
- .name = "dmac1",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_dmac1 = {
- .enable_bit = 33,
- .hw = {
- .init = &clk_dmac1_init,
- },
-};
-
-static const struct clk_init_data clk_audio_init = {
- .name = "audio",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_audio = {
- .enable_bit = 35,
- .hw = {
- .init = &clk_audio_init,
- },
-};
-
-static const struct clk_init_data clk_uart0_init = {
- .name = "uart0",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_uart0 = {
- .enable_bit = 36,
- .hw = {
- .init = &clk_uart0_init,
- },
-};
-
-static const struct clk_init_data clk_uart1_init = {
- .name = "uart1",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_uart1 = {
- .enable_bit = 37,
- .hw = {
- .init = &clk_uart1_init,
- },
-};
-
-static const struct clk_init_data clk_uart2_init = {
- .name = "uart2",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_uart2 = {
- .enable_bit = 38,
- .hw = {
- .init = &clk_uart2_init,
- },
-};
-
-static const struct clk_init_data clk_usp0_init = {
- .name = "usp0",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_usp0 = {
- .enable_bit = 39,
- .hw = {
- .init = &clk_usp0_init,
- },
-};
-
-static const struct clk_init_data clk_usp1_init = {
- .name = "usp1",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_usp1 = {
- .enable_bit = 40,
- .hw = {
- .init = &clk_usp1_init,
- },
-};
-
-static const struct clk_init_data clk_usp2_init = {
- .name = "usp2",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_usp2 = {
- .enable_bit = 41,
- .hw = {
- .init = &clk_usp2_init,
- },
-};
-
-static const struct clk_init_data clk_vip_init = {
- .name = "vip",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_vip = {
- .enable_bit = 42,
- .hw = {
- .init = &clk_vip_init,
- },
-};
-
-static const struct clk_init_data clk_spi0_init = {
- .name = "spi0",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_spi0 = {
- .enable_bit = 43,
- .hw = {
- .init = &clk_spi0_init,
- },
-};
-
-static const struct clk_init_data clk_spi1_init = {
- .name = "spi1",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_spi1 = {
- .enable_bit = 44,
- .hw = {
- .init = &clk_spi1_init,
- },
-};
-
-static const struct clk_init_data clk_tsc_init = {
- .name = "tsc",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_tsc = {
- .enable_bit = 45,
- .hw = {
- .init = &clk_tsc_init,
- },
-};
-
-static const struct clk_init_data clk_i2c0_init = {
- .name = "i2c0",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_i2c0 = {
- .enable_bit = 46,
- .hw = {
- .init = &clk_i2c0_init,
- },
-};
-
-static const struct clk_init_data clk_i2c1_init = {
- .name = "i2c1",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_i2c1 = {
- .enable_bit = 47,
- .hw = {
- .init = &clk_i2c1_init,
- },
-};
-
-static const struct clk_init_data clk_pwmc_init = {
- .name = "pwmc",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_pwmc = {
- .enable_bit = 48,
- .hw = {
- .init = &clk_pwmc_init,
- },
-};
-
-static const struct clk_init_data clk_efuse_init = {
- .name = "efuse",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_efuse = {
- .enable_bit = 49,
- .hw = {
- .init = &clk_efuse_init,
- },
-};
-
-static const struct clk_init_data clk_pulse_init = {
- .name = "pulse",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_pulse = {
- .enable_bit = 50,
- .hw = {
- .init = &clk_pulse_init,
- },
-};
-
-static const char * const std_clk_dsp_parents[] = {
- "dsp",
-};
-
-static const struct clk_init_data clk_gps_init = {
- .name = "gps",
- .ops = &ios_ops,
- .parent_names = std_clk_dsp_parents,
- .num_parents = ARRAY_SIZE(std_clk_dsp_parents),
-};
-
-static struct clk_std clk_gps = {
- .enable_bit = 1,
- .hw = {
- .init = &clk_gps_init,
- },
-};
-
-static const struct clk_init_data clk_mf_init = {
- .name = "mf",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_mf = {
- .enable_bit = 2,
- .hw = {
- .init = &clk_mf_init,
- },
-};
-
-static const char * const std_clk_sys_parents[] = {
- "sys",
-};
-
-static const struct clk_init_data clk_security_init = {
- .name = "security",
- .ops = &ios_ops,
- .parent_names = std_clk_sys_parents,
- .num_parents = ARRAY_SIZE(std_clk_sys_parents),
-};
-
-static struct clk_std clk_security = {
- .enable_bit = 19,
- .hw = {
- .init = &clk_security_init,
- },
-};
-
-static const char * const std_clk_usb_parents[] = {
- "usb_pll",
-};
-
-static const struct clk_init_data clk_usb0_init = {
- .name = "usb0",
- .ops = &ios_ops,
- .parent_names = std_clk_usb_parents,
- .num_parents = ARRAY_SIZE(std_clk_usb_parents),
-};
-
-static struct clk_std clk_usb0 = {
- .enable_bit = 16,
- .hw = {
- .init = &clk_usb0_init,
- },
-};
-
-static const struct clk_init_data clk_usb1_init = {
- .name = "usb1",
- .ops = &ios_ops,
- .parent_names = std_clk_usb_parents,
- .num_parents = ARRAY_SIZE(std_clk_usb_parents),
-};
-
-static struct clk_std clk_usb1 = {
- .enable_bit = 17,
- .hw = {
- .init = &clk_usb1_init,
- },
-};
diff --git a/drivers/clk/sirf/clk-prima2.c b/drivers/clk/sirf/clk-prima2.c
deleted file mode 100644
index d17b345f4d2d..000000000000
--- a/drivers/clk/sirf/clk-prima2.c
+++ /dev/null
@@ -1,149 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Clock tree for CSR SiRFprimaII
- *
- * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
- * company.
- */
-
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
-
-#include "prima2.h"
-#include "clk-common.c"
-
-static struct clk_dmn clk_mmc01 = {
- .regofs = SIRFSOC_CLKC_MMC_CFG,
- .enable_bit = 59,
- .hw = {
- .init = &clk_mmc01_init,
- },
-};
-
-static struct clk_dmn clk_mmc23 = {
- .regofs = SIRFSOC_CLKC_MMC_CFG,
- .enable_bit = 60,
- .hw = {
- .init = &clk_mmc23_init,
- },
-};
-
-static struct clk_dmn clk_mmc45 = {
- .regofs = SIRFSOC_CLKC_MMC_CFG,
- .enable_bit = 61,
- .hw = {
- .init = &clk_mmc45_init,
- },
-};
-
-static const struct clk_init_data clk_nand_init = {
- .name = "nand",
- .ops = &ios_ops,
- .parent_names = std_clk_io_parents,
- .num_parents = ARRAY_SIZE(std_clk_io_parents),
-};
-
-static struct clk_std clk_nand = {
- .enable_bit = 34,
- .hw = {
- .init = &clk_nand_init,
- },
-};
-
-enum prima2_clk_index {
- /* 0 1 2 3 4 5 6 7 8 9 */
- rtc, osc, pll1, pll2, pll3, mem, sys, security, dsp, gps,
- mf, io, cpu, uart0, uart1, uart2, tsc, i2c0, i2c1, spi0,
- spi1, pwmc, efuse, pulse, dmac0, dmac1, nand, audio, usp0, usp1,
- usp2, vip, gfx, mm, lcd, vpp, mmc01, mmc23, mmc45, usbpll,
- usb0, usb1, cphif, maxclk,
-};
-
-static __initdata struct clk_hw *prima2_clk_hw_array[maxclk] = {
- NULL, /* dummy */
- NULL,
- &clk_pll1.hw,
- &clk_pll2.hw,
- &clk_pll3.hw,
- &clk_mem.hw,
- &clk_sys.hw,
- &clk_security.hw,
- &clk_dsp.hw,
- &clk_gps.hw,
- &clk_mf.hw,
- &clk_io.hw,
- &clk_cpu.hw,
- &clk_uart0.hw,
- &clk_uart1.hw,
- &clk_uart2.hw,
- &clk_tsc.hw,
- &clk_i2c0.hw,
- &clk_i2c1.hw,
- &clk_spi0.hw,
- &clk_spi1.hw,
- &clk_pwmc.hw,
- &clk_efuse.hw,
- &clk_pulse.hw,
- &clk_dmac0.hw,
- &clk_dmac1.hw,
- &clk_nand.hw,
- &clk_audio.hw,
- &clk_usp0.hw,
- &clk_usp1.hw,
- &clk_usp2.hw,
- &clk_vip.hw,
- &clk_gfx.hw,
- &clk_mm.hw,
- &clk_lcd.hw,
- &clk_vpp.hw,
- &clk_mmc01.hw,
- &clk_mmc23.hw,
- &clk_mmc45.hw,
- &usb_pll_clk_hw,
- &clk_usb0.hw,
- &clk_usb1.hw,
- &clk_cphif.hw,
-};
-
-static struct clk *prima2_clks[maxclk];
-
-static void __init prima2_clk_init(struct device_node *np)
-{
- struct device_node *rscnp;
- int i;
-
- rscnp = of_find_compatible_node(NULL, NULL, "sirf,prima2-rsc");
- sirfsoc_rsc_vbase = of_iomap(rscnp, 0);
- if (!sirfsoc_rsc_vbase)
- panic("unable to map rsc registers\n");
- of_node_put(rscnp);
-
- sirfsoc_clk_vbase = of_iomap(np, 0);
- if (!sirfsoc_clk_vbase)
- panic("unable to map clkc registers\n");
-
-	/* These are always available (RTC and 26 MHz OSC) */
- prima2_clks[rtc] = clk_register_fixed_rate(NULL, "rtc", NULL, 0, 32768);
- prima2_clks[osc] = clk_register_fixed_rate(NULL, "osc", NULL, 0,
- 26000000);
-
- for (i = pll1; i < maxclk; i++) {
- prima2_clks[i] = clk_register(NULL, prima2_clk_hw_array[i]);
- BUG_ON(IS_ERR(prima2_clks[i]));
- }
- clk_register_clkdev(prima2_clks[cpu], NULL, "cpu");
- clk_register_clkdev(prima2_clks[io], NULL, "io");
- clk_register_clkdev(prima2_clks[mem], NULL, "mem");
- clk_register_clkdev(prima2_clks[mem], NULL, "osc");
-
- clk_data.clks = prima2_clks;
- clk_data.clk_num = maxclk;
-
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
-}
-CLK_OF_DECLARE(prima2_clk, "sirf,prima2-clkc", prima2_clk_init);
diff --git a/drivers/clk/sirf/prima2.h b/drivers/clk/sirf/prima2.h
deleted file mode 100644
index 2fb56941795d..000000000000
--- a/drivers/clk/sirf/prima2.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#define SIRFSOC_CLKC_CLK_EN0 0x0000
-#define SIRFSOC_CLKC_CLK_EN1 0x0004
-#define SIRFSOC_CLKC_REF_CFG 0x0014
-#define SIRFSOC_CLKC_CPU_CFG 0x0018
-#define SIRFSOC_CLKC_MEM_CFG 0x001c
-#define SIRFSOC_CLKC_SYS_CFG 0x0020
-#define SIRFSOC_CLKC_IO_CFG 0x0024
-#define SIRFSOC_CLKC_DSP_CFG 0x0028
-#define SIRFSOC_CLKC_GFX_CFG 0x002c
-#define SIRFSOC_CLKC_MM_CFG 0x0030
-#define SIRFSOC_CLKC_LCD_CFG 0x0034
-#define SIRFSOC_CLKC_MMC_CFG 0x0038
-#define SIRFSOC_CLKC_PLL1_CFG0 0x0040
-#define SIRFSOC_CLKC_PLL2_CFG0 0x0044
-#define SIRFSOC_CLKC_PLL3_CFG0 0x0048
-#define SIRFSOC_CLKC_PLL1_CFG1 0x004c
-#define SIRFSOC_CLKC_PLL2_CFG1 0x0050
-#define SIRFSOC_CLKC_PLL3_CFG1 0x0054
-#define SIRFSOC_CLKC_PLL1_CFG2 0x0058
-#define SIRFSOC_CLKC_PLL2_CFG2 0x005c
-#define SIRFSOC_CLKC_PLL3_CFG2 0x0060
-#define SIRFSOC_USBPHY_PLL_CTRL 0x0008
-#define SIRFSOC_USBPHY_PLL_POWERDOWN BIT(1)
-#define SIRFSOC_USBPHY_PLL_BYPASS BIT(2)
-#define SIRFSOC_USBPHY_PLL_LOCK BIT(3)
diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c
index bb3e80928ebe..7689bdd0a914 100644
--- a/drivers/clk/socfpga/clk-agilex.c
+++ b/drivers/clk/socfpga/clk-agilex.c
@@ -196,6 +196,17 @@ static const struct stratix10_pll_clock agilex_pll_clks[] = {
0, 0x9c},
};
+static const struct n5x_perip_c_clock n5x_main_perip_c_clks[] = {
+ { AGILEX_MAIN_PLL_C0_CLK, "main_pll_c0", "main_pll", NULL, 1, 0, 0x54, 0},
+ { AGILEX_MAIN_PLL_C1_CLK, "main_pll_c1", "main_pll", NULL, 1, 0, 0x54, 8},
+ { AGILEX_MAIN_PLL_C2_CLK, "main_pll_c2", "main_pll", NULL, 1, 0, 0x54, 16},
+ { AGILEX_MAIN_PLL_C3_CLK, "main_pll_c3", "main_pll", NULL, 1, 0, 0x54, 24},
+ { AGILEX_PERIPH_PLL_C0_CLK, "peri_pll_c0", "periph_pll", NULL, 1, 0, 0xA8, 0},
+ { AGILEX_PERIPH_PLL_C1_CLK, "peri_pll_c1", "periph_pll", NULL, 1, 0, 0xA8, 8},
+ { AGILEX_PERIPH_PLL_C2_CLK, "peri_pll_c2", "periph_pll", NULL, 1, 0, 0xA8, 16},
+ { AGILEX_PERIPH_PLL_C3_CLK, "peri_pll_c3", "periph_pll", NULL, 1, 0, 0xA8, 24},
+};
+
static const struct stratix10_perip_c_clock agilex_main_perip_c_clks[] = {
{ AGILEX_MAIN_PLL_C0_CLK, "main_pll_c0", "main_pll", NULL, 1, 0, 0x58},
{ AGILEX_MAIN_PLL_C1_CLK, "main_pll_c1", "main_pll", NULL, 1, 0, 0x5C},
@@ -289,6 +300,25 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = {
10, 0, 0, 0, 0, 0, 4},
};
+static int n5x_clk_register_c_perip(const struct n5x_perip_c_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = n5x_register_periph(&clks[i], base);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+ return 0;
+}
+
static int agilex_clk_register_c_perip(const struct stratix10_perip_c_clock *clks,
int nums, struct stratix10_clock_data *data)
{
@@ -367,6 +397,26 @@ static int agilex_clk_register_pll(const struct stratix10_pll_clock *clks,
return 0;
}
+static int n5x_clk_register_pll(const struct stratix10_pll_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = n5x_register_pll(&clks[i], base);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+
+ return 0;
+}
+
static struct stratix10_clock_data *__socfpga_agilex_clk_init(struct platform_device *pdev,
int nr_clks)
{
@@ -401,7 +451,7 @@ static struct stratix10_clock_data *__socfpga_agilex_clk_init(struct platform_de
return clk_data;
}
-static int agilex_clkmgr_probe(struct platform_device *pdev)
+static int agilex_clkmgr_init(struct platform_device *pdev)
{
struct stratix10_clock_data *clk_data;
@@ -423,9 +473,43 @@ static int agilex_clkmgr_probe(struct platform_device *pdev)
return 0;
}
+static int n5x_clkmgr_init(struct platform_device *pdev)
+{
+ struct stratix10_clock_data *clk_data;
+
+ clk_data = __socfpga_agilex_clk_init(pdev, AGILEX_NUM_CLKS);
+ if (IS_ERR(clk_data))
+ return PTR_ERR(clk_data);
+
+ n5x_clk_register_pll(agilex_pll_clks, ARRAY_SIZE(agilex_pll_clks), clk_data);
+
+ n5x_clk_register_c_perip(n5x_main_perip_c_clks,
+ ARRAY_SIZE(n5x_main_perip_c_clks), clk_data);
+
+ agilex_clk_register_cnt_perip(agilex_main_perip_cnt_clks,
+ ARRAY_SIZE(agilex_main_perip_cnt_clks),
+ clk_data);
+
+ agilex_clk_register_gate(agilex_gate_clks, ARRAY_SIZE(agilex_gate_clks),
+ clk_data);
+ return 0;
+}
+
+static int agilex_clkmgr_probe(struct platform_device *pdev)
+{
+ int (*probe_func)(struct platform_device *init_func);
+
+ probe_func = of_device_get_match_data(&pdev->dev);
+ if (!probe_func)
+ return -ENODEV;
+ return probe_func(pdev);
+}
+
static const struct of_device_id agilex_clkmgr_match_table[] = {
{ .compatible = "intel,agilex-clkmgr",
- .data = agilex_clkmgr_probe },
+ .data = agilex_clkmgr_init },
+ { .compatible = "intel,easic-n5x-clkmgr",
+ .data = n5x_clkmgr_init },
{ }
};
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index 397b77b89b16..0ff2b9d24035 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -15,6 +15,21 @@
#define to_periph_clk(p) container_of(p, struct socfpga_periph_clk, hw.hw)
+static unsigned long n5x_clk_peri_c_clk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_periph_clk *socfpgaclk = to_periph_clk(hwclk);
+ unsigned long div;
+ unsigned long shift = socfpgaclk->shift;
+ u32 val;
+
+ val = readl(socfpgaclk->hw.reg);
+ val &= (0x1f << shift);
+ div = (val >> shift) + 1;
+
+ return parent_rate / div;
+}
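
On N5X the per-channel C clock divider is a 5-bit field at a per-clock shift, with the stored value meaning divider-minus-one. A standalone sketch, assuming the shift-8 slot that main_pll_c1 uses in the table earlier in this patch and an invented 1 GHz parent:

#include <stdint.h>
#include <stdio.h>

static unsigned long n5x_periph_rate(unsigned long parent, uint32_t reg,
				     unsigned int shift)
{
	unsigned long div = ((reg >> shift) & 0x1f) + 1;	/* 5-bit field, +1 */

	return parent / div;
}

int main(void)
{
	/* main_pll_c1 uses shift 8; a field value of 3 divides by 4 */
	printf("%lu\n", n5x_periph_rate(1000000000UL, 3U << 8, 8));
	return 0;
}
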
+
static unsigned long clk_peri_c_clk_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
@@ -63,6 +78,11 @@ static u8 clk_periclk_get_parent(struct clk_hw *hwclk)
return parent;
}
+static const struct clk_ops n5x_peri_c_clk_ops = {
+ .recalc_rate = n5x_clk_peri_c_clk_recalc_rate,
+ .get_parent = clk_periclk_get_parent,
+};
+
static const struct clk_ops peri_c_clk_ops = {
.recalc_rate = clk_peri_c_clk_recalc_rate,
.get_parent = clk_periclk_get_parent,
@@ -107,6 +127,39 @@ struct clk *s10_register_periph(const struct stratix10_perip_c_clock *clks,
return clk;
}
+struct clk *n5x_register_periph(const struct n5x_perip_c_clock *clks,
+ void __iomem *regbase)
+{
+ struct clk *clk;
+ struct socfpga_periph_clk *periph_clk;
+ struct clk_init_data init;
+ const char *name = clks->name;
+ const char *parent_name = clks->parent_name;
+
+ periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
+ if (WARN_ON(!periph_clk))
+ return NULL;
+
+ periph_clk->hw.reg = regbase + clks->offset;
+ periph_clk->shift = clks->shift;
+
+ init.name = name;
+ init.ops = &n5x_peri_c_clk_ops;
+ init.flags = clks->flags;
+
+ init.num_parents = clks->num_parents;
+ init.parent_names = parent_name ? &parent_name : NULL;
+
+ periph_clk->hw.hw.init = &init;
+
+ clk = clk_register(NULL, &periph_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(periph_clk);
+ return NULL;
+ }
+ return clk;
+}
+
struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *clks,
void __iomem *regbase)
{
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index db54f7d806a0..3338f054fe98 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -73,7 +73,6 @@ static struct clk * __init __socfpga_pll_init(struct device_node *node,
const char *parent_name[SOCFGPA_MAX_PARENTS];
struct clk_init_data init;
struct device_node *clkmgr_np;
- int rc;
int i = 0;
of_property_read_u32(node, "reg", &reg);
@@ -108,7 +107,7 @@ static struct clk * __init __socfpga_pll_init(struct device_node *node,
kfree(pll_clk);
return NULL;
}
- rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
return clk;
}
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index 4e268953b7da..f6f66e08e1f4 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -27,10 +27,37 @@
#define SWCTRLBTCLKSEL_MASK 0x200
#define SWCTRLBTCLKSEL_SHIFT 9
+#define SOCFPGA_N5X_PLLDIV_FDIV_MASK GENMASK(16, 8)
+#define SOCFPGA_N5X_PLLDIV_FDIV_SHIFT 8
+#define SOCFPGA_N5X_PLLDIV_RDIV_MASK GENMASK(5, 0)
+#define SOCFPGA_N5X_PLLDIV_QDIV_MASK GENMASK(26, 24)
+#define SOCFPGA_N5X_PLLDIV_QDIV_SHIFT 24
+
#define SOCFPGA_BOOT_CLK "boot_clk"
#define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
+static unsigned long n5x_clk_pll_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+ unsigned long fdiv, reg, rdiv, qdiv;
+ u32 power = 1;
+
+ /* read VCO1 reg for numerator and denominator */
+ reg = readl(socfpgaclk->hw.reg + 0x8);
+ fdiv = (reg & SOCFPGA_N5X_PLLDIV_FDIV_MASK) >> SOCFPGA_N5X_PLLDIV_FDIV_SHIFT;
+ rdiv = (reg & SOCFPGA_N5X_PLLDIV_RDIV_MASK);
+ qdiv = (reg & SOCFPGA_N5X_PLLDIV_QDIV_MASK) >> SOCFPGA_N5X_PLLDIV_QDIV_SHIFT;
+
+ while (qdiv) {
+ power *= 2;
+ qdiv--;
+ }
+
+ return ((parent_rate * 2 * (fdiv + 1)) / ((rdiv + 1) * power));
+}
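
In this recalc, qdiv selects a power-of-two post-divider, so the while loop is just computing 2^qdiv. A standalone sketch of the same decode, assuming the GENMASK positions from this hunk and an invented 25 MHz reference clock; not the driver itself:

#include <stdint.h>
#include <stdio.h>

#define FDIV_MASK	0x0001ff00U	/* GENMASK(16, 8)  */
#define FDIV_SHIFT	8
#define RDIV_MASK	0x0000003fU	/* GENMASK(5, 0)   */
#define QDIV_MASK	0x07000000U	/* GENMASK(26, 24) */
#define QDIV_SHIFT	24

static unsigned long n5x_pll_rate(unsigned long parent_rate, uint32_t reg)
{
	unsigned long fdiv = (reg & FDIV_MASK) >> FDIV_SHIFT;
	unsigned long rdiv = reg & RDIV_MASK;
	unsigned long qdiv = (reg & QDIV_MASK) >> QDIV_SHIFT;

	/* fout = fref * 2 * (fdiv + 1) / ((rdiv + 1) * 2^qdiv) */
	return (parent_rate * 2 * (fdiv + 1)) / ((rdiv + 1) << qdiv);
}

int main(void)
{
	/* fdiv = 49, rdiv = 0, qdiv = 1: 25 MHz * 2 * 50 / 2 = 1.25 GHz */
	uint32_t reg = (49U << FDIV_SHIFT) | (1U << QDIV_SHIFT);

	printf("%lu\n", n5x_pll_rate(25000000UL, reg));
	return 0;
}
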
+
static unsigned long agilex_clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
@@ -123,6 +150,25 @@ static int clk_pll_prepare(struct clk_hw *hwclk)
return 0;
}
+static int n5x_clk_pll_prepare(struct clk_hw *hwclk)
+{
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+ u32 reg;
+
+ /* Bring PLL out of reset */
+ reg = readl(socfpgaclk->hw.reg + 0x4);
+ reg |= SOCFPGA_PLL_RESET_MASK;
+ writel(reg, socfpgaclk->hw.reg + 0x4);
+
+ return 0;
+}
+
+static const struct clk_ops n5x_clk_pll_ops = {
+ .recalc_rate = n5x_clk_pll_recalc_rate,
+ .get_parent = clk_pll_get_parent,
+ .prepare = n5x_clk_pll_prepare,
+};
+
static const struct clk_ops agilex_clk_pll_ops = {
.recalc_rate = agilex_clk_pll_recalc_rate,
.get_parent = clk_pll_get_parent,
@@ -214,3 +260,40 @@ struct clk *agilex_register_pll(const struct stratix10_pll_clock *clks,
}
return clk;
}
+
+struct clk *n5x_register_pll(const struct stratix10_pll_clock *clks,
+ void __iomem *reg)
+{
+ struct clk *clk;
+ struct socfpga_pll *pll_clk;
+ struct clk_init_data init;
+ const char *name = clks->name;
+
+ pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
+ if (WARN_ON(!pll_clk))
+ return NULL;
+
+ pll_clk->hw.reg = reg + clks->offset;
+
+ if (streq(name, SOCFPGA_BOOT_CLK))
+ init.ops = &clk_boot_ops;
+ else
+ init.ops = &n5x_clk_pll_ops;
+
+ init.name = name;
+ init.flags = clks->flags;
+
+ init.num_parents = clks->num_parents;
+ init.parent_names = NULL;
+ init.parent_data = clks->parent_data;
+ pll_clk->hw.hw.init = &init;
+
+ pll_clk->hw.bit_idx = SOCFPGA_PLL_POWER;
+
+ clk = clk_register(NULL, &pll_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(pll_clk);
+ return NULL;
+ }
+ return clk;
+}
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index e5fb786843f3..3cf99df7d005 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -80,7 +80,6 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
const char *parent_name[SOCFPGA_MAX_PARENTS];
struct clk_init_data init;
struct device_node *clkmgr_np;
- int rc;
of_property_read_u32(node, "reg", &reg);
@@ -111,7 +110,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
kfree(pll_clk);
return NULL;
}
- rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
return clk;
}
diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h
index f9d5d724c694..420deed677ce 100644
--- a/drivers/clk/socfpga/stratix10-clk.h
+++ b/drivers/clk/socfpga/stratix10-clk.h
@@ -30,6 +30,17 @@ struct stratix10_perip_c_clock {
unsigned long offset;
};
+struct n5x_perip_c_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ const char *const *parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long offset;
+ unsigned long shift;
+};
+
struct stratix10_perip_cnt_clock {
unsigned int id;
const char *name;
@@ -64,8 +75,12 @@ struct clk *s10_register_pll(const struct stratix10_pll_clock *,
void __iomem *);
struct clk *agilex_register_pll(const struct stratix10_pll_clock *,
void __iomem *);
+struct clk *n5x_register_pll(const struct stratix10_pll_clock *clks,
+ void __iomem *reg);
struct clk *s10_register_periph(const struct stratix10_perip_c_clock *,
- void __iomem *);
+ void __iomem *reg);
+struct clk *n5x_register_periph(const struct n5x_perip_c_clock *clks,
+ void __iomem *reg);
struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *,
void __iomem *);
struct clk *s10_register_gate(const struct stratix10_gate_clock *,
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 591248c9a88e..8c8974866789 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -12,6 +12,7 @@
*/
#include <linux/clkdev.h>
+#include <linux/clk/spear.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of_platform.h>
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index 9163bbb46411..c0dc94355c87 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -12,6 +12,7 @@
*/
#include <linux/clkdev.h>
+#include <linux/clk/spear.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of_platform.h>
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index f1adc858b590..dd6062e043e0 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -172,10 +172,10 @@ static const struct clkgen_quadfs_data st_fs660c32_D = {
* ST quad channel frequency synthesizer block
*
* @hw: handle between common and hardware-specific interfaces.
- * @ndiv: regmap field for the ndiv control.
* @regs_base: base address of the configuration registers.
* @lock: spinlock.
- *
+ * @data: local driver data
+ * @ndiv: regmap field for the ndiv control.
*/
struct st_clk_quadfs_pll {
struct clk_hw hw;
@@ -426,7 +426,7 @@ static struct clk * __init st_clk_register_quadfs_pll(
* parent - fixed parent. No clk_set_parent support
*/
-/**
+/*
* struct st_clk_quadfs_fsynth - One clock output from a four channel digital
* frequency synthesizer (fsynth) block.
*
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index c3952f2c42ba..119c5b33080c 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -130,12 +130,11 @@ static struct clkgen_pll_data st_pll4600c28_418_a9 = {
* parent - fixed parent. No clk_set_parent support
*/
-/**
+/*
* PLL clock that is integrated in the ClockGenA instances on the STiH415
* and STiH416.
*
* @hw: handle between common and hardware-specific interfaces.
- * @type: PLL instance type.
* @regs_base: base of the PLL configuration register(s).
*
*/
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index ce5f5847d5d3..cd46d8853876 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -32,8 +32,13 @@ config SUN50I_H6_CCU
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+config SUN50I_H616_CCU
+ bool "Support for the Allwinner H616 CCU"
+ default ARM64 && ARCH_SUNXI
+ depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+
config SUN50I_H6_R_CCU
- bool "Support for the Allwinner H6 PRCM CCU"
+ bool "Support for the Allwinner H6 and H616 PRCM CCU"
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 3eb5cff40eac..96c324306d97 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
obj-$(CONFIG_SUN50I_A100_CCU) += ccu-sun50i-a100.o
obj-$(CONFIG_SUN50I_A100_R_CCU) += ccu-sun50i-a100-r.o
obj-$(CONFIG_SUN50I_H6_CCU) += ccu-sun50i-h6.o
+obj-$(CONFIG_SUN50I_H616_CCU) += ccu-sun50i-h616.o
obj-$(CONFIG_SUN50I_H6_R_CCU) += ccu-sun50i-h6-r.o
obj-$(CONFIG_SUN4I_A10_CCU) += ccu-sun4i-a10.o
obj-$(CONFIG_SUN5I_CCU) += ccu-sun5i.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index 50f8d1bc7046..f8909a7ed553 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -91,6 +91,8 @@ static SUNXI_CCU_GATE(r_apb2_uart_clk, "r-apb2-uart", "r-apb2",
0x18c, BIT(0), 0);
static SUNXI_CCU_GATE(r_apb2_i2c_clk, "r-apb2-i2c", "r-apb2",
0x19c, BIT(0), 0);
+static SUNXI_CCU_GATE(r_apb2_rsb_clk, "r-apb2-rsb", "r-apb2",
+ 0x1bc, BIT(0), 0);
static SUNXI_CCU_GATE(r_apb1_ir_clk, "r-apb1-ir", "r-apb1",
0x1cc, BIT(0), 0);
static SUNXI_CCU_GATE(r_apb1_w1_clk, "r-apb1-w1", "r-apb1",
@@ -130,12 +132,23 @@ static struct ccu_common *sun50i_h6_r_ccu_clks[] = {
&r_apb1_pwm_clk.common,
&r_apb2_uart_clk.common,
&r_apb2_i2c_clk.common,
+ &r_apb2_rsb_clk.common,
&r_apb1_ir_clk.common,
&r_apb1_w1_clk.common,
&ir_clk.common,
&w1_clk.common,
};
+static struct ccu_common *sun50i_h616_r_ccu_clks[] = {
+ &r_apb1_clk.common,
+ &r_apb2_clk.common,
+ &r_apb1_twd_clk.common,
+ &r_apb2_i2c_clk.common,
+ &r_apb2_rsb_clk.common,
+ &r_apb1_ir_clk.common,
+ &ir_clk.common,
+};
+
static struct clk_hw_onecell_data sun50i_h6_r_hw_clks = {
.hws = {
[CLK_AR100] = &ar100_clk.common.hw,
@@ -147,6 +160,7 @@ static struct clk_hw_onecell_data sun50i_h6_r_hw_clks = {
[CLK_R_APB1_PWM] = &r_apb1_pwm_clk.common.hw,
[CLK_R_APB2_UART] = &r_apb2_uart_clk.common.hw,
[CLK_R_APB2_I2C] = &r_apb2_i2c_clk.common.hw,
+ [CLK_R_APB2_RSB] = &r_apb2_rsb_clk.common.hw,
[CLK_R_APB1_IR] = &r_apb1_ir_clk.common.hw,
[CLK_R_APB1_W1] = &r_apb1_w1_clk.common.hw,
[CLK_IR] = &ir_clk.common.hw,
@@ -155,16 +169,38 @@ static struct clk_hw_onecell_data sun50i_h6_r_hw_clks = {
.num = CLK_NUMBER,
};
+static struct clk_hw_onecell_data sun50i_h616_r_hw_clks = {
+ .hws = {
+ [CLK_R_AHB] = &r_ahb_clk.hw,
+ [CLK_R_APB1] = &r_apb1_clk.common.hw,
+ [CLK_R_APB2] = &r_apb2_clk.common.hw,
+ [CLK_R_APB1_TWD] = &r_apb1_twd_clk.common.hw,
+ [CLK_R_APB2_I2C] = &r_apb2_i2c_clk.common.hw,
+ [CLK_R_APB2_RSB] = &r_apb2_rsb_clk.common.hw,
+ [CLK_R_APB1_IR] = &r_apb1_ir_clk.common.hw,
+ [CLK_IR] = &ir_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
static struct ccu_reset_map sun50i_h6_r_ccu_resets[] = {
[RST_R_APB1_TIMER] = { 0x11c, BIT(16) },
[RST_R_APB1_TWD] = { 0x12c, BIT(16) },
[RST_R_APB1_PWM] = { 0x13c, BIT(16) },
[RST_R_APB2_UART] = { 0x18c, BIT(16) },
[RST_R_APB2_I2C] = { 0x19c, BIT(16) },
+ [RST_R_APB2_RSB] = { 0x1bc, BIT(16) },
[RST_R_APB1_IR] = { 0x1cc, BIT(16) },
[RST_R_APB1_W1] = { 0x1ec, BIT(16) },
};
+static struct ccu_reset_map sun50i_h616_r_ccu_resets[] = {
+ [RST_R_APB1_TWD] = { 0x12c, BIT(16) },
+ [RST_R_APB2_I2C] = { 0x19c, BIT(16) },
+ [RST_R_APB2_RSB] = { 0x1bc, BIT(16) },
+ [RST_R_APB1_IR] = { 0x1cc, BIT(16) },
+};
+
static const struct sunxi_ccu_desc sun50i_h6_r_ccu_desc = {
.ccu_clks = sun50i_h6_r_ccu_clks,
.num_ccu_clks = ARRAY_SIZE(sun50i_h6_r_ccu_clks),
@@ -175,6 +211,16 @@ static const struct sunxi_ccu_desc sun50i_h6_r_ccu_desc = {
.num_resets = ARRAY_SIZE(sun50i_h6_r_ccu_resets),
};
+static const struct sunxi_ccu_desc sun50i_h616_r_ccu_desc = {
+ .ccu_clks = sun50i_h616_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_h616_r_ccu_clks),
+
+ .hw_clks = &sun50i_h616_r_hw_clks,
+
+ .resets = sun50i_h616_r_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun50i_h616_r_ccu_resets),
+};
+
static void __init sunxi_r_ccu_init(struct device_node *node,
const struct sunxi_ccu_desc *desc)
{
@@ -195,3 +241,10 @@ static void __init sun50i_h6_r_ccu_setup(struct device_node *node)
}
CLK_OF_DECLARE(sun50i_h6_r_ccu, "allwinner,sun50i-h6-r-ccu",
sun50i_h6_r_ccu_setup);
+
+static void __init sun50i_h616_r_ccu_setup(struct device_node *node)
+{
+ sunxi_r_ccu_init(node, &sun50i_h616_r_ccu_desc);
+}
+CLK_OF_DECLARE(sun50i_h616_r_ccu, "allwinner,sun50i-h616-r-ccu",
+ sun50i_h616_r_ccu_setup);
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h
index 782117dc0b28..7e290b840803 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h
@@ -14,6 +14,6 @@
#define CLK_R_APB2 3
-#define CLK_NUMBER (CLK_W1 + 1)
+#define CLK_NUMBER (CLK_R_APB2_RSB + 1)
#endif /* _CCU_SUN50I_H6_R_H */
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index f2497d0a4683..bff446b78290 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -237,7 +237,7 @@ static const char * const psi_ahb1_ahb2_parents[] = { "osc24M", "osc32k",
static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2",
psi_ahb1_ahb2_parents,
0x510,
- 0, 5, /* M */
+ 0, 2, /* M */
8, 2, /* P */
24, 2, /* mux */
0);
@@ -246,19 +246,19 @@ static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k",
"psi-ahb1-ahb2",
"pll-periph0" };
static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c,
- 0, 5, /* M */
+ 0, 2, /* M */
8, 2, /* P */
24, 2, /* mux */
0);
static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520,
- 0, 5, /* M */
+ 0, 2, /* M */
8, 2, /* P */
24, 2, /* mux */
0);
static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524,
- 0, 5, /* M */
+ 0, 2, /* M */
8, 2, /* P */
24, 2, /* mux */
0);
@@ -682,7 +682,7 @@ static struct ccu_mux hdmi_cec_clk = {
.common = {
.reg = 0xb10,
- .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .features = CCU_FEATURE_FIXED_PREDIV,
.hw.init = CLK_HW_INIT_PARENTS("hdmi-cec",
hdmi_cec_parents,
&ccu_mux_ops,
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
new file mode 100644
index 000000000000..225307305880
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -0,0 +1,1150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Arm Ltd.
+ * Based on the H6 CCU driver, which is:
+ * Copyright (c) 2017 Icenowy Zheng <icenowy@aosc.io>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+
+#include "ccu-sun50i-h616.h"
+
+/*
+ * The CPU PLL is actually an NP clock, with P being /1, /2 or /4. However
+ * P should only be used for output frequencies lower than 288 MHz.
+ *
+ * For now we can just model it as a multiplier clock, and force P to /1.
+ *
+ * The M factor is present in the register's description, but not in the
+ * frequency formula, and it's documented as "M is only used for backdoor
+ * testing", so it's not modelled and forced to 0.
+ */
+#define SUN50I_H616_PLL_CPUX_REG 0x000
+static struct ccu_mult pll_cpux_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .mult = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .common = {
+ .reg = 0x000,
+ .hw.init = CLK_HW_INIT("pll-cpux", "osc24M",
+ &ccu_mult_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
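
With P forced to /1 and M ignored, the CPU PLL rate reduces to 24 MHz times the N factor. A quick standalone check of that relation, assuming the N field occupies bits [15:8] and encodes N-1, as _SUNXI_CCU_MULT_MIN(8, 8, 12) suggests; the register value below is invented:

#include <stdint.h>
#include <stdio.h>

static unsigned long h616_cpux_pll_rate(uint32_t reg)
{
	unsigned long n = ((reg >> 8) & 0xff) + 1;	/* PLL_FACTOR_N in [15:8] */

	return 24000000UL * n;				/* P = /1, M ignored */
}

int main(void)
{
	/* a field value of 41 encodes N = 42 -> 1.008 GHz */
	printf("%lu\n", h616_cpux_pll_rate(41U << 8));
	return 0;
}
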
+
+/* Some PLLs are input * N / div1 / P. Model them as NKMP with no K */
+#define SUN50I_H616_PLL_DDR0_REG 0x010
+static struct ccu_nkmp pll_ddr0_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x010,
+ .hw.init = CLK_HW_INIT("pll-ddr0", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H616_PLL_DDR1_REG 0x018
+static struct ccu_nkmp pll_ddr1_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x018,
+ .hw.init = CLK_HW_INIT("pll-ddr1", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H616_PLL_PERIPH0_REG 0x020
+static struct ccu_nkmp pll_periph0_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .fixed_post_div = 2,
+ .common = {
+ .reg = 0x020,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph0", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H616_PLL_PERIPH1_REG 0x028
+static struct ccu_nkmp pll_periph1_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .fixed_post_div = 2,
+ .common = {
+ .reg = 0x028,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph1", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H616_PLL_GPU_REG 0x030
+static struct ccu_nkmp pll_gpu_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x030,
+ .hw.init = CLK_HW_INIT("pll-gpu", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * For Video PLLs, the output divider is described as "used for testing"
+ * in the user manual. So it's not modelled and forced to 0.
+ */
+#define SUN50I_H616_PLL_VIDEO0_REG 0x040
+static struct ccu_nm pll_video0_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .fixed_post_div = 4,
+ .min_rate = 288000000,
+ .max_rate = 2400000000UL,
+ .common = {
+ .reg = 0x040,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-video0", "osc24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H616_PLL_VIDEO1_REG 0x048
+static struct ccu_nm pll_video1_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .fixed_post_div = 4,
+ .min_rate = 288000000,
+ .max_rate = 2400000000UL,
+ .common = {
+ .reg = 0x048,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-video1", "osc24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H616_PLL_VIDEO2_REG 0x050
+static struct ccu_nm pll_video2_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .fixed_post_div = 4,
+ .min_rate = 288000000,
+ .max_rate = 2400000000UL,
+ .common = {
+ .reg = 0x050,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-video2", "osc24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H616_PLL_VE_REG 0x058
+static struct ccu_nkmp pll_ve_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x058,
+ .hw.init = CLK_HW_INIT("pll-ve", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H616_PLL_DE_REG 0x060
+static struct ccu_nkmp pll_de_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x060,
+ .hw.init = CLK_HW_INIT("pll-de", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * TODO: Determine SDM settings for the audio PLL. The manual suggests
+ * PLL_FACTOR_N=16, PLL_POST_DIV_P=2, OUTPUT_DIV=2, pattern=0xe000c49b
+ * for 24.576 MHz, and PLL_FACTOR_N=22, PLL_POST_DIV_P=3, OUTPUT_DIV=2,
+ * pattern=0xe001288c for 22.5792 MHz.
+ * This clashes with our fixed PLL_POST_DIV_P.
+ */
+#define SUN50I_H616_PLL_AUDIO_REG 0x078
+static struct ccu_nm pll_audio_hs_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x078,
+ .hw.init = CLK_HW_INIT("pll-audio-hs", "osc24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static const char * const cpux_parents[] = { "osc24M", "osc32k",
+ "iosc", "pll-cpux", "pll-periph0" };
+static SUNXI_CCU_MUX(cpux_clk, "cpux", cpux_parents,
+ 0x500, 24, 3, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x500, 0, 2, 0);
+static SUNXI_CCU_M(cpux_apb_clk, "cpux-apb", "cpux", 0x500, 8, 2, 0);
+
+static const char * const psi_ahb1_ahb2_parents[] = { "osc24M", "osc32k",
+ "iosc", "pll-periph0" };
+static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2",
+ psi_ahb1_ahb2_parents,
+ 0x510,
+ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k",
+ "psi-ahb1-ahb2",
+ "pll-periph0" };
+static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c,
+ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520,
+ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524,
+ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static const char * const mbus_parents[] = { "osc24M", "pll-periph0-2x",
+ "pll-ddr0", "pll-ddr1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents, 0x540,
+ 0, 3, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ CLK_IS_CRITICAL);
+
+static const char * const de_parents[] = { "pll-de", "pll-periph0-2x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents, 0x600,
+ 0, 4, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "psi-ahb1-ahb2",
+ 0x60c, BIT(0), 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(deinterlace_clk, "deinterlace",
+ de_parents,
+ 0x620,
+ 0, 4, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_deinterlace_clk, "bus-deinterlace", "psi-ahb1-ahb2",
+ 0x62c, BIT(0), 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(g2d_clk, "g2d", de_parents, 0x630,
+ 0, 4, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_g2d_clk, "bus-g2d", "psi-ahb1-ahb2",
+ 0x63c, BIT(0), 0);
+
+static const char * const gpu0_parents[] = { "pll-gpu", "gpu1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(gpu0_clk, "gpu0", gpu0_parents, 0x670,
+ 0, 2, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M_WITH_GATE(gpu1_clk, "gpu1", "pll-periph0-2x", 0x674,
+ 0, 2, /* M */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "psi-ahb1-ahb2",
+ 0x67c, BIT(0), 0);
+
+static const char * const ce_parents[] = { "osc24M", "pll-periph0-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ce_clk, "ce", ce_parents, 0x680,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 1, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_ce_clk, "bus-ce", "psi-ahb1-ahb2",
+ 0x68c, BIT(0), 0);
+
+static const char * const ve_parents[] = { "pll-ve" };
+static SUNXI_CCU_M_WITH_MUX_GATE(ve_clk, "ve", ve_parents, 0x690,
+ 0, 3, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "psi-ahb1-ahb2",
+ 0x69c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "psi-ahb1-ahb2",
+ 0x70c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "psi-ahb1-ahb2",
+ 0x73c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M", 0x740, BIT(31), 0);
+
+static SUNXI_CCU_GATE(bus_dbg_clk, "bus-dbg", "psi-ahb1-ahb2",
+ 0x78c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_psi_clk, "bus-psi", "psi-ahb1-ahb2",
+ 0x79c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_pwm_clk, "bus-pwm", "apb1", 0x7ac, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_iommu_clk, "bus-iommu", "apb1", 0x7bc, BIT(0), 0);
+
+static const char * const dram_parents[] = { "pll-ddr0", "pll-ddr1" };
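+/*
+ * Note: dram_clk below is marked CLK_IS_CRITICAL, presumably because Linux
+ * executes from DRAM, so the clock framework must never disable it as
+ * unused.
+ */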
+static struct ccu_div dram_clk = {
+ .div = _SUNXI_CCU_DIV(0, 2),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0x800,
+ .hw.init = CLK_HW_INIT_PARENTS("dram",
+ dram_parents,
+ &ccu_div_ops,
+ CLK_IS_CRITICAL),
+ },
+};
+
+static SUNXI_CCU_GATE(mbus_dma_clk, "mbus-dma", "mbus",
+ 0x804, BIT(0), 0);
+static SUNXI_CCU_GATE(mbus_ve_clk, "mbus-ve", "mbus",
+ 0x804, BIT(1), 0);
+static SUNXI_CCU_GATE(mbus_ce_clk, "mbus-ce", "mbus",
+ 0x804, BIT(2), 0);
+static SUNXI_CCU_GATE(mbus_ts_clk, "mbus-ts", "mbus",
+ 0x804, BIT(3), 0);
+static SUNXI_CCU_GATE(mbus_nand_clk, "mbus-nand", "mbus",
+ 0x804, BIT(5), 0);
+static SUNXI_CCU_GATE(mbus_g2d_clk, "mbus-g2d", "mbus",
+ 0x804, BIT(10), 0);
+
+static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "psi-ahb1-ahb2",
+ 0x80c, BIT(0), CLK_IS_CRITICAL);
+
+static const char * const nand_spi_parents[] = { "osc24M", "pll-periph0",
+ "pll-periph1", "pll-periph0-2x",
+ "pll-periph1-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand0_clk, "nand0", nand_spi_parents, 0x810,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand1_clk, "nand1", nand_spi_parents, 0x814,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb3", 0x82c, BIT(0), 0);
+
+static const char * const mmc_parents[] = { "osc24M", "pll-periph0-2x",
+ "pll-periph1-2x" };
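+/*
+ * The fixed post-divider of 2 on the MMC clocks below is assumed to account
+ * for the controllers' "new timing mode", in which the module clock runs at
+ * twice the card clock.
+ */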
+static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", mmc_parents, 0x830,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
+
+static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb3", 0x84c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb3", 0x84c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_mmc2_clk, "bus-mmc2", "ahb3", 0x84c, BIT(2), 0);
+
+static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2", 0x90c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2", 0x90c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2", 0x90c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb2", 0x90c, BIT(3), 0);
+static SUNXI_CCU_GATE(bus_uart4_clk, "bus-uart4", "apb2", 0x90c, BIT(4), 0);
+static SUNXI_CCU_GATE(bus_uart5_clk, "bus-uart5", "apb2", 0x90c, BIT(5), 0);
+
+static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2", 0x91c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2", 0x91c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2", 0x91c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_i2c3_clk, "bus-i2c3", "apb2", 0x91c, BIT(3), 0);
+static SUNXI_CCU_GATE(bus_i2c4_clk, "bus-i2c4", "apb2", 0x91c, BIT(4), 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", nand_spi_parents, 0x940,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", nand_spi_parents, 0x944,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb3", 0x96c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb3", 0x96c, BIT(1), 0);
+
+static SUNXI_CCU_GATE(emac_25m_clk, "emac-25m", "ahb3", 0x970,
+ BIT(31) | BIT(30), 0);
+
+static SUNXI_CCU_GATE(bus_emac0_clk, "bus-emac0", "ahb3", 0x97c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_emac1_clk, "bus-emac1", "ahb3", 0x97c, BIT(1), 0);
+
+static const char * const ts_parents[] = { "osc24M", "pll-periph0" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ts_clk, "ts", ts_parents, 0x9b0,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 1, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_ts_clk, "bus-ts", "ahb3", 0x9bc, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_ths_clk, "bus-ths", "apb1", 0x9fc, BIT(0), 0);
+
+static const char * const audio_parents[] = { "pll-audio-1x", "pll-audio-2x",
+ "pll-audio-4x", "pll-audio-hs" };
+static struct ccu_div spdif_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa20,
+ .hw.init = CLK_HW_INIT_PARENTS("spdif",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_spdif_clk, "bus-spdif", "apb1", 0xa2c, BIT(0), 0);
+
+static struct ccu_div dmic_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa40,
+ .hw.init = CLK_HW_INIT_PARENTS("dmic",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_dmic_clk, "bus-dmic", "apb1", 0xa4c, BIT(0), 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(audio_codec_1x_clk, "audio-codec-1x",
+ audio_parents, 0xa50,
+ 0, 4, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M_WITH_MUX_GATE(audio_codec_4x_clk, "audio-codec-4x",
+ audio_parents, 0xa54,
+ 0, 4, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(bus_audio_codec_clk, "bus-audio-codec", "apb1", 0xa5c,
+ BIT(0), 0);
+
+static struct ccu_div audio_hub_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa60,
+ .hw.init = CLK_HW_INIT_PARENTS("audio-hub",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_audio_hub_clk, "bus-audio-hub", "apb1", 0xa6c, BIT(0), 0);
+
+/*
+ * There are OHCI 12M clock source selection bits for the four USB 2.0 ports.
+ * We will force them to 0 (12M divided from 48M).
+ */
+#define SUN50I_H616_USB0_CLK_REG 0xa70
+#define SUN50I_H616_USB1_CLK_REG 0xa74
+#define SUN50I_H616_USB2_CLK_REG 0xa78
+#define SUN50I_H616_USB3_CLK_REG 0xa7c
+
+static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc12M", 0xa70, BIT(31), 0);
+static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M", 0xa70, BIT(29), 0);
+
+static SUNXI_CCU_GATE(usb_ohci1_clk, "usb-ohci1", "osc12M", 0xa74, BIT(31), 0);
+static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "osc24M", 0xa74, BIT(29), 0);
+
+static SUNXI_CCU_GATE(usb_ohci2_clk, "usb-ohci2", "osc12M", 0xa78, BIT(31), 0);
+static SUNXI_CCU_GATE(usb_phy2_clk, "usb-phy2", "osc24M", 0xa78, BIT(29), 0);
+
+static SUNXI_CCU_GATE(usb_ohci3_clk, "usb-ohci3", "osc12M", 0xa7c, BIT(31), 0);
+static SUNXI_CCU_GATE(usb_phy3_clk, "usb-phy3", "osc24M", 0xa7c, BIT(29), 0);
+
+static SUNXI_CCU_GATE(bus_ohci0_clk, "bus-ohci0", "ahb3", 0xa8c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_ohci1_clk, "bus-ohci1", "ahb3", 0xa8c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_ohci2_clk, "bus-ohci2", "ahb3", 0xa8c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_ohci3_clk, "bus-ohci3", "ahb3", 0xa8c, BIT(3), 0);
+static SUNXI_CCU_GATE(bus_ehci0_clk, "bus-ehci0", "ahb3", 0xa8c, BIT(4), 0);
+static SUNXI_CCU_GATE(bus_ehci1_clk, "bus-ehci1", "ahb3", 0xa8c, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_ehci2_clk, "bus-ehci2", "ahb3", 0xa8c, BIT(6), 0);
+static SUNXI_CCU_GATE(bus_ehci3_clk, "bus-ehci3", "ahb3", 0xa8c, BIT(7), 0);
+static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb3", 0xa8c, BIT(8), 0);
+
+static SUNXI_CCU_GATE(bus_keyadc_clk, "bus-keyadc", "apb1", 0xa9c, BIT(0), 0);
+
+static const char * const hdmi_parents[] = { "pll-video0", "pll-video0-4x",
+ "pll-video2", "pll-video2-4x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", hdmi_parents, 0xb00,
+ 0, 4, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(hdmi_slow_clk, "hdmi-slow", "osc24M", 0xb04, BIT(31), 0);
+
+static const char * const hdmi_cec_parents[] = { "osc32k", "pll-periph0-2x" };
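+/*
+ * With pll-periph0 at its usual 600 MHz (an assumption, the rate is not
+ * fixed in this file), pll-periph0-2x runs at 1.2 GHz and the fixed
+ * pre-divider of 36621 yields roughly 32768 Hz, matching the osc32k parent.
+ */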
+static const struct ccu_mux_fixed_prediv hdmi_cec_predivs[] = {
+ { .index = 1, .div = 36621 },
+};
+
+#define SUN50I_H616_HDMI_CEC_CLK_REG 0xb10
+static struct ccu_mux hdmi_cec_clk = {
+ .enable = BIT(31) | BIT(30),
+
+ .mux = {
+ .shift = 24,
+ .width = 2,
+
+ .fixed_predivs = hdmi_cec_predivs,
+ .n_predivs = ARRAY_SIZE(hdmi_cec_predivs),
+ },
+
+ .common = {
+ .reg = 0xb10,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("hdmi-cec",
+ hdmi_cec_parents,
+ &ccu_mux_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_hdmi_clk, "bus-hdmi", "ahb3", 0xb1c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_tcon_top_clk, "bus-tcon-top", "ahb3",
+ 0xb5c, BIT(0), 0);
+
+static const char * const tcon_tv_parents[] = { "pll-video0",
+ "pll-video0-4x",
+ "pll-video1",
+ "pll-video1-4x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(tcon_tv0_clk, "tcon-tv0",
+ tcon_tv_parents, 0xb80,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_MP_WITH_MUX_GATE(tcon_tv1_clk, "tcon-tv1",
+ tcon_tv_parents, 0xb84,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(bus_tcon_tv0_clk, "bus-tcon-tv0", "ahb3",
+ 0xb9c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_tcon_tv1_clk, "bus-tcon-tv1", "ahb3",
+ 0xb9c, BIT(1), 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(tve0_clk, "tve0",
+ tcon_tv_parents, 0xbb0,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(bus_tve_top_clk, "bus-tve-top", "ahb3",
+ 0xbbc, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_tve0_clk, "bus-tve0", "ahb3",
+ 0xbbc, BIT(1), 0);
+
+static const char * const hdcp_parents[] = { "pll-periph0", "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(hdcp_clk, "hdcp", hdcp_parents, 0xc40,
+ 0, 4, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_hdcp_clk, "bus-hdcp", "ahb3", 0xc4c, BIT(0), 0);
+
+/* Fixed factor clocks */
+static CLK_FIXED_FACTOR_FW_NAME(osc12M_clk, "osc12M", "hosc", 2, 1, 0);
+
+static const struct clk_hw *clk_parent_pll_audio[] = {
+ &pll_audio_hs_clk.common.hw
+};
+
+/*
+ * The divider of pll-audio is fixed to 24 for now, so 24576000 and 22579200
+ * rates can be set exactly in conjunction with sigma-delta modulation.
+ */
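+/*
+ * Rough sketch of the intended numbers (assuming pll-audio-1x targets
+ * 24.576 MHz): pll-audio-hs would then run at 24.576 MHz * 96 =
+ * 2359.296 MHz, giving 98.304 MHz for the 4x clock and 49.152 MHz for the
+ * 2x clock.
+ */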
+static CLK_FIXED_FACTOR_HWS(pll_audio_1x_clk, "pll-audio-1x",
+ clk_parent_pll_audio,
+ 96, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x",
+ clk_parent_pll_audio,
+ 48, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HWS(pll_audio_4x_clk, "pll-audio-4x",
+ clk_parent_pll_audio,
+ 24, 1, CLK_SET_RATE_PARENT);
+
+static const struct clk_hw *pll_periph0_parents[] = {
+ &pll_periph0_clk.common.hw
+};
+
+static CLK_FIXED_FACTOR_HWS(pll_periph0_2x_clk, "pll-periph0-2x",
+ pll_periph0_parents,
+ 1, 2, 0);
+
+static const struct clk_hw *pll_periph1_parents[] = {
+ &pll_periph1_clk.common.hw
+};
+
+static CLK_FIXED_FACTOR_HWS(pll_periph1_2x_clk, "pll-periph1-2x",
+ pll_periph1_parents,
+ 1, 2, 0);
+
+static CLK_FIXED_FACTOR_HW(pll_video0_4x_clk, "pll-video0-4x",
+ &pll_video0_clk.common.hw,
+ 1, 4, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HW(pll_video1_4x_clk, "pll-video1-4x",
+ &pll_video1_clk.common.hw,
+ 1, 4, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HW(pll_video2_4x_clk, "pll-video2-4x",
+ &pll_video2_clk.common.hw,
+ 1, 4, CLK_SET_RATE_PARENT);
+
+static struct ccu_common *sun50i_h616_ccu_clks[] = {
+ &pll_cpux_clk.common,
+ &pll_ddr0_clk.common,
+ &pll_ddr1_clk.common,
+ &pll_periph0_clk.common,
+ &pll_periph1_clk.common,
+ &pll_gpu_clk.common,
+ &pll_video0_clk.common,
+ &pll_video1_clk.common,
+ &pll_video2_clk.common,
+ &pll_ve_clk.common,
+ &pll_de_clk.common,
+ &pll_audio_hs_clk.common,
+ &cpux_clk.common,
+ &axi_clk.common,
+ &cpux_apb_clk.common,
+ &psi_ahb1_ahb2_clk.common,
+ &ahb3_clk.common,
+ &apb1_clk.common,
+ &apb2_clk.common,
+ &mbus_clk.common,
+ &de_clk.common,
+ &bus_de_clk.common,
+ &deinterlace_clk.common,
+ &bus_deinterlace_clk.common,
+ &g2d_clk.common,
+ &bus_g2d_clk.common,
+ &gpu0_clk.common,
+ &bus_gpu_clk.common,
+ &gpu1_clk.common,
+ &ce_clk.common,
+ &bus_ce_clk.common,
+ &ve_clk.common,
+ &bus_ve_clk.common,
+ &bus_dma_clk.common,
+ &bus_hstimer_clk.common,
+ &avs_clk.common,
+ &bus_dbg_clk.common,
+ &bus_psi_clk.common,
+ &bus_pwm_clk.common,
+ &bus_iommu_clk.common,
+ &dram_clk.common,
+ &mbus_dma_clk.common,
+ &mbus_ve_clk.common,
+ &mbus_ce_clk.common,
+ &mbus_ts_clk.common,
+ &mbus_nand_clk.common,
+ &mbus_g2d_clk.common,
+ &bus_dram_clk.common,
+ &nand0_clk.common,
+ &nand1_clk.common,
+ &bus_nand_clk.common,
+ &mmc0_clk.common,
+ &mmc1_clk.common,
+ &mmc2_clk.common,
+ &bus_mmc0_clk.common,
+ &bus_mmc1_clk.common,
+ &bus_mmc2_clk.common,
+ &bus_uart0_clk.common,
+ &bus_uart1_clk.common,
+ &bus_uart2_clk.common,
+ &bus_uart3_clk.common,
+ &bus_uart4_clk.common,
+ &bus_uart5_clk.common,
+ &bus_i2c0_clk.common,
+ &bus_i2c1_clk.common,
+ &bus_i2c2_clk.common,
+ &bus_i2c3_clk.common,
+ &bus_i2c4_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &bus_spi0_clk.common,
+ &bus_spi1_clk.common,
+ &emac_25m_clk.common,
+ &bus_emac0_clk.common,
+ &bus_emac1_clk.common,
+ &ts_clk.common,
+ &bus_ts_clk.common,
+ &bus_ths_clk.common,
+ &spdif_clk.common,
+ &bus_spdif_clk.common,
+ &dmic_clk.common,
+ &bus_dmic_clk.common,
+ &audio_codec_1x_clk.common,
+ &audio_codec_4x_clk.common,
+ &bus_audio_codec_clk.common,
+ &audio_hub_clk.common,
+ &bus_audio_hub_clk.common,
+ &usb_ohci0_clk.common,
+ &usb_phy0_clk.common,
+ &usb_ohci1_clk.common,
+ &usb_phy1_clk.common,
+ &usb_ohci2_clk.common,
+ &usb_phy2_clk.common,
+ &usb_ohci3_clk.common,
+ &usb_phy3_clk.common,
+ &bus_ohci0_clk.common,
+ &bus_ohci1_clk.common,
+ &bus_ohci2_clk.common,
+ &bus_ohci3_clk.common,
+ &bus_ehci0_clk.common,
+ &bus_ehci1_clk.common,
+ &bus_ehci2_clk.common,
+ &bus_ehci3_clk.common,
+ &bus_otg_clk.common,
+ &bus_keyadc_clk.common,
+ &hdmi_clk.common,
+ &hdmi_slow_clk.common,
+ &hdmi_cec_clk.common,
+ &bus_hdmi_clk.common,
+ &bus_tcon_top_clk.common,
+ &tcon_tv0_clk.common,
+ &tcon_tv1_clk.common,
+ &bus_tcon_tv0_clk.common,
+ &bus_tcon_tv1_clk.common,
+ &tve0_clk.common,
+ &bus_tve_top_clk.common,
+ &bus_tve0_clk.common,
+ &hdcp_clk.common,
+ &bus_hdcp_clk.common,
+};
+
+static struct clk_hw_onecell_data sun50i_h616_hw_clks = {
+ .hws = {
+ [CLK_OSC12M] = &osc12M_clk.hw,
+ [CLK_PLL_CPUX] = &pll_cpux_clk.common.hw,
+ [CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw,
+ [CLK_PLL_DDR1] = &pll_ddr1_clk.common.hw,
+ [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw,
+ [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw,
+ [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw,
+ [CLK_PLL_PERIPH1_2X] = &pll_periph1_2x_clk.hw,
+ [CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
+ [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw,
+ [CLK_PLL_VIDEO0_4X] = &pll_video0_4x_clk.hw,
+ [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw,
+ [CLK_PLL_VIDEO1_4X] = &pll_video1_4x_clk.hw,
+ [CLK_PLL_VIDEO2] = &pll_video2_clk.common.hw,
+ [CLK_PLL_VIDEO2_4X] = &pll_video2_4x_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_DE] = &pll_de_clk.common.hw,
+ [CLK_PLL_AUDIO_HS] = &pll_audio_hs_clk.common.hw,
+ [CLK_PLL_AUDIO_1X] = &pll_audio_1x_clk.hw,
+ [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw,
+ [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw,
+ [CLK_CPUX] = &cpux_clk.common.hw,
+ [CLK_AXI] = &axi_clk.common.hw,
+ [CLK_CPUX_APB] = &cpux_apb_clk.common.hw,
+ [CLK_PSI_AHB1_AHB2] = &psi_ahb1_ahb2_clk.common.hw,
+ [CLK_AHB3] = &ahb3_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_APB2] = &apb2_clk.common.hw,
+ [CLK_MBUS] = &mbus_clk.common.hw,
+ [CLK_DE] = &de_clk.common.hw,
+ [CLK_BUS_DE] = &bus_de_clk.common.hw,
+ [CLK_DEINTERLACE] = &deinterlace_clk.common.hw,
+ [CLK_BUS_DEINTERLACE] = &bus_deinterlace_clk.common.hw,
+ [CLK_G2D] = &g2d_clk.common.hw,
+ [CLK_BUS_G2D] = &bus_g2d_clk.common.hw,
+ [CLK_GPU0] = &gpu0_clk.common.hw,
+ [CLK_BUS_GPU] = &bus_gpu_clk.common.hw,
+ [CLK_GPU1] = &gpu1_clk.common.hw,
+ [CLK_CE] = &ce_clk.common.hw,
+ [CLK_BUS_CE] = &bus_ce_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_BUS_VE] = &bus_ve_clk.common.hw,
+ [CLK_BUS_DMA] = &bus_dma_clk.common.hw,
+ [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ [CLK_BUS_DBG] = &bus_dbg_clk.common.hw,
+ [CLK_BUS_PSI] = &bus_psi_clk.common.hw,
+ [CLK_BUS_PWM] = &bus_pwm_clk.common.hw,
+ [CLK_BUS_IOMMU] = &bus_iommu_clk.common.hw,
+ [CLK_DRAM] = &dram_clk.common.hw,
+ [CLK_MBUS_DMA] = &mbus_dma_clk.common.hw,
+ [CLK_MBUS_VE] = &mbus_ve_clk.common.hw,
+ [CLK_MBUS_CE] = &mbus_ce_clk.common.hw,
+ [CLK_MBUS_TS] = &mbus_ts_clk.common.hw,
+ [CLK_MBUS_NAND] = &mbus_nand_clk.common.hw,
+ [CLK_MBUS_G2D] = &mbus_g2d_clk.common.hw,
+ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
+ [CLK_NAND0] = &nand0_clk.common.hw,
+ [CLK_NAND1] = &nand1_clk.common.hw,
+ [CLK_BUS_NAND] = &bus_nand_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
+ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
+ [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw,
+ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
+ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
+ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
+ [CLK_BUS_UART3] = &bus_uart3_clk.common.hw,
+ [CLK_BUS_UART4] = &bus_uart4_clk.common.hw,
+ [CLK_BUS_UART5] = &bus_uart5_clk.common.hw,
+ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
+ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
+ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
+ [CLK_BUS_I2C3] = &bus_i2c3_clk.common.hw,
+ [CLK_BUS_I2C4] = &bus_i2c4_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
+ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
+ [CLK_EMAC_25M] = &emac_25m_clk.common.hw,
+ [CLK_BUS_EMAC0] = &bus_emac0_clk.common.hw,
+ [CLK_BUS_EMAC1] = &bus_emac1_clk.common.hw,
+ [CLK_TS] = &ts_clk.common.hw,
+ [CLK_BUS_TS] = &bus_ts_clk.common.hw,
+ [CLK_BUS_THS] = &bus_ths_clk.common.hw,
+ [CLK_SPDIF] = &spdif_clk.common.hw,
+ [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw,
+ [CLK_DMIC] = &dmic_clk.common.hw,
+ [CLK_BUS_DMIC] = &bus_dmic_clk.common.hw,
+ [CLK_AUDIO_CODEC_1X] = &audio_codec_1x_clk.common.hw,
+ [CLK_AUDIO_CODEC_4X] = &audio_codec_4x_clk.common.hw,
+ [CLK_BUS_AUDIO_CODEC] = &bus_audio_codec_clk.common.hw,
+ [CLK_AUDIO_HUB] = &audio_hub_clk.common.hw,
+ [CLK_BUS_AUDIO_HUB] = &bus_audio_hub_clk.common.hw,
+ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
+ [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
+ [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw,
+ [CLK_USB_PHY1] = &usb_phy1_clk.common.hw,
+ [CLK_USB_OHCI2] = &usb_ohci2_clk.common.hw,
+ [CLK_USB_PHY2] = &usb_phy2_clk.common.hw,
+ [CLK_USB_OHCI3] = &usb_ohci3_clk.common.hw,
+ [CLK_USB_PHY3] = &usb_phy3_clk.common.hw,
+ [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw,
+ [CLK_BUS_OHCI1] = &bus_ohci1_clk.common.hw,
+ [CLK_BUS_OHCI2] = &bus_ohci2_clk.common.hw,
+ [CLK_BUS_OHCI3] = &bus_ohci3_clk.common.hw,
+ [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw,
+ [CLK_BUS_EHCI1] = &bus_ehci1_clk.common.hw,
+ [CLK_BUS_EHCI2] = &bus_ehci2_clk.common.hw,
+ [CLK_BUS_EHCI3] = &bus_ehci3_clk.common.hw,
+ [CLK_BUS_OTG] = &bus_otg_clk.common.hw,
+ [CLK_BUS_KEYADC] = &bus_keyadc_clk.common.hw,
+ [CLK_HDMI] = &hdmi_clk.common.hw,
+ [CLK_HDMI_SLOW] = &hdmi_slow_clk.common.hw,
+ [CLK_HDMI_CEC] = &hdmi_cec_clk.common.hw,
+ [CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw,
+ [CLK_BUS_TCON_TOP] = &bus_tcon_top_clk.common.hw,
+ [CLK_TCON_TV0] = &tcon_tv0_clk.common.hw,
+ [CLK_TCON_TV1] = &tcon_tv1_clk.common.hw,
+ [CLK_BUS_TCON_TV0] = &bus_tcon_tv0_clk.common.hw,
+ [CLK_BUS_TCON_TV1] = &bus_tcon_tv1_clk.common.hw,
+ [CLK_TVE0] = &tve0_clk.common.hw,
+ [CLK_BUS_TVE_TOP] = &bus_tve_top_clk.common.hw,
+ [CLK_BUS_TVE0] = &bus_tve0_clk.common.hw,
+ [CLK_HDCP] = &hdcp_clk.common.hw,
+ [CLK_BUS_HDCP] = &bus_hdcp_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct ccu_reset_map sun50i_h616_ccu_resets[] = {
+ [RST_MBUS] = { 0x540, BIT(30) },
+
+ [RST_BUS_DE] = { 0x60c, BIT(16) },
+ [RST_BUS_DEINTERLACE] = { 0x62c, BIT(16) },
+ [RST_BUS_GPU] = { 0x67c, BIT(16) },
+ [RST_BUS_CE] = { 0x68c, BIT(16) },
+ [RST_BUS_VE] = { 0x69c, BIT(16) },
+ [RST_BUS_DMA] = { 0x70c, BIT(16) },
+ [RST_BUS_HSTIMER] = { 0x73c, BIT(16) },
+ [RST_BUS_DBG] = { 0x78c, BIT(16) },
+ [RST_BUS_PSI] = { 0x79c, BIT(16) },
+ [RST_BUS_PWM] = { 0x7ac, BIT(16) },
+ [RST_BUS_IOMMU] = { 0x7bc, BIT(16) },
+ [RST_BUS_DRAM] = { 0x80c, BIT(16) },
+ [RST_BUS_NAND] = { 0x82c, BIT(16) },
+ [RST_BUS_MMC0] = { 0x84c, BIT(16) },
+ [RST_BUS_MMC1] = { 0x84c, BIT(17) },
+ [RST_BUS_MMC2] = { 0x84c, BIT(18) },
+ [RST_BUS_UART0] = { 0x90c, BIT(16) },
+ [RST_BUS_UART1] = { 0x90c, BIT(17) },
+ [RST_BUS_UART2] = { 0x90c, BIT(18) },
+ [RST_BUS_UART3] = { 0x90c, BIT(19) },
+ [RST_BUS_UART4] = { 0x90c, BIT(20) },
+ [RST_BUS_UART5] = { 0x90c, BIT(21) },
+ [RST_BUS_I2C0] = { 0x91c, BIT(16) },
+ [RST_BUS_I2C1] = { 0x91c, BIT(17) },
+ [RST_BUS_I2C2] = { 0x91c, BIT(18) },
+ [RST_BUS_I2C3] = { 0x91c, BIT(19) },
+ [RST_BUS_I2C4] = { 0x91c, BIT(20) },
+ [RST_BUS_SPI0] = { 0x96c, BIT(16) },
+ [RST_BUS_SPI1] = { 0x96c, BIT(17) },
+ [RST_BUS_EMAC0] = { 0x97c, BIT(16) },
+ [RST_BUS_EMAC1] = { 0x97c, BIT(17) },
+ [RST_BUS_TS] = { 0x9bc, BIT(16) },
+ [RST_BUS_THS] = { 0x9fc, BIT(16) },
+ [RST_BUS_SPDIF] = { 0xa2c, BIT(16) },
+ [RST_BUS_DMIC] = { 0xa4c, BIT(16) },
+ [RST_BUS_AUDIO_CODEC] = { 0xa5c, BIT(16) },
+ [RST_BUS_AUDIO_HUB] = { 0xa6c, BIT(16) },
+
+ [RST_USB_PHY0] = { 0xa70, BIT(30) },
+ [RST_USB_PHY1] = { 0xa74, BIT(30) },
+ [RST_USB_PHY2] = { 0xa78, BIT(30) },
+ [RST_USB_PHY3] = { 0xa7c, BIT(30) },
+ [RST_BUS_OHCI0] = { 0xa8c, BIT(16) },
+ [RST_BUS_OHCI1] = { 0xa8c, BIT(17) },
+ [RST_BUS_OHCI2] = { 0xa8c, BIT(18) },
+ [RST_BUS_OHCI3] = { 0xa8c, BIT(19) },
+ [RST_BUS_EHCI0] = { 0xa8c, BIT(20) },
+ [RST_BUS_EHCI1] = { 0xa8c, BIT(21) },
+ [RST_BUS_EHCI2] = { 0xa8c, BIT(22) },
+ [RST_BUS_EHCI3] = { 0xa8c, BIT(23) },
+ [RST_BUS_OTG] = { 0xa8c, BIT(24) },
+ [RST_BUS_KEYADC] = { 0xa9c, BIT(16) },
+
+ [RST_BUS_HDMI] = { 0xb1c, BIT(16) },
+ [RST_BUS_HDMI_SUB] = { 0xb1c, BIT(17) },
+ [RST_BUS_TCON_TOP] = { 0xb5c, BIT(16) },
+ [RST_BUS_TCON_TV0] = { 0xb9c, BIT(16) },
+ [RST_BUS_TCON_TV1] = { 0xb9c, BIT(17) },
+ [RST_BUS_TVE_TOP] = { 0xbbc, BIT(16) },
+ [RST_BUS_TVE0] = { 0xbbc, BIT(17) },
+ [RST_BUS_HDCP] = { 0xc4c, BIT(16) },
+};
+
+static const struct sunxi_ccu_desc sun50i_h616_ccu_desc = {
+ .ccu_clks = sun50i_h616_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_h616_ccu_clks),
+
+ .hw_clks = &sun50i_h616_hw_clks,
+
+ .resets = sun50i_h616_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun50i_h616_ccu_resets),
+};
+
+static const u32 pll_regs[] = {
+ SUN50I_H616_PLL_CPUX_REG,
+ SUN50I_H616_PLL_DDR0_REG,
+ SUN50I_H616_PLL_DDR1_REG,
+ SUN50I_H616_PLL_PERIPH0_REG,
+ SUN50I_H616_PLL_PERIPH1_REG,
+ SUN50I_H616_PLL_GPU_REG,
+ SUN50I_H616_PLL_VIDEO0_REG,
+ SUN50I_H616_PLL_VIDEO1_REG,
+ SUN50I_H616_PLL_VIDEO2_REG,
+ SUN50I_H616_PLL_VE_REG,
+ SUN50I_H616_PLL_DE_REG,
+ SUN50I_H616_PLL_AUDIO_REG,
+};
+
+static const u32 pll_video_regs[] = {
+ SUN50I_H616_PLL_VIDEO0_REG,
+ SUN50I_H616_PLL_VIDEO1_REG,
+ SUN50I_H616_PLL_VIDEO2_REG,
+};
+
+static const u32 usb2_clk_regs[] = {
+ SUN50I_H616_USB0_CLK_REG,
+ SUN50I_H616_USB1_CLK_REG,
+ SUN50I_H616_USB2_CLK_REG,
+ SUN50I_H616_USB3_CLK_REG,
+};
+
+static void __init sun50i_h616_ccu_setup(struct device_node *node)
+{
+ void __iomem *reg;
+ u32 val;
+ int i;
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg)) {
+ pr_err("%pOF: Could not map clock registers\n", node);
+ return;
+ }
+
+ /* Enable the lock bits and the output enable bits on all PLLs */
+ for (i = 0; i < ARRAY_SIZE(pll_regs); i++) {
+ val = readl(reg + pll_regs[i]);
+ val |= BIT(29) | BIT(27);
+ writel(val, reg + pll_regs[i]);
+ }
+
+ /*
+ * Force the output divider of video PLLs to 0.
+ *
+ * See the comment before pll-video0 definition for the reason.
+ */
+ for (i = 0; i < ARRAY_SIZE(pll_video_regs); i++) {
+ val = readl(reg + pll_video_regs[i]);
+ val &= ~BIT(0);
+ writel(val, reg + pll_video_regs[i]);
+ }
+
+ /*
+	 * Force OHCI 12M clock sources to 00 (12 MHz divided from 48 MHz)
+ *
+ * This clock mux is still mysterious, and the code just enforces
+ * it to have a valid clock parent.
+ */
+ for (i = 0; i < ARRAY_SIZE(usb2_clk_regs); i++) {
+ val = readl(reg + usb2_clk_regs[i]);
+ val &= ~GENMASK(25, 24);
+ writel(val, reg + usb2_clk_regs[i]);
+ }
+
+ /*
+	 * Force the post-divider of pll-audio to 12 and its output divider
+	 * to 2, so that the 24576000 and 22579200 Hz rates can be set exactly.
+ */
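+	/*
+	 * Assuming both fields encode "divider minus one", the value 11 in
+	 * bits [21:16] selects the /12 post-divider and a set bit 0 selects
+	 * the /2 output divider.
+	 */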
+ val = readl(reg + SUN50I_H616_PLL_AUDIO_REG);
+ val &= ~(GENMASK(21, 16) | BIT(0));
+ writel(val | (11 << 16) | BIT(0), reg + SUN50I_H616_PLL_AUDIO_REG);
+
+ /*
+	 * The first clock parent (osc32K) is unusable for CEC. Since there is
+	 * no good way to force a parent switch (both parents run at the same
+	 * frequency), just set the second clock parent here.
+ */
+ val = readl(reg + SUN50I_H616_HDMI_CEC_CLK_REG);
+ val |= BIT(24);
+ writel(val, reg + SUN50I_H616_HDMI_CEC_CLK_REG);
+
+ i = sunxi_ccu_probe(node, reg, &sun50i_h616_ccu_desc);
+ if (i)
+ pr_err("%pOF: probing clocks fails: %d\n", node, i);
+}
+
+CLK_OF_DECLARE(sun50i_h616_ccu, "allwinner,sun50i-h616-ccu",
+ sun50i_h616_ccu_setup);
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.h b/drivers/clk/sunxi-ng/ccu-sun50i-h616.h
new file mode 100644
index 000000000000..dd671b413f22
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020 Arm Ltd.
+ */
+
+#ifndef _CCU_SUN50I_H616_H_
+#define _CCU_SUN50I_H616_H_
+
+#include <dt-bindings/clock/sun50i-h616-ccu.h>
+#include <dt-bindings/reset/sun50i-h616-ccu.h>
+
+#define CLK_OSC12M 0
+#define CLK_PLL_CPUX 1
+#define CLK_PLL_DDR0 2
+#define CLK_PLL_DDR1 3
+
+/* PLL_PERIPH0 exported for PRCM */
+
+#define CLK_PLL_PERIPH0_2X 5
+#define CLK_PLL_PERIPH1 6
+#define CLK_PLL_PERIPH1_2X 7
+#define CLK_PLL_GPU 8
+#define CLK_PLL_VIDEO0 9
+#define CLK_PLL_VIDEO0_4X 10
+#define CLK_PLL_VIDEO1 11
+#define CLK_PLL_VIDEO1_4X 12
+#define CLK_PLL_VIDEO2 13
+#define CLK_PLL_VIDEO2_4X 14
+#define CLK_PLL_VE 15
+#define CLK_PLL_DE 16
+#define CLK_PLL_AUDIO_HS 17
+#define CLK_PLL_AUDIO_1X 18
+#define CLK_PLL_AUDIO_2X 19
+#define CLK_PLL_AUDIO_4X 20
+
+/* CPUX clock exported for DVFS */
+
+#define CLK_AXI 22
+#define CLK_CPUX_APB 23
+#define CLK_PSI_AHB1_AHB2 24
+#define CLK_AHB3 25
+
+/* APB1 clock exported for PIO */
+
+#define CLK_APB2 27
+#define CLK_MBUS 28
+
+/* All module clocks and bus gates are exported except DRAM */
+
+#define CLK_DRAM 49
+
+#define CLK_BUS_DRAM 56
+
+#define CLK_NUMBER (CLK_BUS_HDCP + 1)
+
+#endif /* _CCU_SUN50I_H616_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index fa4ecb915590..9d3a76604d94 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -108,7 +108,7 @@ static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
max_m = cmp->m.max ?: 1 << cmp->m.width;
max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
- if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+ if (!clk_hw_can_set_rate_parent(&cmp->common.hw)) {
ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
rate = *parent_rate / p / m;
} else {
diff --git a/drivers/clk/sunxi/clk-a10-ve.c b/drivers/clk/sunxi/clk-a10-ve.c
index cb5daa4b37db..65810937a13a 100644
--- a/drivers/clk/sunxi/clk-a10-ve.c
+++ b/drivers/clk/sunxi/clk-a10-ve.c
@@ -20,7 +20,7 @@ static DEFINE_SPINLOCK(ve_lock);
#define SUN4I_VE_DIVIDER_WIDTH 3
#define SUN4I_VE_RESET 0
-/**
+/*
* sunxi_ve_reset... - reset bit in ve clk registers handling
*/
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index 0cca91e075a5..f9d715ec9908 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -14,7 +14,7 @@
#include "clk-factors.h"
-/**
+/*
* sun4i_a10_get_mod0_factors() - calculates m, n factors for MOD0-style clocks
* MOD0 rate is calculated as follows
* rate = (parent_rate >> p) / (m + 1);
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index e1aa1fbac48a..5fe7049ea693 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -23,7 +23,7 @@ static DEFINE_SPINLOCK(clk_lock);
/* Maximum number of parents our clocks have */
#define SUNXI_MAX_PARENTS 5
-/**
+/*
* sun4i_get_pll1_factors() - calculates n, k, m, p factors for PLL1
* PLL1 rate is calculated as follows
* rate = (parent_rate * n * (k + 1) >> p) / (m + 1);
@@ -71,7 +71,7 @@ static void sun4i_get_pll1_factors(struct factors_request *req)
req->n = div / 4;
}
-/**
+/*
* sun6i_a31_get_pll1_factors() - calculates n, k and m factors for PLL1
* PLL1 rate is calculated as follows
* rate = parent_rate * (n + 1) * (k + 1) / (m + 1);
@@ -147,7 +147,7 @@ static void sun6i_a31_get_pll1_factors(struct factors_request *req)
}
}
-/**
+/*
* sun8i_a23_get_pll1_factors() - calculates n, k, m, p factors for PLL1
* PLL1 rate is calculated as follows
* rate = (parent_rate * (n + 1) * (k + 1) >> p) / (m + 1);
@@ -191,7 +191,7 @@ static void sun8i_a23_get_pll1_factors(struct factors_request *req)
req->n = div / 4 - 1;
}
-/**
+/*
* sun4i_get_pll5_factors() - calculates n, k factors for PLL5
* PLL5 rate is calculated as follows
* rate = parent_rate * n * (k + 1)
@@ -218,7 +218,7 @@ static void sun4i_get_pll5_factors(struct factors_request *req)
req->n = DIV_ROUND_UP(div, (req->k + 1));
}
-/**
+/*
* sun6i_a31_get_pll6_factors() - calculates n, k factors for A31 PLL6x2
* PLL6x2 rate is calculated as follows
* rate = parent_rate * (n + 1) * (k + 1)
@@ -240,7 +240,7 @@ static void sun6i_a31_get_pll6_factors(struct factors_request *req)
req->n = DIV_ROUND_UP(div, (req->k + 1)) - 1;
}
-/**
+/*
* sun5i_a13_get_ahb_factors() - calculates m, p factors for AHB
* AHB rate is calculated as follows
* rate = parent_rate >> p
@@ -276,7 +276,7 @@ static void sun5i_a13_get_ahb_factors(struct factors_request *req)
#define SUN6I_AHB1_PARENT_PLL6 3
-/**
+/*
* sun6i_a31_get_ahb_factors() - calculates m, p factors for AHB
* AHB rate is calculated as follows
* rate = parent_rate >> p
@@ -320,7 +320,7 @@ static void sun6i_get_ahb1_factors(struct factors_request *req)
req->m = calcm - 1;
}
-/**
+/*
* sun6i_ahb1_recalc() - calculates AHB clock rate from m, p factors and
* parent index
*/
@@ -336,7 +336,7 @@ static void sun6i_ahb1_recalc(struct factors_request *req)
req->rate >>= req->p;
}
-/**
+/*
* sun4i_get_apb1_factors() - calculates m, p factors for APB1
* APB1 rate is calculated as follows
* rate = (parent_rate >> p) / (m + 1);
@@ -375,7 +375,7 @@ static void sun4i_get_apb1_factors(struct factors_request *req)
-/**
+/*
* sun7i_a20_get_out_factors() - calculates m, p factors for CLK_OUT_A/B
* CLK_OUT rate is calculated as follows
* rate = (parent_rate >> p) / (m + 1);
@@ -408,7 +408,7 @@ static void sun7i_a20_get_out_factors(struct factors_request *req)
req->p = calcp;
}
-/**
+/*
* sunxi_factors_clk_setup() - Setup function for factor clocks
*/
@@ -625,7 +625,7 @@ CLK_OF_DECLARE(sun7i_out, "allwinner,sun7i-a20-out-clk",
sun7i_out_clk_setup);
-/**
+/*
* sunxi_mux_clk_setup() - Setup function for muxes
*/
@@ -717,7 +717,7 @@ CLK_OF_DECLARE(sun8i_ahb2, "allwinner,sun8i-h3-ahb2-clk",
sun8i_ahb2_clk_setup);
-/**
+/*
* sunxi_divider_clk_setup() - Setup function for simple divider clocks
*/
@@ -853,7 +853,7 @@ CLK_OF_DECLARE(sun8i_axi, "allwinner,sun8i-a23-axi-clk",
-/**
+/*
* sunxi_gates_clk_setup() - Setup function for leaf gates on clocks
*/
@@ -863,7 +863,7 @@ struct gates_data {
DECLARE_BITMAP(mask, SUNXI_GATES_MAX_SIZE);
};
-/**
+/*
* sunxi_divs_clk_setup() helper data
*/
@@ -929,7 +929,7 @@ static const struct divs_data sun6i_a31_pll6_divs_data __initconst = {
}
};
-/**
+/*
* sunxi_divs_clk_setup() - Setup function for leaf divisors on clocks
*
* These clocks look something like this
diff --git a/drivers/clk/tegra/Kconfig b/drivers/clk/tegra/Kconfig
index deaa4605824c..90df619dc087 100644
--- a/drivers/clk/tegra/Kconfig
+++ b/drivers/clk/tegra/Kconfig
@@ -7,3 +7,6 @@ config TEGRA_CLK_DFLL
depends on ARCH_TEGRA_124_SOC || ARCH_TEGRA_210_SOC
select PM_OPP
def_bool y
+
+config TEGRA124_CLK_EMC
+ bool
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index eec2313fd37e..7b1816856eb5 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra20-emc.o
obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o
obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o
obj-$(CONFIG_TEGRA_CLK_DFLL) += clk-tegra124-dfll-fcpu.o
-obj-$(CONFIG_TEGRA124_EMC) += clk-tegra124-emc.o
+obj-$(CONFIG_TEGRA124_CLK_EMC) += clk-tegra124-emc.o
obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o
obj-y += cvb.o
obj-$(CONFIG_ARCH_TEGRA_210_SOC) += clk-tegra210.o
diff --git a/drivers/clk/tegra/clk-tegra124-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c
index 745f9faa98d8..bdf6f4a51617 100644
--- a/drivers/clk/tegra/clk-tegra124-emc.c
+++ b/drivers/clk/tegra/clk-tegra124-emc.c
@@ -11,7 +11,9 @@
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/clk/tegra.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -21,7 +23,6 @@
#include <linux/string.h>
#include <soc/tegra/fuse.h>
-#include <soc/tegra/emc.h>
#include "clk.h"
@@ -80,6 +81,9 @@ struct tegra_clk_emc {
int num_timings;
struct emc_timing *timings;
spinlock_t *lock;
+
+ tegra124_emc_prepare_timing_change_cb *prepare_timing_change;
+ tegra124_emc_complete_timing_change_cb *complete_timing_change;
};
/* Common clock framework callback implementations */
@@ -176,6 +180,9 @@ static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra)
if (tegra->emc)
return tegra->emc;
+ if (!tegra->prepare_timing_change || !tegra->complete_timing_change)
+ return NULL;
+
if (!tegra->emc_node)
return NULL;
@@ -241,7 +248,7 @@ static int emc_set_timing(struct tegra_clk_emc *tegra,
div = timing->parent_rate / (timing->rate / 2) - 2;
- err = tegra_emc_prepare_timing_change(emc, timing->rate);
+ err = tegra->prepare_timing_change(emc, timing->rate);
if (err)
return err;
@@ -259,7 +266,7 @@ static int emc_set_timing(struct tegra_clk_emc *tegra,
spin_unlock_irqrestore(tegra->lock, flags);
- tegra_emc_complete_timing_change(emc, timing->rate);
+ tegra->complete_timing_change(emc, timing->rate);
clk_hw_reparent(&tegra->hw, __clk_get_hw(timing->parent));
clk_disable_unprepare(tegra->prev_parent);
@@ -473,8 +480,8 @@ static const struct clk_ops tegra_clk_emc_ops = {
.get_parent = emc_get_parent,
};
-struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
- spinlock_t *lock)
+struct clk *tegra124_clk_register_emc(void __iomem *base, struct device_node *np,
+ spinlock_t *lock)
{
struct tegra_clk_emc *tegra;
struct clk_init_data init;
@@ -538,3 +545,27 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
return clk;
};
+
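+/*
+ * Intended to be called by the EMC (memory controller) driver once it has
+ * probed, so that EMC rate changes can be coordinated with the memory
+ * timing programming. Until these callbacks are set,
+ * emc_ensure_emc_driver() returns NULL and requests for the EMC clock are
+ * probe-deferred (see tegra124_clk_emc_driver_available()).
+ */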
+void tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb,
+ tegra124_emc_complete_timing_change_cb *complete_cb)
+{
+ struct clk *clk = __clk_lookup("emc");
+ struct tegra_clk_emc *tegra;
+ struct clk_hw *hw;
+
+ if (clk) {
+ hw = __clk_get_hw(clk);
+ tegra = container_of(hw, struct tegra_clk_emc, hw);
+
+ tegra->prepare_timing_change = prep_cb;
+ tegra->complete_timing_change = complete_cb;
+ }
+}
+EXPORT_SYMBOL_GPL(tegra124_clk_set_emc_callbacks);
+
+bool tegra124_clk_emc_driver_available(struct clk_hw *hw)
+{
+ struct tegra_clk_emc *tegra = container_of(hw, struct tegra_clk_emc, hw);
+
+ return tegra->prepare_timing_change && tegra->complete_timing_change;
+}
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index e931319dcc9d..934520aab6e3 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1500,6 +1500,26 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np)
writel(plld_base, clk_base + PLLD_BASE);
}
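+
+/*
+ * Wrapper around of_clk_src_onecell_get(): requests for the EMC clock return
+ * -EPROBE_DEFER until the EMC driver has registered its timing-change
+ * callbacks, since EMC rate changes cannot be carried out before then.
+ */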
+static struct clk *tegra124_clk_src_onecell_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct clk_hw *hw;
+ struct clk *clk;
+
+ clk = of_clk_src_onecell_get(clkspec, data);
+ if (IS_ERR(clk))
+ return clk;
+
+ hw = __clk_get_hw(clk);
+
+ if (clkspec->args[0] == TEGRA124_CLK_EMC) {
+ if (!tegra124_clk_emc_driver_available(hw))
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return clk;
+}
+
/**
* tegra124_132_clock_init_post - clock initialization postamble for T124/T132
* @np: struct device_node * of the DT node for the SoC CAR IP block
@@ -1516,10 +1536,10 @@ static void __init tegra124_132_clock_init_post(struct device_node *np)
&pll_x_params);
tegra_init_special_resets(1, tegra124_reset_assert,
tegra124_reset_deassert);
- tegra_add_of_provider(np, of_clk_src_onecell_get);
+ tegra_add_of_provider(np, tegra124_clk_src_onecell_get);
- clks[TEGRA124_CLK_EMC] = tegra_clk_register_emc(clk_base, np,
- &emc_lock);
+ clks[TEGRA124_CLK_EMC] = tegra124_clk_register_emc(clk_base, np,
+ &emc_lock);
tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 9cf249c344d9..16dbf83d2f62 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1104,12 +1104,9 @@ static void tegra30_cpu_out_of_reset(u32 cpu)
static void tegra30_enable_cpu_clock(u32 cpu)
{
- unsigned int reg;
-
writel(CPU_CLOCK(cpu),
clk_base + TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR);
- reg = readl(clk_base +
- TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR);
+ readl(clk_base + TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR);
}
static void tegra30_disable_cpu_clock(u32 cpu)
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 6b565f6b5f66..c3e36b5dcc75 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -881,16 +881,22 @@ void tegra_super_clk_gen5_init(void __iomem *clk_base,
void __iomem *pmc_base, struct tegra_clk *tegra_clks,
struct tegra_clk_pll_params *pll_params);
-#ifdef CONFIG_TEGRA124_EMC
-struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
- spinlock_t *lock);
+#ifdef CONFIG_TEGRA124_CLK_EMC
+struct clk *tegra124_clk_register_emc(void __iomem *base, struct device_node *np,
+ spinlock_t *lock);
+bool tegra124_clk_emc_driver_available(struct clk_hw *emc_hw);
#else
-static inline struct clk *tegra_clk_register_emc(void __iomem *base,
- struct device_node *np,
- spinlock_t *lock)
+static inline struct clk *
+tegra124_clk_register_emc(void __iomem *base, struct device_node *np,
+ spinlock_t *lock)
{
return NULL;
}
+
+static inline bool tegra124_clk_emc_driver_available(struct clk_hw *emc_hw)
+{
+ return false;
+}
#endif
void tegra114_clock_tune_cpu_trimmers_high(void);
diff --git a/drivers/clk/tegra/cvb.c b/drivers/clk/tegra/cvb.c
index 21115c4e5d3a..a7fdc7622913 100644
--- a/drivers/clk/tegra/cvb.c
+++ b/drivers/clk/tegra/cvb.c
@@ -86,6 +86,7 @@ static int build_opp_table(struct device *dev, const struct cvb_table *table,
* @dev: the struct device * for which the OPP table is built
* @tables: array of CVB tables
* @count: size of the previously mentioned array
+ * @align: parameters of the regulator step and offset
* @process_id: process id of the HW module
* @speedo_id: speedo id of the HW module
* @speedo_value: speedo value of the HW module
diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c
index 87ece6cd4226..dfaa4d1f0b64 100644
--- a/drivers/clk/ti/clkt_dpll.c
+++ b/drivers/clk/ti/clkt_dpll.c
@@ -269,8 +269,9 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
/**
* omap2_dpll_round_rate - round a target rate for an OMAP DPLL
- * @clk: struct clk * for a DPLL
+ * @hw: struct clk_hw containing the struct clk * for a DPLL
* @target_rate: desired DPLL clock rate
+ * @parent_rate: parent's DPLL clock rate
*
* Given a DPLL and a desired target rate, round the target rate to a
* possible, programmable rate for this DPLL. Attempts to select the
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
index 700b7f44f671..74831b2752b3 100644
--- a/drivers/clk/ti/clockdomain.c
+++ b/drivers/clk/ti/clockdomain.c
@@ -97,7 +97,7 @@ void omap2_clkops_disable_clkdm(struct clk_hw *hw)
/**
* omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk
- * @clk: OMAP clock struct ptr to use
+ * @hw: Pointer to clk_hw_omap used to obtain OMAP clock struct ptr to use
*
* Convert a clockdomain name stored in a struct clk 'clk' into a
* clockdomain pointer, and save it into the struct clk. Intended to be
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 247510e306e2..d6f1ac5b53e1 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -151,7 +151,7 @@ static const struct clk_ops dpll_x2_ck_ops = {
/**
* _register_dpll - low level registration of a DPLL clock
- * @hw: hardware clock definition for the clock
+ * @user: pointer to the hardware clock definition for the clock
* @node: device node for the clock
*
* Finalizes DPLL registration process. In case a failure (clk-ref or
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 2490026948b4..6097b099a5df 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -125,7 +125,7 @@ static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
return f;
}
-/*
+/**
* _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
* @clk: pointer to a DPLL struct clk
*
@@ -168,7 +168,7 @@ done:
return r;
}
-/*
+/**
* _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness
* @clk: pointer to a DPLL struct clk
*
@@ -204,7 +204,7 @@ static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk)
return r;
}
-/*
+/**
* _omap3_noncore_dpll_stop - instruct a DPLL to stop
* @clk: pointer to a DPLL struct clk
*
@@ -291,7 +291,7 @@ static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
*sd_div = sd;
}
-/*
+/**
* _omap3_noncore_dpll_program - set non-core DPLL M,N values directly
* @clk: struct clk * of DPLL to set
* @freqsel: FREQSEL value to set
@@ -406,7 +406,8 @@ static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
/**
* omap3_dpll_recalc - recalculate DPLL rate
- * @clk: DPLL struct clk
+ * @hw: struct clk_hw containing the DPLL struct clk
+ * @parent_rate: clock rate of the DPLL parent
*
* Recalculate and propagate the DPLL rate.
*/
@@ -421,7 +422,7 @@ unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate)
/**
* omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
- * @clk: pointer to a DPLL struct clk
+ * @hw: struct clk_hw containing then pointer to a DPLL struct clk
*
* Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
* The choice of modes depends on the DPLL's programmed rate: if it is
@@ -470,7 +471,7 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
/**
* omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop
- * @clk: pointer to a DPLL struct clk
+ * @hw: struct clk_hw containing then pointer to a DPLL struct clk
*
* Instructs a non-CORE DPLL to enter low-power stop. This function is
* intended for use in struct clkops. No return value.
@@ -745,7 +746,8 @@ static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
/**
* omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
- * @clk: DPLL output struct clk
+ * @hw: pointer struct clk_hw
+ * @parent_rate: clock rate of the DPLL parent
*
* Using parent clock DPLL data, look up DPLL state. If locked, set our
* rate to the dpll_clk * 2; otherwise, just use dpll_clk.
@@ -913,7 +915,7 @@ const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
* omap3_dpll4_set_rate - set rate for omap3 per-dpll
* @hw: clock to change
* @rate: target rate for clock
- * @parent_rate: rate of the parent clock
+ * @parent_rate: clock rate of the DPLL parent
*
* Check if the current SoC supports the per-dpll reprogram operation
* or not, and then do the rate change if supported. Returns -EINVAL
diff --git a/drivers/clk/ti/dpll44xx.c b/drivers/clk/ti/dpll44xx.c
index 89c3ed1a24b8..3fc2cab69a3f 100644
--- a/drivers/clk/ti/dpll44xx.c
+++ b/drivers/clk/ti/dpll44xx.c
@@ -102,7 +102,8 @@ static void omap4_dpll_lpmode_recalc(struct dpll_data *dd)
/**
* omap4_dpll_regm4xen_recalc - compute DPLL rate, considering REGM4XEN bit
- * @clk: struct clk * of the DPLL to compute the rate for
+ * @hw: pointer to the clock to compute the rate for
+ * @parent_rate: clock rate of the DPLL parent
*
* Compute the output rate for the OMAP4 DPLL represented by @clk.
* Takes the REGM4XEN bit into consideration, which is needed for the
@@ -134,8 +135,9 @@ unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
/**
* omap4_dpll_regm4xen_round_rate - round DPLL rate, considering REGM4XEN bit
- * @clk: struct clk * of the DPLL to round a rate for
+ * @hw: struct hw_clk containing the struct clk * of the DPLL to round a rate for
* @target_rate: the desired rate of the DPLL
+ * @parent_rate: clock rate of the DPLL parent
*
* Compute the rate that would be programmed into the DPLL hardware
* for @clk if set_rate() were to be provided with the rate
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index 42389558418c..b1d0fdb40a75 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -55,7 +55,7 @@ static const struct clk_ops omap_gate_clk_hsdiv_restore_ops = {
/**
* omap36xx_gate_clk_enable_with_hsdiv_restore - enable clocks suffering
* from HSDivider PWRDN problem Implements Errata ID: i556.
- * @clk: DPLL output struct clk
+ * @hw: DPLL output struct clk_hw
*
* 3630 only: dpll3_m3_ck, dpll4_m2_ck, dpll4_m3_ck, dpll4_m4_ck,
* dpll4_m5_ck & dpll4_m6_ck dividers gets loaded with reset
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index 692be2fd9261..fdd6aa3cb1fc 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -36,8 +36,9 @@
/**
* struct clk_icst - ICST VCO clock wrapper
* @hw: corresponding clock hardware entry
- * @vcoreg: VCO register address
- * @lockreg: VCO lock register address
+ * @map: register map
+ * @vcoreg_off: VCO register address
+ * @lockreg_off: VCO lock register address
* @params: parameters for this ICST instance
* @rate: current rate
* @ctype: the type of control register for the ICST
@@ -428,7 +429,7 @@ static const struct icst_params icst307_params = {
.idx2s = icst307_idx2s,
};
-/**
+/*
* The core modules on the Integrator/AP and Integrator/CP have
* especially crippled ICST525 control.
*/
diff --git a/drivers/clk/xilinx/Kconfig b/drivers/clk/xilinx/Kconfig
new file mode 100644
index 000000000000..5224114176ed
--- /dev/null
+++ b/drivers/clk/xilinx/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config XILINX_VCU
+ tristate "Xilinx VCU logicoreIP Init"
+ depends on HAS_IOMEM && COMMON_CLK
+ select REGMAP_MMIO
+ help
+ Provides the driver to enable and disable the isolation between the
+ processing system and programmable logic part by using the logicoreIP
+ register set. This driver also configures the frequency based on the
+ clock information from the logicoreIP register set.
+
+ If you say yes here you get support for the logicoreIP.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xlnx_vcu.
+
diff --git a/drivers/clk/xilinx/Makefile b/drivers/clk/xilinx/Makefile
new file mode 100644
index 000000000000..dee8fd51e303
--- /dev/null
+++ b/drivers/clk/xilinx/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_XILINX_VCU) += xlnx_vcu.o
diff --git a/drivers/clk/xilinx/xlnx_vcu.c b/drivers/clk/xilinx/xlnx_vcu.c
new file mode 100644
index 000000000000..d66b1315114e
--- /dev/null
+++ b/drivers/clk/xilinx/xlnx_vcu.c
@@ -0,0 +1,743 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx VCU Init
+ *
+ * Copyright (C) 2016 - 2017 Xilinx, Inc.
+ *
+ * Contact: Dhaval Shah <dshah@xilinx.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/xlnx-vcu.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/xlnx-vcu.h>
+
+#define VCU_PLL_CTRL 0x24
+#define VCU_PLL_CTRL_RESET BIT(0)
+#define VCU_PLL_CTRL_POR_IN BIT(1)
+#define VCU_PLL_CTRL_PWR_POR BIT(2)
+#define VCU_PLL_CTRL_BYPASS BIT(3)
+#define VCU_PLL_CTRL_FBDIV GENMASK(14, 8)
+#define VCU_PLL_CTRL_CLKOUTDIV GENMASK(18, 16)
+
+#define VCU_PLL_CFG 0x28
+#define VCU_PLL_CFG_RES GENMASK(3, 0)
+#define VCU_PLL_CFG_CP GENMASK(8, 5)
+#define VCU_PLL_CFG_LFHF GENMASK(12, 10)
+#define VCU_PLL_CFG_LOCK_CNT GENMASK(22, 13)
+#define VCU_PLL_CFG_LOCK_DLY GENMASK(31, 25)
+#define VCU_ENC_CORE_CTRL 0x30
+#define VCU_ENC_MCU_CTRL 0x34
+#define VCU_DEC_CORE_CTRL 0x38
+#define VCU_DEC_MCU_CTRL 0x3c
+#define VCU_PLL_STATUS 0x60
+#define VCU_PLL_STATUS_LOCK_STATUS BIT(0)
+
+#define MHZ 1000000
+#define FVCO_MIN (1500U * MHZ)
+#define FVCO_MAX (3000U * MHZ)
+
+/**
+ * struct xvcu_device - Xilinx VCU init device structure
+ * @dev: Platform device
+ * @pll_ref: pll ref clock source
+ * @aclk: axi clock source
+ * @logicore_reg_ba: logicore reg base address
+ * @vcu_slcr_ba: vcu_slcr Register base address
+ * @pll: handle for the VCU PLL
+ * @pll_post: handle for the VCU PLL post divider
+ * @clk_data: clocks provided by the vcu clock provider
+ */
+struct xvcu_device {
+ struct device *dev;
+ struct clk *pll_ref;
+ struct clk *aclk;
+ struct regmap *logicore_reg_ba;
+ void __iomem *vcu_slcr_ba;
+ struct clk_hw *pll;
+ struct clk_hw *pll_post;
+ struct clk_hw_onecell_data *clk_data;
+};
+
+static struct regmap_config vcu_settings_regmap_config = {
+ .name = "regmap",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0xfff,
+ .cache_type = REGCACHE_NONE,
+};
+
+/**
+ * struct xvcu_pll_cfg - Helper data
+ * @fbdiv: The integer portion of the feedback divider to the PLL
+ * @cp: PLL charge pump control
+ * @res: PLL loop filter resistor control
+ * @lfhf: PLL loop filter high frequency capacitor control
+ * @lock_dly:	Lock circuit configuration settings for the lock window size
+ * @lock_cnt: Lock circuit counter setting
+ */
+struct xvcu_pll_cfg {
+ u32 fbdiv;
+ u32 cp;
+ u32 res;
+ u32 lfhf;
+ u32 lock_dly;
+ u32 lock_cnt;
+};
+
+static const struct xvcu_pll_cfg xvcu_pll_cfg[] = {
+ { 25, 3, 10, 3, 63, 1000 },
+ { 26, 3, 10, 3, 63, 1000 },
+ { 27, 4, 6, 3, 63, 1000 },
+ { 28, 4, 6, 3, 63, 1000 },
+ { 29, 4, 6, 3, 63, 1000 },
+ { 30, 4, 6, 3, 63, 1000 },
+ { 31, 6, 1, 3, 63, 1000 },
+ { 32, 6, 1, 3, 63, 1000 },
+ { 33, 4, 10, 3, 63, 1000 },
+ { 34, 5, 6, 3, 63, 1000 },
+ { 35, 5, 6, 3, 63, 1000 },
+ { 36, 5, 6, 3, 63, 1000 },
+ { 37, 5, 6, 3, 63, 1000 },
+ { 38, 5, 6, 3, 63, 975 },
+ { 39, 3, 12, 3, 63, 950 },
+ { 40, 3, 12, 3, 63, 925 },
+ { 41, 3, 12, 3, 63, 900 },
+ { 42, 3, 12, 3, 63, 875 },
+ { 43, 3, 12, 3, 63, 850 },
+ { 44, 3, 12, 3, 63, 850 },
+ { 45, 3, 12, 3, 63, 825 },
+ { 46, 3, 12, 3, 63, 800 },
+ { 47, 3, 12, 3, 63, 775 },
+ { 48, 3, 12, 3, 63, 775 },
+ { 49, 3, 12, 3, 63, 750 },
+ { 50, 3, 12, 3, 63, 750 },
+ { 51, 3, 2, 3, 63, 725 },
+ { 52, 3, 2, 3, 63, 700 },
+ { 53, 3, 2, 3, 63, 700 },
+ { 54, 3, 2, 3, 63, 675 },
+ { 55, 3, 2, 3, 63, 675 },
+ { 56, 3, 2, 3, 63, 650 },
+ { 57, 3, 2, 3, 63, 650 },
+ { 58, 3, 2, 3, 63, 625 },
+ { 59, 3, 2, 3, 63, 625 },
+ { 60, 3, 2, 3, 63, 625 },
+ { 61, 3, 2, 3, 63, 600 },
+ { 62, 3, 2, 3, 63, 600 },
+ { 63, 3, 2, 3, 63, 600 },
+ { 64, 3, 2, 3, 63, 600 },
+ { 65, 3, 2, 3, 63, 600 },
+ { 66, 3, 2, 3, 63, 600 },
+ { 67, 3, 2, 3, 63, 600 },
+ { 68, 3, 2, 3, 63, 600 },
+ { 69, 3, 2, 3, 63, 600 },
+ { 70, 3, 2, 3, 63, 600 },
+ { 71, 3, 2, 3, 63, 600 },
+ { 72, 3, 2, 3, 63, 600 },
+ { 73, 3, 2, 3, 63, 600 },
+ { 74, 3, 2, 3, 63, 600 },
+ { 75, 3, 2, 3, 63, 600 },
+ { 76, 3, 2, 3, 63, 600 },
+ { 77, 3, 2, 3, 63, 600 },
+ { 78, 3, 2, 3, 63, 600 },
+ { 79, 3, 2, 3, 63, 600 },
+ { 80, 3, 2, 3, 63, 600 },
+ { 81, 3, 2, 3, 63, 600 },
+ { 82, 3, 2, 3, 63, 600 },
+ { 83, 4, 2, 3, 63, 600 },
+ { 84, 4, 2, 3, 63, 600 },
+ { 85, 4, 2, 3, 63, 600 },
+ { 86, 4, 2, 3, 63, 600 },
+ { 87, 4, 2, 3, 63, 600 },
+ { 88, 4, 2, 3, 63, 600 },
+ { 89, 4, 2, 3, 63, 600 },
+ { 90, 4, 2, 3, 63, 600 },
+ { 91, 4, 2, 3, 63, 600 },
+ { 92, 4, 2, 3, 63, 600 },
+ { 93, 4, 2, 3, 63, 600 },
+ { 94, 4, 2, 3, 63, 600 },
+ { 95, 4, 2, 3, 63, 600 },
+ { 96, 4, 2, 3, 63, 600 },
+ { 97, 4, 2, 3, 63, 600 },
+ { 98, 4, 2, 3, 63, 600 },
+ { 99, 4, 2, 3, 63, 600 },
+ { 100, 4, 2, 3, 63, 600 },
+ { 101, 4, 2, 3, 63, 600 },
+ { 102, 4, 2, 3, 63, 600 },
+ { 103, 5, 2, 3, 63, 600 },
+ { 104, 5, 2, 3, 63, 600 },
+ { 105, 5, 2, 3, 63, 600 },
+ { 106, 5, 2, 3, 63, 600 },
+ { 107, 3, 4, 3, 63, 600 },
+ { 108, 3, 4, 3, 63, 600 },
+ { 109, 3, 4, 3, 63, 600 },
+ { 110, 3, 4, 3, 63, 600 },
+ { 111, 3, 4, 3, 63, 600 },
+ { 112, 3, 4, 3, 63, 600 },
+ { 113, 3, 4, 3, 63, 600 },
+ { 114, 3, 4, 3, 63, 600 },
+ { 115, 3, 4, 3, 63, 600 },
+ { 116, 3, 4, 3, 63, 600 },
+ { 117, 3, 4, 3, 63, 600 },
+ { 118, 3, 4, 3, 63, 600 },
+ { 119, 3, 4, 3, 63, 600 },
+ { 120, 3, 4, 3, 63, 600 },
+ { 121, 3, 4, 3, 63, 600 },
+ { 122, 3, 4, 3, 63, 600 },
+ { 123, 3, 4, 3, 63, 600 },
+ { 124, 3, 4, 3, 63, 600 },
+ { 125, 3, 4, 3, 63, 600 },
+};
+
+/**
+ * xvcu_read - Read from the VCU register space
+ * @iomem: vcu reg space base address
+ * @offset: vcu reg offset from base
+ *
+ * Return: Returns the 32-bit value read from the specified VCU register
+ *
+ */
+static inline u32 xvcu_read(void __iomem *iomem, u32 offset)
+{
+ return ioread32(iomem + offset);
+}
+
+/**
+ * xvcu_write - Write to the VCU register space
+ * @iomem: vcu reg space base address
+ * @offset: vcu reg offset from base
+ * @value: Value to write
+ */
+static inline void xvcu_write(void __iomem *iomem, u32 offset, u32 value)
+{
+ iowrite32(value, iomem + offset);
+}
+
+#define to_vcu_pll(_hw) container_of(_hw, struct vcu_pll, hw)
+
+struct vcu_pll {
+ struct clk_hw hw;
+ void __iomem *reg_base;
+ unsigned long fvco_min;
+ unsigned long fvco_max;
+};
+
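+/* Poll the PLL lock status bit until it is set, giving up after 2 seconds. */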
+static int xvcu_pll_wait_for_lock(struct vcu_pll *pll)
+{
+ void __iomem *base = pll->reg_base;
+ unsigned long timeout;
+ u32 lock_status;
+
+ timeout = jiffies + msecs_to_jiffies(2000);
+ do {
+ lock_status = xvcu_read(base, VCU_PLL_STATUS);
+ if (lock_status & VCU_PLL_STATUS_LOCK_STATUS)
+ return 0;
+ } while (!time_after(jiffies, timeout));
+
+ return -ETIMEDOUT;
+}
+
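+/*
+ * The PLL post divider is modelled as a fixed 1/2 factor clock, so refuse
+ * to register it unless the hardware CLKOUTDIV field is programmed for /2.
+ */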
+static struct clk_hw *xvcu_register_pll_post(struct device *dev,
+ const char *name,
+ const struct clk_hw *parent_hw,
+ void __iomem *reg_base)
+{
+ u32 div;
+ u32 vcu_pll_ctrl;
+
+ /*
+ * The output divider of the PLL must be set to 1/2 to meet the
+ * timing in the design.
+ */
+ vcu_pll_ctrl = xvcu_read(reg_base, VCU_PLL_CTRL);
+ div = FIELD_GET(VCU_PLL_CTRL_CLKOUTDIV, vcu_pll_ctrl);
+ if (div != 1)
+ return ERR_PTR(-EINVAL);
+
+ return clk_hw_register_fixed_factor(dev, "vcu_pll_post",
+ clk_hw_get_name(parent_hw),
+ CLK_SET_RATE_PARENT, 1, 2);
+}
+
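+/* Look up the helper table entry matching the requested feedback divider. */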
+static const struct xvcu_pll_cfg *xvcu_find_cfg(int div)
+{
+ const struct xvcu_pll_cfg *cfg = NULL;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xvcu_pll_cfg); i++)
+ if (xvcu_pll_cfg[i].fbdiv == div)
+ cfg = &xvcu_pll_cfg[i];
+
+ return cfg;
+}
+
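+/* Program the feedback divider and the matching loop filter/lock settings. */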
+static int xvcu_pll_set_div(struct vcu_pll *pll, int div)
+{
+ void __iomem *base = pll->reg_base;
+ const struct xvcu_pll_cfg *cfg = NULL;
+ u32 vcu_pll_ctrl;
+ u32 cfg_val;
+
+ cfg = xvcu_find_cfg(div);
+ if (!cfg)
+ return -EINVAL;
+
+ vcu_pll_ctrl = xvcu_read(base, VCU_PLL_CTRL);
+ vcu_pll_ctrl &= ~VCU_PLL_CTRL_FBDIV;
+ vcu_pll_ctrl |= FIELD_PREP(VCU_PLL_CTRL_FBDIV, cfg->fbdiv);
+ xvcu_write(base, VCU_PLL_CTRL, vcu_pll_ctrl);
+
+ cfg_val = FIELD_PREP(VCU_PLL_CFG_RES, cfg->res) |
+ FIELD_PREP(VCU_PLL_CFG_CP, cfg->cp) |
+ FIELD_PREP(VCU_PLL_CFG_LFHF, cfg->lfhf) |
+ FIELD_PREP(VCU_PLL_CFG_LOCK_CNT, cfg->lock_cnt) |
+ FIELD_PREP(VCU_PLL_CFG_LOCK_DLY, cfg->lock_dly);
+ xvcu_write(base, VCU_PLL_CFG, cfg_val);
+
+ return 0;
+}
+
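+/*
+ * Clamp the requested rate to the VCO range and round it to an integer
+ * multiple (25..125) of the parent rate.
+ */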
+static long xvcu_pll_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ unsigned int feedback_div;
+
+ rate = clamp_t(unsigned long, rate, pll->fvco_min, pll->fvco_max);
+
+ feedback_div = DIV_ROUND_CLOSEST_ULL(rate, *parent_rate);
+ feedback_div = clamp_t(unsigned int, feedback_div, 25, 125);
+
+ return *parent_rate * feedback_div;
+}
+
+static unsigned long xvcu_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ void __iomem *base = pll->reg_base;
+ unsigned int div;
+ u32 vcu_pll_ctrl;
+
+ vcu_pll_ctrl = xvcu_read(base, VCU_PLL_CTRL);
+ div = FIELD_GET(VCU_PLL_CTRL_FBDIV, vcu_pll_ctrl);
+
+ return div * parent_rate;
+}
+
+static int xvcu_pll_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+
+ return xvcu_pll_set_div(pll, rate / parent_rate);
+}
+
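+/*
+ * Bypass the PLL while it is taken out of reset, wait for it to lock and
+ * only then remove the bypass.
+ */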
+static int xvcu_pll_enable(struct clk_hw *hw)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ void __iomem *base = pll->reg_base;
+ u32 vcu_pll_ctrl;
+ int ret;
+
+ vcu_pll_ctrl = xvcu_read(base, VCU_PLL_CTRL);
+ vcu_pll_ctrl |= VCU_PLL_CTRL_BYPASS;
+ xvcu_write(base, VCU_PLL_CTRL, vcu_pll_ctrl);
+
+ vcu_pll_ctrl = xvcu_read(base, VCU_PLL_CTRL);
+ vcu_pll_ctrl &= ~VCU_PLL_CTRL_POR_IN;
+ vcu_pll_ctrl &= ~VCU_PLL_CTRL_PWR_POR;
+ vcu_pll_ctrl &= ~VCU_PLL_CTRL_RESET;
+ xvcu_write(base, VCU_PLL_CTRL, vcu_pll_ctrl);
+
+ ret = xvcu_pll_wait_for_lock(pll);
+ if (ret) {
+ pr_err("VCU PLL is not locked\n");
+ goto err;
+ }
+
+ vcu_pll_ctrl = xvcu_read(base, VCU_PLL_CTRL);
+ vcu_pll_ctrl &= ~VCU_PLL_CTRL_BYPASS;
+ xvcu_write(base, VCU_PLL_CTRL, vcu_pll_ctrl);
+
+err:
+ return ret;
+}
+
+static void xvcu_pll_disable(struct clk_hw *hw)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ void __iomem *base = pll->reg_base;
+ u32 vcu_pll_ctrl;
+
+ vcu_pll_ctrl = xvcu_read(base, VCU_PLL_CTRL);
+ vcu_pll_ctrl |= VCU_PLL_CTRL_POR_IN;
+ vcu_pll_ctrl |= VCU_PLL_CTRL_PWR_POR;
+ vcu_pll_ctrl |= VCU_PLL_CTRL_RESET;
+ xvcu_write(base, VCU_PLL_CTRL, vcu_pll_ctrl);
+}
+
+static const struct clk_ops vcu_pll_ops = {
+ .enable = xvcu_pll_enable,
+ .disable = xvcu_pll_disable,
+ .round_rate = xvcu_pll_round_rate,
+ .recalc_rate = xvcu_pll_recalc_rate,
+ .set_rate = xvcu_pll_set_rate,
+};
+
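+/* Register the custom VCU PLL and constrain its rate to the VCO limits. */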
+static struct clk_hw *xvcu_register_pll(struct device *dev,
+ void __iomem *reg_base,
+ const char *name, const char *parent,
+ unsigned long flags)
+{
+ struct vcu_pll *pll;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ init.name = name;
+ init.parent_names = &parent;
+ init.ops = &vcu_pll_ops;
+ init.num_parents = 1;
+ init.flags = flags;
+
+ pll = devm_kmalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ pll->hw.init = &init;
+ pll->reg_base = reg_base;
+ pll->fvco_min = FVCO_MIN;
+ pll->fvco_max = FVCO_MAX;
+
+ hw = &pll->hw;
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ clk_hw_set_rate_range(hw, pll->fvco_min, pll->fvco_max);
+
+ return hw;
+}
+
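+/*
+ * Each output clock is a mux -> divider -> gate chain controlled by a single
+ * shared register, serialized with a per-clock spinlock.
+ */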
+static struct clk_hw *xvcu_clk_hw_register_leaf(struct device *dev,
+ const char *name,
+ const struct clk_parent_data *parent_data,
+ u8 num_parents,
+ void __iomem *reg)
+{
+ u8 mux_flags = CLK_MUX_ROUND_CLOSEST;
+ u8 divider_flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_DIVIDER_ROUND_CLOSEST;
+ struct clk_hw *mux = NULL;
+ struct clk_hw *divider = NULL;
+ struct clk_hw *gate = NULL;
+ char *name_mux;
+ char *name_div;
+ int err;
+ /* Protect register shared by clocks */
+ spinlock_t *lock;
+
+ lock = devm_kzalloc(dev, sizeof(*lock), GFP_KERNEL);
+ if (!lock)
+ return ERR_PTR(-ENOMEM);
+ spin_lock_init(lock);
+
+ name_mux = devm_kasprintf(dev, GFP_KERNEL, "%s%s", name, "_mux");
+ if (!name_mux)
+ return ERR_PTR(-ENOMEM);
+ mux = clk_hw_register_mux_parent_data(dev, name_mux,
+ parent_data, num_parents,
+ CLK_SET_RATE_PARENT,
+ reg, 0, 1, mux_flags, lock);
+ if (IS_ERR(mux))
+ return mux;
+
+ name_div = devm_kasprintf(dev, GFP_KERNEL, "%s%s", name, "_div");
+ if (!name_div) {
+ err = -ENOMEM;
+ goto unregister_mux;
+ }
+ divider = clk_hw_register_divider_parent_hw(dev, name_div, mux,
+ CLK_SET_RATE_PARENT,
+ reg, 4, 6, divider_flags,
+ lock);
+ if (IS_ERR(divider)) {
+ err = PTR_ERR(divider);
+ goto unregister_mux;
+ }
+
+ gate = clk_hw_register_gate_parent_hw(dev, name, divider,
+ CLK_SET_RATE_PARENT, reg, 12, 0,
+ lock);
+ if (IS_ERR(gate)) {
+ err = PTR_ERR(gate);
+ goto unregister_divider;
+ }
+
+ return gate;
+
+unregister_divider:
+ clk_hw_unregister_divider(divider);
+unregister_mux:
+ clk_hw_unregister_mux(mux);
+
+ return ERR_PTR(err);
+}
+
+static void xvcu_clk_hw_unregister_leaf(struct clk_hw *hw)
+{
+ struct clk_hw *gate = hw;
+ struct clk_hw *divider;
+ struct clk_hw *mux;
+
+ if (!gate)
+ return;
+
+ divider = clk_hw_get_parent(gate);
+ clk_hw_unregister_gate(gate);
+ if (!divider)
+ return;
+
+ mux = clk_hw_get_parent(divider);
+ clk_hw_unregister_divider(divider);
+ if (!mux)
+  return;
+
+ clk_hw_unregister_mux(mux);
+}
+
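+/*
+ * Register the PLL, its post divider and the four encoder/decoder leaf
+ * clocks, then expose them through a one-cell clock provider.
+ */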
+static int xvcu_register_clock_provider(struct xvcu_device *xvcu)
+{
+ struct device *dev = xvcu->dev;
+ struct clk_parent_data parent_data[2] = { 0 };
+ struct clk_hw_onecell_data *data;
+ struct clk_hw **hws;
+ struct clk_hw *hw;
+ void __iomem *reg_base = xvcu->vcu_slcr_ba;
+
+ data = devm_kzalloc(dev, struct_size(data, hws, CLK_XVCU_NUM_CLOCKS), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->num = CLK_XVCU_NUM_CLOCKS;
+ hws = data->hws;
+
+ xvcu->clk_data = data;
+
+ hw = xvcu_register_pll(dev, reg_base,
+ "vcu_pll", __clk_get_name(xvcu->pll_ref),
+ CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ xvcu->pll = hw;
+
+ hw = xvcu_register_pll_post(dev, "vcu_pll_post", xvcu->pll, reg_base);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ xvcu->pll_post = hw;
+
+ parent_data[0].fw_name = "pll_ref";
+ parent_data[1].hw = xvcu->pll_post;
+
+ hws[CLK_XVCU_ENC_CORE] =
+ xvcu_clk_hw_register_leaf(dev, "venc_core_clk",
+ parent_data,
+ ARRAY_SIZE(parent_data),
+ reg_base + VCU_ENC_CORE_CTRL);
+ hws[CLK_XVCU_ENC_MCU] =
+ xvcu_clk_hw_register_leaf(dev, "venc_mcu_clk",
+ parent_data,
+ ARRAY_SIZE(parent_data),
+ reg_base + VCU_ENC_MCU_CTRL);
+ hws[CLK_XVCU_DEC_CORE] =
+ xvcu_clk_hw_register_leaf(dev, "vdec_core_clk",
+ parent_data,
+ ARRAY_SIZE(parent_data),
+ reg_base + VCU_DEC_CORE_CTRL);
+ hws[CLK_XVCU_DEC_MCU] =
+ xvcu_clk_hw_register_leaf(dev, "vdec_mcu_clk",
+ parent_data,
+ ARRAY_SIZE(parent_data),
+ reg_base + VCU_DEC_MCU_CTRL);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, data);
+}
+
+static void xvcu_unregister_clock_provider(struct xvcu_device *xvcu)
+{
+ struct clk_hw_onecell_data *data = xvcu->clk_data;
+ struct clk_hw **hws = data->hws;
+
+ if (!IS_ERR_OR_NULL(hws[CLK_XVCU_DEC_MCU]))
+ xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_DEC_MCU]);
+ if (!IS_ERR_OR_NULL(hws[CLK_XVCU_DEC_CORE]))
+ xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_DEC_CORE]);
+ if (!IS_ERR_OR_NULL(hws[CLK_XVCU_ENC_MCU]))
+ xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_ENC_MCU]);
+ if (!IS_ERR_OR_NULL(hws[CLK_XVCU_ENC_CORE]))
+ xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_ENC_CORE]);
+
+ clk_hw_unregister_fixed_factor(xvcu->pll_post);
+}
+
+/**
+ * xvcu_probe - Probe for the logicoreIP and initialize the PLL
+ *
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Returns 0 on success
+ * Negative error code otherwise
+ */
+static int xvcu_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct xvcu_device *xvcu;
+ void __iomem *regs;
+ int ret;
+
+ xvcu = devm_kzalloc(&pdev->dev, sizeof(*xvcu), GFP_KERNEL);
+ if (!xvcu)
+ return -ENOMEM;
+
+ xvcu->dev = &pdev->dev;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vcu_slcr");
+ if (!res) {
+ dev_err(&pdev->dev, "get vcu_slcr memory resource failed.\n");
+ return -ENODEV;
+ }
+
+ xvcu->vcu_slcr_ba = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xvcu->vcu_slcr_ba) {
+ dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ xvcu->logicore_reg_ba =
+ syscon_regmap_lookup_by_compatible("xlnx,vcu-settings");
+ if (IS_ERR(xvcu->logicore_reg_ba)) {
+ dev_info(&pdev->dev,
+ "could not find xlnx,vcu-settings: trying direct register access\n");
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "logicore");
+ if (!res) {
+ dev_err(&pdev->dev, "get logicore memory resource failed.\n");
+ return -ENODEV;
+ }
+
+ regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!regs) {
+ dev_err(&pdev->dev, "logicore register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ xvcu->logicore_reg_ba =
+ devm_regmap_init_mmio(&pdev->dev, regs,
+ &vcu_settings_regmap_config);
+ if (IS_ERR(xvcu->logicore_reg_ba)) {
+ dev_err(&pdev->dev, "failed to init regmap\n");
+ return PTR_ERR(xvcu->logicore_reg_ba);
+ }
+ }
+
+ xvcu->aclk = devm_clk_get(&pdev->dev, "aclk");
+ if (IS_ERR(xvcu->aclk)) {
+ dev_err(&pdev->dev, "Could not get aclk clock\n");
+ return PTR_ERR(xvcu->aclk);
+ }
+
+ xvcu->pll_ref = devm_clk_get(&pdev->dev, "pll_ref");
+ if (IS_ERR(xvcu->pll_ref)) {
+ dev_err(&pdev->dev, "Could not get pll_ref clock\n");
+ return PTR_ERR(xvcu->pll_ref);
+ }
+
+ ret = clk_prepare_enable(xvcu->aclk);
+ if (ret) {
+ dev_err(&pdev->dev, "aclk clock enable failed\n");
+ return ret;
+ }
+
+ /*
+  * Remove the gasket isolation and take the VCU out of reset:
+  * Bit 0 : Gasket isolation
+  * Bit 1 : put VCU out of reset
+  */
+ regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, VCU_GASKET_VALUE);
+
+ ret = xvcu_register_clock_provider(xvcu);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register clock provider\n");
+ goto error_clk_provider;
+ }
+
+ dev_set_drvdata(&pdev->dev, xvcu);
+
+ return 0;
+
+error_clk_provider:
+ xvcu_unregister_clock_provider(xvcu);
+ clk_disable_unprepare(xvcu->aclk);
+ return ret;
+}
+
+/**
+ * xvcu_remove - Insert gasket isolation and disable the clock
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Returns 0 on success
+ * Negative error code otherwise
+ */
+static int xvcu_remove(struct platform_device *pdev)
+{
+ struct xvcu_device *xvcu;
+
+ xvcu = platform_get_drvdata(pdev);
+ if (!xvcu)
+ return -ENODEV;
+
+ xvcu_unregister_clock_provider(xvcu);
+
+ /* Add the Gasket isolation and put the VCU in reset. */
+ regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
+
+ clk_disable_unprepare(xvcu->aclk);
+
+ return 0;
+}
+
+static const struct of_device_id xvcu_of_id_table[] = {
+ { .compatible = "xlnx,vcu" },
+ { .compatible = "xlnx,vcu-logicoreip-1.0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xvcu_of_id_table);
+
+static struct platform_driver xvcu_driver = {
+ .driver = {
+ .name = "xilinx-vcu",
+ .of_match_table = xvcu_of_id_table,
+ },
+ .probe = xvcu_probe,
+ .remove = xvcu_remove,
+};
+
+module_platform_driver(xvcu_driver);
+
+MODULE_AUTHOR("Dhaval Shah <dshah@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx VCU init Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/zte/Makefile b/drivers/clk/zte/Makefile
deleted file mode 100644
index f130643b695d..000000000000
--- a/drivers/clk/zte/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-y := clk.o
-obj-$(CONFIG_SOC_ZX296702) += clk-zx296702.o
-obj-$(CONFIG_ARCH_ZX) += clk-zx296718.o
diff --git a/drivers/clk/zte/clk-zx296702.c b/drivers/clk/zte/clk-zx296702.c
deleted file mode 100644
index e846f2a34feb..000000000000
--- a/drivers/clk/zte/clk-zx296702.c
+++ /dev/null
@@ -1,741 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2014 Linaro Ltd.
- * Copyright (C) 2014 ZTE Corporation.
- */
-
-#include <linux/clk-provider.h>
-#include <linux/of_address.h>
-#include <dt-bindings/clock/zx296702-clock.h>
-#include "clk.h"
-
-static DEFINE_SPINLOCK(reg_lock);
-
-static void __iomem *topcrm_base;
-static void __iomem *lsp0crpm_base;
-static void __iomem *lsp1crpm_base;
-
-static struct clk *topclk[ZX296702_TOPCLK_END];
-static struct clk *lsp0clk[ZX296702_LSP0CLK_END];
-static struct clk *lsp1clk[ZX296702_LSP1CLK_END];
-
-static struct clk_onecell_data topclk_data;
-static struct clk_onecell_data lsp0clk_data;
-static struct clk_onecell_data lsp1clk_data;
-
-#define CLK_MUX (topcrm_base + 0x04)
-#define CLK_DIV (topcrm_base + 0x08)
-#define CLK_EN0 (topcrm_base + 0x0c)
-#define CLK_EN1 (topcrm_base + 0x10)
-#define VOU_LOCAL_CLKEN (topcrm_base + 0x68)
-#define VOU_LOCAL_CLKSEL (topcrm_base + 0x70)
-#define VOU_LOCAL_DIV2_SET (topcrm_base + 0x74)
-#define CLK_MUX1 (topcrm_base + 0x8c)
-
-#define CLK_SDMMC1 (lsp0crpm_base + 0x0c)
-#define CLK_GPIO (lsp0crpm_base + 0x2c)
-#define CLK_SPDIF0 (lsp0crpm_base + 0x10)
-#define SPDIF0_DIV (lsp0crpm_base + 0x14)
-#define CLK_I2S0 (lsp0crpm_base + 0x18)
-#define I2S0_DIV (lsp0crpm_base + 0x1c)
-#define CLK_I2S1 (lsp0crpm_base + 0x20)
-#define I2S1_DIV (lsp0crpm_base + 0x24)
-#define CLK_I2S2 (lsp0crpm_base + 0x34)
-#define I2S2_DIV (lsp0crpm_base + 0x38)
-
-#define CLK_UART0 (lsp1crpm_base + 0x20)
-#define CLK_UART1 (lsp1crpm_base + 0x24)
-#define CLK_SDMMC0 (lsp1crpm_base + 0x2c)
-#define CLK_SPDIF1 (lsp1crpm_base + 0x30)
-#define SPDIF1_DIV (lsp1crpm_base + 0x34)
-
-static const struct zx_pll_config pll_a9_config[] = {
- { .rate = 700000000, .cfg0 = 0x800405d1, .cfg1 = 0x04555555 },
- { .rate = 800000000, .cfg0 = 0x80040691, .cfg1 = 0x04aaaaaa },
- { .rate = 900000000, .cfg0 = 0x80040791, .cfg1 = 0x04000000 },
- { .rate = 1000000000, .cfg0 = 0x80040851, .cfg1 = 0x04555555 },
- { .rate = 1100000000, .cfg0 = 0x80040911, .cfg1 = 0x04aaaaaa },
- { .rate = 1200000000, .cfg0 = 0x80040a11, .cfg1 = 0x04000000 },
-};
-
-static const struct clk_div_table main_hlk_div[] = {
- { .val = 1, .div = 2, },
- { .val = 3, .div = 4, },
- { /* sentinel */ }
-};
-
-static const struct clk_div_table a9_as1_aclk_divider[] = {
- { .val = 0, .div = 1, },
- { .val = 1, .div = 2, },
- { .val = 3, .div = 4, },
- { /* sentinel */ }
-};
-
-static const struct clk_div_table sec_wclk_divider[] = {
- { .val = 0, .div = 1, },
- { .val = 1, .div = 2, },
- { .val = 3, .div = 4, },
- { .val = 5, .div = 6, },
- { .val = 7, .div = 8, },
- { /* sentinel */ }
-};
-
-static const char * const matrix_aclk_sel[] = {
- "pll_mm0_198M",
- "osc",
- "clk_148M5",
- "pll_lsp_104M",
-};
-
-static const char * const a9_wclk_sel[] = {
- "pll_a9",
- "osc",
- "clk_500",
- "clk_250",
-};
-
-static const char * const a9_as1_aclk_sel[] = {
- "clk_250",
- "osc",
- "pll_mm0_396M",
- "pll_mac_333M",
-};
-
-static const char * const a9_trace_clkin_sel[] = {
- "clk_74M25",
- "pll_mm1_108M",
- "clk_125",
- "clk_148M5",
-};
-
-static const char * const decppu_aclk_sel[] = {
- "clk_250",
- "pll_mm0_198M",
- "pll_lsp_104M",
- "pll_audio_294M912",
-};
-
-static const char * const vou_main_wclk_sel[] = {
- "clk_148M5",
- "clk_74M25",
- "clk_27",
- "pll_mm1_54M",
-};
-
-static const char * const vou_scaler_wclk_sel[] = {
- "clk_250",
- "pll_mac_333M",
- "pll_audio_294M912",
- "pll_mm0_198M",
-};
-
-static const char * const r2d_wclk_sel[] = {
- "pll_audio_294M912",
- "pll_mac_333M",
- "pll_a9_350M",
- "pll_mm0_396M",
-};
-
-static const char * const ddr_wclk_sel[] = {
- "pll_mac_333M",
- "pll_ddr_266M",
- "pll_audio_294M912",
- "pll_mm0_198M",
-};
-
-static const char * const nand_wclk_sel[] = {
- "pll_lsp_104M",
- "osc",
-};
-
-static const char * const lsp_26_wclk_sel[] = {
- "pll_lsp_26M",
- "osc",
-};
-
-static const char * const vl0_sel[] = {
- "vou_main_channel_div",
- "vou_aux_channel_div",
-};
-
-static const char * const hdmi_sel[] = {
- "vou_main_channel_wclk",
- "vou_aux_channel_wclk",
-};
-
-static const char * const sdmmc0_wclk_sel[] = {
- "lsp1_104M_wclk",
- "lsp1_26M_wclk",
-};
-
-static const char * const sdmmc1_wclk_sel[] = {
- "lsp0_104M_wclk",
- "lsp0_26M_wclk",
-};
-
-static const char * const uart_wclk_sel[] = {
- "lsp1_104M_wclk",
- "lsp1_26M_wclk",
-};
-
-static const char * const spdif0_wclk_sel[] = {
- "lsp0_104M_wclk",
- "lsp0_26M_wclk",
-};
-
-static const char * const spdif1_wclk_sel[] = {
- "lsp1_104M_wclk",
- "lsp1_26M_wclk",
-};
-
-static const char * const i2s_wclk_sel[] = {
- "lsp0_104M_wclk",
- "lsp0_26M_wclk",
-};
-
-static inline struct clk *zx_divtbl(const char *name, const char *parent,
- void __iomem *reg, u8 shift, u8 width,
- const struct clk_div_table *table)
-{
- return clk_register_divider_table(NULL, name, parent, 0, reg, shift,
- width, 0, table, &reg_lock);
-}
-
-static inline struct clk *zx_div(const char *name, const char *parent,
- void __iomem *reg, u8 shift, u8 width)
-{
- return clk_register_divider(NULL, name, parent, 0,
- reg, shift, width, 0, &reg_lock);
-}
-
-static inline struct clk *zx_mux(const char *name, const char * const *parents,
- int num_parents, void __iomem *reg, u8 shift, u8 width)
-{
- return clk_register_mux(NULL, name, parents, num_parents,
- 0, reg, shift, width, 0, &reg_lock);
-}
-
-static inline struct clk *zx_gate(const char *name, const char *parent,
- void __iomem *reg, u8 shift)
-{
- return clk_register_gate(NULL, name, parent, CLK_IGNORE_UNUSED,
- reg, shift, CLK_SET_RATE_PARENT, &reg_lock);
-}
-
-static void __init zx296702_top_clocks_init(struct device_node *np)
-{
- struct clk **clk = topclk;
- int i;
-
- topcrm_base = of_iomap(np, 0);
- WARN_ON(!topcrm_base);
-
- clk[ZX296702_OSC] =
- clk_register_fixed_rate(NULL, "osc", NULL, 0, 30000000);
- clk[ZX296702_PLL_A9] =
- clk_register_zx_pll("pll_a9", "osc", 0, topcrm_base
- + 0x01c, pll_a9_config,
- ARRAY_SIZE(pll_a9_config), &reg_lock);
-
- /* TODO: pll_a9_350M look like changeble follow a9 pll */
- clk[ZX296702_PLL_A9_350M] =
- clk_register_fixed_rate(NULL, "pll_a9_350M", "osc", 0,
- 350000000);
- clk[ZX296702_PLL_MAC_1000M] =
- clk_register_fixed_rate(NULL, "pll_mac_1000M", "osc", 0,
- 1000000000);
- clk[ZX296702_PLL_MAC_333M] =
- clk_register_fixed_rate(NULL, "pll_mac_333M", "osc", 0,
- 333000000);
- clk[ZX296702_PLL_MM0_1188M] =
- clk_register_fixed_rate(NULL, "pll_mm0_1188M", "osc", 0,
- 1188000000);
- clk[ZX296702_PLL_MM0_396M] =
- clk_register_fixed_rate(NULL, "pll_mm0_396M", "osc", 0,
- 396000000);
- clk[ZX296702_PLL_MM0_198M] =
- clk_register_fixed_rate(NULL, "pll_mm0_198M", "osc", 0,
- 198000000);
- clk[ZX296702_PLL_MM1_108M] =
- clk_register_fixed_rate(NULL, "pll_mm1_108M", "osc", 0,
- 108000000);
- clk[ZX296702_PLL_MM1_72M] =
- clk_register_fixed_rate(NULL, "pll_mm1_72M", "osc", 0,
- 72000000);
- clk[ZX296702_PLL_MM1_54M] =
- clk_register_fixed_rate(NULL, "pll_mm1_54M", "osc", 0,
- 54000000);
- clk[ZX296702_PLL_LSP_104M] =
- clk_register_fixed_rate(NULL, "pll_lsp_104M", "osc", 0,
- 104000000);
- clk[ZX296702_PLL_LSP_26M] =
- clk_register_fixed_rate(NULL, "pll_lsp_26M", "osc", 0,
- 26000000);
- clk[ZX296702_PLL_DDR_266M] =
- clk_register_fixed_rate(NULL, "pll_ddr_266M", "osc", 0,
- 266000000);
- clk[ZX296702_PLL_AUDIO_294M912] =
- clk_register_fixed_rate(NULL, "pll_audio_294M912", "osc", 0,
- 294912000);
-
- /* bus clock */
- clk[ZX296702_MATRIX_ACLK] =
- zx_mux("matrix_aclk", matrix_aclk_sel,
- ARRAY_SIZE(matrix_aclk_sel), CLK_MUX, 2, 2);
- clk[ZX296702_MAIN_HCLK] =
- zx_divtbl("main_hclk", "matrix_aclk", CLK_DIV, 0, 2,
- main_hlk_div);
- clk[ZX296702_MAIN_PCLK] =
- zx_divtbl("main_pclk", "matrix_aclk", CLK_DIV, 2, 2,
- main_hlk_div);
-
- /* cpu clock */
- clk[ZX296702_CLK_500] =
- clk_register_fixed_factor(NULL, "clk_500", "pll_mac_1000M", 0,
- 1, 2);
- clk[ZX296702_CLK_250] =
- clk_register_fixed_factor(NULL, "clk_250", "pll_mac_1000M", 0,
- 1, 4);
- clk[ZX296702_CLK_125] =
- clk_register_fixed_factor(NULL, "clk_125", "clk_250", 0, 1, 2);
- clk[ZX296702_CLK_148M5] =
- clk_register_fixed_factor(NULL, "clk_148M5", "pll_mm0_1188M", 0,
- 1, 8);
- clk[ZX296702_CLK_74M25] =
- clk_register_fixed_factor(NULL, "clk_74M25", "pll_mm0_1188M", 0,
- 1, 16);
- clk[ZX296702_A9_WCLK] =
- zx_mux("a9_wclk", a9_wclk_sel, ARRAY_SIZE(a9_wclk_sel), CLK_MUX,
- 0, 2);
- clk[ZX296702_A9_AS1_ACLK_MUX] =
- zx_mux("a9_as1_aclk_mux", a9_as1_aclk_sel,
- ARRAY_SIZE(a9_as1_aclk_sel), CLK_MUX, 4, 2);
- clk[ZX296702_A9_TRACE_CLKIN_MUX] =
- zx_mux("a9_trace_clkin_mux", a9_trace_clkin_sel,
- ARRAY_SIZE(a9_trace_clkin_sel), CLK_MUX1, 0, 2);
- clk[ZX296702_A9_AS1_ACLK_DIV] =
- zx_divtbl("a9_as1_aclk_div", "a9_as1_aclk_mux", CLK_DIV, 4, 2,
- a9_as1_aclk_divider);
-
- /* multi-media clock */
- clk[ZX296702_CLK_2] =
- clk_register_fixed_factor(NULL, "clk_2", "pll_mm1_72M", 0,
- 1, 36);
- clk[ZX296702_CLK_27] =
- clk_register_fixed_factor(NULL, "clk_27", "pll_mm1_54M", 0,
- 1, 2);
- clk[ZX296702_DECPPU_ACLK_MUX] =
- zx_mux("decppu_aclk_mux", decppu_aclk_sel,
- ARRAY_SIZE(decppu_aclk_sel), CLK_MUX, 6, 2);
- clk[ZX296702_PPU_ACLK_MUX] =
- zx_mux("ppu_aclk_mux", decppu_aclk_sel,
- ARRAY_SIZE(decppu_aclk_sel), CLK_MUX, 8, 2);
- clk[ZX296702_MALI400_ACLK_MUX] =
- zx_mux("mali400_aclk_mux", decppu_aclk_sel,
- ARRAY_SIZE(decppu_aclk_sel), CLK_MUX, 12, 2);
- clk[ZX296702_VOU_ACLK_MUX] =
- zx_mux("vou_aclk_mux", decppu_aclk_sel,
- ARRAY_SIZE(decppu_aclk_sel), CLK_MUX, 10, 2);
- clk[ZX296702_VOU_MAIN_WCLK_MUX] =
- zx_mux("vou_main_wclk_mux", vou_main_wclk_sel,
- ARRAY_SIZE(vou_main_wclk_sel), CLK_MUX, 14, 2);
- clk[ZX296702_VOU_AUX_WCLK_MUX] =
- zx_mux("vou_aux_wclk_mux", vou_main_wclk_sel,
- ARRAY_SIZE(vou_main_wclk_sel), CLK_MUX, 16, 2);
- clk[ZX296702_VOU_SCALER_WCLK_MUX] =
- zx_mux("vou_scaler_wclk_mux", vou_scaler_wclk_sel,
- ARRAY_SIZE(vou_scaler_wclk_sel), CLK_MUX,
- 18, 2);
- clk[ZX296702_R2D_ACLK_MUX] =
- zx_mux("r2d_aclk_mux", decppu_aclk_sel,
- ARRAY_SIZE(decppu_aclk_sel), CLK_MUX, 20, 2);
- clk[ZX296702_R2D_WCLK_MUX] =
- zx_mux("r2d_wclk_mux", r2d_wclk_sel,
- ARRAY_SIZE(r2d_wclk_sel), CLK_MUX, 22, 2);
-
- /* other clock */
- clk[ZX296702_CLK_50] =
- clk_register_fixed_factor(NULL, "clk_50", "pll_mac_1000M",
- 0, 1, 20);
- clk[ZX296702_CLK_25] =
- clk_register_fixed_factor(NULL, "clk_25", "pll_mac_1000M",
- 0, 1, 40);
- clk[ZX296702_CLK_12] =
- clk_register_fixed_factor(NULL, "clk_12", "pll_mm1_72M",
- 0, 1, 6);
- clk[ZX296702_CLK_16M384] =
- clk_register_fixed_factor(NULL, "clk_16M384",
- "pll_audio_294M912", 0, 1, 18);
- clk[ZX296702_CLK_32K768] =
- clk_register_fixed_factor(NULL, "clk_32K768", "clk_16M384",
- 0, 1, 500);
- clk[ZX296702_SEC_WCLK_DIV] =
- zx_divtbl("sec_wclk_div", "pll_lsp_104M", CLK_DIV, 6, 3,
- sec_wclk_divider);
- clk[ZX296702_DDR_WCLK_MUX] =
- zx_mux("ddr_wclk_mux", ddr_wclk_sel,
- ARRAY_SIZE(ddr_wclk_sel), CLK_MUX, 24, 2);
- clk[ZX296702_NAND_WCLK_MUX] =
- zx_mux("nand_wclk_mux", nand_wclk_sel,
- ARRAY_SIZE(nand_wclk_sel), CLK_MUX, 24, 2);
- clk[ZX296702_LSP_26_WCLK_MUX] =
- zx_mux("lsp_26_wclk_mux", lsp_26_wclk_sel,
- ARRAY_SIZE(lsp_26_wclk_sel), CLK_MUX, 27, 1);
-
- /* gates */
- clk[ZX296702_A9_AS0_ACLK] =
- zx_gate("a9_as0_aclk", "matrix_aclk", CLK_EN0, 0);
- clk[ZX296702_A9_AS1_ACLK] =
- zx_gate("a9_as1_aclk", "a9_as1_aclk_div", CLK_EN0, 1);
- clk[ZX296702_A9_TRACE_CLKIN] =
- zx_gate("a9_trace_clkin", "a9_trace_clkin_mux", CLK_EN0, 2);
- clk[ZX296702_DECPPU_AXI_M_ACLK] =
- zx_gate("decppu_axi_m_aclk", "decppu_aclk_mux", CLK_EN0, 3);
- clk[ZX296702_DECPPU_AHB_S_HCLK] =
- zx_gate("decppu_ahb_s_hclk", "main_hclk", CLK_EN0, 4);
- clk[ZX296702_PPU_AXI_M_ACLK] =
- zx_gate("ppu_axi_m_aclk", "ppu_aclk_mux", CLK_EN0, 5);
- clk[ZX296702_PPU_AHB_S_HCLK] =
- zx_gate("ppu_ahb_s_hclk", "main_hclk", CLK_EN0, 6);
- clk[ZX296702_VOU_AXI_M_ACLK] =
- zx_gate("vou_axi_m_aclk", "vou_aclk_mux", CLK_EN0, 7);
- clk[ZX296702_VOU_APB_PCLK] =
- zx_gate("vou_apb_pclk", "main_pclk", CLK_EN0, 8);
- clk[ZX296702_VOU_MAIN_CHANNEL_WCLK] =
- zx_gate("vou_main_channel_wclk", "vou_main_wclk_mux",
- CLK_EN0, 9);
- clk[ZX296702_VOU_AUX_CHANNEL_WCLK] =
- zx_gate("vou_aux_channel_wclk", "vou_aux_wclk_mux",
- CLK_EN0, 10);
- clk[ZX296702_VOU_HDMI_OSCLK_CEC] =
- zx_gate("vou_hdmi_osclk_cec", "clk_2", CLK_EN0, 11);
- clk[ZX296702_VOU_SCALER_WCLK] =
- zx_gate("vou_scaler_wclk", "vou_scaler_wclk_mux", CLK_EN0, 12);
- clk[ZX296702_MALI400_AXI_M_ACLK] =
- zx_gate("mali400_axi_m_aclk", "mali400_aclk_mux", CLK_EN0, 13);
- clk[ZX296702_MALI400_APB_PCLK] =
- zx_gate("mali400_apb_pclk", "main_pclk", CLK_EN0, 14);
- clk[ZX296702_R2D_WCLK] =
- zx_gate("r2d_wclk", "r2d_wclk_mux", CLK_EN0, 15);
- clk[ZX296702_R2D_AXI_M_ACLK] =
- zx_gate("r2d_axi_m_aclk", "r2d_aclk_mux", CLK_EN0, 16);
- clk[ZX296702_R2D_AHB_HCLK] =
- zx_gate("r2d_ahb_hclk", "main_hclk", CLK_EN0, 17);
- clk[ZX296702_DDR3_AXI_S0_ACLK] =
- zx_gate("ddr3_axi_s0_aclk", "matrix_aclk", CLK_EN0, 18);
- clk[ZX296702_DDR3_APB_PCLK] =
- zx_gate("ddr3_apb_pclk", "main_pclk", CLK_EN0, 19);
- clk[ZX296702_DDR3_WCLK] =
- zx_gate("ddr3_wclk", "ddr_wclk_mux", CLK_EN0, 20);
- clk[ZX296702_USB20_0_AHB_HCLK] =
- zx_gate("usb20_0_ahb_hclk", "main_hclk", CLK_EN0, 21);
- clk[ZX296702_USB20_0_EXTREFCLK] =
- zx_gate("usb20_0_extrefclk", "clk_12", CLK_EN0, 22);
- clk[ZX296702_USB20_1_AHB_HCLK] =
- zx_gate("usb20_1_ahb_hclk", "main_hclk", CLK_EN0, 23);
- clk[ZX296702_USB20_1_EXTREFCLK] =
- zx_gate("usb20_1_extrefclk", "clk_12", CLK_EN0, 24);
- clk[ZX296702_USB20_2_AHB_HCLK] =
- zx_gate("usb20_2_ahb_hclk", "main_hclk", CLK_EN0, 25);
- clk[ZX296702_USB20_2_EXTREFCLK] =
- zx_gate("usb20_2_extrefclk", "clk_12", CLK_EN0, 26);
- clk[ZX296702_GMAC_AXI_M_ACLK] =
- zx_gate("gmac_axi_m_aclk", "matrix_aclk", CLK_EN0, 27);
- clk[ZX296702_GMAC_APB_PCLK] =
- zx_gate("gmac_apb_pclk", "main_pclk", CLK_EN0, 28);
- clk[ZX296702_GMAC_125_CLKIN] =
- zx_gate("gmac_125_clkin", "clk_125", CLK_EN0, 29);
- clk[ZX296702_GMAC_RMII_CLKIN] =
- zx_gate("gmac_rmii_clkin", "clk_50", CLK_EN0, 30);
- clk[ZX296702_GMAC_25M_CLK] =
- zx_gate("gmac_25M_clk", "clk_25", CLK_EN0, 31);
- clk[ZX296702_NANDFLASH_AHB_HCLK] =
- zx_gate("nandflash_ahb_hclk", "main_hclk", CLK_EN1, 0);
- clk[ZX296702_NANDFLASH_WCLK] =
- zx_gate("nandflash_wclk", "nand_wclk_mux", CLK_EN1, 1);
- clk[ZX296702_LSP0_APB_PCLK] =
- zx_gate("lsp0_apb_pclk", "main_pclk", CLK_EN1, 2);
- clk[ZX296702_LSP0_AHB_HCLK] =
- zx_gate("lsp0_ahb_hclk", "main_hclk", CLK_EN1, 3);
- clk[ZX296702_LSP0_26M_WCLK] =
- zx_gate("lsp0_26M_wclk", "lsp_26_wclk_mux", CLK_EN1, 4);
- clk[ZX296702_LSP0_104M_WCLK] =
- zx_gate("lsp0_104M_wclk", "pll_lsp_104M", CLK_EN1, 5);
- clk[ZX296702_LSP0_16M384_WCLK] =
- zx_gate("lsp0_16M384_wclk", "clk_16M384", CLK_EN1, 6);
- clk[ZX296702_LSP1_APB_PCLK] =
- zx_gate("lsp1_apb_pclk", "main_pclk", CLK_EN1, 7);
- /* FIXME: wclk enable bit is bit8. We hack it as reserved 31 for
- * UART does not work after parent clk is disabled/enabled */
- clk[ZX296702_LSP1_26M_WCLK] =
- zx_gate("lsp1_26M_wclk", "lsp_26_wclk_mux", CLK_EN1, 31);
- clk[ZX296702_LSP1_104M_WCLK] =
- zx_gate("lsp1_104M_wclk", "pll_lsp_104M", CLK_EN1, 9);
- clk[ZX296702_LSP1_32K_CLK] =
- zx_gate("lsp1_32K_clk", "clk_32K768", CLK_EN1, 10);
- clk[ZX296702_AON_HCLK] =
- zx_gate("aon_hclk", "main_hclk", CLK_EN1, 11);
- clk[ZX296702_SYS_CTRL_PCLK] =
- zx_gate("sys_ctrl_pclk", "main_pclk", CLK_EN1, 12);
- clk[ZX296702_DMA_PCLK] =
- zx_gate("dma_pclk", "main_pclk", CLK_EN1, 13);
- clk[ZX296702_DMA_ACLK] =
- zx_gate("dma_aclk", "matrix_aclk", CLK_EN1, 14);
- clk[ZX296702_SEC_HCLK] =
- zx_gate("sec_hclk", "main_hclk", CLK_EN1, 15);
- clk[ZX296702_AES_WCLK] =
- zx_gate("aes_wclk", "sec_wclk_div", CLK_EN1, 16);
- clk[ZX296702_DES_WCLK] =
- zx_gate("des_wclk", "sec_wclk_div", CLK_EN1, 17);
- clk[ZX296702_IRAM_ACLK] =
- zx_gate("iram_aclk", "matrix_aclk", CLK_EN1, 18);
- clk[ZX296702_IROM_ACLK] =
- zx_gate("irom_aclk", "matrix_aclk", CLK_EN1, 19);
- clk[ZX296702_BOOT_CTRL_HCLK] =
- zx_gate("boot_ctrl_hclk", "main_hclk", CLK_EN1, 20);
- clk[ZX296702_EFUSE_CLK_30] =
- zx_gate("efuse_clk_30", "osc", CLK_EN1, 21);
-
- /* TODO: add VOU Local clocks */
- clk[ZX296702_VOU_MAIN_CHANNEL_DIV] =
- zx_div("vou_main_channel_div", "vou_main_channel_wclk",
- VOU_LOCAL_DIV2_SET, 1, 1);
- clk[ZX296702_VOU_AUX_CHANNEL_DIV] =
- zx_div("vou_aux_channel_div", "vou_aux_channel_wclk",
- VOU_LOCAL_DIV2_SET, 0, 1);
- clk[ZX296702_VOU_TV_ENC_HD_DIV] =
- zx_div("vou_tv_enc_hd_div", "vou_tv_enc_hd_mux",
- VOU_LOCAL_DIV2_SET, 3, 1);
- clk[ZX296702_VOU_TV_ENC_SD_DIV] =
- zx_div("vou_tv_enc_sd_div", "vou_tv_enc_sd_mux",
- VOU_LOCAL_DIV2_SET, 2, 1);
- clk[ZX296702_VL0_MUX] =
- zx_mux("vl0_mux", vl0_sel, ARRAY_SIZE(vl0_sel),
- VOU_LOCAL_CLKSEL, 8, 1);
- clk[ZX296702_VL1_MUX] =
- zx_mux("vl1_mux", vl0_sel, ARRAY_SIZE(vl0_sel),
- VOU_LOCAL_CLKSEL, 9, 1);
- clk[ZX296702_VL2_MUX] =
- zx_mux("vl2_mux", vl0_sel, ARRAY_SIZE(vl0_sel),
- VOU_LOCAL_CLKSEL, 10, 1);
- clk[ZX296702_GL0_MUX] =
- zx_mux("gl0_mux", vl0_sel, ARRAY_SIZE(vl0_sel),
- VOU_LOCAL_CLKSEL, 5, 1);
- clk[ZX296702_GL1_MUX] =
- zx_mux("gl1_mux", vl0_sel, ARRAY_SIZE(vl0_sel),
- VOU_LOCAL_CLKSEL, 6, 1);
- clk[ZX296702_GL2_MUX] =
- zx_mux("gl2_mux", vl0_sel, ARRAY_SIZE(vl0_sel),
- VOU_LOCAL_CLKSEL, 7, 1);
- clk[ZX296702_WB_MUX] =
- zx_mux("wb_mux", vl0_sel, ARRAY_SIZE(vl0_sel),
- VOU_LOCAL_CLKSEL, 11, 1);
- clk[ZX296702_HDMI_MUX] =
- zx_mux("hdmi_mux", hdmi_sel, ARRAY_SIZE(hdmi_sel),
- VOU_LOCAL_CLKSEL, 4, 1);
- clk[ZX296702_VOU_TV_ENC_HD_MUX] =
- zx_mux("vou_tv_enc_hd_mux", hdmi_sel, ARRAY_SIZE(hdmi_sel),
- VOU_LOCAL_CLKSEL, 3, 1);
- clk[ZX296702_VOU_TV_ENC_SD_MUX] =
- zx_mux("vou_tv_enc_sd_mux", hdmi_sel, ARRAY_SIZE(hdmi_sel),
- VOU_LOCAL_CLKSEL, 2, 1);
- clk[ZX296702_VL0_CLK] =
- zx_gate("vl0_clk", "vl0_mux", VOU_LOCAL_CLKEN, 8);
- clk[ZX296702_VL1_CLK] =
- zx_gate("vl1_clk", "vl1_mux", VOU_LOCAL_CLKEN, 9);
- clk[ZX296702_VL2_CLK] =
- zx_gate("vl2_clk", "vl2_mux", VOU_LOCAL_CLKEN, 10);
- clk[ZX296702_GL0_CLK] =
- zx_gate("gl0_clk", "gl0_mux", VOU_LOCAL_CLKEN, 5);
- clk[ZX296702_GL1_CLK] =
- zx_gate("gl1_clk", "gl1_mux", VOU_LOCAL_CLKEN, 6);
- clk[ZX296702_GL2_CLK] =
- zx_gate("gl2_clk", "gl2_mux", VOU_LOCAL_CLKEN, 7);
- clk[ZX296702_WB_CLK] =
- zx_gate("wb_clk", "wb_mux", VOU_LOCAL_CLKEN, 11);
- clk[ZX296702_CL_CLK] =
- zx_gate("cl_clk", "vou_main_channel_div", VOU_LOCAL_CLKEN, 12);
- clk[ZX296702_MAIN_MIX_CLK] =
- zx_gate("main_mix_clk", "vou_main_channel_div",
- VOU_LOCAL_CLKEN, 4);
- clk[ZX296702_AUX_MIX_CLK] =
- zx_gate("aux_mix_clk", "vou_aux_channel_div",
- VOU_LOCAL_CLKEN, 3);
- clk[ZX296702_HDMI_CLK] =
- zx_gate("hdmi_clk", "hdmi_mux", VOU_LOCAL_CLKEN, 2);
- clk[ZX296702_VOU_TV_ENC_HD_DAC_CLK] =
- zx_gate("vou_tv_enc_hd_dac_clk", "vou_tv_enc_hd_div",
- VOU_LOCAL_CLKEN, 1);
- clk[ZX296702_VOU_TV_ENC_SD_DAC_CLK] =
- zx_gate("vou_tv_enc_sd_dac_clk", "vou_tv_enc_sd_div",
- VOU_LOCAL_CLKEN, 0);
-
- /* CA9 PERIPHCLK = a9_wclk / 2 */
- clk[ZX296702_A9_PERIPHCLK] =
- clk_register_fixed_factor(NULL, "a9_periphclk", "a9_wclk",
- 0, 1, 2);
-
- for (i = 0; i < ARRAY_SIZE(topclk); i++) {
- if (IS_ERR(clk[i])) {
- pr_err("zx296702 clk %d: register failed with %ld\n",
- i, PTR_ERR(clk[i]));
- return;
- }
- }
-
- topclk_data.clks = topclk;
- topclk_data.clk_num = ARRAY_SIZE(topclk);
- of_clk_add_provider(np, of_clk_src_onecell_get, &topclk_data);
-}
-CLK_OF_DECLARE(zx296702_top_clk, "zte,zx296702-topcrm-clk",
- zx296702_top_clocks_init);
-
-static void __init zx296702_lsp0_clocks_init(struct device_node *np)
-{
- struct clk **clk = lsp0clk;
- int i;
-
- lsp0crpm_base = of_iomap(np, 0);
- WARN_ON(!lsp0crpm_base);
-
- /* SDMMC1 */
- clk[ZX296702_SDMMC1_WCLK_MUX] =
- zx_mux("sdmmc1_wclk_mux", sdmmc1_wclk_sel,
- ARRAY_SIZE(sdmmc1_wclk_sel), CLK_SDMMC1, 4, 1);
- clk[ZX296702_SDMMC1_WCLK_DIV] =
- zx_div("sdmmc1_wclk_div", "sdmmc1_wclk_mux", CLK_SDMMC1, 12, 4);
- clk[ZX296702_SDMMC1_WCLK] =
- zx_gate("sdmmc1_wclk", "sdmmc1_wclk_div", CLK_SDMMC1, 1);
- clk[ZX296702_SDMMC1_PCLK] =
- zx_gate("sdmmc1_pclk", "lsp0_apb_pclk", CLK_SDMMC1, 0);
-
- clk[ZX296702_GPIO_CLK] =
- zx_gate("gpio_clk", "lsp0_apb_pclk", CLK_GPIO, 0);
-
- /* SPDIF */
- clk[ZX296702_SPDIF0_WCLK_MUX] =
- zx_mux("spdif0_wclk_mux", spdif0_wclk_sel,
- ARRAY_SIZE(spdif0_wclk_sel), CLK_SPDIF0, 4, 1);
- clk[ZX296702_SPDIF0_WCLK] =
- zx_gate("spdif0_wclk", "spdif0_wclk_mux", CLK_SPDIF0, 1);
- clk[ZX296702_SPDIF0_PCLK] =
- zx_gate("spdif0_pclk", "lsp0_apb_pclk", CLK_SPDIF0, 0);
-
- clk[ZX296702_SPDIF0_DIV] =
- clk_register_zx_audio("spdif0_div", "spdif0_wclk", 0,
- SPDIF0_DIV);
-
- /* I2S */
- clk[ZX296702_I2S0_WCLK_MUX] =
- zx_mux("i2s0_wclk_mux", i2s_wclk_sel,
- ARRAY_SIZE(i2s_wclk_sel), CLK_I2S0, 4, 1);
- clk[ZX296702_I2S0_WCLK] =
- zx_gate("i2s0_wclk", "i2s0_wclk_mux", CLK_I2S0, 1);
- clk[ZX296702_I2S0_PCLK] =
- zx_gate("i2s0_pclk", "lsp0_apb_pclk", CLK_I2S0, 0);
-
- clk[ZX296702_I2S0_DIV] =
- clk_register_zx_audio("i2s0_div", "i2s0_wclk", 0, I2S0_DIV);
-
- clk[ZX296702_I2S1_WCLK_MUX] =
- zx_mux("i2s1_wclk_mux", i2s_wclk_sel,
- ARRAY_SIZE(i2s_wclk_sel), CLK_I2S1, 4, 1);
- clk[ZX296702_I2S1_WCLK] =
- zx_gate("i2s1_wclk", "i2s1_wclk_mux", CLK_I2S1, 1);
- clk[ZX296702_I2S1_PCLK] =
- zx_gate("i2s1_pclk", "lsp0_apb_pclk", CLK_I2S1, 0);
-
- clk[ZX296702_I2S1_DIV] =
- clk_register_zx_audio("i2s1_div", "i2s1_wclk", 0, I2S1_DIV);
-
- clk[ZX296702_I2S2_WCLK_MUX] =
- zx_mux("i2s2_wclk_mux", i2s_wclk_sel,
- ARRAY_SIZE(i2s_wclk_sel), CLK_I2S2, 4, 1);
- clk[ZX296702_I2S2_WCLK] =
- zx_gate("i2s2_wclk", "i2s2_wclk_mux", CLK_I2S2, 1);
- clk[ZX296702_I2S2_PCLK] =
- zx_gate("i2s2_pclk", "lsp0_apb_pclk", CLK_I2S2, 0);
-
- clk[ZX296702_I2S2_DIV] =
- clk_register_zx_audio("i2s2_div", "i2s2_wclk", 0, I2S2_DIV);
-
- for (i = 0; i < ARRAY_SIZE(lsp0clk); i++) {
- if (IS_ERR(clk[i])) {
- pr_err("zx296702 clk %d: register failed with %ld\n",
- i, PTR_ERR(clk[i]));
- return;
- }
- }
-
- lsp0clk_data.clks = lsp0clk;
- lsp0clk_data.clk_num = ARRAY_SIZE(lsp0clk);
- of_clk_add_provider(np, of_clk_src_onecell_get, &lsp0clk_data);
-}
-CLK_OF_DECLARE(zx296702_lsp0_clk, "zte,zx296702-lsp0crpm-clk",
- zx296702_lsp0_clocks_init);
-
-static void __init zx296702_lsp1_clocks_init(struct device_node *np)
-{
- struct clk **clk = lsp1clk;
- int i;
-
- lsp1crpm_base = of_iomap(np, 0);
- WARN_ON(!lsp1crpm_base);
-
- /* UART0 */
- clk[ZX296702_UART0_WCLK_MUX] =
- zx_mux("uart0_wclk_mux", uart_wclk_sel,
- ARRAY_SIZE(uart_wclk_sel), CLK_UART0, 4, 1);
- /* FIXME: uart wclk enable bit is bit1 in. We hack it as reserved 31 for
- * UART does not work after parent clk is disabled/enabled */
- clk[ZX296702_UART0_WCLK] =
- zx_gate("uart0_wclk", "uart0_wclk_mux", CLK_UART0, 31);
- clk[ZX296702_UART0_PCLK] =
- zx_gate("uart0_pclk", "lsp1_apb_pclk", CLK_UART0, 0);
-
- /* UART1 */
- clk[ZX296702_UART1_WCLK_MUX] =
- zx_mux("uart1_wclk_mux", uart_wclk_sel,
- ARRAY_SIZE(uart_wclk_sel), CLK_UART1, 4, 1);
- clk[ZX296702_UART1_WCLK] =
- zx_gate("uart1_wclk", "uart1_wclk_mux", CLK_UART1, 1);
- clk[ZX296702_UART1_PCLK] =
- zx_gate("uart1_pclk", "lsp1_apb_pclk", CLK_UART1, 0);
-
- /* SDMMC0 */
- clk[ZX296702_SDMMC0_WCLK_MUX] =
- zx_mux("sdmmc0_wclk_mux", sdmmc0_wclk_sel,
- ARRAY_SIZE(sdmmc0_wclk_sel), CLK_SDMMC0, 4, 1);
- clk[ZX296702_SDMMC0_WCLK_DIV] =
- zx_div("sdmmc0_wclk_div", "sdmmc0_wclk_mux", CLK_SDMMC0, 12, 4);
- clk[ZX296702_SDMMC0_WCLK] =
- zx_gate("sdmmc0_wclk", "sdmmc0_wclk_div", CLK_SDMMC0, 1);
- clk[ZX296702_SDMMC0_PCLK] =
- zx_gate("sdmmc0_pclk", "lsp1_apb_pclk", CLK_SDMMC0, 0);
-
- clk[ZX296702_SPDIF1_WCLK_MUX] =
- zx_mux("spdif1_wclk_mux", spdif1_wclk_sel,
- ARRAY_SIZE(spdif1_wclk_sel), CLK_SPDIF1, 4, 1);
- clk[ZX296702_SPDIF1_WCLK] =
- zx_gate("spdif1_wclk", "spdif1_wclk_mux", CLK_SPDIF1, 1);
- clk[ZX296702_SPDIF1_PCLK] =
- zx_gate("spdif1_pclk", "lsp1_apb_pclk", CLK_SPDIF1, 0);
-
- clk[ZX296702_SPDIF1_DIV] =
- clk_register_zx_audio("spdif1_div", "spdif1_wclk", 0,
- SPDIF1_DIV);
-
- for (i = 0; i < ARRAY_SIZE(lsp1clk); i++) {
- if (IS_ERR(clk[i])) {
- pr_err("zx296702 clk %d: register failed with %ld\n",
- i, PTR_ERR(clk[i]));
- return;
- }
- }
-
- lsp1clk_data.clks = lsp1clk;
- lsp1clk_data.clk_num = ARRAY_SIZE(lsp1clk);
- of_clk_add_provider(np, of_clk_src_onecell_get, &lsp1clk_data);
-}
-CLK_OF_DECLARE(zx296702_lsp1_clk, "zte,zx296702-lsp1crpm-clk",
- zx296702_lsp1_clocks_init);
diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c
deleted file mode 100644
index dd7045bc48c1..000000000000
--- a/drivers/clk/zte/clk-zx296718.c
+++ /dev/null
@@ -1,1074 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2015 - 2016 ZTE Corporation.
- * Copyright (C) 2016 Linaro Ltd.
- */
-#include <linux/clk-provider.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-
-#include <dt-bindings/clock/zx296718-clock.h>
-#include "clk.h"
-
-/* TOP CRM */
-#define TOP_CLK_MUX0 0x04
-#define TOP_CLK_MUX1 0x08
-#define TOP_CLK_MUX2 0x0c
-#define TOP_CLK_MUX3 0x10
-#define TOP_CLK_MUX4 0x14
-#define TOP_CLK_MUX5 0x18
-#define TOP_CLK_MUX6 0x1c
-#define TOP_CLK_MUX7 0x20
-#define TOP_CLK_MUX9 0x28
-
-
-#define TOP_CLK_GATE0 0x34
-#define TOP_CLK_GATE1 0x38
-#define TOP_CLK_GATE2 0x3c
-#define TOP_CLK_GATE3 0x40
-#define TOP_CLK_GATE4 0x44
-#define TOP_CLK_GATE5 0x48
-#define TOP_CLK_GATE6 0x4c
-
-#define TOP_CLK_DIV0 0x58
-
-#define PLL_CPU_REG 0x80
-#define PLL_VGA_REG 0xb0
-#define PLL_DDR_REG 0xa0
-
-/* LSP0 CRM */
-#define LSP0_TIMER3_CLK 0x4
-#define LSP0_TIMER4_CLK 0x8
-#define LSP0_TIMER5_CLK 0xc
-#define LSP0_UART3_CLK 0x10
-#define LSP0_UART1_CLK 0x14
-#define LSP0_UART2_CLK 0x18
-#define LSP0_SPIFC0_CLK 0x1c
-#define LSP0_I2C4_CLK 0x20
-#define LSP0_I2C5_CLK 0x24
-#define LSP0_SSP0_CLK 0x28
-#define LSP0_SSP1_CLK 0x2c
-#define LSP0_USIM0_CLK 0x30
-#define LSP0_GPIO_CLK 0x34
-#define LSP0_I2C3_CLK 0x38
-
-/* LSP1 CRM */
-#define LSP1_UART4_CLK 0x08
-#define LSP1_UART5_CLK 0x0c
-#define LSP1_PWM_CLK 0x10
-#define LSP1_I2C2_CLK 0x14
-#define LSP1_SSP2_CLK 0x1c
-#define LSP1_SSP3_CLK 0x20
-#define LSP1_SSP4_CLK 0x24
-#define LSP1_USIM1_CLK 0x28
-
-/* audio lsp */
-#define AUDIO_I2S0_DIV_CFG1 0x10
-#define AUDIO_I2S0_DIV_CFG2 0x14
-#define AUDIO_I2S0_CLK 0x18
-#define AUDIO_I2S1_DIV_CFG1 0x20
-#define AUDIO_I2S1_DIV_CFG2 0x24
-#define AUDIO_I2S1_CLK 0x28
-#define AUDIO_I2S2_DIV_CFG1 0x30
-#define AUDIO_I2S2_DIV_CFG2 0x34
-#define AUDIO_I2S2_CLK 0x38
-#define AUDIO_I2S3_DIV_CFG1 0x40
-#define AUDIO_I2S3_DIV_CFG2 0x44
-#define AUDIO_I2S3_CLK 0x48
-#define AUDIO_I2C0_CLK 0x50
-#define AUDIO_SPDIF0_DIV_CFG1 0x60
-#define AUDIO_SPDIF0_DIV_CFG2 0x64
-#define AUDIO_SPDIF0_CLK 0x68
-#define AUDIO_SPDIF1_DIV_CFG1 0x70
-#define AUDIO_SPDIF1_DIV_CFG2 0x74
-#define AUDIO_SPDIF1_CLK 0x78
-#define AUDIO_TIMER_CLK 0x80
-#define AUDIO_TDM_CLK 0x90
-#define AUDIO_TS_CLK 0xa0
-
-static DEFINE_SPINLOCK(clk_lock);
-
-static const struct zx_pll_config pll_cpu_table[] = {
- PLL_RATE(1312000000, 0x00103621, 0x04aaaaaa),
- PLL_RATE(1407000000, 0x00103a21, 0x04aaaaaa),
- PLL_RATE(1503000000, 0x00103e21, 0x04aaaaaa),
- PLL_RATE(1600000000, 0x00104221, 0x04aaaaaa),
-};
-
-static const struct zx_pll_config pll_vga_table[] = {
- PLL_RATE(36000000, 0x00102464, 0x04000000), /* 800x600@56 */
- PLL_RATE(40000000, 0x00102864, 0x04000000), /* 800x600@60 */
- PLL_RATE(49500000, 0x00103164, 0x04800000), /* 800x600@75 */
- PLL_RATE(50000000, 0x00103264, 0x04000000), /* 800x600@72 */
- PLL_RATE(56250000, 0x00103864, 0x04400000), /* 800x600@85 */
- PLL_RATE(65000000, 0x00104164, 0x04000000), /* 1024x768@60 */
- PLL_RATE(74375000, 0x00104a64, 0x04600000), /* 1280x720@60 */
- PLL_RATE(75000000, 0x00104b64, 0x04800000), /* 1024x768@70 */
- PLL_RATE(78750000, 0x00104e64, 0x04c00000), /* 1024x768@75 */
- PLL_RATE(85500000, 0x00105564, 0x04800000), /* 1360x768@60 */
- PLL_RATE(106500000, 0x00106a64, 0x04800000), /* 1440x900@60 */
- PLL_RATE(108000000, 0x00106c64, 0x04000000), /* 1280x1024@60 */
- PLL_RATE(110000000, 0x00106e64, 0x04000000), /* 1024x768@85 */
- PLL_RATE(135000000, 0x00105a44, 0x04000000), /* 1280x1024@75 */
- PLL_RATE(136750000, 0x00104462, 0x04600000), /* 1440x900@75 */
- PLL_RATE(148500000, 0x00104a62, 0x04400000), /* 1920x1080@60 */
- PLL_RATE(157000000, 0x00104e62, 0x04800000), /* 1440x900@85 */
- PLL_RATE(157500000, 0x00104e62, 0x04c00000), /* 1280x1024@85 */
- PLL_RATE(162000000, 0x00105162, 0x04000000), /* 1600x1200@60 */
- PLL_RATE(193250000, 0x00106062, 0x04a00000), /* 1920x1200@60 */
-};
-
-PNAME(osc) = {
- "osc24m",
- "osc32k",
-};
-
-PNAME(dbg_wclk_p) = {
- "clk334m",
- "clk466m",
- "clk396m",
- "clk250m",
-};
-
-PNAME(a72_coreclk_p) = {
- "osc24m",
- "pll_mm0_1188m",
- "pll_mm1_1296m",
- "clk1000m",
- "clk648m",
- "clk1600m",
- "pll_audio_1800m",
- "pll_vga_1800m",
-};
-
-PNAME(cpu_periclk_p) = {
- "osc24m",
- "clk500m",
- "clk594m",
- "clk466m",
- "clk294m",
- "clk334m",
- "clk250m",
- "clk125m",
-};
-
-PNAME(a53_coreclk_p) = {
- "osc24m",
- "clk1000m",
- "pll_mm0_1188m",
- "clk648m",
- "clk500m",
- "clk800m",
- "clk1600m",
- "pll_audio_1800m",
-};
-
-PNAME(sec_wclk_p) = {
- "osc24m",
- "clk396m",
- "clk334m",
- "clk297m",
- "clk250m",
- "clk198m",
- "clk148m5",
- "clk99m",
-};
-
-PNAME(sd_nand_wclk_p) = {
- "osc24m",
- "clk49m5",
- "clk99m",
- "clk198m",
- "clk167m",
- "clk148m5",
- "clk125m",
- "clk216m",
-};
-
-PNAME(emmc_wclk_p) = {
- "osc24m",
- "clk198m",
- "clk99m",
- "clk396m",
- "clk334m",
- "clk297m",
- "clk250m",
- "clk148m5",
-};
-
-PNAME(clk32_p) = {
- "osc32k",
- "clk32k768",
-};
-
-PNAME(usb_ref24m_p) = {
- "osc32k",
- "clk32k768",
-};
-
-PNAME(sys_noc_alck_p) = {
- "osc24m",
- "clk250m",
- "clk198m",
- "clk148m5",
- "clk108m",
- "clk54m",
- "clk216m",
- "clk240m",
-};
-
-PNAME(vde_aclk_p) = {
- "clk334m",
- "clk594m",
- "clk500m",
- "clk432m",
- "clk480m",
- "clk297m",
- "clk_vga", /*600MHz*/
- "clk294m",
-};
-
-PNAME(vce_aclk_p) = {
- "clk334m",
- "clk594m",
- "clk500m",
- "clk432m",
- "clk396m",
- "clk297m",
- "clk_vga", /*600MHz*/
- "clk294m",
-};
-
-PNAME(hde_aclk_p) = {
- "clk334m",
- "clk594m",
- "clk500m",
- "clk432m",
- "clk396m",
- "clk297m",
- "clk_vga", /*600MHz*/
- "clk294m",
-};
-
-PNAME(gpu_aclk_p) = {
- "clk334m",
- "clk648m",
- "clk594m",
- "clk500m",
- "clk396m",
- "clk297m",
- "clk_vga", /*600MHz*/
- "clk294m",
-};
-
-PNAME(sappu_aclk_p) = {
- "clk396m",
- "clk500m",
- "clk250m",
- "clk148m5",
-};
-
-PNAME(sappu_wclk_p) = {
- "clk198m",
- "clk396m",
- "clk334m",
- "clk297m",
- "clk250m",
- "clk148m5",
- "clk125m",
- "clk99m",
-};
-
-PNAME(vou_aclk_p) = {
- "clk334m",
- "clk594m",
- "clk500m",
- "clk432m",
- "clk396m",
- "clk297m",
- "clk_vga", /*600MHz*/
- "clk294m",
-};
-
-PNAME(vou_main_wclk_p) = {
- "clk108m",
- "clk594m",
- "clk297m",
- "clk148m5",
- "clk74m25",
- "clk54m",
- "clk27m",
- "clk_vga",
-};
-
-PNAME(vou_aux_wclk_p) = {
- "clk108m",
- "clk148m5",
- "clk74m25",
- "clk54m",
- "clk27m",
- "clk_vga",
- "clk54m_mm0",
- "clk"
-};
-
-PNAME(vou_ppu_wclk_p) = {
- "clk334m",
- "clk432m",
- "clk396m",
- "clk297m",
- "clk250m",
- "clk125m",
- "clk198m",
- "clk99m",
-};
-
-PNAME(vga_i2c_wclk_p) = {
- "osc24m",
- "clk99m",
-};
-
-PNAME(viu_m0_aclk_p) = {
- "clk334m",
- "clk432m",
- "clk396m",
- "clk297m",
- "clk250m",
- "clk125m",
- "clk198m",
- "osc24m",
-};
-
-PNAME(viu_m1_aclk_p) = {
- "clk198m",
- "clk250m",
- "clk297m",
- "clk125m",
- "clk396m",
- "clk334m",
- "clk148m5",
- "osc24m",
-};
-
-PNAME(viu_clk_p) = {
- "clk198m",
- "clk334m",
- "clk297m",
- "clk250m",
- "clk396m",
- "clk125m",
- "clk99m",
- "clk148m5",
-};
-
-PNAME(viu_jpeg_clk_p) = {
- "clk334m",
- "clk480m",
- "clk432m",
- "clk396m",
- "clk297m",
- "clk250m",
- "clk125m",
- "clk198m",
-};
-
-PNAME(ts_sys_clk_p) = {
- "clk192m",
- "clk167m",
- "clk125m",
- "clk99m",
-};
-
-PNAME(wdt_ares_p) = {
- "osc24m",
- "clk32k"
-};
-
-static struct clk_zx_pll zx296718_pll_clk[] = {
- ZX296718_PLL("pll_cpu", "osc24m", PLL_CPU_REG, pll_cpu_table),
- ZX296718_PLL("pll_vga", "osc24m", PLL_VGA_REG, pll_vga_table),
-};
-
-static struct zx_clk_fixed_factor top_ffactor_clk[] = {
- FFACTOR(0, "clk4m", "osc24m", 1, 6, 0),
- FFACTOR(0, "clk2m", "osc24m", 1, 12, 0),
- /* pll cpu */
- FFACTOR(0, "clk1600m", "pll_cpu", 1, 1, CLK_SET_RATE_PARENT),
- FFACTOR(0, "clk800m", "pll_cpu", 1, 2, CLK_SET_RATE_PARENT),
- /* pll mac */
- FFACTOR(0, "clk25m", "pll_mac", 1, 40, 0),
- FFACTOR(0, "clk125m", "pll_mac", 1, 8, 0),
- FFACTOR(0, "clk250m", "pll_mac", 1, 4, 0),
- FFACTOR(0, "clk50m", "pll_mac", 1, 20, 0),
- FFACTOR(0, "clk500m", "pll_mac", 1, 2, 0),
- FFACTOR(0, "clk1000m", "pll_mac", 1, 1, 0),
- FFACTOR(0, "clk334m", "pll_mac", 1, 3, 0),
- FFACTOR(0, "clk167m", "pll_mac", 1, 6, 0),
- /* pll mm */
- FFACTOR(0, "clk54m_mm0", "pll_mm0", 1, 22, 0),
- FFACTOR(0, "clk74m25", "pll_mm0", 1, 16, 0),
- FFACTOR(0, "clk148m5", "pll_mm0", 1, 8, 0),
- FFACTOR(0, "clk297m", "pll_mm0", 1, 4, 0),
- FFACTOR(0, "clk594m", "pll_mm0", 1, 2, 0),
- FFACTOR(0, "pll_mm0_1188m", "pll_mm0", 1, 1, 0),
- FFACTOR(0, "clk396m", "pll_mm0", 1, 3, 0),
- FFACTOR(0, "clk198m", "pll_mm0", 1, 6, 0),
- FFACTOR(0, "clk99m", "pll_mm0", 1, 12, 0),
- FFACTOR(0, "clk49m5", "pll_mm0", 1, 24, 0),
- /* pll mm */
- FFACTOR(0, "clk324m", "pll_mm1", 1, 4, 0),
- FFACTOR(0, "clk648m", "pll_mm1", 1, 2, 0),
- FFACTOR(0, "pll_mm1_1296m", "pll_mm1", 1, 1, 0),
- FFACTOR(0, "clk216m", "pll_mm1", 1, 6, 0),
- FFACTOR(0, "clk432m", "pll_mm1", 1, 3, 0),
- FFACTOR(0, "clk108m", "pll_mm1", 1, 12, 0),
- FFACTOR(0, "clk72m", "pll_mm1", 1, 18, 0),
- FFACTOR(0, "clk27m", "pll_mm1", 1, 48, 0),
- FFACTOR(0, "clk54m", "pll_mm1", 1, 24, 0),
- /* vga */
- FFACTOR(0, "pll_vga_1800m", "pll_vga", 1, 1, 0),
- FFACTOR(0, "clk_vga", "pll_vga", 1, 1, CLK_SET_RATE_PARENT),
- /* pll ddr */
- FFACTOR(0, "clk466m", "pll_ddr", 1, 2, 0),
-
- /* pll audio */
- FFACTOR(0, "pll_audio_1800m", "pll_audio", 1, 1, 0),
- FFACTOR(0, "clk32k768", "pll_audio", 1, 27000, 0),
- FFACTOR(0, "clk16m384", "pll_audio", 1, 54, 0),
- FFACTOR(0, "clk294m", "pll_audio", 1, 3, 0),
-
- /* pll hsic*/
- FFACTOR(0, "clk240m", "pll_hsic", 1, 4, 0),
- FFACTOR(0, "clk480m", "pll_hsic", 1, 2, 0),
- FFACTOR(0, "clk192m", "pll_hsic", 1, 5, 0),
- FFACTOR(0, "clk_pll_24m", "pll_hsic", 1, 40, 0),
- FFACTOR(0, "emmc_mux_div2", "emmc_mux", 1, 2, CLK_SET_RATE_PARENT),
-};
-
-static const struct clk_div_table noc_div_table[] = {
- { .val = 1, .div = 2, },
- { .val = 3, .div = 4, },
-};
-static struct zx_clk_div top_div_clk[] = {
- DIV_T(0, "sys_noc_hclk", "sys_noc_aclk", TOP_CLK_DIV0, 0, 2, 0, noc_div_table),
- DIV_T(0, "sys_noc_pclk", "sys_noc_aclk", TOP_CLK_DIV0, 4, 2, 0, noc_div_table),
-};
-
-static struct zx_clk_mux top_mux_clk[] = {
- MUX(0, "dbg_mux", dbg_wclk_p, TOP_CLK_MUX0, 12, 2),
- MUX(0, "a72_mux", a72_coreclk_p, TOP_CLK_MUX0, 8, 3),
- MUX(0, "cpu_peri_mux", cpu_periclk_p, TOP_CLK_MUX0, 4, 3),
- MUX_F(0, "a53_mux", a53_coreclk_p, TOP_CLK_MUX0, 0, 3, CLK_SET_RATE_PARENT, 0),
- MUX(0, "sys_noc_aclk", sys_noc_alck_p, TOP_CLK_MUX1, 0, 3),
- MUX(0, "sec_mux", sec_wclk_p, TOP_CLK_MUX2, 16, 3),
- MUX(0, "sd1_mux", sd_nand_wclk_p, TOP_CLK_MUX2, 12, 3),
- MUX(0, "sd0_mux", sd_nand_wclk_p, TOP_CLK_MUX2, 8, 3),
- MUX(0, "emmc_mux", emmc_wclk_p, TOP_CLK_MUX2, 4, 3),
- MUX(0, "nand_mux", sd_nand_wclk_p, TOP_CLK_MUX2, 0, 3),
- MUX(0, "usb_ref24m_mux", usb_ref24m_p, TOP_CLK_MUX9, 16, 1),
- MUX(0, "clk32k", clk32_p, TOP_CLK_MUX9, 12, 1),
- MUX_F(0, "wdt_mux", wdt_ares_p, TOP_CLK_MUX9, 8, 1, CLK_SET_RATE_PARENT, 0),
- MUX(0, "timer_mux", osc, TOP_CLK_MUX9, 4, 1),
- MUX(0, "vde_mux", vde_aclk_p, TOP_CLK_MUX4, 0, 3),
- MUX(0, "vce_mux", vce_aclk_p, TOP_CLK_MUX4, 4, 3),
- MUX(0, "hde_mux", hde_aclk_p, TOP_CLK_MUX4, 8, 3),
- MUX(0, "gpu_mux", gpu_aclk_p, TOP_CLK_MUX5, 0, 3),
- MUX(0, "sappu_a_mux", sappu_aclk_p, TOP_CLK_MUX5, 4, 2),
- MUX(0, "sappu_w_mux", sappu_wclk_p, TOP_CLK_MUX5, 8, 3),
- MUX(0, "vou_a_mux", vou_aclk_p, TOP_CLK_MUX7, 0, 3),
- MUX_F(0, "vou_main_w_mux", vou_main_wclk_p, TOP_CLK_MUX7, 4, 3, CLK_SET_RATE_PARENT, 0),
- MUX_F(0, "vou_aux_w_mux", vou_aux_wclk_p, TOP_CLK_MUX7, 8, 3, CLK_SET_RATE_PARENT, 0),
- MUX(0, "vou_ppu_w_mux", vou_ppu_wclk_p, TOP_CLK_MUX7, 12, 3),
- MUX(0, "vga_i2c_mux", vga_i2c_wclk_p, TOP_CLK_MUX7, 16, 1),
- MUX(0, "viu_m0_a_mux", viu_m0_aclk_p, TOP_CLK_MUX6, 0, 3),
- MUX(0, "viu_m1_a_mux", viu_m1_aclk_p, TOP_CLK_MUX6, 4, 3),
- MUX(0, "viu_w_mux", viu_clk_p, TOP_CLK_MUX6, 8, 3),
- MUX(0, "viu_jpeg_w_mux", viu_jpeg_clk_p, TOP_CLK_MUX6, 12, 3),
- MUX(0, "ts_sys_mux", ts_sys_clk_p, TOP_CLK_MUX6, 16, 2),
-};
-
-static struct zx_clk_gate top_gate_clk[] = {
- GATE(CPU_DBG_GATE, "dbg_wclk", "dbg_mux", TOP_CLK_GATE0, 4, CLK_SET_RATE_PARENT, 0),
- GATE(A72_GATE, "a72_coreclk", "a72_mux", TOP_CLK_GATE0, 3, CLK_SET_RATE_PARENT, 0),
- GATE(CPU_PERI_GATE, "cpu_peri", "cpu_peri_mux", TOP_CLK_GATE0, 1, CLK_SET_RATE_PARENT, 0),
- GATE(A53_GATE, "a53_coreclk", "a53_mux", TOP_CLK_GATE0, 0, CLK_SET_RATE_PARENT, 0),
- GATE(SD1_WCLK, "sd1_wclk", "sd1_mux", TOP_CLK_GATE1, 13, CLK_SET_RATE_PARENT, 0),
- GATE(SD0_WCLK, "sd0_wclk", "sd0_mux", TOP_CLK_GATE1, 9, CLK_SET_RATE_PARENT, 0),
- GATE(EMMC_WCLK, "emmc_wclk", "emmc_mux_div2", TOP_CLK_GATE0, 5, CLK_SET_RATE_PARENT, 0),
- GATE(EMMC_NAND_AXI, "emmc_nand_aclk", "sys_noc_aclk", TOP_CLK_GATE1, 4, CLK_SET_RATE_PARENT, 0),
- GATE(NAND_WCLK, "nand_wclk", "nand_mux", TOP_CLK_GATE0, 1, CLK_SET_RATE_PARENT, 0),
- GATE(EMMC_NAND_AHB, "emmc_nand_hclk", "sys_noc_hclk", TOP_CLK_GATE1, 0, CLK_SET_RATE_PARENT, 0),
- GATE(0, "lsp1_pclk", "sys_noc_pclk", TOP_CLK_GATE2, 31, 0, 0),
- GATE(LSP1_148M5, "lsp1_148m5", "clk148m5", TOP_CLK_GATE2, 30, 0, 0),
- GATE(LSP1_99M, "lsp1_99m", "clk99m", TOP_CLK_GATE2, 29, 0, 0),
- GATE(LSP1_24M, "lsp1_24m", "osc24m", TOP_CLK_GATE2, 28, 0, 0),
- GATE(LSP0_74M25, "lsp0_74m25", "clk74m25", TOP_CLK_GATE2, 25, 0, 0),
- GATE(0, "lsp0_pclk", "sys_noc_pclk", TOP_CLK_GATE2, 24, 0, 0),
- GATE(LSP0_32K, "lsp0_32k", "osc32k", TOP_CLK_GATE2, 23, 0, 0),
- GATE(LSP0_148M5, "lsp0_148m5", "clk148m5", TOP_CLK_GATE2, 22, 0, 0),
- GATE(LSP0_99M, "lsp0_99m", "clk99m", TOP_CLK_GATE2, 21, 0, 0),
- GATE(LSP0_24M, "lsp0_24m", "osc24m", TOP_CLK_GATE2, 20, 0, 0),
- GATE(AUDIO_99M, "audio_99m", "clk99m", TOP_CLK_GATE5, 27, 0, 0),
- GATE(AUDIO_24M, "audio_24m", "osc24m", TOP_CLK_GATE5, 28, 0, 0),
- GATE(AUDIO_16M384, "audio_16m384", "clk16m384", TOP_CLK_GATE5, 29, 0, 0),
- GATE(AUDIO_32K, "audio_32k", "clk32k", TOP_CLK_GATE5, 30, 0, 0),
- GATE(WDT_WCLK, "wdt_wclk", "wdt_mux", TOP_CLK_GATE6, 9, CLK_SET_RATE_PARENT, 0),
- GATE(TIMER_WCLK, "timer_wclk", "timer_mux", TOP_CLK_GATE6, 5, CLK_SET_RATE_PARENT, 0),
- GATE(VDE_ACLK, "vde_aclk", "vde_mux", TOP_CLK_GATE3, 0, CLK_SET_RATE_PARENT, 0),
- GATE(VCE_ACLK, "vce_aclk", "vce_mux", TOP_CLK_GATE3, 4, CLK_SET_RATE_PARENT, 0),
- GATE(HDE_ACLK, "hde_aclk", "hde_mux", TOP_CLK_GATE3, 8, CLK_SET_RATE_PARENT, 0),
- GATE(GPU_ACLK, "gpu_aclk", "gpu_mux", TOP_CLK_GATE3, 16, CLK_SET_RATE_PARENT, 0),
- GATE(SAPPU_ACLK, "sappu_aclk", "sappu_a_mux", TOP_CLK_GATE3, 20, CLK_SET_RATE_PARENT, 0),
- GATE(SAPPU_WCLK, "sappu_wclk", "sappu_w_mux", TOP_CLK_GATE3, 22, CLK_SET_RATE_PARENT, 0),
- GATE(VOU_ACLK, "vou_aclk", "vou_a_mux", TOP_CLK_GATE4, 16, CLK_SET_RATE_PARENT, 0),
- GATE(VOU_MAIN_WCLK, "vou_main_wclk", "vou_main_w_mux", TOP_CLK_GATE4, 18, CLK_SET_RATE_PARENT, 0),
- GATE(VOU_AUX_WCLK, "vou_aux_wclk", "vou_aux_w_mux", TOP_CLK_GATE4, 19, CLK_SET_RATE_PARENT, 0),
- GATE(VOU_PPU_WCLK, "vou_ppu_wclk", "vou_ppu_w_mux", TOP_CLK_GATE4, 20, CLK_SET_RATE_PARENT, 0),
- GATE(MIPI_CFG_CLK, "mipi_cfg_clk", "osc24m", TOP_CLK_GATE4, 21, 0, 0),
- GATE(VGA_I2C_WCLK, "vga_i2c_wclk", "vga_i2c_mux", TOP_CLK_GATE4, 23, CLK_SET_RATE_PARENT, 0),
- GATE(MIPI_REF_CLK, "mipi_ref_clk", "clk27m", TOP_CLK_GATE4, 24, 0, 0),
- GATE(HDMI_OSC_CEC, "hdmi_osc_cec", "clk2m", TOP_CLK_GATE4, 22, 0, 0),
- GATE(HDMI_OSC_CLK, "hdmi_osc_clk", "clk240m", TOP_CLK_GATE4, 25, 0, 0),
- GATE(HDMI_XCLK, "hdmi_xclk", "osc24m", TOP_CLK_GATE4, 26, 0, 0),
- GATE(VIU_M0_ACLK, "viu_m0_aclk", "viu_m0_a_mux", TOP_CLK_GATE4, 0, CLK_SET_RATE_PARENT, 0),
- GATE(VIU_M1_ACLK, "viu_m1_aclk", "viu_m1_a_mux", TOP_CLK_GATE4, 1, CLK_SET_RATE_PARENT, 0),
- GATE(VIU_WCLK, "viu_wclk", "viu_w_mux", TOP_CLK_GATE4, 2, CLK_SET_RATE_PARENT, 0),
- GATE(VIU_JPEG_WCLK, "viu_jpeg_wclk", "viu_jpeg_w_mux", TOP_CLK_GATE4, 3, CLK_SET_RATE_PARENT, 0),
- GATE(VIU_CFG_CLK, "viu_cfg_clk", "osc24m", TOP_CLK_GATE4, 6, 0, 0),
- GATE(TS_SYS_WCLK, "ts_sys_wclk", "ts_sys_mux", TOP_CLK_GATE5, 2, CLK_SET_RATE_PARENT, 0),
- GATE(TS_SYS_108M, "ts_sys_108m", "clk108m", TOP_CLK_GATE5, 3, 0, 0),
- GATE(USB20_HCLK, "usb20_hclk", "sys_noc_hclk", TOP_CLK_GATE2, 12, 0, 0),
- GATE(USB20_PHY_CLK, "usb20_phy_clk", "usb_ref24m_mux", TOP_CLK_GATE2, 13, 0, 0),
- GATE(USB21_HCLK, "usb21_hclk", "sys_noc_hclk", TOP_CLK_GATE2, 14, 0, 0),
- GATE(USB21_PHY_CLK, "usb21_phy_clk", "usb_ref24m_mux", TOP_CLK_GATE2, 15, 0, 0),
- GATE(GMAC_RMIICLK, "gmac_rmii_clk", "clk50m", TOP_CLK_GATE2, 3, 0, 0),
- GATE(GMAC_PCLK, "gmac_pclk", "clk198m", TOP_CLK_GATE2, 1, 0, 0),
- GATE(GMAC_ACLK, "gmac_aclk", "clk49m5", TOP_CLK_GATE2, 0, 0, 0),
- GATE(GMAC_RFCLK, "gmac_refclk", "clk25m", TOP_CLK_GATE2, 4, 0, 0),
- GATE(SD1_AHB, "sd1_hclk", "sys_noc_hclk", TOP_CLK_GATE1, 12, 0, 0),
- GATE(SD0_AHB, "sd0_hclk", "sys_noc_hclk", TOP_CLK_GATE1, 8, 0, 0),
- GATE(TEMPSENSOR_GATE, "tempsensor_gate", "clk4m", TOP_CLK_GATE5, 31, 0, 0),
-};
-
-static struct clk_hw_onecell_data top_hw_onecell_data = {
- .num = TOP_NR_CLKS,
- .hws = {
- [TOP_NR_CLKS - 1] = NULL,
- },
-};
-
-static int __init top_clocks_init(struct device_node *np)
-{
- void __iomem *reg_base;
- int i, ret;
- const char *name;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base) {
- pr_err("%s: Unable to map clk base\n", __func__);
- return -ENXIO;
- }
-
- for (i = 0; i < ARRAY_SIZE(zx296718_pll_clk); i++) {
- zx296718_pll_clk[i].reg_base += (uintptr_t)reg_base;
- name = zx296718_pll_clk[i].hw.init->name;
- ret = clk_hw_register(NULL, &zx296718_pll_clk[i].hw);
- if (ret)
- pr_warn("top clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(top_ffactor_clk); i++) {
- if (top_ffactor_clk[i].id)
- top_hw_onecell_data.hws[top_ffactor_clk[i].id] =
- &top_ffactor_clk[i].factor.hw;
-
- name = top_ffactor_clk[i].factor.hw.init->name;
- ret = clk_hw_register(NULL, &top_ffactor_clk[i].factor.hw);
- if (ret)
- pr_warn("top clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(top_mux_clk); i++) {
- if (top_mux_clk[i].id)
- top_hw_onecell_data.hws[top_mux_clk[i].id] =
- &top_mux_clk[i].mux.hw;
-
- top_mux_clk[i].mux.reg += (uintptr_t)reg_base;
- name = top_mux_clk[i].mux.hw.init->name;
- ret = clk_hw_register(NULL, &top_mux_clk[i].mux.hw);
- if (ret)
- pr_warn("top clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(top_gate_clk); i++) {
- if (top_gate_clk[i].id)
- top_hw_onecell_data.hws[top_gate_clk[i].id] =
- &top_gate_clk[i].gate.hw;
-
- top_gate_clk[i].gate.reg += (uintptr_t)reg_base;
- name = top_gate_clk[i].gate.hw.init->name;
- ret = clk_hw_register(NULL, &top_gate_clk[i].gate.hw);
- if (ret)
- pr_warn("top clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(top_div_clk); i++) {
- if (top_div_clk[i].id)
- top_hw_onecell_data.hws[top_div_clk[i].id] =
- &top_div_clk[i].div.hw;
-
- top_div_clk[i].div.reg += (uintptr_t)reg_base;
- name = top_div_clk[i].div.hw.init->name;
- ret = clk_hw_register(NULL, &top_div_clk[i].div.hw);
- if (ret)
- pr_warn("top clk %s init error!\n", name);
- }
-
- ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
- &top_hw_onecell_data);
- if (ret) {
- pr_err("failed to register top clk provider: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static const struct clk_div_table common_even_div_table[] = {
- { .val = 0, .div = 1, },
- { .val = 1, .div = 2, },
- { .val = 3, .div = 4, },
- { .val = 5, .div = 6, },
- { .val = 7, .div = 8, },
- { .val = 9, .div = 10, },
- { .val = 11, .div = 12, },
- { .val = 13, .div = 14, },
- { .val = 15, .div = 16, },
-};
-
-static const struct clk_div_table common_div_table[] = {
- { .val = 0, .div = 1, },
- { .val = 1, .div = 2, },
- { .val = 2, .div = 3, },
- { .val = 3, .div = 4, },
- { .val = 4, .div = 5, },
- { .val = 5, .div = 6, },
- { .val = 6, .div = 7, },
- { .val = 7, .div = 8, },
- { .val = 8, .div = 9, },
- { .val = 9, .div = 10, },
- { .val = 10, .div = 11, },
- { .val = 11, .div = 12, },
- { .val = 12, .div = 13, },
- { .val = 13, .div = 14, },
- { .val = 14, .div = 15, },
- { .val = 15, .div = 16, },
-};
-
-PNAME(lsp0_wclk_common_p) = {
- "lsp0_24m",
- "lsp0_99m",
-};
-
-PNAME(lsp0_wclk_timer3_p) = {
- "timer3_div",
- "lsp0_32k"
-};
-
-PNAME(lsp0_wclk_timer4_p) = {
- "timer4_div",
- "lsp0_32k"
-};
-
-PNAME(lsp0_wclk_timer5_p) = {
- "timer5_div",
- "lsp0_32k"
-};
-
-PNAME(lsp0_wclk_spifc0_p) = {
- "lsp0_148m5",
- "lsp0_24m",
- "lsp0_99m",
- "lsp0_74m25"
-};
-
-PNAME(lsp0_wclk_ssp_p) = {
- "lsp0_148m5",
- "lsp0_99m",
- "lsp0_24m",
-};
-
-static struct zx_clk_mux lsp0_mux_clk[] = {
- MUX(0, "timer3_wclk_mux", lsp0_wclk_timer3_p, LSP0_TIMER3_CLK, 4, 1),
- MUX(0, "timer4_wclk_mux", lsp0_wclk_timer4_p, LSP0_TIMER4_CLK, 4, 1),
- MUX(0, "timer5_wclk_mux", lsp0_wclk_timer5_p, LSP0_TIMER5_CLK, 4, 1),
- MUX(0, "uart3_wclk_mux", lsp0_wclk_common_p, LSP0_UART3_CLK, 4, 1),
- MUX(0, "uart1_wclk_mux", lsp0_wclk_common_p, LSP0_UART1_CLK, 4, 1),
- MUX(0, "uart2_wclk_mux", lsp0_wclk_common_p, LSP0_UART2_CLK, 4, 1),
- MUX(0, "spifc0_wclk_mux", lsp0_wclk_spifc0_p, LSP0_SPIFC0_CLK, 4, 2),
- MUX(0, "i2c4_wclk_mux", lsp0_wclk_common_p, LSP0_I2C4_CLK, 4, 1),
- MUX(0, "i2c5_wclk_mux", lsp0_wclk_common_p, LSP0_I2C5_CLK, 4, 1),
- MUX(0, "ssp0_wclk_mux", lsp0_wclk_ssp_p, LSP0_SSP0_CLK, 4, 1),
- MUX(0, "ssp1_wclk_mux", lsp0_wclk_ssp_p, LSP0_SSP1_CLK, 4, 1),
- MUX(0, "i2c3_wclk_mux", lsp0_wclk_common_p, LSP0_I2C3_CLK, 4, 1),
-};
-
-static struct zx_clk_gate lsp0_gate_clk[] = {
- GATE(LSP0_TIMER3_WCLK, "timer3_wclk", "timer3_wclk_mux", LSP0_TIMER3_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_TIMER4_WCLK, "timer4_wclk", "timer4_wclk_mux", LSP0_TIMER4_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_TIMER5_WCLK, "timer5_wclk", "timer5_wclk_mux", LSP0_TIMER5_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_UART3_WCLK, "uart3_wclk", "uart3_wclk_mux", LSP0_UART3_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_UART1_WCLK, "uart1_wclk", "uart1_wclk_mux", LSP0_UART1_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_UART2_WCLK, "uart2_wclk", "uart2_wclk_mux", LSP0_UART2_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_SPIFC0_WCLK, "spifc0_wclk", "spifc0_wclk_mux", LSP0_SPIFC0_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_I2C4_WCLK, "i2c4_wclk", "i2c4_wclk_mux", LSP0_I2C4_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_I2C5_WCLK, "i2c5_wclk", "i2c5_wclk_mux", LSP0_I2C5_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_SSP0_WCLK, "ssp0_wclk", "ssp0_div", LSP0_SSP0_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_SSP1_WCLK, "ssp1_wclk", "ssp1_div", LSP0_SSP1_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP0_I2C3_WCLK, "i2c3_wclk", "i2c3_wclk_mux", LSP0_I2C3_CLK, 1, CLK_SET_RATE_PARENT, 0),
-};
-
-static struct zx_clk_div lsp0_div_clk[] = {
- DIV_T(0, "timer3_div", "lsp0_24m", LSP0_TIMER3_CLK, 12, 4, 0, common_even_div_table),
- DIV_T(0, "timer4_div", "lsp0_24m", LSP0_TIMER4_CLK, 12, 4, 0, common_even_div_table),
- DIV_T(0, "timer5_div", "lsp0_24m", LSP0_TIMER5_CLK, 12, 4, 0, common_even_div_table),
- DIV_T(0, "ssp0_div", "ssp0_wclk_mux", LSP0_SSP0_CLK, 12, 4, 0, common_even_div_table),
- DIV_T(0, "ssp1_div", "ssp1_wclk_mux", LSP0_SSP1_CLK, 12, 4, 0, common_even_div_table),
-};
-
-static struct clk_hw_onecell_data lsp0_hw_onecell_data = {
- .num = LSP0_NR_CLKS,
- .hws = {
- [LSP0_NR_CLKS - 1] = NULL,
- },
-};
-
-static int __init lsp0_clocks_init(struct device_node *np)
-{
- void __iomem *reg_base;
- int i, ret;
- const char *name;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base) {
- pr_err("%s: Unable to map clk base\n", __func__);
- return -ENXIO;
- }
-
- for (i = 0; i < ARRAY_SIZE(lsp0_mux_clk); i++) {
- if (lsp0_mux_clk[i].id)
- lsp0_hw_onecell_data.hws[lsp0_mux_clk[i].id] =
- &lsp0_mux_clk[i].mux.hw;
-
- lsp0_mux_clk[i].mux.reg += (uintptr_t)reg_base;
- name = lsp0_mux_clk[i].mux.hw.init->name;
- ret = clk_hw_register(NULL, &lsp0_mux_clk[i].mux.hw);
- if (ret)
- pr_warn("lsp0 clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(lsp0_gate_clk); i++) {
- if (lsp0_gate_clk[i].id)
- lsp0_hw_onecell_data.hws[lsp0_gate_clk[i].id] =
- &lsp0_gate_clk[i].gate.hw;
-
- lsp0_gate_clk[i].gate.reg += (uintptr_t)reg_base;
- name = lsp0_gate_clk[i].gate.hw.init->name;
- ret = clk_hw_register(NULL, &lsp0_gate_clk[i].gate.hw);
- if (ret)
- pr_warn("lsp0 clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(lsp0_div_clk); i++) {
- if (lsp0_div_clk[i].id)
- lsp0_hw_onecell_data.hws[lsp0_div_clk[i].id] =
- &lsp0_div_clk[i].div.hw;
-
- lsp0_div_clk[i].div.reg += (uintptr_t)reg_base;
- name = lsp0_div_clk[i].div.hw.init->name;
- ret = clk_hw_register(NULL, &lsp0_div_clk[i].div.hw);
- if (ret)
- pr_warn("lsp0 clk %s init error!\n", name);
- }
-
- ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
- &lsp0_hw_onecell_data);
- if (ret) {
- pr_err("failed to register lsp0 clk provider: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-PNAME(lsp1_wclk_common_p) = {
- "lsp1_24m",
- "lsp1_99m",
-};
-
-PNAME(lsp1_wclk_ssp_p) = {
- "lsp1_148m5",
- "lsp1_99m",
- "lsp1_24m",
-};
-
-static struct zx_clk_mux lsp1_mux_clk[] = {
- MUX(0, "uart4_wclk_mux", lsp1_wclk_common_p, LSP1_UART4_CLK, 4, 1),
- MUX(0, "uart5_wclk_mux", lsp1_wclk_common_p, LSP1_UART5_CLK, 4, 1),
- MUX(0, "pwm_wclk_mux", lsp1_wclk_common_p, LSP1_PWM_CLK, 4, 1),
- MUX(0, "i2c2_wclk_mux", lsp1_wclk_common_p, LSP1_I2C2_CLK, 4, 1),
- MUX(0, "ssp2_wclk_mux", lsp1_wclk_ssp_p, LSP1_SSP2_CLK, 4, 2),
- MUX(0, "ssp3_wclk_mux", lsp1_wclk_ssp_p, LSP1_SSP3_CLK, 4, 2),
- MUX(0, "ssp4_wclk_mux", lsp1_wclk_ssp_p, LSP1_SSP4_CLK, 4, 2),
- MUX(0, "usim1_wclk_mux", lsp1_wclk_common_p, LSP1_USIM1_CLK, 4, 1),
-};
-
-static struct zx_clk_div lsp1_div_clk[] = {
- DIV_T(0, "pwm_div", "pwm_wclk_mux", LSP1_PWM_CLK, 12, 4, CLK_SET_RATE_PARENT, common_div_table),
- DIV_T(0, "ssp2_div", "ssp2_wclk_mux", LSP1_SSP2_CLK, 12, 4, CLK_SET_RATE_PARENT, common_even_div_table),
- DIV_T(0, "ssp3_div", "ssp3_wclk_mux", LSP1_SSP3_CLK, 12, 4, CLK_SET_RATE_PARENT, common_even_div_table),
- DIV_T(0, "ssp4_div", "ssp4_wclk_mux", LSP1_SSP4_CLK, 12, 4, CLK_SET_RATE_PARENT, common_even_div_table),
-};
-
-static struct zx_clk_gate lsp1_gate_clk[] = {
- GATE(LSP1_UART4_WCLK, "lsp1_uart4_wclk", "uart4_wclk_mux", LSP1_UART4_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP1_UART5_WCLK, "lsp1_uart5_wclk", "uart5_wclk_mux", LSP1_UART5_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP1_PWM_WCLK, "lsp1_pwm_wclk", "pwm_div", LSP1_PWM_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP1_PWM_PCLK, "lsp1_pwm_pclk", "lsp1_pclk", LSP1_PWM_CLK, 0, 0, 0),
- GATE(LSP1_I2C2_WCLK, "lsp1_i2c2_wclk", "i2c2_wclk_mux", LSP1_I2C2_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP1_SSP2_WCLK, "lsp1_ssp2_wclk", "ssp2_div", LSP1_SSP2_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP1_SSP3_WCLK, "lsp1_ssp3_wclk", "ssp3_div", LSP1_SSP3_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP1_SSP4_WCLK, "lsp1_ssp4_wclk", "ssp4_div", LSP1_SSP4_CLK, 1, CLK_SET_RATE_PARENT, 0),
- GATE(LSP1_USIM1_WCLK, "lsp1_usim1_wclk", "usim1_wclk_mux", LSP1_USIM1_CLK, 1, CLK_SET_RATE_PARENT, 0),
-};
-
-static struct clk_hw_onecell_data lsp1_hw_onecell_data = {
- .num = LSP1_NR_CLKS,
- .hws = {
- [LSP1_NR_CLKS - 1] = NULL,
- },
-};
-
-static int __init lsp1_clocks_init(struct device_node *np)
-{
- void __iomem *reg_base;
- int i, ret;
- const char *name;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base) {
- pr_err("%s: Unable to map clk base\n", __func__);
- return -ENXIO;
- }
-
- for (i = 0; i < ARRAY_SIZE(lsp1_mux_clk); i++) {
- if (lsp1_mux_clk[i].id)
- lsp1_hw_onecell_data.hws[lsp1_mux_clk[i].id] =
- &lsp1_mux_clk[i].mux.hw;
-
- lsp1_mux_clk[i].mux.reg += (uintptr_t)reg_base;
- name = lsp1_mux_clk[i].mux.hw.init->name;
- ret = clk_hw_register(NULL, &lsp1_mux_clk[i].mux.hw);
- if (ret)
- pr_warn("lsp1 clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(lsp1_gate_clk); i++) {
- if (lsp1_gate_clk[i].id)
- lsp1_hw_onecell_data.hws[lsp1_gate_clk[i].id] =
- &lsp1_gate_clk[i].gate.hw;
-
- lsp1_gate_clk[i].gate.reg += (uintptr_t)reg_base;
- name = lsp1_gate_clk[i].gate.hw.init->name;
- ret = clk_hw_register(NULL, &lsp1_gate_clk[i].gate.hw);
- if (ret)
- pr_warn("lsp1 clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(lsp1_div_clk); i++) {
- if (lsp1_div_clk[i].id)
- lsp1_hw_onecell_data.hws[lsp1_div_clk[i].id] =
- &lsp1_div_clk[i].div.hw;
-
- lsp1_div_clk[i].div.reg += (uintptr_t)reg_base;
- name = lsp1_div_clk[i].div.hw.init->name;
- ret = clk_hw_register(NULL, &lsp1_div_clk[i].div.hw);
- if (ret)
- pr_warn("lsp1 clk %s init error!\n", name);
- }
-
- ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
- &lsp1_hw_onecell_data);
- if (ret) {
- pr_err("failed to register lsp1 clk provider: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-PNAME(audio_wclk_common_p) = {
- "audio_99m",
- "audio_24m",
-};
-
-PNAME(audio_timer_p) = {
- "audio_24m",
- "audio_32k",
-};
-
-static struct zx_clk_mux audio_mux_clk[] = {
- MUX(I2S0_WCLK_MUX, "i2s0_wclk_mux", audio_wclk_common_p, AUDIO_I2S0_CLK, 0, 1),
- MUX(I2S1_WCLK_MUX, "i2s1_wclk_mux", audio_wclk_common_p, AUDIO_I2S1_CLK, 0, 1),
- MUX(I2S2_WCLK_MUX, "i2s2_wclk_mux", audio_wclk_common_p, AUDIO_I2S2_CLK, 0, 1),
- MUX(I2S3_WCLK_MUX, "i2s3_wclk_mux", audio_wclk_common_p, AUDIO_I2S3_CLK, 0, 1),
- MUX(0, "i2c0_wclk_mux", audio_wclk_common_p, AUDIO_I2C0_CLK, 0, 1),
- MUX(0, "spdif0_wclk_mux", audio_wclk_common_p, AUDIO_SPDIF0_CLK, 0, 1),
- MUX(0, "spdif1_wclk_mux", audio_wclk_common_p, AUDIO_SPDIF1_CLK, 0, 1),
- MUX(0, "timer_wclk_mux", audio_timer_p, AUDIO_TIMER_CLK, 0, 1),
-};
-
-static struct clk_zx_audio_divider audio_adiv_clk[] = {
- AUDIO_DIV(0, "i2s0_wclk_div", "i2s0_wclk_mux", AUDIO_I2S0_DIV_CFG1),
- AUDIO_DIV(0, "i2s1_wclk_div", "i2s1_wclk_mux", AUDIO_I2S1_DIV_CFG1),
- AUDIO_DIV(0, "i2s2_wclk_div", "i2s2_wclk_mux", AUDIO_I2S2_DIV_CFG1),
- AUDIO_DIV(0, "i2s3_wclk_div", "i2s3_wclk_mux", AUDIO_I2S3_DIV_CFG1),
- AUDIO_DIV(0, "spdif0_wclk_div", "spdif0_wclk_mux", AUDIO_SPDIF0_DIV_CFG1),
- AUDIO_DIV(0, "spdif1_wclk_div", "spdif1_wclk_mux", AUDIO_SPDIF1_DIV_CFG1),
-};
-
-static struct zx_clk_div audio_div_clk[] = {
- DIV_T(0, "tdm_wclk_div", "audio_16m384", AUDIO_TDM_CLK, 8, 4, 0, common_div_table),
-};
-
-static struct zx_clk_gate audio_gate_clk[] = {
- GATE(AUDIO_I2S0_WCLK, "i2s0_wclk", "i2s0_wclk_div", AUDIO_I2S0_CLK, 9, CLK_SET_RATE_PARENT, 0),
- GATE(AUDIO_I2S1_WCLK, "i2s1_wclk", "i2s1_wclk_div", AUDIO_I2S1_CLK, 9, CLK_SET_RATE_PARENT, 0),
- GATE(AUDIO_I2S2_WCLK, "i2s2_wclk", "i2s2_wclk_div", AUDIO_I2S2_CLK, 9, CLK_SET_RATE_PARENT, 0),
- GATE(AUDIO_I2S3_WCLK, "i2s3_wclk", "i2s3_wclk_div", AUDIO_I2S3_CLK, 9, CLK_SET_RATE_PARENT, 0),
- GATE(AUDIO_I2S0_PCLK, "i2s0_pclk", "clk49m5", AUDIO_I2S0_CLK, 8, 0, 0),
- GATE(AUDIO_I2S1_PCLK, "i2s1_pclk", "clk49m5", AUDIO_I2S1_CLK, 8, 0, 0),
- GATE(AUDIO_I2S2_PCLK, "i2s2_pclk", "clk49m5", AUDIO_I2S2_CLK, 8, 0, 0),
- GATE(AUDIO_I2S3_PCLK, "i2s3_pclk", "clk49m5", AUDIO_I2S3_CLK, 8, 0, 0),
- GATE(AUDIO_I2C0_WCLK, "i2c0_wclk", "i2c0_wclk_mux", AUDIO_I2C0_CLK, 9, CLK_SET_RATE_PARENT, 0),
- GATE(AUDIO_SPDIF0_WCLK, "spdif0_wclk", "spdif0_wclk_div", AUDIO_SPDIF0_CLK, 9, CLK_SET_RATE_PARENT, 0),
- GATE(AUDIO_SPDIF1_WCLK, "spdif1_wclk", "spdif1_wclk_div", AUDIO_SPDIF1_CLK, 9, CLK_SET_RATE_PARENT, 0),
- GATE(AUDIO_TDM_WCLK, "tdm_wclk", "tdm_wclk_div", AUDIO_TDM_CLK, 17, CLK_SET_RATE_PARENT, 0),
- GATE(AUDIO_TS_PCLK, "tempsensor_pclk", "clk49m5", AUDIO_TS_CLK, 1, 0, 0),
-};
-
-static struct clk_hw_onecell_data audio_hw_onecell_data = {
- .num = AUDIO_NR_CLKS,
- .hws = {
- [AUDIO_NR_CLKS - 1] = NULL,
- },
-};
-
-static int __init audio_clocks_init(struct device_node *np)
-{
- void __iomem *reg_base;
- int i, ret;
- const char *name;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base) {
- pr_err("%s: Unable to map audio clk base\n", __func__);
- return -ENXIO;
- }
-
- for (i = 0; i < ARRAY_SIZE(audio_mux_clk); i++) {
- if (audio_mux_clk[i].id)
- audio_hw_onecell_data.hws[audio_mux_clk[i].id] =
- &audio_mux_clk[i].mux.hw;
-
- audio_mux_clk[i].mux.reg += (uintptr_t)reg_base;
- name = audio_mux_clk[i].mux.hw.init->name;
- ret = clk_hw_register(NULL, &audio_mux_clk[i].mux.hw);
- if (ret)
- pr_warn("audio clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(audio_adiv_clk); i++) {
- if (audio_adiv_clk[i].id)
- audio_hw_onecell_data.hws[audio_adiv_clk[i].id] =
- &audio_adiv_clk[i].hw;
-
- audio_adiv_clk[i].reg_base += (uintptr_t)reg_base;
- name = audio_adiv_clk[i].hw.init->name;
- ret = clk_hw_register(NULL, &audio_adiv_clk[i].hw);
- if (ret)
- pr_warn("audio clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(audio_div_clk); i++) {
- if (audio_div_clk[i].id)
- audio_hw_onecell_data.hws[audio_div_clk[i].id] =
- &audio_div_clk[i].div.hw;
-
- audio_div_clk[i].div.reg += (uintptr_t)reg_base;
- name = audio_div_clk[i].div.hw.init->name;
- ret = clk_hw_register(NULL, &audio_div_clk[i].div.hw);
- if (ret)
- pr_warn("audio clk %s init error!\n", name);
- }
-
- for (i = 0; i < ARRAY_SIZE(audio_gate_clk); i++) {
- if (audio_gate_clk[i].id)
- audio_hw_onecell_data.hws[audio_gate_clk[i].id] =
- &audio_gate_clk[i].gate.hw;
-
- audio_gate_clk[i].gate.reg += (uintptr_t)reg_base;
- name = audio_gate_clk[i].gate.hw.init->name;
- ret = clk_hw_register(NULL, &audio_gate_clk[i].gate.hw);
- if (ret)
- pr_warn("audio clk %s init error!\n", name);
- }
-
- ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
- &audio_hw_onecell_data);
- if (ret) {
- pr_err("failed to register audio clk provider: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static const struct of_device_id zx_clkc_match_table[] = {
- { .compatible = "zte,zx296718-topcrm", .data = &top_clocks_init },
- { .compatible = "zte,zx296718-lsp0crm", .data = &lsp0_clocks_init },
- { .compatible = "zte,zx296718-lsp1crm", .data = &lsp1_clocks_init },
- { .compatible = "zte,zx296718-audiocrm", .data = &audio_clocks_init },
- { }
-};
-
-static int zx_clkc_probe(struct platform_device *pdev)
-{
- int (*init_fn)(struct device_node *np);
- struct device_node *np = pdev->dev.of_node;
-
- init_fn = of_device_get_match_data(&pdev->dev);
- if (!init_fn) {
- dev_err(&pdev->dev, "Error: No device match found\n");
- return -ENODEV;
- }
-
- return init_fn(np);
-}
-
-static struct platform_driver zx_clk_driver = {
- .probe = zx_clkc_probe,
- .driver = {
- .name = "zx296718-clkc",
- .of_match_table = zx_clkc_match_table,
- },
-};
-
-static int __init zx_clk_init(void)
-{
- return platform_driver_register(&zx_clk_driver);
-}
-core_initcall(zx_clk_init);
diff --git a/drivers/clk/zte/clk.c b/drivers/clk/zte/clk.c
deleted file mode 100644
index 8bda6d41ad3a..000000000000
--- a/drivers/clk/zte/clk.c
+++ /dev/null
@@ -1,446 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2014 Linaro Ltd.
- * Copyright (C) 2014 ZTE Corporation.
- */
-
-#include <linux/clk-provider.h>
-#include <linux/err.h>
-#include <linux/gcd.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <asm/div64.h>
-
-#include "clk.h"
-
-#define to_clk_zx_pll(_hw) container_of(_hw, struct clk_zx_pll, hw)
-#define to_clk_zx_audio(_hw) container_of(_hw, struct clk_zx_audio, hw)
-
-#define CFG0_CFG1_OFFSET 4
-#define LOCK_FLAG 30
-#define POWER_DOWN 31
-
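-/*
- * Pick a lookup-table entry for the requested rate. The table is ordered by
- * rate in ascending order; without an exact match, the entry just below the
- * requested rate is used, clamped to the table bounds.
- */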
-static int rate_to_idx(struct clk_zx_pll *zx_pll, unsigned long rate)
-{
- const struct zx_pll_config *config = zx_pll->lookup_table;
- int i;
-
- for (i = 0; i < zx_pll->count; i++) {
- if (config[i].rate > rate)
- return i > 0 ? i - 1 : 0;
-
- if (config[i].rate == rate)
- return i;
- }
-
- return i - 1;
-}
-
-static int hw_to_idx(struct clk_zx_pll *zx_pll)
-{
- const struct zx_pll_config *config = zx_pll->lookup_table;
- u32 hw_cfg0, hw_cfg1;
- int i;
-
- hw_cfg0 = readl_relaxed(zx_pll->reg_base);
- hw_cfg1 = readl_relaxed(zx_pll->reg_base + CFG0_CFG1_OFFSET);
-
- /* For matching the value in lookup table */
- hw_cfg0 &= ~BIT(zx_pll->lock_bit);
-
- /* Check availability of pd_bit */
- if (zx_pll->pd_bit < 32)
- hw_cfg0 |= BIT(zx_pll->pd_bit);
-
- for (i = 0; i < zx_pll->count; i++) {
- if (hw_cfg0 == config[i].cfg0 && hw_cfg1 == config[i].cfg1)
- return i;
- }
-
- return -EINVAL;
-}
-
-static unsigned long zx_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
- int idx;
-
- idx = hw_to_idx(zx_pll);
- if (unlikely(idx == -EINVAL))
- return 0;
-
- return zx_pll->lookup_table[idx].rate;
-}
-
-static long zx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
- int idx;
-
- idx = rate_to_idx(zx_pll, rate);
-
- return zx_pll->lookup_table[idx].rate;
-}
-
-static int zx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- /* Assume the current CPU is not clocked by the PLL being reprogrammed */
- struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
- const struct zx_pll_config *config;
- int idx;
-
- idx = rate_to_idx(zx_pll, rate);
- config = &zx_pll->lookup_table[idx];
-
- writel_relaxed(config->cfg0, zx_pll->reg_base);
- writel_relaxed(config->cfg1, zx_pll->reg_base + CFG0_CFG1_OFFSET);
-
- return 0;
-}
-
-static int zx_pll_enable(struct clk_hw *hw)
-{
- struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
- u32 reg;
-
- /* If pd_bit is not available, simply return success. */
- if (zx_pll->pd_bit > 31)
- return 0;
-
- reg = readl_relaxed(zx_pll->reg_base);
- writel_relaxed(reg & ~BIT(zx_pll->pd_bit), zx_pll->reg_base);
-
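- /* Wait up to 100us for the PLL lock bit to be set. */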
- return readl_relaxed_poll_timeout(zx_pll->reg_base, reg,
- reg & BIT(zx_pll->lock_bit), 0, 100);
-}
-
-static void zx_pll_disable(struct clk_hw *hw)
-{
- struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
- u32 reg;
-
- if (zx_pll->pd_bit > 31)
- return;
-
- reg = readl_relaxed(zx_pll->reg_base);
- writel_relaxed(reg | BIT(zx_pll->pd_bit), zx_pll->reg_base);
-}
-
-static int zx_pll_is_enabled(struct clk_hw *hw)
-{
- struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
- u32 reg;
-
- reg = readl_relaxed(zx_pll->reg_base);
-
- return !(reg & BIT(zx_pll->pd_bit));
-}
-
-const struct clk_ops zx_pll_ops = {
- .recalc_rate = zx_pll_recalc_rate,
- .round_rate = zx_pll_round_rate,
- .set_rate = zx_pll_set_rate,
- .enable = zx_pll_enable,
- .disable = zx_pll_disable,
- .is_enabled = zx_pll_is_enabled,
-};
-EXPORT_SYMBOL(zx_pll_ops);
-
-struct clk *clk_register_zx_pll(const char *name, const char *parent_name,
- unsigned long flags, void __iomem *reg_base,
- const struct zx_pll_config *lookup_table,
- int count, spinlock_t *lock)
-{
- struct clk_zx_pll *zx_pll;
- struct clk *clk;
- struct clk_init_data init;
-
- zx_pll = kzalloc(sizeof(*zx_pll), GFP_KERNEL);
- if (!zx_pll)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &zx_pll_ops;
- init.flags = flags;
- init.parent_names = parent_name ? &parent_name : NULL;
- init.num_parents = parent_name ? 1 : 0;
-
- zx_pll->reg_base = reg_base;
- zx_pll->lookup_table = lookup_table;
- zx_pll->count = count;
- zx_pll->lock_bit = LOCK_FLAG;
- zx_pll->pd_bit = POWER_DOWN;
- zx_pll->lock = lock;
- zx_pll->hw.init = &init;
-
- clk = clk_register(NULL, &zx_pll->hw);
- if (IS_ERR(clk))
- kfree(zx_pll);
-
- return clk;
-}
-
-#define BPAR 1000000
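-/*
- * Register layout used by calc_reg()/calc_rate(): sel in bit 24, integ in
- * bits [23:16], fra_div in bits [7:0]; the resulting rate is
- * parent_rate / (2 * integ + sel + fra_div / 255).
- */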
-static u32 calc_reg(u32 parent_rate, u32 rate)
-{
- u32 sel, integ, fra_div, tmp;
- u64 tmp64 = (u64)parent_rate * BPAR;
-
- do_div(tmp64, rate);
- integ = (u32)tmp64 / BPAR;
- integ = integ >> 1;
-
- tmp = (u32)tmp64 % BPAR;
- sel = tmp / BPAR;
-
- tmp = tmp % BPAR;
- fra_div = tmp * 0xff / BPAR;
- tmp = (sel << 24) | (integ << 16) | (0xff << 8) | fra_div;
-
- /* Set the I2S integer divider to 1. This bit is reserved for SPDIF
- * and does no harm there.
- */
- tmp |= BIT(28);
- return tmp;
-}
-
-static u32 calc_rate(u32 reg, u32 parent_rate)
-{
- u32 sel, integ, fra_div, tmp;
- u64 tmp64 = (u64)parent_rate * BPAR;
-
- tmp = reg;
- sel = (tmp >> 24) & BIT(0);
- integ = (tmp >> 16) & 0xff;
- fra_div = tmp & 0xff;
-
- tmp = fra_div * BPAR;
- tmp = tmp / 0xff;
- tmp += sel * BPAR;
- tmp += 2 * integ * BPAR;
- do_div(tmp64, tmp);
-
- return (u32)tmp64;
-}
-
-static unsigned long zx_audio_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw);
- u32 reg;
-
- reg = readl_relaxed(zx_audio->reg_base);
- return calc_rate(reg, parent_rate);
-}
-
-static long zx_audio_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- u32 reg;
-
- if (rate * 2 > *prate)
- return -EINVAL;
-
- reg = calc_reg(*prate, rate);
- return calc_rate(reg, *prate);
-}
-
-static int zx_audio_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw);
- u32 reg;
-
- reg = calc_reg(parent_rate, rate);
- writel_relaxed(reg, zx_audio->reg_base);
-
- return 0;
-}
-
-#define ZX_AUDIO_EN BIT(25)
-static int zx_audio_enable(struct clk_hw *hw)
-{
- struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw);
- u32 reg;
-
- reg = readl_relaxed(zx_audio->reg_base);
- writel_relaxed(reg & ~ZX_AUDIO_EN, zx_audio->reg_base);
- return 0;
-}
-
-static void zx_audio_disable(struct clk_hw *hw)
-{
- struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw);
- u32 reg;
-
- reg = readl_relaxed(zx_audio->reg_base);
- writel_relaxed(reg | ZX_AUDIO_EN, zx_audio->reg_base);
-}
-
-static const struct clk_ops zx_audio_ops = {
- .recalc_rate = zx_audio_recalc_rate,
- .round_rate = zx_audio_round_rate,
- .set_rate = zx_audio_set_rate,
- .enable = zx_audio_enable,
- .disable = zx_audio_disable,
-};
-
-struct clk *clk_register_zx_audio(const char *name,
- const char * const parent_name,
- unsigned long flags,
- void __iomem *reg_base)
-{
- struct clk_zx_audio *zx_audio;
- struct clk *clk;
- struct clk_init_data init;
-
- zx_audio = kzalloc(sizeof(*zx_audio), GFP_KERNEL);
- if (!zx_audio)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &zx_audio_ops;
- init.flags = flags;
- init.parent_names = parent_name ? &parent_name : NULL;
- init.num_parents = parent_name ? 1 : 0;
-
- zx_audio->reg_base = reg_base;
- zx_audio->hw.init = &init;
-
- clk = clk_register(NULL, &zx_audio->hw);
- if (IS_ERR(clk))
- kfree(zx_audio);
-
- return clk;
-}
-
-#define CLK_AUDIO_DIV_FRAC BIT(0)
-#define CLK_AUDIO_DIV_INT BIT(1)
-#define CLK_AUDIO_DIV_UNCOMMON BIT(1)
-
-#define CLK_AUDIO_DIV_FRAC_NSHIFT 16
-#define CLK_AUDIO_DIV_INT_FRAC_RE BIT(16)
-#define CLK_AUDIO_DIV_INT_FRAC_MAX (0xffff)
-#define CLK_AUDIO_DIV_INT_FRAC_MIN (0x2)
-#define CLK_AUDIO_DIV_INT_INT_SHIFT 24
-#define CLK_AUDIO_DIV_INT_INT_WIDTH 4
-
-struct zx_clk_audio_div_table {
- unsigned long rate;
- unsigned int int_reg;
- unsigned int frac_reg;
-};
-
-#define to_clk_zx_audio_div(_hw) container_of(_hw, struct clk_zx_audio_divider, hw)
-
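-/*
- * The audio divider is fractional: with m in bits [15:0] and n in bits
- * [31:16] of the fractional register, and the integer part in bits [15:0]
- * of the integer register, the output rate is parent_rate * n / (int * n + m).
- */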
-static unsigned long audio_calc_rate(struct clk_zx_audio_divider *audio_div,
- u32 reg_frac, u32 reg_int,
- unsigned long parent_rate)
-{
- unsigned long rate, m, n;
-
- m = reg_frac & 0xffff;
- n = (reg_frac >> 16) & 0xffff;
-
- m = (reg_int & 0xffff) * n + m;
- rate = (parent_rate * n) / m;
-
- return rate;
-}
-
-static void audio_calc_reg(struct clk_zx_audio_divider *audio_div,
- struct zx_clk_audio_div_table *div_table,
- unsigned long rate, unsigned long parent_rate)
-{
- unsigned int reg_int, reg_frac;
- unsigned long m, n, div;
-
- reg_int = parent_rate / rate;
-
- if (reg_int > CLK_AUDIO_DIV_INT_FRAC_MAX)
- reg_int = CLK_AUDIO_DIV_INT_FRAC_MAX;
- else if (reg_int < CLK_AUDIO_DIV_INT_FRAC_MIN)
- reg_int = 0;
- m = parent_rate - rate * reg_int;
- n = rate;
-
- div = gcd(m, n);
- m = m / div;
- n = n / div;
-
- if ((m >> 16) || (n >> 16)) {
- if (m > n) {
- n = n * 0xffff / m;
- m = 0xffff;
- } else {
- m = m * 0xffff / n;
- n = 0xffff;
- }
- }
- reg_frac = m | (n << 16);
-
- div_table->rate = parent_rate * n / (reg_int * n + m);
- div_table->int_reg = reg_int;
- div_table->frac_reg = reg_frac;
-}
-
-static unsigned long zx_audio_div_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_zx_audio_divider *zx_audio_div = to_clk_zx_audio_div(hw);
- u32 reg_frac, reg_int;
-
- reg_frac = readl_relaxed(zx_audio_div->reg_base);
- reg_int = readl_relaxed(zx_audio_div->reg_base + 0x4);
-
- return audio_calc_rate(zx_audio_div, reg_frac, reg_int, parent_rate);
-}
-
-static long zx_audio_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clk_zx_audio_divider *zx_audio_div = to_clk_zx_audio_div(hw);
- struct zx_clk_audio_div_table divt;
-
- audio_calc_reg(zx_audio_div, &divt, rate, *prate);
-
- return audio_calc_rate(zx_audio_div, divt.frac_reg, divt.int_reg, *prate);
-}
-
-static int zx_audio_div_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_zx_audio_divider *zx_audio_div = to_clk_zx_audio_div(hw);
- struct zx_clk_audio_div_table divt;
- unsigned int val;
-
- audio_calc_reg(zx_audio_div, &divt, rate, parent_rate);
- if (divt.rate != rate)
- pr_debug("the real rate is %lu\n", divt.rate);
-
- writel_relaxed(divt.frac_reg, zx_audio_div->reg_base);
-
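- /* Write the integer part and pulse CLK_AUDIO_DIV_INT_FRAC_RE for 1ms. */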
- val = readl_relaxed(zx_audio_div->reg_base + 0x4);
- val &= ~0xffff;
- val |= divt.int_reg | CLK_AUDIO_DIV_INT_FRAC_RE;
- writel_relaxed(val, zx_audio_div->reg_base + 0x4);
-
- mdelay(1);
-
- val = readl_relaxed(zx_audio_div->reg_base + 0x4);
- val &= ~CLK_AUDIO_DIV_INT_FRAC_RE;
- writel_relaxed(val, zx_audio_div->reg_base + 0x4);
-
- return 0;
-}
-
-const struct clk_ops zx_audio_div_ops = {
- .recalc_rate = zx_audio_div_recalc_rate,
- .round_rate = zx_audio_div_round_rate,
- .set_rate = zx_audio_div_set_rate,
-};
diff --git a/drivers/clk/zte/clk.h b/drivers/clk/zte/clk.h
deleted file mode 100644
index aeaf2a380ba6..000000000000
--- a/drivers/clk/zte/clk.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2015 Linaro Ltd.
- * Copyright (C) 2014 ZTE Corporation.
- */
-
-#ifndef __ZTE_CLK_H
-#define __ZTE_CLK_H
-#include <linux/clk-provider.h>
-#include <linux/spinlock.h>
-
-#define PNAME(x) static const char *x[]
-
-struct zx_pll_config {
- unsigned long rate;
- u32 cfg0;
- u32 cfg1;
-};
-
-struct clk_zx_pll {
- struct clk_hw hw;
- void __iomem *reg_base;
- const struct zx_pll_config *lookup_table; /* ordered by rate, ascending */
- int count;
- spinlock_t *lock;
- u8 pd_bit; /* power down bit */
- u8 lock_bit; /* pll lock flag bit */
-};
-
-#define PLL_RATE(_rate, _cfg0, _cfg1) \
-{ \
- .rate = _rate, \
- .cfg0 = _cfg0, \
- .cfg1 = _cfg1, \
-}
-
-#define ZX_PLL(_name, _parent, _reg, _table, _pd, _lock) \
-{ \
- .reg_base = (void __iomem *) _reg, \
- .lookup_table = _table, \
- .count = ARRAY_SIZE(_table), \
- .pd_bit = _pd, \
- .lock_bit = _lock, \
- .hw.init = CLK_HW_INIT(_name, _parent, &zx_pll_ops, \
- CLK_GET_RATE_NOCACHE), \
-}
-
-/*
- * The pd_bit is not available on ZX296718, so let's pass something
- * bigger than 31, e.g. 0xff, to indicate that.
- */
-#define ZX296718_PLL(_name, _parent, _reg, _table) \
-ZX_PLL(_name, _parent, _reg, _table, 0xff, 30)
-
-struct zx_clk_gate {
- struct clk_gate gate;
- u16 id;
-};
-
-#define GATE(_id, _name, _parent, _reg, _bit, _flag, _gflags) \
-{ \
- .gate = { \
- .reg = (void __iomem *) _reg, \
- .bit_idx = (_bit), \
- .flags = _gflags, \
- .lock = &clk_lock, \
- .hw.init = CLK_HW_INIT(_name, \
- _parent, \
- &clk_gate_ops, \
- _flag | CLK_IGNORE_UNUSED), \
- }, \
- .id = _id, \
-}
-
-struct zx_clk_fixed_factor {
- struct clk_fixed_factor factor;
- u16 id;
-};
-
-#define FFACTOR(_id, _name, _parent, _mult, _div, _flag) \
-{ \
- .factor = { \
- .div = _div, \
- .mult = _mult, \
- .hw.init = CLK_HW_INIT(_name, \
- _parent, \
- &clk_fixed_factor_ops, \
- _flag), \
- }, \
- .id = _id, \
-}
-
-struct zx_clk_mux {
- struct clk_mux mux;
- u16 id;
-};
-
-#define MUX_F(_id, _name, _parent, _reg, _shift, _width, _flag, _mflag) \
-{ \
- .mux = { \
- .reg = (void __iomem *) _reg, \
- .mask = BIT(_width) - 1, \
- .shift = _shift, \
- .flags = _mflag, \
- .lock = &clk_lock, \
- .hw.init = CLK_HW_INIT_PARENTS(_name, \
- _parent, \
- &clk_mux_ops, \
- _flag), \
- }, \
- .id = _id, \
-}
-
-#define MUX(_id, _name, _parent, _reg, _shift, _width) \
-MUX_F(_id, _name, _parent, _reg, _shift, _width, 0, 0)
-
-struct zx_clk_div {
- struct clk_divider div;
- u16 id;
-};
-
-#define DIV_T(_id, _name, _parent, _reg, _shift, _width, _flag, _table) \
-{ \
- .div = { \
- .reg = (void __iomem *) _reg, \
- .shift = _shift, \
- .width = _width, \
- .flags = 0, \
- .table = _table, \
- .lock = &clk_lock, \
- .hw.init = CLK_HW_INIT(_name, \
- _parent, \
- &clk_divider_ops, \
- _flag), \
- }, \
- .id = _id, \
-}
-
-struct clk_zx_audio_divider {
- struct clk_hw hw;
- void __iomem *reg_base;
- unsigned int rate_count;
- spinlock_t *lock;
- u16 id;
-};
-
-#define AUDIO_DIV(_id, _name, _parent, _reg) \
-{ \
- .reg_base = (void __iomem *) _reg, \
- .lock = &clk_lock, \
- .hw.init = CLK_HW_INIT(_name, \
- _parent, \
- &zx_audio_div_ops, \
- 0), \
- .id = _id, \
-}
-
-struct clk *clk_register_zx_pll(const char *name, const char *parent_name,
- unsigned long flags, void __iomem *reg_base,
- const struct zx_pll_config *lookup_table, int count, spinlock_t *lock);
-
-struct clk_zx_audio {
- struct clk_hw hw;
- void __iomem *reg_base;
-};
-
-struct clk *clk_register_zx_audio(const char *name,
- const char * const parent_name,
- unsigned long flags, void __iomem *reg_base);
-
-extern const struct clk_ops zx_pll_ops;
-extern const struct clk_ops zx_audio_div_ops;
-
-#endif
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index ffbb9008c1c9..204b83d911b9 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -103,7 +103,6 @@ static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
const char *clk_name, void __iomem *fclk_ctrl_reg,
const char **parents, int enable)
{
- struct clk *clk;
u32 enable_reg;
char *mux_name;
char *div0_name;
@@ -131,15 +130,15 @@ static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
if (!div1_name)
goto err_div1_name;
- clk = clk_register_mux(NULL, mux_name, parents, 4,
+ clk_register_mux(NULL, mux_name, parents, 4,
CLK_SET_RATE_NO_REPARENT, fclk_ctrl_reg, 4, 2, 0,
fclk_lock);
- clk = clk_register_divider(NULL, div0_name, mux_name,
+ clk_register_divider(NULL, div0_name, mux_name,
0, fclk_ctrl_reg, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, fclk_lock);
- clk = clk_register_divider(NULL, div1_name, div0_name,
+ clk_register_divider(NULL, div1_name, div0_name,
CLK_SET_RATE_PARENT, fclk_ctrl_reg, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
fclk_lock);
@@ -176,7 +175,6 @@ static void __init zynq_clk_register_periph_clk(enum zynq_clk clk0,
const char *clk_name1, void __iomem *clk_ctrl,
const char **parents, unsigned int two_gates)
{
- struct clk *clk;
char *mux_name;
char *div_name;
spinlock_t *lock;
@@ -189,10 +187,10 @@ static void __init zynq_clk_register_periph_clk(enum zynq_clk clk0,
mux_name = kasprintf(GFP_KERNEL, "%s_mux", clk_name0);
div_name = kasprintf(GFP_KERNEL, "%s_div", clk_name0);
- clk = clk_register_mux(NULL, mux_name, parents, 4,
+ clk_register_mux(NULL, mux_name, parents, 4,
CLK_SET_RATE_NO_REPARENT, clk_ctrl, 4, 2, 0, lock);
- clk = clk_register_divider(NULL, div_name, mux_name, 0, clk_ctrl, 8, 6,
+ clk_register_divider(NULL, div_name, mux_name, 0, clk_ctrl, 8, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, lock);
clks[clk0] = clk_register_gate(NULL, clk_name0, div_name,
@@ -217,7 +215,6 @@ static void __init zynq_clk_setup(struct device_node *np)
int i;
u32 tmp;
int ret;
- struct clk *clk;
char *clk_name;
unsigned int fclk_enable = 0;
const char *clk_output_name[clk_max];
@@ -257,19 +254,19 @@ static void __init zynq_clk_setup(struct device_node *np)
ps_clk = clk_register_fixed_rate(NULL, "ps_clk", NULL, 0, tmp);
/* PLLs */
- clk = clk_register_zynq_pll("armpll_int", "ps_clk", SLCR_ARMPLL_CTRL,
+ clk_register_zynq_pll("armpll_int", "ps_clk", SLCR_ARMPLL_CTRL,
SLCR_PLL_STATUS, 0, &armpll_lock);
clks[armpll] = clk_register_mux(NULL, clk_output_name[armpll],
armpll_parents, 2, CLK_SET_RATE_NO_REPARENT,
SLCR_ARMPLL_CTRL, 4, 1, 0, &armpll_lock);
- clk = clk_register_zynq_pll("ddrpll_int", "ps_clk", SLCR_DDRPLL_CTRL,
+ clk_register_zynq_pll("ddrpll_int", "ps_clk", SLCR_DDRPLL_CTRL,
SLCR_PLL_STATUS, 1, &ddrpll_lock);
clks[ddrpll] = clk_register_mux(NULL, clk_output_name[ddrpll],
ddrpll_parents, 2, CLK_SET_RATE_NO_REPARENT,
SLCR_DDRPLL_CTRL, 4, 1, 0, &ddrpll_lock);
- clk = clk_register_zynq_pll("iopll_int", "ps_clk", SLCR_IOPLL_CTRL,
+ clk_register_zynq_pll("iopll_int", "ps_clk", SLCR_IOPLL_CTRL,
SLCR_PLL_STATUS, 2, &iopll_lock);
clks[iopll] = clk_register_mux(NULL, clk_output_name[iopll],
iopll_parents, 2, CLK_SET_RATE_NO_REPARENT,
@@ -277,10 +274,10 @@ static void __init zynq_clk_setup(struct device_node *np)
/* CPU clocks */
tmp = readl(SLCR_621_TRUE) & 1;
- clk = clk_register_mux(NULL, "cpu_mux", cpu_parents, 4,
+ clk_register_mux(NULL, "cpu_mux", cpu_parents, 4,
CLK_SET_RATE_NO_REPARENT, SLCR_ARM_CLK_CTRL, 4, 2, 0,
&armclk_lock);
- clk = clk_register_divider(NULL, "cpu_div", "cpu_mux", 0,
+ clk_register_divider(NULL, "cpu_div", "cpu_mux", 0,
SLCR_ARM_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &armclk_lock);
@@ -288,20 +285,20 @@ static void __init zynq_clk_setup(struct device_node *np)
"cpu_div", CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
SLCR_ARM_CLK_CTRL, 24, 0, &armclk_lock);
- clk = clk_register_fixed_factor(NULL, "cpu_3or2x_div", "cpu_div", 0,
+ clk_register_fixed_factor(NULL, "cpu_3or2x_div", "cpu_div", 0,
1, 2);
clks[cpu_3or2x] = clk_register_gate(NULL, clk_output_name[cpu_3or2x],
"cpu_3or2x_div", CLK_IGNORE_UNUSED,
SLCR_ARM_CLK_CTRL, 25, 0, &armclk_lock);
- clk = clk_register_fixed_factor(NULL, "cpu_2x_div", "cpu_div", 0, 1,
+ clk_register_fixed_factor(NULL, "cpu_2x_div", "cpu_div", 0, 1,
2 + tmp);
clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x],
"cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL,
26, 0, &armclk_lock);
clk_prepare_enable(clks[cpu_2x]);
- clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1,
+ clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1,
4 + 2 * tmp);
clks[cpu_1x] = clk_register_gate(NULL, clk_output_name[cpu_1x],
"cpu_1x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL, 27,
@@ -324,23 +321,23 @@ static void __init zynq_clk_setup(struct device_node *np)
&swdtclk_lock);
/* DDR clocks */
- clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0,
+ clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0,
SLCR_DDR_CLK_CTRL, 26, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &ddrclk_lock);
clks[ddr2x] = clk_register_gate(NULL, clk_output_name[ddr2x],
"ddr2x_div", 0, SLCR_DDR_CLK_CTRL, 1, 0, &ddrclk_lock);
clk_prepare_enable(clks[ddr2x]);
- clk = clk_register_divider(NULL, "ddr3x_div", "ddrpll", 0,
+ clk_register_divider(NULL, "ddr3x_div", "ddrpll", 0,
SLCR_DDR_CLK_CTRL, 20, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &ddrclk_lock);
clks[ddr3x] = clk_register_gate(NULL, clk_output_name[ddr3x],
"ddr3x_div", 0, SLCR_DDR_CLK_CTRL, 0, 0, &ddrclk_lock);
clk_prepare_enable(clks[ddr3x]);
- clk = clk_register_divider(NULL, "dci_div0", "ddrpll", 0,
+ clk_register_divider(NULL, "dci_div0", "ddrpll", 0,
SLCR_DCI_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &dciclk_lock);
- clk = clk_register_divider(NULL, "dci_div1", "dci_div0",
+ clk_register_divider(NULL, "dci_div1", "dci_div0",
CLK_SET_RATE_PARENT, SLCR_DCI_CLK_CTRL, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&dciclk_lock);
@@ -385,17 +382,17 @@ static void __init zynq_clk_setup(struct device_node *np)
gem0_mux_parents[i + 1] = of_clk_get_parent_name(np,
idx);
}
- clk = clk_register_mux(NULL, "gem0_mux", periph_parents, 4,
+ clk_register_mux(NULL, "gem0_mux", periph_parents, 4,
CLK_SET_RATE_NO_REPARENT, SLCR_GEM0_CLK_CTRL, 4, 2, 0,
&gem0clk_lock);
- clk = clk_register_divider(NULL, "gem0_div0", "gem0_mux", 0,
+ clk_register_divider(NULL, "gem0_div0", "gem0_mux", 0,
SLCR_GEM0_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &gem0clk_lock);
- clk = clk_register_divider(NULL, "gem0_div1", "gem0_div0",
+ clk_register_divider(NULL, "gem0_div1", "gem0_div0",
CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&gem0clk_lock);
- clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2,
+ clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2,
CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SLCR_GEM0_CLK_CTRL, 6, 1, 0,
&gem0clk_lock);
@@ -410,17 +407,17 @@ static void __init zynq_clk_setup(struct device_node *np)
gem1_mux_parents[i + 1] = of_clk_get_parent_name(np,
idx);
}
- clk = clk_register_mux(NULL, "gem1_mux", periph_parents, 4,
+ clk_register_mux(NULL, "gem1_mux", periph_parents, 4,
CLK_SET_RATE_NO_REPARENT, SLCR_GEM1_CLK_CTRL, 4, 2, 0,
&gem1clk_lock);
- clk = clk_register_divider(NULL, "gem1_div0", "gem1_mux", 0,
+ clk_register_divider(NULL, "gem1_div0", "gem1_mux", 0,
SLCR_GEM1_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &gem1clk_lock);
- clk = clk_register_divider(NULL, "gem1_div1", "gem1_div0",
+ clk_register_divider(NULL, "gem1_div1", "gem1_div0",
CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&gem1clk_lock);
- clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2,
+ clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2,
CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SLCR_GEM1_CLK_CTRL, 6, 1, 0,
&gem1clk_lock);
@@ -442,27 +439,27 @@ static void __init zynq_clk_setup(struct device_node *np)
can_mio_mux_parents[i] = dummy_nm;
}
kfree(clk_name);
- clk = clk_register_mux(NULL, "can_mux", periph_parents, 4,
+ clk_register_mux(NULL, "can_mux", periph_parents, 4,
CLK_SET_RATE_NO_REPARENT, SLCR_CAN_CLK_CTRL, 4, 2, 0,
&canclk_lock);
- clk = clk_register_divider(NULL, "can_div0", "can_mux", 0,
+ clk_register_divider(NULL, "can_div0", "can_mux", 0,
SLCR_CAN_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &canclk_lock);
- clk = clk_register_divider(NULL, "can_div1", "can_div0",
+ clk_register_divider(NULL, "can_div1", "can_div0",
CLK_SET_RATE_PARENT, SLCR_CAN_CLK_CTRL, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&canclk_lock);
- clk = clk_register_gate(NULL, "can0_gate", "can_div1",
+ clk_register_gate(NULL, "can0_gate", "can_div1",
CLK_SET_RATE_PARENT, SLCR_CAN_CLK_CTRL, 0, 0,
&canclk_lock);
- clk = clk_register_gate(NULL, "can1_gate", "can_div1",
+ clk_register_gate(NULL, "can1_gate", "can_div1",
CLK_SET_RATE_PARENT, SLCR_CAN_CLK_CTRL, 1, 0,
&canclk_lock);
- clk = clk_register_mux(NULL, "can0_mio_mux",
+ clk_register_mux(NULL, "can0_mio_mux",
can_mio_mux_parents, 54, CLK_SET_RATE_PARENT |
CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 0, 6, 0,
&canmioclk_lock);
- clk = clk_register_mux(NULL, "can1_mio_mux",
+ clk_register_mux(NULL, "can1_mio_mux",
can_mio_mux_parents, 54, CLK_SET_RATE_PARENT |
CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 16, 6,
0, &canmioclk_lock);
@@ -482,13 +479,13 @@ static void __init zynq_clk_setup(struct device_node *np)
dbg_emio_mux_parents[i + 1] = of_clk_get_parent_name(np,
idx);
}
- clk = clk_register_mux(NULL, "dbg_mux", periph_parents, 4,
+ clk_register_mux(NULL, "dbg_mux", periph_parents, 4,
CLK_SET_RATE_NO_REPARENT, SLCR_DBG_CLK_CTRL, 4, 2, 0,
&dbgclk_lock);
- clk = clk_register_divider(NULL, "dbg_div", "dbg_mux", 0,
+ clk_register_divider(NULL, "dbg_div", "dbg_mux", 0,
SLCR_DBG_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &dbgclk_lock);
- clk = clk_register_mux(NULL, "dbg_emio_mux", dbg_emio_mux_parents, 2,
+ clk_register_mux(NULL, "dbg_emio_mux", dbg_emio_mux_parents, 2,
CLK_SET_RATE_NO_REPARENT, SLCR_DBG_CLK_CTRL, 6, 1, 0,
&dbgclk_lock);
clks[dbg_trc] = clk_register_gate(NULL, clk_output_name[dbg_trc],
diff --git a/drivers/clk/zynq/pll.c b/drivers/clk/zynq/pll.c
index dcb2037a9596..54f4184de89a 100644
--- a/drivers/clk/zynq/pll.c
+++ b/drivers/clk/zynq/pll.c
@@ -173,12 +173,12 @@ static const struct clk_ops zynq_pll_ops = {
/**
* clk_register_zynq_pll() - Register PLL with the clock framework
- * @name PLL name
- * @parent Parent clock name
- * @pll_ctrl Pointer to PLL control register
- * @pll_status Pointer to PLL status register
- * @lock_index Bit index to this PLL's lock status bit in @pll_status
- * @lock Register lock
+ * @name: PLL name
+ * @parent: Parent clock name
+ * @pll_ctrl: Pointer to PLL control register
+ * @pll_status: Pointer to PLL status register
+ * @lock_index: Bit index to this PLL's lock status bit in @pll_status
+ * @lock: Register lock
* Returns handle to the registered clock.
*/
struct clk *clk_register_zynq_pll(const char *name, const char *parent,
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index 66da02b83d39..e9bf7958b821 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -35,6 +35,7 @@
* @is_frac: The divider is a fractional divider
* @clk_id: Id of clock
* @div_type: divisor type (TYPE_DIV1 or TYPE_DIV2)
+ * @max_div: maximum supported divisor (fetched from firmware)
*/
struct zynqmp_clk_divider {
struct clk_hw hw;
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 14c7c4712478..39aa21d01e05 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -79,6 +79,7 @@ config IXP4XX_TIMER
bool "Intel XScale IXP4xx timer driver" if COMPILE_TEST
depends on HAS_IOMEM
select CLKSRC_MMIO
+ select TIMER_OF if OF
help
Enables support for the Intel XScale IXP4xx SoC timer.
@@ -197,12 +198,6 @@ config CLPS711X_TIMER
help
Enables support for the Cirrus Logic PS711 timer.
-config ATLAS7_TIMER
- bool "Atlas7 timer driver" if COMPILE_TEST
- select CLKSRC_MMIO
- help
- Enables support for the Atlas7 timer.
-
config MXS_TIMER
bool "MXS timer driver" if COMPILE_TEST
select CLKSRC_MMIO
@@ -210,19 +205,6 @@ config MXS_TIMER
help
Enables support for the MXS timer.
-config PRIMA2_TIMER
- bool "Prima2 timer driver" if COMPILE_TEST
- select CLKSRC_MMIO
- help
- Enables support for the Prima2 timer.
-
-config U300_TIMER
- bool "U300 timer driver" if COMPILE_TEST
- depends on ARM
- select CLKSRC_MMIO
- help
- Enables support for the U300 timer.
-
config NSPIRE_TIMER
bool "NSpire timer driver" if COMPILE_TEST
select CLKSRC_MMIO
@@ -242,15 +224,6 @@ config INTEGRATOR_AP_TIMER
help
Enables support for the Integrator-AP timer.
-config CLKSRC_EFM32
- bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
- depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
- select CLKSRC_MMIO
- default ARCH_EFM32
- help
- Support to use the timers of EFM32 SoCs as clock source and clock
- event device.
-
config CLKSRC_LPC32XX
bool "Clocksource for LPC32XX" if COMPILE_TEST
depends on HAS_IOMEM
@@ -567,14 +540,6 @@ config CLKSRC_MIPS_GIC
select CLOCKSOURCE_WATCHDOG
select TIMER_OF
-config CLKSRC_TANGO_XTAL
- bool "Clocksource for Tango SoC" if COMPILE_TEST
- depends on ARM
- select TIMER_OF
- select CLKSRC_MMIO
- help
- This enables the clocksource for Tango SoC.
-
config CLKSRC_PXA
bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST
depends on HAS_IOMEM
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 3c75cbbf8533..c17ee32a7151 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -30,11 +30,8 @@ obj-$(CONFIG_ARMADA_370_XP_TIMER) += timer-armada-370-xp.o
obj-$(CONFIG_ORION_TIMER) += timer-orion.o
obj-$(CONFIG_BCM2835_TIMER) += bcm2835_timer.o
obj-$(CONFIG_CLPS711X_TIMER) += clps711x-timer.o
-obj-$(CONFIG_ATLAS7_TIMER) += timer-atlas7.o
obj-$(CONFIG_MXS_TIMER) += mxs_timer.o
obj-$(CONFIG_CLKSRC_PXA) += timer-pxa.o
-obj-$(CONFIG_PRIMA2_TIMER) += timer-prima2.o
-obj-$(CONFIG_U300_TIMER) += timer-u300.o
obj-$(CONFIG_SUN4I_TIMER) += timer-sun4i.o
obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
obj-$(CONFIG_MESON6_TIMER) += timer-meson6.o
@@ -43,7 +40,6 @@ obj-$(CONFIG_VT8500_TIMER) += timer-vt8500.o
obj-$(CONFIG_NSPIRE_TIMER) += timer-zevio.o
obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o
obj-$(CONFIG_CADENCE_TTC_TIMER) += timer-cadence-ttc.o
-obj-$(CONFIG_CLKSRC_EFM32) += timer-efm32.o
obj-$(CONFIG_CLKSRC_STM32) += timer-stm32.o
obj-$(CONFIG_CLKSRC_STM32_LP) += timer-stm32-lp.o
obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o
@@ -73,7 +69,6 @@ obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o
obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o
obj-$(CONFIG_CLKSRC_VERSATILE) += timer-versatile.o
obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
-obj-$(CONFIG_CLKSRC_TANGO_XTAL) += timer-tango-xtal.o
obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o
obj-$(CONFIG_CLKSRC_IMX_TPM) += timer-imx-tpm.o
obj-$(CONFIG_TIMER_IMX_SYS_CTR) += timer-imx-sysctr.o
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index ba04cb381cd3..269a691bd2c4 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -426,6 +426,9 @@ static bool __init hv_init_tsc_clocksource(void)
if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
return false;
+ if (hv_root_partition)
+ return false;
+
hv_read_reference_counter = read_hv_clock_tsc;
phys_addr = virt_to_phys(hv_get_tsc_page());
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
index bc96a4cbf26c..e52e12d27d2a 100644
--- a/drivers/clocksource/mxs_timer.c
+++ b/drivers/clocksource/mxs_timer.c
@@ -131,10 +131,7 @@ static void mxs_irq_clear(char *state)
/* Clear pending interrupt */
timrot_irq_acknowledge();
-
-#ifdef DEBUG
- pr_info("%s: changing mode to %s\n", __func__, state)
-#endif /* DEBUG */
+ pr_debug("%s: changing mode to %s\n", __func__, state);
}
static int mxs_shutdown(struct clock_event_device *evt)
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index e258230d432c..c98f8851fd68 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -235,6 +235,8 @@ static const struct sh_cmt_info sh_cmt_info[] = {
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
+#define CMCLKE 0x1000 /* CLK Enable Register (R-Car Gen2) */
+
static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
{
if (ch->iostart)
@@ -853,6 +855,7 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
unsigned int hwidx, bool clockevent,
bool clocksource, struct sh_cmt_device *cmt)
{
+ u32 value;
int ret;
/* Skip unused channels. */
@@ -882,6 +885,11 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
ch->ioctrl = ch->iostart + 0x10;
ch->timer_bit = 0;
+
+ /* Enable the clock supply to the channel */
+ value = ioread32(cmt->mapbase + CMCLKE);
+ value |= BIT(hwidx);
+ iowrite32(value, cmt->mapbase + CMCLKE);
break;
}
@@ -1014,12 +1022,10 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
else
cmt->rate = clk_get_rate(cmt->clk) / 8;
- clk_disable(cmt->clk);
-
/* Map the memory resource(s). */
ret = sh_cmt_map_memory(cmt);
if (ret < 0)
- goto err_clk_unprepare;
+ goto err_clk_disable;
/* Allocate and setup the channels. */
cmt->num_channels = hweight8(cmt->hw_channels);
@@ -1047,6 +1053,8 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
mask &= ~(1 << hwidx);
}
+ clk_disable(cmt->clk);
+
platform_set_drvdata(pdev, cmt);
return 0;
@@ -1054,6 +1062,8 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
err_unmap:
kfree(cmt->channels);
iounmap(cmt->mapbase);
+err_clk_disable:
+ clk_disable(cmt->clk);
err_clk_unprepare:
clk_unprepare(cmt->clk);
err_clk_put:
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c
deleted file mode 100644
index c21c91c2bc56..000000000000
--- a/drivers/clocksource/timer-atlas7.c
+++ /dev/null
@@ -1,281 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * System timer for CSR SiRFprimaII
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/clockchips.h>
-#include <linux/clocksource.h>
-#include <linux/cpu.h>
-#include <linux/bitops.h>
-#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/sched_clock.h>
-
-#define SIRFSOC_TIMER_32COUNTER_0_CTRL 0x0000
-#define SIRFSOC_TIMER_32COUNTER_1_CTRL 0x0004
-#define SIRFSOC_TIMER_MATCH_0 0x0018
-#define SIRFSOC_TIMER_MATCH_1 0x001c
-#define SIRFSOC_TIMER_COUNTER_0 0x0048
-#define SIRFSOC_TIMER_COUNTER_1 0x004c
-#define SIRFSOC_TIMER_INTR_STATUS 0x0060
-#define SIRFSOC_TIMER_WATCHDOG_EN 0x0064
-#define SIRFSOC_TIMER_64COUNTER_CTRL 0x0068
-#define SIRFSOC_TIMER_64COUNTER_LO 0x006c
-#define SIRFSOC_TIMER_64COUNTER_HI 0x0070
-#define SIRFSOC_TIMER_64COUNTER_LOAD_LO 0x0074
-#define SIRFSOC_TIMER_64COUNTER_LOAD_HI 0x0078
-#define SIRFSOC_TIMER_64COUNTER_RLATCHED_LO 0x007c
-#define SIRFSOC_TIMER_64COUNTER_RLATCHED_HI 0x0080
-
-#define SIRFSOC_TIMER_REG_CNT 6
-
-static unsigned long atlas7_timer_rate;
-
-static const u32 sirfsoc_timer_reg_list[SIRFSOC_TIMER_REG_CNT] = {
- SIRFSOC_TIMER_WATCHDOG_EN,
- SIRFSOC_TIMER_32COUNTER_0_CTRL,
- SIRFSOC_TIMER_32COUNTER_1_CTRL,
- SIRFSOC_TIMER_64COUNTER_CTRL,
- SIRFSOC_TIMER_64COUNTER_RLATCHED_LO,
- SIRFSOC_TIMER_64COUNTER_RLATCHED_HI,
-};
-
-static u32 sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT];
-
-static void __iomem *sirfsoc_timer_base;
-
-/* disable count and interrupt */
-static inline void sirfsoc_timer_count_disable(int idx)
-{
- writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx) & ~0x7,
- sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx);
-}
-
-/* enable count and interrupt */
-static inline void sirfsoc_timer_count_enable(int idx)
-{
- writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx) | 0x3,
- sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx);
-}
-
-/* timer interrupt handler */
-static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *ce = dev_id;
- int cpu = smp_processor_id();
-
- /* clear timer interrupt */
- writel_relaxed(BIT(cpu), sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS);
-
- if (clockevent_state_oneshot(ce))
- sirfsoc_timer_count_disable(cpu);
-
- ce->event_handler(ce);
-
- return IRQ_HANDLED;
-}
-
-/* read 64-bit timer counter */
-static u64 sirfsoc_timer_read(struct clocksource *cs)
-{
- u64 cycles;
-
- writel_relaxed((readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL) |
- BIT(0)) & ~BIT(1), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
-
- cycles = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_RLATCHED_HI);
- cycles = (cycles << 32) | readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_RLATCHED_LO);
-
- return cycles;
-}
-
-static int sirfsoc_timer_set_next_event(unsigned long delta,
- struct clock_event_device *ce)
-{
- int cpu = smp_processor_id();
-
- /* disable timer first, then modify the related registers */
- sirfsoc_timer_count_disable(cpu);
-
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_0 +
- 4 * cpu);
- writel_relaxed(delta, sirfsoc_timer_base + SIRFSOC_TIMER_MATCH_0 +
- 4 * cpu);
-
- /* enable the tick */
- sirfsoc_timer_count_enable(cpu);
-
- return 0;
-}
-
-/* Oneshot is enabled in set_next_event */
-static int sirfsoc_timer_shutdown(struct clock_event_device *evt)
-{
- sirfsoc_timer_count_disable(smp_processor_id());
- return 0;
-}
-
-static void sirfsoc_clocksource_suspend(struct clocksource *cs)
-{
- int i;
-
- for (i = 0; i < SIRFSOC_TIMER_REG_CNT; i++)
- sirfsoc_timer_reg_val[i] = readl_relaxed(sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);
-}
-
-static void sirfsoc_clocksource_resume(struct clocksource *cs)
-{
- int i;
-
- for (i = 0; i < SIRFSOC_TIMER_REG_CNT - 2; i++)
- writel_relaxed(sirfsoc_timer_reg_val[i], sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);
-
- writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2],
- sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_LO);
- writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1],
- sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_HI);
-
- writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL) |
- BIT(1) | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
-}
-
-static struct clock_event_device __percpu *sirfsoc_clockevent;
-
-static struct clocksource sirfsoc_clocksource = {
- .name = "sirfsoc_clocksource",
- .rating = 200,
- .mask = CLOCKSOURCE_MASK(64),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
- .read = sirfsoc_timer_read,
- .suspend = sirfsoc_clocksource_suspend,
- .resume = sirfsoc_clocksource_resume,
-};
-
-static unsigned int sirfsoc_timer_irq, sirfsoc_timer1_irq;
-
-static int sirfsoc_local_timer_starting_cpu(unsigned int cpu)
-{
- struct clock_event_device *ce = per_cpu_ptr(sirfsoc_clockevent, cpu);
- unsigned int irq;
- const char *name;
-
- if (cpu == 0) {
- irq = sirfsoc_timer_irq;
- name = "sirfsoc_timer0";
- } else {
- irq = sirfsoc_timer1_irq;
- name = "sirfsoc_timer1";
- }
-
- ce->irq = irq;
- ce->name = "local_timer";
- ce->features = CLOCK_EVT_FEAT_ONESHOT;
- ce->rating = 200;
- ce->set_state_shutdown = sirfsoc_timer_shutdown;
- ce->set_state_oneshot = sirfsoc_timer_shutdown;
- ce->tick_resume = sirfsoc_timer_shutdown;
- ce->set_next_event = sirfsoc_timer_set_next_event;
- clockevents_calc_mult_shift(ce, atlas7_timer_rate, 60);
- ce->max_delta_ns = clockevent_delta2ns(-2, ce);
- ce->max_delta_ticks = (unsigned long)-2;
- ce->min_delta_ns = clockevent_delta2ns(2, ce);
- ce->min_delta_ticks = 2;
- ce->cpumask = cpumask_of(cpu);
-
- BUG_ON(request_irq(ce->irq, sirfsoc_timer_interrupt,
- IRQF_TIMER | IRQF_NOBALANCING, name, ce));
- irq_force_affinity(ce->irq, cpumask_of(cpu));
-
- clockevents_register_device(ce);
- return 0;
-}
-
-static int sirfsoc_local_timer_dying_cpu(unsigned int cpu)
-{
- struct clock_event_device *ce = per_cpu_ptr(sirfsoc_clockevent, cpu);
-
- sirfsoc_timer_count_disable(1);
-
- if (cpu == 0)
- free_irq(sirfsoc_timer_irq, ce);
- else
- free_irq(sirfsoc_timer1_irq, ce);
- return 0;
-}
-
-static int __init sirfsoc_clockevent_init(void)
-{
- sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
- BUG_ON(!sirfsoc_clockevent);
-
- /* Install and invoke hotplug callbacks */
- return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING,
- "clockevents/marco:starting",
- sirfsoc_local_timer_starting_cpu,
- sirfsoc_local_timer_dying_cpu);
-}
-
-/* initialize the kernel jiffy timer source */
-static int __init sirfsoc_atlas7_timer_init(struct device_node *np)
-{
- struct clk *clk;
-
- clk = of_clk_get(np, 0);
- BUG_ON(IS_ERR(clk));
-
- BUG_ON(clk_prepare_enable(clk));
-
- atlas7_timer_rate = clk_get_rate(clk);
-
- /* timer dividers: 0, not divided */
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL);
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_1_CTRL);
-
- /* Initialize timer counters to 0 */
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_LO);
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_HI);
- writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL) |
- BIT(1) | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_0);
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_1);
-
- /* Clear all interrupts */
- writel_relaxed(0xFFFF, sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS);
-
- BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, atlas7_timer_rate));
-
- return sirfsoc_clockevent_init();
-}
-
-static int __init sirfsoc_of_timer_init(struct device_node *np)
-{
- sirfsoc_timer_base = of_iomap(np, 0);
- if (!sirfsoc_timer_base) {
- pr_err("unable to map timer cpu registers\n");
- return -ENXIO;
- }
-
- sirfsoc_timer_irq = irq_of_parse_and_map(np, 0);
- if (!sirfsoc_timer_irq) {
- pr_err("No irq passed for timer0 via DT\n");
- return -EINVAL;
- }
-
- sirfsoc_timer1_irq = irq_of_parse_and_map(np, 1);
- if (!sirfsoc_timer1_irq) {
- pr_err("No irq passed for timer1 via DT\n");
- return -EINVAL;
- }
-
- return sirfsoc_atlas7_timer_init(np);
-}
-TIMER_OF_DECLARE(sirfsoc_atlas7_timer, "sirf,atlas7-tick", sirfsoc_of_timer_init);
diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c
index bb4eee31ae08..9996c0542520 100644
--- a/drivers/clocksource/timer-davinci.c
+++ b/drivers/clocksource/timer-davinci.c
@@ -7,6 +7,8 @@
* (with tiny parts adopted from code by Kevin Hilman <khilman@baylibre.com>)
*/
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
@@ -17,9 +19,6 @@
#include <clocksource/timer-davinci.h>
-#undef pr_fmt
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
#define DAVINCI_TIMER_REG_TIM12 0x10
#define DAVINCI_TIMER_REG_TIM34 0x14
#define DAVINCI_TIMER_REG_PRD12 0x18
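
The davinci hunks above move the pr_fmt() definition ahead of the include block, so the driver no longer needs to #undef a definition pulled in by a header. As a rough, stand-alone illustration of the convention (user-space, with a hypothetical pr_info() standing in for the kernel's), the prefix ends up applied like this:

#include <stdio.h>

/* Define pr_fmt() before anything that could supply a default definition. */
#define pr_fmt(fmt) "%s: " fmt, __func__

/* Hypothetical stand-in for the kernel's pr_info(); every message gets the prefix. */
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

static void davinci_timer_example(void)
{
	pr_info("timer rate is %u Hz\n", 24000000u);
	/* prints: "davinci_timer_example: timer rate is 24000000 Hz" */
}

int main(void)
{
	davinci_timer_example();
	return 0;
}
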
diff --git a/drivers/clocksource/timer-efm32.c b/drivers/clocksource/timer-efm32.c
deleted file mode 100644
index 441a4b916841..000000000000
--- a/drivers/clocksource/timer-efm32.c
+++ /dev/null
@@ -1,278 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2013 Pengutronix
- * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/clk.h>
-
-#define TIMERn_CTRL 0x00
-#define TIMERn_CTRL_PRESC(val) (((val) & 0xf) << 24)
-#define TIMERn_CTRL_PRESC_1024 TIMERn_CTRL_PRESC(10)
-#define TIMERn_CTRL_CLKSEL(val) (((val) & 0x3) << 16)
-#define TIMERn_CTRL_CLKSEL_PRESCHFPERCLK TIMERn_CTRL_CLKSEL(0)
-#define TIMERn_CTRL_OSMEN 0x00000010
-#define TIMERn_CTRL_MODE(val) (((val) & 0x3) << 0)
-#define TIMERn_CTRL_MODE_UP TIMERn_CTRL_MODE(0)
-#define TIMERn_CTRL_MODE_DOWN TIMERn_CTRL_MODE(1)
-
-#define TIMERn_CMD 0x04
-#define TIMERn_CMD_START 0x00000001
-#define TIMERn_CMD_STOP 0x00000002
-
-#define TIMERn_IEN 0x0c
-#define TIMERn_IF 0x10
-#define TIMERn_IFS 0x14
-#define TIMERn_IFC 0x18
-#define TIMERn_IRQ_UF 0x00000002
-
-#define TIMERn_TOP 0x1c
-#define TIMERn_CNT 0x24
-
-struct efm32_clock_event_ddata {
- struct clock_event_device evtdev;
- void __iomem *base;
- unsigned periodic_top;
-};
-
-static int efm32_clock_event_shutdown(struct clock_event_device *evtdev)
-{
- struct efm32_clock_event_ddata *ddata =
- container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
-
- writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
- return 0;
-}
-
-static int efm32_clock_event_set_oneshot(struct clock_event_device *evtdev)
-{
- struct efm32_clock_event_ddata *ddata =
- container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
-
- writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
- writel_relaxed(TIMERn_CTRL_PRESC_1024 |
- TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
- TIMERn_CTRL_OSMEN |
- TIMERn_CTRL_MODE_DOWN,
- ddata->base + TIMERn_CTRL);
- return 0;
-}
-
-static int efm32_clock_event_set_periodic(struct clock_event_device *evtdev)
-{
- struct efm32_clock_event_ddata *ddata =
- container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
-
- writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
- writel_relaxed(ddata->periodic_top, ddata->base + TIMERn_TOP);
- writel_relaxed(TIMERn_CTRL_PRESC_1024 |
- TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
- TIMERn_CTRL_MODE_DOWN,
- ddata->base + TIMERn_CTRL);
- writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);
- return 0;
-}
-
-static int efm32_clock_event_set_next_event(unsigned long evt,
- struct clock_event_device *evtdev)
-{
- struct efm32_clock_event_ddata *ddata =
- container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
-
- writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
- writel_relaxed(evt, ddata->base + TIMERn_CNT);
- writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);
-
- return 0;
-}
-
-static irqreturn_t efm32_clock_event_handler(int irq, void *dev_id)
-{
- struct efm32_clock_event_ddata *ddata = dev_id;
-
- writel_relaxed(TIMERn_IRQ_UF, ddata->base + TIMERn_IFC);
-
- ddata->evtdev.event_handler(&ddata->evtdev);
-
- return IRQ_HANDLED;
-}
-
-static struct efm32_clock_event_ddata clock_event_ddata = {
- .evtdev = {
- .name = "efm32 clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .set_state_shutdown = efm32_clock_event_shutdown,
- .set_state_periodic = efm32_clock_event_set_periodic,
- .set_state_oneshot = efm32_clock_event_set_oneshot,
- .set_next_event = efm32_clock_event_set_next_event,
- .rating = 200,
- },
-};
-
-static int __init efm32_clocksource_init(struct device_node *np)
-{
- struct clk *clk;
- void __iomem *base;
- unsigned long rate;
- int ret;
-
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- pr_err("failed to get clock for clocksource (%d)\n", ret);
- goto err_clk_get;
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- pr_err("failed to enable timer clock for clocksource (%d)\n",
- ret);
- goto err_clk_enable;
- }
- rate = clk_get_rate(clk);
-
- base = of_iomap(np, 0);
- if (!base) {
- ret = -EADDRNOTAVAIL;
- pr_err("failed to map registers for clocksource\n");
- goto err_iomap;
- }
-
- writel_relaxed(TIMERn_CTRL_PRESC_1024 |
- TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
- TIMERn_CTRL_MODE_UP, base + TIMERn_CTRL);
- writel_relaxed(TIMERn_CMD_START, base + TIMERn_CMD);
-
- ret = clocksource_mmio_init(base + TIMERn_CNT, "efm32 timer",
- DIV_ROUND_CLOSEST(rate, 1024), 200, 16,
- clocksource_mmio_readl_up);
- if (ret) {
- pr_err("failed to init clocksource (%d)\n", ret);
- goto err_clocksource_init;
- }
-
- return 0;
-
-err_clocksource_init:
-
- iounmap(base);
-err_iomap:
-
- clk_disable_unprepare(clk);
-err_clk_enable:
-
- clk_put(clk);
-err_clk_get:
-
- return ret;
-}
-
-static int __init efm32_clockevent_init(struct device_node *np)
-{
- struct clk *clk;
- void __iomem *base;
- unsigned long rate;
- int irq;
- int ret;
-
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- pr_err("failed to get clock for clockevent (%d)\n", ret);
- goto err_clk_get;
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- pr_err("failed to enable timer clock for clockevent (%d)\n",
- ret);
- goto err_clk_enable;
- }
- rate = clk_get_rate(clk);
-
- base = of_iomap(np, 0);
- if (!base) {
- ret = -EADDRNOTAVAIL;
- pr_err("failed to map registers for clockevent\n");
- goto err_iomap;
- }
-
- irq = irq_of_parse_and_map(np, 0);
- if (!irq) {
- ret = -ENOENT;
- pr_err("failed to get irq for clockevent\n");
- goto err_get_irq;
- }
-
- writel_relaxed(TIMERn_IRQ_UF, base + TIMERn_IEN);
-
- clock_event_ddata.base = base;
- clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ);
-
- clockevents_config_and_register(&clock_event_ddata.evtdev,
- DIV_ROUND_CLOSEST(rate, 1024),
- 0xf, 0xffff);
-
- ret = request_irq(irq, efm32_clock_event_handler, IRQF_TIMER,
- "efm32 clockevent", &clock_event_ddata);
- if (ret) {
- pr_err("Failed setup irq\n");
- goto err_setup_irq;
- }
-
- return 0;
-
-err_setup_irq:
-err_get_irq:
-
- iounmap(base);
-err_iomap:
-
- clk_disable_unprepare(clk);
-err_clk_enable:
-
- clk_put(clk);
-err_clk_get:
-
- return ret;
-}
-
-/*
- * This function asserts that we have exactly one clocksource and one
- * clock_event_device in the end.
- */
-static int __init efm32_timer_init(struct device_node *np)
-{
- static int has_clocksource, has_clockevent;
- int ret = 0;
-
- if (!has_clocksource) {
- ret = efm32_clocksource_init(np);
- if (!ret) {
- has_clocksource = 1;
- return 0;
- }
- }
-
- if (!has_clockevent) {
- ret = efm32_clockevent_init(np);
- if (!ret) {
- has_clockevent = 1;
- return 0;
- }
- }
-
- return ret;
-}
-TIMER_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init);
-TIMER_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init);
diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c
index 59e11ca8ee73..ab623b25a47b 100644
--- a/drivers/clocksource/timer-microchip-pit64b.c
+++ b/drivers/clocksource/timer-microchip-pit64b.c
@@ -71,10 +71,24 @@ struct mchp_pit64b_clkevt {
struct clock_event_device clkevt;
};
-#define to_mchp_pit64b_timer(x) \
+#define clkevt_to_mchp_pit64b_timer(x) \
((struct mchp_pit64b_timer *)container_of(x,\
struct mchp_pit64b_clkevt, clkevt))
+/**
+ * struct mchp_pit64b_clksrc - PIT64B clocksource data structure
+ * @timer: PIT64B timer
+ * @clksrc: clocksource
+ */
+struct mchp_pit64b_clksrc {
+ struct mchp_pit64b_timer timer;
+ struct clocksource clksrc;
+};
+
+#define clksrc_to_mchp_pit64b_timer(x) \
+ ((struct mchp_pit64b_timer *)container_of(x,\
+ struct mchp_pit64b_clksrc, clksrc))
+
/* Base address for clocksource timer. */
static void __iomem *mchp_pit64b_cs_base;
/* Default cycles for clockevent timer. */
@@ -116,6 +130,36 @@ static inline void mchp_pit64b_reset(struct mchp_pit64b_timer *timer,
writel_relaxed(MCHP_PIT64B_CR_START, timer->base + MCHP_PIT64B_CR);
}
+static void mchp_pit64b_suspend(struct mchp_pit64b_timer *timer)
+{
+ writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
+ if (timer->mode & MCHP_PIT64B_MR_SGCLK)
+ clk_disable_unprepare(timer->gclk);
+ clk_disable_unprepare(timer->pclk);
+}
+
+static void mchp_pit64b_resume(struct mchp_pit64b_timer *timer)
+{
+ clk_prepare_enable(timer->pclk);
+ if (timer->mode & MCHP_PIT64B_MR_SGCLK)
+ clk_prepare_enable(timer->gclk);
+}
+
+static void mchp_pit64b_clksrc_suspend(struct clocksource *cs)
+{
+ struct mchp_pit64b_timer *timer = clksrc_to_mchp_pit64b_timer(cs);
+
+ mchp_pit64b_suspend(timer);
+}
+
+static void mchp_pit64b_clksrc_resume(struct clocksource *cs)
+{
+ struct mchp_pit64b_timer *timer = clksrc_to_mchp_pit64b_timer(cs);
+
+ mchp_pit64b_resume(timer);
+ mchp_pit64b_reset(timer, ULLONG_MAX, MCHP_PIT64B_MR_CONT, 0);
+}
+
static u64 mchp_pit64b_clksrc_read(struct clocksource *cs)
{
return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
@@ -128,7 +172,7 @@ static u64 mchp_pit64b_sched_read_clk(void)
static int mchp_pit64b_clkevt_shutdown(struct clock_event_device *cedev)
{
- struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+ struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
@@ -137,7 +181,7 @@ static int mchp_pit64b_clkevt_shutdown(struct clock_event_device *cedev)
static int mchp_pit64b_clkevt_set_periodic(struct clock_event_device *cedev)
{
- struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+ struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
mchp_pit64b_reset(timer, mchp_pit64b_ce_cycles, MCHP_PIT64B_MR_CONT,
MCHP_PIT64B_IER_PERIOD);
@@ -148,7 +192,7 @@ static int mchp_pit64b_clkevt_set_periodic(struct clock_event_device *cedev)
static int mchp_pit64b_clkevt_set_next_event(unsigned long evt,
struct clock_event_device *cedev)
{
- struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+ struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
mchp_pit64b_reset(timer, evt, MCHP_PIT64B_MR_ONE_SHOT,
MCHP_PIT64B_IER_PERIOD);
@@ -158,21 +202,16 @@ static int mchp_pit64b_clkevt_set_next_event(unsigned long evt,
static void mchp_pit64b_clkevt_suspend(struct clock_event_device *cedev)
{
- struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+ struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
- writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
- if (timer->mode & MCHP_PIT64B_MR_SGCLK)
- clk_disable_unprepare(timer->gclk);
- clk_disable_unprepare(timer->pclk);
+ mchp_pit64b_suspend(timer);
}
static void mchp_pit64b_clkevt_resume(struct clock_event_device *cedev)
{
- struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+ struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
- clk_prepare_enable(timer->pclk);
- if (timer->mode & MCHP_PIT64B_MR_SGCLK)
- clk_prepare_enable(timer->gclk);
+ mchp_pit64b_resume(timer);
}
static irqreturn_t mchp_pit64b_interrupt(int irq, void *dev_id)
@@ -296,20 +335,37 @@ done:
static int __init mchp_pit64b_init_clksrc(struct mchp_pit64b_timer *timer,
u32 clk_rate)
{
+ struct mchp_pit64b_clksrc *cs;
int ret;
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+
mchp_pit64b_reset(timer, ULLONG_MAX, MCHP_PIT64B_MR_CONT, 0);
mchp_pit64b_cs_base = timer->base;
- ret = clocksource_mmio_init(timer->base, MCHP_PIT64B_NAME, clk_rate,
- 210, 64, mchp_pit64b_clksrc_read);
+ cs->timer.base = timer->base;
+ cs->timer.pclk = timer->pclk;
+ cs->timer.gclk = timer->gclk;
+ cs->timer.mode = timer->mode;
+ cs->clksrc.name = MCHP_PIT64B_NAME;
+ cs->clksrc.mask = CLOCKSOURCE_MASK(64);
+ cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ cs->clksrc.rating = 210;
+ cs->clksrc.read = mchp_pit64b_clksrc_read;
+ cs->clksrc.suspend = mchp_pit64b_clksrc_suspend;
+ cs->clksrc.resume = mchp_pit64b_clksrc_resume;
+
+ ret = clocksource_register_hz(&cs->clksrc, clk_rate);
if (ret) {
pr_debug("clksrc: Failed to register PIT64B clocksource!\n");
/* Stop timer. */
writel_relaxed(MCHP_PIT64B_CR_SWRST,
timer->base + MCHP_PIT64B_CR);
+ kfree(cs);
return ret;
}
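
The pit64b rework above wraps the clocksource in a per-instance structure and recovers the timer from the struct clocksource pointer handed to the suspend/resume callbacks via container_of(). A minimal sketch of that pattern, with hypothetical names and no kernel dependencies:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct clocksource { const char *name; };
struct pit_timer   { unsigned long base; };

/* Mirrors struct mchp_pit64b_clksrc: the timer plus an embedded clocksource. */
struct clksrc_wrapper {
	struct pit_timer timer;
	struct clocksource clksrc;
};

static void suspend_cb(struct clocksource *cs)
{
	/* Recover the wrapper (and hence the timer) from the embedded member. */
	struct clksrc_wrapper *w = container_of(cs, struct clksrc_wrapper, clksrc);

	printf("suspend timer @ 0x%lx (%s)\n", w->timer.base, cs->name);
}

int main(void)
{
	struct clksrc_wrapper w = {
		.timer  = { .base = 0xf8028000UL },
		.clksrc = { .name = "pit64b-sketch" },
	};

	suspend_cb(&w.clksrc);
	return 0;
}
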
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
deleted file mode 100644
index c5d469342a9d..000000000000
--- a/drivers/clocksource/timer-prima2.c
+++ /dev/null
@@ -1,242 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * System timer for CSR SiRFprimaII
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/clockchips.h>
-#include <linux/clocksource.h>
-#include <linux/bitops.h>
-#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/sched_clock.h>
-
-#define PRIMA2_CLOCK_FREQ 1000000
-
-#define SIRFSOC_TIMER_COUNTER_LO 0x0000
-#define SIRFSOC_TIMER_COUNTER_HI 0x0004
-#define SIRFSOC_TIMER_MATCH_0 0x0008
-#define SIRFSOC_TIMER_MATCH_1 0x000C
-#define SIRFSOC_TIMER_MATCH_2 0x0010
-#define SIRFSOC_TIMER_MATCH_3 0x0014
-#define SIRFSOC_TIMER_MATCH_4 0x0018
-#define SIRFSOC_TIMER_MATCH_5 0x001C
-#define SIRFSOC_TIMER_STATUS 0x0020
-#define SIRFSOC_TIMER_INT_EN 0x0024
-#define SIRFSOC_TIMER_WATCHDOG_EN 0x0028
-#define SIRFSOC_TIMER_DIV 0x002C
-#define SIRFSOC_TIMER_LATCH 0x0030
-#define SIRFSOC_TIMER_LATCHED_LO 0x0034
-#define SIRFSOC_TIMER_LATCHED_HI 0x0038
-
-#define SIRFSOC_TIMER_WDT_INDEX 5
-
-#define SIRFSOC_TIMER_LATCH_BIT BIT(0)
-
-#define SIRFSOC_TIMER_REG_CNT 11
-
-static const u32 sirfsoc_timer_reg_list[SIRFSOC_TIMER_REG_CNT] = {
- SIRFSOC_TIMER_MATCH_0, SIRFSOC_TIMER_MATCH_1, SIRFSOC_TIMER_MATCH_2,
- SIRFSOC_TIMER_MATCH_3, SIRFSOC_TIMER_MATCH_4, SIRFSOC_TIMER_MATCH_5,
- SIRFSOC_TIMER_INT_EN, SIRFSOC_TIMER_WATCHDOG_EN, SIRFSOC_TIMER_DIV,
- SIRFSOC_TIMER_LATCHED_LO, SIRFSOC_TIMER_LATCHED_HI,
-};
-
-static u32 sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT];
-
-static void __iomem *sirfsoc_timer_base;
-
-/* timer0 interrupt handler */
-static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *ce = dev_id;
-
- WARN_ON(!(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_STATUS) &
- BIT(0)));
-
- /* clear timer0 interrupt */
- writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);
-
- ce->event_handler(ce);
-
- return IRQ_HANDLED;
-}
-
-/* read 64-bit timer counter */
-static u64 notrace sirfsoc_timer_read(struct clocksource *cs)
-{
- u64 cycles;
-
- /* latch the 64-bit timer counter */
- writel_relaxed(SIRFSOC_TIMER_LATCH_BIT,
- sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
- cycles = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_HI);
- cycles = (cycles << 32) |
- readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);
-
- return cycles;
-}
-
-static int sirfsoc_timer_set_next_event(unsigned long delta,
- struct clock_event_device *ce)
-{
- unsigned long now, next;
-
- writel_relaxed(SIRFSOC_TIMER_LATCH_BIT,
- sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
- now = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);
- next = now + delta;
- writel_relaxed(next, sirfsoc_timer_base + SIRFSOC_TIMER_MATCH_0);
- writel_relaxed(SIRFSOC_TIMER_LATCH_BIT,
- sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
- now = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);
-
- return next - now > delta ? -ETIME : 0;
-}
-
-static int sirfsoc_timer_shutdown(struct clock_event_device *evt)
-{
- u32 val = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
-
- writel_relaxed(val & ~BIT(0),
- sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
- return 0;
-}
-
-static int sirfsoc_timer_set_oneshot(struct clock_event_device *evt)
-{
- u32 val = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
-
- writel_relaxed(val | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
- return 0;
-}
-
-static void sirfsoc_clocksource_suspend(struct clocksource *cs)
-{
- int i;
-
- writel_relaxed(SIRFSOC_TIMER_LATCH_BIT,
- sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);
-
- for (i = 0; i < SIRFSOC_TIMER_REG_CNT; i++)
- sirfsoc_timer_reg_val[i] =
- readl_relaxed(sirfsoc_timer_base +
- sirfsoc_timer_reg_list[i]);
-}
-
-static void sirfsoc_clocksource_resume(struct clocksource *cs)
-{
- int i;
-
- for (i = 0; i < SIRFSOC_TIMER_REG_CNT - 2; i++)
- writel_relaxed(sirfsoc_timer_reg_val[i],
- sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);
-
- writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2],
- sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
- writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1],
- sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
-}
-
-static struct clock_event_device sirfsoc_clockevent = {
- .name = "sirfsoc_clockevent",
- .rating = 200,
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .set_state_shutdown = sirfsoc_timer_shutdown,
- .set_state_oneshot = sirfsoc_timer_set_oneshot,
- .set_next_event = sirfsoc_timer_set_next_event,
-};
-
-static struct clocksource sirfsoc_clocksource = {
- .name = "sirfsoc_clocksource",
- .rating = 200,
- .mask = CLOCKSOURCE_MASK(64),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
- .read = sirfsoc_timer_read,
- .suspend = sirfsoc_clocksource_suspend,
- .resume = sirfsoc_clocksource_resume,
-};
-
-/* Overwrite weak default sched_clock with more precise one */
-static u64 notrace sirfsoc_read_sched_clock(void)
-{
- return sirfsoc_timer_read(NULL);
-}
-
-static void __init sirfsoc_clockevent_init(void)
-{
- sirfsoc_clockevent.cpumask = cpumask_of(0);
- clockevents_config_and_register(&sirfsoc_clockevent, PRIMA2_CLOCK_FREQ,
- 2, -2);
-}
-
-/* initialize the kernel jiffy timer source */
-static int __init sirfsoc_prima2_timer_init(struct device_node *np)
-{
- unsigned long rate;
- unsigned int irq;
- struct clk *clk;
- int ret;
-
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- pr_err("Failed to get clock\n");
- return PTR_ERR(clk);
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- pr_err("Failed to enable clock\n");
- return ret;
- }
-
- rate = clk_get_rate(clk);
-
- if (rate < PRIMA2_CLOCK_FREQ || rate % PRIMA2_CLOCK_FREQ) {
- pr_err("Invalid clock rate\n");
- return -EINVAL;
- }
-
- sirfsoc_timer_base = of_iomap(np, 0);
- if (!sirfsoc_timer_base) {
- pr_err("unable to map timer cpu registers\n");
- return -ENXIO;
- }
-
- irq = irq_of_parse_and_map(np, 0);
-
- writel_relaxed(rate / PRIMA2_CLOCK_FREQ / 2 - 1,
- sirfsoc_timer_base + SIRFSOC_TIMER_DIV);
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
- writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
- writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);
-
- ret = clocksource_register_hz(&sirfsoc_clocksource, PRIMA2_CLOCK_FREQ);
- if (ret) {
- pr_err("Failed to register clocksource\n");
- return ret;
- }
-
- sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ);
-
- ret = request_irq(irq, sirfsoc_timer_interrupt, IRQF_TIMER,
- "sirfsoc_timer0", &sirfsoc_clockevent);
- if (ret) {
- pr_err("Failed to setup irq\n");
- return ret;
- }
-
- sirfsoc_clockevent_init();
-
- return 0;
-}
-TIMER_OF_DECLARE(sirfsoc_prima2_timer,
- "sirf,prima2-tick", sirfsoc_prima2_timer_init);
diff --git a/drivers/clocksource/timer-tango-xtal.c b/drivers/clocksource/timer-tango-xtal.c
deleted file mode 100644
index 3f94e454ef99..000000000000
--- a/drivers/clocksource/timer-tango-xtal.c
+++ /dev/null
@@ -1,57 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/clocksource.h>
-#include <linux/sched_clock.h>
-#include <linux/of_address.h>
-#include <linux/printk.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-
-static void __iomem *xtal_in_cnt;
-static struct delay_timer delay_timer;
-
-static unsigned long notrace read_xtal_counter(void)
-{
- return readl_relaxed(xtal_in_cnt);
-}
-
-static u64 notrace read_sched_clock(void)
-{
- return read_xtal_counter();
-}
-
-static int __init tango_clocksource_init(struct device_node *np)
-{
- struct clk *clk;
- int xtal_freq, ret;
-
- xtal_in_cnt = of_iomap(np, 0);
- if (xtal_in_cnt == NULL) {
- pr_err("%pOF: invalid address\n", np);
- return -ENXIO;
- }
-
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- pr_err("%pOF: invalid clock\n", np);
- return PTR_ERR(clk);
- }
-
- xtal_freq = clk_get_rate(clk);
- delay_timer.freq = xtal_freq;
- delay_timer.read_current_timer = read_xtal_counter;
-
- ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
- 32, clocksource_mmio_readl_up);
- if (ret) {
- pr_err("%pOF: registration failed\n", np);
- return ret;
- }
-
- sched_clock_register(read_sched_clock, 32, xtal_freq);
- register_current_timer_delay(&delay_timer);
-
- return 0;
-}
-
-TIMER_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init);
diff --git a/drivers/clocksource/timer-u300.c b/drivers/clocksource/timer-u300.c
deleted file mode 100644
index 37cba8dfd45f..000000000000
--- a/drivers/clocksource/timer-u300.c
+++ /dev/null
@@ -1,457 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2007-2009 ST-Ericsson AB
- * Timer COH 901 328, runs the OS timer interrupt.
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- */
-#include <linux/interrupt.h>
-#include <linux/time.h>
-#include <linux/timex.h>
-#include <linux/clockchips.h>
-#include <linux/clocksource.h>
-#include <linux/types.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/sched_clock.h>
-
-/* Generic stuff */
-#include <asm/mach/map.h>
-#include <asm/mach/time.h>
-
-/*
- * APP side special timer registers
- * This timer contains four timers which can fire an interrupt each.
- * OS (operating system) timer @ 32768 Hz
- * DD (device driver) timer @ 1 kHz
- * GP1 (general purpose 1) timer @ 1MHz
- * GP2 (general purpose 2) timer @ 1MHz
- */
-
-/* Reset OS Timer 32bit (-/W) */
-#define U300_TIMER_APP_ROST (0x0000)
-#define U300_TIMER_APP_ROST_TIMER_RESET (0x00000000)
-/* Enable OS Timer 32bit (-/W) */
-#define U300_TIMER_APP_EOST (0x0004)
-#define U300_TIMER_APP_EOST_TIMER_ENABLE (0x00000000)
-/* Disable OS Timer 32bit (-/W) */
-#define U300_TIMER_APP_DOST (0x0008)
-#define U300_TIMER_APP_DOST_TIMER_DISABLE (0x00000000)
-/* OS Timer Mode Register 32bit (-/W) */
-#define U300_TIMER_APP_SOSTM (0x000c)
-#define U300_TIMER_APP_SOSTM_MODE_CONTINUOUS (0x00000000)
-#define U300_TIMER_APP_SOSTM_MODE_ONE_SHOT (0x00000001)
-/* OS Timer Status Register 32bit (R/-) */
-#define U300_TIMER_APP_OSTS (0x0010)
-#define U300_TIMER_APP_OSTS_TIMER_STATE_MASK (0x0000000F)
-#define U300_TIMER_APP_OSTS_TIMER_STATE_IDLE (0x00000001)
-#define U300_TIMER_APP_OSTS_TIMER_STATE_ACTIVE (0x00000002)
-#define U300_TIMER_APP_OSTS_ENABLE_IND (0x00000010)
-#define U300_TIMER_APP_OSTS_MODE_MASK (0x00000020)
-#define U300_TIMER_APP_OSTS_MODE_CONTINUOUS (0x00000000)
-#define U300_TIMER_APP_OSTS_MODE_ONE_SHOT (0x00000020)
-#define U300_TIMER_APP_OSTS_IRQ_ENABLED_IND (0x00000040)
-#define U300_TIMER_APP_OSTS_IRQ_PENDING_IND (0x00000080)
-/* OS Timer Current Count Register 32bit (R/-) */
-#define U300_TIMER_APP_OSTCC (0x0014)
-/* OS Timer Terminal Count Register 32bit (R/W) */
-#define U300_TIMER_APP_OSTTC (0x0018)
-/* OS Timer Interrupt Enable Register 32bit (-/W) */
-#define U300_TIMER_APP_OSTIE (0x001c)
-#define U300_TIMER_APP_OSTIE_IRQ_DISABLE (0x00000000)
-#define U300_TIMER_APP_OSTIE_IRQ_ENABLE (0x00000001)
-/* OS Timer Interrupt Acknowledge Register 32bit (-/W) */
-#define U300_TIMER_APP_OSTIA (0x0020)
-#define U300_TIMER_APP_OSTIA_IRQ_ACK (0x00000080)
-
-/* Reset DD Timer 32bit (-/W) */
-#define U300_TIMER_APP_RDDT (0x0040)
-#define U300_TIMER_APP_RDDT_TIMER_RESET (0x00000000)
-/* Enable DD Timer 32bit (-/W) */
-#define U300_TIMER_APP_EDDT (0x0044)
-#define U300_TIMER_APP_EDDT_TIMER_ENABLE (0x00000000)
-/* Disable DD Timer 32bit (-/W) */
-#define U300_TIMER_APP_DDDT (0x0048)
-#define U300_TIMER_APP_DDDT_TIMER_DISABLE (0x00000000)
-/* DD Timer Mode Register 32bit (-/W) */
-#define U300_TIMER_APP_SDDTM (0x004c)
-#define U300_TIMER_APP_SDDTM_MODE_CONTINUOUS (0x00000000)
-#define U300_TIMER_APP_SDDTM_MODE_ONE_SHOT (0x00000001)
-/* DD Timer Status Register 32bit (R/-) */
-#define U300_TIMER_APP_DDTS (0x0050)
-#define U300_TIMER_APP_DDTS_TIMER_STATE_MASK (0x0000000F)
-#define U300_TIMER_APP_DDTS_TIMER_STATE_IDLE (0x00000001)
-#define U300_TIMER_APP_DDTS_TIMER_STATE_ACTIVE (0x00000002)
-#define U300_TIMER_APP_DDTS_ENABLE_IND (0x00000010)
-#define U300_TIMER_APP_DDTS_MODE_MASK (0x00000020)
-#define U300_TIMER_APP_DDTS_MODE_CONTINUOUS (0x00000000)
-#define U300_TIMER_APP_DDTS_MODE_ONE_SHOT (0x00000020)
-#define U300_TIMER_APP_DDTS_IRQ_ENABLED_IND (0x00000040)
-#define U300_TIMER_APP_DDTS_IRQ_PENDING_IND (0x00000080)
-/* DD Timer Current Count Register 32bit (R/-) */
-#define U300_TIMER_APP_DDTCC (0x0054)
-/* DD Timer Terminal Count Register 32bit (R/W) */
-#define U300_TIMER_APP_DDTTC (0x0058)
-/* DD Timer Interrupt Enable Register 32bit (-/W) */
-#define U300_TIMER_APP_DDTIE (0x005c)
-#define U300_TIMER_APP_DDTIE_IRQ_DISABLE (0x00000000)
-#define U300_TIMER_APP_DDTIE_IRQ_ENABLE (0x00000001)
-/* DD Timer Interrupt Acknowledge Register 32bit (-/W) */
-#define U300_TIMER_APP_DDTIA (0x0060)
-#define U300_TIMER_APP_DDTIA_IRQ_ACK (0x00000080)
-
-/* Reset GP1 Timer 32bit (-/W) */
-#define U300_TIMER_APP_RGPT1 (0x0080)
-#define U300_TIMER_APP_RGPT1_TIMER_RESET (0x00000000)
-/* Enable GP1 Timer 32bit (-/W) */
-#define U300_TIMER_APP_EGPT1 (0x0084)
-#define U300_TIMER_APP_EGPT1_TIMER_ENABLE (0x00000000)
-/* Disable GP1 Timer 32bit (-/W) */
-#define U300_TIMER_APP_DGPT1 (0x0088)
-#define U300_TIMER_APP_DGPT1_TIMER_DISABLE (0x00000000)
-/* GP1 Timer Mode Register 32bit (-/W) */
-#define U300_TIMER_APP_SGPT1M (0x008c)
-#define U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS (0x00000000)
-#define U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT (0x00000001)
-/* GP1 Timer Status Register 32bit (R/-) */
-#define U300_TIMER_APP_GPT1S (0x0090)
-#define U300_TIMER_APP_GPT1S_TIMER_STATE_MASK (0x0000000F)
-#define U300_TIMER_APP_GPT1S_TIMER_STATE_IDLE (0x00000001)
-#define U300_TIMER_APP_GPT1S_TIMER_STATE_ACTIVE (0x00000002)
-#define U300_TIMER_APP_GPT1S_ENABLE_IND (0x00000010)
-#define U300_TIMER_APP_GPT1S_MODE_MASK (0x00000020)
-#define U300_TIMER_APP_GPT1S_MODE_CONTINUOUS (0x00000000)
-#define U300_TIMER_APP_GPT1S_MODE_ONE_SHOT (0x00000020)
-#define U300_TIMER_APP_GPT1S_IRQ_ENABLED_IND (0x00000040)
-#define U300_TIMER_APP_GPT1S_IRQ_PENDING_IND (0x00000080)
-/* GP1 Timer Current Count Register 32bit (R/-) */
-#define U300_TIMER_APP_GPT1CC (0x0094)
-/* GP1 Timer Terminal Count Register 32bit (R/W) */
-#define U300_TIMER_APP_GPT1TC (0x0098)
-/* GP1 Timer Interrupt Enable Register 32bit (-/W) */
-#define U300_TIMER_APP_GPT1IE (0x009c)
-#define U300_TIMER_APP_GPT1IE_IRQ_DISABLE (0x00000000)
-#define U300_TIMER_APP_GPT1IE_IRQ_ENABLE (0x00000001)
-/* GP1 Timer Interrupt Acknowledge Register 32bit (-/W) */
-#define U300_TIMER_APP_GPT1IA (0x00a0)
-#define U300_TIMER_APP_GPT1IA_IRQ_ACK (0x00000080)
-
-/* Reset GP2 Timer 32bit (-/W) */
-#define U300_TIMER_APP_RGPT2 (0x00c0)
-#define U300_TIMER_APP_RGPT2_TIMER_RESET (0x00000000)
-/* Enable GP2 Timer 32bit (-/W) */
-#define U300_TIMER_APP_EGPT2 (0x00c4)
-#define U300_TIMER_APP_EGPT2_TIMER_ENABLE (0x00000000)
-/* Disable GP2 Timer 32bit (-/W) */
-#define U300_TIMER_APP_DGPT2 (0x00c8)
-#define U300_TIMER_APP_DGPT2_TIMER_DISABLE (0x00000000)
-/* GP2 Timer Mode Register 32bit (-/W) */
-#define U300_TIMER_APP_SGPT2M (0x00cc)
-#define U300_TIMER_APP_SGPT2M_MODE_CONTINUOUS (0x00000000)
-#define U300_TIMER_APP_SGPT2M_MODE_ONE_SHOT (0x00000001)
-/* GP2 Timer Status Register 32bit (R/-) */
-#define U300_TIMER_APP_GPT2S (0x00d0)
-#define U300_TIMER_APP_GPT2S_TIMER_STATE_MASK (0x0000000F)
-#define U300_TIMER_APP_GPT2S_TIMER_STATE_IDLE (0x00000001)
-#define U300_TIMER_APP_GPT2S_TIMER_STATE_ACTIVE (0x00000002)
-#define U300_TIMER_APP_GPT2S_ENABLE_IND (0x00000010)
-#define U300_TIMER_APP_GPT2S_MODE_MASK (0x00000020)
-#define U300_TIMER_APP_GPT2S_MODE_CONTINUOUS (0x00000000)
-#define U300_TIMER_APP_GPT2S_MODE_ONE_SHOT (0x00000020)
-#define U300_TIMER_APP_GPT2S_IRQ_ENABLED_IND (0x00000040)
-#define U300_TIMER_APP_GPT2S_IRQ_PENDING_IND (0x00000080)
-/* GP2 Timer Current Count Register 32bit (R/-) */
-#define U300_TIMER_APP_GPT2CC (0x00d4)
-/* GP2 Timer Terminal Count Register 32bit (R/W) */
-#define U300_TIMER_APP_GPT2TC (0x00d8)
-/* GP2 Timer Interrupt Enable Register 32bit (-/W) */
-#define U300_TIMER_APP_GPT2IE (0x00dc)
-#define U300_TIMER_APP_GPT2IE_IRQ_DISABLE (0x00000000)
-#define U300_TIMER_APP_GPT2IE_IRQ_ENABLE (0x00000001)
-/* GP2 Timer Interrupt Acknowledge Register 32bit (-/W) */
-#define U300_TIMER_APP_GPT2IA (0x00e0)
-#define U300_TIMER_APP_GPT2IA_IRQ_ACK (0x00000080)
-
-/* Clock request control register - all four timers */
-#define U300_TIMER_APP_CRC (0x100)
-#define U300_TIMER_APP_CRC_CLOCK_REQUEST_ENABLE (0x00000001)
-
-static void __iomem *u300_timer_base;
-
-struct u300_clockevent_data {
- struct clock_event_device cevd;
- unsigned ticks_per_jiffy;
-};
-
-static int u300_shutdown(struct clock_event_device *evt)
-{
- /* Disable interrupts on GP1 */
- writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Disable GP1 */
- writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
- u300_timer_base + U300_TIMER_APP_DGPT1);
- return 0;
-}
-
-/*
- * If we have oneshot timer active, the oneshot scheduling function
- * u300_set_next_event() is called immediately after.
- */
-static int u300_set_oneshot(struct clock_event_device *evt)
-{
- /* Just return; here? */
- /*
- * The actual event will be programmed by the next event hook,
- * so we just set a dummy value somewhere at the end of the
- * universe here.
- */
- /* Disable interrupts on GPT1 */
- writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Disable GP1 while we're reprogramming it. */
- writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
- u300_timer_base + U300_TIMER_APP_DGPT1);
- /*
- * Expire far in the future, u300_set_next_event() will be
- * called soon...
- */
- writel(0xFFFFFFFF, u300_timer_base + U300_TIMER_APP_GPT1TC);
- /* We run one shot per tick here! */
- writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT,
- u300_timer_base + U300_TIMER_APP_SGPT1M);
- /* Enable interrupts for this timer */
- writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Enable timer */
- writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
- u300_timer_base + U300_TIMER_APP_EGPT1);
- return 0;
-}
-
-static int u300_set_periodic(struct clock_event_device *evt)
-{
- struct u300_clockevent_data *cevdata =
- container_of(evt, struct u300_clockevent_data, cevd);
-
- /* Disable interrupts on GPT1 */
- writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Disable GP1 while we're reprogramming it. */
- writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
- u300_timer_base + U300_TIMER_APP_DGPT1);
- /*
- * Set the periodic mode to a certain number of ticks per
- * jiffy.
- */
- writel(cevdata->ticks_per_jiffy,
- u300_timer_base + U300_TIMER_APP_GPT1TC);
- /*
- * Set continuous mode, so the timer keeps triggering
- * interrupts.
- */
- writel(U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS,
- u300_timer_base + U300_TIMER_APP_SGPT1M);
- /* Enable timer interrupts */
- writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Then enable the OS timer again */
- writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
- u300_timer_base + U300_TIMER_APP_EGPT1);
- return 0;
-}
-
-/*
- * The app timer in one shot mode obviously has to be reprogrammed
- * in EXACTLY this sequence to work properly. Do NOT try to e.g. replace
- * the interrupt disable + timer disable commands with a reset command,
- * it will fail miserably. Apparently (and I found this the hard way)
- * the timer is very sensitive to the instruction order, though you don't
- * get that impression from the data sheet.
- */
-static int u300_set_next_event(unsigned long cycles,
- struct clock_event_device *evt)
-
-{
- /* Disable interrupts on GPT1 */
- writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Disable GP1 while we're reprogramming it. */
- writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
- u300_timer_base + U300_TIMER_APP_DGPT1);
- /* Reset the General Purpose timer 1. */
- writel(U300_TIMER_APP_RGPT1_TIMER_RESET,
- u300_timer_base + U300_TIMER_APP_RGPT1);
- /* IRQ in n * cycles */
- writel(cycles, u300_timer_base + U300_TIMER_APP_GPT1TC);
- /*
- * We run one shot per tick here! (This is necessary to reconfigure,
- * the timer will tilt if you don't!)
- */
- writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT,
- u300_timer_base + U300_TIMER_APP_SGPT1M);
- /* Enable timer interrupts */
- writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
- u300_timer_base + U300_TIMER_APP_GPT1IE);
- /* Then enable the OS timer again */
- writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
- u300_timer_base + U300_TIMER_APP_EGPT1);
- return 0;
-}
-
-static struct u300_clockevent_data u300_clockevent_data = {
- /* Use general purpose timer 1 as clock event */
- .cevd = {
- .name = "GPT1",
- /* Reasonably fast and accurate clock event */
- .rating = 300,
- .features = CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_ONESHOT,
- .set_next_event = u300_set_next_event,
- .set_state_shutdown = u300_shutdown,
- .set_state_periodic = u300_set_periodic,
- .set_state_oneshot = u300_set_oneshot,
- },
-};
-
-/* Clock event timer interrupt handler */
-static irqreturn_t u300_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = &u300_clockevent_data.cevd;
- /* ACK/Clear timer IRQ for the APP GPT1 Timer */
-
- writel(U300_TIMER_APP_GPT1IA_IRQ_ACK,
- u300_timer_base + U300_TIMER_APP_GPT1IA);
- evt->event_handler(evt);
- return IRQ_HANDLED;
-}
-
-/*
- * Override the global weak sched_clock symbol with this
- * local implementation which uses the clocksource to get some
- * better resolution when scheduling the kernel. We accept that
- * this wraps around for now, since it is just a relative time
- * stamp. (Inspired by OMAP implementation.)
- */
-
-static u64 notrace u300_read_sched_clock(void)
-{
- return readl(u300_timer_base + U300_TIMER_APP_GPT2CC);
-}
-
-static unsigned long u300_read_current_timer(void)
-{
- return readl(u300_timer_base + U300_TIMER_APP_GPT2CC);
-}
-
-static struct delay_timer u300_delay_timer;
-
-/*
- * This sets up the system timers, clock source and clock event.
- */
-static int __init u300_timer_init_of(struct device_node *np)
-{
- unsigned int irq;
- struct clk *clk;
- unsigned long rate;
- int ret;
-
- u300_timer_base = of_iomap(np, 0);
- if (!u300_timer_base) {
- pr_err("could not ioremap system timer\n");
- return -ENXIO;
- }
-
- /* Get the IRQ for the GP1 timer */
- irq = irq_of_parse_and_map(np, 2);
- if (!irq) {
- pr_err("no IRQ for system timer\n");
- return -EINVAL;
- }
-
- pr_info("U300 GP1 timer @ base: %p, IRQ: %u\n", u300_timer_base, irq);
-
- /* Clock the interrupt controller */
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
- rate = clk_get_rate(clk);
-
- u300_clockevent_data.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
-
- sched_clock_register(u300_read_sched_clock, 32, rate);
-
- u300_delay_timer.read_current_timer = &u300_read_current_timer;
- u300_delay_timer.freq = rate;
- register_current_timer_delay(&u300_delay_timer);
-
- /*
- * Disable the "OS" and "DD" timers - these are designed for Symbian!
- * Example usage in cnh1601578 cpu subsystem pd_timer_app.c
- */
- writel(U300_TIMER_APP_CRC_CLOCK_REQUEST_ENABLE,
- u300_timer_base + U300_TIMER_APP_CRC);
- writel(U300_TIMER_APP_ROST_TIMER_RESET,
- u300_timer_base + U300_TIMER_APP_ROST);
- writel(U300_TIMER_APP_DOST_TIMER_DISABLE,
- u300_timer_base + U300_TIMER_APP_DOST);
- writel(U300_TIMER_APP_RDDT_TIMER_RESET,
- u300_timer_base + U300_TIMER_APP_RDDT);
- writel(U300_TIMER_APP_DDDT_TIMER_DISABLE,
- u300_timer_base + U300_TIMER_APP_DDDT);
-
- /* Reset the General Purpose timer 1. */
- writel(U300_TIMER_APP_RGPT1_TIMER_RESET,
- u300_timer_base + U300_TIMER_APP_RGPT1);
-
- /* Set up the IRQ handler */
- ret = request_irq(irq, u300_timer_interrupt,
- IRQF_TIMER | IRQF_IRQPOLL, "U300 Timer Tick", NULL);
- if (ret)
- return ret;
-
- /* Reset the General Purpose timer 2 */
- writel(U300_TIMER_APP_RGPT2_TIMER_RESET,
- u300_timer_base + U300_TIMER_APP_RGPT2);
- /* Set this timer to run around forever */
- writel(0xFFFFFFFFU, u300_timer_base + U300_TIMER_APP_GPT2TC);
- /* Set continuous mode so it wraps around */
- writel(U300_TIMER_APP_SGPT2M_MODE_CONTINUOUS,
- u300_timer_base + U300_TIMER_APP_SGPT2M);
- /* Disable timer interrupts */
- writel(U300_TIMER_APP_GPT2IE_IRQ_DISABLE,
- u300_timer_base + U300_TIMER_APP_GPT2IE);
- /* Then enable the GP2 timer to use as a free running us counter */
- writel(U300_TIMER_APP_EGPT2_TIMER_ENABLE,
- u300_timer_base + U300_TIMER_APP_EGPT2);
-
- /* Use general purpose timer 2 as clock source */
- ret = clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC,
- "GPT2", rate, 300, 32, clocksource_mmio_readl_up);
- if (ret) {
- pr_err("timer: failed to initialize U300 clock source\n");
- return ret;
- }
-
- /* Configure and register the clockevent */
- clockevents_config_and_register(&u300_clockevent_data.cevd, rate,
- 1, 0xffffffff);
-
- /*
- * TODO: init and register the rest of the timers too, they can be
- * used by hrtimers!
- */
- return 0;
-}
-
-TIMER_OF_DECLARE(u300_timer, "stericsson,u300-apptimer",
- u300_timer_init_of);
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 1f73fa75b1a0..e65e0a43be64 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -289,11 +289,6 @@ config ARM_STI_CPUFREQ
this config option if you wish to add CPUFreq support for STi based
SoCs.
-config ARM_TANGO_CPUFREQ
- bool
- depends on CPUFREQ_DT && ARCH_TANGO
- default y
-
config ARM_TEGRA20_CPUFREQ
tristate "Tegra20/30 CPUFreq support"
depends on ARCH_TEGRA && CPUFREQ_DT
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 399526289320..92701a18bdd9 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -62,16 +62,6 @@ config X86_ACPI_CPUFREQ_CPB
By enabling this option the acpi_cpufreq driver provides the old
entry in addition to the new boost ones, for compatibility reasons.
-config X86_SFI_CPUFREQ
- tristate "SFI Performance-States driver"
- depends on X86_INTEL_MID && SFI
- help
- This adds a CPUFreq driver for some Silvermont based Intel Atom
- architectures like Z34xx and Z35xx which enumerate processor
- performance states through SFI.
-
- If in doubt, say N.
-
config ELAN_CPUFREQ
tristate "AMD Elan SC400 and SC410"
depends on MELAN
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index f1b7e3dd6e5d..27d3bd7ea9d4 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -43,7 +43,6 @@ obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
-obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
##################################################################################
# ARM SoC drivers
@@ -79,7 +78,6 @@ obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o
obj-$(CONFIG_ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM) += sun50i-cpufreq-nvmem.o
-obj-$(CONFIG_ARM_TANGO_CPUFREQ) += tango-cpufreq.o
obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o
obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 1e4fbb002a31..d1bbc16fba4b 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -26,6 +26,7 @@
#include <linux/uaccess.h>
#include <acpi/processor.h>
+#include <acpi/cppc_acpi.h>
#include <asm/msr.h>
#include <asm/processor.h>
@@ -628,16 +629,53 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
}
#endif
+#ifdef CONFIG_ACPI_CPPC_LIB
+static u64 get_max_boost_ratio(unsigned int cpu)
+{
+ struct cppc_perf_caps perf_caps;
+ u64 highest_perf, nominal_perf;
+ int ret;
+
+ if (acpi_pstate_strict)
+ return 0;
+
+ ret = cppc_get_perf_caps(cpu, &perf_caps);
+ if (ret) {
+ pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
+ cpu, ret);
+ return 0;
+ }
+
+ highest_perf = perf_caps.highest_perf;
+ nominal_perf = perf_caps.nominal_perf;
+
+ if (!highest_perf || !nominal_perf) {
+ pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
+ return 0;
+ }
+
+ if (highest_perf < nominal_perf) {
+ pr_debug("CPU%d: nominal performance above highest\n", cpu);
+ return 0;
+ }
+
+ return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+}
+#else
+static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+#endif
+
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- unsigned int i;
- unsigned int valid_states = 0;
- unsigned int cpu = policy->cpu;
+ struct cpufreq_frequency_table *freq_table;
+ struct acpi_processor_performance *perf;
struct acpi_cpufreq_data *data;
+ unsigned int cpu = policy->cpu;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ unsigned int valid_states = 0;
unsigned int result = 0;
- struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
- struct acpi_processor_performance *perf;
- struct cpufreq_frequency_table *freq_table;
+ u64 max_boost_ratio;
+ unsigned int i;
#ifdef CONFIG_SMP
static int blacklisted;
#endif
@@ -785,6 +823,28 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
valid_states++;
}
freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+
+ max_boost_ratio = get_max_boost_ratio(cpu);
+ if (max_boost_ratio) {
+ unsigned int freq = freq_table[0].frequency;
+
+ /*
+ * Because the loop above sorts the freq_table entries in the
+ * descending order, freq is the maximum frequency in the table.
+ * Assume that it corresponds to the CPPC nominal frequency and
+ * use it to set cpuinfo.max_freq.
+ */
+ policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
+ } else {
+ /*
+ * If the maximum "boost" frequency is unknown, ask the arch
+ * scale-invariance code to use the "nominal" performance for
+ * CPU utilization scaling so as to prevent the schedutil
+ * governor from selecting inadequate CPU frequencies.
+ */
+ arch_set_max_freq_ratio(true);
+ }
+
policy->freq_table = freq_table;
perf->state = 0;
@@ -858,8 +918,9 @@ static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy)
{
struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
policy->cpu);
+ unsigned int freq = policy->freq_table[0].frequency;
- if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
+ if (perf->states[0].core_frequency * 1000 != freq)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
}
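
The acpi-cpufreq hunks above compute the boost ratio as a fixed-point fraction of CPPC highest over nominal performance, scaled by SCHED_CAPACITY_SHIFT, and then apply it to the top frequency-table entry to derive cpuinfo.max_freq. A small worked example of that arithmetic with made-up capability values:

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT 10	/* same scale the kernel uses: 1024 == 1.0 */

int main(void)
{
	/* Hypothetical CPPC capabilities: nominal = 24, highest (boost) = 36. */
	uint64_t nominal_perf = 24, highest_perf = 36;
	unsigned int nominal_freq_khz = 2400000;	/* top freq_table entry */

	uint64_t ratio = (highest_perf << SCHED_CAPACITY_SHIFT) / nominal_perf;
	unsigned int max_freq_khz =
		(unsigned int)((nominal_freq_khz * ratio) >> SCHED_CAPACITY_SHIFT);

	/* 36/24 = 1.5, so cpuinfo.max_freq becomes 3600000 kHz. */
	printf("ratio=%llu/1024 max_freq=%u kHz\n",
	       (unsigned long long)ratio, max_freq_khz);
	return 0;
}
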
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 3e31e5d28b79..4153150e20db 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -597,6 +597,16 @@ unmap_base:
return ret;
}
+static void brcm_avs_prepare_uninit(struct platform_device *pdev)
+{
+ struct private_data *priv;
+
+ priv = platform_get_drvdata(pdev);
+
+ iounmap(priv->avs_intr_base);
+ iounmap(priv->base);
+}
+
static int brcm_avs_cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
@@ -732,21 +742,21 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
brcm_avs_driver.driver_data = pdev;
- return cpufreq_register_driver(&brcm_avs_driver);
+ ret = cpufreq_register_driver(&brcm_avs_driver);
+ if (ret)
+ brcm_avs_prepare_uninit(pdev);
+
+ return ret;
}
static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
{
- struct private_data *priv;
int ret;
ret = cpufreq_unregister_driver(&brcm_avs_driver);
- if (ret)
- return ret;
+ WARN_ON(ret);
- priv = platform_get_drvdata(pdev);
- iounmap(priv->base);
- iounmap(priv->avs_intr_base);
+ brcm_avs_prepare_uninit(pdev);
return 0;
}
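
The brcmstb change above factors the iounmap() cleanup into brcm_avs_prepare_uninit() and calls it both when cpufreq_register_driver() fails in probe and from remove(), so the register mappings are never leaked. A toy sketch of that undo-on-registration-failure pattern (hypothetical names, plain C):

#include <stdio.h>

/* Hypothetical flags standing in for the priv->base / priv->avs_intr_base mappings. */
static int base_mapped, intr_mapped;

/* Single undo helper, shared by the probe error path and remove(). */
static void prepare_uninit(void)
{
	intr_mapped = 0;
	base_mapped = 0;
	puts("mappings released");
}

static int register_driver(int should_fail)
{
	return should_fail ? -1 : 0;
}

static int probe(int make_register_fail)
{
	int ret;

	base_mapped = 1;		/* what the prepare/init step sets up */
	intr_mapped = 1;

	ret = register_driver(make_register_fail);
	if (ret)
		prepare_uninit();	/* undo the mappings instead of leaking them */

	return ret;
}

int main(void)
{
	printf("probe with failing registration: %d\n", probe(1));
	printf("probe with working registration: %d\n", probe(0));
	prepare_uninit();		/* what remove() would do */
	return 0;
}
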
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index bd2db0188cbb..3ba2f716fe97 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -141,8 +141,6 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "st,stih410", },
{ .compatible = "st,stih418", },
- { .compatible = "sigma,tango4", },
-
{ .compatible = "ti,am33xx", },
{ .compatible = "ti,am43", },
{ .compatible = "ti,dra7", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index ad4234518ef6..b1e1bdc63b01 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -175,7 +175,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
}
static struct cpufreq_driver dt_cpufreq_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = set_target,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d0a3525ce27f..1d1b563cea4b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2101,7 +2101,7 @@ EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
* cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
* @cpu: Target CPU.
* @min_perf: Minimum (required) performance level (units of @capacity).
- * @target_perf: Terget (desired) performance level (units of @capacity).
+ * @target_perf: Target (desired) performance level (units of @capacity).
* @capacity: Capacity of the target CPU.
*
* Carry out a fast performance level switch of @cpu without sleeping.
@@ -2810,8 +2810,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (ret)
goto err_boost_unreg;
- if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
- list_empty(&cpufreq_policy_list)) {
+ if (unlikely(list_empty(&cpufreq_policy_list))) {
/* if all ->init() calls failed, unregister */
ret = -ENODEV;
pr_debug("%s: No CPU initialized for driver %s\n", __func__,
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 91f477a6cbc4..9e97f60f8199 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -95,7 +95,7 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver davinci_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = davinci_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index f839dc9852c0..d3f756f7b5a0 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -52,7 +52,13 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
}
policy->min = policy->cpuinfo.min_freq = min_freq;
- policy->max = policy->cpuinfo.max_freq = max_freq;
+ policy->max = max_freq;
+ /*
+ * If the driver has set its own cpuinfo.max_freq above max_freq, leave
+ * it as is.
+ */
+ if (policy->cpuinfo.max_freq < max_freq)
+ policy->max = policy->cpuinfo.max_freq = max_freq;
if (policy->min == ~0)
return -EINVAL;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index be05e038d956..5175ae3cac44 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -819,13 +819,13 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
NULL,
};
-static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
+static void intel_pstate_get_hwp_max(struct cpudata *cpu, int *phy_max,
int *current_max)
{
u64 cap;
- rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
- WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
+ rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
+ WRITE_ONCE(cpu->hwp_cap_cached, cap);
if (global.no_turbo || global.turbo_disabled)
*current_max = HWP_GUARANTEED_PERF(cap);
else
@@ -914,7 +914,7 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
}
value &= ~GENMASK_ULL(31, 0);
- min_perf = HWP_LOWEST_PERF(cpu->hwp_cap_cached);
+ min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));
/* Set hwp_max = hwp_min */
value |= HWP_MAX_PERF(min_perf);
@@ -1213,7 +1213,7 @@ static void update_qos_request(enum freq_qos_req_type type)
continue;
if (hwp_active)
- intel_pstate_get_hwp_max(i, &turbo_max, &max_state);
+ intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
else
turbo_max = cpu->pstate.turbo_pstate;
@@ -1714,21 +1714,22 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
cpu->pstate.min_pstate = pstate_funcs.get_min();
- cpu->pstate.max_pstate = pstate_funcs.get_max();
cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
cpu->pstate.scaling = pstate_funcs.get_scaling();
- cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
if (hwp_active && !hwp_mode_bdw) {
unsigned int phy_max, current_max;
- intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+ intel_pstate_get_hwp_max(cpu, &phy_max, &current_max);
cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
cpu->pstate.turbo_pstate = phy_max;
+ cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(READ_ONCE(cpu->hwp_cap_cached));
} else {
cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ cpu->pstate.max_pstate = pstate_funcs.get_max();
}
+ cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
if (pstate_funcs.get_aperf_mperf_shift)
cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
@@ -1750,6 +1751,7 @@ static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;
static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
{
u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
+ u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
u32 max_limit = (hwp_req & 0xff00) >> 8;
u32 min_limit = (hwp_req & 0xff);
u32 boost_level1;
@@ -1776,14 +1778,14 @@ static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
cpu->hwp_boost_min = min_limit;
	/* level at the halfway mark between min and guaranteed */
- boost_level1 = (HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) + min_limit) >> 1;
+ boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1;
if (cpu->hwp_boost_min < boost_level1)
cpu->hwp_boost_min = boost_level1;
- else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
- cpu->hwp_boost_min = HWP_GUARANTEED_PERF(cpu->hwp_cap_cached);
- else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) &&
- max_limit != HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
+ else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap))
+ cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap);
+ else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) &&
+ max_limit != HWP_GUARANTEED_PERF(hwp_cap))
cpu->hwp_boost_min = max_limit;
else
return;
@@ -2207,7 +2209,7 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
* rather than pure ratios.
*/
if (hwp_active) {
- intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
+ intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
} else {
max_state = global.no_turbo || global.turbo_disabled ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
@@ -2322,7 +2324,7 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
if (hwp_active) {
int max_state, turbo_max;
- intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
+ intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
max_freq = max_state * cpu->pstate.scaling;
} else {
max_freq = intel_pstate_get_max_freq(cpu);
@@ -2496,7 +2498,7 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
* driver call was via the normal or fast switch path. Various graphs
* output from the intel_pstate_tracer.py utility that include core_busy
* (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
- * so we use 10 to indicate the the normal path through the driver, and
+ * so we use 10 to indicate the normal path through the driver, and
* 90 to indicate the fast switch path through the driver.
* The scaled_busy field is not used, and is set to 0.
*/
@@ -2526,7 +2528,7 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
fp_toint(cpu->iowait_boost * 100));
}
-static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 min, u32 max,
+static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
u32 desired, bool fast_switch)
{
u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
@@ -2550,7 +2552,7 @@ static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 min, u32 max,
wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}
-static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu,
+static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
u32 target_pstate, bool fast_switch)
{
if (fast_switch)
@@ -2572,10 +2574,10 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
int max_pstate = policy->strict_target ?
target_pstate : cpu->max_perf_ratio;
- intel_cpufreq_adjust_hwp(cpu, target_pstate, max_pstate, 0,
+ intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
fast_switch);
} else if (target_pstate != old_pstate) {
- intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
+ intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
}
cpu->pstate.current_pstate = target_pstate;
@@ -2673,7 +2675,7 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate);
- intel_cpufreq_adjust_hwp(cpu, min_pstate, max_pstate, target_pstate, true);
+ intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true);
cpu->pstate.current_pstate = target_pstate;
intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
@@ -2709,7 +2711,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (hwp_active) {
u64 value;
- intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state);
+ intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
WRITE_ONCE(cpu->hwp_req_cached, value);
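
A minimal sketch of the snapshot-then-derive pattern used in intel_pstate_hwp_boost_up() above; the helper name is hypothetical. Reading the cached HWP capabilities once into a local keeps every comparison consistent even if another context updates the cache concurrently.

static u32 hwp_guaranteed_snapshot(struct cpudata *cpu)
{
	u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);	/* one snapshot of the cached MSR */

	return HWP_GUARANTEED_PERF(hwp_cap);		/* derive all limits from that snapshot */
}
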
diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
index 86f612593e49..fb72d709db56 100644
--- a/drivers/cpufreq/loongson1-cpufreq.c
+++ b/drivers/cpufreq/loongson1-cpufreq.c
@@ -116,7 +116,7 @@ static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
static struct cpufreq_driver ls1x_cpufreq_driver = {
.name = "cpufreq-ls1x",
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = ls1x_cpufreq_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 022e3e966e71..f2e491b25b07 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -463,7 +463,7 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
}
static struct cpufreq_driver mtk_cpufreq_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 3694bb030df3..e035ee216b0f 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -144,7 +144,7 @@ static int omap_cpu_exit(struct cpufreq_policy *policy)
}
static struct cpufreq_driver omap_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = omap_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 73621bc11976..4f20c6a9108d 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -439,8 +439,7 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
.init = pmac_cpufreq_cpu_init,
.suspend = pmac_cpufreq_suspend,
.resume = pmac_cpufreq_resume,
- .flags = CPUFREQ_PM_NO_WARN |
- CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.attr = cpufreq_generic_attr,
.name = "powermac",
};
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 9ed5341dc515..d3c23447b892 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -32,6 +32,7 @@ struct qcom_cpufreq_soc_data {
struct qcom_cpufreq_data {
void __iomem *base;
+ struct resource *res;
const struct qcom_cpufreq_soc_data *soc_data;
};
@@ -54,7 +55,7 @@ static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy,
if (IS_ERR(opp))
return PTR_ERR(opp);
- ret = dev_pm_opp_set_bw(dev, opp);
+ ret = dev_pm_opp_set_opp(dev, opp);
dev_pm_opp_put(opp);
return ret;
}
@@ -280,6 +281,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
struct of_phandle_args args;
struct device_node *cpu_np;
struct device *cpu_dev;
+ struct resource *res;
void __iomem *base;
struct qcom_cpufreq_data *data;
int ret, index;
@@ -303,18 +305,33 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
index = args.args[0];
- base = devm_platform_ioremap_resource(pdev, index);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ if (!res) {
+ dev_err(dev, "failed to get mem resource %d\n", index);
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), res->name)) {
+ dev_err(dev, "failed to request resource %pR\n", res);
+ return -EBUSY;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (IS_ERR(base)) {
+ dev_err(dev, "failed to map resource %pR\n", res);
+ ret = PTR_ERR(base);
+ goto release_region;
+ }
- data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
ret = -ENOMEM;
- goto error;
+ goto unmap_base;
}
data->soc_data = of_device_get_match_data(&pdev->dev);
data->base = base;
+ data->res = res;
/* HW should be in enabled state to proceed */
if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) {
@@ -347,9 +364,19 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
+ if (policy_has_boost_freq(policy)) {
+ ret = cpufreq_enable_boost_support();
+ if (ret)
+ dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
+ }
+
return 0;
error:
- devm_iounmap(dev, base);
+ kfree(data);
+unmap_base:
+ iounmap(data->base);
+release_region:
+ release_mem_region(res->start, resource_size(res));
return ret;
}
@@ -357,12 +384,15 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev = get_cpu_device(policy->cpu);
struct qcom_cpufreq_data *data = policy->driver_data;
- struct platform_device *pdev = cpufreq_get_driver_data();
+ struct resource *res = data->res;
+ void __iomem *base = data->base;
dev_pm_opp_remove_all_dynamic(cpu_dev);
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
kfree(policy->freq_table);
- devm_iounmap(&pdev->dev, data->base);
+ kfree(data);
+ iounmap(base);
+ release_mem_region(res->start, resource_size(res));
return 0;
}
@@ -374,7 +404,7 @@ static struct freq_attr *qcom_cpufreq_hw_attr[] = {
};
static struct cpufreq_driver cpufreq_qcom_hw_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
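
A minimal sketch of the request_mem_region()/ioremap() lifetime now managed by hand in qcom_cpufreq_hw_cpu_init() and released in the exit path; the helper below is hypothetical and only shows the reserve, map, and unwind order. Note that ioremap() returns NULL, not an ERR_PTR, on failure.

static void __iomem *cpufreq_hw_map(struct platform_device *pdev, int index,
				    struct resource **out_res)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	if (!res)
		return ERR_PTR(-ENODEV);

	if (!request_mem_region(res->start, resource_size(res), res->name))
		return ERR_PTR(-EBUSY);

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		release_mem_region(res->start, resource_size(res));
		return ERR_PTR(-ENOMEM);
	}

	*out_res = res;
	return base;
}
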
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 37efc0dc3f91..7380c32b238e 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -420,7 +420,7 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
#endif
static struct cpufreq_driver s3c24xx_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.target = s3c_cpufreq_target,
.get = cpufreq_generic_get,
.init = s3c_cpufreq_init,
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index bed496cf8d24..69786e5bbf05 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -574,7 +574,7 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
}
static struct cpufreq_driver s5pv210_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = s5pv210_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
index 5c075ef6adc0..252b9fc26124 100644
--- a/drivers/cpufreq/sa1100-cpufreq.c
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -186,7 +186,7 @@ static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver sa1100_driver __refdata = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sa1100_target,
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index d9d04d935b3a..1a83c8678a63 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -310,7 +310,7 @@ static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
/* sa1110_driver needs __refdata because it must remain after init registers
* it with cpufreq_register_driver() */
static struct cpufreq_driver sa1110_driver __refdata = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sa1110_target,
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 491a0a24fb1e..5bd03b59887f 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -217,7 +217,7 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
static struct cpufreq_driver scmi_cpufreq_driver = {
.name = "scmi",
- .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index e5140ad63db8..d6a698a1b5d1 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -191,7 +191,7 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
static struct cpufreq_driver scpi_cpufreq_driver = {
.name = "scpi-cpufreq",
- .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
diff --git a/drivers/cpufreq/sfi-cpufreq.c b/drivers/cpufreq/sfi-cpufreq.c
deleted file mode 100644
index 45cfdf67cf03..000000000000
--- a/drivers/cpufreq/sfi-cpufreq.c
+++ /dev/null
@@ -1,127 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * SFI Performance States Driver
- *
- * Author: Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>
- * Author: Srinidhi Kasagar <srinidhi.kasagar@intel.com>
- */
-
-#include <linux/cpufreq.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sfi.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
-
-#include <asm/msr.h>
-
-static struct cpufreq_frequency_table *freq_table;
-static struct sfi_freq_table_entry *sfi_cpufreq_array;
-static int num_freq_table_entries;
-
-static int sfi_parse_freq(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb;
- struct sfi_freq_table_entry *pentry;
- int totallen;
-
- sb = (struct sfi_table_simple *)table;
- num_freq_table_entries = SFI_GET_NUM_ENTRIES(sb,
- struct sfi_freq_table_entry);
- if (num_freq_table_entries <= 1) {
- pr_err("No p-states discovered\n");
- return -ENODEV;
- }
-
- pentry = (struct sfi_freq_table_entry *)sb->pentry;
- totallen = num_freq_table_entries * sizeof(*pentry);
-
- sfi_cpufreq_array = kmemdup(pentry, totallen, GFP_KERNEL);
- if (!sfi_cpufreq_array)
- return -ENOMEM;
-
- return 0;
-}
-
-static int sfi_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
-{
- unsigned int next_perf_state = 0; /* Index into perf table */
- u32 lo, hi;
-
- next_perf_state = policy->freq_table[index].driver_data;
-
- rdmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, &lo, &hi);
- lo = (lo & ~INTEL_PERF_CTL_MASK) |
- ((u32) sfi_cpufreq_array[next_perf_state].ctrl_val &
- INTEL_PERF_CTL_MASK);
- wrmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, lo, hi);
-
- return 0;
-}
-
-static int sfi_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
- policy->cpuinfo.transition_latency = 100000; /* 100us */
- policy->freq_table = freq_table;
-
- return 0;
-}
-
-static struct cpufreq_driver sfi_cpufreq_driver = {
- .flags = CPUFREQ_CONST_LOOPS,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = sfi_cpufreq_target,
- .init = sfi_cpufreq_cpu_init,
- .name = "sfi-cpufreq",
- .attr = cpufreq_generic_attr,
-};
-
-static int __init sfi_cpufreq_init(void)
-{
- int ret, i;
-
- /* parse the freq table from SFI */
- ret = sfi_table_parse(SFI_SIG_FREQ, NULL, NULL, sfi_parse_freq);
- if (ret)
- return ret;
-
- freq_table = kcalloc(num_freq_table_entries + 1, sizeof(*freq_table),
- GFP_KERNEL);
- if (!freq_table) {
- ret = -ENOMEM;
- goto err_free_array;
- }
-
- for (i = 0; i < num_freq_table_entries; i++) {
- freq_table[i].driver_data = i;
- freq_table[i].frequency = sfi_cpufreq_array[i].freq_mhz * 1000;
- }
- freq_table[i].frequency = CPUFREQ_TABLE_END;
-
- ret = cpufreq_register_driver(&sfi_cpufreq_driver);
- if (ret)
- goto err_free_tbl;
-
- return ret;
-
-err_free_tbl:
- kfree(freq_table);
-err_free_array:
- kfree(sfi_cpufreq_array);
- return ret;
-}
-late_initcall(sfi_cpufreq_init);
-
-static void __exit sfi_cpufreq_exit(void)
-{
- cpufreq_unregister_driver(&sfi_cpufreq_driver);
- kfree(freq_table);
- kfree(sfi_cpufreq_array);
-}
-module_exit(sfi_cpufreq_exit);
-
-MODULE_AUTHOR("Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>");
-MODULE_DESCRIPTION("SFI Performance-States Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 73bd8dc47074..7d0d62a06bf3 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -160,7 +160,7 @@ static int spear_cpufreq_init(struct cpufreq_policy *policy)
static struct cpufreq_driver spear_cpufreq_driver = {
.name = "cpufreq-spear",
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = spear_cpufreq_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/tango-cpufreq.c b/drivers/cpufreq/tango-cpufreq.c
deleted file mode 100644
index 89a7f860bfe8..000000000000
--- a/drivers/cpufreq/tango-cpufreq.c
+++ /dev/null
@@ -1,38 +0,0 @@
-#include <linux/of.h>
-#include <linux/cpu.h>
-#include <linux/clk.h>
-#include <linux/pm_opp.h>
-#include <linux/platform_device.h>
-
-static const struct of_device_id machines[] __initconst = {
- { .compatible = "sigma,tango4" },
- { /* sentinel */ }
-};
-
-static int __init tango_cpufreq_init(void)
-{
- struct device *cpu_dev = get_cpu_device(0);
- unsigned long max_freq;
- struct clk *cpu_clk;
- void *res;
-
- if (!of_match_node(machines, of_root))
- return -ENODEV;
-
- cpu_clk = clk_get(cpu_dev, NULL);
- if (IS_ERR(cpu_clk))
- return -ENODEV;
-
- max_freq = clk_get_rate(cpu_clk);
-
- dev_pm_opp_add(cpu_dev, max_freq / 1, 0);
- dev_pm_opp_add(cpu_dev, max_freq / 2, 0);
- dev_pm_opp_add(cpu_dev, max_freq / 3, 0);
- dev_pm_opp_add(cpu_dev, max_freq / 5, 0);
- dev_pm_opp_add(cpu_dev, max_freq / 9, 0);
-
- res = platform_device_register_data(NULL, "cpufreq-dt", -1, NULL, 0);
-
- return PTR_ERR_OR_ZERO(res);
-}
-device_initcall(tango_cpufreq_init);
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index e566ea298b59..5d1943e787b0 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -117,7 +117,7 @@ static unsigned int tegra186_cpufreq_get(unsigned int cpu)
static struct cpufreq_driver tegra186_cpufreq_driver = {
.name = "tegra186",
- .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.get = tegra186_cpufreq_get,
.verify = cpufreq_generic_frequency_table_verify,
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 6a67f36f3b80..a9620e4489ae 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -272,8 +272,7 @@ static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
static struct cpufreq_driver tegra194_cpufreq_driver = {
.name = "tegra194",
- .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS |
- CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra194_cpufreq_set_target,
.get = tegra194_get_speed,
diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
index 8c893043953e..e8db3d75be25 100644
--- a/drivers/cpufreq/tegra20-cpufreq.c
+++ b/drivers/cpufreq/tegra20-cpufreq.c
@@ -32,6 +32,16 @@ static bool cpu0_node_has_opp_v2_prop(void)
return ret;
}
+static void tegra20_cpufreq_put_supported_hw(void *opp_table)
+{
+ dev_pm_opp_put_supported_hw(opp_table);
+}
+
+static void tegra20_cpufreq_dt_unregister(void *cpufreq_dt)
+{
+ platform_device_unregister(cpufreq_dt);
+}
+
static int tegra20_cpufreq_probe(struct platform_device *pdev)
{
struct platform_device *cpufreq_dt;
@@ -68,42 +78,31 @@ static int tegra20_cpufreq_probe(struct platform_device *pdev)
return err;
}
+ err = devm_add_action_or_reset(&pdev->dev,
+ tegra20_cpufreq_put_supported_hw,
+ opp_table);
+ if (err)
+ return err;
+
cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
err = PTR_ERR_OR_ZERO(cpufreq_dt);
if (err) {
dev_err(&pdev->dev,
"failed to create cpufreq-dt device: %d\n", err);
- goto err_put_supported_hw;
+ return err;
}
- platform_set_drvdata(pdev, cpufreq_dt);
-
- return 0;
-
-err_put_supported_hw:
- dev_pm_opp_put_supported_hw(opp_table);
-
- return err;
-}
-
-static int tegra20_cpufreq_remove(struct platform_device *pdev)
-{
- struct platform_device *cpufreq_dt;
- struct opp_table *opp_table;
-
- cpufreq_dt = platform_get_drvdata(pdev);
- platform_device_unregister(cpufreq_dt);
-
- opp_table = dev_pm_opp_get_opp_table(get_cpu_device(0));
- dev_pm_opp_put_supported_hw(opp_table);
- dev_pm_opp_put_opp_table(opp_table);
+ err = devm_add_action_or_reset(&pdev->dev,
+ tegra20_cpufreq_dt_unregister,
+ cpufreq_dt);
+ if (err)
+ return err;
return 0;
}
static struct platform_driver tegra20_cpufreq_driver = {
.probe = tegra20_cpufreq_probe,
- .remove = tegra20_cpufreq_remove,
.driver = {
.name = "tegra20-cpufreq",
},
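
A minimal sketch of the devm_add_action_or_reset() pattern that lets tegra20_cpufreq_probe() drop its remove() callback; the callback and helper names are illustrative.

static void example_unregister(void *data)
{
	platform_device_unregister(data);	/* undone automatically at unbind */
}

static int example_probe(struct platform_device *pdev)
{
	struct platform_device *child;

	child = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
	if (IS_ERR(child))
		return PTR_ERR(child);

	/* Runs example_unregister() on any later probe failure and on detach. */
	return devm_add_action_or_reset(&pdev->dev, example_unregister, child);
}
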
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index f711d8eaea6a..51dfa9ae6cf5 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -486,8 +486,7 @@ static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy)
static struct cpufreq_driver ve_spc_cpufreq_driver = {
.name = "vexpress-spc",
- .flags = CPUFREQ_STICKY |
- CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = ve_spc_cpufreq_set_target,
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index e535f28a8028..9a4c275a1335 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -348,7 +348,7 @@ config CRYPTO_DEV_PPC4XX
config HW_RANDOM_PPC4XX
bool "PowerPC 4xx generic true random number generator support"
- depends on CRYPTO_DEV_PPC4XX && HW_RANDOM
+ depends on CRYPTO_DEV_PPC4XX && HW_RANDOM=y
default y
help
This option provides the kernel-side support for the TRNG hardware
@@ -404,24 +404,6 @@ config CRYPTO_DEV_OMAP_DES
endif # CRYPTO_DEV_OMAP
-config CRYPTO_DEV_PICOXCELL
- tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
- depends on (ARCH_PICOXCELL || COMPILE_TEST) && HAVE_CLK
- select CRYPTO_AEAD
- select CRYPTO_AES
- select CRYPTO_AUTHENC
- select CRYPTO_SKCIPHER
- select CRYPTO_LIB_DES
- select CRYPTO_CBC
- select CRYPTO_ECB
- select CRYPTO_SEQIV
- help
- This option enables support for the hardware offload engines in the
- Picochip picoXcell SoC devices. Select this for IPSEC ESP offload
- and for 3gpp Layer 2 ciphering support.
-
- Saying m here will build a module named picoxcell_crypto.
-
config CRYPTO_DEV_SAHARA
tristate "Support for SAHARA crypto accelerator"
depends on ARCH_MXC && OF
@@ -773,21 +755,6 @@ config CRYPTO_DEV_ZYNQMP_AES
accelerator. Select this if you want to use the ZynqMP module
for AES algorithms.
-config CRYPTO_DEV_MEDIATEK
- tristate "MediaTek's EIP97 Cryptographic Engine driver"
- depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST
- select CRYPTO_LIB_AES
- select CRYPTO_AEAD
- select CRYPTO_SKCIPHER
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- select CRYPTO_SHA512
- select CRYPTO_HMAC
- help
- This driver allows you to utilize the hardware crypto accelerator
- EIP97 which can be found on the MT7623 MT2701, MT8521p, etc ....
- Select this if you want to use it for AES/SHA1/SHA2 algorithms.
-
source "drivers/crypto/chelsio/Kconfig"
source "drivers/crypto/virtio/Kconfig"
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index fff9a70348e1..fa22cb19e242 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
-obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
n2_crypto-y := n2_core.o n2_asm.o
@@ -31,7 +30,6 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
-obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
index 180c8a9db819..856fb2045656 100644
--- a/drivers/crypto/allwinner/Kconfig
+++ b/drivers/crypto/allwinner/Kconfig
@@ -32,6 +32,15 @@ config CRYPTO_DEV_SUN4I_SS_PRNG
Select this option if you want to provide kernel-side support for
the Pseudo-Random Number Generator found in the Security System.
+config CRYPTO_DEV_SUN4I_SS_DEBUG
+ bool "Enable sun4i-ss stats"
+ depends on CRYPTO_DEV_SUN4I_SS
+ depends on DEBUG_FS
+ help
+ Say y to enable sun4i-ss debug stats.
+ This will create /sys/kernel/debug/sun4i-ss/stats for displaying
+ the number of requests per algorithm.
+
config CRYPTO_DEV_SUN8I_CE
tristate "Support for Allwinner Crypto Engine cryptographic offloader"
select CRYPTO_SKCIPHER
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index b72de8939497..c2e6f5ed1d79 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -20,6 +20,7 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
u32 mode = ctx->mode;
+ void *backup_iv = NULL;
/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
u32 rx_cnt = SS_RX_DEFAULT;
u32 tx_cnt = 0;
@@ -30,9 +31,13 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
unsigned int ileft = areq->cryptlen;
unsigned int oleft = areq->cryptlen;
unsigned int todo;
+ unsigned long pi = 0, po = 0; /* progress for in and out */
+ bool miter_err;
struct sg_mapping_iter mi, mo;
unsigned int oi, oo; /* offset for in and out */
unsigned long flags;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun4i_ss_alg_template *algt;
if (!areq->cryptlen)
return 0;
@@ -42,52 +47,77 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
return -EINVAL;
}
+ if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
+ backup_iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!backup_iv)
+ return -ENOMEM;
+ scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
+ }
+
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
+ algt->stat_opti++;
+ algt->stat_bytes += areq->cryptlen;
+ }
+
spin_lock_irqsave(&ss->slock, flags);
- for (i = 0; i < op->keylen; i += 4)
- writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+ for (i = 0; i < op->keylen / 4; i++)
+ writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
if (areq->iv) {
for (i = 0; i < 4 && i < ivsize / 4; i++) {
v = *(u32 *)(areq->iv + i * 4);
- writel(v, ss->base + SS_IV0 + i * 4);
+ writesl(ss->base + SS_IV0 + i * 4, &v, 1);
}
}
writel(mode, ss->base + SS_CTL);
- sg_miter_start(&mi, areq->src, sg_nents(areq->src),
- SG_MITER_FROM_SG | SG_MITER_ATOMIC);
- sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
- SG_MITER_TO_SG | SG_MITER_ATOMIC);
- sg_miter_next(&mi);
- sg_miter_next(&mo);
- if (!mi.addr || !mo.addr) {
- dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
- err = -EINVAL;
- goto release_ss;
- }
ileft = areq->cryptlen / 4;
oleft = areq->cryptlen / 4;
oi = 0;
oo = 0;
do {
- todo = min(rx_cnt, ileft);
- todo = min_t(size_t, todo, (mi.length - oi) / 4);
- if (todo) {
- ileft -= todo;
- writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
- oi += todo * 4;
- }
- if (oi == mi.length) {
- sg_miter_next(&mi);
- oi = 0;
+ if (ileft) {
+ sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+ if (pi)
+ sg_miter_skip(&mi, pi);
+ miter_err = sg_miter_next(&mi);
+ if (!miter_err || !mi.addr) {
+ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+ err = -EINVAL;
+ goto release_ss;
+ }
+ todo = min(rx_cnt, ileft);
+ todo = min_t(size_t, todo, (mi.length - oi) / 4);
+ if (todo) {
+ ileft -= todo;
+ writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
+ oi += todo * 4;
+ }
+ if (oi == mi.length) {
+ pi += mi.length;
+ oi = 0;
+ }
+ sg_miter_stop(&mi);
}
spaces = readl(ss->base + SS_FCSR);
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
+ sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+ SG_MITER_TO_SG | SG_MITER_ATOMIC);
+ if (po)
+ sg_miter_skip(&mo, po);
+ miter_err = sg_miter_next(&mo);
+ if (!miter_err || !mo.addr) {
+ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+ err = -EINVAL;
+ goto release_ss;
+ }
todo = min(tx_cnt, oleft);
todo = min_t(size_t, todo, (mo.length - oo) / 4);
if (todo) {
@@ -96,33 +126,41 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
oo += todo * 4;
}
if (oo == mo.length) {
- sg_miter_next(&mo);
oo = 0;
+ po += mo.length;
}
+ sg_miter_stop(&mo);
} while (oleft);
if (areq->iv) {
- for (i = 0; i < 4 && i < ivsize / 4; i++) {
- v = readl(ss->base + SS_IV0 + i * 4);
- *(u32 *)(areq->iv + i * 4) = v;
+ if (mode & SS_DECRYPTION) {
+ memcpy(areq->iv, backup_iv, ivsize);
+ kfree_sensitive(backup_iv);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
+ ivsize, 0);
}
}
release_ss:
- sg_miter_stop(&mi);
- sg_miter_stop(&mo);
writel(0, ss->base + SS_CTL);
spin_unlock_irqrestore(&ss->slock, flags);
return err;
}
-
static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
int err;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun4i_ss_alg_template *algt;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
+ algt->stat_fb++;
+ }
skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm);
skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags,
@@ -161,13 +199,16 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
unsigned int ileft = areq->cryptlen;
unsigned int oleft = areq->cryptlen;
unsigned int todo;
+ void *backup_iv = NULL;
struct sg_mapping_iter mi, mo;
+ unsigned long pi = 0, po = 0; /* progress for in and out */
+ bool miter_err;
unsigned int oi, oo; /* offset for in and out */
unsigned int ob = 0; /* offset in buf */
unsigned int obo = 0; /* offset in bufo*/
unsigned int obl = 0; /* length of data in bufo */
unsigned long flags;
- bool need_fallback;
+ bool need_fallback = false;
if (!areq->cryptlen)
return 0;
@@ -186,12 +227,12 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
* we can use the SS optimized function
*/
while (in_sg && no_chunk == 1) {
- if (in_sg->length % 4)
+ if ((in_sg->length | in_sg->offset) & 3u)
no_chunk = 0;
in_sg = sg_next(in_sg);
}
while (out_sg && no_chunk == 1) {
- if (out_sg->length % 4)
+ if ((out_sg->length | out_sg->offset) & 3u)
no_chunk = 0;
out_sg = sg_next(out_sg);
}
@@ -202,30 +243,31 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
if (need_fallback)
return sun4i_ss_cipher_poll_fallback(areq);
+ if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
+ backup_iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!backup_iv)
+ return -ENOMEM;
+ scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
+ }
+
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
+ algt->stat_req++;
+ algt->stat_bytes += areq->cryptlen;
+ }
+
spin_lock_irqsave(&ss->slock, flags);
- for (i = 0; i < op->keylen; i += 4)
- writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+ for (i = 0; i < op->keylen / 4; i++)
+ writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
if (areq->iv) {
for (i = 0; i < 4 && i < ivsize / 4; i++) {
v = *(u32 *)(areq->iv + i * 4);
- writel(v, ss->base + SS_IV0 + i * 4);
+ writesl(ss->base + SS_IV0 + i * 4, &v, 1);
}
}
writel(mode, ss->base + SS_CTL);
- sg_miter_start(&mi, areq->src, sg_nents(areq->src),
- SG_MITER_FROM_SG | SG_MITER_ATOMIC);
- sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
- SG_MITER_TO_SG | SG_MITER_ATOMIC);
- sg_miter_next(&mi);
- sg_miter_next(&mo);
- if (!mi.addr || !mo.addr) {
- dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
- err = -EINVAL;
- goto release_ss;
- }
ileft = areq->cryptlen;
oleft = areq->cryptlen;
oi = 0;
@@ -233,8 +275,16 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
while (oleft) {
if (ileft) {
- char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
-
+ sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+ if (pi)
+ sg_miter_skip(&mi, pi);
+ miter_err = sg_miter_next(&mi);
+ if (!miter_err || !mi.addr) {
+ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+ err = -EINVAL;
+ goto release_ss;
+ }
/*
* todo is the number of consecutive 4byte word that we
* can read from current SG
@@ -256,52 +306,57 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
*/
todo = min(rx_cnt * 4 - ob, ileft);
todo = min_t(size_t, todo, mi.length - oi);
- memcpy(buf + ob, mi.addr + oi, todo);
+ memcpy(ss->buf + ob, mi.addr + oi, todo);
ileft -= todo;
oi += todo;
ob += todo;
if (!(ob % 4)) {
- writesl(ss->base + SS_RXFIFO, buf,
+ writesl(ss->base + SS_RXFIFO, ss->buf,
ob / 4);
ob = 0;
}
}
if (oi == mi.length) {
- sg_miter_next(&mi);
+ pi += mi.length;
oi = 0;
}
+ sg_miter_stop(&mi);
}
spaces = readl(ss->base + SS_FCSR);
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
- dev_dbg(ss->dev,
- "%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
- mode,
- oi, mi.length, ileft, areq->cryptlen, rx_cnt,
- oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);
if (!tx_cnt)
continue;
+ sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+ SG_MITER_TO_SG | SG_MITER_ATOMIC);
+ if (po)
+ sg_miter_skip(&mo, po);
+ miter_err = sg_miter_next(&mo);
+ if (!miter_err || !mo.addr) {
+ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+ err = -EINVAL;
+ goto release_ss;
+ }
/* todo in 4bytes word */
todo = min(tx_cnt, oleft / 4);
todo = min_t(size_t, todo, (mo.length - oo) / 4);
+
if (todo) {
readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
oleft -= todo * 4;
oo += todo * 4;
if (oo == mo.length) {
- sg_miter_next(&mo);
+ po += mo.length;
oo = 0;
}
} else {
- char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
-
/*
* read obl bytes in bufo, we read at maximum for
* emptying the device
*/
- readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
+ readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt);
obl = tx_cnt * 4;
obo = 0;
do {
@@ -313,28 +368,31 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
*/
todo = min_t(size_t,
mo.length - oo, obl - obo);
- memcpy(mo.addr + oo, bufo + obo, todo);
+ memcpy(mo.addr + oo, ss->bufo + obo, todo);
oleft -= todo;
obo += todo;
oo += todo;
if (oo == mo.length) {
+ po += mo.length;
sg_miter_next(&mo);
oo = 0;
}
} while (obo < obl);
/* bufo must be fully used here */
}
+ sg_miter_stop(&mo);
}
if (areq->iv) {
- for (i = 0; i < 4 && i < ivsize / 4; i++) {
- v = readl(ss->base + SS_IV0 + i * 4);
- *(u32 *)(areq->iv + i * 4) = v;
+ if (mode & SS_DECRYPTION) {
+ memcpy(areq->iv, backup_iv, ivsize);
+ kfree_sensitive(backup_iv);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
+ ivsize, 0);
}
}
release_ss:
- sg_miter_stop(&mi);
- sg_miter_stop(&mo);
writel(0, ss->base + SS_CTL);
spin_unlock_irqrestore(&ss->slock, flags);
@@ -503,7 +561,6 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
sizeof(struct sun4i_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm));
-
err = pm_runtime_get_sync(op->ss->dev);
if (err < 0)
goto error_pm;
@@ -590,5 +647,4 @@ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
-
}
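
A minimal sketch of the decrypt-path IV bookkeeping added above, with a hypothetical helper name: in CBC decryption the next chaining IV is the last ciphertext block of the source, so it is saved before the engine can overwrite it (src may equal dst) and copied back into areq->iv once the request completes.

static void *cbc_save_next_iv(struct skcipher_request *areq, unsigned int ivsize)
{
	void *iv = kzalloc(ivsize, GFP_KERNEL);

	if (iv)
		scatterwalk_map_and_copy(iv, areq->src,
					 areq->cryptlen - ivsize, ivsize, 0);
	return iv;	/* later: memcpy(areq->iv, iv, ivsize); kfree_sensitive(iv); */
}
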
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index a2b67f7f8a81..709905ec4680 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -10,6 +10,7 @@
*/
#include <linux/clk.h>
#include <linux/crypto.h>
+#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -234,6 +235,51 @@ static struct sun4i_ss_alg_template ss_algs[] = {
#endif
};
+static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+ if (!ss_algs[i].ss)
+ continue;
+ switch (ss_algs[i].type) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ seq_printf(seq, "%s %s reqs=%lu opti=%lu fallback=%lu tsize=%lu\n",
+ ss_algs[i].alg.crypto.base.cra_driver_name,
+ ss_algs[i].alg.crypto.base.cra_name,
+ ss_algs[i].stat_req, ss_algs[i].stat_opti, ss_algs[i].stat_fb,
+ ss_algs[i].stat_bytes);
+ break;
+ case CRYPTO_ALG_TYPE_RNG:
+ seq_printf(seq, "%s %s reqs=%lu tsize=%lu\n",
+ ss_algs[i].alg.rng.base.cra_driver_name,
+ ss_algs[i].alg.rng.base.cra_name,
+ ss_algs[i].stat_req, ss_algs[i].stat_bytes);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ seq_printf(seq, "%s %s reqs=%lu\n",
+ ss_algs[i].alg.hash.halg.base.cra_driver_name,
+ ss_algs[i].alg.hash.halg.base.cra_name,
+ ss_algs[i].stat_req);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int sun4i_ss_dbgfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sun4i_ss_dbgfs_read, inode->i_private);
+}
+
+static const struct file_operations sun4i_ss_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = sun4i_ss_dbgfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/*
* Power management strategy: The device is suspended unless a TFM exists for
* one of the algorithms proposed by this driver.
@@ -454,6 +500,12 @@ static int sun4i_ss_probe(struct platform_device *pdev)
break;
}
}
+
+ /* Ignore error of debugfs */
+ ss->dbgfs_dir = debugfs_create_dir("sun4i-ss", NULL);
+ ss->dbgfs_stats = debugfs_create_file("stats", 0444, ss->dbgfs_dir, ss,
+ &sun4i_ss_debugfs_fops);
+
return 0;
error_alg:
i--;
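
The stats file registered above follows the standard read-only seq_file/debugfs pairing; a minimal equivalent written with the DEFINE_SHOW_ATTRIBUTE() helper (all names below are illustrative) looks like this.

static int example_stats_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "requests=%lu\n", 0UL);	/* dump per-algorithm counters here */
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example_stats);

/* In probe(); debugfs errors can be ignored, the file is best effort. */
debugfs_create_file("stats", 0444, dbgfs_dir, NULL, &example_stats_fops);
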
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
index 1dff48558f53..c1b4585e9bbc 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
@@ -191,8 +191,10 @@ static int sun4i_hash(struct ahash_request *areq)
u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, v, ivmode = 0;
struct sun4i_req_ctx *op = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
struct sun4i_ss_ctx *ss = tfmctx->ss;
+ struct sun4i_ss_alg_template *algt;
struct scatterlist *in_sg = areq->src;
struct sg_mapping_iter mi;
int in_r, err = 0;
@@ -398,6 +400,10 @@ static int sun4i_hash(struct ahash_request *areq)
*/
hash_final:
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
+ algt->stat_req++;
+ }
/* write the remaining words of the wait buffer */
if (op->len) {
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
index 729aafdbea84..443160a114bb 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "sun4i-ss.h"
int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
@@ -32,6 +33,11 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
if (err < 0)
return err;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
+ algt->stat_req++;
+ algt->stat_bytes += todo;
+ }
+
spin_lock_bh(&ss->slock);
writel(mode, ss->base + SS_CTL);
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
index 5c291e4a6857..0fee6f4e2d90 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
@@ -148,10 +148,14 @@ struct sun4i_ss_ctx {
struct reset_control *reset;
struct device *dev;
struct resource *res;
+ char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
+ char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
spinlock_t slock; /* control the use of the device */
#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG
u32 seed[SS_SEED_LEN / BITS_PER_LONG];
#endif
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_stats;
};
struct sun4i_ss_alg_template {
@@ -163,6 +167,10 @@ struct sun4i_ss_alg_template {
struct rng_alg rng;
} alg;
struct sun4i_ss_ctx *ss;
+ unsigned long stat_req;
+ unsigned long stat_fb;
+ unsigned long stat_bytes;
+ unsigned long stat_opti;
};
struct sun4i_tfm_ctx {
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 30390a7324b2..851b149f7170 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -42,7 +42,7 @@
/* ================= Device Structure ================== */
-struct device_private iproc_priv;
+struct bcm_device_private iproc_priv;
/* ==================== Parameters ===================== */
@@ -471,10 +471,8 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
{
struct spu_hw *spu = &iproc_priv.spu;
-#ifdef DEBUG
struct crypto_async_request *areq = rctx->parent;
struct skcipher_request *req = skcipher_request_cast(areq);
-#endif
struct iproc_ctx_s *ctx = rctx->ctx;
u32 payload_len;
@@ -996,13 +994,11 @@ static int ahash_req_done(struct iproc_reqctx_s *rctx)
static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
{
struct iproc_ctx_s *ctx = rctx->ctx;
-#ifdef DEBUG
struct crypto_async_request *areq = rctx->parent;
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
unsigned int blocksize =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
-#endif
/*
* Save hash to use as input to next op if incremental. Might be copying
* too much, but that's easier than figuring out actual digest size here
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 0ad5892b445d..71281a3bdbdc 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -420,7 +420,7 @@ struct spu_hw {
u32 num_chan;
};
-struct device_private {
+struct bcm_device_private {
struct platform_device *pdev;
struct spu_hw spu;
@@ -467,6 +467,6 @@ struct device_private {
struct mbox_chan **mbox;
};
-extern struct device_private iproc_priv;
+extern struct bcm_device_private iproc_priv;
#endif
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index fe126f95c702..007abf92cc05 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -41,7 +41,7 @@ void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len)
packet_log("SPU Message header %p len: %u\n", buf, buf_len);
/* ========== Decode MH ========== */
- packet_log(" MH 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
+ packet_log(" MH 0x%08x\n", be32_to_cpup((__be32 *)ptr));
if (spuh->mh.flags & MH_SCTX_PRES)
packet_log(" SCTX present\n");
if (spuh->mh.flags & MH_BDESC_PRES)
@@ -273,22 +273,21 @@ void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len)
/* ========== Decode BDESC ========== */
if (spuh->mh.flags & MH_BDESC_PRES) {
-#ifdef DEBUG
struct BDESC_HEADER *bdesc = (struct BDESC_HEADER *)ptr;
-#endif
- packet_log(" BDESC[0] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
+
+ packet_log(" BDESC[0] 0x%08x\n", be32_to_cpup((__be32 *)ptr));
packet_log(" OffsetMAC:%u LengthMAC:%u\n",
be16_to_cpu(bdesc->offset_mac),
be16_to_cpu(bdesc->length_mac));
ptr += sizeof(u32);
- packet_log(" BDESC[1] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
+ packet_log(" BDESC[1] 0x%08x\n", be32_to_cpup((__be32 *)ptr));
packet_log(" OffsetCrypto:%u LengthCrypto:%u\n",
be16_to_cpu(bdesc->offset_crypto),
be16_to_cpu(bdesc->length_crypto));
ptr += sizeof(u32);
- packet_log(" BDESC[2] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
+ packet_log(" BDESC[2] 0x%08x\n", be32_to_cpup((__be32 *)ptr));
packet_log(" OffsetICV:%u OffsetIV:%u\n",
be16_to_cpu(bdesc->offset_icv),
be16_to_cpu(bdesc->offset_iv));
@@ -297,10 +296,9 @@ void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len)
/* ========== Decode BD ========== */
if (spuh->mh.flags & MH_BD_PRES) {
-#ifdef DEBUG
struct BD_HEADER *bd = (struct BD_HEADER *)ptr;
-#endif
- packet_log(" BD[0] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
+
+ packet_log(" BD[0] 0x%08x\n", be32_to_cpup((__be32 *)ptr));
packet_log(" Size:%ubytes PrevLength:%u\n",
be16_to_cpu(bd->size), be16_to_cpu(bd->prev_length));
ptr += 4;
@@ -1056,9 +1054,9 @@ void spum_request_pad(u8 *pad_start,
/* add the size at the end as required per alg */
if (auth_alg == HASH_ALG_MD5)
- *(u64 *)ptr = cpu_to_le64((u64)total_sent * 8);
+ *(__le64 *)ptr = cpu_to_le64(total_sent * 8ull);
else /* SHA1, SHA2-224, SHA2-256 */
- *(u64 *)ptr = cpu_to_be64((u64)total_sent * 8);
+ *(__be64 *)ptr = cpu_to_be64(total_sent * 8ull);
ptr += sizeof(u64);
}
}
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index c860ffb0b4c3..2db35b5ccaa2 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -964,7 +964,6 @@ u32 spu2_create_request(u8 *spu_hdr,
unsigned int cipher_offset = aead_parms->assoc_size +
aead_parms->aad_pad_len + aead_parms->iv_len;
-#ifdef DEBUG
/* total size of the data following OMD (without STAT word padding) */
unsigned int real_db_size = spu_real_db_size(aead_parms->assoc_size,
aead_parms->iv_len,
@@ -973,7 +972,6 @@ u32 spu2_create_request(u8 *spu_hdr,
aead_parms->aad_pad_len,
aead_parms->data_pad_len,
hash_parms->pad_len);
-#endif
unsigned int assoc_size = aead_parms->assoc_size;
if (req_opts->is_aead &&
@@ -1263,9 +1261,9 @@ void spu2_request_pad(u8 *pad_start, u32 gcm_padding, u32 hash_pad_len,
/* add the size at the end as required per alg */
if (auth_alg == HASH_ALG_MD5)
- *(u64 *)ptr = cpu_to_le64((u64)total_sent * 8);
+ *(__le64 *)ptr = cpu_to_le64(total_sent * 8ull);
else /* SHA1, SHA2-224, SHA2-256 */
- *(u64 *)ptr = cpu_to_be64((u64)total_sent * 8);
+ *(__be64 *)ptr = cpu_to_be64(total_sent * 8ull);
ptr += sizeof(u64);
}
diff --git a/drivers/crypto/bcm/spu2.h b/drivers/crypto/bcm/spu2.h
index 6e666bfb3cfc..a76d4e054466 100644
--- a/drivers/crypto/bcm/spu2.h
+++ b/drivers/crypto/bcm/spu2.h
@@ -73,10 +73,10 @@ enum spu2_ret_md_opts {
/* Fixed Metadata format */
struct SPU2_FMD {
- u64 ctrl0;
- u64 ctrl1;
- u64 ctrl2;
- u64 ctrl3;
+ __le64 ctrl0;
+ __le64 ctrl1;
+ __le64 ctrl2;
+ __le64 ctrl3;
};
#define FMD_SIZE sizeof(struct SPU2_FMD)
diff --git a/drivers/crypto/bcm/spum.h b/drivers/crypto/bcm/spum.h
index 6116ad1dd26e..f062f75808de 100644
--- a/drivers/crypto/bcm/spum.h
+++ b/drivers/crypto/bcm/spum.h
@@ -69,18 +69,18 @@
/* Buffer Descriptor Header [BDESC]. SPU in big-endian mode. */
struct BDESC_HEADER {
- u16 offset_mac; /* word 0 [31-16] */
- u16 length_mac; /* word 0 [15-0] */
- u16 offset_crypto; /* word 1 [31-16] */
- u16 length_crypto; /* word 1 [15-0] */
- u16 offset_icv; /* word 2 [31-16] */
- u16 offset_iv; /* word 2 [15-0] */
+ __be16 offset_mac; /* word 0 [31-16] */
+ __be16 length_mac; /* word 0 [15-0] */
+ __be16 offset_crypto; /* word 1 [31-16] */
+ __be16 length_crypto; /* word 1 [15-0] */
+ __be16 offset_icv; /* word 2 [31-16] */
+ __be16 offset_iv; /* word 2 [15-0] */
};
/* Buffer Data Header [BD]. SPU in big-endian mode. */
struct BD_HEADER {
- u16 size;
- u16 prev_length;
+ __be16 size;
+ __be16 prev_length;
};
/* Command Context Header. SPU-M in big endian mode. */
@@ -144,13 +144,13 @@ struct MHEADER {
/* Generic Mode Security Context Structure [SCTX] */
struct SCTX {
/* word 0: protocol flags */
- u32 proto_flags;
+ __be32 proto_flags;
/* word 1: cipher flags */
- u32 cipher_flags;
+ __be32 cipher_flags;
/* word 2: Extended cipher flags */
- u32 ecf;
+ __be32 ecf;
};
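
A minimal sketch of how the __be16/__be32 annotations above pay off: sparse (make C=1) flags direct arithmetic on the annotated fields, so every access is forced through an explicit byte-order conversion. The struct and function below are illustrative.

struct example_bd {
	__be16 size;		/* big-endian wire format */
	__be16 prev_length;
};

static u32 example_total_len(const struct example_bd *bd)
{
	/* bd->size + bd->prev_length would be flagged by sparse */
	return be16_to_cpu(bd->size) + be16_to_cpu(bd->prev_length);
}
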
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index 2b304fc78059..c4669a96eaec 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -268,6 +268,7 @@ do_shash_err:
return rc;
}
+#ifdef DEBUG
/* Dump len bytes of a scatterlist starting at skip bytes into the sg */
void __dump_sg(struct scatterlist *sg, unsigned int skip, unsigned int len)
{
@@ -289,6 +290,7 @@ void __dump_sg(struct scatterlist *sg, unsigned int skip, unsigned int len)
if (debug_logging_sleep)
msleep(debug_logging_sleep);
}
+#endif
/* Returns the name for a given cipher alg/mode */
char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode)
@@ -348,7 +350,7 @@ char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode)
static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp)
{
- struct device_private *ipriv;
+ struct bcm_device_private *ipriv;
char *buf;
ssize_t ret, out_offset, out_count;
int i;
diff --git a/drivers/crypto/bcm/util.h b/drivers/crypto/bcm/util.h
index a89b2b9c1f52..61c256384816 100644
--- a/drivers/crypto/bcm/util.h
+++ b/drivers/crypto/bcm/util.h
@@ -58,12 +58,26 @@ void __dump_sg(struct scatterlist *sg, unsigned int skip, unsigned int len);
#else /* !DEBUG_ON */
-#define flow_log(...) do {} while (0)
-#define flow_dump(msg, var, var_len) do {} while (0)
-#define packet_log(...) do {} while (0)
-#define packet_dump(msg, var, var_len) do {} while (0)
-
-#define dump_sg(sg, skip, len) do {} while (0)
+static inline void flow_log(const char *format, ...)
+{
+}
+
+static inline void flow_dump(const char *msg, const void *var, size_t var_len)
+{
+}
+
+static inline void packet_log(const char *format, ...)
+{
+}
+
+static inline void packet_dump(const char *msg, const void *var, size_t var_len)
+{
+}
+
+static inline void dump_sg(struct scatterlist *sg, unsigned int skip,
+ unsigned int len)
+{
+}
#endif /* DEBUG_ON */
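
The switch from empty macros to static inline stubs above keeps call sites checked in !DEBUG_ON builds: with "#define flow_log(...) do {} while (0)" the arguments were discarded before the compiler ever saw them, so a misspelled variable compiled cleanly, while the inline stub still parses and type-checks every argument before optimizing the call away. A hypothetical call site:

	flow_log("processed %u bytes\n", rctx->total_todo);	/* the rctx reference is now always compile-checked */
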
diff --git a/drivers/crypto/caam/debugfs.c b/drivers/crypto/caam/debugfs.c
index 8ebf18398166..806bb20d2aa1 100644
--- a/drivers/crypto/caam/debugfs.c
+++ b/drivers/crypto/caam/debugfs.c
@@ -19,8 +19,8 @@ static int caam_debugfs_u32_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
-DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
#ifdef CONFIG_CAAM_QI
/*
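
DEFINE_DEBUGFS_ATTRIBUTE() above is the counterpart of DEFINE_SIMPLE_ATTRIBUTE() intended for debugfs_create_file_unsafe(), which skips the removal-protection proxy that plain debugfs_create_file() wraps around every operation. A minimal sketch with illustrative names:

static u32 counter;

static int example_u32_get(void *data, u64 *val)
{
	*val = *(u32 *)data;	/* expose a driver counter read-only */
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(example_fops_u32_ro, example_u32_get, NULL, "%llu\n");

/* registration: */
debugfs_create_file_unsafe("req_count", 0444, parent, &counter, &example_fops_u32_ro);
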
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
index f016448e43bb..112b12a32542 100644
--- a/drivers/crypto/cavium/cpt/cptvf_main.c
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -233,10 +233,10 @@ static int alloc_command_queues(struct cpt_vf *cptvf,
c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
rem_q_size;
- curr->head = (u8 *)dma_alloc_coherent(&pdev->dev,
- c_size + CPT_NEXT_CHUNK_PTR_SIZE,
- &curr->dma_addr,
- GFP_KERNEL);
+ curr->head = dma_alloc_coherent(&pdev->dev,
+ c_size + CPT_NEXT_CHUNK_PTR_SIZE,
+ &curr->dma_addr,
+ GFP_KERNEL);
if (!curr->head) {
dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
i, queue->nchunks);
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 476113e12489..cb9b4c4e371e 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -128,6 +128,7 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
+ case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report);
default: return 0;
}
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index cdfee501fbd9..78833491f534 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -921,7 +921,7 @@ static int cc_cipher_process(struct skcipher_request *req,
return crypto_skcipher_decrypt(subreq);
}
- /* The IV we are handed may be allocted from the stack so
+ /* The IV we are handed may be allocated from the stack so
* we must copy it to a DMAable buffer before use.
*/
req_ctx->iv = kmemdup(iv, ivsize, flags);
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 5f1d4602eb8f..f49579aa1452 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -23,7 +23,6 @@
#include <crypto/authenc.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
-#include <linux/version.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index f4f18bfc2247..4ee010f39912 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/io.h>
@@ -434,3 +435,4 @@ module_pci_driver(geode_aes_driver);
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index f69252b24671..181c109b19f7 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -14,8 +14,7 @@ enum {
HPRE_CLUSTER0,
HPRE_CLUSTER1,
HPRE_CLUSTER2,
- HPRE_CLUSTER3,
- HPRE_CLUSTERS_NUM,
+ HPRE_CLUSTER3
};
enum hpre_ctrl_dbgfs_file {
@@ -36,7 +35,10 @@ enum hpre_dfx_dbgfs_file {
HPRE_DFX_FILE_NUM
};
-#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1)
+#define HPRE_CLUSTERS_NUM_V2 (HPRE_CLUSTER3 + 1)
+#define HPRE_CLUSTERS_NUM_V3 1
+#define HPRE_CLUSTERS_NUM_MAX HPRE_CLUSTERS_NUM_V2
+#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM_MAX - 1)
struct hpre_debugfs_file {
int index;
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index e5c991913f09..e7a2c70eb9cf 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/topology.h>
+#include <linux/uacce.h>
#include "hpre.h"
#define HPRE_QUEUE_NUM_V2 1024
@@ -29,13 +30,14 @@
#define HPRE_BD_ARUSR_CFG 0x301030
#define HPRE_BD_AWUSR_CFG 0x301034
#define HPRE_TYPES_ENB 0x301038
+#define HPRE_RSA_ENB BIT(0)
+#define HPRE_ECC_ENB BIT(1)
#define HPRE_DATA_RUSER_CFG 0x30103c
#define HPRE_DATA_WUSER_CFG 0x301040
#define HPRE_INT_MASK 0x301400
#define HPRE_INT_STATUS 0x301800
#define HPRE_CORE_INT_ENABLE 0
#define HPRE_CORE_INT_DISABLE 0x003fffff
-#define HPRE_RAS_ECC_1BIT_TH 0x30140c
#define HPRE_RDCHN_INI_ST 0x301a00
#define HPRE_CLSTR_BASE 0x302000
#define HPRE_CORE_EN_OFFSET 0x04
@@ -45,7 +47,7 @@
#define HPRE_CORE_IS_SCHD_OFFSET 0x90
#define HPRE_RAS_CE_ENB 0x301410
-#define HPRE_HAC_RAS_CE_ENABLE 0x1
+#define HPRE_HAC_RAS_CE_ENABLE (BIT(0) | BIT(22) | BIT(23))
#define HPRE_RAS_NFE_ENB 0x301414
#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe
#define HPRE_RAS_FE_ENB 0x301418
@@ -73,7 +75,8 @@
#define HPRE_QM_AXI_CFG_MASK 0xffff
#define HPRE_QM_VFG_AX_MASK 0xff
#define HPRE_BD_USR_MASK 0x3
-#define HPRE_CLUSTER_CORE_MASK 0xf
+#define HPRE_CLUSTER_CORE_MASK_V2 0xf
+#define HPRE_CLUSTER_CORE_MASK_V3 0xff
#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044
#define HPRE_AM_OOO_SHUTDOWN_ENABLE BIT(0)
@@ -86,6 +89,11 @@
#define HPRE_QM_PM_FLR BIT(11)
#define HPRE_QM_SRIOV_FLR BIT(12)
+#define HPRE_CLUSTERS_NUM(qm) \
+ (((qm)->ver >= QM_HW_V3) ? HPRE_CLUSTERS_NUM_V3 : HPRE_CLUSTERS_NUM_V2)
+#define HPRE_CLUSTER_CORE_MASK(qm) \
+ (((qm)->ver >= QM_HW_V3) ? HPRE_CLUSTER_CORE_MASK_V3 :\
+ HPRE_CLUSTER_CORE_MASK_V2)
#define HPRE_VIA_MSI_DSM 1
#define HPRE_SQE_MASK_OFFSET 8
#define HPRE_SQE_MASK_LEN 24
@@ -129,7 +137,11 @@ static const struct hpre_hw_error hpre_hw_errors[] = {
{ .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" },
{ .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" },
{ .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" },
- { /* sentinel */ }
+ { .int_msk = BIT(22), .msg = "pt_rng_timeout_int_set"},
+ { .int_msk = BIT(23), .msg = "sva_fsm_timeout_int_set"},
+ {
+ /* sentinel */
+ }
};
static const u64 hpre_cluster_offsets[] = {
@@ -178,6 +190,19 @@ static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
"invalid_req_cnt"
};
+static const struct kernel_param_ops hpre_uacce_mode_ops = {
+ .set = uacce_mode_set,
+ .get = param_get_int,
+};
+
+/*
+ * uacce_mode = 0 means hpre only register to crypto,
+ * uacce_mode = 1 means hpre both register to crypto and uacce.
+ */
+static u32 uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
+
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
return q_num_set(val, kp, HPRE_PCI_DEVICE_ID);
@@ -214,6 +239,30 @@ struct hisi_qp *hpre_create_qp(void)
return NULL;
}
+static void hpre_pasid_enable(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
+ val |= BIT(HPRE_PASID_EN_BIT);
+ writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
+ val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
+ val |= BIT(HPRE_PASID_EN_BIT);
+ writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
+}
+
+static void hpre_pasid_disable(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
+ val &= ~BIT(HPRE_PASID_EN_BIT);
+ writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
+ val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
+ val &= ~BIT(HPRE_PASID_EN_BIT);
+ writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
+}
+
static int hpre_cfg_by_dsm(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
@@ -238,8 +287,40 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
return 0;
}
+static int hpre_set_cluster(struct hisi_qm *qm)
+{
+ u32 cluster_core_mask = HPRE_CLUSTER_CORE_MASK(qm);
+ u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
+ struct device *dev = &qm->pdev->dev;
+ unsigned long offset;
+ u32 val = 0;
+ int ret, i;
+
+ for (i = 0; i < clusters_num; i++) {
+ offset = i * HPRE_CLSTR_ADDR_INTRVL;
+
+ /* clusters initiating */
+ writel(cluster_core_mask,
+ HPRE_ADDR(qm, offset + HPRE_CORE_ENB));
+ writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG));
+ ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset +
+ HPRE_CORE_INI_STATUS), val,
+ ((val & cluster_core_mask) ==
+ cluster_core_mask),
+ HPRE_REG_RD_INTVRL_US,
+ HPRE_REG_RD_TMOUT_US);
+ if (ret) {
+ dev_err(dev,
+ "cluster %d int st status timeout!\n", i);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
/*
- * For Hi1620, we shoul disable FLR triggered by hardware (BME/PM/SRIOV).
+ * For Kunpeng 920, we should disable FLR triggered by hardware (BME/PM/SRIOV).
* Or it may stay in D3 state when we bind and unbind hpre quickly,
* as it does FLR triggered by hardware.
*/
@@ -257,9 +338,8 @@ static void disable_flr_of_bme(struct hisi_qm *qm)
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- unsigned long offset;
- int ret, i;
u32 val;
+ int ret;
writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_ARUSER_M_CFG_ENABLE));
writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE));
@@ -270,11 +350,15 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
writel_relaxed(val, HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
- writel(0x1, HPRE_ADDR(qm, HPRE_TYPES_ENB));
+ if (qm->ver >= QM_HW_V3)
+ writel(HPRE_RSA_ENB | HPRE_ECC_ENB,
+ HPRE_ADDR(qm, HPRE_TYPES_ENB));
+ else
+ writel(HPRE_RSA_ENB, HPRE_ADDR(qm, HPRE_TYPES_ENB));
+
writel(HPRE_QM_VFG_AX_MASK, HPRE_ADDR(qm, HPRE_VFG_AXCACHE));
writel(0x0, HPRE_ADDR(qm, HPRE_BD_ENDIAN));
writel(0x0, HPRE_ADDR(qm, HPRE_INT_MASK));
- writel(0x0, HPRE_ADDR(qm, HPRE_RAS_ECC_1BIT_TH));
writel(0x0, HPRE_ADDR(qm, HPRE_POISON_BYPASS));
writel(0x0, HPRE_ADDR(qm, HPRE_COMM_CNT_CLR_CE));
writel(0x0, HPRE_ADDR(qm, HPRE_ECC_BYPASS));
@@ -291,37 +375,29 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
return -ETIMEDOUT;
}
- for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
- offset = i * HPRE_CLSTR_ADDR_INTRVL;
+ ret = hpre_set_cluster(qm);
+ if (ret)
+ return -ETIMEDOUT;
- /* clusters initiating */
- writel(HPRE_CLUSTER_CORE_MASK,
- HPRE_ADDR(qm, offset + HPRE_CORE_ENB));
- writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG));
- ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset +
- HPRE_CORE_INI_STATUS), val,
- ((val & HPRE_CLUSTER_CORE_MASK) ==
- HPRE_CLUSTER_CORE_MASK),
- HPRE_REG_RD_INTVRL_US,
- HPRE_REG_RD_TMOUT_US);
- if (ret) {
- dev_err(dev,
- "cluster %d int st status timeout!\n", i);
- return -ETIMEDOUT;
- }
- }
+ /* This setting is only needed by Kunpeng 920. */
+ if (qm->ver == QM_HW_V2) {
+ ret = hpre_cfg_by_dsm(qm);
+ if (ret)
+ dev_err(dev, "acpi_evaluate_dsm err.\n");
- ret = hpre_cfg_by_dsm(qm);
- if (ret)
- dev_err(dev, "acpi_evaluate_dsm err.\n");
+ disable_flr_of_bme(qm);
- disable_flr_of_bme(qm);
+ /* Enable data buffer pasid */
+ if (qm->use_sva)
+ hpre_pasid_enable(qm);
+ }
return ret;
}
static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
+ u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
unsigned long offset;
int i;
@@ -330,7 +406,7 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
/* clear clusterX/cluster_ctrl */
- for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
+ for (i = 0; i < clusters_num; i++) {
offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
}
@@ -629,13 +705,14 @@ static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
+ u8 clusters_num = HPRE_CLUSTERS_NUM(qm);
struct device *dev = &qm->pdev->dev;
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
int i, ret;
- for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
+ for (i = 0; i < clusters_num; i++) {
ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
if (ret < 0)
return -EINVAL;
@@ -734,6 +811,11 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return -EINVAL;
}
+ if (pdev->revision >= QM_HW_V3)
+ qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2\n";
+ else
+ qm->algs = "rsa\ndh\n";
+ qm->mode = uacce_mode;
qm->pdev = pdev;
qm->ver = pdev->revision;
qm->sqe_size = HPRE_SQE_SIZE;
@@ -799,6 +881,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.fe = 0,
.ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
HPRE_OOO_ECC_2BIT_ERR,
+ .dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE,
.msi_wr_port = HPRE_WR_MSI_PORT,
.acpi_rst = "HRST",
}
@@ -872,6 +955,14 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_with_qm_start;
}
+ if (qm->uacce) {
+ ret = uacce_register(qm->uacce);
+ if (ret) {
+ pci_err(pdev, "failed to register uacce (%d)!\n", ret);
+ goto err_with_alg_register;
+ }
+ }
+
if (qm->fun_type == QM_HW_PF && vfs_num) {
ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
@@ -904,20 +995,24 @@ static void hpre_remove(struct pci_dev *pdev)
hisi_qm_wait_task_finish(qm, &hpre_devices);
hisi_qm_alg_unregister(qm, &hpre_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
- ret = hisi_qm_sriov_disable(pdev, qm->is_frozen);
+ ret = hisi_qm_sriov_disable(pdev, true);
if (ret) {
pci_err(pdev, "Disable SRIOV fail!\n");
return;
}
}
+
+ hpre_debugfs_exit(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
+
if (qm->fun_type == QM_HW_PF) {
+ if (qm->use_sva && qm->ver == QM_HW_V2)
+ hpre_pasid_disable(qm);
hpre_cnt_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
+ hisi_qm_dev_err_uninit(qm);
}
- hpre_debugfs_exit(qm);
- hisi_qm_stop(qm, QM_NORMAL);
- hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
}
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index f21ccae0e8ea..13cb4216561a 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -54,6 +54,8 @@
#define QM_SQ_PRIORITY_SHIFT 0
#define QM_SQ_ORDERS_SHIFT 4
#define QM_SQ_TYPE_SHIFT 8
+#define QM_QC_PASID_ENABLE 0x1
+#define QM_QC_PASID_ENABLE_SHIFT 7
#define QM_SQ_TYPE_MASK GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
@@ -120,7 +122,7 @@
#define QM_CQC_VFT_VALID (1ULL << 28)
#define QM_SQC_VFT_BASE_SHIFT_V2 28
-#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(5, 0)
+#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2 45
#define QM_SQC_VFT_NUM_MASK_v2 GENMASK(9, 0)
@@ -147,7 +149,6 @@
#define QM_RAS_CE_TIMES_PER_IRQ 1
#define QM_RAS_MSI_INT_SEL 0x1040f4
-#define QM_DEV_RESET_FLAG 0
#define QM_RESET_WAIT_TIMEOUT 400
#define QM_PEH_VENDOR_ID 0x1000d8
#define ACC_VENDOR_ID_VALUE 0x5a5a
@@ -185,6 +186,10 @@
#define QM_SQE_ADDR_MASK GENMASK(7, 0)
#define QM_EQ_DEPTH (1024 * 2)
+#define QM_DRIVER_REMOVING 0
+#define QM_RST_SCHED 1
+#define QM_RESETTING 2
+
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
@@ -619,6 +624,9 @@ static void qm_cq_head_update(struct hisi_qp *qp)
static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
{
+ if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
+ return;
+
if (qp->event_cb) {
qp->event_cb(qp);
return;
@@ -717,7 +725,7 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
dev_err(&qm->pdev->dev, "%s overflow\n",
qm_fifo_overflow[type]);
else
- dev_err(&qm->pdev->dev, "unknown error type %d\n",
+ dev_err(&qm->pdev->dev, "unknown error type %u\n",
type);
if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
@@ -1121,7 +1129,7 @@ static int dump_show(struct hisi_qm *qm, void *info,
dev_info(dev, "%s DUMP\n", info_name);
for (i = 0; i < info_size; i += BYTE_PER_DW) {
- pr_info("DW%d: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
+ pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
info_buf[i], info_buf[i + 1UL],
info_buf[i + 2UL], info_buf[i + 3UL]);
}
@@ -1154,7 +1162,7 @@ static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
ret = kstrtou32(s, 0, &qp_id);
if (ret || qp_id >= qm->qp_num) {
- dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
+ dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
return -EINVAL;
}
@@ -1200,7 +1208,7 @@ static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
ret = kstrtou32(s, 0, &qp_id);
if (ret || qp_id >= qm->qp_num) {
- dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
+ dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
return -EINVAL;
}
@@ -1279,7 +1287,7 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
ret = kstrtou32(presult, 0, q_id);
if (ret || *q_id >= qp_num) {
- dev_err(dev, "Please input qp num (0-%d)", qp_num - 1);
+ dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
return -EINVAL;
}
@@ -1604,7 +1612,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
- u32 error_status, tmp;
+ u32 error_status, tmp, val;
/* read err sts */
tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
@@ -1615,9 +1623,13 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
qm->err_status.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status);
- if (error_status == QM_DB_RANDOM_INVALID) {
+ val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE;
+ /* ce error does not need to be reset */
+ if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
writel(error_status, qm->io_base +
QM_ABNORMAL_INT_SOURCE);
+ writel(qm->err_ini->err_info.nfe,
+ qm->io_base + QM_RAS_NFE_ENABLE);
return ACC_ERR_RECOVERED;
}
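
The check above is a bitmask subset test: (status | mask) == mask holds exactly when every bit set in status also lies inside mask — here the correctable-error bits plus QM_DB_RANDOM_INVALID — so the handler can clear the interrupt source and return ACC_ERR_RECOVERED instead of requesting a reset. A minimal standalone sketch of that test, with purely illustrative mask values rather than the QM register layout:

#include <assert.h>
#include <stdint.h>

static int only_recoverable_bits(uint32_t status, uint32_t ce_mask)
{
	/* True when every bit in status is also present in ce_mask. */
	return (status | ce_mask) == ce_mask;
}

int main(void)
{
	uint32_t ce_mask = 0x0000000e;	/* hypothetical CE-only bits */

	assert(only_recoverable_bits(0x00000002, ce_mask));	/* CE bit only: recoverable */
	assert(!only_recoverable_bits(0x00000102, ce_mask));	/* non-CE bit set: needs reset */
	return 0;
}
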
@@ -1685,6 +1697,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
qp->req_cb = NULL;
qp->qp_id = qp_id;
qp->alg_type = alg_type;
+ qp->is_in_kernel = true;
qm->qp_in_used++;
atomic_set(&qp->qp_status.flags, QP_INIT);
@@ -1747,12 +1760,6 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
if (!sqc)
return -ENOMEM;
- sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, sqc_dma)) {
- kfree(sqc);
- return -ENOMEM;
- }
INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
if (ver == QM_HW_V1) {
@@ -1765,6 +1772,17 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
sqc->cq_num = cpu_to_le16(qp_id);
sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
+ if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
+ sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
+ QM_QC_PASID_ENABLE_SHIFT);
+
+ sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, sqc_dma)) {
+ kfree(sqc);
+ return -ENOMEM;
+ }
+
ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
kfree(sqc);
@@ -1784,12 +1802,6 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
if (!cqc)
return -ENOMEM;
- cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, cqc_dma)) {
- kfree(cqc);
- return -ENOMEM;
- }
INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
if (ver == QM_HW_V1) {
@@ -1802,6 +1814,16 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
}
cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
+ if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
+ cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE);
+
+ cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, cqc_dma)) {
+ kfree(cqc);
+ return -ENOMEM;
+ }
+
ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
kfree(cqc);
@@ -1865,6 +1887,28 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
/**
+ * qp_stop_fail_cb() - call request cb.
+ * @qp: stopped failed qp.
+ *
+ * The callback function should be called whether the task completed or not.
+ */
+static void qp_stop_fail_cb(struct hisi_qp *qp)
+{
+ int qp_used = atomic_read(&qp->qp_status.used);
+ u16 cur_tail = qp->qp_status.sq_tail;
+ u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH;
+ struct hisi_qm *qm = qp->qm;
+ u16 pos;
+ int i;
+
+ for (i = 0; i < qp_used; i++) {
+ pos = (i + cur_head) % QM_Q_DEPTH;
+ qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
+ atomic_dec(&qp->qp_status.used);
+ }
+}
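
The head computation in qp_stop_fail_cb() is standard circular-queue arithmetic: the oldest outstanding entry sits qp_used slots behind the current tail, modulo the queue depth. A small worked sketch with made-up numbers (DEPTH stands in for QM_Q_DEPTH; the values are illustrative only):

#include <assert.h>

#define DEPTH 1024

int main(void)
{
	int tail = 5;				/* next free slot */
	int used = 10;				/* outstanding requests */
	int head = (tail + DEPTH - used) % DEPTH;
	int i, pos;

	assert(head == 1019);			/* oldest outstanding entry */
	for (i = 0; i < used; i++) {
		pos = (i + head) % DEPTH;	/* 1019..1023, then 0..4 */
		(void)pos;			/* the driver invokes req_cb on the sqe at pos */
	}
	return 0;
}
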
+
+/**
* qm_drain_qp() - Drain a qp.
* @qp: The qp we want to drain.
*
@@ -1959,6 +2003,9 @@ static int qm_stop_qp_nolock(struct hisi_qp *qp)
else
flush_work(&qp->qm->work);
+ if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
+ qp_stop_fail_cb(qp);
+
dev_dbg(dev, "stop queue %u!", qp->qp_id);
return 0;
@@ -2065,6 +2112,7 @@ static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
qp->uacce_q = q;
qp->event_cb = qm_qp_event_notifier;
qp->pasid = arg;
+ qp->is_in_kernel = false;
return 0;
}
@@ -2206,7 +2254,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
if (IS_ERR(uacce))
return PTR_ERR(uacce);
- if (uacce->flags & UACCE_DEV_SVA) {
+ if (uacce->flags & UACCE_DEV_SVA && qm->mode == UACCE_MODE_SVA) {
qm->use_sva = true;
} else {
/* only consider sva case */
@@ -2248,17 +2296,15 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
*/
static int qm_frozen(struct hisi_qm *qm)
{
- down_write(&qm->qps_lock);
-
- if (qm->is_frozen) {
- up_write(&qm->qps_lock);
+ if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
return 0;
- }
+
+ down_write(&qm->qps_lock);
if (!qm->qp_in_used) {
qm->qp_in_used = qm->qp_num;
- qm->is_frozen = true;
up_write(&qm->qps_lock);
+ set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
return 0;
}
@@ -2311,6 +2357,10 @@ void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
msleep(WAIT_PERIOD);
}
+ while (test_bit(QM_RST_SCHED, &qm->misc_ctl) ||
+ test_bit(QM_RESETTING, &qm->misc_ctl))
+ msleep(WAIT_PERIOD);
+
udelay(REMOVE_WAIT_DELAY);
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
@@ -2439,7 +2489,7 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
mutex_init(&qm->mailbox_lock);
init_rwsem(&qm->qps_lock);
qm->qp_in_used = 0;
- qm->is_frozen = false;
+ qm->misc_ctl = false;
}
static void hisi_qm_pci_uninit(struct hisi_qm *qm)
@@ -2558,15 +2608,9 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
dma_addr_t eqc_dma;
int ret;
- eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL); //todo
+ eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
if (!eqc)
return -ENOMEM;
- eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, eqc_dma)) {
- kfree(eqc);
- return -ENOMEM;
- }
eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
@@ -2574,6 +2618,13 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+ eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, eqc_dma)) {
+ kfree(eqc);
+ return -ENOMEM;
+ }
+
ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
kfree(eqc);
@@ -2591,6 +2642,11 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
if (!aeqc)
return -ENOMEM;
+
+ aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
+ aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
+ aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+
aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
DMA_TO_DEVICE);
if (dma_mapping_error(dev, aeqc_dma)) {
@@ -2598,10 +2654,6 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
return -ENOMEM;
}
- aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
- aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
- aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
-
ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
kfree(aeqc);
@@ -2677,7 +2729,7 @@ int hisi_qm_start(struct hisi_qm *qm)
return -EPERM;
}
- dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num);
+ dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);
if (!qm->qp_num) {
dev_err(dev, "qp_num should not be 0\n");
@@ -3112,7 +3164,7 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
mutex_unlock(&qm_list->lock);
if (ret)
- pr_info("Failed to create qps, node[%d], alg[%d], qp[%d]!\n",
+ pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n",
node, alg_type, qp_num);
err:
@@ -3248,7 +3300,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
- return hisi_qm_sriov_disable(pdev, 0);
+ return hisi_qm_sriov_disable(pdev, false);
else
return hisi_qm_sriov_enable(pdev, num_vfs);
}
@@ -3269,12 +3321,19 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
if (err_sts & qm->err_ini->err_info.ecc_2bits_mask)
qm->err_status.is_dev_ecc_mbit = true;
- if (!qm->err_ini->log_dev_hw_err) {
- dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n");
- return ACC_ERR_NEED_RESET;
+ if (qm->err_ini->log_dev_hw_err)
+ qm->err_ini->log_dev_hw_err(qm, err_sts);
+
+ /* ce error does not need to be reset */
+ if ((err_sts | qm->err_ini->err_info.dev_ce_mask) ==
+ qm->err_ini->err_info.dev_ce_mask) {
+ if (qm->err_ini->clear_dev_hw_err_status)
+ qm->err_ini->clear_dev_hw_err_status(qm,
+ err_sts);
+
+ return ACC_ERR_RECOVERED;
}
- qm->err_ini->log_dev_hw_err(qm, err_sts);
return ACC_ERR_NEED_RESET;
}
@@ -3313,7 +3372,7 @@ pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
if (pdev->is_virtfn)
return PCI_ERS_RESULT_NONE;
- pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
+ pci_info(pdev, "PCI error detected, state(=%u)!!\n", state);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
@@ -3465,7 +3524,7 @@ static int qm_reset_prepare_ready(struct hisi_qm *qm)
int delay = 0;
/* All reset requests need to be queued for processing */
- while (test_and_set_bit(QM_DEV_RESET_FLAG, &pf_qm->reset_flag)) {
+ while (test_and_set_bit(QM_RESETTING, &pf_qm->misc_ctl)) {
msleep(++delay);
if (delay > QM_RESET_WAIT_TIMEOUT)
return -EBUSY;
@@ -3489,6 +3548,7 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
ret = qm_vf_reset_prepare(qm, QM_SOFT_RESET);
if (ret) {
pci_err(pdev, "Fails to stop VFs!\n");
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
return ret;
}
}
@@ -3496,9 +3556,12 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
ret = hisi_qm_stop(qm, QM_SOFT_RESET);
if (ret) {
pci_err(pdev, "Fails to stop QM!\n");
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
return ret;
}
+ clear_bit(QM_RST_SCHED, &qm->misc_ctl);
+
return 0;
}
@@ -3736,7 +3799,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
hisi_qm_dev_err_init(qm);
qm_restart_done(qm);
- clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
return 0;
}
@@ -3749,18 +3812,23 @@ static int qm_controller_reset(struct hisi_qm *qm)
pci_info(pdev, "Controller resetting...\n");
ret = qm_controller_reset_prepare(qm);
- if (ret)
+ if (ret) {
+ clear_bit(QM_RST_SCHED, &qm->misc_ctl);
return ret;
+ }
ret = qm_soft_reset(qm);
if (ret) {
pci_err(pdev, "Controller reset failed (%d)\n", ret);
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
return ret;
}
ret = qm_controller_reset_done(qm);
- if (ret)
+ if (ret) {
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
return ret;
+ }
pci_info(pdev, "Controller reset complete\n");
@@ -3867,8 +3935,6 @@ static bool qm_flr_reset_complete(struct pci_dev *pdev)
return false;
}
- clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
-
return true;
}
@@ -3912,6 +3978,8 @@ void hisi_qm_reset_done(struct pci_dev *pdev)
flr_done:
if (qm_flr_reset_complete(pdev))
pci_info(pdev, "FLR reset complete\n");
+
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
@@ -3922,7 +3990,9 @@ static irqreturn_t qm_abnormal_irq(int irq, void *data)
atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
ret = qm_process_dev_error(qm);
- if (ret == ACC_ERR_NEED_RESET)
+ if (ret == ACC_ERR_NEED_RESET &&
+ !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) &&
+ !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl))
schedule_work(&qm->rst_work);
return IRQ_HANDLED;
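
The abnormal IRQ handler above schedules the reset work at most once: test_and_set_bit() returns the previous bit value, so only the caller that actually flips QM_RST_SCHED queues the work, and the bit is cleared again once reset preparation has run (or failed). A minimal userspace sketch of the same schedule-once pattern, using C11 atomics in place of the kernel bit ops:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool rst_sched;	/* stands in for the QM_RST_SCHED bit */

static void maybe_schedule_reset(bool need_reset, bool removing)
{
	/* Only the first caller that flips the flag queues the work. */
	if (need_reset && !removing && !atomic_exchange(&rst_sched, true))
		printf("reset work scheduled\n");
}

int main(void)
{
	maybe_schedule_reset(true, false);	/* queues the work */
	maybe_schedule_reset(true, false);	/* no-op: already pending */
	atomic_store(&rst_sched, false);	/* cleared when the reset path finishes */
	return 0;
}
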
@@ -3934,21 +4004,20 @@ static int qm_irq_register(struct hisi_qm *qm)
int ret;
ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
- qm_irq, IRQF_SHARED, qm->dev_name, qm);
+ qm_irq, 0, qm->dev_name, qm);
if (ret)
return ret;
if (qm->ver != QM_HW_V1) {
ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
- qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm);
+ qm_aeq_irq, 0, qm->dev_name, qm);
if (ret)
goto err_aeq_irq;
if (qm->fun_type == QM_HW_PF) {
ret = request_irq(pci_irq_vector(pdev,
QM_ABNORMAL_EVENT_IRQ_VECTOR),
- qm_abnormal_irq, IRQF_SHARED,
- qm->dev_name, qm);
+ qm_abnormal_irq, 0, qm->dev_name, qm);
if (ret)
goto err_abonormal_irq;
}
@@ -4004,6 +4073,9 @@ int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
int flag = 0;
int ret = 0;
+ /* HW V2 does not support using uacce sva mode and hardware crypto algs at the same time */
+ if (qm->ver <= QM_HW_V2 && qm->use_sva)
+ return 0;
mutex_lock(&qm_list->lock);
if (list_empty(&qm_list->list))
@@ -4035,6 +4107,9 @@ EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
*/
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
+ if (qm->ver <= QM_HW_V2 && qm->use_sva)
+ return;
+
mutex_lock(&qm_list->lock);
list_del(&qm->list);
mutex_unlock(&qm_list->lock);
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 8624d1288afe..54967c6b9c78 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -85,6 +85,11 @@
/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR 1
+/* uacce mode of the driver */
+#define UACCE_MODE_NOUACCE 0 /* don't use uacce */
+#define UACCE_MODE_SVA 1 /* use uacce sva mode */
+#define UACCE_MODE_DESC "0(default) means only register to crypto, 1 means both register to crypto and uacce"
+
enum qm_stop_reason {
QM_NORMAL,
QM_SOFT_RESET,
@@ -168,6 +173,7 @@ struct hisi_qm_err_info {
char *acpi_rst;
u32 msi_wr_port;
u32 ecc_2bits_mask;
+ u32 dev_ce_mask;
u32 ce;
u32 nfe;
u32 fe;
@@ -225,7 +231,7 @@ struct hisi_qm {
struct hisi_qm_status status;
const struct hisi_qm_err_ini *err_ini;
struct hisi_qm_err_status err_status;
- unsigned long reset_flag;
+ unsigned long misc_ctl; /* driver removing and reset sched */
struct rw_semaphore qps_lock;
struct idr qp_idr;
@@ -249,6 +255,7 @@ struct hisi_qm {
resource_size_t phys_base;
resource_size_t phys_size;
struct uacce_device *uacce;
+ int mode;
};
struct hisi_qp_status {
@@ -282,6 +289,7 @@ struct hisi_qp {
struct hisi_qm *qm;
bool is_resetting;
+ bool is_in_kernel;
u16 pasid;
struct uacce_queue *uacce_q;
};
@@ -299,7 +307,7 @@ static inline int q_num_set(const char *val, const struct kernel_param *kp,
if (!pdev) {
q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
- pr_info("No device found currently, suppose queue number is %d\n",
+ pr_info("No device found currently, suppose queue number is %u\n",
q_num);
} else {
if (pdev->revision == QM_HW_V1)
@@ -333,6 +341,27 @@ static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
return param_set_int(val, kp);
}
+static inline int mode_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret != 0 || (n != UACCE_MODE_SVA &&
+ n != UACCE_MODE_NOUACCE))
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int uacce_mode_set(const char *val, const struct kernel_param *kp)
+{
+ return mode_set(val, kp);
+}
+
static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
{
INIT_LIST_HEAD(&qm_list->list);
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index b35c1c2271a3..dc68ba76f65e 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -13,6 +13,7 @@
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
+#include <linux/uacce.h>
#include "sec.h"
@@ -74,6 +75,16 @@
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
+#define SEC_USER1_ENABLE_CONTEXT_SSV BIT(24)
+#define SEC_USER1_ENABLE_DATA_SSV BIT(16)
+#define SEC_USER1_WB_CONTEXT_SSV BIT(8)
+#define SEC_USER1_WB_DATA_SSV BIT(0)
+#define SEC_USER1_SVA_SET (SEC_USER1_ENABLE_CONTEXT_SSV | \
+ SEC_USER1_ENABLE_DATA_SSV | \
+ SEC_USER1_WB_CONTEXT_SSV | \
+ SEC_USER1_WB_DATA_SSV)
+#define SEC_USER1_SMMU_SVA (SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
+#define SEC_USER1_SMMU_MASK (~SEC_USER1_SVA_SET)
#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
#define SEC_DELAY_10_US 10
@@ -233,6 +244,18 @@ struct hisi_qp **sec_create_qps(void)
return NULL;
}
+static const struct kernel_param_ops sec_uacce_mode_ops = {
+ .set = uacce_mode_set,
+ .get = param_get_int,
+};
+
+/*
+ * uacce_mode = 0 means sec registers only to crypto,
+ * uacce_mode = 1 means sec registers to both crypto and uacce.
+ */
+static u32 uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
static const struct pci_device_id sec_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
@@ -299,7 +322,11 @@ static int sec_engine_init(struct hisi_qm *qm)
writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
- reg |= SEC_USER1_SMMU_NORMAL;
+ reg &= SEC_USER1_SMMU_MASK;
+ if (qm->use_sva && qm->ver == QM_HW_V2)
+ reg |= SEC_USER1_SMMU_SVA;
+ else
+ reg |= SEC_USER1_SMMU_NORMAL;
writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
writel(SEC_SINGLE_PORT_MAX_TRANS,
@@ -725,6 +752,7 @@ static const struct hisi_qm_err_ini sec_err_ini = {
QM_ACC_WB_NOT_READY_TIMEOUT,
.fe = 0,
.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC,
+ .dev_ce_mask = SEC_RAS_CE_ENB_MSK,
.msi_wr_port = BIT(0),
.acpi_rst = "SRST",
}
@@ -758,6 +786,8 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->pdev = pdev;
qm->ver = pdev->revision;
+ qm->algs = "cipher\ndigest\naead\n";
+ qm->mode = uacce_mode;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
@@ -885,6 +915,14 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_qm_stop;
}
+ if (qm->uacce) {
+ ret = uacce_register(qm->uacce);
+ if (ret) {
+ pci_err(pdev, "failed to register uacce (%d)!\n", ret);
+ goto err_alg_unregister;
+ }
+ }
+
if (qm->fun_type == QM_HW_PF && vfs_num) {
ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
@@ -912,7 +950,7 @@ static void sec_remove(struct pci_dev *pdev)
hisi_qm_wait_task_finish(qm, &sec_devices);
hisi_qm_alg_unregister(qm, &sec_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
- hisi_qm_sriov_disable(pdev, qm->is_frozen);
+ hisi_qm_sriov_disable(pdev, true);
sec_debugfs_exit(qm);
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 4fb5a32bf830..02c445722445 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -66,6 +66,7 @@
#define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
#define HZIP_CORE_INT_RAS_CE_ENB 0x301160
+#define HZIP_CORE_INT_RAS_CE_ENABLE 0x1
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE
@@ -211,6 +212,19 @@ static const struct debugfs_reg32 hzip_dfx_regs[] = {
{"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull},
};
+static const struct kernel_param_ops zip_uacce_mode_ops = {
+ .set = uacce_mode_set,
+ .get = param_get_int,
+};
+
+/*
+ * uacce_mode = 0 means zip registers only to crypto,
+ * uacce_mode = 1 means zip registers to both crypto and uacce.
+ */
+static u32 uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &zip_uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
+
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF);
@@ -279,7 +293,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
- if (qm->use_sva) {
+ if (qm->use_sva && qm->ver == QM_HW_V2) {
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
} else {
@@ -314,7 +328,8 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_SOURCE);
/* configure error type */
- writel(0x1, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+ writel(HZIP_CORE_INT_RAS_CE_ENABLE,
+ qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
@@ -714,6 +729,7 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
QM_ACC_WB_NOT_READY_TIMEOUT,
.fe = 0,
.ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC,
+ .dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE,
.msi_wr_port = HZIP_WR_PORT,
.acpi_rst = "ZRST",
}
@@ -752,6 +768,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->pdev = pdev;
qm->ver = pdev->revision;
qm->algs = "zlib\ngzip";
+ qm->mode = uacce_mode;
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
@@ -887,7 +904,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
hisi_qm_alg_unregister(qm, &zip_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
- hisi_qm_sriov_disable(pdev, qm->is_frozen);
+ hisi_qm_sriov_disable(pdev, true);
hisi_zip_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 2e1562108a85..6364583b88b2 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1166,11 +1166,8 @@ static int safexcel_request_ring_irq(void *pdev, int irqid,
dev = &plf_pdev->dev;
irq = platform_get_irq_byname(plf_pdev, irq_name);
- if (irq < 0) {
- dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
- irq_name, irq);
+ if (irq < 0)
return irq;
- }
} else {
return -ENXIO;
}
@@ -1999,3 +1996,4 @@ MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 50fb6d90a2e0..bc60b5802256 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -13,6 +13,7 @@
#include <crypto/sha3.h>
#include <crypto/skcipher.h>
#include <crypto/sm3.h>
+#include <crypto/internal/cipher.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
diff --git a/drivers/crypto/keembay/Kconfig b/drivers/crypto/keembay/Kconfig
index f2e17b0c4fa0..00cf8f028cb9 100644
--- a/drivers/crypto/keembay/Kconfig
+++ b/drivers/crypto/keembay/Kconfig
@@ -38,3 +38,34 @@ config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
Provides OCS version of cts(cbc(aes)) and cts(cbc(sm4)).
Intel does not recommend use of CTS mode with AES/SM4.
+
+config CRYPTO_DEV_KEEMBAY_OCS_HCU
+ tristate "Support for Intel Keem Bay OCS HCU HW acceleration"
+ select CRYPTO_HASH
+ select CRYPTO_ENGINE
+ depends on HAS_IOMEM
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on OF || COMPILE_TEST
+ help
+ Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) Hash
+ Control Unit (HCU) hardware acceleration for use with Crypto API.
+
+ Provides OCS HCU hardware acceleration of sha256, sha384, sha512, and
+ sm3, as well as the HMAC variant of these algorithms.
+
+ Say Y or M if you're building for the Intel Keem Bay SoC. If compiled
+ as a module, the module will be called keembay-ocs-hcu.
+
+ If unsure, say N.
+
+config CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
+ bool "Enable sha224 and hmac(sha224) support in Intel Keem Bay OCS HCU"
+ depends on CRYPTO_DEV_KEEMBAY_OCS_HCU
+ help
+ Enables support for sha224 and hmac(sha224) algorithms in the Intel
+ Keem Bay OCS HCU driver. Intel recommends not to use these
+ algorithms.
+
+ Provides OCS HCU hardware acceleration of sha224 and hmac(224).
+
+ If unsure, say N.
diff --git a/drivers/crypto/keembay/Makefile b/drivers/crypto/keembay/Makefile
index f21e2c4ab3b3..aea03d4432c4 100644
--- a/drivers/crypto/keembay/Makefile
+++ b/drivers/crypto/keembay/Makefile
@@ -3,3 +3,6 @@
#
obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4) += keembay-ocs-aes.o
keembay-ocs-aes-objs := keembay-ocs-aes-core.o ocs-aes.o
+
+obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU) += keembay-ocs-hcu.o
+keembay-ocs-hcu-objs := keembay-ocs-hcu-core.o ocs-hcu.o
diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
new file mode 100644
index 000000000000..c4b97b4160e9
--- /dev/null
+++ b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
@@ -0,0 +1,1264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay OCS HCU Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#include <crypto/engine.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha2.h>
+#include <crypto/sm3.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
+
+#include "ocs-hcu.h"
+
+#define DRV_NAME "keembay-ocs-hcu"
+
+/* Flag marking a final request. */
+#define REQ_FINAL BIT(0)
+/* Flag marking a HMAC request. */
+#define REQ_FLAGS_HMAC BIT(1)
+/* Flag set when HW HMAC is being used. */
+#define REQ_FLAGS_HMAC_HW BIT(2)
+/* Flag set when SW HMAC is being used. */
+#define REQ_FLAGS_HMAC_SW BIT(3)
+
+/**
+ * struct ocs_hcu_ctx: OCS HCU Transform context.
+ * @engine_ctx: Crypto Engine context.
+ * @hcu_dev: The OCS HCU device used by the transformation.
+ * @key: The key (used only for HMAC transformations).
+ * @key_len: The length of the key.
+ * @is_sm3_tfm: Whether or not this is an SM3 transformation.
+ * @is_hmac_tfm: Whether or not this is a HMAC transformation.
+ */
+struct ocs_hcu_ctx {
+ struct crypto_engine_ctx engine_ctx;
+ struct ocs_hcu_dev *hcu_dev;
+ u8 key[SHA512_BLOCK_SIZE];
+ size_t key_len;
+ bool is_sm3_tfm;
+ bool is_hmac_tfm;
+};
+
+/**
+ * struct ocs_hcu_rctx - Context for the request.
+ * @hcu_dev: OCS HCU device to be used to service the request.
+ * @flags: Flags tracking request status.
+ * @algo: Algorithm to use for the request.
+ * @blk_sz: Block size of the transformation / request.
+ * @dig_sz: Digest size of the transformation / request.
+ * @dma_list: OCS DMA linked list.
+ * @hash_ctx: OCS HCU hashing context.
+ * @buffer: Buffer to store: partial block of data and SW HMAC
+ * artifacts (ipad, opad, etc.).
+ * @buf_cnt: Number of bytes currently stored in the buffer.
+ * @buf_dma_addr: The DMA address of @buffer (when mapped).
+ * @buf_dma_count: The number of bytes in @buffer currently DMA-mapped.
+ * @sg: Head of the scatterlist entries containing data.
+ * @sg_data_total: Total data in the SG list at any time.
+ * @sg_data_offset: Offset into the data of the current individual SG node.
+ * @sg_dma_nents: Number of sg entries mapped in dma_list.
+ */
+struct ocs_hcu_rctx {
+ struct ocs_hcu_dev *hcu_dev;
+ u32 flags;
+ enum ocs_hcu_algo algo;
+ size_t blk_sz;
+ size_t dig_sz;
+ struct ocs_hcu_dma_list *dma_list;
+ struct ocs_hcu_hash_ctx hash_ctx;
+ /*
+ * Buffer is double the block size because we need space for SW HMAC
+ * artifacts, i.e.:
+ * - ipad (1 block) + a possible partial block of data.
+ * - opad (1 block) + digest of H(k ^ ipad || m)
+ */
+ u8 buffer[2 * SHA512_BLOCK_SIZE];
+ size_t buf_cnt;
+ dma_addr_t buf_dma_addr;
+ size_t buf_dma_count;
+ struct scatterlist *sg;
+ unsigned int sg_data_total;
+ unsigned int sg_data_offset;
+ unsigned int sg_dma_nents;
+};
+
+/**
+ * struct ocs_hcu_drv - Driver data
+ * @dev_list: The list of HCU devices.
+ * @lock: The lock protecting dev_list.
+ */
+struct ocs_hcu_drv {
+ struct list_head dev_list;
+ spinlock_t lock; /* Protects dev_list. */
+};
+
+static struct ocs_hcu_drv ocs_hcu = {
+ .dev_list = LIST_HEAD_INIT(ocs_hcu.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(ocs_hcu.lock),
+};
+
+/*
+ * Return the total amount of data in the request; that is: the data in the
+ * request buffer + the data in the sg list.
+ */
+static inline unsigned int kmb_get_total_data(struct ocs_hcu_rctx *rctx)
+{
+ return rctx->sg_data_total + rctx->buf_cnt;
+}
+
+/* Move remaining content of scatter-gather list to context buffer. */
+static int flush_sg_to_ocs_buffer(struct ocs_hcu_rctx *rctx)
+{
+ size_t count;
+
+ if (rctx->sg_data_total > (sizeof(rctx->buffer) - rctx->buf_cnt)) {
+ WARN(1, "%s: sg data does not fit in buffer\n", __func__);
+ return -EINVAL;
+ }
+
+ while (rctx->sg_data_total) {
+ if (!rctx->sg) {
+ WARN(1, "%s: unexpected NULL sg\n", __func__);
+ return -EINVAL;
+ }
+ /*
+ * If current sg has been fully processed, skip to the next
+ * one.
+ */
+ if (rctx->sg_data_offset == rctx->sg->length) {
+ rctx->sg = sg_next(rctx->sg);
+ rctx->sg_data_offset = 0;
+ continue;
+ }
+ /*
+ * Determine the maximum data available to copy from the node.
+ * Minimum of the length left in the sg node, or the total data
+ * in the request.
+ */
+ count = min(rctx->sg->length - rctx->sg_data_offset,
+ rctx->sg_data_total);
+ /* Copy from scatter-list entry to context buffer. */
+ scatterwalk_map_and_copy(&rctx->buffer[rctx->buf_cnt],
+ rctx->sg, rctx->sg_data_offset,
+ count, 0);
+
+ rctx->sg_data_offset += count;
+ rctx->sg_data_total -= count;
+ rctx->buf_cnt += count;
+ }
+
+ return 0;
+}
+
+static struct ocs_hcu_dev *kmb_ocs_hcu_find_dev(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ /* If the HCU device for the request was previously set, return it. */
+ if (tctx->hcu_dev)
+ return tctx->hcu_dev;
+
+ /*
+ * Otherwise, get the first HCU device available (there should be one
+ * and only one device).
+ */
+ spin_lock_bh(&ocs_hcu.lock);
+ tctx->hcu_dev = list_first_entry_or_null(&ocs_hcu.dev_list,
+ struct ocs_hcu_dev,
+ list);
+ spin_unlock_bh(&ocs_hcu.lock);
+
+ return tctx->hcu_dev;
+}
+
+/* Free OCS DMA linked list and DMA-able context buffer. */
+static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
+ struct ocs_hcu_rctx *rctx)
+{
+ struct ocs_hcu_dev *hcu_dev = rctx->hcu_dev;
+ struct device *dev = hcu_dev->dev;
+
+ /* Unmap rctx->buffer (if mapped). */
+ if (rctx->buf_dma_count) {
+ dma_unmap_single(dev, rctx->buf_dma_addr, rctx->buf_dma_count,
+ DMA_TO_DEVICE);
+ rctx->buf_dma_count = 0;
+ }
+
+ /* Unmap req->src (if mapped). */
+ if (rctx->sg_dma_nents) {
+ dma_unmap_sg(dev, req->src, rctx->sg_dma_nents, DMA_TO_DEVICE);
+ rctx->sg_dma_nents = 0;
+ }
+
+ /* Free dma_list (if allocated). */
+ if (rctx->dma_list) {
+ ocs_hcu_dma_list_free(hcu_dev, rctx->dma_list);
+ rctx->dma_list = NULL;
+ }
+}
+
+/*
+ * Prepare for DMA operation:
+ * - DMA-map request context buffer (if needed)
+ * - DMA-map SG list (only the entries to be processed, see note below)
+ * - Allocate OCS HCU DMA linked list (number of elements = SG entries to
+ * process + context buffer (if not empty)).
+ * - Add DMA-mapped request context buffer to OCS HCU DMA list.
+ * - Add SG entries to DMA list.
+ *
+ * Note: if this is a final request, we process all the data in the SG list,
+ * otherwise we can only process up to the maximum amount of block-aligned data
+ * (the remainder will be put into the context buffer and processed in the next
+ * request).
+ */
+static int kmb_ocs_dma_prepare(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct device *dev = rctx->hcu_dev->dev;
+ unsigned int remainder = 0;
+ unsigned int total;
+ size_t nents;
+ size_t count;
+ int rc;
+ int i;
+
+ /* This function should be called only when there is data to process. */
+ total = kmb_get_total_data(rctx);
+ if (!total)
+ return -EINVAL;
+
+ /*
+ * If this is not a final DMA (terminated DMA), the data passed to the
+ * HCU must be aligned to the block size; compute the remainder data to
+ * be processed in the next request.
+ */
+ if (!(rctx->flags & REQ_FINAL))
+ remainder = total % rctx->blk_sz;
+
+ /* Determine the number of scatter gather list entries to process. */
+ nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);
+
+ /* If there are entries to process, map them. */
+ if (nents) {
+ rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,
+ DMA_TO_DEVICE);
+ if (!rctx->sg_dma_nents) {
+ dev_err(dev, "Failed to MAP SG\n");
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ /*
+ * The value returned by dma_map_sg() can be < nents; so update
+ * nents accordingly.
+ */
+ nents = rctx->sg_dma_nents;
+ }
+
+ /*
+ * If context buffer is not empty, map it and add extra DMA entry for
+ * it.
+ */
+ if (rctx->buf_cnt) {
+ rctx->buf_dma_addr = dma_map_single(dev, rctx->buffer,
+ rctx->buf_cnt,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, rctx->buf_dma_addr)) {
+ dev_err(dev, "Failed to map request context buffer\n");
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ rctx->buf_dma_count = rctx->buf_cnt;
+ /* Increase number of dma entries. */
+ nents++;
+ }
+
+ /* Allocate OCS HCU DMA list. */
+ rctx->dma_list = ocs_hcu_dma_list_alloc(rctx->hcu_dev, nents);
+ if (!rctx->dma_list) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* Add request context buffer (if previously DMA-mapped) */
+ if (rctx->buf_dma_count) {
+ rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev, rctx->dma_list,
+ rctx->buf_dma_addr,
+ rctx->buf_dma_count);
+ if (rc)
+ goto cleanup;
+ }
+
+ /* Add the SG nodes to be processed to the DMA linked list. */
+ for_each_sg(req->src, rctx->sg, rctx->sg_dma_nents, i) {
+ /*
+ * The number of bytes to add to the list entry is the minimum
+ * between:
+ * - The DMA length of the SG entry.
+ * - The data left to be processed.
+ */
+ count = min(rctx->sg_data_total - remainder,
+ sg_dma_len(rctx->sg) - rctx->sg_data_offset);
+ /*
+ * Do not create a zero length DMA descriptor. Check in case of
+ * zero length SG node.
+ */
+ if (count == 0)
+ continue;
+ /* Add sg to HCU DMA list. */
+ rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev,
+ rctx->dma_list,
+ rctx->sg->dma_address,
+ count);
+ if (rc)
+ goto cleanup;
+
+ /* Update amount of data remaining in SG list. */
+ rctx->sg_data_total -= count;
+
+ /*
+ * If remaining data is equal to remainder (note: 'less than'
+ * case should never happen in practice), we are done: update
+ * offset and exit the loop.
+ */
+ if (rctx->sg_data_total <= remainder) {
+ WARN_ON(rctx->sg_data_total < remainder);
+ rctx->sg_data_offset += count;
+ break;
+ }
+
+ /*
+ * If we get here, it is because we need to process the next sg in
+ * the list; set offset within the sg to 0.
+ */
+ rctx->sg_data_offset = 0;
+ }
+
+ return 0;
+cleanup:
+ dev_err(dev, "Failed to prepare DMA.\n");
+ kmb_ocs_hcu_dma_cleanup(req, rctx);
+
+ return rc;
+}
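
As the comment before kmb_ocs_dma_prepare() describes, a non-final request only feeds block-aligned data to the HCU; whatever does not fill a whole block is kept in the request buffer and processed by the next update() or the final step. A short worked sketch of that split, with made-up sizes (a SHA-256-style 64-byte block):

#include <assert.h>

int main(void)
{
	unsigned int blk_sz = 64;	/* illustrative block size */
	unsigned int buffered = 10;	/* bytes already in rctx->buffer */
	unsigned int sg_data = 150;	/* bytes sitting in the scatterlist */
	unsigned int total = buffered + sg_data;
	unsigned int remainder = total % blk_sz;	/* 160 % 64 = 32 */

	assert(remainder == 32);
	assert(total - remainder == 128);	/* two full blocks are DMA'd now */
	/* The remaining 32 bytes stay in the buffer for the next request. */
	return 0;
}
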
+
+static void kmb_ocs_hcu_secure_cleanup(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+
+ /* Clear buffer of any data. */
+ memzero_explicit(rctx->buffer, sizeof(rctx->buffer));
+}
+
+static int kmb_ocs_hcu_handle_queue(struct ahash_request *req)
+{
+ struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
+
+ if (!hcu_dev)
+ return -ENOENT;
+
+ return crypto_transfer_hash_request_to_engine(hcu_dev->engine, req);
+}
+
+static int prepare_ipad(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
+ int i;
+
+ WARN(rctx->buf_cnt, "%s: Context buffer is not empty\n", __func__);
+ WARN(!(rctx->flags & REQ_FLAGS_HMAC_SW),
+ "%s: HMAC_SW flag is not set\n", __func__);
+ /*
+ * Key length must be equal to block size. If key is shorter,
+ * we pad it with zero (note: key cannot be longer, since
+ * longer keys are hashed by kmb_ocs_hcu_setkey()).
+ */
+ if (ctx->key_len > rctx->blk_sz) {
+ WARN(1, "%s: Invalid key length in tfm context\n", __func__);
+ return -EINVAL;
+ }
+ memzero_explicit(&ctx->key[ctx->key_len],
+ rctx->blk_sz - ctx->key_len);
+ ctx->key_len = rctx->blk_sz;
+ /*
+ * Prepare IPAD for HMAC. Only done for first block.
+ * HMAC(k,m) = H(k ^ opad || H(k ^ ipad || m))
+ * k ^ ipad will be first hashed block.
+ * k ^ opad will be calculated in the final request.
+ * Only needed if not using HW HMAC.
+ */
+ for (i = 0; i < rctx->blk_sz; i++)
+ rctx->buffer[i] = ctx->key[i] ^ HMAC_IPAD_VALUE;
+ rctx->buf_cnt = rctx->blk_sz;
+
+ return 0;
+}
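
prepare_ipad() and the later OPAD step implement the textbook software HMAC construction, HMAC(k, m) = H((k ^ opad) || H((k ^ ipad) || m)), with the OCS hash engine providing H. The standalone sketch below shows the same construction around a generic hash callback; the block/digest sizes are SHA-256-like and the message bound is arbitrary, so treat it as an illustration of the algorithm rather than the driver's code path:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BLK_SZ	64	/* SHA-256 block size, for illustration */
#define DIG_SZ	32	/* SHA-256 digest size */
#define IPAD	0x36
#define OPAD	0x5c

typedef void (*hash_fn)(const uint8_t *msg, size_t len, uint8_t *digest);

/* Computes HMAC(key, msg) into out[DIG_SZ]; msg_len must be <= 4096 here. */
static void sw_hmac(hash_fn hash, const uint8_t *key, size_t key_len,
		    const uint8_t *msg, size_t msg_len, uint8_t *out)
{
	uint8_t k[BLK_SZ] = { 0 };		/* key, zero-padded to one block */
	uint8_t inner[BLK_SZ + 4096];		/* (k ^ ipad) || m */
	uint8_t outer[BLK_SZ + DIG_SZ];		/* (k ^ opad) || H((k ^ ipad) || m) */
	size_t i;

	if (key_len > BLK_SZ)
		hash(key, key_len, k);		/* long keys are hashed first */
	else
		memcpy(k, key, key_len);

	for (i = 0; i < BLK_SZ; i++) {
		inner[i] = k[i] ^ IPAD;
		outer[i] = k[i] ^ OPAD;
	}
	memcpy(inner + BLK_SZ, msg, msg_len);
	hash(inner, BLK_SZ + msg_len, outer + BLK_SZ);	/* inner digest */
	hash(outer, BLK_SZ + DIG_SZ, out);		/* final HMAC */
}
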
+
+static int kmb_ocs_hcu_do_one_request(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
+ struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
+ int rc;
+ int i;
+
+ if (!hcu_dev) {
+ rc = -ENOENT;
+ goto error;
+ }
+
+ /*
+ * If hardware HMAC flag is set, perform HMAC in hardware.
+ *
+ * NOTE: this flag implies REQ_FINAL && kmb_get_total_data(rctx)
+ */
+ if (rctx->flags & REQ_FLAGS_HMAC_HW) {
+ /* Map input data into the HCU DMA linked list. */
+ rc = kmb_ocs_dma_prepare(req);
+ if (rc)
+ goto error;
+
+ rc = ocs_hcu_hmac(hcu_dev, rctx->algo, tctx->key, tctx->key_len,
+ rctx->dma_list, req->result, rctx->dig_sz);
+
+ /* Unmap data and free DMA list regardless of return code. */
+ kmb_ocs_hcu_dma_cleanup(req, rctx);
+
+ /* Process previous return code. */
+ if (rc)
+ goto error;
+
+ goto done;
+ }
+
+ /* Handle update request case. */
+ if (!(rctx->flags & REQ_FINAL)) {
+ /* Update should always have input data. */
+ if (!kmb_get_total_data(rctx))
+ return -EINVAL;
+
+ /* Map input data into the HCU DMA linked list. */
+ rc = kmb_ocs_dma_prepare(req);
+ if (rc)
+ goto error;
+
+ /* Do hashing step. */
+ rc = ocs_hcu_hash_update(hcu_dev, &rctx->hash_ctx,
+ rctx->dma_list);
+
+ /* Unmap data and free DMA list regardless of return code. */
+ kmb_ocs_hcu_dma_cleanup(req, rctx);
+
+ /* Process previous return code. */
+ if (rc)
+ goto error;
+
+ /*
+ * Reset request buffer count (data in the buffer was just
+ * processed).
+ */
+ rctx->buf_cnt = 0;
+ /*
+ * Move remaining sg data into the request buffer, so that it
+ * will be processed during the next request.
+ *
+ * NOTE: we have remaining data if kmb_get_total_data() was not
+ * a multiple of block size.
+ */
+ rc = flush_sg_to_ocs_buffer(rctx);
+ if (rc)
+ goto error;
+
+ goto done;
+ }
+
+ /* If we get here, this is a final request. */
+
+ /* If there is data to process, use finup. */
+ if (kmb_get_total_data(rctx)) {
+ /* Map input data into the HCU DMA linked list. */
+ rc = kmb_ocs_dma_prepare(req);
+ if (rc)
+ goto error;
+
+ /* Do hashing step. */
+ rc = ocs_hcu_hash_finup(hcu_dev, &rctx->hash_ctx,
+ rctx->dma_list,
+ req->result, rctx->dig_sz);
+ /* Free DMA list regardless of return code. */
+ kmb_ocs_hcu_dma_cleanup(req, rctx);
+
+ /* Process previous return code. */
+ if (rc)
+ goto error;
+
+ } else { /* Otherwise (if we have no data), use final. */
+ rc = ocs_hcu_hash_final(hcu_dev, &rctx->hash_ctx, req->result,
+ rctx->dig_sz);
+ if (rc)
+ goto error;
+ }
+
+ /*
+ * If we are finalizing a SW HMAC request, we just computed the result
+ * of: H(k ^ ipad || m).
+ *
+ * We now need to complete the HMAC calculation with the OPAD step,
+ * that is, we need to compute H(k ^ opad || digest), where digest is
+ * the digest we just obtained, i.e., H(k ^ ipad || m).
+ */
+ if (rctx->flags & REQ_FLAGS_HMAC_SW) {
+ /*
+ * Compute k ^ opad and store it in the request buffer (which
+ * is not used anymore at this point).
+ * Note: key has been padded / hashed already (so keylen ==
+ * blksz).
+ */
+ WARN_ON(tctx->key_len != rctx->blk_sz);
+ for (i = 0; i < rctx->blk_sz; i++)
+ rctx->buffer[i] = tctx->key[i] ^ HMAC_OPAD_VALUE;
+ /* Now append the digest to the rest of the buffer. */
+ for (i = 0; (i < rctx->dig_sz); i++)
+ rctx->buffer[rctx->blk_sz + i] = req->result[i];
+
+ /* Now hash the buffer to obtain the final HMAC. */
+ rc = ocs_hcu_digest(hcu_dev, rctx->algo, rctx->buffer,
+ rctx->blk_sz + rctx->dig_sz, req->result,
+ rctx->dig_sz);
+ if (rc)
+ goto error;
+ }
+
+ /* Perform secure clean-up. */
+ kmb_ocs_hcu_secure_cleanup(req);
+done:
+ crypto_finalize_hash_request(hcu_dev->engine, req, 0);
+
+ return 0;
+
+error:
+ kmb_ocs_hcu_secure_cleanup(req);
+ return rc;
+}
+
+static int kmb_ocs_hcu_init(struct ahash_request *req)
+{
+ struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (!hcu_dev)
+ return -ENOENT;
+
+ /* Initialize entire request context to zero. */
+ memset(rctx, 0, sizeof(*rctx));
+
+ rctx->hcu_dev = hcu_dev;
+ rctx->dig_sz = crypto_ahash_digestsize(tfm);
+
+ switch (rctx->dig_sz) {
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
+ case SHA224_DIGEST_SIZE:
+ rctx->blk_sz = SHA224_BLOCK_SIZE;
+ rctx->algo = OCS_HCU_ALGO_SHA224;
+ break;
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
+ case SHA256_DIGEST_SIZE:
+ rctx->blk_sz = SHA256_BLOCK_SIZE;
+ /*
+ * SHA256 and SM3 have the same digest size: use info from tfm
+ * context to find out which one we should use.
+ */
+ rctx->algo = ctx->is_sm3_tfm ? OCS_HCU_ALGO_SM3 :
+ OCS_HCU_ALGO_SHA256;
+ break;
+ case SHA384_DIGEST_SIZE:
+ rctx->blk_sz = SHA384_BLOCK_SIZE;
+ rctx->algo = OCS_HCU_ALGO_SHA384;
+ break;
+ case SHA512_DIGEST_SIZE:
+ rctx->blk_sz = SHA512_BLOCK_SIZE;
+ rctx->algo = OCS_HCU_ALGO_SHA512;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Initialize intermediate data. */
+ ocs_hcu_hash_init(&rctx->hash_ctx, rctx->algo);
+
+ /* If this a HMAC request, set HMAC flag. */
+ if (ctx->is_hmac_tfm)
+ rctx->flags |= REQ_FLAGS_HMAC;
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_update(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ int rc;
+
+ if (!req->nbytes)
+ return 0;
+
+ rctx->sg_data_total = req->nbytes;
+ rctx->sg_data_offset = 0;
+ rctx->sg = req->src;
+
+ /*
+ * If we are doing HMAC, then we must use SW-assisted HMAC, since HW
+ * HMAC does not support context switching (there it can only be used
+ * with finup() or digest()).
+ */
+ if (rctx->flags & REQ_FLAGS_HMAC &&
+ !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
+ rctx->flags |= REQ_FLAGS_HMAC_SW;
+ rc = prepare_ipad(req);
+ if (rc)
+ return rc;
+ }
+
+ /*
+ * If remaining sg_data fits into ctx buffer, just copy it there; we'll
+ * process it at the next update() or final().
+ */
+ if (rctx->sg_data_total <= (sizeof(rctx->buffer) - rctx->buf_cnt))
+ return flush_sg_to_ocs_buffer(rctx);
+
+ return kmb_ocs_hcu_handle_queue(req);
+}
+
+/* Common logic for kmb_ocs_hcu_final() and kmb_ocs_hcu_finup(). */
+static int kmb_ocs_hcu_fin_common(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
+ int rc;
+
+ rctx->flags |= REQ_FINAL;
+
+ /*
+ * If this is a HMAC request and, so far, we didn't have to switch to
+ * SW HMAC, check if we can use HW HMAC.
+ */
+ if (rctx->flags & REQ_FLAGS_HMAC &&
+ !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
+ /*
+ * If we are here, it means we never processed any data so far,
+ * so we can use HW HMAC, but only if there is some data to
+ * process (since OCS HW MAC does not support zero-length
+ * messages) and the key length is supported by the hardware
+ * (OCS HCU HW only supports length <= 64); if HW HMAC cannot
+ * be used, fall back to SW-assisted HMAC.
+ */
+ if (kmb_get_total_data(rctx) &&
+ ctx->key_len <= OCS_HCU_HW_KEY_LEN) {
+ rctx->flags |= REQ_FLAGS_HMAC_HW;
+ } else {
+ rctx->flags |= REQ_FLAGS_HMAC_SW;
+ rc = prepare_ipad(req);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return kmb_ocs_hcu_handle_queue(req);
+}
+
+static int kmb_ocs_hcu_final(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+
+ rctx->sg_data_total = 0;
+ rctx->sg_data_offset = 0;
+ rctx->sg = NULL;
+
+ return kmb_ocs_hcu_fin_common(req);
+}
+
+static int kmb_ocs_hcu_finup(struct ahash_request *req)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+
+ rctx->sg_data_total = req->nbytes;
+ rctx->sg_data_offset = 0;
+ rctx->sg = req->src;
+
+ return kmb_ocs_hcu_fin_common(req);
+}
+
+static int kmb_ocs_hcu_digest(struct ahash_request *req)
+{
+ int rc = 0;
+ struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
+
+ if (!hcu_dev)
+ return -ENOENT;
+
+ rc = kmb_ocs_hcu_init(req);
+ if (rc)
+ return rc;
+
+ rc = kmb_ocs_hcu_finup(req);
+
+ return rc;
+}
+
+static int kmb_ocs_hcu_export(struct ahash_request *req, void *out)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+
+ /* Intermediate data is always stored and applied per request. */
+ memcpy(out, rctx, sizeof(*rctx));
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_import(struct ahash_request *req, const void *in)
+{
+ struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
+
+ /* Intermediate data is always stored and applied per request. */
+ memcpy(rctx, in, sizeof(*rctx));
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ unsigned int digestsize = crypto_ahash_digestsize(tfm);
+ struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
+ size_t blk_sz = crypto_ahash_blocksize(tfm);
+ struct crypto_ahash *ahash_tfm;
+ struct ahash_request *req;
+ struct crypto_wait wait;
+ struct scatterlist sg;
+ const char *alg_name;
+ int rc;
+
+ /*
+ * Key length must be equal to block size:
+ * - If key is shorter, we are done for now (the key will be padded
+ * later on); this is to maximize the use of HW HMAC (which works
+ * only for keys <= 64 bytes).
+ * - If key is longer, we hash it.
+ */
+ if (keylen <= blk_sz) {
+ memcpy(ctx->key, key, keylen);
+ ctx->key_len = keylen;
+ return 0;
+ }
+
+ switch (digestsize) {
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
+ case SHA224_DIGEST_SIZE:
+ alg_name = "sha224-keembay-ocs";
+ break;
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
+ case SHA256_DIGEST_SIZE:
+ alg_name = ctx->is_sm3_tfm ? "sm3-keembay-ocs" :
+ "sha256-keembay-ocs";
+ break;
+ case SHA384_DIGEST_SIZE:
+ alg_name = "sha384-keembay-ocs";
+ break;
+ case SHA512_DIGEST_SIZE:
+ alg_name = "sha512-keembay-ocs";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
+ if (IS_ERR(ahash_tfm))
+ return PTR_ERR(ahash_tfm);
+
+ req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
+ if (!req) {
+ rc = -ENOMEM;
+ goto err_free_ahash;
+ }
+
+ crypto_init_wait(&wait);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+ crypto_ahash_clear_flags(ahash_tfm, ~0);
+
+ sg_init_one(&sg, key, keylen);
+ ahash_request_set_crypt(req, &sg, ctx->key, keylen);
+
+ rc = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ if (rc == 0)
+ ctx->key_len = digestsize;
+
+ ahash_request_free(req);
+err_free_ahash:
+ crypto_free_ahash(ahash_tfm);
+
+ return rc;
+}
+
+/* Set request size and initialize tfm context. */
+static void __cra_init(struct crypto_tfm *tfm, struct ocs_hcu_ctx *ctx)
+{
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct ocs_hcu_rctx));
+
+ /* Init context to 0. */
+ memzero_explicit(ctx, sizeof(*ctx));
+ /* Set engine ops. */
+ ctx->engine_ctx.op.do_one_request = kmb_ocs_hcu_do_one_request;
+}
+
+static int kmb_ocs_hcu_sha_cra_init(struct crypto_tfm *tfm)
+{
+ struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ __cra_init(tfm, ctx);
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_sm3_cra_init(struct crypto_tfm *tfm)
+{
+ struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ __cra_init(tfm, ctx);
+
+ ctx->is_sm3_tfm = true;
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_hmac_sm3_cra_init(struct crypto_tfm *tfm)
+{
+ struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ __cra_init(tfm, ctx);
+
+ ctx->is_sm3_tfm = true;
+ ctx->is_hmac_tfm = true;
+
+ return 0;
+}
+
+static int kmb_ocs_hcu_hmac_cra_init(struct crypto_tfm *tfm)
+{
+ struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ __cra_init(tfm, ctx);
+
+ ctx->is_hmac_tfm = true;
+
+ return 0;
+}
+
+/* Function called when 'tfm' is de-initialized. */
+static void kmb_ocs_hcu_hmac_cra_exit(struct crypto_tfm *tfm)
+{
+ struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* Clear the key. */
+ memzero_explicit(ctx->key, sizeof(ctx->key));
+}
+
+static struct ahash_alg ocs_hcu_algs[] = {
+#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_sha_cra_init,
+ }
+ }
+},
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .setkey = kmb_ocs_hcu_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "hmac-sha224-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_hmac_cra_init,
+ .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
+ }
+ }
+},
+#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_sha_cra_init,
+ }
+ }
+},
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .setkey = kmb_ocs_hcu_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "hmac-sha256-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_hmac_cra_init,
+ .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
+ }
+ }
+},
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "sm3",
+ .cra_driver_name = "sm3-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_sm3_cra_init,
+ }
+ }
+},
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .setkey = kmb_ocs_hcu_setkey,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "hmac(sm3)",
+ .cra_driver_name = "hmac-sm3-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_hmac_sm3_cra_init,
+ .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
+ }
+ }
+},
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "sha384-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_sha_cra_init,
+ }
+ }
+},
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .setkey = kmb_ocs_hcu_setkey,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "hmac-sha384-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_hmac_cra_init,
+ .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
+ }
+ }
+},
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_sha_cra_init,
+ }
+ }
+},
+{
+ .init = kmb_ocs_hcu_init,
+ .update = kmb_ocs_hcu_update,
+ .final = kmb_ocs_hcu_final,
+ .finup = kmb_ocs_hcu_finup,
+ .digest = kmb_ocs_hcu_digest,
+ .export = kmb_ocs_hcu_export,
+ .import = kmb_ocs_hcu_import,
+ .setkey = kmb_ocs_hcu_setkey,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct ocs_hcu_rctx),
+ .base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "hmac-sha512-keembay-ocs",
+ .cra_priority = 255,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ocs_hcu_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = kmb_ocs_hcu_hmac_cra_init,
+ .cra_exit = kmb_ocs_hcu_hmac_cra_exit,
+ }
+ }
+},
+};
+
+/* Device tree driver match. */
+static const struct of_device_id kmb_ocs_hcu_of_match[] = {
+ {
+ .compatible = "intel,keembay-ocs-hcu",
+ },
+ {}
+};
+
+static int kmb_ocs_hcu_remove(struct platform_device *pdev)
+{
+ struct ocs_hcu_dev *hcu_dev;
+ int rc;
+
+ hcu_dev = platform_get_drvdata(pdev);
+ if (!hcu_dev)
+ return -ENODEV;
+
+ crypto_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
+
+ rc = crypto_engine_exit(hcu_dev->engine);
+
+ spin_lock_bh(&ocs_hcu.lock);
+ list_del(&hcu_dev->list);
+ spin_unlock_bh(&ocs_hcu.lock);
+
+ return rc;
+}
+
+static int kmb_ocs_hcu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ocs_hcu_dev *hcu_dev;
+ struct resource *hcu_mem;
+ int rc;
+
+ hcu_dev = devm_kzalloc(dev, sizeof(*hcu_dev), GFP_KERNEL);
+ if (!hcu_dev)
+ return -ENOMEM;
+
+ hcu_dev->dev = dev;
+
+ platform_set_drvdata(pdev, hcu_dev);
+ rc = dma_set_mask_and_coherent(&pdev->dev, OCS_HCU_DMA_BIT_MASK);
+ if (rc)
+ return rc;
+
+ /* Get the memory address and remap. */
+ hcu_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!hcu_mem) {
+ dev_err(dev, "Could not retrieve io mem resource.\n");
+ return -ENODEV;
+ }
+
+ hcu_dev->io_base = devm_ioremap_resource(dev, hcu_mem);
+ if (IS_ERR(hcu_dev->io_base)) {
+ dev_err(dev, "Could not io-remap mem resource.\n");
+ return PTR_ERR(hcu_dev->io_base);
+ }
+
+ init_completion(&hcu_dev->irq_done);
+
+ /* Get and request IRQ. */
+ hcu_dev->irq = platform_get_irq(pdev, 0);
+ if (hcu_dev->irq < 0)
+ return hcu_dev->irq;
+
+ rc = devm_request_threaded_irq(&pdev->dev, hcu_dev->irq,
+ ocs_hcu_irq_handler, NULL, 0,
+ "keembay-ocs-hcu", hcu_dev);
+ if (rc < 0) {
+ dev_err(dev, "Could not request IRQ.\n");
+ return rc;
+ }
+
+ INIT_LIST_HEAD(&hcu_dev->list);
+
+ spin_lock_bh(&ocs_hcu.lock);
+ list_add_tail(&hcu_dev->list, &ocs_hcu.dev_list);
+ spin_unlock_bh(&ocs_hcu.lock);
+
+ /* Initialize crypto engine */
+ hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
+	if (!hcu_dev->engine) {
+		rc = -ENOMEM;
+		goto list_del;
+	}
+
+ rc = crypto_engine_start(hcu_dev->engine);
+ if (rc) {
+ dev_err(dev, "Could not start engine.\n");
+ goto cleanup;
+ }
+
+ /* Security infrastructure guarantees OCS clock is enabled. */
+
+ rc = crypto_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
+ if (rc) {
+ dev_err(dev, "Could not register algorithms.\n");
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ crypto_engine_exit(hcu_dev->engine);
+list_del:
+ spin_lock_bh(&ocs_hcu.lock);
+ list_del(&hcu_dev->list);
+ spin_unlock_bh(&ocs_hcu.lock);
+
+ return rc;
+}
+
+/* The OCS driver is a platform device. */
+static struct platform_driver kmb_ocs_hcu_driver = {
+ .probe = kmb_ocs_hcu_probe,
+ .remove = kmb_ocs_hcu_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = kmb_ocs_hcu_of_match,
+ },
+};
+
+module_platform_driver(kmb_ocs_hcu_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/keembay/ocs-aes.c b/drivers/crypto/keembay/ocs-aes.c
index cc286adb1c4a..be9f32fc8f42 100644
--- a/drivers/crypto/keembay/ocs-aes.c
+++ b/drivers/crypto/keembay/ocs-aes.c
@@ -958,14 +958,14 @@ int ocs_aes_gcm_op(struct ocs_aes_dev *aes_dev,
ocs_aes_write_last_data_blk_len(aes_dev, src_size);
/* Write ciphertext bit length */
- bit_len = src_size * 8;
+ bit_len = (u64)src_size * 8;
val = bit_len & 0xFFFFFFFF;
iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_0_OFFSET);
val = bit_len >> 32;
iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_1_OFFSET);
/* Write aad bit length */
- bit_len = aad_size * 8;
+ bit_len = (u64)aad_size * 8;
val = bit_len & 0xFFFFFFFF;
iowrite32(val, aes_dev->base_reg + AES_MULTIPURPOSE2_2_OFFSET);
val = bit_len >> 32;
@@ -1080,15 +1080,15 @@ static int ocs_aes_ccm_write_b0(const struct ocs_aes_dev *aes_dev,
/*
* q is the octet length of Q.
* q can only be an element of {2, 3, 4, 5, 6, 7, 8} and is encoded as
- * q - 1 == iv[0]
+ * q - 1 == iv[0] & 0x7;
*/
b0[0] |= iv[0] & 0x7;
/*
* Copy the Nonce N from IV to B0; N is located in iv[1]..iv[15 - q]
* and must be copied to b0[1]..b0[15-q].
- * q == iv[0] + 1
+ * q == (iv[0] & 0x7) + 1
*/
- q = iv[0] + 1;
+ q = (iv[0] & 0x7) + 1;
for (i = 1; i <= 15 - q; i++)
b0[i] = iv[i];
/*
diff --git a/drivers/crypto/keembay/ocs-hcu.c b/drivers/crypto/keembay/ocs-hcu.c
new file mode 100644
index 000000000000..81eecacf603a
--- /dev/null
+++ b/drivers/crypto/keembay/ocs-hcu.c
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay OCS HCU Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include <crypto/sha2.h>
+
+#include "ocs-hcu.h"
+
+/* Registers. */
+#define OCS_HCU_MODE 0x00
+#define OCS_HCU_CHAIN 0x04
+#define OCS_HCU_OPERATION 0x08
+#define OCS_HCU_KEY_0 0x0C
+#define OCS_HCU_ISR 0x50
+#define OCS_HCU_IER 0x54
+#define OCS_HCU_STATUS 0x58
+#define OCS_HCU_MSG_LEN_LO 0x60
+#define OCS_HCU_MSG_LEN_HI 0x64
+#define OCS_HCU_KEY_BYTE_ORDER_CFG 0x80
+#define OCS_HCU_DMA_SRC_ADDR 0x400
+#define OCS_HCU_DMA_SRC_SIZE 0x408
+#define OCS_HCU_DMA_DST_SIZE 0x40C
+#define OCS_HCU_DMA_DMA_MODE 0x410
+#define OCS_HCU_DMA_NEXT_SRC_DESCR 0x418
+#define OCS_HCU_DMA_MSI_ISR 0x480
+#define OCS_HCU_DMA_MSI_IER 0x484
+#define OCS_HCU_DMA_MSI_MASK 0x488
+
+/* Register bit definitions. */
+#define HCU_MODE_ALGO_SHIFT 16
+#define HCU_MODE_HMAC_SHIFT 22
+
+#define HCU_STATUS_BUSY BIT(0)
+
+#define HCU_BYTE_ORDER_SWAP BIT(0)
+
+#define HCU_IRQ_HASH_DONE BIT(2)
+#define HCU_IRQ_HASH_ERR_MASK (BIT(3) | BIT(1) | BIT(0))
+
+#define HCU_DMA_IRQ_SRC_DONE BIT(0)
+#define HCU_DMA_IRQ_SAI_ERR BIT(2)
+#define HCU_DMA_IRQ_BAD_COMP_ERR BIT(3)
+#define HCU_DMA_IRQ_INBUF_RD_ERR BIT(4)
+#define HCU_DMA_IRQ_INBUF_WD_ERR BIT(5)
+#define HCU_DMA_IRQ_OUTBUF_WR_ERR BIT(6)
+#define HCU_DMA_IRQ_OUTBUF_RD_ERR BIT(7)
+#define HCU_DMA_IRQ_CRD_ERR BIT(8)
+#define HCU_DMA_IRQ_ERR_MASK (HCU_DMA_IRQ_SAI_ERR | \
+ HCU_DMA_IRQ_BAD_COMP_ERR | \
+ HCU_DMA_IRQ_INBUF_RD_ERR | \
+ HCU_DMA_IRQ_INBUF_WD_ERR | \
+ HCU_DMA_IRQ_OUTBUF_WR_ERR | \
+ HCU_DMA_IRQ_OUTBUF_RD_ERR | \
+ HCU_DMA_IRQ_CRD_ERR)
+
+#define HCU_DMA_SNOOP_MASK (0x7 << 28)
+#define HCU_DMA_SRC_LL_EN BIT(25)
+#define HCU_DMA_EN BIT(31)
+
+#define OCS_HCU_ENDIANNESS_VALUE 0x2A
+
+#define HCU_DMA_MSI_UNMASK BIT(0)
+#define HCU_DMA_MSI_DISABLE 0
+#define HCU_IRQ_DISABLE 0
+
+#define OCS_HCU_START BIT(0)
+#define OCS_HCU_TERMINATE BIT(1)
+
+#define OCS_LL_DMA_FLAG_TERMINATE BIT(31)
+
+#define OCS_HCU_HW_KEY_LEN_U32 (OCS_HCU_HW_KEY_LEN / sizeof(u32))
+
+#define HCU_DATA_WRITE_ENDIANNESS_OFFSET 26
+
+#define OCS_HCU_NUM_CHAINS_SHA256_224_SM3 (SHA256_DIGEST_SIZE / sizeof(u32))
+#define OCS_HCU_NUM_CHAINS_SHA384_512 (SHA512_DIGEST_SIZE / sizeof(u32))
+
+/*
+ * While polling on a busy HCU, wait at most 200us between consecutive
+ * checks.
+ */
+#define OCS_HCU_WAIT_BUSY_RETRY_DELAY_US 200
+/* Wait on a busy HCU for at most 1 second. */
+#define OCS_HCU_WAIT_BUSY_TIMEOUT_US 1000000
+
+/**
+ * struct ocs_hcu_dma_entry - An entry in an OCS DMA linked list.
+ * @src_addr: Source address of the data.
+ * @src_len: Length of data to be fetched.
+ * @nxt_desc: Next descriptor to fetch.
+ * @ll_flags: Flags (Freeze/Terminate) for the DMA engine.
+ */
+struct ocs_hcu_dma_entry {
+ u32 src_addr;
+ u32 src_len;
+ u32 nxt_desc;
+ u32 ll_flags;
+};
+
+/**
+ * struct ocs_hcu_dma_list - OCS-specific DMA linked list.
+ * @head: The head of the list (points to the array backing the list).
+ * @tail: The current tail of the list; NULL if the list is empty.
+ * @dma_addr: The DMA address of @head (i.e., the DMA address of the backing
+ * array).
+ * @max_nents: Maximum number of entries in the list (i.e., number of elements
+ * in the backing array).
+ *
+ * The OCS DMA list is an array-backed list of OCS DMA descriptors. The array
+ * backing the list is allocated with dma_alloc_coherent() and pointed to by
+ * @head.
+ */
+struct ocs_hcu_dma_list {
+ struct ocs_hcu_dma_entry *head;
+ struct ocs_hcu_dma_entry *tail;
+ dma_addr_t dma_addr;
+ size_t max_nents;
+};
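+
+/*
+ * Illustrative note (not part of the driver logic): since the list is backed
+ * by a single coherent array, the DMA address of the i-th entry follows
+ * directly from @dma_addr:
+ *
+ *   entry_i_dma = dma_list->dma_addr + i * sizeof(struct ocs_hcu_dma_entry);
+ *
+ * This is the relation used below when linking a new tail via 'nxt_desc' in
+ * ocs_hcu_dma_list_add_tail().
+ */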
+
+static inline u32 ocs_hcu_num_chains(enum ocs_hcu_algo algo)
+{
+ switch (algo) {
+ case OCS_HCU_ALGO_SHA224:
+ case OCS_HCU_ALGO_SHA256:
+ case OCS_HCU_ALGO_SM3:
+ return OCS_HCU_NUM_CHAINS_SHA256_224_SM3;
+ case OCS_HCU_ALGO_SHA384:
+ case OCS_HCU_ALGO_SHA512:
+ return OCS_HCU_NUM_CHAINS_SHA384_512;
+ default:
+ return 0;
+ };
+}
+
+static inline u32 ocs_hcu_digest_size(enum ocs_hcu_algo algo)
+{
+ switch (algo) {
+ case OCS_HCU_ALGO_SHA224:
+ return SHA224_DIGEST_SIZE;
+ case OCS_HCU_ALGO_SHA256:
+ case OCS_HCU_ALGO_SM3:
+		/* SM3 shares the same digest size. */
+ return SHA256_DIGEST_SIZE;
+ case OCS_HCU_ALGO_SHA384:
+ return SHA384_DIGEST_SIZE;
+ case OCS_HCU_ALGO_SHA512:
+ return SHA512_DIGEST_SIZE;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * ocs_hcu_wait_busy() - Wait for the OCS HCU hardware to become usable.
+ * @hcu_dev: OCS HCU device to wait for.
+ *
+ * Return: 0 if the device is free, -ETIMEDOUT if the device is busy and the
+ * internal timeout has expired.
+ */
+static int ocs_hcu_wait_busy(struct ocs_hcu_dev *hcu_dev)
+{
+ long val;
+
+ return readl_poll_timeout(hcu_dev->io_base + OCS_HCU_STATUS, val,
+ !(val & HCU_STATUS_BUSY),
+ OCS_HCU_WAIT_BUSY_RETRY_DELAY_US,
+ OCS_HCU_WAIT_BUSY_TIMEOUT_US);
+}
+
+static void ocs_hcu_done_irq_en(struct ocs_hcu_dev *hcu_dev)
+{
+ /* Clear any pending interrupts. */
+ writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_ISR);
+ hcu_dev->irq_err = false;
+ /* Enable error and HCU done interrupts. */
+ writel(HCU_IRQ_HASH_DONE | HCU_IRQ_HASH_ERR_MASK,
+ hcu_dev->io_base + OCS_HCU_IER);
+}
+
+static void ocs_hcu_dma_irq_en(struct ocs_hcu_dev *hcu_dev)
+{
+ /* Clear any pending interrupts. */
+ writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
+ hcu_dev->irq_err = false;
+	/* Enable only the DMA source-done and error interrupts. */
+ writel(HCU_DMA_IRQ_ERR_MASK | HCU_DMA_IRQ_SRC_DONE,
+ hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
+ /* Unmask */
+ writel(HCU_DMA_MSI_UNMASK, hcu_dev->io_base + OCS_HCU_DMA_MSI_MASK);
+}
+
+static void ocs_hcu_irq_dis(struct ocs_hcu_dev *hcu_dev)
+{
+ writel(HCU_IRQ_DISABLE, hcu_dev->io_base + OCS_HCU_IER);
+ writel(HCU_DMA_MSI_DISABLE, hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
+}
+
+static int ocs_hcu_wait_and_disable_irq(struct ocs_hcu_dev *hcu_dev)
+{
+ int rc;
+
+ rc = wait_for_completion_interruptible(&hcu_dev->irq_done);
+ if (rc)
+ goto exit;
+
+ if (hcu_dev->irq_err) {
+ /* Unset flag and return error. */
+ hcu_dev->irq_err = false;
+ rc = -EIO;
+ goto exit;
+ }
+
+exit:
+ ocs_hcu_irq_dis(hcu_dev);
+
+ return rc;
+}
+
+/**
+ * ocs_hcu_get_intermediate_data() - Get intermediate data.
+ * @hcu_dev: The target HCU device.
+ * @data: Where to store the intermediate data.
+ * @algo: The algorithm being used.
+ *
+ * This function is used to save the current hashing process state in order to
+ * continue it in the future.
+ *
+ * Note: once all data has been processed, the intermediate data actually
+ * contains the hashing result. So this function is also used to retrieve the
+ * final result of a hashing process.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int ocs_hcu_get_intermediate_data(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_idata *data,
+ enum ocs_hcu_algo algo)
+{
+ const int n = ocs_hcu_num_chains(algo);
+ u32 *chain;
+ int rc;
+ int i;
+
+ /* Data not requested. */
+ if (!data)
+ return -EINVAL;
+
+ chain = (u32 *)data->digest;
+
+ /* Ensure that the OCS is no longer busy before reading the chains. */
+ rc = ocs_hcu_wait_busy(hcu_dev);
+ if (rc)
+ return rc;
+
+ /*
+	 * This loop is safe because data->digest is an array of
+ * SHA512_DIGEST_SIZE bytes and the maximum value returned by
+ * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512 which is equal
+ * to SHA512_DIGEST_SIZE / sizeof(u32).
+ */
+ for (i = 0; i < n; i++)
+ chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);
+
+ data->msg_len_lo = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
+ data->msg_len_hi = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);
+
+ return 0;
+}
+
+/**
+ * ocs_hcu_set_intermediate_data() - Set intermediate data.
+ * @hcu_dev: The target HCU device.
+ * @data: The intermediate data to be set.
+ * @algo: The algorithm being used.
+ *
+ * This function is used to continue a previous hashing process.
+ */
+static void ocs_hcu_set_intermediate_data(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_idata *data,
+ enum ocs_hcu_algo algo)
+{
+ const int n = ocs_hcu_num_chains(algo);
+ u32 *chain = (u32 *)data->digest;
+ int i;
+
+ /*
+	 * This loop is safe because data->digest is an array of
+ * SHA512_DIGEST_SIZE bytes and the maximum value returned by
+ * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512 which is equal
+ * to SHA512_DIGEST_SIZE / sizeof(u32).
+ */
+ for (i = 0; i < n; i++)
+ writel(chain[i], hcu_dev->io_base + OCS_HCU_CHAIN);
+
+ writel(data->msg_len_lo, hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
+ writel(data->msg_len_hi, hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);
+}
+
+static int ocs_hcu_get_digest(struct ocs_hcu_dev *hcu_dev,
+ enum ocs_hcu_algo algo, u8 *dgst, size_t dgst_len)
+{
+ u32 *chain;
+ int rc;
+ int i;
+
+ if (!dgst)
+ return -EINVAL;
+
+ /* Length of the output buffer must match the algo digest size. */
+ if (dgst_len != ocs_hcu_digest_size(algo))
+ return -EINVAL;
+
+ /* Ensure that the OCS is no longer busy before reading the chains. */
+ rc = ocs_hcu_wait_busy(hcu_dev);
+ if (rc)
+ return rc;
+
+ chain = (u32 *)dgst;
+ for (i = 0; i < dgst_len / sizeof(u32); i++)
+ chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);
+
+ return 0;
+}
+
+/**
+ * ocs_hcu_hw_cfg() - Configure the HCU hardware.
+ * @hcu_dev: The HCU device to configure.
+ * @algo: The algorithm to be used by the HCU device.
+ * @use_hmac: Whether or not HW HMAC should be used.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int ocs_hcu_hw_cfg(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+ bool use_hmac)
+{
+ u32 cfg;
+ int rc;
+
+ if (algo != OCS_HCU_ALGO_SHA256 && algo != OCS_HCU_ALGO_SHA224 &&
+ algo != OCS_HCU_ALGO_SHA384 && algo != OCS_HCU_ALGO_SHA512 &&
+ algo != OCS_HCU_ALGO_SM3)
+ return -EINVAL;
+
+ rc = ocs_hcu_wait_busy(hcu_dev);
+ if (rc)
+ return rc;
+
+ /* Ensure interrupts are disabled. */
+ ocs_hcu_irq_dis(hcu_dev);
+
+ /* Configure endianness, hashing algorithm and HW HMAC (if needed) */
+ cfg = OCS_HCU_ENDIANNESS_VALUE << HCU_DATA_WRITE_ENDIANNESS_OFFSET;
+ cfg |= algo << HCU_MODE_ALGO_SHIFT;
+ if (use_hmac)
+ cfg |= BIT(HCU_MODE_HMAC_SHIFT);
+
+ writel(cfg, hcu_dev->io_base + OCS_HCU_MODE);
+
+ return 0;
+}
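+
+/*
+ * Worked example (for illustration only): for SHA-256 (OCS_HCU_ALGO_SHA256
+ * == 2) with HW HMAC enabled, the value written to OCS_HCU_MODE above is:
+ *
+ *   (0x2A << 26) | (1 << 22) | (2 << 16) == 0xA8420000
+ *
+ * i.e., the endianness value, the HMAC enable bit and the algorithm selector
+ * combined.
+ */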
+
+/**
+ * ocs_hcu_clear_key() - Clear key stored in OCS HMAC KEY registers.
+ * @hcu_dev: The OCS HCU device whose key registers should be cleared.
+ */
+static void ocs_hcu_clear_key(struct ocs_hcu_dev *hcu_dev)
+{
+ int reg_off;
+
+ /* Clear OCS_HCU_KEY_[0..15] */
+ for (reg_off = 0; reg_off < OCS_HCU_HW_KEY_LEN; reg_off += sizeof(u32))
+ writel(0, hcu_dev->io_base + OCS_HCU_KEY_0 + reg_off);
+}
+
+/**
+ * ocs_hcu_write_key() - Write key to OCS HMAC KEY registers.
+ * @hcu_dev: The OCS HCU device the key should be written to.
+ * @key: The key to be written.
+ * @len:     The size of the key to write. It must not exceed
+ *           OCS_HCU_HW_KEY_LEN; shorter keys are zero-padded.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int ocs_hcu_write_key(struct ocs_hcu_dev *hcu_dev, const u8 *key, size_t len)
+{
+ u32 key_u32[OCS_HCU_HW_KEY_LEN_U32];
+ int i;
+
+ if (len > OCS_HCU_HW_KEY_LEN)
+ return -EINVAL;
+
+ /* Copy key into temporary u32 array. */
+ memcpy(key_u32, key, len);
+
+ /*
+ * Hardware requires all the bytes of the HW Key vector to be
+ * written. So pad with zero until we reach OCS_HCU_HW_KEY_LEN.
+ */
+ memzero_explicit((u8 *)key_u32 + len, OCS_HCU_HW_KEY_LEN - len);
+
+ /*
+ * OCS hardware expects the MSB of the key to be written at the highest
+	 * address of the HCU Key vector; in other words, the key must be
+ * written in reverse order.
+ *
+	 * Therefore, we first enable byte swapping for the HCU key vector, so
+	 * that the bytes of each 32-bit word written to OCS_HCU_KEY_[0..15]
+	 * are swapped:
+ * 3 <---> 0, 2 <---> 1.
+ */
+ writel(HCU_BYTE_ORDER_SWAP,
+ hcu_dev->io_base + OCS_HCU_KEY_BYTE_ORDER_CFG);
+ /*
+ * And then we write the 32-bit words composing the key starting from
+ * the end of the key.
+ */
+ for (i = 0; i < OCS_HCU_HW_KEY_LEN_U32; i++)
+ writel(key_u32[OCS_HCU_HW_KEY_LEN_U32 - 1 - i],
+ hcu_dev->io_base + OCS_HCU_KEY_0 + (sizeof(u32) * i));
+
+ memzero_explicit(key_u32, OCS_HCU_HW_KEY_LEN);
+
+ return 0;
+}
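+
+/*
+ * Worked example (illustration only, assuming a full 64-byte key k[0..63]):
+ * the loop above writes key_u32[15] (bytes k[60..63]) to OCS_HCU_KEY_0,
+ * key_u32[14] to OCS_HCU_KEY_0 + 4, ..., and key_u32[0] (bytes k[0..3]) to
+ * OCS_HCU_KEY_0 + 60. With HCU_BYTE_ORDER_SWAP enabled, the bytes within
+ * each 32-bit word are also reversed, so the key ends up fully byte-reversed
+ * in the HW key vector, as required by the hardware.
+ */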
+
+/**
+ * ocs_hcu_ll_dma_start() - Start OCS HCU hashing via DMA
+ * @hcu_dev: The OCS HCU device to use.
+ * @dma_list: The OCS DMA list mapping the data to hash.
+ * @finalize: Whether or not this is the last hashing operation and therefore
+ *            the final hash should be computed even if the data is not
+ *            block-aligned.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int ocs_hcu_ll_dma_start(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_dma_list *dma_list,
+ bool finalize)
+{
+ u32 cfg = HCU_DMA_SNOOP_MASK | HCU_DMA_SRC_LL_EN | HCU_DMA_EN;
+ int rc;
+
+ if (!dma_list)
+ return -EINVAL;
+
+ /*
+ * For final requests we use HCU_DONE IRQ to be notified when all input
+ * data has been processed by the HCU; however, we cannot do so for
+ * non-final requests, because we don't get a HCU_DONE IRQ when we
+ * don't terminate the operation.
+ *
+ * Therefore, for non-final requests, we use the DMA IRQ, which
+	 * triggers when the DMA has finished feeding all the input data to the
+ * HCU, but the HCU may still be processing it. This is fine, since we
+ * will wait for the HCU processing to be completed when we try to read
+ * intermediate results, in ocs_hcu_get_intermediate_data().
+ */
+ if (finalize)
+ ocs_hcu_done_irq_en(hcu_dev);
+ else
+ ocs_hcu_dma_irq_en(hcu_dev);
+
+ reinit_completion(&hcu_dev->irq_done);
+ writel(dma_list->dma_addr, hcu_dev->io_base + OCS_HCU_DMA_NEXT_SRC_DESCR);
+ writel(0, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
+ writel(0, hcu_dev->io_base + OCS_HCU_DMA_DST_SIZE);
+
+ writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
+
+ writel(cfg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);
+
+ if (finalize)
+ writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
+
+ rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+struct ocs_hcu_dma_list *ocs_hcu_dma_list_alloc(struct ocs_hcu_dev *hcu_dev,
+ int max_nents)
+{
+ struct ocs_hcu_dma_list *dma_list;
+
+ dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
+ if (!dma_list)
+ return NULL;
+
+	/* Allocate the array backing the DMA list. */
+ dma_list->head = dma_alloc_coherent(hcu_dev->dev,
+ sizeof(*dma_list->head) * max_nents,
+ &dma_list->dma_addr, GFP_KERNEL);
+ if (!dma_list->head) {
+ kfree(dma_list);
+ return NULL;
+ }
+ dma_list->max_nents = max_nents;
+ dma_list->tail = NULL;
+
+ return dma_list;
+}
+
+void ocs_hcu_dma_list_free(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_dma_list *dma_list)
+{
+ if (!dma_list)
+ return;
+
+ dma_free_coherent(hcu_dev->dev,
+ sizeof(*dma_list->head) * dma_list->max_nents,
+ dma_list->head, dma_list->dma_addr);
+
+ kfree(dma_list);
+}
+
+/* Add a new DMA entry at the end of the OCS DMA list. */
+int ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_dma_list *dma_list,
+ dma_addr_t addr, u32 len)
+{
+ struct device *dev = hcu_dev->dev;
+ struct ocs_hcu_dma_entry *old_tail;
+ struct ocs_hcu_dma_entry *new_tail;
+
+ if (!len)
+ return 0;
+
+ if (!dma_list)
+ return -EINVAL;
+
+ if (addr & ~OCS_HCU_DMA_BIT_MASK) {
+ dev_err(dev,
+ "Unexpected error: Invalid DMA address for OCS HCU\n");
+ return -EINVAL;
+ }
+
+ old_tail = dma_list->tail;
+ new_tail = old_tail ? old_tail + 1 : dma_list->head;
+
+ /* Check if list is full. */
+ if (new_tail - dma_list->head >= dma_list->max_nents)
+ return -ENOMEM;
+
+ /*
+ * If there was an old tail (i.e., this is not the first element we are
+ * adding), un-terminate the old tail and make it point to the new one.
+ */
+ if (old_tail) {
+ old_tail->ll_flags &= ~OCS_LL_DMA_FLAG_TERMINATE;
+ /*
+ * The old tail 'nxt_desc' must point to the DMA address of the
+ * new tail.
+ */
+ old_tail->nxt_desc = dma_list->dma_addr +
+ sizeof(*dma_list->tail) * (new_tail -
+ dma_list->head);
+ }
+
+ new_tail->src_addr = (u32)addr;
+ new_tail->src_len = (u32)len;
+ new_tail->ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
+ new_tail->nxt_desc = 0;
+
+ /* Update list tail with new tail. */
+ dma_list->tail = new_tail;
+
+ return 0;
+}
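+
+/*
+ * Usage sketch (illustration only; 'buf', 'len' and 'nents' are hypothetical
+ * caller variables, not part of this driver): a caller typically DMA-maps its
+ * source buffers and chains them into the list before starting the HCU:
+ *
+ *   dma_list = ocs_hcu_dma_list_alloc(hcu_dev, nents);
+ *   dma_addr = dma_map_single(hcu_dev->dev, buf, len, DMA_TO_DEVICE);
+ *   rc = ocs_hcu_dma_list_add_tail(hcu_dev, dma_list, dma_addr, len);
+ *   ...
+ *   ocs_hcu_dma_list_free(hcu_dev, dma_list);
+ *
+ * Error handling and dma_unmap_single() are omitted for brevity.
+ */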
+
+/**
+ * ocs_hcu_hash_init() - Initialize hash operation context.
+ * @ctx: The context to initialize.
+ * @algo: The hashing algorithm to use.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo)
+{
+ if (!ctx)
+ return -EINVAL;
+
+ ctx->algo = algo;
+ ctx->idata.msg_len_lo = 0;
+ ctx->idata.msg_len_hi = 0;
+ /* No need to set idata.digest to 0. */
+
+ return 0;
+}
+
+/**
+ * ocs_hcu_hash_update() - Perform a hashing iteration.
+ * @hcu_dev: The OCS HCU device to use.
+ * @ctx: The OCS HCU hashing context.
+ * @dma_list: The OCS DMA list mapping the input data to process.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_hash_ctx *ctx,
+ const struct ocs_hcu_dma_list *dma_list)
+{
+ int rc;
+
+ if (!hcu_dev || !ctx)
+ return -EINVAL;
+
+ /* Configure the hardware for the current request. */
+ rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
+ if (rc)
+ return rc;
+
+ /* If we already processed some data, idata needs to be set. */
+ if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
+ ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
+
+ /* Start linked-list DMA hashing. */
+ rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, false);
+ if (rc)
+ return rc;
+
+ /* Update idata and return. */
+ return ocs_hcu_get_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
+}
+
+/**
+ * ocs_hcu_hash_finup() - Update and finalize hash computation.
+ * @hcu_dev: The OCS HCU device to use.
+ * @ctx: The OCS HCU hashing context.
+ * @dma_list: The OCS DMA list mapping the input data to process.
+ * @dgst: The buffer where to save the computed digest.
+ * @dgst_len: The length of @dgst.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_hash_ctx *ctx,
+ const struct ocs_hcu_dma_list *dma_list,
+ u8 *dgst, size_t dgst_len)
+{
+ int rc;
+
+ if (!hcu_dev || !ctx)
+ return -EINVAL;
+
+ /* Configure the hardware for the current request. */
+ rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
+ if (rc)
+ return rc;
+
+ /* If we already processed some data, idata needs to be set. */
+ if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
+ ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
+
+ /* Start linked-list DMA hashing. */
+ rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
+ if (rc)
+ return rc;
+
+ /* Get digest and return. */
+ return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
+}
+
+/**
+ * ocs_hcu_hash_final() - Finalize hash computation.
+ * @hcu_dev: The OCS HCU device to use.
+ * @ctx: The OCS HCU hashing context.
+ * @dgst: The buffer where to save the computed digest.
+ * @dgst_len: The length of @dgst.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_hash_final(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_hash_ctx *ctx, u8 *dgst,
+ size_t dgst_len)
+{
+ int rc;
+
+ if (!hcu_dev || !ctx)
+ return -EINVAL;
+
+ /* Configure the hardware for the current request. */
+ rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
+ if (rc)
+ return rc;
+
+ /* If we already processed some data, idata needs to be set. */
+ if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
+ ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
+
+ /*
+ * Enable HCU interrupts, so that HCU_DONE will be triggered once the
+ * final hash is computed.
+ */
+ ocs_hcu_done_irq_en(hcu_dev);
+ reinit_completion(&hcu_dev->irq_done);
+ writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
+
+ rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
+ if (rc)
+ return rc;
+
+ /* Get digest and return. */
+ return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
+}
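+
+/*
+ * Flow sketch (illustration only; variable names are hypothetical): the
+ * functions above are meant to be used as init -> update (zero or more
+ * times) -> finup/final, e.g. for SHA-256:
+ *
+ *   struct ocs_hcu_hash_ctx ctx;
+ *
+ *   ocs_hcu_hash_init(&ctx, OCS_HCU_ALGO_SHA256);
+ *   ocs_hcu_hash_update(hcu_dev, &ctx, dma_list);
+ *   ocs_hcu_hash_finup(hcu_dev, &ctx, last_dma_list, dgst,
+ *                      SHA256_DIGEST_SIZE);
+ *
+ * where 'dma_list'/'last_dma_list' map the data to hash and 'dgst' is a
+ * SHA256_DIGEST_SIZE-byte buffer.
+ */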
+
+/**
+ * ocs_hcu_digest() - Compute hash digest.
+ * @hcu_dev: The OCS HCU device to use.
+ * @algo: The hash algorithm to use.
+ * @data: The input data to process.
+ * @data_len: The length of @data.
+ * @dgst: The buffer where to save the computed digest.
+ * @dgst_len: The length of @dgst.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_digest(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+ void *data, size_t data_len, u8 *dgst, size_t dgst_len)
+{
+ struct device *dev = hcu_dev->dev;
+ dma_addr_t dma_handle;
+ u32 reg;
+ int rc;
+
+ /* Configure the hardware for the current request. */
+ rc = ocs_hcu_hw_cfg(hcu_dev, algo, false);
+ if (rc)
+ return rc;
+
+ dma_handle = dma_map_single(dev, data, data_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_handle))
+ return -EIO;
+
+ reg = HCU_DMA_SNOOP_MASK | HCU_DMA_EN;
+
+ ocs_hcu_done_irq_en(hcu_dev);
+
+ reinit_completion(&hcu_dev->irq_done);
+
+ writel(dma_handle, hcu_dev->io_base + OCS_HCU_DMA_SRC_ADDR);
+ writel(data_len, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
+ writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
+ writel(reg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);
+
+ writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);
+
+ rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
+ if (rc)
+ return rc;
+
+ dma_unmap_single(dev, dma_handle, data_len, DMA_TO_DEVICE);
+
+ return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
+}
+
+/**
+ * ocs_hcu_hmac() - Compute HMAC.
+ * @hcu_dev: The OCS HCU device to use.
+ * @algo: The hash algorithm to use with HMAC.
+ * @key: The key to use.
+ * @key_len: The length of @key.
+ * @dma_list: The OCS DMA list mapping the input data to process.
+ * @dgst: The buffer where to save the computed HMAC.
+ * @dgst_len: The length of @dgst.
+ *
+ * Return: 0 on success; negative error code otherwise.
+ */
+int ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+ const u8 *key, size_t key_len,
+ const struct ocs_hcu_dma_list *dma_list,
+ u8 *dgst, size_t dgst_len)
+{
+ int rc;
+
+	/* Ensure a key has been provided. */
+ if (!key || key_len == 0)
+ return -EINVAL;
+
+ /* Configure the hardware for the current request. */
+ rc = ocs_hcu_hw_cfg(hcu_dev, algo, true);
+ if (rc)
+ return rc;
+
+ rc = ocs_hcu_write_key(hcu_dev, key, key_len);
+ if (rc)
+ return rc;
+
+ rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
+
+ /* Clear HW key before processing return code. */
+ ocs_hcu_clear_key(hcu_dev);
+
+ if (rc)
+ return rc;
+
+ return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
+}
+
+irqreturn_t ocs_hcu_irq_handler(int irq, void *dev_id)
+{
+ struct ocs_hcu_dev *hcu_dev = dev_id;
+ u32 hcu_irq;
+ u32 dma_irq;
+
+ /* Read and clear the HCU interrupt. */
+ hcu_irq = readl(hcu_dev->io_base + OCS_HCU_ISR);
+ writel(hcu_irq, hcu_dev->io_base + OCS_HCU_ISR);
+
+ /* Read and clear the HCU DMA interrupt. */
+ dma_irq = readl(hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
+ writel(dma_irq, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
+
+ /* Check for errors. */
+ if (hcu_irq & HCU_IRQ_HASH_ERR_MASK || dma_irq & HCU_DMA_IRQ_ERR_MASK) {
+ hcu_dev->irq_err = true;
+ goto complete;
+ }
+
+ /* Check for DONE IRQs. */
+ if (hcu_irq & HCU_IRQ_HASH_DONE || dma_irq & HCU_DMA_IRQ_SRC_DONE)
+ goto complete;
+
+ return IRQ_NONE;
+
+complete:
+ complete(&hcu_dev->irq_done);
+
+ return IRQ_HANDLED;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/keembay/ocs-hcu.h b/drivers/crypto/keembay/ocs-hcu.h
new file mode 100644
index 000000000000..fbbbb92a0592
--- /dev/null
+++ b/drivers/crypto/keembay/ocs-hcu.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel Keem Bay OCS HCU Crypto Driver.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ */
+
+#include <linux/dma-mapping.h>
+
+#ifndef _CRYPTO_OCS_HCU_H
+#define _CRYPTO_OCS_HCU_H
+
+#define OCS_HCU_DMA_BIT_MASK DMA_BIT_MASK(32)
+
+#define OCS_HCU_HW_KEY_LEN 64
+
+struct ocs_hcu_dma_list;
+
+enum ocs_hcu_algo {
+ OCS_HCU_ALGO_SHA256 = 2,
+ OCS_HCU_ALGO_SHA224 = 3,
+ OCS_HCU_ALGO_SHA384 = 4,
+ OCS_HCU_ALGO_SHA512 = 5,
+ OCS_HCU_ALGO_SM3 = 6,
+};
+
+/**
+ * struct ocs_hcu_dev - OCS HCU device context.
+ * @list: List of device contexts.
+ * @dev: OCS HCU device.
+ * @io_base: Base address of OCS HCU registers.
+ * @engine: Crypto engine for the device.
+ * @irq: IRQ number.
+ * @irq_done: Completion for IRQ.
+ * @irq_err: Flag indicating an IRQ error has happened.
+ */
+struct ocs_hcu_dev {
+ struct list_head list;
+ struct device *dev;
+ void __iomem *io_base;
+ struct crypto_engine *engine;
+ int irq;
+ struct completion irq_done;
+ bool irq_err;
+};
+
+/**
+ * struct ocs_hcu_idata - Intermediate data generated by the HCU.
+ * @msg_len_lo: Length of data the HCU has operated on in bits, low 32b.
+ * @msg_len_hi: Length of data the HCU has operated on in bits, high 32b.
+ * @digest: The digest read from the HCU. If the HCU is terminated, it will
+ * contain the actual hash digest. Otherwise it is the intermediate
+ * state.
+ */
+struct ocs_hcu_idata {
+ u32 msg_len_lo;
+ u32 msg_len_hi;
+ u8 digest[SHA512_DIGEST_SIZE];
+};
+
+/**
+ * struct ocs_hcu_hash_ctx - Context for OCS HCU hashing operation.
+ * @algo: The hashing algorithm being used.
+ * @idata: The current intermediate data.
+ */
+struct ocs_hcu_hash_ctx {
+ enum ocs_hcu_algo algo;
+ struct ocs_hcu_idata idata;
+};
+
+irqreturn_t ocs_hcu_irq_handler(int irq, void *dev_id);
+
+struct ocs_hcu_dma_list *ocs_hcu_dma_list_alloc(struct ocs_hcu_dev *hcu_dev,
+ int max_nents);
+
+void ocs_hcu_dma_list_free(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_dma_list *dma_list);
+
+int ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_dma_list *dma_list,
+ dma_addr_t addr, u32 len);
+
+int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo);
+
+int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
+ struct ocs_hcu_hash_ctx *ctx,
+ const struct ocs_hcu_dma_list *dma_list);
+
+int ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_hash_ctx *ctx,
+ const struct ocs_hcu_dma_list *dma_list,
+ u8 *dgst, size_t dgst_len);
+
+int ocs_hcu_hash_final(struct ocs_hcu_dev *hcu_dev,
+ const struct ocs_hcu_hash_ctx *ctx, u8 *dgst,
+ size_t dgst_len);
+
+int ocs_hcu_digest(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+ void *data, size_t data_len, u8 *dgst, size_t dgst_len);
+
+int ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
+ const u8 *key, size_t key_len,
+ const struct ocs_hcu_dma_list *dma_list,
+ u8 *dgst, size_t dgst_len);
+
+#endif /* _CRYPTO_OCS_HCU_H */
diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
index 13063384f958..9125199f1702 100644
--- a/drivers/crypto/marvell/Kconfig
+++ b/drivers/crypto/marvell/Kconfig
@@ -35,3 +35,18 @@ config CRYPTO_DEV_OCTEONTX_CPT
To compile this driver as module, choose M here:
the modules will be called octeontx-cpt and octeontx-cptvf
+
+config CRYPTO_DEV_OCTEONTX2_CPT
+ tristate "Marvell OcteonTX2 CPT driver"
+ depends on ARCH_THUNDER2 || COMPILE_TEST
+ depends on PCI_MSI && 64BIT
+ depends on CRYPTO_LIB_AES
+ depends on NET_VENDOR_MARVELL
+ select OCTEONTX2_MBOX
+ select CRYPTO_DEV_MARVELL
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
+ select CRYPTO_AEAD
+ help
+ This driver allows you to utilize the Marvell Cryptographic
+	  Accelerator Unit (CPT) found in the OcteonTX2 series of processors.
diff --git a/drivers/crypto/marvell/Makefile b/drivers/crypto/marvell/Makefile
index 6c6a1519b0f1..39db6d9c0aaf 100644
--- a/drivers/crypto/marvell/Makefile
+++ b/drivers/crypto/marvell/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += cesa/
obj-$(CONFIG_CRYPTO_DEV_OCTEONTX_CPT) += octeontx/
+obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += octeontx2/
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 06211858bf2e..f14aac532f53 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -381,10 +381,10 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
engine->pool = of_gen_pool_get(cesa->dev->of_node,
"marvell,crypto-srams", idx);
if (engine->pool) {
- engine->sram = gen_pool_dma_alloc(engine->pool,
- cesa->sram_size,
- &engine->sram_dma);
- if (engine->sram)
+ engine->sram_pool = gen_pool_dma_alloc(engine->pool,
+ cesa->sram_size,
+ &engine->sram_dma);
+ if (engine->sram_pool)
return 0;
engine->pool = NULL;
@@ -422,7 +422,7 @@ static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
struct mv_cesa_engine *engine = &cesa->engines[idx];
if (engine->pool)
- gen_pool_free(engine->pool, (unsigned long)engine->sram,
+ gen_pool_free(engine->pool, (unsigned long)engine->sram_pool,
cesa->sram_size);
else
dma_unmap_resource(cesa->dev, engine->sram_dma,
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index fa56b45620c7..c1007f2ba79c 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -428,6 +428,7 @@ struct mv_cesa_dev {
* @id: engine id
* @regs: engine registers
* @sram: SRAM memory region
+ * @sram_pool: SRAM memory region from pool
* @sram_dma: DMA address of the SRAM memory region
* @lock: engine lock
* @req: current crypto request
@@ -448,7 +449,10 @@ struct mv_cesa_dev {
struct mv_cesa_engine {
int id;
void __iomem *regs;
- void __iomem *sram;
+ union {
+ void __iomem *sram;
+ void *sram_pool;
+ };
dma_addr_t sram_dma;
spinlock_t lock;
struct crypto_async_request *req;
@@ -867,6 +871,31 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
struct mv_cesa_sg_dma_iter *sgiter,
gfp_t gfp_flags);
+size_t mv_cesa_sg_copy(struct mv_cesa_engine *engine,
+ struct scatterlist *sgl, unsigned int nents,
+ unsigned int sram_off, size_t buflen, off_t skip,
+ bool to_sram);
+
+static inline size_t mv_cesa_sg_copy_to_sram(struct mv_cesa_engine *engine,
+ struct scatterlist *sgl,
+ unsigned int nents,
+ unsigned int sram_off,
+ size_t buflen, off_t skip)
+{
+ return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip,
+ true);
+}
+
+static inline size_t mv_cesa_sg_copy_from_sram(struct mv_cesa_engine *engine,
+ struct scatterlist *sgl,
+ unsigned int nents,
+ unsigned int sram_off,
+ size_t buflen, off_t skip)
+{
+ return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip,
+ false);
+}
+
/* Algorithm definitions */
extern struct ahash_alg mv_md5_alg;
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index b4a6ff9dd6d5..b739d3b873dc 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -89,22 +89,29 @@ static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
CESA_SA_SRAM_PAYLOAD_SIZE);
mv_cesa_adjust_op(engine, &sreq->op);
- memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
+ if (engine->pool)
+ memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
+ else
+ memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
- len = sg_pcopy_to_buffer(req->src, creq->src_nents,
- engine->sram + CESA_SA_DATA_SRAM_OFFSET,
- len, sreq->offset);
+ len = mv_cesa_sg_copy_to_sram(engine, req->src, creq->src_nents,
+ CESA_SA_DATA_SRAM_OFFSET, len,
+ sreq->offset);
sreq->size = len;
mv_cesa_set_crypt_op_len(&sreq->op, len);
/* FIXME: only update enc_len field */
if (!sreq->skip_ctx) {
- memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
+ if (engine->pool)
+ memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
+ else
+ memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
sreq->skip_ctx = true;
- } else {
+ } else if (engine->pool)
+ memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op.desc));
+ else
memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
- }
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
@@ -121,9 +128,9 @@ static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
struct mv_cesa_engine *engine = creq->base.engine;
size_t len;
- len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
- engine->sram + CESA_SA_DATA_SRAM_OFFSET,
- sreq->size, sreq->offset);
+ len = mv_cesa_sg_copy_from_sram(engine, req->dst, creq->dst_nents,
+ CESA_SA_DATA_SRAM_OFFSET, sreq->size,
+ sreq->offset);
sreq->offset += len;
if (sreq->offset < req->cryptlen)
@@ -214,11 +221,14 @@ mv_cesa_skcipher_complete(struct crypto_async_request *req)
basereq = &creq->base;
memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
ivsize);
- } else {
+ } else if (engine->pool)
+ memcpy(skreq->iv,
+ engine->sram_pool + CESA_SA_CRYPT_IV_SRAM_OFFSET,
+ ivsize);
+ else
memcpy_fromio(skreq->iv,
engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
ivsize);
- }
}
static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index 8cf9fd518d86..c72b0672fc71 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -168,7 +168,12 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
int i;
mv_cesa_adjust_op(engine, &creq->op_tmpl);
- memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
+ if (engine->pool)
+ memcpy(engine->sram_pool, &creq->op_tmpl,
+ sizeof(creq->op_tmpl));
+ else
+ memcpy_toio(engine->sram, &creq->op_tmpl,
+ sizeof(creq->op_tmpl));
if (!sreq->offset) {
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
@@ -177,9 +182,14 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
engine->regs + CESA_IVDIG(i));
}
- if (creq->cache_ptr)
- memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
- creq->cache, creq->cache_ptr);
+ if (creq->cache_ptr) {
+ if (engine->pool)
+ memcpy(engine->sram_pool + CESA_SA_DATA_SRAM_OFFSET,
+ creq->cache, creq->cache_ptr);
+ else
+ memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
+ creq->cache, creq->cache_ptr);
+ }
len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
CESA_SA_SRAM_PAYLOAD_SIZE);
@@ -190,12 +200,10 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
}
if (len - creq->cache_ptr)
- sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
- engine->sram +
- CESA_SA_DATA_SRAM_OFFSET +
- creq->cache_ptr,
- len - creq->cache_ptr,
- sreq->offset);
+ sreq->offset += mv_cesa_sg_copy_to_sram(
+ engine, req->src, creq->src_nents,
+ CESA_SA_DATA_SRAM_OFFSET + creq->cache_ptr,
+ len - creq->cache_ptr, sreq->offset);
op = &creq->op_tmpl;
@@ -220,16 +228,28 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
len &= CESA_HASH_BLOCK_SIZE_MSK;
new_cache_ptr = 64 - trailerlen;
- memcpy_fromio(creq->cache,
- engine->sram +
- CESA_SA_DATA_SRAM_OFFSET + len,
- new_cache_ptr);
+ if (engine->pool)
+ memcpy(creq->cache,
+ engine->sram_pool +
+ CESA_SA_DATA_SRAM_OFFSET + len,
+ new_cache_ptr);
+ else
+ memcpy_fromio(creq->cache,
+ engine->sram +
+ CESA_SA_DATA_SRAM_OFFSET +
+ len,
+ new_cache_ptr);
} else {
i = mv_cesa_ahash_pad_req(creq, creq->cache);
len += i;
- memcpy_toio(engine->sram + len +
- CESA_SA_DATA_SRAM_OFFSET,
- creq->cache, i);
+ if (engine->pool)
+ memcpy(engine->sram_pool + len +
+ CESA_SA_DATA_SRAM_OFFSET,
+ creq->cache, i);
+ else
+ memcpy_toio(engine->sram + len +
+ CESA_SA_DATA_SRAM_OFFSET,
+ creq->cache, i);
}
if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
@@ -243,7 +263,10 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);
/* FIXME: only update enc_len field */
- memcpy_toio(engine->sram, op, sizeof(*op));
+ if (engine->pool)
+ memcpy(engine->sram_pool, op, sizeof(*op));
+ else
+ memcpy_toio(engine->sram, op, sizeof(*op));
if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
index 5d9c48fb72b2..f0b5537038c2 100644
--- a/drivers/crypto/marvell/cesa/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -177,7 +177,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
/*
* Save the last request in error to engine->req, so that the core
- * knows which request was fautly
+ * knows which request was faulty
*/
if (res) {
spin_lock_bh(&engine->lock);
@@ -350,3 +350,53 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
return 0;
}
+
+size_t mv_cesa_sg_copy(struct mv_cesa_engine *engine,
+ struct scatterlist *sgl, unsigned int nents,
+ unsigned int sram_off, size_t buflen, off_t skip,
+ bool to_sram)
+{
+ unsigned int sg_flags = SG_MITER_ATOMIC;
+ struct sg_mapping_iter miter;
+ unsigned int offset = 0;
+
+ if (to_sram)
+ sg_flags |= SG_MITER_FROM_SG;
+ else
+ sg_flags |= SG_MITER_TO_SG;
+
+ sg_miter_start(&miter, sgl, nents, sg_flags);
+
+ if (!sg_miter_skip(&miter, skip))
+ return 0;
+
+ while ((offset < buflen) && sg_miter_next(&miter)) {
+ unsigned int len;
+
+ len = min(miter.length, buflen - offset);
+
+ if (to_sram) {
+ if (engine->pool)
+ memcpy(engine->sram_pool + sram_off + offset,
+ miter.addr, len);
+ else
+ memcpy_toio(engine->sram + sram_off + offset,
+ miter.addr, len);
+ } else {
+ if (engine->pool)
+ memcpy(miter.addr,
+ engine->sram_pool + sram_off + offset,
+ len);
+ else
+ memcpy_fromio(miter.addr,
+ engine->sram + sram_off + offset,
+ len);
+ }
+
+ offset += len;
+ }
+
+ sg_miter_stop(&miter);
+
+ return offset;
+}
diff --git a/drivers/crypto/marvell/octeontx2/Makefile b/drivers/crypto/marvell/octeontx2/Makefile
new file mode 100644
index 000000000000..b9c6201019e0
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += octeontx2-cpt.o octeontx2-cptvf.o
+
+octeontx2-cpt-objs := otx2_cptpf_main.o otx2_cptpf_mbox.o \
+ otx2_cpt_mbox_common.o otx2_cptpf_ucode.o otx2_cptlf.o
+octeontx2-cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o otx2_cptlf.o \
+ otx2_cpt_mbox_common.o otx2_cptvf_reqmgr.o \
+ otx2_cptvf_algs.o
+
+ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
new file mode 100644
index 000000000000..3518fac29834
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPT_COMMON_H
+#define __OTX2_CPT_COMMON_H
+
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include "otx2_cpt_hw_types.h"
+#include "rvu.h"
+#include "mbox.h"
+
+#define OTX2_CPT_MAX_VFS_NUM 128
+#define OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs) \
+ (((blk) << 20) | ((slot) << 12) | (offs))
+#define OTX2_CPT_RVU_PFFUNC(pf, func) \
+ ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
+ (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
+#define OTX2_CPT_INVALID_CRYPTO_ENG_GRP 0xFF
+#define OTX2_CPT_NAME_LENGTH 64
+#define OTX2_CPT_DMA_MINALIGN 128
+
+#define BAD_OTX2_CPT_ENG_TYPE OTX2_CPT_MAX_ENG_TYPES
+
+enum otx2_cpt_eng_type {
+ OTX2_CPT_AE_TYPES = 1,
+ OTX2_CPT_SE_TYPES = 2,
+ OTX2_CPT_IE_TYPES = 3,
+ OTX2_CPT_MAX_ENG_TYPES,
+};
+
+/* Take mbox id from end of CPT mbox range in AF (range 0xA00 - 0xBFF) */
+#define MBOX_MSG_GET_ENG_GRP_NUM 0xBFF
+#define MBOX_MSG_GET_CAPS 0xBFD
+#define MBOX_MSG_GET_KVF_LIMITS 0xBFC
+
+/*
+ * Message request and response to get the engine group number
+ * to which a given type of engines (SE, AE, IE) is attached.
+ * These messages are only used between the CPT PF and CPT VF.
+ */
+struct otx2_cpt_egrp_num_msg {
+ struct mbox_msghdr hdr;
+ u8 eng_type;
+};
+
+struct otx2_cpt_egrp_num_rsp {
+ struct mbox_msghdr hdr;
+ u8 eng_type;
+ u8 eng_grp_num;
+};
+
+/*
+ * Message request and response to get kernel crypto limits.
+ * These messages are only used between the CPT PF and CPT VF.
+ */
+struct otx2_cpt_kvf_limits_msg {
+ struct mbox_msghdr hdr;
+};
+
+struct otx2_cpt_kvf_limits_rsp {
+ struct mbox_msghdr hdr;
+ u8 kvf_limits;
+};
+
+/* CPT HW capabilities */
+union otx2_cpt_eng_caps {
+ u64 u;
+ struct {
+ u64 reserved_0_4:5;
+ u64 mul:1;
+ u64 sha1_sha2:1;
+ u64 chacha20:1;
+ u64 zuc_snow3g:1;
+ u64 sha3:1;
+ u64 aes:1;
+ u64 kasumi:1;
+ u64 des:1;
+ u64 crc:1;
+ u64 reserved_14_63:50;
+ };
+};
+
+/*
+ * Message request and response to get HW capabilities for each
+ * engine type (SE, IE, AE).
+ * These messages are only used between the CPT PF and CPT VF.
+ */
+struct otx2_cpt_caps_msg {
+ struct mbox_msghdr hdr;
+};
+
+struct otx2_cpt_caps_rsp {
+ struct mbox_msghdr hdr;
+ u16 cpt_pf_drv_version;
+ u8 cpt_revision;
+ union otx2_cpt_eng_caps eng_caps[OTX2_CPT_MAX_ENG_TYPES];
+};
+
+static inline void otx2_cpt_write64(void __iomem *reg_base, u64 blk, u64 slot,
+ u64 offs, u64 val)
+{
+ writeq_relaxed(val, reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs));
+}
+
+static inline u64 otx2_cpt_read64(void __iomem *reg_base, u64 blk, u64 slot,
+ u64 offs)
+{
+ return readq_relaxed(reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs));
+}
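+
+/*
+ * Example (illustration only): with OTX2_CPT_RVU_FUNC_ADDR_S(), reading
+ * offset OTX2_CPT_LF_CTL (0x10) of slot 0 in block 'blk' resolves to
+ * reg_base + (((blk) << 20) | (0 << 12) | 0x10); the actual block address
+ * value depends on the RVU block mapping defined elsewhere.
+ */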
+
+int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
+int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
+
+int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox,
+ struct pci_dev *pdev);
+int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox,
+ struct pci_dev *pdev, u64 reg, u64 *val);
+int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 val);
+int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 *val);
+int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 val);
+struct otx2_cptlfs_info;
+int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs);
+int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
+int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs);
+
+#endif /* __OTX2_CPT_COMMON_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
new file mode 100644
index 000000000000..ecafc42f37a2
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPT_HW_TYPES_H
+#define __OTX2_CPT_HW_TYPES_H
+
+#include <linux/types.h>
+
+/* Device IDs */
+#define OTX2_CPT_PCI_PF_DEVICE_ID 0xA0FD
+#define OTX2_CPT_PCI_VF_DEVICE_ID 0xA0FE
+
+/* Mailbox interrupts offset */
+#define OTX2_CPT_PF_MBOX_INT 6
+#define OTX2_CPT_PF_INT_VEC_E_MBOXX(x, a) ((x) + (a))
+
+/* Maximum supported microcode groups */
+#define OTX2_CPT_MAX_ENGINE_GROUPS 8
+
+/* CPT instruction size in bytes */
+#define OTX2_CPT_INST_SIZE 64
+/*
+ * CPT VF MSIX vectors and their offsets
+ */
+#define OTX2_CPT_VF_MSIX_VECTORS 1
+#define OTX2_CPT_VF_INTR_MBOX_MASK BIT(0)
+
+/* CPT LF MSIX vectors */
+#define OTX2_CPT_LF_MSIX_VECTORS 2
+
+/* OcteonTX2 CPT PF registers */
+#define OTX2_CPT_PF_CONSTANTS (0x0)
+#define OTX2_CPT_PF_RESET (0x100)
+#define OTX2_CPT_PF_DIAG (0x120)
+#define OTX2_CPT_PF_BIST_STATUS (0x160)
+#define OTX2_CPT_PF_ECC0_CTL (0x200)
+#define OTX2_CPT_PF_ECC0_FLIP (0x210)
+#define OTX2_CPT_PF_ECC0_INT (0x220)
+#define OTX2_CPT_PF_ECC0_INT_W1S (0x230)
+#define OTX2_CPT_PF_ECC0_ENA_W1S (0x240)
+#define OTX2_CPT_PF_ECC0_ENA_W1C (0x250)
+#define OTX2_CPT_PF_MBOX_INTX(b) (0x400 | (b) << 3)
+#define OTX2_CPT_PF_MBOX_INT_W1SX(b) (0x420 | (b) << 3)
+#define OTX2_CPT_PF_MBOX_ENA_W1CX(b) (0x440 | (b) << 3)
+#define OTX2_CPT_PF_MBOX_ENA_W1SX(b) (0x460 | (b) << 3)
+#define OTX2_CPT_PF_EXEC_INT (0x500)
+#define OTX2_CPT_PF_EXEC_INT_W1S (0x520)
+#define OTX2_CPT_PF_EXEC_ENA_W1C (0x540)
+#define OTX2_CPT_PF_EXEC_ENA_W1S (0x560)
+#define OTX2_CPT_PF_GX_EN(b) (0x600 | (b) << 3)
+#define OTX2_CPT_PF_EXEC_INFO (0x700)
+#define OTX2_CPT_PF_EXEC_BUSY (0x800)
+#define OTX2_CPT_PF_EXEC_INFO0 (0x900)
+#define OTX2_CPT_PF_EXEC_INFO1 (0x910)
+#define OTX2_CPT_PF_INST_REQ_PC (0x10000)
+#define OTX2_CPT_PF_INST_LATENCY_PC (0x10020)
+#define OTX2_CPT_PF_RD_REQ_PC (0x10040)
+#define OTX2_CPT_PF_RD_LATENCY_PC (0x10060)
+#define OTX2_CPT_PF_RD_UC_PC (0x10080)
+#define OTX2_CPT_PF_ACTIVE_CYCLES_PC (0x10100)
+#define OTX2_CPT_PF_EXE_CTL (0x4000000)
+#define OTX2_CPT_PF_EXE_STATUS (0x4000008)
+#define OTX2_CPT_PF_EXE_CLK (0x4000010)
+#define OTX2_CPT_PF_EXE_DBG_CTL (0x4000018)
+#define OTX2_CPT_PF_EXE_DBG_DATA (0x4000020)
+#define OTX2_CPT_PF_EXE_BIST_STATUS (0x4000028)
+#define OTX2_CPT_PF_EXE_REQ_TIMER (0x4000030)
+#define OTX2_CPT_PF_EXE_MEM_CTL (0x4000038)
+#define OTX2_CPT_PF_EXE_PERF_CTL (0x4001000)
+#define OTX2_CPT_PF_EXE_DBG_CNTX(b) (0x4001100 | (b) << 3)
+#define OTX2_CPT_PF_EXE_PERF_EVENT_CNT (0x4001180)
+#define OTX2_CPT_PF_EXE_EPCI_INBX_CNT(b) (0x4001200 | (b) << 3)
+#define OTX2_CPT_PF_EXE_EPCI_OUTBX_CNT(b) (0x4001240 | (b) << 3)
+#define OTX2_CPT_PF_ENGX_UCODE_BASE(b) (0x4002000 | (b) << 3)
+#define OTX2_CPT_PF_QX_CTL(b) (0x8000000 | (b) << 20)
+#define OTX2_CPT_PF_QX_GMCTL(b) (0x8000020 | (b) << 20)
+#define OTX2_CPT_PF_QX_CTL2(b) (0x8000100 | (b) << 20)
+#define OTX2_CPT_PF_VFX_MBOXX(b, c) (0x8001000 | (b) << 20 | \
+ (c) << 8)
+
+/* OcteonTX2 CPT LF registers */
+#define OTX2_CPT_LF_CTL (0x10)
+#define OTX2_CPT_LF_DONE_WAIT (0x30)
+#define OTX2_CPT_LF_INPROG (0x40)
+#define OTX2_CPT_LF_DONE (0x50)
+#define OTX2_CPT_LF_DONE_ACK (0x60)
+#define OTX2_CPT_LF_DONE_INT_ENA_W1S (0x90)
+#define OTX2_CPT_LF_DONE_INT_ENA_W1C (0xa0)
+#define OTX2_CPT_LF_MISC_INT (0xb0)
+#define OTX2_CPT_LF_MISC_INT_W1S (0xc0)
+#define OTX2_CPT_LF_MISC_INT_ENA_W1S (0xd0)
+#define OTX2_CPT_LF_MISC_INT_ENA_W1C (0xe0)
+#define OTX2_CPT_LF_Q_BASE (0xf0)
+#define OTX2_CPT_LF_Q_SIZE (0x100)
+#define OTX2_CPT_LF_Q_INST_PTR (0x110)
+#define OTX2_CPT_LF_Q_GRP_PTR (0x120)
+#define OTX2_CPT_LF_NQX(a) (0x400 | (a) << 3)
+#define OTX2_CPT_RVU_FUNC_BLKADDR_SHIFT 20
+/* LMT LF registers */
+#define OTX2_CPT_LMT_LFBASE BIT_ULL(OTX2_CPT_RVU_FUNC_BLKADDR_SHIFT)
+#define OTX2_CPT_LMT_LF_LMTLINEX(a) (OTX2_CPT_LMT_LFBASE | 0x000 | \
+ (a) << 12)
+/* RVU VF registers */
+#define OTX2_RVU_VF_INT (0x20)
+#define OTX2_RVU_VF_INT_W1S (0x28)
+#define OTX2_RVU_VF_INT_ENA_W1S (0x30)
+#define OTX2_RVU_VF_INT_ENA_W1C (0x38)
+
+/*
+ * Enumeration otx2_cpt_ucode_comp_code_e
+ *
+ * Enumerates microcode completion codes
+ */
+enum otx2_cpt_ucode_comp_code_e {
+ OTX2_CPT_UCC_SUCCESS = 0x00,
+ OTX2_CPT_UCC_INVALID_OPCODE = 0x01,
+
+ /* Scatter gather */
+ OTX2_CPT_UCC_SG_WRITE_LENGTH = 0x02,
+ OTX2_CPT_UCC_SG_LIST = 0x03,
+ OTX2_CPT_UCC_SG_NOT_SUPPORTED = 0x04,
+};
+
+/*
+ * Enumeration otx2_cpt_comp_e
+ *
+ * OcteonTX2 CPT Completion Enumeration
+ * Enumerates the values of CPT_RES_S[COMPCODE].
+ */
+enum otx2_cpt_comp_e {
+ OTX2_CPT_COMP_E_NOTDONE = 0x00,
+ OTX2_CPT_COMP_E_GOOD = 0x01,
+ OTX2_CPT_COMP_E_FAULT = 0x02,
+ OTX2_CPT_COMP_E_HWERR = 0x04,
+ OTX2_CPT_COMP_E_INSTERR = 0x05,
+ OTX2_CPT_COMP_E_LAST_ENTRY = 0x06
+};
+
+/*
+ * Enumeration otx2_cpt_vf_int_vec_e
+ *
+ * OcteonTX2 CPT VF MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+enum otx2_cpt_vf_int_vec_e {
+ OTX2_CPT_VF_INT_VEC_E_MBOX = 0x00
+};
+
+/*
+ * Enumeration otx2_cpt_lf_int_vec_e
+ *
+ * OcteonTX2 CPT LF MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+enum otx2_cpt_lf_int_vec_e {
+ OTX2_CPT_LF_INT_VEC_E_MISC = 0x00,
+ OTX2_CPT_LF_INT_VEC_E_DONE = 0x01
+};
+
+/*
+ * Structure otx2_cpt_inst_s
+ *
+ * CPT Instruction Structure
+ * This structure specifies the instruction layout. Instructions are
+ * stored in memory as little-endian unless CPT()_PF_Q()_CTL[INST_BE] is set.
+ * cpt_inst_s_s
+ * Word 0
+ * doneint:1 Done interrupt.
+ * 0 = No interrupts related to this instruction.
+ * 1 = When the instruction completes, CPT()_VQ()_DONE[DONE] will be
+ * incremented, and based on the rules described there an interrupt may
+ * occur.
+ * Word 1
+ * res_addr [127: 64] Result IOVA.
+ * If nonzero, specifies where to write CPT_RES_S.
+ * If zero, no result structure will be written.
+ * Address must be 16-byte aligned.
+ * Bits <63:49> are ignored by hardware; software should use a
+ * sign-extended bit <48> for forward compatibility.
+ * Word 2
+ * grp:10 [171:162] If [WQ_PTR] is nonzero, the SSO guest-group to use when
+ * CPT submits work to SSO.
+ * For the SSO to not discard the add-work request, FPA_PF_MAP() must map
+ * [GRP] and CPT()_PF_Q()_GMCTL[GMID] as valid.
+ * tt:2 [161:160] If [WQ_PTR] is nonzero, the SSO tag type to use when CPT
+ * submits work to SSO
+ * tag:32 [159:128] If [WQ_PTR] is nonzero, the SSO tag to use when CPT
+ * submits work to SSO.
+ * Word 3
+ * wq_ptr [255:192] If [WQ_PTR] is nonzero, it is a pointer to a
+ * work-queue entry that CPT submits work to SSO after all context,
+ * output data, and result write operations are visible to other
+ * CNXXXX units and the cores. Bits <2:0> must be zero.
+ * Bits <63:49> are ignored by hardware; software should
+ * use a sign-extended bit <48> for forward compatibility.
+ * Internal:
+ * Bits <63:49>, <2:0> are ignored by hardware, treated as always 0x0.
+ * Word 4
+ * ei0; [319:256] Engine instruction word 0. Passed to the AE/SE.
+ * Word 5
+ * ei1; [383:320] Engine instruction word 1. Passed to the AE/SE.
+ * Word 6
+ * ei2; [447:384] Engine instruction word 2. Passed to the AE/SE.
+ * Word 7
+ * ei3; [511:448] Engine instruction word 3. Passed to the AE/SE.
+ *
+ */
+union otx2_cpt_inst_s {
+ u64 u[8];
+
+ struct {
+ /* Word 0 */
+ u64 nixtxl:3;
+ u64 doneint:1;
+ u64 nixtx_addr:60;
+ /* Word 1 */
+ u64 res_addr;
+ /* Word 2 */
+ u64 tag:32;
+ u64 tt:2;
+ u64 grp:10;
+ u64 reserved_172_175:4;
+ u64 rvu_pf_func:16;
+ /* Word 3 */
+ u64 qord:1;
+ u64 reserved_193_194:2;
+ u64 wq_ptr:61;
+ /* Word 4 */
+ u64 ei0;
+ /* Word 5 */
+ u64 ei1;
+ /* Word 6 */
+ u64 ei2;
+ /* Word 7 */
+ u64 ei3;
+ } s;
+};
+
+/*
+ * Structure otx2_cpt_res_s
+ *
+ * CPT Result Structure
+ * The CPT coprocessor writes the result structure after it completes a
+ * CPT_INST_S instruction. The result structure is exactly 16 bytes, and
+ * each instruction completion produces exactly one result structure.
+ *
+ * This structure is stored in memory as little-endian unless
+ * CPT()_PF_Q()_CTL[INST_BE] is set.
+ * cpt_res_s_s
+ * Word 0
+ * doneint:1 [16:16] Done interrupt. This bit is copied from the
+ * corresponding instruction's CPT_INST_S[DONEINT].
+ * compcode:8 [7:0] Indicates completion/error status of the CPT coprocessor
+ * for the associated instruction, as enumerated by CPT_COMP_E.
+ * Core software may write the memory location containing [COMPCODE] to
+ * 0x0 before ringing the doorbell, and then poll for completion by
+ * checking for a nonzero value.
+ * Once the core observes a nonzero [COMPCODE] value in this case, the CPT
+ * coprocessor will have also completed L2/DRAM write operations.
+ * Word 1
+ * reserved
+ *
+ */
+union otx2_cpt_res_s {
+ u64 u[2];
+
+ struct {
+ u64 compcode:8;
+ u64 uc_compcode:8;
+ u64 doneint:1;
+ u64 reserved_17_63:47;
+ u64 reserved_64_127;
+ } s;
+};
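+
+/*
+ * Illustrative polling sketch based on the description above (variable
+ * names are examples only): software clears [COMPCODE] before submitting
+ * the instruction and then spins on the result structure until the
+ * completion code becomes nonzero:
+ *
+ *   result->s.compcode = OTX2_CPT_COMP_E_NOTDONE;
+ *   ... enqueue CPT_INST_S with res_addr pointing at result ...
+ *   while (READ_ONCE(result->s.compcode) == OTX2_CPT_COMP_E_NOTDONE)
+ *           cpu_relax();
+ */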
+
+/*
+ * Register (RVU_PF_BAR0) cpt#_af_constants1
+ *
+ * CPT AF Constants Register
+ * This register contains implementation-related parameters of CPT.
+ */
+union otx2_cptx_af_constants1 {
+ u64 u;
+ struct otx2_cptx_af_constants1_s {
+ u64 se:16;
+ u64 ie:16;
+ u64 ae:16;
+ u64 reserved_48_63:16;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_misc_int
+ *
+ * This register contains the per-queue miscellaneous interrupts.
+ *
+ */
+union otx2_cptx_lf_misc_int {
+ u64 u;
+ struct otx2_cptx_lf_misc_int_s {
+ u64 reserved_0:1;
+ u64 nqerr:1;
+ u64 irde:1;
+ u64 nwrp:1;
+ u64 reserved_4:1;
+ u64 hwerr:1;
+ u64 fault:1;
+ u64 reserved_7_63:57;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_misc_int_ena_w1s
+ *
+ * This register sets interrupt enable bits.
+ *
+ */
+union otx2_cptx_lf_misc_int_ena_w1s {
+ u64 u;
+ struct otx2_cptx_lf_misc_int_ena_w1s_s {
+ u64 reserved_0:1;
+ u64 nqerr:1;
+ u64 irde:1;
+ u64 nwrp:1;
+ u64 reserved_4:1;
+ u64 hwerr:1;
+ u64 fault:1;
+ u64 reserved_7_63:57;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_ctl
+ *
+ * This register configures the queue.
+ *
+ * When the queue is not execution-quiescent (see CPT_LF_INPROG[EENA,INFLIGHT]),
+ * software must only write this register with [ENA]=0.
+ */
+union otx2_cptx_lf_ctl {
+ u64 u;
+ struct otx2_cptx_lf_ctl_s {
+ u64 ena:1;
+ u64 fc_ena:1;
+ u64 fc_up_crossing:1;
+ u64 reserved_3:1;
+ u64 fc_hyst_bits:4;
+ u64 reserved_8_63:56;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_done_wait
+ *
+ * This register specifies the per-queue interrupt coalescing settings.
+ */
+union otx2_cptx_lf_done_wait {
+ u64 u;
+ struct otx2_cptx_lf_done_wait_s {
+ u64 num_wait:20;
+ u64 reserved_20_31:12;
+ u64 time_wait:16;
+ u64 reserved_48_63:16;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_done
+ *
+ * This register contains the per-queue instruction done count.
+ */
+union otx2_cptx_lf_done {
+ u64 u;
+ struct otx2_cptx_lf_done_s {
+ u64 done:20;
+ u64 reserved_20_63:44;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_inprog
+ *
+ * This register contains the per-queue in-flight instruction status.
+ *
+ */
+union otx2_cptx_lf_inprog {
+ u64 u;
+ struct otx2_cptx_lf_inprog_s {
+ u64 inflight:9;
+ u64 reserved_9_15:7;
+ u64 eena:1;
+ u64 grp_drp:1;
+ u64 reserved_18_30:13;
+ u64 grb_partial:1;
+ u64 grb_cnt:8;
+ u64 gwb_cnt:8;
+ u64 reserved_48_63:16;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_q_base
+ *
+ * CPT initializes these CSR fields to these values on any CPT_LF_Q_BASE write:
+ * _ CPT_LF_Q_INST_PTR[XQ_XOR]=0.
+ * _ CPT_LF_Q_INST_PTR[NQ_PTR]=2.
+ * _ CPT_LF_Q_INST_PTR[DQ_PTR]=2.
+ * _ CPT_LF_Q_GRP_PTR[XQ_XOR]=0.
+ * _ CPT_LF_Q_GRP_PTR[NQ_PTR]=1.
+ * _ CPT_LF_Q_GRP_PTR[DQ_PTR]=1.
+ */
+union otx2_cptx_lf_q_base {
+ u64 u;
+ struct otx2_cptx_lf_q_base_s {
+ u64 fault:1;
+ u64 reserved_1_6:6;
+ u64 addr:46;
+ u64 reserved_53_63:11;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_q_size
+ *
+ * CPT initializes these CSR fields to these values on any CPT_LF_Q_SIZE write:
+ * _ CPT_LF_Q_INST_PTR[XQ_XOR]=0.
+ * _ CPT_LF_Q_INST_PTR[NQ_PTR]=2.
+ * _ CPT_LF_Q_INST_PTR[DQ_PTR]=2.
+ * _ CPT_LF_Q_GRP_PTR[XQ_XOR]=0.
+ * _ CPT_LF_Q_GRP_PTR[NQ_PTR]=1.
+ * _ CPT_LF_Q_GRP_PTR[DQ_PTR]=1.
+ */
+union otx2_cptx_lf_q_size {
+ u64 u;
+ struct otx2_cptx_lf_q_size_s {
+ u64 size_div40:15;
+ u64 reserved_15_63:49;
+ } s;
+};
+
+/*
+ * RVU_PF_BAR0 - cpt_af_lf_ctl
+ *
+ * This register configures queues. This register should be written only
+ * when the queue is execution-quiescent (see CPT_LF_INPROG[INFLIGHT]).
+ */
+union otx2_cptx_af_lf_ctrl {
+ u64 u;
+ struct otx2_cptx_af_lf_ctrl_s {
+ u64 pri:1;
+ u64 reserved_1_8:8;
+ u64 pf_func_inst:1;
+ u64 cont_err:1;
+ u64 reserved_11_15:5;
+ u64 nixtx_en:1;
+ u64 reserved_17_47:31;
+ u64 grp:8;
+ u64 reserved_56_63:8;
+ } s;
+};
+
+#endif /* __OTX2_CPT_HW_TYPES_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
new file mode 100644
index 000000000000..51cb6404ded7
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptlf.h"
+
+int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
+{
+ int ret;
+
+ otx2_mbox_msg_send(mbox, 0);
+ ret = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (ret == -EIO) {
+ dev_err(&pdev->dev, "RVU MBOX timeout.\n");
+ return ret;
+ } else if (ret) {
+ dev_err(&pdev->dev, "RVU MBOX error: %d.\n", ret);
+ return -EFAULT;
+ }
+ return ret;
+}
+
+int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
+{
+ struct mbox_msghdr *req;
+
+ req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct ready_msg_rsp));
+ if (req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ req->id = MBOX_MSG_READY;
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->pcifunc = 0;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+
+int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
+{
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+
+int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 *val)
+{
+ struct cpt_rd_wr_reg_msg *reg_msg;
+
+ reg_msg = (struct cpt_rd_wr_reg_msg *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*reg_msg),
+ sizeof(*reg_msg));
+ if (reg_msg == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ reg_msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
+ reg_msg->hdr.sig = OTX2_MBOX_REQ_SIG;
+ reg_msg->hdr.pcifunc = 0;
+
+ reg_msg->is_write = 0;
+ reg_msg->reg_offset = reg;
+ reg_msg->ret_val = val;
+
+ return 0;
+}
+
+int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 val)
+{
+ struct cpt_rd_wr_reg_msg *reg_msg;
+
+ reg_msg = (struct cpt_rd_wr_reg_msg *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*reg_msg),
+ sizeof(*reg_msg));
+ if (reg_msg == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ reg_msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
+ reg_msg->hdr.sig = OTX2_MBOX_REQ_SIG;
+ reg_msg->hdr.pcifunc = 0;
+
+ reg_msg->is_write = 1;
+ reg_msg->reg_offset = reg;
+ reg_msg->val = val;
+
+ return 0;
+}
+
+int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 *val)
+{
+ int ret;
+
+ ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val);
+ if (ret)
+ return ret;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+
+int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 val)
+{
+ int ret;
+
+ ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val);
+ if (ret)
+ return ret;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
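+
+/*
+ * Example use of the AF register helpers above (the same pattern appears
+ * later in cptpf_device_init()): read a CPT AF CSR over the AF<=>PF
+ * mailbox into a local variable, e.g.
+ *
+ *   u64 val;
+ *
+ *   ret = otx2_cpt_read_af_reg(mbox, pdev, CPT_AF_CONSTANTS1, &val);
+ */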
+
+int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct rsrc_attach *req;
+ int ret;
+
+ req = (struct rsrc_attach *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (req == NULL) {
+ dev_err(&lfs->pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ req->hdr.id = MBOX_MSG_ATTACH_RESOURCES;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = 0;
+ req->cptlfs = lfs->lfs_num;
+ ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev);
+ if (ret)
+ return ret;
+
+ if (!lfs->are_lfs_attached)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct rsrc_detach *req;
+ int ret;
+
+ req = (struct rsrc_detach *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (req == NULL) {
+ dev_err(&lfs->pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ req->hdr.id = MBOX_MSG_DETACH_RESOURCES;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = 0;
+ ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev);
+ if (ret)
+ return ret;
+
+ if (lfs->are_lfs_attached)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct pci_dev *pdev = lfs->pdev;
+ struct mbox_msghdr *req;
+ int ret, i;
+
+ req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msix_offset_rsp));
+ if (req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ req->id = MBOX_MSG_MSIX_OFFSET;
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->pcifunc = 0;
+ ret = otx2_cpt_send_mbox_msg(mbox, pdev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ if (lfs->lf[i].msix_offset == MSIX_VECTOR_INVALID) {
+ dev_err(&pdev->dev,
+ "Invalid msix offset %d for LF %d\n",
+ lfs->lf[i].msix_offset, i);
+ return -EINVAL;
+ }
+ }
+ return ret;
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
new file mode 100644
index 000000000000..dbb1ee746f4c
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPT_REQMGR_H
+#define __OTX2_CPT_REQMGR_H
+
+#include "otx2_cpt_common.h"
+
+/* Completion code size and initial value */
+#define OTX2_CPT_COMPLETION_CODE_SIZE 8
+#define OTX2_CPT_COMPLETION_CODE_INIT OTX2_CPT_COMP_E_NOTDONE
+/*
+ * The maximum total number of SG buffers is 100; they are divided
+ * equally between input and output
+ */
+#define OTX2_CPT_MAX_SG_IN_CNT 50
+#define OTX2_CPT_MAX_SG_OUT_CNT 50
+
+/* DMA mode direct or SG */
+#define OTX2_CPT_DMA_MODE_DIRECT 0
+#define OTX2_CPT_DMA_MODE_SG 1
+
+/* Context source CPTR or DPTR */
+#define OTX2_CPT_FROM_CPTR 0
+#define OTX2_CPT_FROM_DPTR 1
+
+#define OTX2_CPT_MAX_REQ_SIZE 65535
+
+union otx2_cpt_opcode {
+ u16 flags;
+ struct {
+ u8 major;
+ u8 minor;
+ } s;
+};
+
+struct otx2_cptvf_request {
+ u32 param1;
+ u32 param2;
+ u16 dlen;
+ union otx2_cpt_opcode opcode;
+};
+
+/*
+ * CPT_INST_S software command definitions
+ * Words EI (0-3)
+ */
+union otx2_cpt_iq_cmd_word0 {
+ u64 u;
+ struct {
+ __be16 opcode;
+ __be16 param1;
+ __be16 param2;
+ __be16 dlen;
+ } s;
+};
+
+union otx2_cpt_iq_cmd_word3 {
+ u64 u;
+ struct {
+ u64 cptr:61;
+ u64 grp:3;
+ } s;
+};
+
+struct otx2_cpt_iq_command {
+ union otx2_cpt_iq_cmd_word0 cmd;
+ u64 dptr;
+ u64 rptr;
+ union otx2_cpt_iq_cmd_word3 cptr;
+};
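+
+/*
+ * When a command is submitted, these four words become the
+ * engine-instruction words of CPT_INST_S: cmd.u -> ei0, dptr -> ei1,
+ * rptr -> ei2 and cptr.u -> ei3 (see otx2_cpt_fill_inst()).
+ */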
+
+struct otx2_cpt_pending_entry {
+ void *completion_addr; /* Completion address */
+ void *info;
+ /* Kernel async request callback */
+ void (*callback)(int status, void *arg1, void *arg2);
+ struct crypto_async_request *areq; /* Async request callback arg */
+ u8 resume_sender; /* Notify sender to resume sending requests */
+ u8 busy; /* Entry status (free/busy) */
+};
+
+struct otx2_cpt_pending_queue {
+ struct otx2_cpt_pending_entry *head; /* Head of the queue */
+ u32 front; /* Process work from here */
+ u32 rear; /* Append new work here */
+ u32 pending_count; /* Pending requests count */
+ u32 qlen; /* Queue length */
+ spinlock_t lock; /* Queue lock */
+};
+
+struct otx2_cpt_buf_ptr {
+ u8 *vptr;
+ dma_addr_t dma_addr;
+ u16 size;
+};
+
+union otx2_cpt_ctrl_info {
+ u32 flags;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved_6_31:26;
+ u32 grp:3; /* Group bits */
+ u32 dma_mode:2; /* DMA mode */
+ u32 se_req:1; /* To SE core */
+#else
+ u32 se_req:1; /* To SE core */
+ u32 dma_mode:2; /* DMA mode */
+ u32 grp:3; /* Group bits */
+ u32 reserved_6_31:26;
+#endif
+ } s;
+};
+
+struct otx2_cpt_req_info {
+ /* Kernel async request callback */
+ void (*callback)(int status, void *arg1, void *arg2);
+ struct crypto_async_request *areq; /* Async request callback arg */
+ struct otx2_cptvf_request req;/* Request information (core specific) */
+ union otx2_cpt_ctrl_info ctrl;/* User control information */
+ struct otx2_cpt_buf_ptr in[OTX2_CPT_MAX_SG_IN_CNT];
+ struct otx2_cpt_buf_ptr out[OTX2_CPT_MAX_SG_OUT_CNT];
+ u8 *iv_out; /* IV to send back */
+ u16 rlen; /* Output length */
+ u8 in_cnt; /* Number of input buffers */
+ u8 out_cnt; /* Number of output buffers */
+ u8 req_type; /* Type of request */
+ u8 is_enc; /* Is a request an encryption request */
+ u8 is_trunc_hmac;/* Is truncated hmac used */
+};
+
+struct otx2_cpt_inst_info {
+ struct otx2_cpt_pending_entry *pentry;
+ struct otx2_cpt_req_info *req;
+ struct pci_dev *pdev;
+ void *completion_addr;
+ u8 *out_buffer;
+ u8 *in_buffer;
+ dma_addr_t dptr_baddr;
+ dma_addr_t rptr_baddr;
+ dma_addr_t comp_baddr;
+ unsigned long time_in;
+ u32 dlen;
+ u32 dma_len;
+ u8 extra_time;
+};
+
+struct otx2_cpt_sglist_component {
+ __be16 len0;
+ __be16 len1;
+ __be16 len2;
+ __be16 len3;
+ __be64 ptr0;
+ __be64 ptr1;
+ __be64 ptr2;
+ __be64 ptr3;
+};
+
+static inline void otx2_cpt_info_destroy(struct pci_dev *pdev,
+ struct otx2_cpt_inst_info *info)
+{
+ struct otx2_cpt_req_info *req;
+ int i;
+
+ if (info->dptr_baddr)
+ dma_unmap_single(&pdev->dev, info->dptr_baddr,
+ info->dma_len, DMA_BIDIRECTIONAL);
+
+ if (info->req) {
+ req = info->req;
+ for (i = 0; i < req->out_cnt; i++) {
+ if (req->out[i].dma_addr)
+ dma_unmap_single(&pdev->dev,
+ req->out[i].dma_addr,
+ req->out[i].size,
+ DMA_BIDIRECTIONAL);
+ }
+
+ for (i = 0; i < req->in_cnt; i++) {
+ if (req->in[i].dma_addr)
+ dma_unmap_single(&pdev->dev,
+ req->in[i].dma_addr,
+ req->in[i].size,
+ DMA_BIDIRECTIONAL);
+ }
+ }
+ kfree(info);
+}
+
+struct otx2_cptlf_wqe;
+int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ int cpu_num);
+void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe);
+int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev);
+
+#endif /* __OTX2_CPT_REQMGR_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
new file mode 100644
index 000000000000..823a4571fd67
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptlf.h"
+#include "rvu_reg.h"
+
+#define CPT_TIMER_HOLD 0x03F
+#define CPT_COUNT_HOLD 32
+
+static void cptlf_do_set_done_time_wait(struct otx2_cptlf_info *lf,
+ int time_wait)
+{
+ union otx2_cptx_lf_done_wait done_wait;
+
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT);
+ done_wait.s.time_wait = time_wait;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT, done_wait.u);
+}
+
+static void cptlf_do_set_done_num_wait(struct otx2_cptlf_info *lf, int num_wait)
+{
+ union otx2_cptx_lf_done_wait done_wait;
+
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT);
+ done_wait.s.num_wait = num_wait;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT, done_wait.u);
+}
+
+static void cptlf_set_done_time_wait(struct otx2_cptlfs_info *lfs,
+ int time_wait)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ cptlf_do_set_done_time_wait(&lfs->lf[slot], time_wait);
+}
+
+static void cptlf_set_done_num_wait(struct otx2_cptlfs_info *lfs, int num_wait)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ cptlf_do_set_done_num_wait(&lfs->lf[slot], num_wait);
+}
+
+static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
+{
+ struct otx2_cptlfs_info *lfs = lf->lfs;
+ union otx2_cptx_af_lf_ctrl lf_ctrl;
+ int ret;
+
+ ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ &lf_ctrl.u);
+ if (ret)
+ return ret;
+
+ lf_ctrl.s.pri = pri ? 1 : 0;
+
+ ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ lf_ctrl.u);
+ return ret;
+}
+
+static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
+ int eng_grps_mask)
+{
+ struct otx2_cptlfs_info *lfs = lf->lfs;
+ union otx2_cptx_af_lf_ctrl lf_ctrl;
+ int ret;
+
+ ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ &lf_ctrl.u);
+ if (ret)
+ return ret;
+
+ lf_ctrl.s.grp = eng_grps_mask;
+
+ ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ lf_ctrl.u);
+ return ret;
+}
+
+static int cptlf_set_grp_and_pri(struct otx2_cptlfs_info *lfs,
+ int eng_grp_mask, int pri)
+{
+ int slot, ret = 0;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ ret = cptlf_set_pri(&lfs->lf[slot], pri);
+ if (ret)
+ return ret;
+
+ ret = cptlf_set_eng_grps_mask(&lfs->lf[slot], eng_grp_mask);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+static void cptlf_hw_init(struct otx2_cptlfs_info *lfs)
+{
+ /* Disable instruction queues */
+ otx2_cptlf_disable_iqueues(lfs);
+
+ /* Set instruction queues base addresses */
+ otx2_cptlf_set_iqueues_base_addr(lfs);
+
+ /* Set instruction queues sizes */
+ otx2_cptlf_set_iqueues_size(lfs);
+
+ /* Set done interrupts time wait */
+ cptlf_set_done_time_wait(lfs, CPT_TIMER_HOLD);
+
+ /* Set done interrupts num wait */
+ cptlf_set_done_num_wait(lfs, CPT_COUNT_HOLD);
+
+ /* Enable instruction queues */
+ otx2_cptlf_enable_iqueues(lfs);
+}
+
+static void cptlf_hw_cleanup(struct otx2_cptlfs_info *lfs)
+{
+ /* Disable instruction queues */
+ otx2_cptlf_disable_iqueues(lfs);
+}
+
+static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
+{
+ union otx2_cptx_lf_misc_int_ena_w1s irq_misc = { .u = 0x0 };
+ u64 reg = enable ? OTX2_CPT_LF_MISC_INT_ENA_W1S :
+ OTX2_CPT_LF_MISC_INT_ENA_W1C;
+ int slot;
+
+ irq_misc.s.fault = 0x1;
+ irq_misc.s.hwerr = 0x1;
+ irq_misc.s.irde = 0x1;
+ irq_misc.s.nqerr = 0x1;
+ irq_misc.s.nwrp = 0x1;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot, reg,
+ irq_misc.u);
+}
+
+static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ /* Enable done interrupts */
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+ OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1);
+ /* Enable Misc interrupts */
+ cptlf_set_misc_intrs(lfs, true);
+}
+
+static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+ OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1);
+ cptlf_set_misc_intrs(lfs, false);
+}
+
+static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
+{
+ union otx2_cptx_lf_done irq_cnt;
+
+ irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE);
+ return irq_cnt.s.done;
+}
+
+static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
+{
+ union otx2_cptx_lf_misc_int irq_misc, irq_misc_ack;
+ struct otx2_cptlf_info *lf = arg;
+ struct device *dev;
+
+ dev = &lf->lfs->pdev->dev;
+ irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_MISC_INT);
+ irq_misc_ack.u = 0x0;
+
+ if (irq_misc.s.fault) {
+ dev_err(dev, "Memory error detected while executing CPT_INST_S, LF %d.\n",
+ lf->slot);
+ irq_misc_ack.s.fault = 0x1;
+
+ } else if (irq_misc.s.hwerr) {
+ dev_err(dev, "HW error from an engine executing CPT_INST_S, LF %d.\n",
+ lf->slot);
+ irq_misc_ack.s.hwerr = 0x1;
+
+ } else if (irq_misc.s.nwrp) {
+ dev_err(dev, "SMMU fault while writing CPT_RES_S to CPT_INST_S[RES_ADDR], LF %d.\n",
+ lf->slot);
+ irq_misc_ack.s.nwrp = 0x1;
+
+ } else if (irq_misc.s.irde) {
+ dev_err(dev, "Memory error when accessing instruction memory queue CPT_LF_Q_BASE[ADDR].\n");
+ irq_misc_ack.s.irde = 0x1;
+
+ } else if (irq_misc.s.nqerr) {
+ dev_err(dev, "Error enqueuing an instruction received at CPT_LF_NQ.\n");
+ irq_misc_ack.s.nqerr = 0x1;
+
+ } else {
+ dev_err(dev, "Unhandled interrupt in CPT LF %d\n", lf->slot);
+ return IRQ_NONE;
+ }
+
+ /* Acknowledge interrupts */
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_MISC_INT, irq_misc_ack.u);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
+{
+ union otx2_cptx_lf_done_wait done_wait;
+ struct otx2_cptlf_info *lf = arg;
+ int irq_cnt;
+
+ /* Read the number of completed requests */
+ irq_cnt = cptlf_read_done_cnt(lf);
+ if (irq_cnt) {
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0,
+ lf->slot, OTX2_CPT_LF_DONE_WAIT);
+ /* Acknowledge the number of completed requests */
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_ACK, irq_cnt);
+
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT, done_wait.u);
+ if (unlikely(!lf->wqe)) {
+ dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n",
+ lf->slot);
+ return IRQ_NONE;
+ }
+
+ /* Schedule processing of completed requests */
+ tasklet_hi_schedule(&lf->wqe->work);
+ }
+ return IRQ_HANDLED;
+}
+
+void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
+{
+ int i, offs, vector;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
+ if (!lfs->lf[i].is_irq_reg[offs])
+ continue;
+
+ vector = pci_irq_vector(lfs->pdev,
+ lfs->lf[i].msix_offset + offs);
+ free_irq(vector, &lfs->lf[i]);
+ lfs->lf[i].is_irq_reg[offs] = false;
+ }
+ }
+ cptlf_disable_intrs(lfs);
+}
+
+static int cptlf_do_register_interrupts(struct otx2_cptlfs_info *lfs,
+ int lf_num, int irq_offset,
+ irq_handler_t handler)
+{
+ int ret, vector;
+
+ vector = pci_irq_vector(lfs->pdev, lfs->lf[lf_num].msix_offset +
+ irq_offset);
+ ret = request_irq(vector, handler, 0,
+ lfs->lf[lf_num].irq_name[irq_offset],
+ &lfs->lf[lf_num]);
+ if (ret)
+ return ret;
+
+ lfs->lf[lf_num].is_irq_reg[irq_offset] = true;
+
+ return ret;
+}
+
+int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs)
+{
+ int irq_offs, ret, i;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
+ snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPTLF Misc%d", i);
+ ret = cptlf_do_register_interrupts(lfs, i, irq_offs,
+ cptlf_misc_intr_handler);
+ if (ret)
+ goto free_irq;
+
+ irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
+ snprintf(lfs->lf[i].irq_name[irq_offs], 32, "OTX2_CPTLF Done%d",
+ i);
+ ret = cptlf_do_register_interrupts(lfs, i, irq_offs,
+ cptlf_done_intr_handler);
+ if (ret)
+ goto free_irq;
+ }
+ cptlf_enable_intrs(lfs);
+ return 0;
+
+free_irq:
+ otx2_cptlf_unregister_interrupts(lfs);
+ return ret;
+}
+
+void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
+{
+ int slot, offs;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++)
+ irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
+ lfs->lf[slot].msix_offset +
+ offs), NULL);
+ free_cpumask_var(lfs->lf[slot].affinity_mask);
+ }
+}
+
+int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_cptlf_info *lf = lfs->lf;
+ int slot, offs, ret;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ if (!zalloc_cpumask_var(&lf[slot].affinity_mask, GFP_KERNEL)) {
+ dev_err(&lfs->pdev->dev,
+ "cpumask allocation failed for LF %d", slot);
+ ret = -ENOMEM;
+ goto free_affinity_mask;
+ }
+
+ cpumask_set_cpu(cpumask_local_spread(slot,
+ dev_to_node(&lfs->pdev->dev)),
+ lf[slot].affinity_mask);
+
+ for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
+ ret = irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
+ lf[slot].msix_offset + offs),
+ lf[slot].affinity_mask);
+ if (ret)
+ goto free_affinity_mask;
+ }
+ }
+ return 0;
+
+free_affinity_mask:
+ otx2_cptlf_free_irqs_affinity(lfs);
+ return ret;
+}
+
+int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
+ int lfs_num)
+{
+ int slot, ret;
+
+ if (!lfs->pdev || !lfs->reg_base)
+ return -EINVAL;
+
+ lfs->lfs_num = lfs_num;
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ lfs->lf[slot].lfs = lfs;
+ lfs->lf[slot].slot = slot;
+ lfs->lf[slot].lmtline = lfs->reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
+ OTX2_CPT_LMT_LF_LMTLINEX(0));
+ lfs->lf[slot].ioreg = lfs->reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_CPT0, slot,
+ OTX2_CPT_LF_NQX(0));
+ }
+ /* Send request to attach LFs */
+ ret = otx2_cpt_attach_rscrs_msg(lfs);
+ if (ret)
+ goto clear_lfs_num;
+
+ ret = otx2_cpt_alloc_instruction_queues(lfs);
+ if (ret) {
+ dev_err(&lfs->pdev->dev,
+ "Allocating instruction queues failed\n");
+ goto detach_rsrcs;
+ }
+ cptlf_hw_init(lfs);
+ /*
+ * Allow each LF to execute requests destined for any of the 8 engine
+ * groups and set the queue priority of each LF to high.
+ */
+ ret = cptlf_set_grp_and_pri(lfs, eng_grp_mask, pri);
+ if (ret)
+ goto free_iq;
+
+ return 0;
+
+free_iq:
+ otx2_cpt_free_instruction_queues(lfs);
+ cptlf_hw_cleanup(lfs);
+detach_rsrcs:
+ otx2_cpt_detach_rsrcs_msg(lfs);
+clear_lfs_num:
+ lfs->lfs_num = 0;
+ return ret;
+}
+
+void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
+{
+ lfs->lfs_num = 0;
+ /* Cleanup LFs hardware side */
+ cptlf_hw_cleanup(lfs);
+ /* Send request to detach LFs */
+ otx2_cpt_detach_rsrcs_msg(lfs);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
new file mode 100644
index 000000000000..314e97354100
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -0,0 +1,353 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+#ifndef __OTX2_CPTLF_H
+#define __OTX2_CPTLF_H
+
+#include <linux/soc/marvell/octeontx2/asm.h>
+#include <mbox.h>
+#include <rvu.h>
+#include "otx2_cpt_common.h"
+#include "otx2_cpt_reqmgr.h"
+
+/*
+ * CPT instruction and pending queues user requested length in CPT_INST_S msgs
+ * User-requested length of the CPT instruction and pending queues,
+ * in CPT_INST_S messages
+#define OTX2_CPT_USER_REQUESTED_QLEN_MSGS 8200
+
+/*
+ * CPT instruction queue size passed to HW is in units of 40*CPT_INST_S
+ * messages.
+ */
+#define OTX2_CPT_SIZE_DIV40 (OTX2_CPT_USER_REQUESTED_QLEN_MSGS/40)
+
+/*
+ * CPT instruction and pending queues length in CPT_INST_S messages
+ */
+#define OTX2_CPT_INST_QLEN_MSGS ((OTX2_CPT_SIZE_DIV40 - 1) * 40)
+
+/* CPT instruction queue length in bytes */
+#define OTX2_CPT_INST_QLEN_BYTES (OTX2_CPT_SIZE_DIV40 * 40 * \
+ OTX2_CPT_INST_SIZE)
+
+/* CPT instruction group queue length in bytes */
+#define OTX2_CPT_INST_GRP_QLEN_BYTES (OTX2_CPT_SIZE_DIV40 * 16)
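+
+/*
+ * For example, with the default OTX2_CPT_USER_REQUESTED_QLEN_MSGS of 8200:
+ * OTX2_CPT_SIZE_DIV40 = 8200 / 40 = 205, so each queue holds
+ * (205 - 1) * 40 = 8160 usable CPT_INST_S messages, takes
+ * 205 * 40 * 64 = 524800 bytes, and its group queue takes
+ * 205 * 16 = 3280 bytes.
+ */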
+
+/* CPT FC length in bytes */
+#define OTX2_CPT_Q_FC_LEN 128
+
+/* CPT instruction queue alignment */
+#define OTX2_CPT_INST_Q_ALIGNMENT 128
+
+/* Mask which selects all engine groups */
+#define OTX2_CPT_ALL_ENG_GRPS_MASK 0xFF
+
+/* Maximum LFs supported in OcteonTX2 for CPT */
+#define OTX2_CPT_MAX_LFS_NUM 64
+
+/* Queue priority */
+#define OTX2_CPT_QUEUE_HI_PRIO 0x1
+#define OTX2_CPT_QUEUE_LOW_PRIO 0x0
+
+enum otx2_cptlf_state {
+ OTX2_CPTLF_IN_RESET,
+ OTX2_CPTLF_STARTED,
+};
+
+struct otx2_cpt_inst_queue {
+ u8 *vaddr;
+ u8 *real_vaddr;
+ dma_addr_t dma_addr;
+ dma_addr_t real_dma_addr;
+ u32 size;
+};
+
+struct otx2_cptlfs_info;
+struct otx2_cptlf_wqe {
+ struct tasklet_struct work;
+ struct otx2_cptlfs_info *lfs;
+ u8 lf_num;
+};
+
+struct otx2_cptlf_info {
+ struct otx2_cptlfs_info *lfs; /* Ptr to cptlfs_info struct */
+ void __iomem *lmtline; /* Address of LMTLINE */
+ void __iomem *ioreg; /* LMTLINE send register */
+ int msix_offset; /* MSI-X interrupts offset */
+ cpumask_var_t affinity_mask; /* IRQs affinity mask */
+ u8 irq_name[OTX2_CPT_LF_MSIX_VECTORS][32];/* Interrupts name */
+ u8 is_irq_reg[OTX2_CPT_LF_MSIX_VECTORS]; /* Is interrupt registered */
+ u8 slot; /* Slot number of this LF */
+
+ struct otx2_cpt_inst_queue iqueue;/* Instruction queue */
+ struct otx2_cpt_pending_queue pqueue; /* Pending queue */
+ struct otx2_cptlf_wqe *wqe; /* Tasklet work info */
+};
+
+struct otx2_cptlfs_info {
+ /* Register base address of the VF/PF the LFs are attached to */
+ void __iomem *reg_base;
+ struct pci_dev *pdev; /* Device LFs are attached to */
+ struct otx2_cptlf_info lf[OTX2_CPT_MAX_LFS_NUM];
+ struct otx2_mbox *mbox;
+ u8 are_lfs_attached; /* Whether CPT LFs are attached */
+ u8 lfs_num; /* Number of CPT LFs */
+ u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */
+ u8 kvf_limits; /* Kernel crypto limits */
+ atomic_t state; /* LF's state. started/reset */
+};
+
+static inline void otx2_cpt_free_instruction_queues(
+ struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_cpt_inst_queue *iq;
+ int i;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ iq = &lfs->lf[i].iqueue;
+ if (iq->real_vaddr)
+ dma_free_coherent(&lfs->pdev->dev,
+ iq->size,
+ iq->real_vaddr,
+ iq->real_dma_addr);
+ iq->real_vaddr = NULL;
+ iq->vaddr = NULL;
+ }
+}
+
+static inline int otx2_cpt_alloc_instruction_queues(
+ struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_cpt_inst_queue *iq;
+ int ret = 0, i;
+
+ if (!lfs->lfs_num)
+ return -EINVAL;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ iq = &lfs->lf[i].iqueue;
+ iq->size = OTX2_CPT_INST_QLEN_BYTES +
+ OTX2_CPT_Q_FC_LEN +
+ OTX2_CPT_INST_GRP_QLEN_BYTES +
+ OTX2_CPT_INST_Q_ALIGNMENT;
+ iq->real_vaddr = dma_alloc_coherent(&lfs->pdev->dev, iq->size,
+ &iq->real_dma_addr, GFP_KERNEL);
+ if (!iq->real_vaddr) {
+ ret = -ENOMEM;
+ goto error;
+ }
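+ /*
+ * The group queue region sits at the start of the buffer; the
+ * instruction queue pointer is advanced past it and then aligned,
+ * which is why OTX2_CPT_INST_Q_ALIGNMENT extra bytes of slack are
+ * allocated above.
+ */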
+ iq->vaddr = iq->real_vaddr + OTX2_CPT_INST_GRP_QLEN_BYTES;
+ iq->dma_addr = iq->real_dma_addr + OTX2_CPT_INST_GRP_QLEN_BYTES;
+
+ /* Align pointers */
+ iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_CPT_INST_Q_ALIGNMENT);
+ iq->dma_addr = PTR_ALIGN(iq->dma_addr,
+ OTX2_CPT_INST_Q_ALIGNMENT);
+ }
+ return 0;
+
+error:
+ otx2_cpt_free_instruction_queues(lfs);
+ return ret;
+}
+
+static inline void otx2_cptlf_set_iqueues_base_addr(
+ struct otx2_cptlfs_info *lfs)
+{
+ union otx2_cptx_lf_q_base lf_q_base;
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ lf_q_base.u = lfs->lf[slot].iqueue.dma_addr;
+ otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+ OTX2_CPT_LF_Q_BASE, lf_q_base.u);
+ }
+}
+
+static inline void otx2_cptlf_do_set_iqueue_size(struct otx2_cptlf_info *lf)
+{
+ union otx2_cptx_lf_q_size lf_q_size = { .u = 0x0 };
+
+ lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_Q_SIZE, lf_q_size.u);
+}
+
+static inline void otx2_cptlf_set_iqueues_size(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cptlf_do_set_iqueue_size(&lfs->lf[slot]);
+}
+
+static inline void otx2_cptlf_do_disable_iqueue(struct otx2_cptlf_info *lf)
+{
+ union otx2_cptx_lf_ctl lf_ctl = { .u = 0x0 };
+ union otx2_cptx_lf_inprog lf_inprog;
+ int timeout = 20;
+
+ /* Disable instructions enqueuing */
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_CTL, lf_ctl.u);
+
+ /* Wait for instruction queue to become empty */
+ do {
+ lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0,
+ lf->slot, OTX2_CPT_LF_INPROG);
+ if (!lf_inprog.s.inflight)
+ break;
+
+ usleep_range(10000, 20000);
+ if (timeout-- < 0) {
+ dev_err(&lf->lfs->pdev->dev,
+ "Error LF %d is still busy.\n", lf->slot);
+ break;
+ }
+
+ } while (1);
+
+ /*
+ * Disable executions in the LF's queue,
+ * the queue should be empty at this point
+ */
+ lf_inprog.s.eena = 0x0;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_INPROG, lf_inprog.u);
+}
+
+static inline void otx2_cptlf_disable_iqueues(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cptlf_do_disable_iqueue(&lfs->lf[slot]);
+}
+
+static inline void otx2_cptlf_set_iqueue_enq(struct otx2_cptlf_info *lf,
+ bool enable)
+{
+ union otx2_cptx_lf_ctl lf_ctl;
+
+ lf_ctl.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_CTL);
+
+ /* Set iqueue's enqueuing */
+ lf_ctl.s.ena = enable ? 0x1 : 0x0;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_CTL, lf_ctl.u);
+}
+
+static inline void otx2_cptlf_enable_iqueue_enq(struct otx2_cptlf_info *lf)
+{
+ otx2_cptlf_set_iqueue_enq(lf, true);
+}
+
+static inline void otx2_cptlf_set_iqueue_exec(struct otx2_cptlf_info *lf,
+ bool enable)
+{
+ union otx2_cptx_lf_inprog lf_inprog;
+
+ lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_INPROG);
+
+ /* Set iqueue's execution */
+ lf_inprog.s.eena = enable ? 0x1 : 0x0;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_INPROG, lf_inprog.u);
+}
+
+static inline void otx2_cptlf_enable_iqueue_exec(struct otx2_cptlf_info *lf)
+{
+ otx2_cptlf_set_iqueue_exec(lf, true);
+}
+
+static inline void otx2_cptlf_disable_iqueue_exec(struct otx2_cptlf_info *lf)
+{
+ otx2_cptlf_set_iqueue_exec(lf, false);
+}
+
+static inline void otx2_cptlf_enable_iqueues(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ otx2_cptlf_enable_iqueue_exec(&lfs->lf[slot]);
+ otx2_cptlf_enable_iqueue_enq(&lfs->lf[slot]);
+ }
+}
+
+static inline void otx2_cpt_fill_inst(union otx2_cpt_inst_s *cptinst,
+ struct otx2_cpt_iq_command *iq_cmd,
+ u64 comp_baddr)
+{
+ cptinst->u[0] = 0x0;
+ cptinst->s.doneint = true;
+ cptinst->s.res_addr = comp_baddr;
+ cptinst->u[2] = 0x0;
+ cptinst->u[3] = 0x0;
+ cptinst->s.ei0 = iq_cmd->cmd.u;
+ cptinst->s.ei1 = iq_cmd->dptr;
+ cptinst->s.ei2 = iq_cmd->rptr;
+ cptinst->s.ei3 = iq_cmd->cptr.u;
+}
+
+/*
+ * On the OcteonTX2 platform the insts_num parameter is the count of
+ * instructions to be enqueued. The valid values for insts_num are:
+ * 1 - 1 CPT instruction will be enqueued during LMTST operation
+ * 2 - 2 CPT instructions will be enqueued during LMTST operation
+ */
+static inline void otx2_cpt_send_cmd(union otx2_cpt_inst_s *cptinst,
+ u32 insts_num, struct otx2_cptlf_info *lf)
+{
+ void __iomem *lmtline = lf->lmtline;
+ long ret;
+
+ /*
+ * Make sure memory areas pointed in CPT_INST_S
+ * are flushed before the instruction is sent to CPT
+ */
+ dma_wmb();
+
+ do {
+ /* Copy CPT command to LMTLINE */
+ memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
+
+ /*
+ * LDEOR initiates atomic transfer to I/O device
+ * The following will cause the LMTST to fail (the LDEOR
+ * returns zero):
+ * - No stores have been performed to the LMTLINE since it was
+ * last invalidated.
+ * - The bytes which have been stored to LMTLINE since it was
+ * last invalidated form a pattern that is non-contiguous, does
+ * not start at byte 0, or does not end on a 8-byte boundary.
+ * (i.e. comprises a formation of other than 1-16 8-byte
+ * words.)
+ *
+ * These rules are designed such that an operating system
+ * context switch or hypervisor guest switch need have no
+ * knowledge of the LMTST operations; the switch code does not
+ * need to store to LMTCANCEL. Also note as LMTLINE data cannot
+ * be read, there is no information leakage between processes.
+ */
+ ret = otx2_lmt_flush(lf->ioreg);
+
+ } while (!ret);
+}
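+
+/*
+ * Illustrative submission flow using the helpers above (names such as
+ * cptinst, iq_cmd, comp_baddr and lf are placeholders): build the engine
+ * command words, fill a CPT_INST_S with the completion address and push
+ * it over LMTST:
+ *
+ *   otx2_cpt_fill_inst(&cptinst, &iq_cmd, comp_baddr);
+ *   otx2_cpt_send_cmd(&cptinst, 1, lf);
+ */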
+
+static inline bool otx2_cptlf_started(struct otx2_cptlfs_info *lfs)
+{
+ return atomic_read(&lfs->state) == OTX2_CPTLF_STARTED;
+}
+
+int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_msk, int pri,
+ int lfs_num);
+void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs);
+int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs);
+void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs);
+void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs);
+int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs);
+
+#endif /* __OTX2_CPTLF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
new file mode 100644
index 000000000000..8c899ad531a5
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPTPF_H
+#define __OTX2_CPTPF_H
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf_ucode.h"
+#include "otx2_cptlf.h"
+
+struct otx2_cptpf_dev;
+struct otx2_cptvf_info {
+ struct otx2_cptpf_dev *cptpf; /* PF pointer this VF belongs to */
+ struct work_struct vfpf_mbox_work;
+ struct pci_dev *vf_dev;
+ int vf_id;
+ int intr_idx;
+};
+
+struct cptpf_flr_work {
+ struct work_struct work;
+ struct otx2_cptpf_dev *pf;
+};
+
+struct otx2_cptpf_dev {
+ void __iomem *reg_base; /* CPT PF registers start address */
+ void __iomem *afpf_mbox_base; /* PF-AF mbox start address */
+ void __iomem *vfpf_mbox_base; /* VF-PF mbox start address */
+ struct pci_dev *pdev; /* PCI device handle */
+ struct otx2_cptvf_info vf[OTX2_CPT_MAX_VFS_NUM];
+ struct otx2_cpt_eng_grps eng_grps;/* Engine groups information */
+ struct otx2_cptlfs_info lfs; /* CPT LFs attached to this PF */
+ /* HW capabilities for each engine type */
+ union otx2_cpt_eng_caps eng_caps[OTX2_CPT_MAX_ENG_TYPES];
+ bool is_eng_caps_discovered;
+
+ /* AF <=> PF mbox */
+ struct otx2_mbox afpf_mbox;
+ struct work_struct afpf_mbox_work;
+ struct workqueue_struct *afpf_mbox_wq;
+
+ /* VF <=> PF mbox */
+ struct otx2_mbox vfpf_mbox;
+ struct workqueue_struct *vfpf_mbox_wq;
+
+ struct workqueue_struct *flr_wq;
+ struct cptpf_flr_work *flr_work;
+
+ u8 pf_id; /* RVU PF number */
+ u8 max_vfs; /* Maximum number of VFs supported by CPT */
+ u8 enabled_vfs; /* Number of enabled VFs */
+ u8 kvf_limits; /* Kernel crypto limits */
+};
+
+irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg);
+void otx2_cptpf_afpf_mbox_handler(struct work_struct *work);
+irqreturn_t otx2_cptpf_vfpf_mbox_intr(int irq, void *arg);
+void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work);
+
+#endif /* __OTX2_CPTPF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
new file mode 100644
index 000000000000..5277e04badd9
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -0,0 +1,713 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include <linux/firmware.h>
+#include "otx2_cpt_hw_types.h"
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf_ucode.h"
+#include "otx2_cptpf.h"
+#include "rvu_reg.h"
+
+#define OTX2_CPT_DRV_NAME "octeontx2-cpt"
+#define OTX2_CPT_DRV_STRING "Marvell OcteonTX2 CPT Physical Function Driver"
+
+static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
+ int num_vfs)
+{
+ int ena_bits;
+
+ /* Clear any pending interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);
+
+ /* Enable VF interrupts for VFs from 0 to 63 */
+ ena_bits = ((num_vfs - 1) % 64);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (num_vfs > 64) {
+ /* Enable VF interrupts for VFs from 64 to 127 */
+ ena_bits = num_vfs - 64 - 1;
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
+ int num_vfs)
+{
+ int vector;
+
+ /* Disable VF-PF interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
+ /* Clear any pending interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);
+
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
+ free_irq(vector, cptpf);
+
+ if (num_vfs > 64) {
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
+ free_irq(vector, cptpf);
+ }
+}
+
+static void cptpf_enable_vf_flr_intrs(struct otx2_cptpf_dev *cptpf)
+{
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
+ ~0x0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
+ ~0x0ULL);
+
+ /* Enable VF FLR interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1SX(0), ~0x0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1SX(1), ~0x0ULL);
+}
+
+static void cptpf_disable_vf_flr_intrs(struct otx2_cptpf_dev *cptpf,
+ int num_vfs)
+{
+ int vector;
+
+ /* Disable VF FLR interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1CX(0), ~0x0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1CX(1), ~0x0ULL);
+
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
+ ~0x0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
+ ~0x0ULL);
+
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
+ free_irq(vector, cptpf);
+
+ if (num_vfs > 64) {
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
+ free_irq(vector, cptpf);
+ }
+}
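+
+/*
+ * Work handler for a VF FLR: notify the AF with a MBOX_MSG_VF_FLR message
+ * carrying the VF's pcifunc, then clear the VF's transaction-pending bit
+ * and re-enable its FLR interrupt (disabled by the hard IRQ handler when
+ * the work was queued).
+ */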
+
+static void cptpf_flr_wq_handler(struct work_struct *work)
+{
+ struct cptpf_flr_work *flr_work;
+ struct otx2_cptpf_dev *pf;
+ struct mbox_msghdr *req;
+ struct otx2_mbox *mbox;
+ int vf, reg = 0;
+
+ flr_work = container_of(work, struct cptpf_flr_work, work);
+ pf = flr_work->pf;
+ mbox = &pf->afpf_mbox;
+
+ vf = flr_work - pf->flr_work;
+
+ req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req)
+ return;
+
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->id = MBOX_MSG_VF_FLR;
+ req->pcifunc &= RVU_PFVF_FUNC_MASK;
+ req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
+
+ otx2_cpt_send_mbox_msg(mbox, pf->pdev);
+
+ if (vf >= 64) {
+ reg = 1;
+ vf = vf - 64;
+ }
+ /* Clear transaction pending register */
+ otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+}
+
+static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
+{
+ int reg, dev, vf, start_vf, num_reg = 1;
+ struct otx2_cptpf_dev *cptpf = arg;
+ u64 intr;
+
+ if (cptpf->max_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INTX(reg));
+ if (!intr)
+ continue;
+ start_vf = 64 * reg;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ dev = vf + start_vf;
+ queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
+ /* Clear interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
+ /* Disable the interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1CX(reg),
+ BIT_ULL(vf));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
+ int num_vfs)
+{
+ cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
+ cptpf_disable_vf_flr_intrs(cptpf, num_vfs);
+}
+
+static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
+{
+ struct pci_dev *pdev = cptpf->pdev;
+ struct device *dev = &pdev->dev;
+ int ret, vector;
+
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
+ /* Register VF-PF mailbox interrupt handler */
+ ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
+ cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for PFVF mbox0 irq\n");
+ return ret;
+ }
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
+ /* Register VF FLR interrupt handler */
+ ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for VFFLR0 irq\n");
+ goto free_mbox0_irq;
+ }
+ if (num_vfs > 64) {
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
+ ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
+ "CPTVFPF Mbox1", cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for PFVF mbox1 irq\n");
+ goto free_flr0_irq;
+ }
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
+ /* Register VF FLR interrupt handler */
+ ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
+ cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for VFFLR1 irq\n");
+ goto free_mbox1_irq;
+ }
+ }
+ cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
+ cptpf_enable_vf_flr_intrs(cptpf);
+
+ return 0;
+
+free_mbox1_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
+ free_irq(vector, cptpf);
+free_flr0_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
+ free_irq(vector, cptpf);
+free_mbox0_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
+ free_irq(vector, cptpf);
+ return ret;
+}
+
+static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
+{
+ if (!pf->flr_wq)
+ return;
+ destroy_workqueue(pf->flr_wq);
+ pf->flr_wq = NULL;
+ kfree(pf->flr_work);
+}
+
+static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
+{
+ int vf;
+
+ cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
+ if (!cptpf->flr_wq)
+ return -ENOMEM;
+
+ cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
+ GFP_KERNEL);
+ if (!cptpf->flr_work)
+ goto destroy_wq;
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ cptpf->flr_work[vf].pf = cptpf;
+ INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
+ }
+ return 0;
+
+destroy_wq:
+ destroy_workqueue(cptpf->flr_wq);
+ return -ENOMEM;
+}
+
+static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
+{
+ struct device *dev = &cptpf->pdev->dev;
+ u64 vfpf_mbox_base;
+ int err, i;
+
+ cptpf->vfpf_mbox_wq = alloc_workqueue("cpt_vfpf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!cptpf->vfpf_mbox_wq)
+ return -ENOMEM;
+
+ /* Map VF-PF mailbox memory */
+ vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);
+ if (!vfpf_mbox_base) {
+ dev_err(dev, "VF-PF mailbox address not configured\n");
+ err = -ENOMEM;
+ goto free_wqe;
+ }
+ cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
+ MBOX_SIZE * cptpf->max_vfs);
+ if (!cptpf->vfpf_mbox_base) {
+ dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
+ err = -ENOMEM;
+ goto free_wqe;
+ }
+ err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
+ cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
+ num_vfs);
+ if (err)
+ goto free_wqe;
+
+ for (i = 0; i < num_vfs; i++) {
+ cptpf->vf[i].vf_id = i;
+ cptpf->vf[i].cptpf = cptpf;
+ cptpf->vf[i].intr_idx = i % 64;
+ INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
+ otx2_cptpf_vfpf_mbox_handler);
+ }
+ return 0;
+
+free_wqe:
+ destroy_workqueue(cptpf->vfpf_mbox_wq);
+ return err;
+}
+
+static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
+{
+ destroy_workqueue(cptpf->vfpf_mbox_wq);
+ otx2_mbox_destroy(&cptpf->vfpf_mbox);
+}
+
+static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
+{
+ /* Disable AF-PF interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
+ 0x1ULL);
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+}
+
+static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
+{
+ struct pci_dev *pdev = cptpf->pdev;
+ struct device *dev = &pdev->dev;
+ int ret, irq;
+
+ irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
+ /* Register AF-PF mailbox interrupt handler */
+ ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
+ "CPTAFPF Mbox", cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for PFAF mbox irq\n");
+ return ret;
+ }
+ /* Clear interrupt if any, to avoid spurious interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+ /* Enable AF-PF interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
+ 0x1ULL);
+
+ ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
+ if (ret) {
+ dev_warn(dev,
+ "AF not responding to mailbox, deferring probe\n");
+ cptpf_disable_afpf_mbox_intr(cptpf);
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
+{
+ int err;
+
+ cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!cptpf->afpf_mbox_wq)
+ return -ENOMEM;
+
+ err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
+ cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
+ if (err)
+ goto error;
+
+ INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
+ return 0;
+
+error:
+ destroy_workqueue(cptpf->afpf_mbox_wq);
+ return err;
+}
+
+static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
+{
+ destroy_workqueue(cptpf->afpf_mbox_wq);
+ otx2_mbox_destroy(&cptpf->afpf_mbox);
+}
+
+static ssize_t kvf_limits_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", cptpf->kvf_limits);
+}
+
+static ssize_t kvf_limits_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+ int lfs_num;
+
+ if (kstrtoint(buf, 0, &lfs_num)) {
+ dev_err(dev, "lfs count must be in range [1 - %d]\n",
+ num_online_cpus());
+ return -EINVAL;
+ }
+ if (lfs_num < 1 || lfs_num > num_online_cpus()) {
+ dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
+ lfs_num, num_online_cpus());
+ return -EINVAL;
+ }
+ cptpf->kvf_limits = lfs_num;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(kvf_limits);
+static struct attribute *cptpf_attrs[] = {
+ &dev_attr_kvf_limits.attr,
+ NULL
+};
+
+static const struct attribute_group cptpf_sysfs_group = {
+ .attrs = cptpf_attrs,
+};
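+
+/*
+ * kvf_limits is exposed through the PF's sysfs directory (the attribute
+ * group is registered on the device kobject in the probe routine below),
+ * so it can typically be tuned with something like:
+ *
+ *   echo 2 > /sys/bus/pci/devices/<pf-pci-addr>/kvf_limits
+ *
+ * where <pf-pci-addr> is the PCI address of the CPT PF.
+ */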
+
+static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
+{
+ u64 rev;
+
+ rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
+ rev = (rev >> 12) & 0xFF;
+ /*
+ * Check if the AF has set up the revision for the RVUM block,
+ * otherwise the driver probe should be deferred until the AF
+ * driver comes up
+ */
+ if (!rev) {
+ dev_warn(&cptpf->pdev->dev,
+ "AF is not initialized, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
+{
+ int timeout = 10, ret;
+ u64 reg = 0;
+
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_BLK_RST, 0x1);
+ if (ret)
+ return ret;
+
+ do {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_BLK_RST, &reg);
+ if (ret)
+ return ret;
+
+ if (!((reg >> 63) & 0x1))
+ break;
+
+ usleep_range(10000, 20000);
+ if (timeout-- < 0)
+ return -EBUSY;
+ } while (1);
+
+ return ret;
+}
+
+static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
+{
+ union otx2_cptx_af_constants1 af_cnsts1 = {0};
+ int ret = 0;
+
+ /* Reset the CPT PF device */
+ ret = cptpf_device_reset(cptpf);
+ if (ret)
+ return ret;
+
+ /* Get number of SE, IE and AE engines */
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_CONSTANTS1, &af_cnsts1.u);
+ if (ret)
+ return ret;
+
+ cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
+ cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
+ cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;
+
+ /* Disable all cores */
+ ret = otx2_cpt_disable_all_cores(cptpf);
+
+ return ret;
+}
+
+static int cptpf_sriov_disable(struct pci_dev *pdev)
+{
+ struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
+ int num_vfs = pci_num_vf(pdev);
+
+ if (!num_vfs)
+ return 0;
+
+ pci_disable_sriov(pdev);
+ cptpf_unregister_vfpf_intr(cptpf, num_vfs);
+ cptpf_flr_wq_destroy(cptpf);
+ cptpf_vfpf_mbox_destroy(cptpf);
+ module_put(THIS_MODULE);
+ cptpf->enabled_vfs = 0;
+
+ return 0;
+}
+
+static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+ struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
+ int ret;
+
+ /* Initialize VF<=>PF mailbox */
+ ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
+ if (ret)
+ return ret;
+
+ ret = cptpf_flr_wq_init(cptpf, num_vfs);
+ if (ret)
+ goto destroy_mbox;
+ /* Register VF<=>PF mailbox interrupt */
+ ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
+ if (ret)
+ goto destroy_flr;
+
+ /* Get CPT HW capabilities using LOAD_FVC operation. */
+ ret = otx2_cpt_discover_eng_capabilities(cptpf);
+ if (ret)
+ goto disable_intr;
+
+ ret = otx2_cpt_create_eng_grps(cptpf->pdev, &cptpf->eng_grps);
+ if (ret)
+ goto disable_intr;
+
+ cptpf->enabled_vfs = num_vfs;
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret)
+ goto disable_intr;
+
+ dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);
+
+ try_module_get(THIS_MODULE);
+ return num_vfs;
+
+disable_intr:
+ cptpf_unregister_vfpf_intr(cptpf, num_vfs);
+ cptpf->enabled_vfs = 0;
+destroy_flr:
+ cptpf_flr_wq_destroy(cptpf);
+destroy_mbox:
+ cptpf_vfpf_mbox_destroy(cptpf);
+ return ret;
+}
+
+static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (num_vfs > 0) {
+ return cptpf_sriov_enable(pdev, num_vfs);
+ } else {
+ return cptpf_sriov_disable(pdev);
+ }
+}
+
+static int otx2_cptpf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ resource_size_t offset, size;
+ struct otx2_cptpf_dev *cptpf;
+ int err;
+
+ cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
+ if (!cptpf)
+ return -ENOMEM;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto clear_drvdata;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto clear_drvdata;
+ }
+ /* Map PF's configuration registers */
+ err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
+ OTX2_CPT_DRV_NAME);
+ if (err) {
+ dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
+ goto clear_drvdata;
+ }
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, cptpf);
+ cptpf->pdev = pdev;
+
+ cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
+
+ /* Check if AF driver is up, otherwise defer probe */
+ err = cpt_is_pf_usable(cptpf);
+ if (err)
+ goto clear_drvdata;
+
+ offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
+ size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
+ /* Map AF-PF mailbox memory */
+ cptpf->afpf_mbox_base = devm_ioremap_wc(dev, offset, size);
+ if (!cptpf->afpf_mbox_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ err = -ENODEV;
+ goto clear_drvdata;
+ }
+ err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
+ RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(dev, "Request for %d msix vectors failed\n",
+ RVU_PF_INT_VEC_CNT);
+ goto clear_drvdata;
+ }
+ /* Initialize AF-PF mailbox */
+ err = cptpf_afpf_mbox_init(cptpf);
+ if (err)
+ goto clear_drvdata;
+ /* Register mailbox interrupt */
+ err = cptpf_register_afpf_mbox_intr(cptpf);
+ if (err)
+ goto destroy_afpf_mbox;
+
+ cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
+
+ /* Initialize CPT PF device */
+ err = cptpf_device_init(cptpf);
+ if (err)
+ goto unregister_intr;
+
+ /* Initialize engine groups */
+ err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
+ if (err)
+ goto unregister_intr;
+
+ err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
+ if (err)
+ goto cleanup_eng_grps;
+ return 0;
+
+cleanup_eng_grps:
+ otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
+unregister_intr:
+ cptpf_disable_afpf_mbox_intr(cptpf);
+destroy_afpf_mbox:
+ cptpf_afpf_mbox_destroy(cptpf);
+clear_drvdata:
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void otx2_cptpf_remove(struct pci_dev *pdev)
+{
+ struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
+
+ if (!cptpf)
+ return;
+
+ cptpf_sriov_disable(pdev);
+ /* Delete sysfs entry created for kernel VF limits */
+ sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
+ /* Cleanup engine groups */
+ otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
+ /* Disable AF-PF mailbox interrupt */
+ cptpf_disable_afpf_mbox_intr(cptpf);
+ /* Destroy AF-PF mbox */
+ cptpf_afpf_mbox_destroy(cptpf);
+ pci_set_drvdata(pdev, NULL);
+}
+
+/* Supported devices */
+static const struct pci_device_id otx2_cpt_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
+ { 0, } /* end of table */
+};
+
+static struct pci_driver otx2_cpt_pci_driver = {
+ .name = OTX2_CPT_DRV_NAME,
+ .id_table = otx2_cpt_id_table,
+ .probe = otx2_cptpf_probe,
+ .remove = otx2_cptpf_remove,
+ .sriov_configure = otx2_cptpf_sriov_configure
+};
+
+module_pci_driver(otx2_cpt_pci_driver);
+
+MODULE_AUTHOR("Marvell");
+MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
new file mode 100644
index 000000000000..186f1c1190c1
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf.h"
+#include "rvu_reg.h"
+
+/*
+ * CPT PF driver version. It is incremented by 1 for every feature
+ * addition to the CPT mailbox messages.
+ */
+#define OTX2_CPT_PF_DRV_VERSION 0x1
+
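+/*
+ * Forward a request received from a VF to the AF: copy it into the
+ * AF-PF mailbox, send it and wait synchronously for the AF response.
+ */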
+static int forward_to_af(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req, int size)
+{
+ struct mbox_msghdr *msg;
+ int ret;
+
+ msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size);
+ if (msg == NULL)
+ return -ENOMEM;
+
+ memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
+ (uint8_t *)req + sizeof(struct mbox_msghdr), size);
+ msg->id = req->id;
+ msg->pcifunc = req->pcifunc;
+ msg->sig = req->sig;
+ msg->ver = req->ver;
+
+ otx2_mbox_msg_send(&cptpf->afpf_mbox, 0);
+ ret = otx2_mbox_wait_for_rsp(&cptpf->afpf_mbox, 0);
+ if (ret == -EIO) {
+ dev_err(&cptpf->pdev->dev, "RVU MBOX timeout.\n");
+ return ret;
+ } else if (ret) {
+ dev_err(&cptpf->pdev->dev, "RVU MBOX error: %d.\n", ret);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int handle_msg_get_caps(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req)
+{
+ struct otx2_cpt_caps_rsp *rsp;
+
+ rsp = (struct otx2_cpt_caps_rsp *)
+ otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id,
+ sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_GET_CAPS;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = req->pcifunc;
+ rsp->cpt_pf_drv_version = OTX2_CPT_PF_DRV_VERSION;
+ rsp->cpt_revision = cptpf->pdev->revision;
+ memcpy(&rsp->eng_caps, &cptpf->eng_caps, sizeof(rsp->eng_caps));
+
+ return 0;
+}
+
+static int handle_msg_get_eng_grp_num(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req)
+{
+ struct otx2_cpt_egrp_num_msg *grp_req;
+ struct otx2_cpt_egrp_num_rsp *rsp;
+
+ grp_req = (struct otx2_cpt_egrp_num_msg *)req;
+ rsp = (struct otx2_cpt_egrp_num_rsp *)
+ otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = req->pcifunc;
+ rsp->eng_type = grp_req->eng_type;
+ rsp->eng_grp_num = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
+ grp_req->eng_type);
+
+ return 0;
+}
+
+static int handle_msg_kvf_limits(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req)
+{
+ struct otx2_cpt_kvf_limits_rsp *rsp;
+
+ rsp = (struct otx2_cpt_kvf_limits_rsp *)
+ otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_GET_KVF_LIMITS;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = req->pcifunc;
+ rsp->kvf_limits = cptpf->kvf_limits;
+
+ return 0;
+}
+
+static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req, int size)
+{
+ int err = 0;
+
+ /* Check if msg is valid, if not reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG)
+ goto inval_msg;
+
+ switch (req->id) {
+ case MBOX_MSG_GET_ENG_GRP_NUM:
+ err = handle_msg_get_eng_grp_num(cptpf, vf, req);
+ break;
+ case MBOX_MSG_GET_CAPS:
+ err = handle_msg_get_caps(cptpf, vf, req);
+ break;
+ case MBOX_MSG_GET_KVF_LIMITS:
+ err = handle_msg_kvf_limits(cptpf, vf, req);
+ break;
+ default:
+ err = forward_to_af(cptpf, vf, req, size);
+ break;
+ }
+ return err;
+
+inval_msg:
+ otx2_reply_invalid_msg(&cptpf->vfpf_mbox, vf->vf_id, 0, req->id);
+ otx2_mbox_msg_send(&cptpf->vfpf_mbox, vf->vf_id);
+ return err;
+}
+
+irqreturn_t otx2_cptpf_vfpf_mbox_intr(int __always_unused irq, void *arg)
+{
+ struct otx2_cptpf_dev *cptpf = arg;
+ struct otx2_cptvf_info *vf;
+ int i, vf_idx;
+ u64 intr;
+
+ /*
+ * Check which VF has raised an interrupt and schedule
+ * corresponding work queue to process the messages
+ */
+ for (i = 0; i < 2; i++) {
+ /* Read the interrupt bits */
+ intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(i));
+
+ for (vf_idx = i * 64; vf_idx < cptpf->enabled_vfs; vf_idx++) {
+ vf = &cptpf->vf[vf_idx];
+ if (intr & (1ULL << vf->intr_idx)) {
+ queue_work(cptpf->vfpf_mbox_wq,
+ &vf->vfpf_mbox_work);
+ /* Clear the interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM,
+ 0, RVU_PF_VFPF_MBOX_INTX(i),
+ BIT_ULL(vf->intr_idx));
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_cptpf_dev *cptpf;
+ struct otx2_cptvf_info *vf;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, i, err;
+
+ vf = container_of(work, struct otx2_cptvf_info, vfpf_mbox_work);
+ cptpf = vf->cptpf;
+ mbox = &cptpf->vfpf_mbox;
+ /* sync with mbox memory region */
+ smp_rmb();
+ mdev = &mbox->dev[vf->vf_id];
+ /* Process received mbox messages */
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < req_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+
+ /* Set which VF sent this message based on mbox IRQ */
+ msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) |
+ ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
+
+ err = cptpf_handle_vf_req(cptpf, vf, msg,
+ msg->next_msgoff - offset);
+ /*
+ * Behave like the AF: drop the message if there is no memory;
+ * mailbox timeouts are also handled here
+ */
+ if (err == -ENOMEM || err == -EIO)
+ break;
+ offset = msg->next_msgoff;
+ }
+ /* Send mbox responses to VF */
+ if (mdev->num_msgs)
+ otx2_mbox_msg_send(mbox, vf->vf_id);
+}
+
+irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
+{
+ struct otx2_cptpf_dev *cptpf = arg;
+ u64 intr;
+
+ /* Read the interrupt bits */
+ intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);
+
+ if (intr & 0x1ULL) {
+ /* Schedule work queue function to process the MBOX request */
+ queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);
+ /* Clear and ack the interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT,
+ 0x1ULL);
+ }
+ return IRQ_HANDLED;
+}
+
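+/*
+ * Process an AF response to a message which originated from this PF:
+ * record the PF id from the READY message, store values returned by
+ * register reads and track whether LFs are attached.
+ */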
+static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
+ struct mbox_msghdr *msg)
+{
+ struct device *dev = &cptpf->pdev->dev;
+ struct cpt_rd_wr_reg_msg *rsp_rd_wr;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(dev, "MBOX msg with unknown ID %d\n", msg->id);
+ return;
+ }
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(dev, "MBOX msg with wrong signature %x, ID %d\n",
+ msg->sig, msg->id);
+ return;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
+ RVU_PFVF_PF_MASK;
+ break;
+ case MBOX_MSG_CPT_RD_WR_REGISTER:
+ rsp_rd_wr = (struct cpt_rd_wr_reg_msg *)msg;
+ if (msg->rc) {
+ dev_err(dev, "Reg %llx rd/wr(%d) failed %d\n",
+ rsp_rd_wr->reg_offset, rsp_rd_wr->is_write,
+ msg->rc);
+ return;
+ }
+ if (!rsp_rd_wr->is_write)
+ *rsp_rd_wr->ret_val = rsp_rd_wr->val;
+ break;
+ case MBOX_MSG_ATTACH_RESOURCES:
+ if (!msg->rc)
+ cptpf->lfs.are_lfs_attached = 1;
+ break;
+ case MBOX_MSG_DETACH_RESOURCES:
+ if (!msg->rc)
+ cptpf->lfs.are_lfs_attached = 0;
+ break;
+
+ default:
+ dev_err(dev,
+ "Unsupported msg %d received.\n", msg->id);
+ break;
+ }
+}
+
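+/* Forward an AF response destined for a VF to that VF's mailbox. */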
+static void forward_to_vf(struct otx2_cptpf_dev *cptpf, struct mbox_msghdr *msg,
+ int vf_id, int size)
+{
+ struct otx2_mbox *vfpf_mbox;
+ struct mbox_msghdr *fwd;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(&cptpf->pdev->dev,
+ "MBOX msg with unknown ID %d\n", msg->id);
+ return;
+ }
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(&cptpf->pdev->dev,
+ "MBOX msg with wrong signature %x, ID %d\n",
+ msg->sig, msg->id);
+ return;
+ }
+ vfpf_mbox = &cptpf->vfpf_mbox;
+ vf_id--;
+ if (vf_id >= cptpf->enabled_vfs) {
+ dev_err(&cptpf->pdev->dev,
+ "MBOX msg to unknown VF: %d >= %d\n",
+ vf_id, cptpf->enabled_vfs);
+ return;
+ }
+ if (msg->id == MBOX_MSG_VF_FLR)
+ return;
+
+ fwd = otx2_mbox_alloc_msg(vfpf_mbox, vf_id, size);
+ if (!fwd) {
+ dev_err(&cptpf->pdev->dev,
+ "Forwarding to VF%d failed.\n", vf_id);
+ return;
+ }
+ memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
+ (uint8_t *)msg + sizeof(struct mbox_msghdr), size);
+ fwd->id = msg->id;
+ fwd->pcifunc = msg->pcifunc;
+ fwd->sig = msg->sig;
+ fwd->ver = msg->ver;
+ fwd->rc = msg->rc;
+}
+
+/* Handle mailbox messages received from AF */
+void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_cptpf_dev *cptpf;
+ struct otx2_mbox *afpf_mbox;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ int offset, vf_id, i;
+
+ cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_work);
+ afpf_mbox = &cptpf->afpf_mbox;
+ mdev = &afpf_mbox->dev[0];
+ /* Sync mbox data into memory */
+ smp_wmb();
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + afpf_mbox->rx_start);
+ offset = ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + afpf_mbox->rx_start +
+ offset);
+ vf_id = (msg->pcifunc >> RVU_PFVF_FUNC_SHIFT) &
+ RVU_PFVF_FUNC_MASK;
+ if (vf_id > 0)
+ forward_to_vf(cptpf, msg, vf_id,
+ msg->next_msgoff - offset);
+ else
+ process_afpf_mbox_msg(cptpf, msg);
+
+ offset = msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+ otx2_mbox_reset(afpf_mbox, 0);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
new file mode 100644
index 000000000000..1dc3ba298139
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -0,0 +1,1415 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include <linux/ctype.h>
+#include <linux/firmware.h>
+#include "otx2_cptpf_ucode.h"
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf.h"
+#include "otx2_cptlf.h"
+#include "otx2_cpt_reqmgr.h"
+#include "rvu_reg.h"
+
+#define CSR_DELAY 30
+
+#define LOADFVC_RLEN 8
+#define LOADFVC_MAJOR_OP 0x01
+#define LOADFVC_MINOR_OP 0x08
+
+struct fw_info_t {
+ struct list_head ucodes;
+};
+
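+/*
+ * Build a bitmap of all engines reserved to the given engine group,
+ * across every engine type attached to it.
+ */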
+static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
+ struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ struct otx2_cpt_bitmap bmap = { {0} };
+ bool found = false;
+ int i;
+
+ if (eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
+ dev_err(dev, "unsupported number of engines %d on octeontx2\n",
+ eng_grp->g->engs_num);
+ return bmap;
+ }
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (eng_grp->engs[i].type) {
+ bitmap_or(bmap.bits, bmap.bits,
+ eng_grp->engs[i].bmap,
+ eng_grp->g->engs_num);
+ bmap.size = eng_grp->g->engs_num;
+ found = true;
+ }
+ }
+
+ if (!found)
+ dev_err(dev, "No engines reserved for engine group %d\n",
+ eng_grp->idx);
+ return bmap;
+}
+
+static int is_eng_type(int val, int eng_type)
+{
+ return val & (1 << eng_type);
+}
+
+static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ if (eng_grp->ucode[1].type)
+ return true;
+ else
+ return false;
+}
+
+static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
+ const char *filename)
+{
+ strlcpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
+}
+
+static char *get_eng_type_str(int eng_type)
+{
+ char *str = "unknown";
+
+ switch (eng_type) {
+ case OTX2_CPT_SE_TYPES:
+ str = "SE";
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ str = "IE";
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ str = "AE";
+ break;
+ }
+ return str;
+}
+
+static char *get_ucode_type_str(int ucode_type)
+{
+ char *str = "unknown";
+
+ switch (ucode_type) {
+ case (1 << OTX2_CPT_SE_TYPES):
+ str = "SE";
+ break;
+
+ case (1 << OTX2_CPT_IE_TYPES):
+ str = "IE";
+ break;
+
+ case (1 << OTX2_CPT_AE_TYPES):
+ str = "AE";
+ break;
+
+ case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):
+ str = "SE+IPSEC";
+ break;
+ }
+ return str;
+}
+
+static int get_ucode_type(struct device *dev,
+ struct otx2_cpt_ucode_hdr *ucode_hdr,
+ int *ucode_type)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+ char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
+ char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
+ struct pci_dev *pdev = cptpf->pdev;
+ int i, val = 0;
+ u8 nn;
+
+ strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
+ for (i = 0; i < strlen(tmp_ver_str); i++)
+ tmp_ver_str[i] = tolower(tmp_ver_str[i]);
+
+ sprintf(ver_str_prefix, "ocpt-%02d", pdev->revision);
+ if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
+ return -EINVAL;
+
+ nn = ucode_hdr->ver_num.nn;
+ if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
+ (nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
+ nn == OTX2_CPT_SE_UC_TYPE3))
+ val |= 1 << OTX2_CPT_SE_TYPES;
+ if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
+ (nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
+ nn == OTX2_CPT_IE_UC_TYPE3))
+ val |= 1 << OTX2_CPT_IE_TYPES;
+ if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
+ nn == OTX2_CPT_AE_UC_TYPE)
+ val |= 1 << OTX2_CPT_AE_TYPES;
+
+ *ucode_type = val;
+
+ if (!val)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
+ dma_addr_t dma_addr)
+{
+ return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_UCODE_BASE(eng),
+ (u64)dma_addr);
+}
+
+static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
+{
+ struct otx2_cptpf_dev *cptpf = obj;
+ struct otx2_cpt_engs_rsvd *engs;
+ dma_addr_t dma_addr;
+ int i, bit, ret;
+
+ /* Set PF number for microcode fetches */
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_PF_FUNC,
+ cptpf->pf_id << RVU_PFVF_PF_SHIFT);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+
+ dma_addr = engs->ucode->dma;
+
+ /*
+ * Set UCODE_BASE only for the cores which are not used;
+ * the other cores should already have a valid UCODE_BASE set
+ */
+ for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
+ if (!eng_grp->g->eng_ref_cnt[bit]) {
+ ret = __write_ucode_base(cptpf, bit, dma_addr);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ struct otx2_cptpf_dev *cptpf = obj;
+ struct otx2_cpt_bitmap bmap;
+ int i, timeout = 10;
+ int busy, ret;
+ u64 reg = 0;
+
+ bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
+ if (!bmap.size)
+ return -EINVAL;
+
+ /* Detach the cores from group */
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), &reg);
+ if (ret)
+ return ret;
+
+ if (reg & (1ull << eng_grp->idx)) {
+ eng_grp->g->eng_ref_cnt[i]--;
+ reg &= ~(1ull << eng_grp->idx);
+
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), reg);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Wait for cores to become idle */
+ do {
+ busy = 0;
+ usleep_range(10000, 20000);
+ if (timeout-- < 0)
+ return -EBUSY;
+
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_STS(i), &reg);
+ if (ret)
+ return ret;
+
+ if (reg & 0x1) {
+ busy = 1;
+ break;
+ }
+ }
+ } while (busy);
+
+ /* Disable the cores only if they are not used anymore */
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ if (!eng_grp->g->eng_ref_cnt[i]) {
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_CTL(i), 0x0);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ struct otx2_cptpf_dev *cptpf = obj;
+ struct otx2_cpt_bitmap bmap;
+ u64 reg = 0;
+ int i, ret;
+
+ bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
+ if (!bmap.size)
+ return -EINVAL;
+
+ /* Attach the cores to the group */
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), &reg);
+ if (ret)
+ return ret;
+
+ if (!(reg & (1ull << eng_grp->idx))) {
+ eng_grp->g->eng_ref_cnt[i]++;
+ reg |= 1ull << eng_grp->idx;
+
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), reg);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Enable the cores */
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_CTL(i), 0x1);
+ if (ret)
+ return ret;
+ }
+ ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+
+ return ret;
+}
+
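+/*
+ * Request a single microcode image, validate its header and add it to
+ * the fw_info ucode list for later engine group creation.
+ */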
+static int load_fw(struct device *dev, struct fw_info_t *fw_info,
+ char *filename)
+{
+ struct otx2_cpt_ucode_hdr *ucode_hdr;
+ struct otx2_cpt_uc_info_t *uc_info;
+ int ucode_type, ucode_size;
+ int ret;
+
+ uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);
+ if (!uc_info)
+ return -ENOMEM;
+
+ ret = request_firmware(&uc_info->fw, filename, dev);
+ if (ret)
+ goto free_uc_info;
+
+ ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
+ ret = get_ucode_type(dev, ucode_hdr, &ucode_type);
+ if (ret)
+ goto release_fw;
+
+ ucode_size = ntohl(ucode_hdr->code_length) * 2;
+ if (!ucode_size) {
+ dev_err(dev, "Ucode %s invalid size\n", filename);
+ ret = -EINVAL;
+ goto release_fw;
+ }
+
+ set_ucode_filename(&uc_info->ucode, filename);
+ memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
+ OTX2_CPT_UCODE_VER_STR_SZ);
+ uc_info->ucode.ver_num = ucode_hdr->ver_num;
+ uc_info->ucode.type = ucode_type;
+ uc_info->ucode.size = ucode_size;
+ list_add_tail(&uc_info->list, &fw_info->ucodes);
+
+ return 0;
+
+release_fw:
+ release_firmware(uc_info->fw);
+free_uc_info:
+ kfree(uc_info);
+ return ret;
+}
+
+static void cpt_ucode_release_fw(struct fw_info_t *fw_info)
+{
+ struct otx2_cpt_uc_info_t *curr, *temp;
+
+ if (!fw_info)
+ return;
+
+ list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {
+ list_del(&curr->list);
+ release_firmware(curr->fw);
+ kfree(curr);
+ }
+}
+
+static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,
+ int ucode_type)
+{
+ struct otx2_cpt_uc_info_t *curr;
+
+ list_for_each_entry(curr, &fw_info->ucodes, list) {
+ if (!is_eng_type(curr->ucode.type, ucode_type))
+ continue;
+
+ return curr;
+ }
+ return NULL;
+}
+
+static void print_uc_info(struct fw_info_t *fw_info)
+{
+ struct otx2_cpt_uc_info_t *curr;
+
+ list_for_each_entry(curr, &fw_info->ucodes, list) {
+ pr_debug("Ucode filename %s\n", curr->ucode.filename);
+ pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
+ pr_debug("Ucode version %d.%d.%d.%d\n",
+ curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
+ curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
+ pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
+ get_ucode_type_str(curr->ucode.type));
+ pr_debug("Ucode size %d\n", curr->ucode.size);
+ pr_debug("Ucode ptr %p\n", curr->fw->data);
+ }
+}
+
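+/*
+ * Load the SE, IE and AE microcode images; the firmware files are
+ * expected at "mrvl/cpt<revision>/<engine type>.out".
+ */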
+static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info)
+{
+ char filename[OTX2_CPT_NAME_LENGTH];
+ char eng_type[8] = {0};
+ int ret, e, i;
+
+ INIT_LIST_HEAD(&fw_info->ucodes);
+
+ for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
+ strcpy(eng_type, get_eng_type_str(e));
+ for (i = 0; i < strlen(eng_type); i++)
+ eng_type[i] = tolower(eng_type[i]);
+
+ snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
+ pdev->revision, eng_type);
+ /* Request firmware for each engine type */
+ ret = load_fw(&pdev->dev, fw_info, filename);
+ if (ret)
+ goto release_fw;
+ }
+ print_uc_info(fw_info);
+ return 0;
+
+release_fw:
+ cpt_ucode_release_fw(fw_info);
+ return ret;
+}
+
+static struct otx2_cpt_engs_rsvd *find_engines_by_type(
+ struct otx2_cpt_eng_grp_info *eng_grp,
+ int eng_type)
+{
+ int i;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!eng_grp->engs[i].type)
+ continue;
+
+ if (eng_grp->engs[i].type == eng_type)
+ return &eng_grp->engs[i];
+ }
+ return NULL;
+}
+
+static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,
+ int eng_type)
+{
+ struct otx2_cpt_engs_rsvd *engs;
+
+ engs = find_engines_by_type(eng_grp, eng_type);
+
+ return (engs != NULL ? 1 : 0);
+}
+
+static int update_engines_avail_count(struct device *dev,
+ struct otx2_cpt_engs_available *avail,
+ struct otx2_cpt_engs_rsvd *engs, int val)
+{
+ switch (engs->type) {
+ case OTX2_CPT_SE_TYPES:
+ avail->se_cnt += val;
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ avail->ie_cnt += val;
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ avail->ae_cnt += val;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int update_engines_offset(struct device *dev,
+ struct otx2_cpt_engs_available *avail,
+ struct otx2_cpt_engs_rsvd *engs)
+{
+ switch (engs->type) {
+ case OTX2_CPT_SE_TYPES:
+ engs->offset = 0;
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ engs->offset = avail->max_se_cnt;
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ engs->offset = avail->max_se_cnt + avail->max_ie_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int release_engines(struct device *dev,
+ struct otx2_cpt_eng_grp_info *grp)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!grp->engs[i].type)
+ continue;
+
+ if (grp->engs[i].count > 0) {
+ ret = update_engines_avail_count(dev, &grp->g->avail,
+ &grp->engs[i],
+ grp->engs[i].count);
+ if (ret)
+ return ret;
+ }
+
+ grp->engs[i].type = 0;
+ grp->engs[i].count = 0;
+ grp->engs[i].offset = 0;
+ grp->engs[i].ucode = NULL;
+ bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
+ }
+ return 0;
+}
+
+static int do_reserve_engines(struct device *dev,
+ struct otx2_cpt_eng_grp_info *grp,
+ struct otx2_cpt_engines *req_engs)
+{
+ struct otx2_cpt_engs_rsvd *engs = NULL;
+ int i, ret;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!grp->engs[i].type) {
+ engs = &grp->engs[i];
+ break;
+ }
+ }
+
+ if (!engs)
+ return -ENOMEM;
+
+ engs->type = req_engs->type;
+ engs->count = req_engs->count;
+
+ ret = update_engines_offset(dev, &grp->g->avail, engs);
+ if (ret)
+ return ret;
+
+ if (engs->count > 0) {
+ ret = update_engines_avail_count(dev, &grp->g->avail, engs,
+ -engs->count);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int check_engines_availability(struct device *dev,
+ struct otx2_cpt_eng_grp_info *grp,
+ struct otx2_cpt_engines *req_eng)
+{
+ int avail_cnt = 0;
+
+ switch (req_eng->type) {
+ case OTX2_CPT_SE_TYPES:
+ avail_cnt = grp->g->avail.se_cnt;
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ avail_cnt = grp->g->avail.ie_cnt;
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ avail_cnt = grp->g->avail.ae_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", req_eng->type);
+ return -EINVAL;
+ }
+
+ if (avail_cnt < req_eng->count) {
+ dev_err(dev,
+ "Error available %s engines %d < than requested %d\n",
+ get_eng_type_str(req_eng->type),
+ avail_cnt, req_eng->count);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int reserve_engines(struct device *dev,
+ struct otx2_cpt_eng_grp_info *grp,
+ struct otx2_cpt_engines *req_engs, int ucodes_cnt)
+{
+ int i, ret = 0;
+
+ /* Validate that the requested number of engines is available */
+ for (i = 0; i < ucodes_cnt; i++) {
+ ret = check_engines_availability(dev, grp, &req_engs[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Reserve requested engines for this engine group */
+ for (i = 0; i < ucodes_cnt; i++) {
+ ret = do_reserve_engines(dev, grp, &req_engs[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
+{
+ if (ucode->va) {
+ dma_free_coherent(dev, ucode->size, ucode->va, ucode->dma);
+ ucode->va = NULL;
+ ucode->dma = 0;
+ ucode->size = 0;
+ }
+
+ memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);
+ memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));
+ set_ucode_filename(ucode, "");
+ ucode->type = 0;
+}
+
+static int copy_ucode_to_dma_mem(struct device *dev,
+ struct otx2_cpt_ucode *ucode,
+ const u8 *ucode_data)
+{
+ u32 i;
+
+ /* Allocate DMAable space */
+ ucode->va = dma_alloc_coherent(dev, ucode->size, &ucode->dma,
+ GFP_KERNEL);
+ if (!ucode->va)
+ return -ENOMEM;
+
+ memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),
+ ucode->size);
+
+ /* Byte swap 64-bit */
+ for (i = 0; i < (ucode->size / 8); i++)
+ cpu_to_be64s(&((u64 *)ucode->va)[i]);
+ /* Ucode needs 16-bit swap */
+ for (i = 0; i < (ucode->size / 2); i++)
+ cpu_to_be16s(&((u16 *)ucode->va)[i]);
+ return 0;
+}
+
+static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ int ret;
+
+ /* Point microcode to each core of the group */
+ ret = cpt_set_ucode_base(eng_grp, obj);
+ if (ret)
+ return ret;
+
+ /* Attach the cores to the group and enable them */
+ ret = cpt_attach_and_enable_cores(eng_grp, obj);
+
+ return ret;
+}
+
+static int disable_eng_grp(struct device *dev,
+ struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ int i, ret;
+
+ /* Disable all engines used by this group */
+ ret = cpt_detach_and_disable_cores(eng_grp, obj);
+ if (ret)
+ return ret;
+
+ /* Unload ucode used by this engine group */
+ ucode_unload(dev, &eng_grp->ucode[0]);
+ ucode_unload(dev, &eng_grp->ucode[1]);
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!eng_grp->engs[i].type)
+ continue;
+
+ eng_grp->engs[i].ucode = &eng_grp->ucode[0];
+ }
+
+ /* Clear UCODE_BASE register for each engine used by this group */
+ ret = cpt_set_ucode_base(eng_grp, obj);
+
+ return ret;
+}
+
+static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,
+ struct otx2_cpt_eng_grp_info *src_grp)
+{
+ /* Setup fields for engine group which is mirrored */
+ src_grp->mirror.is_ena = false;
+ src_grp->mirror.idx = 0;
+ src_grp->mirror.ref_count++;
+
+ /* Setup fields for mirroring engine group */
+ dst_grp->mirror.is_ena = true;
+ dst_grp->mirror.idx = src_grp->idx;
+ dst_grp->mirror.ref_count = 0;
+}
+
+static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)
+{
+ struct otx2_cpt_eng_grp_info *src_grp;
+
+ if (!dst_grp->mirror.is_ena)
+ return;
+
+ src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];
+
+ src_grp->mirror.ref_count--;
+ dst_grp->mirror.is_ena = false;
+ dst_grp->mirror.idx = 0;
+ dst_grp->mirror.ref_count = 0;
+}
+
+static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
+ struct otx2_cpt_engines *engs, int engs_cnt)
+{
+ struct otx2_cpt_engs_rsvd *mirrored_engs;
+ int i;
+
+ for (i = 0; i < engs_cnt; i++) {
+ mirrored_engs = find_engines_by_type(mirror_eng_grp,
+ engs[i].type);
+ if (!mirrored_engs)
+ continue;
+
+ /*
+ * If mirrored group has this type of engines attached then
+ * there are 3 scenarios possible:
+ * 1) mirrored_engs.count == engs[i].count then all engines
+ * from mirrored engine group will be shared with this engine
+ * group
+ * 2) mirrored_engs.count > engs[i].count then only a subset of
+ * engines from mirrored engine group will be shared with this
+ * engine group
+ * 3) mirrored_engs.count < engs[i].count then all engines
+ * from mirrored engine group will be shared with this group
+ * and additional engines will be reserved for exclusive use
+ * by this engine group
+ */
+ engs[i].count -= mirrored_engs->count;
+ }
+}
+
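+/*
+ * Find an already enabled engine group which uses the same primary
+ * ucode (matched by version string), so its ucode can be mirrored
+ * instead of being loaded a second time.
+ */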
+static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(
+ struct otx2_cpt_eng_grp_info *grp)
+{
+ struct otx2_cpt_eng_grps *eng_grps = grp->g;
+ int i;
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ if (!eng_grps->grp[i].is_enabled)
+ continue;
+ if (eng_grps->grp[i].ucode[0].type &&
+ eng_grps->grp[i].ucode[1].type)
+ continue;
+ if (grp->idx == i)
+ continue;
+ if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
+ grp->ucode[0].ver_str,
+ OTX2_CPT_UCODE_VER_STR_SZ))
+ return &eng_grps->grp[i];
+ }
+
+ return NULL;
+}
+
+static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ int i;
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ if (!eng_grps->grp[i].is_enabled)
+ return &eng_grps->grp[i];
+ }
+ return NULL;
+}
+
+static int eng_grp_update_masks(struct device *dev,
+ struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
+ struct otx2_cpt_bitmap tmp_bmap = { {0} };
+ int i, j, cnt, max_cnt;
+ int bit;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+ if (engs->count <= 0)
+ continue;
+
+ switch (engs->type) {
+ case OTX2_CPT_SE_TYPES:
+ max_cnt = eng_grp->g->avail.max_se_cnt;
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ max_cnt = eng_grp->g->avail.max_ie_cnt;
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ max_cnt = eng_grp->g->avail.max_ae_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
+ return -EINVAL;
+ }
+
+ cnt = engs->count;
+ WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);
+ bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
+ for (j = engs->offset; j < engs->offset + max_cnt; j++) {
+ if (!eng_grp->g->eng_ref_cnt[j]) {
+ bitmap_set(tmp_bmap.bits, j, 1);
+ cnt--;
+ if (!cnt)
+ break;
+ }
+ }
+
+ if (cnt)
+ return -ENOSPC;
+
+ bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
+ }
+
+ if (!eng_grp->mirror.is_ena)
+ return 0;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+
+ mirrored_engs = find_engines_by_type(
+ &eng_grp->g->grp[eng_grp->mirror.idx],
+ engs->type);
+ WARN_ON(!mirrored_engs && engs->count <= 0);
+ if (!mirrored_engs)
+ continue;
+
+ bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
+ eng_grp->g->engs_num);
+ if (engs->count < 0) {
+ bit = find_first_bit(mirrored_engs->bmap,
+ eng_grp->g->engs_num);
+ bitmap_clear(tmp_bmap.bits, bit, -engs->count);
+ }
+ bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
+ eng_grp->g->engs_num);
+ }
+ return 0;
+}
+
+static int delete_engine_group(struct device *dev,
+ struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ int ret;
+
+ if (!eng_grp->is_enabled)
+ return 0;
+
+ if (eng_grp->mirror.ref_count)
+ return -EINVAL;
+
+ /* Removing engine group mirroring if enabled */
+ remove_eng_grp_mirroring(eng_grp);
+
+ /* Disable engine group */
+ ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
+ if (ret)
+ return ret;
+
+ /* Release all engines held by this engine group */
+ ret = release_engines(dev, eng_grp);
+ if (ret)
+ return ret;
+
+ eng_grp->is_enabled = false;
+
+ return 0;
+}
+
+static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ struct otx2_cpt_ucode *ucode;
+
+ if (eng_grp->mirror.is_ena)
+ ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
+ else
+ ucode = &eng_grp->ucode[0];
+ WARN_ON(!eng_grp->engs[0].type);
+ eng_grp->engs[0].ucode = ucode;
+
+ if (eng_grp->engs[1].type) {
+ if (is_2nd_ucode_used(eng_grp))
+ eng_grp->engs[1].ucode = &eng_grp->ucode[1];
+ else
+ eng_grp->engs[1].ucode = ucode;
+ }
+}
+
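+/*
+ * Create an engine group: copy the ucode images to DMA memory, mirror
+ * an existing group with the same ucode if one exists, reserve engines,
+ * program the engine masks and finally enable the group.
+ */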
+static int create_engine_group(struct device *dev,
+ struct otx2_cpt_eng_grps *eng_grps,
+ struct otx2_cpt_engines *engs, int ucodes_cnt,
+ void *ucode_data[], int is_print)
+{
+ struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
+ struct otx2_cpt_eng_grp_info *eng_grp;
+ struct otx2_cpt_uc_info_t *uc_info;
+ int i, ret = 0;
+
+ /* Find engine group which is not used */
+ eng_grp = find_unused_eng_grp(eng_grps);
+ if (!eng_grp) {
+ dev_err(dev, "Error all engine groups are being used\n");
+ return -ENOSPC;
+ }
+ /* Load ucode */
+ for (i = 0; i < ucodes_cnt; i++) {
+ uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
+ eng_grp->ucode[i] = uc_info->ucode;
+ ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
+ uc_info->fw->data);
+ if (ret)
+ goto unload_ucode;
+ }
+
+ /* Check if this group mirrors another existing engine group */
+ mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
+ if (mirrored_eng_grp) {
+ /* Setup mirroring */
+ setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);
+
+ /*
+ * Update count of requested engines because some
+ * of them might be shared with mirrored group
+ */
+ update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
+ }
+ ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
+ if (ret)
+ goto unload_ucode;
+
+ /* Update ucode pointers used by engines */
+ update_ucode_ptrs(eng_grp);
+
+ /* Update engine masks used by this group */
+ ret = eng_grp_update_masks(dev, eng_grp);
+ if (ret)
+ goto release_engs;
+
+ /* Enable engine group */
+ ret = enable_eng_grp(eng_grp, eng_grps->obj);
+ if (ret)
+ goto release_engs;
+
+ /*
+ * If this engine group mirrors another engine group
+ * then we need to unload ucode as we will use ucode
+ * from mirrored engine group
+ */
+ if (eng_grp->mirror.is_ena)
+ ucode_unload(dev, &eng_grp->ucode[0]);
+
+ eng_grp->is_enabled = true;
+
+ if (!is_print)
+ return 0;
+
+ if (mirrored_eng_grp)
+ dev_info(dev,
+ "Engine_group%d: reuse microcode %s from group %d\n",
+ eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
+ mirrored_eng_grp->idx);
+ else
+ dev_info(dev, "Engine_group%d: microcode loaded %s\n",
+ eng_grp->idx, eng_grp->ucode[0].ver_str);
+ if (is_2nd_ucode_used(eng_grp))
+ dev_info(dev, "Engine_group%d: microcode loaded %s\n",
+ eng_grp->idx, eng_grp->ucode[1].ver_str);
+
+ return 0;
+
+release_engs:
+ release_engines(dev, eng_grp);
+unload_ucode:
+ ucode_unload(dev, &eng_grp->ucode[0]);
+ ucode_unload(dev, &eng_grp->ucode[1]);
+ return ret;
+}
+
+static void delete_engine_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ int i;
+
+ /* First delete all mirroring engine groups */
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
+ if (eng_grps->grp[i].mirror.is_ena)
+ delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
+
+ /* Delete remaining engine groups */
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
+ delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
+}
+
+int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
+{
+
+ int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
+ struct otx2_cpt_eng_grp_info *grp;
+ int i;
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ if (!grp->is_enabled)
+ continue;
+
+ if (eng_type == OTX2_CPT_SE_TYPES) {
+ if (eng_grp_has_eng_type(grp, eng_type) &&
+ !eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {
+ eng_grp_num = i;
+ break;
+ }
+ } else {
+ if (eng_grp_has_eng_type(grp, eng_type)) {
+ eng_grp_num = i;
+ break;
+ }
+ }
+ }
+ return eng_grp_num;
+}
+
+int otx2_cpt_create_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
+ struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
+ struct fw_info_t fw_info;
+ int ret;
+
+ /*
+ * Don't create the engine groups if they were already
+ * created (when the user enabled VFs for the first time)
+ */
+ if (eng_grps->is_grps_created)
+ return 0;
+
+ ret = cpt_ucode_load_fw(pdev, &fw_info);
+ if (ret)
+ return ret;
+
+ /*
+ * Create engine group with SE engines for kernel
+ * crypto functionality (symmetric crypto)
+ */
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for SE\n");
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ engs[0].type = OTX2_CPT_SE_TYPES;
+ engs[0].count = eng_grps->avail.max_se_cnt;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 1);
+ if (ret)
+ goto release_fw;
+
+ /*
+ * Create engine group with SE+IE engines for IPSec.
+ * All SE engines will be shared with engine group 0.
+ */
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
+ uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
+
+ if (uc_info[1] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for IE");
+ ret = -EINVAL;
+ goto delete_eng_grp;
+ }
+ engs[0].type = OTX2_CPT_SE_TYPES;
+ engs[0].count = eng_grps->avail.max_se_cnt;
+ engs[1].type = OTX2_CPT_IE_TYPES;
+ engs[1].count = eng_grps->avail.max_ie_cnt;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
+ (void **) uc_info, 1);
+ if (ret)
+ goto delete_eng_grp;
+
+ /*
+ * Create engine group with AE engines for asymmetric
+ * crypto functionality.
+ */
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for AE");
+ ret = -EINVAL;
+ goto delete_eng_grp;
+ }
+ engs[0].type = OTX2_CPT_AE_TYPES;
+ engs[0].count = eng_grps->avail.max_ae_cnt;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 1);
+ if (ret)
+ goto delete_eng_grp;
+
+ eng_grps->is_grps_created = true;
+
+ cpt_ucode_release_fw(&fw_info);
+ return 0;
+
+delete_eng_grp:
+ delete_engine_grps(pdev, eng_grps);
+release_fw:
+ cpt_ucode_release_fw(&fw_info);
+ return ret;
+}
+
+int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
+{
+ int i, ret, busy, total_cores;
+ int timeout = 10;
+ u64 reg = 0;
+
+ total_cores = cptpf->eng_grps.avail.max_se_cnt +
+ cptpf->eng_grps.avail.max_ie_cnt +
+ cptpf->eng_grps.avail.max_ae_cnt;
+
+ /* Disengage the cores from groups */
+ for (i = 0; i < total_cores; i++) {
+ ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), 0x0);
+ if (ret)
+ return ret;
+
+ cptpf->eng_grps.eng_ref_cnt[i] = 0;
+ }
+ ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+ if (ret)
+ return ret;
+
+ /* Wait for cores to become idle */
+ do {
+ busy = 0;
+ usleep_range(10000, 20000);
+ if (timeout-- < 0)
+ return -EBUSY;
+
+ for (i = 0; i < total_cores; i++) {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_STS(i), &reg);
+ if (ret)
+ return ret;
+
+ if (reg & 0x1) {
+ busy = 1;
+ break;
+ }
+ }
+ } while (busy);
+
+ /* Disable the cores */
+ for (i = 0; i < total_cores; i++) {
+ ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL(i), 0x0);
+ if (ret)
+ return ret;
+ }
+ return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+}
+
+void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ struct otx2_cpt_eng_grp_info *grp;
+ int i, j;
+
+ delete_engine_grps(pdev, eng_grps);
+ /* Release memory */
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
+ kfree(grp->engs[j].bmap);
+ grp->engs[j].bmap = NULL;
+ }
+ }
+}
+
+int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ struct otx2_cpt_eng_grp_info *grp;
+ int i, j, ret;
+
+ eng_grps->obj = pci_get_drvdata(pdev);
+ eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
+ eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
+ eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;
+
+ eng_grps->engs_num = eng_grps->avail.max_se_cnt +
+ eng_grps->avail.max_ie_cnt +
+ eng_grps->avail.max_ae_cnt;
+ if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
+ dev_err(&pdev->dev,
+ "Number of engines %d > than max supported %d\n",
+ eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
+ ret = -EINVAL;
+ goto cleanup_eng_grps;
+ }
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ grp->g = eng_grps;
+ grp->idx = i;
+
+ for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
+ grp->engs[j].bmap =
+ kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
+ sizeof(long), GFP_KERNEL);
+ if (!grp->engs[j].bmap) {
+ ret = -ENOMEM;
+ goto cleanup_eng_grps;
+ }
+ }
+ }
+ return 0;
+
+cleanup_eng_grps:
+ otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
+ return ret;
+}
+
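+/*
+ * Create small (two engine) AE, SE and IE groups which are used only to
+ * run the LOAD_FVC capability discovery operation; they are deleted
+ * again once the capabilities have been read.
+ */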
+static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
+ struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
+ struct fw_info_t fw_info;
+ int ret;
+
+ ret = cpt_ucode_load_fw(pdev, &fw_info);
+ if (ret)
+ return ret;
+
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for AE\n");
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ engs[0].type = OTX2_CPT_AE_TYPES;
+ engs[0].count = 2;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 0);
+ if (ret)
+ goto release_fw;
+
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for SE\n");
+ ret = -EINVAL;
+ goto delete_eng_grp;
+ }
+ engs[0].type = OTX2_CPT_SE_TYPES;
+ engs[0].count = 2;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 0);
+ if (ret)
+ goto delete_eng_grp;
+
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for IE\n");
+ ret = -EINVAL;
+ goto delete_eng_grp;
+ }
+ engs[0].type = OTX2_CPT_IE_TYPES;
+ engs[0].count = 2;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 0);
+ if (ret)
+ goto delete_eng_grp;
+
+ cpt_ucode_release_fw(&fw_info);
+ return 0;
+
+delete_eng_grp:
+ delete_engine_grps(pdev, eng_grps);
+release_fw:
+ cpt_ucode_release_fw(&fw_info);
+ return ret;
+}
+
+/*
+ * Get CPT HW capabilities using LOAD_FVC operation.
+ */
+int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
+{
+ struct otx2_cptlfs_info *lfs = &cptpf->lfs;
+ struct otx2_cpt_iq_command iq_cmd;
+ union otx2_cpt_opcode opcode;
+ union otx2_cpt_res_s *result;
+ union otx2_cpt_inst_s inst;
+ dma_addr_t rptr_baddr;
+ struct pci_dev *pdev;
+ u32 len, compl_rlen;
+ int ret, etype;
+ void *rptr;
+
+ /*
+ * Skip capability discovery if it was already done
+ * (when the user enabled VFs for the first time)
+ */
+ if (cptpf->is_eng_caps_discovered)
+ return 0;
+
+ pdev = cptpf->pdev;
+ /*
+ * Create engine groups for each type to submit LOAD_FVC op and
+ * get engine's capabilities.
+ */
+ ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
+ if (ret)
+ goto delete_grps;
+
+ lfs->pdev = pdev;
+ lfs->reg_base = cptpf->reg_base;
+ lfs->mbox = &cptpf->afpf_mbox;
+ ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
+ OTX2_CPT_QUEUE_HI_PRIO, 1);
+ if (ret)
+ goto delete_grps;
+
+ compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
+ len = compl_rlen + LOADFVC_RLEN;
+
+ result = kzalloc(len, GFP_KERNEL);
+ if (!result) {
+ ret = -ENOMEM;
+ goto lf_cleanup;
+ }
+ rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
+ dev_err(&pdev->dev, "DMA mapping failed\n");
+ ret = -EFAULT;
+ goto free_result;
+ }
+ rptr = (u8 *)result + compl_rlen;
+
+ /* Fill in the command */
+ opcode.s.major = LOADFVC_MAJOR_OP;
+ opcode.s.minor = LOADFVC_MINOR_OP;
+
+ iq_cmd.cmd.u = 0;
+ iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);
+
+ /* 64-bit swap for microcode data reads, not needed for addresses */
+ cpu_to_be64s(&iq_cmd.cmd.u);
+ iq_cmd.dptr = 0;
+ iq_cmd.rptr = rptr_baddr + compl_rlen;
+ iq_cmd.cptr.u = 0;
+
+ for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
+ result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
+ iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
+ etype);
+ otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
+ otx2_cpt_send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
+
+ while (result->s.compcode == OTX2_CPT_COMPLETION_CODE_INIT)
+ cpu_relax();
+
+ cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
+ }
+ dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
+ cptpf->is_eng_caps_discovered = true;
+
+free_result:
+ kfree(result);
+lf_cleanup:
+ otx2_cptlf_shutdown(&cptpf->lfs);
+delete_grps:
+ delete_engine_grps(pdev, &cptpf->eng_grps);
+
+ return ret;
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
new file mode 100644
index 000000000000..6b0d432de0af
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPTPF_UCODE_H
+#define __OTX2_CPTPF_UCODE_H
+
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include "otx2_cpt_hw_types.h"
+#include "otx2_cpt_common.h"
+
+/*
+ * On the OcteonTX2 platform IPSec ucode can use both IE and SE engines, therefore
+ * IE and SE engines can be attached to the same engine group.
+ */
+#define OTX2_CPT_MAX_ETYPES_PER_GRP 2
+
+/* CPT ucode signature size */
+#define OTX2_CPT_UCODE_SIGN_LEN 256
+
+/* Microcode version string length */
+#define OTX2_CPT_UCODE_VER_STR_SZ 44
+
+/* Maximum number of supported engines/cores on OcteonTX2 platform */
+#define OTX2_CPT_MAX_ENGINES 128
+
+#define OTX2_CPT_ENGS_BITMASK_LEN BITS_TO_LONGS(OTX2_CPT_MAX_ENGINES)
+
+/* Microcode types */
+enum otx2_cpt_ucode_type {
+ OTX2_CPT_AE_UC_TYPE = 1, /* AE-MAIN */
+ OTX2_CPT_SE_UC_TYPE1 = 20,/* SE-MAIN - combination of 21 and 22 */
+ OTX2_CPT_SE_UC_TYPE2 = 21,/* Fast Path IPSec + AirCrypto */
+ OTX2_CPT_SE_UC_TYPE3 = 22,/*
+ * Hash + HMAC + FlexiCrypto + RNG +
+ * Full Feature IPSec + AirCrypto + Kasumi
+ */
+ OTX2_CPT_IE_UC_TYPE1 = 30, /* IE-MAIN - combination of 31 and 32 */
+ OTX2_CPT_IE_UC_TYPE2 = 31, /* Fast Path IPSec */
+ OTX2_CPT_IE_UC_TYPE3 = 32, /*
+ * Hash + HMAC + FlexiCrypto + RNG +
+ * Full Feature IPSec
+ */
+};
+
+struct otx2_cpt_bitmap {
+ unsigned long bits[OTX2_CPT_ENGS_BITMASK_LEN];
+ int size;
+};
+
+struct otx2_cpt_engines {
+ int type;
+ int count;
+};
+
+/* Microcode version number */
+struct otx2_cpt_ucode_ver_num {
+ u8 nn;
+ u8 xx;
+ u8 yy;
+ u8 zz;
+};
+
+struct otx2_cpt_ucode_hdr {
+ struct otx2_cpt_ucode_ver_num ver_num;
+ u8 ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
+ __be32 code_length;
+ u32 padding[3];
+};
+
+struct otx2_cpt_ucode {
+ u8 ver_str[OTX2_CPT_UCODE_VER_STR_SZ];/*
+ * ucode version in readable
+ * format
+ */
+ struct otx2_cpt_ucode_ver_num ver_num;/* ucode version number */
+ char filename[OTX2_CPT_NAME_LENGTH];/* ucode filename */
+ dma_addr_t dma; /* phys address of ucode image */
+ void *va; /* virt address of ucode image */
+ u32 size; /* ucode image size */
+ int type; /* ucode image type SE, IE, AE or SE+IE */
+};
+
+struct otx2_cpt_uc_info_t {
+ struct list_head list;
+ struct otx2_cpt_ucode ucode;/* microcode information */
+ const struct firmware *fw;
+};
+
+/* Maximum and current number of engines available for all engine groups */
+struct otx2_cpt_engs_available {
+ int max_se_cnt;
+ int max_ie_cnt;
+ int max_ae_cnt;
+ int se_cnt;
+ int ie_cnt;
+ int ae_cnt;
+};
+
+/* Engines reserved to an engine group */
+struct otx2_cpt_engs_rsvd {
+ int type; /* engine type */
+ int count; /* number of engines attached */
+ int offset; /* constant offset of engine type in the bitmap */
+ unsigned long *bmap; /* attached engines bitmap */
+ struct otx2_cpt_ucode *ucode; /* ucode used by these engines */
+};
+
+struct otx2_cpt_mirror_info {
+ int is_ena; /*
+ * is mirroring enabled, it is set only for engine
+ * group which mirrors another engine group
+ */
+ int idx; /*
+ * index of engine group which is mirrored by this
+ * group, set only for engine group which mirrors
+ * another group
+ */
+ int ref_count; /*
+ * number of times this engine group is mirrored by
+ * other groups, this is set only for engine group
+ * which is mirrored by other group(s)
+ */
+};
+
+struct otx2_cpt_eng_grp_info {
+ struct otx2_cpt_eng_grps *g; /* pointer to engine_groups structure */
+ /* engines attached */
+ struct otx2_cpt_engs_rsvd engs[OTX2_CPT_MAX_ETYPES_PER_GRP];
+ /* ucodes information */
+ struct otx2_cpt_ucode ucode[OTX2_CPT_MAX_ETYPES_PER_GRP];
+ /* engine group mirroring information */
+ struct otx2_cpt_mirror_info mirror;
+ int idx; /* engine group index */
+ bool is_enabled; /*
+ * is the engine group enabled; an engine group is enabled
+ * when it has engines attached and ucode loaded
+ */
+};
+
+struct otx2_cpt_eng_grps {
+ struct otx2_cpt_eng_grp_info grp[OTX2_CPT_MAX_ENGINE_GROUPS];
+ struct otx2_cpt_engs_available avail;
+ void *obj; /* device specific data */
+ int engs_num; /* total number of engines supported */
+ u8 eng_ref_cnt[OTX2_CPT_MAX_ENGINES];/* engines reference count */
+ bool is_grps_created; /* Are the engine groups already created */
+};
+struct otx2_cptpf_dev;
+int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps);
+void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps);
+int otx2_cpt_create_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps);
+int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf);
+int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type);
+int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf);
+
+#endif /* __OTX2_CPTPF_UCODE_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
new file mode 100644
index 000000000000..4f0a169fddbd
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPTVF_H
+#define __OTX2_CPTVF_H
+
+#include "mbox.h"
+#include "otx2_cptlf.h"
+
+struct otx2_cptvf_dev {
+ void __iomem *reg_base; /* Register start address */
+ void __iomem *pfvf_mbox_base; /* PF-VF mbox start address */
+ struct pci_dev *pdev; /* PCI device handle */
+ struct otx2_cptlfs_info lfs; /* CPT LFs attached to this VF */
+ u8 vf_id; /* Virtual function index */
+
+ /* PF <=> VF mbox */
+ struct otx2_mbox pfvf_mbox;
+ struct work_struct pfvf_mbox_work;
+ struct workqueue_struct *pfvf_mbox_wq;
+};
+
+irqreturn_t otx2_cptvf_pfvf_mbox_intr(int irq, void *arg);
+void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work);
+int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type);
+int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf);
+
+#endif /* __OTX2_CPTVF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
new file mode 100644
index 000000000000..a72723455df7
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -0,0 +1,1758 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+#include <crypto/cryptd.h>
+#include <crypto/des.h>
+#include <crypto/internal/aead.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/xts.h>
+#include <crypto/gcm.h>
+#include <crypto/scatterwalk.h>
+#include <linux/rtnetlink.h>
+#include <linux/sort.h>
+#include <linux/module.h>
+#include "otx2_cptvf.h"
+#include "otx2_cptvf_algs.h"
+#include "otx2_cpt_reqmgr.h"
+
+/* Size of salt in AES GCM mode */
+#define AES_GCM_SALT_SIZE 4
+/* Size of IV in AES GCM mode */
+#define AES_GCM_IV_SIZE 8
+/* Size of ICV (Integrity Check Value) in AES GCM mode */
+#define AES_GCM_ICV_SIZE 16
+/* Offset of IV in AES GCM mode */
+#define AES_GCM_IV_OFFSET 8
+#define CONTROL_WORD_LEN 8
+#define KEY2_OFFSET 48
+#define DMA_MODE_FLAG(dma_mode) \
+ (((dma_mode) == OTX2_CPT_DMA_MODE_SG) ? (1 << 7) : 0)
+
+/* Truncated SHA digest size */
+#define SHA1_TRUNC_DIGEST_SIZE 12
+#define SHA256_TRUNC_DIGEST_SIZE 16
+#define SHA384_TRUNC_DIGEST_SIZE 24
+#define SHA512_TRUNC_DIGEST_SIZE 32
+
+static DEFINE_MUTEX(mutex);
+static int is_crypto_registered;
+
+struct cpt_device_desc {
+ struct pci_dev *dev;
+ int num_queues;
+};
+
+struct cpt_device_table {
+ atomic_t count;
+ struct cpt_device_desc desc[OTX2_CPT_MAX_LFS_NUM];
+};
+
+static struct cpt_device_table se_devices = {
+ .count = ATOMIC_INIT(0)
+};
+
+static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
+{
+ int count;
+
+ count = atomic_read(&se_devices.count);
+ if (count < 1)
+ return -ENODEV;
+
+ *cpu_num = get_cpu();
+ /*
+ * On the OcteonTX2 platform a CPT instruction queue is bound to each
+ * local function (LF); LFs in turn can be attached to a PF or a VF,
+ * therefore we always use the first device. We get maximum
+ * performance if one CPT queue is available for each cpu,
+ * otherwise CPT queues have to be shared between cpus.
+ */
+ if (*cpu_num >= se_devices.desc[0].num_queues)
+ *cpu_num %= se_devices.desc[0].num_queues;
+ *pdev = se_devices.desc[0].dev;
+
+ put_cpu();
+
+ return 0;
+}
+
+static inline int validate_hmac_cipher_null(struct otx2_cpt_req_info *cpt_req)
+{
+ struct otx2_cpt_req_ctx *rctx;
+ struct aead_request *req;
+ struct crypto_aead *tfm;
+
+ req = container_of(cpt_req->areq, struct aead_request, base);
+ tfm = crypto_aead_reqtfm(req);
+ rctx = aead_request_ctx(req);
+ if (memcmp(rctx->fctx.hmac.s.hmac_calc,
+ rctx->fctx.hmac.s.hmac_recv,
+ crypto_aead_authsize(tfm)) != 0)
+ return -EBADMSG;
+
+ return 0;
+}
+
+static void otx2_cpt_aead_callback(int status, void *arg1, void *arg2)
+{
+ struct otx2_cpt_inst_info *inst_info = arg2;
+ struct crypto_async_request *areq = arg1;
+ struct otx2_cpt_req_info *cpt_req;
+ struct pci_dev *pdev;
+
+ if (inst_info) {
+ cpt_req = inst_info->req;
+ if (!status) {
+ /*
+ * When the selected cipher is NULL we need to manually
+ * verify that the calculated HMAC value matches the
+ * received HMAC value.
+ */
+ if (cpt_req->req_type ==
+ OTX2_CPT_AEAD_ENC_DEC_NULL_REQ &&
+ !cpt_req->is_enc)
+ status = validate_hmac_cipher_null(cpt_req);
+ }
+ pdev = inst_info->pdev;
+ otx2_cpt_info_destroy(pdev, inst_info);
+ }
+ if (areq)
+ areq->complete(areq, status);
+}
+
+static void output_iv_copyback(struct crypto_async_request *areq)
+{
+ struct otx2_cpt_req_info *req_info;
+ struct otx2_cpt_req_ctx *rctx;
+ struct skcipher_request *sreq;
+ struct crypto_skcipher *stfm;
+ struct otx2_cpt_enc_ctx *ctx;
+ u32 start, ivsize;
+
+ sreq = container_of(areq, struct skcipher_request, base);
+ stfm = crypto_skcipher_reqtfm(sreq);
+ ctx = crypto_skcipher_ctx(stfm);
+ if (ctx->cipher_type == OTX2_CPT_AES_CBC ||
+ ctx->cipher_type == OTX2_CPT_DES3_CBC) {
+ rctx = skcipher_request_ctx(sreq);
+ req_info = &rctx->cpt_req;
+ ivsize = crypto_skcipher_ivsize(stfm);
+ start = sreq->cryptlen - ivsize;
+
+ if (req_info->is_enc) {
+ scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
+ ivsize, 0);
+ } else {
+ if (sreq->src != sreq->dst) {
+ scatterwalk_map_and_copy(sreq->iv, sreq->src,
+ start, ivsize, 0);
+ } else {
+ memcpy(sreq->iv, req_info->iv_out, ivsize);
+ kfree(req_info->iv_out);
+ }
+ }
+ }
+}
+
+static void otx2_cpt_skcipher_callback(int status, void *arg1, void *arg2)
+{
+ struct otx2_cpt_inst_info *inst_info = arg2;
+ struct crypto_async_request *areq = arg1;
+ struct pci_dev *pdev;
+
+ if (areq) {
+ if (!status)
+ output_iv_copyback(areq);
+ if (inst_info) {
+ pdev = inst_info->pdev;
+ otx2_cpt_info_destroy(pdev, inst_info);
+ }
+ areq->complete(areq, status);
+ }
+}
+
+static inline void update_input_data(struct otx2_cpt_req_info *req_info,
+ struct scatterlist *inp_sg,
+ u32 nbytes, u32 *argcnt)
+{
+ req_info->req.dlen += nbytes;
+
+ while (nbytes) {
+ u32 len = (nbytes < inp_sg->length) ? nbytes : inp_sg->length;
+ u8 *ptr = sg_virt(inp_sg);
+
+ req_info->in[*argcnt].vptr = (void *)ptr;
+ req_info->in[*argcnt].size = len;
+ nbytes -= len;
+ ++(*argcnt);
+ inp_sg = sg_next(inp_sg);
+ }
+}
+
+static inline void update_output_data(struct otx2_cpt_req_info *req_info,
+ struct scatterlist *outp_sg,
+ u32 offset, u32 nbytes, u32 *argcnt)
+{
+ u32 len, sg_len;
+ u8 *ptr;
+
+ req_info->rlen += nbytes;
+
+ while (nbytes) {
+ sg_len = outp_sg->length - offset;
+ len = (nbytes < sg_len) ? nbytes : sg_len;
+ ptr = sg_virt(outp_sg);
+
+ req_info->out[*argcnt].vptr = (void *) (ptr + offset);
+ req_info->out[*argcnt].size = len;
+ nbytes -= len;
+ ++(*argcnt);
+ offset = 0;
+ outp_sg = sg_next(outp_sg);
+ }
+}
+
+static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
+ u32 *argcnt)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
+ int ivsize = crypto_skcipher_ivsize(stfm);
+ u32 start = req->cryptlen - ivsize;
+ gfp_t flags;
+
+ flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
+ req_info->ctrl.s.se_req = 1;
+
+ req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
+ DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
+ if (enc) {
+ req_info->req.opcode.s.minor = 2;
+ } else {
+ req_info->req.opcode.s.minor = 3;
+ if ((ctx->cipher_type == OTX2_CPT_AES_CBC ||
+ ctx->cipher_type == OTX2_CPT_DES3_CBC) &&
+ req->src == req->dst) {
+ req_info->iv_out = kmalloc(ivsize, flags);
+ if (!req_info->iv_out)
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(req_info->iv_out, req->src,
+ start, ivsize, 0);
+ }
+ }
+ /* Encryption data length */
+ req_info->req.param1 = req->cryptlen;
+ /* Authentication data length */
+ req_info->req.param2 = 0;
+
+ fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
+ fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
+ fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
+
+ if (ctx->cipher_type == OTX2_CPT_AES_XTS)
+ memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
+ else
+ memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
+
+ memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));
+
+ cpu_to_be64s(&fctx->enc.enc_ctrl.u);
+
+ /*
+ * Store packet data information in the offset
+ * control word (first 8 bytes).
+ */
+ req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
+ req_info->in[*argcnt].size = CONTROL_WORD_LEN;
+ req_info->req.dlen += CONTROL_WORD_LEN;
+ ++(*argcnt);
+
+ req_info->in[*argcnt].vptr = (u8 *)fctx;
+ req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
+ req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
+
+ ++(*argcnt);
+
+ return 0;
+}
+
+static inline int create_input_list(struct skcipher_request *req, u32 enc,
+ u32 enc_iv_len)
+{
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0;
+ int ret;
+
+ ret = create_ctx_hdr(req, enc, &argcnt);
+ if (ret)
+ return ret;
+
+ update_input_data(req_info, req->src, req->cryptlen, &argcnt);
+ req_info->in_cnt = argcnt;
+
+ return 0;
+}
+
+static inline void create_output_list(struct skcipher_request *req,
+ u32 enc_iv_len)
+{
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0;
+
+ /*
+ * OUTPUT buffer processing:
+ * AES encryption/decryption output is received in the
+ * following format:
+ *
+ * |--------IV--------|----ENCRYPTED/DECRYPTED DATA----|
+ * |     16 bytes     |     request enc/dec length     |
+ */
+ update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
+ req_info->out_cnt = argcnt;
+}
+
+static int skcipher_do_fallback(struct skcipher_request *req, bool is_enc)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
+ int ret;
+
+ if (ctx->fbk_cipher) {
+ skcipher_request_set_tfm(&rctx->sk_fbk_req, ctx->fbk_cipher);
+ skcipher_request_set_callback(&rctx->sk_fbk_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->sk_fbk_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ ret = is_enc ? crypto_skcipher_encrypt(&rctx->sk_fbk_req) :
+ crypto_skcipher_decrypt(&rctx->sk_fbk_req);
+ } else {
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
+ struct pci_dev *pdev;
+ int status, cpu_num;
+
+ if (req->cryptlen == 0)
+ return 0;
+
+ if (!IS_ALIGNED(req->cryptlen, ctx->enc_align_len))
+ return -EINVAL;
+
+ if (req->cryptlen > OTX2_CPT_MAX_REQ_SIZE)
+ return skcipher_do_fallback(req, enc);
+
+ /* Clear control words */
+ rctx->ctrl_word.flags = 0;
+ rctx->fctx.enc.enc_ctrl.u = 0;
+
+ status = create_input_list(req, enc, enc_iv_len);
+ if (status)
+ return status;
+ create_output_list(req, enc_iv_len);
+
+ status = get_se_device(&pdev, &cpu_num);
+ if (status)
+ return status;
+
+ req_info->callback = otx2_cpt_skcipher_callback;
+ req_info->areq = &req->base;
+ req_info->req_type = OTX2_CPT_ENC_DEC_REQ;
+ req_info->is_enc = enc;
+ req_info->is_trunc_hmac = false;
+ req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+
+ /*
+ * We perform an asynchronous send; once the request
+ * completes, the driver notifies the caller through
+ * the registered callback functions.
+ */
+ status = otx2_cpt_do_request(pdev, req_info, cpu_num);
+
+ return status;
+}
+
+static int otx2_cpt_skcipher_encrypt(struct skcipher_request *req)
+{
+ return cpt_enc_dec(req, true);
+}
+
+static int otx2_cpt_skcipher_decrypt(struct skcipher_request *req)
+{
+ return cpt_enc_dec(req, false);
+}
+
+static int otx2_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const u8 *key2 = key + (keylen / 2);
+ const u8 *key1 = key;
+ int ret;
+
+ ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
+ if (ret)
+ return ret;
+ ctx->key_len = keylen;
+ ctx->enc_align_len = 1;
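+ /*
+ * Both XTS key halves are kept in enc_key: key1 at offset 0 and
+ * key2 at the fixed KEY2_OFFSET, presumably the layout the
+ * flexi-crypto engine expects.
+ */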
+ memcpy(ctx->enc_key, key1, keylen / 2);
+ memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
+ ctx->cipher_type = OTX2_CPT_AES_XTS;
+ switch (ctx->key_len) {
+ case 2 * AES_KEYSIZE_128:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ break;
+ case 2 * AES_KEYSIZE_192:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ break;
+ case 2 * AES_KEYSIZE_256:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
+}
+
+static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ u32 keylen, u8 cipher_type)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (keylen != DES3_EDE_KEY_SIZE)
+ return -EINVAL;
+
+ ctx->key_len = keylen;
+ ctx->cipher_type = cipher_type;
+ ctx->enc_align_len = 8;
+
+ memcpy(ctx->enc_key, key, keylen);
+
+ return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
+}
+
+static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ u32 keylen, u8 cipher_type)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (cipher_type == OTX2_CPT_AES_CBC || cipher_type == OTX2_CPT_AES_ECB)
+ ctx->enc_align_len = 16;
+ else
+ ctx->enc_align_len = 1;
+
+ ctx->key_len = keylen;
+ ctx->cipher_type = cipher_type;
+
+ memcpy(ctx->enc_key, key, keylen);
+
+ return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
+}
+
+static int otx2_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_CBC);
+}
+
+static int otx2_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_ECB);
+}
+
+static int otx2_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_CBC);
+}
+
+static int otx2_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_ECB);
+}
+
+static int cpt_skcipher_fallback_init(struct otx2_cpt_enc_ctx *ctx,
+ struct crypto_alg *alg)
+{
+ if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ ctx->fbk_cipher =
+ crypto_alloc_skcipher(alg->cra_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fbk_cipher)) {
+ pr_err("%s() failed to allocate fallback for %s\n",
+ __func__, alg->cra_name);
+ return PTR_ERR(ctx->fbk_cipher);
+ }
+ }
+ return 0;
+}
+
+static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+
+ memset(ctx, 0, sizeof(*ctx));
+ /*
+ * Additional memory for a nested skcipher_request is
+ * allocated since the cryptd daemon uses this memory
+ * for its request_ctx information.
+ */
+ crypto_skcipher_set_reqsize(stfm, sizeof(struct otx2_cpt_req_ctx) +
+ sizeof(struct skcipher_request));
+
+ return cpt_skcipher_fallback_init(ctx, alg);
+}
+
+static void otx2_cpt_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (ctx->fbk_cipher) {
+ crypto_free_skcipher(ctx->fbk_cipher);
+ ctx->fbk_cipher = NULL;
+ }
+}
+
+static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx,
+ struct crypto_alg *alg)
+{
+ if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ ctx->fbk_cipher =
+ crypto_alloc_aead(alg->cra_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fbk_cipher)) {
+ pr_err("%s() failed to allocate fallback for %s\n",
+ __func__, alg->cra_name);
+ return PTR_ERR(ctx->fbk_cipher);
+ }
+ }
+ return 0;
+}
+
+static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(atfm);
+ struct crypto_tfm *tfm = crypto_aead_tfm(atfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+
+ ctx->cipher_type = cipher_type;
+ ctx->mac_type = mac_type;
+
+ /*
+ * When the selected cipher is NULL we use the HMAC opcode instead of
+ * the FLEXICRYPTO opcode, therefore we don't need HASH algorithms
+ * for calculating ipad and opad.
+ */
+ if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL) {
+ switch (ctx->mac_type) {
+ case OTX2_CPT_SHA1:
+ ctx->hashalg = crypto_alloc_shash("sha1", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+
+ case OTX2_CPT_SHA256:
+ ctx->hashalg = crypto_alloc_shash("sha256", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+
+ case OTX2_CPT_SHA384:
+ ctx->hashalg = crypto_alloc_shash("sha384", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+
+ case OTX2_CPT_SHA512:
+ ctx->hashalg = crypto_alloc_shash("sha512", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+ }
+ }
+ switch (ctx->cipher_type) {
+ case OTX2_CPT_AES_CBC:
+ case OTX2_CPT_AES_ECB:
+ ctx->enc_align_len = 16;
+ break;
+ case OTX2_CPT_DES3_CBC:
+ case OTX2_CPT_DES3_ECB:
+ ctx->enc_align_len = 8;
+ break;
+ case OTX2_CPT_AES_GCM:
+ case OTX2_CPT_CIPHER_NULL:
+ ctx->enc_align_len = 1;
+ break;
+ }
+ crypto_aead_set_reqsize(atfm, sizeof(struct otx2_cpt_req_ctx));
+
+ return cpt_aead_fallback_init(ctx, alg);
+}
+
+static int otx2_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA1);
+}
+
+static int otx2_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA256);
+}
+
+static int otx2_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA384);
+}
+
+static int otx2_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA512);
+}
+
+static int otx2_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA1);
+}
+
+static int otx2_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA256);
+}
+
+static int otx2_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA384);
+}
+
+static int otx2_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA512);
+}
+
+static int otx2_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_GCM, OTX2_CPT_MAC_NULL);
+}
+
+static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ kfree(ctx->ipad);
+ kfree(ctx->opad);
+ if (ctx->hashalg)
+ crypto_free_shash(ctx->hashalg);
+ kfree(ctx->sdesc);
+
+ if (ctx->fbk_cipher) {
+ crypto_free_aead(ctx->fbk_cipher);
+ ctx->fbk_cipher = NULL;
+ }
+}
+
+static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (crypto_rfc4106_check_authsize(authsize))
+ return -EINVAL;
+
+ tfm->authsize = authsize;
+ /* Set authsize for fallback case */
+ if (ctx->fbk_cipher)
+ ctx->fbk_cipher->authsize = authsize;
+
+ return 0;
+}
+
+static int otx2_cpt_aead_set_authsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ tfm->authsize = authsize;
+
+ return 0;
+}
+
+static int otx2_cpt_aead_null_set_authsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ ctx->is_trunc_hmac = true;
+ tfm->authsize = authsize;
+
+ return 0;
+}
+
+static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
+{
+ struct otx2_cpt_sdesc *sdesc;
+ int size;
+
+ size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+ sdesc = kmalloc(size, GFP_KERNEL);
+ if (!sdesc)
+ return NULL;
+
+ sdesc->shash.tfm = alg;
+
+ return sdesc;
+}
+
+static inline void swap_data32(void *buf, u32 len)
+{
+ cpu_to_be32_array(buf, buf, len / 4);
+}
+
+static inline void swap_data64(void *buf, u32 len)
+{
+ u64 *src = buf;
+ int i = 0;
+
+ for (i = 0 ; i < len / 8; i++, src++)
+ cpu_to_be64s(src);
+}
+
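+/*
+ * copy_pad() copies the exported partial hash state as big-endian
+ * words; this is the form in which the precomputed ipad/opad digests
+ * are later placed into the flexi-crypto context.
+ */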
+static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
+{
+ struct sha512_state *sha512;
+ struct sha256_state *sha256;
+ struct sha1_state *sha1;
+
+ switch (mac_type) {
+ case OTX2_CPT_SHA1:
+ sha1 = (struct sha1_state *) in_pad;
+ swap_data32(sha1->state, SHA1_DIGEST_SIZE);
+ memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
+ break;
+
+ case OTX2_CPT_SHA256:
+ sha256 = (struct sha256_state *) in_pad;
+ swap_data32(sha256->state, SHA256_DIGEST_SIZE);
+ memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
+ break;
+
+ case OTX2_CPT_SHA384:
+ case OTX2_CPT_SHA512:
+ sha512 = (struct sha512_state *) in_pad;
+ swap_data64(sha512->state, SHA512_DIGEST_SIZE);
+ memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int aead_hmac_init(struct crypto_aead *cipher)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ int state_size = crypto_shash_statesize(ctx->hashalg);
+ int ds = crypto_shash_digestsize(ctx->hashalg);
+ int bs = crypto_shash_blocksize(ctx->hashalg);
+ int authkeylen = ctx->auth_key_len;
+ u8 *ipad = NULL, *opad = NULL;
+ int ret = 0, icount = 0;
+
+ ctx->sdesc = alloc_sdesc(ctx->hashalg);
+ if (!ctx->sdesc)
+ return -ENOMEM;
+
+ ctx->ipad = kzalloc(bs, GFP_KERNEL);
+ if (!ctx->ipad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ ctx->opad = kzalloc(bs, GFP_KERNEL);
+ if (!ctx->opad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ ipad = kzalloc(state_size, GFP_KERNEL);
+ if (!ipad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ opad = kzalloc(state_size, GFP_KERNEL);
+ if (!opad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ if (authkeylen > bs) {
+ ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
+ authkeylen, ipad);
+ if (ret)
+ goto calc_fail;
+
+ authkeylen = ds;
+ } else {
+ memcpy(ipad, ctx->key, authkeylen);
+ }
+
+ memset(ipad + authkeylen, 0, bs - authkeylen);
+ memcpy(opad, ipad, bs);
+
+ for (icount = 0; icount < bs; icount++) {
+ ipad[icount] ^= 0x36;
+ opad[icount] ^= 0x5c;
+ }
+
+ /*
+ * The partial hash for IPAD & OPAD is calculated using the software
+ * algorithm and its exported state is saved for the hardware.
+ */
+
+ /* IPAD Calculation */
+ crypto_shash_init(&ctx->sdesc->shash);
+ crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
+ crypto_shash_export(&ctx->sdesc->shash, ipad);
+ ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
+ if (ret)
+ goto calc_fail;
+
+ /* OPAD Calculation */
+ crypto_shash_init(&ctx->sdesc->shash);
+ crypto_shash_update(&ctx->sdesc->shash, opad, bs);
+ crypto_shash_export(&ctx->sdesc->shash, opad);
+ ret = copy_pad(ctx->mac_type, ctx->opad, opad);
+ if (ret)
+ goto calc_fail;
+
+ kfree(ipad);
+ kfree(opad);
+
+ return 0;
+
+calc_fail:
+ kfree(ctx->ipad);
+ ctx->ipad = NULL;
+ kfree(ctx->opad);
+ ctx->opad = NULL;
+ kfree(ipad);
+ kfree(opad);
+ kfree(ctx->sdesc);
+ ctx->sdesc = NULL;
+
+ return ret;
+}
+
+static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct crypto_authenc_key_param *param;
+ int enckeylen = 0, authkeylen = 0;
+ struct rtattr *rta = (void *)key;
+ int status;
+
+ if (!RTA_OK(rta, keylen))
+ return -EINVAL;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ return -EINVAL;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ return -EINVAL;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+ if (keylen < enckeylen)
+ return -EINVAL;
+
+ if (keylen > OTX2_CPT_MAX_KEY_SIZE)
+ return -EINVAL;
+
+ authkeylen = keylen - enckeylen;
+ memcpy(ctx->key, key, keylen);
+
+ switch (enckeylen) {
+ case AES_KEYSIZE_128:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ break;
+ default:
+ /* Invalid key length */
+ return -EINVAL;
+ }
+
+ ctx->enc_key_len = enckeylen;
+ ctx->auth_key_len = authkeylen;
+
+ status = aead_hmac_init(cipher);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct crypto_authenc_key_param *param;
+ struct rtattr *rta = (void *)key;
+ int enckeylen = 0;
+
+ if (!RTA_OK(rta, keylen))
+ return -EINVAL;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ return -EINVAL;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ return -EINVAL;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+ if (enckeylen != 0)
+ return -EINVAL;
+
+ if (keylen > OTX2_CPT_MAX_KEY_SIZE)
+ return -EINVAL;
+
+ memcpy(ctx->key, key, keylen);
+ ctx->enc_key_len = enckeylen;
+ ctx->auth_key_len = keylen;
+
+ return 0;
+}
+
+static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+
+ /*
+ * For AES GCM we expect to get the encryption key (16, 24 or 32
+ * bytes) followed by the salt (4 bytes).
+ */
+ switch (keylen) {
+ case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_128;
+ break;
+ case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_192;
+ break;
+ case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_256;
+ break;
+ default:
+ /* Invalid key and salt length */
+ return -EINVAL;
+ }
+
+ /* Store encryption key and salt */
+ memcpy(ctx->key, key, keylen);
+
+ return crypto_aead_setkey(ctx->fbk_cipher, key, keylen);
+}
+
+static inline int create_aead_ctx_hdr(struct aead_request *req, u32 enc,
+ u32 *argcnt)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
+ int mac_len = crypto_aead_authsize(tfm);
+ int ds;
+
+ rctx->ctrl_word.e.enc_data_offset = req->assoclen;
+
+ switch (ctx->cipher_type) {
+ case OTX2_CPT_AES_CBC:
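+ /*
+ * Associated data is limited to 248 bytes and must be 8-byte
+ * aligned, presumably a restriction of the flexi-crypto microcode.
+ */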
+ if (req->assoclen > 248 || !IS_ALIGNED(req->assoclen, 8))
+ return -EINVAL;
+
+ fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
+ /* Copy encryption key to context */
+ memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
+ ctx->enc_key_len);
+ /* Copy IV to context */
+ memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));
+
+ ds = crypto_shash_digestsize(ctx->hashalg);
+ if (ctx->mac_type == OTX2_CPT_SHA384)
+ ds = SHA512_DIGEST_SIZE;
+ if (ctx->ipad)
+ memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
+ if (ctx->opad)
+ memcpy(fctx->hmac.e.opad, ctx->opad, ds);
+ break;
+
+ case OTX2_CPT_AES_GCM:
+ if (crypto_ipsec_check_assoclen(req->assoclen))
+ return -EINVAL;
+
+ fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_DPTR;
+ /* Copy encryption key to context */
+ memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
+ /* Copy salt to context */
+ memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
+ AES_GCM_SALT_SIZE);
+
+ rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
+ break;
+
+ default:
+ /* Unknown cipher type */
+ return -EINVAL;
+ }
+ cpu_to_be64s(&rctx->ctrl_word.flags);
+
+ req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
+ req_info->ctrl.s.se_req = 1;
+ req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
+ DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
+ if (enc) {
+ req_info->req.opcode.s.minor = 2;
+ req_info->req.param1 = req->cryptlen;
+ req_info->req.param2 = req->cryptlen + req->assoclen;
+ } else {
+ req_info->req.opcode.s.minor = 3;
+ req_info->req.param1 = req->cryptlen - mac_len;
+ req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
+ }
+
+ fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
+ fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
+ fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
+ fctx->enc.enc_ctrl.e.mac_len = mac_len;
+ cpu_to_be64s(&fctx->enc.enc_ctrl.u);
+
+ /*
+ * Store packet data information in the offset
+ * control word (first 8 bytes).
+ */
+ req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
+ req_info->in[*argcnt].size = CONTROL_WORD_LEN;
+ req_info->req.dlen += CONTROL_WORD_LEN;
+ ++(*argcnt);
+
+ req_info->in[*argcnt].vptr = (u8 *)fctx;
+ req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
+ req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
+ ++(*argcnt);
+
+ return 0;
+}
+
+static inline void create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
+ u32 enc)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+
+ req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
+ req_info->ctrl.s.se_req = 1;
+ req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_HMAC |
+ DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
+ req_info->is_trunc_hmac = ctx->is_trunc_hmac;
+
+ req_info->req.opcode.s.minor = 0;
+ req_info->req.param1 = ctx->auth_key_len;
+ req_info->req.param2 = ctx->mac_type << 8;
+
+ /* Add authentication key */
+ req_info->in[*argcnt].vptr = ctx->key;
+ req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
+ req_info->req.dlen += round_up(ctx->auth_key_len, 8);
+ ++(*argcnt);
+}
+
+static inline int create_aead_input_list(struct aead_request *req, u32 enc)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 inputlen = req->cryptlen + req->assoclen;
+ u32 status, argcnt = 0;
+
+ status = create_aead_ctx_hdr(req, enc, &argcnt);
+ if (status)
+ return status;
+ update_input_data(req_info, req->src, inputlen, &argcnt);
+ req_info->in_cnt = argcnt;
+
+ return 0;
+}
+
+static inline void create_aead_output_list(struct aead_request *req, u32 enc,
+ u32 mac_len)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0, outputlen = 0;
+
+ if (enc)
+ outputlen = req->cryptlen + req->assoclen + mac_len;
+ else
+ outputlen = req->cryptlen + req->assoclen - mac_len;
+
+ update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
+ req_info->out_cnt = argcnt;
+}
+
+static inline void create_aead_null_input_list(struct aead_request *req,
+ u32 enc, u32 mac_len)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 inputlen, argcnt = 0;
+
+ if (enc)
+ inputlen = req->cryptlen + req->assoclen;
+ else
+ inputlen = req->cryptlen + req->assoclen - mac_len;
+
+ create_hmac_ctx_hdr(req, &argcnt, enc);
+ update_input_data(req_info, req->src, inputlen, &argcnt);
+ req_info->in_cnt = argcnt;
+}
+
+static inline int create_aead_null_output_list(struct aead_request *req,
+ u32 enc, u32 mac_len)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ struct scatterlist *dst;
+ u8 *ptr = NULL;
+ int argcnt = 0, status, offset;
+ u32 inputlen;
+
+ if (enc)
+ inputlen = req->cryptlen + req->assoclen;
+ else
+ inputlen = req->cryptlen + req->assoclen - mac_len;
+
+ /*
+ * If the source and destination are different
+ * then copy the payload to the destination.
+ */
+ if (req->src != req->dst) {
+
+ ptr = kmalloc(inputlen, (req_info->areq->flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!ptr)
+ return -ENOMEM;
+
+ status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
+ inputlen);
+ if (status != inputlen) {
+ status = -EINVAL;
+ goto error_free;
+ }
+ status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
+ inputlen);
+ if (status != inputlen) {
+ status = -EINVAL;
+ goto error_free;
+ }
+ kfree(ptr);
+ }
+
+ if (enc) {
+ /*
+ * In an encryption scenario the HMAC needs
+ * to be appended after the payload.
+ */
+ dst = req->dst;
+ offset = inputlen;
+ while (offset >= dst->length) {
+ offset -= dst->length;
+ dst = sg_next(dst);
+ if (!dst)
+ return -ENOENT;
+ }
+
+ update_output_data(req_info, dst, offset, mac_len, &argcnt);
+ } else {
+ /*
+ * In a decryption scenario the HMAC calculated for the received
+ * payload needs to be compared with the received HMAC.
+ */
+ status = sg_copy_buffer(req->src, sg_nents(req->src),
+ rctx->fctx.hmac.s.hmac_recv, mac_len,
+ inputlen, true);
+ if (status != mac_len)
+ return -EINVAL;
+
+ req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
+ req_info->out[argcnt].size = mac_len;
+ argcnt++;
+ }
+
+ req_info->out_cnt = argcnt;
+ return 0;
+
+error_free:
+ kfree(ptr);
+ return status;
+}
+
+static int aead_do_fallback(struct aead_request *req, bool is_enc)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(aead);
+ int ret;
+
+ if (ctx->fbk_cipher) {
+ /* Store the cipher tfm and then use the fallback tfm */
+ aead_request_set_tfm(&rctx->fbk_req, ctx->fbk_cipher);
+ aead_request_set_callback(&rctx->fbk_req, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(&rctx->fbk_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ ret = is_enc ? crypto_aead_encrypt(&rctx->fbk_req) :
+ crypto_aead_decrypt(&rctx->fbk_req);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct pci_dev *pdev;
+ int status, cpu_num;
+
+ /* Clear control words */
+ rctx->ctrl_word.flags = 0;
+ rctx->fctx.enc.enc_ctrl.u = 0;
+
+ req_info->callback = otx2_cpt_aead_callback;
+ req_info->areq = &req->base;
+ req_info->req_type = reg_type;
+ req_info->is_enc = enc;
+ req_info->is_trunc_hmac = false;
+
+ switch (reg_type) {
+ case OTX2_CPT_AEAD_ENC_DEC_REQ:
+ status = create_aead_input_list(req, enc);
+ if (status)
+ return status;
+ create_aead_output_list(req, enc, crypto_aead_authsize(tfm));
+ break;
+
+ case OTX2_CPT_AEAD_ENC_DEC_NULL_REQ:
+ create_aead_null_input_list(req, enc,
+ crypto_aead_authsize(tfm));
+ status = create_aead_null_output_list(req, enc,
+ crypto_aead_authsize(tfm));
+ if (status)
+ return status;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(req_info->req.param1, ctx->enc_align_len))
+ return -EINVAL;
+
+ if (!req_info->req.param2 ||
+ (req_info->req.param1 > OTX2_CPT_MAX_REQ_SIZE) ||
+ (req_info->req.param2 > OTX2_CPT_MAX_REQ_SIZE))
+ return aead_do_fallback(req, enc);
+
+ status = get_se_device(&pdev, &cpu_num);
+ if (status)
+ return status;
+
+ req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+
+ /*
+ * We perform an asynchronous send; once the request
+ * completes, the driver notifies the caller through
+ * the registered callback functions.
+ */
+ return otx2_cpt_do_request(pdev, req_info, cpu_num);
+}
+
+static int otx2_cpt_aead_encrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, true);
+}
+
+static int otx2_cpt_aead_decrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, false);
+}
+
+static int otx2_cpt_aead_null_encrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, true);
+}
+
+static int otx2_cpt_aead_null_decrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, false);
+}
+
+static struct skcipher_alg otx2_cpt_skciphers[] = { {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "cpt_xts_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .setkey = otx2_cpt_skcipher_xts_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cpt_cbc_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = otx2_cpt_skcipher_cbc_aes_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "cpt_ecb_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .ivsize = 0,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = otx2_cpt_skcipher_ecb_aes_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cpt_cbc_des3_ede",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = otx2_cpt_skcipher_cbc_des3_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "cpt_ecb_des3_ede",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
+ .setkey = otx2_cpt_skcipher_ecb_des3_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+} };
+
+static struct aead_alg otx2_cpt_aeads[] = { {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha1_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_cbc_aes_sha1_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx2_cpt_aead_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha256_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_cbc_aes_sha256_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx2_cpt_aead_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha384_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_cbc_aes_sha384_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx2_cpt_aead_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha512_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_cbc_aes_sha512_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx2_cpt_aead_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha1_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_ecb_null_sha1_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx2_cpt_aead_null_set_authsize,
+ .encrypt = otx2_cpt_aead_null_encrypt,
+ .decrypt = otx2_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha256_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_ecb_null_sha256_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx2_cpt_aead_null_set_authsize,
+ .encrypt = otx2_cpt_aead_null_encrypt,
+ .decrypt = otx2_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha384_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_ecb_null_sha384_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx2_cpt_aead_null_set_authsize,
+ .encrypt = otx2_cpt_aead_null_encrypt,
+ .decrypt = otx2_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha512_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_ecb_null_sha512_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx2_cpt_aead_null_set_authsize,
+ .encrypt = otx2_cpt_aead_null_encrypt,
+ .decrypt = otx2_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "cpt_rfc4106_gcm_aes",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_gcm_aes_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_gcm_aes_setkey,
+ .setauthsize = otx2_cpt_aead_gcm_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_GCM_IV_SIZE,
+ .maxauthsize = AES_GCM_ICV_SIZE,
+} };
+
+static inline int cpt_register_algs(void)
+{
+ int i, err = 0;
+
+ if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
+ for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
+ otx2_cpt_skciphers[i].base.cra_flags &=
+ ~CRYPTO_ALG_DEAD;
+
+ err = crypto_register_skciphers(otx2_cpt_skciphers,
+ ARRAY_SIZE(otx2_cpt_skciphers));
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
+ otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
+
+ err = crypto_register_aeads(otx2_cpt_aeads,
+ ARRAY_SIZE(otx2_cpt_aeads));
+ if (err) {
+ crypto_unregister_skciphers(otx2_cpt_skciphers,
+ ARRAY_SIZE(otx2_cpt_skciphers));
+ return err;
+ }
+
+ return 0;
+}
+
+static inline void cpt_unregister_algs(void)
+{
+ crypto_unregister_skciphers(otx2_cpt_skciphers,
+ ARRAY_SIZE(otx2_cpt_skciphers));
+ crypto_unregister_aeads(otx2_cpt_aeads, ARRAY_SIZE(otx2_cpt_aeads));
+}
+
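+/*
+ * sort() helpers: keep the SE device table ordered by PCI devfn so
+ * that the first entry picked by get_se_device() is deterministic.
+ */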
+static int compare_func(const void *lptr, const void *rptr)
+{
+ const struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
+ const struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
+
+ if (ldesc->dev->devfn < rdesc->dev->devfn)
+ return -1;
+ if (ldesc->dev->devfn > rdesc->dev->devfn)
+ return 1;
+ return 0;
+}
+
+static void swap_func(void *lptr, void *rptr, int size)
+{
+ struct cpt_device_desc *ldesc = lptr;
+ struct cpt_device_desc *rdesc = rptr;
+ struct cpt_device_desc desc;
+
+ desc = *ldesc;
+ *ldesc = *rdesc;
+ *rdesc = desc;
+}
+
+int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
+ int num_queues, int num_devices)
+{
+ int ret = 0;
+ int count;
+
+ mutex_lock(&mutex);
+ count = atomic_read(&se_devices.count);
+ if (count >= OTX2_CPT_MAX_LFS_NUM) {
+ dev_err(&pdev->dev, "No space to add a new device\n");
+ ret = -ENOSPC;
+ goto unlock;
+ }
+ se_devices.desc[count].num_queues = num_queues;
+ se_devices.desc[count++].dev = pdev;
+ atomic_inc(&se_devices.count);
+
+ if (atomic_read(&se_devices.count) == num_devices &&
+ is_crypto_registered == false) {
+ if (cpt_register_algs()) {
+ dev_err(&pdev->dev,
+ "Error in registering crypto algorithms\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+ try_module_get(mod);
+ is_crypto_registered = true;
+ }
+ sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
+ compare_func, swap_func);
+
+unlock:
+ mutex_unlock(&mutex);
+ return ret;
+}
+
+void otx2_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod)
+{
+ struct cpt_device_table *dev_tbl;
+ bool dev_found = false;
+ int i, j, count;
+
+ mutex_lock(&mutex);
+
+ dev_tbl = &se_devices;
+ count = atomic_read(&dev_tbl->count);
+ for (i = 0; i < count; i++) {
+ if (pdev == dev_tbl->desc[i].dev) {
+ for (j = i; j < count-1; j++)
+ dev_tbl->desc[j] = dev_tbl->desc[j+1];
+ dev_found = true;
+ break;
+ }
+ }
+
+ if (!dev_found) {
+ dev_err(&pdev->dev, "%s device not found\n", __func__);
+ goto unlock;
+ }
+ if (atomic_dec_and_test(&se_devices.count)) {
+ cpt_unregister_algs();
+ module_put(mod);
+ is_crypto_registered = false;
+ }
+
+unlock:
+ mutex_unlock(&mutex);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h
new file mode 100644
index 000000000000..f04184bd1744
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPT_ALGS_H
+#define __OTX2_CPT_ALGS_H
+
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+#include <crypto/aead.h>
+#include "otx2_cpt_common.h"
+
+#define OTX2_CPT_MAX_ENC_KEY_SIZE 32
+#define OTX2_CPT_MAX_HASH_KEY_SIZE 64
+#define OTX2_CPT_MAX_KEY_SIZE (OTX2_CPT_MAX_ENC_KEY_SIZE + \
+ OTX2_CPT_MAX_HASH_KEY_SIZE)
+enum otx2_cpt_request_type {
+ OTX2_CPT_ENC_DEC_REQ = 0x1,
+ OTX2_CPT_AEAD_ENC_DEC_REQ = 0x2,
+ OTX2_CPT_AEAD_ENC_DEC_NULL_REQ = 0x3,
+ OTX2_CPT_PASSTHROUGH_REQ = 0x4
+};
+
+enum otx2_cpt_major_opcodes {
+ OTX2_CPT_MAJOR_OP_MISC = 0x01,
+ OTX2_CPT_MAJOR_OP_FC = 0x33,
+ OTX2_CPT_MAJOR_OP_HMAC = 0x35,
+};
+
+enum otx2_cpt_cipher_type {
+ OTX2_CPT_CIPHER_NULL = 0x0,
+ OTX2_CPT_DES3_CBC = 0x1,
+ OTX2_CPT_DES3_ECB = 0x2,
+ OTX2_CPT_AES_CBC = 0x3,
+ OTX2_CPT_AES_ECB = 0x4,
+ OTX2_CPT_AES_CFB = 0x5,
+ OTX2_CPT_AES_CTR = 0x6,
+ OTX2_CPT_AES_GCM = 0x7,
+ OTX2_CPT_AES_XTS = 0x8
+};
+
+enum otx2_cpt_mac_type {
+ OTX2_CPT_MAC_NULL = 0x0,
+ OTX2_CPT_MD5 = 0x1,
+ OTX2_CPT_SHA1 = 0x2,
+ OTX2_CPT_SHA224 = 0x3,
+ OTX2_CPT_SHA256 = 0x4,
+ OTX2_CPT_SHA384 = 0x5,
+ OTX2_CPT_SHA512 = 0x6,
+ OTX2_CPT_GMAC = 0x7
+};
+
+enum otx2_cpt_aes_key_len {
+ OTX2_CPT_AES_128_BIT = 0x1,
+ OTX2_CPT_AES_192_BIT = 0x2,
+ OTX2_CPT_AES_256_BIT = 0x3
+};
+
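+/*
+ * 64-bit encryption control word of the flexi-crypto context; it is
+ * converted to big-endian via cpu_to_be64s() before being sent to
+ * the engine.
+ */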
+union otx2_cpt_encr_ctrl {
+ u64 u;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 enc_cipher:4;
+ u64 reserved_59:1;
+ u64 aes_key:2;
+ u64 iv_source:1;
+ u64 mac_type:4;
+ u64 reserved_49_51:3;
+ u64 auth_input_type:1;
+ u64 mac_len:8;
+ u64 reserved_32_39:8;
+ u64 encr_offset:16;
+ u64 iv_offset:8;
+ u64 auth_offset:8;
+#else
+ u64 auth_offset:8;
+ u64 iv_offset:8;
+ u64 encr_offset:16;
+ u64 reserved_32_39:8;
+ u64 mac_len:8;
+ u64 auth_input_type:1;
+ u64 reserved_49_51:3;
+ u64 mac_type:4;
+ u64 iv_source:1;
+ u64 aes_key:2;
+ u64 reserved_59:1;
+ u64 enc_cipher:4;
+#endif
+ } e;
+};
+
+struct otx2_cpt_cipher {
+ const char *name;
+ u8 value;
+};
+
+struct otx2_cpt_fc_enc_ctx {
+ union otx2_cpt_encr_ctrl enc_ctrl;
+ u8 encr_key[32];
+ u8 encr_iv[16];
+};
+
+union otx2_cpt_fc_hmac_ctx {
+ struct {
+ u8 ipad[64];
+ u8 opad[64];
+ } e;
+ struct {
+ u8 hmac_calc[64]; /* HMAC calculated */
+ u8 hmac_recv[64]; /* HMAC received */
+ } s;
+};
+
+struct otx2_cpt_fc_ctx {
+ struct otx2_cpt_fc_enc_ctx enc;
+ union otx2_cpt_fc_hmac_ctx hmac;
+};
+
+struct otx2_cpt_enc_ctx {
+ u32 key_len;
+ u8 enc_key[OTX2_CPT_MAX_KEY_SIZE];
+ u8 cipher_type;
+ u8 key_type;
+ u8 enc_align_len;
+ struct crypto_skcipher *fbk_cipher;
+};
+
+union otx2_cpt_offset_ctrl {
+ u64 flags;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved:32;
+ u64 enc_data_offset:16;
+ u64 iv_offset:8;
+ u64 auth_offset:8;
+#else
+ u64 auth_offset:8;
+ u64 iv_offset:8;
+ u64 enc_data_offset:16;
+ u64 reserved:32;
+#endif
+ } e;
+};
+
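+/*
+ * Per-request context. The fallback request is embedded in a union so
+ * that no extra allocation is needed when a request is handed to the
+ * software fallback tfm.
+ */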
+struct otx2_cpt_req_ctx {
+ struct otx2_cpt_req_info cpt_req;
+ union otx2_cpt_offset_ctrl ctrl_word;
+ struct otx2_cpt_fc_ctx fctx;
+ union {
+ struct skcipher_request sk_fbk_req;
+ struct aead_request fbk_req;
+ };
+};
+
+struct otx2_cpt_sdesc {
+ struct shash_desc shash;
+};
+
+struct otx2_cpt_aead_ctx {
+ u8 key[OTX2_CPT_MAX_KEY_SIZE];
+ struct crypto_shash *hashalg;
+ struct otx2_cpt_sdesc *sdesc;
+ struct crypto_aead *fbk_cipher;
+ u8 *ipad;
+ u8 *opad;
+ u32 enc_key_len;
+ u32 auth_key_len;
+ u8 cipher_type;
+ u8 mac_type;
+ u8 key_type;
+ u8 is_trunc_hmac;
+ u8 enc_align_len;
+};
+int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
+ int num_queues, int num_devices);
+void otx2_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod);
+
+#endif /* __OTX2_CPT_ALGS_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
new file mode 100644
index 000000000000..47f378731024
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -0,0 +1,410 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptvf.h"
+#include "otx2_cptlf.h"
+#include "otx2_cptvf_algs.h"
+#include <rvu_reg.h>
+
+#define OTX2_CPTVF_DRV_NAME "octeontx2-cptvf"
+
+static void cptvf_enable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
+{
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
+ 0x1ULL);
+
+ /* Enable PF-VF interrupt */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
+ OTX2_RVU_VF_INT_ENA_W1S, 0x1ULL);
+}
+
+static void cptvf_disable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
+{
+ /* Disable PF-VF interrupt */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
+ OTX2_RVU_VF_INT_ENA_W1C, 0x1ULL);
+
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
+ 0x1ULL);
+}
+
+static int cptvf_register_interrupts(struct otx2_cptvf_dev *cptvf)
+{
+ int ret, irq;
+ int num_vec;
+
+ num_vec = pci_msix_vec_count(cptvf->pdev);
+ if (num_vec <= 0)
+ return -EINVAL;
+
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(cptvf->pdev, num_vec, num_vec,
+ PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&cptvf->pdev->dev,
+ "Request for %d msix vectors failed\n", num_vec);
+ return ret;
+ }
+ irq = pci_irq_vector(cptvf->pdev, OTX2_CPT_VF_INT_VEC_E_MBOX);
+ /* Register VF<=>PF mailbox interrupt handler */
+ ret = devm_request_irq(&cptvf->pdev->dev, irq,
+ otx2_cptvf_pfvf_mbox_intr, 0,
+ "CPTPFVF Mbox", cptvf);
+ if (ret)
+ return ret;
+ /* Enable PF-VF mailbox interrupts */
+ cptvf_enable_pfvf_mbox_intrs(cptvf);
+
+ ret = otx2_cpt_send_ready_msg(&cptvf->pfvf_mbox, cptvf->pdev);
+ if (ret) {
+ dev_warn(&cptvf->pdev->dev,
+ "PF not responding to mailbox, deferring probe\n");
+ cptvf_disable_pfvf_mbox_intrs(cptvf);
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
+{
+ int ret;
+
+ cptvf->pfvf_mbox_wq = alloc_workqueue("cpt_pfvf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!cptvf->pfvf_mbox_wq)
+ return -ENOMEM;
+
+ ret = otx2_mbox_init(&cptvf->pfvf_mbox, cptvf->pfvf_mbox_base,
+ cptvf->pdev, cptvf->reg_base, MBOX_DIR_VFPF, 1);
+ if (ret)
+ goto free_wqe;
+
+ INIT_WORK(&cptvf->pfvf_mbox_work, otx2_cptvf_pfvf_mbox_handler);
+ return 0;
+
+free_wqe:
+ destroy_workqueue(cptvf->pfvf_mbox_wq);
+ return ret;
+}
+
+static void cptvf_pfvf_mbox_destroy(struct otx2_cptvf_dev *cptvf)
+{
+ destroy_workqueue(cptvf->pfvf_mbox_wq);
+ otx2_mbox_destroy(&cptvf->pfvf_mbox);
+}
+
+static void cptlf_work_handler(unsigned long data)
+{
+ otx2_cpt_post_process((struct otx2_cptlf_wqe *) data);
+}
+
+static void cleanup_tasklet_work(struct otx2_cptlfs_info *lfs)
+{
+ int i;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ if (!lfs->lf[i].wqe)
+ continue;
+
+ tasklet_kill(&lfs->lf[i].wqe->work);
+ kfree(lfs->lf[i].wqe);
+ lfs->lf[i].wqe = NULL;
+ }
+}
+
+static int init_tasklet_work(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_cptlf_wqe *wqe;
+ int i, ret = 0;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ wqe = kzalloc(sizeof(struct otx2_cptlf_wqe), GFP_KERNEL);
+ if (!wqe) {
+ ret = -ENOMEM;
+ goto cleanup_tasklet;
+ }
+
+ tasklet_init(&wqe->work, cptlf_work_handler, (u64) wqe);
+ wqe->lfs = lfs;
+ wqe->lf_num = i;
+ lfs->lf[i].wqe = wqe;
+ }
+ return 0;
+
+cleanup_tasklet:
+ cleanup_tasklet_work(lfs);
+ return ret;
+}
+
+static void free_pending_queues(struct otx2_cptlfs_info *lfs)
+{
+ int i;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ kfree(lfs->lf[i].pqueue.head);
+ lfs->lf[i].pqueue.head = NULL;
+ }
+}
+
+static int alloc_pending_queues(struct otx2_cptlfs_info *lfs)
+{
+ int size, ret, i;
+
+ if (!lfs->lfs_num)
+ return -EINVAL;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ lfs->lf[i].pqueue.qlen = OTX2_CPT_INST_QLEN_MSGS;
+ size = lfs->lf[i].pqueue.qlen *
+ sizeof(struct otx2_cpt_pending_entry);
+
+ lfs->lf[i].pqueue.head = kzalloc(size, GFP_KERNEL);
+ if (!lfs->lf[i].pqueue.head) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Initialize spin lock */
+ spin_lock_init(&lfs->lf[i].pqueue.lock);
+ }
+ return 0;
+
+error:
+ free_pending_queues(lfs);
+ return ret;
+}
+
+static void lf_sw_cleanup(struct otx2_cptlfs_info *lfs)
+{
+ cleanup_tasklet_work(lfs);
+ free_pending_queues(lfs);
+}
+
+static int lf_sw_init(struct otx2_cptlfs_info *lfs)
+{
+ int ret;
+
+ ret = alloc_pending_queues(lfs);
+ if (ret) {
+ dev_err(&lfs->pdev->dev,
+ "Allocating pending queues failed\n");
+ return ret;
+ }
+ ret = init_tasklet_work(lfs);
+ if (ret) {
+ dev_err(&lfs->pdev->dev,
+ "Tasklet work init failed\n");
+ goto pending_queues_free;
+ }
+ return 0;
+
+pending_queues_free:
+ free_pending_queues(lfs);
+ return ret;
+}
+
+static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
+{
+ atomic_set(&lfs->state, OTX2_CPTLF_IN_RESET);
+
+ /* Remove interrupts affinity */
+ otx2_cptlf_free_irqs_affinity(lfs);
+ /* Disable instruction queue */
+ otx2_cptlf_disable_iqueues(lfs);
+ /* Unregister crypto algorithms */
+ otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE);
+ /* Unregister LFs interrupts */
+ otx2_cptlf_unregister_interrupts(lfs);
+ /* Cleanup LFs software side */
+ lf_sw_cleanup(lfs);
+ /* Send request to detach LFs */
+ otx2_cpt_detach_rsrcs_msg(lfs);
+}
+
+static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
+{
+ struct otx2_cptlfs_info *lfs = &cptvf->lfs;
+ struct device *dev = &cptvf->pdev->dev;
+ int ret, lfs_num;
+ u8 eng_grp_msk;
+
+ /* Get engine group number for symmetric crypto */
+ cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
+ ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
+ if (ret)
+ return ret;
+
+ if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
+ dev_err(dev, "Engine group for kernel crypto not available\n");
+ ret = -ENOENT;
+ return ret;
+ }
+ eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;
+
+ ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
+ if (ret)
+ return ret;
+
+ lfs->reg_base = cptvf->reg_base;
+ lfs->pdev = cptvf->pdev;
+ lfs->mbox = &cptvf->pfvf_mbox;
+
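+ /*
+ * Use the kernel VF LF limit provided by the PF if it is set,
+ * otherwise attach one LF per online CPU.
+ */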
+ lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits :
+ num_online_cpus();
+ ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
+ lfs_num);
+ if (ret)
+ return ret;
+
+ /* Get msix offsets for attached LFs */
+ ret = otx2_cpt_msix_offset_msg(lfs);
+ if (ret)
+ goto cleanup_lf;
+
+ /* Initialize LFs software side */
+ ret = lf_sw_init(lfs);
+ if (ret)
+ goto cleanup_lf;
+
+ /* Register LFs interrupts */
+ ret = otx2_cptlf_register_interrupts(lfs);
+ if (ret)
+ goto cleanup_lf_sw;
+
+ /* Set interrupts affinity */
+ ret = otx2_cptlf_set_irqs_affinity(lfs);
+ if (ret)
+ goto unregister_intr;
+
+ atomic_set(&lfs->state, OTX2_CPTLF_STARTED);
+ /* Register crypto algorithms */
+ ret = otx2_cpt_crypto_init(lfs->pdev, THIS_MODULE, lfs_num, 1);
+ if (ret) {
+ dev_err(&lfs->pdev->dev, "algorithms registration failed\n");
+ goto disable_irqs;
+ }
+ return 0;
+
+disable_irqs:
+ otx2_cptlf_free_irqs_affinity(lfs);
+unregister_intr:
+ otx2_cptlf_unregister_interrupts(lfs);
+cleanup_lf_sw:
+ lf_sw_cleanup(lfs);
+cleanup_lf:
+ otx2_cptlf_shutdown(lfs);
+
+ return ret;
+}
+
+static int otx2_cptvf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ resource_size_t offset, size;
+ struct otx2_cptvf_dev *cptvf;
+ int ret;
+
+ cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
+ if (!cptvf)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto clear_drvdata;
+ }
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto clear_drvdata;
+ }
+ /* Map VF's configuration registers */
+ ret = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
+ OTX2_CPTVF_DRV_NAME);
+ if (ret) {
+ dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret);
+ goto clear_drvdata;
+ }
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, cptvf);
+ cptvf->pdev = pdev;
+
+ cptvf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
+
+ offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
+ size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
+ /* Map PF-VF mailbox memory */
+ cptvf->pfvf_mbox_base = devm_ioremap_wc(dev, offset, size);
+ if (!cptvf->pfvf_mbox_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ ret = -ENODEV;
+ goto clear_drvdata;
+ }
+ /* Initialize PF<=>VF mailbox */
+ ret = cptvf_pfvf_mbox_init(cptvf);
+ if (ret)
+ goto clear_drvdata;
+
+ /* Register interrupts */
+ ret = cptvf_register_interrupts(cptvf);
+ if (ret)
+ goto destroy_pfvf_mbox;
+
+ /* Initialize CPT LFs */
+ ret = cptvf_lf_init(cptvf);
+ if (ret)
+ goto unregister_interrupts;
+
+ return 0;
+
+unregister_interrupts:
+ cptvf_disable_pfvf_mbox_intrs(cptvf);
+destroy_pfvf_mbox:
+ cptvf_pfvf_mbox_destroy(cptvf);
+clear_drvdata:
+ pci_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static void otx2_cptvf_remove(struct pci_dev *pdev)
+{
+ struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
+
+ if (!cptvf) {
+ dev_err(&pdev->dev, "Invalid CPT VF device.\n");
+ return;
+ }
+ cptvf_lf_shutdown(&cptvf->lfs);
+ /* Disable PF-VF mailbox interrupt */
+ cptvf_disable_pfvf_mbox_intrs(cptvf);
+ /* Destroy PF-VF mbox */
+ cptvf_pfvf_mbox_destroy(cptvf);
+ pci_set_drvdata(pdev, NULL);
+}
+
+/* Supported devices */
+static const struct pci_device_id otx2_cptvf_id_table[] = {
+ {PCI_VDEVICE(CAVIUM, OTX2_CPT_PCI_VF_DEVICE_ID), 0},
+ { 0, } /* end of table */
+};
+
+static struct pci_driver otx2_cptvf_pci_driver = {
+ .name = OTX2_CPTVF_DRV_NAME,
+ .id_table = otx2_cptvf_id_table,
+ .probe = otx2_cptvf_probe,
+ .remove = otx2_cptvf_remove,
+};
+
+module_pci_driver(otx2_cptvf_pci_driver);
+
+MODULE_AUTHOR("Marvell");
+MODULE_DESCRIPTION("Marvell OcteonTX2 CPT Virtual Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, otx2_cptvf_id_table);
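Both cptvf_lf_init() and otx2_cptvf_probe() above tear down on failure in strict reverse order of setup, jumping to a label that releases only what has already been acquired, with each label falling through to the earlier stages. A minimal standalone sketch of that unwind idiom, using hypothetical resource names that are not part of the driver:

#include <stdio.h>

/* Hypothetical resources standing in for IRQs, queues, mailboxes, etc. */
static int acquire_a(void) { return 0; }	/* 0 = success */
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -1; }	/* simulate a failure */
static void release_a(void) { puts("release a"); }
static void release_b(void) { puts("release b"); }

static int setup_all(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;

	ret = acquire_b();
	if (ret)
		goto undo_a;	/* undo only what already succeeded */

	ret = acquire_c();
	if (ret)
		goto undo_b;

	return 0;

undo_b:
	release_b();
undo_a:
	release_a();
	return ret;
}

int main(void)
{
	return setup_all() ? 1 : 0;
}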
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
new file mode 100644
index 000000000000..5d73b711cba6
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptvf.h"
+#include <rvu_reg.h>
+
+irqreturn_t otx2_cptvf_pfvf_mbox_intr(int __always_unused irq, void *arg)
+{
+ struct otx2_cptvf_dev *cptvf = arg;
+ u64 intr;
+
+ /* Read the interrupt bits */
+ intr = otx2_cpt_read64(cptvf->reg_base, BLKADDR_RVUM, 0,
+ OTX2_RVU_VF_INT);
+
+ if (intr & 0x1ULL) {
+ /* Schedule work queue function to process the MBOX request */
+ queue_work(cptvf->pfvf_mbox_wq, &cptvf->pfvf_mbox_work);
+ /* Clear and ack the interrupt */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
+ OTX2_RVU_VF_INT, 0x1ULL);
+ }
+ return IRQ_HANDLED;
+}
+
+static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
+ struct mbox_msghdr *msg)
+{
+ struct otx2_cptlfs_info *lfs = &cptvf->lfs;
+ struct otx2_cpt_kvf_limits_rsp *rsp_limits;
+ struct otx2_cpt_egrp_num_rsp *rsp_grp;
+ struct cpt_rd_wr_reg_msg *rsp_reg;
+ struct msix_offset_rsp *rsp_msix;
+ int i;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(&cptvf->pdev->dev,
+ "MBOX msg with unknown ID %d\n", msg->id);
+ return;
+ }
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(&cptvf->pdev->dev,
+ "MBOX msg with wrong signature %x, ID %d\n",
+ msg->sig, msg->id);
+ return;
+ }
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ cptvf->vf_id = ((msg->pcifunc >> RVU_PFVF_FUNC_SHIFT)
+ & RVU_PFVF_FUNC_MASK) - 1;
+ break;
+ case MBOX_MSG_ATTACH_RESOURCES:
+ /* Check if resources were successfully attached */
+ if (!msg->rc)
+ lfs->are_lfs_attached = 1;
+ break;
+ case MBOX_MSG_DETACH_RESOURCES:
+ /* Check if resources were successfully detached */
+ if (!msg->rc)
+ lfs->are_lfs_attached = 0;
+ break;
+ case MBOX_MSG_MSIX_OFFSET:
+ rsp_msix = (struct msix_offset_rsp *) msg;
+ for (i = 0; i < rsp_msix->cptlfs; i++)
+ lfs->lf[i].msix_offset = rsp_msix->cptlf_msixoff[i];
+ break;
+ case MBOX_MSG_CPT_RD_WR_REGISTER:
+ rsp_reg = (struct cpt_rd_wr_reg_msg *) msg;
+ if (msg->rc) {
+ dev_err(&cptvf->pdev->dev,
+ "Reg %llx rd/wr(%d) failed %d\n",
+ rsp_reg->reg_offset, rsp_reg->is_write,
+ msg->rc);
+ return;
+ }
+ if (!rsp_reg->is_write)
+ *rsp_reg->ret_val = rsp_reg->val;
+ break;
+ case MBOX_MSG_GET_ENG_GRP_NUM:
+ rsp_grp = (struct otx2_cpt_egrp_num_rsp *) msg;
+ cptvf->lfs.kcrypto_eng_grp_num = rsp_grp->eng_grp_num;
+ break;
+ case MBOX_MSG_GET_KVF_LIMITS:
+ rsp_limits = (struct otx2_cpt_kvf_limits_rsp *) msg;
+ cptvf->lfs.kvf_limits = rsp_limits->kvf_limits;
+ break;
+ default:
+ dev_err(&cptvf->pdev->dev, "Unsupported msg %d received.\n",
+ msg->id);
+ break;
+ }
+}
+
+void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_cptvf_dev *cptvf;
+ struct otx2_mbox *pfvf_mbox;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ int offset, i;
+
+ /* sync with mbox memory region */
+ smp_rmb();
+
+ cptvf = container_of(work, struct otx2_cptvf_dev, pfvf_mbox_work);
+ pfvf_mbox = &cptvf->pfvf_mbox;
+ mdev = &pfvf_mbox->dev[0];
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + pfvf_mbox->rx_start);
+ if (rsp_hdr->num_msgs == 0)
+ return;
+ offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + pfvf_mbox->rx_start +
+ offset);
+ process_pfvf_mbox_mbox_msg(cptvf, msg);
+ offset = msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+ otx2_mbox_reset(pfvf_mbox, 0);
+}
+
+int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type)
+{
+ struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
+ struct pci_dev *pdev = cptvf->pdev;
+ struct otx2_cpt_egrp_num_msg *req;
+
+ req = (struct otx2_cpt_egrp_num_msg *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct otx2_cpt_egrp_num_rsp));
+ if (req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ req->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->eng_type = eng_type;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+
+int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
+{
+ struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
+ struct pci_dev *pdev = cptvf->pdev;
+ struct mbox_msghdr *req;
+ int ret;
+
+ req = (struct mbox_msghdr *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct otx2_cpt_kvf_limits_rsp));
+ if (req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ req->id = MBOX_MSG_GET_KVF_LIMITS;
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+
+ ret = otx2_cpt_send_mbox_msg(mbox, pdev);
+
+ return ret;
+}
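The handler above validates each PF response by signature and then dispatches on the message ID, while the request helpers fill in id, sig and pcifunc before sending. A simplified standalone model of that validate-then-dispatch flow (the struct, constants and IDs below are illustrative only, not the real RVU mailbox layout):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins, not the real RVU mailbox definitions */
#define RSP_SIG  0xbeef
#define MSG_MAX  3
enum { MSG_READY = 0, MSG_ATTACH = 1, MSG_GET_GRP = 2 };

struct msg_hdr {
	uint16_t id;
	uint16_t sig;
	int      rc;      /* 0 on success */
	uint32_t payload; /* e.g. engine group number */
};

static void handle_rsp(const struct msg_hdr *msg)
{
	if (msg->id >= MSG_MAX) {
		fprintf(stderr, "unknown ID %d\n", msg->id);
		return;
	}
	if (msg->sig != RSP_SIG) {
		fprintf(stderr, "bad signature %x for ID %d\n",
			(unsigned int)msg->sig, msg->id);
		return;
	}
	switch (msg->id) {
	case MSG_READY:
		puts("PF is ready");
		break;
	case MSG_ATTACH:
		puts(msg->rc ? "attach failed" : "LFs attached");
		break;
	case MSG_GET_GRP:
		printf("engine group %u\n", (unsigned int)msg->payload);
		break;
	}
}

int main(void)
{
	struct msg_hdr rsp = { .id = MSG_GET_GRP, .sig = RSP_SIG,
			       .rc = 0, .payload = 1 };

	handle_rsp(&rsp);
	return 0;
}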
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
new file mode 100644
index 000000000000..d5c1c1b7c7e4
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cptvf.h"
+#include "otx2_cpt_common.h"
+
+/* SG list header size in bytes */
+#define SG_LIST_HDR_SIZE 8
+
+/* Default timeout when waiting for free pending entry in us */
+#define CPT_PENTRY_TIMEOUT 1000
+#define CPT_PENTRY_STEP 50
+
+/* Default threshold for stopping and resuming sender requests */
+#define CPT_IQ_STOP_MARGIN 128
+#define CPT_IQ_RESUME_MARGIN 512
+
+/* Default command timeout in seconds */
+#define CPT_COMMAND_TIMEOUT 4
+#define CPT_TIME_IN_RESET_COUNT 5
+
+static void otx2_cpt_dump_sg_list(struct pci_dev *pdev,
+ struct otx2_cpt_req_info *req)
+{
+ int i;
+
+ pr_debug("Gather list size %d\n", req->in_cnt);
+ for (i = 0; i < req->in_cnt; i++) {
+ pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
+ req->in[i].size, req->in[i].vptr,
+ (void *) req->in[i].dma_addr);
+ pr_debug("Buffer hexdump (%d bytes)\n",
+ req->in[i].size);
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
+ req->in[i].vptr, req->in[i].size, false);
+ }
+ pr_debug("Scatter list size %d\n", req->out_cnt);
+ for (i = 0; i < req->out_cnt; i++) {
+ pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
+ req->out[i].size, req->out[i].vptr,
+ (void *) req->out[i].dma_addr);
+ pr_debug("Buffer hexdump (%d bytes)\n", req->out[i].size);
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
+ req->out[i].vptr, req->out[i].size, false);
+ }
+}
+
+static inline struct otx2_cpt_pending_entry *get_free_pending_entry(
+ struct otx2_cpt_pending_queue *q,
+ int qlen)
+{
+ struct otx2_cpt_pending_entry *ent = NULL;
+
+ ent = &q->head[q->rear];
+ if (unlikely(ent->busy))
+ return NULL;
+
+ q->rear++;
+ if (unlikely(q->rear == qlen))
+ q->rear = 0;
+
+ return ent;
+}
+
+static inline u32 modulo_inc(u32 index, u32 length, u32 inc)
+{
+ if (WARN_ON(inc > length))
+ inc = length;
+
+ index += inc;
+ if (unlikely(index >= length))
+ index -= length;
+
+ return index;
+}
+
+static inline void free_pentry(struct otx2_cpt_pending_entry *pentry)
+{
+ pentry->completion_addr = NULL;
+ pentry->info = NULL;
+ pentry->callback = NULL;
+ pentry->areq = NULL;
+ pentry->resume_sender = false;
+ pentry->busy = false;
+}
+
+static inline int setup_sgio_components(struct pci_dev *pdev,
+ struct otx2_cpt_buf_ptr *list,
+ int buf_count, u8 *buffer)
+{
+ struct otx2_cpt_sglist_component *sg_ptr = NULL;
+ int ret = 0, i, j;
+ int components;
+
+ if (unlikely(!list)) {
+ dev_err(&pdev->dev, "Input list pointer is NULL\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < buf_count; i++) {
+ if (unlikely(!list[i].vptr))
+ continue;
+ list[i].dma_addr = dma_map_single(&pdev->dev, list[i].vptr,
+ list[i].size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, list[i].dma_addr))) {
+ dev_err(&pdev->dev, "Dma mapping failed\n");
+ ret = -EIO;
+ goto sg_cleanup;
+ }
+ }
+ components = buf_count / 4;
+ sg_ptr = (struct otx2_cpt_sglist_component *)buffer;
+ for (i = 0; i < components; i++) {
+ sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size);
+ sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size);
+ sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size);
+ sg_ptr->len3 = cpu_to_be16(list[i * 4 + 3].size);
+ sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
+ sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
+ sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
+ sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
+ sg_ptr++;
+ }
+ components = buf_count % 4;
+
+ switch (components) {
+ case 3:
+ sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size);
+ sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
+ fallthrough;
+ case 2:
+ sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size);
+ sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
+ fallthrough;
+ case 1:
+ sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size);
+ sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
+ break;
+ default:
+ break;
+ }
+ return ret;
+
+sg_cleanup:
+ for (j = 0; j < i; j++) {
+ if (list[j].dma_addr) {
+ dma_unmap_single(&pdev->dev, list[j].dma_addr,
+ list[j].size, DMA_BIDIRECTIONAL);
+ }
+
+ list[j].dma_addr = 0;
+ }
+ return ret;
+}
+
+static inline struct otx2_cpt_inst_info *info_create(struct pci_dev *pdev,
+ struct otx2_cpt_req_info *req,
+ gfp_t gfp)
+{
+ int align = OTX2_CPT_DMA_MINALIGN;
+ struct otx2_cpt_inst_info *info;
+ u32 dlen, align_dlen, info_len;
+ u16 g_sz_bytes, s_sz_bytes;
+ u32 total_mem_len;
+
+ if (unlikely(req->in_cnt > OTX2_CPT_MAX_SG_IN_CNT ||
+ req->out_cnt > OTX2_CPT_MAX_SG_OUT_CNT)) {
+ dev_err(&pdev->dev, "Error too many sg components\n");
+ return NULL;
+ }
+
+ g_sz_bytes = ((req->in_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+ s_sz_bytes = ((req->out_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+
+ dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
+ align_dlen = ALIGN(dlen, align);
+ info_len = ALIGN(sizeof(*info), align);
+ total_mem_len = align_dlen + info_len + sizeof(union otx2_cpt_res_s);
+
+ info = kzalloc(total_mem_len, gfp);
+ if (unlikely(!info))
+ return NULL;
+
+ info->dlen = dlen;
+ info->in_buffer = (u8 *)info + info_len;
+
+ ((u16 *)info->in_buffer)[0] = req->out_cnt;
+ ((u16 *)info->in_buffer)[1] = req->in_cnt;
+ ((u16 *)info->in_buffer)[2] = 0;
+ ((u16 *)info->in_buffer)[3] = 0;
+ cpu_to_be64s((u64 *)info->in_buffer);
+
+ /* Setup gather (input) components */
+ if (setup_sgio_components(pdev, req->in, req->in_cnt,
+ &info->in_buffer[8])) {
+ dev_err(&pdev->dev, "Failed to setup gather list\n");
+ goto destroy_info;
+ }
+
+ if (setup_sgio_components(pdev, req->out, req->out_cnt,
+ &info->in_buffer[8 + g_sz_bytes])) {
+ dev_err(&pdev->dev, "Failed to setup scatter list\n");
+ goto destroy_info;
+ }
+
+ info->dma_len = total_mem_len - info_len;
+ info->dptr_baddr = dma_map_single(&pdev->dev, info->in_buffer,
+ info->dma_len, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) {
+ dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n");
+ goto destroy_info;
+ }
+ /*
+ * Get buffer for union otx2_cpt_res_s response
+ * structure and its physical address
+ */
+ info->completion_addr = info->in_buffer + align_dlen;
+ info->comp_baddr = info->dptr_baddr + align_dlen;
+
+ return info;
+
+destroy_info:
+ otx2_cpt_info_destroy(pdev, info);
+ return NULL;
+}
+
+static int process_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ struct otx2_cpt_pending_queue *pqueue,
+ struct otx2_cptlf_info *lf)
+{
+ struct otx2_cptvf_request *cpt_req = &req->req;
+ struct otx2_cpt_pending_entry *pentry = NULL;
+ union otx2_cpt_ctrl_info *ctrl = &req->ctrl;
+ struct otx2_cpt_inst_info *info = NULL;
+ union otx2_cpt_res_s *result = NULL;
+ struct otx2_cpt_iq_command iq_cmd;
+ union otx2_cpt_inst_s cptinst;
+ int retry, ret = 0;
+ u8 resume_sender;
+ gfp_t gfp;
+
+ gfp = (req->areq->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
+ GFP_ATOMIC;
+ if (unlikely(!otx2_cptlf_started(lf->lfs)))
+ return -ENODEV;
+
+ info = info_create(pdev, req, gfp);
+ if (unlikely(!info)) {
+ dev_err(&pdev->dev, "Setting up cpt inst info failed");
+ return -ENOMEM;
+ }
+ cpt_req->dlen = info->dlen;
+
+ result = info->completion_addr;
+ result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
+
+ spin_lock_bh(&pqueue->lock);
+ pentry = get_free_pending_entry(pqueue, pqueue->qlen);
+ retry = CPT_PENTRY_TIMEOUT / CPT_PENTRY_STEP;
+ while (unlikely(!pentry) && retry--) {
+ spin_unlock_bh(&pqueue->lock);
+ udelay(CPT_PENTRY_STEP);
+ spin_lock_bh(&pqueue->lock);
+ pentry = get_free_pending_entry(pqueue, pqueue->qlen);
+ }
+
+ if (unlikely(!pentry)) {
+ ret = -ENOSPC;
+ goto destroy_info;
+ }
+
+ /*
+ * Check if we are close to filling the entire pending queue;
+ * if so, tell the sender to stop/sleep by returning -EBUSY.
+ * We do this only for contexts that can sleep (GFP_KERNEL).
+ */

+ if (gfp == GFP_KERNEL &&
+ pqueue->pending_count > (pqueue->qlen - CPT_IQ_STOP_MARGIN)) {
+ pentry->resume_sender = true;
+ } else
+ pentry->resume_sender = false;
+ resume_sender = pentry->resume_sender;
+ pqueue->pending_count++;
+
+ pentry->completion_addr = info->completion_addr;
+ pentry->info = info;
+ pentry->callback = req->callback;
+ pentry->areq = req->areq;
+ pentry->busy = true;
+ info->pentry = pentry;
+ info->time_in = jiffies;
+ info->req = req;
+
+ /* Fill in the command */
+ iq_cmd.cmd.u = 0;
+ iq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
+ iq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
+ iq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
+ iq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);
+
+ /* 64-bit swap for microcode data reads, not needed for addresses */
+ cpu_to_be64s(&iq_cmd.cmd.u);
+ iq_cmd.dptr = info->dptr_baddr;
+ iq_cmd.rptr = 0;
+ iq_cmd.cptr.u = 0;
+ iq_cmd.cptr.s.grp = ctrl->s.grp;
+
+ /* Fill in the CPT_INST_S type command for HW interpretation */
+ otx2_cpt_fill_inst(&cptinst, &iq_cmd, info->comp_baddr);
+
+ /* Print debug info if enabled */
+ otx2_cpt_dump_sg_list(pdev, req);
+ pr_debug("Cpt_inst_s hexdump (%d bytes)\n", OTX2_CPT_INST_SIZE);
+ print_hex_dump_debug("", 0, 16, 1, &cptinst, OTX2_CPT_INST_SIZE, false);
+ pr_debug("Dptr hexdump (%d bytes)\n", cpt_req->dlen);
+ print_hex_dump_debug("", 0, 16, 1, info->in_buffer,
+ cpt_req->dlen, false);
+
+ /* Send CPT command */
+ otx2_cpt_send_cmd(&cptinst, 1, lf);
+
+ /*
+ * The pending queue entry is allocated and prepared in the same
+ * critical section as the CPT instruction submission, so the order
+ * of CPT requests is identical in the pending and instruction
+ * queues.
+ */
+ spin_unlock_bh(&pqueue->lock);
+
+ ret = resume_sender ? -EBUSY : -EINPROGRESS;
+ return ret;
+
+destroy_info:
+ spin_unlock_bh(&pqueue->lock);
+ otx2_cpt_info_destroy(pdev, info);
+ return ret;
+}
+
+int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ int cpu_num)
+{
+ struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
+ struct otx2_cptlfs_info *lfs = &cptvf->lfs;
+
+ return process_request(lfs->pdev, req, &lfs->lf[cpu_num].pqueue,
+ &lfs->lf[cpu_num]);
+}
+
+static int cpt_process_ccode(struct pci_dev *pdev,
+ union otx2_cpt_res_s *cpt_status,
+ struct otx2_cpt_inst_info *info,
+ u32 *res_code)
+{
+ u8 uc_ccode = cpt_status->s.uc_compcode;
+ u8 ccode = cpt_status->s.compcode;
+
+ switch (ccode) {
+ case OTX2_CPT_COMP_E_FAULT:
+ dev_err(&pdev->dev,
+ "Request failed with DMA fault\n");
+ otx2_cpt_dump_sg_list(pdev, info->req);
+ break;
+
+ case OTX2_CPT_COMP_E_HWERR:
+ dev_err(&pdev->dev,
+ "Request failed with hardware error\n");
+ otx2_cpt_dump_sg_list(pdev, info->req);
+ break;
+
+ case OTX2_CPT_COMP_E_INSTERR:
+ dev_err(&pdev->dev,
+ "Request failed with instruction error\n");
+ otx2_cpt_dump_sg_list(pdev, info->req);
+ break;
+
+ case OTX2_CPT_COMP_E_NOTDONE:
+ /* check for timeout */
+ if (time_after_eq(jiffies, info->time_in +
+ CPT_COMMAND_TIMEOUT * HZ))
+ dev_warn(&pdev->dev,
+ "Request timed out 0x%p", info->req);
+ else if (info->extra_time < CPT_TIME_IN_RESET_COUNT) {
+ info->time_in = jiffies;
+ info->extra_time++;
+ }
+ return 1;
+
+ case OTX2_CPT_COMP_E_GOOD:
+ /*
+ * Check the microcode completion code; it is only valid
+ * when the completion code is CPT_COMP_E::GOOD.
+ */
+ if (uc_ccode != OTX2_CPT_UCC_SUCCESS) {
+ /*
+ * If the requested HMAC is truncated and the ucode returns
+ * an s/g write length error, report success: the ucode
+ * writes as many bytes of the calculated HMAC as fit in the
+ * gather buffer and reports an s/g write length error when
+ * the gather buffer holds fewer bytes than the full HMAC.
+ */
+ if (info->req->is_trunc_hmac &&
+ uc_ccode == OTX2_CPT_UCC_SG_WRITE_LENGTH) {
+ *res_code = 0;
+ break;
+ }
+
+ dev_err(&pdev->dev,
+ "Request failed with software error code 0x%x\n",
+ cpt_status->s.uc_compcode);
+ otx2_cpt_dump_sg_list(pdev, info->req);
+ break;
+ }
+ /* Request has been processed with success */
+ *res_code = 0;
+ break;
+
+ default:
+ dev_err(&pdev->dev,
+ "Request returned invalid status %d\n", ccode);
+ break;
+ }
+ return 0;
+}
+
+static inline void process_pending_queue(struct pci_dev *pdev,
+ struct otx2_cpt_pending_queue *pqueue)
+{
+ struct otx2_cpt_pending_entry *resume_pentry = NULL;
+ void (*callback)(int status, void *arg, void *req);
+ struct otx2_cpt_pending_entry *pentry = NULL;
+ union otx2_cpt_res_s *cpt_status = NULL;
+ struct otx2_cpt_inst_info *info = NULL;
+ struct otx2_cpt_req_info *req = NULL;
+ struct crypto_async_request *areq;
+ u32 res_code, resume_index;
+
+ while (1) {
+ spin_lock_bh(&pqueue->lock);
+ pentry = &pqueue->head[pqueue->front];
+
+ if (WARN_ON(!pentry)) {
+ spin_unlock_bh(&pqueue->lock);
+ break;
+ }
+
+ res_code = -EINVAL;
+ if (unlikely(!pentry->busy)) {
+ spin_unlock_bh(&pqueue->lock);
+ break;
+ }
+
+ if (unlikely(!pentry->callback)) {
+ dev_err(&pdev->dev, "Callback NULL\n");
+ goto process_pentry;
+ }
+
+ info = pentry->info;
+ if (unlikely(!info)) {
+ dev_err(&pdev->dev, "Pending entry post arg NULL\n");
+ goto process_pentry;
+ }
+
+ req = info->req;
+ if (unlikely(!req)) {
+ dev_err(&pdev->dev, "Request NULL\n");
+ goto process_pentry;
+ }
+
+ cpt_status = pentry->completion_addr;
+ if (unlikely(!cpt_status)) {
+ dev_err(&pdev->dev, "Completion address NULL\n");
+ goto process_pentry;
+ }
+
+ if (cpt_process_ccode(pdev, cpt_status, info, &res_code)) {
+ spin_unlock_bh(&pqueue->lock);
+ return;
+ }
+ info->pdev = pdev;
+
+process_pentry:
+ /*
+ * Check whether the sending side should be told to resume.
+ * This is done CPT_IQ_RESUME_MARGIN entries before the
+ * pending queue becomes empty.
+ */
+ resume_index = modulo_inc(pqueue->front, pqueue->qlen,
+ CPT_IQ_RESUME_MARGIN);
+ resume_pentry = &pqueue->head[resume_index];
+ if (resume_pentry &&
+ resume_pentry->resume_sender) {
+ resume_pentry->resume_sender = false;
+ callback = resume_pentry->callback;
+ areq = resume_pentry->areq;
+
+ if (callback) {
+ spin_unlock_bh(&pqueue->lock);
+
+ /*
+ * EINPROGRESS is an indication for sending
+ * side that it can resume sending requests
+ */
+ callback(-EINPROGRESS, areq, info);
+ spin_lock_bh(&pqueue->lock);
+ }
+ }
+
+ callback = pentry->callback;
+ areq = pentry->areq;
+ free_pentry(pentry);
+
+ pqueue->pending_count--;
+ pqueue->front = modulo_inc(pqueue->front, pqueue->qlen, 1);
+ spin_unlock_bh(&pqueue->lock);
+
+ /*
+ * Call the callback after the current pending entry has been
+ * processed; skip it if the callback pointer is invalid.
+ */
+ if (callback)
+ callback(res_code, areq, info);
+ }
+}
+
+void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe)
+{
+ process_pending_queue(wqe->lfs->pdev,
+ &wqe->lfs->lf[wqe->lf_num].pqueue);
+}
+
+int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev)
+{
+ struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
+
+ return cptvf->lfs.kcrypto_eng_grp_num;
+}
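The pending queue in this file is a fixed-size ring: get_free_pending_entry() claims the slot at rear, process_pending_queue() retires the slot at front, and modulo_inc() wraps both indices, while the CPT_IQ_STOP_MARGIN / CPT_IQ_RESUME_MARGIN thresholds throttle senders before the ring fills and wake them before it drains. A small standalone model of that index arithmetic (sizes, names and the resume test are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

#define QLEN          16   /* illustrative; the driver uses OTX2_CPT_INST_QLEN_MSGS */
#define STOP_MARGIN    4   /* tell senders to back off this many slots before full */
#define RESUME_MARGIN  8   /* tell senders to resume this many slots before empty */

struct ring {
	unsigned int front, rear, count;
	bool busy[QLEN];
};

static unsigned int modulo_inc(unsigned int index, unsigned int length,
			       unsigned int inc)
{
	index += inc;
	return index >= length ? index - length : index;
}

/* Claims the slot at rear; reports when senders should back off. */
static bool enqueue(struct ring *r, bool *ask_sender_to_stop)
{
	if (r->busy[r->rear])
		return false;                       /* queue full */

	r->busy[r->rear] = true;
	r->rear = modulo_inc(r->rear, QLEN, 1);
	r->count++;
	/* Throttle senders before the ring actually fills up. */
	*ask_sender_to_stop = r->count > QLEN - STOP_MARGIN;
	return true;
}

/* Retires the oldest entry; reports when senders may resume. */
static bool dequeue(struct ring *r, bool *ask_sender_to_resume)
{
	unsigned int resume_idx = modulo_inc(r->front, QLEN, RESUME_MARGIN);

	if (!r->busy[r->front])
		return false;                       /* queue empty */

	/* Resume senders RESUME_MARGIN entries before the queue drains. */
	*ask_sender_to_resume = !r->busy[resume_idx];
	r->busy[r->front] = false;
	r->front = modulo_inc(r->front, QLEN, 1);
	r->count--;
	return true;
}

int main(void)
{
	struct ring r = { 0 };
	bool stop = false, resume = false;
	int i;

	for (i = 0; i < QLEN - 2; i++)
		enqueue(&r, &stop);
	printf("after %d enqueues: stop=%d\n", i, stop);

	while (dequeue(&r, &resume))
		;
	printf("drained, resume=%d\n", resume);
	return 0;
}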
diff --git a/drivers/crypto/mediatek/Makefile b/drivers/crypto/mediatek/Makefile
deleted file mode 100644
index 196a4653974e..000000000000
--- a/drivers/crypto/mediatek/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mtk-crypto.o
-mtk-crypto-objs:= mtk-platform.o mtk-aes.o mtk-sha.o
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
deleted file mode 100644
index 7323066724c3..000000000000
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ /dev/null
@@ -1,1271 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Cryptographic API.
- *
- * Driver for EIP97 AES acceleration.
- *
- * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
- *
- * Some ideas are from atmel-aes.c drivers.
- */
-
-#include <crypto/aes.h>
-#include <crypto/gcm.h>
-#include <crypto/internal/skcipher.h>
-#include "mtk-platform.h"
-
-#define AES_QUEUE_SIZE 512
-#define AES_BUF_ORDER 2
-#define AES_BUF_SIZE ((PAGE_SIZE << AES_BUF_ORDER) \
- & ~(AES_BLOCK_SIZE - 1))
-#define AES_MAX_STATE_BUF_SIZE SIZE_IN_WORDS(AES_KEYSIZE_256 + \
- AES_BLOCK_SIZE * 2)
-#define AES_MAX_CT_SIZE 6
-
-#define AES_CT_CTRL_HDR cpu_to_le32(0x00220000)
-
-/* AES-CBC/ECB/CTR/OFB/CFB command token */
-#define AES_CMD0 cpu_to_le32(0x05000000)
-#define AES_CMD1 cpu_to_le32(0x2d060000)
-#define AES_CMD2 cpu_to_le32(0xe4a63806)
-/* AES-GCM command token */
-#define AES_GCM_CMD0 cpu_to_le32(0x0b000000)
-#define AES_GCM_CMD1 cpu_to_le32(0xa0800000)
-#define AES_GCM_CMD2 cpu_to_le32(0x25000010)
-#define AES_GCM_CMD3 cpu_to_le32(0x0f020000)
-#define AES_GCM_CMD4 cpu_to_le32(0x21e60000)
-#define AES_GCM_CMD5 cpu_to_le32(0x40e60000)
-#define AES_GCM_CMD6 cpu_to_le32(0xd0070000)
-
-/* AES transform information word 0 fields */
-#define AES_TFM_BASIC_OUT cpu_to_le32(0x4 << 0)
-#define AES_TFM_BASIC_IN cpu_to_le32(0x5 << 0)
-#define AES_TFM_GCM_OUT cpu_to_le32(0x6 << 0)
-#define AES_TFM_GCM_IN cpu_to_le32(0xf << 0)
-#define AES_TFM_SIZE(x) cpu_to_le32((x) << 8)
-#define AES_TFM_128BITS cpu_to_le32(0xb << 16)
-#define AES_TFM_192BITS cpu_to_le32(0xd << 16)
-#define AES_TFM_256BITS cpu_to_le32(0xf << 16)
-#define AES_TFM_GHASH_DIGEST cpu_to_le32(0x2 << 21)
-#define AES_TFM_GHASH cpu_to_le32(0x4 << 23)
-/* AES transform information word 1 fields */
-#define AES_TFM_ECB cpu_to_le32(0x0 << 0)
-#define AES_TFM_CBC cpu_to_le32(0x1 << 0)
-#define AES_TFM_OFB cpu_to_le32(0x4 << 0)
-#define AES_TFM_CFB128 cpu_to_le32(0x5 << 0)
-#define AES_TFM_CTR_INIT cpu_to_le32(0x2 << 0) /* init counter to 1 */
-#define AES_TFM_CTR_LOAD cpu_to_le32(0x6 << 0) /* load/reuse counter */
-#define AES_TFM_3IV cpu_to_le32(0x7 << 5) /* using IV 0-2 */
-#define AES_TFM_FULL_IV cpu_to_le32(0xf << 5) /* using IV 0-3 */
-#define AES_TFM_IV_CTR_MODE cpu_to_le32(0x1 << 10)
-#define AES_TFM_ENC_HASH cpu_to_le32(0x1 << 17)
-
-/* AES flags */
-#define AES_FLAGS_CIPHER_MSK GENMASK(4, 0)
-#define AES_FLAGS_ECB BIT(0)
-#define AES_FLAGS_CBC BIT(1)
-#define AES_FLAGS_CTR BIT(2)
-#define AES_FLAGS_OFB BIT(3)
-#define AES_FLAGS_CFB128 BIT(4)
-#define AES_FLAGS_GCM BIT(5)
-#define AES_FLAGS_ENCRYPT BIT(6)
-#define AES_FLAGS_BUSY BIT(7)
-
-#define AES_AUTH_TAG_ERR cpu_to_le32(BIT(26))
-
-/**
- * mtk_aes_info - hardware information of AES
- * @cmd: command token, hardware instruction
- * @tfm: transform state of cipher algorithm.
- * @state: contains keys and initial vectors.
- *
- * Memory layout of GCM buffer:
- * /-----------\
- * | AES KEY | 128/196/256 bits
- * |-----------|
- * | HASH KEY | a string 128 zero bits encrypted using the block cipher
- * |-----------|
- * | IVs | 4 * 4 bytes
- * \-----------/
- *
- * The engine requires all these info to do:
- * - Commands decoding and control of the engine's data path.
- * - Coordinating hardware data fetch and store operations.
- * - Result token construction and output.
- */
-struct mtk_aes_info {
- __le32 cmd[AES_MAX_CT_SIZE];
- __le32 tfm[2];
- __le32 state[AES_MAX_STATE_BUF_SIZE];
-};
-
-struct mtk_aes_reqctx {
- u64 mode;
-};
-
-struct mtk_aes_base_ctx {
- struct mtk_cryp *cryp;
- u32 keylen;
- __le32 key[12];
- __le32 keymode;
-
- mtk_aes_fn start;
-
- struct mtk_aes_info info;
- dma_addr_t ct_dma;
- dma_addr_t tfm_dma;
-
- __le32 ct_hdr;
- u32 ct_size;
-};
-
-struct mtk_aes_ctx {
- struct mtk_aes_base_ctx base;
-};
-
-struct mtk_aes_ctr_ctx {
- struct mtk_aes_base_ctx base;
-
- __be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
- size_t offset;
- struct scatterlist src[2];
- struct scatterlist dst[2];
-};
-
-struct mtk_aes_gcm_ctx {
- struct mtk_aes_base_ctx base;
-
- u32 authsize;
- size_t textlen;
-};
-
-struct mtk_aes_drv {
- struct list_head dev_list;
- /* Device list lock */
- spinlock_t lock;
-};
-
-static struct mtk_aes_drv mtk_aes = {
- .dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
- .lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
-};
-
-static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
-{
- return readl_relaxed(cryp->base + offset);
-}
-
-static inline void mtk_aes_write(struct mtk_cryp *cryp,
- u32 offset, u32 value)
-{
- writel_relaxed(value, cryp->base + offset);
-}
-
-static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
-{
- struct mtk_cryp *cryp = NULL;
- struct mtk_cryp *tmp;
-
- spin_lock_bh(&mtk_aes.lock);
- if (!ctx->cryp) {
- list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
- cryp = tmp;
- break;
- }
- ctx->cryp = cryp;
- } else {
- cryp = ctx->cryp;
- }
- spin_unlock_bh(&mtk_aes.lock);
-
- return cryp;
-}
-
-static inline size_t mtk_aes_padlen(size_t len)
-{
- len &= AES_BLOCK_SIZE - 1;
- return len ? AES_BLOCK_SIZE - len : 0;
-}
-
-static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
- struct mtk_aes_dma *dma)
-{
- int nents;
-
- if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
- return false;
-
- for (nents = 0; sg; sg = sg_next(sg), ++nents) {
- if (!IS_ALIGNED(sg->offset, sizeof(u32)))
- return false;
-
- if (len <= sg->length) {
- if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
- return false;
-
- dma->nents = nents + 1;
- dma->remainder = sg->length - len;
- sg->length = len;
- return true;
- }
-
- if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
- return false;
-
- len -= sg->length;
- }
-
- return false;
-}
-
-static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
- const struct mtk_aes_reqctx *rctx)
-{
- /* Clear all but persistent flags and set request flags. */
- aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
-}
-
-static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
-{
- struct scatterlist *sg = dma->sg;
- int nents = dma->nents;
-
- if (!dma->remainder)
- return;
-
- while (--nents > 0 && sg)
- sg = sg_next(sg);
-
- if (!sg)
- return;
-
- sg->length += dma->remainder;
-}
-
-static inline int mtk_aes_complete(struct mtk_cryp *cryp,
- struct mtk_aes_rec *aes,
- int err)
-{
- aes->flags &= ~AES_FLAGS_BUSY;
- aes->areq->complete(aes->areq, err);
- /* Handle new request */
- tasklet_schedule(&aes->queue_task);
- return err;
-}
-
-/*
- * Write descriptors for processing. This will configure the engine, load
- * the transform information and then start the packet processing.
- */
-static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
-{
- struct mtk_ring *ring = cryp->ring[aes->id];
- struct mtk_desc *cmd = NULL, *res = NULL;
- struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
- u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
- int nents;
-
- /* Write command descriptors */
- for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
- cmd = ring->cmd_next;
- cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
- cmd->buf = cpu_to_le32(sg_dma_address(ssg));
-
- if (nents == 0) {
- cmd->hdr |= MTK_DESC_FIRST |
- MTK_DESC_CT_LEN(aes->ctx->ct_size);
- cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
- cmd->ct_hdr = aes->ctx->ct_hdr;
- cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
- }
-
- /* Shift ring buffer and check boundary */
- if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
- ring->cmd_next = ring->cmd_base;
- }
- cmd->hdr |= MTK_DESC_LAST;
-
- /* Prepare result descriptors */
- for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
- res = ring->res_next;
- res->hdr = MTK_DESC_BUF_LEN(dsg->length);
- res->buf = cpu_to_le32(sg_dma_address(dsg));
-
- if (nents == 0)
- res->hdr |= MTK_DESC_FIRST;
-
- /* Shift ring buffer and check boundary */
- if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
- ring->res_next = ring->res_base;
- }
- res->hdr |= MTK_DESC_LAST;
-
- /* Pointer to current result descriptor */
- ring->res_prev = res;
-
- /* Prepare enough space for authenticated tag */
- if (aes->flags & AES_FLAGS_GCM)
- le32_add_cpu(&res->hdr, AES_BLOCK_SIZE);
-
- /*
- * Make sure that all changes to the DMA ring are done before we
- * start engine.
- */
- wmb();
- /* Start DMA transfer */
- mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
- mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));
-
- return -EINPROGRESS;
-}
-
-static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
-{
- struct mtk_aes_base_ctx *ctx = aes->ctx;
-
- dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
- DMA_TO_DEVICE);
-
- if (aes->src.sg == aes->dst.sg) {
- dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
- DMA_BIDIRECTIONAL);
-
- if (aes->src.sg != &aes->aligned_sg)
- mtk_aes_restore_sg(&aes->src);
- } else {
- dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
- DMA_FROM_DEVICE);
-
- if (aes->dst.sg != &aes->aligned_sg)
- mtk_aes_restore_sg(&aes->dst);
-
- dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
- DMA_TO_DEVICE);
-
- if (aes->src.sg != &aes->aligned_sg)
- mtk_aes_restore_sg(&aes->src);
- }
-
- if (aes->dst.sg == &aes->aligned_sg)
- sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
- aes->buf, aes->total);
-}
-
-static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
-{
- struct mtk_aes_base_ctx *ctx = aes->ctx;
- struct mtk_aes_info *info = &ctx->info;
-
- ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
- goto exit;
-
- ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);
-
- if (aes->src.sg == aes->dst.sg) {
- aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
- aes->src.nents,
- DMA_BIDIRECTIONAL);
- aes->dst.sg_len = aes->src.sg_len;
- if (unlikely(!aes->src.sg_len))
- goto sg_map_err;
- } else {
- aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
- aes->src.nents, DMA_TO_DEVICE);
- if (unlikely(!aes->src.sg_len))
- goto sg_map_err;
-
- aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
- aes->dst.nents, DMA_FROM_DEVICE);
- if (unlikely(!aes->dst.sg_len)) {
- dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
- DMA_TO_DEVICE);
- goto sg_map_err;
- }
- }
-
- return mtk_aes_xmit(cryp, aes);
-
-sg_map_err:
- dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
-exit:
- return mtk_aes_complete(cryp, aes, -EINVAL);
-}
-
-/* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */
-static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
- size_t len)
-{
- struct skcipher_request *req = skcipher_request_cast(aes->areq);
- struct mtk_aes_base_ctx *ctx = aes->ctx;
- struct mtk_aes_info *info = &ctx->info;
- u32 cnt = 0;
-
- ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
- info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
- info->cmd[cnt++] = AES_CMD1;
-
- info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
- if (aes->flags & AES_FLAGS_ENCRYPT)
- info->tfm[0] |= AES_TFM_BASIC_OUT;
- else
- info->tfm[0] |= AES_TFM_BASIC_IN;
-
- switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
- case AES_FLAGS_CBC:
- info->tfm[1] = AES_TFM_CBC;
- break;
- case AES_FLAGS_ECB:
- info->tfm[1] = AES_TFM_ECB;
- goto ecb;
- case AES_FLAGS_CTR:
- info->tfm[1] = AES_TFM_CTR_LOAD;
- goto ctr;
- case AES_FLAGS_OFB:
- info->tfm[1] = AES_TFM_OFB;
- break;
- case AES_FLAGS_CFB128:
- info->tfm[1] = AES_TFM_CFB128;
- break;
- default:
- /* Should not happen... */
- return;
- }
-
- memcpy(info->state + ctx->keylen, req->iv, AES_BLOCK_SIZE);
-ctr:
- le32_add_cpu(&info->tfm[0],
- le32_to_cpu(AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE))));
- info->tfm[1] |= AES_TFM_FULL_IV;
- info->cmd[cnt++] = AES_CMD2;
-ecb:
- ctx->ct_size = cnt;
-}
-
-static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
- struct scatterlist *src, struct scatterlist *dst,
- size_t len)
-{
- size_t padlen = 0;
- bool src_aligned, dst_aligned;
-
- aes->total = len;
- aes->src.sg = src;
- aes->dst.sg = dst;
- aes->real_dst = dst;
-
- src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
- if (src == dst)
- dst_aligned = src_aligned;
- else
- dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);
-
- if (!src_aligned || !dst_aligned) {
- padlen = mtk_aes_padlen(len);
-
- if (len + padlen > AES_BUF_SIZE)
- return mtk_aes_complete(cryp, aes, -ENOMEM);
-
- if (!src_aligned) {
- sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
- aes->src.sg = &aes->aligned_sg;
- aes->src.nents = 1;
- aes->src.remainder = 0;
- }
-
- if (!dst_aligned) {
- aes->dst.sg = &aes->aligned_sg;
- aes->dst.nents = 1;
- aes->dst.remainder = 0;
- }
-
- sg_init_table(&aes->aligned_sg, 1);
- sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
- }
-
- mtk_aes_info_init(cryp, aes, len + padlen);
-
- return mtk_aes_map(cryp, aes);
-}
-
-static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
- struct crypto_async_request *new_areq)
-{
- struct mtk_aes_rec *aes = cryp->aes[id];
- struct crypto_async_request *areq, *backlog;
- struct mtk_aes_base_ctx *ctx;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&aes->lock, flags);
- if (new_areq)
- ret = crypto_enqueue_request(&aes->queue, new_areq);
- if (aes->flags & AES_FLAGS_BUSY) {
- spin_unlock_irqrestore(&aes->lock, flags);
- return ret;
- }
- backlog = crypto_get_backlog(&aes->queue);
- areq = crypto_dequeue_request(&aes->queue);
- if (areq)
- aes->flags |= AES_FLAGS_BUSY;
- spin_unlock_irqrestore(&aes->lock, flags);
-
- if (!areq)
- return ret;
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
- ctx = crypto_tfm_ctx(areq->tfm);
- /* Write key into state buffer */
- memcpy(ctx->info.state, ctx->key, sizeof(ctx->key));
-
- aes->areq = areq;
- aes->ctx = ctx;
-
- return ctx->start(cryp, aes);
-}
-
-static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
- struct mtk_aes_rec *aes)
-{
- return mtk_aes_complete(cryp, aes, 0);
-}
-
-static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
-{
- struct skcipher_request *req = skcipher_request_cast(aes->areq);
- struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);
-
- mtk_aes_set_mode(aes, rctx);
- aes->resume = mtk_aes_transfer_complete;
-
- return mtk_aes_dma(cryp, aes, req->src, req->dst, req->cryptlen);
-}
-
-static inline struct mtk_aes_ctr_ctx *
-mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
-{
- return container_of(ctx, struct mtk_aes_ctr_ctx, base);
-}
-
-static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
-{
- struct mtk_aes_base_ctx *ctx = aes->ctx;
- struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
- struct skcipher_request *req = skcipher_request_cast(aes->areq);
- struct scatterlist *src, *dst;
- u32 start, end, ctr, blocks;
- size_t datalen;
- bool fragmented = false;
-
- /* Check for transfer completion. */
- cctx->offset += aes->total;
- if (cctx->offset >= req->cryptlen)
- return mtk_aes_transfer_complete(cryp, aes);
-
- /* Compute data length. */
- datalen = req->cryptlen - cctx->offset;
- blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
- ctr = be32_to_cpu(cctx->iv[3]);
-
- /* Check 32bit counter overflow. */
- start = ctr;
- end = start + blocks - 1;
- if (end < start) {
- ctr = 0xffffffff;
- datalen = AES_BLOCK_SIZE * -start;
- fragmented = true;
- }
-
- /* Jump to offset. */
- src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
- dst = ((req->src == req->dst) ? src :
- scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));
-
- /* Write IVs into transform state buffer. */
- memcpy(ctx->info.state + ctx->keylen, cctx->iv, AES_BLOCK_SIZE);
-
- if (unlikely(fragmented)) {
- /*
- * Increment the counter manually to cope with the hardware
- * counter overflow.
- */
- cctx->iv[3] = cpu_to_be32(ctr);
- crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
- }
-
- return mtk_aes_dma(cryp, aes, src, dst, datalen);
-}
-
-static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
-{
- struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
- struct skcipher_request *req = skcipher_request_cast(aes->areq);
- struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);
-
- mtk_aes_set_mode(aes, rctx);
-
- memcpy(cctx->iv, req->iv, AES_BLOCK_SIZE);
- cctx->offset = 0;
- aes->total = 0;
- aes->resume = mtk_aes_ctr_transfer;
-
- return mtk_aes_ctr_transfer(cryp, aes);
-}
-
-/* Check and set the AES key to transform state buffer */
-static int mtk_aes_setkey(struct crypto_skcipher *tfm,
- const u8 *key, u32 keylen)
-{
- struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- switch (keylen) {
- case AES_KEYSIZE_128:
- ctx->keymode = AES_TFM_128BITS;
- break;
- case AES_KEYSIZE_192:
- ctx->keymode = AES_TFM_192BITS;
- break;
- case AES_KEYSIZE_256:
- ctx->keymode = AES_TFM_256BITS;
- break;
-
- default:
- return -EINVAL;
- }
-
- ctx->keylen = SIZE_IN_WORDS(keylen);
- memcpy(ctx->key, key, keylen);
-
- return 0;
-}
-
-static int mtk_aes_crypt(struct skcipher_request *req, u64 mode)
-{
- struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
- struct mtk_aes_reqctx *rctx;
- struct mtk_cryp *cryp;
-
- cryp = mtk_aes_find_dev(ctx);
- if (!cryp)
- return -ENODEV;
-
- rctx = skcipher_request_ctx(req);
- rctx->mode = mode;
-
- return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT),
- &req->base);
-}
-
-static int mtk_aes_ecb_encrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
-}
-
-static int mtk_aes_ecb_decrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_ECB);
-}
-
-static int mtk_aes_cbc_encrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
-}
-
-static int mtk_aes_cbc_decrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_CBC);
-}
-
-static int mtk_aes_ctr_encrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
-}
-
-static int mtk_aes_ctr_decrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_CTR);
-}
-
-static int mtk_aes_ofb_encrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
-}
-
-static int mtk_aes_ofb_decrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_OFB);
-}
-
-static int mtk_aes_cfb_encrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128);
-}
-
-static int mtk_aes_cfb_decrypt(struct skcipher_request *req)
-{
- return mtk_aes_crypt(req, AES_FLAGS_CFB128);
-}
-
-static int mtk_aes_init_tfm(struct crypto_skcipher *tfm)
-{
- struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
- ctx->base.start = mtk_aes_start;
- return 0;
-}
-
-static int mtk_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
-{
- struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
- ctx->base.start = mtk_aes_ctr_start;
- return 0;
-}
-
-static struct skcipher_alg aes_algs[] = {
-{
- .base.cra_name = "cbc(aes)",
- .base.cra_driver_name = "cbc-aes-mtk",
- .base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_cbc_encrypt,
- .decrypt = mtk_aes_cbc_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- .init = mtk_aes_init_tfm,
-},
-{
- .base.cra_name = "ecb(aes)",
- .base.cra_driver_name = "ecb-aes-mtk",
- .base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_ecb_encrypt,
- .decrypt = mtk_aes_ecb_decrypt,
- .init = mtk_aes_init_tfm,
-},
-{
- .base.cra_name = "ctr(aes)",
- .base.cra_driver_name = "ctr-aes-mtk",
- .base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_ctr_encrypt,
- .decrypt = mtk_aes_ctr_decrypt,
- .init = mtk_aes_ctr_init_tfm,
-},
-{
- .base.cra_name = "ofb(aes)",
- .base.cra_driver_name = "ofb-aes-mtk",
- .base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_ofb_encrypt,
- .decrypt = mtk_aes_ofb_decrypt,
-},
-{
- .base.cra_name = "cfb(aes)",
- .base.cra_driver_name = "cfb-aes-mtk",
- .base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct mtk_aes_ctx),
- .base.cra_alignmask = 0xf,
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mtk_aes_setkey,
- .encrypt = mtk_aes_cfb_encrypt,
- .decrypt = mtk_aes_cfb_decrypt,
-},
-};
-
-static inline struct mtk_aes_gcm_ctx *
-mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
-{
- return container_of(ctx, struct mtk_aes_gcm_ctx, base);
-}
-
-/*
- * Engine will verify and compare tag automatically, so we just need
- * to check returned status which stored in the result descriptor.
- */
-static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
- struct mtk_aes_rec *aes)
-{
- __le32 status = cryp->ring[aes->id]->res_prev->ct;
-
- return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
- -EBADMSG : 0);
-}
-
-/* Initialize transform information of GCM mode */
-static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
- struct mtk_aes_rec *aes,
- size_t len)
-{
- struct aead_request *req = aead_request_cast(aes->areq);
- struct mtk_aes_base_ctx *ctx = aes->ctx;
- struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
- struct mtk_aes_info *info = &ctx->info;
- u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
- u32 cnt = 0;
-
- ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
-
- info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
- info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
- info->cmd[cnt++] = AES_GCM_CMD2;
- info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);
-
- if (aes->flags & AES_FLAGS_ENCRYPT) {
- info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
- info->tfm[0] = AES_TFM_GCM_OUT;
- } else {
- info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
- info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
- info->tfm[0] = AES_TFM_GCM_IN;
- }
- ctx->ct_size = cnt;
-
- info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
- ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
- ctx->keymode;
- info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
- AES_TFM_ENC_HASH;
-
- memcpy(info->state + ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE),
- req->iv, ivsize);
-}
-
-static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
- struct scatterlist *src, struct scatterlist *dst,
- size_t len)
-{
- bool src_aligned, dst_aligned;
-
- aes->src.sg = src;
- aes->dst.sg = dst;
- aes->real_dst = dst;
-
- src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
- if (src == dst)
- dst_aligned = src_aligned;
- else
- dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);
-
- if (!src_aligned || !dst_aligned) {
- if (aes->total > AES_BUF_SIZE)
- return mtk_aes_complete(cryp, aes, -ENOMEM);
-
- if (!src_aligned) {
- sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
- aes->src.sg = &aes->aligned_sg;
- aes->src.nents = 1;
- aes->src.remainder = 0;
- }
-
- if (!dst_aligned) {
- aes->dst.sg = &aes->aligned_sg;
- aes->dst.nents = 1;
- aes->dst.remainder = 0;
- }
-
- sg_init_table(&aes->aligned_sg, 1);
- sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
- }
-
- mtk_aes_gcm_info_init(cryp, aes, len);
-
- return mtk_aes_map(cryp, aes);
-}
-
-/* Todo: GMAC */
-static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
-{
- struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
- struct aead_request *req = aead_request_cast(aes->areq);
- struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
- u32 len = req->assoclen + req->cryptlen;
-
- mtk_aes_set_mode(aes, rctx);
-
- if (aes->flags & AES_FLAGS_ENCRYPT) {
- u32 tag[4];
-
- aes->resume = mtk_aes_transfer_complete;
- /* Compute total process length. */
- aes->total = len + gctx->authsize;
- /* Hardware will append authenticated tag to output buffer */
- scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
- } else {
- aes->resume = mtk_aes_gcm_tag_verify;
- aes->total = len;
- }
-
- return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
-}
-
-static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
-{
- struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
- struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
- struct mtk_cryp *cryp;
- bool enc = !!(mode & AES_FLAGS_ENCRYPT);
-
- cryp = mtk_aes_find_dev(ctx);
- if (!cryp)
- return -ENODEV;
-
- /* Compute text length. */
- gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize);
-
- /* Empty messages are not supported yet */
- if (!gctx->textlen && !req->assoclen)
- return -EINVAL;
-
- rctx->mode = AES_FLAGS_GCM | mode;
-
- return mtk_aes_handle_queue(cryp, enc, &req->base);
-}
-
-/*
- * Because of the hardware limitation, we need to pre-calculate key(H)
- * for the GHASH operation. The result of the encryption operation
- * need to be stored in the transform state buffer.
- */
-static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
- u32 keylen)
-{
- struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
- union {
- u32 x32[SIZE_IN_WORDS(AES_BLOCK_SIZE)];
- u8 x8[AES_BLOCK_SIZE];
- } hash = {};
- struct crypto_aes_ctx aes_ctx;
- int err;
- int i;
-
- switch (keylen) {
- case AES_KEYSIZE_128:
- ctx->keymode = AES_TFM_128BITS;
- break;
- case AES_KEYSIZE_192:
- ctx->keymode = AES_TFM_192BITS;
- break;
- case AES_KEYSIZE_256:
- ctx->keymode = AES_TFM_256BITS;
- break;
-
- default:
- return -EINVAL;
- }
-
- ctx->keylen = SIZE_IN_WORDS(keylen);
-
- err = aes_expandkey(&aes_ctx, key, keylen);
- if (err)
- return err;
-
- aes_encrypt(&aes_ctx, hash.x8, hash.x8);
- memzero_explicit(&aes_ctx, sizeof(aes_ctx));
-
- memcpy(ctx->key, key, keylen);
-
- /* Why do we need to do this? */
- for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
- hash.x32[i] = swab32(hash.x32[i]);
-
- memcpy(ctx->key + ctx->keylen, &hash, AES_BLOCK_SIZE);
-
- return 0;
-}
-
-static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
- u32 authsize)
-{
- struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
- struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
-
- /* Same as crypto_gcm_authsize() from crypto/gcm.c */
- switch (authsize) {
- case 8:
- case 12:
- case 16:
- break;
- default:
- return -EINVAL;
- }
-
- gctx->authsize = authsize;
- return 0;
-}
-
-static int mtk_aes_gcm_encrypt(struct aead_request *req)
-{
- return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
-}
-
-static int mtk_aes_gcm_decrypt(struct aead_request *req)
-{
- return mtk_aes_gcm_crypt(req, 0);
-}
-
-static int mtk_aes_gcm_init(struct crypto_aead *aead)
-{
- struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
-
- crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
- ctx->base.start = mtk_aes_gcm_start;
- return 0;
-}
-
-static struct aead_alg aes_gcm_alg = {
- .setkey = mtk_aes_gcm_setkey,
- .setauthsize = mtk_aes_gcm_setauthsize,
- .encrypt = mtk_aes_gcm_encrypt,
- .decrypt = mtk_aes_gcm_decrypt,
- .init = mtk_aes_gcm_init,
- .ivsize = GCM_AES_IV_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
-
- .base = {
- .cra_name = "gcm(aes)",
- .cra_driver_name = "gcm-aes-mtk",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct mtk_aes_gcm_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
- },
-};
-
-static void mtk_aes_queue_task(unsigned long data)
-{
- struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
-
- mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
-}
-
-static void mtk_aes_done_task(unsigned long data)
-{
- struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
- struct mtk_cryp *cryp = aes->cryp;
-
- mtk_aes_unmap(cryp, aes);
- aes->resume(cryp, aes);
-}
-
-static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
-{
- struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
- struct mtk_cryp *cryp = aes->cryp;
- u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));
-
- mtk_aes_write(cryp, RDR_STAT(aes->id), val);
-
- if (likely(AES_FLAGS_BUSY & aes->flags)) {
- mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
- mtk_aes_write(cryp, RDR_THRESH(aes->id),
- MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
-
- tasklet_schedule(&aes->done_task);
- } else {
- dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
- }
- return IRQ_HANDLED;
-}
-
-/*
- * The purpose of creating encryption and decryption records is
- * to process outbound/inbound data in parallel, it can improve
- * performance in most use cases, such as IPSec VPN, especially
- * under heavy network traffic.
- */
-static int mtk_aes_record_init(struct mtk_cryp *cryp)
-{
- struct mtk_aes_rec **aes = cryp->aes;
- int i, err = -ENOMEM;
-
- for (i = 0; i < MTK_REC_NUM; i++) {
- aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
- if (!aes[i])
- goto err_cleanup;
-
- aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
- AES_BUF_ORDER);
- if (!aes[i]->buf)
- goto err_cleanup;
-
- aes[i]->cryp = cryp;
-
- spin_lock_init(&aes[i]->lock);
- crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);
-
- tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
- (unsigned long)aes[i]);
- tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
- (unsigned long)aes[i]);
- }
-
- /* Link to ring0 and ring1 respectively */
- aes[0]->id = MTK_RING0;
- aes[1]->id = MTK_RING1;
-
- return 0;
-
-err_cleanup:
- for (; i--; ) {
- free_page((unsigned long)aes[i]->buf);
- kfree(aes[i]);
- }
-
- return err;
-}
-
-static void mtk_aes_record_free(struct mtk_cryp *cryp)
-{
- int i;
-
- for (i = 0; i < MTK_REC_NUM; i++) {
- tasklet_kill(&cryp->aes[i]->done_task);
- tasklet_kill(&cryp->aes[i]->queue_task);
-
- free_page((unsigned long)cryp->aes[i]->buf);
- kfree(cryp->aes[i]);
- }
-}
-
-static void mtk_aes_unregister_algs(void)
-{
- int i;
-
- crypto_unregister_aead(&aes_gcm_alg);
-
- for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
- crypto_unregister_skcipher(&aes_algs[i]);
-}
-
-static int mtk_aes_register_algs(void)
-{
- int err, i;
-
- for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- err = crypto_register_skcipher(&aes_algs[i]);
- if (err)
- goto err_aes_algs;
- }
-
- err = crypto_register_aead(&aes_gcm_alg);
- if (err)
- goto err_aes_algs;
-
- return 0;
-
-err_aes_algs:
- for (; i--; )
- crypto_unregister_skcipher(&aes_algs[i]);
-
- return err;
-}
-
-int mtk_cipher_alg_register(struct mtk_cryp *cryp)
-{
- int ret;
-
- INIT_LIST_HEAD(&cryp->aes_list);
-
- /* Initialize two cipher records */
- ret = mtk_aes_record_init(cryp);
- if (ret)
- goto err_record;
-
- ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
- 0, "mtk-aes", cryp->aes[0]);
- if (ret) {
- dev_err(cryp->dev, "unable to request AES irq.\n");
- goto err_res;
- }
-
- ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
- 0, "mtk-aes", cryp->aes[1]);
- if (ret) {
- dev_err(cryp->dev, "unable to request AES irq.\n");
- goto err_res;
- }
-
- /* Enable ring0 and ring1 interrupt */
- mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
- mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);
-
- spin_lock(&mtk_aes.lock);
- list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
- spin_unlock(&mtk_aes.lock);
-
- ret = mtk_aes_register_algs();
- if (ret)
- goto err_algs;
-
- return 0;
-
-err_algs:
- spin_lock(&mtk_aes.lock);
- list_del(&cryp->aes_list);
- spin_unlock(&mtk_aes.lock);
-err_res:
- mtk_aes_record_free(cryp);
-err_record:
-
- dev_err(cryp->dev, "mtk-aes initialization failed.\n");
- return ret;
-}
-
-void mtk_cipher_alg_release(struct mtk_cryp *cryp)
-{
- spin_lock(&mtk_aes.lock);
- list_del(&cryp->aes_list);
- spin_unlock(&mtk_aes.lock);
-
- mtk_aes_unregister_algs();
- mtk_aes_record_free(cryp);
-}
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
deleted file mode 100644
index 9d878620e5c9..000000000000
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ /dev/null
@@ -1,586 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for EIP97 cryptographic accelerator.
- *
- * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
- */
-
-#include <linux/clk.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include "mtk-platform.h"
-
-#define MTK_BURST_SIZE_MSK GENMASK(7, 4)
-#define MTK_BURST_SIZE(x) ((x) << 4)
-#define MTK_DESC_SIZE(x) ((x) << 0)
-#define MTK_DESC_OFFSET(x) ((x) << 16)
-#define MTK_DESC_FETCH_SIZE(x) ((x) << 0)
-#define MTK_DESC_FETCH_THRESH(x) ((x) << 16)
-#define MTK_DESC_OVL_IRQ_EN BIT(25)
-#define MTK_DESC_ATP_PRESENT BIT(30)
-
-#define MTK_DFSE_IDLE GENMASK(3, 0)
-#define MTK_DFSE_THR_CTRL_EN BIT(30)
-#define MTK_DFSE_THR_CTRL_RESET BIT(31)
-#define MTK_DFSE_RING_ID(x) (((x) >> 12) & GENMASK(3, 0))
-#define MTK_DFSE_MIN_DATA(x) ((x) << 0)
-#define MTK_DFSE_MAX_DATA(x) ((x) << 8)
-#define MTK_DFE_MIN_CTRL(x) ((x) << 16)
-#define MTK_DFE_MAX_CTRL(x) ((x) << 24)
-
-#define MTK_IN_BUF_MIN_THRESH(x) ((x) << 8)
-#define MTK_IN_BUF_MAX_THRESH(x) ((x) << 12)
-#define MTK_OUT_BUF_MIN_THRESH(x) ((x) << 0)
-#define MTK_OUT_BUF_MAX_THRESH(x) ((x) << 4)
-#define MTK_IN_TBUF_SIZE(x) (((x) >> 4) & GENMASK(3, 0))
-#define MTK_IN_DBUF_SIZE(x) (((x) >> 8) & GENMASK(3, 0))
-#define MTK_OUT_DBUF_SIZE(x) (((x) >> 16) & GENMASK(3, 0))
-#define MTK_CMD_FIFO_SIZE(x) (((x) >> 8) & GENMASK(3, 0))
-#define MTK_RES_FIFO_SIZE(x) (((x) >> 12) & GENMASK(3, 0))
-
-#define MTK_PE_TK_LOC_AVL BIT(2)
-#define MTK_PE_PROC_HELD BIT(14)
-#define MTK_PE_TK_TIMEOUT_EN BIT(22)
-#define MTK_PE_INPUT_DMA_ERR BIT(0)
-#define MTK_PE_OUTPUT_DMA_ERR BIT(1)
-#define MTK_PE_PKT_PORC_ERR BIT(2)
-#define MTK_PE_PKT_TIMEOUT BIT(3)
-#define MTK_PE_FATAL_ERR BIT(14)
-#define MTK_PE_INPUT_DMA_ERR_EN BIT(16)
-#define MTK_PE_OUTPUT_DMA_ERR_EN BIT(17)
-#define MTK_PE_PKT_PORC_ERR_EN BIT(18)
-#define MTK_PE_PKT_TIMEOUT_EN BIT(19)
-#define MTK_PE_FATAL_ERR_EN BIT(30)
-#define MTK_PE_INT_OUT_EN BIT(31)
-
-#define MTK_HIA_SIGNATURE ((u16)0x35ca)
-#define MTK_HIA_DATA_WIDTH(x) (((x) >> 25) & GENMASK(1, 0))
-#define MTK_HIA_DMA_LENGTH(x) (((x) >> 20) & GENMASK(4, 0))
-#define MTK_CDR_STAT_CLR GENMASK(4, 0)
-#define MTK_RDR_STAT_CLR GENMASK(7, 0)
-
-#define MTK_AIC_INT_MSK GENMASK(5, 0)
-#define MTK_AIC_VER_MSK (GENMASK(15, 0) | GENMASK(27, 20))
-#define MTK_AIC_VER11 0x011036c9
-#define MTK_AIC_VER12 0x012036c9
-#define MTK_AIC_G_CLR GENMASK(30, 20)
-
-/**
- * EIP97 is an integrated security subsystem to accelerate cryptographic
- * functions and protocols to offload the host processor.
- * Some important hardware modules are briefly introduced below:
- *
- * Host Interface Adapter (HIA) - the main interface between the host
- * system and the hardware subsystem. It is responsible for attaching the
- * processing engine to the specific host bus interface and provides a
- * standardized software view for offloading tasks to the engine.
- *
- * Command Descriptor Ring Manager (CDR Manager) - keeps track of how many
- * CDs the host has prepared in the CDR. It monitors the fill level of its
- * CD-FIFO and, if there is sufficient space for the next block of descriptors,
- * fires off a DMA request to fetch a block of CDs.
- *
- * Data Fetch Engine (DFE) - responsible for parsing the CD and
- * setting up the required control and packet data DMA transfers from
- * system memory to the processing engine.
- *
- * Result Descriptor Ring Manager (RDR Manager) - same as the CDR Manager,
- * but its target is result descriptors. Moreover, it also handles the RD
- * updates under control of the DSE. For each packet data segment
- * processed, the DSE triggers the RDR Manager to write the updated RD.
- * If triggered to update, the RDR Manager sets up a DMA operation to
- * copy the RD from the DSE to the correct location in the RDR.
- *
- * Data Store Engine (DSE) - responsible for parsing the prepared RD
- * and setting up the required control and packet data DMA transfers from
- * the processing engine to system memory.
- *
- * Advanced Interrupt Controllers (AICs) - receive interrupt request signals
- * from various sources and combine them into one interrupt output.
- * The AICs are used as follows:
- * - one for the HIA global and processing engine interrupts,
- * - the others for the descriptor ring interrupts.
- */
-
-/* Cryptographic engine capabilities */
-struct mtk_sys_cap {
- /* host interface adapter */
- u32 hia_ver;
- u32 hia_opt;
- /* packet engine */
- u32 pkt_eng_opt;
- /* global hardware */
- u32 hw_opt;
-};
-
-static void mtk_desc_ring_link(struct mtk_cryp *cryp, u32 mask)
-{
- /* Assign rings to DFE/DSE thread and enable it */
- writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DFE_THR_CTRL);
- writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DSE_THR_CTRL);
-}
-
-static void mtk_dfe_dse_buf_setup(struct mtk_cryp *cryp,
- struct mtk_sys_cap *cap)
-{
- u32 width = MTK_HIA_DATA_WIDTH(cap->hia_opt) + 2;
- u32 len = MTK_HIA_DMA_LENGTH(cap->hia_opt) - 1;
- u32 ipbuf = min((u32)MTK_IN_DBUF_SIZE(cap->hw_opt) + width, len);
- u32 opbuf = min((u32)MTK_OUT_DBUF_SIZE(cap->hw_opt) + width, len);
- u32 itbuf = min((u32)MTK_IN_TBUF_SIZE(cap->hw_opt) + width, len);
-
- writel(MTK_DFSE_MIN_DATA(ipbuf - 1) |
- MTK_DFSE_MAX_DATA(ipbuf) |
- MTK_DFE_MIN_CTRL(itbuf - 1) |
- MTK_DFE_MAX_CTRL(itbuf),
- cryp->base + DFE_CFG);
-
- writel(MTK_DFSE_MIN_DATA(opbuf - 1) |
- MTK_DFSE_MAX_DATA(opbuf),
- cryp->base + DSE_CFG);
-
- writel(MTK_IN_BUF_MIN_THRESH(ipbuf - 1) |
- MTK_IN_BUF_MAX_THRESH(ipbuf),
- cryp->base + PE_IN_DBUF_THRESH);
-
- writel(MTK_IN_BUF_MIN_THRESH(itbuf - 1) |
- MTK_IN_BUF_MAX_THRESH(itbuf),
- cryp->base + PE_IN_TBUF_THRESH);
-
- writel(MTK_OUT_BUF_MIN_THRESH(opbuf - 1) |
- MTK_OUT_BUF_MAX_THRESH(opbuf),
- cryp->base + PE_OUT_DBUF_THRESH);
-
- writel(0, cryp->base + PE_OUT_TBUF_THRESH);
- writel(0, cryp->base + PE_OUT_BUF_CTRL);
-}
-
-static int mtk_dfe_dse_state_check(struct mtk_cryp *cryp)
-{
- int ret = -EINVAL;
- u32 val;
-
- /* Check for completion of all DMA transfers */
- val = readl(cryp->base + DFE_THR_STAT);
- if (MTK_DFSE_RING_ID(val) == MTK_DFSE_IDLE) {
- val = readl(cryp->base + DSE_THR_STAT);
- if (MTK_DFSE_RING_ID(val) == MTK_DFSE_IDLE)
- ret = 0;
- }
-
- if (!ret) {
- /* Take DFE/DSE thread out of reset */
- writel(0, cryp->base + DFE_THR_CTRL);
- writel(0, cryp->base + DSE_THR_CTRL);
- } else {
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int mtk_dfe_dse_reset(struct mtk_cryp *cryp)
-{
- /* Reset DSE/DFE and correct system priorities for all rings. */
- writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DFE_THR_CTRL);
- writel(0, cryp->base + DFE_PRIO_0);
- writel(0, cryp->base + DFE_PRIO_1);
- writel(0, cryp->base + DFE_PRIO_2);
- writel(0, cryp->base + DFE_PRIO_3);
-
- writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DSE_THR_CTRL);
- writel(0, cryp->base + DSE_PRIO_0);
- writel(0, cryp->base + DSE_PRIO_1);
- writel(0, cryp->base + DSE_PRIO_2);
- writel(0, cryp->base + DSE_PRIO_3);
-
- return mtk_dfe_dse_state_check(cryp);
-}
-
-static void mtk_cmd_desc_ring_setup(struct mtk_cryp *cryp,
- int i, struct mtk_sys_cap *cap)
-{
- /* Number of full descriptors that fit in the FIFO, minus one */
- u32 count =
- ((1 << MTK_CMD_FIFO_SIZE(cap->hia_opt)) / MTK_DESC_SZ) - 1;
-
- /* Temporarily disable external triggering */
- writel(0, cryp->base + CDR_CFG(i));
-
- /* Clear CDR count */
- writel(MTK_CNT_RST, cryp->base + CDR_PREP_COUNT(i));
- writel(MTK_CNT_RST, cryp->base + CDR_PROC_COUNT(i));
-
- writel(0, cryp->base + CDR_PREP_PNTR(i));
- writel(0, cryp->base + CDR_PROC_PNTR(i));
- writel(0, cryp->base + CDR_DMA_CFG(i));
-
- /* Configure CDR host address space */
- writel(0, cryp->base + CDR_BASE_ADDR_HI(i));
- writel(cryp->ring[i]->cmd_dma, cryp->base + CDR_BASE_ADDR_LO(i));
-
- writel(MTK_DESC_RING_SZ, cryp->base + CDR_RING_SIZE(i));
-
- /* Clear and disable all CDR interrupts */
- writel(MTK_CDR_STAT_CLR, cryp->base + CDR_STAT(i));
-
- /*
- * Set the command descriptor offset and enable the additional
- * token present in the descriptor.
- */
- writel(MTK_DESC_SIZE(MTK_DESC_SZ) |
- MTK_DESC_OFFSET(MTK_DESC_OFF) |
- MTK_DESC_ATP_PRESENT,
- cryp->base + CDR_DESC_SIZE(i));
-
- writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) |
- MTK_DESC_FETCH_THRESH(count * MTK_DESC_SZ),
- cryp->base + CDR_CFG(i));
-}
-
-static void mtk_res_desc_ring_setup(struct mtk_cryp *cryp,
- int i, struct mtk_sys_cap *cap)
-{
- u32 rndup = 2;
- u32 count = ((1 << MTK_RES_FIFO_SIZE(cap->hia_opt)) / rndup) - 1;
-
- /* Temporarily disable external triggering */
- writel(0, cryp->base + RDR_CFG(i));
-
- /* Clear RDR count */
- writel(MTK_CNT_RST, cryp->base + RDR_PREP_COUNT(i));
- writel(MTK_CNT_RST, cryp->base + RDR_PROC_COUNT(i));
-
- writel(0, cryp->base + RDR_PREP_PNTR(i));
- writel(0, cryp->base + RDR_PROC_PNTR(i));
- writel(0, cryp->base + RDR_DMA_CFG(i));
-
- /* Configure RDR host address space */
- writel(0, cryp->base + RDR_BASE_ADDR_HI(i));
- writel(cryp->ring[i]->res_dma, cryp->base + RDR_BASE_ADDR_LO(i));
-
- writel(MTK_DESC_RING_SZ, cryp->base + RDR_RING_SIZE(i));
- writel(MTK_RDR_STAT_CLR, cryp->base + RDR_STAT(i));
-
- /*
- * The RDR manager generates update interrupts on a per-completed-packet
- * basis, and the rd_proc_thresh_irq interrupt fires when proc_pkt_count
- * for the RDR exceeds the configured number of packets.
- */
- writel(MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE,
- cryp->base + RDR_THRESH(i));
-
- /*
- * Configure a threshold and time-out value for the processed
- * result descriptors (or complete packets) that are written to
- * the RDR.
- */
- writel(MTK_DESC_SIZE(MTK_DESC_SZ) | MTK_DESC_OFFSET(MTK_DESC_OFF),
- cryp->base + RDR_DESC_SIZE(i));
-
- /*
- * Configure HIA fetch size and fetch threshold that are used to
- * fetch blocks of multiple descriptors.
- */
- writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) |
- MTK_DESC_FETCH_THRESH(count * rndup) |
- MTK_DESC_OVL_IRQ_EN,
- cryp->base + RDR_CFG(i));
-}
-
-static int mtk_packet_engine_setup(struct mtk_cryp *cryp)
-{
- struct mtk_sys_cap cap;
- int i, err;
- u32 val;
-
- cap.hia_ver = readl(cryp->base + HIA_VERSION);
- cap.hia_opt = readl(cryp->base + HIA_OPTIONS);
- cap.hw_opt = readl(cryp->base + EIP97_OPTIONS);
-
- if (!(((u16)cap.hia_ver) == MTK_HIA_SIGNATURE))
- return -EINVAL;
-
- /* Configure endianness conversion method for master (DMA) interface */
- writel(0, cryp->base + EIP97_MST_CTRL);
-
- /* Set HIA burst size */
- val = readl(cryp->base + HIA_MST_CTRL);
- val &= ~MTK_BURST_SIZE_MSK;
- val |= MTK_BURST_SIZE(5);
- writel(val, cryp->base + HIA_MST_CTRL);
-
- err = mtk_dfe_dse_reset(cryp);
- if (err) {
- dev_err(cryp->dev, "Failed to reset DFE and DSE.\n");
- return err;
- }
-
- mtk_dfe_dse_buf_setup(cryp, &cap);
-
- /* Enable the 4 rings for the packet engines. */
- mtk_desc_ring_link(cryp, 0xf);
-
- for (i = 0; i < MTK_RING_MAX; i++) {
- mtk_cmd_desc_ring_setup(cryp, i, &cap);
- mtk_res_desc_ring_setup(cryp, i, &cap);
- }
-
- writel(MTK_PE_TK_LOC_AVL | MTK_PE_PROC_HELD | MTK_PE_TK_TIMEOUT_EN,
- cryp->base + PE_TOKEN_CTRL_STAT);
-
- /* Clear all pending interrupts */
- writel(MTK_AIC_G_CLR, cryp->base + AIC_G_ACK);
- writel(MTK_PE_INPUT_DMA_ERR | MTK_PE_OUTPUT_DMA_ERR |
- MTK_PE_PKT_PORC_ERR | MTK_PE_PKT_TIMEOUT |
- MTK_PE_FATAL_ERR | MTK_PE_INPUT_DMA_ERR_EN |
- MTK_PE_OUTPUT_DMA_ERR_EN | MTK_PE_PKT_PORC_ERR_EN |
- MTK_PE_PKT_TIMEOUT_EN | MTK_PE_FATAL_ERR_EN |
- MTK_PE_INT_OUT_EN,
- cryp->base + PE_INTERRUPT_CTRL_STAT);
-
- return 0;
-}
-
-static int mtk_aic_cap_check(struct mtk_cryp *cryp, int hw)
-{
- u32 val;
-
- if (hw == MTK_RING_MAX)
- val = readl(cryp->base + AIC_G_VERSION);
- else
- val = readl(cryp->base + AIC_VERSION(hw));
-
- val &= MTK_AIC_VER_MSK;
- if (val != MTK_AIC_VER11 && val != MTK_AIC_VER12)
- return -ENXIO;
-
- if (hw == MTK_RING_MAX)
- val = readl(cryp->base + AIC_G_OPTIONS);
- else
- val = readl(cryp->base + AIC_OPTIONS(hw));
-
- val &= MTK_AIC_INT_MSK;
- if (!val || val > 32)
- return -ENXIO;
-
- return 0;
-}
-
-static int mtk_aic_init(struct mtk_cryp *cryp, int hw)
-{
- int err;
-
- err = mtk_aic_cap_check(cryp, hw);
- if (err)
- return err;
-
- /* Disable all interrupts and set initial configuration */
- if (hw == MTK_RING_MAX) {
- writel(0, cryp->base + AIC_G_ENABLE_CTRL);
- writel(0, cryp->base + AIC_G_POL_CTRL);
- writel(0, cryp->base + AIC_G_TYPE_CTRL);
- writel(0, cryp->base + AIC_G_ENABLE_SET);
- } else {
- writel(0, cryp->base + AIC_ENABLE_CTRL(hw));
- writel(0, cryp->base + AIC_POL_CTRL(hw));
- writel(0, cryp->base + AIC_TYPE_CTRL(hw));
- writel(0, cryp->base + AIC_ENABLE_SET(hw));
- }
-
- return 0;
-}
-
-static int mtk_accelerator_init(struct mtk_cryp *cryp)
-{
- int i, err;
-
- /* Initialize the advanced interrupt controllers (AICs) */
- for (i = 0; i < MTK_IRQ_NUM; i++) {
- err = mtk_aic_init(cryp, i);
- if (err) {
- dev_err(cryp->dev, "Failed to initialize AIC.\n");
- return err;
- }
- }
-
- /* Initialize packet engine */
- err = mtk_packet_engine_setup(cryp);
- if (err) {
- dev_err(cryp->dev, "Failed to configure packet engine.\n");
- return err;
- }
-
- return 0;
-}
-
-static void mtk_desc_dma_free(struct mtk_cryp *cryp)
-{
- int i;
-
- for (i = 0; i < MTK_RING_MAX; i++) {
- dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
- cryp->ring[i]->res_base,
- cryp->ring[i]->res_dma);
- dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
- cryp->ring[i]->cmd_base,
- cryp->ring[i]->cmd_dma);
- kfree(cryp->ring[i]);
- }
-}
-
-static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
-{
- struct mtk_ring **ring = cryp->ring;
- int i;
-
- for (i = 0; i < MTK_RING_MAX; i++) {
- ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
- if (!ring[i])
- goto err_cleanup;
-
- ring[i]->cmd_base = dma_alloc_coherent(cryp->dev,
- MTK_DESC_RING_SZ,
- &ring[i]->cmd_dma,
- GFP_KERNEL);
- if (!ring[i]->cmd_base)
- goto err_cleanup;
-
- ring[i]->res_base = dma_alloc_coherent(cryp->dev,
- MTK_DESC_RING_SZ,
- &ring[i]->res_dma,
- GFP_KERNEL);
- if (!ring[i]->res_base)
- goto err_cleanup;
-
- ring[i]->cmd_next = ring[i]->cmd_base;
- ring[i]->res_next = ring[i]->res_base;
- }
- return 0;
-
-err_cleanup:
- do {
- dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
- ring[i]->res_base, ring[i]->res_dma);
- dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
- ring[i]->cmd_base, ring[i]->cmd_dma);
- kfree(ring[i]);
- } while (i--);
- return -ENOMEM;
-}
-
-static int mtk_crypto_probe(struct platform_device *pdev)
-{
- struct mtk_cryp *cryp;
- int i, err;
-
- cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL);
- if (!cryp)
- return -ENOMEM;
-
- cryp->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(cryp->base))
- return PTR_ERR(cryp->base);
-
- for (i = 0; i < MTK_IRQ_NUM; i++) {
- cryp->irq[i] = platform_get_irq(pdev, i);
- if (cryp->irq[i] < 0)
- return cryp->irq[i];
- }
-
- cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp");
- if (IS_ERR(cryp->clk_cryp))
- return -EPROBE_DEFER;
-
- cryp->dev = &pdev->dev;
- pm_runtime_enable(cryp->dev);
- pm_runtime_get_sync(cryp->dev);
-
- err = clk_prepare_enable(cryp->clk_cryp);
- if (err)
- goto err_clk_cryp;
-
- /* Allocate four command/result descriptor rings */
- err = mtk_desc_ring_alloc(cryp);
- if (err) {
- dev_err(cryp->dev, "Unable to allocate descriptor rings.\n");
- goto err_resource;
- }
-
- /* Initialize hardware modules */
- err = mtk_accelerator_init(cryp);
- if (err) {
- dev_err(cryp->dev, "Failed to initialize cryptographic engine.\n");
- goto err_engine;
- }
-
- err = mtk_cipher_alg_register(cryp);
- if (err) {
- dev_err(cryp->dev, "Unable to register cipher algorithm.\n");
- goto err_cipher;
- }
-
- err = mtk_hash_alg_register(cryp);
- if (err) {
- dev_err(cryp->dev, "Unable to register hash algorithm.\n");
- goto err_hash;
- }
-
- platform_set_drvdata(pdev, cryp);
- return 0;
-
-err_hash:
- mtk_cipher_alg_release(cryp);
-err_cipher:
- mtk_dfe_dse_reset(cryp);
-err_engine:
- mtk_desc_dma_free(cryp);
-err_resource:
- clk_disable_unprepare(cryp->clk_cryp);
-err_clk_cryp:
- pm_runtime_put_sync(cryp->dev);
- pm_runtime_disable(cryp->dev);
-
- return err;
-}
-
-static int mtk_crypto_remove(struct platform_device *pdev)
-{
- struct mtk_cryp *cryp = platform_get_drvdata(pdev);
-
- mtk_hash_alg_release(cryp);
- mtk_cipher_alg_release(cryp);
- mtk_desc_dma_free(cryp);
-
- clk_disable_unprepare(cryp->clk_cryp);
-
- pm_runtime_put_sync(cryp->dev);
- pm_runtime_disable(cryp->dev);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static const struct of_device_id of_crypto_id[] = {
- { .compatible = "mediatek,eip97-crypto" },
- {},
-};
-MODULE_DEVICE_TABLE(of, of_crypto_id);
-
-static struct platform_driver mtk_crypto_driver = {
- .probe = mtk_crypto_probe,
- .remove = mtk_crypto_remove,
- .driver = {
- .name = "mtk-crypto",
- .of_match_table = of_crypto_id,
- },
-};
-module_platform_driver(mtk_crypto_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Ryder Lee <ryder.lee@mediatek.com>");
-MODULE_DESCRIPTION("Cryptographic accelerator driver for EIP97");
diff --git a/drivers/crypto/mediatek/mtk-platform.h b/drivers/crypto/mediatek/mtk-platform.h
deleted file mode 100644
index 47920c51abac..000000000000
--- a/drivers/crypto/mediatek/mtk-platform.h
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Driver for EIP97 cryptographic accelerator.
- *
- * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
- */
-
-#ifndef __MTK_PLATFORM_H_
-#define __MTK_PLATFORM_H_
-
-#include <crypto/algapi.h>
-#include <crypto/internal/aead.h>
-#include <crypto/internal/hash.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/skcipher.h>
-#include <linux/crypto.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/scatterlist.h>
-#include "mtk-regs.h"
-
-#define MTK_RDR_PROC_THRESH BIT(0)
-#define MTK_RDR_PROC_MODE BIT(23)
-#define MTK_CNT_RST BIT(31)
-#define MTK_IRQ_RDR0 BIT(1)
-#define MTK_IRQ_RDR1 BIT(3)
-#define MTK_IRQ_RDR2 BIT(5)
-#define MTK_IRQ_RDR3 BIT(7)
-
-#define SIZE_IN_WORDS(x) ((x) >> 2)
-
-/**
- * Rings 0/1 are used by AES encryption and decryption.
- * Rings 2/3 are used by SHA.
- */
-enum {
- MTK_RING0,
- MTK_RING1,
- MTK_RING2,
- MTK_RING3,
- MTK_RING_MAX
-};
-
-#define MTK_REC_NUM (MTK_RING_MAX / 2)
-#define MTK_IRQ_NUM 5
-
-/**
- * struct mtk_desc - DMA descriptor
- * @hdr: the descriptor control header
- * @buf: DMA address of input buffer segment
- * @ct: DMA address of command token that control operation flow
- * @ct_hdr: the command token control header
- * @tag: the user-defined field
- * @tfm: DMA address of transform state
- * @bound: padding used to align the descriptor to its offset boundary
- *
- * Structure passed to the crypto engine to describe where source
- * data needs to be fetched and how it needs to be processed.
- */
-struct mtk_desc {
- __le32 hdr;
- __le32 buf;
- __le32 ct;
- __le32 ct_hdr;
- __le32 tag;
- __le32 tfm;
- __le32 bound[2];
-};
-
-#define MTK_DESC_NUM 512
-#define MTK_DESC_OFF SIZE_IN_WORDS(sizeof(struct mtk_desc))
-#define MTK_DESC_SZ (MTK_DESC_OFF - 2)
-#define MTK_DESC_RING_SZ ((sizeof(struct mtk_desc) * MTK_DESC_NUM))
-#define MTK_DESC_CNT(x) ((MTK_DESC_OFF * (x)) << 2)
-#define MTK_DESC_LAST cpu_to_le32(BIT(22))
-#define MTK_DESC_FIRST cpu_to_le32(BIT(23))
-#define MTK_DESC_BUF_LEN(x) cpu_to_le32(x)
-#define MTK_DESC_CT_LEN(x) cpu_to_le32((x) << 24)
-
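A quick worked check of the sizes implied by the macros above (editorial illustration derived from the definitions; not part of the driver source):

	/* struct mtk_desc is eight 32-bit words:
	 *   sizeof(struct mtk_desc)            = 8 * 4   = 32 bytes
	 *   MTK_DESC_OFF  = SIZE_IN_WORDS(32)  = 8 words
	 *   MTK_DESC_SZ   = MTK_DESC_OFF - 2   = 6 words
	 *   MTK_DESC_RING_SZ = 32 * 512        = 16384 bytes per ring direction
	 */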
-/**
- * struct mtk_ring - Descriptor ring
- * @cmd_base: pointer to command descriptor ring base
- * @cmd_next: pointer to the next command descriptor
- * @cmd_dma: DMA address of command descriptor ring
- * @res_base: pointer to result descriptor ring base
- * @res_next: pointer to the next result descriptor
- * @res_prev: pointer to the previous result descriptor
- * @res_dma: DMA address of result descriptor ring
- *
- * A descriptor ring is a circular buffer that is used to manage
- * one or more descriptors. There are two types of descriptor rings:
- * the command descriptor ring and result descriptor ring.
- */
-struct mtk_ring {
- struct mtk_desc *cmd_base;
- struct mtk_desc *cmd_next;
- dma_addr_t cmd_dma;
- struct mtk_desc *res_base;
- struct mtk_desc *res_next;
- struct mtk_desc *res_prev;
- dma_addr_t res_dma;
-};
-
-/**
- * struct mtk_aes_dma - Structure that holds sg list info
- * @sg: pointer to scatter-gather list
- * @nents: number of entries in the sg list
- * @remainder: remainder of sg list
- * @sg_len: number of entries in the sg mapped list
- */
-struct mtk_aes_dma {
- struct scatterlist *sg;
- int nents;
- u32 remainder;
- u32 sg_len;
-};
-
-struct mtk_aes_base_ctx;
-struct mtk_aes_rec;
-struct mtk_cryp;
-
-typedef int (*mtk_aes_fn)(struct mtk_cryp *cryp, struct mtk_aes_rec *aes);
-
-/**
- * struct mtk_aes_rec - AES operation record
- * @cryp: pointer to Cryptographic device
- * @queue: crypto request queue
- * @areq: pointer to async request
- * @done_task: tasklet used in the AES interrupt handler
- * @queue_task: tasklet used to dequeue requests
- * @ctx: pointer to current context
- * @src: the structure that holds source sg list info
- * @dst: the structure that holds destination sg list info
- * @aligned_sg: scatterlist used for alignment
- * @real_dst: pointer to the destination sg list
- * @resume: pointer to resume function
- * @total: request buffer length
- * @buf: pointer to page buffer
- * @id: the ring currently in use
- * @flags: describes the AES operation state
- * @lock: the async queue lock
- *
- * Structure used to record AES execution state.
- */
-struct mtk_aes_rec {
- struct mtk_cryp *cryp;
- struct crypto_queue queue;
- struct crypto_async_request *areq;
- struct tasklet_struct done_task;
- struct tasklet_struct queue_task;
- struct mtk_aes_base_ctx *ctx;
- struct mtk_aes_dma src;
- struct mtk_aes_dma dst;
-
- struct scatterlist aligned_sg;
- struct scatterlist *real_dst;
-
- mtk_aes_fn resume;
-
- size_t total;
- void *buf;
-
- u8 id;
- unsigned long flags;
- /* queue lock */
- spinlock_t lock;
-};
-
-/**
- * struct mtk_sha_rec - SHA operation record
- * @cryp: pointer to Cryptographic device
- * @queue: crypto request queue
- * @req: pointer to ahash request
- * @done_task: tasklet used in the SHA interrupt handler
- * @queue_task: tasklet used to dequeue requests
- * @id: the ring currently in use
- * @flags: describes the SHA operation state
- * @lock: the async queue lock
- *
- * Structure used to record SHA execution state.
- */
-struct mtk_sha_rec {
- struct mtk_cryp *cryp;
- struct crypto_queue queue;
- struct ahash_request *req;
- struct tasklet_struct done_task;
- struct tasklet_struct queue_task;
-
- u8 id;
- unsigned long flags;
- /* queue lock */
- spinlock_t lock;
-};
-
-/**
- * struct mtk_cryp - Cryptographic device
- * @base: pointer to mapped register I/O base
- * @dev: pointer to device
- * @clk_cryp: pointer to crypto clock
- * @irq: global system and ring IRQs
- * @ring: pointer to descriptor rings
- * @aes: pointer to operation record of AES
- * @sha: pointer to operation record of SHA
- * @aes_list: device list of AES
- * @sha_list: device list of SHA
- * @rec: used to select the SHA record for a tfm
- *
- * Structure storing cryptographic device information.
- */
-struct mtk_cryp {
- void __iomem *base;
- struct device *dev;
- struct clk *clk_cryp;
- int irq[MTK_IRQ_NUM];
-
- struct mtk_ring *ring[MTK_RING_MAX];
- struct mtk_aes_rec *aes[MTK_REC_NUM];
- struct mtk_sha_rec *sha[MTK_REC_NUM];
-
- struct list_head aes_list;
- struct list_head sha_list;
-
- bool rec;
-};
-
-int mtk_cipher_alg_register(struct mtk_cryp *cryp);
-void mtk_cipher_alg_release(struct mtk_cryp *cryp);
-int mtk_hash_alg_register(struct mtk_cryp *cryp);
-void mtk_hash_alg_release(struct mtk_cryp *cryp);
-
-#endif /* __MTK_PLATFORM_H_ */
diff --git a/drivers/crypto/mediatek/mtk-regs.h b/drivers/crypto/mediatek/mtk-regs.h
deleted file mode 100644
index d3defda7a750..000000000000
--- a/drivers/crypto/mediatek/mtk-regs.h
+++ /dev/null
@@ -1,190 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Support for MediaTek cryptographic accelerator.
- *
- * Copyright (c) 2016 MediaTek Inc.
- * Author: Ryder Lee <ryder.lee@mediatek.com>
- */
-
-#ifndef __MTK_REGS_H__
-#define __MTK_REGS_H__
-
-/* HIA, Command Descriptor Ring Manager */
-#define CDR_BASE_ADDR_LO(x) (0x0 + ((x) << 12))
-#define CDR_BASE_ADDR_HI(x) (0x4 + ((x) << 12))
-#define CDR_DATA_BASE_ADDR_LO(x) (0x8 + ((x) << 12))
-#define CDR_DATA_BASE_ADDR_HI(x) (0xC + ((x) << 12))
-#define CDR_ACD_BASE_ADDR_LO(x) (0x10 + ((x) << 12))
-#define CDR_ACD_BASE_ADDR_HI(x) (0x14 + ((x) << 12))
-#define CDR_RING_SIZE(x) (0x18 + ((x) << 12))
-#define CDR_DESC_SIZE(x) (0x1C + ((x) << 12))
-#define CDR_CFG(x) (0x20 + ((x) << 12))
-#define CDR_DMA_CFG(x) (0x24 + ((x) << 12))
-#define CDR_THRESH(x) (0x28 + ((x) << 12))
-#define CDR_PREP_COUNT(x) (0x2C + ((x) << 12))
-#define CDR_PROC_COUNT(x) (0x30 + ((x) << 12))
-#define CDR_PREP_PNTR(x) (0x34 + ((x) << 12))
-#define CDR_PROC_PNTR(x) (0x38 + ((x) << 12))
-#define CDR_STAT(x) (0x3C + ((x) << 12))
-
-/* HIA, Result Descriptor Ring Manager */
-#define RDR_BASE_ADDR_LO(x) (0x800 + ((x) << 12))
-#define RDR_BASE_ADDR_HI(x) (0x804 + ((x) << 12))
-#define RDR_DATA_BASE_ADDR_LO(x) (0x808 + ((x) << 12))
-#define RDR_DATA_BASE_ADDR_HI(x) (0x80C + ((x) << 12))
-#define RDR_ACD_BASE_ADDR_LO(x) (0x810 + ((x) << 12))
-#define RDR_ACD_BASE_ADDR_HI(x) (0x814 + ((x) << 12))
-#define RDR_RING_SIZE(x) (0x818 + ((x) << 12))
-#define RDR_DESC_SIZE(x) (0x81C + ((x) << 12))
-#define RDR_CFG(x) (0x820 + ((x) << 12))
-#define RDR_DMA_CFG(x) (0x824 + ((x) << 12))
-#define RDR_THRESH(x) (0x828 + ((x) << 12))
-#define RDR_PREP_COUNT(x) (0x82C + ((x) << 12))
-#define RDR_PROC_COUNT(x) (0x830 + ((x) << 12))
-#define RDR_PREP_PNTR(x) (0x834 + ((x) << 12))
-#define RDR_PROC_PNTR(x) (0x838 + ((x) << 12))
-#define RDR_STAT(x) (0x83C + ((x) << 12))
-
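As a worked example of the per-ring register layout above (editorial note, assuming the macros as defined): each ring's CDR and RDR register banks sit at a 4 KB stride (ring x starts at x << 12), so CDR_CFG(0) = 0x0020, CDR_CFG(1) = 0x1020 and RDR_STAT(2) = 0x283C.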
-/* HIA, Ring AIC */
-#define AIC_POL_CTRL(x) (0xE000 - ((x) << 12))
-#define AIC_TYPE_CTRL(x) (0xE004 - ((x) << 12))
-#define AIC_ENABLE_CTRL(x) (0xE008 - ((x) << 12))
-#define AIC_RAW_STAL(x) (0xE00C - ((x) << 12))
-#define AIC_ENABLE_SET(x) (0xE00C - ((x) << 12))
-#define AIC_ENABLED_STAT(x) (0xE010 - ((x) << 12))
-#define AIC_ACK(x) (0xE010 - ((x) << 12))
-#define AIC_ENABLE_CLR(x) (0xE014 - ((x) << 12))
-#define AIC_OPTIONS(x) (0xE018 - ((x) << 12))
-#define AIC_VERSION(x) (0xE01C - ((x) << 12))
-
-/* HIA, Global AIC */
-#define AIC_G_POL_CTRL 0xF800
-#define AIC_G_TYPE_CTRL 0xF804
-#define AIC_G_ENABLE_CTRL 0xF808
-#define AIC_G_RAW_STAT 0xF80C
-#define AIC_G_ENABLE_SET 0xF80C
-#define AIC_G_ENABLED_STAT 0xF810
-#define AIC_G_ACK 0xF810
-#define AIC_G_ENABLE_CLR 0xF814
-#define AIC_G_OPTIONS 0xF818
-#define AIC_G_VERSION 0xF81C
-
-/* HIA, Data Fetch Engine */
-#define DFE_CFG 0xF000
-#define DFE_PRIO_0 0xF010
-#define DFE_PRIO_1 0xF014
-#define DFE_PRIO_2 0xF018
-#define DFE_PRIO_3 0xF01C
-
-/* HIA, Data Fetch Engine access monitoring for CDR */
-#define DFE_RING_REGION_LO(x) (0xF080 + ((x) << 3))
-#define DFE_RING_REGION_HI(x) (0xF084 + ((x) << 3))
-
-/* HIA, Data Fetch Engine thread control and status for thread */
-#define DFE_THR_CTRL 0xF200
-#define DFE_THR_STAT 0xF204
-#define DFE_THR_DESC_CTRL 0xF208
-#define DFE_THR_DESC_DPTR_LO 0xF210
-#define DFE_THR_DESC_DPTR_HI 0xF214
-#define DFE_THR_DESC_ACDPTR_LO 0xF218
-#define DFE_THR_DESC_ACDPTR_HI 0xF21C
-
-/* HIA, Data Store Engine */
-#define DSE_CFG 0xF400
-#define DSE_PRIO_0 0xF410
-#define DSE_PRIO_1 0xF414
-#define DSE_PRIO_2 0xF418
-#define DSE_PRIO_3 0xF41C
-
-/* HIA, Data Store Engine access monitoring for RDR */
-#define DSE_RING_REGION_LO(x) (0xF480 + ((x) << 3))
-#define DSE_RING_REGION_HI(x) (0xF484 + ((x) << 3))
-
-/* HIA, Data Store Engine thread control and status for thread */
-#define DSE_THR_CTRL 0xF600
-#define DSE_THR_STAT 0xF604
-#define DSE_THR_DESC_CTRL 0xF608
-#define DSE_THR_DESC_DPTR_LO 0xF610
-#define DSE_THR_DESC_DPTR_HI 0xF614
-#define DSE_THR_DESC_S_DPTR_LO 0xF618
-#define DSE_THR_DESC_S_DPTR_HI 0xF61C
-#define DSE_THR_ERROR_STAT 0xF620
-
-/* HIA Global */
-#define HIA_MST_CTRL 0xFFF4
-#define HIA_OPTIONS 0xFFF8
-#define HIA_VERSION 0xFFFC
-
-/* Processing Engine Input Side, Processing Engine */
-#define PE_IN_DBUF_THRESH 0x10000
-#define PE_IN_TBUF_THRESH 0x10100
-
-/* Packet Engine Configuration / Status Registers */
-#define PE_TOKEN_CTRL_STAT 0x11000
-#define PE_FUNCTION_EN 0x11004
-#define PE_CONTEXT_CTRL 0x11008
-#define PE_INTERRUPT_CTRL_STAT 0x11010
-#define PE_CONTEXT_STAT 0x1100C
-#define PE_OUT_TRANS_CTRL_STAT 0x11018
-#define PE_OUT_BUF_CTRL 0x1101C
-
-/* Packet Engine PRNG Registers */
-#define PE_PRNG_STAT 0x11040
-#define PE_PRNG_CTRL 0x11044
-#define PE_PRNG_SEED_L 0x11048
-#define PE_PRNG_SEED_H 0x1104C
-#define PE_PRNG_KEY_0_L 0x11050
-#define PE_PRNG_KEY_0_H 0x11054
-#define PE_PRNG_KEY_1_L 0x11058
-#define PE_PRNG_KEY_1_H 0x1105C
-#define PE_PRNG_RES_0 0x11060
-#define PE_PRNG_RES_1 0x11064
-#define PE_PRNG_RES_2 0x11068
-#define PE_PRNG_RES_3 0x1106C
-#define PE_PRNG_LFSR_L 0x11070
-#define PE_PRNG_LFSR_H 0x11074
-
-/* Packet Engine AIC */
-#define PE_EIP96_AIC_POL_CTRL 0x113C0
-#define PE_EIP96_AIC_TYPE_CTRL 0x113C4
-#define PE_EIP96_AIC_ENABLE_CTRL 0x113C8
-#define PE_EIP96_AIC_RAW_STAT 0x113CC
-#define PE_EIP96_AIC_ENABLE_SET 0x113CC
-#define PE_EIP96_AIC_ENABLED_STAT 0x113D0
-#define PE_EIP96_AIC_ACK 0x113D0
-#define PE_EIP96_AIC_ENABLE_CLR 0x113D4
-#define PE_EIP96_AIC_OPTIONS 0x113D8
-#define PE_EIP96_AIC_VERSION 0x113DC
-
-/* Packet Engine Options & Version Registers */
-#define PE_EIP96_OPTIONS 0x113F8
-#define PE_EIP96_VERSION 0x113FC
-
-/* Processing Engine Output Side */
-#define PE_OUT_DBUF_THRESH 0x11C00
-#define PE_OUT_TBUF_THRESH 0x11D00
-
-/* Processing Engine Local AIC */
-#define PE_AIC_POL_CTRL 0x11F00
-#define PE_AIC_TYPE_CTRL 0x11F04
-#define PE_AIC_ENABLE_CTRL 0x11F08
-#define PE_AIC_RAW_STAT 0x11F0C
-#define PE_AIC_ENABLE_SET 0x11F0C
-#define PE_AIC_ENABLED_STAT 0x11F10
-#define PE_AIC_ENABLE_CLR 0x11F14
-#define PE_AIC_OPTIONS 0x11F18
-#define PE_AIC_VERSION 0x11F1C
-
-/* Processing Engine General Configuration and Version */
-#define PE_IN_FLIGHT 0x11FF0
-#define PE_OPTIONS 0x11FF8
-#define PE_VERSION 0x11FFC
-
-/* EIP-97 - Global */
-#define EIP97_CLOCK_STATE 0x1FFE4
-#define EIP97_FORCE_CLOCK_ON 0x1FFE8
-#define EIP97_FORCE_CLOCK_OFF 0x1FFEC
-#define EIP97_MST_CTRL 0x1FFF4
-#define EIP97_OPTIONS 0x1FFF8
-#define EIP97_VERSION 0x1FFFC
-#endif /* __MTK_REGS_H__ */
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
deleted file mode 100644
index f55aacdafbef..000000000000
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ /dev/null
@@ -1,1353 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Cryptographic API.
- *
- * Driver for EIP97 SHA1/SHA2(HMAC) acceleration.
- *
- * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
- *
- * Some ideas are from atmel-sha.c and omap-sham.c drivers.
- */
-
-#include <crypto/hmac.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include "mtk-platform.h"
-
-#define SHA_ALIGN_MSK (sizeof(u32) - 1)
-#define SHA_QUEUE_SIZE 512
-#define SHA_BUF_SIZE ((u32)PAGE_SIZE)
-
-#define SHA_OP_UPDATE 1
-#define SHA_OP_FINAL 2
-
-#define SHA_DATA_LEN_MSK cpu_to_le32(GENMASK(16, 0))
-#define SHA_MAX_DIGEST_BUF_SIZE 32
-
-/* SHA command token */
-#define SHA_CT_SIZE 5
-#define SHA_CT_CTRL_HDR cpu_to_le32(0x02220000)
-#define SHA_CMD0 cpu_to_le32(0x03020000)
-#define SHA_CMD1 cpu_to_le32(0x21060000)
-#define SHA_CMD2 cpu_to_le32(0xe0e63802)
-
-/* SHA transform information */
-#define SHA_TFM_HASH cpu_to_le32(0x2 << 0)
-#define SHA_TFM_SIZE(x) cpu_to_le32((x) << 8)
-#define SHA_TFM_START cpu_to_le32(0x1 << 4)
-#define SHA_TFM_CONTINUE cpu_to_le32(0x1 << 5)
-#define SHA_TFM_HASH_STORE cpu_to_le32(0x1 << 19)
-#define SHA_TFM_SHA1 cpu_to_le32(0x2 << 23)
-#define SHA_TFM_SHA256 cpu_to_le32(0x3 << 23)
-#define SHA_TFM_SHA224 cpu_to_le32(0x4 << 23)
-#define SHA_TFM_SHA512 cpu_to_le32(0x5 << 23)
-#define SHA_TFM_SHA384 cpu_to_le32(0x6 << 23)
-#define SHA_TFM_DIGEST(x) cpu_to_le32(((x) & GENMASK(3, 0)) << 24)
-
-/* SHA flags */
-#define SHA_FLAGS_BUSY BIT(0)
-#define SHA_FLAGS_FINAL BIT(1)
-#define SHA_FLAGS_FINUP BIT(2)
-#define SHA_FLAGS_SG BIT(3)
-#define SHA_FLAGS_ALGO_MSK GENMASK(8, 4)
-#define SHA_FLAGS_SHA1 BIT(4)
-#define SHA_FLAGS_SHA224 BIT(5)
-#define SHA_FLAGS_SHA256 BIT(6)
-#define SHA_FLAGS_SHA384 BIT(7)
-#define SHA_FLAGS_SHA512 BIT(8)
-#define SHA_FLAGS_HMAC BIT(9)
-#define SHA_FLAGS_PAD BIT(10)
-
-/**
- * mtk_sha_info - hardware information of SHA
- * @ctrl: transform control words
- * @cmd: command token, hardware instructions
- * @tfm: transform state of the hash algorithm
- * @digest: buffer for the hash state and the resulting digest
- */
-struct mtk_sha_info {
- __le32 ctrl[2];
- __le32 cmd[3];
- __le32 tfm[2];
- __le32 digest[SHA_MAX_DIGEST_BUF_SIZE];
-};
-
-struct mtk_sha_reqctx {
- struct mtk_sha_info info;
- unsigned long flags;
- unsigned long op;
-
- u64 digcnt;
- size_t bufcnt;
- dma_addr_t dma_addr;
-
- __le32 ct_hdr;
- u32 ct_size;
- dma_addr_t ct_dma;
- dma_addr_t tfm_dma;
-
- /* Walk state */
- struct scatterlist *sg;
- u32 offset; /* Offset in current sg */
- u32 total; /* Total request */
- size_t ds;
- size_t bs;
-
- u8 *buffer;
-};
-
-struct mtk_sha_hmac_ctx {
- struct crypto_shash *shash;
- u8 ipad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
- u8 opad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
-};
-
-struct mtk_sha_ctx {
- struct mtk_cryp *cryp;
- unsigned long flags;
- u8 id;
- u8 buf[SHA_BUF_SIZE] __aligned(sizeof(u32));
-
- struct mtk_sha_hmac_ctx base[];
-};
-
-struct mtk_sha_drv {
- struct list_head dev_list;
- /* Device list lock */
- spinlock_t lock;
-};
-
-static struct mtk_sha_drv mtk_sha = {
- .dev_list = LIST_HEAD_INIT(mtk_sha.dev_list),
- .lock = __SPIN_LOCK_UNLOCKED(mtk_sha.lock),
-};
-
-static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
- struct ahash_request *req);
-
-static inline u32 mtk_sha_read(struct mtk_cryp *cryp, u32 offset)
-{
- return readl_relaxed(cryp->base + offset);
-}
-
-static inline void mtk_sha_write(struct mtk_cryp *cryp,
- u32 offset, u32 value)
-{
- writel_relaxed(value, cryp->base + offset);
-}
-
-static inline void mtk_sha_ring_shift(struct mtk_ring *ring,
- struct mtk_desc **cmd_curr,
- struct mtk_desc **res_curr,
- int *count)
-{
- *cmd_curr = ring->cmd_next++;
- *res_curr = ring->res_next++;
- (*count)++;
-
- if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) {
- ring->cmd_next = ring->cmd_base;
- ring->res_next = ring->res_base;
- }
-}
-
-static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx)
-{
- struct mtk_cryp *cryp = NULL;
- struct mtk_cryp *tmp;
-
- spin_lock_bh(&mtk_sha.lock);
- if (!tctx->cryp) {
- list_for_each_entry(tmp, &mtk_sha.dev_list, sha_list) {
- cryp = tmp;
- break;
- }
- tctx->cryp = cryp;
- } else {
- cryp = tctx->cryp;
- }
-
- /*
- * Assign a record id to the tfm in round-robin fashion; this
- * binds the tfm to the corresponding descriptor rings.
- */
- tctx->id = cryp->rec;
- cryp->rec = !cryp->rec;
-
- spin_unlock_bh(&mtk_sha.lock);
-
- return cryp;
-}
-
-static int mtk_sha_append_sg(struct mtk_sha_reqctx *ctx)
-{
- size_t count;
-
- while ((ctx->bufcnt < SHA_BUF_SIZE) && ctx->total) {
- count = min(ctx->sg->length - ctx->offset, ctx->total);
- count = min(count, SHA_BUF_SIZE - ctx->bufcnt);
-
- if (count <= 0) {
- /*
- * count can be <= 0 because the buffer is full or
- * because the sg length is 0. In the latter case,
- * check if there is another sg in the list; a 0 length
- * sg doesn't necessarily mean the end of the sg list.
- */
- if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
- ctx->sg = sg_next(ctx->sg);
- continue;
- } else {
- break;
- }
- }
-
- scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
- ctx->offset, count, 0);
-
- ctx->bufcnt += count;
- ctx->offset += count;
- ctx->total -= count;
-
- if (ctx->offset == ctx->sg->length) {
- ctx->sg = sg_next(ctx->sg);
- if (ctx->sg)
- ctx->offset = 0;
- else
- ctx->total = 0;
- }
- }
-
- return 0;
-}
-
-/*
- * The purpose of this padding is to ensure that the padded message is a
- * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
- * The bit "1" is appended at the end of the message, followed by zero
- * bits until the padding is "padlen" bytes long. Then a 64-bit block
- * (SHA1/SHA224/SHA256) or a 128-bit block (SHA384/SHA512) equal to the
- * message length in bits is appended.
- *
- * For SHA1/SHA224/SHA256, padlen is calculated as follows:
- * - if message length < 56 bytes then padlen = 56 - message length
- * - else padlen = 64 + 56 - message length
- *
- * For SHA384/SHA512, padlen is calculated as follows:
- * - if message length < 112 bytes then padlen = 112 - message length
- * - else padlen = 128 + 112 - message length
- */
-static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
-{
- u32 index, padlen;
- __be64 bits[2];
- u64 size = ctx->digcnt;
-
- size += ctx->bufcnt;
- size += len;
-
- bits[1] = cpu_to_be64(size << 3);
- bits[0] = cpu_to_be64(size >> 61);
-
- switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
- case SHA_FLAGS_SHA384:
- case SHA_FLAGS_SHA512:
- index = ctx->bufcnt & 0x7f;
- padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
- *(ctx->buffer + ctx->bufcnt) = 0x80;
- memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
- memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
- ctx->bufcnt += padlen + 16;
- ctx->flags |= SHA_FLAGS_PAD;
- break;
-
- default:
- index = ctx->bufcnt & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
- *(ctx->buffer + ctx->bufcnt) = 0x80;
- memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
- memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
- ctx->bufcnt += padlen + 8;
- ctx->flags |= SHA_FLAGS_PAD;
- break;
- }
-}
-
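A minimal standalone sketch of the 64-byte-block padding rule used above (illustrative only; sha_padlen() is a hypothetical helper, not part of the driver):

	/* Hypothetical helper mirroring the SHA-1/224/256 rule: pad the buffered
	 * data up to the 56-byte boundary of a 64-byte block, leaving 8 bytes
	 * for the 64-bit length field.
	 */
	static inline u32 sha_padlen(u32 buffered)
	{
		u32 index = buffered & 0x3f;	/* bytes already in the last block */

		return (index < 56) ? (56 - index) : ((64 + 56) - index);
	}

For example, 20 buffered bytes give padlen = 36 and 20 + 36 + 8 = 64 bytes, exactly one block; 60 buffered bytes give padlen = 60 and 60 + 60 + 8 = 128 bytes, two blocks.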
-/* Initialize basic transform information of SHA */
-static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
-{
- struct mtk_sha_info *info = &ctx->info;
-
- ctx->ct_hdr = SHA_CT_CTRL_HDR;
- ctx->ct_size = SHA_CT_SIZE;
-
- info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));
-
- switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
- case SHA_FLAGS_SHA1:
- info->tfm[0] |= SHA_TFM_SHA1;
- break;
- case SHA_FLAGS_SHA224:
- info->tfm[0] |= SHA_TFM_SHA224;
- break;
- case SHA_FLAGS_SHA256:
- info->tfm[0] |= SHA_TFM_SHA256;
- break;
- case SHA_FLAGS_SHA384:
- info->tfm[0] |= SHA_TFM_SHA384;
- break;
- case SHA_FLAGS_SHA512:
- info->tfm[0] |= SHA_TFM_SHA512;
- break;
-
- default:
- /* Should not happen... */
- return;
- }
-
- info->tfm[1] = SHA_TFM_HASH_STORE;
- info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
- info->ctrl[1] = info->tfm[1];
-
- info->cmd[0] = SHA_CMD0;
- info->cmd[1] = SHA_CMD1;
- info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
-}
-
-/*
- * Update the input data length field of the transform information and
- * map it to the DMA region.
- */
-static int mtk_sha_info_update(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha,
- size_t len1, size_t len2)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
- struct mtk_sha_info *info = &ctx->info;
-
- ctx->ct_hdr &= ~SHA_DATA_LEN_MSK;
- ctx->ct_hdr |= cpu_to_le32(len1 + len2);
- info->cmd[0] &= ~SHA_DATA_LEN_MSK;
- info->cmd[0] |= cpu_to_le32(len1 + len2);
-
- /* Setting SHA_TFM_START only for the first iteration */
- if (ctx->digcnt)
- info->ctrl[0] &= ~SHA_TFM_START;
-
- ctx->digcnt += len1;
-
- ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
- dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
- return -EINVAL;
- }
-
- ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd);
-
- return 0;
-}
-
-/*
- * Because of a hardware limitation, we must pre-calculate the inner
- * and outer digests that need to be processed first by the engine, then
- * apply the resulting digest to the input message. These complex hashing
- * procedures limit HMAC performance, so we fall back to a software hash.
- */
-static int mtk_sha_finish_hmac(struct ahash_request *req)
-{
- struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct mtk_sha_hmac_ctx *bctx = tctx->base;
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
-
- SHASH_DESC_ON_STACK(shash, bctx->shash);
-
- shash->tfm = bctx->shash;
-
- return crypto_shash_init(shash) ?:
- crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
- crypto_shash_finup(shash, req->result, ctx->ds, req->result);
-}
-
-/* Initialize request context */
-static int mtk_sha_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
-
- ctx->flags = 0;
- ctx->ds = crypto_ahash_digestsize(tfm);
-
- switch (ctx->ds) {
- case SHA1_DIGEST_SIZE:
- ctx->flags |= SHA_FLAGS_SHA1;
- ctx->bs = SHA1_BLOCK_SIZE;
- break;
- case SHA224_DIGEST_SIZE:
- ctx->flags |= SHA_FLAGS_SHA224;
- ctx->bs = SHA224_BLOCK_SIZE;
- break;
- case SHA256_DIGEST_SIZE:
- ctx->flags |= SHA_FLAGS_SHA256;
- ctx->bs = SHA256_BLOCK_SIZE;
- break;
- case SHA384_DIGEST_SIZE:
- ctx->flags |= SHA_FLAGS_SHA384;
- ctx->bs = SHA384_BLOCK_SIZE;
- break;
- case SHA512_DIGEST_SIZE:
- ctx->flags |= SHA_FLAGS_SHA512;
- ctx->bs = SHA512_BLOCK_SIZE;
- break;
- default:
- return -EINVAL;
- }
-
- ctx->bufcnt = 0;
- ctx->digcnt = 0;
- ctx->buffer = tctx->buf;
-
- if (tctx->flags & SHA_FLAGS_HMAC) {
- struct mtk_sha_hmac_ctx *bctx = tctx->base;
-
- memcpy(ctx->buffer, bctx->ipad, ctx->bs);
- ctx->bufcnt = ctx->bs;
- ctx->flags |= SHA_FLAGS_HMAC;
- }
-
- return 0;
-}
-
-static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
- dma_addr_t addr1, size_t len1,
- dma_addr_t addr2, size_t len2)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
- struct mtk_ring *ring = cryp->ring[sha->id];
- struct mtk_desc *cmd, *res;
- int err, count = 0;
-
- err = mtk_sha_info_update(cryp, sha, len1, len2);
- if (err)
- return err;
-
- /* Fill in the command/result descriptors */
- mtk_sha_ring_shift(ring, &cmd, &res, &count);
-
- res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1);
- cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) |
- MTK_DESC_CT_LEN(ctx->ct_size);
- cmd->buf = cpu_to_le32(addr1);
- cmd->ct = cpu_to_le32(ctx->ct_dma);
- cmd->ct_hdr = ctx->ct_hdr;
- cmd->tfm = cpu_to_le32(ctx->tfm_dma);
-
- if (len2) {
- mtk_sha_ring_shift(ring, &cmd, &res, &count);
-
- res->hdr = MTK_DESC_BUF_LEN(len2);
- cmd->hdr = MTK_DESC_BUF_LEN(len2);
- cmd->buf = cpu_to_le32(addr2);
- }
-
- cmd->hdr |= MTK_DESC_LAST;
- res->hdr |= MTK_DESC_LAST;
-
- /*
- * Make sure that all changes to the DMA ring are done before we
- * start the engine.
- */
- wmb();
- /* Start DMA transfer */
- mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
- mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
-
- return -EINPROGRESS;
-}
-
-static int mtk_sha_dma_map(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha,
- struct mtk_sha_reqctx *ctx,
- size_t count)
-{
- ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
- SHA_BUF_SIZE, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
- dev_err(cryp->dev, "dma map error\n");
- return -EINVAL;
- }
-
- ctx->flags &= ~SHA_FLAGS_SG;
-
- return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0);
-}
-
-static int mtk_sha_update_slow(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
- size_t count;
- u32 final;
-
- mtk_sha_append_sg(ctx);
-
- final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
-
- dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt);
-
- if (final) {
- sha->flags |= SHA_FLAGS_FINAL;
- mtk_sha_fill_padding(ctx, 0);
- }
-
- if (final || (ctx->bufcnt == SHA_BUF_SIZE && ctx->total)) {
- count = ctx->bufcnt;
- ctx->bufcnt = 0;
-
- return mtk_sha_dma_map(cryp, sha, ctx, count);
- }
- return 0;
-}
-
-static int mtk_sha_update_start(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
- u32 len, final, tail;
- struct scatterlist *sg;
-
- if (!ctx->total)
- return 0;
-
- if (ctx->bufcnt || ctx->offset)
- return mtk_sha_update_slow(cryp, sha);
-
- sg = ctx->sg;
-
- if (!IS_ALIGNED(sg->offset, sizeof(u32)))
- return mtk_sha_update_slow(cryp, sha);
-
- if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->bs))
- /* size is not ctx->bs aligned */
- return mtk_sha_update_slow(cryp, sha);
-
- len = min(ctx->total, sg->length);
-
- if (sg_is_last(sg)) {
- if (!(ctx->flags & SHA_FLAGS_FINUP)) {
- /* a non-final sg must be ctx->bs aligned */
- tail = len & (ctx->bs - 1);
- len -= tail;
- }
- }
-
- ctx->total -= len;
- ctx->offset = len; /* offset where to start slow */
-
- final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
-
- /* Add padding */
- if (final) {
- size_t count;
-
- tail = len & (ctx->bs - 1);
- len -= tail;
- ctx->total += tail;
- ctx->offset = len; /* offset where to start slow */
-
- sg = ctx->sg;
- mtk_sha_append_sg(ctx);
- mtk_sha_fill_padding(ctx, len);
-
- ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
- SHA_BUF_SIZE, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
- dev_err(cryp->dev, "dma map bytes error\n");
- return -EINVAL;
- }
-
- sha->flags |= SHA_FLAGS_FINAL;
- count = ctx->bufcnt;
- ctx->bufcnt = 0;
-
- if (len == 0) {
- ctx->flags &= ~SHA_FLAGS_SG;
- return mtk_sha_xmit(cryp, sha, ctx->dma_addr,
- count, 0, 0);
-
- } else {
- ctx->sg = sg;
- if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
- dev_err(cryp->dev, "dma_map_sg error\n");
- return -EINVAL;
- }
-
- ctx->flags |= SHA_FLAGS_SG;
- return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
- len, ctx->dma_addr, count);
- }
- }
-
- if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
- dev_err(cryp->dev, "dma_map_sg error\n");
- return -EINVAL;
- }
-
- ctx->flags |= SHA_FLAGS_SG;
-
- return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
- len, 0, 0);
-}
-
-static int mtk_sha_final_req(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
- size_t count;
-
- mtk_sha_fill_padding(ctx, 0);
-
- sha->flags |= SHA_FLAGS_FINAL;
- count = ctx->bufcnt;
- ctx->bufcnt = 0;
-
- return mtk_sha_dma_map(cryp, sha, ctx, count);
-}
-
-/* Copy ready hash (+ finalize hmac) */
-static int mtk_sha_finish(struct ahash_request *req)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
- __le32 *digest = ctx->info.digest;
- u32 *result = (u32 *)req->result;
- int i;
-
- /* Get the hash from the digest buffer */
- for (i = 0; i < SIZE_IN_WORDS(ctx->ds); i++)
- result[i] = le32_to_cpu(digest[i]);
-
- if (ctx->flags & SHA_FLAGS_HMAC)
- return mtk_sha_finish_hmac(req);
-
- return 0;
-}
-
-static void mtk_sha_finish_req(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha,
- int err)
-{
- if (likely(!err && (SHA_FLAGS_FINAL & sha->flags)))
- err = mtk_sha_finish(sha->req);
-
- sha->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL);
-
- sha->req->base.complete(&sha->req->base, err);
-
- /* Handle new request */
- tasklet_schedule(&sha->queue_task);
-}
-
-static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
- struct ahash_request *req)
-{
- struct mtk_sha_rec *sha = cryp->sha[id];
- struct crypto_async_request *async_req, *backlog;
- struct mtk_sha_reqctx *ctx;
- unsigned long flags;
- int err = 0, ret = 0;
-
- spin_lock_irqsave(&sha->lock, flags);
- if (req)
- ret = ahash_enqueue_request(&sha->queue, req);
-
- if (SHA_FLAGS_BUSY & sha->flags) {
- spin_unlock_irqrestore(&sha->lock, flags);
- return ret;
- }
-
- backlog = crypto_get_backlog(&sha->queue);
- async_req = crypto_dequeue_request(&sha->queue);
- if (async_req)
- sha->flags |= SHA_FLAGS_BUSY;
- spin_unlock_irqrestore(&sha->lock, flags);
-
- if (!async_req)
- return ret;
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
- req = ahash_request_cast(async_req);
- ctx = ahash_request_ctx(req);
-
- sha->req = req;
-
- mtk_sha_info_init(ctx);
-
- if (ctx->op == SHA_OP_UPDATE) {
- err = mtk_sha_update_start(cryp, sha);
- if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
- /* No final() after finup() */
- err = mtk_sha_final_req(cryp, sha);
- } else if (ctx->op == SHA_OP_FINAL) {
- err = mtk_sha_final_req(cryp, sha);
- }
-
- if (unlikely(err != -EINPROGRESS))
- /* Task will not finish it, so do it here */
- mtk_sha_finish_req(cryp, sha, err);
-
- return ret;
-}
-
-static int mtk_sha_enqueue(struct ahash_request *req, u32 op)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
- struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
-
- ctx->op = op;
-
- return mtk_sha_handle_queue(tctx->cryp, tctx->id, req);
-}
-
-static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
-
- dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
- DMA_BIDIRECTIONAL);
-
- if (ctx->flags & SHA_FLAGS_SG) {
- dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE);
- if (ctx->sg->length == ctx->offset) {
- ctx->sg = sg_next(ctx->sg);
- if (ctx->sg)
- ctx->offset = 0;
- }
- if (ctx->flags & SHA_FLAGS_PAD) {
- dma_unmap_single(cryp->dev, ctx->dma_addr,
- SHA_BUF_SIZE, DMA_TO_DEVICE);
- }
- } else
- dma_unmap_single(cryp->dev, ctx->dma_addr,
- SHA_BUF_SIZE, DMA_TO_DEVICE);
-}
-
-static void mtk_sha_complete(struct mtk_cryp *cryp,
- struct mtk_sha_rec *sha)
-{
- int err = 0;
-
- err = mtk_sha_update_start(cryp, sha);
- if (err != -EINPROGRESS)
- mtk_sha_finish_req(cryp, sha, err);
-}
-
-static int mtk_sha_update(struct ahash_request *req)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
-
- ctx->total = req->nbytes;
- ctx->sg = req->src;
- ctx->offset = 0;
-
- if ((ctx->bufcnt + ctx->total < SHA_BUF_SIZE) &&
- !(ctx->flags & SHA_FLAGS_FINUP))
- return mtk_sha_append_sg(ctx);
-
- return mtk_sha_enqueue(req, SHA_OP_UPDATE);
-}
-
-static int mtk_sha_final(struct ahash_request *req)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
-
- ctx->flags |= SHA_FLAGS_FINUP;
-
- if (ctx->flags & SHA_FLAGS_PAD)
- return mtk_sha_finish(req);
-
- return mtk_sha_enqueue(req, SHA_OP_FINAL);
-}
-
-static int mtk_sha_finup(struct ahash_request *req)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
- int err1, err2;
-
- ctx->flags |= SHA_FLAGS_FINUP;
-
- err1 = mtk_sha_update(req);
- if (err1 == -EINPROGRESS ||
- (err1 == -EBUSY && (ahash_request_flags(req) &
- CRYPTO_TFM_REQ_MAY_BACKLOG)))
- return err1;
- /*
- * final() always has to be called to clean up resources,
- * even if update() failed
- */
- err2 = mtk_sha_final(req);
-
- return err1 ?: err2;
-}
-
-static int mtk_sha_digest(struct ahash_request *req)
-{
- return mtk_sha_init(req) ?: mtk_sha_finup(req);
-}
-
-static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
- u32 keylen)
-{
- struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
- struct mtk_sha_hmac_ctx *bctx = tctx->base;
- size_t bs = crypto_shash_blocksize(bctx->shash);
- size_t ds = crypto_shash_digestsize(bctx->shash);
- int err, i;
-
- if (keylen > bs) {
- err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
- bctx->ipad);
- if (err)
- return err;
- keylen = ds;
- } else {
- memcpy(bctx->ipad, key, keylen);
- }
-
- memset(bctx->ipad + keylen, 0, bs - keylen);
- memcpy(bctx->opad, bctx->ipad, bs);
-
- for (i = 0; i < bs; i++) {
- bctx->ipad[i] ^= HMAC_IPAD_VALUE;
- bctx->opad[i] ^= HMAC_OPAD_VALUE;
- }
-
- return 0;
-}
-
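The key preprocessing above is the standard HMAC construction from RFC 2104: a key longer than one block is first hashed down to the digest size, then zero-padded to the block size and XORed with the ipad/opad constants, so that HMAC(K, m) = H((K' xor opad) || H((K' xor ipad) || m)). In this driver the inner hash is driven through the engine (mtk_sha_init() prepends the ipad block to the request buffer), while the outer hash is finished in software by mtk_sha_finish_hmac().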
-static int mtk_sha_export(struct ahash_request *req, void *out)
-{
- const struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
-
- memcpy(out, ctx, sizeof(*ctx));
- return 0;
-}
-
-static int mtk_sha_import(struct ahash_request *req, const void *in)
-{
- struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
-
- memcpy(ctx, in, sizeof(*ctx));
- return 0;
-}
-
-static int mtk_sha_cra_init_alg(struct crypto_tfm *tfm,
- const char *alg_base)
-{
- struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
- struct mtk_cryp *cryp = NULL;
-
- cryp = mtk_sha_find_dev(tctx);
- if (!cryp)
- return -ENODEV;
-
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct mtk_sha_reqctx));
-
- if (alg_base) {
- struct mtk_sha_hmac_ctx *bctx = tctx->base;
-
- tctx->flags |= SHA_FLAGS_HMAC;
- bctx->shash = crypto_alloc_shash(alg_base, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(bctx->shash)) {
- pr_err("base driver %s could not be loaded.\n",
- alg_base);
-
- return PTR_ERR(bctx->shash);
- }
- }
- return 0;
-}
-
-static int mtk_sha_cra_init(struct crypto_tfm *tfm)
-{
- return mtk_sha_cra_init_alg(tfm, NULL);
-}
-
-static int mtk_sha_cra_sha1_init(struct crypto_tfm *tfm)
-{
- return mtk_sha_cra_init_alg(tfm, "sha1");
-}
-
-static int mtk_sha_cra_sha224_init(struct crypto_tfm *tfm)
-{
- return mtk_sha_cra_init_alg(tfm, "sha224");
-}
-
-static int mtk_sha_cra_sha256_init(struct crypto_tfm *tfm)
-{
- return mtk_sha_cra_init_alg(tfm, "sha256");
-}
-
-static int mtk_sha_cra_sha384_init(struct crypto_tfm *tfm)
-{
- return mtk_sha_cra_init_alg(tfm, "sha384");
-}
-
-static int mtk_sha_cra_sha512_init(struct crypto_tfm *tfm)
-{
- return mtk_sha_cra_init_alg(tfm, "sha512");
-}
-
-static void mtk_sha_cra_exit(struct crypto_tfm *tfm)
-{
- struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
-
- if (tctx->flags & SHA_FLAGS_HMAC) {
- struct mtk_sha_hmac_ctx *bctx = tctx->base;
-
- crypto_free_shash(bctx->shash);
- }
-}
-
-static struct ahash_alg algs_sha1_sha224_sha256[] = {
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "sha1",
- .cra_driver_name = "mtk-sha1",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .halg.digestsize = SHA224_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "sha224",
- .cra_driver_name = "mtk-sha224",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .halg.digestsize = SHA256_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "sha256",
- .cra_driver_name = "mtk-sha256",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .setkey = mtk_sha_setkey,
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "mtk-hmac-sha1",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx) +
- sizeof(struct mtk_sha_hmac_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_sha1_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .setkey = mtk_sha_setkey,
- .halg.digestsize = SHA224_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "hmac(sha224)",
- .cra_driver_name = "mtk-hmac-sha224",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx) +
- sizeof(struct mtk_sha_hmac_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_sha224_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .setkey = mtk_sha_setkey,
- .halg.digestsize = SHA256_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "hmac(sha256)",
- .cra_driver_name = "mtk-hmac-sha256",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx) +
- sizeof(struct mtk_sha_hmac_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_sha256_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-};
-
-static struct ahash_alg algs_sha384_sha512[] = {
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .halg.digestsize = SHA384_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "sha384",
- .cra_driver_name = "mtk-sha384",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .halg.digestsize = SHA512_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "sha512",
- .cra_driver_name = "mtk-sha512",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .setkey = mtk_sha_setkey,
- .halg.digestsize = SHA384_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "hmac(sha384)",
- .cra_driver_name = "mtk-hmac-sha384",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx) +
- sizeof(struct mtk_sha_hmac_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_sha384_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-{
- .init = mtk_sha_init,
- .update = mtk_sha_update,
- .final = mtk_sha_final,
- .finup = mtk_sha_finup,
- .digest = mtk_sha_digest,
- .export = mtk_sha_export,
- .import = mtk_sha_import,
- .setkey = mtk_sha_setkey,
- .halg.digestsize = SHA512_DIGEST_SIZE,
- .halg.statesize = sizeof(struct mtk_sha_reqctx),
- .halg.base = {
- .cra_name = "hmac(sha512)",
- .cra_driver_name = "mtk-hmac-sha512",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mtk_sha_ctx) +
- sizeof(struct mtk_sha_hmac_ctx),
- .cra_alignmask = SHA_ALIGN_MSK,
- .cra_module = THIS_MODULE,
- .cra_init = mtk_sha_cra_sha512_init,
- .cra_exit = mtk_sha_cra_exit,
- }
-},
-};
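The two ahash tables above are only ever reached through their generic cra_name strings; callers never name the driver directly. A minimal, hedged sketch of how a kernel consumer would land on one of them (example_use_sha256() is hypothetical and not part of the driver); the crypto core resolves "sha256" to "mtk-sha256" when the driver's cra_priority of 400 outranks the software implementation:

#include <linux/err.h>
#include <crypto/hash.h>

static int example_use_sha256(void)
{
	struct crypto_ahash *tfm;

	/* Resolved by generic name; the highest-priority registered "sha256" wins. */
	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... allocate an ahash_request and drive it via crypto_ahash_digest() ... */

	crypto_free_ahash(tfm);
	return 0;
}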
-
-static void mtk_sha_queue_task(unsigned long data)
-{
- struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
-
- mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL);
-}
-
-static void mtk_sha_done_task(unsigned long data)
-{
- struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
- struct mtk_cryp *cryp = sha->cryp;
-
- mtk_sha_unmap(cryp, sha);
- mtk_sha_complete(cryp, sha);
-}
-
-static irqreturn_t mtk_sha_irq(int irq, void *dev_id)
-{
- struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id;
- struct mtk_cryp *cryp = sha->cryp;
- u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id));
-
- mtk_sha_write(cryp, RDR_STAT(sha->id), val);
-
- if (likely((SHA_FLAGS_BUSY & sha->flags))) {
- mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST);
- mtk_sha_write(cryp, RDR_THRESH(sha->id),
- MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
-
- tasklet_schedule(&sha->done_task);
- } else {
- dev_warn(cryp->dev, "SHA interrupt when no active requests.\n");
- }
- return IRQ_HANDLED;
-}
-
-/*
- * Two SHA records are used to get extra performance.
- * It is similar to mtk_aes_record_init().
- */
-static int mtk_sha_record_init(struct mtk_cryp *cryp)
-{
- struct mtk_sha_rec **sha = cryp->sha;
- int i, err = -ENOMEM;
-
- for (i = 0; i < MTK_REC_NUM; i++) {
- sha[i] = kzalloc(sizeof(**sha), GFP_KERNEL);
- if (!sha[i])
- goto err_cleanup;
-
- sha[i]->cryp = cryp;
-
- spin_lock_init(&sha[i]->lock);
- crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);
-
- tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task,
- (unsigned long)sha[i]);
- tasklet_init(&sha[i]->done_task, mtk_sha_done_task,
- (unsigned long)sha[i]);
- }
-
- /* Link to ring2 and ring3 respectively */
- sha[0]->id = MTK_RING2;
- sha[1]->id = MTK_RING3;
-
- cryp->rec = 1;
-
- return 0;
-
-err_cleanup:
- for (; i--; )
- kfree(sha[i]);
- return err;
-}
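A hedged illustration of the binding set up above (example_record_index() is a hypothetical helper): records 0 and 1 are tied to MTK_RING2 and MTK_RING3, which is why mtk_sha_queue_task() earlier recovers the record index as sha->id - MTK_RING2.

static inline int example_record_index(struct mtk_sha_rec *sha)
{
	/* record 0 drives ring2, record 1 drives ring3 */
	return sha->id - MTK_RING2;
}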
-
-static void mtk_sha_record_free(struct mtk_cryp *cryp)
-{
- int i;
-
- for (i = 0; i < MTK_REC_NUM; i++) {
- tasklet_kill(&cryp->sha[i]->done_task);
- tasklet_kill(&cryp->sha[i]->queue_task);
-
- kfree(cryp->sha[i]);
- }
-}
-
-static void mtk_sha_unregister_algs(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++)
- crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);
-
- for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++)
- crypto_unregister_ahash(&algs_sha384_sha512[i]);
-}
-
-static int mtk_sha_register_algs(void)
-{
- int err, i;
-
- for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++) {
- err = crypto_register_ahash(&algs_sha1_sha224_sha256[i]);
- if (err)
- goto err_sha_224_256_algs;
- }
-
- for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++) {
- err = crypto_register_ahash(&algs_sha384_sha512[i]);
- if (err)
- goto err_sha_384_512_algs;
- }
-
- return 0;
-
-err_sha_384_512_algs:
- for (; i--; )
- crypto_unregister_ahash(&algs_sha384_sha512[i]);
- i = ARRAY_SIZE(algs_sha1_sha224_sha256);
-err_sha_224_256_algs:
- for (; i--; )
- crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);
-
- return err;
-}
-
-int mtk_hash_alg_register(struct mtk_cryp *cryp)
-{
- int err;
-
- INIT_LIST_HEAD(&cryp->sha_list);
-
- /* Initialize two hash records */
- err = mtk_sha_record_init(cryp);
- if (err)
- goto err_record;
-
- err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq,
- 0, "mtk-sha", cryp->sha[0]);
- if (err) {
- dev_err(cryp->dev, "unable to request sha irq0.\n");
- goto err_res;
- }
-
- err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq,
- 0, "mtk-sha", cryp->sha[1]);
- if (err) {
- dev_err(cryp->dev, "unable to request sha irq1.\n");
- goto err_res;
- }
-
- /* Enable ring2 and ring3 interrupt for hash */
- mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING2), MTK_IRQ_RDR2);
- mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING3), MTK_IRQ_RDR3);
-
- spin_lock(&mtk_sha.lock);
- list_add_tail(&cryp->sha_list, &mtk_sha.dev_list);
- spin_unlock(&mtk_sha.lock);
-
- err = mtk_sha_register_algs();
- if (err)
- goto err_algs;
-
- return 0;
-
-err_algs:
- spin_lock(&mtk_sha.lock);
- list_del(&cryp->sha_list);
- spin_unlock(&mtk_sha.lock);
-err_res:
- mtk_sha_record_free(cryp);
-err_record:
-
- dev_err(cryp->dev, "mtk-sha initialization failed.\n");
- return err;
-}
-
-void mtk_hash_alg_release(struct mtk_cryp *cryp)
-{
- spin_lock(&mtk_sha.lock);
- list_del(&cryp->sha_list);
- spin_unlock(&mtk_sha.lock);
-
- mtk_sha_unregister_algs();
- mtk_sha_record_free(cryp);
-}
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
deleted file mode 100644
index 84f9c16d984c..000000000000
--- a/drivers/crypto/picoxcell_crypto.c
+++ /dev/null
@@ -1,1807 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
- */
-#include <crypto/internal/aead.h>
-#include <crypto/aes.h>
-#include <crypto/algapi.h>
-#include <crypto/authenc.h>
-#include <crypto/internal/des.h>
-#include <crypto/md5.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/clk.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/rtnetlink.h>
-#include <linux/scatterlist.h>
-#include <linux/sched.h>
-#include <linux/sizes.h>
-#include <linux/slab.h>
-#include <linux/timer.h>
-
-#include "picoxcell_crypto_regs.h"
-
-/*
- * The threshold for the number of entries in the CMD FIFO available before
- * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
- * number of interrupts raised to the CPU.
- */
-#define CMD0_IRQ_THRESHOLD 1
-
-/*
- * The timeout period (in jiffies) for a PDU. When the number of PDUs in
- * flight is greater than the STAT_IRQ_THRESHOLD or is zero, the timer is disabled.
- * When there are packets in flight but lower than the threshold, we enable
- * the timer and at expiry, attempt to remove any processed packets from the
- * queue and if there are still packets left, schedule the timer again.
- */
-#define PACKET_TIMEOUT 1
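A condensed, hedged sketch of the timeout policy described above (the real driver splits this between spacc_packet_timeout() and spacc_spacc_complete() further down, and the types it touches are defined later in this file): reap completed PDUs on expiry and keep the timer armed only while requests remain in flight.

static void example_packet_timeout(struct timer_list *t)
{
	struct spacc_engine *engine = from_timer(engine, t, packet_timeout);

	spacc_process_done(engine);		/* reap whatever has completed */
	if (engine->in_flight)			/* stragglers: poll again later */
		mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
}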
-
-/* The priority to register each algorithm with. */
-#define SPACC_CRYPTO_ALG_PRIORITY 10000
-
-#define SPACC_CRYPTO_KASUMI_F8_KEY_LEN 16
-#define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64
-#define SPACC_CRYPTO_IPSEC_HASH_PG_SZ 64
-#define SPACC_CRYPTO_IPSEC_MAX_CTXS 32
-#define SPACC_CRYPTO_IPSEC_FIFO_SZ 32
-#define SPACC_CRYPTO_L2_CIPHER_PG_SZ 64
-#define SPACC_CRYPTO_L2_HASH_PG_SZ 64
-#define SPACC_CRYPTO_L2_MAX_CTXS 128
-#define SPACC_CRYPTO_L2_FIFO_SZ 128
-
-#define MAX_DDT_LEN 16
-
-/* DDT format. This must match the hardware DDT format exactly. */
-struct spacc_ddt {
- dma_addr_t p;
- u32 len;
-};
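To spell the format out, a hedged illustration (example_fill_ddt() is hypothetical): a DDT list is a flat array of {bus address, length} pairs that the engine walks until it reaches an all-zero entry, which is exactly what spacc_sg_to_ddt() below builds from a scatterlist.

static void example_fill_ddt(struct spacc_ddt *ddt, dma_addr_t buf, u32 len)
{
	ddt[0].p = buf;		/* one contiguous data segment */
	ddt[0].len = len;
	ddt[1].p = 0;		/* all-zero entry terminates the list */
	ddt[1].len = 0;
}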
-
-/*
- * Asynchronous crypto request structure.
- *
- * This structure defines a request that is either queued for processing or
- * being processed.
- */
-struct spacc_req {
- struct list_head list;
- struct spacc_engine *engine;
- struct crypto_async_request *req;
- int result;
- bool is_encrypt;
- unsigned ctx_id;
- dma_addr_t src_addr, dst_addr;
- struct spacc_ddt *src_ddt, *dst_ddt;
- void (*complete)(struct spacc_req *req);
- struct skcipher_request fallback_req; // keep at the end
-};
-
-struct spacc_aead {
- unsigned long ctrl_default;
- unsigned long type;
- struct aead_alg alg;
- struct spacc_engine *engine;
- struct list_head entry;
- int key_offs;
- int iv_offs;
-};
-
-struct spacc_engine {
- void __iomem *regs;
- struct list_head pending;
- int next_ctx;
- spinlock_t hw_lock;
- int in_flight;
- struct list_head completed;
- struct list_head in_progress;
- struct tasklet_struct complete;
- unsigned long fifo_sz;
- void __iomem *cipher_ctx_base;
- void __iomem *hash_key_base;
- struct spacc_alg *algs;
- unsigned num_algs;
- struct list_head registered_algs;
- struct spacc_aead *aeads;
- unsigned num_aeads;
- struct list_head registered_aeads;
- size_t cipher_pg_sz;
- size_t hash_pg_sz;
- const char *name;
- struct clk *clk;
- struct device *dev;
- unsigned max_ctxs;
- struct timer_list packet_timeout;
- unsigned stat_irq_thresh;
- struct dma_pool *req_pool;
-};
-
-/* Algorithm type mask. */
-#define SPACC_CRYPTO_ALG_MASK 0x7
-
-/* SPACC definition of a crypto algorithm. */
-struct spacc_alg {
- unsigned long ctrl_default;
- unsigned long type;
- struct skcipher_alg alg;
- struct spacc_engine *engine;
- struct list_head entry;
- int key_offs;
- int iv_offs;
-};
-
-/* Generic context structure for any algorithm type. */
-struct spacc_generic_ctx {
- struct spacc_engine *engine;
- int flags;
- int key_offs;
- int iv_offs;
-};
-
-/* Block cipher context. */
-struct spacc_ablk_ctx {
- struct spacc_generic_ctx generic;
- u8 key[AES_MAX_KEY_SIZE];
- u8 key_len;
- /*
- * The fallback cipher. If the operation can't be done in hardware,
- * fallback to a software version.
- */
- struct crypto_skcipher *sw_cipher;
-};
-
-/* AEAD cipher context. */
-struct spacc_aead_ctx {
- struct spacc_generic_ctx generic;
- u8 cipher_key[AES_MAX_KEY_SIZE];
- u8 hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];
- u8 cipher_key_len;
- u8 hash_key_len;
- struct crypto_aead *sw_cipher;
-};
-
-static int spacc_ablk_submit(struct spacc_req *req);
-
-static inline struct spacc_alg *to_spacc_skcipher(struct skcipher_alg *alg)
-{
- return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
-}
-
-static inline struct spacc_aead *to_spacc_aead(struct aead_alg *alg)
-{
- return container_of(alg, struct spacc_aead, alg);
-}
-
-static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
-{
- u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
-
- return fifo_stat & SPA_FIFO_CMD_FULL;
-}
-
-/*
- * Given a cipher context, and a context number, get the base address of the
- * context page.
- *
- * Returns the address of the context page where the key/context may
- * be written.
- */
-static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
- unsigned indx,
- bool is_cipher_ctx)
-{
- return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
- (indx * ctx->engine->cipher_pg_sz) :
- ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
-}
-
-/* The context pages can only be written with 32-bit accesses. */
-static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
- unsigned count)
-{
- const u32 *src32 = (const u32 *) src;
-
- while (count--)
- writel(*src32++, dst++);
-}
-
-static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
- void __iomem *page_addr, const u8 *key,
- size_t key_len, const u8 *iv, size_t iv_len)
-{
- void __iomem *key_ptr = page_addr + ctx->key_offs;
- void __iomem *iv_ptr = page_addr + ctx->iv_offs;
-
- memcpy_toio32(key_ptr, key, key_len / 4);
- memcpy_toio32(iv_ptr, iv, iv_len / 4);
-}
-
-/*
- * Load a context into the engines context memory.
- *
- * Returns the index of the context page where the context was loaded.
- */
-static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
- const u8 *ciph_key, size_t ciph_len,
- const u8 *iv, size_t ivlen, const u8 *hash_key,
- size_t hash_len)
-{
- unsigned indx = ctx->engine->next_ctx++;
- void __iomem *ciph_page_addr, *hash_page_addr;
-
- ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
- hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);
-
- ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
- spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
- ivlen);
- writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
- (1 << SPA_KEY_SZ_CIPHER_OFFSET),
- ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
-
- if (hash_key) {
- memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
- writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
- ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
- }
-
- return indx;
-}
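One detail of the function above worth spelling out: context pages are handed out round-robin, and the wrap-around relies on fifo_sz being a power of two (32 for the IPSec engine, 128 for the L2 engine). A hedged one-liner with a hypothetical name:

static unsigned example_next_ctx(unsigned next_ctx, unsigned long fifo_sz)
{
	/* same as (next_ctx + 1) % fifo_sz when fifo_sz is a power of two */
	return (next_ctx + 1) & (fifo_sz - 1);
}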
-
-static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
-{
- ddt->p = phys;
- ddt->len = len;
-}
-
-/*
- * Take a crypto request and scatterlists for the data and turn them into DDTs
- * for passing to the crypto engines. This also DMA maps the data so that the
- * crypto engines can DMA to/from them.
- */
-static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
- struct scatterlist *payload,
- unsigned nbytes,
- enum dma_data_direction dir,
- dma_addr_t *ddt_phys)
-{
- unsigned mapped_ents;
- struct scatterlist *cur;
- struct spacc_ddt *ddt;
- int i;
- int nents;
-
- nents = sg_nents_for_len(payload, nbytes);
- if (nents < 0) {
- dev_err(engine->dev, "Invalid numbers of SG.\n");
- return NULL;
- }
- mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);
-
- if (mapped_ents + 1 > MAX_DDT_LEN)
- goto out;
-
- ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
- if (!ddt)
- goto out;
-
- for_each_sg(payload, cur, mapped_ents, i)
- ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
- ddt_set(&ddt[mapped_ents], 0, 0);
-
- return ddt;
-
-out:
- dma_unmap_sg(engine->dev, payload, nents, dir);
- return NULL;
-}
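A hedged usage sketch pairing the helper above with spacc_free_ddt() from further down (example_map_src() is hypothetical): every successful spacc_sg_to_ddt() must eventually be undone so the scatterlist is unmapped and the pool entry returned.

static int example_map_src(struct spacc_engine *engine, struct spacc_req *req,
			   struct scatterlist *src, unsigned nbytes)
{
	req->src_ddt = spacc_sg_to_ddt(engine, src, nbytes, DMA_TO_DEVICE,
				       &req->src_addr);
	if (!req->src_ddt)
		return -ENOMEM;

	/* ... hand req to the engine; once processing completes: ... */
	spacc_free_ddt(req, req->src_ddt, req->src_addr, src, nbytes,
		       DMA_TO_DEVICE);
	return 0;
}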
-
-static int spacc_aead_make_ddts(struct aead_request *areq)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
- struct spacc_req *req = aead_request_ctx(areq);
- struct spacc_engine *engine = req->engine;
- struct spacc_ddt *src_ddt, *dst_ddt;
- unsigned total;
- int src_nents, dst_nents;
- struct scatterlist *cur;
- int i, dst_ents, src_ents;
-
- total = areq->assoclen + areq->cryptlen;
- if (req->is_encrypt)
- total += crypto_aead_authsize(aead);
-
- src_nents = sg_nents_for_len(areq->src, total);
- if (src_nents < 0) {
- dev_err(engine->dev, "Invalid numbers of src SG.\n");
- return src_nents;
- }
- if (src_nents + 1 > MAX_DDT_LEN)
- return -E2BIG;
-
- dst_nents = 0;
- if (areq->src != areq->dst) {
- dst_nents = sg_nents_for_len(areq->dst, total);
- if (dst_nents < 0) {
- dev_err(engine->dev, "Invalid numbers of dst SG.\n");
- return dst_nents;
- }
-		if (dst_nents + 1 > MAX_DDT_LEN)
- return -E2BIG;
- }
-
- src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
- if (!src_ddt)
- goto err;
-
- dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
- if (!dst_ddt)
- goto err_free_src;
-
- req->src_ddt = src_ddt;
- req->dst_ddt = dst_ddt;
-
- if (dst_nents) {
- src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
- DMA_TO_DEVICE);
- if (!src_ents)
- goto err_free_dst;
-
- dst_ents = dma_map_sg(engine->dev, areq->dst, dst_nents,
- DMA_FROM_DEVICE);
-
- if (!dst_ents) {
- dma_unmap_sg(engine->dev, areq->src, src_nents,
- DMA_TO_DEVICE);
- goto err_free_dst;
- }
- } else {
- src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
- DMA_BIDIRECTIONAL);
- if (!src_ents)
- goto err_free_dst;
- dst_ents = src_ents;
- }
-
- /*
-	 * Now map in the payload for the source and destination and terminate
-	 * each list with a zero entry.
- */
- for_each_sg(areq->src, cur, src_ents, i)
- ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
-
- /* For decryption we need to skip the associated data. */
- total = req->is_encrypt ? 0 : areq->assoclen;
- for_each_sg(areq->dst, cur, dst_ents, i) {
- unsigned len = sg_dma_len(cur);
-
- if (len <= total) {
- total -= len;
- continue;
- }
-
- ddt_set(dst_ddt++, sg_dma_address(cur) + total, len - total);
- }
-
- ddt_set(src_ddt, 0, 0);
- ddt_set(dst_ddt, 0, 0);
-
- return 0;
-
-err_free_dst:
- dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr);
-err_free_src:
- dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
-err:
- return -ENOMEM;
-}
-
-static void spacc_aead_free_ddts(struct spacc_req *req)
-{
- struct aead_request *areq = container_of(req->req, struct aead_request,
- base);
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
- unsigned total = areq->assoclen + areq->cryptlen +
- (req->is_encrypt ? crypto_aead_authsize(aead) : 0);
- struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead);
- struct spacc_engine *engine = aead_ctx->generic.engine;
- int nents = sg_nents_for_len(areq->src, total);
-
-	/* sg_nents_for_len() should not fail: it succeeded when mapping the sg */
- if (unlikely(nents < 0)) {
- dev_err(engine->dev, "Invalid numbers of src SG.\n");
- return;
- }
-
- if (areq->src != areq->dst) {
- dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
- nents = sg_nents_for_len(areq->dst, total);
- if (unlikely(nents < 0)) {
- dev_err(engine->dev, "Invalid numbers of dst SG.\n");
- return;
- }
- dma_unmap_sg(engine->dev, areq->dst, nents, DMA_FROM_DEVICE);
- } else
- dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
-
- dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
- dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
-}
-
-static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
- dma_addr_t ddt_addr, struct scatterlist *payload,
- unsigned nbytes, enum dma_data_direction dir)
-{
- int nents = sg_nents_for_len(payload, nbytes);
-
- if (nents < 0) {
- dev_err(req->engine->dev, "Invalid numbers of SG.\n");
- return;
- }
-
- dma_unmap_sg(req->engine->dev, payload, nents, dir);
- dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
-}
-
-static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct crypto_authenc_keys keys;
- int err;
-
- crypto_aead_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
- crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
- if (err)
- return err;
-
- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
- goto badkey;
-
- if (keys.enckeylen > AES_MAX_KEY_SIZE)
- goto badkey;
-
- if (keys.authkeylen > sizeof(ctx->hash_ctx))
- goto badkey;
-
- memcpy(ctx->cipher_key, keys.enckey, keys.enckeylen);
- ctx->cipher_key_len = keys.enckeylen;
-
- memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
- ctx->hash_key_len = keys.authkeylen;
-
- memzero_explicit(&keys, sizeof(keys));
- return 0;
-
-badkey:
- memzero_explicit(&keys, sizeof(keys));
- return -EINVAL;
-}
-
-static int spacc_aead_setauthsize(struct crypto_aead *tfm,
- unsigned int authsize)
-{
- struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
-
- return crypto_aead_setauthsize(ctx->sw_cipher, authsize);
-}
-
-/*
- * Check if an AEAD request requires a fallback operation. Some requests can't
- * be completed in hardware because the hardware may not support certain key
- * sizes. In these cases we need to complete the request in software.
- */
-static int spacc_aead_need_fallback(struct aead_request *aead_req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct spacc_aead *spacc_alg = to_spacc_aead(alg);
- struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
-
- /*
- * If we have a non-supported key-length, then we need to do a
- * software fallback.
- */
- if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
- SPA_CTRL_CIPH_ALG_AES &&
- ctx->cipher_key_len != AES_KEYSIZE_128 &&
- ctx->cipher_key_len != AES_KEYSIZE_256)
- return 1;
-
- return 0;
-}
-
-static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
- bool is_encrypt)
-{
- struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
- struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
- struct aead_request *subreq = aead_request_ctx(req);
-
- aead_request_set_tfm(subreq, ctx->sw_cipher);
- aead_request_set_callback(subreq, req->base.flags,
- req->base.complete, req->base.data);
- aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
- req->iv);
- aead_request_set_ad(subreq, req->assoclen);
-
- return is_encrypt ? crypto_aead_encrypt(subreq) :
- crypto_aead_decrypt(subreq);
-}
-
-static void spacc_aead_complete(struct spacc_req *req)
-{
- spacc_aead_free_ddts(req);
- req->req->complete(req->req, req->result);
-}
-
-static int spacc_aead_submit(struct spacc_req *req)
-{
- struct aead_request *aead_req =
- container_of(req->req, struct aead_request, base);
- struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
- unsigned int authsize = crypto_aead_authsize(aead);
- struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct spacc_aead *spacc_alg = to_spacc_aead(alg);
- struct spacc_engine *engine = ctx->generic.engine;
- u32 ctrl, proc_len, assoc_len;
-
- req->result = -EINPROGRESS;
- req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
- ctx->cipher_key_len, aead_req->iv, crypto_aead_ivsize(aead),
- ctx->hash_ctx, ctx->hash_key_len);
-
- /* Set the source and destination DDT pointers. */
- writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
- writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
- writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
-
- assoc_len = aead_req->assoclen;
- proc_len = aead_req->cryptlen + assoc_len;
-
- /*
- * If we are decrypting, we need to take the length of the ICV out of
- * the processing length.
- */
- if (!req->is_encrypt)
- proc_len -= authsize;
-
- writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
- writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
- writel(authsize, engine->regs + SPA_ICV_LEN_REG_OFFSET);
- writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
- writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
-
- ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
- (1 << SPA_CTRL_ICV_APPEND);
- if (req->is_encrypt)
- ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
- else
- ctrl |= (1 << SPA_CTRL_KEY_EXP);
-
- mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
-
- writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
-
- return -EINPROGRESS;
-}
-
-static int spacc_req_submit(struct spacc_req *req);
-
-static void spacc_push(struct spacc_engine *engine)
-{
- struct spacc_req *req;
-
- while (!list_empty(&engine->pending) &&
- engine->in_flight + 1 <= engine->fifo_sz) {
-
- ++engine->in_flight;
- req = list_first_entry(&engine->pending, struct spacc_req,
- list);
- list_move_tail(&req->list, &engine->in_progress);
-
- req->result = spacc_req_submit(req);
- }
-}
-
-/*
- * Set up an AEAD request for processing. This will configure the engine, load
- * the context and then start the packet processing.
- */
-static int spacc_aead_setup(struct aead_request *req,
- unsigned alg_type, bool is_encrypt)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct spacc_engine *engine = to_spacc_aead(alg)->engine;
- struct spacc_req *dev_req = aead_request_ctx(req);
- int err;
- unsigned long flags;
-
- dev_req->req = &req->base;
- dev_req->is_encrypt = is_encrypt;
- dev_req->result = -EBUSY;
- dev_req->engine = engine;
- dev_req->complete = spacc_aead_complete;
-
- if (unlikely(spacc_aead_need_fallback(req) ||
- ((err = spacc_aead_make_ddts(req)) == -E2BIG)))
- return spacc_aead_do_fallback(req, alg_type, is_encrypt);
-
- if (err)
- goto out;
-
- err = -EINPROGRESS;
- spin_lock_irqsave(&engine->hw_lock, flags);
- if (unlikely(spacc_fifo_cmd_full(engine)) ||
- engine->in_flight + 1 > engine->fifo_sz) {
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
- err = -EBUSY;
- spin_unlock_irqrestore(&engine->hw_lock, flags);
- goto out_free_ddts;
- }
- list_add_tail(&dev_req->list, &engine->pending);
- } else {
- list_add_tail(&dev_req->list, &engine->pending);
- spacc_push(engine);
- }
- spin_unlock_irqrestore(&engine->hw_lock, flags);
-
- goto out;
-
-out_free_ddts:
- spacc_aead_free_ddts(dev_req);
-out:
- return err;
-}
-
-static int spacc_aead_encrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
-
- return spacc_aead_setup(req, alg->type, 1);
-}
-
-static int spacc_aead_decrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
-
- return spacc_aead_setup(req, alg->type, 0);
-}
-
-/*
- * Initialise a new AEAD context. This is responsible for allocating the
- * fallback cipher and initialising the context.
- */
-static int spacc_aead_cra_init(struct crypto_aead *tfm)
-{
- struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct aead_alg *alg = crypto_aead_alg(tfm);
- struct spacc_aead *spacc_alg = to_spacc_aead(alg);
- struct spacc_engine *engine = spacc_alg->engine;
-
- ctx->generic.flags = spacc_alg->type;
- ctx->generic.engine = engine;
- ctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->sw_cipher))
- return PTR_ERR(ctx->sw_cipher);
- ctx->generic.key_offs = spacc_alg->key_offs;
- ctx->generic.iv_offs = spacc_alg->iv_offs;
-
- crypto_aead_set_reqsize(
- tfm,
- max(sizeof(struct spacc_req),
- sizeof(struct aead_request) +
- crypto_aead_reqsize(ctx->sw_cipher)));
-
- return 0;
-}
-
-/*
- * Destructor for an AEAD context. This is called when the transform is freed
- * and must free the fallback cipher.
- */
-static void spacc_aead_cra_exit(struct crypto_aead *tfm)
-{
- struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-
- crypto_free_aead(ctx->sw_cipher);
-}
-
-/*
- * Set the DES key for a block cipher transform. This also performs weak key
- * checking if the transform has requested it.
- */
-static int spacc_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
- unsigned int len)
-{
- struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(cipher);
- int err;
-
- err = verify_skcipher_des_key(cipher, key);
- if (err)
- return err;
-
- memcpy(ctx->key, key, len);
- ctx->key_len = len;
-
- return 0;
-}
-
-/*
- * Set the 3DES key for a block cipher transform. This also performs weak key
- * checking if the transform has requested it.
- */
-static int spacc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
- unsigned int len)
-{
- struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(cipher);
- int err;
-
- err = verify_skcipher_des3_key(cipher, key);
- if (err)
- return err;
-
- memcpy(ctx->key, key, len);
- ctx->key_len = len;
-
- return 0;
-}
-
-/*
- * Set the key for an AES block cipher. Some key lengths are not supported in
- * hardware so this must also check whether a fallback is needed.
- */
-static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
- unsigned int len)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
- int err = 0;
-
- if (len > AES_MAX_KEY_SIZE)
- return -EINVAL;
-
- /*
- * IPSec engine only supports 128 and 256 bit AES keys. If we get a
- * request for any other size (192 bits) then we need to do a software
- * fallback.
- */
- if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
- if (!ctx->sw_cipher)
- return -EINVAL;
-
- /*
- * Set the fallback transform to use the same request flags as
- * the hardware transform.
- */
- crypto_skcipher_clear_flags(ctx->sw_cipher,
- CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctx->sw_cipher,
- cipher->base.crt_flags &
- CRYPTO_TFM_REQ_MASK);
-
- err = crypto_skcipher_setkey(ctx->sw_cipher, key, len);
- if (err)
- goto sw_setkey_failed;
- }
-
- memcpy(ctx->key, key, len);
- ctx->key_len = len;
-
-sw_setkey_failed:
- return err;
-}
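The rule implemented above, restated as a hedged helper with a hypothetical name: only AES-128 and AES-256 keys are programmed into the engine; a 24-byte AES-192 key is still accepted, but every request using it is routed through the software fallback allocated in spacc_ablk_init_tfm().

static bool example_aes_key_fits_hw(unsigned int len)
{
	return len == AES_KEYSIZE_128 || len == AES_KEYSIZE_256;
}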
-
-static int spacc_kasumi_f8_setkey(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int len)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
- int err = 0;
-
- if (len > AES_MAX_KEY_SIZE) {
- err = -EINVAL;
- goto out;
- }
-
- memcpy(ctx->key, key, len);
- ctx->key_len = len;
-
-out:
- return err;
-}
-
-static int spacc_ablk_need_fallback(struct spacc_req *req)
-{
- struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(ablk_req);
- struct spacc_alg *spacc_alg = to_spacc_skcipher(crypto_skcipher_alg(tfm));
- struct spacc_ablk_ctx *ctx;
-
- ctx = crypto_skcipher_ctx(tfm);
-
- return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
- SPA_CTRL_CIPH_ALG_AES &&
- ctx->key_len != AES_KEYSIZE_128 &&
- ctx->key_len != AES_KEYSIZE_256;
-}
-
-static void spacc_ablk_complete(struct spacc_req *req)
-{
- struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
-
- if (ablk_req->src != ablk_req->dst) {
- spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
- ablk_req->cryptlen, DMA_TO_DEVICE);
- spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
- ablk_req->cryptlen, DMA_FROM_DEVICE);
- } else
- spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
- ablk_req->cryptlen, DMA_BIDIRECTIONAL);
-
- req->req->complete(req->req, req->result);
-}
-
-static int spacc_ablk_submit(struct spacc_req *req)
-{
- struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(ablk_req);
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
- struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct spacc_engine *engine = ctx->generic.engine;
- u32 ctrl;
-
- req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
- ctx->key_len, ablk_req->iv, alg->ivsize,
- NULL, 0);
-
- writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
- writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
- writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
-
- writel(ablk_req->cryptlen, engine->regs + SPA_PROC_LEN_REG_OFFSET);
- writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
- writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
- writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);
-
- ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
- (req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
- (1 << SPA_CTRL_KEY_EXP));
-
- mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
-
- writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
-
- return -EINPROGRESS;
-}
-
-static int spacc_ablk_do_fallback(struct skcipher_request *req,
- unsigned alg_type, bool is_encrypt)
-{
- struct crypto_tfm *old_tfm =
- crypto_skcipher_tfm(crypto_skcipher_reqtfm(req));
- struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
- struct spacc_req *dev_req = skcipher_request_ctx(req);
- int err;
-
- /*
- * Change the request to use the software fallback transform, and once
- * the ciphering has completed, put the old transform back into the
- * request.
- */
- skcipher_request_set_tfm(&dev_req->fallback_req, ctx->sw_cipher);
- skcipher_request_set_callback(&dev_req->fallback_req, req->base.flags,
- req->base.complete, req->base.data);
- skcipher_request_set_crypt(&dev_req->fallback_req, req->src, req->dst,
- req->cryptlen, req->iv);
- err = is_encrypt ? crypto_skcipher_encrypt(&dev_req->fallback_req) :
- crypto_skcipher_decrypt(&dev_req->fallback_req);
-
- return err;
-}
-
-static int spacc_ablk_setup(struct skcipher_request *req, unsigned alg_type,
- bool is_encrypt)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct spacc_engine *engine = to_spacc_skcipher(alg)->engine;
- struct spacc_req *dev_req = skcipher_request_ctx(req);
- unsigned long flags;
- int err = -ENOMEM;
-
- dev_req->req = &req->base;
- dev_req->is_encrypt = is_encrypt;
- dev_req->engine = engine;
- dev_req->complete = spacc_ablk_complete;
- dev_req->result = -EINPROGRESS;
-
- if (unlikely(spacc_ablk_need_fallback(dev_req)))
- return spacc_ablk_do_fallback(req, alg_type, is_encrypt);
-
- /*
-	 * Create the DDTs for the engine. If we share the same source and
-	 * destination then we can optimize by reusing the DDTs.
- */
- if (req->src != req->dst) {
- dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
- req->cryptlen, DMA_TO_DEVICE, &dev_req->src_addr);
- if (!dev_req->src_ddt)
- goto out;
-
- dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
- req->cryptlen, DMA_FROM_DEVICE, &dev_req->dst_addr);
- if (!dev_req->dst_ddt)
- goto out_free_src;
- } else {
- dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
- req->cryptlen, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
- if (!dev_req->dst_ddt)
- goto out;
-
- dev_req->src_ddt = NULL;
- dev_req->src_addr = dev_req->dst_addr;
- }
-
- err = -EINPROGRESS;
- spin_lock_irqsave(&engine->hw_lock, flags);
- /*
- * Check if the engine will accept the operation now. If it won't then
- * we either stick it on the end of a pending list if we can backlog,
-	 * or bail out with an error if not.
- */
- if (unlikely(spacc_fifo_cmd_full(engine)) ||
- engine->in_flight + 1 > engine->fifo_sz) {
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
- err = -EBUSY;
- spin_unlock_irqrestore(&engine->hw_lock, flags);
- goto out_free_ddts;
- }
- list_add_tail(&dev_req->list, &engine->pending);
- } else {
- list_add_tail(&dev_req->list, &engine->pending);
- spacc_push(engine);
- }
- spin_unlock_irqrestore(&engine->hw_lock, flags);
-
- goto out;
-
-out_free_ddts:
- spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
- req->cryptlen, req->src == req->dst ?
- DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
-out_free_src:
- if (req->src != req->dst)
- spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
- req->src, req->cryptlen, DMA_TO_DEVICE);
-out:
- return err;
-}
-
-static int spacc_ablk_init_tfm(struct crypto_skcipher *tfm)
-{
- struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
- struct spacc_engine *engine = spacc_alg->engine;
-
- ctx->generic.flags = spacc_alg->type;
- ctx->generic.engine = engine;
- if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
- ctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->sw_cipher)) {
- dev_warn(engine->dev, "failed to allocate fallback for %s\n",
- alg->base.cra_name);
- return PTR_ERR(ctx->sw_cipher);
- }
- crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req) +
- crypto_skcipher_reqsize(ctx->sw_cipher));
- } else {
- /* take the size without the fallback skcipher_request at the end */
- crypto_skcipher_set_reqsize(tfm, offsetof(struct spacc_req,
- fallback_req));
- }
-
- ctx->generic.key_offs = spacc_alg->key_offs;
- ctx->generic.iv_offs = spacc_alg->iv_offs;
-
- return 0;
-}
-
-static void spacc_ablk_exit_tfm(struct crypto_skcipher *tfm)
-{
- struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_skcipher(ctx->sw_cipher);
-}
-
-static int spacc_ablk_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
- struct skcipher_alg *alg = crypto_skcipher_alg(cipher);
- struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
-
- return spacc_ablk_setup(req, spacc_alg->type, 1);
-}
-
-static int spacc_ablk_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
- struct skcipher_alg *alg = crypto_skcipher_alg(cipher);
- struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
-
- return spacc_ablk_setup(req, spacc_alg->type, 0);
-}
-
-static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
-{
- return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
- SPA_FIFO_STAT_EMPTY;
-}
-
-static void spacc_process_done(struct spacc_engine *engine)
-{
- struct spacc_req *req;
- unsigned long flags;
-
- spin_lock_irqsave(&engine->hw_lock, flags);
-
- while (!spacc_fifo_stat_empty(engine)) {
- req = list_first_entry(&engine->in_progress, struct spacc_req,
- list);
- list_move_tail(&req->list, &engine->completed);
- --engine->in_flight;
-
- /* POP the status register. */
- writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
- req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
- SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;
-
- /*
- * Convert the SPAcc error status into the standard POSIX error
- * codes.
- */
- if (unlikely(req->result)) {
- switch (req->result) {
- case SPA_STATUS_ICV_FAIL:
- req->result = -EBADMSG;
- break;
-
- case SPA_STATUS_MEMORY_ERROR:
- dev_warn(engine->dev,
- "memory error triggered\n");
- req->result = -EFAULT;
- break;
-
- case SPA_STATUS_BLOCK_ERROR:
- dev_warn(engine->dev,
- "block error triggered\n");
- req->result = -EIO;
- break;
- }
- }
- }
-
- tasklet_schedule(&engine->complete);
-
- spin_unlock_irqrestore(&engine->hw_lock, flags);
-}
-
-static irqreturn_t spacc_spacc_irq(int irq, void *dev)
-{
- struct spacc_engine *engine = (struct spacc_engine *)dev;
- u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);
-
- writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
- spacc_process_done(engine);
-
- return IRQ_HANDLED;
-}
-
-static void spacc_packet_timeout(struct timer_list *t)
-{
- struct spacc_engine *engine = from_timer(engine, t, packet_timeout);
-
- spacc_process_done(engine);
-}
-
-static int spacc_req_submit(struct spacc_req *req)
-{
- struct crypto_alg *alg = req->req->tfm->__crt_alg;
-
- if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
- return spacc_aead_submit(req);
- else
- return spacc_ablk_submit(req);
-}
-
-static void spacc_spacc_complete(unsigned long data)
-{
- struct spacc_engine *engine = (struct spacc_engine *)data;
- struct spacc_req *req, *tmp;
- unsigned long flags;
- LIST_HEAD(completed);
-
- spin_lock_irqsave(&engine->hw_lock, flags);
-
- list_splice_init(&engine->completed, &completed);
- spacc_push(engine);
- if (engine->in_flight)
- mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
-
- spin_unlock_irqrestore(&engine->hw_lock, flags);
-
- list_for_each_entry_safe(req, tmp, &completed, list) {
- list_del(&req->list);
- req->complete(req);
- }
-}
-
-#ifdef CONFIG_PM
-static int spacc_suspend(struct device *dev)
-{
- struct spacc_engine *engine = dev_get_drvdata(dev);
-
- /*
- * We only support standby mode. All we have to do is gate the clock to
- * the spacc. The hardware will preserve state until we turn it back
- * on again.
- */
- clk_disable(engine->clk);
-
- return 0;
-}
-
-static int spacc_resume(struct device *dev)
-{
- struct spacc_engine *engine = dev_get_drvdata(dev);
-
- return clk_enable(engine->clk);
-}
-
-static const struct dev_pm_ops spacc_pm_ops = {
- .suspend = spacc_suspend,
- .resume = spacc_resume,
-};
-#endif /* CONFIG_PM */
-
-static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
-{
- return dev ? dev_get_drvdata(dev) : NULL;
-}
-
-static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct spacc_engine *engine = spacc_dev_to_engine(dev);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
-}
-
-static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct spacc_engine *engine = spacc_dev_to_engine(dev);
- unsigned long thresh;
-
- if (kstrtoul(buf, 0, &thresh))
- return -EINVAL;
-
- thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);
-
- engine->stat_irq_thresh = thresh;
- writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
- engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
-
- return len;
-}
-static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
- spacc_stat_irq_thresh_store);
-
-static struct spacc_alg ipsec_engine_algs[] = {
- {
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
- .key_offs = 0,
- .iv_offs = AES_MAX_KEY_SIZE,
- .alg = {
- .base.cra_name = "cbc(aes)",
- .base.cra_driver_name = "cbc-aes-picoxcell",
- .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .base.cra_module = THIS_MODULE,
-
- .setkey = spacc_aes_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .init = spacc_ablk_init_tfm,
- .exit = spacc_ablk_exit_tfm,
- },
- },
- {
- .key_offs = 0,
- .iv_offs = AES_MAX_KEY_SIZE,
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
- .alg = {
- .base.cra_name = "ecb(aes)",
- .base.cra_driver_name = "ecb-aes-picoxcell",
- .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .base.cra_module = THIS_MODULE,
-
- .setkey = spacc_aes_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .init = spacc_ablk_init_tfm,
- .exit = spacc_ablk_exit_tfm,
- },
- },
- {
- .key_offs = DES_BLOCK_SIZE,
- .iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
- .alg = {
- .base.cra_name = "cbc(des)",
- .base.cra_driver_name = "cbc-des-picoxcell",
- .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .base.cra_module = THIS_MODULE,
-
- .setkey = spacc_des_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .init = spacc_ablk_init_tfm,
- .exit = spacc_ablk_exit_tfm,
- },
- },
- {
- .key_offs = DES_BLOCK_SIZE,
- .iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
- .alg = {
- .base.cra_name = "ecb(des)",
- .base.cra_driver_name = "ecb-des-picoxcell",
- .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .base.cra_module = THIS_MODULE,
-
- .setkey = spacc_des_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .init = spacc_ablk_init_tfm,
- .exit = spacc_ablk_exit_tfm,
- },
- },
- {
- .key_offs = DES_BLOCK_SIZE,
- .iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
- .alg = {
- .base.cra_name = "cbc(des3_ede)",
- .base.cra_driver_name = "cbc-des3-ede-picoxcell",
- .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .base.cra_module = THIS_MODULE,
-
- .setkey = spacc_des3_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .init = spacc_ablk_init_tfm,
- .exit = spacc_ablk_exit_tfm,
- },
- },
- {
- .key_offs = DES_BLOCK_SIZE,
- .iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
- .alg = {
- .base.cra_name = "ecb(des3_ede)",
- .base.cra_driver_name = "ecb-des3-ede-picoxcell",
- .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .base.cra_module = THIS_MODULE,
-
- .setkey = spacc_des3_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .init = spacc_ablk_init_tfm,
- .exit = spacc_ablk_exit_tfm,
- },
- },
-};
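As with the hashes earlier, consumers reach these skciphers only through the generic cra_name strings. A hedged sketch (example_use_cbc_aes() is hypothetical) of picking up "cbc(aes)" from the table above; the very high SPACC_CRYPTO_ALG_PRIORITY of 10000 makes the hardware variant win whenever the engine has registered it.

#include <linux/err.h>
#include <crypto/skcipher.h>

static int example_use_cbc_aes(void)
{
	struct crypto_skcipher *tfm;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... crypto_skcipher_setkey(), skcipher_request_alloc(), encrypt/decrypt ... */

	crypto_free_skcipher(tfm);
	return 0;
}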
-
-static struct spacc_aead ipsec_engine_aeads[] = {
- {
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
- SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_SHA |
- SPA_CTRL_HASH_MODE_HMAC,
- .key_offs = 0,
- .iv_offs = AES_MAX_KEY_SIZE,
- .alg = {
- .base = {
- .cra_name = "authenc(hmac(sha1),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha1-"
- "cbc-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_module = THIS_MODULE,
- },
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- .init = spacc_aead_cra_init,
- .exit = spacc_aead_cra_exit,
- },
- },
- {
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
- SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_SHA256 |
- SPA_CTRL_HASH_MODE_HMAC,
- .key_offs = 0,
- .iv_offs = AES_MAX_KEY_SIZE,
- .alg = {
- .base = {
- .cra_name = "authenc(hmac(sha256),cbc(aes))",
- .cra_driver_name = "authenc-hmac-sha256-"
- "cbc-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_module = THIS_MODULE,
- },
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- .init = spacc_aead_cra_init,
- .exit = spacc_aead_cra_exit,
- },
- },
- {
- .key_offs = 0,
- .iv_offs = AES_MAX_KEY_SIZE,
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
- SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_MD5 |
- SPA_CTRL_HASH_MODE_HMAC,
- .alg = {
- .base = {
- .cra_name = "authenc(hmac(md5),cbc(aes))",
- .cra_driver_name = "authenc-hmac-md5-"
- "cbc-aes-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_module = THIS_MODULE,
- },
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- .init = spacc_aead_cra_init,
- .exit = spacc_aead_cra_exit,
- },
- },
- {
- .key_offs = DES_BLOCK_SIZE,
- .iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_DES |
- SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_SHA |
- SPA_CTRL_HASH_MODE_HMAC,
- .alg = {
- .base = {
- .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha1-"
- "cbc-3des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_module = THIS_MODULE,
- },
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- .init = spacc_aead_cra_init,
- .exit = spacc_aead_cra_exit,
- },
- },
- {
- .key_offs = DES_BLOCK_SIZE,
- .iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
- SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_SHA256 |
- SPA_CTRL_HASH_MODE_HMAC,
- .alg = {
- .base = {
- .cra_name = "authenc(hmac(sha256),"
- "cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-sha256-"
- "cbc-3des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_module = THIS_MODULE,
- },
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
- .init = spacc_aead_cra_init,
- .exit = spacc_aead_cra_exit,
- },
- },
- {
- .key_offs = DES_BLOCK_SIZE,
- .iv_offs = 0,
- .ctrl_default = SPA_CTRL_CIPH_ALG_DES |
- SPA_CTRL_CIPH_MODE_CBC |
- SPA_CTRL_HASH_ALG_MD5 |
- SPA_CTRL_HASH_MODE_HMAC,
- .alg = {
- .base = {
- .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
- .cra_driver_name = "authenc-hmac-md5-"
- "cbc-3des-picoxcell",
- .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct spacc_aead_ctx),
- .cra_module = THIS_MODULE,
- },
- .setkey = spacc_aead_setkey,
- .setauthsize = spacc_aead_setauthsize,
- .encrypt = spacc_aead_encrypt,
- .decrypt = spacc_aead_decrypt,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .maxauthsize = MD5_DIGEST_SIZE,
- .init = spacc_aead_cra_init,
- .exit = spacc_aead_cra_exit,
- },
- },
-};
-
-static struct spacc_alg l2_engine_algs[] = {
- {
- .key_offs = 0,
- .iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
- .ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
- SPA_CTRL_CIPH_MODE_F8,
- .alg = {
- .base.cra_name = "f8(kasumi)",
- .base.cra_driver_name = "f8-kasumi-picoxcell",
- .base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .base.cra_blocksize = 8,
- .base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
- .base.cra_module = THIS_MODULE,
-
- .setkey = spacc_kasumi_f8_setkey,
- .encrypt = spacc_ablk_encrypt,
- .decrypt = spacc_ablk_decrypt,
- .min_keysize = 16,
- .max_keysize = 16,
- .ivsize = 8,
- .init = spacc_ablk_init_tfm,
- .exit = spacc_ablk_exit_tfm,
- },
- },
-};
-
-#ifdef CONFIG_OF
-static const struct of_device_id spacc_of_id_table[] = {
- { .compatible = "picochip,spacc-ipsec" },
- { .compatible = "picochip,spacc-l2" },
- {}
-};
-MODULE_DEVICE_TABLE(of, spacc_of_id_table);
-#endif /* CONFIG_OF */
-
-static void spacc_tasklet_kill(void *data)
-{
- tasklet_kill(data);
-}
-
-static int spacc_probe(struct platform_device *pdev)
-{
- int i, err, ret;
- struct resource *irq;
- struct device_node *np = pdev->dev.of_node;
- struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
- GFP_KERNEL);
- if (!engine)
- return -ENOMEM;
-
- if (of_device_is_compatible(np, "picochip,spacc-ipsec")) {
- engine->max_ctxs = SPACC_CRYPTO_IPSEC_MAX_CTXS;
- engine->cipher_pg_sz = SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ;
- engine->hash_pg_sz = SPACC_CRYPTO_IPSEC_HASH_PG_SZ;
- engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ;
- engine->algs = ipsec_engine_algs;
- engine->num_algs = ARRAY_SIZE(ipsec_engine_algs);
- engine->aeads = ipsec_engine_aeads;
- engine->num_aeads = ARRAY_SIZE(ipsec_engine_aeads);
- } else if (of_device_is_compatible(np, "picochip,spacc-l2")) {
- engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS;
- engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ;
- engine->hash_pg_sz = SPACC_CRYPTO_L2_HASH_PG_SZ;
- engine->fifo_sz = SPACC_CRYPTO_L2_FIFO_SZ;
- engine->algs = l2_engine_algs;
- engine->num_algs = ARRAY_SIZE(l2_engine_algs);
- } else {
- return -EINVAL;
- }
-
- engine->name = dev_name(&pdev->dev);
-
- engine->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(engine->regs))
- return PTR_ERR(engine->regs);
-
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq) {
- dev_err(&pdev->dev, "no memory/irq resource for engine\n");
- return -ENXIO;
- }
-
- tasklet_init(&engine->complete, spacc_spacc_complete,
- (unsigned long)engine);
-
- ret = devm_add_action(&pdev->dev, spacc_tasklet_kill,
- &engine->complete);
- if (ret)
- return ret;
-
- if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
- engine->name, engine)) {
- dev_err(engine->dev, "failed to request IRQ\n");
- return -EBUSY;
- }
-
- engine->dev = &pdev->dev;
- engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;
- engine->hash_key_base = engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;
-
- engine->req_pool = dmam_pool_create(engine->name, engine->dev,
- MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K);
- if (!engine->req_pool)
- return -ENOMEM;
-
- spin_lock_init(&engine->hw_lock);
-
- engine->clk = clk_get(&pdev->dev, "ref");
- if (IS_ERR(engine->clk)) {
- dev_info(&pdev->dev, "clk unavailable\n");
- return PTR_ERR(engine->clk);
- }
-
- if (clk_prepare_enable(engine->clk)) {
- dev_info(&pdev->dev, "unable to prepare/enable clk\n");
- ret = -EIO;
- goto err_clk_put;
- }
-
- /*
- * Use an IRQ threshold of 50% as a default. This seems to be a
- * reasonable trade off of latency against throughput but can be
- * changed at runtime.
- */
- engine->stat_irq_thresh = (engine->fifo_sz / 2);
-
- ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
- if (ret)
- goto err_clk_disable;
-
- /*
- * Configure the interrupts. We only use the STAT_CNT interrupt as we
- * only submit a new packet for processing when we complete another in
- * the queue. This minimizes time spent in the interrupt handler.
- */
- writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
- engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
- writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
- engine->regs + SPA_IRQ_EN_REG_OFFSET);
-
- timer_setup(&engine->packet_timeout, spacc_packet_timeout, 0);
-
- INIT_LIST_HEAD(&engine->pending);
- INIT_LIST_HEAD(&engine->completed);
- INIT_LIST_HEAD(&engine->in_progress);
- engine->in_flight = 0;
-
- platform_set_drvdata(pdev, engine);
-
- ret = -EINVAL;
- INIT_LIST_HEAD(&engine->registered_algs);
- for (i = 0; i < engine->num_algs; ++i) {
- engine->algs[i].engine = engine;
- err = crypto_register_skcipher(&engine->algs[i].alg);
- if (!err) {
- list_add_tail(&engine->algs[i].entry,
- &engine->registered_algs);
- ret = 0;
- }
- if (err)
- dev_err(engine->dev, "failed to register alg \"%s\"\n",
- engine->algs[i].alg.base.cra_name);
- else
- dev_dbg(engine->dev, "registered alg \"%s\"\n",
- engine->algs[i].alg.base.cra_name);
- }
-
- INIT_LIST_HEAD(&engine->registered_aeads);
- for (i = 0; i < engine->num_aeads; ++i) {
- engine->aeads[i].engine = engine;
- err = crypto_register_aead(&engine->aeads[i].alg);
- if (!err) {
- list_add_tail(&engine->aeads[i].entry,
- &engine->registered_aeads);
- ret = 0;
- }
- if (err)
- dev_err(engine->dev, "failed to register alg \"%s\"\n",
- engine->aeads[i].alg.base.cra_name);
- else
- dev_dbg(engine->dev, "registered alg \"%s\"\n",
- engine->aeads[i].alg.base.cra_name);
- }
-
- if (!ret)
- return 0;
-
- del_timer_sync(&engine->packet_timeout);
- device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
-err_clk_disable:
- clk_disable_unprepare(engine->clk);
-err_clk_put:
- clk_put(engine->clk);
-
- return ret;
-}
-
-static int spacc_remove(struct platform_device *pdev)
-{
- struct spacc_aead *aead, *an;
- struct spacc_alg *alg, *next;
- struct spacc_engine *engine = platform_get_drvdata(pdev);
-
- del_timer_sync(&engine->packet_timeout);
- device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
-
- list_for_each_entry_safe(aead, an, &engine->registered_aeads, entry) {
- list_del(&aead->entry);
- crypto_unregister_aead(&aead->alg);
- }
-
- list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
- list_del(&alg->entry);
- crypto_unregister_skcipher(&alg->alg);
- }
-
- clk_disable_unprepare(engine->clk);
- clk_put(engine->clk);
-
- return 0;
-}
-
-static struct platform_driver spacc_driver = {
- .probe = spacc_probe,
- .remove = spacc_remove,
- .driver = {
- .name = "picochip,spacc",
-#ifdef CONFIG_PM
- .pm = &spacc_pm_ops,
-#endif /* CONFIG_PM */
- .of_match_table = of_match_ptr(spacc_of_id_table),
- },
-};
-
-module_platform_driver(spacc_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jamie Iles");
diff --git a/drivers/crypto/picoxcell_crypto_regs.h b/drivers/crypto/picoxcell_crypto_regs.h
deleted file mode 100644
index b870a50238ba..000000000000
--- a/drivers/crypto/picoxcell_crypto_regs.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2010 Picochip Ltd., Jamie Iles
- */
-#ifndef __PICOXCELL_CRYPTO_REGS_H__
-#define __PICOXCELL_CRYPTO_REGS_H__
-
-#define SPA_STATUS_OK 0
-#define SPA_STATUS_ICV_FAIL 1
-#define SPA_STATUS_MEMORY_ERROR 2
-#define SPA_STATUS_BLOCK_ERROR 3
-
-#define SPA_IRQ_CTRL_STAT_CNT_OFFSET 16
-#define SPA_IRQ_STAT_STAT_MASK (1 << 4)
-#define SPA_FIFO_STAT_STAT_OFFSET 16
-#define SPA_FIFO_STAT_STAT_CNT_MASK (0x3F << SPA_FIFO_STAT_STAT_OFFSET)
-#define SPA_STATUS_RES_CODE_OFFSET 24
-#define SPA_STATUS_RES_CODE_MASK (0x3 << SPA_STATUS_RES_CODE_OFFSET)
-#define SPA_KEY_SZ_CTX_INDEX_OFFSET 8
-#define SPA_KEY_SZ_CIPHER_OFFSET 31
-
-#define SPA_IRQ_EN_REG_OFFSET 0x00000000
-#define SPA_IRQ_STAT_REG_OFFSET 0x00000004
-#define SPA_IRQ_CTRL_REG_OFFSET 0x00000008
-#define SPA_FIFO_STAT_REG_OFFSET 0x0000000C
-#define SPA_SDMA_BRST_SZ_REG_OFFSET 0x00000010
-#define SPA_SRC_PTR_REG_OFFSET 0x00000020
-#define SPA_DST_PTR_REG_OFFSET 0x00000024
-#define SPA_OFFSET_REG_OFFSET 0x00000028
-#define SPA_AAD_LEN_REG_OFFSET 0x0000002C
-#define SPA_PROC_LEN_REG_OFFSET 0x00000030
-#define SPA_ICV_LEN_REG_OFFSET 0x00000034
-#define SPA_ICV_OFFSET_REG_OFFSET 0x00000038
-#define SPA_SW_CTRL_REG_OFFSET 0x0000003C
-#define SPA_CTRL_REG_OFFSET 0x00000040
-#define SPA_AUX_INFO_REG_OFFSET 0x0000004C
-#define SPA_STAT_POP_REG_OFFSET 0x00000050
-#define SPA_STATUS_REG_OFFSET 0x00000054
-#define SPA_KEY_SZ_REG_OFFSET 0x00000100
-#define SPA_CIPH_KEY_BASE_REG_OFFSET 0x00004000
-#define SPA_HASH_KEY_BASE_REG_OFFSET 0x00008000
-#define SPA_RC4_CTX_BASE_REG_OFFSET 0x00020000
-
-#define SPA_IRQ_EN_REG_RESET 0x00000000
-#define SPA_IRQ_CTRL_REG_RESET 0x00000000
-#define SPA_FIFO_STAT_REG_RESET 0x00000000
-#define SPA_SDMA_BRST_SZ_REG_RESET 0x00000000
-#define SPA_SRC_PTR_REG_RESET 0x00000000
-#define SPA_DST_PTR_REG_RESET 0x00000000
-#define SPA_OFFSET_REG_RESET 0x00000000
-#define SPA_AAD_LEN_REG_RESET 0x00000000
-#define SPA_PROC_LEN_REG_RESET 0x00000000
-#define SPA_ICV_LEN_REG_RESET 0x00000000
-#define SPA_ICV_OFFSET_REG_RESET 0x00000000
-#define SPA_SW_CTRL_REG_RESET 0x00000000
-#define SPA_CTRL_REG_RESET 0x00000000
-#define SPA_AUX_INFO_REG_RESET 0x00000000
-#define SPA_STAT_POP_REG_RESET 0x00000000
-#define SPA_STATUS_REG_RESET 0x00000000
-#define SPA_KEY_SZ_REG_RESET 0x00000000
-
-#define SPA_CTRL_HASH_ALG_IDX 4
-#define SPA_CTRL_CIPH_MODE_IDX 8
-#define SPA_CTRL_HASH_MODE_IDX 12
-#define SPA_CTRL_CTX_IDX 16
-#define SPA_CTRL_ENCRYPT_IDX 24
-#define SPA_CTRL_AAD_COPY 25
-#define SPA_CTRL_ICV_PT 26
-#define SPA_CTRL_ICV_ENC 27
-#define SPA_CTRL_ICV_APPEND 28
-#define SPA_CTRL_KEY_EXP 29
-
-#define SPA_KEY_SZ_CXT_IDX 8
-#define SPA_KEY_SZ_CIPHER_IDX 31
-
-#define SPA_IRQ_EN_CMD0_EN (1 << 0)
-#define SPA_IRQ_EN_STAT_EN (1 << 4)
-#define SPA_IRQ_EN_GLBL_EN (1 << 31)
-
-#define SPA_CTRL_CIPH_ALG_NULL 0x00
-#define SPA_CTRL_CIPH_ALG_DES 0x01
-#define SPA_CTRL_CIPH_ALG_AES 0x02
-#define SPA_CTRL_CIPH_ALG_RC4 0x03
-#define SPA_CTRL_CIPH_ALG_MULTI2 0x04
-#define SPA_CTRL_CIPH_ALG_KASUMI 0x05
-
-#define SPA_CTRL_HASH_ALG_NULL (0x00 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_MD5 (0x01 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_SHA (0x02 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_SHA224 (0x03 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_SHA256 (0x04 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_SHA384 (0x05 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_SHA512 (0x06 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_AESMAC (0x07 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_AESCMAC (0x08 << SPA_CTRL_HASH_ALG_IDX)
-#define SPA_CTRL_HASH_ALG_KASF9 (0x09 << SPA_CTRL_HASH_ALG_IDX)
-
-#define SPA_CTRL_CIPH_MODE_NULL (0x00 << SPA_CTRL_CIPH_MODE_IDX)
-#define SPA_CTRL_CIPH_MODE_ECB (0x00 << SPA_CTRL_CIPH_MODE_IDX)
-#define SPA_CTRL_CIPH_MODE_CBC (0x01 << SPA_CTRL_CIPH_MODE_IDX)
-#define SPA_CTRL_CIPH_MODE_CTR (0x02 << SPA_CTRL_CIPH_MODE_IDX)
-#define SPA_CTRL_CIPH_MODE_CCM (0x03 << SPA_CTRL_CIPH_MODE_IDX)
-#define SPA_CTRL_CIPH_MODE_GCM (0x05 << SPA_CTRL_CIPH_MODE_IDX)
-#define SPA_CTRL_CIPH_MODE_OFB (0x07 << SPA_CTRL_CIPH_MODE_IDX)
-#define SPA_CTRL_CIPH_MODE_CFB (0x08 << SPA_CTRL_CIPH_MODE_IDX)
-#define SPA_CTRL_CIPH_MODE_F8 (0x09 << SPA_CTRL_CIPH_MODE_IDX)
-
-#define SPA_CTRL_HASH_MODE_RAW (0x00 << SPA_CTRL_HASH_MODE_IDX)
-#define SPA_CTRL_HASH_MODE_SSLMAC (0x01 << SPA_CTRL_HASH_MODE_IDX)
-#define SPA_CTRL_HASH_MODE_HMAC (0x02 << SPA_CTRL_HASH_MODE_IDX)
-
-#define SPA_FIFO_STAT_EMPTY (1 << 31)
-#define SPA_FIFO_CMD_FULL (1 << 7)
-
-#endif /* __PICOXCELL_CRYPTO_REGS_H__ */
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index 846a3d90b41a..77783feb62b2 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -11,7 +11,7 @@ config CRYPTO_DEV_QAT
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
- select CRYPTO_AES
+ select CRYPTO_LIB_AES
select FW_LOADER
config CRYPTO_DEV_QAT_DH895xCC
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index 344bfae45bff..6a9be01fdf33 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -19,7 +19,7 @@ static struct adf_fw_config adf_4xxx_fw_config[] = {
};
/* Worker thread to service arbiter mappings */
-static u32 thrd_to_arb_map[] = {
+static const u32 thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
0x5555555, 0x5555555, 0x5555555, 0x5555555,
0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
0x0
@@ -119,17 +119,9 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_1;
}
-static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
- u32 const **arb_map_config)
+static const u32 *adf_get_arbiter_mapping(void)
{
- struct adf_hw_device_data *hw_device = accel_dev->hw_device;
- unsigned long ae_mask = hw_device->ae_mask;
- int i;
-
- for_each_clear_bit(i, &ae_mask, ADF_4XXX_MAX_ACCELENGINES)
- thrd_to_arb_map[i] = 0;
-
- *arb_map_config = thrd_to_arb_map;
+ return thrd_to_arb_map;
}
static void get_arb_info(struct arb_info *arb_info)
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index eb45f1b1ae3e..f5990d042c9a 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -7,8 +7,8 @@
#include "adf_c3xxx_hw_data.h"
#include "icp_qat_hw.h"
-/* Worker thread to service arbiter mappings based on dev SKUs */
-static const u32 thrd_to_arb_map_6_me_sku[] = {
+/* Worker thread to service arbiter mappings */
+static const u32 thrd_to_arb_map[ADF_C3XXX_MAX_ACCELENGINES] = {
0x12222AAA, 0x11222AAA, 0x12222AAA,
0x11222AAA, 0x12222AAA, 0x11222AAA
};
@@ -101,18 +101,9 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_UNKNOWN;
}
-static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
- u32 const **arb_map_config)
+static const u32 *adf_get_arbiter_mapping(void)
{
- switch (accel_dev->accel_pci_dev.sku) {
- case DEV_SKU_4:
- *arb_map_config = thrd_to_arb_map_6_me_sku;
- break;
- default:
- dev_err(&GET_DEV(accel_dev),
- "The configuration doesn't match any SKU");
- *arb_map_config = NULL;
- }
+ return thrd_to_arb_map;
}
static u32 get_pf2vf_offset(u32 i)
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index babdffbcb846..cadcf12884c8 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -7,13 +7,8 @@
#include "adf_c62x_hw_data.h"
#include "icp_qat_hw.h"
-/* Worker thread to service arbiter mappings based on dev SKUs */
-static const u32 thrd_to_arb_map_8_me_sku[] = {
- 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
- 0x11222AAA, 0x12222AAA, 0x11222AAA, 0, 0
-};
-
-static const u32 thrd_to_arb_map_10_me_sku[] = {
+/* Worker thread to service arbiter mappings */
+static const u32 thrd_to_arb_map[ADF_C62X_MAX_ACCELENGINES] = {
0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
};
@@ -108,21 +103,9 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_UNKNOWN;
}
-static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
- u32 const **arb_map_config)
-{
- switch (accel_dev->accel_pci_dev.sku) {
- case DEV_SKU_2:
- *arb_map_config = thrd_to_arb_map_8_me_sku;
- break;
- case DEV_SKU_4:
- *arb_map_config = thrd_to_arb_map_10_me_sku;
- break;
- default:
- dev_err(&GET_DEV(accel_dev),
- "The configuration doesn't match any SKU");
- *arb_map_config = NULL;
- }
+static const u32 *adf_get_arbiter_mapping(void)
+{
+ return thrd_to_arb_map;
}
static u32 get_pf2vf_offset(u32 i)
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index c46a5805b294..5527344546e5 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -168,8 +168,7 @@ struct adf_hw_device_data {
int (*send_admin_init)(struct adf_accel_dev *accel_dev);
int (*init_arb)(struct adf_accel_dev *accel_dev);
void (*exit_arb)(struct adf_accel_dev *accel_dev);
- void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
- const u32 **cfg);
+ const u32 *(*get_arb_mapping)(void);
void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
bool enable);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index eb9b3be9d8eb..96b437bfe3de 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -464,3 +464,4 @@ MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_ALIAS_CRYPTO("intel_qat");
MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
index 9f5240d9488b..64e4596a24f4 100644
--- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
+++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
@@ -19,6 +19,7 @@ int adf_init_arb(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
+ unsigned long ae_mask = hw_data->ae_mask;
u32 arb_off, wt_off, arb_cfg;
const u32 *thd_2_arb_cfg;
struct arb_info info;
@@ -35,12 +36,9 @@ int adf_init_arb(struct adf_accel_dev *accel_dev)
WRITE_CSR_ARB_SARCONFIG(csr, arb_off, arb, arb_cfg);
/* Map worker threads to service arbiters */
- hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);
+ thd_2_arb_cfg = hw_data->get_arb_mapping();
- if (!thd_2_arb_cfg)
- return -EFAULT;
-
- for (i = 0; i < hw_data->num_engines; i++)
+ for_each_set_bit(i, &ae_mask, hw_data->num_engines)
WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, thd_2_arb_cfg[i]);
return 0;
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 5a7030acdc33..888c1e047295 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/delay.h>
+#include <linux/nospec.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
#include "adf_transport_access_macros.h"
@@ -246,6 +247,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
return -EFAULT;
}
+ ring_num = array_index_nospec(ring_num, num_rings_per_bank);
bank = &transport_data->banks[bank_num];
if (adf_reserve_ring(bank, ring_num)) {
dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
index 1205186ad51e..e69e5907f595 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -62,8 +62,8 @@ static int adf_ring_show(struct seq_file *sfile, void *v)
seq_printf(sfile, "head %x, tail %x, empty: %d\n",
head, tail, (empty & 1 << ring->ring_number)
>> ring->ring_number);
- seq_printf(sfile, "ring size %d, msg size %d\n",
- ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
+ seq_printf(sfile, "ring size %lld, msg size %d\n",
+ (long long)ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
seq_puts(sfile, "----------- Ring data ------------\n");
return 0;
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 31c7a206a629..ff78c73c47e3 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -4,6 +4,7 @@
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 2c863d25327a..b0b78445418b 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -321,13 +321,13 @@ static int qat_dh_compute_value(struct kpp_request *req)
qat_req->out.dh.out_tab[1] = 0;
/* Mapping in.in.b or in.in_g2.xa is the same */
qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
- sizeof(struct qat_dh_input_params),
+ sizeof(qat_req->in.dh.in.b),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
- sizeof(struct qat_dh_output_params),
+ sizeof(qat_req->out.dh.r),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
@@ -716,13 +716,13 @@ static int qat_rsa_enc(struct akcipher_request *req)
qat_req->in.rsa.in_tab[3] = 0;
qat_req->out.rsa.out_tab[1] = 0;
qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
- sizeof(struct qat_rsa_input_params),
+ sizeof(qat_req->in.rsa.enc.m),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
- sizeof(struct qat_rsa_output_params),
+ sizeof(qat_req->out.rsa.enc.c),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
@@ -864,13 +864,13 @@ static int qat_rsa_dec(struct akcipher_request *req)
qat_req->in.rsa.in_tab[3] = 0;
qat_req->out.rsa.out_tab[1] = 0;
qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
- sizeof(struct qat_rsa_input_params),
+ sizeof(qat_req->in.rsa.dec.c),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
- sizeof(struct qat_rsa_output_params),
+ sizeof(qat_req->out.rsa.dec.m),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 1e83d9397b11..7dd7cd6c3ef8 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -7,14 +7,8 @@
#include "adf_dh895xcc_hw_data.h"
#include "icp_qat_hw.h"
-/* Worker thread to service arbiter mappings based on dev SKUs */
-static const u32 thrd_to_arb_map_sku4[] = {
- 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
- 0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000
-};
-
-static const u32 thrd_to_arb_map_sku6[] = {
+/* Worker thread to service arbiter mappings */
+static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
@@ -127,23 +121,9 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_UNKNOWN;
}
-static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
- u32 const **arb_map_config)
+static const u32 *adf_get_arbiter_mapping(void)
{
- switch (accel_dev->accel_pci_dev.sku) {
- case DEV_SKU_1:
- *arb_map_config = thrd_to_arb_map_sku4;
- break;
-
- case DEV_SKU_2:
- case DEV_SKU_4:
- *arb_map_config = thrd_to_arb_map_sku6;
- break;
- default:
- dev_err(&GET_DEV(accel_dev),
- "The configuration doesn't match any SKU");
- *arb_map_config = NULL;
- }
+ return thrd_to_arb_map;
}
static u32 get_pf2vf_offset(u32 i)
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 8b5be29cb4dc..457084b344c1 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1350,12 +1350,6 @@ static void sahara_unregister_algs(struct sahara_dev *dev)
crypto_unregister_ahash(&sha_v4_algs[i]);
}
-static const struct platform_device_id sahara_platform_ids[] = {
- { .name = "sahara-imx27" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
-
static const struct of_device_id sahara_dt_ids[] = {
{ .compatible = "fsl,imx53-sahara" },
{ .compatible = "fsl,imx27-sahara" },
@@ -1540,7 +1534,6 @@ static struct platform_driver sahara_driver = {
.name = SAHARA_NAME,
.of_match_table = sahara_dt_ids,
},
- .id_table = sahara_platform_ids,
};
module_platform_driver(sahara_driver);
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 2670c30332fa..2a4793176c71 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -1229,7 +1229,7 @@ static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp)
cr = stm32_cryp_read(cryp, CRYP_CR);
stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN);
- stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->last_ctr);
+ stm32_cryp_hw_write_iv(cryp, (__be32 *)cryp->last_ctr);
stm32_cryp_write(cryp, CRYP_CR, cr);
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 4fd85f31630a..25c9f825b8b5 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1093,11 +1093,12 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
*/
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
unsigned int offset, int datalen, int elen,
- struct talitos_ptr *link_tbl_ptr)
+ struct talitos_ptr *link_tbl_ptr, int align)
{
int n_sg = elen ? sg_count + 1 : sg_count;
int count = 0;
int cryptlen = datalen + elen;
+ int padding = ALIGN(cryptlen, align) - cryptlen;
while (cryptlen && sg && n_sg--) {
unsigned int len = sg_dma_len(sg);
@@ -1121,7 +1122,7 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
offset += datalen;
}
to_talitos_ptr(link_tbl_ptr + count,
- sg_dma_address(sg) + offset, len, 0);
+ sg_dma_address(sg) + offset, sg_next(sg) ? len : len + padding, 0);
to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
count++;
cryptlen -= len;
@@ -1144,10 +1145,11 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
unsigned int len, struct talitos_edesc *edesc,
struct talitos_ptr *ptr, int sg_count,
unsigned int offset, int tbl_off, int elen,
- bool force)
+ bool force, int align)
{
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
+ int aligned_len = ALIGN(len, align);
if (!src) {
to_talitos_ptr(ptr, 0, 0, is_sec1);
@@ -1155,22 +1157,22 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
}
to_talitos_ptr_ext_set(ptr, elen, is_sec1);
if (sg_count == 1 && !force) {
- to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
+ to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len, is_sec1);
return sg_count;
}
if (is_sec1) {
- to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
+ to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len, is_sec1);
return sg_count;
}
sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
- &edesc->link_tbl[tbl_off]);
+ &edesc->link_tbl[tbl_off], align);
if (sg_count == 1 && !force) {
/* Only one segment now, so no link tbl needed*/
copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
return sg_count;
}
to_talitos_ptr(ptr, edesc->dma_link_tbl +
- tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
+ tbl_off * sizeof(struct talitos_ptr), aligned_len, is_sec1);
to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
return sg_count;
@@ -1182,7 +1184,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
unsigned int offset, int tbl_off)
{
return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
- tbl_off, 0, false);
+ tbl_off, 0, false, 1);
}
/*
@@ -1251,7 +1253,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
sg_count, areq->assoclen, tbl_off, elen,
- false);
+ false, 1);
if (ret > 1) {
tbl_off += ret;
@@ -1271,7 +1273,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
elen = 0;
ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
sg_count, areq->assoclen, tbl_off, elen,
- is_ipsec_esp && !encrypt);
+ is_ipsec_esp && !encrypt, 1);
tbl_off += ret;
if (!encrypt && is_ipsec_esp) {
@@ -1577,6 +1579,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
+ bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU &&
+ (desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR;
/* first DWORD empty */
@@ -1597,8 +1601,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/*
* cipher in
*/
- sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
- &desc->ptr[3], sg_count, 0, 0);
+ sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3],
+ sg_count, 0, 0, 0, false, is_ctr ? 16 : 1);
if (sg_count > 1)
sync_needed = true;
@@ -2763,6 +2767,22 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-talitos",
+ .base.cra_blocksize = 1,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = skcipher_aes_setkey,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_AESU |
+ DESC_HDR_MODE0_AESU_CTR,
+ },
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "ecb-des-talitos",
.base.cra_blocksize = DES_BLOCK_SIZE,
@@ -3178,6 +3198,12 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
+ if (!strcmp(alg->cra_name, "ctr(aes)") && !has_ftr_sec1(priv) &&
+ DESC_TYPE(t_alg->algt.desc_hdr_template) !=
+ DESC_TYPE(DESC_HDR_TYPE_AESU_CTR_NONSNOOP)) {
+ devm_kfree(dev, t_alg);
+ return ERR_PTR(-ENOTSUPP);
+ }
break;
case CRYPTO_ALG_TYPE_AEAD:
alg = &t_alg->algt.alg.aead.base;
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 1469b956948a..32825119e880 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -344,6 +344,7 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
/* primary execution unit mode (MODE0) and derivatives */
#define DESC_HDR_MODE0_ENCRYPT cpu_to_be32(0x00100000)
+#define DESC_HDR_MODE0_AESU_MASK cpu_to_be32(0x00600000)
#define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_AESU_CTR cpu_to_be32(0x00600000)
#define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000)
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index 2bc5d4e1adf4..d05c02baebcf 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -14,6 +14,7 @@
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
+#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
#include "aesp8-ppc.h"
diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/drivers/crypto/vmx/aesp8-ppc.h
index 01774a4d26a2..5764d4438388 100644
--- a/drivers/crypto/vmx/aesp8-ppc.h
+++ b/drivers/crypto/vmx/aesp8-ppc.h
@@ -7,6 +7,12 @@ struct aes_key {
int rounds;
};
+extern struct shash_alg p8_ghash_alg;
+extern struct crypto_alg p8_aes_alg;
+extern struct skcipher_alg p8_aes_cbc_alg;
+extern struct skcipher_alg p8_aes_ctr_alg;
+extern struct skcipher_alg p8_aes_xts_alg;
+
int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
struct aes_key *key);
int aes_p8_set_decrypt_key(const u8 *userKey, const int bits,
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index 3e0335fb406c..a40d08e75fc0 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -17,11 +17,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
-extern struct shash_alg p8_ghash_alg;
-extern struct crypto_alg p8_aes_alg;
-extern struct skcipher_alg p8_aes_cbc_alg;
-extern struct skcipher_alg p8_aes_ctr_alg;
-extern struct skcipher_alg p8_aes_xts_alg;
+#include "aesp8-ppc.h"
static int __init p8_init(void)
{
@@ -78,3 +74,4 @@ MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions "
"support on Power 8");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
new file mode 100644
index 000000000000..97dc4d751651
--- /dev/null
+++ b/drivers/cxl/Kconfig
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig CXL_BUS
+ tristate "CXL (Compute Express Link) Devices Support"
+ depends on PCI
+ help
+ CXL is a bus that is electrically compatible with PCI Express, but
+ layers three protocols on that signalling (CXL.io, CXL.cache, and
+ CXL.mem). The CXL.cache protocol allows devices to hold cachelines
+ locally, the CXL.mem protocol allows devices to be fully coherent
+ memory targets, and the CXL.io protocol is equivalent to PCI Express.
+ Say 'y' to enable support for the configuration and management of
+ devices supporting these protocols.
+
+if CXL_BUS
+
+config CXL_MEM
+ tristate "CXL.mem: Memory Devices"
+ help
+ The CXL.mem protocol allows a device to act as a provider of
+ "System RAM" and/or "Persistent Memory" that is fully coherent
+ as if the memory was attached to the typical CPU memory
+ controller.
+
+ Say 'y/m' to enable a driver (named "cxl_mem.ko" when built as
+ a module) that will attach to CXL.mem devices for
+ configuration, provisioning, and health monitoring. This
+ driver is required for dynamic provisioning of CXL.mem
+ attached memory which is a prerequisite for persistent memory
+ support. Typically volatile memory is mapped by platform
+ firmware and included in the platform memory map, but in some
+ cases the OS is responsible for mapping that memory. See
+ Chapter 2.3 Type 3 CXL Device in the CXL 2.0 specification.
+
+ If unsure say 'm'.
+
+config CXL_MEM_RAW_COMMANDS
+ bool "RAW Command Interface for Memory Devices"
+ depends on CXL_MEM
+ help
+ Enable CXL RAW command interface.
+
+ The CXL driver ioctl interface may assign a kernel ioctl command
+ number for each specification defined opcode. At any given point in
+ time the number of opcodes that the specification defines and a device
+ may implement may exceed the kernel's set of associated ioctl function
+ numbers. The mismatch is either by omission, specification is too new,
+ or by design. When prototyping new hardware, or developing / debugging
+ the driver it is useful to be able to submit any possible command to
+ the hardware, even commands that may crash the kernel due to their
+ potential impact to memory currently in use by the kernel.
+
+ If developing CXL hardware or the driver say Y, otherwise say N.
+endif
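For reference, a minimal configuration fragment exercising the options introduced here might look like the following; building both drivers as modules and leaving the raw command interface off matches the recommendations in the help text above.

CONFIG_CXL_BUS=m
CONFIG_CXL_MEM=m
# CONFIG_CXL_MEM_RAW_COMMANDS is not set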
diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile
new file mode 100644
index 000000000000..a314a1891f4d
--- /dev/null
+++ b/drivers/cxl/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CXL_BUS) += cxl_bus.o
+obj-$(CONFIG_CXL_MEM) += cxl_mem.o
+
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL
+cxl_bus-y := bus.o
+cxl_mem-y := mem.o
diff --git a/drivers/cxl/bus.c b/drivers/cxl/bus.c
new file mode 100644
index 000000000000..58f74796d525
--- /dev/null
+++ b/drivers/cxl/bus.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#include <linux/device.h>
+#include <linux/module.h>
+
+/**
+ * DOC: cxl bus
+ *
+ * The CXL bus provides a namespace for control devices and a rendezvous
+ * point for cross-device interleave coordination.
+ */
+struct bus_type cxl_bus_type = {
+ .name = "cxl",
+};
+EXPORT_SYMBOL_GPL(cxl_bus_type);
+
+static __init int cxl_bus_init(void)
+{
+ return bus_register(&cxl_bus_type);
+}
+
+static void cxl_bus_exit(void)
+{
+ bus_unregister(&cxl_bus_type);
+}
+
+module_init(cxl_bus_init);
+module_exit(cxl_bus_exit);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
new file mode 100644
index 000000000000..6f14838c2d25
--- /dev/null
+++ b/drivers/cxl/cxl.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2020 Intel Corporation. */
+
+#ifndef __CXL_H__
+#define __CXL_H__
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+
+/* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
+#define CXLDEV_CAP_ARRAY_OFFSET 0x0
+#define CXLDEV_CAP_ARRAY_CAP_ID 0
+#define CXLDEV_CAP_ARRAY_ID_MASK GENMASK_ULL(15, 0)
+#define CXLDEV_CAP_ARRAY_COUNT_MASK GENMASK_ULL(47, 32)
+/* CXL 2.0 8.2.8.2 CXL Device Capability Header Register */
+#define CXLDEV_CAP_HDR_CAP_ID_MASK GENMASK(15, 0)
+/* CXL 2.0 8.2.8.2.1 CXL Device Capabilities */
+#define CXLDEV_CAP_CAP_ID_DEVICE_STATUS 0x1
+#define CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX 0x2
+#define CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX 0x3
+#define CXLDEV_CAP_CAP_ID_MEMDEV 0x4000
+
+/* CXL 2.0 8.2.8.4 Mailbox Registers */
+#define CXLDEV_MBOX_CAPS_OFFSET 0x00
+#define CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0)
+#define CXLDEV_MBOX_CTRL_OFFSET 0x04
+#define CXLDEV_MBOX_CTRL_DOORBELL BIT(0)
+#define CXLDEV_MBOX_CMD_OFFSET 0x08
+#define CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0)
+#define CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK GENMASK_ULL(36, 16)
+#define CXLDEV_MBOX_STATUS_OFFSET 0x10
+#define CXLDEV_MBOX_STATUS_RET_CODE_MASK GENMASK_ULL(47, 32)
+#define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18
+#define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20
+
+/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
+#define CXLMDEV_STATUS_OFFSET 0x0
+#define CXLMDEV_DEV_FATAL BIT(0)
+#define CXLMDEV_FW_HALT BIT(1)
+#define CXLMDEV_STATUS_MEDIA_STATUS_MASK GENMASK(3, 2)
+#define CXLMDEV_MS_NOT_READY 0
+#define CXLMDEV_MS_READY 1
+#define CXLMDEV_MS_ERROR 2
+#define CXLMDEV_MS_DISABLED 3
+#define CXLMDEV_READY(status) \
+ (FIELD_GET(CXLMDEV_STATUS_MEDIA_STATUS_MASK, status) == \
+ CXLMDEV_MS_READY)
+#define CXLMDEV_MBOX_IF_READY BIT(4)
+#define CXLMDEV_RESET_NEEDED_MASK GENMASK(7, 5)
+#define CXLMDEV_RESET_NEEDED_NOT 0
+#define CXLMDEV_RESET_NEEDED_COLD 1
+#define CXLMDEV_RESET_NEEDED_WARM 2
+#define CXLMDEV_RESET_NEEDED_HOT 3
+#define CXLMDEV_RESET_NEEDED_CXL 4
+#define CXLMDEV_RESET_NEEDED(status) \
+ (FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \
+ CXLMDEV_RESET_NEEDED_NOT)
+
+struct cxl_memdev;
+/**
+ * struct cxl_mem - A CXL memory device
+ * @pdev: The PCI device associated with this CXL device.
+ * @regs: IO mappings to the device's MMIO
+ * @status_regs: CXL 2.0 8.2.8.3 Device Status Registers
+ * @mbox_regs: CXL 2.0 8.2.8.4 Mailbox Registers
+ * @memdev_regs: CXL 2.0 8.2.8.5 Memory Device Registers
+ * @payload_size: Size of space for payload
+ * (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
+ * @mbox_mutex: Mutex to synchronize mailbox access.
+ * @firmware_version: Firmware version for the memory device.
+ * @enabled_commands: Hardware commands found enabled in CEL.
+ * @pmem_range: Persistent memory capacity information.
+ * @ram_range: Volatile memory capacity information.
+ */
+struct cxl_mem {
+ struct pci_dev *pdev;
+ void __iomem *regs;
+ struct cxl_memdev *cxlmd;
+
+ void __iomem *status_regs;
+ void __iomem *mbox_regs;
+ void __iomem *memdev_regs;
+
+ size_t payload_size;
+ struct mutex mbox_mutex; /* Protects device mailbox and firmware */
+ char firmware_version[0x10];
+ unsigned long *enabled_cmds;
+
+ struct range pmem_range;
+ struct range ram_range;
+};
+
+extern struct bus_type cxl_bus_type;
+#endif /* __CXL_H__ */
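To show how the status macros above are intended to be combined (a sketch assuming the caller has already mapped the Memory Device Registers; the helper name is invented), decoding the 64-bit status register looks roughly like this:

/* Illustrative only: decide whether the device is usable for commands. */
static bool example_memdev_ready(void __iomem *memdev_regs)
{
	u64 md_status = readq(memdev_regs + CXLMDEV_STATUS_OFFSET);

	if (md_status & (CXLMDEV_DEV_FATAL | CXLMDEV_FW_HALT))
		return false;			/* fatal error or halted firmware */
	if (CXLMDEV_RESET_NEEDED(md_status))
		return false;			/* cold/warm/hot/CXL reset required */

	/* Media and mailbox interface must both report ready. */
	return CXLMDEV_READY(md_status) && (md_status & CXLMDEV_MBOX_IF_READY);
}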
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
new file mode 100644
index 000000000000..244cb7d89678
--- /dev/null
+++ b/drivers/cxl/mem.c
@@ -0,0 +1,1552 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#include <uapi/linux/cxl_mem.h>
+#include <linux/security.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/cdev.h>
+#include <linux/idr.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include "pci.h"
+#include "cxl.h"
+
+/**
+ * DOC: cxl mem
+ *
+ * This implements a CXL memory device ("type-3") as it is defined by the
+ * Compute Express Link specification.
+ *
+ * The driver has several responsibilities, mainly:
+ * - Create the memX device and register on the CXL bus.
+ * - Enumerate the device's register interfaces and map them.
+ * - Probe the device attributes to establish sysfs interface.
+ * - Provide an IOCTL interface to userspace to communicate with the device for
+ * things like firmware update.
+ * - Support management of interleave sets.
+ * - Handle and manage error conditions.
+ */
+
+/*
+ * An entire PCI topology full of devices should be enough for any
+ * config
+ */
+#define CXL_MEM_MAX_DEVS 65536
+
+#define cxl_doorbell_busy(cxlm) \
+ (readl((cxlm)->mbox_regs + CXLDEV_MBOX_CTRL_OFFSET) & \
+ CXLDEV_MBOX_CTRL_DOORBELL)
+
+/* CXL 2.0 - 8.2.8.4 */
+#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)
+
+enum opcode {
+ CXL_MBOX_OP_INVALID = 0x0000,
+ CXL_MBOX_OP_RAW = CXL_MBOX_OP_INVALID,
+ CXL_MBOX_OP_GET_FW_INFO = 0x0200,
+ CXL_MBOX_OP_ACTIVATE_FW = 0x0202,
+ CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400,
+ CXL_MBOX_OP_GET_LOG = 0x0401,
+ CXL_MBOX_OP_IDENTIFY = 0x4000,
+ CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100,
+ CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101,
+ CXL_MBOX_OP_GET_LSA = 0x4102,
+ CXL_MBOX_OP_SET_LSA = 0x4103,
+ CXL_MBOX_OP_GET_HEALTH_INFO = 0x4200,
+ CXL_MBOX_OP_SET_SHUTDOWN_STATE = 0x4204,
+ CXL_MBOX_OP_SCAN_MEDIA = 0x4304,
+ CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305,
+ CXL_MBOX_OP_MAX = 0x10000
+};
+
+/**
+ * struct mbox_cmd - A command to be submitted to hardware.
+ * @opcode: (input) The command set and command submitted to hardware.
+ * @payload_in: (input) Pointer to the input payload.
+ * @payload_out: (output) Pointer to the output payload. Must be allocated by
+ * the caller.
+ * @size_in: (input) Number of bytes to load from @payload_in.
+ * @size_out: (input) Max number of bytes loaded into @payload_out.
+ * (output) Number of bytes generated by the device. For fixed size
+ * output commands this is always expected to be deterministic. For
+ * variable sized output commands, it tells the exact number of bytes
+ * written.
+ * @return_code: (output) Error code returned from hardware.
+ *
+ * This is the primary mechanism used to send commands to the hardware.
+ * All the fields except @payload_* correspond exactly to the fields described
+ * in the Command Register section of CXL 2.0 8.2.8.4.5. @payload_in and
+ * @payload_out are written to, and read from the Command Payload Registers
+ * defined in CXL 2.0 8.2.8.4.8.
+ */
+struct mbox_cmd {
+ u16 opcode;
+ void *payload_in;
+ void *payload_out;
+ size_t size_in;
+ size_t size_out;
+ u16 return_code;
+#define CXL_MBOX_SUCCESS 0
+};
+
+/**
+ * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
+ * @dev: driver core device object
+ * @cdev: char dev core object for ioctl operations
+ * @cxlm: pointer to the parent device driver data
+ * @ops_active: active user of @cxlm in ops handlers
+ * @ops_dead: completion when all @cxlm ops users have exited
+ * @id: id number of this memdev instance.
+ */
+struct cxl_memdev {
+ struct device dev;
+ struct cdev cdev;
+ struct cxl_mem *cxlm;
+ struct percpu_ref ops_active;
+ struct completion ops_dead;
+ int id;
+};
+
+static int cxl_mem_major;
+static DEFINE_IDA(cxl_memdev_ida);
+static struct dentry *cxl_debugfs;
+static bool cxl_raw_allow_all;
+
+enum {
+ CEL_UUID,
+ VENDOR_DEBUG_UUID,
+};
+
+/* See CXL 2.0 Table 170. Get Log Input Payload */
+static const uuid_t log_uuid[] = {
+ [CEL_UUID] = UUID_INIT(0xda9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 0x96,
+ 0xb1, 0x62, 0x3b, 0x3f, 0x17),
+ [VENDOR_DEBUG_UUID] = UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f,
+ 0xd6, 0x07, 0x19, 0x40, 0x3d, 0x86),
+};
+
+/**
+ * struct cxl_mem_command - Driver representation of a memory device command
+ * @info: Command information as it exists for the UAPI
+ * @opcode: The actual bits used for the mailbox protocol
+ * @flags: Set of flags affecting driver behavior.
+ *
+ * * %CXL_CMD_FLAG_FORCE_ENABLE: In cases of error, commands with this flag
+ * will be enabled by the driver regardless of what hardware may have
+ * advertised.
+ *
+ * The cxl_mem_command is the driver's internal representation of commands that
+ * are supported by the driver. Some of these commands may not be supported by
+ * the hardware. The driver will use @info to validate the fields passed in by
+ * the user then submit the @opcode to the hardware.
+ *
+ * See struct cxl_command_info.
+ */
+struct cxl_mem_command {
+ struct cxl_command_info info;
+ enum opcode opcode;
+ u32 flags;
+#define CXL_CMD_FLAG_NONE 0
+#define CXL_CMD_FLAG_FORCE_ENABLE BIT(0)
+};
+
+#define CXL_CMD(_id, sin, sout, _flags) \
+ [CXL_MEM_COMMAND_ID_##_id] = { \
+ .info = { \
+ .id = CXL_MEM_COMMAND_ID_##_id, \
+ .size_in = sin, \
+ .size_out = sout, \
+ }, \
+ .opcode = CXL_MBOX_OP_##_id, \
+ .flags = _flags, \
+ }
+
+/*
+ * This table defines the supported mailbox commands for the driver. This table
+ * is made up of a UAPI structure. Non-negative size values in the table are
+ * validated against the user's input. For example, if size_in is 0 and the
+ * user passed in 1, it is an error.
+ */
+static struct cxl_mem_command mem_commands[] = {
+ CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
+#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
+ CXL_CMD(RAW, ~0, ~0, 0),
+#endif
+ CXL_CMD(GET_SUPPORTED_LOGS, 0, ~0, CXL_CMD_FLAG_FORCE_ENABLE),
+ CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
+ CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
+ CXL_CMD(GET_LSA, 0x8, ~0, 0),
+ CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
+ CXL_CMD(GET_LOG, 0x18, ~0, CXL_CMD_FLAG_FORCE_ENABLE),
+};
+
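To make the CXL_CMD() wrapper concrete, here is approximately what a single table entry expands to (illustration only, not additional lines in the patch):

/* CXL_CMD(GET_FW_INFO, 0, 0x50, 0) expands to roughly: */
[CXL_MEM_COMMAND_ID_GET_FW_INFO] = {
	.info = {
		.id = CXL_MEM_COMMAND_ID_GET_FW_INFO,
		.size_in = 0,
		.size_out = 0x50,
	},
	.opcode = CXL_MBOX_OP_GET_FW_INFO,
	.flags = 0,
},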
+/*
+ * Commands that RAW doesn't permit. The rationale for each:
+ *
+ * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
+ * coordination of transaction timeout values at the root bridge level.
+ *
+ * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
+ * and needs to be coordinated with HDM updates.
+ *
+ * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
+ * driver and any writes from userspace invalidates those contents.
+ *
+ * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
+ * to the device after it is marked clean; userspace cannot make that
+ * assertion.
+ *
+ * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
+ * is kept up to date with patrol notifications and error management.
+ */
+static u16 cxl_disabled_raw_commands[] = {
+ CXL_MBOX_OP_ACTIVATE_FW,
+ CXL_MBOX_OP_SET_PARTITION_INFO,
+ CXL_MBOX_OP_SET_LSA,
+ CXL_MBOX_OP_SET_SHUTDOWN_STATE,
+ CXL_MBOX_OP_SCAN_MEDIA,
+ CXL_MBOX_OP_GET_SCAN_MEDIA,
+};
+
+/*
+ * Command sets that RAW doesn't permit. All opcodes in this set are
+ * disabled because they pass plain text security payloads over the
+ * user/kernel boundary. This functionality is intended to be wrapped
+ * behind the keys ABI which allows for encrypted payloads in the UAPI
+ */
+static u8 security_command_sets[] = {
+ 0x44, /* Sanitize */
+ 0x45, /* Persistent Memory Data-at-rest Security */
+ 0x46, /* Security Passthrough */
+};
+
+#define cxl_for_each_cmd(cmd) \
+ for ((cmd) = &mem_commands[0]; \
+ ((cmd) - mem_commands) < ARRAY_SIZE(mem_commands); (cmd)++)
+
+#define cxl_cmd_count ARRAY_SIZE(mem_commands)
+
+static int cxl_mem_wait_for_doorbell(struct cxl_mem *cxlm)
+{
+ const unsigned long start = jiffies;
+ unsigned long end = start;
+
+ while (cxl_doorbell_busy(cxlm)) {
+ end = jiffies;
+
+ if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
+ /* Check again in case preempted before timeout test */
+ if (!cxl_doorbell_busy(cxlm))
+ break;
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ }
+
+ dev_dbg(&cxlm->pdev->dev, "Doorbell wait took %dms",
+ jiffies_to_msecs(end) - jiffies_to_msecs(start));
+ return 0;
+}
+
+static bool cxl_is_security_command(u16 opcode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
+ if (security_command_sets[i] == (opcode >> 8))
+ return true;
+ return false;
+}
+
+static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm,
+ struct mbox_cmd *mbox_cmd)
+{
+ struct device *dev = &cxlm->pdev->dev;
+
+ dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n",
+ mbox_cmd->opcode, mbox_cmd->size_in);
+}
+
+/**
+ * __cxl_mem_mbox_send_cmd() - Execute a mailbox command
+ * @cxlm: The CXL memory device to communicate with.
+ * @mbox_cmd: Command to send to the memory device.
+ *
+ * Context: Any context. Expects mbox_mutex to be held.
+ * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
+ * Caller should check the return code in @mbox_cmd to make sure it
+ * succeeded.
+ *
+ * This is a generic form of the CXL mailbox send command thus only using the
+ * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory
+ * devices, and perhaps other types of CXL devices may have further information
+ * available upon error conditions. Driver facilities wishing to send mailbox
+ * commands should use the wrapper command.
+ *
+ * The CXL spec allows for up to two mailboxes. The intention is for the primary
+ * mailbox to be OS controlled and the secondary mailbox to be used by system
+ * firmware. This allows the OS and firmware to communicate with the device
+ * without needing to coordinate with each other. The driver only uses the primary
+ * mailbox.
+ */
+static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm,
+ struct mbox_cmd *mbox_cmd)
+{
+ void __iomem *payload = cxlm->mbox_regs + CXLDEV_MBOX_PAYLOAD_OFFSET;
+ u64 cmd_reg, status_reg;
+ size_t out_len;
+ int rc;
+
+ lockdep_assert_held(&cxlm->mbox_mutex);
+
+ /*
+ * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
+ * 1. Caller reads MB Control Register to verify doorbell is clear
+ * 2. Caller writes Command Register
+ * 3. Caller writes Command Payload Registers if input payload is non-empty
+ * 4. Caller writes MB Control Register to set doorbell
+ * 5. Caller either polls for doorbell to be clear or waits for interrupt if configured
+ * 6. Caller reads MB Status Register to fetch Return code
+ * 7. If command successful, Caller reads Command Register to get Payload Length
+ * 8. If output payload is non-empty, host reads Command Payload Registers
+ *
+ * Hardware is free to do whatever it wants before the doorbell is rung,
+ * and isn't allowed to change anything after it clears the doorbell. As
+ * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
+ * also happen in any order (though some orders might not make sense).
+ */
+
+ /* #1 */
+ if (cxl_doorbell_busy(cxlm)) {
+ dev_err_ratelimited(&cxlm->pdev->dev,
+ "Mailbox re-busy after acquiring\n");
+ return -EBUSY;
+ }
+
+ cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
+ mbox_cmd->opcode);
+ if (mbox_cmd->size_in) {
+ if (WARN_ON(!mbox_cmd->payload_in))
+ return -EINVAL;
+
+ cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
+ mbox_cmd->size_in);
+ memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
+ }
+
+ /* #2, #3 */
+ writeq(cmd_reg, cxlm->mbox_regs + CXLDEV_MBOX_CMD_OFFSET);
+
+ /* #4 */
+ dev_dbg(&cxlm->pdev->dev, "Sending command\n");
+ writel(CXLDEV_MBOX_CTRL_DOORBELL,
+ cxlm->mbox_regs + CXLDEV_MBOX_CTRL_OFFSET);
+
+ /* #5 */
+ rc = cxl_mem_wait_for_doorbell(cxlm);
+ if (rc == -ETIMEDOUT) {
+ cxl_mem_mbox_timeout(cxlm, mbox_cmd);
+ return rc;
+ }
+
+ /* #6 */
+ status_reg = readq(cxlm->mbox_regs + CXLDEV_MBOX_STATUS_OFFSET);
+ mbox_cmd->return_code =
+ FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);
+
+ if (mbox_cmd->return_code != 0) {
+ dev_dbg(&cxlm->pdev->dev, "Mailbox operation had an error\n");
+ return 0;
+ }
+
+ /* #7 */
+ cmd_reg = readq(cxlm->mbox_regs + CXLDEV_MBOX_CMD_OFFSET);
+ out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);
+
+ /* #8 */
+ if (out_len && mbox_cmd->payload_out) {
+ /*
+ * Sanitize the copy. If hardware misbehaves, out_len per the
+ * spec can actually be greater than the max allowed size (21
+ * bits available but spec defined 1M max). The caller also may
+ * have requested less data than the hardware supplied even
+ * within spec.
+ */
+ size_t n = min3(mbox_cmd->size_out, cxlm->payload_size, out_len);
+
+ memcpy_fromio(mbox_cmd->payload_out, payload, n);
+ mbox_cmd->size_out = n;
+ } else {
+ mbox_cmd->size_out = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * cxl_mem_mbox_get() - Acquire exclusive access to the mailbox.
+ * @cxlm: The memory device to gain access to.
+ *
+ * Context: Any context. Takes the mbox_mutex.
+ * Return: 0 if exclusive access was acquired.
+ */
+static int cxl_mem_mbox_get(struct cxl_mem *cxlm)
+{
+ struct device *dev = &cxlm->pdev->dev;
+ u64 md_status;
+ int rc;
+
+ mutex_lock_io(&cxlm->mbox_mutex);
+
+ /*
+ * XXX: There is some amount of ambiguity in the 2.0 version of the spec
+ * around the mailbox interface ready (8.2.8.5.1.1). The purpose of the
+ * bit is to allow firmware running on the device to notify the driver
+ * that it's ready to receive commands. It is unclear if the bit needs
+ * to be read for each mailbox transaction, i.e. the firmware can switch
+ * it on and off as needed. Second, there is no defined timeout for
+ * mailbox ready, like there is for the doorbell interface.
+ *
+ * Assumptions:
+ * 1. The firmware might toggle the Mailbox Interface Ready bit, check
+ * it for every command.
+ *
+ * 2. If the doorbell is clear, the firmware should have first set the
+ * Mailbox Interface Ready bit. Therefore, waiting for the doorbell
+ * to be ready is sufficient.
+ */
+ rc = cxl_mem_wait_for_doorbell(cxlm);
+ if (rc) {
+ dev_warn(dev, "Mailbox interface not ready\n");
+ goto out;
+ }
+
+ md_status = readq(cxlm->memdev_regs + CXLMDEV_STATUS_OFFSET);
+ if (!(md_status & CXLMDEV_MBOX_IF_READY && CXLMDEV_READY(md_status))) {
+ dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n");
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * Hardware shouldn't allow a ready status but also have failure bits
+ * set. Spit out an error; this should be reported as a bug.
+ */
+ rc = -EFAULT;
+ if (md_status & CXLMDEV_DEV_FATAL) {
+ dev_err(dev, "mbox: reported ready, but fatal\n");
+ goto out;
+ }
+ if (md_status & CXLMDEV_FW_HALT) {
+ dev_err(dev, "mbox: reported ready, but halted\n");
+ goto out;
+ }
+ if (CXLMDEV_RESET_NEEDED(md_status)) {
+ dev_err(dev, "mbox: reported ready, but reset needed\n");
+ goto out;
+ }
+
+ /* with lock held */
+ return 0;
+
+out:
+ mutex_unlock(&cxlm->mbox_mutex);
+ return rc;
+}
+
+/**
+ * cxl_mem_mbox_put() - Release exclusive access to the mailbox.
+ * @cxlm: The CXL memory device to communicate with.
+ *
+ * Context: Any context. Expects mbox_mutex to be held.
+ */
+static void cxl_mem_mbox_put(struct cxl_mem *cxlm)
+{
+ mutex_unlock(&cxlm->mbox_mutex);
+}
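Putting the three helpers together, a kernel-internal caller is expected to follow a get/send/put sequence and then inspect the hardware return code separately from the transport result. The sketch below assumes a caller-provided output buffer and uses an invented function name; the ioctl path later in this file follows the same shape.

/* Sketch only: issue IDENTIFY and distinguish transport vs. device errors. */
static int example_identify(struct cxl_mem *cxlm, void *out, size_t out_size)
{
	struct mbox_cmd mbox_cmd = {
		.opcode = CXL_MBOX_OP_IDENTIFY,
		.payload_out = out,
		.size_out = out_size,
	};
	int rc;

	rc = cxl_mem_mbox_get(cxlm);		/* takes mbox_mutex, checks readiness */
	if (rc)
		return rc;

	rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd);
	cxl_mem_mbox_put(cxlm);			/* drops mbox_mutex */
	if (rc)
		return rc;			/* mailbox transport failed */

	if (mbox_cmd.return_code != CXL_MBOX_SUCCESS)
		return -ENXIO;			/* device reported an error */

	return 0;
}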
+
+/**
+ * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
+ * @cxlm: The CXL memory device to communicate with.
+ * @cmd: The validated command.
+ * @in_payload: Pointer to userspace's input payload.
+ * @out_payload: Pointer to userspace's output payload.
+ * @size_out: (Input) Max payload size to copy out.
+ * (Output) Payload size hardware generated.
+ * @retval: Hardware generated return code from the operation.
+ *
+ * Return:
+ * * %0 - Mailbox transaction succeeded. This implies the mailbox
+ * protocol completed successfully, not that the operation itself
+ * was successful.
+ * * %-ENOMEM - Couldn't allocate a bounce buffer.
+ * * %-EFAULT - Something happened with copy_to/from_user.
+ * * %-EINTR - Mailbox acquisition interrupted.
+ * * %-EXXX - Transaction level failures.
+ *
+ * Creates the appropriate mailbox command and dispatches it on behalf of a
+ * userspace request. The input and output payloads are copied between
+ * userspace.
+ *
+ * See cxl_send_cmd().
+ */
+static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm,
+ const struct cxl_mem_command *cmd,
+ u64 in_payload, u64 out_payload,
+ s32 *size_out, u32 *retval)
+{
+ struct device *dev = &cxlm->pdev->dev;
+ struct mbox_cmd mbox_cmd = {
+ .opcode = cmd->opcode,
+ .size_in = cmd->info.size_in,
+ .size_out = cmd->info.size_out,
+ };
+ int rc;
+
+ if (cmd->info.size_out) {
+ mbox_cmd.payload_out = kvzalloc(cmd->info.size_out, GFP_KERNEL);
+ if (!mbox_cmd.payload_out)
+ return -ENOMEM;
+ }
+
+ if (cmd->info.size_in) {
+ mbox_cmd.payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
+ cmd->info.size_in);
+ if (IS_ERR(mbox_cmd.payload_in)) {
+ kvfree(mbox_cmd.payload_out);
+ return PTR_ERR(mbox_cmd.payload_in);
+ }
+ }
+
+ rc = cxl_mem_mbox_get(cxlm);
+ if (rc)
+ goto out;
+
+ dev_dbg(dev,
+ "Submitting %s command for user\n"
+ "\topcode: %x\n"
+ "\tsize: %ub\n",
+ cxl_command_names[cmd->info.id].name, mbox_cmd.opcode,
+ cmd->info.size_in);
+
+ dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW,
+ "raw command path used\n");
+
+ rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd);
+ cxl_mem_mbox_put(cxlm);
+ if (rc)
+ goto out;
+
+ /*
+ * @size_out contains the max size that's allowed to be written back out
+ * to userspace. While the payload may have written more output than
+ * this, the excess will have to be ignored.
+ */
+ if (mbox_cmd.size_out) {
+ dev_WARN_ONCE(dev, mbox_cmd.size_out > *size_out,
+ "Invalid return size\n");
+ if (copy_to_user(u64_to_user_ptr(out_payload),
+ mbox_cmd.payload_out, mbox_cmd.size_out)) {
+ rc = -EFAULT;
+ goto out;
+ }
+ }
+
+ *size_out = mbox_cmd.size_out;
+ *retval = mbox_cmd.return_code;
+
+out:
+ kvfree(mbox_cmd.payload_in);
+ kvfree(mbox_cmd.payload_out);
+ return rc;
+}
+
+static bool cxl_mem_raw_command_allowed(u16 opcode)
+{
+ int i;
+
+ if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
+ return false;
+
+ if (security_locked_down(LOCKDOWN_NONE))
+ return false;
+
+ if (cxl_raw_allow_all)
+ return true;
+
+ if (cxl_is_security_command(opcode))
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
+ if (cxl_disabled_raw_commands[i] == opcode)
+ return false;
+
+ return true;
+}
+
+/**
+ * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
+ * @cxlm: &struct cxl_mem device whose mailbox will be used.
+ * @send_cmd: &struct cxl_send_command copied in from userspace.
+ * @out_cmd: Sanitized and populated &struct cxl_mem_command.
+ *
+ * Return:
+ * * %0 - @out_cmd is ready to send.
+ * * %-ENOTTY - Invalid command specified.
+ * * %-EINVAL - Reserved fields or invalid values were used.
+ * * %-ENOMEM - Input or output buffer wasn't sized properly.
+ * * %-EPERM - Attempted to use a protected command.
+ *
+ * The result of this command is a fully validated command in @out_cmd that is
+ * safe to send to the hardware.
+ *
+ * See handle_mailbox_cmd_from_user()
+ */
+static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
+ const struct cxl_send_command *send_cmd,
+ struct cxl_mem_command *out_cmd)
+{
+ const struct cxl_command_info *info;
+ struct cxl_mem_command *c;
+
+ if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
+ return -ENOTTY;
+
+ /*
+ * The user can never specify an input payload larger than what hardware
+ * supports, but output can be arbitrarily large (simply write out as
+ * much data as the hardware provides).
+ */
+ if (send_cmd->in.size > cxlm->payload_size)
+ return -EINVAL;
+
+ /*
+ * Checks are bypassed for raw commands but a WARN/taint will occur
+ * later in the callchain
+ */
+ if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) {
+ const struct cxl_mem_command temp = {
+ .info = {
+ .id = CXL_MEM_COMMAND_ID_RAW,
+ .flags = 0,
+ .size_in = send_cmd->in.size,
+ .size_out = send_cmd->out.size,
+ },
+ .opcode = send_cmd->raw.opcode
+ };
+
+ if (send_cmd->raw.rsvd)
+ return -EINVAL;
+
+ /*
+ * Unlike supported commands, the output size of RAW commands
+ * gets passed along without further checking, so it must be
+ * validated here.
+ */
+ if (send_cmd->out.size > cxlm->payload_size)
+ return -EINVAL;
+
+ if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
+ return -EPERM;
+
+ memcpy(out_cmd, &temp, sizeof(temp));
+
+ return 0;
+ }
+
+ if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
+ return -EINVAL;
+
+ if (send_cmd->rsvd)
+ return -EINVAL;
+
+ if (send_cmd->in.rsvd || send_cmd->out.rsvd)
+ return -EINVAL;
+
+ /* Convert user's command into the internal representation */
+ c = &mem_commands[send_cmd->id];
+ info = &c->info;
+
+ /* Check that the command is enabled for hardware */
+ if (!test_bit(info->id, cxlm->enabled_cmds))
+ return -ENOTTY;
+
+ /* Check the input buffer is the expected size */
+ if (info->size_in >= 0 && info->size_in != send_cmd->in.size)
+ return -ENOMEM;
+
+ /* Check the output buffer is at least large enough */
+ if (info->size_out >= 0 && send_cmd->out.size < info->size_out)
+ return -ENOMEM;
+
+ memcpy(out_cmd, c, sizeof(*c));
+ out_cmd->info.size_in = send_cmd->in.size;
+ /*
+ * XXX: out_cmd->info.size_out will be controlled by the driver, and the
+ * specified number of bytes @send_cmd->out.size will be copied back out
+ * to userspace.
+ */
+
+ return 0;
+}
+
+static int cxl_query_cmd(struct cxl_memdev *cxlmd,
+ struct cxl_mem_query_commands __user *q)
+{
+ struct device *dev = &cxlmd->dev;
+ struct cxl_mem_command *cmd;
+ u32 n_commands;
+ int j = 0;
+
+ dev_dbg(dev, "Query IOCTL\n");
+
+ if (get_user(n_commands, &q->n_commands))
+ return -EFAULT;
+
+ /* returns the total number if 0 elements are requested. */
+ if (n_commands == 0)
+ return put_user(cxl_cmd_count, &q->n_commands);
+
+ /*
+ * otherwise, return min(n_commands, total commands) cxl_command_info
+ * structures.
+ */
+ cxl_for_each_cmd(cmd) {
+ const struct cxl_command_info *info = &cmd->info;
+
+ if (copy_to_user(&q->commands[j++], info, sizeof(*info)))
+ return -EFAULT;
+
+ if (j == n_commands)
+ break;
+ }
+
+ return 0;
+}
+
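
An illustrative, unverified userspace sketch of the two-pass protocol implemented above: a zero n_commands asks only for the total count, and a second call with a sized array retrieves up to that many cxl_command_info entries. The UAPI header path and field names are assumed from the code shown here, not confirmed.

/* Illustrative sketch only -- not part of this patch. */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/cxl_mem.h>	/* assumed UAPI header location */

static struct cxl_mem_query_commands *query_commands(int fd)
{
	struct cxl_mem_query_commands *q, *tmp;
	__u32 n;

	/* Pass 1: n_commands == 0 returns only the total command count. */
	q = calloc(1, sizeof(*q));
	if (!q || ioctl(fd, CXL_MEM_QUERY_COMMANDS, q) < 0)
		goto err;

	/* Pass 2: size the array and fetch the cxl_command_info entries. */
	n = q->n_commands;
	tmp = realloc(q, sizeof(*q) + n * sizeof(q->commands[0]));
	if (!tmp)
		goto err;
	q = tmp;
	q->n_commands = n;
	if (ioctl(fd, CXL_MEM_QUERY_COMMANDS, q) < 0)
		goto err;
	return q;
err:
	free(q);
	return NULL;
}
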
+static int cxl_send_cmd(struct cxl_memdev *cxlmd,
+ struct cxl_send_command __user *s)
+{
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+ struct device *dev = &cxlmd->dev;
+ struct cxl_send_command send;
+ struct cxl_mem_command c;
+ int rc;
+
+ dev_dbg(dev, "Send IOCTL\n");
+
+ if (copy_from_user(&send, s, sizeof(send)))
+ return -EFAULT;
+
+ rc = cxl_validate_cmd_from_user(cxlmd->cxlm, &send, &c);
+ if (rc)
+ return rc;
+
+ /* Prepare to handle a full payload for variable sized output */
+ if (c.info.size_out < 0)
+ c.info.size_out = cxlm->payload_size;
+
+ rc = handle_mailbox_cmd_from_user(cxlm, &c, send.in.payload,
+ send.out.payload, &send.out.size,
+ &send.retval);
+ if (rc)
+ return rc;
+
+ if (copy_to_user(s, &send, sizeof(send)))
+ return -EFAULT;
+
+ return 0;
+}
+
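
A minimal, hedged userspace sketch of CXL_MEM_SEND_COMMAND for a command with no input payload; the command id would normally come from a prior query, and the field names mirror the validation code above rather than a verified UAPI header.

/* Illustrative sketch only -- not part of this patch. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/cxl_mem.h>	/* assumed UAPI header location */

static int send_simple_cmd(int fd, __u32 id, void *out, __s32 out_size)
{
	struct cxl_send_command cmd = {
		.id = id,			/* e.g. from CXL_MEM_QUERY_COMMANDS */
		.out = {
			.size = out_size,
			.payload = (__u64)(uintptr_t)out,
		},
	};

	/* in.size stays 0 for commands that take no input payload. */
	if (ioctl(fd, CXL_MEM_SEND_COMMAND, &cmd) < 0)
		return -1;

	/* The driver writes back the actual output size and hardware retval. */
	return cmd.retval ? -1 : (int)cmd.out.size;
}
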
+static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case CXL_MEM_QUERY_COMMANDS:
+ return cxl_query_cmd(cxlmd, (void __user *)arg);
+ case CXL_MEM_SEND_COMMAND:
+ return cxl_send_cmd(cxlmd, (void __user *)arg);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct cxl_memdev *cxlmd;
+ struct inode *inode;
+ int rc = -ENOTTY;
+
+ inode = file_inode(file);
+ cxlmd = container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+
+ if (!percpu_ref_tryget_live(&cxlmd->ops_active))
+ return -ENXIO;
+
+ rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+
+ percpu_ref_put(&cxlmd->ops_active);
+
+ return rc;
+}
+
+static const struct file_operations cxl_memdev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = cxl_memdev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .llseek = noop_llseek,
+};
+
+static inline struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
+{
+ struct cxl_mem_command *c;
+
+ cxl_for_each_cmd(c)
+ if (c->opcode == opcode)
+ return c;
+
+ return NULL;
+}
+
+/**
+ * cxl_mem_mbox_send_cmd() - Send a mailbox command to a memory device.
+ * @cxlm: The CXL memory device to communicate with.
+ * @opcode: Opcode for the mailbox command.
+ * @in: The input payload for the mailbox command.
+ * @in_size: The length of the input payload
+ * @out: Caller allocated buffer for the output.
+ * @out_size: Expected size of output.
+ *
+ * Context: Any context. Will acquire and release mbox_mutex.
+ * Return:
+ * * %0 - Success, output (if any) is returned in @out.
+ * * %-E2BIG - Payload is too large for hardware.
+ * * %-EBUSY - Couldn't acquire exclusive mailbox access.
+ * * %-EFAULT - Hardware error occurred.
+ * * %-ENXIO - Command completed, but device reported an error.
+ * * %-EIO - Unexpected output size.
+ *
+ * Mailbox commands may execute successfully, yet the device itself may report
+ * an error. While this distinction can be useful for commands from userspace, the
+ * kernel will only be able to use results when both are successful.
+ *
+ * See __cxl_mem_mbox_send_cmd()
+ */
+static int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode,
+ void *in, size_t in_size,
+ void *out, size_t out_size)
+{
+ const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
+ struct mbox_cmd mbox_cmd = {
+ .opcode = opcode,
+ .payload_in = in,
+ .size_in = in_size,
+ .size_out = out_size,
+ .payload_out = out,
+ };
+ int rc;
+
+ if (out_size > cxlm->payload_size)
+ return -E2BIG;
+
+ rc = cxl_mem_mbox_get(cxlm);
+ if (rc)
+ return rc;
+
+ rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd);
+ cxl_mem_mbox_put(cxlm);
+ if (rc)
+ return rc;
+
+ /* TODO: Map return code to proper kernel style errno */
+ if (mbox_cmd.return_code != CXL_MBOX_SUCCESS)
+ return -ENXIO;
+
+ /*
+ * Variable sized commands can't be validated and so it's up to the
+ * caller to do that if they wish.
+ */
+ if (cmd->info.size_out >= 0 && mbox_cmd.size_out != out_size)
+ return -EIO;
+
+ return 0;
+}
+
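
A hedged kernel-side sketch of a fixed-size caller of cxl_mem_mbox_send_cmd(); the opcode constant and output layout here are hypothetical placeholders (cxl_mem_identify() further down is the in-tree example of this pattern), illustrating that a short device reply surfaces as -EIO.

/* Illustrative sketch only -- hypothetical opcode and output structure. */
static int cxl_mem_get_health(struct cxl_mem *cxlm)
{
	struct {
		u8 health_status;
		u8 media_status;
		u8 rsvd[62];
	} __packed info;
	int rc;

	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_HEALTH_INFO, NULL, 0,
				   &info, sizeof(info));
	if (rc < 0)
		return rc;	/* includes -EIO if the device returned a short payload */

	dev_dbg(&cxlm->pdev->dev, "health: %#x\n", info.health_status);
	return 0;
}
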
+/**
+ * cxl_mem_setup_regs() - Setup necessary MMIO.
+ * @cxlm: The CXL memory device to communicate with.
+ *
+ * Return: 0 if all necessary registers mapped.
+ *
+ * A memory device is required by spec to implement a certain set of MMIO
+ * regions. The purpose of this function is to enumerate and map those
+ * registers.
+ */
+static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
+{
+ struct device *dev = &cxlm->pdev->dev;
+ int cap, cap_count;
+ u64 cap_array;
+
+ cap_array = readq(cxlm->regs + CXLDEV_CAP_ARRAY_OFFSET);
+ if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
+ CXLDEV_CAP_ARRAY_CAP_ID)
+ return -ENODEV;
+
+ cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
+
+ for (cap = 1; cap <= cap_count; cap++) {
+ void __iomem *register_block;
+ u32 offset;
+ u16 cap_id;
+
+ cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
+ readl(cxlm->regs + cap * 0x10));
+ offset = readl(cxlm->regs + cap * 0x10 + 0x4);
+ register_block = cxlm->regs + offset;
+
+ switch (cap_id) {
+ case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
+ dev_dbg(dev, "found Status capability (0x%x)\n", offset);
+ cxlm->status_regs = register_block;
+ break;
+ case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
+ dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
+ cxlm->mbox_regs = register_block;
+ break;
+ case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
+ dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
+ break;
+ case CXLDEV_CAP_CAP_ID_MEMDEV:
+ dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
+ cxlm->memdev_regs = register_block;
+ break;
+ default:
+ dev_dbg(dev, "Unknown cap ID: %d (0x%x)\n", cap_id, offset);
+ break;
+ }
+ }
+
+ if (!cxlm->status_regs || !cxlm->mbox_regs || !cxlm->memdev_regs) {
+ dev_err(dev, "registers not found: %s%s%s\n",
+ !cxlm->status_regs ? "status " : "",
+ !cxlm->mbox_regs ? "mbox " : "",
+ !cxlm->memdev_regs ? "memdev" : "");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
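
A hedged sketch of the layout walked by cxl_mem_setup_regs() above, per CXL 2.0 section 8.2.8: a capability array header at offset 0 followed by 16-byte capability headers whose second dword gives the offset of that capability's register block. The field positions below are taken from the spec and are not re-verified here.

/* Illustrative sketch only -- approximate register layout, not driver code. */
struct cxl_device_cap_header {
	u32 id_version;		/* cap ID in bits [15:0], version in [23:16] */
	u32 offset;		/* register block offset from cxlm->regs */
	u32 length;		/* register block length */
	u32 rsvd;
};
/* The header for capability 'cap' lives at cxlm->regs + cap * 0x10. */
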
+static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm)
+{
+ const int cap = readl(cxlm->mbox_regs + CXLDEV_MBOX_CAPS_OFFSET);
+
+ cxlm->payload_size =
+ 1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);
+
+ /*
+ * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register
+ *
+ * If the size is too small, mandatory commands will not work and so
+ * there's no point in going forward. If the size is too large, there's
+ * no harm in soft limiting it.
+ */
+ cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M);
+ if (cxlm->payload_size < 256) {
+ dev_err(&cxlm->pdev->dev, "Mailbox is too small (%zub)",
+ cxlm->payload_size);
+ return -ENXIO;
+ }
+
+ dev_dbg(&cxlm->pdev->dev, "Mailbox payload sized %zu",
+ cxlm->payload_size);
+
+ return 0;
+}
+
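
The Payload Size capability field encodes the mailbox payload as a power of two; a brief worked example of the computation above, assuming the field values shown:

/* Illustrative sketch only. */
/* field ==  8 -> 1 <<  8 == 256 bytes, the spec minimum accepted above      */
/* field == 20 -> 1 << 20 == 1 MiB                                           */
/* field == 21 -> 1 << 21 == 2 MiB, clamped back to SZ_1M by min_t() above   */
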
+static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev, u32 reg_lo,
+ u32 reg_hi)
+{
+ struct device *dev = &pdev->dev;
+ struct cxl_mem *cxlm;
+ void __iomem *regs;
+ u64 offset;
+ u8 bar;
+ int rc;
+
+ cxlm = devm_kzalloc(&pdev->dev, sizeof(*cxlm), GFP_KERNEL);
+ if (!cxlm) {
+ dev_err(dev, "No memory available\n");
+ return NULL;
+ }
+
+ offset = ((u64)reg_hi << 32) | FIELD_GET(CXL_REGLOC_ADDR_MASK, reg_lo);
+ bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
+
+ /* Basic sanity check that BAR is big enough */
+ if (pci_resource_len(pdev, bar) < offset) {
+ dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar,
+ &pdev->resource[bar], (unsigned long long)offset);
+ return NULL;
+ }
+
+ rc = pcim_iomap_regions(pdev, BIT(bar), pci_name(pdev));
+ if (rc) {
+ dev_err(dev, "failed to map registers\n");
+ return NULL;
+ }
+ regs = pcim_iomap_table(pdev)[bar];
+
+ mutex_init(&cxlm->mbox_mutex);
+ cxlm->pdev = pdev;
+ cxlm->regs = regs + offset;
+ cxlm->enabled_cmds =
+ devm_kmalloc_array(dev, BITS_TO_LONGS(cxl_cmd_count),
+ sizeof(unsigned long),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!cxlm->enabled_cmds) {
+ dev_err(dev, "No memory available for bitmap\n");
+ return NULL;
+ }
+
+ dev_dbg(dev, "Mapped CXL Memory Device resource\n");
+ return cxlm;
+}
+
+static int cxl_mem_dvsec(struct pci_dev *pdev, int dvsec)
+{
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DVSEC);
+ if (!pos)
+ return 0;
+
+ while (pos) {
+ u16 vendor, id;
+
+ pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vendor);
+ pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, &id);
+ if (vendor == PCI_DVSEC_VENDOR_ID_CXL && dvsec == id)
+ return pos;
+
+ pos = pci_find_next_ext_capability(pdev, pos,
+ PCI_EXT_CAP_ID_DVSEC);
+ }
+
+ return 0;
+}
+
+static struct cxl_memdev *to_cxl_memdev(struct device *dev)
+{
+ return container_of(dev, struct cxl_memdev, dev);
+}
+
+static void cxl_memdev_release(struct device *dev)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+
+ percpu_ref_exit(&cxlmd->ops_active);
+ ida_free(&cxl_memdev_ida, cxlmd->id);
+ kfree(cxlmd);
+}
+
+static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
+ kgid_t *gid)
+{
+ return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
+}
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+
+ return sprintf(buf, "%.16s\n", cxlm->firmware_version);
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static ssize_t payload_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+
+ return sprintf(buf, "%zu\n", cxlm->payload_size);
+}
+static DEVICE_ATTR_RO(payload_max);
+
+static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+ unsigned long long len = range_len(&cxlm->ram_range);
+
+ return sprintf(buf, "%#llx\n", len);
+}
+
+static struct device_attribute dev_attr_ram_size =
+ __ATTR(size, 0444, ram_size_show, NULL);
+
+static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+ unsigned long long len = range_len(&cxlm->pmem_range);
+
+ return sprintf(buf, "%#llx\n", len);
+}
+
+static struct device_attribute dev_attr_pmem_size =
+ __ATTR(size, 0444, pmem_size_show, NULL);
+
+static struct attribute *cxl_memdev_attributes[] = {
+ &dev_attr_firmware_version.attr,
+ &dev_attr_payload_max.attr,
+ NULL,
+};
+
+static struct attribute *cxl_memdev_pmem_attributes[] = {
+ &dev_attr_pmem_size.attr,
+ NULL,
+};
+
+static struct attribute *cxl_memdev_ram_attributes[] = {
+ &dev_attr_ram_size.attr,
+ NULL,
+};
+
+static struct attribute_group cxl_memdev_attribute_group = {
+ .attrs = cxl_memdev_attributes,
+};
+
+static struct attribute_group cxl_memdev_ram_attribute_group = {
+ .name = "ram",
+ .attrs = cxl_memdev_ram_attributes,
+};
+
+static struct attribute_group cxl_memdev_pmem_attribute_group = {
+ .name = "pmem",
+ .attrs = cxl_memdev_pmem_attributes,
+};
+
+static const struct attribute_group *cxl_memdev_attribute_groups[] = {
+ &cxl_memdev_attribute_group,
+ &cxl_memdev_ram_attribute_group,
+ &cxl_memdev_pmem_attribute_group,
+ NULL,
+};
+
+static const struct device_type cxl_memdev_type = {
+ .name = "cxl_memdev",
+ .release = cxl_memdev_release,
+ .devnode = cxl_memdev_devnode,
+ .groups = cxl_memdev_attribute_groups,
+};
+
+static void cxlmdev_unregister(void *_cxlmd)
+{
+ struct cxl_memdev *cxlmd = _cxlmd;
+ struct device *dev = &cxlmd->dev;
+
+ percpu_ref_kill(&cxlmd->ops_active);
+ cdev_device_del(&cxlmd->cdev, dev);
+ wait_for_completion(&cxlmd->ops_dead);
+ cxlmd->cxlm = NULL;
+ put_device(dev);
+}
+
+static void cxlmdev_ops_active_release(struct percpu_ref *ref)
+{
+ struct cxl_memdev *cxlmd =
+ container_of(ref, typeof(*cxlmd), ops_active);
+
+ complete(&cxlmd->ops_dead);
+}
+
+static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
+{
+ struct pci_dev *pdev = cxlm->pdev;
+ struct cxl_memdev *cxlmd;
+ struct device *dev;
+ struct cdev *cdev;
+ int rc;
+
+ cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
+ if (!cxlmd)
+ return -ENOMEM;
+ init_completion(&cxlmd->ops_dead);
+
+ /*
+ * @cxlm is deallocated when the driver unbinds so operations
+ * that are using it need to hold a live reference.
+ */
+ cxlmd->cxlm = cxlm;
+ rc = percpu_ref_init(&cxlmd->ops_active, cxlmdev_ops_active_release, 0,
+ GFP_KERNEL);
+ if (rc)
+ goto err_ref;
+
+ rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
+ if (rc < 0)
+ goto err_id;
+ cxlmd->id = rc;
+
+ dev = &cxlmd->dev;
+ device_initialize(dev);
+ dev->parent = &pdev->dev;
+ dev->bus = &cxl_bus_type;
+ dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
+ dev->type = &cxl_memdev_type;
+ dev_set_name(dev, "mem%d", cxlmd->id);
+
+ cdev = &cxlmd->cdev;
+ cdev_init(cdev, &cxl_memdev_fops);
+
+ rc = cdev_device_add(cdev, dev);
+ if (rc)
+ goto err_add;
+
+ return devm_add_action_or_reset(dev->parent, cxlmdev_unregister, cxlmd);
+
+err_add:
+ ida_free(&cxl_memdev_ida, cxlmd->id);
+err_id:
+ /*
+ * Theoretically userspace could have already entered the fops,
+ * so flush ops_active.
+ */
+ percpu_ref_kill(&cxlmd->ops_active);
+ wait_for_completion(&cxlmd->ops_dead);
+ percpu_ref_exit(&cxlmd->ops_active);
+err_ref:
+ kfree(cxlmd);
+
+ return rc;
+}
+
+static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
+{
+ u32 remaining = size;
+ u32 offset = 0;
+
+ while (remaining) {
+ u32 xfer_size = min_t(u32, remaining, cxlm->payload_size);
+ struct cxl_mbox_get_log {
+ uuid_t uuid;
+ __le32 offset;
+ __le32 length;
+ } __packed log = {
+ .uuid = *uuid,
+ .offset = cpu_to_le32(offset),
+ .length = cpu_to_le32(xfer_size)
+ };
+ int rc;
+
+ rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LOG, &log,
+ sizeof(log), out, xfer_size);
+ if (rc < 0)
+ return rc;
+
+ out += xfer_size;
+ remaining -= xfer_size;
+ offset += xfer_size;
+ }
+
+ return 0;
+}
+
+/**
+ * cxl_walk_cel() - Walk through the Command Effects Log.
+ * @cxlm: Device.
+ * @size: Length of the Command Effects Log.
+ * @cel: Command Effects Log buffer (retrieved via GET_LOG)
+ *
+ * Iterate over each entry in the CEL and determine if the driver supports the
+ * command. If so, the command is enabled for the device and can be used later.
+ */
+static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel)
+{
+ struct cel_entry {
+ __le16 opcode;
+ __le16 effect;
+ } __packed *cel_entry;
+ const int cel_entries = size / sizeof(*cel_entry);
+ int i;
+
+ cel_entry = (struct cel_entry *)cel;
+
+ for (i = 0; i < cel_entries; i++) {
+ u16 opcode = le16_to_cpu(cel_entry[i].opcode);
+ struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
+
+ if (!cmd) {
+ dev_dbg(&cxlm->pdev->dev,
+ "Opcode 0x%04x unsupported by driver", opcode);
+ continue;
+ }
+
+ set_bit(cmd->info.id, cxlm->enabled_cmds);
+ }
+}
+
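
A hedged sketch of the buffer consumed by cxl_walk_cel() above: each CEL entry is a little-endian 2-byte opcode followed by a 2-byte effects field, so a two-entry log occupies 8 bytes. The opcode values below follow the CXL 2.0 command set and are only illustrative.

/* Illustrative sketch only. */
static u8 example_cel[] = {
	0x00, 0x40, 0x00, 0x00,	/* opcode 0x4000 (Identify), effects 0x0000 */
	0x01, 0x04, 0x00, 0x00,	/* opcode 0x0401 (Get Log),  effects 0x0000 */
};

/* cxl_walk_cel(cxlm, sizeof(example_cel), example_cel) would enable both
 * commands, provided the driver lists them in mem_commands[]. */
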
+struct cxl_mbox_get_supported_logs {
+ __le16 entries;
+ u8 rsvd[6];
+ struct gsl_entry {
+ uuid_t uuid;
+ __le32 size;
+ } __packed entry[];
+} __packed;
+
+static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm)
+{
+ struct cxl_mbox_get_supported_logs *ret;
+ int rc;
+
+ ret = kvmalloc(cxlm->payload_size, GFP_KERNEL);
+ if (!ret)
+ return ERR_PTR(-ENOMEM);
+
+ rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL,
+ 0, ret, cxlm->payload_size);
+ if (rc < 0) {
+ kvfree(ret);
+ return ERR_PTR(rc);
+ }
+
+ return ret;
+}
+
+/**
+ * cxl_mem_enumerate_cmds() - Enumerate commands for a device.
+ * @cxlm: The device.
+ *
+ * Return: 0 if enumeration completed successfully.
+ *
+ * CXL devices have optional support for certain commands. This function will
+ * determine the set of supported commands for the hardware and update the
+ * enabled_cmds bitmap in the @cxlm.
+ */
+static int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm)
+{
+ struct cxl_mbox_get_supported_logs *gsl;
+ struct device *dev = &cxlm->pdev->dev;
+ struct cxl_mem_command *cmd;
+ int i, rc;
+
+ gsl = cxl_get_gsl(cxlm);
+ if (IS_ERR(gsl))
+ return PTR_ERR(gsl);
+
+ rc = -ENOENT;
+ for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
+ u32 size = le32_to_cpu(gsl->entry[i].size);
+ uuid_t uuid = gsl->entry[i].uuid;
+ u8 *log;
+
+ dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);
+
+ if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
+ continue;
+
+ log = kvmalloc(size, GFP_KERNEL);
+ if (!log) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = cxl_xfer_log(cxlm, &uuid, size, log);
+ if (rc) {
+ kvfree(log);
+ goto out;
+ }
+
+ cxl_walk_cel(cxlm, size, log);
+ kvfree(log);
+
+ /* In case CEL was bogus, enable some default commands. */
+ cxl_for_each_cmd(cmd)
+ if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
+ set_bit(cmd->info.id, cxlm->enabled_cmds);
+
+ /* Found the required CEL */
+ rc = 0;
+ }
+
+out:
+ kvfree(gsl);
+ return rc;
+}
+
+/**
+ * cxl_mem_identify() - Send the IDENTIFY command to the device.
+ * @cxlm: The device to identify.
+ *
+ * Return: 0 if identify was executed successfully.
+ *
+ * This will dispatch the identify command to the device and on success populate
+ * structures to be exported to sysfs.
+ */
+static int cxl_mem_identify(struct cxl_mem *cxlm)
+{
+ struct cxl_mbox_identify {
+ char fw_revision[0x10];
+ __le64 total_capacity;
+ __le64 volatile_capacity;
+ __le64 persistent_capacity;
+ __le64 partition_align;
+ __le16 info_event_log_size;
+ __le16 warning_event_log_size;
+ __le16 failure_event_log_size;
+ __le16 fatal_event_log_size;
+ __le32 lsa_size;
+ u8 poison_list_max_mer[3];
+ __le16 inject_poison_limit;
+ u8 poison_caps;
+ u8 qos_telemetry_caps;
+ } __packed id;
+ int rc;
+
+ rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
+ sizeof(id));
+ if (rc < 0)
+ return rc;
+
+ /*
+ * TODO: enumerate DPA map, as 'ram' and 'pmem' do not alias.
+ * For now, only the capacity is exported in sysfs.
+ */
+ cxlm->ram_range.start = 0;
+ cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) - 1;
+
+ cxlm->pmem_range.start = 0;
+ cxlm->pmem_range.end = le64_to_cpu(id.persistent_capacity) - 1;
+
+ memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
+
+ return 0;
+}
+
+static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct cxl_mem *cxlm = NULL;
+ u32 regloc_size, regblocks;
+ int rc, regloc, i;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_OFFSET);
+ if (!regloc) {
+ dev_err(dev, "register location dvsec not found\n");
+ return -ENXIO;
+ }
+
+ /* Get the size of the Register Locator DVSEC */
+ pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, &regloc_size);
+ regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size);
+
+ regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET;
+ regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8;
+
+ for (i = 0; i < regblocks; i++, regloc += 8) {
+ u32 reg_lo, reg_hi;
+ u8 reg_type;
+
+ /* "register low and high" contain other bits */
+ pci_read_config_dword(pdev, regloc, &reg_lo);
+ pci_read_config_dword(pdev, regloc + 4, &reg_hi);
+
+ reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo);
+
+ if (reg_type == CXL_REGLOC_RBI_MEMDEV) {
+ cxlm = cxl_mem_create(pdev, reg_lo, reg_hi);
+ break;
+ }
+ }
+
+ if (!cxlm)
+ return -ENODEV;
+
+ rc = cxl_mem_setup_regs(cxlm);
+ if (rc)
+ return rc;
+
+ rc = cxl_mem_setup_mailbox(cxlm);
+ if (rc)
+ return rc;
+
+ rc = cxl_mem_enumerate_cmds(cxlm);
+ if (rc)
+ return rc;
+
+ rc = cxl_mem_identify(cxlm);
+ if (rc)
+ return rc;
+
+ return cxl_mem_add_memdev(cxlm);
+}
+
+static const struct pci_device_id cxl_mem_pci_tbl[] = {
+ /* PCI class code for CXL.mem Type-3 Devices */
+ { PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
+ { /* terminate list */ },
+};
+MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);
+
+static struct pci_driver cxl_mem_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = cxl_mem_pci_tbl,
+ .probe = cxl_mem_probe,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+static __init int cxl_mem_init(void)
+{
+ struct dentry *mbox_debugfs;
+ dev_t devt;
+ int rc;
+
+ rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
+ if (rc)
+ return rc;
+
+ cxl_mem_major = MAJOR(devt);
+
+ rc = pci_register_driver(&cxl_mem_driver);
+ if (rc) {
+ unregister_chrdev_region(MKDEV(cxl_mem_major, 0),
+ CXL_MEM_MAX_DEVS);
+ return rc;
+ }
+
+ cxl_debugfs = debugfs_create_dir("cxl", NULL);
+ mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
+ debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
+ &cxl_raw_allow_all);
+
+ return 0;
+}
+
+static __exit void cxl_mem_exit(void)
+{
+ debugfs_remove_recursive(cxl_debugfs);
+ pci_unregister_driver(&cxl_mem_driver);
+ unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
+}
+
+MODULE_LICENSE("GPL v2");
+module_init(cxl_mem_init);
+module_exit(cxl_mem_exit);
+MODULE_IMPORT_NS(CXL);
diff --git a/drivers/cxl/pci.h b/drivers/cxl/pci.h
new file mode 100644
index 000000000000..af3ec078cf6c
--- /dev/null
+++ b/drivers/cxl/pci.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#ifndef __CXL_PCI_H__
+#define __CXL_PCI_H__
+
+#define CXL_MEMORY_PROGIF 0x10
+
+/*
+ * See section 8.1 Configuration Space Registers in the CXL 2.0
+ * Specification
+ */
+#define PCI_DVSEC_HEADER1_LENGTH_MASK GENMASK(31, 20)
+#define PCI_DVSEC_VENDOR_ID_CXL 0x1E98
+#define PCI_DVSEC_ID_CXL 0x0
+
+#define PCI_DVSEC_ID_CXL_REGLOC_OFFSET 0x8
+#define PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET 0xC
+
+/* BAR Indicator Register (BIR) */
+#define CXL_REGLOC_BIR_MASK GENMASK(2, 0)
+
+/* Register Block Identifier (RBI) */
+#define CXL_REGLOC_RBI_MASK GENMASK(15, 8)
+#define CXL_REGLOC_RBI_EMPTY 0
+#define CXL_REGLOC_RBI_COMPONENT 1
+#define CXL_REGLOC_RBI_VIRT 2
+#define CXL_REGLOC_RBI_MEMDEV 3
+
+#define CXL_REGLOC_ADDR_MASK GENMASK(31, 16)
+
+#endif /* __CXL_PCI_H__ */
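
A hedged sketch of decoding one Register Locator entry with the masks above, mirroring cxl_mem_create()/cxl_mem_probe() in pci.c; the two dwords are read with pci_read_config_dword() and FIELD_GET() comes from linux/bitfield.h.

/* Illustrative sketch only. */
static bool cxl_regloc_is_memdev(u32 reg_lo, u32 reg_hi, u8 *bar, u64 *offset)
{
	*bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
	*offset = ((u64)reg_hi << 32) | FIELD_GET(CXL_REGLOC_ADDR_MASK, reg_lo);

	return FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo) == CXL_REGLOC_RBI_MEMDEV;
}
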
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 737b207c9e30..452e85ae87a8 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -179,7 +179,10 @@ static int dax_bus_remove(struct device *dev)
struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
struct dev_dax *dev_dax = to_dev_dax(dev);
- return dax_drv->remove(dev_dax);
+ if (dax_drv->remove)
+ dax_drv->remove(dev_dax);
+
+ return 0;
}
static struct bus_type dax_bus_type = {
@@ -1038,7 +1041,7 @@ static ssize_t range_parse(const char *opt, size_t len, struct range *range)
{
unsigned long long addr = 0;
char *start, *end, *str;
- ssize_t rc = EINVAL;
+ ssize_t rc = -EINVAL;
str = kstrdup(opt, GFP_KERNEL);
if (!str)
@@ -1392,6 +1395,13 @@ int __dax_driver_register(struct dax_device_driver *dax_drv,
struct device_driver *drv = &dax_drv->drv;
int rc = 0;
+ /*
+ * dax_bus_probe() calls dax_drv->probe() unconditionally.
+ * So better be safe than sorry and ensure it is provided.
+ */
+ if (!dax_drv->probe)
+ return -EINVAL;
+
INIT_LIST_HEAD(&dax_drv->ids);
drv->owner = module;
drv->name = mod_name;
@@ -1409,7 +1419,15 @@ int __dax_driver_register(struct dax_device_driver *dax_drv,
mutex_unlock(&dax_bus_lock);
if (rc)
return rc;
- return driver_register(drv);
+
+ rc = driver_register(drv);
+ if (rc && dax_drv->match_always) {
+ mutex_lock(&dax_bus_lock);
+ match_always_count -= dax_drv->match_always;
+ mutex_unlock(&dax_bus_lock);
+ }
+
+ return rc;
}
EXPORT_SYMBOL_GPL(__dax_driver_register);
diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h
index 72b92f95509f..1e946ad7780a 100644
--- a/drivers/dax/bus.h
+++ b/drivers/dax/bus.h
@@ -39,7 +39,7 @@ struct dax_device_driver {
struct list_head ids;
int match_always;
int (*probe)(struct dev_dax *dev);
- int (*remove)(struct dev_dax *dev);
+ void (*remove)(struct dev_dax *dev);
};
int __dax_driver_register(struct dax_device_driver *dax_drv,
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 5da2980bb16b..db92573c94e8 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -452,15 +452,9 @@ int dev_dax_probe(struct dev_dax *dev_dax)
}
EXPORT_SYMBOL_GPL(dev_dax_probe);
-static int dev_dax_remove(struct dev_dax *dev_dax)
-{
- /* all probe actions are unwound by devm */
- return 0;
-}
-
static struct dax_device_driver device_dax_driver = {
.probe = dev_dax_probe,
- .remove = dev_dax_remove,
+ /* all probe actions are unwound by devm, so .remove isn't necessary */
.match_always = 1,
};
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index 403ec42472d1..ac231cc36359 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -136,7 +136,7 @@ err_res_name:
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-static int dev_dax_kmem_remove(struct dev_dax *dev_dax)
+static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
{
int i, success = 0;
struct device *dev = &dev_dax->dev;
@@ -176,11 +176,9 @@ static int dev_dax_kmem_remove(struct dev_dax *dev_dax)
kfree(data);
dev_set_drvdata(dev, NULL);
}
-
- return 0;
}
#else
-static int dev_dax_kmem_remove(struct dev_dax *dev_dax)
+static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
{
/*
* Without hotremove purposely leak the request_mem_region() for the
@@ -190,7 +188,6 @@ static int dev_dax_kmem_remove(struct dev_dax *dev_dax)
* request_mem_region().
*/
any_hotremove_failed = true;
- return 0;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
diff --git a/drivers/dax/pmem/compat.c b/drivers/dax/pmem/compat.c
index 863c114fd88c..d81dc35fd65d 100644
--- a/drivers/dax/pmem/compat.c
+++ b/drivers/dax/pmem/compat.c
@@ -41,10 +41,9 @@ static int dax_pmem_compat_release(struct device *dev, void *data)
return 0;
}
-static int dax_pmem_compat_remove(struct device *dev)
+static void dax_pmem_compat_remove(struct device *dev)
{
device_for_each_child(dev, NULL, dax_pmem_compat_release);
- return 0;
}
static struct nd_device_driver dax_pmem_compat_driver = {
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index cadbd0a1a1ef..5fa6ae9dbc8b 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -480,7 +480,7 @@ static void dax_free_inode(struct inode *inode)
kfree(dax_dev->host);
dax_dev->host = NULL;
if (inode->i_rdev)
- ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
+ ida_simple_remove(&dax_minor_ida, iminor(inode));
kmem_cache_free(dax_cache, dax_dev);
}
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 6aa10de792b3..bf3047896e41 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -757,6 +757,9 @@ static void devfreq_dev_release(struct device *dev)
if (devfreq->profile->exit)
devfreq->profile->exit(devfreq->dev.parent);
+ if (devfreq->opp_table)
+ dev_pm_opp_put_opp_table(devfreq->opp_table);
+
mutex_destroy(&devfreq->lock);
kfree(devfreq);
}
@@ -844,6 +847,10 @@ struct devfreq *devfreq_add_device(struct device *dev,
}
devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
+ devfreq->opp_table = dev_pm_opp_get_opp_table(dev);
+ if (IS_ERR(devfreq->opp_table))
+ devfreq->opp_table = NULL;
+
atomic_set(&devfreq->suspend_count, 0);
dev_set_name(&devfreq->dev, "%s", dev_name(dev));
@@ -893,13 +900,13 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_devfreq;
devfreq->nb_min.notifier_call = qos_min_notifier_call;
- err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_min,
+ err = dev_pm_qos_add_notifier(dev, &devfreq->nb_min,
DEV_PM_QOS_MIN_FREQUENCY);
if (err)
goto err_devfreq;
devfreq->nb_max.notifier_call = qos_max_notifier_call;
- err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_max,
+ err = dev_pm_qos_add_notifier(dev, &devfreq->nb_max,
DEV_PM_QOS_MAX_FREQUENCY);
if (err)
goto err_devfreq;
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index 2a52f97b542d..70f44b3ca42e 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -40,7 +40,7 @@
/*
* Definition of governor attribute flags except for common sysfs attributes
* - DEVFREQ_GOV_ATTR_POLLING_INTERVAL
- * : Indicate polling_interal sysfs attribute
+ * : Indicate polling_interval sysfs attribute
* - DEVFREQ_GOV_ATTR_TIMER
* : Indicate timer sysfs attribute
*/
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
index 63332e4a65ae..b094132bd20b 100644
--- a/drivers/devfreq/governor_passive.c
+++ b/drivers/devfreq/governor_passive.c
@@ -19,18 +19,16 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
= (struct devfreq_passive_data *)devfreq->data;
struct devfreq *parent_devfreq = (struct devfreq *)p_data->parent;
unsigned long child_freq = ULONG_MAX;
- struct dev_pm_opp *opp;
- int i, count, ret = 0;
+ struct dev_pm_opp *opp, *p_opp;
+ int i, count;
/*
* If the devfreq device with passive governor has the specific method
* to determine the next frequency, should use the get_target_freq()
* of struct devfreq_passive_data.
*/
- if (p_data->get_target_freq) {
- ret = p_data->get_target_freq(devfreq, freq);
- goto out;
- }
+ if (p_data->get_target_freq)
+ return p_data->get_target_freq(devfreq, freq);
/*
* If the parent and passive devfreq device uses the OPP table,
@@ -56,26 +54,35 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
* list of parent device. Because in this case, *freq is temporary
* value which is decided by ondemand governor.
*/
- opp = devfreq_recommended_opp(parent_devfreq->dev.parent, freq, 0);
- if (IS_ERR(opp)) {
- ret = PTR_ERR(opp);
- goto out;
- }
+ if (devfreq->opp_table && parent_devfreq->opp_table) {
+ p_opp = devfreq_recommended_opp(parent_devfreq->dev.parent,
+ freq, 0);
+ if (IS_ERR(p_opp))
+ return PTR_ERR(p_opp);
+
+ opp = dev_pm_opp_xlate_required_opp(parent_devfreq->opp_table,
+ devfreq->opp_table, p_opp);
+ dev_pm_opp_put(p_opp);
- dev_pm_opp_put(opp);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ *freq = dev_pm_opp_get_freq(opp);
+ dev_pm_opp_put(opp);
+
+ return 0;
+ }
/*
- * Get the OPP table's index of decided freqeuncy by governor
+ * Get the OPP table's index of decided frequency by governor
* of parent device.
*/
for (i = 0; i < parent_devfreq->profile->max_state; i++)
if (parent_devfreq->profile->freq_table[i] == *freq)
break;
- if (i == parent_devfreq->profile->max_state) {
- ret = -EINVAL;
- goto out;
- }
+ if (i == parent_devfreq->profile->max_state)
+ return -EINVAL;
/* Get the suitable frequency by using index of parent device. */
if (i < devfreq->profile->max_state) {
@@ -88,8 +95,7 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
/* Return the suitable frequency for passive device. */
*freq = child_freq;
-out:
- return ret;
+ return 0;
}
static int devfreq_passive_notifier_call(struct notifier_block *nb,
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 2e912166a993..9e9d3b4c6d48 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -400,7 +400,7 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
default:
ret = -EINVAL;
goto err_edev;
- };
+ }
no_pmu:
arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index 117cad7968ab..ce83f883ca65 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -647,7 +647,7 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
return PTR_ERR(opp);
}
- ret = dev_pm_opp_set_bw(dev, opp);
+ ret = dev_pm_opp_set_opp(dev, opp);
dev_pm_opp_put(opp);
return ret;
@@ -849,7 +849,7 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
return err;
}
- err = dev_pm_opp_of_add_table(&pdev->dev);
+ err = dev_pm_opp_of_add_table_noclk(&pdev->dev, 0);
if (err) {
dev_err(&pdev->dev, "Failed to add OPP table: %d\n", err);
goto put_hw;
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 4f8224a6ac95..4e16c71c24b7 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -50,6 +50,14 @@ config DMABUF_MOVE_NOTIFY
This is marked experimental because we don't yet have a consistent
execution context and memory management between drivers.
+config DMABUF_DEBUG
+ bool "DMA-BUF debug checks"
+ default y if DMA_API_DEBUG
+ help
+ This option enables additional checks for DMA-BUF importers and
+ exporters. Specifically it validates that importers do not peek at the
+ underlying struct page when they import a buffer.
+
config DMABUF_SELFTESTS
tristate "Selftests for the dma-buf interfaces"
default n
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 9ad6397aaa97..f264b70c383e 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -493,7 +493,7 @@ err_alloc_file:
*
* 4. Once a driver is done with a shared buffer it needs to call
* dma_buf_detach() (after cleaning up any mappings) and then release the
- * reference acquired with dma_buf_get by calling dma_buf_put().
+ * reference acquired with dma_buf_get() by calling dma_buf_put().
*
* For the detailed semantics exporters are expected to implement see
* &dma_buf_ops.
@@ -509,9 +509,10 @@ err_alloc_file:
* by the exporter. see &struct dma_buf_export_info
* for further details.
*
- * Returns, on success, a newly created dma_buf object, which wraps the
- * supplied private data and operations for dma_buf_ops. On either missing
- * ops, or error in allocating struct dma_buf, will return negative error.
+ * Returns, on success, a newly created struct dma_buf object, which wraps the
+ * supplied private data and operations for struct dma_buf_ops. If the ops are
+ * missing or the struct dma_buf allocation fails, a negative error is
+ * returned.
*
* For most cases the easiest way to create @exp_info is through the
* %DEFINE_DMA_BUF_EXPORT_INFO macro.
@@ -597,7 +598,7 @@ err_module:
EXPORT_SYMBOL_GPL(dma_buf_export);
/**
- * dma_buf_fd - returns a file descriptor for the given dma_buf
+ * dma_buf_fd - returns a file descriptor for the given struct dma_buf
* @dmabuf: [in] pointer to dma_buf for which fd is required.
* @flags: [in] flags to give to fd
*
@@ -621,10 +622,10 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags)
EXPORT_SYMBOL_GPL(dma_buf_fd);
/**
- * dma_buf_get - returns the dma_buf structure related to an fd
- * @fd: [in] fd associated with the dma_buf to be returned
+ * dma_buf_get - returns the struct dma_buf related to an fd
+ * @fd: [in] fd associated with the struct dma_buf to be returned
*
- * On success, returns the dma_buf structure associated with an fd; uses
+ * On success, returns the struct dma_buf associated with an fd; uses
* file's refcounting done by fget to increase refcount. returns ERR_PTR
* otherwise.
*/
@@ -665,9 +666,36 @@ void dma_buf_put(struct dma_buf *dmabuf)
}
EXPORT_SYMBOL_GPL(dma_buf_put);
+static void mangle_sg_table(struct sg_table *sg_table)
+{
+#ifdef CONFIG_DMABUF_DEBUG
+ int i;
+ struct scatterlist *sg;
+
+ /* To catch abuse of the underlying struct page by importers mix
+ * up the bits, but take care to preserve the low SG_ bits to
+ * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
+ * before passing the sgt back to the exporter. */
+ for_each_sgtable_sg(sg_table, sg, i)
+ sg->page_link ^= ~0xffUL;
+#endif
+}
+
+static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct sg_table *sg_table;
+
+ sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+
+ if (!IS_ERR_OR_NULL(sg_table))
+ mangle_sg_table(sg_table);
+
+ return sg_table;
+}
+
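
The mangling is an involution: XOR-ing page_link with the same mask twice restores the original value, and leaving the low byte untouched keeps the SG_ chain/end bits usable in between. A tiny hedged sketch of that property:

/* Illustrative sketch only -- not part of this patch. */
static void mangle_roundtrip_demo(struct scatterlist *sg)
{
	unsigned long link = sg->page_link;
	unsigned long mangled = link ^ ~0xffUL;		/* as in __map_dma_buf() */

	/* the low SG_ chain/end bits are untouched by the mangling */
	WARN_ON((mangled & 0xffUL) != (link & 0xffUL));
	/* __unmap_dma_buf() applies the same XOR and restores the original */
	WARN_ON((mangled ^ ~0xffUL) != link);
}
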
/**
- * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
- * calls attach() of dma_buf_ops to allow device-specific attach functionality
+ * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
* @importer_ops: [in] importer operations for the attachment
@@ -676,6 +704,9 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* Returns struct dma_buf_attachment pointer for this attachment. Attachments
* must be cleaned up by calling dma_buf_detach().
*
+ * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
+ * functionality.
+ *
* Returns:
*
* A pointer to newly created &dma_buf_attachment on success, or a negative
@@ -734,7 +765,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
goto err_unlock;
}
- sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
+ sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
if (!sgt)
sgt = ERR_PTR(-ENOMEM);
if (IS_ERR(sgt)) {
@@ -781,13 +812,24 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
+static void __unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sg_table,
+ enum dma_data_direction direction)
+{
+ /* uses XOR, hence this unmangles */
+ mangle_sg_table(sg_table);
+
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+}
+
/**
- * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
- * optionally calls detach() of dma_buf_ops for device-specific detach
+ * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
* @dmabuf: [in] buffer to detach from.
* @attach: [in] attachment to be detached; is free'd after this call.
*
* Clean up a device attachment obtained by calling dma_buf_attach().
+ *
+ * Optionally this calls &dma_buf_ops.detach for device-specific detach.
*/
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
@@ -798,7 +840,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_lock(attach->dmabuf->resv, NULL);
- dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
+ __unmap_dma_buf(attach, attach->sgt, attach->dir);
if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_buf_unpin(attach);
@@ -818,9 +860,15 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
/**
* dma_buf_pin - Lock down the DMA-buf
- *
* @attach: [in] attachment which should be pinned
*
+ * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
+ * call this, and only for limited use cases like scanout and not for temporary
+ * pin operations. It is not permitted to allow userspace to pin arbitrary
+ * amounts of buffers through this interface.
+ *
+ * Buffers must be unpinned by calling dma_buf_unpin().
+ *
* Returns:
* 0 on success, negative error code on failure.
*/
@@ -829,6 +877,8 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
struct dma_buf *dmabuf = attach->dmabuf;
int ret = 0;
+ WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+
dma_resv_assert_held(dmabuf->resv);
if (dmabuf->ops->pin)
@@ -839,14 +889,19 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
EXPORT_SYMBOL_GPL(dma_buf_pin);
/**
- * dma_buf_unpin - Remove lock from DMA-buf
- *
+ * dma_buf_unpin - Unpin a DMA-buf
* @attach: [in] attachment which should be unpinned
+ *
+ * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
+ * any mapping of @attach again and inform the importer through
+ * &dma_buf_attach_ops.move_notify.
*/
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
struct dma_buf *dmabuf = attach->dmabuf;
+ WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+
dma_resv_assert_held(dmabuf->resv);
if (dmabuf->ops->unpin)
@@ -907,7 +962,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
}
}
- sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ sg_table = __map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
@@ -970,7 +1025,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_assert_held(attach->dmabuf->resv);
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+ __unmap_dma_buf(attach, sg_table, direction);
if (dma_buf_is_dynamic(attach->dmabuf) &&
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
@@ -1014,15 +1069,15 @@ EXPORT_SYMBOL_GPL(dma_buf_move_notify);
* vmalloc space might be limited and result in vmap calls failing.
*
* Interfaces::
+ *
* void \*dma_buf_vmap(struct dma_buf \*dmabuf)
* void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
*
* The vmap call can fail if there is no vmap support in the exporter, or if
- * it runs out of vmalloc space. Fallback to kmap should be implemented. Note
- * that the dma-buf layer keeps a reference count for all vmap access and
- * calls down into the exporter's vmap function only when no vmapping exists,
- * and only unmaps it once. Protection against concurrent vmap/vunmap calls is
- * provided by taking the dma_buf->lock mutex.
+ * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
+ * count for all vmap access and calls down into the exporter's vmap function
+ * only when no vmapping exists, and only unmaps it once. Protection against
+ * concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
*
* - For full compatibility on the importer side with existing userspace
* interfaces, which might already support mmap'ing buffers. This is needed in
@@ -1074,11 +1129,12 @@ EXPORT_SYMBOL_GPL(dma_buf_move_notify);
* shootdowns would increase the complexity quite a bit.
*
* Interface::
+ *
* int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
* unsigned long);
*
* If the importing subsystem simply provides a special-purpose mmap call to
- * set up a mapping in userspace, calling do_mmap with dma_buf->file will
+ * set up a mapping in userspace, calling do_mmap with &dma_buf.file will
* equally achieve that for a dma-buf object.
*/
@@ -1111,6 +1167,11 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
* dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
* it guaranteed to be coherent with other DMA access.
*
+ * This function will also wait for any DMA transactions tracked through
+ * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
+ * synchronization this function will only ensure cache coherency, callers must
+ * ensure synchronization with such DMA transactions on their own.
+ *
* Can return negative error values, returns 0 on success.
*/
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
@@ -1121,6 +1182,8 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
if (WARN_ON(!dmabuf))
return -EINVAL;
+ might_lock(&dmabuf->resv->lock.base);
+
if (dmabuf->ops->begin_cpu_access)
ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
@@ -1154,6 +1217,8 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
WARN_ON(!dmabuf);
+ might_lock(&dmabuf->resv->lock.base);
+
if (dmabuf->ops->end_cpu_access)
ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
@@ -1212,7 +1277,10 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
* This call may fail due to lack of virtual mapping address space.
* These calls are optional in drivers. The intended use for them
* is for mapping objects linear in kernel space for high use objects.
- * Please attempt to use kmap/kunmap before thinking about these interfaces.
+ *
+ * To ensure coherency users must call dma_buf_begin_cpu_access() and
+ * dma_buf_end_cpu_access() around any cpu access performed through this
+ * mapping.
*
* Returns 0 on success, or a negative errno code otherwise.
*/
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 7475e09b0680..d64fc03929be 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -312,22 +312,25 @@ void __dma_fence_might_wait(void)
/**
- * dma_fence_signal_locked - signal completion of a fence
+ * dma_fence_signal_timestamp_locked - signal completion of a fence
* @fence: the fence to signal
+ * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
*
* Signal completion for software callbacks on a fence, this will unblock
* dma_fence_wait() calls and run all the callbacks added with
* dma_fence_add_callback(). Can be called multiple times, but since a fence
* can only go from the unsignaled to the signaled state and not back, it will
- * only be effective the first time.
+ * only be effective the first time. Set the timestamp provided as the fence
+ * signal timestamp.
*
- * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
- * held.
+ * Unlike dma_fence_signal_timestamp(), this function must be called with
+ * &dma_fence.lock held.
*
* Returns 0 on success and a negative error value when @fence has been
* signalled already.
*/
-int dma_fence_signal_locked(struct dma_fence *fence)
+int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
+ ktime_t timestamp)
{
struct dma_fence_cb *cur, *tmp;
struct list_head cb_list;
@@ -341,7 +344,7 @@ int dma_fence_signal_locked(struct dma_fence *fence)
/* Stash the cb_list before replacing it with the timestamp */
list_replace(&fence->cb_list, &cb_list);
- fence->timestamp = ktime_get();
+ fence->timestamp = timestamp;
set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
trace_dma_fence_signaled(fence);
@@ -352,6 +355,59 @@ int dma_fence_signal_locked(struct dma_fence *fence)
return 0;
}
+EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);
+
+/**
+ * dma_fence_signal_timestamp - signal completion of a fence
+ * @fence: the fence to signal
+ * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
+ *
+ * Signal completion for software callbacks on a fence, this will unblock
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time. Set the timestamp provided as the fence
+ * signal timestamp.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
+ */
+int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
+{
+ unsigned long flags;
+ int ret;
+
+ if (!fence)
+ return -EINVAL;
+
+ spin_lock_irqsave(fence->lock, flags);
+ ret = dma_fence_signal_timestamp_locked(fence, timestamp);
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(dma_fence_signal_timestamp);
+
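
A hedged sketch of the intended caller: a driver that captured a completion time closer to the actual event (for example in its interrupt handler) forwards it instead of letting the core stamp ktime_get() at signal time. The helper name and timestamp source are hypothetical.

/* Illustrative sketch only -- hypothetical driver helper. */
static void my_driver_fence_complete(struct dma_fence *fence, u64 hw_done_ns)
{
	/* hw_done_ns must already be in the CLOCK_MONOTONIC time domain */
	dma_fence_signal_timestamp(fence, ns_to_ktime(hw_done_ns));
}
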
+/**
+ * dma_fence_signal_locked - signal completion of a fence
+ * @fence: the fence to signal
+ *
+ * Signal completion for software callbacks on a fence, this will unblock
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time.
+ *
+ * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
+ * held.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
+ */
+int dma_fence_signal_locked(struct dma_fence *fence)
+{
+ return dma_fence_signal_timestamp_locked(fence, ktime_get());
+}
EXPORT_SYMBOL(dma_fence_signal_locked);
/**
@@ -379,7 +435,7 @@ int dma_fence_signal(struct dma_fence *fence)
tmp = dma_fence_begin_signalling();
spin_lock_irqsave(fence->lock, flags);
- ret = dma_fence_signal_locked(fence);
+ ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_end_signalling(tmp);
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index afd22c9dbdcf..6b5db954569f 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -52,6 +52,9 @@ static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
unsigned int fd_flags,
unsigned int heap_flags)
{
+ struct dma_buf *dmabuf;
+ int fd;
+
/*
* Allocations from all heaps have to begin
* and end on page boundaries.
@@ -60,7 +63,16 @@ static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
if (!len)
return -EINVAL;
- return heap->ops->allocate(heap, len, fd_flags, heap_flags);
+ dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ fd = dma_buf_fd(dmabuf, fd_flags);
+ if (fd < 0) {
+ dma_buf_put(dmabuf);
+ /* just return, as put will call release and that will free */
+ }
+ return fd;
}
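
With this change a heap backend's allocate() returns the struct dma_buf itself and the core installs the file descriptor (the cma_heap and system_heap hunks below make the same conversion). A hedged sketch of the new contract, with a hypothetical backend:

/* Illustrative sketch only -- hypothetical heap backend. */
static struct dma_buf *my_heap_allocate(struct dma_heap *heap, unsigned long len,
					unsigned long fd_flags,
					unsigned long heap_flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	/* ... allocate backing memory, fill exp_info.ops/.priv ... */
	exp_info.size = len;

	/* Return the buffer (or an ERR_PTR); dma_heap_buffer_alloc() now owns
	 * turning it into an fd and dropping the reference on failure. */
	return dma_buf_export(&exp_info);
}
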
static int dma_heap_open(struct inode *inode, struct file *file)
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 364fc2f3e499..5d64eccd21d6 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -271,10 +271,10 @@ static const struct dma_buf_ops cma_heap_buf_ops = {
.release = cma_heap_dma_buf_release,
};
-static int cma_heap_allocate(struct dma_heap *heap,
- unsigned long len,
- unsigned long fd_flags,
- unsigned long heap_flags)
+static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
{
struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
struct cma_heap_buffer *buffer;
@@ -289,7 +289,7 @@ static int cma_heap_allocate(struct dma_heap *heap,
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
@@ -348,15 +348,7 @@ static int cma_heap_allocate(struct dma_heap *heap,
ret = PTR_ERR(dmabuf);
goto free_pages;
}
-
- ret = dma_buf_fd(dmabuf, fd_flags);
- if (ret < 0) {
- dma_buf_put(dmabuf);
- /* just return, as put will call release and that will free */
- return ret;
- }
-
- return ret;
+ return dmabuf;
free_pages:
kfree(buffer->pages);
@@ -365,7 +357,7 @@ free_cma:
free_buffer:
kfree(buffer);
- return ret;
+ return ERR_PTR(ret);
}
static const struct dma_heap_ops cma_heap_ops = {
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 17e0e9a68baf..29e49ac17251 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -331,10 +331,10 @@ static struct page *alloc_largest_available(unsigned long size,
return NULL;
}
-static int system_heap_allocate(struct dma_heap *heap,
- unsigned long len,
- unsigned long fd_flags,
- unsigned long heap_flags)
+static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
{
struct system_heap_buffer *buffer;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -349,7 +349,7 @@ static int system_heap_allocate(struct dma_heap *heap,
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
@@ -363,8 +363,10 @@ static int system_heap_allocate(struct dma_heap *heap,
* Avoid trying to allocate memory if the process
* has been killed by SIGKILL
*/
- if (fatal_signal_pending(current))
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
goto free_buffer;
+ }
page = alloc_largest_available(size_remaining, max_order);
if (!page)
@@ -397,14 +399,7 @@ static int system_heap_allocate(struct dma_heap *heap,
ret = PTR_ERR(dmabuf);
goto free_pages;
}
-
- ret = dma_buf_fd(dmabuf, fd_flags);
- if (ret < 0) {
- dma_buf_put(dmabuf);
- /* just return, as put will call release and that will free */
- return ret;
- }
- return ret;
+ return dmabuf;
free_pages:
for_each_sgtable_sg(table, sg, i) {
@@ -418,7 +413,7 @@ free_buffer:
__free_pages(page, compound_order(page));
kfree(buffer);
- return ret;
+ return ERR_PTR(ret);
}
static const struct dma_heap_ops system_heap_ops = {
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
index e593064341c8..c8a12d7ad71a 100644
--- a/drivers/dma-buf/st-dma-fence.c
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -471,8 +471,11 @@ static int thread_signal_callback(void *arg)
dma_fence_signal(f1);
smp_store_mb(cb.seen, false);
- if (!f2 || dma_fence_add_callback(f2, &cb.cb, simple_callback))
- miss++, cb.seen = true;
+ if (!f2 ||
+ dma_fence_add_callback(f2, &cb.cb, simple_callback)) {
+ miss++;
+ cb.seen = true;
+ }
if (!t->before)
dma_fence_signal(f1);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d242c7632621..0c2827fd8c19 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -124,13 +124,6 @@ config BCM_SBA_RAID
has the capability to offload memcpy, xor and pq computation
for raid5/6.
-config COH901318
- bool "ST-Ericsson COH901318 DMA support"
- select DMA_ENGINE
- depends on ARCH_U300 || COMPILE_TEST
- help
- Enable support for ST-Ericsson COH 901 318 DMA.
-
config DMA_BCM2835
tristate "BCM2835 DMA engine support"
depends on ARCH_BCM2835
@@ -179,6 +172,7 @@ config DMA_SUN6I
config DW_AXI_DMAC
tristate "Synopsys DesignWare AXI DMA support"
depends on OF || COMPILE_TEST
+ depends on HAS_IOMEM
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -378,14 +372,14 @@ config MILBEAUT_XDMAC
XDMAC device.
config MMP_PDMA
- bool "MMP PDMA support"
+ tristate "MMP PDMA support"
depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
select DMA_ENGINE
help
Support the MMP PDMA engine for PXA and MMP platform.
config MMP_TDMA
- bool "MMP Two-Channel DMA support"
+ tristate "MMP Two-Channel DMA support"
depends on ARCH_MMP || COMPILE_TEST
select DMA_ENGINE
select GENERIC_ALLOCATOR
@@ -519,13 +513,6 @@ config PLX_DMA
These are exposed via extra functions on the switch's
upstream port. Each function exposes one DMA channel.
-config SIRF_DMA
- tristate "CSR SiRFprimaII/SiRFmarco DMA support"
- depends on ARCH_SIRF
- select DMA_ENGINE
- help
- Enable support for the CSR SiRFprimaII DMA engine.
-
config STE_DMA40
bool "ST-Ericsson DMA40 support"
depends on ARCH_U8500
@@ -710,15 +697,6 @@ config XILINX_ZYNQMP_DPDMA
driver provides the dmaengine required by the DisplayPort subsystem
display driver.
-config ZX_DMA
- tristate "ZTE ZX DMA support"
- depends on ARCH_ZX || COMPILE_TEST
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- help
- Support the DMA engine for ZTE ZX family platform devices.
-
-
# driver files
source "drivers/dma/bestcomm/Kconfig"
@@ -740,6 +718,8 @@ source "drivers/dma/ti/Kconfig"
source "drivers/dma/fsl-dpaa2-qdma/Kconfig"
+source "drivers/dma/lgm/Kconfig"
+
# clients
comment "DMA Clients"
depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 948a8da05f8b..aa69094e3547 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -20,7 +20,6 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
obj-$(CONFIG_BCM_SBA_RAID) += bcm-sba-raid.o
-obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
@@ -65,7 +64,6 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SF_PDMA) += sf-pdma/
-obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_STM32_DMA) += stm32-dma.o
obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o
@@ -79,9 +77,9 @@ obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o
obj-$(CONFIG_UNIPHIER_XDMAC) += uniphier-xdmac.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
-obj-$(CONFIG_ZX_DMA) += zx_dma.o
obj-$(CONFIG_ST_FDMA) += st_fdma.o
obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
+obj-$(CONFIG_INTEL_LDMA) += lgm/
obj-y += mediatek/
obj-y += qcom/
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7eaee5b705b1..30ae36124b1d 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -54,6 +54,25 @@ module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
"initial descriptors per channel (default: 64)");
+/**
+ * struct at_dma_platform_data - Controller configuration parameters
+ * @nr_channels: Number of channels supported by hardware (max 8)
+ * @cap_mask: dma_capability flags supported by the platform
+ */
+struct at_dma_platform_data {
+ unsigned int nr_channels;
+ dma_cap_mask_t cap_mask;
+};
+
+/**
+ * struct at_dma_slave - Controller-specific information about a slave
+ * @dma_dev: required DMA master device
+ * @cfg: Platform-specific initializer for the CFG register
+ */
+struct at_dma_slave {
+ struct device *dma_dev;
+ u32 cfg;
+};
/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 80fc2fe8c77e..4d1ebc040031 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -7,8 +7,6 @@
#ifndef AT_HDMAC_REGS_H
#define AT_HDMAC_REGS_H
-#include <linux/platform_data/dma-atmel.h>
-
#define AT_DMA_MAX_NR_CHANNELS 8
@@ -148,7 +146,31 @@
#define ATC_AUTO (0x1 << 31) /* Auto multiple buffer tx enable */
/* Bitfields in CFG */
-/* are in at_hdmac.h */
+#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */
+
+#define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */
+#define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */
+#define ATC_SRC_REP (0x1 << 8) /* Source Replay Mode */
+#define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mode */
+#define ATC_SRC_H2SEL_SW (0x0 << 9)
+#define ATC_SRC_H2SEL_HW (0x1 << 9)
+#define ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most significant bits) */
+#define ATC_DST_REP (0x1 << 12) /* Destination Replay Mode */
+#define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mode */
+#define ATC_DST_H2SEL_SW (0x0 << 13)
+#define ATC_DST_H2SEL_HW (0x1 << 13)
+#define ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) /* Channel dst rq (most significant bits) */
+#define ATC_SOD (0x1 << 16) /* Stop On Done */
+#define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */
+#define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */
+#define ATC_LOCK_IF_L (0x1 << 22) /* Master Interface Arbiter Lock */
+#define ATC_LOCK_IF_L_CHUNK (0x0 << 22)
+#define ATC_LOCK_IF_L_BUFFER (0x1 << 22)
+#define ATC_AHB_PROT_MASK (0x7 << 24) /* AHB Protection */
+#define ATC_FIFOCFG_MASK (0x3 << 28) /* FIFO Request Configuration */
+#define ATC_FIFOCFG_LARGESTBURST (0x0 << 28)
+#define ATC_FIFOCFG_HALFFIFO (0x1 << 28)
+#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28)
/* Bitfields in SPIP */
#define ATC_SPIP_HOLE(x) (0xFFFFU & (x))
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
deleted file mode 100644
index 95b9b2f5358e..000000000000
--- a/drivers/dma/coh901318.c
+++ /dev/null
@@ -1,2808 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * drivers/dma/coh901318.c
- *
- * Copyright (C) 2007-2009 ST-Ericsson
- * DMA driver for COH 901 318
- * Author: Per Friden <per.friden@stericsson.com>
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h> /* printk() */
-#include <linux/fs.h> /* everything... */
-#include <linux/scatterlist.h>
-#include <linux/slab.h> /* kmalloc() */
-#include <linux/dmaengine.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/irqreturn.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-#include <linux/platform_data/dma-coh901318.h>
-#include <linux/of_dma.h>
-
-#include "coh901318.h"
-#include "dmaengine.h"
-
-#define COH901318_MOD32_MASK (0x1F)
-#define COH901318_WORD_MASK (0xFFFFFFFF)
-/* INT_STATUS - Interrupt Status Registers 32bit (R/-) */
-#define COH901318_INT_STATUS1 (0x0000)
-#define COH901318_INT_STATUS2 (0x0004)
-/* TC_INT_STATUS - Terminal Count Interrupt Status Registers 32bit (R/-) */
-#define COH901318_TC_INT_STATUS1 (0x0008)
-#define COH901318_TC_INT_STATUS2 (0x000C)
-/* TC_INT_CLEAR - Terminal Count Interrupt Clear Registers 32bit (-/W) */
-#define COH901318_TC_INT_CLEAR1 (0x0010)
-#define COH901318_TC_INT_CLEAR2 (0x0014)
-/* RAW_TC_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */
-#define COH901318_RAW_TC_INT_STATUS1 (0x0018)
-#define COH901318_RAW_TC_INT_STATUS2 (0x001C)
-/* BE_INT_STATUS - Bus Error Interrupt Status Registers 32bit (R/-) */
-#define COH901318_BE_INT_STATUS1 (0x0020)
-#define COH901318_BE_INT_STATUS2 (0x0024)
-/* BE_INT_CLEAR - Bus Error Interrupt Clear Registers 32bit (-/W) */
-#define COH901318_BE_INT_CLEAR1 (0x0028)
-#define COH901318_BE_INT_CLEAR2 (0x002C)
-/* RAW_BE_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */
-#define COH901318_RAW_BE_INT_STATUS1 (0x0030)
-#define COH901318_RAW_BE_INT_STATUS2 (0x0034)
-
-/*
- * CX_CFG - Channel Configuration Registers 32bit (R/W)
- */
-#define COH901318_CX_CFG (0x0100)
-#define COH901318_CX_CFG_SPACING (0x04)
-/* Channel enable activates the dma job */
-#define COH901318_CX_CFG_CH_ENABLE (0x00000001)
-#define COH901318_CX_CFG_CH_DISABLE (0x00000000)
-/* Request Mode */
-#define COH901318_CX_CFG_RM_MASK (0x00000006)
-#define COH901318_CX_CFG_RM_MEMORY_TO_MEMORY (0x0 << 1)
-#define COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY (0x1 << 1)
-#define COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY (0x1 << 1)
-#define COH901318_CX_CFG_RM_PRIMARY_TO_SECONDARY (0x3 << 1)
-#define COH901318_CX_CFG_RM_SECONDARY_TO_PRIMARY (0x3 << 1)
-/* Linked channel request field. RM must == 11 */
-#define COH901318_CX_CFG_LCRF_SHIFT 3
-#define COH901318_CX_CFG_LCRF_MASK (0x000001F8)
-#define COH901318_CX_CFG_LCR_DISABLE (0x00000000)
-/* Terminal Counter Interrupt Request Mask */
-#define COH901318_CX_CFG_TC_IRQ_ENABLE (0x00000200)
-#define COH901318_CX_CFG_TC_IRQ_DISABLE (0x00000000)
-/* Bus Error interrupt Mask */
-#define COH901318_CX_CFG_BE_IRQ_ENABLE (0x00000400)
-#define COH901318_CX_CFG_BE_IRQ_DISABLE (0x00000000)
-
-/*
- * CX_STAT - Channel Status Registers 32bit (R/-)
- */
-#define COH901318_CX_STAT (0x0200)
-#define COH901318_CX_STAT_SPACING (0x04)
-#define COH901318_CX_STAT_RBE_IRQ_IND (0x00000008)
-#define COH901318_CX_STAT_RTC_IRQ_IND (0x00000004)
-#define COH901318_CX_STAT_ACTIVE (0x00000002)
-#define COH901318_CX_STAT_ENABLED (0x00000001)
-
-/*
- * CX_CTRL - Channel Control Registers 32bit (R/W)
- */
-#define COH901318_CX_CTRL (0x0400)
-#define COH901318_CX_CTRL_SPACING (0x10)
-/* Transfer Count Enable */
-#define COH901318_CX_CTRL_TC_ENABLE (0x00001000)
-#define COH901318_CX_CTRL_TC_DISABLE (0x00000000)
-/* Transfer Count Value 0 - 4095 */
-#define COH901318_CX_CTRL_TC_VALUE_MASK (0x00000FFF)
-/* Burst count */
-#define COH901318_CX_CTRL_BURST_COUNT_MASK (0x0000E000)
-#define COH901318_CX_CTRL_BURST_COUNT_64_BYTES (0x7 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_48_BYTES (0x6 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_32_BYTES (0x5 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_16_BYTES (0x4 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_8_BYTES (0x3 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_4_BYTES (0x2 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_2_BYTES (0x1 << 13)
-#define COH901318_CX_CTRL_BURST_COUNT_1_BYTE (0x0 << 13)
-/* Source bus size */
-#define COH901318_CX_CTRL_SRC_BUS_SIZE_MASK (0x00030000)
-#define COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS (0x2 << 16)
-#define COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS (0x1 << 16)
-#define COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS (0x0 << 16)
-/* Source address increment */
-#define COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE (0x00040000)
-#define COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE (0x00000000)
-/* Destination Bus Size */
-#define COH901318_CX_CTRL_DST_BUS_SIZE_MASK (0x00180000)
-#define COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS (0x2 << 19)
-#define COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS (0x1 << 19)
-#define COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS (0x0 << 19)
-/* Destination address increment */
-#define COH901318_CX_CTRL_DST_ADDR_INC_ENABLE (0x00200000)
-#define COH901318_CX_CTRL_DST_ADDR_INC_DISABLE (0x00000000)
-/* Master Mode (Master2 is only connected to MSL) */
-#define COH901318_CX_CTRL_MASTER_MODE_MASK (0x00C00000)
-#define COH901318_CX_CTRL_MASTER_MODE_M2R_M1W (0x3 << 22)
-#define COH901318_CX_CTRL_MASTER_MODE_M1R_M2W (0x2 << 22)
-#define COH901318_CX_CTRL_MASTER_MODE_M2RW (0x1 << 22)
-#define COH901318_CX_CTRL_MASTER_MODE_M1RW (0x0 << 22)
-/* Terminal Count flag to PER enable */
-#define COH901318_CX_CTRL_TCP_ENABLE (0x01000000)
-#define COH901318_CX_CTRL_TCP_DISABLE (0x00000000)
-/* Terminal Count flags to CPU enable */
-#define COH901318_CX_CTRL_TC_IRQ_ENABLE (0x02000000)
-#define COH901318_CX_CTRL_TC_IRQ_DISABLE (0x00000000)
-/* Hand shake to peripheral */
-#define COH901318_CX_CTRL_HSP_ENABLE (0x04000000)
-#define COH901318_CX_CTRL_HSP_DISABLE (0x00000000)
-#define COH901318_CX_CTRL_HSS_ENABLE (0x08000000)
-#define COH901318_CX_CTRL_HSS_DISABLE (0x00000000)
-/* DMA mode */
-#define COH901318_CX_CTRL_DDMA_MASK (0x30000000)
-#define COH901318_CX_CTRL_DDMA_LEGACY (0x0 << 28)
-#define COH901318_CX_CTRL_DDMA_DEMAND_DMA1 (0x1 << 28)
-#define COH901318_CX_CTRL_DDMA_DEMAND_DMA2 (0x2 << 28)
-/* Primary Request Data Destination */
-#define COH901318_CX_CTRL_PRDD_MASK (0x40000000)
-#define COH901318_CX_CTRL_PRDD_DEST (0x1 << 30)
-#define COH901318_CX_CTRL_PRDD_SOURCE (0x0 << 30)
-
-/*
- * CX_SRC_ADDR - Channel Source Address Registers 32bit (R/W)
- */
-#define COH901318_CX_SRC_ADDR (0x0404)
-#define COH901318_CX_SRC_ADDR_SPACING (0x10)
-
-/*
- * CX_DST_ADDR - Channel Destination Address Registers 32bit R/W
- */
-#define COH901318_CX_DST_ADDR (0x0408)
-#define COH901318_CX_DST_ADDR_SPACING (0x10)
-
-/*
- * CX_LNK_ADDR - Channel Link Address Registers 32bit (R/W)
- */
-#define COH901318_CX_LNK_ADDR (0x040C)
-#define COH901318_CX_LNK_ADDR_SPACING (0x10)
-#define COH901318_CX_LNK_LINK_IMMEDIATE (0x00000001)
-
-/**
- * struct coh901318_params - parameters for DMAC configuration
- * @config: DMA config register
- * @ctrl_lli_last: DMA control register for the last lli in the list
- * @ctrl_lli: DMA control register for an lli
- * @ctrl_lli_chained: DMA control register for a chained lli
- */
-struct coh901318_params {
- u32 config;
- u32 ctrl_lli_last;
- u32 ctrl_lli;
- u32 ctrl_lli_chained;
-};
-
-/**
- * struct coh_dma_channel - dma channel base
- * @name: ascii name of dma channel
- * @number: channel id number
- * @desc_nbr_max: number of preallocated descriptors
- * @priority_high: prio of channel, 0 low otherwise high.
- * @param: configuration parameters
- */
-struct coh_dma_channel {
- const char name[32];
- const int number;
- const int desc_nbr_max;
- const int priority_high;
- const struct coh901318_params param;
-};
-
-/**
- * struct powersave - DMA power save structure
- * @lock: lock protecting data in this struct
- * @started_channels: bit mask indicating active dma channels
- */
-struct powersave {
- spinlock_t lock;
- u64 started_channels;
-};
-
-/* points out all dma slave channels.
- * Syntax is [A1, B1, A2, B2, .... ,-1,-1]
- * Select all channels from A to B, end of list is marked with -1,-1
- */
-static int dma_slave_channels[] = {
- U300_DMA_MSL_TX_0, U300_DMA_SPI_RX,
- U300_DMA_UART1_TX, U300_DMA_UART1_RX, -1, -1};
-
-/* points out all dma memcpy channels. */
-static int dma_memcpy_channels[] = {
- U300_DMA_GENERAL_PURPOSE_0, U300_DMA_GENERAL_PURPOSE_8, -1, -1};
-
-#define flags_memcpy_config (COH901318_CX_CFG_CH_DISABLE | \
- COH901318_CX_CFG_RM_MEMORY_TO_MEMORY | \
- COH901318_CX_CFG_LCR_DISABLE | \
- COH901318_CX_CFG_TC_IRQ_ENABLE | \
- COH901318_CX_CFG_BE_IRQ_ENABLE)
-#define flags_memcpy_lli_chained (COH901318_CX_CTRL_TC_ENABLE | \
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
- COH901318_CX_CTRL_MASTER_MODE_M1RW | \
- COH901318_CX_CTRL_TCP_DISABLE | \
- COH901318_CX_CTRL_TC_IRQ_DISABLE | \
- COH901318_CX_CTRL_HSP_DISABLE | \
- COH901318_CX_CTRL_HSS_DISABLE | \
- COH901318_CX_CTRL_DDMA_LEGACY | \
- COH901318_CX_CTRL_PRDD_SOURCE)
-#define flags_memcpy_lli (COH901318_CX_CTRL_TC_ENABLE | \
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
- COH901318_CX_CTRL_MASTER_MODE_M1RW | \
- COH901318_CX_CTRL_TCP_DISABLE | \
- COH901318_CX_CTRL_TC_IRQ_DISABLE | \
- COH901318_CX_CTRL_HSP_DISABLE | \
- COH901318_CX_CTRL_HSS_DISABLE | \
- COH901318_CX_CTRL_DDMA_LEGACY | \
- COH901318_CX_CTRL_PRDD_SOURCE)
-#define flags_memcpy_lli_last (COH901318_CX_CTRL_TC_ENABLE | \
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
- COH901318_CX_CTRL_MASTER_MODE_M1RW | \
- COH901318_CX_CTRL_TCP_DISABLE | \
- COH901318_CX_CTRL_TC_IRQ_ENABLE | \
- COH901318_CX_CTRL_HSP_DISABLE | \
- COH901318_CX_CTRL_HSS_DISABLE | \
- COH901318_CX_CTRL_DDMA_LEGACY | \
- COH901318_CX_CTRL_PRDD_SOURCE)
-
-static const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
- {
- .number = U300_DMA_MSL_TX_0,
- .name = "MSL TX 0",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_MSL_TX_1,
- .name = "MSL TX 1",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- },
- {
- .number = U300_DMA_MSL_TX_2,
- .name = "MSL TX 2",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .desc_nbr_max = 10,
- },
- {
- .number = U300_DMA_MSL_TX_3,
- .name = "MSL TX 3",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- },
- {
- .number = U300_DMA_MSL_TX_4,
- .name = "MSL TX 4",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- },
- {
- .number = U300_DMA_MSL_TX_5,
- .name = "MSL TX 5",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_MSL_TX_6,
- .name = "MSL TX 6",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_MSL_RX_0,
- .name = "MSL RX 0",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_MSL_RX_1,
- .name = "MSL RX 1",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli = 0,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- },
- {
- .number = U300_DMA_MSL_RX_2,
- .name = "MSL RX 2",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- },
- {
- .number = U300_DMA_MSL_RX_3,
- .name = "MSL RX 3",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- },
- {
- .number = U300_DMA_MSL_RX_4,
- .name = "MSL RX 4",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- },
- {
- .number = U300_DMA_MSL_RX_5,
- .name = "MSL RX 5",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
- COH901318_CX_CTRL_PRDD_DEST,
- },
- {
- .number = U300_DMA_MSL_RX_6,
- .name = "MSL RX 6",
- .priority_high = 0,
- },
- /*
- * Don't set up device address, burst count or size of src
- * or dst bus for this peripheral - handled by PrimeCell
- * DMA extension.
- */
- {
- .number = U300_DMA_MMCSD_RX_TX,
- .name = "MMCSD RX TX",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
-
- },
- {
- .number = U300_DMA_MSPRO_TX,
- .name = "MSPRO TX",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_MSPRO_RX,
- .name = "MSPRO RX",
- .priority_high = 0,
- },
- /*
- * Don't set up device address, burst count or size of src
- * or dst bus for this peripheral - handled by PrimeCell
- * DMA extension.
- */
- {
- .number = U300_DMA_UART0_TX,
- .name = "UART0 TX",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- },
- {
- .number = U300_DMA_UART0_RX,
- .name = "UART0 RX",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- },
- {
- .number = U300_DMA_APEX_TX,
- .name = "APEX TX",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_APEX_RX,
- .name = "APEX RX",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_PCM_I2S0_TX,
- .name = "PCM I2S0 TX",
- .priority_high = 1,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- },
- {
- .number = U300_DMA_PCM_I2S0_RX,
- .name = "PCM I2S0 RX",
- .priority_high = 1,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_DEST,
- },
- {
- .number = U300_DMA_PCM_I2S1_TX,
- .name = "PCM I2S1 TX",
- .priority_high = 1,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_SOURCE,
- },
- {
- .number = U300_DMA_PCM_I2S1_RX,
- .name = "PCM I2S1 RX",
- .priority_high = 1,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_DEST,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_ENABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY |
- COH901318_CX_CTRL_PRDD_DEST,
- },
- {
- .number = U300_DMA_XGAM_CDI,
- .name = "XGAM CDI",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_XGAM_PDI,
- .name = "XGAM PDI",
- .priority_high = 0,
- },
- /*
- * Don't set up device address, burst count or size of src
- * or dst bus for this peripheral - handled by PrimeCell
- * DMA extension.
- */
- {
- .number = U300_DMA_SPI_TX,
- .name = "SPI TX",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- },
- {
- .number = U300_DMA_SPI_RX,
- .name = "SPI RX",
- .priority_high = 0,
- .param.config = COH901318_CX_CFG_CH_DISABLE |
- COH901318_CX_CFG_LCR_DISABLE |
- COH901318_CX_CFG_TC_IRQ_ENABLE |
- COH901318_CX_CFG_BE_IRQ_ENABLE,
- .param.ctrl_lli_chained = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_DISABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
- .param.ctrl_lli_last = 0 |
- COH901318_CX_CTRL_TC_ENABLE |
- COH901318_CX_CTRL_MASTER_MODE_M1RW |
- COH901318_CX_CTRL_TCP_DISABLE |
- COH901318_CX_CTRL_TC_IRQ_ENABLE |
- COH901318_CX_CTRL_HSP_ENABLE |
- COH901318_CX_CTRL_HSS_DISABLE |
- COH901318_CX_CTRL_DDMA_LEGACY,
-
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_0,
- .name = "GENERAL 00",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_1,
- .name = "GENERAL 01",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_2,
- .name = "GENERAL 02",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_3,
- .name = "GENERAL 03",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_4,
- .name = "GENERAL 04",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_5,
- .name = "GENERAL 05",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_6,
- .name = "GENERAL 06",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_7,
- .name = "GENERAL 07",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_GENERAL_PURPOSE_8,
- .name = "GENERAL 08",
- .priority_high = 0,
-
- .param.config = flags_memcpy_config,
- .param.ctrl_lli_chained = flags_memcpy_lli_chained,
- .param.ctrl_lli = flags_memcpy_lli,
- .param.ctrl_lli_last = flags_memcpy_lli_last,
- },
- {
- .number = U300_DMA_UART1_TX,
- .name = "UART1 TX",
- .priority_high = 0,
- },
- {
- .number = U300_DMA_UART1_RX,
- .name = "UART1 RX",
- .priority_high = 0,
- }
-};
-
-#define COHC_2_DEV(cohc) (&cohc->chan.dev->device)
-
-#ifdef VERBOSE_DEBUG
-#define COH_DBG(x) ({ if (1) x; 0; })
-#else
-#define COH_DBG(x) ({ if (0) x; 0; })
-#endif
-
-struct coh901318_desc {
- struct dma_async_tx_descriptor desc;
- struct list_head node;
- struct scatterlist *sg;
- unsigned int sg_len;
- struct coh901318_lli *lli;
- enum dma_transfer_direction dir;
- unsigned long flags;
- u32 head_config;
- u32 head_ctrl;
-};
-
-struct coh901318_base {
- struct device *dev;
- void __iomem *virtbase;
- unsigned int irq;
- struct coh901318_pool pool;
- struct powersave pm;
- struct dma_device dma_slave;
- struct dma_device dma_memcpy;
- struct coh901318_chan *chans;
-};
-
-struct coh901318_chan {
- spinlock_t lock;
- int allocated;
- int id;
- int stopped;
-
- struct work_struct free_work;
- struct dma_chan chan;
-
- struct tasklet_struct tasklet;
-
- struct list_head active;
- struct list_head queue;
- struct list_head free;
-
- unsigned long nbr_active_done;
- unsigned long busy;
-
- struct dma_slave_config config;
- u32 addr;
- u32 ctrl;
-
- struct coh901318_base *base;
-};
-
-static void coh901318_list_print(struct coh901318_chan *cohc,
- struct coh901318_lli *lli)
-{
- struct coh901318_lli *l = lli;
- int i = 0;
-
- while (l) {
- dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src %pad"
- ", dst %pad, link %pad virt_link_addr 0x%p\n",
- i, l, l->control, &l->src_addr, &l->dst_addr,
- &l->link_addr, l->virt_link_addr);
- i++;
- l = l->virt_link_addr;
- }
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-#define COH901318_DEBUGFS_ASSIGN(x, y) (x = y)
-
-static struct coh901318_base *debugfs_dma_base;
-static struct dentry *dma_dentry;
-
-static ssize_t coh901318_debugfs_read(struct file *file, char __user *buf,
- size_t count, loff_t *f_pos)
-{
- u64 started_channels = debugfs_dma_base->pm.started_channels;
- int pool_count = debugfs_dma_base->pool.debugfs_pool_counter;
- char *dev_buf;
- char *tmp;
- int ret;
- int i;
-
- dev_buf = kmalloc(4*1024, GFP_KERNEL);
- if (dev_buf == NULL)
- return -ENOMEM;
- tmp = dev_buf;
-
- tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
-
- for (i = 0; i < U300_DMA_CHANNELS; i++) {
- if (started_channels & (1ULL << i))
- tmp += sprintf(tmp, "channel %d\n", i);
- }
-
- tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count);
-
- ret = simple_read_from_buffer(buf, count, f_pos, dev_buf,
- tmp - dev_buf);
- kfree(dev_buf);
- return ret;
-}
-
-static const struct file_operations coh901318_debugfs_status_operations = {
- .open = simple_open,
- .read = coh901318_debugfs_read,
- .llseek = default_llseek,
-};
-
-
-static int __init init_coh901318_debugfs(void)
-{
-
- dma_dentry = debugfs_create_dir("dma", NULL);
-
- debugfs_create_file("status", S_IFREG | S_IRUGO, dma_dentry, NULL,
- &coh901318_debugfs_status_operations);
- return 0;
-}
-
-static void __exit exit_coh901318_debugfs(void)
-{
- debugfs_remove_recursive(dma_dentry);
-}
-
-module_init(init_coh901318_debugfs);
-module_exit(exit_coh901318_debugfs);
-#else
-
-#define COH901318_DEBUGFS_ASSIGN(x, y)
-
-#endif /* CONFIG_DEBUG_FS */
-
-static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
-{
- return container_of(chan, struct coh901318_chan, chan);
-}
-
-static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
- struct dma_slave_config *config,
- enum dma_transfer_direction direction);
-
-static inline const struct coh901318_params *
-cohc_chan_param(struct coh901318_chan *cohc)
-{
- return &chan_config[cohc->id].param;
-}
-
-static inline const struct coh_dma_channel *
-cohc_chan_conf(struct coh901318_chan *cohc)
-{
- return &chan_config[cohc->id];
-}
-
-static void enable_powersave(struct coh901318_chan *cohc)
-{
- unsigned long flags;
- struct powersave *pm = &cohc->base->pm;
-
- spin_lock_irqsave(&pm->lock, flags);
-
- pm->started_channels &= ~(1ULL << cohc->id);
-
- spin_unlock_irqrestore(&pm->lock, flags);
-}
-static void disable_powersave(struct coh901318_chan *cohc)
-{
- unsigned long flags;
- struct powersave *pm = &cohc->base->pm;
-
- spin_lock_irqsave(&pm->lock, flags);
-
- pm->started_channels |= (1ULL << cohc->id);
-
- spin_unlock_irqrestore(&pm->lock, flags);
-}
-
-static inline int coh901318_set_ctrl(struct coh901318_chan *cohc, u32 control)
-{
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- writel(control,
- virtbase + COH901318_CX_CTRL +
- COH901318_CX_CTRL_SPACING * channel);
- return 0;
-}
-
-static inline int coh901318_set_conf(struct coh901318_chan *cohc, u32 conf)
-{
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- writel(conf,
- virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING*channel);
- return 0;
-}
-
-
-static int coh901318_start(struct coh901318_chan *cohc)
-{
- u32 val;
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- disable_powersave(cohc);
-
- val = readl(virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
-
- /* Enable channel */
- val |= COH901318_CX_CFG_CH_ENABLE;
- writel(val, virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
-
- return 0;
-}
-
-static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
- struct coh901318_lli *lli)
-{
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- BUG_ON(readl(virtbase + COH901318_CX_STAT +
- COH901318_CX_STAT_SPACING*channel) &
- COH901318_CX_STAT_ACTIVE);
-
- writel(lli->src_addr,
- virtbase + COH901318_CX_SRC_ADDR +
- COH901318_CX_SRC_ADDR_SPACING * channel);
-
- writel(lli->dst_addr, virtbase +
- COH901318_CX_DST_ADDR +
- COH901318_CX_DST_ADDR_SPACING * channel);
-
- writel(lli->link_addr, virtbase + COH901318_CX_LNK_ADDR +
- COH901318_CX_LNK_ADDR_SPACING * channel);
-
- writel(lli->control, virtbase + COH901318_CX_CTRL +
- COH901318_CX_CTRL_SPACING * channel);
-
- return 0;
-}
-
-static struct coh901318_desc *
-coh901318_desc_get(struct coh901318_chan *cohc)
-{
- struct coh901318_desc *desc;
-
- if (list_empty(&cohc->free)) {
- /* alloc new desc because we're out of used ones
- * TODO: alloc a pile of descs instead of just one,
- * avoid many small allocations.
- */
- desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
- if (desc == NULL)
- goto out;
- INIT_LIST_HEAD(&desc->node);
- dma_async_tx_descriptor_init(&desc->desc, &cohc->chan);
- } else {
- /* Reuse an old desc. */
- desc = list_first_entry(&cohc->free,
- struct coh901318_desc,
- node);
- list_del(&desc->node);
- /* Initialize it a bit so it's not insane */
- desc->sg = NULL;
- desc->sg_len = 0;
- desc->desc.callback = NULL;
- desc->desc.callback_param = NULL;
- }
-
- out:
- return desc;
-}
-
-static void
-coh901318_desc_free(struct coh901318_chan *cohc, struct coh901318_desc *cohd)
-{
- list_add_tail(&cohd->node, &cohc->free);
-}
-
-/* call with irq lock held */
-static void
-coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
-{
- list_add_tail(&desc->node, &cohc->active);
-}
-
-static struct coh901318_desc *
-coh901318_first_active_get(struct coh901318_chan *cohc)
-{
- return list_first_entry_or_null(&cohc->active, struct coh901318_desc,
- node);
-}
-
-static void
-coh901318_desc_remove(struct coh901318_desc *cohd)
-{
- list_del(&cohd->node);
-}
-
-static void
-coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc)
-{
- list_add_tail(&desc->node, &cohc->queue);
-}
-
-static struct coh901318_desc *
-coh901318_first_queued(struct coh901318_chan *cohc)
-{
- return list_first_entry_or_null(&cohc->queue, struct coh901318_desc,
- node);
-}
-
-static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli)
-{
- struct coh901318_lli *lli = in_lli;
- u32 bytes = 0;
-
- while (lli) {
- bytes += lli->control & COH901318_CX_CTRL_TC_VALUE_MASK;
- lli = lli->virt_link_addr;
- }
- return bytes;
-}
-
-/*
- * Get the number of bytes left to transfer on this channel,
- * it is unwise to call this before stopping the channel for
- * absolute measures, but for a rough guess you can still call
- * it.
- */
-static u32 coh901318_get_bytes_left(struct dma_chan *chan)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- struct coh901318_desc *cohd;
- struct list_head *pos;
- unsigned long flags;
- u32 left = 0;
- int i = 0;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- /*
- * If there are many queued jobs, we iterate and add the
- * size of them all. We take a special look on the first
- * job though, since it is probably active.
- */
- list_for_each(pos, &cohc->active) {
- /*
- * The first job in the list will be working on the
- * hardware. The job can be stopped but still active,
- * so that the transfer counter is somewhere inside
- * the buffer.
- */
- cohd = list_entry(pos, struct coh901318_desc, node);
-
- if (i == 0) {
- struct coh901318_lli *lli;
- dma_addr_t ladd;
-
- /* Read current transfer count value */
- left = readl(cohc->base->virtbase +
- COH901318_CX_CTRL +
- COH901318_CX_CTRL_SPACING * cohc->id) &
- COH901318_CX_CTRL_TC_VALUE_MASK;
-
- /* See if the transfer is linked... */
- ladd = readl(cohc->base->virtbase +
- COH901318_CX_LNK_ADDR +
- COH901318_CX_LNK_ADDR_SPACING *
- cohc->id) &
- ~COH901318_CX_LNK_LINK_IMMEDIATE;
- /* Single transaction */
- if (!ladd)
- continue;
-
- /*
- * Linked transaction, follow the lli, find the
- * currently processing lli, and proceed to the next
- */
- lli = cohd->lli;
- while (lli && lli->link_addr != ladd)
- lli = lli->virt_link_addr;
-
- if (lli)
- lli = lli->virt_link_addr;
-
- /*
- * Follow remaining lli links around to count the total
- * number of bytes left
- */
- left += coh901318_get_bytes_in_lli(lli);
- } else {
- left += coh901318_get_bytes_in_lli(cohd->lli);
- }
- i++;
- }
-
- /* Also count bytes in the queued jobs */
- list_for_each(pos, &cohc->queue) {
- cohd = list_entry(pos, struct coh901318_desc, node);
- left += coh901318_get_bytes_in_lli(cohd->lli);
- }
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- return left;
-}
-
-/*
- * Pauses a transfer without losing data. Enables power save.
- * Use this function in conjunction with coh901318_resume.
- */
-static int coh901318_pause(struct dma_chan *chan)
-{
- u32 val;
- unsigned long flags;
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- /* Disable channel in HW */
- val = readl(virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
-
- /* Stopping infinite transfer */
- if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 &&
- (val & COH901318_CX_CFG_CH_ENABLE))
- cohc->stopped = 1;
-
-
- val &= ~COH901318_CX_CFG_CH_ENABLE;
- /* Enable twice, HW bug work around */
- writel(val, virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
- writel(val, virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
-
- /* Spin-wait for it to actually go inactive */
- while (readl(virtbase + COH901318_CX_STAT+COH901318_CX_STAT_SPACING *
- channel) & COH901318_CX_STAT_ACTIVE)
- cpu_relax();
-
- /* Check if we stopped an active job */
- if ((readl(virtbase + COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
- channel) & COH901318_CX_CTRL_TC_VALUE_MASK) > 0)
- cohc->stopped = 1;
-
- enable_powersave(cohc);
-
- spin_unlock_irqrestore(&cohc->lock, flags);
- return 0;
-}
-
-/* Resumes a transfer that has been stopped via 300_dma_stop(..).
- Power save is handled.
-*/
-static int coh901318_resume(struct dma_chan *chan)
-{
- u32 val;
- unsigned long flags;
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- int channel = cohc->id;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- disable_powersave(cohc);
-
- if (cohc->stopped) {
- /* Enable channel in HW */
- val = readl(cohc->base->virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING * channel);
-
- val |= COH901318_CX_CFG_CH_ENABLE;
-
- writel(val, cohc->base->virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING*channel);
-
- cohc->stopped = 0;
- }
-
- spin_unlock_irqrestore(&cohc->lock, flags);
- return 0;
-}
-
-bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
-{
- unsigned long ch_nr = (unsigned long) chan_id;
-
- if (ch_nr == to_coh901318_chan(chan)->id)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(coh901318_filter_id);
-
-struct coh901318_filter_args {
- struct coh901318_base *base;
- unsigned int ch_nr;
-};
-
-static bool coh901318_filter_base_and_id(struct dma_chan *chan, void *data)
-{
- struct coh901318_filter_args *args = data;
-
- if (&args->base->dma_slave == chan->device &&
- args->ch_nr == to_coh901318_chan(chan)->id)
- return true;
-
- return false;
-}
-
-static struct dma_chan *coh901318_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
-{
- struct coh901318_filter_args args = {
- .base = ofdma->of_dma_data,
- .ch_nr = dma_spec->args[0],
- };
- dma_cap_mask_t cap;
- dma_cap_zero(cap);
- dma_cap_set(DMA_SLAVE, cap);
-
- return dma_request_channel(cap, coh901318_filter_base_and_id, &args);
-}
-/*
- * DMA channel allocation
- */
-static int coh901318_config(struct coh901318_chan *cohc,
- struct coh901318_params *param)
-{
- const struct coh901318_params *p;
- int channel = cohc->id;
- void __iomem *virtbase = cohc->base->virtbase;
-
- if (param)
- p = param;
- else
- p = cohc_chan_param(cohc);
-
- /* Clear any pending BE or TC interrupt */
- if (channel < 32) {
- writel(1 << channel, virtbase + COH901318_BE_INT_CLEAR1);
- writel(1 << channel, virtbase + COH901318_TC_INT_CLEAR1);
- } else {
- writel(1 << (channel - 32), virtbase +
- COH901318_BE_INT_CLEAR2);
- writel(1 << (channel - 32), virtbase +
- COH901318_TC_INT_CLEAR2);
- }
-
- coh901318_set_conf(cohc, p->config);
- coh901318_set_ctrl(cohc, p->ctrl_lli_last);
-
- return 0;
-}
-
-/* Caller must hold the channel lock when calling this function.
- * Starts queued jobs, if any.
- * TODO: start all queued jobs in one go
- *
- * Returns the descriptor if a queued job was started, otherwise NULL
- * (the queue was empty).
- */
-static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
-{
- struct coh901318_desc *cohd;
-
- /*
- * start queued jobs, if any
- * TODO: transmit all queued jobs in one go
- */
- cohd = coh901318_first_queued(cohc);
-
- if (cohd != NULL) {
- /* Remove from queue */
- coh901318_desc_remove(cohd);
- /* initiate DMA job */
- cohc->busy = 1;
-
- coh901318_desc_submit(cohc, cohd);
-
- /* Program the transaction head */
- coh901318_set_conf(cohc, cohd->head_config);
- coh901318_set_ctrl(cohc, cohd->head_ctrl);
- coh901318_prep_linked_list(cohc, cohd->lli);
-
- /* start dma job on this channel */
- coh901318_start(cohc);
-
- }
-
- return cohd;
-}
-
-/*
- * This tasklet is called from the interrupt handler to
- * handle each descriptor (DMA job) that is sent to a channel.
- */
-static void dma_tasklet(struct tasklet_struct *t)
-{
- struct coh901318_chan *cohc = from_tasklet(cohc, t, tasklet);
- struct coh901318_desc *cohd_fin;
- unsigned long flags;
- struct dmaengine_desc_callback cb;
-
- dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
- " nbr_active_done %ld\n", __func__,
- cohc->id, cohc->nbr_active_done);
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- /* get first active descriptor entry from list */
- cohd_fin = coh901318_first_active_get(cohc);
-
- if (cohd_fin == NULL)
- goto err;
-
- /* locate callback to client */
- dmaengine_desc_get_callback(&cohd_fin->desc, &cb);
-
- /* sign this job as completed on the channel */
- dma_cookie_complete(&cohd_fin->desc);
-
- /* release the lli allocation and remove the descriptor */
- coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);
-
- /* return desc to free-list */
- coh901318_desc_remove(cohd_fin);
- coh901318_desc_free(cohc, cohd_fin);
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- /* Call the callback when we're done */
- dmaengine_desc_callback_invoke(&cb, NULL);
-
- spin_lock_irqsave(&cohc->lock, flags);
-
-	/*
-	 * If another interrupt fired while the tasklet was already
-	 * scheduled, we don't get called twice, so we keep a counter
-	 * of the number of IRQs expected to be handled for this
-	 * channel. If more than one IRQ needs to be acked, we simply
-	 * schedule this tasklet again.
-	 */
- cohc->nbr_active_done--;
- if (cohc->nbr_active_done) {
- dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs "
- "came in while we were scheduling this tasklet\n");
- if (cohc_chan_conf(cohc)->priority_high)
- tasklet_hi_schedule(&cohc->tasklet);
- else
- tasklet_schedule(&cohc->tasklet);
- }
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- return;
-
- err:
- spin_unlock_irqrestore(&cohc->lock, flags);
- dev_err(COHC_2_DEV(cohc), "[%s] No active dma desc\n", __func__);
-}
-
-
-/* called from interrupt context */
-static void dma_tc_handle(struct coh901318_chan *cohc)
-{
- /*
- * If the channel is not allocated, then we shouldn't have
- * any TC interrupts on it.
- */
- if (!cohc->allocated) {
- dev_err(COHC_2_DEV(cohc), "spurious interrupt from "
- "unallocated channel\n");
- return;
- }
-
- /*
- * When we reach this point, at least one queue item
- * should have been moved over from cohc->queue to
- * cohc->active and run to completion, that is why we're
- * getting a terminal count interrupt is it not?
- * If you get this BUG() the most probable cause is that
- * the individual nodes in the lli chain have IRQ enabled,
- * so check your platform config for lli chain ctrl.
- */
- BUG_ON(list_empty(&cohc->active));
-
- cohc->nbr_active_done++;
-
- /*
-	 * This attempts to take a job from cohc->queue, put it
- * into cohc->active and start it.
- */
- if (coh901318_queue_start(cohc) == NULL)
- cohc->busy = 0;
-
- /*
- * This tasklet will remove items from cohc->active
-	 * and thus terminate them.
- */
- if (cohc_chan_conf(cohc)->priority_high)
- tasklet_hi_schedule(&cohc->tasklet);
- else
- tasklet_schedule(&cohc->tasklet);
-}
-
-
-static irqreturn_t dma_irq_handler(int irq, void *dev_id)
-{
- u32 status1;
- u32 status2;
- int i;
- int ch;
- struct coh901318_base *base = dev_id;
- struct coh901318_chan *cohc;
- void __iomem *virtbase = base->virtbase;
-
- status1 = readl(virtbase + COH901318_INT_STATUS1);
- status2 = readl(virtbase + COH901318_INT_STATUS2);
-
- if (unlikely(status1 == 0 && status2 == 0)) {
- dev_warn(base->dev, "spurious DMA IRQ from no channel!\n");
- return IRQ_HANDLED;
- }
-
-	/* TODO: consider handling the IRQ in a tasklet here to
- * minimize interrupt latency */
-
- /* Check the first 32 DMA channels for IRQ */
- while (status1) {
- /* Find first bit set, return as a number. */
- i = ffs(status1) - 1;
- ch = i;
-
- cohc = &base->chans[ch];
- spin_lock(&cohc->lock);
-
- /* Mask off this bit */
- status1 &= ~(1 << i);
- /* Check the individual channel bits */
- if (test_bit(i, virtbase + COH901318_BE_INT_STATUS1)) {
- dev_crit(COHC_2_DEV(cohc),
- "DMA bus error on channel %d!\n", ch);
- BUG_ON(1);
- /* Clear BE interrupt */
- __set_bit(i, virtbase + COH901318_BE_INT_CLEAR1);
- } else {
- /* Caused by TC, really? */
- if (unlikely(!test_bit(i, virtbase +
- COH901318_TC_INT_STATUS1))) {
- dev_warn(COHC_2_DEV(cohc),
- "ignoring interrupt not caused by terminal count on channel %d\n", ch);
- /* Clear TC interrupt */
- BUG_ON(1);
- __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
- } else {
- /* Enable powersave if transfer has finished */
- if (!(readl(virtbase + COH901318_CX_STAT +
- COH901318_CX_STAT_SPACING*ch) &
- COH901318_CX_STAT_ENABLED)) {
- enable_powersave(cohc);
- }
-
- /* Must clear TC interrupt before calling
- * dma_tc_handle
-				 * in case tc_handle initiates a new dma job
- */
- __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
-
- dma_tc_handle(cohc);
- }
- }
- spin_unlock(&cohc->lock);
- }
-
- /* Check the remaining 32 DMA channels for IRQ */
- while (status2) {
- /* Find first bit set, return as a number. */
- i = ffs(status2) - 1;
- ch = i + 32;
- cohc = &base->chans[ch];
- spin_lock(&cohc->lock);
-
- /* Mask off this bit */
- status2 &= ~(1 << i);
- /* Check the individual channel bits */
- if (test_bit(i, virtbase + COH901318_BE_INT_STATUS2)) {
- dev_crit(COHC_2_DEV(cohc),
- "DMA bus error on channel %d!\n", ch);
- /* Clear BE interrupt */
- BUG_ON(1);
- __set_bit(i, virtbase + COH901318_BE_INT_CLEAR2);
- } else {
- /* Caused by TC, really? */
- if (unlikely(!test_bit(i, virtbase +
- COH901318_TC_INT_STATUS2))) {
- dev_warn(COHC_2_DEV(cohc),
- "ignoring interrupt not caused by terminal count on channel %d\n", ch);
- /* Clear TC interrupt */
- __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
- BUG_ON(1);
- } else {
- /* Enable powersave if transfer has finished */
- if (!(readl(virtbase + COH901318_CX_STAT +
- COH901318_CX_STAT_SPACING*ch) &
- COH901318_CX_STAT_ENABLED)) {
- enable_powersave(cohc);
- }
- /* Must clear TC interrupt before calling
- * dma_tc_handle
-			 * in case tc_handle initiates a new dma job
- */
- __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
-
- dma_tc_handle(cohc);
- }
- }
- spin_unlock(&cohc->lock);
- }
-
- return IRQ_HANDLED;
-}
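
Editor's sketch, illustrative only: the handler above services the two 32-bit status words by repeatedly taking the lowest set bit with ffs() and clearing it from a local copy. The same pattern in isolation (handle is a placeholder callback, not a driver symbol; ffs() comes from <linux/bitops.h>):

/* Hedged sketch of the ffs()-based per-channel dispatch loop. */
static void dispatch_status_bits(u32 status, void (*handle)(int ch))
{
	while (status) {
		int i = ffs(status) - 1;	/* lowest pending channel */

		status &= ~(1U << i);		/* clear it in the local copy */
		handle(i);			/* service that channel */
	}
}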
-
-static int coh901318_terminate_all(struct dma_chan *chan)
-{
- unsigned long flags;
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- struct coh901318_desc *cohd;
- void __iomem *virtbase = cohc->base->virtbase;
-
- /* The remainder of this function terminates the transfer */
- coh901318_pause(chan);
- spin_lock_irqsave(&cohc->lock, flags);
-
- /* Clear any pending BE or TC interrupt */
- if (cohc->id < 32) {
- writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
- writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
- } else {
- writel(1 << (cohc->id - 32), virtbase +
- COH901318_BE_INT_CLEAR2);
- writel(1 << (cohc->id - 32), virtbase +
- COH901318_TC_INT_CLEAR2);
- }
-
- enable_powersave(cohc);
-
- while ((cohd = coh901318_first_active_get(cohc))) {
- /* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd->lli);
-
- /* return desc to free-list */
- coh901318_desc_remove(cohd);
- coh901318_desc_free(cohc, cohd);
- }
-
- while ((cohd = coh901318_first_queued(cohc))) {
- /* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd->lli);
-
- /* return desc to free-list */
- coh901318_desc_remove(cohd);
- coh901318_desc_free(cohc, cohd);
- }
-
-
- cohc->nbr_active_done = 0;
- cohc->busy = 0;
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- return 0;
-}
-
-static int coh901318_alloc_chan_resources(struct dma_chan *chan)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- unsigned long flags;
-
- dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n",
- __func__, cohc->id);
-
- if (chan->client_count > 1)
- return -EBUSY;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- coh901318_config(cohc, NULL);
-
- cohc->allocated = 1;
- dma_cookie_init(chan);
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- return 1;
-}
-
-static void
-coh901318_free_chan_resources(struct dma_chan *chan)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- int channel = cohc->id;
- unsigned long flags;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- /* Disable HW */
- writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CFG +
- COH901318_CX_CFG_SPACING*channel);
- writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CTRL +
- COH901318_CX_CTRL_SPACING*channel);
-
- cohc->allocated = 0;
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- coh901318_terminate_all(chan);
-}
-
-
-static dma_cookie_t
-coh901318_tx_submit(struct dma_async_tx_descriptor *tx)
-{
- struct coh901318_desc *cohd = container_of(tx, struct coh901318_desc,
- desc);
- struct coh901318_chan *cohc = to_coh901318_chan(tx->chan);
- unsigned long flags;
- dma_cookie_t cookie;
-
- spin_lock_irqsave(&cohc->lock, flags);
- cookie = dma_cookie_assign(tx);
-
- coh901318_desc_queue(cohc, cohd);
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-
- return cookie;
-}
-
-static struct dma_async_tx_descriptor *
-coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
- size_t size, unsigned long flags)
-{
- struct coh901318_lli *lli;
- struct coh901318_desc *cohd;
- unsigned long flg;
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- int lli_len;
- u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
- int ret;
-
- spin_lock_irqsave(&cohc->lock, flg);
-
- dev_vdbg(COHC_2_DEV(cohc),
- "[%s] channel %d src %pad dest %pad size %zu\n",
- __func__, cohc->id, &src, &dest, size);
-
- if (flags & DMA_PREP_INTERRUPT)
- /* Trigger interrupt after last lli */
- ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
-
- lli_len = size >> MAX_DMA_PACKET_SIZE_SHIFT;
- if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size)
- lli_len++;
-
- lli = coh901318_lli_alloc(&cohc->base->pool, lli_len);
-
- if (lli == NULL)
- goto err;
-
- ret = coh901318_lli_fill_memcpy(
- &cohc->base->pool, lli, src, size, dest,
- cohc_chan_param(cohc)->ctrl_lli_chained,
- ctrl_last);
- if (ret)
- goto err;
-
- COH_DBG(coh901318_list_print(cohc, lli));
-
- /* Pick a descriptor to handle this transfer */
- cohd = coh901318_desc_get(cohc);
- cohd->lli = lli;
- cohd->flags = flags;
- cohd->desc.tx_submit = coh901318_tx_submit;
-
- spin_unlock_irqrestore(&cohc->lock, flg);
-
- return &cohd->desc;
- err:
- spin_unlock_irqrestore(&cohc->lock, flg);
- return NULL;
-}
-
-static struct dma_async_tx_descriptor *
-coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- struct coh901318_lli *lli;
- struct coh901318_desc *cohd;
- const struct coh901318_params *params;
- struct scatterlist *sg;
- int len = 0;
- int size;
- int i;
- u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
- u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
- u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
- u32 config;
- unsigned long flg;
- int ret;
-
- if (!sgl)
- goto out;
- if (sg_dma_len(sgl) == 0)
- goto out;
-
- spin_lock_irqsave(&cohc->lock, flg);
-
- dev_vdbg(COHC_2_DEV(cohc), "[%s] sg_len %d dir %d\n",
- __func__, sg_len, direction);
-
- if (flags & DMA_PREP_INTERRUPT)
- /* Trigger interrupt after last lli */
- ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
-
- params = cohc_chan_param(cohc);
- config = params->config;
- /*
- * Add runtime-specific control on top, make
- * sure the bits you set per peripheral channel are
- * cleared in the default config from the platform.
- */
- ctrl_chained |= cohc->ctrl;
- ctrl_last |= cohc->ctrl;
- ctrl |= cohc->ctrl;
-
- if (direction == DMA_MEM_TO_DEV) {
- u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
- COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
-
- config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY;
- ctrl_chained |= tx_flags;
- ctrl_last |= tx_flags;
- ctrl |= tx_flags;
- } else if (direction == DMA_DEV_TO_MEM) {
- u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
- COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
-
- config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY;
- ctrl_chained |= rx_flags;
- ctrl_last |= rx_flags;
- ctrl |= rx_flags;
- } else
- goto err_direction;
-
-	/* The DMA controller only supports transferring packets of up to
-	 * MAX_DMA_PACKET_SIZE bytes. Calculate the total number of
-	 * DMA elements required to send the entire sg list.
- */
- for_each_sg(sgl, sg, sg_len, i) {
- unsigned int factor;
- size = sg_dma_len(sg);
-
- if (size <= MAX_DMA_PACKET_SIZE) {
- len++;
- continue;
- }
-
- factor = size >> MAX_DMA_PACKET_SIZE_SHIFT;
- if ((factor << MAX_DMA_PACKET_SIZE_SHIFT) < size)
- factor++;
-
- len += factor;
- }
-
- pr_debug("Allocate %d lli:s for this transfer\n", len);
- lli = coh901318_lli_alloc(&cohc->base->pool, len);
-
- if (lli == NULL)
- goto err_dma_alloc;
-
- coh901318_dma_set_runtimeconfig(chan, &cohc->config, direction);
-
- /* initiate allocated lli list */
- ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
- cohc->addr,
- ctrl_chained,
- ctrl,
- ctrl_last,
- direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
- if (ret)
- goto err_lli_fill;
-
-
- COH_DBG(coh901318_list_print(cohc, lli));
-
- /* Pick a descriptor to handle this transfer */
- cohd = coh901318_desc_get(cohc);
- cohd->head_config = config;
- /*
- * Set the default head ctrl for the channel to the one from the
- * lli, things may have changed due to odd buffer alignment
- * etc.
- */
- cohd->head_ctrl = lli->control;
- cohd->dir = direction;
- cohd->flags = flags;
- cohd->desc.tx_submit = coh901318_tx_submit;
- cohd->lli = lli;
-
- spin_unlock_irqrestore(&cohc->lock, flg);
-
- return &cohd->desc;
- err_lli_fill:
- err_dma_alloc:
- err_direction:
- spin_unlock_irqrestore(&cohc->lock, flg);
- out:
- return NULL;
-}
-
-static enum dma_status
-coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
- struct dma_tx_state *txstate)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- enum dma_status ret;
-
- ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_COMPLETE || !txstate)
- return ret;
-
- dma_set_residue(txstate, coh901318_get_bytes_left(chan));
-
- if (ret == DMA_IN_PROGRESS && cohc->stopped)
- ret = DMA_PAUSED;
-
- return ret;
-}
-
-static void
-coh901318_issue_pending(struct dma_chan *chan)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&cohc->lock, flags);
-
- /*
- * Busy means that pending jobs are already being processed,
-	 * so there is no point in starting the queue: the
- * terminal count interrupt on the channel will take the next
- * job on the queue and execute it anyway.
- */
- if (!cohc->busy)
- coh901318_queue_start(cohc);
-
- spin_unlock_irqrestore(&cohc->lock, flags);
-}
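
Editor's note, not part of the patch: issue_pending only kicks the queue when the channel is idle; otherwise the terminal-count interrupt chains into the next job. A hypothetical client sequence that ends up here (sg list setup omitted, assumes #include <linux/dmaengine.h>):

/* Hedged sketch: typical dmaengine client flow into coh901318_issue_pending(). */
static int example_start_transfer(struct dma_chan *chan, struct scatterlist *sgl,
				  unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	cookie = dmaengine_submit(desc);	/* queues via coh901318_tx_submit() */
	dma_async_issue_pending(chan);		/* starts it if the channel is idle */

	return dma_submit_error(cookie);
}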
-
-/*
- * Here we wrap in the runtime dma control interface
- */
-struct burst_table {
- int burst_8bit;
- int burst_16bit;
- int burst_32bit;
- u32 reg;
-};
-
-static const struct burst_table burst_sizes[] = {
- {
- .burst_8bit = 64,
- .burst_16bit = 32,
- .burst_32bit = 16,
- .reg = COH901318_CX_CTRL_BURST_COUNT_64_BYTES,
- },
- {
- .burst_8bit = 48,
- .burst_16bit = 24,
- .burst_32bit = 12,
- .reg = COH901318_CX_CTRL_BURST_COUNT_48_BYTES,
- },
- {
- .burst_8bit = 32,
- .burst_16bit = 16,
- .burst_32bit = 8,
- .reg = COH901318_CX_CTRL_BURST_COUNT_32_BYTES,
- },
- {
- .burst_8bit = 16,
- .burst_16bit = 8,
- .burst_32bit = 4,
- .reg = COH901318_CX_CTRL_BURST_COUNT_16_BYTES,
- },
- {
- .burst_8bit = 8,
- .burst_16bit = 4,
- .burst_32bit = 2,
- .reg = COH901318_CX_CTRL_BURST_COUNT_8_BYTES,
- },
- {
- .burst_8bit = 4,
- .burst_16bit = 2,
- .burst_32bit = 1,
- .reg = COH901318_CX_CTRL_BURST_COUNT_4_BYTES,
- },
- {
- .burst_8bit = 2,
- .burst_16bit = 1,
- .burst_32bit = 0,
- .reg = COH901318_CX_CTRL_BURST_COUNT_2_BYTES,
- },
- {
- .burst_8bit = 1,
- .burst_16bit = 0,
- .burst_32bit = 0,
- .reg = COH901318_CX_CTRL_BURST_COUNT_1_BYTE,
- },
-};
-
-static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
- struct dma_slave_config *config,
- enum dma_transfer_direction direction)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
- dma_addr_t addr;
- enum dma_slave_buswidth addr_width;
- u32 maxburst;
- u32 ctrl = 0;
- int i = 0;
-
- /* We only support mem to per or per to mem transfers */
- if (direction == DMA_DEV_TO_MEM) {
- addr = config->src_addr;
- addr_width = config->src_addr_width;
- maxburst = config->src_maxburst;
- } else if (direction == DMA_MEM_TO_DEV) {
- addr = config->dst_addr;
- addr_width = config->dst_addr_width;
- maxburst = config->dst_maxburst;
- } else {
- dev_err(COHC_2_DEV(cohc), "illegal channel mode\n");
- return -EINVAL;
- }
-
- dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n",
- addr_width);
- switch (addr_width) {
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- ctrl |=
- COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS |
- COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS;
-
- while (i < ARRAY_SIZE(burst_sizes)) {
- if (burst_sizes[i].burst_8bit <= maxburst)
- break;
- i++;
- }
-
- break;
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- ctrl |=
- COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS |
- COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS;
-
- while (i < ARRAY_SIZE(burst_sizes)) {
- if (burst_sizes[i].burst_16bit <= maxburst)
- break;
- i++;
- }
-
- break;
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- /* Direction doesn't matter here, it's 32/32 bits */
- ctrl |=
- COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
- COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS;
-
- while (i < ARRAY_SIZE(burst_sizes)) {
- if (burst_sizes[i].burst_32bit <= maxburst)
- break;
- i++;
- }
-
- break;
- default:
- dev_err(COHC_2_DEV(cohc),
- "bad runtimeconfig: alien address width\n");
- return -EINVAL;
- }
-
- ctrl |= burst_sizes[i].reg;
- dev_dbg(COHC_2_DEV(cohc),
- "selected burst size %d bytes for address width %d bytes, maxburst %d\n",
- burst_sizes[i].burst_8bit, addr_width, maxburst);
-
- cohc->addr = addr;
- cohc->ctrl = ctrl;
-
- return 0;
-}
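
Editor's sketch, illustrative only: the scan above walks burst_sizes[] from the largest burst downwards and settles on the first entry whose burst count does not exceed the client's maxburst. The same selection, isolated for the 8-bit bus case and guarded against running off the table (helper name is hypothetical):

/* Hedged sketch of the "largest burst <= maxburst" table scan. */
static u32 pick_burst_reg(const struct burst_table *tbl, size_t n, u32 maxburst)
{
	size_t i = 0;

	while (i < n - 1 && tbl[i].burst_8bit > maxburst)
		i++;			/* fall through to smaller bursts */

	return tbl[i].reg;		/* last entry is the 1-byte burst */
}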
-
-static int coh901318_dma_slave_config(struct dma_chan *chan,
- struct dma_slave_config *config)
-{
- struct coh901318_chan *cohc = to_coh901318_chan(chan);
-
- memcpy(&cohc->config, config, sizeof(*config));
-
- return 0;
-}
-
-static void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
- struct coh901318_base *base)
-{
- int chans_i;
- int i = 0;
- struct coh901318_chan *cohc;
-
- INIT_LIST_HEAD(&dma->channels);
-
- for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) {
- for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) {
- cohc = &base->chans[i];
-
- cohc->base = base;
- cohc->chan.device = dma;
- cohc->id = i;
-
- /* TODO: do we really need this lock if only one
- * client is connected to each channel?
- */
-
- spin_lock_init(&cohc->lock);
-
- cohc->nbr_active_done = 0;
- cohc->busy = 0;
- INIT_LIST_HEAD(&cohc->free);
- INIT_LIST_HEAD(&cohc->active);
- INIT_LIST_HEAD(&cohc->queue);
-
- tasklet_setup(&cohc->tasklet, dma_tasklet);
-
- list_add_tail(&cohc->chan.device_node,
- &dma->channels);
- }
- }
-}
-
-static int __init coh901318_probe(struct platform_device *pdev)
-{
- int err = 0;
- struct coh901318_base *base;
- int irq;
- struct resource *io;
-
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!io)
- return -ENODEV;
-
- /* Map DMA controller registers to virtual memory */
- if (devm_request_mem_region(&pdev->dev,
- io->start,
- resource_size(io),
- pdev->dev.driver->name) == NULL)
- return -ENOMEM;
-
- base = devm_kzalloc(&pdev->dev,
- ALIGN(sizeof(struct coh901318_base), 4) +
- U300_DMA_CHANNELS *
- sizeof(struct coh901318_chan),
- GFP_KERNEL);
- if (!base)
- return -ENOMEM;
-
- base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4);
-
- base->virtbase = devm_ioremap(&pdev->dev, io->start, resource_size(io));
- if (!base->virtbase)
- return -ENOMEM;
-
- base->dev = &pdev->dev;
- spin_lock_init(&base->pm.lock);
- base->pm.started_channels = 0;
-
- COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0,
- "coh901318", base);
- if (err)
- return err;
-
- base->irq = irq;
-
- err = coh901318_pool_create(&base->pool, &pdev->dev,
- sizeof(struct coh901318_lli),
- 32);
- if (err)
- return err;
-
- /* init channels for device transfers */
- coh901318_base_init(&base->dma_slave, dma_slave_channels,
- base);
-
- dma_cap_zero(base->dma_slave.cap_mask);
- dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
-
- base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources;
- base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources;
- base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
- base->dma_slave.device_tx_status = coh901318_tx_status;
- base->dma_slave.device_issue_pending = coh901318_issue_pending;
- base->dma_slave.device_config = coh901318_dma_slave_config;
- base->dma_slave.device_pause = coh901318_pause;
- base->dma_slave.device_resume = coh901318_resume;
- base->dma_slave.device_terminate_all = coh901318_terminate_all;
- base->dma_slave.dev = &pdev->dev;
-
- err = dma_async_device_register(&base->dma_slave);
-
- if (err)
- goto err_register_slave;
-
- /* init channels for memcpy */
- coh901318_base_init(&base->dma_memcpy, dma_memcpy_channels,
- base);
-
- dma_cap_zero(base->dma_memcpy.cap_mask);
- dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
-
- base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources;
- base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources;
- base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
- base->dma_memcpy.device_tx_status = coh901318_tx_status;
- base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
- base->dma_memcpy.device_config = coh901318_dma_slave_config;
- base->dma_memcpy.device_pause = coh901318_pause;
- base->dma_memcpy.device_resume = coh901318_resume;
- base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
- base->dma_memcpy.dev = &pdev->dev;
- /*
-	 * This controller can only access addresses at even 32-bit boundaries,
-	 * i.e. 4-byte (2^2) aligned
- */
- base->dma_memcpy.copy_align = DMAENGINE_ALIGN_4_BYTES;
- err = dma_async_device_register(&base->dma_memcpy);
-
- if (err)
- goto err_register_memcpy;
-
- err = of_dma_controller_register(pdev->dev.of_node, coh901318_xlate,
- base);
- if (err)
- goto err_register_of_dma;
-
- platform_set_drvdata(pdev, base);
- dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%p\n",
- base->virtbase);
-
- return err;
-
- err_register_of_dma:
- dma_async_device_unregister(&base->dma_memcpy);
- err_register_memcpy:
- dma_async_device_unregister(&base->dma_slave);
- err_register_slave:
- coh901318_pool_destroy(&base->pool);
- return err;
-}
-static void coh901318_base_remove(struct coh901318_base *base, const int *pick_chans)
-{
- int chans_i;
- int i = 0;
- struct coh901318_chan *cohc;
-
- for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) {
- for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) {
- cohc = &base->chans[i];
-
- tasklet_kill(&cohc->tasklet);
- }
- }
-
-}
-
-static int coh901318_remove(struct platform_device *pdev)
-{
- struct coh901318_base *base = platform_get_drvdata(pdev);
-
- devm_free_irq(&pdev->dev, base->irq, base);
-
- coh901318_base_remove(base, dma_slave_channels);
- coh901318_base_remove(base, dma_memcpy_channels);
-
- of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&base->dma_memcpy);
- dma_async_device_unregister(&base->dma_slave);
- coh901318_pool_destroy(&base->pool);
- return 0;
-}
-
-static const struct of_device_id coh901318_dt_match[] = {
- { .compatible = "stericsson,coh901318" },
- {},
-};
-
-static struct platform_driver coh901318_driver = {
- .remove = coh901318_remove,
- .driver = {
- .name = "coh901318",
- .of_match_table = coh901318_dt_match,
- },
-};
-
-static int __init coh901318_init(void)
-{
- return platform_driver_probe(&coh901318_driver, coh901318_probe);
-}
-subsys_initcall(coh901318_init);
-
-static void __exit coh901318_exit(void)
-{
- platform_driver_unregister(&coh901318_driver);
-}
-module_exit(coh901318_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Per Friden");
diff --git a/drivers/dma/coh901318.h b/drivers/dma/coh901318.h
deleted file mode 100644
index bbf533600558..000000000000
--- a/drivers/dma/coh901318.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2007-2013 ST-Ericsson
- * DMA driver for COH 901 318
- * Author: Per Friden <per.friden@stericsson.com>
- */
-
-#ifndef COH901318_H
-#define COH901318_H
-
-#define MAX_DMA_PACKET_SIZE_SHIFT 11
-#define MAX_DMA_PACKET_SIZE (1 << MAX_DMA_PACKET_SIZE_SHIFT)
-
-struct device;
-
-struct coh901318_pool {
- spinlock_t lock;
- struct dma_pool *dmapool;
- struct device *dev;
-
-#ifdef CONFIG_DEBUG_FS
- int debugfs_pool_counter;
-#endif
-};
-
-/**
- * struct coh901318_lli - linked list item for DMAC
- * @control: control settings for DMAC
- * @src_addr: transfer source address
- * @dst_addr: transfer destination address
- * @link_addr: physical address to next lli
- * @virt_link_addr: virtual address of next lli (only used by pool_free)
- * @phy_this: physical address of current lli (only used by pool_free)
- */
-struct coh901318_lli {
- u32 control;
- dma_addr_t src_addr;
- dma_addr_t dst_addr;
- dma_addr_t link_addr;
-
- void *virt_link_addr;
- dma_addr_t phy_this;
-};
-
-/**
- * coh901318_pool_create() - Creates a dma pool for lli:s
- * @pool: pool handle
- * @dev: dma device
- * @lli_nbr: number of lli:s in the pool
- * @align: address alignment of lli:s
- * returns 0 on success otherwise non-zero
- */
-int coh901318_pool_create(struct coh901318_pool *pool,
- struct device *dev,
- size_t lli_nbr, size_t align);
-
-/**
- * coh901318_pool_destroy() - Destroys the dma pool
- * @pool: pool handle
- * returns 0 on success otherwise non-zero
- */
-int coh901318_pool_destroy(struct coh901318_pool *pool);
-
-/**
- * coh901318_lli_alloc() - Allocates a linked list
- *
- * @pool: pool handle
- * @len: length of the list
- * return: non-NULL on success otherwise NULL
- */
-struct coh901318_lli *
-coh901318_lli_alloc(struct coh901318_pool *pool,
- unsigned int len);
-
-/**
- * coh901318_lli_free() - Returns the linked list items to the pool
- * @pool: pool handle
- * @lli: reference to lli pointer to be freed
- */
-void coh901318_lli_free(struct coh901318_pool *pool,
- struct coh901318_lli **lli);
-
-/**
- * coh901318_lli_fill_memcpy() - Prepares the lli:s for dma memcpy
- * @pool: pool handle
- * @lli: allocated lli
- * @src: src address
- * @size: transfer size
- * @dst: destination address
- * @ctrl_chained: ctrl for chained lli
- * @ctrl_last: ctrl for the last lli
- * returns number of CPU interrupts for the lli, negative on error.
- */
-int
-coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- dma_addr_t src, unsigned int size,
- dma_addr_t dst, u32 ctrl_chained, u32 ctrl_last);
-
-/**
- * coh901318_lli_fill_single() - Prepares the lli:s for dma single transfer
- * @pool: pool handle
- * @lli: allocated lli
- * @buf: transfer buffer
- * @size: transfer size
- * @dev_addr: address of peripheral
- * @ctrl_chained: ctrl for chained lli
- * @ctrl_last: ctrl for the last lli
- * @dir: direction of transfer (to or from device)
- * returns number of CPU interrupts for the lli, negative on error.
- */
-int
-coh901318_lli_fill_single(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- dma_addr_t buf, unsigned int size,
- dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
- enum dma_transfer_direction dir);
-
-/**
- * coh901318_lli_fill_sg() - Prepares the lli:s for dma scatter list transfer
- * @pool: pool handle
- * @lli: allocated lli
- * @sg: scatter gather list
- * @nents: number of entries in sg
- * @dev_addr: address of peripheral
- * @ctrl_chained: ctrl for chained lli
- * @ctrl: ctrl of middle lli
- * @ctrl_last: ctrl for the last lli
- * @dir: direction of transfer (to or from device)
- * @ctrl_irq_mask: ctrl mask for CPU interrupt
- * returns number of CPU interrupts for the lli, negative on error.
- */
-int
-coh901318_lli_fill_sg(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- struct scatterlist *sg, unsigned int nents,
- dma_addr_t dev_addr, u32 ctrl_chained,
- u32 ctrl, u32 ctrl_last,
- enum dma_transfer_direction dir, u32 ctrl_irq_mask);
-
-#endif /* COH901318_H */
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
deleted file mode 100644
index 6b6c2fd0865a..000000000000
--- a/drivers/dma/coh901318_lli.c
+++ /dev/null
@@ -1,313 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * drivers/dma/coh901318_lli.c
- *
- * Copyright (C) 2007-2009 ST-Ericsson
- * Support functions for handling lli for dma
- * Author: Per Friden <per.friden@stericsson.com>
- */
-
-#include <linux/spinlock.h>
-#include <linux/memory.h>
-#include <linux/gfp.h>
-#include <linux/dmapool.h>
-#include <linux/dmaengine.h>
-
-#include "coh901318.h"
-
-#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
-#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
-#define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
-#else
-#define DEBUGFS_POOL_COUNTER_RESET(pool)
-#define DEBUGFS_POOL_COUNTER_ADD(pool, add)
-#endif
-
-static struct coh901318_lli *
-coh901318_lli_next(struct coh901318_lli *data)
-{
- if (data == NULL || data->link_addr == 0)
- return NULL;
-
- return (struct coh901318_lli *) data->virt_link_addr;
-}
-
-int coh901318_pool_create(struct coh901318_pool *pool,
- struct device *dev,
- size_t size, size_t align)
-{
- spin_lock_init(&pool->lock);
- pool->dev = dev;
- pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
-
- DEBUGFS_POOL_COUNTER_RESET(pool);
- return 0;
-}
-
-int coh901318_pool_destroy(struct coh901318_pool *pool)
-{
-
- dma_pool_destroy(pool->dmapool);
- return 0;
-}
-
-struct coh901318_lli *
-coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
-{
- int i;
- struct coh901318_lli *head;
- struct coh901318_lli *lli;
- struct coh901318_lli *lli_prev;
- dma_addr_t phy;
-
- if (len == 0)
- return NULL;
-
- spin_lock(&pool->lock);
-
- head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
-
- if (head == NULL)
- goto err;
-
- DEBUGFS_POOL_COUNTER_ADD(pool, 1);
-
- lli = head;
- lli->phy_this = phy;
- lli->link_addr = 0x00000000;
- lli->virt_link_addr = NULL;
-
- for (i = 1; i < len; i++) {
- lli_prev = lli;
-
- lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
-
- if (lli == NULL)
- goto err_clean_up;
-
- DEBUGFS_POOL_COUNTER_ADD(pool, 1);
- lli->phy_this = phy;
- lli->link_addr = 0x00000000;
- lli->virt_link_addr = NULL;
-
- lli_prev->link_addr = phy;
- lli_prev->virt_link_addr = lli;
- }
-
- spin_unlock(&pool->lock);
-
- return head;
-
- err:
- spin_unlock(&pool->lock);
- return NULL;
-
- err_clean_up:
- lli_prev->link_addr = 0x00000000U;
- spin_unlock(&pool->lock);
- coh901318_lli_free(pool, &head);
- return NULL;
-}
-
-void coh901318_lli_free(struct coh901318_pool *pool,
- struct coh901318_lli **lli)
-{
- struct coh901318_lli *l;
- struct coh901318_lli *next;
-
- if (lli == NULL)
- return;
-
- l = *lli;
-
- if (l == NULL)
- return;
-
- spin_lock(&pool->lock);
-
- while (l->link_addr) {
- next = l->virt_link_addr;
- dma_pool_free(pool->dmapool, l, l->phy_this);
- DEBUGFS_POOL_COUNTER_ADD(pool, -1);
- l = next;
- }
- dma_pool_free(pool->dmapool, l, l->phy_this);
- DEBUGFS_POOL_COUNTER_ADD(pool, -1);
-
- spin_unlock(&pool->lock);
- *lli = NULL;
-}
-
-int
-coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- dma_addr_t source, unsigned int size,
- dma_addr_t destination, u32 ctrl_chained,
- u32 ctrl_eom)
-{
- int s = size;
- dma_addr_t src = source;
- dma_addr_t dst = destination;
-
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- while (lli->link_addr) {
- lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- s -= MAX_DMA_PACKET_SIZE;
- lli = coh901318_lli_next(lli);
-
- src += MAX_DMA_PACKET_SIZE;
- dst += MAX_DMA_PACKET_SIZE;
- }
-
- lli->control = ctrl_eom | s;
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- return 0;
-}
-
-int
-coh901318_lli_fill_single(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- dma_addr_t buf, unsigned int size,
- dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
- enum dma_transfer_direction dir)
-{
- int s = size;
- dma_addr_t src;
- dma_addr_t dst;
-
-
- if (dir == DMA_MEM_TO_DEV) {
- src = buf;
- dst = dev_addr;
-
- } else if (dir == DMA_DEV_TO_MEM) {
-
- src = dev_addr;
- dst = buf;
- } else {
- return -EINVAL;
- }
-
- while (lli->link_addr) {
- size_t block_size = MAX_DMA_PACKET_SIZE;
- lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
-
- /* If we are on the next-to-final block and there will
- * be less than half a DMA packet left for the last
- * block, then we want to make this block a little
- * smaller to balance the sizes. This is meant to
- * avoid too small transfers if the buffer size is
- * (MAX_DMA_PACKET_SIZE*N + 1) */
- if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2))
- block_size = MAX_DMA_PACKET_SIZE/2;
-
- s -= block_size;
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- lli = coh901318_lli_next(lli);
-
- if (dir == DMA_MEM_TO_DEV)
- src += block_size;
- else if (dir == DMA_DEV_TO_MEM)
- dst += block_size;
- }
-
- lli->control = ctrl_eom | s;
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- return 0;
-}
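
Editor's note, not part of the patch: the balancing rule above shrinks the next-to-last block to half a packet when the tail would otherwise be tiny. A worked example of that split, assuming MAX_DMA_PACKET_SIZE is the 2048 bytes defined in coh901318.h (helper name is hypothetical):

/*
 * Hedged sketch: a 4097-byte buffer (2 * 2048 + 1) is split into
 * 2048 + 1024 + 1025 bytes instead of 2048 + 2048 + 1, so the final
 * block is never a near-empty transfer.
 */
static unsigned int example_final_block(unsigned int size)	/* e.g. 4097 */
{
	while (size > 2048) {
		unsigned int block = 2048;

		if (size < 2048 + 2048 / 2)	/* next-to-last block */
			block = 2048 / 2;	/* shrink it to 1024 */

		size -= block;
	}
	return size;	/* 1025 for a 4097-byte buffer, not 1 */
}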
-
-int
-coh901318_lli_fill_sg(struct coh901318_pool *pool,
- struct coh901318_lli *lli,
- struct scatterlist *sgl, unsigned int nents,
- dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
- u32 ctrl_last,
- enum dma_transfer_direction dir, u32 ctrl_irq_mask)
-{
- int i;
- struct scatterlist *sg;
- u32 ctrl_sg;
- dma_addr_t src = 0;
- dma_addr_t dst = 0;
- u32 bytes_to_transfer;
- u32 elem_size;
-
- if (lli == NULL)
- goto err;
-
- spin_lock(&pool->lock);
-
- if (dir == DMA_MEM_TO_DEV)
- dst = dev_addr;
- else if (dir == DMA_DEV_TO_MEM)
- src = dev_addr;
- else
- goto err;
-
- for_each_sg(sgl, sg, nents, i) {
- if (sg_is_chain(sg)) {
-			/* sg continues to the next sg-element; don't
- * send ctrl_finish until the last
- * sg-element in the chain
- */
- ctrl_sg = ctrl_chained;
- } else if (i == nents - 1)
- ctrl_sg = ctrl_last;
- else
- ctrl_sg = ctrl ? ctrl : ctrl_last;
-
-
- if (dir == DMA_MEM_TO_DEV)
- /* increment source address */
- src = sg_dma_address(sg);
- else
- /* increment destination address */
- dst = sg_dma_address(sg);
-
- bytes_to_transfer = sg_dma_len(sg);
-
- while (bytes_to_transfer) {
- u32 val;
-
- if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) {
- elem_size = MAX_DMA_PACKET_SIZE;
- val = ctrl_chained;
- } else {
- elem_size = bytes_to_transfer;
- val = ctrl_sg;
- }
-
- lli->control = val | elem_size;
- lli->src_addr = src;
- lli->dst_addr = dst;
-
- if (dir == DMA_DEV_TO_MEM)
- dst += elem_size;
- else
- src += elem_size;
-
- BUG_ON(lli->link_addr & 3);
-
- bytes_to_transfer -= elem_size;
- lli = coh901318_lli_next(lli);
- }
-
- }
- spin_unlock(&pool->lock);
-
- return 0;
- err:
- spin_unlock(&pool->lock);
- return -EINVAL;
-}
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 612d353648cf..ebee94dbd630 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -1004,6 +1004,18 @@ static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
JZ_SOC_DATA_BREAK_LINKS,
};
+static const struct jz4780_dma_soc_data jz4760_dma_soc_data = {
+ .nb_channels = 5,
+ .transfer_ord_max = 6,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+};
+
+static const struct jz4780_dma_soc_data jz4760b_dma_soc_data = {
+ .nb_channels = 5,
+ .transfer_ord_max = 6,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM,
+};
+
static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
.nb_channels = 6,
.transfer_ord_max = 6,
@@ -1031,6 +1043,8 @@ static const struct jz4780_dma_soc_data x1830_dma_soc_data = {
static const struct of_device_id jz4780_dma_dt_match[] = {
{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
+ { .compatible = "ingenic,jz4760-dma", .data = &jz4760_dma_soc_data },
+ { .compatible = "ingenic,jz4760b-dma", .data = &jz4760b_dma_soc_data },
{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
{ .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 962cbb5e5f7f..fe6a460c4373 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1110,7 +1110,6 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
"%s called while %d clients hold a reference\n",
__func__, chan->client_count);
mutex_lock(&dma_list_mutex);
- list_del(&chan->device_node);
device->chancnt--;
chan->dev->chan = NULL;
mutex_unlock(&dma_list_mutex);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index e164f3295f5d..d9e4ac3edb4e 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -12,15 +12,20 @@
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
+#include <linux/slab.h>
#include <linux/types.h>
#include "dw-axi-dmac.h"
@@ -195,43 +200,56 @@ static inline const char *axi_chan_name(struct axi_dma_chan *chan)
return dma_chan_name(&chan->vc.chan);
}
-static struct axi_dma_desc *axi_desc_get(struct axi_dma_chan *chan)
+static struct axi_dma_desc *axi_desc_alloc(u32 num)
{
- struct dw_axi_dma *dw = chan->chip->dw;
struct axi_dma_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
+ if (!desc->hw_desc) {
+ kfree(desc);
+ return NULL;
+ }
+
+ return desc;
+}
+
+static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
+ dma_addr_t *addr)
+{
+ struct axi_dma_lli *lli;
dma_addr_t phys;
- desc = dma_pool_zalloc(dw->desc_pool, GFP_NOWAIT, &phys);
- if (unlikely(!desc)) {
+ lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
+ if (unlikely(!lli)) {
dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
axi_chan_name(chan));
return NULL;
}
atomic_inc(&chan->descs_allocated);
- INIT_LIST_HEAD(&desc->xfer_list);
- desc->vd.tx.phys = phys;
- desc->chan = chan;
+ *addr = phys;
- return desc;
+ return lli;
}
static void axi_desc_put(struct axi_dma_desc *desc)
{
struct axi_dma_chan *chan = desc->chan;
- struct dw_axi_dma *dw = chan->chip->dw;
- struct axi_dma_desc *child, *_next;
- unsigned int descs_put = 0;
+ int count = atomic_read(&chan->descs_allocated);
+ struct axi_dma_hw_desc *hw_desc;
+ int descs_put;
- list_for_each_entry_safe(child, _next, &desc->xfer_list, xfer_list) {
- list_del(&child->xfer_list);
- dma_pool_free(dw->desc_pool, child, child->vd.tx.phys);
- descs_put++;
+ for (descs_put = 0; descs_put < count; descs_put++) {
+ hw_desc = &desc->hw_desc[descs_put];
+ dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
}
- dma_pool_free(dw->desc_pool, desc, desc->vd.tx.phys);
- descs_put++;
-
+ kfree(desc->hw_desc);
+ kfree(desc);
atomic_sub(descs_put, &chan->descs_allocated);
dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
axi_chan_name(chan), descs_put,
@@ -248,19 +266,41 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
- enum dma_status ret;
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ u32 completed_length;
+ unsigned long flags;
+ u32 completed_blocks;
+ size_t bytes = 0;
+ u32 length;
+ u32 len;
- ret = dma_cookie_status(dchan, cookie, txstate);
+ status = dma_cookie_status(dchan, cookie, txstate);
+ if (status == DMA_COMPLETE || !txstate)
+ return status;
- if (chan->is_paused && ret == DMA_IN_PROGRESS)
- ret = DMA_PAUSED;
+ spin_lock_irqsave(&chan->vc.lock, flags);
- return ret;
+ vdesc = vchan_find_desc(&chan->vc, cookie);
+ if (vdesc) {
+ length = vd_to_axi_desc(vdesc)->length;
+ completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
+ len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
+ completed_length = completed_blocks * len;
+ bytes = length - completed_length;
+ } else {
+ bytes = vd_to_axi_desc(vdesc)->length;
+ }
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+ dma_set_residue(txstate, bytes);
+
+ return status;
}
-static void write_desc_llp(struct axi_dma_desc *desc, dma_addr_t adr)
+static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
- desc->lli.llp = cpu_to_le64(adr);
+ desc->lli->llp = cpu_to_le64(adr);
}
static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
@@ -268,6 +308,29 @@ static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
axi_chan_iowrite64(chan, CH_LLP, adr);
}
+static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
+{
+ u32 offset = DMAC_APB_BYTE_WR_CH_EN;
+ u32 reg_width, val;
+
+ if (!chan->chip->apb_regs) {
+ dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
+ return;
+ }
+
+ reg_width = __ffs(chan->config.dst_addr_width);
+ if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)
+ offset = DMAC_APB_HALFWORD_WR_CH_EN;
+
+ val = ioread32(chan->chip->apb_regs + offset);
+
+ if (set)
+ val |= BIT(chan->id);
+ else
+ val &= ~BIT(chan->id);
+
+ iowrite32(val, chan->chip->apb_regs + offset);
+}
/* Called in chan locked context */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
struct axi_dma_desc *first)
@@ -293,9 +356,26 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
priority << CH_CFG_H_PRIORITY_POS |
DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
+ switch (chan->direction) {
+ case DMA_MEM_TO_DEV:
+ dw_axi_dma_set_byte_halfword(chan, true);
+ reg |= (chan->config.device_fc ?
+ DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
+ DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC)
+ << CH_CFG_H_TT_FC_POS;
+ break;
+ case DMA_DEV_TO_MEM:
+ reg |= (chan->config.device_fc ?
+ DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
+ DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC)
+ << CH_CFG_H_TT_FC_POS;
+ break;
+ default:
+ break;
+ }
axi_chan_iowrite32(chan, CH_CFG_H, reg);
- write_chan_llp(chan, first->vd.tx.phys | lms);
+ write_chan_llp(chan, first->hw_desc[0].llp | lms);
irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
axi_chan_irq_sig_set(chan, irq_mask);
@@ -333,6 +413,13 @@ static void dma_chan_issue_pending(struct dma_chan *dchan)
spin_unlock_irqrestore(&chan->vc.lock, flags);
}
+static void dw_axi_dma_synchronize(struct dma_chan *dchan)
+{
+ struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+
+ vchan_synchronize(&chan->vc);
+}
+
static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
@@ -344,6 +431,15 @@ static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
return -EBUSY;
}
+ /* LLI address must be aligned to a 64-byte boundary */
+ chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
+ chan->chip->dev,
+ sizeof(struct axi_dma_lli),
+ 64, 0);
+ if (!chan->desc_pool) {
+ dev_err(chan2dev(chan), "No memory for descriptors\n");
+ return -ENOMEM;
+ }
dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));
pm_runtime_get(chan->chip->dev);
@@ -365,6 +461,8 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan)
vchan_free_chan_resources(&chan->vc);
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
dev_vdbg(dchan2dev(dchan),
"%s: free resources, descriptor still allocated: %u\n",
axi_chan_name(chan), atomic_read(&chan->descs_allocated));
@@ -372,73 +470,398 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan)
pm_runtime_put(chan->chip->dev);
}
+static void dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip,
+ u32 handshake_num, bool set)
+{
+ unsigned long start = 0;
+ unsigned long reg_value;
+ unsigned long reg_mask;
+ unsigned long reg_set;
+ unsigned long mask;
+ unsigned long val;
+
+ if (!chip->apb_regs) {
+ dev_dbg(chip->dev, "apb_regs not initialized\n");
+ return;
+ }
+
+ /*
+ * An unused DMA channel has a default value of 0x3F.
+	 * Lock the DMA channel by assigning a handshake number to the channel.
+	 * Unlock the DMA channel by assigning 0x3F to the channel.
+ */
+ if (set) {
+ reg_set = UNUSED_CHANNEL;
+ val = handshake_num;
+ } else {
+ reg_set = handshake_num;
+ val = UNUSED_CHANNEL;
+ }
+
+ reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
+
+ for_each_set_clump8(start, reg_mask, &reg_value, 64) {
+ if (reg_mask == reg_set) {
+ mask = GENMASK_ULL(start + 7, start);
+ reg_value &= ~mask;
+ reg_value |= rol64(val, start);
+ lo_hi_writeq(reg_value,
+ chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
+ break;
+ }
+ }
+}
+
/*
* If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI
* as 1, it understands that the current block is the final block in the
* transfer and completes the DMA transfer operation at the end of current
* block transfer.
*/
-static void set_desc_last(struct axi_dma_desc *desc)
+static void set_desc_last(struct axi_dma_hw_desc *desc)
{
u32 val;
- val = le32_to_cpu(desc->lli.ctl_hi);
+ val = le32_to_cpu(desc->lli->ctl_hi);
val |= CH_CTL_H_LLI_LAST;
- desc->lli.ctl_hi = cpu_to_le32(val);
+ desc->lli->ctl_hi = cpu_to_le32(val);
}
-static void write_desc_sar(struct axi_dma_desc *desc, dma_addr_t adr)
+static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
- desc->lli.sar = cpu_to_le64(adr);
+ desc->lli->sar = cpu_to_le64(adr);
}
-static void write_desc_dar(struct axi_dma_desc *desc, dma_addr_t adr)
+static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
- desc->lli.dar = cpu_to_le64(adr);
+ desc->lli->dar = cpu_to_le64(adr);
}
-static void set_desc_src_master(struct axi_dma_desc *desc)
+static void set_desc_src_master(struct axi_dma_hw_desc *desc)
{
u32 val;
/* Select AXI0 for source master */
- val = le32_to_cpu(desc->lli.ctl_lo);
+ val = le32_to_cpu(desc->lli->ctl_lo);
val &= ~CH_CTL_L_SRC_MAST;
- desc->lli.ctl_lo = cpu_to_le32(val);
+ desc->lli->ctl_lo = cpu_to_le32(val);
}
-static void set_desc_dest_master(struct axi_dma_desc *desc)
+static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
+ struct axi_dma_desc *desc)
{
u32 val;
/* Select AXI1 for source master if available */
- val = le32_to_cpu(desc->lli.ctl_lo);
+ val = le32_to_cpu(hw_desc->lli->ctl_lo);
if (desc->chan->chip->dw->hdata->nr_masters > 1)
val |= CH_CTL_L_DST_MAST;
else
val &= ~CH_CTL_L_DST_MAST;
- desc->lli.ctl_lo = cpu_to_le32(val);
+ hw_desc->lli->ctl_lo = cpu_to_le32(val);
+}
+
+static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
+ struct axi_dma_hw_desc *hw_desc,
+ dma_addr_t mem_addr, size_t len)
+{
+ unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
+ unsigned int reg_width;
+ unsigned int mem_width;
+ dma_addr_t device_addr;
+ size_t axi_block_ts;
+ size_t block_ts;
+ u32 ctllo, ctlhi;
+ u32 burst_len;
+
+ axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];
+
+ mem_width = __ffs(data_width | mem_addr | len);
+ if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
+ mem_width = DWAXIDMAC_TRANS_WIDTH_32;
+
+ if (!IS_ALIGNED(mem_addr, 4)) {
+ dev_err(chan->chip->dev, "invalid buffer alignment\n");
+ return -EINVAL;
+ }
+
+ switch (chan->direction) {
+ case DMA_MEM_TO_DEV:
+ reg_width = __ffs(chan->config.dst_addr_width);
+ device_addr = chan->config.dst_addr;
+ ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |
+ mem_width << CH_CTL_L_SRC_WIDTH_POS |
+ DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |
+ DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;
+ block_ts = len >> mem_width;
+ break;
+ case DMA_DEV_TO_MEM:
+ reg_width = __ffs(chan->config.src_addr_width);
+ device_addr = chan->config.src_addr;
+ ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |
+ mem_width << CH_CTL_L_DST_WIDTH_POS |
+ DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
+ DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;
+ block_ts = len >> reg_width;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (block_ts > axi_block_ts)
+ return -EINVAL;
+
+ hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
+ if (unlikely(!hw_desc->lli))
+ return -ENOMEM;
+
+ ctlhi = CH_CTL_H_LLI_VALID;
+
+ if (chan->chip->dw->hdata->restrict_axi_burst_len) {
+ burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
+ ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |
+ burst_len << CH_CTL_H_ARLEN_POS |
+ burst_len << CH_CTL_H_AWLEN_POS;
+ }
+
+ hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);
+
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ write_desc_sar(hw_desc, mem_addr);
+ write_desc_dar(hw_desc, device_addr);
+ } else {
+ write_desc_sar(hw_desc, device_addr);
+ write_desc_dar(hw_desc, mem_addr);
+ }
+
+ hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);
+
+ ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
+ DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
+ hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);
+
+ set_desc_src_master(hw_desc);
+
+ hw_desc->len = len;
+ return 0;
+}
+
+static size_t calculate_block_len(struct axi_dma_chan *chan,
+ dma_addr_t dma_addr, size_t buf_len,
+ enum dma_transfer_direction direction)
+{
+ u32 data_width, reg_width, mem_width;
+ size_t axi_block_ts, block_len;
+
+ axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ data_width = BIT(chan->chip->dw->hdata->m_data_width);
+ mem_width = __ffs(data_width | dma_addr | buf_len);
+ if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
+ mem_width = DWAXIDMAC_TRANS_WIDTH_32;
+
+ block_len = axi_block_ts << mem_width;
+ break;
+ case DMA_DEV_TO_MEM:
+ reg_width = __ffs(chan->config.src_addr_width);
+ block_len = axi_block_ts << reg_width;
+ break;
+ default:
+ block_len = 0;
+ }
+
+ return block_len;
+}
+
+static struct dma_async_tx_descriptor *
+dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+ struct axi_dma_hw_desc *hw_desc = NULL;
+ struct axi_dma_desc *desc = NULL;
+ dma_addr_t src_addr = dma_addr;
+ u32 num_periods, num_segments;
+ size_t axi_block_len;
+ u32 total_segments;
+ u32 segment_len;
+ unsigned int i;
+ int status;
+ u64 llp = 0;
+ u8 lms = 0; /* Select AXI0 master for LLI fetching */
+
+ num_periods = buf_len / period_len;
+
+ axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
+ if (axi_block_len == 0)
+ return NULL;
+
+ num_segments = DIV_ROUND_UP(period_len, axi_block_len);
+ segment_len = DIV_ROUND_UP(period_len, num_segments);
+
+ total_segments = num_periods * num_segments;
+
+ desc = axi_desc_alloc(total_segments);
+ if (unlikely(!desc))
+ goto err_desc_get;
+
+ chan->direction = direction;
+ desc->chan = chan;
+ chan->cyclic = true;
+ desc->length = 0;
+ desc->period_len = period_len;
+
+ for (i = 0; i < total_segments; i++) {
+ hw_desc = &desc->hw_desc[i];
+
+ status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
+ segment_len);
+ if (status < 0)
+ goto err_desc_get;
+
+ desc->length += hw_desc->len;
+ /* Set end-of-link to the linked descriptor, so that cyclic
+ * callback function can be triggered during interrupt.
+ */
+ set_desc_last(hw_desc);
+
+ src_addr += segment_len;
+ }
+
+ llp = desc->hw_desc[0].llp;
+
+ /* Managed transfer list */
+ do {
+ hw_desc = &desc->hw_desc[--total_segments];
+ write_desc_llp(hw_desc, llp | lms);
+ llp = hw_desc->llp;
+ } while (total_segments);
+
+ dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true);
+
+ return vchan_tx_prep(&chan->vc, &desc->vd, flags);
+
+err_desc_get:
+ if (desc)
+ axi_desc_put(desc);
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+ struct axi_dma_hw_desc *hw_desc = NULL;
+ struct axi_dma_desc *desc = NULL;
+ u32 num_segments, segment_len;
+ unsigned int loop = 0;
+ struct scatterlist *sg;
+ size_t axi_block_len;
+ u32 len, num_sgs = 0;
+ unsigned int i;
+ dma_addr_t mem;
+ int status;
+ u64 llp = 0;
+ u8 lms = 0; /* Select AXI0 master for LLI fetching */
+
+ if (unlikely(!is_slave_direction(direction) || !sg_len))
+ return NULL;
+
+ mem = sg_dma_address(sgl);
+ len = sg_dma_len(sgl);
+
+ axi_block_len = calculate_block_len(chan, mem, len, direction);
+ if (axi_block_len == 0)
+ return NULL;
+
+ for_each_sg(sgl, sg, sg_len, i)
+ num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
+
+ desc = axi_desc_alloc(num_sgs);
+ if (unlikely(!desc))
+ goto err_desc_get;
+
+ desc->chan = chan;
+ desc->length = 0;
+ chan->direction = direction;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ mem = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
+ segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);
+
+ do {
+ hw_desc = &desc->hw_desc[loop++];
+ status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
+ if (status < 0)
+ goto err_desc_get;
+
+ desc->length += hw_desc->len;
+ len -= segment_len;
+ mem += segment_len;
+ } while (len >= segment_len);
+ }
+
+ /* Set end-of-link to the last link descriptor of list */
+ set_desc_last(&desc->hw_desc[num_sgs - 1]);
+
+ /* Managed transfer list */
+ do {
+ hw_desc = &desc->hw_desc[--num_sgs];
+ write_desc_llp(hw_desc, llp | lms);
+ llp = hw_desc->llp;
+ } while (num_sgs);
+
+ dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true);
+
+ return vchan_tx_prep(&chan->vc, &desc->vd, flags);
+
+err_desc_get:
+ if (desc)
+ axi_desc_put(desc);
+
+ return NULL;
}
static struct dma_async_tx_descriptor *
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
dma_addr_t src_adr, size_t len, unsigned long flags)
{
- struct axi_dma_desc *first = NULL, *desc = NULL, *prev = NULL;
struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
size_t block_ts, max_block_ts, xfer_len;
- u32 xfer_width, reg;
+ struct axi_dma_hw_desc *hw_desc = NULL;
+ struct axi_dma_desc *desc = NULL;
+ u32 xfer_width, reg, num;
+ u64 llp = 0;
u8 lms = 0; /* Select AXI0 master for LLI fetching */
dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
axi_chan_name(chan), &src_adr, &dst_adr, len, flags);
max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
+ xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
+ num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
+ desc = axi_desc_alloc(num);
+ if (unlikely(!desc))
+ goto err_desc_get;
+ desc->chan = chan;
+ num = 0;
+ desc->length = 0;
while (len) {
xfer_len = len;
+ hw_desc = &desc->hw_desc[num];
/*
* Take care for the alignment.
* Actually source and destination widths can be different, but
@@ -457,13 +880,13 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
xfer_len = max_block_ts << xfer_width;
}
- desc = axi_desc_get(chan);
- if (unlikely(!desc))
+ hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
+ if (unlikely(!hw_desc->lli))
goto err_desc_get;
- write_desc_sar(desc, src_adr);
- write_desc_dar(desc, dst_adr);
- desc->lli.block_ts_lo = cpu_to_le32(block_ts - 1);
+ write_desc_sar(hw_desc, src_adr);
+ write_desc_dar(hw_desc, dst_adr);
+ hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);
reg = CH_CTL_H_LLI_VALID;
if (chan->chip->dw->hdata->restrict_axi_burst_len) {
@@ -474,7 +897,7 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
CH_CTL_H_AWLEN_EN |
burst_len << CH_CTL_H_AWLEN_POS);
}
- desc->lli.ctl_hi = cpu_to_le32(reg);
+ hw_desc->lli->ctl_hi = cpu_to_le32(reg);
reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
@@ -482,62 +905,68 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
xfer_width << CH_CTL_L_SRC_WIDTH_POS |
DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
- desc->lli.ctl_lo = cpu_to_le32(reg);
+ hw_desc->lli->ctl_lo = cpu_to_le32(reg);
- set_desc_src_master(desc);
- set_desc_dest_master(desc);
-
- /* Manage transfer list (xfer_list) */
- if (!first) {
- first = desc;
- } else {
- list_add_tail(&desc->xfer_list, &first->xfer_list);
- write_desc_llp(prev, desc->vd.tx.phys | lms);
- }
- prev = desc;
+ set_desc_src_master(hw_desc);
+ set_desc_dest_master(hw_desc, desc);
+ hw_desc->len = xfer_len;
+ desc->length += hw_desc->len;
/* update the length and addresses for the next loop cycle */
len -= xfer_len;
dst_adr += xfer_len;
src_adr += xfer_len;
+ num++;
}
- /* Total len of src/dest sg == 0, so no descriptor were allocated */
- if (unlikely(!first))
- return NULL;
-
/* Set end-of-link to the last link descriptor of list */
- set_desc_last(desc);
+ set_desc_last(&desc->hw_desc[num - 1]);
+	/* Manage the transfer list */
+ do {
+ hw_desc = &desc->hw_desc[--num];
+ write_desc_llp(hw_desc, llp | lms);
+ llp = hw_desc->llp;
+ } while (num);
- return vchan_tx_prep(&chan->vc, &first->vd, flags);
+ return vchan_tx_prep(&chan->vc, &desc->vd, flags);
err_desc_get:
- if (first)
- axi_desc_put(first);
+ if (desc)
+ axi_desc_put(desc);
return NULL;
}
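
For illustration, a small sketch of how the hw-descriptor count for the memcpy path could be estimated (hypothetical values): xfer_width is the log2 of the transfer width in bytes, so max_block_ts << xfer_width is the byte capacity of one block.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical: 100 KiB copy, 4-byte transfer width (xfer_width = 2),
	 * 4096 transfers per block -> 16 KiB per hw descriptor. */
	unsigned long len = 100 * 1024;
	unsigned int xfer_width = 2, max_block_ts = 4096;
	unsigned long block_bytes = (unsigned long)max_block_ts << xfer_width;

	printf("hw descriptors needed: %lu\n", DIV_ROUND_UP(len, block_bytes)); /* 7 */
	return 0;
}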
+static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+
+ memcpy(&chan->config, config, sizeof(*config));
+
+ return 0;
+}
+
static void axi_chan_dump_lli(struct axi_dma_chan *chan,
- struct axi_dma_desc *desc)
+ struct axi_dma_hw_desc *desc)
{
dev_err(dchan2dev(&chan->vc.chan),
"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
- le64_to_cpu(desc->lli.sar),
- le64_to_cpu(desc->lli.dar),
- le64_to_cpu(desc->lli.llp),
- le32_to_cpu(desc->lli.block_ts_lo),
- le32_to_cpu(desc->lli.ctl_hi),
- le32_to_cpu(desc->lli.ctl_lo));
+ le64_to_cpu(desc->lli->sar),
+ le64_to_cpu(desc->lli->dar),
+ le64_to_cpu(desc->lli->llp),
+ le32_to_cpu(desc->lli->block_ts_lo),
+ le32_to_cpu(desc->lli->ctl_hi),
+ le32_to_cpu(desc->lli->ctl_lo));
}
static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
struct axi_dma_desc *desc_head)
{
- struct axi_dma_desc *desc;
+ int count = atomic_read(&chan->descs_allocated);
+ int i;
- axi_chan_dump_lli(chan, desc_head);
- list_for_each_entry(desc, &desc_head->xfer_list, xfer_list)
- axi_chan_dump_lli(chan, desc);
+ for (i = 0; i < count; i++)
+ axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
}
static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
@@ -570,8 +999,13 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
+ int count = atomic_read(&chan->descs_allocated);
+ struct axi_dma_hw_desc *hw_desc;
+ struct axi_dma_desc *desc;
struct virt_dma_desc *vd;
unsigned long flags;
+ u64 llp;
+ int i;
spin_lock_irqsave(&chan->vc.lock, flags);
if (unlikely(axi_chan_is_hw_enable(chan))) {
@@ -582,12 +1016,34 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
/* The completed descriptor currently is in the head of vc list */
vd = vchan_next_desc(&chan->vc);
- /* Remove the completed descriptor from issued list before completing */
- list_del(&vd->node);
- vchan_cookie_complete(vd);
- /* Submit queued descriptors after processing the completed ones */
- axi_chan_start_first_queued(chan);
+ if (chan->cyclic) {
+ desc = vd_to_axi_desc(vd);
+ if (desc) {
+ llp = lo_hi_readq(chan->chan_regs + CH_LLP);
+ for (i = 0; i < count; i++) {
+ hw_desc = &desc->hw_desc[i];
+ if (hw_desc->llp == llp) {
+ axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
+ hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;
+ desc->completed_blocks = i;
+
+ if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)
+ vchan_cyclic_callback(vd);
+ break;
+ }
+ }
+
+ axi_chan_enable(chan);
+ }
+ } else {
+ /* Remove the completed descriptor from issued list before completing */
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+
+ /* Submit queued descriptors after processing the completed ones */
+ axi_chan_start_first_queued(chan);
+ }
spin_unlock_irqrestore(&chan->vc.lock, flags);
}
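
A standalone sketch of the period-boundary test used in the cyclic completion path above: with hypothetical segment and period lengths, the callback fires only on the segment that ends a period.

#include <stdio.h>

int main(void)
{
	/* Hypothetical: 2 segments of 3072 bytes per 6144-byte period, 8 total */
	unsigned int segment_len = 3072, period_len = 6144, total = 8;

	for (unsigned int i = 0; i < total; i++)
		if ((segment_len * (i + 1)) % period_len == 0)
			printf("segment %u completes a period\n", i);
	return 0;
}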
@@ -627,15 +1083,31 @@ static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
static int dma_chan_terminate_all(struct dma_chan *dchan)
{
struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+ u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
unsigned long flags;
+ u32 val;
+ int ret;
LIST_HEAD(head);
- spin_lock_irqsave(&chan->vc.lock, flags);
-
axi_chan_disable(chan);
+ ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
+ !(val & chan_active), 1000, 10000);
+ if (ret == -ETIMEDOUT)
+ dev_warn(dchan2dev(dchan),
+ "%s failed to stop\n", axi_chan_name(chan));
+
+ if (chan->direction != DMA_MEM_TO_MEM)
+ dw_axi_dma_set_hw_channel(chan->chip,
+ chan->hw_handshake_num, false);
+ if (chan->direction == DMA_MEM_TO_DEV)
+ dw_axi_dma_set_byte_halfword(chan, false);
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
vchan_get_all_descriptors(&chan->vc, &head);
+ chan->cyclic = false;
spin_unlock_irqrestore(&chan->vc.lock, flags);
vchan_dma_desc_free_list(&chan->vc, &head);
@@ -746,6 +1218,22 @@ static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
return axi_dma_resume(chip);
}
+static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct dw_axi_dma *dw = ofdma->of_dma_data;
+ struct axi_dma_chan *chan;
+ struct dma_chan *dchan;
+
+ dchan = dma_get_any_slave_channel(&dw->dma);
+ if (!dchan)
+ return NULL;
+
+ chan = dchan_to_axi_dma_chan(dchan);
+ chan->hw_handshake_num = dma_spec->args[0];
+ return dchan;
+}
+
static int parse_device_properties(struct axi_dma_chip *chip)
{
struct device *dev = chip->dev;
@@ -816,6 +1304,7 @@ static int parse_device_properties(struct axi_dma_chip *chip)
static int dw_probe(struct platform_device *pdev)
{
+ struct device_node *node = pdev->dev.of_node;
struct axi_dma_chip *chip;
struct resource *mem;
struct dw_axi_dma *dw;
@@ -848,6 +1337,12 @@ static int dw_probe(struct platform_device *pdev)
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
+ if (of_device_is_compatible(node, "intel,kmb-axi-dma")) {
+ chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(chip->apb_regs))
+ return PTR_ERR(chip->apb_regs);
+ }
+
chip->core_clk = devm_clk_get(chip->dev, "core-clk");
if (IS_ERR(chip->core_clk))
return PTR_ERR(chip->core_clk);
@@ -870,13 +1365,6 @@ static int dw_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Lli address must be aligned to a 64-byte boundary */
- dw->desc_pool = dmam_pool_create(KBUILD_MODNAME, chip->dev,
- sizeof(struct axi_dma_desc), 64, 0);
- if (!dw->desc_pool) {
- dev_err(chip->dev, "No memory for descriptors dma pool\n");
- return -ENOMEM;
- }
INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < hdata->nr_channels; i++) {
@@ -893,13 +1381,16 @@ static int dw_probe(struct platform_device *pdev)
/* Set capabilities */
dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
+ dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
+ dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);
/* DMA capabilities */
dw->dma.chancnt = hdata->nr_channels;
dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
dw->dma.directions = BIT(DMA_MEM_TO_MEM);
- dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
dw->dma.dev = chip->dev;
dw->dma.device_tx_status = dma_chan_tx_status;
@@ -912,7 +1403,18 @@ static int dw_probe(struct platform_device *pdev)
dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;
dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
+ dw->dma.device_synchronize = dw_axi_dma_synchronize;
+ dw->dma.device_config = dw_axi_dma_chan_slave_config;
+ dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
+ dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;
+ /*
+	 * The Synopsys DesignWare AXI DMA datasheet states that the maximum
+	 * supported block size is 1024. The device register width is 4 bytes,
+	 * so set the constraint to 1024 * 4.
+ */
+ dw->dma.dev->dma_parms = &dw->dma_parms;
+ dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
platform_set_drvdata(pdev, chip);
pm_runtime_enable(chip->dev);
@@ -935,6 +1437,13 @@ static int dw_probe(struct platform_device *pdev)
if (ret)
goto err_pm_disable;
+ /* Register with OF helpers for DMA lookups */
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ dw_axi_dma_of_xlate, dw);
+ if (ret < 0)
+ dev_warn(&pdev->dev,
+ "Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");
+
dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
dw->hdata->nr_channels);
@@ -968,6 +1477,8 @@ static int dw_remove(struct platform_device *pdev)
devm_free_irq(chip->dev, chip->irq, chip);
+ of_dma_controller_free(chip->dev->of_node);
+
list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
vc.chan.device_node) {
list_del(&chan->vc.chan.device_node);
@@ -983,6 +1494,7 @@ static const struct dev_pm_ops dw_axi_dma_pm_ops = {
static const struct of_device_id dw_dma_of_id_table[] = {
{ .compatible = "snps,axi-dma-1.01a" },
+ { .compatible = "intel,kmb-axi-dma" },
{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index 18b6014cf9b4..b69897887c76 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -37,10 +37,16 @@ struct axi_dma_chan {
struct axi_dma_chip *chip;
void __iomem *chan_regs;
u8 id;
+ u8 hw_handshake_num;
atomic_t descs_allocated;
+ struct dma_pool *desc_pool;
struct virt_dma_chan vc;
+ struct axi_dma_desc *desc;
+ struct dma_slave_config config;
+ enum dma_transfer_direction direction;
+ bool cyclic;
/* these other elements are all protected by vc.lock */
bool is_paused;
};
@@ -48,7 +54,7 @@ struct axi_dma_chan {
struct dw_axi_dma {
struct dma_device dma;
struct dw_axi_dma_hcfg *hdata;
- struct dma_pool *desc_pool;
+ struct device_dma_parameters dma_parms;
/* channels */
struct axi_dma_chan *chan;
@@ -58,6 +64,7 @@ struct axi_dma_chip {
struct device *dev;
int irq;
void __iomem *regs;
+ void __iomem *apb_regs;
struct clk *core_clk;
struct clk *cfgr_clk;
struct dw_axi_dma *dw;
@@ -80,12 +87,20 @@ struct __packed axi_dma_lli {
__le32 reserved_hi;
};
+struct axi_dma_hw_desc {
+ struct axi_dma_lli *lli;
+ dma_addr_t llp;
+ u32 len;
+};
+
struct axi_dma_desc {
- struct axi_dma_lli lli;
+ struct axi_dma_hw_desc *hw_desc;
struct virt_dma_desc vd;
struct axi_dma_chan *chan;
- struct list_head xfer_list;
+ u32 completed_blocks;
+ u32 length;
+ u32 period_len;
};
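
To visualize the new layout above: one software descriptor now owns an array of hw_desc entries, each recording its pool-allocated lli and the DMA address (llp) of that lli. A toy sketch using calloc in place of the dma_pool (illustrative only; frees omitted since the demo exits immediately):

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>

struct lli { uint64_t sar, dar, llp; };		/* cut-down hardware item */
struct hw_desc { struct lli *lli; uint64_t llp; uint32_t len; };
struct sw_desc { struct hw_desc *hw_desc; uint32_t nents; };

int main(void)
{
	struct sw_desc d = { calloc(2, sizeof(struct hw_desc)), 2 };

	for (uint32_t i = 0; i < d.nents; i++) {
		d.hw_desc[i].lli = calloc(1, sizeof(struct lli));	/* dma_pool_zalloc() stand-in */
		d.hw_desc[i].llp = 0x1000 + i * 0x40;			/* its DMA address */
		d.hw_desc[i].len = 4096;
	}
	printf("descriptor with %u hw blocks, first lli at 0x%llx\n",
	       d.nents, (unsigned long long)d.hw_desc[0].llp);
	return 0;
}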
static inline struct device *dchan2dev(struct dma_chan *dchan)
@@ -157,6 +172,19 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan)
#define CH_INTSIGNAL_ENA 0x090 /* R/W Chan Interrupt Signal Enable */
#define CH_INTCLEAR 0x098 /* W Chan Interrupt Clear */
+/* These Apb registers are used by Intel KeemBay SoC */
+#define DMAC_APB_CFG 0x000 /* DMAC Apb Configuration Register */
+#define DMAC_APB_STAT 0x004 /* DMAC Apb Status Register */
+#define DMAC_APB_DEBUG_STAT_0 0x008 /* DMAC Apb Debug Status Register 0 */
+#define DMAC_APB_DEBUG_STAT_1 0x00C /* DMAC Apb Debug Status Register 1 */
+#define DMAC_APB_HW_HS_SEL_0 0x010 /* DMAC Apb HW HS register 0 */
+#define DMAC_APB_HW_HS_SEL_1 0x014 /* DMAC Apb HW HS register 1 */
+#define DMAC_APB_LPI 0x018 /* DMAC Apb Low Power Interface Reg */
+#define DMAC_APB_BYTE_WR_CH_EN 0x01C /* DMAC Apb Byte Write Enable */
+#define DMAC_APB_HALFWORD_WR_CH_EN 0x020 /* DMAC Halfword write enables */
+
+#define UNUSED_CHANNEL 0x3F /* Set unused DMA channel to 0x3F */
+#define MAX_BLOCK_SIZE 0x1000 /* 1024 blocks * 4 bytes data width */
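
A one-line sanity sketch of the MAX_BLOCK_SIZE constant above: 1024 blocks of 4-byte register width gives 0x1000 bytes.

#include <assert.h>

int main(void)
{
	/* 1024 blocks * 4 bytes data width == 0x1000 */
	assert(1024 * 4 == 0x1000);
	return 0;
}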
/* DMAC_CFG */
#define DMAC_EN_POS 0
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 19a23767533a..7ab83fe601ed 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -982,11 +982,8 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "%s\n", __func__);
- pm_runtime_get_sync(dw->dma.dev);
-
/* ASSERT: channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
- pm_runtime_put_sync_suspend(dw->dma.dev);
dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
return -EIO;
}
@@ -1003,7 +1000,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* We need controller-specific data to set up slave transfers.
*/
if (chan->private && !dw_dma_filter(chan, chan->private)) {
- pm_runtime_put_sync_suspend(dw->dma.dev);
dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
return -EINVAL;
}
@@ -1047,8 +1043,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
if (!dw->in_use)
do_dw_dma_off(dw);
- pm_runtime_put_sync_suspend(dw->dma.dev);
-
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 0feb323bae1e..f8459cc5315d 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1214,6 +1214,7 @@ static int fsldma_of_probe(struct platform_device *op)
{
struct fsldma_device *fdev;
struct device_node *child;
+ unsigned int i;
int err;
fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
@@ -1292,6 +1293,10 @@ static int fsldma_of_probe(struct platform_device *op)
return 0;
out_free_fdev:
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ if (fdev->chan[i])
+ fsl_dma_chan_remove(fdev->chan[i]);
+ }
irq_dispose_mapping(fdev->irq);
iounmap(fdev->regs);
out_free:
@@ -1314,6 +1319,7 @@ static int fsldma_of_remove(struct platform_device *op)
if (fdev->chan[i])
fsl_dma_chan_remove(fdev->chan[i]);
}
+ irq_dispose_mapping(fdev->irq);
iounmap(fdev->regs);
kfree(fdev);
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
index 07cc7320a614..9045a6f7f589 100644
--- a/drivers/dma/hsu/pci.c
+++ b/drivers/dma/hsu/pci.c
@@ -26,22 +26,12 @@
static irqreturn_t hsu_pci_irq(int irq, void *dev)
{
struct hsu_dma_chip *chip = dev;
- struct pci_dev *pdev = to_pci_dev(chip->dev);
u32 dmaisr;
u32 status;
unsigned short i;
int ret = 0;
int err;
- /*
- * On Intel Tangier B0 and Anniedale the interrupt line, disregarding
- * to have different numbers, is shared between HSU DMA and UART IPs.
- * Thus on such SoCs we are expecting that IRQ handler is called in
- * UART driver only.
- */
- if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
- return IRQ_HANDLED;
-
dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
for (i = 0; i < chip->hsu->nr_channels; i++) {
if (dmaisr & 0x1) {
@@ -105,6 +95,17 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_register_irq;
+ /*
+	 * On Intel Tangier B0 and Anniedale the interrupt line, despite having
+	 * different numbers, is shared between the HSU DMA and UART IPs. Thus
+	 * on such SoCs we expect the IRQ handler to be called from the UART
+	 * driver only. Instead of handling the spurious interrupt from HSU DMA
+	 * here, wasting CPU time and delaying HSU UART interrupt handling,
+	 * disable the interrupt entirely.
+ */
+ if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
+ disable_irq_nosync(chip->irq);
+
pci_set_drvdata(pdev, chip);
return 0;
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 95f94a3ed6be..84a6ea60ecf0 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -398,17 +398,31 @@ static inline bool idxd_is_enabled(struct idxd_device *idxd)
return false;
}
+static inline bool idxd_device_is_halted(struct idxd_device *idxd)
+{
+ union gensts_reg gensts;
+
+ gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+
+ return (gensts.state == IDXD_DEVICE_STATE_HALT);
+}
+
/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
* all commands will be done via interrupt completion.
*/
-void idxd_device_init_reset(struct idxd_device *idxd)
+int idxd_device_init_reset(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
union idxd_command_reg cmd;
unsigned long flags;
+ if (idxd_device_is_halted(idxd)) {
+ dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+ return -ENXIO;
+ }
+
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = IDXD_CMD_RESET_DEVICE;
dev_dbg(dev, "%s: sending reset for init.\n", __func__);
@@ -419,6 +433,7 @@ void idxd_device_init_reset(struct idxd_device *idxd)
IDXD_CMDSTS_ACTIVE)
cpu_relax();
spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ return 0;
}
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
@@ -428,6 +443,12 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
DECLARE_COMPLETION_ONSTACK(done);
unsigned long flags;
+ if (idxd_device_is_halted(idxd)) {
+ dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+ *status = IDXD_CMDSTS_HW_ERR;
+ return;
+ }
+
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = cmd_code;
cmd.operand = operand;
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index 8ed2773d8285..a15e50126434 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -165,6 +165,7 @@ int idxd_register_dma_device(struct idxd_device *idxd)
INIT_LIST_HEAD(&dma->channels);
dma->dev = &idxd->pdev->dev;
+ dma_cap_set(DMA_PRIVATE, dma->cap_mask);
dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
dma->device_release = idxd_dma_release;
@@ -205,5 +206,8 @@ int idxd_register_dma_channel(struct idxd_wq *wq)
void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
- dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
+ struct dma_chan *chan = &wq->dma_chan;
+
+ dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
+ list_del(&chan->device_node);
}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 5a50e91c71bf..81a0e65fd316 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -326,7 +326,7 @@ void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
/* device control */
-void idxd_device_init_reset(struct idxd_device *idxd);
+int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 2c051e07c34c..085a0c3b62c6 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -26,12 +26,16 @@ MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
+static bool sva = true;
+module_param(sva, bool, 0644);
+MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
+
#define DRV_NAME "idxd"
bool support_enqcmd;
static struct idr idxd_idrs[IDXD_TYPE_MAX];
-static struct mutex idxd_idr_lock;
+static DEFINE_MUTEX(idxd_idr_lock);
static struct pci_device_id idxd_pci_tbl[] = {
/* DSA ver 1.0 platforms */
@@ -335,15 +339,20 @@ static int idxd_probe(struct idxd_device *idxd)
int rc;
dev_dbg(dev, "%s entered and resetting device\n", __func__);
- idxd_device_init_reset(idxd);
+ rc = idxd_device_init_reset(idxd);
+ if (rc < 0)
+ return rc;
+
dev_dbg(dev, "IDXD reset complete\n");
- if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM)) {
+ if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
rc = idxd_enable_system_pasid(idxd);
if (rc < 0)
dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
else
set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+ } else if (!sva) {
+ dev_warn(dev, "User forced SVA off via module param.\n");
}
idxd_read_caps(idxd);
@@ -544,7 +553,6 @@ static int __init idxd_init_module(void)
else
support_enqcmd = true;
- mutex_init(&idxd_idr_lock);
for (i = 0; i < IDXD_TYPE_MAX; i++)
idr_init(&idxd_idrs[i]);
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 593a2f6ed16c..a60ca11a5784 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -111,19 +111,14 @@ irqreturn_t idxd_irq_handler(int vec, void *data)
return IRQ_WAKE_THREAD;
}
-irqreturn_t idxd_misc_thread(int vec, void *data)
+static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
{
- struct idxd_irq_entry *irq_entry = data;
- struct idxd_device *idxd = irq_entry->idxd;
struct device *dev = &idxd->pdev->dev;
union gensts_reg gensts;
- u32 cause, val = 0;
+ u32 val = 0;
int i;
bool err = false;
- cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
- iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
-
if (cause & IDXD_INTC_ERR) {
spin_lock_bh(&idxd->dev_lock);
for (i = 0; i < 4; i++)
@@ -181,7 +176,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
val);
if (!err)
- goto out;
+ return 0;
/*
* This case should rarely happen and typically is due to software
@@ -211,37 +206,58 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
"FLR" : "system reset");
spin_unlock_bh(&idxd->dev_lock);
+ return -ENXIO;
}
}
- out:
+ return 0;
+}
+
+irqreturn_t idxd_misc_thread(int vec, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ struct idxd_device *idxd = irq_entry->idxd;
+ int rc;
+ u32 cause;
+
+ cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ if (cause)
+ iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+
+ while (cause) {
+ rc = process_misc_interrupts(idxd, cause);
+ if (rc < 0)
+ break;
+ cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ if (cause)
+ iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ }
+
idxd_unmask_msix_vector(idxd, irq_entry->id);
return IRQ_HANDLED;
}
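
An illustrative, self-contained sketch (not idxd code) of the read/ack/process loop structure adopted above: each pass snapshots the cause bits, acknowledges them, and handles them, so bits raised while processing are picked up by the next pass.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical cause source: pretend two batches of bits arrive. */
static uint32_t fake_causes[] = { 0x5, 0x2, 0x0 };
static int idx;

static uint32_t read_cause(void)  { return fake_causes[idx]; }
static void ack_cause(uint32_t c) { idx++; (void)c; }

int main(void)
{
	uint32_t cause = read_cause();

	if (cause)
		ack_cause(cause);

	while (cause) {
		printf("handling cause bits 0x%x\n", cause);
		cause = read_cause();
		if (cause)
			ack_cause(cause);
	}
	return 0;
}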
-static bool process_fault(struct idxd_desc *desc, u64 fault_addr)
+static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
{
/*
* Completion address can be bad as well. Check fault address match for descriptor
* and completion address.
*/
- if ((u64)desc->hw == fault_addr ||
- (u64)desc->completion == fault_addr) {
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_DEV_FAIL);
+ if ((u64)desc->hw == fault_addr || (u64)desc->completion == fault_addr) {
+ struct idxd_device *idxd = desc->wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+
+ dev_warn(dev, "desc with fault address: %#llx\n", fault_addr);
return true;
}
return false;
}
-static bool complete_desc(struct idxd_desc *desc)
+static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
{
- if (desc->completion->status) {
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
- return true;
- }
-
- return false;
+ idxd_dma_complete_txd(desc, reason);
+ idxd_free_desc(desc->wq, desc);
}
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
@@ -251,25 +267,25 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
struct idxd_desc *desc, *t;
struct llist_node *head;
int queued = 0;
- bool completed = false;
unsigned long flags;
+ enum idxd_complete_type reason;
*processed = 0;
head = llist_del_all(&irq_entry->pending_llist);
if (!head)
goto out;
- llist_for_each_entry_safe(desc, t, head, llnode) {
- if (wtype == IRQ_WORK_NORMAL)
- completed = complete_desc(desc);
- else if (wtype == IRQ_WORK_PROCESS_FAULT)
- completed = process_fault(desc, data);
+ if (wtype == IRQ_WORK_NORMAL)
+ reason = IDXD_COMPLETE_NORMAL;
+ else
+ reason = IDXD_COMPLETE_DEV_FAIL;
- if (completed) {
- idxd_free_desc(desc->wq, desc);
+ llist_for_each_entry_safe(desc, t, head, llnode) {
+ if (desc->completion->status) {
+ if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+ match_fault(desc, data);
+ complete_desc(desc, reason);
(*processed)++;
- if (wtype == IRQ_WORK_PROCESS_FAULT)
- break;
} else {
spin_lock_irqsave(&irq_entry->list_lock, flags);
list_add_tail(&desc->list,
@@ -287,42 +303,46 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
enum irq_work_type wtype,
int *processed, u64 data)
{
- struct list_head *node, *next;
int queued = 0;
- bool completed = false;
unsigned long flags;
+ LIST_HEAD(flist);
+ struct idxd_desc *desc, *n;
+ enum idxd_complete_type reason;
*processed = 0;
- spin_lock_irqsave(&irq_entry->list_lock, flags);
- if (list_empty(&irq_entry->work_list))
- goto out;
-
- list_for_each_safe(node, next, &irq_entry->work_list) {
- struct idxd_desc *desc =
- container_of(node, struct idxd_desc, list);
+ if (wtype == IRQ_WORK_NORMAL)
+ reason = IDXD_COMPLETE_NORMAL;
+ else
+ reason = IDXD_COMPLETE_DEV_FAIL;
+ /*
+	 * This lock protects the list from corruption by accesses made outside
+	 * of the irq handler thread.
+ */
+ spin_lock_irqsave(&irq_entry->list_lock, flags);
+ if (list_empty(&irq_entry->work_list)) {
spin_unlock_irqrestore(&irq_entry->list_lock, flags);
- if (wtype == IRQ_WORK_NORMAL)
- completed = complete_desc(desc);
- else if (wtype == IRQ_WORK_PROCESS_FAULT)
- completed = process_fault(desc, data);
+ return 0;
+ }
- if (completed) {
- spin_lock_irqsave(&irq_entry->list_lock, flags);
+ list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
+ if (desc->completion->status) {
list_del(&desc->list);
- spin_unlock_irqrestore(&irq_entry->list_lock, flags);
- idxd_free_desc(desc->wq, desc);
(*processed)++;
- if (wtype == IRQ_WORK_PROCESS_FAULT)
- return queued;
+ list_add_tail(&desc->list, &flist);
} else {
queued++;
}
- spin_lock_irqsave(&irq_entry->list_lock, flags);
}
- out:
spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+
+ list_for_each_entry(desc, &flist, list) {
+ if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+ match_fault(desc, data);
+ complete_desc(desc, reason);
+ }
+
return queued;
}
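
The rework above moves completed descriptors onto a private list while holding the lock and completes them only after the lock is dropped. A minimal user-space sketch of that pattern (pthread mutex and a toy singly-linked list, not the kernel list API):

#include <pthread.h>
#include <stdio.h>

struct node { int done; struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node n2 = { 1, NULL }, n1 = { 0, &n2 }, n0 = { 1, &n1 };
static struct node *work_list = &n0;

int main(void)
{
	struct node *flist = NULL, **pp, *n;

	/* Phase 1: under the lock, only unlink completed nodes. */
	pthread_mutex_lock(&lock);
	pp = &work_list;
	while ((n = *pp)) {
		if (n->done) {
			*pp = n->next;		/* unlink from the work list */
			n->next = flist;	/* push onto the private list */
			flist = n;
		} else {
			pp = &n->next;
		}
	}
	pthread_mutex_unlock(&lock);

	/* Phase 2: complete outside the lock, where callbacks may re-enter. */
	for (n = flist; n; n = n->next)
		printf("completing node %p\n", (void *)n);
	return 0;
}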
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 41ba21eea7c8..d5590c08db51 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1952,8 +1952,6 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
static int sdma_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(sdma_dt_ids, &pdev->dev);
struct device_node *np = pdev->dev.of_node;
struct device_node *spba_bus;
const char *fw_name;
@@ -1961,17 +1959,9 @@ static int sdma_probe(struct platform_device *pdev)
int irq;
struct resource *iores;
struct resource spba_res;
- struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
int i;
struct sdma_engine *sdma;
s32 *saddr_arr;
- const struct sdma_driver_data *drvdata = NULL;
-
- drvdata = of_id->data;
- if (!drvdata) {
- dev_err(&pdev->dev, "unable to find driver data\n");
- return -EINVAL;
- }
ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
@@ -1984,7 +1974,7 @@ static int sdma_probe(struct platform_device *pdev)
spin_lock_init(&sdma->channel_0_lock);
sdma->dev = &pdev->dev;
- sdma->drvdata = drvdata;
+ sdma->drvdata = of_device_get_match_data(sdma->dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -2063,8 +2053,6 @@ static int sdma_probe(struct platform_device *pdev)
if (sdma->drvdata->script_addrs)
sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
- if (pdata && pdata->script_addrs)
- sdma_add_scripts(sdma, pdata->script_addrs);
sdma->dma_device.dev = &pdev->dev;
@@ -2110,30 +2098,18 @@ static int sdma_probe(struct platform_device *pdev)
}
/*
- * Kick off firmware loading as the very last step:
- * attempt to load firmware only if we're not on the error path, because
- * the firmware callback requires a fully functional and allocated sdma
- * instance.
+	 * Because the device tree does not encode the ROM script address,
+	 * the RAM script in firmware is mandatory for device tree probe;
+	 * otherwise it fails.
*/
- if (pdata) {
- ret = sdma_get_firmware(sdma, pdata->fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
+ ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
+ &fw_name);
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to get firmware name\n");
} else {
- /*
- * Because that device tree does not encode ROM script address,
- * the RAM script in firmware is mandatory for device tree
- * probe, otherwise it fails.
- */
- ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
- &fw_name);
- if (ret) {
- dev_warn(&pdev->dev, "failed to get firmware name\n");
- } else {
- ret = sdma_get_firmware(sdma, fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
- }
+ ret = sdma_get_firmware(sdma, fw_name);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
}
return 0;
diff --git a/drivers/dma/lgm/Kconfig b/drivers/dma/lgm/Kconfig
new file mode 100644
index 000000000000..9194330ed0f2
--- /dev/null
+++ b/drivers/dma/lgm/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INTEL_LDMA
+ bool "Lightning Mountain centralized DMA controllers"
+ depends on X86 || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for Intel Lightning Mountain SOC DMA controllers.
+ These controllers provide DMA capabilities for a variety of on-chip
+ devices such as HSNAND and GSWIP (Gigabit Switch IP).
diff --git a/drivers/dma/lgm/Makefile b/drivers/dma/lgm/Makefile
new file mode 100644
index 000000000000..f318a8eff464
--- /dev/null
+++ b/drivers/dma/lgm/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_INTEL_LDMA) += lgm-dma.o
diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
new file mode 100644
index 000000000000..efe8bd3a0e2a
--- /dev/null
+++ b/drivers/dma/lgm/lgm-dma.c
@@ -0,0 +1,1739 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lightning Mountain centralized DMA controller driver
+ *
+ * Copyright (c) 2016 - 2020 Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+#define DRIVER_NAME "lgm-dma"
+
+#define DMA_ID 0x0008
+#define DMA_ID_REV GENMASK(7, 0)
+#define DMA_ID_PNR GENMASK(19, 16)
+#define DMA_ID_CHNR GENMASK(26, 20)
+#define DMA_ID_DW_128B BIT(27)
+#define DMA_ID_AW_36B BIT(28)
+#define DMA_VER32 0x32
+#define DMA_VER31 0x31
+#define DMA_VER22 0x0A
+
+#define DMA_CTRL 0x0010
+#define DMA_CTRL_RST BIT(0)
+#define DMA_CTRL_DSRAM_PATH BIT(1)
+#define DMA_CTRL_DBURST_WR BIT(3)
+#define DMA_CTRL_VLD_DF_ACK BIT(4)
+#define DMA_CTRL_CH_FL BIT(6)
+#define DMA_CTRL_DS_FOD BIT(7)
+#define DMA_CTRL_DRB BIT(8)
+#define DMA_CTRL_ENBE BIT(9)
+#define DMA_CTRL_DESC_TMOUT_CNT_V31 GENMASK(27, 16)
+#define DMA_CTRL_DESC_TMOUT_EN_V31 BIT(30)
+#define DMA_CTRL_PKTARB BIT(31)
+
+#define DMA_CPOLL 0x0014
+#define DMA_CPOLL_CNT GENMASK(15, 4)
+#define DMA_CPOLL_EN BIT(31)
+
+#define DMA_CS 0x0018
+#define DMA_CS_MASK GENMASK(5, 0)
+
+#define DMA_CCTRL 0x001C
+#define DMA_CCTRL_ON BIT(0)
+#define DMA_CCTRL_RST BIT(1)
+#define DMA_CCTRL_CH_POLL_EN BIT(2)
+#define DMA_CCTRL_CH_ABC BIT(3) /* Adaptive Burst Chop */
+#define DMA_CDBA_MSB GENMASK(7, 4)
+#define DMA_CCTRL_DIR_TX BIT(8)
+#define DMA_CCTRL_CLASS GENMASK(11, 9)
+#define DMA_CCTRL_CLASSH GENMASK(19, 18)
+#define DMA_CCTRL_WR_NP_EN BIT(21)
+#define DMA_CCTRL_PDEN BIT(23)
+#define DMA_MAX_CLASS (SZ_32 - 1)
+
+#define DMA_CDBA 0x0020
+#define DMA_CDLEN 0x0024
+#define DMA_CIS 0x0028
+#define DMA_CIE 0x002C
+#define DMA_CI_EOP BIT(1)
+#define DMA_CI_DUR BIT(2)
+#define DMA_CI_DESCPT BIT(3)
+#define DMA_CI_CHOFF BIT(4)
+#define DMA_CI_RDERR BIT(5)
+#define DMA_CI_ALL \
+ (DMA_CI_EOP | DMA_CI_DUR | DMA_CI_DESCPT | DMA_CI_CHOFF | DMA_CI_RDERR)
+
+#define DMA_PS 0x0040
+#define DMA_PCTRL 0x0044
+#define DMA_PCTRL_RXBL16 BIT(0)
+#define DMA_PCTRL_TXBL16 BIT(1)
+#define DMA_PCTRL_RXBL GENMASK(3, 2)
+#define DMA_PCTRL_RXBL_8 3
+#define DMA_PCTRL_TXBL GENMASK(5, 4)
+#define DMA_PCTRL_TXBL_8 3
+#define DMA_PCTRL_PDEN BIT(6)
+#define DMA_PCTRL_RXBL32 BIT(7)
+#define DMA_PCTRL_RXENDI GENMASK(9, 8)
+#define DMA_PCTRL_TXENDI GENMASK(11, 10)
+#define DMA_PCTRL_TXBL32 BIT(15)
+#define DMA_PCTRL_MEM_FLUSH BIT(16)
+
+#define DMA_IRNEN1 0x00E8
+#define DMA_IRNCR1 0x00EC
+#define DMA_IRNEN 0x00F4
+#define DMA_IRNCR 0x00F8
+#define DMA_C_DP_TICK 0x100
+#define DMA_C_DP_TICK_TIKNARB GENMASK(15, 0)
+#define DMA_C_DP_TICK_TIKARB GENMASK(31, 16)
+
+#define DMA_C_HDRM 0x110
+/*
+ * If header mode is set in the DMA descriptor:
+ *   - if bit 30 is disabled, HDR_LEN must be configured according to the
+ *     channel requirement;
+ *   - if bit 30 is enabled (checksum with header mode), HDR_LEN does not
+ *     need to be configured; it enables checksumming for the switch.
+ * If header mode is not set in the DMA descriptor, this register setting
+ * doesn't matter.
+ */
+#define DMA_C_HDRM_HDR_SUM BIT(30)
+
+#define DMA_C_BOFF 0x120
+#define DMA_C_BOFF_BOF_LEN GENMASK(7, 0)
+#define DMA_C_BOFF_EN BIT(31)
+
+#define DMA_ORRC 0x190
+#define DMA_ORRC_ORRCNT GENMASK(8, 4)
+#define DMA_ORRC_EN BIT(31)
+
+#define DMA_C_ENDIAN 0x200
+#define DMA_C_END_DATAENDI GENMASK(1, 0)
+#define DMA_C_END_DE_EN BIT(7)
+#define DMA_C_END_DESENDI GENMASK(9, 8)
+#define DMA_C_END_DES_EN BIT(16)
+
+/* DMA controller capability */
+#define DMA_ADDR_36BIT BIT(0)
+#define DMA_DATA_128BIT BIT(1)
+#define DMA_CHAN_FLOW_CTL BIT(2)
+#define DMA_DESC_FOD BIT(3)
+#define DMA_DESC_IN_SRAM BIT(4)
+#define DMA_EN_BYTE_EN BIT(5)
+#define DMA_DBURST_WR BIT(6)
+#define DMA_VALID_DESC_FETCH_ACK BIT(7)
+#define DMA_DFT_DRB BIT(8)
+
+#define DMA_ORRC_MAX_CNT (SZ_32 - 1)
+#define DMA_DFT_POLL_CNT SZ_4
+#define DMA_DFT_BURST_V22 SZ_2
+#define DMA_BURSTL_8DW SZ_8
+#define DMA_BURSTL_16DW SZ_16
+#define DMA_BURSTL_32DW SZ_32
+#define DMA_DFT_BURST DMA_BURSTL_16DW
+#define DMA_MAX_DESC_NUM (SZ_8K - 1)
+#define DMA_CHAN_BOFF_MAX (SZ_256 - 1)
+#define DMA_DFT_ENDIAN 0
+
+#define DMA_DFT_DESC_TCNT 50
+#define DMA_HDR_LEN_MAX (SZ_16K - 1)
+
+/* DMA flags */
+#define DMA_TX_CH BIT(0)
+#define DMA_RX_CH BIT(1)
+#define DEVICE_ALLOC_DESC BIT(2)
+#define CHAN_IN_USE BIT(3)
+#define DMA_HW_DESC BIT(4)
+
+/* Descriptor fields */
+#define DESC_DATA_LEN GENMASK(15, 0)
+#define DESC_BYTE_OFF GENMASK(25, 23)
+#define DESC_EOP BIT(28)
+#define DESC_SOP BIT(29)
+#define DESC_C BIT(30)
+#define DESC_OWN BIT(31)
+
+#define DMA_CHAN_RST 1
+#define DMA_MAX_SIZE (BIT(16) - 1)
+#define MAX_LOWER_CHANS 32
+#define MASK_LOWER_CHANS GENMASK(4, 0)
+#define DMA_OWN 1
+#define HIGH_4_BITS GENMASK(3, 0)
+#define DMA_DFT_DESC_NUM 1
+#define DMA_PKT_DROP_DIS 0
+
+enum ldma_chan_on_off {
+ DMA_CH_OFF = 0,
+ DMA_CH_ON = 1,
+};
+
+enum {
+ DMA_TYPE_TX = 0,
+ DMA_TYPE_RX,
+ DMA_TYPE_MCPY,
+};
+
+struct ldma_dev;
+struct ldma_port;
+
+struct ldma_chan {
+ struct virt_dma_chan vchan;
+ struct ldma_port *port; /* back pointer */
+ char name[8]; /* Channel name */
+ int nr; /* Channel id in hardware */
+ u32 flags; /* central way or channel based way */
+ enum ldma_chan_on_off onoff;
+ dma_addr_t desc_phys;
+ void *desc_base; /* Virtual address */
+ u32 desc_cnt; /* Number of descriptors */
+ int rst;
+ u32 hdrm_len;
+ bool hdrm_csum;
+ u32 boff_len;
+ u32 data_endian;
+ u32 desc_endian;
+ bool pden;
+ bool desc_rx_np;
+ bool data_endian_en;
+ bool desc_endian_en;
+ bool abc_en;
+ bool desc_init;
+ struct dma_pool *desc_pool; /* Descriptors pool */
+ u32 desc_num;
+ struct dw2_desc_sw *ds;
+ struct work_struct work;
+ struct dma_slave_config config;
+};
+
+struct ldma_port {
+ struct ldma_dev *ldev; /* back pointer */
+ u32 portid;
+ u32 rxbl;
+ u32 txbl;
+ u32 rxendi;
+ u32 txendi;
+ u32 pkt_drop;
+};
+
+/* Instance specific data */
+struct ldma_inst_data {
+ bool desc_in_sram;
+ bool chan_fc;
+ bool desc_fod; /* Fetch On Demand */
+ bool valid_desc_fetch_ack;
+ u32 orrc; /* Outstanding read count */
+ const char *name;
+ u32 type;
+};
+
+struct ldma_dev {
+ struct device *dev;
+ void __iomem *base;
+ struct reset_control *rst;
+ struct clk *core_clk;
+ struct dma_device dma_dev;
+ u32 ver;
+ int irq;
+ struct ldma_port *ports;
+ struct ldma_chan *chans; /* channel list on this DMA or port */
+ spinlock_t dev_lock; /* Controller register exclusive */
+ u32 chan_nrs;
+ u32 port_nrs;
+ u32 channels_mask;
+ u32 flags;
+ u32 pollcnt;
+ const struct ldma_inst_data *inst;
+ struct workqueue_struct *wq;
+};
+
+struct dw2_desc {
+ u32 field;
+ u32 addr;
+} __packed __aligned(8);
+
+struct dw2_desc_sw {
+ struct virt_dma_desc vdesc;
+ struct ldma_chan *chan;
+ dma_addr_t desc_phys;
+ size_t desc_cnt;
+ size_t size;
+ struct dw2_desc *desc_hw;
+};
+
+static inline void
+ldma_update_bits(struct ldma_dev *d, u32 mask, u32 val, u32 ofs)
+{
+ u32 old_val, new_val;
+
+ old_val = readl(d->base + ofs);
+ new_val = (old_val & ~mask) | (val & mask);
+
+ if (new_val != old_val)
+ writel(new_val, d->base + ofs);
+}
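
ldma_update_bits() is a read-modify-write helper that only touches the masked field and skips the write when nothing changes. A standalone sketch of the same idea against a fake register (the mask/shift arithmetic shown is the generic bit-field pattern, not the driver's macros):

#include <stdio.h>
#include <stdint.h>

#define POLL_CNT_MASK	0x0000FFF0u	/* bits 15..4, like DMA_CPOLL_CNT */
#define POLL_CNT_SHIFT	4

static uint32_t fake_reg = 0x80000000;	/* enable bit already set */

static void update_bits(uint32_t mask, uint32_t val)
{
	uint32_t old = fake_reg, new = (old & ~mask) | (val & mask);

	if (new != old)		/* avoid a pointless register write */
		fake_reg = new;
}

int main(void)
{
	update_bits(POLL_CNT_MASK, 4u << POLL_CNT_SHIFT);	/* poll count = 4 */
	printf("reg = 0x%08x\n", fake_reg);			/* 0x80000040 */
	return 0;
}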
+
+static inline struct ldma_chan *to_ldma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct ldma_chan, vchan.chan);
+}
+
+static inline struct ldma_dev *to_ldma_dev(struct dma_device *dma_dev)
+{
+ return container_of(dma_dev, struct ldma_dev, dma_dev);
+}
+
+static inline struct dw2_desc_sw *to_lgm_dma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct dw2_desc_sw, vdesc);
+}
+
+static inline bool ldma_chan_tx(struct ldma_chan *c)
+{
+ return !!(c->flags & DMA_TX_CH);
+}
+
+static inline bool ldma_chan_is_hw_desc(struct ldma_chan *c)
+{
+ return !!(c->flags & DMA_HW_DESC);
+}
+
+static void ldma_dev_reset(struct ldma_dev *d)
+
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, DMA_CTRL_RST, DMA_CTRL_RST, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_pkt_arb_cfg(struct ldma_dev *d, bool enable)
+{
+ unsigned long flags;
+ u32 mask = DMA_CTRL_PKTARB;
+ u32 val = enable ? DMA_CTRL_PKTARB : 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_sram_desc_cfg(struct ldma_dev *d, bool enable)
+{
+ unsigned long flags;
+ u32 mask = DMA_CTRL_DSRAM_PATH;
+ u32 val = enable ? DMA_CTRL_DSRAM_PATH : 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_chan_flow_ctl_cfg(struct ldma_dev *d, bool enable)
+{
+ unsigned long flags;
+ u32 mask, val;
+
+ if (d->inst->type != DMA_TYPE_TX)
+ return;
+
+ mask = DMA_CTRL_CH_FL;
+ val = enable ? DMA_CTRL_CH_FL : 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_global_polling_enable(struct ldma_dev *d)
+{
+ unsigned long flags;
+ u32 mask = DMA_CPOLL_EN | DMA_CPOLL_CNT;
+ u32 val = DMA_CPOLL_EN;
+
+ val |= FIELD_PREP(DMA_CPOLL_CNT, d->pollcnt);
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CPOLL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_desc_fetch_on_demand_cfg(struct ldma_dev *d, bool enable)
+{
+ unsigned long flags;
+ u32 mask, val;
+
+ if (d->inst->type == DMA_TYPE_MCPY)
+ return;
+
+ mask = DMA_CTRL_DS_FOD;
+ val = enable ? DMA_CTRL_DS_FOD : 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_byte_enable_cfg(struct ldma_dev *d, bool enable)
+{
+ unsigned long flags;
+ u32 mask = DMA_CTRL_ENBE;
+ u32 val = enable ? DMA_CTRL_ENBE : 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_orrc_cfg(struct ldma_dev *d)
+{
+ unsigned long flags;
+ u32 val = 0;
+ u32 mask;
+
+ if (d->inst->type == DMA_TYPE_RX)
+ return;
+
+ mask = DMA_ORRC_EN | DMA_ORRC_ORRCNT;
+ if (d->inst->orrc > 0 && d->inst->orrc <= DMA_ORRC_MAX_CNT)
+ val = DMA_ORRC_EN | FIELD_PREP(DMA_ORRC_ORRCNT, d->inst->orrc);
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_ORRC);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_df_tout_cfg(struct ldma_dev *d, bool enable, int tcnt)
+{
+ u32 mask = DMA_CTRL_DESC_TMOUT_CNT_V31;
+ unsigned long flags;
+ u32 val;
+
+ if (enable)
+ val = DMA_CTRL_DESC_TMOUT_EN_V31 | FIELD_PREP(DMA_CTRL_DESC_TMOUT_CNT_V31, tcnt);
+ else
+ val = 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_dburst_wr_cfg(struct ldma_dev *d, bool enable)
+{
+ unsigned long flags;
+ u32 mask, val;
+
+ if (d->inst->type != DMA_TYPE_RX && d->inst->type != DMA_TYPE_MCPY)
+ return;
+
+ mask = DMA_CTRL_DBURST_WR;
+ val = enable ? DMA_CTRL_DBURST_WR : 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_vld_fetch_ack_cfg(struct ldma_dev *d, bool enable)
+{
+ unsigned long flags;
+ u32 mask, val;
+
+ if (d->inst->type != DMA_TYPE_TX)
+ return;
+
+ mask = DMA_CTRL_VLD_DF_ACK;
+ val = enable ? DMA_CTRL_VLD_DF_ACK : 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_dev_drb_cfg(struct ldma_dev *d, int enable)
+{
+ unsigned long flags;
+ u32 mask = DMA_CTRL_DRB;
+ u32 val = enable ? DMA_CTRL_DRB : 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, mask, val, DMA_CTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static int ldma_dev_cfg(struct ldma_dev *d)
+{
+ bool enable;
+
+ ldma_dev_pkt_arb_cfg(d, true);
+ ldma_dev_global_polling_enable(d);
+
+ enable = !!(d->flags & DMA_DFT_DRB);
+ ldma_dev_drb_cfg(d, enable);
+
+ enable = !!(d->flags & DMA_EN_BYTE_EN);
+ ldma_dev_byte_enable_cfg(d, enable);
+
+ enable = !!(d->flags & DMA_CHAN_FLOW_CTL);
+ ldma_dev_chan_flow_ctl_cfg(d, enable);
+
+ enable = !!(d->flags & DMA_DESC_FOD);
+ ldma_dev_desc_fetch_on_demand_cfg(d, enable);
+
+ enable = !!(d->flags & DMA_DESC_IN_SRAM);
+ ldma_dev_sram_desc_cfg(d, enable);
+
+ enable = !!(d->flags & DMA_DBURST_WR);
+ ldma_dev_dburst_wr_cfg(d, enable);
+
+ enable = !!(d->flags & DMA_VALID_DESC_FETCH_ACK);
+ ldma_dev_vld_fetch_ack_cfg(d, enable);
+
+ if (d->ver > DMA_VER22) {
+ ldma_dev_orrc_cfg(d);
+ ldma_dev_df_tout_cfg(d, true, DMA_DFT_DESC_TCNT);
+ }
+
+ dev_dbg(d->dev, "%s Controller 0x%08x configuration done\n",
+ d->inst->name, readl(d->base + DMA_CTRL));
+
+ return 0;
+}
+
+static int ldma_chan_cctrl_cfg(struct ldma_chan *c, u32 val)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 class_low, class_high;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ reg = readl(d->base + DMA_CCTRL);
+ /* Read from hardware */
+ if (reg & DMA_CCTRL_DIR_TX)
+ c->flags |= DMA_TX_CH;
+ else
+ c->flags |= DMA_RX_CH;
+
+ /* Keep the class value unchanged */
+ class_low = FIELD_GET(DMA_CCTRL_CLASS, reg);
+ class_high = FIELD_GET(DMA_CCTRL_CLASSH, reg);
+ val &= ~DMA_CCTRL_CLASS;
+ val |= FIELD_PREP(DMA_CCTRL_CLASS, class_low);
+ val &= ~DMA_CCTRL_CLASSH;
+ val |= FIELD_PREP(DMA_CCTRL_CLASSH, class_high);
+ writel(val, d->base + DMA_CCTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+
+ return 0;
+}
+
+static void ldma_chan_irq_init(struct ldma_chan *c)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ unsigned long flags;
+ u32 enofs, crofs;
+ u32 cn_bit;
+
+ if (c->nr < MAX_LOWER_CHANS) {
+ enofs = DMA_IRNEN;
+ crofs = DMA_IRNCR;
+ } else {
+ enofs = DMA_IRNEN1;
+ crofs = DMA_IRNCR1;
+ }
+
+ cn_bit = BIT(c->nr & MASK_LOWER_CHANS);
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+
+	/* Clear and disable all interrupts */
+ writel(0, d->base + DMA_CIE);
+ writel(DMA_CI_ALL, d->base + DMA_CIS);
+
+ ldma_update_bits(d, cn_bit, 0, enofs);
+ writel(cn_bit, d->base + crofs);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_chan_set_class(struct ldma_chan *c, u32 val)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 class_val;
+
+ if (d->inst->type == DMA_TYPE_MCPY || val > DMA_MAX_CLASS)
+ return;
+
+ /* 3 bits low */
+ class_val = FIELD_PREP(DMA_CCTRL_CLASS, val & 0x7);
+ /* 2 bits high */
+ class_val |= FIELD_PREP(DMA_CCTRL_CLASSH, (val >> 3) & 0x3);
+
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, DMA_CCTRL_CLASS | DMA_CCTRL_CLASSH, class_val,
+ DMA_CCTRL);
+}
+
+static int ldma_chan_on(struct ldma_chan *c)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ unsigned long flags;
+
+	/* If descriptors are not configured, do not allow the channel to be turned on */
+ if (WARN_ON(!c->desc_init))
+ return -EINVAL;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, DMA_CCTRL_ON, DMA_CCTRL_ON, DMA_CCTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+
+ c->onoff = DMA_CH_ON;
+
+ return 0;
+}
+
+static int ldma_chan_off(struct ldma_chan *c)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, DMA_CCTRL_ON, 0, DMA_CCTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+
+ ret = readl_poll_timeout_atomic(d->base + DMA_CCTRL, val,
+ !(val & DMA_CCTRL_ON), 0, 10000);
+ if (ret)
+ return ret;
+
+ c->onoff = DMA_CH_OFF;
+
+ return 0;
+}
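
readl_poll_timeout_atomic() above simply bounds a busy-wait on a register condition. A rough standalone sketch of the shape of such a loop (fake register; the microsecond budget and per-iteration delay are illustrative):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

static uint32_t fake_cctrl = 0x1;	/* DMA_CCTRL_ON still set */

static uint32_t read_reg(void)
{
	/* Pretend the hardware clears the ON bit after a few reads. */
	static int reads;

	if (++reads > 3)
		fake_cctrl &= ~0x1u;
	return fake_cctrl;
}

static int poll_until_clear(uint32_t bit, unsigned int max_us)
{
	for (unsigned int us = 0; us <= max_us; us++) {
		if (!(read_reg() & bit))
			return 0;
		/* a real implementation would delay ~1us per iteration here */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("poll result: %d\n", poll_until_clear(0x1, 10000));
	return 0;
}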
+
+static void ldma_chan_desc_hw_cfg(struct ldma_chan *c, dma_addr_t desc_base,
+ int desc_num)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ writel(lower_32_bits(desc_base), d->base + DMA_CDBA);
+
+	/* Upper 4 bits of 36-bit addressing */
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ u32 hi = upper_32_bits(desc_base) & HIGH_4_BITS;
+
+ ldma_update_bits(d, DMA_CDBA_MSB,
+ FIELD_PREP(DMA_CDBA_MSB, hi), DMA_CCTRL);
+ }
+ writel(desc_num, d->base + DMA_CDLEN);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+
+ c->desc_init = true;
+}
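
A small sketch of the 36-bit descriptor-base split performed above: the low 32 bits go to DMA_CDBA and the top 4 bits are folded into the DMA_CDBA_MSB field (hypothetical address, plain C):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t desc_base = 0xABCD12340ULL;		/* hypothetical 36-bit address */
	uint32_t lo = (uint32_t)desc_base;		/* lower_32_bits() */
	uint32_t hi = (desc_base >> 32) & 0xF;		/* upper_32_bits() & HIGH_4_BITS */

	printf("CDBA = 0x%08x, CDBA_MSB field = 0x%x\n", lo, hi);
	return 0;
}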
+
+static struct dma_async_tx_descriptor *
+ldma_chan_desc_cfg(struct dma_chan *chan, dma_addr_t desc_base, int desc_num)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ struct dma_async_tx_descriptor *tx;
+ struct dw2_desc_sw *ds;
+
+ if (!desc_num) {
+ dev_err(d->dev, "Channel %d must allocate descriptor first\n",
+ c->nr);
+ return NULL;
+ }
+
+ if (desc_num > DMA_MAX_DESC_NUM) {
+ dev_err(d->dev, "Channel %d descriptor number out of range %d\n",
+ c->nr, desc_num);
+ return NULL;
+ }
+
+ ldma_chan_desc_hw_cfg(c, desc_base, desc_num);
+
+ c->flags |= DMA_HW_DESC;
+ c->desc_cnt = desc_num;
+ c->desc_phys = desc_base;
+
+ ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
+ if (!ds)
+ return NULL;
+
+ tx = &ds->vdesc.tx;
+ dma_async_tx_descriptor_init(tx, chan);
+
+ return tx;
+}
+
+static int ldma_chan_reset(struct ldma_chan *c)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ ret = ldma_chan_off(c);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, DMA_CCTRL_RST, DMA_CCTRL_RST, DMA_CCTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+
+ ret = readl_poll_timeout_atomic(d->base + DMA_CCTRL, val,
+ !(val & DMA_CCTRL_RST), 0, 10000);
+ if (ret)
+ return ret;
+
+ c->rst = 1;
+ c->desc_init = false;
+
+ return 0;
+}
+
+static void ldma_chan_byte_offset_cfg(struct ldma_chan *c, u32 boff_len)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 mask = DMA_C_BOFF_EN | DMA_C_BOFF_BOF_LEN;
+ u32 val;
+
+ if (boff_len > 0 && boff_len <= DMA_CHAN_BOFF_MAX)
+ val = FIELD_PREP(DMA_C_BOFF_BOF_LEN, boff_len) | DMA_C_BOFF_EN;
+ else
+ val = 0;
+
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, mask, val, DMA_C_BOFF);
+}
+
+static void ldma_chan_data_endian_cfg(struct ldma_chan *c, bool enable,
+ u32 endian_type)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 mask = DMA_C_END_DE_EN | DMA_C_END_DATAENDI;
+ u32 val;
+
+ if (enable)
+ val = DMA_C_END_DE_EN | FIELD_PREP(DMA_C_END_DATAENDI, endian_type);
+ else
+ val = 0;
+
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, mask, val, DMA_C_ENDIAN);
+}
+
+static void ldma_chan_desc_endian_cfg(struct ldma_chan *c, bool enable,
+ u32 endian_type)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 mask = DMA_C_END_DES_EN | DMA_C_END_DESENDI;
+ u32 val;
+
+ if (enable)
+ val = DMA_C_END_DES_EN | FIELD_PREP(DMA_C_END_DESENDI, endian_type);
+ else
+ val = 0;
+
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, mask, val, DMA_C_ENDIAN);
+}
+
+static void ldma_chan_hdr_mode_cfg(struct ldma_chan *c, u32 hdr_len, bool csum)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 mask, val;
+
+	/* NB: when checksum is disabled, the header length must be provided */
+ if (!csum && (!hdr_len || hdr_len > DMA_HDR_LEN_MAX))
+ return;
+
+ mask = DMA_C_HDRM_HDR_SUM;
+ val = DMA_C_HDRM_HDR_SUM;
+
+ if (!csum && hdr_len)
+ val = hdr_len;
+
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, mask, val, DMA_C_HDRM);
+}
+
+static void ldma_chan_rxwr_np_cfg(struct ldma_chan *c, bool enable)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 mask, val;
+
+ /* Only valid for RX channel */
+ if (ldma_chan_tx(c))
+ return;
+
+ mask = DMA_CCTRL_WR_NP_EN;
+ val = enable ? DMA_CCTRL_WR_NP_EN : 0;
+
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, mask, val, DMA_CCTRL);
+}
+
+static void ldma_chan_abc_cfg(struct ldma_chan *c, bool enable)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 mask, val;
+
+ if (d->ver < DMA_VER32 || ldma_chan_tx(c))
+ return;
+
+ mask = DMA_CCTRL_CH_ABC;
+ val = enable ? DMA_CCTRL_CH_ABC : 0;
+
+ ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
+ ldma_update_bits(d, mask, val, DMA_CCTRL);
+}
+
+static int ldma_port_cfg(struct ldma_port *p)
+{
+ unsigned long flags;
+ struct ldma_dev *d;
+ u32 reg;
+
+ d = p->ldev;
+ reg = FIELD_PREP(DMA_PCTRL_TXENDI, p->txendi);
+ reg |= FIELD_PREP(DMA_PCTRL_RXENDI, p->rxendi);
+
+ if (d->ver == DMA_VER22) {
+ reg |= FIELD_PREP(DMA_PCTRL_TXBL, p->txbl);
+ reg |= FIELD_PREP(DMA_PCTRL_RXBL, p->rxbl);
+ } else {
+ reg |= FIELD_PREP(DMA_PCTRL_PDEN, p->pkt_drop);
+
+ if (p->txbl == DMA_BURSTL_32DW)
+ reg |= DMA_PCTRL_TXBL32;
+ else if (p->txbl == DMA_BURSTL_16DW)
+ reg |= DMA_PCTRL_TXBL16;
+ else
+ reg |= FIELD_PREP(DMA_PCTRL_TXBL, DMA_PCTRL_TXBL_8);
+
+ if (p->rxbl == DMA_BURSTL_32DW)
+ reg |= DMA_PCTRL_RXBL32;
+ else if (p->rxbl == DMA_BURSTL_16DW)
+ reg |= DMA_PCTRL_RXBL16;
+ else
+ reg |= FIELD_PREP(DMA_PCTRL_RXBL, DMA_PCTRL_RXBL_8);
+ }
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ writel(p->portid, d->base + DMA_PS);
+ writel(reg, d->base + DMA_PCTRL);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+
+ reg = readl(d->base + DMA_PCTRL); /* read back */
+ dev_dbg(d->dev, "Port Control 0x%08x configuration done\n", reg);
+
+ return 0;
+}
+
+static int ldma_chan_cfg(struct ldma_chan *c)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ unsigned long flags;
+ u32 reg;
+
+ reg = c->pden ? DMA_CCTRL_PDEN : 0;
+ reg |= c->onoff ? DMA_CCTRL_ON : 0;
+ reg |= c->rst ? DMA_CCTRL_RST : 0;
+
+ ldma_chan_cctrl_cfg(c, reg);
+ ldma_chan_irq_init(c);
+
+ if (d->ver <= DMA_VER22)
+ return 0;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ ldma_chan_set_class(c, c->nr);
+ ldma_chan_byte_offset_cfg(c, c->boff_len);
+ ldma_chan_data_endian_cfg(c, c->data_endian_en, c->data_endian);
+ ldma_chan_desc_endian_cfg(c, c->desc_endian_en, c->desc_endian);
+ ldma_chan_hdr_mode_cfg(c, c->hdrm_len, c->hdrm_csum);
+ ldma_chan_rxwr_np_cfg(c, c->desc_rx_np);
+ ldma_chan_abc_cfg(c, c->abc_en);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+
+ if (ldma_chan_is_hw_desc(c))
+ ldma_chan_desc_hw_cfg(c, c->desc_phys, c->desc_cnt);
+
+ return 0;
+}
+
+static void ldma_dev_init(struct ldma_dev *d)
+{
+ unsigned long ch_mask = (unsigned long)d->channels_mask;
+ struct ldma_port *p;
+ struct ldma_chan *c;
+ int i;
+ u32 j;
+
+ spin_lock_init(&d->dev_lock);
+ ldma_dev_reset(d);
+ ldma_dev_cfg(d);
+
+ /* DMA port initialization */
+ for (i = 0; i < d->port_nrs; i++) {
+ p = &d->ports[i];
+ ldma_port_cfg(p);
+ }
+
+ /* DMA channel initialization */
+ for_each_set_bit(j, &ch_mask, d->chan_nrs) {
+ c = &d->chans[j];
+ ldma_chan_cfg(c);
+ }
+}
+
+static int ldma_cfg_init(struct ldma_dev *d)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(d->dev);
+ struct ldma_port *p;
+ int i;
+
+ if (fwnode_property_read_bool(fwnode, "intel,dma-byte-en"))
+ d->flags |= DMA_EN_BYTE_EN;
+
+ if (fwnode_property_read_bool(fwnode, "intel,dma-dburst-wr"))
+ d->flags |= DMA_DBURST_WR;
+
+ if (fwnode_property_read_bool(fwnode, "intel,dma-drb"))
+ d->flags |= DMA_DFT_DRB;
+
+ if (fwnode_property_read_u32(fwnode, "intel,dma-poll-cnt",
+ &d->pollcnt))
+ d->pollcnt = DMA_DFT_POLL_CNT;
+
+ if (d->inst->chan_fc)
+ d->flags |= DMA_CHAN_FLOW_CTL;
+
+ if (d->inst->desc_fod)
+ d->flags |= DMA_DESC_FOD;
+
+ if (d->inst->desc_in_sram)
+ d->flags |= DMA_DESC_IN_SRAM;
+
+ if (d->inst->valid_desc_fetch_ack)
+ d->flags |= DMA_VALID_DESC_FETCH_ACK;
+
+ if (d->ver > DMA_VER22) {
+ if (!d->port_nrs)
+ return -EINVAL;
+
+ for (i = 0; i < d->port_nrs; i++) {
+ p = &d->ports[i];
+ p->rxendi = DMA_DFT_ENDIAN;
+ p->txendi = DMA_DFT_ENDIAN;
+ p->rxbl = DMA_DFT_BURST;
+ p->txbl = DMA_DFT_BURST;
+ p->pkt_drop = DMA_PKT_DROP_DIS;
+ }
+ }
+
+ return 0;
+}
+
+static void dma_free_desc_resource(struct virt_dma_desc *vdesc)
+{
+ struct dw2_desc_sw *ds = to_lgm_dma_desc(vdesc);
+ struct ldma_chan *c = ds->chan;
+
+ dma_pool_free(c->desc_pool, ds->desc_hw, ds->desc_phys);
+ kfree(ds);
+}
+
+static struct dw2_desc_sw *
+dma_alloc_desc_resource(int num, struct ldma_chan *c)
+{
+ struct device *dev = c->vchan.chan.device->dev;
+ struct dw2_desc_sw *ds;
+
+ if (num > c->desc_num) {
+		dev_err(dev, "sg num %d exceeds max %d\n", num, c->desc_num);
+ return NULL;
+ }
+
+ ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
+ if (!ds)
+ return NULL;
+
+ ds->chan = c;
+ ds->desc_hw = dma_pool_zalloc(c->desc_pool, GFP_ATOMIC,
+ &ds->desc_phys);
+ if (!ds->desc_hw) {
+ dev_dbg(dev, "out of memory for link descriptor\n");
+ kfree(ds);
+ return NULL;
+ }
+ ds->desc_cnt = num;
+
+ return ds;
+}
+
+static void ldma_chan_irq_en(struct ldma_chan *c)
+{
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->dev_lock, flags);
+ writel(c->nr, d->base + DMA_CS);
+ writel(DMA_CI_EOP, d->base + DMA_CIE);
+ writel(BIT(c->nr), d->base + DMA_IRNEN);
+ spin_unlock_irqrestore(&d->dev_lock, flags);
+}
+
+static void ldma_issue_pending(struct dma_chan *chan)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ unsigned long flags;
+
+ if (d->ver == DMA_VER22) {
+ spin_lock_irqsave(&c->vchan.lock, flags);
+ if (vchan_issue_pending(&c->vchan)) {
+ struct virt_dma_desc *vdesc;
+
+ /* Get the next descriptor */
+ vdesc = vchan_next_desc(&c->vchan);
+ if (!vdesc) {
+ c->ds = NULL;
+ spin_unlock_irqrestore(&c->vchan.lock, flags);
+ return;
+ }
+ list_del(&vdesc->node);
+ c->ds = to_lgm_dma_desc(vdesc);
+ ldma_chan_desc_hw_cfg(c, c->ds->desc_phys, c->ds->desc_cnt);
+ ldma_chan_irq_en(c);
+ }
+ spin_unlock_irqrestore(&c->vchan.lock, flags);
+ }
+ ldma_chan_on(c);
+}
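
For orientation, a minimal sketch of the standard dmaengine client flow that ends in ldma_issue_pending() above (hypothetical consumer code, not part of this patch; the request name "rx" and the scatterlist are assumptions):

	/* Hypothetical dmaengine consumer: prep -> submit -> issue_pending. */
	static int client_start_rx(struct device *dev, struct scatterlist *sgl,
				   unsigned int sg_len)
	{
		struct dma_chan *ch = dma_request_chan(dev, "rx"); /* "rx" is an assumed name */
		struct dma_async_tx_descriptor *tx;

		if (IS_ERR(ch))
			return PTR_ERR(ch);

		tx = dmaengine_prep_slave_sg(ch, sgl, sg_len, DMA_DEV_TO_MEM,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx) {
			dma_release_channel(ch);
			return -EINVAL;
		}

		dmaengine_submit(tx);		/* queues on the channel's virt-dma list */
		dma_async_issue_pending(ch);	/* ends up in ldma_issue_pending() */
		return 0;
	}
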
+
+static void ldma_synchronize(struct dma_chan *chan)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+
+ /*
+ * Cancel any pending work; if there was any, the descriptor
+ * resource needs to be freed here.
+ */
+ cancel_work_sync(&c->work);
+ vchan_synchronize(&c->vchan);
+ if (c->ds)
+ dma_free_desc_resource(&c->ds->vdesc);
+}
+
+static int ldma_terminate_all(struct dma_chan *chan)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&c->vchan.lock, flags);
+ vchan_get_all_descriptors(&c->vchan, &head);
+ spin_unlock_irqrestore(&c->vchan.lock, flags);
+ vchan_dma_desc_free_list(&c->vchan, &head);
+
+ return ldma_chan_reset(c);
+}
+
+static int ldma_resume_chan(struct dma_chan *chan)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+
+ ldma_chan_on(c);
+
+ return 0;
+}
+
+static int ldma_pause_chan(struct dma_chan *chan)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+
+ return ldma_chan_off(c);
+}
+
+static enum dma_status
+ldma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ enum dma_status status = DMA_COMPLETE;
+
+ if (d->ver == DMA_VER22)
+ status = dma_cookie_status(chan, cookie, txstate);
+
+ return status;
+}
+
+static void dma_chan_irq(int irq, void *data)
+{
+ struct ldma_chan *c = data;
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ u32 stat;
+
+ /* Disable channel interrupts */
+ writel(c->nr, d->base + DMA_CS);
+ stat = readl(d->base + DMA_CIS);
+ if (!stat)
+ return;
+
+ writel(readl(d->base + DMA_CIE) & ~DMA_CI_ALL, d->base + DMA_CIE);
+ writel(stat, d->base + DMA_CIS);
+ queue_work(d->wq, &c->work);
+}
+
+static irqreturn_t dma_interrupt(int irq, void *dev_id)
+{
+ struct ldma_dev *d = dev_id;
+ struct ldma_chan *c;
+ unsigned long irncr;
+ u32 cid;
+
+ irncr = readl(d->base + DMA_IRNCR);
+ if (!irncr) {
+ dev_err(d->dev, "dummy interrupt\n");
+ return IRQ_NONE;
+ }
+
+ for_each_set_bit(cid, &irncr, d->chan_nrs) {
+ /* Mask */
+ writel(readl(d->base + DMA_IRNEN) & ~BIT(cid), d->base + DMA_IRNEN);
+ /* Ack */
+ writel(readl(d->base + DMA_IRNCR) | BIT(cid), d->base + DMA_IRNCR);
+
+ c = &d->chans[cid];
+ dma_chan_irq(irq, c);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void prep_slave_burst_len(struct ldma_chan *c)
+{
+ struct ldma_port *p = c->port;
+ struct dma_slave_config *cfg = &c->config;
+
+ if (cfg->dst_maxburst)
+ cfg->src_maxburst = cfg->dst_maxburst;
+
+ /* TX and RX have the same burst length */
+ p->txbl = ilog2(cfg->src_maxburst);
+ p->rxbl = p->txbl;
+}
+
+static struct dma_async_tx_descriptor *
+ldma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sglen, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ size_t len, avail, total = 0;
+ struct dw2_desc *hw_ds;
+ struct dw2_desc_sw *ds;
+ struct scatterlist *sg;
+ int num = sglen, i;
+ dma_addr_t addr;
+
+ if (!sgl)
+ return NULL;
+
+ if (d->ver > DMA_VER22)
+ return ldma_chan_desc_cfg(chan, sgl->dma_address, sglen);
+
+ for_each_sg(sgl, sg, sglen, i) {
+ avail = sg_dma_len(sg);
+ if (avail > DMA_MAX_SIZE)
+ num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
+ }
+
+ ds = dma_alloc_desc_resource(num, c);
+ if (!ds)
+ return NULL;
+
+ c->ds = ds;
+
+ num = 0;
+ /* SOP and EOP have to be handled carefully */
+ for_each_sg(sgl, sg, sglen, i) {
+ addr = sg_dma_address(sg);
+ avail = sg_dma_len(sg);
+ total += avail;
+
+ do {
+ len = min_t(size_t, avail, DMA_MAX_SIZE);
+
+ hw_ds = &ds->desc_hw[num];
+ switch (sglen) {
+ case 1:
+ hw_ds->field &= ~DESC_SOP;
+ hw_ds->field |= FIELD_PREP(DESC_SOP, 1);
+
+ hw_ds->field &= ~DESC_EOP;
+ hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
+ break;
+ default:
+ if (num == 0) {
+ hw_ds->field &= ~DESC_SOP;
+ hw_ds->field |= FIELD_PREP(DESC_SOP, 1);
+
+ hw_ds->field &= ~DESC_EOP;
+ hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
+ } else if (num == (sglen - 1)) {
+ hw_ds->field &= ~DESC_SOP;
+ hw_ds->field |= FIELD_PREP(DESC_SOP, 0);
+ hw_ds->field &= ~DESC_EOP;
+ hw_ds->field |= FIELD_PREP(DESC_EOP, 1);
+ } else {
+ hw_ds->field &= ~DESC_SOP;
+ hw_ds->field |= FIELD_PREP(DESC_SOP, 0);
+
+ hw_ds->field &= ~DESC_EOP;
+ hw_ds->field |= FIELD_PREP(DESC_EOP, 0);
+ }
+ break;
+ }
+ /* Only 32-bit addresses are supported */
+ hw_ds->addr = (u32)addr;
+
+ hw_ds->field &= ~DESC_DATA_LEN;
+ hw_ds->field |= FIELD_PREP(DESC_DATA_LEN, len);
+
+ hw_ds->field &= ~DESC_C;
+ hw_ds->field |= FIELD_PREP(DESC_C, 0);
+
+ hw_ds->field &= ~DESC_BYTE_OFF;
+ hw_ds->field |= FIELD_PREP(DESC_BYTE_OFF, addr & 0x3);
+
+ /* Ensure data ready before ownership change */
+ wmb();
+ hw_ds->field &= ~DESC_OWN;
+ hw_ds->field |= FIELD_PREP(DESC_OWN, DMA_OWN);
+
+ /* Ensure ownership changed before moving forward */
+ wmb();
+ num++;
+ addr += len;
+ avail -= len;
+ } while (avail);
+ }
+
+ ds->size = total;
+ prep_slave_burst_len(c);
+
+ return vchan_tx_prep(&c->vchan, &ds->vdesc, DMA_CTRL_ACK);
+}
+
+static int
+ldma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+
+ memcpy(&c->config, cfg, sizeof(c->config));
+
+ return 0;
+}
+
+static int ldma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+ struct device *dev = c->vchan.chan.device->dev;
+ size_t desc_sz;
+
+ if (d->ver > DMA_VER22) {
+ c->flags |= CHAN_IN_USE;
+ return 0;
+ }
+
+ if (c->desc_pool)
+ return c->desc_num;
+
+ desc_sz = c->desc_num * sizeof(struct dw2_desc);
+ c->desc_pool = dma_pool_create(c->name, dev, desc_sz,
+ __alignof__(struct dw2_desc), 0);
+
+ if (!c->desc_pool) {
+ dev_err(dev, "unable to allocate descriptor pool\n");
+ return -ENOMEM;
+ }
+
+ return c->desc_num;
+}
+
+static void ldma_free_chan_resources(struct dma_chan *chan)
+{
+ struct ldma_chan *c = to_ldma_chan(chan);
+ struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
+
+ if (d->ver == DMA_VER22) {
+ dma_pool_destroy(c->desc_pool);
+ c->desc_pool = NULL;
+ vchan_free_chan_resources(to_virt_chan(chan));
+ ldma_chan_reset(c);
+ } else {
+ c->flags &= ~CHAN_IN_USE;
+ }
+}
+
+static void dma_work(struct work_struct *work)
+{
+ struct ldma_chan *c = container_of(work, struct ldma_chan, work);
+ struct dma_async_tx_descriptor *tx = &c->ds->vdesc.tx;
+ struct virt_dma_chan *vc = &c->vchan;
+ struct dmaengine_desc_callback cb;
+ struct virt_dma_desc *vd, *_vd;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&c->vchan.lock, flags);
+ list_splice_tail_init(&vc->desc_completed, &head);
+ spin_unlock_irqrestore(&c->vchan.lock, flags);
+ dmaengine_desc_get_callback(tx, &cb);
+ dma_cookie_complete(tx);
+ dmaengine_desc_callback_invoke(&cb, NULL);
+
+ list_for_each_entry_safe(vd, _vd, &head, node) {
+ dmaengine_desc_get_callback(tx, &cb);
+ dma_cookie_complete(tx);
+ list_del(&vd->node);
+ dmaengine_desc_callback_invoke(&cb, NULL);
+
+ vchan_vdesc_fini(vd);
+ }
+ c->ds = NULL;
+}
+
+static void
+update_burst_len_v22(struct ldma_chan *c, struct ldma_port *p, u32 burst)
+{
+ if (ldma_chan_tx(c))
+ p->txbl = ilog2(burst);
+ else
+ p->rxbl = ilog2(burst);
+}
+
+static void
+update_burst_len_v3X(struct ldma_chan *c, struct ldma_port *p, u32 burst)
+{
+ if (ldma_chan_tx(c))
+ p->txbl = burst;
+ else
+ p->rxbl = burst;
+}
+
+static int
+update_client_configs(struct of_dma *ofdma, struct of_phandle_args *spec)
+{
+ struct ldma_dev *d = ofdma->of_dma_data;
+ u32 chan_id = spec->args[0];
+ u32 port_id = spec->args[1];
+ u32 burst = spec->args[2];
+ struct ldma_port *p;
+ struct ldma_chan *c;
+
+ if (chan_id >= d->chan_nrs || port_id >= d->port_nrs)
+ return 0;
+
+ p = &d->ports[port_id];
+ c = &d->chans[chan_id];
+ c->port = p;
+
+ if (d->ver == DMA_VER22)
+ update_burst_len_v22(c, p, burst);
+ else
+ update_burst_len_v3X(c, p, burst);
+
+ ldma_port_cfg(p);
+
+ return 1;
+}
+
+static struct dma_chan *ldma_xlate(struct of_phandle_args *spec,
+ struct of_dma *ofdma)
+{
+ struct ldma_dev *d = ofdma->of_dma_data;
+ u32 chan_id = spec->args[0];
+ int ret;
+
+ if (!spec->args_count)
+ return NULL;
+
+ /* If args_count is 1, the driver uses the default settings */
+ if (spec->args_count > 1) {
+ ret = update_client_configs(ofdma, spec);
+ if (!ret)
+ return NULL;
+ }
+
+ return dma_get_slave_channel(&d->chans[chan_id].vchan.chan);
+}
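
As a hedged illustration of how the optional xlate arguments above are consumed (the devicetree specifier values below are invented for the example):

	/*
	 * Hypothetical client specifier:  dmas = <&dma0 5 1 8>;
	 * ldma_xlate() then sees spec->args_count == 3 with
	 *   args[0] = 5  -> channel id
	 *   args[1] = 1  -> port id
	 *   args[2] = 8  -> burst length
	 * and update_client_configs() stores ilog2(8) == 3 in the port's
	 * txbl/rxbl for DMA_VER22, or the raw value 8 for later versions.
	 */
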
+
+static void ldma_dma_init_v22(int i, struct ldma_dev *d)
+{
+ struct ldma_chan *c;
+
+ c = &d->chans[i];
+ c->nr = i; /* Real channel number */
+ c->rst = DMA_CHAN_RST;
+ c->desc_num = DMA_DFT_DESC_NUM;
+ snprintf(c->name, sizeof(c->name), "chan%d", c->nr);
+ INIT_WORK(&c->work, dma_work);
+ c->vchan.desc_free = dma_free_desc_resource;
+ vchan_init(&c->vchan, &d->dma_dev);
+}
+
+static void ldma_dma_init_v3X(int i, struct ldma_dev *d)
+{
+ struct ldma_chan *c;
+
+ c = &d->chans[i];
+ c->data_endian = DMA_DFT_ENDIAN;
+ c->desc_endian = DMA_DFT_ENDIAN;
+ c->data_endian_en = false;
+ c->desc_endian_en = false;
+ c->desc_rx_np = false;
+ c->flags |= DEVICE_ALLOC_DESC;
+ c->onoff = DMA_CH_OFF;
+ c->rst = DMA_CHAN_RST;
+ c->abc_en = true;
+ c->hdrm_csum = false;
+ c->boff_len = 0;
+ c->nr = i;
+ c->vchan.desc_free = dma_free_desc_resource;
+ vchan_init(&c->vchan, &d->dma_dev);
+}
+
+static int ldma_init_v22(struct ldma_dev *d, struct platform_device *pdev)
+{
+ int ret;
+
+ ret = device_property_read_u32(d->dev, "dma-channels", &d->chan_nrs);
+ if (ret < 0) {
+ dev_err(d->dev, "unable to read dma-channels property\n");
+ return ret;
+ }
+
+ d->irq = platform_get_irq(pdev, 0);
+ if (d->irq < 0)
+ return d->irq;
+
+ ret = devm_request_irq(&pdev->dev, d->irq, dma_interrupt, 0,
+ DRIVER_NAME, d);
+ if (ret)
+ return ret;
+
+ d->wq = alloc_ordered_workqueue("dma_wq", WQ_MEM_RECLAIM |
+ WQ_HIGHPRI);
+ if (!d->wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ldma_clk_disable(void *data)
+{
+ struct ldma_dev *d = data;
+
+ clk_disable_unprepare(d->core_clk);
+ reset_control_assert(d->rst);
+}
+
+static const struct ldma_inst_data dma0 = {
+ .name = "dma0",
+ .chan_fc = false,
+ .desc_fod = false,
+ .desc_in_sram = false,
+ .valid_desc_fetch_ack = false,
+};
+
+static const struct ldma_inst_data dma2tx = {
+ .name = "dma2tx",
+ .type = DMA_TYPE_TX,
+ .orrc = 16,
+ .chan_fc = true,
+ .desc_fod = true,
+ .desc_in_sram = true,
+ .valid_desc_fetch_ack = true,
+};
+
+static const struct ldma_inst_data dma1rx = {
+ .name = "dma1rx",
+ .type = DMA_TYPE_RX,
+ .orrc = 16,
+ .chan_fc = false,
+ .desc_fod = true,
+ .desc_in_sram = true,
+ .valid_desc_fetch_ack = false,
+};
+
+static const struct ldma_inst_data dma1tx = {
+ .name = "dma1tx",
+ .type = DMA_TYPE_TX,
+ .orrc = 16,
+ .chan_fc = true,
+ .desc_fod = true,
+ .desc_in_sram = true,
+ .valid_desc_fetch_ack = true,
+};
+
+static const struct ldma_inst_data dma0tx = {
+ .name = "dma0tx",
+ .type = DMA_TYPE_TX,
+ .orrc = 16,
+ .chan_fc = true,
+ .desc_fod = true,
+ .desc_in_sram = true,
+ .valid_desc_fetch_ack = true,
+};
+
+static const struct ldma_inst_data dma3 = {
+ .name = "dma3",
+ .type = DMA_TYPE_MCPY,
+ .orrc = 16,
+ .chan_fc = false,
+ .desc_fod = false,
+ .desc_in_sram = true,
+ .valid_desc_fetch_ack = false,
+};
+
+static const struct ldma_inst_data toe_dma30 = {
+ .name = "toe_dma30",
+ .type = DMA_TYPE_MCPY,
+ .orrc = 16,
+ .chan_fc = false,
+ .desc_fod = false,
+ .desc_in_sram = true,
+ .valid_desc_fetch_ack = true,
+};
+
+static const struct ldma_inst_data toe_dma31 = {
+ .name = "toe_dma31",
+ .type = DMA_TYPE_MCPY,
+ .orrc = 16,
+ .chan_fc = false,
+ .desc_fod = false,
+ .desc_in_sram = true,
+ .valid_desc_fetch_ack = true,
+};
+
+static const struct of_device_id intel_ldma_match[] = {
+ { .compatible = "intel,lgm-cdma", .data = &dma0},
+ { .compatible = "intel,lgm-dma2tx", .data = &dma2tx},
+ { .compatible = "intel,lgm-dma1rx", .data = &dma1rx},
+ { .compatible = "intel,lgm-dma1tx", .data = &dma1tx},
+ { .compatible = "intel,lgm-dma0tx", .data = &dma0tx},
+ { .compatible = "intel,lgm-dma3", .data = &dma3},
+ { .compatible = "intel,lgm-toe-dma30", .data = &toe_dma30},
+ { .compatible = "intel,lgm-toe-dma31", .data = &toe_dma31},
+ {}
+};
+
+static int intel_ldma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dma_device *dma_dev;
+ unsigned long ch_mask;
+ struct ldma_chan *c;
+ struct ldma_port *p;
+ struct ldma_dev *d;
+ u32 id, bitn = 32, j;
+ int i, ret;
+
+ d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ /* Link controller to platform device */
+ d->dev = &pdev->dev;
+
+ d->inst = device_get_match_data(dev);
+ if (!d->inst) {
+ dev_err(dev, "No device match found\n");
+ return -ENODEV;
+ }
+
+ d->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(d->base))
+ return PTR_ERR(d->base);
+
+ /* Power up and reset the DMA engine; some DMA instances may be always on */
+ d->core_clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(d->core_clk))
+ return PTR_ERR(d->core_clk);
+ clk_prepare_enable(d->core_clk);
+
+ d->rst = devm_reset_control_get_optional(dev, NULL);
+ if (IS_ERR(d->rst))
+ return PTR_ERR(d->rst);
+ reset_control_deassert(d->rst);
+
+ ret = devm_add_action_or_reset(dev, ldma_clk_disable, d);
+ if (ret) {
+ dev_err(dev, "Failed to devm_add_action_or_reset, %d\n", ret);
+ return ret;
+ }
+
+ id = readl(d->base + DMA_ID);
+ d->chan_nrs = FIELD_GET(DMA_ID_CHNR, id);
+ d->port_nrs = FIELD_GET(DMA_ID_PNR, id);
+ d->ver = FIELD_GET(DMA_ID_REV, id);
+
+ if (id & DMA_ID_AW_36B)
+ d->flags |= DMA_ADDR_36BIT;
+
+ if (IS_ENABLED(CONFIG_64BIT) && (id & DMA_ID_AW_36B))
+ bitn = 36;
+
+ if (id & DMA_ID_DW_128B)
+ d->flags |= DMA_DATA_128BIT;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(bitn));
+ if (ret) {
+ dev_err(dev, "No usable DMA configuration\n");
+ return ret;
+ }
+
+ if (d->ver == DMA_VER22) {
+ ret = ldma_init_v22(d, pdev);
+ if (ret)
+ return ret;
+ }
+
+ ret = device_property_read_u32(dev, "dma-channel-mask", &d->channels_mask);
+ if (ret < 0)
+ d->channels_mask = GENMASK(d->chan_nrs - 1, 0);
+
+ dma_dev = &d->dma_dev;
+
+ dma_cap_zero(dma_dev->cap_mask);
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+ /* Channel initializations */
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ /* Port initialization */
+ d->ports = devm_kcalloc(dev, d->port_nrs, sizeof(*p), GFP_KERNEL);
+ if (!d->ports)
+ return -ENOMEM;
+
+ /* Channel initialization */
+ d->chans = devm_kcalloc(d->dev, d->chan_nrs, sizeof(*c), GFP_KERNEL);
+ if (!d->chans)
+ return -ENOMEM;
+
+ for (i = 0; i < d->port_nrs; i++) {
+ p = &d->ports[i];
+ p->portid = i;
+ p->ldev = d;
+ }
+
+ ret = ldma_cfg_init(d);
+ if (ret)
+ return ret;
+
+ dma_dev->dev = &pdev->dev;
+
+ ch_mask = (unsigned long)d->channels_mask;
+ for_each_set_bit(j, &ch_mask, d->chan_nrs) {
+ if (d->ver == DMA_VER22)
+ ldma_dma_init_v22(j, d);
+ else
+ ldma_dma_init_v3X(j, d);
+ }
+
+ dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = ldma_free_chan_resources;
+ dma_dev->device_terminate_all = ldma_terminate_all;
+ dma_dev->device_issue_pending = ldma_issue_pending;
+ dma_dev->device_tx_status = ldma_tx_status;
+ dma_dev->device_resume = ldma_resume_chan;
+ dma_dev->device_pause = ldma_pause_chan;
+ dma_dev->device_prep_slave_sg = ldma_prep_slave_sg;
+
+ if (d->ver == DMA_VER22) {
+ dma_dev->device_config = ldma_slave_config;
+ dma_dev->device_synchronize = ldma_synchronize;
+ dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ dma_dev->directions = BIT(DMA_MEM_TO_DEV) |
+ BIT(DMA_DEV_TO_MEM);
+ dma_dev->residue_granularity =
+ DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ }
+
+ platform_set_drvdata(pdev, d);
+
+ ldma_dev_init(d);
+
+ ret = dma_async_device_register(dma_dev);
+ if (ret) {
+ dev_err(dev, "Failed to register slave DMA engine device\n");
+ return ret;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node, ldma_xlate, d);
+ if (ret) {
+ dev_err(dev, "Failed to register OF DMA controller\n");
+ dma_async_device_unregister(dma_dev);
+ return ret;
+ }
+
+ dev_info(dev, "Init done - rev: %x, ports: %d, channels: %d\n", d->ver,
+ d->port_nrs, d->chan_nrs);
+
+ return 0;
+}
+
+static struct platform_driver intel_ldma_driver = {
+ .probe = intel_ldma_probe,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = intel_ldma_match,
+ },
+};
+
+/*
+ * Register this driver via device_initcall to make sure it initializes
+ * before its DMA clients, some of which are platform specific, and so that
+ * the registered DMA channels and DMA capabilities are available to those
+ * clients before their own initialization.
+ */
+static int __init intel_ldma_init(void)
+{
+ return platform_driver_register(&intel_ldma_driver);
+}
+
+device_initcall(intel_ldma_init);
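
A minimal sketch of why the registration order described in the comment above matters, using a hypothetical client driver (all names are assumptions, not part of this patch):

	/* Hypothetical client probe: the ldma channels must already be registered,
	 * otherwise the request fails or keeps deferring the client's probe.
	 */
	static int client_probe(struct platform_device *pdev)
	{
		struct dma_chan *ch = dma_request_chan(&pdev->dev, "tx"); /* assumed name */

		if (IS_ERR(ch))
			return PTR_ERR(ch);	/* typically -EPROBE_DEFER until the controller is up */

		/* ... slave config, descriptor prep, etc. ... */
		dma_release_channel(ch);
		return 0;
	}

Registering the controller at device_initcall level, as done above, is meant to have it up before such clients probe.
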
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index b84303be8edf..89f1814ff27a 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -18,7 +18,6 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
-#include <linux/dma/mmp-pdma.h>
#include "dmaengine.h"
@@ -1148,19 +1147,6 @@ static struct platform_driver mmp_pdma_driver = {
.remove = mmp_pdma_remove,
};
-bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
-{
- struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
-
- if (chan->device->dev->driver != &mmp_pdma_driver.driver)
- return false;
-
- c->drcmr = *(unsigned int *)param;
-
- return true;
-}
-EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
-
module_platform_driver(mmp_pdma_driver);
MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 9fede32641e9..1f0bbaed4643 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -1080,8 +1080,9 @@ static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
}
static const struct of_device_id owl_dma_match[] = {
- { .compatible = "actions,s900-dma", .data = (void *)S900_DMA,},
+ { .compatible = "actions,s500-dma", .data = (void *)S900_DMA,},
{ .compatible = "actions,s700-dma", .data = (void *)S700_DMA,},
+ { .compatible = "actions,s900-dma", .data = (void *)S900_DMA,},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, owl_dma_match);
@@ -1245,6 +1246,7 @@ static int owl_dma_remove(struct platform_device *pdev)
owl_dma_free(od);
clk_disable_unprepare(od->clk);
+ dma_pool_destroy(od->lli_pool);
return 0;
}
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index bc0f66af0f11..fd8d2bc3be9f 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -3195,7 +3195,7 @@ probe_err2:
return ret;
}
-static int pl330_remove(struct amba_device *adev)
+static void pl330_remove(struct amba_device *adev)
{
struct pl330_dmac *pl330 = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
@@ -3235,7 +3235,6 @@ static int pl330_remove(struct amba_device *adev)
if (pl330->rstc)
reset_control_assert(pl330->rstc);
- return 0;
}
static const struct amba_id pl330_ids[] = {
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 88579857ca1d..c8a77b428b52 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -1270,13 +1270,13 @@ static int bam_dma_probe(struct platform_device *pdev)
dev_err(bdev->dev, "num-ees unspecified in dt\n");
}
- bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
- if (IS_ERR(bdev->bamclk)) {
- if (!bdev->controlled_remotely)
- return PTR_ERR(bdev->bamclk);
+ if (bdev->controlled_remotely)
+ bdev->bamclk = devm_clk_get_optional(bdev->dev, "bam_clk");
+ else
+ bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
- bdev->bamclk = NULL;
- }
+ if (IS_ERR(bdev->bamclk))
+ return PTR_ERR(bdev->bamclk);
ret = clk_prepare_enable(bdev->bamclk);
if (ret) {
@@ -1350,7 +1350,7 @@ static int bam_dma_probe(struct platform_device *pdev)
if (ret)
goto err_unregister_dma;
- if (bdev->controlled_remotely) {
+ if (!bdev->bamclk) {
pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -1438,10 +1438,10 @@ static int __maybe_unused bam_dma_suspend(struct device *dev)
{
struct bam_device *bdev = dev_get_drvdata(dev);
- if (!bdev->controlled_remotely)
+ if (bdev->bamclk) {
pm_runtime_force_suspend(dev);
-
- clk_unprepare(bdev->bamclk);
+ clk_unprepare(bdev->bamclk);
+ }
return 0;
}
@@ -1451,12 +1451,13 @@ static int __maybe_unused bam_dma_resume(struct device *dev)
struct bam_device *bdev = dev_get_drvdata(dev);
int ret;
- ret = clk_prepare(bdev->bamclk);
- if (ret)
- return ret;
+ if (bdev->bamclk) {
+ ret = clk_prepare(bdev->bamclk);
+ if (ret)
+ return ret;
- if (!bdev->controlled_remotely)
pm_runtime_force_resume(dev);
+ }
return 0;
}
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 1a0bf6b0567a..57f5ee4235c7 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -584,7 +584,7 @@ static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
gpi_write_reg(gpii, addr, val);
}
-static inline void
+static __always_inline void
gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
{
void __iomem *addr = gpii->regs + offset;
@@ -1700,7 +1700,7 @@ static int gpi_create_i2c_tre(struct gchan *chan, struct gpi_desc *desc,
tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
- };
+ }
for (i = 0; i < tre_idx; i++)
dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n", i, desc->tre[i].dword[0],
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index a57705356e8b..d530c1bf11d9 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -189,7 +189,8 @@ struct rcar_dmac_chan {
* struct rcar_dmac - R-Car Gen2 DMA Controller
* @engine: base DMA engine object
* @dev: the hardware device
- * @iomem: remapped I/O memory base
+ * @dmac_base: remapped base register block
+ * @chan_base: remapped channel register block (optional)
* @n_channels: number of available channels
* @channels: array of DMAC channels
* @channels_mask: bitfield of which DMA channels are managed by this driver
@@ -198,7 +199,8 @@ struct rcar_dmac_chan {
struct rcar_dmac {
struct dma_device engine;
struct device *dev;
- void __iomem *iomem;
+ void __iomem *dmac_base;
+ void __iomem *chan_base;
unsigned int n_channels;
struct rcar_dmac_chan *channels;
@@ -209,6 +211,10 @@ struct rcar_dmac {
#define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine)
+#define for_each_rcar_dmac_chan(i, dmac, chan) \
+ for (i = 0, chan = &(dmac)->channels[0]; i < (dmac)->n_channels; i++, chan++) \
+ if (!((dmac)->channels_mask & BIT(i))) continue; else
+
/*
* struct rcar_dmac_of_data - This driver's OF data
* @chan_offset_base: DMAC channels base offset
@@ -230,7 +236,7 @@ struct rcar_dmac_of_data {
#define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8)
#define RCAR_DMAOR_AE (1 << 2)
#define RCAR_DMAOR_DME (1 << 0)
-#define RCAR_DMACHCLR 0x0080
+#define RCAR_DMACHCLR 0x0080 /* Not on R-Car V3U */
#define RCAR_DMADPSEC 0x00a0
#define RCAR_DMASAR 0x0000
@@ -293,6 +299,9 @@ struct rcar_dmac_of_data {
#define RCAR_DMAFIXDAR 0x0014
#define RCAR_DMAFIXDPBASE 0x0060
+/* For R-Car V3U */
+#define RCAR_V3U_DMACHCLR 0x0100
+
/* Hardcode the MEMCPY transfer size to 4 bytes. */
#define RCAR_DMAC_MEMCPY_XFER_SIZE 4
@@ -303,17 +312,17 @@ struct rcar_dmac_of_data {
static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
{
if (reg == RCAR_DMAOR)
- writew(data, dmac->iomem + reg);
+ writew(data, dmac->dmac_base + reg);
else
- writel(data, dmac->iomem + reg);
+ writel(data, dmac->dmac_base + reg);
}
static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
{
if (reg == RCAR_DMAOR)
- return readw(dmac->iomem + reg);
+ return readw(dmac->dmac_base + reg);
else
- return readl(dmac->iomem + reg);
+ return readl(dmac->dmac_base + reg);
}
static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
@@ -332,6 +341,28 @@ static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
writel(data, chan->iomem + reg);
}
+static void rcar_dmac_chan_clear(struct rcar_dmac *dmac,
+ struct rcar_dmac_chan *chan)
+{
+ if (dmac->chan_base)
+ rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1);
+ else
+ rcar_dmac_write(dmac, RCAR_DMACHCLR, BIT(chan->index));
+}
+
+static void rcar_dmac_chan_clear_all(struct rcar_dmac *dmac)
+{
+ struct rcar_dmac_chan *chan;
+ unsigned int i;
+
+ if (dmac->chan_base) {
+ for_each_rcar_dmac_chan(i, dmac, chan)
+ rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1);
+ } else {
+ rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
+ }
+}
+
/* -----------------------------------------------------------------------------
* Initialization and configuration
*/
@@ -447,7 +478,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac)
u16 dmaor;
/* Clear all channels and enable the DMAC globally. */
- rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
+ rcar_dmac_chan_clear_all(dmac);
rcar_dmac_write(dmac, RCAR_DMAOR,
RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
@@ -817,15 +848,11 @@ static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
{
+ struct rcar_dmac_chan *chan;
unsigned int i;
/* Stop all channels. */
- for (i = 0; i < dmac->n_channels; ++i) {
- struct rcar_dmac_chan *chan = &dmac->channels[i];
-
- if (!(dmac->channels_mask & BIT(i)))
- continue;
-
+ for_each_rcar_dmac_chan(i, dmac, chan) {
/* Stop and reinitialize the channel. */
spin_lock_irq(&chan->lock);
rcar_dmac_chan_halt(chan);
@@ -1566,7 +1593,7 @@ static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
* because channel is already stopped in error case.
* We need to clear register and check DE bit as recovery.
*/
- rcar_dmac_write(dmac, RCAR_DMACHCLR, 1 << chan->index);
+ rcar_dmac_chan_clear(dmac, chan);
rcar_dmac_chcr_de_barrier(chan);
reinit = true;
goto spin_lock_end;
@@ -1732,9 +1759,7 @@ static const struct dev_pm_ops rcar_dmac_pm = {
*/
static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
- struct rcar_dmac_chan *rchan,
- const struct rcar_dmac_of_data *data,
- unsigned int index)
+ struct rcar_dmac_chan *rchan)
{
struct platform_device *pdev = to_platform_device(dmac->dev);
struct dma_chan *chan = &rchan->chan;
@@ -1742,9 +1767,6 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
char *irqname;
int ret;
- rchan->index = index;
- rchan->iomem = dmac->iomem + data->chan_offset_base +
- data->chan_offset_stride * index;
rchan->mid_rid = -EINVAL;
spin_lock_init(&rchan->lock);
@@ -1756,13 +1778,13 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
INIT_LIST_HEAD(&rchan->desc.wait);
/* Request the channel interrupt. */
- sprintf(pdev_irqname, "ch%u", index);
+ sprintf(pdev_irqname, "ch%u", rchan->index);
rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
if (rchan->irq < 0)
return -ENODEV;
irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
- dev_name(dmac->dev), index);
+ dev_name(dmac->dev), rchan->index);
if (!irqname)
return -ENOMEM;
@@ -1828,9 +1850,11 @@ static int rcar_dmac_probe(struct platform_device *pdev)
DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
+ const struct rcar_dmac_of_data *data;
+ struct rcar_dmac_chan *chan;
struct dma_device *engine;
+ void __iomem *chan_base;
struct rcar_dmac *dmac;
- const struct rcar_dmac_of_data *data;
unsigned int i;
int ret;
@@ -1868,9 +1892,24 @@ static int rcar_dmac_probe(struct platform_device *pdev)
return -ENOMEM;
/* Request resources. */
- dmac->iomem = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(dmac->iomem))
- return PTR_ERR(dmac->iomem);
+ dmac->dmac_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dmac->dmac_base))
+ return PTR_ERR(dmac->dmac_base);
+
+ if (!data->chan_offset_base) {
+ dmac->chan_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(dmac->chan_base))
+ return PTR_ERR(dmac->chan_base);
+
+ chan_base = dmac->chan_base;
+ } else {
+ chan_base = dmac->dmac_base + data->chan_offset_base;
+ }
+
+ for_each_rcar_dmac_chan(i, dmac, chan) {
+ chan->index = i;
+ chan->iomem = chan_base + i * data->chan_offset_stride;
+ }
/* Enable runtime PM and initialize the device. */
pm_runtime_enable(&pdev->dev);
@@ -1916,11 +1955,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&engine->channels);
- for (i = 0; i < dmac->n_channels; ++i) {
- if (!(dmac->channels_mask & BIT(i)))
- continue;
-
- ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], data, i);
+ for_each_rcar_dmac_chan(i, dmac, chan) {
+ ret = rcar_dmac_chan_probe(dmac, chan);
if (ret < 0)
goto error;
}
@@ -1968,14 +2004,22 @@ static void rcar_dmac_shutdown(struct platform_device *pdev)
}
static const struct rcar_dmac_of_data rcar_dmac_data = {
- .chan_offset_base = 0x8000,
- .chan_offset_stride = 0x80,
+ .chan_offset_base = 0x8000,
+ .chan_offset_stride = 0x80,
+};
+
+static const struct rcar_dmac_of_data rcar_v3u_dmac_data = {
+ .chan_offset_base = 0x0,
+ .chan_offset_stride = 0x1000,
};
static const struct of_device_id rcar_dmac_of_ids[] = {
{
.compatible = "renesas,rcar-dmac",
.data = &rcar_dmac_data,
+ }, {
+ .compatible = "renesas,dmac-r8a779a0",
+ .data = &rcar_v3u_dmac_data,
},
{ /* Sentinel */ }
};
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
deleted file mode 100644
index a5c2843384fd..000000000000
--- a/drivers/dma/sirf-dma.c
+++ /dev/null
@@ -1,1170 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * DMA controller driver for CSR SiRFprimaII
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/module.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/pm_runtime.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/clk.h>
-#include <linux/of_dma.h>
-#include <linux/sirfsoc_dma.h>
-
-#include "dmaengine.h"
-
-#define SIRFSOC_DMA_VER_A7V1 1
-#define SIRFSOC_DMA_VER_A7V2 2
-#define SIRFSOC_DMA_VER_A6 4
-
-#define SIRFSOC_DMA_DESCRIPTORS 16
-#define SIRFSOC_DMA_CHANNELS 16
-#define SIRFSOC_DMA_TABLE_NUM 256
-
-#define SIRFSOC_DMA_CH_ADDR 0x00
-#define SIRFSOC_DMA_CH_XLEN 0x04
-#define SIRFSOC_DMA_CH_YLEN 0x08
-#define SIRFSOC_DMA_CH_CTRL 0x0C
-
-#define SIRFSOC_DMA_WIDTH_0 0x100
-#define SIRFSOC_DMA_CH_VALID 0x140
-#define SIRFSOC_DMA_CH_INT 0x144
-#define SIRFSOC_DMA_INT_EN 0x148
-#define SIRFSOC_DMA_INT_EN_CLR 0x14C
-#define SIRFSOC_DMA_CH_LOOP_CTRL 0x150
-#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR 0x154
-#define SIRFSOC_DMA_WIDTH_ATLAS7 0x10
-#define SIRFSOC_DMA_VALID_ATLAS7 0x14
-#define SIRFSOC_DMA_INT_ATLAS7 0x18
-#define SIRFSOC_DMA_INT_EN_ATLAS7 0x1c
-#define SIRFSOC_DMA_LOOP_CTRL_ATLAS7 0x20
-#define SIRFSOC_DMA_CUR_DATA_ADDR 0x34
-#define SIRFSOC_DMA_MUL_ATLAS7 0x38
-#define SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7 0x158
-#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7 0x15C
-#define SIRFSOC_DMA_IOBG_SCMD_EN 0x800
-#define SIRFSOC_DMA_EARLY_RESP_SET 0x818
-#define SIRFSOC_DMA_EARLY_RESP_CLR 0x81C
-
-#define SIRFSOC_DMA_MODE_CTRL_BIT 4
-#define SIRFSOC_DMA_DIR_CTRL_BIT 5
-#define SIRFSOC_DMA_MODE_CTRL_BIT_ATLAS7 2
-#define SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7 3
-#define SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7 4
-#define SIRFSOC_DMA_TAB_NUM_ATLAS7 7
-#define SIRFSOC_DMA_CHAIN_INT_BIT_ATLAS7 5
-#define SIRFSOC_DMA_CHAIN_FLAG_SHIFT_ATLAS7 25
-#define SIRFSOC_DMA_CHAIN_ADDR_SHIFT 32
-
-#define SIRFSOC_DMA_INT_FINI_INT_ATLAS7 BIT(0)
-#define SIRFSOC_DMA_INT_CNT_INT_ATLAS7 BIT(1)
-#define SIRFSOC_DMA_INT_PAU_INT_ATLAS7 BIT(2)
-#define SIRFSOC_DMA_INT_LOOP_INT_ATLAS7 BIT(3)
-#define SIRFSOC_DMA_INT_INV_INT_ATLAS7 BIT(4)
-#define SIRFSOC_DMA_INT_END_INT_ATLAS7 BIT(5)
-#define SIRFSOC_DMA_INT_ALL_ATLAS7 0x3F
-
-/* xlen and dma_width register is in 4 bytes boundary */
-#define SIRFSOC_DMA_WORD_LEN 4
-#define SIRFSOC_DMA_XLEN_MAX_V1 0x800
-#define SIRFSOC_DMA_XLEN_MAX_V2 0x1000
-
-struct sirfsoc_dma_desc {
- struct dma_async_tx_descriptor desc;
- struct list_head node;
-
- /* SiRFprimaII 2D-DMA parameters */
-
- int xlen; /* DMA xlen */
- int ylen; /* DMA ylen */
- int width; /* DMA width */
- int dir;
- bool cyclic; /* is loop DMA? */
- bool chain; /* is chain DMA? */
- u32 addr; /* DMA buffer address */
- u64 chain_table[SIRFSOC_DMA_TABLE_NUM]; /* chain tbl */
-};
-
-struct sirfsoc_dma_chan {
- struct dma_chan chan;
- struct list_head free;
- struct list_head prepared;
- struct list_head queued;
- struct list_head active;
- struct list_head completed;
- unsigned long happened_cyclic;
- unsigned long completed_cyclic;
-
- /* Lock for this structure */
- spinlock_t lock;
-
- int mode;
-};
-
-struct sirfsoc_dma_regs {
- u32 ctrl[SIRFSOC_DMA_CHANNELS];
- u32 interrupt_en;
-};
-
-struct sirfsoc_dma {
- struct dma_device dma;
- struct tasklet_struct tasklet;
- struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS];
- void __iomem *base;
- int irq;
- struct clk *clk;
- int type;
- void (*exec_desc)(struct sirfsoc_dma_desc *sdesc,
- int cid, int burst_mode, void __iomem *base);
- struct sirfsoc_dma_regs regs_save;
-};
-
-struct sirfsoc_dmadata {
- void (*exec)(struct sirfsoc_dma_desc *sdesc,
- int cid, int burst_mode, void __iomem *base);
- int type;
-};
-
-enum sirfsoc_dma_chain_flag {
- SIRFSOC_DMA_CHAIN_NORMAL = 0x01,
- SIRFSOC_DMA_CHAIN_PAUSE = 0x02,
- SIRFSOC_DMA_CHAIN_LOOP = 0x03,
- SIRFSOC_DMA_CHAIN_END = 0x04
-};
-
-#define DRV_NAME "sirfsoc_dma"
-
-static int sirfsoc_dma_runtime_suspend(struct device *dev);
-
-/* Convert struct dma_chan to struct sirfsoc_dma_chan */
-static inline
-struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
-{
- return container_of(c, struct sirfsoc_dma_chan, chan);
-}
-
-/* Convert struct dma_chan to struct sirfsoc_dma */
-static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
- return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
-}
-
-static void sirfsoc_dma_execute_hw_a7v2(struct sirfsoc_dma_desc *sdesc,
- int cid, int burst_mode, void __iomem *base)
-{
- if (sdesc->chain) {
- /* DMA v2 HW chain mode */
- writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
- (sdesc->chain <<
- SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
- (0x8 << SIRFSOC_DMA_TAB_NUM_ATLAS7) | 0x3,
- base + SIRFSOC_DMA_CH_CTRL);
- } else {
- /* DMA v2 legacy mode */
- writel_relaxed(sdesc->xlen, base + SIRFSOC_DMA_CH_XLEN);
- writel_relaxed(sdesc->ylen, base + SIRFSOC_DMA_CH_YLEN);
- writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_ATLAS7);
- writel_relaxed((sdesc->width*((sdesc->ylen+1)>>1)),
- base + SIRFSOC_DMA_MUL_ATLAS7);
- writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
- (sdesc->chain <<
- SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
- 0x3, base + SIRFSOC_DMA_CH_CTRL);
- }
- writel_relaxed(sdesc->chain ? SIRFSOC_DMA_INT_END_INT_ATLAS7 :
- (SIRFSOC_DMA_INT_FINI_INT_ATLAS7 |
- SIRFSOC_DMA_INT_LOOP_INT_ATLAS7),
- base + SIRFSOC_DMA_INT_EN_ATLAS7);
- writel(sdesc->addr, base + SIRFSOC_DMA_CH_ADDR);
- if (sdesc->cyclic)
- writel(0x10001, base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
-}
-
-static void sirfsoc_dma_execute_hw_a7v1(struct sirfsoc_dma_desc *sdesc,
- int cid, int burst_mode, void __iomem *base)
-{
- writel_relaxed(1, base + SIRFSOC_DMA_IOBG_SCMD_EN);
- writel_relaxed((1 << cid), base + SIRFSOC_DMA_EARLY_RESP_SET);
- writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
- writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
- (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
- base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
- writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
- writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
- writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
- (1 << cid), base + SIRFSOC_DMA_INT_EN);
- writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
- if (sdesc->cyclic) {
- writel((1 << cid) | 1 << (cid + 16) |
- readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7),
- base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
- }
-
-}
-
-static void sirfsoc_dma_execute_hw_a6(struct sirfsoc_dma_desc *sdesc,
- int cid, int burst_mode, void __iomem *base)
-{
- writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
- writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
- (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
- base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
- writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
- writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
- writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
- (1 << cid), base + SIRFSOC_DMA_INT_EN);
- writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
- if (sdesc->cyclic) {
- writel((1 << cid) | 1 << (cid + 16) |
- readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL),
- base + SIRFSOC_DMA_CH_LOOP_CTRL);
- }
-
-}
-
-/* Execute all queued DMA descriptors */
-static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
-{
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
- int cid = schan->chan.chan_id;
- struct sirfsoc_dma_desc *sdesc = NULL;
- void __iomem *base;
-
- /*
- * lock has been held by functions calling this, so we don't hold
- * lock again
- */
- base = sdma->base;
- sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
- node);
- /* Move the first queued descriptor to active list */
- list_move_tail(&sdesc->node, &schan->active);
-
- if (sdma->type == SIRFSOC_DMA_VER_A7V2)
- cid = 0;
-
- /* Start the DMA transfer */
- sdma->exec_desc(sdesc, cid, schan->mode, base);
-
- if (sdesc->cyclic)
- schan->happened_cyclic = schan->completed_cyclic = 0;
-}
-
-/* Interrupt handler */
-static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
-{
- struct sirfsoc_dma *sdma = data;
- struct sirfsoc_dma_chan *schan;
- struct sirfsoc_dma_desc *sdesc = NULL;
- u32 is;
- bool chain;
- int ch;
- void __iomem *reg;
-
- switch (sdma->type) {
- case SIRFSOC_DMA_VER_A6:
- case SIRFSOC_DMA_VER_A7V1:
- is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
- reg = sdma->base + SIRFSOC_DMA_CH_INT;
- while ((ch = fls(is) - 1) >= 0) {
- is &= ~(1 << ch);
- writel_relaxed(1 << ch, reg);
- schan = &sdma->channels[ch];
- spin_lock(&schan->lock);
- sdesc = list_first_entry(&schan->active,
- struct sirfsoc_dma_desc, node);
- if (!sdesc->cyclic) {
- /* Execute queued descriptors */
- list_splice_tail_init(&schan->active,
- &schan->completed);
- dma_cookie_complete(&sdesc->desc);
- if (!list_empty(&schan->queued))
- sirfsoc_dma_execute(schan);
- } else
- schan->happened_cyclic++;
- spin_unlock(&schan->lock);
- }
- break;
-
- case SIRFSOC_DMA_VER_A7V2:
- is = readl(sdma->base + SIRFSOC_DMA_INT_ATLAS7);
-
- reg = sdma->base + SIRFSOC_DMA_INT_ATLAS7;
- writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7, reg);
- schan = &sdma->channels[0];
- spin_lock(&schan->lock);
- sdesc = list_first_entry(&schan->active,
- struct sirfsoc_dma_desc, node);
- if (!sdesc->cyclic) {
- chain = sdesc->chain;
- if ((chain && (is & SIRFSOC_DMA_INT_END_INT_ATLAS7)) ||
- (!chain &&
- (is & SIRFSOC_DMA_INT_FINI_INT_ATLAS7))) {
- /* Execute queued descriptors */
- list_splice_tail_init(&schan->active,
- &schan->completed);
- dma_cookie_complete(&sdesc->desc);
- if (!list_empty(&schan->queued))
- sirfsoc_dma_execute(schan);
- }
- } else if (sdesc->cyclic && (is &
- SIRFSOC_DMA_INT_LOOP_INT_ATLAS7))
- schan->happened_cyclic++;
-
- spin_unlock(&schan->lock);
- break;
-
- default:
- break;
- }
-
- /* Schedule tasklet */
- tasklet_schedule(&sdma->tasklet);
-
- return IRQ_HANDLED;
-}
-
-/* process completed descriptors */
-static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
-{
- dma_cookie_t last_cookie = 0;
- struct sirfsoc_dma_chan *schan;
- struct sirfsoc_dma_desc *sdesc;
- struct dma_async_tx_descriptor *desc;
- unsigned long flags;
- unsigned long happened_cyclic;
- LIST_HEAD(list);
- int i;
-
- for (i = 0; i < sdma->dma.chancnt; i++) {
- schan = &sdma->channels[i];
-
- /* Get all completed descriptors */
- spin_lock_irqsave(&schan->lock, flags);
- if (!list_empty(&schan->completed)) {
- list_splice_tail_init(&schan->completed, &list);
- spin_unlock_irqrestore(&schan->lock, flags);
-
- /* Execute callbacks and run dependencies */
- list_for_each_entry(sdesc, &list, node) {
- desc = &sdesc->desc;
-
- dmaengine_desc_get_callback_invoke(desc, NULL);
- last_cookie = desc->cookie;
- dma_run_dependencies(desc);
- }
-
- /* Free descriptors */
- spin_lock_irqsave(&schan->lock, flags);
- list_splice_tail_init(&list, &schan->free);
- schan->chan.completed_cookie = last_cookie;
- spin_unlock_irqrestore(&schan->lock, flags);
- } else {
- if (list_empty(&schan->active)) {
- spin_unlock_irqrestore(&schan->lock, flags);
- continue;
- }
-
- /* for cyclic channel, desc is always in active list */
- sdesc = list_first_entry(&schan->active,
- struct sirfsoc_dma_desc, node);
-
- /* cyclic DMA */
- happened_cyclic = schan->happened_cyclic;
- spin_unlock_irqrestore(&schan->lock, flags);
-
- desc = &sdesc->desc;
- while (happened_cyclic != schan->completed_cyclic) {
- dmaengine_desc_get_callback_invoke(desc, NULL);
- schan->completed_cyclic++;
- }
- }
- }
-}
-
-/* DMA Tasklet */
-static void sirfsoc_dma_tasklet(struct tasklet_struct *t)
-{
- struct sirfsoc_dma *sdma = from_tasklet(sdma, t, tasklet);
-
- sirfsoc_dma_process_completed(sdma);
-}
-
-/* Submit descriptor to hardware */
-static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
- struct sirfsoc_dma_desc *sdesc;
- unsigned long flags;
- dma_cookie_t cookie;
-
- sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
-
- spin_lock_irqsave(&schan->lock, flags);
-
- /* Move descriptor to queue */
- list_move_tail(&sdesc->node, &schan->queued);
-
- cookie = dma_cookie_assign(txd);
-
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return cookie;
-}
-
-static int sirfsoc_dma_slave_config(struct dma_chan *chan,
- struct dma_slave_config *config)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- unsigned long flags;
-
- if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
- (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
- return -EINVAL;
-
- spin_lock_irqsave(&schan->lock, flags);
- schan->mode = (config->src_maxburst == 4 ? 1 : 0);
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return 0;
-}
-
-static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
- int cid = schan->chan.chan_id;
- unsigned long flags;
-
- spin_lock_irqsave(&schan->lock, flags);
-
- switch (sdma->type) {
- case SIRFSOC_DMA_VER_A7V1:
- writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
- writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_INT);
- writel_relaxed((1 << cid) | 1 << (cid + 16),
- sdma->base +
- SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
- writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
- break;
- case SIRFSOC_DMA_VER_A7V2:
- writel_relaxed(0, sdma->base + SIRFSOC_DMA_INT_EN_ATLAS7);
- writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7,
- sdma->base + SIRFSOC_DMA_INT_ATLAS7);
- writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
- writel_relaxed(0, sdma->base + SIRFSOC_DMA_VALID_ATLAS7);
- break;
- case SIRFSOC_DMA_VER_A6:
- writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
- ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
- writel_relaxed(readl_relaxed(sdma->base +
- SIRFSOC_DMA_CH_LOOP_CTRL) &
- ~((1 << cid) | 1 << (cid + 16)),
- sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
- writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
- break;
- default:
- break;
- }
-
- list_splice_tail_init(&schan->active, &schan->free);
- list_splice_tail_init(&schan->queued, &schan->free);
-
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return 0;
-}
-
-static int sirfsoc_dma_pause_chan(struct dma_chan *chan)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
- int cid = schan->chan.chan_id;
- unsigned long flags;
-
- spin_lock_irqsave(&schan->lock, flags);
-
- switch (sdma->type) {
- case SIRFSOC_DMA_VER_A7V1:
- writel_relaxed((1 << cid) | 1 << (cid + 16),
- sdma->base +
- SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
- break;
- case SIRFSOC_DMA_VER_A7V2:
- writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
- break;
- case SIRFSOC_DMA_VER_A6:
- writel_relaxed(readl_relaxed(sdma->base +
- SIRFSOC_DMA_CH_LOOP_CTRL) &
- ~((1 << cid) | 1 << (cid + 16)),
- sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
- break;
-
- default:
- break;
- }
-
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return 0;
-}
-
-static int sirfsoc_dma_resume_chan(struct dma_chan *chan)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
- int cid = schan->chan.chan_id;
- unsigned long flags;
-
- spin_lock_irqsave(&schan->lock, flags);
- switch (sdma->type) {
- case SIRFSOC_DMA_VER_A7V1:
- writel_relaxed((1 << cid) | 1 << (cid + 16),
- sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
- break;
- case SIRFSOC_DMA_VER_A7V2:
- writel_relaxed(0x10001,
- sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
- break;
- case SIRFSOC_DMA_VER_A6:
- writel_relaxed(readl_relaxed(sdma->base +
- SIRFSOC_DMA_CH_LOOP_CTRL) |
- ((1 << cid) | 1 << (cid + 16)),
- sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
- break;
-
- default:
- break;
- }
-
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return 0;
-}
-
-/* Alloc channel resources */
-static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
-{
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma_desc *sdesc;
- unsigned long flags;
- LIST_HEAD(descs);
- int i;
-
- pm_runtime_get_sync(sdma->dma.dev);
-
- /* Alloc descriptors for this channel */
- for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
- sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
- if (!sdesc) {
- dev_notice(sdma->dma.dev, "Memory allocation error. "
- "Allocated only %u descriptors\n", i);
- break;
- }
-
- dma_async_tx_descriptor_init(&sdesc->desc, chan);
- sdesc->desc.flags = DMA_CTRL_ACK;
- sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
-
- list_add_tail(&sdesc->node, &descs);
- }
-
- /* Return error only if no descriptors were allocated */
- if (i == 0)
- return -ENOMEM;
-
- spin_lock_irqsave(&schan->lock, flags);
-
- list_splice_tail_init(&descs, &schan->free);
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return i;
-}
-
-/* Free channel resources */
-static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
- struct sirfsoc_dma_desc *sdesc, *tmp;
- unsigned long flags;
- LIST_HEAD(descs);
-
- spin_lock_irqsave(&schan->lock, flags);
-
- /* Channel must be idle */
- BUG_ON(!list_empty(&schan->prepared));
- BUG_ON(!list_empty(&schan->queued));
- BUG_ON(!list_empty(&schan->active));
- BUG_ON(!list_empty(&schan->completed));
-
- /* Move data */
- list_splice_tail_init(&schan->free, &descs);
-
- spin_unlock_irqrestore(&schan->lock, flags);
-
- /* Free descriptors */
- list_for_each_entry_safe(sdesc, tmp, &descs, node)
- kfree(sdesc);
-
- pm_runtime_put(sdma->dma.dev);
-}
-
-/* Send pending descriptor to hardware */
-static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&schan->lock, flags);
-
- if (list_empty(&schan->active) && !list_empty(&schan->queued))
- sirfsoc_dma_execute(schan);
-
- spin_unlock_irqrestore(&schan->lock, flags);
-}
-
-/* Check request completion status */
-static enum dma_status
-sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
- struct dma_tx_state *txstate)
-{
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- unsigned long flags;
- enum dma_status ret;
- struct sirfsoc_dma_desc *sdesc;
- int cid = schan->chan.chan_id;
- unsigned long dma_pos;
- unsigned long dma_request_bytes;
- unsigned long residue;
-
- spin_lock_irqsave(&schan->lock, flags);
-
- if (list_empty(&schan->active)) {
- ret = dma_cookie_status(chan, cookie, txstate);
- dma_set_residue(txstate, 0);
- spin_unlock_irqrestore(&schan->lock, flags);
- return ret;
- }
- sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, node);
- if (sdesc->cyclic)
- dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
- (sdesc->width * SIRFSOC_DMA_WORD_LEN);
- else
- dma_request_bytes = sdesc->xlen * SIRFSOC_DMA_WORD_LEN;
-
- ret = dma_cookie_status(chan, cookie, txstate);
-
- if (sdma->type == SIRFSOC_DMA_VER_A7V2)
- cid = 0;
-
- if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
- dma_pos = readl_relaxed(sdma->base + SIRFSOC_DMA_CUR_DATA_ADDR);
- } else {
- dma_pos = readl_relaxed(
- sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) << 2;
- }
-
- residue = dma_request_bytes - (dma_pos - sdesc->addr);
- dma_set_residue(txstate, residue);
-
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return ret;
-}
-
-static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
- struct dma_chan *chan, struct dma_interleaved_template *xt,
- unsigned long flags)
-{
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma_desc *sdesc = NULL;
- unsigned long iflags;
- int ret;
-
- if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
- ret = -EINVAL;
- goto err_dir;
- }
-
- /* Get free descriptor */
- spin_lock_irqsave(&schan->lock, iflags);
- if (!list_empty(&schan->free)) {
- sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
- node);
- list_del(&sdesc->node);
- }
- spin_unlock_irqrestore(&schan->lock, iflags);
-
- if (!sdesc) {
- /* try to free completed descriptors */
- sirfsoc_dma_process_completed(sdma);
- ret = 0;
- goto no_desc;
- }
-
- /* Place descriptor in prepared list */
- spin_lock_irqsave(&schan->lock, iflags);
-
- /*
- * Number of chunks in a frame can only be 1 for prima2
- * and ylen (number of frame - 1) must be at least 0
- */
- if ((xt->frame_size == 1) && (xt->numf > 0)) {
- sdesc->cyclic = 0;
- sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
- sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
- SIRFSOC_DMA_WORD_LEN;
- sdesc->ylen = xt->numf - 1;
- if (xt->dir == DMA_MEM_TO_DEV) {
- sdesc->addr = xt->src_start;
- sdesc->dir = 1;
- } else {
- sdesc->addr = xt->dst_start;
- sdesc->dir = 0;
- }
-
- list_add_tail(&sdesc->node, &schan->prepared);
- } else {
- pr_err("sirfsoc DMA Invalid xfer\n");
- ret = -EINVAL;
- goto err_xfer;
- }
- spin_unlock_irqrestore(&schan->lock, iflags);
-
- return &sdesc->desc;
-err_xfer:
- spin_unlock_irqrestore(&schan->lock, iflags);
-no_desc:
-err_dir:
- return ERR_PTR(ret);
-}
-
-static struct dma_async_tx_descriptor *
-sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
- size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction, unsigned long flags)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma_desc *sdesc = NULL;
- unsigned long iflags;
-
- /*
- * we only support cycle transfer with 2 period
- * If the X-length is set to 0, it would be the loop mode.
- * The DMA address keeps increasing until reaching the end of a loop
- * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
- * the DMA address goes back to the beginning of this area.
- * In loop mode, the DMA data region is divided into two parts, BUFA
- * and BUFB. DMA controller generates interrupts twice in each loop:
- * when the DMA address reaches the end of BUFA or the end of the
- * BUFB
- */
- if (buf_len != 2 * period_len)
- return ERR_PTR(-EINVAL);
-
- /* Get free descriptor */
- spin_lock_irqsave(&schan->lock, iflags);
- if (!list_empty(&schan->free)) {
- sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
- node);
- list_del(&sdesc->node);
- }
- spin_unlock_irqrestore(&schan->lock, iflags);
-
- if (!sdesc)
- return NULL;
-
- /* Place descriptor in prepared list */
- spin_lock_irqsave(&schan->lock, iflags);
- sdesc->addr = addr;
- sdesc->cyclic = 1;
- sdesc->xlen = 0;
- sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
- sdesc->width = 1;
- list_add_tail(&sdesc->node, &schan->prepared);
- spin_unlock_irqrestore(&schan->lock, iflags);
-
- return &sdesc->desc;
-}
-
-/*
- * The DMA controller consists of 16 independent DMA channels.
- * Each channel is allocated to a different function
- */
-bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
-{
- unsigned int ch_nr = (unsigned int) chan_id;
-
- if (ch_nr == chan->chan_id +
- chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(sirfsoc_dma_filter_id);
-
-#define SIRFSOC_DMA_BUSWIDTHS \
- (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
- BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
- BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
-
-static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
-{
- struct sirfsoc_dma *sdma = ofdma->of_dma_data;
- unsigned int request = dma_spec->args[0];
-
- if (request >= SIRFSOC_DMA_CHANNELS)
- return NULL;
-
- return dma_get_slave_channel(&sdma->channels[request].chan);
-}
-
-static int sirfsoc_dma_probe(struct platform_device *op)
-{
- struct device_node *dn = op->dev.of_node;
- struct device *dev = &op->dev;
- struct dma_device *dma;
- struct sirfsoc_dma *sdma;
- struct sirfsoc_dma_chan *schan;
- struct sirfsoc_dmadata *data;
- struct resource res;
- ulong regs_start, regs_size;
- u32 id;
- int ret, i;
-
- sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
- if (!sdma)
- return -ENOMEM;
-
- data = (struct sirfsoc_dmadata *)
- (of_match_device(op->dev.driver->of_match_table,
- &op->dev)->data);
- sdma->exec_desc = data->exec;
- sdma->type = data->type;
-
- if (of_property_read_u32(dn, "cell-index", &id)) {
- dev_err(dev, "Fail to get DMAC index\n");
- return -ENODEV;
- }
-
- sdma->irq = irq_of_parse_and_map(dn, 0);
- if (!sdma->irq) {
- dev_err(dev, "Error mapping IRQ!\n");
- return -EINVAL;
- }
-
- sdma->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(sdma->clk)) {
- dev_err(dev, "failed to get a clock.\n");
- return PTR_ERR(sdma->clk);
- }
-
- ret = of_address_to_resource(dn, 0, &res);
- if (ret) {
- dev_err(dev, "Error parsing memory region!\n");
- goto irq_dispose;
- }
-
- regs_start = res.start;
- regs_size = resource_size(&res);
-
- sdma->base = devm_ioremap(dev, regs_start, regs_size);
- if (!sdma->base) {
- dev_err(dev, "Error mapping memory region!\n");
- ret = -ENOMEM;
- goto irq_dispose;
- }
-
- ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
- if (ret) {
- dev_err(dev, "Error requesting IRQ!\n");
- ret = -EINVAL;
- goto irq_dispose;
- }
-
- dma = &sdma->dma;
- dma->dev = dev;
-
- dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
- dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
- dma->device_issue_pending = sirfsoc_dma_issue_pending;
- dma->device_config = sirfsoc_dma_slave_config;
- dma->device_pause = sirfsoc_dma_pause_chan;
- dma->device_resume = sirfsoc_dma_resume_chan;
- dma->device_terminate_all = sirfsoc_dma_terminate_all;
- dma->device_tx_status = sirfsoc_dma_tx_status;
- dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
- dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
- dma->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
- dma->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
- dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-
- INIT_LIST_HEAD(&dma->channels);
- dma_cap_set(DMA_SLAVE, dma->cap_mask);
- dma_cap_set(DMA_CYCLIC, dma->cap_mask);
- dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
- dma_cap_set(DMA_PRIVATE, dma->cap_mask);
-
- for (i = 0; i < SIRFSOC_DMA_CHANNELS; i++) {
- schan = &sdma->channels[i];
-
- schan->chan.device = dma;
- dma_cookie_init(&schan->chan);
-
- INIT_LIST_HEAD(&schan->free);
- INIT_LIST_HEAD(&schan->prepared);
- INIT_LIST_HEAD(&schan->queued);
- INIT_LIST_HEAD(&schan->active);
- INIT_LIST_HEAD(&schan->completed);
-
- spin_lock_init(&schan->lock);
- list_add_tail(&schan->chan.device_node, &dma->channels);
- }
-
- tasklet_setup(&sdma->tasklet, sirfsoc_dma_tasklet);
-
- /* Register DMA engine */
- dev_set_drvdata(dev, sdma);
-
- ret = dma_async_device_register(dma);
- if (ret)
- goto free_irq;
-
- /* Device-tree DMA controller registration */
- ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
- if (ret) {
- dev_err(dev, "failed to register DMA controller\n");
- goto unreg_dma_dev;
- }
-
- pm_runtime_enable(&op->dev);
- dev_info(dev, "initialized SIRFSOC DMAC driver\n");
-
- return 0;
-
-unreg_dma_dev:
- dma_async_device_unregister(dma);
-free_irq:
- free_irq(sdma->irq, sdma);
-irq_dispose:
- irq_dispose_mapping(sdma->irq);
- return ret;
-}
-
-static int sirfsoc_dma_remove(struct platform_device *op)
-{
- struct device *dev = &op->dev;
- struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
-
- of_dma_controller_free(op->dev.of_node);
- dma_async_device_unregister(&sdma->dma);
- free_irq(sdma->irq, sdma);
- tasklet_kill(&sdma->tasklet);
- irq_dispose_mapping(sdma->irq);
- pm_runtime_disable(&op->dev);
- if (!pm_runtime_status_suspended(&op->dev))
- sirfsoc_dma_runtime_suspend(&op->dev);
-
- return 0;
-}
-
-static int __maybe_unused sirfsoc_dma_runtime_suspend(struct device *dev)
-{
- struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
-
- clk_disable_unprepare(sdma->clk);
- return 0;
-}
-
-static int __maybe_unused sirfsoc_dma_runtime_resume(struct device *dev)
-{
- struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare_enable(sdma->clk);
- if (ret < 0) {
- dev_err(dev, "clk_enable failed: %d\n", ret);
- return ret;
- }
- return 0;
-}
-
-static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
-{
- struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
- struct sirfsoc_dma_regs *save = &sdma->regs_save;
- struct sirfsoc_dma_chan *schan;
- int ch;
- int ret;
- int count;
- u32 int_offset;
-
- /*
- * if we were runtime-suspended before, resume to enable clock
- * before accessing register
- */
- if (pm_runtime_status_suspended(dev)) {
- ret = sirfsoc_dma_runtime_resume(dev);
- if (ret < 0)
- return ret;
- }
-
- if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
- count = 1;
- int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
- } else {
- count = SIRFSOC_DMA_CHANNELS;
- int_offset = SIRFSOC_DMA_INT_EN;
- }
-
- /*
- * DMA controller will lose all registers while suspending
- * so we need to save registers for active channels
- */
- for (ch = 0; ch < count; ch++) {
- schan = &sdma->channels[ch];
- if (list_empty(&schan->active))
- continue;
- save->ctrl[ch] = readl_relaxed(sdma->base +
- ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
- }
- save->interrupt_en = readl_relaxed(sdma->base + int_offset);
-
- /* Disable clock */
- sirfsoc_dma_runtime_suspend(dev);
-
- return 0;
-}
-
-static int __maybe_unused sirfsoc_dma_pm_resume(struct device *dev)
-{
- struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
- struct sirfsoc_dma_regs *save = &sdma->regs_save;
- struct sirfsoc_dma_desc *sdesc;
- struct sirfsoc_dma_chan *schan;
- int ch;
- int ret;
- int count;
- u32 int_offset;
- u32 width_offset;
-
- /* Enable clock before accessing register */
- ret = sirfsoc_dma_runtime_resume(dev);
- if (ret < 0)
- return ret;
-
- if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
- count = 1;
- int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
- width_offset = SIRFSOC_DMA_WIDTH_ATLAS7;
- } else {
- count = SIRFSOC_DMA_CHANNELS;
- int_offset = SIRFSOC_DMA_INT_EN;
- width_offset = SIRFSOC_DMA_WIDTH_0;
- }
-
- writel_relaxed(save->interrupt_en, sdma->base + int_offset);
- for (ch = 0; ch < count; ch++) {
- schan = &sdma->channels[ch];
- if (list_empty(&schan->active))
- continue;
- sdesc = list_first_entry(&schan->active,
- struct sirfsoc_dma_desc,
- node);
- writel_relaxed(sdesc->width,
- sdma->base + width_offset + ch * 4);
- writel_relaxed(sdesc->xlen,
- sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
- writel_relaxed(sdesc->ylen,
- sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
- writel_relaxed(save->ctrl[ch],
- sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
- if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
- writel_relaxed(sdesc->addr,
- sdma->base + SIRFSOC_DMA_CH_ADDR);
- } else {
- writel_relaxed(sdesc->addr >> 2,
- sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
-
- }
- }
-
- /* if we were runtime-suspended before, suspend again */
- if (pm_runtime_status_suspended(dev))
- sirfsoc_dma_runtime_suspend(dev);
-
- return 0;
-}
-
-static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
- SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
-};
-
-static struct sirfsoc_dmadata sirfsoc_dmadata_a6 = {
- .exec = sirfsoc_dma_execute_hw_a6,
- .type = SIRFSOC_DMA_VER_A6,
-};
-
-static struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = {
- .exec = sirfsoc_dma_execute_hw_a7v1,
- .type = SIRFSOC_DMA_VER_A7V1,
-};
-
-static struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = {
- .exec = sirfsoc_dma_execute_hw_a7v2,
- .type = SIRFSOC_DMA_VER_A7V2,
-};
-
-static const struct of_device_id sirfsoc_dma_match[] = {
- { .compatible = "sirf,prima2-dmac", .data = &sirfsoc_dmadata_a6,},
- { .compatible = "sirf,atlas7-dmac", .data = &sirfsoc_dmadata_a7v1,},
- { .compatible = "sirf,atlas7-dmac-v2", .data = &sirfsoc_dmadata_a7v2,},
- {},
-};
-MODULE_DEVICE_TABLE(of, sirfsoc_dma_match);
-
-static struct platform_driver sirfsoc_dma_driver = {
- .probe = sirfsoc_dma_probe,
- .remove = sirfsoc_dma_remove,
- .driver = {
- .name = DRV_NAME,
- .pm = &sirfsoc_dma_pm_ops,
- .of_match_table = sirfsoc_dma_match,
- },
-};
-
-static __init int sirfsoc_dma_init(void)
-{
- return platform_driver_register(&sirfsoc_dma_driver);
-}
-
-static void __exit sirfsoc_dma_exit(void)
-{
- platform_driver_unregister(&sirfsoc_dma_driver);
-}
-
-subsys_initcall(sirfsoc_dma_init);
-module_exit(sirfsoc_dma_exit);
-
-MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>");
-MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
-MODULE_DESCRIPTION("SIRFSOC DMA control driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 4256e55bbf25..265d7c07b348 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -78,7 +78,7 @@ static int dma40_memcpy_channels[] = {
DB8500_DMA_MEMCPY_EV_5,
};
-/* Default configuration for physcial memcpy */
+/* Default configuration for physical memcpy */
static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
.mode = STEDMA40_MODE_PHYSICAL,
.dir = DMA_MEM_TO_MEM,
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 298460438bb4..96ad21869ba7 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -121,6 +121,11 @@ struct udma_oes_offsets {
#define UDMA_FLAG_PDMA_ACC32 BIT(0)
#define UDMA_FLAG_PDMA_BURST BIT(1)
#define UDMA_FLAG_TDTYPE BIT(2)
+#define UDMA_FLAG_BURST_SIZE BIT(3)
+#define UDMA_FLAGS_J7_CLASS (UDMA_FLAG_PDMA_ACC32 | \
+ UDMA_FLAG_PDMA_BURST | \
+ UDMA_FLAG_TDTYPE | \
+ UDMA_FLAG_BURST_SIZE)
struct udma_match_data {
enum k3_dma_type type;
@@ -128,6 +133,7 @@ struct udma_match_data {
bool enable_memcpy_support;
u32 flags;
u32 statictr_z_mask;
+ u8 burst_size[3];
};
struct udma_soc_data {
@@ -436,6 +442,18 @@ static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
}
}
+static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id)
+{
+ int i;
+
+ for (i = 0; i < tpl_map->levels; i++) {
+ if (chan_id >= tpl_map->start_idx[i])
+ return i;
+ }
+
+ return 0;
+}
+
static void udma_reset_uchan(struct udma_chan *uc)
{
memset(&uc->config, 0, sizeof(uc->config));
@@ -1811,13 +1829,21 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
struct udma_tchan *tchan = uc->tchan;
struct udma_rchan *rchan = uc->rchan;
- int ret = 0;
+ u8 burst_size = 0;
+ int ret;
+ u8 tpl;
/* Non synchronized - mem to mem type of transfer */
int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+ if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
+ tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);
+
+ burst_size = ud->match_data->burst_size[tpl];
+ }
+
req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
req_tx.nav_id = tisci_rm->tisci_dev_id;
req_tx.index = tchan->id;
@@ -1825,6 +1851,10 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
req_tx.txcq_qnum = tc_ring;
req_tx.tx_atype = ud->atype;
+ if (burst_size) {
+ req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
+ req_tx.tx_burst_size = burst_size;
+ }
ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
if (ret) {
@@ -1839,6 +1869,10 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
req_rx.rxcq_qnum = tc_ring;
req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
req_rx.rx_atype = ud->atype;
+ if (burst_size) {
+ req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
+ req_rx.rx_burst_size = burst_size;
+ }
ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
if (ret)
@@ -1854,12 +1888,24 @@ static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
struct udma_bchan *bchan = uc->bchan;
- int ret = 0;
+ u8 burst_size = 0;
+ int ret;
+ u8 tpl;
+
+ if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
+ tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);
+
+ burst_size = ud->match_data->burst_size[tpl];
+ }
req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
req_tx.nav_id = tisci_rm->tisci_dev_id;
req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
req_tx.index = bchan->id;
+ if (burst_size) {
+ req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
+ req_tx.tx_burst_size = burst_size;
+ }
ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
if (ret)
@@ -1877,7 +1923,7 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
u32 mode, fetch_size;
- int ret = 0;
+ int ret;
if (uc->config.pkt_mode) {
mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
@@ -1918,7 +1964,7 @@ static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
struct udma_tchan *tchan = uc->tchan;
struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
- int ret = 0;
+ int ret;
req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
req_tx.nav_id = tisci_rm->tisci_dev_id;
@@ -1951,7 +1997,7 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc)
struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
u32 mode, fetch_size;
- int ret = 0;
+ int ret;
if (uc->config.pkt_mode) {
mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
@@ -2028,7 +2074,7 @@ static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
struct udma_rchan *rchan = uc->rchan;
struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
- int ret = 0;
+ int ret;
req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
req_rx.nav_id = tisci_rm->tisci_dev_id;
@@ -2048,7 +2094,7 @@ static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
- int ret = 0;
+ int ret;
req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
req_rx.nav_id = tisci_rm->tisci_dev_id;
@@ -2401,7 +2447,8 @@ static int bcdma_alloc_chan_resources(struct dma_chan *chan)
dev_err(ud->ddev.dev,
"Descriptor pool allocation failed\n");
uc->use_dma_pool = false;
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_res_free;
}
uc->use_dma_pool = true;
@@ -4167,6 +4214,11 @@ static struct udma_match_data am654_main_data = {
.psil_base = 0x1000,
.enable_memcpy_support = true,
.statictr_z_mask = GENMASK(11, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
+ 0, /* No UH Channels */
+ },
};
static struct udma_match_data am654_mcu_data = {
@@ -4174,38 +4226,63 @@ static struct udma_match_data am654_mcu_data = {
.psil_base = 0x6000,
.enable_memcpy_support = false,
.statictr_z_mask = GENMASK(11, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
+ 0, /* No UH Channels */
+ },
};
static struct udma_match_data j721e_main_data = {
.type = DMA_TYPE_UDMA,
.psil_base = 0x1000,
.enable_memcpy_support = true,
- .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+ .flags = UDMA_FLAGS_J7_CLASS,
.statictr_z_mask = GENMASK(23, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */
+ },
};
static struct udma_match_data j721e_mcu_data = {
.type = DMA_TYPE_UDMA,
.psil_base = 0x6000,
.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
- .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+ .flags = UDMA_FLAGS_J7_CLASS,
.statictr_z_mask = GENMASK(23, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */
+ 0, /* No UH Channels */
+ },
};
static struct udma_match_data am64_bcdma_data = {
.type = DMA_TYPE_BCDMA,
.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
.enable_memcpy_support = true, /* Supported via bchan */
- .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+ .flags = UDMA_FLAGS_J7_CLASS,
.statictr_z_mask = GENMASK(23, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
};
static struct udma_match_data am64_pktdma_data = {
.type = DMA_TYPE_PKTDMA,
.psil_base = 0x1000,
.enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
- .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+ .flags = UDMA_FLAGS_J7_CLASS,
.statictr_z_mask = GENMASK(23, 0),
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
};
static const struct of_device_id udma_of_match[] = {
@@ -4305,6 +4382,7 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
+ ud->rflow_cnt = ud->rchan_cnt;
break;
case DMA_TYPE_PKTDMA:
cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
@@ -5045,6 +5123,34 @@ static void udma_dbg_summary_show(struct seq_file *s,
}
#endif /* CONFIG_DEBUG_FS */
+static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
+{
+ const struct udma_match_data *match_data = ud->match_data;
+ u8 tpl;
+
+ if (!match_data->enable_memcpy_support)
+ return DMAENGINE_ALIGN_8_BYTES;
+
+ /* Get the highest TPL level the device supports for memcpy */
+ if (ud->bchan_cnt)
+ tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
+ else if (ud->tchan_cnt)
+ tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
+ else
+ return DMAENGINE_ALIGN_8_BYTES;
+
+ switch (match_data->burst_size[tpl]) {
+ case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
+ return DMAENGINE_ALIGN_256_BYTES;
+ case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
+ return DMAENGINE_ALIGN_128_BYTES;
+ case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
+ fallthrough;
+ default:
+ return DMAENGINE_ALIGN_64_BYTES;
+ }
+}
+
#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
@@ -5201,7 +5307,6 @@ static int udma_probe(struct platform_device *pdev)
ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
- ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
DESC_METADATA_ENGINE;
if (ud->match_data->enable_memcpy_support &&
@@ -5283,6 +5388,9 @@ static int udma_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
}
+ /* Configure the copy_align to the maximum burst size the device supports */
+ ud->ddev.copy_align = udma_get_copy_align(ud);
+
ret = dma_async_device_register(&ud->ddev);
if (ret) {
dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 79777550a6ff..3aded7861fef 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -800,7 +800,7 @@ xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
struct xilinx_dma_tx_descriptor *desc;
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
if (!desc)
return NULL;
diff --git a/drivers/dma/zx_dma.c b/drivers/dma/zx_dma.c
deleted file mode 100644
index b057582b2fac..000000000000
--- a/drivers/dma/zx_dma.c
+++ /dev/null
@@ -1,941 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2015 Linaro.
- */
-#include <linux/sched.h>
-#include <linux/device.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/of_device.h>
-#include <linux/of.h>
-#include <linux/clk.h>
-#include <linux/of_dma.h>
-
-#include "virt-dma.h"
-
-#define DRIVER_NAME "zx-dma"
-#define DMA_ALIGN 4
-#define DMA_MAX_SIZE (0x10000 - 512)
-#define LLI_BLOCK_SIZE (4 * PAGE_SIZE)
-
-#define REG_ZX_SRC_ADDR 0x00
-#define REG_ZX_DST_ADDR 0x04
-#define REG_ZX_TX_X_COUNT 0x08
-#define REG_ZX_TX_ZY_COUNT 0x0c
-#define REG_ZX_SRC_ZY_STEP 0x10
-#define REG_ZX_DST_ZY_STEP 0x14
-#define REG_ZX_LLI_ADDR 0x1c
-#define REG_ZX_CTRL 0x20
-#define REG_ZX_TC_IRQ 0x800
-#define REG_ZX_SRC_ERR_IRQ 0x804
-#define REG_ZX_DST_ERR_IRQ 0x808
-#define REG_ZX_CFG_ERR_IRQ 0x80c
-#define REG_ZX_TC_IRQ_RAW 0x810
-#define REG_ZX_SRC_ERR_IRQ_RAW 0x814
-#define REG_ZX_DST_ERR_IRQ_RAW 0x818
-#define REG_ZX_CFG_ERR_IRQ_RAW 0x81c
-#define REG_ZX_STATUS 0x820
-#define REG_ZX_DMA_GRP_PRIO 0x824
-#define REG_ZX_DMA_ARB 0x828
-
-#define ZX_FORCE_CLOSE BIT(31)
-#define ZX_DST_BURST_WIDTH(x) (((x) & 0x7) << 13)
-#define ZX_MAX_BURST_LEN 16
-#define ZX_SRC_BURST_LEN(x) (((x) & 0xf) << 9)
-#define ZX_SRC_BURST_WIDTH(x) (((x) & 0x7) << 6)
-#define ZX_IRQ_ENABLE_ALL (3 << 4)
-#define ZX_DST_FIFO_MODE BIT(3)
-#define ZX_SRC_FIFO_MODE BIT(2)
-#define ZX_SOFT_REQ BIT(1)
-#define ZX_CH_ENABLE BIT(0)
-
-#define ZX_DMA_BUSWIDTHS \
- (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
- BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
- BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
-
-enum zx_dma_burst_width {
- ZX_DMA_WIDTH_8BIT = 0,
- ZX_DMA_WIDTH_16BIT = 1,
- ZX_DMA_WIDTH_32BIT = 2,
- ZX_DMA_WIDTH_64BIT = 3,
-};
-
-struct zx_desc_hw {
- u32 saddr;
- u32 daddr;
- u32 src_x;
- u32 src_zy;
- u32 src_zy_step;
- u32 dst_zy_step;
- u32 reserved1;
- u32 lli;
- u32 ctr;
- u32 reserved[7]; /* pack as hardware registers region size */
-} __aligned(32);
-
-struct zx_dma_desc_sw {
- struct virt_dma_desc vd;
- dma_addr_t desc_hw_lli;
- size_t desc_num;
- size_t size;
- struct zx_desc_hw *desc_hw;
-};
-
-struct zx_dma_phy;
-
-struct zx_dma_chan {
- struct dma_slave_config slave_cfg;
- int id; /* Request phy chan id */
- u32 ccfg;
- u32 cyclic;
- struct virt_dma_chan vc;
- struct zx_dma_phy *phy;
- struct list_head node;
- dma_addr_t dev_addr;
- enum dma_status status;
-};
-
-struct zx_dma_phy {
- u32 idx;
- void __iomem *base;
- struct zx_dma_chan *vchan;
- struct zx_dma_desc_sw *ds_run;
- struct zx_dma_desc_sw *ds_done;
-};
-
-struct zx_dma_dev {
- struct dma_device slave;
- void __iomem *base;
- spinlock_t lock; /* lock for ch and phy */
- struct list_head chan_pending;
- struct zx_dma_phy *phy;
- struct zx_dma_chan *chans;
- struct clk *clk;
- struct dma_pool *pool;
- u32 dma_channels;
- u32 dma_requests;
- int irq;
-};
-
-#define to_zx_dma(dmadev) container_of(dmadev, struct zx_dma_dev, slave)
-
-static struct zx_dma_chan *to_zx_chan(struct dma_chan *chan)
-{
- return container_of(chan, struct zx_dma_chan, vc.chan);
-}
-
-static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d)
-{
- u32 val = 0;
-
- val = readl_relaxed(phy->base + REG_ZX_CTRL);
- val &= ~ZX_CH_ENABLE;
- val |= ZX_FORCE_CLOSE;
- writel_relaxed(val, phy->base + REG_ZX_CTRL);
-
- val = 0x1 << phy->idx;
- writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW);
- writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
- writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW);
- writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
-}
-
-static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw)
-{
- writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR);
- writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR);
- writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT);
- writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT);
- writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP);
- writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP);
- writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR);
- writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL);
-}
-
-static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy)
-{
- return readl_relaxed(phy->base + REG_ZX_LLI_ADDR);
-}
-
-static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d)
-{
- return readl_relaxed(d->base + REG_ZX_STATUS);
-}
-
-static void zx_dma_init_state(struct zx_dma_dev *d)
-{
- /* set same priority */
- writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB);
- /* clear all irq */
- writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW);
- writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
- writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW);
- writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
-}
-
-static int zx_dma_start_txd(struct zx_dma_chan *c)
-{
- struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device);
- struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
-
- if (!c->phy)
- return -EAGAIN;
-
- if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d))
- return -EAGAIN;
-
- if (vd) {
- struct zx_dma_desc_sw *ds =
- container_of(vd, struct zx_dma_desc_sw, vd);
- /*
- * fetch and remove request from vc->desc_issued
- * so vc->desc_issued only contains desc pending
- */
- list_del(&ds->vd.node);
- c->phy->ds_run = ds;
- c->phy->ds_done = NULL;
- /* start dma */
- zx_dma_set_desc(c->phy, ds->desc_hw);
- return 0;
- }
- c->phy->ds_done = NULL;
- c->phy->ds_run = NULL;
- return -EAGAIN;
-}
-
-static void zx_dma_task(struct zx_dma_dev *d)
-{
- struct zx_dma_phy *p;
- struct zx_dma_chan *c, *cn;
- unsigned pch, pch_alloc = 0;
- unsigned long flags;
-
- /* check new dma request of running channel in vc->desc_issued */
- list_for_each_entry_safe(c, cn, &d->slave.channels,
- vc.chan.device_node) {
- spin_lock_irqsave(&c->vc.lock, flags);
- p = c->phy;
- if (p && p->ds_done && zx_dma_start_txd(c)) {
- /* No current txd associated with this channel */
- dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
- /* Mark this channel free */
- c->phy = NULL;
- p->vchan = NULL;
- }
- spin_unlock_irqrestore(&c->vc.lock, flags);
- }
-
- /* check new channel request in d->chan_pending */
- spin_lock_irqsave(&d->lock, flags);
- while (!list_empty(&d->chan_pending)) {
- c = list_first_entry(&d->chan_pending,
- struct zx_dma_chan, node);
- p = &d->phy[c->id];
- if (!p->vchan) {
- /* remove from d->chan_pending */
- list_del_init(&c->node);
- pch_alloc |= 1 << c->id;
- /* Mark this channel allocated */
- p->vchan = c;
- c->phy = p;
- } else {
- dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id);
- }
- }
- spin_unlock_irqrestore(&d->lock, flags);
-
- for (pch = 0; pch < d->dma_channels; pch++) {
- if (pch_alloc & (1 << pch)) {
- p = &d->phy[pch];
- c = p->vchan;
- if (c) {
- spin_lock_irqsave(&c->vc.lock, flags);
- zx_dma_start_txd(c);
- spin_unlock_irqrestore(&c->vc.lock, flags);
- }
- }
- }
-}
-
-static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
-{
- struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id;
- struct zx_dma_phy *p;
- struct zx_dma_chan *c;
- u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ);
- u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
- u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
- u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);
- u32 i, irq_chan = 0, task = 0;
-
- while (tc) {
- i = __ffs(tc);
- tc &= ~BIT(i);
- p = &d->phy[i];
- c = p->vchan;
- if (c) {
- spin_lock(&c->vc.lock);
- if (c->cyclic) {
- vchan_cyclic_callback(&p->ds_run->vd);
- } else {
- vchan_cookie_complete(&p->ds_run->vd);
- p->ds_done = p->ds_run;
- task = 1;
- }
- spin_unlock(&c->vc.lock);
- irq_chan |= BIT(i);
- }
- }
-
- if (serr || derr || cfg)
- dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n",
- serr, derr, cfg);
-
- writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW);
- writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
- writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
- writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
-
- if (task)
- zx_dma_task(d);
- return IRQ_HANDLED;
-}
-
-static void zx_dma_free_chan_resources(struct dma_chan *chan)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- struct zx_dma_dev *d = to_zx_dma(chan->device);
- unsigned long flags;
-
- spin_lock_irqsave(&d->lock, flags);
- list_del_init(&c->node);
- spin_unlock_irqrestore(&d->lock, flags);
-
- vchan_free_chan_resources(&c->vc);
- c->ccfg = 0;
-}
-
-static enum dma_status zx_dma_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie,
- struct dma_tx_state *state)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- struct zx_dma_phy *p;
- struct virt_dma_desc *vd;
- unsigned long flags;
- enum dma_status ret;
- size_t bytes = 0;
-
- ret = dma_cookie_status(&c->vc.chan, cookie, state);
- if (ret == DMA_COMPLETE || !state)
- return ret;
-
- spin_lock_irqsave(&c->vc.lock, flags);
- p = c->phy;
- ret = c->status;
-
- /*
- * If the cookie is on our issue queue, then the residue is
- * its total size.
- */
- vd = vchan_find_desc(&c->vc, cookie);
- if (vd) {
- bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size;
- } else if ((!p) || (!p->ds_run)) {
- bytes = 0;
- } else {
- struct zx_dma_desc_sw *ds = p->ds_run;
- u32 clli = 0, index = 0;
-
- bytes = 0;
- clli = zx_dma_get_curr_lli(p);
- index = (clli - ds->desc_hw_lli) /
- sizeof(struct zx_desc_hw) + 1;
- for (; index < ds->desc_num; index++) {
- bytes += ds->desc_hw[index].src_x;
- /* end of lli */
- if (!ds->desc_hw[index].lli)
- break;
- }
- }
- spin_unlock_irqrestore(&c->vc.lock, flags);
- dma_set_residue(state, bytes);
- return ret;
-}
-
-static void zx_dma_issue_pending(struct dma_chan *chan)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- struct zx_dma_dev *d = to_zx_dma(chan->device);
- unsigned long flags;
- int issue = 0;
-
- spin_lock_irqsave(&c->vc.lock, flags);
- /* add request to vc->desc_issued */
- if (vchan_issue_pending(&c->vc)) {
- spin_lock(&d->lock);
- if (!c->phy && list_empty(&c->node)) {
- /* if new channel, add chan_pending */
- list_add_tail(&c->node, &d->chan_pending);
- issue = 1;
- dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
- }
- spin_unlock(&d->lock);
- } else {
- dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
- }
- spin_unlock_irqrestore(&c->vc.lock, flags);
-
- if (issue)
- zx_dma_task(d);
-}
-
-static void zx_dma_fill_desc(struct zx_dma_desc_sw *ds, dma_addr_t dst,
- dma_addr_t src, size_t len, u32 num, u32 ccfg)
-{
- if ((num + 1) < ds->desc_num)
- ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
- sizeof(struct zx_desc_hw);
- ds->desc_hw[num].saddr = src;
- ds->desc_hw[num].daddr = dst;
- ds->desc_hw[num].src_x = len;
- ds->desc_hw[num].ctr = ccfg;
-}
-
-static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
- struct dma_chan *chan)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- struct zx_dma_desc_sw *ds;
- struct zx_dma_dev *d = to_zx_dma(chan->device);
- int lli_limit = LLI_BLOCK_SIZE / sizeof(struct zx_desc_hw);
-
- if (num > lli_limit) {
- dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
- &c->vc, num, lli_limit);
- return NULL;
- }
-
- ds = kzalloc(sizeof(*ds), GFP_ATOMIC);
- if (!ds)
- return NULL;
-
- ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
- if (!ds->desc_hw) {
- dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
- kfree(ds);
- return NULL;
- }
- ds->desc_num = num;
- return ds;
-}
-
-static enum zx_dma_burst_width zx_dma_burst_width(enum dma_slave_buswidth width)
-{
- switch (width) {
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- case DMA_SLAVE_BUSWIDTH_8_BYTES:
- return ffs(width) - 1;
- default:
- return ZX_DMA_WIDTH_32BIT;
- }
-}
-
-static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir)
-{
- struct dma_slave_config *cfg = &c->slave_cfg;
- enum zx_dma_burst_width src_width;
- enum zx_dma_burst_width dst_width;
- u32 maxburst = 0;
-
- switch (dir) {
- case DMA_MEM_TO_MEM:
- c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ
- | ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1)
- | ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_32BIT)
- | ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_32BIT);
- break;
- case DMA_MEM_TO_DEV:
- c->dev_addr = cfg->dst_addr;
- /* dst len is calculated from src width, len and dst width.
- * We need make sure dst len not exceed MAX LEN.
- * Trailing single transaction that does not fill a full
- * burst also require identical src/dst data width.
- */
- dst_width = zx_dma_burst_width(cfg->dst_addr_width);
- maxburst = cfg->dst_maxburst;
- maxburst = maxburst < ZX_MAX_BURST_LEN ?
- maxburst : ZX_MAX_BURST_LEN;
- c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE
- | ZX_SRC_BURST_LEN(maxburst - 1)
- | ZX_SRC_BURST_WIDTH(dst_width)
- | ZX_DST_BURST_WIDTH(dst_width);
- break;
- case DMA_DEV_TO_MEM:
- c->dev_addr = cfg->src_addr;
- src_width = zx_dma_burst_width(cfg->src_addr_width);
- maxburst = cfg->src_maxburst;
- maxburst = maxburst < ZX_MAX_BURST_LEN ?
- maxburst : ZX_MAX_BURST_LEN;
- c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE
- | ZX_SRC_BURST_LEN(maxburst - 1)
- | ZX_SRC_BURST_WIDTH(src_width)
- | ZX_DST_BURST_WIDTH(src_width);
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static struct dma_async_tx_descriptor *zx_dma_prep_memcpy(
- struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
- size_t len, unsigned long flags)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- struct zx_dma_desc_sw *ds;
- size_t copy = 0;
- int num = 0;
-
- if (!len)
- return NULL;
-
- if (zx_pre_config(c, DMA_MEM_TO_MEM))
- return NULL;
-
- num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
-
- ds = zx_alloc_desc_resource(num, chan);
- if (!ds)
- return NULL;
-
- ds->size = len;
- num = 0;
-
- do {
- copy = min_t(size_t, len, DMA_MAX_SIZE);
- zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
-
- src += copy;
- dst += copy;
- len -= copy;
- } while (len);
-
- c->cyclic = 0;
- ds->desc_hw[num - 1].lli = 0; /* end of link */
- ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
- return vchan_tx_prep(&c->vc, &ds->vd, flags);
-}
-
-static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
- enum dma_transfer_direction dir, unsigned long flags, void *context)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- struct zx_dma_desc_sw *ds;
- size_t len, avail, total = 0;
- struct scatterlist *sg;
- dma_addr_t addr, src = 0, dst = 0;
- int num = sglen, i;
-
- if (!sgl)
- return NULL;
-
- if (zx_pre_config(c, dir))
- return NULL;
-
- for_each_sg(sgl, sg, sglen, i) {
- avail = sg_dma_len(sg);
- if (avail > DMA_MAX_SIZE)
- num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
- }
-
- ds = zx_alloc_desc_resource(num, chan);
- if (!ds)
- return NULL;
-
- c->cyclic = 0;
- num = 0;
- for_each_sg(sgl, sg, sglen, i) {
- addr = sg_dma_address(sg);
- avail = sg_dma_len(sg);
- total += avail;
-
- do {
- len = min_t(size_t, avail, DMA_MAX_SIZE);
-
- if (dir == DMA_MEM_TO_DEV) {
- src = addr;
- dst = c->dev_addr;
- } else if (dir == DMA_DEV_TO_MEM) {
- src = c->dev_addr;
- dst = addr;
- }
-
- zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
-
- addr += len;
- avail -= len;
- } while (avail);
- }
-
- ds->desc_hw[num - 1].lli = 0; /* end of link */
- ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
- ds->size = total;
- return vchan_tx_prep(&c->vc, &ds->vd, flags);
-}
-
-static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic(
- struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction dir,
- unsigned long flags)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- struct zx_dma_desc_sw *ds;
- dma_addr_t src = 0, dst = 0;
- int num_periods = buf_len / period_len;
- int buf = 0, num = 0;
-
- if (period_len > DMA_MAX_SIZE) {
- dev_err(chan->device->dev, "maximum period size exceeded\n");
- return NULL;
- }
-
- if (zx_pre_config(c, dir))
- return NULL;
-
- ds = zx_alloc_desc_resource(num_periods, chan);
- if (!ds)
- return NULL;
- c->cyclic = 1;
-
- while (buf < buf_len) {
- if (dir == DMA_MEM_TO_DEV) {
- src = dma_addr;
- dst = c->dev_addr;
- } else if (dir == DMA_DEV_TO_MEM) {
- src = c->dev_addr;
- dst = dma_addr;
- }
- zx_dma_fill_desc(ds, dst, src, period_len, num++,
- c->ccfg | ZX_IRQ_ENABLE_ALL);
- dma_addr += period_len;
- buf += period_len;
- }
-
- ds->desc_hw[num - 1].lli = ds->desc_hw_lli;
- ds->size = buf_len;
- return vchan_tx_prep(&c->vc, &ds->vd, flags);
-}
-
-static int zx_dma_config(struct dma_chan *chan,
- struct dma_slave_config *cfg)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
-
- if (!cfg)
- return -EINVAL;
-
- memcpy(&c->slave_cfg, cfg, sizeof(*cfg));
-
- return 0;
-}
-
-static int zx_dma_terminate_all(struct dma_chan *chan)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- struct zx_dma_dev *d = to_zx_dma(chan->device);
- struct zx_dma_phy *p = c->phy;
- unsigned long flags;
- LIST_HEAD(head);
-
- dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
-
- /* Prevent this channel being scheduled */
- spin_lock(&d->lock);
- list_del_init(&c->node);
- spin_unlock(&d->lock);
-
- /* Clear the tx descriptor lists */
- spin_lock_irqsave(&c->vc.lock, flags);
- vchan_get_all_descriptors(&c->vc, &head);
- if (p) {
- /* vchan is assigned to a pchan - stop the channel */
- zx_dma_terminate_chan(p, d);
- c->phy = NULL;
- p->vchan = NULL;
- p->ds_run = NULL;
- p->ds_done = NULL;
- }
- spin_unlock_irqrestore(&c->vc.lock, flags);
- vchan_dma_desc_free_list(&c->vc, &head);
-
- return 0;
-}
-
-static int zx_dma_transfer_pause(struct dma_chan *chan)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- u32 val = 0;
-
- val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
- val &= ~ZX_CH_ENABLE;
- writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
-
- return 0;
-}
-
-static int zx_dma_transfer_resume(struct dma_chan *chan)
-{
- struct zx_dma_chan *c = to_zx_chan(chan);
- u32 val = 0;
-
- val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
- val |= ZX_CH_ENABLE;
- writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
-
- return 0;
-}
-
-static void zx_dma_free_desc(struct virt_dma_desc *vd)
-{
- struct zx_dma_desc_sw *ds =
- container_of(vd, struct zx_dma_desc_sw, vd);
- struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device);
-
- dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
- kfree(ds);
-}
-
-static const struct of_device_id zx6702_dma_dt_ids[] = {
- { .compatible = "zte,zx296702-dma", },
- {}
-};
-MODULE_DEVICE_TABLE(of, zx6702_dma_dt_ids);
-
-static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
-{
- struct zx_dma_dev *d = ofdma->of_dma_data;
- unsigned int request = dma_spec->args[0];
- struct dma_chan *chan;
- struct zx_dma_chan *c;
-
- if (request >= d->dma_requests)
- return NULL;
-
- chan = dma_get_any_slave_channel(&d->slave);
- if (!chan) {
- dev_err(d->slave.dev, "get channel fail in %s.\n", __func__);
- return NULL;
- }
- c = to_zx_chan(chan);
- c->id = request;
- dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n",
- c->id, &c->vc);
- return chan;
-}
-
-static int zx_dma_probe(struct platform_device *op)
-{
- struct zx_dma_dev *d;
- int i, ret = 0;
-
- d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
- if (!d)
- return -ENOMEM;
-
- d->base = devm_platform_ioremap_resource(op, 0);
- if (IS_ERR(d->base))
- return PTR_ERR(d->base);
-
- of_property_read_u32((&op->dev)->of_node,
- "dma-channels", &d->dma_channels);
- of_property_read_u32((&op->dev)->of_node,
- "dma-requests", &d->dma_requests);
- if (!d->dma_requests || !d->dma_channels)
- return -EINVAL;
-
- d->clk = devm_clk_get(&op->dev, NULL);
- if (IS_ERR(d->clk)) {
- dev_err(&op->dev, "no dma clk\n");
- return PTR_ERR(d->clk);
- }
-
- d->irq = platform_get_irq(op, 0);
- ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler,
- 0, DRIVER_NAME, d);
- if (ret)
- return ret;
-
- /* A DMA memory pool for LLIs, align on 32-byte boundary */
- d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
- LLI_BLOCK_SIZE, 32, 0);
- if (!d->pool)
- return -ENOMEM;
-
- /* init phy channel */
- d->phy = devm_kcalloc(&op->dev,
- d->dma_channels, sizeof(struct zx_dma_phy), GFP_KERNEL);
- if (!d->phy)
- return -ENOMEM;
-
- for (i = 0; i < d->dma_channels; i++) {
- struct zx_dma_phy *p = &d->phy[i];
-
- p->idx = i;
- p->base = d->base + i * 0x40;
- }
-
- INIT_LIST_HEAD(&d->slave.channels);
- dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
- dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
- dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
- dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
- d->slave.dev = &op->dev;
- d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
- d->slave.device_tx_status = zx_dma_tx_status;
- d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
- d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
- d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic;
- d->slave.device_issue_pending = zx_dma_issue_pending;
- d->slave.device_config = zx_dma_config;
- d->slave.device_terminate_all = zx_dma_terminate_all;
- d->slave.device_pause = zx_dma_transfer_pause;
- d->slave.device_resume = zx_dma_transfer_resume;
- d->slave.copy_align = DMA_ALIGN;
- d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
- d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
- d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV)
- | BIT(DMA_DEV_TO_MEM);
- d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
-
- /* init virtual channel */
- d->chans = devm_kcalloc(&op->dev,
- d->dma_requests, sizeof(struct zx_dma_chan), GFP_KERNEL);
- if (!d->chans)
- return -ENOMEM;
-
- for (i = 0; i < d->dma_requests; i++) {
- struct zx_dma_chan *c = &d->chans[i];
-
- c->status = DMA_IN_PROGRESS;
- INIT_LIST_HEAD(&c->node);
- c->vc.desc_free = zx_dma_free_desc;
- vchan_init(&c->vc, &d->slave);
- }
-
- /* Enable clock before accessing registers */
- ret = clk_prepare_enable(d->clk);
- if (ret < 0) {
- dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
- goto zx_dma_out;
- }
-
- zx_dma_init_state(d);
-
- spin_lock_init(&d->lock);
- INIT_LIST_HEAD(&d->chan_pending);
- platform_set_drvdata(op, d);
-
- ret = dma_async_device_register(&d->slave);
- if (ret)
- goto clk_dis;
-
- ret = of_dma_controller_register((&op->dev)->of_node,
- zx_of_dma_simple_xlate, d);
- if (ret)
- goto of_dma_register_fail;
-
- dev_info(&op->dev, "initialized\n");
- return 0;
-
-of_dma_register_fail:
- dma_async_device_unregister(&d->slave);
-clk_dis:
- clk_disable_unprepare(d->clk);
-zx_dma_out:
- return ret;
-}
-
-static int zx_dma_remove(struct platform_device *op)
-{
- struct zx_dma_chan *c, *cn;
- struct zx_dma_dev *d = platform_get_drvdata(op);
-
- /* explictly free the irq */
- devm_free_irq(&op->dev, d->irq, d);
-
- dma_async_device_unregister(&d->slave);
- of_dma_controller_free((&op->dev)->of_node);
-
- list_for_each_entry_safe(c, cn, &d->slave.channels,
- vc.chan.device_node) {
- list_del(&c->vc.chan.device_node);
- }
- clk_disable_unprepare(d->clk);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int zx_dma_suspend_dev(struct device *dev)
-{
- struct zx_dma_dev *d = dev_get_drvdata(dev);
- u32 stat = 0;
-
- stat = zx_dma_get_chan_stat(d);
- if (stat) {
- dev_warn(d->slave.dev,
- "chan %d is running fail to suspend\n", stat);
- return -1;
- }
- clk_disable_unprepare(d->clk);
- return 0;
-}
-
-static int zx_dma_resume_dev(struct device *dev)
-{
- struct zx_dma_dev *d = dev_get_drvdata(dev);
- int ret = 0;
-
- ret = clk_prepare_enable(d->clk);
- if (ret < 0) {
- dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
- return ret;
- }
- zx_dma_init_state(d);
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(zx_dma_pmops, zx_dma_suspend_dev, zx_dma_resume_dev);
-
-static struct platform_driver zx_pdma_driver = {
- .driver = {
- .name = DRIVER_NAME,
- .pm = &zx_dma_pmops,
- .of_match_table = zx6702_dma_dt_ids,
- },
- .probe = zx_dma_probe,
- .remove = zx_dma_remove,
-};
-
-module_platform_driver(zx_pdma_driver);
-
-MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver");
-MODULE_AUTHOR("Jun Nie jun.nie@linaro.org");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 81c42664f21b..27d0c4cdc58d 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -81,14 +81,13 @@ config EDAC_AMD64
Support for error detection and correction of DRAM ECC errors on
the AMD64 families (>= K8) of memory controllers.
-config EDAC_AMD64_ERROR_INJECTION
- bool "Sysfs HW Error injection facilities"
- depends on EDAC_AMD64
- help
- Recent Opterons (Family 10h and later) provide for Memory Error
- Injection into the ECC detection circuits. The amd64_edac module
- allows the operator/user to inject Uncorrectable and Correctable
- errors into DRAM.
+ When EDAC_DEBUG is enabled, hardware error injection facilities
+ through sysfs are available:
+
+ AMD CPUs up to and excluding family 0x17 provide for Memory
+ Error Injection into the ECC detection circuits. The amd64_edac
+ module allows the operator/user to inject Uncorrectable and
+ Correctable errors into DRAM.
When enabled, in each of the respective memory controller directories
(/sys/devices/system/edac/mc/mcX), there are 3 input files:
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 464d3d8d850a..2d1641a27a28 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -44,12 +44,7 @@ obj-$(CONFIG_EDAC_IE31200) += ie31200_edac.o
obj-$(CONFIG_EDAC_X38) += x38_edac.o
obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
-
-amd64_edac_mod-y := amd64_edac.o
-amd64_edac_mod-$(CONFIG_EDAC_DEBUG) += amd64_edac_dbg.o
-amd64_edac_mod-$(CONFIG_EDAC_AMD64_ERROR_INJECTION) += amd64_edac_inj.o
-
-obj-$(CONFIG_EDAC_AMD64) += amd64_edac_mod.o
+obj-$(CONFIG_EDAC_AMD64) += amd64_edac.o
obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index f7087ddddb90..9fa4dfc6ebee 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -500,8 +500,8 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
* complete 32-bit values despite the fact that the bitfields in the DHAR
* only represent bits 31-24 of the base and offset values.
*/
-int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
- u64 *hole_offset, u64 *hole_size)
+static int get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
+ u64 *hole_offset, u64 *hole_size)
{
struct amd64_pvt *pvt = mci->pvt_info;
@@ -554,7 +554,292 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
return 0;
}
-EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
+
+#ifdef CONFIG_EDAC_DEBUG
+#define EDAC_DCT_ATTR_SHOW(reg) \
+static ssize_t reg##_show(struct device *dev, \
+ struct device_attribute *mattr, char *data) \
+{ \
+ struct mem_ctl_info *mci = to_mci(dev); \
+ struct amd64_pvt *pvt = mci->pvt_info; \
+ \
+ return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \
+}
+
+EDAC_DCT_ATTR_SHOW(dhar);
+EDAC_DCT_ATTR_SHOW(dbam0);
+EDAC_DCT_ATTR_SHOW(top_mem);
+EDAC_DCT_ATTR_SHOW(top_mem2);
+
+static ssize_t hole_show(struct device *dev, struct device_attribute *mattr,
+ char *data)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+
+ u64 hole_base = 0;
+ u64 hole_offset = 0;
+ u64 hole_size = 0;
+
+ get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
+
+ return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
+ hole_size);
+}
+
+/*
+ * update NUM_DBG_ATTRS in case you add new members
+ */
+static DEVICE_ATTR(dhar, S_IRUGO, dhar_show, NULL);
+static DEVICE_ATTR(dbam, S_IRUGO, dbam0_show, NULL);
+static DEVICE_ATTR(topmem, S_IRUGO, top_mem_show, NULL);
+static DEVICE_ATTR(topmem2, S_IRUGO, top_mem2_show, NULL);
+static DEVICE_ATTR(dram_hole, S_IRUGO, hole_show, NULL);
+
+static struct attribute *dbg_attrs[] = {
+ &dev_attr_dhar.attr,
+ &dev_attr_dbam.attr,
+ &dev_attr_topmem.attr,
+ &dev_attr_topmem2.attr,
+ &dev_attr_dram_hole.attr,
+ NULL
+};
+
+static const struct attribute_group dbg_group = {
+ .attrs = dbg_attrs,
+};
+
+static ssize_t inject_section_show(struct device *dev,
+ struct device_attribute *mattr, char *buf)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ return sprintf(buf, "0x%x\n", pvt->injection.section);
+}
+
+/*
+ * store error injection section value which refers to one of 4 16-byte sections
+ * within a 64-byte cacheline
+ *
+ * range: 0..3
+ */
+static ssize_t inject_section_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(data, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ if (value > 3) {
+ amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
+ return -EINVAL;
+ }
+
+ pvt->injection.section = (u32) value;
+ return count;
+}
+
+static ssize_t inject_word_show(struct device *dev,
+ struct device_attribute *mattr, char *buf)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ return sprintf(buf, "0x%x\n", pvt->injection.word);
+}
+
+/*
+ * store error injection word value which refers to one of 9 16-bit words of the
+ * 16-byte (128-bit + ECC bits) section
+ *
+ * range: 0..8
+ */
+static ssize_t inject_word_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(data, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ if (value > 8) {
+ amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
+ return -EINVAL;
+ }
+
+ pvt->injection.word = (u32) value;
+ return count;
+}
+
+static ssize_t inject_ecc_vector_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *buf)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
+}
+
+/*
+ * store 16 bit error injection vector which enables injecting errors to the
+ * corresponding bit within the error injection word above. When used during a
+ * DRAM ECC read, it holds the contents of the DRAM ECC bits.
+ */
+static ssize_t inject_ecc_vector_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(data, 16, &value);
+ if (ret < 0)
+ return ret;
+
+ if (value & 0xFFFF0000) {
+ amd64_warn("%s: invalid EccVector: 0x%lx\n", __func__, value);
+ return -EINVAL;
+ }
+
+ pvt->injection.bit_map = (u32) value;
+ return count;
+}
+
+/*
+ * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
+ * fields needed by the injection registers and read the NB Array Data Port.
+ */
+static ssize_t inject_read_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ u32 section, word_bits;
+ int ret;
+
+ ret = kstrtoul(data, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ /* Form value to choose 16-byte section of cacheline */
+ section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
+
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
+
+ word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection);
+
+ /* Issue 'word' and 'bit' along with the READ request */
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+
+ edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
+
+ return count;
+}
+
+/*
+ * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
+ * fields needed by the injection registers.
+ */
+static ssize_t inject_write_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u32 section, word_bits, tmp;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(data, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ /* Form value to choose 16-byte section of cacheline */
+ section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
+
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
+
+ word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection);
+
+ pr_notice_once("Don't forget to decrease MCE polling interval in\n"
+ "/sys/bus/machinecheck/devices/machinecheck<CPUNUM>/check_interval\n"
+ "so that you can get the error report faster.\n");
+
+ on_each_cpu(disable_caches, NULL, 1);
+
+ /* Issue 'word' and 'bit' along with the READ request */
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+
+ retry:
+ /* wait until injection happens */
+ amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp);
+ if (tmp & F10_NB_ARR_ECC_WR_REQ) {
+ cpu_relax();
+ goto retry;
+ }
+
+ on_each_cpu(enable_caches, NULL, 1);
+
+ edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
+
+ return count;
+}
+
+/*
+ * update NUM_INJ_ATTRS in case you add new members
+ */
+
+static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
+ inject_section_show, inject_section_store);
+static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR,
+ inject_word_show, inject_word_store);
+static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR,
+ inject_ecc_vector_show, inject_ecc_vector_store);
+static DEVICE_ATTR(inject_write, S_IWUSR,
+ NULL, inject_write_store);
+static DEVICE_ATTR(inject_read, S_IWUSR,
+ NULL, inject_read_store);
+
+static struct attribute *inj_attrs[] = {
+ &dev_attr_inject_section.attr,
+ &dev_attr_inject_word.attr,
+ &dev_attr_inject_ecc_vector.attr,
+ &dev_attr_inject_write.attr,
+ &dev_attr_inject_read.attr,
+ NULL
+};
+
+static umode_t inj_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ /* Families which have that injection hw */
+ if (pvt->fam >= 0x10 && pvt->fam <= 0x16)
+ return attr->mode;
+
+ return 0;
+}
+
+static const struct attribute_group inj_group = {
+ .attrs = inj_attrs,
+ .is_visible = inj_is_visible,
+};
+#endif /* CONFIG_EDAC_DEBUG */
/*
* Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
@@ -593,8 +878,7 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
dram_base = get_dram_base(pvt, pvt->mc_node_id);
- ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
- &hole_size);
+ ret = get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
if (!ret) {
if ((sys_addr >= (1ULL << 32)) &&
(sys_addr < ((1ULL << 32) + hole_size))) {
@@ -2665,7 +2949,7 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
if (pvt->umc) {
pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
if (!pvt->F0) {
- amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
+ edac_dbg(1, "F0 not found, device 0x%x\n", pci_id1);
return -ENODEV;
}
@@ -2674,7 +2958,7 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
pci_dev_put(pvt->F0);
pvt->F0 = NULL;
- amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
+ edac_dbg(1, "F6 not found: device 0x%x\n", pci_id2);
return -ENODEV;
}
@@ -2691,7 +2975,7 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
/* Reserve the ADDRESS MAP Device */
pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
if (!pvt->F1) {
- amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
+ edac_dbg(1, "F1 not found: device 0x%x\n", pci_id1);
return -ENODEV;
}
@@ -2701,7 +2985,7 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
pci_dev_put(pvt->F1);
pvt->F1 = NULL;
- amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
+ edac_dbg(1, "F2 not found: device 0x%x\n", pci_id2);
return -ENODEV;
}
@@ -3244,8 +3528,7 @@ static bool ecc_enabled(struct amd64_pvt *pvt)
MSR_IA32_MCG_CTL, nid);
}
- amd64_info("Node %d: DRAM ECC %s.\n",
- nid, (ecc_en ? "enabled" : "disabled"));
+ edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
if (!ecc_en || !nb_mce_en)
return false;
@@ -3342,10 +3625,13 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
fam_type = &family_types[F15_M60H_CPUS];
pvt->ops = &family_types[F15_M60H_CPUS].ops;
break;
+ /* Richland is only client */
+ } else if (pvt->model == 0x13) {
+ return NULL;
+ } else {
+ fam_type = &family_types[F15_CPUS];
+ pvt->ops = &family_types[F15_CPUS].ops;
}
-
- fam_type = &family_types[F15_CPUS];
- pvt->ops = &family_types[F15_CPUS].ops;
break;
case 0x16:
@@ -3402,20 +3688,13 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
return NULL;
}
- amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
- (pvt->fam == 0xf ?
- (pvt->ext_model >= K8_REV_F ? "revF or later "
- : "revE or earlier ")
- : ""), pvt->mc_node_id);
return fam_type;
}
static const struct attribute_group *amd64_edac_attr_groups[] = {
#ifdef CONFIG_EDAC_DEBUG
- &amd64_edac_dbg_group,
-#endif
-#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
- &amd64_edac_inj_group,
+ &dbg_group,
+ &inj_group,
#endif
NULL
};
@@ -3539,6 +3818,7 @@ static int probe_one_instance(unsigned int nid)
pvt->mc_node_id = nid;
pvt->F3 = F3;
+ ret = -ENODEV;
fam_type = per_family_init(pvt);
if (!fam_type)
goto err_enable;
@@ -3579,6 +3859,12 @@ static int probe_one_instance(unsigned int nid)
goto err_enable;
}
+ amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
+ (pvt->fam == 0xf ?
+ (pvt->ext_model >= K8_REV_F ? "revF or later "
+ : "revE or earlier ")
+ : ""), pvt->mc_node_id);
+
dump_misc_regs(pvt);
return ret;
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 52b5d03eeba0..85aa820bc165 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -462,14 +462,6 @@ struct ecc_settings {
} flags;
};
-#ifdef CONFIG_EDAC_DEBUG
-extern const struct attribute_group amd64_edac_dbg_group;
-#endif
-
-#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
-extern const struct attribute_group amd64_edac_inj_group;
-#endif
-
/*
* Each of the PCI Device IDs types have their own set of hardware accessor
* functions and per device encoding/decoding logic.
@@ -501,9 +493,6 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
#define amd64_write_pci_cfg(pdev, offset, val) \
__amd64_write_pci_cfg_dword(pdev, offset, val, __func__)
-int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
- u64 *hole_offset, u64 *hole_size);
-
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
/* Injection helpers */
diff --git a/drivers/edac/amd64_edac_dbg.c b/drivers/edac/amd64_edac_dbg.c
deleted file mode 100644
index 393be3351493..000000000000
--- a/drivers/edac/amd64_edac_dbg.c
+++ /dev/null
@@ -1,55 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "amd64_edac.h"
-
-#define EDAC_DCT_ATTR_SHOW(reg) \
-static ssize_t amd64_##reg##_show(struct device *dev, \
- struct device_attribute *mattr, \
- char *data) \
-{ \
- struct mem_ctl_info *mci = to_mci(dev); \
- struct amd64_pvt *pvt = mci->pvt_info; \
- return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \
-}
-
-EDAC_DCT_ATTR_SHOW(dhar);
-EDAC_DCT_ATTR_SHOW(dbam0);
-EDAC_DCT_ATTR_SHOW(top_mem);
-EDAC_DCT_ATTR_SHOW(top_mem2);
-
-static ssize_t amd64_hole_show(struct device *dev,
- struct device_attribute *mattr,
- char *data)
-{
- struct mem_ctl_info *mci = to_mci(dev);
-
- u64 hole_base = 0;
- u64 hole_offset = 0;
- u64 hole_size = 0;
-
- amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
-
- return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
- hole_size);
-}
-
-/*
- * update NUM_DBG_ATTRS in case you add new members
- */
-static DEVICE_ATTR(dhar, S_IRUGO, amd64_dhar_show, NULL);
-static DEVICE_ATTR(dbam, S_IRUGO, amd64_dbam0_show, NULL);
-static DEVICE_ATTR(topmem, S_IRUGO, amd64_top_mem_show, NULL);
-static DEVICE_ATTR(topmem2, S_IRUGO, amd64_top_mem2_show, NULL);
-static DEVICE_ATTR(dram_hole, S_IRUGO, amd64_hole_show, NULL);
-
-static struct attribute *amd64_edac_dbg_attrs[] = {
- &dev_attr_dhar.attr,
- &dev_attr_dbam.attr,
- &dev_attr_topmem.attr,
- &dev_attr_topmem2.attr,
- &dev_attr_dram_hole.attr,
- NULL
-};
-
-const struct attribute_group amd64_edac_dbg_group = {
- .attrs = amd64_edac_dbg_attrs,
-};
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
deleted file mode 100644
index d96d6116f0fb..000000000000
--- a/drivers/edac/amd64_edac_inj.c
+++ /dev/null
@@ -1,235 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "amd64_edac.h"
-
-static ssize_t amd64_inject_section_show(struct device *dev,
- struct device_attribute *mattr,
- char *buf)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct amd64_pvt *pvt = mci->pvt_info;
- return sprintf(buf, "0x%x\n", pvt->injection.section);
-}
-
-/*
- * store error injection section value which refers to one of 4 16-byte sections
- * within a 64-byte cacheline
- *
- * range: 0..3
- */
-static ssize_t amd64_inject_section_store(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct amd64_pvt *pvt = mci->pvt_info;
- unsigned long value;
- int ret;
-
- ret = kstrtoul(data, 10, &value);
- if (ret < 0)
- return ret;
-
- if (value > 3) {
- amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
- return -EINVAL;
- }
-
- pvt->injection.section = (u32) value;
- return count;
-}
-
-static ssize_t amd64_inject_word_show(struct device *dev,
- struct device_attribute *mattr,
- char *buf)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct amd64_pvt *pvt = mci->pvt_info;
- return sprintf(buf, "0x%x\n", pvt->injection.word);
-}
-
-/*
- * store error injection word value which refers to one of 9 16-bit word of the
- * 16-byte (128-bit + ECC bits) section
- *
- * range: 0..8
- */
-static ssize_t amd64_inject_word_store(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct amd64_pvt *pvt = mci->pvt_info;
- unsigned long value;
- int ret;
-
- ret = kstrtoul(data, 10, &value);
- if (ret < 0)
- return ret;
-
- if (value > 8) {
- amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
- return -EINVAL;
- }
-
- pvt->injection.word = (u32) value;
- return count;
-}
-
-static ssize_t amd64_inject_ecc_vector_show(struct device *dev,
- struct device_attribute *mattr,
- char *buf)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct amd64_pvt *pvt = mci->pvt_info;
- return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
-}
-
-/*
- * store 16 bit error injection vector which enables injecting errors to the
- * corresponding bit within the error injection word above. When used during a
- * DRAM ECC read, it holds the contents of the of the DRAM ECC bits.
- */
-static ssize_t amd64_inject_ecc_vector_store(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct amd64_pvt *pvt = mci->pvt_info;
- unsigned long value;
- int ret;
-
- ret = kstrtoul(data, 16, &value);
- if (ret < 0)
- return ret;
-
- if (value & 0xFFFF0000) {
- amd64_warn("%s: invalid EccVector: 0x%lx\n", __func__, value);
- return -EINVAL;
- }
-
- pvt->injection.bit_map = (u32) value;
- return count;
-}
-
-/*
- * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
- * fields needed by the injection registers and read the NB Array Data Port.
- */
-static ssize_t amd64_inject_read_store(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct amd64_pvt *pvt = mci->pvt_info;
- unsigned long value;
- u32 section, word_bits;
- int ret;
-
- ret = kstrtoul(data, 10, &value);
- if (ret < 0)
- return ret;
-
- /* Form value to choose 16-byte section of cacheline */
- section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
-
- amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
-
- word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection);
-
- /* Issue 'word' and 'bit' along with the READ request */
- amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
-
- edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
-
- return count;
-}
-
-/*
- * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
- * fields needed by the injection registers.
- */
-static ssize_t amd64_inject_write_store(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
-{
- struct mem_ctl_info *mci = to_mci(dev);
- struct amd64_pvt *pvt = mci->pvt_info;
- u32 section, word_bits, tmp;
- unsigned long value;
- int ret;
-
- ret = kstrtoul(data, 10, &value);
- if (ret < 0)
- return ret;
-
- /* Form value to choose 16-byte section of cacheline */
- section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
-
- amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
-
- word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection);
-
- pr_notice_once("Don't forget to decrease MCE polling interval in\n"
- "/sys/bus/machinecheck/devices/machinecheck<CPUNUM>/check_interval\n"
- "so that you can get the error report faster.\n");
-
- on_each_cpu(disable_caches, NULL, 1);
-
- /* Issue 'word' and 'bit' along with the READ request */
- amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
-
- retry:
- /* wait until injection happens */
- amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp);
- if (tmp & F10_NB_ARR_ECC_WR_REQ) {
- cpu_relax();
- goto retry;
- }
-
- on_each_cpu(enable_caches, NULL, 1);
-
- edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
-
- return count;
-}
-
-/*
- * update NUM_INJ_ATTRS in case you add new members
- */
-
-static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
- amd64_inject_section_show, amd64_inject_section_store);
-static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR,
- amd64_inject_word_show, amd64_inject_word_store);
-static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR,
- amd64_inject_ecc_vector_show, amd64_inject_ecc_vector_store);
-static DEVICE_ATTR(inject_write, S_IWUSR,
- NULL, amd64_inject_write_store);
-static DEVICE_ATTR(inject_read, S_IWUSR,
- NULL, amd64_inject_read_store);
-
-static struct attribute *amd64_edac_inj_attrs[] = {
- &dev_attr_inject_section.attr,
- &dev_attr_inject_word.attr,
- &dev_attr_inject_ecc_vector.attr,
- &dev_attr_inject_write.attr,
- &dev_attr_inject_read.attr,
- NULL
-};
-
-static umode_t amd64_edac_inj_is_visible(struct kobject *kobj,
- struct attribute *attr, int idx)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
- struct amd64_pvt *pvt = mci->pvt_info;
-
- if (pvt->fam < 0x10)
- return 0;
- return attr->mode;
-}
-
-const struct attribute_group amd64_edac_inj_group = {
- .attrs = amd64_edac_inj_attrs,
- .is_visible = amd64_edac_inj_is_visible,
-};
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 677095769182..6793f6d799e7 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -1058,7 +1058,7 @@ static int ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
/* Initialize strings */
mci->mod_name = PPC4XX_EDAC_MODULE_NAME;
- mci->ctl_name = ppc4xx_edac_match->compatible,
+ mci->ctl_name = ppc4xx_edac_match->compatible;
mci->dev_name = np->full_name;
/* Initialize callbacks */
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 1d2c27a00a4a..2ccd1db5e98f 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1916,7 +1916,7 @@ static int xgene_edac_probe(struct platform_device *pdev)
int i;
for (i = 0; i < 3; i++) {
- irq = platform_get_irq(pdev, i);
+ irq = platform_get_irq_optional(pdev, i);
if (irq < 0) {
dev_err(&pdev->dev, "No IRQ resource\n");
rc = -EINVAL;
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 80db43a22069..68216988391f 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -192,7 +192,9 @@ static int fw_unit_remove(struct device *dev)
struct fw_driver *driver =
container_of(dev->driver, struct fw_driver, driver);
- return driver->remove(fw_unit(dev)), 0;
+ driver->remove(fw_unit(dev));
+
+ return 0;
}
static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 5392e1fc6b4e..cacdf1589b10 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -848,8 +848,6 @@ static int scmi_remove(struct platform_device *pdev)
struct scmi_info *info = platform_get_drvdata(pdev);
struct idr *idr = &info->tx_idr;
- scmi_notification_exit(&info->handle);
-
mutex_lock(&scmi_list_mutex);
if (info->users)
ret = -EBUSY;
@@ -860,6 +858,8 @@ static int scmi_remove(struct platform_device *pdev)
if (ret)
return ret;
+ scmi_notification_exit(&info->handle);
+
/* Safe to free channels since no more users */
ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
idr_destroy(&info->tx_idr);
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index 82a82a5dc86a..fcbe2677f84b 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -9,9 +9,11 @@
#include <linux/arm-smccc.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/slab.h>
#include "common.h"
@@ -23,6 +25,8 @@
* @shmem: Transmit/Receive shared memory area
* @shmem_lock: Lock to protect access to Tx/Rx shared memory area
* @func_id: smc/hvc call function id
+ * @irq: Optional; used when the platform signals message completion by interrupt.
+ * @tx_complete: Optional; used only when @irq is valid.
*/
struct scmi_smc {
@@ -30,8 +34,19 @@ struct scmi_smc {
struct scmi_shared_mem __iomem *shmem;
struct mutex shmem_lock;
u32 func_id;
+ int irq;
+ struct completion tx_complete;
};
+static irqreturn_t smc_msg_done_isr(int irq, void *data)
+{
+ struct scmi_smc *scmi_info = data;
+
+ complete(&scmi_info->tx_complete);
+
+ return IRQ_HANDLED;
+}
+
static bool smc_chan_available(struct device *dev, int idx)
{
struct device_node *np = of_parse_phandle(dev->of_node, "shmem", 0);
@@ -51,7 +66,7 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
struct resource res;
struct device_node *np;
u32 func_id;
- int ret;
+ int ret, irq;
if (!tx)
return -ENODEV;
@@ -79,6 +94,24 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
if (ret < 0)
return ret;
+ /*
+ * If there is an interrupt named "a2p", then the servicing and
+ * completion of a message are signaled by an interrupt rather than by
+ * the return of the SMC call.
+ */
+ irq = of_irq_get_byname(cdev->of_node, "a2p");
+ if (irq > 0) {
+ ret = devm_request_irq(dev, irq, smc_msg_done_isr,
+ IRQF_NO_SUSPEND,
+ dev_name(dev), scmi_info);
+ if (ret) {
+ dev_err(dev, "failed to setup SCMI smc irq\n");
+ return ret;
+ }
+ init_completion(&scmi_info->tx_complete);
+ scmi_info->irq = irq;
+ }
+
scmi_info->func_id = func_id;
scmi_info->cinfo = cinfo;
mutex_init(&scmi_info->shmem_lock);
@@ -110,7 +143,14 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
shmem_tx_prepare(scmi_info->shmem, xfer);
+ if (scmi_info->irq)
+ reinit_completion(&scmi_info->tx_complete);
+
arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res);
+
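+ /*
+ * When an a2p interrupt is provided, the SMC call returns before the
+ * message is processed; wait for the ISR to signal completion.
+ */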
+ if (scmi_info->irq)
+ wait_for_completion(&scmi_info->tx_complete);
+
scmi_rx_callback(scmi_info->cinfo, shmem_read_header(scmi_info->shmem));
mutex_unlock(&scmi_info->shmem_lock);
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index 34f53d898acb..e1926483ae2f 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -3,8 +3,9 @@
* apple-properties.c - EFI device properties on Macs
* Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
*
- * Note, all properties are considered as u8 arrays.
- * To get a value of any of them the caller must use device_property_read_u8_array().
+ * Properties are stored either as:
+ * u8 arrays which can be retrieved with device_property_read_u8_array() or
+ * booleans which can be queried with device_property_present().
*/
#define pr_fmt(fmt) "apple-properties: " fmt
@@ -88,8 +89,12 @@ static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
entry_data = ptr + key_len + sizeof(val_len);
entry_len = val_len - sizeof(val_len);
- entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
- entry_len);
+ if (entry_len)
+ entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
+ entry_len);
+ else
+ entry[i] = PROPERTY_ENTRY_BOOL(key);
+
if (dump_properties) {
dev_info(dev, "property: %s\n", key);
print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 8a94388e38b3..c23466e05e60 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -38,6 +38,8 @@ KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
# remove SCS flags from all objects in this directory
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
+# disable LTO
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS))
GCOV_PROFILE := n
# Sanitizer runtimes are unavailable and cannot be linked here.
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 22ece1ad68a8..b69d63143e0d 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -61,10 +61,10 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
status = efi_get_random_bytes(sizeof(phys_seed),
(u8 *)&phys_seed);
if (status == EFI_NOT_FOUND) {
- efi_info("EFI_RNG_PROTOCOL unavailable, KASLR will be disabled\n");
+ efi_info("EFI_RNG_PROTOCOL unavailable\n");
efi_nokaslr = true;
} else if (status != EFI_SUCCESS) {
- efi_err("efi_get_random_bytes() failed (0x%lx), KASLR will be disabled\n",
+ efi_err("efi_get_random_bytes() failed (0x%lx)\n",
status);
efi_nokaslr = true;
}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index b50a6c67d9bd..cde0a2ef507d 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -672,7 +672,7 @@ typedef union efi_tcg2_protocol efi_tcg2_protocol_t;
union efi_tcg2_protocol {
struct {
void *get_capability;
- efi_status_t (__efiapi *get_event_log)(efi_handle_t,
+ efi_status_t (__efiapi *get_event_log)(efi_tcg2_protocol_t *,
efi_tcg2_event_log_format,
efi_physical_addr_t *,
efi_physical_addr_t *,
@@ -849,4 +849,13 @@ void efi_handle_post_ebs_state(void);
enum efi_secureboot_mode efi_get_secureboot(void);
+#ifdef CONFIG_RESET_ATTACK_MITIGATION
+void efi_enable_reset_attack_mitigation(void);
+#else
+static inline void
+efi_enable_reset_attack_mitigation(void) { }
+#endif
+
+void efi_retrieve_tpm2_eventlog(void);
+
#endif
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 0205987a4fd4..dc83ea118c67 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -46,14 +46,13 @@ static int coreboot_bus_probe(struct device *dev)
static int coreboot_bus_remove(struct device *dev)
{
- int ret = 0;
struct coreboot_device *device = CB_DEV(dev);
struct coreboot_driver *driver = CB_DRV(dev->driver);
if (driver->remove)
- ret = driver->remove(device);
+ driver->remove(device);
- return ret;
+ return 0;
}
static struct bus_type coreboot_bus_type = {
diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
index 7b7b4a6eedda..beb778674acd 100644
--- a/drivers/firmware/google/coreboot_table.h
+++ b/drivers/firmware/google/coreboot_table.h
@@ -72,7 +72,7 @@ struct coreboot_device {
/* A driver for handling devices described in coreboot tables. */
struct coreboot_driver {
int (*probe)(struct coreboot_device *);
- int (*remove)(struct coreboot_device *);
+ void (*remove)(struct coreboot_device *);
struct device_driver drv;
u32 tag;
};
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
index 916f26adc595..c6dcc1ef93ac 100644
--- a/drivers/firmware/google/framebuffer-coreboot.c
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -72,13 +72,11 @@ static int framebuffer_probe(struct coreboot_device *dev)
return PTR_ERR_OR_ZERO(pdev);
}
-static int framebuffer_remove(struct coreboot_device *dev)
+static void framebuffer_remove(struct coreboot_device *dev)
{
struct platform_device *pdev = dev_get_drvdata(&dev->dev);
platform_device_unregister(pdev);
-
- return 0;
}
static struct coreboot_driver framebuffer_driver = {
diff --git a/drivers/firmware/google/memconsole-coreboot.c b/drivers/firmware/google/memconsole-coreboot.c
index d17e4d6ac9bc..74b5286518ee 100644
--- a/drivers/firmware/google/memconsole-coreboot.c
+++ b/drivers/firmware/google/memconsole-coreboot.c
@@ -91,11 +91,9 @@ static int memconsole_probe(struct coreboot_device *dev)
return memconsole_sysfs_init();
}
-static int memconsole_remove(struct coreboot_device *dev)
+static void memconsole_remove(struct coreboot_device *dev)
{
memconsole_exit();
-
- return 0;
}
static struct coreboot_driver memconsole_driver = {
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index d23c5c69ab52..ee6e08c0592b 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -298,14 +298,12 @@ static int vpd_probe(struct coreboot_device *dev)
return 0;
}
-static int vpd_remove(struct coreboot_device *dev)
+static void vpd_remove(struct coreboot_device *dev)
{
vpd_section_destroy(&ro_vpd);
vpd_section_destroy(&rw_vpd);
kobject_put(vpd_kobj);
-
- return 0;
}
static struct coreboot_driver vpd_driver = {
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index 1d2e5b85d7ca..c027d99f2a59 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -13,6 +13,7 @@ config IMX_DSP
config IMX_SCU
bool "IMX SCU Protocol driver"
depends on IMX_MBOX
+ select SOC_BUS
help
The System Controller Firmware (SCFW) is a low-level system function
which runs on a dedicated Cortex-M core to provide power, clock, and
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 7be48c1bec96..f57779fc7ee9 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -965,8 +965,11 @@ EXPORT_SYMBOL(qcom_scm_ice_available);
* qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
* @index: the keyslot to invalidate
*
- * The UFSHCI standard defines a standard way to do this, but it doesn't work on
- * these SoCs; only this SCM call does.
+ * The UFSHCI and eMMC standards define a standard way to do this, but it
+ * doesn't work on these SoCs; only this SCM call does.
+ *
+ * It is assumed that the SoC has only one ICE instance being used, as this SCM
+ * call doesn't specify which ICE instance the keyslot belongs to.
*
* Return: 0 on success; -errno on failure.
*/
@@ -995,10 +998,13 @@ EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);
* units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
*
* Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
- * can then be used to encrypt/decrypt UFS I/O requests inline.
+ * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
+ *
+ * The UFSHCI and eMMC standards define a standard way to do this, but it
+ * doesn't work on these SoCs; only this SCM call does.
*
- * The UFSHCI standard defines a standard way to do this, but it doesn't work on
- * these SoCs; only this SCM call does.
+ * It is assumed that the SoC has only one ICE instance being used, as this SCM
+ * call doesn't specify which ICE instance the keyslot belongs to.
*
* Return: 0 on success; -errno on failure.
*/
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index 00c88b809c0c..d52bfc5ed5e4 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -5,16 +5,22 @@
#define pr_fmt(fmt) "smccc: " fmt
+#include <linux/cache.h>
#include <linux/init.h>
#include <linux/arm-smccc.h>
+#include <asm/archrandom.h>
static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE;
+bool __ro_after_init smccc_trng_available = false;
+
void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
{
smccc_version = version;
smccc_conduit = conduit;
+
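+ /* Probe once, at version init, whether the firmware implements the SMCCC TRNG interface */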
+ smccc_trng_available = smccc_probe_trng();
}
enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 5645226ca3ce..5ff9438b7b46 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -192,6 +192,17 @@ config FPGA_DFL_AFU
to the FPGA infrastructure via a Port. There may be more than one
Port/AFU per DFL based FPGA device.
+config FPGA_DFL_NIOS_INTEL_PAC_N3000
+ tristate "FPGA DFL NIOS Driver for Intel PAC N3000"
+ depends on FPGA_DFL
+ select REGMAP
+ help
+ This is the driver for the N3000 Nios private feature on Intel
+ PAC (Programmable Acceleration Card) N3000. It communicates
+ with the embedded Nios processor to configure the retimers on
+ the card. It also instantiates the SPI master (spi-altera) for
+ the card's BMC (Board Management Controller).
+
config FPGA_DFL_PCI
tristate "FPGA DFL PCIe Device Driver"
depends on PCI && FPGA_DFL
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index d8e21dfc6778..18dc9885883a 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -44,5 +44,7 @@ dfl-fme-objs += dfl-fme-perf.o
dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
dfl-afu-objs += dfl-afu-error.o
+obj-$(CONFIG_FPGA_DFL_NIOS_INTEL_PAC_N3000) += dfl-n3000-nios.o
+
# Drivers for FPGAs which implement DFL
obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o
diff --git a/drivers/fpga/dfl-fme-perf.c b/drivers/fpga/dfl-fme-perf.c
index 531266287eee..4299145ef347 100644
--- a/drivers/fpga/dfl-fme-perf.c
+++ b/drivers/fpga/dfl-fme-perf.c
@@ -192,7 +192,7 @@ static struct attribute *fme_perf_cpumask_attrs[] = {
NULL,
};
-static struct attribute_group fme_perf_cpumask_group = {
+static const struct attribute_group fme_perf_cpumask_group = {
.attrs = fme_perf_cpumask_attrs,
};
@@ -225,7 +225,7 @@ static struct attribute *fme_perf_format_attrs[] = {
NULL,
};
-static struct attribute_group fme_perf_format_group = {
+static const struct attribute_group fme_perf_format_group = {
.name = "format",
.attrs = fme_perf_format_attrs,
};
@@ -239,7 +239,7 @@ static struct attribute *fme_perf_events_attrs_empty[] = {
NULL,
};
-static struct attribute_group fme_perf_events_group = {
+static const struct attribute_group fme_perf_events_group = {
.name = "events",
.attrs = fme_perf_events_attrs_empty,
};
diff --git a/drivers/fpga/dfl-n3000-nios.c b/drivers/fpga/dfl-n3000-nios.c
new file mode 100644
index 000000000000..7a95366f6516
--- /dev/null
+++ b/drivers/fpga/dfl-n3000-nios.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DFL device driver for Nios private feature on Intel PAC (Programmable
+ * Acceleration Card) N3000
+ *
+ * Copyright (C) 2019-2020 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xu Yilun <yilun.xu@intel.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/dfl.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/stddef.h>
+#include <linux/spi/altera.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+/*
+ * N3000 Nios private feature registers, named as NIOS_SPI_XX on spec.
+ * NS is the abbreviation of NIOS_SPI.
+ */
+#define N3000_NS_PARAM 0x8
+#define N3000_NS_PARAM_SHIFT_MODE_MSK BIT_ULL(1)
+#define N3000_NS_PARAM_SHIFT_MODE_MSB 0
+#define N3000_NS_PARAM_SHIFT_MODE_LSB 1
+#define N3000_NS_PARAM_DATA_WIDTH GENMASK_ULL(7, 2)
+#define N3000_NS_PARAM_NUM_CS GENMASK_ULL(13, 8)
+#define N3000_NS_PARAM_CLK_POL BIT_ULL(14)
+#define N3000_NS_PARAM_CLK_PHASE BIT_ULL(15)
+#define N3000_NS_PARAM_PERIPHERAL_ID GENMASK_ULL(47, 32)
+
+#define N3000_NS_CTRL 0x10
+#define N3000_NS_CTRL_WR_DATA GENMASK_ULL(31, 0)
+#define N3000_NS_CTRL_ADDR GENMASK_ULL(44, 32)
+#define N3000_NS_CTRL_CMD_MSK GENMASK_ULL(63, 62)
+#define N3000_NS_CTRL_CMD_NOP 0
+#define N3000_NS_CTRL_CMD_RD 1
+#define N3000_NS_CTRL_CMD_WR 2
+
+#define N3000_NS_STAT 0x18
+#define N3000_NS_STAT_RD_DATA GENMASK_ULL(31, 0)
+#define N3000_NS_STAT_RW_VAL BIT_ULL(32)
+
+/* Nios handshake registers, indirect access */
+#define N3000_NIOS_INIT 0x1000
+#define N3000_NIOS_INIT_DONE BIT(0)
+#define N3000_NIOS_INIT_START BIT(1)
+/* Mode for retimer A, link 0, the same below */
+#define N3000_NIOS_INIT_REQ_FEC_MODE_A0_MSK GENMASK(9, 8)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_A1_MSK GENMASK(11, 10)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_A2_MSK GENMASK(13, 12)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_A3_MSK GENMASK(15, 14)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_B0_MSK GENMASK(17, 16)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_B1_MSK GENMASK(19, 18)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_B2_MSK GENMASK(21, 20)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_B3_MSK GENMASK(23, 22)
+#define N3000_NIOS_INIT_REQ_FEC_MODE_NO 0x0
+#define N3000_NIOS_INIT_REQ_FEC_MODE_KR 0x1
+#define N3000_NIOS_INIT_REQ_FEC_MODE_RS 0x2
+
+#define N3000_NIOS_FW_VERSION 0x1004
+#define N3000_NIOS_FW_VERSION_PATCH GENMASK(23, 20)
+#define N3000_NIOS_FW_VERSION_MINOR GENMASK(27, 24)
+#define N3000_NIOS_FW_VERSION_MAJOR GENMASK(31, 28)
+
+/* The retimers we use on Intel PAC N3000 is Parkvale, abbreviated to PKVL */
+#define N3000_NIOS_PKVL_A_MODE_STS 0x1020
+#define N3000_NIOS_PKVL_B_MODE_STS 0x1024
+#define N3000_NIOS_PKVL_MODE_STS_GROUP_MSK GENMASK(15, 8)
+#define N3000_NIOS_PKVL_MODE_STS_GROUP_OK 0x0
+#define N3000_NIOS_PKVL_MODE_STS_ID_MSK GENMASK(7, 0)
+/* When GROUP MASK field == GROUP_OK */
+#define N3000_NIOS_PKVL_MODE_ID_RESET 0x0
+#define N3000_NIOS_PKVL_MODE_ID_4X10G 0x1
+#define N3000_NIOS_PKVL_MODE_ID_4X25G 0x2
+#define N3000_NIOS_PKVL_MODE_ID_2X25G 0x3
+#define N3000_NIOS_PKVL_MODE_ID_2X25G_2X10G 0x4
+#define N3000_NIOS_PKVL_MODE_ID_1X25G 0x5
+
+#define N3000_NIOS_REGBUS_RETRY_COUNT 10000 /* loop count */
+
+#define N3000_NIOS_INIT_TIMEOUT 10000000 /* usec */
+#define N3000_NIOS_INIT_TIME_INTV 100000 /* usec */
+
+#define N3000_NIOS_INIT_REQ_FEC_MODE_MSK_ALL \
+ (N3000_NIOS_INIT_REQ_FEC_MODE_A0_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_A1_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_A2_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_A3_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_B0_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_B1_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_B2_MSK | \
+ N3000_NIOS_INIT_REQ_FEC_MODE_B3_MSK)
+
+#define N3000_NIOS_INIT_REQ_FEC_MODE_NO_ALL \
+ (FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_NO))
+
+#define N3000_NIOS_INIT_REQ_FEC_MODE_KR_ALL \
+ (FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_KR))
+
+#define N3000_NIOS_INIT_REQ_FEC_MODE_RS_ALL \
+ (FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_A3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B0_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B1_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B2_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS) | \
+ FIELD_PREP(N3000_NIOS_INIT_REQ_FEC_MODE_B3_MSK, \
+ N3000_NIOS_INIT_REQ_FEC_MODE_RS))
+
+struct n3000_nios {
+ void __iomem *base;
+ struct regmap *regmap;
+ struct device *dev;
+ struct platform_device *altera_spi;
+};
+
+static ssize_t nios_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct n3000_nios *nn = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(nn->regmap, N3000_NIOS_FW_VERSION, &val);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%x.%x.%x\n",
+ (u8)FIELD_GET(N3000_NIOS_FW_VERSION_MAJOR, val),
+ (u8)FIELD_GET(N3000_NIOS_FW_VERSION_MINOR, val),
+ (u8)FIELD_GET(N3000_NIOS_FW_VERSION_PATCH, val));
+}
+static DEVICE_ATTR_RO(nios_fw_version);
+
+#define IS_MODE_STATUS_OK(mode_stat) \
+ (FIELD_GET(N3000_NIOS_PKVL_MODE_STS_GROUP_MSK, (mode_stat)) == \
+ N3000_NIOS_PKVL_MODE_STS_GROUP_OK)
+
+#define IS_RETIMER_FEC_SUPPORTED(retimer_mode) \
+ ((retimer_mode) != N3000_NIOS_PKVL_MODE_ID_RESET && \
+ (retimer_mode) != N3000_NIOS_PKVL_MODE_ID_4X10G)
+
+static int get_retimer_mode(struct n3000_nios *nn, unsigned int mode_stat_reg,
+ unsigned int *retimer_mode)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(nn->regmap, mode_stat_reg, &val);
+ if (ret)
+ return ret;
+
+ if (!IS_MODE_STATUS_OK(val))
+ return -EFAULT;
+
+ *retimer_mode = FIELD_GET(N3000_NIOS_PKVL_MODE_STS_ID_MSK, val);
+
+ return 0;
+}
+
+static ssize_t retimer_A_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct n3000_nios *nn = dev_get_drvdata(dev);
+ unsigned int mode;
+ int ret;
+
+ ret = get_retimer_mode(nn, N3000_NIOS_PKVL_A_MODE_STS, &mode);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%x\n", mode);
+}
+static DEVICE_ATTR_RO(retimer_A_mode);
+
+static ssize_t retimer_B_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct n3000_nios *nn = dev_get_drvdata(dev);
+ unsigned int mode;
+ int ret;
+
+ ret = get_retimer_mode(nn, N3000_NIOS_PKVL_B_MODE_STS, &mode);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%x\n", mode);
+}
+static DEVICE_ATTR_RO(retimer_B_mode);
+
+static ssize_t fec_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned int val, retimer_a_mode, retimer_b_mode, fec_modes;
+ struct n3000_nios *nn = dev_get_drvdata(dev);
+ int ret;
+
+ /* FEC mode setting is not supported in early FW versions */
+ ret = regmap_read(nn->regmap, N3000_NIOS_FW_VERSION, &val);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(N3000_NIOS_FW_VERSION_MAJOR, val) < 3)
+ return sysfs_emit(buf, "not supported\n");
+
+ /* If no 25G links, FEC mode setting is not supported either */
+ ret = get_retimer_mode(nn, N3000_NIOS_PKVL_A_MODE_STS, &retimer_a_mode);
+ if (ret)
+ return ret;
+
+ ret = get_retimer_mode(nn, N3000_NIOS_PKVL_B_MODE_STS, &retimer_b_mode);
+ if (ret)
+ return ret;
+
+ if (!IS_RETIMER_FEC_SUPPORTED(retimer_a_mode) &&
+ !IS_RETIMER_FEC_SUPPORTED(retimer_b_mode))
+ return sysfs_emit(buf, "not supported\n");
+
+ /* get the valid FEC mode for 25G links */
+ ret = regmap_read(nn->regmap, N3000_NIOS_INIT, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * FEC mode should always be the same for all links, as we set them
+ * in this way.
+ */
+ fec_modes = (val & N3000_NIOS_INIT_REQ_FEC_MODE_MSK_ALL);
+ if (fec_modes == N3000_NIOS_INIT_REQ_FEC_MODE_NO_ALL)
+ return sysfs_emit(buf, "no\n");
+ else if (fec_modes == N3000_NIOS_INIT_REQ_FEC_MODE_KR_ALL)
+ return sysfs_emit(buf, "kr\n");
+ else if (fec_modes == N3000_NIOS_INIT_REQ_FEC_MODE_RS_ALL)
+ return sysfs_emit(buf, "rs\n");
+
+ return -EFAULT;
+}
+static DEVICE_ATTR_RO(fec_mode);
+
+static struct attribute *n3000_nios_attrs[] = {
+ &dev_attr_nios_fw_version.attr,
+ &dev_attr_retimer_A_mode.attr,
+ &dev_attr_retimer_B_mode.attr,
+ &dev_attr_fec_mode.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(n3000_nios);
+
+static int n3000_nios_init_done_check(struct n3000_nios *nn)
+{
+ unsigned int val, state_a, state_b;
+ struct device *dev = nn->dev;
+ int ret, ret2;
+
+ /*
+ * The SPI master is shared with the Nios core inside the FPGA. The Nios
+ * uses it to do some one-time initialization after power up, and then
+ * releases control to the OS. The driver needs to poll INIT_DONE to see
+ * when it may take control.
+ *
+ * Please note that since Nios firmware version 3.0.0, INIT_START is
+ * introduced, so the driver needs to trigger START first and then check
+ * INIT_DONE.
+ */
+
+ ret = regmap_read(nn->regmap, N3000_NIOS_FW_VERSION, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * If the Nios version register is completely uninitialized (== 0x0), the
+ * Nios firmware is missing. The host can then take control of the SPI
+ * master safely, but the Nios initialization work has not been done. To
+ * restore the card, a new Nios firmware must be reprogrammed via the BMC
+ * chip on the SPI bus. So the driver doesn't error out; it continues to
+ * create the spi controller device and spi_board_info for the BMC.
+ */
+ if (val == 0) {
+ dev_err(dev, "Nios version reg = 0x%x, skip INIT_DONE check, but the retimer may be uninitialized\n",
+ val);
+ return 0;
+ }
+
+ if (FIELD_GET(N3000_NIOS_FW_VERSION_MAJOR, val) >= 3) {
+ /* read NIOS_INIT to check if retimer initialization is done */
+ ret = regmap_read(nn->regmap, N3000_NIOS_INIT, &val);
+ if (ret)
+ return ret;
+
+ /* check if retimers are initialized already */
+ if (val & (N3000_NIOS_INIT_DONE | N3000_NIOS_INIT_START))
+ goto nios_init_done;
+
+ /* configure FEC mode per module param */
+ val = N3000_NIOS_INIT_START;
+
+ /*
+ * When the retimer is to be set to 10G mode, there is no FEC
+ * mode setting, so the REQ_FEC_MODE field will be ignored by
+ * the Nios firmware in this case. But we should still fill the
+ * FEC mode field because the host cannot get the retimer
+ * working mode until the Nios init is done.
+ *
+ * For now the driver doesn't support switching the retimer FEC
+ * mode on the user's request. It is always set to Reed
+ * Solomon FEC.
+ *
+ * The driver will set the same FEC mode for all links.
+ */
+ val |= N3000_NIOS_INIT_REQ_FEC_MODE_RS_ALL;
+
+ ret = regmap_write(nn->regmap, N3000_NIOS_INIT, val);
+ if (ret)
+ return ret;
+ }
+
+nios_init_done:
+ /* polls on NIOS_INIT_DONE */
+ ret = regmap_read_poll_timeout(nn->regmap, N3000_NIOS_INIT, val,
+ val & N3000_NIOS_INIT_DONE,
+ N3000_NIOS_INIT_TIME_INTV,
+ N3000_NIOS_INIT_TIMEOUT);
+ if (ret)
+ dev_err(dev, "NIOS_INIT_DONE %s\n",
+ (ret == -ETIMEDOUT) ? "timed out" : "check error");
+
+ ret2 = regmap_read(nn->regmap, N3000_NIOS_PKVL_A_MODE_STS, &state_a);
+ if (ret2)
+ return ret2;
+
+ ret2 = regmap_read(nn->regmap, N3000_NIOS_PKVL_B_MODE_STS, &state_b);
+ if (ret2)
+ return ret2;
+
+ if (!ret) {
+ /*
+ * After INIT_DONE is detected, the driver still needs to check
+ * whether the Nios firmware reported any error during the
+ * retimer configuration.
+ */
+ if (IS_MODE_STATUS_OK(state_a) && IS_MODE_STATUS_OK(state_b))
+ return 0;
+
+ /*
+ * If the retimer configuration failed, the Nios firmware
+ * will still release the spi controller for the host to
+ * communicate with the BMC. This makes it possible to
+ * reprogram a new Nios firmware and restore the card. So the
+ * driver doesn't error out; it continues to create the spi
+ * controller device and spi_board_info for the BMC.
+ */
+ dev_err(dev, "NIOS_INIT_DONE OK, but err on retimer init\n");
+ }
+
+ dev_err(nn->dev, "PKVL_A_MODE_STS 0x%x\n", state_a);
+ dev_err(nn->dev, "PKVL_B_MODE_STS 0x%x\n", state_b);
+
+ return ret;
+}
+
+static struct spi_board_info m10_n3000_info = {
+ .modalias = "m10-n3000",
+ .max_speed_hz = 12500000,
+ .bus_num = 0,
+ .chip_select = 0,
+};
+
+static int create_altera_spi_controller(struct n3000_nios *nn)
+{
+ struct altera_spi_platform_data pdata = { 0 };
+ struct platform_device_info pdevinfo = { 0 };
+ void __iomem *base = nn->base;
+ u64 v;
+
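+ /*
+ * Derive the SPI controller setup (clock polarity/phase, number of
+ * chip selects, word width) from the NIOS_SPI parameter register.
+ */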
+ v = readq(base + N3000_NS_PARAM);
+
+ pdata.mode_bits = SPI_CS_HIGH;
+ if (FIELD_GET(N3000_NS_PARAM_CLK_POL, v))
+ pdata.mode_bits |= SPI_CPOL;
+ if (FIELD_GET(N3000_NS_PARAM_CLK_PHASE, v))
+ pdata.mode_bits |= SPI_CPHA;
+
+ pdata.num_chipselect = FIELD_GET(N3000_NS_PARAM_NUM_CS, v);
+ pdata.bits_per_word_mask =
+ SPI_BPW_RANGE_MASK(1, FIELD_GET(N3000_NS_PARAM_DATA_WIDTH, v));
+
+ pdata.num_devices = 1;
+ pdata.devices = &m10_n3000_info;
+
+ dev_dbg(nn->dev, "%s cs %u bpm 0x%x mode 0x%x\n", __func__,
+ pdata.num_chipselect, pdata.bits_per_word_mask,
+ pdata.mode_bits);
+
+ pdevinfo.name = "subdev_spi_altera";
+ pdevinfo.id = PLATFORM_DEVID_AUTO;
+ pdevinfo.parent = nn->dev;
+ pdevinfo.data = &pdata;
+ pdevinfo.size_data = sizeof(pdata);
+
+ nn->altera_spi = platform_device_register_full(&pdevinfo);
+ return PTR_ERR_OR_ZERO(nn->altera_spi);
+}
+
+static void destroy_altera_spi_controller(struct n3000_nios *nn)
+{
+ platform_device_unregister(nn->altera_spi);
+}
+
+static int n3000_nios_poll_stat_timeout(void __iomem *base, u64 *v)
+{
+ int loops;
+
+ /*
+ * We don't use a time based timeout here for performance reasons.
+ *
+ * The regbus read/write is on the critical path of Intel PAC N3000
+ * image programming. Time based timeout checking would add too much
+ * overhead to it. Usually the state changes within 1 or 2 loops on the
+ * test server, and we use a 10000-iteration loop here for safety.
+ */
+ for (loops = N3000_NIOS_REGBUS_RETRY_COUNT; loops > 0 ; loops--) {
+ *v = readq(base + N3000_NS_STAT);
+ if (*v & N3000_NS_STAT_RW_VAL)
+ break;
+ cpu_relax();
+ }
+
+ return (loops > 0) ? 0 : -ETIMEDOUT;
+}
+
+static int n3000_nios_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct n3000_nios *nn = context;
+ u64 v;
+ int ret;
+
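+ /*
+ * Indirect regbus write: pack command, address and data into NS_CTRL,
+ * then poll NS_STAT until the Nios acknowledges with RW_VAL.
+ */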
+ v = FIELD_PREP(N3000_NS_CTRL_CMD_MSK, N3000_NS_CTRL_CMD_WR) |
+ FIELD_PREP(N3000_NS_CTRL_ADDR, reg) |
+ FIELD_PREP(N3000_NS_CTRL_WR_DATA, val);
+ writeq(v, nn->base + N3000_NS_CTRL);
+
+ ret = n3000_nios_poll_stat_timeout(nn->base, &v);
+ if (ret)
+ dev_err(nn->dev, "fail to write reg 0x%x val 0x%x: %d\n",
+ reg, val, ret);
+
+ return ret;
+}
+
+static int n3000_nios_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct n3000_nios *nn = context;
+ u64 v;
+ int ret;
+
+ v = FIELD_PREP(N3000_NS_CTRL_CMD_MSK, N3000_NS_CTRL_CMD_RD) |
+ FIELD_PREP(N3000_NS_CTRL_ADDR, reg);
+ writeq(v, nn->base + N3000_NS_CTRL);
+
+ ret = n3000_nios_poll_stat_timeout(nn->base, &v);
+ if (ret)
+ dev_err(nn->dev, "fail to read reg 0x%x: %d\n", reg, ret);
+ else
+ *val = FIELD_GET(N3000_NS_STAT_RD_DATA, v);
+
+ return ret;
+}
+
+static const struct regmap_config n3000_nios_regbus_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+
+ .reg_write = n3000_nios_reg_write,
+ .reg_read = n3000_nios_reg_read,
+};
+
+static int n3000_nios_probe(struct dfl_device *ddev)
+{
+ struct device *dev = &ddev->dev;
+ struct n3000_nios *nn;
+ int ret;
+
+ nn = devm_kzalloc(dev, sizeof(*nn), GFP_KERNEL);
+ if (!nn)
+ return -ENOMEM;
+
+ dev_set_drvdata(&ddev->dev, nn);
+
+ nn->dev = dev;
+
+ nn->base = devm_ioremap_resource(&ddev->dev, &ddev->mmio_res);
+ if (IS_ERR(nn->base))
+ return PTR_ERR(nn->base);
+
+ nn->regmap = devm_regmap_init(dev, NULL, nn, &n3000_nios_regbus_cfg);
+ if (IS_ERR(nn->regmap))
+ return PTR_ERR(nn->regmap);
+
+ ret = n3000_nios_init_done_check(nn);
+ if (ret)
+ return ret;
+
+ ret = create_altera_spi_controller(nn);
+ if (ret)
+ dev_err(dev, "altera spi controller create failed: %d\n", ret);
+
+ return ret;
+}
+
+static void n3000_nios_remove(struct dfl_device *ddev)
+{
+ struct n3000_nios *nn = dev_get_drvdata(&ddev->dev);
+
+ destroy_altera_spi_controller(nn);
+}
+
+#define FME_FEATURE_ID_N3000_NIOS 0xd
+
+static const struct dfl_device_id n3000_nios_ids[] = {
+ { FME_ID, FME_FEATURE_ID_N3000_NIOS },
+ { }
+};
+MODULE_DEVICE_TABLE(dfl, n3000_nios_ids);
+
+static struct dfl_driver n3000_nios_driver = {
+ .drv = {
+ .name = "dfl-n3000-nios",
+ .dev_groups = n3000_nios_groups,
+ },
+ .id_table = n3000_nios_ids,
+ .probe = n3000_nios_probe,
+ .remove = n3000_nios_remove,
+};
+
+module_dfl_driver(n3000_nios_driver);
+
+MODULE_DESCRIPTION("Driver for Nios private feature on Intel PAC N3000");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index a2203d03c9e2..04e47e266f26 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -27,6 +27,14 @@
#define DRV_VERSION "0.8"
#define DRV_NAME "dfl-pci"
+#define PCI_VSEC_ID_INTEL_DFLS 0x43
+
+#define PCI_VNDR_DFLS_CNT 0x8
+#define PCI_VNDR_DFLS_RES 0xc
+
+#define PCI_VNDR_DFLS_RES_BAR_MASK GENMASK(2, 0)
+#define PCI_VNDR_DFLS_RES_OFF_MASK GENMASK(31, 3)
+
struct cci_drvdata {
struct dfl_fpga_cdev *cdev; /* container device */
};
@@ -119,49 +127,94 @@ static int *cci_pci_create_irq_table(struct pci_dev *pcidev, unsigned int nvec)
return table;
}
-/* enumerate feature devices under pci device */
-static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
+static int find_dfls_by_vsec(struct pci_dev *pcidev, struct dfl_fpga_enum_info *info)
{
- struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
- int port_num, bar, i, nvec, ret = 0;
- struct dfl_fpga_enum_info *info;
- struct dfl_fpga_cdev *cdev;
+ u32 bir, offset, vndr_hdr, dfl_cnt, dfl_res;
+ int dfl_res_off, i, bars, voff = 0;
resource_size_t start, len;
- void __iomem *base;
- int *irq_table;
- u32 offset;
- u64 v;
- /* allocate enumeration info via pci_dev */
- info = dfl_fpga_enum_info_alloc(&pcidev->dev);
- if (!info)
- return -ENOMEM;
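+ /* Walk the Vendor-Specific Extended Capabilities for the Intel DFL VSEC */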
+ while ((voff = pci_find_next_ext_capability(pcidev, voff, PCI_EXT_CAP_ID_VNDR))) {
+ vndr_hdr = 0;
+ pci_read_config_dword(pcidev, voff + PCI_VNDR_HEADER, &vndr_hdr);
- /* add irq info for enumeration if the device support irq */
- nvec = cci_pci_alloc_irq(pcidev);
- if (nvec < 0) {
- dev_err(&pcidev->dev, "Fail to alloc irq %d.\n", nvec);
- ret = nvec;
- goto enum_info_free_exit;
- } else if (nvec) {
- irq_table = cci_pci_create_irq_table(pcidev, nvec);
- if (!irq_table) {
- ret = -ENOMEM;
- goto irq_free_exit;
+ if (PCI_VNDR_HEADER_ID(vndr_hdr) == PCI_VSEC_ID_INTEL_DFLS &&
+ pcidev->vendor == PCI_VENDOR_ID_INTEL)
+ break;
+ }
+
+ if (!voff) {
+ dev_dbg(&pcidev->dev, "%s no DFL VSEC found\n", __func__);
+ return -ENODEV;
+ }
+
+ dfl_cnt = 0;
+ pci_read_config_dword(pcidev, voff + PCI_VNDR_DFLS_CNT, &dfl_cnt);
+ if (dfl_cnt > PCI_STD_NUM_BARS) {
+ dev_err(&pcidev->dev, "%s too many DFLs %d > %d\n",
+ __func__, dfl_cnt, PCI_STD_NUM_BARS);
+ return -EINVAL;
+ }
+
+ dfl_res_off = voff + PCI_VNDR_DFLS_RES;
+ if (dfl_res_off + (dfl_cnt * sizeof(u32)) > PCI_CFG_SPACE_EXP_SIZE) {
+ dev_err(&pcidev->dev, "%s DFL VSEC too big for PCIe config space\n",
+ __func__);
+ return -EINVAL;
+ }
+
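+ /*
+ * Each DFL resource dword encodes the BAR number in bits 2:0 and the
+ * start offset within that BAR in bits 31:3.
+ */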
+ for (i = 0, bars = 0; i < dfl_cnt; i++, dfl_res_off += sizeof(u32)) {
+ dfl_res = GENMASK(31, 0);
+ pci_read_config_dword(pcidev, dfl_res_off, &dfl_res);
+
+ bir = dfl_res & PCI_VNDR_DFLS_RES_BAR_MASK;
+ if (bir >= PCI_STD_NUM_BARS) {
+ dev_err(&pcidev->dev, "%s bad bir number %d\n",
+ __func__, bir);
+ return -EINVAL;
}
- ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
- kfree(irq_table);
- if (ret)
- goto irq_free_exit;
+ if (bars & BIT(bir)) {
+ dev_err(&pcidev->dev, "%s DFL for BAR %d already specified\n",
+ __func__, bir);
+ return -EINVAL;
+ }
+
+ bars |= BIT(bir);
+
+ len = pci_resource_len(pcidev, bir);
+ offset = dfl_res & PCI_VNDR_DFLS_RES_OFF_MASK;
+ if (offset >= len) {
+ dev_err(&pcidev->dev, "%s bad offset %u >= %pa\n",
+ __func__, offset, &len);
+ return -EINVAL;
+ }
+
+ dev_dbg(&pcidev->dev, "%s BAR %d offset 0x%x\n", __func__, bir, offset);
+
+ len -= offset;
+
+ start = pci_resource_start(pcidev, bir) + offset;
+
+ dfl_fpga_enum_info_add_dfl(info, start, len);
}
- /* start to find Device Feature List in Bar 0 */
+ return 0;
+}
+
+/* default method of finding dfls starting at offset 0 of bar 0 */
+static int find_dfls_by_default(struct pci_dev *pcidev,
+ struct dfl_fpga_enum_info *info)
+{
+ int port_num, bar, i, ret = 0;
+ resource_size_t start, len;
+ void __iomem *base;
+ u32 offset;
+ u64 v;
+
+ /* start to find Device Feature List from Bar 0 */
base = cci_pci_ioremap_bar0(pcidev);
- if (!base) {
- ret = -ENOMEM;
- goto irq_free_exit;
- }
+ if (!base)
+ return -ENOMEM;
/*
* PF device has FME and Ports/AFUs, and VF device only has one
@@ -208,12 +261,54 @@ static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
dfl_fpga_enum_info_add_dfl(info, start, len);
} else {
ret = -ENODEV;
- goto irq_free_exit;
}
/* release I/O mappings for next step enumeration */
pcim_iounmap_regions(pcidev, BIT(0));
+ return ret;
+}
+
+/* enumerate feature devices under pci device */
+static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
+{
+ struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
+ struct dfl_fpga_enum_info *info;
+ struct dfl_fpga_cdev *cdev;
+ int nvec, ret = 0;
+ int *irq_table;
+
+ /* allocate enumeration info via pci_dev */
+ info = dfl_fpga_enum_info_alloc(&pcidev->dev);
+ if (!info)
+ return -ENOMEM;
+
+ /* add irq info for enumeration if the device supports irq */
+ nvec = cci_pci_alloc_irq(pcidev);
+ if (nvec < 0) {
+ dev_err(&pcidev->dev, "Fail to alloc irq %d.\n", nvec);
+ ret = nvec;
+ goto enum_info_free_exit;
+ } else if (nvec) {
+ irq_table = cci_pci_create_irq_table(pcidev, nvec);
+ if (!irq_table) {
+ ret = -ENOMEM;
+ goto irq_free_exit;
+ }
+
+ ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
+ kfree(irq_table);
+ if (ret)
+ goto irq_free_exit;
+ }
+
+ ret = find_dfls_by_vsec(pcidev, info);
+ if (ret == -ENODEV)
+ ret = find_dfls_by_default(pcidev, info);
+
+ if (ret)
+ goto irq_free_exit;
+
/* start enumeration with prepared enumeration information */
cdev = dfl_fpga_feature_devs_enumerate(info);
if (IS_ERR(cdev)) {
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index b450870b75ed..511b20ff35a3 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -10,6 +10,7 @@
* Wu Hao <hao.wu@intel.com>
* Xiao Guangrong <guangrong.xiao@linux.intel.com>
*/
+#include <linux/dfl.h>
#include <linux/fpga-dfl.h>
#include <linux/module.h>
#include <linux/uaccess.h>
@@ -298,8 +299,7 @@ static int dfl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct dfl_device *ddev = to_dfl_dev(dev);
- /* The type has 4 valid bits and feature_id has 12 valid bits */
- return add_uevent_var(env, "MODALIAS=dfl:t%01Xf%03X",
+ return add_uevent_var(env, "MODALIAS=dfl:t%04Xf%04X",
ddev->type, ddev->feature_id);
}
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 5dc758f655b7..2b82c96ba56c 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
@@ -516,88 +517,4 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
struct dfl_feature *feature,
unsigned long arg);
-/**
- * enum dfl_id_type - define the DFL FIU types
- */
-enum dfl_id_type {
- FME_ID,
- PORT_ID,
- DFL_ID_MAX,
-};
-
-/**
- * struct dfl_device_id - dfl device identifier
- * @type: contains 4 bits DFL FIU type of the device. See enum dfl_id_type.
- * @feature_id: contains 12 bits feature identifier local to its DFL FIU type.
- * @driver_data: driver specific data.
- */
-struct dfl_device_id {
- u8 type;
- u16 feature_id;
- unsigned long driver_data;
-};
-
-/**
- * struct dfl_device - represent an dfl device on dfl bus
- *
- * @dev: generic device interface.
- * @id: id of the dfl device.
- * @type: type of DFL FIU of the device. See enum dfl_id_type.
- * @feature_id: 16 bits feature identifier local to its DFL FIU type.
- * @mmio_res: mmio resource of this dfl device.
- * @irqs: list of Linux IRQ numbers of this dfl device.
- * @num_irqs: number of IRQs supported by this dfl device.
- * @cdev: pointer to DFL FPGA container device this dfl device belongs to.
- * @id_entry: matched id entry in dfl driver's id table.
- */
-struct dfl_device {
- struct device dev;
- int id;
- u8 type;
- u16 feature_id;
- struct resource mmio_res;
- int *irqs;
- unsigned int num_irqs;
- struct dfl_fpga_cdev *cdev;
- const struct dfl_device_id *id_entry;
-};
-
-/**
- * struct dfl_driver - represent an dfl device driver
- *
- * @drv: driver model structure.
- * @id_table: pointer to table of device IDs the driver is interested in.
- * { } member terminated.
- * @probe: mandatory callback for device binding.
- * @remove: callback for device unbinding.
- */
-struct dfl_driver {
- struct device_driver drv;
- const struct dfl_device_id *id_table;
-
- int (*probe)(struct dfl_device *dfl_dev);
- void (*remove)(struct dfl_device *dfl_dev);
-};
-
-#define to_dfl_dev(d) container_of(d, struct dfl_device, dev)
-#define to_dfl_drv(d) container_of(d, struct dfl_driver, drv)
-
-/*
- * use a macro to avoid include chaining to get THIS_MODULE.
- */
-#define dfl_driver_register(drv) \
- __dfl_driver_register(drv, THIS_MODULE)
-int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner);
-void dfl_driver_unregister(struct dfl_driver *dfl_drv);
-
-/*
- * module_dfl_driver() - Helper macro for drivers that don't do
- * anything special in module init/exit. This eliminates a lot of
- * boilerplate. Each module may only use this macro once, and
- * calling it replaces module_init() and module_exit().
- */
-#define module_dfl_driver(__dfl_driver) \
- module_driver(__dfl_driver, dfl_driver_register, \
- dfl_driver_unregister)
-
#endif /* __FPGA_DFL_H */
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 2deccacc3aa7..e9266b2a357f 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -17,7 +17,7 @@ static DEFINE_IDA(fpga_bridge_ida);
static struct class *fpga_bridge_class;
/* Lock for adding/removing bridges to linked lists*/
-static spinlock_t bridge_list_lock;
+static DEFINE_SPINLOCK(bridge_list_lock);
/**
* fpga_bridge_enable - Enable transactions on the bridge
@@ -479,8 +479,6 @@ static void fpga_bridge_dev_release(struct device *dev)
static int __init fpga_bridge_dev_init(void)
{
- spin_lock_init(&bridge_list_lock);
-
fpga_bridge_class = class_create(THIS_MODULE, "fpga_bridge");
if (IS_ERR(fpga_bridge_class))
return PTR_ERR(fpga_bridge_class);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index dea65d85594f..e3607ec4c2e8 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -441,8 +441,9 @@ config GPIO_MXC
select GENERIC_IRQ_CHIP
config GPIO_MXS
- def_bool y
+ bool "Freescale MXS GPIO support" if COMPILE_TEST
depends on ARCH_MXS || COMPILE_TEST
+ default y if ARCH_MXS
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
@@ -486,11 +487,11 @@ config GPIO_PXA
Say yes here to support the PXA GPIO device
config GPIO_RCAR
- tristate "Renesas R-Car GPIO"
+ tristate "Renesas R-Car and RZ/G GPIO support"
depends on ARCH_RENESAS || COMPILE_TEST
select GPIOLIB_IRQCHIP
help
- Say yes here to support GPIO on Renesas R-Car SoCs.
+ Say yes here to support GPIO on Renesas R-Car or RZ/G SoCs.
config GPIO_RDA
bool "RDA Micro GPIO controller support"
@@ -594,7 +595,7 @@ config GPIO_TB10X
select OF_GPIO
config GPIO_TEGRA
- bool "NVIDIA Tegra GPIO support"
+ tristate "NVIDIA Tegra GPIO support"
default ARCH_TEGRA
depends on ARCH_TEGRA || COMPILE_TEST
depends on OF_GPIO
@@ -647,6 +648,16 @@ config GPIO_VF610
help
Say yes here to support Vybrid vf610 GPIOs.
+config GPIO_VISCONTI
+ tristate "Toshiba Visconti GPIO support"
+ depends on ARCH_VISCONTI || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIOLIB_IRQCHIP
+ select GPIO_GENERIC
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Say yes here to support GPIO on Toshiba Visconti.
+
config GPIO_VR41XX
tristate "NEC VR4100 series General-purpose I/O Uint support"
depends on CPU_VR41XX
@@ -669,7 +680,7 @@ config GPIO_WCD934X
tristate "Qualcomm Technologies Inc WCD9340/WCD9341 gpio controller driver"
depends on MFD_WCD934X && OF_GPIO
help
- This driver is to supprot GPIO block found on the Qualcomm Technologies
+ This driver is to support the GPIO block found on the Qualcomm Technologies
Inc WCD9340/WCD9341 Audio Codec.
config GPIO_XGENE
@@ -693,6 +704,8 @@ config GPIO_XGENE_SB
config GPIO_XILINX
tristate "Xilinx GPIO support"
+ select GPIOLIB_IRQCHIP
+ depends on OF_GPIO
help
Say yes here to support the Xilinx FPGA GPIO device
@@ -730,13 +743,6 @@ config GPIO_ZYNQ
help
Say yes here to support Xilinx Zynq GPIO controller.
-config GPIO_ZX
- bool "ZTE ZX GPIO support"
- depends on ARCH_ZX || COMPILE_TEST
- select GPIOLIB_IRQCHIP
- help
- Say yes here to support the GPIO device on ZTE ZX SoCs.
-
config GPIO_LOONGSON1
tristate "Loongson1 GPIO support"
depends on MACH_LOONGSON32
@@ -1252,13 +1258,6 @@ config GPIO_MAX77650
GPIO driver for MAX77650/77651 PMIC from Maxim Semiconductor.
These chips have a single pin that can be configured as GPIO.
-config GPIO_MSIC
- bool "Intel MSIC mixed signal gpio support"
- depends on (X86 || COMPILE_TEST) && MFD_INTEL_MSIC
- help
- Enable support for GPIO on intel MSIC controllers found in
- intel MID devices
-
config GPIO_PALMAS
bool "TI PALMAS series PMICs GPIO"
depends on MFD_PALMAS
@@ -1454,13 +1453,6 @@ config GPIO_BT8XX
If unsure, say N.
-config GPIO_INTEL_MID
- bool "Intel MID GPIO support"
- depends on X86_INTEL_MID
- select GPIOLIB_IRQCHIP
- help
- Say Y here to support Intel MID GPIO.
-
config GPIO_MERRIFIELD
tristate "Intel Merrifield GPIO support"
depends on X86_INTEL_MID
@@ -1636,8 +1628,7 @@ config GPIO_MOCKUP
select IRQ_SIM
help
This enables GPIO Testing driver, which provides a way to test GPIO
- subsystem through sysfs(or char device) and debugfs. GPIO_SYSFS
- must be selected for this test.
+ subsystem through sysfs (or char device) and debugfs.
User could use it through the script in
tools/testing/selftests/gpio/gpio-mockup.sh. Reference the usage in
it.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 35e3b6026665..c58a90a3c3b1 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -67,7 +67,6 @@ obj-$(CONFIG_GPIO_HISI) += gpio-hisi.o
obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o
obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
-obj-$(CONFIG_GPIO_INTEL_MID) += gpio-intel-mid.o
obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
@@ -103,7 +102,6 @@ obj-$(CONFIG_GPIO_MOXTET) += gpio-moxtet.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
obj-$(CONFIG_GPIO_MSC313) += gpio-msc313.o
-obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
@@ -164,6 +162,7 @@ obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
obj-$(CONFIG_GPIO_UNIPHIER) += gpio-uniphier.o
obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
+obj-$(CONFIG_GPIO_VISCONTI) += gpio-visconti.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
obj-$(CONFIG_GPIO_WCD934X) += gpio-wcd934x.o
@@ -180,5 +179,4 @@ obj-$(CONFIG_GPIO_XLP) += gpio-xlp.o
obj-$(CONFIG_GPIO_XRA1403) += gpio-xra1403.o
obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o
obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o
-obj-$(CONFIG_GPIO_ZX) += gpio-zx.o
obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 0229fa79499e..b8b1473a5b1e 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -101,7 +101,7 @@ for a few GPIOs. Those should stay where they are.
At the same time it makes sense to get rid of code duplication in existing or
new coming drivers. For example, gpio-ml-ioh should be incorporated into
-gpio-pch. In similar way gpio-intel-mid into gpio-pxa.
+gpio-pch.
Generic MMIO GPIO
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index dfd8a4876a27..08171431bb8f 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -62,34 +62,6 @@ static char *get_arg(char **args)
return start;
}
-static bool isrange(const char *s)
-{
- size_t n;
-
- if (IS_ERR_OR_NULL(s))
- return false;
-
- while (1) {
- n = strspn(s, "0123456789");
- if (!n)
- return false;
-
- s += n;
-
- switch (*s++) {
- case '\0':
- return true;
-
- case '-':
- case ',':
- break;
-
- default:
- return false;
- }
- }
-}
-
static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
int hwnum, unsigned int *n)
{
@@ -100,8 +72,7 @@ static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
if (!lookups)
return -ENOMEM;
- lookups->table[*n] =
- (struct gpiod_lookup)GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);
+ lookups->table[*n] = GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);
(*n)++;
memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));
@@ -112,10 +83,10 @@ static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
static int aggr_parse(struct gpio_aggregator *aggr)
{
+ char *name, *offsets, *p;
char *args = aggr->args;
unsigned long *bitmap;
unsigned int i, n = 0;
- char *name, *offsets;
int error = 0;
bitmap = bitmap_alloc(ARCH_NR_GPIOS, GFP_KERNEL);
@@ -130,7 +101,8 @@ static int aggr_parse(struct gpio_aggregator *aggr)
goto free_bitmap;
}
- if (!isrange(offsets)) {
+ p = get_options(offsets, 0, &error);
+ if (error == 0 || *p) {
/* Named GPIO line */
error = aggr_add_gpio(aggr, name, U16_MAX, &n);
if (error)
@@ -271,7 +243,7 @@ static DRIVER_ATTR_WO(delete_device);
static struct attribute *gpio_aggregator_attrs[] = {
&driver_attr_new_device.attr,
&driver_attr_delete_device.attr,
- NULL,
+ NULL
};
ATTRIBUTE_GROUPS(gpio_aggregator);
@@ -545,7 +517,7 @@ static const struct of_device_id gpio_aggregator_dt_ids[] = {
* Add GPIO-operated devices controlled from userspace below,
* or use "driver_override" in sysfs
*/
- {},
+ {}
};
MODULE_DEVICE_TABLE(of, gpio_aggregator_dt_ids);
#endif
diff --git a/drivers/gpio/gpio-bd70528.c b/drivers/gpio/gpio-bd70528.c
index 45b3da8da336..397a50d6bc65 100644
--- a/drivers/gpio/gpio-bd70528.c
+++ b/drivers/gpio/gpio-bd70528.c
@@ -12,7 +12,8 @@
#define GPIO_OUT_REG(offset) (BD70528_REG_GPIO1_OUT + (offset) * 2)
struct bd70528_gpio {
- struct rohm_regmap_dev chip;
+ struct regmap *regmap;
+ struct device *dev;
struct gpio_chip gpio;
};
@@ -35,11 +36,11 @@ static int bd70528_set_debounce(struct bd70528_gpio *bdgpio,
val = BD70528_DEBOUNCE_50MS;
break;
default:
- dev_err(bdgpio->chip.dev,
+ dev_err(bdgpio->dev,
"Invalid debounce value %u\n", debounce);
return -EINVAL;
}
- return regmap_update_bits(bdgpio->chip.regmap, GPIO_IN_REG(offset),
+ return regmap_update_bits(bdgpio->regmap, GPIO_IN_REG(offset),
BD70528_DEBOUNCE_MASK, val);
}
@@ -49,9 +50,9 @@ static int bd70528_get_direction(struct gpio_chip *chip, unsigned int offset)
int val, ret;
/* Do we need to do something to IRQs here? */
- ret = regmap_read(bdgpio->chip.regmap, GPIO_OUT_REG(offset), &val);
+ ret = regmap_read(bdgpio->regmap, GPIO_OUT_REG(offset), &val);
if (ret) {
- dev_err(bdgpio->chip.dev, "Could not read gpio direction\n");
+ dev_err(bdgpio->dev, "Could not read gpio direction\n");
return ret;
}
if (val & BD70528_GPIO_OUT_EN_MASK)
@@ -67,13 +68,13 @@ static int bd70528_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
switch (pinconf_to_config_param(config)) {
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- return regmap_update_bits(bdgpio->chip.regmap,
+ return regmap_update_bits(bdgpio->regmap,
GPIO_OUT_REG(offset),
BD70528_GPIO_DRIVE_MASK,
BD70528_GPIO_OPEN_DRAIN);
break;
case PIN_CONFIG_DRIVE_PUSH_PULL:
- return regmap_update_bits(bdgpio->chip.regmap,
+ return regmap_update_bits(bdgpio->regmap,
GPIO_OUT_REG(offset),
BD70528_GPIO_DRIVE_MASK,
BD70528_GPIO_PUSH_PULL);
@@ -93,7 +94,7 @@ static int bd70528_direction_input(struct gpio_chip *chip, unsigned int offset)
struct bd70528_gpio *bdgpio = gpiochip_get_data(chip);
/* Do we need to do something to IRQs here? */
- return regmap_update_bits(bdgpio->chip.regmap, GPIO_OUT_REG(offset),
+ return regmap_update_bits(bdgpio->regmap, GPIO_OUT_REG(offset),
BD70528_GPIO_OUT_EN_MASK,
BD70528_GPIO_OUT_DISABLE);
}
@@ -105,10 +106,10 @@ static void bd70528_gpio_set(struct gpio_chip *chip, unsigned int offset,
struct bd70528_gpio *bdgpio = gpiochip_get_data(chip);
u8 val = (value) ? BD70528_GPIO_OUT_HI : BD70528_GPIO_OUT_LO;
- ret = regmap_update_bits(bdgpio->chip.regmap, GPIO_OUT_REG(offset),
+ ret = regmap_update_bits(bdgpio->regmap, GPIO_OUT_REG(offset),
BD70528_GPIO_OUT_MASK, val);
if (ret)
- dev_err(bdgpio->chip.dev, "Could not set gpio to %d\n", value);
+ dev_err(bdgpio->dev, "Could not set gpio to %d\n", value);
}
static int bd70528_direction_output(struct gpio_chip *chip, unsigned int offset,
@@ -117,7 +118,7 @@ static int bd70528_direction_output(struct gpio_chip *chip, unsigned int offset,
struct bd70528_gpio *bdgpio = gpiochip_get_data(chip);
bd70528_gpio_set(chip, offset, value);
- return regmap_update_bits(bdgpio->chip.regmap, GPIO_OUT_REG(offset),
+ return regmap_update_bits(bdgpio->regmap, GPIO_OUT_REG(offset),
BD70528_GPIO_OUT_EN_MASK,
BD70528_GPIO_OUT_ENABLE);
}
@@ -129,11 +130,11 @@ static int bd70528_gpio_get_o(struct bd70528_gpio *bdgpio, unsigned int offset)
int ret;
unsigned int val;
- ret = regmap_read(bdgpio->chip.regmap, GPIO_OUT_REG(offset), &val);
+ ret = regmap_read(bdgpio->regmap, GPIO_OUT_REG(offset), &val);
if (!ret)
ret = !!(val & BD70528_GPIO_OUT_MASK);
else
- dev_err(bdgpio->chip.dev, "GPIO (out) state read failed\n");
+ dev_err(bdgpio->dev, "GPIO (out) state read failed\n");
return ret;
}
@@ -143,12 +144,12 @@ static int bd70528_gpio_get_i(struct bd70528_gpio *bdgpio, unsigned int offset)
unsigned int val;
int ret;
- ret = regmap_read(bdgpio->chip.regmap, BD70528_REG_GPIO_STATE, &val);
+ ret = regmap_read(bdgpio->regmap, BD70528_REG_GPIO_STATE, &val);
if (!ret)
ret = !(val & GPIO_IN_STATE_MASK(offset));
else
- dev_err(bdgpio->chip.dev, "GPIO (in) state read failed\n");
+ dev_err(bdgpio->dev, "GPIO (in) state read failed\n");
return ret;
}
@@ -173,29 +174,22 @@ static int bd70528_gpio_get(struct gpio_chip *chip, unsigned int offset)
else if (ret == GPIO_LINE_DIRECTION_IN)
ret = bd70528_gpio_get_i(bdgpio, offset);
else
- dev_err(bdgpio->chip.dev, "failed to read GPIO direction\n");
+ dev_err(bdgpio->dev, "failed to read GPIO direction\n");
return ret;
}
static int bd70528_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct bd70528_gpio *bdgpio;
- struct rohm_regmap_dev *bd70528;
int ret;
- bd70528 = dev_get_drvdata(pdev->dev.parent);
- if (!bd70528) {
- dev_err(&pdev->dev, "No MFD driver data\n");
- return -EINVAL;
- }
-
- bdgpio = devm_kzalloc(&pdev->dev, sizeof(*bdgpio),
- GFP_KERNEL);
+ bdgpio = devm_kzalloc(dev, sizeof(*bdgpio), GFP_KERNEL);
if (!bdgpio)
return -ENOMEM;
- bdgpio->chip.dev = &pdev->dev;
- bdgpio->gpio.parent = pdev->dev.parent;
+ bdgpio->dev = dev;
+ bdgpio->gpio.parent = dev->parent;
bdgpio->gpio.label = "bd70528-gpio";
bdgpio->gpio.owner = THIS_MODULE;
bdgpio->gpio.get_direction = bd70528_get_direction;
@@ -208,14 +202,15 @@ static int bd70528_probe(struct platform_device *pdev)
bdgpio->gpio.ngpio = 4;
bdgpio->gpio.base = -1;
#ifdef CONFIG_OF_GPIO
- bdgpio->gpio.of_node = pdev->dev.parent->of_node;
+ bdgpio->gpio.of_node = dev->parent->of_node;
#endif
- bdgpio->chip.regmap = bd70528->regmap;
+ bdgpio->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!bdgpio->regmap)
+ return -ENODEV;
- ret = devm_gpiochip_add_data(&pdev->dev, &bdgpio->gpio,
- bdgpio);
+ ret = devm_gpiochip_add_data(dev, &bdgpio->gpio, bdgpio);
if (ret)
- dev_err(&pdev->dev, "gpio_init: Failed to add bd70528-gpio\n");
+ dev_err(dev, "gpio_init: Failed to add bd70528-gpio\n");
return ret;
}
diff --git a/drivers/gpio/gpio-bd71828.c b/drivers/gpio/gpio-bd71828.c
index 3dbbc638e9a9..c8e382b53f2f 100644
--- a/drivers/gpio/gpio-bd71828.c
+++ b/drivers/gpio/gpio-bd71828.c
@@ -11,7 +11,8 @@
#define HALL_GPIO_OFFSET 3
struct bd71828_gpio {
- struct rohm_regmap_dev chip;
+ struct regmap *regmap;
+ struct device *dev;
struct gpio_chip gpio;
};
@@ -29,10 +30,10 @@ static void bd71828_gpio_set(struct gpio_chip *chip, unsigned int offset,
if (offset == HALL_GPIO_OFFSET)
return;
- ret = regmap_update_bits(bdgpio->chip.regmap, GPIO_OUT_REG(offset),
+ ret = regmap_update_bits(bdgpio->regmap, GPIO_OUT_REG(offset),
BD71828_GPIO_OUT_MASK, val);
if (ret)
- dev_err(bdgpio->chip.dev, "Could not set gpio to %d\n", value);
+ dev_err(bdgpio->dev, "Could not set gpio to %d\n", value);
}
static int bd71828_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -42,10 +43,10 @@ static int bd71828_gpio_get(struct gpio_chip *chip, unsigned int offset)
struct bd71828_gpio *bdgpio = gpiochip_get_data(chip);
if (offset == HALL_GPIO_OFFSET)
- ret = regmap_read(bdgpio->chip.regmap, BD71828_REG_IO_STAT,
+ ret = regmap_read(bdgpio->regmap, BD71828_REG_IO_STAT,
&val);
else
- ret = regmap_read(bdgpio->chip.regmap, GPIO_OUT_REG(offset),
+ ret = regmap_read(bdgpio->regmap, GPIO_OUT_REG(offset),
&val);
if (!ret)
ret = (val & BD71828_GPIO_OUT_MASK);
@@ -63,12 +64,12 @@ static int bd71828_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
switch (pinconf_to_config_param(config)) {
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- return regmap_update_bits(bdgpio->chip.regmap,
+ return regmap_update_bits(bdgpio->regmap,
GPIO_OUT_REG(offset),
BD71828_GPIO_DRIVE_MASK,
BD71828_GPIO_OPEN_DRAIN);
case PIN_CONFIG_DRIVE_PUSH_PULL:
- return regmap_update_bits(bdgpio->chip.regmap,
+ return regmap_update_bits(bdgpio->regmap,
GPIO_OUT_REG(offset),
BD71828_GPIO_DRIVE_MASK,
BD71828_GPIO_PUSH_PULL);
@@ -96,22 +97,15 @@ static int bd71828_get_direction(struct gpio_chip *chip, unsigned int offset)
static int bd71828_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct bd71828_gpio *bdgpio;
- struct rohm_regmap_dev *bd71828;
- bd71828 = dev_get_drvdata(pdev->dev.parent);
- if (!bd71828) {
- dev_err(&pdev->dev, "No MFD driver data\n");
- return -EINVAL;
- }
-
- bdgpio = devm_kzalloc(&pdev->dev, sizeof(*bdgpio),
- GFP_KERNEL);
+ bdgpio = devm_kzalloc(dev, sizeof(*bdgpio), GFP_KERNEL);
if (!bdgpio)
return -ENOMEM;
- bdgpio->chip.dev = &pdev->dev;
- bdgpio->gpio.parent = pdev->dev.parent;
+ bdgpio->dev = dev;
+ bdgpio->gpio.parent = dev->parent;
bdgpio->gpio.label = "bd71828-gpio";
bdgpio->gpio.owner = THIS_MODULE;
bdgpio->gpio.get_direction = bd71828_get_direction;
@@ -127,11 +121,12 @@ static int bd71828_probe(struct platform_device *pdev)
* "gpio-reserved-ranges" and exclude them from control
*/
bdgpio->gpio.ngpio = 4;
- bdgpio->gpio.of_node = pdev->dev.parent->of_node;
- bdgpio->chip.regmap = bd71828->regmap;
+ bdgpio->gpio.of_node = dev->parent->of_node;
+ bdgpio->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!bdgpio->regmap)
+ return -ENODEV;
- return devm_gpiochip_add_data(&pdev->dev, &bdgpio->gpio,
- bdgpio);
+ return devm_gpiochip_add_data(dev, &bdgpio->gpio, bdgpio);
}
static struct platform_driver bd71828_gpio = {
diff --git a/drivers/gpio/gpio-bd9571mwv.c b/drivers/gpio/gpio-bd9571mwv.c
index c0abc9c6851b..df6102b57734 100644
--- a/drivers/gpio/gpio-bd9571mwv.c
+++ b/drivers/gpio/gpio-bd9571mwv.c
@@ -1,31 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * ROHM BD9571MWV-M GPIO driver
+ * ROHM BD9571MWV-M and BD9574MWF-M GPIO driver
*
* Copyright (C) 2017 Marek Vasut <marek.vasut+renesas@gmail.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65086 driver
*
* NOTE: Interrupts are not supported yet.
*/
#include <linux/gpio/driver.h>
+#include <linux/mfd/rohm-generic.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/bd9571mwv.h>
struct bd9571mwv_gpio {
+ struct regmap *regmap;
struct gpio_chip chip;
- struct bd9571mwv *bd;
};
static int bd9571mwv_gpio_get_direction(struct gpio_chip *chip,
@@ -34,7 +27,7 @@ static int bd9571mwv_gpio_get_direction(struct gpio_chip *chip,
struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
int ret, val;
- ret = regmap_read(gpio->bd->regmap, BD9571MWV_GPIO_DIR, &val);
+ ret = regmap_read(gpio->regmap, BD9571MWV_GPIO_DIR, &val);
if (ret < 0)
return ret;
if (val & BIT(offset))
@@ -48,8 +41,7 @@ static int bd9571mwv_gpio_direction_input(struct gpio_chip *chip,
{
struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->bd->regmap, BD9571MWV_GPIO_DIR,
- BIT(offset), 0);
+ regmap_update_bits(gpio->regmap, BD9571MWV_GPIO_DIR, BIT(offset), 0);
return 0;
}
@@ -60,9 +52,9 @@ static int bd9571mwv_gpio_direction_output(struct gpio_chip *chip,
struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
/* Set the initial value */
- regmap_update_bits(gpio->bd->regmap, BD9571MWV_GPIO_OUT,
+ regmap_update_bits(gpio->regmap, BD9571MWV_GPIO_OUT,
BIT(offset), value ? BIT(offset) : 0);
- regmap_update_bits(gpio->bd->regmap, BD9571MWV_GPIO_DIR,
+ regmap_update_bits(gpio->regmap, BD9571MWV_GPIO_DIR,
BIT(offset), BIT(offset));
return 0;
@@ -73,7 +65,7 @@ static int bd9571mwv_gpio_get(struct gpio_chip *chip, unsigned int offset)
struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
int ret, val;
- ret = regmap_read(gpio->bd->regmap, BD9571MWV_GPIO_IN, &val);
+ ret = regmap_read(gpio->regmap, BD9571MWV_GPIO_IN, &val);
if (ret < 0)
return ret;
@@ -85,7 +77,7 @@ static void bd9571mwv_gpio_set(struct gpio_chip *chip, unsigned int offset,
{
struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->bd->regmap, BD9571MWV_GPIO_OUT,
+ regmap_update_bits(gpio->regmap, BD9571MWV_GPIO_OUT,
BIT(offset), value ? BIT(offset) : 0);
}
@@ -113,9 +105,9 @@ static int bd9571mwv_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gpio);
- gpio->bd = dev_get_drvdata(pdev->dev.parent);
+ gpio->regmap = dev_get_regmap(pdev->dev.parent, NULL);
gpio->chip = template_chip;
- gpio->chip.parent = gpio->bd->dev;
+ gpio->chip.parent = pdev->dev.parent;
ret = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
if (ret < 0) {
@@ -127,7 +119,8 @@ static int bd9571mwv_gpio_probe(struct platform_device *pdev)
}
static const struct platform_device_id bd9571mwv_gpio_id_table[] = {
- { "bd9571mwv-gpio", },
+ { "bd9571mwv-gpio", ROHM_CHIP_TYPE_BD9571 },
+ { "bd9574mwf-gpio", ROHM_CHIP_TYPE_BD9574 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, bd9571mwv_gpio_id_table);
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 226da8df6f10..ef148b26b587 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -25,83 +25,89 @@
/* Maximum value for gpio line identifiers */
#define EP93XX_GPIO_LINE_MAX 63
+/* Number of GPIO chips in EP93XX */
+#define EP93XX_GPIO_CHIP_NUM 8
+
/* Maximum value for irq capable line identifiers */
#define EP93XX_GPIO_LINE_MAX_IRQ 23
+#define EP93XX_GPIO_A_IRQ_BASE 64
+#define EP93XX_GPIO_B_IRQ_BASE 72
/*
* Static mapping of GPIO bank F IRQS:
* F0..F7 (16..24) to irq 80..87.
*/
#define EP93XX_GPIO_F_IRQ_BASE 80
-struct ep93xx_gpio {
- void __iomem *base;
- struct gpio_chip gc[8];
+struct ep93xx_gpio_irq_chip {
+ struct irq_chip ic;
+ u8 irq_offset;
+ u8 int_unmasked;
+ u8 int_enabled;
+ u8 int_type1;
+ u8 int_type2;
+ u8 int_debounce;
};
-/*************************************************************************
- * Interrupt handling for EP93xx on-chip GPIOs
- *************************************************************************/
-static unsigned char gpio_int_unmasked[3];
-static unsigned char gpio_int_enabled[3];
-static unsigned char gpio_int_type1[3];
-static unsigned char gpio_int_type2[3];
-static unsigned char gpio_int_debounce[3];
-
-/* Port ordering is: A B F */
-static const u8 int_type1_register_offset[3] = { 0x90, 0xac, 0x4c };
-static const u8 int_type2_register_offset[3] = { 0x94, 0xb0, 0x50 };
-static const u8 eoi_register_offset[3] = { 0x98, 0xb4, 0x54 };
-static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 };
-static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 };
-
-static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg, unsigned port)
-{
- BUG_ON(port > 2);
+struct ep93xx_gpio_chip {
+ struct gpio_chip gc;
+ struct ep93xx_gpio_irq_chip *eic;
+};
- writeb_relaxed(0, epg->base + int_en_register_offset[port]);
+struct ep93xx_gpio {
+ void __iomem *base;
+ struct ep93xx_gpio_chip gc[EP93XX_GPIO_CHIP_NUM];
+};
- writeb_relaxed(gpio_int_type2[port],
- epg->base + int_type2_register_offset[port]);
+#define to_ep93xx_gpio_chip(x) container_of(x, struct ep93xx_gpio_chip, gc)
- writeb_relaxed(gpio_int_type1[port],
- epg->base + int_type1_register_offset[port]);
+static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc)
+{
+ struct ep93xx_gpio_chip *egc = to_ep93xx_gpio_chip(gc);
- writeb(gpio_int_unmasked[port] & gpio_int_enabled[port],
- epg->base + int_en_register_offset[port]);
+ return egc->eic;
}
-static int ep93xx_gpio_port(struct gpio_chip *gc)
+/*************************************************************************
+ * Interrupt handling for EP93xx on-chip GPIOs
+ *************************************************************************/
+#define EP93XX_INT_TYPE1_OFFSET 0x00
+#define EP93XX_INT_TYPE2_OFFSET 0x04
+#define EP93XX_INT_EOI_OFFSET 0x08
+#define EP93XX_INT_EN_OFFSET 0x0c
+#define EP93XX_INT_STATUS_OFFSET 0x10
+#define EP93XX_INT_RAW_STATUS_OFFSET 0x14
+#define EP93XX_INT_DEBOUNCE_OFFSET 0x18
+
+static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg,
+ struct ep93xx_gpio_irq_chip *eic)
{
- struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = 0;
+ writeb_relaxed(0, epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET);
- while (port < ARRAY_SIZE(epg->gc) && gc != &epg->gc[port])
- port++;
+ writeb_relaxed(eic->int_type2,
+ epg->base + eic->irq_offset + EP93XX_INT_TYPE2_OFFSET);
- /* This should not happen but is there as a last safeguard */
- if (port == ARRAY_SIZE(epg->gc)) {
- pr_crit("can't find the GPIO port\n");
- return 0;
- }
+ writeb_relaxed(eic->int_type1,
+ epg->base + eic->irq_offset + EP93XX_INT_TYPE1_OFFSET);
- return port;
+ writeb_relaxed(eic->int_unmasked & eic->int_enabled,
+ epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET);
}
static void ep93xx_gpio_int_debounce(struct gpio_chip *gc,
unsigned int offset, bool enable)
{
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
int port_mask = BIT(offset);
if (enable)
- gpio_int_debounce[port] |= port_mask;
+ eic->int_debounce |= port_mask;
else
- gpio_int_debounce[port] &= ~port_mask;
+ eic->int_debounce &= ~port_mask;
- writeb(gpio_int_debounce[port],
- epg->base + int_debounce_register_offset[port]);
+ writeb(eic->int_debounce,
+ epg->base + eic->irq_offset + EP93XX_INT_DEBOUNCE_OFFSET);
}
static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
@@ -122,12 +128,12 @@ static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
*/
stat = readb(epg->base + EP93XX_GPIO_A_INT_STATUS);
for_each_set_bit(offset, &stat, 8)
- generic_handle_irq(irq_find_mapping(epg->gc[0].irq.domain,
+ generic_handle_irq(irq_find_mapping(epg->gc[0].gc.irq.domain,
offset));
stat = readb(epg->base + EP93XX_GPIO_B_INT_STATUS);
for_each_set_bit(offset, &stat, 8)
- generic_handle_irq(irq_find_mapping(epg->gc[1].irq.domain,
+ generic_handle_irq(irq_find_mapping(epg->gc[1].gc.irq.domain,
offset));
chained_irq_exit(irqchip, desc);
@@ -153,52 +159,52 @@ static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
static void ep93xx_gpio_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
int port_mask = BIT(d->irq & 7);
if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
- gpio_int_type2[port] ^= port_mask; /* switch edge direction */
- ep93xx_gpio_update_int_params(epg, port);
+ eic->int_type2 ^= port_mask; /* switch edge direction */
+ ep93xx_gpio_update_int_params(epg, eic);
}
- writeb(port_mask, epg->base + eoi_register_offset[port]);
+ writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
}
static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
int port_mask = BIT(d->irq & 7);
if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH)
- gpio_int_type2[port] ^= port_mask; /* switch edge direction */
+ eic->int_type2 ^= port_mask; /* switch edge direction */
- gpio_int_unmasked[port] &= ~port_mask;
- ep93xx_gpio_update_int_params(epg, port);
+ eic->int_unmasked &= ~port_mask;
+ ep93xx_gpio_update_int_params(epg, eic);
- writeb(port_mask, epg->base + eoi_register_offset[port]);
+ writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
}
static void ep93xx_gpio_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
- gpio_int_unmasked[port] &= ~BIT(d->irq & 7);
- ep93xx_gpio_update_int_params(epg, port);
+ eic->int_unmasked &= ~BIT(d->irq & 7);
+ ep93xx_gpio_update_int_params(epg, eic);
}
static void ep93xx_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
- gpio_int_unmasked[port] |= BIT(d->irq & 7);
- ep93xx_gpio_update_int_params(epg, port);
+ eic->int_unmasked |= BIT(d->irq & 7);
+ ep93xx_gpio_update_int_params(epg, eic);
}
/*
@@ -209,8 +215,8 @@ static void ep93xx_gpio_irq_unmask(struct irq_data *d)
static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
int offset = d->irq & 7;
int port_mask = BIT(offset);
irq_flow_handler_t handler;
@@ -219,32 +225,32 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- gpio_int_type1[port] |= port_mask;
- gpio_int_type2[port] |= port_mask;
+ eic->int_type1 |= port_mask;
+ eic->int_type2 |= port_mask;
handler = handle_edge_irq;
break;
case IRQ_TYPE_EDGE_FALLING:
- gpio_int_type1[port] |= port_mask;
- gpio_int_type2[port] &= ~port_mask;
+ eic->int_type1 |= port_mask;
+ eic->int_type2 &= ~port_mask;
handler = handle_edge_irq;
break;
case IRQ_TYPE_LEVEL_HIGH:
- gpio_int_type1[port] &= ~port_mask;
- gpio_int_type2[port] |= port_mask;
+ eic->int_type1 &= ~port_mask;
+ eic->int_type2 |= port_mask;
handler = handle_level_irq;
break;
case IRQ_TYPE_LEVEL_LOW:
- gpio_int_type1[port] &= ~port_mask;
- gpio_int_type2[port] &= ~port_mask;
+ eic->int_type1 &= ~port_mask;
+ eic->int_type2 &= ~port_mask;
handler = handle_level_irq;
break;
case IRQ_TYPE_EDGE_BOTH:
- gpio_int_type1[port] |= port_mask;
+ eic->int_type1 |= port_mask;
/* set initial polarity based on current input level */
if (gc->get(gc, offset))
- gpio_int_type2[port] &= ~port_mask; /* falling */
+ eic->int_type2 &= ~port_mask; /* falling */
else
- gpio_int_type2[port] |= port_mask; /* rising */
+ eic->int_type2 |= port_mask; /* rising */
handler = handle_edge_irq;
break;
default:
@@ -253,22 +259,13 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
irq_set_handler_locked(d, handler);
- gpio_int_enabled[port] |= port_mask;
+ eic->int_enabled |= port_mask;
- ep93xx_gpio_update_int_params(epg, port);
+ ep93xx_gpio_update_int_params(epg, eic);
return 0;
}
-static struct irq_chip ep93xx_gpio_irq_chip = {
- .name = "GPIO",
- .irq_ack = ep93xx_gpio_irq_ack,
- .irq_mask_ack = ep93xx_gpio_irq_mask_ack,
- .irq_mask = ep93xx_gpio_irq_mask,
- .irq_unmask = ep93xx_gpio_irq_unmask,
- .irq_set_type = ep93xx_gpio_irq_type,
-};
-
/*************************************************************************
* gpiolib interface for EP93xx on-chip GPIOs
*************************************************************************/
@@ -276,17 +273,19 @@ struct ep93xx_gpio_bank {
const char *label;
int data;
int dir;
+ int irq;
int base;
bool has_irq;
bool has_hierarchical_irq;
unsigned int irq_base;
};
-#define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _has_irq, _has_hier, _irq_base) \
+#define EP93XX_GPIO_BANK(_label, _data, _dir, _irq, _base, _has_irq, _has_hier, _irq_base) \
{ \
.label = _label, \
.data = _data, \
.dir = _dir, \
+ .irq = _irq, \
.base = _base, \
.has_irq = _has_irq, \
.has_hierarchical_irq = _has_hier, \
@@ -295,16 +294,16 @@ struct ep93xx_gpio_bank {
static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = {
/* Bank A has 8 IRQs */
- EP93XX_GPIO_BANK("A", 0x00, 0x10, 0, true, false, 64),
+ EP93XX_GPIO_BANK("A", 0x00, 0x10, 0x90, 0, true, false, EP93XX_GPIO_A_IRQ_BASE),
/* Bank B has 8 IRQs */
- EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true, false, 72),
- EP93XX_GPIO_BANK("C", 0x08, 0x18, 40, false, false, 0),
- EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24, false, false, 0),
- EP93XX_GPIO_BANK("E", 0x20, 0x24, 32, false, false, 0),
+ EP93XX_GPIO_BANK("B", 0x04, 0x14, 0xac, 8, true, false, EP93XX_GPIO_B_IRQ_BASE),
+ EP93XX_GPIO_BANK("C", 0x08, 0x18, 0x00, 40, false, false, 0),
+ EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 0x00, 24, false, false, 0),
+ EP93XX_GPIO_BANK("E", 0x20, 0x24, 0x00, 32, false, false, 0),
/* Bank F has 8 IRQs */
- EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, false, true, 0),
- EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48, false, false, 0),
- EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false, false, 0),
+ EP93XX_GPIO_BANK("F", 0x30, 0x34, 0x4c, 16, false, true, EP93XX_GPIO_F_IRQ_BASE),
+ EP93XX_GPIO_BANK("G", 0x38, 0x3c, 0x00, 48, false, false, 0),
+ EP93XX_GPIO_BANK("H", 0x40, 0x44, 0x00, 56, false, false, 0),
};
static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset,
@@ -321,18 +320,23 @@ static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset,
return 0;
}
-static int ep93xx_gpio_f_to_irq(struct gpio_chip *gc, unsigned offset)
+static void ep93xx_init_irq_chip(struct device *dev, struct irq_chip *ic)
{
- return EP93XX_GPIO_F_IRQ_BASE + offset;
+ ic->irq_ack = ep93xx_gpio_irq_ack;
+ ic->irq_mask_ack = ep93xx_gpio_irq_mask_ack;
+ ic->irq_mask = ep93xx_gpio_irq_mask;
+ ic->irq_unmask = ep93xx_gpio_irq_unmask;
+ ic->irq_set_type = ep93xx_gpio_irq_type;
}
-static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
+static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc,
struct platform_device *pdev,
struct ep93xx_gpio *epg,
struct ep93xx_gpio_bank *bank)
{
void __iomem *data = epg->base + bank->data;
void __iomem *dir = epg->base + bank->dir;
+ struct gpio_chip *gc = &egc->gc;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
int err;
@@ -346,8 +350,21 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
girq = &gc->irq;
if (bank->has_irq || bank->has_hierarchical_irq) {
+ struct irq_chip *ic;
+
gc->set_config = ep93xx_gpio_set_config;
- girq->chip = &ep93xx_gpio_irq_chip;
+ egc->eic = devm_kcalloc(dev, 1,
+ sizeof(*egc->eic),
+ GFP_KERNEL);
+ if (!egc->eic)
+ return -ENOMEM;
+ egc->eic->irq_offset = bank->irq;
+ ic = &egc->eic->ic;
+ ic->name = devm_kasprintf(dev, GFP_KERNEL, "gpio-irq-%s", bank->label);
+ if (!ic->name)
+ return -ENOMEM;
+ ep93xx_init_irq_chip(dev, ic);
+ girq->chip = ic;
}
if (bank->has_irq) {
@@ -355,7 +372,7 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
girq->parent_handler = ep93xx_gpio_ab_irq_handler;
girq->num_parents = 1;
- girq->parents = devm_kcalloc(dev, 1,
+ girq->parents = devm_kcalloc(dev, girq->num_parents,
sizeof(*girq->parents),
GFP_KERNEL);
if (!girq->parents)
@@ -373,29 +390,28 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
/*
* FIXME: convert this to use hierarchical IRQ support!
- * this requires fixing the root irqchip to be hierarchial.
+ * this requires fixing the root irqchip to be hierarchical.
*/
girq->parent_handler = ep93xx_gpio_f_irq_handler;
girq->num_parents = 8;
- girq->parents = devm_kcalloc(dev, 8,
+ girq->parents = devm_kcalloc(dev, girq->num_parents,
sizeof(*girq->parents),
GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
/* Pick resources 1..8 for these IRQs */
- for (i = 1; i <= 8; i++)
- girq->parents[i - 1] = platform_get_irq(pdev, i);
- for (i = 0; i < 8; i++) {
- gpio_irq = EP93XX_GPIO_F_IRQ_BASE + i;
+ for (i = 0; i < girq->num_parents; i++) {
+ girq->parents[i] = platform_get_irq(pdev, i + 1);
+ gpio_irq = bank->irq_base + i;
irq_set_chip_data(gpio_irq, &epg->gc[5]);
irq_set_chip_and_handler(gpio_irq,
- &ep93xx_gpio_irq_chip,
+ girq->chip,
handle_level_irq);
irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST);
}
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
- gc->to_irq = ep93xx_gpio_f_to_irq;
+ girq->first = bank->irq_base;
}
return devm_gpiochip_add_data(dev, gc, epg);
@@ -415,7 +431,7 @@ static int ep93xx_gpio_probe(struct platform_device *pdev)
return PTR_ERR(epg->base);
for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
- struct gpio_chip *gc = &epg->gc[i];
+ struct ep93xx_gpio_chip *gc = &epg->gc[i];
struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i];
if (ep93xx_gpio_add_bank(gc, pdev, epg, bank))
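The per-bank irq_offset values introduced above (0x90 for port A, 0xac for B, 0x4c for F), combined with the fixed EP93XX_INT_*_OFFSET constants, land on the same registers as the removed per-port offset tables. A standalone consistency check, not part of the patch, compiled and run on the host:

/* Hypothetical host-side check: new base+offset scheme vs. old tables. */
#include <assert.h>

int main(void)
{
	/* Old per-port register tables (ports A, B, F). */
	const unsigned char int_type1[3] = { 0x90, 0xac, 0x4c };
	const unsigned char int_type2[3] = { 0x94, 0xb0, 0x50 };
	const unsigned char eoi[3]       = { 0x98, 0xb4, 0x54 };
	const unsigned char int_en[3]    = { 0x9c, 0xb8, 0x58 };
	const unsigned char debounce[3]  = { 0xa8, 0xc4, 0x64 };
	/* New scheme: one irq_offset per bank plus fixed register offsets. */
	const unsigned char irq_offset[3] = { 0x90, 0xac, 0x4c };

	for (int port = 0; port < 3; port++) {
		assert(irq_offset[port] + 0x00 == int_type1[port]); /* TYPE1 */
		assert(irq_offset[port] + 0x04 == int_type2[port]); /* TYPE2 */
		assert(irq_offset[port] + 0x08 == eoi[port]);       /* EOI */
		assert(irq_offset[port] + 0x0c == int_en[port]);    /* EN */
		assert(irq_offset[port] + 0x18 == debounce[port]);  /* DEBOUNCE */
	}
	return 0;
}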
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
deleted file mode 100644
index 86a10c808ef6..000000000000
--- a/drivers/gpio/gpio-intel-mid.c
+++ /dev/null
@@ -1,414 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Intel MID GPIO driver
- *
- * Copyright (c) 2008-2014,2016 Intel Corporation.
- */
-
-/* Supports:
- * Moorestown platform Langwell chip.
- * Medfield platform Penwell chip.
- * Clovertrail platform Cloverview chip.
- */
-
-#include <linux/delay.h>
-#include <linux/gpio/driver.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/stddef.h>
-
-#define INTEL_MID_IRQ_TYPE_EDGE (1 << 0)
-#define INTEL_MID_IRQ_TYPE_LEVEL (1 << 1)
-
-/*
- * Langwell chip has 64 pins and thus there are 2 32bit registers to control
- * each feature, while Penwell chip has 96 pins for each block, and need 3 32bit
- * registers to control them, so we only define the order here instead of a
- * structure, to get a bit offset for a pin (use GPDR as an example):
- *
- * nreg = ngpio / 32;
- * reg = offset / 32;
- * bit = offset % 32;
- * reg_addr = reg_base + GPDR * nreg * 4 + reg * 4;
- *
- * so the bit of reg_addr is to control pin offset's GPDR feature
-*/
-
-enum GPIO_REG {
- GPLR = 0, /* pin level read-only */
- GPDR, /* pin direction */
- GPSR, /* pin set */
- GPCR, /* pin clear */
- GRER, /* rising edge detect */
- GFER, /* falling edge detect */
- GEDR, /* edge detect result */
- GAFR, /* alt function */
-};
-
-/* intel_mid gpio driver data */
-struct intel_mid_gpio_ddata {
- u16 ngpio; /* number of gpio pins */
- u32 chip_irq_type; /* chip interrupt type */
-};
-
-struct intel_mid_gpio {
- struct gpio_chip chip;
- void __iomem *reg_base;
- spinlock_t lock;
- struct pci_dev *pdev;
-};
-
-static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
- enum GPIO_REG reg_type)
-{
- struct intel_mid_gpio *priv = gpiochip_get_data(chip);
- unsigned nreg = chip->ngpio / 32;
- u8 reg = offset / 32;
-
- return priv->reg_base + reg_type * nreg * 4 + reg * 4;
-}
-
-static void __iomem *gpio_reg_2bit(struct gpio_chip *chip, unsigned offset,
- enum GPIO_REG reg_type)
-{
- struct intel_mid_gpio *priv = gpiochip_get_data(chip);
- unsigned nreg = chip->ngpio / 32;
- u8 reg = offset / 16;
-
- return priv->reg_base + reg_type * nreg * 4 + reg * 4;
-}
-
-static int intel_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
- void __iomem *gafr = gpio_reg_2bit(chip, offset, GAFR);
- u32 value = readl(gafr);
- int shift = (offset % 16) << 1, af = (value >> shift) & 3;
-
- if (af) {
- value &= ~(3 << shift);
- writel(value, gafr);
- }
- return 0;
-}
-
-static int intel_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- void __iomem *gplr = gpio_reg(chip, offset, GPLR);
-
- return !!(readl(gplr) & BIT(offset % 32));
-}
-
-static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- void __iomem *gpsr, *gpcr;
-
- if (value) {
- gpsr = gpio_reg(chip, offset, GPSR);
- writel(BIT(offset % 32), gpsr);
- } else {
- gpcr = gpio_reg(chip, offset, GPCR);
- writel(BIT(offset % 32), gpcr);
- }
-}
-
-static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- struct intel_mid_gpio *priv = gpiochip_get_data(chip);
- void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
- u32 value;
- unsigned long flags;
-
- if (priv->pdev)
- pm_runtime_get(&priv->pdev->dev);
-
- spin_lock_irqsave(&priv->lock, flags);
- value = readl(gpdr);
- value &= ~BIT(offset % 32);
- writel(value, gpdr);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (priv->pdev)
- pm_runtime_put(&priv->pdev->dev);
-
- return 0;
-}
-
-static int intel_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- struct intel_mid_gpio *priv = gpiochip_get_data(chip);
- void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
- unsigned long flags;
-
- intel_gpio_set(chip, offset, value);
-
- if (priv->pdev)
- pm_runtime_get(&priv->pdev->dev);
-
- spin_lock_irqsave(&priv->lock, flags);
- value = readl(gpdr);
- value |= BIT(offset % 32);
- writel(value, gpdr);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (priv->pdev)
- pm_runtime_put(&priv->pdev->dev);
-
- return 0;
-}
-
-static int intel_mid_irq_type(struct irq_data *d, unsigned type)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct intel_mid_gpio *priv = gpiochip_get_data(gc);
- u32 gpio = irqd_to_hwirq(d);
- unsigned long flags;
- u32 value;
- void __iomem *grer = gpio_reg(&priv->chip, gpio, GRER);
- void __iomem *gfer = gpio_reg(&priv->chip, gpio, GFER);
-
- if (gpio >= priv->chip.ngpio)
- return -EINVAL;
-
- if (priv->pdev)
- pm_runtime_get(&priv->pdev->dev);
-
- spin_lock_irqsave(&priv->lock, flags);
- if (type & IRQ_TYPE_EDGE_RISING)
- value = readl(grer) | BIT(gpio % 32);
- else
- value = readl(grer) & (~BIT(gpio % 32));
- writel(value, grer);
-
- if (type & IRQ_TYPE_EDGE_FALLING)
- value = readl(gfer) | BIT(gpio % 32);
- else
- value = readl(gfer) & (~BIT(gpio % 32));
- writel(value, gfer);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (priv->pdev)
- pm_runtime_put(&priv->pdev->dev);
-
- return 0;
-}
-
-static void intel_mid_irq_unmask(struct irq_data *d)
-{
-}
-
-static void intel_mid_irq_mask(struct irq_data *d)
-{
-}
-
-static struct irq_chip intel_mid_irqchip = {
- .name = "INTEL_MID-GPIO",
- .irq_mask = intel_mid_irq_mask,
- .irq_unmask = intel_mid_irq_unmask,
- .irq_set_type = intel_mid_irq_type,
-};
-
-static const struct intel_mid_gpio_ddata gpio_lincroft = {
- .ngpio = 64,
-};
-
-static const struct intel_mid_gpio_ddata gpio_penwell_aon = {
- .ngpio = 96,
- .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
-};
-
-static const struct intel_mid_gpio_ddata gpio_penwell_core = {
- .ngpio = 96,
- .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
-};
-
-static const struct intel_mid_gpio_ddata gpio_cloverview_aon = {
- .ngpio = 96,
- .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE | INTEL_MID_IRQ_TYPE_LEVEL,
-};
-
-static const struct intel_mid_gpio_ddata gpio_cloverview_core = {
- .ngpio = 96,
- .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
-};
-
-static const struct pci_device_id intel_gpio_ids[] = {
- {
- /* Lincroft */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f),
- .driver_data = (kernel_ulong_t)&gpio_lincroft,
- },
- {
- /* Penwell AON */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f),
- .driver_data = (kernel_ulong_t)&gpio_penwell_aon,
- },
- {
- /* Penwell Core */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a),
- .driver_data = (kernel_ulong_t)&gpio_penwell_core,
- },
- {
- /* Cloverview Aon */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08eb),
- .driver_data = (kernel_ulong_t)&gpio_cloverview_aon,
- },
- {
- /* Cloverview Core */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7),
- .driver_data = (kernel_ulong_t)&gpio_cloverview_core,
- },
- { }
-};
-
-static void intel_mid_irq_handler(struct irq_desc *desc)
-{
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct intel_mid_gpio *priv = gpiochip_get_data(gc);
- struct irq_data *data = irq_desc_get_irq_data(desc);
- struct irq_chip *chip = irq_data_get_irq_chip(data);
- u32 base, gpio, mask;
- unsigned long pending;
- void __iomem *gedr;
-
- /* check GPIO controller to check which pin triggered the interrupt */
- for (base = 0; base < priv->chip.ngpio; base += 32) {
- gedr = gpio_reg(&priv->chip, base, GEDR);
- while ((pending = readl(gedr))) {
- gpio = __ffs(pending);
- mask = BIT(gpio);
- /* Clear before handling so we can't lose an edge */
- writel(mask, gedr);
- generic_handle_irq(irq_find_mapping(gc->irq.domain,
- base + gpio));
- }
- }
-
- chip->irq_eoi(data);
-}
-
-static int intel_mid_irq_init_hw(struct gpio_chip *chip)
-{
- struct intel_mid_gpio *priv = gpiochip_get_data(chip);
- void __iomem *reg;
- unsigned base;
-
- for (base = 0; base < priv->chip.ngpio; base += 32) {
- /* Clear the rising-edge detect register */
- reg = gpio_reg(&priv->chip, base, GRER);
- writel(0, reg);
- /* Clear the falling-edge detect register */
- reg = gpio_reg(&priv->chip, base, GFER);
- writel(0, reg);
- /* Clear the edge detect status register */
- reg = gpio_reg(&priv->chip, base, GEDR);
- writel(~0, reg);
- }
-
- return 0;
-}
-
-static int __maybe_unused intel_gpio_runtime_idle(struct device *dev)
-{
- int err = pm_schedule_suspend(dev, 500);
- return err ?: -EBUSY;
-}
-
-static const struct dev_pm_ops intel_gpio_pm_ops = {
- SET_RUNTIME_PM_OPS(NULL, NULL, intel_gpio_runtime_idle)
-};
-
-static int intel_gpio_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- void __iomem *base;
- struct intel_mid_gpio *priv;
- u32 gpio_base;
- u32 irq_base;
- int retval;
- struct gpio_irq_chip *girq;
- struct intel_mid_gpio_ddata *ddata =
- (struct intel_mid_gpio_ddata *)id->driver_data;
-
- retval = pcim_enable_device(pdev);
- if (retval)
- return retval;
-
- retval = pcim_iomap_regions(pdev, 1 << 0 | 1 << 1, pci_name(pdev));
- if (retval) {
- dev_err(&pdev->dev, "I/O memory mapping error\n");
- return retval;
- }
-
- base = pcim_iomap_table(pdev)[1];
-
- irq_base = readl(base);
- gpio_base = readl(sizeof(u32) + base);
-
- /* release the IO mapping, since we already get the info from bar1 */
- pcim_iounmap_regions(pdev, 1 << 1);
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->reg_base = pcim_iomap_table(pdev)[0];
- priv->chip.label = dev_name(&pdev->dev);
- priv->chip.parent = &pdev->dev;
- priv->chip.request = intel_gpio_request;
- priv->chip.direction_input = intel_gpio_direction_input;
- priv->chip.direction_output = intel_gpio_direction_output;
- priv->chip.get = intel_gpio_get;
- priv->chip.set = intel_gpio_set;
- priv->chip.base = gpio_base;
- priv->chip.ngpio = ddata->ngpio;
- priv->chip.can_sleep = false;
- priv->pdev = pdev;
-
- spin_lock_init(&priv->lock);
-
- girq = &priv->chip.irq;
- girq->chip = &intel_mid_irqchip;
- girq->init_hw = intel_mid_irq_init_hw;
- girq->parent_handler = intel_mid_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->parents[0] = pdev->irq;
- girq->first = irq_base;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_simple_irq;
-
- pci_set_drvdata(pdev, priv);
-
- retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
- if (retval) {
- dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
- return retval;
- }
-
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_allow(&pdev->dev);
-
- return 0;
-}
-
-static struct pci_driver intel_gpio_driver = {
- .name = "intel_mid_gpio",
- .id_table = intel_gpio_ids,
- .probe = intel_gpio_probe,
- .driver = {
- .pm = &intel_gpio_pm_ops,
- },
-};
-
-builtin_pci_driver(intel_gpio_driver);
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index 7c0a9ef0b500..82b3a913005d 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -325,7 +325,7 @@ static int max77620_gpio_probe(struct platform_device *pdev)
girq->parents = NULL;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_edge_irq;
- girq->init_hw = max77620_gpio_irq_init_hw,
+ girq->init_hw = max77620_gpio_irq_init_hw;
girq->threaded = true;
platform_set_drvdata(pdev, mgpio);
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 706687fab634..22f3ce218f5d 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -194,6 +194,11 @@ static int mrfld_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
{
u32 debounce;
+ if ((pinconf_to_config_param(config) == PIN_CONFIG_BIAS_DISABLE) ||
+ (pinconf_to_config_param(config) == PIN_CONFIG_BIAS_PULL_UP) ||
+ (pinconf_to_config_param(config) == PIN_CONFIG_BIAS_PULL_DOWN))
+ return gpiochip_generic_config(chip, offset, config);
+
if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
return -ENOTSUPP;
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
deleted file mode 100644
index 7e3c96e4ab2c..000000000000
--- a/drivers/gpio/gpio-msic.c
+++ /dev/null
@@ -1,314 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Intel Medfield MSIC GPIO driver>
- * Copyright (c) 2011, Intel Corporation.
- *
- * Author: Mathias Nyman <mathias.nyman@linux.intel.com>
- * Based on intel_pmic_gpio.c
- */
-
-#include <linux/gpio/driver.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mfd/intel_msic.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-/* the offset for the mapping of global gpio pin to irq */
-#define MSIC_GPIO_IRQ_OFFSET 0x100
-
-#define MSIC_GPIO_DIR_IN 0
-#define MSIC_GPIO_DIR_OUT BIT(5)
-#define MSIC_GPIO_TRIG_FALL BIT(1)
-#define MSIC_GPIO_TRIG_RISE BIT(2)
-
-/* masks for msic gpio output GPIOxxxxCTLO registers */
-#define MSIC_GPIO_DIR_MASK BIT(5)
-#define MSIC_GPIO_DRV_MASK BIT(4)
-#define MSIC_GPIO_REN_MASK BIT(3)
-#define MSIC_GPIO_RVAL_MASK (BIT(2) | BIT(1))
-#define MSIC_GPIO_DOUT_MASK BIT(0)
-
-/* masks for msic gpio input GPIOxxxxCTLI registers */
-#define MSIC_GPIO_GLBYP_MASK BIT(5)
-#define MSIC_GPIO_DBNC_MASK (BIT(4) | BIT(3))
-#define MSIC_GPIO_INTCNT_MASK (BIT(2) | BIT(1))
-#define MSIC_GPIO_DIN_MASK BIT(0)
-
-#define MSIC_NUM_GPIO 24
-
-struct msic_gpio {
- struct platform_device *pdev;
- struct mutex buslock;
- struct gpio_chip chip;
- int irq;
- unsigned irq_base;
- unsigned long trig_change_mask;
- unsigned trig_type;
-};
-
-/*
- * MSIC has 24 gpios, 16 low voltage (1.2-1.8v) and 8 high voltage (3v).
- * Both the high and low voltage gpios are divided in two banks.
- * GPIOs are numbered with GPIO0LV0 as gpio_base in the following order:
- * GPIO0LV0..GPIO0LV7: low voltage, bank 0, gpio_base
- * GPIO1LV0..GPIO1LV7: low voltage, bank 1, gpio_base + 8
- * GPIO0HV0..GPIO0HV3: high voltage, bank 0, gpio_base + 16
- * GPIO1HV0..GPIO1HV3: high voltage, bank 1, gpio_base + 20
- */
-
-static int msic_gpio_to_ireg(unsigned offset)
-{
- if (offset >= MSIC_NUM_GPIO)
- return -EINVAL;
-
- if (offset < 8)
- return INTEL_MSIC_GPIO0LV0CTLI - offset;
- if (offset < 16)
- return INTEL_MSIC_GPIO1LV0CTLI - offset + 8;
- if (offset < 20)
- return INTEL_MSIC_GPIO0HV0CTLI - offset + 16;
-
- return INTEL_MSIC_GPIO1HV0CTLI - offset + 20;
-}
-
-static int msic_gpio_to_oreg(unsigned offset)
-{
- if (offset >= MSIC_NUM_GPIO)
- return -EINVAL;
-
- if (offset < 8)
- return INTEL_MSIC_GPIO0LV0CTLO - offset;
- if (offset < 16)
- return INTEL_MSIC_GPIO1LV0CTLO - offset + 8;
- if (offset < 20)
- return INTEL_MSIC_GPIO0HV0CTLO - offset + 16;
-
- return INTEL_MSIC_GPIO1HV0CTLO - offset + 20;
-}
-
-static int msic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- int reg;
-
- reg = msic_gpio_to_oreg(offset);
- if (reg < 0)
- return reg;
-
- return intel_msic_reg_update(reg, MSIC_GPIO_DIR_IN, MSIC_GPIO_DIR_MASK);
-}
-
-static int msic_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- int reg;
- unsigned mask;
-
- value = (!!value) | MSIC_GPIO_DIR_OUT;
- mask = MSIC_GPIO_DIR_MASK | MSIC_GPIO_DOUT_MASK;
-
- reg = msic_gpio_to_oreg(offset);
- if (reg < 0)
- return reg;
-
- return intel_msic_reg_update(reg, value, mask);
-}
-
-static int msic_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- u8 r;
- int ret;
- int reg;
-
- reg = msic_gpio_to_ireg(offset);
- if (reg < 0)
- return reg;
-
- ret = intel_msic_reg_read(reg, &r);
- if (ret < 0)
- return ret;
-
- return !!(r & MSIC_GPIO_DIN_MASK);
-}
-
-static void msic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- int reg;
-
- reg = msic_gpio_to_oreg(offset);
- if (reg < 0)
- return;
-
- intel_msic_reg_update(reg, !!value , MSIC_GPIO_DOUT_MASK);
-}
-
-/*
- * This is called from genirq with mg->buslock locked and
- * irq_desc->lock held. We can not access the scu bus here, so we
- * store the change and update in the bus_sync_unlock() function below
- */
-static int msic_irq_type(struct irq_data *data, unsigned type)
-{
- struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
- u32 gpio = data->irq - mg->irq_base;
-
- if (gpio >= mg->chip.ngpio)
- return -EINVAL;
-
- /* mark for which gpio the trigger changed, protected by buslock */
- mg->trig_change_mask |= (1 << gpio);
- mg->trig_type = type;
-
- return 0;
-}
-
-static int msic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct msic_gpio *mg = gpiochip_get_data(chip);
- return mg->irq_base + offset;
-}
-
-static void msic_bus_lock(struct irq_data *data)
-{
- struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
- mutex_lock(&mg->buslock);
-}
-
-static void msic_bus_sync_unlock(struct irq_data *data)
-{
- struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
- int offset;
- int reg;
- u8 trig = 0;
-
- /* We can only get one change at a time as the buslock covers the
- entire transaction. The irq_desc->lock is dropped before we are
- called but that is fine */
- if (mg->trig_change_mask) {
- offset = __ffs(mg->trig_change_mask);
-
- reg = msic_gpio_to_ireg(offset);
- if (reg < 0)
- goto out;
-
- if (mg->trig_type & IRQ_TYPE_EDGE_RISING)
- trig |= MSIC_GPIO_TRIG_RISE;
- if (mg->trig_type & IRQ_TYPE_EDGE_FALLING)
- trig |= MSIC_GPIO_TRIG_FALL;
-
- intel_msic_reg_update(reg, trig, MSIC_GPIO_INTCNT_MASK);
- mg->trig_change_mask = 0;
- }
-out:
- mutex_unlock(&mg->buslock);
-}
-
-/* Firmware does all the masking and unmasking for us, no masking here. */
-static void msic_irq_unmask(struct irq_data *data) { }
-
-static void msic_irq_mask(struct irq_data *data) { }
-
-static struct irq_chip msic_irqchip = {
- .name = "MSIC-GPIO",
- .irq_mask = msic_irq_mask,
- .irq_unmask = msic_irq_unmask,
- .irq_set_type = msic_irq_type,
- .irq_bus_lock = msic_bus_lock,
- .irq_bus_sync_unlock = msic_bus_sync_unlock,
-};
-
-static void msic_gpio_irq_handler(struct irq_desc *desc)
-{
- struct irq_data *data = irq_desc_get_irq_data(desc);
- struct msic_gpio *mg = irq_data_get_irq_handler_data(data);
- struct irq_chip *chip = irq_data_get_irq_chip(data);
- struct intel_msic *msic = pdev_to_intel_msic(mg->pdev);
- unsigned long pending;
- int i;
- int bitnr;
- u8 pin;
-
- for (i = 0; i < (mg->chip.ngpio / BITS_PER_BYTE); i++) {
- intel_msic_irq_read(msic, INTEL_MSIC_GPIO0LVIRQ + i, &pin);
- pending = pin;
-
- for_each_set_bit(bitnr, &pending, BITS_PER_BYTE)
- generic_handle_irq(mg->irq_base + i * BITS_PER_BYTE + bitnr);
- }
- chip->irq_eoi(data);
-}
-
-static int platform_msic_gpio_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct intel_msic_gpio_pdata *pdata = dev_get_platdata(dev);
- struct msic_gpio *mg;
- int irq = platform_get_irq(pdev, 0);
- int retval;
- int i;
-
- if (irq < 0) {
- dev_err(dev, "no IRQ line: %d\n", irq);
- return irq;
- }
-
- if (!pdata || !pdata->gpio_base) {
- dev_err(dev, "incorrect or missing platform data\n");
- return -EINVAL;
- }
-
- mg = kzalloc(sizeof(*mg), GFP_KERNEL);
- if (!mg)
- return -ENOMEM;
-
- dev_set_drvdata(dev, mg);
-
- mg->pdev = pdev;
- mg->irq = irq;
- mg->irq_base = pdata->gpio_base + MSIC_GPIO_IRQ_OFFSET;
- mg->chip.label = "msic_gpio";
- mg->chip.direction_input = msic_gpio_direction_input;
- mg->chip.direction_output = msic_gpio_direction_output;
- mg->chip.get = msic_gpio_get;
- mg->chip.set = msic_gpio_set;
- mg->chip.to_irq = msic_gpio_to_irq;
- mg->chip.base = pdata->gpio_base;
- mg->chip.ngpio = MSIC_NUM_GPIO;
- mg->chip.can_sleep = true;
- mg->chip.parent = dev;
-
- mutex_init(&mg->buslock);
-
- retval = gpiochip_add_data(&mg->chip, mg);
- if (retval) {
- dev_err(dev, "Adding MSIC gpio chip failed\n");
- goto err;
- }
-
- for (i = 0; i < mg->chip.ngpio; i++) {
- irq_set_chip_data(i + mg->irq_base, mg);
- irq_set_chip_and_handler(i + mg->irq_base,
- &msic_irqchip,
- handle_simple_irq);
- }
- irq_set_chained_handler_and_data(mg->irq, msic_gpio_irq_handler, mg);
-
- return 0;
-err:
- kfree(mg);
- return retval;
-}
-
-static struct platform_driver platform_msic_gpio_driver = {
- .driver = {
- .name = "msic_gpio",
- },
- .probe = platform_msic_gpio_probe,
-};
-
-static int __init platform_msic_gpio_init(void)
-{
- return platform_driver_register(&platform_msic_gpio_driver);
-}
-subsys_initcall(platform_msic_gpio_init);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index a912a8fed197..8f429d9f3661 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -70,7 +70,12 @@
*/
#define PWM_BLINK_ON_DURATION_OFF 0x0
#define PWM_BLINK_OFF_DURATION_OFF 0x4
+#define PWM_BLINK_COUNTER_B_OFF 0x8
+/* Armada 8k variant gpios register offsets */
+#define AP80X_GPIO0_OFF_A8K 0x1040
+#define CP11X_GPIO0_OFF_A8K 0x100
+#define CP11X_GPIO1_OFF_A8K 0x140
/* The MV78200 has per-CPU registers for edge mask and level mask */
#define GPIO_EDGE_MASK_MV78200_OFF(cpu) ((cpu) ? 0x30 : 0x18)
@@ -93,6 +98,7 @@
struct mvebu_pwm {
struct regmap *regs;
+ u32 offset;
unsigned long clk_rate;
struct gpio_desc *gpiod;
struct pwm_chip chip;
@@ -283,12 +289,12 @@ mvebu_gpio_write_level_mask(struct mvebu_gpio_chip *mvchip, u32 val)
*/
static unsigned int mvebu_pwmreg_blink_on_duration(struct mvebu_pwm *mvpwm)
{
- return PWM_BLINK_ON_DURATION_OFF;
+ return mvpwm->offset + PWM_BLINK_ON_DURATION_OFF;
}
static unsigned int mvebu_pwmreg_blink_off_duration(struct mvebu_pwm *mvpwm)
{
- return PWM_BLINK_OFF_DURATION_OFF;
+ return mvpwm->offset + PWM_BLINK_OFF_DURATION_OFF;
}
/*
@@ -667,26 +673,21 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
spin_lock_irqsave(&mvpwm->lock, flags);
regmap_read(mvpwm->regs, mvebu_pwmreg_blink_on_duration(mvpwm), &u);
- val = (unsigned long long) u * NSEC_PER_SEC;
- do_div(val, mvpwm->clk_rate);
- if (val > UINT_MAX)
- state->duty_cycle = UINT_MAX;
- else if (val)
- state->duty_cycle = val;
+ /* Hardware treats zero as 2^32. See mvebu_pwm_apply(). */
+ if (u > 0)
+ val = u;
else
- state->duty_cycle = 1;
+ val = UINT_MAX + 1ULL;
+ state->duty_cycle = DIV_ROUND_UP_ULL(val * NSEC_PER_SEC,
+ mvpwm->clk_rate);
- val = (unsigned long long) u; /* on duration */
regmap_read(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), &u);
- val += (unsigned long long) u; /* period = on + off duration */
- val *= NSEC_PER_SEC;
- do_div(val, mvpwm->clk_rate);
- if (val > UINT_MAX)
- state->period = UINT_MAX;
- else if (val)
- state->period = val;
+ /* period = on + off duration */
+ if (u > 0)
+ val += u;
else
- state->period = 1;
+ val += UINT_MAX + 1ULL;
+ state->period = DIV_ROUND_UP_ULL(val * NSEC_PER_SEC, mvpwm->clk_rate);
regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u);
if (u)
@@ -708,19 +709,27 @@ static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
val = (unsigned long long) mvpwm->clk_rate * state->duty_cycle;
do_div(val, NSEC_PER_SEC);
- if (val > UINT_MAX)
+ if (val > UINT_MAX + 1ULL)
return -EINVAL;
- if (val)
+ /*
+ * Zero on/off values don't work as expected. Experimentation shows
+ * that a zero value is treated as 2^32. This behavior is not documented.
+ */
+ if (val == UINT_MAX + 1ULL)
+ on = 0;
+ else if (val)
on = val;
else
on = 1;
- val = (unsigned long long) mvpwm->clk_rate *
- (state->period - state->duty_cycle);
+ val = (unsigned long long) mvpwm->clk_rate * state->period;
do_div(val, NSEC_PER_SEC);
- if (val > UINT_MAX)
+ val -= on;
+ if (val > UINT_MAX + 1ULL)
return -EINVAL;
- if (val)
+ if (val == UINT_MAX + 1ULL)
+ off = 0;
+ else if (val)
off = val;
else
off = 1;
@@ -778,51 +787,80 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct mvebu_pwm *mvpwm;
void __iomem *base;
+ u32 offset;
u32 set;
- if (!of_device_is_compatible(mvchip->chip.of_node,
- "marvell,armada-370-gpio"))
- return 0;
-
- /*
- * There are only two sets of PWM configuration registers for
- * all the GPIO lines on those SoCs which this driver reserves
- * for the first two GPIO chips. So if the resource is missing
- * we can't treat it as an error.
- */
- if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm"))
+ if (of_device_is_compatible(mvchip->chip.of_node,
+ "marvell,armada-370-gpio")) {
+ /*
+ * There are only two sets of PWM configuration registers for
+ * all the GPIO lines on those SoCs which this driver reserves
+ * for the first two GPIO chips. So if the resource is missing
+ * we can't treat it as an error.
+ */
+ if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm"))
+ return 0;
+ offset = 0;
+ } else if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) {
+ int ret = of_property_read_u32(dev->of_node,
+ "marvell,pwm-offset", &offset);
+ if (ret < 0)
+ return 0;
+ } else {
return 0;
+ }
if (IS_ERR(mvchip->clk))
return PTR_ERR(mvchip->clk);
- /*
- * Use set A for lines of GPIO chip with id 0, B for GPIO chip
- * with id 1. Don't allow further GPIO chips to be used for PWM.
- */
- if (id == 0)
- set = 0;
- else if (id == 1)
- set = U32_MAX;
- else
- return -EINVAL;
- regmap_write(mvchip->regs,
- GPIO_BLINK_CNT_SELECT_OFF + mvchip->offset, set);
-
mvpwm = devm_kzalloc(dev, sizeof(struct mvebu_pwm), GFP_KERNEL);
if (!mvpwm)
return -ENOMEM;
mvchip->mvpwm = mvpwm;
mvpwm->mvchip = mvchip;
+ mvpwm->offset = offset;
+
+ if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) {
+ mvpwm->regs = mvchip->regs;
+
+ switch (mvchip->offset) {
+ case AP80X_GPIO0_OFF_A8K:
+ case CP11X_GPIO0_OFF_A8K:
+ /* Blink counter A */
+ set = 0;
+ break;
+ case CP11X_GPIO1_OFF_A8K:
+ /* Blink counter B */
+ set = U32_MAX;
+ mvpwm->offset += PWM_BLINK_COUNTER_B_OFF;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ base = devm_platform_ioremap_resource_byname(pdev, "pwm");
+ if (IS_ERR(base))
+ return PTR_ERR(base);
- base = devm_platform_ioremap_resource_byname(pdev, "pwm");
- if (IS_ERR(base))
- return PTR_ERR(base);
+ mvpwm->regs = devm_regmap_init_mmio(&pdev->dev, base,
+ &mvebu_gpio_regmap_config);
+ if (IS_ERR(mvpwm->regs))
+ return PTR_ERR(mvpwm->regs);
- mvpwm->regs = devm_regmap_init_mmio(&pdev->dev, base,
- &mvebu_gpio_regmap_config);
- if (IS_ERR(mvpwm->regs))
- return PTR_ERR(mvpwm->regs);
+ /*
+ * Use set A for lines of GPIO chip with id 0, B for GPIO chip
+ * with id 1. Don't allow further GPIO chips to be used for PWM.
+ */
+ if (id == 0)
+ set = 0;
+ else if (id == 1)
+ set = U32_MAX;
+ else
+ return -EINVAL;
+ }
+
+ regmap_write(mvchip->regs,
+ GPIO_BLINK_CNT_SELECT_OFF + mvchip->offset, set);
mvpwm->clk_rate = clk_get_rate(mvchip->clk);
if (!mvpwm->clk_rate) {
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 825b362eb4b7..5ea09fd01544 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -73,6 +73,7 @@
static const struct i2c_device_id pca953x_id[] = {
{ "pca6416", 16 | PCA953X_TYPE | PCA_INT, },
{ "pca9505", 40 | PCA953X_TYPE | PCA_INT, },
+ { "pca9506", 40 | PCA953X_TYPE | PCA_INT, },
{ "pca9534", 8 | PCA953X_TYPE | PCA_INT, },
{ "pca9535", 16 | PCA953X_TYPE | PCA_INT, },
{ "pca9536", 4 | PCA953X_TYPE, },
@@ -1236,6 +1237,7 @@ static int pca953x_resume(struct device *dev)
static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "nxp,pca6416", .data = OF_953X(16, PCA_INT), },
{ .compatible = "nxp,pca9505", .data = OF_953X(40, PCA_INT), },
+ { .compatible = "nxp,pca9506", .data = OF_953X(40, PCA_INT), },
{ .compatible = "nxp,pca9534", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "nxp,pca9535", .data = OF_953X(16, PCA_INT), },
{ .compatible = "nxp,pca9536", .data = OF_953X( 4, 0), },
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index a2a8d155c75e..b7568ee33696 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -332,7 +332,7 @@ static int pcf857x_probe(struct i2c_client *client,
* reset state. Otherwise it flags pins to be driven low.
*/
gpio->out = ~n_latch;
- gpio->status = gpio->out;
+ gpio->status = gpio->read(gpio->client);
/* Enable irqchip if we have an interrupt */
if (client->irq) {
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 0b572dbc4a36..e7092d5fe700 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -35,6 +35,8 @@ struct gpio_rcar_bank_info {
struct gpio_rcar_info {
bool has_outdtsel;
bool has_both_edge_trigger;
+ bool has_always_in;
+ bool has_inen;
};
struct gpio_rcar_priv {
@@ -62,6 +64,7 @@ struct gpio_rcar_priv {
#define FILONOFF 0x28 /* Chattering Prevention On/Off Register */
#define OUTDTSEL 0x40 /* Output Data Select Register */
#define BOTHEDGE 0x4c /* One Edge/Both Edge Select Register */
+#define INEN 0x50 /* General Input Enable Register */
#define RCAR_MAX_GPIO_PER_BANK 32
@@ -302,9 +305,11 @@ static int gpio_rcar_get(struct gpio_chip *chip, unsigned offset)
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
u32 bit = BIT(offset);
- /* testing on r8a7790 shows that INDT does not show correct pin state
- * when configured as output, so use OUTDT in case of output pins */
- if (gpio_rcar_read(p, INOUTSEL) & bit)
+ /*
+ * Before R-Car Gen3, INDT does not show the correct pin state when
+ * configured as output, so use OUTDT for output pins
+ */
+ if (!p->info.has_always_in && (gpio_rcar_read(p, INOUTSEL) & bit))
return !!(gpio_rcar_read(p, OUTDT) & bit);
else
return !!(gpio_rcar_read(p, INDT) & bit);
@@ -324,6 +329,11 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
if (!bankmask)
return 0;
+ if (p->info.has_always_in) {
+ bits[0] = gpio_rcar_read(p, INDT) & bankmask;
+ return 0;
+ }
+
spin_lock_irqsave(&p->lock, flags);
outputs = gpio_rcar_read(p, INOUTSEL);
m = outputs & bankmask;
@@ -383,41 +393,35 @@ static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
static const struct gpio_rcar_info gpio_rcar_info_gen1 = {
.has_outdtsel = false,
.has_both_edge_trigger = false,
+ .has_always_in = false,
+ .has_inen = false,
};
static const struct gpio_rcar_info gpio_rcar_info_gen2 = {
.has_outdtsel = true,
.has_both_edge_trigger = true,
+ .has_always_in = false,
+ .has_inen = false,
+};
+
+static const struct gpio_rcar_info gpio_rcar_info_gen3 = {
+ .has_outdtsel = true,
+ .has_both_edge_trigger = true,
+ .has_always_in = true,
+ .has_inen = false,
+};
+
+static const struct gpio_rcar_info gpio_rcar_info_v3u = {
+ .has_outdtsel = true,
+ .has_both_edge_trigger = true,
+ .has_always_in = true,
+ .has_inen = true,
};
static const struct of_device_id gpio_rcar_of_table[] = {
{
- .compatible = "renesas,gpio-r8a7743",
- /* RZ/G1 GPIO is identical to R-Car Gen2. */
- .data = &gpio_rcar_info_gen2,
- }, {
- .compatible = "renesas,gpio-r8a7790",
- .data = &gpio_rcar_info_gen2,
- }, {
- .compatible = "renesas,gpio-r8a7791",
- .data = &gpio_rcar_info_gen2,
- }, {
- .compatible = "renesas,gpio-r8a7792",
- .data = &gpio_rcar_info_gen2,
- }, {
- .compatible = "renesas,gpio-r8a7793",
- .data = &gpio_rcar_info_gen2,
- }, {
- .compatible = "renesas,gpio-r8a7794",
- .data = &gpio_rcar_info_gen2,
- }, {
- .compatible = "renesas,gpio-r8a7795",
- /* Gen3 GPIO is identical to Gen2. */
- .data = &gpio_rcar_info_gen2,
- }, {
- .compatible = "renesas,gpio-r8a7796",
- /* Gen3 GPIO is identical to Gen2. */
- .data = &gpio_rcar_info_gen2,
+ .compatible = "renesas,gpio-r8a779a0",
+ .data = &gpio_rcar_info_v3u,
}, {
.compatible = "renesas,rcar-gen1-gpio",
.data = &gpio_rcar_info_gen1,
@@ -426,8 +430,7 @@ static const struct of_device_id gpio_rcar_of_table[] = {
.data = &gpio_rcar_info_gen2,
}, {
.compatible = "renesas,rcar-gen3-gpio",
- /* Gen3 GPIO is identical to Gen2. */
- .data = &gpio_rcar_info_gen2,
+ .data = &gpio_rcar_info_gen3,
}, {
.compatible = "renesas,gpio-rcar",
.data = &gpio_rcar_info_gen1,
@@ -460,6 +463,17 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
return 0;
}
+static void gpio_rcar_enable_inputs(struct gpio_rcar_priv *p)
+{
+ u32 mask = GENMASK(p->gpio_chip.ngpio - 1, 0);
+
+ /* Select "Input Enable" in INEN */
+ if (p->gpio_chip.valid_mask)
+ mask &= p->gpio_chip.valid_mask[0];
+ if (mask)
+ gpio_rcar_write(p, INEN, gpio_rcar_read(p, INEN) | mask);
+}
+
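gpio_rcar_enable_inputs() first builds a mask covering every line in the bank with GENMASK() and then drops any lines that the chip's valid_mask marks as unusable, so only valid lines get their INEN bit set. A standalone sketch of that masking step, with a made-up valid_mask:

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for the kernel's GENMASK(): bits h..l set, h below 32. */
    #define GENMASK32(h, l) ((0xFFFFFFFFu >> (31u - (h))) & ~((1u << (l)) - 1u))

    int main(void)
    {
        unsigned int ngpio = 32;
        uint32_t valid = 0xFFFF00FFu;             /* hypothetical: lines 8-15 unusable */
        uint32_t mask = GENMASK32(ngpio - 1, 0);  /* every line in this bank */

        mask &= valid;                            /* never enable invalid lines */
        printf("INEN |= 0x%08x\n", mask);
        return 0;
    }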
static int gpio_rcar_probe(struct platform_device *pdev)
{
struct gpio_rcar_priv *p;
@@ -549,6 +563,12 @@ static int gpio_rcar_probe(struct platform_device *pdev)
goto err1;
}
+ if (p->info.has_inen) {
+ pm_runtime_get_sync(p->dev);
+ gpio_rcar_enable_inputs(p);
+ pm_runtime_put(p->dev);
+ }
+
dev_info(dev, "driving %d GPIOs\n", npins);
return 0;
@@ -624,6 +644,9 @@ static int gpio_rcar_resume(struct device *dev)
}
}
+ if (p->info.has_inen)
+ gpio_rcar_enable_inputs(p);
+
return 0;
}
#endif /* CONFIG_PM_SLEEP*/
diff --git a/drivers/gpio/gpio-sl28cpld.c b/drivers/gpio/gpio-sl28cpld.c
index 889b8f5622c2..52404736ac86 100644
--- a/drivers/gpio/gpio-sl28cpld.c
+++ b/drivers/gpio/gpio-sl28cpld.c
@@ -65,13 +65,13 @@ static int sl28cpld_gpio_irq_init(struct platform_device *pdev,
if (!irq_chip)
return -ENOMEM;
- irq_chip->name = "sl28cpld-gpio-irq",
+ irq_chip->name = "sl28cpld-gpio-irq";
irq_chip->irqs = sl28cpld_gpio_irqs;
irq_chip->num_irqs = ARRAY_SIZE(sl28cpld_gpio_irqs);
irq_chip->num_regs = 1;
irq_chip->status_base = base + GPIO_REG_IP;
irq_chip->mask_base = base + GPIO_REG_IE;
- irq_chip->mask_invert = true,
+ irq_chip->mask_invert = true;
irq_chip->ack_base = base + GPIO_REG_IP;
ret = devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index e19ebff6018c..0025f613d9b3 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -60,7 +60,6 @@ struct tegra_gpio_info;
struct tegra_gpio_bank {
unsigned int bank;
- unsigned int irq;
/*
* IRQ-core code uses raw locking, and thus, nested locking also
@@ -81,7 +80,6 @@ struct tegra_gpio_bank {
u32 dbc_enb[4];
#endif
u32 dbc_cnt[4];
- struct tegra_gpio_info *tgi;
};
struct tegra_gpio_soc_config {
@@ -93,12 +91,12 @@ struct tegra_gpio_soc_config {
struct tegra_gpio_info {
struct device *dev;
void __iomem *regs;
- struct irq_domain *irq_domain;
struct tegra_gpio_bank *bank_info;
const struct tegra_gpio_soc_config *soc;
struct gpio_chip gc;
struct irq_chip ic;
u32 bank_count;
+ unsigned int *irqs;
};
static inline void tegra_gpio_writel(struct tegra_gpio_info *tgi,
@@ -274,17 +272,10 @@ static int tegra_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
return tegra_gpio_set_debounce(chip, offset, debounce);
}
-static int tegra_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
-{
- struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
-
- return irq_find_mapping(tgi->irq_domain, offset);
-}
-
static void tegra_gpio_irq_ack(struct irq_data *d)
{
- struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- struct tegra_gpio_info *tgi = bank->tgi;
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
unsigned int gpio = d->hwirq;
tegra_gpio_writel(tgi, 1 << GPIO_BIT(gpio), GPIO_INT_CLR(tgi, gpio));
@@ -292,8 +283,8 @@ static void tegra_gpio_irq_ack(struct irq_data *d)
static void tegra_gpio_irq_mask(struct irq_data *d)
{
- struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- struct tegra_gpio_info *tgi = bank->tgi;
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
unsigned int gpio = d->hwirq;
tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 0);
@@ -301,8 +292,8 @@ static void tegra_gpio_irq_mask(struct irq_data *d)
static void tegra_gpio_irq_unmask(struct irq_data *d)
{
- struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- struct tegra_gpio_info *tgi = bank->tgi;
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
unsigned int gpio = d->hwirq;
tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 1);
@@ -311,11 +302,14 @@ static void tegra_gpio_irq_unmask(struct irq_data *d)
static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
unsigned int gpio = d->hwirq, port = GPIO_PORT(gpio), lvl_type;
- struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- struct tegra_gpio_info *tgi = bank->tgi;
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+ struct tegra_gpio_bank *bank;
unsigned long flags;
- u32 val;
int ret;
+ u32 val;
+
+ bank = &tgi->bank_info[GPIO_BANK(d->hwirq)];
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
@@ -367,13 +361,16 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
irq_set_handler_locked(d, handle_edge_irq);
- return 0;
+ if (d->parent_data)
+ ret = irq_chip_set_type_parent(d, type);
+
+ return ret;
}
static void tegra_gpio_irq_shutdown(struct irq_data *d)
{
- struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- struct tegra_gpio_info *tgi = bank->tgi;
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
unsigned int gpio = d->hwirq;
tegra_gpio_irq_mask(d);
@@ -382,13 +379,25 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d)
static void tegra_gpio_irq_handler(struct irq_desc *desc)
{
- unsigned int port, pin, gpio;
+ struct tegra_gpio_info *tgi = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_domain *domain = tgi->gc.irq.domain;
+ unsigned int irq = irq_desc_get_irq(desc);
+ struct tegra_gpio_bank *bank = NULL;
+ unsigned int port, pin, gpio, i;
bool unmasked = false;
- u32 lvl;
unsigned long sta;
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct tegra_gpio_bank *bank = irq_desc_get_handler_data(desc);
- struct tegra_gpio_info *tgi = bank->tgi;
+ u32 lvl;
+
+ for (i = 0; i < tgi->bank_count; i++) {
+ if (tgi->irqs[i] == irq) {
+ bank = &tgi->bank_info[i];
+ break;
+ }
+ }
+
+ if (WARN_ON(bank == NULL))
+ return;
chained_irq_enter(chip, desc);
@@ -411,14 +420,47 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
- generic_handle_irq(irq_find_mapping(tgi->irq_domain,
- gpio + pin));
+ irq = irq_find_mapping(domain, gpio + pin);
+ if (WARN_ON(irq == 0))
+ continue;
+
+ generic_handle_irq(irq);
}
}
if (!unmasked)
chained_irq_exit(chip, desc);
+}
+
+static int tegra_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
+ unsigned int hwirq,
+ unsigned int type,
+ unsigned int *parent_hwirq,
+ unsigned int *parent_type)
+{
+ *parent_hwirq = chip->irq.child_offset_to_irq(chip, hwirq);
+ *parent_type = type;
+
+ return 0;
+}
+
+static void *tegra_gpio_populate_parent_fwspec(struct gpio_chip *chip,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
+{
+ struct irq_fwspec *fwspec;
+
+ fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return NULL;
+ fwspec->fwnode = chip->irq.parent_domain->fwnode;
+ fwspec->param_count = 3;
+ fwspec->param[0] = 0;
+ fwspec->param[1] = parent_hwirq;
+ fwspec->param[2] = parent_type;
+
+ return fwspec;
}
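tegra_gpio_populate_parent_fwspec() hands the parent domain a three-cell specifier: the first cell is left at zero (with a GIC parent that selects a shared peripheral interrupt), the second is the parent hardware IRQ, and the third is the trigger type. A standalone sketch of building that triple, with illustrative numbers:

    #include <stdio.h>

    /* Stand-in for the three param cells written into the irq_fwspec above. */
    struct parent_spec {
        unsigned int cells[3];
    };

    static struct parent_spec make_parent_spec(unsigned int parent_hwirq,
                                               unsigned int parent_type)
    {
        /* cell 0 stays 0, mirroring fwspec->param[0] in the driver */
        struct parent_spec spec = { { 0, parent_hwirq, parent_type } };
        return spec;
    }

    int main(void)
    {
        /* 32 and 4 (level high) are example values, not taken from the driver */
        struct parent_spec spec = make_parent_spec(32, 4);

        printf("<%u %u %u>\n", spec.cells[0], spec.cells[1], spec.cells[2]);
        return 0;
    }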
#ifdef CONFIG_PM_SLEEP
@@ -497,19 +539,31 @@ static int tegra_gpio_suspend(struct device *dev)
static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
{
- struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+ struct tegra_gpio_bank *bank;
unsigned int gpio = d->hwirq;
u32 port, bit, mask;
int err;
- err = irq_set_irq_wake(bank->irq, enable);
- if (err)
- return err;
+ bank = &tgi->bank_info[GPIO_BANK(d->hwirq)];
port = GPIO_PORT(gpio);
bit = GPIO_BIT(gpio);
mask = BIT(bit);
+ err = irq_set_irq_wake(tgi->irqs[bank->bank], enable);
+ if (err)
+ return err;
+
+ if (d->parent_data) {
+ err = irq_chip_set_wake_parent(d, enable);
+ if (err) {
+ irq_set_irq_wake(tgi->irqs[bank->bank], !enable);
+ return err;
+ }
+ }
+
if (enable)
bank->wake_enb[port] |= mask;
else
@@ -519,6 +573,35 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
}
#endif
+static int tegra_gpio_irq_set_affinity(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force)
+{
+ if (data->parent_data)
+ return irq_chip_set_affinity_parent(data, dest, force);
+
+ return -EINVAL;
+}
+
+static int tegra_gpio_irq_request_resources(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+
+ tegra_gpio_enable(tgi, d->hwirq);
+
+ return gpiochip_reqres_irq(chip, d->hwirq);
+}
+
+static void tegra_gpio_irq_release_resources(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+
+ gpiochip_relres_irq(chip, d->hwirq);
+ tegra_gpio_enable(tgi, d->hwirq);
+}
+
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
@@ -526,7 +609,7 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
static int tegra_dbg_gpio_show(struct seq_file *s, void *unused)
{
- struct tegra_gpio_info *tgi = s->private;
+ struct tegra_gpio_info *tgi = dev_get_drvdata(s->private);
unsigned int i, j;
for (i = 0; i < tgi->bank_count; i++) {
@@ -548,12 +631,10 @@ static int tegra_dbg_gpio_show(struct seq_file *s, void *unused)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(tegra_dbg_gpio);
-
static void tegra_gpio_debuginit(struct tegra_gpio_info *tgi)
{
- debugfs_create_file("tegra_gpio", 0444, NULL, tgi,
- &tegra_dbg_gpio_fops);
+ debugfs_create_devm_seqfile(tgi->dev, "tegra_gpio", NULL,
+ tegra_dbg_gpio_show);
}
#else
@@ -568,14 +649,18 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
};
-static struct lock_class_key gpio_lock_class;
-static struct lock_class_key gpio_request_class;
+static const struct of_device_id tegra_pmc_of_match[] = {
+ { .compatible = "nvidia,tegra210-pmc", },
+ { /* sentinel */ },
+};
static int tegra_gpio_probe(struct platform_device *pdev)
{
- struct tegra_gpio_info *tgi;
struct tegra_gpio_bank *bank;
- unsigned int gpio, i, j;
+ struct tegra_gpio_info *tgi;
+ struct gpio_irq_chip *irq;
+ struct device_node *np;
+ unsigned int i, j;
int ret;
tgi = devm_kzalloc(&pdev->dev, sizeof(*tgi), GFP_KERNEL);
@@ -604,7 +689,6 @@ static int tegra_gpio_probe(struct platform_device *pdev)
tgi->gc.direction_output = tegra_gpio_direction_output;
tgi->gc.set = tegra_gpio_set;
tgi->gc.get_direction = tegra_gpio_get_direction;
- tgi->gc.to_irq = tegra_gpio_to_irq;
tgi->gc.base = 0;
tgi->gc.ngpio = tgi->bank_count * 32;
tgi->gc.parent = &pdev->dev;
@@ -619,6 +703,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
tgi->ic.irq_set_wake = tegra_gpio_irq_set_wake;
#endif
+ tgi->ic.irq_request_resources = tegra_gpio_irq_request_resources;
+ tgi->ic.irq_release_resources = tegra_gpio_irq_release_resources;
platform_set_drvdata(pdev, tgi);
@@ -630,11 +716,10 @@ static int tegra_gpio_probe(struct platform_device *pdev)
if (!tgi->bank_info)
return -ENOMEM;
- tgi->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
- tgi->gc.ngpio,
- &irq_domain_simple_ops, NULL);
- if (!tgi->irq_domain)
- return -ENODEV;
+ tgi->irqs = devm_kcalloc(&pdev->dev, tgi->bank_count,
+ sizeof(*tgi->irqs), GFP_KERNEL);
+ if (!tgi->irqs)
+ return -ENOMEM;
for (i = 0; i < tgi->bank_count; i++) {
ret = platform_get_irq(pdev, i);
@@ -643,8 +728,36 @@ static int tegra_gpio_probe(struct platform_device *pdev)
bank = &tgi->bank_info[i];
bank->bank = i;
- bank->irq = ret;
- bank->tgi = tgi;
+
+ tgi->irqs[i] = ret;
+
+ for (j = 0; j < 4; j++) {
+ raw_spin_lock_init(&bank->lvl_lock[j]);
+ spin_lock_init(&bank->dbc_lock[j]);
+ }
+ }
+
+ irq = &tgi->gc.irq;
+ irq->chip = &tgi->ic;
+ irq->fwnode = of_node_to_fwnode(pdev->dev.of_node);
+ irq->child_to_parent_hwirq = tegra_gpio_child_to_parent_hwirq;
+ irq->populate_parent_alloc_arg = tegra_gpio_populate_parent_fwspec;
+ irq->handler = handle_simple_irq;
+ irq->default_type = IRQ_TYPE_NONE;
+ irq->parent_handler = tegra_gpio_irq_handler;
+ irq->parent_handler_data = tgi;
+ irq->num_parents = tgi->bank_count;
+ irq->parents = tgi->irqs;
+
+ np = of_find_matching_node(NULL, tegra_pmc_of_match);
+ if (np) {
+ irq->parent_domain = irq_find_host(np);
+ of_node_put(np);
+
+ if (!irq->parent_domain)
+ return -EPROBE_DEFER;
+
+ tgi->ic.irq_set_affinity = tegra_gpio_irq_set_affinity;
}
tgi->regs = devm_platform_ioremap_resource(pdev, 0);
@@ -660,33 +773,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
}
ret = devm_gpiochip_add_data(&pdev->dev, &tgi->gc, tgi);
- if (ret < 0) {
- irq_domain_remove(tgi->irq_domain);
+ if (ret < 0)
return ret;
- }
-
- for (gpio = 0; gpio < tgi->gc.ngpio; gpio++) {
- int irq = irq_create_mapping(tgi->irq_domain, gpio);
- /* No validity check; all Tegra GPIOs are valid IRQs */
-
- bank = &tgi->bank_info[GPIO_BANK(gpio)];
-
- irq_set_chip_data(irq, bank);
- irq_set_lockdep_class(irq, &gpio_lock_class, &gpio_request_class);
- irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
- }
-
- for (i = 0; i < tgi->bank_count; i++) {
- bank = &tgi->bank_info[i];
-
- irq_set_chained_handler_and_data(bank->irq,
- tegra_gpio_irq_handler, bank);
-
- for (j = 0; j < 4; j++) {
- raw_spin_lock_init(&bank->lvl_lock[j]);
- spin_lock_init(&bank->dbc_lock[j]);
- }
- }
tegra_gpio_debuginit(tgi);
@@ -715,18 +803,21 @@ static const struct of_device_id tegra_gpio_of_match[] = {
{ .compatible = "nvidia,tegra20-gpio", .data = &tegra20_gpio_config },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_gpio_of_match);
static struct platform_driver tegra_gpio_driver = {
- .driver = {
- .name = "tegra-gpio",
- .pm = &tegra_gpio_pm_ops,
+ .driver = {
+ .name = "tegra-gpio",
+ .pm = &tegra_gpio_pm_ops,
.of_match_table = tegra_gpio_of_match,
},
- .probe = tegra_gpio_probe,
+ .probe = tegra_gpio_probe,
};
-
-static int __init tegra_gpio_init(void)
-{
- return platform_driver_register(&tegra_gpio_driver);
-}
-subsys_initcall(tegra_gpio_init);
+module_platform_driver(tegra_gpio_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra GPIO controller driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_AUTHOR("Erik Gilling <konkers@google.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 286e0b1f46e4..1bd9e44df718 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -657,7 +657,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->gpio.get_direction = tegra186_gpio_get_direction;
gpio->gpio.direction_input = tegra186_gpio_direction_input;
gpio->gpio.direction_output = tegra186_gpio_direction_output;
- gpio->gpio.get = tegra186_gpio_get,
+ gpio->gpio.get = tegra186_gpio_get;
gpio->gpio.set = tegra186_gpio_set;
gpio->gpio.set_config = tegra186_gpio_set_config;
gpio->gpio.add_pin_ranges = tegra186_gpio_add_pin_ranges;
diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c
new file mode 100644
index 000000000000..0e3d19828eb1
--- /dev/null
+++ b/drivers/gpio/gpio-visconti.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Toshiba Visconti GPIO Support
+ *
+ * (C) Copyright 2020 Toshiba Electronic Devices & Storage Corporation
+ * (C) Copyright 2020 TOSHIBA CORPORATION
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+
+/* register offset */
+#define GPIO_DIR 0x00
+#define GPIO_IDATA 0x08
+#define GPIO_ODATA 0x10
+#define GPIO_OSET 0x18
+#define GPIO_OCLR 0x20
+#define GPIO_INTMODE 0x30
+
+#define BASE_HW_IRQ 24
+
+struct visconti_gpio {
+ void __iomem *base;
+ spinlock_t lock; /* protect gpio register */
+ struct gpio_chip gpio_chip;
+ struct irq_chip irq_chip;
+};
+
+static int visconti_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct visconti_gpio *priv = gpiochip_get_data(gc);
+ u32 offset = irqd_to_hwirq(d);
+ u32 bit = BIT(offset);
+ u32 intc_type = IRQ_TYPE_EDGE_RISING;
+ u32 intmode, odata;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ odata = readl(priv->base + GPIO_ODATA);
+ intmode = readl(priv->base + GPIO_INTMODE);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ odata &= ~bit;
+ intmode &= ~bit;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ odata |= bit;
+ intmode &= ~bit;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ intmode |= bit;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ intc_type = IRQ_TYPE_LEVEL_HIGH;
+ odata &= ~bit;
+ intmode &= ~bit;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ intc_type = IRQ_TYPE_LEVEL_HIGH;
+ odata |= bit;
+ intmode &= ~bit;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ writel(odata, priv->base + GPIO_ODATA);
+ writel(intmode, priv->base + GPIO_INTMODE);
+ irq_set_irq_type(offset, intc_type);
+
+ ret = irq_chip_set_type_parent(d, type);
+err:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+
+static int visconti_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
+ unsigned int child,
+ unsigned int child_type,
+ unsigned int *parent,
+ unsigned int *parent_type)
+{
+ /* Interrupts 0..15 mapped to interrupts 24..39 on the GIC */
+ if (child < 16) {
+ /* All these interrupts are level high in the CPU */
+ *parent_type = IRQ_TYPE_LEVEL_HIGH;
+ *parent = child + BASE_HW_IRQ;
+ return 0;
+ }
+ return -EINVAL;
+}
+
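The mapping above is a fixed linear offset: GPIO lines 0-15 land on parent interrupts 24-39 (BASE_HW_IRQ upward), and any other line has no interrupt. A standalone sketch of the same rule:

    #include <stdio.h>

    #define BASE_HW_IRQ 24  /* first parent interrupt used by the GPIO block */

    /* Returns the parent hwirq for a line, or -1 when the line has no IRQ. */
    static int child_to_parent(unsigned int child)
    {
        return child < 16 ? (int)(child + BASE_HW_IRQ) : -1;
    }

    int main(void)
    {
        printf("line 0  -> %d\n", child_to_parent(0));   /* 24 */
        printf("line 15 -> %d\n", child_to_parent(15));  /* 39 */
        printf("line 16 -> %d\n", child_to_parent(16));  /* -1 */
        return 0;
    }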
+static void *visconti_gpio_populate_parent_fwspec(struct gpio_chip *chip,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
+{
+ struct irq_fwspec *fwspec;
+
+ fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return NULL;
+
+ fwspec->fwnode = chip->irq.parent_domain->fwnode;
+ fwspec->param_count = 3;
+ fwspec->param[0] = 0;
+ fwspec->param[1] = parent_hwirq;
+ fwspec->param[2] = parent_type;
+
+ return fwspec;
+}
+
+static int visconti_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct visconti_gpio *priv;
+ struct irq_chip *irq_chip;
+ struct gpio_irq_chip *girq;
+ struct irq_domain *parent;
+ struct device_node *irq_parent;
+ struct fwnode_handle *fwnode;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ irq_parent = of_irq_find_parent(dev->of_node);
+ if (!irq_parent) {
+ dev_err(dev, "No IRQ parent node\n");
+ return -ENODEV;
+ }
+
+ parent = irq_find_host(irq_parent);
+ if (!parent) {
+ dev_err(dev, "No IRQ parent domain\n");
+ return -ENODEV;
+ }
+
+ fwnode = of_node_to_fwnode(irq_parent);
+ of_node_put(irq_parent);
+
+ ret = bgpio_init(&priv->gpio_chip, dev, 4,
+ priv->base + GPIO_IDATA,
+ priv->base + GPIO_OSET,
+ priv->base + GPIO_OCLR,
+ priv->base + GPIO_DIR,
+ NULL,
+ 0);
+ if (ret) {
+ dev_err(dev, "unable to init generic GPIO\n");
+ return ret;
+ }
+
+ irq_chip = &priv->irq_chip;
+ irq_chip->name = dev_name(dev);
+ irq_chip->irq_mask = irq_chip_mask_parent;
+ irq_chip->irq_unmask = irq_chip_unmask_parent;
+ irq_chip->irq_eoi = irq_chip_eoi_parent;
+ irq_chip->irq_set_type = visconti_gpio_irq_set_type;
+ irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
+
+ girq = &priv->gpio_chip.irq;
+ girq->chip = irq_chip;
+ girq->fwnode = fwnode;
+ girq->parent_domain = parent;
+ girq->child_to_parent_hwirq = visconti_gpio_child_to_parent_hwirq;
+ girq->populate_parent_alloc_arg = visconti_gpio_populate_parent_fwspec;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+
+ ret = devm_gpiochip_add_data(dev, &priv->gpio_chip, priv);
+ if (ret) {
+ dev_err(dev, "failed to add GPIO chip\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return ret;
+}
+
+static const struct of_device_id visconti_gpio_of_match[] = {
+ { .compatible = "toshiba,gpio-tmpv7708", },
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, visconti_gpio_of_match);
+
+static struct platform_driver visconti_gpio_driver = {
+ .probe = visconti_gpio_probe,
+ .driver = {
+ .name = "visconti_gpio",
+ .of_match_table = of_match_ptr(visconti_gpio_of_match),
+ }
+};
+module_platform_driver(visconti_gpio_driver);
+
+MODULE_AUTHOR("Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>");
+MODULE_DESCRIPTION("Toshiba Visconti GPIO Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 3bf397b8dfbc..69713fd5485b 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -216,7 +216,7 @@ static void vx855gpio_gpio_setup(struct vx855_gpio *vg)
c->direction_output = vx855gpio_direction_output;
c->get = vx855gpio_get;
c->set = vx855gpio_set;
- c->set_config = vx855gpio_set_config,
+ c->set_config = vx855gpio_set_config;
c->dbg_show = NULL;
c->base = 0;
c->ngpio = NR_VX855_GP;
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index b5fbba5a783a..a19eeef6cf1e 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -73,6 +73,8 @@
enum ctrl_register {
CTRL_IN,
CTRL_OUT,
+ IRQ_STATUS,
+ IRQ_MASK,
};
/*
@@ -112,22 +114,29 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
return reg;
}
-static void wcove_update_irq_mask(struct wcove_gpio *wg, int gpio)
+static inline int to_ireg(int gpio, enum ctrl_register type, unsigned int *mask)
{
- unsigned int reg, mask;
+ unsigned int reg = type == IRQ_STATUS ? IRQ_STATUS_BASE : IRQ_MASK_BASE;
if (gpio < GROUP0_NR_IRQS) {
- reg = IRQ_MASK_BASE;
- mask = BIT(gpio % GROUP0_NR_IRQS);
+ reg += 0;
+ *mask = BIT(gpio);
} else {
- reg = IRQ_MASK_BASE + 1;
- mask = BIT((gpio - GROUP0_NR_IRQS) % GROUP1_NR_IRQS);
+ reg += 1;
+ *mask = BIT(gpio - GROUP0_NR_IRQS);
}
+ return reg;
+}
+
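to_ireg() folds the old open-coded bank arithmetic into one helper: a line in the first group keeps its own bit position on the base register, a line in the second group moves to the next register with its bit renumbered from zero. A standalone sketch of that split, assuming a hypothetical first-group size of 7 lines:

    #include <stdio.h>

    #define GROUP0_NR_IRQS 7  /* assumed group size, for illustration only */

    /* Returns the register offset (0 or 1 past the base) and the bit mask. */
    static int to_reg_and_bit(int gpio, unsigned int *mask)
    {
        if (gpio < GROUP0_NR_IRQS) {
            *mask = 1u << gpio;
            return 0;
        }
        *mask = 1u << (gpio - GROUP0_NR_IRQS);
        return 1;
    }

    int main(void)
    {
        unsigned int mask;
        int reg;

        reg = to_reg_and_bit(5, &mask);
        printf("line 5 -> reg +%d, mask 0x%02x\n", reg, mask);  /* +0, 0x20 */
        reg = to_reg_and_bit(9, &mask);
        printf("line 9 -> reg +%d, mask 0x%02x\n", reg, mask);  /* +1, 0x04 */
        return 0;
    }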
+static void wcove_update_irq_mask(struct wcove_gpio *wg, int gpio)
+{
+ unsigned int mask, reg = to_ireg(gpio, IRQ_MASK, &mask);
+
if (wg->set_irq_mask)
- regmap_update_bits(wg->regmap, reg, mask, mask);
+ regmap_set_bits(wg->regmap, reg, mask);
else
- regmap_update_bits(wg->regmap, reg, mask, 0);
+ regmap_clear_bits(wg->regmap, reg, mask);
}
static void wcove_update_irq_ctrl(struct wcove_gpio *wg, int gpio)
@@ -207,9 +216,9 @@ static void wcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
return;
if (value)
- regmap_update_bits(wg->regmap, reg, 1, 1);
+ regmap_set_bits(wg->regmap, reg, 1);
else
- regmap_update_bits(wg->regmap, reg, 1, 0);
+ regmap_clear_bits(wg->regmap, reg, 1);
}
static int wcove_gpio_set_config(struct gpio_chip *chip, unsigned int gpio,
@@ -324,7 +333,8 @@ static struct irq_chip wcove_irqchip = {
static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
{
struct wcove_gpio *wg = (struct wcove_gpio *)data;
- unsigned int pending, virq, gpio, mask, offset;
+ unsigned int virq, gpio;
+ unsigned long pending;
u8 p[2];
if (regmap_bulk_read(wg->regmap, IRQ_STATUS_BASE, p, 2)) {
@@ -339,15 +349,12 @@ static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
/* Iterate until no interrupt is pending */
while (pending) {
/* One iteration is for all pending bits */
- for_each_set_bit(gpio, (const unsigned long *)&pending,
- WCOVE_GPIO_NUM) {
- offset = (gpio > GROUP0_NR_IRQS) ? 1 : 0;
- mask = (offset == 1) ? BIT(gpio - GROUP0_NR_IRQS) :
- BIT(gpio);
+ for_each_set_bit(gpio, &pending, WCOVE_GPIO_NUM) {
+ unsigned int mask, reg = to_ireg(gpio, IRQ_STATUS, &mask);
+
virq = irq_find_mapping(wg->chip.irq.domain, gpio);
handle_nested_irq(virq);
- regmap_update_bits(wg->regmap, IRQ_STATUS_BASE + offset,
- mask, mask);
+ regmap_set_bits(wg->regmap, reg, mask);
}
/* Next iteration */
@@ -367,30 +374,26 @@ static void wcove_gpio_dbg_show(struct seq_file *s,
{
unsigned int ctlo, ctli, irq_mask, irq_status;
struct wcove_gpio *wg = gpiochip_get_data(chip);
- int gpio, offset, group, ret = 0;
+ int gpio, mask, ret = 0;
for (gpio = 0; gpio < WCOVE_GPIO_NUM; gpio++) {
- group = gpio < GROUP0_NR_IRQS ? 0 : 1;
ret += regmap_read(wg->regmap, to_reg(gpio, CTRL_OUT), &ctlo);
ret += regmap_read(wg->regmap, to_reg(gpio, CTRL_IN), &ctli);
- ret += regmap_read(wg->regmap, IRQ_MASK_BASE + group,
- &irq_mask);
- ret += regmap_read(wg->regmap, IRQ_STATUS_BASE + group,
- &irq_status);
+ ret += regmap_read(wg->regmap, to_ireg(gpio, IRQ_MASK, &mask), &irq_mask);
+ ret += regmap_read(wg->regmap, to_ireg(gpio, IRQ_STATUS, &mask), &irq_status);
if (ret) {
pr_err("Failed to read registers: ctrl out/in or irq status/mask\n");
break;
}
- offset = gpio % 8;
seq_printf(s, " gpio-%-2d %s %s %s %s ctlo=%2x,%s %s\n",
gpio, ctlo & CTLO_DIR_OUT ? "out" : "in ",
ctli & 0x1 ? "hi" : "lo",
ctli & CTLI_INTCNT_NE ? "fall" : " ",
ctli & CTLI_INTCNT_PE ? "rise" : " ",
ctlo,
- irq_mask & BIT(offset) ? "mask " : "unmask",
- irq_status & BIT(offset) ? "pending" : " ");
+ irq_mask & mask ? "mask " : "unmask",
+ irq_status & mask ? "pending" : " ");
}
}
@@ -434,7 +437,7 @@ static int wcove_gpio_probe(struct platform_device *pdev)
wg->chip.get_direction = wcove_gpio_get_direction;
wg->chip.get = wcove_gpio_get;
wg->chip.set = wcove_gpio_set;
- wg->chip.set_config = wcove_gpio_set_config,
+ wg->chip.set_config = wcove_gpio_set_config;
wg->chip.base = -1;
wg->chip.ngpio = WCOVE_VGPIO_NUM;
wg->chip.can_sleep = true;
@@ -473,14 +476,12 @@ static int wcove_gpio_probe(struct platform_device *pdev)
}
/* Enable GPIO0 interrupts */
- ret = regmap_update_bits(wg->regmap, IRQ_MASK_BASE, GPIO_IRQ0_MASK,
- 0x00);
+ ret = regmap_clear_bits(wg->regmap, IRQ_MASK_BASE + 0, GPIO_IRQ0_MASK);
if (ret)
return ret;
/* Enable GPIO1 interrupts */
- ret = regmap_update_bits(wg->regmap, IRQ_MASK_BASE + 1, GPIO_IRQ1_MASK,
- 0x00);
+ ret = regmap_clear_bits(wg->regmap, IRQ_MASK_BASE + 1, GPIO_IRQ1_MASK);
if (ret)
return ret;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index be539381fd82..b411d3156e0b 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -10,10 +10,13 @@
#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
/* Register Offset Definitions */
@@ -22,6 +25,11 @@
#define XGPIO_CHANNEL_OFFSET 0x8
+#define XGPIO_GIER_OFFSET 0x11c /* Global Interrupt Enable */
+#define XGPIO_GIER_IE BIT(31)
+#define XGPIO_IPISR_OFFSET 0x120 /* IP Interrupt Status */
+#define XGPIO_IPIER_OFFSET 0x128 /* IP Interrupt Enable */
+
/* Read/Write access to the GPIO registers */
#if defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_X86)
# define xgpio_readreg(offset) readl(offset)
@@ -36,9 +44,15 @@
* @gc: GPIO chip
* @regs: register block
* @gpio_width: GPIO width for every channel
- * @gpio_state: GPIO state shadow register
+ * @gpio_state: GPIO write state shadow register
+ * @gpio_last_irq_read: GPIO read state register from last interrupt
* @gpio_dir: GPIO direction shadow register
* @gpio_lock: Lock used for synchronization
+ * @irq: IRQ used by GPIO device
+ * @irqchip: IRQ chip
+ * @irq_enable: GPIO IRQ enable/disable bitfield
+ * @irq_rising_edge: GPIO IRQ rising edge enable/disable bitfield
+ * @irq_falling_edge: GPIO IRQ falling edge enable/disable bitfield
* @clk: clock resource for this driver
*/
struct xgpio_instance {
@@ -46,8 +60,14 @@ struct xgpio_instance {
void __iomem *regs;
unsigned int gpio_width[2];
u32 gpio_state[2];
+ u32 gpio_last_irq_read[2];
u32 gpio_dir[2];
- spinlock_t gpio_lock[2];
+ spinlock_t gpio_lock; /* For serializing operations */
+ int irq;
+ struct irq_chip irqchip;
+ u32 irq_enable[2];
+ u32 irq_rising_edge[2];
+ u32 irq_falling_edge[2];
struct clk *clk;
};
@@ -113,7 +133,7 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
int index = xgpio_index(chip, gpio);
int offset = xgpio_offset(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signal and set its direction to output */
if (val)
@@ -124,7 +144,7 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
xgpio_regoffset(chip, gpio), chip->gpio_state[index]);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -144,7 +164,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
int index = xgpio_index(chip, 0);
int offset, i;
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signals */
for (i = 0; i < gc->ngpio; i++) {
@@ -155,9 +175,9 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
index * XGPIO_CHANNEL_OFFSET,
chip->gpio_state[index]);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
index = xgpio_index(chip, i);
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
}
if (__test_and_clear_bit(i, mask)) {
offset = xgpio_offset(chip, i);
@@ -171,7 +191,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -190,14 +210,14 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
int index = xgpio_index(chip, gpio);
int offset = xgpio_offset(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Set the GPIO bit in shadow register and set direction as input */
chip->gpio_dir[index] |= BIT(offset);
xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET +
xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -221,7 +241,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
int index = xgpio_index(chip, gpio);
int offset = xgpio_offset(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write state of GPIO signal */
if (val)
@@ -236,7 +256,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET +
xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -259,6 +279,39 @@ static void xgpio_save_regs(struct xgpio_instance *chip)
chip->gpio_dir[1]);
}
+static int xgpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(chip->parent);
+ /*
+ * If the device is already active, pm_runtime_get_sync() will return 1 on
+ * success, but gpio_request still needs to return 0.
+ */
+ return ret < 0 ? ret : 0;
+}
+
+static void xgpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ pm_runtime_put(chip->parent);
+}
+
+static int __maybe_unused xgpio_suspend(struct device *dev)
+{
+ struct xgpio_instance *gpio = dev_get_drvdata(dev);
+ struct irq_data *data = irq_get_irq_data(gpio->irq);
+
+ if (!data) {
+ dev_err(dev, "irq_get_irq_data() failed\n");
+ return -EINVAL;
+ }
+
+ if (!irqd_is_wakeup_set(data))
+ return pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
/**
* xgpio_remove - Remove method for the GPIO device.
* @pdev: pointer to the platform device
@@ -271,12 +324,224 @@ static int xgpio_remove(struct platform_device *pdev)
{
struct xgpio_instance *gpio = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(gpio->clk);
return 0;
}
/**
+ * xgpio_irq_ack - Acknowledge a child GPIO interrupt.
+ * @irq_data: per IRQ and chip data passed down to chip functions
+ * This currently does nothing, but irq_ack is unconditionally called by
+ * handle_edge_irq and therefore must be defined.
+ */
+static void xgpio_irq_ack(struct irq_data *irq_data)
+{
+}
+
+static int __maybe_unused xgpio_resume(struct device *dev)
+{
+ struct xgpio_instance *gpio = dev_get_drvdata(dev);
+ struct irq_data *data = irq_get_irq_data(gpio->irq);
+
+ if (!data) {
+ dev_err(dev, "irq_get_irq_data() failed\n");
+ return -EINVAL;
+ }
+
+ if (!irqd_is_wakeup_set(data))
+ return pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+static int __maybe_unused xgpio_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xgpio_instance *gpio = platform_get_drvdata(pdev);
+
+ clk_disable(gpio->clk);
+
+ return 0;
+}
+
+static int __maybe_unused xgpio_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xgpio_instance *gpio = platform_get_drvdata(pdev);
+
+ return clk_enable(gpio->clk);
+}
+
+static const struct dev_pm_ops xgpio_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xgpio_suspend, xgpio_resume)
+ SET_RUNTIME_PM_OPS(xgpio_runtime_suspend,
+ xgpio_runtime_resume, NULL)
+};
+
+/**
+ * xgpio_irq_mask - Mask the interrupt for the given GPIO signal.
+ * @irq_data: per IRQ and chip data passed down to chip functions
+ */
+static void xgpio_irq_mask(struct irq_data *irq_data)
+{
+ unsigned long flags;
+ struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
+ int irq_offset = irqd_to_hwirq(irq_data);
+ int index = xgpio_index(chip, irq_offset);
+ int offset = xgpio_offset(chip, irq_offset);
+
+ spin_lock_irqsave(&chip->gpio_lock, flags);
+
+ chip->irq_enable[index] &= ~BIT(offset);
+
+ if (!chip->irq_enable[index]) {
+ /* Disable per channel interrupt */
+ u32 temp = xgpio_readreg(chip->regs + XGPIO_IPIER_OFFSET);
+
+ temp &= ~BIT(index);
+ xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp);
+ }
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
+}
+
+/**
+ * xgpio_irq_unmask - Unmask the interrupt for the given GPIO signal.
+ * @irq_data: per IRQ and chip data passed down to chip functions
+ */
+static void xgpio_irq_unmask(struct irq_data *irq_data)
+{
+ unsigned long flags;
+ struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
+ int irq_offset = irqd_to_hwirq(irq_data);
+ int index = xgpio_index(chip, irq_offset);
+ int offset = xgpio_offset(chip, irq_offset);
+ u32 old_enable = chip->irq_enable[index];
+
+ spin_lock_irqsave(&chip->gpio_lock, flags);
+
+ chip->irq_enable[index] |= BIT(offset);
+
+ if (!old_enable) {
+ /* Clear any existing per-channel interrupts */
+ u32 val = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET) &
+ BIT(index);
+
+ if (val)
+ xgpio_writereg(chip->regs + XGPIO_IPISR_OFFSET, val);
+
+ /* Update GPIO IRQ read data before enabling interrupt */
+ val = xgpio_readreg(chip->regs + XGPIO_DATA_OFFSET +
+ index * XGPIO_CHANNEL_OFFSET);
+ chip->gpio_last_irq_read[index] = val;
+
+ /* Enable per channel interrupt */
+ val = xgpio_readreg(chip->regs + XGPIO_IPIER_OFFSET);
+ val |= BIT(index);
+ xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val);
+ }
+
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
+}
+
+/**
+ * xgpio_set_irq_type - Set the interrupt trigger type for the given GPIO signal.
+ * @irq_data: Per IRQ and chip data passed down to chip functions
+ * @type: Interrupt type that is to be set for the gpio pin
+ *
+ * Return:
+ * 0 if interrupt type is supported otherwise -EINVAL
+ */
+static int xgpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
+{
+ struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
+ int irq_offset = irqd_to_hwirq(irq_data);
+ int index = xgpio_index(chip, irq_offset);
+ int offset = xgpio_offset(chip, irq_offset);
+
+ /*
+ * The Xilinx GPIO hardware provides a single interrupt status
+ * indication for any state change in a given GPIO channel (bank).
+ * Therefore, only rising edge or falling edge triggers are
+ * supported.
+ */
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_BOTH:
+ chip->irq_rising_edge[index] |= BIT(offset);
+ chip->irq_falling_edge[index] |= BIT(offset);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ chip->irq_rising_edge[index] |= BIT(offset);
+ chip->irq_falling_edge[index] &= ~BIT(offset);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ chip->irq_rising_edge[index] &= ~BIT(offset);
+ chip->irq_falling_edge[index] |= BIT(offset);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ irq_set_handler_locked(irq_data, handle_edge_irq);
+ return 0;
+}
+
+/**
+ * xgpio_irqhandler - GPIO interrupt service routine
+ * @desc: Pointer to interrupt description
+ */
+static void xgpio_irqhandler(struct irq_desc *desc)
+{
+ struct xgpio_instance *chip = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ u32 num_channels = chip->gpio_width[1] ? 2 : 1;
+ u32 offset = 0, index;
+ u32 status = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET);
+
+ xgpio_writereg(chip->regs + XGPIO_IPISR_OFFSET, status);
+
+ chained_irq_enter(irqchip, desc);
+ for (index = 0; index < num_channels; index++) {
+ if ((status & BIT(index))) {
+ unsigned long rising_events, falling_events, all_events;
+ unsigned long flags;
+ u32 data, bit;
+ unsigned int irq;
+
+ spin_lock_irqsave(&chip->gpio_lock, flags);
+ data = xgpio_readreg(chip->regs + XGPIO_DATA_OFFSET +
+ index * XGPIO_CHANNEL_OFFSET);
+ rising_events = data &
+ ~chip->gpio_last_irq_read[index] &
+ chip->irq_enable[index] &
+ chip->irq_rising_edge[index];
+ falling_events = ~data &
+ chip->gpio_last_irq_read[index] &
+ chip->irq_enable[index] &
+ chip->irq_falling_edge[index];
+ dev_dbg(chip->gc.parent,
+ "IRQ chan %u rising 0x%lx falling 0x%lx\n",
+ index, rising_events, falling_events);
+ all_events = rising_events | falling_events;
+ chip->gpio_last_irq_read[index] = data;
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
+
+ for_each_set_bit(bit, &all_events, 32) {
+ irq = irq_find_mapping(chip->gc.irq.domain,
+ offset + bit);
+ generic_handle_irq(irq);
+ }
+ }
+ offset += chip->gpio_width[index];
+ }
+
+ chained_irq_exit(irqchip, desc);
+}
+
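The controller only signals that something changed in a channel, so the handler derives per-line edges itself: a rising edge is a bit that is set now but was clear in the snapshot taken at the previous interrupt, a falling edge is the reverse, and both are filtered by the enable and trigger-type bitfields. A standalone sketch of that derivation, with made-up register snapshots:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t last    = 0x000000F0u;  /* DATA as read at the previous interrupt */
        uint32_t data    = 0x0000001Eu;  /* DATA as read now */
        uint32_t enabled = 0xFFFFFFFFu;  /* irq_enable[index] */
        uint32_t rise_en = 0xFFFFFFFFu;  /* irq_rising_edge[index] */
        uint32_t fall_en = 0xFFFFFFFFu;  /* irq_falling_edge[index] */

        uint32_t rising  =  data & ~last & enabled & rise_en;  /* 0 -> 1 lines */
        uint32_t falling = ~data &  last & enabled & fall_en;  /* 1 -> 0 lines */

        printf("rising  0x%08x\n", rising);   /* 0x0000000e */
        printf("falling 0x%08x\n", falling);  /* 0x000000e0 */
        return 0;
    }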
+/**
* xgpio_of_probe - Probe method for the GPIO device.
* @pdev: pointer to the platform device
*
@@ -289,7 +554,10 @@ static int xgpio_probe(struct platform_device *pdev)
struct xgpio_instance *chip;
int status = 0;
struct device_node *np = pdev->dev.of_node;
- u32 is_dual;
+ u32 is_dual = 0;
+ u32 cells = 2;
+ struct gpio_irq_chip *girq;
+ u32 temp;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
@@ -305,6 +573,15 @@ static int xgpio_probe(struct platform_device *pdev)
if (of_property_read_u32(np, "xlnx,tri-default", &chip->gpio_dir[0]))
chip->gpio_dir[0] = 0xFFFFFFFF;
+ /* Update cells with gpio-cells value */
+ if (of_property_read_u32(np, "#gpio-cells", &cells))
+ dev_dbg(&pdev->dev, "Missing gpio-cells property\n");
+
+ if (cells != 2) {
+ dev_err(&pdev->dev, "#gpio-cells mismatch\n");
+ return -EINVAL;
+ }
+
/*
* Check device node and parent device node for device width
* and assume default width of 32
@@ -312,7 +589,10 @@ static int xgpio_probe(struct platform_device *pdev)
if (of_property_read_u32(np, "xlnx,gpio-width", &chip->gpio_width[0]))
chip->gpio_width[0] = 32;
- spin_lock_init(&chip->gpio_lock[0]);
+ if (chip->gpio_width[0] > 32)
+ return -EINVAL;
+
+ spin_lock_init(&chip->gpio_lock);
if (of_property_read_u32(np, "xlnx,is-dual", &is_dual))
is_dual = 0;
@@ -336,7 +616,8 @@ static int xgpio_probe(struct platform_device *pdev)
&chip->gpio_width[1]))
chip->gpio_width[1] = 32;
- spin_lock_init(&chip->gpio_lock[1]);
+ if (chip->gpio_width[1] > 32)
+ return -EINVAL;
}
chip->gc.base = -1;
@@ -344,8 +625,11 @@ static int xgpio_probe(struct platform_device *pdev)
chip->gc.parent = &pdev->dev;
chip->gc.direction_input = xgpio_dir_in;
chip->gc.direction_output = xgpio_dir_out;
+ chip->gc.of_gpio_n_cells = cells;
chip->gc.get = xgpio_get;
chip->gc.set = xgpio_set;
+ chip->gc.request = xgpio_request;
+ chip->gc.free = xgpio_free;
chip->gc.set_multiple = xgpio_set_multiple;
chip->gc.label = dev_name(&pdev->dev);
@@ -357,28 +641,68 @@ static int xgpio_probe(struct platform_device *pdev)
}
chip->clk = devm_clk_get_optional(&pdev->dev, NULL);
- if (IS_ERR(chip->clk)) {
- if (PTR_ERR(chip->clk) != -EPROBE_DEFER)
- dev_dbg(&pdev->dev, "Input clock not found\n");
- return PTR_ERR(chip->clk);
- }
+ if (IS_ERR(chip->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(chip->clk), "input clock not found.\n");
status = clk_prepare_enable(chip->clk);
if (status < 0) {
dev_err(&pdev->dev, "Failed to prepare clk\n");
return status;
}
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
xgpio_save_regs(chip);
+ chip->irq = platform_get_irq_optional(pdev, 0);
+ if (chip->irq <= 0)
+ goto skip_irq;
+
+ chip->irqchip.name = "gpio-xilinx";
+ chip->irqchip.irq_ack = xgpio_irq_ack;
+ chip->irqchip.irq_mask = xgpio_irq_mask;
+ chip->irqchip.irq_unmask = xgpio_irq_unmask;
+ chip->irqchip.irq_set_type = xgpio_set_irq_type;
+
+ /* Disable per-channel interrupts */
+ xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, 0);
+ /* Clear any existing per-channel interrupts */
+ temp = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET);
+ xgpio_writereg(chip->regs + XGPIO_IPISR_OFFSET, temp);
+ /* Enable global interrupts */
+ xgpio_writereg(chip->regs + XGPIO_GIER_OFFSET, XGPIO_GIER_IE);
+
+ girq = &chip->gc.irq;
+ girq->chip = &chip->irqchip;
+ girq->parent_handler = xgpio_irqhandler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ status = -ENOMEM;
+ goto err_pm_put;
+ }
+ girq->parents[0] = chip->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+
+skip_irq:
status = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
if (status) {
dev_err(&pdev->dev, "failed to add GPIO chip\n");
- clk_disable_unprepare(chip->clk);
- return status;
+ goto err_pm_put;
}
+ pm_runtime_put(&pdev->dev);
return 0;
+
+err_pm_put:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ clk_disable_unprepare(chip->clk);
+ return status;
}
static const struct of_device_id xgpio_of_match[] = {
@@ -394,6 +718,7 @@ static struct platform_driver xgpio_plat_driver = {
.driver = {
.name = "gpio-xilinx",
.of_match_table = xgpio_of_match,
+ .pm = &xgpio_dev_pm_ops,
},
};
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
deleted file mode 100644
index 64bfb722756a..000000000000
--- a/drivers/gpio/gpio-zx.c
+++ /dev/null
@@ -1,289 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ZTE ZX296702 GPIO driver
- *
- * Author: Jun Nie <jun.nie@linaro.org>
- *
- * Copyright (C) 2015 Linaro Ltd.
- */
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/gpio/driver.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-
-#define ZX_GPIO_DIR 0x00
-#define ZX_GPIO_IVE 0x04
-#define ZX_GPIO_IV 0x08
-#define ZX_GPIO_IEP 0x0C
-#define ZX_GPIO_IEN 0x10
-#define ZX_GPIO_DI 0x14
-#define ZX_GPIO_DO1 0x18
-#define ZX_GPIO_DO0 0x1C
-#define ZX_GPIO_DO 0x20
-
-#define ZX_GPIO_IM 0x28
-#define ZX_GPIO_IE 0x2C
-
-#define ZX_GPIO_MIS 0x30
-#define ZX_GPIO_IC 0x34
-
-#define ZX_GPIO_NR 16
-
-struct zx_gpio {
- raw_spinlock_t lock;
-
- void __iomem *base;
- struct gpio_chip gc;
-};
-
-static int zx_direction_input(struct gpio_chip *gc, unsigned offset)
-{
- struct zx_gpio *chip = gpiochip_get_data(gc);
- unsigned long flags;
- u16 gpiodir;
-
- if (offset >= gc->ngpio)
- return -EINVAL;
-
- raw_spin_lock_irqsave(&chip->lock, flags);
- gpiodir = readw_relaxed(chip->base + ZX_GPIO_DIR);
- gpiodir &= ~BIT(offset);
- writew_relaxed(gpiodir, chip->base + ZX_GPIO_DIR);
- raw_spin_unlock_irqrestore(&chip->lock, flags);
-
- return 0;
-}
-
-static int zx_direction_output(struct gpio_chip *gc, unsigned offset,
- int value)
-{
- struct zx_gpio *chip = gpiochip_get_data(gc);
- unsigned long flags;
- u16 gpiodir;
-
- if (offset >= gc->ngpio)
- return -EINVAL;
-
- raw_spin_lock_irqsave(&chip->lock, flags);
- gpiodir = readw_relaxed(chip->base + ZX_GPIO_DIR);
- gpiodir |= BIT(offset);
- writew_relaxed(gpiodir, chip->base + ZX_GPIO_DIR);
-
- if (value)
- writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO1);
- else
- writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO0);
- raw_spin_unlock_irqrestore(&chip->lock, flags);
-
- return 0;
-}
-
-static int zx_get_value(struct gpio_chip *gc, unsigned offset)
-{
- struct zx_gpio *chip = gpiochip_get_data(gc);
-
- return !!(readw_relaxed(chip->base + ZX_GPIO_DI) & BIT(offset));
-}
-
-static void zx_set_value(struct gpio_chip *gc, unsigned offset, int value)
-{
- struct zx_gpio *chip = gpiochip_get_data(gc);
-
- if (value)
- writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO1);
- else
- writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO0);
-}
-
-static int zx_irq_type(struct irq_data *d, unsigned trigger)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct zx_gpio *chip = gpiochip_get_data(gc);
- int offset = irqd_to_hwirq(d);
- unsigned long flags;
- u16 gpiois, gpioi_epos, gpioi_eneg, gpioiev;
- u16 bit = BIT(offset);
-
- if (offset < 0 || offset >= ZX_GPIO_NR)
- return -EINVAL;
-
- raw_spin_lock_irqsave(&chip->lock, flags);
-
- gpioiev = readw_relaxed(chip->base + ZX_GPIO_IV);
- gpiois = readw_relaxed(chip->base + ZX_GPIO_IVE);
- gpioi_epos = readw_relaxed(chip->base + ZX_GPIO_IEP);
- gpioi_eneg = readw_relaxed(chip->base + ZX_GPIO_IEN);
-
- if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
- gpiois |= bit;
- if (trigger & IRQ_TYPE_LEVEL_HIGH)
- gpioiev |= bit;
- else
- gpioiev &= ~bit;
- } else
- gpiois &= ~bit;
-
- if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
- gpioi_epos |= bit;
- gpioi_eneg |= bit;
- } else {
- if (trigger & IRQ_TYPE_EDGE_RISING) {
- gpioi_epos |= bit;
- gpioi_eneg &= ~bit;
- } else if (trigger & IRQ_TYPE_EDGE_FALLING) {
- gpioi_eneg |= bit;
- gpioi_epos &= ~bit;
- }
- }
-
- writew_relaxed(gpiois, chip->base + ZX_GPIO_IVE);
- writew_relaxed(gpioi_epos, chip->base + ZX_GPIO_IEP);
- writew_relaxed(gpioi_eneg, chip->base + ZX_GPIO_IEN);
- writew_relaxed(gpioiev, chip->base + ZX_GPIO_IV);
- raw_spin_unlock_irqrestore(&chip->lock, flags);
-
- return 0;
-}
-
-static void zx_irq_handler(struct irq_desc *desc)
-{
- unsigned long pending;
- int offset;
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct zx_gpio *chip = gpiochip_get_data(gc);
- struct irq_chip *irqchip = irq_desc_get_chip(desc);
-
- chained_irq_enter(irqchip, desc);
-
- pending = readw_relaxed(chip->base + ZX_GPIO_MIS);
- writew_relaxed(pending, chip->base + ZX_GPIO_IC);
- if (pending) {
- for_each_set_bit(offset, &pending, ZX_GPIO_NR)
- generic_handle_irq(irq_find_mapping(gc->irq.domain,
- offset));
- }
-
- chained_irq_exit(irqchip, desc);
-}
-
-static void zx_irq_mask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct zx_gpio *chip = gpiochip_get_data(gc);
- u16 mask = BIT(irqd_to_hwirq(d) % ZX_GPIO_NR);
- u16 gpioie;
-
- raw_spin_lock(&chip->lock);
- gpioie = readw_relaxed(chip->base + ZX_GPIO_IM) | mask;
- writew_relaxed(gpioie, chip->base + ZX_GPIO_IM);
- gpioie = readw_relaxed(chip->base + ZX_GPIO_IE) & ~mask;
- writew_relaxed(gpioie, chip->base + ZX_GPIO_IE);
- raw_spin_unlock(&chip->lock);
-}
-
-static void zx_irq_unmask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct zx_gpio *chip = gpiochip_get_data(gc);
- u16 mask = BIT(irqd_to_hwirq(d) % ZX_GPIO_NR);
- u16 gpioie;
-
- raw_spin_lock(&chip->lock);
- gpioie = readw_relaxed(chip->base + ZX_GPIO_IM) & ~mask;
- writew_relaxed(gpioie, chip->base + ZX_GPIO_IM);
- gpioie = readw_relaxed(chip->base + ZX_GPIO_IE) | mask;
- writew_relaxed(gpioie, chip->base + ZX_GPIO_IE);
- raw_spin_unlock(&chip->lock);
-}
-
-static struct irq_chip zx_irqchip = {
- .name = "zx-gpio",
- .irq_mask = zx_irq_mask,
- .irq_unmask = zx_irq_unmask,
- .irq_set_type = zx_irq_type,
-};
-
-static int zx_gpio_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct zx_gpio *chip;
- struct gpio_irq_chip *girq;
- int irq, id, ret;
-
- chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- chip->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(chip->base))
- return PTR_ERR(chip->base);
-
- id = of_alias_get_id(dev->of_node, "gpio");
-
- raw_spin_lock_init(&chip->lock);
- chip->gc.request = gpiochip_generic_request;
- chip->gc.free = gpiochip_generic_free;
- chip->gc.direction_input = zx_direction_input;
- chip->gc.direction_output = zx_direction_output;
- chip->gc.get = zx_get_value;
- chip->gc.set = zx_set_value;
- chip->gc.base = ZX_GPIO_NR * id;
- chip->gc.ngpio = ZX_GPIO_NR;
- chip->gc.label = dev_name(dev);
- chip->gc.parent = dev;
- chip->gc.owner = THIS_MODULE;
-
- /*
- * irq_chip support
- */
- writew_relaxed(0xffff, chip->base + ZX_GPIO_IM);
- writew_relaxed(0, chip->base + ZX_GPIO_IE);
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
- girq = &chip->gc.irq;
- girq->chip = &zx_irqchip;
- girq->parent_handler = zx_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(&pdev->dev, 1,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->parents[0] = irq;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_simple_irq;
-
- ret = gpiochip_add_data(&chip->gc, chip);
- if (ret)
- return ret;
-
- platform_set_drvdata(pdev, chip);
- dev_info(dev, "ZX GPIO chip registered\n");
-
- return 0;
-}
-
-static const struct of_device_id zx_gpio_match[] = {
- {
- .compatible = "zte,zx296702-gpio",
- },
- { },
-};
-
-static struct platform_driver zx_gpio_driver = {
- .probe = zx_gpio_probe,
- .driver = {
- .name = "zx_gpio",
- .of_match_table = of_match_ptr(zx_gpio_match),
- },
-};
-builtin_platform_driver(zx_gpio_driver)
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 1a7b51163528..1631727bf0da 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -776,6 +776,8 @@ static void edge_detector_stop(struct line *line)
cancel_delayed_work_sync(&line->work);
WRITE_ONCE(line->sw_debounced, 0);
WRITE_ONCE(line->eflags, 0);
+ if (line->desc)
+ WRITE_ONCE(line->desc->debounce_period_us, 0);
/* do not change line->level - see comment in debounced_value() */
}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index b4a71119a4b0..baf0153b7bca 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -1039,3 +1039,14 @@ void of_gpiochip_remove(struct gpio_chip *chip)
{
of_node_put(chip->of_node);
}
+
+void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev)
+{
+ /* If the gpiochip has an assigned OF node this takes precedence */
+ if (gc->of_node)
+ gdev->dev.of_node = gc->of_node;
+ else
+ gc->of_node = gdev->dev.of_node;
+ if (gdev->dev.of_node)
+ gdev->dev.fwnode = of_fwnode_handle(gdev->dev.of_node);
+}
diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h
index ed26664f1537..8af2bc899aab 100644
--- a/drivers/gpio/gpiolib-of.h
+++ b/drivers/gpio/gpiolib-of.h
@@ -15,6 +15,7 @@ int of_gpiochip_add(struct gpio_chip *gc);
void of_gpiochip_remove(struct gpio_chip *gc);
int of_gpio_get_count(struct device *dev, const char *con_id);
bool of_gpio_need_valid_mask(const struct gpio_chip *gc);
+void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev);
#else
static inline struct gpio_desc *of_find_gpio(struct device *dev,
const char *con_id,
@@ -33,6 +34,10 @@ static inline bool of_gpio_need_valid_mask(const struct gpio_chip *gc)
{
return false;
}
+static inline void of_gpio_dev_init(struct gpio_chip *gc,
+ struct gpio_device *gdev)
+{
+}
#endif /* CONFIG_OF_GPIO */
extern struct notifier_block gpio_of_notifier;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index b78a634cca24..adf55db080d8 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -56,8 +56,10 @@
static DEFINE_IDA(gpio_ida);
static dev_t gpio_devt;
#define GPIO_DEV_MAX 256 /* 256 GPIO chip devices supported */
+static int gpio_bus_match(struct device *dev, struct device_driver *drv);
static struct bus_type gpio_bus_type = {
.name = "gpio",
+ .match = gpio_bus_match,
};
/*
@@ -590,20 +592,18 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gdev->dev.of_node = gc->parent->of_node;
}
-#ifdef CONFIG_OF_GPIO
- /* If the gpiochip has an assigned OF node this takes precedence */
- if (gc->of_node)
- gdev->dev.of_node = gc->of_node;
- else
- gc->of_node = gdev->dev.of_node;
-#endif
+ of_gpio_dev_init(gc, gdev);
gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
if (gdev->id < 0) {
ret = gdev->id;
goto err_free_gdev;
}
- dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
+
+ ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
+ if (ret)
+ goto err_free_ida;
+
device_initialize(&gdev->dev);
dev_set_drvdata(&gdev->dev, gdev);
if (gc->parent && gc->parent->driver)
@@ -617,7 +617,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
if (!gdev->descs) {
ret = -ENOMEM;
- goto err_free_ida;
+ goto err_free_dev_name;
}
if (gc->ngpio == 0) {
@@ -768,6 +768,8 @@ err_free_label:
kfree_const(gdev->label);
err_free_descs:
kfree(gdev->descs);
+err_free_dev_name:
+ kfree(dev_name(&gdev->dev));
err_free_ida:
ida_free(&gpio_ida, gdev->id);
err_free_gdev:
@@ -2551,7 +2553,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
struct gpio_chip *gc = desc_array[i]->gdev->chip;
unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)];
unsigned long *mask, *bits;
- int first, j, ret;
+ int first, j;
if (likely(gc->ngpio <= FASTPATH_NGPIO)) {
mask = fastpath;
@@ -3463,6 +3465,10 @@ EXPORT_SYMBOL_GPL(gpiod_add_lookup_table);
*/
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table)
{
+ /* Nothing to remove */
+ if (!table)
+ return;
+
mutex_lock(&gpio_lookup_lock);
list_del(&table->list);
@@ -4205,6 +4211,41 @@ void gpiod_put_array(struct gpio_descs *descs)
}
EXPORT_SYMBOL_GPL(gpiod_put_array);
+
+static int gpio_bus_match(struct device *dev, struct device_driver *drv)
+{
+ /*
+ * Only match if the fwnode doesn't already have a proper struct device
+ * created for it.
+ */
+ if (dev->fwnode && dev->fwnode->dev != dev)
+ return 0;
+ return 1;
+}
+
+static int gpio_stub_drv_probe(struct device *dev)
+{
+ /*
+ * The DT nodes of some GPIO chips have a "compatible" property, but
+ * never have a struct device added and probed by a driver to register
+ * the GPIO chip with gpiolib. In such cases, fw_devlink=on will cause
+ * the consumers of the GPIO chip to get probe deferred forever because
+ * they will be waiting for a device associated with the GPIO chip
+ * firmware node to get added and bound to a driver.
+ *
+ * To allow these consumers to probe, we associate the struct
+ * gpio_device of the GPIO chip with the firmware node and then simply
+ * bind it to this stub driver.
+ */
+ return 0;
+}
+
+static struct device_driver gpio_stub_drv = {
+ .name = "gpio_stub_drv",
+ .bus = &gpio_bus_type,
+ .probe = gpio_stub_drv_probe,
+};
+
static int __init gpiolib_dev_init(void)
{
int ret;
@@ -4216,9 +4257,16 @@ static int __init gpiolib_dev_init(void)
return ret;
}
+ if (driver_register(&gpio_stub_drv) < 0) {
+ pr_err("gpiolib: could not register GPIO stub driver\n");
+ bus_unregister(&gpio_bus_type);
+ return ret;
+ }
+
ret = alloc_chrdev_region(&gpio_devt, 0, GPIO_DEV_MAX, GPIOCHIP_NAME);
if (ret < 0) {
pr_err("gpiolib: failed to allocate char dev region\n");
+ driver_unregister(&gpio_stub_drv);
bus_unregister(&gpio_bus_type);
return ret;
}
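
The gpiolib hunks above spread one small idiom across three places: a bus ->match callback that declines any fwnode already backed by a real device, a trivial stub probe, and the registration in gpiolib_dev_init(). A condensed, self-contained sketch of the same idiom follows; it is illustrative only, not part of the patch, and the example_* names are hypothetical.

#include <linux/device.h>
#include <linux/init.h>

/* Match only devices whose fwnode has no real struct device bound to it. */
static int example_bus_match(struct device *dev, struct device_driver *drv)
{
	if (dev->fwnode && dev->fwnode->dev != dev)
		return 0;
	return 1;
}

static struct bus_type example_bus_type = {
	.name  = "example",
	.match = example_bus_match,
};

/* Binding succeeds immediately, so fw_devlink consumers stop deferring. */
static int example_stub_probe(struct device *dev)
{
	return 0;
}

static struct device_driver example_stub_drv = {
	.name  = "example_stub_drv",
	.bus   = &example_bus_type,
	.probe = example_stub_probe,
};

static int __init example_init(void)
{
	int ret = bus_register(&example_bus_type);

	if (ret < 0)
		return ret;

	ret = driver_register(&example_stub_drv);
	if (ret < 0)
		bus_unregister(&example_bus_type);
	return ret;
}
device_initcall(example_init);
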
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 0973f408d75f..e392a90ca687 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -15,6 +15,9 @@ menuconfig DRM
select I2C_ALGOBIT
select DMA_SHARED_BUFFER
select SYNC_FILE
+# gallium uses SYS_kcmp for os_same_file_description() to de-duplicate
+# device and dmabuf fds. Let's make sure that is available for our userspace.
+ select KCMP
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@@ -214,10 +217,6 @@ config DRM_GEM_SHMEM_HELPER
help
Choose this if you need the GEM shmem helper functions
-config DRM_VM
- bool
- depends on DRM && MMU
-
config DRM_SCHED
tristate
depends on DRM
@@ -391,7 +390,6 @@ source "drivers/gpu/drm/xlnx/Kconfig"
menuconfig DRM_LEGACY
bool "Enable legacy drivers (DANGEROUS)"
depends on DRM && MMU
- select DRM_VM
help
Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
APIs to user-space, which can be used to circumvent access
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index fefaff4c832d..926adef289db 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -5,7 +5,7 @@
drm-y := drm_auth.o drm_cache.o \
drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \
- drm_memory.o drm_drv.o \
+ drm_drv.o \
drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
drm_encoder_slave.o \
@@ -20,9 +20,9 @@ drm-y := drm_auth.o drm_cache.o \
drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \
drm_managed.o drm_vblank_work.o
-drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
+drm-$(CONFIG_DRM_LEGACY) += drm_bufs.o drm_context.o drm_dma.o drm_legacy_misc.o drm_lock.o \
+ drm_memory.o drm_scatter.o drm_vm.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
-drm-$(CONFIG_DRM_VM) += drm_vm.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 6bf6cfaea3f1..13ebb1f71e49 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -56,7 +56,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
- amdgpu_fw_attestation.o
+ amdgpu_fw_attestation.o amdgpu_securedisplay.o
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
@@ -71,7 +71,7 @@ amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
- nbio_v7_2.o dimgrey_cavefish_reg_init.o
+ nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o
# add DF block
amdgpu-y += \
@@ -97,6 +97,7 @@ amdgpu-y += \
tonga_ih.o \
cz_ih.o \
vega10_ih.o \
+ vega20_ih.o \
navi10_ih.o
# add PSP block
@@ -170,7 +171,8 @@ amdgpu-y += \
# add SMUIO block
amdgpu-y += \
smuio_v9_0.o \
- smuio_v11_0.o
+ smuio_v11_0.o \
+ smuio_v11_0_6.o
# add amdkfd interfaces
amdgpu-y += amdgpu_amdkfd.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5993dd0fdd8e..b6879d97c9c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -55,7 +55,6 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/amdgpu_drm.h>
@@ -89,6 +88,7 @@
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
#include "amdgpu_nbio.h"
+#include "amdgpu_hdp.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_csa.h"
@@ -107,6 +107,7 @@
#include "amdgpu_gfxhub.h"
#include "amdgpu_df.h"
#include "amdgpu_smuio.h"
+#include "amdgpu_hdp.h"
#define MAX_GPU_INSTANCE 16
@@ -286,7 +287,7 @@ enum amdgpu_kiq_irq {
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
-#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
+#define MAX_KIQ_REG_TRY 1000
int amdgpu_device_ip_set_clockgating_state(void *dev,
enum amd_ip_block_type block_type,
@@ -578,7 +579,8 @@ enum amd_reset_method {
AMD_RESET_METHOD_MODE0,
AMD_RESET_METHOD_MODE1,
AMD_RESET_METHOD_MODE2,
- AMD_RESET_METHOD_BACO
+ AMD_RESET_METHOD_BACO,
+ AMD_RESET_METHOD_PCI,
};
/*
@@ -608,7 +610,6 @@ struct amdgpu_asic_funcs {
/* invalidate hdp read cache */
void (*invalidate_hdp)(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
- void (*reset_hdp_ras_error_count)(struct amdgpu_device *adev);
/* check if the asic needs a full reset of if soft reset will work */
bool (*need_full_reset)(struct amdgpu_device *adev);
/* initialize doorbell layout for specific asic*/
@@ -891,6 +892,7 @@ struct amdgpu_device {
/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
+ struct amdgpu_irq_src vline0_irq;
struct amdgpu_irq_src vupdate_irq;
struct amdgpu_irq_src pageflip_irq;
struct amdgpu_irq_src hpd_irq;
@@ -921,6 +923,9 @@ struct amdgpu_device {
/* nbio */
struct amdgpu_nbio nbio;
+ /* hdp */
+ struct amdgpu_hdp hdp;
+
/* smuio */
struct amdgpu_smuio smuio;
@@ -1003,6 +1008,12 @@ struct amdgpu_device {
bool in_suspend;
bool in_hibernate;
+ /*
+ * The combined flag in_poweroff_reboot_com is used to identify the
+ * poweroff and reboot paths during s0i3 system-wide suspend.
+ */
+ bool in_poweroff_reboot_com;
+
atomic_t in_gpu_reset;
enum pp_mp1_state mp1_state;
struct rw_semaphore reset_sem;
@@ -1202,8 +1213,10 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
-#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
+#define amdgpu_asic_flush_hdp(adev, r) \
+ ((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
+#define amdgpu_asic_invalidate_hdp(adev, r) \
+ ((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : (adev)->hdp.funcs->invalidate_hdp((adev), (r)))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
@@ -1222,6 +1235,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_job* job);
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 8155c54392c8..36a741d63ddc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -903,10 +903,11 @@ void amdgpu_acpi_fini(struct amdgpu_device *adev)
*/
bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
{
+#if defined(CONFIG_AMD_PMC)
if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
if (adev->flags & AMD_IS_APU)
return true;
}
-
+#endif
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index db96d69eb45e..c5343a5eecbe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -47,12 +47,8 @@ int amdgpu_amdkfd_init(void)
amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
amdgpu_amdkfd_total_mem_size *= si.mem_unit;
-#ifdef CONFIG_HSA_AMD
ret = kgd2kfd_init();
amdgpu_amdkfd_gpuvm_init_mem_limits();
-#else
- ret = -ENOENT;
-#endif
kfd_initialized = !ret;
return ret;
@@ -696,86 +692,3 @@ bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
return adev->have_atomics_support;
}
-
-#ifndef CONFIG_HSA_AMD
-bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
-{
- return false;
-}
-
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
-{
-}
-
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
-{
- return 0;
-}
-
-void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
-{
-}
-
-struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
-{
- return NULL;
-}
-
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
-{
- return 0;
-}
-
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
- unsigned int asic_type, bool vf)
-{
- return NULL;
-}
-
-bool kgd2kfd_device_init(struct kfd_dev *kfd,
- struct drm_device *ddev,
- const struct kgd2kfd_shared_resources *gpu_resources)
-{
- return false;
-}
-
-void kgd2kfd_device_exit(struct kfd_dev *kfd)
-{
-}
-
-void kgd2kfd_exit(void)
-{
-}
-
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
-{
-}
-
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
-{
- return 0;
-}
-
-int kgd2kfd_pre_reset(struct kfd_dev *kfd)
-{
- return 0;
-}
-
-int kgd2kfd_post_reset(struct kfd_dev *kfd)
-{
- return 0;
-}
-
-void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
-{
-}
-
-void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
-{
-}
-
-void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
-{
-}
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index ea391ca7f2f1..a81d9cacf9b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -94,11 +94,6 @@ enum kgd_engine_type {
KGD_ENGINE_MAX
};
-struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
- struct mm_struct *mm);
-bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
-struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
struct amdkfd_process_info {
/* List head of all VMs that belong to a KFD process */
@@ -132,8 +127,6 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev);
-
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len);
@@ -153,6 +146,38 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
int queue_bit);
+struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
+ struct mm_struct *mm);
+#if IS_ENABLED(CONFIG_HSA_AMD)
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
+int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
+#else
+static inline
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
+{
+ return false;
+}
+
+static inline
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
+{
+ return NULL;
+}
+
+static inline
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+ return 0;
+}
+
+static inline
+int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
+{
+ return 0;
+}
+#endif
/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
@@ -215,8 +240,6 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
struct file *filp, u32 pasid,
void **vm, void **process_info,
struct dma_fence **ef);
-void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
@@ -236,23 +259,43 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
struct kgd_mem *mem, void **kptr, uint64_t *size);
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
struct dma_fence **ef);
-
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
struct kfd_vm_fault_info *info);
-
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
struct dma_buf *dmabuf,
uint64_t va, void *vm,
struct kgd_mem **mem, uint64_t *size,
uint64_t *mmap_offset);
-
-void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
-
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
struct tile_config *config);
+#if IS_ENABLED(CONFIG_HSA_AMD)
+void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
+#else
+static inline
+void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
+{
+}
+static inline
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+{
+}
+
+static inline
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
+{
+}
+#endif
/* KGD2KFD callbacks */
+int kgd2kfd_quiesce_mm(struct mm_struct *mm);
+int kgd2kfd_resume_mm(struct mm_struct *mm);
+int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
+ struct dma_fence *fence);
+#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
@@ -266,11 +309,68 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
-int kgd2kfd_quiesce_mm(struct mm_struct *mm);
-int kgd2kfd_resume_mm(struct mm_struct *mm);
-int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
- struct dma_fence *fence);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask);
+#else
+static inline int kgd2kfd_init(void)
+{
+ return -ENOENT;
+}
+static inline void kgd2kfd_exit(void)
+{
+}
+
+static inline
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
+ unsigned int asic_type, bool vf)
+{
+ return NULL;
+}
+
+static inline
+bool kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev,
+ const struct kgd2kfd_shared_resources *gpu_resources)
+{
+ return false;
+}
+
+static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
+{
+}
+
+static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+{
+}
+
+static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+{
+ return 0;
+}
+
+static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
+static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
+static inline
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+{
+}
+
+static inline
+void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
+{
+}
+
+static inline
+void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
+{
+}
+#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
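
The header changes above follow the standard kernel pattern of pairing declarations guarded by IS_ENABLED(CONFIG_HSA_AMD) with empty static inline stubs, so callers compile unchanged whether or not KFD is built. A minimal sketch of the pattern, using a hypothetical config symbol and function:

#include <linux/kconfig.h>

struct foo_device;

#if IS_ENABLED(CONFIG_FOO)
int foo_do_work(struct foo_device *fdev);
#else
/* Compiled out: callers see a successful no-op instead of a link error. */
static inline int foo_do_work(struct foo_device *fdev)
{
	return 0;
}
#endif
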
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 4763bab7a4d0..62aa1a6f64ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -23,7 +23,6 @@
#include "amdgpu_amdkfd.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
-#include "navi10_enum.h"
#include "athub/athub_2_0_0_offset.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 50016bf9c427..fad3b91f74f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -24,7 +24,6 @@
#include "amdgpu_amdkfd.h"
#include "gc/gc_10_3_0_offset.h"
#include "gc/gc_10_3_0_sh_mask.h"
-#include "navi10_enum.h"
#include "oss/osssys_5_0_0_offset.h"
#include "oss/osssys_5_0_0_sh_mask.h"
#include "soc15_common.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 2d991da2cead..ac0a432a9bf7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -26,6 +26,7 @@
#include <linux/sched/task.h>
#include "amdgpu_object.h"
+#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
@@ -453,7 +454,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
struct amdgpu_bo *bo = mem->bo;
uint64_t va = mem->va;
struct list_head *list_bo_va = &mem->bo_va_list;
- unsigned long bo_size = bo->tbo.mem.size;
+ unsigned long bo_size = bo->tbo.base.size;
if (!va) {
pr_err("Invalid VA when adding BO to VM\n");
@@ -1152,7 +1153,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
- struct amdgpu_bo_param bp;
+ struct drm_gem_object *gobj;
u32 domain, alloc_domain;
u64 alloc_flags;
int ret;
@@ -1220,19 +1221,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
va, size, domain_string(alloc_domain));
- memset(&bp, 0, sizeof(bp));
- bp.size = size;
- bp.byte_align = 1;
- bp.domain = alloc_domain;
- bp.flags = alloc_flags;
- bp.type = bo_type;
- bp.resv = NULL;
- ret = amdgpu_bo_create(adev, &bp, &bo);
+ ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
+ bo_type, NULL, &gobj);
if (ret) {
pr_debug("Failed to create BO on domain %s. ret %d\n",
- domain_string(alloc_domain), ret);
+ domain_string(alloc_domain), ret);
goto err_bo_create;
}
+ bo = gem_to_amdgpu_bo(gobj);
if (bo_type == ttm_bo_type_sg) {
bo->tbo.sg = sg;
bo->tbo.ttm->sg = sg;
@@ -1281,7 +1277,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
{
struct amdkfd_process_info *process_info = mem->process_info;
- unsigned long bo_size = mem->bo->tbo.mem.size;
+ unsigned long bo_size = mem->bo->tbo.base.size;
struct kfd_bo_va_list *entry, *tmp;
struct bo_vm_reservation_context ctx;
struct ttm_validate_buffer *bo_list_entry;
@@ -1402,7 +1398,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
mutex_lock(&mem->lock);
domain = mem->domain;
- bo_size = bo->tbo.mem.size;
+ bo_size = bo->tbo.base.size;
pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
mem->va,
@@ -1506,7 +1502,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdkfd_process_info *process_info =
((struct amdgpu_vm *)vm)->process_info;
- unsigned long bo_size = mem->bo->tbo.mem.size;
+ unsigned long bo_size = mem->bo->tbo.base.size;
struct kfd_bo_va_list *entry;
struct bo_vm_reservation_context ctx;
int ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 6333cada1e09..cfb1a9a04477 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -155,7 +155,7 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
u8 header[AMD_VBIOS_SIGNATURE_END+1] = {0};
int len;
- if (!adev->asic_funcs->read_bios_from_rom)
+ if (!adev->asic_funcs || !adev->asic_funcs->read_bios_from_rom)
return false;
/* validate VBIOS signature */
@@ -291,7 +291,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
continue;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
- if (!ACPI_FAILURE(status)) {
+ if (ACPI_SUCCESS(status)) {
found = true;
break;
}
@@ -304,7 +304,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
continue;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
- if (!ACPI_FAILURE(status)) {
+ if (ACPI_SUCCESS(status)) {
found = true;
break;
}
@@ -348,7 +348,8 @@ static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return igp_read_bios_from_vram(adev);
else
- return amdgpu_asic_read_disabled_bios(adev);
+ return (!adev->asic_funcs || !adev->asic_funcs->read_disabled_bios) ?
+ false : amdgpu_asic_read_disabled_bios(adev);
}
#ifdef CONFIG_ACPI
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 594a0108e90f..3e240b952e79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -98,8 +98,7 @@ static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
return 0;
error_free:
- if (info)
- kvfree(info);
+ kvfree(info);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a6667a2ca0db..43059ead733b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -35,6 +35,7 @@
#include "amdgpu_dm_debugfs.h"
#include "amdgpu_ras.h"
#include "amdgpu_rap.h"
+#include "amdgpu_securedisplay.h"
#include "amdgpu_fw_attestation.h"
/**
@@ -356,7 +357,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
while (size) {
uint32_t value;
- value = RREG32_PCIE(*pos >> 2);
+ value = RREG32_PCIE(*pos);
r = put_user(value, (uint32_t *)buf);
if (r) {
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
@@ -423,7 +424,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
return r;
}
- WREG32_PCIE(*pos >> 2, value);
+ WREG32_PCIE(*pos, value);
result += 4;
buf += 4;
@@ -1427,7 +1428,7 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
struct dma_fence *fence;
spin_lock(&sched->job_list_lock);
- list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+ list_for_each_entry(s_job, &sched->pending_list, list) {
fence = sched->ops->run_job(s_job);
dma_fence_put(fence);
}
@@ -1459,10 +1460,10 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
no_preempt:
spin_lock(&sched->job_list_lock);
- list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
/* remove job from ring_mirror_list */
- list_del_init(&s_job->node);
+ list_del_init(&s_job->list);
sched->ops->free_job(s_job);
continue;
}
@@ -1669,6 +1670,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_rap_debugfs_init(adev);
+ amdgpu_securedisplay_debugfs_init(adev);
+
amdgpu_fw_attestation_debugfs_init(adev);
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index cab1ebaf6d62..6447cd6ca5a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -929,6 +929,18 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}
+/**
+ * amdgpu_device_pci_reset - reset the GPU using generic PCI means
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
+ */
+int amdgpu_device_pci_reset(struct amdgpu_device *adev)
+{
+ return pci_reset_function(adev->pdev);
+}
+
/*
* GPU doorbell aperture helpers function.
*/
@@ -1105,8 +1117,7 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
*/
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
- u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
- u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
+ int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
struct pci_bus *root;
struct resource *res;
unsigned i;
@@ -1137,6 +1148,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
if (!res)
return 0;
+ /* Limit the BAR size to what is available */
+ rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
+ rbar_size);
+
/* Disable memory decoding while we change the BAR addresses and size */
pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
pci_write_config_word(adev->pdev, PCI_COMMAND,
@@ -1422,24 +1437,22 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- pci_set_power_state(dev->pdev, PCI_D0);
- amdgpu_device_load_pci_state(dev->pdev);
- r = pci_enable_device(dev->pdev);
+ pci_set_power_state(pdev, PCI_D0);
+ amdgpu_device_load_pci_state(pdev);
+ r = pci_enable_device(pdev);
if (r)
DRM_WARN("pci_enable_device failed (%d)\n", r);
amdgpu_device_resume(dev, true);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
- drm_kms_helper_poll_enable(dev);
} else {
pr_info("switched off\n");
- drm_kms_helper_poll_disable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
amdgpu_device_suspend(dev, true);
- amdgpu_device_cache_pci_state(dev->pdev);
+ amdgpu_device_cache_pci_state(pdev);
/* Shut down the device */
- pci_disable_device(dev->pdev);
- pci_set_power_state(dev->pdev, PCI_D3cold);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1702,8 +1715,7 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
adev->enable_virtual_display = false;
if (amdgpu_virtual_display) {
- struct drm_device *ddev = adev_to_drm(adev);
- const char *pci_address_name = pci_name(ddev->pdev);
+ const char *pci_address_name = pci_name(adev->pdev);
char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
@@ -2666,7 +2678,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
- if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
+ if (adev->in_poweroff_reboot_com ||
+ !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
}
@@ -3116,7 +3129,10 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
*/
adev->gfx_timeout = msecs_to_jiffies(10000);
adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+ if (amdgpu_sriov_vf(adev))
+ adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
+ msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
+ else if (amdgpu_passthrough(adev))
adev->compute_timeout = msecs_to_jiffies(60000);
else
adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
@@ -3396,7 +3412,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
}
- pci_enable_pcie_error_reporting(adev->ddev.pdev);
+ pci_enable_pcie_error_reporting(adev->pdev);
/* Post card if necessary */
if (amdgpu_device_need_post(adev)) {
@@ -3719,14 +3735,15 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
r = amdgpu_device_ip_suspend_phase1(adev);
- amdgpu_amdkfd_suspend(adev, !fbcon);
+ amdgpu_amdkfd_suspend(adev, adev->in_runpm);
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_fence_driver_suspend(adev);
- if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
+ if (adev->in_poweroff_reboot_com ||
+ !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
r = amdgpu_device_ip_suspend_phase2(adev);
else
amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
@@ -3803,7 +3820,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
}
}
}
- r = amdgpu_amdkfd_resume(adev, !fbcon);
+ r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
if (r)
return r;
@@ -4154,8 +4171,8 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
continue;
spin_lock(&ring->sched.job_list_lock);
- job = list_first_entry_or_null(&ring->sched.ring_mirror_list,
- struct drm_sched_job, node);
+ job = list_first_entry_or_null(&ring->sched.pending_list,
+ struct drm_sched_job, list);
spin_unlock(&ring->sched.job_list_lock);
if (job)
return true;
@@ -4205,6 +4222,8 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
case CHIP_NAVI14:
case CHIP_NAVI12:
case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ case CHIP_DIMGREY_CAVEFISH:
break;
default:
goto disabled;
@@ -4454,6 +4473,46 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
up_write(&adev->reset_sem);
}
+/*
+ * Lock a list of amdgpu devices in a hive safely. If this is not a hive
+ * with multiple nodes, it behaves like amdgpu_device_lock_adev().
+ *
+ * Unlocking does not require rollback.
+ */
+static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!hive) {
+ dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
+ return -ENODEV;
+ }
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ if (!amdgpu_device_lock_adev(tmp_adev, hive))
+ goto roll_back;
+ }
+ } else if (!amdgpu_device_lock_adev(adev, hive))
+ return -EAGAIN;
+
+ return 0;
+roll_back:
+ if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
+ /*
+ * If the lock iteration breaks in the middle of a hive, it may
+ * mean there is a race, or that a hive device has locked up
+ * independently.
+ * We may or may not be in trouble, so try to roll back the
+ * locks already taken and emit a warning.
+ */
+ dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
+ list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ amdgpu_device_unlock_adev(tmp_adev);
+ }
+ }
+ return -EAGAIN;
+}
+
static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
{
struct pci_dev *p = NULL;
@@ -4567,20 +4626,36 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
job ? job->base.id : -1, hive->hive_id);
amdgpu_put_xgmi_hive(hive);
+ if (job)
+ drm_sched_increase_karma(&job->base);
return 0;
}
mutex_lock(&hive->hive_lock);
}
/*
+ * Lock the device before we try to operate on the linked list.
+ * If we fail to get the device lock, don't touch the linked list,
+ * since others may be iterating over it.
+ */
+ r = amdgpu_device_lock_hive_adev(adev, hive);
+ if (r) {
+ dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
+ job ? job->base.id : -1);
+
+ /* even though we skipped this reset, the job still needs to be marked guilty */
+ if (job)
+ drm_sched_increase_karma(&job->base);
+ goto skip_recovery;
+ }
+
+ /*
* Build list of devices to reset.
* In case we are in XGMI hive mode, resort the device list
* to put adev in the 1st position.
*/
INIT_LIST_HEAD(&device_list);
if (adev->gmc.xgmi.num_physical_nodes > 1) {
- if (!hive)
- return -ENODEV;
if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
device_list_handle = &hive->device_list;
@@ -4591,13 +4666,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
/* block all schedulers and reset given job's ring */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
- if (!amdgpu_device_lock_adev(tmp_adev, hive)) {
- dev_info(tmp_adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
- job ? job->base.id : -1);
- r = 0;
- goto skip_recovery;
- }
-
/*
* Try to put the audio codec into suspend state
* before gpu reset started.
@@ -4735,7 +4803,7 @@ skip_recovery:
amdgpu_put_xgmi_hive(hive);
}
- if (r)
+ if (r && r != -EAGAIN)
dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
return r;
}
@@ -4785,7 +4853,13 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
} else {
- if (speed_cap == PCIE_SPEED_16_0GT)
+ if (speed_cap == PCIE_SPEED_32_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
+ else if (speed_cap == PCIE_SPEED_16_0GT)
adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
@@ -4805,7 +4879,13 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
} else {
- if (platform_speed_cap == PCIE_SPEED_16_0GT)
+ if (platform_speed_cap == PCIE_SPEED_32_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
+ else if (platform_speed_cap == PCIE_SPEED_16_0GT)
adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
@@ -4949,8 +5029,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
case pci_channel_io_normal:
return PCI_ERS_RESULT_CAN_RECOVER;
/* Fatal error, prepare for slot reset */
- case pci_channel_io_frozen:
- /*
+ case pci_channel_io_frozen:
+ /*
* Cancel and wait for all TDRs in progress if failing to
* set adev->in_gpu_reset in amdgpu_device_lock_adev
*
@@ -5041,7 +5121,7 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
goto out;
}
- adev->in_pci_err_recovery = true;
+ adev->in_pci_err_recovery = true;
r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
adev->in_pci_err_recovery = false;
if (r)
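
amdgpu_device_lock_hive_adev() above is the usual acquire-all-or-roll-back shape: take the lock on each list entry in order and, on failure, walk back over only the entries already locked, which is exactly what list_for_each_entry_continue_reverse() does from the failing position. A generic sketch of that shape, using a hypothetical node type and plain mutexes:

#include <linux/list.h>
#include <linux/mutex.h>

struct example_node {
	struct list_head link;
	struct mutex lock;
};

/* Lock every node on the list, or unlock the ones taken so far and bail. */
static int example_lock_all(struct list_head *head)
{
	struct example_node *n;

	list_for_each_entry(n, head, link) {
		if (!mutex_trylock(&n->lock))
			goto roll_back;
	}
	return 0;

roll_back:
	/* Resumes just before the node that failed, walking backwards. */
	list_for_each_entry_continue_reverse(n, head, link)
		mutex_unlock(&n->lock);
	return -EAGAIN;
}
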
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index f764803c53a4..48cb33e5b382 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -926,8 +926,10 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct drm_gem_object *obj;
struct amdgpu_framebuffer *amdgpu_fb;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ uint32_t domains;
int ret;
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
@@ -938,7 +940,9 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
}
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
- if (obj->import_attach) {
+ bo = gem_to_amdgpu_bo(obj);
+ domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
+ if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e42175e1acf1..47e0b48dc26f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -40,6 +40,7 @@
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
+#include <linux/pm_runtime.h>
/**
* amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
@@ -151,9 +152,13 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
if (attach->dev->driver == adev->dev->driver)
return 0;
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0)
+ goto out;
+
r = amdgpu_bo_reserve(bo, false);
if (unlikely(r != 0))
- return r;
+ goto out;
/*
* We only create shared fences for internal use, but importers
@@ -165,11 +170,15 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
*/
r = __dma_resv_make_exclusive(bo->tbo.base.resv);
if (r)
- return r;
+ goto out;
bo->prime_shared_count++;
amdgpu_bo_unreserve(bo);
return 0;
+
+out:
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
}
/**
@@ -189,6 +198,9 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
bo->prime_shared_count--;
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}
/**
@@ -269,7 +281,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
case TTM_PL_TT:
sgt = drm_prime_pages_to_sg(obj->dev,
bo->tbo.ttm->pages,
- bo->tbo.num_pages);
+ bo->tbo.ttm->num_pages);
if (IS_ERR(sgt))
return sgt;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7169fb5e3d9c..4575192d9b08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -132,8 +132,12 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
-/* OverDrive(bit 14) disabled by default*/
-uint amdgpu_pp_feature_mask = 0xffffbfff;
+
+/*
+ * OverDrive(bit 14) disabled by default
+ * GFX DCS(bit 19) disabled by default
+ */
+uint amdgpu_pp_feature_mask = 0xfff7bfff;
uint amdgpu_force_long_training;
int amdgpu_job_hang_limit;
int amdgpu_lbpw = -1;
@@ -789,9 +793,9 @@ module_param_named(tmz, amdgpu_tmz, int, 0444);
/**
* DOC: reset_method (int)
- * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
+ * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco, 5 = pci)
*/
-MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
+MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco, 5 = pci)");
module_param_named(reset_method, amdgpu_reset_method, int, 0444);
/**
@@ -1094,6 +1098,7 @@ static const struct pci_device_id pciidlist[] = {
/* Sienna_Cichlid */
{0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
@@ -1206,7 +1211,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- ddev->pdev = pdev;
pci_set_drvdata(pdev, ddev);
ret = amdgpu_driver_load_kms(adev, ent->driver_data);
@@ -1266,7 +1270,9 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
*/
if (!amdgpu_passthrough(adev))
adev->mp1_state = PP_MP1_STATE_UNLOAD;
+ adev->in_poweroff_reboot_com = true;
amdgpu_device_ip_suspend(adev);
+ adev->in_poweroff_reboot_com = false;
adev->mp1_state = PP_MP1_STATE_NONE;
}
@@ -1308,8 +1314,13 @@ static int amdgpu_pmops_thaw(struct device *dev)
static int amdgpu_pmops_poweroff(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- return amdgpu_device_suspend(drm_dev, true);
+ adev->in_poweroff_reboot_com = true;
+ r = amdgpu_device_suspend(drm_dev, true);
+ adev->in_poweroff_reboot_com = false;
+ return r;
}
static int amdgpu_pmops_restore(struct device *dev)
@@ -1344,11 +1355,12 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
adev->in_runpm = true;
if (amdgpu_device_supports_atpx(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- drm_kms_helper_poll_disable(drm_dev);
ret = amdgpu_device_suspend(drm_dev, false);
- if (ret)
+ if (ret) {
+ adev->in_runpm = false;
return ret;
+ }
if (amdgpu_device_supports_atpx(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX
@@ -1401,7 +1413,6 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
amdgpu_device_baco_exit(drm_dev);
}
ret = amdgpu_device_resume(drm_dev, false);
- drm_kms_helper_poll_enable(drm_dev);
if (amdgpu_device_supports_atpx(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
adev->in_runpm = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 0bf7d36c6686..51cd49c6f38f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -271,7 +271,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
DRM_INFO("fb depth is %d\n", fb->format->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
- vga_switcheroo_client_fb_set(adev_to_drm(adev)->pdev, info);
+ vga_switcheroo_client_fb_set(adev->pdev, info);
return 0;
out:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
index 7c6e02e35573..8d1ad294cb02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
@@ -47,10 +47,9 @@ typedef struct FW_ATT_RECORD
uint16_t AttFwIdV2; /* V2 FW ID field */
uint32_t AttFWVersion; /* FW Version */
uint16_t AttFWActiveFunctionID; /* The VF ID (only in VF Attestation Table) */
- uint16_t AttSource; /* FW source indicator */
- uint16_t RecordValid; /* Indicates whether the record is a valid entry */
- uint8_t AttFwTaId; /* Ta ID (only in TA Attestation Table) */
- uint8_t Reserved;
+ uint8_t AttSource; /* FW source indicator */
+ uint8_t RecordValid; /* Indicates whether the record is a valid entry */
+ uint32_t AttFwTaId; /* Ta ID (only in TA Attestation Table) */
} FW_ATT_RECORD;
static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d0a1fee1f5f6..b443907afcea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -269,8 +269,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
resv = vm->root.base.bo->tbo.base.resv;
}
-retry:
initial_domain = (u32)(0xffffffff & args->in.domains);
+retry:
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
initial_domain,
flags, ttm_bo_type_device, resv, &gobj);
@@ -619,7 +619,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
int r = 0;
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
- dev_dbg(&dev->pdev->dev,
+ dev_dbg(dev->dev,
"va_address 0x%LX is in reserved area 0x%LX\n",
args->va_address, AMDGPU_VA_RESERVED_SIZE);
return -EINVAL;
@@ -627,7 +627,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
if (args->va_address >= AMDGPU_GMC_HOLE_START &&
args->va_address < AMDGPU_GMC_HOLE_END) {
- dev_dbg(&dev->pdev->dev,
+ dev_dbg(dev->dev,
"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
args->va_address, AMDGPU_GMC_HOLE_START,
AMDGPU_GMC_HOLE_END);
@@ -639,14 +639,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
vm_size -= AMDGPU_VA_RESERVED_SIZE;
if (args->va_address + args->map_size > vm_size) {
- dev_dbg(&dev->pdev->dev,
+ dev_dbg(dev->dev,
"va_address 0x%llx is in top reserved area 0x%llx\n",
args->va_address + args->map_size, vm_size);
return -EINVAL;
}
if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
- dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
+ dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
args->flags);
return -EINVAL;
}
@@ -658,7 +658,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
case AMDGPU_VA_OP_REPLACE:
break;
default:
- dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
+ dev_dbg(dev->dev, "unsupported operation %d\n",
args->operation);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index cd2c676a2797..8e0a6c62322e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -193,15 +193,16 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
}
bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
- int pipe, int queue)
+ struct amdgpu_ring *ring)
{
- bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
- int cond;
- /* Policy: alternate between normal and high priority */
- cond = multipipe_policy ? pipe : queue;
-
- return ((cond % 2) != 0);
+ /* Policy: use 1st queue as high priority compute queue if we
+ * have more than one compute queue.
+ */
+ if (adev->gfx.num_compute_rings > 1 &&
+ ring == &adev->gfx.compute_ring[0])
+ return true;
+ return false;
}
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 6b5a8f4642cc..72dbcd2bc6a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -380,7 +380,7 @@ void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
int pipe, int queue);
bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
- int pipe, int queue);
+ struct amdgpu_ring *ring);
int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 6e679db5e46f..fe1a39ffda72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -120,7 +120,7 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
- if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
+ if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
return AMDGPU_BO_INVALID_OFFSET;
if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
new file mode 100644
index 000000000000..43caf9f8cc11
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_HDP_H__
+#define __AMDGPU_HDP_H__
+
+struct amdgpu_hdp_funcs {
+ void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+ void (*invalidate_hdp)(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+ void (*reset_ras_error_count)(struct amdgpu_device *adev);
+ void (*update_clock_gating)(struct amdgpu_device *adev, bool enable);
+ void (*get_clock_gating_state)(struct amdgpu_device *adev, u32 *flags);
+ void (*init_registers)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_hdp {
+ const struct amdgpu_hdp_funcs *funcs;
+};
+
+#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 47cad23a6b9e..bca4dddd5a15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -176,7 +176,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
- i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
mutex_init(&i2c->mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 024d0a563a65..7645223ea0ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -195,6 +195,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
ring->funcs->emit_mem_sync(ring);
+ if (ring->funcs->emit_wave_limit &&
+ ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
+ ring->funcs->emit_wave_limit(ring, true);
+
if (ring->funcs->insert_start)
ring->funcs->insert_start(ring);
@@ -295,6 +299,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
ring->current_ctx = fence_ctx;
if (vm && ring->funcs->emit_switch_buffer)
amdgpu_ring_emit_switch_buffer(ring);
+
+ if (ring->funcs->emit_wave_limit &&
+ ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
+ ring->funcs->emit_wave_limit(ring, false);
+
amdgpu_ring_commit(ring);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index dcd9b4a8e20b..dc852af4f3b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -205,3 +205,48 @@ restart_ih:
return IRQ_HANDLED;
}
+/**
+ * amdgpu_ih_decode_iv_helper - decode an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: ih ring to process
+ * @entry: IV entry
+ *
+ * Decodes the interrupt vector at the current rptr
+ * position and also advances the position for Vega10
+ * and later GPUs.
+ */
+void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ struct amdgpu_iv_entry *entry)
+{
+ /* wptr/rptr are in bytes! */
+ u32 ring_index = ih->rptr >> 2;
+ uint32_t dw[8];
+
+ dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+ dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+ dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
+ dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
+ dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
+ dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
+ dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
+
+ entry->client_id = dw[0] & 0xff;
+ entry->src_id = (dw[0] >> 8) & 0xff;
+ entry->ring_id = (dw[0] >> 16) & 0xff;
+ entry->vmid = (dw[0] >> 24) & 0xf;
+ entry->vmid_src = (dw[0] >> 31);
+ entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
+ entry->timestamp_src = dw[2] >> 31;
+ entry->pasid = dw[3] & 0xffff;
+ entry->pasid_src = dw[3] >> 31;
+ entry->src_data[0] = dw[4];
+ entry->src_data[1] = dw[5];
+ entry->src_data[2] = dw[6];
+ entry->src_data[3] = dw[7];
+
+ /* wptr/rptr are in bytes! */
+ ih->rptr += 32;
+}
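For illustration only: an IP-level decode_iv callback would typically just forward to this helper, which reads eight dwords at rptr and advances rptr by 32 bytes. The wrapper below is hypothetical; the actual wiring for a specific IH block (e.g. vega10_ih) is not part of this hunk.

/* Hypothetical example: an IH block's decode_iv hook delegating to the helper. */
static void example_ih_decode_iv(struct amdgpu_device *adev,
				 struct amdgpu_ih_ring *ih,
				 struct amdgpu_iv_entry *entry)
{
	amdgpu_ih_decode_iv_helper(adev, ih, entry);
}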
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 3c9cfe7eecff..6ed4a85fc7c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -30,6 +30,18 @@
struct amdgpu_device;
struct amdgpu_iv_entry;
+struct amdgpu_ih_regs {
+ uint32_t ih_rb_base;
+ uint32_t ih_rb_base_hi;
+ uint32_t ih_rb_cntl;
+ uint32_t ih_rb_wptr;
+ uint32_t ih_rb_rptr;
+ uint32_t ih_doorbell_rptr;
+ uint32_t ih_rb_wptr_addr_lo;
+ uint32_t ih_rb_wptr_addr_hi;
+ uint32_t psp_reg_id;
+};
+
/*
* R6xx+ IH ring
*/
@@ -53,6 +65,7 @@ struct amdgpu_ih_ring {
bool enabled;
unsigned rptr;
atomic_t lock;
+ struct amdgpu_ih_regs ih_regs;
};
/* provided by the ih block */
@@ -75,5 +88,7 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
unsigned int num_dw);
int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
-
+void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index bea57e8e793f..afbbec82a289 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -444,7 +444,8 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
- } else if (adev->irq.virq[src_id]) {
+ } else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
+ adev->irq.virq[src_id]) {
generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
} else if (!adev->irq.client[client_id].sources) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index dcfe8a3b03ff..ff48101bab55 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -271,7 +271,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
}
/* Signal all jobs already scheduled to HW */
- list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+ list_for_each_entry(s_job, &sched->pending_list, list) {
struct drm_sched_fence *s_fence = s_job->s_fence;
dma_fence_set_error(&s_fence->finished, -EHWPOISON);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index b16b32797624..64beb3399604 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -142,7 +142,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
(amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) &&
((flags & AMD_IS_APU) == 0) &&
- !pci_is_thunderbolt_attached(dev->pdev))
+ !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
flags |= AMD_IS_PX;
parent = pci_upstream_bridge(adev->pdev);
@@ -156,7 +156,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
*/
r = amdgpu_device_init(adev, flags);
if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+ dev_err(dev->dev, "Fatal error during GPU init\n");
goto out;
}
@@ -173,8 +173,6 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
switch (adev->asic_type) {
case CHIP_VEGA20:
case CHIP_ARCTURUS:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
/* enable runpm if runpm=1 */
if (amdgpu_runtime_pm > 0)
adev->runpm = true;
@@ -199,7 +197,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
acpi_status = amdgpu_acpi_init(adev);
if (acpi_status)
- dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
+ dev_dbg(dev->dev, "Error during ACPI methods call\n");
if (adev->runpm) {
/* only need to skip on ATPX */
@@ -735,10 +733,10 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (!dev_info)
return -ENOMEM;
- dev_info->device_id = dev->pdev->device;
+ dev_info->device_id = adev->pdev->device;
dev_info->chip_rev = adev->rev_id;
dev_info->external_rev = adev->external_rev_id;
- dev_info->pci_rev = dev->pdev->revision;
+ dev_info->pci_rev = adev->pdev->revision;
dev_info->family = adev->family;
dev_info->num_shader_engines = adev->gfx.config.max_shader_engines;
dev_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
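A note on the conversions above (my reading, not stated in the patch): they follow the DRM-wide removal of drm_device::pdev. Where a PCI device is still needed it is derived from the generic struct device, as the patch itself does:

/* Conversion pattern used throughout this series. */
struct pci_dev *pdev = to_pci_dev(dev->dev);	/* instead of dev->pdev */
dev_err(dev->dev, "Fatal error during GPU init\n");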
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index e62cc0e1a5ad..7c11bce4514b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -57,7 +57,6 @@ struct amdgpu_nbio_funcs {
u32 (*get_pcie_port_data_offset)(struct amdgpu_device *adev);
u32 (*get_rev_id)(struct amdgpu_device *adev);
void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
- void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
u32 (*get_memsize)(struct amdgpu_device *adev);
void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
bool use_doorbell, int doorbell_index, int doorbell_size);
@@ -89,6 +88,7 @@ struct amdgpu_nbio_funcs {
int (*ras_late_init)(struct amdgpu_device *adev);
void (*enable_aspm)(struct amdgpu_device *adev,
bool enable);
+ void (*program_aspm)(struct amdgpu_device *adev);
};
struct amdgpu_nbio {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 25ec4d57333f..4b29b8205442 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -787,7 +787,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
if (r < 0)
return r;
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
if (r)
return r;
@@ -897,7 +897,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;
/* A shared bo cannot be migrated to VRAM */
- if (bo->prime_shared_count) {
+ if (bo->prime_shared_count || bo->tbo.base.import_attach) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else
@@ -911,10 +911,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (bo->tbo.pin_count) {
uint32_t mem_type = bo->tbo.mem.mem_type;
+ uint32_t mem_flags = bo->tbo.mem.placement;
if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
return -EINVAL;
+ if ((mem_type == TTM_PL_VRAM) &&
+ (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
+ !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
+ return -EINVAL;
+
ttm_bo_pin(&bo->tbo);
if (max_offset != 0) {
@@ -930,7 +936,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (bo->tbo.base.import_attach)
dma_buf_pin(bo->tbo.base.import_attach);
- bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
/* force to pin into visible video ram */
if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
@@ -983,6 +988,7 @@ error:
*/
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
+ bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 79120ec41396..9ac37569823f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -174,12 +174,12 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
- return bo->tbo.num_pages << PAGE_SHIFT;
+ return bo->tbo.base.size;
}
static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
- return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+ return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}
static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 347fec669424..839917eb7bc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -36,6 +36,7 @@
#include "psp_v12_0.h"
#include "amdgpu_ras.h"
+#include "amdgpu_securedisplay.h"
static int psp_sysfs_init(struct amdgpu_device *adev);
static void psp_sysfs_fini(struct amdgpu_device *adev);
@@ -249,7 +250,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
{
int ret;
int index;
- int timeout = 2000;
+ int timeout = 20000;
bool ras_intr = false;
bool skip_unsupport = false;
@@ -282,7 +283,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
ras_intr = amdgpu_ras_intr_triggered();
if (ras_intr)
break;
- msleep(1);
+ usleep_range(10, 100);
amdgpu_asic_invalidate_hdp(psp->adev, NULL);
}
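Rough budget behind the new numbers (my arithmetic, not from the patch): the old loop waited up to 2000 iterations x ~1 ms = ~2 s, while the new one waits up to 20000 iterations x 10-100 us = ~0.2-2 s. The overall timeout stays in the same ballpark, but the poll interval drops by one to two orders of magnitude, so fast PSP responses are detected much sooner.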
@@ -1652,6 +1653,175 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
}
// RAP end
+/* securedisplay start */
+static int psp_securedisplay_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16k memory aligned to 4k from Frame Buffer (local
+ * physical) for the securedisplay TA <-> driver shared buffer
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_SECUREDISPLAY_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->securedisplay_context.securedisplay_shared_bo,
+ &psp->securedisplay_context.securedisplay_shared_mc_addr,
+ &psp->securedisplay_context.securedisplay_shared_buf);
+
+ return ret;
+}
+
+static int psp_securedisplay_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_securedisplay_start_addr, psp->ta_securedisplay_ucode_size);
+
+ psp_prep_ta_load_cmd_buf(cmd,
+ psp->fw_pri_mc_addr,
+ psp->ta_securedisplay_ucode_size,
+ psp->securedisplay_context.securedisplay_shared_mc_addr,
+ PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ if (ret)
+ goto failed;
+
+ psp->securedisplay_context.securedisplay_initialized = true;
+ psp->securedisplay_context.session_id = cmd->resp.session_id;
+ mutex_init(&psp->securedisplay_context.mutex);
+
+failed:
+ kfree(cmd);
+ return ret;
+}
+
+static int psp_securedisplay_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_securedisplay_initialize(struct psp_context *psp)
+{
+ int ret;
+ struct securedisplay_cmd *securedisplay_cmd;
+
+ /*
+ * TODO: bypass the initialization in SRIOV for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_securedisplay_ucode_size ||
+ !psp->adev->psp.ta_securedisplay_start_addr) {
+ dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
+ return 0;
+ }
+
+ if (!psp->securedisplay_context.securedisplay_initialized) {
+ ret = psp_securedisplay_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_securedisplay_load(psp);
+ if (ret)
+ return ret;
+
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+ if (ret) {
+ psp_securedisplay_unload(psp);
+
+ amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
+ &psp->securedisplay_context.securedisplay_shared_mc_addr,
+ &psp->securedisplay_context.securedisplay_shared_buf);
+
+ psp->securedisplay_context.securedisplay_initialized = false;
+
+ dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
+ return -EINVAL;
+ }
+
+ if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
+ psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+ dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
+ securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
+ }
+
+ return 0;
+}
+
+static int psp_securedisplay_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the termination in SRIOV for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->securedisplay_context.securedisplay_initialized)
+ return 0;
+
+ ret = psp_securedisplay_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->securedisplay_context.securedisplay_initialized = false;
+
+ /* free securedisplay shared memory */
+ amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
+ &psp->securedisplay_context.securedisplay_shared_mc_addr,
+ &psp->securedisplay_context.securedisplay_shared_buf);
+
+ return ret;
+}
+
+int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+
+ if (!psp->securedisplay_context.securedisplay_initialized)
+ return -EINVAL;
+
+ if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
+ ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
+ return -EINVAL;
+
+ mutex_lock(&psp->securedisplay_context.mutex);
+
+ ret = psp_ta_invoke(psp, ta_cmd_id, psp->securedisplay_context.session_id);
+
+ mutex_unlock(&psp->securedisplay_context.mutex);
+
+ return ret;
+}
+/* securedisplay end */
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -2126,6 +2296,11 @@ skip_memalloc:
if (ret)
dev_err(psp->adev->dev,
"RAP: Failed to initialize RAP\n");
+
+ ret = psp_securedisplay_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
}
return 0;
@@ -2176,6 +2351,7 @@ static int psp_hw_fini(void *handle)
if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp);
+ psp_securedisplay_terminate(psp);
psp_rap_terminate(psp);
psp_dtm_terminate(psp);
psp_hdcp_terminate(psp);
@@ -2240,6 +2416,11 @@ static int psp_suspend(void *handle)
DRM_ERROR("Failed to terminate rap ta\n");
return ret;
}
+ ret = psp_securedisplay_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate securedisplay ta\n");
+ return ret;
+ }
}
ret = psp_asd_unload(psp);
@@ -2323,6 +2504,11 @@ static int psp_resume(void *handle)
if (ret)
dev_err(psp->adev->dev,
"RAP: Failed to initialize RAP\n");
+
+ ret = psp_securedisplay_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
}
mutex_unlock(&adev->firmware.mutex);
@@ -2629,6 +2815,11 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
psp->ta_rap_ucode_size = le32_to_cpu(desc->size_bytes);
psp->ta_rap_start_addr = ucode_start_addr;
break;
+ case TA_FW_TYPE_PSP_SECUREDISPLAY:
+ psp->ta_securedisplay_ucode_version = le32_to_cpu(desc->fw_version);
+ psp->ta_securedisplay_ucode_size = le32_to_cpu(desc->size_bytes);
+ psp->ta_securedisplay_start_addr = ucode_start_addr;
+ break;
default:
dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index da250bc1ac57..cb50ba445f8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -30,6 +30,7 @@
#include "ta_xgmi_if.h"
#include "ta_ras_if.h"
#include "ta_rap_if.h"
+#include "ta_secureDisplay_if.h"
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
@@ -40,6 +41,7 @@
#define PSP_HDCP_SHARED_MEM_SIZE 0x4000
#define PSP_DTM_SHARED_MEM_SIZE 0x4000
#define PSP_RAP_SHARED_MEM_SIZE 0x4000
+#define PSP_SECUREDISPLAY_SHARED_MEM_SIZE 0x4000
#define PSP_SHARED_MEM_SIZE 0x4000
#define PSP_FW_NAME_LEN 0x24
@@ -171,6 +173,15 @@ struct psp_rap_context {
struct mutex mutex;
};
+struct psp_securedisplay_context {
+ bool securedisplay_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *securedisplay_shared_bo;
+ uint64_t securedisplay_shared_mc_addr;
+ void *securedisplay_shared_buf;
+ struct mutex mutex;
+};
+
#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
#define GDDR6_MEM_TRAINING_OFFSET 0x8000
@@ -298,12 +309,17 @@ struct psp_context
uint32_t ta_rap_ucode_size;
uint8_t *ta_rap_start_addr;
+ uint32_t ta_securedisplay_ucode_version;
+ uint32_t ta_securedisplay_ucode_size;
+ uint8_t *ta_securedisplay_start_addr;
+
struct psp_asd_context asd_context;
struct psp_xgmi_context xgmi_context;
struct psp_ras_context ras;
struct psp_hdcp_context hdcp_context;
struct psp_dtm_context dtm_context;
struct psp_rap_context rap_context;
+ struct psp_securedisplay_context securedisplay_context;
struct mutex mutex;
struct psp_memory_training_context mem_train_ctx;
};
@@ -380,6 +396,7 @@ int psp_ras_trigger_error(struct psp_context *psp,
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_rlc_autoload_start(struct psp_context *psp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 82e952696d24..1fb2a91ad30a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -846,7 +846,7 @@ static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
dev_warn(adev->dev, "Failed to allow XGMI power down");
- if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
dev_warn(adev->dev, "Failed to allow df cstate");
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 1a612f51ecd9..b644c78475fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -166,7 +166,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned int max_dw, struct amdgpu_irq_src *irq_src,
unsigned int irq_type, unsigned int hw_prio)
{
- int r, i;
+ int r;
int sched_hw_submission = amdgpu_sched_hw_submission;
u32 *num_sched;
u32 hw_ip;
@@ -258,8 +258,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
ring->max_dw = max_dw;
- ring->priority = DRM_SCHED_PRIORITY_NORMAL;
- mutex_init(&ring->priority_mutex);
+ ring->hw_prio = hw_prio;
if (!ring->no_scheduler) {
hw_ip = ring->funcs->type;
@@ -268,9 +267,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
&ring->sched;
}
- for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i)
- atomic_set(&ring->num_jobs[i], 0);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 7112137689db..56acec1075ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -197,6 +197,7 @@ struct amdgpu_ring_funcs {
void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
int (*preempt_ib)(struct amdgpu_ring *ring);
void (*emit_mem_sync)(struct amdgpu_ring *ring);
+ void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
};
struct amdgpu_ring {
@@ -242,11 +243,7 @@ struct amdgpu_ring {
struct dma_fence *vmid_wait;
bool has_compute_vm_bug;
bool no_scheduler;
-
- atomic_t num_jobs[DRM_SCHED_PRIORITY_COUNT];
- struct mutex priority_mutex;
- /* protected by priority_mutex */
- int priority;
+ int hw_prio;
#if defined(CONFIG_DEBUG_FS)
struct dentry *ent;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
new file mode 100644
index 000000000000..834440ab9ff7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+
+#include "amdgpu.h"
+#include "amdgpu_securedisplay.h"
+
+/**
+ * DOC: AMDGPU SECUREDISPLAY debugfs test interface
+ *
+ * How to use:
+ * echo opcode <value> > <debugfs_dir>/dri/xxx/securedisplay_test
+ * eg. echo 1 > <debugfs_dir>/dri/xxx/securedisplay_test
+ * eg. echo 2 phy_id > <debugfs_dir>/dri/xxx/securedisplay_test
+ *
+ * opcode:
+ * 1: Query whether the TA is responding, used only for validation purposes
+ * 2: Send region of interest and CRC value to I2C. (uint32)phy_id is
+ * sent to determine which DIO scratch register should be used to get
+ * the ROI, and i2c_buf is received as the output.
+ *
+ * Refer to the header file ta_securedisplay_if.h for more details.
+ *
+ */
+
+void psp_securedisplay_parse_resp_status(struct psp_context *psp,
+ enum ta_securedisplay_status status)
+{
+ switch (status) {
+ case TA_SECUREDISPLAY_STATUS__SUCCESS:
+ break;
+ case TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE:
+ dev_err(psp->adev->dev, "Secure display: Generic Failure.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__INVALID_PARAMETER:
+ dev_err(psp->adev->dev, "Secure display: Invalid Parameter.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__NULL_POINTER:
+ dev_err(psp->adev->dev, "Secure display: Null Pointer.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR:
+ dev_err(psp->adev->dev, "Secure display: Failed to write to I2C.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR:
+ dev_err(psp->adev->dev, "Secure display: Failed to Read DIO Scratch Register.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR:
+ dev_err(psp->adev->dev, "Secure display: Failed to Read CRC");
+ break;
+ default:
+ dev_err(psp->adev->dev, "Secure display: Failed to parse status: %d\n", status);
+ }
+}
+
+void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
+ enum ta_securedisplay_command command_id)
+{
+ *cmd = (struct securedisplay_cmd *)psp->securedisplay_context.securedisplay_shared_buf;
+ memset(*cmd, 0, sizeof(struct securedisplay_cmd));
+ (*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE;
+ (*cmd)->cmd_id = command_id;
+}
+
+static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ struct psp_context *psp = &adev->psp;
+ struct securedisplay_cmd *securedisplay_cmd;
+ struct drm_device *dev = adev_to_drm(adev);
+ uint32_t phy_id;
+ uint32_t op;
+ int i;
+ char str[64];
+ char i2c_output[256];
+ int ret;
+
+ if (*pos || size > sizeof(str) - 1)
+ return -EINVAL;
+
+ memset(str, 0, sizeof(str));
+ ret = copy_from_user(str, buf, size);
+ if (ret)
+ return -EFAULT;
+
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+ }
+
+ if (size < 3)
+ sscanf(str, "%u ", &op);
+ else
+ sscanf(str, "%u %u", &op, &phy_id);
+
+ switch (op) {
+ case 1:
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+ if (!ret) {
+ if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS)
+ dev_info(adev->dev, "SECUREDISPLAY: query securedisplay TA ret is 0x%X\n",
+ securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
+ else
+ psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+ }
+ break;
+ case 2:
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_id;
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ if (!ret) {
+ if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS) {
+ memset(i2c_output, 0, sizeof(i2c_output));
+ for (i = 0; i < TA_SECUREDISPLAY_I2C_BUFFER_SIZE; i++)
+ sprintf(i2c_output + strlen(i2c_output), " 0x%X",
+ securedisplay_cmd->securedisplay_out_message.send_roi_crc.i2c_buf[i]);
+ dev_info(adev->dev, "SECUREDISPLAY: I2C buffer out put is :%s\n", i2c_output);
+ } else {
+ psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+ }
+ }
+ break;
+ default:
+ dev_err(adev->dev, "Invalid input: %s\n", str);
+ }
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return size;
+}
+
+static const struct file_operations amdgpu_securedisplay_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .read = NULL,
+ .write = amdgpu_securedisplay_debugfs_write,
+ .llseek = default_llseek
+};
+
+void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+
+ if (!adev->psp.securedisplay_context.securedisplay_initialized)
+ return;
+
+ debugfs_create_file("securedisplay_test", S_IWUSR, adev_to_drm(adev)->primary->debugfs_root,
+ adev, &amdgpu_securedisplay_debugfs_ops);
+#endif
+}
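A hedged sketch of how a kernel-side caller could exercise the query path with the helpers added above; the wrapper name is illustrative and not part of the patch, but the calls and constants are the ones introduced here.

/* Illustrative only: query the securedisplay TA after it has been
 * initialized by psp_securedisplay_initialize(). */
static int example_query_securedisplay_ta(struct amdgpu_device *adev)
{
	struct psp_context *psp = &adev->psp;
	struct securedisplay_cmd *cmd;
	int ret;

	psp_prep_securedisplay_cmd_buf(psp, &cmd, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
	if (ret)
		return ret;

	if (cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
		psp_securedisplay_parse_resp_status(psp, cmd->status);

	return 0;
}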
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
new file mode 100644
index 000000000000..fe98574748f4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _AMDGPU_SECUREDISPLAY_H
+#define _AMDGPU_SECUREDISPLAY_H
+
+#include "amdgpu.h"
+#include "ta_secureDisplay_if.h"
+
+void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev);
+void psp_securedisplay_parse_resp_status(struct psp_context *psp,
+ enum ta_securedisplay_status status);
+void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
+ enum ta_securedisplay_command command_id);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 6752d8b13118..792d20261846 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -21,7 +21,7 @@
*
*/
-#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _AMDGPU_TRACE_H_
#include <linux/stringify.h>
@@ -127,7 +127,7 @@ TRACE_EVENT(amdgpu_bo_create,
TP_fast_assign(
__entry->bo = bo;
- __entry->pages = bo->tbo.num_pages;
+ __entry->pages = bo->tbo.mem.num_pages;
__entry->type = bo->tbo.mem.mem_type;
__entry->prefer = bo->preferred_domains;
__entry->allow = bo->allowed_domains;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 4d8f19ab1014..9fd2157b133a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -46,7 +46,6 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
@@ -637,7 +636,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
out:
/* update statistics */
- atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
+ atomic64_add(bo->base.size, &adev->num_bytes_moved);
amdgpu_bo_move_notify(bo, evict, new_mem);
return 0;
}
@@ -918,8 +917,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
goto release_sg;
/* convert SG to linear array of pages and dma addresses */
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- gtt->ttm.dma_address, ttm->num_pages);
+ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+ ttm->num_pages);
return 0;
@@ -1265,9 +1264,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
ttm->sg = sgt;
}
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- gtt->ttm.dma_address,
- ttm->num_pages);
+ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+ ttm->num_pages);
return 0;
}
@@ -2124,7 +2122,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
return r;
}
- num_pages = bo->tbo.num_pages;
+ num_pages = bo->tbo.mem.num_pages;
mm_node = bo->tbo.mem.mm_node;
num_loops = 0;
while (num_pages) {
@@ -2154,7 +2152,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
}
}
- num_pages = bo->tbo.num_pages;
+ num_pages = bo->tbo.mem.num_pages;
mm_node = bo->tbo.mem.mm_node;
while (num_pages) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 0e43b46d3ab5..46449e70348b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -122,6 +122,9 @@ struct ta_firmware_header_v1_0 {
uint32_t ta_dtm_ucode_version;
uint32_t ta_dtm_offset_bytes;
uint32_t ta_dtm_size_bytes;
+ uint32_t ta_securedisplay_ucode_version;
+ uint32_t ta_securedisplay_offset_bytes;
+ uint32_t ta_securedisplay_size_bytes;
};
enum ta_fw_type {
@@ -132,6 +135,7 @@ enum ta_fw_type {
TA_FW_TYPE_PSP_HDCP,
TA_FW_TYPE_PSP_DTM,
TA_FW_TYPE_PSP_RAP,
+ TA_FW_TYPE_PSP_SECUREDISPLAY,
};
struct ta_fw_bin_desc {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 8b989670ed66..e2ed4689118a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1170,7 +1170,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
int r, i;
r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_GTT,
&bo, NULL, (void **)&msg);
if (r)
return r;
@@ -1202,7 +1202,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
int r, i;
r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_GTT,
&bo, NULL, (void **)&msg);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0d5284b936e4..ea6a62f67e38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -1160,6 +1160,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_bo_free_kernel(&bo, NULL, NULL);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 4a77c7424dfc..99b82f3c2617 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -496,6 +496,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
struct amdgpu_job *job;
struct amdgpu_ib *ib;
uint64_t addr;
+ void *msg = NULL;
int i, r;
r = amdgpu_job_alloc_with_ib(adev, 64,
@@ -505,6 +506,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
ib = &job->ibs[0];
addr = amdgpu_bo_gpu_offset(bo);
+ msg = amdgpu_bo_kptr(bo);
ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
ib->ptr[1] = addr;
ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
@@ -523,7 +525,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
amdgpu_bo_fence(bo, f, false);
amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
if (fence)
*fence = dma_fence_get(f);
@@ -536,7 +538,7 @@ err_free:
err:
amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
return r;
}
@@ -890,6 +892,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_bo_free_kernel(&bo, NULL, NULL);
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 2d51b7694d1f..5da04d45b637 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -560,10 +560,14 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
{
struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
+ int ret;
- amdgpu_virt_read_pf2vf_data(adev);
+ ret = amdgpu_virt_read_pf2vf_data(adev);
+ if (ret)
+ goto out;
amdgpu_virt_write_vf2pf_data(adev);
+out:
schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
}
@@ -571,8 +575,8 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
if (adev->virt.vf2pf_update_interval_ms != 0) {
DRM_INFO("clean up the vf2pf work item\n");
- flush_delayed_work(&adev->virt.vf2pf_work);
cancel_delayed_work_sync(&adev->virt.vf2pf_work);
+ adev->virt.vf2pf_update_interval_ms = 0;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0768c8686983..ad91c0c3c423 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -653,9 +653,11 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
if (!bo->parent)
continue;
- ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
+ ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
+ &vm->lru_bulk_move);
if (bo->shadow)
ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
+ &bo->shadow->tbo.mem,
&vm->lru_bulk_move);
}
spin_unlock(&ttm_bo_glob.lru_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index d2de2a720a3d..c89b66bb70e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -473,6 +473,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
for (i = 0; pages_left >= pages_per_node; ++i) {
unsigned long pages = rounddown_pow_of_two(pages_left);
+ /* Limit maximum size to 2GB due to SG table limitations */
+ pages = min(pages, (2UL << (30 - PAGE_SHIFT)));
+
r = drm_mm_insert_node_in_range(mm, &nodes[i], pages,
pages_per_node, 0,
place->fpfn, lpfn,
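Quick check on the 2 GB clamp above (my arithmetic): with 4 KiB pages, 2UL << (30 - PAGE_SHIFT) = 2 << 18 = 524288 pages, and 524288 * 4 KiB = 2 GiB per drm_mm node, which is the per-node ceiling the SG-table comment refers to.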
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 541ef6be390f..659b385b27b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -324,7 +324,7 @@ static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
- struct amdgpu_hive_info *hive = NULL, *tmp = NULL;
+ struct amdgpu_hive_info *hive = NULL;
int ret;
if (!adev->gmc.xgmi.hive_id)
@@ -337,11 +337,9 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
mutex_lock(&xgmi_mutex);
- if (!list_empty(&xgmi_hive_list)) {
- list_for_each_entry_safe(hive, tmp, &xgmi_hive_list, node) {
- if (hive->hive_id == adev->gmc.xgmi.hive_id)
- goto pro_end;
- }
+ list_for_each_entry(hive, &xgmi_hive_list, node) {
+ if (hive->hive_id == adev->gmc.xgmi.hive_id)
+ goto pro_end;
}
hive = kzalloc(sizeof(*hive), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
index 921a69abda55..5b90efd6f6d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
@@ -27,7 +27,6 @@
#include "athub/athub_2_0_0_offset.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_default.h"
-#include "navi10_enum.h"
#include "soc15_common.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
index 66c183ddd43e..7b1b18350bf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
@@ -26,7 +26,6 @@
#include "athub/athub_2_1_0_offset.h"
#include "athub/athub_2_1_0_sh_mask.h"
-#include "navi10_enum.h"
#include "soc15_common.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 13737b317f7c..4d6832cc7fb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1251,13 +1251,22 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev,
WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute);
}
-static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
+/**
+ * cik_asic_pci_config_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use PCI Config method to reset the GPU.
+ *
+ * Returns 0 for success.
+ */
+static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
{
struct kv_reset_save_regs kv_save = { 0 };
u32 i;
int r = -EINVAL;
- dev_info(adev->dev, "GPU pci config reset\n");
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
if (adev->flags & AMD_IS_APU)
kv_save_regs_for_reset(adev, &kv_save);
@@ -1285,26 +1294,6 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
kv_restore_regs_for_reset(adev, &kv_save);
- return r;
-}
-
-/**
- * cik_asic_pci_config_reset - soft reset GPU
- *
- * @adev: amdgpu_device pointer
- *
- * Use PCI Config method to reset the GPU.
- *
- * Returns 0 for success.
- */
-static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
-{
- int r;
-
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
- r = cik_gpu_pci_config_reset(adev);
-
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index da37f8a900af..307c01301c87 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -194,19 +194,30 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
- if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
- /* When a ring buffer overflow happen start parsing interrupt
- * from the last not overwritten vector (wptr + 16). Hopefully
- * this should allow us to catchup.
- */
- dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
- ih->rptr = (wptr + 16) & ih->ptr_mask;
- tmp = RREG32(mmIH_RB_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32(mmIH_RB_CNTL, tmp);
- }
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ /* Double check that the overflow wasn't already cleared. */
+ wptr = RREG32(mmIH_RB_WPTR);
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last vector that was not overwritten (wptr + 16). Hopefully
+ * this should allow us to catch up.
+ */
+ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
+ tmp = RREG32(mmIH_RB_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32(mmIH_RB_CNTL, tmp);
+
+out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index ffcc64ec6473..9810af712cc0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -294,7 +294,7 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
static const struct mode_size {
int w;
int h;
- } common_modes[21] = {
+ } common_modes[] = {
{ 640, 480},
{ 720, 480},
{ 800, 600},
@@ -312,13 +312,14 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
{1600, 1200},
{1920, 1080},
{1920, 1200},
+ {2560, 1440},
{4096, 3112},
{3656, 2664},
{3840, 2160},
{4096, 2160},
};
- for (i = 0; i < 21; i++) {
+ for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
drm_mode_probed_add(connector, mode);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 346963e3cf73..45d1172b7bff 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -38,7 +38,6 @@
#include "smuio/smuio_11_0_0_offset.h"
#include "smuio/smuio_11_0_0_sh_mask.h"
#include "navi10_enum.h"
-#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"
#include "soc15.h"
@@ -71,6 +70,11 @@
#define GB_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
#define GB_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define mmCGTS_TCC_DISABLE_gc_10_3 0x5006
+#define mmCGTS_TCC_DISABLE_gc_10_3_BASE_IDX 1
+#define mmCGTS_USER_TCC_DISABLE_gc_10_3 0x5007
+#define mmCGTS_USER_TCC_DISABLE_gc_10_3_BASE_IDX 1
+
#define mmCP_MEC_CNTL_Sienna_Cichlid 0x0f55
#define mmCP_MEC_CNTL_Sienna_Cichlid_BASE_IDX 0
#define mmRLC_SAFE_MODE_Sienna_Cichlid 0x4ca0
@@ -121,6 +125,7 @@
#define mmSPI_CONFIG_CNTL_Vangogh_BASE_IDX 1
#define mmGCR_GENERAL_CNTL_Vangogh 0x1580
#define mmGCR_GENERAL_CNTL_Vangogh_BASE_IDX 0
+#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh 0x0000FFFFL
#define mmCP_HYP_PFP_UCODE_ADDR 0x5814
#define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1
@@ -3778,9 +3783,6 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
break;
- case CHIP_VANGOGH:
- adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
- break;
default:
break;
}
@@ -4490,8 +4492,7 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
- hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
- ring->queue) ?
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
@@ -4936,8 +4937,15 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
{
/* TCCs are global (not instanced). */
- uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
- RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+ uint32_t tcc_disable;
+
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+ tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_gc_10_3) |
+ RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_gc_10_3);
+ } else {
+ tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
+ RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+ }
adev->gfx.config.tcc_disabled_mask =
REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
@@ -5701,7 +5709,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -5779,7 +5787,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -5856,7 +5864,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -6225,7 +6233,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -6533,8 +6541,7 @@ static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct
struct amdgpu_device *adev = ring->adev;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
- if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
- ring->queue)) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
@@ -7833,6 +7840,20 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable)
data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, data);
+
+ /*
+ * CGPG enablement requires programming the hysteresis value into
+ * RLC_PG_DELAY_3.CGCG_ACTIVE_BEFORE_CGPG, expressed in refclk counts.
+ * Note that the RLC FW has been modified to take 16 bits from
+ * RLC_PG_DELAY_3[15:0] as the hysteresis instead of just 8 bits.
+ *
+ * The RLC team recommends setting RLC_PG_DELAY_3 to 200us (0x4E20)
+ * as the starting point for CGPG enablement.
+ */
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && adev->asic_type == CHIP_VANGOGH) {
+ data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
+ WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data);
+ }
}
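Sanity check on the 200 us figure (my arithmetic, assuming the usual 100 MHz reference clock): 0x4E20 = 20000 refclk counts, and 20000 / 100 MHz = 200 us, which fits comfortably in the 16-bit RLC_PG_DELAY_3[15:0] field (maximum 65535 counts, roughly 655 us at that clock).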
static void gfx_v10_cntl_pg(struct amdgpu_device *adev, bool enable)
@@ -7894,6 +7915,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
break;
case CHIP_VANGOGH:
gfx_v10_cntl_pg(adev, enable);
+ amdgpu_gfx_off_ctrl(adev, enable);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 37639214cbbb..84d2eaa38101 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -29,6 +29,7 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
+#include "amdgpu_ring.h"
#include "vi.h"
#include "vi_structs.h"
#include "vid.h"
@@ -1923,8 +1924,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
- hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
- ring->queue) ?
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
@@ -4442,8 +4442,7 @@ static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *m
struct amdgpu_device *adev = ring->adev;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
- if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
- ring->queue)) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
@@ -6847,6 +6846,66 @@ static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
}
+
+/* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
+#define mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT 0x0000007f
+static void gfx_v8_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
+ uint32_t pipe, bool enable)
+{
+ uint32_t val;
+ uint32_t wcl_cs_reg;
+
+ val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT;
+
+ switch (pipe) {
+ case 0:
+ wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS0;
+ break;
+ case 1:
+ wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS1;
+ break;
+ case 2:
+ wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS2;
+ break;
+ case 3:
+ wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS3;
+ break;
+ default:
+ DRM_DEBUG("invalid pipe %d\n", pipe);
+ return;
+ }
+
+ amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
+
+}
+
+#define mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT 0x07ffffff
+static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t val;
+ int i;
+
+ /* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to limit
+ * the number of gfx waves. Setting it to 5 bits (0x1f) makes sure gfx only
+ * gets around 25% of gpu resources.
+ */
+ val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
+ amdgpu_ring_emit_wreg(ring, mmSPI_WCL_PIPE_PERCENT_GFX, val);
+
+ /* Restrict waves for normal/low priority compute queues as well
+ * to get the best QoS for high priority compute jobs.
+ *
+ * amdgpu controls only the 1st ME (CS pipes 0-3).
+ */
+ for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
+ if (i != ring->pipe)
+ gfx_v8_0_emit_wave_limit_cs(ring, i, enable);
+
+ }
+
+}
+
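Rough numbers behind the "around 25%" claim (my arithmetic, taking only the low 7 bits of the multiplier field as the comment describes): writing 0x1f against a default of 0x7f gives 31/127, roughly 24% of the default wave budget, i.e. about a quarter of GPU resources left for gfx while a high-priority compute job runs. The same reasoning applies to the gfx_v9 variant below.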
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
@@ -6930,7 +6989,9 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
- 7, /* gfx_v8_0_emit_mem_sync_compute */
+ 7 + /* gfx_v8_0_emit_mem_sync_compute */
+ 5 + /* gfx_v8_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
+ 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
.emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
@@ -6944,6 +7005,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
.emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
+ .emit_wave_limit = gfx_v8_0_emit_wave_limit,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 5f4805e4d04a..65db88bb6cbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -38,7 +38,6 @@
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
-#include "hdp/hdp_4_0_offset.h"
#include "soc15_common.h"
#include "clearstate_gfx9.h"
@@ -53,6 +52,7 @@
#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
+#include "asic_reg/gc/gc_9_0_default.h"
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_MEC_HPD_SIZE 4096
@@ -2228,8 +2228,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
- hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
- ring->queue) ?
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
return amdgpu_ring_init(adev, ring, 1024,
@@ -3391,9 +3390,7 @@ static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *m
struct amdgpu_device *adev = ring->adev;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
- if (amdgpu_gfx_is_high_priority_compute_queue(adev,
- ring->pipe,
- ring->queue)) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
@@ -6671,6 +6668,65 @@ static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
}
+static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
+ uint32_t pipe, bool enable)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t val;
+ uint32_t wcl_cs_reg;
+
+ /* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
+ val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT;
+
+ switch (pipe) {
+ case 0:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0);
+ break;
+ case 1:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1);
+ break;
+ case 2:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2);
+ break;
+ case 3:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3);
+ break;
+ default:
+ DRM_DEBUG("invalid pipe %d\n", pipe);
+ return;
+ }
+
+ amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
+
+}
+static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t val;
+ int i;
+
+
+ /* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to limit
+ * the number of gfx waves. Setting it to 5 bits (0x1f) makes sure gfx only
+ * gets around 25% of gpu resources.
+ */
+ val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
+ amdgpu_ring_emit_wreg(ring,
+ SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX),
+ val);
+
+ /* Restrict waves for normal/low priority compute queues as well
+ * to get the best QoS for high priority compute jobs.
+ *
+ * amdgpu controls only the 1st ME (CS pipes 0-3).
+ */
+ for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
+ if (i != ring->pipe)
+ gfx_v9_0_emit_wave_limit_cs(ring, i, enable);
+
+ }
+}
+
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
@@ -6760,7 +6816,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
- 7, /* gfx_v9_0_emit_mem_sync */
+ 7 + /* gfx_v9_0_emit_mem_sync */
+ 5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
+ 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6776,6 +6834,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
+ .emit_wave_limit = gfx_v9_0_emit_wave_limit,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 5648c48be77f..3b7c6c31fce1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -27,8 +27,6 @@
#include "gmc_v10_0.h"
#include "umc_v8_7.h"
-#include "hdp/hdp_5_0_0_offset.h"
-#include "hdp/hdp_5_0_0_sh_mask.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
@@ -312,7 +310,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
int r;
/* flush hdp cache */
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
/* For SRIOV run time, driver shouldn't access the register through MMIO
* Directly use kiq to do the vm invalidation instead
@@ -995,7 +993,6 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
int r;
bool value;
- u32 tmp;
if (adev->gart.bo == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -1014,15 +1011,10 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
- tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
- WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
-
- tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
- WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
+ adev->hdp.funcs->init_registers(adev);
/* Flush HDP after it is initialized */
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index e22268f9dba7..3686e777c76c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -31,8 +31,6 @@
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"
-#include "hdp/hdp_4_0_offset.h"
-#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
@@ -241,60 +239,44 @@ static const char *mmhub_client_ids_vega20[][2] = {
};
static const char *mmhub_client_ids_arcturus[][2] = {
+ [0][0] = "DBGU1",
+ [1][0] = "XDP",
[2][0] = "MP1",
- [3][0] = "MP0",
- [10][0] = "UTCL2",
- [13][0] = "OSS",
[14][0] = "HDP",
- [15][0] = "SDMA0",
- [32+15][0] = "SDMA1",
- [64+15][0] = "SDMA2",
- [96+15][0] = "SDMA3",
- [128+15][0] = "SDMA4",
- [160+11][0] = "JPEG",
- [160+12][0] = "VCN",
- [160+13][0] = "VCNU",
- [160+15][0] = "SDMA5",
- [192+10][0] = "UTCL2",
- [192+11][0] = "JPEG1",
- [192+12][0] = "VCN1",
- [192+13][0] = "VCN1U",
- [192+15][0] = "SDMA6",
- [224+15][0] = "SDMA7",
+ [171][0] = "JPEG",
+ [172][0] = "VCN",
+ [173][0] = "VCNU",
+ [203][0] = "JPEG1",
+ [204][0] = "VCN1",
+ [205][0] = "VCN1U",
+ [256][0] = "SDMA0",
+ [257][0] = "SDMA1",
+ [258][0] = "SDMA2",
+ [259][0] = "SDMA3",
+ [260][0] = "SDMA4",
+ [261][0] = "SDMA5",
+ [262][0] = "SDMA6",
+ [263][0] = "SDMA7",
+ [384][0] = "OSS",
[0][1] = "DBGU1",
[1][1] = "XDP",
[2][1] = "MP1",
- [3][1] = "MP0",
- [13][1] = "OSS",
[14][1] = "HDP",
- [15][1] = "SDMA0",
- [32+15][1] = "SDMA1",
- [64+15][1] = "SDMA2",
- [96+15][1] = "SDMA3",
- [128+15][1] = "SDMA4",
- [160+11][1] = "JPEG",
- [160+12][1] = "VCN",
- [160+13][1] = "VCNU",
- [160+15][1] = "SDMA5",
- [192+11][1] = "JPEG1",
- [192+12][1] = "VCN1",
- [192+13][1] = "VCN1U",
- [192+15][1] = "SDMA6",
- [224+15][1] = "SDMA7",
-};
-
-static const u32 golden_settings_vega10_hdp[] =
-{
- 0xf64, 0x0fffffff, 0x00000000,
- 0xf65, 0x0fffffff, 0x00000000,
- 0xf66, 0x0fffffff, 0x00000000,
- 0xf67, 0x0fffffff, 0x00000000,
- 0xf68, 0x0fffffff, 0x00000000,
- 0xf6a, 0x0fffffff, 0x00000000,
- 0xf6b, 0x0fffffff, 0x00000000,
- 0xf6c, 0x0fffffff, 0x00000000,
- 0xf6d, 0x0fffffff, 0x00000000,
- 0xf6e, 0x0fffffff, 0x00000000,
+ [171][1] = "JPEG",
+ [172][1] = "VCN",
+ [173][1] = "VCNU",
+ [203][1] = "JPEG1",
+ [204][1] = "VCN1",
+ [205][1] = "VCN1U",
+ [256][1] = "SDMA0",
+ [257][1] = "SDMA1",
+ [258][1] = "SDMA2",
+ [259][1] = "SDMA3",
+ [260][1] = "SDMA4",
+ [261][1] = "SDMA5",
+ [262][1] = "SDMA6",
+ [263][1] = "SDMA7",
+ [384][1] = "OSS",
};
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
@@ -1571,7 +1553,6 @@ static int gmc_v9_0_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool value;
int r, i;
- u32 tmp;
/* The sequence of these two function calls matters.*/
gmc_v9_0_init_golden_registers(adev);
@@ -1583,31 +1564,13 @@ static int gmc_v9_0_hw_init(void *handle)
WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
}
- amdgpu_device_program_register_sequence(adev,
- golden_settings_vega10_hdp,
- ARRAY_SIZE(golden_settings_vega10_hdp));
-
if (adev->mmhub.funcs->update_power_gating)
adev->mmhub.funcs->update_power_gating(adev, true);
- switch (adev->asic_type) {
- case CHIP_ARCTURUS:
- WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
- break;
- default:
- break;
- }
-
- WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
-
- tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
- WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
-
- WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
- WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
+ adev->hdp.funcs->init_registers(adev);
/* After HDP is initialized, flush HDP.*/
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
new file mode 100644
index 000000000000..e46621fed5b9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "hdp_v4_0.h"
+#include "amdgpu_ras.h"
+
+#include "hdp/hdp_4_0_offset.h"
+#include "hdp/hdp_4_0_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+/* for Vega20 register name change */
+#define mmHDP_MEM_POWER_CTRL 0x00d4
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
+#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
+
+static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg)
+ WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ else
+ amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+}
+
+static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg)
+ WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+ else
+ amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+ HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
+}
+
+static void hdp_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
+ return;
+ /* read back hdp ras counter to reset it to 0 */
+ RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
+}
+
+static void hdp_v4_0_update_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ if (adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_VEGA12 ||
+ adev->asic_type == CHIP_RAVEN) {
+ def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+ data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+ else
+ data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+
+ if (def != data)
+ WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
+ } else {
+ def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+ data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
+ else
+ data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
+
+ if (def != data)
+ WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
+ }
+}
+
+static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags)
+{
+ int data;
+
+ /* AMD_CG_SUPPORT_HDP_LS */
+ data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+ if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_LS;
+}
+
+static void hdp_v4_0_init_registers(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
+ break;
+ default:
+ break;
+ }
+
+ WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
+
+ WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
+ WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
+}
+
+const struct amdgpu_hdp_funcs hdp_v4_0_funcs = {
+ .flush_hdp = hdp_v4_0_flush_hdp,
+ .invalidate_hdp = hdp_v4_0_invalidate_hdp,
+ .reset_ras_error_count = hdp_v4_0_reset_ras_error_count,
+ .update_clock_gating = hdp_v4_0_update_clock_gating,
+ .get_clock_gating_state = hdp_v4_0_get_clockgating_state,
+ .init_registers = hdp_v4_0_init_registers,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h
new file mode 100644
index 000000000000..d1e6399e8c46
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HDP_V4_0_H__
+#define __HDP_V4_0_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_hdp_funcs hdp_v4_0_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
new file mode 100644
index 000000000000..7a15e669b68d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "hdp_v5_0.h"
+
+#include "hdp/hdp_5_0_0_offset.h"
+#include "hdp/hdp_5_0_0_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg)
+ WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ else
+ amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+}
+
+static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+ WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+ } else {
+ amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+ HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
+ }
+}
+
+static void hdp_v5_0_update_mem_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl, hdp_clk_cntl1;
+ uint32_t hdp_mem_pwr_cntl;
+
+ if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)))
+ return;
+
+ hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
+ hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
+
+ /* Before doing the clock/power mode switch,
+ * force on the IPH & RC clocks */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ IPH_MEM_CLK_SOFT_OVERRIDE, 1);
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 1);
+ WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
+
+ /* HDP 5.0 doesn't support dynamic power mode switching,
+ * so disable clock and power gating before making any change */
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_SD_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 0);
+ WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+
+ /* only one clock gating mode (LS/DS/SD) can be enabled */
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_LS_EN, enable);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, enable);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_DS_EN, enable);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, enable);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_SD_EN, enable);
+ /* RC should not use shut-down mode, fall back to DS */
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, enable);
+ }
+
+ /* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
+ * be set for SRAM LS/DS/SD */
+ if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ IPH_MEM_POWER_CTRL_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 1);
+ }
+
+ WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+
+ /* restore IPH & RC clock override after clock/power mode changing */
+ WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
+}
+
+static void hdp_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl;
+
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
+ return;
+
+ hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
+
+ if (enable) {
+ hdp_clk_cntl &=
+ ~(uint32_t)
+ (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
+ } else {
+ hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
+ }
+
+ WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_0_update_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ hdp_v5_0_update_mem_power_gating(adev, enable);
+ hdp_v5_0_update_medium_grain_clock_gating(adev, enable);
+}
+
+static void hdp_v5_0_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags)
+{
+ uint32_t tmp;
+
+ /* AMD_CG_SUPPORT_HDP_MGCG */
+ tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
+ if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
+ *flags |= AMD_CG_SUPPORT_HDP_MGCG;
+
+ /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
+ tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
+ if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_LS;
+ else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_DS;
+ else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_SD;
+}
+
+static void hdp_v5_0_init_registers(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
+ tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
+ WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
+}
+
+const struct amdgpu_hdp_funcs hdp_v5_0_funcs = {
+ .flush_hdp = hdp_v5_0_flush_hdp,
+ .invalidate_hdp = hdp_v5_0_invalidate_hdp,
+ .update_clock_gating = hdp_v5_0_update_clock_gating,
+ .get_clock_gating_state = hdp_v5_0_get_clockgating_state,
+ .init_registers = hdp_v5_0_init_registers,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h
new file mode 100644
index 000000000000..2d5ec2b419f3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HDP_V5_0_H__
+#define __HDP_V5_0_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_hdp_funcs hdp_v5_0_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 37d8b6ca4dab..cc957471f31e 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -194,19 +194,29 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
- if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
- /* When a ring buffer overflow happen start parsing interrupt
- * from the last not overwritten vector (wptr + 16). Hopefully
- * this should allow us to catchup.
- */
- dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
- ih->rptr = (wptr + 16) & ih->ptr_mask;
- tmp = RREG32(mmIH_RB_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32(mmIH_RB_CNTL, tmp);
- }
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ /* Double check that the overflow wasn't already cleared. */
+ wptr = RREG32(mmIH_RB_WPTR);
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last not-overwritten vector (wptr + 16). Hopefully
+ * this should allow us to catch up.
+ */
+ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
+ tmp = RREG32(mmIH_RB_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32(mmIH_RB_CNTL, tmp);
+
+
+out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 985e454463e1..7f30629f21a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -554,7 +554,7 @@ static int mes_v10_1_allocate_eop_buf(struct amdgpu_device *adev)
return r;
}
- memset(eop, 0, adev->mes.eop_gpu_obj->tbo.mem.size);
+ memset(eop, 0, adev->mes.eop_gpu_obj->tbo.base.size);
amdgpu_bo_kunmap(adev->mes.eop_gpu_obj);
amdgpu_bo_unreserve(adev->mes.eop_gpu_obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index 1961745e89c7..ab9be5ad5a5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -531,12 +531,12 @@ mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
- data1 &= !(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ data1 &= ~(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
- data2 &= !(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+ data2 &= ~(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 7767ccca526b..3ee481557fc9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -255,6 +255,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
if (!down_read_trylock(&adev->reset_sem))
return;
+ amdgpu_virt_fini_data_exchange(adev);
atomic_set(&adev->in_gpu_reset, 1);
do {
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index dd5c1e6ce009..48e588d3c409 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -276,6 +276,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
if (!down_read_trylock(&adev->reset_sem))
return;
+ amdgpu_virt_fini_data_exchange(adev);
atomic_set(&adev->in_gpu_reset, 1);
do {
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 7ba229e43799..f4e4040bbd25 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -40,6 +40,53 @@
static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
/**
+ * navi10_ih_init_register_offset - Initialize register offset for ih rings
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize register offsets for ih rings (NAVI10).
+ */
+static void navi10_ih_init_register_offset(struct amdgpu_device *adev)
+{
+ struct amdgpu_ih_regs *ih_regs;
+
+ if (adev->irq.ih.ring_size) {
+ ih_regs = &adev->irq.ih.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
+ ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
+ ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
+ }
+
+ if (adev->irq.ih1.ring_size) {
+ ih_regs = &adev->irq.ih1.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
+ }
+
+ if (adev->irq.ih2.ring_size) {
+ ih_regs = &adev->irq.ih2.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
+ }
+}
+
+/**
* force_update_wptr_for_self_int - Force update the wptr for self interrupt
*
* @adev: amdgpu_device pointer
@@ -82,133 +129,66 @@ force_update_wptr_for_self_int(struct amdgpu_device *adev,
}
/**
- * navi10_ih_enable_interrupts - Enable the interrupt ring buffer
+ * navi10_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
*
* @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ * @enable: true - enable the interrupts, false - disable the interrupts
*
- * Enable the interrupt ring buffer (NAVI10).
+ * Toggle the interrupt ring buffer (NAVI10)
*/
-static void navi10_ih_enable_interrupts(struct amdgpu_device *adev)
+static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ bool enable)
{
- u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
-
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
- }
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
- adev->irq.ih.enabled = true;
+ ih_regs = &ih->ih_regs;
- if (adev->irq.ih1.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
- RB_ENABLE, 1);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
- }
- adev->irq.ih1.enabled = true;
- }
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+ /* enable_intr field is only valid in ring0 */
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
+ WREG32(ih_regs->ih_rb_cntl, tmp);
- if (adev->irq.ih2.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
- RB_ENABLE, 1);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
- }
- adev->irq.ih2.enabled = true;
+ if (enable) {
+ ih->enabled = true;
+ } else {
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_rptr, 0);
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ ih->enabled = false;
+ ih->rptr = 0;
}
- if (adev->irq.ih_soft.ring_size)
- adev->irq.ih_soft.enabled = true;
+ return 0;
}
/**
- * navi10_ih_disable_interrupts - Disable the interrupt ring buffer
+ * navi10_ih_toggle_interrupts - Toggle all the available interrupt ring buffers
*
* @adev: amdgpu_device pointer
+ * @enable: enable or disable interrupt ring buffers
*
- * Disable the interrupt ring buffer (NAVI10).
+ * Toggle all the available interrupt ring buffers (NAVI10).
*/
-static void navi10_ih_disable_interrupts(struct amdgpu_device *adev)
+static int navi10_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable)
{
- u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
-
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
- }
-
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
- adev->irq.ih.enabled = false;
- adev->irq.ih.rptr = 0;
-
- if (adev->irq.ih1.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
- RB_ENABLE, 0);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
- }
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
- adev->irq.ih1.enabled = false;
- adev->irq.ih1.rptr = 0;
- }
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ int i;
+ int r;
- if (adev->irq.ih2.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
- RB_ENABLE, 0);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ r = navi10_ih_toggle_ring_interrupts(adev, ih[i], enable);
+ if (r)
+ return r;
}
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
- adev->irq.ih2.enabled = false;
- adev->irq.ih2.rptr = 0;
}
+ return 0;
}
static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
@@ -253,22 +233,49 @@ static uint32_t navi10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
return ih_doorbell_rtpr;
}
-static void navi10_ih_reroute_ih(struct amdgpu_device *adev)
+/**
+ * navi10_ih_enable_ring - enable an ih ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Enable an ih ring buffer (NAVI10)
+ */
+static int navi10_ih_enable_ring(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
+ struct amdgpu_ih_regs *ih_regs;
uint32_t tmp;
- /* Reroute to IH ring 1 for VMC */
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
-
- /* Reroute IH ring 1 for UMC */
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+ ih_regs = &ih->ih_regs;
+
+ /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
+ WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
+ WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = navi10_ih_rb_cntl(ih, tmp);
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
+ if (ih == &adev->irq.ih1) {
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+ }
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+
+ if (ih == &adev->irq.ih) {
+ /* set the ih ring 0 writeback address whether it's enabled or not */
+ WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
+ WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
+ }
+
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ WREG32(ih_regs->ih_rb_rptr, 0);
+
+ WREG32(ih_regs->ih_doorbell_rptr, navi10_ih_doorbell_rptr(ih));
+
+ return 0;
}
/**
@@ -284,36 +291,21 @@ static void navi10_ih_reroute_ih(struct amdgpu_device *adev)
*/
static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
- struct amdgpu_ih_ring *ih = &adev->irq.ih;
- u32 ih_rb_cntl, ih_chicken;
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ u32 ih_chicken;
u32 tmp;
+ int ret;
+ int i;
/* disable irqs */
- navi10_ih_disable_interrupts(adev);
+ ret = navi10_ih_toggle_interrupts(adev, false);
+ if (ret)
+ return ret;
adev->nbio.funcs->ih_control(adev);
- /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
- ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
- !!adev->irq.msi_enabled);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
- }
- if (adev->irq.ih1.ring_size)
- navi10_ih_reroute_ih(adev);
-
if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
- if (ih->use_bus_addr) {
+ if (ih[0]->use_bus_addr) {
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
@@ -334,77 +326,17 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
}
}
- /* set the writeback address whether it's enabled or not */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
- lower_32_bits(ih->wptr_addr));
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
- upper_32_bits(ih->wptr_addr) & 0xFFFF);
-
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
- navi10_ih_doorbell_rptr(ih));
-
- adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
- ih->doorbell_index);
-
- ih = &adev->irq.ih1;
- if (ih->ring_size) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
- (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
- WPTR_OVERFLOW_ENABLE, 0);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
- RB_FULL_DRAIN_ENABLE, 1);
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ ret = navi10_ih_enable_ring(adev, ih[i]);
+ if (ret)
+ return ret;
}
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
- navi10_ih_doorbell_rptr(ih));
- }
-
- ih = &adev->irq.ih2;
- if (ih->ring_size) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
- (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
-
- if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
- }
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
- navi10_ih_doorbell_rptr(ih));
}
+ /* update doorbell range for ih ring 0 */
+ adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
+ ih[0]->doorbell_index);
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
@@ -418,10 +350,15 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
pci_set_master(adev->pdev);
/* enable interrupts */
- navi10_ih_enable_interrupts(adev);
+ ret = navi10_ih_toggle_interrupts(adev, true);
+ if (ret)
+ return ret;
/* enable wptr force update for self int */
force_update_wptr_for_self_int(adev, 0, 8, true);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -435,7 +372,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
static void navi10_ih_irq_disable(struct amdgpu_device *adev)
{
force_update_wptr_for_self_int(adev, 0, 8, false);
- navi10_ih_disable_interrupts(adev);
+ navi10_ih_toggle_interrupts(adev, false);
/* Wait and acknowledge irq */
mdelay(1);
@@ -455,23 +392,16 @@ static void navi10_ih_irq_disable(struct amdgpu_device *adev)
static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
- u32 wptr, reg, tmp;
+ u32 wptr, tmp;
+ struct amdgpu_ih_regs *ih_regs;
wptr = le32_to_cpu(*ih->wptr_cpu);
+ ih_regs = &ih->ih_regs;
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
- if (ih == &adev->irq.ih)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
- else if (ih == &adev->irq.ih1)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
- else if (ih == &adev->irq.ih2)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
- else
- BUG();
-
- wptr = RREG32_NO_KIQ(reg);
+ wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
@@ -486,68 +416,14 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
wptr, ih->rptr, tmp);
ih->rptr = tmp;
- if (ih == &adev->irq.ih)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
- else if (ih == &adev->irq.ih1)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- else if (ih == &adev->irq.ih2)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- else
- BUG();
-
- tmp = RREG32_NO_KIQ(reg);
+ tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32_NO_KIQ(reg, tmp);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
/**
- * navi10_ih_decode_iv - decode an interrupt vector
- *
- * @adev: amdgpu_device pointer
- * @ih: IH ring buffer to decode
- * @entry: IV entry to place decoded information into
- *
- * Decodes the interrupt vector at the current rptr
- * position and also advance the position.
- */
-static void navi10_ih_decode_iv(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih,
- struct amdgpu_iv_entry *entry)
-{
- /* wptr/rptr are in bytes! */
- u32 ring_index = ih->rptr >> 2;
- uint32_t dw[8];
-
- dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
- dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
- dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
- dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
- dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
- dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
- dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
- dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
-
- entry->client_id = dw[0] & 0xff;
- entry->src_id = (dw[0] >> 8) & 0xff;
- entry->ring_id = (dw[0] >> 16) & 0xff;
- entry->vmid = (dw[0] >> 24) & 0xf;
- entry->vmid_src = (dw[0] >> 31);
- entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
- entry->timestamp_src = dw[2] >> 31;
- entry->pasid = dw[3] & 0xffff;
- entry->pasid_src = dw[3] >> 31;
- entry->src_data[0] = dw[4];
- entry->src_data[1] = dw[5];
- entry->src_data[2] = dw[6];
- entry->src_data[3] = dw[7];
-
- /* wptr/rptr are in bytes! */
- ih->rptr += 32;
-}
-
-/**
* navi10_ih_irq_rearm - rearm IRQ if lost
*
* @adev: amdgpu_device pointer
@@ -557,22 +433,15 @@ static void navi10_ih_decode_iv(struct amdgpu_device *adev,
static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
- uint32_t reg_rptr = 0;
uint32_t v = 0;
uint32_t i = 0;
+ struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
- else if (ih == &adev->irq.ih1)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
- else if (ih == &adev->irq.ih2)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
- else
- return;
+ ih_regs = &ih->ih_regs;
/* Rearm IRQ / re-write doorbell if doorbell write is lost */
for (i = 0; i < MAX_REARM_RETRY; i++) {
- v = RREG32_NO_KIQ(reg_rptr);
+ v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
if ((v < ih->ring_size) && (v != ih->rptr))
WDOORBELL32(ih->doorbell_index, ih->rptr);
else
@@ -591,6 +460,8 @@ static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
static void navi10_ih_set_rptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
+ struct amdgpu_ih_regs *ih_regs;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
@@ -598,12 +469,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
navi10_ih_irq_rearm(adev, ih);
- } else if (ih == &adev->irq.ih) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
- } else if (ih == &adev->irq.ih1) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
- } else if (ih == &adev->irq.ih2) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
+ } else {
+ ih_regs = &ih->ih_regs;
+ WREG32(ih_regs->ih_rb_rptr, ih->rptr);
}
}
@@ -685,23 +553,8 @@ static int navi10_ih_sw_init(void *handle)
adev->irq.ih1.ring_size = 0;
adev->irq.ih2.ring_size = 0;
- if (adev->asic_type < CHIP_NAVI10) {
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
- if (r)
- return r;
-
- adev->irq.ih1.use_doorbell = true;
- adev->irq.ih1.doorbell_index =
- (adev->doorbell_index.ih + 1) << 1;
-
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
- if (r)
- return r;
-
- adev->irq.ih2.use_doorbell = true;
- adev->irq.ih2.doorbell_index =
- (adev->doorbell_index.ih + 2) << 1;
- }
+ /* initialize ih control registers offset */
+ navi10_ih_init_register_offset(adev);
r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
if (r)
@@ -717,6 +570,7 @@ static int navi10_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
amdgpu_ih_ring_fini(adev, &adev->irq.ih);
@@ -848,7 +702,7 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs navi10_ih_funcs = {
.get_wptr = navi10_ih_get_wptr,
- .decode_iv = navi10_ih_decode_iv,
+ .decode_iv = amdgpu_ih_decode_iv_helper,
.set_rptr = navi10_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index b5c3db16c2b0..05ddec7ba7e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -34,6 +34,14 @@
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
#define smnPCIE_LC_CNTL 0x11140280
+#define smnPCIE_LC_CNTL3 0x111402d4
+#define smnPCIE_LC_CNTL6 0x111402ec
+#define smnPCIE_LC_CNTL7 0x111402f0
+#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c
+#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123538
+#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324
+#define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4
+#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
#define mmBIF_SDMA2_DOORBELL_RANGE 0x01d6
#define mmBIF_SDMA2_DOORBELL_RANGE_BASE_IDX 2
@@ -80,15 +88,6 @@ static void nbio_v2_3_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-static void nbio_v2_3_hdp_flush(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-}
-
static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE);
@@ -359,6 +358,111 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
WREG32_PCIE(smnPCIE_LC_CNTL, data);
}
+static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
+ WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB);
+
+ def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2);
+ data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2, data);
+
+ def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
+ data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);
+
+ def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+}
+
+static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL7);
+ data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL7, data);
+
+ def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
+ data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
+ data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
+ data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5);
+ data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);
+
+ def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);
+
+ def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2);
+ data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
+ PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
+ data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL6);
+ data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK |
+ PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL6, data);
+
+ nbio_v2_3_program_ltr(adev);
+
+ def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
+ data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
+ data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5);
+ data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+ data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+}
+
const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
@@ -366,7 +470,6 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.get_pcie_data_offset = nbio_v2_3_get_pcie_data_offset,
.get_rev_id = nbio_v2_3_get_rev_id,
.mc_access_enable = nbio_v2_3_mc_access_enable,
- .hdp_flush = nbio_v2_3_hdp_flush,
.get_memsize = nbio_v2_3_get_memsize,
.sdma_doorbell_range = nbio_v2_3_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v2_3_vcn_doorbell_range,
@@ -380,4 +483,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.init_registers = nbio_v2_3_init_registers,
.remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
.enable_aspm = nbio_v2_3_enable_aspm,
+ .program_aspm = nbio_v2_3_program_aspm,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index d2f1fe55d388..83ea063388fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -29,6 +29,15 @@
#include "nbio/nbio_6_1_sh_mask.h"
#include "nbio/nbio_6_1_smn.h"
#include "vega10_enum.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+static void nbio_v6_1_remap_hdp_registers(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
@@ -50,18 +59,6 @@ static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_SOC15_NO_KIQ(NBIO, 0,
- mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL,
- 0);
- else
- amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
- NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
-}
-
static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE);
@@ -266,7 +263,6 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
.get_pcie_data_offset = nbio_v6_1_get_pcie_data_offset,
.get_rev_id = nbio_v6_1_get_rev_id,
.mc_access_enable = nbio_v6_1_mc_access_enable,
- .hdp_flush = nbio_v6_1_hdp_flush,
.get_memsize = nbio_v6_1_get_memsize,
.sdma_doorbell_range = nbio_v6_1_sdma_doorbell_range,
.enable_doorbell_aperture = nbio_v6_1_enable_doorbell_aperture,
@@ -277,4 +273,5 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
.get_clockgating_state = nbio_v6_1_get_clockgating_state,
.ih_control = nbio_v6_1_ih_control,
.init_registers = nbio_v6_1_init_registers,
+ .remap_hdp_registers = nbio_v6_1_remap_hdp_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index ae685813c419..3c00666a13e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -60,15 +60,6 @@ static void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-}
-
static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
@@ -292,7 +283,6 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
.get_pcie_data_offset = nbio_v7_0_get_pcie_data_offset,
.get_rev_id = nbio_v7_0_get_rev_id,
.mc_access_enable = nbio_v7_0_mc_access_enable,
- .hdp_flush = nbio_v7_0_hdp_flush,
.get_memsize = nbio_v7_0_get_memsize,
.sdma_doorbell_range = nbio_v7_0_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v7_0_vcn_doorbell_range,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
index aa36022670f9..598ce0e93627 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
@@ -56,15 +56,6 @@ static void nbio_v7_2_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0);
}
-static void nbio_v7_2_hdp_flush(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-}
-
static u32 nbio_v7_2_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_CONFIG_MEMSIZE);
@@ -325,7 +316,6 @@ const struct amdgpu_nbio_funcs nbio_v7_2_funcs = {
.get_pcie_port_data_offset = nbio_v7_2_get_pcie_port_data_offset,
.get_rev_id = nbio_v7_2_get_rev_id,
.mc_access_enable = nbio_v7_2_mc_access_enable,
- .hdp_flush = nbio_v7_2_hdp_flush,
.get_memsize = nbio_v7_2_get_memsize,
.sdma_doorbell_range = nbio_v7_2_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v7_2_vcn_doorbell_range,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index eadc9526d33f..4bc1d1434065 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -82,15 +82,6 @@ static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-static void nbio_v7_4_hdp_flush(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-}
-
static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
@@ -541,7 +532,6 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset,
.get_rev_id = nbio_v7_4_get_rev_id,
.mc_access_enable = nbio_v7_4_mc_access_enable,
- .hdp_flush = nbio_v7_4_hdp_flush,
.get_memsize = nbio_v7_4_get_memsize,
.sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v7_4_vcn_doorbell_range,
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 6bee3677394a..c625c5d8ed89 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -38,9 +38,6 @@
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
-#include "hdp/hdp_5_0_0_offset.h"
-#include "hdp/hdp_5_0_0_sh_mask.h"
-#include "smuio/smuio_11_0_0_offset.h"
#include "mp/mp_11_0_offset.h"
#include "soc15.h"
@@ -50,6 +47,7 @@
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v7_2.h"
+#include "hdp_v5_0.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
@@ -62,6 +60,8 @@
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"
+#include "smuio_v11_0.h"
+#include "smuio_v11_0_6.h"
static const struct amd_ip_funcs nv_common_ip_funcs;
@@ -203,6 +203,7 @@ static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
{
u32 *dw_ptr;
u32 i, length_dw;
+ u32 rom_index_offset, rom_data_offset;
if (bios == NULL)
return false;
@@ -215,11 +216,16 @@ static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
dw_ptr = (u32 *)bios;
length_dw = ALIGN(length_bytes, 4) / 4;
+ rom_index_offset =
+ adev->smuio.funcs->get_rom_index_offset(adev);
+ rom_data_offset =
+ adev->smuio.funcs->get_rom_data_offset(adev);
+
/* set rom index to 0 */
- WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
+ WREG32(rom_index_offset, 0);
/* read out the rom data */
for (i = 0; i < length_dw; i++)
- dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
+ dw_ptr[i] = RREG32(rom_data_offset);
return true;
}
@@ -336,6 +342,38 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
return ret;
}
+static int nv_asic_mode2_reset(struct amdgpu_device *adev)
+{
+ u32 i;
+ int ret = 0;
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+
+ /* disable BM */
+ pci_clear_master(adev->pdev);
+
+ amdgpu_device_cache_pci_state(adev->pdev);
+
+ ret = amdgpu_dpm_mode2_reset(adev);
+ if (ret)
+ dev_err(adev->dev, "GPU mode2 reset failed\n");
+
+ amdgpu_device_load_pci_state(adev->pdev);
+
+ /* wait for asic to come out of reset */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
+
+ if (memsize != 0xffffffff)
+ break;
+ udelay(1);
+ }
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+
+ return ret;
+}
+
static bool nv_asic_supports_baco(struct amdgpu_device *adev)
{
struct smu_context *smu = &adev->smu;
@@ -352,7 +390,9 @@ nv_asic_reset_method(struct amdgpu_device *adev)
struct smu_context *smu = &adev->smu;
if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
- amdgpu_reset_method == AMD_RESET_METHOD_BACO)
+ amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
+ amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
+ amdgpu_reset_method == AMD_RESET_METHOD_PCI)
return amdgpu_reset_method;
if (amdgpu_reset_method != -1)
@@ -360,6 +400,8 @@ nv_asic_reset_method(struct amdgpu_device *adev)
amdgpu_reset_method);
switch (adev->asic_type) {
+ case CHIP_VANGOGH:
+ return AMD_RESET_METHOD_MODE2;
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
@@ -377,7 +419,16 @@ static int nv_asic_reset(struct amdgpu_device *adev)
int ret = 0;
struct smu_context *smu = &adev->smu;
- if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+ /* skip reset on vangogh for now */
+ if (adev->asic_type == CHIP_VANGOGH)
+ return 0;
+
+ switch (nv_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_PCI:
+ dev_info(adev->dev, "PCI reset\n");
+ ret = amdgpu_device_pci_reset(adev);
+ break;
+ case AMD_RESET_METHOD_BACO:
dev_info(adev->dev, "BACO reset\n");
ret = smu_baco_enter(smu);
@@ -386,9 +437,15 @@ static int nv_asic_reset(struct amdgpu_device *adev)
ret = smu_baco_exit(smu);
if (ret)
return ret;
- } else {
+ break;
+ case AMD_RESET_METHOD_MODE2:
+ dev_info(adev->dev, "MODE2 reset\n");
+ ret = nv_asic_mode2_reset(adev);
+ break;
+ default:
dev_info(adev->dev, "MODE1 reset\n");
ret = nv_asic_mode1_reset(adev);
+ break;
}
return ret;
@@ -423,11 +480,14 @@ static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
static void nv_program_aspm(struct amdgpu_device *adev)
{
-
- if (amdgpu_aspm == 0)
+ if (amdgpu_aspm != 1)
return;
- /* todo */
+ if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
+ !(adev->flags & AMD_IS_APU) &&
+ (adev->nbio.funcs->program_aspm))
+ adev->nbio.funcs->program_aspm(adev);
+
}
static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
@@ -498,7 +558,8 @@ static bool nv_is_headless_sku(struct pci_dev *pdev)
{
if ((pdev->device == 0x731E &&
(pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
- (pdev->device == 0x7340 && pdev->revision == 0xC9))
+ (pdev->device == 0x7340 && pdev->revision == 0xC9) ||
+ (pdev->device == 0x7360 && pdev->revision == 0xC7))
return true;
return false;
}
@@ -514,6 +575,12 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
adev->nbio.funcs = &nbio_v2_3_funcs;
adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
}
+ adev->hdp.funcs = &hdp_v5_0_funcs;
+
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+ adev->smuio.funcs = &smuio_v11_0_6_funcs;
+ else
+ adev->smuio.funcs = &smuio_v11_0_funcs;
if (adev->asic_type == CHIP_SIENNA_CICHLID)
adev->gmc.xgmi.supported = true;
@@ -568,7 +635,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ if (!nv_is_headless_sku(adev->pdev))
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
break;
@@ -669,22 +737,6 @@ static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
return adev->nbio.funcs->get_rev_id(adev);
}
-static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
-{
- adev->nbio.funcs->hdp_flush(adev, ring);
-}
-
-static void nv_invalidate_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
- } else {
- amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
- HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
- }
-}
-
static bool nv_need_full_reset(struct amdgpu_device *adev)
{
return true;
@@ -768,10 +820,10 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
* The ASPM function is not fully enabled and verified on
* Navi yet. Temporarily skip this until ASPM enabled.
*/
-#if 0
- if (adev->nbio.funcs->enable_aspm)
+ if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
+ !(adev->flags & AMD_IS_APU) &&
+ (adev->nbio.funcs->enable_aspm))
adev->nbio.funcs->enable_aspm(adev, !enter);
-#endif
return 0;
}
@@ -788,8 +840,6 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.set_uvd_clocks = &nv_set_uvd_clocks,
.set_vce_clocks = &nv_set_vce_clocks,
.get_config_memsize = &nv_get_config_memsize,
- .flush_hdp = &nv_flush_hdp,
- .invalidate_hdp = &nv_invalidate_hdp,
.init_doorbell_index = &nv_init_doorbell_index,
.need_full_reset = &nv_need_full_reset,
.need_reset_on_init = &nv_need_reset_on_init,
@@ -1080,120 +1130,6 @@ static int nv_common_soft_reset(void *handle)
return 0;
}
-static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
- bool enable)
-{
- uint32_t hdp_clk_cntl, hdp_clk_cntl1;
- uint32_t hdp_mem_pwr_cntl;
-
- if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
- AMD_CG_SUPPORT_HDP_DS |
- AMD_CG_SUPPORT_HDP_SD)))
- return;
-
- hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
- hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
-
- /* Before doing clock/power mode switch,
- * forced on IPH & RC clock */
- hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
- IPH_MEM_CLK_SOFT_OVERRIDE, 1);
- hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
- RC_MEM_CLK_SOFT_OVERRIDE, 1);
- WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
-
- /* HDP 5.0 doesn't support dynamic power mode switch,
- * disable clock and power gating before any changing */
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_CTRL_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_LS_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_DS_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_SD_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_CTRL_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_LS_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_DS_EN, 0);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_SD_EN, 0);
- WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
-
- /* only one clock gating mode (LS/DS/SD) can be enabled */
- if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_LS_EN, enable);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_LS_EN, enable);
- } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_DS_EN, enable);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_DS_EN, enable);
- } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_SD_EN, enable);
- /* RC should not use shut down mode, fallback to ds */
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
- HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_DS_EN, enable);
- }
-
- /* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
- * be set for SRAM LS/DS/SD */
- if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
- AMD_CG_SUPPORT_HDP_SD)) {
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- IPH_MEM_POWER_CTRL_EN, 1);
- hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
- RC_MEM_POWER_CTRL_EN, 1);
- }
-
- WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
-
- /* restore IPH & RC clock override after clock/power mode changing */
- WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
-}
-
-static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
- bool enable)
-{
- uint32_t hdp_clk_cntl;
-
- if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
- return;
-
- hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
-
- if (enable) {
- hdp_clk_cntl &=
- ~(uint32_t)
- (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
- } else {
- hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
- }
-
- WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
-}
-
static int nv_common_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -1213,9 +1149,9 @@ static int nv_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
- nv_update_hdp_mem_power_gating(adev,
- state == AMD_CG_STATE_GATE);
- nv_update_hdp_clock_gating(adev,
+ adev->hdp.funcs->update_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ adev->smuio.funcs->update_rom_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
default:
@@ -1234,31 +1170,15 @@ static int nv_common_set_powergating_state(void *handle,
static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- uint32_t tmp;
if (amdgpu_sriov_vf(adev))
*flags = 0;
adev->nbio.funcs->get_clockgating_state(adev, flags);
- /* AMD_CG_SUPPORT_HDP_MGCG */
- tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
- if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
- HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
- *flags |= AMD_CG_SUPPORT_HDP_MGCG;
-
- /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
- tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
- if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
- *flags |= AMD_CG_SUPPORT_HDP_LS;
- else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
- *flags |= AMD_CG_SUPPORT_HDP_DS;
- else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
- *flags |= AMD_CG_SUPPORT_HDP_SD;
+ adev->hdp.funcs->get_clock_gating_state(adev, flags);
+
+ adev->smuio.funcs->get_clock_gating_state(adev, flags);
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index d7f92634eba2..4b1cc5e9ee92 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -92,8 +92,6 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
(uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
- adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
-
adev->psp.ta_dtm_ucode_version =
le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
adev->psp.ta_dtm_ucode_size =
@@ -101,6 +99,16 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
adev->psp.ta_dtm_start_addr =
(uint8_t *)adev->psp.ta_hdcp_start_addr +
le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+
+ adev->psp.ta_securedisplay_ucode_version =
+ le32_to_cpu(ta_hdr->ta_securedisplay_ucode_version);
+ adev->psp.ta_securedisplay_ucode_size =
+ le32_to_cpu(ta_hdr->ta_securedisplay_size_bytes);
+ adev->psp.ta_securedisplay_start_addr =
+ (uint8_t *)adev->psp.ta_hdcp_start_addr +
+ le32_to_cpu(ta_hdr->ta_securedisplay_offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index bd4248c93c49..c325d6f53a71 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -392,37 +392,6 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
return ret;
}
-static void psp_v11_0_reroute_ih(struct psp_context *psp)
-{
- struct amdgpu_device *adev = psp->adev;
- uint32_t tmp;
-
- /* Change IH ring for VMC */
- tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
- mdelay(20);
- psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
-
- /* Change IH ring for UMC */
- tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
- mdelay(20);
- psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
-}
-
static int psp_v11_0_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -430,11 +399,6 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
- if ((!amdgpu_sriov_vf(adev)) &&
- !(adev->asic_type >= CHIP_SIENNA_CICHLID &&
- adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
- psp_v11_0_reroute_ih(psp);
-
ring = &psp->km_ring;
ring->ring_type = ring_type;
@@ -726,7 +690,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->nbio.funcs->hdp_flush(adev, NULL);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
vfree(buf);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index ce56e93c6886..c8c22c1d1e65 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -46,7 +46,6 @@
#include "sdma6/sdma6_4_2_2_sh_mask.h"
#include "sdma7/sdma7_4_2_2_offset.h"
#include "sdma7/sdma7_4_2_2_sh_mask.h"
-#include "hdp/hdp_4_0_offset.h"
#include "sdma0/sdma0_4_1_default.h"
#include "soc15_common.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index b208b81005bb..d345e324837d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -32,7 +32,6 @@
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
-#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index f1ba36a094da..690a5090475a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -119,15 +119,7 @@ static int sdma_v5_2_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
static void sdma_v5_2_destroy_inst_ctx(struct amdgpu_device *adev)
{
- int i;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
-
- if (adev->asic_type == CHIP_SIENNA_CICHLID)
- break;
- }
+ release_firmware(adev->sdma.instance[0].fw);
memset((void *)adev->sdma.instance, 0,
sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
@@ -185,23 +177,10 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
- for (i = 1; i < adev->sdma.num_instances; i++) {
- if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
- adev->asic_type <= CHIP_DIMGREY_CAVEFISH) {
- memcpy((void *)&adev->sdma.instance[i],
- (void *)&adev->sdma.instance[0],
- sizeof(struct amdgpu_sdma_instance));
- } else {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[i]);
- if (err)
- goto out;
- }
- }
+ for (i = 1; i < adev->sdma.num_instances; i++)
+ memcpy((void *)&adev->sdma.instance[i],
+ (void *)&adev->sdma.instance[0],
+ sizeof(struct amdgpu_sdma_instance));
DRM_DEBUG("psp_load == '%s'\n",
adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 3cf0589bfea5..6b5cf7882a12 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1270,7 +1270,7 @@ static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
u32 i;
int r = -EINVAL;
- dev_info(adev->dev, "GPU pci config reset\n");
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
/* set mclk/sclk to bypass */
si_set_clk_bypass_mode(adev);
@@ -1294,20 +1294,6 @@ static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
}
udelay(1);
}
-
- return r;
-}
-
-static int si_asic_reset(struct amdgpu_device *adev)
-{
- int r;
-
- dev_info(adev->dev, "PCI CONFIG reset\n");
-
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
- r = si_gpu_pci_config_reset(adev);
-
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return r;
@@ -1321,14 +1307,34 @@ static bool si_asic_supports_baco(struct amdgpu_device *adev)
static enum amd_reset_method
si_asic_reset_method(struct amdgpu_device *adev)
{
- if (amdgpu_reset_method != AMD_RESET_METHOD_LEGACY &&
- amdgpu_reset_method != -1)
+ if (amdgpu_reset_method == AMD_RESET_METHOD_PCI)
+ return amdgpu_reset_method;
+ else if (amdgpu_reset_method != AMD_RESET_METHOD_LEGACY &&
+ amdgpu_reset_method != -1)
dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
- amdgpu_reset_method);
+ amdgpu_reset_method);
return AMD_RESET_METHOD_LEGACY;
}
+static int si_asic_reset(struct amdgpu_device *adev)
+{
+ int r;
+
+ switch (si_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_PCI:
+ dev_info(adev->dev, "PCI reset\n");
+ r = amdgpu_device_pci_reset(adev);
+ break;
+ default:
+ dev_info(adev->dev, "PCI CONFIG reset\n");
+ r = si_gpu_pci_config_reset(adev);
+ break;
+ }
+
+ return r;
+}
+
static u32 si_get_config_memsize(struct amdgpu_device *adev)
{
return RREG32(mmCONFIG_MEMSIZE);
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c
new file mode 100644
index 000000000000..3a18dbb55c32
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "smuio_v11_0_6.h"
+#include "smuio/smuio_11_0_6_offset.h"
+#include "smuio/smuio_11_0_6_sh_mask.h"
+
+static u32 smuio_v11_0_6_get_rom_index_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
+}
+
+static u32 smuio_v11_0_6_get_rom_data_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
+}
+
+static void smuio_v11_0_6_update_rom_clock_gating(struct amdgpu_device *adev, bool enable)
+{
+ u32 def, data;
+
+ /* enabling/disabling ROM CG is not supported on APUs */
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ def = data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
+ data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
+ CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
+ else
+ data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
+ CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
+
+ if (def != data)
+ WREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0, data);
+}
+
+static void smuio_v11_0_6_get_clock_gating_state(struct amdgpu_device *adev, u32 *flags)
+{
+ u32 data;
+
+ /* CGTT_ROM_CLK_CTRL0 is not available for APU */
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0);
+ if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
+ *flags |= AMD_CG_SUPPORT_ROM_MGCG;
+}
+
+const struct amdgpu_smuio_funcs smuio_v11_0_6_funcs = {
+ .get_rom_index_offset = smuio_v11_0_6_get_rom_index_offset,
+ .get_rom_data_offset = smuio_v11_0_6_get_rom_data_offset,
+ .update_rom_clock_gating = smuio_v11_0_6_update_rom_clock_gating,
+ .get_clock_gating_state = smuio_v11_0_6_get_clock_gating_state,
+};
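
For reference, a minimal usage sketch of the new smuio callbacks, mirroring the nv_read_bios_from_rom() change earlier in this patch. The wrapper name read_vbios_via_smuio() is hypothetical; the callbacks are the ones installed through adev->smuio.funcs in this series.

/*
 * Usage sketch only -- the wrapper name is hypothetical.
 */
static bool read_vbios_via_smuio(struct amdgpu_device *adev,
				 u32 *dst, u32 length_dw)
{
	u32 rom_index_offset = adev->smuio.funcs->get_rom_index_offset(adev);
	u32 rom_data_offset = adev->smuio.funcs->get_rom_data_offset(adev);
	u32 i;

	/* set rom index to 0, then stream out the data words */
	WREG32(rom_index_offset, 0);
	for (i = 0; i < length_dw; i++)
		dst[i] = RREG32(rom_data_offset);

	return true;
}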
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h
new file mode 100644
index 000000000000..3c3f4ab0bc9b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMUIO_V11_0_6_H__
+#define __SMUIO_V11_0_6_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_smuio_funcs smuio_v11_0_6_funcs;
+
+#endif /* __SMUIO_V11_0_6_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 0b3516c4eefb..1221aa6b40a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -40,8 +40,6 @@
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
-#include "hdp/hdp_4_0_offset.h"
-#include "hdp/hdp_4_0_sh_mask.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
@@ -59,7 +57,9 @@
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
+#include "hdp_v4_0.h"
#include "vega10_ih.h"
+#include "vega20_ih.h"
#include "navi10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
@@ -83,14 +83,6 @@
#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
-/* for Vega20 register name change */
-#define mmHDP_MEM_POWER_CTRL 0x00d4
-#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
-#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
-#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
-#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
-#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
-
/*
* Indirect registers accessor
*/
@@ -241,6 +233,8 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
u32 reference_clock = adev->clock.spll.reference_freq;
+ if (adev->asic_type == CHIP_RENOIR)
+ return 10000;
if (adev->asic_type == CHIP_RAVEN)
return reference_clock / 4;
@@ -487,7 +481,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
- amdgpu_reset_method == AMD_RESET_METHOD_BACO)
+ amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
+ amdgpu_reset_method == AMD_RESET_METHOD_PCI)
return amdgpu_reset_method;
if (amdgpu_reset_method != -1)
@@ -532,15 +527,18 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
return 0;
switch (soc15_asic_reset_method(adev)) {
- case AMD_RESET_METHOD_BACO:
- dev_info(adev->dev, "BACO reset\n");
- return soc15_asic_baco_reset(adev);
- case AMD_RESET_METHOD_MODE2:
- dev_info(adev->dev, "MODE2 reset\n");
- return amdgpu_dpm_mode2_reset(adev);
- default:
- dev_info(adev->dev, "MODE1 reset\n");
- return soc15_asic_mode1_reset(adev);
+ case AMD_RESET_METHOD_PCI:
+ dev_info(adev->dev, "PCI reset\n");
+ return amdgpu_device_pci_reset(adev);
+ case AMD_RESET_METHOD_BACO:
+ dev_info(adev->dev, "BACO reset\n");
+ return soc15_asic_baco_reset(adev);
+ case AMD_RESET_METHOD_MODE2:
+ dev_info(adev->dev, "MODE2 reset\n");
+ return amdgpu_dpm_mode2_reset(adev);
+ default:
+ dev_info(adev->dev, "MODE1 reset\n");
+ return soc15_asic_mode1_reset(adev);
}
}
@@ -699,6 +697,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
adev->nbio.funcs = &nbio_v6_1_funcs;
adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
}
+ adev->hdp.funcs = &hdp_v4_0_funcs;
if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
adev->df.funcs = &df_v3_6_funcs;
@@ -729,12 +728,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
}
if (adev->asic_type == CHIP_VEGA20)
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
else
amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
} else {
if (adev->asic_type == CHIP_VEGA20)
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
else
amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
@@ -787,9 +786,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev)) {
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
} else {
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
}
@@ -834,35 +833,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
return 0;
}
-static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
-{
- adev->nbio.funcs->hdp_flush(adev, ring);
-}
-
-static void soc15_invalidate_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg)
- WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
- else
- amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
- HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
-}
-
static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
/* change this when we implement soft reset */
return true;
}
-static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
-{
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
- return;
- /*read back hdp ras counter to reset it to 0 */
- RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
-}
-
static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
uint64_t *count1)
{
@@ -1011,8 +987,6 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.set_uvd_clocks = &soc15_set_uvd_clocks,
.set_vce_clocks = &soc15_set_vce_clocks,
.get_config_memsize = &soc15_get_config_memsize,
- .flush_hdp = &soc15_flush_hdp,
- .invalidate_hdp = &soc15_invalidate_hdp,
.need_full_reset = &soc15_need_full_reset,
.init_doorbell_index = &vega10_doorbell_index_init,
.get_pcie_usage = &soc15_get_pcie_usage,
@@ -1034,9 +1008,6 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.set_uvd_clocks = &soc15_set_uvd_clocks,
.set_vce_clocks = &soc15_set_vce_clocks,
.get_config_memsize = &soc15_get_config_memsize,
- .flush_hdp = &soc15_flush_hdp,
- .invalidate_hdp = &soc15_invalidate_hdp,
- .reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count,
.need_full_reset = &soc15_need_full_reset,
.init_doorbell_index = &vega20_doorbell_index_init,
.get_pcie_usage = &vega20_get_pcie_usage,
@@ -1294,9 +1265,8 @@ static int soc15_common_late_init(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
- if (adev->asic_funcs &&
- adev->asic_funcs->reset_hdp_ras_error_count)
- adev->asic_funcs->reset_hdp_ras_error_count(adev);
+ if (adev->hdp.funcs->reset_ras_error_count)
+ adev->hdp.funcs->reset_ras_error_count(adev);
if (adev->nbio.funcs->ras_late_init)
r = adev->nbio.funcs->ras_late_init(adev);
@@ -1422,41 +1392,6 @@ static int soc15_common_soft_reset(void *handle)
return 0;
}
-static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
-{
- uint32_t def, data;
-
- if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_RENOIR) {
- def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
- data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
- HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
- HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
- HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
- else
- data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
- HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
- HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
- HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
-
- if (def != data)
- WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
- } else {
- def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
- data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
- else
- data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
-
- if (def != data)
- WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
- }
-}
-
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
uint32_t def, data;
@@ -1517,7 +1452,7 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
- soc15_update_hdp_light_sleep(adev,
+ adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
soc15_update_drm_clock_gating(adev,
state == AMD_CG_STATE_GATE);
@@ -1534,7 +1469,7 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
- soc15_update_hdp_light_sleep(adev,
+ adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
soc15_update_drm_clock_gating(adev,
state == AMD_CG_STATE_GATE);
@@ -1542,7 +1477,7 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE);
break;
case CHIP_ARCTURUS:
- soc15_update_hdp_light_sleep(adev,
+ adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
default:
@@ -1561,10 +1496,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
adev->nbio.funcs->get_clockgating_state(adev, flags);
- /* AMD_CG_SUPPORT_HDP_LS */
- data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
- if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
- *flags |= AMD_CG_SUPPORT_HDP_LS;
+ adev->hdp.funcs->get_clock_gating_state(adev, flags);
/* AMD_CG_SUPPORT_DRM_MGCG */
data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
new file mode 100644
index 000000000000..5039375bb1d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _TA_SECUREDISPLAY_IF_H
+#define _TA_SECUREDISPLAY_IF_H
+
+/** Secure Display related enumerations */
+/**********************************************************/
+
+/** @enum ta_securedisplay_command
+ * Secure Display Command ID
+ */
+enum ta_securedisplay_command {
+ /* Query whether the TA is responding; used only for validation purposes */
+ TA_SECUREDISPLAY_COMMAND__QUERY_TA = 1,
+ /* Send region of Interest and CRC value to I2C */
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC = 2,
+ /* Maximum Command ID */
+ TA_SECUREDISPLAY_COMMAND__MAX_ID = 0x7FFFFFFF,
+};
+
+/** @enum ta_securedisplay_status
+ * Secure Display status codes returned in the shared buffer status field
+ */
+enum ta_securedisplay_status {
+ TA_SECUREDISPLAY_STATUS__SUCCESS = 0x00, /* Success */
+ TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE = 0x01, /* Generic Failure */
+ TA_SECUREDISPLAY_STATUS__INVALID_PARAMETER = 0x02, /* Invalid Parameter */
+ TA_SECUREDISPLAY_STATUS__NULL_POINTER = 0x03, /* Null Pointer*/
+ TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR = 0x04, /* Fail to Write to I2C */
+ TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR = 0x05, /* Fail to Read DIO Scratch Register */
+ TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR = 0x06, /* Fail to Read CRC */
+
+ TA_SECUREDISPLAY_STATUS__MAX = 0x7FFFFFFF,/* Maximum Value for status*/
+};
+
+/** @enum ta_securedisplay_max_phy
+ * Physical ID number to use for reading corresponding DIO Scratch register for ROI
+ */
+enum ta_securedisplay_max_phy {
+ TA_SECUREDISPLAY_PHY0 = 0,
+ TA_SECUREDISPLAY_PHY1 = 1,
+ TA_SECUREDISPLAY_PHY2 = 2,
+ TA_SECUREDISPLAY_PHY3 = 3,
+ TA_SECUREDISPLAY_MAX_PHY = 4,
+};
+
+/** @enum ta_securedisplay_ta_query_cmd_ret
+ * A predefined return value of 0xAB, used only to validate that
+ * communication with the Secure Display TA is functional.
+ * This value is used to check whether the TA is responding successfully
+ */
+enum ta_securedisplay_ta_query_cmd_ret {
+ /* This is a value to validate if TA is loaded successfully */
+ TA_SECUREDISPLAY_QUERY_CMD_RET = 0xAB,
+};
+
+/** @enum ta_securedisplay_buffer_size
+ * I2C buffer size, which contains 8 bytes of ROI (X start, X end, Y start, Y end),
+ * 6 bytes of CRC (R, G, B) and 1 byte for the physical ID
+ */
+enum ta_securedisplay_buffer_size {
+ /* 15 bytes = 8 byte (ROI) + 6 byte(CRC) + 1 byte(phy_id) */
+ TA_SECUREDISPLAY_I2C_BUFFER_SIZE = 15,
+};
+
+/** Input/output structures for Secure Display commands */
+/**********************************************************/
+/**
+ * Input structures
+ */
+
+/** @struct ta_securedisplay_send_roi_crc_input
+ * Physical ID to determine which DIO scratch register should be used to get ROI
+ */
+struct ta_securedisplay_send_roi_crc_input {
+ uint32_t phy_id; /* Physical ID */
+};
+
+/** @union ta_securedisplay_cmd_input
+ * Input buffer
+ */
+union ta_securedisplay_cmd_input {
+ /* send ROI and CRC input buffer format */
+ struct ta_securedisplay_send_roi_crc_input send_roi_crc;
+ uint32_t reserved[4];
+};
+
+/**
+ * Output structures
+ */
+
+/** @struct ta_securedisplay_query_ta_output
+ * Output buffer format for the query-TA command (checks whether the TA is responding); used only for validation purposes
+ */
+struct ta_securedisplay_query_ta_output {
+ /* return value from TA when it is queried for validation purpose only */
+ uint32_t query_cmd_ret;
+};
+
+/** @struct ta_securedisplay_send_roi_crc_output
+ * Output buffer format for the send ROI CRC command, which passes back the I2C buffer created
+ * inside the TA and written to I2C; used only for validation purposes
+ */
+struct ta_securedisplay_send_roi_crc_output {
+ uint8_t i2c_buf[TA_SECUREDISPLAY_I2C_BUFFER_SIZE]; /* I2C buffer */
+ uint8_t reserved;
+};
+
+/** @union ta_securedisplay_cmd_output
+ * Output buffer
+ */
+union ta_securedisplay_cmd_output {
+ /* Query TA output buffer format used only for validation purpose*/
+ struct ta_securedisplay_query_ta_output query_ta;
+ /* Send ROI CRC output buffer format used only for validation purpose */
+ struct ta_securedisplay_send_roi_crc_output send_roi_crc;
+ uint32_t reserved[4];
+};
+
+/** @struct securedisplay_cmd
+ * Secure Display command layout in the shared buffer memory
+ */
+struct securedisplay_cmd {
+ uint32_t cmd_id; /* +0 Bytes Command ID */
+ enum ta_securedisplay_status status; /* +4 Bytes Status of Secure Display TA */
+ uint32_t reserved[2]; /* +8 Bytes Reserved */
+ union ta_securedisplay_cmd_input securedisplay_in_message; /* +16 Bytes Input Buffer */
+ union ta_securedisplay_cmd_output securedisplay_out_message;/* +32 Bytes Output Buffer */
+ /**@note Total 48 Bytes */
+};
+
+#endif //_TA_SECUREDISPLAY_IF_H
+
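
The layout documented above adds up as noted: 4 (cmd_id) + 4 (status) + 8 (reserved) + 16 (input union) + 16 (output union) = 48 bytes. For reference, a minimal sketch of how a caller might fill the shared buffer for QUERY_TA and validate the 0xAB token; the helper name is hypothetical and the PSP submission step is elided, since the real submission path lives in the PSP secure display code, which is not part of this hunk.

/*
 * Illustrative sketch only -- helper name hypothetical; only identifiers
 * from the header above are used.
 */
static int securedisplay_query_ta(struct securedisplay_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->cmd_id = TA_SECUREDISPLAY_COMMAND__QUERY_TA;

	/* ... submit cmd to the TA through the PSP here ... */

	if (cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
		return -EIO;

	/* the TA answers the query with the fixed 0xAB token */
	if (cmd->securedisplay_out_message.query_ta.query_cmd_ret !=
	    TA_SECUREDISPLAY_QUERY_CMD_RET)
		return -EINVAL;

	return 0;
}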
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index ce3319993b4b..249fcbee7871 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -196,19 +196,30 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
- if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
- /* When a ring buffer overflow happen start parsing interrupt
- * from the last not overwritten vector (wptr + 16). Hopefully
- * this should allow us to catchup.
- */
- dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
- ih->rptr = (wptr + 16) & ih->ptr_mask;
- tmp = RREG32(mmIH_RB_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32(mmIH_RB_CNTL, tmp);
- }
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ /* Double check that the overflow wasn't already cleared. */
+ wptr = RREG32(mmIH_RB_WPTR);
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+ /* When a ring buffer overflow happen start parsing interrupt
+ * from the last not overwritten vector (wptr + 16). Hopefully
+ * this should allow us to catchup.
+ */
+
+ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
+ tmp = RREG32(mmIH_RB_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32(mmIH_RB_CNTL, tmp);
+
+out:
return (wptr & ih->ptr_mask);
}
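
The rewritten tonga_ih_get_wptr() above only acts on an overflow if it has not already been cleared, re-reading the live WPTR register before adjusting rptr. For reference, the same pattern condensed into a stand-alone helper; the helper name is hypothetical and this is illustrative only.

/*
 * Illustrative sketch only -- condenses the overflow double-check
 * pattern used by the rewritten tonga_ih_get_wptr().
 */
static u32 ih_get_wptr_checked(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	u32 wptr = le32_to_cpu(*ih->wptr_cpu);
	u32 tmp;

	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

	/* only act if the live register still reports an overflow */
	wptr = RREG32(mmIH_RB_WPTR);
	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);

	dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X)\n",
		 wptr, ih->rptr);

	/* resume 16 dwords past the last vector that was not overwritten */
	ih->rptr = (wptr + 16) & ih->ptr_mask;

	tmp = RREG32(mmIH_RB_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
	WREG32(mmIH_RB_CNTL, tmp);

out:
	return wptr & ih->ptr_mask;
}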
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 312ecf6d24a0..7cd67cb2ac5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -36,7 +36,6 @@
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
-#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index c734e31a9e65..6117931fa8d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -32,7 +32,6 @@
#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
-#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index e5ae31eb744e..88626d83e07b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -38,132 +38,120 @@
static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
/**
- * vega10_ih_enable_interrupts - Enable the interrupt ring buffer
+ * vega10_ih_init_register_offset - Initialize register offset for ih rings
*
* @adev: amdgpu_device pointer
*
- * Enable the interrupt ring buffer (VEGA10).
+ * Initialize the register offsets for the ih rings (VEGA10).
*/
-static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
+static void vega10_ih_init_register_offset(struct amdgpu_device *adev)
{
- u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
-
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ struct amdgpu_ih_regs *ih_regs;
+
+ if (adev->irq.ih.ring_size) {
+ ih_regs = &adev->irq.ih.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
+ ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
+ ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
}
- adev->irq.ih.enabled = true;
if (adev->irq.ih1.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
- RB_ENABLE, 1);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
- }
- adev->irq.ih1.enabled = true;
+ ih_regs = &adev->irq.ih1.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
}
if (adev->irq.ih2.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
- RB_ENABLE, 1);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
- }
- adev->irq.ih2.enabled = true;
+ ih_regs = &adev->irq.ih2.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
}
-
- if (adev->irq.ih_soft.ring_size)
- adev->irq.ih_soft.enabled = true;
}
/**
- * vega10_ih_disable_interrupts - Disable the interrupt ring buffer
+ * vega10_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
*
* @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ * @enable: true - enable the interrupts, false - disable the interrupts
*
- * Disable the interrupt ring buffer (VEGA10).
+ * Toggle the interrupt ring buffer (VEGA10)
*/
-static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
+static int vega10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ bool enable)
{
- u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
+ ih_regs = &ih->ih_regs;
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+ /* enable_intr field is only valid in ring0 */
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return;
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
}
} else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ WREG32(ih_regs->ih_rb_cntl, tmp);
}
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
- adev->irq.ih.enabled = false;
- adev->irq.ih.rptr = 0;
-
- if (adev->irq.ih1.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
- RB_ENABLE, 0);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
- }
+ if (enable) {
+ ih->enabled = true;
+ } else {
/* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
- adev->irq.ih1.enabled = false;
- adev->irq.ih1.rptr = 0;
+ WREG32(ih_regs->ih_rb_rptr, 0);
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ ih->enabled = false;
+ ih->rptr = 0;
}
- if (adev->irq.ih2.ring_size) {
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
- RB_ENABLE, 0);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
- }
+ return 0;
+}
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
- adev->irq.ih2.enabled = false;
- adev->irq.ih2.rptr = 0;
+/**
+ * vega10_ih_toggle_interrupts - Toggle all the available interrupt ring buffers
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable or disable interrupt ring buffers
+ *
+ * Toggle all the available interrupt ring buffers (VEGA10).
+ */
+static int vega10_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable)
+{
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ int i;
+ int r;
+
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ r = vega10_ih_toggle_ring_interrupts(adev, ih[i], enable);
+ if (r)
+ return r;
+ }
}
+
+ return 0;
}
static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
@@ -209,6 +197,58 @@ static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
}
/**
+ * vega10_ih_enable_ring - enable an ih ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Enable an ih ring buffer (VEGA10)
+ */
+static int vega10_ih_enable_ring(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
+
+ ih_regs = &ih->ih_regs;
+
+ /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
+ WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
+ WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = vega10_ih_rb_cntl(ih, tmp);
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
+ if (ih == &adev->irq.ih1) {
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+ }
+ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ if (ih == &adev->irq.ih) {
+ /* set the ih ring 0 writeback address whether it's enabled or not */
+ WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
+ WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
+ }
+
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ WREG32(ih_regs->ih_rb_rptr, 0);
+
+ WREG32(ih_regs->ih_doorbell_rptr, vega10_ih_doorbell_rptr(ih));
+
+ return 0;
+}
+
+/**
* vega10_ih_irq_init - init and enable the interrupt ring
*
* @adev: amdgpu_device pointer
@@ -221,116 +261,34 @@ static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
*/
static int vega10_ih_irq_init(struct amdgpu_device *adev)
{
- struct amdgpu_ih_ring *ih;
- u32 ih_rb_cntl, ih_chicken;
- int ret = 0;
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ u32 ih_chicken;
+ int ret;
+ int i;
u32 tmp;
/* disable irqs */
- vega10_ih_disable_interrupts(adev);
+ ret = vega10_ih_toggle_interrupts(adev, false);
+ if (ret)
+ return ret;
adev->nbio.funcs->ih_control(adev);
- ih = &adev->irq.ih;
- /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
- ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
- !!adev->irq.msi_enabled);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
- DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
- }
-
- if ((adev->asic_type == CHIP_ARCTURUS &&
- adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
- adev->asic_type == CHIP_RENOIR) {
+ if (adev->asic_type == CHIP_RENOIR) {
ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
if (adev->irq.ih.use_bus_addr) {
ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
MC_SPACE_GPA_ENABLE, 1);
- } else {
- ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
- MC_SPACE_FBPA_ENABLE, 1);
}
WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
}
- /* set the writeback address whether it's enabled or not */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
- lower_32_bits(ih->wptr_addr));
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
- upper_32_bits(ih->wptr_addr) & 0xFFFF);
-
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
- vega10_ih_doorbell_rptr(ih));
-
- ih = &adev->irq.ih1;
- if (ih->ring_size) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
- (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
- WPTR_OVERFLOW_ENABLE, 0);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
- RB_FULL_DRAIN_ENABLE, 1);
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ ret = vega10_ih_enable_ring(adev, ih[i]);
+ if (ret)
+ return ret;
}
-
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
- vega10_ih_doorbell_rptr(ih));
- }
-
- ih = &adev->irq.ih2;
- if (ih->ring_size) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
- (ih->gpu_addr >> 40) & 0xff);
-
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
-
- if (amdgpu_sriov_vf(adev)) {
- if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
- ih_rb_cntl)) {
- DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
- return -ETIMEDOUT;
- }
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
- }
-
- /* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
-
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
- vega10_ih_doorbell_rptr(ih));
}
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
@@ -345,9 +303,14 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
pci_set_master(adev->pdev);
/* enable interrupts */
- vega10_ih_enable_interrupts(adev);
+ ret = vega10_ih_toggle_interrupts(adev, true);
+ if (ret)
+ return ret;
- return ret;
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
+ return 0;
}
/**
@@ -359,7 +322,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
*/
static void vega10_ih_irq_disable(struct amdgpu_device *adev)
{
- vega10_ih_disable_interrupts(adev);
+ vega10_ih_toggle_interrupts(adev, false);
/* Wait and acknowledge irq */
mdelay(1);
@@ -379,25 +342,17 @@ static void vega10_ih_irq_disable(struct amdgpu_device *adev)
static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
- u32 wptr, reg, tmp;
+ u32 wptr, tmp;
+ struct amdgpu_ih_regs *ih_regs;
wptr = le32_to_cpu(*ih->wptr_cpu);
+ ih_regs = &ih->ih_regs;
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
/* Double check that the overflow wasn't already cleared. */
-
- if (ih == &adev->irq.ih)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
- else if (ih == &adev->irq.ih1)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
- else if (ih == &adev->irq.ih2)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
- else
- BUG();
-
- wptr = RREG32_NO_KIQ(reg);
+ wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -413,69 +368,15 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
wptr, ih->rptr, tmp);
ih->rptr = tmp;
- if (ih == &adev->irq.ih)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
- else if (ih == &adev->irq.ih1)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
- else if (ih == &adev->irq.ih2)
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
- else
- BUG();
-
- tmp = RREG32_NO_KIQ(reg);
+ tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32_NO_KIQ(reg, tmp);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
/**
- * vega10_ih_decode_iv - decode an interrupt vector
- *
- * @adev: amdgpu_device pointer
- * @ih: IH ring buffer to decode
- * @entry: IV entry to place decoded information into
- *
- * Decodes the interrupt vector at the current rptr
- * position and also advance the position.
- */
-static void vega10_ih_decode_iv(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih,
- struct amdgpu_iv_entry *entry)
-{
- /* wptr/rptr are in bytes! */
- u32 ring_index = ih->rptr >> 2;
- uint32_t dw[8];
-
- dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
- dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
- dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
- dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
- dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
- dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
- dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
- dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
-
- entry->client_id = dw[0] & 0xff;
- entry->src_id = (dw[0] >> 8) & 0xff;
- entry->ring_id = (dw[0] >> 16) & 0xff;
- entry->vmid = (dw[0] >> 24) & 0xf;
- entry->vmid_src = (dw[0] >> 31);
- entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
- entry->timestamp_src = dw[2] >> 31;
- entry->pasid = dw[3] & 0xffff;
- entry->pasid_src = dw[3] >> 31;
- entry->src_data[0] = dw[4];
- entry->src_data[1] = dw[5];
- entry->src_data[2] = dw[6];
- entry->src_data[3] = dw[7];
-
- /* wptr/rptr are in bytes! */
- ih->rptr += 32;
-}
-
-/**
* vega10_ih_irq_rearm - rearm IRQ if lost
*
* @adev: amdgpu_device pointer
@@ -485,22 +386,14 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
- uint32_t reg_rptr = 0;
uint32_t v = 0;
uint32_t i = 0;
+ struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
- else if (ih == &adev->irq.ih1)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
- else if (ih == &adev->irq.ih2)
- reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
- else
- return;
-
+ ih_regs = &ih->ih_regs;
/* Rearm IRQ / re-wwrite doorbell if doorbell write is lost */
for (i = 0; i < MAX_REARM_RETRY; i++) {
- v = RREG32_NO_KIQ(reg_rptr);
+ v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
if ((v < ih->ring_size) && (v != ih->rptr))
WDOORBELL32(ih->doorbell_index, ih->rptr);
else
@@ -519,6 +412,8 @@ static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
static void vega10_ih_set_rptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
+ struct amdgpu_ih_regs *ih_regs;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
@@ -526,12 +421,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
vega10_ih_irq_rearm(adev, ih);
- } else if (ih == &adev->irq.ih) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
- } else if (ih == &adev->irq.ih1) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
- } else if (ih == &adev->irq.ih2) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
+ } else {
+ ih_regs = &ih->ih_regs;
+ WREG32(ih_regs->ih_rb_rptr, ih->rptr);
}
}
@@ -600,19 +492,23 @@ static int vega10_ih_sw_init(void *handle)
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
- if (r)
- return r;
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+ if (r)
+ return r;
- adev->irq.ih1.use_doorbell = true;
- adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
- if (r)
- return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
- adev->irq.ih2.use_doorbell = true;
- adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+ }
+ /* initialize ih control registers offset */
+ vega10_ih_init_register_offset(adev);
r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
if (r)
@@ -628,6 +524,7 @@ static int vega10_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
amdgpu_ih_ring_fini(adev, &adev->irq.ih);
@@ -698,15 +595,11 @@ static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev,
def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
field_val = enable ? 0 : 1;
/**
- * Vega10 does not have IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE
- * and IH_BUFFER_MEM_CLK_SOFT_OVERRIDE field.
+ * Vega10/12 and RAVEN don't have IH_BUFFER_MEM_CLK_SOFT_OVERRIDE field.
*/
- if (adev->asic_type > CHIP_VEGA10) {
- data = REG_SET_FIELD(data, IH_CLK_CTRL,
- IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val);
+ if (adev->asic_type == CHIP_RENOIR)
data = REG_SET_FIELD(data, IH_CLK_CTRL,
IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val);
- }
data = REG_SET_FIELD(data, IH_CLK_CTRL,
DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
@@ -759,7 +652,7 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
.get_wptr = vega10_ih_get_wptr,
- .decode_iv = vega10_ih_decode_iv,
+ .decode_iv = amdgpu_ih_decode_iv_helper,
.set_rptr = vega10_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
new file mode 100644
index 000000000000..5a3c867d5881
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -0,0 +1,703 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "soc15.h"
+
+#include "oss/osssys_4_2_0_offset.h"
+#include "oss/osssys_4_2_0_sh_mask.h"
+
+#include "soc15_common.h"
+#include "vega20_ih.h"
+
+#define MAX_REARM_RETRY 10
+
+static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev);
+
+/**
+ * vega20_ih_init_register_offset - Initialize register offset for ih rings
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize register offset ih rings (VEGA20).
+ */
+static void vega20_ih_init_register_offset(struct amdgpu_device *adev)
+{
+ struct amdgpu_ih_regs *ih_regs;
+
+ if (adev->irq.ih.ring_size) {
+ ih_regs = &adev->irq.ih.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR);
+ ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO);
+ ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
+ }
+
+ if (adev->irq.ih1.ring_size) {
+ ih_regs = &adev->irq.ih1.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
+ }
+
+ if (adev->irq.ih2.ring_size) {
+ ih_regs = &adev->irq.ih2.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2;
+ }
+}
+
+/**
+ * vega20_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ * @enable: true - enable the interrupts, false - disable the interrupts
+ *
+ * Toggle the interrupt ring buffer (VEGA20)
+ */
+static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ bool enable)
+{
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
+
+ ih_regs = &ih->ih_regs;
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+ /* enable_intr field is only valid in ring0 */
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
+ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ if (enable) {
+ ih->enabled = true;
+ } else {
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_rptr, 0);
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ ih->enabled = false;
+ ih->rptr = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * vega20_ih_toggle_interrupts - Toggle all the available interrupt ring buffers
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable or disable interrupt ring buffers
+ *
+ * Toggle all the available interrupt ring buffers (VEGA20).
+ */
+static int vega20_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable)
+{
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ int i;
+ int r;
+
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ r = vega20_ih_toggle_ring_interrupts(adev, ih[i], enable);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static uint32_t vega20_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
+{
+ int rb_bufsz = order_base_2(ih->ring_size / 4);
+
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ MC_SPACE, ih->use_bus_addr ? 1 : 4);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_CLEAR, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_ENABLE, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
+ /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
+ * value is written to memory
+ */
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_WRITEBACK_ENABLE, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
+
+ return ih_rb_cntl;
+}
+
+static uint32_t vega20_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
+{
+ u32 ih_doorbell_rtpr = 0;
+
+ if (ih->use_doorbell) {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR, OFFSET,
+ ih->doorbell_index);
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 1);
+ } else {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 0);
+ }
+ return ih_doorbell_rtpr;
+}
+
+/**
+ * vega20_ih_enable_ring - enable an ih ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Enable an ih ring buffer (VEGA20)
+ */
+static int vega20_ih_enable_ring(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
+
+ ih_regs = &ih->ih_regs;
+
+ /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
+ WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
+ WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = vega20_ih_rb_cntl(ih, tmp);
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
+ if (ih == &adev->irq.ih1) {
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+ }
+ if (amdgpu_sriov_vf(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ if (ih == &adev->irq.ih) {
+ /* set the ih ring 0 writeback address whether it's enabled or not */
+ WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
+ WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
+ }
+
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ WREG32(ih_regs->ih_rb_rptr, 0);
+
+ WREG32(ih_regs->ih_doorbell_rptr, vega20_ih_doorbell_rptr(ih));
+
+ return 0;
+}
+
+/**
+ * vega20_ih_reroute_ih - reroute VMC/UTCL2 ih to an ih ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Reroute VMC and UTCL2 interrupts on the primary ih ring to
+ * ih ring 1 so they won't be lost when bursts of page fault
+ * interrupts overwhelm the interrupt handler (VEGA20)
+ */
+static void vega20_ih_reroute_ih(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ /* On vega20 the ih reroute goes through psp,
+ * so this function is only used for Arcturus.
+ */
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ /* Reroute to IH ring 1 for VMC */
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+
+ /* Reroute IH ring 1 for UTCL2 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+ }
+}
+
+/**
+ * vega20_ih_irq_init - init and enable the interrupt ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate a ring buffer for the interrupt controller,
+ * enable the RLC, disable interrupts, enable the IH
+ * ring buffer and enable it (VEGA20).
+ * Called at device load and resume.
+ * Returns 0 for success, errors for failure.
+ */
+static int vega20_ih_irq_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
+ u32 ih_chicken;
+ int ret;
+ int i;
+ u32 tmp;
+
+ /* disable irqs */
+ ret = vega20_ih_toggle_interrupts(adev, false);
+ if (ret)
+ return ret;
+
+ adev->nbio.funcs->ih_control(adev);
+
+ if (adev->asic_type == CHIP_ARCTURUS &&
+ adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
+ if (adev->irq.ih.use_bus_addr) {
+ ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+ MC_SPACE_GPA_ENABLE, 1);
+ }
+ WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ if (i == 1)
+ vega20_ih_reroute_ih(adev);
+ ret = vega20_ih_enable_ring(adev, ih[i]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
+ CLIENT18_IS_STORM_CLIENT, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
+
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
+
+ pci_set_master(adev->pdev);
+
+ /* enable interrupts */
+ ret = vega20_ih_toggle_interrupts(adev, true);
+ if (ret)
+ return ret;
+
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
+ return 0;
+}
+
+/**
+ * vega20_ih_irq_disable - disable interrupts
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable interrupts on the hw (VEGA20).
+ */
+static void vega20_ih_irq_disable(struct amdgpu_device *adev)
+{
+ vega20_ih_toggle_interrupts(adev, false);
+
+ /* Wait and acknowledge irq */
+ mdelay(1);
+}
+
+/**
+ * vega20_ih_get_wptr - get the IH ring buffer wptr
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Get the IH ring buffer wptr from either the register
+ * or the writeback memory buffer (VEGA20). Also check for
+ * ring buffer overflow and deal with it.
+ * Returns the value of the wptr.
+ */
+static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ u32 wptr, tmp;
+ struct amdgpu_ih_regs *ih_regs;
+
+ wptr = le32_to_cpu(*ih->wptr_cpu);
+ ih_regs = &ih->ih_regs;
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ /* Double check that the overflow wasn't already cleared. */
+ wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last vector that was not overwritten (wptr + 32).
+ * Hopefully this should allow us to catch up.
+ */
+ tmp = (wptr + 32) & ih->ptr_mask;
+ dev_warn(adev->dev, "IH ring buffer overflow "
+ "(0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, tmp);
+ ih->rptr = tmp;
+
+ tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+out:
+ return (wptr & ih->ptr_mask);
+}
+
+/**
+ * vega20_ih_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ */
+static void vega20_ih_irq_rearm(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ uint32_t v = 0;
+ uint32_t i = 0;
+ struct amdgpu_ih_regs *ih_regs;
+
+ ih_regs = &ih->ih_regs;
+
+ /* Rearm IRQ / re-write doorbell if doorbell write is lost */
+ for (i = 0; i < MAX_REARM_RETRY; i++) {
+ v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
+ if ((v < ih->ring_size) && (v != ih->rptr))
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+ else
+ break;
+ }
+}
+
+/**
+ * vega20_ih_set_rptr - set the IH ring buffer rptr
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Set the IH ring buffer rptr.
+ */
+static void vega20_ih_set_rptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ struct amdgpu_ih_regs *ih_regs;
+
+ if (ih->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ *ih->rptr_cpu = ih->rptr;
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+ if (amdgpu_sriov_vf(adev))
+ vega20_ih_irq_rearm(adev, ih);
+ } else {
+ ih_regs = &ih->ih_regs;
+ WREG32(ih_regs->ih_rb_rptr, ih->rptr);
+ }
+}
+
+/**
+ * vega20_ih_self_irq - dispatch work for ring 1 and 2
+ *
+ * @adev: amdgpu_device pointer
+ * @source: irq source
+ * @entry: IV with WPTR update
+ *
+ * Update the WPTR from the IV and schedule work to handle the entries.
+ */
+static int vega20_ih_self_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t wptr = cpu_to_le32(entry->src_data[0]);
+
+ switch (entry->ring_id) {
+ case 1:
+ *adev->irq.ih1.wptr_cpu = wptr;
+ schedule_work(&adev->irq.ih1_work);
+ break;
+ case 2:
+ *adev->irq.ih2.wptr_cpu = wptr;
+ schedule_work(&adev->irq.ih2_work);
+ break;
+ default: break;
+ }
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs vega20_ih_self_irq_funcs = {
+ .process = vega20_ih_self_irq,
+};
+
+static void vega20_ih_set_self_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->irq.self_irq.num_types = 0;
+ adev->irq.self_irq.funcs = &vega20_ih_self_irq_funcs;
+}
+
+static int vega20_ih_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ vega20_ih_set_interrupt_funcs(adev);
+ vega20_ih_set_self_irq_funcs(adev);
+ return 0;
+}
+
+static int vega20_ih_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
+ &adev->irq.self_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
+ if (r)
+ return r;
+
+ adev->irq.ih.use_doorbell = true;
+ adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+
+ /* initialize ih control registers offset */
+ vega20_ih_init_register_offset(adev);
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_init(adev);
+
+ return r;
+}
+
+static int vega20_ih_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_irq_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+
+ return 0;
+}
+
+static int vega20_ih_hw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = vega20_ih_irq_init(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int vega20_ih_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ vega20_ih_irq_disable(adev);
+
+ return 0;
+}
+
+static int vega20_ih_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return vega20_ih_hw_fini(adev);
+}
+
+static int vega20_ih_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return vega20_ih_hw_init(adev);
+}
+
+static bool vega20_ih_is_idle(void *handle)
+{
+ /* todo */
+ return true;
+}
+
+static int vega20_ih_wait_for_idle(void *handle)
+{
+ /* todo */
+ return -ETIMEDOUT;
+}
+
+static int vega20_ih_soft_reset(void *handle)
+{
+ /* todo */
+
+ return 0;
+}
+
+static void vega20_ih_update_clockgating_state(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, def, field_val;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
+ def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
+ field_val = enable ? 0 : 1;
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DYN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ REG_CLK_SOFT_OVERRIDE, field_val);
+ if (def != data)
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
+ }
+}
+
+static int vega20_ih_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ vega20_ih_update_clockgating_state(adev,
+ state == AMD_CG_STATE_GATE);
+ return 0;
+
+}
+
+static int vega20_ih_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+const struct amd_ip_funcs vega20_ih_ip_funcs = {
+ .name = "vega20_ih",
+ .early_init = vega20_ih_early_init,
+ .late_init = NULL,
+ .sw_init = vega20_ih_sw_init,
+ .sw_fini = vega20_ih_sw_fini,
+ .hw_init = vega20_ih_hw_init,
+ .hw_fini = vega20_ih_hw_fini,
+ .suspend = vega20_ih_suspend,
+ .resume = vega20_ih_resume,
+ .is_idle = vega20_ih_is_idle,
+ .wait_for_idle = vega20_ih_wait_for_idle,
+ .soft_reset = vega20_ih_soft_reset,
+ .set_clockgating_state = vega20_ih_set_clockgating_state,
+ .set_powergating_state = vega20_ih_set_powergating_state,
+};
+
+static const struct amdgpu_ih_funcs vega20_ih_funcs = {
+ .get_wptr = vega20_ih_get_wptr,
+ .decode_iv = amdgpu_ih_decode_iv_helper,
+ .set_rptr = vega20_ih_set_rptr
+};
+
+static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+ adev->irq.ih_funcs = &vega20_ih_funcs;
+}
+
+const struct amdgpu_ip_block_version vega20_ih_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &vega20_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.h b/drivers/gpu/drm/amd/amdgpu/vega20_ih.h
new file mode 100644
index 000000000000..7af6d8758ee3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VEGA20_IH_H__
+#define __VEGA20_IH_H__
+
+extern const struct amd_ip_funcs vega20_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version vega20_ih_ip_block;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index d56b474b3a21..eafb76aebd00 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -642,11 +642,21 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
return -EINVAL;
}
-static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
+/**
+ * vi_asic_pci_config_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use PCI Config method to reset the GPU.
+ *
+ * Returns 0 for success.
+ */
+static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
u32 i;
+ int r = -EINVAL;
- dev_info(adev->dev, "GPU pci config reset\n");
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
/* disable BM */
pci_clear_master(adev->pdev);
@@ -661,29 +671,11 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
/* enable BM */
pci_set_master(adev->pdev);
adev->has_hw_reset = true;
- return 0;
+ r = 0;
+ break;
}
udelay(1);
}
- return -EINVAL;
-}
-
-/**
- * vi_asic_pci_config_reset - soft reset GPU
- *
- * @adev: amdgpu_device pointer
- *
- * Use PCI Config method to reset the GPU.
- *
- * Returns 0 for success.
- */
-static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
-{
- int r;
-
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
- r = vi_gpu_pci_config_reset(adev);
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index e8fb10c41f16..f02c938f75da 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -7,6 +7,8 @@ config HSA_AMD
bool "HSA kernel driver for AMD GPU devices"
depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64)
imply AMD_IOMMU_V2 if X86_64
+ select HMM_MIRROR
select MMU_NOTIFIER
+ select DRM_AMDGPU_USERPTR
help
Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 16262e5d93f5..7351dd195274 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -243,11 +243,11 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
static inline void dqm_lock(struct device_queue_manager *dqm)
{
mutex_lock(&dqm->lock_hidden);
- dqm->saved_flags = memalloc_nofs_save();
+ dqm->saved_flags = memalloc_noreclaim_save();
}
static inline void dqm_unlock(struct device_queue_manager *dqm)
{
- memalloc_nofs_restore(dqm->saved_flags);
+ memalloc_noreclaim_restore(dqm->saved_flags);
mutex_unlock(&dqm->lock_hidden);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 241bd6ff79f4..74a460be077b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -44,6 +44,25 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
+ /* Only handle clients we care about */
+ if (client_id != SOC15_IH_CLIENTID_GRBM_CP &&
+ client_id != SOC15_IH_CLIENTID_SDMA0 &&
+ client_id != SOC15_IH_CLIENTID_SDMA1 &&
+ client_id != SOC15_IH_CLIENTID_SDMA2 &&
+ client_id != SOC15_IH_CLIENTID_SDMA3 &&
+ client_id != SOC15_IH_CLIENTID_SDMA4 &&
+ client_id != SOC15_IH_CLIENTID_SDMA5 &&
+ client_id != SOC15_IH_CLIENTID_SDMA6 &&
+ client_id != SOC15_IH_CLIENTID_SDMA7 &&
+ client_id != SOC15_IH_CLIENTID_VMC &&
+ client_id != SOC15_IH_CLIENTID_VMC1 &&
+ client_id != SOC15_IH_CLIENTID_UTCL2 &&
+ client_id != SOC15_IH_CLIENTID_SE0SH &&
+ client_id != SOC15_IH_CLIENTID_SE1SH &&
+ client_id != SOC15_IH_CLIENTID_SE2SH &&
+ client_id != SOC15_IH_CLIENTID_SE3SH)
+ return false;
+
/* This is a known issue for gfx9. Under non HWS, pasid is not set
* in the interrupt payload, so we need to find out the pasid on our
* own.
@@ -96,17 +115,30 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
context_id = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
- if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
- kfd_signal_event_interrupt(pasid, context_id, 32);
- else if (source_id == SOC15_INTSRC_SDMA_TRAP)
- kfd_signal_event_interrupt(pasid, context_id & 0xfffffff, 28);
- else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
- kfd_signal_event_interrupt(pasid, context_id & 0xffffff, 24);
- else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
- kfd_signal_hw_exception_event(pasid);
- else if (client_id == SOC15_IH_CLIENTID_VMC ||
- client_id == SOC15_IH_CLIENTID_VMC1 ||
- client_id == SOC15_IH_CLIENTID_UTCL2) {
+ if (client_id == SOC15_IH_CLIENTID_GRBM_CP ||
+ client_id == SOC15_IH_CLIENTID_SE0SH ||
+ client_id == SOC15_IH_CLIENTID_SE1SH ||
+ client_id == SOC15_IH_CLIENTID_SE2SH ||
+ client_id == SOC15_IH_CLIENTID_SE3SH) {
+ if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
+ kfd_signal_event_interrupt(pasid, context_id, 32);
+ else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
+ kfd_signal_event_interrupt(pasid, context_id & 0xffffff, 24);
+ else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
+ kfd_signal_hw_exception_event(pasid);
+ } else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
+ client_id == SOC15_IH_CLIENTID_SDMA1 ||
+ client_id == SOC15_IH_CLIENTID_SDMA2 ||
+ client_id == SOC15_IH_CLIENTID_SDMA3 ||
+ client_id == SOC15_IH_CLIENTID_SDMA4 ||
+ client_id == SOC15_IH_CLIENTID_SDMA5 ||
+ client_id == SOC15_IH_CLIENTID_SDMA6 ||
+ client_id == SOC15_IH_CLIENTID_SDMA7) {
+ if (source_id == SOC15_INTSRC_SDMA_TRAP)
+ kfd_signal_event_interrupt(pasid, context_id & 0xfffffff, 28);
+ } else if (client_id == SOC15_IH_CLIENTID_VMC ||
+ client_id == SOC15_IH_CLIENTID_VMC1 ||
+ client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index a3fc23873819..0be72789ccbc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -497,8 +497,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.num_sdma_queues_per_engine);
sysfs_show_32bit_prop(buffer, offs, "num_cp_queues",
dev->node_props.num_cp_queues);
- sysfs_show_64bit_prop(buffer, offs, "unique_id",
- dev->node_props.unique_id);
if (dev->gpu) {
log_max_watch_addr =
@@ -529,6 +527,9 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.capability);
sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
dev->gpu->sdma_fw_version);
+ sysfs_show_64bit_prop(buffer, offs, "unique_id",
+ amdgpu_amdkfd_get_unique_id(dev->gpu->kgd));
+
}
return sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_ccompute",
@@ -1340,7 +1341,6 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
- dev->node_props.unique_id = amdgpu_amdkfd_get_unique_id(dev->gpu->kgd);
kfd_fill_mem_clk_max_info(dev);
kfd_fill_iolink_non_crat_info(dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 326d9b26b7aa..416fd910e12e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -57,7 +57,6 @@
struct kfd_node_properties {
uint64_t hive_id;
- uint64_t unique_id;
uint32_t cpu_cores_count;
uint32_t simd_count;
uint32_t mem_banks_count;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c6da89df055d..3e1fd1e7d09f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -60,7 +60,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
@@ -938,7 +937,49 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
}
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void event_mall_stutter(struct work_struct *work)
+{
+
+ struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
+ struct amdgpu_display_manager *dm = vblank_work->dm;
+
+ mutex_lock(&dm->dc_lock);
+
+ if (vblank_work->enable)
+ dm->active_vblank_irq_count++;
+ else
+ dm->active_vblank_irq_count--;
+
+
+ dc_allow_idle_optimizations(
+ dm->dc, dm->active_vblank_irq_count == 0 ? true : false);
+
+ DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
+
+
+ mutex_unlock(&dm->dc_lock);
+}
+static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
+{
+
+ int max_caps = dc->caps.max_links;
+ struct vblank_workqueue *vblank_work;
+ int i = 0;
+
+ vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(vblank_work)) {
+ kfree(vblank_work);
+ return NULL;
+ }
+
+ for (i = 0; i < max_caps; i++)
+ INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
+
+ return vblank_work;
+}
+#endif
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@@ -958,6 +999,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
mutex_init(&adev->dm.dc_lock);
mutex_init(&adev->dm.audio_lock);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ spin_lock_init(&adev->dm.vblank_lock);
+#endif
if(amdgpu_dm_irq_init(adev)) {
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
@@ -1016,8 +1060,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.flags.power_down_display_on_boot = true;
- init_data.soc_bounding_box = adev->dm.soc_bounding_box;
-
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
@@ -1074,6 +1116,17 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
amdgpu_dm_init_color_mod();
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (adev->dm.dc->caps.max_links > 0) {
+ adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
+
+ if (!adev->dm.vblank_workqueue)
+ DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
+ else
+ DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
+ }
+#endif
+
#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
@@ -1131,7 +1184,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.hdcp_workqueue) {
- hdcp_destroy(adev->dm.hdcp_workqueue);
+ hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
adev->dm.hdcp_workqueue = NULL;
}
@@ -1778,6 +1831,11 @@ static int dm_suspend(void *handle)
if (amdgpu_in_reset(adev)) {
mutex_lock(&dm->dc_lock);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dc_allow_idle_optimizations(adev->dm.dc, false);
+#endif
+
dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
@@ -1833,8 +1891,8 @@ static void emulated_link_detect(struct dc_link *link)
link->type = dc_connection_none;
prev_sink = link->local_sink;
- if (prev_sink != NULL)
- dc_sink_retain(prev_sink);
+ if (prev_sink)
+ dc_sink_release(prev_sink);
switch (link->connector_signal) {
case SIGNAL_TYPE_HDMI_TYPE_A: {
@@ -2330,8 +2388,10 @@ void amdgpu_dm_update_connector_after_detect(
* TODO: check if we still need the S3 mode update workaround.
* If yes, put it here.
*/
- if (aconnector->dc_sink)
+ if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(connector, NULL);
+ dc_sink_release(aconnector->dc_sink);
+ }
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
@@ -2347,8 +2407,6 @@ void amdgpu_dm_update_connector_after_detect(
drm_connector_update_edid_property(connector,
aconnector->edid);
- drm_add_edid_modes(connector, aconnector->edid);
-
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
aconnector->edid);
@@ -3720,10 +3778,53 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
};
+static void get_min_max_dc_plane_scaling(struct drm_device *dev,
+ struct drm_framebuffer *fb,
+ int *min_downscale, int *max_upscale)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = adev->dm.dc;
+ /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
+ struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ *max_upscale = plane_cap->max_upscale_factor.nv12;
+ *min_downscale = plane_cap->max_downscale_factor.nv12;
+ break;
+
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ *max_upscale = plane_cap->max_upscale_factor.fp16;
+ *min_downscale = plane_cap->max_downscale_factor.fp16;
+ break;
+
+ default:
+ *max_upscale = plane_cap->max_upscale_factor.argb8888;
+ *min_downscale = plane_cap->max_downscale_factor.argb8888;
+ break;
+ }
+
+ /*
+ * A factor of 1 in the plane_cap means that scaling is not allowed, i.e. use a
+ * scaling factor of 1.0 == 1000 units.
+ */
+ if (*max_upscale == 1)
+ *max_upscale = 1000;
+
+ if (*min_downscale == 1)
+ *min_downscale = 1000;
+}
+
+
static int fill_dc_scaling_info(const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info)
{
- int scale_w, scale_h;
+ int scale_w, scale_h, min_downscale, max_upscale;
memset(scaling_info, 0, sizeof(*scaling_info));
@@ -3755,17 +3856,25 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
/* DRM doesn't specify clipping on destination output. */
scaling_info->clip_rect = scaling_info->dst_rect;
- /* TODO: Validate scaling per-format with DC plane caps */
+ /* Validate scaling per-format with DC plane caps */
+ if (state->plane && state->plane->dev && state->fb) {
+ get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
+ &min_downscale, &max_upscale);
+ } else {
+ min_downscale = 250;
+ max_upscale = 16000;
+ }
+
scale_w = scaling_info->dst_rect.width * 1000 /
scaling_info->src_rect.width;
- if (scale_w < 250 || scale_w > 16000)
+ if (scale_w < min_downscale || scale_w > max_upscale)
return -EINVAL;
scale_h = scaling_info->dst_rect.height * 1000 /
scaling_info->src_rect.height;
- if (scale_h < 250 || scale_h > 16000)
+ if (scale_h < min_downscale || scale_h > max_upscale)
return -EINVAL;
/*
@@ -5322,6 +5431,10 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct amdgpu_display_manager *dm = &adev->dm;
+ unsigned long flags;
+#endif
int rc = 0;
if (enable) {
@@ -5337,7 +5450,23 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
return rc;
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
- return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+
+ if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+ return -EBUSY;
+
+ if (amdgpu_in_reset(adev))
+ return 0;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ spin_lock_irqsave(&dm->vblank_lock, flags);
+ dm->vblank_workqueue->dm = dm;
+ dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
+ dm->vblank_workqueue->enable = enable;
+ spin_unlock_irqrestore(&dm->vblank_lock, flags);
+ schedule_work(&dm->vblank_workqueue->mall_work);
+#endif
+
+ return 0;
}
static int dm_enable_vblank(struct drm_crtc *crtc)
@@ -5354,7 +5483,6 @@ static void dm_disable_vblank(struct drm_crtc *crtc)
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.reset = dm_crtc_reset_state,
.destroy = amdgpu_dm_crtc_destroy,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = dm_crtc_duplicate_state,
@@ -6329,12 +6457,51 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
static int dm_plane_helper_check_state(struct drm_plane_state *state,
struct drm_crtc_state *new_crtc_state)
{
- int max_downscale = 0;
- int max_upscale = INT_MAX;
+ struct drm_framebuffer *fb = state->fb;
+ int min_downscale, max_upscale;
+ int min_scale = 0;
+ int max_scale = INT_MAX;
+
+ /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
+ if (fb && state->crtc) {
+ /* Validate viewport to cover the case when only the position changes */
+ if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
+ int viewport_width = state->crtc_w;
+ int viewport_height = state->crtc_h;
+
+ if (state->crtc_x < 0)
+ viewport_width += state->crtc_x;
+ else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
+ viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
+
+ if (state->crtc_y < 0)
+ viewport_height += state->crtc_y;
+ else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
+ viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
+
+ /* If completely outside of screen, viewport_width and/or viewport_height will be negative,
+ * which is still OK to satisfy the condition below, thereby also covering these cases
+ * (when plane is completely outside of screen).
+ * x2 for width is because of pipe-split.
+ */
+ if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
+ return -EINVAL;
+ }
+
+ /* Get min/max allowed scaling factors from plane caps. */
+ get_min_max_dc_plane_scaling(state->crtc->dev, fb,
+ &min_downscale, &max_upscale);
+ /*
+ * Convert to drm convention: 16.16 fixed point, instead of dc's
+ * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
+ * dst/src, so min_scale = 1.0 / max_upscale, etc.
+ */
+ min_scale = (1000 << 16) / max_upscale;
+ max_scale = (1000 << 16) / min_downscale;
+ }
- /* TODO: These should be checked against DC plane caps */
return drm_atomic_helper_check_plane_state(
- state, new_crtc_state, max_downscale, max_upscale, true, true);
+ state, new_crtc_state, min_scale, max_scale, true, true);
}
static int dm_plane_atomic_check(struct drm_plane *plane,
@@ -8359,14 +8526,14 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
ret = PTR_ERR_OR_ZERO(conn_state);
if (ret)
- goto err;
+ goto out;
/* Attach crtc to drm_atomic_state*/
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
ret = PTR_ERR_OR_ZERO(crtc_state);
if (ret)
- goto err;
+ goto out;
/* force a restore */
crtc_state->mode_changed = true;
@@ -8376,17 +8543,15 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
ret = PTR_ERR_OR_ZERO(plane_state);
if (ret)
- goto err;
-
+ goto out;
/* Call commit internally with the state we just constructed */
ret = drm_atomic_commit(state);
- if (!ret)
- return 0;
-err:
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+out:
drm_atomic_state_put(state);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
return ret;
}
@@ -9593,6 +9758,10 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
amdgpu_dm_connector->max_vfreq = range->max_vfreq;
amdgpu_dm_connector->pixel_clock_mhz =
range->pixel_clock_mhz * 10;
+
+ connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
+ connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
+
break;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 1182dafcef02..8bfe901cf237 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -58,10 +58,10 @@
/* Forward declarations */
struct amdgpu_device;
struct drm_device;
-struct amdgpu_dm_irq_handler_data;
struct dc;
struct amdgpu_bo;
struct dmub_srv;
+struct dc_plane_state;
struct common_irq_params {
struct amdgpu_device *adev;
@@ -93,6 +93,20 @@ struct dm_compressor_info {
};
/**
+ * struct vblank_workqueue - Work to be executed in a separate thread during vblank
+ * @mall_work: work for mall stutter
+ * @dm: amdgpu display manager device
+ * @otg_inst: otg instance of which vblank is being set
+ * @enable: true if enable vblank
+ */
+struct vblank_workqueue {
+ struct work_struct mall_work;
+ struct amdgpu_display_manager *dm;
+ int otg_inst;
+ bool enable;
+};
+
+/**
* struct amdgpu_dm_backlight_caps - Information about backlight
*
* Describe the backlight support for ACPI or eDP AUX.
@@ -244,6 +258,15 @@ struct amdgpu_display_manager {
struct mutex audio_lock;
/**
+ * @vblank_lock:
+ *
+ * Guards access to deferred vblank work state.
+ */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ spinlock_t vblank_lock;
+#endif
+
+ /**
* @audio_component:
*
* Used to notify ELD changes to sound driver.
@@ -321,6 +344,10 @@ struct amdgpu_display_manager {
struct hdcp_workqueue *hdcp_workqueue;
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct vblank_workqueue *vblank_workqueue;
+#endif
+
struct drm_atomic_state *cached_state;
struct dc_state *cached_dc_state;
@@ -337,6 +364,13 @@ struct amdgpu_display_manager {
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
/**
+ * @active_vblank_irq_count:
+ *
+ * Number of currently active vblank IRQs.
+ */
+ uint32_t active_vblank_irq_count;
+
+ /**
* @mst_encoders:
*
* fake encoders used for DP MST.
@@ -412,11 +446,6 @@ struct amdgpu_dm_connector {
extern const struct amdgpu_ip_block_version dm_ip_block;
-struct amdgpu_framebuffer;
-struct amdgpu_display_manager;
-struct dc_validation_set;
-struct dc_plane_state;
-
struct dm_plane_state {
struct drm_plane_state base;
struct dc_plane_state *dc_state;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 11459fb09a37..360952129b6d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -691,7 +691,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
return size;
}
-/**
+/*
* Returns the DMCUB tracebuffer contents.
* Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_tracebuffer
*/
@@ -735,7 +735,7 @@ static int dmub_tracebuffer_show(struct seq_file *m, void *data)
return 0;
}
-/**
+/*
* Returns the DMCUB firmware state contents.
* Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_fw_state
*/
@@ -1063,7 +1063,7 @@ static int dp_dsc_fec_support_show(struct seq_file *m, void *data)
* echo 0 > /sys/kernel/debug/dri/0/DP-X/trigger_hotplug
*
*/
-static ssize_t dp_trigger_hotplug(struct file *f, const char __user *buf,
+static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
@@ -2214,9 +2214,9 @@ static const struct file_operations dp_dsc_slice_bpg_offset_debugfs_fops = {
.llseek = default_llseek
};
-static const struct file_operations dp_trigger_hotplug_debugfs_fops = {
+static const struct file_operations trigger_hotplug_debugfs_fops = {
.owner = THIS_MODULE,
- .write = dp_trigger_hotplug,
+ .write = trigger_hotplug,
.llseek = default_llseek
};
@@ -2270,7 +2270,6 @@ static const struct {
const struct file_operations *fops;
} dp_debugfs_entries[] = {
{"link_settings", &dp_link_settings_debugfs_fops},
- {"trigger_hotplug", &dp_trigger_hotplug_debugfs_fops},
{"phy_settings", &dp_phy_settings_debugfs_fop},
{"test_pattern", &dp_phy_test_pattern_fops},
#ifdef CONFIG_DRM_AMD_DC_HDCP
@@ -2367,6 +2366,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file("output_bpc", 0644, dir, connector,
&output_bpc_fops);
+ debugfs_create_file("trigger_hotplug", 0644, dir, connector,
+ &trigger_hotplug_debugfs_fops);
+
connector->debugfs_dpcd_address = 0;
connector->debugfs_dpcd_size = 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index c2cd184f0bbd..0cdbfcd475ec 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -376,7 +376,7 @@ static void event_cpirq(struct work_struct *work)
}
-void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
+void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
{
int i = 0;
@@ -385,6 +385,7 @@ void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
}
+ sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);
kfree(hdcp_work->srm);
kfree(hdcp_work->srm_temp);
kfree(hdcp_work);
@@ -449,11 +450,12 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal);
display->controller = CONTROLLER_ID_D0 + config->otg_inst;
- display->dig_fe = config->stream_enc_inst;
- link->dig_be = config->link_enc_inst;
+ display->dig_fe = config->dig_fe;
+ link->dig_be = config->dig_be;
link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
- link->dp.mst_supported = config->mst_supported;
+ link->dp.assr_enabled = config->assr_enabled;
+ link->dp.mst_enabled = config->mst_enabled;
display->adjust.disable = 1;
link->adjust.auth_delay = 3;
link->adjust.hdcp1.disable = 0;
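
Note on the hdcp_destroy() change above: teardown now receives the kobject so it can remove the sysfs binary attribute the HDCP workqueue exposes (apparently the SRM file, given the adjacent srm frees) instead of leaving a stale node behind once the work structures are freed. A minimal sketch of the create/remove pairing this implies; the attribute name, mode, and helper names are illustrative, only sysfs_create_bin_file()/sysfs_remove_bin_file() are real kernel API:

	#include <linux/kobject.h>
	#include <linux/sysfs.h>

	/* Illustrative attribute standing in for hdcp_work[0].attr */
	static struct bin_attribute srm_attr = {
		.attr = { .name = "hdcp_srm", .mode = 0664 },
		/* .read / .write handlers omitted in this sketch */
	};

	static int srm_sysfs_setup(struct kobject *kobj)
	{
		/* created once at init; must be removed against the same kobject */
		return sysfs_create_bin_file(kobj, &srm_attr);
	}

	static void srm_sysfs_teardown(struct kobject *kobj)
	{
		/* mirrors the new sysfs_remove_bin_file() call in hdcp_destroy() */
		sysfs_remove_bin_file(kobj, &srm_attr);
	}
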
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 5159b3a5e5b0..09294ff122fe 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -69,7 +69,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
-void hdcp_destroy(struct hdcp_workqueue *work);
+void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *work);
struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index f6f487e9fe2d..5750818db8f6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -25,7 +25,6 @@
#include <linux/string.h>
#include <linux/acpi.h>
-#include <linux/version.h>
#include <linux/i2c.h>
#include <drm/drm_probe_helper.h>
@@ -527,11 +526,11 @@ bool dm_helpers_submit_i2c(
bool dm_helpers_dp_write_dsc_enable(
struct dc_context *ctx,
const struct dc_stream_state *stream,
- bool enable
-)
+ bool enable)
{
uint8_t enable_dsc = enable ? 1 : 0;
struct amdgpu_dm_connector *aconnector;
+ uint8_t ret = 0;
if (!stream)
return false;
@@ -542,13 +541,13 @@ bool dm_helpers_dp_write_dsc_enable(
if (!aconnector->dsc_aux)
return false;
- return (drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1) >= 0);
+ ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1);
}
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
return dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
- return false;
+ return (ret > 0);
}
bool dm_helpers_is_dp_sink_present(struct dc_link *link)
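
For the dm_helpers_dp_write_dsc_enable() rework above: drm_dp_dpcd_write() returns the number of bytes transferred, or a negative errno, so a one-byte write to DP_DSC_ENABLE only really succeeded when the return value is positive; the old ">= 0" test would also have counted a zero-byte transfer as success. A minimal sketch of that pattern (the function name and aux handle are illustrative):

	#include <drm/drm_dp_helper.h>

	/* Success means exactly the requested byte reached the sink. */
	static bool write_dsc_enable_sketch(struct drm_dp_aux *aux, bool enable)
	{
		u8 val = enable ? 1 : 0;
		ssize_t ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &val, 1);

		return ret > 0;
	}
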
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 26ed70e5538a..e0000c180ed1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -662,6 +662,20 @@ static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
__func__);
}
+static int amdgpu_dm_set_vline0_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int crtc_id,
+ enum amdgpu_interrupt_state state)
+{
+ return dm_irq_state(
+ adev,
+ source,
+ crtc_id,
+ state,
+ IRQ_TYPE_VLINE0,
+ __func__);
+}
+
static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int crtc_id,
@@ -681,6 +695,11 @@ static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
.process = amdgpu_dm_irq_handler,
};
+static const struct amdgpu_irq_src_funcs dm_vline0_irq_funcs = {
+ .set = amdgpu_dm_set_vline0_irq_state,
+ .process = amdgpu_dm_irq_handler,
+};
+
static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
.set = amdgpu_dm_set_vupdate_irq_state,
.process = amdgpu_dm_irq_handler,
@@ -702,6 +721,9 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
adev->crtc_irq.num_types = adev->mode_info.num_crtc;
adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
+ adev->vline0_irq.num_types = adev->mode_info.num_crtc;
+ adev->vline0_irq.funcs = &dm_vline0_irq_funcs;
+
adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;
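
The new dm_vline0_irq_funcs block follows the same shape as the existing CRTC and vupdate sources: one amdgpu_irq_src per interrupt class, sized by num_crtc, with .set toggling the hardware enable and .process funneling into amdgpu_dm_irq_handler. How a caller arms it is not shown in this hunk; a hedged sketch, assuming the usual amdgpu_irq_get()/amdgpu_irq_put() reference counting on the per-CRTC type index (driver-internal headers assumed):

	/* Illustrative only; error handling and locking omitted. */
	static int vline0_irq_arm_sketch(struct amdgpu_device *adev,
					 unsigned int crtc_id, bool enable)
	{
		if (enable)
			return amdgpu_irq_get(adev, &adev->vline0_irq, crtc_id);

		return amdgpu_irq_put(adev, &adev->vline0_irq, crtc_id);
	}
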
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 8ab0b9060d2b..41b09ab22233 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -23,7 +23,6 @@
*
*/
-#include <linux/version.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
@@ -833,6 +832,9 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (computed_streams[i])
continue;
+ if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
+ return false;
+
mutex_lock(&aconnector->mst_mgr.lock);
if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
mutex_unlock(&aconnector->mst_mgr.lock);
@@ -850,7 +852,8 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
stream = dc_state->streams[i];
if (stream->timing.flags.DSC == 1)
- dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream);
+ if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
+ return false;
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index bf8fe0471b8f..5bf2f2375b40 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -69,5 +69,7 @@ AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
DC_DMUB += dc_dmub_srv.o
+DC_EDID += dc_edid_parser.o
AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
-AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB)
+AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
+AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 24ed03d8cda7..6767fab55c26 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -73,12 +73,9 @@ uint16_t fixed_point_to_int_frac(
return result;
}
-/**
-* convert_float_matrix
-* This converts a double into HW register spec defined format S2D13.
-* @param :
-* @return None
-*/
+/*
+ * convert_float_matrix - This converts a double into HW register spec defined format S2D13.
+ */
void convert_float_matrix(
uint16_t *matrix,
struct fixed31_32 *flt,
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
index b2fc4f8e6482..ad04ef98e652 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
@@ -49,20 +49,24 @@ bool is_rgb_cspace(enum dc_color_space output_color_space)
}
}
-bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_child_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
return true;
- if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ if (pipe_ctx->bottom_pipe && is_child_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ return true;
+ if (pipe_ctx->next_odm_pipe && is_child_pipe_tree_visible(pipe_ctx->next_odm_pipe))
return true;
return false;
}
-bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_parent_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
return true;
- if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+ if (pipe_ctx->top_pipe && is_parent_pipe_tree_visible(pipe_ctx->top_pipe))
+ return true;
+ if (pipe_ctx->prev_odm_pipe && is_parent_pipe_tree_visible(pipe_ctx->prev_odm_pipe))
return true;
return false;
}
@@ -71,9 +75,13 @@ bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
return true;
- if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+ if (pipe_ctx->top_pipe && is_parent_pipe_tree_visible(pipe_ctx->top_pipe))
+ return true;
+ if (pipe_ctx->bottom_pipe && is_child_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ return true;
+ if (pipe_ctx->prev_odm_pipe && is_parent_pipe_tree_visible(pipe_ctx->prev_odm_pipe))
return true;
- if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ if (pipe_ctx->next_odm_pipe && is_child_pipe_tree_visible(pipe_ctx->next_odm_pipe))
return true;
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.h b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
index 7c0cbf47e8ce..b061497480b8 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
+++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h
@@ -30,9 +30,9 @@
bool is_rgb_cspace(enum dc_color_space output_color_space);
-bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+bool is_child_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
-bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+bool is_parent_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 23a373ca94b5..c67d21a5ee52 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -911,11 +911,11 @@ static enum bp_result get_ss_info_from_tbl(
* ver 2.1 can co-exist with SS_Info table. Expect ASIC_InternalSS_Info ver 3.1,
* there is only one entry for each signal /ss id. However, there is
* no planning of supporting multiple spread Sprectum entry for EverGreen
- * @param [in] this
- * @param [in] signal, ASSignalType to be converted to info index
- * @param [in] index, number of entries that match the converted info index
- * @param [out] ss_info, sprectrum information structure,
- * @return Bios parser result code
+ * @dcb: pointer to the DC BIOS
+ * @signal: ASSignalType to be converted to info index
+ * @index: number of entries that match the converted info index
+ * @ss_info: sprectrum information structure,
+ * return: Bios parser result code
*/
static enum bp_result bios_parser_get_spread_spectrum_info(
struct dc_bios *dcb,
@@ -985,10 +985,10 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
* There can not be more than 1 entry for ASIC_InternalSS_Info Ver 2.1 or
* SS_Info.
*
- * @param this
- * @param id, spread sprectrum info index
- * @param pSSinfo, sprectrum information structure,
- * @return Bios parser result code
+ * @bp: pointer to the BIOS parser
+ * @id: spread sprectrum info index
+ * @ss_info: sprectrum information structure,
+ * return: BIOS parser result code
*/
static enum bp_result get_ss_info_from_tbl(
struct bios_parser *bp,
@@ -1011,9 +1011,10 @@ static enum bp_result get_ss_info_from_tbl(
* from the VBIOS
* There will not be multiple entry for Ver 2.1
*
- * @param id, spread sprectrum info index
- * @param pSSinfo, sprectrum information structure,
- * @return Bios parser result code
+ * @bp: pointer to the Bios parser
+ * @id: spread sprectrum info index
+ * @info: sprectrum information structure,
+ * return: Bios parser result code
*/
static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
struct bios_parser *bp,
@@ -1076,9 +1077,10 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
* of entries that matches the id
* for, the SS_Info table, there should not be more than 1 entry match.
*
- * @param [in] id, spread sprectrum id
- * @param [out] pSSinfo, sprectrum information structure,
- * @return Bios parser result code
+ * @bp: pointer to the Bios parser
+ * @id: spread sprectrum id
+ * @ss_info: sprectrum information structure,
+ * return: Bios parser result code
*/
static enum bp_result get_ss_info_from_ss_info_table(
struct bios_parser *bp,
@@ -1451,16 +1453,14 @@ static enum bp_result get_embedded_panel_info_v1_3(
}
/**
- * bios_parser_get_encoder_cap_info
+ * bios_parser_get_encoder_cap_info - get encoder capability
+ * information of input object id
*
- * @brief
- * Get encoder capability information of input object id
- *
- * @param object_id, Object id
- * @param object_id, encoder cap information structure
- *
- * @return Bios parser result code
+ * @dcb: pointer to the DC BIOS
+ * @object_id: object id
+ * @info: encoder cap information structure
*
+ * return: Bios parser result code
*/
static enum bp_result bios_parser_get_encoder_cap_info(
struct dc_bios *dcb,
@@ -1490,17 +1490,12 @@ static enum bp_result bios_parser_get_encoder_cap_info(
}
/**
- * get_encoder_cap_record
- *
- * @brief
- * Get encoder cap record for the object
- *
- * @param object, ATOM object
+ * get_encoder_cap_record - Get encoder cap record for the object
*
- * @return atom encoder cap record
- *
- * @note
- * search all records to find the ATOM_ENCODER_CAP_RECORD_V2 record
+ * @bp: pointer to the BIOS parser
+ * @object: ATOM object
+ * return: atom encoder cap record
+ * note: search all records to find the ATOM_ENCODER_CAP_RECORD_V2 record
*/
static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
struct bios_parser *bp,
@@ -1557,8 +1552,9 @@ static uint32_t get_ss_entry_number_from_ss_info_tbl(
* Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table from
* the VBIOS that match the SSid (to be converted from signal)
*
- * @param[in] signal, ASSignalType to be converted to SSid
- * @return number of SS Entry that match the signal
+ * @dcb: pointer to the DC BIOS
+ * @signal: ASSignalType to be converted to SSid
+ * return: number of SS Entry that match the signal
*/
static uint32_t bios_parser_get_ss_entry_number(
struct dc_bios *dcb,
@@ -1608,10 +1604,10 @@ static uint32_t bios_parser_get_ss_entry_number(
* get_ss_entry_number_from_ss_info_tbl
* Get Number of spread spectrum entry from the SS_Info table from the VBIOS.
*
- * @note There can only be one entry for each id for SS_Info Table
- *
- * @param [in] id, spread spectrum id
- * @return number of SS Entry that match the id
+ * @bp: pointer to the BIOS parser
+ * @id: spread spectrum id
+ * return: number of SS Entry that match the id
+ * note: There can only be one entry for each id for SS_Info Table
*/
static uint32_t get_ss_entry_number_from_ss_info_tbl(
struct bios_parser *bp,
@@ -1679,8 +1675,9 @@ static uint32_t get_ss_entry_number_from_ss_info_tbl(
* There can not be more than 1 entry for ASIC_InternalSS_Info Ver 2.1 or
* SS_Info.
*
- * @param id, spread sprectrum info index
- * @return Bios parser result code
+ * @bp: pointer to the BIOS parser
+ * @id: spread sprectrum info index
+ * return: Bios parser result code
*/
static uint32_t get_ss_entry_number(struct bios_parser *bp, uint32_t id)
{
@@ -1696,8 +1693,9 @@ static uint32_t get_ss_entry_number(struct bios_parser *bp, uint32_t id)
* Ver 2.1 from the VBIOS
* There will not be multiple entry for Ver 2.1
*
- * @param id, spread sprectrum info index
- * @return number of SS Entry that match the id
+ * @bp: pointer to the BIOS parser
+ * @id: spread sprectrum info index
+ * return: number of SS Entry that match the id
*/
static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
struct bios_parser *bp,
@@ -1731,8 +1729,9 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
* Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table of
* the VBIOS that matches id
*
- * @param[in] id, spread sprectrum id
- * @return number of SS Entry that match the id
+ * @bp: pointer to the BIOS parser
+ * @id: spread sprectrum id
+ * return: number of SS Entry that match the id
*/
static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
struct bios_parser *bp,
@@ -1767,10 +1766,11 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
* bios_parser_get_gpio_pin_info
* Get GpioPin information of input gpio id
*
- * @param gpio_id, GPIO ID
- * @param info, GpioPin information structure
- * @return Bios parser result code
- * @note
+ * @dcb: pointer to the DC BIOS
+ * @gpio_id: GPIO ID
+ * @info: GpioPin information structure
+ * return: Bios parser result code
+ * note:
* to get the GPIO PIN INFO, we need:
* 1. get the GPIO_ID from other object table, see GetHPDInfo()
* 2. in DATA_TABLE.GPIO_Pin_LUT, search all records, to get the registerA
@@ -2197,13 +2197,10 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id)
}
/**
- * bios_parser_set_scratch_critical_state
- *
- * @brief
- * update critical state bit in VBIOS scratch register
- *
- * @param
- * bool - to set or reset state
+ * bios_parser_set_scratch_critical_state - update critical state
+ * bit in VBIOS scratch register
+ * @dcb: pointer to the DC BIOS
+ * @state: set or reset state
*/
static void bios_parser_set_scratch_critical_state(
struct dc_bios *dcb,
@@ -2222,7 +2219,7 @@ static void bios_parser_set_scratch_critical_state(
* bios_parser *bp - [in]BIOS parser handler to get master data table
* integrated_info *info - [out] store and output integrated info
*
- * @return
+ * return:
* enum bp_result - BP_RESULT_OK if information is available,
* BP_RESULT_BADBIOSTABLE otherwise.
*/
@@ -2372,7 +2369,7 @@ static enum bp_result get_integrated_info_v8(
* bios_parser *bp - [in]BIOS parser handler to get master data table
* integrated_info *info - [out] store and output integrated info
*
- * @return
+ * return:
* enum bp_result - BP_RESULT_OK if information is available,
* BP_RESULT_BADBIOSTABLE otherwise.
*/
@@ -2509,7 +2506,7 @@ static enum bp_result get_integrated_info_v9(
* bios_parser *bp - [in]BIOS parser handler to get master data table
* integrated_info *info - [out] store and output integrated info
*
- * @return
+ * return:
* enum bp_result - BP_RESULT_OK if information is available,
* BP_RESULT_BADBIOSTABLE otherwise.
*/
@@ -2585,7 +2582,7 @@ static struct integrated_info *bios_parser_create_integrated_info(
return NULL;
}
-enum bp_result update_slot_layout_info(
+static enum bp_result update_slot_layout_info(
struct dc_bios *dcb,
unsigned int i,
struct slot_layout_info *slot_layout_info,
@@ -2689,7 +2686,7 @@ enum bp_result update_slot_layout_info(
}
-enum bp_result get_bracket_layout_record(
+static enum bp_result get_bracket_layout_record(
struct dc_bios *dcb,
unsigned int bracket_layout_id,
struct slot_layout_info *slot_layout_info)
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 670c26583817..9f9fda3118d1 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -485,10 +485,11 @@ static struct atom_hpd_int_record *get_hpd_record(
* bios_parser_get_gpio_pin_info
* Get GpioPin information of input gpio id
*
- * @param gpio_id, GPIO ID
- * @param info, GpioPin information structure
- * @return Bios parser result code
- * @note
+ * @dcb: pointer to the DC BIOS
+ * @gpio_id: GPIO ID
+ * @info: GpioPin information structure
+ * return: Bios parser result code
+ * note:
* to get the GPIO PIN INFO, we need:
* 1. get the GPIO_ID from other object table, see GetHPDInfo()
* 2. in DATA_TABLE.GPIO_Pin_LUT, search all records,
@@ -801,11 +802,11 @@ static enum bp_result get_ss_info_v4_2(
* ver 3.1,
* there is only one entry for each signal /ss id. However, there is
* no planning of supporting multiple spread Sprectum entry for EverGreen
- * @param [in] this
- * @param [in] signal, ASSignalType to be converted to info index
- * @param [in] index, number of entries that match the converted info index
- * @param [out] ss_info, sprectrum information structure,
- * @return Bios parser result code
+ * @dcb: pointer to the DC BIOS
+ * @signal: ASSignalType to be converted to info index
+ * @index: number of entries that match the converted info index
+ * @ss_info: sprectrum information structure,
+ * return: Bios parser result code
*/
static enum bp_result bios_parser_get_spread_spectrum_info(
struct dc_bios *dcb,
@@ -1196,13 +1197,11 @@ static bool bios_parser_is_accelerated_mode(
}
/**
- * bios_parser_set_scratch_critical_state
+ * bios_parser_set_scratch_critical_state - update critical state bit
+ * in VBIOS scratch register
*
- * @brief
- * update critical state bit in VBIOS scratch register
- *
- * @param
- * bool - to set or reset state
+ * @dcb: pointer to the DC BIO
+ * @state: set or reset state
*/
static void bios_parser_set_scratch_critical_state(
struct dc_bios *dcb,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 070459e3e407..afc10b954ffa 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -245,6 +245,23 @@ static enum bp_result encoder_control_digx_v3(
cntl->enable_dp_audio);
params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_888:
+ params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_161616:
+ params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+ break;
+ default:
+ break;
+ }
+
if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
result = BP_RESULT_OK;
@@ -274,6 +291,23 @@ static enum bp_result encoder_control_digx_v4(
cntl->enable_dp_audio));
params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_888:
+ params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_161616:
+ params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+ break;
+ default:
+ break;
+ }
+
if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
result = BP_RESULT_OK;
@@ -1057,6 +1091,19 @@ static enum bp_result set_pixel_clock_v5(
* driver choose program it itself, i.e. here we program it
* to 888 by default.
*/
+ if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
+ switch (bp_params->color_depth) {
+ case TRANSMITTER_COLOR_DEPTH_30:
+ /* yes this is correct, the atom define is wrong */
+ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_36:
+ /* yes this is correct, the atom define is wrong */
+ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+ break;
+ default:
+ break;
+ }
if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
result = BP_RESULT_OK;
@@ -1135,6 +1182,20 @@ static enum bp_result set_pixel_clock_v6(
* driver choose program it itself, i.e. here we pass required
* target rate that includes deep color.
*/
+ if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
+ switch (bp_params->color_depth) {
+ case TRANSMITTER_COLOR_DEPTH_30:
+ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_36:
+ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_48:
+ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+ break;
+ default:
+ break;
+ }
if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
result = BP_RESULT_OK;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
index 48b4ef03fc8f..5b77251e0590 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
@@ -114,18 +114,14 @@ bool dal_cmd_table_helper_controller_id_to_atom(
}
/**
-* translate_transmitter_bp_to_atom
-*
-* @brief
-* Translate the Transmitter to the corresponding ATOM BIOS value
-*
-* @param
-* input transmitter
-* output digitalTransmitter
-* // =00: Digital Transmitter1 ( UNIPHY linkAB )
-* // =01: Digital Transmitter2 ( UNIPHY linkCD )
-* // =02: Digital Transmitter3 ( UNIPHY linkEF )
-*/
+ * translate_transmitter_bp_to_atom - Translate the Transmitter to the
+ * corresponding ATOM BIOS value
+ * @t: transmitter
+ * returns: output digitalTransmitter
+ * // =00: Digital Transmitter1 ( UNIPHY linkAB )
+ * // =01: Digital Transmitter2 ( UNIPHY linkCD )
+ * // =02: Digital Transmitter3 ( UNIPHY linkEF )
+ */
uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
enum transmitter t)
{
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 7736c92d55c4..455ee2be15a3 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -128,18 +128,14 @@ bool dal_cmd_table_helper_controller_id_to_atom2(
}
/**
-* translate_transmitter_bp_to_atom
-*
-* @brief
-* Translate the Transmitter to the corresponding ATOM BIOS value
-*
-* @param
-* input transmitter
-* output digitalTransmitter
-* // =00: Digital Transmitter1 ( UNIPHY linkAB )
-* // =01: Digital Transmitter2 ( UNIPHY linkCD )
-* // =02: Digital Transmitter3 ( UNIPHY linkEF )
-*/
+ * translate_transmitter_bp_to_atom2 - Translate the Transmitter to the
+ * corresponding ATOM BIOS value
+ * @t: transmitter
+ * returns: digitalTransmitter
+ * // =00: Digital Transmitter1 ( UNIPHY linkAB )
+ * // =01: Digital Transmitter2 ( UNIPHY linkCD )
+ * // =02: Digital Transmitter3 ( UNIPHY linkEF )
+ */
uint8_t dal_cmd_table_helper_transmitter_bp_to_atom2(
enum transmitter t)
{
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index ef41b287cbe2..e633f8a51edb 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -106,7 +106,6 @@ static void calculate_bandwidth(
bool lpt_enabled;
enum bw_defines sclk_message;
enum bw_defines yclk_message;
- enum bw_defines v_filter_init_mode[maximum_number_of_surfaces];
enum bw_defines tiling_mode[maximum_number_of_surfaces];
enum bw_defines surface_type[maximum_number_of_surfaces];
enum bw_defines voltage;
@@ -792,12 +791,8 @@ static void calculate_bandwidth(
data->v_filter_init[i] = bw_add(data->v_filter_init[i], bw_int_to_fixed(1));
}
if (data->stereo_mode[i] == bw_def_top_bottom) {
- v_filter_init_mode[i] = bw_def_manual;
data->v_filter_init[i] = bw_min2(data->v_filter_init[i], bw_int_to_fixed(4));
}
- else {
- v_filter_init_mode[i] = bw_def_auto;
- }
if (data->stereo_mode[i] == bw_def_top_bottom) {
data->num_lines_at_frame_start = bw_int_to_fixed(1);
}
@@ -2730,7 +2725,7 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
}
-/**
+/*
* Compare calculated (required) clocks against the clocks available at
* maximum voltage (max Performance Level).
*/
@@ -3001,13 +2996,12 @@ static bool all_displays_in_sync(const struct pipe_ctx pipe[],
return true;
}
-/**
+/*
* Return:
* true - Display(s) configuration supported.
* In this case 'calcs_output' contains data for HW programming
* false - Display(s) configuration not supported (not enough bandwidth).
*/
-
bool bw_calcs(struct dc_context *ctx,
const struct bw_calcs_dceip *dceip,
const struct bw_calcs_vbios *vbios,
@@ -3028,7 +3022,7 @@ bool bw_calcs(struct dc_context *ctx,
calcs_output->all_displays_in_sync = false;
if (data->number_of_displays != 0) {
- uint8_t yclk_lvl, sclk_lvl;
+ uint8_t yclk_lvl;
struct bw_fixed high_sclk = vbios->high_sclk;
struct bw_fixed mid1_sclk = vbios->mid1_sclk;
struct bw_fixed mid2_sclk = vbios->mid2_sclk;
@@ -3049,7 +3043,6 @@ bool bw_calcs(struct dc_context *ctx,
calculate_bandwidth(dceip, vbios, data);
yclk_lvl = data->y_clk_level;
- sclk_lvl = data->sclk_level;
calcs_output->nbp_state_change_enable =
data->nbp_state_change_enable;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 75b8240ed059..e133edc587d3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -187,17 +187,6 @@ static void ramp_up_dispclk_with_dpp(
clk_mgr->base.clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
}
-static bool is_mpo_enabled(struct dc_state *context)
-{
- int i;
-
- for (i = 0; i < context->stream_count; i++) {
- if (context->stream_status[i].plane_count > 1)
- return true;
- }
- return false;
-}
-
static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
@@ -295,22 +284,9 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
if (pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
- // Only increase clocks when display is active and MPO is enabled
- if (display_count && is_mpo_enabled(context)) {
- pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu,
- ((new_clocks->fclk_khz / 1000) * 101) / 100);
- pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu,
- ((new_clocks->dcfclk_khz / 1000) * 101) / 100);
- pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu,
- (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
- } else {
- pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu,
- new_clocks->fclk_khz / 1000);
- pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu,
- new_clocks->dcfclk_khz / 1000);
- pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu,
- (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
- }
+ pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
+ pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
+ pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
}
}
}
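
For context on the rv1_update_clocks() simplification above: the removed MPO branch converted kHz to MHz and then padded the hard minimums by one percent, ((fclk_khz / 1000) * 101) / 100, while the surviving path is the plain division. A standalone check of what that margin amounted to, with an illustrative clock value:

	#include <stdio.h>

	int main(void)
	{
		unsigned int fclk_khz = 1600000;                            /* illustrative */
		unsigned int plain_mhz = fclk_khz / 1000;                   /* 1600 MHz */
		unsigned int padded_mhz = ((fclk_khz / 1000) * 101) / 100;  /* 1616 MHz */

		printf("plain: %u MHz, with 1%% MPO headroom: %u MHz\n",
		       plain_mhz, padded_mhz);
		return 0;
	}
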
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index f2114bc910bf..ec9dc265cde0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -257,8 +257,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
if (update_dppclk || update_dispclk)
dcn20_update_clocks_update_dentist(clk_mgr);
// always update dtos unless clock is lowered and not safe to lower
- if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index ab98c259ef69..c7e5a64e06af 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -146,15 +146,15 @@ static noinline void dcn3_build_wm_range_table(struct clk_mgr_internal *clk_mgr)
clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
/* Set D - MALL - SR enter and exit times adjusted for MALL */
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].valid = true;
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = pstate_latency_us;
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = 2;
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = 4;
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = 0;
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
-// clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].valid = true;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = pstate_latency_us;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = 2;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = 4;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = 0;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
}
void dcn3_init_clocks(struct clk_mgr *clk_mgr_base)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
index cfa8e02cf103..68942bbc7472 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
@@ -103,7 +103,7 @@ int dcn301_smu_send_msg_with_param(
/* Trigger the message transaction by writing the message ID */
REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);
- result = dcn301_smu_wait_for_response(clk_mgr, 10, 1000);
+ result = dcn301_smu_wait_for_response(clk_mgr, 10, 200000);
ASSERT(result == VBIOSSMC_Result_OK);
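
On the dcn301_smu_wait_for_response() timeout bump above: reading the call site, the two arguments look like a per-poll delay in microseconds and a retry count (that interpretation is an assumption, since the callee is not shown here), so the change stretches the worst-case wait for VBIOSSMC_Result_OK from roughly 10 ms to about 2 s. The arithmetic, as a standalone check:

	#include <stdio.h>

	int main(void)
	{
		unsigned int delay_us = 10;                          /* per-poll delay */
		unsigned long old_budget_us = delay_us * 1000UL;     /* ~10,000 us */
		unsigned long new_budget_us = delay_us * 200000UL;   /* 2,000,000 us */

		printf("old: %lu us (~10 ms), new: %lu us (2 s)\n",
		       old_budget_us, new_budget_us);
		return 0;
	}
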
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 991b9c5beaa3..aadb801447a7 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -75,7 +75,8 @@ int vg_get_active_display_cnt_wa(
const struct dc_link *link = dc->links[i];
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
- if (link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ if (link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 58eb0d69873a..8f8a13c7cf73 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -175,6 +175,8 @@ static bool create_links(
connectors_num = bios->funcs->get_connectors_number(bios);
+ DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);
+
if (connectors_num > ENUM_ID_COUNT) {
dm_error(
"DC: Number of connectors %d exceeds maximum of %d!\n",
@@ -193,6 +195,8 @@ static bool create_links(
struct link_init_data link_init_params = {0};
struct dc_link *link;
+ DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
+
link_init_params.ctx = dc->ctx;
/* next BIOS object table connector */
link_init_params.connector_index = i;
@@ -201,30 +205,14 @@ static bool create_links(
link = link_create(&link_init_params);
if (link) {
- bool should_destory_link = false;
-
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
- if (dc->config.edp_not_connected) {
- if (!IS_DIAG_DC(dc->ctx->dce_environment))
- should_destory_link = true;
- } else {
- enum dc_connection_type type;
- dc_link_detect_sink(link, &type);
- if (type == dc_connection_none)
- should_destory_link = true;
- }
- }
-
- if (dc->config.force_enum_edp || !should_destory_link) {
dc->links[dc->link_count] = link;
link->dc = dc;
++dc->link_count;
- } else {
- link_destroy(&link);
- }
}
}
+ DC_LOG_DC("BIOS object table - end");
+
for (i = 0; i < num_virtual_links; i++) {
struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
struct encoder_init_data enc_init = {0};
@@ -284,20 +272,16 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
}
/**
- *****************************************************************************
- * Function: dc_stream_adjust_vmin_vmax
+ * dc_stream_adjust_vmin_vmax:
*
- * @brief
- * Looks up the pipe context of dc_stream_state and updates the
- * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
- * Rate, which is a power-saving feature that targets reducing panel
- * refresh rate while the screen is static
+ * Looks up the pipe context of dc_stream_state and updates the
+ * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
+ * Rate, which is a power-saving feature that targets reducing panel
+ * refresh rate while the screen is static
*
- * @param [in] dc: dc reference
- * @param [in] stream: Initial dc stream state
- * @param [in] adjust: Updated parameters for vertical_total_min and
- * vertical_total_max
- *****************************************************************************
+ * @dc: dc reference
+ * @stream: Initial dc stream state
+ * @adjust: Updated parameters for vertical_total_min and vertical_total_max
*/
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct dc_stream_state *stream,
@@ -355,6 +339,7 @@ bool dc_stream_get_crtc_position(struct dc *dc,
* @dc: DC Object
* @stream: The stream to configure CRC on.
* @enable: Enable CRC if true, disable otherwise.
+ * @crc_window: CRC window (x/y start/end) information
* @continuous: Capture CRC on every frame if true. Otherwise, only capture
* once.
*
@@ -420,7 +405,9 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
* dc_stream_get_crc() - Get CRC values for the given stream.
* @dc: DC object
* @stream: The DC stream state of the stream to get CRCs from.
- * @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
+ * @r_cr: CRC value for the first of the 3 channels stored here.
+ * @g_y: CRC value for the second of the 3 channels stored here.
+ * @b_cb: CRC value for the third of the 3 channels stored here.
*
* dc_stream_configure_crc needs to be called beforehand to enable CRCs.
* Return false if stream is not found, or if CRCs are not enabled.
@@ -707,7 +694,6 @@ static bool dc_construct(struct dc *dc,
}
dc->dcn_ip = dcn_ip;
- dc->soc_bounding_box = init_params->soc_bounding_box;
#endif
if (!dc_construct_ctx(dc, init_params)) {
@@ -757,6 +743,10 @@ static bool dc_construct(struct dc *dc,
if (!dc->res_pool)
goto fail;
+ /* set i2c speed if not done by the respective dcnxxx__resource.c */
+ if (dc->caps.i2c_speed_in_khz_hdcp == 0)
+ dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
+
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
if (!dc->clk_mgr)
goto fail;
@@ -764,8 +754,6 @@ static bool dc_construct(struct dc *dc,
dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
#endif
- dc->debug.force_ignore_link_settings = init_params->force_ignore_link_settings;
-
if (dc->res_pool->funcs->update_bw_bounding_box)
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
@@ -803,7 +791,8 @@ static void disable_all_writeback_pipes_for_stream(
stream->writeback_info[i].wb_enabled = false;
}
-void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock)
+static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
+ struct dc_stream_state *stream, bool lock)
{
int i = 0;
@@ -964,19 +953,15 @@ struct dc *dc_create(const struct dc_init_data *init_params)
struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
unsigned int full_pipe_count;
- if (NULL == dc)
- goto alloc_fail;
+ if (!dc)
+ return NULL;
if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
- if (false == dc_construct_ctx(dc, init_params)) {
- dc_destruct(dc);
- goto construct_fail;
- }
+ if (!dc_construct_ctx(dc, init_params))
+ goto destruct_dc;
} else {
- if (false == dc_construct(dc, init_params)) {
- dc_destruct(dc);
- goto construct_fail;
- }
+ if (!dc_construct(dc, init_params))
+ goto destruct_dc;
full_pipe_count = dc->res_pool->pipe_count;
if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
@@ -1007,15 +992,36 @@ struct dc *dc_create(const struct dc_init_data *init_params)
return dc;
-construct_fail:
+destruct_dc:
+ dc_destruct(dc);
kfree(dc);
-
-alloc_fail:
return NULL;
}
+static void detect_edp_presence(struct dc *dc)
+{
+ struct dc_link *edp_link = get_edp_link(dc);
+ bool edp_sink_present = true;
+
+ if (!edp_link)
+ return;
+
+ if (dc->config.edp_not_connected) {
+ edp_sink_present = false;
+ } else {
+ enum dc_connection_type type;
+ dc_link_detect_sink(edp_link, &type);
+ if (type == dc_connection_none)
+ edp_sink_present = false;
+ }
+
+ edp_link->edp_sink_present = edp_sink_present;
+}
+
void dc_hardware_init(struct dc *dc)
{
+
+ detect_edp_presence(dc);
if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
dc->hwss.init_hw(dc);
}
@@ -1493,7 +1499,7 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
enum dc_status result = DC_ERROR_UNEXPECTED;
int i;
- if (false == context_changed(dc, context))
+ if (!context_changed(dc, context))
return DC_OK;
DC_LOG_DC("%s: %d streams\n",
@@ -1540,7 +1546,7 @@ bool dc_acquire_release_mpc_3dlut(
if (found_pipe_idx) {
if (acquire && pool->funcs->acquire_post_bldn_3dlut)
ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
- else if (acquire == false && pool->funcs->release_post_bldn_3dlut)
+ else if (!acquire && pool->funcs->release_post_bldn_3dlut)
ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
}
}
@@ -2016,7 +2022,7 @@ static enum surface_update_type check_update_surfaces_for_stream(
return overall_type;
}
-/**
+/*
* dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
*
* See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
@@ -2270,6 +2276,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->dither_option)
stream->dither_option = *update->dither_option;
+
+ if (update->pending_test_pattern)
+ stream->test_pattern = *update->pending_test_pattern;
/* update current stream with writeback info */
if (update->wb_update) {
int i;
@@ -2366,6 +2375,15 @@ static void commit_planes_do_stream_update(struct dc *dc,
}
}
+ if (stream_update->pending_test_pattern) {
+ dc_link_dp_set_test_pattern(stream->link,
+ stream->test_pattern.type,
+ stream->test_pattern.color_space,
+ stream->test_pattern.p_link_settings,
+ stream->test_pattern.p_custom_pattern,
+ stream->test_pattern.cust_pattern_size);
+ }
+
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
continue;
@@ -2819,7 +2837,7 @@ enum dc_irq_source dc_interrupt_to_irq_source(
return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}
-/**
+/*
* dc_interrupt_set() - Enable/disable an AMD hw interrupt source
*/
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
@@ -2953,7 +2971,7 @@ static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink
return true;
}
-/**
+/*
* dc_link_add_remote_sink() - Create a sink and attach it to an existing link
*
* EDID length is in bytes
@@ -3016,7 +3034,7 @@ fail_add_sink:
return NULL;
}
-/**
+/*
* dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
*
* Note that this just removes the struct dc_sink - it doesn't
@@ -3143,9 +3161,11 @@ void dc_lock_memory_clock_frequency(struct dc *dc)
core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
}
-bool dc_is_plane_eligible_for_idle_optimizaitons(struct dc *dc,
- struct dc_plane_state *plane)
+bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
+ struct dc_cursor_attributes *cursor_attr)
{
+ if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
+ return true;
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index f4a2088ab179..fa5059f71727 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -203,9 +203,21 @@ static bool program_hpd_filter(const struct dc_link *link)
return result;
}
+bool dc_link_wait_for_t12(struct dc_link *link)
+{
+ if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) {
+ link->dc->hwss.edp_wait_for_T12(link);
+
+ return true;
+ }
+
+ return false;
+}
+
/**
* dc_link_detect_sink() - Determine if there is a sink connected
*
+ * @link: pointer to the dc link
* @type: Returned connection type
* Does not detect downstream devices, such as MST sinks
* or display connected through active dongles
@@ -342,7 +354,7 @@ static enum signal_type get_basic_signal_type(struct graphics_object_id encoder,
return SIGNAL_TYPE_NONE;
}
-/**
+/*
* dc_link_is_dp_sink_present() - Check if there is a native DP
* or passive DP-HDMI dongle connected
*/
@@ -596,8 +608,6 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
dc_process_hdcp_msg(signal, link, &msg22);
if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- enum hdcp_message_status status = HDCP_MESSAGE_UNSUPPORTED;
-
msg14.data = &link->hdcp_caps.bcaps.raw;
msg14.length = sizeof(link->hdcp_caps.bcaps.raw);
msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS;
@@ -605,7 +615,7 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
msg14.link = HDCP_LINK_PRIMARY;
msg14.max_retries = 5;
- status = dc_process_hdcp_msg(signal, link, &msg14);
+ dc_process_hdcp_msg(signal, link, &msg14);
}
}
@@ -830,7 +840,7 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
return false;
}
-/**
+/*
* dc_link_detect() - Detect if a sink is attached to a given link
*
* link->local_sink is created or destroyed as needed.
@@ -1065,9 +1075,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
break;
}
- if (link->local_sink->edid_caps.panel_patch.disable_fec)
- link->ctx->dc->debug.disable_fec = true;
-
// Check if edid is the same
if ((prev_sink) &&
(edid_status == EDID_THE_SAME || edid_status == EDID_OK))
@@ -1366,13 +1373,17 @@ static bool dc_link_construct(struct dc_link *link,
struct dc_context *dc_ctx = init_params->ctx;
struct encoder_init_data enc_init_data = { 0 };
struct panel_cntl_init_data panel_cntl_init_data = { 0 };
- struct integrated_info info = {{{ 0 }}};
+ struct integrated_info *info;
struct dc_bios *bios = init_params->dc->ctx->dc_bios;
const struct dc_vbios_funcs *bp_funcs = bios->funcs;
struct bp_disp_connector_caps_info disp_connect_caps_info = { 0 };
DC_LOGGER_INIT(dc_ctx->logger);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ goto create_fail;
+
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
@@ -1390,10 +1401,12 @@ static bool dc_link_construct(struct dc_link *link,
link->link_id =
bios->funcs->get_connector_id(bios, init_params->connector_index);
+ DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id);
if (bios->funcs->get_disp_connector_caps_info) {
bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info);
link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY;
+ DC_LOG_DC("BIOS object table - is_internal_display: %d", link->is_internal_display);
}
if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
@@ -1408,10 +1421,14 @@ static bool dc_link_construct(struct dc_link *link,
link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
link->ctx->gpio_service);
+
if (link->hpd_gpio) {
dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT);
dal_gpio_unlock_pin(link->hpd_gpio);
link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio);
+
+ DC_LOG_DC("BIOS object table - hpd_gpio id: %d", link->hpd_gpio->id);
+ DC_LOG_DC("BIOS object table - hpd_gpio en: %d", link->hpd_gpio->en);
}
switch (link->link_id.id) {
@@ -1470,6 +1487,11 @@ static bool dc_link_construct(struct dc_link *link,
goto ddc_create_fail;
}
+ if (!link->ddc->ddc_pin) {
+ DC_ERROR("Failed to get I2C info for connector!\n");
+ goto ddc_create_fail;
+ }
+
link->ddc_hw_inst =
dal_ddc_get_line(dal_ddc_service_get_ddc_pin(link->ddc));
@@ -1508,6 +1530,8 @@ static bool dc_link_construct(struct dc_link *link,
goto link_enc_create_fail;
}
+ DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
+
link->link_enc_hw_inst = link->link_enc->transmitter;
for (i = 0; i < 4; i++) {
@@ -1530,16 +1554,20 @@ static bool dc_link_construct(struct dc_link *link,
if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD &&
link->connector_signal == SIGNAL_TYPE_RGB)
continue;
+
+ DC_LOG_DC("BIOS object table - device_tag.acpi_device: %d", link->device_tag.acpi_device);
+ DC_LOG_DC("BIOS object table - device_tag.dev_id.device_type: %d", link->device_tag.dev_id.device_type);
+ DC_LOG_DC("BIOS object table - device_tag.dev_id.enum_id: %d", link->device_tag.dev_id.enum_id);
break;
}
if (bios->integrated_info)
- info = *bios->integrated_info;
+ memcpy(info, bios->integrated_info, sizeof(*info));
/* Look for channel mapping corresponding to connector and device tag */
for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
struct external_display_path *path =
- &info.ext_disp_conn_info.path[i];
+ &info->ext_disp_conn_info.path[i];
if (path->device_connector_id.enum_id == link->link_id.enum_id &&
path->device_connector_id.id == link->link_id.id &&
@@ -1548,10 +1576,14 @@ static bool dc_link_construct(struct dc_link *link,
path->device_acpi_enum == link->device_tag.acpi_device) {
link->ddi_channel_mapping = path->channel_mapping;
link->chip_caps = path->caps;
+ DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
+ DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
} else if (path->device_tag ==
link->device_tag.dev_id.raw_device_tag) {
link->ddi_channel_mapping = path->channel_mapping;
link->chip_caps = path->caps;
+ DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
+ DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
}
break;
}
@@ -1570,6 +1602,7 @@ static bool dc_link_construct(struct dc_link *link,
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
return true;
device_tag_fail:
link->link_enc->funcs->destroy(&link->link_enc);
@@ -1586,6 +1619,9 @@ create_fail:
link->hpd_gpio = NULL;
}
+ DC_LOG_DC("BIOS object table - %s failed.\n", __func__);
+ kfree(info);
+
return false;
}
@@ -3133,17 +3169,17 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
{
struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
if (cp_psp && cp_psp->funcs.update_stream_config) {
- struct cp_psp_stream_config config;
-
- memset(&config, 0, sizeof(config));
+ struct cp_psp_stream_config config = {0};
+ enum dp_panel_mode panel_mode =
+ dp_get_panel_mode(pipe_ctx->stream->link);
config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
- /*stream_enc_inst*/
- config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
- config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst;
+ config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
+ config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
config.dpms_off = dpms_off;
config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
- config.mst_supported = (pipe_ctx->stream->signal ==
+ config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP);
+ config.mst_enabled = (pipe_ctx->stream->signal ==
SIGNAL_TYPE_DISPLAY_PORT_MST);
cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
}
@@ -3396,10 +3432,7 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
}
/**
- *****************************************************************************
- * Function: dc_link_enable_hpd_filter
- *
- * @brief
+ * dc_link_enable_hpd_filter:
* If enable is true, programs HPD filter on associated HPD line using
* delay_on_disconnect/delay_on_connect values dependent on
* link->connector_signal
@@ -3407,9 +3440,8 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
* If enable is false, programs HPD filter on associated HPD line with no
* delays on connect or disconnect
*
- * @param [in] link: pointer to the dc link
- * @param [in] enable: boolean specifying whether to enable hbd
- *****************************************************************************
+ * @link: pointer to the dc link
+ * @enable: boolean specifying whether to enable hbd
*/
void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
{
@@ -3635,7 +3667,7 @@ uint32_t dc_link_bandwidth_kbps(
link_bw_kbps *= 8; /* 8 bits per byte*/
link_bw_kbps *= link_setting->lane_count;
- if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec) {
+ if (dc_link_should_enable_fec(link)) {
/* Account for FEC overhead.
* We have to do it based on caps,
* and not based on FEC being set ready,
@@ -3656,8 +3688,8 @@ uint32_t dc_link_bandwidth_kbps(
* but the difference is minimal and is in a safe direction,
* which all works well around potential ambiguity of DP 1.4a spec.
*/
- link_bw_kbps = mul_u64_u32_shr(BIT_ULL(32) * 970LL / 1000,
- link_bw_kbps, 32);
+ long long fec_link_bw_kbps = link_bw_kbps * 970LL;
+ link_bw_kbps = (uint32_t)(div64_s64(fec_link_bw_kbps, 1000LL));
}
return link_bw_kbps;
@@ -3687,3 +3719,19 @@ bool dc_link_is_fec_supported(const struct dc_link *link)
!IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
}
+bool dc_link_should_enable_fec(const struct dc_link *link)
+{
+ bool is_fec_disable = false;
+ bool ret = false;
+
+ if ((link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
+ link->local_sink &&
+ link->local_sink->edid_caps.panel_patch.disable_fec) ||
+ link->connector_signal == SIGNAL_TYPE_EDP) // Disable FEC for eDP
+ is_fec_disable = true;
+
+ if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec && !is_fec_disable)
+ ret = true;
+
+ return ret;
+}
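
Two notes on the dc_link.c hunks above. First, the FEC overhead scaling in dc_link_bandwidth_kbps() is numerically the same up to rounding: mul_u64_u32_shr(BIT_ULL(32) * 970 / 1000, x, 32) is x * 970 / 1000 evaluated in 32.32 fixed point, and the replacement computes the same ~0.97 factor with a plain 64-bit multiply and divide. A standalone comparison of the two forms (the bandwidth figure is arbitrary):

	#include <stdint.h>
	#include <stdio.h>

	/* 32.32 fixed-point form, mirroring mul_u64_u32_shr(coef, x, 32) */
	static uint32_t scale_fixed(uint32_t x)
	{
		uint64_t coef = (((uint64_t)1 << 32) * 970ULL) / 1000ULL;

		return (uint32_t)((coef * x) >> 32);
	}

	/* Plain multiply/divide form used by the new code */
	static uint32_t scale_div(uint32_t x)
	{
		return (uint32_t)(((int64_t)x * 970LL) / 1000LL);
	}

	int main(void)
	{
		uint32_t bw_kbps = 25920000; /* arbitrary illustrative value */

		printf("fixed: %u, div: %u\n", scale_fixed(bw_kbps), scale_div(bw_kbps));
		return 0;
	}

Second, dc_link_should_enable_fec() folds the per-sink disable_fec EDID patch (removed from dc_link_detect_helper earlier in this file) and the eDP exclusion into one predicate, so callers in dc_link_dp.c below switch from the dc_link_is_fec_supported() plus debug.disable_fec pair to the single helper.
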
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index c5936e064360..ae6484ab567b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -37,12 +37,16 @@
#include "dc_link_ddc.h"
#include "dce/dce_aux.h"
-/*DP to Dual link DVI converter*/
+#define DC_LOGGER_INIT(logger)
+
+static const uint8_t DP_VGA_DONGLE_BRANCH_DEV_NAME[] = "DpVga";
+/* DP to Dual link DVI converter */
static const uint8_t DP_DVI_CONVERTER_ID_4[] = "m2DVIa";
static const uint8_t DP_DVI_CONVERTER_ID_5[] = "3393N2";
#define AUX_POWER_UP_WA_DELAY 500
#define I2C_OVER_AUX_DEFER_WA_DELAY 70
+#define DPVGA_DONGLE_AUX_DEFER_WA_DELAY 40
#define I2C_OVER_AUX_DEFER_WA_DELAY_1MS 1
/* CV smart dongle slave address for retrieving supported HDTV modes*/
@@ -194,6 +198,10 @@ static void ddc_service_construct(
if (BP_RESULT_OK != dcb->funcs->get_i2c_info(dcb, init_data->id, &i2c_info)) {
ddc_service->ddc_pin = NULL;
} else {
+ DC_LOGGER_INIT(ddc_service->ctx->logger);
+ DC_LOG_DC("BIOS object table - i2c_line: %d", i2c_info.i2c_line);
+ DC_LOG_DC("BIOS object table - i2c_engine_id: %d", i2c_info.i2c_engine_id);
+
hw_info.ddc_channel = i2c_info.i2c_line;
if (ddc_service->link != NULL)
hw_info.hw_supported = i2c_info.i2c_hw_assist;
@@ -286,6 +294,15 @@ static uint32_t defer_delay_converter_wa(
{
struct dc_link *link = ddc->link;
+ if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER &&
+ link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_0080E1 &&
+ !memcmp(link->dpcd_caps.branch_dev_name,
+ DP_VGA_DONGLE_BRANCH_DEV_NAME,
+ sizeof(link->dpcd_caps.branch_dev_name)))
+
+ return defer_delay > DPVGA_DONGLE_AUX_DEFER_WA_DELAY ?
+ defer_delay : DPVGA_DONGLE_AUX_DEFER_WA_DELAY;
+
if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_0080E1 &&
!memcmp(link->dpcd_caps.branch_dev_name,
DP_DVI_CONVERTER_ID_4,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index f95bade59624..c1391bfb7a9b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -126,9 +126,7 @@ static void dpcd_set_training_pattern(
static enum dc_dp_training_pattern decide_cr_training_pattern(
const struct dc_link_settings *link_settings)
{
- enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_1;
-
- return pattern;
+ return DP_TRAINING_PATTERN_SEQUENCE_1;
}
static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
@@ -892,13 +890,13 @@ static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_inte
switch (dpcd_aux_read_interval) {
case 0x01:
- aux_rd_interval_us = 400;
+ aux_rd_interval_us = 4000;
break;
case 0x02:
- aux_rd_interval_us = 4000;
+ aux_rd_interval_us = 8000;
break;
case 0x03:
- aux_rd_interval_us = 8000;
+ aux_rd_interval_us = 12000;
break;
case 0x04:
aux_rd_interval_us = 16000;
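
The corrected table above matches the DPCD TRAINING_AUX_RD_INTERVAL encoding, where the nonzero values 0x01 through 0x04 mean a wait of value * 4 ms; the old mapping reused the short default for 0x01 and lagged one step behind for 0x02 and 0x03. A compact sketch of that rule (the zero/default encoding is handled by the surrounding function and left out here):

	#include <stdint.h>

	static uint32_t aux_rd_interval_us_sketch(uint32_t dpcd_val)
	{
		/* 0x01..0x04 -> 4 ms, 8 ms, 12 ms, 16 ms */
		if (dpcd_val >= 0x01 && dpcd_val <= 0x04)
			return dpcd_val * 4000;

		return 0; /* placeholder for the default path, not modeled */
	}
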
@@ -3710,7 +3708,7 @@ bool detect_dp_sink_caps(struct dc_link *link)
/* TODO save sink caps in link->sink */
}
-enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
+static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
{
enum dc_link_rate link_rate;
// LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation.
@@ -4348,7 +4346,7 @@ void dp_set_fec_ready(struct dc_link *link, bool ready)
struct link_encoder *link_enc = link->link_enc;
uint8_t fec_config = 0;
- if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
+ if (!dc_link_should_enable_fec(link))
return;
if (link_enc->funcs->fec_set_ready &&
@@ -4383,7 +4381,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
{
struct link_encoder *link_enc = link->link_enc;
- if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
+ if (!dc_link_should_enable_fec(link))
return;
if (link_enc->funcs->fec_set_enable &&
@@ -4409,25 +4407,40 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
void dpcd_set_source_specific_data(struct dc_link *link)
{
if (!link->dc->vendor_signature.is_valid) {
- enum dc_status result_write_min_hblank = DC_NOT_SUPPORTED;
- struct dpcd_amd_signature amd_signature;
- amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
- amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
- amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
- amd_signature.device_id_byte1 =
+ enum dc_status __maybe_unused result_write_min_hblank = DC_NOT_SUPPORTED;
+ struct dpcd_amd_signature amd_signature = {0};
+ struct dpcd_amd_device_id amd_device_id = {0};
+
+ amd_device_id.device_id_byte1 =
(uint8_t)(link->ctx->asic_id.chip_id);
- amd_signature.device_id_byte2 =
+ amd_device_id.device_id_byte2 =
(uint8_t)(link->ctx->asic_id.chip_id >> 8);
- memset(&amd_signature.zero, 0, 4);
- amd_signature.dce_version =
+ amd_device_id.dce_version =
(uint8_t)(link->ctx->dce_version);
- amd_signature.dal_version_byte1 = 0x0; // needed? where to get?
- amd_signature.dal_version_byte2 = 0x0; // needed? where to get?
+ amd_device_id.dal_version_byte1 = 0x0; // needed? where to get?
+ amd_device_id.dal_version_byte2 = 0x0; // needed? where to get?
- core_link_write_dpcd(link, DP_SOURCE_OUI,
+ core_link_read_dpcd(link, DP_SOURCE_OUI,
(uint8_t *)(&amd_signature),
sizeof(amd_signature));
+ if (!((amd_signature.AMD_IEEE_TxSignature_byte1 == 0x0) &&
+ (amd_signature.AMD_IEEE_TxSignature_byte2 == 0x0) &&
+ (amd_signature.AMD_IEEE_TxSignature_byte3 == 0x1A))) {
+
+ amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
+
+ core_link_write_dpcd(link, DP_SOURCE_OUI,
+ (uint8_t *)(&amd_signature),
+ sizeof(amd_signature));
+ }
+
+ core_link_write_dpcd(link, DP_SOURCE_OUI+0x03,
+ (uint8_t *)(&amd_device_id),
+ sizeof(amd_device_id));
+
if (link->ctx->dce_version >= DCN_VERSION_2_0 &&
link->dc->caps.min_horizontal_blanking_period != 0) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 07c22556480b..0c26c2ade782 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1117,7 +1117,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
* We also need to make sure pipe_ctx->plane_res.scl_data.h_active uses the
* original h_border_left value in its calculation.
*/
-int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
+static int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
{
int store_h_border_left = pipe_ctx->stream->timing.h_border_left;
@@ -1128,8 +1128,8 @@ int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
return store_h_border_left;
}
-void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx,
- int store_h_border_left)
+static void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx,
+ int store_h_border_left)
{
pipe_ctx->stream->dst.x -= store_h_border_left;
pipe_ctx->stream->timing.h_border_left = store_h_border_left;
@@ -1153,8 +1153,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
calculate_viewport(pipe_ctx);
- if (pipe_ctx->plane_res.scl_data.viewport.height < 12 ||
- pipe_ctx->plane_res.scl_data.viewport.width < 12) {
+ if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
+ pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE) {
if (store_h_border_left) {
restore_border_left_from_dst(pipe_ctx,
store_h_border_left);
@@ -1697,7 +1697,7 @@ static bool are_stream_backends_same(
return true;
}
-/**
+/*
* dc_is_stream_unchanged() - Compare two stream states for equivalence.
*
* Checks if there a difference between the two states
@@ -1718,7 +1718,7 @@ bool dc_is_stream_unchanged(
return true;
}
-/**
+/*
* dc_is_stream_scaling_unchanged() - Compare scaling rectangles of two streams.
*/
bool dc_is_stream_scaling_unchanged(
@@ -1833,7 +1833,7 @@ static struct audio *find_first_free_audio(
return 0;
}
-/**
+/*
* dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
*/
enum dc_status dc_add_stream_to_ctx(
@@ -1860,7 +1860,7 @@ enum dc_status dc_add_stream_to_ctx(
return res;
}
-/**
+/*
* dc_remove_stream_from_ctx() - Remove a stream from a dc_state.
*/
enum dc_status dc_remove_stream_from_ctx(
@@ -2075,6 +2075,20 @@ static int acquire_resource_from_hw_enabled_state(
return -1;
}
+static void mark_seamless_boot_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream)
+{
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+
+ /* TODO: Check Linux */
+ if (dc->config.allow_seamless_boot_optimization &&
+ !dcb->funcs->is_accelerated_mode(dcb)) {
+ if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing))
+ stream->apply_seamless_boot_optimization = true;
+ }
+}
+
enum dc_status resource_map_pool_resources(
const struct dc *dc,
struct dc_state *context,
@@ -2085,22 +2099,20 @@ enum dc_status resource_map_pool_resources(
struct dc_context *dc_ctx = dc->ctx;
struct pipe_ctx *pipe_ctx = NULL;
int pipe_idx = -1;
- struct dc_bios *dcb = dc->ctx->dc_bios;
calculate_phy_pix_clks(stream);
- /* TODO: Check Linux */
- if (dc->config.allow_seamless_boot_optimization &&
- !dcb->funcs->is_accelerated_mode(dcb)) {
- if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing))
- stream->apply_seamless_boot_optimization = true;
- }
+ mark_seamless_boot_stream(dc, stream);
- if (stream->apply_seamless_boot_optimization)
+ if (stream->apply_seamless_boot_optimization) {
pipe_idx = acquire_resource_from_hw_enabled_state(
&context->res_ctx,
pool,
stream);
+ if (pipe_idx < 0)
+ /* hw resource was assigned to other stream */
+ stream->apply_seamless_boot_optimization = false;
+ }
if (pipe_idx < 0)
/* acquire new resources */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index c103f858375d..25fa712a7847 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -244,7 +244,7 @@ struct dc_stream_status *dc_stream_get_status(
}
#ifndef TRIM_FSFT
-/**
+/*
* dc_optimize_timing_for_fsft() - dc to optimize timing
*/
bool dc_optimize_timing_for_fsft(
@@ -260,8 +260,7 @@ bool dc_optimize_timing_for_fsft(
}
#endif
-
-/**
+/*
* dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
*/
bool dc_stream_set_cursor_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 3d7d27435f15..e6b9c6a71841 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -115,7 +115,7 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc)
return plane_state;
}
-/**
+/*
*****************************************************************************
* Function: dc_plane_get_status
*
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 3aedadb34548..4eee3a55fa30 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -42,12 +42,13 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.116"
+#define DC_VER "3.2.122"
#define MAX_SURFACES 3
#define MAX_PLANES 6
#define MAX_STREAMS 6
#define MAX_SINKS_PER_LINK 4
+#define MIN_VIEWPORT_SIZE 12
/*******************************************************************************
* Display Core Interfaces
@@ -171,6 +172,9 @@ struct dc_caps {
bool dmcub_support;
uint32_t num_of_internal_disp;
enum dp_protocol_version max_dp_protocol_version;
+ unsigned int mall_size_per_mem_channel;
+ unsigned int mall_size_total;
+ unsigned int cursor_cache_size;
struct dc_plane_cap planes[MAX_PLANES];
struct dc_color_caps color;
};
@@ -481,7 +485,6 @@ struct dc_debug_options {
bool performance_trace;
bool az_endpoint_mute_only;
bool always_use_regamma;
- bool p010_mpo_support;
bool recovery_enabled;
bool avoid_vbios_exec_table;
bool scl_reset_length10;
@@ -499,6 +502,9 @@ struct dc_debug_options {
bool dmcub_emulation;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool disable_idle_power_optimizations;
+ unsigned int mall_size_override;
+ unsigned int mall_additional_timer_percent;
+ bool mall_error_as_fatal;
#endif
bool dmub_command_table; /* for testing only */
struct dc_bw_validation_profile bw_val_profile;
@@ -521,7 +527,6 @@ struct dc_debug_options {
bool usbc_combo_phy_reset_wa;
bool disable_dsc;
bool enable_dram_clock_change_one_display_vactive;
- bool force_ignore_link_settings;
union mem_low_power_enable_options enable_mem_low_power;
};
@@ -633,7 +638,6 @@ struct dc {
const char *build_id;
struct vm_helper *vm_helper;
- const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
};
enum frame_buffer_mode {
@@ -671,16 +675,10 @@ struct dc_init_data {
struct dc_config flags;
uint64_t log_mask;
- /**
- * gpu_info FW provided soc bounding box struct or 0 if not
- * available in FW
- */
- const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
struct dpcd_vendor_signature vendor_signature;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool force_smu_not_present;
#endif
- bool force_ignore_link_settings;
};
struct dc_callback_init {
@@ -1269,8 +1267,8 @@ enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
#if defined(CONFIG_DRM_AMD_DC_DCN)
-bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
- struct dc_plane_state *plane);
+bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
+ struct dc_cursor_attributes *cursor_attr);
void dc_allow_idle_optimizations(struct dc *dc, bool allow);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 80a2191a3115..cc6fb838420e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -451,6 +451,9 @@ struct dpcd_amd_signature {
uint8_t AMD_IEEE_TxSignature_byte1;
uint8_t AMD_IEEE_TxSignature_byte2;
uint8_t AMD_IEEE_TxSignature_byte3;
+};
+
+struct dpcd_amd_device_id {
uint8_t device_id_byte1;
uint8_t device_id_byte2;
uint8_t zero[4];
diff --git a/drivers/gpu/drm/amd/display/dc/dc_edid_parser.c b/drivers/gpu/drm/amd/display/dc/dc_edid_parser.c
new file mode 100644
index 000000000000..0db5b49e9d5e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_edid_parser.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce/dce_dmcu.h"
+#include "dc_edid_parser.h"
+
+bool dc_edid_parser_send_cea(struct dc *dc,
+ int offset,
+ int total_length,
+ uint8_t *data,
+ int length)
+{
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (dmcu &&
+ dmcu->funcs->is_dmcu_initialized(dmcu) &&
+ dmcu->funcs->send_edid_cea) {
+ return dmcu->funcs->send_edid_cea(dmcu,
+ offset,
+ total_length,
+ data,
+ length);
+ }
+
+ return false;
+}
+
+bool dc_edid_parser_recv_cea_ack(struct dc *dc, int *offset)
+{
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (dmcu &&
+ dmcu->funcs->is_dmcu_initialized(dmcu) &&
+ dmcu->funcs->recv_edid_cea_ack) {
+ return dmcu->funcs->recv_edid_cea_ack(dmcu, offset);
+ }
+
+ return false;
+}
+
+bool dc_edid_parser_recv_amd_vsdb(struct dc *dc,
+ int *version,
+ int *min_frame_rate,
+ int *max_frame_rate)
+{
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (dmcu &&
+ dmcu->funcs->is_dmcu_initialized(dmcu) &&
+ dmcu->funcs->recv_amd_vsdb) {
+ return dmcu->funcs->recv_amd_vsdb(dmcu,
+ version,
+ min_frame_rate,
+ max_frame_rate);
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_edid_parser.h b/drivers/gpu/drm/amd/display/dc/dc_edid_parser.h
new file mode 100644
index 000000000000..da67ec06f0a2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_edid_parser.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_EDID_PARSER_H_
+#define _DC_EDID_PARSER_H_
+
+#include "core_types.h"
+
+bool dc_edid_parser_send_cea(struct dc *dc,
+ int offset,
+ int total_length,
+ uint8_t *data,
+ int length);
+
+bool dc_edid_parser_recv_cea_ack(struct dc *dc, int *offset);
+
+bool dc_edid_parser_recv_amd_vsdb(struct dc *dc,
+ int *version,
+ int *min_frame_rate,
+ int *max_frame_rate);
+
+#endif /* _DC_EDID_PARSER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 57edb25fc381..a612ba6dc389 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -34,6 +34,7 @@
#include "dc.h"
#include "dc_dmub_srv.h"
+#include "reg_helper.h"
static inline void submit_dmub_read_modify_write(
struct dc_reg_helper_state *offload,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 701aa7178a89..b41e6367b15e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -71,6 +71,7 @@ struct dc_plane_address {
union {
struct{
PHYSICAL_ADDRESS_LOC addr;
+ PHYSICAL_ADDRESS_LOC cursor_cache_addr;
PHYSICAL_ADDRESS_LOC meta_addr;
union large_integer dcc_const_color;
} grph;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 6d9a60c9dcc0..e189f16bc026 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -103,6 +103,8 @@ struct dc_link {
bool lttpr_non_transparent_mode;
bool is_internal_display;
+ bool edp_sink_present;
+
/* caps is the same as reported_link_cap. link_traing use
* reported_link_cap. Will clean up. TODO
*/
@@ -259,6 +261,13 @@ enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link);
bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss);
+/*
+ * On eDP links this function call will stall until T12 has elapsed.
+ * If the panel is not in power off state, this function will return
+ * immediately.
+ */
+bool dc_link_wait_for_t12(struct dc_link *link);
+
enum dc_status read_hpd_rx_irq_data(
struct dc_link *link,
union hpd_irq_data *irq_data);
@@ -369,5 +378,6 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing);
bool dc_link_is_fec_supported(const struct dc_link *link);
+bool dc_link_should_enable_fec(const struct dc_link *link);
#endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index b7910976b81a..80b67b860091 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -130,6 +130,14 @@ union stream_update_flags {
uint32_t raw;
};
+struct test_pattern {
+ enum dp_test_pattern type;
+ enum dp_test_pattern_color_space color_space;
+ struct link_training_settings const *p_link_settings;
+ unsigned char const *p_custom_pattern;
+ unsigned int cust_pattern_size;
+};
+
struct dc_stream_state {
// sink is deprecated, new code should not reference
// this pointer
@@ -227,6 +235,8 @@ struct dc_stream_state {
uint32_t stream_id;
bool is_dsc_enabled;
+
+ struct test_pattern test_pattern;
union stream_update_flags update_flags;
};
@@ -261,6 +271,7 @@ struct dc_stream_update {
struct dc_dsc_config *dsc_config;
struct dc_transfer_func *func_shaper;
struct dc_3dlut *lut3d_func;
+ struct test_pattern *pending_test_pattern;
};
bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 2a2a0fdb9253..7866cf2a668f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -868,7 +868,7 @@ void dce_aud_wall_dto_setup(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-void dce60_aud_wall_dto_setup(
+static void dce60_aud_wall_dto_setup(
struct audio *audio,
enum signal_type signal,
const struct audio_crtc_info *crtc_info,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index cda5fd0464bc..d51b5fe91287 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -388,12 +388,6 @@ static enum aux_channel_operation_result get_channel_status(
}
}
-enum i2caux_engine_type get_engine_type(
- const struct dce_aux *engine)
-{
- return I2CAUX_ENGINE_TYPE_AUX;
-}
-
static bool acquire(
struct dce_aux *engine,
struct ddc *ddc)
@@ -582,7 +576,7 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
*operation_result = get_channel_status(aux_engine, &returned_bytes);
if (*operation_result == AUX_CHANNEL_OPERATION_SUCCEEDED) {
- int bytes_replied = 0;
+ int __maybe_unused bytes_replied = 0;
bytes_replied = read_channel_reply(aux_engine, payload->length,
payload->data, payload->reply,
&status);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index 382465862f29..277484cf853e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -124,7 +124,6 @@ struct dce110_aux_registers {
AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
- AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index fb733f573715..dec58b3c42e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -113,20 +113,19 @@ static const struct spread_spectrum_data *get_ss_data_entry(
}
/**
- * Function: calculate_fb_and_fractional_fb_divider
+ * calculate_fb_and_fractional_fb_divider - Calculates feedback and fractional
+ * feedback dividers values
*
- * * DESCRIPTION: Calculates feedback and fractional feedback dividers values
+ * @calc_pll_cs: Pointer to clock source information
+ * @target_pix_clk_100hz: Desired frequency in 100 Hz
+ * @ref_divider: Reference divider (already known)
+ * @post_divider: Post Divider (already known)
+ * @feedback_divider_param: Pointer where to store
+ * calculated feedback divider value
+ * @fract_feedback_divider_param: Pointer where to store
+ * calculated fract feedback divider value
*
- *PARAMETERS:
- * targetPixelClock Desired frequency in 100 Hz
- * ref_divider Reference divider (already known)
- * postDivider Post Divider (already known)
- * feedback_divider_param Pointer where to store
- * calculated feedback divider value
- * fract_feedback_divider_param Pointer where to store
- * calculated fract feedback divider value
- *
- *RETURNS:
+ * return:
* It fills the locations pointed by feedback_divider_param
* and fract_feedback_divider_param
* It returns - true if feedback divider not 0
@@ -175,22 +174,22 @@ static bool calculate_fb_and_fractional_fb_divider(
}
/**
-*calc_fb_divider_checking_tolerance
-*
-*DESCRIPTION: Calculates Feedback and Fractional Feedback divider values
-* for passed Reference and Post divider, checking for tolerance.
-*PARAMETERS:
-* pll_settings Pointer to structure
-* ref_divider Reference divider (already known)
-* postDivider Post Divider (already known)
-* tolerance Tolerance for Calculated Pixel Clock to be within
-*
-*RETURNS:
-* It fills the PLLSettings structure with PLL Dividers values
-* if calculated values are within required tolerance
-* It returns - true if error is within tolerance
-* - false if error is not within tolerance
-*/
+ * calc_fb_divider_checking_tolerance - Calculates Feedback and
+ * Fractional Feedback divider values
+ * for passed Reference and Post divider,
+ * checking for tolerance.
+ * @calc_pll_cs: Pointer to clock source information
+ * @pll_settings: Pointer to PLL settings
+ * @ref_divider: Reference divider (already known)
+ * @post_divider: Post Divider (already known)
+ * @tolerance: Tolerance for Calculated Pixel Clock to be within
+ *
+ * return:
+ * It fills the PLLSettings structure with PLL Dividers values
+ * if calculated values are within required tolerance
+ * It returns - true if error is within tolerance
+ * - false if error is not within tolerance
+ */
static bool calc_fb_divider_checking_tolerance(
struct calc_pll_clock_source *calc_pll_cs,
struct pll_settings *pll_settings,
@@ -241,7 +240,7 @@ static bool calc_fb_divider_checking_tolerance(
pll_settings->calculated_pix_clk_100hz =
actual_calculated_clock_100hz;
pll_settings->vco_freq =
- actual_calculated_clock_100hz * post_divider / 10;
+ div_u64((u64)actual_calculated_clock_100hz * post_divider, 10);
return true;
}
return false;
@@ -460,7 +459,7 @@ static bool pll_adjust_pix_clk(
return false;
}
-/**
+/*
* Calculate PLL Dividers for given Clock Value.
* First will call VBIOS Adjust Exec table to check if requested Pixel clock
* will be Adjusted based on usage.
@@ -871,6 +870,20 @@ static bool dce110_program_pix_clk(
bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC =
pll_settings->use_external_clk;
+ switch (pix_clk_params->color_depth) {
+ case COLOR_DEPTH_101010:
+ bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_30;
+ break;
+ case COLOR_DEPTH_121212:
+ bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_36;
+ break;
+ case COLOR_DEPTH_161616:
+ bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_48;
+ break;
+ default:
+ break;
+ }
+
if (clk_src->bios->funcs->set_pixel_clock(
clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index f3ed8b619caf..ddc789daf3b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -57,6 +57,9 @@
#define MCP_SYNC_PHY_LOCK 0x90
#define MCP_SYNC_PHY_UNLOCK 0x91
#define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */
+#define MCP_SEND_EDID_CEA 0xA0
+#define EDID_CEA_CMD_ACK 1
+#define EDID_CEA_CMD_NACK 2
#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L
// PSP FW version
@@ -65,13 +68,17 @@
//Register access policy version
#define mmMP0_SMN_C2PMSG_91 0x1609B
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static const uint32_t abm_gain_stepsize = 0x0060;
+#endif
+
static bool dce_dmcu_init(struct dmcu *dmcu)
{
// Do nothing
return true;
}
-bool dce_dmcu_load_iram(struct dmcu *dmcu,
+static bool dce_dmcu_load_iram(struct dmcu *dmcu,
unsigned int start_offset,
const char *src,
unsigned int bytes)
@@ -807,6 +814,120 @@ static bool dcn20_unlock_phy(struct dmcu *dmcu)
return true;
}
+static bool dcn10_send_edid_cea(struct dmcu *dmcu,
+ int offset,
+ int total_length,
+ uint8_t *data,
+ int length)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ uint32_t header, data1, data2;
+
+ /* If microcontroller is not running, do nothing */
+ if (dmcu->dmcu_state != DMCU_RUNNING)
+ return false;
+
+ if (length > 8 || length <= 0)
+ return false;
+
+ header = ((uint32_t)offset & 0xFFFF) << 16 | (total_length & 0xFFFF);
+ data1 = (((uint32_t)data[0]) << 24) | (((uint32_t)data[1]) << 16) |
+ (((uint32_t)data[2]) << 8) | ((uint32_t)data[3]);
+ data2 = (((uint32_t)data[4]) << 24) | (((uint32_t)data[5]) << 16) |
+ (((uint32_t)data[6]) << 8) | ((uint32_t)data[7]);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+ /* setDMCUParam_Cmd */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_SEND_EDID_CEA);
+
+ REG_WRITE(MASTER_COMM_DATA_REG1, header);
+ REG_WRITE(MASTER_COMM_DATA_REG2, data1);
+ REG_WRITE(MASTER_COMM_DATA_REG3, data2);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+ return true;
+}
+
+static bool dcn10_get_scp_results(struct dmcu *dmcu,
+ uint32_t *cmd,
+ uint32_t *data1,
+ uint32_t *data2,
+ uint32_t *data3)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ /* If microcontroller is not running, do nothing */
+ if (dmcu->dmcu_state != DMCU_RUNNING)
+ return false;
+
+ *cmd = REG_READ(SLAVE_COMM_CMD_REG);
+ *data1 = REG_READ(SLAVE_COMM_DATA_REG1);
+ *data2 = REG_READ(SLAVE_COMM_DATA_REG2);
+ *data3 = REG_READ(SLAVE_COMM_DATA_REG3);
+
+ /* clear SCP interrupt */
+ REG_UPDATE(SLAVE_COMM_CNTL_REG, SLAVE_COMM_INTERRUPT, 0);
+
+ return true;
+}
+
+static bool dcn10_recv_amd_vsdb(struct dmcu *dmcu,
+ int *version,
+ int *min_frame_rate,
+ int *max_frame_rate)
+{
+ uint32_t data[4];
+ int cmd, ack, len;
+
+ if (!dcn10_get_scp_results(dmcu, &data[0], &data[1], &data[2], &data[3]))
+ return false;
+
+ cmd = data[0] & 0x3FF;
+ len = (data[0] >> 10) & 0x3F;
+ ack = data[1];
+
+ if (cmd != MCP_SEND_EDID_CEA || ack != EDID_CEA_CMD_ACK || len != 12)
+ return false;
+
+ if ((data[2] & 0xFF)) {
+ *version = (data[2] >> 8) & 0xFF;
+ *min_frame_rate = (data[3] >> 16) & 0xFFFF;
+ *max_frame_rate = data[3] & 0xFFFF;
+ return true;
+ }
+
+ return false;
+}
+
+static bool dcn10_recv_edid_cea_ack(struct dmcu *dmcu, int *offset)
+{
+ uint32_t data[4];
+ int cmd, ack;
+
+ if (!dcn10_get_scp_results(dmcu,
+ &data[0], &data[1], &data[2], &data[3]))
+ return false;
+
+ cmd = data[0] & 0x3FF;
+ ack = data[1];
+
+ if (cmd != MCP_SEND_EDID_CEA)
+ return false;
+
+ if (ack == EDID_CEA_CMD_ACK)
+ return true;
+
+ *offset = data[2]; /* nack */
+ return false;
+}
+
#endif //(CONFIG_DRM_AMD_DC_DCN)
static const struct dmcu_funcs dce_funcs = {
@@ -829,6 +950,9 @@ static const struct dmcu_funcs dcn10_funcs = {
.get_psr_state = dcn10_get_dmcu_psr_state,
.set_psr_wait_loop = dcn10_psr_wait_loop,
.get_psr_wait_loop = dcn10_get_psr_wait_loop,
+ .send_edid_cea = dcn10_send_edid_cea,
+ .recv_amd_vsdb = dcn10_recv_amd_vsdb,
+ .recv_edid_cea_ack = dcn10_recv_edid_cea_ack,
.is_dmcu_initialized = dcn10_is_dmcu_initialized
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index 93e7f34d4775..ff726b35ef6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -40,6 +40,10 @@
SR(MASTER_COMM_DATA_REG3), \
SR(MASTER_COMM_CMD_REG), \
SR(MASTER_COMM_CNTL_REG), \
+ SR(SLAVE_COMM_DATA_REG1), \
+ SR(SLAVE_COMM_DATA_REG2), \
+ SR(SLAVE_COMM_DATA_REG3), \
+ SR(SLAVE_COMM_CMD_REG), \
SR(DMCU_IRAM_RD_CTRL), \
SR(DMCU_IRAM_RD_DATA), \
SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
@@ -112,6 +116,7 @@
DMCU_SF(MASTER_COMM_CMD_REG, \
MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
DMCU_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
+ DMCU_SF(SLAVE_COMM_CNTL_REG, SLAVE_COMM_INTERRUPT, mask_sh), \
DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
STATIC_SCREEN1_INT_TO_UC_EN, mask_sh), \
DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
@@ -179,6 +184,7 @@
type UC_IN_RESET; \
type MASTER_COMM_CMD_REG_BYTE0; \
type MASTER_COMM_INTERRUPT; \
+ type SLAVE_COMM_INTERRUPT; \
type DPHY_RX_FAST_TRAINING_CAPABLE; \
type DPHY_LOAD_BS_COUNT; \
type STATIC_SCREEN1_INT_TO_UC_EN; \
@@ -211,6 +217,11 @@ struct dce_dmcu_registers {
uint32_t MASTER_COMM_DATA_REG3;
uint32_t MASTER_COMM_CMD_REG;
uint32_t MASTER_COMM_CNTL_REG;
+ uint32_t SLAVE_COMM_DATA_REG1;
+ uint32_t SLAVE_COMM_DATA_REG2;
+ uint32_t SLAVE_COMM_DATA_REG3;
+ uint32_t SLAVE_COMM_CMD_REG;
+ uint32_t SLAVE_COMM_CNTL_REG;
uint32_t DMCU_IRAM_RD_CTRL;
uint32_t DMCU_IRAM_RD_DATA;
uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK;
@@ -317,6 +328,4 @@ struct dmcu *dcn21_dmcu_create(
void dce_dmcu_destroy(struct dmcu **dmcu);
-static const uint32_t abm_gain_stepsize = 0x0060;
-
#endif /* _DCE_ABM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 7fbd92fbc63a..a524f471e0d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -435,7 +435,7 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
return dce_i2c_hw;
}
-enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(
+static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(
struct dce_i2c_hw *dce_i2c_hw,
uint32_t timeout,
enum i2c_channel_operation_result expected_result)
@@ -502,7 +502,7 @@ static uint32_t get_transaction_timeout_hw(
return period_timeout * num_of_clock_stretches;
}
-bool dce_i2c_hw_engine_submit_payload(
+static bool dce_i2c_hw_engine_submit_payload(
struct dce_i2c_hw *dce_i2c_hw,
struct i2c_payload *payload,
bool middle_of_transaction,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index 87d8428df6c4..6846afd83701 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -339,7 +339,7 @@ static bool start_sync_sw(
return false;
}
-void dce_i2c_sw_engine_set_speed(
+static void dce_i2c_sw_engine_set_speed(
struct dce_i2c_sw *engine,
uint32_t speed)
{
@@ -353,7 +353,7 @@ void dce_i2c_sw_engine_set_speed(
engine->clock_delay = 12;
}
-bool dce_i2c_sw_engine_acquire_engine(
+static bool dce_i2c_sw_engine_acquire_engine(
struct dce_i2c_sw *engine,
struct ddc *ddc)
{
@@ -397,7 +397,7 @@ bool dce_i2c_engine_acquire_sw(
-void dce_i2c_sw_engine_submit_channel_request(
+static void dce_i2c_sw_engine_submit_channel_request(
struct dce_i2c_sw *engine,
struct i2c_request_transaction_data *req)
{
@@ -440,7 +440,8 @@ void dce_i2c_sw_engine_submit_channel_request(
I2C_CHANNEL_OPERATION_SUCCEEDED :
I2C_CHANNEL_OPERATION_FAILED;
}
-bool dce_i2c_sw_engine_submit_payload(
+
+static bool dce_i2c_sw_engine_submit_payload(
struct dce_i2c_sw *engine,
struct i2c_payload *payload,
bool middle_of_transaction)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 210466b2d863..1e77ffee71b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -1197,7 +1197,7 @@ void dce110_link_encoder_enable_dp_mst_output(
#if defined(CONFIG_DRM_AMD_DC_SI)
/* enables DP PHY output */
-void dce60_link_encoder_enable_dp_output(
+static void dce60_link_encoder_enable_dp_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
@@ -1236,7 +1236,7 @@ void dce60_link_encoder_enable_dp_output(
}
/* enables DP PHY output in MST mode */
-void dce60_link_encoder_enable_dp_mst_output(
+static void dce60_link_encoder_enable_dp_mst_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
@@ -1426,7 +1426,7 @@ void dce110_link_encoder_dp_set_phy_pattern(
#if defined(CONFIG_DRM_AMD_DC_SI)
/* set DP PHY test and training patterns */
-void dce60_link_encoder_dp_set_phy_pattern(
+static void dce60_link_encoder_dp_set_phy_pattern(
struct link_encoder *enc,
const struct encoder_set_dp_phy_pattern_param *param)
{
@@ -1503,7 +1503,6 @@ void dce110_link_encoder_update_mst_stream_allocation_table(
const struct link_mst_stream_allocation_table *table)
{
struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
- uint32_t value0 = 0;
uint32_t value1 = 0;
uint32_t value2 = 0;
uint32_t slots = 0;
@@ -1604,7 +1603,7 @@ void dce110_link_encoder_update_mst_stream_allocation_table(
do {
udelay(10);
- value0 = REG_READ(DP_MSE_SAT_UPDATE);
+ REG_READ(DP_MSE_SAT_UPDATE);
REG_GET(DP_MSE_SAT_UPDATE,
DP_MSE_SAT_UPDATE, &value1);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
index e459ae65aaf7..4600231da6cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
@@ -97,7 +97,7 @@ enum {
-/**
+/*
* set_truncation
* 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
* 2) enable truncation
@@ -142,7 +142,7 @@ static void set_truncation(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-/**
+/*
* dce60_set_truncation
* 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
* 2) enable truncation
@@ -183,7 +183,7 @@ static void dce60_set_truncation(
}
#endif
-/**
+/*
* set_spatial_dither
* 1) set spatial dithering mode: pattern of seed
* 2) set spatial dithering depth: 0 for 18bpp or 1 for 24bpp
@@ -291,7 +291,7 @@ static void set_spatial_dither(
FMT_SPATIAL_DITHER_EN, 1);
}
-/**
+/*
* SetTemporalDither (Frame Modulation)
* 1) set temporal dither depth
* 2) select pattern: from hard-coded pattern or programmable pattern
@@ -355,7 +355,7 @@ static void set_temporal_dither(
FMT_TEMPORAL_DITHER_EN, 1);
}
-/**
+/*
* Set Clamping
* 1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
* 1 for 8 bpc
@@ -415,7 +415,7 @@ void dce110_opp_set_clamping(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-/**
+/*
* Set Clamping for DCE6 parts
* 1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
* 1 for 8 bpc
@@ -424,7 +424,7 @@ void dce110_opp_set_clamping(
* 7 for programable
* 2) Enable clamp if Limited range requested
*/
-void dce60_opp_set_clamping(
+static void dce60_opp_set_clamping(
struct dce110_opp *opp110,
const struct clamping_and_pixel_encoding_params *params)
{
@@ -465,7 +465,7 @@ void dce60_opp_set_clamping(
}
#endif
-/**
+/*
* set_pixel_encoding
*
* Set Pixel Encoding
@@ -501,7 +501,7 @@ static void set_pixel_encoding(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-/**
+/*
* dce60_set_pixel_encoding
* DCE6 has no FMT_SUBSAMPLING_{MODE,ORDER} bits in FMT_CONTROL reg
* Set Pixel Encoding
@@ -545,7 +545,7 @@ void dce110_opp_program_bit_depth_reduction(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-void dce60_opp_program_bit_depth_reduction(
+static void dce60_opp_program_bit_depth_reduction(
struct output_pixel_processor *opp,
const struct bit_depth_reduction_params *params)
{
@@ -568,7 +568,7 @@ void dce110_opp_program_clamping_and_pixel_encoding(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-void dce60_opp_program_clamping_and_pixel_encoding(
+static void dce60_opp_program_clamping_and_pixel_encoding(
struct output_pixel_processor *opp,
const struct clamping_and_pixel_encoding_params *params)
{
@@ -678,7 +678,7 @@ void dce110_opp_program_fmt(
}
#if defined(CONFIG_DRM_AMD_DC_SI)
-void dce60_opp_program_fmt(
+static void dce60_opp_program_fmt(
struct output_pixel_processor *opp,
struct bit_depth_reduction_params *fmt_bit_depth,
struct clamping_and_pixel_encoding_params *clamping)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
index 4d484ef60f35..bf1ffc3629c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
@@ -111,7 +111,6 @@ enum dce110_opp_reg_type {
OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
- OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
@@ -219,7 +218,6 @@ enum dce110_opp_reg_type {
OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
- OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
index 761fdfc1f5bd..e92339235863 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -50,16 +50,16 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_c
{
uint64_t current_backlight;
uint32_t round_result;
- uint32_t pwm_period_cntl, bl_period, bl_int_count;
- uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
+ uint32_t bl_period, bl_int_count;
+ uint32_t bl_pwm, fractional_duty_cycle_en;
uint32_t bl_period_mask, bl_pwm_mask;
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
- pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
+ REG_READ(BL_PWM_PERIOD_CNTL);
REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
- bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
+ REG_READ(BL_PWM_CNTL);
REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index ada57f745fd7..8d4263da59f2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -67,7 +67,6 @@ static void dce110_update_generic_info_packet(
uint32_t packet_index,
const struct dc_info_packet *info_packet)
{
- uint32_t regval;
/* TODOFPGA Figure out a proper number for max_retries polling for lock
* use 50 for now.
*/
@@ -99,7 +98,7 @@ static void dce110_update_generic_info_packet(
}
/* choose which generic packet to use */
{
- regval = REG_READ(AFMT_VBI_PACKET_CONTROL);
+ REG_READ(AFMT_VBI_PACKET_CONTROL);
REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
AFMT_GENERIC_INDEX, packet_index);
}
@@ -564,6 +563,7 @@ static void dce110_stream_encoder_hdmi_set_stream_attribute(
cntl.enable_dp_audio = enable_audio;
cntl.pixel_clock = actual_pix_clk_khz;
cntl.lanes_number = LANE_COUNT_FOUR;
+ cntl.color_depth = crtc_timing->display_color_depth;
if (enc110->base.bp->funcs->encoder_control(
enc110->base.bp, &cntl) != BP_RESULT_OK)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index 130a0a0c8332..151dc7bf6d23 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -493,7 +493,6 @@ static void dce60_transform_set_scaler(
{
struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
bool is_scaling_required;
- bool filter_updated = false;
const uint16_t *coeffs_v, *coeffs_h;
/*Use whole line buffer memory always*/
@@ -558,7 +557,6 @@ static void dce60_transform_set_scaler(
xfm_dce->filter_v = coeffs_v;
xfm_dce->filter_h = coeffs_h;
- filter_updated = true;
}
}
@@ -601,12 +599,12 @@ static void set_clamp(
clamp_max = 0x3FC0;
break;
case COLOR_DEPTH_101010:
- /* 10bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
- clamp_max = 0x3FFC;
+ /* 10bit MSB aligned on 14 bit bus '11 1111 1111 0000' */
+ clamp_max = 0x3FF0;
break;
case COLOR_DEPTH_121212:
- /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1111' */
- clamp_max = 0x3FFF;
+ /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
+ clamp_max = 0x3FFC;
break;
default:
clamp_max = 0x3FC0;
@@ -1037,34 +1035,23 @@ static void dce60_transform_set_pixel_storage_depth(
const struct bit_depth_reduction_params *bit_depth_params)
{
struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
- int pixel_depth, expan_mode;
enum dc_color_depth color_depth;
switch (depth) {
case LB_PIXEL_DEPTH_18BPP:
color_depth = COLOR_DEPTH_666;
- pixel_depth = 2;
- expan_mode = 1;
break;
case LB_PIXEL_DEPTH_24BPP:
color_depth = COLOR_DEPTH_888;
- pixel_depth = 1;
- expan_mode = 1;
break;
case LB_PIXEL_DEPTH_30BPP:
color_depth = COLOR_DEPTH_101010;
- pixel_depth = 0;
- expan_mode = 1;
break;
case LB_PIXEL_DEPTH_36BPP:
color_depth = COLOR_DEPTH_121212;
- pixel_depth = 3;
- expan_mode = 0;
break;
default:
color_depth = COLOR_DEPTH_101010;
- pixel_depth = 0;
- expan_mode = 1;
BREAK_TO_DEBUGGER();
break;
}
@@ -1113,7 +1100,7 @@ static void program_gamut_remap(
}
-/**
+/*
*****************************************************************************
* Function: dal_transform_wide_gamut_set_gamut_remap
*
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index 0cf130dc4e52..453aaa5757bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -57,6 +57,7 @@ static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
union dmub_rb_cmd cmd;
uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 1 : 0;
+ memset(&cmd, 0, sizeof(cmd));
cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM;
cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC;
cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm;
@@ -135,6 +136,7 @@ static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
union dmub_rb_cmd cmd;
struct dc_context *dc = abm->ctx;
+ memset(&cmd, 0, sizeof(cmd));
cmd.abm_set_level.header.type = DMUB_CMD__ABM;
cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL;
cmd.abm_set_level.abm_set_level_data.level = level;
@@ -160,6 +162,7 @@ static bool dmub_abm_init_config(struct abm *abm,
// Copy iramtable into cw7
memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes);
+ memset(&cmd, 0, sizeof(cmd));
// Fw will copy from cw7 to fw_state
cmd.abm_init_config.header.type = DMUB_CMD__ABM;
cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index d399270fd17e..c97ee5abc0ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -33,8 +33,9 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
union dmub_hw_lock_flags *hw_locks,
struct dmub_hw_lock_inst_flags *inst_flags)
{
- union dmub_rb_cmd cmd = { 0 };
+ union dmub_rb_cmd cmd;
+ memset(&cmd, 0, sizeof(cmd));
cmd.lock_hw.header.type = DMUB_CMD__HW_LOCK;
cmd.lock_hw.header.sub_type = 0;
cmd.lock_hw.header.payload_bytes = sizeof(struct dmub_cmd_lock_hw_data);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 17e84f34ceba..69e34bef274c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -31,7 +31,7 @@
#define MAX_PIPES 6
-/**
+/*
* Convert dmcub psr state to dmcu psr state.
*/
static enum dc_psr_state convert_psr_state(uint32_t raw_state)
@@ -74,7 +74,7 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
return state;
}
-/**
+/*
* Get PSR state from firmware.
*/
static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
@@ -90,7 +90,7 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
*state = convert_psr_state(raw_state);
}
-/**
+/*
* Set PSR version.
*/
static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *stream)
@@ -101,6 +101,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
return false;
+ memset(&cmd, 0, sizeof(cmd));
cmd.psr_set_version.header.type = DMUB_CMD__PSR;
cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION;
switch (stream->link->psr_settings.psr_version) {
@@ -121,7 +122,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
return true;
}
-/**
+/*
* Enable/Disable PSR.
*/
static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
@@ -131,7 +132,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
uint32_t retry_count;
enum dc_psr_state state = PSR_STATE0;
-
+ memset(&cmd, 0, sizeof(cmd));
cmd.psr_enable.header.type = DMUB_CMD__PSR;
if (enable)
@@ -170,7 +171,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
}
}
-/**
+/*
* Set PSR level.
*/
static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
@@ -184,6 +185,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
if (state == PSR_STATE0)
return;
+ memset(&cmd, 0, sizeof(cmd));
cmd.psr_set_level.header.type = DMUB_CMD__PSR;
cmd.psr_set_level.header.sub_type = DMUB_CMD__PSR_SET_LEVEL;
cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data);
@@ -194,7 +196,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
-/**
+/*
* Setup PSR by programming phy registers and sending psr hw context values to firmware.
*/
static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
@@ -233,6 +235,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
psr_context->sdpTransmitLineNumDeadline);
+ memset(&cmd, 0, sizeof(cmd));
cmd.psr_copy_settings.header.type = DMUB_CMD__PSR;
cmd.psr_copy_settings.header.sub_type = DMUB_CMD__PSR_COPY_SETTINGS;
cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data);
@@ -277,7 +280,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
return true;
}
-/**
+/*
* Send command to PSR to force static ENTER and ignore all state changes until exit
*/
static void dmub_psr_force_static(struct dmub_psr *dmub)
@@ -285,6 +288,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub)
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
+ memset(&cmd, 0, sizeof(cmd));
cmd.psr_force_static.header.type = DMUB_CMD__PSR;
cmd.psr_force_static.header.sub_type = DMUB_CMD__PSR_FORCE_STATIC;
cmd.psr_enable.header.payload_bytes = 0;
@@ -294,7 +298,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub)
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
-/**
+/*
* Get PSR residency from firmware.
*/
static void dmub_psr_get_residency(struct dmub_psr *dmub, uint32_t *residency)
@@ -316,7 +320,7 @@ static const struct dmub_psr_funcs psr_funcs = {
.psr_get_residency = dmub_psr_get_residency,
};
-/**
+/*
* Construct PSR object.
*/
static void dmub_psr_construct(struct dmub_psr *psr, struct dc_context *ctx)
@@ -325,7 +329,7 @@ static void dmub_psr_construct(struct dmub_psr *psr, struct dc_context *ctx)
psr->funcs = &psr_funcs;
}
-/**
+/*
* Allocate and initialize PSR object.
*/
struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
@@ -342,7 +346,7 @@ struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
return psr;
}
-/**
+/*
* Deallocate PSR object.
*/
void dmub_psr_destroy(struct dmub_psr **dmub)
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/Makefile b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
index a822d4e2a169..ff20c47f559e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
@@ -23,6 +23,8 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce100/dce100_resource.o = $(call cc-disable-warning, override-init)
+
DCE100 = dce100_resource.o dce100_hw_sequencer.o
AMD_DAL_DCE100 = $(addprefix $(AMDDALPATH)/dc/dce100/,$(DCE100))
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 8ab9d6c79808..635ef0e7c782 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -58,6 +58,8 @@
#include "dce/dce_abm.h"
#include "dce/dce_i2c.h"
+#include "dce100_resource.h"
+
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_8_2_d.h"
#include "gmc/gmc_8_2_sh_mask.h"
@@ -385,7 +387,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
- .fp16 = false
+ .fp16 = true
},
.max_upscale_factor = {
@@ -611,7 +613,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_TPS3_CAPABLE = true
};
-struct link_encoder *dce100_link_encoder_create(
+static struct link_encoder *dce100_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dce110_link_encoder *enc110 =
@@ -650,7 +652,7 @@ static struct panel_cntl *dce100_panel_cntl_create(const struct panel_cntl_init_
return &panel_cntl->base;
}
-struct output_pixel_processor *dce100_opp_create(
+static struct output_pixel_processor *dce100_opp_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -665,7 +667,7 @@ struct output_pixel_processor *dce100_opp_create(
return &opp->base;
}
-struct dce_aux *dce100_aux_engine_create(
+static struct dce_aux *dce100_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -703,7 +705,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
-struct dce_i2c_hw *dce100_i2c_hw_create(
+static struct dce_i2c_hw *dce100_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -718,7 +720,7 @@ struct dce_i2c_hw *dce100_i2c_hw_create(
return dce_i2c_hw;
}
-struct clock_source *dce100_clock_source_create(
+static struct clock_source *dce100_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -742,7 +744,7 @@ struct clock_source *dce100_clock_source_create(
return NULL;
}
-void dce100_clock_source_destroy(struct clock_source **clk_src)
+static void dce100_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
@@ -831,7 +833,7 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-bool dce100_validate_bandwidth(
+static bool dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -876,7 +878,7 @@ static bool dce100_validate_surface_sets(
return true;
}
-enum dc_status dce100_validate_global(
+static enum dc_status dce100_validate_global(
struct dc *dc,
struct dc_state *context)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
index d564c0eb8b04..84ab48df0c26 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
@@ -23,6 +23,8 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = $(call cc-disable-warning, override-init)
+
DCE110 = dce110_timing_generator.o \
dce110_compressor.o dce110_hw_sequencer.o dce110_resource.o \
dce110_opp_regamma_v.o dce110_opp_csc_v.o dce110_timing_generator_v.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 72b580a4eb85..44564a4742b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -412,36 +412,6 @@ void dce110_compressor_destroy(struct compressor **compressor)
*compressor = NULL;
}
-bool dce110_get_required_compressed_surfacesize(struct fbc_input_info fbc_input_info,
- struct fbc_requested_compressed_size size)
-{
- bool result = false;
-
- unsigned int max_x = FBC_MAX_X, max_y = FBC_MAX_Y;
-
- get_max_support_fbc_buffersize(&max_x, &max_y);
-
- if (fbc_input_info.dynamic_fbc_buffer_alloc == 0) {
- /*
- * For DCE11 here use Max HW supported size: HW Support up to 3840x2400 resolution
- * or 18000 chunks.
- */
- size.preferred_size = size.min_size = align_to_chunks_number_per_line(max_x) * max_y * 4; /* (For FBC when LPT not supported). */
- size.preferred_size_alignment = size.min_size_alignment = 0x100; /* For FBC when LPT not supported */
- size.bits.preferred_must_be_framebuffer_pool = 1;
- size.bits.min_must_be_framebuffer_pool = 1;
-
- result = true;
- }
- /*
- * Maybe to add registry key support with optional size here to override above
- * for debugging purposes
- */
-
- return result;
-}
-
-
void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y)
{
*max_x = FBC_MAX_X;
@@ -455,31 +425,6 @@ void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y)
*/
}
-
-unsigned int controller_id_to_index(enum controller_id controller_id)
-{
- unsigned int index = 0;
-
- switch (controller_id) {
- case CONTROLLER_ID_D0:
- index = 0;
- break;
- case CONTROLLER_ID_D1:
- index = 1;
- break;
- case CONTROLLER_ID_D2:
- index = 2;
- break;
- case CONTROLLER_ID_D3:
- index = 3;
- break;
- default:
- break;
- }
- return index;
-}
-
-
static const struct compressor_funcs dce110_compressor_funcs = {
.power_up_fbc = dce110_compressor_power_up_fbc,
.enable_fbc = dce110_compressor_enable_fbc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 4c230f1de9a3..caee1c9f54bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -921,6 +921,37 @@ void dce110_edp_power_control(
}
}
+void dce110_edp_wait_for_T12(
+ struct dc_link *link)
+{
+ struct dc_context *ctx = link->ctx;
+
+ if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
+ != CONNECTOR_ID_EDP) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (!link->panel_cntl)
+ return;
+
+ if (!link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl) &&
+ link->link_trace.time_stamp.edp_poweroff != 0) {
+ unsigned int t12_duration = 500; // Default T12 as per spec
+ unsigned long long current_ts = dm_get_timestamp(ctx);
+ unsigned long long time_since_edp_poweroff_ms =
+ div64_u64(dm_get_elapse_time_in_ns(
+ ctx,
+ current_ts,
+ link->link_trace.time_stamp.edp_poweroff), 1000000);
+
+ t12_duration += link->local_sink->edid_caps.panel_patch.extra_t12_ms; // Add extra T12
+
+ if (time_since_edp_poweroff_ms < t12_duration)
+ msleep(t12_duration - time_since_edp_poweroff_ms);
+ }
+}
+
/*todo: cloned in stream enc, fix*/
/*
* @brief
@@ -1628,7 +1659,7 @@ static struct dc_link *get_edp_link_with_sink(
return link;
}
-/**
+/*
* When ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need:
* 1. Power down all DC HW blocks
* 2. Disable VGA engine on all controllers
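The dce110_edp_wait_for_T12() hunk above reduces to simple wall-clock arithmetic: measure how long the panel has been powered off and sleep for whatever remains of the required T12 interval. A minimal userspace sketch of that calculation, with the DM timestamp helpers and msleep() replaced by hypothetical stand-ins, could look like this:

#include <stdio.h>

/* Hypothetical stand-ins for the DM timestamp helpers used in the patch. */
static unsigned long long poweroff_ts_ns;   /* recorded at eDP power-off */
static unsigned long long now_ts_ns;        /* current timestamp         */

static void wait_for_t12(unsigned int extra_t12_ms)
{
	unsigned int t12_duration_ms = 500 + extra_t12_ms;   /* spec default + panel quirk */
	unsigned long long elapsed_ms = (now_ts_ns - poweroff_ts_ns) / 1000000ULL;

	if (elapsed_ms < t12_duration_ms)
		printf("would msleep(%llu ms)\n",
		       (unsigned long long)t12_duration_ms - elapsed_ms);
	else
		printf("T12 already satisfied, no wait\n");
}

int main(void)
{
	poweroff_ts_ns = 1000000000ULL;   /* power-off at t = 1.0 s */
	now_ts_ns      = 1200000000ULL;   /* now at t = 1.2 s       */
	wait_for_t12(0);                  /* -> would msleep(300 ms) */
	return 0;
}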
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index d54172d88f5f..8bbb499067f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -34,6 +34,7 @@
#include "inc/dce_calcs.h"
#include "dce/dce_mem_input.h"
+#include "dce110_mem_input_v.h"
static void set_flip_control(
struct dce_mem_input *mem_input110,
@@ -468,7 +469,7 @@ static void program_pixel_format(
}
}
-bool dce_mem_input_v_is_surface_pending(struct mem_input *mem_input)
+static bool dce_mem_input_v_is_surface_pending(struct mem_input *mem_input)
{
struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
uint32_t value;
@@ -483,7 +484,7 @@ bool dce_mem_input_v_is_surface_pending(struct mem_input *mem_input)
return false;
}
-bool dce_mem_input_v_program_surface_flip_and_addr(
+static bool dce_mem_input_v_program_surface_flip_and_addr(
struct mem_input *mem_input,
const struct dc_plane_address *address,
bool flip_immediate)
@@ -560,7 +561,7 @@ static const unsigned int *get_dvmm_hw_setting(
}
}
-void dce_mem_input_v_program_pte_vm(
+static void dce_mem_input_v_program_pte_vm(
struct mem_input *mem_input,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
@@ -633,7 +634,7 @@ void dce_mem_input_v_program_pte_vm(
dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL_C, value);
}
-void dce_mem_input_v_program_surface_config(
+static void dce_mem_input_v_program_surface_config(
struct mem_input *mem_input,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
@@ -919,7 +920,7 @@ static void program_nbp_watermark_c(
marks);
}
-void dce_mem_input_v_program_display_marks(
+static void dce_mem_input_v_program_display_marks(
struct mem_input *mem_input,
struct dce_watermarks nbp,
struct dce_watermarks stutter,
@@ -942,7 +943,7 @@ void dce_mem_input_v_program_display_marks(
}
-void dce_mem_input_program_chroma_display_marks(
+static void dce_mem_input_program_chroma_display_marks(
struct mem_input *mem_input,
struct dce_watermarks nbp,
struct dce_watermarks stutter,
@@ -963,7 +964,7 @@ void dce_mem_input_program_chroma_display_marks(
stutter);
}
-void dce110_allocate_mem_input_v(
+static void dce110_allocate_mem_input_v(
struct mem_input *mi,
uint32_t h_total,/* for current stream */
uint32_t v_total,/* for current stream */
@@ -1005,7 +1006,7 @@ void dce110_allocate_mem_input_v(
}
-void dce110_free_mem_input_v(
+static void dce110_free_mem_input_v(
struct mem_input *mi,
uint32_t total_stream_num)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 3f63822b8e28..d7fcc5cccdce 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -410,7 +410,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
- .fp16 = false
+ .fp16 = true
},
.max_upscale_factor = {
@@ -715,7 +715,7 @@ static struct output_pixel_processor *dce110_opp_create(
return &opp->base;
}
-struct dce_aux *dce110_aux_engine_create(
+static struct dce_aux *dce110_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -753,7 +753,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
-struct dce_i2c_hw *dce110_i2c_hw_create(
+static struct dce_i2c_hw *dce110_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -768,7 +768,7 @@ struct dce_i2c_hw *dce110_i2c_hw_create(
return dce_i2c_hw;
}
-struct clock_source *dce110_clock_source_create(
+static struct clock_source *dce110_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -792,7 +792,7 @@ struct clock_source *dce110_clock_source_create(
return NULL;
}
-void dce110_clock_source_destroy(struct clock_source **clk_src)
+static void dce110_clock_source_destroy(struct clock_source **clk_src)
{
struct dce110_clk_src *dce110_clk_src;
@@ -1034,8 +1034,8 @@ static bool dce110_validate_bandwidth(
return result;
}
-enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
- struct dc_caps *caps)
+static enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
+ struct dc_caps *caps)
{
if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) ||
((plane_state->dst_rect.height * 2) < plane_state->src_rect.height))
@@ -1089,7 +1089,7 @@ static bool dce110_validate_surface_sets(
return true;
}
-enum dc_status dce110_validate_global(
+static enum dc_status dce110_validate_global(
struct dc *dc,
struct dc_state *context)
{
@@ -1272,7 +1272,6 @@ static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool)
/* update the public caps to indicate an underlay is available */
ctx->dc->caps.max_slave_planes = 1;
- ctx->dc->caps.max_slave_planes = 1;
return true;
}
@@ -1333,7 +1332,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
1000);
}
-const struct resource_caps *dce110_resource_cap(
+static const struct resource_caps *dce110_resource_cap(
struct hw_asic_id *asic_id)
{
if (ASIC_REV_IS_STONEY(asic_id->hw_internal_rev))
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 1ea7db8eeb98..d88a74559edd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -75,7 +75,7 @@ static void dce110_timing_generator_apply_front_porch_workaround(
}
}
-/**
+/*
*****************************************************************************
* Function: is_in_vertical_blank
*
@@ -116,7 +116,7 @@ void dce110_timing_generator_set_early_control(
dm_write_reg(tg->ctx, address, regval);
}
-/**
+/*
* Enable CRTC
* Enable CRTC - call ASIC Control Object to enable Timing generator.
*/
@@ -175,7 +175,7 @@ void dce110_timing_generator_program_blank_color(
dm_write_reg(tg->ctx, addr, value);
}
-/**
+/*
*****************************************************************************
* Function: disable_stereo
*
@@ -226,7 +226,7 @@ static void disable_stereo(struct timing_generator *tg)
}
#endif
-/**
+/*
* disable_crtc - call ASIC Control Object to disable Timing generator.
*/
bool dce110_timing_generator_disable_crtc(struct timing_generator *tg)
@@ -247,11 +247,10 @@ bool dce110_timing_generator_disable_crtc(struct timing_generator *tg)
return result == BP_RESULT_OK;
}
-/**
-* program_horz_count_by_2
-* Programs DxCRTC_HORZ_COUNT_BY2_EN - 1 for DVI 30bpp mode, 0 otherwise
-*
-*/
+/*
+ * program_horz_count_by_2
+ * Programs DxCRTC_HORZ_COUNT_BY2_EN - 1 for DVI 30bpp mode, 0 otherwise
+ */
static void program_horz_count_by_2(
struct timing_generator *tg,
const struct dc_crtc_timing *timing)
@@ -273,7 +272,7 @@ static void program_horz_count_by_2(
CRTC_REG(mmCRTC_COUNT_CONTROL), regval);
}
-/**
+/*
* program_timing_generator
* Program CRTC Timing Registers - DxCRTC_H_*, DxCRTC_V_*, Pixel repetition.
* Call ASIC Control Object to program Timings.
@@ -352,7 +351,7 @@ bool dce110_timing_generator_program_timing_generator(
return result == BP_RESULT_OK;
}
-/**
+/*
*****************************************************************************
* Function: set_drr
*
@@ -521,7 +520,7 @@ uint32_t dce110_timing_generator_get_vblank_counter(struct timing_generator *tg)
return field;
}
-/**
+/*
*****************************************************************************
* Function: dce110_timing_generator_get_position
*
@@ -557,7 +556,7 @@ void dce110_timing_generator_get_position(struct timing_generator *tg,
CRTC_VERT_COUNT_NOM);
}
-/**
+/*
*****************************************************************************
* Function: get_crtc_scanoutpos
*
@@ -1106,11 +1105,11 @@ void dce110_timing_generator_set_test_pattern(
}
}
-/**
-* dce110_timing_generator_validate_timing
-* The timing generators support a maximum display size of is 8192 x 8192 pixels,
-* including both active display and blanking periods. Check H Total and V Total.
-*/
+/*
+ * dce110_timing_generator_validate_timing
+ * The timing generators support a maximum display size of 8192 x 8192 pixels,
+ * including both active display and blanking periods. Check H Total and V Total.
+ */
bool dce110_timing_generator_validate_timing(
struct timing_generator *tg,
const struct dc_crtc_timing *timing,
@@ -1167,9 +1166,9 @@ bool dce110_timing_generator_validate_timing(
return true;
}
-/**
-* Wait till we are at the beginning of VBlank.
-*/
+/*
+ * Wait till we are at the beginning of VBlank.
+ */
void dce110_timing_generator_wait_for_vblank(struct timing_generator *tg)
{
/* We want to catch beginning of VBlank here, so if the first try are
@@ -1191,9 +1190,9 @@ void dce110_timing_generator_wait_for_vblank(struct timing_generator *tg)
}
}
-/**
-* Wait till we are in VActive (anywhere in VActive)
-*/
+/*
+ * Wait till we are in VActive (anywhere in VActive)
+ */
void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg)
{
while (dce110_timing_generator_is_in_vertical_blank(tg)) {
@@ -1204,7 +1203,7 @@ void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg)
}
}
-/**
+/*
*****************************************************************************
* Function: dce110_timing_generator_setup_global_swap_lock
*
@@ -1215,7 +1214,6 @@ void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg)
* @param [in] gsl_params: setup data
*****************************************************************************
*/
-
void dce110_timing_generator_setup_global_swap_lock(
struct timing_generator *tg,
const struct dcp_gsl_params *gsl_params)
@@ -1351,10 +1349,7 @@ void dce110_timing_generator_tear_down_global_swap_lock(
/* Restore DCP_GSL_PURPOSE_SURFACE_FLIP */
{
- uint32_t value_crtc_vtotal;
-
- value_crtc_vtotal = dm_read_reg(tg->ctx,
- CRTC_REG(mmCRTC_V_TOTAL));
+ dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_V_TOTAL));
set_reg_field_value(value,
0,
@@ -1385,7 +1380,7 @@ void dce110_timing_generator_tear_down_global_swap_lock(
dm_write_reg(tg->ctx, address, value);
}
-/**
+/*
*****************************************************************************
* Function: is_counter_moving
*
@@ -1767,7 +1762,7 @@ void dce110_timing_generator_disable_reset_trigger(
dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL), value);
}
-/**
+/*
*****************************************************************************
* @brief
* Checks whether CRTC triggered reset occurred
@@ -1794,7 +1789,7 @@ bool dce110_timing_generator_did_triggered_reset_occur(
return (force || vert_sync);
}
-/**
+/*
* dce110_timing_generator_disable_vga
* Turn OFF VGA Mode and Timing - DxVGA_CONTROL
* VGA Mode and VGA Timing is used by VBIOS on CRT Monitors;
@@ -1840,14 +1835,13 @@ void dce110_timing_generator_disable_vga(
dm_write_reg(tg->ctx, addr, value);
}
-/**
-* set_overscan_color_black
-*
-* @param :black_color is one of the color space
-* :this routine will set overscan black color according to the color space.
-* @return none
-*/
-
+/*
+ * set_overscan_color_black
+ *
+ * @param :black_color is one of the color space
+ * :this routine will set overscan black color according to the color space.
+ * @return none
+ */
void dce110_timing_generator_set_overscan_color_black(
struct timing_generator *tg,
const struct tg_color *color)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
index a13a2f58944e..c509384fff54 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -46,17 +46,16 @@
*
**********************************************************************************/
-/**
-* Enable CRTCV
-*/
+/*
+ * Enable CRTCV
+ */
static bool dce110_timing_generator_v_enable_crtc(struct timing_generator *tg)
{
/*
-* Set MASTER_UPDATE_MODE to 0
-* This is needed for DRR, and also suggested to be default value by Syed.
-*/
-
+ * Set MASTER_UPDATE_MODE to 0
+ * This is needed for DRR, and also suggested to be default value by Syed.
+ */
uint32_t value;
value = 0;
@@ -209,9 +208,9 @@ static void dce110_timing_generator_v_wait_for_vblank(struct timing_generator *t
}
}
-/**
-* Wait till we are in VActive (anywhere in VActive)
-*/
+/*
+ * Wait till we are in VActive (anywhere in VActive)
+ */
static void dce110_timing_generator_v_wait_for_vactive(struct timing_generator *tg)
{
while (dce110_timing_generator_v_is_in_vertical_blank(tg)) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
index b1aaab5590cc..29438c6050db 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -217,16 +217,15 @@ static bool setup_scaling_configuration(
return is_scaling_needed;
}
-/**
-* Function:
-* void program_overscan
-*
-* Purpose: Programs overscan border
-* Input: overscan
-*
-* Output:
- void
-*/
+/*
+ * Function:
+ * void program_overscan
+ *
+ * Purpose: Programs overscan border
+ * Input: overscan
+ *
+ * Output: void
+ */
static void program_overscan(
struct dce_transform *xfm_dce,
const struct scaler_data *data)
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
index 8e090446d511..9de6501702d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
@@ -23,6 +23,8 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = $(call cc-disable-warning, override-init)
+
DCE112 = dce112_compressor.o dce112_hw_sequencer.o \
dce112_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index f99b1c084590..ee55cda854bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -59,7 +59,9 @@
#include "dce/dce_11_2_sh_mask.h"
#include "dce100/dce100_resource.h"
-#define DC_LOGGER \
+#include "dce112_resource.h"
+
+#define DC_LOGGER \
dc->ctx->logger
#ifndef mmDP_DPHY_INTERNAL_CTRL
@@ -617,7 +619,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_TPS4_CAPABLE = true
};
-struct link_encoder *dce112_link_encoder_create(
+static struct link_encoder *dce112_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dce110_link_encoder *enc110 =
@@ -671,7 +673,7 @@ static struct input_pixel_processor *dce112_ipp_create(
return &ipp->base;
}
-struct output_pixel_processor *dce112_opp_create(
+static struct output_pixel_processor *dce112_opp_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -686,7 +688,7 @@ struct output_pixel_processor *dce112_opp_create(
return &opp->base;
}
-struct dce_aux *dce112_aux_engine_create(
+static struct dce_aux *dce112_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -724,7 +726,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
-struct dce_i2c_hw *dce112_i2c_hw_create(
+static struct dce_i2c_hw *dce112_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -739,7 +741,7 @@ struct dce_i2c_hw *dce112_i2c_hw_create(
return dce_i2c_hw;
}
-struct clock_source *dce112_clock_source_create(
+static struct clock_source *dce112_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -763,7 +765,7 @@ struct clock_source *dce112_clock_source_create(
return NULL;
}
-void dce112_clock_source_destroy(struct clock_source **clk_src)
+static void dce112_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
@@ -1024,7 +1026,7 @@ enum dc_status dce112_add_stream_to_ctx(
return result;
}
-enum dc_status dce112_validate_global(
+static enum dc_status dce112_validate_global(
struct dc *dc,
struct dc_state *context)
{
@@ -1202,7 +1204,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges);
}
-const struct resource_caps *dce112_resource_cap(
+static const struct resource_caps *dce112_resource_cap(
struct hw_asic_id *asic_id)
{
if (ASIC_REV_IS_POLARIS11_M(asic_id->hw_internal_rev) ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
index 37db1f8d45ea..a9cc4b73270b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
@@ -24,6 +24,8 @@
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = $(call cc-disable-warning, override-init)
+
DCE120 = dce120_resource.o dce120_timing_generator.o \
dce120_hw_sequencer.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index 66a13aa39c95..d4afe6c824d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -50,6 +50,7 @@ struct dce120_hw_seq_reg_offsets {
uint32_t crtc;
};
+#if 0
static const struct dce120_hw_seq_reg_offsets reg_offsets[] = {
{
.crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
@@ -79,7 +80,6 @@ static const struct dce120_hw_seq_reg_offsets reg_offsets[] = {
/*******************************************************************************
* Private definitions
******************************************************************************/
-#if 0
static void dce120_init_pte(struct dc_context *ctx, uint8_t controller_id)
{
uint32_t addr;
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index f1e3d2888eac..c65e4d125c8e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -423,7 +423,7 @@ static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCE_112(_MASK)
};
-struct output_pixel_processor *dce120_opp_create(
+static struct output_pixel_processor *dce120_opp_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -437,7 +437,7 @@ struct output_pixel_processor *dce120_opp_create(
ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
return &opp->base;
}
-struct dce_aux *dce120_aux_engine_create(
+static struct dce_aux *dce120_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -475,7 +475,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
-struct dce_i2c_hw *dce120_i2c_hw_create(
+static struct dce_i2c_hw *dce120_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 915fbb8e8168..b57c466124e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -69,7 +69,7 @@
#define CRTC_REG_SET_3(reg, field1, val1, field2, val2, field3, val3) \
CRTC_REG_SET_N(reg, 3, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3)
-/**
+/*
*****************************************************************************
* Function: is_in_vertical_blank
*
@@ -98,7 +98,7 @@ static bool dce120_timing_generator_is_in_vertical_blank(
/* determine if given timing can be supported by TG */
-bool dce120_timing_generator_validate_timing(
+static bool dce120_timing_generator_validate_timing(
struct timing_generator *tg,
const struct dc_crtc_timing *timing,
enum signal_type signal)
@@ -125,7 +125,7 @@ bool dce120_timing_generator_validate_timing(
return true;
}
-bool dce120_tg_validate_timing(struct timing_generator *tg,
+static bool dce120_tg_validate_timing(struct timing_generator *tg,
const struct dc_crtc_timing *timing)
{
return dce120_timing_generator_validate_timing(tg, timing, SIGNAL_TYPE_NONE);
@@ -133,7 +133,7 @@ bool dce120_tg_validate_timing(struct timing_generator *tg,
/******** HW programming ************/
/* Disable/Enable Timing Generator */
-bool dce120_timing_generator_enable_crtc(struct timing_generator *tg)
+static bool dce120_timing_generator_enable_crtc(struct timing_generator *tg)
{
enum bp_result result;
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -153,7 +153,7 @@ bool dce120_timing_generator_enable_crtc(struct timing_generator *tg)
return result == BP_RESULT_OK;
}
-void dce120_timing_generator_set_early_control(
+static void dce120_timing_generator_set_early_control(
struct timing_generator *tg,
uint32_t early_cntl)
{
@@ -166,7 +166,7 @@ void dce120_timing_generator_set_early_control(
/**************** TG current status ******************/
/* return the current frame counter. Used by Linux kernel DRM */
-uint32_t dce120_timing_generator_get_vblank_counter(
+static uint32_t dce120_timing_generator_get_vblank_counter(
struct timing_generator *tg)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -181,7 +181,7 @@ uint32_t dce120_timing_generator_get_vblank_counter(
}
/* Get current H and V position */
-void dce120_timing_generator_get_crtc_position(
+static void dce120_timing_generator_get_crtc_position(
struct timing_generator *tg,
struct crtc_position *position)
{
@@ -207,7 +207,7 @@ void dce120_timing_generator_get_crtc_position(
}
/* wait until TG is in beginning of vertical blank region */
-void dce120_timing_generator_wait_for_vblank(struct timing_generator *tg)
+static void dce120_timing_generator_wait_for_vblank(struct timing_generator *tg)
{
/* We want to catch beginning of VBlank here, so if the first try are
* in VBlank, we might be very close to Active, in this case wait for
@@ -229,7 +229,7 @@ void dce120_timing_generator_wait_for_vblank(struct timing_generator *tg)
}
/* wait until TG is in beginning of active region */
-void dce120_timing_generator_wait_for_vactive(struct timing_generator *tg)
+static void dce120_timing_generator_wait_for_vactive(struct timing_generator *tg)
{
while (dce120_timing_generator_is_in_vertical_blank(tg)) {
if (!tg->funcs->is_counter_moving(tg)) {
@@ -242,7 +242,7 @@ void dce120_timing_generator_wait_for_vactive(struct timing_generator *tg)
/*********** Timing Generator Synchronization routines ****/
/* Setups Global Swap Lock group, TimingServer or TimingClient*/
-void dce120_timing_generator_setup_global_swap_lock(
+static void dce120_timing_generator_setup_global_swap_lock(
struct timing_generator *tg,
const struct dcp_gsl_params *gsl_params)
{
@@ -279,7 +279,7 @@ void dce120_timing_generator_setup_global_swap_lock(
}
/* Clear all the register writes done by setup_global_swap_lock */
-void dce120_timing_generator_tear_down_global_swap_lock(
+static void dce120_timing_generator_tear_down_global_swap_lock(
struct timing_generator *tg)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -300,7 +300,7 @@ void dce120_timing_generator_tear_down_global_swap_lock(
}
/* Reset slave controllers on master VSync */
-void dce120_timing_generator_enable_reset_trigger(
+static void dce120_timing_generator_enable_reset_trigger(
struct timing_generator *tg,
int source)
{
@@ -347,7 +347,7 @@ void dce120_timing_generator_enable_reset_trigger(
}
/* disabling trigger-reset */
-void dce120_timing_generator_disable_reset_trigger(
+static void dce120_timing_generator_disable_reset_trigger(
struct timing_generator *tg)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -367,7 +367,7 @@ void dce120_timing_generator_disable_reset_trigger(
}
/* Checks whether CRTC triggered reset occurred */
-bool dce120_timing_generator_did_triggered_reset_occur(
+static bool dce120_timing_generator_did_triggered_reset_occur(
struct timing_generator *tg)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -384,7 +384,7 @@ bool dce120_timing_generator_did_triggered_reset_occur(
/******** Stuff to move to other virtual HW objects *****************/
/* Move to enable accelerated mode */
-void dce120_timing_generator_disable_vga(struct timing_generator *tg)
+static void dce120_timing_generator_disable_vga(struct timing_generator *tg)
{
uint32_t offset = 0;
uint32_t value = 0;
@@ -425,7 +425,7 @@ void dce120_timing_generator_disable_vga(struct timing_generator *tg)
}
/* TODO: Should we move it to transform */
/* Fully program CRTC timing in timing generator */
-void dce120_timing_generator_program_blanking(
+static void dce120_timing_generator_program_blanking(
struct timing_generator *tg,
const struct dc_crtc_timing *timing)
{
@@ -485,7 +485,7 @@ void dce120_timing_generator_program_blanking(
/* TODO: Should we move it to opp? */
/* Combine with below and move YUV/RGB color conversion to SW layer */
-void dce120_timing_generator_program_blank_color(
+static void dce120_timing_generator_program_blank_color(
struct timing_generator *tg,
const struct tg_color *black_color)
{
@@ -498,7 +498,7 @@ void dce120_timing_generator_program_blank_color(
CRTC_BLACK_COLOR_R_CR, black_color->color_r_cr);
}
/* Combine with above and move YUV/RGB color conversion to SW layer */
-void dce120_timing_generator_set_overscan_color_black(
+static void dce120_timing_generator_set_overscan_color_black(
struct timing_generator *tg,
const struct tg_color *color)
{
@@ -540,7 +540,7 @@ void dce120_timing_generator_set_overscan_color_black(
*/
}
-void dce120_timing_generator_set_drr(
+static void dce120_timing_generator_set_drr(
struct timing_generator *tg,
const struct drr_params *params)
{
@@ -589,50 +589,7 @@ void dce120_timing_generator_set_drr(
}
}
-/**
- *****************************************************************************
- * Function: dce120_timing_generator_get_position
- *
- * @brief
- * Returns CRTC vertical/horizontal counters
- *
- * @param [out] position
- *****************************************************************************
- */
-void dce120_timing_generator_get_position(struct timing_generator *tg,
- struct crtc_position *position)
-{
- uint32_t value;
- struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
-
- value = dm_read_reg_soc15(
- tg->ctx,
- mmCRTC0_CRTC_STATUS_POSITION,
- tg110->offsets.crtc);
-
- position->horizontal_count = get_reg_field_value(
- value,
- CRTC0_CRTC_STATUS_POSITION,
- CRTC_HORZ_COUNT);
-
- position->vertical_count = get_reg_field_value(
- value,
- CRTC0_CRTC_STATUS_POSITION,
- CRTC_VERT_COUNT);
-
- value = dm_read_reg_soc15(
- tg->ctx,
- mmCRTC0_CRTC_NOM_VERT_POSITION,
- tg110->offsets.crtc);
-
- position->nominal_vcount = get_reg_field_value(
- value,
- CRTC0_CRTC_NOM_VERT_POSITION,
- CRTC_VERT_COUNT_NOM);
-}
-
-
-void dce120_timing_generator_get_crtc_scanoutpos(
+static void dce120_timing_generator_get_crtc_scanoutpos(
struct timing_generator *tg,
uint32_t *v_blank_start,
uint32_t *v_blank_end,
@@ -661,7 +618,7 @@ void dce120_timing_generator_get_crtc_scanoutpos(
*v_position = position.vertical_count;
}
-void dce120_timing_generator_enable_advanced_request(
+static void dce120_timing_generator_enable_advanced_request(
struct timing_generator *tg,
bool enable,
const struct dc_crtc_timing *timing)
@@ -699,7 +656,7 @@ void dce120_timing_generator_enable_advanced_request(
value);
}
-void dce120_tg_program_blank_color(struct timing_generator *tg,
+static void dce120_tg_program_blank_color(struct timing_generator *tg,
const struct tg_color *black_color)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -722,7 +679,7 @@ void dce120_tg_program_blank_color(struct timing_generator *tg,
value);
}
-void dce120_tg_set_overscan_color(struct timing_generator *tg,
+static void dce120_tg_set_overscan_color(struct timing_generator *tg,
const struct tg_color *overscan_color)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -749,7 +706,7 @@ static void dce120_tg_program_timing(struct timing_generator *tg,
dce120_timing_generator_program_blanking(tg, timing);
}
-bool dce120_tg_is_blanked(struct timing_generator *tg)
+static bool dce120_tg_is_blanked(struct timing_generator *tg)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
uint32_t value = dm_read_reg_soc15(
@@ -770,7 +727,7 @@ bool dce120_tg_is_blanked(struct timing_generator *tg)
return false;
}
-void dce120_tg_set_blank(struct timing_generator *tg,
+static void dce120_tg_set_blank(struct timing_generator *tg,
bool enable_blanking)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
@@ -789,7 +746,7 @@ void dce120_tg_set_blank(struct timing_generator *tg,
bool dce120_tg_validate_timing(struct timing_generator *tg,
const struct dc_crtc_timing *timing);
-void dce120_tg_wait_for_state(struct timing_generator *tg,
+static void dce120_tg_wait_for_state(struct timing_generator *tg,
enum crtc_state state)
{
switch (state) {
@@ -806,7 +763,7 @@ void dce120_tg_wait_for_state(struct timing_generator *tg,
}
}
-void dce120_tg_set_colors(struct timing_generator *tg,
+static void dce120_tg_set_colors(struct timing_generator *tg,
const struct tg_color *blank_color,
const struct tg_color *overscan_color)
{
@@ -833,7 +790,7 @@ static void dce120_timing_generator_set_static_screen_control(
CRTC_STATIC_SCREEN_FRAME_COUNT, num_frames);
}
-void dce120_timing_generator_set_test_pattern(
+static void dce120_timing_generator_set_test_pattern(
struct timing_generator *tg,
/* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
* because this is not DP-specific (which is probably somewhere in DP
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index 7036c3bd0f87..dda596fa1cd7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -23,6 +23,8 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
+
DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
dce60_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
index e9dd78c484d6..dcfa0a3efa00 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
@@ -60,6 +60,8 @@
#include "dce/dce_i2c.h"
/* TODO remove this include */
+#include "dce60_resource.h"
+
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
@@ -519,7 +521,7 @@ static struct output_pixel_processor *dce60_opp_create(
return &opp->base;
}
-struct dce_aux *dce60_aux_engine_create(
+static struct dce_aux *dce60_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -557,7 +559,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
-struct dce_i2c_hw *dce60_i2c_hw_create(
+static struct dce_i2c_hw *dce60_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -573,7 +575,7 @@ struct dce_i2c_hw *dce60_i2c_hw_create(
return dce_i2c_hw;
}
-struct dce_i2c_sw *dce60_i2c_sw_create(
+static struct dce_i2c_sw *dce60_i2c_sw_create(
struct dc_context *ctx)
{
struct dce_i2c_sw *dce_i2c_sw =
@@ -707,7 +709,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_TPS3_CAPABLE = true
};
-struct link_encoder *dce60_link_encoder_create(
+static struct link_encoder *dce60_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dce110_link_encoder *enc110 =
@@ -746,7 +748,7 @@ static struct panel_cntl *dce60_panel_cntl_create(const struct panel_cntl_init_d
return &panel_cntl->base;
}
-struct clock_source *dce60_clock_source_create(
+static struct clock_source *dce60_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -770,7 +772,7 @@ struct clock_source *dce60_clock_source_create(
return NULL;
}
-void dce60_clock_source_destroy(struct clock_source **clk_src)
+static void dce60_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
@@ -860,7 +862,7 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool)
}
}
-bool dce60_validate_bandwidth(
+static bool dce60_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -905,7 +907,7 @@ static bool dce60_validate_surface_sets(
return true;
}
-enum dc_status dce60_validate_global(
+static enum dc_status dce60_validate_global(
struct dc *dc,
struct dc_state *context)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
index fc1af0ff0ca4..c1a85ee374d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
@@ -189,8 +189,8 @@ static bool dce60_is_tg_enabled(struct timing_generator *tg)
return field == 1;
}
-bool dce60_configure_crc(struct timing_generator *tg,
- const struct crc_params *params)
+static bool dce60_configure_crc(struct timing_generator *tg,
+ const struct crc_params *params)
{
/* Cannot configure crc on a CRTC that is disabled */
if (!dce60_is_tg_enabled(tg))
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
index 666fcb2bdbba..0a9d1a350d8b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -23,6 +23,8 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
+CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = $(call cc-disable-warning, override-init)
+
DCE80 = dce80_timing_generator.o dce80_hw_sequencer.o \
dce80_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 390a0fa37239..612450f99278 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -60,6 +60,8 @@
#include "dce/dce_i2c.h"
/* TODO remove this include */
+#include "dce80_resource.h"
+
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
@@ -402,7 +404,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
- .fp16 = false
+ .fp16 = true
},
.max_upscale_factor = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 4d3f7d5e1473..904c2d278998 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -577,7 +577,7 @@ void dpp1_power_on_degamma_lut(
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
REG_SET(CM_MEM_PWR_CTRL, 0,
- SHARED_MEM_PWR_DIS, power_on == true ? 0:1);
+ SHARED_MEM_PWR_DIS, power_on ? 0:1);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 017b67b830e6..89912bb5014f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1224,6 +1224,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
// signals when OTG blanked. This is to prevent pipe from
// requesting data while in PSR.
tg->funcs->tg_init(tg);
+ hubp->power_gated = true;
continue;
}
@@ -2634,7 +2635,7 @@ static void dcn10_update_dchubp_dpp(
hws->funcs.update_plane_addr(dc, pipe_ctx);
if (is_pipe_tree_visible(pipe_ctx))
- hubp->funcs->set_blank(hubp, false);
+ dc->hwss.set_hubp_blank(dc, pipe_ctx, false);
}
void dcn10_blank_pixel_data(
@@ -3134,7 +3135,7 @@ void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
pipe_ctx->stream_res.opp,
- flags.PROGRAM_STEREO == 1 ? true:false,
+ flags.PROGRAM_STEREO == 1,
&stream->timing);
pipe_ctx->stream_res.tg->funcs->program_stereo(
@@ -3145,13 +3146,16 @@ void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
return;
}
-static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
+static struct pipe_ctx *get_pipe_ctx_by_hubp_inst(struct dc_state *context, int mpcc_inst)
{
int i;
- for (i = 0; i < res_pool->pipe_count; i++) {
- if (res_pool->hubps[i]->inst == mpcc_inst)
- return res_pool->hubps[i];
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_res.hubp
+ && context->res_ctx.pipe_ctx[i].plane_res.hubp->inst == mpcc_inst) {
+ return &context->res_ctx.pipe_ctx[i];
+ }
+
}
ASSERT(false);
return NULL;
@@ -3174,11 +3178,23 @@ void dcn10_wait_for_mpcc_disconnect(
for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
- struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+ struct pipe_ctx *restore_bottom_pipe;
+ struct pipe_ctx *restore_top_pipe;
+ struct pipe_ctx *inst_pipe_ctx = get_pipe_ctx_by_hubp_inst(dc->current_state, mpcc_inst);
+ ASSERT(inst_pipe_ctx);
res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
- hubp->funcs->set_blank(hubp, true);
+ /*
+ * Set top and bottom pipes NULL, as we don't want
+ * to blank those pipes when disconnecting from MPCC
+ */
+ restore_bottom_pipe = inst_pipe_ctx->bottom_pipe;
+ restore_top_pipe = inst_pipe_ctx->top_pipe;
+ inst_pipe_ctx->top_pipe = inst_pipe_ctx->bottom_pipe = NULL;
+ dc->hwss.set_hubp_blank(dc, inst_pipe_ctx, true);
+ inst_pipe_ctx->top_pipe = restore_top_pipe;
+ inst_pipe_ctx->bottom_pipe = restore_bottom_pipe;
}
}
@@ -3731,3 +3747,10 @@ void dcn10_get_clock(struct dc *dc,
dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
}
+
+void dcn10_set_hubp_blank(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank_enable)
+{
+ pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, blank_enable);
+}
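The dcn10_set_hubp_blank() addition above routes HUBP blanking through the hw_sequencer function table instead of calling hubp->funcs->set_blank() directly, so a later ASIC can override the behavior in one place. A stripped-down sketch of that function-table indirection, using made-up structure names rather than the real DC types, might look like:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative only: a toy model of the hwss hook, not the real DC structs. */
struct fake_pipe { int hubp_inst; };

struct fake_hwss {
	void (*set_hubp_blank)(struct fake_pipe *pipe, bool blank);
};

static void default_set_hubp_blank(struct fake_pipe *pipe, bool blank)
{
	printf("hubp %d: blank=%d\n", pipe->hubp_inst, blank);
}

int main(void)
{
	struct fake_hwss hwss = { .set_hubp_blank = default_set_hubp_blank };
	struct fake_pipe pipe = { .hubp_inst = 0 };

	/* Callers go through the table, so a later ASIC can swap the hook. */
	hwss.set_hubp_blank(&pipe, true);
	return 0;
}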
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index e5691e499023..89e6dfb63da0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -163,6 +163,8 @@ void dcn10_wait_for_mpcc_disconnect(
void dce110_edp_backlight_control(
struct dc_link *link,
bool enable);
+void dce110_edp_wait_for_T12(
+ struct dc_link *link);
void dce110_edp_power_control(
struct dc_link *link,
bool power_up);
@@ -202,5 +204,8 @@ void dcn10_wait_for_pending_cleared(struct dc *dc,
struct dc_state *context);
void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
void dcn10_verify_allow_pstate_change_high(struct dc *dc);
+void dcn10_set_hubp_blank(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank_enable);
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 7f4766e45dff..e8b6065fffad 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -47,7 +47,7 @@
unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...)
{
- unsigned int ret_vsnprintf;
+ int ret_vsnprintf;
unsigned int chars_printed;
va_list args;
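The ret_vsnprintf type change above matters because vsnprintf() returns a signed int; storing a negative error return in an unsigned variable would silently become a huge character count. A small, hypothetical userspace analogue of snprintf_count() showing the safe pattern:

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical simplified counterpart of snprintf_count(): keep the
 * vsnprintf() result signed, then clamp errors before returning. */
static unsigned int count_chars(char *buf, unsigned int size, const char *fmt, ...)
{
	int ret;                 /* signed, as in the fixed code */
	unsigned int printed;
	va_list args;

	va_start(args, fmt);
	ret = vsnprintf(buf, size, fmt, args);
	va_end(args);

	printed = (ret < 0) ? 0 : (unsigned int)ret;   /* clamp errors safely */
	return printed;
}

int main(void)
{
	char buf[16];
	printf("printed %u chars\n", count_chars(buf, sizeof(buf), "%d-%d", 3, 7));
	return 0;
}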
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index 254300b06b43..2f1b802e66a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -79,6 +79,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
+ .set_hubp_blank = dcn10_set_hubp_blank,
};
static const struct hwseq_private_funcs dcn10_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 81db0179f7ea..e4701825b5a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -480,7 +480,6 @@ unsigned int dcn10_get_dig_frontend(struct link_encoder *enc)
break;
default:
// invalid source select DIG
- ASSERT(false);
result = ENGINE_ID_UNKNOWN;
}
@@ -956,6 +955,21 @@ void dcn10_link_encoder_enable_tmds_output(
}
}
+void dcn10_link_encoder_enable_tmds_output_with_clk_pattern_wa(
+ struct link_encoder *enc,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ enum signal_type signal,
+ uint32_t pixel_clock)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ dcn10_link_encoder_enable_tmds_output(
+ enc, clock_source, color_depth, signal, pixel_clock);
+
+ REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F);
+}
+
/* enables DP PHY output */
void dcn10_link_encoder_enable_dp_output(
struct link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index d4caad670855..3e1a582e4b88 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -42,6 +42,7 @@
#define LE_DCN_COMMON_REG_LIST(id) \
SRI(DIG_BE_CNTL, DIG, id), \
SRI(DIG_BE_EN_CNTL, DIG, id), \
+ SRI(DIG_CLOCK_PATTERN, DIG, id), \
SRI(TMDS_CTL_BITS, DIG, id), \
SRI(DP_CONFIG, DP, id), \
SRI(DP_DPHY_CNTL, DP, id), \
@@ -83,6 +84,7 @@ struct dcn10_link_enc_hpd_registers {
struct dcn10_link_enc_registers {
uint32_t DIG_BE_CNTL;
uint32_t DIG_BE_EN_CNTL;
+ uint32_t DIG_CLOCK_PATTERN;
uint32_t DP_CONFIG;
uint32_t DP_DPHY_CNTL;
uint32_t DP_DPHY_INTERNAL_CTRL;
@@ -168,6 +170,7 @@ struct dcn10_link_enc_registers {
LE_SF(DIG0_DIG_BE_CNTL, DIG_HPD_SELECT, mask_sh),\
LE_SF(DIG0_DIG_BE_CNTL, DIG_MODE, mask_sh),\
LE_SF(DIG0_DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, mask_sh),\
+ LE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\
LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \
LE_SF(DP0_DP_DPHY_CNTL, DPHY_BYPASS, mask_sh),\
LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE0, mask_sh),\
@@ -218,6 +221,7 @@ struct dcn10_link_enc_registers {
type DIG_HPD_SELECT;\
type DIG_MODE;\
type DIG_FE_SOURCE_SELECT;\
+ type DIG_CLOCK_PATTERN;\
type DPHY_BYPASS;\
type DPHY_ATEST_SEL_LANE0;\
type DPHY_ATEST_SEL_LANE1;\
@@ -536,6 +540,13 @@ void dcn10_link_encoder_enable_tmds_output(
enum signal_type signal,
uint32_t pixel_clock);
+void dcn10_link_encoder_enable_tmds_output_with_clk_pattern_wa(
+ struct link_encoder *enc,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ enum signal_type signal,
+ uint32_t pixel_clock);
+
/* enables DP PHY output */
void dcn10_link_encoder_enable_dp_output(
struct link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index f033397a84e9..6138f4887de7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -659,6 +659,16 @@ void optc1_unlock(struct timing_generator *optc)
OTG_MASTER_UPDATE_LOCK, 0);
}
+bool optc1_is_locked(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t locked;
+
+ REG_GET(OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, &locked);
+
+ return (locked == 1);
+}
+
void optc1_get_position(struct timing_generator *optc,
struct crtc_position *position)
{
@@ -1513,6 +1523,7 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc1_lock,
+ .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index b12bd9aae52f..b222c67973d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -638,6 +638,7 @@ void optc1_set_blank(struct timing_generator *optc,
bool enable_blanking);
bool optc1_is_blanked(struct timing_generator *optc);
+bool optc1_is_locked(struct timing_generator *optc);
void optc1_program_blank_color(
struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 480d928cb1ca..0726fb435e2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1575,8 +1575,8 @@ static void dcn20_update_dchubp_dpp(
- if (pipe_ctx->update_flags.bits.enable)
- hubp->funcs->set_blank(hubp, false);
+ if (is_pipe_tree_visible(pipe_ctx))
+ dc->hwss.set_hubp_blank(dc, pipe_ctx, false);
}
@@ -1770,6 +1770,14 @@ void dcn20_post_unlock_program_front_end(
}
}
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->vtp_locked) {
+ dc->hwss.set_hubp_blank(dc, pipe, true);
+ pipe->vtp_locked = false;
+ }
+ }
/* WA to apply WM setting*/
if (hwseq->wa.DEGVIDCN21)
dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index de9dcbeea150..51a4166e9750 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -94,6 +94,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
#endif
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+ .set_hubp_blank = dcn10_set_hubp_blank,
};
static const struct hwseq_private_funcs dcn20_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
index 15c2ff264ff6..fa013496e26b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
@@ -363,7 +363,7 @@ static const struct link_encoder_funcs dcn20_link_enc_funcs = {
dcn10_link_encoder_validate_output_with_stream,
.hw_init = enc2_hw_init,
.setup = dcn10_link_encoder_setup,
- .enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
+ .enable_tmds_output = dcn10_link_encoder_enable_tmds_output_with_clk_pattern_wa,
.enable_dp_output = dcn20_link_encoder_enable_dp_output,
.enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
.disable_output = dcn10_link_encoder_disable_output,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 5ed18cac57e8..2c2dbfcd8957 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -297,8 +297,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
},
},
.num_states = 5,
- .sr_exit_time_us = 11.6,
- .sr_enter_plus_exit_time_us = 13.9,
+ .sr_exit_time_us = 8.6,
+ .sr_enter_plus_exit_time_us = 10.9,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -2097,6 +2097,7 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.dest.pixel_rate_mhz *= 2;
pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
pipes[pipe_cnt].dout.dp_lanes = 4;
+ pipes[pipe_cnt].dout.is_virtual = 0;
pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
switch (get_num_odm_splits(&res_ctx->pipe_ctx[i])) {
@@ -2150,6 +2151,7 @@ int dcn20_populate_dml_pipes_from_context(
break;
default:
/* In case there is no signal, set dp with 4 lanes to allow max config */
+ pipes[pipe_cnt].dout.is_virtual = 1;
pipes[pipe_cnt].dout.output_type = dm_dp;
pipes[pipe_cnt].dout.dp_lanes = 4;
}
@@ -3245,7 +3247,7 @@ restore_dml_state:
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate)
{
- bool voltage_supported = false;
+ bool voltage_supported;
DC_FP_START();
voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
DC_FP_END();
@@ -3506,7 +3508,8 @@ void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
calculated_states[i].dram_speed_mts = uclk_states[i] * 16 / 1000;
// FCLK:UCLK ratio is 1.08
- min_fclk_required_by_uclk = mul_u64_u32_shr(BIT_ULL(32) * 1080 / 1000000, uclk_states[i], 32);
+ min_fclk_required_by_uclk = div_u64(((unsigned long long)uclk_states[i]) * 1080,
+ 1000000);
calculated_states[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ?
min_dcfclk : min_fclk_required_by_uclk;
@@ -3606,7 +3609,6 @@ static enum dml_project get_dml_project_version(uint32_t hw_internal_rev)
static bool init_soc_bounding_box(struct dc *dc,
struct dcn20_resource_pool *pool)
{
- const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
get_asic_rev_soc_bb(dc->ctx->asic_id.hw_internal_rev);
struct _vcs_dpi_ip_params_st *loaded_ip =
@@ -3614,116 +3616,6 @@ static bool init_soc_bounding_box(struct dc *dc,
DC_LOGGER_INIT(dc->ctx->logger);
- /* TODO: upstream NV12 bounding box when its launched */
- if (!bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
- DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__);
- return false;
- }
-
- if (bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
- int i;
-
- dcn2_0_nv12_soc.sr_exit_time_us =
- fixed16_to_double_to_cpu(bb->sr_exit_time_us);
- dcn2_0_nv12_soc.sr_enter_plus_exit_time_us =
- fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
- dcn2_0_nv12_soc.urgent_latency_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_us);
- dcn2_0_nv12_soc.urgent_latency_pixel_data_only_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
- dcn2_0_nv12_soc.urgent_latency_pixel_mixed_with_vm_data_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
- dcn2_0_nv12_soc.urgent_latency_vm_data_only_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
- dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
- dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
- dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
- dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
- dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
- dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
- dcn2_0_nv12_soc.max_avg_sdp_bw_use_normal_percent =
- fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
- dcn2_0_nv12_soc.max_avg_dram_bw_use_normal_percent =
- fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
- dcn2_0_nv12_soc.writeback_latency_us =
- fixed16_to_double_to_cpu(bb->writeback_latency_us);
- dcn2_0_nv12_soc.ideal_dram_bw_after_urgent_percent =
- fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
- dcn2_0_nv12_soc.max_request_size_bytes =
- le32_to_cpu(bb->max_request_size_bytes);
- dcn2_0_nv12_soc.dram_channel_width_bytes =
- le32_to_cpu(bb->dram_channel_width_bytes);
- dcn2_0_nv12_soc.fabric_datapath_to_dcn_data_return_bytes =
- le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
- dcn2_0_nv12_soc.dcn_downspread_percent =
- fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
- dcn2_0_nv12_soc.downspread_percent =
- fixed16_to_double_to_cpu(bb->downspread_percent);
- dcn2_0_nv12_soc.dram_page_open_time_ns =
- fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
- dcn2_0_nv12_soc.dram_rw_turnaround_time_ns =
- fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
- dcn2_0_nv12_soc.dram_return_buffer_per_channel_bytes =
- le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
- dcn2_0_nv12_soc.round_trip_ping_latency_dcfclk_cycles =
- le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
- dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
- dcn2_0_nv12_soc.channel_interleave_bytes =
- le32_to_cpu(bb->channel_interleave_bytes);
- dcn2_0_nv12_soc.num_banks =
- le32_to_cpu(bb->num_banks);
- dcn2_0_nv12_soc.num_chans =
- le32_to_cpu(bb->num_chans);
- dcn2_0_nv12_soc.vmm_page_size_bytes =
- le32_to_cpu(bb->vmm_page_size_bytes);
- dcn2_0_nv12_soc.dram_clock_change_latency_us =
- fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
- // HACK!! Lower uclock latency switch time so we don't switch
- dcn2_0_nv12_soc.dram_clock_change_latency_us = 10;
- dcn2_0_nv12_soc.writeback_dram_clock_change_latency_us =
- fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
- dcn2_0_nv12_soc.return_bus_width_bytes =
- le32_to_cpu(bb->return_bus_width_bytes);
- dcn2_0_nv12_soc.dispclk_dppclk_vco_speed_mhz =
- le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
- dcn2_0_nv12_soc.xfc_bus_transport_time_us =
- le32_to_cpu(bb->xfc_bus_transport_time_us);
- dcn2_0_nv12_soc.xfc_xbuf_latency_tolerance_us =
- le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
- dcn2_0_nv12_soc.use_urgent_burst_bw =
- le32_to_cpu(bb->use_urgent_burst_bw);
- dcn2_0_nv12_soc.num_states =
- le32_to_cpu(bb->num_states);
-
- for (i = 0; i < dcn2_0_nv12_soc.num_states; i++) {
- dcn2_0_nv12_soc.clock_limits[i].state =
- le32_to_cpu(bb->clock_limits[i].state);
- dcn2_0_nv12_soc.clock_limits[i].dcfclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
- dcn2_0_nv12_soc.clock_limits[i].fabricclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
- dcn2_0_nv12_soc.clock_limits[i].dispclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
- dcn2_0_nv12_soc.clock_limits[i].dppclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
- dcn2_0_nv12_soc.clock_limits[i].phyclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
- dcn2_0_nv12_soc.clock_limits[i].socclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
- dcn2_0_nv12_soc.clock_limits[i].dscclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
- dcn2_0_nv12_soc.clock_limits[i].dram_speed_mts =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
- }
- }
-
if (pool->base.pp_smu) {
struct pp_smu_nv_clock_table max_clocks = {0};
unsigned int uclk_states[8] = {0};
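The min_fclk_required_by_uclk hunk above keeps the 1.08 FCLK:UCLK ratio but computes it as a plain 64-bit multiply-and-divide instead of a fixed-point multiply-shift. A hedged userspace sketch of the same arithmetic, assuming uclk_states[] is in kHz and the result is in MHz as the surrounding code suggests:

#include <stdio.h>

/* Userspace model of the min-FCLK calculation: uclk_khz * 1080 / 1,000,000
 * gives uclk * 1.08 expressed in MHz; div_u64() is replaced by a plain
 * 64-bit divide, which is fine outside the kernel. */
int main(void)
{
	unsigned int uclk_states_khz[] = { 400000, 800000, 1200000 };

	for (int i = 0; i < 3; i++) {
		unsigned long long min_fclk_mhz =
			((unsigned long long)uclk_states_khz[i] * 1080) / 1000000ULL;
		printf("uclk %u kHz -> min fclk %llu MHz\n",
		       uclk_states_khz[i], min_fclk_mhz);
	}
	return 0;
}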
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index 96ee0b82f458..d3b643089603 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -123,7 +123,7 @@ void dcn21_optimize_pwr_state(
* PHY will hang on the next mode set attempt.
* if enable PLL follow by disable PLL (without executing lane enable/disable),
* RDPCS_PHY_DP_MPLLB_STATE remains 1,
- * which indicate that PLL disable attempt actually didn’t go through.
+ * which indicates that the PLL disable attempt actually didn't go through.
* As a workaround, insert PHY lane enable/disable before PLL disable.
*/
void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
@@ -143,6 +143,7 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t optio
struct dc_context *dc = abm->ctx;
uint32_t ramping_boundary = 0xFFFF;
+ memset(&cmd, 0, sizeof(cmd));
cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
@@ -212,6 +213,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
if (abm && panel_cntl)
dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ memset(&cmd, 0, sizeof(cmd));
cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
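
The two hunks above add a memset() so the stack-allocated DMUB command is fully zeroed before its fields are filled in and queued; any member the function does not set (and any struct padding) would otherwise carry whatever was on the stack to the firmware. A minimal sketch of the same pattern, using stand-in structs rather than the real union dmub_rb_cmd:

    #include <stdint.h>
    #include <string.h>

    /* Stand-ins only; the real command is the union dmub_rb_cmd. */
    struct fake_cmd_header {
        uint8_t  type;
        uint8_t  sub_type;
        uint16_t payload_bytes;
    };

    struct fake_cmd {
        struct fake_cmd_header header;
        uint32_t otg_inst;
        uint32_t reserved[4];   /* must reach the firmware as zero */
    };

    static void build_cmd(struct fake_cmd *cmd, uint32_t otg_inst)
    {
        memset(cmd, 0, sizeof(*cmd));   /* zero first, as the patch now does */
        cmd->header.type = 1;
        cmd->header.sub_type = 2;
        cmd->header.payload_bytes = sizeof(*cmd) - sizeof(cmd->header);
        cmd->otg_inst = otg_inst;
        /* reserved[] and any padding stay zero instead of stack garbage */
    }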
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index 074e2713257f..0597391b2171 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -99,6 +99,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
#endif
.is_abm_supported = dcn21_is_abm_supported,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+ .set_hubp_blank = dcn10_set_hubp_blank,
};
static const struct hwseq_private_funcs dcn21_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index b000b43a820d..072f8c880924 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -906,6 +906,8 @@ enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
DCN20_CLK_SRC_PLL2,
+ DCN20_CLK_SRC_PLL3,
+ DCN20_CLK_SRC_PLL4,
DCN20_CLK_SRC_TOTAL_DCN21
};
@@ -1327,8 +1329,8 @@ validate_out:
return out;
}
-bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate)
+static noinline bool dcn21_validate_bandwidth_fp(struct dc *dc,
+ struct dc_state *context, bool fast_validate)
{
bool out = false;
@@ -1381,6 +1383,22 @@ validate_out:
return out;
}
+
+/*
+ * Some of the functions further below use the FPU, so we need to wrap this
+ * with DC_FP_START()/DC_FP_END(). Use the same approach as for
+ * dcn20_validate_bandwidth in dcn20_resource.c.
+ */
+bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ bool fast_validate)
+{
+ bool voltage_supported;
+ DC_FP_START();
+ voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate);
+ DC_FP_END();
+ return voltage_supported;
+}
+
static void dcn21_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn21_resource_pool *dcn21_pool = TO_DCN21_RES_POOL(*pool);
@@ -2030,6 +2048,14 @@ static bool dcn21_resource_construct(
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
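
Earlier in this file the bandwidth validation is split into a noinline *_fp helper plus a thin wrapper bracketed by DC_FP_START()/DC_FP_END(), mirroring dcn20_validate_bandwidth. A stripped-down sketch of that shape, with fp_begin()/fp_end() as placeholders for the real DC macros:

    #include <stdbool.h>

    static void fp_begin(void) { /* placeholder for DC_FP_START() */ }
    static void fp_end(void)   { /* placeholder for DC_FP_END()   */ }

    /* noinline keeps every FP instruction inside the protected region */
    static __attribute__((noinline)) bool validate_fp(int pipes)
    {
        double budget_us = pipes * 1.5;   /* FP math lives only in here */
        return budget_us < 100.0;
    }

    bool validate(int pipes)
    {
        bool ok;

        fp_begin();
        ok = validate_fp(pipes);
        fp_end();
        return ok;
    }

Keeping the helper noinline prevents the compiler from moving FP instructions outside the bracketed region, which is the failure mode described for the dcn30 helper later in this patch.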
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
index c20331eb62e0..dfd77b3cc84d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -32,8 +32,8 @@ DCN30 = dcn30_init.o dcn30_hubbub.o dcn30_hubp.o dcn30_dpp.o dcn30_optc.o \
ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -msse
-CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -msse
endif
ifdef CONFIG_PPC64
@@ -45,6 +45,8 @@ ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -mhard-float
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -mhard-float
endif
ifdef CONFIG_X86
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
index 9da66e491116..33985401f25c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
@@ -133,7 +133,6 @@ static void dpp3_power_on_gamcor_lut(
struct dpp *dpp_base,
bool power_on)
{
- uint32_t power_status;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
@@ -143,12 +142,6 @@ static void dpp3_power_on_gamcor_lut(
} else
REG_SET(CM_MEM_PWR_CTRL, 0,
GAMCOR_MEM_PWR_DIS, power_on == true ? 0:1);
-
- REG_GET(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, &power_status);
- if (power_status != 0)
- BREAK_TO_DEBUGGER();
-
-
}
void dpp3_program_cm_dealpha(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
index 5fa150f34c60..705fbfc37502 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
@@ -62,6 +62,7 @@
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_IN_BLANK, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, MAX_COMPRESSED_FRAGS, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 3deb3fb1724d..06dc1e2e8383 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -539,6 +539,8 @@ void dcn30_init_hw(struct dc *dc)
fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
dc->links[i]->link_enc);
+ if (fe == ENGINE_ID_UNKNOWN)
+ continue;
for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
if (fe == dc->res_pool->stream_enc[j]->id) {
@@ -710,8 +712,11 @@ void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
union dmub_rb_cmd cmd;
- unsigned int surface_size, refresh_hz, denom;
uint32_t tmr_delay = 0, tmr_scale = 0;
+ struct dc_cursor_attributes cursor_attr;
+ bool cursor_cache_enable = false;
+ struct dc_stream_state *stream = NULL;
+ struct dc_plane_state *plane = NULL;
if (!dc->ctx->dmub_srv)
return false;
@@ -722,72 +727,150 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
/* First, check no-memory-requests case */
for (i = 0; i < dc->current_state->stream_count; i++) {
- if (dc->current_state->stream_status[i]
- .plane_count)
+ if (dc->current_state->stream_status[i].plane_count)
/* Fail eligibility on a visible stream */
break;
}
- if (dc->current_state->stream_count == 1 // single display only
- && dc->current_state->stream_status[0].plane_count == 1 // single surface only
- && dc->current_state->stream_status[0].plane_states[0]->address.page_table_base.quad_part == 0 // no VM
- // Only 8 and 16 bit formats
- && dc->current_state->stream_status[0].plane_states[0]->format <= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F
- && dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888) {
- surface_size = dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_pitch *
- dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_size.height *
- (dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ?
- 8 : 4);
- } else {
- // TODO: remove hard code size
- surface_size = 128 * 1024 * 1024;
+ if (i == dc->current_state->stream_count) {
+ /* Enable no-memory-requests case */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.mall.header.type = DMUB_CMD__MALL;
+ cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_NO_DF_REQ;
+ cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
+
+ dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+
+ return true;
+ }
+
+ stream = dc->current_state->streams[0];
+ plane = (stream ? dc->current_state->stream_status[0].plane_states[0] : NULL);
+
+ if (stream && plane) {
+ cursor_cache_enable = stream->cursor_position.enable &&
+ plane->address.grph.cursor_cache_addr.quad_part;
+ cursor_attr = stream->cursor_attributes;
}
- // TODO: remove hard code size
- if (surface_size < 128 * 1024 * 1024) {
- refresh_hz = div_u64((unsigned long long) dc->current_state->streams[0]->timing.pix_clk_100hz *
- 100LL,
- (dc->current_state->streams[0]->timing.v_total *
- dc->current_state->streams[0]->timing.h_total));
+ /*
+ * Second, check MALL eligibility
+ *
+ * single display only, single surface only, 8 and 16 bit formats only, no VM,
+ * do not use MALL for displays that support PSR as they use D0i3.2 in DMCUB FW
+ *
+ * TODO: When we implement multi-display, PSR displays will be allowed if there is
+ * a non-PSR display present, since in that case we can't do D0i3.2
+ */
+ if (dc->current_state->stream_count == 1 &&
+ stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
+ dc->current_state->stream_status[0].plane_count == 1 &&
+ plane->format <= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F &&
+ plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888 &&
+ plane->address.page_table_base.quad_part == 0 &&
+ dc->hwss.does_plane_fit_in_mall &&
+ dc->hwss.does_plane_fit_in_mall(dc, plane,
+ cursor_cache_enable ? &cursor_attr : NULL)) {
+ unsigned int v_total = stream->adjust.v_total_max ?
+ stream->adjust.v_total_max : stream->timing.v_total;
+ unsigned int refresh_hz = div_u64((unsigned long long) stream->timing.pix_clk_100hz *
+ 100LL, (v_total * stream->timing.h_total));
/*
- * Delay_Us = 65.28 * (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
- * Delay_Us / 65.28 = (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
- * (Delay_Us / 65.28) / 2^MallFrameCacheTmrScale = 64 + MallFrameCacheTmrDly
- * MallFrameCacheTmrDly = ((Delay_Us / 65.28) / 2^MallFrameCacheTmrScale) - 64
- * = (1000000 / refresh) / 65.28 / 2^MallFrameCacheTmrScale - 64
- * = 1000000 / (refresh * 65.28 * 2^MallFrameCacheTmrScale) - 64
- * = (1000000 * 100) / (refresh * 6528 * 2^MallFrameCacheTmrScale) - 64
+ * one frame time in microsec:
+ * Delay_Us = 1000000 / refresh
+ * dynamic_delay_us = 1000000 / refresh + 2 * stutter_period
+ *
+ * one frame time modified by 'additional timer percent' (p):
+ * Delay_Us_modified = dynamic_delay_us + dynamic_delay_us * p / 100
+ * = dynamic_delay_us * (1 + p / 100)
+ * = (1000000 / refresh + 2 * stutter_period) * (100 + p) / 100
+ * = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (100 * refresh)
+ *
+ * formula for timer duration based on parameters, from regspec:
+ * dynamic_delay_us = 65.28 * (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
+ *
+ * dynamic_delay_us / 65.28 = (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
+ * (dynamic_delay_us / 65.28) / 2^MallFrameCacheTmrScale = 64 + MallFrameCacheTmrDly
+ * MallFrameCacheTmrDly = ((dynamic_delay_us / 65.28) / 2^MallFrameCacheTmrScale) - 64
+ * = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (100 * refresh) / 65.28 / 2^MallFrameCacheTmrScale - 64
+ * = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (refresh * 6528 * 2^MallFrameCacheTmrScale) - 64
*
* need to round up the result of the division before the subtraction
*/
- denom = refresh_hz * 6528;
- tmr_delay = div_u64((100000000LL + denom - 1), denom) - 64LL;
+ unsigned int denom = refresh_hz * 6528;
+ unsigned int stutter_period = dc->current_state->perf_params.stutter_period_us;
+
+ tmr_delay = div_u64(((1000000LL + 2 * stutter_period * refresh_hz) *
+ (100LL + dc->debug.mall_additional_timer_percent) + denom - 1),
+ denom) - 64LL;
/* scale should be increased until it fits into 6 bits */
while (tmr_delay & ~0x3F) {
tmr_scale++;
if (tmr_scale > 3) {
- /* The delay exceeds the range of the hystersis timer */
+ /* Delay exceeds range of hysteresis timer */
ASSERT(false);
return false;
}
denom *= 2;
- tmr_delay = div_u64((100000000LL + denom - 1), denom) - 64LL;
+ tmr_delay = div_u64(((1000000LL + 2 * stutter_period * refresh_hz) *
+ (100LL + dc->debug.mall_additional_timer_percent) + denom - 1),
+ denom) - 64LL;
+ }
+
+ /* Copy HW cursor */
+ if (cursor_cache_enable) {
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.mall.header.type = DMUB_CMD__MALL;
+ cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_COPY_CURSOR;
+ cmd.mall.header.payload_bytes =
+ sizeof(cmd.mall) - sizeof(cmd.mall.header);
+
+ switch (cursor_attr.color_format) {
+ case CURSOR_MODE_MONO:
+ cmd.mall.cursor_bpp = 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cmd.mall.cursor_bpp = 32;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ cmd.mall.cursor_bpp = 64;
+ break;
+ }
+
+ cmd.mall.cursor_copy_src.quad_part = cursor_attr.address.quad_part;
+ cmd.mall.cursor_copy_dst.quad_part =
+ plane->address.grph.cursor_cache_addr.quad_part;
+ cmd.mall.cursor_width = cursor_attr.width;
+ cmd.mall.cursor_height = cursor_attr.height;
+ cmd.mall.cursor_pitch = cursor_attr.pitch;
+
+ dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+
+ /* Use copied cursor, and it's okay to not switch back */
+ cursor_attr.address.quad_part =
+ plane->address.grph.cursor_cache_addr.quad_part;
+ dc_stream_set_cursor_attributes(stream, &cursor_attr);
}
/* Enable MALL */
memset(&cmd, 0, sizeof(cmd));
cmd.mall.header.type = DMUB_CMD__MALL;
- cmd.mall.header.sub_type =
- DMUB_CMD__MALL_ACTION_ALLOW;
- cmd.mall.header.payload_bytes =
- sizeof(cmd.mall) -
- sizeof(cmd.mall.header);
+ cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_ALLOW;
+ cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
cmd.mall.tmr_delay = tmr_delay;
cmd.mall.tmr_scale = tmr_scale;
+ cmd.mall.debug_bits = dc->debug.mall_error_as_fatal;
dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
@@ -814,6 +897,40 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
return true;
}
+bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, struct dc_cursor_attributes *cursor_attr)
+{
+ // add meta size?
+ unsigned int surface_size = plane->plane_size.surface_pitch * plane->plane_size.surface_size.height *
+ (plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+ unsigned int mall_size = dc->caps.mall_size_total;
+ unsigned int cursor_size = 0;
+
+ if (dc->debug.mall_size_override)
+ mall_size = 1024 * 1024 * dc->debug.mall_size_override;
+
+ if (cursor_attr) {
+ cursor_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size;
+
+ switch (cursor_attr->color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ cursor_size *= 8;
+ break;
+ }
+ }
+
+ return (surface_size + cursor_size) < mall_size;
+}
+
void dcn30_hardware_release(struct dc *dc)
{
/* if pstate unsupported, force it supported */
@@ -823,6 +940,53 @@ void dcn30_hardware_release(struct dc *dc)
dc->res_pool->hubbub, true, true);
}
+void dcn30_set_hubp_blank(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank_enable)
+{
+ struct pipe_ctx *mpcc_pipe;
+ struct pipe_ctx *odm_pipe;
+
+ if (blank_enable) {
+ struct plane_resource *plane_res = &pipe_ctx->plane_res;
+ struct stream_resource *stream_res = &pipe_ctx->stream_res;
+
+ /* Wait for enter vblank */
+ stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
+
+ /* Blank HUBP to allow p-state during blank on all timings */
+ pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, true);
+ /* Confirm hubp in blank */
+ ASSERT(plane_res->hubp->funcs->hubp_in_blank(plane_res->hubp));
+ /* Toggle HUBP_DISABLE */
+ plane_res->hubp->funcs->hubp_soft_reset(plane_res->hubp, true);
+ plane_res->hubp->funcs->hubp_soft_reset(plane_res->hubp, false);
+ for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) {
+ mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, true);
+ /* Confirm hubp in blank */
+ ASSERT(mpcc_pipe->plane_res.hubp->funcs->hubp_in_blank(mpcc_pipe->plane_res.hubp));
+ /* Toggle HUBP_DISABLE */
+ mpcc_pipe->plane_res.hubp->funcs->hubp_soft_reset(mpcc_pipe->plane_res.hubp, true);
+ mpcc_pipe->plane_res.hubp->funcs->hubp_soft_reset(mpcc_pipe->plane_res.hubp, false);
+
+ }
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ odm_pipe->plane_res.hubp->funcs->set_blank(odm_pipe->plane_res.hubp, true);
+ /* Confirm hubp in blank */
+ ASSERT(odm_pipe->plane_res.hubp->funcs->hubp_in_blank(odm_pipe->plane_res.hubp));
+ /* Toggle HUBP_DISABLE */
+ odm_pipe->plane_res.hubp->funcs->hubp_soft_reset(odm_pipe->plane_res.hubp, true);
+ odm_pipe->plane_res.hubp->funcs->hubp_soft_reset(odm_pipe->plane_res.hubp, false);
+ }
+ } else {
+ pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, false);
+ for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
+ mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false);
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_pipe->plane_res.hubp->funcs->set_blank(odm_pipe->plane_res.hubp, false);
+ }
+}
+
void dcn30_set_disp_pattern_generator(const struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum controller_dp_test_pattern test_pattern,
@@ -831,6 +995,25 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
const struct tg_color *solid_color,
int width, int height, int offset)
{
- pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern,
- color_space, color_depth, solid_color, width, height, offset);
+ struct stream_resource *stream_res = &pipe_ctx->stream_res;
+
+ if (test_pattern != CONTROLLER_DP_TEST_PATTERN_VIDEOMODE) {
+ pipe_ctx->vtp_locked = false;
+ /* turning on DPG */
+ stream_res->opp->funcs->opp_set_disp_pattern_generator(stream_res->opp, test_pattern, color_space,
+ color_depth, solid_color, width, height, offset);
+
+ /* Defer hubp blank if tg is locked */
+ if (stream_res->tg->funcs->is_tg_enabled(stream_res->tg)) {
+ if (stream_res->tg->funcs->is_locked(stream_res->tg))
+ pipe_ctx->vtp_locked = true;
+ else
+ dc->hwss.set_hubp_blank(dc, pipe_ctx, true);
+ }
+ } else {
+ dc->hwss.set_hubp_blank(dc, pipe_ctx, false);
+ /* turning off DPG */
+ stream_res->opp->funcs->opp_set_disp_pattern_generator(stream_res->opp, test_pattern, color_space,
+ color_depth, solid_color, width, height, offset);
+ }
}
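
The timer derivation in the comment above reduces to an integer search: take the ceiling of (1000000 + 2 * stutter_period * refresh) * (100 + p) / (refresh * 6528 * 2^scale), subtract 64, and bump the scale (up to 3) until the result fits the 6-bit delay field. A self-contained sketch with made-up inputs (60 Hz, 20 us stutter period, p = 0), not tied to the real dc/dmub structures:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Mirror of the tmr_delay/tmr_scale search: keep doubling 2^scale (max 3)
     * until the delay value fits the 6-bit MallFrameCacheTmrDly field.
     */
    static int mall_timer(unsigned int refresh_hz, unsigned int stutter_us,
                          unsigned int extra_pct, uint32_t *delay, uint32_t *scale)
    {
        uint64_t num = (1000000ULL + 2ULL * stutter_us * refresh_hz) * (100ULL + extra_pct);
        uint64_t denom = (uint64_t)refresh_hz * 6528;

        *scale = 0;
        *delay = (uint32_t)((num + denom - 1) / denom) - 64;   /* round up, then - 64 */
        while (*delay & ~0x3Fu) {
            if (++(*scale) > 3)
                return -1;              /* exceeds the hysteresis timer range */
            denom *= 2;
            *delay = (uint32_t)((num + denom - 1) / denom) - 64;
        }
        return 0;
    }

    int main(void)
    {
        uint32_t d, s;

        if (!mall_timer(60, 20, 0, &d, &s))
            printf("tmr_delay=%u tmr_scale=%u\n", d, s);
        return 0;
    }

For those inputs it settles on tmr_scale = 2 and tmr_delay = 0, i.e. 65.28 * 64 * 4, roughly 16712 us, just over one 60 Hz frame plus the stutter allowance.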
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
index 7d32c43aafe0..3b7d4812e311 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
@@ -65,6 +65,9 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx);
void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx);
+bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane,
+ struct dc_cursor_attributes *cursor_attr);
+
bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable);
void dcn30_hardware_release(struct dc *dc);
@@ -77,4 +80,8 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
const struct tg_color *solid_color,
int width, int height, int offset);
+void dcn30_set_hubp_blank(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank_enable);
+
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 6125fe440ad0..204444fead97 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -71,6 +71,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .edp_wait_for_T12 = dce110_edp_wait_for_T12,
.set_cursor_position = dcn10_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
@@ -91,11 +92,13 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations,
+ .does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,
.set_pipe = dcn21_set_pipe,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+ .set_hubp_blank = dcn30_set_hubp_blank,
};
static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 3ba3991ee612..8980c90b2277 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -309,6 +309,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc3_lock,
+ .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 5e126fdf6ec1..8d0f663489ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1716,125 +1716,22 @@ static bool is_soc_bounding_box_valid(struct dc *dc)
static bool init_soc_bounding_box(struct dc *dc,
struct dcn30_resource_pool *pool)
{
- const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_0_soc;
struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_0_ip;
DC_LOGGER_INIT(dc->ctx->logger);
- if (!bb && !is_soc_bounding_box_valid(dc)) {
+ if (!is_soc_bounding_box_valid(dc)) {
DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__);
return false;
}
- if (bb && !is_soc_bounding_box_valid(dc)) {
- int i;
-
- dcn3_0_soc.sr_exit_time_us =
- fixed16_to_double_to_cpu(bb->sr_exit_time_us);
- dcn3_0_soc.sr_enter_plus_exit_time_us =
- fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
- dcn3_0_soc.urgent_latency_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_us);
- dcn3_0_soc.urgent_latency_pixel_data_only_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
- dcn3_0_soc.urgent_latency_pixel_mixed_with_vm_data_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
- dcn3_0_soc.urgent_latency_vm_data_only_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
- dcn3_0_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
- dcn3_0_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
- dcn3_0_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
- dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
- dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
- dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
- dcn3_0_soc.max_avg_sdp_bw_use_normal_percent =
- fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
- dcn3_0_soc.max_avg_dram_bw_use_normal_percent =
- fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
- dcn3_0_soc.writeback_latency_us =
- fixed16_to_double_to_cpu(bb->writeback_latency_us);
- dcn3_0_soc.ideal_dram_bw_after_urgent_percent =
- fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
- dcn3_0_soc.max_request_size_bytes =
- le32_to_cpu(bb->max_request_size_bytes);
- dcn3_0_soc.dram_channel_width_bytes =
- le32_to_cpu(bb->dram_channel_width_bytes);
- dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes =
- le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
- dcn3_0_soc.dcn_downspread_percent =
- fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
- dcn3_0_soc.downspread_percent =
- fixed16_to_double_to_cpu(bb->downspread_percent);
- dcn3_0_soc.dram_page_open_time_ns =
- fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
- dcn3_0_soc.dram_rw_turnaround_time_ns =
- fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
- dcn3_0_soc.dram_return_buffer_per_channel_bytes =
- le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
- dcn3_0_soc.round_trip_ping_latency_dcfclk_cycles =
- le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
- dcn3_0_soc.urgent_out_of_order_return_per_channel_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
- dcn3_0_soc.channel_interleave_bytes =
- le32_to_cpu(bb->channel_interleave_bytes);
- dcn3_0_soc.num_banks =
- le32_to_cpu(bb->num_banks);
- dcn3_0_soc.num_chans =
- le32_to_cpu(bb->num_chans);
- dcn3_0_soc.gpuvm_min_page_size_bytes =
- le32_to_cpu(bb->vmm_page_size_bytes);
- dcn3_0_soc.dram_clock_change_latency_us =
- fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
- dcn3_0_soc.writeback_dram_clock_change_latency_us =
- fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
- dcn3_0_soc.return_bus_width_bytes =
- le32_to_cpu(bb->return_bus_width_bytes);
- dcn3_0_soc.dispclk_dppclk_vco_speed_mhz =
- le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
- dcn3_0_soc.xfc_bus_transport_time_us =
- le32_to_cpu(bb->xfc_bus_transport_time_us);
- dcn3_0_soc.xfc_xbuf_latency_tolerance_us =
- le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
- dcn3_0_soc.use_urgent_burst_bw =
- le32_to_cpu(bb->use_urgent_burst_bw);
- dcn3_0_soc.num_states =
- le32_to_cpu(bb->num_states);
-
- for (i = 0; i < dcn3_0_soc.num_states; i++) {
- dcn3_0_soc.clock_limits[i].state =
- le32_to_cpu(bb->clock_limits[i].state);
- dcn3_0_soc.clock_limits[i].dcfclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
- dcn3_0_soc.clock_limits[i].fabricclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
- dcn3_0_soc.clock_limits[i].dispclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
- dcn3_0_soc.clock_limits[i].dppclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
- dcn3_0_soc.clock_limits[i].phyclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
- dcn3_0_soc.clock_limits[i].socclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
- dcn3_0_soc.clock_limits[i].dscclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
- dcn3_0_soc.clock_limits[i].dram_speed_mts =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
- }
- }
-
loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
loaded_ip->max_num_dpp = pool->base.pipe_count;
loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
dcn20_patch_bounding_box(dc, loaded_bb);
- if (!bb && dc->ctx->dc_bios->funcs->get_soc_bb_info) {
+ if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
struct bp_soc_bb_info bb_info = {0};
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
@@ -2292,17 +2189,15 @@ static noinline void dcn30_calculate_wm_and_dlg_fp(
unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
unsigned int min_dram_speed_mts_margin = 160;
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us;
-
if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
- for (i = 3; i > 0; i--) {
- if ((min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts) &&
- (min_dram_speed_mts - min_dram_speed_mts_margin < dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts))
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
- }
+ /* find the largest table entry that is lower than the dram speed; anything lower than DPM0 still uses DPM0 */
+ for (i = 3; i > 0; i--)
+ if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
+ break;
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
}
@@ -2437,16 +2332,28 @@ validate_out:
return out;
}
-static noinline void get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
- unsigned int *optimal_dcfclk,
- unsigned int *optimal_fclk)
+/*
+ * This must be noinline to ensure anything that deals with FP registers
+ * is contained within this call; previously our compiling with hard-float
+ * would result in fp instructions being emitted outside of the boundaries
+ * of the DC_FP_START/END macros, which makes sense as the compiler has no
+ * idea about what is wrapped and what is not
+ *
+ * This is largely just a workaround to avoid breakage introduced with 5.6,
+ * ideally all fp-using code should be moved into its own file, only that
+ * should be compiled with hard-float, and all code exported from there
+ * should be strictly wrapped with DC_FP_START/END
+ */
+static noinline void dcn30_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+ unsigned int *optimal_dcfclk,
+ unsigned int *optimal_fclk)
{
double bw_from_dram, bw_from_dram1, bw_from_dram2;
bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
- dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
+ dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
- dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
+ dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
@@ -2505,7 +2412,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
// Calculate optimal dcfclk for each uclk
for (i = 0; i < num_uclk_states; i++) {
DC_FP_START();
- get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
+ dcn30_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
&optimal_dcfclk_for_uclk[i], NULL);
DC_FP_END();
if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
@@ -2631,6 +2538,10 @@ static bool dcn30_resource_construct(
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
+ dc->caps.mall_size_per_mem_channel = 8;
+ /* total size = mall per channel * num channels * 1024 * 1024 */
+ dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
+ dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
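
The rewritten dummy p-state loop above walks the four-entry table from the top and keeps the first (largest) entry whose dram_speed_mts is below the minimum DRAM speed plus margin, falling back to entry 0 when nothing qualifies. A small sketch with a hypothetical table (the real values live in dc->clk_mgr->bw_params->dummy_pstate_table):

    #include <stdio.h>

    struct dummy_pstate { unsigned int dram_speed_mts; double latency_us; };

    /* Hypothetical 4-entry table, ordered low to high like dummy_pstate_table[0..3]. */
    static const struct dummy_pstate table[4] = {
        { 1600,  5.0 }, { 8000,  9.0 }, { 10000, 20.0 }, { 16000, 38.0 },
    };

    /* Walk from the top; the first (largest) entry below speed+margin wins,
     * and anything below entry 0 still uses entry 0. */
    static double pick_latency(unsigned int min_dram_speed_mts, unsigned int margin)
    {
        int i;

        for (i = 3; i > 0; i--)
            if (min_dram_speed_mts + margin > table[i].dram_speed_mts)
                break;
        return table[i].latency_us;
    }

    int main(void)
    {
        printf("%.1f us\n", pick_latency(9600, 160));  /* 9760 > 8000 -> entry 1 -> 9.0 us */
        printf("%.1f us\n", pick_latency(1000, 160));  /* nothing matches -> entry 0 -> 5.0 us */
        return 0;
    }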
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index 3ca7d911d25c..09264716d1dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -14,7 +14,7 @@ DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -msse
endif
ifdef CONFIG_PPC64
@@ -25,6 +25,7 @@ ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
+CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mhard-float
endif
ifdef CONFIG_X86
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
index bdad72140cbc..b8bf6d61005b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
@@ -98,6 +98,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+ .set_hubp_blank = dcn30_set_hubp_blank,
};
static const struct hwseq_private_funcs dcn301_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 35f5bf08ae96..5d4b2c60192e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1489,124 +1489,21 @@ static bool is_soc_bounding_box_valid(struct dc *dc)
static bool init_soc_bounding_box(struct dc *dc,
struct dcn301_resource_pool *pool)
{
- const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_01_soc;
struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_01_ip;
DC_LOGGER_INIT(dc->ctx->logger);
- if (!bb && !is_soc_bounding_box_valid(dc)) {
+ if (!is_soc_bounding_box_valid(dc)) {
DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__);
return false;
}
- if (bb && !is_soc_bounding_box_valid(dc)) {
- int i;
-
- dcn3_01_soc.sr_exit_time_us =
- fixed16_to_double_to_cpu(bb->sr_exit_time_us);
- dcn3_01_soc.sr_enter_plus_exit_time_us =
- fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
- dcn3_01_soc.urgent_latency_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_us);
- dcn3_01_soc.urgent_latency_pixel_data_only_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
- dcn3_01_soc.urgent_latency_pixel_mixed_with_vm_data_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
- dcn3_01_soc.urgent_latency_vm_data_only_us =
- fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
- dcn3_01_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
- dcn3_01_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
- dcn3_01_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
- dcn3_01_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
- dcn3_01_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
- dcn3_01_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
- fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
- dcn3_01_soc.max_avg_sdp_bw_use_normal_percent =
- fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
- dcn3_01_soc.max_avg_dram_bw_use_normal_percent =
- fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
- dcn3_01_soc.writeback_latency_us =
- fixed16_to_double_to_cpu(bb->writeback_latency_us);
- dcn3_01_soc.ideal_dram_bw_after_urgent_percent =
- fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
- dcn3_01_soc.max_request_size_bytes =
- le32_to_cpu(bb->max_request_size_bytes);
- dcn3_01_soc.dram_channel_width_bytes =
- le32_to_cpu(bb->dram_channel_width_bytes);
- dcn3_01_soc.fabric_datapath_to_dcn_data_return_bytes =
- le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
- dcn3_01_soc.dcn_downspread_percent =
- fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
- dcn3_01_soc.downspread_percent =
- fixed16_to_double_to_cpu(bb->downspread_percent);
- dcn3_01_soc.dram_page_open_time_ns =
- fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
- dcn3_01_soc.dram_rw_turnaround_time_ns =
- fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
- dcn3_01_soc.dram_return_buffer_per_channel_bytes =
- le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
- dcn3_01_soc.round_trip_ping_latency_dcfclk_cycles =
- le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
- dcn3_01_soc.urgent_out_of_order_return_per_channel_bytes =
- le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
- dcn3_01_soc.channel_interleave_bytes =
- le32_to_cpu(bb->channel_interleave_bytes);
- dcn3_01_soc.num_banks =
- le32_to_cpu(bb->num_banks);
- dcn3_01_soc.num_chans =
- le32_to_cpu(bb->num_chans);
- dcn3_01_soc.gpuvm_min_page_size_bytes =
- le32_to_cpu(bb->vmm_page_size_bytes);
- dcn3_01_soc.dram_clock_change_latency_us =
- fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
- dcn3_01_soc.writeback_dram_clock_change_latency_us =
- fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
- dcn3_01_soc.return_bus_width_bytes =
- le32_to_cpu(bb->return_bus_width_bytes);
- dcn3_01_soc.dispclk_dppclk_vco_speed_mhz =
- le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
- dcn3_01_soc.xfc_bus_transport_time_us =
- le32_to_cpu(bb->xfc_bus_transport_time_us);
- dcn3_01_soc.xfc_xbuf_latency_tolerance_us =
- le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
- dcn3_01_soc.use_urgent_burst_bw =
- le32_to_cpu(bb->use_urgent_burst_bw);
- dcn3_01_soc.num_states =
- le32_to_cpu(bb->num_states);
-
- for (i = 0; i < dcn3_01_soc.num_states; i++) {
- dcn3_01_soc.clock_limits[i].state =
- le32_to_cpu(bb->clock_limits[i].state);
- dcn3_01_soc.clock_limits[i].dcfclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
- dcn3_01_soc.clock_limits[i].fabricclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
- dcn3_01_soc.clock_limits[i].dispclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
- dcn3_01_soc.clock_limits[i].dppclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
- dcn3_01_soc.clock_limits[i].phyclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
- dcn3_01_soc.clock_limits[i].socclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
- dcn3_01_soc.clock_limits[i].dscclk_mhz =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
- dcn3_01_soc.clock_limits[i].dram_speed_mts =
- fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
- }
- }
-
loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
loaded_ip->max_num_dpp = pool->base.pipe_count;
dcn20_patch_bounding_box(dc, loaded_bb);
- if (!bb && dc->ctx->dc_bios->funcs->get_soc_bb_info) {
+ if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
struct bp_soc_bb_info bb_info = {0};
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
index 8d4924b7dc22..101620a8867a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
@@ -13,7 +13,7 @@
DCN3_02 = dcn302_init.o dcn302_hwseq.o dcn302_resource.o
ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -msse
endif
ifdef CONFIG_PPC64
@@ -24,6 +24,7 @@ ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
+CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o += -mhard-float
endif
ifdef CONFIG_X86
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 808c4dcdb3ac..4b659b63f75b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -53,6 +53,8 @@
#include "dce/dce_i2c_hw.h"
#include "dce/dce_panel_cntl.h"
#include "dce/dmub_abm.h"
+#include "dce/dmub_psr.h"
+#include "clk_mgr.h"
#include "hw_sequencer_private.h"
#include "reg_helper.h"
@@ -162,8 +164,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
.num_states = 1,
- .sr_exit_time_us = 5.20,
- .sr_enter_plus_exit_time_us = 9.60,
+ .sr_exit_time_us = 12,
+ .sr_enter_plus_exit_time_us = 20,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -190,7 +192,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
.num_banks = 8,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 350,
+ .dram_clock_change_latency_us = 404,
.dummy_pstate_latency_us = 5,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
@@ -238,6 +240,7 @@ static const struct dc_debug_options debug_defaults_diags = {
.dwb_fi_phase = -1, // -1 = disable
.dmub_command_table = true,
.enable_tri_buf = true,
+ .disable_psr = true,
};
enum dcn302_clk_src_array_id {
@@ -1213,6 +1216,9 @@ static void dcn302_resource_destruct(struct resource_pool *pool)
dce_abm_destroy(&pool->multiple_abms[i]);
}
+ if (pool->psr != NULL)
+ dmub_psr_destroy(&pool->psr);
+
if (pool->dccg != NULL)
dcn_dccg_destroy(&pool->dccg);
}
@@ -1224,6 +1230,165 @@ static void dcn302_destroy_resource_pool(struct resource_pool **pool)
*pool = NULL;
}
+static void dcn302_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+ unsigned int *optimal_dcfclk,
+ unsigned int *optimal_fclk)
+{
+ double bw_from_dram, bw_from_dram1, bw_from_dram2;
+
+ bw_from_dram1 = uclk_mts * dcn3_02_soc.num_chans *
+ dcn3_02_soc.dram_channel_width_bytes * (dcn3_02_soc.max_avg_dram_bw_use_normal_percent / 100);
+ bw_from_dram2 = uclk_mts * dcn3_02_soc.num_chans *
+ dcn3_02_soc.dram_channel_width_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100);
+
+ bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
+
+ if (optimal_fclk)
+ *optimal_fclk = bw_from_dram /
+ (dcn3_02_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100));
+
+ if (optimal_dcfclk)
+ *optimal_dcfclk = bw_from_dram /
+ (dcn3_02_soc.return_bus_width_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100));
+}
+
+void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ unsigned int i, j;
+ unsigned int num_states = 0;
+
+ unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
+ unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
+ unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
+ unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
+
+ unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200};
+ unsigned int num_dcfclk_sta_targets = 4;
+ unsigned int num_uclk_states;
+
+
+ if (dc->ctx->dc_bios->vram_info.num_chans)
+ dcn3_02_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
+
+ if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
+ dcn3_02_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+
+ dcn3_02_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+
+ if (bw_params->clk_table.entries[0].memclk_mhz) {
+ int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
+
+ for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
+ max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+ if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+ if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+ if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
+ max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+ }
+ if (!max_dcfclk_mhz)
+ max_dcfclk_mhz = dcn3_02_soc.clock_limits[0].dcfclk_mhz;
+ if (!max_dispclk_mhz)
+ max_dispclk_mhz = dcn3_02_soc.clock_limits[0].dispclk_mhz;
+ if (!max_dppclk_mhz)
+ max_dppclk_mhz = dcn3_02_soc.clock_limits[0].dppclk_mhz;
+ if (!max_phyclk_mhz)
+ max_phyclk_mhz = dcn3_02_soc.clock_limits[0].phyclk_mhz;
+
+ if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ /* If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array */
+ dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
+ num_dcfclk_sta_targets++;
+ } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ /* If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates */
+ for (i = 0; i < num_dcfclk_sta_targets; i++) {
+ if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
+ dcfclk_sta_targets[i] = max_dcfclk_mhz;
+ break;
+ }
+ }
+ /* Update size of array since we "removed" duplicates */
+ num_dcfclk_sta_targets = i + 1;
+ }
+
+ num_uclk_states = bw_params->clk_table.num_entries;
+
+ /* Calculate optimal dcfclk for each uclk */
+ for (i = 0; i < num_uclk_states; i++) {
+ dcn302_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
+ &optimal_dcfclk_for_uclk[i], NULL);
+ if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
+ optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
+ }
+ }
+
+ /* Calculate optimal uclk for each dcfclk sta target */
+ for (i = 0; i < num_dcfclk_sta_targets; i++) {
+ for (j = 0; j < num_uclk_states; j++) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
+ optimal_uclk_for_dcfclk_sta_targets[i] =
+ bw_params->clk_table.entries[j].memclk_mhz * 16;
+ break;
+ }
+ }
+ }
+
+ i = 0;
+ j = 0;
+ /* create the final dcfclk and uclk table */
+ while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
+ dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
+ } else {
+ if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+ dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
+ dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+ } else {
+ j = num_uclk_states;
+ }
+ }
+ }
+
+ while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
+ dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
+ dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
+ }
+
+ while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
+ optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+ dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
+ dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+ }
+
+ dcn3_02_soc.num_states = num_states;
+ for (i = 0; i < dcn3_02_soc.num_states; i++) {
+ dcn3_02_soc.clock_limits[i].state = i;
+ dcn3_02_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
+ dcn3_02_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
+ dcn3_02_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
+
+ /* Fill all states with max values of all other clocks */
+ dcn3_02_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
+ dcn3_02_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
+ dcn3_02_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
+ dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[0].dtbclk_mhz;
+ /* These clocks cannot come from bw_params; always fill from dcn3_02_soc.clock_limits[0] */
+ /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
+ dcn3_02_soc.clock_limits[i].phyclk_d18_mhz = dcn3_02_soc.clock_limits[0].phyclk_d18_mhz;
+ dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[0].socclk_mhz;
+ dcn3_02_soc.clock_limits[i].dscclk_mhz = dcn3_02_soc.clock_limits[0].dscclk_mhz;
+ }
+ /* re-init DML with updated bb */
+ dml_init_instance(&dc->dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30);
+ if (dc->current_state)
+ dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30);
+ }
+}
+
static struct resource_funcs dcn302_res_pool_funcs = {
.destroy = dcn302_destroy_resource_pool,
.link_enc_create = dcn302_link_encoder_create,
@@ -1240,7 +1405,7 @@ static struct resource_funcs dcn302_res_pool_funcs = {
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
- .update_bw_bounding_box = dcn30_update_bw_bounding_box,
+ .update_bw_bounding_box = dcn302_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
};
@@ -1311,7 +1476,10 @@ static bool dcn302_resource_construct(
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
-
+ dc->caps.mall_size_per_mem_channel = 4;
+ /* total size = mall per channel * num channels * 1024 * 1024 */
+ dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
+ dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
@@ -1354,8 +1522,6 @@ static bool dcn302_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS)
- dc->debug = debug_defaults_diags;
else
dc->debug = debug_defaults_diags;
@@ -1469,6 +1635,14 @@ static bool dcn302_resource_construct(
}
pool->timing_generator_count = i;
+ /* PSR */
+ pool->psr = dmub_psr_create(ctx);
+ if (pool->psr == NULL) {
+ dm_error("DC: failed to create psr!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
/* ABMs */
for (i = 0; i < pool->res_cap->num_timing_generator; i++) {
pool->multiple_abms[i] = dmub_abm_create(ctx, &abm_regs[i], &abm_shift, &abm_mask);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h
index 71f7deed18e3..42d2c73e30bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h
@@ -30,4 +30,6 @@
struct resource_pool *dcn302_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc);
+void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+
#endif /* _DCN302_RESOURCE_H_ */
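
dcn302_update_bw_bounding_box() above derives an optimal DCFCLK for each UCLK DPM level before merging the two lists into the final state table. The per-level derivation is the bandwidth comparison in dcn302_get_optimal_dcfclk_fclk_for_uclk(); a worked example with invented numbers (the real percentages and channel counts come from dcn3_02_soc and the VBIOS vram_info):

    #include <stdio.h>

    /*
     * Toy numbers only. The usable bandwidth is the lower of the DRAM-limited
     * and SDP-limited figures; the optimal DCFCLK is what the return bus needs
     * to carry that bandwidth at the assumed SDP efficiency.
     */
    int main(void)
    {
        double uclk_mts = 1600.0;                    /* one DPM level, MT/s   */
        double num_chans = 8.0, chan_width_bytes = 2.0;
        double dram_pct = 60.0, sdp_pct = 80.0;      /* hypothetical percents */
        double return_bus_width_bytes = 64.0;

        double bw1 = uclk_mts * num_chans * chan_width_bytes * (dram_pct / 100.0);
        double bw2 = uclk_mts * num_chans * chan_width_bytes * (sdp_pct / 100.0);
        double bw = bw1 < bw2 ? bw1 : bw2;                    /* 15360 MB/s   */
        double dcfclk = bw / (return_bus_width_bytes * (sdp_pct / 100.0));

        printf("optimal dcfclk ~ %.0f MHz\n", dcfclk);        /* ~300 MHz     */
        return 0;
    }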
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index 5da7677627a1..cac0b2c0d31b 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -30,9 +30,10 @@ struct dc_link;
struct cp_psp_stream_config {
uint8_t otg_inst;
- uint8_t link_enc_inst;
- uint8_t stream_enc_inst;
- uint8_t mst_supported;
+ uint8_t dig_be;
+ uint8_t dig_fe;
+ uint8_t assr_enabled;
+ uint8_t mst_enabled;
void *dm_stream_ctx;
bool dpms_off;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 45f028986a8d..0f3f510fd83b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -3138,7 +3138,7 @@ static void CalculateFlipSchedule(
4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime + 0.125),
1) / 4.0;
- if ((GPUVMEnable == true || DCCEnable == true)) {
+ if ((GPUVMEnable || DCCEnable)) {
mode_lib->vba.ImmediateFlipBW[0] = BandwidthAvailableForImmediateFlip
* ImmediateFlipBytes / TotImmediateFlipBytes;
TimeForFetchingRowInVBlankImmediateFlip = dml_max(
@@ -4168,10 +4168,11 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->DIOSupport[i] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->OutputBppPerState[i][k] == BPP_INVALID
- || (mode_lib->vba.OutputFormat[k] == dm_420
+ if (!mode_lib->vba.skip_dio_check[k]
+ && (locals->OutputBppPerState[i][k] == BPP_INVALID
+ || (mode_lib->vba.OutputFormat[k] == dm_420
&& mode_lib->vba.Interlace[k] == true
- && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true)) {
+ && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true))) {
locals->DIOSupport[i] = false;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 80170f9721ce..210c96cd5b03 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -3263,6 +3263,7 @@ static void CalculateFlipSchedule(
static unsigned int TruncToValidBPP(
double DecimalBPP,
+ double DesiredBPP,
bool DSCEnabled,
enum output_encoder_class Output,
enum output_format_class Format,
@@ -3270,31 +3271,31 @@ static unsigned int TruncToValidBPP(
{
if (Output == dm_hdmi) {
if (Format == dm_420) {
- if (DecimalBPP >= 18)
+ if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
return 18;
- else if (DecimalBPP >= 15)
+ else if (DecimalBPP >= 15 && (DesiredBPP == 0 || DesiredBPP == 15))
return 15;
- else if (DecimalBPP >= 12)
+ else if (DecimalBPP >= 12 && (DesiredBPP == 0 || DesiredBPP == 12))
return 12;
else
return BPP_INVALID;
} else if (Format == dm_444) {
- if (DecimalBPP >= 36)
+ if (DecimalBPP >= 36 && (DesiredBPP == 0 || DesiredBPP == 36))
return 36;
- else if (DecimalBPP >= 30)
+ else if (DecimalBPP >= 30 && (DesiredBPP == 0 || DesiredBPP == 30))
return 30;
- else if (DecimalBPP >= 24)
+ else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
- else if (DecimalBPP >= 18)
+ else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
return 18;
else
return BPP_INVALID;
} else {
- if (DecimalBPP / 1.5 >= 24)
+ if (DecimalBPP / 1.5 >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
- else if (DecimalBPP / 1.5 >= 20)
+ else if (DecimalBPP / 1.5 >= 20 && (DesiredBPP == 0 || DesiredBPP == 20))
return 20;
- else if (DecimalBPP / 1.5 >= 16)
+ else if (DecimalBPP / 1.5 >= 16 && (DesiredBPP == 0 || DesiredBPP == 16))
return 16;
else
return BPP_INVALID;
@@ -3302,53 +3303,86 @@ static unsigned int TruncToValidBPP(
} else {
if (DSCEnabled) {
if (Format == dm_420) {
- if (DecimalBPP < 6)
- return BPP_INVALID;
- else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1 / 16)
- return 1.5 * DSCInputBitPerComponent - 1 / 16;
- else
- return dml_floor(16 * DecimalBPP, 1) / 16;
+ if (DesiredBPP == 0) {
+ if (DecimalBPP < 6)
+ return BPP_INVALID;
+ else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1.0 / 16.0)
+ return 1.5 * DSCInputBitPerComponent - 1.0 / 16.0;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16.0;
+ } else {
+ if (DecimalBPP < 6
+ || DesiredBPP < 6
+ || DesiredBPP > 1.5 * DSCInputBitPerComponent - 1.0 / 16.0
+ || DecimalBPP < DesiredBPP) {
+ return BPP_INVALID;
+ } else {
+ return DesiredBPP;
+ }
+ }
} else if (Format == dm_n422) {
- if (DecimalBPP < 7)
- return BPP_INVALID;
- else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1 / 16)
- return 2 * DSCInputBitPerComponent - 1 / 16;
- else
- return dml_floor(16 * DecimalBPP, 1) / 16;
+ if (DesiredBPP == 0) {
+ if (DecimalBPP < 7)
+ return BPP_INVALID;
+ else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1.0 / 16.0)
+ return 2 * DSCInputBitPerComponent - 1.0 / 16.0;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16.0;
+ } else {
+ if (DecimalBPP < 7
+ || DesiredBPP < 7
+ || DesiredBPP > 2 * DSCInputBitPerComponent - 1.0 / 16.0
+ || DecimalBPP < DesiredBPP) {
+ return BPP_INVALID;
+ } else {
+ return DesiredBPP;
+ }
+ }
} else {
- if (DecimalBPP < 8)
- return BPP_INVALID;
- else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1 / 16)
- return 3 * DSCInputBitPerComponent - 1 / 16;
- else
- return dml_floor(16 * DecimalBPP, 1) / 16;
+ if (DesiredBPP == 0) {
+ if (DecimalBPP < 8)
+ return BPP_INVALID;
+ else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1.0 / 16.0)
+ return 3 * DSCInputBitPerComponent - 1.0 / 16.0;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16.0;
+ } else {
+ if (DecimalBPP < 8
+ || DesiredBPP < 8
+ || DesiredBPP > 3 * DSCInputBitPerComponent - 1.0 / 16.0
+ || DecimalBPP < DesiredBPP) {
+ return BPP_INVALID;
+ } else {
+ return DesiredBPP;
+ }
+ }
}
} else if (Format == dm_420) {
- if (DecimalBPP >= 18)
+ if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
return 18;
- else if (DecimalBPP >= 15)
+ else if (DecimalBPP >= 15 && (DesiredBPP == 0 || DesiredBPP == 15))
return 15;
- else if (DecimalBPP >= 12)
+ else if (DecimalBPP >= 12 && (DesiredBPP == 0 || DesiredBPP == 12))
return 12;
else
return BPP_INVALID;
} else if (Format == dm_s422 || Format == dm_n422) {
- if (DecimalBPP >= 24)
+ if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
- else if (DecimalBPP >= 20)
+ else if (DecimalBPP >= 20 && (DesiredBPP == 0 || DesiredBPP == 20))
return 20;
- else if (DecimalBPP >= 16)
+ else if (DecimalBPP >= 16 && (DesiredBPP == 0 || DesiredBPP == 16))
return 16;
else
return BPP_INVALID;
} else {
- if (DecimalBPP >= 36)
+ if (DecimalBPP >= 36 && (DesiredBPP == 0 || DesiredBPP == 36))
return 36;
- else if (DecimalBPP >= 30)
+ else if (DecimalBPP >= 30 && (DesiredBPP == 0 || DesiredBPP == 30))
return 30;
- else if (DecimalBPP >= 24)
+ else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
- else if (DecimalBPP >= 18)
+ else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
return 18;
else
return BPP_INVALID;
@@ -4137,6 +4171,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->RequiresFEC[i][k] = 0;
locals->OutputBppPerState[i][k] = TruncToValidBPP(
dml_min(600.0, mode_lib->vba.PHYCLKPerState[i]) / mode_lib->vba.PixelClockBackEnd[k] * 24,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
@@ -4153,6 +4188,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.Outbpp = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * 270.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
@@ -4160,6 +4196,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.OutbppDSC = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 270.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
true,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
@@ -4182,6 +4219,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.Outbpp = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * 540.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
@@ -4189,6 +4227,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.OutbppDSC = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 540.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
true,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
@@ -4213,6 +4252,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.Outbpp = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * 810.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
false,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
@@ -4220,6 +4260,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.OutbppDSC = TruncToValidBPP(
(1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 810.0
* mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0,
+ mode_lib->vba.ForcedOutputLinkBPP[k],
true,
mode_lib->vba.Output[k],
mode_lib->vba.OutputFormat[k],
@@ -4248,10 +4289,11 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->DIOSupport[i] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->OutputBppPerState[i][k] == BPP_INVALID
- || (mode_lib->vba.OutputFormat[k] == dm_420
+ if (!mode_lib->vba.skip_dio_check[k]
+ && (locals->OutputBppPerState[i][k] == BPP_INVALID
+ || (mode_lib->vba.OutputFormat[k] == dm_420
&& mode_lib->vba.Interlace[k] == true
- && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true)) {
+ && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true))) {
locals->DIOSupport[i] = false;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 86ff24dffc3e..398210d1af34 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -4257,10 +4257,11 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->DIOSupport[i] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (locals->OutputBppPerState[i][k] == BPP_INVALID
- || (mode_lib->vba.OutputFormat[k] == dm_420
+ if (!mode_lib->vba.skip_dio_check[k]
+ && (locals->OutputBppPerState[i][k] == BPP_INVALID
+ || (mode_lib->vba.OutputFormat[k] == dm_420
&& mode_lib->vba.Interlace[k] == true
- && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true)) {
+ && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true))) {
locals->DIOSupport[i] = false;
}
}
@@ -5121,48 +5122,48 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (j = 0; j < 2; j++) {
enum dm_validation_status status = DML_VALIDATION_OK;
- if (mode_lib->vba.ScaleRatioAndTapsSupport != true) {
+ if (!mode_lib->vba.ScaleRatioAndTapsSupport) {
status = DML_FAIL_SCALE_RATIO_TAP;
- } else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) {
+ } else if (!mode_lib->vba.SourceFormatPixelAndScanSupport) {
status = DML_FAIL_SOURCE_PIXEL_FORMAT;
- } else if (locals->ViewportSizeSupport[i][0] != true) {
+ } else if (!locals->ViewportSizeSupport[i][0]) {
status = DML_FAIL_VIEWPORT_SIZE;
- } else if (locals->DIOSupport[i] != true) {
+ } else if (!locals->DIOSupport[i]) {
status = DML_FAIL_DIO_SUPPORT;
- } else if (locals->NotEnoughDSCUnits[i] != false) {
+ } else if (locals->NotEnoughDSCUnits[i]) {
status = DML_FAIL_NOT_ENOUGH_DSC;
- } else if (locals->DSCCLKRequiredMoreThanSupported[i] != false) {
+ } else if (locals->DSCCLKRequiredMoreThanSupported[i]) {
status = DML_FAIL_DSC_CLK_REQUIRED;
- } else if (locals->ROBSupport[i][0] != true) {
+ } else if (!locals->ROBSupport[i][0]) {
status = DML_FAIL_REORDERING_BUFFER;
- } else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) {
+ } else if (!locals->DISPCLK_DPPCLK_Support[i][j]) {
status = DML_FAIL_DISPCLK_DPPCLK;
- } else if (locals->TotalAvailablePipesSupport[i][j] != true) {
+ } else if (!locals->TotalAvailablePipesSupport[i][j]) {
status = DML_FAIL_TOTAL_AVAILABLE_PIPES;
- } else if (mode_lib->vba.NumberOfOTGSupport != true) {
+ } else if (!mode_lib->vba.NumberOfOTGSupport) {
status = DML_FAIL_NUM_OTG;
- } else if (mode_lib->vba.WritebackModeSupport != true) {
+ } else if (!mode_lib->vba.WritebackModeSupport) {
status = DML_FAIL_WRITEBACK_MODE;
- } else if (mode_lib->vba.WritebackLatencySupport != true) {
+ } else if (!mode_lib->vba.WritebackLatencySupport) {
status = DML_FAIL_WRITEBACK_LATENCY;
- } else if (mode_lib->vba.WritebackScaleRatioAndTapsSupport != true) {
+ } else if (!mode_lib->vba.WritebackScaleRatioAndTapsSupport) {
status = DML_FAIL_WRITEBACK_SCALE_RATIO_TAP;
- } else if (mode_lib->vba.CursorSupport != true) {
+ } else if (!mode_lib->vba.CursorSupport) {
status = DML_FAIL_CURSOR_SUPPORT;
- } else if (mode_lib->vba.PitchSupport != true) {
+ } else if (!mode_lib->vba.PitchSupport) {
status = DML_FAIL_PITCH_SUPPORT;
- } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) {
+ } else if (!locals->TotalVerticalActiveBandwidthSupport[i][0]) {
status = DML_FAIL_TOTAL_V_ACTIVE_BW;
- } else if (locals->PTEBufferSizeNotExceeded[i][j] != true) {
+ } else if (!locals->PTEBufferSizeNotExceeded[i][j]) {
status = DML_FAIL_PTE_BUFFER_SIZE;
- } else if (mode_lib->vba.NonsupportedDSCInputBPC != false) {
+ } else if (mode_lib->vba.NonsupportedDSCInputBPC) {
status = DML_FAIL_DSC_INPUT_BPC;
- } else if ((mode_lib->vba.HostVMEnable != false
- && locals->ImmediateFlipSupportedForState[i][j] != true)) {
+ } else if ((mode_lib->vba.HostVMEnable
+ && !locals->ImmediateFlipSupportedForState[i][j])) {
status = DML_FAIL_HOST_VM_IMMEDIATE_FLIP;
- } else if (locals->PrefetchSupported[i][j] != true) {
+ } else if (!locals->PrefetchSupported[i][j]) {
status = DML_FAIL_PREFETCH_SUPPORT;
- } else if (locals->VRatioInPrefetchSupported[i][j] != true) {
+ } else if (!locals->VRatioInPrefetchSupported[i][j]) {
status = DML_FAIL_V_RATIO_PREFETCH;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 319dec59bcd1..bc07082c1357 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -1219,13 +1219,13 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: prefetch_bw_equ: %f\n", prefetch_bw_equ);
if (prefetch_bw_equ > 0) {
- if (GPUVMEnable == true) {
+ if (GPUVMEnable) {
Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
} else {
Tvm_equ = LineTime / 4;
}
- if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ if ((GPUVMEnable || myPipe->DCCEnable)) {
Tr0_equ = dml_max4(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_equ,
Tr0_trips,
@@ -4263,7 +4263,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (i = 0; i < v->soc.num_states; i++) {
v->DIOSupport[i] = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
- if (v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi)
+ if (!v->skip_dio_check[k] && v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi)
&& (v->OutputBppPerState[i][k] == 0
|| (v->OutputFormat[k] == dm_420 && v->Interlace[k] == true && v->ProgressiveToInterlaceUnitInOPP == true))) {
v->DIOSupport[i] = false;
@@ -5558,7 +5558,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
- if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
+ if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
} else if (((mode_lib->vba.SynchronizedVBlank == true || mode_lib->vba.TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 5b5916b5bc71..0f14f205ebe5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -165,8 +165,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
unsigned int swath_bytes_c = 0;
unsigned int full_swath_bytes_packed_l = 0;
unsigned int full_swath_bytes_packed_c = 0;
- bool req128_l = 0;
- bool req128_c = 0;
+ bool req128_l = false;
+ bool req128_c = false;
bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
bool surf_vert = (pipe_src_param.source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
@@ -191,37 +191,37 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
total_swath_bytes = 2 * full_swath_bytes_packed_l;
if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
- req128_l = 0;
- req128_c = 0;
+ req128_l = false;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c;
} else if (!rq_param->yuv420) {
- req128_l = 1;
- req128_c = 0;
+ req128_l = true;
+ req128_c = false;
swath_bytes_c = full_swath_bytes_packed_c;
swath_bytes_l = full_swath_bytes_packed_l / 2;
} else if ((double)full_swath_bytes_packed_l / (double)full_swath_bytes_packed_c < 1.5) {
- req128_l = 0;
- req128_c = 1;
+ req128_l = false;
+ req128_c = true;
swath_bytes_l = full_swath_bytes_packed_l;
swath_bytes_c = full_swath_bytes_packed_c / 2;
total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
if (total_swath_bytes > detile_buf_size_in_bytes) {
- req128_l = 1;
+ req128_l = true;
swath_bytes_l = full_swath_bytes_packed_l / 2;
}
} else {
- req128_l = 1;
- req128_c = 0;
+ req128_l = true;
+ req128_c = false;
swath_bytes_l = full_swath_bytes_packed_l/2;
swath_bytes_c = full_swath_bytes_packed_c;
total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
if (total_swath_bytes > detile_buf_size_in_bytes) {
- req128_c = 1;
+ req128_c = true;
swath_bytes_c = full_swath_bytes_packed_c/2;
}
}
@@ -1006,8 +1006,8 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
double min_dst_y_ttu_vblank = 0;
unsigned int dlg_vblank_start = 0;
- bool dual_plane = 0;
- bool mode_422 = 0;
+ bool dual_plane = false;
+ bool mode_422 = false;
unsigned int access_dir = 0;
unsigned int vp_height_l = 0;
unsigned int vp_width_l = 0;
@@ -1021,7 +1021,7 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
double hratio_c = 0;
double vratio_l = 0;
double vratio_c = 0;
- bool scl_enable = 0;
+ bool scl_enable = false;
double line_time_in_us = 0;
// double vinit_l;
@@ -1156,7 +1156,7 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = 0; // TODO
+ mode_422 = false; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
vp_height_l = src->viewport_height;
vp_width_l = src->viewport_width;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index dd0c3b1780d7..0c5128187e08 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -297,6 +297,7 @@ struct _vcs_dpi_display_output_params_st {
int num_active_wb;
int output_bpc;
int output_type;
+ int is_virtual;
int output_format;
int dsc_slices;
int max_audio_sample_rate;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index c9fbb33f05a3..bc0485a59018 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -451,6 +451,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
dout->output_bpp;
mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] =
(enum output_encoder_class) (dout->output_type);
+ mode_lib->vba.skip_dio_check[mode_lib->vba.NumberOfActivePlanes] =
+ dout->is_virtual;
if (!dout->dsc_enable)
mode_lib->vba.ForcedOutputLinkBPP[mode_lib->vba.NumberOfActivePlanes] = dout->output_bpp;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 3529fedc4c52..025aa5bd8ea0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -340,6 +340,7 @@ struct vba_vars_st {
unsigned int DSCInputBitPerComponent[DC__NUM_DPP__MAX];
enum output_format_class OutputFormat[DC__NUM_DPP__MAX];
enum output_encoder_class Output[DC__NUM_DPP__MAX];
+ bool skip_dio_check[DC__NUM_DPP__MAX];
unsigned int BlendingAndTiming[DC__NUM_DPP__MAX];
bool SynchronizedVBlank;
unsigned int NumberOfCursors[DC__NUM_DPP__MAX];
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
index df68430aeb0c..c6e28f6bf1a2 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
@@ -28,6 +28,7 @@
*/
#include "dm_services.h"
+#include "hw_factory_diag.h"
#include "include/gpio_types.h"
#include "../hw_factory.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
index 8a74f6adb8ee..bf68eb1d9a1d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
@@ -26,6 +26,8 @@
#ifndef __DAL_HW_FACTORY_DIAG_FPGA_H__
#define __DAL_HW_FACTORY_DIAG_FPGA_H__
+struct hw_factory;
+
/* Initialize HW factory function pointers and pin info */
void dal_hw_factory_diag_fpga_init(struct hw_factory *factory);
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
index bf9068846927..e5138a5a8eb5 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
@@ -24,6 +24,7 @@
*/
#include "dm_services.h"
+#include "hw_translate_diag.h"
#include "include/gpio_types.h"
#include "../hw_translate.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
index 1ae153eab31d..7a8cec2d7a90 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
@@ -107,13 +107,12 @@ static enum gpio_result set_config(
msleep(3);
}
} else {
- uint32_t reg2;
uint32_t sda_pd_dis = 0;
uint32_t scl_pd_dis = 0;
- reg2 = REG_GET_2(gpio.MASK_reg,
- DC_GPIO_SDA_PD_DIS, &sda_pd_dis,
- DC_GPIO_SCL_PD_DIS, &scl_pd_dis);
+ REG_GET_2(gpio.MASK_reg,
+ DC_GPIO_SDA_PD_DIS, &sda_pd_dis,
+ DC_GPIO_SCL_PD_DIS, &scl_pd_dis);
if (sda_pd_dis) {
REG_SET(gpio.MASK_reg, regval,
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index da73bfb3cacd..92c65d2fa7d7 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -119,17 +119,3 @@ bool dal_hw_factory_init(
return false;
}
}
-
-void dal_hw_factory_destroy(
- struct dc_context *ctx,
- struct hw_factory **factory)
-{
- if (!factory || !*factory) {
- BREAK_TO_DEBUGGER();
- return;
- }
-
- kfree(*factory);
-
- *factory = NULL;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 2d77eac66cb0..8efa1b80546d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -333,6 +333,7 @@ struct pipe_ctx {
union pipe_update_flags update_flags;
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
+ bool vtp_locked;
};
struct resource_context {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index ffd37696b6b9..316301fc1e30 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -309,9 +309,9 @@ static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_cl
static inline bool should_update_pstate_support(bool safe_to_lower, bool calc_support, bool cur_support)
{
if (cur_support != calc_support) {
- if (calc_support == true && safe_to_lower)
+ if (calc_support && safe_to_lower)
return true;
- else if (calc_support == false && !safe_to_lower)
+ else if (!calc_support && !safe_to_lower)
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index 69d9fbfb4bec..cd1c0dc32bf8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -74,6 +74,16 @@ struct dmcu_funcs {
bool (*is_dmcu_initialized)(struct dmcu *dmcu);
bool (*lock_phy)(struct dmcu *dmcu);
bool (*unlock_phy)(struct dmcu *dmcu);
+ bool (*send_edid_cea)(struct dmcu *dmcu,
+ int offset,
+ int total_length,
+ uint8_t *data,
+ int length);
+ bool (*recv_amd_vsdb)(struct dmcu *dmcu,
+ int *version,
+ int *min_frame_rate,
+ int *max_frame_rate);
+ bool (*recv_edid_cea_ack)(struct dmcu *dmcu, int *offset);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index f7632fe25976..754832d216fd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -190,6 +190,7 @@ struct timing_generator_funcs {
void (*set_blank)(struct timing_generator *tg,
bool enable_blanking);
bool (*is_blanked)(struct timing_generator *tg);
+ bool (*is_locked)(struct timing_generator *tg);
void (*set_overscan_blank_color) (struct timing_generator *tg, const struct tg_color *color);
void (*set_blank_color)(struct timing_generator *tg, const struct tg_color *color);
void (*set_colors)(struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 62804dc7b698..0586ab2ffd6a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -54,6 +54,7 @@ struct hw_sequencer_funcs {
/* Embedded Display Related */
void (*edp_power_control)(struct dc_link *link, bool enable);
void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
+ void (*edp_wait_for_T12)(struct dc_link *link);
/* Pipe Programming Related */
void (*init_hw)(struct dc *dc);
@@ -217,6 +218,9 @@ struct hw_sequencer_funcs {
/* Idle Optimization Related */
bool (*apply_idle_power_optimizations)(struct dc *dc, bool enable);
+ bool (*does_plane_fit_in_mall)(struct dc *dc, struct dc_plane_state *plane,
+ struct dc_cursor_attributes *cursor_attr);
+
bool (*is_abm_supported)(struct dc *dc,
struct dc_state *context, struct dc_stream_state *stream);
@@ -227,6 +231,10 @@ struct hw_sequencer_funcs {
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width, int height, int offset);
+
+ void (*set_hubp_blank)(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank_enable);
};
void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index f956b3bde680..34f43cb650f8 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -58,6 +58,18 @@ enum dc_irq_source to_dal_irq_source_dcn10(
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
+ case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC1_VLINE0;
+ case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC2_VLINE0;
+ case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC3_VLINE0;
+ case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC4_VLINE0;
+ case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC5_VLINE0;
+ case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC6_VLINE0;
case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE1;
case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
@@ -167,6 +179,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vline0_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.set = NULL,
.ack = NULL
@@ -241,6 +258,14 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.funcs = &vblank_irq_info_funcs\
}
+#define vline0_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
+ .funcs = &vline0_irq_info_funcs\
+ }
+
#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
@@ -349,6 +374,12 @@ irq_source_info_dcn10[DAL_IRQ_SOURCES_NUMBER] = {
vblank_int_entry(3),
vblank_int_entry(4),
vblank_int_entry(5),
+ vline0_int_entry(0),
+ vline0_int_entry(1),
+ vline0_int_entry(2),
+ vline0_int_entry(3),
+ vline0_int_entry(4),
+ vline0_int_entry(5),
};
static const struct irq_service_funcs irq_service_funcs_dcn10 = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index 1b971265418b..0e0f494fbb5e 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -168,6 +168,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#undef BASE_INNER
#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
@@ -230,6 +235,17 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.funcs = &vblank_irq_info_funcs\
}
+/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
+ * of DCE's DC_IRQ_SOURCE_VUPDATEx.
+ */
+#define vupdate_no_lock_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
+ .funcs = &vupdate_no_lock_irq_info_funcs\
+ }
+
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
@@ -338,6 +354,12 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = {
vupdate_int_entry(3),
vupdate_int_entry(4),
vupdate_int_entry(5),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
+ vupdate_no_lock_int_entry(4),
+ vupdate_no_lock_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index 6bf27bde8724..5f245bde54ff 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -79,7 +79,7 @@ void dal_irq_service_destroy(struct irq_service **irq_service)
*irq_service = NULL;
}
-const struct irq_source_info *find_irq_source_info(
+static const struct irq_source_info *find_irq_source_info(
struct irq_service *irq_service,
enum dc_irq_source source)
{
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
index d0ccd81ad5b4..87812d81fed3 100644
--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -160,6 +160,7 @@ enum irq_type
IRQ_TYPE_PFLIP = DC_IRQ_SOURCE_PFLIP1,
IRQ_TYPE_VUPDATE = DC_IRQ_SOURCE_VUPDATE1,
IRQ_TYPE_VBLANK = DC_IRQ_SOURCE_VBLANK1,
+ IRQ_TYPE_VLINE0 = DC_IRQ_SOURCE_DC1_VLINE0,
};
#define DAL_VALID_IRQ_SRC_NUM(src) \
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 249a076d6f69..072b4e7e624b 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -47,10 +47,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0xf51b86a
+#define DMUB_FW_VERSION_GIT_HASH 0x6444c02e7
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 47
+#define DMUB_FW_VERSION_REVISION 51
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
@@ -458,6 +458,10 @@ struct dmub_rb_cmd_mall {
uint16_t cursor_pitch;
uint16_t cursor_height;
uint8_t cursor_bpp;
+ uint8_t debug_bits;
+
+ uint8_t reserved1;
+ uint8_t reserved2;
};
struct dmub_cmd_digx_encoder_control_data {
@@ -487,13 +491,34 @@ struct dmub_rb_cmd_enable_disp_power_gating {
struct dmub_cmd_enable_disp_power_gating_data power_gating;
};
-struct dmub_cmd_dig1_transmitter_control_data {
+struct dmub_dig_transmitter_control_data_v1_7 {
+ uint8_t phyid; /**< 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF */
+ uint8_t action; /**< Defined as ATOM_TRANSMITER_ACTION_xxx */
+ union {
+ uint8_t digmode; /**< enum atom_encode_mode_def */
+ uint8_t dplaneset; /**< DP voltage swing and pre-emphasis value, "DP_LANE_SET__xDB_y_zV" */
+ } mode_laneset;
+ uint8_t lanenum; /**< Number of lanes */
+ union {
+ uint32_t symclk_10khz; /**< Symbol Clock in 10Khz */
+ } symclk_units;
+ uint8_t hpdsel; /**< =1: HPD1, =2: HPD2, ..., =6: HPD6, =0: HPD is not assigned */
+ uint8_t digfe_sel; /**< DIG front-end selection, bit0 means DIG0 FE is enabled */
+ uint8_t connobj_id; /**< Connector Object Id defined in ObjectId.h */
+ uint8_t reserved0; /**< For future use */
+ uint8_t reserved1; /**< For future use */
+ uint8_t reserved2[3]; /**< For future use */
+ uint32_t reserved3[11]; /**< For future use */
+};
+
+union dmub_cmd_dig1_transmitter_control_data {
struct dig_transmitter_control_parameters_v1_6 dig;
+ struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7;
};
struct dmub_rb_cmd_dig1_transmitter_control {
struct dmub_cmd_header header;
- struct dmub_cmd_dig1_transmitter_control_data transmitter_control;
+ union dmub_cmd_dig1_transmitter_control_data transmitter_control;
};
struct dmub_rb_cmd_dpphy_init {
@@ -624,6 +649,7 @@ enum dmub_cmd_mall_type {
DMUB_CMD__MALL_ACTION_ALLOW = 0,
DMUB_CMD__MALL_ACTION_DISALLOW = 1,
DMUB_CMD__MALL_ACTION_COPY_CURSOR = 2,
+ DMUB_CMD__MALL_ACTION_NO_DF_REQ = 3,
};
struct dmub_cmd_psr_copy_settings_data {
@@ -648,6 +674,7 @@ struct dmub_cmd_psr_copy_settings_data {
uint8_t multi_disp_optimizations_en;
uint16_t init_sdp_deadline;
uint16_t pad2;
+ uint32_t line_time_in_us;
};
struct dmub_rb_cmd_psr_copy_settings {
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index cafba1d23c6a..8e8e65fa83c0 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -81,6 +81,13 @@ static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in,
addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset;
}
+bool dmub_dcn20_use_cached_inbox(struct dmub_srv *dmub)
+{
+ /* Cached inbox is not supported in this fw version range */
+ return !(dmub->fw_version >= DMUB_FW_VERSION(1, 0, 0) &&
+ dmub->fw_version <= DMUB_FW_VERSION(1, 10, 0));
+}
+
void dmub_dcn20_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
@@ -216,7 +223,7 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
dmub_dcn20_translate_addr(&cw4->offset, fb_base, fb_offset, &offset);
/* New firmware can support CW4. */
- if (dmub->fw_version > DMUB_FW_VERSION(1, 0, 10)) {
+ if (dmub_dcn20_use_cached_inbox(dmub)) {
REG_WRITE(DMCUB_REGION3_CW4_OFFSET, offset.u.low_part);
REG_WRITE(DMCUB_REGION3_CW4_OFFSET_HIGH, offset.u.high_part);
REG_WRITE(DMCUB_REGION3_CW4_BASE_ADDRESS, cw4->region.base);
@@ -255,7 +262,7 @@ void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1)
{
/* New firmware can support CW4 for the inbox. */
- if (dmub->fw_version > DMUB_FW_VERSION(1, 0, 10))
+ if (dmub_dcn20_use_cached_inbox(dmub))
REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, inbox1->base);
else
REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index d438f365cbb0..a62be9c0652e 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -198,4 +198,6 @@ void dmub_dcn20_skip_dmub_panel_power_sequence(struct dmub_srv *dmub, bool skip)
union dmub_fw_boot_status dmub_dcn20_get_fw_boot_status(struct dmub_srv *dmub);
+bool dmub_dcn20_use_cached_inbox(struct dmub_srv *dmub);
+
#endif /* _DMUB_DCN20_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
index f00df02ded81..b4bc0df2f14a 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
@@ -26,6 +26,7 @@
#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn20.h"
+#include "dmub_dcn30.h"
#include "sienna_cichlid_ip_offset.h"
#include "dcn/dcn_3_0_0_offset.h"
@@ -154,7 +155,7 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
offset = cw4->offset;
/* New firmware can support CW4. */
- if (dmub->fw_version > DMUB_FW_VERSION(1, 0, 10)) {
+ if (dmub_dcn20_use_cached_inbox(dmub)) {
REG_WRITE(DMCUB_REGION3_CW4_OFFSET, offset.u.low_part);
REG_WRITE(DMCUB_REGION3_CW4_OFFSET_HIGH, offset.u.high_part);
REG_WRITE(DMCUB_REGION3_CW4_BASE_ADDRESS, cw4->region.base);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index f388d36af0b6..61f64a295f06 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -406,6 +406,9 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->fb_offset = params->fb_offset;
dmub->psp_version = params->psp_version;
+ if (dmub->hw_funcs.reset)
+ dmub->hw_funcs.reset(dmub);
+
if (inst_fb && data_fb) {
cw0.offset.quad_part = inst_fb->gpu_addr;
cw0.region.base = DMUB_CW0_BASE;
@@ -427,9 +430,6 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
}
- if (dmub->hw_funcs.reset)
- dmub->hw_funcs.reset(dmub);
-
if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb &&
fw_state_fb && scratch_mem_fb) {
cw2.offset.quad_part = data_fb->gpu_addr;
@@ -489,9 +489,6 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
- if (dmub->hw_init == false)
- return DMUB_STATUS_OK;
-
if (dmub->hw_funcs.reset)
dmub->hw_funcs.reset(dmub);
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_table.c b/drivers/gpu/drm/amd/display/modules/color/color_table.c
index 692e536e7d05..410f2a82b9a2 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_table.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_table.c
@@ -1,10 +1,26 @@
/*
- * Copyright (c) 2019 Advanced Micro Devices, Inc. (unpublished)
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
*
- * All rights reserved. This notice is intended as a precaution against
- * inadvertent publication and does not imply publication or any waiver
- * of confidentiality. The year included in the foregoing notice is the
- * year of creation of the work.
*/
#include "color_table.h"
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 6c678cfb82e3..5c22cf7e6118 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -397,7 +397,7 @@ static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp)
{
return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP &&
- hdcp->connection.link.dp.mst_supported);
+ hdcp->connection.link.dp.mst_enabled);
}
static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 3a367a5968ae..904ce9b88088 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -106,7 +106,7 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
if (is_dp_hdcp(hdcp))
- dtm_cmd->dtm_in_message.topology_update_v2.is_assr = link->dp.assr_supported;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_assr = link->dp.assr_enabled;
dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
@@ -548,6 +548,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
hdcp->connection.is_hdcp2_revoked = 1;
status = MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
+ } else {
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
}
}
mutex_unlock(&psp->hdcp_context.mutex);
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index eed560eecbab..d223ed3be5d3 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -101,8 +101,8 @@ enum mod_hdcp_status {
struct mod_hdcp_displayport {
uint8_t rev;
- uint8_t assr_supported;
- uint8_t mst_supported;
+ uint8_t assr_enabled;
+ uint8_t mst_enabled;
};
struct mod_hdcp_hdmi {
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 0fdf7a3e96de..57f198de5e2c 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -409,16 +409,11 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
/**
- *****************************************************************************
- * Function: mod_build_hf_vsif_infopacket
+ * mod_build_hf_vsif_infopacket - Prepare HDMI Vendor Specific info frame.
+ * Follows HDMI Spec to build up Vendor Specific info frame
*
- * @brief
- * Prepare HDMI Vendor Specific info frame.
- * Follows HDMI Spec to build up Vendor Specific info frame
- *
- * @param [in] stream: contains data we may need to construct VSIF (i.e. timing_3d_format, etc.)
- * @param [out] info_packet: output structure where to store VSIF
- *****************************************************************************
+ * @stream: contains data we may need to construct VSIF (i.e. timing_3d_format, etc.)
+ * @info_packet: output structure where to store VSIF
*/
void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet)
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 4fd8bce95d84..6270ecbd2438 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -266,7 +266,7 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
* format U4.10.
*/
for (i = 1; i+1 < num_entries; i++) {
- lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
+ lut_index = DIV_ROUNDUP((i * params.backlight_lut_array_size), num_entries);
ASSERT(lut_index < params.backlight_lut_array_size);
table->backlight_thresholds[i] = (big_endian) ?
@@ -278,7 +278,7 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
}
}
-void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters params)
+static void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters params)
{
unsigned int set = params.set;
@@ -452,7 +452,7 @@ void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters
params, ram_table);
}
-void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params)
+static void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params)
{
unsigned int set = params.set;
@@ -598,7 +598,7 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
params, ram_table, true);
}
-void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params, bool big_endian)
+static void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params, bool big_endian)
{
unsigned int i, j;
unsigned int set = params.set;
diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h
index 9cb9ceb4d74d..a1ece3eecdf5 100644
--- a/drivers/gpu/drm/amd/include/amd_pcie.h
+++ b/drivers/gpu/drm/amd/include/amd_pcie.h
@@ -28,6 +28,7 @@
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00020000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00040000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 0x00080000
+#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5 0x00100000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_MASK 0xFFFF0000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT 16
@@ -36,6 +37,7 @@
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00000002
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00000004
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 0x00000008
+#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5 0x00000010
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK 0x0000FFFF
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT 0
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 9676016a37ce..43ed6291b2b8 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -213,6 +213,7 @@ enum PP_FEATURE_MASK {
PP_ACG_MASK = 0x10000,
PP_STUTTER_MODE = 0x20000,
PP_AVFS_MASK = 0x40000,
+ PP_GFX_DCS_MASK = 0x80000,
};
enum DC_FEATURE_MASK {
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h
new file mode 100644
index 000000000000..bd129266ebfd
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h
@@ -0,0 +1,345 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _osssys_4_2_0_OFFSET_HEADER
+#define _osssys_4_2_0_OFFSET_HEADER
+
+
+
+// addressBlock: osssys_osssysdec
+// base address: 0x4280
+#define mmIH_VMID_0_LUT 0x0000
+#define mmIH_VMID_0_LUT_BASE_IDX 0
+#define mmIH_VMID_1_LUT 0x0001
+#define mmIH_VMID_1_LUT_BASE_IDX 0
+#define mmIH_VMID_2_LUT 0x0002
+#define mmIH_VMID_2_LUT_BASE_IDX 0
+#define mmIH_VMID_3_LUT 0x0003
+#define mmIH_VMID_3_LUT_BASE_IDX 0
+#define mmIH_VMID_4_LUT 0x0004
+#define mmIH_VMID_4_LUT_BASE_IDX 0
+#define mmIH_VMID_5_LUT 0x0005
+#define mmIH_VMID_5_LUT_BASE_IDX 0
+#define mmIH_VMID_6_LUT 0x0006
+#define mmIH_VMID_6_LUT_BASE_IDX 0
+#define mmIH_VMID_7_LUT 0x0007
+#define mmIH_VMID_7_LUT_BASE_IDX 0
+#define mmIH_VMID_8_LUT 0x0008
+#define mmIH_VMID_8_LUT_BASE_IDX 0
+#define mmIH_VMID_9_LUT 0x0009
+#define mmIH_VMID_9_LUT_BASE_IDX 0
+#define mmIH_VMID_10_LUT 0x000a
+#define mmIH_VMID_10_LUT_BASE_IDX 0
+#define mmIH_VMID_11_LUT 0x000b
+#define mmIH_VMID_11_LUT_BASE_IDX 0
+#define mmIH_VMID_12_LUT 0x000c
+#define mmIH_VMID_12_LUT_BASE_IDX 0
+#define mmIH_VMID_13_LUT 0x000d
+#define mmIH_VMID_13_LUT_BASE_IDX 0
+#define mmIH_VMID_14_LUT 0x000e
+#define mmIH_VMID_14_LUT_BASE_IDX 0
+#define mmIH_VMID_15_LUT 0x000f
+#define mmIH_VMID_15_LUT_BASE_IDX 0
+#define mmIH_VMID_0_LUT_MM 0x0010
+#define mmIH_VMID_0_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_1_LUT_MM 0x0011
+#define mmIH_VMID_1_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_2_LUT_MM 0x0012
+#define mmIH_VMID_2_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_3_LUT_MM 0x0013
+#define mmIH_VMID_3_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_4_LUT_MM 0x0014
+#define mmIH_VMID_4_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_5_LUT_MM 0x0015
+#define mmIH_VMID_5_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_6_LUT_MM 0x0016
+#define mmIH_VMID_6_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_7_LUT_MM 0x0017
+#define mmIH_VMID_7_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_8_LUT_MM 0x0018
+#define mmIH_VMID_8_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_9_LUT_MM 0x0019
+#define mmIH_VMID_9_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_10_LUT_MM 0x001a
+#define mmIH_VMID_10_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_11_LUT_MM 0x001b
+#define mmIH_VMID_11_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_12_LUT_MM 0x001c
+#define mmIH_VMID_12_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_13_LUT_MM 0x001d
+#define mmIH_VMID_13_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_14_LUT_MM 0x001e
+#define mmIH_VMID_14_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_15_LUT_MM 0x001f
+#define mmIH_VMID_15_LUT_MM_BASE_IDX 0
+#define mmIH_COOKIE_0 0x0020
+#define mmIH_COOKIE_0_BASE_IDX 0
+#define mmIH_COOKIE_1 0x0021
+#define mmIH_COOKIE_1_BASE_IDX 0
+#define mmIH_COOKIE_2 0x0022
+#define mmIH_COOKIE_2_BASE_IDX 0
+#define mmIH_COOKIE_3 0x0023
+#define mmIH_COOKIE_3_BASE_IDX 0
+#define mmIH_COOKIE_4 0x0024
+#define mmIH_COOKIE_4_BASE_IDX 0
+#define mmIH_COOKIE_5 0x0025
+#define mmIH_COOKIE_5_BASE_IDX 0
+#define mmIH_COOKIE_6 0x0026
+#define mmIH_COOKIE_6_BASE_IDX 0
+#define mmIH_COOKIE_7 0x0027
+#define mmIH_COOKIE_7_BASE_IDX 0
+#define mmIH_REGISTER_LAST_PART0 0x003f
+#define mmIH_REGISTER_LAST_PART0_BASE_IDX 0
+#define mmSEM_REQ_INPUT_0 0x0040
+#define mmSEM_REQ_INPUT_0_BASE_IDX 0
+#define mmSEM_REQ_INPUT_1 0x0041
+#define mmSEM_REQ_INPUT_1_BASE_IDX 0
+#define mmSEM_REQ_INPUT_2 0x0042
+#define mmSEM_REQ_INPUT_2_BASE_IDX 0
+#define mmSEM_REQ_INPUT_3 0x0043
+#define mmSEM_REQ_INPUT_3_BASE_IDX 0
+#define mmSEM_REGISTER_LAST_PART0 0x007f
+#define mmSEM_REGISTER_LAST_PART0_BASE_IDX 0
+#define mmIH_RB_CNTL 0x0080
+#define mmIH_RB_CNTL_BASE_IDX 0
+#define mmIH_RB_BASE 0x0081
+#define mmIH_RB_BASE_BASE_IDX 0
+#define mmIH_RB_BASE_HI 0x0082
+#define mmIH_RB_BASE_HI_BASE_IDX 0
+#define mmIH_RB_RPTR 0x0083
+#define mmIH_RB_RPTR_BASE_IDX 0
+#define mmIH_RB_WPTR 0x0084
+#define mmIH_RB_WPTR_BASE_IDX 0
+#define mmIH_RB_WPTR_ADDR_HI 0x0085
+#define mmIH_RB_WPTR_ADDR_HI_BASE_IDX 0
+#define mmIH_RB_WPTR_ADDR_LO 0x0086
+#define mmIH_RB_WPTR_ADDR_LO_BASE_IDX 0
+#define mmIH_DOORBELL_RPTR 0x0087
+#define mmIH_DOORBELL_RPTR_BASE_IDX 0
+#define mmIH_RB_CNTL_RING1 0x008c
+#define mmIH_RB_CNTL_RING1_BASE_IDX 0
+#define mmIH_RB_BASE_RING1 0x008d
+#define mmIH_RB_BASE_RING1_BASE_IDX 0
+#define mmIH_RB_BASE_HI_RING1 0x008e
+#define mmIH_RB_BASE_HI_RING1_BASE_IDX 0
+#define mmIH_RB_RPTR_RING1 0x008f
+#define mmIH_RB_RPTR_RING1_BASE_IDX 0
+#define mmIH_RB_WPTR_RING1 0x0090
+#define mmIH_RB_WPTR_RING1_BASE_IDX 0
+#define mmIH_DOORBELL_RPTR_RING1 0x0093
+#define mmIH_DOORBELL_RPTR_RING1_BASE_IDX 0
+#define mmIH_RB_CNTL_RING2 0x0098
+#define mmIH_RB_CNTL_RING2_BASE_IDX 0
+#define mmIH_RB_BASE_RING2 0x0099
+#define mmIH_RB_BASE_RING2_BASE_IDX 0
+#define mmIH_RB_BASE_HI_RING2 0x009a
+#define mmIH_RB_BASE_HI_RING2_BASE_IDX 0
+#define mmIH_RB_RPTR_RING2 0x009b
+#define mmIH_RB_RPTR_RING2_BASE_IDX 0
+#define mmIH_RB_WPTR_RING2 0x009c
+#define mmIH_RB_WPTR_RING2_BASE_IDX 0
+#define mmIH_DOORBELL_RPTR_RING2 0x009f
+#define mmIH_DOORBELL_RPTR_RING2_BASE_IDX 0
+#define mmIH_VERSION 0x00a5
+#define mmIH_VERSION_BASE_IDX 0
+#define mmIH_CNTL 0x00c0
+#define mmIH_CNTL_BASE_IDX 0
+#define mmIH_CNTL2 0x00c1
+#define mmIH_CNTL2_BASE_IDX 0
+#define mmIH_STATUS 0x00c2
+#define mmIH_STATUS_BASE_IDX 0
+#define mmIH_PERFMON_CNTL 0x00c3
+#define mmIH_PERFMON_CNTL_BASE_IDX 0
+#define mmIH_PERFCOUNTER0_RESULT 0x00c4
+#define mmIH_PERFCOUNTER0_RESULT_BASE_IDX 0
+#define mmIH_PERFCOUNTER1_RESULT 0x00c5
+#define mmIH_PERFCOUNTER1_RESULT_BASE_IDX 0
+#define mmIH_DSM_MATCH_VALUE_BIT_31_0 0x00c7
+#define mmIH_DSM_MATCH_VALUE_BIT_31_0_BASE_IDX 0
+#define mmIH_DSM_MATCH_VALUE_BIT_63_32 0x00c8
+#define mmIH_DSM_MATCH_VALUE_BIT_63_32_BASE_IDX 0
+#define mmIH_DSM_MATCH_VALUE_BIT_95_64 0x00c9
+#define mmIH_DSM_MATCH_VALUE_BIT_95_64_BASE_IDX 0
+#define mmIH_DSM_MATCH_FIELD_CONTROL 0x00ca
+#define mmIH_DSM_MATCH_FIELD_CONTROL_BASE_IDX 0
+#define mmIH_DSM_MATCH_DATA_CONTROL 0x00cb
+#define mmIH_DSM_MATCH_DATA_CONTROL_BASE_IDX 0
+#define mmIH_DSM_MATCH_FCN_ID 0x00cc
+#define mmIH_DSM_MATCH_FCN_ID_BASE_IDX 0
+#define mmIH_LIMIT_INT_RATE_CNTL 0x00cd
+#define mmIH_LIMIT_INT_RATE_CNTL_BASE_IDX 0
+#define mmIH_VF_RB_STATUS 0x00ce
+#define mmIH_VF_RB_STATUS_BASE_IDX 0
+#define mmIH_VF_RB_STATUS2 0x00cf
+#define mmIH_VF_RB_STATUS2_BASE_IDX 0
+#define mmIH_VF_RB1_STATUS 0x00d0
+#define mmIH_VF_RB1_STATUS_BASE_IDX 0
+#define mmIH_VF_RB1_STATUS2 0x00d1
+#define mmIH_VF_RB1_STATUS2_BASE_IDX 0
+#define mmIH_VF_RB2_STATUS 0x00d2
+#define mmIH_VF_RB2_STATUS_BASE_IDX 0
+#define mmIH_VF_RB2_STATUS2 0x00d3
+#define mmIH_VF_RB2_STATUS2_BASE_IDX 0
+#define mmIH_INT_FLOOD_CNTL 0x00d5
+#define mmIH_INT_FLOOD_CNTL_BASE_IDX 0
+#define mmIH_RB0_INT_FLOOD_STATUS 0x00d6
+#define mmIH_RB0_INT_FLOOD_STATUS_BASE_IDX 0
+#define mmIH_RB1_INT_FLOOD_STATUS 0x00d7
+#define mmIH_RB1_INT_FLOOD_STATUS_BASE_IDX 0
+#define mmIH_RB2_INT_FLOOD_STATUS 0x00d8
+#define mmIH_RB2_INT_FLOOD_STATUS_BASE_IDX 0
+#define mmIH_INT_FLOOD_STATUS 0x00d9
+#define mmIH_INT_FLOOD_STATUS_BASE_IDX 0
+#define mmIH_STORM_CLIENT_LIST_CNTL 0x00da
+#define mmIH_STORM_CLIENT_LIST_CNTL_BASE_IDX 0
+#define mmIH_CLK_CTRL 0x00db
+#define mmIH_CLK_CTRL_BASE_IDX 0
+#define mmIH_INT_FLAGS 0x00dc
+#define mmIH_INT_FLAGS_BASE_IDX 0
+#define mmIH_LAST_INT_INFO0 0x00dd
+#define mmIH_LAST_INT_INFO0_BASE_IDX 0
+#define mmIH_LAST_INT_INFO1 0x00de
+#define mmIH_LAST_INT_INFO1_BASE_IDX 0
+#define mmIH_LAST_INT_INFO2 0x00df
+#define mmIH_LAST_INT_INFO2_BASE_IDX 0
+#define mmIH_SCRATCH 0x00e0
+#define mmIH_SCRATCH_BASE_IDX 0
+#define mmIH_CLIENT_CREDIT_ERROR 0x00e1
+#define mmIH_CLIENT_CREDIT_ERROR_BASE_IDX 0
+#define mmIH_GPU_IOV_VIOLATION_LOG 0x00e2
+#define mmIH_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define mmIH_COOKIE_REC_VIOLATION_LOG 0x00e3
+#define mmIH_COOKIE_REC_VIOLATION_LOG_BASE_IDX 0
+#define mmIH_CREDIT_STATUS 0x00e4
+#define mmIH_CREDIT_STATUS_BASE_IDX 0
+#define mmIH_MMHUB_ERROR 0x00e5
+#define mmIH_MMHUB_ERROR_BASE_IDX 0
+#define mmIH_MEM_POWER_CTRL 0x00e8
+#define mmIH_MEM_POWER_CTRL_BASE_IDX 0
+#define mmIH_REGISTER_LAST_PART2 0x00ff
+#define mmIH_REGISTER_LAST_PART2_BASE_IDX 0
+#define mmSEM_CLK_CTRL 0x0100
+#define mmSEM_CLK_CTRL_BASE_IDX 0
+#define mmSEM_UTC_CREDIT 0x0101
+#define mmSEM_UTC_CREDIT_BASE_IDX 0
+#define mmSEM_UTC_CONFIG 0x0102
+#define mmSEM_UTC_CONFIG_BASE_IDX 0
+#define mmSEM_UTCL2_TRAN_EN_LUT 0x0103
+#define mmSEM_UTCL2_TRAN_EN_LUT_BASE_IDX 0
+#define mmSEM_MCIF_CONFIG 0x0104
+#define mmSEM_MCIF_CONFIG_BASE_IDX 0
+#define mmSEM_PERFMON_CNTL 0x0105
+#define mmSEM_PERFMON_CNTL_BASE_IDX 0
+#define mmSEM_PERFCOUNTER0_RESULT 0x0106
+#define mmSEM_PERFCOUNTER0_RESULT_BASE_IDX 0
+#define mmSEM_PERFCOUNTER1_RESULT 0x0107
+#define mmSEM_PERFCOUNTER1_RESULT_BASE_IDX 0
+#define mmSEM_STATUS 0x0108
+#define mmSEM_STATUS_BASE_IDX 0
+#define mmSEM_MAILBOX_CLIENTCONFIG 0x0109
+#define mmSEM_MAILBOX_CLIENTCONFIG_BASE_IDX 0
+#define mmSEM_MAILBOX 0x010a
+#define mmSEM_MAILBOX_BASE_IDX 0
+#define mmSEM_MAILBOX_CONTROL 0x010b
+#define mmSEM_MAILBOX_CONTROL_BASE_IDX 0
+#define mmSEM_CHICKEN_BITS 0x010c
+#define mmSEM_CHICKEN_BITS_BASE_IDX 0
+#define mmSEM_MAILBOX_CLIENTCONFIG_EXTRA 0x010d
+#define mmSEM_MAILBOX_CLIENTCONFIG_EXTRA_BASE_IDX 0
+#define mmSEM_GPU_IOV_VIOLATION_LOG 0x010e
+#define mmSEM_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define mmSEM_OUTSTANDING_THRESHOLD 0x010f
+#define mmSEM_OUTSTANDING_THRESHOLD_BASE_IDX 0
+#define mmSEM_MEM_POWER_CTRL 0x0110
+#define mmSEM_MEM_POWER_CTRL_BASE_IDX 0
+#define mmSEM_REGISTER_LAST_PART2 0x017f
+#define mmSEM_REGISTER_LAST_PART2_BASE_IDX 0
+#define mmIH_ACTIVE_FCN_ID 0x0180
+#define mmIH_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmIH_VIRT_RESET_REQ 0x0181
+#define mmIH_VIRT_RESET_REQ_BASE_IDX 0
+#define mmIH_CLIENT_CFG 0x0184
+#define mmIH_CLIENT_CFG_BASE_IDX 0
+#define mmIH_CLIENT_CFG_INDEX 0x0188
+#define mmIH_CLIENT_CFG_INDEX_BASE_IDX 0
+#define mmIH_CLIENT_CFG_DATA 0x0189
+#define mmIH_CLIENT_CFG_DATA_BASE_IDX 0
+#define mmIH_CID_REMAP_INDEX 0x018a
+#define mmIH_CID_REMAP_INDEX_BASE_IDX 0
+#define mmIH_CID_REMAP_DATA 0x018b
+#define mmIH_CID_REMAP_DATA_BASE_IDX 0
+#define mmIH_CHICKEN 0x018c
+#define mmIH_CHICKEN_BASE_IDX 0
+#define mmIH_MMHUB_CNTL 0x018d
+#define mmIH_MMHUB_CNTL_BASE_IDX 0
+#define mmIH_INT_DROP_CNTL 0x018e
+#define mmIH_INT_DROP_CNTL_BASE_IDX 0
+#define mmIH_INT_DROP_MATCH_VALUE0 0x018f
+#define mmIH_INT_DROP_MATCH_VALUE0_BASE_IDX 0
+#define mmIH_INT_DROP_MATCH_VALUE1 0x0190
+#define mmIH_INT_DROP_MATCH_VALUE1_BASE_IDX 0
+#define mmIH_INT_DROP_MATCH_MASK0 0x0191
+#define mmIH_INT_DROP_MATCH_MASK0_BASE_IDX 0
+#define mmIH_INT_DROP_MATCH_MASK1 0x0192
+#define mmIH_INT_DROP_MATCH_MASK1_BASE_IDX 0
+#define mmIH_REGISTER_LAST_PART1 0x019f
+#define mmIH_REGISTER_LAST_PART1_BASE_IDX 0
+#define mmSEM_ACTIVE_FCN_ID 0x01a0
+#define mmSEM_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmSEM_VIRT_RESET_REQ 0x01a1
+#define mmSEM_VIRT_RESET_REQ_BASE_IDX 0
+#define mmSEM_RESP_SDMA0 0x01a4
+#define mmSEM_RESP_SDMA0_BASE_IDX 0
+#define mmSEM_RESP_SDMA1 0x01a5
+#define mmSEM_RESP_SDMA1_BASE_IDX 0
+#define mmSEM_RESP_UVD 0x01a6
+#define mmSEM_RESP_UVD_BASE_IDX 0
+#define mmSEM_RESP_VCE_0 0x01a7
+#define mmSEM_RESP_VCE_0_BASE_IDX 0
+#define mmSEM_RESP_ACP 0x01a8
+#define mmSEM_RESP_ACP_BASE_IDX 0
+#define mmSEM_RESP_ISP 0x01a9
+#define mmSEM_RESP_ISP_BASE_IDX 0
+#define mmSEM_RESP_VCE_1 0x01aa
+#define mmSEM_RESP_VCE_1_BASE_IDX 0
+#define mmSEM_RESP_VP8 0x01ab
+#define mmSEM_RESP_VP8_BASE_IDX 0
+#define mmSEM_RESP_GC 0x01ac
+#define mmSEM_RESP_GC_BASE_IDX 0
+#define mmSEM_RESP_UVD_1 0x01ad
+#define mmSEM_RESP_UVD_1_BASE_IDX 0
+#define mmSEM_CID_REMAP_INDEX 0x01b0
+#define mmSEM_CID_REMAP_INDEX_BASE_IDX 0
+#define mmSEM_CID_REMAP_DATA 0x01b1
+#define mmSEM_CID_REMAP_DATA_BASE_IDX 0
+#define mmSEM_ATOMIC_OP_LUT 0x01b2
+#define mmSEM_ATOMIC_OP_LUT_BASE_IDX 0
+#define mmSEM_EDC_CONFIG 0x01b3
+#define mmSEM_EDC_CONFIG_BASE_IDX 0
+#define mmSEM_CHICKEN_BITS2 0x01b4
+#define mmSEM_CHICKEN_BITS2_BASE_IDX 0
+#define mmSEM_MMHUB_CNTL 0x01b5
+#define mmSEM_MMHUB_CNTL_BASE_IDX 0
+#define mmSEM_REGISTER_LAST_PART1 0x01bf
+#define mmSEM_REGISTER_LAST_PART1_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h
new file mode 100644
index 000000000000..3ea83ea9ce3a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h
@@ -0,0 +1,1300 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _osssys_4_2_0_SH_MASK_HEADER
+#define _osssys_4_2_0_SH_MASK_HEADER
+
+
+// addressBlock: osssys_osssysdec
+//IH_VMID_0_LUT
+#define IH_VMID_0_LUT__PASID__SHIFT 0x0
+#define IH_VMID_0_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_1_LUT
+#define IH_VMID_1_LUT__PASID__SHIFT 0x0
+#define IH_VMID_1_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_2_LUT
+#define IH_VMID_2_LUT__PASID__SHIFT 0x0
+#define IH_VMID_2_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_3_LUT
+#define IH_VMID_3_LUT__PASID__SHIFT 0x0
+#define IH_VMID_3_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_4_LUT
+#define IH_VMID_4_LUT__PASID__SHIFT 0x0
+#define IH_VMID_4_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_5_LUT
+#define IH_VMID_5_LUT__PASID__SHIFT 0x0
+#define IH_VMID_5_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_6_LUT
+#define IH_VMID_6_LUT__PASID__SHIFT 0x0
+#define IH_VMID_6_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_7_LUT
+#define IH_VMID_7_LUT__PASID__SHIFT 0x0
+#define IH_VMID_7_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_8_LUT
+#define IH_VMID_8_LUT__PASID__SHIFT 0x0
+#define IH_VMID_8_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_9_LUT
+#define IH_VMID_9_LUT__PASID__SHIFT 0x0
+#define IH_VMID_9_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_10_LUT
+#define IH_VMID_10_LUT__PASID__SHIFT 0x0
+#define IH_VMID_10_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_11_LUT
+#define IH_VMID_11_LUT__PASID__SHIFT 0x0
+#define IH_VMID_11_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_12_LUT
+#define IH_VMID_12_LUT__PASID__SHIFT 0x0
+#define IH_VMID_12_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_13_LUT
+#define IH_VMID_13_LUT__PASID__SHIFT 0x0
+#define IH_VMID_13_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_14_LUT
+#define IH_VMID_14_LUT__PASID__SHIFT 0x0
+#define IH_VMID_14_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_15_LUT
+#define IH_VMID_15_LUT__PASID__SHIFT 0x0
+#define IH_VMID_15_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_0_LUT_MM
+#define IH_VMID_0_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_0_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_1_LUT_MM
+#define IH_VMID_1_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_1_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_2_LUT_MM
+#define IH_VMID_2_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_2_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_3_LUT_MM
+#define IH_VMID_3_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_3_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_4_LUT_MM
+#define IH_VMID_4_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_4_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_5_LUT_MM
+#define IH_VMID_5_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_5_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_6_LUT_MM
+#define IH_VMID_6_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_6_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_7_LUT_MM
+#define IH_VMID_7_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_7_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_8_LUT_MM
+#define IH_VMID_8_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_8_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_9_LUT_MM
+#define IH_VMID_9_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_9_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_10_LUT_MM
+#define IH_VMID_10_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_10_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_11_LUT_MM
+#define IH_VMID_11_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_11_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_12_LUT_MM
+#define IH_VMID_12_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_12_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_13_LUT_MM
+#define IH_VMID_13_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_13_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_14_LUT_MM
+#define IH_VMID_14_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_14_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_15_LUT_MM
+#define IH_VMID_15_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_15_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_COOKIE_0
+#define IH_COOKIE_0__CLIENT_ID__SHIFT 0x0
+#define IH_COOKIE_0__SOURCE_ID__SHIFT 0x8
+#define IH_COOKIE_0__RING_ID__SHIFT 0x10
+#define IH_COOKIE_0__VM_ID__SHIFT 0x18
+#define IH_COOKIE_0__RESERVED__SHIFT 0x1c
+#define IH_COOKIE_0__VMID_TYPE__SHIFT 0x1f
+#define IH_COOKIE_0__CLIENT_ID_MASK 0x000000FFL
+#define IH_COOKIE_0__SOURCE_ID_MASK 0x0000FF00L
+#define IH_COOKIE_0__RING_ID_MASK 0x00FF0000L
+#define IH_COOKIE_0__VM_ID_MASK 0x0F000000L
+#define IH_COOKIE_0__RESERVED_MASK 0x70000000L
+#define IH_COOKIE_0__VMID_TYPE_MASK 0x80000000L
+//IH_COOKIE_1
+#define IH_COOKIE_1__TIMESTAMP_31_0__SHIFT 0x0
+#define IH_COOKIE_1__TIMESTAMP_31_0_MASK 0xFFFFFFFFL
+//IH_COOKIE_2
+#define IH_COOKIE_2__TIMESTAMP_47_32__SHIFT 0x0
+#define IH_COOKIE_2__RESERVED__SHIFT 0x10
+#define IH_COOKIE_2__TIMESTAMP_SRC__SHIFT 0x1f
+#define IH_COOKIE_2__TIMESTAMP_47_32_MASK 0x0000FFFFL
+#define IH_COOKIE_2__RESERVED_MASK 0x7FFF0000L
+#define IH_COOKIE_2__TIMESTAMP_SRC_MASK 0x80000000L
+//IH_COOKIE_3
+#define IH_COOKIE_3__PAS_ID__SHIFT 0x0
+#define IH_COOKIE_3__RESERVED__SHIFT 0x10
+#define IH_COOKIE_3__PASID_SRC__SHIFT 0x1f
+#define IH_COOKIE_3__PAS_ID_MASK 0x0000FFFFL
+#define IH_COOKIE_3__RESERVED_MASK 0x7FFF0000L
+#define IH_COOKIE_3__PASID_SRC_MASK 0x80000000L
+//IH_COOKIE_4
+#define IH_COOKIE_4__CONTEXT_ID_31_0__SHIFT 0x0
+#define IH_COOKIE_4__CONTEXT_ID_31_0_MASK 0xFFFFFFFFL
+//IH_COOKIE_5
+#define IH_COOKIE_5__CONTEXT_ID_63_32__SHIFT 0x0
+#define IH_COOKIE_5__CONTEXT_ID_63_32_MASK 0xFFFFFFFFL
+//IH_COOKIE_6
+#define IH_COOKIE_6__CONTEXT_ID_95_64__SHIFT 0x0
+#define IH_COOKIE_6__CONTEXT_ID_95_64_MASK 0xFFFFFFFFL
+//IH_COOKIE_7
+#define IH_COOKIE_7__CONTEXT_ID_128_96__SHIFT 0x0
+#define IH_COOKIE_7__CONTEXT_ID_128_96_MASK 0xFFFFFFFFL
+//IH_REGISTER_LAST_PART0
+#define IH_REGISTER_LAST_PART0__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART0__RESERVED_MASK 0xFFFFFFFFL
+//SEM_REQ_INPUT_0
+#define SEM_REQ_INPUT_0__DATA__SHIFT 0x0
+#define SEM_REQ_INPUT_0__DATA_MASK 0xFFFFFFFFL
+//SEM_REQ_INPUT_1
+#define SEM_REQ_INPUT_1__DATA__SHIFT 0x0
+#define SEM_REQ_INPUT_1__DATA_MASK 0xFFFFFFFFL
+//SEM_REQ_INPUT_2
+#define SEM_REQ_INPUT_2__DATA__SHIFT 0x0
+#define SEM_REQ_INPUT_2__DATA_MASK 0xFFFFFFFFL
+//SEM_REQ_INPUT_3
+#define SEM_REQ_INPUT_3__DATA__SHIFT 0x0
+#define SEM_REQ_INPUT_3__DATA_MASK 0xFFFFFFFFL
+//SEM_REGISTER_LAST_PART0
+#define SEM_REGISTER_LAST_PART0__RESERVED__SHIFT 0x0
+#define SEM_REGISTER_LAST_PART0__RESERVED_MASK 0xFFFFFFFFL
+//IH_RB_CNTL
+#define IH_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL__RB_GPU_TS_ENABLE__SHIFT 0x7
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x8
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL__ENABLE_INTR__SHIFT 0x11
+#define IH_RB_CNTL__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL__RPTR_REARM__SHIFT 0x15
+#define IH_RB_CNTL__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL__RB_GPU_TS_ENABLE_MASK 0x00000080L
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL__ENABLE_INTR_MASK 0x00020000L
+#define IH_RB_CNTL__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL__RPTR_REARM_MASK 0x00200000L
+#define IH_RB_CNTL__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_BASE
+#define IH_RB_BASE__ADDR__SHIFT 0x0
+#define IH_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI
+#define IH_RB_BASE_HI__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI__ADDR_MASK 0x000000FFL
+//IH_RB_RPTR
+#define IH_RB_RPTR__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR
+#define IH_RB_WPTR__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_RB_WPTR_ADDR_HI
+#define IH_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define IH_RB_WPTR_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//IH_RB_WPTR_ADDR_LO
+#define IH_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define IH_RB_WPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//IH_DOORBELL_RPTR
+#define IH_DOORBELL_RPTR__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR__ENABLE_MASK 0x10000000L
+//IH_RB_CNTL_RING1
+#define IH_RB_CNTL_RING1__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL_RING1__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL_RING1__RB_GPU_TS_ENABLE__SHIFT 0x7
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL_RING1__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL_RING1__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL_RING1__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL_RING1__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL_RING1__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL_RING1__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL_RING1__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL_RING1__RB_GPU_TS_ENABLE_MASK 0x00000080L
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL_RING1__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL_RING1__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL_RING1__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL_RING1__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL_RING1__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_BASE_RING1
+#define IH_RB_BASE_RING1__ADDR__SHIFT 0x0
+#define IH_RB_BASE_RING1__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI_RING1
+#define IH_RB_BASE_HI_RING1__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI_RING1__ADDR_MASK 0x000000FFL
+//IH_RB_RPTR_RING1
+#define IH_RB_RPTR_RING1__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR_RING1__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR_RING1
+#define IH_RB_WPTR_RING1__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR_RING1__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR_RING1__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR_RING1__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_DOORBELL_RPTR_RING1
+#define IH_DOORBELL_RPTR_RING1__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR_RING1__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR_RING1__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR_RING1__ENABLE_MASK 0x10000000L
+//IH_RB_CNTL_RING2
+#define IH_RB_CNTL_RING2__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL_RING2__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL_RING2__RB_GPU_TS_ENABLE__SHIFT 0x7
+#define IH_RB_CNTL_RING2__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL_RING2__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL_RING2__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL_RING2__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL_RING2__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL_RING2__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL_RING2__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL_RING2__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL_RING2__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL_RING2__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL_RING2__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL_RING2__RB_GPU_TS_ENABLE_MASK 0x00000080L
+#define IH_RB_CNTL_RING2__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL_RING2__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL_RING2__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL_RING2__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL_RING2__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL_RING2__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL_RING2__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL_RING2__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL_RING2__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_BASE_RING2
+#define IH_RB_BASE_RING2__ADDR__SHIFT 0x0
+#define IH_RB_BASE_RING2__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI_RING2
+#define IH_RB_BASE_HI_RING2__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI_RING2__ADDR_MASK 0x000000FFL
+//IH_RB_RPTR_RING2
+#define IH_RB_RPTR_RING2__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR_RING2__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR_RING2
+#define IH_RB_WPTR_RING2__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR_RING2__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR_RING2__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR_RING2__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR_RING2__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR_RING2__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR_RING2__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR_RING2__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_DOORBELL_RPTR_RING2
+#define IH_DOORBELL_RPTR_RING2__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR_RING2__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR_RING2__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR_RING2__ENABLE_MASK 0x10000000L
+//IH_VERSION
+#define IH_VERSION__MINVER__SHIFT 0x0
+#define IH_VERSION__MAJVER__SHIFT 0x8
+#define IH_VERSION__REV__SHIFT 0x10
+#define IH_VERSION__MINVER_MASK 0x0000007FL
+#define IH_VERSION__MAJVER_MASK 0x00007F00L
+#define IH_VERSION__REV_MASK 0x003F0000L
+//IH_CNTL
+#define IH_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x0
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL__SHIFT 0x6
+#define IH_CNTL__IH_FIFO_HIGHWATER__SHIFT 0x8
+#define IH_CNTL__MC_WR_CLEAN_CNT__SHIFT 0x14
+#define IH_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x0000001FL
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL_MASK 0x000000C0L
+#define IH_CNTL__IH_FIFO_HIGHWATER_MASK 0x00007F00L
+#define IH_CNTL__MC_WR_CLEAN_CNT_MASK 0x01F00000L
+//IH_CNTL2
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT__SHIFT 0x0
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE__SHIFT 0x8
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT_MASK 0x0000001FL
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE_MASK 0x00000100L
+//IH_STATUS
+#define IH_STATUS__IDLE__SHIFT 0x0
+#define IH_STATUS__INPUT_IDLE__SHIFT 0x1
+#define IH_STATUS__BUFFER_IDLE__SHIFT 0x2
+#define IH_STATUS__RB_FULL__SHIFT 0x3
+#define IH_STATUS__RB_FULL_DRAIN__SHIFT 0x4
+#define IH_STATUS__RB_OVERFLOW__SHIFT 0x5
+#define IH_STATUS__MC_WR_IDLE__SHIFT 0x6
+#define IH_STATUS__MC_WR_STALL__SHIFT 0x7
+#define IH_STATUS__MC_WR_CLEAN_PENDING__SHIFT 0x8
+#define IH_STATUS__MC_WR_CLEAN_STALL__SHIFT 0x9
+#define IH_STATUS__BIF_INTERRUPT_LINE__SHIFT 0xa
+#define IH_STATUS__SWITCH_READY__SHIFT 0xb
+#define IH_STATUS__RB1_FULL__SHIFT 0xc
+#define IH_STATUS__RB1_FULL_DRAIN__SHIFT 0xd
+#define IH_STATUS__RB1_OVERFLOW__SHIFT 0xe
+#define IH_STATUS__RB2_FULL__SHIFT 0xf
+#define IH_STATUS__RB2_FULL_DRAIN__SHIFT 0x10
+#define IH_STATUS__RB2_OVERFLOW__SHIFT 0x11
+#define IH_STATUS__SELF_INT_GEN_IDLE__SHIFT 0x12
+#define IH_STATUS__IDLE_MASK 0x00000001L
+#define IH_STATUS__INPUT_IDLE_MASK 0x00000002L
+#define IH_STATUS__BUFFER_IDLE_MASK 0x00000004L
+#define IH_STATUS__RB_FULL_MASK 0x00000008L
+#define IH_STATUS__RB_FULL_DRAIN_MASK 0x00000010L
+#define IH_STATUS__RB_OVERFLOW_MASK 0x00000020L
+#define IH_STATUS__MC_WR_IDLE_MASK 0x00000040L
+#define IH_STATUS__MC_WR_STALL_MASK 0x00000080L
+#define IH_STATUS__MC_WR_CLEAN_PENDING_MASK 0x00000100L
+#define IH_STATUS__MC_WR_CLEAN_STALL_MASK 0x00000200L
+#define IH_STATUS__BIF_INTERRUPT_LINE_MASK 0x00000400L
+#define IH_STATUS__SWITCH_READY_MASK 0x00000800L
+#define IH_STATUS__RB1_FULL_MASK 0x00001000L
+#define IH_STATUS__RB1_FULL_DRAIN_MASK 0x00002000L
+#define IH_STATUS__RB1_OVERFLOW_MASK 0x00004000L
+#define IH_STATUS__RB2_FULL_MASK 0x00008000L
+#define IH_STATUS__RB2_FULL_DRAIN_MASK 0x00010000L
+#define IH_STATUS__RB2_OVERFLOW_MASK 0x00020000L
+#define IH_STATUS__SELF_INT_GEN_IDLE_MASK 0x00040000L
+//IH_PERFMON_CNTL
+#define IH_PERFMON_CNTL__ENABLE0__SHIFT 0x0
+#define IH_PERFMON_CNTL__CLEAR0__SHIFT 0x1
+#define IH_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define IH_PERFMON_CNTL__ENABLE1__SHIFT 0x10
+#define IH_PERFMON_CNTL__CLEAR1__SHIFT 0x11
+#define IH_PERFMON_CNTL__PERF_SEL1__SHIFT 0x12
+#define IH_PERFMON_CNTL__ENABLE0_MASK 0x00000001L
+#define IH_PERFMON_CNTL__CLEAR0_MASK 0x00000002L
+#define IH_PERFMON_CNTL__PERF_SEL0_MASK 0x000007FCL
+#define IH_PERFMON_CNTL__ENABLE1_MASK 0x00010000L
+#define IH_PERFMON_CNTL__CLEAR1_MASK 0x00020000L
+#define IH_PERFMON_CNTL__PERF_SEL1_MASK 0x07FC0000L
+//IH_PERFCOUNTER0_RESULT
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//IH_PERFCOUNTER1_RESULT
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_31_0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_63_32
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_95_64
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_FIELD_CONTROL
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN__SHIFT 0x0
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN__SHIFT 0x1
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN__SHIFT 0x2
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN__SHIFT 0x3
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN__SHIFT 0x4
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN__SHIFT 0x5
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN__SHIFT 0x6
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN_MASK 0x00000001L
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN_MASK 0x00000002L
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN_MASK 0x00000004L
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN_MASK 0x00000008L
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN_MASK 0x00000010L
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN_MASK 0x00000020L
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN_MASK 0x00000040L
+//IH_DSM_MATCH_DATA_CONTROL
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE_MASK 0x0FFFFFFFL
+//IH_DSM_MATCH_FCN_ID
+#define IH_DSM_MATCH_FCN_ID__PF_VF__SHIFT 0x0
+#define IH_DSM_MATCH_FCN_ID__VF_ID__SHIFT 0x1
+#define IH_DSM_MATCH_FCN_ID__PF_VF_MASK 0x00000001L
+#define IH_DSM_MATCH_FCN_ID__VF_ID_MASK 0x0000001EL
+//IH_LIMIT_INT_RATE_CNTL
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE__SHIFT 0x0
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL__SHIFT 0x1
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD__SHIFT 0x5
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY__SHIFT 0x11
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT__SHIFT 0x15
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE_MASK 0x00000001L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL_MASK 0x0000001EL
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD_MASK 0x0000FFE0L
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY_MASK 0x001E0000L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT_MASK 0xFFE00000L
+//IH_VF_RB_STATUS
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS__RB_OVERFLOW_VF__SHIFT 0x10
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF_MASK 0x0000FFFFL
+#define IH_VF_RB_STATUS__RB_OVERFLOW_VF_MASK 0xFFFF0000L
+//IH_VF_RB_STATUS2
+#define IH_VF_RB_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS2__BIF_INTERRUPT_LINE_VF__SHIFT 0x10
+#define IH_VF_RB_STATUS2__RB_FULL_VF_MASK 0x0000FFFFL
+#define IH_VF_RB_STATUS2__BIF_INTERRUPT_LINE_VF_MASK 0xFFFF0000L
+//IH_VF_RB1_STATUS
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS__RB_OVERFLOW_VF__SHIFT 0x10
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF_MASK 0x0000FFFFL
+#define IH_VF_RB1_STATUS__RB_OVERFLOW_VF_MASK 0xFFFF0000L
+//IH_VF_RB1_STATUS2
+#define IH_VF_RB1_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS2__RB_FULL_VF_MASK 0x0000FFFFL
+//IH_VF_RB2_STATUS
+#define IH_VF_RB2_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB2_STATUS__RB_OVERFLOW_VF__SHIFT 0x10
+#define IH_VF_RB2_STATUS__RB_FULL_DRAIN_VF_MASK 0x0000FFFFL
+#define IH_VF_RB2_STATUS__RB_OVERFLOW_VF_MASK 0xFFFF0000L
+//IH_VF_RB2_STATUS2
+#define IH_VF_RB2_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB2_STATUS2__RB_FULL_VF_MASK 0x0000FFFFL
+//IH_INT_FLOOD_CNTL
+#define IH_INT_FLOOD_CNTL__HIGHWATER__SHIFT 0x0
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE__SHIFT 0x3
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS__SHIFT 0x4
+#define IH_INT_FLOOD_CNTL__HIGHWATER_MASK 0x00000007L
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE_MASK 0x00000008L
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS_MASK 0x00000010L
+//IH_RB0_INT_FLOOD_STATUS
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x0000FFFFL
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_RB1_INT_FLOOD_STATUS
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x0000FFFFL
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_RB2_INT_FLOOD_STATUS
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x0000FFFFL
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_INT_FLOOD_STATUS
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT__SHIFT 0x0
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID__SHIFT 0x8
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID__SHIFT 0x10
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID__SHIFT 0x18
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF__SHIFT 0x1c
+#define IH_INT_FLOOD_STATUS__INT_DROPPED__SHIFT 0x1e
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT_MASK 0x000000FFL
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID_MASK 0x0000FF00L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID_MASK 0x00FF0000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID_MASK 0x0F000000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_MASK 0x10000000L
+#define IH_INT_FLOOD_STATUS__INT_DROPPED_MASK 0x40000000L
+//IH_STORM_CLIENT_LIST_CNTL
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT__SHIFT 0x1
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT__SHIFT 0x2
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT__SHIFT 0x3
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT__SHIFT 0x4
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT__SHIFT 0x5
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT__SHIFT 0x6
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT__SHIFT 0x7
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT__SHIFT 0x8
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT__SHIFT 0x9
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT__SHIFT 0xa
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT__SHIFT 0xb
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT__SHIFT 0xc
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT__SHIFT 0xd
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT__SHIFT 0xe
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT__SHIFT 0xf
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT__SHIFT 0x10
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT__SHIFT 0x11
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT__SHIFT 0x12
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT__SHIFT 0x13
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT__SHIFT 0x14
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT__SHIFT 0x15
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT__SHIFT 0x16
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT__SHIFT 0x17
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT__SHIFT 0x18
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT__SHIFT 0x19
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT__SHIFT 0x1a
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT__SHIFT 0x1b
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT__SHIFT 0x1c
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT__SHIFT 0x1d
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT__SHIFT 0x1e
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT__SHIFT 0x1f
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT_MASK 0x00000002L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT_MASK 0x00000004L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT_MASK 0x00000008L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT_MASK 0x00000010L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT_MASK 0x00000020L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT_MASK 0x00000040L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT_MASK 0x00000080L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT_MASK 0x00000100L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT_MASK 0x00000200L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT_MASK 0x00000400L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT_MASK 0x00000800L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT_MASK 0x00001000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT_MASK 0x00002000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT_MASK 0x00004000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT_MASK 0x00008000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT_MASK 0x00010000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT_MASK 0x00020000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT_MASK 0x00040000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT_MASK 0x00080000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT_MASK 0x00100000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT_MASK 0x00200000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT_MASK 0x00400000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT_MASK 0x00800000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT_MASK 0x01000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT_MASK 0x02000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT_MASK 0x04000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT_MASK 0x08000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT_MASK 0x10000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT_MASK 0x20000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L
+//IH_CLK_CTRL
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x19
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK 0x02000000L
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE_MASK 0x40000000L
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
+//IH_INT_FLAGS
+#define IH_INT_FLAGS__CLIENT_0_FLAG__SHIFT 0x0
+#define IH_INT_FLAGS__CLIENT_1_FLAG__SHIFT 0x1
+#define IH_INT_FLAGS__CLIENT_2_FLAG__SHIFT 0x2
+#define IH_INT_FLAGS__CLIENT_3_FLAG__SHIFT 0x3
+#define IH_INT_FLAGS__CLIENT_4_FLAG__SHIFT 0x4
+#define IH_INT_FLAGS__CLIENT_5_FLAG__SHIFT 0x5
+#define IH_INT_FLAGS__CLIENT_6_FLAG__SHIFT 0x6
+#define IH_INT_FLAGS__CLIENT_7_FLAG__SHIFT 0x7
+#define IH_INT_FLAGS__CLIENT_8_FLAG__SHIFT 0x8
+#define IH_INT_FLAGS__CLIENT_9_FLAG__SHIFT 0x9
+#define IH_INT_FLAGS__CLIENT_10_FLAG__SHIFT 0xa
+#define IH_INT_FLAGS__CLIENT_11_FLAG__SHIFT 0xb
+#define IH_INT_FLAGS__CLIENT_12_FLAG__SHIFT 0xc
+#define IH_INT_FLAGS__CLIENT_13_FLAG__SHIFT 0xd
+#define IH_INT_FLAGS__CLIENT_14_FLAG__SHIFT 0xe
+#define IH_INT_FLAGS__CLIENT_15_FLAG__SHIFT 0xf
+#define IH_INT_FLAGS__CLIENT_16_FLAG__SHIFT 0x10
+#define IH_INT_FLAGS__CLIENT_17_FLAG__SHIFT 0x11
+#define IH_INT_FLAGS__CLIENT_18_FLAG__SHIFT 0x12
+#define IH_INT_FLAGS__CLIENT_19_FLAG__SHIFT 0x13
+#define IH_INT_FLAGS__CLIENT_20_FLAG__SHIFT 0x14
+#define IH_INT_FLAGS__CLIENT_21_FLAG__SHIFT 0x15
+#define IH_INT_FLAGS__CLIENT_22_FLAG__SHIFT 0x16
+#define IH_INT_FLAGS__CLIENT_23_FLAG__SHIFT 0x17
+#define IH_INT_FLAGS__CLIENT_24_FLAG__SHIFT 0x18
+#define IH_INT_FLAGS__CLIENT_25_FLAG__SHIFT 0x19
+#define IH_INT_FLAGS__CLIENT_26_FLAG__SHIFT 0x1a
+#define IH_INT_FLAGS__CLIENT_27_FLAG__SHIFT 0x1b
+#define IH_INT_FLAGS__CLIENT_28_FLAG__SHIFT 0x1c
+#define IH_INT_FLAGS__CLIENT_29_FLAG__SHIFT 0x1d
+#define IH_INT_FLAGS__CLIENT_30_FLAG__SHIFT 0x1e
+#define IH_INT_FLAGS__CLIENT_31_FLAG__SHIFT 0x1f
+#define IH_INT_FLAGS__CLIENT_0_FLAG_MASK 0x00000001L
+#define IH_INT_FLAGS__CLIENT_1_FLAG_MASK 0x00000002L
+#define IH_INT_FLAGS__CLIENT_2_FLAG_MASK 0x00000004L
+#define IH_INT_FLAGS__CLIENT_3_FLAG_MASK 0x00000008L
+#define IH_INT_FLAGS__CLIENT_4_FLAG_MASK 0x00000010L
+#define IH_INT_FLAGS__CLIENT_5_FLAG_MASK 0x00000020L
+#define IH_INT_FLAGS__CLIENT_6_FLAG_MASK 0x00000040L
+#define IH_INT_FLAGS__CLIENT_7_FLAG_MASK 0x00000080L
+#define IH_INT_FLAGS__CLIENT_8_FLAG_MASK 0x00000100L
+#define IH_INT_FLAGS__CLIENT_9_FLAG_MASK 0x00000200L
+#define IH_INT_FLAGS__CLIENT_10_FLAG_MASK 0x00000400L
+#define IH_INT_FLAGS__CLIENT_11_FLAG_MASK 0x00000800L
+#define IH_INT_FLAGS__CLIENT_12_FLAG_MASK 0x00001000L
+#define IH_INT_FLAGS__CLIENT_13_FLAG_MASK 0x00002000L
+#define IH_INT_FLAGS__CLIENT_14_FLAG_MASK 0x00004000L
+#define IH_INT_FLAGS__CLIENT_15_FLAG_MASK 0x00008000L
+#define IH_INT_FLAGS__CLIENT_16_FLAG_MASK 0x00010000L
+#define IH_INT_FLAGS__CLIENT_17_FLAG_MASK 0x00020000L
+#define IH_INT_FLAGS__CLIENT_18_FLAG_MASK 0x00040000L
+#define IH_INT_FLAGS__CLIENT_19_FLAG_MASK 0x00080000L
+#define IH_INT_FLAGS__CLIENT_20_FLAG_MASK 0x00100000L
+#define IH_INT_FLAGS__CLIENT_21_FLAG_MASK 0x00200000L
+#define IH_INT_FLAGS__CLIENT_22_FLAG_MASK 0x00400000L
+#define IH_INT_FLAGS__CLIENT_23_FLAG_MASK 0x00800000L
+#define IH_INT_FLAGS__CLIENT_24_FLAG_MASK 0x01000000L
+#define IH_INT_FLAGS__CLIENT_25_FLAG_MASK 0x02000000L
+#define IH_INT_FLAGS__CLIENT_26_FLAG_MASK 0x04000000L
+#define IH_INT_FLAGS__CLIENT_27_FLAG_MASK 0x08000000L
+#define IH_INT_FLAGS__CLIENT_28_FLAG_MASK 0x10000000L
+#define IH_INT_FLAGS__CLIENT_29_FLAG_MASK 0x20000000L
+#define IH_INT_FLAGS__CLIENT_30_FLAG_MASK 0x40000000L
+#define IH_INT_FLAGS__CLIENT_31_FLAG_MASK 0x80000000L
+//IH_LAST_INT_INFO0
+#define IH_LAST_INT_INFO0__CLIENT_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO0__SOURCE_ID__SHIFT 0x8
+#define IH_LAST_INT_INFO0__RING_ID__SHIFT 0x10
+#define IH_LAST_INT_INFO0__VM_ID__SHIFT 0x18
+#define IH_LAST_INT_INFO0__VMID_TYPE__SHIFT 0x1f
+#define IH_LAST_INT_INFO0__CLIENT_ID_MASK 0x000000FFL
+#define IH_LAST_INT_INFO0__SOURCE_ID_MASK 0x0000FF00L
+#define IH_LAST_INT_INFO0__RING_ID_MASK 0x00FF0000L
+#define IH_LAST_INT_INFO0__VM_ID_MASK 0x0F000000L
+#define IH_LAST_INT_INFO0__VMID_TYPE_MASK 0x80000000L
+//IH_LAST_INT_INFO1
+#define IH_LAST_INT_INFO1__CONTEXT_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO1__CONTEXT_ID_MASK 0xFFFFFFFFL
+//IH_LAST_INT_INFO2
+#define IH_LAST_INT_INFO2__PAS_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO2__VF_ID__SHIFT 0x10
+#define IH_LAST_INT_INFO2__VF__SHIFT 0x14
+#define IH_LAST_INT_INFO2__PAS_ID_MASK 0x0000FFFFL
+#define IH_LAST_INT_INFO2__VF_ID_MASK 0x000F0000L
+#define IH_LAST_INT_INFO2__VF_MASK 0x00100000L
+//IH_SCRATCH
+#define IH_SCRATCH__DATA__SHIFT 0x0
+#define IH_SCRATCH__DATA_MASK 0xFFFFFFFFL
+//IH_CLIENT_CREDIT_ERROR
+#define IH_CLIENT_CREDIT_ERROR__CLEAR__SHIFT 0x0
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR__SHIFT 0x1
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR__SHIFT 0x2
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR__SHIFT 0x3
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR__SHIFT 0x4
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR__SHIFT 0x5
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR__SHIFT 0x6
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR__SHIFT 0x7
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR__SHIFT 0x8
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR__SHIFT 0x9
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR__SHIFT 0xa
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR__SHIFT 0xb
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR__SHIFT 0xc
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR__SHIFT 0xd
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR__SHIFT 0xe
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR__SHIFT 0xf
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR__SHIFT 0x10
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR__SHIFT 0x11
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR__SHIFT 0x12
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR__SHIFT 0x13
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR__SHIFT 0x14
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR__SHIFT 0x15
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR__SHIFT 0x16
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR__SHIFT 0x17
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR__SHIFT 0x18
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR__SHIFT 0x19
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR__SHIFT 0x1a
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR__SHIFT 0x1b
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR__SHIFT 0x1c
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR__SHIFT 0x1d
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR__SHIFT 0x1e
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR__SHIFT 0x1f
+#define IH_CLIENT_CREDIT_ERROR__CLEAR_MASK 0x00000001L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR_MASK 0x00000002L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR_MASK 0x00000004L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR_MASK 0x00000008L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR_MASK 0x00000010L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR_MASK 0x00000020L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR_MASK 0x00000040L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR_MASK 0x00000080L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR_MASK 0x00000100L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR_MASK 0x00000200L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR_MASK 0x00000400L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR_MASK 0x00000800L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR_MASK 0x00001000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR_MASK 0x00002000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR_MASK 0x00004000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR_MASK 0x00008000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR_MASK 0x00010000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR_MASK 0x00020000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR_MASK 0x00040000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR_MASK 0x00080000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR_MASK 0x00100000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR_MASK 0x00200000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR_MASK 0x00400000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR_MASK 0x00800000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR_MASK 0x01000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR_MASK 0x02000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR_MASK 0x04000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR_MASK 0x08000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR_MASK 0x10000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR_MASK 0x20000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR_MASK 0x40000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR_MASK 0x80000000L
+//IH_GPU_IOV_VIOLATION_LOG
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12
+#define IH_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID__SHIFT 0x14
+#define IH_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID_MASK 0x00F00000L
+#define IH_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
+//IH_COOKIE_REC_VIOLATION_LOG
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID__SHIFT 0x10
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID_MASK 0x00FF0000L
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
+//IH_CREDIT_STATUS
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED__SHIFT 0x1
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED__SHIFT 0x2
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED__SHIFT 0x3
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED__SHIFT 0x4
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED__SHIFT 0x5
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED__SHIFT 0x6
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED__SHIFT 0x7
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED__SHIFT 0x8
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED__SHIFT 0x9
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED__SHIFT 0xa
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED__SHIFT 0xb
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED__SHIFT 0xc
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED__SHIFT 0xd
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED__SHIFT 0xe
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED__SHIFT 0xf
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED__SHIFT 0x10
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED__SHIFT 0x11
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED__SHIFT 0x12
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED__SHIFT 0x13
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED__SHIFT 0x14
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED__SHIFT 0x15
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED__SHIFT 0x16
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED__SHIFT 0x17
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED__SHIFT 0x18
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED__SHIFT 0x19
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED__SHIFT 0x1a
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED__SHIFT 0x1b
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED__SHIFT 0x1c
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED__SHIFT 0x1d
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED__SHIFT 0x1e
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED__SHIFT 0x1f
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED_MASK 0x00000002L
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED_MASK 0x00000004L
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED_MASK 0x00000008L
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED_MASK 0x00000010L
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED_MASK 0x00000020L
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED_MASK 0x00000040L
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED_MASK 0x00000080L
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED_MASK 0x00000100L
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED_MASK 0x00000200L
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED_MASK 0x00000400L
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED_MASK 0x00000800L
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED_MASK 0x00001000L
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED_MASK 0x00002000L
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED_MASK 0x00004000L
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED_MASK 0x00008000L
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED_MASK 0x00010000L
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED_MASK 0x00020000L
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED_MASK 0x00040000L
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED_MASK 0x00080000L
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED_MASK 0x00100000L
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED_MASK 0x00200000L
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED_MASK 0x00400000L
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED_MASK 0x00800000L
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED_MASK 0x01000000L
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED_MASK 0x02000000L
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED_MASK 0x04000000L
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED_MASK 0x08000000L
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED_MASK 0x10000000L
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED_MASK 0x20000000L
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED_MASK 0x40000000L
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED_MASK 0x80000000L
+//IH_MMHUB_ERROR
+#define IH_MMHUB_ERROR__IH_BRESP_01__SHIFT 0x1
+#define IH_MMHUB_ERROR__IH_BRESP_10__SHIFT 0x2
+#define IH_MMHUB_ERROR__IH_BRESP_11__SHIFT 0x3
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01__SHIFT 0x5
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10__SHIFT 0x6
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11__SHIFT 0x7
+#define IH_MMHUB_ERROR__IH_BRESP_01_MASK 0x00000002L
+#define IH_MMHUB_ERROR__IH_BRESP_10_MASK 0x00000004L
+#define IH_MMHUB_ERROR__IH_BRESP_11_MASK 0x00000008L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01_MASK 0x00000020L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10_MASK 0x00000040L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11_MASK 0x00000080L
+//IH_MEM_POWER_CTRL
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN__SHIFT 0x0
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN__SHIFT 0x1
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN__SHIFT 0x2
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN__SHIFT 0x3
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_LS_ENTER_DELAY__SHIFT 0xe
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN_MASK 0x00000002L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN_MASK 0x00000004L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN_MASK 0x00000008L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_LS_ENTER_DELAY_MASK 0x0000C000L
+//IH_REGISTER_LAST_PART2
+#define IH_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL
+//SEM_CLK_CTRL
+#define SEM_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SEM_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SEM_CLK_CTRL__RESERVED__SHIFT 0xc
+#define SEM_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define SEM_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define SEM_CLK_CTRL__MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
+#define SEM_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define SEM_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define SEM_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define SEM_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e
+#define SEM_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define SEM_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SEM_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SEM_CLK_CTRL__RESERVED_MASK 0x00FFF000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define SEM_CLK_CTRL__MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define SEM_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE_MASK 0x40000000L
+#define SEM_CLK_CTRL__REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
+//SEM_UTC_CREDIT
+#define SEM_UTC_CREDIT__UTCL2_CREDIT__SHIFT 0x0
+#define SEM_UTC_CREDIT__WATERMARK__SHIFT 0x8
+#define SEM_UTC_CREDIT__UTCL2_CREDIT_MASK 0x0000001FL
+#define SEM_UTC_CREDIT__WATERMARK_MASK 0x00000F00L
+//SEM_UTC_CONFIG
+#define SEM_UTC_CONFIG__USE_MTYPE__SHIFT 0x0
+#define SEM_UTC_CONFIG__FORCE_SNOOP__SHIFT 0x3
+#define SEM_UTC_CONFIG__FORCE_GCC__SHIFT 0x4
+#define SEM_UTC_CONFIG__USE_PT_SNOOP__SHIFT 0x5
+#define SEM_UTC_CONFIG__USE_MTYPE_MASK 0x00000007L
+#define SEM_UTC_CONFIG__FORCE_SNOOP_MASK 0x00000008L
+#define SEM_UTC_CONFIG__FORCE_GCC_MASK 0x00000010L
+#define SEM_UTC_CONFIG__USE_PT_SNOOP_MASK 0x00000020L
+//SEM_UTCL2_TRAN_EN_LUT
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA0_UTCL2_EN__SHIFT 0x0
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA1_UTCL2_EN__SHIFT 0x1
+#define SEM_UTCL2_TRAN_EN_LUT__UVD_UTCL2_EN__SHIFT 0x2
+#define SEM_UTCL2_TRAN_EN_LUT__VCE0_UTCL2_EN__SHIFT 0x3
+#define SEM_UTCL2_TRAN_EN_LUT__ACP_UTCL2_EN__SHIFT 0x4
+#define SEM_UTCL2_TRAN_EN_LUT__ISP_UTCL2_EN__SHIFT 0x5
+#define SEM_UTCL2_TRAN_EN_LUT__VCE1_UTCL2_EN__SHIFT 0x6
+#define SEM_UTCL2_TRAN_EN_LUT__VP8_UTCL2_EN__SHIFT 0x7
+#define SEM_UTCL2_TRAN_EN_LUT__UVD1_UTCL2_EN__SHIFT 0x8
+#define SEM_UTCL2_TRAN_EN_LUT__RESERVED__SHIFT 0x9
+#define SEM_UTCL2_TRAN_EN_LUT__CP_UTCL2_EN__SHIFT 0x1f
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA0_UTCL2_EN_MASK 0x00000001L
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA1_UTCL2_EN_MASK 0x00000002L
+#define SEM_UTCL2_TRAN_EN_LUT__UVD_UTCL2_EN_MASK 0x00000004L
+#define SEM_UTCL2_TRAN_EN_LUT__VCE0_UTCL2_EN_MASK 0x00000008L
+#define SEM_UTCL2_TRAN_EN_LUT__ACP_UTCL2_EN_MASK 0x00000010L
+#define SEM_UTCL2_TRAN_EN_LUT__ISP_UTCL2_EN_MASK 0x00000020L
+#define SEM_UTCL2_TRAN_EN_LUT__VCE1_UTCL2_EN_MASK 0x00000040L
+#define SEM_UTCL2_TRAN_EN_LUT__VP8_UTCL2_EN_MASK 0x00000080L
+#define SEM_UTCL2_TRAN_EN_LUT__UVD1_UTCL2_EN_MASK 0x00000100L
+#define SEM_UTCL2_TRAN_EN_LUT__RESERVED_MASK 0x7FFFFE00L
+#define SEM_UTCL2_TRAN_EN_LUT__CP_UTCL2_EN_MASK 0x80000000L
+//SEM_MCIF_CONFIG
+#define SEM_MCIF_CONFIG__MC_REQ_SWAP__SHIFT 0x0
+#define SEM_MCIF_CONFIG__MC_WRREQ_CREDIT__SHIFT 0x2
+#define SEM_MCIF_CONFIG__MC_RDREQ_CREDIT__SHIFT 0x8
+#define SEM_MCIF_CONFIG__MC_REQ_SWAP_MASK 0x00000003L
+#define SEM_MCIF_CONFIG__MC_WRREQ_CREDIT_MASK 0x000000FCL
+#define SEM_MCIF_CONFIG__MC_RDREQ_CREDIT_MASK 0x00003F00L
+//SEM_PERFMON_CNTL
+#define SEM_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
+#define SEM_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
+#define SEM_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define SEM_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
+#define SEM_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
+#define SEM_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
+#define SEM_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
+#define SEM_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
+#define SEM_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
+#define SEM_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
+#define SEM_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
+#define SEM_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
+//SEM_PERFCOUNTER0_RESULT
+#define SEM_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define SEM_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SEM_PERFCOUNTER1_RESULT
+#define SEM_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define SEM_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SEM_STATUS
+#define SEM_STATUS__SEM_IDLE__SHIFT 0x0
+#define SEM_STATUS__SEM_INTERNAL_IDLE__SHIFT 0x1
+#define SEM_STATUS__MC_RDREQ_FIFO_FULL__SHIFT 0x2
+#define SEM_STATUS__MC_WRREQ_FIFO_FULL__SHIFT 0x3
+#define SEM_STATUS__WRITE1_FIFO_FULL__SHIFT 0x4
+#define SEM_STATUS__CHECK0_FIFO_FULL__SHIFT 0x5
+#define SEM_STATUS__MC_RDREQ_PENDING__SHIFT 0x6
+#define SEM_STATUS__MC_WRREQ_PENDING__SHIFT 0x7
+#define SEM_STATUS__SDMA0_MAILBOX_PENDING__SHIFT 0x8
+#define SEM_STATUS__SDMA1_MAILBOX_PENDING__SHIFT 0x9
+#define SEM_STATUS__UVD_MAILBOX_PENDING__SHIFT 0xa
+#define SEM_STATUS__VCE_MAILBOX_PENDING__SHIFT 0xb
+#define SEM_STATUS__CPG1_MAILBOX_PENDING__SHIFT 0xc
+#define SEM_STATUS__CPG2_MAILBOX_PENDING__SHIFT 0xd
+#define SEM_STATUS__VCE1_MAILBOX_PENDING__SHIFT 0xe
+#define SEM_STATUS__ATC_REQ_PENDING__SHIFT 0xf
+#define SEM_STATUS__OUTSTANDING_CLEAN__SHIFT 0x10
+#define SEM_STATUS__INVREQ_FLUSH_VF_MISMATCH__SHIFT 0x11
+#define SEM_STATUS__INVREQ_NONFLUSH_VF_MISMATCH__SHIFT 0x12
+#define SEM_STATUS__INVREQ_CNT_IDLE__SHIFT 0x13
+#define SEM_STATUS__ENTRYLIST_IDLE__SHIFT 0x14
+#define SEM_STATUS__MIF_IDLE__SHIFT 0x15
+#define SEM_STATUS__REGISTER_IDLE__SHIFT 0x16
+#define SEM_STATUS__ATCL2_INVREQ_IDLE__SHIFT 0x17
+#define SEM_STATUS__UVD1_MAILBOX_PENDING__SHIFT 0x18
+#define SEM_STATUS__SWITCH_READY__SHIFT 0x1f
+#define SEM_STATUS__SEM_IDLE_MASK 0x00000001L
+#define SEM_STATUS__SEM_INTERNAL_IDLE_MASK 0x00000002L
+#define SEM_STATUS__MC_RDREQ_FIFO_FULL_MASK 0x00000004L
+#define SEM_STATUS__MC_WRREQ_FIFO_FULL_MASK 0x00000008L
+#define SEM_STATUS__WRITE1_FIFO_FULL_MASK 0x00000010L
+#define SEM_STATUS__CHECK0_FIFO_FULL_MASK 0x00000020L
+#define SEM_STATUS__MC_RDREQ_PENDING_MASK 0x00000040L
+#define SEM_STATUS__MC_WRREQ_PENDING_MASK 0x00000080L
+#define SEM_STATUS__SDMA0_MAILBOX_PENDING_MASK 0x00000100L
+#define SEM_STATUS__SDMA1_MAILBOX_PENDING_MASK 0x00000200L
+#define SEM_STATUS__UVD_MAILBOX_PENDING_MASK 0x00000400L
+#define SEM_STATUS__VCE_MAILBOX_PENDING_MASK 0x00000800L
+#define SEM_STATUS__CPG1_MAILBOX_PENDING_MASK 0x00001000L
+#define SEM_STATUS__CPG2_MAILBOX_PENDING_MASK 0x00002000L
+#define SEM_STATUS__VCE1_MAILBOX_PENDING_MASK 0x00004000L
+#define SEM_STATUS__ATC_REQ_PENDING_MASK 0x00008000L
+#define SEM_STATUS__OUTSTANDING_CLEAN_MASK 0x00010000L
+#define SEM_STATUS__INVREQ_FLUSH_VF_MISMATCH_MASK 0x00020000L
+#define SEM_STATUS__INVREQ_NONFLUSH_VF_MISMATCH_MASK 0x00040000L
+#define SEM_STATUS__INVREQ_CNT_IDLE_MASK 0x00080000L
+#define SEM_STATUS__ENTRYLIST_IDLE_MASK 0x00100000L
+#define SEM_STATUS__MIF_IDLE_MASK 0x00200000L
+#define SEM_STATUS__REGISTER_IDLE_MASK 0x00400000L
+#define SEM_STATUS__ATCL2_INVREQ_IDLE_MASK 0x00800000L
+#define SEM_STATUS__UVD1_MAILBOX_PENDING_MASK 0x01000000L
+#define SEM_STATUS__SWITCH_READY_MASK 0x80000000L
+//SEM_MAILBOX_CLIENTCONFIG
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0__SHIFT 0x0
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1__SHIFT 0x3
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT2__SHIFT 0x6
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT3__SHIFT 0x9
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA_CLIENT0__SHIFT 0xc
+#define SEM_MAILBOX_CLIENTCONFIG__UVD_CLIENT0__SHIFT 0xf
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA1_CLIENT0__SHIFT 0x12
+#define SEM_MAILBOX_CLIENTCONFIG__VCE_CLIENT0__SHIFT 0x15
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0_MASK 0x00000007L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1_MASK 0x00000038L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT2_MASK 0x000001C0L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT3_MASK 0x00000E00L
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA_CLIENT0_MASK 0x00007000L
+#define SEM_MAILBOX_CLIENTCONFIG__UVD_CLIENT0_MASK 0x00038000L
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA1_CLIENT0_MASK 0x001C0000L
+#define SEM_MAILBOX_CLIENTCONFIG__VCE_CLIENT0_MASK 0x00E00000L
+//SEM_MAILBOX
+#define SEM_MAILBOX__HOSTPORT__SHIFT 0x0
+#define SEM_MAILBOX__RESERVED__SHIFT 0x10
+#define SEM_MAILBOX__HOSTPORT_MASK 0x0000FFFFL
+#define SEM_MAILBOX__RESERVED_MASK 0xFFFF0000L
+//SEM_MAILBOX_CONTROL
+#define SEM_MAILBOX_CONTROL__HOSTPORT_ENABLE__SHIFT 0x0
+#define SEM_MAILBOX_CONTROL__RESERVED__SHIFT 0x10
+#define SEM_MAILBOX_CONTROL__HOSTPORT_ENABLE_MASK 0x0000FFFFL
+#define SEM_MAILBOX_CONTROL__RESERVED_MASK 0xFFFF0000L
+//SEM_CHICKEN_BITS
+#define SEM_CHICKEN_BITS__VMID_PIPELINE_EN__SHIFT 0x0
+#define SEM_CHICKEN_BITS__ENTRY_PIPELINE_EN__SHIFT 0x1
+#define SEM_CHICKEN_BITS__CHECK_COUNTER_EN__SHIFT 0x2
+#define SEM_CHICKEN_BITS__ECC_BEHAVIOR__SHIFT 0x3
+#define SEM_CHICKEN_BITS__PHY_TRAN_EN__SHIFT 0x6
+#define SEM_CHICKEN_BITS__ADDR_CMP_UNTRAN_EN__SHIFT 0x7
+#define SEM_CHICKEN_BITS__IDLE_COUNTER_INDEX__SHIFT 0x8
+#define SEM_CHICKEN_BITS__OUTSTANDING_CLEAN_COUNTER_INDEX__SHIFT 0xa
+#define SEM_CHICKEN_BITS__ATCL2_BUS_ID__SHIFT 0xc
+#define SEM_CHICKEN_BITS__ATOMIC_EN__SHIFT 0xe
+#define SEM_CHICKEN_BITS__EXTERNAL_ATOMIC_CHECK__SHIFT 0xf
+#define SEM_CHICKEN_BITS__CLEAR_MAILBOX__SHIFT 0x10
+#define SEM_CHICKEN_BITS__INVACK_AFTER_OUTSTANDING_CLEAN__SHIFT 0x12
+#define SEM_CHICKEN_BITS__UTC_TAG_CONFLICT_CHECK__SHIFT 0x13
+#define SEM_CHICKEN_BITS__VMID_PIPELINE_EN_MASK 0x00000001L
+#define SEM_CHICKEN_BITS__ENTRY_PIPELINE_EN_MASK 0x00000002L
+#define SEM_CHICKEN_BITS__CHECK_COUNTER_EN_MASK 0x00000004L
+#define SEM_CHICKEN_BITS__ECC_BEHAVIOR_MASK 0x00000018L
+#define SEM_CHICKEN_BITS__PHY_TRAN_EN_MASK 0x00000040L
+#define SEM_CHICKEN_BITS__ADDR_CMP_UNTRAN_EN_MASK 0x00000080L
+#define SEM_CHICKEN_BITS__IDLE_COUNTER_INDEX_MASK 0x00000300L
+#define SEM_CHICKEN_BITS__OUTSTANDING_CLEAN_COUNTER_INDEX_MASK 0x00000C00L
+#define SEM_CHICKEN_BITS__ATCL2_BUS_ID_MASK 0x00003000L
+#define SEM_CHICKEN_BITS__ATOMIC_EN_MASK 0x00004000L
+#define SEM_CHICKEN_BITS__EXTERNAL_ATOMIC_CHECK_MASK 0x00008000L
+#define SEM_CHICKEN_BITS__CLEAR_MAILBOX_MASK 0x00030000L
+#define SEM_CHICKEN_BITS__INVACK_AFTER_OUTSTANDING_CLEAN_MASK 0x00040000L
+#define SEM_CHICKEN_BITS__UTC_TAG_CONFLICT_CHECK_MASK 0x00080000L
+//SEM_MAILBOX_CLIENTCONFIG_EXTRA
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__VCE1_CLIENT0__SHIFT 0x0
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__UVD1_CLIENT0__SHIFT 0x4
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__VCE1_CLIENT0_MASK 0x0000000FL
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__UVD1_CLIENT0_MASK 0x000000F0L
+//SEM_GPU_IOV_VIOLATION_LOG
+#define SEM_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SEM_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SEM_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SEM_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12
+#define SEM_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
+#define SEM_GPU_IOV_VIOLATION_LOG__VF_ID__SHIFT 0x14
+#define SEM_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
+#define SEM_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SEM_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SEM_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
+#define SEM_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L
+#define SEM_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
+#define SEM_GPU_IOV_VIOLATION_LOG__VF_ID_MASK 0x00F00000L
+#define SEM_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
+//SEM_OUTSTANDING_THRESHOLD
+#define SEM_OUTSTANDING_THRESHOLD__VALUE__SHIFT 0x0
+#define SEM_OUTSTANDING_THRESHOLD__VALUE_MASK 0x000000FFL
+//SEM_MEM_POWER_CTRL
+#define SEM_MEM_POWER_CTRL__MEM_POWER_CTRL_EN__SHIFT 0x0
+#define SEM_MEM_POWER_CTRL__MEM_POWER_LS_EN__SHIFT 0x1
+#define SEM_MEM_POWER_CTRL__MEM_POWER_DS_EN__SHIFT 0x2
+#define SEM_MEM_POWER_CTRL__MEM_POWER_SD_EN__SHIFT 0x3
+#define SEM_MEM_POWER_CTRL__MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define SEM_MEM_POWER_CTRL__MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define SEM_MEM_POWER_CTRL__MEM_POWER_DOWN_LS_ENTER_DELAY__SHIFT 0xe
+#define SEM_MEM_POWER_CTRL__MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_LS_EN_MASK 0x00000002L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_DS_EN_MASK 0x00000004L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_SD_EN_MASK 0x00000008L
+#define SEM_MEM_POWER_CTRL__MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define SEM_MEM_POWER_CTRL__MEM_POWER_DOWN_LS_ENTER_DELAY_MASK 0x0000C000L
+//SEM_REGISTER_LAST_PART2
+#define SEM_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0
+#define SEM_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL
+//IH_ACTIVE_FCN_ID
+#define IH_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
+#define IH_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define IH_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
+#define IH_ACTIVE_FCN_ID__VF_ID_MASK 0x0000000FL
+#define IH_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define IH_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
+//IH_VIRT_RESET_REQ
+#define IH_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define IH_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define IH_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define IH_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//IH_CLIENT_CFG
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT 0x0
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK 0x0000001FL
+//IH_CLIENT_CFG_INDEX
+#define IH_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0
+#define IH_CLIENT_CFG_INDEX__INDEX_MASK 0x0000001FL
+//IH_CLIENT_CFG_DATA
+#define IH_CLIENT_CFG_DATA__CREDIT_RETURN_ADDR__SHIFT 0x0
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE__SHIFT 0x12
+#define IH_CLIENT_CFG_DATA__RING_ID__SHIFT 0x14
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT__SHIFT 0x16
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID__SHIFT 0x18
+#define IH_CLIENT_CFG_DATA__CREDIT_RETURN_ADDR_MASK 0x0003FFFFL
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE_MASK 0x000C0000L
+#define IH_CLIENT_CFG_DATA__RING_ID_MASK 0x00300000L
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT_MASK 0x00C00000L
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID_MASK 0x01000000L
+//IH_CID_REMAP_INDEX
+#define IH_CID_REMAP_INDEX__INDEX__SHIFT 0x0
+#define IH_CID_REMAP_INDEX__INDEX_MASK 0x00000003L
+//IH_CID_REMAP_DATA
+#define IH_CID_REMAP_DATA__CLIENT_ID__SHIFT 0x0
+#define IH_CID_REMAP_DATA__INITIATOR_ID__SHIFT 0x8
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP__SHIFT 0x10
+#define IH_CID_REMAP_DATA__CLIENT_ID_MASK 0x000000FFL
+#define IH_CID_REMAP_DATA__INITIATOR_ID_MASK 0x0000FF00L
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP_MASK 0x00FF0000L
+//IH_CHICKEN
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE__SHIFT 0x0
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE__SHIFT 0x3
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE__SHIFT 0x4
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE_MASK 0x00000001L
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE_MASK 0x00000008L
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE_MASK 0x00000010L
+//IH_MMHUB_CNTL
+#define IH_MMHUB_CNTL__UNITID__SHIFT 0x0
+#define IH_MMHUB_CNTL__IV_TLVL__SHIFT 0x8
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL__SHIFT 0xc
+#define IH_MMHUB_CNTL__UNITID_MASK 0x0000003FL
+#define IH_MMHUB_CNTL__IV_TLVL_MASK 0x00000700L
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL_MASK 0x00007000L
+//IH_INT_DROP_CNTL
+#define IH_INT_DROP_CNTL__INT_DROP_EN__SHIFT 0x0
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN__SHIFT 0x1
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN__SHIFT 0x2
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN__SHIFT 0x3
+#define IH_INT_DROP_CNTL__VF_MATCH_EN__SHIFT 0x4
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN__SHIFT 0x5
+#define IH_INT_DROP_CNTL__INT_DROP_MODE__SHIFT 0x6
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN__SHIFT 0x8
+#define IH_INT_DROP_CNTL__INT_DROPPED__SHIFT 0x10
+#define IH_INT_DROP_CNTL__INT_DROP_EN_MASK 0x00000001L
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN_MASK 0x00000002L
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN_MASK 0x00000004L
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN_MASK 0x00000008L
+#define IH_INT_DROP_CNTL__VF_MATCH_EN_MASK 0x00000010L
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN_MASK 0x00000020L
+#define IH_INT_DROP_CNTL__INT_DROP_MODE_MASK 0x000000C0L
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN_MASK 0x00000100L
+#define IH_INT_DROP_CNTL__INT_DROPPED_MASK 0x00010000L
+//IH_INT_DROP_MATCH_VALUE0
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE__SHIFT 0x0
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE__SHIFT 0x8
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE__SHIFT 0x10
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE__SHIFT 0x17
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE__SHIFT 0x18
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE_MASK 0x000000FFL
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE_MASK 0x0000FF00L
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE_MASK 0x000F0000L
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE_MASK 0x00800000L
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE_MASK 0xFF000000L
+//IH_INT_DROP_MATCH_VALUE1
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE__SHIFT 0x0
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE_MASK 0xFFFFFFFFL
+//IH_INT_DROP_MATCH_MASK0
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK__SHIFT 0x0
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK__SHIFT 0x8
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK__SHIFT 0x10
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK__SHIFT 0x17
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK__SHIFT 0x18
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK_MASK 0x000000FFL
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK_MASK 0x0000FF00L
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK_MASK 0x000F0000L
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK_MASK 0x00800000L
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK_MASK 0xFF000000L
+//IH_INT_DROP_MATCH_MASK1
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK__SHIFT 0x0
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK_MASK 0xFFFFFFFFL
+//IH_REGISTER_LAST_PART1
+#define IH_REGISTER_LAST_PART1__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART1__RESERVED_MASK 0xFFFFFFFFL
+//SEM_ACTIVE_FCN_ID
+#define SEM_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SEM_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SEM_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SEM_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SEM_VIRT_RESET_REQ
+#define SEM_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SEM_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SEM_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SEM_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SEM_RESP_SDMA0
+#define SEM_RESP_SDMA0__ADDR__SHIFT 0x2
+#define SEM_RESP_SDMA0__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_SDMA1
+#define SEM_RESP_SDMA1__ADDR__SHIFT 0x2
+#define SEM_RESP_SDMA1__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_UVD
+#define SEM_RESP_UVD__ADDR__SHIFT 0x2
+#define SEM_RESP_UVD__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_VCE_0
+#define SEM_RESP_VCE_0__ADDR__SHIFT 0x2
+#define SEM_RESP_VCE_0__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_ACP
+#define SEM_RESP_ACP__ADDR__SHIFT 0x2
+#define SEM_RESP_ACP__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_ISP
+#define SEM_RESP_ISP__ADDR__SHIFT 0x2
+#define SEM_RESP_ISP__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_VCE_1
+#define SEM_RESP_VCE_1__ADDR__SHIFT 0x2
+#define SEM_RESP_VCE_1__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_VP8
+#define SEM_RESP_VP8__ADDR__SHIFT 0x2
+#define SEM_RESP_VP8__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_GC
+#define SEM_RESP_GC__ADDR__SHIFT 0x2
+#define SEM_RESP_GC__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_UVD_1
+#define SEM_RESP_UVD_1__ADDR__SHIFT 0x2
+#define SEM_RESP_UVD_1__ADDR_MASK 0x000FFFFCL
+//SEM_CID_REMAP_INDEX
+#define SEM_CID_REMAP_INDEX__INDEX__SHIFT 0x0
+#define SEM_CID_REMAP_INDEX__INDEX_MASK 0x00000003L
+//SEM_CID_REMAP_DATA
+#define SEM_CID_REMAP_DATA__CLIENT_ID__SHIFT 0x0
+#define SEM_CID_REMAP_DATA__INITIATOR_ID__SHIFT 0x8
+#define SEM_CID_REMAP_DATA__CLIENT_ID_REMAP__SHIFT 0x10
+#define SEM_CID_REMAP_DATA__CLIENT_ID_MASK 0x000000FFL
+#define SEM_CID_REMAP_DATA__INITIATOR_ID_MASK 0x0000FF00L
+#define SEM_CID_REMAP_DATA__CLIENT_ID_REMAP_MASK 0x00FF0000L
+//SEM_ATOMIC_OP_LUT
+#define SEM_ATOMIC_OP_LUT__SIGNAL_NORMAL__SHIFT 0x0
+#define SEM_ATOMIC_OP_LUT__SIGNAL_WRITE1__SHIFT 0x7
+#define SEM_ATOMIC_OP_LUT__WAIT_NORMAL__SHIFT 0xe
+#define SEM_ATOMIC_OP_LUT__WAIT_CHECK0__SHIFT 0x15
+#define SEM_ATOMIC_OP_LUT__SIGNAL_NORMAL_MASK 0x0000007FL
+#define SEM_ATOMIC_OP_LUT__SIGNAL_WRITE1_MASK 0x00003F80L
+#define SEM_ATOMIC_OP_LUT__WAIT_NORMAL_MASK 0x001FC000L
+#define SEM_ATOMIC_OP_LUT__WAIT_CHECK0_MASK 0x0FE00000L
+//SEM_EDC_CONFIG
+#define SEM_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define SEM_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SEM_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define SEM_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+//SEM_CHICKEN_BITS2
+#define SEM_CHICKEN_BITS2__ACTIVE_FCN_ID_PROT_ENABLE__SHIFT 0x0
+#define SEM_CHICKEN_BITS2__MM_CLIENT_USE_CONFIG_VFID__SHIFT 0x1
+#define SEM_CHICKEN_BITS2__ACTIVE_FCN_ID_PROT_ENABLE_MASK 0x00000001L
+#define SEM_CHICKEN_BITS2__MM_CLIENT_USE_CONFIG_VFID_MASK 0x00000002L
+//SEM_MMHUB_CNTL
+#define SEM_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
+#define SEM_MMHUB_CNTL__TLVL_VALUE__SHIFT 0x8
+#define SEM_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
+#define SEM_MMHUB_CNTL__TLVL_VALUE_MASK 0x00000700L
+//SEM_REGISTER_LAST_PART1
+#define SEM_REGISTER_LAST_PART1__RESERVED__SHIFT 0x0
+#define SEM_REGISTER_LAST_PART1__RESERVED_MASK 0xFFFFFFFFL
+
+#endif
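
The *_SHIFT/*_MASK pairs above follow the usual convention in these generated headers: a field is read by ANDing with the mask and shifting right, and written by shifting left and masking back in. A minimal sketch using two of the fields defined above (the helper functions are invented for illustration; only the macros come from this header):

static inline uint32_t sem_violation_vf_id(uint32_t log)
{
	/* Extract the VF_ID field from a SEM_GPU_IOV_VIOLATION_LOG value. */
	return (log & SEM_GPU_IOV_VIOLATION_LOG__VF_ID_MASK) >>
	       SEM_GPU_IOV_VIOLATION_LOG__VF_ID__SHIFT;
}

static inline uint32_t ih_client_cfg_set_ring_id(uint32_t reg, uint32_t ring_id)
{
	/* Clear the old RING_ID field, then insert the new value. */
	reg &= ~IH_CLIENT_CFG_DATA__RING_ID_MASK;
	reg |= (ring_id << IH_CLIENT_CFG_DATA__RING_ID__SHIFT) &
	       IH_CLIENT_CFG_DATA__RING_ID_MASK;
	return reg;
}
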
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_offset.h
new file mode 100644
index 000000000000..55facadea54b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_offset.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_11_0_6_OFFSET_HEADER
+#define _smuio_11_0_6_OFFSET_HEADER
+
+
+
+// addressBlock: smuio_smuio_SmuSmuioDec
+// base address: 0x5a000
+#define mmCGTT_ROM_CLK_CTRL0 0x00e4
+#define mmCGTT_ROM_CLK_CTRL0_BASE_IDX 0
+#define mmROM_INDEX 0x00e5
+#define mmROM_INDEX_BASE_IDX 0
+#define mmROM_DATA 0x00e6
+#define mmROM_DATA_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_sh_mask.h
new file mode 100644
index 000000000000..7d6a2fac2839
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_sh_mask.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_11_0_6_SH_MASK_HEADER
+#define _smuio_11_0_6_SH_MASK_HEADER
+
+
+//CGTT_ROM_CLK_CTRL0
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L
+//ROM_INDEX
+#define ROM_INDEX__ROM_INDEX__SHIFT 0x0
+#define ROM_INDEX__ROM_INDEX_MASK 0x01FFFFFFL
+//ROM_DATA
+#define ROM_DATA__ROM_DATA__SHIFT 0x0
+#define ROM_DATA__ROM_DATA_MASK 0xFFFFFFFFL
+
+#endif
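
The three SMUIO registers added above form the usual indirect ROM window: ROM_INDEX selects a dword offset and successive reads of ROM_DATA return successive dwords. A hedged sketch of that access pattern, assuming the driver's standard SOC15_REG_OFFSET/RREG32/WREG32 accessors; the function itself is illustrative and not the driver's actual VBIOS read path:

static void example_read_rom_dwords(struct amdgpu_device *adev,
				    uint32_t start_dw, uint32_t *dst,
				    uint32_t count_dw)
{
	uint32_t i;

	/* Point the indirect window at the first dword of interest. */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), start_dw);
	/* ROM_DATA is expected to auto-advance through the image. */
	for (i = 0; i < count_dw; i++)
		dst[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
}
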
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index f775aac6c1bd..a41875ac5dfb 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -103,6 +103,7 @@ enum pp_clock_type {
enum amd_pp_sensors {
AMDGPU_PP_SENSOR_GFX_SCLK = 0,
+ AMDGPU_PP_SENSOR_CPU_CLK,
AMDGPU_PP_SENSOR_VDDNB,
AMDGPU_PP_SENSOR_VDDGFX,
AMDGPU_PP_SENSOR_UVD_VCLK,
@@ -155,9 +156,11 @@ enum {
enum PP_OD_DPM_TABLE_COMMAND {
PP_OD_EDIT_SCLK_VDDC_TABLE,
PP_OD_EDIT_MCLK_VDDC_TABLE,
+ PP_OD_EDIT_CCLK_VDDC_TABLE,
PP_OD_EDIT_VDDC_CURVE,
PP_OD_RESTORE_DEFAULT_TABLE,
- PP_OD_COMMIT_DPM_TABLE
+ PP_OD_COMMIT_DPM_TABLE,
+ PP_OD_EDIT_VDDGFX_OFFSET
};
struct pp_states_info {
diff --git a/drivers/gpu/drm/amd/include/renoir_ip_offset.h b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
index 07633e22e99a..7dff85c81e5a 100644
--- a/drivers/gpu/drm/amd/include/renoir_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
@@ -33,7 +33,7 @@ struct IP_BASE_INSTANCE
struct IP_BASE
{
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
-};
+} __maybe_unused;
static const struct IP_BASE ACP_BASE ={ { { { 0x02403800, 0x00480000, 0, 0, 0 } },
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 7b6ef05a1d35..5fa65f191a37 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -36,6 +36,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
+#include <asm/processor.h>
#include "hwmgr.h"
static const struct cg_flag_name clocks[] = {
@@ -730,11 +731,18 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
*
* - minimum and maximum engine clock labeled OD_SCLK
*
- * - maximum memory clock labeled OD_MCLK
+ * - minimum (not available for Vega20 and Navi1x) and maximum memory
+ * clock labeled OD_MCLK
*
* - three <frequency, voltage> points labeled OD_VDDC_CURVE.
* They can be used to calibrate the sclk voltage curve.
*
+ * - voltage offset (in mV) applied to the target voltage calculation.
+ * This is available for Sienna Cichlid, Navy Flounder and Dimgrey
+ * Cavefish. For these ASICs, the target voltage calculation can be
+ * illustrated by "voltage = voltage calculated from v/f curve +
+ * overdrive vddgfx offset"
+ *
* - a list of valid ranges for sclk, mclk, and voltage curve points
* labeled OD_RANGE
*
@@ -755,6 +763,11 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* 600mV. "vc 2 1000 1000" will update point3 with clock set
* as 1000Mhz and voltage 1000mV.
*
+ * To update the voltage offset applied to the gfxclk/voltage calculation,
+ * enter the new value by writing a string that contains "vo offset".
+ * This is supported by Sienna Cichlid, Navy Flounder and Dimgrey
+ * Cavefish; the offset can be a positive or negative value.
+ *
* - When you have edited all of the states as needed, write "c" (commit)
* to the file to commit your changes
*
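
A hedged user-space illustration of the interface documented above: apply a -25 mV vddgfx offset and commit it. The sysfs path is the typical location for the first GPU and the offset value is arbitrary; none of this is part of the patch itself.

#include <fcntl.h>
#include <unistd.h>

static int example_set_vddgfx_offset(void)
{
	const char *path = "/sys/class/drm/card0/device/pp_od_clk_voltage";
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	/* "vo <offset>" edits the vddgfx offset, "c" commits the edit. */
	if (write(fd, "vo -25", 6) < 0 || write(fd, "c", 1) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}
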
@@ -787,6 +800,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
if (*buf == 's')
type = PP_OD_EDIT_SCLK_VDDC_TABLE;
+ else if (*buf == 'p')
+ type = PP_OD_EDIT_CCLK_VDDC_TABLE;
else if (*buf == 'm')
type = PP_OD_EDIT_MCLK_VDDC_TABLE;
else if(*buf == 'r')
@@ -795,6 +810,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
type = PP_OD_COMMIT_DPM_TABLE;
else if (!strncmp(buf, "vc", 2))
type = PP_OD_EDIT_VDDC_CURVE;
+ else if (!strncmp(buf, "vo", 2))
+ type = PP_OD_EDIT_VDDGFX_OFFSET;
else
return -EINVAL;
@@ -802,12 +819,14 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str = buf_cpy;
- if (type == PP_OD_EDIT_VDDC_CURVE)
+ if ((type == PP_OD_EDIT_VDDC_CURVE) ||
+ (type == PP_OD_EDIT_VDDGFX_OFFSET))
tmp_str++;
while (isspace(*++tmp_str));
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
+ while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
+ if (strlen(sub_str) == 0)
+ continue;
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@@ -898,7 +917,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
+ size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDGFX_OFFSET, buf+size);
size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
+ size += smu_print_clk_levels(&adev->smu, SMU_OD_CCLK, buf+size);
} else if (adev->powerplay.pp_funcs->print_clock_levels) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
@@ -1074,7 +1095,7 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
int ret;
- long level;
+ unsigned long level;
char *sub_str = NULL;
char *tmp;
char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
@@ -1087,11 +1108,10 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
memcpy(buf_cpy, buf, bytes);
buf_cpy[bytes] = '\0';
tmp = buf_cpy;
- while (tmp[0]) {
- sub_str = strsep(&tmp, delimiter);
+ while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
if (strlen(sub_str)) {
- ret = kstrtol(sub_str, 0, &level);
- if (ret)
+ ret = kstrtoul(sub_str, 0, &level);
+ if (ret || level > 31)
return -EINVAL;
*mask |= 1 << level;
} else
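
For context, amdgpu_read_mask() above turns a space-separated list of DPM level indices (for example "0 2 5") into a 32-bit bitmask, and the switch to kstrtoul() plus the new level > 31 check keeps the shift within that mask. A standalone sketch of the same idea, written against the C library rather than the kernel helpers (the function name is hypothetical):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int example_parse_level_mask(char *s, uint32_t *mask)
{
	char *tok;

	*mask = 0;
	for (tok = strtok(s, " \n"); tok; tok = strtok(NULL, " \n")) {
		char *end;
		unsigned long level = strtoul(tok, &end, 0);

		if (*end != '\0' || level > 31)
			return -1;	/* reject out-of-range levels */
		*mask |= 1u << level;	/* "0 2 5" -> 0x25 */
	}
	return 0;
}
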
@@ -1346,6 +1366,138 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
return count;
}
+static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ if (is_support_sw_smu(adev))
+ size = smu_print_clk_levels(&adev->smu, SMU_VCLK, buf);
+ else
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
+}
+
+static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int ret;
+ uint32_t mask = 0;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, SMU_VCLK, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ if (ret)
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size;
+ int ret;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ if (is_support_sw_smu(adev))
+ size = smu_print_clk_levels(&adev->smu, SMU_DCLK, buf);
+ else
+ size = snprintf(buf, PAGE_SIZE, "\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
+}
+
+static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int ret;
+ uint32_t mask = 0;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, SMU_DCLK, mask);
+ else
+ ret = 0;
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ if (ret)
+ return -EINVAL;
+
+ return count;
+}
+
static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1717,8 +1869,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
i++;
memcpy(buf_cpy, buf, count-i);
tmp_str = buf_cpy;
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
+ while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
+ if (strlen(sub_str) == 0)
+ continue;
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@@ -2025,6 +2178,8 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
@@ -2067,7 +2222,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
*states = ATTR_STATE_UNSUPPORTED;
if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
- (!is_support_sw_smu(adev) && hwmgr->od_enabled))
+ (is_support_sw_smu(adev) && adev->smu.is_apu) ||
+ (!is_support_sw_smu(adev) && hwmgr->od_enabled))
*states = ATTR_STATE_SUPPORTED;
} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
@@ -2087,6 +2243,12 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(gpu_metrics)) {
if (asic_type < CHIP_VEGA12)
*states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
+ if (!(asic_type == CHIP_VANGOGH))
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
+ if (!(asic_type == CHIP_VANGOGH))
+ *states = ATTR_STATE_UNSUPPORTED;
}
if (asic_type == CHIP_ARCTURUS) {
@@ -2897,7 +3059,8 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- uint32_t limit = 0;
+ int limit_type = to_sensor_dev_attr(attr)->index;
+ uint32_t limit = limit_type << 24;
ssize_t size;
int r;
@@ -2911,7 +3074,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
}
if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, true);
+ smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_MAX);
size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
@@ -2931,7 +3094,8 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- uint32_t limit = 0;
+ int limit_type = to_sensor_dev_attr(attr)->index;
+ uint32_t limit = limit_type << 24;
ssize_t size;
int r;
@@ -2945,7 +3109,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
}
if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, false);
+ smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_CURRENT);
size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
@@ -2960,6 +3124,15 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
return size;
}
+static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int limit_type = to_sensor_dev_attr(attr)->index;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
+}
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
struct device_attribute *attr,
@@ -2967,6 +3140,7 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
size_t count)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
+ int limit_type = to_sensor_dev_attr(attr)->index;
int err;
u32 value;
@@ -2981,7 +3155,7 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
return err;
value = value / 1000000; /* convert to Watt */
-
+ value |= limit_type << 24;
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
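
The hwmon power-cap handlers in this file pack the PPT limit selector taken from the sensor attribute index (0 for the default/slow limit, 1 for the fast limit on Vangogh) into bits 31:24 of the value handed to the SMU layer, with the wattage in the low 24 bits. A small sketch of that encoding; the helper names are invented for illustration:

#include <stdint.h>

static inline uint32_t example_pack_ppt_limit(uint32_t limit_type, uint32_t watts)
{
	/* Selector in bits 31:24, wattage in bits 23:0. */
	return (limit_type << 24) | (watts & 0x00FFFFFF);
}

static inline uint32_t example_ppt_limit_type(uint32_t packed)
{
	return packed >> 24;
}

static inline uint32_t example_ppt_limit_watts(uint32_t packed)
{
	return packed & 0x00FFFFFF;
}
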
@@ -3193,6 +3367,12 @@ static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg,
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
+static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
+static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
+static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
+static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
+static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
@@ -3231,6 +3411,12 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_power1_cap_max.dev_attr.attr,
&sensor_dev_attr_power1_cap_min.dev_attr.attr,
&sensor_dev_attr_power1_cap.dev_attr.attr,
+ &sensor_dev_attr_power1_label.dev_attr.attr,
+ &sensor_dev_attr_power2_average.dev_attr.attr,
+ &sensor_dev_attr_power2_cap_max.dev_attr.attr,
+ &sensor_dev_attr_power2_cap_min.dev_attr.attr,
+ &sensor_dev_attr_power2_cap.dev_attr.attr,
+ &sensor_dev_attr_power2_label.dev_attr.attr,
&sensor_dev_attr_freq1_input.dev_attr.attr,
&sensor_dev_attr_freq1_label.dev_attr.attr,
&sensor_dev_attr_freq2_input.dev_attr.attr,
@@ -3323,8 +3509,9 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
effective_mode &= ~S_IWUSR;
}
- if (((adev->flags & AMD_IS_APU) ||
- adev->family == AMDGPU_FAMILY_SI) && /* not implemented yet */
+ if (((adev->family == AMDGPU_FAMILY_SI) ||
+ ((adev->flags & AMD_IS_APU) &&
+ (adev->asic_type != CHIP_VANGOGH))) && /* not implemented yet */
(attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
@@ -3387,6 +3574,16 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
return 0;
+ /* only Vangogh has fast PPT limit and power labels */
+ if (!(adev->asic_type == CHIP_VANGOGH) &&
+ (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
+ attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
+ attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_label.dev_attr.attr))
+ return 0;
+
return effective_mode;
}
@@ -3465,6 +3662,27 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
*/
#if defined(CONFIG_DEBUG_FS)
+static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
+ struct amdgpu_device *adev) {
+ uint16_t *p_val;
+ uint32_t size;
+ int i;
+
+ if (is_support_cclk_dpm(adev)) {
+ p_val = kcalloc(adev->smu.cpu_core_num, sizeof(uint16_t),
+ GFP_KERNEL);
+
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
+ (void *)p_val, &size)) {
+ for (i = 0; i < adev->smu.cpu_core_num; i++)
+ seq_printf(m, "\t%u MHz (CPU%d)\n",
+ *(p_val + i), i);
+ }
+
+ kfree(p_val);
+ }
+}
+
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
uint32_t value;
@@ -3475,6 +3693,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
/* GPU Clocks */
size = sizeof(value);
seq_printf(m, "GFX Clocks and Power:\n");
+
+ amdgpu_debugfs_prints_cpu_info(m, adev);
+
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 4bdbcce7092d..10b0624ade65 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -33,6 +33,8 @@
#define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES 1000
#define SMU_FW_NAME_LEN 0x24
+#define SMU_DPM_USER_PROFILE_RESTORE (1 << 0)
+
struct smu_hw_power_state {
unsigned int magic;
};
@@ -159,6 +161,19 @@ enum smu_power_src_type
SMU_POWER_SOURCE_COUNT,
};
+enum smu_ppt_limit_type
+{
+ SMU_DEFAULT_PPT_LIMIT = 0,
+ SMU_FAST_PPT_LIMIT,
+};
+
+enum smu_ppt_limit_level
+{
+ SMU_PPT_LIMIT_MIN = -1,
+ SMU_PPT_LIMIT_CURRENT,
+ SMU_PPT_LIMIT_MAX,
+};
+
enum smu_memory_pool_size
{
SMU_MEMORY_POOL_SIZE_ZERO = 0,
@@ -168,6 +183,17 @@ enum smu_memory_pool_size
SMU_MEMORY_POOL_SIZE_2_GB = 0x80000000,
};
+struct smu_user_dpm_profile {
+ uint32_t fan_mode;
+ uint32_t power_limit;
+ uint32_t fan_speed_percent;
+ uint32_t flags;
+
+ /* user clock state information */
+ uint32_t clk_mask[SMU_CLK_COUNT];
+ uint32_t clk_dependency;
+};
+
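
The new smu_user_dpm_profile struct caches settings the user has applied manually (fan mode and speed, power limit, per-domain clock masks) so they can be reapplied, for example after a GPU reset, whenever SMU_DPM_USER_PROFILE_RESTORE is set in flags. A hedged sketch of the idea, touching only the fields defined above; the helpers are hypothetical and not the driver's actual restore path:

static void example_remember_fan_speed(struct smu_context *smu, uint32_t percent)
{
	smu->user_dpm_profile.fan_speed_percent = percent;
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
}

static bool example_needs_restore(const struct smu_context *smu)
{
	return smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE;
}
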
#define SMU_TABLE_INIT(tables, table_id, s, a, d) \
do { \
tables[table_id].size = s; \
@@ -459,130 +485,672 @@ struct smu_context
struct work_struct interrupt_work;
unsigned fan_max_rpm;
- unsigned manual_fan_speed_rpm;
+ unsigned manual_fan_speed_percent;
uint32_t gfx_default_hard_min_freq;
uint32_t gfx_default_soft_max_freq;
uint32_t gfx_actual_hard_min_freq;
uint32_t gfx_actual_soft_max_freq;
+
+ /* APU only */
+ uint32_t cpu_default_soft_min_freq;
+ uint32_t cpu_default_soft_max_freq;
+ uint32_t cpu_actual_soft_min_freq;
+ uint32_t cpu_actual_soft_max_freq;
+ uint32_t cpu_core_id_select;
+ uint16_t cpu_core_num;
+
+ struct smu_user_dpm_profile user_dpm_profile;
};
struct i2c_adapter;
+/**
+ * struct pptable_funcs - Callbacks used to interact with the SMU.
+ */
struct pptable_funcs {
+ /**
+ * @run_btc: Calibrate voltage/frequency curve to fit the system's
+ * power delivery and voltage margins. Required for adaptive
+ * voltage frequency scaling (AVFS).
+ */
int (*run_btc)(struct smu_context *smu);
+
+ /**
+ * @get_allowed_feature_mask: Get allowed feature mask.
+ * &feature_mask: Array to store feature mask.
+ * &num: Elements in &feature_mask.
+ */
int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+
+ /**
+ * @get_current_power_state: Get the current power state.
+ *
+ * Return: Current power state on success, negative errno on failure.
+ */
enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
+
+ /**
+ * @set_default_dpm_table: Retrieve the default overdrive settings from
+ * the SMU.
+ */
int (*set_default_dpm_table)(struct smu_context *smu);
+
int (*set_power_state)(struct smu_context *smu);
+
+ /**
+ * @populate_umd_state_clk: Populate the UMD power state table with
+ * defaults.
+ */
int (*populate_umd_state_clk)(struct smu_context *smu);
+
+ /**
+ * @print_clk_levels: Print DPM clock levels for a clock domain
+ * to buffer. The current level is marked with a '*'.
+ *
+ * Used for sysfs interfaces.
+ */
int (*print_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
+
+ /**
+ * @force_clk_levels: Set a range of allowed DPM levels for a clock
+ * domain.
+ * &clk_type: Clock domain.
+ * &mask: Range of allowed DPM levels.
+ */
int (*force_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask);
+
+ /**
+ * @od_edit_dpm_table: Edit the custom overdrive DPM table.
+ * &type: Type of edit.
+ * &input: Edit parameters.
+ * &size: Size of &input.
+ */
int (*od_edit_dpm_table)(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size);
+
+ /**
+ * @get_clock_by_type_with_latency: Get the speed and latency of a clock
+ * domain.
+ */
int (*get_clock_by_type_with_latency)(struct smu_context *smu,
enum smu_clk_type clk_type,
struct
pp_clock_levels_with_latency
*clocks);
+ /**
+ * @get_clock_by_type_with_voltage: Get the speed and voltage of a clock
+ * domain.
+ */
+ int (*get_clock_by_type_with_voltage)(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct
+ pp_clock_levels_with_voltage
+ *clocks);
+
+ /**
+ * @get_power_profile_mode: Print all power profile modes to
+ * buffer. The current mode is marked with a '*'.
+ */
int (*get_power_profile_mode)(struct smu_context *smu, char *buf);
+
+ /**
+ * @set_power_profile_mode: Set a power profile mode. Also used to
+ * create/set custom power profile modes.
+ * &input: Power profile mode parameters.
+ * &size: Size of &input.
+ */
int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
+
+ /**
+ * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power
+ * management.
+ */
int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable);
+
+ /**
+ * @dpm_set_jpeg_enable: Enable/disable JPEG engine dynamic power
+ * management.
+ */
int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable);
+
+ /**
+ * @read_sensor: Read data from a sensor.
+ * &sensor: Sensor to read data from.
+ * &data: Sensor reading.
+ * &size: Size of &data.
+ */
int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
void *data, uint32_t *size);
+
+ /**
+ * @pre_display_config_changed: Prepare GPU for a display configuration
+ * change.
+ *
+ * Disable display tracking and pin memory clock speed to maximum. Used
+ * in display component synchronization.
+ */
int (*pre_display_config_changed)(struct smu_context *smu);
+
+ /**
+ * @display_config_changed: Notify the SMU of the current display
+ * configuration.
+ *
+ * Allows SMU to properly track blanking periods for memory clock
+ * adjustment. Used in display component synchronization.
+ */
int (*display_config_changed)(struct smu_context *smu);
+
int (*apply_clocks_adjust_rules)(struct smu_context *smu);
+
+ /**
+ * @notify_smc_display_config: Applies display requirements to the
+ * current power state.
+ *
+ * Optimize deep sleep DCEFclk and mclk for the current display
+ * configuration. Used in display component synchronization.
+ */
int (*notify_smc_display_config)(struct smu_context *smu);
+
+ /**
+ * @is_dpm_running: Check if DPM is running.
+ *
+ * Return: True if DPM is running, false otherwise.
+ */
bool (*is_dpm_running)(struct smu_context *smu);
- int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
+
+ /**
+ * @get_fan_speed_percent: Get the current fan speed in percent.
+ */
+ int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
+
+ /**
+ * @set_watermarks_table: Configure and upload the watermarks tables to
+ * the SMU.
+ */
int (*set_watermarks_table)(struct smu_context *smu,
struct pp_smu_wm_range_sets *clock_ranges);
+
+ /**
+ * @get_thermal_temperature_range: Get safe thermal limits in Celsius.
+ */
int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);
+
+ /**
+ * @get_uclk_dpm_states: Get memory clock DPM levels in kHz.
+ * &clocks_in_khz: Array of DPM levels.
+ * &num_states: Elements in &clocks_in_khz.
+ */
int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
+
+ /**
+ * @set_default_od_settings: Set the overdrive tables to defaults.
+ */
int (*set_default_od_settings)(struct smu_context *smu);
+
+ /**
+ * @set_performance_level: Set a performance level.
+ */
int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
+
+ /**
+ * @display_disable_memory_clock_switch: Enable/disable dynamic memory
+ * clock switching.
+ *
+ * Disabling this feature forces memory clock speed to maximum.
+ * Enabling sets the minimum memory clock capable of driving the
+ * current display configuration.
+ */
int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);
+
+ /**
+ * @dump_pptable: Print the power play table to the system log.
+ */
void (*dump_pptable)(struct smu_context *smu);
+
+ /**
+ * @get_power_limit: Get the device's power limits.
+ */
int (*get_power_limit)(struct smu_context *smu);
+
+ /**
+ * @get_ppt_limit: Get the device's ppt limits.
+ */
+ int (*get_ppt_limit)(struct smu_context *smu, uint32_t *ppt_limit,
+ enum smu_ppt_limit_type limit_type, enum smu_ppt_limit_level limit_level);
+
+ /**
+ * @set_df_cstate: Set data fabric cstate.
+ */
int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
+
+ /**
+ * @allow_xgmi_power_down: Enable/disable external global memory
+ * interconnect power down.
+ */
int (*allow_xgmi_power_down)(struct smu_context *smu, bool en);
+
+ /**
+ * @update_pcie_parameters: Update and upload the system's PCIe
+ * capabilities to the SMU.
+ * &pcie_gen_cap: Maximum allowed PCIe generation.
+ * &pcie_width_cap: Maximum allowed PCIe width.
+ */
int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
+
+ /**
+ * @i2c_init: Initialize i2c.
+ *
+ * The i2c bus is used internally by the SMU voltage regulators and
+ * other devices. The i2c's EEPROM also stores bad page tables on boards
+ * with ECC.
+ */
int (*i2c_init)(struct smu_context *smu, struct i2c_adapter *control);
+
+ /**
+ * @i2c_fini: Tear down i2c.
+ */
void (*i2c_fini)(struct smu_context *smu, struct i2c_adapter *control);
+
+ /**
+ * @get_unique_id: Get the GPU's unique id. Used for asset tracking.
+ */
void (*get_unique_id)(struct smu_context *smu);
+
+ /**
+ * @get_dpm_clock_table: Get a copy of the DPM clock table.
+ *
+ * Used by display component in bandwidth and watermark calculations.
+ */
int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);
+
+ /**
+ * @init_microcode: Request the SMU's firmware from the kernel.
+ */
int (*init_microcode)(struct smu_context *smu);
+
+ /**
+ * @load_microcode: Load firmware onto the SMU.
+ */
int (*load_microcode)(struct smu_context *smu);
+
+ /**
+ * @fini_microcode: Release the SMU's firmware.
+ */
void (*fini_microcode)(struct smu_context *smu);
+
+ /**
+ * @init_smc_tables: Initialize the SMU tables.
+ */
int (*init_smc_tables)(struct smu_context *smu);
+
+ /**
+ * @fini_smc_tables: Release the SMU tables.
+ */
int (*fini_smc_tables)(struct smu_context *smu);
+
+ /**
+ * @init_power: Initialize the power gate table context.
+ */
int (*init_power)(struct smu_context *smu);
+
+ /**
+ * @fini_power: Release the power gate table context.
+ */
int (*fini_power)(struct smu_context *smu);
+
+ /**
+ * @check_fw_status: Check the SMU's firmware status.
+ *
+ * Return: Zero if check passes, negative errno on failure.
+ */
int (*check_fw_status)(struct smu_context *smu);
+
+ /**
+ * @setup_pptable: Initialize the power play table and populate it with
+ * default values.
+ */
int (*setup_pptable)(struct smu_context *smu);
+
+ /**
+ * @get_vbios_bootup_values: Get default boot values from the VBIOS.
+ */
int (*get_vbios_bootup_values)(struct smu_context *smu);
+
+ /**
+ * @check_fw_version: Print driver and SMU interface versions to the
+ * system log.
+ *
+ * Interface mismatch is not a critical failure.
+ */
int (*check_fw_version)(struct smu_context *smu);
+
+ /**
+ * @powergate_sdma: Power up/down system direct memory access.
+ */
int (*powergate_sdma)(struct smu_context *smu, bool gate);
+
+ /**
+ * @set_gfx_cgpg: Enable/disable graphics engine coarse grain power
+ * gating.
+ */
int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
+
+ /**
+ * @write_pptable: Write the power play table to the SMU.
+ */
int (*write_pptable)(struct smu_context *smu);
+
+ /**
+ * @set_driver_table_location: Send the location of the driver table to
+ * the SMU.
+ */
int (*set_driver_table_location)(struct smu_context *smu);
+
+ /**
+ * @set_tool_table_location: Send the location of the tool table to the
+ * SMU.
+ */
int (*set_tool_table_location)(struct smu_context *smu);
+
+ /**
+ * @notify_memory_pool_location: Send the location of the memory pool to
+ * the SMU.
+ */
int (*notify_memory_pool_location)(struct smu_context *smu);
+
+ /**
+ * @system_features_control: Enable/disable all SMU features.
+ */
int (*system_features_control)(struct smu_context *smu, bool en);
+
+ /**
+ * @send_smc_msg_with_param: Send a message with a parameter to the SMU.
+ * &msg: Type of message.
+ * &param: Message parameter.
+ * &read_arg: SMU response (optional).
+ */
int (*send_smc_msg_with_param)(struct smu_context *smu,
enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
+
+ /**
+ * @send_smc_msg: Send a message to the SMU.
+ * &msg: Type of message.
+ * &read_arg: SMU response (optional).
+ */
int (*send_smc_msg)(struct smu_context *smu,
enum smu_message_type msg,
uint32_t *read_arg);
+
+ /**
+ * @init_display_count: Notify the SMU of the number of display
+ * components in current display configuration.
+ */
int (*init_display_count)(struct smu_context *smu, uint32_t count);
+
+ /**
+ * @set_allowed_mask: Notify the SMU of the features currently allowed
+ * by the driver.
+ */
int (*set_allowed_mask)(struct smu_context *smu);
+
+ /**
+ * @get_enabled_mask: Get a mask of features that are currently enabled
+ * on the SMU.
+ * &feature_mask: Array representing enabled feature mask.
+ * &num: Elements in &feature_mask.
+ */
int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+
+ /**
+ * @feature_is_enabled: Test if a feature is enabled.
+ *
+ * Return: One if enabled, zero if disabled.
+ */
int (*feature_is_enabled)(struct smu_context *smu, enum smu_feature_mask mask);
+
+ /**
+ * @disable_all_features_with_exception: Disable all features with
+ * exception to those in &mask.
+ */
int (*disable_all_features_with_exception)(struct smu_context *smu, enum smu_feature_mask mask);
+
+ /**
+ * @notify_display_change: Enable fast memory clock switching.
+ *
+ * Allows for fine grained memory clock switching but has more stringent
+ * timing requirements.
+ */
int (*notify_display_change)(struct smu_context *smu);
+
+ /**
+ * @set_power_limit: Set power limit in watts.
+ */
int (*set_power_limit)(struct smu_context *smu, uint32_t n);
+
+ /**
+ * @init_max_sustainable_clocks: Populate max sustainable clock speed
+ * table with values from the SMU.
+ */
int (*init_max_sustainable_clocks)(struct smu_context *smu);
+
+ /**
+ * @enable_thermal_alert: Enable thermal alert interrupts.
+ */
int (*enable_thermal_alert)(struct smu_context *smu);
+
+ /**
+ * @disable_thermal_alert: Disable thermal alert interrupts.
+ */
int (*disable_thermal_alert)(struct smu_context *smu);
+
+ /**
+ * @set_min_dcef_deep_sleep: Set a minimum display fabric deep sleep
+ * clock speed in MHz.
+ */
int (*set_min_dcef_deep_sleep)(struct smu_context *smu, uint32_t clk);
+
+ /**
+ * @display_clock_voltage_request: Set a hard minimum frequency
+ * for a clock domain.
+ */
int (*display_clock_voltage_request)(struct smu_context *smu, struct
pp_display_clock_request
*clock_req);
+
+ /**
+ * @get_fan_control_mode: Get the current fan control mode.
+ */
uint32_t (*get_fan_control_mode)(struct smu_context *smu);
+
+ /**
+ * @set_fan_control_mode: Set the fan control mode.
+ */
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
- int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
+
+ /**
+ * @set_fan_speed_percent: Set a static fan speed in percent.
+ */
+ int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
+
+ /**
+ * @set_xgmi_pstate: Set inter-chip global memory interconnect pstate.
+ * &pstate: Pstate to set. D0 if nonzero, D3 otherwise.
+ */
int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
+
+ /**
+ * @gfx_off_control: Enable/disable graphics engine poweroff.
+ */
int (*gfx_off_control)(struct smu_context *smu, bool enable);
+
+
+ /**
+ * @get_gfx_off_status: Get graphics engine poweroff status.
+ *
+ * Return:
+ * 0 - GFXOFF (default).
+ * 1 - Transition out of GFX State.
+ * 2 - Not in GFXOFF.
+ * 3 - Transition into GFXOFF.
+ */
uint32_t (*get_gfx_off_status)(struct smu_context *smu);
+
+ /**
+ * @register_irq_handler: Register interrupt request handlers.
+ */
int (*register_irq_handler)(struct smu_context *smu);
+
+ /**
+ * @set_azalia_d3_pme: Wake the audio decode engine from d3 sleep.
+ */
int (*set_azalia_d3_pme)(struct smu_context *smu);
+
+ /**
+ * @get_max_sustainable_clocks_by_dc: Get a copy of the max sustainable
+ * clock speeds table.
+ *
+ * Provides a way for the display component (DC) to get the max
+ * sustainable clocks from the SMU.
+ */
int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks);
+
+ /**
+ * @baco_is_support: Check if GPU supports BACO (Bus Active, Chip Off).
+ */
bool (*baco_is_support)(struct smu_context *smu);
+
+ /**
+ * @baco_get_state: Get the current BACO state.
+ *
+ * Return: Current BACO state.
+ */
enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
+
+ /**
+ * @baco_set_state: Enter/exit BACO.
+ */
int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
+
+ /**
+ * @baco_enter: Enter BACO.
+ */
int (*baco_enter)(struct smu_context *smu);
+
+ /**
+ * @baco_exit: Exit BACO.
+ */
int (*baco_exit)(struct smu_context *smu);
+
+ /**
+ * @mode1_reset_is_support: Check if GPU supports mode1 reset.
+ */
bool (*mode1_reset_is_support)(struct smu_context *smu);
+
+ /**
+ * @mode1_reset: Perform mode1 reset.
+ *
+ * Complete GPU reset.
+ */
int (*mode1_reset)(struct smu_context *smu);
+
+ /**
+ * @mode2_reset: Perform mode2 reset.
+ *
+ * Mode2 reset generally does not reset as many IPs as mode1 reset. The
+ * IPs reset varies by asic.
+ */
int (*mode2_reset)(struct smu_context *smu);
+
+ /**
+ * @get_dpm_ultimate_freq: Get the hard frequency range of a clock
+ * domain in MHz.
+ */
int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
+
+ /**
+ * @set_soft_freq_limited_range: Set the soft frequency range of a clock
+ * domain in MHz.
+ */
int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
+
+ /**
+ * @set_power_source: Notify the SMU of the current power source.
+ */
int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
+
+ /**
+ * @log_thermal_throttling_event: Print a thermal throttling warning to
+ * the system's log.
+ */
void (*log_thermal_throttling_event)(struct smu_context *smu);
+
+ /**
+ * @get_pp_feature_mask: Print a human readable table of enabled
+ * features to buffer.
+ */
size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf);
+
+ /**
+ * @set_pp_feature_mask: Request that the SMU enable/disable features to
+ * match those enabled in &new_mask.
+ */
int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask);
+
+ /**
+ * @get_gpu_metrics: Get a copy of the GPU metrics table from the SMU.
+ *
+ * Return: Size of &table
+ */
ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);
+
+ /**
+ * @enable_mgpu_fan_boost: Enable multi-GPU fan boost.
+ */
int (*enable_mgpu_fan_boost)(struct smu_context *smu);
+
+ /**
+ * @gfx_ulv_control: Enable/disable ultra low voltage.
+ */
int (*gfx_ulv_control)(struct smu_context *smu, bool enablement);
+
+ /**
+ * @deep_sleep_control: Enable/disable deep sleep.
+ */
int (*deep_sleep_control)(struct smu_context *smu, bool enablement);
+
+ /**
+ * @get_fan_parameters: Get fan parameters.
+ *
+ * Get maximum fan speed from the power play table.
+ */
int (*get_fan_parameters)(struct smu_context *smu);
+
+ /**
+ * @post_init: Helper function for asic specific workarounds.
+ */
int (*post_init)(struct smu_context *smu);
+
+ /**
+ * @interrupt_work: Work task scheduled from SMU interrupt handler.
+ */
void (*interrupt_work)(struct smu_context *smu);
+
+ /**
+ * @gpo_control: Enable/disable graphics power optimization if supported.
+ */
int (*gpo_control)(struct smu_context *smu, bool enablement);
+
+ /**
+ * @gfx_state_change_set: Send the current graphics state to the SMU.
+ */
int (*gfx_state_change_set)(struct smu_context *smu, uint32_t state);
+
+ /**
+ * @set_fine_grain_gfx_freq_parameters: Set fine grain graphics clock
+ * parameters to defaults.
+ */
int (*set_fine_grain_gfx_freq_parameters)(struct smu_context *smu);
};
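
Each ASIC-specific SMU backend supplies its implementation of these callbacks by filling in a pptable_funcs table. A hedged sketch of that registration pattern, with stub callbacks whose prototypes match the declarations above; everything named example_* is invented for illustration:

static int example_check_fw_status(struct smu_context *smu)
{
	return 0;	/* stub: report firmware as healthy */
}

static int example_print_clk_levels(struct smu_context *smu,
				    enum smu_clk_type clk_type, char *buf)
{
	return 0;	/* stub: nothing printed */
}

static const struct pptable_funcs example_ppt_funcs = {
	.check_fw_status	= example_check_fw_status,
	.print_clk_levels	= example_print_clk_levels,
};
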
@@ -596,6 +1164,7 @@ typedef enum {
METRICS_CURR_DCLK1,
METRICS_CURR_FCLK,
METRICS_CURR_DCEFCLK,
+ METRICS_AVERAGE_CPUCLK,
METRICS_AVERAGE_GFXCLK,
METRICS_AVERAGE_SOCCLK,
METRICS_AVERAGE_FCLK,
@@ -636,6 +1205,12 @@ enum smu_cmn2asic_mapping_type {
#define FEA_MAP(fea) \
[SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}
+#define FEA_MAP_REVERSE(fea) \
+ [SMU_FEATURE_DPM_##fea##_BIT] = {1, FEATURE_##fea##_DPM_BIT}
+
+#define FEA_MAP_HALF_REVERSE(fea) \
+ [SMU_FEATURE_DPM_##fea##CLK_BIT] = {1, FEATURE_##fea##_DPM_BIT}
+
#define TAB_MAP(tab) \
[SMU_TABLE_##tab] = {1, TABLE_##tab}
@@ -662,7 +1237,7 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed);
int smu_get_power_limit(struct smu_context *smu,
uint32_t *limit,
- bool max_setting);
+ enum smu_ppt_limit_level limit_level);
int smu_set_power_limit(struct smu_context *smu, uint32_t limit);
int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
@@ -718,6 +1293,7 @@ extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
bool is_support_sw_smu(struct amdgpu_device *adev);
+bool is_support_cclk_dpm(struct amdgpu_device *adev);
int smu_reset(struct smu_context *smu);
int smu_sys_get_pp_table(struct smu_context *smu, void **table);
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size);
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
index 1c19eae93ff1..6e23a3f803a7 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
@@ -141,7 +141,6 @@ typedef struct {
uint32_t MaxGfxClk;
uint8_t NumDfPstatesEnabled;
- uint8_t NumDpmLevelsEnabled;
uint8_t NumDcfclkLevelsEnabled;
uint8_t NumDispClkLevelsEnabled; //applies to both dispclk and dppclk
uint8_t NumSocClkLevelsEnabled;
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h
index 720d15612fe1..aa4822202587 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h
@@ -133,8 +133,6 @@
__SMU_DUMMY_MAP(PowerUpSdma), \
__SMU_DUMMY_MAP(SetHardMinIspclkByFreq), \
__SMU_DUMMY_MAP(SetHardMinVcn), \
- __SMU_DUMMY_MAP(Spare1), \
- __SMU_DUMMY_MAP(Spare2), \
__SMU_DUMMY_MAP(SetAllowFclkSwitch), \
__SMU_DUMMY_MAP(SetMinVideoGfxclkFreq), \
__SMU_DUMMY_MAP(ActiveProcessNotify), \
@@ -211,6 +209,11 @@
__SMU_DUMMY_MAP(SetGpoFeaturePMask), \
__SMU_DUMMY_MAP(DisallowGpo), \
__SMU_DUMMY_MAP(Enable2ndUSB20Port), \
+ __SMU_DUMMY_MAP(RequestActiveWgp), \
+ __SMU_DUMMY_MAP(SetFastPPTLimit), \
+ __SMU_DUMMY_MAP(SetSlowPPTLimit), \
+ __SMU_DUMMY_MAP(GetFastPPTLimit), \
+ __SMU_DUMMY_MAP(GetSlowPPTLimit), \
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -236,10 +239,12 @@ enum smu_clk_type {
SMU_SCLK,
SMU_MCLK,
SMU_PCIE,
+ SMU_OD_CCLK,
SMU_OD_SCLK,
SMU_OD_MCLK,
SMU_OD_VDDC_CURVE,
SMU_OD_RANGE,
+ SMU_OD_VDDGFX_OFFSET,
SMU_CLK_COUNT,
};
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index 13de692a4213..d4cddd2390a2 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -31,7 +31,7 @@
#define SMU11_DRIVER_IF_VERSION_NV12 0x36
#define SMU11_DRIVER_IF_VERSION_NV14 0x36
#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3D
-#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xC
+#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE
#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x02
#define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
@@ -129,6 +129,15 @@ struct smu_11_0_power_context {
enum smu_11_0_power_state power_state;
};
+struct smu_11_5_power_context {
+ uint32_t power_source;
+ uint8_t in_power_limit_boost_mode;
+ enum smu_11_0_power_state power_state;
+
+ uint32_t current_fast_ppt_limit;
+ uint32_t max_fast_ppt_limit;
+};
+
enum smu_v11_0_baco_seq {
BACO_SEQ_BACO = 0,
BACO_SEQ_MSR,
@@ -203,11 +212,8 @@ int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode);
-int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
- uint32_t speed);
-
-int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
- uint32_t *speed);
+int smu_v11_0_set_fan_speed_percent(struct smu_context *smu,
+ uint32_t speed);
int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate);
@@ -275,10 +281,6 @@ int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu);
int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu);
-void smu_v11_0_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics);
-
-void smu_v11_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics);
-
int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
bool enablement);
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
index 55d7892e4e0e..fe130a497d6c 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
@@ -104,7 +104,11 @@
#define PPSMC_MSG_DramLogSetDramBufferSize 0x46
#define PPSMC_MSG_RequestActiveWgp 0x47
#define PPSMC_MSG_QueryActiveWgp 0x48
-#define PPSMC_Message_Count 0x49
+#define PPSMC_MSG_SetFastPPTLimit 0x49
+#define PPSMC_MSG_SetSlowPPTLimit 0x4A
+#define PPSMC_MSG_GetFastPPTLimit 0x4B
+#define PPSMC_MSG_GetSlowPPTLimit 0x4C
+#define PPSMC_Message_Count 0x4D
//Argument for PPSMC_MSG_GfxDeviceDriverReset
enum {
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
index fa2e8cb07967..02de3b6199e5 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
@@ -60,7 +60,5 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
int smu_v12_0_set_driver_table_location(struct smu_context *smu);
-void smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics);
-
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
index 6a7de8b898fa..f2cef0930aa9 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
@@ -33,6 +33,7 @@
#include "ppsmc.h"
#include "amd_acpi.h"
#include "pp_psm.h"
+#include "vega10_hwmgr.h"
extern const struct pp_smumgr_func ci_smu_funcs;
extern const struct pp_smumgr_func smu8_smu_funcs;
@@ -46,7 +47,6 @@ extern const struct pp_smumgr_func vega12_smu_funcs;
extern const struct pp_smumgr_func smu10_smu_funcs;
extern const struct pp_smumgr_func vega20_smu_funcs;
-extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index 83a6504e093c..b1038d30c8dc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -279,7 +279,7 @@ static const ATOM_VOLTAGE_OBJECT_V3 *atomctrl_lookup_voltage_type_v3(
*
* @hwmgr: input parameter: pointer to HwMgr
* @clock_value: input parameter: memory clock
- * @dividers: output parameter: memory PLL dividers
+ * @mpll_param: output parameter: memory clock parameters
* @strobe_mode: input parameter: 1 for strobe mode, 0 for performance mode
*/
int atomctrl_get_memory_pll_dividers_si(
@@ -332,7 +332,7 @@ int atomctrl_get_memory_pll_dividers_si(
*
* @hwmgr: input parameter: pointer to HwMgr
* @clock_value: input parameter: memory clock
- * @dividers: output parameter: memory PLL dividers
+ * @mpll_param: output parameter: memory clock parameters
*/
int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
index 741e03ad5311..f2a55c1413f5 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
@@ -1362,6 +1362,7 @@ static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i
* @hwmgr: Pointer to the hardware manager.
* @entry_index: The index of the entry to be extracted from the table.
* @power_state: The address of the PowerState instance being created.
+ * @call_back_func: The function to call to fill in the power state
* Return: -1 if the entry cannot be retrieved.
*/
int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index 88322781e447..ed05a30d1139 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1487,7 +1487,7 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
}
if (!smu10_data->fine_grain_enabled) {
- pr_err("Fine grain not started\n");
+ pr_err("pp_od_clk_voltage is not accessible if power_dpm_force_perfomance_level is not in manual mode!\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 82676c086ce4..c57dc9ae81f2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -235,7 +235,7 @@ static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
/**
* smu7_enable_smc_voltage_controller - Enable voltage control
*
- * @hwmgr the address of the powerplay hardware manager.
+ * @hwmgr: the address of the powerplay hardware manager.
* Return: always PP_Result_OK
*/
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
@@ -4501,7 +4501,7 @@ static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
* smu7_set_max_fan_rpm_output - Set maximum target operating fan output RPM
*
* @hwmgr: the address of the powerplay hardware manager.
- * @usMaxFanRpm: max operating fan RPM value.
+ * @us_max_fan_rpm: max operating fan RPM value.
* Return: The response that came from the SMC.
*/
static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 1b47f94e0331..29c99642d22d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -542,11 +542,11 @@ static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
/**
-* Get Leakage VDDC based on leakage ID.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0.
-*/
+ * Get Leakage VDDC based on leakage ID.
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * return: always 0.
+ */
static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -600,9 +600,9 @@ static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
/**
* Change virtual leakage voltage to actual value.
*
- * @param hwmgr the address of the powerplay hardware manager.
- * @param pointer to changing voltage
- * @param pointer to leakage table
+ * @hwmgr: the address of the powerplay hardware manager.
+ * @voltage: pointer to changing voltage
+ * @leakage_table: pointer to leakage table
*/
static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
@@ -624,13 +624,13 @@ static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
}
/**
-* Patch voltage lookup table by EVV leakages.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pointer to voltage lookup table
-* @param pointer to leakage table
-* @return always 0
-*/
+ * Patch voltage lookup table by EVV leakages.
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * @lookup_table: pointer to voltage lookup table
+ * @leakage_table: pointer to leakage table
+ * return: always 0
+ */
static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
phm_ppt_v1_voltage_lookup_table *lookup_table,
struct vega10_leakage_voltage *leakage_table)
@@ -1001,13 +1001,12 @@ static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
}
/**
-* Remove repeated voltage values and create table with unique values.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param vol_table the pointer to changing voltage table
-* @return 0 in success
-*/
-
+ * Remove repeated voltage values and create table with unique values.
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * @vol_table: the pointer to changing voltage table
+ * return: 0 on success
+ */
static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_voltage_table *vol_table)
{
@@ -1151,11 +1150,11 @@ static void vega10_trim_voltage_table_to_fit_state_table(
}
/**
-* Create Voltage Tables.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
+ * Create Voltage Tables.
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * return: always 0
+ */
static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -1212,11 +1211,11 @@ static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
}
/*
- * @fn vega10_init_dpm_state
- * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
+ * vega10_init_dpm_state
+ * Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
*
- * @param dpm_state - the address of the DPM Table to initiailize.
- * @return None.
+ * @dpm_state: the address of the DPM Table to initialize.
+ * return: None.
*/
static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
{
@@ -1460,11 +1459,11 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
}
/*
- * @fn vega10_populate_ulv_state
- * @brief Function to provide parameters for Utral Low Voltage state to SMC.
+ * vega10_populate_ulv_state
+ * Function to provide parameters for Ultra Low Voltage state to SMC.
*
- * @param hwmgr - the address of the hardware manager.
- * @return Always 0.
+ * @hwmgr: the address of the hardware manager.
+ * return: Always 0.
*/
static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
{
@@ -1545,13 +1544,13 @@ static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
}
/**
-* Populates single SMC GFXSCLK structure using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param gfx_clock the GFX clock to use to populate the structure.
-* @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
-*/
-
+ * Populates single SMC GFXSCLK structure using the provided engine clock
+ *
+ * @hwmgr: the address of the hardware manager
+ * @gfx_clock: the GFX clock to use to populate the structure.
+ * @current_gfxclk_level: location in PPTable for the SMC GFXCLK structure.
+ * @acg_freq: ACG frequency to return (MHz)
+ */
static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
uint32_t *acg_freq)
@@ -1610,12 +1609,13 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
}
/**
- * @brief Populates single SMC SOCCLK structure using the provided clock.
+ * Populates single SMC SOCCLK structure using the provided clock.
*
- * @param hwmgr - the address of the hardware manager.
- * @param soc_clock - the SOC clock to use to populate the structure.
- * @param current_socclk_level - location in PPTable for the SMC SOCCLK structure.
- * @return 0 on success..
+ * @hwmgr: the address of the hardware manager.
+ * @soc_clock: the SOC clock to use to populate the structure.
+ * @current_soc_did: DFS divider to pass back to caller
+ * @current_vol_index: index of current VDD to pass back to caller
+ * return: 0 on success
*/
static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
uint32_t soc_clock, uint8_t *current_soc_did,
@@ -1659,10 +1659,10 @@ static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
}
/**
-* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
+ * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+ *
+ * @hwmgr: the address of the hardware manager
+ */
static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -1746,12 +1746,12 @@ static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr)
}
}
-/**
- * @brief Populates single SMC GFXCLK structure using the provided clock.
+/*
+ * Populates single SMC GFXCLK structure using the provided clock.
*
- * @param hwmgr - the address of the hardware manager.
- * @param mem_clock - the memory clock to use to populate the structure.
- * @return 0 on success..
+ * @hwmgr: the address of the hardware manager.
+ * @mem_clock: the memory clock to use to populate the structure.
+ * return: 0 on success.
*/
static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
uint32_t mem_clock, uint8_t *current_mem_vid,
@@ -1808,10 +1808,10 @@ static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
}
/**
- * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
+ * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
*
- * @param pHwMgr - the address of the hardware manager.
- * @return PP_Result_OK on success.
+ * @hwmgr: the address of the hardware manager.
+ * return: PP_Result_OK on success.
*/
static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
@@ -2486,12 +2486,11 @@ static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
}
/**
-* Initializes the SMC table and uploads it
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data (PowerState)
-* @return always 0
-*/
+ * Initializes the SMC table and uploads it
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * return: always 0
+ */
static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
@@ -2864,11 +2863,11 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
/**
- * @brief Tell SMC to enabled the supported DPMs.
+ * Tell SMC to enable the supported DPMs.
*
- * @param hwmgr - the address of the powerplay hardware manager.
- * @Param bitmap - bitmap for the features to enabled.
- * @return 0 on at least one DPM is successfully enabled.
+ * @hwmgr: the address of the powerplay hardware manager.
+ * @bitmap: bitmap for the features to be enabled.
+ * return: 0 if at least one DPM is successfully enabled.
*/
static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
{
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h
index f752b4ad0c8a..07c06f8c90b0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h
@@ -442,5 +442,6 @@ int vega10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int vega10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int vega10_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
+int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
#endif /* _VEGA10_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index dc206fa88c5e..c0753029a8e2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -718,12 +718,11 @@ static int vega12_save_default_power_profile(struct pp_hwmgr *hwmgr)
#endif
/**
-* Initializes the SMC table and uploads it
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data (PowerState)
-* @return always 0
-*/
+ * Initializes the SMC table and uploads it
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * return: always 0
+ */
static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index da84012b7fd5..87811b005b85 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -771,12 +771,11 @@ static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
}
/**
-* Initializes the SMC table and uploads it
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data (PowerState)
-* @return always 0
-*/
+ * Initializes the SMC table and uploads it
+ *
+ * @hwmgr: the address of the powerplay hardware manager.
+ * return: always 0
+ */
static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 8b867a6d52b5..d143ef1b460b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -266,6 +266,119 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
return ret;
}
+/**
+ * smu_set_user_clk_dependencies - set user profile clock dependencies
+ *
+ * @smu: smu_context pointer
+ * @clk: enum smu_clk_type type
+ *
+ * Enable/Disable the clock dependency for the @clk type.
+ */
+static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
+{
+ if (smu->adev->in_suspend)
+ return;
+
+ /*
+ * mclk, fclk and socclk are interdependent
+ * on each other
+ */
+ if (clk == SMU_MCLK) {
+ /* reset clock dependency */
+ smu->user_dpm_profile.clk_dependency = 0;
+ /* set mclk dependent clocks(fclk and socclk) */
+ smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
+ } else if (clk == SMU_FCLK) {
+ /* give priority to mclk, if mclk dependent clocks are set */
+ if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
+ return;
+
+ /* reset clock dependency */
+ smu->user_dpm_profile.clk_dependency = 0;
+ /* set fclk dependent clocks(mclk and socclk) */
+ smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
+ } else if (clk == SMU_SOCCLK) {
+ /* give priority to mclk, if mclk dependent clocks are set */
+ if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
+ return;
+
+ /* reset clock dependency */
+ smu->user_dpm_profile.clk_dependency = 0;
+ /* set socclk dependent clocks(mclk and fclk) */
+ smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
+ } else
+ /* add clk dependencies here, if any */
+ return;
+}
+
+/**
+ * smu_restore_dpm_user_profile - reinstate user dpm profile
+ *
+ * @smu: smu_context pointer
+ *
+ * Restore the saved user power configurations, including power limit,
+ * clock frequencies, fan control mode and fan speed.
+ */
+static void smu_restore_dpm_user_profile(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ int ret = 0;
+
+ if (!smu->adev->in_suspend)
+ return;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return;
+
+ /* Enable restore flag */
+ smu->user_dpm_profile.flags = SMU_DPM_USER_PROFILE_RESTORE;
+
+ /* set the user dpm power limit */
+ if (smu->user_dpm_profile.power_limit) {
+ ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to set power limit value\n");
+ }
+
+ /* set the user dpm clock configurations */
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ enum smu_clk_type clk_type;
+
+ for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
+ /*
+ * Iterate over smu clk type and force the saved user clk
+ * configs, skip if clock dependency is enabled
+ */
+ if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
+ smu->user_dpm_profile.clk_mask[clk_type]) {
+ ret = smu_force_clk_levels(smu, clk_type,
+ smu->user_dpm_profile.clk_mask[clk_type]);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to set clock type = %d\n",
+ clk_type);
+ }
+ }
+ }
+
+ /* set the user dpm fan configurations */
+ if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL) {
+ ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
+ return;
+ }
+
+ if (!ret && smu->user_dpm_profile.fan_speed_percent) {
+ ret = smu_set_fan_speed_percent(smu, smu->user_dpm_profile.fan_speed_percent);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to set manual fan speed\n");
+ }
+ }
+
+ /* Disable restore flag */
+ smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
+}
+
int smu_get_power_num_states(struct smu_context *smu,
struct pp_states_info *state_info)
{
@@ -288,6 +401,20 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
return false;
}
+bool is_support_cclk_dpm(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = &adev->smu;
+
+ if (!is_support_sw_smu(adev))
+ return false;
+
+ if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
+ return false;
+
+ return true;
+}
+
+
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -405,8 +532,6 @@ static int smu_set_funcs(struct amdgpu_device *adev)
break;
case CHIP_VANGOGH:
vangogh_set_ppt_funcs(smu);
- /* enable the OD by default to allow the fine grain tuning function */
- smu->od_enabled = true;
break;
default:
return -EINVAL;
@@ -478,9 +603,6 @@ static int smu_late_init(void *handle)
smu_set_fine_grain_gfx_freq_parameters(smu);
- if (adev->asic_type == CHIP_VANGOGH)
- return 0;
-
if (!smu->pm_enabled)
return 0;
@@ -517,6 +639,8 @@ static int smu_late_init(void *handle)
AMD_PP_TASK_COMPLETE_INIT,
false);
+ smu_restore_dpm_user_profile(smu);
+
return 0;
}
@@ -1610,6 +1734,12 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
mutex_unlock(&smu->mutex);
+ /* reset user dpm clock state */
+ if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
+ smu->user_dpm_profile.clk_dependency = 0;
+ }
+
return ret;
}
@@ -1644,8 +1774,13 @@ int smu_force_clk_levels(struct smu_context *smu,
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
+ if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
+ if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE) {
+ smu->user_dpm_profile.clk_mask[clk_type] = mask;
+ smu_set_user_clk_dependencies(smu, clk_type);
+ }
+ }
mutex_unlock(&smu->mutex);
@@ -1887,6 +2022,7 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
+ u32 percent;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -1894,8 +2030,12 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->set_fan_speed_rpm)
- ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
+ if (smu->ppt_funcs->set_fan_speed_percent) {
+ percent = speed * 100 / smu->fan_max_rpm;
+ ret = smu->ppt_funcs->set_fan_speed_percent(smu, percent);
+ if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+ smu->user_dpm_profile.fan_speed_percent = percent;
+ }
mutex_unlock(&smu->mutex);
@@ -1904,22 +2044,40 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
int smu_get_power_limit(struct smu_context *smu,
uint32_t *limit,
- bool max_setting)
+ enum smu_ppt_limit_level limit_level)
{
+ uint32_t limit_type = *limit >> 24;
+ int ret = 0;
+
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- *limit = (max_setting ? smu->max_power_limit : smu->current_power_limit);
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
+ if (smu->ppt_funcs->get_ppt_limit)
+ ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
+ } else {
+ switch (limit_level) {
+ case SMU_PPT_LIMIT_CURRENT:
+ *limit = smu->current_power_limit;
+ break;
+ case SMU_PPT_LIMIT_MAX:
+ *limit = smu->max_power_limit;
+ break;
+ default:
+ break;
+ }
+ }
mutex_unlock(&smu->mutex);
- return 0;
+ return ret;
}
int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
+ uint32_t limit_type = limit >> 24;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -1927,6 +2085,12 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
mutex_lock(&smu->mutex);
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ if (smu->ppt_funcs->set_power_limit) {
+ ret = smu->ppt_funcs->set_power_limit(smu, limit);
+ goto out;
+ }
+
if (limit > smu->max_power_limit) {
dev_err(smu->adev->dev,
"New power limit (%d) is over the max allowed %d\n",
@@ -1937,8 +2101,11 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
if (!limit)
limit = smu->current_power_limit;
- if (smu->ppt_funcs->set_power_limit)
+ if (smu->ppt_funcs->set_power_limit) {
ret = smu->ppt_funcs->set_power_limit(smu, limit);
+ if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+ smu->user_dpm_profile.power_limit = limit;
+ }
out:
mutex_unlock(&smu->mutex);
@@ -2115,11 +2282,19 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->set_fan_control_mode)
+ if (smu->ppt_funcs->set_fan_control_mode) {
ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
+ if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+ smu->user_dpm_profile.fan_mode = value;
+ }
mutex_unlock(&smu->mutex);
+ /* reset user dpm fan speed */
+ if (!ret && value != AMD_FAN_CTRL_MANUAL &&
+ smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+ smu->user_dpm_profile.fan_speed_percent = 0;
+
return ret;
}
@@ -2127,17 +2302,15 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
int ret = 0;
uint32_t percent;
- uint32_t current_rpm;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->get_fan_speed_rpm) {
- ret = smu->ppt_funcs->get_fan_speed_rpm(smu, &current_rpm);
+ if (smu->ppt_funcs->get_fan_speed_percent) {
+ ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
if (!ret) {
- percent = current_rpm * 100 / smu->fan_max_rpm;
*speed = percent > 100 ? 100 : percent;
}
}
@@ -2151,18 +2324,18 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
int ret = 0;
- uint32_t rpm;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->set_fan_speed_rpm) {
+ if (smu->ppt_funcs->set_fan_speed_percent) {
if (speed > 100)
speed = 100;
- rpm = speed * smu->fan_max_rpm / 100;
- ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm);
+ ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
+ if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+ smu->user_dpm_profile.fan_speed_percent = speed;
}
mutex_unlock(&smu->mutex);
@@ -2173,14 +2346,17 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
{
int ret = 0;
+ u32 percent;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->get_fan_speed_rpm)
- ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
+ if (smu->ppt_funcs->get_fan_speed_percent) {
+ ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
+ *speed = percent * smu->fan_max_rpm / 100;
+ }
mutex_unlock(&smu->mutex);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index cd7b411457ff..9f0d03ae3109 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1080,15 +1080,27 @@ static int arcturus_read_sensor(struct smu_context *smu,
return ret;
}
-static int arcturus_get_fan_speed_rpm(struct smu_context *smu,
- uint32_t *speed)
+static int arcturus_get_fan_speed_percent(struct smu_context *smu,
+ uint32_t *speed)
{
+ int ret;
+ u32 rpm;
+
if (!speed)
return -EINVAL;
- return arcturus_get_smu_metrics_data(smu,
- METRICS_CURR_FANSPEED,
- speed);
+ switch (smu_v11_0_get_fan_control_mode(smu)) {
+ case AMD_FAN_CTRL_AUTO:
+ ret = arcturus_get_smu_metrics_data(smu,
+ METRICS_CURR_FANSPEED,
+ &rpm);
+ if (!ret && smu->fan_max_rpm)
+ *speed = rpm * 100 / smu->fan_max_rpm;
+ return ret;
+ default:
+ *speed = smu->user_dpm_profile.fan_speed_percent;
+ return 0;
+ }
}
static int arcturus_get_fan_parameters(struct smu_context *smu)
@@ -1310,7 +1322,7 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
CMN2ASIC_MAPPING_WORKLOAD,
profile_mode);
if (workload_type < 0) {
- dev_err(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode);
+ dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode);
return -EINVAL;
}
@@ -2227,7 +2239,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;
- smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 0);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2264,6 +2276,8 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->pcie_link_speed =
arcturus_get_current_pcie_link_speed(smu);
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
*table = (void *)gpu_metrics;
return sizeof(struct gpu_metrics_v1_0);
@@ -2281,7 +2295,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.print_clk_levels = arcturus_print_clk_levels,
.force_clk_levels = arcturus_force_clk_levels,
.read_sensor = arcturus_read_sensor,
- .get_fan_speed_rpm = arcturus_get_fan_speed_rpm,
+ .get_fan_speed_percent = arcturus_get_fan_speed_percent,
.get_power_profile_mode = arcturus_get_power_profile_mode,
.set_power_profile_mode = arcturus_set_power_profile_mode,
.set_performance_level = arcturus_set_performance_level,
@@ -2326,7 +2340,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
- .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
.register_irq_handler = smu_v11_0_register_irq_handler,
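For the arcturus_get_fan_speed_percent() conversion above: in AMD_FAN_CTRL_AUTO mode the METRICS_CURR_FANSPEED value is an RPM reading that is scaled against fan_max_rpm before being reported, while manual mode returns the saved user percentage as-is. A quick worked example of the scaling (3300 RPM is an illustrative maximum, not a value from the patch):
/* Illustrative sketch only, not part of the patch.
 * percent = rpm * 100 / fan_max_rpm, as in the AUTO branch above;
 * e.g. 1650 RPM against a 3300 RPM maximum reads back as 50%.
 */
static uint32_t fan_rpm_to_percent(uint32_t rpm, uint32_t fan_max_rpm)
{
	if (!fan_max_rpm)
		return 0;	/* the hunk skips the scaling when unset */

	return rpm * 100 / fan_max_rpm;
}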
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 51e83123f72a..6e641f1513d8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1317,15 +1317,27 @@ static bool navi10_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
-static int navi10_get_fan_speed_rpm(struct smu_context *smu,
- uint32_t *speed)
+static int navi10_get_fan_speed_percent(struct smu_context *smu,
+ uint32_t *speed)
{
+ int ret;
+ u32 rpm;
+
if (!speed)
return -EINVAL;
- return navi10_get_smu_metrics_data(smu,
- METRICS_CURR_FANSPEED,
- speed);
+ switch (smu_v11_0_get_fan_control_mode(smu)) {
+ case AMD_FAN_CTRL_AUTO:
+ ret = navi10_get_smu_metrics_data(smu,
+ METRICS_CURR_FANSPEED,
+ &rpm);
+ if (!ret && smu->fan_max_rpm)
+ *speed = rpm * 100 / smu->fan_max_rpm;
+ return ret;
+ default:
+ *speed = smu->user_dpm_profile.fan_speed_percent;
+ return 0;
+ }
}
static int navi10_get_fan_parameters(struct smu_context *smu)
@@ -1673,7 +1685,7 @@ static int navi10_read_sensor(struct smu_context *smu,
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = navi10_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
+ ret = navi10_get_smu_metrics_data(smu, METRICS_AVERAGE_GFXCLK, (uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
@@ -2302,7 +2314,7 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
mutex_unlock(&smu->metrics_lock);
- smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 0);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2342,6 +2354,8 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->pcie_link_speed =
smu_v11_0_get_current_pcie_link_speed(smu);
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
*table = (void *)gpu_metrics;
return sizeof(struct gpu_metrics_v1_0);
@@ -2413,7 +2427,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.display_config_changed = navi10_display_config_changed,
.notify_smc_display_config = navi10_notify_smc_display_config,
.is_dpm_running = navi10_is_dpm_running,
- .get_fan_speed_rpm = navi10_get_fan_speed_rpm,
+ .get_fan_speed_percent = navi10_get_fan_speed_percent,
.get_power_profile_mode = navi10_get_power_profile_mode,
.set_power_profile_mode = navi10_set_power_profile_mode,
.set_watermarks_table = navi10_set_watermarks_table,
@@ -2456,7 +2470,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
- .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
.register_irq_handler = smu_v11_0_register_irq_handler,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 12b36eb0ff6a..af73e1430af5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -261,6 +261,11 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_GPO_BIT);
}
+ if ((adev->pm.pp_feature & PP_GFX_DCS_MASK) &&
+ (adev->asic_type > CHIP_SIENNA_CICHLID) &&
+ !(adev->flags & AMD_IS_APU))
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DCS_BIT);
+
if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
@@ -294,6 +299,12 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_PG_BIT);
+ if (smu->dc_controlled_by_gpio)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
+
+ if (amdgpu_aspm == 1)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
+
return 0;
}
@@ -314,6 +325,12 @@ static int sienna_cichlid_check_powerplay_table(struct smu_context *smu)
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
+ /*
+ * Instead of having its own buffer space and get overdrive_table copied,
+ * smu->od_settings just points to the actual overdrive_table
+ */
+ smu->od_settings = &powerplay_table->overdrive_table;
+
return 0;
}
@@ -907,6 +924,22 @@ static bool sienna_cichlid_is_support_fine_grained_dpm(struct smu_context *smu,
return dpm_desc->SnapToDiscrete == 0 ? true : false;
}
+static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
+ enum SMU_11_0_7_ODFEATURE_CAP cap)
+{
+ return od_table->cap[cap];
+}
+
+static void sienna_cichlid_get_od_setting_range(struct smu_11_0_7_overdrive_table *od_table,
+ enum SMU_11_0_7_ODSETTING_ID setting,
+ uint32_t *min, uint32_t *max)
+{
+ if (min)
+ *min = od_table->min[setting];
+ if (max)
+ *max = od_table->max[setting];
+}
+
static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
@@ -915,11 +948,16 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
+ struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)table_context->overdrive_table;
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
uint32_t gen_speed, lane_width;
+ uint32_t min_value, max_value;
+ uint32_t smu_version;
switch (clk_type) {
case SMU_GFXCLK:
@@ -995,6 +1033,70 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
(lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
"*" : "");
break;
+ case SMU_OD_SCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+
+ if (!sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS))
+ break;
+
+ size += sprintf(buf + size, "OD_SCLK:\n");
+ size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+ break;
+
+ case SMU_OD_MCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+
+ if (!sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_UCLK_LIMITS))
+ break;
+
+ size += sprintf(buf + size, "OD_MCLK:\n");
+ size += sprintf(buf + size, "0: %uMhz\n1: %uMHz\n", od_table->UclkFmin, od_table->UclkFmax);
+ break;
+
+ case SMU_OD_VDDGFX_OFFSET:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+
+ /*
+ * OD GFX Voltage Offset functionality is supported only by 58.41.0
+ * and onwards SMU firmwares.
+ */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ (smu_version < 0x003a2900))
+ break;
+
+ size += sprintf(buf + size, "OD_VDDGFX_OFFSET:\n");
+ size += sprintf(buf + size, "%dmV\n", od_table->VddGfxOffset);
+ break;
+
+ case SMU_OD_RANGE:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+
+ size = sprintf(buf, "%s:\n", "OD_RANGE");
+
+ if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
+ sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN,
+ &min_value, NULL);
+ sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMAX,
+ NULL, &max_value);
+ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_UCLK_LIMITS)) {
+ sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_UCLKFMIN,
+ &min_value, NULL);
+ sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_UCLKFMAX,
+ NULL, &max_value);
+ size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+ break;
+
default:
break;
}
@@ -1146,15 +1248,27 @@ static bool sienna_cichlid_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
-static int sienna_cichlid_get_fan_speed_rpm(struct smu_context *smu,
- uint32_t *speed)
+static int sienna_cichlid_get_fan_speed_percent(struct smu_context *smu,
+ uint32_t *speed)
{
+ int ret;
+ u32 rpm;
+
if (!speed)
return -EINVAL;
- return sienna_cichlid_get_smu_metrics_data(smu,
- METRICS_CURR_FANSPEED,
- speed);
+ switch (smu_v11_0_get_fan_control_mode(smu)) {
+ case AMD_FAN_CTRL_AUTO:
+ ret = sienna_cichlid_get_smu_metrics_data(smu,
+ METRICS_CURR_FANSPEED,
+ &rpm);
+ if (!ret && smu->fan_max_rpm)
+ *speed = rpm * 100 / smu->fan_max_rpm;
+ return ret;
+ default:
+ *speed = smu->user_dpm_profile.fan_speed_percent;
+ return 0;
+ }
}
static int sienna_cichlid_get_fan_parameters(struct smu_context *smu)
@@ -1694,6 +1808,243 @@ static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu,
return ret;
}
+static void sienna_cichlid_dump_od_table(struct smu_context *smu,
+ OverDriveTable_t *od_table)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t smu_version;
+
+ dev_dbg(smu->adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin,
+ od_table->GfxclkFmax);
+ dev_dbg(smu->adev->dev, "OD: Uclk: (%d, %d)\n", od_table->UclkFmin,
+ od_table->UclkFmax);
+
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (!((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ (smu_version < 0x003a2900)))
+ dev_dbg(smu->adev->dev, "OD: VddGfxOffset: %d\n", od_table->VddGfxOffset);
+}
+
+static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
+{
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)smu->smu_table.overdrive_table;
+ OverDriveTable_t *boot_od_table =
+ (OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
+ int ret = 0;
+
+ ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
+ 0, (void *)od_table, false);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
+ return ret;
+ }
+
+ memcpy(boot_od_table, od_table, sizeof(OverDriveTable_t));
+
+ sienna_cichlid_dump_od_table(smu, od_table);
+
+ return 0;
+}
+
+static int sienna_cichlid_od_setting_check_range(struct smu_context *smu,
+ struct smu_11_0_7_overdrive_table *od_table,
+ enum SMU_11_0_7_ODSETTING_ID setting,
+ uint32_t value)
+{
+ if (value < od_table->min[setting]) {
+ dev_warn(smu->adev->dev, "OD setting (%d, %d) is less than the minimum allowed (%d)\n",
+ setting, value, od_table->min[setting]);
+ return -EINVAL;
+ }
+ if (value > od_table->max[setting]) {
+ dev_warn(smu->adev->dev, "OD setting (%d, %d) is greater than the maximum allowed (%d)\n",
+ setting, value, od_table->max[setting]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)table_context->overdrive_table;
+ struct smu_11_0_7_overdrive_table *od_settings =
+ (struct smu_11_0_7_overdrive_table *)smu->od_settings;
+ struct amdgpu_device *adev = smu->adev;
+ enum SMU_11_0_7_ODSETTING_ID freq_setting;
+ uint16_t *freq_ptr;
+ int i, ret = 0;
+ uint32_t smu_version;
+
+ if (!smu->od_enabled) {
+ dev_warn(smu->adev->dev, "OverDrive is not enabled!\n");
+ return -EINVAL;
+ }
+
+ if (!smu->od_settings) {
+ dev_err(smu->adev->dev, "OD board limits are not set!\n");
+ return -ENOENT;
+ }
+
+ if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
+ dev_err(smu->adev->dev, "Overdrive table was not initialized!\n");
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (!sienna_cichlid_is_od_feature_supported(od_settings,
+ SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
+ dev_warn(smu->adev->dev, "GFXCLK_LIMITS not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ dev_info(smu->adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+
+ switch (input[i]) {
+ case 0:
+ if (input[i + 1] > od_table->GfxclkFmax) {
+ dev_info(smu->adev->dev, "GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
+ input[i + 1], od_table->GfxclkFmax);
+ return -EINVAL;
+ }
+
+ freq_setting = SMU_11_0_7_ODSETTING_GFXCLKFMIN;
+ freq_ptr = &od_table->GfxclkFmin;
+ break;
+
+ case 1:
+ if (input[i + 1] < od_table->GfxclkFmin) {
+ dev_info(smu->adev->dev, "GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
+ input[i + 1], od_table->GfxclkFmin);
+ return -EINVAL;
+ }
+
+ freq_setting = SMU_11_0_7_ODSETTING_GFXCLKFMAX;
+ freq_ptr = &od_table->GfxclkFmax;
+ break;
+
+ default:
+ dev_info(smu->adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
+ dev_info(smu->adev->dev, "Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+
+ ret = sienna_cichlid_od_setting_check_range(smu, od_settings,
+ freq_setting, input[i + 1]);
+ if (ret)
+ return ret;
+
+ *freq_ptr = (uint16_t)input[i + 1];
+ }
+ break;
+
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (!sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_UCLK_LIMITS)) {
+ dev_warn(smu->adev->dev, "UCLK_LIMITS not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ dev_info(smu->adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+
+ switch (input[i]) {
+ case 0:
+ if (input[i + 1] > od_table->UclkFmax) {
+ dev_info(smu->adev->dev, "UclkFmin (%ld) must be <= UclkFmax (%u)!\n",
+ input[i + 1], od_table->UclkFmax);
+ return -EINVAL;
+ }
+
+ freq_setting = SMU_11_0_7_ODSETTING_UCLKFMIN;
+ freq_ptr = &od_table->UclkFmin;
+ break;
+
+ case 1:
+ if (input[i + 1] < od_table->UclkFmin) {
+ dev_info(smu->adev->dev, "UclkFmax (%ld) must be >= UclkFmin (%u)!\n",
+ input[i + 1], od_table->UclkFmin);
+ return -EINVAL;
+ }
+
+ freq_setting = SMU_11_0_7_ODSETTING_UCLKFMAX;
+ freq_ptr = &od_table->UclkFmax;
+ break;
+
+ default:
+ dev_info(smu->adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]);
+ dev_info(smu->adev->dev, "Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+
+ ret = sienna_cichlid_od_setting_check_range(smu, od_settings,
+ freq_setting, input[i + 1]);
+ if (ret)
+ return ret;
+
+ *freq_ptr = (uint16_t)input[i + 1];
+ }
+ break;
+
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ memcpy(table_context->overdrive_table,
+ table_context->boot_overdrive_table,
+ sizeof(OverDriveTable_t));
+ fallthrough;
+
+ case PP_OD_COMMIT_DPM_TABLE:
+ sienna_cichlid_dump_od_table(smu, od_table);
+
+ ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
+ 0, (void *)od_table, true);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to import overdrive table!\n");
+ return ret;
+ }
+ break;
+
+ case PP_OD_EDIT_VDDGFX_OFFSET:
+ if (size != 1) {
+ dev_info(smu->adev->dev, "invalid number of parameters: %d\n", size);
+ return -EINVAL;
+ }
+
+ /*
+ * OD GFX Voltage Offset functionality is supported only by 58.41.0
+ * and onwards SMU firmwares.
+ */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ (smu_version < 0x003a2900)) {
+ dev_err(smu->adev->dev, "OD GFX Voltage offset functionality is supported "
+ "only by 58.41.0 and onwards SMU firmwares!\n");
+ return -EOPNOTSUPP;
+ }
+
+ od_table->VddGfxOffset = (int16_t)input[0];
+
+ sienna_cichlid_dump_od_table(smu, od_table);
+ break;
+
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
static int sienna_cichlid_run_btc(struct smu_context *smu)
{
return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
@@ -2610,7 +2961,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;
- smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 0);
gpu_metrics->temperature_edge = metrics->TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics->TemperatureHotspot;
@@ -2653,6 +3004,8 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->pcie_link_speed =
smu_v11_0_get_current_pcie_link_speed(smu);
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
*table = (void *)gpu_metrics;
return sizeof(struct gpu_metrics_v1_0);
@@ -2759,7 +3112,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.display_config_changed = sienna_cichlid_display_config_changed,
.notify_smc_display_config = sienna_cichlid_notify_smc_display_config,
.is_dpm_running = sienna_cichlid_is_dpm_running,
- .get_fan_speed_rpm = sienna_cichlid_get_fan_speed_rpm,
+ .get_fan_speed_percent = sienna_cichlid_get_fan_speed_percent,
.get_power_profile_mode = sienna_cichlid_get_power_profile_mode,
.set_power_profile_mode = sienna_cichlid_set_power_profile_mode,
.set_watermarks_table = sienna_cichlid_set_watermarks_table,
@@ -2802,7 +3155,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
- .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
.register_irq_handler = smu_v11_0_register_irq_handler,
@@ -2817,6 +3170,8 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.mode1_reset = smu_v11_0_mode1_reset,
.get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .set_default_od_settings = sienna_cichlid_set_default_od_settings,
+ .od_edit_dpm_table = sienna_cichlid_od_edit_dpm_table,
.run_btc = sienna_cichlid_run_btc,
.set_power_source = smu_v11_0_set_power_source,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
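One detail behind the OD_VDDGFX_OFFSET gating above: the 0x003a2900 cutoff is the packed form of SMU firmware 58.41.0, one byte per field (0x3a = 58, 0x29 = 41). A small sketch of that decoding; the exact field layout is an assumption consistent with the comments in the hunk:
/* Illustrative sketch only, not part of the patch. */
static void smu_decode_version(uint32_t smu_version)
{
	uint8_t major = (smu_version >> 16) & 0xff;	/* 0x3a -> 58 */
	uint8_t minor = (smu_version >>  8) & 0xff;	/* 0x29 -> 41 */
	uint8_t patch =  smu_version        & 0xff;	/* 0x00 ->  0 */

	pr_debug("SMU firmware %u.%u.%u\n", major, minor, patch);
}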
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index b279dbbbce6b..a6211858ead4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -78,6 +78,9 @@ MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin");
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE
+#define mmTHM_BACO_CNTL_ARCT 0xA7
+#define mmTHM_BACO_CNTL_ARCT_BASE_IDX 0
+
static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static int link_speed[] = {25, 50, 80, 160};
@@ -474,12 +477,14 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
int smu_v11_0_init_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
+ size_t size = smu->adev->asic_type == CHIP_VANGOGH ?
+ sizeof(struct smu_11_5_power_context) :
+ sizeof(struct smu_11_0_power_context);
- smu_power->power_context = kzalloc(sizeof(struct smu_11_0_power_context),
- GFP_KERNEL);
+ smu_power->power_context = kzalloc(size, GFP_KERNEL);
if (!smu_power->power_context)
return -ENOMEM;
- smu_power->power_context_size = sizeof(struct smu_11_0_power_context);
+ smu_power->power_context_size = size;
return 0;
}
@@ -1119,6 +1124,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_VANGOGH:
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
@@ -1136,10 +1142,10 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
- if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
- return AMD_FAN_CTRL_MANUAL;
- else
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
return AMD_FAN_CTRL_AUTO;
+ else
+ return smu->user_dpm_profile.fan_mode;
}
static int
@@ -1174,6 +1180,35 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
}
int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t duty100, duty;
+ uint64_t tmp64;
+
+ if (speed > 100)
+ speed = 100;
+
+ if (smu_v11_0_auto_fan_control(smu, 0))
+ return -EINVAL;
+
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ if (!duty100)
+ return -EINVAL;
+
+ tmp64 = (uint64_t)speed * duty100;
+ do_div(tmp64, 100);
+ duty = (uint32_t)tmp64;
+
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
+ CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
+
+ return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
+}
+
+int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode)
{
@@ -1181,7 +1216,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
switch (mode) {
case AMD_FAN_CTRL_NONE:
- ret = smu_v11_0_set_fan_speed_rpm(smu, smu->fan_max_rpm);
+ ret = smu_v11_0_set_fan_speed_percent(smu, 100);
break;
case AMD_FAN_CTRL_MANUAL:
ret = smu_v11_0_auto_fan_control(smu, 0);
@@ -1201,58 +1236,6 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
return ret;
}
-int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
- uint32_t speed)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret;
- uint32_t tach_period, crystal_clock_freq;
-
- if (!speed)
- return -EINVAL;
-
- ret = smu_v11_0_auto_fan_control(smu, 0);
- if (ret)
- return ret;
-
- /*
- * crystal_clock_freq div by 4 is required since the fan control
- * module refers to 25MHz
- */
-
- crystal_clock_freq = amdgpu_asic_get_xclk(adev) / 4;
- tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
- WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
- REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
- CG_TACH_CTRL, TARGET_PERIOD,
- tach_period));
-
- ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
-
- return ret;
-}
-
-int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
- uint32_t *speed)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t tach_period, crystal_clock_freq;
- uint64_t tmp64;
-
- tach_period = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
- CG_TACH_CTRL, TARGET_PERIOD);
- if (!tach_period)
- return -EINVAL;
-
- crystal_clock_freq = amdgpu_asic_get_xclk(adev);
-
- tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000;
- do_div(tmp64, (tach_period * 8));
- *speed = (uint32_t)tmp64;
-
- return 0;
-}
-
int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
@@ -1552,9 +1535,15 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
break;
default:
if (!ras || !ras->supported) {
- data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
- data |= 0x80000000;
- WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);
+ data |= 0x80000000;
+ WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data);
+ } else {
+ data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
+ data |= 0x80000000;
+ WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
+ }
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
} else {
@@ -2043,30 +2032,6 @@ int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
return link_speed[speed_level];
}
-void smu_v11_0_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
-{
- memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));
-
- gpu_metrics->common_header.structure_size =
- sizeof(struct gpu_metrics_v1_0);
- gpu_metrics->common_header.format_revision = 1;
- gpu_metrics->common_header.content_revision = 0;
-
- gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
-}
-
-void smu_v11_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics)
-{
- memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v2_0));
-
- gpu_metrics->common_header.structure_size =
- sizeof(struct gpu_metrics_v2_0);
- gpu_metrics->common_header.format_revision = 2;
- gpu_metrics->common_header.content_revision = 0;
-
- gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
-}
-
int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
bool enablement)
{
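For the smu_v11_0_set_fan_speed_percent() addition above: the requested percentage is turned into a static FDO duty value scaled by the FMAX_DUTY100 register field. A worked example of that arithmetic (a duty100 of 255 is illustrative only):
/* Illustrative sketch only, not part of the patch.
 * duty = speed% * duty100 / 100, mirroring the do_div() above;
 * e.g. 40% of a duty100 of 255 programs a static duty of 102.
 */
static uint32_t fan_percent_to_duty(uint32_t speed, uint32_t duty100)
{
	uint64_t tmp64;

	if (speed > 100)
		speed = 100;

	tmp64 = (uint64_t)speed * duty100;	/* 40 * 255 = 10200 */
	do_div(tmp64, 100);			/* 10200 / 100 = 102 */
	return (uint32_t)tmp64;
}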
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 5c1482d4ca43..7ddbaecb11c2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -31,6 +31,10 @@
#include "smu_v11_5_ppsmc.h"
#include "smu_v11_5_pmfw.h"
#include "smu_cmn.h"
+#include "soc15_common.h"
+#include "asic_reg/gc/gc_10_3_0_offset.h"
+#include "asic_reg/gc/gc_10_3_0_sh_mask.h"
+#include <asm/processor.h>
/*
* DO NOT use these for err/warn/info/debug messages.
@@ -59,7 +63,8 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 0),
MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 0),
- MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisableGfxOff, 0),
+ MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0),
+ MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 0),
MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 0),
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
@@ -76,7 +81,6 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 0),
MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0),
- MSG_MAP(Spare1, PPSMC_MSG_spare1, 0),
MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 0),
MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 0),
MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 0),
@@ -88,7 +92,6 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 0),
MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 0),
MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 0),
- MSG_MAP(Spare2, PPSMC_MSG_spare2, 0),
MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 0),
MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
@@ -118,6 +121,11 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(StopDramLogging, PPSMC_MSG_StopDramLogging, 0),
MSG_MAP(SetSoftMinCclk, PPSMC_MSG_SetSoftMinCclk, 0),
MSG_MAP(SetSoftMaxCclk, PPSMC_MSG_SetSoftMaxCclk, 0),
+ MSG_MAP(RequestActiveWgp, PPSMC_MSG_RequestActiveWgp, 0),
+ MSG_MAP(SetFastPPTLimit, PPSMC_MSG_SetFastPPTLimit, 0),
+ MSG_MAP(SetSlowPPTLimit, PPSMC_MSG_SetSlowPPTLimit, 0),
+ MSG_MAP(GetFastPPTLimit, PPSMC_MSG_GetFastPPTLimit, 0),
+ MSG_MAP(GetSlowPPTLimit, PPSMC_MSG_GetSlowPPTLimit, 0),
};
static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
@@ -162,6 +170,9 @@ static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
FEA_MAP(A55_DPM),
FEA_MAP(CVIP_DSP_DPM),
FEA_MAP(MSMU_LOW_POWER),
+ FEA_MAP_REVERSE(SOCCLK),
+ FEA_MAP_REVERSE(FCLK),
+ FEA_MAP_HALF_REVERSE(GFX),
};
static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = {
@@ -171,6 +182,14 @@ static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = {
TAB_MAP_VALID(DPMCLOCKS),
};
+static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
+};
+
static int vangogh_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -242,6 +261,12 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_SOCCLK:
*value = metrics->SocclkFrequency;
break;
+ case METRICS_AVERAGE_VCLK:
+ *value = metrics->VclkFrequency;
+ break;
+ case METRICS_AVERAGE_DCLK:
+ *value = metrics->DclkFrequency;
+ break;
case METRICS_AVERAGE_UCLK:
*value = metrics->MemclkFrequency;
break;
@@ -272,6 +297,10 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
case METRICS_VOLTAGE_VDDSOC:
*value = metrics->Voltage[1];
break;
+ case METRICS_AVERAGE_CPUCLK:
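+ /* unlike the scalar metrics above, this copies one 16-bit entry per CPU core,
+ * so the caller's buffer must hold cpu_core_num uint16_t values */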
+ memcpy(value, &metrics->CoreFrequency[0],
+ smu->cpu_core_num * sizeof(uint16_t));
+ break;
default:
*value = UINT_MAX;
break;
@@ -308,6 +337,13 @@ static int vangogh_init_smc_tables(struct smu_context *smu)
if (ret)
return ret;
+#ifdef CONFIG_X86
+ /* AMD x86 APU only */
+ smu->cpu_core_num = boot_cpu_data.x86_max_cores;
+#else
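+ /* non-x86 builds cannot query the boot CPU topology here; a 4-core APU is assumed as a fallback */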
+ smu->cpu_core_num = 4;
+#endif
+
return smu_v11_0_init_smc_tables(smu);
}
@@ -317,17 +353,13 @@ static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
if (enable) {
/* vcn dpm on is a prerequisite for vcn power gate messages */
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
- if (ret)
- return ret;
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
+ if (ret)
+ return ret;
} else {
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
- if (ret)
- return ret;
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
+ if (ret)
+ return ret;
}
return ret;
@@ -338,54 +370,18 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
int ret = 0;
if (enable) {
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
- if (ret)
- return ret;
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
+ if (ret)
+ return ret;
} else {
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
- if (ret)
- return ret;
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
+ if (ret)
+ return ret;
}
return ret;
}
-static int vangogh_get_allowed_feature_mask(struct smu_context *smu,
- uint32_t *feature_mask,
- uint32_t num)
-{
- struct amdgpu_device *adev = smu->adev;
-
- if (num > 2)
- return -EINVAL;
-
- memset(feature_mask, 0, sizeof(uint32_t) * num);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DPM_BIT)
- | FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT)
- | FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
- | FEATURE_MASK(FEATURE_PPT_BIT)
- | FEATURE_MASK(FEATURE_TDC_BIT)
- | FEATURE_MASK(FEATURE_FAN_CONTROLLER_BIT)
- | FEATURE_MASK(FEATURE_DS_LCLK_BIT)
- | FEATURE_MASK(FEATURE_DS_DCFCLK_BIT);
-
- if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT);
-
- if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT);
-
- if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);
-
- return 0;
-}
-
static bool vangogh_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
@@ -403,14 +399,68 @@ static bool vangogh_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
+static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t dpm_level, uint32_t *freq)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
+ return -EINVAL;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->SocClocks[dpm_level];
+ break;
+ case SMU_VCLK:
+ if (dpm_level >= clk_table->VcnClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VcnClocks[dpm_level].vclk;
+ break;
+ case SMU_DCLK:
+ if (dpm_level >= clk_table->VcnClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VcnClocks[dpm_level].dclk;
+ break;
+ case SMU_UCLK:
+ case SMU_MCLK:
+ if (dpm_level >= clk_table->NumDfPstatesEnabled)
+ return -EINVAL;
+ *freq = clk_table->DfPstateTable[dpm_level].memclk;
+
+ break;
+ case SMU_FCLK:
+ if (dpm_level >= clk_table->NumDfPstatesEnabled)
+ return -EINVAL;
+ *freq = clk_table->DfPstateTable[dpm_level].fclk;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int vangogh_print_fine_grain_clk(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int size = 0;
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ SmuMetrics_t metrics;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ int i, size = 0, ret = 0;
+ uint32_t cur_value = 0, value = 0, count = 0;
+ bool cur_value_match_level = false;
+
+ memset(&metrics, 0, sizeof(metrics));
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, false);
+ if (ret)
+ return ret;
switch (clk_type) {
case SMU_OD_SCLK:
- if (smu->od_enabled) {
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
size = sprintf(buf, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "0: %10uMhz\n",
(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
@@ -418,12 +468,71 @@ static int vangogh_print_fine_grain_clk(struct smu_context *smu,
(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
}
break;
+ case SMU_OD_CCLK:
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ size = sprintf(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sprintf(buf + size, "0: %10uMhz\n",
+ (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
+ }
+ break;
case SMU_OD_RANGE:
- if (smu->od_enabled) {
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
size = sprintf(buf, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
+ size += sprintf(buf + size, "CCLK: %7uMhz %10uMhz\n",
+ smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
+ }
+ break;
+ case SMU_SOCCLK:
+ /* socclk levels 3 ~ 6 use the same frequency on vangogh */
+ count = clk_table->NumSocClkLevelsEnabled;
+ cur_value = metrics.SocclkFrequency;
+ break;
+ case SMU_VCLK:
+ count = clk_table->VcnClkLevelsEnabled;
+ cur_value = metrics.VclkFrequency;
+ break;
+ case SMU_DCLK:
+ count = clk_table->VcnClkLevelsEnabled;
+ cur_value = metrics.DclkFrequency;
+ break;
+ case SMU_MCLK:
+ count = clk_table->NumDfPstatesEnabled;
+ cur_value = metrics.MemclkFrequency;
+ break;
+ case SMU_FCLK:
+ count = clk_table->NumDfPstatesEnabled;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ case SMU_MCLK:
+ case SMU_FCLK:
+ for (i = 0; i < count; i++) {
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+ if (ret)
+ return ret;
+ if (!value)
+ continue;
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ cur_value == value ? "*" : "");
+ if (cur_value == value)
+ cur_value_match_level = true;
}
+
+ if (!cur_value_match_level)
+ size += sprintf(buf + size, " %uMhz *\n", cur_value);
break;
default:
break;
@@ -432,6 +541,726 @@ static int vangogh_print_fine_grain_clk(struct smu_context *smu,
return size;
}
+static int vangogh_get_profiling_clk_mask(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *vclk_mask,
+ uint32_t *dclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *fclk_mask,
+ uint32_t *soc_mask)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ if (mclk_mask)
+ *mclk_mask = clk_table->NumDfPstatesEnabled - 1;
+
+ if (fclk_mask)
+ *fclk_mask = clk_table->NumDfPstatesEnabled - 1;
+
+ if (soc_mask)
+ *soc_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ if (mclk_mask)
+ *mclk_mask = 0;
+
+ if (fclk_mask)
+ *fclk_mask = 0;
+
+ if (soc_mask)
+ *soc_mask = 1;
+
+ if (vclk_mask)
+ *vclk_mask = 1;
+
+ if (dclk_mask)
+ *dclk_mask = 1;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) {
+ if (mclk_mask)
+ *mclk_mask = 0;
+
+ if (fclk_mask)
+ *fclk_mask = 0;
+
+ if (soc_mask)
+ *soc_mask = 1;
+
+ if (vclk_mask)
+ *vclk_mask = 1;
+
+ if (dclk_mask)
+ *dclk_mask = 1;
+ }
+
+ return 0;
+}
+
+static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu,
+ enum smu_clk_type clk_type)
+{
+ enum smu_feature_mask feature_id = 0;
+
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ case SMU_FCLK:
+ feature_id = SMU_FEATURE_DPM_FCLK_BIT;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
+ break;
+ case SMU_SOCCLK:
+ feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ feature_id = SMU_FEATURE_VCN_DPM_BIT;
+ break;
+ default:
+ return true;
+ }
+
+ if (!smu_cmn_feature_is_enabled(smu, feature_id))
+ return false;
+
+ return true;
+}
+
+static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ int ret = 0;
+ uint32_t soc_mask;
+ uint32_t vclk_mask;
+ uint32_t dclk_mask;
+ uint32_t mclk_mask;
+ uint32_t fclk_mask;
+ uint32_t clock_limit;
+
+ if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_FCLK:
+ clock_limit = smu->smu_table.boot_values.fclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ case SMU_VCLK:
+ clock_limit = smu->smu_table.boot_values.vclk;
+ break;
+ case SMU_DCLK:
+ clock_limit = smu->smu_table.boot_values.dclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+ /* clock in Mhz unit */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
+ return 0;
+ }
+ if (max) {
+ ret = vangogh_get_profiling_clk_mask(smu,
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
+ &vclk_mask,
+ &dclk_mask,
+ &mclk_mask,
+ &fclk_mask,
+ &soc_mask);
+ if (ret)
+ goto failed;
+
+ switch (clk_type) {
+ case SMU_UCLK:
+ case SMU_MCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_FCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_VCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_DCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+ }
+ if (min) {
+ switch (clk_type) {
+ case SMU_UCLK:
+ case SMU_MCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_FCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_VCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_DCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min);
+ if (ret)
+ goto failed;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+ }
+failed:
+ return ret;
+}
+
+static int vangogh_get_power_profile_mode(struct smu_context *smu,
+ char *buf)
+{
+ static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"};
+ uint32_t i, size = 0;
+ int16_t workload_type = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ /*
+ * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
+ * Not all profile modes are supported on vangogh.
+ */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ i);
+
+ if (workload_type < 0)
+ continue;
+
+ size += sprintf(buf + size, "%2d %14s%s\n",
+ i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ }
+
+ return size;
+}
+
+static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+{
+ int workload_type, ret;
+ uint32_t profile_mode = input[size];
+
+ if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
+ return -EINVAL;
+ }
+
+ if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
+ profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
+ return 0;
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ profile_mode);
+ if (workload_type < 0) {
+ dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
+ profile_mode);
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
+ 1 << workload_type,
+ NULL);
+ if (ret) {
+ dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
+ workload_type);
+ return ret;
+ }
+
+ smu->power_profile_mode = profile_mode;
+
+ return 0;
+}
+
+static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret = 0;
+
+ if (!vangogh_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinGfxClk,
+ min, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxGfxClk,
+ max, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinFclkByFreq,
+ min, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxFclkByFreq,
+ max, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinSocclkByFreq,
+ min, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxSocclkByFreq,
+ max, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_VCLK:
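+ /* the VCN min/max messages appear to carry the vclk target in the upper 16 bits,
+ * while dclk (below) uses the low bits; assumed SMU message layout */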
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinVcn,
+ min << 16, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxVcn,
+ max << 16, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_DCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinVcn,
+ min, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxVcn,
+ max, NULL);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int vangogh_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, uint32_t mask)
+{
+ uint32_t soft_min_level = 0, soft_max_level = 0;
+ uint32_t min_freq = 0, max_freq = 0;
+ int ret = 0;
+
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type,
+ soft_min_level, &min_freq);
+ if (ret)
+ return ret;
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type,
+ soft_max_level, &max_freq);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxSocclkByFreq,
+ max_freq, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinSocclkByFreq,
+ min_freq, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_MCLK:
+ case SMU_FCLK:
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_min_level, &min_freq);
+ if (ret)
+ return ret;
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_max_level, &max_freq);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxFclkByFreq,
+ max_freq, NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinFclkByFreq,
+ min_freq, NULL);
+ if (ret)
+ return ret;
+ break;
+ case SMU_VCLK:
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_min_level, &min_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_max_level, &max_freq);
+ if (ret)
+ return ret;
+
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinVcn,
+ min_freq << 16, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxVcn,
+ max_freq << 16, NULL);
+ if (ret)
+ return ret;
+
+ break;
+ case SMU_DCLK:
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_min_level, &min_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_clk_limited(smu,
+ clk_type, soft_max_level, &max_freq);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinVcn,
+ min_freq, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxVcn,
+ max_freq, NULL);
+ if (ret)
+ return ret;
+
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq, force_freq;
+ enum smu_clk_type clk_type;
+
+ enum smu_clk_type clks[] = {
+ SMU_SOCCLK,
+ SMU_VCLK,
+ SMU_DCLK,
+ SMU_MCLK,
+ SMU_FCLK,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ clk_type = clks[i];
+ ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
+ if (ret)
+ return ret;
+
+ force_freq = highest ? max_freq : min_freq;
+ ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int vangogh_unforce_dpm_levels(struct smu_context *smu)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq;
+ enum smu_clk_type clk_type;
+
+ struct clk_feature_map {
+ enum smu_clk_type clk_type;
+ uint32_t feature;
+ } clk_feature_map[] = {
+ {SMU_MCLK, SMU_FEATURE_DPM_FCLK_BIT},
+ {SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT},
+ {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
+ {SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT},
+ {SMU_DCLK, SMU_FEATURE_VCN_DPM_BIT},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
+
+ if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature))
+ continue;
+
+ clk_type = clk_feature_map[i].clk_type;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
+
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
+{
+ int ret = 0;
+ uint32_t socclk_freq = 0, fclk_freq = 0;
+ uint32_t vclk_freq = 0, dclk_freq = 0;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int vangogh_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ uint32_t soc_mask, mclk_mask, fclk_mask;
+ uint32_t vclk_mask = 0, dclk_mask = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = vangogh_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = vangogh_force_dpm_limit_value(smu, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = vangogh_unforce_dpm_levels(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinGfxClk,
+ VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxGfxClk,
+ VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = vangogh_get_profiling_clk_mask(smu, level,
+ &vclk_mask,
+ &dclk_mask,
+ &mclk_mask,
+ &fclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+
+ vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+ vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
+ vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
+ vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
+ vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask);
+
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn,
+ VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn,
+ VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL);
+ if (ret)
+ return ret;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = vangogh_get_profiling_clk_mask(smu, level,
+ NULL,
+ NULL,
+ &mclk_mask,
+ &fclk_mask,
+ NULL);
+ if (ret)
+ return ret;
+
+ vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+ vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL);
+ if (ret)
+ return ret;
+
+ ret = vangogh_set_peak_clock_by_device(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+ return ret;
+}
+
static int vangogh_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size)
@@ -493,6 +1322,12 @@ static int vangogh_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_CPU_CLK:
+ ret = vangogh_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_CPUCLK,
+ (uint32_t *)data);
+ *size = smu->cpu_core_num * sizeof(uint16_t);
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -514,7 +1349,7 @@ static int vangogh_set_watermarks_table(struct smu_context *smu,
if (clock_ranges) {
if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
- clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
+ clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
return -EINVAL;
for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
@@ -575,7 +1410,7 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;
- smu_v11_0_init_gpu_metrics_v2_0(gpu_metrics);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 0);
gpu_metrics->temperature_gfx = metrics.GfxTemperature;
gpu_metrics->temperature_soc = metrics.SocTemperature;
@@ -591,14 +1426,17 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
gpu_metrics->average_cpu_power = metrics.Power[0];
gpu_metrics->average_soc_power = metrics.Power[1];
+ gpu_metrics->average_gfx_power = metrics.Power[2];
memcpy(&gpu_metrics->average_core_power[0],
&metrics.CorePower[0],
sizeof(uint16_t) * 8);
gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
memcpy(&gpu_metrics->current_coreclk[0],
&metrics.CoreFrequency[0],
@@ -608,22 +1446,55 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
*table = (void *)gpu_metrics;
return sizeof(struct gpu_metrics_v2_0);
}
static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
- long input[], uint32_t size)
+ long input[], uint32_t size)
{
int ret = 0;
+ int i;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- if (!smu->od_enabled) {
- dev_warn(smu->adev->dev, "Fine grain is not enabled!\n");
+ if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
+ dev_warn(smu->adev->dev,
+ "pp_od_clk_voltage is not accessible if power_dpm_force_perfomance_level is not in manual mode!\n");
return -EINVAL;
}
switch (type) {
+ case PP_OD_EDIT_CCLK_VDDC_TABLE:
+ if (size != 3) {
+ dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n");
+ return -EINVAL;
+ }
+ if (input[0] >= smu->cpu_core_num) {
+ dev_err(smu->adev->dev, "core index is overflow, should be less than %d\n",
+ smu->cpu_core_num);
+ }
+ smu->cpu_core_id_select = input[0];
+ if (input[1] == 0) {
+ if (input[2] < smu->cpu_default_soft_min_freq) {
+ dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ input[2], smu->cpu_default_soft_min_freq);
+ return -EINVAL;
+ }
+ smu->cpu_actual_soft_min_freq = input[2];
+ } else if (input[1] == 1) {
+ if (input[2] > smu->cpu_default_soft_max_freq) {
+ dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ input[2], smu->cpu_default_soft_max_freq);
+ return -EINVAL;
+ }
+ smu->cpu_actual_soft_max_freq = input[2];
+ } else {
+ return -EINVAL;
+ }
+ break;
case PP_OD_EDIT_SCLK_VDDC_TABLE:
if (size != 2) {
dev_err(smu->adev->dev, "Input parameter number not correct\n");
@@ -632,14 +1503,16 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
if (input[0] == 0) {
if (input[1] < smu->gfx_default_hard_min_freq) {
- dev_warn(smu->adev->dev, "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ dev_warn(smu->adev->dev,
+ "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
input[1], smu->gfx_default_hard_min_freq);
return -EINVAL;
}
smu->gfx_actual_hard_min_freq = input[1];
} else if (input[0] == 1) {
if (input[1] > smu->gfx_default_soft_max_freq) {
- dev_warn(smu->adev->dev, "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ dev_warn(smu->adev->dev,
+ "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
input[1], smu->gfx_default_soft_max_freq);
return -EINVAL;
}
@@ -655,6 +1528,8 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
} else {
smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
smu->gfx_actual_hard_min_freq, NULL);
@@ -669,6 +1544,29 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
return ret;
}
+
+ if (smu->adev->pm.fw_version < 0x43f1b00) {
+ dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
+ break;
+ }
+
+ for (i = 0; i < smu->cpu_core_num; i++) {
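+ /* the message parameter packs the core index in the upper bits (<< 20) and the
+ * cclk target in MHz in the lower bits (assumed SMU encoding) */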
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
+ (i << 20) | smu->cpu_actual_soft_min_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set hard min cclk failed!");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
+ (i << 20) | smu->cpu_actual_soft_max_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set soft max cclk failed!");
+ return ret;
+ }
+ }
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -677,8 +1575,10 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
return -EINVAL;
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
- dev_err(smu->adev->dev, "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
- smu->gfx_actual_hard_min_freq, smu->gfx_actual_soft_max_freq);
+ dev_err(smu->adev->dev,
+ "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ smu->gfx_actual_hard_min_freq,
+ smu->gfx_actual_soft_max_freq);
return -EINVAL;
}
@@ -695,6 +1595,29 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
dev_err(smu->adev->dev, "Set soft max sclk failed!");
return ret;
}
+
+ if (smu->adev->pm.fw_version < 0x43f1b00) {
+ dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
+ break;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
+ ((smu->cpu_core_id_select << 20)
+ | smu->cpu_actual_soft_min_freq),
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set hard min cclk failed!");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
+ ((smu->cpu_core_id_select << 20)
+ | smu->cpu_actual_soft_max_freq),
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set soft max cclk failed!");
+ return ret;
+ }
}
break;
default:
@@ -720,18 +1643,245 @@ static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
smu->gfx_actual_hard_min_freq = 0;
smu->gfx_actual_soft_max_freq = 0;
+ smu->cpu_default_soft_min_freq = 1400;
+ smu->cpu_default_soft_max_freq = 3500;
+ smu->cpu_actual_soft_min_freq = 0;
+ smu->cpu_actual_soft_max_freq = 0;
+
+ return 0;
+}
+
+static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ DpmClocks_t *table = smu->smu_table.clocks_table;
+ int i;
+
+ if (!clock_table || !table)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
+ clock_table->SocClocks[i].Freq = table->SocClocks[i];
+ clock_table->SocClocks[i].Vol = table->SocVoltage[i];
+ }
+
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk;
+ clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage;
+ }
+
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk;
+ clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage;
+ }
+
return 0;
}
+
static int vangogh_system_features_control(struct smu_context *smu, bool en)
{
struct amdgpu_device *adev = smu->adev;
+ struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_mask[2];
+ int ret = 0;
+
+ if (adev->pm.fw_version >= 0x43f1700 && !en)
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
+ RLC_STATUS_OFF, NULL);
+
+ bitmap_zero(feature->enabled, feature->feature_num);
+ bitmap_zero(feature->supported, feature->feature_num);
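+ /* clear the cached feature bitmaps; when features stay enabled they are
+ * repopulated below from the mask reported back by the SMU */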
+
+ if (!en)
+ return ret;
+
+ ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+ if (ret)
+ return ret;
+
+ bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
+ feature->feature_num);
+ bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
+ feature->feature_num);
+
+ return 0;
+}
- if (adev->pm.fw_version >= 0x43f1700)
- return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
- en ? RLC_STATUS_NORMAL : RLC_STATUS_OFF, NULL);
- else
+static int vangogh_post_smu_init(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t tmp;
+ int ret = 0;
+ uint8_t aon_bits = 0;
+ /* Two CUs in one WGP */
+ uint32_t req_active_wgps = adev->gfx.cu_info.number/2;
+ uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
+ adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
+
+ /* the Allow message is sent only after the Enable message on Vangogh */
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Failed to Enable GfxOff!\n");
+ return ret;
+ }
+ } else {
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ dev_info(adev->dev, "If GFX DPM or power gate disabled, disable GFXOFF\n");
+ }
+
+ /* if all CUs are active, no need to power off any WGPs */
+ if (total_cu == adev->gfx.cu_info.number)
return 0;
+
+ /*
+ * Calculate the total bits number of always on WGPs for all SA/SEs in
+ * RLC_PG_ALWAYS_ON_WGP_MASK.
+ */
+ tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK));
+ tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK;
+
+ aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
+
+ /* Do not request any WGPs less than set in the AON_WGP_MASK */
+ if (aon_bits > req_active_wgps) {
+ dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n");
+ return 0;
+ } else {
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL);
+ }
+}
+
+static int vangogh_mode_reset(struct smu_context *smu, int type)
+{
+ int ret = 0, index = 0;
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_GfxDeviceDriverReset);
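+ /* an index of -EACCES means the reset message is not exposed by this firmware,
+ * which is treated as a successful no-op */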
+ if (index < 0)
+ return index == -EACCES ? 0 : index;
+
+ mutex_lock(&smu->message_lock);
+
+ ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type);
+
+ mutex_unlock(&smu->message_lock);
+
+ mdelay(10);
+
+ return ret;
+}
+
+static int vangogh_mode2_reset(struct smu_context *smu)
+{
+ return vangogh_mode_reset(smu, SMU_RESET_MODE_2);
+}
+
+static int vangogh_get_power_limit(struct smu_context *smu)
+{
+ struct smu_11_5_power_context *power_context =
+ smu->smu_power.power_context;
+ uint32_t ppt_limit;
+ int ret = 0;
+
+ if (smu->adev->pm.fw_version < 0x43f1e00)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit);
+ if (ret) {
+ dev_err(smu->adev->dev, "Get slow PPT limit failed!\n");
+ return ret;
+ }
+ /* convert from milliwatt to watt */
+ smu->current_power_limit = ppt_limit / 1000;
+ smu->max_power_limit = 29;
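+ /* the 29 W slow-PPT ceiling above and the 30 W fast-PPT ceiling below appear to be
+ * fixed platform defaults rather than firmware-reported values (assumption) */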
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit);
+ if (ret) {
+ dev_err(smu->adev->dev, "Get fast PPT limit failed!\n");
+ return ret;
+ }
+ /* convert from milliwatt to watt */
+ power_context->current_fast_ppt_limit = ppt_limit / 1000;
+ power_context->max_fast_ppt_limit = 30;
+
+ return ret;
+}
+
+static int vangogh_get_ppt_limit(struct smu_context *smu,
+ uint32_t *ppt_limit,
+ enum smu_ppt_limit_type type,
+ enum smu_ppt_limit_level level)
+{
+ struct smu_11_5_power_context *power_context =
+ smu->smu_power.power_context;
+
+ if (!power_context)
+ return -EOPNOTSUPP;
+
+ if (type == SMU_FAST_PPT_LIMIT) {
+ switch (level) {
+ case SMU_PPT_LIMIT_MAX:
+ *ppt_limit = power_context->max_fast_ppt_limit;
+ break;
+ case SMU_PPT_LIMIT_CURRENT:
+ *ppt_limit = power_context->current_fast_ppt_limit;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int vangogh_set_power_limit(struct smu_context *smu, uint32_t ppt_limit)
+{
+ struct smu_11_5_power_context *power_context =
+ smu->smu_power.power_context;
+ uint32_t limit_type = ppt_limit >> 24;
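+ /* callers encode the limit type in bits 31:24 of ppt_limit; the remaining bits carry the requested wattage */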
+ int ret = 0;
+
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (limit_type) {
+ case SMU_DEFAULT_PPT_LIMIT:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSlowPPTLimit,
+ ppt_limit * 1000, /* convert from watt to milliwatt */
+ NULL);
+ if (ret)
+ return ret;
+
+ smu->current_power_limit = ppt_limit;
+ break;
+ case SMU_FAST_PPT_LIMIT:
+ ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24);
+ if (ppt_limit > power_context->max_fast_ppt_limit) {
+ dev_err(smu->adev->dev,
+ "New power limit (%d) is over the max allowed %d\n",
+ ppt_limit, power_context->max_fast_ppt_limit);
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetFastPPTLimit,
+ ppt_limit * 1000, /* convert from watt to milliwatt */
+ NULL);
+ if (ret)
+ return ret;
+
+ power_context->current_fast_ppt_limit = ppt_limit;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
}
static const struct pptable_funcs vangogh_ppt_funcs = {
@@ -743,7 +1893,6 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.init_power = smu_v11_0_init_power,
.fini_power = smu_v11_0_fini_power,
.register_irq_handler = smu_v11_0_register_irq_handler,
- .get_allowed_feature_mask = vangogh_get_allowed_feature_mask,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
.send_smc_msg = smu_cmn_send_smc_msg,
@@ -762,6 +1911,18 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.set_default_dpm_table = vangogh_set_default_dpm_tables,
.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
.system_features_control = vangogh_system_features_control,
+ .feature_is_enabled = smu_cmn_feature_is_enabled,
+ .set_power_profile_mode = vangogh_set_power_profile_mode,
+ .get_power_profile_mode = vangogh_get_power_profile_mode,
+ .get_dpm_clock_table = vangogh_get_dpm_clock_table,
+ .force_clk_levels = vangogh_force_clk_levels,
+ .set_performance_level = vangogh_set_performance_level,
+ .post_init = vangogh_post_smu_init,
+ .mode2_reset = vangogh_mode2_reset,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
+ .get_ppt_limit = vangogh_get_ppt_limit,
+ .get_power_limit = vangogh_get_power_limit,
+ .set_power_limit = vangogh_set_power_limit,
};
void vangogh_set_ppt_funcs(struct smu_context *smu)
@@ -770,5 +1931,6 @@ void vangogh_set_ppt_funcs(struct smu_context *smu)
smu->message_map = vangogh_message_map;
smu->feature_map = vangogh_feature_mask_map;
smu->table_map = vangogh_table_map;
+ smu->workload_map = vangogh_workload_map;
smu->is_apu = true;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
index eab455493076..c56d4583dc72 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
@@ -28,9 +28,29 @@
extern void vangogh_set_ppt_funcs(struct smu_context *smu);
/* UMD PState Vangogh Msg Parameters in MHz */
-#define VANGOGH_UMD_PSTATE_GFXCLK 700
-#define VANGOGH_UMD_PSTATE_SOCCLK 678
-#define VANGOGH_UMD_PSTATE_FCLK 800
+#define VANGOGH_UMD_PSTATE_STANDARD_GFXCLK 1100
+#define VANGOGH_UMD_PSTATE_STANDARD_SOCCLK 600
+#define VANGOGH_UMD_PSTATE_STANDARD_FCLK 800
+#define VANGOGH_UMD_PSTATE_STANDARD_VCLK 705
+#define VANGOGH_UMD_PSTATE_STANDARD_DCLK 600
+
+#define VANGOGH_UMD_PSTATE_PEAK_GFXCLK 1300
+#define VANGOGH_UMD_PSTATE_PEAK_SOCCLK 600
+#define VANGOGH_UMD_PSTATE_PEAK_FCLK 800
+#define VANGOGH_UMD_PSTATE_PEAK_VCLK 705
+#define VANGOGH_UMD_PSTATE_PEAK_DCLK 600
+
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_GFXCLK 400
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_SOCCLK 1000
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_FCLK 800
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_VCLK 1000
+#define VANGOGH_UMD_PSTATE_MIN_SCLK_DCLK 800
+
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_GFXCLK 1100
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_SOCCLK 1000
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_FCLK 400
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_VCLK 1000
+#define VANGOGH_UMD_PSTATE_MIN_MCLK_DCLK 800
/* RLC Power Status */
#define RLC_STATUS_OFF 0
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 9a9697038016..5493388fcb10 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -56,8 +56,6 @@ static struct cmn2asic_msg_mapping renoir_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(PowerUpSdma, PPSMC_MSG_PowerUpSdma, 1),
MSG_MAP(SetHardMinIspclkByFreq, PPSMC_MSG_SetHardMinIspclkByFreq, 1),
MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1),
- MSG_MAP(Spare1, PPSMC_MSG_spare1, 1),
- MSG_MAP(Spare2, PPSMC_MSG_spare2, 1),
MSG_MAP(SetAllowFclkSwitch, PPSMC_MSG_SetAllowFclkSwitch, 1),
MSG_MAP(SetMinVideoGfxclkFreq, PPSMC_MSG_SetMinVideoGfxclkFreq, 1),
MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 1),
@@ -344,12 +342,142 @@ failed:
return ret;
}
+static int renoir_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ int ret = 0;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
+ dev_warn(smu->adev->dev,
+ "pp_od_clk_voltage is not accessible if power_dpm_force_perfomance_level is not in manual mode!\n");
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (size != 2) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (input[0] == 0) {
+ if (input[1] < smu->gfx_default_hard_min_freq) {
+ dev_warn(smu->adev->dev,
+ "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ input[1], smu->gfx_default_hard_min_freq);
+ return -EINVAL;
+ }
+ smu->gfx_actual_hard_min_freq = input[1];
+ } else if (input[0] == 1) {
+ if (input[1] > smu->gfx_default_soft_max_freq) {
+ dev_warn(smu->adev->dev,
+ "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ input[1], smu->gfx_default_soft_max_freq);
+ return -EINVAL;
+ }
+ smu->gfx_actual_soft_max_freq = input[1];
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinGfxClk,
+ smu->gfx_actual_hard_min_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Restore the default hard min sclk failed!");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxGfxClk,
+ smu->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
+ return ret;
+ }
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ } else {
+ if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
+ dev_err(smu->adev->dev,
+ "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ smu->gfx_actual_hard_min_freq,
+ smu->gfx_actual_soft_max_freq);
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinGfxClk,
+ smu->gfx_actual_hard_min_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set hard min sclk failed!");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxGfxClk,
+ smu->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set soft max sclk failed!");
+ return ret;
+ }
+ }
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
+static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+ uint32_t min = 0, max = 0;
+ uint32_t ret = 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMinGfxclkFrequency,
+ 0, &min);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMaxGfxclkFrequency,
+ 0, &max);
+ if (ret)
+ return ret;
+
+ smu->gfx_default_hard_min_freq = min;
+ smu->gfx_default_soft_max_freq = max;
+ smu->gfx_actual_hard_min_freq = 0;
+ smu->gfx_actual_soft_max_freq = 0;
+
+ return 0;
+}
+
static int renoir_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
SmuMetrics_t metrics;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
bool cur_value_match_level = false;
memset(&metrics, 0, sizeof(metrics));
@@ -359,6 +487,30 @@ static int renoir_print_clk_levels(struct smu_context *smu,
return ret;
switch (clk_type) {
+ case SMU_OD_RANGE:
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMinGfxclkFrequency,
+ 0, &min);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetMaxGfxclkFrequency,
+ 0, &max);
+ if (ret)
+ return ret;
+ size += sprintf(buf + size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max);
+ }
+ break;
+ case SMU_OD_SCLK:
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+ max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
+ size += sprintf(buf + size, "OD_SCLK\n");
+ size += sprintf(buf + size, "0:%10uMhz\n", min);
+ size += sprintf(buf + size, "1:%10uMhz\n", max);
+ }
+ break;
case SMU_GFXCLK:
case SMU_SCLK:
/* retrieve table returned parameters, unit is MHz */
@@ -399,23 +551,35 @@ static int renoir_print_clk_levels(struct smu_context *smu,
cur_value = metrics.ClockFrequency[CLOCK_FCLK];
break;
default:
- return -EINVAL;
+ break;
}
- for (i = 0; i < count; i++) {
- ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
- if (ret)
- return ret;
- if (!value)
- continue;
- size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
- cur_value == value ? "*" : "");
- if (cur_value == value)
- cur_value_match_level = true;
- }
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ case SMU_SOCCLK:
+ case SMU_MCLK:
+ case SMU_DCEFCLK:
+ case SMU_FCLK:
+ for (i = 0; i < count; i++) {
+ ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
+ if (ret)
+ return ret;
+ if (!value)
+ continue;
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ cur_value == value ? "*" : "");
+ if (cur_value == value)
+ cur_value_match_level = true;
+ }
+
+ if (!cur_value_match_level)
+ size += sprintf(buf + size, " %uMhz *\n", cur_value);
- if (!cur_value_match_level)
- size += sprintf(buf + size, " %uMhz *\n", cur_value);
+ break;
+ default:
+ break;
+ }
return size;
}
@@ -667,6 +831,10 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
return -EINVAL;
}
+ if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
+ profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
+ return 0;
+
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
@@ -676,7 +844,7 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
* TODO: If some case need switch to powersave/default power mode
* then can consider enter WORKLOAD_COMPUTE/WORKLOAD_CUSTOM for power saving.
*/
- dev_err_once(smu->adev->dev, "Unsupported power profile mode %d on RENOIR\n", profile_mode);
+ dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on RENOIR\n", profile_mode);
return -EINVAL;
}
@@ -725,15 +893,27 @@ static int renoir_set_performance_level(struct smu_context *smu,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = renoir_force_dpm_limit_value(smu, true);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = renoir_force_dpm_limit_value(smu, false);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = renoir_unforce_dpm_levels(smu);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinGfxClk,
RENOIR_UMD_PSTATE_GFXCLK,
@@ -786,6 +966,9 @@ static int renoir_set_performance_level(struct smu_context *smu,
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = renoir_get_profiling_clk_mask(smu, level,
&sclk_mask,
&mclk_mask,
@@ -797,6 +980,9 @@ static int renoir_set_performance_level(struct smu_context *smu,
renoir_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+
ret = renoir_set_peak_clock_by_device(smu);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
@@ -944,7 +1130,7 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->AverageUvdActivity / 100;
break;
case METRICS_AVERAGE_SOCKETPOWER:
- *value = metrics->CurrentSocketPower << 8;
+ *value = (metrics->CurrentSocketPower << 8) / 1000;
break;
case METRICS_TEMPERATURE_EDGE:
*value = (metrics->GfxTemperature / 100) *
@@ -1072,7 +1258,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;
- smu_v12_0_init_gpu_metrics_v2_0(gpu_metrics);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 0);
gpu_metrics->temperature_gfx = metrics.GfxTemperature;
gpu_metrics->temperature_soc = metrics.SocTemperature;
@@ -1113,6 +1299,8 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->fan_pwm = metrics.FanPwm;
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
*table = (void *)gpu_metrics;
return sizeof(struct gpu_metrics_v2_0);
@@ -1160,6 +1348,8 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = renoir_get_gpu_metrics,
.gfx_state_change_set = renoir_gfx_state_change_set,
+ .set_fine_grain_gfx_freq_parameters = renoir_set_fine_grain_gfx_freq_parameters,
+ .od_edit_dpm_table = renoir_od_edit_dpm_table,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 06abf2a7ce9e..6cc4855c8a37 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -278,15 +278,3 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
return ret;
}
-
-void smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics)
-{
- memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v2_0));
-
- gpu_metrics->common_header.structure_size =
- sizeof(struct gpu_metrics_v2_0);
- gpu_metrics->common_header.format_revision = 2;
- gpu_metrics->common_header.content_revision = 0;
-
- gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
-}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index f8260769061c..bcedd4d92e35 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -68,14 +68,6 @@ static const char *smu_get_message_name(struct smu_context *smu,
return __smu_message_names[type];
}
-static void smu_cmn_send_msg_without_waiting(struct smu_context *smu,
- uint16_t msg)
-{
- struct amdgpu_device *adev = smu->adev;
-
- WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
-}
-
static void smu_cmn_read_arg(struct smu_context *smu,
uint32_t *arg)
{
@@ -92,7 +84,7 @@ static int smu_cmn_wait_for_response(struct smu_context *smu)
for (i = 0; i < timeout; i++) {
cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
- return cur_value == 0x1 ? 0 : -EIO;
+ return cur_value;
udelay(1);
}
@@ -101,7 +93,29 @@ static int smu_cmn_wait_for_response(struct smu_context *smu)
if (i == timeout)
return -ETIME;
- return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
+ return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
+}
+
+int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
+ uint16_t msg, uint32_t param)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret;
+
+ ret = smu_cmn_wait_for_response(smu);
+ if (ret != 0x1) {
+ dev_err(adev->dev, "Msg issuing pre-check failed and "
+ "SMU may be not in the right state!\n");
+ if (ret != -ETIME)
+ ret = -EIO;
+ return ret;
+ }
+
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+
+ return 0;
}
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
@@ -122,29 +136,28 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
return index == -EACCES ? 0 : index;
mutex_lock(&smu->message_lock);
- ret = smu_cmn_wait_for_response(smu);
- if (ret) {
- dev_err(adev->dev, "Msg issuing pre-check failed and "
- "SMU may be not in the right state!\n");
+ ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, param);
+ if (ret)
goto out;
- }
-
- WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
-
- smu_cmn_send_msg_without_waiting(smu, (uint16_t)index);
ret = smu_cmn_wait_for_response(smu);
- if (ret) {
- dev_err(adev->dev, "failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
- smu_get_message_name(smu, msg), index, param, ret);
+ if (ret != 0x1) {
+ if (ret == -ETIME) {
+ dev_err(adev->dev, "message: %15s (%d) \tparam: 0x%08x is timeout (no response)\n",
+ smu_get_message_name(smu, msg), index, param);
+ } else {
+ dev_err(adev->dev, "failed send message: %15s (%d) \tparam: 0x%08x response %#x\n",
+ smu_get_message_name(smu, msg), index, param,
+ ret);
+ ret = -EIO;
+ }
goto out;
}
if (read_arg)
smu_cmn_read_arg(smu, read_arg);
+ ret = 0; /* 0 as driver return value */
out:
mutex_unlock(&smu->message_lock);
return ret;
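After this rework, callers of smu_cmn_send_smc_msg_with_param() are unchanged on the surface but now see -ETIME for a silent firmware versus -EIO for an explicit failure. A minimal caller sketch (the message name is only an illustrative member of enum smu_message_type, not something this patch touches):

	static int example_query_if_version(struct smu_context *smu, uint32_t *version)
	{
		/* Sends the message, waits for the response register and, on
		 * success, reads the returned argument into *version. */
		return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDriverIfVersion,
						       0, version);
	}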
@@ -269,11 +282,13 @@ int smu_cmn_feature_is_enabled(struct smu_context *smu,
enum smu_feature_mask mask)
{
struct smu_feature *feature = &smu->smu_feature;
+ struct amdgpu_device *adev = smu->adev;
int feature_id;
int ret = 0;
- if (smu->is_apu)
+ if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH)
return 1;
+
feature_id = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_FEATURE,
mask);
@@ -731,3 +746,31 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
return ret;
}
+
+void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
+{
+ struct metrics_table_header *header = (struct metrics_table_header *)table;
+ uint16_t structure_size;
+
+#define METRICS_VERSION(a, b)	(((a) << 16) | (b))
+
+ switch (METRICS_VERSION(frev, crev)) {
+ case METRICS_VERSION(1, 0):
+ structure_size = sizeof(struct gpu_metrics_v1_0);
+ break;
+ case METRICS_VERSION(2, 0):
+ structure_size = sizeof(struct gpu_metrics_v2_0);
+ break;
+ default:
+ return;
+ }
+
+#undef METRICS_VERSION
+
+ memset(header, 0xFF, structure_size);
+
+ header->format_revision = frev;
+ header->content_revision = crev;
+ header->structure_size = structure_size;
+
+}
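METRICS_VERSION() packs the format revision into the upper 16 bits and the content revision into the lower 16, so the renoir call above matches the (2 << 16) | 0 case. A sketch of the expected calling pattern, assuming the driver keeps its table in smu_table->gpu_metrics_table as renoir does:

	static ssize_t example_get_gpu_metrics(struct smu_context *smu, void **table)
	{
		struct smu_table_context *smu_table = &smu->smu_table;
		struct gpu_metrics_v2_0 *gpu_metrics =
			(struct gpu_metrics_v2_0 *)smu_table->gpu_metrics_table;

		/* Fills the header and poisons the body with 0xFF. */
		smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 0);

		/* ... populate the individual fields from firmware metrics ... */
		gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

		*table = (void *)gpu_metrics;
		return sizeof(struct gpu_metrics_v2_0);
	}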
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 01e825d83d8d..c69250185575 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -26,6 +26,8 @@
#include "amdgpu_smu.h"
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4)
+int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
+ uint16_t msg, uint32_t param);
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
uint32_t param,
@@ -95,5 +97,7 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
void *metrics_table,
bool bypass_cache);
+void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
+
#endif
#endif
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 042d7b54a6de..895cdd991af6 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -162,15 +162,10 @@ static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
.atomic_update = arc_pgu_plane_atomic_update,
};
-static void arc_pgu_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_cleanup(plane);
-}
-
static const struct drm_plane_funcs arc_pgu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = arc_pgu_plane_destroy,
+ .destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -213,7 +208,7 @@ int arc_pgu_setup_crtc(struct drm_device *drm)
ret = drm_crtc_init_with_planes(drm, &arcpgu->crtc, primary, NULL,
&arc_pgu_crtc_funcs, NULL);
if (ret) {
- arc_pgu_plane_destroy(primary);
+ drm_plane_cleanup(primary);
return ret;
}
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index f164818ec477..077d006b1fbf 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -145,7 +145,7 @@ static void arcpgu_debugfs_init(struct drm_minor *minor)
}
#endif
-static struct drm_driver arcpgu_drm_driver = {
+static const struct drm_driver arcpgu_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "arcpgu",
.desc = "ARC PGU Controller",
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 4b485eb512e2..59172acb9738 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -550,7 +550,6 @@ static void komeda_crtc_vblank_disable(struct drm_crtc *crtc)
}
static const struct drm_crtc_funcs komeda_crtc_funcs = {
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 108e7a31bd26..494075ddbef6 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -510,7 +510,6 @@ static void malidp_crtc_disable_vblank(struct drm_crtc *crtc)
}
static const struct drm_crtc_funcs malidp_crtc_funcs = {
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 3ebcf5a52c8b..b7bb90ae787f 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -820,7 +820,6 @@ static const struct drm_crtc_funcs armada_crtc_funcs = {
.cursor_set = armada_drm_crtc_cursor_set,
.cursor_move = armada_drm_crtc_cursor_move,
.destroy = armada_drm_crtc_destroy,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c
index 742d43a7edf4..fac1ee79c372 100644
--- a/drivers/gpu/drm/ast/ast_cursor.c
+++ b/drivers/gpu/drm/ast/ast_cursor.c
@@ -39,7 +39,6 @@ static void ast_cursor_fini(struct ast_private *ast)
for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
gbo = ast->cursor.gbo[i];
- drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]);
drm_gem_vram_unpin(gbo);
drm_gem_vram_put(gbo);
}
@@ -53,14 +52,13 @@ static void ast_cursor_release(struct drm_device *dev, void *ptr)
}
/*
- * Allocate cursor BOs and pins them at the end of VRAM.
+ * Allocate cursor BOs and pin them at the end of VRAM.
*/
int ast_cursor_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
size_t size, i;
struct drm_gem_vram_object *gbo;
- struct dma_buf_map map;
int ret;
size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
@@ -77,15 +75,7 @@ int ast_cursor_init(struct ast_private *ast)
drm_gem_vram_put(gbo);
goto err_drm_gem_vram_put;
}
- ret = drm_gem_vram_vmap(gbo, &map);
- if (ret) {
- drm_gem_vram_unpin(gbo);
- drm_gem_vram_put(gbo);
- goto err_drm_gem_vram_put;
- }
-
ast->cursor.gbo[i] = gbo;
- ast->cursor.map[i] = map;
}
return drmm_add_action_or_reset(dev, ast_cursor_release, NULL);
@@ -94,7 +84,6 @@ err_drm_gem_vram_put:
while (i) {
--i;
gbo = ast->cursor.gbo[i];
- drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]);
drm_gem_vram_unpin(gbo);
drm_gem_vram_put(gbo);
}
@@ -168,38 +157,37 @@ static void update_cursor_image(u8 __iomem *dst, const u8 *src, int width, int h
int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb)
{
struct drm_device *dev = &ast->base;
- struct drm_gem_vram_object *gbo;
- struct dma_buf_map map;
- int ret;
- void *src;
+ struct drm_gem_vram_object *dst_gbo = ast->cursor.gbo[ast->cursor.next_index];
+ struct drm_gem_vram_object *src_gbo = drm_gem_vram_of_gem(fb->obj[0]);
+ struct dma_buf_map src_map, dst_map;
void __iomem *dst;
+ void *src;
+ int ret;
if (drm_WARN_ON_ONCE(dev, fb->width > AST_MAX_HWC_WIDTH) ||
drm_WARN_ON_ONCE(dev, fb->height > AST_MAX_HWC_HEIGHT))
return -EINVAL;
- gbo = drm_gem_vram_of_gem(fb->obj[0]);
-
- ret = drm_gem_vram_pin(gbo, 0);
+ ret = drm_gem_vram_vmap(src_gbo, &src_map);
if (ret)
return ret;
- ret = drm_gem_vram_vmap(gbo, &map);
- if (ret)
- goto err_drm_gem_vram_unpin;
- src = map.vaddr; /* TODO: Use mapping abstraction properly */
+ src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
- dst = ast->cursor.map[ast->cursor.next_index].vaddr_iomem;
+ ret = drm_gem_vram_vmap(dst_gbo, &dst_map);
+ if (ret)
+ goto err_drm_gem_vram_vunmap;
+ dst = dst_map.vaddr_iomem; /* TODO: Use mapping abstraction properly */
/* do data transfer to cursor BO */
update_cursor_image(dst, src, fb->width, fb->height);
- drm_gem_vram_vunmap(gbo, &map);
- drm_gem_vram_unpin(gbo);
+ drm_gem_vram_vunmap(dst_gbo, &dst_map);
+ drm_gem_vram_vunmap(src_gbo, &src_map);
return 0;
-err_drm_gem_vram_unpin:
- drm_gem_vram_unpin(gbo);
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(src_gbo, &src_map);
return ret;
}
@@ -251,17 +239,26 @@ static void ast_cursor_set_location(struct ast_private *ast, u16 x, u16 y,
void ast_cursor_show(struct ast_private *ast, int x, int y,
unsigned int offset_x, unsigned int offset_y)
{
+ struct drm_device *dev = &ast->base;
+ struct drm_gem_vram_object *gbo = ast->cursor.gbo[ast->cursor.next_index];
+ struct dma_buf_map map;
u8 x_offset, y_offset;
u8 __iomem *dst;
u8 __iomem *sig;
u8 jreg;
+ int ret;
- dst = ast->cursor.map[ast->cursor.next_index].vaddr;
+ ret = drm_gem_vram_vmap(gbo, &map);
+ if (drm_WARN_ONCE(dev, ret, "drm_gem_vram_vmap() failed, ret=%d\n", ret))
+ return;
+ dst = map.vaddr_iomem; /* TODO: Use mapping abstraction properly */
sig = dst + AST_HWC_SIZE;
writel(x, sig + AST_HWC_SIGNATURE_X);
writel(y, sig + AST_HWC_SIGNATURE_Y);
+ drm_gem_vram_vunmap(gbo, &map);
+
if (x < 0) {
x_offset = (-x) + offset_x;
x = 0;
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 667b450606ef..ea8164e7a6dc 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -147,7 +147,7 @@ static int ast_drm_freeze(struct drm_device *dev)
error = drm_mode_config_helper_suspend(dev);
if (error)
return error;
- pci_save_state(dev->pdev);
+ pci_save_state(to_pci_dev(dev->dev));
return 0;
}
@@ -162,7 +162,7 @@ static int ast_drm_resume(struct drm_device *dev)
{
int ret;
- if (pci_enable_device(dev->pdev))
+ if (pci_enable_device(to_pci_dev(dev->dev)))
return -EIO;
ret = ast_drm_thaw(dev);
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index ccaff81924ee..f871fc36c2f7 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -28,7 +28,6 @@
#ifndef __AST_DRV_H__
#define __AST_DRV_H__
-#include <linux/dma-buf-map.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/io.h>
@@ -133,7 +132,6 @@ struct ast_private {
struct {
struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM];
- struct dma_buf_map map[AST_DEFAULT_HWC_NUM];
unsigned int next_index;
} cursor;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 1b13199858cb..0ac3c2039c4b 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -67,8 +67,9 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
{
- struct device_node *np = dev->pdev->dev.of_node;
+ struct device_node *np = dev->dev->of_node;
struct ast_private *ast = to_ast_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
uint32_t data, jregd0, jregd1;
/* Defaults */
@@ -85,7 +86,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
}
/* Not all families have a P2A bridge */
- if (dev->pdev->device != PCI_CHIP_AST2000)
+ if (pdev->device != PCI_CHIP_AST2000)
return;
/*
@@ -119,6 +120,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
static int ast_detect_chip(struct drm_device *dev, bool *need_post)
{
struct ast_private *ast = to_ast_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
uint32_t jreg, scu_rev;
/*
@@ -143,19 +145,19 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast_detect_config_mode(dev, &scu_rev);
/* Identify chipset */
- if (dev->pdev->revision >= 0x50) {
+ if (pdev->revision >= 0x50) {
ast->chip = AST2600;
drm_info(dev, "AST 2600 detected\n");
- } else if (dev->pdev->revision >= 0x40) {
+ } else if (pdev->revision >= 0x40) {
ast->chip = AST2500;
drm_info(dev, "AST 2500 detected\n");
- } else if (dev->pdev->revision >= 0x30) {
+ } else if (pdev->revision >= 0x30) {
ast->chip = AST2400;
drm_info(dev, "AST 2400 detected\n");
- } else if (dev->pdev->revision >= 0x20) {
+ } else if (pdev->revision >= 0x20) {
ast->chip = AST2300;
drm_info(dev, "AST 2300 detected\n");
- } else if (dev->pdev->revision >= 0x10) {
+ } else if (pdev->revision >= 0x10) {
switch (scu_rev & 0x0300) {
case 0x0200:
ast->chip = AST1100;
@@ -265,7 +267,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
static int ast_get_dram_info(struct drm_device *dev)
{
- struct device_node *np = dev->pdev->dev.of_node;
+ struct device_node *np = dev->dev->of_node;
struct ast_private *ast = to_ast_private(dev);
uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
uint32_t denum, num, div, ref_pll, dsel;
@@ -409,10 +411,9 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
return ast;
dev = &ast->base;
- dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
- ast->regs = pci_iomap(dev->pdev, 1, 0);
+ ast->regs = pci_iomap(pdev, 1, 0);
if (!ast->regs)
return ERR_PTR(-EIO);
@@ -421,14 +422,14 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
* assume the chip has MMIO enabled by default (rev 0x20
* and higher).
*/
- if (!(pci_resource_flags(dev->pdev, 2) & IORESOURCE_IO)) {
+ if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
drm_info(dev, "platform has no IO space, trying MMIO\n");
ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
}
/* "map" IO regs if the above hasn't done so already */
if (!ast->ioregs) {
- ast->ioregs = pci_iomap(dev->pdev, 2, 0);
+ ast->ioregs = pci_iomap(pdev, 2, 0);
if (!ast->ioregs)
return ERR_PTR(-EIO);
}
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 8392ebde504b..7592f1b9e1f1 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -77,31 +77,32 @@ static u32 ast_get_vram_size(struct ast_private *ast)
static void ast_mm_release(struct drm_device *dev, void *ptr)
{
struct ast_private *ast = to_ast_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
arch_phys_wc_del(ast->fb_mtrr);
- arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
+ arch_io_free_memtype_wc(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
}
int ast_mm_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 vram_size;
int ret;
vram_size = ast_get_vram_size(ast);
- ret = drmm_vram_helper_init(dev, pci_resource_start(dev->pdev, 0),
- vram_size);
+ ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), vram_size);
if (ret) {
drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
return ret;
}
- arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
- ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
+ arch_io_reserve_memtype_wc(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
return drmm_add_action_or_reset(dev, ast_mm_release, NULL);
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 9db371f4054f..988b270fea5e 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -903,7 +903,6 @@ static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc,
static const struct drm_crtc_funcs ast_crtc_funcs = {
.reset = ast_crtc_reset,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -1107,6 +1106,7 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
int ast_mode_config_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
ret = ast_cursor_init(ast);
@@ -1122,7 +1122,7 @@ int ast_mode_config_init(struct ast_private *ast)
dev->mode_config.min_height = 0;
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
+ dev->mode_config.fb_base = pci_resource_start(pdev, 0);
if (ast->chip == AST2100 ||
ast->chip == AST2200 ||
@@ -1259,7 +1259,7 @@ static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
- i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 8902c2f84bf9..0607658dde51 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -71,6 +71,7 @@ static void
ast_set_def_ext_reg(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 i, index, reg;
const u8 *ext_reg_info;
@@ -80,7 +81,7 @@ ast_set_def_ext_reg(struct drm_device *dev)
if (ast->chip == AST2300 || ast->chip == AST2400 ||
ast->chip == AST2500) {
- if (dev->pdev->revision >= 0x20)
+ if (pdev->revision >= 0x20)
ext_reg_info = extreginfo_ast2300;
else
ext_reg_info = extreginfo_ast2300a0;
@@ -366,11 +367,12 @@ static void ast_init_dram_reg(struct drm_device *dev)
void ast_post_gpu(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 reg;
- pci_read_config_dword(dev->pdev, 0x04, &reg);
+ pci_read_config_dword(pdev, 0x04, &reg);
reg |= 0x3;
- pci_write_config_dword(dev->pdev, 0x04, reg);
+ pci_write_config_dword(pdev, 0x04, reg);
ast_enable_vga(dev);
ast_open_key(ast);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index c58fa00b4848..05ad75d155e8 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -473,7 +473,6 @@ static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = {
.atomic_destroy_state = atmel_hlcdc_crtc_destroy_state,
.enable_vblank = atmel_hlcdc_crtc_enable_vblank,
.disable_vblank = atmel_hlcdc_crtc_disable_vblank,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
int atmel_hlcdc_crtc_create(struct drm_device *dev)
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index fd454225fd19..b469624fe40d 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -121,7 +121,6 @@ static int bochs_pci_probe(struct pci_dev *pdev,
if (ret)
goto err_free_dev;
- dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
ret = bochs_load(dev);
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index dce4672e3fc8..2d7380a9890e 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -110,7 +110,7 @@ int bochs_hw_load_edid(struct bochs_device *bochs)
int bochs_hw_init(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned long addr, size, mem, ioaddr, iosize;
u16 id;
@@ -201,7 +201,7 @@ void bochs_hw_fini(struct drm_device *dev)
release_region(VBE_DISPI_IOPORT_INDEX, 2);
if (bochs->fb_map)
iounmap(bochs->fb_map);
- pci_release_regions(dev->pdev);
+ pci_release_regions(to_pci_dev(dev->dev));
kfree(bochs->edid);
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index a0d392c338da..76555ae64e9c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1292,8 +1292,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
err_unregister_cec:
i2c_unregister_device(adv7511->i2c_cec);
- if (adv7511->cec_clk)
- clk_disable_unprepare(adv7511->cec_clk);
+ clk_disable_unprepare(adv7511->cec_clk);
err_i2c_unregister_packet:
i2c_unregister_device(adv7511->i2c_packet);
err_i2c_unregister_edid:
@@ -1311,8 +1310,7 @@ static int adv7511_remove(struct i2c_client *i2c)
if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_detach_dsi(adv7511);
i2c_unregister_device(adv7511->i2c_cec);
- if (adv7511->cec_clk)
- clk_disable_unprepare(adv7511->cec_clk);
+ clk_disable_unprepare(adv7511->cec_clk);
adv7511_uninit_regulators(adv7511);
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 4d278573cdb9..05eb759da6fc 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -11,6 +11,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
@@ -20,6 +21,8 @@ struct display_connector {
struct gpio_desc *hpd_gpio;
int hpd_irq;
+
+ struct regulator *dp_pwr;
};
static inline struct display_connector *
@@ -172,11 +175,12 @@ static int display_connector_probe(struct platform_device *pdev)
of_property_read_string(pdev->dev.of_node, "label", &label);
/*
- * Get the HPD GPIO for DVI and HDMI connectors. If the GPIO can provide
+ * Get the HPD GPIO for DVI, HDMI and DP connectors. If the GPIO can provide
* edge interrupts, register an interrupt handler.
*/
if (type == DRM_MODE_CONNECTOR_DVII ||
- type == DRM_MODE_CONNECTOR_HDMIA) {
+ type == DRM_MODE_CONNECTOR_HDMIA ||
+ type == DRM_MODE_CONNECTOR_DisplayPort) {
conn->hpd_gpio = devm_gpiod_get_optional(&pdev->dev, "hpd",
GPIOD_IN);
if (IS_ERR(conn->hpd_gpio)) {
@@ -223,6 +227,38 @@ static int display_connector_probe(struct platform_device *pdev)
}
}
+ /* Get the DP PWR for DP connector. */
+ if (type == DRM_MODE_CONNECTOR_DisplayPort) {
+ int ret;
+
+ conn->dp_pwr = devm_regulator_get_optional(&pdev->dev, "dp-pwr");
+
+ if (IS_ERR(conn->dp_pwr)) {
+ ret = PTR_ERR(conn->dp_pwr);
+
+ switch (ret) {
+ case -ENODEV:
+ conn->dp_pwr = NULL;
+ break;
+
+ case -EPROBE_DEFER:
+ return -EPROBE_DEFER;
+
+ default:
+ dev_err(&pdev->dev, "failed to get DP PWR regulator: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (conn->dp_pwr) {
+ ret = regulator_enable(conn->dp_pwr);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable DP PWR regulator: %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
conn->bridge.funcs = &display_connector_bridge_funcs;
conn->bridge.of_node = pdev->dev.of_node;
@@ -251,6 +287,9 @@ static int display_connector_remove(struct platform_device *pdev)
{
struct display_connector *conn = platform_get_drvdata(pdev);
+ if (conn->dp_pwr)
+ regulator_disable(conn->dp_pwr);
+
drm_bridge_remove(&conn->bridge);
if (!IS_ERR(conn->bridge.ddc))
@@ -275,6 +314,9 @@ static const struct of_device_id display_connector_match[] = {
}, {
.compatible = "vga-connector",
.data = (void *)DRM_MODE_CONNECTOR_VGA,
+ }, {
+ .compatible = "dp-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_DisplayPort,
},
{},
};
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 0c98d27f84ac..fee27952ec6d 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -14,6 +14,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/wait.h>
+#include <linux/workqueue.h>
#include <sound/hdmi-codec.h>
@@ -36,6 +37,7 @@ struct lt9611uxc {
struct mutex ocm_lock;
struct wait_queue_head wq;
+ struct work_struct work;
struct device_node *dsi0_node;
struct device_node *dsi1_node;
@@ -52,6 +54,8 @@ struct lt9611uxc {
bool hpd_supported;
bool edid_read;
+ /* can be accessed from different threads, so protect this with ocm_lock */
+ bool hdmi_connected;
uint8_t fw_version;
};
@@ -143,21 +147,41 @@ static irqreturn_t lt9611uxc_irq_thread_handler(int irq, void *dev_id)
if (irq_status)
regmap_write(lt9611uxc->regmap, 0xb022, 0);
- lt9611uxc_unlock(lt9611uxc);
-
- if (irq_status & BIT(0))
+ if (irq_status & BIT(0)) {
lt9611uxc->edid_read = !!(hpd_status & BIT(0));
+ wake_up_all(&lt9611uxc->wq);
+ }
if (irq_status & BIT(1)) {
- if (lt9611uxc->connector.dev)
- drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
- else
- drm_bridge_hpd_notify(&lt9611uxc->bridge, !!(hpd_status & BIT(1)));
+ lt9611uxc->hdmi_connected = hpd_status & BIT(1);
+ schedule_work(&lt9611uxc->work);
}
+ lt9611uxc_unlock(lt9611uxc);
+
return IRQ_HANDLED;
}
+static void lt9611uxc_hpd_work(struct work_struct *work)
+{
+ struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work);
+ bool connected;
+
+ if (lt9611uxc->connector.dev)
+ drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
+ else {
+
+ mutex_lock(&lt9611uxc->ocm_lock);
+ connected = lt9611uxc->hdmi_connected;
+ mutex_unlock(&lt9611uxc->ocm_lock);
+
+ drm_bridge_hpd_notify(&lt9611uxc->bridge,
+ connected ?
+ connector_status_connected :
+ connector_status_disconnected);
+ }
+}
+
static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc)
{
gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1);
@@ -445,18 +469,21 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
unsigned int reg_val = 0;
int ret;
- int connected = 1;
+ bool connected = true;
+
+ lt9611uxc_lock(lt9611uxc);
if (lt9611uxc->hpd_supported) {
- lt9611uxc_lock(lt9611uxc);
ret = regmap_read(lt9611uxc->regmap, 0xb023, &reg_val);
- lt9611uxc_unlock(lt9611uxc);
if (ret)
dev_err(lt9611uxc->dev, "failed to read hpd status: %d\n", ret);
else
connected = reg_val & BIT(1);
}
+ lt9611uxc->hdmi_connected = connected;
+
+ lt9611uxc_unlock(lt9611uxc);
return connected ? connector_status_connected :
connector_status_disconnected;
@@ -465,7 +492,7 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid
static int lt9611uxc_wait_for_edid(struct lt9611uxc *lt9611uxc)
{
return wait_event_interruptible_timeout(lt9611uxc->wq, lt9611uxc->edid_read,
- msecs_to_jiffies(100));
+ msecs_to_jiffies(500));
}
static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
@@ -503,7 +530,10 @@ static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge,
ret = lt9611uxc_wait_for_edid(lt9611uxc);
if (ret < 0) {
dev_err(lt9611uxc->dev, "wait for EDID failed: %d\n", ret);
- return ERR_PTR(ret);
+ return NULL;
+ } else if (ret == 0) {
+ dev_err(lt9611uxc->dev, "wait for EDID timeout\n");
+ return NULL;
}
return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc);
@@ -926,6 +956,8 @@ retry:
lt9611uxc->fw_version = ret;
init_waitqueue_head(&lt9611uxc->wq);
+ INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work);
+
ret = devm_request_threaded_irq(dev, client->irq, NULL,
lt9611uxc_irq_thread_handler,
IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
@@ -962,6 +994,7 @@ static int lt9611uxc_remove(struct i2c_client *client)
struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
disable_irq(client->irq);
+ flush_scheduled_work();
lt9611uxc_audio_exit(lt9611uxc);
drm_bridge_remove(&lt9611uxc->bridge);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 0c79a9ba48bb..dda4fa9a1a08 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -3440,8 +3440,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
err_iahb:
clk_disable_unprepare(hdmi->iahb_clk);
- if (hdmi->cec_clk)
- clk_disable_unprepare(hdmi->cec_clk);
+ clk_disable_unprepare(hdmi->cec_clk);
err_isfr:
clk_disable_unprepare(hdmi->isfr_clk);
err_res:
@@ -3465,8 +3464,7 @@ void dw_hdmi_remove(struct dw_hdmi *hdmi)
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
- if (hdmi->cec_clk)
- clk_disable_unprepare(hdmi->cec_clk);
+ clk_disable_unprepare(hdmi->cec_clk);
if (hdmi->i2c)
i2c_del_adapter(&hdmi->i2c->adap);
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index 86b06975bfdd..e21078b2f8b5 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -202,7 +202,7 @@ static int thc63_probe(struct platform_device *pdev)
thc63->dev = &pdev->dev;
platform_set_drvdata(pdev, thc63);
- thc63->vcc = devm_regulator_get_optional(thc63->dev, "vcc");
+ thc63->vcc = devm_regulator_get(thc63->dev, "vcc");
if (IS_ERR(thc63->vcc)) {
if (PTR_ERR(thc63->vcc) == -EPROBE_DEFER)
return -EPROBE_DEFER;
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 4c7ad46fdd21..5311d03d49cc 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -45,13 +45,9 @@
#include "drm_legacy.h"
-/**
+/*
* Get AGP information.
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a (output) drm_agp_info structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device has been initialized and acquired and fills in the
@@ -92,7 +88,7 @@ int drm_agp_info_ioctl(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Acquire the AGP device.
*
* \param dev DRM device that is to acquire AGP.
@@ -103,11 +99,13 @@ int drm_agp_info_ioctl(struct drm_device *dev, void *data,
*/
int drm_agp_acquire(struct drm_device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
if (!dev->agp)
return -ENODEV;
if (dev->agp->acquired)
return -EBUSY;
- dev->agp->bridge = agp_backend_acquire(dev->pdev);
+ dev->agp->bridge = agp_backend_acquire(pdev);
if (!dev->agp->bridge)
return -ENODEV;
dev->agp->acquired = 1;
@@ -115,13 +113,9 @@ int drm_agp_acquire(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_agp_acquire);
-/**
+/*
* Acquire the AGP device (ioctl).
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device hasn't been acquired before and calls
@@ -133,7 +127,7 @@ int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
}
-/**
+/*
* Release the AGP device.
*
* \param dev DRM device that is to release AGP.
@@ -157,7 +151,7 @@ int drm_agp_release_ioctl(struct drm_device *dev, void *data,
return drm_agp_release(dev);
}
-/**
+/*
* Enable the AGP bus.
*
* \param dev DRM device that has previously acquired AGP.
@@ -187,13 +181,9 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
return drm_agp_enable(dev, *mode);
}
-/**
+/*
* Allocate AGP memory.
*
- * \param inode device inode.
- * \param file_priv file private pointer.
- * \param cmd command.
- * \param arg pointer to a drm_agp_buffer structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired, allocates the
@@ -242,7 +232,7 @@ int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
return drm_agp_alloc(dev, request);
}
-/**
+/*
* Search for the AGP memory entry associated with a handle.
*
* \param dev DRM device structure.
@@ -263,13 +253,9 @@ static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device *dev,
return NULL;
}
-/**
+/*
* Unbind AGP memory from the GATT (ioctl).
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_binding structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and acquired, looks-up the AGP memory
@@ -285,7 +271,7 @@ int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
entry = drm_agp_lookup_entry(dev, request->handle);
if (!entry || !entry->bound)
return -EINVAL;
- ret = drm_unbind_agp(entry->memory);
+ ret = agp_unbind_memory(entry->memory);
if (ret == 0)
entry->bound = 0;
return ret;
@@ -301,13 +287,9 @@ int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
return drm_agp_unbind(dev, request);
}
-/**
+/*
* Bind AGP memory into the GATT (ioctl)
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_binding structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired and that no memory
@@ -326,7 +308,7 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
if (!entry || entry->bound)
return -EINVAL;
page = DIV_ROUND_UP(request->offset, PAGE_SIZE);
- retcode = drm_bind_agp(entry->memory, page);
+ retcode = agp_bind_memory(entry->memory, page);
if (retcode)
return retcode;
entry->bound = dev->agp->base + (page << PAGE_SHIFT);
@@ -345,13 +327,9 @@ int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
return drm_agp_bind(dev, request);
}
-/**
+/*
* Free AGP memory (ioctl).
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_agp_buffer structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired and looks up the
@@ -369,11 +347,11 @@ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
if (!entry)
return -EINVAL;
if (entry->bound)
- drm_unbind_agp(entry->memory);
+ agp_unbind_memory(entry->memory);
list_del(&entry->head);
- drm_free_agp(entry->memory, entry->pages);
+ agp_free_memory(entry->memory);
kfree(entry);
return 0;
}
@@ -388,7 +366,7 @@ int drm_agp_free_ioctl(struct drm_device *dev, void *data,
return drm_agp_free(dev, request);
}
-/**
+/*
* Initialize the AGP resources.
*
* \return pointer to a drm_agp_head structure.
@@ -402,14 +380,15 @@ int drm_agp_free_ioctl(struct drm_device *dev, void *data,
*/
struct drm_agp_head *drm_agp_init(struct drm_device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct drm_agp_head *head = NULL;
head = kzalloc(sizeof(*head), GFP_KERNEL);
if (!head)
return NULL;
- head->bridge = agp_find_bridge(dev->pdev);
+ head->bridge = agp_find_bridge(pdev);
if (!head->bridge) {
- head->bridge = agp_backend_acquire(dev->pdev);
+ head->bridge = agp_backend_acquire(pdev);
if (!head->bridge) {
kfree(head);
return NULL;
@@ -453,8 +432,8 @@ void drm_legacy_agp_clear(struct drm_device *dev)
list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
if (entry->bound)
- drm_unbind_agp(entry->memory);
- drm_free_agp(entry->memory, entry->pages);
+ agp_unbind_memory(entry->memory);
+ agp_free_memory(entry->memory);
kfree(entry);
}
INIT_LIST_HEAD(&dev->agp->memory);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4a8cbec832bc..560aaecba31b 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2040,6 +2040,9 @@ crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
* should always call this function from their
* &drm_mode_config_funcs.atomic_commit hook.
*
+ * Drivers that need to extend the commit setup to private objects can use the
+ * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
+ *
* To be able to use this support drivers need to use a few more helper
* functions. drm_atomic_helper_wait_for_dependencies() must be called before
* actually committing the hardware state, and for nonblocking commits this call
@@ -2083,8 +2086,11 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct drm_crtc_commit *commit;
+ const struct drm_mode_config_helper_funcs *funcs;
int i, ret;
+ funcs = state->dev->mode_config.helper_private;
+
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
commit = kzalloc(sizeof(*commit), GFP_KERNEL);
if (!commit)
@@ -2169,6 +2175,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
new_plane_state->commit = drm_crtc_commit_get(commit);
}
+ if (funcs && funcs->atomic_commit_setup)
+ return funcs->atomic_commit_setup(state);
+
return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
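A driver wanting to extend commit tracking to private objects would hook in roughly like this; everything named foo_* below is hypothetical, only the atomic_commit_setup member itself comes from this patch:

	static int foo_atomic_commit_setup(struct drm_atomic_state *state)
	{
		/* Attach the commit to driver-private state here, e.g. take a
		 * reference on a drm_crtc_commit for a private object. */
		return 0;
	}

	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
		.atomic_commit_setup = foo_atomic_commit_setup,
		.atomic_commit_tail = drm_atomic_helper_commit_tail,
	};

	/* in the driver's modeset init: */
	/*	drm->mode_config.helper_private = &foo_mode_config_helpers; */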
@@ -3500,76 +3509,6 @@ fail:
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
/**
- * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
- * @crtc: CRTC object
- * @red: red correction table
- * @green: green correction table
- * @blue: green correction table
- * @size: size of the tables
- * @ctx: lock acquire context
- *
- * Implements support for legacy gamma correction table for drivers
- * that support color management through the DEGAMMA_LUT/GAMMA_LUT
- * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
- * how the atomic color management and gamma tables work.
- */
-int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue,
- uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_atomic_state *state;
- struct drm_crtc_state *crtc_state;
- struct drm_property_blob *blob = NULL;
- struct drm_color_lut *blob_data;
- int i, ret = 0;
- bool replaced;
-
- state = drm_atomic_state_alloc(crtc->dev);
- if (!state)
- return -ENOMEM;
-
- blob = drm_property_create_blob(dev,
- sizeof(struct drm_color_lut) * size,
- NULL);
- if (IS_ERR(blob)) {
- ret = PTR_ERR(blob);
- blob = NULL;
- goto fail;
- }
-
- /* Prepare GAMMA_LUT with the legacy values. */
- blob_data = blob->data;
- for (i = 0; i < size; i++) {
- blob_data[i].red = red[i];
- blob_data[i].green = green[i];
- blob_data[i].blue = blue[i];
- }
-
- state->acquire_ctx = ctx;
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
- goto fail;
- }
-
- /* Reset DEGAMMA_LUT and CTM properties. */
- replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
- replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
- replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
- crtc_state->color_mgmt_changed |= replaced;
-
- ret = drm_atomic_commit(state);
-
-fail:
- drm_atomic_state_put(state);
- drm_property_blob_put(blob);
- return ret;
-}
-EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
-
-/**
* drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
* the input end of a bridge
* @bridge: bridge control structure
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 5c2141e9a9f4..26e2f2ffd255 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -185,12 +185,6 @@
* plane does not expose the "alpha" property, then this is
* assumed to be 1.0
*
- * IN_FORMATS:
- * Blob property which contains the set of buffer format and modifier
- * pairs supported by this plane. The blob is a drm_format_modifier_blob
- * struct. Without this property the plane doesn't support buffers with
- * modifiers. Userspace cannot change this property.
- *
* Note that all the property extensions described here apply either to the
* plane or the CRTC (e.g. for the background color, which currently is not
* exposed and assumed to be black).
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index aeb1327e3077..e3d77dfefb0a 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -326,7 +326,7 @@ static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
* As we're limiting the address to 2^32-1 (or less),
* casting it down to 32 bits is no problem, but we
* need to point to a 64bit variable first. */
- map->handle = dma_alloc_coherent(&dev->pdev->dev,
+ map->handle = dma_alloc_coherent(dev->dev,
map->size,
&map->offset,
GFP_KERNEL);
@@ -556,7 +556,7 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
- dma_free_coherent(&dev->pdev->dev,
+ dma_free_coherent(dev->dev,
map->size,
map->handle,
map->offset);
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 0fe3c496002a..79a50ef1250f 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -30,6 +30,8 @@
#include <linux/export.h>
#include <linux/highmem.h>
+#include <linux/mem_encrypt.h>
+#include <xen/xen.h>
#include <drm/drm_cache.h>
@@ -176,3 +178,34 @@ drm_clflush_virt_range(void *addr, unsigned long length)
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);
+
+bool drm_need_swiotlb(int dma_bits)
+{
+ struct resource *tmp;
+ resource_size_t max_iomem = 0;
+
+ /*
+ * Xen paravirtual hosts require swiotlb regardless of requested dma
+ * transfer size.
+ *
+ * NOTE: Really, what it requires is use of the dma_alloc_coherent
+ * allocator used in ttm_dma_populate() instead of
+ * ttm_populate_and_map_pages(), which bounce buffers so much in
+ * Xen it leads to swiotlb buffer exhaustion.
+ */
+ if (xen_pv_domain())
+ return true;
+
+ /*
+ * Enforce dma_alloc_coherent when memory encryption is active as well
+ * for the same reasons as for Xen paravirtual hosts.
+ */
+ if (mem_encrypt_active())
+ return true;
+
+ for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
+ max_iomem = max(max_iomem, tmp->end);
+
+ return max_iomem > ((u64)1 << dma_bits);
+}
+EXPORT_SYMBOL(drm_need_swiotlb);
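Callers pass the DMA address width they plan to use; a typical TTM-based driver gates its allocation strategy on the result, along these lines (a sketch, the variable names are placeholders):

	bool need_swiotlb = drm_need_swiotlb(44);	/* planning a 44-bit DMA mask */

	if (need_swiotlb) {
		/* Use dma_alloc_coherent()-backed pools instead of plain page
		 * allocations that would otherwise bounce through swiotlb. */
	}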
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index b7e9e1c2564c..ced09c7c06f9 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -7,6 +7,7 @@
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
*/
+#include "drm/drm_modeset_lock.h"
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -1181,9 +1182,11 @@ static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dp
struct drm_device *dev = client->dev;
struct drm_connector *connector;
struct drm_mode_set *modeset;
+ struct drm_modeset_acquire_ctx ctx;
int j;
+ int ret;
- drm_modeset_lock_all(dev);
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
drm_client_for_each_modeset(modeset, client) {
if (!modeset->crtc->enabled)
continue;
@@ -1195,7 +1198,7 @@ static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dp
dev->mode_config.dpms_property, dpms_mode);
}
}
- drm_modeset_unlock_all(dev);
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
}
/**
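For reference, the BEGIN/END pair used here expands to an acquire context plus a deadlock back-off retry loop, so the body between the two macros may run more than once; the general pattern looks like this (sketch):

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

	/* body: may be re-run after a -EDEADLK back-off, so it must be safe
	 * to repeat until all the required modeset locks are held */

	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
	if (ret)
		return ret;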
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 3bcabc2f6e0e..bb14f488c8f6 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -22,6 +22,7 @@
#include <linux/uaccess.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
@@ -89,9 +90,8 @@
* modes) appropriately.
*
* There is also support for a legacy gamma table, which is set up by calling
- * drm_mode_crtc_set_gamma_size(). Drivers which support both should use
- * drm_atomic_helper_legacy_gamma_set() to alias the legacy gamma ramp with the
- * "GAMMA_LUT" property above.
+ * drm_mode_crtc_set_gamma_size(). The DRM core will then alias the legacy gamma
+ * ramp with "GAMMA_LUT" or, if that is unavailable, "DEGAMMA_LUT".
*
* Support for different non RGB color encodings is controlled through
* &drm_plane specific COLOR_ENCODING and COLOR_RANGE properties. They
@@ -156,9 +156,6 @@ EXPORT_SYMBOL(drm_color_ctm_s31_32_to_qm_n);
* optional. The gamma and degamma properties are only attached if
* their size is not 0 and ctm_property is only attached if has_ctm is
* true.
- *
- * Drivers should use drm_atomic_helper_legacy_gamma_set() to implement the
- * legacy &drm_crtc_funcs.gamma_set callback.
*/
void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
uint degamma_lut_size,
@@ -232,6 +229,116 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
/**
+ * drm_crtc_supports_legacy_gamma - does the crtc support legacy gamma correction table
+ * @crtc: CRTC object
+ *
+ * Returns true if the given crtc supports setting the legacy gamma
+ * correction table, false otherwise.
+ */
+static bool drm_crtc_supports_legacy_gamma(struct drm_crtc *crtc)
+{
+ u32 gamma_id = crtc->dev->mode_config.gamma_lut_property->base.id;
+ u32 degamma_id = crtc->dev->mode_config.degamma_lut_property->base.id;
+
+ if (!crtc->gamma_size)
+ return false;
+
+ if (crtc->funcs->gamma_set)
+ return true;
+
+ return !!(drm_mode_obj_find_prop_id(&crtc->base, gamma_id) ||
+ drm_mode_obj_find_prop_id(&crtc->base, degamma_id));
+}
+
+/**
+ * drm_crtc_legacy_gamma_set - set the legacy gamma correction table
+ * @crtc: CRTC object
+ * @red: red correction table
+ * @green: green correction table
+ * @blue: blue correction table
+ * @size: size of the tables
+ * @ctx: lock acquire context
+ *
+ * Implements support for legacy gamma correction table for drivers
+ * that have set drm_crtc_funcs.gamma_set or that support color management
+ * through the DEGAMMA_LUT/GAMMA_LUT properties. See
+ * drm_crtc_enable_color_mgmt() and the containing chapter for
+ * how the atomic color management and gamma tables work.
+ *
+ * This function sets the gamma using drm_crtc_funcs.gamma_set if set, or
+ * alternatively using crtc color management properties.
+ */
+static int drm_crtc_legacy_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue,
+ u32 size,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 gamma_id = dev->mode_config.gamma_lut_property->base.id;
+ u32 degamma_id = dev->mode_config.degamma_lut_property->base.id;
+ bool use_gamma_lut;
+ int i, ret = 0;
+ bool replaced;
+
+ if (crtc->funcs->gamma_set)
+ return crtc->funcs->gamma_set(crtc, red, green, blue, size, ctx);
+
+ if (drm_mode_obj_find_prop_id(&crtc->base, gamma_id))
+ use_gamma_lut = true;
+ else if (drm_mode_obj_find_prop_id(&crtc->base, degamma_id))
+ use_gamma_lut = false;
+ else
+ return -ENODEV;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ blob = drm_property_create_blob(dev,
+ sizeof(struct drm_color_lut) * size,
+ NULL);
+ if (IS_ERR(blob)) {
+ ret = PTR_ERR(blob);
+ blob = NULL;
+ goto fail;
+ }
+
+ /* Prepare GAMMA_LUT with the legacy values. */
+ blob_data = blob->data;
+ for (i = 0; i < size; i++) {
+ blob_data[i].red = red[i];
+ blob_data[i].green = green[i];
+ blob_data[i].blue = blue[i];
+ }
+
+ state->acquire_ctx = ctx;
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+
+ /* Set GAMMA_LUT and reset DEGAMMA_LUT and CTM */
+ replaced = drm_property_replace_blob(&crtc_state->degamma_lut,
+ use_gamma_lut ? NULL : blob);
+ replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
+ replaced |= drm_property_replace_blob(&crtc_state->gamma_lut,
+ use_gamma_lut ? blob : NULL);
+ crtc_state->color_mgmt_changed |= replaced;
+
+ ret = drm_atomic_commit(state);
+
+fail:
+ drm_atomic_state_put(state);
+ drm_property_blob_put(blob);
+ return ret;
+}
+
+/**
* drm_mode_gamma_set_ioctl - set the gamma table
* @dev: DRM device
* @data: ioctl data
@@ -262,7 +369,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
if (!crtc)
return -ENOENT;
- if (crtc->funcs->gamma_set == NULL)
+ if (!drm_crtc_supports_legacy_gamma(crtc))
return -ENOSYS;
/* memcpy into gamma store */
@@ -290,8 +397,8 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
goto out;
}
- ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base,
- crtc->gamma_size, &ctx);
+ ret = drm_crtc_legacy_gamma_set(crtc, r_base, g_base, b_base,
+ crtc->gamma_size, &ctx);
out:
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
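With the ioctl now routed through drm_crtc_legacy_gamma_set(), an atomic driver that exposes GAMMA_LUT no longer needs a &drm_crtc_funcs.gamma_set hook, which is why the komeda, malidp, armada, ast and atmel-hlcdc hunks above simply drop it. A rough sketch of what such a driver keeps in its CRTC init path:

	/* Expose GAMMA_LUT (size 256), no DEGAMMA_LUT, no CTM. */
	drm_crtc_enable_color_mgmt(crtc, 0, false, 256);

	/* Legacy gamma store used by the ioctl; the core converts the ramp
	 * into a GAMMA_LUT blob and commits it atomically. */
	drm_mode_crtc_set_gamma_size(crtc, 256);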
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 74090fc3aa55..9c4f9947b194 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -38,6 +38,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_atomic.h>
#include <drm/drm_auth.h>
@@ -67,7 +68,7 @@
* &drm_crtc_funcs.page_flip and &drm_crtc_funcs.cursor_set2, and other legacy
* operations like &drm_crtc_funcs.gamma_set. For atomic drivers all these
* features are controlled through &drm_property and
- * &drm_mode_config_funcs.atomic_check and &drm_mode_config_funcs.atomic_check.
+ * &drm_mode_config_funcs.atomic_check.
*/
/**
@@ -240,30 +241,12 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
* Nearest Neighbor scaling filter
*/
-/**
- * drm_crtc_init_with_planes - Initialise a new CRTC object with
- * specified primary and cursor planes.
- * @dev: DRM device
- * @crtc: CRTC object to init
- * @primary: Primary plane for CRTC
- * @cursor: Cursor plane for CRTC
- * @funcs: callbacks for the new CRTC
- * @name: printf style format string for the CRTC name, or NULL for default name
- *
- * Inits a new object created as base part of a driver crtc object. Drivers
- * should use this function instead of drm_crtc_init(), which is only provided
- * for backwards compatibility with drivers which do not yet support universal
- * planes). For really simple hardware which has only 1 plane look at
- * drm_simple_display_pipe_init() instead.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_plane *primary,
- struct drm_plane *cursor,
- const struct drm_crtc_funcs *funcs,
- const char *name, ...)
+__printf(6, 0)
+static int __drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, va_list ap)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
@@ -291,11 +274,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
return ret;
if (name) {
- va_list ap;
-
- va_start(ap, name);
crtc->name = kvasprintf(GFP_KERNEL, name, ap);
- va_end(ap);
} else {
crtc->name = kasprintf(GFP_KERNEL, "crtc-%d",
drm_num_crtcs(dev));
@@ -339,8 +318,101 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
return 0;
}
+
+/**
+ * drm_crtc_init_with_planes - Initialise a new CRTC object with
+ * specified primary and cursor planes.
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @primary: Primary plane for CRTC
+ * @cursor: Cursor plane for CRTC
+ * @funcs: callbacks for the new CRTC
+ * @name: printf style format string for the CRTC name, or NULL for default name
+ *
+ * Inits a new object created as base part of a driver crtc object. Drivers
+ * should use this function instead of drm_crtc_init(), which is only provided
+ * for backwards compatibility with drivers which do not yet support universal
+ * planes). For really simple hardware which has only 1 plane look at
+ * drm_simple_display_pipe_init() instead.
+ * The &drm_crtc_funcs.destroy hook should call drm_crtc_cleanup() and kfree()
+ * the crtc structure. The crtc structure should not be allocated with
+ * devm_kzalloc().
+ *
+ * The @primary and @cursor planes are only relevant for legacy uAPI, see
+ * &drm_crtc.primary and &drm_crtc.cursor.
+ *
+ * Note: consider using drmm_crtc_alloc_with_planes() instead of
+ * drm_crtc_init_with_planes() to let the DRM managed resource infrastructure
+ * take care of cleanup and deallocation.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, ...)
+{
+ va_list ap;
+ int ret;
+
+ WARN_ON(!funcs->destroy);
+
+ va_start(ap, name);
+ ret = __drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
+ name, ap);
+ va_end(ap);
+
+ return ret;
+}
EXPORT_SYMBOL(drm_crtc_init_with_planes);
+static void drmm_crtc_alloc_with_planes_cleanup(struct drm_device *dev,
+ void *ptr)
+{
+ struct drm_crtc *crtc = ptr;
+
+ drm_crtc_cleanup(crtc);
+}
+
+void *__drmm_crtc_alloc_with_planes(struct drm_device *dev,
+ size_t size, size_t offset,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, ...)
+{
+ void *container;
+ struct drm_crtc *crtc;
+ va_list ap;
+ int ret;
+
+ if (WARN_ON(!funcs || funcs->destroy))
+ return ERR_PTR(-EINVAL);
+
+ container = drmm_kzalloc(dev, size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ crtc = container + offset;
+
+ va_start(ap, name);
+ ret = __drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
+ name, ap);
+ va_end(ap);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drmm_add_action_or_reset(dev, drmm_crtc_alloc_with_planes_cleanup,
+ crtc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return container;
+}
+EXPORT_SYMBOL(__drmm_crtc_alloc_with_planes);
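Drivers are expected to reach __drmm_crtc_alloc_with_planes() through a drmm_crtc_alloc_with_planes() wrapper macro in drm_crtc.h that supplies the containing structure's type and member (the macro belongs to the same series but is not visible in this hunk). Assuming that macro, usage would look roughly like:

	struct foo_crtc {
		struct drm_crtc base;
		/* driver-private fields */
	};

	/* No .destroy hook in foo_crtc_funcs: cleanup and kfree() are handled
	 * by the DRM managed-resource infrastructure on the final drm_dev_put(). */
	foo = drmm_crtc_alloc_with_planes(drm, struct foo_crtc, base,
					  primary, cursor, &foo_crtc_funcs,
					  "crtc-%d", 0);
	if (IS_ERR(foo))
		return PTR_ERR(foo);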
+
/**
* drm_crtc_cleanup - Clean up the core crtc usage
* @crtc: CRTC to cleanup
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
index 25ce42e79995..61e09f8a8d0f 100644
--- a/drivers/gpu/drm/drm_crtc_helper_internal.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -32,16 +32,6 @@
#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
-/* drm_fb_helper.c */
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-int drm_fb_helper_modinit(void);
-#else
-static inline int drm_fb_helper_modinit(void)
-{
- return 0;
-}
-#endif
-
/* drm_dp_aux_dev.c */
#ifdef CONFIG_DRM_DP_AUX_CHARDEV
int drm_dp_aux_dev_init(void);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 5bd0934004e3..eedbb48815b7 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -950,6 +950,38 @@ bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE]
EXPORT_SYMBOL(drm_dp_downstream_444_to_420_conversion);
/**
+ * drm_dp_downstream_rgb_to_ycbcr_conversion() - determine downstream facing port
+ * RGB->YCbCr conversion capability
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: downstream facing port capabilities
+ * @color_spc: Colorspace for which conversion cap is sought
+ *
+ * Returns: whether the downstream facing port can convert RGB->YCbCr for a given
+ * colorspace.
+ */
+bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ u8 color_spc)
+{
+ if (!drm_dp_is_branch(dpcd))
+ return false;
+
+ if (dpcd[DP_DPCD_REV] < 0x13)
+ return false;
+
+ switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+ case DP_DS_PORT_TYPE_HDMI:
+ if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+ return false;
+
+ return port_cap[3] & color_spc;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(drm_dp_downstream_rgb_to_ycbcr_conversion);
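A source driver would query this per colorimetry before letting a DP-to-HDMI protocol converter perform the RGB-to-YCbCr conversion; the DP_DS_HDMI_BT709_RGB_YCBCR_CONV flag below is assumed to be one of the detailed-cap bits defined alongside this helper:

	bool ycbcr_444_bt709;

	ycbcr_444_bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(dpcd, port_cap,
								     DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
	if (ycbcr_444_bt709) {
		/* program the PCON to convert RGB -> YCbCr 4:4:4 for BT.709 */
	}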
+
+/**
* drm_dp_downstream_mode() - return a mode for downstream facing port
* @dev: DRM device
* @dpcd: DisplayPort configuration data
@@ -1204,7 +1236,7 @@ bool drm_dp_read_sink_count_cap(struct drm_connector *connector,
return connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
dpcd[DP_DPCD_REV] >= DP_DPCD_REV_11 &&
dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
- !drm_dp_has_quirk(desc, 0, DP_DPCD_QUIRK_NO_SINK_COUNT);
+ !drm_dp_has_quirk(desc, DP_DPCD_QUIRK_NO_SINK_COUNT);
}
EXPORT_SYMBOL(drm_dp_read_sink_count_cap);
@@ -1925,87 +1957,6 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
#undef DEVICE_ID_ANY
#undef DEVICE_ID
-struct edid_quirk {
- u8 mfg_id[2];
- u8 prod_id[2];
- u32 quirks;
-};
-
-#define MFG(first, second) { (first), (second) }
-#define PROD_ID(first, second) { (first), (second) }
-
-/*
- * Some devices have unreliable OUIDs where they don't set the device ID
- * correctly, and as a result we need to use the EDID for finding additional
- * DP quirks in such cases.
- */
-static const struct edid_quirk edid_quirk_list[] = {
- /* Optional 4K AMOLED panel in the ThinkPad X1 Extreme 2nd Generation
- * only supports DPCD backlight controls
- */
- { MFG(0x4c, 0x83), PROD_ID(0x41, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- /*
- * Some Dell CML 2020 systems have panels support both AUX and PWM
- * backlight control, and some only support AUX backlight control. All
- * said panels start up in AUX mode by default, and we don't have any
- * support for disabling HDR mode on these panels which would be
- * required to switch to PWM backlight control mode (plus, I'm not
- * even sure we want PWM backlight controls over DPCD backlight
- * controls anyway...). Until we have a better way of detecting these,
- * force DPCD backlight mode on all of them.
- */
- { MFG(0x06, 0xaf), PROD_ID(0x9b, 0x32), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- { MFG(0x06, 0xaf), PROD_ID(0xeb, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- { MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- { MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- { MFG(0x4c, 0x83), PROD_ID(0x47, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
- { MFG(0x09, 0xe5), PROD_ID(0xde, 0x08), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
-};
-
-#undef MFG
-#undef PROD_ID
-
-/**
- * drm_dp_get_edid_quirks() - Check the EDID of a DP device to find additional
- * DP-specific quirks
- * @edid: The EDID to check
- *
- * While OUIDs are meant to be used to recognize a DisplayPort device, a lot
- * of manufacturers don't seem to like following standards and neglect to fill
- * the dev-ID in, making it impossible to only use OUIDs for determining
- * quirks in some cases. This function can be used to check the EDID and look
- * up any additional DP quirks. The bits returned by this function correspond
- * to the quirk bits in &drm_dp_quirk.
- *
- * Returns: a bitmask of quirks, if any. The driver can check this using
- * drm_dp_has_quirk().
- */
-u32 drm_dp_get_edid_quirks(const struct edid *edid)
-{
- const struct edid_quirk *quirk;
- u32 quirks = 0;
- int i;
-
- if (!edid)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
- quirk = &edid_quirk_list[i];
- if (memcmp(quirk->mfg_id, edid->mfg_id,
- sizeof(edid->mfg_id)) == 0 &&
- memcmp(quirk->prod_id, edid->prod_code,
- sizeof(edid->prod_code)) == 0)
- quirks |= quirk->quirks;
- }
-
- DRM_DEBUG_KMS("DP sink: EDID mfg %*phD prod-ID %*phD quirks: 0x%04x\n",
- (int)sizeof(edid->mfg_id), edid->mfg_id,
- (int)sizeof(edid->prod_code), edid->prod_code, quirks);
-
- return quirks;
-}
-EXPORT_SYMBOL(drm_dp_get_edid_quirks);
-
/**
* drm_dp_read_desc - read sink/branch descriptor from DPCD
* @aux: DisplayPort AUX channel
@@ -2596,3 +2547,538 @@ void drm_dp_vsc_sdp_log(const char *level, struct device *dev,
#undef DP_SDP_LOG
}
EXPORT_SYMBOL(drm_dp_vsc_sdp_log);
+
+/**
+ * drm_dp_get_pcon_max_frl_bw() - maximum FRL bandwidth supported by the PCON
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns the maximum FRL bandwidth supported by the PCON in Gbps,
+ * or 0 if FRL is not supported.
+ */
+int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4])
+{
+ int bw;
+ u8 buf;
+
+ buf = port_cap[2];
+ bw = buf & DP_PCON_MAX_FRL_BW;
+
+ switch (bw) {
+ case DP_PCON_MAX_9GBPS:
+ return 9;
+ case DP_PCON_MAX_18GBPS:
+ return 18;
+ case DP_PCON_MAX_24GBPS:
+ return 24;
+ case DP_PCON_MAX_32GBPS:
+ return 32;
+ case DP_PCON_MAX_40GBPS:
+ return 40;
+ case DP_PCON_MAX_48GBPS:
+ return 48;
+ case DP_PCON_MAX_0GBPS:
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_get_pcon_max_frl_bw);
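/*
 * Usage sketch (not part of this patch): the PCON's maximum FRL bandwidth
 * alone is not enough; a source would typically clamp it against the HDMI
 * sink's own FRL capability (parsed from the HF-VSDB, see the drm_edid.c
 * hunk below) before configuring the link.
 */
static int my_target_frl_gbps(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
			      const u8 port_cap[4], int sink_max_frl_gbps)
{
	int pcon_max_gbps = drm_dp_get_pcon_max_frl_bw(dpcd, port_cap);

	return min(pcon_max_gbps, sink_max_frl_gbps);
}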
+
+/**
+ * drm_dp_pcon_frl_prepare() - Prepare PCON for FRL.
+ * @aux: DisplayPort AUX channel
+ * @enable_frl_ready_hpd: Configure DP_PCON_ENABLE_HPD_READY.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd)
+{
+ int ret;
+ u8 buf = DP_PCON_ENABLE_SOURCE_CTL_MODE |
+ DP_PCON_ENABLE_LINK_FRL_MODE;
+
+ if (enable_frl_ready_hpd)
+ buf |= DP_PCON_ENABLE_HPD_READY;
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_prepare);
+
+/**
+ * drm_dp_pcon_is_frl_ready() - Is the PCON ready for FRL
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns true if the PCON is ready to start FRL training, false otherwise.
+ */
+bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux)
+{
+ int ret;
+ u8 buf;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ if (ret < 0)
+ return false;
+
+ if (buf & DP_PCON_FRL_READY)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(drm_dp_pcon_is_frl_ready);
+
+/**
+ * drm_dp_pcon_frl_configure_1() - Set HDMI link configuration, step 1
+ * @aux: DisplayPort AUX channel
+ * @max_frl_gbps: maximum FRL bandwidth to be configured between PCON and HDMI sink
+ * @concurrent_mode: true if concurrent mode of operation is required,
+ * false otherwise.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+
+int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
+ bool concurrent_mode)
+{
+ int ret;
+ u8 buf;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (concurrent_mode)
+ buf |= DP_PCON_ENABLE_CONCURRENT_LINK;
+ else
+ buf &= ~DP_PCON_ENABLE_CONCURRENT_LINK;
+
+ switch (max_frl_gbps) {
+ case 9:
+ buf |= DP_PCON_ENABLE_MAX_BW_9GBPS;
+ break;
+ case 18:
+ buf |= DP_PCON_ENABLE_MAX_BW_18GBPS;
+ break;
+ case 24:
+ buf |= DP_PCON_ENABLE_MAX_BW_24GBPS;
+ break;
+ case 32:
+ buf |= DP_PCON_ENABLE_MAX_BW_32GBPS;
+ break;
+ case 40:
+ buf |= DP_PCON_ENABLE_MAX_BW_40GBPS;
+ break;
+ case 48:
+ buf |= DP_PCON_ENABLE_MAX_BW_48GBPS;
+ break;
+ case 0:
+ buf |= DP_PCON_ENABLE_MAX_BW_0GBPS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_configure_1);
+
+/**
+ * drm_dp_pcon_frl_configure_2() - Set HDMI link configuration, step 2
+ * @aux: DisplayPort AUX channel
+ * @max_frl_mask: mask of FRL bandwidths to be tried by the PCON with the HDMI sink
+ * @extended_train_mode: true for Extended mode, false for Normal mode.
+ * In Normal mode, the PCON tries each FRL bandwidth from the max_frl_mask,
+ * starting from the minimum, and stops when link training succeeds. In
+ * Extended mode, all FRL bandwidths selected in the mask are trained by the PCON.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
+ bool extended_train_mode)
+{
+ int ret;
+ u8 buf = max_frl_mask;
+
+ if (extended_train_mode)
+ buf |= DP_PCON_FRL_LINK_TRAIN_EXTENDED;
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_configure_2);
+
+/**
+ * drm_dp_pcon_reset_frl_config() - Reset the HDMI link configuration.
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_reset_frl_config);
+
+/**
+ * drm_dp_pcon_frl_enable() - Enable HDMI link through FRL
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
+{
+ int ret;
+ u8 buf = 0;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ if (ret < 0)
+ return ret;
+ if (!(buf & DP_PCON_ENABLE_SOURCE_CTL_MODE)) {
+ DRM_DEBUG_KMS("PCON in Autonomous mode, can't enable FRL\n");
+ return -EINVAL;
+ }
+ buf |= DP_PCON_ENABLE_HDMI_LINK;
+ ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_enable);
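/*
 * Usage sketch (not part of this patch): the intended call order for bringing
 * up the PCON-to-HDMI FRL link with the helpers above. The bandwidth mask and
 * the single readiness check are illustrative; a real driver would poll
 * DP_PCON_HDMI_TX_LINK_STATUS with a timeout and derive the mask from the
 * sink's capabilities.
 */
static int my_pcon_start_frl(struct drm_dp_aux *aux, int max_frl_gbps,
			     u8 frl_bw_mask)
{
	int ret;

	ret = drm_dp_pcon_frl_prepare(aux, false);
	if (ret < 0)
		return ret;

	if (!drm_dp_pcon_is_frl_ready(aux))
		return -EAGAIN;

	ret = drm_dp_pcon_frl_configure_1(aux, max_frl_gbps, false);
	if (ret < 0)
		return ret;

	ret = drm_dp_pcon_frl_configure_2(aux, frl_bw_mask, false);
	if (ret < 0)
		return ret;

	return drm_dp_pcon_frl_enable(aux);
}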
+
+/**
+ * drm_dp_pcon_hdmi_link_active() - check if the PCON HDMI LINK status is active.
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns true if the link is active, else returns false.
+ */
+bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux)
+{
+ u8 buf;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ if (ret < 0)
+ return false;
+
+ return buf & DP_PCON_HDMI_TX_LINK_ACTIVE;
+}
+EXPORT_SYMBOL(drm_dp_pcon_hdmi_link_active);
+
+/**
+ * drm_dp_pcon_hdmi_link_mode() - get the PCON HDMI link mode
+ * @aux: DisplayPort AUX channel
+ * @frl_trained_mask: pointer to store the bitmask of the trained bandwidth
+ * configuration. Valid only if the mode returned is FRL. In Normal link
+ * training mode only one of the bits will be set, but in Extended mode more
+ * than one bit can be set.
+ *
+ * Returns the link mode (TMDS or FRL) on success, else returns negative error
+ * code.
+ */
+int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask)
+{
+ u8 buf;
+ int mode;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
+ if (ret < 0)
+ return ret;
+
+ mode = buf & DP_PCON_HDMI_LINK_MODE;
+
+ if (frl_trained_mask && DP_PCON_HDMI_MODE_FRL == mode)
+ *frl_trained_mask = (buf & DP_PCON_HDMI_FRL_TRAINED_BW) >> 1;
+
+ return mode;
+}
+EXPORT_SYMBOL(drm_dp_pcon_hdmi_link_mode);
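/*
 * Usage sketch (not part of this patch): after enabling the HDMI link, check
 * whether the PCON actually trained FRL or stayed on TMDS, and which FRL
 * bandwidths were trained.
 */
static void my_pcon_log_link_mode(struct drm_dp_aux *aux)
{
	u8 trained_mask = 0;
	int mode = drm_dp_pcon_hdmi_link_mode(aux, &trained_mask);

	if (mode < 0)
		return;

	if (mode == DP_PCON_HDMI_MODE_FRL)
		DRM_DEBUG_KMS("PCON trained FRL, bw mask 0x%x\n", trained_mask);
	else
		DRM_DEBUG_KMS("PCON HDMI link is in TMDS mode\n");
}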
+
+/**
+ * drm_dp_pcon_hdmi_frl_link_error_count() - print the error count per lane
+ * during link failure between PCON and HDMI sink
+ * @aux: DisplayPort AUX channel
+ * @connector: DRM connector
+ */
+
+void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
+ struct drm_connector *connector)
+{
+ u8 buf, error_count;
+ int i, num_error;
+ struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
+
+ for (i = 0; i < hdmi->max_lanes; i++) {
+ if (drm_dp_dpcd_readb(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
+ return;
+
+ error_count = buf & DP_PCON_HDMI_ERROR_COUNT_MASK;
+ switch (error_count) {
+ case DP_PCON_HDMI_ERROR_COUNT_HUNDRED_PLUS:
+ num_error = 100;
+ break;
+ case DP_PCON_HDMI_ERROR_COUNT_TEN_PLUS:
+ num_error = 10;
+ break;
+ case DP_PCON_HDMI_ERROR_COUNT_THREE_PLUS:
+ num_error = 3;
+ break;
+ default:
+ num_error = 0;
+ }
+
+ DRM_ERROR("More than %d errors since the last read for lane %d\n", num_error, i);
+ }
+}
+EXPORT_SYMBOL(drm_dp_pcon_hdmi_frl_link_error_count);
+
+/*
+ * drm_dp_pcon_enc_is_dsc_1_2 - Does the PCON encoder support DSC 1.2
+ * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
+ *
+ * Returns true if the PCON encoder supports DSC 1.2, else returns false.
+ */
+bool drm_dp_pcon_enc_is_dsc_1_2(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
+{
+ u8 buf;
+ u8 major_v, minor_v;
+
+ buf = pcon_dsc_dpcd[DP_PCON_DSC_VERSION - DP_PCON_DSC_ENCODER];
+ major_v = (buf & DP_PCON_DSC_MAJOR_MASK) >> DP_PCON_DSC_MAJOR_SHIFT;
+ minor_v = (buf & DP_PCON_DSC_MINOR_MASK) >> DP_PCON_DSC_MINOR_SHIFT;
+
+ if (major_v == 1 && minor_v == 2)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(drm_dp_pcon_enc_is_dsc_1_2);
+
+/*
+ * drm_dp_pcon_dsc_max_slices - Get max slices supported by PCON DSC Encoder
+ * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
+ *
+ * Returns the maximum number of slices supported by the PCON DSC encoder.
+ */
+int drm_dp_pcon_dsc_max_slices(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
+{
+ u8 slice_cap1, slice_cap2;
+
+ slice_cap1 = pcon_dsc_dpcd[DP_PCON_DSC_SLICE_CAP_1 - DP_PCON_DSC_ENCODER];
+ slice_cap2 = pcon_dsc_dpcd[DP_PCON_DSC_SLICE_CAP_2 - DP_PCON_DSC_ENCODER];
+
+ if (slice_cap2 & DP_PCON_DSC_24_PER_DSC_ENC)
+ return 24;
+ if (slice_cap2 & DP_PCON_DSC_20_PER_DSC_ENC)
+ return 20;
+ if (slice_cap2 & DP_PCON_DSC_16_PER_DSC_ENC)
+ return 16;
+ if (slice_cap1 & DP_PCON_DSC_12_PER_DSC_ENC)
+ return 12;
+ if (slice_cap1 & DP_PCON_DSC_10_PER_DSC_ENC)
+ return 10;
+ if (slice_cap1 & DP_PCON_DSC_8_PER_DSC_ENC)
+ return 8;
+ if (slice_cap1 & DP_PCON_DSC_6_PER_DSC_ENC)
+ return 6;
+ if (slice_cap1 & DP_PCON_DSC_4_PER_DSC_ENC)
+ return 4;
+ if (slice_cap1 & DP_PCON_DSC_2_PER_DSC_ENC)
+ return 2;
+ if (slice_cap1 & DP_PCON_DSC_1_PER_DSC_ENC)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_dsc_max_slices);
+
+/*
+ * drm_dp_pcon_dsc_max_slice_width() - Get max slice width for the PCON DSC encoder
+ * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
+ *
+ * Returns the maximum slice width in pixels, i.e. the DPCD value multiplied by 320.
+ */
+int drm_dp_pcon_dsc_max_slice_width(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
+{
+ u8 buf;
+
+ buf = pcon_dsc_dpcd[DP_PCON_DSC_MAX_SLICE_WIDTH - DP_PCON_DSC_ENCODER];
+
+ return buf * DP_DSC_SLICE_WIDTH_MULTIPLIER;
+}
+EXPORT_SYMBOL(drm_dp_pcon_dsc_max_slice_width);
+
+/*
+ * drm_dp_pcon_dsc_bpp_incr() - Get bits per pixel increment for PCON DSC encoder
+ * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
+ *
+ * Returns the fractional bpp precision supported by the PCON DSC encoder,
+ * expressed as the denominator n of the 1/n bpp increment.
+ */
+int drm_dp_pcon_dsc_bpp_incr(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
+{
+ u8 buf;
+
+ buf = pcon_dsc_dpcd[DP_PCON_DSC_BPP_INCR - DP_PCON_DSC_ENCODER];
+
+ switch (buf & DP_PCON_DSC_BPP_INCR_MASK) {
+ case DP_PCON_DSC_ONE_16TH_BPP:
+ return 16;
+ case DP_PCON_DSC_ONE_8TH_BPP:
+ return 8;
+ case DP_PCON_DSC_ONE_4TH_BPP:
+ return 4;
+ case DP_PCON_DSC_ONE_HALF_BPP:
+ return 2;
+ case DP_PCON_DSC_ONE_BPP:
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_dsc_bpp_incr);
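/*
 * Usage sketch (not part of this patch): read the PCON DSC encoder capability
 * block once and derive the encoder limits from it, using the same
 * DP_PCON_DSC_ENCODER base offset and cap size as the helpers above.
 */
static int my_read_pcon_dsc_caps(struct drm_dp_aux *aux)
{
	u8 caps[DP_PCON_DSC_ENCODER_CAP_SIZE];
	int ret;

	ret = drm_dp_dpcd_read(aux, DP_PCON_DSC_ENCODER, caps, sizeof(caps));
	if (ret < 0)
		return ret;

	if (!drm_dp_pcon_enc_is_dsc_1_2(caps))
		return -EOPNOTSUPP;

	DRM_DEBUG_KMS("PCON DSC: %d slices max, %d px max slice width, 1/%d bpp step\n",
		      drm_dp_pcon_dsc_max_slices(caps),
		      drm_dp_pcon_dsc_max_slice_width(caps),
		      drm_dp_pcon_dsc_bpp_incr(caps));
	return 0;
}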
+
+static
+int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
+{
+ u8 buf;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ if (ret < 0)
+ return ret;
+
+ buf |= DP_PCON_ENABLE_DSC_ENCODER;
+
+ if (pps_buf_config <= DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER) {
+ buf &= ~DP_PCON_ENCODER_PPS_OVERRIDE_MASK;
+ buf |= pps_buf_config << 2;
+ }
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * drm_dp_pcon_pps_default() - Let the PCON fill the default PPS parameters
+ * for DSC 1.2 between the PCON and the HDMI 2.1 sink
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_pps_default(struct drm_dp_aux *aux)
+{
+ int ret;
+
+ ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_pps_default);
+
+/**
+ * drm_dp_pcon_pps_override_buf() - Configure PPS encoder override buffer for
+ * HDMI sink
+ * @aux: DisplayPort AUX channel
+ * @pps_buf: 128 bytes to be written into PPS buffer for HDMI sink by PCON.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128])
+{
+ int ret;
+
+ ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, pps_buf, 128);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_pps_override_buf);
+
+/*
+ * drm_dp_pcon_pps_override_param() - Write PPS parameters to DSC encoder
+ * override registers
+ * @aux: DisplayPort AUX channel
+ * @pps_param: three parameters (2 bytes each): slice height, slice width and
+ * bits_per_pixel, in the order written by this function.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6])
+{
+ int ret;
+
+ ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
+ if (ret < 0)
+ return ret;
+ ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
+ if (ret < 0)
+ return ret;
+ ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_pps_override_param);
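/*
 * Usage sketch (not part of this patch): packing the three override
 * parameters in the order this helper writes them (slice height at bytes 0-1,
 * slice width at bytes 2-3, bits_per_pixel at bytes 4-5).
 * Least-significant-byte-first packing is an assumption based on the usual
 * DPCD multi-byte layout.
 */
static int my_pps_override(struct drm_dp_aux *aux, u16 slice_height,
			   u16 slice_width, u16 bpp)
{
	u8 pps_param[6];

	pps_param[0] = slice_height & 0xff;
	pps_param[1] = slice_height >> 8;
	pps_param[2] = slice_width & 0xff;
	pps_param[3] = slice_width >> 8;
	pps_param[4] = bpp & 0xff;
	pps_param[5] = bpp >> 8;

	return drm_dp_pcon_pps_override_param(aux, pps_param);
}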
+
+/*
+ * drm_dp_pcon_convert_rgb_to_ycbcr() - Configure the PCON to convert RGB to YCbCr
+ * @aux: DisplayPort AUX channel
+ * @color_spc: colorspace(s) for which conversion is to be enabled, 0 to disable.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc)
+{
+ int ret;
+ u8 buf;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (color_spc & DP_CONVERSION_RGB_YCBCR_MASK)
+ buf |= (color_spc & DP_CONVERSION_RGB_YCBCR_MASK);
+ else
+ buf &= ~DP_CONVERSION_RGB_YCBCR_MASK;
+
+ ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr);
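/*
 * Usage sketch (not part of this patch): enable BT.709 RGB->YCbCr conversion
 * on the PCON only when the downstream port advertises it.
 * DP_DS_HDMI_BT709_RGB_YCBCR_CONV and DP_CONVERSION_BT709_RGB_YCBCR_ENABLE
 * are assumed flag names from the matching header additions.
 */
static int my_enable_bt709_conversion(struct drm_dp_aux *aux,
				      const u8 dpcd[DP_RECEIVER_CAP_SIZE],
				      const u8 port_cap[4])
{
	if (!drm_dp_downstream_rgb_to_ycbcr_conversion(dpcd, port_cap,
					DP_DS_HDMI_BT709_RGB_YCBCR_CONV))
		return -EOPNOTSUPP;

	return drm_dp_pcon_convert_rgb_to_ycbcr(aux,
					DP_CONVERSION_BT709_RGB_YCBCR_ENABLE);
}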
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 0401b2f47500..309afe61afdd 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2302,7 +2302,8 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
}
if (port->pdt != DP_PEER_DEVICE_NONE &&
- drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
+ drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
+ port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector,
&port->aux.ddc);
drm_connector_set_tile_property(port->connector);
@@ -2751,7 +2752,7 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
drm_dp_mst_topology_put_mstb(mstb);
mutex_unlock(&mgr->probe_lock);
- if (ret)
+ if (ret > 0)
drm_kms_helper_hotplug_event(dev);
}
@@ -3629,14 +3630,26 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
return 0;
}
-static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
+/**
+ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
+ * @link_rate: link rate in 10kbits/s units
+ * @link_lane_count: lane count
+ *
+ * Calculate the total bandwidth of a MultiStream Transport link. The returned
+ * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
+ * convert the number of PBNs required for a given stream to the number of
+ * timeslots this stream requires in each MTP.
+ */
+int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
{
- if (dp_link_bw == 0 || dp_link_count == 0)
- DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
- dp_link_bw, dp_link_count);
+ if (link_rate == 0 || link_lane_count == 0)
+ DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n",
+ link_rate, link_lane_count);
- return dp_link_bw * dp_link_count / 2;
+ /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
+ return link_rate * link_lane_count / 54000;
}
+EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
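/*
 * Worked example (not part of this patch) of the new unit convention: for an
 * HBR2 link (5.4 Gbps per lane, link_rate = 540000 in 10 kbit/s units) with
 * 4 lanes,
 *
 *     540000 * 4 / 54000 = 40 PBN per timeslot,
 *
 * so a stream needing 280 PBN occupies DIV_ROUND_UP(280, 40) = 7 of the 64
 * MTP timeslots.
 */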
/**
* drm_dp_read_mst_cap() - check whether or not a sink supports MST
@@ -3692,7 +3705,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
}
- mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
+ mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]),
mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
if (mgr->pbn_div == 0) {
ret = -EINVAL;
@@ -4212,6 +4225,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
switch (port->pdt) {
case DP_PEER_DEVICE_NONE:
+ break;
case DP_PEER_DEVICE_MST_BRANCHING:
if (!port->mcs)
ret = connector_status_connected;
@@ -5824,8 +5838,7 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_read_desc(port->mgr->aux, &desc, true))
return NULL;
- if (drm_dp_has_quirk(&desc, 0,
- DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
+ if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
port->parent == port->mgr->mst_primary) {
u8 downstreamport;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 734303802bc3..20d22e41d7ce 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -469,6 +469,9 @@ void drm_dev_unplug(struct drm_device *dev)
synchronize_srcu(&drm_unplug_srcu);
drm_dev_unregister(dev);
+
+ /* Clear all CPU mappings pointing to this device */
+ unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1);
}
EXPORT_SYMBOL(drm_dev_unplug);
@@ -589,11 +592,7 @@ static int drm_dev_init(struct drm_device *dev,
kref_init(&dev->ref);
dev->dev = get_device(parent);
-#ifdef CONFIG_DRM_LEGACY
- dev->driver = (struct drm_driver *)driver;
-#else
dev->driver = driver;
-#endif
INIT_LIST_HEAD(&dev->managed.resources);
spin_lock_init(&dev->managed.lock);
@@ -675,11 +674,8 @@ static int devm_drm_dev_init(struct device *parent,
if (ret)
return ret;
- ret = devm_add_action(parent, devm_drm_dev_init_release, dev);
- if (ret)
- devm_drm_dev_init_release(dev);
-
- return ret;
+ return devm_add_action_or_reset(parent,
+ devm_drm_dev_init_release, dev);
}
void *__devm_drm_dev_alloc(struct device *parent,
@@ -897,8 +893,6 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_modeset_register_all(dev);
- ret = 0;
-
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver->name, driver->major, driver->minor,
driver->patchlevel, driver->date,
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
index 4a475d9696ff..ff602f7ec65b 100644
--- a/drivers/gpu/drm/drm_dsc.c
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -50,6 +50,33 @@ void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header)
EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
/**
+ * drm_dsc_dp_rc_buffer_size - get rc buffer size in bytes
+ * @rc_buffer_block_size: block size code, according to DPCD offset 62h
+ * @rc_buffer_size: number of blocks - 1, according to DPCD offset 63h
+ *
+ * Return:
+ * buffer size in bytes, or 0 on invalid input
+ */
+int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size)
+{
+ int size = 1024 * (rc_buffer_size + 1);
+
+ switch (rc_buffer_block_size) {
+ case DP_DSC_RC_BUF_BLK_SIZE_1:
+ return 1 * size;
+ case DP_DSC_RC_BUF_BLK_SIZE_4:
+ return 4 * size;
+ case DP_DSC_RC_BUF_BLK_SIZE_16:
+ return 16 * size;
+ case DP_DSC_RC_BUF_BLK_SIZE_64:
+ return 64 * size;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(drm_dsc_dp_rc_buffer_size);
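/*
 * Worked example (not part of this patch): with
 * rc_buffer_block_size = DP_DSC_RC_BUF_BLK_SIZE_16 and rc_buffer_size = 15
 * (i.e. 16 blocks of 16 kB), the helper returns 16 * 1024 * (15 + 1) =
 * 262144 bytes.
 */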
+
+/**
* drm_dsc_pps_payload_pack() - Populates the DSC PPS
*
* @pps_payload:
@@ -186,8 +213,7 @@ void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload,
pps_payload->flatness_max_qp = dsc_cfg->flatness_max_qp;
/* PPS 38, 39 */
- pps_payload->rc_model_size =
- cpu_to_be16(DSC_RC_MODEL_SIZE_CONST);
+ pps_payload->rc_model_size = cpu_to_be16(dsc_cfg->rc_model_size);
/* PPS 40 */
pps_payload->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index d18a740fe0f1..ad17fa21cebb 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -29,6 +29,7 @@
#include <drm/drm_mode.h>
#include "drm_crtc_internal.h"
+#include "drm_internal.h"
/**
* DOC: overview
@@ -46,9 +47,10 @@
* KMS frame buffers.
*
* To support dumb objects drivers must implement the &drm_driver.dumb_create
- * operation. &drm_driver.dumb_destroy defaults to drm_gem_dumb_destroy() if
- * not set and &drm_driver.dumb_map_offset defaults to
- * drm_gem_dumb_map_offset(). See the callbacks for further details.
+ * and &drm_driver.dumb_map_offset operations (the latter defaults to
+ * drm_gem_dumb_map_offset() if not set). Drivers that don't use GEM handles
+ * additionally need to implement the &drm_driver.dumb_destroy operation. See
+ * the callbacks for further details.
*
* Note that dumb objects may not be used for gpu acceleration, as has been
* attempted on some ARM embedded platforms. Such drivers really must have
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index e95cce8e736d..c2bbe7bee7b6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -32,6 +32,7 @@
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
@@ -2075,9 +2076,13 @@ EXPORT_SYMBOL(drm_get_edid);
struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
- struct pci_dev *pdev = connector->dev->pdev;
+ struct drm_device *dev = connector->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct edid *edid;
+ if (drm_WARN_ON_ONCE(dev, !dev_is_pci(dev->dev)))
+ return NULL;
+
vga_switcheroo_lock_ddc(pdev);
edid = drm_get_edid(connector, adapter);
vga_switcheroo_unlock_ddc(pdev);
@@ -4851,6 +4856,41 @@ static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db)
info->rgb_quant_range_selectable = true;
}
+static
+void drm_get_max_frl_rate(int max_frl_rate, u8 *max_lanes, u8 *max_rate_per_lane)
+{
+ switch (max_frl_rate) {
+ case 1:
+ *max_lanes = 3;
+ *max_rate_per_lane = 3;
+ break;
+ case 2:
+ *max_lanes = 3;
+ *max_rate_per_lane = 6;
+ break;
+ case 3:
+ *max_lanes = 4;
+ *max_rate_per_lane = 6;
+ break;
+ case 4:
+ *max_lanes = 4;
+ *max_rate_per_lane = 8;
+ break;
+ case 5:
+ *max_lanes = 4;
+ *max_rate_per_lane = 10;
+ break;
+ case 6:
+ *max_lanes = 4;
+ *max_rate_per_lane = 12;
+ break;
+ case 0:
+ default:
+ *max_lanes = 0;
+ *max_rate_per_lane = 0;
+ }
+}
+
static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
const u8 *db)
{
@@ -4904,6 +4944,74 @@ static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
}
}
+ if (hf_vsdb[7]) {
+ u8 max_frl_rate;
+ u8 dsc_max_frl_rate;
+ u8 dsc_max_slices;
+ struct drm_hdmi_dsc_cap *hdmi_dsc = &hdmi->dsc_cap;
+
+ DRM_DEBUG_KMS("HDMI 2.1 sink detected, parsing EDID\n");
+ max_frl_rate = (hf_vsdb[7] & DRM_EDID_MAX_FRL_RATE_MASK) >> 4;
+ drm_get_max_frl_rate(max_frl_rate, &hdmi->max_lanes,
+ &hdmi->max_frl_rate_per_lane);
+ hdmi_dsc->v_1p2 = hf_vsdb[11] & DRM_EDID_DSC_1P2;
+
+ if (hdmi_dsc->v_1p2) {
+ hdmi_dsc->native_420 = hf_vsdb[11] & DRM_EDID_DSC_NATIVE_420;
+ hdmi_dsc->all_bpp = hf_vsdb[11] & DRM_EDID_DSC_ALL_BPP;
+
+ if (hf_vsdb[11] & DRM_EDID_DSC_16BPC)
+ hdmi_dsc->bpc_supported = 16;
+ else if (hf_vsdb[11] & DRM_EDID_DSC_12BPC)
+ hdmi_dsc->bpc_supported = 12;
+ else if (hf_vsdb[11] & DRM_EDID_DSC_10BPC)
+ hdmi_dsc->bpc_supported = 10;
+ else
+ hdmi_dsc->bpc_supported = 0;
+
+ dsc_max_frl_rate = (hf_vsdb[12] & DRM_EDID_DSC_MAX_FRL_RATE_MASK) >> 4;
+ drm_get_max_frl_rate(dsc_max_frl_rate, &hdmi_dsc->max_lanes,
+ &hdmi_dsc->max_frl_rate_per_lane);
+ hdmi_dsc->total_chunk_kbytes = hf_vsdb[13] & DRM_EDID_DSC_TOTAL_CHUNK_KBYTES;
+
+ dsc_max_slices = hf_vsdb[12] & DRM_EDID_DSC_MAX_SLICES;
+ switch (dsc_max_slices) {
+ case 1:
+ hdmi_dsc->max_slices = 1;
+ hdmi_dsc->clk_per_slice = 340;
+ break;
+ case 2:
+ hdmi_dsc->max_slices = 2;
+ hdmi_dsc->clk_per_slice = 340;
+ break;
+ case 3:
+ hdmi_dsc->max_slices = 4;
+ hdmi_dsc->clk_per_slice = 340;
+ break;
+ case 4:
+ hdmi_dsc->max_slices = 8;
+ hdmi_dsc->clk_per_slice = 340;
+ break;
+ case 5:
+ hdmi_dsc->max_slices = 8;
+ hdmi_dsc->clk_per_slice = 400;
+ break;
+ case 6:
+ hdmi_dsc->max_slices = 12;
+ hdmi_dsc->clk_per_slice = 400;
+ break;
+ case 7:
+ hdmi_dsc->max_slices = 16;
+ hdmi_dsc->clk_per_slice = 400;
+ break;
+ case 0:
+ default:
+ hdmi_dsc->max_slices = 0;
+ hdmi_dsc->clk_per_slice = 0;
+ }
+ }
+ }
+
drm_parse_ycbcr420_deep_color_info(connector, hf_vsdb);
}
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index e555281f43d4..72e982323a5e 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -26,6 +26,7 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_managed.h>
#include "drm_crtc_internal.h"
@@ -72,7 +73,7 @@ int drm_encoder_register_all(struct drm_device *dev)
int ret = 0;
drm_for_each_encoder(encoder, dev) {
- if (encoder->funcs->late_register)
+ if (encoder->funcs && encoder->funcs->late_register)
ret = encoder->funcs->late_register(encoder);
if (ret)
return ret;
@@ -86,30 +87,16 @@ void drm_encoder_unregister_all(struct drm_device *dev)
struct drm_encoder *encoder;
drm_for_each_encoder(encoder, dev) {
- if (encoder->funcs->early_unregister)
+ if (encoder->funcs && encoder->funcs->early_unregister)
encoder->funcs->early_unregister(encoder);
}
}
-/**
- * drm_encoder_init - Init a preallocated encoder
- * @dev: drm device
- * @encoder: the encoder to init
- * @funcs: callbacks for this encoder
- * @encoder_type: user visible type of the encoder
- * @name: printf style format string for the encoder name, or NULL for default name
- *
- * Initialises a preallocated encoder. Encoder should be subclassed as part of
- * driver encoder objects. At driver unload time drm_encoder_cleanup() should be
- * called from the driver's &drm_encoder_funcs.destroy hook.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_encoder_init(struct drm_device *dev,
- struct drm_encoder *encoder,
- const struct drm_encoder_funcs *funcs,
- int encoder_type, const char *name, ...)
+__printf(5, 0)
+static int __drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, va_list ap)
{
int ret;
@@ -125,11 +112,7 @@ int drm_encoder_init(struct drm_device *dev,
encoder->encoder_type = encoder_type;
encoder->funcs = funcs;
if (name) {
- va_list ap;
-
- va_start(ap, name);
encoder->name = kvasprintf(GFP_KERNEL, name, ap);
- va_end(ap);
} else {
encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
drm_encoder_enum_list[encoder_type].name,
@@ -150,6 +133,44 @@ out_put:
return ret;
}
+
+/**
+ * drm_encoder_init - Init a preallocated encoder
+ * @dev: drm device
+ * @encoder: the encoder to init
+ * @funcs: callbacks for this encoder
+ * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
+ *
+ * Initializes a preallocated encoder. Encoder should be subclassed as part of
+ * driver encoder objects. At driver unload time the driver's
+ * &drm_encoder_funcs.destroy hook should call drm_encoder_cleanup() and kfree()
+ * the encoder structure. The encoder structure should not be allocated with
+ * devm_kzalloc().
+ *
+ * Note: consider using drmm_encoder_alloc() instead of drm_encoder_init() to
+ * let the DRM managed resource infrastructure take care of cleanup and
+ * deallocation.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, ...)
+{
+ va_list ap;
+ int ret;
+
+ WARN_ON(!funcs->destroy);
+
+ va_start(ap, name);
+ ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
+ va_end(ap);
+
+ return ret;
+}
EXPORT_SYMBOL(drm_encoder_init);
/**
@@ -181,6 +202,48 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
}
EXPORT_SYMBOL(drm_encoder_cleanup);
+static void drmm_encoder_alloc_release(struct drm_device *dev, void *ptr)
+{
+ struct drm_encoder *encoder = ptr;
+
+ if (WARN_ON(!encoder->dev))
+ return;
+
+ drm_encoder_cleanup(encoder);
+}
+
+void *__drmm_encoder_alloc(struct drm_device *dev, size_t size, size_t offset,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, ...)
+{
+ void *container;
+ struct drm_encoder *encoder;
+ va_list ap;
+ int ret;
+
+ if (WARN_ON(funcs && funcs->destroy))
+ return ERR_PTR(-EINVAL);
+
+ container = drmm_kzalloc(dev, size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ encoder = container + offset;
+
+ va_start(ap, name);
+ ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
+ va_end(ap);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drmm_add_action_or_reset(dev, drmm_encoder_alloc_release, encoder);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return container;
+}
+EXPORT_SYMBOL(__drmm_encoder_alloc);
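/*
 * Usage sketch (not part of this patch): typical use through the assumed
 * drmm_encoder_alloc() wrapper macro from the matching header change. NULL
 * funcs is allowed here because cleanup is handled by the managed release
 * action; "struct my_encoder" is a hypothetical driver struct embedding
 * drm_encoder.
 */
struct my_encoder {
	struct drm_encoder base;
	/* driver private state */
};

static struct my_encoder *my_create_encoder(struct drm_device *drm)
{
	return drmm_encoder_alloc(drm, struct my_encoder, base, NULL,
				  DRM_MODE_ENCODER_TMDS, NULL);
}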
+
static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
{
struct drm_connector *connector;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4b8119510687..b9a616737c0e 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -946,11 +946,15 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
drm_modeset_lock_all(fb_helper->dev);
drm_client_for_each_modeset(modeset, &fb_helper->client) {
crtc = modeset->crtc;
- if (!crtc->funcs->gamma_set || !crtc->gamma_size)
- return -EINVAL;
+ if (!crtc->funcs->gamma_set || !crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (cmap->start + cmap->len > crtc->gamma_size)
- return -EINVAL;
+ if (cmap->start + cmap->len > crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
r = crtc->gamma_store;
g = r + crtc->gamma_size;
@@ -963,8 +967,9 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
ret = crtc->funcs->gamma_set(crtc, r, g, b,
crtc->gamma_size, NULL);
if (ret)
- return ret;
+ goto out;
}
+out:
drm_modeset_unlock_all(fb_helper->dev);
return ret;
@@ -1054,6 +1059,11 @@ retry:
goto out_state;
}
+ /*
+ * FIXME: This always uses gamma_lut. Some HW have only
+ * degamma_lut, in which case we should reset gamma_lut and set
+ * degamma_lut. See drm_crtc_legacy_gamma_set().
+ */
replaced = drm_property_replace_blob(&crtc_state->degamma_lut,
NULL);
replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
@@ -2486,6 +2496,11 @@ void drm_fbdev_generic_setup(struct drm_device *dev,
return;
}
+ /*
+ * FIXME: This mixes up depth with bpp, which results in a glorious
+ * mess, resulting in some drivers picking wrong fbdev defaults and
+ * others wrong preferred_depth defaults.
+ */
if (!preferred_bpp)
preferred_bpp = dev->mode_config.preferred_depth;
if (!preferred_bpp)
@@ -2499,24 +2514,3 @@ void drm_fbdev_generic_setup(struct drm_device *dev,
drm_client_register(&fb_helper->client);
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);
-
-/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
- * but the module doesn't depend on any fb console symbols. At least
- * attempt to load fbcon to avoid leaving the system without a usable console.
- */
-int __init drm_fb_helper_modinit(void)
-{
-#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
- const char name[] = "fbcon";
- struct module *fbcon;
-
- mutex_lock(&module_mutex);
- fbcon = find_module(name);
- mutex_unlock(&module_mutex);
-
- if (!fbcon)
- request_module_nowait(name);
-#endif
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_modinit);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index b50380fa80ce..7efbccffc2ea 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -113,8 +113,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev)
* The memory mapping implementation will vary depending on how the driver
* manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
* function, modern drivers should use one of the provided memory-manager
- * specific implementations. For GEM-based drivers this is drm_gem_mmap(), and
- * for drivers which use the CMA GEM helpers it's drm_gem_cma_mmap().
+ * specific implementations. For GEM-based drivers this is drm_gem_mmap().
*
* No other file operations are supported by the DRM userspace API. Overall the
* following is an example &file_operations structure::
@@ -240,9 +239,6 @@ static void drm_events_release(struct drm_file *file_priv)
* before calling this.
*
* If NULL is passed, this is a no-op.
- *
- * RETURNS:
- * 0 on success, or error code on failure.
*/
void drm_file_free(struct drm_file *file)
{
@@ -371,6 +367,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->filelist_mutex);
+#ifdef CONFIG_DRM_LEGACY
#ifdef __alpha__
/*
* Default the hose
@@ -391,6 +388,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
}
}
#endif
+#endif
return 0;
}
@@ -777,20 +775,19 @@ void drm_event_cancel_free(struct drm_device *dev,
EXPORT_SYMBOL(drm_event_cancel_free);
/**
- * drm_send_event_locked - send DRM event to file descriptor
+ * drm_send_event_helper - send DRM event to file descriptor
* @dev: DRM device
* @e: DRM event to deliver
+ * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
+ * time domain
*
- * This function sends the event @e, initialized with drm_event_reserve_init(),
- * to its associated userspace DRM file. Callers must already hold
- * &drm_device.event_lock, see drm_send_event() for the unlocked version.
- *
- * Note that the core will take care of unlinking and disarming events when the
- * corresponding DRM file is closed. Drivers need not worry about whether the
- * DRM file for this event still exists and can call this function upon
- * completion of the asynchronous work unconditionally.
+ * This helper function sends the event @e, initialized with
+ * drm_event_reserve_init(), to its associated userspace DRM file.
+ * The timestamp variant of dma_fence_signal is used when the caller
+ * sends a valid timestamp.
*/
-void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
+void drm_send_event_helper(struct drm_device *dev,
+ struct drm_pending_event *e, ktime_t timestamp)
{
assert_spin_locked(&dev->event_lock);
@@ -801,7 +798,10 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
}
if (e->fence) {
- dma_fence_signal(e->fence);
+ if (timestamp)
+ dma_fence_signal_timestamp(e->fence, timestamp);
+ else
+ dma_fence_signal(e->fence);
dma_fence_put(e->fence);
}
@@ -816,6 +816,48 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
wake_up_interruptible_poll(&e->file_priv->event_wait,
EPOLLIN | EPOLLRDNORM);
}
+
+/**
+ * drm_send_event_timestamp_locked - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
+ * time domain
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. Callers must already hold
+ * &drm_device.event_lock.
+ *
+ * Note that the core will take care of unlinking and disarming events when the
+ * corresponding DRM file is closed. Drivers need not worry about whether the
+ * DRM file for this event still exists and can call this function upon
+ * completion of the asynchronous work unconditionally.
+ */
+void drm_send_event_timestamp_locked(struct drm_device *dev,
+ struct drm_pending_event *e, ktime_t timestamp)
+{
+ drm_send_event_helper(dev, e, timestamp);
+}
+EXPORT_SYMBOL(drm_send_event_timestamp_locked);
+
+/**
+ * drm_send_event_locked - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. Callers must already hold
+ * &drm_device.event_lock, see drm_send_event() for the unlocked version.
+ *
+ * Note that the core will take care of unlinking and disarming events when the
+ * corresponding DRM file is closed. Drivers need not worry about whether the
+ * DRM file for this event still exists and can call this function upon
+ * completion of the asynchronous work unconditionally.
+ */
+void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
+{
+ drm_send_event_helper(dev, e, 0);
+}
EXPORT_SYMBOL(drm_send_event_locked);
/**
@@ -838,7 +880,7 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
unsigned long irqflags;
spin_lock_irqsave(&dev->event_lock, irqflags);
- drm_send_event_locked(dev, e);
+ drm_send_event_helper(dev, e, 0);
spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_send_event);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 92f89cee213e..c2ce78c4edc3 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -335,22 +335,12 @@ out:
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
-/**
- * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
- * @file: drm file-private structure to remove the dumb handle from
- * @dev: corresponding drm_device
- * @handle: the dumb handle to remove
- *
- * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
- * which use gem to manage their backing storage.
- */
int drm_gem_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
- uint32_t handle)
+ u32 handle)
{
return drm_gem_handle_delete(file, handle);
}
-EXPORT_SYMBOL(drm_gem_dumb_destroy);
/**
* drm_gem_handle_create_tail - internal functions to create a handle
@@ -1078,20 +1068,17 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
drm_gem_object_get(obj);
vma->vm_private_data = obj;
+ vma->vm_ops = obj->funcs->vm_ops;
if (obj->funcs->mmap) {
ret = obj->funcs->mmap(obj, vma);
- if (ret) {
- drm_gem_object_put(obj);
- return ret;
- }
+ if (ret)
+ goto err_drm_gem_object_put;
WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
} else {
- if (obj->funcs->vm_ops)
- vma->vm_ops = obj->funcs->vm_ops;
- else {
- drm_gem_object_put(obj);
- return -EINVAL;
+ if (!vma->vm_ops) {
+ ret = -EINVAL;
+ goto err_drm_gem_object_put;
}
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
@@ -1100,6 +1087,10 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
}
return 0;
+
+err_drm_gem_object_put:
+ drm_gem_object_put(obj);
+ return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 4d5c1d86b022..7942cf05cd93 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -36,8 +36,9 @@
static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
.free = drm_gem_cma_free_object,
.print_info = drm_gem_cma_print_info,
- .get_sg_table = drm_gem_cma_prime_get_sg_table,
- .vmap = drm_gem_cma_prime_vmap,
+ .get_sg_table = drm_gem_cma_get_sg_table,
+ .vmap = drm_gem_cma_vmap,
+ .mmap = drm_gem_cma_mmap,
.vm_ops = &drm_gem_cma_vm_ops,
};
@@ -277,62 +278,6 @@ const struct vm_operations_struct drm_gem_cma_vm_ops = {
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
-static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
- struct vm_area_struct *vma)
-{
- int ret;
-
- /*
- * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
- * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
- * the whole buffer.
- */
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_pgoff = 0;
-
- ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
- cma_obj->paddr, vma->vm_end - vma->vm_start);
- if (ret)
- drm_gem_vm_close(vma);
-
- return ret;
-}
-
-/**
- * drm_gem_cma_mmap - memory-map a CMA GEM object
- * @filp: file object
- * @vma: VMA for the area to be mapped
- *
- * This function implements an augmented version of the GEM DRM file mmap
- * operation for CMA objects: In addition to the usual GEM VMA setup it
- * immediately faults in the entire object instead of using on-demaind
- * faulting. Drivers which employ the CMA helpers should use this function
- * as their ->mmap() handler in the DRM device file's file_operations
- * structure.
- *
- * Instead of directly referencing this function, drivers should use the
- * DEFINE_DRM_GEM_CMA_FOPS().macro.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_gem_cma_object *cma_obj;
- struct drm_gem_object *gem_obj;
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- gem_obj = vma->vm_private_data;
- cma_obj = to_drm_gem_cma_obj(gem_obj);
-
- return drm_gem_cma_mmap_obj(cma_obj, vma);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
-
#ifndef CONFIG_MMU
/**
* drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
@@ -424,18 +369,18 @@ void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
EXPORT_SYMBOL(drm_gem_cma_print_info);
/**
- * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
+ * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned
* pages for a CMA GEM object
* @obj: GEM object
*
- * This function exports a scatter/gather table suitable for PRIME usage by
+ * This function exports a scatter/gather table by
* calling the standard DMA mapping API. Drivers using the CMA helpers should
* set this as their &drm_gem_object_funcs.get_sg_table callback.
*
* Returns:
* A pointer to the scatter/gather table of pinned pages or NULL on failure.
*/
-struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
+struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj)
{
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
struct sg_table *sgt;
@@ -456,7 +401,7 @@ out:
kfree(sgt);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
+EXPORT_SYMBOL_GPL(drm_gem_cma_get_sg_table);
/**
* drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
@@ -501,40 +446,13 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
/**
- * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
- * @obj: GEM object
- * @vma: VMA for the area to be mapped
- *
- * This function maps a buffer imported via DRM PRIME into a userspace
- * process's address space. Drivers that use the CMA helpers should set this
- * as their &drm_driver.gem_prime_mmap callback.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- struct drm_gem_cma_object *cma_obj;
- int ret;
-
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret < 0)
- return ret;
-
- cma_obj = to_drm_gem_cma_obj(obj);
- return drm_gem_cma_mmap_obj(cma_obj, vma);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
-
-/**
- * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
+ * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual
* address space
* @obj: GEM object
* @map: Returns the kernel virtual address of the CMA GEM object's backing
* store.
*
- * This function maps a buffer exported via DRM PRIME into the kernel's
+ * This function maps a buffer into the kernel's
* virtual address space. Since the CMA buffers are already mapped into the
* kernel virtual address space this simply returns the cached virtual
* address. Drivers using the CMA helpers should set this as their DRM
@@ -543,7 +461,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
* Returns:
* 0 on success, or a negative error code otherwise.
*/
-int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
+int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
@@ -551,7 +469,44 @@ int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
return 0;
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
+EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);
+
+/**
+ * drm_gem_cma_mmap - memory-map an exported CMA GEM object
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function maps a buffer into a userspace process's address space.
+ * In addition to the usual GEM VMA setup it immediately faults in the entire
+ * object instead of using on-demand faulting. Drivers that use the CMA
+ * helpers should set this as their &drm_gem_object_funcs.mmap callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct drm_gem_cma_object *cma_obj;
+ int ret;
+
+ /*
+ * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and rebase
+ * vm_pgoff (used as a fake buffer offset by DRM) to start at 0, as we
+ * want to map the whole buffer.
+ */
+ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+ vma->vm_flags &= ~VM_PFNMAP;
+
+ cma_obj = to_drm_gem_cma_obj(obj);
+
+ ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
+ cma_obj->paddr, vma->vm_end - vma->vm_start);
+ if (ret)
+ drm_gem_vm_close(vma);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
/**
* drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 81d386b5b92a..fad2249ee67b 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -191,6 +191,9 @@ void drm_gem_unpin(struct drm_gem_object *obj);
int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
+int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ u32 handle);
+
/* drm_debugfs.c drm_debugfs_crc.c */
#if defined(CONFIG_DEBUG_FS)
int drm_debugfs_init(struct drm_minor *minor, int minor_id,
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 09d6e9e2e075..c3bd664ea733 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -122,7 +122,7 @@ int drm_irq_install(struct drm_device *dev, int irq)
dev->driver->irq_preinstall(dev);
/* PCI devices require shared interrupts. */
- if (dev->pdev)
+ if (dev_is_pci(dev->dev))
sh_flags = IRQF_SHARED;
ret = request_irq(irq, dev->driver->irq_handler,
@@ -140,7 +140,7 @@ int drm_irq_install(struct drm_device *dev, int irq)
if (ret < 0) {
dev->irq_enabled = false;
if (drm_core_check_feature(dev, DRIVER_LEGACY))
- vga_client_register(dev->pdev, NULL, NULL, NULL);
+ vga_client_register(to_pci_dev(dev->dev), NULL, NULL, NULL);
free_irq(irq, dev);
} else {
dev->irq = irq;
@@ -203,7 +203,7 @@ int drm_irq_uninstall(struct drm_device *dev)
DRM_DEBUG("irq=%d\n", dev->irq);
if (drm_core_check_feature(dev, DRIVER_LEGACY))
- vga_client_register(dev->pdev, NULL, NULL, NULL);
+ vga_client_register(to_pci_dev(dev->dev), NULL, NULL, NULL);
if (dev->driver->irq_uninstall)
dev->driver->irq_uninstall(dev);
@@ -214,12 +214,45 @@ int drm_irq_uninstall(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_irq_uninstall);
+static void devm_drm_irq_uninstall(void *data)
+{
+ drm_irq_uninstall(data);
+}
+
+/**
+ * devm_drm_irq_install - install IRQ handler
+ * @dev: DRM device
+ * @irq: IRQ number to install the handler for
+ *
+ * devm_drm_irq_install() is a managed variant of drm_irq_install().
+ *
+ * If the driver uses devm_drm_irq_install(), there is no need to call
+ * drm_irq_uninstall() when the DRM module gets unloaded, as this is done
+ * automatically.
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
+ */
+int devm_drm_irq_install(struct drm_device *dev, int irq)
+{
+ int ret;
+
+ ret = drm_irq_install(dev, irq);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev->dev,
+ devm_drm_irq_uninstall, dev);
+}
+EXPORT_SYMBOL(devm_drm_irq_install);
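/*
 * Usage sketch (not part of this patch): a PCI-backed driver installing its
 * interrupt handler from probe; no explicit drm_irq_uninstall() is needed on
 * unbind since the devm action takes care of it.
 */
static int my_probe_irq(struct drm_device *drm, struct pci_dev *pdev)
{
	return devm_drm_irq_install(drm, pdev->irq);
}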
+
#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_irq_control(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_control *ctl = data;
int ret = 0, irq;
+ struct pci_dev *pdev;
/* if we haven't irq we fallback for compatibility reasons -
* this used to be a separate function in drm_dma.h
@@ -230,12 +263,13 @@ int drm_legacy_irq_control(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return 0;
/* UMS was only ever supported on pci devices. */
- if (WARN_ON(!dev->pdev))
+ if (WARN_ON(!dev_is_pci(dev->dev)))
return -EINVAL;
switch (ctl->func) {
case DRM_INST_HANDLER:
- irq = dev->pdev->irq;
+ pdev = to_pci_dev(dev->dev);
+ irq = pdev->irq;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
ctl->irq != irq)
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 221a8528c993..f933da1656eb 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -64,19 +64,18 @@ MODULE_PARM_DESC(edid_firmware,
static int __init drm_kms_helper_init(void)
{
- int ret;
-
- /* Call init functions from specific kms helpers here */
- ret = drm_fb_helper_modinit();
- if (ret < 0)
- goto out;
-
- ret = drm_dp_aux_dev_init();
- if (ret < 0)
- goto out;
-
-out:
- return ret;
+ /*
+ * The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
+ * but the module doesn't depend on any fb console symbols. At least
+ * attempt to load fbcon to avoid leaving the system without a usable
+ * console.
+ */
+ if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) &&
+ IS_MODULE(CONFIG_FRAMEBUFFER_CONSOLE) &&
+ !IS_ENABLED(CONFIG_EXPERT))
+ request_module_nowait("fbcon");
+
+ return drm_dp_aux_dev_init();
}
static void __exit drm_kms_helper_exit(void)
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index 1be3ea320474..f71358f9eac9 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -127,7 +127,7 @@ static inline void drm_legacy_master_rmmaps(struct drm_device *dev,
static inline void drm_legacy_rmmaps(struct drm_device *dev) {}
#endif
-#if IS_ENABLED(CONFIG_DRM_VM) && IS_ENABLED(CONFIG_DRM_LEGACY)
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *d);
#else
static inline void drm_legacy_vma_flush(struct drm_device *d)
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index fbea69d6f909..e4f20a2eb6e7 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -37,7 +37,6 @@
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
-#include <xen/xen.h>
#include <drm/drm_agpsupport.h>
#include <drm/drm_cache.h>
@@ -100,24 +99,6 @@ static void *agp_remap(unsigned long offset, unsigned long size,
return addr;
}
-/** Wrapper around agp_free_memory() */
-void drm_free_agp(struct agp_memory *handle, int pages)
-{
- agp_free_memory(handle);
-}
-
-/** Wrapper around agp_bind_memory() */
-int drm_bind_agp(struct agp_memory *handle, unsigned int start)
-{
- return agp_bind_memory(handle, start);
-}
-
-/** Wrapper around agp_unbind_memory() */
-int drm_unbind_agp(struct agp_memory *handle)
-{
- return agp_unbind_memory(handle);
-}
-
#else /* CONFIG_AGP */
static inline void *agp_remap(unsigned long offset, unsigned long size,
struct drm_device *dev)
@@ -156,35 +137,3 @@ void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
iounmap(map->handle);
}
EXPORT_SYMBOL(drm_legacy_ioremapfree);
-
-bool drm_need_swiotlb(int dma_bits)
-{
- struct resource *tmp;
- resource_size_t max_iomem = 0;
-
- /*
- * Xen paravirtual hosts require swiotlb regardless of requested dma
- * transfer size.
- *
- * NOTE: Really, what it requires is use of the dma_alloc_coherent
- * allocator used in ttm_dma_populate() instead of
- * ttm_populate_and_map_pages(), which bounce buffers so much in
- * Xen it leads to swiotlb buffer exhaustion.
- */
- if (xen_pv_domain())
- return true;
-
- /*
- * Enforce dma_alloc_coherent when memory encryption is active as well
- * for the same reasons as for Xen paravirtual hosts.
- */
- if (mem_encrypt_active())
- return true;
-
- for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
- max_iomem = max(max_iomem, tmp->end);
- }
-
- return max_iomem > ((u64)1 << dma_bits);
-}
-EXPORT_SYMBOL(drm_need_swiotlb);
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index f1affc1bb679..37b4b9f0e468 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -195,7 +195,7 @@ void drm_mode_config_reset(struct drm_device *dev)
crtc->funcs->reset(crtc);
drm_for_each_encoder(encoder, dev)
- if (encoder->funcs->reset)
+ if (encoder->funcs && encoder->funcs->reset)
encoder->funcs->reset(encoder);
drm_connector_list_iter_begin(dev, &conn_iter);
@@ -625,6 +625,10 @@ static void validate_encoder_possible_crtcs(struct drm_encoder *encoder)
void drm_mode_config_validate(struct drm_device *dev)
{
struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ u32 primary_with_crtc = 0, cursor_with_crtc = 0;
+ unsigned int num_primary = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
@@ -636,4 +640,49 @@ void drm_mode_config_validate(struct drm_device *dev)
validate_encoder_possible_clones(encoder);
validate_encoder_possible_crtcs(encoder);
}
+
+ drm_for_each_crtc(crtc, dev) {
+ WARN(!crtc->primary, "Missing primary plane on [CRTC:%d:%s]\n",
+ crtc->base.id, crtc->name);
+
+ WARN(crtc->cursor && crtc->funcs->cursor_set,
+ "[CRTC:%d:%s] must not have both a cursor plane and a cursor_set func",
+ crtc->base.id, crtc->name);
+ WARN(crtc->cursor && crtc->funcs->cursor_set2,
+ "[CRTC:%d:%s] must not have both a cursor plane and a cursor_set2 func",
+ crtc->base.id, crtc->name);
+ WARN(crtc->cursor && crtc->funcs->cursor_move,
+ "[CRTC:%d:%s] must not have both a cursor plane and a cursor_move func",
+ crtc->base.id, crtc->name);
+
+ if (crtc->primary) {
+ WARN(!(crtc->primary->possible_crtcs & drm_crtc_mask(crtc)),
+ "Bogus primary plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n",
+ crtc->primary->base.id, crtc->primary->name,
+ crtc->base.id, crtc->name);
+ WARN(primary_with_crtc & drm_plane_mask(crtc->primary),
+ "Primary plane [PLANE:%d:%s] used for multiple CRTCs",
+ crtc->primary->base.id, crtc->primary->name);
+ primary_with_crtc |= drm_plane_mask(crtc->primary);
+ }
+ if (crtc->cursor) {
+ WARN(!(crtc->cursor->possible_crtcs & drm_crtc_mask(crtc)),
+ "Bogus cursor plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n",
+ crtc->cursor->base.id, crtc->cursor->name,
+ crtc->base.id, crtc->name);
+ WARN(cursor_with_crtc & drm_plane_mask(crtc->cursor),
+ "Cursor plane [PLANE:%d:%s] used for multiple CRTCs",
+ crtc->cursor->base.id, crtc->cursor->name);
+ cursor_with_crtc |= drm_plane_mask(crtc->cursor);
+ }
+ }
+
+ drm_for_each_plane(plane, dev) {
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ num_primary++;
+ }
+
+ WARN(num_primary != dev->mode_config.num_crtc,
+ "Must have as many primary planes as there are CRTCs, but have %u primary planes and %u CRTCs",
+ num_primary, dev->mode_config.num_crtc);
}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 33fb2f05ce66..1ac67d4505e0 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -762,7 +762,7 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
if (mode->htotal == 0 || mode->vtotal == 0)
return 0;
- num = mode->clock * 1000;
+ num = mode->clock;
den = mode->htotal * mode->vtotal;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -772,7 +772,7 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
if (mode->vscan > 1)
den *= mode->vscan;
- return DIV_ROUND_CLOSEST(num, den);
+ return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(num, 1000), den);
}
EXPORT_SYMBOL(drm_mode_vrefresh);
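With mode->clock kept in kHz, the old `num = mode->clock * 1000` intermediate could overflow a signed 32-bit int once the pixel clock exceeds roughly 2.1 GHz (very large tiled or virtual modes); the replacement widens the multiplication to 64 bits before the rounded division. A minimal userspace sketch of the same arithmetic follows; the helper below is a local stand-in for the kernel's mul_u32_u32()/DIV_ROUND_CLOSEST_ULL() pair, not the kernel code itself.

/*
 * Sketch only: refresh rate from a mode's clock (kHz) and total
 * horizontal/vertical timings, widened to 64 bits before dividing.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int vrefresh_hz(uint32_t clock_khz, uint32_t htotal, uint32_t vtotal)
{
	uint64_t num = (uint64_t)clock_khz * 1000;	/* widen before multiplying */
	uint64_t den = (uint64_t)htotal * vtotal;

	if (den == 0)
		return 0;

	return (unsigned int)((num + den / 2) / den);	/* round to closest */
}

int main(void)
{
	/* Made-up mode whose clock * 1000 no longer fits in a 32-bit int. */
	printf("%u Hz\n", vrefresh_hz(2414016, 15872, 2250));
	return 0;
}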
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 6dba4b8ce4fe..2294a1580d35 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -24,6 +24,8 @@
#include <linux/dma-mapping.h>
#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -36,6 +38,9 @@
#include "drm_legacy.h"
#ifdef CONFIG_DRM_LEGACY
+/* List of devices hanging off drivers with stealth attach. */
+static LIST_HEAD(legacy_dev_list);
+static DEFINE_MUTEX(legacy_dev_list_lock);
/**
* drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
@@ -65,7 +70,7 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
return NULL;
dmah->size = size;
- dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
+ dmah->vaddr = dma_alloc_coherent(dev->dev, size,
&dmah->busaddr,
GFP_KERNEL);
@@ -88,7 +93,7 @@ EXPORT_SYMBOL(drm_pci_alloc);
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
- dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+ dma_free_coherent(dev->dev, dmah->size, dmah->vaddr,
dmah->busaddr);
kfree(dmah);
}
@@ -107,16 +112,18 @@ static int drm_get_pci_domain(struct drm_device *dev)
return 0;
#endif /* __alpha__ */
- return pci_domain_nr(dev->pdev->bus);
+ return pci_domain_nr(to_pci_dev(dev->dev)->bus);
}
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
drm_get_pci_domain(dev),
- dev->pdev->bus->number,
- PCI_SLOT(dev->pdev->devfn),
- PCI_FUNC(dev->pdev->devfn));
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
if (!master->unique)
return -ENOMEM;
@@ -126,12 +133,14 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
- (p->busnum & 0xff) != dev->pdev->bus->number ||
- p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
+ (p->busnum & 0xff) != pdev->bus->number ||
+ p->devnum != PCI_SLOT(pdev->devfn) || p->funcnum != PCI_FUNC(pdev->devfn))
return -EINVAL;
- p->irq = dev->pdev->irq;
+ p->irq = pdev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
@@ -159,7 +168,7 @@ int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
return -EOPNOTSUPP;
/* UMS was only ever support on PCI devices. */
- if (WARN_ON(!dev->pdev))
+ if (WARN_ON(!dev_is_pci(dev->dev)))
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
@@ -183,7 +192,7 @@ void drm_pci_agp_destroy(struct drm_device *dev)
static void drm_pci_agp_init(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
- if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
+ if (pci_find_capability(to_pci_dev(dev->dev), PCI_CAP_ID_AGP))
dev->agp = drm_agp_init(dev);
if (dev->agp) {
dev->agp->agp_mtrr = arch_phys_wc_add(
@@ -196,7 +205,7 @@ static void drm_pci_agp_init(struct drm_device *dev)
static int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
- struct drm_driver *driver)
+ const struct drm_driver *driver)
{
struct drm_device *dev;
int ret;
@@ -225,10 +234,11 @@ static int drm_get_pci_dev(struct pci_dev *pdev,
if (ret)
goto err_agp;
- /* No locking needed since shadow-attach is single-threaded since it may
- * only be called from the per-driver module init hook. */
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);
+ if (drm_core_check_feature(dev, DRIVER_LEGACY)) {
+ mutex_lock(&legacy_dev_list_lock);
+ list_add_tail(&dev->legacy_dev_list, &legacy_dev_list);
+ mutex_unlock(&legacy_dev_list_lock);
+ }
return 0;
@@ -249,7 +259,8 @@ err_free:
*
* Return: 0 on success or a negative error code on failure.
*/
-int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
+int drm_legacy_pci_init(const struct drm_driver *driver,
+ struct pci_driver *pdriver)
{
struct pci_dev *pdev = NULL;
const struct pci_device_id *pid;
@@ -261,7 +272,6 @@ int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
return -EINVAL;
/* If not using KMS, fall back to stealth mode manual scanning. */
- INIT_LIST_HEAD(&driver->legacy_dev_list);
for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
pid = &pdriver->id_table[i];
@@ -295,7 +305,8 @@ EXPORT_SYMBOL(drm_legacy_pci_init);
* Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This
* is deprecated and only used by dri1 drivers.
*/
-void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+void drm_legacy_pci_exit(const struct drm_driver *driver,
+ struct pci_driver *pdriver)
{
struct drm_device *dev, *tmp;
@@ -304,11 +315,15 @@ void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
if (!(driver->driver_features & DRIVER_LEGACY)) {
WARN_ON(1);
} else {
- list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
+ mutex_lock(&legacy_dev_list_lock);
+ list_for_each_entry_safe(dev, tmp, &legacy_dev_list,
legacy_dev_list) {
- list_del(&dev->legacy_dev_list);
- drm_put_dev(dev);
+ if (dev->driver == driver) {
+ list_del(&dev->legacy_dev_list);
+ drm_put_dev(dev);
+ }
}
+ mutex_unlock(&legacy_dev_list_lock);
}
DRM_INFO("Module unloaded\n");
}
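The shadow-attach bookkeeping above moves from a list embedded in struct drm_driver to a single file-scope list guarded by a mutex, which is what allows the driver structure to become const; on unload, drm_legacy_pci_exit() now filters the global list by dev->driver. A small sketch of that filter-on-removal pattern, with hypothetical names apart from the kernel list/mutex helpers, is shown below.

/*
 * Sketch of the filter-on-unload pattern: one global list protected by
 * a mutex, where each unloading owner removes only its own entries.
 * Everything except the list/mutex helpers is hypothetical.
 */
#include <linux/list.h>
#include <linux/mutex.h>

struct demo_dev {
	struct list_head node;
	const void *owner;		/* which driver registered this entry */
};

static LIST_HEAD(demo_dev_list);
static DEFINE_MUTEX(demo_dev_list_lock);

static void demo_unregister_owner(const void *owner)
{
	struct demo_dev *d, *tmp;

	mutex_lock(&demo_dev_list_lock);
	list_for_each_entry_safe(d, tmp, &demo_dev_list, node) {
		if (d->owner == owner)
			list_del(&d->node);	/* _safe keeps tmp valid */
	}
	mutex_unlock(&demo_dev_list_lock);
}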
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index a0cb746bcb0a..338650abd267 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -30,6 +30,7 @@
#include <drm/drm_file.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_managed.h>
#include <drm/drm_vblank.h>
#include "drm_crtc_internal.h"
@@ -40,7 +41,7 @@
* A plane represents an image source that can be blended with or overlayed on
* top of a CRTC during the scanout process. Planes take their input data from a
* &drm_framebuffer object. The plane itself specifies the cropping and scaling
- * of that image, and where it is placed on the visible are of a display
+ * of that image, and where it is placed on the visible area of a display
* pipeline, represented by &drm_crtc. A plane can also have additional
* properties that specify how the pixels are positioned and blended, like
* rotation or Z-position. All these properties are stored in &drm_plane_state.
@@ -49,14 +50,34 @@
* &struct drm_plane (possibly as part of a larger structure) and registers it
* with a call to drm_universal_plane_init().
*
- * Cursor and overlay planes are optional. All drivers should provide one
- * primary plane per CRTC to avoid surprising userspace too much. See enum
- * drm_plane_type for a more in-depth discussion of these special uapi-relevant
- * plane types. Special planes are associated with their CRTC by calling
- * drm_crtc_init_with_planes().
- *
* The type of a plane is exposed in the immutable "type" enumeration property,
- * which has one of the following values: "Overlay", "Primary", "Cursor".
+ * which has one of the following values: "Overlay", "Primary", "Cursor" (see
+ * enum drm_plane_type). A plane can be compatible with multiple CRTCs, see
+ * &drm_plane.possible_crtcs.
+ *
+ * Each CRTC must have a unique primary plane userspace can attach to enable
+ * the CRTC. In other words, userspace must be able to attach a different
+ * primary plane to each CRTC at the same time. Primary planes can still be
+ * compatible with multiple CRTCs. There must be exactly as many primary planes
+ * as there are CRTCs.
+ *
+ * Legacy uAPI doesn't expose the primary and cursor planes directly. DRM core
+ * relies on the driver to set the primary and optionally the cursor plane used
+ * for legacy IOCTLs. This is done by calling drm_crtc_init_with_planes(). All
+ * drivers must provide one primary plane per CRTC to avoid surprising legacy
+ * userspace too much.
+ */
+
+/**
+ * DOC: standard plane properties
+ *
+ * DRM planes have a few standardized properties:
+ *
+ * IN_FORMATS:
+ * Blob property which contains the set of buffer format and modifier
+ * pairs supported by this plane. The blob is a struct
+ * drm_format_modifier_blob. Without this property the plane doesn't
+ * support buffers with modifiers. Userspace cannot change this property.
*/
static unsigned int drm_num_planes(struct drm_device *dev)
@@ -152,31 +173,16 @@ done:
return 0;
}
-/**
- * drm_universal_plane_init - Initialize a new universal plane object
- * @dev: DRM device
- * @plane: plane object to init
- * @possible_crtcs: bitmask of possible CRTCs
- * @funcs: callbacks for the new plane
- * @formats: array of supported formats (DRM_FORMAT\_\*)
- * @format_count: number of elements in @formats
- * @format_modifiers: array of struct drm_format modifiers terminated by
- * DRM_FORMAT_MOD_INVALID
- * @type: type of plane (overlay, primary, cursor)
- * @name: printf style format string for the plane name, or NULL for default name
- *
- * Initializes a plane object of type @type.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
- uint32_t possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats, unsigned int format_count,
- const uint64_t *format_modifiers,
- enum drm_plane_type type,
- const char *name, ...)
+__printf(9, 0)
+static int __drm_universal_plane_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats,
+ unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type,
+ const char *name, va_list ap)
{
struct drm_mode_config *config = &dev->mode_config;
unsigned int format_modifier_count = 0;
@@ -237,11 +243,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
}
if (name) {
- va_list ap;
-
- va_start(ap, name);
plane->name = kvasprintf(GFP_KERNEL, name, ap);
- va_end(ap);
} else {
plane->name = kasprintf(GFP_KERNEL, "plane-%d",
drm_num_planes(dev));
@@ -286,8 +288,102 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
return 0;
}
+
+/**
+ * drm_universal_plane_init - Initialize a new universal plane object
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @format_modifiers: array of struct drm_format modifiers terminated by
+ * DRM_FORMAT_MOD_INVALID
+ * @type: type of plane (overlay, primary, cursor)
+ * @name: printf style format string for the plane name, or NULL for default name
+ *
+ * Initializes a plane object of type @type. The &drm_plane_funcs.destroy hook
+ * should call drm_plane_cleanup() and kfree() the plane structure. The plane
+ * structure should not be allocated with devm_kzalloc().
+ *
+ * Note: consider using drmm_universal_plane_alloc() instead of
+ * drm_universal_plane_init() to let the DRM managed resource infrastructure
+ * take care of cleanup and deallocation.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type,
+ const char *name, ...)
+{
+ va_list ap;
+ int ret;
+
+ WARN_ON(!funcs->destroy);
+
+ va_start(ap, name);
+ ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+ formats, format_count, format_modifiers,
+ type, name, ap);
+ va_end(ap);
+ return ret;
+}
EXPORT_SYMBOL(drm_universal_plane_init);
+static void drmm_universal_plane_alloc_release(struct drm_device *dev, void *ptr)
+{
+ struct drm_plane *plane = ptr;
+
+ if (WARN_ON(!plane->dev))
+ return;
+
+ drm_plane_cleanup(plane);
+}
+
+void *__drmm_universal_plane_alloc(struct drm_device *dev, size_t size,
+ size_t offset, uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type,
+ const char *name, ...)
+{
+ void *container;
+ struct drm_plane *plane;
+ va_list ap;
+ int ret;
+
+ if (WARN_ON(!funcs || funcs->destroy))
+ return ERR_PTR(-EINVAL);
+
+ container = drmm_kzalloc(dev, size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ plane = container + offset;
+
+ va_start(ap, name);
+ ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+ formats, format_count, format_modifiers,
+ type, name, ap);
+ va_end(ap);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drmm_add_action_or_reset(dev, drmm_universal_plane_alloc_release,
+ plane);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return container;
+}
+EXPORT_SYMBOL(__drmm_universal_plane_alloc);
+
int drm_plane_register_all(struct drm_device *dev)
{
unsigned int num_planes = 0;
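A driver-side sketch of how the managed plane allocator above is meant to be consumed, assuming the drmm_universal_plane_alloc() convenience macro that wraps __drmm_universal_plane_alloc() in <drm/drm_plane.h>; struct my_plane, the format table and the funcs table are illustrative only. Note the absence of a .destroy callback, which the managed allocator explicitly rejects.

/* Hypothetical driver code; cleanup is queued internally via drmm actions. */
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>

struct my_plane {
	struct drm_plane base;
	void *hw_regs;			/* driver-private state */
};

static const u32 my_formats[] = { DRM_FORMAT_XRGB8888 };

static const struct drm_plane_funcs my_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	/* no .destroy: drmm_universal_plane_alloc() warns and bails if set */
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};

static struct my_plane *my_primary_plane_create(struct drm_device *dev)
{
	return drmm_universal_plane_alloc(dev, struct my_plane, base,
					  BIT(0), &my_plane_funcs,
					  my_formats, ARRAY_SIZE(my_formats),
					  NULL, DRM_PLANE_TYPE_PRIMARY,
					  "primary");
}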
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7db55fce35d8..2a54f86856af 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -717,6 +717,8 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
if (obj->funcs && obj->funcs->mmap) {
+ vma->vm_ops = obj->funcs->vm_ops;
+
ret = obj->funcs->mmap(obj, vma);
if (ret)
return ret;
@@ -978,44 +980,58 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
EXPORT_SYMBOL(drm_gem_prime_import);
/**
- * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
+ * drm_prime_sg_to_page_array - convert an sg table into a page array
+ * @sgt: scatter-gather table to convert
+ * @pages: array of page pointers to store the pages in
+ * @max_entries: size of the passed-in array
+ *
+ * Exports an sg table into an array of pages.
+ *
+ * This function is deprecated; its use is strongly discouraged.
+ * The page array is only useful for page faults and those can corrupt fields
+ * in the struct page if they are not handled by the exporting driver.
+ */
+int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
+ struct page **pages,
+ int max_entries)
+{
+ struct sg_page_iter page_iter;
+ struct page **p = pages;
+
+ for_each_sgtable_page(sgt, &page_iter, 0) {
+ if (WARN_ON(p - pages >= max_entries))
+ return -1;
+ *p++ = sg_page_iter_page(&page_iter);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_prime_sg_to_page_array);
+
+/**
+ * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array
* @sgt: scatter-gather table to convert
- * @pages: optional array of page pointers to store the page array in
- * @addrs: optional array to store the dma bus address of each page
+ * @addrs: array to store the dma bus address of each page
* @max_entries: size of both the passed-in arrays
*
- * Exports an sg table into an array of pages and addresses. This is currently
- * required by the TTM driver in order to do correct fault handling.
+ * Exports an sg table into an array of addresses.
*
- * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
+ * Drivers should use this in their &drm_driver.gem_prime_import_sg_table
* implementation.
*/
-int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
- dma_addr_t *addrs, int max_entries)
+int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
+ int max_entries)
{
struct sg_dma_page_iter dma_iter;
- struct sg_page_iter page_iter;
- struct page **p = pages;
dma_addr_t *a = addrs;
- if (pages) {
- for_each_sgtable_page(sgt, &page_iter, 0) {
- if (WARN_ON(p - pages >= max_entries))
- return -1;
- *p++ = sg_page_iter_page(&page_iter);
- }
+ for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+ if (WARN_ON(a - addrs >= max_entries))
+ return -1;
+ *a++ = sg_page_iter_dma_address(&dma_iter);
}
- if (addrs) {
- for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
- if (WARN_ON(a - addrs >= max_entries))
- return -1;
- *a++ = sg_page_iter_dma_address(&dma_iter);
- }
- }
-
return 0;
}
-EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
+EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);
/**
* drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
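A hedged sketch of a &drm_driver.gem_prime_import_sg_table implementation built on the new DMA-address-only helper; struct my_bo and my_bo_create() are hypothetical driver code, and error unwinding is abbreviated for brevity.

/* Illustrative import path that only needs bus addresses, no page array. */
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <linux/dma-buf.h>
#include <linux/mm.h>

struct my_bo {
	struct drm_gem_object base;
	dma_addr_t *dma_addrs;
	unsigned int npages;
};

static struct drm_gem_object *
my_gem_prime_import_sg_table(struct drm_device *dev,
			     struct dma_buf_attachment *attach,
			     struct sg_table *sgt)
{
	unsigned int npages = DIV_ROUND_UP(attach->dmabuf->size, PAGE_SIZE);
	struct my_bo *bo = my_bo_create(dev, attach->dmabuf->size);
	int ret;

	if (IS_ERR(bo))
		return ERR_CAST(bo);

	bo->dma_addrs = kvmalloc_array(npages, sizeof(*bo->dma_addrs), GFP_KERNEL);
	if (!bo->dma_addrs)
		return ERR_PTR(-ENOMEM);

	/* Extract only the DMA addresses from the imported sg table. */
	ret = drm_prime_sg_to_dma_addr_array(sgt, bo->dma_addrs, npages);
	if (ret < 0)
		return ERR_PTR(ret);

	bo->npages = npages;
	return &bo->base;
}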
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index d6017726cc2a..ad59a51eab6d 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -515,7 +515,8 @@ retry:
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_override_edid_modes(connector);
- if (count == 0 && connector->status == connector_status_connected)
+ if (count == 0 && (connector->status == connector_status_connected ||
+ connector->status == connector_status_unknown))
count = drm_add_modes_noedid(connector, 1024, 768);
count += drm_helper_probe_add_cmdline_mode(connector);
if (count == 0)
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 743e57c1b44f..6ce8f5cd1eb5 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -9,6 +9,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_managed.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -55,8 +56,9 @@ static const struct drm_encoder_funcs drm_simple_encoder_funcs_cleanup = {
* stored in the device structure. Free the encoder's memory as part of
* the device release function.
*
- * FIXME: Later improvements to DRM's resource management may allow for
- * an automated kfree() of the encoder's memory.
+ * Note: consider using drmm_simple_encoder_alloc() instead of
+ * drm_simple_encoder_init() to let the DRM managed resource infrastructure
+ * take care of cleanup and deallocation.
*
* Returns:
* Zero on success, error code on failure.
@@ -71,6 +73,14 @@ int drm_simple_encoder_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_simple_encoder_init);
+void *__drmm_simple_encoder_alloc(struct drm_device *dev, size_t size,
+ size_t offset, int encoder_type)
+{
+ return __drmm_encoder_alloc(dev, size, offset, NULL, encoder_type,
+ NULL);
+}
+EXPORT_SYMBOL(__drmm_simple_encoder_alloc);
+
static enum drm_mode_status
drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
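Assuming the matching drmm_simple_encoder_alloc() macro that sits next to this export in <drm/drm_simple_kms_helper.h>, a driver can embed the encoder in its own structure and skip the manual release path described in the comment above; struct my_output below is illustrative only.

/* Hypothetical output structure; the encoder is freed by drmm cleanup. */
#include <drm/drm_encoder.h>
#include <drm/drm_simple_kms_helper.h>

struct my_output {
	struct drm_encoder encoder;
	struct drm_connector connector;
};

static int my_output_init(struct drm_device *dev)
{
	struct my_output *out;

	out = drmm_simple_encoder_alloc(dev, struct my_output, encoder,
					DRM_MODE_ENCODER_NONE);
	if (IS_ERR(out))
		return PTR_ERR(out);

	out->encoder.possible_crtcs = BIT(0);
	return 0;
}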
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index d30e2f2b8f3c..893165eeddf3 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -74,7 +74,7 @@
* |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓| updates the
* | | frame as it
* | | travels down
- * | | ("sacn out")
+ * | | ("scan out")
* | Old frame |
* | |
* | |
@@ -1006,7 +1006,14 @@ static void send_vblank_event(struct drm_device *dev,
break;
}
trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, seq);
- drm_send_event_locked(dev, &e->base);
+ /*
+ * Use the same timestamp for any associated fence signal to avoid
+ * mismatch in timestamps for vsync & fence events triggered by the
+	 * same HW event. Frameworks like SurfaceFlinger in Android expect the
+	 * retire-fence timestamp to match the HW vsync exactly, as they use it
+	 * for their software vsync modeling.
+ */
+ drm_send_event_timestamp_locked(dev, &e->base, now);
}
/**
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 6d5a03b32238..9b3b989d7cad 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -278,7 +278,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
- dma_free_coherent(&dev->pdev->dev,
+ dma_free_coherent(dev->dev,
map->size,
map->handle,
map->offset);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index d9bd83203a15..b390dd4d60b7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -135,8 +135,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
goto fail;
}
- ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages,
- NULL, npages);
+ ret = drm_prime_sg_to_page_array(sgt, etnaviv_obj->pages, npages);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 951d5f708e92..6a251e3aa779 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -89,7 +89,6 @@ comment "Sub-drivers"
config DRM_EXYNOS_G2D
bool "G2D"
depends on VIDEO_SAMSUNG_S5P_G2D=n || COMPILE_TEST
- select FRAME_VECTOR
help
Choose this option if you want to use Exynos G2D for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 967a5cdc120e..1e0c5a7f206e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -205,7 +205,8 @@ struct g2d_cmdlist_userptr {
dma_addr_t dma_addr;
unsigned long userptr;
unsigned long size;
- struct frame_vector *vec;
+ struct page **pages;
+ unsigned int npages;
struct sg_table *sgt;
atomic_t refcount;
bool in_pool;
@@ -378,7 +379,6 @@ static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
bool force)
{
struct g2d_cmdlist_userptr *g2d_userptr = obj;
- struct page **pages;
if (!obj)
return;
@@ -398,15 +398,9 @@ out:
dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt,
DMA_BIDIRECTIONAL, 0);
- pages = frame_vector_pages(g2d_userptr->vec);
- if (!IS_ERR(pages)) {
- int i;
-
- for (i = 0; i < frame_vector_count(g2d_userptr->vec); i++)
- set_page_dirty_lock(pages[i]);
- }
- put_vaddr_frames(g2d_userptr->vec);
- frame_vector_destroy(g2d_userptr->vec);
+ unpin_user_pages_dirty_lock(g2d_userptr->pages, g2d_userptr->npages,
+ true);
+ kvfree(g2d_userptr->pages);
if (!g2d_userptr->out_of_list)
list_del_init(&g2d_userptr->list);
@@ -474,35 +468,35 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
offset = userptr & ~PAGE_MASK;
end = PAGE_ALIGN(userptr + size);
npages = (end - start) >> PAGE_SHIFT;
- g2d_userptr->vec = frame_vector_create(npages);
- if (!g2d_userptr->vec) {
+ g2d_userptr->pages = kvmalloc_array(npages, sizeof(*g2d_userptr->pages),
+ GFP_KERNEL);
+ if (!g2d_userptr->pages) {
ret = -ENOMEM;
goto err_free;
}
- ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
- g2d_userptr->vec);
+ ret = pin_user_pages_fast(start, npages,
+ FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
+ g2d_userptr->pages);
if (ret != npages) {
DRM_DEV_ERROR(g2d->dev,
"failed to get user pages from userptr.\n");
if (ret < 0)
- goto err_destroy_framevec;
- ret = -EFAULT;
- goto err_put_framevec;
- }
- if (frame_vector_to_pages(g2d_userptr->vec) < 0) {
+ goto err_destroy_pages;
+ npages = ret;
ret = -EFAULT;
- goto err_put_framevec;
+ goto err_unpin_pages;
}
+ g2d_userptr->npages = npages;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
ret = -ENOMEM;
- goto err_put_framevec;
+ goto err_unpin_pages;
}
ret = sg_alloc_table_from_pages(sgt,
- frame_vector_pages(g2d_userptr->vec),
+ g2d_userptr->pages,
npages, offset, size, GFP_KERNEL);
if (ret < 0) {
DRM_DEV_ERROR(g2d->dev, "failed to get sgt from pages.\n");
@@ -538,11 +532,11 @@ err_sg_free_table:
err_free_sgt:
kfree(sgt);
-err_put_framevec:
- put_vaddr_frames(g2d_userptr->vec);
+err_unpin_pages:
+ unpin_user_pages(g2d_userptr->pages, npages);
-err_destroy_framevec:
- frame_vector_destroy(g2d_userptr->vec);
+err_destroy_pages:
+ kvfree(g2d_userptr->pages);
err_free:
kfree(g2d_userptr);
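The conversion above replaces the frame-vector API with long-term page pinning: pin_user_pages_fast() with FOLL_LONGTERM on the way in, unpin_user_pages_dirty_lock() on the way out. A rough sketch of that pin/unpin pattern, with hypothetical demo_* helpers and simplified error handling, follows.

/* Illustrative only: long-term pinning of a userptr range. */
#include <linux/mm.h>
#include <linux/slab.h>

static int demo_pin_userptr(unsigned long start, unsigned int npages,
			    struct page ***pages_out)
{
	struct page **pages;
	int pinned;

	pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	pinned = pin_user_pages_fast(start, npages,
				     FOLL_WRITE | FOLL_LONGTERM, pages);
	if (pinned != npages) {
		if (pinned > 0)
			unpin_user_pages(pages, pinned);
		kvfree(pages);
		return pinned < 0 ? pinned : -EFAULT;
	}

	*pages_out = pages;
	return 0;
}

static void demo_unpin_userptr(struct page **pages, unsigned int npages)
{
	/* Mark the pages dirty under lock before dropping the pin. */
	unpin_user_pages_dirty_lock(pages, npages, true);
	kvfree(pages);
}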
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 0e23c93a1094..ec395658a43f 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -1,9 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_GMA500
- tristate "Intel GMA5/600 KMS Framebuffer"
+ tristate "Intel GMA500/600/3600/3650 KMS Framebuffer"
depends on DRM && PCI && X86 && MMU
select DRM_KMS_HELPER
- select DRM_TTM
# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
select ACPI_VIDEO if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
@@ -19,17 +18,3 @@ config DRM_GMA600
help
Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
platforms with LVDS ports. MIPI is not currently supported.
-
-config DRM_GMA3600
- bool "Intel GMA3600/3650 support (Experimental)"
- depends on DRM_GMA500
- help
- Say yes to include basic support for Intel GMA3600/3650 (Intel
- Cedar Trail) platforms.
-
-config DRM_MEDFIELD
- bool "Intel Medfield support (Experimental)"
- depends on DRM_GMA500 && X86_INTEL_MID
- help
- Say yes to include support for the Intel Medfield platform.
-
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index c8f2c89be99d..884ab1f9063e 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -6,36 +6,35 @@
gma500_gfx-y += \
accel_2d.o \
backlight.o \
+ blitter.o \
+ cdv_device.o \
+ cdv_intel_crt.o \
+ cdv_intel_display.o \
+ cdv_intel_dp.o \
+ cdv_intel_hdmi.o \
+ cdv_intel_lvds.o \
framebuffer.o \
gem.o \
+ gma_device.o \
+ gma_display.o \
gtt.o \
intel_bios.o \
- intel_i2c.o \
intel_gmbus.o \
+ intel_i2c.o \
+ mid_bios.o \
mmu.o \
- blitter.o \
power.o \
+ psb_device.o \
psb_drv.o \
- gma_display.o \
- gma_device.o \
psb_intel_display.o \
psb_intel_lvds.o \
psb_intel_modes.o \
psb_intel_sdvo.o \
psb_lid.o \
- psb_irq.o \
- psb_device.o \
- mid_bios.o
+ psb_irq.o
gma500_gfx-$(CONFIG_ACPI) += opregion.o \
-gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
- cdv_intel_crt.o \
- cdv_intel_display.o \
- cdv_intel_hdmi.o \
- cdv_intel_lvds.o \
- cdv_intel_dp.o
-
gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
oaktrail_crtc.o \
oaktrail_lvds.o \
@@ -43,14 +42,4 @@ gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
oaktrail_hdmi.o \
oaktrail_hdmi_i2c.o
-gma500_gfx-$(CONFIG_DRM_MEDFIELD) += mdfld_device.o \
- mdfld_output.o \
- mdfld_intel_display.o \
- mdfld_dsi_output.o \
- mdfld_dsi_dpi.o \
- mdfld_dsi_pkg_sender.o \
- mdfld_tpo_vid.o \
- mdfld_tmd_vid.o \
- tc35876x-dsi-lvds.o
-
obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index e75293e4a52f..19e055dbd4c2 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -95,13 +95,14 @@ static u32 cdv_get_max_backlight(struct drm_device *dev)
static int cdv_get_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (cdv_backlight_combination_mode(dev)) {
u8 lbpc;
val &= ~1;
- pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
+ pci_read_config_byte(pdev, 0xF4, &lbpc);
val *= lbpc;
}
return (val * 100)/cdv_get_max_backlight(dev);
@@ -111,6 +112,7 @@ static int cdv_get_brightness(struct backlight_device *bd)
static int cdv_set_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int level = bd->props.brightness;
u32 blc_pwm_ctl;
@@ -128,7 +130,7 @@ static int cdv_set_brightness(struct backlight_device *bd)
lbpc = level * 0xfe / max + 1;
level /= lbpc;
- pci_write_config_byte(dev->pdev, 0xF4, lbpc);
+ pci_write_config_byte(pdev, 0xF4, lbpc);
}
blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
@@ -205,8 +207,9 @@ static inline void CDV_MSG_WRITE32(int domain, uint port, uint offset,
static void cdv_init_pm(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 pwr_cnt;
- int domain = pci_domain_nr(dev->pdev->bus);
+ int domain = pci_domain_nr(pdev->bus);
int i;
dev_priv->apm_base = CDV_MSG_READ32(domain, PSB_PUNIT_PORT,
@@ -234,6 +237,8 @@ static void cdv_init_pm(struct drm_device *dev)
static void cdv_errata(struct drm_device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
/* Disable bonus launch.
* CPU and GPU competes for memory and display misses updates and
* flickers. Worst with dual core, dual displays.
@@ -242,7 +247,7 @@ static void cdv_errata(struct drm_device *dev)
* Bonus Launch to work around the issue, by degrading
* performance.
*/
- CDV_MSG_WRITE32(pci_domain_nr(dev->pdev->bus), 3, 0x30, 0x08027108);
+ CDV_MSG_WRITE32(pci_domain_nr(pdev->bus), 3, 0x30, 0x08027108);
}
/**
@@ -255,12 +260,13 @@ static void cdv_errata(struct drm_device *dev)
static int cdv_save_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector *connector;
dev_dbg(dev->dev, "Saving GPU registers.\n");
- pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB);
+ pci_read_config_byte(pdev, 0xF4, &regs->cdv.saveLBB);
regs->cdv.saveDSPCLK_GATE_D = REG_READ(DSPCLK_GATE_D);
regs->cdv.saveRAMCLK_GATE_D = REG_READ(RAMCLK_GATE_D);
@@ -309,11 +315,12 @@ static int cdv_save_display_registers(struct drm_device *dev)
static int cdv_restore_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector *connector;
u32 temp;
- pci_write_config_byte(dev->pdev, 0xF4, regs->cdv.saveLBB);
+ pci_write_config_byte(pdev, 0xF4, regs->cdv.saveLBB);
REG_WRITE(DSPCLK_GATE_D, regs->cdv.saveDSPCLK_GATE_D);
REG_WRITE(RAMCLK_GATE_D, regs->cdv.saveRAMCLK_GATE_D);
@@ -421,16 +428,16 @@ static int cdv_power_up(struct drm_device *dev)
static void cdv_hotplug_work_func(struct work_struct *work)
{
struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
- hotplug_work);
+ hotplug_work);
struct drm_device *dev = dev_priv->dev;
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
-}
+}
/* The core driver has received a hotplug IRQ. We are in IRQ context
so extract the needed information and kick off queued processing */
-
+
static int cdv_hotplug_event(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -449,7 +456,7 @@ static void cdv_hotplug_enable(struct drm_device *dev, bool on)
} else {
REG_WRITE(PORT_HOTPLUG_EN, 0);
REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
- }
+ }
}
static const char *force_audio_names[] = {
@@ -568,9 +575,10 @@ static const struct psb_offset cdv_regmap[2] = {
static int cdv_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
- if (pci_enable_msi(dev->pdev))
+ if (pci_enable_msi(pdev))
dev_warn(dev->dev, "Enabling MSI failed!\n");
dev_priv->regmap = cdv_regmap;
gma_get_core_freq(dev);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 88535f5aacc5..c48c9d322dfb 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -127,7 +127,7 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
}
-/**
+/*
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
*
* \return true if CRT is connected.
@@ -278,8 +278,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
i2c_reg, "CRTDDC_A");
if (!gma_encoder->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
- "failed.\n");
+ dev_printk(KERN_ERR, dev->dev, "DDC bus registration failed.\n");
goto failed_ddc;
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 686385a66167..5d3302249779 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -551,7 +551,7 @@ void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
}
}
-/**
+/*
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index bfd9a15d63b1..6d3ada39ff86 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -306,7 +306,7 @@ static uint32_t dp_vswing_premph_table[] = {
};
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
- * @intel_dp: DP struct
+ * @encoder: GMA encoder struct
*
* If a CPU or PCH DP output is attached to an eDP panel, this function
* will return true, and false otherwise.
@@ -1687,7 +1687,7 @@ static enum drm_connector_status cdv_dp_detect(struct gma_encoder *encoder)
return status;
}
-/**
+/*
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
*
* \return true if DP port is connected.
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 0d12c6ffbc40..e525689f84f0 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -22,9 +22,6 @@
*
* Authors:
* jim liu <jim.liu@intel.com>
- *
- * FIXME:
- * We should probably make this generic and share it with Medfield
*/
#include <linux/pm_runtime.h>
@@ -56,7 +53,6 @@ struct mid_intel_hdmi_priv {
bool has_hdmi_audio;
/* Should set this when detect hotplug */
bool hdmi_device_connected;
- struct mdfld_hdmi_i2c *i2c_bus;
struct i2c_adapter *hdmi_i2c_adapter; /* for control functions */
struct drm_device *dev;
};
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index eaaf4efec217..5bff7d9e3aa6 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -74,7 +74,7 @@ static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
return retval;
}
-/**
+/*
* Sets the backlight level.
*
* level backlight level, from 0 to cdv_intel_lvds_get_max_backlight().
@@ -99,7 +99,7 @@ static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
}
}
-/**
+/*
* Sets the power state for the panel.
*/
static void cdv_intel_lvds_set_power(struct drm_device *dev,
@@ -291,7 +291,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
REG_WRITE(PFIT_CONTROL, pfit_control);
}
-/**
+/*
* Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
*/
static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
@@ -471,6 +471,7 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
/**
* cdv_intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
+ * @mode_dev: PSB mode device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
@@ -554,7 +555,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
"LVDSBLC_B");
if (!gma_encoder->i2c_bus) {
dev_printk(KERN_ERR,
- &dev->pdev->dev, "I2C bus registration failed.\n");
+ dev->dev, "I2C bus registration failed.\n");
goto failed_blc_i2c;
}
gma_encoder->i2c_bus->slave_addr = 0x2C;
@@ -575,7 +576,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
GPIOC,
"LVDSDDC_C");
if (!gma_encoder->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev,
+ dev_printk(KERN_ERR, dev->dev,
"DDC bus registration " "failed.\n");
goto failed_ddc;
}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index fc4fda1d258b..ebe9dccf2d83 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -159,7 +159,7 @@ static const struct fb_ops psbfb_unaccel_ops = {
* @dev: our DRM device
* @fb: framebuffer to set up
* @mode_cmd: mode description
- * @gt: backing object
+ * @obj: backing object
*
* Configure and fill in the boilerplate for our frame buffer. Return
* 0 on success or an error code if we fail.
@@ -197,7 +197,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
* psb_framebuffer_create - create a framebuffer backed by gt
* @dev: our DRM device
* @mode_cmd: the description of the requested mode
- * @gt: the backing object
+ * @obj: the backing object
*
* Create a framebuffer object backed by the gt, and fill in the
* boilerplate required
@@ -252,7 +252,7 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
/**
* psbfb_create - create a framebuffer
- * @fbdev: the framebuffer device
+ * @fb_helper: the framebuffer helper
* @sizes: specification of the layout
*
* Create a framebuffer to the specifications provided
@@ -262,6 +262,7 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
{
struct drm_device *dev = fb_helper->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
@@ -325,8 +326,8 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
drm_fb_helper_fill_info(info, fb_helper, sizes);
- info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
- info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
+ info->fix.mmio_start = pci_resource_start(pdev, 0);
+ info->fix.mmio_len = pci_resource_len(pdev, 0);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -529,6 +530,7 @@ void psb_modeset_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int i;
drm_mode_config_init(dev);
@@ -540,8 +542,7 @@ void psb_modeset_init(struct drm_device *dev)
/* set memory base */
/* Oaktrail and Poulsbo should use BAR 2*/
- pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
- &(dev->mode_config.fb_base));
+ pci_read_config_dword(pdev, PSB_BSM, (u32 *)&(dev->mode_config.fb_base));
/* num pipes is 2 for PSB but 1 for Mrst */
for (i = 0; i < dev_priv->num_pipe; i++)
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index db827e591403..fbf420051ef5 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -16,6 +16,7 @@
#include <drm/drm.h>
#include <drm/drm_vma_manager.h>
+#include "gem.h"
#include "psb_drv.h"
static vm_fault_t psb_gem_fault(struct vm_fault *vmf);
@@ -49,6 +50,8 @@ const struct drm_gem_object_funcs psb_gem_object_funcs = {
* @dev: our device
* @size: the size requested
* @handlep: returned handle (opaque number)
+ * @stolen: unused
+ * @align: unused
*
* Create a GEM object, fill in the boilerplate and attach a handle to
* it so that userspace can speak about it. This does the core work
@@ -97,7 +100,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
/**
* psb_gem_dumb_create - create a dumb buffer
- * @drm_file: our client file
+ * @file: our client file
* @dev: our device
* @args: the requested arguments copied from userspace
*
@@ -116,7 +119,6 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
/**
* psb_gem_fault - pagefault handler for GEM objects
- * @vma: the VMA of the GEM object
* @vmf: fault detail
*
* Invoked when a fault occurs on an mmap of a GEM managed area. GEM
diff --git a/drivers/gpu/drm/gma500/gem.h b/drivers/gpu/drm/gma500/gem.h
index 3741a711b9fd..bae6454ead29 100644
--- a/drivers/gpu/drm/gma500/gem.h
+++ b/drivers/gpu/drm/gma500/gem.h
@@ -8,6 +8,8 @@
#ifndef _GEM_H
#define _GEM_H
+struct drm_device;
+
extern const struct drm_gem_object_funcs psb_gem_object_funcs;
extern int psb_gem_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/gma500/gma_device.c b/drivers/gpu/drm/gma500/gma_device.c
index 869f30392566..4c91e86f4b14 100644
--- a/drivers/gpu/drm/gma500/gma_device.c
+++ b/drivers/gpu/drm/gma500/gma_device.c
@@ -6,12 +6,14 @@
**************************************************************************/
#include "psb_drv.h"
+#include "gma_device.h"
void gma_get_core_freq(struct drm_device *dev)
{
uint32_t clock;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct pci_dev *pci_root =
- pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+ pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
0, 0);
struct drm_psb_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 3df6d6e850f5..b03f7b8241f2 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -20,7 +20,7 @@
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-/**
+/*
* Returns whether any output on the specified pipe is of the specified type
*/
bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
@@ -180,7 +180,7 @@ int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
return 0;
}
-/**
+/*
* Sets the power management mode of the pipe and plane.
*
* This code should probably grow support for turning the cursor off and back
@@ -559,14 +559,14 @@ int gma_crtc_set_config(struct drm_mode_set *set,
if (!dev_priv->rpm_enabled)
return drm_crtc_helper_set_config(set, ctx);
- pm_runtime_forbid(&dev->pdev->dev);
+ pm_runtime_forbid(dev->dev);
ret = drm_crtc_helper_set_config(set, ctx);
- pm_runtime_allow(&dev->pdev->dev);
+ pm_runtime_allow(dev->dev);
return ret;
}
-/**
+/*
* Save HW states of given crtc
*/
void gma_crtc_save(struct drm_crtc *crtc)
@@ -609,7 +609,7 @@ void gma_crtc_save(struct drm_crtc *crtc)
crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
}
-/**
+/*
* Restore HW states of given crtc
*/
void gma_crtc_restore(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index d246b1f70366..e884750bc123 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -340,13 +340,14 @@ static void psb_gtt_alloc(struct drm_device *dev)
void psb_gtt_takedown(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
if (dev_priv->gtt_map) {
iounmap(dev_priv->gtt_map);
dev_priv->gtt_map = NULL;
}
if (dev_priv->gtt_initialized) {
- pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
dev_priv->gmch_ctrl);
PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
(void) PSB_RVDC32(PSB_PGETBL_CTL);
@@ -358,6 +359,7 @@ void psb_gtt_takedown(struct drm_device *dev)
int psb_gtt_init(struct drm_device *dev, int resume)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned gtt_pages;
unsigned long stolen_size, vram_stolen_size;
unsigned i, num_pages;
@@ -376,8 +378,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
pg = &dev_priv->gtt;
/* Enable the GTT */
- pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
- pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+ pci_read_config_word(pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
@@ -397,8 +399,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
*/
pg->mmu_gatt_start = 0xE0000000;
- pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
- gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
+ pg->gtt_start = pci_resource_start(pdev, PSB_GTT_RESOURCE);
+ gtt_pages = pci_resource_len(pdev, PSB_GTT_RESOURCE)
>> PAGE_SHIFT;
/* CDV doesn't report this. In which case the system has 64 gtt pages */
if (pg->gtt_start == 0 || gtt_pages == 0) {
@@ -407,10 +409,10 @@ int psb_gtt_init(struct drm_device *dev, int resume)
pg->gtt_start = dev_priv->pge_ctl;
}
- pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
- pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
+ pg->gatt_start = pci_resource_start(pdev, PSB_GATT_RESOURCE);
+ pg->gatt_pages = pci_resource_len(pdev, PSB_GATT_RESOURCE)
>> PAGE_SHIFT;
- dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
+ dev_priv->gtt_mem = &pdev->resource[PSB_GATT_RESOURCE];
if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
static struct resource fudge; /* Preferably peppermint */
@@ -431,7 +433,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
dev_priv->gtt_mem = &fudge;
}
- pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
+ pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
- PAGE_SIZE;
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 8ad6337eeba3..d838369f0119 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -50,7 +50,7 @@ parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
uint8_t panel_type;
edp = find_section(bdb, BDB_EDP);
-
+
dev_priv->edp.bpp = 18;
if (!edp) {
if (dev_priv->edp.support) {
@@ -80,7 +80,7 @@ parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
dev_priv->edp.pps = *edp_pps;
DRM_DEBUG_KMS("EDP timing in vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8,
+ dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8,
dev_priv->edp.pps.t9, dev_priv->edp.pps.t10,
dev_priv->edp.pps.t11_t12);
@@ -516,7 +516,7 @@ parse_device_mapping(struct drm_psb_private *dev_priv,
int psb_intel_init_bios(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct vbt_header *vbt = NULL;
struct bdb_header *bdb = NULL;
u8 __iomem *bios = NULL;
@@ -574,7 +574,7 @@ int psb_intel_init_bios(struct drm_device *dev)
return 0;
}
-/**
+/*
* Destroy and free VBT data
*/
void psb_intel_destroy_bios(struct drm_device *dev)
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index a083fbfe35b8..370bd6451bd9 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -196,7 +196,7 @@ intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin)
"gma500 GPIO%c", "?BACDE?F"[pin]);
gpio->adapter.owner = THIS_MODULE;
gpio->adapter.algo_data = &gpio->algo;
- gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+ gpio->adapter.dev.parent = dev_priv->dev->dev;
gpio->algo.setsda = set_data;
gpio->algo.setscl = set_clock;
gpio->algo.getsda = get_data;
@@ -417,7 +417,7 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
"gma500 gmbus %s",
names[i]);
- bus->adapter.dev.parent = &dev->pdev->dev;
+ bus->adapter.dev.parent = dev->dev;
bus->adapter.algo_data = dev_priv;
bus->adapter.algo = &gmbus_algorithm;
diff --git a/drivers/gpu/drm/gma500/intel_i2c.c b/drivers/gpu/drm/gma500/intel_i2c.c
index de8810188190..5e1b4d70c317 100644
--- a/drivers/gpu/drm/gma500/intel_i2c.c
+++ b/drivers/gpu/drm/gma500/intel_i2c.c
@@ -85,7 +85,6 @@ static void set_data(void *data, int state_high)
/**
* psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
* @dev: DRM device
- * @output: driver specific output device
* @reg: GPIO reg to use
* @name: name for this bus
*
@@ -117,7 +116,7 @@ struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
chan->adapter.owner = THIS_MODULE;
chan->adapter.algo_data = &chan->algo;
- chan->adapter.dev.parent = &dev->pdev->dev;
+ chan->adapter.dev.parent = dev->dev;
chan->algo.setsda = set_data;
chan->algo.setscl = set_clock;
chan->algo.getsda = get_data;
@@ -145,7 +144,7 @@ out_free:
/**
* psb_intel_i2c_destroy - unregister and free i2c bus resources
- * @output: channel to free
+ * @chan: channel to free
*
* Unregister the adapter from the i2c layer, then free the structure.
*/
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
deleted file mode 100644
index b83d59b21de5..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_device.c
+++ /dev/null
@@ -1,562 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/**************************************************************************
- * Copyright (c) 2011, Intel Corporation.
- * All Rights Reserved.
- *
- **************************************************************************/
-
-#include <linux/delay.h>
-#include <linux/gpio/machine.h>
-
-#include <asm/intel_scu_ipc.h>
-
-#include "mdfld_dsi_output.h"
-#include "mdfld_output.h"
-#include "mid_bios.h"
-#include "psb_drv.h"
-#include "tc35876x-dsi-lvds.h"
-
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-
-#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
-#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
-#define BLC_PWM_FREQ_CALC_CONSTANT 32
-#define MHz 1000000
-#define BRIGHTNESS_MIN_LEVEL 1
-#define BRIGHTNESS_MAX_LEVEL 100
-#define BRIGHTNESS_MASK 0xFF
-#define BLC_POLARITY_NORMAL 0
-#define BLC_POLARITY_INVERSE 1
-#define BLC_ADJUSTMENT_MAX 100
-
-#define MDFLD_BLC_PWM_PRECISION_FACTOR 10
-#define MDFLD_BLC_MAX_PWM_REG_FREQ 0xFFFE
-#define MDFLD_BLC_MIN_PWM_REG_FREQ 0x2
-
-#define MDFLD_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
-#define MDFLD_BACKLIGHT_PWM_CTL_SHIFT (16)
-
-static struct backlight_device *mdfld_backlight_device;
-
-int mdfld_set_brightness(struct backlight_device *bd)
-{
- struct drm_device *dev =
- (struct drm_device *)bl_get_data(mdfld_backlight_device);
- struct drm_psb_private *dev_priv = dev->dev_private;
- int level = bd->props.brightness;
-
- DRM_DEBUG_DRIVER("backlight level set to %d\n", level);
-
- /* Perform value bounds checking */
- if (level < BRIGHTNESS_MIN_LEVEL)
- level = BRIGHTNESS_MIN_LEVEL;
-
- if (gma_power_begin(dev, false)) {
- u32 adjusted_level = 0;
-
- /*
- * Adjust the backlight level with the percent in
- * dev_priv->blc_adj2
- */
- adjusted_level = level * dev_priv->blc_adj2;
- adjusted_level = adjusted_level / BLC_ADJUSTMENT_MAX;
- dev_priv->brightness_adjusted = adjusted_level;
-
- if (mdfld_get_panel_type(dev, 0) == TC35876X) {
- if (dev_priv->dpi_panel_on[0] ||
- dev_priv->dpi_panel_on[2])
- tc35876x_brightness_control(dev,
- dev_priv->brightness_adjusted);
- } else {
- if (dev_priv->dpi_panel_on[0])
- mdfld_dsi_brightness_control(dev, 0,
- dev_priv->brightness_adjusted);
- }
-
- if (dev_priv->dpi_panel_on[2])
- mdfld_dsi_brightness_control(dev, 2,
- dev_priv->brightness_adjusted);
- gma_power_end(dev);
- }
-
- /* cache the brightness for later use */
- dev_priv->brightness = level;
- return 0;
-}
-
-static int mdfld_get_brightness(struct backlight_device *bd)
-{
- struct drm_device *dev =
- (struct drm_device *)bl_get_data(mdfld_backlight_device);
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- DRM_DEBUG_DRIVER("brightness = 0x%x \n", dev_priv->brightness);
-
- /* return locally cached var instead of HW read (due to DPST etc.) */
- return dev_priv->brightness;
-}
-
-static const struct backlight_ops mdfld_ops = {
- .get_brightness = mdfld_get_brightness,
- .update_status = mdfld_set_brightness,
-};
-
-static int device_backlight_init(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = (struct drm_psb_private *)
- dev->dev_private;
-
- dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
- dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
-
- return 0;
-}
-
-static int mdfld_backlight_init(struct drm_device *dev)
-{
- struct backlight_properties props;
- int ret = 0;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = BRIGHTNESS_MAX_LEVEL;
- props.type = BACKLIGHT_PLATFORM;
- mdfld_backlight_device = backlight_device_register("mdfld-bl",
- NULL, (void *)dev, &mdfld_ops, &props);
-
- if (IS_ERR(mdfld_backlight_device))
- return PTR_ERR(mdfld_backlight_device);
-
- ret = device_backlight_init(dev);
- if (ret)
- return ret;
-
- mdfld_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL;
- mdfld_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
- backlight_update_status(mdfld_backlight_device);
- return 0;
-}
-#endif
-
-struct backlight_device *mdfld_get_backlight_device(void)
-{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- return mdfld_backlight_device;
-#else
- return NULL;
-#endif
-}
-
-/*
- * mdfld_save_display_registers
- *
- * Description: We are going to suspend so save current display
- * register state.
- *
- * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
- */
-static int mdfld_save_display_registers(struct drm_device *dev, int pipenum)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct medfield_state *regs = &dev_priv->regs.mdfld;
- struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
- const struct psb_offset *map = &dev_priv->regmap[pipenum];
- int i;
- u32 *mipi_val;
-
- /* register */
- u32 mipi_reg = MIPI;
-
- switch (pipenum) {
- case 0:
- mipi_val = &regs->saveMIPI;
- break;
- case 1:
- mipi_val = &regs->saveMIPI;
- break;
- case 2:
- /* register */
- mipi_reg = MIPI_C;
- /* pointer to values */
- mipi_val = &regs->saveMIPI_C;
- break;
- default:
- DRM_ERROR("%s, invalid pipe number.\n", __func__);
- return -EINVAL;
- }
-
- /* Pipe & plane A info */
- pipe->dpll = PSB_RVDC32(map->dpll);
- pipe->fp0 = PSB_RVDC32(map->fp0);
- pipe->conf = PSB_RVDC32(map->conf);
- pipe->htotal = PSB_RVDC32(map->htotal);
- pipe->hblank = PSB_RVDC32(map->hblank);
- pipe->hsync = PSB_RVDC32(map->hsync);
- pipe->vtotal = PSB_RVDC32(map->vtotal);
- pipe->vblank = PSB_RVDC32(map->vblank);
- pipe->vsync = PSB_RVDC32(map->vsync);
- pipe->src = PSB_RVDC32(map->src);
- pipe->stride = PSB_RVDC32(map->stride);
- pipe->linoff = PSB_RVDC32(map->linoff);
- pipe->tileoff = PSB_RVDC32(map->tileoff);
- pipe->size = PSB_RVDC32(map->size);
- pipe->pos = PSB_RVDC32(map->pos);
- pipe->surf = PSB_RVDC32(map->surf);
- pipe->cntr = PSB_RVDC32(map->cntr);
- pipe->status = PSB_RVDC32(map->status);
-
- /*save palette (gamma) */
- for (i = 0; i < 256; i++)
- pipe->palette[i] = PSB_RVDC32(map->palette + (i << 2));
-
- if (pipenum == 1) {
- regs->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
- regs->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
-
- regs->saveHDMIPHYMISCCTL = PSB_RVDC32(HDMIPHYMISCCTL);
- regs->saveHDMIB_CONTROL = PSB_RVDC32(HDMIB_CONTROL);
- return 0;
- }
-
- *mipi_val = PSB_RVDC32(mipi_reg);
- return 0;
-}
-
-/*
- * mdfld_restore_display_registers
- *
- * Description: We are going to resume so restore display register state.
- *
- * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
- */
-static int mdfld_restore_display_registers(struct drm_device *dev, int pipenum)
-{
- /* To get panel out of ULPS mode. */
- u32 temp = 0;
- u32 device_ready_reg = DEVICE_READY_REG;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dsi_config *dsi_config = NULL;
- struct medfield_state *regs = &dev_priv->regs.mdfld;
- struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
- const struct psb_offset *map = &dev_priv->regmap[pipenum];
- u32 i;
- u32 dpll;
- u32 timeout = 0;
-
- /* register */
- u32 mipi_reg = MIPI;
-
- /* values */
- u32 dpll_val = pipe->dpll;
- u32 mipi_val = regs->saveMIPI;
-
- switch (pipenum) {
- case 0:
- dpll_val &= ~DPLL_VCO_ENABLE;
- dsi_config = dev_priv->dsi_configs[0];
- break;
- case 1:
- dpll_val &= ~DPLL_VCO_ENABLE;
- break;
- case 2:
- mipi_reg = MIPI_C;
- mipi_val = regs->saveMIPI_C;
- dsi_config = dev_priv->dsi_configs[1];
- break;
- default:
- DRM_ERROR("%s, invalid pipe number.\n", __func__);
- return -EINVAL;
- }
-
- /*make sure VGA plane is off. it initializes to on after reset!*/
- PSB_WVDC32(0x80000000, VGACNTRL);
-
- if (pipenum == 1) {
- PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, map->dpll);
- PSB_RVDC32(map->dpll);
-
- PSB_WVDC32(pipe->fp0, map->fp0);
- } else {
-
- dpll = PSB_RVDC32(map->dpll);
-
- if (!(dpll & DPLL_VCO_ENABLE)) {
-
- /* When ungating power of DPLL, needs to wait 0.5us
- before enable the VCO */
- if (dpll & MDFLD_PWR_GATE_EN) {
- dpll &= ~MDFLD_PWR_GATE_EN;
- PSB_WVDC32(dpll, map->dpll);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
- }
-
- PSB_WVDC32(pipe->fp0, map->fp0);
- PSB_WVDC32(dpll_val, map->dpll);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
-
- dpll_val |= DPLL_VCO_ENABLE;
- PSB_WVDC32(dpll_val, map->dpll);
- PSB_RVDC32(map->dpll);
-
- /* wait for DSI PLL to lock */
- while (timeout < 20000 &&
- !(PSB_RVDC32(map->conf) & PIPECONF_DSIPLL_LOCK)) {
- udelay(150);
- timeout++;
- }
-
- if (timeout == 20000) {
- DRM_ERROR("%s, can't lock DSIPLL.\n",
- __func__);
- return -EINVAL;
- }
- }
- }
- /* Restore mode */
- PSB_WVDC32(pipe->htotal, map->htotal);
- PSB_WVDC32(pipe->hblank, map->hblank);
- PSB_WVDC32(pipe->hsync, map->hsync);
- PSB_WVDC32(pipe->vtotal, map->vtotal);
- PSB_WVDC32(pipe->vblank, map->vblank);
- PSB_WVDC32(pipe->vsync, map->vsync);
- PSB_WVDC32(pipe->src, map->src);
- PSB_WVDC32(pipe->status, map->status);
-
- /*set up the plane*/
- PSB_WVDC32(pipe->stride, map->stride);
- PSB_WVDC32(pipe->linoff, map->linoff);
- PSB_WVDC32(pipe->tileoff, map->tileoff);
- PSB_WVDC32(pipe->size, map->size);
- PSB_WVDC32(pipe->pos, map->pos);
- PSB_WVDC32(pipe->surf, map->surf);
-
- if (pipenum == 1) {
- /* restore palette (gamma) */
- /* udelay(50000); */
- for (i = 0; i < 256; i++)
- PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
-
- PSB_WVDC32(regs->savePFIT_CONTROL, PFIT_CONTROL);
- PSB_WVDC32(regs->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
-
- /*TODO: resume HDMI port */
-
- /*TODO: resume pipe*/
-
- /*enable the plane*/
- PSB_WVDC32(pipe->cntr & ~DISPLAY_PLANE_ENABLE, map->cntr);
-
- return 0;
- }
-
- /*set up pipe related registers*/
- PSB_WVDC32(mipi_val, mipi_reg);
-
- /*setup MIPI adapter + MIPI IP registers*/
- if (dsi_config)
- mdfld_dsi_controller_init(dsi_config, pipenum);
-
- if (in_atomic() || in_interrupt())
- mdelay(20);
- else
- msleep(20);
-
- /*enable the plane*/
- PSB_WVDC32(pipe->cntr, map->cntr);
-
- if (in_atomic() || in_interrupt())
- mdelay(20);
- else
- msleep(20);
-
- /* LP Hold Release */
- temp = REG_READ(mipi_reg);
- temp |= LP_OUTPUT_HOLD_RELEASE;
- REG_WRITE(mipi_reg, temp);
- mdelay(1);
-
-
- /* Set DSI host to exit from Utra Low Power State */
- temp = REG_READ(device_ready_reg);
- temp &= ~ULPS_MASK;
- temp |= 0x3;
- temp |= EXIT_ULPS_DEV_READY;
- REG_WRITE(device_ready_reg, temp);
- mdelay(1);
-
- temp = REG_READ(device_ready_reg);
- temp &= ~ULPS_MASK;
- temp |= EXITING_ULPS;
- REG_WRITE(device_ready_reg, temp);
- mdelay(1);
-
- /*enable the pipe*/
- PSB_WVDC32(pipe->conf, map->conf);
-
- /* restore palette (gamma) */
- /* udelay(50000); */
- for (i = 0; i < 256; i++)
- PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
-
- return 0;
-}
-
-static int mdfld_save_registers(struct drm_device *dev)
-{
- /* mdfld_save_cursor_overlay_registers(dev); */
- mdfld_save_display_registers(dev, 0);
- mdfld_save_display_registers(dev, 2);
- mdfld_disable_crtc(dev, 0);
- mdfld_disable_crtc(dev, 2);
-
- return 0;
-}
-
-static int mdfld_restore_registers(struct drm_device *dev)
-{
- mdfld_restore_display_registers(dev, 2);
- mdfld_restore_display_registers(dev, 0);
- /* mdfld_restore_cursor_overlay_registers(dev); */
-
- return 0;
-}
-
-static int mdfld_power_down(struct drm_device *dev)
-{
- /* FIXME */
- return 0;
-}
-
-static int mdfld_power_up(struct drm_device *dev)
-{
- /* FIXME */
- return 0;
-}
-
-/* Medfield */
-static const struct psb_offset mdfld_regmap[3] = {
- {
- .fp0 = MRST_FPA0,
- .fp1 = MRST_FPA1,
- .cntr = DSPACNTR,
- .conf = PIPEACONF,
- .src = PIPEASRC,
- .dpll = MRST_DPLL_A,
- .htotal = HTOTAL_A,
- .hblank = HBLANK_A,
- .hsync = HSYNC_A,
- .vtotal = VTOTAL_A,
- .vblank = VBLANK_A,
- .vsync = VSYNC_A,
- .stride = DSPASTRIDE,
- .size = DSPASIZE,
- .pos = DSPAPOS,
- .surf = DSPASURF,
- .addr = MRST_DSPABASE,
- .status = PIPEASTAT,
- .linoff = DSPALINOFF,
- .tileoff = DSPATILEOFF,
- .palette = PALETTE_A,
- },
- {
- .fp0 = MDFLD_DPLL_DIV0,
- .cntr = DSPBCNTR,
- .conf = PIPEBCONF,
- .src = PIPEBSRC,
- .dpll = MDFLD_DPLL_B,
- .htotal = HTOTAL_B,
- .hblank = HBLANK_B,
- .hsync = HSYNC_B,
- .vtotal = VTOTAL_B,
- .vblank = VBLANK_B,
- .vsync = VSYNC_B,
- .stride = DSPBSTRIDE,
- .size = DSPBSIZE,
- .pos = DSPBPOS,
- .surf = DSPBSURF,
- .addr = MRST_DSPBBASE,
- .status = PIPEBSTAT,
- .linoff = DSPBLINOFF,
- .tileoff = DSPBTILEOFF,
- .palette = PALETTE_B,
- },
- {
- .fp0 = MRST_FPA0, /* This is what the old code did ?? */
- .cntr = DSPCCNTR,
- .conf = PIPECCONF,
- .src = PIPECSRC,
- /* No DPLL_C */
- .dpll = MRST_DPLL_A,
- .htotal = HTOTAL_C,
- .hblank = HBLANK_C,
- .hsync = HSYNC_C,
- .vtotal = VTOTAL_C,
- .vblank = VBLANK_C,
- .vsync = VSYNC_C,
- .stride = DSPCSTRIDE,
- .size = DSPBSIZE,
- .pos = DSPCPOS,
- .surf = DSPCSURF,
- .addr = MDFLD_DSPCBASE,
- .status = PIPECSTAT,
- .linoff = DSPCLINOFF,
- .tileoff = DSPCTILEOFF,
- .palette = PALETTE_C,
- },
-};
-
-/*
- * The GPIO lines for resetting DSI pipe 0 and 2 are available in the
- * PCI device 0000:00:0c.0 on the Medfield.
- */
-static struct gpiod_lookup_table mdfld_dsi_pipe_gpio_table = {
- .table = {
- GPIO_LOOKUP("0000:00:0c.0", 128, "dsi-pipe0-reset",
- GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("0000:00:0c.0", 34, "dsi-pipe2-reset",
- GPIO_ACTIVE_HIGH),
- { },
- },
-};
-
-static int mdfld_chip_setup(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- if (pci_enable_msi(dev->pdev))
- dev_warn(dev->dev, "Enabling MSI failed!\n");
- dev_priv->regmap = mdfld_regmap;
-
- /* Associate the GPIO lines with the DRM device */
- mdfld_dsi_pipe_gpio_table.dev_id = dev_name(dev->dev);
- gpiod_add_lookup_table(&mdfld_dsi_pipe_gpio_table);
-
- return mid_chip_setup(dev);
-}
-
-const struct psb_ops mdfld_chip_ops = {
- .name = "mdfld",
- .pipes = 3,
- .crtcs = 3,
- .lvds_mask = (1 << 1),
- .hdmi_mask = (1 << 1),
- .cursor_needs_phys = 0,
- .sgx_offset = MRST_SGX_OFFSET,
-
- .chip_setup = mdfld_chip_setup,
- .crtc_helper = &mdfld_helper_funcs,
- .crtc_funcs = &psb_intel_crtc_funcs,
-
- .output_init = mdfld_output_init,
-
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- .backlight_init = mdfld_backlight_init,
-#endif
-
- .save_regs = mdfld_save_registers,
- .restore_regs = mdfld_restore_registers,
- .save_crtc = gma_crtc_save,
- .restore_crtc = gma_crtc_restore,
- .power_down = mdfld_power_down,
- .power_up = mdfld_power_up,
-};
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
deleted file mode 100644
index ae1223f631a7..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ /dev/null
@@ -1,1017 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#include <linux/delay.h>
-
-#include <drm/drm_simple_kms_helper.h>
-
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_pkg_sender.h"
-#include "mdfld_output.h"
-#include "psb_drv.h"
-#include "tc35876x-dsi-lvds.h"
-
-static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output,
- int pipe);
-
-static void mdfld_wait_for_HS_DATA_FIFO(struct drm_device *dev, u32 pipe)
-{
- u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
- int timeout = 0;
-
- udelay(500);
-
- /* This will time out after approximately 2+ seconds */
- while ((timeout < 20000) &&
- (REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_DATA_FULL)) {
- udelay(100);
- timeout++;
- }
-
- if (timeout == 20000)
- DRM_INFO("MIPI: HS Data FIFO was never cleared!\n");
-}
-
-static void mdfld_wait_for_HS_CTRL_FIFO(struct drm_device *dev, u32 pipe)
-{
- u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
- int timeout = 0;
-
- udelay(500);
-
- /* This will time out after approximately 2+ seconds */
- while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg)
- & DSI_FIFO_GEN_HS_CTRL_FULL)) {
- udelay(100);
- timeout++;
- }
- if (timeout == 20000)
- DRM_INFO("MIPI: HS CMD FIFO was never cleared!\n");
-}
-
-static void mdfld_wait_for_DPI_CTRL_FIFO(struct drm_device *dev, u32 pipe)
-{
- u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
- int timeout = 0;
-
- udelay(500);
-
- /* This will time out after approximately 2+ seconds */
- while ((timeout < 20000) && ((REG_READ(gen_fifo_stat_reg) &
- DPI_FIFO_EMPTY) != DPI_FIFO_EMPTY)) {
- udelay(100);
- timeout++;
- }
-
- if (timeout == 20000)
- DRM_ERROR("MIPI: DPI FIFO was never cleared\n");
-}
-
-static void mdfld_wait_for_SPL_PKG_SENT(struct drm_device *dev, u32 pipe)
-{
- u32 intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
- int timeout = 0;
-
- udelay(500);
-
- /* This will time out after approximately 2+ seconds */
- while ((timeout < 20000) && (!(REG_READ(intr_stat_reg)
- & DSI_INTR_STATE_SPL_PKG_SENT))) {
- udelay(100);
- timeout++;
- }
-
- if (timeout == 20000)
- DRM_ERROR("MIPI: SPL_PKT_SENT_INTERRUPT was not sent successfully!\n");
-}
-
-/* For TC35876X */
-
-static void dsi_set_device_ready_state(struct drm_device *dev, int state,
- int pipe)
-{
- REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), !!state, 0, 0);
-}
-
-static void dsi_set_pipe_plane_enable_state(struct drm_device *dev,
- int state, int pipe)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- u32 pipeconf_reg = PIPEACONF;
- u32 dspcntr_reg = DSPACNTR;
-
- u32 dspcntr = dev_priv->dspcntr[pipe];
- u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
-
- if (pipe) {
- pipeconf_reg = PIPECCONF;
- dspcntr_reg = DSPCCNTR;
- } else
- mipi &= (~0x03);
-
- if (state) {
- /*Set up pipe */
- REG_WRITE(pipeconf_reg, BIT(31));
-
- if (REG_BIT_WAIT(pipeconf_reg, 1, 30))
- dev_err(&dev->pdev->dev, "%s: Pipe enable timeout\n",
- __func__);
-
- /*Set up display plane */
- REG_WRITE(dspcntr_reg, dspcntr);
- } else {
- u32 dspbase_reg = pipe ? MDFLD_DSPCBASE : MRST_DSPABASE;
-
- /* Put DSI lanes to ULPS to disable pipe */
- REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 2, 2, 1);
- REG_READ(MIPI_DEVICE_READY_REG(pipe)); /* posted write? */
-
- /* LP Hold */
- REG_FLD_MOD(MIPI_PORT_CONTROL(pipe), 0, 16, 16);
- REG_READ(MIPI_PORT_CONTROL(pipe)); /* posted write? */
-
- /* Disable display plane */
- REG_FLD_MOD(dspcntr_reg, 0, 31, 31);
-
- /* Flush the plane changes ??? posted write? */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
-
- /* Disable PIPE */
- REG_FLD_MOD(pipeconf_reg, 0, 31, 31);
-
- if (REG_BIT_WAIT(pipeconf_reg, 0, 30))
- dev_err(&dev->pdev->dev, "%s: Pipe disable timeout\n",
- __func__);
-
- if (REG_BIT_WAIT(MIPI_GEN_FIFO_STAT_REG(pipe), 1, 28))
- dev_err(&dev->pdev->dev, "%s: FIFO not empty\n",
- __func__);
- }
-}
-
-static void mdfld_dsi_configure_down(struct mdfld_dsi_encoder *dsi_encoder,
- int pipe)
-{
- struct mdfld_dsi_dpi_output *dpi_output =
- MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_encoder_get_config(dsi_encoder);
- struct drm_device *dev = dsi_config->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->dpi_panel_on[pipe]) {
- dev_err(dev->dev, "DPI panel is already off\n");
- return;
- }
- tc35876x_toshiba_bridge_panel_off(dev);
- tc35876x_set_bridge_reset_state(dev, 1);
- dsi_set_pipe_plane_enable_state(dev, 0, pipe);
- mdfld_dsi_dpi_shut_down(dpi_output, pipe);
- dsi_set_device_ready_state(dev, 0, pipe);
-}
-
-static void mdfld_dsi_configure_up(struct mdfld_dsi_encoder *dsi_encoder,
- int pipe)
-{
- struct mdfld_dsi_dpi_output *dpi_output =
- MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_encoder_get_config(dsi_encoder);
- struct drm_device *dev = dsi_config->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (dev_priv->dpi_panel_on[pipe]) {
- dev_err(dev->dev, "DPI panel is already on\n");
- return;
- }
-
- /* For resume path sequence */
- mdfld_dsi_dpi_shut_down(dpi_output, pipe);
- dsi_set_device_ready_state(dev, 0, pipe);
-
- dsi_set_device_ready_state(dev, 1, pipe);
- tc35876x_set_bridge_reset_state(dev, 0);
- tc35876x_configure_lvds_bridge(dev);
- mdfld_dsi_dpi_turn_on(dpi_output, pipe); /* Send turn on command */
- dsi_set_pipe_plane_enable_state(dev, 1, pipe);
-}
-/* End for TC35876X */
-
-/* ************************************************************************* *\
- * FUNCTION: mdfld_dsi_tpo_ic_init
- *
- * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
- * restore_display_registers. since this function does not
- * acquire the mutex, it is important that the calling function
- * does!
-\* ************************************************************************* */
-static void mdfld_dsi_tpo_ic_init(struct mdfld_dsi_config *dsi_config, u32 pipe)
-{
- struct drm_device *dev = dsi_config->dev;
- u32 dcsChannelNumber = dsi_config->channel_num;
- u32 gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe);
- u32 gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe);
- u32 gen_ctrl_val = GEN_LONG_WRITE;
-
- DRM_INFO("Enter mrst init TPO MIPI display.\n");
-
- gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
-
- /* Flip page order */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00008036);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
-
- /* 0xF0 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x005a5af0);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-
- /* Write protection key */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x005a5af1);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-
- /* 0xFC */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x005a5afc);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-
- /* 0xB7 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x770000b7);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000044);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x05 << WORD_COUNTS_POS));
-
- /* 0xB6 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x000a0ab6);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-
- /* 0xF2 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x081010f2);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x4a070708);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x000000c5);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
-
- /* 0xF8 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x024003f8);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x01030a04);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x0e020220);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000004);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x0d << WORD_COUNTS_POS));
-
- /* 0xE2 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x398fc3e2);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x0000916f);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x06 << WORD_COUNTS_POS));
-
- /* 0xB0 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x000000b0);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
-
- /* 0xF4 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x240242f4);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x78ee2002);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x2a071050);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x507fee10);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x10300710);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x14 << WORD_COUNTS_POS));
-
- /* 0xBA */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x19fe07ba);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x101c0a31);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000010);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
-
- /* 0xBB */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x28ff07bb);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x24280a31);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000034);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
-
- /* 0xFB */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x535d05fb);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1b1a2130);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x221e180e);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x131d2120);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x535d0508);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1c1a2131);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x231f160d);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x111b2220);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x535c2008);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1f1d2433);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x2c251a10);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x2c34372d);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000023);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
-
- /* 0xFA */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x525c0bfa);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1c1c232f);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x2623190e);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x18212625);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x545d0d0e);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1e1d2333);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x26231a10);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1a222725);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x545d280f);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x21202635);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x31292013);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x31393d33);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000029);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
-
- /* Set DM */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x000100f7);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-}
-
-static u16 mdfld_dsi_dpi_to_byte_clock_count(int pixel_clock_count,
- int num_lane, int bpp)
-{
- return (u16)((pixel_clock_count * bpp) / (num_lane * 8));
-}
-
-/*
- * Calculate the dpi time basing on a given drm mode @mode
- * return 0 on success.
- * FIXME: I was using proposed mode value for calculation, may need to
- * use crtc mode values later
- */
-int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
- struct mdfld_dsi_dpi_timing *dpi_timing,
- int num_lane, int bpp)
-{
- int pclk_hsync, pclk_hfp, pclk_hbp, pclk_hactive;
- int pclk_vsync, pclk_vfp, pclk_vbp;
-
- pclk_hactive = mode->hdisplay;
- pclk_hfp = mode->hsync_start - mode->hdisplay;
- pclk_hsync = mode->hsync_end - mode->hsync_start;
- pclk_hbp = mode->htotal - mode->hsync_end;
-
- pclk_vfp = mode->vsync_start - mode->vdisplay;
- pclk_vsync = mode->vsync_end - mode->vsync_start;
- pclk_vbp = mode->vtotal - mode->vsync_end;
-
- /*
- * byte clock counts were calculated by following formula
- * bclock_count = pclk_count * bpp / num_lane / 8
- */
- dpi_timing->hsync_count = mdfld_dsi_dpi_to_byte_clock_count(
- pclk_hsync, num_lane, bpp);
- dpi_timing->hbp_count = mdfld_dsi_dpi_to_byte_clock_count(
- pclk_hbp, num_lane, bpp);
- dpi_timing->hfp_count = mdfld_dsi_dpi_to_byte_clock_count(
- pclk_hfp, num_lane, bpp);
- dpi_timing->hactive_count = mdfld_dsi_dpi_to_byte_clock_count(
- pclk_hactive, num_lane, bpp);
- dpi_timing->vsync_count = mdfld_dsi_dpi_to_byte_clock_count(
- pclk_vsync, num_lane, bpp);
- dpi_timing->vbp_count = mdfld_dsi_dpi_to_byte_clock_count(
- pclk_vbp, num_lane, bpp);
- dpi_timing->vfp_count = mdfld_dsi_dpi_to_byte_clock_count(
- pclk_vfp, num_lane, bpp);
-
- return 0;
-}
-
-void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
- int pipe)
-{
- struct drm_device *dev = dsi_config->dev;
- int lane_count = dsi_config->lane_count;
- struct mdfld_dsi_dpi_timing dpi_timing;
- struct drm_display_mode *mode = dsi_config->mode;
- u32 val;
-
- /*un-ready device*/
- REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 0, 0, 0);
-
- /*init dsi adapter before kicking off*/
- REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);
-
- /*enable all interrupts*/
- REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);
-
- /*set up func_prg*/
- val = lane_count;
- val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;
-
- switch (dsi_config->bpp) {
- case 16:
- val |= DSI_DPI_COLOR_FORMAT_RGB565;
- break;
- case 18:
- val |= DSI_DPI_COLOR_FORMAT_RGB666;
- break;
- case 24:
- val |= DSI_DPI_COLOR_FORMAT_RGB888;
- break;
- default:
- DRM_ERROR("unsupported color format, bpp = %d\n",
- dsi_config->bpp);
- }
- REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), val);
-
- REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe),
- (mode->vtotal * mode->htotal * dsi_config->bpp /
- (8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
- REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe),
- 0xffff & DSI_LP_RX_TIMEOUT_MASK);
-
- /*max value: 20 clock cycles of txclkesc*/
- REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe),
- 0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);
-
- /*min 21 txclkesc, max: ffffh*/
- REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe),
- 0xffff & DSI_RESET_TIMER_MASK);
-
- REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
- mode->vdisplay << 16 | mode->hdisplay);
-
- /*set DPI timing registers*/
- mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
- dsi_config->lane_count, dsi_config->bpp);
-
- REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
- dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
- dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
- dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
- dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
- dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
- dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
- dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
-
- REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x46);
-
- /*min: 7d0 max: 4e20*/
- REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0x000007d0);
-
- /*set up video mode*/
- val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
- REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), val);
-
- REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);
-
- REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);
-
- /*TODO: figure out how to setup these registers*/
- if (mdfld_get_panel_type(dev, pipe) == TC35876X)
- REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
- else
- REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150c3408);
-
- REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);
-
- if (mdfld_get_panel_type(dev, pipe) == TC35876X)
- tc35876x_set_bridge_reset_state(dev, 0); /*Pull High Reset */
-
- /*set device ready*/
- REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 1, 0, 0);
-}
-
-void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output, int pipe)
-{
- struct drm_device *dev = output->dev;
-
- /* clear special packet sent bit */
- if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
- REG_WRITE(MIPI_INTR_STAT_REG(pipe),
- DSI_INTR_STATE_SPL_PKG_SENT);
-
- /*send turn on package*/
- REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_TURN_ON);
-
- /*wait for SPL_PKG_SENT interrupt*/
- mdfld_wait_for_SPL_PKG_SENT(dev, pipe);
-
- if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
- REG_WRITE(MIPI_INTR_STAT_REG(pipe),
- DSI_INTR_STATE_SPL_PKG_SENT);
-
- output->panel_on = 1;
-
- /* FIXME the following is disabled to WA the X slow start issue
- for TMD panel
- if (pipe == 2)
- dev_priv->dpi_panel_on2 = true;
- else if (pipe == 0)
- dev_priv->dpi_panel_on = true; */
-}
-
-static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output,
- int pipe)
-{
- struct drm_device *dev = output->dev;
-
- /*if output is on, or mode setting didn't happen, ignore this*/
- if ((!output->panel_on) || output->first_boot) {
- output->first_boot = 0;
- return;
- }
-
- /* Wait for dpi fifo to empty */
- mdfld_wait_for_DPI_CTRL_FIFO(dev, pipe);
-
- /* Clear the special packet interrupt bit if set */
- if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
- REG_WRITE(MIPI_INTR_STAT_REG(pipe),
- DSI_INTR_STATE_SPL_PKG_SENT);
-
- if (REG_READ(MIPI_DPI_CONTROL_REG(pipe)) == DSI_DPI_CTRL_HS_SHUTDOWN)
- goto shutdown_out;
-
- REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_SHUTDOWN);
-
-shutdown_out:
- output->panel_on = 0;
- output->first_boot = 0;
-
- /* FIXME the following is disabled to WA the X slow start issue
- for TMD panel
- if (pipe == 2)
- dev_priv->dpi_panel_on2 = false;
- else if (pipe == 0)
- dev_priv->dpi_panel_on = false; */
-}
-
-static void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on)
-{
- struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
- struct mdfld_dsi_dpi_output *dpi_output =
- MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_encoder_get_config(dsi_encoder);
- int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
- struct drm_device *dev = dsi_config->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- /*start up display island if it was shutdown*/
- if (!gma_power_begin(dev, true))
- return;
-
- if (on) {
- if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
- mdfld_dsi_dpi_turn_on(dpi_output, pipe);
- else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
- mdfld_dsi_configure_up(dsi_encoder, pipe);
- else {
- /*enable mipi port*/
- REG_WRITE(MIPI_PORT_CONTROL(pipe),
- REG_READ(MIPI_PORT_CONTROL(pipe)) | BIT(31));
- REG_READ(MIPI_PORT_CONTROL(pipe));
-
- mdfld_dsi_dpi_turn_on(dpi_output, pipe);
- mdfld_dsi_tpo_ic_init(dsi_config, pipe);
- }
- dev_priv->dpi_panel_on[pipe] = true;
- } else {
- if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
- mdfld_dsi_dpi_shut_down(dpi_output, pipe);
- else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
- mdfld_dsi_configure_down(dsi_encoder, pipe);
- else {
- mdfld_dsi_dpi_shut_down(dpi_output, pipe);
-
- /*disable mipi port*/
- REG_WRITE(MIPI_PORT_CONTROL(pipe),
- REG_READ(MIPI_PORT_CONTROL(pipe)) & ~BIT(31));
- REG_READ(MIPI_PORT_CONTROL(pipe));
- }
- dev_priv->dpi_panel_on[pipe] = false;
- }
- gma_power_end(dev);
-}
-
-void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
-{
- mdfld_dsi_dpi_set_power(encoder, mode == DRM_MODE_DPMS_ON);
-}
-
-bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_encoder_get_config(dsi_encoder);
- struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
-
- if (fixed_mode) {
- adjusted_mode->hdisplay = fixed_mode->hdisplay;
- adjusted_mode->hsync_start = fixed_mode->hsync_start;
- adjusted_mode->hsync_end = fixed_mode->hsync_end;
- adjusted_mode->htotal = fixed_mode->htotal;
- adjusted_mode->vdisplay = fixed_mode->vdisplay;
- adjusted_mode->vsync_start = fixed_mode->vsync_start;
- adjusted_mode->vsync_end = fixed_mode->vsync_end;
- adjusted_mode->vtotal = fixed_mode->vtotal;
- adjusted_mode->clock = fixed_mode->clock;
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- }
- return true;
-}
-
-void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder)
-{
- mdfld_dsi_dpi_set_power(encoder, false);
-}
-
-void mdfld_dsi_dpi_commit(struct drm_encoder *encoder)
-{
- mdfld_dsi_dpi_set_power(encoder, true);
-}
-
-/* For TC35876X */
-/* This functionality was implemented in FW in iCDK */
-/* But removed in DV0 and later. So need to add here. */
-static void mipi_set_properties(struct mdfld_dsi_config *dsi_config, int pipe)
-{
- struct drm_device *dev = dsi_config->dev;
-
- REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);
- REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);
- REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe), 0xffffff);
- REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe), 0xffffff);
- REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe), 0x14);
- REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe), 0xff);
- REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x25);
- REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0xf0);
- REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);
- REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);
- REG_WRITE(MIPI_DBI_BW_CTRL_REG(pipe), 0x00000820);
- REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);
-}
-
-static void mdfld_mipi_set_video_timing(struct mdfld_dsi_config *dsi_config,
- int pipe)
-{
- struct drm_device *dev = dsi_config->dev;
- struct mdfld_dsi_dpi_timing dpi_timing;
- struct drm_display_mode *mode = dsi_config->mode;
-
- mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
- dsi_config->lane_count,
- dsi_config->bpp);
-
- REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
- mode->vdisplay << 16 | mode->hdisplay);
- REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
- dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
- dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
- dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
- dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
- dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
- dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
- dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
-}
-
-static void mdfld_mipi_config(struct mdfld_dsi_config *dsi_config, int pipe)
-{
- struct drm_device *dev = dsi_config->dev;
- int lane_count = dsi_config->lane_count;
-
- if (pipe) {
- REG_WRITE(MIPI_PORT_CONTROL(0), 0x00000002);
- REG_WRITE(MIPI_PORT_CONTROL(2), 0x80000000);
- } else {
- REG_WRITE(MIPI_PORT_CONTROL(0), 0x80010000);
- REG_WRITE(MIPI_PORT_CONTROL(2), 0x00);
- }
-
- REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150A600F);
- REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), 0x0000000F);
-
- /* lane_count = 3 */
- REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), 0x00000200 | lane_count);
-
- mdfld_mipi_set_video_timing(dsi_config, pipe);
-}
-
-static void mdfld_set_pipe_timing(struct mdfld_dsi_config *dsi_config, int pipe)
-{
- struct drm_device *dev = dsi_config->dev;
- struct drm_display_mode *mode = dsi_config->mode;
-
- REG_WRITE(HTOTAL_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
- REG_WRITE(HBLANK_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
- REG_WRITE(HSYNC_A,
- ((mode->hsync_end - 1) << 16) | (mode->hsync_start - 1));
-
- REG_WRITE(VTOTAL_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
- REG_WRITE(VBLANK_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
- REG_WRITE(VSYNC_A,
- ((mode->vsync_end - 1) << 16) | (mode->vsync_start - 1));
-
- REG_WRITE(PIPEASRC,
- ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
-}
-/* End for TC35876X */
-
-void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
- struct mdfld_dsi_dpi_output *dpi_output =
- MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_encoder_get_config(dsi_encoder);
- struct drm_device *dev = dsi_config->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
- u32 pipeconf_reg = PIPEACONF;
- u32 dspcntr_reg = DSPACNTR;
- u32 pipeconf, dspcntr;
-
- u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
-
- if (WARN_ON(pipe < 0))
- return;
-
- pipeconf = dev_priv->pipeconf[pipe];
- dspcntr = dev_priv->dspcntr[pipe];
-
- if (pipe) {
- pipeconf_reg = PIPECCONF;
- dspcntr_reg = DSPCCNTR;
- } else {
- if (mdfld_get_panel_type(dev, pipe) == TC35876X)
- mipi &= (~0x03); /* Use all four lanes */
- else
- mipi |= 2;
- }
-
- /*start up display island if it was shutdown*/
- if (!gma_power_begin(dev, true))
- return;
-
- if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
- /*
- * The following logic is required to reset the bridge and
- * configure. This also starts the DSI clock at 200MHz.
- */
- tc35876x_set_bridge_reset_state(dev, 0); /*Pull High Reset */
- tc35876x_toshiba_bridge_panel_on(dev);
- udelay(100);
- /* Now start the DSI clock */
- REG_WRITE(MRST_DPLL_A, 0x00);
- REG_WRITE(MRST_FPA0, 0xC1);
- REG_WRITE(MRST_DPLL_A, 0x00800000);
- udelay(500);
- REG_WRITE(MRST_DPLL_A, 0x80800000);
-
- if (REG_BIT_WAIT(pipeconf_reg, 1, 29))
- dev_err(&dev->pdev->dev, "%s: DSI PLL lock timeout\n",
- __func__);
-
- REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
-
- mipi_set_properties(dsi_config, pipe);
- mdfld_mipi_config(dsi_config, pipe);
- mdfld_set_pipe_timing(dsi_config, pipe);
-
- REG_WRITE(DSPABASE, 0x00);
- REG_WRITE(DSPASIZE,
- ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
-
- REG_WRITE(DSPACNTR, 0x98000000);
- REG_WRITE(DSPASURF, 0x00);
-
- REG_WRITE(VGACNTRL, 0x80000000);
- REG_WRITE(DEVICE_READY_REG, 0x00000001);
-
- REG_WRITE(MIPI_PORT_CONTROL(pipe), 0x80810000);
- } else {
- /*set up mipi port FIXME: do at init time */
- REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi);
- }
- REG_READ(MIPI_PORT_CONTROL(pipe));
-
- if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
- /* NOP */
- } else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
- /* set up DSI controller DPI interface */
- mdfld_dsi_dpi_controller_init(dsi_config, pipe);
-
- /* Configure MIPI Bridge and Panel */
- tc35876x_configure_lvds_bridge(dev);
- dev_priv->dpi_panel_on[pipe] = true;
- } else {
- /*turn on DPI interface*/
- mdfld_dsi_dpi_turn_on(dpi_output, pipe);
- }
-
- /*set up pipe*/
- REG_WRITE(pipeconf_reg, pipeconf);
- REG_READ(pipeconf_reg);
-
- /*set up display plane*/
- REG_WRITE(dspcntr_reg, dspcntr);
- REG_READ(dspcntr_reg);
-
- msleep(20); /* FIXME: this should wait for vblank */
-
- if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
- /* NOP */
- } else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
- mdfld_dsi_dpi_turn_on(dpi_output, pipe);
- } else {
- /* init driver ic */
- mdfld_dsi_tpo_ic_init(dsi_config, pipe);
- /*init backlight*/
- mdfld_dsi_brightness_init(dsi_config, pipe);
- }
-
- gma_power_end(dev);
-}
-
-/*
- * Init DSI DPI encoder.
- * Allocate an mdfld_dsi_encoder and attach it to given @dsi_connector
- * return pointer of newly allocated DPI encoder, NULL on error
- */
-struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
- struct mdfld_dsi_connector *dsi_connector,
- const struct panel_funcs *p_funcs)
-{
- struct mdfld_dsi_dpi_output *dpi_output = NULL;
- struct mdfld_dsi_config *dsi_config;
- struct drm_connector *connector = NULL;
- struct drm_encoder *encoder = NULL;
- int pipe;
- u32 data;
- int ret;
-
- pipe = dsi_connector->pipe;
-
- if (mdfld_get_panel_type(dev, pipe) != TC35876X) {
- dsi_config = mdfld_dsi_get_config(dsi_connector);
-
- /* panel hard-reset */
- if (p_funcs->reset) {
- ret = p_funcs->reset(dev, pipe);
- if (ret) {
- DRM_ERROR("Panel %d hard-reset failed\n", pipe);
- return NULL;
- }
- }
-
- /* panel drvIC init */
- if (p_funcs->drv_ic_init)
- p_funcs->drv_ic_init(dsi_config, pipe);
-
- /* panel power mode detect */
- ret = mdfld_dsi_get_power_mode(dsi_config, &data, false);
- if (ret) {
- DRM_ERROR("Panel %d get power mode failed\n", pipe);
- dsi_connector->status = connector_status_disconnected;
- } else {
- DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
- dsi_connector->status = connector_status_connected;
- }
- }
-
- dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL);
- if (!dpi_output) {
- DRM_ERROR("No memory\n");
- return NULL;
- }
-
- dpi_output->panel_on = 0;
- dpi_output->dev = dev;
- if (mdfld_get_panel_type(dev, pipe) != TC35876X)
- dpi_output->p_funcs = p_funcs;
- dpi_output->first_boot = 1;
-
- /*get fixed mode*/
- dsi_config = mdfld_dsi_get_config(dsi_connector);
-
- /*create drm encoder object*/
- connector = &dsi_connector->base.base;
- encoder = &dpi_output->base.base.base;
- drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
- drm_encoder_helper_add(encoder,
- p_funcs->encoder_helper_funcs);
-
- /*attach to given connector*/
- drm_connector_attach_encoder(connector, encoder);
-
- /*set possible crtcs and clones*/
- if (dsi_connector->pipe) {
- encoder->possible_crtcs = (1 << 2);
- encoder->possible_clones = 0;
- } else {
- encoder->possible_crtcs = (1 << 0);
- encoder->possible_clones = 0;
- }
-
- dsi_connector->base.encoder = &dpi_output->base.base;
-
- return &dpi_output->base;
-}
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
deleted file mode 100644
index 2b40663e1696..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#ifndef __MDFLD_DSI_DPI_H__
-#define __MDFLD_DSI_DPI_H__
-
-#include "mdfld_dsi_output.h"
-#include "mdfld_output.h"
-
-struct mdfld_dsi_dpi_timing {
- u16 hsync_count;
- u16 hbp_count;
- u16 hfp_count;
- u16 hactive_count;
- u16 vsync_count;
- u16 vbp_count;
- u16 vfp_count;
-};
-
-struct mdfld_dsi_dpi_output {
- struct mdfld_dsi_encoder base;
- struct drm_device *dev;
-
- int panel_on;
- int first_boot;
-
- const struct panel_funcs *p_funcs;
-};
-
-#define MDFLD_DSI_DPI_OUTPUT(dsi_encoder)\
- container_of(dsi_encoder, struct mdfld_dsi_dpi_output, base)
-
-/* Export functions */
-extern int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
- struct mdfld_dsi_dpi_timing *dpi_timing,
- int num_lane, int bpp);
-extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
- struct mdfld_dsi_connector *dsi_connector,
- const struct panel_funcs *p_funcs);
-
-/* MDFLD DPI helper functions */
-extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode);
-extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder);
-extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder);
-extern void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-extern void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output,
- int pipe);
-extern void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
- int pipe);
-#endif /*__MDFLD_DSI_DPI_H__*/
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
deleted file mode 100644
index 4aab76613bd9..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ /dev/null
@@ -1,603 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#include <linux/delay.h>
-#include <linux/moduleparam.h>
-#include <linux/pm_runtime.h>
-#include <linux/gpio/consumer.h>
-
-#include <asm/intel_scu_ipc.h>
-
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_output.h"
-#include "mdfld_dsi_pkg_sender.h"
-#include "mdfld_output.h"
-#include "tc35876x-dsi-lvds.h"
-
-/* get the LABC from command line. */
-static int LABC_control = 1;
-
-#ifdef MODULE
-module_param(LABC_control, int, 0644);
-#else
-
-static int __init parse_LABC_control(char *arg)
-{
- /* LABC control can be passed in as a cmdline parameter */
- /* to enable this feature add LABC=1 to cmdline */
- /* to disable this feature add LABC=0 to cmdline */
- if (!arg)
- return -EINVAL;
-
- if (!strcasecmp(arg, "0"))
- LABC_control = 0;
- else if (!strcasecmp(arg, "1"))
- LABC_control = 1;
-
- return 0;
-}
-early_param("LABC", parse_LABC_control);
-#endif
-
-/**
- * Check and see if the generic control or data buffer is empty and ready.
- */
-void mdfld_dsi_gen_fifo_ready(struct drm_device *dev, u32 gen_fifo_stat_reg,
- u32 fifo_stat)
-{
- u32 GEN_BF_time_out_count;
-
- /* Check MIPI Adatper command registers */
- for (GEN_BF_time_out_count = 0;
- GEN_BF_time_out_count < GEN_FB_TIME_OUT;
- GEN_BF_time_out_count++) {
- if ((REG_READ(gen_fifo_stat_reg) & fifo_stat) == fifo_stat)
- break;
- udelay(100);
- }
-
- if (GEN_BF_time_out_count == GEN_FB_TIME_OUT)
- DRM_ERROR("mdfld_dsi_gen_fifo_ready, Timeout. gen_fifo_stat_reg = 0x%x.\n",
- gen_fifo_stat_reg);
-}
-
-/**
- * Manage the DSI MIPI keyboard and display brightness.
- * FIXME: this is exported to OSPM code. should work out an specific
- * display interface to OSPM.
- */
-
-void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
-{
- struct mdfld_dsi_pkg_sender *sender =
- mdfld_dsi_get_pkg_sender(dsi_config);
- struct drm_device *dev;
- struct drm_psb_private *dev_priv;
- u32 gen_ctrl_val;
-
- if (!sender) {
- DRM_ERROR("No sender found\n");
- return;
- }
-
- dev = sender->dev;
- dev_priv = dev->dev_private;
-
- /* Set default display backlight value to 85% (0xd8)*/
- mdfld_dsi_send_mcs_short(sender, write_display_brightness, 0xd8, 1,
- true);
-
- /* Set minimum brightness setting of CABC function to 20% (0x33)*/
- mdfld_dsi_send_mcs_short(sender, write_cabc_min_bright, 0x33, 1, true);
-
- /* Enable backlight or/and LABC */
- gen_ctrl_val = BRIGHT_CNTL_BLOCK_ON | DISPLAY_DIMMING_ON |
- BACKLIGHT_ON;
- if (LABC_control == 1)
- gen_ctrl_val |= DISPLAY_DIMMING_ON | DISPLAY_BRIGHTNESS_AUTO
- | GAMMA_AUTO;
-
- if (LABC_control == 1)
- gen_ctrl_val |= AMBIENT_LIGHT_SENSE_ON;
-
- dev_priv->mipi_ctrl_display = gen_ctrl_val;
-
- mdfld_dsi_send_mcs_short(sender, write_ctrl_display, (u8)gen_ctrl_val,
- 1, true);
-
- mdfld_dsi_send_mcs_short(sender, write_ctrl_cabc, UI_IMAGE, 1, true);
-}
-
-void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe, int level)
-{
- struct mdfld_dsi_pkg_sender *sender;
- struct drm_psb_private *dev_priv;
- struct mdfld_dsi_config *dsi_config;
- u32 gen_ctrl_val = 0;
- int p_type = TMD_VID;
-
- if (!dev || (pipe != 0 && pipe != 2)) {
- DRM_ERROR("Invalid parameter\n");
- return;
- }
-
- p_type = mdfld_get_panel_type(dev, 0);
-
- dev_priv = dev->dev_private;
-
- if (pipe)
- dsi_config = dev_priv->dsi_configs[1];
- else
- dsi_config = dev_priv->dsi_configs[0];
-
- sender = mdfld_dsi_get_pkg_sender(dsi_config);
-
- if (!sender) {
- DRM_ERROR("No sender found\n");
- return;
- }
-
- gen_ctrl_val = (level * 0xff / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL) & 0xff;
-
- dev_dbg(sender->dev->dev, "pipe = %d, gen_ctrl_val = %d.\n",
- pipe, gen_ctrl_val);
-
- if (p_type == TMD_VID) {
- /* Set display backlight value */
- mdfld_dsi_send_mcs_short(sender, tmd_write_display_brightness,
- (u8)gen_ctrl_val, 1, true);
- } else {
- /* Set display backlight value */
- mdfld_dsi_send_mcs_short(sender, write_display_brightness,
- (u8)gen_ctrl_val, 1, true);
-
- /* Enable backlight control */
- if (level == 0)
- gen_ctrl_val = 0;
- else
- gen_ctrl_val = dev_priv->mipi_ctrl_display;
-
- mdfld_dsi_send_mcs_short(sender, write_ctrl_display,
- (u8)gen_ctrl_val, 1, true);
- }
-}
-
-static int mdfld_dsi_get_panel_status(struct mdfld_dsi_config *dsi_config,
- u8 dcs, u32 *data, bool hs)
-{
- struct mdfld_dsi_pkg_sender *sender
- = mdfld_dsi_get_pkg_sender(dsi_config);
-
- if (!sender || !data) {
- DRM_ERROR("Invalid parameter\n");
- return -EINVAL;
- }
-
- return mdfld_dsi_read_mcs(sender, dcs, data, 1, hs);
-}
-
-int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config, u32 *mode,
- bool hs)
-{
- if (!dsi_config || !mode) {
- DRM_ERROR("Invalid parameter\n");
- return -EINVAL;
- }
-
- return mdfld_dsi_get_panel_status(dsi_config, 0x0a, mode, hs);
-}
-
-/*
- * NOTE: this function was used by OSPM.
- * TODO: will be removed later, should work out display interfaces for OSPM
- */
-void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config, int pipe)
-{
- if (!dsi_config || ((pipe != 0) && (pipe != 2))) {
- DRM_ERROR("Invalid parameters\n");
- return;
- }
-
- mdfld_dsi_dpi_controller_init(dsi_config, pipe);
-}
-
-static void mdfld_dsi_connector_save(struct drm_connector *connector)
-{
-}
-
-static void mdfld_dsi_connector_restore(struct drm_connector *connector)
-{
-}
-
-/* FIXME: start using the force parameter */
-static enum drm_connector_status
-mdfld_dsi_connector_detect(struct drm_connector *connector, bool force)
-{
- struct mdfld_dsi_connector *dsi_connector
- = mdfld_dsi_connector(connector);
-
- dsi_connector->status = connector_status_connected;
-
- return dsi_connector->status;
-}
-
-static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t value)
-{
- struct drm_encoder *encoder = connector->encoder;
-
- if (!strcmp(property->name, "scaling mode") && encoder) {
- struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
- bool centerechange;
- uint64_t val;
-
- if (!gma_crtc)
- goto set_prop_error;
-
- switch (value) {
- case DRM_MODE_SCALE_FULLSCREEN:
- break;
- case DRM_MODE_SCALE_NO_SCALE:
- break;
- case DRM_MODE_SCALE_ASPECT:
- break;
- default:
- goto set_prop_error;
- }
-
- if (drm_object_property_get_value(&connector->base, property, &val))
- goto set_prop_error;
-
- if (val == value)
- goto set_prop_done;
-
- if (drm_object_property_set_value(&connector->base,
- property, value))
- goto set_prop_error;
-
- centerechange = (val == DRM_MODE_SCALE_NO_SCALE) ||
- (value == DRM_MODE_SCALE_NO_SCALE);
-
- if (gma_crtc->saved_mode.hdisplay != 0 &&
- gma_crtc->saved_mode.vdisplay != 0) {
- if (centerechange) {
- if (!drm_crtc_helper_set_mode(encoder->crtc,
- &gma_crtc->saved_mode,
- encoder->crtc->x,
- encoder->crtc->y,
- encoder->crtc->primary->fb))
- goto set_prop_error;
- } else {
- const struct drm_encoder_helper_funcs *funcs =
- encoder->helper_private;
- funcs->mode_set(encoder,
- &gma_crtc->saved_mode,
- &gma_crtc->saved_adjusted_mode);
- }
- }
- } else if (!strcmp(property->name, "backlight") && encoder) {
- if (drm_object_property_set_value(&connector->base, property,
- value))
- goto set_prop_error;
- else
- gma_backlight_set(encoder->dev, value);
- }
-set_prop_done:
- return 0;
-set_prop_error:
- return -1;
-}
-
-static void mdfld_dsi_connector_destroy(struct drm_connector *connector)
-{
- struct mdfld_dsi_connector *dsi_connector =
- mdfld_dsi_connector(connector);
- struct mdfld_dsi_pkg_sender *sender;
-
- if (!dsi_connector)
- return;
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
- sender = dsi_connector->pkg_sender;
- mdfld_dsi_pkg_sender_destroy(sender);
- kfree(dsi_connector);
-}
-
-static int mdfld_dsi_connector_get_modes(struct drm_connector *connector)
-{
- struct mdfld_dsi_connector *dsi_connector =
- mdfld_dsi_connector(connector);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_get_config(dsi_connector);
- struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
- struct drm_display_mode *dup_mode = NULL;
- struct drm_device *dev = connector->dev;
-
- if (fixed_mode) {
- dev_dbg(dev->dev, "fixed_mode %dx%d\n",
- fixed_mode->hdisplay, fixed_mode->vdisplay);
- dup_mode = drm_mode_duplicate(dev, fixed_mode);
- drm_mode_probed_add(connector, dup_mode);
- return 1;
- }
- DRM_ERROR("Didn't get any modes!\n");
- return 0;
-}
-
-static enum drm_mode_status mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct mdfld_dsi_connector *dsi_connector =
- mdfld_dsi_connector(connector);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_get_config(dsi_connector);
- struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
-
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- return MODE_NO_INTERLACE;
-
- /**
- * FIXME: current DC has no fitting unit, reject any mode setting
- * request
- * Will figure out a way to do up-scaling(panel fitting) later.
- **/
- if (fixed_mode) {
- if (mode->hdisplay != fixed_mode->hdisplay)
- return MODE_PANEL;
-
- if (mode->vdisplay != fixed_mode->vdisplay)
- return MODE_PANEL;
- }
-
- return MODE_OK;
-}
-
-static struct drm_encoder *mdfld_dsi_connector_best_encoder(
- struct drm_connector *connector)
-{
- struct mdfld_dsi_connector *dsi_connector =
- mdfld_dsi_connector(connector);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_get_config(dsi_connector);
- return &dsi_config->encoder->base.base;
-}
-
-/*DSI connector funcs*/
-static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .detect = mdfld_dsi_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = mdfld_dsi_connector_set_property,
- .destroy = mdfld_dsi_connector_destroy,
-};
-
-/*DSI connector helper funcs*/
-static const struct drm_connector_helper_funcs
- mdfld_dsi_connector_helper_funcs = {
- .get_modes = mdfld_dsi_connector_get_modes,
- .mode_valid = mdfld_dsi_connector_mode_valid,
- .best_encoder = mdfld_dsi_connector_best_encoder,
-};
-
-static int mdfld_dsi_get_default_config(struct drm_device *dev,
- struct mdfld_dsi_config *config, int pipe)
-{
- if (!dev || !config) {
- DRM_ERROR("Invalid parameters");
- return -EINVAL;
- }
-
- config->bpp = 24;
- if (mdfld_get_panel_type(dev, pipe) == TC35876X)
- config->lane_count = 4;
- else
- config->lane_count = 2;
- config->channel_num = 0;
-
- if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
- config->video_mode = MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE;
- else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
- config->video_mode =
- MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS;
- else
- config->video_mode = MDFLD_DSI_VIDEO_BURST_MODE;
-
- return 0;
-}
-
-int mdfld_dsi_panel_reset(struct drm_device *ddev, int pipe)
-{
- struct device *dev = ddev->dev;
- struct gpio_desc *gpiod;
-
- /*
- * Raise the GPIO reset line for the corresponding pipe to HIGH,
- * this is probably because it is active low so this takes the
- * respective pipe out of reset. (We have no code to put it back
- * into reset in this driver.)
- */
- switch (pipe) {
- case 0:
- gpiod = gpiod_get(dev, "dsi-pipe0-reset", GPIOD_OUT_HIGH);
- if (IS_ERR(gpiod))
- return PTR_ERR(gpiod);
- break;
- case 2:
- gpiod = gpiod_get(dev, "dsi-pipe2-reset", GPIOD_OUT_HIGH);
- if (IS_ERR(gpiod))
- return PTR_ERR(gpiod);
- break;
- default:
- DRM_DEV_ERROR(dev, "Invalid output pipe\n");
- return -EINVAL;
- }
- gpiod_put(gpiod);
-
- /* Flush posted writes on the device */
- gpiod = gpiod_get(dev, "dsi-pipe0-reset", GPIOD_ASIS);
- if (IS_ERR(gpiod))
- return PTR_ERR(gpiod);
- gpiod_get_value(gpiod);
- gpiod_put(gpiod);
-
- return 0;
-}
-
-/*
- * MIPI output init
- * @dev: drm device
- * @pipe: pipe number, 0 or 2
- * @p_vid_funcs: panel callbacks
- *
- * Initialize the DSI output on @pipe, including creating the DRM mode
- * objects for it.
- */
-void mdfld_dsi_output_init(struct drm_device *dev,
- int pipe,
- const struct panel_funcs *p_vid_funcs)
-{
- struct mdfld_dsi_config *dsi_config;
- struct mdfld_dsi_connector *dsi_connector;
- struct drm_connector *connector;
- struct mdfld_dsi_encoder *encoder;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct panel_info dsi_panel_info;
- u32 width_mm, height_mm;
-
- dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
-
- if (pipe != 0 && pipe != 2) {
- DRM_ERROR("Invalid parameter\n");
- return;
- }
-
- /*create a new connector*/
- dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
- if (!dsi_connector) {
- DRM_ERROR("No memory");
- return;
- }
-
- dsi_connector->pipe = pipe;
-
- dsi_config = kzalloc(sizeof(struct mdfld_dsi_config),
- GFP_KERNEL);
- if (!dsi_config) {
- DRM_ERROR("cannot allocate memory for DSI config\n");
- goto dsi_init_err0;
- }
- mdfld_dsi_get_default_config(dev, dsi_config, pipe);
-
- dsi_connector->private = dsi_config;
-
- dsi_config->changed = 1;
- dsi_config->dev = dev;
-
- dsi_config->fixed_mode = p_vid_funcs->get_config_mode(dev);
- if (p_vid_funcs->get_panel_info(dev, pipe, &dsi_panel_info))
- goto dsi_init_err0;
-
- width_mm = dsi_panel_info.width_mm;
- height_mm = dsi_panel_info.height_mm;
-
- dsi_config->mode = dsi_config->fixed_mode;
- dsi_config->connector = dsi_connector;
-
- if (!dsi_config->fixed_mode) {
- DRM_ERROR("No panel fixed mode was found\n");
- goto dsi_init_err0;
- }
-
- if (pipe && dev_priv->dsi_configs[0]) {
- dsi_config->dvr_ic_inited = 0;
- dev_priv->dsi_configs[1] = dsi_config;
- } else if (pipe == 0) {
- dsi_config->dvr_ic_inited = 1;
- dev_priv->dsi_configs[0] = dsi_config;
- } else {
- DRM_ERROR("Trying to init MIPI1 before MIPI0\n");
- goto dsi_init_err0;
- }
-
-
- connector = &dsi_connector->base.base;
- dsi_connector->base.save = mdfld_dsi_connector_save;
- dsi_connector->base.restore = mdfld_dsi_connector_restore;
-
- drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
-
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
- connector->display_info.width_mm = width_mm;
- connector->display_info.height_mm = height_mm;
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
-
- /*attach properties*/
- drm_object_attach_property(&connector->base,
- dev->mode_config.scaling_mode_property,
- DRM_MODE_SCALE_FULLSCREEN);
- drm_object_attach_property(&connector->base,
- dev_priv->backlight_property,
- MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
-
- /*init DSI package sender on this output*/
- if (mdfld_dsi_pkg_sender_init(dsi_connector, pipe)) {
- DRM_ERROR("Package Sender initialization failed on pipe %d\n",
- pipe);
- goto dsi_init_err0;
- }
-
- encoder = mdfld_dsi_dpi_init(dev, dsi_connector, p_vid_funcs);
- if (!encoder) {
- DRM_ERROR("Create DPI encoder failed\n");
- goto dsi_init_err1;
- }
- encoder->private = dsi_config;
- dsi_config->encoder = encoder;
- encoder->base.type = (pipe == 0) ? INTEL_OUTPUT_MIPI :
- INTEL_OUTPUT_MIPI2;
- drm_connector_register(connector);
- return;
-
- /*TODO: add code to destroy outputs on error*/
-dsi_init_err1:
- /*destroy sender*/
- mdfld_dsi_pkg_sender_destroy(dsi_connector->pkg_sender);
-
- drm_connector_cleanup(connector);
-
- kfree(dsi_config->fixed_mode);
- kfree(dsi_config);
-dsi_init_err0:
- kfree(dsi_connector);
-}
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
deleted file mode 100644
index 5c0db3c2903f..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#ifndef __MDFLD_DSI_OUTPUT_H__
-#define __MDFLD_DSI_OUTPUT_H__
-
-#include <linux/backlight.h>
-
-#include <asm/intel-mid.h>
-
-#include <drm/drm.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
-
-#include "mdfld_output.h"
-#include "psb_drv.h"
-#include "psb_intel_drv.h"
-#include "psb_intel_reg.h"
-
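-/*
- * Register bitfield helpers: FLD_MASK builds a mask covering bits
- * [start:end], FLD_VAL/FLD_GET shift a value into/out of that field and
- * FLD_MOD replaces the field inside an existing register value.
- */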
-#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
-#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
-#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
-#define FLD_MOD(orig, val, start, end) \
- (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
-
-#define REG_FLD_MOD(reg, val, start, end) \
- REG_WRITE(reg, FLD_MOD(REG_READ(reg), val, start, end))
-
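-/*
- * Busy-wait until the [start:end] field of @reg reads back @val; returns
- * 1 if the field still does not match after ~100000 polls, 0 on success.
- */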
-static inline int REGISTER_FLD_WAIT(struct drm_device *dev, u32 reg,
- u32 val, int start, int end)
-{
- int t = 100000;
-
- while (FLD_GET(REG_READ(reg), start, end) != val) {
- if (--t == 0)
- return 1;
- }
-
- return 0;
-}
-
-#define REG_FLD_WAIT(reg, val, start, end) \
- REGISTER_FLD_WAIT(dev, reg, val, start, end)
-
-#define REG_BIT_WAIT(reg, val, bitnum) \
- REGISTER_FLD_WAIT(dev, reg, val, bitnum, bitnum)
-
-#define MDFLD_DSI_BRIGHTNESS_MAX_LEVEL 100
-
-#ifdef DEBUG
-#define CHECK_PIPE(pipe) ({ \
- const typeof(pipe) __pipe = (pipe); \
- BUG_ON(__pipe != 0 && __pipe != 2); \
- __pipe; })
-#else
-#define CHECK_PIPE(pipe) (pipe)
-#endif
-
-/*
- * The actual MIPIA->MIPIC register offset is 0x800; multiplying the pipe
- * number by 0x400 gives the right offset because only pipes 0 and 2 are valid.
- */
-#define REG_OFFSET(pipe) (CHECK_PIPE(pipe) * 0x400)
-
-/* mdfld DSI controller registers */
-#define MIPI_DEVICE_READY_REG(pipe) (0xb000 + REG_OFFSET(pipe))
-#define MIPI_INTR_STAT_REG(pipe) (0xb004 + REG_OFFSET(pipe))
-#define MIPI_INTR_EN_REG(pipe) (0xb008 + REG_OFFSET(pipe))
-#define MIPI_DSI_FUNC_PRG_REG(pipe) (0xb00c + REG_OFFSET(pipe))
-#define MIPI_HS_TX_TIMEOUT_REG(pipe) (0xb010 + REG_OFFSET(pipe))
-#define MIPI_LP_RX_TIMEOUT_REG(pipe) (0xb014 + REG_OFFSET(pipe))
-#define MIPI_TURN_AROUND_TIMEOUT_REG(pipe) (0xb018 + REG_OFFSET(pipe))
-#define MIPI_DEVICE_RESET_TIMER_REG(pipe) (0xb01c + REG_OFFSET(pipe))
-#define MIPI_DPI_RESOLUTION_REG(pipe) (0xb020 + REG_OFFSET(pipe))
-#define MIPI_DBI_FIFO_THROTTLE_REG(pipe) (0xb024 + REG_OFFSET(pipe))
-#define MIPI_HSYNC_COUNT_REG(pipe) (0xb028 + REG_OFFSET(pipe))
-#define MIPI_HBP_COUNT_REG(pipe) (0xb02c + REG_OFFSET(pipe))
-#define MIPI_HFP_COUNT_REG(pipe) (0xb030 + REG_OFFSET(pipe))
-#define MIPI_HACTIVE_COUNT_REG(pipe) (0xb034 + REG_OFFSET(pipe))
-#define MIPI_VSYNC_COUNT_REG(pipe) (0xb038 + REG_OFFSET(pipe))
-#define MIPI_VBP_COUNT_REG(pipe) (0xb03c + REG_OFFSET(pipe))
-#define MIPI_VFP_COUNT_REG(pipe) (0xb040 + REG_OFFSET(pipe))
-#define MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe) (0xb044 + REG_OFFSET(pipe))
-#define MIPI_DPI_CONTROL_REG(pipe) (0xb048 + REG_OFFSET(pipe))
-#define MIPI_DPI_DATA_REG(pipe) (0xb04c + REG_OFFSET(pipe))
-#define MIPI_INIT_COUNT_REG(pipe) (0xb050 + REG_OFFSET(pipe))
-#define MIPI_MAX_RETURN_PACK_SIZE_REG(pipe) (0xb054 + REG_OFFSET(pipe))
-#define MIPI_VIDEO_MODE_FORMAT_REG(pipe) (0xb058 + REG_OFFSET(pipe))
-#define MIPI_EOT_DISABLE_REG(pipe) (0xb05c + REG_OFFSET(pipe))
-#define MIPI_LP_BYTECLK_REG(pipe) (0xb060 + REG_OFFSET(pipe))
-#define MIPI_LP_GEN_DATA_REG(pipe) (0xb064 + REG_OFFSET(pipe))
-#define MIPI_HS_GEN_DATA_REG(pipe) (0xb068 + REG_OFFSET(pipe))
-#define MIPI_LP_GEN_CTRL_REG(pipe) (0xb06c + REG_OFFSET(pipe))
-#define MIPI_HS_GEN_CTRL_REG(pipe) (0xb070 + REG_OFFSET(pipe))
-#define MIPI_GEN_FIFO_STAT_REG(pipe) (0xb074 + REG_OFFSET(pipe))
-#define MIPI_HS_LS_DBI_ENABLE_REG(pipe) (0xb078 + REG_OFFSET(pipe))
-#define MIPI_DPHY_PARAM_REG(pipe) (0xb080 + REG_OFFSET(pipe))
-#define MIPI_DBI_BW_CTRL_REG(pipe) (0xb084 + REG_OFFSET(pipe))
-#define MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe) (0xb088 + REG_OFFSET(pipe))
-
-#define MIPI_CTRL_REG(pipe) (0xb104 + REG_OFFSET(pipe))
-#define MIPI_DATA_ADD_REG(pipe) (0xb108 + REG_OFFSET(pipe))
-#define MIPI_DATA_LEN_REG(pipe) (0xb10c + REG_OFFSET(pipe))
-#define MIPI_CMD_ADD_REG(pipe) (0xb110 + REG_OFFSET(pipe))
-#define MIPI_CMD_LEN_REG(pipe) (0xb114 + REG_OFFSET(pipe))
-
-/* non-uniform reg offset */
-#define MIPI_PORT_CONTROL(pipe) (CHECK_PIPE(pipe) ? MIPI_C : MIPI)
-
-#define DSI_DEVICE_READY (0x1)
-#define DSI_POWER_STATE_ULPS_ENTER (0x2 << 1)
-#define DSI_POWER_STATE_ULPS_EXIT (0x1 << 1)
-#define DSI_POWER_STATE_ULPS_OFFSET (0x1)
-
-
-#define DSI_ONE_DATA_LANE (0x1)
-#define DSI_TWO_DATA_LANE (0x2)
-#define DSI_THREE_DATA_LANE (0X3)
-#define DSI_FOUR_DATA_LANE (0x4)
-#define DSI_DPI_VIRT_CHANNEL_OFFSET (0x3)
-#define DSI_DBI_VIRT_CHANNEL_OFFSET (0x5)
-#define DSI_DPI_COLOR_FORMAT_RGB565 (0x01 << 7)
-#define DSI_DPI_COLOR_FORMAT_RGB666 (0x02 << 7)
-#define DSI_DPI_COLOR_FORMAT_RGB666_UNPACK (0x03 << 7)
-#define DSI_DPI_COLOR_FORMAT_RGB888 (0x04 << 7)
-#define DSI_DBI_COLOR_FORMAT_OPTION2 (0x05 << 13)
-
-#define DSI_INTR_STATE_RXSOTERROR BIT(0)
-
-#define DSI_INTR_STATE_SPL_PKG_SENT BIT(30)
-#define DSI_INTR_STATE_TE BIT(31)
-
-#define DSI_HS_TX_TIMEOUT_MASK (0xffffff)
-
-#define DSI_LP_RX_TIMEOUT_MASK (0xffffff)
-
-#define DSI_TURN_AROUND_TIMEOUT_MASK (0x3f)
-
-#define DSI_RESET_TIMER_MASK (0xffff)
-
-#define DSI_DBI_FIFO_WM_HALF (0x0)
-#define DSI_DBI_FIFO_WM_QUARTER (0x1)
-#define DSI_DBI_FIFO_WM_LOW (0x2)
-
-#define DSI_DPI_TIMING_MASK (0xffff)
-
-#define DSI_INIT_TIMER_MASK (0xffff)
-
-#define DSI_DBI_RETURN_PACK_SIZE_MASK (0x3ff)
-
-#define DSI_LP_BYTECLK_MASK (0x0ffff)
-
-#define DSI_HS_CTRL_GEN_SHORT_W0 (0x03)
-#define DSI_HS_CTRL_GEN_SHORT_W1 (0x13)
-#define DSI_HS_CTRL_GEN_SHORT_W2 (0x23)
-#define DSI_HS_CTRL_GEN_R0 (0x04)
-#define DSI_HS_CTRL_GEN_R1 (0x14)
-#define DSI_HS_CTRL_GEN_R2 (0x24)
-#define DSI_HS_CTRL_GEN_LONG_W (0x29)
-#define DSI_HS_CTRL_MCS_SHORT_W0 (0x05)
-#define DSI_HS_CTRL_MCS_SHORT_W1 (0x15)
-#define DSI_HS_CTRL_MCS_R0 (0x06)
-#define DSI_HS_CTRL_MCS_LONG_W (0x39)
-#define DSI_HS_CTRL_VC_OFFSET (0x06)
-#define DSI_HS_CTRL_WC_OFFSET (0x08)
-
-#define DSI_FIFO_GEN_HS_DATA_FULL BIT(0)
-#define DSI_FIFO_GEN_HS_DATA_HALF_EMPTY BIT(1)
-#define DSI_FIFO_GEN_HS_DATA_EMPTY BIT(2)
-#define DSI_FIFO_GEN_LP_DATA_FULL BIT(8)
-#define DSI_FIFO_GEN_LP_DATA_HALF_EMPTY BIT(9)
-#define DSI_FIFO_GEN_LP_DATA_EMPTY BIT(10)
-#define DSI_FIFO_GEN_HS_CTRL_FULL BIT(16)
-#define DSI_FIFO_GEN_HS_CTRL_HALF_EMPTY BIT(17)
-#define DSI_FIFO_GEN_HS_CTRL_EMPTY BIT(18)
-#define DSI_FIFO_GEN_LP_CTRL_FULL BIT(24)
-#define DSI_FIFO_GEN_LP_CTRL_HALF_EMPTY BIT(25)
-#define DSI_FIFO_GEN_LP_CTRL_EMPTY BIT(26)
-#define DSI_FIFO_DBI_EMPTY BIT(27)
-#define DSI_FIFO_DPI_EMPTY BIT(28)
-
-#define DSI_DBI_HS_LP_SWITCH_MASK (0x1)
-
-#define DSI_HS_LP_SWITCH_COUNTER_OFFSET (0x0)
-#define DSI_LP_HS_SWITCH_COUNTER_OFFSET (0x16)
-
-#define DSI_DPI_CTRL_HS_SHUTDOWN (0x00000001)
-#define DSI_DPI_CTRL_HS_TURN_ON (0x00000002)
-
-/*dsi power modes*/
-#define DSI_POWER_MODE_DISPLAY_ON BIT(2)
-#define DSI_POWER_MODE_NORMAL_ON BIT(3)
-#define DSI_POWER_MODE_SLEEP_OUT BIT(4)
-#define DSI_POWER_MODE_PARTIAL_ON BIT(5)
-#define DSI_POWER_MODE_IDLE_ON BIT(6)
-
-enum {
- MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE = 1,
- MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS = 2,
- MDFLD_DSI_VIDEO_BURST_MODE = 3,
-};
-
-#define DSI_DPI_COMPLETE_LAST_LINE BIT(2)
-#define DSI_DPI_DISABLE_BTA BIT(3)
-
-struct mdfld_dsi_connector {
- struct gma_connector base;
-
- int pipe;
- void *private;
- void *pkg_sender;
-
- /* Connection status */
- enum drm_connector_status status;
-};
-
-struct mdfld_dsi_encoder {
- struct gma_encoder base;
- void *private;
-};
-
-/*
- * DSI config; consists of one DSI connector and two DSI encoders.
- * DRM will pick the DSI encoder based on the different configs.
- */
-struct mdfld_dsi_config {
- struct drm_device *dev;
- struct drm_display_mode *fixed_mode;
- struct drm_display_mode *mode;
-
- struct mdfld_dsi_connector *connector;
- struct mdfld_dsi_encoder *encoder;
-
- int changed;
-
- int bpp;
- int lane_count;
- /*Virtual channel number for this encoder*/
- int channel_num;
- /*video mode configure*/
- int video_mode;
-
- int dvr_ic_inited;
-};
-
-static inline struct mdfld_dsi_connector *mdfld_dsi_connector(
- struct drm_connector *connector)
-{
- struct gma_connector *gma_connector;
-
- gma_connector = to_gma_connector(connector);
-
- return container_of(gma_connector, struct mdfld_dsi_connector, base);
-}
-
-static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder(
- struct drm_encoder *encoder)
-{
- struct gma_encoder *gma_encoder;
-
- gma_encoder = to_gma_encoder(encoder);
-
- return container_of(gma_encoder, struct mdfld_dsi_encoder, base);
-}
-
-static inline struct mdfld_dsi_config *
- mdfld_dsi_get_config(struct mdfld_dsi_connector *connector)
-{
- if (!connector)
- return NULL;
- return (struct mdfld_dsi_config *)connector->private;
-}
-
-static inline void *mdfld_dsi_get_pkg_sender(struct mdfld_dsi_config *config)
-{
- struct mdfld_dsi_connector *dsi_connector;
-
- if (!config)
- return NULL;
-
- dsi_connector = config->connector;
-
- if (!dsi_connector)
- return NULL;
-
- return dsi_connector->pkg_sender;
-}
-
-static inline struct mdfld_dsi_config *
- mdfld_dsi_encoder_get_config(struct mdfld_dsi_encoder *encoder)
-{
- if (!encoder)
- return NULL;
- return (struct mdfld_dsi_config *)encoder->private;
-}
-
-static inline struct mdfld_dsi_connector *
- mdfld_dsi_encoder_get_connector(struct mdfld_dsi_encoder *encoder)
-{
- struct mdfld_dsi_config *config;
-
- if (!encoder)
- return NULL;
-
- config = mdfld_dsi_encoder_get_config(encoder);
- if (!config)
- return NULL;
-
- return config->connector;
-}
-
-static inline void *mdfld_dsi_encoder_get_pkg_sender(
- struct mdfld_dsi_encoder *encoder)
-{
- struct mdfld_dsi_config *dsi_config;
-
- dsi_config = mdfld_dsi_encoder_get_config(encoder);
- if (!dsi_config)
- return NULL;
-
- return mdfld_dsi_get_pkg_sender(dsi_config);
-}
-
-static inline int mdfld_dsi_encoder_get_pipe(struct mdfld_dsi_encoder *encoder)
-{
- struct mdfld_dsi_connector *connector;
-
- if (!encoder)
- return -1;
-
- connector = mdfld_dsi_encoder_get_connector(encoder);
- if (!connector)
- return -1;
- return connector->pipe;
-}
-
-/* Export functions */
-extern void mdfld_dsi_gen_fifo_ready(struct drm_device *dev,
- u32 gen_fifo_stat_reg, u32 fifo_stat);
-extern void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config,
- int pipe);
-extern void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe,
- int level);
-extern void mdfld_dsi_output_init(struct drm_device *dev,
- int pipe,
- const struct panel_funcs *p_vid_funcs);
-extern void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config,
- int pipe);
-
-extern int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
- u32 *mode, bool hs);
-extern int mdfld_dsi_panel_reset(struct drm_device *dev, int pipe);
-
-#endif /*__MDFLD_DSI_OUTPUT_H__*/
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
deleted file mode 100644
index 6e0de83e9f7d..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ /dev/null
@@ -1,679 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#include <linux/delay.h>
-#include <linux/freezer.h>
-
-#include <video/mipi_display.h>
-
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_output.h"
-#include "mdfld_dsi_pkg_sender.h"
-
-#define MDFLD_DSI_READ_MAX_COUNT 5000
-
-enum {
- MDFLD_DSI_PANEL_MODE_SLEEP = 0x1,
-};
-
-enum {
- MDFLD_DSI_PKG_SENDER_FREE = 0x0,
- MDFLD_DSI_PKG_SENDER_BUSY = 0x1,
-};
-
-static const char *const dsi_errors[] = {
- "RX SOT Error",
- "RX SOT Sync Error",
- "RX EOT Sync Error",
- "RX Escape Mode Entry Error",
- "RX LP TX Sync Error",
- "RX HS Receive Timeout Error",
- "RX False Control Error",
- "RX ECC Single Bit Error",
- "RX ECC Multibit Error",
- "RX Checksum Error",
- "RX DSI Data Type Not Recognised",
- "RX DSI VC ID Invalid",
- "TX False Control Error",
- "TX ECC Single Bit Error",
- "TX ECC Multibit Error",
- "TX Checksum Error",
- "TX DSI Data Type Not Recognised",
- "TX DSI VC ID invalid",
- "High Contention",
- "Low contention",
- "DPI FIFO Under run",
- "HS TX Timeout",
- "LP RX Timeout",
- "Turn Around ACK Timeout",
- "ACK With No Error",
- "RX Invalid TX Length",
- "RX Prot Violation",
- "HS Generic Write FIFO Full",
- "LP Generic Write FIFO Full",
- "Generic Read Data Avail",
- "Special Packet Sent",
- "Tearing Effect",
-};
-
-static inline int wait_for_gen_fifo_empty(struct mdfld_dsi_pkg_sender *sender,
- u32 mask)
-{
- struct drm_device *dev = sender->dev;
- u32 gen_fifo_stat_reg = sender->mipi_gen_fifo_stat_reg;
- int retry = 0xffff;
-
- while (retry--) {
- if ((mask & REG_READ(gen_fifo_stat_reg)) == mask)
- return 0;
- udelay(100);
- }
- DRM_ERROR("fifo is NOT empty 0x%08x\n", REG_READ(gen_fifo_stat_reg));
- return -EIO;
-}
-
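-/*
- * Convenience wrappers: the masks below pick the "FIFO empty" status bits
- * for the HS/LP generic data and control FIFOs plus the DBI and DPI FIFOs
- * (see the DSI_FIFO_GEN_* definitions in mdfld_dsi_output.h).
- */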
-static int wait_for_all_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
-{
- return wait_for_gen_fifo_empty(sender, (BIT(2) | BIT(10) | BIT(18) |
- BIT(26) | BIT(27) | BIT(28)));
-}
-
-static int wait_for_lp_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
-{
- return wait_for_gen_fifo_empty(sender, (BIT(10) | BIT(26)));
-}
-
-static int wait_for_hs_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
-{
- return wait_for_gen_fifo_empty(sender, (BIT(2) | BIT(18)));
-}
-
-static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask)
-{
- u32 intr_stat_reg = sender->mipi_intr_stat_reg;
- struct drm_device *dev = sender->dev;
-
- dev_dbg(sender->dev->dev, "Handling error 0x%08x\n", mask);
-
- switch (mask) {
- case BIT(0):
- case BIT(1):
- case BIT(2):
- case BIT(3):
- case BIT(4):
- case BIT(5):
- case BIT(6):
- case BIT(7):
- case BIT(8):
- case BIT(9):
- case BIT(10):
- case BIT(11):
- case BIT(12):
- case BIT(13):
- dev_dbg(sender->dev->dev, "No Action required\n");
- break;
- case BIT(14):
- /*wait for all fifo empty*/
- /*wait_for_all_fifos_empty(sender)*/
- break;
- case BIT(15):
- dev_dbg(sender->dev->dev, "No Action required\n");
- break;
- case BIT(16):
- break;
- case BIT(17):
- break;
- case BIT(18):
- case BIT(19):
- dev_dbg(sender->dev->dev, "High/Low contention detected\n");
- /*wait for contention recovery time*/
- /*mdelay(10);*/
- /*wait for all fifo empty*/
- if (0)
- wait_for_all_fifos_empty(sender);
- break;
- case BIT(20):
- dev_dbg(sender->dev->dev, "No Action required\n");
- break;
- case BIT(21):
- /*wait for all fifo empty*/
- /*wait_for_all_fifos_empty(sender);*/
- break;
- case BIT(22):
- break;
- case BIT(23):
- case BIT(24):
- case BIT(25):
- case BIT(26):
- case BIT(27):
- dev_dbg(sender->dev->dev, "HS Gen fifo full\n");
- REG_WRITE(intr_stat_reg, mask);
- wait_for_hs_fifos_empty(sender);
- break;
- case BIT(28):
- dev_dbg(sender->dev->dev, "LP Gen fifo full\n");
- REG_WRITE(intr_stat_reg, mask);
- wait_for_lp_fifos_empty(sender);
- break;
- case BIT(29):
- case BIT(30):
- case BIT(31):
- dev_dbg(sender->dev->dev, "No Action required\n");
- break;
- }
-
- if (mask & REG_READ(intr_stat_reg))
- dev_dbg(sender->dev->dev,
- "Cannot clean interrupt 0x%08x\n", mask);
- return 0;
-}
-
-static int dsi_error_handler(struct mdfld_dsi_pkg_sender *sender)
-{
- struct drm_device *dev = sender->dev;
- u32 intr_stat_reg = sender->mipi_intr_stat_reg;
- u32 mask;
- u32 intr_stat;
- int i;
- int err = 0;
-
- intr_stat = REG_READ(intr_stat_reg);
-
- for (i = 0; i < 32; i++) {
- mask = (0x00000001UL) << i;
- if (intr_stat & mask) {
- dev_dbg(sender->dev->dev, "[DSI]: %s\n", dsi_errors[i]);
- err = handle_dsi_error(sender, mask);
- if (err)
- DRM_ERROR("Cannot handle error\n");
- }
- }
- return err;
-}
-
-static int send_short_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
- u8 cmd, u8 param, bool hs)
-{
- struct drm_device *dev = sender->dev;
- u32 ctrl_reg;
- u32 val;
- u8 virtual_channel = 0;
-
- if (hs) {
- ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
-
- /* FIXME: wait_for_hs_fifos_empty(sender); */
- } else {
- ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
-
- /* FIXME: wait_for_lp_fifos_empty(sender); */
- }
-
- val = FLD_VAL(param, 23, 16) | FLD_VAL(cmd, 15, 8) |
- FLD_VAL(virtual_channel, 7, 6) | FLD_VAL(data_type, 5, 0);
-
- REG_WRITE(ctrl_reg, val);
-
- return 0;
-}
-
-static int send_long_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
- u8 *data, int len, bool hs)
-{
- struct drm_device *dev = sender->dev;
- u32 ctrl_reg;
- u32 data_reg;
- u32 val;
- u8 *p;
- u8 b1, b2, b3, b4;
- u8 virtual_channel = 0;
- int i;
-
- if (hs) {
- ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
- data_reg = sender->mipi_hs_gen_data_reg;
-
- /* FIXME: wait_for_hs_fifos_empty(sender); */
- } else {
- ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
- data_reg = sender->mipi_lp_gen_data_reg;
-
- /* FIXME: wait_for_lp_fifos_empty(sender); */
- }
-
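-	/*
-	 * Pack the payload little-endian, four bytes per 32-bit write to the
-	 * generic data register; any trailing 1-3 bytes go out in one final
-	 * partial write below.
-	 */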
- p = data;
- for (i = 0; i < len / 4; i++) {
- b1 = *p++;
- b2 = *p++;
- b3 = *p++;
- b4 = *p++;
-
- REG_WRITE(data_reg, b4 << 24 | b3 << 16 | b2 << 8 | b1);
- }
-
- i = len % 4;
- if (i) {
- b1 = 0; b2 = 0; b3 = 0;
-
- switch (i) {
- case 3:
- b1 = *p++;
- b2 = *p++;
- b3 = *p++;
- break;
- case 2:
- b1 = *p++;
- b2 = *p++;
- break;
- case 1:
- b1 = *p++;
- break;
- }
-
- REG_WRITE(data_reg, b3 << 16 | b2 << 8 | b1);
- }
-
- val = FLD_VAL(len, 23, 8) | FLD_VAL(virtual_channel, 7, 6) |
- FLD_VAL(data_type, 5, 0);
-
- REG_WRITE(ctrl_reg, val);
-
- return 0;
-}
-
-static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
- u8 *data, u16 len)
-{
- u8 cmd;
-
- switch (data_type) {
- case MIPI_DSI_DCS_SHORT_WRITE:
- case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
- case MIPI_DSI_DCS_LONG_WRITE:
- cmd = *data;
- break;
- default:
- return 0;
- }
-
-	/* Prevent other packages from being sent while we delay below */
- sender->status = MDFLD_DSI_PKG_SENDER_BUSY;
-
-	/* Wait 120 milliseconds in case exit_sleep_mode was just sent */
- if (unlikely(cmd == MIPI_DCS_ENTER_SLEEP_MODE)) {
- /*TODO: replace it with msleep later*/
- mdelay(120);
- }
-
- if (unlikely(cmd == MIPI_DCS_EXIT_SLEEP_MODE)) {
- /*TODO: replace it with msleep later*/
- mdelay(120);
- }
- return 0;
-}
-
-static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
- u8 *data, u16 len)
-{
- u8 cmd;
-
- switch (data_type) {
- case MIPI_DSI_DCS_SHORT_WRITE:
- case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
- case MIPI_DSI_DCS_LONG_WRITE:
- cmd = *data;
- break;
- default:
- return 0;
- }
-
- /*update panel status*/
- if (unlikely(cmd == MIPI_DCS_ENTER_SLEEP_MODE)) {
- sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP;
- /*TODO: replace it with msleep later*/
- mdelay(120);
- } else if (unlikely(cmd == MIPI_DCS_EXIT_SLEEP_MODE)) {
- sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP;
- /*TODO: replace it with msleep later*/
- mdelay(120);
- } else if (unlikely(cmd == MIPI_DCS_SOFT_RESET)) {
- /*TODO: replace it with msleep later*/
- mdelay(5);
- }
-
- sender->status = MDFLD_DSI_PKG_SENDER_FREE;
-
- return 0;
-}
-
-static int send_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
- u8 *data, u16 len, bool hs)
-{
- int ret;
-
- /*handle DSI error*/
- ret = dsi_error_handler(sender);
- if (ret) {
- DRM_ERROR("Error handling failed\n");
- return -EAGAIN;
- }
-
- /* send pkg */
- if (sender->status == MDFLD_DSI_PKG_SENDER_BUSY) {
- DRM_ERROR("sender is busy\n");
- return -EAGAIN;
- }
-
- ret = send_pkg_prepare(sender, data_type, data, len);
- if (ret) {
- DRM_ERROR("send_pkg_prepare error\n");
- return ret;
- }
-
- switch (data_type) {
- case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
- case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
- case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
- case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
- case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
- case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
- case MIPI_DSI_DCS_SHORT_WRITE:
- case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
- case MIPI_DSI_DCS_READ:
- ret = send_short_pkg(sender, data_type, data[0], data[1], hs);
- break;
- case MIPI_DSI_GENERIC_LONG_WRITE:
- case MIPI_DSI_DCS_LONG_WRITE:
- ret = send_long_pkg(sender, data_type, data, len, hs);
- break;
- }
-
- send_pkg_done(sender, data_type, data, len);
-
- /*FIXME: should I query complete and fifo empty here?*/
-
- return ret;
-}
-
-int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
- u32 len, bool hs)
-{
- unsigned long flags;
-
- if (!sender || !data || !len) {
- DRM_ERROR("Invalid parameters\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&sender->lock, flags);
- send_pkg(sender, MIPI_DSI_DCS_LONG_WRITE, data, len, hs);
- spin_unlock_irqrestore(&sender->lock, flags);
-
- return 0;
-}
-
-int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
- u8 param, u8 param_num, bool hs)
-{
- u8 data[2];
- unsigned long flags;
- u8 data_type;
-
- if (!sender) {
- DRM_ERROR("Invalid parameter\n");
- return -EINVAL;
- }
-
- data[0] = cmd;
-
- if (param_num) {
- data_type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
- data[1] = param;
- } else {
- data_type = MIPI_DSI_DCS_SHORT_WRITE;
- data[1] = 0;
- }
-
- spin_lock_irqsave(&sender->lock, flags);
- send_pkg(sender, data_type, data, sizeof(data), hs);
- spin_unlock_irqrestore(&sender->lock, flags);
-
- return 0;
-}
-
-int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
- u8 param1, u8 param_num, bool hs)
-{
- u8 data[2];
- unsigned long flags;
- u8 data_type;
-
- if (!sender || param_num > 2) {
- DRM_ERROR("Invalid parameter\n");
- return -EINVAL;
- }
-
- switch (param_num) {
- case 0:
- data_type = MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM;
- data[0] = 0;
- data[1] = 0;
- break;
- case 1:
- data_type = MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM;
- data[0] = param0;
- data[1] = 0;
- break;
- case 2:
- data_type = MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM;
- data[0] = param0;
- data[1] = param1;
- break;
- }
-
- spin_lock_irqsave(&sender->lock, flags);
- send_pkg(sender, data_type, data, sizeof(data), hs);
- spin_unlock_irqrestore(&sender->lock, flags);
-
- return 0;
-}
-
-int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
- u32 len, bool hs)
-{
- unsigned long flags;
-
- if (!sender || !data || !len) {
- DRM_ERROR("Invalid parameters\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&sender->lock, flags);
- send_pkg(sender, MIPI_DSI_GENERIC_LONG_WRITE, data, len, hs);
- spin_unlock_irqrestore(&sender->lock, flags);
-
- return 0;
-}
-
-static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
- u8 *data, u16 len, u32 *data_out, u16 len_out, bool hs)
-{
- unsigned long flags;
- struct drm_device *dev;
- int i;
- u32 gen_data_reg;
- int retry = MDFLD_DSI_READ_MAX_COUNT;
-
- if (!sender || !data_out || !len_out) {
- DRM_ERROR("Invalid parameters\n");
- return -EINVAL;
- }
-
- dev = sender->dev;
-
-	/*
-	 * Do the read:
-	 * 0) send out the generic read request
-	 * 1) poll the read-data-available interrupt
-	 * 2) read back the data
-	 */
- spin_lock_irqsave(&sender->lock, flags);
-
- REG_WRITE(sender->mipi_intr_stat_reg, BIT(29));
-
- if ((REG_READ(sender->mipi_intr_stat_reg) & BIT(29)))
- DRM_ERROR("Can NOT clean read data valid interrupt\n");
-
- /*send out read request*/
- send_pkg(sender, data_type, data, len, hs);
-
- /*polling read data avail interrupt*/
- while (retry && !(REG_READ(sender->mipi_intr_stat_reg) & BIT(29))) {
- udelay(100);
- retry--;
- }
-
- if (!retry) {
- spin_unlock_irqrestore(&sender->lock, flags);
- return -ETIMEDOUT;
- }
-
- REG_WRITE(sender->mipi_intr_stat_reg, BIT(29));
-
- /*read data*/
- if (hs)
- gen_data_reg = sender->mipi_hs_gen_data_reg;
- else
- gen_data_reg = sender->mipi_lp_gen_data_reg;
-
- for (i = 0; i < len_out; i++)
- *(data_out + i) = REG_READ(gen_data_reg);
-
- spin_unlock_irqrestore(&sender->lock, flags);
-
- return 0;
-}
-
-int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
- u32 *data, u16 len, bool hs)
-{
- if (!sender || !data || !len) {
- DRM_ERROR("Invalid parameters\n");
- return -EINVAL;
- }
-
- return __read_panel_data(sender, MIPI_DSI_DCS_READ, &cmd, 1,
- data, len, hs);
-}
-
-int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
- int pipe)
-{
- struct mdfld_dsi_pkg_sender *pkg_sender;
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_get_config(dsi_connector);
- struct drm_device *dev = dsi_config->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- u32 mipi_val = 0;
-
- if (!dsi_connector) {
- DRM_ERROR("Invalid parameter\n");
- return -EINVAL;
- }
-
- pkg_sender = dsi_connector->pkg_sender;
-
- if (!pkg_sender || IS_ERR(pkg_sender)) {
- pkg_sender = kzalloc(sizeof(struct mdfld_dsi_pkg_sender),
- GFP_KERNEL);
- if (!pkg_sender) {
- DRM_ERROR("Create DSI pkg sender failed\n");
- return -ENOMEM;
- }
- dsi_connector->pkg_sender = (void *)pkg_sender;
- }
-
- pkg_sender->dev = dev;
- pkg_sender->dsi_connector = dsi_connector;
- pkg_sender->pipe = pipe;
- pkg_sender->pkg_num = 0;
- pkg_sender->panel_mode = 0;
- pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
-
- /*init regs*/
- /* FIXME: should just copy the regmap ptr ? */
- pkg_sender->dpll_reg = map->dpll;
- pkg_sender->dspcntr_reg = map->cntr;
- pkg_sender->pipeconf_reg = map->conf;
- pkg_sender->dsplinoff_reg = map->linoff;
- pkg_sender->dspsurf_reg = map->surf;
- pkg_sender->pipestat_reg = map->status;
-
- pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
- pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe);
- pkg_sender->mipi_hs_gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe);
- pkg_sender->mipi_lp_gen_ctrl_reg = MIPI_LP_GEN_CTRL_REG(pipe);
- pkg_sender->mipi_hs_gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe);
- pkg_sender->mipi_gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
- pkg_sender->mipi_data_addr_reg = MIPI_DATA_ADD_REG(pipe);
- pkg_sender->mipi_data_len_reg = MIPI_DATA_LEN_REG(pipe);
- pkg_sender->mipi_cmd_addr_reg = MIPI_CMD_ADD_REG(pipe);
- pkg_sender->mipi_cmd_len_reg = MIPI_CMD_LEN_REG(pipe);
-
- /*init lock*/
- spin_lock_init(&pkg_sender->lock);
-
- if (mdfld_get_panel_type(dev, pipe) != TC35876X) {
-		/*
-		 * For video mode, don't enable DPI timing output here;
-		 * it will be initialized during mode setting.
-		 */
- mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
-
- if (pipe == 0)
- mipi_val |= 0x2;
-
- REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi_val);
- REG_READ(MIPI_PORT_CONTROL(pipe));
-
- /* do dsi controller init */
- mdfld_dsi_controller_init(dsi_config, pipe);
- }
-
- return 0;
-}
-
-void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender)
-{
- if (!sender || IS_ERR(sender))
- return;
-
- /*free*/
- kfree(sender);
-}
-
-
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
deleted file mode 100644
index 0478a21c15d5..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Jackie Li<yaodong.li@intel.com>
- */
-#ifndef __MDFLD_DSI_PKG_SENDER_H__
-#define __MDFLD_DSI_PKG_SENDER_H__
-
-#include <linux/kthread.h>
-
-#define MDFLD_MAX_DCS_PARAM 8
-
-struct mdfld_dsi_pkg_sender {
- struct drm_device *dev;
- struct mdfld_dsi_connector *dsi_connector;
- u32 status;
- u32 panel_mode;
-
- int pipe;
-
- spinlock_t lock;
-
- u32 pkg_num;
-
- /* Registers */
- u32 dpll_reg;
- u32 dspcntr_reg;
- u32 pipeconf_reg;
- u32 pipestat_reg;
- u32 dsplinoff_reg;
- u32 dspsurf_reg;
-
- u32 mipi_intr_stat_reg;
- u32 mipi_lp_gen_data_reg;
- u32 mipi_hs_gen_data_reg;
- u32 mipi_lp_gen_ctrl_reg;
- u32 mipi_hs_gen_ctrl_reg;
- u32 mipi_gen_fifo_stat_reg;
- u32 mipi_data_addr_reg;
- u32 mipi_data_len_reg;
- u32 mipi_cmd_addr_reg;
- u32 mipi_cmd_len_reg;
-};
-
-extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
- int pipe);
-extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender);
-int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
- u8 param, u8 param_num, bool hs);
-int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
- u32 len, bool hs);
-int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
- u8 param1, u8 param_num, bool hs);
-int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
- u32 len, bool hs);
-/* Read interfaces */
-int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
- u32 *data, u16 len, bool hs);
-
-#endif
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
deleted file mode 100644
index aae2d358364c..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ /dev/null
@@ -1,966 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright © 2006-2007 Intel Corporation
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- */
-
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/pm_runtime.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_fourcc.h>
-
-#include "framebuffer.h"
-#include "gma_display.h"
-#include "mdfld_dsi_output.h"
-#include "mdfld_output.h"
-#include "psb_intel_reg.h"
-
-/* Hardcoded currently */
-static int ksel = KSEL_CRYSTAL_19;
-
-struct psb_intel_range_t {
- int min, max;
-};
-
-struct mrst_limit_t {
- struct psb_intel_range_t dot, m, p1;
-};
-
-struct mrst_clock_t {
- /* derived values */
- int dot;
- int m;
- int p1;
-};
-
-#define COUNT_MAX 0x10000000
-
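-/*
- * Wait for the pipe to actually turn off. Currently this only waits for a
- * vblank and returns early; the register poll below is unreachable (see the
- * FIXME).
- */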
-void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- int count, temp;
-
- switch (pipe) {
- case 0:
- case 1:
- case 2:
- break;
- default:
- DRM_ERROR("Illegal Pipe Number.\n");
- return;
- }
-
- /* FIXME JLIU7_PO */
- gma_wait_for_vblank(dev);
- return;
-
-	/* Wait for the pipe disable to take effect. */
- for (count = 0; count < COUNT_MAX; count++) {
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_PIPE_STATE) == 0)
- break;
- }
-}
-
-void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- int count, temp;
-
- switch (pipe) {
- case 0:
- case 1:
- case 2:
- break;
- default:
- DRM_ERROR("Illegal Pipe Number.\n");
- return;
- }
-
- /* FIXME JLIU7_PO */
- gma_wait_for_vblank(dev);
- return;
-
-	/* Wait for the pipe enable to take effect. */
- for (count = 0; count < COUNT_MAX; count++) {
- temp = REG_READ(map->conf);
- if (temp & PIPEACONF_PIPE_STATE)
- break;
- }
-}
-
-/**
- * Return the pipe currently connected to the panel fitter,
- * or -1 if the panel fitter is not present or not in use
- */
-static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
-{
- u32 pfit_control;
-
- pfit_control = REG_READ(PFIT_CONTROL);
-
- /* See if the panel fitter is in use */
- if ((pfit_control & PFIT_ENABLE) == 0)
- return -1;
-
- /* 965 can place panel fitter on either pipe */
- return (pfit_control >> 29) & 0x3;
-}
-
-static int check_fb(struct drm_framebuffer *fb)
-{
- if (!fb)
- return 0;
-
- switch (fb->format->cpp[0] * 8) {
- case 8:
- case 16:
- case 24:
- case 32:
- return 0;
- default:
- DRM_ERROR("Unknown color depth\n");
- return -EINVAL;
- }
-}
-
-static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- int pipe = gma_crtc->pipe;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- unsigned long start, offset;
- u32 dspcntr;
- int ret;
-
- dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe);
-
- /* no fb bound */
- if (!fb) {
- dev_dbg(dev->dev, "No FB bound\n");
- return 0;
- }
-
- ret = check_fb(fb);
- if (ret)
- return ret;
-
- if (pipe > 2) {
- DRM_ERROR("Illegal Pipe Number.\n");
- return -EINVAL;
- }
-
- if (!gma_power_begin(dev, true))
- return 0;
-
- start = to_gtt_range(fb->obj[0])->offset;
- offset = y * fb->pitches[0] + x * fb->format->cpp[0];
-
- REG_WRITE(map->stride, fb->pitches[0]);
- dspcntr = REG_READ(map->cntr);
- dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
-
- switch (fb->format->cpp[0] * 8) {
- case 8:
- dspcntr |= DISPPLANE_8BPP;
- break;
- case 16:
- if (fb->format->depth == 15)
- dspcntr |= DISPPLANE_15_16BPP;
- else
- dspcntr |= DISPPLANE_16BPP;
- break;
- case 24:
- case 32:
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
- break;
- }
- REG_WRITE(map->cntr, dspcntr);
-
- dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
- start, offset, x, y);
- REG_WRITE(map->linoff, offset);
- REG_READ(map->linoff);
- REG_WRITE(map->surf, start);
- REG_READ(map->surf);
-
- gma_power_end(dev);
-
- return 0;
-}
-
-/*
- * Disable the pipe, plane and PLL.
- */
-void mdfld_disable_crtc(struct drm_device *dev, int pipe)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- u32 temp;
-
- dev_dbg(dev->dev, "pipe = %d\n", pipe);
-
-
- if (pipe != 1)
- mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe),
- HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
-
- /* Disable display plane */
- temp = REG_READ(map->cntr);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(map->cntr,
- temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(map->base, REG_READ(map->base));
- REG_READ(map->base);
- }
-
- /* FIXME_JLIU7 MDFLD_PO revisit */
-
- /* Next, disable display pipes */
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- temp &= ~PIPEACONF_ENABLE;
- temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
- REG_WRITE(map->conf, temp);
- REG_READ(map->conf);
-
-		/* Wait for the pipe disable to take effect. */
- mdfldWaitForPipeDisable(dev, pipe);
- }
-
- temp = REG_READ(map->dpll);
- if (temp & DPLL_VCO_ENABLE) {
- if ((pipe != 1 &&
- !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF))
- & PIPEACONF_ENABLE)) || pipe == 1) {
- temp &= ~(DPLL_VCO_ENABLE);
- REG_WRITE(map->dpll, temp);
- REG_READ(map->dpll);
- /* Wait for the clocks to turn off. */
- /* FIXME_MDFLD PO may need more delay */
- udelay(500);
-
- if (!(temp & MDFLD_PWR_GATE_EN)) {
- /* gating power of DPLL */
- REG_WRITE(map->dpll, temp | MDFLD_PWR_GATE_EN);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(5000);
- }
- }
- }
-
-}
-
-/**
- * Sets the power management mode of the pipe and plane.
- *
- * This code should probably grow support for turning the cursor off and back
- * on appropriately at the same time as we're turning the pipe off/on.
- */
-static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- int pipe = gma_crtc->pipe;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- u32 pipeconf = dev_priv->pipeconf[pipe];
- u32 temp;
- int timeout = 0;
-
- dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe);
-
- /* Note: Old code uses pipe a stat for pipe b but that appears
- to be a bug */
-
- if (!gma_power_begin(dev, true))
- return;
-
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- /* Enable the DPLL */
- temp = REG_READ(map->dpll);
-
- if ((temp & DPLL_VCO_ENABLE) == 0) {
-			/* When ungating the DPLL power, we need to wait
-			   0.5us before enabling the VCO */
- if (temp & MDFLD_PWR_GATE_EN) {
- temp &= ~MDFLD_PWR_GATE_EN;
- REG_WRITE(map->dpll, temp);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
- }
-
- REG_WRITE(map->dpll, temp);
- REG_READ(map->dpll);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
-
- REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
-
- /**
- * wait for DSI PLL to lock
- * NOTE: only need to poll status of pipe 0 and pipe 1,
- * since both MIPI pipes share the same PLL.
- */
- while ((pipe != 2) && (timeout < 20000) &&
- !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
- udelay(150);
- timeout++;
- }
- }
-
- /* Enable the plane */
- temp = REG_READ(map->cntr);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(map->cntr,
- temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(map->base, REG_READ(map->base));
- }
-
- /* Enable the pipe */
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_ENABLE) == 0) {
- REG_WRITE(map->conf, pipeconf);
-
-			/* Wait for the pipe enable to take effect. */
- mdfldWaitForPipeEnable(dev, pipe);
- }
-
- /*workaround for sighting 3741701 Random X blank display*/
- /*perform w/a in video mode only on pipe A or C*/
- if (pipe == 0 || pipe == 2) {
- REG_WRITE(map->status, REG_READ(map->status));
- msleep(100);
- if (PIPE_VBLANK_STATUS & REG_READ(map->status))
- dev_dbg(dev->dev, "OK");
- else {
- dev_dbg(dev->dev, "STUCK!!!!");
- /*shutdown controller*/
- temp = REG_READ(map->cntr);
- REG_WRITE(map->cntr,
- temp & ~DISPLAY_PLANE_ENABLE);
- REG_WRITE(map->base, REG_READ(map->base));
- /*mdfld_dsi_dpi_shut_down(dev, pipe);*/
- REG_WRITE(0xb048, 1);
- msleep(100);
- temp = REG_READ(map->conf);
- temp &= ~PIPEACONF_ENABLE;
- REG_WRITE(map->conf, temp);
- msleep(100); /*wait for pipe disable*/
- REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0);
- msleep(100);
- REG_WRITE(0xb004, REG_READ(0xb004));
- /* try to bring the controller back up again*/
- REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1);
- temp = REG_READ(map->cntr);
- REG_WRITE(map->cntr,
- temp | DISPLAY_PLANE_ENABLE);
- REG_WRITE(map->base, REG_READ(map->base));
- /*mdfld_dsi_dpi_turn_on(dev, pipe);*/
- REG_WRITE(0xb048, 2);
- msleep(100);
- temp = REG_READ(map->conf);
- temp |= PIPEACONF_ENABLE;
- REG_WRITE(map->conf, temp);
- }
- }
-
- gma_crtc_load_lut(crtc);
-
- /* Give the overlay scaler a chance to enable
- if it's on this pipe */
- /* psb_intel_crtc_dpms_video(crtc, true); TODO */
-
- break;
- case DRM_MODE_DPMS_OFF:
- /* Give the overlay scaler a chance to disable
- * if it's on this pipe */
- /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
- if (pipe != 1)
- mdfld_dsi_gen_fifo_ready(dev,
- MIPI_GEN_FIFO_STAT_REG(pipe),
- HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
-
- /* Disable the VGA plane that we never use */
- REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-
- /* Disable display plane */
- temp = REG_READ(map->cntr);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(map->cntr,
- temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(map->base, REG_READ(map->base));
- REG_READ(map->base);
- }
-
- /* Next, disable display pipes */
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- temp &= ~PIPEACONF_ENABLE;
- temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
- REG_WRITE(map->conf, temp);
- REG_READ(map->conf);
-
-			/* Wait for the pipe disable to take effect. */
- mdfldWaitForPipeDisable(dev, pipe);
- }
-
- temp = REG_READ(map->dpll);
- if (temp & DPLL_VCO_ENABLE) {
- if ((pipe != 1 && !((REG_READ(PIPEACONF)
- | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
- || pipe == 1) {
- temp &= ~(DPLL_VCO_ENABLE);
- REG_WRITE(map->dpll, temp);
- REG_READ(map->dpll);
- /* Wait for the clocks to turn off. */
- /* FIXME_MDFLD PO may need more delay */
- udelay(500);
- }
- }
- break;
- }
- gma_power_end(dev);
-}
-
-
-#define MDFLD_LIMT_DPLL_19 0
-#define MDFLD_LIMT_DPLL_25 1
-#define MDFLD_LIMT_DPLL_83 2
-#define MDFLD_LIMT_DPLL_100 3
-#define MDFLD_LIMT_DSIPLL_19 4
-#define MDFLD_LIMT_DSIPLL_25 5
-#define MDFLD_LIMT_DSIPLL_83 6
-#define MDFLD_LIMT_DSIPLL_100 7
-
-#define MDFLD_DOT_MIN 19750
-#define MDFLD_DOT_MAX 120000
-#define MDFLD_DPLL_M_MIN_19 113
-#define MDFLD_DPLL_M_MAX_19 155
-#define MDFLD_DPLL_P1_MIN_19 2
-#define MDFLD_DPLL_P1_MAX_19 10
-#define MDFLD_DPLL_M_MIN_25 101
-#define MDFLD_DPLL_M_MAX_25 130
-#define MDFLD_DPLL_P1_MIN_25 2
-#define MDFLD_DPLL_P1_MAX_25 10
-#define MDFLD_DPLL_M_MIN_83 64
-#define MDFLD_DPLL_M_MAX_83 64
-#define MDFLD_DPLL_P1_MIN_83 2
-#define MDFLD_DPLL_P1_MAX_83 2
-#define MDFLD_DPLL_M_MIN_100 64
-#define MDFLD_DPLL_M_MAX_100 64
-#define MDFLD_DPLL_P1_MIN_100 2
-#define MDFLD_DPLL_P1_MAX_100 2
-#define MDFLD_DSIPLL_M_MIN_19 131
-#define MDFLD_DSIPLL_M_MAX_19 175
-#define MDFLD_DSIPLL_P1_MIN_19 3
-#define MDFLD_DSIPLL_P1_MAX_19 8
-#define MDFLD_DSIPLL_M_MIN_25 97
-#define MDFLD_DSIPLL_M_MAX_25 140
-#define MDFLD_DSIPLL_P1_MIN_25 3
-#define MDFLD_DSIPLL_P1_MAX_25 9
-#define MDFLD_DSIPLL_M_MIN_83 33
-#define MDFLD_DSIPLL_M_MAX_83 92
-#define MDFLD_DSIPLL_P1_MIN_83 2
-#define MDFLD_DSIPLL_P1_MAX_83 3
-#define MDFLD_DSIPLL_M_MIN_100 97
-#define MDFLD_DSIPLL_M_MAX_100 140
-#define MDFLD_DSIPLL_P1_MIN_100 3
-#define MDFLD_DSIPLL_P1_MAX_100 9
-
-static const struct mrst_limit_t mdfld_limits[] = {
- { /* MDFLD_LIMT_DPLL_19 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DPLL_M_MIN_19, .max = MDFLD_DPLL_M_MAX_19},
- .p1 = {.min = MDFLD_DPLL_P1_MIN_19, .max = MDFLD_DPLL_P1_MAX_19},
- },
- { /* MDFLD_LIMT_DPLL_25 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DPLL_M_MIN_25, .max = MDFLD_DPLL_M_MAX_25},
- .p1 = {.min = MDFLD_DPLL_P1_MIN_25, .max = MDFLD_DPLL_P1_MAX_25},
- },
- { /* MDFLD_LIMT_DPLL_83 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DPLL_M_MIN_83, .max = MDFLD_DPLL_M_MAX_83},
- .p1 = {.min = MDFLD_DPLL_P1_MIN_83, .max = MDFLD_DPLL_P1_MAX_83},
- },
- { /* MDFLD_LIMT_DPLL_100 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DPLL_M_MIN_100, .max = MDFLD_DPLL_M_MAX_100},
- .p1 = {.min = MDFLD_DPLL_P1_MIN_100, .max = MDFLD_DPLL_P1_MAX_100},
- },
- { /* MDFLD_LIMT_DSIPLL_19 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DSIPLL_M_MIN_19, .max = MDFLD_DSIPLL_M_MAX_19},
- .p1 = {.min = MDFLD_DSIPLL_P1_MIN_19, .max = MDFLD_DSIPLL_P1_MAX_19},
- },
- { /* MDFLD_LIMT_DSIPLL_25 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DSIPLL_M_MIN_25, .max = MDFLD_DSIPLL_M_MAX_25},
- .p1 = {.min = MDFLD_DSIPLL_P1_MIN_25, .max = MDFLD_DSIPLL_P1_MAX_25},
- },
- { /* MDFLD_LIMT_DSIPLL_83 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DSIPLL_M_MIN_83, .max = MDFLD_DSIPLL_M_MAX_83},
- .p1 = {.min = MDFLD_DSIPLL_P1_MIN_83, .max = MDFLD_DSIPLL_P1_MAX_83},
- },
- { /* MDFLD_LIMT_DSIPLL_100 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DSIPLL_M_MIN_100, .max = MDFLD_DSIPLL_M_MAX_100},
- .p1 = {.min = MDFLD_DSIPLL_P1_MIN_100, .max = MDFLD_DSIPLL_P1_MAX_100},
- },
-};
-
-#define MDFLD_M_MIN 21
-#define MDFLD_M_MAX 180
-static const u32 mdfld_m_converts[] = {
-/* M configuration table from 9-bit LFSR table */
- 224, 368, 440, 220, 366, 439, 219, 365, 182, 347, /* 21 - 30 */
- 173, 342, 171, 85, 298, 149, 74, 37, 18, 265, /* 31 - 40 */
- 388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */
- 83, 41, 276, 138, 325, 162, 337, 168, 340, 170, /* 51 - 60 */
- 341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */
- 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
- 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
- 71, 35, 273, 136, 324, 418, 465, 488, 500, 506, /* 91 - 100 */
- 253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */
- 478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */
- 477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */
- 210, 105, 308, 154, 77, 38, 275, 137, 68, 290, /* 131 - 140 */
- 145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */
- 380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */
- 103, 51, 25, 12, 262, 387, 193, 96, 48, 280, /* 161 - 170 */
- 396, 198, 99, 305, 152, 76, 294, 403, 457, 228, /* 171 - 180 */
-};
-
-static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
-{
- const struct mrst_limit_t *limit = NULL;
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
- || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
- if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
- limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
- else if (ksel == KSEL_BYPASS_25)
- limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_25];
- else if ((ksel == KSEL_BYPASS_83_100) &&
- (dev_priv->core_freq == 166))
- limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_83];
- else if ((ksel == KSEL_BYPASS_83_100) &&
- (dev_priv->core_freq == 100 ||
- dev_priv->core_freq == 200))
- limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
- } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
- if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
- limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
- else if (ksel == KSEL_BYPASS_25)
- limit = &mdfld_limits[MDFLD_LIMT_DPLL_25];
- else if ((ksel == KSEL_BYPASS_83_100) &&
- (dev_priv->core_freq == 166))
- limit = &mdfld_limits[MDFLD_LIMT_DPLL_83];
- else if ((ksel == KSEL_BYPASS_83_100) &&
- (dev_priv->core_freq == 100 ||
- dev_priv->core_freq == 200))
- limit = &mdfld_limits[MDFLD_LIMT_DPLL_100];
- } else {
- limit = NULL;
- dev_dbg(dev->dev, "mdfld_limit Wrong display type.\n");
- }
-
- return limit;
-}
-
-/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
-static void mdfld_clock(int refclk, struct mrst_clock_t *clock)
-{
- clock->dot = (refclk * clock->m) / clock->p1;
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The divisor values are the actual divisors used in the
- * clock equation dot = (refclk * m) / p1.
- */
-static bool
-mdfldFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
- struct mrst_clock_t *best_clock)
-{
- struct mrst_clock_t clock;
- const struct mrst_limit_t *limit = mdfld_limit(crtc);
- int err = target;
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
- for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
- clock.p1++) {
- int this_err;
-
- mdfld_clock(refclk, &clock);
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- }
- }
- }
- return err != target;
-}
-
-static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct drm_psb_private *dev_priv = dev->dev_private;
- int pipe = gma_crtc->pipe;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- int refclk = 0;
- int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
- clk_tmp = 0;
- struct mrst_clock_t clock;
- bool ok;
- u32 dpll = 0, fp = 0;
- bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct gma_encoder *gma_encoder = NULL;
- uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- int timeout = 0;
- int ret;
-
- dev_dbg(dev->dev, "pipe = 0x%x\n", pipe);
-
- ret = check_fb(crtc->primary->fb);
- if (ret)
- return ret;
-
- dev_dbg(dev->dev, "adjusted_hdisplay = %d\n",
- adjusted_mode->hdisplay);
- dev_dbg(dev->dev, "adjusted_vdisplay = %d\n",
- adjusted_mode->vdisplay);
- dev_dbg(dev->dev, "adjusted_hsync_start = %d\n",
- adjusted_mode->hsync_start);
- dev_dbg(dev->dev, "adjusted_hsync_end = %d\n",
- adjusted_mode->hsync_end);
- dev_dbg(dev->dev, "adjusted_htotal = %d\n",
- adjusted_mode->htotal);
- dev_dbg(dev->dev, "adjusted_vsync_start = %d\n",
- adjusted_mode->vsync_start);
- dev_dbg(dev->dev, "adjusted_vsync_end = %d\n",
- adjusted_mode->vsync_end);
- dev_dbg(dev->dev, "adjusted_vtotal = %d\n",
- adjusted_mode->vtotal);
- dev_dbg(dev->dev, "adjusted_clock = %d\n",
- adjusted_mode->clock);
- dev_dbg(dev->dev, "hdisplay = %d\n",
- mode->hdisplay);
- dev_dbg(dev->dev, "vdisplay = %d\n",
- mode->vdisplay);
-
- if (!gma_power_begin(dev, true))
- return 0;
-
- memcpy(&gma_crtc->saved_mode, mode,
- sizeof(struct drm_display_mode));
- memcpy(&gma_crtc->saved_adjusted_mode, adjusted_mode,
- sizeof(struct drm_display_mode));
-
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- encoder = connector->encoder;
- if (!encoder)
- continue;
-
- if (encoder->crtc != crtc)
- continue;
-
- gma_encoder = gma_attached_encoder(connector);
-
- switch (gma_encoder->type) {
- case INTEL_OUTPUT_MIPI:
- is_mipi = true;
- break;
- case INTEL_OUTPUT_MIPI2:
- is_mipi2 = true;
- break;
- case INTEL_OUTPUT_HDMI:
- is_hdmi = true;
- break;
- }
- }
-
- /* Disable the VGA plane that we never use */
- REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-
- /* Disable the panel fitter if it was on our pipe */
- if (psb_intel_panel_fitter_pipe(dev) == pipe)
- REG_WRITE(PFIT_CONTROL, 0);
-
- /* pipesrc and dspsize control the size that is scaled from,
- * which should always be the user's requested size.
- */
- if (pipe == 1) {
- /* FIXME: To make HDMI display with 864x480 (TPO), 480x864
- * (PYR) or 480x854 (TMD), set the sprite width/height and
-	 * source image size registers with the adjusted mode for
- * pipe B.
- */
-
- /*
- * The defined sprite rectangle must always be completely
- * contained within the displayable area of the screen image
- * (frame buffer).
- */
- REG_WRITE(map->size, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
- | (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
- /* Set the CRTC with encoder mode. */
- REG_WRITE(map->src, ((mode->crtc_hdisplay - 1) << 16)
- | (mode->crtc_vdisplay - 1));
- } else {
- REG_WRITE(map->size,
- ((mode->crtc_vdisplay - 1) << 16) |
- (mode->crtc_hdisplay - 1));
- REG_WRITE(map->src,
- ((mode->crtc_hdisplay - 1) << 16) |
- (mode->crtc_vdisplay - 1));
- }
-
- REG_WRITE(map->pos, 0);
-
- if (gma_encoder)
- drm_object_property_get_value(&connector->base,
- dev->mode_config.scaling_mode_property, &scalingType);
-
- if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
- /* Medfield doesn't have register support for centering so we
- * need to mess with the h/vblank and h/vsync start and ends
- * to get centering
- */
- int offsetX = 0, offsetY = 0;
-
- offsetX = (adjusted_mode->crtc_hdisplay -
- mode->crtc_hdisplay) / 2;
- offsetY = (adjusted_mode->crtc_vdisplay -
- mode->crtc_vdisplay) / 2;
-
- REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start -
- offsetX - 1) |
- ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
- REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start -
- offsetX - 1) |
- ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
- REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start -
- offsetY - 1) |
- ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
- REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start -
- offsetY - 1) |
- ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
- } else {
- REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
- ((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
- ((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
- ((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
- ((adjusted_mode->crtc_vsync_end - 1) << 16));
- }
-
- /* Flush the plane changes */
- {
- const struct drm_crtc_helper_funcs *crtc_funcs =
- crtc->helper_private;
- crtc_funcs->mode_set_base(crtc, x, y, old_fb);
- }
-
- /* setup pipeconf */
- dev_priv->pipeconf[pipe] = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
-
- /* Set up the display plane register */
- dev_priv->dspcntr[pipe] = REG_READ(map->cntr);
- dev_priv->dspcntr[pipe] |= pipe << DISPPLANE_SEL_PIPE_POS;
- dev_priv->dspcntr[pipe] |= DISPLAY_PLANE_ENABLE;
-
- if (is_mipi2)
- goto mrst_crtc_mode_set_exit;
- clk = adjusted_mode->clock;
-
- if (is_hdmi) {
- if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) {
- refclk = 19200;
-
- if (is_mipi || is_mipi2)
- clk_n = 1, clk_p2 = 8;
- else if (is_hdmi)
- clk_n = 1, clk_p2 = 10;
- } else if (ksel == KSEL_BYPASS_25) {
- refclk = 25000;
-
- if (is_mipi || is_mipi2)
- clk_n = 1, clk_p2 = 8;
- else if (is_hdmi)
- clk_n = 1, clk_p2 = 10;
- } else if ((ksel == KSEL_BYPASS_83_100) &&
- dev_priv->core_freq == 166) {
- refclk = 83000;
-
- if (is_mipi || is_mipi2)
- clk_n = 4, clk_p2 = 8;
- else if (is_hdmi)
- clk_n = 4, clk_p2 = 10;
- } else if ((ksel == KSEL_BYPASS_83_100) &&
- (dev_priv->core_freq == 100 ||
- dev_priv->core_freq == 200)) {
- refclk = 100000;
- if (is_mipi || is_mipi2)
- clk_n = 4, clk_p2 = 8;
- else if (is_hdmi)
- clk_n = 4, clk_p2 = 10;
- }
-
- if (is_mipi)
- clk_byte = dev_priv->bpp / 8;
- else if (is_mipi2)
- clk_byte = dev_priv->bpp2 / 8;
-
- clk_tmp = clk * clk_n * clk_p2 * clk_byte;
-
- dev_dbg(dev->dev, "clk = %d, clk_n = %d, clk_p2 = %d.\n",
- clk, clk_n, clk_p2);
- dev_dbg(dev->dev, "adjusted_mode->clock = %d, clk_tmp = %d.\n",
- adjusted_mode->clock, clk_tmp);
-
- ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock);
-
- if (!ok) {
- DRM_ERROR
- ("mdfldFindBestPLL fail in mdfld_crtc_mode_set.\n");
- } else {
- m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)];
-
- dev_dbg(dev->dev, "dot clock = %d,"
- "m = %d, p1 = %d, m_conv = %d.\n",
- clock.dot, clock.m,
- clock.p1, m_conv);
- }
-
- dpll = REG_READ(map->dpll);
-
- if (dpll & DPLL_VCO_ENABLE) {
- dpll &= ~DPLL_VCO_ENABLE;
- REG_WRITE(map->dpll, dpll);
- REG_READ(map->dpll);
-
- /* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
-
- /* reset M1, N1 & P1 */
- REG_WRITE(map->fp0, 0);
- dpll &= ~MDFLD_P1_MASK;
- REG_WRITE(map->dpll, dpll);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
- }
-
-	/* When ungating the DPLL power, we need to wait 0.5us before
-	 * enabling the VCO */
- if (dpll & MDFLD_PWR_GATE_EN) {
- dpll &= ~MDFLD_PWR_GATE_EN;
- REG_WRITE(map->dpll, dpll);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
- }
- dpll = 0;
-
- if (is_hdmi)
- dpll |= MDFLD_VCO_SEL;
-
- fp = (clk_n / 2) << 16;
- fp |= m_conv;
-
- /* compute bitmask from p1 value */
- dpll |= (1 << (clock.p1 - 2)) << 17;
-
- } else {
- dpll = 0x00800000;
- fp = 0x000000c1;
- }
-
- REG_WRITE(map->fp0, fp);
- REG_WRITE(map->dpll, dpll);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
-
- dpll |= DPLL_VCO_ENABLE;
- REG_WRITE(map->dpll, dpll);
- REG_READ(map->dpll);
-
- /* wait for DSI PLL to lock */
- while (timeout < 20000 &&
- !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
- udelay(150);
- timeout++;
- }
-
- if (is_mipi)
- goto mrst_crtc_mode_set_exit;
-
- dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi);
-
- REG_WRITE(map->conf, dev_priv->pipeconf[pipe]);
- REG_READ(map->conf);
-
-	/* Wait for the pipe enable to take effect. */
- REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]);
- gma_wait_for_vblank(dev);
-
-mrst_crtc_mode_set_exit:
-
- gma_power_end(dev);
-
- return 0;
-}
-
-const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
- .dpms = mdfld_crtc_dpms,
- .mode_set = mdfld_crtc_mode_set,
- .mode_set_base = mdfld__intel_pipe_set_base,
- .prepare = gma_crtc_prepare,
- .commit = gma_crtc_commit,
-};
diff --git a/drivers/gpu/drm/gma500/mdfld_output.c b/drivers/gpu/drm/gma500/mdfld_output.c
deleted file mode 100644
index c95966bb0c96..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_output.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Eaton <thomas.g.eaton@intel.com>
- * Scott Rowe <scott.m.rowe@intel.com>
-*/
-
-#include "mdfld_output.h"
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_output.h"
-
-#include "tc35876x-dsi-lvds.h"
-
-int mdfld_get_panel_type(struct drm_device *dev, int pipe)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- return dev_priv->mdfld_panel_id;
-}
-
-static void mdfld_init_panel(struct drm_device *dev, int mipi_pipe,
- int p_type)
-{
- switch (p_type) {
- case TPO_VID:
- mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tpo_vid_funcs);
- break;
- case TC35876X:
- tc35876x_init(dev);
- mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tc35876x_funcs);
- break;
- case TMD_VID:
- mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tmd_vid_funcs);
- break;
- case HDMI:
-/* if (dev_priv->mdfld_hdmi_present)
- mdfld_hdmi_init(dev, &dev_priv->mode_dev); */
- break;
- }
-}
-
-
-int mdfld_output_init(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- /* FIXME: hardcoded for now */
- dev_priv->mdfld_panel_id = TC35876X;
- /* MIPI panel 1 */
- mdfld_init_panel(dev, 0, dev_priv->mdfld_panel_id);
- /* HDMI panel */
- mdfld_init_panel(dev, 1, HDMI);
- return 0;
-}
-
diff --git a/drivers/gpu/drm/gma500/mdfld_output.h b/drivers/gpu/drm/gma500/mdfld_output.h
deleted file mode 100644
index 37a516cc56be..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_output.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Eaton <thomas.g.eaton@intel.com>
- * Scott Rowe <scott.m.rowe@intel.com>
-*/
-
-#ifndef MDFLD_OUTPUT_H
-#define MDFLD_OUTPUT_H
-
-#include "psb_drv.h"
-
-#define TPO_PANEL_WIDTH 84
-#define TPO_PANEL_HEIGHT 46
-#define TMD_PANEL_WIDTH 39
-#define TMD_PANEL_HEIGHT 71
-
-struct mdfld_dsi_config;
-
-enum panel_type {
- TPO_VID,
- TMD_VID,
- HDMI,
- TC35876X,
-};
-
-struct panel_info {
- u32 width_mm;
- u32 height_mm;
- /* Other info */
-};
-
-struct panel_funcs {
- const struct drm_encoder_helper_funcs *encoder_helper_funcs;
- struct drm_display_mode * (*get_config_mode)(struct drm_device *);
- int (*get_panel_info)(struct drm_device *, int, struct panel_info *);
- int (*reset)(struct drm_device *, int);
- void (*drv_ic_init)(struct mdfld_dsi_config *dsi_config, int pipe);
-};
-
-int mdfld_output_init(struct drm_device *dev);
-
-struct backlight_device *mdfld_get_backlight_device(void);
-int mdfld_set_brightness(struct backlight_device *bd);
-
-int mdfld_get_panel_type(struct drm_device *dev, int pipe);
-
-extern const struct drm_crtc_helper_funcs mdfld_helper_funcs;
-
-extern const struct panel_funcs mdfld_tmd_vid_funcs;
-extern const struct panel_funcs mdfld_tpo_vid_funcs;
-
-extern void mdfld_disable_crtc(struct drm_device *dev, int pipe);
-extern void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe);
-extern void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe);
-#endif
diff --git a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
deleted file mode 100644
index 25e897b98f86..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Jim Liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- * Gideon Eaton <eaton.
- * Scott Rowe <scott.m.rowe@intel.com>
- */
-
-#include <linux/delay.h>
-
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_pkg_sender.h"
-
-static struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev)
-{
- struct drm_display_mode *mode;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
- bool use_gct = false; /*Disable GCT for now*/
-
- mode = kzalloc(sizeof(*mode), GFP_KERNEL);
- if (!mode)
- return NULL;
-
- if (use_gct) {
- mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
- mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
- mode->hsync_start = mode->hdisplay + \
- ((ti->hsync_offset_hi << 8) | \
- ti->hsync_offset_lo);
- mode->hsync_end = mode->hsync_start + \
- ((ti->hsync_pulse_width_hi << 8) | \
- ti->hsync_pulse_width_lo);
- mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
- ti->hblank_lo);
- mode->vsync_start = \
- mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
- ti->vsync_offset_lo);
- mode->vsync_end = \
- mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
- ti->vsync_pulse_width_lo);
- mode->vtotal = mode->vdisplay + \
- ((ti->vblank_hi << 8) | ti->vblank_lo);
- mode->clock = ti->pixel_clock * 10;
-
- dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
- dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
- dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
- dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
- dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
- dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
- dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
- dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
- dev_dbg(dev->dev, "clock is %d\n", mode->clock);
- } else {
- mode->hdisplay = 480;
- mode->vdisplay = 854;
- mode->hsync_start = 487;
- mode->hsync_end = 490;
- mode->htotal = 499;
- mode->vsync_start = 861;
- mode->vsync_end = 865;
- mode->vtotal = 873;
- mode->clock = 33264;
- }
-
- drm_mode_set_name(mode);
- drm_mode_set_crtcinfo(mode, 0);
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
-
- return mode;
-}
-
-static int tmd_vid_get_panel_info(struct drm_device *dev,
- int pipe,
- struct panel_info *pi)
-{
- if (!dev || !pi)
- return -EINVAL;
-
- pi->width_mm = TMD_PANEL_WIDTH;
- pi->height_mm = TMD_PANEL_HEIGHT;
-
- return 0;
-}
-
-/* ************************************************************************* *\
- * FUNCTION: mdfld_init_TMD_MIPI
- *
- * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
- * restore_display_registers. since this function does not
- *              restore_display_registers. Since this function does not
- * does!
-\* ************************************************************************* */
-
-/* FIXME: make the below data u8 instead of u32; note byte order! */
-static u32 tmd_cmd_mcap_off[] = {0x000000b2};
-static u32 tmd_cmd_enable_lane_switch[] = {0x000101ef};
-static u32 tmd_cmd_set_lane_num[] = {0x006360ef};
-static u32 tmd_cmd_pushing_clock0[] = {0x00cc2fef};
-static u32 tmd_cmd_pushing_clock1[] = {0x00dd6eef};
-static u32 tmd_cmd_set_mode[] = {0x000000b3};
-static u32 tmd_cmd_set_sync_pulse_mode[] = {0x000961ef};
-static u32 tmd_cmd_set_column[] = {0x0100002a, 0x000000df};
-static u32 tmd_cmd_set_page[] = {0x0300002b, 0x00000055};
-static u32 tmd_cmd_set_video_mode[] = {0x00000153};
-/* no auto_bl, need to add in future */
-static u32 tmd_cmd_enable_backlight[] = {0x00005ab4};
-static u32 tmd_cmd_set_backlight_dimming[] = {0x00000ebd};
-
-static void mdfld_dsi_tmd_drv_ic_init(struct mdfld_dsi_config *dsi_config,
- int pipe)
-{
- struct mdfld_dsi_pkg_sender *sender
- = mdfld_dsi_get_pkg_sender(dsi_config);
-
- DRM_INFO("Enter mdfld init TMD MIPI display.\n");
-
- if (!sender) {
- DRM_ERROR("Cannot get sender\n");
- return;
- }
-
- if (dsi_config->dvr_ic_inited)
- return;
-
- msleep(3);
-
- /* FIXME: make the below data u8 instead of u32; note byte order! */
-
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_mcap_off,
- sizeof(tmd_cmd_mcap_off), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_lane_switch,
- sizeof(tmd_cmd_enable_lane_switch), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_lane_num,
- sizeof(tmd_cmd_set_lane_num), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock0,
- sizeof(tmd_cmd_pushing_clock0), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock1,
- sizeof(tmd_cmd_pushing_clock1), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_mode,
- sizeof(tmd_cmd_set_mode), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_sync_pulse_mode,
- sizeof(tmd_cmd_set_sync_pulse_mode), false);
- mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_column,
- sizeof(tmd_cmd_set_column), false);
- mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_page,
- sizeof(tmd_cmd_set_page), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_video_mode,
- sizeof(tmd_cmd_set_video_mode), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_backlight,
- sizeof(tmd_cmd_enable_backlight), false);
- mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_backlight_dimming,
- sizeof(tmd_cmd_set_backlight_dimming), false);
-
- dsi_config->dvr_ic_inited = 1;
-}
-
-/*TPO DPI encoder helper funcs*/
-static const struct drm_encoder_helper_funcs
- mdfld_tpo_dpi_encoder_helper_funcs = {
- .dpms = mdfld_dsi_dpi_dpms,
- .mode_fixup = mdfld_dsi_dpi_mode_fixup,
- .prepare = mdfld_dsi_dpi_prepare,
- .mode_set = mdfld_dsi_dpi_mode_set,
- .commit = mdfld_dsi_dpi_commit,
-};
-
-const struct panel_funcs mdfld_tmd_vid_funcs = {
- .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
- .get_config_mode = &tmd_vid_get_config_mode,
- .get_panel_info = tmd_vid_get_panel_info,
- .reset = mdfld_dsi_panel_reset,
- .drv_ic_init = mdfld_dsi_tmd_drv_ic_init,
-};
diff --git a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
deleted file mode 100644
index 11845978fb0a..000000000000
--- a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#include "mdfld_dsi_dpi.h"
-
-static struct drm_display_mode *tpo_vid_get_config_mode(struct drm_device *dev)
-{
- struct drm_display_mode *mode;
-
- mode = kzalloc(sizeof(*mode), GFP_KERNEL);
- if (!mode)
- return NULL;
-
- mode->hdisplay = 864;
- mode->vdisplay = 480;
- mode->hsync_start = 873;
- mode->hsync_end = 876;
- mode->htotal = 887;
- mode->vsync_start = 487;
- mode->vsync_end = 490;
- mode->vtotal = 499;
- mode->clock = 33264;
-
- drm_mode_set_name(mode);
- drm_mode_set_crtcinfo(mode, 0);
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
-
- return mode;
-}
-
-static int tpo_vid_get_panel_info(struct drm_device *dev,
- int pipe,
- struct panel_info *pi)
-{
- if (!dev || !pi)
- return -EINVAL;
-
- pi->width_mm = TPO_PANEL_WIDTH;
- pi->height_mm = TPO_PANEL_HEIGHT;
-
- return 0;
-}
-
-/*TPO DPI encoder helper funcs*/
-static const struct drm_encoder_helper_funcs
- mdfld_tpo_dpi_encoder_helper_funcs = {
- .dpms = mdfld_dsi_dpi_dpms,
- .mode_fixup = mdfld_dsi_dpi_mode_fixup,
- .prepare = mdfld_dsi_dpi_prepare,
- .mode_set = mdfld_dsi_dpi_mode_set,
- .commit = mdfld_dsi_dpi_commit,
-};
-
-const struct panel_funcs mdfld_tpo_vid_funcs = {
- .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
- .get_config_mode = &tpo_vid_get_config_mode,
- .get_panel_info = tpo_vid_get_panel_info,
-};
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 8ab44fec4bfa..68e787924ed0 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -19,8 +19,9 @@
static void mid_get_fuse_settings(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct pci_dev *pci_root =
- pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+ pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
0, 0);
uint32_t fuse_value = 0;
uint32_t fuse_value_tmp = 0;
@@ -93,7 +94,8 @@ static void mid_get_fuse_settings(struct drm_device *dev)
static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
{
uint32_t platform_rev_id = 0;
- int domain = pci_domain_nr(dev_priv->dev->pdev->bus);
+ struct pci_dev *pdev = to_pci_dev(dev_priv->dev->dev);
+ int domain = pci_domain_nr(pdev->bus);
struct pci_dev *pci_gfx_root =
pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(2, 0));
@@ -269,11 +271,12 @@ out:
static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 addr;
u8 __iomem *vbt_virtual;
struct mid_vbt_header vbt_header;
struct pci_dev *pci_gfx_root =
- pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+ pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
0, PCI_DEVFN(2, 0));
int ret = -1;
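The hunks above and below repeat one conversion: the driver no longer caches the PCI device in drm_device::pdev but derives it from the embedded struct device via to_pci_dev() whenever it is needed. A minimal sketch of the pattern, assuming only the generic drm_device/pci_dev relationship (the helper name gma_example_pci_revision is illustrative, not part of the driver):

#include <linux/pci.h>
#include <drm/drm_device.h>

/*
 * Illustrative only: recover the PCI device from the DRM device instead
 * of relying on the removed drm_device::pdev field, then read a field
 * from it (here the PCI revision ID).
 */
static u8 gma_example_pci_revision(struct drm_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	return pdev->revision;
}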
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index 505044c9a673..d856580b8111 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -48,7 +48,6 @@ static inline uint32_t psb_mmu_pd_index(uint32_t offset)
return offset >> PSB_PDE_SHIFT;
}
-#if defined(CONFIG_X86)
static inline void psb_clflush(void *addr)
{
__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
@@ -63,13 +62,6 @@ static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
psb_clflush(addr);
mb();
}
-#else
-
-static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
-{;
-}
-
-#endif
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
@@ -293,7 +285,6 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
*ptes++ = pd->invalid_pte;
-#if defined(CONFIG_X86)
if (pd->driver->has_clflush && pd->hw_context != -1) {
mb();
for (i = 0; i < clflush_count; ++i) {
@@ -302,7 +293,6 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
}
mb();
}
-#endif
kunmap_atomic(v);
spin_unlock(lock);
@@ -313,8 +303,8 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
return pt;
}
-struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
- unsigned long addr)
+static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+ unsigned long addr)
{
uint32_t index = psb_mmu_pd_index(addr);
struct psb_mmu_pt *pt;
@@ -416,15 +406,6 @@ struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
return pd;
}
-/* Returns the physical address of the PD shared by sgx/msvdx */
-uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
-{
- struct psb_mmu_pd *pd;
-
- pd = psb_mmu_get_default_pd(driver);
- return page_to_pfn(pd->p) << PAGE_SHIFT;
-}
-
void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
struct drm_device *dev = driver->dev;
@@ -468,7 +449,6 @@ struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
driver->has_clflush = 0;
-#if defined(CONFIG_X86)
if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
uint32_t tfms, misc, cap0, cap4, clflush_size;
@@ -485,7 +465,6 @@ struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
driver->clflush_mask = driver->clflush_add - 1;
driver->clflush_mask = ~driver->clflush_mask;
}
-#endif
up_write(&driver->sem);
return driver;
@@ -495,7 +474,6 @@ out_err1:
return NULL;
}
-#if defined(CONFIG_X86)
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
uint32_t num_pages, uint32_t desired_tile_stride,
uint32_t hw_tile_stride)
@@ -543,14 +521,6 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
}
mb();
}
-#else
-static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
- uint32_t num_pages, uint32_t desired_tile_stride,
- uint32_t hw_tile_stride)
-{
- drm_ttm_cache_flush();
-}
-#endif
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
unsigned long address, uint32_t num_pages)
@@ -690,7 +660,7 @@ out:
if (pd->hw_context != -1)
psb_mmu_flush(pd->driver);
- return 0;
+ return ret;
}
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 900e5499249d..129f87971002 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -174,7 +174,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
return min_error == 0;
}
-/**
+/*
* Returns a set of divisors for the desired target clock with the given refclk,
* or FALSE. Divisor values are the actual divisors for
*/
@@ -205,7 +205,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
return err != target;
}
-/**
+/*
* Sets the power management mode of the pipe and plane.
*
* This code should probably grow support for turning the cursor off and back
@@ -337,7 +337,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
gma_power_end(dev);
}
-/**
+/*
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 8754290b0e23..aff0534831ef 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -10,9 +10,6 @@
#include <linux/dmi.h>
#include <linux/module.h>
-#include <asm/intel-mid.h>
-#include <asm/intel_scu_ipc.h>
-
#include <drm/drm.h>
#include "intel_bios.h"
@@ -504,9 +501,10 @@ static const struct psb_offset oaktrail_regmap[2] = {
static int oaktrail_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
-
- if (pci_enable_msi(dev->pdev))
+
+ if (pci_enable_msi(pdev))
dev_warn(dev->dev, "Enabling MSI failed!\n");
dev_priv->regmap = oaktrail_regmap;
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index e28107061148..fc9a34ed58bd 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -279,11 +279,8 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
hdmi_dev = pci_get_drvdata(dev);
i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
- if (i2c_dev == NULL) {
- DRM_ERROR("Can't allocate interface\n");
- ret = -ENOMEM;
- goto exit;
- }
+ if (!i2c_dev)
+ return -ENOMEM;
i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
i2c_dev->status = I2C_STAT_INIT;
@@ -300,16 +297,23 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
if (ret) {
DRM_ERROR("Failed to request IRQ for I2C controller\n");
- goto err;
+ goto free_dev;
}
/* Adapter registration */
ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
- return ret;
+ if (ret) {
+ DRM_ERROR("Failed to add I2C adapter\n");
+ goto free_irq;
+ }
-err:
+ return 0;
+
+free_irq:
+ free_irq(dev->irq, hdmi_dev);
+free_dev:
kfree(i2c_dev);
-exit:
+
return ret;
}
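The reworked error handling in oaktrail_hdmi_i2c_init() above follows the usual kernel goto-unwind idiom: each failure jumps to a label that releases only what was acquired before it, in reverse order. A generic, self-contained sketch of the idiom (all names here are illustrative and not taken from the driver):

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>

/* Illustrative context; the real driver uses struct hdmi_i2c_dev. */
struct example_ctx {
	int status;
};

static irqreturn_t example_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

/* Stand-in for a later registration step that can fail. */
static int example_register(struct example_ctx *ctx)
{
	return 0;
}

static int example_init(struct pci_dev *pdev)
{
	struct example_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ret = request_irq(pdev->irq, example_irq_handler, IRQF_SHARED,
			  "example-i2c", ctx);
	if (ret)
		goto free_ctx;

	ret = example_register(ctx);
	if (ret)
		goto free_irq;

	return 0;

free_irq:
	free_irq(pdev->irq, ctx);
free_ctx:
	kfree(ctx);
	return ret;
}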
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 2828360153d1..432bdcc57ac9 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -29,7 +29,7 @@
#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
#define BRIGHTNESS_MAX_LEVEL 100
-/**
+/*
* Sets the power state for the panel.
*/
static void oaktrail_lvds_set_power(struct drm_device *dev,
@@ -60,7 +60,7 @@ static void oaktrail_lvds_set_power(struct drm_device *dev,
pp_status = REG_READ(PP_STATUS);
} while (pp_status & PP_ON);
dev_priv->is_lvds_on = false;
- pm_request_idle(&dev->pdev->dev);
+ pm_request_idle(dev->dev);
}
gma_power_end(dev);
}
@@ -282,6 +282,7 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
/**
* oaktrail_lvds_init - setup LVDS connectors on this device
* @dev: drm device
+ * @mode_dev: PSB mode device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
index baaf8212e01d..1d2dd6ea1c71 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
@@ -66,12 +66,12 @@
static int get_clock(void *data)
{
struct psb_intel_i2c_chan *chan = data;
- u32 val, tmp;
+ u32 val;
val = LPC_READ_REG(chan, RGIO);
val |= GPIO_CLOCK;
LPC_WRITE_REG(chan, RGIO, val);
- tmp = LPC_READ_REG(chan, RGLVL);
+ LPC_READ_REG(chan, RGLVL);
val = (LPC_READ_REG(chan, RGLVL) & GPIO_CLOCK) ? 1 : 0;
return val;
@@ -80,12 +80,12 @@ static int get_clock(void *data)
static int get_data(void *data)
{
struct psb_intel_i2c_chan *chan = data;
- u32 val, tmp;
+ u32 val;
val = LPC_READ_REG(chan, RGIO);
val |= GPIO_DATA;
LPC_WRITE_REG(chan, RGIO, val);
- tmp = LPC_READ_REG(chan, RGLVL);
+ LPC_READ_REG(chan, RGLVL);
val = (LPC_READ_REG(chan, RGLVL) & GPIO_DATA) ? 1 : 0;
return val;
@@ -145,7 +145,7 @@ void oaktrail_lvds_i2c_init(struct drm_encoder *encoder)
strncpy(chan->adapter.name, "gma500 LPC", I2C_NAME_SIZE - 1);
chan->adapter.owner = THIS_MODULE;
chan->adapter.algo_data = &chan->algo;
- chan->adapter.dev.parent = &dev->pdev->dev;
+ chan->adapter.dev.parent = dev->dev;
chan->algo.setsda = set_data;
chan->algo.setscl = set_clock;
chan->algo.getsda = get_data;
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index eab6d889bde9..a1ffc6a1c255 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -305,12 +305,13 @@ void psb_intel_opregion_fini(struct drm_device *dev)
int psb_intel_opregion_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_intel_opregion *opregion = &dev_priv->opregion;
u32 opregion_phy, mboxes;
void __iomem *base;
int err = 0;
- pci_read_config_dword(dev->pdev, PCI_ASLS, &opregion_phy);
+ pci_read_config_dword(pdev, PCI_ASLS, &opregion_phy);
if (opregion_phy == 0) {
DRM_DEBUG_DRIVER("ACPI Opregion not supported\n");
return -ENOTSUPP;
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
index bea8578846d1..56ef88237ef6 100644
--- a/drivers/gpu/drm/gma500/power.c
+++ b/drivers/gpu/drm/gma500/power.c
@@ -70,8 +70,8 @@ void gma_power_init(struct drm_device *dev)
*/
void gma_power_uninit(struct drm_device *dev)
{
- pm_runtime_disable(&dev->pdev->dev);
- pm_runtime_set_suspended(&dev->pdev->dev);
+ pm_runtime_disable(dev->dev);
+ pm_runtime_set_suspended(dev->dev);
}
/**
@@ -93,6 +93,7 @@ static void gma_suspend_display(struct drm_device *dev)
/**
* gma_resume_display - resume display side logic
+ * @pdev: PCI device
*
* Resume the display hardware restoring state and enabling
* as necessary.
@@ -146,7 +147,7 @@ static void gma_suspend_pci(struct pci_dev *pdev)
/**
* gma_resume_pci - resume helper
- * @dev: our PCI device
+ * @pdev: our PCI device
*
* Perform the resume processing on our PCI device state - rewrite
* register state and re-enable the PCI device
@@ -178,8 +179,7 @@ static bool gma_resume_pci(struct pci_dev *pdev)
/**
* gma_power_suspend - bus callback for suspend
- * @pdev: our PCI device
- * @state: suspend type
+ * @_dev: our device
*
* Called back by the PCI layer during a suspend of the system. We
* perform the necessary shut down steps and save enough state that
@@ -208,7 +208,7 @@ int gma_power_suspend(struct device *_dev)
/**
* gma_power_resume - resume power
- * @pdev: PCI device
+ * @_dev: our device
*
* Resume the PCI side of the graphics and then the displays
*/
@@ -249,6 +249,7 @@ bool gma_power_is_on(struct drm_device *dev)
bool gma_power_begin(struct drm_device *dev, bool force_on)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
unsigned long flags;
@@ -256,7 +257,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
/* Power already on ? */
if (dev_priv->display_power) {
dev_priv->display_count++;
- pm_runtime_get(&dev->pdev->dev);
+ pm_runtime_get(dev->dev);
spin_unlock_irqrestore(&power_ctrl_lock, flags);
return true;
}
@@ -264,11 +265,11 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
goto out_false;
/* Ok power up needed */
- ret = gma_resume_pci(dev->pdev);
+ ret = gma_resume_pci(pdev);
if (ret == 0) {
psb_irq_preinstall(dev);
psb_irq_postinstall(dev);
- pm_runtime_get(&dev->pdev->dev);
+ pm_runtime_get(dev->dev);
dev_priv->display_count++;
spin_unlock_irqrestore(&power_ctrl_lock, flags);
return true;
@@ -293,7 +294,7 @@ void gma_power_end(struct drm_device *dev)
dev_priv->display_count--;
WARN_ON(dev_priv->display_count < 0);
spin_unlock_irqrestore(&power_ctrl_lock, flags);
- pm_runtime_put(&dev->pdev->dev);
+ pm_runtime_put(dev->dev);
}
int psb_runtime_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index cc2d59e8471d..0bcab065242c 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -46,13 +46,12 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
* PowerVR SGX535 - Poulsbo - Intel GMA 500, Intel Atom Z5xx
* PowerVR SGX535 - Moorestown - Intel GMA 600
* PowerVR SGX535 - Oaktrail - Intel GMA 600, Intel Atom Z6xx, E6xx
- * PowerVR SGX540 - Medfield - Intel Atom Z2460
- * PowerVR SGX544MP2 - Medfield -
* PowerVR SGX545 - Cedartrail - Intel GMA 3600, Intel Atom D2500, N2600
* PowerVR SGX545 - Cedartrail - Intel GMA 3650, Intel Atom D2550, D2700,
* N2800
*/
static const struct pci_device_id pciidlist[] = {
+ /* Poulsbo */
{ 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
{ 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
#if defined(CONFIG_DRM_GMA600)
@@ -66,17 +65,7 @@ static const struct pci_device_id pciidlist[] = {
{ 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
#endif
-#if defined(CONFIG_DRM_MEDFIELD)
- { 0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
- { 0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
- { 0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
- { 0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
- { 0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
- { 0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
- { 0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
- { 0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops },
-#endif
-#if defined(CONFIG_DRM_GMA3600)
+ /* Cedartrail */
{ 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
@@ -93,7 +82,6 @@ static const struct pci_device_id pciidlist[] = {
{ 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
-#endif
{ 0, }
};
MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -208,6 +196,7 @@ static void psb_driver_unload(struct drm_device *dev)
static int psb_driver_load(struct drm_device *dev, unsigned long flags)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct drm_psb_private *dev_priv;
unsigned long resource_start, resource_len;
unsigned long irqflags;
@@ -227,11 +216,11 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
pg = &dev_priv->gtt;
- pci_set_master(dev->pdev);
+ pci_set_master(pdev);
dev_priv->num_pipe = dev_priv->ops->pipes;
- resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
+ resource_start = pci_resource_start(pdev, PSB_MMIO_RESOURCE);
dev_priv->vdc_reg =
ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
@@ -244,7 +233,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
goto out_err;
if (IS_MRST(dev)) {
- int domain = pci_domain_nr(dev->pdev->bus);
+ int domain = pci_domain_nr(pdev->bus);
dev_priv->aux_pdev =
pci_get_domain_bus_and_slot(domain, 0,
@@ -312,6 +301,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_err;
+ ret = -ENOMEM;
+
dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0);
if (!dev_priv->mmu)
goto out_err;
@@ -359,7 +350,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
- drm_irq_install(dev, dev->pdev->irq);
+ drm_irq_install(dev, pdev->irq);
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
@@ -385,8 +376,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
psb_intel_opregion_enable_asle(dev);
#if 0
/* Enable runtime pm at last */
- pm_runtime_enable(&dev->pdev->dev);
- pm_runtime_set_active(&dev->pdev->dev);
+ pm_runtime_enable(dev->dev);
+ pm_runtime_set_active(dev->dev);
#endif
/* Intel drm driver load is done, continue doing pvr load */
return 0;
@@ -415,7 +406,7 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
runtime_allowed++;
- pm_runtime_allow(&dev->pdev->dev);
+ pm_runtime_allow(dev->dev);
dev_priv->rpm_enabled = 1;
}
return drm_ioctl(filp, cmd, arg);
@@ -437,7 +428,6 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_disable_device;
}
- dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
ret = psb_driver_load(dev, ent->driver_data);
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 5b7f7a312d53..694495070c65 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -40,19 +40,16 @@ enum {
CHIP_PSB_8108 = 0, /* Poulsbo */
CHIP_PSB_8109 = 1, /* Poulsbo */
CHIP_MRST_4100 = 2, /* Moorestown/Oaktrail */
- CHIP_MFLD_0130 = 3, /* Medfield */
};
-#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
-#define IS_MRST(dev) (((dev)->pdev->device & 0xfff0) == 0x4100)
-#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
-#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
+#define IS_PSB(drm) ((to_pci_dev((drm)->dev)->device & 0xfffe) == 0x8108)
+#define IS_MRST(drm) ((to_pci_dev((drm)->dev)->device & 0xfff0) == 0x4100)
+#define IS_CDV(drm) ((to_pci_dev((drm)->dev)->device & 0xfff0) == 0x0be0)
/* Hardware offsets */
#define PSB_VDC_OFFSET 0x00000000
#define PSB_VDC_SIZE 0x000080000
#define MRST_MMIO_SIZE 0x0000C0000
-#define MDFLD_MMIO_SIZE 0x000100000
#define PSB_SGX_SIZE 0x8000
#define PSB_SGX_OFFSET 0x00040000
#define MRST_SGX_OFFSET 0x00080000
@@ -109,8 +106,6 @@ enum {
#define _PSB_DPST_PIPEA_FLAG (1<<6)
#define _PSB_PIPEA_EVENT_FLAG (1<<6)
#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
-#define _MDFLD_MIPIA_FLAG (1<<16)
-#define _MDFLD_MIPIC_FLAG (1<<17)
#define _PSB_IRQ_DISP_HOTSYNC (1<<17)
#define _PSB_IRQ_SGX_FLAG (1<<18)
#define _PSB_IRQ_MSVDX_FLAG (1<<19)
@@ -119,13 +114,6 @@ enum {
#define _PSB_PIPE_EVENT_FLAG (_PSB_VSYNC_PIPEA_FLAG | \
_PSB_VSYNC_PIPEB_FLAG)
-/* This flag includes all the display IRQ bits excepts the vblank irqs. */
-#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | \
- _MDFLD_PIPEB_EVENT_FLAG | \
- _PSB_PIPEA_EVENT_FLAG | \
- _PSB_VSYNC_PIPEA_FLAG | \
- _MDFLD_MIPIA_FLAG | \
- _MDFLD_MIPIC_FLAG)
#define PSB_INT_IDENTITY_R 0x20A4
#define PSB_INT_MASK_R 0x20A8
#define PSB_INT_ENABLE_R 0x20A0
@@ -191,25 +179,6 @@ enum {
#define PSB_WATCHDOG_DELAY (HZ * 2)
#define PSB_LID_DELAY (HZ / 10)
-#define MDFLD_PNW_B0 0x04
-#define MDFLD_PNW_C0 0x08
-
-#define MDFLD_DSR_2D_3D_0 (1 << 0)
-#define MDFLD_DSR_2D_3D_2 (1 << 1)
-#define MDFLD_DSR_CURSOR_0 (1 << 2)
-#define MDFLD_DSR_CURSOR_2 (1 << 3)
-#define MDFLD_DSR_OVERLAY_0 (1 << 4)
-#define MDFLD_DSR_OVERLAY_2 (1 << 5)
-#define MDFLD_DSR_MIPI_CONTROL (1 << 6)
-#define MDFLD_DSR_DAMAGE_MASK_0 ((1 << 0) | (1 << 2) | (1 << 4))
-#define MDFLD_DSR_DAMAGE_MASK_2 ((1 << 1) | (1 << 3) | (1 << 5))
-#define MDFLD_DSR_2D_3D (MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2)
-
-#define MDFLD_DSR_RR 45
-#define MDFLD_DPU_ENABLE (1 << 31)
-#define MDFLD_DSR_FULLSCREEN (1 << 30)
-#define MDFLD_DSR_DELAY (HZ / MDFLD_DSR_RR)
-
#define PSB_PWR_STATE_ON 1
#define PSB_PWR_STATE_OFF 2
@@ -382,16 +351,6 @@ struct psb_state {
uint32_t savePWM_CONTROL_LOGIC;
};
-struct medfield_state {
- uint32_t saveMIPI;
- uint32_t saveMIPI_C;
-
- uint32_t savePFIT_CONTROL;
- uint32_t savePFIT_PGM_RATIOS;
- uint32_t saveHDMIPHYMISCCTL;
- uint32_t saveHDMIB_CONTROL;
-};
-
struct cdv_state {
uint32_t saveDSPCLK_GATE_D;
uint32_t saveRAMCLK_GATE_D;
@@ -417,7 +376,6 @@ struct psb_save_area {
uint32_t saveVBT;
union {
struct psb_state psb;
- struct medfield_state mdfld;
struct cdv_state cdv;
};
uint32_t saveBLC_PWM_CTL2;
@@ -428,6 +386,8 @@ struct psb_ops;
#define PSB_NUM_PIPE 3
+struct intel_scu_ipc_dev;
+
struct drm_psb_private {
struct drm_device *dev;
struct pci_dev *aux_pdev; /* Currently only used by mrst */
@@ -567,6 +527,7 @@ struct drm_psb_private {
* Used for modifying backlight from
* xrandr -- consider removing and using HAL instead
*/
+ struct intel_scu_ipc_dev *scu;
struct backlight_device *backlight_device;
struct drm_property *backlight_property;
bool backlight_enabled;
@@ -590,8 +551,6 @@ struct drm_psb_private {
u32 pipeconf[3];
u32 dspcntr[3];
- int mdfld_panel_id;
-
bool dplla_96mhz; /* DPLL data from the VBT */
struct {
@@ -737,9 +696,6 @@ extern const struct psb_ops psb_chip_ops;
/* oaktrail_device.c */
extern const struct psb_ops oaktrail_chip_ops;
-/* mdlfd_device.c */
-extern const struct psb_ops mdfld_chip_ops;
-
/* cdv_device.c */
extern const struct psb_ops cdv_chip_ops;
@@ -779,25 +735,6 @@ static inline void MRST_MSG_WRITE32(int domain, uint port, uint offset,
pci_write_config_dword(pci_root, 0xD0, mcr);
pci_dev_put(pci_root);
}
-static inline u32 MDFLD_MSG_READ32(int domain, uint port, uint offset)
-{
- int mcr = (0x10<<24) | (port << 16) | (offset << 8);
- uint32_t ret_val = 0;
- struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
- pci_write_config_dword(pci_root, 0xD0, mcr);
- pci_read_config_dword(pci_root, 0xD4, &ret_val);
- pci_dev_put(pci_root);
- return ret_val;
-}
-static inline void MDFLD_MSG_WRITE32(int domain, uint port, uint offset,
- u32 value)
-{
- int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
- struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
- pci_write_config_dword(pci_root, 0xD4, value);
- pci_write_config_dword(pci_root, 0xD0, mcr);
- pci_dev_put(pci_root);
-}
static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
{
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 531c5485be17..9c3cb1b80bbd 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -71,7 +71,7 @@ static void psb_intel_clock(int refclk, struct gma_clock_t *clock)
clock->dot = clock->vco / clock->p;
}
-/**
+/*
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 063c66bb946d..ace95d4bdb6f 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -216,7 +216,7 @@ static void psb_intel_lvds_set_power(struct drm_device *dev, bool on)
dev_err(dev->dev, "set power, chip off!\n");
return;
}
-
+
if (on) {
REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
POWER_TARGET_ON);
@@ -626,6 +626,7 @@ const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
/**
* psb_intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
+ * @mode_dev: mode device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
@@ -700,7 +701,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
lvds_priv->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
if (!lvds_priv->i2c_bus) {
dev_printk(KERN_ERR,
- &dev->pdev->dev, "I2C bus registration failed.\n");
+ dev->dev, "I2C bus registration failed.\n");
goto failed_blc_i2c;
}
lvds_priv->i2c_bus->slave_addr = 0x2C;
@@ -719,7 +720,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
/* Set up the DDC bus. */
lvds_priv->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
if (!lvds_priv->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev,
+ dev_printk(KERN_ERR, dev->dev,
"DDC bus registration " "failed.\n");
goto failed_ddc;
}
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index 88653a40aeb5..60306780e16c 100644
--- a/drivers/gpu/drm/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -11,7 +11,7 @@
/**
* psb_intel_ddc_probe
- *
+ * @adapter: Associated I2C adaptor
*/
bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
{
@@ -43,6 +43,7 @@ bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
/**
* psb_intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
+ * @adapter: Associated I2C adaptor
*
* Fetch the EDID information from @connector using the DDC bus.
*/
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index 835cc924c45a..364ea8f06f9c 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -595,7 +595,7 @@ struct dpst_guardband {
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
-#define FW_BLC_SELF 0x20e0
+#define FW_BLC_SELF 0x20e0
#define FW_BLC_SELF_EN (1<<15)
#define DSPARB 0x70030
@@ -789,17 +789,9 @@ struct dpst_guardband {
* MOORESTOWN delta registers
*/
#define MRST_DPLL_A 0x0f014
-#define MDFLD_DPLL_B 0x0f018
-#define MDFLD_INPUT_REF_SEL (1 << 14)
-#define MDFLD_VCO_SEL (1 << 16)
#define DPLLA_MODE_LVDS (2 << 26) /* mrst */
-#define MDFLD_PLL_LATCHEN (1 << 28)
-#define MDFLD_PWR_GATE_EN (1 << 30)
-#define MDFLD_P1_MASK (0x1FF << 17)
#define MRST_FPA0 0x0f040
#define MRST_FPA1 0x0f044
-#define MDFLD_DPLL_DIV0 0x0f048
-#define MDFLD_DPLL_DIV1 0x0f04c
#define MRST_PERF_MODE 0x020f4
/*
@@ -848,7 +840,6 @@ struct dpst_guardband {
#define MRST_DSPABASE 0x7019c
#define MRST_DSPBBASE 0x7119c
-#define MDFLD_DSPCBASE 0x7219c
/*
* Moorestown registers.
@@ -930,7 +921,6 @@ struct dpst_guardband {
#define DEVICE_RESET_REG 0xb01C
#define DPI_RESOLUTION_REG 0xb020
#define RES_V_POS 0x10
-#define DBI_RESOLUTION_REG 0xb024 /* Reserved for MDFLD */
#define HORIZ_SYNC_PAD_COUNT_REG 0xb028
#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C
#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 907f966d6f22..355da2856389 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -221,7 +221,7 @@ static bool
psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector);
-/**
+/*
* Writes the SDVOB or SDVOC with the given value, but always writes both
* SDVOB and SDVOC to work around apparent hardware issues (according to
* comments in the BIOS).
@@ -588,7 +588,7 @@ static bool psb_intel_sdvo_set_target_input(struct psb_intel_sdvo *psb_intel_sdv
&targets, sizeof(targets));
}
-/**
+/*
* Return whether each input is trained.
*
* This function is making an assumption about the layout of the response,
@@ -1818,7 +1818,7 @@ psb_intel_sdvo_guess_ddc_bus(struct psb_intel_sdvo *sdvo)
#endif
}
-/**
+/*
* Choose the appropriate DDC bus for control bus switch command for this
* SDVO output based on the controlled output.
*
@@ -2406,7 +2406,7 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
sdvo->ddc.owner = THIS_MODULE;
sdvo->ddc.class = I2C_CLASS_DDC;
snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
- sdvo->ddc.dev.parent = &dev->pdev->dev;
+ sdvo->ddc.dev.parent = dev->dev;
sdvo->ddc.algo_data = sdvo;
sdvo->ddc.algo = &psb_intel_sdvo_ddc_proxy;
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 361e3a0c5ab6..ae9b100e640b 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -10,7 +10,6 @@
#include <drm/drm_vblank.h>
-#include "mdfld_output.h"
#include "power.h"
#include "psb_drv.h"
#include "psb_intel_reg.h"
@@ -126,9 +125,8 @@ static void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
}
}
-/**
+/*
* Display controller interrupt handler for pipe event.
- *
*/
static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
{
@@ -165,8 +163,7 @@ static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
"%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
__func__, pipe, PSB_RVDC32(pipe_stat_reg));
- if (pipe_stat_val & PIPE_VBLANK_STATUS ||
- (IS_MFLD(dev) && pipe_stat_val & PIPE_TE_STATUS)) {
+ if (pipe_stat_val & PIPE_VBLANK_STATUS) {
struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
unsigned long flags;
@@ -264,11 +261,6 @@ irqreturn_t psb_irq_handler(int irq, void *arg)
if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
dsp_int = 1;
- /* FIXME: Handle Medfield
- if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG)
- dsp_int = 1;
- */
-
if (vdc_stat & _PSB_IRQ_SGX_FLAG)
sgx_int = 1;
if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
@@ -326,13 +318,6 @@ void psb_irq_preinstall(struct drm_device *dev)
if (dev->vblank[1].enabled)
dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
- /* FIXME: Handle Medfield irq mask
- if (dev->vblank[1].enabled)
- dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
- if (dev->vblank[2].enabled)
- dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
- */
-
/* Revisit this area - want per device masks ? */
if (dev_priv->ops->hotplug)
dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
@@ -505,11 +490,6 @@ int psb_enable_vblank(struct drm_crtc *crtc)
uint32_t reg_val = 0;
uint32_t pipeconf_reg = mid_pipeconf(pipe);
- /* Medfield is different - we should perhaps extract out vblank
-	   and backlight etc ops */
- if (IS_MFLD(dev))
- return mdfld_enable_te(dev, pipe);
-
if (gma_power_begin(dev, false)) {
reg_val = REG_READ(pipeconf_reg);
gma_power_end(dev);
@@ -544,8 +524,6 @@ void psb_disable_vblank(struct drm_crtc *crtc)
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- if (IS_MFLD(dev))
- mdfld_disable_te(dev, pipe);
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
if (pipe == 0)
@@ -560,55 +538,6 @@ void psb_disable_vblank(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
-/*
- * It is used to enable TE interrupt
- */
-int mdfld_enable_te(struct drm_device *dev, int pipe)
-{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
- unsigned long irqflags;
- uint32_t reg_val = 0;
- uint32_t pipeconf_reg = mid_pipeconf(pipe);
-
- if (gma_power_begin(dev, false)) {
- reg_val = REG_READ(pipeconf_reg);
- gma_power_end(dev);
- }
-
- if (!(reg_val & PIPEACONF_ENABLE))
- return -EINVAL;
-
- spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-
- mid_enable_pipe_event(dev_priv, pipe);
- psb_enable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
-
- spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-
- return 0;
-}
-
-/*
- * It is used to disable TE interrupt
- */
-void mdfld_disable_te(struct drm_device *dev, int pipe)
-{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
- unsigned long irqflags;
-
- if (!dev_priv->dsr_enable)
- return;
-
- spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-
- mid_disable_pipe_event(dev_priv, pipe);
- psb_disable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
-
- spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-}
-
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index 4f73998848d1..1b577fa7010a 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -31,6 +31,4 @@ int psb_enable_vblank(struct drm_crtc *crtc);
void psb_disable_vblank(struct drm_crtc *crtc);
u32 psb_get_vblank_counter(struct drm_crtc *crtc);
-int mdfld_enable_te(struct drm_device *dev, int pipe);
-void mdfld_disable_te(struct drm_device *dev, int pipe);
#endif /* _PSB_IRQ_H_ */
diff --git a/drivers/gpu/drm/gma500/psb_reg.h b/drivers/gpu/drm/gma500/psb_reg.h
index fb22bac5bb74..2a229a0ef36c 100644
--- a/drivers/gpu/drm/gma500/psb_reg.h
+++ b/drivers/gpu/drm/gma500/psb_reg.h
@@ -550,21 +550,7 @@
#define PSB_PM_SSC 0x20
#define PSB_PM_SSS 0x30
#define PSB_PWRGT_DISPLAY_MASK 0xc /*on a different BA than video/gfx*/
-#define MDFLD_PWRGT_DISPLAY_A_CNTR 0x0000000c
-#define MDFLD_PWRGT_DISPLAY_B_CNTR 0x0000c000
-#define MDFLD_PWRGT_DISPLAY_C_CNTR 0x00030000
-#define MDFLD_PWRGT_DISP_MIPI_CNTR 0x000c0000
-#define MDFLD_PWRGT_DISPLAY_CNTR (MDFLD_PWRGT_DISPLAY_A_CNTR | MDFLD_PWRGT_DISPLAY_B_CNTR | MDFLD_PWRGT_DISPLAY_C_CNTR | MDFLD_PWRGT_DISP_MIPI_CNTR) /* 0x000fc00c */
/* Display SSS register bits are different in A0 vs. B0 */
#define PSB_PWRGT_GFX_MASK 0x3
-#define MDFLD_PWRGT_DISPLAY_A_STS 0x000000c0
-#define MDFLD_PWRGT_DISPLAY_B_STS 0x00000300
-#define MDFLD_PWRGT_DISPLAY_C_STS 0x00000c00
#define PSB_PWRGT_GFX_MASK_B0 0xc3
-#define MDFLD_PWRGT_DISPLAY_A_STS_B0 0x0000000c
-#define MDFLD_PWRGT_DISPLAY_B_STS_B0 0x0000c000
-#define MDFLD_PWRGT_DISPLAY_C_STS_B0 0x00030000
-#define MDFLD_PWRGT_DISP_MIPI_STS 0x000c0000
-#define MDFLD_PWRGT_DISPLAY_STS_A0 (MDFLD_PWRGT_DISPLAY_A_STS | MDFLD_PWRGT_DISPLAY_B_STS | MDFLD_PWRGT_DISPLAY_C_STS | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
-#define MDFLD_PWRGT_DISPLAY_STS_B0 (MDFLD_PWRGT_DISPLAY_A_STS_B0 | MDFLD_PWRGT_DISPLAY_B_STS_B0 | MDFLD_PWRGT_DISPLAY_C_STS_B0 | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
#endif
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
deleted file mode 100644
index e5bdd99ad453..000000000000
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+++ /dev/null
@@ -1,805 +0,0 @@
-/*
- * Copyright © 2011 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/gpio/consumer.h>
-
-#include <asm/intel_scu_ipc.h>
-
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_pkg_sender.h"
-#include "mdfld_output.h"
-#include "tc35876x-dsi-lvds.h"
-
-static struct i2c_client *tc35876x_client;
-static struct i2c_client *cmi_lcd_i2c_client;
-/* Panel GPIOs */
-static struct gpio_desc *bridge_reset;
-static struct gpio_desc *bridge_bl_enable;
-static struct gpio_desc *backlight_voltage;
-
-
-#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
-#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
-
-/* DSI D-PHY Layer Registers */
-#define D0W_DPHYCONTTX 0x0004
-#define CLW_DPHYCONTRX 0x0020
-#define D0W_DPHYCONTRX 0x0024
-#define D1W_DPHYCONTRX 0x0028
-#define D2W_DPHYCONTRX 0x002C
-#define D3W_DPHYCONTRX 0x0030
-#define COM_DPHYCONTRX 0x0038
-#define CLW_CNTRL 0x0040
-#define D0W_CNTRL 0x0044
-#define D1W_CNTRL 0x0048
-#define D2W_CNTRL 0x004C
-#define D3W_CNTRL 0x0050
-#define DFTMODE_CNTRL 0x0054
-
-/* DSI PPI Layer Registers */
-#define PPI_STARTPPI 0x0104
-#define PPI_BUSYPPI 0x0108
-#define PPI_LINEINITCNT 0x0110
-#define PPI_LPTXTIMECNT 0x0114
-#define PPI_LANEENABLE 0x0134
-#define PPI_TX_RX_TA 0x013C
-#define PPI_CLS_ATMR 0x0140
-#define PPI_D0S_ATMR 0x0144
-#define PPI_D1S_ATMR 0x0148
-#define PPI_D2S_ATMR 0x014C
-#define PPI_D3S_ATMR 0x0150
-#define PPI_D0S_CLRSIPOCOUNT 0x0164
-#define PPI_D1S_CLRSIPOCOUNT 0x0168
-#define PPI_D2S_CLRSIPOCOUNT 0x016C
-#define PPI_D3S_CLRSIPOCOUNT 0x0170
-#define CLS_PRE 0x0180
-#define D0S_PRE 0x0184
-#define D1S_PRE 0x0188
-#define D2S_PRE 0x018C
-#define D3S_PRE 0x0190
-#define CLS_PREP 0x01A0
-#define D0S_PREP 0x01A4
-#define D1S_PREP 0x01A8
-#define D2S_PREP 0x01AC
-#define D3S_PREP 0x01B0
-#define CLS_ZERO 0x01C0
-#define D0S_ZERO 0x01C4
-#define D1S_ZERO 0x01C8
-#define D2S_ZERO 0x01CC
-#define D3S_ZERO 0x01D0
-#define PPI_CLRFLG 0x01E0
-#define PPI_CLRSIPO 0x01E4
-#define HSTIMEOUT 0x01F0
-#define HSTIMEOUTENABLE 0x01F4
-
-/* DSI Protocol Layer Registers */
-#define DSI_STARTDSI 0x0204
-#define DSI_BUSYDSI 0x0208
-#define DSI_LANEENABLE 0x0210
-#define DSI_LANESTATUS0 0x0214
-#define DSI_LANESTATUS1 0x0218
-#define DSI_INTSTATUS 0x0220
-#define DSI_INTMASK 0x0224
-#define DSI_INTCLR 0x0228
-#define DSI_LPTXTO 0x0230
-
-/* DSI General Registers */
-#define DSIERRCNT 0x0300
-
-/* DSI Application Layer Registers */
-#define APLCTRL 0x0400
-#define RDPKTLN 0x0404
-
-/* Video Path Registers */
-#define VPCTRL 0x0450
-#define HTIM1 0x0454
-#define HTIM2 0x0458
-#define VTIM1 0x045C
-#define VTIM2 0x0460
-#define VFUEN 0x0464
-
-/* LVDS Registers */
-#define LVMX0003 0x0480
-#define LVMX0407 0x0484
-#define LVMX0811 0x0488
-#define LVMX1215 0x048C
-#define LVMX1619 0x0490
-#define LVMX2023 0x0494
-#define LVMX2427 0x0498
-#define LVCFG 0x049C
-#define LVPHY0 0x04A0
-#define LVPHY1 0x04A4
-
-/* System Registers */
-#define SYSSTAT 0x0500
-#define SYSRST 0x0504
-
-/* GPIO Registers */
-/*#define GPIOC 0x0520*/
-#define GPIOO 0x0524
-#define GPIOI 0x0528
-
-/* I2C Registers */
-#define I2CTIMCTRL 0x0540
-#define I2CMADDR 0x0544
-#define WDATAQ 0x0548
-#define RDATAQ 0x054C
-
-/* Chip/Rev Registers */
-#define IDREG 0x0580
-
-/* Debug Registers */
-#define DEBUG00 0x05A0
-#define DEBUG01 0x05A4
-
-/* Panel CABC registers */
-#define PANEL_PWM_CONTROL 0x90
-#define PANEL_FREQ_DIVIDER_HI 0x91
-#define PANEL_FREQ_DIVIDER_LO 0x92
-#define PANEL_DUTY_CONTROL 0x93
-#define PANEL_MODIFY_RGB 0x94
-#define PANEL_FRAMERATE_CONTROL 0x96
-#define PANEL_PWM_MIN 0x97
-#define PANEL_PWM_REF 0x98
-#define PANEL_PWM_MAX 0x99
-#define PANEL_ALLOW_DISTORT 0x9A
-#define PANEL_BYPASS_PWMI 0x9B
-
-/* Panel color management registers */
-#define PANEL_CM_ENABLE 0x700
-#define PANEL_CM_HUE 0x701
-#define PANEL_CM_SATURATION 0x702
-#define PANEL_CM_INTENSITY 0x703
-#define PANEL_CM_BRIGHTNESS 0x704
-#define PANEL_CM_CE_ENABLE 0x705
-#define PANEL_CM_PEAK_EN 0x710
-#define PANEL_CM_GAIN 0x711
-#define PANEL_CM_HUETABLE_START 0x730
-#define PANEL_CM_HUETABLE_END 0x747 /* inclusive */
-
-/* Input muxing for registers LVMX0003...LVMX2427 */
-enum {
- INPUT_R0, /* 0 */
- INPUT_R1,
- INPUT_R2,
- INPUT_R3,
- INPUT_R4,
- INPUT_R5,
- INPUT_R6,
- INPUT_R7,
- INPUT_G0, /* 8 */
- INPUT_G1,
- INPUT_G2,
- INPUT_G3,
- INPUT_G4,
- INPUT_G5,
- INPUT_G6,
- INPUT_G7,
- INPUT_B0, /* 16 */
- INPUT_B1,
- INPUT_B2,
- INPUT_B3,
- INPUT_B4,
- INPUT_B5,
- INPUT_B6,
- INPUT_B7,
- INPUT_HSYNC, /* 24 */
- INPUT_VSYNC,
- INPUT_DE,
- LOGIC_0,
- /* 28...31 undefined */
-};
-
-#define INPUT_MUX(lvmx03, lvmx02, lvmx01, lvmx00) \
- (FLD_VAL(lvmx03, 29, 24) | FLD_VAL(lvmx02, 20, 16) | \
- FLD_VAL(lvmx01, 12, 8) | FLD_VAL(lvmx00, 4, 0))
-
-/**
- * tc35876x_regw - Write DSI-LVDS bridge register using I2C
- * @client: struct i2c_client to use
- * @reg: register address
- * @value: value to write
- *
- * Returns 0 on success, or a negative error value.
- */
-static int tc35876x_regw(struct i2c_client *client, u16 reg, u32 value)
-{
- int r;
- u8 tx_data[] = {
- /* NOTE: Register address big-endian, data little-endian. */
- (reg >> 8) & 0xff,
- reg & 0xff,
- value & 0xff,
- (value >> 8) & 0xff,
- (value >> 16) & 0xff,
- (value >> 24) & 0xff,
- };
- struct i2c_msg msgs[] = {
- {
- .addr = client->addr,
- .flags = 0,
- .buf = tx_data,
- .len = ARRAY_SIZE(tx_data),
- },
- };
-
- r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (r < 0) {
- dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x error %d\n",
- __func__, reg, value, r);
- return r;
- }
-
- if (r < ARRAY_SIZE(msgs)) {
- dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x msgs %d\n",
- __func__, reg, value, r);
- return -EAGAIN;
- }
-
- dev_dbg(&client->dev, "%s: reg 0x%04x val 0x%08x\n",
- __func__, reg, value);
-
- return 0;
-}
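/*
 * Illustrative, standalone sketch (not driver code): how the 6-byte I2C
 * payload for a bridge register write could be packed, assuming the same
 * layout as tc35876x_regw() above - register address sent big-endian,
 * value little-endian. The function and buffer names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static void pack_bridge_write(uint16_t reg, uint32_t value, uint8_t buf[6])
{
	buf[0] = (reg >> 8) & 0xff;	/* register address, MSB first */
	buf[1] = reg & 0xff;
	buf[2] = value & 0xff;		/* value, LSB first */
	buf[3] = (value >> 8) & 0xff;
	buf[4] = (value >> 16) & 0xff;
	buf[5] = (value >> 24) & 0xff;
}

int main(void)
{
	uint8_t buf[6];
	int i;

	/* VPCTRL (0x0450) written with 0x00000120, as in the driver below */
	pack_bridge_write(0x0450, 0x00000120, buf);
	for (i = 0; i < 6; i++)
		printf("%02x ", buf[i]);	/* prints: 04 50 20 01 00 00 */
	printf("\n");
	return 0;
}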
-
-/**
- * tc35876x_regr - Read DSI-LVDS bridge register using I2C
- * @client: struct i2c_client to use
- * @reg: register address
- * @value: pointer for storing the value
- *
- * Returns 0 on success, or a negative error value.
- */
-static int tc35876x_regr(struct i2c_client *client, u16 reg, u32 *value)
-{
- int r;
- u8 tx_data[] = {
- (reg >> 8) & 0xff,
- reg & 0xff,
- };
- u8 rx_data[4];
- struct i2c_msg msgs[] = {
- {
- .addr = client->addr,
- .flags = 0,
- .buf = tx_data,
- .len = ARRAY_SIZE(tx_data),
- },
- {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .buf = rx_data,
- .len = ARRAY_SIZE(rx_data),
- },
- };
-
- r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (r < 0) {
- dev_err(&client->dev, "%s: reg 0x%04x error %d\n", __func__,
- reg, r);
- return r;
- }
-
- if (r < ARRAY_SIZE(msgs)) {
- dev_err(&client->dev, "%s: reg 0x%04x msgs %d\n", __func__,
- reg, r);
- return -EAGAIN;
- }
-
- *value = rx_data[0] << 24 | rx_data[1] << 16 |
- rx_data[2] << 8 | rx_data[3];
-
- dev_dbg(&client->dev, "%s: reg 0x%04x value 0x%08x\n", __func__,
- reg, *value);
-
- return 0;
-}
-
-void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state)
-{
- if (WARN(!tc35876x_client, "%s called before probe", __func__))
- return;
-
- dev_dbg(&tc35876x_client->dev, "%s: state %d\n", __func__, state);
-
- if (!bridge_reset)
- return;
-
- if (state) {
- gpiod_set_value_cansleep(bridge_reset, 0);
- mdelay(10);
- } else {
- /* Pull MIPI Bridge reset pin to Low */
- gpiod_set_value_cansleep(bridge_reset, 0);
- mdelay(20);
- /* Pull MIPI Bridge reset pin to High */
- gpiod_set_value_cansleep(bridge_reset, 1);
- mdelay(40);
- }
-}
-
-void tc35876x_configure_lvds_bridge(struct drm_device *dev)
-{
- struct i2c_client *i2c = tc35876x_client;
- u32 ppi_lptxtimecnt;
- u32 txtagocnt;
- u32 txtasurecnt;
- u32 id;
-
- if (WARN(!tc35876x_client, "%s called before probe", __func__))
- return;
-
- dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
-
- if (!tc35876x_regr(i2c, IDREG, &id))
- dev_info(&tc35876x_client->dev, "tc35876x ID 0x%08x\n", id);
- else
- dev_err(&tc35876x_client->dev, "Cannot read ID\n");
-
- ppi_lptxtimecnt = 4;
- txtagocnt = (5 * ppi_lptxtimecnt - 3) / 4;
- txtasurecnt = 3 * ppi_lptxtimecnt / 2;
- tc35876x_regw(i2c, PPI_TX_RX_TA, FLD_VAL(txtagocnt, 26, 16) |
- FLD_VAL(txtasurecnt, 10, 0));
- tc35876x_regw(i2c, PPI_LPTXTIMECNT, FLD_VAL(ppi_lptxtimecnt, 10, 0));
-
- tc35876x_regw(i2c, PPI_D0S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
- tc35876x_regw(i2c, PPI_D1S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
- tc35876x_regw(i2c, PPI_D2S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
- tc35876x_regw(i2c, PPI_D3S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
-
- /* Enabling MIPI & PPI lanes, Enable 4 lanes */
- tc35876x_regw(i2c, PPI_LANEENABLE,
- BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0));
- tc35876x_regw(i2c, DSI_LANEENABLE,
- BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0));
- tc35876x_regw(i2c, PPI_STARTPPI, BIT(0));
- tc35876x_regw(i2c, DSI_STARTDSI, BIT(0));
-
- /* Setting LVDS output frequency */
- tc35876x_regw(i2c, LVPHY0, FLD_VAL(1, 20, 16) |
- FLD_VAL(2, 15, 14) | FLD_VAL(6, 4, 0)); /* 0x00048006 */
-
- /* Setting video panel control register,0x00000120 VTGen=ON ?!?!? */
- tc35876x_regw(i2c, VPCTRL, BIT(8) | BIT(5));
-
- /* Horizontal back porch and horizontal pulse width. 0x00280028 */
- tc35876x_regw(i2c, HTIM1, FLD_VAL(40, 24, 16) | FLD_VAL(40, 8, 0));
-
- /* Horizontal front porch and horizontal active video size. 0x00500500*/
- tc35876x_regw(i2c, HTIM2, FLD_VAL(80, 24, 16) | FLD_VAL(1280, 10, 0));
-
- /* Vertical back porch and vertical sync pulse width. 0x000e000a */
- tc35876x_regw(i2c, VTIM1, FLD_VAL(14, 23, 16) | FLD_VAL(10, 7, 0));
-
- /* Vertical front porch and vertical display size. 0x000e0320 */
- tc35876x_regw(i2c, VTIM2, FLD_VAL(14, 23, 16) | FLD_VAL(800, 10, 0));
-
- /* Set above HTIM1, HTIM2, VTIM1, and VTIM2 at next VSYNC. */
- tc35876x_regw(i2c, VFUEN, BIT(0));
-
- /* Soft reset LCD controller. */
- tc35876x_regw(i2c, SYSRST, BIT(2));
-
- /* LVDS-TX input muxing */
- tc35876x_regw(i2c, LVMX0003,
- INPUT_MUX(INPUT_R5, INPUT_R4, INPUT_R3, INPUT_R2));
- tc35876x_regw(i2c, LVMX0407,
- INPUT_MUX(INPUT_G2, INPUT_R7, INPUT_R1, INPUT_R6));
- tc35876x_regw(i2c, LVMX0811,
- INPUT_MUX(INPUT_G1, INPUT_G0, INPUT_G4, INPUT_G3));
- tc35876x_regw(i2c, LVMX1215,
- INPUT_MUX(INPUT_B2, INPUT_G7, INPUT_G6, INPUT_G5));
- tc35876x_regw(i2c, LVMX1619,
- INPUT_MUX(INPUT_B4, INPUT_B3, INPUT_B1, INPUT_B0));
- tc35876x_regw(i2c, LVMX2023,
- INPUT_MUX(LOGIC_0, INPUT_B7, INPUT_B6, INPUT_B5));
- tc35876x_regw(i2c, LVMX2427,
- INPUT_MUX(INPUT_R0, INPUT_DE, INPUT_VSYNC, INPUT_HSYNC));
-
- /* Enable LVDS transmitter. */
- tc35876x_regw(i2c, LVCFG, BIT(0));
-
- /* Clear notifications. Don't write reserved bits. Was write 0xffffffff
- * to 0x0288, must be in error?! */
- tc35876x_regw(i2c, DSI_INTCLR, FLD_MASK(31, 30) | FLD_MASK(22, 0));
-}
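/*
 * Standalone sketch (not driver code): FLD_MASK()/FLD_VAL() pack a value
 * into bits [start:end] of a register word. Reusing the same definitions
 * shows how the timing writes in tc35876x_configure_lvds_bridge() arrive
 * at the constants quoted in its comments.
 */
#include <stdio.h>

#define FLD_MASK(start, end)	 (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))

int main(void)
{
	/* HTIM1: back porch 40, sync width 40 -> 0x00280028 */
	printf("HTIM1 = 0x%08x\n",
	       (unsigned)(FLD_VAL(40, 24, 16) | FLD_VAL(40, 8, 0)));
	/* HTIM2: front porch 80, active width 1280 -> 0x00500500 */
	printf("HTIM2 = 0x%08x\n",
	       (unsigned)(FLD_VAL(80, 24, 16) | FLD_VAL(1280, 10, 0)));
	/* VTIM2: front porch 14, active height 800 -> 0x000e0320 */
	printf("VTIM2 = 0x%08x\n",
	       (unsigned)(FLD_VAL(14, 23, 16) | FLD_VAL(800, 10, 0)));
	return 0;
}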
-
-#define GPIOPWMCTRL 0x38F
-#define PWM0CLKDIV0 0x62 /* low byte */
-#define PWM0CLKDIV1 0x61 /* high byte */
-
-#define SYSTEMCLK 19200000UL /* 19.2 MHz */
-#define PWM_FREQUENCY 9600 /* Hz */
-
-/* f = baseclk / (clkdiv + 1) => clkdiv = (baseclk - f) / f */
-static inline u16 calc_clkdiv(unsigned long baseclk, unsigned int f)
-{
- return (baseclk - f) / f;
-}
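/*
 * Quick standalone check of the divider formula above, using the same
 * 19.2 MHz base clock and 9600 Hz PWM target that
 * tc35876x_brightness_init() programs: clkdiv = (baseclk - f) / f.
 */
#include <stdio.h>

int main(void)
{
	unsigned long baseclk = 19200000UL;	/* SYSTEMCLK */
	unsigned int f = 9600;			/* PWM_FREQUENCY */
	unsigned int clkdiv = (baseclk - f) / f;

	/* 1999 (0x07cf): f = 19200000 / (1999 + 1) = 9600 Hz exactly */
	printf("clkdiv = %u (0x%04x)\n", clkdiv, clkdiv);
	return 0;
}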
-
-static void tc35876x_brightness_init(struct drm_device *dev)
-{
- int ret;
- u8 pwmctrl;
- u16 clkdiv;
-
- /* Make sure the PWM reference is the 19.2 MHz system clock. Read first
- * instead of setting directly to catch potential conflicts between PWM
- * users. */
- ret = intel_scu_ipc_ioread8(GPIOPWMCTRL, &pwmctrl);
- if (ret || pwmctrl != 0x01) {
- if (ret)
- dev_err(&dev->pdev->dev, "GPIOPWMCTRL read failed\n");
- else
- dev_warn(&dev->pdev->dev, "GPIOPWMCTRL was not set to system clock (pwmctrl = 0x%02x)\n", pwmctrl);
-
- ret = intel_scu_ipc_iowrite8(GPIOPWMCTRL, 0x01);
- if (ret)
- dev_err(&dev->pdev->dev, "GPIOPWMCTRL set failed\n");
- }
-
- clkdiv = calc_clkdiv(SYSTEMCLK, PWM_FREQUENCY);
-
- ret = intel_scu_ipc_iowrite8(PWM0CLKDIV1, (clkdiv >> 8) & 0xff);
- if (!ret)
- ret = intel_scu_ipc_iowrite8(PWM0CLKDIV0, clkdiv & 0xff);
-
- if (ret)
- dev_err(&dev->pdev->dev, "PWM0CLKDIV set failed\n");
- else
- dev_dbg(&dev->pdev->dev, "PWM0CLKDIV set to 0x%04x (%d Hz)\n",
- clkdiv, PWM_FREQUENCY);
-}
-
-#define PWM0DUTYCYCLE 0x67
-
-void tc35876x_brightness_control(struct drm_device *dev, int level)
-{
- int ret;
- u8 duty_val;
- u8 panel_duty_val;
-
- level = clamp(level, 0, MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
-
- /* PWM duty cycle 0x00...0x63 corresponds to 0...99% */
- duty_val = level * 0x63 / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL;
-
- /* I won't pretend to understand this formula. The panel spec is quite
- * bad engrish.
- */
- panel_duty_val = (2 * level - 100) * 0xA9 /
- MDFLD_DSI_BRIGHTNESS_MAX_LEVEL + 0x56;
-
- ret = intel_scu_ipc_iowrite8(PWM0DUTYCYCLE, duty_val);
- if (ret)
- dev_err(&tc35876x_client->dev, "%s: ipc write fail\n",
- __func__);
-
- if (cmi_lcd_i2c_client) {
- ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
- PANEL_PWM_MAX, panel_duty_val);
- if (ret < 0)
- dev_err(&cmi_lcd_i2c_client->dev, "%s: i2c write failed\n",
- __func__);
- }
-}
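/*
 * Standalone sketch of the two brightness mappings above, using plain
 * integer math. The maximum level is assumed to be 100 here;
 * MDFLD_DSI_BRIGHTNESS_MAX_LEVEL is defined elsewhere in the driver.
 */
#include <stdio.h>

#define MAX_LEVEL 100	/* assumed value of MDFLD_DSI_BRIGHTNESS_MAX_LEVEL */

int main(void)
{
	int level;

	for (level = 50; level <= MAX_LEVEL; level += 25) {
		/* SCU PWM duty: 0x00..0x63 spans 0..99% */
		int duty_val = level * 0x63 / MAX_LEVEL;
		/* panel PWM reference, per the formula in the code above */
		int panel_duty_val = (2 * level - 100) * 0xA9 /
				     MAX_LEVEL + 0x56;

		printf("level %3d -> duty 0x%02x, panel 0x%02x\n",
		       level, duty_val, panel_duty_val);
	}
	return 0;
}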
-
-void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev)
-{
- if (WARN(!tc35876x_client, "%s called before probe", __func__))
- return;
-
- dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
-
- if (bridge_bl_enable)
- gpiod_set_value_cansleep(bridge_bl_enable, 0);
-
- if (backlight_voltage)
- gpiod_set_value_cansleep(backlight_voltage, 0);
-}
-
-void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (WARN(!tc35876x_client, "%s called before probe", __func__))
- return;
-
- dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
-
- if (backlight_voltage) {
- gpiod_set_value_cansleep(backlight_voltage, 1);
- msleep(260);
- }
-
- if (cmi_lcd_i2c_client) {
- int ret;
- dev_dbg(&cmi_lcd_i2c_client->dev, "setting TCON\n");
- /* Bit 4 is average_saving. Setting it to 1, the brightness is
- * referenced to the average of the frame content. 0 means
- * reference to the maximum of frame contents. Bits 3:0 are
- * allow_distort. When set to a nonzero value, all color values
- * between 255-allow_distort*2 and 255 are mapped to the
- * 255-allow_distort*2 value.
- */
- ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
- PANEL_ALLOW_DISTORT, 0x10);
- if (ret < 0)
- dev_err(&cmi_lcd_i2c_client->dev,
- "i2c write failed (%d)\n", ret);
- ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
- PANEL_BYPASS_PWMI, 0);
- if (ret < 0)
- dev_err(&cmi_lcd_i2c_client->dev,
- "i2c write failed (%d)\n", ret);
- /* Set minimum brightness value - this is tunable */
- ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
- PANEL_PWM_MIN, 0x35);
- if (ret < 0)
- dev_err(&cmi_lcd_i2c_client->dev,
- "i2c write failed (%d)\n", ret);
- }
-
- if (bridge_bl_enable)
- gpiod_set_value_cansleep(bridge_bl_enable, 1);
-
- tc35876x_brightness_control(dev, dev_priv->brightness_adjusted);
-}
-
-static struct drm_display_mode *tc35876x_get_config_mode(struct drm_device *dev)
-{
- struct drm_display_mode *mode;
-
- dev_dbg(&dev->pdev->dev, "%s\n", __func__);
-
- mode = kzalloc(sizeof(*mode), GFP_KERNEL);
- if (!mode)
- return NULL;
-
- /* FIXME: do this properly. */
- mode->hdisplay = 1280;
- mode->vdisplay = 800;
- mode->hsync_start = 1360;
- mode->hsync_end = 1400;
- mode->htotal = 1440;
- mode->vsync_start = 814;
- mode->vsync_end = 824;
- mode->vtotal = 838;
- mode->clock = 33324 << 1;
-
- dev_info(&dev->pdev->dev, "hdisplay(w) = %d\n", mode->hdisplay);
- dev_info(&dev->pdev->dev, "vdisplay(h) = %d\n", mode->vdisplay);
- dev_info(&dev->pdev->dev, "HSS = %d\n", mode->hsync_start);
- dev_info(&dev->pdev->dev, "HSE = %d\n", mode->hsync_end);
- dev_info(&dev->pdev->dev, "htotal = %d\n", mode->htotal);
- dev_info(&dev->pdev->dev, "VSS = %d\n", mode->vsync_start);
- dev_info(&dev->pdev->dev, "VSE = %d\n", mode->vsync_end);
- dev_info(&dev->pdev->dev, "vtotal = %d\n", mode->vtotal);
- dev_info(&dev->pdev->dev, "clock = %d\n", mode->clock);
-
- drm_mode_set_name(mode);
- drm_mode_set_crtcinfo(mode, 0);
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
-
- return mode;
-}
-
-/* DV1 Active area 216.96 x 135.6 mm */
-#define DV1_PANEL_WIDTH 217
-#define DV1_PANEL_HEIGHT 136
-
-static int tc35876x_get_panel_info(struct drm_device *dev, int pipe,
- struct panel_info *pi)
-{
- if (!dev || !pi)
- return -EINVAL;
-
- pi->width_mm = DV1_PANEL_WIDTH;
- pi->height_mm = DV1_PANEL_HEIGHT;
-
- return 0;
-}
-
-static int tc35876x_bridge_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- dev_info(&client->dev, "%s\n", __func__);
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev, "%s: i2c_check_functionality() failed\n",
- __func__);
- return -ENODEV;
- }
-
- bridge_reset = devm_gpiod_get_optional(&client->dev, "bridge-reset", GPIOD_OUT_LOW);
- if (IS_ERR(bridge_reset))
- return PTR_ERR(bridge_reset);
- if (bridge_reset)
- gpiod_set_consumer_name(bridge_reset, "tc35876x bridge reset");
-
- bridge_bl_enable = devm_gpiod_get_optional(&client->dev, "bl-en", GPIOD_OUT_LOW);
- if (IS_ERR(bridge_bl_enable))
- return PTR_ERR(bridge_bl_enable);
- if (bridge_bl_enable)
- gpiod_set_consumer_name(bridge_bl_enable, "tc35876x panel bl en");
-
- backlight_voltage = devm_gpiod_get_optional(&client->dev, "vadd", GPIOD_OUT_LOW);
- if (IS_ERR(backlight_voltage))
- return PTR_ERR(backlight_voltage);
- if (backlight_voltage)
- gpiod_set_consumer_name(backlight_voltage, "tc35876x panel vadd");
-
- tc35876x_client = client;
-
- return 0;
-}
-
-static int tc35876x_bridge_remove(struct i2c_client *client)
-{
- dev_dbg(&client->dev, "%s\n", __func__);
-
- tc35876x_client = NULL;
-
- return 0;
-}
-
-static const struct i2c_device_id tc35876x_bridge_id[] = {
- { "i2c_disp_brig", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, tc35876x_bridge_id);
-
-static struct i2c_driver tc35876x_bridge_i2c_driver = {
- .driver = {
- .name = "i2c_disp_brig",
- },
- .id_table = tc35876x_bridge_id,
- .probe = tc35876x_bridge_probe,
- .remove = tc35876x_bridge_remove,
-};
-
-/* LCD panel I2C */
-static int cmi_lcd_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- dev_info(&client->dev, "%s\n", __func__);
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev, "%s: i2c_check_functionality() failed\n",
- __func__);
- return -ENODEV;
- }
-
- cmi_lcd_i2c_client = client;
-
- return 0;
-}
-
-static int cmi_lcd_i2c_remove(struct i2c_client *client)
-{
- dev_dbg(&client->dev, "%s\n", __func__);
-
- cmi_lcd_i2c_client = NULL;
-
- return 0;
-}
-
-static const struct i2c_device_id cmi_lcd_i2c_id[] = {
- { "cmi-lcd", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, cmi_lcd_i2c_id);
-
-static struct i2c_driver cmi_lcd_i2c_driver = {
- .driver = {
- .name = "cmi-lcd",
- },
- .id_table = cmi_lcd_i2c_id,
- .probe = cmi_lcd_i2c_probe,
- .remove = cmi_lcd_i2c_remove,
-};
-
-/* HACK to create I2C device while it's not created by platform code */
-#define CMI_LCD_I2C_ADAPTER 2
-#define CMI_LCD_I2C_ADDR 0x60
-
-static int cmi_lcd_hack_create_device(void)
-{
- struct i2c_adapter *adapter;
- struct i2c_client *client;
- struct i2c_board_info info = {
- .type = "cmi-lcd",
- .addr = CMI_LCD_I2C_ADDR,
- };
-
- pr_debug("%s\n", __func__);
-
- adapter = i2c_get_adapter(CMI_LCD_I2C_ADAPTER);
- if (!adapter) {
- pr_err("%s: i2c_get_adapter(%d) failed\n", __func__,
- CMI_LCD_I2C_ADAPTER);
- return -EINVAL;
- }
-
- client = i2c_new_client_device(adapter, &info);
- if (IS_ERR(client)) {
- pr_err("%s: creating I2C device failed\n", __func__);
- i2c_put_adapter(adapter);
- return PTR_ERR(client);
- }
-
- return 0;
-}
-
-static const struct drm_encoder_helper_funcs tc35876x_encoder_helper_funcs = {
- .dpms = mdfld_dsi_dpi_dpms,
- .mode_fixup = mdfld_dsi_dpi_mode_fixup,
- .prepare = mdfld_dsi_dpi_prepare,
- .mode_set = mdfld_dsi_dpi_mode_set,
- .commit = mdfld_dsi_dpi_commit,
-};
-
-const struct panel_funcs mdfld_tc35876x_funcs = {
- .encoder_helper_funcs = &tc35876x_encoder_helper_funcs,
- .get_config_mode = tc35876x_get_config_mode,
- .get_panel_info = tc35876x_get_panel_info,
-};
-
-void tc35876x_init(struct drm_device *dev)
-{
- int r;
-
- dev_dbg(&dev->pdev->dev, "%s\n", __func__);
-
- cmi_lcd_hack_create_device();
-
- r = i2c_add_driver(&cmi_lcd_i2c_driver);
- if (r < 0)
- dev_err(&dev->pdev->dev,
- "%s: i2c_add_driver() for %s failed (%d)\n",
- __func__, cmi_lcd_i2c_driver.driver.name, r);
-
- r = i2c_add_driver(&tc35876x_bridge_i2c_driver);
- if (r < 0)
- dev_err(&dev->pdev->dev,
- "%s: i2c_add_driver() for %s failed (%d)\n",
- __func__, tc35876x_bridge_i2c_driver.driver.name, r);
-
- tc35876x_brightness_init(dev);
-}
-
-void tc35876x_exit(void)
-{
- pr_debug("%s\n", __func__);
-
- i2c_del_driver(&tc35876x_bridge_i2c_driver);
-
- if (cmi_lcd_i2c_client)
- i2c_del_driver(&cmi_lcd_i2c_driver);
-}
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h
deleted file mode 100644
index b14b7f9e7d1e..000000000000
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright © 2011 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __MDFLD_DSI_LVDS_BRIDGE_H__
-#define __MDFLD_DSI_LVDS_BRIDGE_H__
-
-void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state);
-void tc35876x_configure_lvds_bridge(struct drm_device *dev);
-void tc35876x_brightness_control(struct drm_device *dev, int level);
-void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev);
-void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev);
-void tc35876x_init(struct drm_device *dev);
-void tc35876x_exit(void);
-
-extern const struct panel_funcs mdfld_tc35876x_funcs;
-
-#endif /*__MDFLD_DSI_LVDS_BRIDGE_H__*/
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index 684ef794eb7c..d25c75e60d3d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o hibmc_drm_i2c.o
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index ea962acfeae0..096eea985b6f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -499,7 +499,7 @@ static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
int hibmc_de_init(struct hibmc_drm_private *priv)
{
- struct drm_device *dev = priv->dev;
+ struct drm_device *dev = &priv->dev;
struct drm_crtc *crtc = &priv->crtc;
struct drm_plane *plane = &priv->primary_plane;
int ret;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index d845657fd99c..abd6250d5a14 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -16,6 +16,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_managed.h>
@@ -43,6 +44,12 @@ static irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
return IRQ_HANDLED;
}
+static int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args);
+}
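/*
 * Illustrative sketch only: assuming the 128 passed above is a pitch
 * alignment in bytes, a dumb buffer's scanline pitch would be the
 * bytes-per-line rounded up to that boundary. Helper name is hypothetical.
 */
#include <stdio.h>

static unsigned int dumb_pitch(unsigned int width, unsigned int bpp,
			       unsigned int pitch_align)
{
	unsigned int pitch = width * ((bpp + 7) / 8);

	return (pitch + pitch_align - 1) / pitch_align * pitch_align;
}

int main(void)
{
	/* 1024 wide at 32bpp: 4096 bytes/line, already 128-byte aligned */
	printf("%u\n", dumb_pitch(1024, 32, 128));
	/* 1366 wide at 32bpp: 5464 bytes/line, rounded up to 5504 */
	printf("%u\n", dumb_pitch(1366, 32, 128));
	return 0;
}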
+
static const struct drm_driver hibmc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hibmc_fops,
@@ -77,47 +84,48 @@ static const struct dev_pm_ops hibmc_pm_ops = {
hibmc_pm_resume)
};
+static const struct drm_mode_config_funcs hibmc_mode_funcs = {
+ .mode_valid = drm_vram_helper_mode_valid,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+ .fb_create = drm_gem_fb_create,
+};
+
static int hibmc_kms_init(struct hibmc_drm_private *priv)
{
+ struct drm_device *dev = &priv->dev;
int ret;
- drm_mode_config_init(priv->dev);
- priv->mode_config_initialized = true;
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
- priv->dev->mode_config.min_width = 0;
- priv->dev->mode_config.min_height = 0;
- priv->dev->mode_config.max_width = 1920;
- priv->dev->mode_config.max_height = 1200;
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 1920;
+ dev->mode_config.max_height = 1200;
- priv->dev->mode_config.fb_base = priv->fb_base;
- priv->dev->mode_config.preferred_depth = 32;
- priv->dev->mode_config.prefer_shadow = 1;
+ dev->mode_config.fb_base = priv->fb_base;
+ dev->mode_config.preferred_depth = 32;
+ dev->mode_config.prefer_shadow = 1;
- priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
+ dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
ret = hibmc_de_init(priv);
if (ret) {
- drm_err(priv->dev, "failed to init de: %d\n", ret);
+ drm_err(dev, "failed to init de: %d\n", ret);
return ret;
}
ret = hibmc_vdac_init(priv);
if (ret) {
- drm_err(priv->dev, "failed to init vdac: %d\n", ret);
+ drm_err(dev, "failed to init vdac: %d\n", ret);
return ret;
}
return 0;
}
-static void hibmc_kms_fini(struct hibmc_drm_private *priv)
-{
- if (priv->mode_config_initialized) {
- drm_mode_config_cleanup(priv->dev);
- priv->mode_config_initialized = false;
- }
-}
-
/*
* It can operate in one of three modes: 0, 1 or Sleep.
*/
@@ -202,8 +210,8 @@ static void hibmc_hw_config(struct hibmc_drm_private *priv)
static int hibmc_hw_map(struct hibmc_drm_private *priv)
{
- struct drm_device *dev = priv->dev;
- struct pci_dev *pdev = dev->pdev;
+ struct drm_device *dev = &priv->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
resource_size_t addr, size, ioaddr, iosize;
ioaddr = pci_resource_start(pdev, 1);
@@ -242,40 +250,31 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv)
static int hibmc_unload(struct drm_device *dev)
{
- struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
-
drm_atomic_helper_shutdown(dev);
if (dev->irq_enabled)
drm_irq_uninstall(dev);
- pci_disable_msi(dev->pdev);
- hibmc_kms_fini(priv);
- hibmc_mm_fini(priv);
- dev->dev_private = NULL;
+ pci_disable_msi(to_pci_dev(dev->dev));
+
return 0;
}
static int hibmc_load(struct drm_device *dev)
{
- struct hibmc_drm_private *priv;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
int ret;
- priv = drmm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- drm_err(dev, "no memory to allocate for hibmc_drm_private\n");
- return -ENOMEM;
- }
- dev->dev_private = priv;
- priv->dev = dev;
-
ret = hibmc_hw_init(priv);
if (ret)
goto err;
- ret = hibmc_mm_init(priv);
- if (ret)
+ ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), priv->fb_size);
+ if (ret) {
+ drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
goto err;
+ }
ret = hibmc_kms_init(priv);
if (ret)
@@ -287,11 +286,11 @@ static int hibmc_load(struct drm_device *dev)
goto err;
}
- ret = pci_enable_msi(dev->pdev);
+ ret = pci_enable_msi(pdev);
if (ret) {
drm_warn(dev, "enabling MSI failed: %d\n", ret);
} else {
- ret = drm_irq_install(dev, dev->pdev->irq);
+ ret = drm_irq_install(dev, pdev->irq);
if (ret)
drm_warn(dev, "install irq failed: %d\n", ret);
}
@@ -310,6 +309,7 @@ err:
static int hibmc_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
+ struct hibmc_drm_private *priv;
struct drm_device *dev;
int ret;
@@ -318,25 +318,26 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- dev = drm_dev_alloc(&hibmc_driver, &pdev->dev);
- if (IS_ERR(dev)) {
+ priv = devm_drm_dev_alloc(&pdev->dev, &hibmc_driver,
+ struct hibmc_drm_private, dev);
+ if (IS_ERR(priv)) {
DRM_ERROR("failed to allocate drm_device\n");
- return PTR_ERR(dev);
+ return PTR_ERR(priv);
}
- dev->pdev = pdev;
+ dev = &priv->dev;
pci_set_drvdata(pdev, dev);
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret) {
drm_err(dev, "failed to enable pci device: %d\n", ret);
- goto err_free;
+ goto err_return;
}
ret = hibmc_load(dev);
if (ret) {
drm_err(dev, "failed to load hibmc: %d\n", ret);
- goto err_disable;
+ goto err_return;
}
ret = drm_dev_register(dev, 0);
@@ -352,11 +353,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
err_unload:
hibmc_unload(dev);
-err_disable:
- pci_disable_device(pdev);
-err_free:
- drm_dev_put(dev);
-
+err_return:
return ret;
}
@@ -366,7 +363,6 @@ static void hibmc_pci_remove(struct pci_dev *pdev)
drm_dev_unregister(dev);
hibmc_unload(dev);
- drm_dev_put(dev);
}
static const struct pci_device_id hibmc_pci_table[] = {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index f310a83d9c48..7d263f4d7078 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -37,12 +37,11 @@ struct hibmc_drm_private {
resource_size_t fb_size;
/* drm */
- struct drm_device *dev;
+ struct drm_device dev;
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct hibmc_connector connector;
- bool mode_config_initialized;
};
static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector)
@@ -52,7 +51,7 @@ static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *c
static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
{
- return dev->dev_private;
+ return container_of(dev, struct hibmc_drm_private, dev);
}
void hibmc_set_power_mode(struct hibmc_drm_private *priv,
@@ -64,11 +63,6 @@ int hibmc_de_init(struct hibmc_drm_private *priv);
int hibmc_vdac_init(struct hibmc_drm_private *priv);
int hibmc_mm_init(struct hibmc_drm_private *hibmc);
-void hibmc_mm_fini(struct hibmc_drm_private *hibmc);
-int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args);
int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector);
-extern const struct drm_mode_config_funcs hibmc_mode_funcs;
-
#endif
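/*
 * Standalone sketch (hypothetical types, not driver code) of the pattern
 * the hunk above switches to: instead of a dev_private pointer, the
 * drm_device is embedded in the driver-private struct and recovered with
 * container_of(), so no separate allocation or back-pointer is needed.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_device {		/* stands in for struct drm_device */
	int dummy;
};

struct demo_private {		/* stands in for struct hibmc_drm_private */
	unsigned long fb_size;
	struct demo_device dev;	/* embedded, not a pointer */
};

static struct demo_private *to_demo_private(struct demo_device *dev)
{
	return container_of(dev, struct demo_private, dev);
}

int main(void)
{
	struct demo_private priv = { .fb_size = 4096 };
	struct demo_device *dev = &priv.dev;

	/* Recovers the enclosing private struct from the embedded device. */
	printf("fb_size = %lu, same object: %d\n",
	       to_demo_private(dev)->fb_size, to_demo_private(dev) == &priv);
	return 0;
}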
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
index 86d712090d87..410bd019bb35 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
@@ -83,7 +83,7 @@ int hibmc_ddc_create(struct drm_device *drm_dev,
connector->adapter.owner = THIS_MODULE;
connector->adapter.class = I2C_CLASS_DDC;
snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
- connector->adapter.dev.parent = &drm_dev->pdev->dev;
+ connector->adapter.dev.parent = drm_dev->dev;
i2c_set_adapdata(&connector->adapter, connector);
connector->adapter.algo_data = &connector->bit_data;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 74e26c27d878..c228091fb0e6 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
+#include <drm/drm_simple_kms_helper.h>
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
@@ -42,12 +43,6 @@ out:
return count;
}
-static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
static void hibmc_connector_destroy(struct drm_connector *connector)
{
struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
@@ -59,7 +54,6 @@ static void hibmc_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs
hibmc_connector_helper_funcs = {
.get_modes = hibmc_connector_get_modes,
- .mode_valid = hibmc_connector_mode_valid,
};
static const struct drm_connector_funcs hibmc_connector_funcs = {
@@ -90,15 +84,12 @@ static const struct drm_encoder_helper_funcs hibmc_encoder_helper_funcs = {
.mode_set = hibmc_encoder_mode_set,
};
-static const struct drm_encoder_funcs hibmc_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
int hibmc_vdac_init(struct hibmc_drm_private *priv)
{
- struct drm_device *dev = priv->dev;
+ struct drm_device *dev = &priv->dev;
struct hibmc_connector *hibmc_connector = &priv->connector;
struct drm_encoder *encoder = &priv->encoder;
+ struct drm_crtc *crtc = &priv->crtc;
struct drm_connector *connector = &hibmc_connector->base;
int ret;
@@ -108,9 +99,8 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
return ret;
}
- encoder->possible_crtcs = 0x1;
- ret = drm_encoder_init(dev, encoder, &hibmc_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
if (ret) {
drm_err(dev, "failed to init encoder: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
deleted file mode 100644
index 602ece11bb4a..000000000000
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Hisilicon Hibmc SoC drm driver
- *
- * Based on the bochs drm driver.
- *
- * Copyright (c) 2016 Huawei Limited.
- *
- * Author:
- * Rongrong Zou <zourongrong@huawei.com>
- * Rongrong Zou <zourongrong@gmail.com>
- * Jianhua Li <lijianhua@huawei.com>
- */
-
-#include <linux/pci.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_gem.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_print.h>
-
-#include "hibmc_drm_drv.h"
-
-int hibmc_mm_init(struct hibmc_drm_private *hibmc)
-{
- struct drm_vram_mm *vmm;
- int ret;
- struct drm_device *dev = hibmc->dev;
-
- vmm = drm_vram_helper_alloc_mm(dev,
- pci_resource_start(dev->pdev, 0),
- hibmc->fb_size);
- if (IS_ERR(vmm)) {
- ret = PTR_ERR(vmm);
- drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
-{
- if (!hibmc->dev->vram_mm)
- return;
-
- drm_vram_helper_release_mm(hibmc->dev);
-}
-
-int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args);
-}
-
-const struct drm_mode_config_funcs hibmc_mode_funcs = {
- .mode_valid = drm_vram_helper_mode_valid,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
- .fb_create = drm_gem_fb_create,
-};
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 25cd9788a4d5..72a38f28393f 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -19,6 +19,8 @@ config DRM_I915_WERROR
config DRM_I915_DEBUG
bool "Enable additional driver debugging"
depends on DRM_I915
+ depends on EXPERT # only for developers
+ depends on !COMPILE_TEST # never built by robots
select DEBUG_FS
select PREEMPT_COUNT
select I2C_CHARDEV
@@ -31,10 +33,13 @@ config DRM_I915_DEBUG
select DRM_DEBUG_SELFTEST
select DMABUF_SELFTESTS
select SW_SYNC # signaling validation framework (igt/syncobj*)
+ select DRM_I915_WERROR
+ select DRM_I915_DEBUG_GEM
+ select DRM_I915_DEBUG_GEM_ONCE
+ select DRM_I915_DEBUG_MMIO
+ select DRM_I915_DEBUG_RUNTIME_PM
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
- select DRM_I915_DEBUG_RUNTIME_PM
- select DRM_I915_DEBUG_MMIO
default n
help
Choose this option to turn on extra driver debugging that may affect
@@ -69,6 +74,21 @@ config DRM_I915_DEBUG_GEM
If in doubt, say "N".
+config DRM_I915_DEBUG_GEM_ONCE
+ bool "Make a GEM debug failure fatal"
+ default n
+ depends on DRM_I915_DEBUG_GEM
+ help
+ During development, we often only want the very first failure
+ as that would otherwise be lost in the deluge of subsequent
+ failures. However, more casual testers may not want to trigger
+ a hard BUG_ON and hope that the system remains sufficiently usable
+ to capture a bug report in situ.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_ERRLOG_GEM
bool "Insert extra logging (very verbose) for common GEM errors"
default n
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 6d9e81ea67f4..2385a7505f5d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -21,7 +21,6 @@ subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
-subdir-ccflags-y += $(call cc-disable-warning, uninitialized)
subdir-ccflags-y += $(call cc-disable-warning, frame-address)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
@@ -59,6 +58,7 @@ i915-y += i915_drv.o \
# core library code
i915-y += \
+ dma_resv_utils.o \
i915_memcpy.o \
i915_mm.o \
i915_sw_fence.o \
@@ -83,6 +83,7 @@ gt-y += \
gt/gen6_engine_cs.o \
gt/gen6_ppgtt.o \
gt/gen7_renderclear.o \
+ gt/gen8_engine_cs.o \
gt/gen8_ppgtt.o \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
@@ -92,6 +93,7 @@ gt-y += \
gt/intel_engine_heartbeat.o \
gt/intel_engine_pm.o \
gt/intel_engine_user.o \
+ gt/intel_execlists_submission.o \
gt/intel_ggtt.o \
gt/intel_ggtt_fencing.o \
gt/intel_gt.o \
@@ -107,6 +109,7 @@ gt-y += \
gt/intel_mocs.o \
gt/intel_ppgtt.o \
gt/intel_rc6.o \
+ gt/intel_region_lmem.o \
gt/intel_renderstate.o \
gt/intel_reset.o \
gt/intel_ring.o \
@@ -132,6 +135,7 @@ gem-y += \
gem/i915_gem_clflush.o \
gem/i915_gem_client_blt.o \
gem/i915_gem_context.o \
+ gem/i915_gem_create.o \
gem/i915_gem_dmabuf.o \
gem/i915_gem_domain.o \
gem/i915_gem_execbuffer.o \
@@ -167,7 +171,6 @@ i915-y += \
i915_scheduler.o \
i915_trace_points.o \
i915_vma.o \
- intel_region_lmem.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
@@ -197,13 +200,17 @@ i915-y += \
display/intel_color.o \
display/intel_combo_phy.o \
display/intel_connector.o \
+ display/intel_crtc.o \
display/intel_csr.o \
+ display/intel_cursor.o \
display/intel_display.o \
display/intel_display_power.o \
display/intel_dpio_phy.o \
+ display/intel_dpll.o \
display/intel_dpll_mgr.o \
display/intel_dsb.o \
display/intel_fbc.o \
+ display/intel_fdi.o \
display/intel_fifo_underrun.o \
display/intel_frontbuffer.o \
display/intel_global_state.o \
@@ -215,7 +222,8 @@ i915-y += \
display/intel_quirks.o \
display/intel_sprite.o \
display/intel_tc.o \
- display/intel_vga.o
+ display/intel_vga.o \
+ display/i9xx_plane.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
@@ -234,6 +242,7 @@ i915-y += \
display/intel_crt.o \
display/intel_ddi.o \
display/intel_dp.o \
+ display/intel_dp_aux.o \
display/intel_dp_aux_backlight.o \
display/intel_dp_hdcp.o \
display/intel_dp_link_training.o \
@@ -247,9 +256,11 @@ i915-y += \
display/intel_lspcon.o \
display/intel_lvds.o \
display/intel_panel.o \
+ display/intel_pps.o \
display/intel_sdvo.o \
display/intel_tv.o \
display/intel_vdsc.o \
+ display/intel_vrr.o \
display/vlv_dsi.o \
display/vlv_dsi_pll.o
@@ -284,17 +295,9 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
# exclude some broken headers from the test coverage
no-header-test := \
- display/intel_vbt_defs.h \
- gvt/execlist.h \
- gvt/fb_decoder.h \
- gvt/gtt.h \
- gvt/gvt.h \
- gvt/interrupt.h \
- gvt/mmio_context.h \
- gvt/mpt.h \
- gvt/scheduler.h
+ display/intel_vbt_defs.h
-extra-$(CONFIG_DRM_I915_WERROR) += \
+always-$(CONFIG_DRM_I915_WERROR) += \
$(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \
$(shell cd $(srctree)/$(src) && find * -name '*.h')))
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
new file mode 100644
index 000000000000..e3e69e6cef65
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -0,0 +1,926 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_display_types.h"
+#include "intel_sprite.h"
+#include "i9xx_plane.h"
+
+/* Primary plane formats for gen <= 3 */
+static const u32 i8xx_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+};
+
+/* Primary plane formats for ivb (no fp16 due to hw issue) */
+static const u32 ivb_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+};
+
+/* Primary plane formats for gen >= 4, except ivb */
+static const u32 i965_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XBGR16161616F,
+};
+
+/* Primary plane formats for vlv/chv */
+static const u32 vlv_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XBGR16161616F,
+};
+
+static const u64 i9xx_format_modifiers[] = {
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XRGB8888:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED;
+ default:
+ return false;
+ }
+}
+
+static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ break;
+ default:
+ return false;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XBGR16161616F:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED;
+ default:
+ return false;
+ }
+}
+
+static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ if (!HAS_FBC(dev_priv))
+ return false;
+
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return i9xx_plane == PLANE_A; /* tied to pipe A */
+ else if (IS_IVYBRIDGE(dev_priv))
+ return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
+ i9xx_plane == PLANE_C;
+ else if (INTEL_GEN(dev_priv) >= 4)
+ return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
+ else
+ return i9xx_plane == PLANE_A;
+}
+
+static bool i9xx_plane_has_windowing(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ return i9xx_plane == PLANE_B;
+ else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ return false;
+ else if (IS_GEN(dev_priv, 4))
+ return i9xx_plane == PLANE_C;
+ else
+ return i9xx_plane == PLANE_B ||
+ i9xx_plane == PLANE_C;
+}
+
+static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ u32 dspcntr;
+
+ dspcntr = DISPLAY_PLANE_ENABLE;
+
+ if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
+ IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ dspcntr |= DISPPLANE_BGRX555;
+ break;
+ case DRM_FORMAT_ARGB1555:
+ dspcntr |= DISPPLANE_BGRA555;
+ break;
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISPPLANE_BGRX565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dspcntr |= DISPPLANE_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRA888;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBA888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
+ break;
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRA101010;
+ break;
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBA101010;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ dspcntr |= DISPPLANE_RGBX161616;
+ break;
+ default:
+ MISSING_CASE(fb->format->format);
+ return 0;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 4 &&
+ fb->modifier == I915_FORMAT_MOD_X_TILED)
+ dspcntr |= DISPPLANE_TILED;
+
+ if (rotation & DRM_MODE_ROTATE_180)
+ dspcntr |= DISPPLANE_ROTATE_180;
+
+ if (rotation & DRM_MODE_REFLECT_X)
+ dspcntr |= DISPPLANE_MIRROR;
+
+ return dspcntr;
+}
+
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_x, src_y, src_w;
+ u32 offset;
+ int ret;
+
+ ret = intel_plane_compute_gtt(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
+
+ /* Undocumented hardware limit on i965/g4x/vlv/chv */
+ if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
+ return -EINVAL;
+
+ intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
+ plane_state, 0);
+ else
+ offset = 0;
+
+ /*
+ * When using an X-tiled surface the plane starts to
+ * misbehave if the x offset + width exceeds the stride.
+ * hsw/bdw: underrun galore
+ * ilk/snb/ivb: wrap to the next tile row mid scanout
+ * i965/g4x: so far appear immune to this
+ * vlv/chv: TODO check
+ *
+ * Linear surfaces seem to work just fine, even on hsw/bdw
+ * despite them not using the linear offset anymore.
+ */
+ if (INTEL_GEN(dev_priv) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) {
+ u32 alignment = intel_surf_alignment(fb, 0);
+ int cpp = fb->format->cpp[0];
+
+ while ((src_x + src_w) * cpp > plane_state->color_plane[0].stride) {
+ if (offset == 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Unable to find suitable display surface offset due to X-tiling\n");
+ return -EINVAL;
+ }
+
+ offset = intel_plane_adjust_aligned_offset(&src_x, &src_y, plane_state, 0,
+ offset, offset - alignment);
+ }
+ }
+
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate_to(&plane_state->uapi.src,
+ src_x << 16, src_y << 16);
+
+ /* HSW/BDW do this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ if (rotation & DRM_MODE_ROTATE_180) {
+ src_x += src_w - 1;
+ src_y += src_h - 1;
+ } else if (rotation & DRM_MODE_REFLECT_X) {
+ src_x += src_w - 1;
+ }
+ }
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ drm_WARN_ON(&dev_priv->drm, src_x > 8191 || src_y > 4095);
+ } else if (INTEL_GEN(dev_priv) >= 4 &&
+ fb->modifier == I915_FORMAT_MOD_X_TILED) {
+ drm_WARN_ON(&dev_priv->drm, src_x > 4095 || src_y > 4095);
+ }
+
+ plane_state->color_plane[0].offset = offset;
+ plane_state->color_plane[0].x = src_x;
+ plane_state->color_plane[0].y = src_y;
+
+ return 0;
+}
+
+static int
+i9xx_plane_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ int ret;
+
+ ret = chv_plane_check_rotation(plane_state);
+ if (ret)
+ return ret;
+
+ ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ i9xx_plane_has_windowing(plane));
+ if (ret)
+ return ret;
+
+ ret = i9xx_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
+static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dspcntr = 0;
+
+ if (crtc_state->gamma_enable)
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
+
+ if (INTEL_GEN(dev_priv) < 5)
+ dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
+
+ return dspcntr;
+}
+
+static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ /*
+ * g4x bspec says 64bpp pixel rate can't exceed 80%
+ * of cdclk when the sprite plane is enabled on the
+ * same pipe. ilk/snb bspec says 64bpp pixel rate is
+ * never allowed to exceed 80% of cdclk. Let's just go
+ * with the ilk/snb limit always.
+ */
+ if (cpp == 8) {
+ *num = 10;
+ *den = 8;
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* two pixels per clock with double wide pipe */
+ if (crtc_state->double_wide)
+ den *= 2;
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
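/*
 * Standalone sketch of the arithmetic above: with cpp == 8 (64bpp) the
 * ratio is 10/8, i.e. cdclk must be at least 125% of the pixel rate
 * (equivalently, 64bpp pixel rate may not exceed 80% of cdclk); a double
 * wide pipe pushes two pixels per clock, halving the requirement.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int min_cdclk(unsigned int pixel_rate, unsigned int cpp,
			      int double_wide)
{
	unsigned int num = (cpp == 8) ? 10 : 1;
	unsigned int den = (cpp == 8) ? 8 : 1;

	if (double_wide)
		den *= 2;
	return DIV_ROUND_UP(pixel_rate * num, den);
}

int main(void)
{
	/* e.g. 148500 kHz pixel rate at 64bpp -> 185625 kHz minimum cdclk */
	printf("%u\n", min_cdclk(148500, 8, 0));
	/* 32bpp keeps the 1:1 ratio -> 148500 kHz */
	printf("%u\n", min_cdclk(148500, 4, 0));
	return 0;
}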
+
+static void i9xx_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ u32 linear_offset;
+ int x = plane_state->color_plane[0].x;
+ int y = plane_state->color_plane[0].y;
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ int crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ int crtc_h = drm_rect_height(&plane_state->uapi.dst);
+ unsigned long irqflags;
+ u32 dspaddr_offset;
+ u32 dspcntr;
+
+ dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
+
+ linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ dspaddr_offset = plane_state->color_plane[0].offset;
+ else
+ dspaddr_offset = linear_offset;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
+ plane_state->color_plane[0].stride);
+
+ if (INTEL_GEN(dev_priv) < 4) {
+ /*
+ * PLANE_A doesn't actually have a full window
+ * generator but let's assume we still need to
+ * program whatever is there.
+ */
+ intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
+ } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+ intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
+ intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
+ }
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
+ (y << 16) | x);
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
+ linear_offset);
+ intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
+ (y << 16) | x);
+ }
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+ if (INTEL_GEN(dev_priv) >= 4)
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ else
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i9xx_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ unsigned long irqflags;
+ u32 dspcntr;
+
+ /*
+ * DSPCNTR pipe gamma enable on g4x+ and pipe csc
+ * enable on ilk+ affect the pipe bottom color as
+ * well, so we must configure them even if the plane
+ * is disabled.
+ *
+ * On pre-g4x there is no way to gamma correct the
+ * pipe bottom color but we'll keep on doing this
+ * anyway so that the crtc state readout works correctly.
+ */
+ dspcntr = i9xx_plane_ctl_crtc(crtc_state);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+ if (INTEL_GEN(dev_priv) >= 4)
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
+ else
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+g4x_primary_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ bool async_flip)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
+ u32 dspaddr_offset = plane_state->color_plane[0].offset;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ unsigned long irqflags;
+
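+ /* Select async flip in the plane control; the DSPSURF write below arms the flip */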
+ if (async_flip)
+ dspcntr |= DISPPLANE_ASYNC_FLIP;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+vlv_primary_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ bool async_flip)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 dspaddr_offset = plane_state->color_plane[0].offset;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ intel_de_write_fw(dev_priv, DSPADDR_VLV(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+bdw_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+bdw_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ivb_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ivb_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ilk_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ilk_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+vlv_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ i915_enable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+vlv_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ i915_disable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ intel_wakeref_t wakeref;
+ bool ret;
+ u32 val;
+
+ /*
+ * Not 100% correct for planes that can move between pipes,
+ * but that's only the case for gen2-4 which don't have any
+ * display power wells.
+ */
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
+
+ ret = val & DISPLAY_PLANE_ENABLE;
+
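+ /* On gen4 and earlier the plane can be assigned to either pipe; read back the pipe select bits */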
+ if (INTEL_GEN(dev_priv) >= 5)
+ *pipe = plane->pipe;
+ else
+ *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+ DISPPLANE_SEL_PIPE_SHIFT;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static unsigned int
+hsw_primary_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
+ return min(8192 * cpp, 32 * 1024);
+}
+
+static unsigned int
+ilk_primary_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return min(4096 * cpp, 32 * 1024);
+ else
+ return 32 * 1024;
+}
+
+unsigned int
+i965_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return min(4096 * cpp, 16 * 1024);
+ else
+ return 32 * 1024;
+}
+
+static unsigned int
+i9xx_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+ if (INTEL_GEN(dev_priv) >= 3) {
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return 8*1024;
+ else
+ return 16*1024;
+ } else {
+ if (plane->i9xx_plane == PLANE_C)
+ return 4*1024;
+ else
+ return 8*1024;
+ }
+}
+
+static const struct drm_plane_funcs i965_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = i965_plane_format_mod_supported,
+};
+
+static const struct drm_plane_funcs i8xx_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = i8xx_plane_format_mod_supported,
+};
+
+struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ struct intel_plane *plane;
+ const struct drm_plane_funcs *plane_funcs;
+ unsigned int supported_rotations;
+ const u32 *formats;
+ int num_formats;
+ int ret, zpos;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ return skl_universal_plane_create(dev_priv, pipe,
+ PLANE_PRIMARY);
+
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
+
+ plane->pipe = pipe;
+ /*
+ * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
+ * port are hooked to pipe B. Hence we want plane A feeding pipe B.
+ */
+ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
+ INTEL_NUM_PIPES(dev_priv) == 2)
+ plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
+ else
+ plane->i9xx_plane = (enum i9xx_plane_id) pipe;
+ plane->id = PLANE_PRIMARY;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
+
+ plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
+ if (plane->has_fbc) {
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
+ }
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ formats = vlv_primary_formats;
+ num_formats = ARRAY_SIZE(vlv_primary_formats);
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ /*
+ * WaFP16GammaEnabling:ivb
+ * "Workaround : When using the 64-bit format, the plane
+ * output on each color channel has one quarter amplitude.
+ * It can be brought up to full amplitude by using pipe
+ * gamma correction or pipe color space conversion to
+ * multiply the plane output by four."
+ *
+ * There is no dedicated plane gamma for the primary plane,
+ * and using the pipe gamma/csc could conflict with other
+ * planes, so we choose not to expose fp16 on IVB primary
+ * planes. HSW primary planes no longer have this problem.
+ */
+ if (IS_IVYBRIDGE(dev_priv)) {
+ formats = ivb_primary_formats;
+ num_formats = ARRAY_SIZE(ivb_primary_formats);
+ } else {
+ formats = i965_primary_formats;
+ num_formats = ARRAY_SIZE(i965_primary_formats);
+ }
+ } else {
+ formats = i8xx_primary_formats;
+ num_formats = ARRAY_SIZE(i8xx_primary_formats);
+ }
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ plane_funcs = &i965_plane_funcs;
+ else
+ plane_funcs = &i8xx_plane_funcs;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ plane->min_cdclk = vlv_plane_min_cdclk;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->min_cdclk = hsw_plane_min_cdclk;
+ else if (IS_IVYBRIDGE(dev_priv))
+ plane->min_cdclk = ivb_plane_min_cdclk;
+ else
+ plane->min_cdclk = i9xx_plane_min_cdclk;
+
+ if (HAS_GMCH(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 4)
+ plane->max_stride = i965_plane_max_stride;
+ else
+ plane->max_stride = i9xx_plane_max_stride;
+ } else {
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->max_stride = hsw_primary_max_stride;
+ else
+ plane->max_stride = ilk_primary_max_stride;
+ }
+
+ plane->update_plane = i9xx_update_plane;
+ plane->disable_plane = i9xx_disable_plane;
+ plane->get_hw_state = i9xx_plane_get_hw_state;
+ plane->check_plane = i9xx_plane_check;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ plane->async_flip = vlv_primary_async_flip;
+ plane->enable_flip_done = vlv_primary_enable_flip_done;
+ plane->disable_flip_done = vlv_primary_disable_flip_done;
+ } else if (IS_BROADWELL(dev_priv)) {
+ plane->need_async_flip_disable_wa = true;
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = bdw_primary_enable_flip_done;
+ plane->disable_flip_done = bdw_primary_disable_flip_done;
+ } else if (INTEL_GEN(dev_priv) >= 7) {
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = ivb_primary_enable_flip_done;
+ plane->disable_flip_done = ivb_primary_disable_flip_done;
+ } else if (INTEL_GEN(dev_priv) >= 5) {
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = ilk_primary_enable_flip_done;
+ plane->disable_flip_done = ilk_primary_disable_flip_done;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ 0, plane_funcs,
+ formats, num_formats,
+ i9xx_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY,
+ "primary %c", pipe_name(pipe));
+ else
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ 0, plane_funcs,
+ formats, num_formats,
+ i9xx_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY,
+ "plane %c",
+ plane_name(plane->i9xx_plane));
+ if (ret)
+ goto fail;
+
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X;
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
+ } else {
+ supported_rotations = DRM_MODE_ROTATE_0;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&plane->base,
+ DRM_MODE_ROTATE_0,
+ supported_rotations);
+
+ zpos = 0;
+ drm_plane_create_zpos_immutable_property(&plane->base, zpos);
+
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+
+ return plane;
+
+fail:
+ intel_plane_free(plane);
+
+ return ERR_PTR(ret);
+}
+
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h
new file mode 100644
index 000000000000..ca963c2a8457
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _I9XX_PLANE_H_
+#define _I9XX_PLANE_H_
+
+#include <linux/types.h>
+
+enum pipe;
+struct drm_i915_private;
+struct intel_plane;
+struct intel_plane_state;
+
+unsigned int i965_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation);
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+
+struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index b3533a32f8ba..9d245a689323 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1535,6 +1535,9 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
vdsc_cfg->convert_rgb = true;
+ /* FIXME: initialize from VBT */
+ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+
ret = intel_dsc_compute_params(encoder, crtc_state);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 7e9f84b00859..4683f98f7e54 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -312,10 +312,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
int ret;
intel_plane_set_invisible(new_crtc_state, new_plane_state);
+ new_crtc_state->enabled_planes &= ~BIT(plane->id);
if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
return 0;
+ new_crtc_state->enabled_planes |= BIT(plane->id);
+
ret = plane->check_plane(new_crtc_state, new_plane_state);
if (ret)
return ret;
@@ -449,7 +452,7 @@ void intel_update_plane(struct intel_plane *plane,
trace_intel_update_plane(&plane->base, crtc);
if (crtc_state->uapi.async_flip && plane->async_flip)
- plane->async_flip(plane, crtc_state, plane_state);
+ plane->async_flip(plane, crtc_state, plane_state, true);
else
plane->update_plane(plane, crtc_state, plane_state);
}
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 4cc949b228f2..987cf509337f 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1623,6 +1623,13 @@ static const u8 icp_ddc_pin_map[] = {
[TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP,
};
+static const u8 rkl_pch_tgp_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [RKL_DDC_BUS_DDI_D] = GMBUS_PIN_9_TC1_ICP,
+ [RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP,
+};
+
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{
const u8 *ddc_pin_map;
@@ -1630,6 +1637,9 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) {
return vbt_pin;
+ } else if (IS_ROCKETLAKE(dev_priv) && INTEL_PCH_TYPE(dev_priv) == PCH_TGP) {
+ ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
@@ -2555,16 +2565,11 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
crtc_state->dsc.slice_count);
/*
- * FIXME: Use VBT rc_buffer_block_size and rc_buffer_size for the
- * implementation specific physical rate buffer size. Currently we use
- * the required rate buffer model size calculated in
- * drm_dsc_compute_rc_parameters() according to VESA DSC Annex E.
- *
* The VBT rc_buffer_block_size and rc_buffer_size definitions
- * correspond to DP 1.4 DPCD offsets 0x62 and 0x63. The DP DSC
- * implementation should also use the DPCD (or perhaps VBT for eDP)
- * provided value for the buffer size.
+ * correspond to DP 1.4 DPCD offsets 0x62 and 0x63.
*/
+ vdsc_cfg->rc_model_size = drm_dsc_dp_rc_buffer_size(dsc->rc_buffer_block_size,
+ dsc->rc_buffer_size);
/* FIXME: DSI spec says bpc + 1 for this one */
vdsc_cfg->line_buf_depth = VBT_DSC_LINE_BUFFER_DEPTH(dsc->line_buffer_depth);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index bd060404d249..4b5a30ac84bc 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -20,76 +20,9 @@ struct intel_qgv_point {
struct intel_qgv_info {
struct intel_qgv_point points[I915_NUM_QGV_POINTS];
u8 num_points;
- u8 num_channels;
u8 t_bl;
- enum intel_dram_type dram_type;
};
-static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
- struct intel_qgv_info *qi)
-{
- u32 val = 0;
- int ret;
-
- ret = sandybridge_pcode_read(dev_priv,
- ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
- &val, NULL);
- if (ret)
- return ret;
-
- if (IS_GEN(dev_priv, 12)) {
- switch (val & 0xf) {
- case 0:
- qi->dram_type = INTEL_DRAM_DDR4;
- break;
- case 3:
- qi->dram_type = INTEL_DRAM_LPDDR4;
- break;
- case 4:
- qi->dram_type = INTEL_DRAM_DDR3;
- break;
- case 5:
- qi->dram_type = INTEL_DRAM_LPDDR3;
- break;
- default:
- MISSING_CASE(val & 0xf);
- break;
- }
- } else if (IS_GEN(dev_priv, 11)) {
- switch (val & 0xf) {
- case 0:
- qi->dram_type = INTEL_DRAM_DDR4;
- break;
- case 1:
- qi->dram_type = INTEL_DRAM_DDR3;
- break;
- case 2:
- qi->dram_type = INTEL_DRAM_LPDDR3;
- break;
- case 3:
- qi->dram_type = INTEL_DRAM_LPDDR4;
- break;
- default:
- MISSING_CASE(val & 0xf);
- break;
- }
- } else {
- MISSING_CASE(INTEL_GEN(dev_priv));
- qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */
- }
-
- qi->num_channels = (val & 0xf0) >> 4;
- qi->num_points = (val & 0xf00) >> 8;
-
- if (IS_GEN(dev_priv, 12))
- qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16;
- else if (IS_GEN(dev_priv, 11))
- qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
-
- return 0;
-}
-
static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
struct intel_qgv_point *sp,
int point)
@@ -139,11 +72,15 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
struct intel_qgv_info *qi)
{
+ const struct dram_info *dram_info = &dev_priv->dram_info;
int i, ret;
- ret = icl_pcode_read_mem_global_info(dev_priv, qi);
- if (ret)
- return ret;
+ qi->num_points = dram_info->num_qgv_points;
+
+ if (IS_GEN(dev_priv, 12))
+ qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 16;
+ else if (IS_GEN(dev_priv, 11))
+ qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
if (drm_WARN_ON(&dev_priv->drm,
qi->num_points > ARRAY_SIZE(qi->points)))
@@ -209,7 +146,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
{
struct intel_qgv_info qi = {};
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels;
+ int num_channels = dev_priv->dram_info.num_channels;
int deinterleave;
int ipqdepth, ipqdepthpch;
int dclk_max;
@@ -222,7 +159,6 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
- num_channels = qi.num_channels;
deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
dclk_max = icl_sagv_max_dclk(&qi);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index c449d28d0560..2e878cc274b7 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2415,8 +2415,7 @@ static int intel_modeset_all_pipes(struct intel_atomic_state *state)
if (ret)
return ret;
- ret = drm_atomic_add_affected_planes(&state->base,
- &crtc->base);
+ ret = intel_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
@@ -2710,8 +2709,8 @@ static int dg1_rawclk(struct drm_i915_private *dev_priv)
* DG1 always uses a 38.4 MHz rawclk. The bspec tells us
* "Program Numerator=2, Denominator=4, Divider=37 decimal."
*/
- I915_WRITE(PCH_RAWCLK_FREQ,
- CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2));
+ intel_de_write(dev_priv, PCH_RAWCLK_FREQ,
+ CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2));
return 38400;
}
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 172d398081ee..ff7dcb7088bf 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -1485,6 +1485,7 @@ static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
static int ivb_color_check(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
bool limited_color_range = ilk_csc_limited_range(crtc_state);
int ret;
@@ -1492,6 +1493,13 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
+ crtc_state->hw.ctm) {
+ drm_dbg_kms(&dev_priv->drm,
+ "YCBCR and CTM together are not possible\n");
+ return -EINVAL;
+ }
+
crtc_state->gamma_enable =
(crtc_state->hw.gamma_lut ||
crtc_state->hw.degamma_lut) &&
@@ -1525,12 +1533,20 @@ static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state)
static int glk_color_check(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int ret;
ret = check_luts(crtc_state);
if (ret)
return ret;
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
+ crtc_state->hw.ctm) {
+ drm_dbg_kms(&dev_priv->drm,
+ "YCBCR and CTM together are not possible\n");
+ return -EINVAL;
+ }
+
crtc_state->gamma_enable =
crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes;
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index d5ad61e4083e..996ae0608a62 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -427,10 +427,22 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
u32 val;
if (phy == PHY_A &&
- !icl_combo_phy_verify_state(dev_priv, phy))
- drm_warn(&dev_priv->drm,
- "Combo PHY %c HW state changed unexpectedly\n",
- phy_name(phy));
+ !icl_combo_phy_verify_state(dev_priv, phy)) {
+ if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) {
+ /*
+ * A known problem with old ifwi:
+ * https://gitlab.freedesktop.org/drm/intel/-/issues/2411
+ * Suppress the warning for CI. Remove ASAP!
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "Combo PHY %c HW state changed unexpectedly\n",
+ phy_name(phy));
+ } else {
+ drm_warn(&dev_priv->drm,
+ "Combo PHY %c HW state changed unexpectedly\n",
+ phy_name(phy));
+ }
+ }
if (!has_phy_misc(dev_priv, phy))
goto skip_phy_misc;
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 406e96785c76..d5ceb7bdc14b 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -279,24 +279,17 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector)
}
void
-intel_attach_colorspace_property(struct drm_connector *connector)
+intel_attach_hdmi_colorspace_property(struct drm_connector *connector)
{
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_HDMIA:
- case DRM_MODE_CONNECTOR_HDMIB:
- if (drm_mode_create_hdmi_colorspace_property(connector))
- return;
- break;
- case DRM_MODE_CONNECTOR_DisplayPort:
- case DRM_MODE_CONNECTOR_eDP:
- if (drm_mode_create_dp_colorspace_property(connector))
- return;
- break;
- default:
- MISSING_CASE(connector->connector_type);
- return;
- }
+ if (!drm_mode_create_hdmi_colorspace_property(connector))
+ drm_object_attach_property(&connector->base,
+ connector->colorspace_property, 0);
+}
- drm_object_attach_property(&connector->base,
- connector->colorspace_property, 0);
+void
+intel_attach_dp_colorspace_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_dp_colorspace_property(connector))
+ drm_object_attach_property(&connector->base,
+ connector->colorspace_property, 0);
}
diff --git a/drivers/gpu/drm/i915/display/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h
index 93a7375c8196..661a37a3c6d8 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.h
+++ b/drivers/gpu/drm/i915/display/intel_connector.h
@@ -30,6 +30,7 @@ int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
void intel_attach_force_audio_property(struct drm_connector *connector);
void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
void intel_attach_aspect_ratio_property(struct drm_connector *connector);
-void intel_attach_colorspace_property(struct drm_connector *connector);
+void intel_attach_hdmi_colorspace_property(struct drm_connector *connector);
+void intel_attach_dp_colorspace_property(struct drm_connector *connector);
#endif /* __INTEL_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
new file mode 100644
index 000000000000..8e77ca7ddf11
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_plane_helper.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_color.h"
+#include "intel_crtc.h"
+#include "intel_cursor.h"
+#include "intel_display_debugfs.h"
+#include "intel_display_types.h"
+#include "intel_pipe_crc.h"
+#include "intel_sprite.h"
+#include "i9xx_plane.h"
+
+static void assert_vblank_disabled(struct drm_crtc *crtc)
+{
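+ /* drm_crtc_vblank_get() fails while vblanks are off; if it succeeds, warn and drop the reference */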
+ if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
+ drm_crtc_vblank_put(crtc);
+}
+
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
+
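+ /* No usable hw frame counter (max_vblank_count == 0): use the sw vblank count instead */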
+ if (!vblank->max_vblank_count)
+ return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
+
+ return crtc->base.funcs->get_vblank_counter(&crtc->base);
+}
+
+u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 mode_flags = crtc->mode_flags;
+
+ /*
+ * From Gen 11, in DSI command mode the frame counter won't have
+ * updated at the beginning of TE. If we want to use the hw
+ * counter, we would only find it updated at the next TE, hence
+ * we switch to the sw counter.
+ */
+ if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1))
+ return 0;
+
+ /*
+ * On i965gm the hardware frame counter reads
+ * zero when the TV encoder is enabled :(
+ */
+ if (IS_I965GM(dev_priv) &&
+ (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
+ return 0;
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ return 0xffffffff; /* full 32 bit counter */
+ else if (INTEL_GEN(dev_priv) >= 3)
+ return 0xffffff; /* only 24 bits of frame count */
+ else
+ return 0; /* Gen2 doesn't have a hardware frame counter */
+}
+
+void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ assert_vblank_disabled(&crtc->base);
+ drm_crtc_set_max_vblank_count(&crtc->base,
+ intel_crtc_max_vblank_count(crtc_state));
+ drm_crtc_vblank_on(&crtc->base);
+}
+
+void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ drm_crtc_vblank_off(&crtc->base);
+ assert_vblank_disabled(&crtc->base);
+}
+
+struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
+
+ if (crtc_state)
+ intel_crtc_state_reset(crtc_state, crtc);
+
+ return crtc_state;
+}
+
+void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+ struct intel_crtc *crtc)
+{
+ memset(crtc_state, 0, sizeof(*crtc_state));
+
+ __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
+
+ crtc_state->cpu_transcoder = INVALID_TRANSCODER;
+ crtc_state->master_transcoder = INVALID_TRANSCODER;
+ crtc_state->hsw_workaround_pipe = INVALID_PIPE;
+ crtc_state->scaler_state.scaler_id = -1;
+ crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
+}
+
+static struct intel_crtc *intel_crtc_alloc(void)
+{
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
+ if (!crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc_state = intel_crtc_state_alloc(crtc);
+ if (!crtc_state) {
+ kfree(crtc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ crtc->base.state = &crtc_state->uapi;
+ crtc->config = crtc_state;
+
+ return crtc;
+}
+
+static void intel_crtc_free(struct intel_crtc *crtc)
+{
+ intel_crtc_destroy_state(&crtc->base, crtc->base.state);
+ kfree(crtc);
+}
+
+static void intel_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(intel_crtc);
+}
+
+static int intel_crtc_late_register(struct drm_crtc *crtc)
+{
+ intel_crtc_debugfs_add(crtc);
+ return 0;
+}
+
+#define INTEL_CRTC_FUNCS \
+ .set_config = drm_atomic_helper_set_config, \
+ .destroy = intel_crtc_destroy, \
+ .page_flip = drm_atomic_helper_page_flip, \
+ .atomic_duplicate_state = intel_crtc_duplicate_state, \
+ .atomic_destroy_state = intel_crtc_destroy_state, \
+ .set_crc_source = intel_crtc_set_crc_source, \
+ .verify_crc_source = intel_crtc_verify_crc_source, \
+ .get_crc_sources = intel_crtc_get_crc_sources, \
+ .late_register = intel_crtc_late_register
+
+static const struct drm_crtc_funcs bdw_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = bdw_enable_vblank,
+ .disable_vblank = bdw_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs ilk_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = ilk_enable_vblank,
+ .disable_vblank = ilk_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs g4x_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = i965_enable_vblank,
+ .disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i965_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i965_enable_vblank,
+ .disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i915gm_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i915gm_enable_vblank,
+ .disable_vblank = i915gm_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i915_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i8xx_enable_vblank,
+ .disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i8xx_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ /* no hw vblank counter */
+ .enable_vblank = i8xx_enable_vblank,
+ .disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ struct intel_plane *primary, *cursor;
+ const struct drm_crtc_funcs *funcs;
+ struct intel_crtc *crtc;
+ int sprite, ret;
+
+ crtc = intel_crtc_alloc();
+ if (IS_ERR(crtc))
+ return PTR_ERR(crtc);
+
+ crtc->pipe = pipe;
+ crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
+
+ primary = intel_primary_plane_create(dev_priv, pipe);
+ if (IS_ERR(primary)) {
+ ret = PTR_ERR(primary);
+ goto fail;
+ }
+ crtc->plane_ids_mask |= BIT(primary->id);
+
+ for_each_sprite(dev_priv, pipe, sprite) {
+ struct intel_plane *plane;
+
+ plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ crtc->plane_ids_mask |= BIT(plane->id);
+ }
+
+ cursor = intel_cursor_plane_create(dev_priv, pipe);
+ if (IS_ERR(cursor)) {
+ ret = PTR_ERR(cursor);
+ goto fail;
+ }
+ crtc->plane_ids_mask |= BIT(cursor->id);
+
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
+ funcs = &g4x_crtc_funcs;
+ else if (IS_GEN(dev_priv, 4))
+ funcs = &i965_crtc_funcs;
+ else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
+ funcs = &i915gm_crtc_funcs;
+ else if (IS_GEN(dev_priv, 3))
+ funcs = &i915_crtc_funcs;
+ else
+ funcs = &i8xx_crtc_funcs;
+ } else {
+ if (INTEL_GEN(dev_priv) >= 8)
+ funcs = &bdw_crtc_funcs;
+ else
+ funcs = &ilk_crtc_funcs;
+ }
+
+ ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
+ &primary->base, &cursor->base,
+ funcs, "pipe %c", pipe_name(pipe));
+ if (ret)
+ goto fail;
+
+ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
+ dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
+ dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
+
+ if (INTEL_GEN(dev_priv) < 9) {
+ enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
+
+ BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+ dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
+ dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ drm_crtc_create_scaling_filter_property(&crtc->base,
+ BIT(DRM_SCALING_FILTER_DEFAULT) |
+ BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
+
+ intel_color_init(crtc);
+
+ intel_crtc_crc_init(crtc);
+
+ drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
+
+ return 0;
+
+fail:
+ intel_crtc_free(crtc);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
new file mode 100644
index 000000000000..08112d557411
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_CRTC_H_
+#define _INTEL_CRTC_H_
+
+#include <linux/types.h>
+
+enum pipe;
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+
+u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state);
+int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe);
+struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
+void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+ struct intel_crtc *crtc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
new file mode 100644
index 000000000000..21fe4d2753e9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -0,0 +1,806 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_fourcc.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_cursor.h"
+#include "intel_display_types.h"
+#include "intel_display.h"
+
+#include "intel_frontbuffer.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+
+/* Cursor formats */
+static const u32 intel_cursor_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+static const u64 cursor_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ u32 base;
+
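+ /* Physically addressed cursors use the dma address of their pages; everything else uses the GGTT offset */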
+ if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
+ base = sg_dma_address(obj->mm.pages->sgl);
+ else
+ base = intel_plane_ggtt_offset(plane_state);
+
+ return base + plane_state->color_plane[0].offset;
+}
+
+static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
+{
+ int x = plane_state->uapi.dst.x1;
+ int y = plane_state->uapi.dst.y1;
+ u32 pos = 0;
+
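+ /* CURPOS takes sign-magnitude coordinates: set the sign bit and program the absolute value */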
+ if (x < 0) {
+ pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+ x = -x;
+ }
+ pos |= x << CURSOR_X_SHIFT;
+
+ if (y < 0) {
+ pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+ y = -y;
+ }
+ pos |= y << CURSOR_Y_SHIFT;
+
+ return pos;
+}
+
+static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+ const struct drm_mode_config *config =
+ &plane_state->uapi.plane->dev->mode_config;
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
+
+ return width > 0 && width <= config->cursor_width &&
+ height > 0 && height <= config->cursor_height;
+}
+
+static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_x, src_y;
+ u32 offset;
+ int ret;
+
+ ret = intel_plane_compute_gtt(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
+
+ intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
+ offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
+ plane_state, 0);
+
+ if (src_x != 0 || src_y != 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Arbitrary cursor panning not supported\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate_to(&plane_state->uapi.src,
+ src_x << 16, src_y << 16);
+
+ /* ILK+ do this automagically in hardware */
+ if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ offset += (src_h * src_w - 1) * fb->format->cpp[0];
+ }
+
+ plane_state->color_plane[0].offset = offset;
+ plane_state->color_plane[0].x = src_x;
+ plane_state->color_plane[0].y = src_y;
+
+ return 0;
+}
+
+static int intel_check_cursor(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_rect src = plane_state->uapi.src;
+ const struct drm_rect dst = plane_state->uapi.dst;
+ int ret;
+
+ if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+ drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
+ return -EINVAL;
+ }
+
+ ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true);
+ if (ret)
+ return ret;
+
+ /* Use the unclipped src/dst rectangles, which we program to hw */
+ plane_state->uapi.src = src;
+ plane_state->uapi.dst = dst;
+
+ ret = intel_cursor_check_surface(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static unsigned int
+i845_cursor_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ return 2048;
+}
+
+static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ u32 cntl = 0;
+
+ if (crtc_state->gamma_enable)
+ cntl |= CURSOR_GAMMA_ENABLE;
+
+ return cntl;
+}
+
+static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ return CURSOR_ENABLE |
+ CURSOR_FORMAT_ARGB |
+ CURSOR_STRIDE(plane_state->color_plane[0].stride);
+}
+
+static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+ int width = drm_rect_width(&plane_state->uapi.dst);
+
+ /*
+ * 845g/865g are only limited by the width of their cursors;
+ * the height is arbitrary up to the precision of the register.
+ */
+ return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
+}
+
+static int i845_check_cursor(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ int ret;
+
+ ret = intel_check_cursor(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+ /* if we want to turn off the cursor ignore width and height */
+ if (!fb)
+ return 0;
+
+ /* Check for which cursor types we support */
+ if (!i845_cursor_size_ok(plane_state)) {
+ drm_dbg_kms(&i915->drm,
+ "Cursor dimension %dx%d not supported\n",
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
+ return -EINVAL;
+ }
+
+ drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
+
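+ /* i845/i865 only accept power-of-two cursor strides between 256 and 2048 bytes */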
+ switch (fb->pitches[0]) {
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ break;
+ default:
+ drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
+ fb->pitches[0]);
+ return -EINVAL;
+ }
+
+ plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
+static void i845_update_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 cntl = 0, base = 0, pos = 0, size = 0;
+ unsigned long irqflags;
+
+ if (plane_state && plane_state->uapi.visible) {
+ unsigned int width = drm_rect_width(&plane_state->uapi.dst);
+ unsigned int height = drm_rect_height(&plane_state->uapi.dst);
+
+ cntl = plane_state->ctl |
+ i845_cursor_ctl_crtc(crtc_state);
+
+ size = (height << 12) | width;
+
+ base = intel_cursor_base(plane_state);
+ pos = intel_cursor_position(plane_state);
+ }
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ /*
+ * On these chipsets we can only modify the base/size/stride
+ * whilst the cursor is disabled.
+ */
+ if (plane->cursor.base != base ||
+ plane->cursor.size != size ||
+ plane->cursor.cntl != cntl) {
+ intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
+ intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
+ intel_de_write_fw(dev_priv, CURSIZE, size);
+ intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
+ intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);
+
+ plane->cursor.base = base;
+ plane->cursor.size = size;
+ plane->cursor.cntl = cntl;
+ } else {
+ intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i845_disable_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ i845_update_cursor(plane, crtc_state, NULL);
+}
+
+static bool i845_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(PIPE_A);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+
+ *pipe = PIPE_A;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static unsigned int
+i9xx_cursor_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ return plane->base.dev->mode_config.cursor_width * 4;
+}
+
+static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 cntl = 0;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ return cntl;
+
+ if (crtc_state->gamma_enable)
+ cntl = MCURSOR_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ cntl |= MCURSOR_PIPE_CSC_ENABLE;
+
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+ cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
+
+ return cntl;
+}
+
+static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ u32 cntl = 0;
+
+ if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
+
+ switch (drm_rect_width(&plane_state->uapi.dst)) {
+ case 64:
+ cntl |= MCURSOR_MODE_64_ARGB_AX;
+ break;
+ case 128:
+ cntl |= MCURSOR_MODE_128_ARGB_AX;
+ break;
+ case 256:
+ cntl |= MCURSOR_MODE_256_ARGB_AX;
+ break;
+ default:
+ MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
+ return 0;
+ }
+
+ if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
+ cntl |= MCURSOR_ROTATE_180;
+
+ return cntl;
+}
+
+static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
+
+ if (!intel_cursor_size_ok(plane_state))
+ return false;
+
+ /* Cursor width is limited to a few power-of-two sizes */
+ switch (width) {
+ case 256:
+ case 128:
+ case 64:
+ break;
+ default:
+ return false;
+ }
+
+ /*
+ * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
+ * height from 8 lines up to the cursor width, when the
+ * cursor is not rotated. Everything else requires square
+ * cursors.
+ */
+ if (HAS_CUR_FBC(dev_priv) &&
+ plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
+ if (height < 8 || height > width)
+ return false;
+ } else {
+ if (height != width)
+ return false;
+ }
+
+ return true;
+}
+
+static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ enum pipe pipe = plane->pipe;
+ int ret;
+
+ ret = intel_check_cursor(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+ /* if we want to turn off the cursor ignore width and height */
+ if (!fb)
+ return 0;
+
+ /* Check for which cursor types we support */
+ if (!i9xx_cursor_size_ok(plane_state)) {
+ drm_dbg(&dev_priv->drm,
+ "Cursor dimension %dx%d not supported\n",
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
+ return -EINVAL;
+ }
+
+ drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
+
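+ /* The cursor buffer must be packed, i.e. the stride must match the cursor width exactly */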
+ if (fb->pitches[0] !=
+ drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid cursor stride (%u) (cursor width %d)\n",
+ fb->pitches[0],
+ drm_rect_width(&plane_state->uapi.dst));
+ return -EINVAL;
+ }
+
+ /*
+ * There's something wrong with the cursor on CHV pipe C.
+ * If it straddles the left edge of the screen then
+ * moving it away from the edge or disabling it often
+ * results in a pipe underrun, and often that can lead to
+ * dead pipe (constant underrun reported, and it scans
+ * out just a solid color). To recover from that, the
+ * display power well must be turned off and on again.
+ * Refuse to put the cursor into that compromised position.
+ */
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
+ plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "CHV cursor C not allowed to straddle the left screen edge\n");
+ return -EINVAL;
+ }
+
+ plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
+static void i9xx_update_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
+ unsigned long irqflags;
+
+ if (plane_state && plane_state->uapi.visible) {
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
+
+ cntl = plane_state->ctl |
+ i9xx_cursor_ctl_crtc(crtc_state);
+
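+ /* Non-square cursors need CUR_FBC_CTL to program the reduced height */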
+ if (width != height)
+ fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
+
+ base = intel_cursor_base(plane_state);
+ pos = intel_cursor_position(plane_state);
+ }
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ /*
+ * On some platforms writing CURCNTR first will also
+ * cause CURPOS to be armed by the CURBASE write.
+ * Without the CURCNTR write the CURPOS write would
+ * arm itself. Thus we always update CURCNTR before
+ * CURPOS.
+ *
+ * On other platforms CURPOS always requires the
+ * CURBASE write to arm the update. Additionally
+ * a write to any of the cursor registers will cancel
+ * an already armed cursor update. Thus leaving out
+ * the CURBASE write after CURPOS could lead to a
+ * cursor that doesn't appear to move, or even change
+ * shape. Thus we always write CURBASE.
+ *
+ * The other registers are armed by the CURBASE write
+ * except when the plane is getting enabled at which time
+ * the CURCNTR write arms the update.
+ */
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_write_cursor_wm(plane, crtc_state);
+
+ if (!intel_crtc_needs_modeset(crtc_state))
+ intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
+
+ if (plane->cursor.base != base ||
+ plane->cursor.size != fbc_ctl ||
+ plane->cursor.cntl != cntl) {
+ if (HAS_CUR_FBC(dev_priv))
+ intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
+ fbc_ctl);
+ intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
+ intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+ intel_de_write_fw(dev_priv, CURBASE(pipe), base);
+
+ plane->cursor.base = base;
+ plane->cursor.size = fbc_ctl;
+ plane->cursor.cntl = cntl;
+ } else {
+ intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+ intel_de_write_fw(dev_priv, CURBASE(pipe), base);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i9xx_disable_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ i9xx_update_cursor(plane, crtc_state, NULL);
+}
+
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ bool ret;
+ u32 val;
+
+ /*
+ * Not 100% correct for planes that can move between pipes,
+ * but that's only the case for gen2-3 which don't have any
+ * display power wells.
+ */
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
+
+ ret = val & MCURSOR_MODE;
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ *pipe = plane->pipe;
+ else
+ *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
+ MCURSOR_PIPE_SELECT_SHIFT;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ return modifier == DRM_FORMAT_MOD_LINEAR &&
+ format == DRM_FORMAT_ARGB8888;
+}
+
+static int
+intel_legacy_cursor_update(struct drm_plane *_plane,
+ struct drm_crtc *_crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct intel_plane *plane = to_intel_plane(_plane);
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_plane_state *new_plane_state;
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc_state *new_crtc_state;
+ int ret;
+
+ /*
+ * When the crtc is inactive or there is a modeset pending,
+ * wait for it to complete in the slowpath.
+ *
+ * FIXME bigjoiner fastpath would be good
+ */
+ if (!crtc_state->hw.active || intel_crtc_needs_modeset(crtc_state) ||
+ crtc_state->update_pipe || crtc_state->bigjoiner)
+ goto slow;
+
+ /*
+ * Don't do an async update if there is an outstanding commit modifying
+ * the plane. This prevents our async update's changes from getting
+ * overridden by a previous synchronous update's state.
+ */
+ if (old_plane_state->uapi.commit &&
+ !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
+ goto slow;
+
+ /*
+ * If any parameters change that may affect watermarks,
+ * take the slowpath. Only changing fb or position should be
+ * in the fastpath.
+ */
+ if (old_plane_state->uapi.crtc != &crtc->base ||
+ old_plane_state->uapi.src_w != src_w ||
+ old_plane_state->uapi.src_h != src_h ||
+ old_plane_state->uapi.crtc_w != crtc_w ||
+ old_plane_state->uapi.crtc_h != crtc_h ||
+ !old_plane_state->uapi.fb != !fb)
+ goto slow;
+
+ new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
+ if (!new_plane_state)
+ return -ENOMEM;
+
+ new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
+ if (!new_crtc_state) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
+
+ new_plane_state->uapi.src_x = src_x;
+ new_plane_state->uapi.src_y = src_y;
+ new_plane_state->uapi.src_w = src_w;
+ new_plane_state->uapi.src_h = src_h;
+ new_plane_state->uapi.crtc_x = crtc_x;
+ new_plane_state->uapi.crtc_y = crtc_y;
+ new_plane_state->uapi.crtc_w = crtc_w;
+ new_plane_state->uapi.crtc_h = crtc_h;
+
+ intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc);
+
+ ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
+ old_plane_state, new_plane_state);
+ if (ret)
+ goto out_free;
+
+ ret = intel_plane_pin_fb(new_plane_state);
+ if (ret)
+ goto out_free;
+
+ intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
+ ORIGIN_FLIP);
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
+ to_intel_frontbuffer(new_plane_state->hw.fb),
+ plane->frontbuffer_bit);
+
+ /* Swap plane state */
+ plane->base.state = &new_plane_state->uapi;
+
+ /*
+ * We cannot swap crtc_state as it may be in use by an atomic commit or
+ * page flip that's running simultaneously. If we swap crtc_state and
+ * destroy the old state, we will cause a use-after-free there.
+ *
+ * Only update active_planes, which is needed for our internal
+ * bookkeeping. Either value will do the right thing when updating
+ * planes atomically. If the cursor was part of the atomic update then
+ * we would have taken the slowpath.
+ */
+ crtc_state->active_planes = new_crtc_state->active_planes;
+
+ if (new_plane_state->uapi.visible)
+ intel_update_plane(plane, crtc_state, new_plane_state);
+ else
+ intel_disable_plane(plane, crtc_state);
+
+ intel_plane_unpin_fb(old_plane_state);
+
+out_free:
+ if (new_crtc_state)
+ intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
+ if (ret)
+ intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
+ else
+ intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
+ return ret;
+
+slow:
+ return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h, ctx);
+}
+
+static const struct drm_plane_funcs intel_cursor_plane_funcs = {
+ .update_plane = intel_legacy_cursor_update,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = intel_cursor_format_mod_supported,
+};
+
+struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct intel_plane *cursor;
+ int ret, zpos;
+
+ cursor = intel_plane_alloc();
+ if (IS_ERR(cursor))
+ return cursor;
+
+ cursor->pipe = pipe;
+ cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
+ cursor->id = PLANE_CURSOR;
+ cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
+
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
+ cursor->max_stride = i845_cursor_max_stride;
+ cursor->update_plane = i845_update_cursor;
+ cursor->disable_plane = i845_disable_cursor;
+ cursor->get_hw_state = i845_cursor_get_hw_state;
+ cursor->check_plane = i845_check_cursor;
+ } else {
+ cursor->max_stride = i9xx_cursor_max_stride;
+ cursor->update_plane = i9xx_update_cursor;
+ cursor->disable_plane = i9xx_disable_cursor;
+ cursor->get_hw_state = i9xx_cursor_get_hw_state;
+ cursor->check_plane = i9xx_check_cursor;
+ }
+
+ cursor->cursor.base = ~0;
+ cursor->cursor.cntl = ~0;
+
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
+ cursor->cursor.size = ~0;
+
+ ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
+ 0, &intel_cursor_plane_funcs,
+ intel_cursor_formats,
+ ARRAY_SIZE(intel_cursor_formats),
+ cursor_format_modifiers,
+ DRM_PLANE_TYPE_CURSOR,
+ "cursor %c", pipe_name(pipe));
+ if (ret)
+ goto fail;
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&cursor->base,
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_180);
+
+ zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
+ drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ drm_plane_enable_fb_damage_clips(&cursor->base);
+
+ drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
+
+ return cursor;
+
+fail:
+ intel_plane_free(cursor);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.h b/drivers/gpu/drm/i915/display/intel_cursor.h
new file mode 100644
index 000000000000..ce333bf4c2d5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cursor.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_CURSOR_H_
+#define _INTEL_CURSOR_H_
+
+enum pipe;
+struct drm_i915_private;
+struct intel_plane;
+
+struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index d5ace48b1ace..1bb40ec5fe5d 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -46,10 +46,12 @@
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_panel.h"
+#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
+#include "intel_vrr.h"
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
@@ -611,6 +613,34 @@ static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2[]
{ 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
};
+static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
+ { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xC, 0x60, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
+ { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xC, 0x58, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
struct icl_mg_phy_ddi_buf_trans {
u32 cri_txdeemph_override_11_6;
u32 cri_txdeemph_override_5_0;
@@ -766,6 +796,34 @@ static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_ho
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 1 */
};
+static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x2F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7D, 0x2A, 0x00, 0x15 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6E, 0x3E, 0x00, 0x01 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x50, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
+ { 0xC, 0x61, 0x33, 0x00, 0x0C }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2E, 0x00, 0x11 }, /* 350 900 8.2 */
+ { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x5F, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x5F, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7E, 0x36, 0x00, 0x09 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
static bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table)
{
return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
@@ -1093,6 +1151,12 @@ icl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
} else if (dev_priv->vbt.edp.low_vswing) {
*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
return icl_combo_phy_ddi_translations_edp_hbr2;
+ } else if (IS_DG1(dev_priv) && crtc_state->port_clock > 270000) {
+ *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_hbr2_hbr3);
+ return dg1_combo_phy_ddi_translations_dp_hbr2_hbr3;
+ } else if (IS_DG1(dev_priv)) {
+ *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_rbr_hbr);
+ return dg1_combo_phy_ddi_translations_dp_rbr_hbr;
}
return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
@@ -1259,7 +1323,10 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (crtc_state->port_clock > 270000) {
- if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
+ if (IS_ROCKETLAKE(dev_priv)) {
+ *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr2_hbr3);
+ return rkl_combo_phy_ddi_translations_dp_hbr2_hbr3;
+ } else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
*n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2);
return tgl_uy_combo_phy_ddi_translations_dp_hbr2;
} else {
@@ -1267,8 +1334,13 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
return tgl_combo_phy_ddi_translations_dp_hbr2;
}
} else {
- *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
- return tgl_combo_phy_ddi_translations_dp_hbr;
+ if (IS_ROCKETLAKE(dev_priv)) {
+ *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr);
+ return rkl_combo_phy_ddi_translations_dp_hbr;
+ } else {
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
+ return tgl_combo_phy_ddi_translations_dp_hbr;
+ }
}
}
@@ -2029,9 +2101,9 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
}
}
-int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
- enum transcoder cpu_transcoder,
- bool enable)
+int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
+ enum transcoder cpu_transcoder,
+ bool enable, u32 hdcp_mask)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2046,9 +2118,9 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (enable)
- tmp |= TRANS_DDI_HDCP_SIGNALLING;
+ tmp |= hdcp_mask;
else
- tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
+ tmp &= ~hdcp_mask;
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), tmp);
intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
@@ -2285,18 +2357,23 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
dig_port = enc_to_dig_port(encoder);
if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT)
- intel_display_power_get(dev_priv,
- dig_port->ddi_io_power_domain);
+ dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+ }
/*
* AUX power is only needed for (e)DP mode, and for HDMI mode on TC
* ports.
*/
if (intel_crtc_has_dp_encoder(crtc_state) ||
- intel_phy_is_tc(dev_priv, phy))
- intel_display_power_get(dev_priv,
- intel_ddi_main_link_aux_domain(dig_port));
+ intel_phy_is_tc(dev_priv, phy)) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->aux_wakeref);
+ dig_port->aux_wakeref =
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
+ }
}
void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
@@ -2626,15 +2703,11 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
ddi_translations = ehl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
else
ddi_translations = icl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
- if (!ddi_translations)
- return;
- if (level >= n_entries) {
- drm_dbg_kms(&dev_priv->drm,
- "DDI translation not found for level %d. Using %d instead.",
- level, n_entries - 1);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ return;
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
- }
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2754,14 +2827,15 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
int n_entries, ln;
u32 val;
+ if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+ return;
+
ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
- /* The table does not have values for level 3 and level 9. */
- if (level >= n_entries || level == 3 || level == 9) {
- drm_dbg_kms(&dev_priv->drm,
- "DDI translation not found for level %d. Using %d instead.",
- level, n_entries - 2);
- level = n_entries - 2;
- }
+
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ return;
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
+ level = n_entries - 1;
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
@@ -2891,9 +2965,14 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
u32 val, dpcnt_mask, dpcnt_val;
int n_entries, ln;
+ if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+ return;
+
ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
- if (level >= n_entries)
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ return;
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
@@ -3480,6 +3559,22 @@ i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
return DP_TP_STATUS(encoder->port);
}
+static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (!crtc_state->vrr.enable)
+ return;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_DOWNSPREAD_CTRL,
+ enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0) <= 0)
+ drm_dbg_kms(&i915->drm,
+ "Failed to set MSA_TIMING_PAR_IGNORE %s in the sink\n",
+ enable ? "enable" : "disable");
+}
+
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -3507,12 +3602,6 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
val |= DP_TP_CTL_FEC_ENABLE;
intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
-
- if (intel_de_wait_for_set(dev_priv,
- dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
- drm_err(&dev_priv->drm,
- "Timed out waiting for FEC Enable Status\n");
}
static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
@@ -3532,6 +3621,23 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
}
+static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_phy_is_combo(i915, phy)) {
+ bool lane_reversal =
+ dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+ intel_combo_phy_power_up_lanes(i915, phy, false,
+ crtc_state->lane_count,
+ lane_reversal);
+ }
+}
+
static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -3556,7 +3662,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
/* 2. Enable Panel Power if PPS is required */
- intel_edp_panel_on(intel_dp);
+ intel_pps_on(intel_dp);
/*
* 3. For non-TBT Type-C ports, set FIA lane count
@@ -3577,9 +3683,11 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT)
- intel_display_power_get(dev_priv,
- dig_port->ddi_io_power_domain);
+ dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+ }
/* 6. Program DP_MODE */
icl_program_mg_dp_mode(dig_port, crtc_state);
@@ -3621,14 +3729,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
* 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up
* the used lanes of the DDI.
*/
- if (intel_phy_is_combo(dev_priv, phy)) {
- bool lane_reversal =
- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
- intel_combo_phy_power_up_lanes(dev_priv, phy, false,
- crtc_state->lane_count,
- lane_reversal);
- }
+ intel_ddi_power_up_lanes(encoder, crtc_state);
/*
* 7.g Configure and enable DDI_BUF_CTL
@@ -3643,6 +3744,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+ intel_dp_configure_protocol_converter(intel_dp, crtc_state);
intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
/*
* DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit
@@ -3651,6 +3753,9 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
+
/*
* 7.i Follow DisplayPort specification training sequence (see notes for
* failure handling)
@@ -3693,14 +3798,16 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
crtc_state->port_clock,
crtc_state->lane_count);
- intel_edp_panel_on(intel_dp);
+ intel_pps_on(intel_dp);
intel_ddi_clk_select(encoder, crtc_state);
if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT)
- intel_display_power_get(dev_priv,
- dig_port->ddi_io_power_domain);
+ dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+ }
icl_program_mg_dp_mode(dig_port, crtc_state);
@@ -3713,14 +3820,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
else
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
- if (intel_phy_is_combo(dev_priv, phy)) {
- bool lane_reversal =
- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
- intel_combo_phy_power_up_lanes(dev_priv, phy, false,
- crtc_state->lane_count,
- lane_reversal);
- }
+ intel_ddi_power_up_lanes(encoder, crtc_state);
intel_ddi_init_dp_buf_reg(encoder, crtc_state);
if (!is_mst)
@@ -3778,7 +3878,9 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_clk_select(encoder, crtc_state);
- intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port, crtc_state);
@@ -3931,13 +4033,14 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
if (INTEL_GEN(dev_priv) >= 12)
intel_ddi_disable_pipe_clock(old_crtc_state);
- intel_edp_panel_vdd_on(intel_dp);
- intel_edp_panel_off(intel_dp);
+ intel_pps_vdd_on(intel_dp);
+ intel_pps_off(intel_dp);
if (!intel_phy_is_tc(dev_priv, phy) ||
dig_port->tc_mode != TC_PORT_TBT_ALT)
- intel_display_power_put_unchecked(dev_priv,
- dig_port->ddi_io_power_domain);
+ intel_display_power_put(dev_priv,
+ dig_port->ddi_io_power_domain,
+ fetch_and_zero(&dig_port->ddi_io_wakeref));
intel_ddi_clk_disable(encoder);
}
@@ -3958,8 +4061,9 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
intel_disable_ddi_buf(encoder, old_crtc_state);
- intel_display_power_put_unchecked(dev_priv,
- dig_port->ddi_io_power_domain);
+ intel_display_power_put(dev_priv,
+ dig_port->ddi_io_power_domain,
+ fetch_and_zero(&dig_port->ddi_io_wakeref));
intel_ddi_clk_disable(encoder);
@@ -3981,6 +4085,8 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
intel_disable_pipe(old_crtc_state);
+ intel_vrr_disable(old_crtc_state);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
intel_dsc_disable(old_crtc_state);
@@ -4032,8 +4138,9 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
icl_unmap_plls_to_ports(encoder);
if (intel_crtc_has_dp_encoder(old_crtc_state) || is_tc_port)
- intel_display_power_put_unchecked(dev_priv,
- intel_ddi_main_link_aux_domain(dig_port));
+ intel_display_power_put(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port),
+ fetch_and_zero(&dig_port->aux_wakeref));
if (is_tc_port)
intel_tc_port_put_link(dig_port);
@@ -4118,6 +4225,7 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum port port = encoder->port;
if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
@@ -4125,7 +4233,10 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
intel_edp_backlight_on(crtc_state, conn_state);
intel_psr_enable(intel_dp, crtc_state, conn_state);
- intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
+
+ if (!dig_port->lspcon.active || dig_port->dp.has_hdmi_sink)
+ intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
+
intel_edp_drrs_enable(intel_dp, crtc_state);
if (crtc_state->has_audio)
@@ -4206,6 +4317,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, reg, val);
}
+ intel_ddi_power_up_lanes(encoder, crtc_state);
+
/* In HDMI/DVI mode, the port width, and swing/emphasis values
* are ignored so nothing special needs to be done besides
* enabling the port.
@@ -4227,6 +4340,8 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
if (!crtc_state->bigjoiner_slave)
intel_ddi_enable_transcoder_func(encoder, crtc_state);
+ intel_vrr_enable(encoder, crtc_state);
+
intel_enable_pipe(crtc_state);
intel_crtc_vblank_on(crtc_state);
@@ -4240,7 +4355,7 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED)
intel_hdcp_enable(to_intel_connector(conn_state->connector),
- crtc_state->cpu_transcoder,
+ crtc_state,
(u8)conn_state->hdcp_content_type);
}
@@ -4263,6 +4378,9 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state,
/* Disable the decompression in DP Sink */
intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
false);
+ /* Disable Ignore_MSA bit in DP Sink */
+ intel_dp_sink_set_msa_timing_par_ignore_state(intel_dp, old_crtc_state,
+ false);
}
static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
@@ -4368,9 +4486,12 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
if (is_tc_port)
intel_tc_port_get_link(dig_port, crtc_state->lane_count);
- if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
- intel_display_power_get(dev_priv,
- intel_ddi_main_link_aux_domain(dig_port));
+ if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->aux_wakeref);
+ dig_port->aux_wakeref =
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
+ }
if (is_tc_port && dig_port->tc_mode != TC_PORT_TBT_ALT)
/*
@@ -4583,6 +4704,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 temp, flags = 0;
temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -4657,9 +4779,12 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
pipe_config->fec_enable);
}
- pipe_config->infoframes.enable |=
- intel_hdmi_infoframes_enabled(encoder, pipe_config);
-
+ if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink)
+ pipe_config->infoframes.enable |=
+ intel_lspcon_infoframes_enabled(encoder, pipe_config);
+ else
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
break;
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
@@ -4946,6 +5071,8 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
intel_dp_encoder_flush_work(encoder);
drm_encoder_cleanup(encoder);
+ if (dig_port)
+ kfree(dig_port->hdcp_port_data.streams);
kfree(dig_port);
}
@@ -5099,12 +5226,20 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &dig_port->dp;
enum phy phy = intel_port_to_phy(i915, encoder->port);
bool is_tc = intel_phy_is_tc(i915, phy);
struct drm_modeset_acquire_ctx ctx;
enum intel_hotplug_state state;
int ret;
+ if (intel_dp->compliance.test_active &&
+ intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
+ intel_dp_phy_test(encoder);
+ /* just do the PHY test and nothing else */
+ return INTEL_HOTPLUG_UNCHANGED;
+ }
+
state = intel_encoder_hotplug(encoder, connector);
drm_modeset_acquire_init(&ctx, 0);
@@ -5262,8 +5397,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
static bool hti_uses_phy(struct drm_i915_private *i915, enum phy phy)
{
return i915->hti_state & HDPORT_ENABLED &&
- (i915->hti_state & HDPORT_PHY_USED_DP(phy) ||
- i915->hti_state & HDPORT_PHY_USED_HDMI(phy));
+ i915->hti_state & HDPORT_DDI_USED(phy);
}
static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index dcc711cfe4fe..a4dd815c0000 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -50,9 +50,9 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
u32 ddi_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
-int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
- enum transcoder cpu_transcoder,
- bool enable);
+int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
+ enum transcoder cpu_transcoder,
+ bool enable, u32 hdcp_mask);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
#endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 53a00cf3fa32..8d7aaa68c6f6 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -45,8 +45,10 @@
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
+#include "display/intel_display_debugfs.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
+#include "display/intel_dpll.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
@@ -56,6 +58,9 @@
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"
+#include "display/intel_vrr.h"
+
+#include "gem/i915_gem_object.h"
#include "gt/intel_rps.h"
@@ -67,10 +72,12 @@
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
+#include "intel_crtc.h"
#include "intel_csr.h"
#include "intel_display_types.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
+#include "intel_fdi.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
@@ -79,72 +86,14 @@
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
+#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
-
-/* Primary plane formats for gen <= 3 */
-static const u32 i8xx_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
-};
-
-/* Primary plane formats for ivb (no fp16 due to hw issue) */
-static const u32 ivb_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
-};
-
-/* Primary plane formats for gen >= 4, except ivb */
-static const u32 i965_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_XBGR16161616F,
-};
-
-/* Primary plane formats for vlv/chv */
-static const u32 vlv_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_ABGR2101010,
- DRM_FORMAT_XBGR16161616F,
-};
-
-static const u64 i9xx_format_modifiers[] = {
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-/* Cursor formats */
-static const u32 intel_cursor_formats[] = {
- DRM_FORMAT_ARGB8888,
-};
-
-static const u64 cursor_format_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
+#include "i9xx_plane.h"
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
@@ -171,18 +120,6 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
-static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
-
-struct intel_limit {
- struct {
- int min, max;
- } dot, vco, n, m, m1, m2, p, p1;
-
- struct {
- int dot_limit;
- int p2_slow, p2_fast;
- } p2;
-};
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
@@ -241,281 +178,6 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
dev_priv->czclk_freq);
}
-/* units of 100MHz */
-static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *pipe_config)
-{
- if (HAS_DDI(dev_priv))
- return pipe_config->port_clock; /* SPLL */
- else
- return dev_priv->fdi_pll_freq;
-}
-
-static const struct intel_limit intel_limits_i8xx_dac = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 908000, .max = 1512000 },
- .n = { .min = 2, .max = 16 },
- .m = { .min = 96, .max = 140 },
- .m1 = { .min = 18, .max = 26 },
- .m2 = { .min = 6, .max = 16 },
- .p = { .min = 4, .max = 128 },
- .p1 = { .min = 2, .max = 33 },
- .p2 = { .dot_limit = 165000,
- .p2_slow = 4, .p2_fast = 2 },
-};
-
-static const struct intel_limit intel_limits_i8xx_dvo = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 908000, .max = 1512000 },
- .n = { .min = 2, .max = 16 },
- .m = { .min = 96, .max = 140 },
- .m1 = { .min = 18, .max = 26 },
- .m2 = { .min = 6, .max = 16 },
- .p = { .min = 4, .max = 128 },
- .p1 = { .min = 2, .max = 33 },
- .p2 = { .dot_limit = 165000,
- .p2_slow = 4, .p2_fast = 4 },
-};
-
-static const struct intel_limit intel_limits_i8xx_lvds = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 908000, .max = 1512000 },
- .n = { .min = 2, .max = 16 },
- .m = { .min = 96, .max = 140 },
- .m1 = { .min = 18, .max = 26 },
- .m2 = { .min = 6, .max = 16 },
- .p = { .min = 4, .max = 128 },
- .p1 = { .min = 1, .max = 6 },
- .p2 = { .dot_limit = 165000,
- .p2_slow = 14, .p2_fast = 7 },
-};
-
-static const struct intel_limit intel_limits_i9xx_sdvo = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1400000, .max = 2800000 },
- .n = { .min = 1, .max = 6 },
- .m = { .min = 70, .max = 120 },
- .m1 = { .min = 8, .max = 18 },
- .m2 = { .min = 3, .max = 7 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 200000,
- .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit intel_limits_i9xx_lvds = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1400000, .max = 2800000 },
- .n = { .min = 1, .max = 6 },
- .m = { .min = 70, .max = 120 },
- .m1 = { .min = 8, .max = 18 },
- .m2 = { .min = 3, .max = 7 },
- .p = { .min = 7, .max = 98 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 112000,
- .p2_slow = 14, .p2_fast = 7 },
-};
-
-
-static const struct intel_limit intel_limits_g4x_sdvo = {
- .dot = { .min = 25000, .max = 270000 },
- .vco = { .min = 1750000, .max = 3500000},
- .n = { .min = 1, .max = 4 },
- .m = { .min = 104, .max = 138 },
- .m1 = { .min = 17, .max = 23 },
- .m2 = { .min = 5, .max = 11 },
- .p = { .min = 10, .max = 30 },
- .p1 = { .min = 1, .max = 3},
- .p2 = { .dot_limit = 270000,
- .p2_slow = 10,
- .p2_fast = 10
- },
-};
-
-static const struct intel_limit intel_limits_g4x_hdmi = {
- .dot = { .min = 22000, .max = 400000 },
- .vco = { .min = 1750000, .max = 3500000},
- .n = { .min = 1, .max = 4 },
- .m = { .min = 104, .max = 138 },
- .m1 = { .min = 16, .max = 23 },
- .m2 = { .min = 5, .max = 11 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8},
- .p2 = { .dot_limit = 165000,
- .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
- .dot = { .min = 20000, .max = 115000 },
- .vco = { .min = 1750000, .max = 3500000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 104, .max = 138 },
- .m1 = { .min = 17, .max = 23 },
- .m2 = { .min = 5, .max = 11 },
- .p = { .min = 28, .max = 112 },
- .p1 = { .min = 2, .max = 8 },
- .p2 = { .dot_limit = 0,
- .p2_slow = 14, .p2_fast = 14
- },
-};
-
-static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
- .dot = { .min = 80000, .max = 224000 },
- .vco = { .min = 1750000, .max = 3500000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 104, .max = 138 },
- .m1 = { .min = 17, .max = 23 },
- .m2 = { .min = 5, .max = 11 },
- .p = { .min = 14, .max = 42 },
- .p1 = { .min = 2, .max = 6 },
- .p2 = { .dot_limit = 0,
- .p2_slow = 7, .p2_fast = 7
- },
-};
-
-static const struct intel_limit pnv_limits_sdvo = {
- .dot = { .min = 20000, .max = 400000},
- .vco = { .min = 1700000, .max = 3500000 },
- /* Pineview's Ncounter is a ring counter */
- .n = { .min = 3, .max = 6 },
- .m = { .min = 2, .max = 256 },
- /* Pineview only has one combined m divider, which we treat as m2. */
- .m1 = { .min = 0, .max = 0 },
- .m2 = { .min = 0, .max = 254 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 200000,
- .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit pnv_limits_lvds = {
- .dot = { .min = 20000, .max = 400000 },
- .vco = { .min = 1700000, .max = 3500000 },
- .n = { .min = 3, .max = 6 },
- .m = { .min = 2, .max = 256 },
- .m1 = { .min = 0, .max = 0 },
- .m2 = { .min = 0, .max = 254 },
- .p = { .min = 7, .max = 112 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 112000,
- .p2_slow = 14, .p2_fast = 14 },
-};
-
-/* Ironlake / Sandybridge
- *
- * We calculate clock using (register_value + 2) for N/M1/M2, so here
- * the range value for them is (actual_value - 2).
- */
-static const struct intel_limit ilk_limits_dac = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 5 },
- .m = { .min = 79, .max = 127 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 5, .max = 80 },
- .p1 = { .min = 1, .max = 8 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 10, .p2_fast = 5 },
-};
-
-static const struct intel_limit ilk_limits_single_lvds = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 79, .max = 118 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 28, .max = 112 },
- .p1 = { .min = 2, .max = 8 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 14, .p2_fast = 14 },
-};
-
-static const struct intel_limit ilk_limits_dual_lvds = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 79, .max = 127 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 14, .max = 56 },
- .p1 = { .min = 2, .max = 8 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 7, .p2_fast = 7 },
-};
-
-/* LVDS 100mhz refclk limits. */
-static const struct intel_limit ilk_limits_single_lvds_100m = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 2 },
- .m = { .min = 79, .max = 126 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 28, .max = 112 },
- .p1 = { .min = 2, .max = 8 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 14, .p2_fast = 14 },
-};
-
-static const struct intel_limit ilk_limits_dual_lvds_100m = {
- .dot = { .min = 25000, .max = 350000 },
- .vco = { .min = 1760000, .max = 3510000 },
- .n = { .min = 1, .max = 3 },
- .m = { .min = 79, .max = 126 },
- .m1 = { .min = 12, .max = 22 },
- .m2 = { .min = 5, .max = 9 },
- .p = { .min = 14, .max = 42 },
- .p1 = { .min = 2, .max = 6 },
- .p2 = { .dot_limit = 225000,
- .p2_slow = 7, .p2_fast = 7 },
-};
-
-static const struct intel_limit intel_limits_vlv = {
- /*
- * These are the data rate limits (measured in fast clocks)
- * since those are the strictest limits we have. The fast
- * clock and actual rate limits are more relaxed, so checking
- * them would make no difference.
- */
- .dot = { .min = 25000 * 5, .max = 270000 * 5 },
- .vco = { .min = 4000000, .max = 6000000 },
- .n = { .min = 1, .max = 7 },
- .m1 = { .min = 2, .max = 3 },
- .m2 = { .min = 11, .max = 156 },
- .p1 = { .min = 2, .max = 3 },
- .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
-};
-
-static const struct intel_limit intel_limits_chv = {
- /*
- * These are the data rate limits (measured in fast clocks)
- * since those are the strictest limits we have. The fast
- * clock and actual rate limits are more relaxed, so checking
- * them would make no difference.
- */
- .dot = { .min = 25000 * 5, .max = 540000 * 5},
- .vco = { .min = 4800000, .max = 6480000 },
- .n = { .min = 1, .max = 1 },
- .m1 = { .min = 2, .max = 2 },
- .m2 = { .min = 24 << 22, .max = 175 << 22 },
- .p1 = { .min = 2, .max = 4 },
- .p2 = { .p2_slow = 1, .p2_fast = 14 },
-};
-
-static const struct intel_limit intel_limits_bxt = {
- /* FIXME: find real dot limits */
- .dot = { .min = 0, .max = INT_MAX },
- .vco = { .min = 4800000, .max = 6700000 },
- .n = { .min = 1, .max = 1 },
- .m1 = { .min = 2, .max = 2 },
- /* FIXME: find real m2 limits */
- .m2 = { .min = 2 << 22, .max = 255 << 22 },
- .p1 = { .min = 2, .max = 4 },
- .p2 = { .p2_slow = 1, .p2_fast = 20 },
-};
-
/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
@@ -542,12 +204,6 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
}
static bool
-needs_modeset(const struct intel_crtc_state *state)
-{
- return drm_atomic_crtc_needs_modeset(&state->uapi);
-}
-
-static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
return crtc_state->master_transcoder != INVALID_TRANSCODER;
@@ -566,482 +222,6 @@ is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
is_trans_port_sync_slave(crtc_state);
}
-/*
- * Platform specific helpers to calculate the port PLL loopback- (clock.m),
- * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
- * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
- * The helpers' return value is the rate of the clock that is fed to the
- * display engine's pipe which can be the above fast dot clock rate or a
- * divided-down version of it.
- */
-/* m1 is reserved as 0 in Pineview, n is a ring counter */
-static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
-{
- clock->m = clock->m2 + 2;
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n == 0 || clock->p == 0))
- return 0;
- clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
- return clock->dot;
-}
-
-static u32 i9xx_dpll_compute_m(struct dpll *dpll)
-{
- return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
-}
-
-static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
-{
- clock->m = i9xx_dpll_compute_m(clock);
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
- return 0;
- clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
- return clock->dot;
-}
-
-static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
-{
- clock->m = clock->m1 * clock->m2;
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n == 0 || clock->p == 0))
- return 0;
- clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
- return clock->dot / 5;
-}
-
-int chv_calc_dpll_params(int refclk, struct dpll *clock)
-{
- clock->m = clock->m1 * clock->m2;
- clock->p = clock->p1 * clock->p2;
- if (WARN_ON(clock->n == 0 || clock->p == 0))
- return 0;
- clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
- clock->n << 22);
- clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
- return clock->dot / 5;
-}
-
-/*
- * Returns whether the given set of divisors are valid for a given refclk with
- * the given connectors.
- */
-static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
- const struct intel_limit *limit,
- const struct dpll *clock)
-{
- if (clock->n < limit->n.min || limit->n.max < clock->n)
- return false;
- if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
- return false;
- if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
- return false;
- if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
- return false;
-
- if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
- if (clock->m1 <= clock->m2)
- return false;
-
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_GEN9_LP(dev_priv)) {
- if (clock->p < limit->p.min || limit->p.max < clock->p)
- return false;
- if (clock->m < limit->m.min || limit->m.max < clock->m)
- return false;
- }
-
- if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
- return false;
- /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
- * connector, etc., rather than just a single range.
- */
- if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
- return false;
-
- return true;
-}
-
-static int
-i9xx_select_p2_div(const struct intel_limit *limit,
- const struct intel_crtc_state *crtc_state,
- int target)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- /*
- * For LVDS just rely on its current settings for dual-channel.
- * We haven't figured out how to reliably set up different
- * single/dual channel state, if we even can.
- */
- if (intel_is_dual_link_lvds(dev_priv))
- return limit->p2.p2_fast;
- else
- return limit->p2.p2_slow;
- } else {
- if (target < limit->p2.dot_limit)
- return limit->p2.p2_slow;
- else
- return limit->p2.p2_fast;
- }
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-i9xx_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
- struct dpll clock;
- int err = target;
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
- clock.m1++) {
- for (clock.m2 = limit->m2.min;
- clock.m2 <= limit->m2.max; clock.m2++) {
- if (clock.m2 >= clock.m1)
- break;
- for (clock.n = limit->n.min;
- clock.n <= limit->n.max; clock.n++) {
- for (clock.p1 = limit->p1.min;
- clock.p1 <= limit->p1.max; clock.p1++) {
- int this_err;
-
- i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
- limit,
- &clock))
- continue;
- if (match_clock &&
- clock.p != match_clock->p)
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- }
- }
- }
- }
- }
-
- return (err != target);
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-pnv_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
- struct dpll clock;
- int err = target;
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
- clock.m1++) {
- for (clock.m2 = limit->m2.min;
- clock.m2 <= limit->m2.max; clock.m2++) {
- for (clock.n = limit->n.min;
- clock.n <= limit->n.max; clock.n++) {
- for (clock.p1 = limit->p1.min;
- clock.p1 <= limit->p1.max; clock.p1++) {
- int this_err;
-
- pnv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
- limit,
- &clock))
- continue;
- if (match_clock &&
- clock.p != match_clock->p)
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- }
- }
- }
- }
- }
-
- return (err != target);
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- *
- * Target and reference clocks are specified in kHz.
- *
- * If match_clock is provided, then best_clock P divider must match the P
- * divider from @match_clock used for LVDS downclocking.
- */
-static bool
-g4x_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
- struct dpll clock;
- int max_n;
- bool found = false;
- /* approximately equals target * 0.00585 */
- int err_most = (target >> 8) + (target >> 9);
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
-
- max_n = limit->n.max;
- /* based on hardware requirement, prefer smaller n to precision */
- for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
- /* based on hardware requirement, prefere larger m1,m2 */
- for (clock.m1 = limit->m1.max;
- clock.m1 >= limit->m1.min; clock.m1--) {
- for (clock.m2 = limit->m2.max;
- clock.m2 >= limit->m2.min; clock.m2--) {
- for (clock.p1 = limit->p1.max;
- clock.p1 >= limit->p1.min; clock.p1--) {
- int this_err;
-
- i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
- limit,
- &clock))
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err_most) {
- *best_clock = clock;
- err_most = this_err;
- max_n = clock.n;
- found = true;
- }
- }
- }
- }
- }
- return found;
-}
-
-/*
- * Check if the calculated PLL configuration is more optimal compared to the
- * best configuration and error found so far. Return the calculated error.
- */
-static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
- const struct dpll *calculated_clock,
- const struct dpll *best_clock,
- unsigned int best_error_ppm,
- unsigned int *error_ppm)
-{
- /*
- * For CHV ignore the error and consider only the P value.
- * Prefer a bigger P value based on HW requirements.
- */
- if (IS_CHERRYVIEW(to_i915(dev))) {
- *error_ppm = 0;
-
- return calculated_clock->p > best_clock->p;
- }
-
- if (drm_WARN_ON_ONCE(dev, !target_freq))
- return false;
-
- *error_ppm = div_u64(1000000ULL *
- abs(target_freq - calculated_clock->dot),
- target_freq);
- /*
- * Prefer a better P value over a better (smaller) error if the error
- * is small. Ensure this preference for future configurations too by
- * setting the error to 0.
- */
- if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
- *error_ppm = 0;
-
- return true;
- }
-
- return *error_ppm + 10 < best_error_ppm;
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool
-vlv_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct dpll clock;
- unsigned int bestppm = 1000000;
- /* min update 19.2 MHz */
- int max_n = min(limit->n.max, refclk / 19200);
- bool found = false;
-
- target *= 5; /* fast clock */
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- /* based on hardware requirement, prefer smaller n to precision */
- for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
- for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
- for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
- clock.p2 -= clock.p2 > 10 ? 2 : 1) {
- clock.p = clock.p1 * clock.p2;
- /* based on hardware requirement, prefer bigger m1,m2 values */
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
- unsigned int ppm;
-
- clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
- refclk * clock.m1);
-
- vlv_calc_dpll_params(refclk, &clock);
-
- if (!intel_pll_is_valid(to_i915(dev),
- limit,
- &clock))
- continue;
-
- if (!vlv_PLL_is_optimal(dev, target,
- &clock,
- best_clock,
- bestppm, &ppm))
- continue;
-
- *best_clock = clock;
- bestppm = ppm;
- found = true;
- }
- }
- }
- }
-
- return found;
-}
-
-/*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool
-chv_find_best_dpll(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
- struct dpll *best_clock)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
- unsigned int best_error_ppm;
- struct dpll clock;
- u64 m2;
- int found = false;
-
- memset(best_clock, 0, sizeof(*best_clock));
- best_error_ppm = 1000000;
-
- /*
- * Based on hardware doc, the n always set to 1, and m1 always
- * set to 2. If requires to support 200Mhz refclk, we need to
- * revisit this because n may not 1 anymore.
- */
- clock.n = 1, clock.m1 = 2;
- target *= 5; /* fast clock */
-
- for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
- for (clock.p2 = limit->p2.p2_fast;
- clock.p2 >= limit->p2.p2_slow;
- clock.p2 -= clock.p2 > 10 ? 2 : 1) {
- unsigned int error_ppm;
-
- clock.p = clock.p1 * clock.p2;
-
- m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
- refclk * clock.m1);
-
- if (m2 > INT_MAX/clock.m1)
- continue;
-
- clock.m2 = m2;
-
- chv_calc_dpll_params(refclk, &clock);
-
- if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
- continue;
-
- if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
- best_error_ppm, &error_ppm))
- continue;
-
- *best_clock = clock;
- best_error_ppm = error_ppm;
- found = true;
- }
- }
-
- return found;
-}
-
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
- struct dpll *best_clock)
-{
- int refclk = 100000;
- const struct intel_limit *limit = &intel_limits_bxt;
-
- return chv_find_best_dpll(limit, crtc_state,
- crtc_state->port_clock, refclk,
- NULL, best_clock);
-}
-
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -1315,12 +495,6 @@ static void assert_planes_disabled(struct intel_crtc *crtc)
assert_plane_disabled(plane);
}
-static void assert_vblank_disabled(struct drm_crtc *crtc)
-{
- if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
- drm_crtc_vblank_put(crtc);
-}
-
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -1672,7 +846,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
@@ -1683,7 +857,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
if (HAS_PCH_IBX(dev_priv)) {
/* Configure frame start delay to match the CPU */
val &= ~TRANS_FRAME_START_DELAY_MASK;
- val |= TRANS_FRAME_START_DELAY(0);
+ val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
/*
* Make the BPC in transcoder be consistent with
@@ -1728,7 +902,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
@@ -1805,55 +979,6 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
return crtc->pipe;
}
-static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 mode_flags = crtc->mode_flags;
-
- /*
- * From Gen 11, In case of dsi cmd mode, frame counter wouldnt
- * have updated at the beginning of TE, if we want to use
- * the hw counter, then we would find it updated in only
- * the next TE, hence switching to sw counter.
- */
- if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1))
- return 0;
-
- /*
- * On i965gm the hardware frame counter reads
- * zero when the TV encoder is enabled :(
- */
- if (IS_I965GM(dev_priv) &&
- (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
- return 0;
-
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- return 0xffffffff; /* full 32 bit counter */
- else if (INTEL_GEN(dev_priv) >= 3)
- return 0xffffff; /* only 24 bits of frame count */
- else
- return 0; /* Gen2 doesn't have a hardware frame counter */
-}
-
-void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-
- assert_vblank_disabled(&crtc->base);
- drm_crtc_set_max_vblank_count(&crtc->base,
- intel_crtc_max_vblank_count(crtc_state));
- drm_crtc_vblank_on(&crtc->base);
-}
-
-void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-
- drm_crtc_vblank_off(&crtc->base);
- assert_vblank_disabled(&crtc->base);
-}
-
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
@@ -1968,8 +1093,8 @@ static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
static bool is_gen12_ccs_modifier(u64 modifier)
{
return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
-
}
static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
@@ -1977,6 +1102,12 @@ static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
}
+static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane)
+{
+ return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC &&
+ plane == 2;
+}
+
static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
{
if (is_ccs_modifier(fb->modifier))
@@ -1998,6 +1129,9 @@ static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
ccs_plane < fb->format->num_planes / 2);
+ if (is_gen12_ccs_cc_plane(fb, ccs_plane))
+ return 0;
+
return ccs_plane - fb->format->num_planes / 2;
}
@@ -2016,7 +1150,7 @@ int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
- uint64_t modifier)
+ u64 modifier)
{
return info->is_yuv &&
info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
@@ -2048,6 +1182,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
return 128;
fallthrough;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
if (is_ccs_plane(fb, color_plane))
return 64;
@@ -2182,8 +1317,13 @@ static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_pr
return 0;
}
-static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
- int color_plane)
+static bool has_async_flips(struct drm_i915_private *i915)
+{
+ return INTEL_GEN(i915) >= 5;
+}
+
+unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
+ int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
@@ -2196,7 +1336,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
case DRM_FORMAT_MOD_LINEAR:
return intel_linear_alignment(dev_priv);
case I915_FORMAT_MOD_X_TILED:
- if (INTEL_GEN(dev_priv) >= 9)
+ if (has_async_flips(dev_priv))
return 256 * 1024;
return 0;
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
@@ -2204,6 +1344,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
return intel_tile_row_size(fb, color_plane);
fallthrough;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
return 16 * 1024;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
@@ -2309,7 +1450,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
*/
ret = i915_vma_pin_fence(vma);
if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
vma = ERR_PTR(ret);
goto err;
}
@@ -2327,12 +1468,9 @@ err:
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
- i915_gem_object_lock(vma->obj, NULL);
if (flags & PLANE_HAS_FENCE)
i915_vma_unpin_fence(vma);
- i915_gem_object_unpin_from_display_plane(vma);
- i915_gem_object_unlock(vma->obj);
-
+ i915_vma_unpin(vma);
i915_vma_put(vma);
}
@@ -2452,10 +1590,10 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
* Adjust the tile offset by moving the difference into
* the x/y offsets.
*/
-static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
- const struct intel_plane_state *state,
- int color_plane,
- u32 old_offset, u32 new_offset)
+u32 intel_plane_adjust_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane,
+ u32 old_offset, u32 new_offset)
{
return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
state->hw.rotation,
@@ -2532,9 +1670,9 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
return offset_aligned;
}
-static u32 intel_plane_compute_aligned_offset(int *x, int *y,
- const struct intel_plane_state *state,
- int color_plane)
+u32 intel_plane_compute_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane)
{
struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
@@ -2608,6 +1746,7 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
return I915_TILING_Y;
default:
@@ -2686,6 +1825,25 @@ static const struct drm_format_info gen12_ccs_formats[] = {
.hsub = 2, .vsub = 2, .is_yuv = true },
};
+/*
+ * Same as gen12_ccs_formats[] above, but with additional surface used
+ * to pass Clear Color information in plane 2 with 64 bits of data.
+ */
+static const struct drm_format_info gen12_ccs_cc_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+};
+
static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
int num_formats, u32 format)
@@ -2714,6 +1872,10 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
return lookup_format_info(gen12_ccs_formats,
ARRAY_SIZE(gen12_ccs_formats),
cmd->pixel_format);
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ return lookup_format_info(gen12_ccs_cc_formats,
+ ARRAY_SIZE(gen12_ccs_cc_formats),
+ cmd->pixel_format);
default:
return NULL;
}
@@ -2722,6 +1884,7 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
bool is_ccs_modifier(u64 modifier)
{
return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
@@ -2940,7 +2103,7 @@ intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
int ccs_x, ccs_y;
int main_x, main_y;
- if (!is_ccs_plane(fb, ccs_plane))
+ if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
return 0;
intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
@@ -3067,6 +2230,18 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
int x, y;
int ret;
+ /*
+ * Plane 2 of Render Compression with Clear Color fb modifier
+ * is consumed by the driver and not passed to DE. Skip the
+ * arithmetic related to alignment and offset calculation.
+ */
+ if (is_gen12_ccs_cc_plane(fb, i)) {
+ if (IS_ALIGNED(fb->offsets[i], PAGE_SIZE))
+ continue;
+ else
+ return -EINVAL;
+ }
+
cpp = fb->format->cpp[i];
intel_fb_plane_dims(&width, &height, fb, i);
@@ -3271,7 +2446,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
}
}
-static int
+int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
const struct intel_framebuffer *fb =
@@ -3551,7 +2726,7 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}
-static void fixup_active_planes(struct intel_crtc_state *crtc_state)
+static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct drm_plane *plane;
@@ -3561,11 +2736,14 @@ static void fixup_active_planes(struct intel_crtc_state *crtc_state)
* have been used on the same (or wrong) pipe. plane_mask uses
* unique ids, hence we can use that to reconstruct active_planes.
*/
+ crtc_state->enabled_planes = 0;
crtc_state->active_planes = 0;
drm_for_each_plane_mask(plane, &dev_priv->drm,
- crtc_state->uapi.plane_mask)
+ crtc_state->uapi.plane_mask) {
+ crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
+ }
}
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
@@ -3583,7 +2761,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
crtc->base.base.id, crtc->base.name);
intel_set_plane_visible(crtc_state, plane_state, false);
- fixup_active_planes(crtc_state);
+ fixup_plane_bitmasks(crtc_state);
crtc_state->data_rate[plane->id] = 0;
crtc_state->min_cdclk[plane->id] = 0;
@@ -3613,12 +2791,6 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_disable_plane(plane, crtc_state);
}
-static struct intel_frontbuffer *
-to_intel_frontbuffer(struct drm_framebuffer *fb)
-{
- return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
-}
-
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_initial_plane_config *plane_config)
@@ -3817,33 +2989,19 @@ static int intel_plane_max_height(struct intel_plane *plane,
return INT_MAX;
}
-static int skl_check_main_surface(struct intel_plane_state *plane_state)
+int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
+ int *x, int *y, u32 *offset)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- int x = plane_state->uapi.src.x1 >> 16;
- int y = plane_state->uapi.src.y1 >> 16;
- int w = drm_rect_width(&plane_state->uapi.src) >> 16;
- int h = drm_rect_height(&plane_state->uapi.src) >> 16;
- int min_width = intel_plane_min_width(plane, fb, 0, rotation);
- int max_width = intel_plane_max_width(plane, fb, 0, rotation);
- int max_height = intel_plane_max_height(plane, fb, 0, rotation);
- int aux_plane = intel_main_to_aux_plane(fb, 0);
- u32 aux_offset = plane_state->color_plane[aux_plane].offset;
- u32 alignment, offset;
+ const int aux_plane = intel_main_to_aux_plane(fb, 0);
+ const u32 aux_offset = plane_state->color_plane[aux_plane].offset;
+ const u32 alignment = intel_surf_alignment(fb, 0);
+ const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
- if (w > max_width || w < min_width || h > max_height) {
- drm_dbg_kms(&dev_priv->drm,
- "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
- w, h, min_width, max_width, max_height);
- return -EINVAL;
- }
-
- intel_add_fb_offsets(&x, &y, plane_state, 0);
- offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
- alignment = intel_surf_alignment(fb, 0);
+ intel_add_fb_offsets(x, y, plane_state, 0);
+ *offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0);
if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
return -EINVAL;
@@ -3852,9 +3010,10 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* main surface offset, and it must be non-negative. Make
* sure that is what we will get.
*/
- if (aux_plane && offset > aux_offset)
- offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
- offset, aux_offset & ~(alignment - 1));
+ if (aux_plane && *offset > aux_offset)
+ *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
+ *offset,
+ aux_offset & ~(alignment - 1));
/*
* When using an X-tiled surface, the plane blows up
@@ -3865,18 +3024,51 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
int cpp = fb->format->cpp[0];
- while ((x + w) * cpp > plane_state->color_plane[0].stride) {
- if (offset == 0) {
+ while ((*x + w) * cpp > plane_state->color_plane[0].stride) {
+ if (*offset == 0) {
drm_dbg_kms(&dev_priv->drm,
"Unable to find suitable display surface offset due to X-tiling\n");
return -EINVAL;
}
- offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
- offset, offset - alignment);
+ *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
+ *offset,
+ *offset - alignment);
}
}
+ return 0;
+}
+
+static int skl_check_main_surface(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const unsigned int rotation = plane_state->hw.rotation;
+ int x = plane_state->uapi.src.x1 >> 16;
+ int y = plane_state->uapi.src.y1 >> 16;
+ const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ const int h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ const int min_width = intel_plane_min_width(plane, fb, 0, rotation);
+ const int max_width = intel_plane_max_width(plane, fb, 0, rotation);
+ const int max_height = intel_plane_max_height(plane, fb, 0, rotation);
+ const int aux_plane = intel_main_to_aux_plane(fb, 0);
+ const u32 alignment = intel_surf_alignment(fb, 0);
+ u32 offset;
+ int ret;
+
+ if (w > max_width || w < min_width || h > max_height) {
+ drm_dbg_kms(&dev_priv->drm,
+ "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
+ w, h, min_width, max_width, max_height);
+ return -EINVAL;
+ }
+
+ ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
+ if (ret)
+ return ret;
+
/*
* CCS AUX surface doesn't have its own x/y offsets, we must make sure
* they match with the main surface x/y offsets.
@@ -3899,6 +3091,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
}
}
+ drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191);
+
plane_state->color_plane[0].offset = offset;
plane_state->color_plane[0].x = x;
plane_state->color_plane[0].y = y;
@@ -3971,6 +3165,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
}
}
+ drm_WARN_ON(&i915->drm, x > 8191 || y > 8191);
+
plane_state->color_plane[uv_plane].offset = offset;
plane_state->color_plane[uv_plane].x = x;
plane_state->color_plane[uv_plane].y = y;
@@ -3991,7 +3187,8 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
int hsub, vsub;
int x, y;
- if (!is_ccs_plane(fb, ccs_plane))
+ if (!is_ccs_plane(fb, ccs_plane) ||
+ is_gen12_ccs_cc_plane(fb, ccs_plane))
continue;
intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
@@ -4063,422 +3260,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
return 0;
}
-static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- unsigned int *num, unsigned int *den)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int cpp = fb->format->cpp[0];
-
- /*
- * g4x bspec says 64bpp pixel rate can't exceed 80%
- * of cdclk when the sprite plane is enabled on the
- * same pipe. ilk/snb bspec says 64bpp pixel rate is
- * never allowed to exceed 80% of cdclk. Let's just go
- * with the ilk/snb limit always.
- */
- if (cpp == 8) {
- *num = 10;
- *den = 8;
- } else {
- *num = 1;
- *den = 1;
- }
-}
-
-static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- unsigned int pixel_rate;
- unsigned int num, den;
-
- /*
- * Note that crtc_state->pixel_rate accounts for both
- * horizontal and vertical panel fitter downscaling factors.
- * Pre-HSW bspec tells us to only consider the horizontal
- * downscaling factor here. We ignore that and just consider
- * both for simplicity.
- */
- pixel_rate = crtc_state->pixel_rate;
-
- i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
-
- /* two pixels per clock with double wide pipe */
- if (crtc_state->double_wide)
- den *= 2;
-
- return DIV_ROUND_UP(pixel_rate * num, den);
-}
-
-unsigned int
-i9xx_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-
- if (!HAS_GMCH(dev_priv)) {
- return 32*1024;
- } else if (INTEL_GEN(dev_priv) >= 4) {
- if (modifier == I915_FORMAT_MOD_X_TILED)
- return 16*1024;
- else
- return 32*1024;
- } else if (INTEL_GEN(dev_priv) >= 3) {
- if (modifier == I915_FORMAT_MOD_X_TILED)
- return 8*1024;
- else
- return 16*1024;
- } else {
- if (plane->i9xx_plane == PLANE_C)
- return 4*1024;
- else
- return 8*1024;
- }
-}
-
-static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dspcntr = 0;
-
- if (crtc_state->gamma_enable)
- dspcntr |= DISPPLANE_GAMMA_ENABLE;
-
- if (crtc_state->csc_enable)
- dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
-
- if (INTEL_GEN(dev_priv) < 5)
- dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
-
- return dspcntr;
-}
-
-static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- u32 dspcntr;
-
- dspcntr = DISPLAY_PLANE_ENABLE;
-
- if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
- IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
- switch (fb->format->format) {
- case DRM_FORMAT_C8:
- dspcntr |= DISPPLANE_8BPP;
- break;
- case DRM_FORMAT_XRGB1555:
- dspcntr |= DISPPLANE_BGRX555;
- break;
- case DRM_FORMAT_ARGB1555:
- dspcntr |= DISPPLANE_BGRA555;
- break;
- case DRM_FORMAT_RGB565:
- dspcntr |= DISPPLANE_BGRX565;
- break;
- case DRM_FORMAT_XRGB8888:
- dspcntr |= DISPPLANE_BGRX888;
- break;
- case DRM_FORMAT_XBGR8888:
- dspcntr |= DISPPLANE_RGBX888;
- break;
- case DRM_FORMAT_ARGB8888:
- dspcntr |= DISPPLANE_BGRA888;
- break;
- case DRM_FORMAT_ABGR8888:
- dspcntr |= DISPPLANE_RGBA888;
- break;
- case DRM_FORMAT_XRGB2101010:
- dspcntr |= DISPPLANE_BGRX101010;
- break;
- case DRM_FORMAT_XBGR2101010:
- dspcntr |= DISPPLANE_RGBX101010;
- break;
- case DRM_FORMAT_ARGB2101010:
- dspcntr |= DISPPLANE_BGRA101010;
- break;
- case DRM_FORMAT_ABGR2101010:
- dspcntr |= DISPPLANE_RGBA101010;
- break;
- case DRM_FORMAT_XBGR16161616F:
- dspcntr |= DISPPLANE_RGBX161616;
- break;
- default:
- MISSING_CASE(fb->format->format);
- return 0;
- }
-
- if (INTEL_GEN(dev_priv) >= 4 &&
- fb->modifier == I915_FORMAT_MOD_X_TILED)
- dspcntr |= DISPPLANE_TILED;
-
- if (rotation & DRM_MODE_ROTATE_180)
- dspcntr |= DISPPLANE_ROTATE_180;
-
- if (rotation & DRM_MODE_REFLECT_X)
- dspcntr |= DISPPLANE_MIRROR;
-
- return dspcntr;
-}
-
-int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int src_x, src_y, src_w;
- u32 offset;
- int ret;
-
- ret = intel_plane_compute_gtt(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- src_x = plane_state->uapi.src.x1 >> 16;
- src_y = plane_state->uapi.src.y1 >> 16;
-
- /* Undocumented hardware limit on i965/g4x/vlv/chv */
- if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
- return -EINVAL;
-
- intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
-
- if (INTEL_GEN(dev_priv) >= 4)
- offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
- plane_state, 0);
- else
- offset = 0;
-
- /*
- * Put the final coordinates back so that the src
- * coordinate checks will see the right values.
- */
- drm_rect_translate_to(&plane_state->uapi.src,
- src_x << 16, src_y << 16);
-
- /* HSW/BDW do this automagically in hardware */
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
- unsigned int rotation = plane_state->hw.rotation;
- int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
-
- if (rotation & DRM_MODE_ROTATE_180) {
- src_x += src_w - 1;
- src_y += src_h - 1;
- } else if (rotation & DRM_MODE_REFLECT_X) {
- src_x += src_w - 1;
- }
- }
-
- plane_state->color_plane[0].offset = offset;
- plane_state->color_plane[0].x = src_x;
- plane_state->color_plane[0].y = src_y;
-
- return 0;
-}
-
-static bool i9xx_plane_has_windowing(struct intel_plane *plane)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-
- if (IS_CHERRYVIEW(dev_priv))
- return i9xx_plane == PLANE_B;
- else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- return false;
- else if (IS_GEN(dev_priv, 4))
- return i9xx_plane == PLANE_C;
- else
- return i9xx_plane == PLANE_B ||
- i9xx_plane == PLANE_C;
-}
-
-static int
-i9xx_plane_check(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- int ret;
-
- ret = chv_plane_check_rotation(plane_state);
- if (ret)
- return ret;
-
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- i9xx_plane_has_windowing(plane));
- if (ret)
- return ret;
-
- ret = i9xx_check_plane_surface(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- ret = intel_plane_check_src_coordinates(plane_state);
- if (ret)
- return ret;
-
- plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
-
- return 0;
-}
-
-static void i9xx_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- u32 linear_offset;
- int x = plane_state->color_plane[0].x;
- int y = plane_state->color_plane[0].y;
- int crtc_x = plane_state->uapi.dst.x1;
- int crtc_y = plane_state->uapi.dst.y1;
- int crtc_w = drm_rect_width(&plane_state->uapi.dst);
- int crtc_h = drm_rect_height(&plane_state->uapi.dst);
- unsigned long irqflags;
- u32 dspaddr_offset;
- u32 dspcntr;
-
- dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
-
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
- if (INTEL_GEN(dev_priv) >= 4)
- dspaddr_offset = plane_state->color_plane[0].offset;
- else
- dspaddr_offset = linear_offset;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
- plane_state->color_plane[0].stride);
-
- if (INTEL_GEN(dev_priv) < 4) {
- /*
- * PLANE_A doesn't actually have a full window
- * generator but let's assume we still need to
- * program whatever is there.
- */
- intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
- (crtc_y << 16) | crtc_x);
- intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
- ((crtc_h - 1) << 16) | (crtc_w - 1));
- } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
- intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
- (crtc_y << 16) | crtc_x);
- intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
- ((crtc_h - 1) << 16) | (crtc_w - 1));
- intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
- }
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
- (y << 16) | x);
- } else if (INTEL_GEN(dev_priv) >= 4) {
- intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
- linear_offset);
- intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
- (y << 16) | x);
- }
-
- /*
- * The control register self-arms if the plane was previously
- * disabled. Try to make the plane enable atomic by writing
- * the control register just before the surface register.
- */
- intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
- if (INTEL_GEN(dev_priv) >= 4)
- intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
- else
- intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void i9xx_disable_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- unsigned long irqflags;
- u32 dspcntr;
-
- /*
- * DSPCNTR pipe gamma enable on g4x+ and pipe csc
- * enable on ilk+ affect the pipe bottom color as
- * well, so we must configure them even if the plane
- * is disabled.
- *
- * On pre-g4x there is no way to gamma correct the
- * pipe bottom color but we'll keep on doing this
- * anyway so that the crtc state readout works correctly.
- */
- dspcntr = i9xx_plane_ctl_crtc(crtc_state);
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
- if (INTEL_GEN(dev_priv) >= 4)
- intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
- else
- intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
- enum pipe *pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum intel_display_power_domain power_domain;
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- intel_wakeref_t wakeref;
- bool ret;
- u32 val;
-
- /*
- * Not 100% correct for planes that can move between pipes,
- * but that's only the case for gen2-4 which don't have any
- * display power wells.
- */
- power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return false;
-
- val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
-
- ret = val & DISPLAY_PLANE_ENABLE;
-
- if (INTEL_GEN(dev_priv) >= 5)
- *pipe = plane->pipe;
- else
- *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
- DISPPLANE_SEL_PIPE_SHIFT;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return ret;
-}
-
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
@@ -4647,6 +3428,7 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier)
case I915_FORMAT_MOD_Y_TILED:
return PLANE_CTL_TILED_Y;
case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
return PLANE_CTL_TILED_Y |
@@ -4707,9 +3489,6 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 plane_ctl = 0;
- if (crtc_state->uapi.async_flip)
- plane_ctl |= PLANE_CTL_ASYNC_FLIP;
-
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return plane_ctl;
@@ -4807,6 +3586,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
} else if (fb->format->is_yuv) {
plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
}
return plane_color_ctl;
@@ -4996,532 +3777,6 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
-static void intel_fdi_normal_train(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp;
-
- /* enable normal train */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (IS_IVYBRIDGE(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_NONE_IVB;
- temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
- }
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_NORMAL_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_NONE;
- }
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
- /* wait one idle pattern time */
- intel_de_posting_read(dev_priv, reg);
- udelay(1000);
-
- /* IVB wants error correction enabled */
- if (IS_IVYBRIDGE(dev_priv))
- intel_de_write(dev_priv, reg,
- intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
-}
-
-/* The FDI link training functions for ILK/Ibexpeak. */
-static void ilk_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp, tries;
-
- /* FDI needs bits from pipe first */
- assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
-
- /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
- for train result */
- reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
- intel_de_read(dev_priv, reg);
- udelay(150);
-
- /* enable CPU FDI TX and PCH FDI RX */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- /* Ironlake workaround, enable clock pointer after FDI enable*/
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
- FDI_RX_PHASE_SYNC_POINTER_OVR);
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
- FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
-
- reg = FDI_RX_IIR(pipe);
- for (tries = 0; tries < 5; tries++) {
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
- if ((temp & FDI_RX_BIT_LOCK)) {
- drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
- intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
- break;
- }
- }
- if (tries == 5)
- drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
-
- /* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- reg = FDI_RX_IIR(pipe);
- for (tries = 0; tries < 5; tries++) {
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_SYMBOL_LOCK) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
- break;
- }
- }
- if (tries == 5)
- drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
-
- drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
-
-}
-
-static const int snb_b_fdi_train_param[] = {
- FDI_LINK_TRAIN_400MV_0DB_SNB_B,
- FDI_LINK_TRAIN_400MV_6DB_SNB_B,
- FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
- FDI_LINK_TRAIN_800MV_0DB_SNB_B,
-};
-
-/* The FDI link training functions for SNB/Cougarpoint. */
-static void gen6_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp, i, retry;
-
- /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
- for train result */
- reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- /* enable CPU FDI TX and PCH FDI RX */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- /* SNB-B */
- temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
-
- intel_de_write(dev_priv, FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- }
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- for (i = 0; i < 4; i++) {
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(500);
-
- for (retry = 0; retry < 5; retry++) {
- reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
- if (temp & FDI_RX_BIT_LOCK) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_BIT_LOCK);
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 1 done.\n");
- break;
- }
- udelay(50);
- }
- if (retry < 5)
- break;
- }
- if (i == 4)
- drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
-
- /* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- if (IS_GEN(dev_priv, 6)) {
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- /* SNB-B */
- temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- }
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- }
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- for (i = 0; i < 4; i++) {
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(500);
-
- for (retry = 0; retry < 5; retry++) {
- reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
- if (temp & FDI_RX_SYMBOL_LOCK) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 2 done.\n");
- break;
- }
- udelay(50);
- }
- if (retry < 5)
- break;
- }
- if (i == 4)
- drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
-
- drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
-}
-
-/* Manual link training for Ivy Bridge A0 parts */
-static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp, i, j;
-
- /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
- for train result */
- reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
- intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
-
- /* Try each vswing and preemphasis setting twice before moving on */
- for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
- /* disable first in case we need to retry */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
- temp &= ~FDI_TX_ENABLE;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_AUTO;
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp &= ~FDI_RX_ENABLE;
- intel_de_write(dev_priv, reg, temp);
-
- /* enable CPU FDI TX and PCH FDI RX */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[j/2];
- temp |= FDI_COMPOSITE_SYNC;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
-
- intel_de_write(dev_priv, FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- temp |= FDI_COMPOSITE_SYNC;
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(1); /* should be 0.5us */
-
- for (i = 0; i < 4; i++) {
- reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_BIT_LOCK ||
- (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_BIT_LOCK);
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 1 done, level %i.\n",
- i);
- break;
- }
- udelay(1); /* should be 0.5us */
- }
- if (i == 4) {
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 1 fail on vswing %d\n", j / 2);
- continue;
- }
-
- /* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE_IVB;
- temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(2); /* should be 1.5us */
-
- for (i = 0; i < 4; i++) {
- reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_SYMBOL_LOCK ||
- (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
- intel_de_write(dev_priv, reg,
- temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 2 done, level %i.\n",
- i);
- goto train_done;
- }
- udelay(2); /* should be 1.5us */
- }
- if (i == 4)
- drm_dbg_kms(&dev_priv->drm,
- "FDI train 2 fail on vswing %d\n", j / 2);
- }
-
-train_done:
- drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
-}
-
-static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- enum pipe pipe = intel_crtc->pipe;
- i915_reg_t reg;
- u32 temp;
-
- /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
- temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(200);
-
- /* Switch from Rawclk to PCDclk */
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(200);
-
- /* Enable CPU FDI TX PLL, always on for Ironlake */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
- }
-}
-
-static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = intel_crtc->pipe;
- i915_reg_t reg;
- u32 temp;
-
- /* Switch from PCDclk to Rawclk */
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
-
- /* Disable CPU FDI TX PLL */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
-
- /* Wait for the clocks to turn off. */
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
-}
-
-static void ilk_fdi_disable(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp;
-
- /* disable CPU FDI tx and PCH FDI rx */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
- intel_de_posting_read(dev_priv, reg);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(0x7 << 16);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
-
- /* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev_priv))
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
- FDI_RX_PHASE_SYNC_POINTER_OVR);
-
- /* still set train pattern 1 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (HAS_PCH_CPT(dev_priv)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- }
- /* BPC in FDI rx is consistent with that in PIPECONF */
- temp &= ~(0x07 << 16);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
- udelay(100);
-}
-
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
struct drm_crtc *crtc;
@@ -5752,7 +4007,7 @@ static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_st
* Finds the encoder associated with the given CRTC. This can only be
* used when we know that the CRTC isn't feeding multiple encoders!
*/
-static struct intel_encoder *
+struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
@@ -6271,7 +4526,7 @@ static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
}
-inline u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
+u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
{
if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
return (PS_FILTER_PROGRAMMED |
@@ -6470,7 +4725,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s
if (!old_crtc_state->ips_enabled)
return false;
- if (needs_modeset(new_crtc_state))
+ if (intel_crtc_needs_modeset(new_crtc_state))
return true;
/*
@@ -6497,7 +4752,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
if (!new_crtc_state->ips_enabled)
return false;
- if (needs_modeset(new_crtc_state))
+ if (intel_crtc_needs_modeset(new_crtc_state))
return true;
/*
@@ -6550,7 +4805,7 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
- return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
+ return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
new_crtc_state->active_planes;
}
@@ -6558,7 +4813,7 @@ static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
return old_crtc_state->active_planes &&
- (!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
+ (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
}
static void intel_post_plane_update(struct intel_atomic_state *state,
@@ -6590,41 +4845,72 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
icl_wa_scalerclkgating(dev_priv, pipe, false);
}
-static void skl_disable_async_flip_wa(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- const struct intel_crtc_state *new_crtc_state)
+static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u8 update_planes = crtc_state->update_planes;
+ const struct intel_plane_state *plane_state;
struct intel_plane *plane;
- struct intel_plane_state *new_plane_state;
int i;
- for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
- u32 update_mask = new_crtc_state->update_planes;
- u32 plane_ctl, surf_addr;
- enum plane_id plane_id;
- unsigned long irqflags;
- enum pipe pipe;
-
- if (crtc->pipe != plane->pipe ||
- !(update_mask & BIT(plane->id)))
- continue;
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->enable_flip_done &&
+ plane->pipe == crtc->pipe &&
+ update_planes & BIT(plane->id))
+ plane->enable_flip_done(plane);
+ }
+}
- plane_id = plane->id;
- pipe = plane->pipe;
+static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u8 update_planes = crtc_state->update_planes;
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- plane_ctl = intel_de_read_fw(dev_priv, PLANE_CTL(pipe, plane_id));
- surf_addr = intel_de_read_fw(dev_priv, PLANE_SURF(pipe, plane_id));
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->disable_flip_done &&
+ plane->pipe == crtc->pipe &&
+ update_planes & BIT(plane->id))
+ plane->disable_flip_done(plane);
+ }
+}
- plane_ctl &= ~PLANE_CTL_ASYNC_FLIP;
+static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u8 update_planes = new_crtc_state->update_planes;
+ const struct intel_plane_state *old_plane_state;
+ struct intel_plane *plane;
+ bool need_vbl_wait = false;
+ int i;
- intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), surf_addr);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
+ if (plane->need_async_flip_disable_wa &&
+ plane->pipe == crtc->pipe &&
+ update_planes & BIT(plane->id)) {
+ /*
+ * Apart from the async flip bit we want to
+ * preserve the old state for the plane.
+ */
+ plane->async_flip(plane, old_crtc_state,
+ old_plane_state, false);
+ need_vbl_wait = true;
+ }
}
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ if (need_vbl_wait)
+ intel_wait_for_vblank(i915, crtc->pipe);
}
static void intel_pre_plane_update(struct intel_atomic_state *state,
@@ -6681,7 +4967,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* If we're doing a modeset we don't need to do any
* pre-vblank watermark programming here.
*/
- if (!needs_modeset(new_crtc_state)) {
+ if (!intel_crtc_needs_modeset(new_crtc_state)) {
/*
* For platforms that support atomic watermarks, program the
* 'intermediate' watermarks immediately. On pre-gen9 platforms, these
@@ -6717,10 +5003,8 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* WA for platforms where async address update enable bit
* is double buffered and only latched at start of vblank.
*/
- if (old_crtc_state->uapi.async_flip &&
- !new_crtc_state->uapi.async_flip &&
- IS_GEN_RANGE(dev_priv, 9, 10))
- skl_disable_async_flip_wa(state, crtc, new_crtc_state);
+ if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
+ intel_crtc_async_flip_disable_wa(state, crtc);
}
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
@@ -7140,7 +5424,7 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
val = intel_de_read(dev_priv, reg);
val &= ~HSW_FRAME_START_DELAY_MASK;
- val |= HSW_FRAME_START_DELAY(0);
+ val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
@@ -7575,25 +5859,25 @@ modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
enum intel_display_power_domain domain;
u64 domains, new_domains, old_domains;
- old_domains = crtc->enabled_power_domains;
- crtc->enabled_power_domains = new_domains =
- get_crtc_power_domains(crtc_state);
+ domains = get_crtc_power_domains(crtc_state);
- domains = new_domains & ~old_domains;
+ new_domains = domains & ~crtc->enabled_power_domains.mask;
+ old_domains = crtc->enabled_power_domains.mask & ~domains;
- for_each_power_domain(domain, domains)
- intel_display_power_get(dev_priv, domain);
+ for_each_power_domain(domain, new_domains)
+ intel_display_power_get_in_set(dev_priv,
+ &crtc->enabled_power_domains,
+ domain);
- return old_domains & ~new_domains;
+ return old_domains;
}
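/*
 * Illustrative bitmask sketch with hypothetical values, not from the
 * patch: if the crtc currently holds power domains 0b0110 and the new
 * state needs 0b0011, then
 *
 *   new_domains = 0b0011 & ~0b0110 = 0b0001  (grabbed in the loop above)
 *   old_domains = 0b0110 & ~0b0011 = 0b0100  (returned for the caller to
 *                                             drop via
 *                                             modeset_put_crtc_power_domains())
 */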
-static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
- u64 domains)
+static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
+ u64 domains)
{
- enum intel_display_power_domain domain;
-
- for_each_power_domain(domain, domains)
- intel_display_power_put_unchecked(dev_priv, domain);
+ intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
+ &crtc->enabled_power_domains,
+ domains);
}
static void valleyview_crtc_enable(struct intel_atomic_state *state,
@@ -7789,12 +6073,10 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
to_intel_dbuf_state(dev_priv->dbuf.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- enum intel_display_power_domain domain;
struct intel_plane *plane;
struct drm_atomic_state *state;
struct intel_crtc_state *temp_crtc_state;
enum pipe pipe = crtc->pipe;
- u64 domains;
int ret;
if (!crtc_state->hw.active)
@@ -7850,10 +6132,7 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
intel_update_watermarks(crtc);
intel_disable_shared_dpll(crtc_state);
- domains = crtc->enabled_power_domains;
- for_each_power_domain(domain, domains)
- intel_display_power_put_unchecked(dev_priv, domain);
- crtc->enabled_power_domains = 0;
+ intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
dev_priv->active_pipes &= ~BIT(pipe);
cdclk_state->min_cdclk[pipe] = 0;
@@ -7933,143 +6212,6 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
}
}
-static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
-{
- if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
- return crtc_state->fdi_lanes;
-
- return 0;
-}
-
-static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *state = pipe_config->uapi.state;
- struct intel_crtc *other_crtc;
- struct intel_crtc_state *other_crtc_state;
-
- drm_dbg_kms(&dev_priv->drm,
- "checking fdi config on pipe %c, lanes %i\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- if (pipe_config->fdi_lanes > 4) {
- drm_dbg_kms(&dev_priv->drm,
- "invalid fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- return -EINVAL;
- }
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- if (pipe_config->fdi_lanes > 2) {
- drm_dbg_kms(&dev_priv->drm,
- "only 2 lanes on haswell, required: %i lanes\n",
- pipe_config->fdi_lanes);
- return -EINVAL;
- } else {
- return 0;
- }
- }
-
- if (INTEL_NUM_PIPES(dev_priv) == 2)
- return 0;
-
- /* Ivybridge 3 pipe is really complicated */
- switch (pipe) {
- case PIPE_A:
- return 0;
- case PIPE_B:
- if (pipe_config->fdi_lanes <= 2)
- return 0;
-
- other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
- other_crtc_state =
- intel_atomic_get_crtc_state(state, other_crtc);
- if (IS_ERR(other_crtc_state))
- return PTR_ERR(other_crtc_state);
-
- if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
- drm_dbg_kms(&dev_priv->drm,
- "invalid shared fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- return -EINVAL;
- }
- return 0;
- case PIPE_C:
- if (pipe_config->fdi_lanes > 2) {
- drm_dbg_kms(&dev_priv->drm,
- "only 2 lanes on pipe %c: required %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
- return -EINVAL;
- }
-
- other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
- other_crtc_state =
- intel_atomic_get_crtc_state(state, other_crtc);
- if (IS_ERR(other_crtc_state))
- return PTR_ERR(other_crtc_state);
-
- if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
- drm_dbg_kms(&dev_priv->drm,
- "fdi link B uses too many lanes to enable link C\n");
- return -EINVAL;
- }
- return 0;
- default:
- BUG();
- }
-}
-
-#define RETRY 1
-static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *i915 = to_i915(dev);
- const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- int lane, link_bw, fdi_dotclock, ret;
- bool needs_recompute = false;
-
-retry:
- /* FDI is a binary signal running at ~2.7GHz, encoding
- * each output octet as 10 bits. The actual frequency
- * is stored as a divider into a 100MHz clock, and the
- * mode pixel clock is stored in units of 1KHz.
- * Hence the bw of each lane in terms of the mode signal
- * is:
- */
- link_bw = intel_fdi_link_freq(i915, pipe_config);
-
- fdi_dotclock = adjusted_mode->crtc_clock;
-
- lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
- pipe_config->pipe_bpp);
-
- pipe_config->fdi_lanes = lane;
-
- intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
- link_bw, &pipe_config->fdi_m_n, false, false);
-
- ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
- if (ret == -EDEADLK)
- return ret;
-
- if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
- pipe_config->pipe_bpp -= 2*3;
- drm_dbg_kms(&i915->drm,
- "fdi link bw constraint, reducing pipe bpp to %i\n",
- pipe_config->pipe_bpp);
- needs_recompute = true;
- pipe_config->bw_constrained = true;
-
- goto retry;
- }
-
- if (needs_recompute)
- return RETRY;
-
- return ret;
-}
-
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -8301,19 +6443,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return -EINVAL;
}
- if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
- pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
- pipe_config->hw.ctm) {
- /*
- * There is only one pipe CSC unit per pipe, and we need that
- * for output conversion from RGB->YCBCR. So if CTM is already
- * applied we can't support YCBCR420 output.
- */
- drm_dbg_kms(&dev_priv->drm,
- "YCBCR420 and CTM together are not possible\n");
- return -EINVAL;
- }
-
/*
* Pipe horizontal size must be even in:
* - DVO ganged mode
@@ -8425,51 +6554,6 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
}
}
-static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->params.panel_use_ssc >= 0)
- return dev_priv->params.panel_use_ssc != 0;
- return dev_priv->vbt.lvds_use_ssc
- && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
-}
-
-static u32 pnv_dpll_compute_fp(struct dpll *dpll)
-{
- return (1 << dpll->n) << 16 | dpll->m2;
-}
-
-static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
-{
- return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
-}
-
-static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 fp, fp2 = 0;
-
- if (IS_PINEVIEW(dev_priv)) {
- fp = pnv_dpll_compute_fp(&crtc_state->dpll);
- if (reduced_clock)
- fp2 = pnv_dpll_compute_fp(reduced_clock);
- } else {
- fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
- if (reduced_clock)
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
- }
-
- crtc_state->dpll_hw_state.fp0 = fp;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- reduced_clock) {
- crtc_state->dpll_hw_state.fp1 = fp2;
- } else {
- crtc_state->dpll_hw_state.fp1 = fp;
- }
-}
-
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
pipe)
{
@@ -8594,39 +6678,6 @@ void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_s
intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
-static void vlv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
- if (crtc->pipe != PIPE_A)
- pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- /* DPLL not used with DSI, but still need the rest set up */
- if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
- pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
- DPLL_EXT_BUFFER_ENABLE_VLV;
-
- pipe_config->dpll_hw_state.dpll_md =
- (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-}
-
-static void chv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
- if (crtc->pipe != PIPE_A)
- pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- /* DPLL not used with DSI, but still need the rest set up */
- if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
- pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
-
- pipe_config->dpll_hw_state.dpll_md =
- (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-}
-
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
@@ -8886,128 +6937,7 @@ void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
vlv_disable_pll(dev_priv, pipe);
}
-static void i9xx_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dpll;
- struct dpll *clock = &crtc_state->dpll;
-
- i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
-
- dpll = DPLL_VGA_MODE_DIS;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
- dpll |= DPLLB_MODE_LVDS;
- else
- dpll |= DPLLB_MODE_DAC_SERIAL;
-
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
- dpll |= (crtc_state->pixel_multiplier - 1)
- << SDVO_MULTIPLIER_SHIFT_HIRES;
- }
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- if (intel_crtc_has_dp_encoder(crtc_state))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- /* compute bitmask from p1 value */
- if (IS_PINEVIEW(dev_priv))
- dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
- else {
- dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (IS_G4X(dev_priv) && reduced_clock)
- dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- }
- switch (clock->p2) {
- case 5:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
- break;
- case 7:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
- break;
- case 10:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
- break;
- case 14:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
- break;
- }
- if (INTEL_GEN(dev_priv) >= 4)
- dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
-
- if (crtc_state->sdvo_tv_clock)
- dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv))
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
- else
- dpll |= PLL_REF_INPUT_DREFCLK;
-
- dpll |= DPLL_VCO_ENABLE;
- crtc_state->dpll_hw_state.dpll = dpll;
-
- if (INTEL_GEN(dev_priv) >= 4) {
- u32 dpll_md = (crtc_state->pixel_multiplier - 1)
- << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- crtc_state->dpll_hw_state.dpll_md = dpll_md;
- }
-}
-
-static void i8xx_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 dpll;
- struct dpll *clock = &crtc_state->dpll;
- i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
-
- dpll = DPLL_VGA_MODE_DIS;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- } else {
- if (clock->p1 == 2)
- dpll |= PLL_P1_DIVIDE_BY_TWO;
- else
- dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (clock->p2 == 4)
- dpll |= PLL_P2_DIVIDE_BY_4;
- }
-
- /*
- * Bspec:
- * "[Almador Errata}: For the correct operation of the muxed DVO pins
- * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
- * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
- * Enable) must be set to “1†in both the DPLL A Control Register
- * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
- *
- * For simplicity We simply keep both bits always enabled in
- * both DPLLS. The spec says we should disable the DVO 2X clock
- * when not needed, but this seems to work fine in practice.
- */
- if (IS_I830(dev_priv) ||
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
- dpll |= DPLL_DVO_2X_MODE;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv))
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
- else
- dpll |= PLL_REF_INPUT_DREFCLK;
-
- dpll |= DPLL_VCO_ENABLE;
- crtc_state->dpll_hw_state.dpll = dpll;
-}
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
@@ -9207,214 +7137,12 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- pipeconf |= PIPECONF_FRAME_START_DELAY(0);
+ pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
-static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_limit *limit;
- int refclk = 48000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- refclk);
- }
-
- limit = &intel_limits_i8xx_lvds;
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
- limit = &intel_limits_i8xx_dvo;
- } else {
- limit = &intel_limits_i8xx_dac;
- }
-
- if (!crtc_state->clock_set &&
- !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- i8xx_compute_dpll(crtc, crtc_state, NULL);
-
- return 0;
-}
-
-static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_limit *limit;
- int refclk = 96000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- refclk);
- }
-
- if (intel_is_dual_link_lvds(dev_priv))
- limit = &intel_limits_g4x_dual_channel_lvds;
- else
- limit = &intel_limits_g4x_single_channel_lvds;
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
- limit = &intel_limits_g4x_hdmi;
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
- limit = &intel_limits_g4x_sdvo;
- } else {
- /* The option is for other outputs */
- limit = &intel_limits_i9xx_sdvo;
- }
-
- if (!crtc_state->clock_set &&
- !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- i9xx_compute_dpll(crtc, crtc_state, NULL);
-
- return 0;
-}
-
-static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_limit *limit;
- int refclk = 96000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- refclk);
- }
-
- limit = &pnv_limits_lvds;
- } else {
- limit = &pnv_limits_sdvo;
- }
-
- if (!crtc_state->clock_set &&
- !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- i9xx_compute_dpll(crtc, crtc_state, NULL);
-
- return 0;
-}
-
-static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_limit *limit;
- int refclk = 96000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- refclk);
- }
-
- limit = &intel_limits_i9xx_lvds;
- } else {
- limit = &intel_limits_i9xx_sdvo;
- }
-
- if (!crtc_state->clock_set &&
- !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- i9xx_compute_dpll(crtc, crtc_state, NULL);
-
- return 0;
-}
-
-static int chv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- int refclk = 100000;
- const struct intel_limit *limit = &intel_limits_chv;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (!crtc_state->clock_set &&
- !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- chv_compute_dpll(crtc, crtc_state);
-
- return 0;
-}
-
-static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- int refclk = 100000;
- const struct intel_limit *limit = &intel_limits_vlv;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (!crtc_state->clock_set &&
- !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- vlv_compute_dpll(crtc, crtc_state);
-
- return 0;
-}
-
static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
{
if (IS_I830(dev_priv))
@@ -10316,7 +8044,7 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- val |= PIPECONF_FRAME_START_DELAY(0);
+ val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, PIPECONF(pipe), val);
intel_de_posting_read(dev_priv, PIPECONF(pipe));
@@ -10424,172 +8152,6 @@ int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
return DIV_ROUND_UP(bps, link_bw * 8);
}
-static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
-{
- return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
-}
-
-static void ilk_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dpll, fp, fp2;
- int factor;
-
- /* Enable autotuning of the PLL clock (if permissible) */
- factor = 21;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if ((intel_panel_use_ssc(dev_priv) &&
- dev_priv->vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(dev_priv) &&
- intel_is_dual_link_lvds(dev_priv)))
- factor = 25;
- } else if (crtc_state->sdvo_tv_clock) {
- factor = 20;
- }
-
- fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
-
- if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
- fp |= FP_CB_TUNE;
-
- if (reduced_clock) {
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
-
- if (reduced_clock->m < factor * reduced_clock->n)
- fp2 |= FP_CB_TUNE;
- } else {
- fp2 = fp;
- }
-
- dpll = 0;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
- dpll |= DPLLB_MODE_LVDS;
- else
- dpll |= DPLLB_MODE_DAC_SERIAL;
-
- dpll |= (crtc_state->pixel_multiplier - 1)
- << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- if (intel_crtc_has_dp_encoder(crtc_state))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- /*
- * The high speed IO clock is only really required for
- * SDVO/HDMI/DP, but we also enable it for CRT to make it
- * possible to share the DPLL between CRT and HDMI. Enabling
- * the clock needlessly does no real harm, except potentially
- * using up a bit of power.
- *
- * We'll limit this to IVB with 3 pipes, since it has only two
- * DPLLs and so DPLL sharing is the only way to get three pipes
- * driving PCH ports at the same time. On SNB we could do this,
- * and potentially avoid enabling the second DPLL, but it's not
- * clear if it's a win or a loss power-wise. No point in doing
- * this on ILK at all since it has a fixed DPLL<->pipe mapping.
- */
- if (INTEL_NUM_PIPES(dev_priv) == 3 &&
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
- dpll |= DPLL_SDVO_HIGH_SPEED;
-
- /* compute bitmask from p1 value */
- dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- /* also FPA1 */
- dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
-
- switch (crtc_state->dpll.p2) {
- case 5:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
- break;
- case 7:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
- break;
- case 10:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
- break;
- case 14:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
- break;
- }
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv))
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
- else
- dpll |= PLL_REF_INPUT_DREFCLK;
-
- dpll |= DPLL_VCO_ENABLE;
-
- crtc_state->dpll_hw_state.dpll = dpll;
- crtc_state->dpll_hw_state.fp0 = fp;
- crtc_state->dpll_hw_state.fp1 = fp2;
-}
-
-static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
- const struct intel_limit *limit;
- int refclk = 120000;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
- if (!crtc_state->has_pch_encoder)
- return 0;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
- "using SSC reference clock of %d kHz\n",
- dev_priv->vbt.lvds_ssc_freq);
- refclk = dev_priv->vbt.lvds_ssc_freq;
- }
-
- if (intel_is_dual_link_lvds(dev_priv)) {
- if (refclk == 100000)
- limit = &ilk_limits_dual_lvds_100m;
- else
- limit = &ilk_limits_dual_lvds;
- } else {
- if (refclk == 100000)
- limit = &ilk_limits_single_lvds_100m;
- else
- limit = &ilk_limits_single_lvds;
- }
- } else {
- limit = &ilk_limits_dac;
- }
-
- if (!crtc_state->clock_set &&
- !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
- refclk, NULL, &crtc_state->dpll)) {
- drm_err(&dev_priv->drm,
- "Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- ilk_compute_dpll(crtc, crtc_state, NULL);
-
- if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
- return -EINVAL;
- }
-
- return 0;
-}
-
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
@@ -11002,29 +8564,6 @@ out:
return ret;
}
-static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
-
- if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
- INTEL_GEN(dev_priv) >= 11) {
- struct intel_encoder *encoder =
- intel_get_crtc_new_encoder(state, crtc_state);
-
- if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static void dg1_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
struct intel_crtc_state *pipe_config)
{
@@ -11226,16 +8765,13 @@ static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
- u64 *power_domain_mask,
- intel_wakeref_t *wakerefs)
+ struct intel_display_power_domain_set *power_domain_set)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum intel_display_power_domain power_domain;
unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
unsigned long enabled_panel_transcoders = 0;
enum transcoder panel_transcoder;
- intel_wakeref_t wf;
u32 tmp;
if (INTEL_GEN(dev_priv) >= 11)
@@ -11306,16 +8842,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
enabled_panel_transcoders != BIT(TRANSCODER_EDP));
- power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
- drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
-
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wf)
+ if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
+ POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
- wakerefs[power_domain] = wf;
- *power_domain_mask |= BIT_ULL(power_domain);
-
tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
return tmp & PIPECONF_ENABLE;
@@ -11323,14 +8853,11 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
- u64 *power_domain_mask,
- intel_wakeref_t *wakerefs)
+ struct intel_display_power_domain_set *power_domain_set)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum intel_display_power_domain power_domain;
enum transcoder cpu_transcoder;
- intel_wakeref_t wf;
enum port port;
u32 tmp;
@@ -11340,16 +8867,10 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
else
cpu_transcoder = TRANSCODER_DSI_C;
- power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
-
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wf)
+ if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
+ POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
continue;
- wakerefs[power_domain] = wf;
- *power_domain_mask |= BIT_ULL(power_domain);
-
/*
* The PLL needs to be enabled with a valid divider
* configuration, otherwise accessing DSI registers will hang
@@ -11432,30 +8953,20 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
- enum intel_display_power_domain power_domain;
- u64 power_domain_mask;
+ struct intel_display_power_domain_set power_domain_set = { };
bool active;
u32 tmp;
- pipe_config->master_transcoder = INVALID_TRANSCODER;
-
- power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wf)
+ if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
- wakerefs[power_domain] = wf;
- power_domain_mask = BIT_ULL(power_domain);
-
pipe_config->shared_dpll = NULL;
- active = hsw_get_transcoder_state(crtc, pipe_config,
- &power_domain_mask, wakerefs);
+ active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
if (IS_GEN9_LP(dev_priv) &&
- bxt_get_dsi_transcoder_state(crtc, pipe_config,
- &power_domain_mask, wakerefs)) {
+ bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
drm_WARN_ON(&dev_priv->drm, active);
active = true;
}
@@ -11478,6 +8989,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_get_transcoder_timings(crtc, pipe_config);
}
+ if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
+ intel_vrr_get_config(crtc, pipe_config);
+
intel_get_pipe_src_size(crtc, pipe_config);
if (IS_HASWELL(dev_priv)) {
@@ -11519,14 +9033,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
pipe_config->ips_linetime =
REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
- power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
- drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain));
-
- wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (wf) {
- wakerefs[power_domain] = wf;
- power_domain_mask |= BIT_ULL(power_domain);
-
+ if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
+ POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
if (INTEL_GEN(dev_priv) >= 9)
skl_get_pfit_config(pipe_config);
else
@@ -11560,9 +9068,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
}
out:
- for_each_power_domain(power_domain, power_domain_mask)
- intel_display_power_put(dev_priv,
- power_domain, wakerefs[power_domain]);
+ intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
return active;
}
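The readout path above now collects every wakeref it acquires in one intel_display_power_domain_set and drops them all in a single call at the end, replacing the old power_domain_mask plus per-domain wakeref array. A rough, self-contained sketch of that pattern with generic names (this is not the i915 API, just the shape of it):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_DOMAINS 8

/* Tracks which domains were successfully acquired. */
struct power_domain_set {
        uint32_t acquired_mask;
};

/* Stand-in for a "get the domain only if it is already powered" call. */
static bool domain_get_if_enabled(int domain)
{
        return domain != 2; /* pretend domain 2 is powered down */
}

static void domain_put(int domain)
{
        printf("released domain %d\n", domain);
}

static bool power_get_in_set_if_enabled(struct power_domain_set *set, int domain)
{
        if (!domain_get_if_enabled(domain))
                return false;
        set->acquired_mask |= 1u << domain;
        return true;
}

static void power_put_all_in_set(struct power_domain_set *set)
{
        for (int d = 0; d < MAX_DOMAINS; d++)
                if (set->acquired_mask & (1u << d))
                        domain_put(d);
        set->acquired_mask = 0;
}

int main(void)
{
        struct power_domain_set set = { 0 };

        if (power_get_in_set_if_enabled(&set, 0))
                printf("read pipe registers\n");
        if (power_get_in_set_if_enabled(&set, 2))
                printf("read panel fitter registers\n");

        /* Single teardown path, as in the new readout code. */
        power_put_all_in_set(&set);
        return 0;
}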
@@ -11582,569 +9088,6 @@ static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
return true;
}
-static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- u32 base;
-
- if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
- base = sg_dma_address(obj->mm.pages->sgl);
- else
- base = intel_plane_ggtt_offset(plane_state);
-
- return base + plane_state->color_plane[0].offset;
-}
-
-static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
-{
- int x = plane_state->uapi.dst.x1;
- int y = plane_state->uapi.dst.y1;
- u32 pos = 0;
-
- if (x < 0) {
- pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
- x = -x;
- }
- pos |= x << CURSOR_X_SHIFT;
-
- if (y < 0) {
- pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
- y = -y;
- }
- pos |= y << CURSOR_Y_SHIFT;
-
- return pos;
-}
-
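intel_cursor_position() above encodes each axis as sign/magnitude: a negative coordinate sets a per-axis sign flag and stores the absolute value. A small standalone illustration of the same encoding (the field layout constants below are assumptions chosen for the sketch, not the driver's register definitions):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout for the sketch: low 16 bits = X, high 16 bits = Y,
 * bit 15 of each half = sign flag. */
#define POS_SIGN 0x8000u
#define X_SHIFT  0
#define Y_SHIFT  16

static uint32_t cursor_position(int x, int y)
{
        uint32_t pos = 0;

        if (x < 0) {
                pos |= POS_SIGN << X_SHIFT;
                x = -x;
        }
        pos |= (uint32_t)x << X_SHIFT;

        if (y < 0) {
                pos |= POS_SIGN << Y_SHIFT;
                y = -y;
        }
        pos |= (uint32_t)y << Y_SHIFT;

        return pos;
}

int main(void)
{
        /* A cursor partially off the top-left corner. */
        printf("pos(-10, -3) = 0x%08x\n", cursor_position(-10, -3)); /* 0x8003800a */
        printf("pos(100, 50) = 0x%08x\n", cursor_position(100, 50)); /* 0x00320064 */
        return 0;
}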
-static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
- const struct drm_mode_config *config =
- &plane_state->uapi.plane->dev->mode_config;
- int width = drm_rect_width(&plane_state->uapi.dst);
- int height = drm_rect_height(&plane_state->uapi.dst);
-
- return width > 0 && width <= config->cursor_width &&
- height > 0 && height <= config->cursor_height;
-}
-
-static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- unsigned int rotation = plane_state->hw.rotation;
- int src_x, src_y;
- u32 offset;
- int ret;
-
- ret = intel_plane_compute_gtt(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- src_x = plane_state->uapi.src.x1 >> 16;
- src_y = plane_state->uapi.src.y1 >> 16;
-
- intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
- offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
- plane_state, 0);
-
- if (src_x != 0 || src_y != 0) {
- drm_dbg_kms(&dev_priv->drm,
- "Arbitrary cursor panning not supported\n");
- return -EINVAL;
- }
-
- /*
- * Put the final coordinates back so that the src
- * coordinate checks will see the right values.
- */
- drm_rect_translate_to(&plane_state->uapi.src,
- src_x << 16, src_y << 16);
-
- /* ILK+ do this automagically in hardware */
- if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
-
- offset += (src_h * src_w - 1) * fb->format->cpp[0];
- }
-
- plane_state->color_plane[0].offset = offset;
- plane_state->color_plane[0].x = src_x;
- plane_state->color_plane[0].y = src_y;
-
- return 0;
-}
-
-static int intel_check_cursor(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- const struct drm_rect src = plane_state->uapi.src;
- const struct drm_rect dst = plane_state->uapi.dst;
- int ret;
-
- if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
- drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
- return -EINVAL;
- }
-
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true);
- if (ret)
- return ret;
-
- /* Use the unclipped src/dst rectangles, which we program to hw */
- plane_state->uapi.src = src;
- plane_state->uapi.dst = dst;
-
- ret = intel_cursor_check_surface(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- ret = intel_plane_check_src_coordinates(plane_state);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static unsigned int
-i845_cursor_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
-{
- return 2048;
-}
-
-static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
- u32 cntl = 0;
-
- if (crtc_state->gamma_enable)
- cntl |= CURSOR_GAMMA_ENABLE;
-
- return cntl;
-}
-
-static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- return CURSOR_ENABLE |
- CURSOR_FORMAT_ARGB |
- CURSOR_STRIDE(plane_state->color_plane[0].stride);
-}
-
-static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
- int width = drm_rect_width(&plane_state->uapi.dst);
-
- /*
- * 845g/865g are only limited by the width of their cursors;
- * the height is arbitrary up to the precision of the register.
- */
- return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
-}
-
-static int i845_check_cursor(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- int ret;
-
- ret = intel_check_cursor(crtc_state, plane_state);
- if (ret)
- return ret;
-
- /* if we want to turn off the cursor ignore width and height */
- if (!fb)
- return 0;
-
- /* Check for which cursor types we support */
- if (!i845_cursor_size_ok(plane_state)) {
- drm_dbg_kms(&i915->drm,
- "Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst));
- return -EINVAL;
- }
-
- drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
-
- switch (fb->pitches[0]) {
- case 256:
- case 512:
- case 1024:
- case 2048:
- break;
- default:
- drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
- fb->pitches[0]);
- return -EINVAL;
- }
-
- plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
-
- return 0;
-}
-
-static void i845_update_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- u32 cntl = 0, base = 0, pos = 0, size = 0;
- unsigned long irqflags;
-
- if (plane_state && plane_state->uapi.visible) {
- unsigned int width = drm_rect_width(&plane_state->uapi.dst);
- unsigned int height = drm_rect_height(&plane_state->uapi.dst);
-
- cntl = plane_state->ctl |
- i845_cursor_ctl_crtc(crtc_state);
-
- size = (height << 12) | width;
-
- base = intel_cursor_base(plane_state);
- pos = intel_cursor_position(plane_state);
- }
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- /* On these chipsets we can only modify the base/size/stride
- * whilst the cursor is disabled.
- */
- if (plane->cursor.base != base ||
- plane->cursor.size != size ||
- plane->cursor.cntl != cntl) {
- intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
- intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
- intel_de_write_fw(dev_priv, CURSIZE, size);
- intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
- intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);
-
- plane->cursor.base = base;
- plane->cursor.size = size;
- plane->cursor.cntl = cntl;
- } else {
- intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
- }
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void i845_disable_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- i845_update_cursor(plane, crtc_state, NULL);
-}
-
-static bool i845_cursor_get_hw_state(struct intel_plane *plane,
- enum pipe *pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum intel_display_power_domain power_domain;
- intel_wakeref_t wakeref;
- bool ret;
-
- power_domain = POWER_DOMAIN_PIPE(PIPE_A);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return false;
-
- ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
-
- *pipe = PIPE_A;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return ret;
-}
-
-static unsigned int
-i9xx_cursor_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
-{
- return plane->base.dev->mode_config.cursor_width * 4;
-}
-
-static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 cntl = 0;
-
- if (INTEL_GEN(dev_priv) >= 11)
- return cntl;
-
- if (crtc_state->gamma_enable)
- cntl = MCURSOR_GAMMA_ENABLE;
-
- if (crtc_state->csc_enable)
- cntl |= MCURSOR_PIPE_CSC_ENABLE;
-
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
- cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
-
- return cntl;
-}
-
-static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- u32 cntl = 0;
-
- if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
- cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
-
- switch (drm_rect_width(&plane_state->uapi.dst)) {
- case 64:
- cntl |= MCURSOR_MODE_64_ARGB_AX;
- break;
- case 128:
- cntl |= MCURSOR_MODE_128_ARGB_AX;
- break;
- case 256:
- cntl |= MCURSOR_MODE_256_ARGB_AX;
- break;
- default:
- MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
- return 0;
- }
-
- if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
- cntl |= MCURSOR_ROTATE_180;
-
- return cntl;
-}
-
-static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- int width = drm_rect_width(&plane_state->uapi.dst);
- int height = drm_rect_height(&plane_state->uapi.dst);
-
- if (!intel_cursor_size_ok(plane_state))
- return false;
-
- /* Cursor width is limited to a few power-of-two sizes */
- switch (width) {
- case 256:
- case 128:
- case 64:
- break;
- default:
- return false;
- }
-
- /*
- * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
- * height from 8 lines up to the cursor width, when the
- * cursor is not rotated. Everything else requires square
- * cursors.
- */
- if (HAS_CUR_FBC(dev_priv) &&
- plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
- if (height < 8 || height > width)
- return false;
- } else {
- if (height != width)
- return false;
- }
-
- return true;
-}
-
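The removed size checks amount to: the cursor width must be 64, 128 or 256, and the cursor must be square unless the platform has CUR_FBC_CTL and the cursor is unrotated, in which case the height may range from 8 lines up to the width. A compact restatement as a standalone predicate (illustrative only; the real check also honours the mode_config cursor limits):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the i9xx cursor size rules described above; illustrative only. */
static bool cursor_size_ok(int width, int height, bool has_cur_fbc, bool rotated)
{
        /* Cursor width is limited to a few power-of-two sizes. */
        if (width != 64 && width != 128 && width != 256)
                return false;

        if (has_cur_fbc && !rotated)
                /* CUR_FBC_CTL allows heights from 8 lines up to the width. */
                return height >= 8 && height <= width;

        /* Everything else requires square cursors. */
        return height == width;
}

int main(void)
{
        printf("%d\n", cursor_size_ok(256, 64, true, false));  /* 1: short cursor via FBC */
        printf("%d\n", cursor_size_ok(256, 64, false, false)); /* 0: must be square */
        printf("%d\n", cursor_size_ok(100, 100, true, false)); /* 0: width not 64/128/256 */
        return 0;
}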
-static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- enum pipe pipe = plane->pipe;
- int ret;
-
- ret = intel_check_cursor(crtc_state, plane_state);
- if (ret)
- return ret;
-
- /* if we want to turn off the cursor ignore width and height */
- if (!fb)
- return 0;
-
- /* Check for which cursor types we support */
- if (!i9xx_cursor_size_ok(plane_state)) {
- drm_dbg(&dev_priv->drm,
- "Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst));
- return -EINVAL;
- }
-
- drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
-
- if (fb->pitches[0] !=
- drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
- drm_dbg_kms(&dev_priv->drm,
- "Invalid cursor stride (%u) (cursor width %d)\n",
- fb->pitches[0],
- drm_rect_width(&plane_state->uapi.dst));
- return -EINVAL;
- }
-
- /*
- * There's something wrong with the cursor on CHV pipe C.
- * If it straddles the left edge of the screen then
- * moving it away from the edge or disabling it often
- * results in a pipe underrun, and often that can lead to
- * a dead pipe (constant underrun reported, and it scans
- * out just a solid color). To recover from that, the
- * display power well must be turned off and on again.
- * Refuse to put the cursor into that compromised position.
- */
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
- plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
- drm_dbg_kms(&dev_priv->drm,
- "CHV cursor C not allowed to straddle the left screen edge\n");
- return -EINVAL;
- }
-
- plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
-
- return 0;
-}
-
-static void i9xx_update_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
- u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
- unsigned long irqflags;
-
- if (plane_state && plane_state->uapi.visible) {
- unsigned width = drm_rect_width(&plane_state->uapi.dst);
- unsigned height = drm_rect_height(&plane_state->uapi.dst);
-
- cntl = plane_state->ctl |
- i9xx_cursor_ctl_crtc(crtc_state);
-
- if (width != height)
- fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
-
- base = intel_cursor_base(plane_state);
- pos = intel_cursor_position(plane_state);
- }
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- /*
- * On some platforms writing CURCNTR first will also
- * cause CURPOS to be armed by the CURBASE write.
- * Without the CURCNTR write the CURPOS write would
- * arm itself. Thus we always update CURCNTR before
- * CURPOS.
- *
- * On other platforms CURPOS always requires the
- * CURBASE write to arm the update. Additionally,
- * a write to any of the cursor registers will cancel
- * an already armed cursor update. Thus leaving out
- * the CURBASE write after CURPOS could lead to a
- * cursor that doesn't appear to move, or even change
- * shape. Thus we always write CURBASE.
- *
- * The other registers are armed by the CURBASE write
- * except when the plane is getting enabled at which time
- * the CURCNTR write arms the update.
- */
-
- if (INTEL_GEN(dev_priv) >= 9)
- skl_write_cursor_wm(plane, crtc_state);
-
- if (!needs_modeset(crtc_state))
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
-
- if (plane->cursor.base != base ||
- plane->cursor.size != fbc_ctl ||
- plane->cursor.cntl != cntl) {
- if (HAS_CUR_FBC(dev_priv))
- intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
- fbc_ctl);
- intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
- intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
- intel_de_write_fw(dev_priv, CURBASE(pipe), base);
-
- plane->cursor.base = base;
- plane->cursor.size = fbc_ctl;
- plane->cursor.cntl = cntl;
- } else {
- intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
- intel_de_write_fw(dev_priv, CURBASE(pipe), base);
- }
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void i9xx_disable_cursor(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- i9xx_update_cursor(plane, crtc_state, NULL);
-}
-
-static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
- enum pipe *pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum intel_display_power_domain power_domain;
- intel_wakeref_t wakeref;
- bool ret;
- u32 val;
-
- /*
- * Not 100% correct for planes that can move between pipes,
- * but that's only the case for gen2-3 which don't have any
- * display power wells.
- */
- power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return false;
-
- val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
-
- ret = val & MCURSOR_MODE;
-
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- *pipe = plane->pipe;
- else
- *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
- MCURSOR_PIPE_SELECT_SHIFT;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return ret;
-}
-
/* VESA 640x480x72Hz mode to set on the pipe */
static const struct drm_display_mode load_detect_mode = {
DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
@@ -12527,33 +9470,6 @@ static void ilk_pch_clock_get(struct intel_crtc *crtc,
&pipe_config->fdi_m_n);
}
-static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
- struct intel_crtc *crtc)
-{
- memset(crtc_state, 0, sizeof(*crtc_state));
-
- __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
-
- crtc_state->cpu_transcoder = INVALID_TRANSCODER;
- crtc_state->master_transcoder = INVALID_TRANSCODER;
- crtc_state->hsw_workaround_pipe = INVALID_PIPE;
- crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
- crtc_state->scaler_state.scaler_id = -1;
- crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
-}
-
-static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state;
-
- crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
-
- if (crtc_state)
- intel_crtc_state_reset(crtc_state, crtc);
-
- return crtc_state;
-}
-
/* Returns the currently programmed mode of the given encoder. */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
@@ -12594,14 +9510,6 @@ intel_encoder_current_mode(struct intel_encoder *encoder)
return mode;
}
-static void intel_crtc_destroy(struct drm_crtc *crtc)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- drm_crtc_cleanup(crtc);
- kfree(intel_crtc);
-}
-
/**
* intel_wm_need_update - Check whether watermarks need updating
* @cur: current plane state
@@ -12651,7 +9559,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- bool mode_changed = needs_modeset(crtc_state);
+ bool mode_changed = intel_crtc_needs_modeset(crtc_state);
bool was_crtc_enabled = old_crtc_state->hw.active;
bool is_crtc_enabled = crtc_state->hw.active;
bool turn_off, turn_on, visible, was_visible;
@@ -12842,6 +9750,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
plane_state->planar_linked_plane = NULL;
if (plane_state->planar_slave && !plane_state->uapi.visible) {
+ crtc_state->enabled_planes &= ~BIT(plane->id);
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->update_planes |= BIT(plane->id);
}
@@ -12885,6 +9794,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
linked_state->planar_slave = true;
linked_state->planar_linked_plane = plane;
+ crtc_state->enabled_planes |= BIT(linked->id);
crtc_state->active_planes |= BIT(linked->id);
crtc_state->update_planes |= BIT(linked->id);
drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
@@ -13013,7 +9923,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- bool mode_changed = needs_modeset(crtc_state);
+ bool mode_changed = intel_crtc_needs_modeset(crtc_state);
int ret;
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
@@ -13301,7 +10211,6 @@ static void snprintf_output_types(char *buf, size_t len,
}
static const char * const output_format_str[] = {
- [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
@@ -13310,7 +10219,7 @@ static const char * const output_format_str[] = {
static const char *output_formats(enum intel_output_format format)
{
if (format >= ARRAY_SIZE(output_format_str))
- format = INTEL_OUTPUT_FORMAT_INVALID;
+ return "invalid";
return output_format_str[format];
}
@@ -13428,6 +10337,13 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
intel_hdmi_infoframe_enable(DP_SDP_VSC))
intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
+ drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
+ yesno(pipe_config->vrr.enable),
+ pipe_config->vrr.vmin, pipe_config->vrr.vmax,
+ pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
+ intel_vrr_vmin_vblank_start(pipe_config),
+ intel_vrr_vmax_vblank_start(pipe_config));
+
drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->hw.mode);
drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
@@ -13815,7 +10731,7 @@ encoder_retry:
return ret;
}
- if (ret == RETRY) {
+ if (ret == I915_DISPLAY_CONFIG_RETRY) {
if (drm_WARN(&i915->drm, !retry,
"loop in pipe configuration computation\n"))
return -EINVAL;
@@ -14421,6 +11337,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(mst_master_transcoder);
+ PIPE_CONF_CHECK_BOOL(vrr.enable);
+ PIPE_CONF_CHECK_I(vrr.vmin);
+ PIPE_CONF_CHECK_I(vrr.vmax);
+ PIPE_CONF_CHECK_I(vrr.flipline);
+ PIPE_CONF_CHECK_I(vrr.pipeline_full);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
@@ -14845,7 +11767,7 @@ intel_modeset_verify_crtc(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
- if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
+ if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
return;
verify_wm_state(crtc, new_crtc_state);
@@ -14879,10 +11801,17 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
+ struct drm_display_mode adjusted_mode =
+ crtc_state->hw.adjusted_mode;
+
+ if (crtc_state->vrr.enable) {
+ adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
+ adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
+ adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
+ crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+ }
- drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
+ drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
crtc->mode_flags = crtc_state->mode_flags;
@@ -14916,8 +11845,8 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
if (IS_GEN(dev_priv, 2)) {
int vtotal;
- vtotal = adjusted_mode->crtc_vtotal;
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vtotal = adjusted_mode.crtc_vtotal;
+ if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
crtc->scanline_offset = vtotal - 1;
@@ -14940,7 +11869,7 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state)
return;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state))
+ if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
intel_release_shared_dplls(state, crtc);
@@ -14965,7 +11894,7 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
/* look at all crtc's that are going to be enabled in during modeset */
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
if (!crtc_state->hw.active ||
- !needs_modeset(crtc_state))
+ !intel_crtc_needs_modeset(crtc_state))
continue;
if (first_crtc_state) {
@@ -14990,7 +11919,7 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
crtc_state->hsw_workaround_pipe = INVALID_PIPE;
if (!crtc_state->hw.active ||
- needs_modeset(crtc_state))
+ intel_crtc_needs_modeset(crtc_state))
continue;
/* 2 or more enabled crtcs means no need for w/a */
@@ -15102,6 +12031,19 @@ static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
return 0;
}
+int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ return intel_crtc_add_planes_to_state(state, crtc,
+ old_crtc_state->enabled_planes |
+ new_crtc_state->enabled_planes);
+}
+
static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
{
/* See {hsw,vlv,ivb}_plane_ratio() */
@@ -15296,7 +12238,7 @@ static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->hw.enable &&
transcoders & BIT(new_crtc_state->cpu_transcoder) &&
- needs_modeset(new_crtc_state))
+ intel_crtc_needs_modeset(new_crtc_state))
return true;
}
@@ -15317,7 +12259,7 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
slave = crtc;
master = old_crtc_state->bigjoiner_linked_crtc;
master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
- if (!master_crtc_state || !needs_modeset(master_crtc_state))
+ if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
goto claimed;
}
@@ -15355,21 +12297,16 @@ claimed:
return -EINVAL;
}
-static int kill_bigjoiner_slave(struct intel_atomic_state *state,
- struct intel_crtc_state *master_crtc_state)
+static void kill_bigjoiner_slave(struct intel_atomic_state *state,
+ struct intel_crtc_state *master_crtc_state)
{
struct intel_crtc_state *slave_crtc_state =
- intel_atomic_get_crtc_state(&state->base,
- master_crtc_state->bigjoiner_linked_crtc);
-
- if (IS_ERR(slave_crtc_state))
- return PTR_ERR(slave_crtc_state);
+ intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
- return 0;
}
/**
@@ -15382,7 +12319,7 @@ static int kill_bigjoiner_slave(struct intel_atomic_state *state,
* Async flip can only change the plane surface address, so anything else
* changing is rejected from the intel_atomic_check_async() function.
* Once this check is cleared, the flip done interrupt is enabled using
- * the skl_enable_flip_done() function.
+ * the intel_crtc_enable_flip_done() function.
*
* As soon as the surface address register is written, the flip done interrupt is
* generated and the requested events are sent to userspace in the interrupt
@@ -15401,7 +12338,7 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (needs_modeset(new_crtc_state)) {
+ if (intel_crtc_needs_modeset(new_crtc_state)) {
drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
return -EINVAL;
}
@@ -15426,7 +12363,7 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
* this(vlv/chv and icl+) should be added when async flip is
* enabled in the atomic IOCTL path.
*/
- if (plane->id != PLANE_PRIMARY)
+ if (!plane->async_flip)
return -EINVAL;
/*
@@ -15507,20 +12444,43 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
- const struct intel_crtc_state *crtc_state;
+ struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
int i;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc_state *linked_crtc_state;
+ struct intel_crtc *linked_crtc;
+ int ret;
if (!crtc_state->bigjoiner)
continue;
- linked_crtc_state = intel_atomic_get_crtc_state(&state->base,
- crtc_state->bigjoiner_linked_crtc);
+ linked_crtc = crtc_state->bigjoiner_linked_crtc;
+ linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
if (IS_ERR(linked_crtc_state))
return PTR_ERR(linked_crtc_state);
+
+ if (!intel_crtc_needs_modeset(crtc_state))
+ continue;
+
+ linked_crtc_state->uapi.mode_changed = true;
+
+ ret = drm_atomic_add_affected_connectors(&state->base,
+ &linked_crtc->base);
+ if (ret)
+ return ret;
+
+ ret = intel_atomic_add_affected_planes(state, linked_crtc);
+ if (ret)
+ return ret;
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ /* Kill old bigjoiner link, we may re-establish afterwards */
+ if (intel_crtc_needs_modeset(crtc_state) &&
+ crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
+ kill_bigjoiner_slave(state, crtc_state);
}
return 0;
@@ -15547,6 +12507,8 @@ static int intel_atomic_check(struct drm_device *dev,
new_crtc_state->uapi.mode_changed = true;
}
+ intel_vrr_check_modeset(state);
+
ret = drm_atomic_helper_check_modeset(dev, &state->base);
if (ret)
goto fail;
@@ -15557,20 +12519,13 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state)) {
+ if (!intel_crtc_needs_modeset(new_crtc_state)) {
/* Light copy */
intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
continue;
}
- /* Kill old bigjoiner link, we may re-establish afterwards */
- if (old_crtc_state->bigjoiner && !old_crtc_state->bigjoiner_slave) {
- ret = kill_bigjoiner_slave(state, new_crtc_state);
- if (ret)
- goto fail;
- }
-
if (!new_crtc_state->uapi.enable) {
if (!new_crtc_state->bigjoiner_slave) {
intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
@@ -15595,7 +12550,7 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state))
+ if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
ret = intel_modeset_pipe_config_late(new_crtc_state);
@@ -15617,7 +12572,7 @@ static int intel_atomic_check(struct drm_device *dev,
* forced a full modeset.
*/
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
+ if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
continue;
if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
@@ -15640,11 +12595,21 @@ static int intel_atomic_check(struct drm_device *dev,
new_crtc_state->update_pipe = false;
}
}
+
+ if (new_crtc_state->bigjoiner) {
+ struct intel_crtc_state *linked_crtc_state =
+ intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
+
+ if (intel_crtc_needs_modeset(linked_crtc_state)) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
+ }
+ }
}
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (needs_modeset(new_crtc_state)) {
+ if (intel_crtc_needs_modeset(new_crtc_state)) {
any_ms = true;
continue;
}
@@ -15670,20 +12635,6 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- /*
- * distrust_bios_wm will force a full dbuf recomputation
- * but the hardware state will only get updated accordingly
- * if state->modeset==true. Hence distrust_bios_wm==true &&
- * state->modeset==false is an invalid combination which
- * would cause the hardware and software dbuf state to get
- * out of sync. We must prevent that.
- *
- * FIXME clean up this mess and introduce better
- * state tracking for dbuf.
- */
- if (dev_priv->wm.distrust_bios_wm)
- any_ms = true;
-
intel_fbc_choose_crtc(dev_priv, state);
ret = calc_watermark_data(state);
if (ret)
@@ -15721,12 +12672,12 @@ static int intel_atomic_check(struct drm_device *dev,
goto fail;
}
- if (!needs_modeset(new_crtc_state) &&
+ if (!intel_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->update_pipe)
continue;
intel_dump_pipe_config(new_crtc_state, state,
- needs_modeset(new_crtc_state) ?
+ intel_crtc_needs_modeset(new_crtc_state) ?
"[modeset]" : "[fastset]");
}
@@ -15758,7 +12709,7 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
return ret;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- bool mode_changed = needs_modeset(crtc_state);
+ bool mode_changed = intel_crtc_needs_modeset(crtc_state);
if (mode_changed || crtc_state->update_pipe ||
crtc_state->uapi.color_mgmt_changed) {
@@ -15769,17 +12720,6 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
return 0;
}
-u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
-
- if (!vblank->max_vblank_count)
- return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
-
- return crtc->base.funcs->get_vblank_counter(&crtc->base);
-}
-
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
@@ -15849,7 +12789,7 @@ static void commit_pipe_config(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- bool modeset = needs_modeset(new_crtc_state);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
/*
* During modesets pipe configuration was programmed as the
@@ -15883,7 +12823,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- if (!needs_modeset(new_crtc_state))
+ if (!intel_crtc_needs_modeset(new_crtc_state))
return;
intel_crtc_update_active_timings(new_crtc_state);
@@ -15905,7 +12845,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- bool modeset = needs_modeset(new_crtc_state);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
if (!modeset) {
if (new_crtc_state->preload_luts &&
@@ -15997,7 +12937,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
/* Only disable port sync and MST slaves */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
+ if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
continue;
if (!old_crtc_state->hw.active)
@@ -16021,7 +12961,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
/* Disable everything else left on */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state) ||
+ if (!intel_crtc_needs_modeset(new_crtc_state) ||
(handled & BIT(crtc->pipe)) ||
old_crtc_state->bigjoiner_slave)
continue;
@@ -16071,7 +13011,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
continue;
/* ignore allocations for crtc's that have been turned off. */
- if (!needs_modeset(new_crtc_state)) {
+ if (!intel_crtc_needs_modeset(new_crtc_state)) {
entries[pipe] = old_crtc_state->wm.skl.ddb;
update_pipes |= BIT(pipe);
} else {
@@ -16247,6 +13187,43 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
intel_atomic_helper_free_state(i915);
}
+static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_plane *plane;
+ struct intel_plane_state *plane_state;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ int ret;
+
+ if (!fb ||
+ fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
+ continue;
+
+ /*
+ * The layout of the fast clear color value expected by HW
+ * (the DRM ABI requires this value to be located in the fb at offset 0 of plane #2):
+ * - 4 x 4 bytes per-channel value
+ * (in surface type specific float/int format provided by the fb user)
+ * - 8 bytes native color value used by the display
+ * (converted/written by GPU during a fast clear operation using the
+ * above per-channel values)
+ *
+ * The commit's FB prepare hook already ensured that the FB obj is pinned, and the
+ * caller made sure that the object is synced with respect to the GPU's write of
+ * the related clear color value to it.
+ */
+ ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
+ fb->offsets[2] + 16,
+ &plane_state->ccval,
+ sizeof(plane_state->ccval));
+ /* The above could only fail if the FB obj has an unexpected backing store type. */
+ drm_WARN_ON(&i915->drm, ret);
+ }
+}
+
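intel_atomic_prepare_plane_clear_colors() above reads the display-native fast clear value out of the framebuffer's clear-color plane. Per the comment, the block starts with four 4-byte per-channel values followed by the 8-byte native value, which is why the read lands at fb->offsets[2] + 16. A layout sketch (struct name and types are illustrative, not taken from the driver):

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

/*
 * Illustrative layout of the clear-color block at offset 0 of FB plane 2,
 * as described in the comment above.
 */
struct clear_color_block {
        uint32_t channel[4]; /* per-channel values written by the fb user */
        uint64_t native;     /* display-native value written by the GPU   */
};

int main(void)
{
        /* The driver reads 8 bytes at fb->offsets[2] + 16: exactly 'native'. */
        assert(offsetof(struct clear_color_block, native) == 16);
        assert(sizeof(((struct clear_color_block *)0)->native) == 8);
        return 0;
}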
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
struct drm_device *dev = state->base.dev;
@@ -16264,9 +13241,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+ intel_atomic_prepare_plane_clear_colors(state);
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (needs_modeset(new_crtc_state) ||
+ if (intel_crtc_needs_modeset(new_crtc_state) ||
new_crtc_state->update_pipe) {
put_domains[crtc->pipe] =
@@ -16292,7 +13271,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
/* Complete the events for pipes that have now been disabled */
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- bool modeset = needs_modeset(new_crtc_state);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
/* Complete events for now disabled pipes here. */
if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
@@ -16312,7 +13291,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->uapi.async_flip)
- skl_enable_flip_done(crtc);
+ intel_crtc_enable_flip_done(state, crtc);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -16337,10 +13316,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->uapi.async_flip)
- skl_disable_flip_done(crtc);
+ intel_crtc_disable_flip_done(state, crtc);
if (new_crtc_state->hw.active &&
- !needs_modeset(new_crtc_state) &&
+ !intel_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->preload_luts &&
(new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe))
@@ -16376,8 +13355,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(state, crtc);
- if (put_domains[i])
- modeset_put_power_domains(dev_priv, put_domains[i]);
+ modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
@@ -16541,7 +13519,6 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
- dev_priv->wm.distrust_bios_wm = false;
intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
@@ -16620,7 +13597,7 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}
-static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -16650,7 +13627,7 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
return 0;
}
-static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
{
struct i915_vma *vma;
@@ -16659,15 +13636,6 @@ static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
intel_unpin_fb_vma(vma, old_plane_state->flags);
}
-static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
-{
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
- };
-
- i915_gem_object_wait_priority(obj, 0, &attr);
-}
-
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @_plane: drm plane to prepare for
@@ -16684,6 +13652,9 @@ int
intel_prepare_plane_fb(struct drm_plane *_plane,
struct drm_plane_state *_new_plane_state)
{
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
+ };
struct intel_plane *plane = to_intel_plane(_plane);
struct intel_plane_state *new_plane_state =
to_intel_plane_state(_new_plane_state);
@@ -16712,7 +13683,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- if (needs_modeset(crtc_state)) {
+ if (intel_crtc_needs_modeset(crtc_state)) {
ret = i915_sw_fence_await_reservation(&state->commit_ready,
old_obj->base.resv, NULL,
false, 0,
@@ -16723,6 +13694,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
}
if (new_plane_state->uapi.fence) { /* explicit fencing */
+ i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
+ &attr);
ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
new_plane_state->uapi.fence,
i915_fence_timeout(dev_priv),
@@ -16744,7 +13717,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
if (ret)
return ret;
- fb_obj_bump_render_priority(obj);
+ i915_gem_object_wait_priority(obj, 0, &attr);
i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
if (!new_plane_state->uapi.fence) { /* implicit fencing */
@@ -16833,540 +13806,6 @@ void intel_plane_destroy(struct drm_plane *plane)
kfree(to_intel_plane(plane));
}
-static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
- return false;
- }
-
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XRGB8888:
- return modifier == DRM_FORMAT_MOD_LINEAR ||
- modifier == I915_FORMAT_MOD_X_TILED;
- default:
- return false;
- }
-}
-
-static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
- return false;
- }
-
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_XBGR16161616F:
- return modifier == DRM_FORMAT_MOD_LINEAR ||
- modifier == I915_FORMAT_MOD_X_TILED;
- default:
- return false;
- }
-}
-
-static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- return modifier == DRM_FORMAT_MOD_LINEAR &&
- format == DRM_FORMAT_ARGB8888;
-}
-
-static const struct drm_plane_funcs i965_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = i965_plane_format_mod_supported,
-};
-
-static const struct drm_plane_funcs i8xx_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = i8xx_plane_format_mod_supported,
-};
-
-static int
-intel_legacy_cursor_update(struct drm_plane *_plane,
- struct drm_crtc *_crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- u32 src_x, u32 src_y,
- u32 src_w, u32 src_h,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct intel_plane *plane = to_intel_plane(_plane);
- struct intel_crtc *crtc = to_intel_crtc(_crtc);
- struct intel_plane_state *old_plane_state =
- to_intel_plane_state(plane->base.state);
- struct intel_plane_state *new_plane_state;
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_crtc_state *new_crtc_state;
- int ret;
-
- /*
- * When crtc is inactive or there is a modeset pending,
- * wait for it to complete in the slowpath
- *
- * FIXME bigjoiner fastpath would be good
- */
- if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
- crtc_state->update_pipe || crtc_state->bigjoiner)
- goto slow;
-
- /*
- * Don't do an async update if there is an outstanding commit modifying
- * the plane. This prevents our async update's changes from getting
- * overridden by a previous synchronous update's state.
- */
- if (old_plane_state->uapi.commit &&
- !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
- goto slow;
-
- /*
- * If any parameters change that may affect watermarks,
- * take the slowpath. Only changing fb or position should be
- * in the fastpath.
- */
- if (old_plane_state->uapi.crtc != &crtc->base ||
- old_plane_state->uapi.src_w != src_w ||
- old_plane_state->uapi.src_h != src_h ||
- old_plane_state->uapi.crtc_w != crtc_w ||
- old_plane_state->uapi.crtc_h != crtc_h ||
- !old_plane_state->uapi.fb != !fb)
- goto slow;
-
- new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
- if (!new_plane_state)
- return -ENOMEM;
-
- new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
- if (!new_crtc_state) {
- ret = -ENOMEM;
- goto out_free;
- }
-
- drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
-
- new_plane_state->uapi.src_x = src_x;
- new_plane_state->uapi.src_y = src_y;
- new_plane_state->uapi.src_w = src_w;
- new_plane_state->uapi.src_h = src_h;
- new_plane_state->uapi.crtc_x = crtc_x;
- new_plane_state->uapi.crtc_y = crtc_y;
- new_plane_state->uapi.crtc_w = crtc_w;
- new_plane_state->uapi.crtc_h = crtc_h;
-
- intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc);
-
- ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
- old_plane_state, new_plane_state);
- if (ret)
- goto out_free;
-
- ret = intel_plane_pin_fb(new_plane_state);
- if (ret)
- goto out_free;
-
- intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
- ORIGIN_FLIP);
- intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
- to_intel_frontbuffer(new_plane_state->hw.fb),
- plane->frontbuffer_bit);
-
- /* Swap plane state */
- plane->base.state = &new_plane_state->uapi;
-
- /*
- * We cannot swap crtc_state as it may be in use by an atomic commit or
- * page flip that's running simultaneously. If we swap crtc_state and
- * destroy the old state, we will cause a use-after-free there.
- *
- * Only update active_planes, which is needed for our internal
- * bookkeeping. Either value will do the right thing when updating
- * planes atomically. If the cursor was part of the atomic update then
- * we would have taken the slowpath.
- */
- crtc_state->active_planes = new_crtc_state->active_planes;
-
- if (new_plane_state->uapi.visible)
- intel_update_plane(plane, crtc_state, new_plane_state);
- else
- intel_disable_plane(plane, crtc_state);
-
- intel_plane_unpin_fb(old_plane_state);
-
-out_free:
- if (new_crtc_state)
- intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
- if (ret)
- intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
- else
- intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
- return ret;
-
-slow:
- return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h, ctx);
-}
-
-static const struct drm_plane_funcs intel_cursor_plane_funcs = {
- .update_plane = intel_legacy_cursor_update,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = intel_cursor_format_mod_supported,
-};
-
-static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
- enum i9xx_plane_id i9xx_plane)
-{
- if (!HAS_FBC(dev_priv))
- return false;
-
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return i9xx_plane == PLANE_A; /* tied to pipe A */
- else if (IS_IVYBRIDGE(dev_priv))
- return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
- i9xx_plane == PLANE_C;
- else if (INTEL_GEN(dev_priv) >= 4)
- return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
- else
- return i9xx_plane == PLANE_A;
-}
-
-static struct intel_plane *
-intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- struct intel_plane *plane;
- const struct drm_plane_funcs *plane_funcs;
- unsigned int supported_rotations;
- const u32 *formats;
- int num_formats;
- int ret, zpos;
-
- if (INTEL_GEN(dev_priv) >= 9)
- return skl_universal_plane_create(dev_priv, pipe,
- PLANE_PRIMARY);
-
- plane = intel_plane_alloc();
- if (IS_ERR(plane))
- return plane;
-
- plane->pipe = pipe;
- /*
- * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
- * port is hooked to pipe B. Hence we want plane A feeding pipe B.
- */
- if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
- INTEL_NUM_PIPES(dev_priv) == 2)
- plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
- else
- plane->i9xx_plane = (enum i9xx_plane_id) pipe;
- plane->id = PLANE_PRIMARY;
- plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
-
- plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
- if (plane->has_fbc) {
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
- }
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- formats = vlv_primary_formats;
- num_formats = ARRAY_SIZE(vlv_primary_formats);
- } else if (INTEL_GEN(dev_priv) >= 4) {
- /*
- * WaFP16GammaEnabling:ivb
- * "Workaround : When using the 64-bit format, the plane
- * output on each color channel has one quarter amplitude.
- * It can be brought up to full amplitude by using pipe
- * gamma correction or pipe color space conversion to
- * multiply the plane output by four."
- *
- * There is no dedicated plane gamma for the primary plane,
- * and using the pipe gamma/csc could conflict with other
- * planes, so we choose not to expose fp16 on IVB primary
- * planes. HSW primary planes no longer have this problem.
- */
- if (IS_IVYBRIDGE(dev_priv)) {
- formats = ivb_primary_formats;
- num_formats = ARRAY_SIZE(ivb_primary_formats);
- } else {
- formats = i965_primary_formats;
- num_formats = ARRAY_SIZE(i965_primary_formats);
- }
- } else {
- formats = i8xx_primary_formats;
- num_formats = ARRAY_SIZE(i8xx_primary_formats);
- }
-
- if (INTEL_GEN(dev_priv) >= 4)
- plane_funcs = &i965_plane_funcs;
- else
- plane_funcs = &i8xx_plane_funcs;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- plane->min_cdclk = vlv_plane_min_cdclk;
- else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- plane->min_cdclk = hsw_plane_min_cdclk;
- else if (IS_IVYBRIDGE(dev_priv))
- plane->min_cdclk = ivb_plane_min_cdclk;
- else
- plane->min_cdclk = i9xx_plane_min_cdclk;
-
- plane->max_stride = i9xx_plane_max_stride;
- plane->update_plane = i9xx_update_plane;
- plane->disable_plane = i9xx_disable_plane;
- plane->get_hw_state = i9xx_plane_get_hw_state;
- plane->check_plane = i9xx_plane_check;
-
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- 0, plane_funcs,
- formats, num_formats,
- i9xx_format_modifiers,
- DRM_PLANE_TYPE_PRIMARY,
- "primary %c", pipe_name(pipe));
- else
- ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- 0, plane_funcs,
- formats, num_formats,
- i9xx_format_modifiers,
- DRM_PLANE_TYPE_PRIMARY,
- "plane %c",
- plane_name(plane->i9xx_plane));
- if (ret)
- goto fail;
-
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
- DRM_MODE_REFLECT_X;
- } else if (INTEL_GEN(dev_priv) >= 4) {
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
- } else {
- supported_rotations = DRM_MODE_ROTATE_0;
- }
-
- if (INTEL_GEN(dev_priv) >= 4)
- drm_plane_create_rotation_property(&plane->base,
- DRM_MODE_ROTATE_0,
- supported_rotations);
-
- zpos = 0;
- drm_plane_create_zpos_immutable_property(&plane->base, zpos);
-
- drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
-
- return plane;
-
-fail:
- intel_plane_free(plane);
-
- return ERR_PTR(ret);
-}
-
-static struct intel_plane *
-intel_cursor_plane_create(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- struct intel_plane *cursor;
- int ret, zpos;
-
- cursor = intel_plane_alloc();
- if (IS_ERR(cursor))
- return cursor;
-
- cursor->pipe = pipe;
- cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
- cursor->id = PLANE_CURSOR;
- cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
-
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
- cursor->max_stride = i845_cursor_max_stride;
- cursor->update_plane = i845_update_cursor;
- cursor->disable_plane = i845_disable_cursor;
- cursor->get_hw_state = i845_cursor_get_hw_state;
- cursor->check_plane = i845_check_cursor;
- } else {
- cursor->max_stride = i9xx_cursor_max_stride;
- cursor->update_plane = i9xx_update_cursor;
- cursor->disable_plane = i9xx_disable_cursor;
- cursor->get_hw_state = i9xx_cursor_get_hw_state;
- cursor->check_plane = i9xx_check_cursor;
- }
-
- cursor->cursor.base = ~0;
- cursor->cursor.cntl = ~0;
-
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
- cursor->cursor.size = ~0;
-
- ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
- 0, &intel_cursor_plane_funcs,
- intel_cursor_formats,
- ARRAY_SIZE(intel_cursor_formats),
- cursor_format_modifiers,
- DRM_PLANE_TYPE_CURSOR,
- "cursor %c", pipe_name(pipe));
- if (ret)
- goto fail;
-
- if (INTEL_GEN(dev_priv) >= 4)
- drm_plane_create_rotation_property(&cursor->base,
- DRM_MODE_ROTATE_0,
- DRM_MODE_ROTATE_0 |
- DRM_MODE_ROTATE_180);
-
- zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
- drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
-
- if (INTEL_GEN(dev_priv) >= 12)
- drm_plane_enable_fb_damage_clips(&cursor->base);
-
- drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
-
- return cursor;
-
-fail:
- intel_plane_free(cursor);
-
- return ERR_PTR(ret);
-}
-
-#define INTEL_CRTC_FUNCS \
- .gamma_set = drm_atomic_helper_legacy_gamma_set, \
- .set_config = drm_atomic_helper_set_config, \
- .destroy = intel_crtc_destroy, \
- .page_flip = drm_atomic_helper_page_flip, \
- .atomic_duplicate_state = intel_crtc_duplicate_state, \
- .atomic_destroy_state = intel_crtc_destroy_state, \
- .set_crc_source = intel_crtc_set_crc_source, \
- .verify_crc_source = intel_crtc_verify_crc_source, \
- .get_crc_sources = intel_crtc_get_crc_sources
-
-static const struct drm_crtc_funcs bdw_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = g4x_get_vblank_counter,
- .enable_vblank = bdw_enable_vblank,
- .disable_vblank = bdw_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs ilk_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = g4x_get_vblank_counter,
- .enable_vblank = ilk_enable_vblank,
- .disable_vblank = ilk_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs g4x_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = g4x_get_vblank_counter,
- .enable_vblank = i965_enable_vblank,
- .disable_vblank = i965_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i965_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i965_enable_vblank,
- .disable_vblank = i965_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i915gm_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i915gm_enable_vblank,
- .disable_vblank = i915gm_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i915_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- .get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i8xx_enable_vblank,
- .disable_vblank = i8xx_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static const struct drm_crtc_funcs i8xx_crtc_funcs = {
- INTEL_CRTC_FUNCS,
-
- /* no hw vblank counter */
- .enable_vblank = i8xx_enable_vblank,
- .disable_vblank = i8xx_disable_vblank,
- .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
-};
-
-static struct intel_crtc *intel_crtc_alloc(void)
-{
- struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
-
- crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
- if (!crtc)
- return ERR_PTR(-ENOMEM);
-
- crtc_state = intel_crtc_state_alloc(crtc);
- if (!crtc_state) {
- kfree(crtc);
- return ERR_PTR(-ENOMEM);
- }
-
- crtc->base.state = &crtc_state->uapi;
- crtc->config = crtc_state;
-
- return crtc;
-}
-
-static void intel_crtc_free(struct intel_crtc *crtc)
-{
- intel_crtc_destroy_state(&crtc->base, crtc->base.state);
- kfree(crtc);
-}
-
static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
{
struct intel_plane *plane;
@@ -17379,100 +13818,6 @@ static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
}
}
-static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- struct intel_plane *primary, *cursor;
- const struct drm_crtc_funcs *funcs;
- struct intel_crtc *crtc;
- int sprite, ret;
-
- crtc = intel_crtc_alloc();
- if (IS_ERR(crtc))
- return PTR_ERR(crtc);
-
- crtc->pipe = pipe;
- crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
-
- primary = intel_primary_plane_create(dev_priv, pipe);
- if (IS_ERR(primary)) {
- ret = PTR_ERR(primary);
- goto fail;
- }
- crtc->plane_ids_mask |= BIT(primary->id);
-
- for_each_sprite(dev_priv, pipe, sprite) {
- struct intel_plane *plane;
-
- plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
- if (IS_ERR(plane)) {
- ret = PTR_ERR(plane);
- goto fail;
- }
- crtc->plane_ids_mask |= BIT(plane->id);
- }
-
- cursor = intel_cursor_plane_create(dev_priv, pipe);
- if (IS_ERR(cursor)) {
- ret = PTR_ERR(cursor);
- goto fail;
- }
- crtc->plane_ids_mask |= BIT(cursor->id);
-
- if (HAS_GMCH(dev_priv)) {
- if (IS_CHERRYVIEW(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
- funcs = &g4x_crtc_funcs;
- else if (IS_GEN(dev_priv, 4))
- funcs = &i965_crtc_funcs;
- else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
- funcs = &i915gm_crtc_funcs;
- else if (IS_GEN(dev_priv, 3))
- funcs = &i915_crtc_funcs;
- else
- funcs = &i8xx_crtc_funcs;
- } else {
- if (INTEL_GEN(dev_priv) >= 8)
- funcs = &bdw_crtc_funcs;
- else
- funcs = &ilk_crtc_funcs;
- }
-
- ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
- &primary->base, &cursor->base,
- funcs, "pipe %c", pipe_name(pipe));
- if (ret)
- goto fail;
-
- BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
- dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
- dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
-
- if (INTEL_GEN(dev_priv) < 9) {
- enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
-
- BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
- dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
- dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
- }
-
- if (INTEL_GEN(dev_priv) >= 10)
- drm_crtc_create_scaling_filter_property(&crtc->base,
- BIT(DRM_SCALING_FILTER_DEFAULT) |
- BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
-
- intel_color_init(crtc);
-
- intel_crtc_crc_init(crtc);
-
- drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
-
- return 0;
-
-fail:
- intel_crtc_free(crtc);
-
- return ret;
-}
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -17555,48 +13900,12 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
return true;
}
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
-{
- int pps_num;
- int pps_idx;
-
- if (HAS_DDI(dev_priv))
- return;
- /*
- * This w/a is needed at least on CPT/PPT, but to be sure apply it
- * everywhere where registers can be write protected.
- */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pps_num = 2;
- else
- pps_num = 1;
-
- for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
- u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
-
- val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
- intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
- }
-}
-
-static void intel_pps_init(struct drm_i915_private *dev_priv)
-{
- if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
- dev_priv->pps_mmio_base = PCH_PPS_BASE;
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->pps_mmio_base = VLV_PPS_BASE;
- else
- dev_priv->pps_mmio_base = PPS_BASE;
-
- intel_pps_unlock_regs_wa(dev_priv);
-}
-
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
bool dpd_is_edp = false;
- intel_pps_init(dev_priv);
+ intel_pps_unlock_regs_wa(dev_priv);
if (!HAS_DISPLAY(dev_priv))
return;
@@ -17997,7 +14306,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err;
}
- if (is_gen12_ccs_plane(fb, i)) {
+ if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
if (fb->pitches[i] != ccs_aux_stride) {
@@ -18195,86 +14504,40 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
intel_init_cdclk_hooks(dev_priv);
+ intel_dpll_init_clock_hook(dev_priv);
+
if (INTEL_GEN(dev_priv) >= 9) {
dev_priv->display.get_pipe_config = hsw_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- skl_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
dev_priv->display.crtc_enable = hsw_crtc_enable;
dev_priv->display.crtc_disable = hsw_crtc_disable;
} else if (HAS_DDI(dev_priv)) {
dev_priv->display.get_pipe_config = hsw_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock =
- hsw_crtc_compute_clock;
dev_priv->display.crtc_enable = hsw_crtc_enable;
dev_priv->display.crtc_disable = hsw_crtc_disable;
} else if (HAS_PCH_SPLIT(dev_priv)) {
dev_priv->display.get_pipe_config = ilk_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock =
- ilk_crtc_compute_clock;
dev_priv->display.crtc_enable = ilk_crtc_enable;
dev_priv->display.crtc_disable = ilk_crtc_disable;
- } else if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
- dev_priv->display.crtc_enable = valleyview_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (IS_CHERRYVIEW(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (IS_G4X(dev_priv)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
- dev_priv->display.crtc_enable = i9xx_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (IS_PINEVIEW(dev_priv)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
- dev_priv->display.crtc_enable = i9xx_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (!IS_GEN(dev_priv, 2)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
- dev_priv->display.crtc_enable = i9xx_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.get_initial_plane_config =
- i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
}
- if (IS_GEN(dev_priv, 5)) {
- dev_priv->display.fdi_link_train = ilk_fdi_link_train;
- } else if (IS_GEN(dev_priv, 6)) {
- dev_priv->display.fdi_link_train = gen6_fdi_link_train;
- } else if (IS_IVYBRIDGE(dev_priv)) {
- /* FIXME: detect B0+ stepping and use auto training */
- dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- }
+ intel_fdi_init_hook(dev_priv);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (INTEL_GEN(dev_priv) >= 9) {
dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
- else
+ dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
+ } else {
dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
+ dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
+ }
}
@@ -18282,14 +14545,10 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
{
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(i915->cdclk.obj.state);
- struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->dbuf.obj.state);
intel_update_cdclk(i915);
intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
-
- dbuf_state->enabled_slices = i915->dbuf.enabled_slices;
}
static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
@@ -18522,8 +14781,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
mode_config->funcs = &intel_mode_funcs;
- if (INTEL_GEN(i915) >= 9)
- mode_config->async_page_flip = true;
+ mode_config->async_page_flip = has_async_flips(i915);
/*
* Maximum framebuffer dimensions, chosen to match
@@ -18608,6 +14866,8 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+ i915->framestart_delay = 1; /* 1-4 */
+
intel_mode_config_init(i915);
ret = intel_cdclk_init(i915);
@@ -18654,6 +14914,8 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
intel_panel_sanitize_ssc(i915);
+ intel_pps_setup(i915);
+
intel_gmbus_setup(i915);
drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
@@ -18942,7 +15204,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
val = intel_de_read(dev_priv, reg);
val &= ~HSW_FRAME_START_DELAY_MASK;
- val |= HSW_FRAME_START_DELAY(0);
+ val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
} else {
i915_reg_t reg = PIPECONF(cpu_transcoder);
@@ -18950,7 +15212,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
val = intel_de_read(dev_priv, reg);
val &= ~PIPECONF_FRAME_START_DELAY_MASK;
- val |= PIPECONF_FRAME_START_DELAY(0);
+ val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
@@ -18963,7 +15225,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
val = intel_de_read(dev_priv, reg);
val &= ~TRANS_FRAME_START_DELAY_MASK;
- val |= TRANS_FRAME_START_DELAY(0);
+ val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
} else {
enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
@@ -18972,7 +15234,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
val = intel_de_read(dev_priv, reg);
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
}
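/*
 * For reference, the frame start delay fields above are programmed as
 * "delay - 1", so dev_priv->framestart_delay (initialized to 1, valid range
 * 1-4) maps to the register fields as, e.g.:
 *
 *	framestart_delay = 1  ->  ..._FRAME_START_DELAY(0)  (the previously hard-coded value)
 *	framestart_delay = 4  ->  ..._FRAME_START_DELAY(3)
 */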
@@ -19165,7 +15427,7 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- fixup_active_planes(crtc_state);
+ fixup_plane_bitmasks(crtc_state);
}
}
@@ -19588,7 +15850,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
put_domains = modeset_get_crtc_power_domains(crtc_state);
if (drm_WARN_ON(dev, put_domains))
- modeset_put_power_domains(dev_priv, put_domains);
+ modeset_put_crtc_power_domains(crtc, put_domains);
}
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 5e0d42d82c11..76f8a805b0a3 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -499,6 +499,8 @@ enum phy_fia {
((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
(new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
+int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
u8 active_pipes);
void intel_link_compute_m_n(u16 bpp, int nlanes,
@@ -544,7 +546,6 @@ unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info
unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
int intel_display_suspend(struct drm_device *dev);
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
@@ -628,11 +629,9 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int plane);
int skl_check_plane_surface(struct intel_plane_state *plane_state);
-int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
+ int *x, int *y, u32 *offset);
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
-unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
@@ -643,7 +642,23 @@ void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
- uint64_t modifier);
+ u64 modifier);
+
+int intel_plane_compute_gtt(struct intel_plane_state *plane_state);
+u32 intel_plane_compute_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane);
+int intel_plane_pin_fb(struct intel_plane_state *plane_state);
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
+struct intel_encoder *
+intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state);
+unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
+ int color_plane);
+u32 intel_plane_adjust_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane,
+ u32 old_offset, u32 new_offset);
/* modesetting */
void intel_modeset_init_hw(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index ca41e8c00ad7..d62b18d5ecd8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -18,6 +18,7 @@
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sideband.h"
+#include "intel_sprite.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -865,6 +866,110 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
}
}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
+static void crtc_updates_info(struct seq_file *m,
+ struct intel_crtc *crtc,
+ const char *hdr)
+{
+ u64 count;
+ int row;
+
+ count = 0;
+ for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++)
+ count += crtc->debug.vbl.times[row];
+ seq_printf(m, "%sUpdates: %llu\n", hdr, count);
+ if (!count)
+ return;
+
+ for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) {
+ char columns[80] = " |";
+ unsigned int x;
+
+ if (row & 1) {
+ const char *units;
+
+ if (row > 10) {
+ x = 1000000;
+ units = "ms";
+ } else {
+ x = 1000;
+ units = "us";
+ }
+
+ snprintf(columns, sizeof(columns), "%4ld%s |",
+ DIV_ROUND_CLOSEST(BIT(row + 9), x), units);
+ }
+
+ if (crtc->debug.vbl.times[row]) {
+ x = ilog2(crtc->debug.vbl.times[row]);
+ memset(columns + 8, '*', x);
+ columns[8 + x] = '\0';
+ }
+
+ seq_printf(m, "%s%s\n", hdr, columns);
+ }
+
+ seq_printf(m, "%sMin update: %lluns\n",
+ hdr, crtc->debug.vbl.min);
+ seq_printf(m, "%sMax update: %lluns\n",
+ hdr, crtc->debug.vbl.max);
+ seq_printf(m, "%sAverage update: %lluns\n",
+ hdr, div64_u64(crtc->debug.vbl.sum, count));
+ seq_printf(m, "%sOverruns > %uus: %u\n",
+ hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
+}
+
+static int crtc_updates_show(struct seq_file *m, void *data)
+{
+ crtc_updates_info(m, m->private, "");
+ return 0;
+}
+
+static int crtc_updates_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, crtc_updates_show, inode->i_private);
+}
+
+static ssize_t crtc_updates_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct intel_crtc *crtc = m->private;
+
+ /* May race with an update. Meh. */
+ memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl));
+
+ return len;
+}
+
+static const struct file_operations crtc_updates_fops = {
+ .owner = THIS_MODULE,
+ .open = crtc_updates_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = crtc_updates_write
+};
+
+static void crtc_updates_add(struct drm_crtc *crtc)
+{
+ debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
+ to_intel_crtc(crtc), &crtc_updates_fops);
+}
+
+#else
+static void crtc_updates_info(struct seq_file *m,
+ struct intel_crtc *crtc,
+ const char *hdr)
+{
+}
+
+static void crtc_updates_add(struct drm_crtc *crtc)
+{
+}
+#endif
+
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
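/*
 * A worked example of the bucket labels printed by crtc_updates_info()
 * above: only odd rows get a label, the bucket bound is BIT(row + 9)
 * nanoseconds, divided by 1000 (us) for rows up to 10 and by 1000000 (ms)
 * above that, with DIV_ROUND_CLOSEST rounding:
 *
 *	row  1: BIT(10) = 1024 ns      -> "   1us |"
 *	row  3: BIT(12) = 4096 ns      -> "   4us |"
 *	row  9: BIT(18) = 262144 ns    -> " 262us |"
 *	row 11: BIT(20) = 1048576 ns   -> "   1ms |"
 *	row 15: BIT(24) = 16777216 ns  -> "  17ms |"
 *
 * which matches the "[1us, 16ms]" range noted on the times[17] array added
 * to intel_crtc in intel_display_types.h.
 */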
@@ -907,6 +1012,8 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
yesno(!crtc->cpu_fifo_underrun_disabled),
yesno(!crtc->pch_fifo_underrun_disabled));
+
+ crtc_updates_info(m, crtc, "\t");
}
static int i915_display_info(struct seq_file *m, void *unused)
@@ -1032,7 +1139,6 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
if (!dev_priv->ipc_enabled && enable)
drm_info(&dev_priv->drm,
"Enabling IPC: WM will be proper only after next commit\n");
- dev_priv->wm.distrust_bios_wm = true;
dev_priv->ipc_enabled = enable;
intel_enable_ipc(dev_priv);
}
@@ -2048,13 +2154,13 @@ static int i915_panel_show(struct seq_file *m, void *data)
return -ENODEV;
seq_printf(m, "Panel power up delay: %d\n",
- intel_dp->panel_power_up_delay);
+ intel_dp->pps.panel_power_up_delay);
seq_printf(m, "Panel power down delay: %d\n",
- intel_dp->panel_power_down_delay);
+ intel_dp->pps.panel_power_down_delay);
seq_printf(m, "Backlight on delay: %d\n",
- intel_dp->backlight_on_delay);
+ intel_dp->pps.backlight_on_delay);
seq_printf(m, "Backlight off delay: %d\n",
- intel_dp->backlight_off_delay);
+ intel_dp->pps.backlight_off_delay);
return 0;
}
@@ -2278,3 +2384,20 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
return 0;
}
+
+/**
+ * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
+ * @crtc: pointer to a drm_crtc
+ *
+ * Returns 0 on success, negative error codes on error.
+ *
+ * Failure to add debugfs entries should generally be ignored.
+ */
+int intel_crtc_debugfs_add(struct drm_crtc *crtc)
+{
+ if (!crtc->debugfs_entry)
+ return -ENODEV;
+
+ crtc_updates_add(crtc);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
index c922c1745bfe..557901f3eb90 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -7,14 +7,17 @@
#define __INTEL_DISPLAY_DEBUGFS_H__
struct drm_connector;
+struct drm_crtc;
struct drm_i915_private;
#ifdef CONFIG_DEBUG_FS
void intel_display_debugfs_register(struct drm_i915_private *i915);
int intel_connector_debugfs_add(struct drm_connector *connector);
+int intel_crtc_debugfs_add(struct drm_crtc *crtc);
#else
static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; }
+static inline int intel_crtc_debugfs_add(struct drm_crtc *crtc) { return 0; }
#endif
#endif /* __INTEL_DISPLAY_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index fe2d90bba536..c11c37c65d86 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -4,7 +4,6 @@
*/
#include "display/intel_crt.h"
-#include "display/intel_dp.h"
#include "i915_drv.h"
#include "i915_irq.h"
@@ -16,6 +15,7 @@
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_pm.h"
+#include "intel_pps.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"
@@ -936,7 +936,7 @@ static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
* because PPS registers are always on.
*/
if (!HAS_PCH_SPLIT(dev_priv))
- intel_power_sequencer_reset(dev_priv);
+ intel_pps_reset_all(dev_priv);
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
@@ -1446,7 +1446,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
- intel_power_sequencer_reset(dev_priv);
+ intel_pps_reset_all(dev_priv);
/* Prevent us from re-enabling polling on accident in late suspend */
if (!dev_priv->drm.dev->power.is_suspended)
@@ -2184,26 +2184,6 @@ static void __intel_display_power_put(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
}
-/**
- * intel_display_power_put_unchecked - release an unchecked power domain reference
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- *
- * This function drops the power domain reference obtained by
- * intel_display_power_get() and might power down the corresponding hardware
- * block right away if this is the last reference.
- *
- * This function exists only for historical reasons and should be avoided in
- * new code, as the correctness of its use cannot be checked. Always use
- * intel_display_power_put() instead.
- */
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- __intel_display_power_put(dev_priv, domain);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
-}
-
static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
intel_wakeref_t wakeref)
@@ -2410,8 +2390,85 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
__intel_display_power_put(dev_priv, domain);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
+#else
+/**
+ * intel_display_power_put_unchecked - release an unchecked power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ *
+ * This function is only for the power domain code's internal use to suppress wakeref
+ * tracking when the corresponding debug kconfig option is disabled, and should
+ * not be used otherwise.
+ */
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ __intel_display_power_put(dev_priv, domain);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+}
#endif
+void
+intel_display_power_get_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain)
+{
+ intel_wakeref_t __maybe_unused wf;
+
+ drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
+
+ wf = intel_display_power_get(i915, domain);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ power_domain_set->wakerefs[domain] = wf;
+#endif
+ power_domain_set->mask |= BIT_ULL(domain);
+}
+
+bool
+intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain)
+{
+ intel_wakeref_t wf;
+
+ drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
+
+ wf = intel_display_power_get_if_enabled(i915, domain);
+ if (!wf)
+ return false;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ power_domain_set->wakerefs[domain] = wf;
+#endif
+ power_domain_set->mask |= BIT_ULL(domain);
+
+ return true;
+}
+
+void
+intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ u64 mask)
+{
+ enum intel_display_power_domain domain;
+
+ drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask);
+
+ for_each_power_domain(domain, mask) {
+ intel_wakeref_t __maybe_unused wf = -1;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
+#endif
+ intel_display_power_put(i915, domain, wf);
+ power_domain_set->mask &= ~BIT_ULL(domain);
+ }
+}
+
#define I830_PIPES_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PIPE_A) | \
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
@@ -5601,12 +5658,16 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
* resources powered until display HW readout is complete. We drop
* this reference in intel_power_domains_enable().
*/
- power_domains->wakeref =
+ drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
/* Disable power support if the user asked so. */
- if (!i915->params.disable_power_well)
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ if (!i915->params.disable_power_well) {
+ drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
+ i915->power_domains.disable_wakeref = intel_display_power_get(i915,
+ POWER_DOMAIN_INIT);
+ }
intel_power_domains_sync_hw(i915);
power_domains->initializing = false;
@@ -5626,11 +5687,12 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.wakeref);
+ fetch_and_zero(&i915->power_domains.init_wakeref);
/* Remove the refcount we took to keep power well support disabled. */
if (!i915->params.disable_power_well)
- intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+ intel_display_power_put(i915, POWER_DOMAIN_INIT,
+ fetch_and_zero(&i915->power_domains.disable_wakeref));
intel_display_power_flush_work_sync(i915);
@@ -5655,7 +5717,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
void intel_power_domains_enable(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.wakeref);
+ fetch_and_zero(&i915->power_domains.init_wakeref);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
intel_power_domains_verify_state(i915);
@@ -5672,8 +5734,8 @@ void intel_power_domains_disable(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &i915->power_domains;
- drm_WARN_ON(&i915->drm, power_domains->wakeref);
- power_domains->wakeref =
+ drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
intel_power_domains_verify_state(i915);
@@ -5695,7 +5757,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
{
struct i915_power_domains *power_domains = &i915->power_domains;
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&power_domains->wakeref);
+ fetch_and_zero(&power_domains->init_wakeref);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
@@ -5719,7 +5781,8 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
* power wells if power domains must be deinitialized for suspend.
*/
if (!i915->params.disable_power_well)
- intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+ intel_display_power_put(i915, POWER_DOMAIN_INIT,
+ fetch_and_zero(&i915->power_domains.disable_wakeref));
intel_display_power_flush_work(i915);
intel_power_domains_verify_state(i915);
@@ -5754,8 +5817,8 @@ void intel_power_domains_resume(struct drm_i915_private *i915)
intel_power_domains_init_hw(i915, true);
power_domains->display_core_suspended = false;
} else {
- drm_WARN_ON(&i915->drm, power_domains->wakeref);
- power_domains->wakeref =
+ drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 4aa0a09cf14f..bc30c479be53 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -212,7 +212,8 @@ struct i915_power_domains {
bool display_core_suspended;
int power_well_count;
- intel_wakeref_t wakeref;
+ intel_wakeref_t init_wakeref;
+ intel_wakeref_t disable_wakeref;
struct mutex lock;
int domain_use_count[POWER_DOMAIN_NUM];
@@ -224,6 +225,13 @@ struct i915_power_domains {
struct i915_power_well *power_wells;
};
+struct intel_display_power_domain_set {
+ u64 mask;
+#ifdef CONFIG_DRM_I915_DEBUG_RUNTIME_PM
+ intel_wakeref_t wakerefs[POWER_DOMAIN_NUM];
+#endif
+};
+
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
for_each_if(BIT_ULL(domain) & (mask))
@@ -279,8 +287,6 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
void __intel_display_power_put_async(struct drm_i915_private *i915,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref);
@@ -297,6 +303,9 @@ intel_display_power_put_async(struct drm_i915_private *i915,
__intel_display_power_put_async(i915, domain, wakeref);
}
#else
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+
static inline void
intel_display_power_put(struct drm_i915_private *i915,
enum intel_display_power_domain domain,
@@ -314,6 +323,28 @@ intel_display_power_put_async(struct drm_i915_private *i915,
}
#endif
+void
+intel_display_power_get_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain);
+
+bool
+intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain);
+
+void
+intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ u64 mask);
+
+static inline void
+intel_display_power_put_all_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set)
+{
+ intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask);
+}
+
enum dbuf_slice {
DBUF_S1,
DBUF_S2,
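/*
 * A minimal usage sketch for the power-domain-set helpers declared above,
 * paired with the intel_display_power_domain_set embedded in intel_crtc as
 * enabled_power_domains. The example_* functions are hypothetical and only
 * illustrate the intended get/put pairing; the actual call sites live
 * elsewhere in the driver. Wakerefs are tracked inside the set when
 * CONFIG_DRM_I915_DEBUG_RUNTIME_PM is enabled.
 */
static void example_get_crtc_power_domains(struct drm_i915_private *i915,
					   struct intel_crtc *crtc)
{
	struct intel_display_power_domain_set *set = &crtc->enabled_power_domains;

	/* Each get records the domain (and its wakeref) in the set. */
	intel_display_power_get_in_set(i915, set, POWER_DOMAIN_PIPE(crtc->pipe));
	intel_display_power_get_in_set(i915, set, POWER_DOMAIN_INIT);
}

static void example_put_crtc_power_domains(struct drm_i915_private *i915,
					   struct intel_crtc *crtc)
{
	/* Drops every domain currently tracked in the set and clears its mask. */
	intel_display_power_put_all_in_set(i915, &crtc->enabled_power_domains);
}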
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 34d78c654df3..184ecbbcec99 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -225,6 +225,17 @@ struct intel_encoder {
const struct drm_connector *audio_connector;
};
+struct intel_panel_bl_funcs {
+ /* Connector and platform specific backlight functions */
+ int (*setup)(struct intel_connector *connector, enum pipe pipe);
+ u32 (*get)(struct intel_connector *connector, enum pipe pipe);
+ void (*set)(const struct drm_connector_state *conn_state, u32 level);
+ void (*disable)(const struct drm_connector_state *conn_state, u32 level);
+ void (*enable)(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level);
+ u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
+};
+
struct intel_panel {
struct drm_display_mode *fixed_mode;
struct drm_display_mode *downclock_mode;
@@ -241,24 +252,28 @@ struct intel_panel {
bool alternate_pwm_increment; /* lpt+ */
/* PWM chip */
+ u32 pwm_level_min;
+ u32 pwm_level_max;
+ bool pwm_enabled;
bool util_pin_active_low; /* bxt+ */
u8 controller; /* bxt+ only */
struct pwm_device *pwm;
struct pwm_state pwm_state;
/* DPCD backlight */
- u8 pwmgen_bit_count;
+ union {
+ struct {
+ u8 pwmgen_bit_count;
+ } vesa;
+ struct {
+ bool sdr_uses_aux;
+ } intel;
+ } edp;
struct backlight_device *device;
- /* Connector and platform specific backlight functions */
- int (*setup)(struct intel_connector *connector, enum pipe pipe);
- u32 (*get)(struct intel_connector *connector);
- void (*set)(const struct drm_connector_state *conn_state, u32 level);
- void (*disable)(const struct drm_connector_state *conn_state);
- void (*enable)(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
- u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
+ const struct intel_panel_bl_funcs *funcs;
+ const struct intel_panel_bl_funcs *pwm_funcs;
void (*power)(struct intel_connector *, bool enable);
} backlight;
};
@@ -339,6 +354,10 @@ struct intel_hdcp_shim {
enum transcoder cpu_transcoder,
bool enable);
+ /* Enable/Disable stream encryption on DP MST Transport Link */
+ int (*stream_encryption)(struct intel_connector *connector,
+ bool enable);
+
/* Ensures the link is still protected */
bool (*check_link)(struct intel_digital_port *dig_port,
struct intel_connector *connector);
@@ -370,8 +389,13 @@ struct intel_hdcp_shim {
int (*config_stream_type)(struct intel_digital_port *dig_port,
bool is_repeater, u8 type);
+ /* Enable/Disable HDCP 2.2 stream encryption on DP MST Transport Link */
+ int (*stream_2_2_encryption)(struct intel_connector *connector,
+ bool enable);
+
/* HDCP2.2 Link Integrity Check */
- int (*check_2_2_link)(struct intel_digital_port *dig_port);
+ int (*check_2_2_link)(struct intel_digital_port *dig_port,
+ struct intel_connector *connector);
};
struct intel_hdcp {
@@ -398,7 +422,6 @@ struct intel_hdcp {
* content can flow only through a link protected by HDCP2.2.
*/
u8 content_type;
- struct hdcp_port_data port_data;
bool is_paired;
bool is_repeater;
@@ -432,6 +455,8 @@ struct intel_hdcp {
* Hence caching the transcoder here.
*/
enum transcoder cpu_transcoder;
+ /* Only used for DP MST stream encryption */
+ enum transcoder stream_transcoder;
};
struct intel_connector {
@@ -531,7 +556,7 @@ struct intel_plane_state {
struct drm_framebuffer *fb;
u16 alpha;
- uint16_t pixel_blend_mode;
+ u16 pixel_blend_mode;
unsigned int rotation;
enum drm_color_encoding color_encoding;
enum drm_color_range color_range;
@@ -604,6 +629,11 @@ struct intel_plane_state {
u32 planar_slave;
struct drm_intel_sprite_colorkey ckey;
+
+ struct drm_rect psr2_sel_fetch_area;
+
+ /* Clear Color Value */
+ u64 ccval;
};
struct intel_initial_plane_config {
@@ -663,6 +693,8 @@ struct intel_crtc_scaler_state {
#define I915_MODE_FLAG_DSI_USE_TE1 (1<<4)
/* Flag to indicate mipi dsi periodic command mode where we do not get TE */
#define I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE (1<<5)
+/* Do tricks to make vblank timestamps sane with VRR? */
+#define I915_MODE_FLAG_VRR (1<<6)
struct intel_wm_level {
bool enable;
@@ -798,7 +830,6 @@ struct intel_crtc_wm_state {
};
enum intel_output_format {
- INTEL_OUTPUT_FORMAT_INVALID,
INTEL_OUTPUT_FORMAT_RGB,
INTEL_OUTPUT_FORMAT_YCBCR420,
INTEL_OUTPUT_FORMAT_YCBCR444,
@@ -1047,7 +1078,10 @@ struct intel_crtc_state {
u32 cgm_mode;
};
- /* bitmask of visible planes (enum plane_id) */
+ /* bitmask of logically enabled planes (enum plane_id) */
+ u8 enabled_planes;
+
+ /* bitmask of actually visible planes (enum plane_id) */
u8 active_planes;
u8 nv12_planes;
u8 c8_planes;
@@ -1118,6 +1152,13 @@ struct intel_crtc_state {
struct intel_dsb *dsb;
u32 psr2_man_track_ctl;
+
+ /* Variable Refresh Rate state */
+ struct {
+ bool enable;
+ u8 pipeline_full;
+ u16 flipline, vmin, vmax;
+ } vrr;
};
enum intel_pipe_crc_source {
@@ -1160,7 +1201,9 @@ struct intel_crtc {
/* I915_MODE_FLAG_* */
u8 mode_flags;
- unsigned long long enabled_power_domains;
+ u16 vmax_vblank_start;
+
+ struct intel_display_power_domain_set enabled_power_domains;
struct intel_overlay *overlay;
struct intel_crtc_state *config;
@@ -1186,6 +1229,15 @@ struct intel_crtc {
ktime_t start_vbl_time;
int min_vbl, max_vbl;
int scanline_start;
+#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
+ struct {
+ u64 min;
+ u64 max;
+ u64 sum;
+ unsigned int over;
+ unsigned int times[17]; /* [1us, 16ms] */
+ } vbl;
+#endif
} debug;
/* scalers available on this crtc */
@@ -1203,6 +1255,7 @@ struct intel_plane {
enum pipe pipe;
bool has_fbc;
bool has_ccs;
+ bool need_async_flip_disable_wa;
u32 frontbuffer_bit;
struct {
@@ -1239,7 +1292,10 @@ struct intel_plane {
const struct intel_plane_state *plane_state);
void (*async_flip)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
+ const struct intel_plane_state *plane_state,
+ bool async_flip);
+ void (*enable_flip_done)(struct intel_plane *plane);
+ void (*disable_flip_done)(struct intel_plane *plane);
};
struct intel_watermark_params {
@@ -1321,6 +1377,43 @@ struct intel_dp_compliance {
u8 test_lane_count;
};
+struct intel_dp_pcon_frl {
+ bool is_trained;
+ int trained_rate_gbps;
+};
+
+struct intel_pps {
+ int panel_power_up_delay;
+ int panel_power_down_delay;
+ int panel_power_cycle_delay;
+ int backlight_on_delay;
+ int backlight_off_delay;
+ struct delayed_work panel_vdd_work;
+ bool want_panel_vdd;
+ unsigned long last_power_on;
+ unsigned long last_backlight_off;
+ ktime_t panel_power_off_time;
+ intel_wakeref_t vdd_wakeref;
+
+ /*
+ * Pipe whose power sequencer is currently locked into
+ * this port. Only relevant on VLV/CHV.
+ */
+ enum pipe pps_pipe;
+ /*
+ * Pipe currently driving the port. Used for preventing
+	 * the use of the PPS for any pipe currently driving
+ * external DP as that will mess things up on VLV.
+ */
+ enum pipe active_pipe;
+ /*
+ * Set if the sequencer may be reset due to a power transition,
+ * requiring a reinitialization. Only relevant on BXT.
+ */
+ bool pps_reset;
+ struct edp_power_seq pps_delays;
+};
+
struct intel_dp {
i915_reg_t output_reg;
u32 DP;
@@ -1331,6 +1424,7 @@ struct intel_dp {
bool has_hdmi_sink;
bool has_audio;
bool reset_link_params;
+ bool use_max_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
@@ -1339,6 +1433,7 @@ struct intel_dp {
u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE];
u8 lttpr_phy_caps[DP_MAX_LTTPR_COUNT][DP_LTTPR_PHY_CAP_SIZE];
u8 fec_capable;
+ u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE];
/* source rates */
int num_source_rates;
const int *source_rates;
@@ -1355,38 +1450,11 @@ struct intel_dp {
int max_link_rate;
/* sink or branch descriptor */
struct drm_dp_desc desc;
- u32 edid_quirks;
struct drm_dp_aux aux;
u32 aux_busy_last_status;
u8 train_set[4];
- int panel_power_up_delay;
- int panel_power_down_delay;
- int panel_power_cycle_delay;
- int backlight_on_delay;
- int backlight_off_delay;
- struct delayed_work panel_vdd_work;
- bool want_panel_vdd;
- unsigned long last_power_on;
- unsigned long last_backlight_off;
- ktime_t panel_power_off_time;
- /*
- * Pipe whose power sequencer is currently locked into
- * this port. Only relevant on VLV/CHV.
- */
- enum pipe pps_pipe;
- /*
- * Pipe currently driving the port. Used for preventing
- * the use of the PPS for any pipe currentrly driving
- * external DP as that will mess things up on VLV.
- */
- enum pipe active_pipe;
- /*
- * Set if the sequencer may be reset due to a power transition,
- * requiring a reinitialization. Only relevant on BXT.
- */
- bool pps_reset;
- struct edp_power_seq pps_delays;
+ struct intel_pps pps;
bool can_mst; /* this port supports mst */
bool is_mst;
@@ -1432,8 +1500,10 @@ struct intel_dp {
struct {
int min_tmds_clock, max_tmds_clock;
int max_dotclock;
+ int pcon_max_frl_bw;
u8 max_bpc;
bool ycbcr_444_to_420;
+ bool rgb_to_ycbcr;
} dfp;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
@@ -1444,6 +1514,8 @@ struct intel_dp {
bool hobl_failed;
bool hobl_active;
+
+ struct intel_dp_pcon_frl frl;
};
enum lspcon_vendor {
@@ -1453,6 +1525,7 @@ enum lspcon_vendor {
struct intel_lspcon {
bool active;
+ bool hdr_supported;
enum drm_lspcon_mode mode;
enum lspcon_vendor vendor;
};
@@ -1469,6 +1542,8 @@ struct intel_digital_port {
/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
enum aux_ch aux_ch;
enum intel_display_power_domain ddi_io_power_domain;
+ intel_wakeref_t ddi_io_wakeref;
+ intel_wakeref_t aux_wakeref;
struct mutex tc_lock; /* protects the TypeC port mode */
intel_wakeref_t tc_lock_wakeref;
int tc_link_refcount;
@@ -1478,10 +1553,14 @@ struct intel_digital_port {
enum phy_fia tc_phy_fia;
u8 tc_phy_fia_idx;
- /* protects num_hdcp_streams reference count */
+ /* protects num_hdcp_streams reference count, hdcp_port_data and hdcp_auth_status */
struct mutex hdcp_mutex;
/* the number of pipes using HDCP signalling out of this port */
unsigned int num_hdcp_streams;
+ /* port HDCP auth status */
+ bool hdcp_auth_status;
+	/* HDCP port data that needs to be passed to the security f/w */
+ struct hdcp_port_data hdcp_port_data;
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1758,6 +1837,12 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
(1 << INTEL_OUTPUT_EDP));
}
+static inline bool
+intel_crtc_needs_modeset(const struct intel_crtc_state *crtc_state)
+{
+ return drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
+}
+
static inline void
intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
@@ -1780,4 +1865,32 @@ static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
return i915_ggtt_offset(state->vma);
}
+static inline struct intel_frontbuffer *
+to_intel_frontbuffer(struct drm_framebuffer *fb)
+{
+ return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
+}
+
+static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->params.panel_use_ssc >= 0)
+ return dev_priv->params.panel_use_ssc != 0;
+ return dev_priv->vbt.lvds_use_ssc
+ && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
+}
+
+static inline u32 i9xx_dpll_compute_fp(struct dpll *dpll)
+{
+ return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
+}
+
+static inline u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *pipe_config)
+{
+ if (HAS_DDI(dev_priv))
+ return pipe_config->port_clock; /* SPLL */
+ else
+ return dev_priv->fdi_pll_freq;
+}
+
#endif /* __INTEL_DISPLAY_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 09123e8625c4..8c12d5375607 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -41,13 +41,13 @@
#include "i915_debugfs.h"
#include "i915_drv.h"
-#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
@@ -58,10 +58,12 @@
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
+#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
+#include "intel_vrr.h"
#define DP_DPRX_ESI_LEN 14
@@ -121,6 +123,11 @@ static const struct dp_link_dpll chv_dpll[] = {
{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
+const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
+{
+ return IS_CHERRYVIEW(i915) ? &chv_dpll[0].dpll : &vlv_dpll[0].dpll;
+}
+
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
@@ -145,12 +152,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
static void intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state);
-static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
-static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
- enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* update sink rates from dpcd */
@@ -162,8 +163,7 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
int i, max_rate;
int max_lttpr_rate;
- if (drm_dp_has_quirk(&intel_dp->desc, 0,
- DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
+ if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
static const int quirk_rates[] = { 162000, 270000, 324000 };
@@ -480,6 +480,13 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
return -1;
}
+ if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
+ drm_dbg_kms(&i915->drm,
+ "Retrying Link training for eDP with max parameters\n");
+ intel_dp->use_max_params = true;
+ return 0;
+ }
+
index = intel_dp_rate_index(intel_dp->common_rates,
intel_dp->num_common_rates,
link_rate);
@@ -651,6 +658,10 @@ intel_dp_output_format(struct drm_connector *connector,
!drm_mode_is_420_only(info, mode))
return INTEL_OUTPUT_FORMAT_RGB;
+ if (intel_dp->dfp.rgb_to_ycbcr &&
+ intel_dp->dfp.ycbcr_444_to_420)
+ return INTEL_OUTPUT_FORMAT_RGB;
+
if (intel_dp->dfp.ycbcr_444_to_420)
return INTEL_OUTPUT_FORMAT_YCBCR444;
else
@@ -716,6 +727,25 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
const struct drm_display_info *info = &connector->base.display_info;
int tmds_clock;
+ /* If PCON supports FRL MODE, check FRL bandwidth constraints */
+ if (intel_dp->dfp.pcon_max_frl_bw) {
+ int target_bw;
+ int max_frl_bw;
+ int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);
+
+ target_bw = bpp * target_clock;
+
+ max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
+
+		/* convert max_frl_bw from Gbps to Kbps */
+ max_frl_bw = max_frl_bw * 1000000;
+
+ if (target_bw > max_frl_bw)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+ }
+
if (intel_dp->dfp.max_dotclock &&
target_clock > intel_dp->dfp.max_dotclock)
return MODE_CLOCK_HIGH;
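The FRL check added above is a plain bandwidth comparison: the mode's raw pixel rate (bpp times pixel clock, in kbps) must fit within the PCON's advertised FRL capacity once that capacity is converted from Gbps to Kbps. A small self-contained sketch of the same arithmetic follows; the helper name and unit choices are illustrative only, not part of the driver.

/*
 * Illustration only: mirrors the Gbps -> Kbps conversion and comparison
 * in the hunk above. frl_bw_fits() is a hypothetical helper, not a
 * driver API; target_clock_khz and bpp follow the hunk's conventions.
 */
static bool frl_bw_fits(int target_clock_khz, int bpp, int max_frl_gbps)
{
	long long target_bw_kbps = (long long)bpp * target_clock_khz;
	long long max_frl_kbps = (long long)max_frl_gbps * 1000000;

	/* e.g. 1080p60 (~148500 kHz) at 24 bpp needs ~3.6 Gbps; a 9 Gbps FRL link fits */
	return target_bw_kbps <= max_frl_kbps;
}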
@@ -833,1129 +863,6 @@ intel_dp_mode_valid(struct drm_connector *connector,
return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}
-u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
-{
- int i;
- u32 v = 0;
-
- if (src_bytes > 4)
- src_bytes = 4;
- for (i = 0; i < src_bytes; i++)
- v |= ((u32)src[i]) << ((3 - i) * 8);
- return v;
-}
-
-static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
-{
- int i;
- if (dst_bytes > 4)
- dst_bytes = 4;
- for (i = 0; i < dst_bytes; i++)
- dst[i] = src >> ((3-i) * 8);
-}
-
-static void
-intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
-static void
-intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
- bool force_disable_vdd);
-static void
-intel_dp_pps_init(struct intel_dp *intel_dp);
-
-static intel_wakeref_t
-pps_lock(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- intel_wakeref_t wakeref;
-
- /*
- * See intel_power_sequencer_reset() why we need
- * a power domain reference here.
- */
- wakeref = intel_display_power_get(dev_priv,
- intel_aux_power_domain(dp_to_dig_port(intel_dp)));
-
- mutex_lock(&dev_priv->pps_mutex);
-
- return wakeref;
-}
-
-static intel_wakeref_t
-pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- mutex_unlock(&dev_priv->pps_mutex);
- intel_display_power_put(dev_priv,
- intel_aux_power_domain(dp_to_dig_port(intel_dp)),
- wakeref);
- return 0;
-}
-
-#define with_pps_lock(dp, wf) \
- for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
-
-static void
-vlv_power_sequencer_kick(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum pipe pipe = intel_dp->pps_pipe;
- bool pll_enabled, release_cl_override = false;
- enum dpio_phy phy = DPIO_PHY(pipe);
- enum dpio_channel ch = vlv_pipe_to_channel(pipe);
- u32 DP;
-
- if (drm_WARN(&dev_priv->drm,
- intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name))
- return;
-
- drm_dbg_kms(&dev_priv->drm,
- "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name);
-
- /* Preserve the BIOS-computed detected bit. This is
- * supposed to be read-only.
- */
- DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
- DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
- DP |= DP_PORT_WIDTH(1);
- DP |= DP_LINK_TRAIN_PAT_1;
-
- if (IS_CHERRYVIEW(dev_priv))
- DP |= DP_PIPE_SEL_CHV(pipe);
- else
- DP |= DP_PIPE_SEL(pipe);
-
- pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
-
- /*
- * The DPLL for the pipe must be enabled for this to work.
- * So enable temporarily it if it's not already enabled.
- */
- if (!pll_enabled) {
- release_cl_override = IS_CHERRYVIEW(dev_priv) &&
- !chv_phy_powergate_ch(dev_priv, phy, ch, true);
-
- if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
- &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
- drm_err(&dev_priv->drm,
- "Failed to force on pll for pipe %c!\n",
- pipe_name(pipe));
- return;
- }
- }
-
- /*
- * Similar magic as in intel_dp_enable_port().
- * We _must_ do this port enable + disable trick
- * to make this power sequencer lock onto the port.
- * Otherwise even VDD force bit won't work.
- */
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
- intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
- intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
- if (!pll_enabled) {
- vlv_force_pll_off(dev_priv, pipe);
-
- if (release_cl_override)
- chv_phy_powergate_ch(dev_priv, phy, ch, false);
- }
-}
-
-static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
- unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
-
- /*
- * We don't have power sequencer currently.
- * Pick one that's not used by other ports.
- */
- for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- if (encoder->type == INTEL_OUTPUT_EDP) {
- drm_WARN_ON(&dev_priv->drm,
- intel_dp->active_pipe != INVALID_PIPE &&
- intel_dp->active_pipe !=
- intel_dp->pps_pipe);
-
- if (intel_dp->pps_pipe != INVALID_PIPE)
- pipes &= ~(1 << intel_dp->pps_pipe);
- } else {
- drm_WARN_ON(&dev_priv->drm,
- intel_dp->pps_pipe != INVALID_PIPE);
-
- if (intel_dp->active_pipe != INVALID_PIPE)
- pipes &= ~(1 << intel_dp->active_pipe);
- }
- }
-
- if (pipes == 0)
- return INVALID_PIPE;
-
- return ffs(pipes) - 1;
-}
-
-static enum pipe
-vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum pipe pipe;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* We should never land here with regular DP ports */
- drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
-
- drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
- intel_dp->active_pipe != intel_dp->pps_pipe);
-
- if (intel_dp->pps_pipe != INVALID_PIPE)
- return intel_dp->pps_pipe;
-
- pipe = vlv_find_free_pps(dev_priv);
-
- /*
- * Didn't find one. This should not happen since there
- * are two power sequencers and up to two eDP ports.
- */
- if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
- pipe = PIPE_A;
-
- vlv_steal_power_sequencer(dev_priv, pipe);
- intel_dp->pps_pipe = pipe;
-
- drm_dbg_kms(&dev_priv->drm,
- "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps_pipe),
- dig_port->base.base.base.id,
- dig_port->base.base.name);
-
- /* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
-
- /*
- * Even vdd force doesn't work until we've made
- * the power sequencer lock in on the port.
- */
- vlv_power_sequencer_kick(intel_dp);
-
- return intel_dp->pps_pipe;
-}
-
-static int
-bxt_power_sequencer_idx(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- int backlight_controller = dev_priv->vbt.backlight.controller;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* We should never land here with regular DP ports */
- drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
-
- if (!intel_dp->pps_reset)
- return backlight_controller;
-
- intel_dp->pps_reset = false;
-
- /*
- * Only the HW needs to be reprogrammed, the SW state is fixed and
- * has been setup during connector init.
- */
- intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
-
- return backlight_controller;
-}
-
-typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-
-static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
-}
-
-static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
-}
-
-static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- return true;
-}
-
-static enum pipe
-vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
- enum port port,
- vlv_pipe_check pipe_check)
-{
- enum pipe pipe;
-
- for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
- u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
- PANEL_PORT_SELECT_MASK;
-
- if (port_sel != PANEL_PORT_SELECT_VLV(port))
- continue;
-
- if (!pipe_check(dev_priv, pipe))
- continue;
-
- return pipe;
- }
-
- return INVALID_PIPE;
-}
-
-static void
-vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum port port = dig_port->base.port;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* try to find a pipe with this port selected */
- /* first pick one where the panel is on */
- intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_has_pp_on);
- /* didn't find one? pick one where vdd is on */
- if (intel_dp->pps_pipe == INVALID_PIPE)
- intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_has_vdd_on);
- /* didn't find one? pick one with just the correct port */
- if (intel_dp->pps_pipe == INVALID_PIPE)
- intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_any);
-
- /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
- if (intel_dp->pps_pipe == INVALID_PIPE) {
- drm_dbg_kms(&dev_priv->drm,
- "no initial power sequencer for [ENCODER:%d:%s]\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
- return;
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name,
- pipe_name(intel_dp->pps_pipe));
-
- intel_dp_init_panel_power_sequencer(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
-}
-
-void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
-
- if (drm_WARN_ON(&dev_priv->drm,
- !(IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_GEN9_LP(dev_priv))))
- return;
-
- /*
- * We can't grab pps_mutex here due to deadlock with power_domain
- * mutex when power_domain functions are called while holding pps_mutex.
- * That also means that in order to use pps_pipe the code needs to
- * hold both a power domain reference and pps_mutex, and the power domain
- * reference get/put must be done while _not_ holding pps_mutex.
- * pps_{lock,unlock}() do these steps in the correct order, so one
- * should use them always.
- */
-
- for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- drm_WARN_ON(&dev_priv->drm,
- intel_dp->active_pipe != INVALID_PIPE);
-
- if (encoder->type != INTEL_OUTPUT_EDP)
- continue;
-
- if (IS_GEN9_LP(dev_priv))
- intel_dp->pps_reset = true;
- else
- intel_dp->pps_pipe = INVALID_PIPE;
- }
-}
-
-struct pps_registers {
- i915_reg_t pp_ctrl;
- i915_reg_t pp_stat;
- i915_reg_t pp_on;
- i915_reg_t pp_off;
- i915_reg_t pp_div;
-};
-
-static void intel_pps_get_registers(struct intel_dp *intel_dp,
- struct pps_registers *regs)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- int pps_idx = 0;
-
- memset(regs, 0, sizeof(*regs));
-
- if (IS_GEN9_LP(dev_priv))
- pps_idx = bxt_power_sequencer_idx(intel_dp);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pps_idx = vlv_power_sequencer_pipe(intel_dp);
-
- regs->pp_ctrl = PP_CONTROL(pps_idx);
- regs->pp_stat = PP_STATUS(pps_idx);
- regs->pp_on = PP_ON_DELAYS(pps_idx);
- regs->pp_off = PP_OFF_DELAYS(pps_idx);
-
- /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
- if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
- regs->pp_div = INVALID_MMIO_REG;
- else
- regs->pp_div = PP_DIVISOR(pps_idx);
-}
-
-static i915_reg_t
-_pp_ctrl_reg(struct intel_dp *intel_dp)
-{
- struct pps_registers regs;
-
- intel_pps_get_registers(intel_dp, &regs);
-
- return regs.pp_ctrl;
-}
-
-static i915_reg_t
-_pp_stat_reg(struct intel_dp *intel_dp)
-{
- struct pps_registers regs;
-
- intel_pps_get_registers(intel_dp, &regs);
-
- return regs.pp_stat;
-}
-
-static bool edp_have_panel_power(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_dp->pps_pipe == INVALID_PIPE)
- return false;
-
- return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
-}
-
-static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_dp->pps_pipe == INVALID_PIPE)
- return false;
-
- return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
-}
-
-static void
-intel_dp_check_edp(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
- drm_WARN(&dev_priv->drm, 1,
- "eDP powered off while attempting aux channel communication.\n");
- drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
- intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
- intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
- }
-}
-
-static u32
-intel_dp_aux_wait_done(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
- const unsigned int timeout_ms = 10;
- u32 status;
- bool done;
-
-#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- done = wait_event_timeout(i915->gmbus_wait_queue, C,
- msecs_to_jiffies_timeout(timeout_ms));
-
- /* just trace the final value */
- trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
-
- if (!done)
- drm_err(&i915->drm,
- "%s: did not complete or timeout within %ums (status 0x%08x)\n",
- intel_dp->aux.name, timeout_ms, status);
-#undef C
-
- return status;
-}
-
-static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (index)
- return 0;
-
- /*
- * The clock divider is based off the hrawclk, and would like to run at
- * 2MHz. So, take the hrawclk value and divide by 2000 and use that
- */
- return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
-}
-
-static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- u32 freq;
-
- if (index)
- return 0;
-
- /*
- * The clock divider is based off the cdclk or PCH rawclk, and would
- * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
- * divide by 2000 and use that
- */
- if (dig_port->aux_ch == AUX_CH_A)
- freq = dev_priv->cdclk.hw.cdclk;
- else
- freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
- return DIV_ROUND_CLOSEST(freq, 2000);
-}
-
-static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-
- if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
- /* Workaround for non-ULT HSW */
- switch (index) {
- case 0: return 63;
- case 1: return 72;
- default: return 0;
- }
- }
-
- return ilk_get_aux_clock_divider(intel_dp, index);
-}
-
-static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- /*
- * SKL doesn't need us to program the AUX clock divider (Hardware will
- * derive the clock from CDCLK automatically). We still implement the
- * get_aux_clock_divider vfunc to plug-in into the existing code.
- */
- return index ? 0 : 1;
-}
-
-static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
- int send_bytes,
- u32 aux_clock_divider)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv =
- to_i915(dig_port->base.base.dev);
- u32 precharge, timeout;
-
- if (IS_GEN(dev_priv, 6))
- precharge = 3;
- else
- precharge = 5;
-
- if (IS_BROADWELL(dev_priv))
- timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
- else
- timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
-
- return DP_AUX_CH_CTL_SEND_BUSY |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_INTERRUPT |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- timeout |
- DP_AUX_CH_CTL_RECEIVE_ERROR |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
-}
-
-static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
- int send_bytes,
- u32 unused)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 =
- to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
- u32 ret;
-
- ret = DP_AUX_CH_CTL_SEND_BUSY |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_INTERRUPT |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_TIME_OUT_MAX |
- DP_AUX_CH_CTL_RECEIVE_ERROR |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
- DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
-
- if (intel_phy_is_tc(i915, phy) &&
- dig_port->tc_mode == TC_PORT_TBT_ALT)
- ret |= DP_AUX_CH_CTL_TBT_IO;
-
- return ret;
-}
-
-static int
-intel_dp_aux_xfer(struct intel_dp *intel_dp,
- const u8 *send, int send_bytes,
- u8 *recv, int recv_size,
- u32 aux_send_ctl_flags)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 =
- to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
- bool is_tc_port = intel_phy_is_tc(i915, phy);
- i915_reg_t ch_ctl, ch_data[5];
- u32 aux_clock_divider;
- enum intel_display_power_domain aux_domain;
- intel_wakeref_t aux_wakeref;
- intel_wakeref_t pps_wakeref;
- int i, ret, recv_bytes;
- int try, clock = 0;
- u32 status;
- bool vdd;
-
- ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
- for (i = 0; i < ARRAY_SIZE(ch_data); i++)
- ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
-
- if (is_tc_port)
- intel_tc_port_lock(dig_port);
-
- aux_domain = intel_aux_power_domain(dig_port);
-
- aux_wakeref = intel_display_power_get(i915, aux_domain);
- pps_wakeref = pps_lock(intel_dp);
-
- /*
- * We will be called with VDD already enabled for dpcd/edid/oui reads.
- * In such cases we want to leave VDD enabled and it's up to upper layers
- * to turn it off. But for eg. i2c-dev access we need to turn it on/off
- * ourselves.
- */
- vdd = edp_panel_vdd_on(intel_dp);
-
- /* dp aux is extremely sensitive to irq latency, hence request the
- * lowest possible wakeup latency and so prevent the cpu from going into
- * deep sleep states.
- */
- cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
-
- intel_dp_check_edp(intel_dp);
-
- /* Try to wait for any previous AUX channel activity */
- for (try = 0; try < 3; try++) {
- status = intel_uncore_read_notrace(uncore, ch_ctl);
- if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- break;
- msleep(1);
- }
- /* just trace the final value */
- trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
-
- if (try == 3) {
- const u32 status = intel_uncore_read(uncore, ch_ctl);
-
- if (status != intel_dp->aux_busy_last_status) {
- drm_WARN(&i915->drm, 1,
- "%s: not started (status 0x%08x)\n",
- intel_dp->aux.name, status);
- intel_dp->aux_busy_last_status = status;
- }
-
- ret = -EBUSY;
- goto out;
- }
-
- /* Only 5 data registers! */
- if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
- ret = -E2BIG;
- goto out;
- }
-
- while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
- u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
- send_bytes,
- aux_clock_divider);
-
- send_ctl |= aux_send_ctl_flags;
-
- /* Must try at least 3 times according to DP spec */
- for (try = 0; try < 5; try++) {
- /* Load the send data into the aux channel data registers */
- for (i = 0; i < send_bytes; i += 4)
- intel_uncore_write(uncore,
- ch_data[i >> 2],
- intel_dp_pack_aux(send + i,
- send_bytes - i));
-
- /* Send the command and wait for it to complete */
- intel_uncore_write(uncore, ch_ctl, send_ctl);
-
- status = intel_dp_aux_wait_done(intel_dp);
-
- /* Clear done status and any errors */
- intel_uncore_write(uncore,
- ch_ctl,
- status |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
-
- /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
- * 400us delay required for errors and timeouts
- * Timeout errors from the HW already meet this
- * requirement so skip to next iteration
- */
- if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
- continue;
-
- if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
- usleep_range(400, 500);
- continue;
- }
- if (status & DP_AUX_CH_CTL_DONE)
- goto done;
- }
- }
-
- if ((status & DP_AUX_CH_CTL_DONE) == 0) {
- drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
- intel_dp->aux.name, status);
- ret = -EBUSY;
- goto out;
- }
-
-done:
- /* Check for timeout or receive error.
- * Timeouts occur when the sink is not connected
- */
- if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
- drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
- intel_dp->aux.name, status);
- ret = -EIO;
- goto out;
- }
-
- /* Timeouts occur when the device isn't connected, so they're
- * "normal" -- don't fill the kernel log with these */
- if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
- drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
- intel_dp->aux.name, status);
- ret = -ETIMEDOUT;
- goto out;
- }
-
- /* Unload any bytes sent back from the other side */
- recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
- DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
-
- /*
- * By BSpec: "Message sizes of 0 or >20 are not allowed."
- * We have no idea of what happened so we return -EBUSY so
- * drm layer takes care for the necessary retries.
- */
- if (recv_bytes == 0 || recv_bytes > 20) {
- drm_dbg_kms(&i915->drm,
- "%s: Forbidden recv_bytes = %d on aux transaction\n",
- intel_dp->aux.name, recv_bytes);
- ret = -EBUSY;
- goto out;
- }
-
- if (recv_bytes > recv_size)
- recv_bytes = recv_size;
-
- for (i = 0; i < recv_bytes; i += 4)
- intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
- recv + i, recv_bytes - i);
-
- ret = recv_bytes;
-out:
- cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
-
- if (vdd)
- edp_panel_vdd_off(intel_dp, false);
-
- pps_unlock(intel_dp, pps_wakeref);
- intel_display_power_put_async(i915, aux_domain, aux_wakeref);
-
- if (is_tc_port)
- intel_tc_port_unlock(dig_port);
-
- return ret;
-}
-
-#define BARE_ADDRESS_SIZE 3
-#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
-
-static void
-intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
- const struct drm_dp_aux_msg *msg)
-{
- txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
- txbuf[1] = (msg->address >> 8) & 0xff;
- txbuf[2] = msg->address & 0xff;
- txbuf[3] = msg->size - 1;
-}
-
-static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
-{
- /*
- * If we're trying to send the HDCP Aksv, we need to set a the Aksv
- * select bit to inform the hardware to send the Aksv after our header
- * since we can't access that data from software.
- */
- if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
- msg->address == DP_AUX_HDCP_AKSV)
- return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
-
- return 0;
-}
-
-static ssize_t
-intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
-{
- struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- u8 txbuf[20], rxbuf[20];
- size_t txsize, rxsize;
- u32 flags = intel_dp_aux_xfer_flags(msg);
- int ret;
-
- intel_dp_aux_header(txbuf, msg);
-
- switch (msg->request & ~DP_AUX_I2C_MOT) {
- case DP_AUX_NATIVE_WRITE:
- case DP_AUX_I2C_WRITE:
- case DP_AUX_I2C_WRITE_STATUS_UPDATE:
- txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
- rxsize = 2; /* 0 or 1 data bytes */
-
- if (drm_WARN_ON(&i915->drm, txsize > 20))
- return -E2BIG;
-
- drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
-
- if (msg->buffer)
- memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
-
- ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
- rxbuf, rxsize, flags);
- if (ret > 0) {
- msg->reply = rxbuf[0] >> 4;
-
- if (ret > 1) {
- /* Number of bytes written in a short write. */
- ret = clamp_t(int, rxbuf[1], 0, msg->size);
- } else {
- /* Return payload size. */
- ret = msg->size;
- }
- }
- break;
-
- case DP_AUX_NATIVE_READ:
- case DP_AUX_I2C_READ:
- txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
- rxsize = msg->size + 1;
-
- if (drm_WARN_ON(&i915->drm, rxsize > 20))
- return -E2BIG;
-
- ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
- rxbuf, rxsize, flags);
- if (ret > 0) {
- msg->reply = rxbuf[0] >> 4;
- /*
- * Assume happy day, and copy the data. The caller is
- * expected to check msg->reply before touching it.
- *
- * Return payload size.
- */
- ret--;
- memcpy(msg->buffer, rxbuf + 1, ret);
- }
- break;
-
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-
-static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- return DP_AUX_CH_CTL(aux_ch);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_CTL(AUX_CH_B);
- }
-}
-
-static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- return DP_AUX_CH_DATA(aux_ch, index);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_DATA(AUX_CH_B, index);
- }
-}
-
-static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- return DP_AUX_CH_CTL(aux_ch);
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- return PCH_DP_AUX_CH_CTL(aux_ch);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_CTL(AUX_CH_A);
- }
-}
-
-static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- return DP_AUX_CH_DATA(aux_ch, index);
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- return PCH_DP_AUX_CH_DATA(aux_ch, index);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_DATA(AUX_CH_A, index);
- }
-}
-
-static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- case AUX_CH_E:
- case AUX_CH_F:
- return DP_AUX_CH_CTL(aux_ch);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_CTL(AUX_CH_A);
- }
-}
-
-static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_D:
- case AUX_CH_E:
- case AUX_CH_F:
- return DP_AUX_CH_DATA(aux_ch, index);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_DATA(AUX_CH_A, index);
- }
-}
-
-static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_USBC1:
- case AUX_CH_USBC2:
- case AUX_CH_USBC3:
- case AUX_CH_USBC4:
- case AUX_CH_USBC5:
- case AUX_CH_USBC6:
- return DP_AUX_CH_CTL(aux_ch);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_CTL(AUX_CH_A);
- }
-}
-
-static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- switch (aux_ch) {
- case AUX_CH_A:
- case AUX_CH_B:
- case AUX_CH_C:
- case AUX_CH_USBC1:
- case AUX_CH_USBC2:
- case AUX_CH_USBC3:
- case AUX_CH_USBC4:
- case AUX_CH_USBC5:
- case AUX_CH_USBC6:
- return DP_AUX_CH_DATA(aux_ch, index);
- default:
- MISSING_CASE(aux_ch);
- return DP_AUX_CH_DATA(AUX_CH_A, index);
- }
-}
-
-static void
-intel_dp_aux_fini(struct intel_dp *intel_dp)
-{
- if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
- cpu_latency_qos_remove_request(&intel_dp->pm_qos);
-
- kfree(intel_dp->aux.name);
-}
-
-static void
-intel_dp_aux_init(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &dig_port->base;
- enum aux_ch aux_ch = dig_port->aux_ch;
-
- if (INTEL_GEN(dev_priv) >= 12) {
- intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
- intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
- } else if (INTEL_GEN(dev_priv) >= 9) {
- intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
- intel_dp->aux_ch_data_reg = skl_aux_data_reg;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
- intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
- } else {
- intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
- intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
- }
-
- if (INTEL_GEN(dev_priv) >= 9)
- intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
- else if (HAS_PCH_SPLIT(dev_priv))
- intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
- else
- intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
-
- if (INTEL_GEN(dev_priv) >= 9)
- intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
- else
- intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
-
- drm_dp_aux_init(&intel_dp->aux);
-
- /* Failure to allocate our preferred name is not critical */
- if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
- intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
- aux_ch - AUX_CH_USBC1 + '1',
- encoder->base.name);
- else
- intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
- aux_ch_name(aux_ch),
- encoder->base.name);
-
- intel_dp->aux.transfer = intel_dp_aux_transfer;
- cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
-}
-
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
@@ -2267,6 +1174,44 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return -EINVAL;
}
+/* Optimize link config in order: max bpp, min lanes, min clock */
+static int
+intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ const struct link_config_limits *limits)
+{
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ int bpp, clock, lane_count;
+ int mode_rate, link_clock, link_avail;
+
+ for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+ int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
+
+ mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+ output_bpp);
+
+ for (lane_count = limits->min_lane_count;
+ lane_count <= limits->max_lane_count;
+ lane_count <<= 1) {
+ for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
+ link_clock = intel_dp->common_rates[clock];
+ link_avail = intel_dp_max_data_rate(link_clock,
+ lane_count);
+
+ if (mode_rate <= link_avail) {
+ pipe_config->lane_count = lane_count;
+ pipe_config->pipe_bpp = bpp;
+ pipe_config->port_clock = link_clock;
+
+ return 0;
+ }
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
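The new intel_dp_compute_link_config_fast() walks bpp from the maximum downwards, then lane count and link rate from the minimum upwards, accepting the first combination whose link bandwidth covers the mode. A minimal sketch of that per-candidate feasibility test follows; the units and the 8b/10b payload factor are illustrative choices, not the exact scaling used by intel_dp_link_required()/intel_dp_max_data_rate().

/*
 * Illustration only: per-candidate check in plain kbps. lane_rate_kbps is
 * the per-lane bit rate (e.g. 5400000 for HBR2); 8b/10b coding leaves
 * roughly 80% of that for payload.
 */
static bool dp_config_fits(int pixel_clock_khz, int bpp,
			   int lane_count, long long lane_rate_kbps)
{
	long long required = (long long)pixel_clock_khz * bpp;
	long long available = (long long)lane_count * lane_rate_kbps * 8 / 10;

	return required <= available;
}

For example, 4 lanes at HBR2 give 4 * 5400000 * 8/10 = 17280000 kbps of payload, enough for a 4K60 mode (~594000 kHz) at 24 bpp needing about 14256000 kbps.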
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
int i, num_bpc;
@@ -2293,6 +1238,14 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
u8 line_buf_depth;
int ret;
+ /*
+ * RC_MODEL_SIZE is currently a constant across all configurations.
+ *
+ * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
+ * DP_DSC_RC_BUF_SIZE for this.
+ */
+ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+
ret = intel_dsc_compute_params(encoder, crtc_state);
if (ret)
return ret;
@@ -2482,13 +1435,14 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);
- if (intel_dp_is_edp(intel_dp)) {
+ if (intel_dp->use_max_params) {
/*
* Use the maximum clock and number of lanes the eDP panel
- * advertizes being capable of. The panels are generally
+		 * advertises being capable of in case the initial fast
+ * optimal params failed us. The panels are generally
* designed to support only a single clock and lane
- * configuration, and typically these values correspond to the
- * native resolution of the panel.
+ * configuration, and typically on older panels these
+ * values correspond to the native resolution of the panel.
*/
limits.min_lane_count = limits.max_lane_count;
limits.min_clock = limits.max_clock;
@@ -2507,11 +1461,22 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp_can_bigjoiner(intel_dp))
pipe_config->bigjoiner = true;
- /*
- * Optimize for slow and wide. This is the place to add alternative
- * optimization policy.
- */
- ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
+ if (intel_dp_is_edp(intel_dp))
+ /*
+ * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
+ * section A.1: "It is recommended that the minimum number of
+ * lanes be used, using the minimum link rate allowed for that
+ * lane configuration."
+ *
+ * Note that we fall back to the max clock and lane count for eDP
+ * panels that fail with the fast optimal settings (see
+ * intel_dp->use_max_params), in which case the fast vs. wide
+ * choice doesn't matter.
+ */
+ ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
+ else
+ /* Optimize for slow and wide. */
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
/* enable compression if the mode doesn't fit available BW */
drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
@@ -2760,6 +1725,9 @@ intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ if (pipe_config->vrr.enable)
+ return;
+
/*
* DRRS and PSR can't be enable together, so giving preference to PSR
* as it allows more power-savings by complete shutting down display,
@@ -2792,8 +1760,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
- DP_DPCD_QUIRK_CONSTANT_N);
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
int ret = 0, output_bpp;
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
@@ -2863,6 +1830,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (!HAS_DDI(dev_priv))
intel_dp_set_clock(encoder, pipe_config);
+ intel_vrr_compute_config(pipe_config, conn_state);
intel_psr_compute_config(intel_dp, pipe_config);
intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
constant_n);
@@ -2963,427 +1931,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
}
}
-#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
-#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
-
-#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
-#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
-
-#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
-#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
-
-static void intel_pps_verify_state(struct intel_dp *intel_dp);
-
-static void wait_panel_status(struct intel_dp *intel_dp,
- u32 mask,
- u32 value)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- i915_reg_t pp_stat_reg, pp_ctrl_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- intel_pps_verify_state(intel_dp);
-
- pp_stat_reg = _pp_stat_reg(intel_dp);
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
- drm_dbg_kms(&dev_priv->drm,
- "mask %08x value %08x status %08x control %08x\n",
- mask, value,
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
-
- if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
- mask, value, 5000))
- drm_err(&dev_priv->drm,
- "Panel status timeout: status %08x control %08x\n",
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
-
- drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
-}
-
-static void wait_panel_on(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-
- drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
- wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
-}
-
-static void wait_panel_off(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-
- drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
- wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
-}
-
-static void wait_panel_power_cycle(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- ktime_t panel_power_on_time;
- s64 panel_power_off_duration;
-
- drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
-
- /* take the difference of currrent time and panel power off time
- * and then make panel wait for t11_t12 if needed. */
- panel_power_on_time = ktime_get_boottime();
- panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
-
- /* When we disable the VDD override bit last we have to do the manual
- * wait. */
- if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
- wait_remaining_ms_from_jiffies(jiffies,
- intel_dp->panel_power_cycle_delay - panel_power_off_duration);
-
- wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
-}
-
-static void wait_backlight_on(struct intel_dp *intel_dp)
-{
- wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
- intel_dp->backlight_on_delay);
-}
-
-static void edp_wait_backlight_off(struct intel_dp *intel_dp)
-{
- wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
- intel_dp->backlight_off_delay);
-}
-
-/* Read the current pp_control value, unlocking the register if it
- * is locked
- */
-
-static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 control;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
- if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
- (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
- control &= ~PANEL_UNLOCK_MASK;
- control |= PANEL_UNLOCK_REGS;
- }
- return control;
-}
-
-/*
- * Must be paired with edp_panel_vdd_off().
- * Must hold pps_mutex around the whole on/off sequence.
- * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
- */
-static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- u32 pp;
- i915_reg_t pp_stat_reg, pp_ctrl_reg;
- bool need_to_disable = !intel_dp->want_panel_vdd;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!intel_dp_is_edp(intel_dp))
- return false;
-
- cancel_delayed_work(&intel_dp->panel_vdd_work);
- intel_dp->want_panel_vdd = true;
-
- if (edp_have_panel_vdd(intel_dp))
- return need_to_disable;
-
- intel_display_power_get(dev_priv,
- intel_aux_power_domain(dig_port));
-
- drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
-
- if (!edp_have_panel_power(intel_dp))
- wait_panel_power_cycle(intel_dp);
-
- pp = ilk_get_pp_control(intel_dp);
- pp |= EDP_FORCE_VDD;
-
- pp_stat_reg = _pp_stat_reg(intel_dp);
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
- /*
- * If the panel wasn't on, delay before accessing aux channel
- */
- if (!edp_have_panel_power(intel_dp)) {
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] panel power wasn't enabled\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
- msleep(intel_dp->panel_power_up_delay);
- }
-
- return need_to_disable;
-}
-
-/*
- * Must be paired with intel_edp_panel_vdd_off() or
- * intel_edp_panel_off().
- * Nested calls to these functions are not allowed since
- * we drop the lock. Caller must use some higher level
- * locking to prevent nested calls from other threads.
- */
-void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
-{
- intel_wakeref_t wakeref;
- bool vdd;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- vdd = false;
- with_pps_lock(intel_dp, wakeref)
- vdd = edp_panel_vdd_on(intel_dp);
- I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
-}
-
-static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port =
- dp_to_dig_port(intel_dp);
- u32 pp;
- i915_reg_t pp_stat_reg, pp_ctrl_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);
-
- if (!edp_have_panel_vdd(intel_dp))
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
-
- pp = ilk_get_pp_control(intel_dp);
- pp &= ~EDP_FORCE_VDD;
-
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- pp_stat_reg = _pp_stat_reg(intel_dp);
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
-
- /* Make sure sequencer is idle before allowing subsequent activity */
- drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
-
- if ((pp & PANEL_POWER_ON) == 0)
- intel_dp->panel_power_off_time = ktime_get_boottime();
-
- intel_display_power_put_unchecked(dev_priv,
- intel_aux_power_domain(dig_port));
-}
-
-static void edp_panel_vdd_work(struct work_struct *__work)
-{
- struct intel_dp *intel_dp =
- container_of(to_delayed_work(__work),
- struct intel_dp, panel_vdd_work);
- intel_wakeref_t wakeref;
-
- with_pps_lock(intel_dp, wakeref) {
- if (!intel_dp->want_panel_vdd)
- edp_panel_vdd_off_sync(intel_dp);
- }
-}
-
-static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
-{
- unsigned long delay;
-
- /*
- * Queue the timer to fire a long time from now (relative to the power
- * down delay) to keep the panel power up across a sequence of
- * operations.
- */
- delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
- schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
-}
-
-/*
- * Must be paired with edp_panel_vdd_on().
- * Must hold pps_mutex around the whole on/off sequence.
- * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
- */
-static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
-
- intel_dp->want_panel_vdd = false;
-
- if (sync)
- edp_panel_vdd_off_sync(intel_dp);
- else
- edp_panel_vdd_schedule_off(intel_dp);
-}
-
-static void edp_panel_on(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp;
- i915_reg_t pp_ctrl_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
-
- if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
- "[ENCODER:%d:%s] panel power already on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name))
- return;
-
- wait_panel_power_cycle(intel_dp);
-
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- pp = ilk_get_pp_control(intel_dp);
- if (IS_GEN(dev_priv, 5)) {
- /* ILK workaround: disable reset around power sequence */
- pp &= ~PANEL_POWER_RESET;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- }
-
- pp |= PANEL_POWER_ON;
- if (!IS_GEN(dev_priv, 5))
- pp |= PANEL_POWER_RESET;
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
-
- wait_panel_on(intel_dp);
- intel_dp->last_power_on = jiffies;
-
- if (IS_GEN(dev_priv, 5)) {
- pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- }
-}
-
-void intel_edp_panel_on(struct intel_dp *intel_dp)
-{
- intel_wakeref_t wakeref;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref)
- edp_panel_on(intel_dp);
-}
-
-
-static void edp_panel_off(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- u32 pp;
- i915_reg_t pp_ctrl_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
-
- drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
- "Need [ENCODER:%d:%s] VDD to turn off panel\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
-
- pp = ilk_get_pp_control(intel_dp);
- /* We need to switch off panel power _and_ force vdd, for otherwise some
- * panels get very unhappy and cease to work. */
- pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
- EDP_BLC_ENABLE);
-
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
- intel_dp->want_panel_vdd = false;
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
-
- wait_panel_off(intel_dp);
- intel_dp->panel_power_off_time = ktime_get_boottime();
-
- /* We got a reference when we enabled the VDD. */
- intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
-}
-
-void intel_edp_panel_off(struct intel_dp *intel_dp)
-{
- intel_wakeref_t wakeref;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref)
- edp_panel_off(intel_dp);
-}
-
-/* Enable backlight in the panel power control. */
-static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- intel_wakeref_t wakeref;
-
- /*
- * If we enable the backlight right away following a panel power
- * on, we may see slight flicker as the panel syncs with the eDP
- * link. So delay a bit to make sure the image is solid before
- * allowing it to appear.
- */
- wait_backlight_on(intel_dp);
-
- with_pps_lock(intel_dp, wakeref) {
- i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- u32 pp;
-
- pp = ilk_get_pp_control(intel_dp);
- pp |= EDP_BLC_ENABLE;
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- }
-}
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
@@ -3398,31 +1945,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
drm_dbg_kms(&i915->drm, "\n");
intel_panel_enable_backlight(crtc_state, conn_state);
- _intel_edp_backlight_on(intel_dp);
-}
-
-/* Disable backlight in the panel power control. */
-static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- intel_wakeref_t wakeref;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref) {
- i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- u32 pp;
-
- pp = ilk_get_pp_control(intel_dp);
- pp &= ~EDP_BLC_ENABLE;
-
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- }
-
- intel_dp->last_backlight_off = jiffies;
- edp_wait_backlight_off(intel_dp);
+ intel_pps_backlight_on(intel_dp);
}
/* Disable backlight PP control and backlight PWM. */
@@ -3436,37 +1959,10 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
drm_dbg_kms(&i915->drm, "\n");
- _intel_edp_backlight_off(intel_dp);
+ intel_pps_backlight_off(intel_dp);
intel_panel_disable_backlight(old_conn_state);
}
-/*
- * Hook for controlling the panel power control backlight through the bl_power
- * sysfs attribute. Take care to handle multiple calls.
- */
-static void intel_edp_backlight_power(struct intel_connector *connector,
- bool enable)
-{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- intel_wakeref_t wakeref;
- bool is_enabled;
-
- is_enabled = false;
- with_pps_lock(intel_dp, wakeref)
- is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
- if (is_enabled == enable)
- return;
-
- drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
- enable ? "enable" : "disable");
-
- if (enable)
- _intel_edp_backlight_on(intel_dp);
- else
- _intel_edp_backlight_off(intel_dp);
-}
-
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -3583,6 +2079,29 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
enable ? "enable" : "disable");
}
+static void
+intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 oui[] = { 0x00, 0xaa, 0x01 };
+ u8 buf[3] = { 0 };
+
+ /*
+ * During driver init, we want to be careful and avoid changing the source OUI if it's
+ * already set to what we want, so as to avoid clearing any state by accident.
+ */
+ if (careful) {
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
+ drm_err(&i915->drm, "Failed to read source OUI\n");
+
+ if (memcmp(oui, buf, sizeof(oui)) == 0)
+ return;
+ }
+
+ if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
+ drm_err(&i915->drm, "Failed to write source OUI\n");
+}
+
/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
@@ -3604,6 +2123,10 @@ void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
lspcon_resume(dp_to_dig_port(intel_dp));
+ /* Write the source OUI as early as possible */
+ if (intel_dp_is_edp(intel_dp))
+ intel_edp_init_source_oui(intel_dp, false);
+
/*
* When turning on, we need to retry for 1ms to give the sink
* time to wake up.
@@ -3860,10 +2383,12 @@ static void intel_disable_dp(struct intel_atomic_state *state,
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
- intel_edp_panel_vdd_on(intel_dp);
+ intel_pps_vdd_on(intel_dp);
intel_edp_backlight_off(old_conn_state);
intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
- intel_edp_panel_off(intel_dp);
+ intel_pps_off(intel_dp);
+ intel_dp->frl.is_trained = false;
+ intel_dp->frl.trained_rate_gbps = 0;
}
static void g4x_disable_dp(struct intel_atomic_state *state,
@@ -3959,6 +2484,280 @@ cpt_set_link_train(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
+static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ /* Clear the cached register set to avoid using stale values */
+
+ memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
+ intel_dp->pcon_dsc_dpcd,
+ sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
+ drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
+ DP_PCON_DSC_ENCODER);
+
+ drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
+ (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
+}
+
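+/* Return the highest FRL bandwidth (in Gbps) advertised in the given FRL bandwidth mask. */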
+static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
+{
+ int bw_gbps[] = {9, 18, 24, 32, 40, 48};
+ int i;
+
+ for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
+ if (frl_bw_mask & (1 << i))
+ return bw_gbps[i];
+ }
+ return 0;
+}
+
+static int intel_dp_pcon_set_frl_mask(int max_frl)
+{
+ switch (max_frl) {
+ case 48:
+ return DP_PCON_FRL_BW_MASK_48GBPS;
+ case 40:
+ return DP_PCON_FRL_BW_MASK_40GBPS;
+ case 32:
+ return DP_PCON_FRL_BW_MASK_32GBPS;
+ case 24:
+ return DP_PCON_FRL_BW_MASK_24GBPS;
+ case 18:
+ return DP_PCON_FRL_BW_MASK_18GBPS;
+ case 9:
+ return DP_PCON_FRL_BW_MASK_9GBPS;
+ }
+
+ return 0;
+}
+
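+/*
+ * Max FRL rate (in Gbps) supported by the HDMI sink, as advertised in its
+ * EDID; capped by the sink's DSC FRL rate when HDMI DSC 1.2 is supported.
+ */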
+static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+ int max_frl_rate;
+ int max_lanes, rate_per_lane;
+ int max_dsc_lanes, dsc_rate_per_lane;
+
+ max_lanes = connector->display_info.hdmi.max_lanes;
+ rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
+ max_frl_rate = max_lanes * rate_per_lane;
+
+ if (connector->display_info.hdmi.dsc_cap.v_1p2) {
+ max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
+ dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
+ if (max_dsc_lanes && dsc_rate_per_lane)
+ max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
+ }
+
+ return max_frl_rate;
+}
+
+static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
+{
+#define PCON_EXTENDED_TRAIN_MODE (1 > 0)
+#define PCON_CONCURRENT_MODE (1 > 0)
+#define PCON_SEQUENTIAL_MODE !PCON_CONCURRENT_MODE
+#define PCON_NORMAL_TRAIN_MODE !PCON_EXTENDED_TRAIN_MODE
+#define TIMEOUT_FRL_READY_MS 500
+#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000
+
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
+ u8 max_frl_bw_mask = 0, frl_trained_mask;
+ bool is_active;
+
+ ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
+ if (ret < 0)
+ return ret;
+
+ max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
+ drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
+
+ max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
+ drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);
+
+ max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);
+
+ if (max_frl_bw <= 0)
+ return -EINVAL;
+
+ ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
+ if (ret < 0)
+ return ret;
+ /* Wait for PCON to be FRL Ready */
+ wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);
+
+ if (!is_active)
+ return -ETIMEDOUT;
+
+ max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
+ ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, PCON_SEQUENTIAL_MODE);
+ if (ret < 0)
+ return ret;
+ ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, PCON_NORMAL_TRAIN_MODE);
+ if (ret < 0)
+ return ret;
+ ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
+ if (ret < 0)
+ return ret;
+ /*
+ * Wait for FRL training to complete and check that the HDMI link
+ * is up and active.
+ */
+ wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS);
+
+ if (!is_active)
+ return -ETIMEDOUT;
+
+ /* Verify HDMI Link configuration shows FRL Mode */
+ if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
+ DP_PCON_HDMI_MODE_FRL) {
+ drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
+ return -EINVAL;
+ }
+ drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);
+
+ intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
+ intel_dp->frl.is_trained = true;
+ drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);
+
+ return 0;
+}
+
+static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
+{
+ if (drm_dp_is_branch(intel_dp->dpcd) &&
+ intel_dp->has_hdmi_sink &&
+ intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
+ return true;
+
+ return false;
+}
+
+void intel_dp_check_frl_training(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ /* Always go for FRL training if supported */
+ if (!intel_dp_is_hdmi_2_1_sink(intel_dp) ||
+ intel_dp->frl.is_trained)
+ return;
+
+ if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
+ int ret, mode;
+
+ drm_dbg(&dev_priv->drm, "Couldnt set FRL mode, continuing with TMDS mode\n");
+ ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
+ mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
+
+ if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
+ drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
+ } else {
+ drm_dbg(&dev_priv->drm, "FRL training Completed\n");
+ }
+}
+
+static int
+intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
+{
+ int vactive = crtc_state->hw.adjusted_mode.vdisplay;
+
+ return intel_hdmi_dsc_get_slice_height(vactive);
+}
+
+static int
+intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+ int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
+ int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
+ int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
+ int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
+
+ return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
+ pcon_max_slice_width,
+ hdmi_max_slices, hdmi_throughput);
+}
+
+static int
+intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int num_slices, int slice_width)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+ int output_format = crtc_state->output_format;
+ bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
+ int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
+ int hdmi_max_chunk_bytes =
+ connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
+
+ return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
+ num_slices, output_format, hdmi_all_bpp,
+ hdmi_max_chunk_bytes);
+}
+
+void
+intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 pps_param[6];
+ int slice_height;
+ int slice_width;
+ int num_slices;
+ int bits_per_pixel;
+ int ret;
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct drm_connector *connector;
+ bool hdmi_is_dsc_1_2;
+
+ if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
+ return;
+
+ if (!intel_connector)
+ return;
+ connector = &intel_connector->base;
+ hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;
+
+ if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
+ !hdmi_is_dsc_1_2)
+ return;
+
+ slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
+ if (!slice_height)
+ return;
+
+ num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
+ if (!num_slices)
+ return;
+
+ slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
+ num_slices);
+
+ bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
+ num_slices, slice_width);
+ if (!bits_per_pixel)
+ return;
+
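+ /*
+ * Pack the PPS override parameters, little-endian: bytes 0-1 slice
+ * height, bytes 2-3 slice width, bytes 4-5 bits per pixel (10 bits).
+ */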
+ pps_param[0] = slice_height & 0xFF;
+ pps_param[1] = slice_height >> 8;
+ pps_param[2] = slice_width & 0xFF;
+ pps_param[3] = slice_width >> 8;
+ pps_param[4] = bits_per_pixel & 0xFF;
+ pps_param[5] = (bits_per_pixel >> 8) & 0x3;
+
+ ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
+ if (ret < 0)
+ drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
+}
+
static void
g4x_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
@@ -4044,12 +2843,42 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
enableddisabled(intel_dp->dfp.ycbcr_444_to_420));
tmp = 0;
+ if (intel_dp->dfp.rgb_to_ycbcr) {
+ bool bt2020, bt709;
- if (drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_PROTOCOL_CONVERTER_CONTROL_2, tmp) <= 0)
+ /*
+ * FIXME: If userspace selects BT2020 or BT709 but the PCON only supports
+ * RGB->YCbCr conversion for BT601, fall back to BT601 as the default.
+ */
+ tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE;
+
+ bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
+ bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
+ switch (crtc_state->infoframes.vsc.colorimetry) {
+ case DP_COLORIMETRY_BT2020_RGB:
+ case DP_COLORIMETRY_BT2020_YCC:
+ if (bt2020)
+ tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE;
+ break;
+ case DP_COLORIMETRY_BT709_YCC:
+ case DP_COLORIMETRY_XVYCC_709:
+ if (bt709)
+ tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
drm_dbg_kms(&i915->drm,
- "Failed to set protocol converter YCbCr 4:2:2 conversion mode to %s\n",
- enableddisabled(false));
+ "Failed to set protocol converter RGB->YCbCr conversion mode to %s\n",
+ enableddisabled(tmp ? true : false));
}
static void intel_enable_dp(struct intel_atomic_state *state,
@@ -4067,15 +2896,15 @@ static void intel_enable_dp(struct intel_atomic_state *state,
if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
return;
- with_pps_lock(intel_dp, wakeref) {
+ with_intel_pps_lock(intel_dp, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_init_panel_power_sequencer(encoder, pipe_config);
+ vlv_pps_init(encoder, pipe_config);
intel_dp_enable_port(intel_dp, pipe_config);
- edp_panel_vdd_on(intel_dp);
- edp_panel_on(intel_dp);
- edp_panel_vdd_off(intel_dp, true);
+ intel_pps_vdd_on_unlocked(intel_dp);
+ intel_pps_on_unlocked(intel_dp);
+ intel_pps_vdd_off_unlocked(intel_dp, true);
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -4090,6 +2919,8 @@ static void intel_enable_dp(struct intel_atomic_state *state,
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
intel_dp_configure_protocol_converter(intel_dp, pipe_config);
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, pipe_config);
intel_dp_start_link_train(intel_dp, pipe_config);
intel_dp_stop_link_train(intel_dp, pipe_config);
@@ -4132,112 +2963,6 @@ static void g4x_pre_enable_dp(struct intel_atomic_state *state,
ilk_edp_pll_on(intel_dp, pipe_config);
}
-static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum pipe pipe = intel_dp->pps_pipe;
- i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
-
- drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
-
- if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
- return;
-
- edp_panel_vdd_off_sync(intel_dp);
-
- /*
- * VLV seems to get confused when multiple power sequencers
- * have the same port selected (even if only one has power/vdd
- * enabled). The failure manifests as vlv_wait_port_ready() failing
- * CHV on the other hand doesn't seem to mind having the same port
- * selected in multiple power sequencers, but let's clear the
- * port select always when logically disconnecting a power sequencer
- * from a port.
- */
- drm_dbg_kms(&dev_priv->drm,
- "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name);
- intel_de_write(dev_priv, pp_on_reg, 0);
- intel_de_posting_read(dev_priv, pp_on_reg);
-
- intel_dp->pps_pipe = INVALID_PIPE;
-}
-
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- struct intel_encoder *encoder;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- for_each_intel_dp(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
- "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
- pipe_name(pipe), encoder->base.base.id,
- encoder->base.name);
-
- if (intel_dp->pps_pipe != pipe)
- continue;
-
- drm_dbg_kms(&dev_priv->drm,
- "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), encoder->base.base.id,
- encoder->base.name);
-
- /* make sure vdd is off before we steal it */
- vlv_detach_power_sequencer(intel_dp);
- }
-}
-
-static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
-
- if (intel_dp->pps_pipe != INVALID_PIPE &&
- intel_dp->pps_pipe != crtc->pipe) {
- /*
- * If another power sequencer was being used on this
- * port previously make sure to turn off vdd there while
- * we still have control of it.
- */
- vlv_detach_power_sequencer(intel_dp);
- }
-
- /*
- * We may be stealing the power
- * sequencer from another port.
- */
- vlv_steal_power_sequencer(dev_priv, crtc->pipe);
-
- intel_dp->active_pipe = crtc->pipe;
-
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- /* now it's all ours */
- intel_dp->pps_pipe = crtc->pipe;
-
- drm_dbg_kms(&dev_priv->drm,
- "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
- encoder->base.name);
-
- /* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
-}
-
static void vlv_pre_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
@@ -4637,22 +3362,19 @@ ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
-void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static char dp_training_pattern_name(u8 train_pat)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u8 train_set = intel_dp->train_set[0];
-
- drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
- train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
- train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
- drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
- (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
- DP_TRAIN_PRE_EMPHASIS_SHIFT,
- train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
- " (max)" : "");
-
- intel_dp->set_signal_levels(intel_dp, crtc_state);
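+ /*
+ * TPS1-3 have consecutive DPCD values, so '0' + value yields the right
+ * digit; TPS4 uses a non-contiguous DPCD value and is mapped explicitly.
+ */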
+ switch (train_pat) {
+ case DP_TRAINING_PATTERN_1:
+ case DP_TRAINING_PATTERN_2:
+ case DP_TRAINING_PATTERN_3:
+ return '0' + train_pat;
+ case DP_TRAINING_PATTERN_4:
+ return '4';
+ default:
+ MISSING_CASE(train_pat);
+ return '?';
+ }
}
void
@@ -4660,13 +3382,15 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
- if ((intel_dp_training_pattern_symbol(dp_train_pat)) !=
- DP_TRAINING_PATTERN_DISABLE)
+ if (train_pat != DP_TRAINING_PATTERN_DISABLE)
drm_dbg_kms(&dev_priv->drm,
- "Using DP training pattern TPS%d\n",
- intel_dp_training_pattern_symbol(dp_train_pat));
+ "[ENCODER:%d:%s] Using DP training pattern TPS%c\n",
+ encoder->base.base.id, encoder->base.name,
+ dp_training_pattern_name(train_pat));
intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}
@@ -4732,15 +3456,15 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
- msleep(intel_dp->panel_power_down_delay);
+ msleep(intel_dp->pps.panel_power_down_delay);
intel_dp->DP = DP;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_wakeref_t wakeref;
- with_pps_lock(intel_dp, wakeref)
- intel_dp->active_pipe = INVALID_PIPE;
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_dp->pps.active_pipe = INVALID_PIPE;
}
}
@@ -4870,6 +3594,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
intel_dp_get_dsc_sink_cap(intel_dp);
+ /*
+ * If needed, program our source OUI so we can make various Intel-specific AUX services
+ * available (such as HDR backlight controls)
+ */
+ intel_edp_init_source_oui(intel_dp, true);
+
return true;
}
@@ -5703,7 +4433,7 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
intel_dp_phy_pattern_update(intel_dp, crtc_state);
@@ -5776,6 +4506,17 @@ update_status:
"Could not write test response to sink\n");
}
+static void
+intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled)
+{
+ drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled);
+
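+ /* A CP_IRQ in the ESI indicates an HDCP (content protection) event; forward it and mark the IRQ as handled. */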
+ if (esi[1] & DP_CP_IRQ) {
+ intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
+ *handled = true;
+ }
+}
+
/**
* intel_dp_check_mst_status - service any pending MST interrupts, check link status
* @intel_dp: Intel DP struct
@@ -5820,7 +4561,8 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
- drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
+ intel_dp_mst_hpd_irq(intel_dp, esi, &handled);
+
if (!handled)
break;
@@ -5838,6 +4580,28 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
return link_ok;
}
+static void
+intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
+{
+ bool is_active;
+ u8 buf = 0;
+
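+ /*
+ * If FRL was trained but the PCON now reports the HDMI link as down,
+ * disable the HDMI link, log the FRL error counters and retrain
+ * (falling back to TMDS if FRL can no longer be established).
+ */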
+ is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
+ if (intel_dp->frl.is_trained && !is_active) {
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
+ return;
+
+ buf &= ~DP_PCON_ENABLE_HDMI_LINK;
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
+ return;
+
+ drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);
+
+ /* Restart FRL training or fall back to TMDS mode */
+ intel_dp_check_frl_training(intel_dp);
+ }
+}
+
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
@@ -6011,6 +4775,8 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
!intel_dp_mst_is_master_trans(crtc_state))
continue;
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
intel_dp_start_link_train(intel_dp, crtc_state);
intel_dp_stop_link_train(intel_dp, crtc_state);
break;
@@ -6120,7 +4886,7 @@ static int intel_dp_do_phy_test(struct intel_encoder *encoder,
return 0;
}
-static void intel_dp_phy_test(struct intel_encoder *encoder)
+void intel_dp_phy_test(struct intel_encoder *encoder)
{
struct drm_modeset_acquire_ctx ctx;
int ret;
@@ -6202,7 +4968,7 @@ intel_dp_hotplug(struct intel_encoder *encoder,
return state;
}
-static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
+static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 val;
@@ -6226,6 +4992,30 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
+static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 val;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
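+ /* Read and acknowledge the link service IRQ vector; a set HDMI_LINK_STATUS_CHANGED bit means the PCON's HDMI link state changed. */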
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
+ drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
+ return;
+ }
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
+ drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
+ return;
+ }
+
+ if (val & HDMI_LINK_STATUS_CHANGED)
+ intel_dp_handle_hdmi_link_status_change(intel_dp);
+}
+
/*
* According to DP spec
* 5.1.2:
@@ -6265,7 +5055,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
return false;
}
- intel_dp_check_service_irq(intel_dp);
+ intel_dp_check_device_service_irq(intel_dp);
+ intel_dp_check_link_service_irq(intel_dp);
/* Handle CEC interrupts, if any */
drm_dp_cec_irq(&intel_dp->aux);
@@ -6485,13 +5276,20 @@ intel_dp_update_dfp(struct intel_dp *intel_dp,
intel_dp->downstream_ports,
edid);
+ intel_dp->dfp.pcon_max_frl_bw =
+ drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
+ intel_dp->downstream_ports);
+
drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d\n",
+ "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
connector->base.base.id, connector->base.name,
intel_dp->dfp.max_bpc,
intel_dp->dfp.max_dotclock,
intel_dp->dfp.min_tmds_clock,
- intel_dp->dfp.max_tmds_clock);
+ intel_dp->dfp.max_tmds_clock,
+ intel_dp->dfp.pcon_max_frl_bw);
+
+ intel_dp_get_pcon_dsc_cap(intel_dp);
}
static void
@@ -6499,7 +5297,7 @@ intel_dp_update_420(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420;
+ bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;
/* No YCbCr output support on gmch platforms */
if (HAS_GMCH(i915))
@@ -6521,14 +5319,26 @@ intel_dp_update_420(struct intel_dp *intel_dp)
dp_to_dig_port(intel_dp)->lspcon.active ||
drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
intel_dp->downstream_ports);
+ rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ DP_DS_HDMI_BT601_RGB_YCBCR_CONV |
+ DP_DS_HDMI_BT709_RGB_YCBCR_CONV |
+ DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
if (INTEL_GEN(i915) >= 11) {
+ /* Let PCON convert from RGB->YCbCr if possible */
+ if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
+ intel_dp->dfp.rgb_to_ycbcr = true;
+ intel_dp->dfp.ycbcr_444_to_420 = true;
+ connector->base.ycbcr_420_allowed = true;
+ } else {
/* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
- intel_dp->dfp.ycbcr_444_to_420 =
- ycbcr_444_to_420 && !ycbcr_420_passthrough;
+ intel_dp->dfp.ycbcr_444_to_420 =
+ ycbcr_444_to_420 && !ycbcr_420_passthrough;
- connector->base.ycbcr_420_allowed =
- !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
+ connector->base.ycbcr_420_allowed =
+ !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
+ }
} else {
/* 4:4:4->4:2:0 conversion is the only way */
intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;
@@ -6537,8 +5347,9 @@ intel_dp_update_420(struct intel_dp *intel_dp)
}
drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
+ "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
connector->base.base.id, connector->base.name,
+ yesno(intel_dp->dfp.rgb_to_ycbcr),
yesno(connector->base.ycbcr_420_allowed),
yesno(intel_dp->dfp.ycbcr_444_to_420));
}
@@ -6562,7 +5373,6 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
}
drm_dp_cec_set_edid(&intel_dp->aux, edid);
- intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}
static void
@@ -6576,13 +5386,14 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
intel_dp->has_hdmi_sink = false;
intel_dp->has_audio = false;
- intel_dp->edid_quirks = 0;
intel_dp->dfp.max_bpc = 0;
intel_dp->dfp.max_dotclock = 0;
intel_dp->dfp.min_tmds_clock = 0;
intel_dp->dfp.max_tmds_clock = 0;
+ intel_dp->dfp.pcon_max_frl_bw = 0;
+
intel_dp->dfp.ycbcr_444_to_420 = false;
connector->base.ycbcr_420_allowed = false;
}
@@ -6688,7 +5499,7 @@ intel_dp_detect(struct drm_connector *connector,
to_intel_connector(connector)->detect_edid)
status = connector_status_connected;
- intel_dp_check_service_irq(intel_dp);
+ intel_dp_check_device_service_irq(intel_dp);
out:
if (status != connector_status_connected && !intel_dp->is_mst)
@@ -6741,6 +5552,10 @@ static int intel_dp_get_modes(struct drm_connector *connector)
edid = intel_connector->detect_edid;
if (edid) {
int ret = intel_connector_update_modes(connector, edid);
+
+ if (intel_vrr_is_capable(connector))
+ drm_connector_set_vrr_capable_property(connector,
+ true);
if (ret)
return ret;
}
@@ -6779,6 +5594,8 @@ intel_dp_connector_register(struct drm_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_lspcon *lspcon = &dig_port->lspcon;
int ret;
ret = intel_connector_register(connector);
@@ -6792,6 +5609,22 @@ intel_dp_connector_register(struct drm_connector *connector)
ret = drm_dp_aux_register(&intel_dp->aux);
if (!ret)
drm_dp_cec_register_connector(&intel_dp->aux, connector);
+
+ if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
+ return ret;
+
+ /*
+ * ToDo: Clean this up so that lspcon init and resume are handled
+ * more efficiently and in a more streamlined fashion.
+ */
+ if (lspcon_init(dig_port)) {
+ lspcon_detect_hdr_capability(lspcon);
+ if (lspcon->hdr_supported)
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.hdr_output_metadata_property,
+ 0);
+ }
+
return ret;
}
@@ -6811,17 +5644,8 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
struct intel_dp *intel_dp = &dig_port->dp;
intel_dp_mst_encoder_cleanup(dig_port);
- if (intel_dp_is_edp(intel_dp)) {
- intel_wakeref_t wakeref;
- cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- /*
- * vdd might still be enabled do to the delayed vdd off.
- * Make sure vdd is actually turned off here.
- */
- with_pps_lock(intel_dp, wakeref)
- edp_panel_vdd_off_sync(intel_dp);
- }
+ intel_pps_vdd_off_sync(intel_dp);
intel_dp_aux_fini(intel_dp);
}
@@ -6837,53 +5661,15 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
- intel_wakeref_t wakeref;
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- /*
- * vdd might still be enabled do to the delayed vdd off.
- * Make sure vdd is actually turned off here.
- */
- cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- with_pps_lock(intel_dp, wakeref)
- edp_panel_vdd_off_sync(intel_dp);
+ intel_pps_vdd_off_sync(intel_dp);
}
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
- intel_wakeref_t wakeref;
- if (!intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref)
- wait_panel_power_cycle(intel_dp);
-}
-
-static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- if (!edp_have_panel_vdd(intel_dp))
- return;
-
- /*
- * The VDD bit needs a power domain reference, so if the bit is
- * already enabled when we boot or resume, grab this reference and
- * schedule a vdd off, so we don't hold on to the reference
- * indefinitely.
- */
- drm_dbg_kms(&dev_priv->drm,
- "VDD left on by BIOS, adjusting state tracking\n");
- intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
-
- edp_panel_vdd_schedule_off(intel_dp);
+ intel_pps_wait_power_cycle(intel_dp);
}
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
@@ -6903,30 +5689,20 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
- intel_wakeref_t wakeref;
if (!HAS_DDI(dev_priv))
intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
intel_dp->reset_link_params = true;
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !intel_dp_is_edp(intel_dp))
- return;
-
- with_pps_lock(intel_dp, wakeref) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_wakeref_t wakeref;
- if (intel_dp_is_edp(intel_dp)) {
- /*
- * Reinit the power sequencer, in case BIOS did
- * something nasty with it.
- */
- intel_dp_pps_init(intel_dp);
- intel_edp_panel_vdd_sanitize(intel_dp);
- }
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
}
+
+ intel_pps_encoder_reset(intel_dp);
}
static int intel_modeset_tile_group(struct intel_atomic_state *state,
@@ -7091,19 +5867,6 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.destroy = intel_dp_encoder_destroy,
};
-static bool intel_edp_have_power(struct intel_dp *intel_dp)
-{
- intel_wakeref_t wakeref;
- bool have_power = false;
-
- with_pps_lock(intel_dp, wakeref) {
- have_power = edp_have_panel_power(intel_dp) &&
- edp_have_panel_vdd(intel_dp);
- }
-
- return have_power;
-}
-
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
@@ -7111,7 +5874,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
struct intel_dp *intel_dp = &dig_port->dp;
if (dig_port->base.type == INTEL_OUTPUT_EDP &&
- (long_hpd || !intel_edp_have_power(intel_dp))) {
+ (long_hpd || !intel_pps_have_power(intel_dp))) {
/*
* vdd off can generate a long/short pulse on eDP which
* would require vdd on to handle it, and thus we
@@ -7180,7 +5943,13 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
else if (INTEL_GEN(dev_priv) >= 5)
drm_connector_attach_max_bpc_property(connector, 6, 12);
- intel_attach_colorspace_property(connector);
+ /* Register the HDMI colorspace property when an LSPCON is present */
+ if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ drm_connector_attach_content_type_property(connector);
+ intel_attach_hdmi_colorspace_property(connector);
+ } else {
+ intel_attach_dp_colorspace_property(connector);
+ }
if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
drm_object_attach_property(&connector->base,
@@ -7199,277 +5968,9 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
}
-}
-
-static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
-{
- intel_dp->panel_power_off_time = ktime_get_boottime();
- intel_dp->last_power_on = jiffies;
- intel_dp->last_backlight_off = jiffies;
-}
-
-static void
-intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp_on, pp_off, pp_ctl;
- struct pps_registers regs;
-
- intel_pps_get_registers(intel_dp, &regs);
-
- pp_ctl = ilk_get_pp_control(intel_dp);
-
- /* Ensure PPS is unlocked */
- if (!HAS_DDI(dev_priv))
- intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
-
- pp_on = intel_de_read(dev_priv, regs.pp_on);
- pp_off = intel_de_read(dev_priv, regs.pp_off);
-
- /* Pull timing values out of registers */
- seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
- seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
- seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
- seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
-
- if (i915_mmio_reg_valid(regs.pp_div)) {
- u32 pp_div;
-
- pp_div = intel_de_read(dev_priv, regs.pp_div);
-
- seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
- } else {
- seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
- }
-}
-
-static void
-intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
-{
- DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- state_name,
- seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
-}
-static void
-intel_pps_verify_state(struct intel_dp *intel_dp)
-{
- struct edp_power_seq hw;
- struct edp_power_seq *sw = &intel_dp->pps_delays;
-
- intel_pps_readout_hw_state(intel_dp, &hw);
-
- if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
- hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
- DRM_ERROR("PPS state mismatch\n");
- intel_pps_dump_state("sw", sw);
- intel_pps_dump_state("hw", &hw);
- }
-}
-
-static void
-intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct edp_power_seq cur, vbt, spec,
- *final = &intel_dp->pps_delays;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* already initialized? */
- if (final->t11_t12 != 0)
- return;
-
- intel_pps_readout_hw_state(intel_dp, &cur);
-
- intel_pps_dump_state("cur", &cur);
-
- vbt = dev_priv->vbt.edp.pps;
- /* On Toshiba Satellite P50-C-18C system the VBT T12 delay
- * of 500ms appears to be too short. Ocassionally the panel
- * just fails to power back on. Increasing the delay to 800ms
- * seems sufficient to avoid this problem.
- */
- if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
- vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
- drm_dbg_kms(&dev_priv->drm,
- "Increasing T12 panel delay as per the quirk to %d\n",
- vbt.t11_t12);
- }
- /* T11_T12 delay is special and actually in units of 100ms, but zero
- * based in the hw (so we need to add 100 ms). But the sw vbt
- * table multiplies it with 1000 to make it in units of 100usec,
- * too. */
- vbt.t11_t12 += 100 * 10;
-
- /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
- * our hw here, which are all in 100usec. */
- spec.t1_t3 = 210 * 10;
- spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
- spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
- spec.t10 = 500 * 10;
- /* This one is special and actually in units of 100ms, but zero
- * based in the hw (so we need to add 100 ms). But the sw vbt
- * table multiplies it with 1000 to make it in units of 100usec,
- * too. */
- spec.t11_t12 = (510 + 100) * 10;
-
- intel_pps_dump_state("vbt", &vbt);
-
- /* Use the max of the register settings and vbt. If both are
- * unset, fall back to the spec limits. */
-#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
- spec.field : \
- max(cur.field, vbt.field))
- assign_final(t1_t3);
- assign_final(t8);
- assign_final(t9);
- assign_final(t10);
- assign_final(t11_t12);
-#undef assign_final
-
-#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
- intel_dp->panel_power_up_delay = get_delay(t1_t3);
- intel_dp->backlight_on_delay = get_delay(t8);
- intel_dp->backlight_off_delay = get_delay(t9);
- intel_dp->panel_power_down_delay = get_delay(t10);
- intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
-#undef get_delay
-
- drm_dbg_kms(&dev_priv->drm,
- "panel power up delay %d, power down delay %d, power cycle delay %d\n",
- intel_dp->panel_power_up_delay,
- intel_dp->panel_power_down_delay,
- intel_dp->panel_power_cycle_delay);
-
- drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
- intel_dp->backlight_on_delay,
- intel_dp->backlight_off_delay);
-
- /*
- * We override the HW backlight delays to 1 because we do manual waits
- * on them. For T8, even BSpec recommends doing it. For T9, if we
- * don't do this, we'll end up waiting for the backlight off delay
- * twice: once when we do the manual sleep, and once when we disable
- * the panel and wait for the PP_STATUS bit to become zero.
- */
- final->t8 = 1;
- final->t9 = 1;
-
- /*
- * HW has only a 100msec granularity for t11_t12 so round it up
- * accordingly.
- */
- final->t11_t12 = roundup(final->t11_t12, 100 * 10);
-}
-
-static void
-intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
- bool force_disable_vdd)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp_on, pp_off, port_sel = 0;
- int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
- struct pps_registers regs;
- enum port port = dp_to_dig_port(intel_dp)->base.port;
- const struct edp_power_seq *seq = &intel_dp->pps_delays;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- intel_pps_get_registers(intel_dp, &regs);
-
- /*
- * On some VLV machines the BIOS can leave the VDD
- * enabled even on power sequencers which aren't
- * hooked up to any port. This would mess up the
- * power domain tracking the first time we pick
- * one of these power sequencers for use since
- * edp_panel_vdd_on() would notice that the VDD was
- * already on and therefore wouldn't grab the power
- * domain reference. Disable VDD first to avoid this.
- * This also avoids spuriously turning the VDD on as
- * soon as the new power sequencer gets initialized.
- */
- if (force_disable_vdd) {
- u32 pp = ilk_get_pp_control(intel_dp);
-
- drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
- "Panel power already on\n");
-
- if (pp & EDP_FORCE_VDD)
- drm_dbg_kms(&dev_priv->drm,
- "VDD already on, disabling first\n");
-
- pp &= ~EDP_FORCE_VDD;
-
- intel_de_write(dev_priv, regs.pp_ctrl, pp);
- }
-
- pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
- REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
- pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
- REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
-
- /* Haswell doesn't have any port selection bits for the panel
- * power sequencer any more. */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- port_sel = PANEL_PORT_SELECT_VLV(port);
- } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- switch (port) {
- case PORT_A:
- port_sel = PANEL_PORT_SELECT_DPA;
- break;
- case PORT_C:
- port_sel = PANEL_PORT_SELECT_DPC;
- break;
- case PORT_D:
- port_sel = PANEL_PORT_SELECT_DPD;
- break;
- default:
- MISSING_CASE(port);
- break;
- }
- }
-
- pp_on |= port_sel;
-
- intel_de_write(dev_priv, regs.pp_on, pp_on);
- intel_de_write(dev_priv, regs.pp_off, pp_off);
-
- /*
- * Compute the divisor for the pp clock, simply match the Bspec formula.
- */
- if (i915_mmio_reg_valid(regs.pp_div)) {
- intel_de_write(dev_priv, regs.pp_div,
- REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
- } else {
- u32 pp_ctl;
-
- pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
- pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
- pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
- intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
- intel_de_read(dev_priv, regs.pp_on),
- intel_de_read(dev_priv, regs.pp_off),
- i915_mmio_reg_valid(regs.pp_div) ?
- intel_de_read(dev_priv, regs.pp_div) :
- (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
-}
-
-static void intel_dp_pps_init(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_initial_power_sequencer_setup(intel_dp);
- } else {
- intel_dp_init_panel_power_sequencer(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
- }
+ if (HAS_VRR(dev_priv))
+ drm_connector_attach_vrr_capable_property(connector);
}
/**
@@ -7908,14 +6409,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
enum pipe pipe = INVALID_PIPE;
- intel_wakeref_t wakeref;
struct edid *edid;
if (!intel_dp_is_edp(intel_dp))
return true;
- INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
-
/*
* On IBX/CPT we may get here with LVDS already registered. Since the
* driver uses the only internal power sequencer available for both
@@ -7931,11 +6429,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
return false;
}
- with_pps_lock(intel_dp, wakeref) {
- intel_dp_init_panel_power_timestamps(intel_dp);
- intel_dp_pps_init(intel_dp);
- intel_edp_panel_vdd_sanitize(intel_dp);
- }
+ intel_pps_init(intel_dp);
/* Cache DPCD and EDID for edp. */
has_dpcd = intel_edp_init_dpcd(intel_dp);
@@ -7952,7 +6446,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
drm_connector_update_edid_property(connector, edid);
- intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
} else {
kfree(edid);
edid = ERR_PTR(-EINVAL);
@@ -7980,7 +6473,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
pipe = vlv_active_pipe(intel_dp);
if (pipe != PIPE_A && pipe != PIPE_B)
- pipe = intel_dp->pps_pipe;
+ pipe = intel_dp->pps.pps_pipe;
if (pipe != PIPE_A && pipe != PIPE_B)
pipe = PIPE_A;
@@ -7991,7 +6484,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
- intel_connector->panel.backlight.power = intel_edp_backlight_power;
+ intel_connector->panel.backlight.power = intel_pps_backlight_power;
intel_panel_setup_backlight(connector, pipe);
if (fixed_mode) {
@@ -8003,13 +6496,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
return true;
out_vdd_off:
- cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- /*
- * vdd might still be enabled do to the delayed vdd off.
- * Make sure vdd is actually turned off here.
- */
- with_pps_lock(intel_dp, wakeref)
- edp_panel_vdd_off_sync(intel_dp);
+ intel_pps_vdd_off_sync(intel_dp);
return false;
}
@@ -8063,8 +6550,8 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp_set_source_rates(intel_dp);
intel_dp->reset_link_params = true;
- intel_dp->pps_pipe = INVALID_PIPE;
- intel_dp->active_pipe = INVALID_PIPE;
+ intel_dp->pps.pps_pipe = INVALID_PIPE;
+ intel_dp->pps.active_pipe = INVALID_PIPE;
/* Preserve the current hw state. */
intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
@@ -8082,7 +6569,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+ intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
/*
* For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
@@ -8151,6 +6638,9 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
(temp & ~0xf) | 0xd);
}
+ intel_dp->frl.is_trained = false;
+ intel_dp->frl.trained_rate_gbps = 0;
+
return true;
fail:
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 05f7ddf7a795..d80839139bfb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -70,16 +70,11 @@ enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
-void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
-void intel_edp_panel_on(struct intel_dp *intel_dp);
-void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_max_lane_count(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
-void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
-u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
@@ -96,9 +91,6 @@ void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat);
-void
-intel_dp_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
@@ -144,5 +136,11 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
void intel_dp_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+const struct dpll *vlv_get_dpll(struct drm_i915_private *i915);
+
+void intel_dp_check_frl_training(struct intel_dp *intel_dp);
+void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_dp_phy_test(struct intel_encoder *encoder);
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
new file mode 100644
index 000000000000..eaebf123310a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020-2021 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_display_types.h"
+#include "intel_dp_aux.h"
+#include "intel_pps.h"
+#include "intel_tc.h"
+
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
+{
+ int i;
+ u32 v = 0;
+
+ if (src_bytes > 4)
+ src_bytes = 4;
+ for (i = 0; i < src_bytes; i++)
+ v |= ((u32)src[i]) << ((3 - i) * 8);
+ return v;
+}
+
+static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
+{
+ int i;
+
+ if (dst_bytes > 4)
+ dst_bytes = 4;
+ for (i = 0; i < dst_bytes; i++)
+ dst[i] = src >> ((3 - i) * 8);
+}
+
+static u32
+intel_dp_aux_wait_done(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+ const unsigned int timeout_ms = 10;
+ u32 status;
+ bool done;
+
+#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ done = wait_event_timeout(i915->gmbus_wait_queue, C,
+ msecs_to_jiffies_timeout(timeout_ms));
+
+ /* just trace the final value */
+ trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
+
+ if (!done)
+ drm_err(&i915->drm,
+ "%s: did not complete or timeout within %ums (status 0x%08x)\n",
+ intel_dp->aux.name, timeout_ms, status);
+#undef C
+
+ return status;
+}
+
+static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (index)
+ return 0;
+
+ /*
+ * The clock divider is based off the hrawclk, and would like to run at
+ * 2MHz. So, take the hrawclk value and divide by 2000 and use that
+ */
+ return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
+}
+
+static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 freq;
+
+ if (index)
+ return 0;
+
+ /*
+ * The clock divider is based off the cdclk or PCH rawclk, and would
+ * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
+ * divide by 2000 and use that
+ */
+ if (dig_port->aux_ch == AUX_CH_A)
+ freq = dev_priv->cdclk.hw.cdclk;
+ else
+ freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
+ return DIV_ROUND_CLOSEST(freq, 2000);
+}
+
+static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+
+ if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
+ /* Workaround for non-ULT HSW */
+ switch (index) {
+ case 0: return 63;
+ case 1: return 72;
+ default: return 0;
+ }
+ }
+
+ return ilk_get_aux_clock_divider(intel_dp, index);
+}
+
+static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ /*
+ * SKL doesn't need us to program the AUX clock divider (Hardware will
+ * derive the clock from CDCLK automatically). We still implement the
+ * get_aux_clock_divider vfunc to plug into the existing code.
+ */
+ return index ? 0 : 1;
+}
+
+static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
+ int send_bytes,
+ u32 aux_clock_divider)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv =
+ to_i915(dig_port->base.base.dev);
+ u32 precharge, timeout;
+
+ if (IS_GEN(dev_priv, 6))
+ precharge = 3;
+ else
+ precharge = 5;
+
+ if (IS_BROADWELL(dev_priv))
+ timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
+ else
+ timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
+
+ return DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_INTERRUPT |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ timeout |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
+}
+
+static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+ int send_bytes,
+ u32 unused)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 =
+ to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+ u32 ret;
+
+ ret = DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_INTERRUPT |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_TIME_OUT_MAX |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
+ DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+
+ if (intel_phy_is_tc(i915, phy) &&
+ dig_port->tc_mode == TC_PORT_TBT_ALT)
+ ret |= DP_AUX_CH_CTL_TBT_IO;
+
+ return ret;
+}
+
+static int
+intel_dp_aux_xfer(struct intel_dp *intel_dp,
+ const u8 *send, int send_bytes,
+ u8 *recv, int recv_size,
+ u32 aux_send_ctl_flags)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 =
+ to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+ bool is_tc_port = intel_phy_is_tc(i915, phy);
+ i915_reg_t ch_ctl, ch_data[5];
+ u32 aux_clock_divider;
+ enum intel_display_power_domain aux_domain;
+ intel_wakeref_t aux_wakeref;
+ intel_wakeref_t pps_wakeref;
+ int i, ret, recv_bytes;
+ int try, clock = 0;
+ u32 status;
+ bool vdd;
+
+ ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+ for (i = 0; i < ARRAY_SIZE(ch_data); i++)
+ ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
+
+ if (is_tc_port)
+ intel_tc_port_lock(dig_port);
+
+ aux_domain = intel_aux_power_domain(dig_port);
+
+ aux_wakeref = intel_display_power_get(i915, aux_domain);
+ pps_wakeref = intel_pps_lock(intel_dp);
+
+ /*
+ * We will be called with VDD already enabled for dpcd/edid/oui reads.
+ * In such cases we want to leave VDD enabled and it's up to upper layers
+ * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
+ * ourselves.
+ */
+ vdd = intel_pps_vdd_on_unlocked(intel_dp);
+
+ /*
+ * DP AUX is extremely sensitive to irq latency, hence request the
+ * lowest possible wakeup latency to prevent the cpu from going into
+ * deep sleep states.
+ */
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
+
+ intel_pps_check_power_unlocked(intel_dp);
+
+ /* Try to wait for any previous AUX channel activity */
+ for (try = 0; try < 3; try++) {
+ status = intel_uncore_read_notrace(uncore, ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+ /* just trace the final value */
+ trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
+
+ if (try == 3) {
+ const u32 status = intel_uncore_read(uncore, ch_ctl);
+
+ if (status != intel_dp->aux_busy_last_status) {
+ drm_WARN(&i915->drm, 1,
+ "%s: not started (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ intel_dp->aux_busy_last_status = status;
+ }
+
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Only 5 data registers! */
+ if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
+ u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
+ send_bytes,
+ aux_clock_divider);
+
+ send_ctl |= aux_send_ctl_flags;
+
+ /* Must try at least 3 times according to DP spec */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4)
+ intel_uncore_write(uncore,
+ ch_data[i >> 2],
+ intel_dp_pack_aux(send + i,
+ send_bytes - i));
+
+ /* Send the command and wait for it to complete */
+ intel_uncore_write(uncore, ch_ctl, send_ctl);
+
+ status = intel_dp_aux_wait_done(intel_dp);
+
+ /* Clear done status and any errors */
+ intel_uncore_write(uncore,
+ ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ /*
+ * Per DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2, a
+ * 400us delay is required after errors and timeouts.
+ * Timeout errors from the HW already meet this
+ * requirement, so skip to the next iteration.
+ */
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
+ continue;
+
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ usleep_range(400, 500);
+ continue;
+ }
+ if (status & DP_AUX_CH_CTL_DONE)
+ goto done;
+ }
+ }
+
+ if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+ drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ ret = -EBUSY;
+ goto out;
+ }
+
+done:
+ /*
+ * Check for timeout or receive error. Timeouts occur when the sink is
+ * not connected.
+ */
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * Timeouts occur when the device isn't connected, so they're "normal"
+ * -- don't fill the kernel log with these
+ */
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+ drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* Unload any bytes sent back from the other side */
+ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+
+ /*
+ * By BSpec: "Message sizes of 0 or >20 are not allowed."
+ * We have no idea what happened, so return -EBUSY so the
+ * drm layer takes care of the necessary retries.
+ */
+ if (recv_bytes == 0 || recv_bytes > 20) {
+ drm_dbg_kms(&i915->drm,
+ "%s: Forbidden recv_bytes = %d on aux transaction\n",
+ intel_dp->aux.name, recv_bytes);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (recv_bytes > recv_size)
+ recv_bytes = recv_size;
+
+ for (i = 0; i < recv_bytes; i += 4)
+ intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
+ recv + i, recv_bytes - i);
+
+ ret = recv_bytes;
+out:
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ if (vdd)
+ intel_pps_vdd_off_unlocked(intel_dp, false);
+
+ intel_pps_unlock(intel_dp, pps_wakeref);
+ intel_display_power_put_async(i915, aux_domain, aux_wakeref);
+
+ if (is_tc_port)
+ intel_tc_port_unlock(dig_port);
+
+ return ret;
+}
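+
+/*
+ * For illustration (hypothetical transfer): a 6 byte message occupies
+ * ch_data[0] (bytes 0-3) and ch_data[1] (bytes 4-5); with only 5 data
+ * registers of 4 bytes each, 20 bytes is the hard limit checked above.
+ */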
+
+#define BARE_ADDRESS_SIZE 3
+#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
+
+static void
+intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
+ const struct drm_dp_aux_msg *msg)
+{
+ txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
+ txbuf[1] = (msg->address >> 8) & 0xff;
+ txbuf[2] = msg->address & 0xff;
+ txbuf[3] = msg->size - 1;
+}
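+
+/*
+ * Example (hypothetical request, for illustration only): a native AUX
+ * read of 16 bytes at DPCD address 0x00200 is encoded as
+ * txbuf[0] = (DP_AUX_NATIVE_READ << 4) | 0x0 = 0x90, txbuf[1] = 0x02,
+ * txbuf[2] = 0x00 and txbuf[3] = 16 - 1 = 0x0f.
+ */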
+
+static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
+{
+ /*
+ * If we're trying to send the HDCP Aksv, we need to set the Aksv
+ * select bit to inform the hardware to send the Aksv after our header
+ * since we can't access that data from software.
+ */
+ if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
+ msg->address == DP_AUX_HDCP_AKSV)
+ return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
+
+ return 0;
+}
+
+static ssize_t
+intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 txbuf[20], rxbuf[20];
+ size_t txsize, rxsize;
+ u32 flags = intel_dp_aux_xfer_flags(msg);
+ int ret;
+
+ intel_dp_aux_header(txbuf, msg);
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+ txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
+ rxsize = 2; /* 0 or 1 data bytes */
+
+ if (drm_WARN_ON(&i915->drm, txsize > 20))
+ return -E2BIG;
+
+ drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
+
+ if (msg->buffer)
+ memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
+
+ ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
+ rxbuf, rxsize, flags);
+ if (ret > 0) {
+ msg->reply = rxbuf[0] >> 4;
+
+ if (ret > 1) {
+ /* Number of bytes written in a short write. */
+ ret = clamp_t(int, rxbuf[1], 0, msg->size);
+ } else {
+ /* Return payload size. */
+ ret = msg->size;
+ }
+ }
+ break;
+
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
+ rxsize = msg->size + 1;
+
+ if (drm_WARN_ON(&i915->drm, rxsize > 20))
+ return -E2BIG;
+
+ ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
+ rxbuf, rxsize, flags);
+ if (ret > 0) {
+ msg->reply = rxbuf[0] >> 4;
+ /*
+ * Assume happy day, and copy the data. The caller is
+ * expected to check msg->reply before touching it.
+ *
+ * Return payload size.
+ */
+ ret--;
+ memcpy(msg->buffer, rxbuf + 1, ret);
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
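+
+/*
+ * Sizing example (hypothetical message, for illustration only): an I2C
+ * read of msg->size == 16 bytes uses txsize == HEADER_SIZE == 4 and
+ * rxsize == 16 + 1 == 17 (one reply byte plus the payload), while a
+ * zero-length address-only transaction uses txsize == BARE_ADDRESS_SIZE == 3.
+ */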
+
+static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_B);
+ }
+}
+
+static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_B, index);
+ }
+}
+
+static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ return DP_AUX_CH_CTL(aux_ch);
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return PCH_DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return PCH_DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
+static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ case AUX_CH_E:
+ case AUX_CH_F:
+ return DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ case AUX_CH_E:
+ case AUX_CH_F:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
+static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ case AUX_CH_USBC5:
+ case AUX_CH_USBC6:
+ return DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ case AUX_CH_USBC5:
+ case AUX_CH_USBC6:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
+void intel_dp_aux_fini(struct intel_dp *intel_dp)
+{
+ if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
+ cpu_latency_qos_remove_request(&intel_dp->pm_qos);
+
+ kfree(intel_dp->aux.name);
+}
+
+void intel_dp_aux_init(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
+ } else if (INTEL_GEN(dev_priv) >= 9) {
+ intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = skl_aux_data_reg;
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
+ } else {
+ intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
+ else
+ intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
+ else
+ intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
+
+ drm_dp_aux_init(&intel_dp->aux);
+
+ /* Failure to allocate our preferred name is not critical */
+ if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
+ intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
+ aux_ch - AUX_CH_USBC1 + '1',
+ encoder->base.name);
+ else
+ intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
+ aux_ch_name(aux_ch),
+ encoder->base.name);
+
+ intel_dp->aux.transfer = intel_dp_aux_transfer;
+ cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
new file mode 100644
index 000000000000..4afbe76217b9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020-2021 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_AUX_H__
+#define __INTEL_DP_AUX_H__
+
+#include <linux/types.h>
+
+struct intel_dp;
+
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
+
+void intel_dp_aux_fini(struct intel_dp *intel_dp);
+void intel_dp_aux_init(struct intel_dp *intel_dp);
+
+#endif /* __INTEL_DP_AUX_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 51d27fc98d48..651884390137 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -22,10 +22,253 @@
*
*/
+/*
+ * Laptops with Intel GPUs which have panels that support controlling the
+ * backlight through DP AUX can actually use two different interfaces: Intel's
+ * proprietary DP AUX backlight interface, and the standard VESA backlight
+ * interface. Unfortunately, at the time of writing a lot of laptops
+ * advertise support for the standard VESA backlight interface even though
+ * they don't properly support it. However, on these systems the Intel
+ * backlight interface generally does work properly. Additionally, such
+ * systems usually just indicate that they use PWM backlight controls in
+ * their VBIOS for some reason.
+ */
+
#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
+#include "intel_panel.h"
+
+/* TODO:
+ * Implement HDR. Right now we only implement the bare minimum to bring us back into SDR mode so we
+ * can make people's backlights work in the meantime.
+ */
+
+/*
+ * DP AUX registers for Intel's proprietary HDR backlight interface. We define
+ * them here since we'll likely be the only driver to ever use these.
+ */
+#define INTEL_EDP_HDR_TCON_CAP0 0x340
+
+#define INTEL_EDP_HDR_TCON_CAP1 0x341
+# define INTEL_EDP_HDR_TCON_2084_DECODE_CAP BIT(0)
+# define INTEL_EDP_HDR_TCON_2020_GAMUT_CAP BIT(1)
+# define INTEL_EDP_HDR_TCON_TONE_MAPPING_CAP BIT(2)
+# define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_CAP BIT(3)
+# define INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP BIT(4)
+# define INTEL_EDP_HDR_TCON_OPTIMIZATION_CAP BIT(5)
+# define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_CAP BIT(6)
+# define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_CONVERSION_CAP BIT(7)
+
+#define INTEL_EDP_HDR_TCON_CAP2 0x342
+# define INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP BIT(0)
+
+#define INTEL_EDP_HDR_TCON_CAP3 0x343
+
+#define INTEL_EDP_HDR_GETSET_CTRL_PARAMS 0x344
+# define INTEL_EDP_HDR_TCON_2084_DECODE_ENABLE BIT(0)
+# define INTEL_EDP_HDR_TCON_2020_GAMUT_ENABLE BIT(1)
+# define INTEL_EDP_HDR_TCON_TONE_MAPPING_ENABLE BIT(2) /* Pre-TGL+ */
+# define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_ENABLE BIT(3)
+# define INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE BIT(4)
+# define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_ENABLE BIT(5)
+/* Bit 6 is reserved */
+# define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_ENABLE BIT(7)
+
+#define INTEL_EDP_HDR_CONTENT_LUMINANCE 0x346 /* Pre-TGL+ */
+#define INTEL_EDP_HDR_PANEL_LUMINANCE_OVERRIDE 0x34A
+#define INTEL_EDP_SDR_LUMINANCE_LEVEL 0x352
+#define INTEL_EDP_BRIGHTNESS_NITS_LSB 0x354
+#define INTEL_EDP_BRIGHTNESS_NITS_MSB 0x355
+#define INTEL_EDP_BRIGHTNESS_DELAY_FRAMES 0x356
+#define INTEL_EDP_BRIGHTNESS_PER_FRAME_STEPS 0x357
+
+#define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_0 0x358
+# define INTEL_EDP_TCON_USAGE_MASK GENMASK(3, 0)
+# define INTEL_EDP_TCON_USAGE_UNKNOWN 0x0
+# define INTEL_EDP_TCON_USAGE_DESKTOP 0x1
+# define INTEL_EDP_TCON_USAGE_FULL_SCREEN_MEDIA 0x2
+# define INTEL_EDP_TCON_USAGE_FULL_SCREEN_GAMING 0x3
+# define INTEL_EDP_TCON_POWER_MASK BIT(4)
+# define INTEL_EDP_TCON_POWER_DC (0 << 4)
+# define INTEL_EDP_TCON_POWER_AC (1 << 4)
+# define INTEL_EDP_TCON_OPTIMIZATION_STRENGTH_MASK GENMASK(7, 5)
+
+#define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1 0x359
+
+/* Intel EDP backlight callbacks */
+static bool
+intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ struct intel_panel *panel = &connector->panel;
+ int ret;
+ u8 tcon_cap[4];
+
+ ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap));
+ if (ret < 0)
+ return false;
+
+ if (!(tcon_cap[1] & INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP))
+ return false;
+
+ if (tcon_cap[0] >= 1) {
+ drm_dbg_kms(&i915->drm, "Detected Intel HDR backlight interface version %d\n",
+ tcon_cap[0]);
+ } else {
+ drm_dbg_kms(&i915->drm, "Detected unsupported HDR backlight interface version %d\n",
+ tcon_cap[0]);
+ return false;
+ }
+
+ panel->backlight.edp.intel.sdr_uses_aux =
+ tcon_cap[2] & INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP;
+
+ return true;
+}
+
+static u32
+intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ u8 tmp;
+ u8 buf[2] = { 0 };
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) < 0) {
+ drm_err(&i915->drm, "Failed to read current backlight mode from DPCD\n");
+ return 0;
+ }
+
+ if (!(tmp & INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE)) {
+ if (!panel->backlight.edp.intel.sdr_uses_aux) {
+ u32 pwm_level = panel->backlight.pwm_funcs->get(connector, pipe);
+
+ return intel_panel_backlight_level_from_pwm(connector, pwm_level);
+ }
+
+ /* Assume 100% brightness if backlight controls aren't enabled yet */
+ return panel->backlight.max;
+ }
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, sizeof(buf)) < 0) {
+ drm_err(&i915->drm, "Failed to read brightness from DPCD\n");
+ return 0;
+ }
+
+ return (buf[1] << 8 | buf[0]);
+}
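+
+/*
+ * For illustration (hypothetical readback): buf[0] == 0x2c and
+ * buf[1] == 0x01 combine to (0x01 << 8 | 0x2c) == 300, i.e. a
+ * brightness of 300 nits.
+ */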
+
+static void
+intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_device *dev = connector->base.dev;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ u8 buf[4] = { 0 };
+
+ buf[0] = level & 0xFF;
+ buf[1] = (level & 0xFF00) >> 8;
+
+ if (drm_dp_dpcd_write(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, 4) < 0)
+ drm_err(dev, "Failed to write brightness level to DPCD\n");
+}
+
+static void
+intel_dp_aux_hdr_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ if (panel->backlight.edp.intel.sdr_uses_aux) {
+ intel_dp_aux_hdr_set_aux_backlight(conn_state, level);
+ } else {
+ const u32 pwm_level = intel_panel_backlight_level_to_pwm(connector, level);
+
+ intel_panel_set_pwm_level(conn_state, pwm_level);
+ }
+}
+
+static void
+intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ int ret;
+ u8 old_ctrl, ctrl;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
+ if (ret < 0) {
+ drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret);
+ return;
+ }
+
+ ctrl = old_ctrl;
+ if (panel->backlight.edp.intel.sdr_uses_aux) {
+ ctrl |= INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
+ intel_dp_aux_hdr_set_aux_backlight(conn_state, level);
+ } else {
+ u32 pwm_level = intel_panel_backlight_level_to_pwm(connector, level);
+
+ panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
+
+ ctrl &= ~INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
+ }
+
+ if (ctrl != old_ctrl)
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) < 0)
+ drm_err(&i915->drm, "Failed to configure DPCD brightness controls\n");
+}
+
+static void
+intel_dp_aux_hdr_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ /* Nothing to do for AUX based backlight controls */
+ if (panel->backlight.edp.intel.sdr_uses_aux)
+ return;
+
+ /* Note we want the actual pwm_level to be 0, regardless of pwm_min */
+ panel->backlight.pwm_funcs->disable(conn_state, intel_panel_invert_pwm_level(connector, 0));
+}
+
+static int
+intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ int ret;
+
+ if (panel->backlight.edp.intel.sdr_uses_aux) {
+ drm_dbg_kms(&i915->drm, "SDR backlight is controlled through DPCD\n");
+ } else {
+ drm_dbg_kms(&i915->drm, "SDR backlight is controlled through PWM\n");
+
+ ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+ if (ret < 0) {
+ drm_err(&i915->drm,
+ "Failed to setup SDR backlight controls through PWM: %d\n", ret);
+ return ret;
+ }
+ }
+
+ panel->backlight.max = 512;
+ panel->backlight.min = 0;
+ panel->backlight.level = intel_dp_aux_hdr_get_backlight(connector, pipe);
+ panel->backlight.enabled = panel->backlight.level != 0;
-static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
+ return 0;
+}
+
+/* VESA backlight callbacks */
+static void set_vesa_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 reg_val = 0;
@@ -52,7 +295,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
}
}
-static bool intel_dp_aux_backlight_dpcd_mode(struct intel_connector *connector)
+static bool intel_dp_aux_vesa_backlight_dpcd_mode(struct intel_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -75,7 +318,7 @@ static bool intel_dp_aux_backlight_dpcd_mode(struct intel_connector *connector)
* Read the current backlight value from DPCD register(s) based
* on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported
*/
-static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
+static u32 intel_dp_aux_vesa_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -86,7 +329,7 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
* If we're not in DPCD control mode yet, the programmed brightness
* value is meaningless and we should assume max brightness
*/
- if (!intel_dp_aux_backlight_dpcd_mode(connector))
+ if (!intel_dp_aux_vesa_backlight_dpcd_mode(connector))
return connector->panel.backlight.max;
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
@@ -107,7 +350,8 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
* 8-bit or 16 bit value (MSB and LSB)
*/
static void
-intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state,
+ u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
@@ -137,11 +381,11 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
* - Where P = 2^Pn, where Pn is the value programmed by field 4:0 of the
* EDP_PWMGEN_BIT_COUNT register (DPCD Address 00724h)
*/
-static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
+static bool intel_dp_aux_vesa_set_pwm_freq(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_dp *intel_dp = intel_attached_dp(connector);
- const u8 pn = connector->panel.backlight.pwmgen_bit_count;
+ const u8 pn = connector->panel.backlight.edp.vesa.pwmgen_bit_count;
int freq, fxp, f, fxp_actual, fxp_min, fxp_max;
freq = dev_priv->vbt.backlight.pwm_freq_hz;
@@ -173,14 +417,16 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
return true;
}
-static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static void
+intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_panel *panel = &connector->panel;
u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
+ u8 pwmgen_bit_count = panel->backlight.edp.vesa.pwmgen_bit_count;
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
@@ -201,7 +447,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_PWMGEN_BIT_COUNT,
- panel->backlight.pwmgen_bit_count) < 0)
+ pwmgen_bit_count) < 0)
drm_dbg_kms(&i915->drm,
"Failed to write aux pwmgen bit count\n");
@@ -214,7 +460,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
}
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP)
- if (intel_dp_aux_set_pwm_freq(connector))
+ if (intel_dp_aux_vesa_set_pwm_freq(connector))
new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
if (new_dpcd_buf != dpcd_buf) {
@@ -225,18 +471,18 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
}
}
- intel_dp_aux_set_backlight(conn_state,
- connector->panel.backlight.level);
- set_aux_backlight_enable(intel_dp, true);
+ intel_dp_aux_vesa_set_backlight(conn_state, level);
+ set_vesa_backlight_enable(intel_dp, true);
}
-static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state *old_conn_state,
+ u32 level)
{
- set_aux_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)),
- false);
+ set_vesa_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)),
+ false);
}
-static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
+static u32 intel_dp_aux_vesa_calc_max_backlight(struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dp *intel_dp = intel_attached_dp(connector);
@@ -309,40 +555,45 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
"Failed to write aux pwmgen bit count\n");
return max_backlight;
}
- panel->backlight.pwmgen_bit_count = pn;
+ panel->backlight.edp.vesa.pwmgen_bit_count = pn;
max_backlight = (1 << pn) - 1;
return max_backlight;
}
-static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
- enum pipe pipe)
+static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
+ enum pipe pipe)
{
struct intel_panel *panel = &connector->panel;
- panel->backlight.max = intel_dp_aux_calc_max_backlight(connector);
+ panel->backlight.max = intel_dp_aux_vesa_calc_max_backlight(connector);
if (!panel->backlight.max)
return -ENODEV;
panel->backlight.min = 0;
- panel->backlight.level = intel_dp_aux_get_backlight(connector);
- panel->backlight.enabled = intel_dp_aux_backlight_dpcd_mode(connector) &&
+ panel->backlight.level = intel_dp_aux_vesa_get_backlight(connector, pipe);
+ panel->backlight.enabled = intel_dp_aux_vesa_backlight_dpcd_mode(connector) &&
panel->backlight.level != 0;
return 0;
}
static bool
-intel_dp_aux_display_control_capable(struct intel_connector *connector)
+intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
/* Check the eDP Display control capabilities registers to determine if
- * the panel can support backlight control over the aux channel
+ * the panel can support backlight control over the aux channel.
+ *
+ * TODO: We currently only support AUX-only backlight configurations, not backlights which
+ * require a mix of PWM and AUX controls to work. In the meantime, these machines typically
+ * work just fine using normal PWM controls anyway.
*/
if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
+ (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP)) {
drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
return true;
@@ -350,40 +601,89 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
return false;
}
-int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
+static const struct intel_panel_bl_funcs intel_dp_hdr_bl_funcs = {
+ .setup = intel_dp_aux_hdr_setup_backlight,
+ .enable = intel_dp_aux_hdr_enable_backlight,
+ .disable = intel_dp_aux_hdr_disable_backlight,
+ .set = intel_dp_aux_hdr_set_backlight,
+ .get = intel_dp_aux_hdr_get_backlight,
+};
+
+static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = {
+ .setup = intel_dp_aux_vesa_setup_backlight,
+ .enable = intel_dp_aux_vesa_enable_backlight,
+ .disable = intel_dp_aux_vesa_disable_backlight,
+ .set = intel_dp_aux_vesa_set_backlight,
+ .get = intel_dp_aux_vesa_get_backlight,
+};
+
+enum intel_dp_aux_backlight_modparam {
+ INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
+ INTEL_DP_AUX_BACKLIGHT_OFF = 0,
+ INTEL_DP_AUX_BACKLIGHT_ON = 1,
+ INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
+ INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
+};
+
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
{
- struct intel_panel *panel = &intel_connector->panel;
- struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
+ struct drm_device *dev = connector->base.dev;
+ struct intel_panel *panel = &connector->panel;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ bool try_intel_interface = false, try_vesa_interface = false;
- if (i915->params.enable_dpcd_backlight == 0 ||
- !intel_dp_aux_display_control_capable(intel_connector))
+ /* Check the VBT and the user's module parameters to figure out which
+ * interfaces to probe.
+ */
+ switch (i915->params.enable_dpcd_backlight) {
+ case INTEL_DP_AUX_BACKLIGHT_OFF:
return -ENODEV;
+ case INTEL_DP_AUX_BACKLIGHT_AUTO:
+ switch (i915->vbt.backlight.type) {
+ case INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE:
+ try_vesa_interface = true;
+ break;
+ case INTEL_BACKLIGHT_DISPLAY_DDI:
+ try_intel_interface = true;
+ try_vesa_interface = true;
+ break;
+ default:
+ return -ENODEV;
+ }
+ break;
+ case INTEL_DP_AUX_BACKLIGHT_ON:
+ if (i915->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)
+ try_intel_interface = true;
+
+ try_vesa_interface = true;
+ break;
+ case INTEL_DP_AUX_BACKLIGHT_FORCE_VESA:
+ try_vesa_interface = true;
+ break;
+ case INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL:
+ try_intel_interface = true;
+ break;
+ }
/*
- * There are a lot of machines that don't advertise the backlight
- * control interface to use properly in their VBIOS, :\
+ * A lot of eDP panels in the wild will report supporting both the
+ * Intel proprietary backlight control interface and the VESA
+ * backlight control interface. Many of these panels are liars though,
+ * and will only work with the Intel interface. So, always probe for
+ * that first.
*/
- if (i915->vbt.backlight.type !=
- INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
- i915->params.enable_dpcd_backlight != 1 &&
- !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
- DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
- drm_info(&i915->drm,
- "Panel advertises DPCD backlight support, but "
- "VBT disagrees. If your backlight controls "
- "don't work try booting with "
- "i915.enable_dpcd_backlight=1. If your machine "
- "needs this, please file a _new_ bug report on "
- "drm/i915, see " FDO_BUG_URL " for details.\n");
- return -ENODEV;
+ if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) {
+ drm_dbg_kms(dev, "Using Intel proprietary eDP backlight controls\n");
+ panel->backlight.funcs = &intel_dp_hdr_bl_funcs;
+ return 0;
}
- panel->backlight.setup = intel_dp_aux_setup_backlight;
- panel->backlight.enable = intel_dp_aux_enable_backlight;
- panel->backlight.disable = intel_dp_aux_disable_backlight;
- panel->backlight.set = intel_dp_aux_set_backlight;
- panel->backlight.get = intel_dp_aux_get_backlight;
+ if (try_vesa_interface && intel_dp_aux_supports_vesa_backlight(connector)) {
+ drm_dbg_kms(dev, "Using VESA eDP backlight controls\n");
+ panel->backlight.funcs = &intel_dp_vesa_bl_funcs;
+ return 0;
+ }
- return 0;
+ return -ENODEV;
}
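+
+/*
+ * Usage note (illustrative, based on the enum above): booting with
+ * i915.enable_dpcd_backlight=3 (INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL)
+ * probes only the Intel proprietary interface, =2 probes only the VESA
+ * one, and INTEL_DP_AUX_BACKLIGHT_AUTO (-1) follows the VBT as above.
+ */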
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 03424d20e9f7..4dba5bb15af5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -16,6 +16,30 @@
#include "intel_dp.h"
#include "intel_hdcp.h"
+static unsigned int transcoder_to_stream_enc_status(enum transcoder cpu_transcoder)
+{
+ u32 stream_enc_mask;
+
+ switch (cpu_transcoder) {
+ case TRANSCODER_A:
+ stream_enc_mask = HDCP_STATUS_STREAM_A_ENC;
+ break;
+ case TRANSCODER_B:
+ stream_enc_mask = HDCP_STATUS_STREAM_B_ENC;
+ break;
+ case TRANSCODER_C:
+ stream_enc_mask = HDCP_STATUS_STREAM_C_ENC;
+ break;
+ case TRANSCODER_D:
+ stream_enc_mask = HDCP_STATUS_STREAM_D_ENC;
+ break;
+ default:
+ stream_enc_mask = 0;
+ }
+
+ return stream_enc_mask;
+}
+
static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
{
long ret;
@@ -561,7 +585,8 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port,
}
static
-int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port)
+int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
{
u8 rx_status;
int ret;
@@ -622,48 +647,143 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
};
static int
-intel_dp_mst_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
- enum transcoder cpu_transcoder,
- bool enable)
+intel_dp_mst_toggle_hdcp_stream_select(struct intel_connector *connector,
+ bool enable)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- if (!enable)
- usleep_range(6, 60); /* Bspec says >= 6us */
-
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base,
- cpu_transcoder, enable);
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base,
+ hdcp->stream_transcoder, enable,
+ TRANS_DDI_HDCP_SELECT);
if (ret)
- drm_dbg_kms(&i915->drm, "%s HDCP signalling failed (%d)\n",
- enable ? "Enable" : "Disable", ret);
+ drm_err(&i915->drm, "%s HDCP stream select failed (%d)\n",
+ enable ? "Enable" : "Disable", ret);
return ret;
}
-static
-bool intel_dp_mst_hdcp_check_link(struct intel_digital_port *dig_port,
- struct intel_connector *connector)
+static int
+intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector,
+ bool enable)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = dig_port->base.port;
+ enum transcoder cpu_transcoder = hdcp->stream_transcoder;
+ u32 stream_enc_status;
+ int ret;
+
+ ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable);
+ if (ret)
+ return ret;
+
+ stream_enc_status = transcoder_to_stream_enc_status(cpu_transcoder);
+ if (!stream_enc_status)
+ return -EINVAL;
+
+ /* Wait for encryption confirmation */
+ if (intel_de_wait_for_register(i915,
+ HDCP_STATUS(i915, cpu_transcoder, port),
+ stream_enc_status,
+ enable ? stream_enc_status : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
+ transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static bool intel_dp_mst_get_qses_status(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_dp *intel_dp = &dig_port->dp;
struct drm_dp_query_stream_enc_status_ack_reply reply;
+ struct intel_dp *intel_dp = &dig_port->dp;
int ret;
- if (!intel_dp_hdcp_check_link(dig_port, connector))
- return false;
-
ret = drm_dp_send_query_stream_enc_status(&intel_dp->mst_mgr,
connector->port, &reply);
if (ret) {
drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] failed QSES ret=%d\n",
- connector->base.base.id, connector->base.name, ret);
+ "[%s:%d] failed QSES ret=%d\n",
+ connector->base.name, connector->base.base.id, ret);
return false;
}
+ drm_dbg_kms(&i915->drm, "[%s:%d] QSES stream auth: %d stream enc: %d\n",
+ connector->base.name, connector->base.base.id,
+ reply.auth_completed, reply.encryption_enabled);
+
return reply.auth_completed && reply.encryption_enabled;
}
+static int
+intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
+ bool enable)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum transcoder cpu_transcoder = hdcp->stream_transcoder;
+ enum pipe pipe = (enum pipe)cpu_transcoder;
+ enum port port = dig_port->base.port;
+ int ret;
+
+ drm_WARN_ON(&i915->drm, enable &&
+ !!(intel_de_read(i915, HDCP2_AUTH_STREAM(i915, cpu_transcoder, port))
+ & AUTH_STREAM_TYPE) != data->streams[0].stream_type);
+
+ ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable);
+ if (ret)
+ return ret;
+
+ /* Wait for encryption confirmation */
+ if (intel_de_wait_for_register(i915,
+ HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe),
+ STREAM_ENCRYPTION_STATUS,
+ enable ? STREAM_ENCRYPTION_STATUS : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
+ transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Per DP v2.0 section I.3.3, ignore the stream signature L' in the QSES
+ * reply msg. Per I.3.5, an MST source device may use a QSES msg to query
+ * the downstream status for a particular stream.
+ */
+static
+int intel_dp_mst_hdcp2_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ /*
+ * We only need to do the Link Check for the connector involved with
+ * HDCP port authentication and encryption.
+ * We can re-use the hdcp->is_repeater flag to know whether the connector
+ * is involved with HDCP port authentication and encryption.
+ */
+ if (hdcp->is_repeater) {
+ ret = intel_dp_hdcp2_check_link(dig_port, connector);
+ if (ret)
+ return ret;
+ }
+
+ return intel_dp_mst_get_qses_status(dig_port, connector) ? 0 : -EINVAL;
+}
+
static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
.write_an_aksv = intel_dp_hdcp_write_an_aksv,
.read_bksv = intel_dp_hdcp_read_bksv,
@@ -673,10 +793,16 @@ static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
.read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
.read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
.read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
- .toggle_signalling = intel_dp_mst_hdcp_toggle_signalling,
- .check_link = intel_dp_mst_hdcp_check_link,
+ .toggle_signalling = intel_dp_hdcp_toggle_signalling,
+ .stream_encryption = intel_dp_mst_hdcp_stream_encryption,
+ .check_link = intel_dp_hdcp_check_link,
.hdcp_capable = intel_dp_hdcp_capable,
-
+ .write_2_2_msg = intel_dp_hdcp2_write_msg,
+ .read_2_2_msg = intel_dp_hdcp2_read_msg,
+ .config_stream_type = intel_dp_hdcp2_config_stream_type,
+ .stream_2_2_encryption = intel_dp_mst_hdcp2_stream_encryption,
+ .check_2_2_link = intel_dp_mst_hdcp2_check_link,
+ .hdcp_2_2_capable = intel_dp_hdcp2_capable,
.protocol = HDCP_PROTOCOL_DP,
};
@@ -693,10 +819,10 @@ int intel_dp_init_hdcp(struct intel_digital_port *dig_port,
return 0;
if (intel_connector->mst_port)
- return intel_hdcp_init(intel_connector, port,
+ return intel_hdcp_init(intel_connector, dig_port,
&intel_dp_mst_hdcp_shim);
else if (!intel_dp_is_edp(intel_dp))
- return intel_hdcp_init(intel_connector, port,
+ return intel_hdcp_init(intel_connector, dig_port,
&intel_dp_hdcp_shim);
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 91d3979902d0..892d7db7d94f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -34,18 +34,6 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
link_status[3], link_status[4], link_status[5]);
}
-static int intel_dp_lttpr_count(struct intel_dp *intel_dp)
-{
- int count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
-
- /*
- * Pretend no LTTPRs in case of LTTPR detection error, or
- * if too many (>8) LTTPRs are detected. This translates to link
- * training in transparent mode.
- */
- return count <= 0 ? 0 : count;
-}
-
static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
{
intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
@@ -142,6 +130,17 @@ int intel_dp_lttpr_init(struct intel_dp *intel_dp)
return 0;
ret = intel_dp_read_lttpr_common_caps(intel_dp);
+ if (!ret)
+ return 0;
+
+ lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
+ /*
+ * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
+ * detected as this breaks link training at least on the Dell WD19TB
+ * dock.
+ */
+ if (lttpr_count == 0)
+ return 0;
/*
* See DP Standard v2.0 3.6.6.1. about the explicit disabling of
@@ -150,17 +149,12 @@ int intel_dp_lttpr_init(struct intel_dp *intel_dp)
*/
intel_dp_set_lttpr_transparent_mode(intel_dp, true);
- if (!ret)
- return 0;
-
- lttpr_count = intel_dp_lttpr_count(intel_dp);
-
/*
* In case of unsupported number of LTTPRs or failing to switch to
* non-transparent mode fall-back to transparent link training mode,
* still taking into account any LTTPR common lane- rate/count limits.
*/
- if (lttpr_count == 0)
+ if (lttpr_count < 0)
return 0;
if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
@@ -222,11 +216,11 @@ intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- int lttpr_count = intel_dp_lttpr_count(intel_dp);
+ int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
- drm_WARN_ON_ONCE(&i915->drm, lttpr_count == 0 && dp_phy != DP_PHY_DPRX);
+ drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);
- return lttpr_count == 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
+ return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}
static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
@@ -334,6 +328,27 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_set = intel_dp->train_set[0];
+ char phy_name[10];
+
+ drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n",
+ train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
+ train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "",
+ (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT,
+ train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
+ " (max)" : "",
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));
+
+ if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
+ intel_dp->set_signal_levels(intel_dp, crtc_state);
+}
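+
+/*
+ * Decoding example (hypothetical value, assuming the standard
+ * drm_dp_helper bit layout): train_set == 0x09 logs as vswing level 1
+ * (bits 1:0) with pre-emphasis level 1 (bits 4:3), neither at max.
+ */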
+
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
@@ -341,7 +356,7 @@ intel_dp_reset_link_train(struct intel_dp *intel_dp,
u8 dp_train_pat)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}
@@ -355,7 +370,7 @@ intel_dp_update_link_train(struct intel_dp *intel_dp,
DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
int ret;
- intel_dp_set_signal_levels(intel_dp, crtc_state);
+ intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
intel_dp->train_set, crtc_state->lane_count);
@@ -413,7 +428,7 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
&rate_select, 1);
- link_config[0] = 0;
+ link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
link_config[1] = DP_SET_ANSI_8B10B;
drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
@@ -676,9 +691,9 @@ static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
* @intel_dp: DP struct
* @crtc_state: state for CRTC attached to the encoder
*
- * Stop the link training of the @intel_dp port, disabling the test pattern
- * symbol generation on the port and disabling the training pattern in
- * the sink's DPCD.
+ * Stop the link training of the @intel_dp port, disabling the training
+ * pattern in the sink's DPCD, and disabling the test pattern symbol
+ * generation on the port.
*
* What symbols are output on the port after this point is
* platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
@@ -692,10 +707,9 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp,
{
intel_dp->link_trained = true;
- intel_dp_program_link_training_pattern(intel_dp,
- crtc_state,
- DP_TRAINING_PATTERN_DISABLE);
intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
+ intel_dp_program_link_training_pattern(intel_dp, crtc_state,
+ DP_TRAINING_PATTERN_DISABLE);
}
static bool
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 86905aa24db7..6a1f76bd8c75 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -17,6 +17,9 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE]);
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy);
void intel_dp_start_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 27f04aed8764..b4621ed0127e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -53,8 +53,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
- DP_DPCD_QUIRK_CONSTANT_N);
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
int bpp, slots = -EINVAL;
crtc_state->lane_count = limits->max_lane_count;
@@ -69,7 +68,9 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
connector->port,
- crtc_state->pbn, 0);
+ crtc_state->pbn,
+ drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+ crtc_state->lane_count));
if (slots == -EDEADLK)
return slots;
if (slots >= 0)
@@ -569,7 +570,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED)
intel_hdcp_enable(to_intel_connector(conn_state->connector),
- pipe_config->cpu_transcoder,
+ pipe_config,
(u8)conn_state->hdcp_content_type);
}
@@ -829,12 +830,11 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
-
- /* TODO: Figure out how to make HDCP work on GEN12+ */
- if (INTEL_GEN(dev_priv) < 12) {
+ if (INTEL_GEN(dev_priv) <= 12) {
ret = intel_dp_init_hdcp(dig_port, intel_connector);
if (ret)
- DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+ drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
+ connector->name, connector->base.id);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
new file mode 100644
index 000000000000..7ba7f315aaee
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -0,0 +1,1363 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+#include "intel_display_types.h"
+#include "intel_display.h"
+#include "intel_dpll.h"
+#include "intel_lvds.h"
+#include "intel_panel.h"
+
+struct intel_limit {
+ struct {
+ int min, max;
+ } dot, vco, n, m, m1, m2, p, p1;
+
+ struct {
+ int dot_limit;
+ int p2_slow, p2_fast;
+ } p2;
+};
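+
+/*
+ * Note on reading these tables (illustrative): the frequencies are in
+ * kHz, so e.g. intel_limits_i8xx_dac below allows dot clocks from
+ * 25 MHz to 350 MHz and VCO frequencies from 908 MHz to 1.512 GHz.
+ */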
+static const struct intel_limit intel_limits_i8xx_dac = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 2 },
+};
+
+static const struct intel_limit intel_limits_i8xx_dvo = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 4 },
+};
+
+static const struct intel_limit intel_limits_i8xx_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 1, .max = 6 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 14, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_i9xx_sdvo = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit intel_limits_i9xx_lvds = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 7, .max = 98 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 7 },
+};
+
+
+static const struct intel_limit intel_limits_g4x_sdvo = {
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 1, .max = 3},
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 10,
+ .p2_fast = 10
+ },
+};
+
+static const struct intel_limit intel_limits_g4x_hdmi = {
+ .dot = { .min = 22000, .max = 400000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 16, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8},
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
+ .dot = { .min = 20000, .max = 115000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 14, .p2_fast = 14
+ },
+};
+
+static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
+ .dot = { .min = 80000, .max = 224000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 7, .p2_fast = 7
+ },
+};
+
+static const struct intel_limit pnv_limits_sdvo = {
+ .dot = { .min = 20000, .max = 400000},
+ .vco = { .min = 1700000, .max = 3500000 },
+ /* Pineview's Ncounter is a ring counter */
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ /* Pineview only has one combined m divider, which we treat as m2. */
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit pnv_limits_lvds = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1700000, .max = 3500000 },
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 7, .max = 112 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+/* Ironlake / Sandybridge
+ *
+ * We calculate clock using (register_value + 2) for N/M1/M2, so here
+ * the range value for them is (actual_value - 2).
+ */
+static const struct intel_limit ilk_limits_dac = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 5 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit ilk_limits_single_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 118 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+static const struct intel_limit ilk_limits_dual_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 56 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
+};
+
+/* LVDS 100 MHz refclk limits. */
+static const struct intel_limit ilk_limits_single_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 2 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+static const struct intel_limit ilk_limits_dual_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_vlv = {
+ /*
+ * These are the data rate limits (measured in fast clocks)
+ * since those are the strictest limits we have. The fast
+ * clock and actual rate limits are more relaxed, so checking
+ * them would make no difference.
+ */
+ .dot = { .min = 25000 * 5, .max = 270000 * 5 },
+ .vco = { .min = 4000000, .max = 6000000 },
+ .n = { .min = 1, .max = 7 },
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
+};
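+
+/*
+ * Note: the VLV/CHV "fast clock" is 5x the port clock, which is why the
+ * .dot limits above are multiplied by 5 and why vlv_calc_dpll_params()
+ * and chv_calc_dpll_params() below divide the dot clock back down by 5.
+ */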
+
+static const struct intel_limit intel_limits_chv = {
+ /*
+ * These are the data rate limits (measured in fast clocks)
+ * since those are the strictest limits we have. The fast
+ * clock and actual rate limits are more relaxed, so checking
+ * them would make no difference.
+ */
+ .dot = { .min = 25000 * 5, .max = 540000 * 5},
+ .vco = { .min = 4800000, .max = 6480000 },
+ .n = { .min = 1, .max = 1 },
+ .m1 = { .min = 2, .max = 2 },
+ .m2 = { .min = 24 << 22, .max = 175 << 22 },
+ .p1 = { .min = 2, .max = 4 },
+ .p2 = { .p2_slow = 1, .p2_fast = 14 },
+};
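+
+/*
+ * Note: on CHV, m2 is a fixed-point value with 22 fractional bits, which is
+ * why the m2 limits above and chv_calc_dpll_params() below use "<< 22".
+ */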
+
+static const struct intel_limit intel_limits_bxt = {
+ /* FIXME: find real dot limits */
+ .dot = { .min = 0, .max = INT_MAX },
+ .vco = { .min = 4800000, .max = 6700000 },
+ .n = { .min = 1, .max = 1 },
+ .m1 = { .min = 2, .max = 2 },
+ /* FIXME: find real m2 limits */
+ .m2 = { .min = 2 << 22, .max = 255 << 22 },
+ .p1 = { .min = 2, .max = 4 },
+ .p2 = { .p2_slow = 1, .p2_fast = 20 },
+};
+
+/*
+ * Platform specific helpers to calculate the port PLL loopback- (clock.m),
+ * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
+ * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
+ * The helpers' return value is the rate of the clock that is fed to the
+ * display engine's pipe which can be the above fast dot clock rate or a
+ * divided-down version of it.
+ */
+/* m1 is reserved as 0 in Pineview, n is a ring counter */
+int pnv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = clock->m2 + 2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot;
+}
+
+static u32 i9xx_dpll_compute_m(struct dpll *dpll)
+{
+ return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
+}
+
+int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = i9xx_dpll_compute_m(clock);
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot;
+}
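+
+/*
+ * Illustrative arithmetic for the i9xx formula above (numbers chosen only
+ * to demonstrate the calculation, not taken from any limit table): with a
+ * 96000 kHz refclk, m1=12, m2=5, n=2, p1=8 and p2=5 we get
+ * m = 5*(12+2) + (5+2) = 77, vco = 96000*77/(2+2) = 1848000 kHz,
+ * p = 8*5 = 40 and dot = 1848000/40 = 46200 kHz.
+ */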
+
+int vlv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot / 5;
+}
+
+int chv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
+ clock->n << 22);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot / 5;
+}
+
+/*
+ * Returns whether the given set of divisors are valid for a given refclk with
+ * the given connectors.
+ */
+static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
+ const struct intel_limit *limit,
+ const struct dpll *clock)
+{
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ return false;
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ return false;
+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+ return false;
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ return false;
+
+ if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
+ if (clock->m1 <= clock->m2)
+ return false;
+
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ !IS_GEN9_LP(dev_priv)) {
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ return false;
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ return false;
+ }
+
+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+ return false;
+ /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
+ * connector, etc., rather than just a single range.
+ */
+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+ return false;
+
+ return true;
+}
+
+static int
+i9xx_select_p2_div(const struct intel_limit *limit,
+ const struct intel_crtc_state *crtc_state,
+ int target)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ /*
+ * For LVDS just rely on its current settings for dual-channel.
+ * We haven't figured out how to reliably set up different
+ * single/dual channel state, if we even can.
+ */
+ if (intel_is_dual_link_lvds(dev_priv))
+ return limit->p2.p2_fast;
+ else
+ return limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ return limit->p2.p2_slow;
+ else
+ return limit->p2.p2_fast;
+ }
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+i9xx_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct dpll clock;
+ int err = target;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ if (clock.m2 >= clock.m1)
+ break;
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ int this_err;
+
+ i9xx_calc_dpll_params(refclk, &clock);
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return (err != target);
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+pnv_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct dpll clock;
+ int err = target;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ int this_err;
+
+ pnv_calc_dpll_params(refclk, &clock);
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return (err != target);
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+g4x_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct dpll clock;
+ int max_n;
+ bool found = false;
+ /* approximately equals target * 0.00585 (i.e. 1/256 + 1/512 of the target) */
+ int err_most = (target >> 8) + (target >> 9);
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+ max_n = limit->n.max;
+ /* based on hardware requirement, prefer smaller n to precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ /* based on hardware requirement, prefer larger m1,m2 */
+ for (clock.m1 = limit->m1.max;
+ clock.m1 >= limit->m1.min; clock.m1--) {
+ for (clock.m2 = limit->m2.max;
+ clock.m2 >= limit->m2.min; clock.m2--) {
+ for (clock.p1 = limit->p1.max;
+ clock.p1 >= limit->p1.min; clock.p1--) {
+ int this_err;
+
+ i9xx_calc_dpll_params(refclk, &clock);
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err_most) {
+ *best_clock = clock;
+ err_most = this_err;
+ max_n = clock.n;
+ found = true;
+ }
+ }
+ }
+ }
+ }
+ return found;
+}
+
+/*
+ * Check if the calculated PLL configuration is more optimal compared to the
+ * best configuration and error found so far. Return the calculated error.
+ */
+static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+ const struct dpll *calculated_clock,
+ const struct dpll *best_clock,
+ unsigned int best_error_ppm,
+ unsigned int *error_ppm)
+{
+ /*
+ * For CHV ignore the error and consider only the P value.
+ * Prefer a bigger P value based on HW requirements.
+ */
+ if (IS_CHERRYVIEW(to_i915(dev))) {
+ *error_ppm = 0;
+
+ return calculated_clock->p > best_clock->p;
+ }
+
+ if (drm_WARN_ON_ONCE(dev, !target_freq))
+ return false;
+
+ *error_ppm = div_u64(1000000ULL *
+ abs(target_freq - calculated_clock->dot),
+ target_freq);
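+ /*
+ * e.g. (illustrative numbers only): a target of 100000 kHz with a
+ * calculated dot clock of 99990 kHz gives an error of 100 ppm.
+ */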
+ /*
+ * Prefer a better P value over a better (smaller) error if the error
+ * is small. Ensure this preference for future configurations too by
+ * setting the error to 0.
+ */
+ if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
+ *error_ppm = 0;
+
+ return true;
+ }
+
+ return *error_ppm + 10 < best_error_ppm;
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+static bool
+vlv_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_device *dev = crtc->base.dev;
+ struct dpll clock;
+ unsigned int bestppm = 1000000;
+ /* min update 19.2 MHz */
+ int max_n = min(limit->n.max, refclk / 19200);
+ bool found = false;
+
+ target *= 5; /* fast clock */
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ /* based on hardware requirement, prefer smaller n to precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
+ clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+ clock.p = clock.p1 * clock.p2;
+ /* based on hardware requirement, prefer bigger m1,m2 values */
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+ unsigned int ppm;
+
+ clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
+ refclk * clock.m1);
+
+ vlv_calc_dpll_params(refclk, &clock);
+
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+
+ if (!vlv_PLL_is_optimal(dev, target,
+ &clock,
+ best_clock,
+ bestppm, &ppm))
+ continue;
+
+ *best_clock = clock;
+ bestppm = ppm;
+ found = true;
+ }
+ }
+ }
+ }
+
+ return found;
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+static bool
+chv_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_device *dev = crtc->base.dev;
+ unsigned int best_error_ppm;
+ struct dpll clock;
+ u64 m2;
+ bool found = false;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+ best_error_ppm = 1000000;
+
+ /*
+ * Based on the hardware documentation, n is always set to 1 and m1 is
+ * always set to 2. If we ever need to support a 200 MHz refclk, we will
+ * have to revisit this because n may no longer be 1.
+ */
+ clock.n = 1;
+ clock.m1 = 2;
+ target *= 5; /* fast clock */
+
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.p2 = limit->p2.p2_fast;
+ clock.p2 >= limit->p2.p2_slow;
+ clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+ unsigned int error_ppm;
+
+ clock.p = clock.p1 * clock.p2;
+
+ m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
+ refclk * clock.m1);
+
+ if (m2 > INT_MAX/clock.m1)
+ continue;
+
+ clock.m2 = m2;
+
+ chv_calc_dpll_params(refclk, &clock);
+
+ if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
+ continue;
+
+ if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+ best_error_ppm, &error_ppm))
+ continue;
+
+ *best_clock = clock;
+ best_error_ppm = error_ppm;
+ found = true;
+ }
+ }
+
+ return found;
+}
+
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
+ struct dpll *best_clock)
+{
+ int refclk = 100000;
+ const struct intel_limit *limit = &intel_limits_bxt;
+
+ return chv_find_best_dpll(limit, crtc_state,
+ crtc_state->port_clock, refclk,
+ NULL, best_clock);
+}
+
+static u32 pnv_dpll_compute_fp(struct dpll *dpll)
+{
+ return (1 << dpll->n) << 16 | dpll->m2;
+}
+
+static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 fp, fp2 = 0;
+
+ if (IS_PINEVIEW(dev_priv)) {
+ fp = pnv_dpll_compute_fp(&crtc_state->dpll);
+ if (reduced_clock)
+ fp2 = pnv_dpll_compute_fp(reduced_clock);
+ } else {
+ fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
+ if (reduced_clock)
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
+ }
+
+ crtc_state->dpll_hw_state.fp0 = fp;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ reduced_clock) {
+ crtc_state->dpll_hw_state.fp1 = fp2;
+ } else {
+ crtc_state->dpll_hw_state.fp1 = fp;
+ }
+}
+
+static void i9xx_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dpll;
+ struct dpll *clock = &crtc_state->dpll;
+
+ i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+ dpll |= (crtc_state->pixel_multiplier - 1)
+ << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ if (IS_PINEVIEW(dev_priv))
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+ else {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (IS_G4X(dev_priv) && reduced_clock)
+ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ }
+ switch (clock->p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+ if (INTEL_GEN(dev_priv) >= 4)
+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+
+ if (crtc_state->sdvo_tv_clock)
+ dpll |= PLL_REF_INPUT_TVCLKINBC;
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv))
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ crtc_state->dpll_hw_state.dpll = dpll;
+
+ if (INTEL_GEN(dev_priv) >= 4) {
+ u32 dpll_md = (crtc_state->pixel_multiplier - 1)
+ << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ crtc_state->dpll_hw_state.dpll_md = dpll_md;
+ }
+}
+
+static void i8xx_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 dpll;
+ struct dpll *clock = &crtc_state->dpll;
+
+ i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ } else {
+ if (clock->p1 == 2)
+ dpll |= PLL_P1_DIVIDE_BY_TWO;
+ else
+ dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (clock->p2 == 4)
+ dpll |= PLL_P2_DIVIDE_BY_4;
+ }
+
+ /*
+ * Bspec:
+ * "[Almador Errata]: For the correct operation of the muxed DVO pins
+ * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
+ * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
+ * Enable) must be set to "1" in both the DPLL A Control Register
+ * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
+ *
+ * For simplicity we keep both bits always enabled in both DPLLs.
+ * The spec says we should disable the DVO 2X clock when not needed,
+ * but this seems to work fine in practice.
+ */
+ if (IS_I830(dev_priv) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
+ dpll |= DPLL_DVO_2X_MODE;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv))
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ crtc_state->dpll_hw_state.dpll = dpll;
+}
+
+static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->uapi.state);
+
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
+ INTEL_GEN(dev_priv) >= 11) {
+ struct intel_encoder *encoder =
+ intel_get_crtc_new_encoder(state, crtc_state);
+
+ if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
+{
+ return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
+}
+
+static void ilk_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct dpll *reduced_clock)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dpll, fp, fp2;
+ int factor;
+
+ /* Enable autotuning of the PLL clock (if permissible) */
+ factor = 21;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if ((intel_panel_use_ssc(dev_priv) &&
+ dev_priv->vbt.lvds_ssc_freq == 100000) ||
+ (HAS_PCH_IBX(dev_priv) &&
+ intel_is_dual_link_lvds(dev_priv)))
+ factor = 25;
+ } else if (crtc_state->sdvo_tv_clock) {
+ factor = 20;
+ }
+
+ fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
+
+ if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
+ fp |= FP_CB_TUNE;
+
+ if (reduced_clock) {
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
+
+ if (reduced_clock->m < factor * reduced_clock->n)
+ fp2 |= FP_CB_TUNE;
+ } else {
+ fp2 = fp;
+ }
+
+ dpll = 0;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+
+ dpll |= (crtc_state->pixel_multiplier - 1)
+ << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ /*
+ * The high speed IO clock is only really required for
+ * SDVO/HDMI/DP, but we also enable it for CRT to make it
+ * possible to share the DPLL between CRT and HDMI. Enabling
+ * the clock needlessly does no real harm, except potentially using
+ * up a bit of power.
+ *
+ * We'll limit this to IVB with 3 pipes, since it has only two
+ * DPLLs and so DPLL sharing is the only way to get three pipes
+ * driving PCH ports at the same time. On SNB we could do this,
+ * and potentially avoid enabling the second DPLL, but it's not
+ * clear if it's a win or a loss power-wise. No point in doing
+ * this on ILK at all since it has a fixed DPLL<->pipe mapping.
+ */
+ if (INTEL_NUM_PIPES(dev_priv) == 3 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ /* also FPA1 */
+ dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+
+ switch (crtc_state->dpll.p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv))
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+
+ crtc_state->dpll_hw_state.dpll = dpll;
+ crtc_state->dpll_hw_state.fp0 = fp;
+ crtc_state->dpll_hw_state.fp1 = fp2;
+}
+
+static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->uapi.state);
+ const struct intel_limit *limit;
+ int refclk = 120000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+ if (!crtc_state->has_pch_encoder)
+ return 0;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ dev_priv->vbt.lvds_ssc_freq);
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ }
+
+ if (intel_is_dual_link_lvds(dev_priv)) {
+ if (refclk == 100000)
+ limit = &ilk_limits_dual_lvds_100m;
+ else
+ limit = &ilk_limits_dual_lvds;
+ } else {
+ if (refclk == 100000)
+ limit = &ilk_limits_single_lvds_100m;
+ else
+ limit = &ilk_limits_single_lvds;
+ }
+ } else {
+ limit = &ilk_limits_dac;
+ }
+
+ if (!crtc_state->clock_set &&
+ !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ ilk_compute_dpll(crtc, crtc_state, NULL);
+
+ if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void vlv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (crtc->pipe != PIPE_A)
+ pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ /* DPLL not used with DSI, but still need the rest set up */
+ if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
+ pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
+ DPLL_EXT_BUFFER_ENABLE_VLV;
+
+ pipe_config->dpll_hw_state.dpll_md =
+ (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+void chv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (crtc->pipe != PIPE_A)
+ pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ /* DPLL not used with DSI, but still need the rest set up */
+ if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
+ pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
+
+ pipe_config->dpll_hw_state.dpll_md =
+ (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+static int chv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ int refclk = 100000;
+ const struct intel_limit *limit = &intel_limits_chv;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (!crtc_state->clock_set &&
+ !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ chv_compute_dpll(crtc, crtc_state);
+
+ return 0;
+}
+
+static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ int refclk = 100000;
+ const struct intel_limit *limit = &intel_limits_vlv;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (!crtc_state->clock_set &&
+ !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ vlv_compute_dpll(crtc, crtc_state);
+
+ return 0;
+}
+
+static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_limit *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ if (intel_is_dual_link_lvds(dev_priv))
+ limit = &intel_limits_g4x_dual_channel_lvds;
+ else
+ limit = &intel_limits_g4x_single_channel_lvds;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+ limit = &intel_limits_g4x_hdmi;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
+ limit = &intel_limits_g4x_sdvo;
+ } else {
+ /* Fall back to the i9xx limits for any other output type. */
+ limit = &intel_limits_i9xx_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ limit = &pnv_limits_lvds;
+ } else {
+ limit = &pnv_limits_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ limit = &intel_limits_i9xx_lvds;
+ } else {
+ limit = &intel_limits_i9xx_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
+ int refclk = 48000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ limit = &intel_limits_i8xx_lvds;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
+ limit = &intel_limits_i8xx_dvo;
+ } else {
+ limit = &intel_limits_i8xx_dac;
+ }
+
+ if (!crtc_state->clock_set &&
+ !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i8xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+void
+intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) >= 9 || HAS_DDI(dev_priv))
+ dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ dev_priv->display.crtc_compute_clock = ilk_crtc_compute_clock;
+ else if (IS_CHERRYVIEW(dev_priv))
+ dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
+ else if (IS_VALLEYVIEW(dev_priv))
+ dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
+ else if (IS_G4X(dev_priv))
+ dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
+ else if (IS_PINEVIEW(dev_priv))
+ dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
+ else if (!IS_GEN(dev_priv, 2))
+ dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
+ else
+ dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
new file mode 100644
index 000000000000..caf4615092e1
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_DPLL_H_
+#define _INTEL_DPLL_H_
+
+struct dpll;
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+
+void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
+int vlv_calc_dpll_params(int refclk, struct dpll *clock);
+int pnv_calc_dpll_params(int refclk, struct dpll *clock);
+int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
+void vlv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+void chv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index b53c50372918..584c14c4cbd0 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -43,7 +43,7 @@
#define PANEL_PWM_MAX_VALUE 0xFF
-static u32 dcs_get_backlight(struct intel_connector *connector)
+static u32 dcs_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
@@ -77,7 +77,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
}
}
-static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
+static void dcs_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
struct mipi_dsi_device *dsi_device;
@@ -111,10 +111,9 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state)
}
static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
- struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
struct mipi_dsi_device *dsi_device;
enum port port;
@@ -142,7 +141,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
&cabc, sizeof(cabc));
}
- dcs_set_backlight(conn_state, panel->backlight.level);
+ dcs_set_backlight(conn_state, level);
}
static int dcs_setup_backlight(struct intel_connector *connector,
@@ -156,6 +155,14 @@ static int dcs_setup_backlight(struct intel_connector *connector,
return 0;
}
+static const struct intel_panel_bl_funcs dcs_bl_funcs = {
+ .setup = dcs_setup_backlight,
+ .enable = dcs_enable_backlight,
+ .disable = dcs_disable_backlight,
+ .set = dcs_set_backlight,
+ .get = dcs_get_backlight,
+};
+
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
@@ -169,11 +176,7 @@ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI))
return -EINVAL;
- panel->backlight.setup = dcs_setup_backlight;
- panel->backlight.enable = dcs_enable_backlight;
- panel->backlight.disable = dcs_disable_backlight;
- panel->backlight.set = dcs_set_backlight;
- panel->backlight.get = dcs_get_backlight;
+ panel->backlight.funcs = &dcs_bl_funcs;
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 237dbb1ba0ee..090cd76266c6 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -301,12 +301,8 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
- /*I915_WRITE(DVOB_SRCDIM,
- (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
- (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
intel_de_write(dev_priv, dvo_srcdim_reg,
(adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
- /*I915_WRITE(DVOB, dvo_val);*/
intel_de_write(dev_priv, dvo_reg, dvo_val);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index a5b072816a7b..5fd4fa4805ef 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -676,7 +676,7 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
}
static bool tiling_is_valid(struct drm_i915_private *dev_priv,
- uint64_t modifier)
+ u64 modifier)
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
@@ -742,6 +742,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fence_id = plane_state->vma->fence->id;
else
cache->fence_id = -1;
+
+ cache->psr2_active = crtc_state->has_psr2;
}
static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
@@ -914,6 +916,16 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
+ /*
+ * Tigerlake does not support FBC together with PSR2.
+ * The recommendation is to keep this combination disabled.
+ * Bspec: 50422 HSD: 14010260002
+ */
+ if (fbc->state_cache.psr2_active && IS_TIGERLAKE(dev_priv)) {
+ fbc->no_fbc_reason = "not supported with PSR2";
+ return false;
+ }
+
return true;
}
@@ -1433,13 +1445,6 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
if (!HAS_FBC(dev_priv))
return 0;
- /*
- * Fbc is causing random underruns in CI execution on TGL platforms.
- * Disabling the same while the problem is being debugged and analyzed.
- */
- if (IS_TIGERLAKE(dev_priv))
- return 0;
-
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
return 1;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 842c04e63214..84f853f113b9 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -256,7 +256,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
- if (vma->obj->stolen && !prealloc)
+ if (!i915_gem_object_is_shmem(vma->obj) && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -595,7 +595,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
* full of whatever garbage was left in there.
*/
if (state == FBINFO_STATE_RUNNING &&
- intel_fb_obj(&ifbdev->fb->base)->stolen)
+ !i915_gem_object_is_shmem(intel_fb_obj(&ifbdev->fb->base)))
memset_io(info->screen_base, 0, info->screen_size);
drm_fb_helper_set_suspend(&ifbdev->helper, state);
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
new file mode 100644
index 000000000000..b2eb96ae10a2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -0,0 +1,683 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include "intel_atomic.h"
+#include "intel_display_types.h"
+#include "intel_fdi.h"
+
+/* units of 100MHz */
+static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
+ return crtc_state->fdi_lanes;
+
+ return 0;
+}
+
+static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_atomic_state *state = pipe_config->uapi.state;
+ struct intel_crtc *other_crtc;
+ struct intel_crtc_state *other_crtc_state;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "checking fdi config on pipe %c, lanes %i\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ if (pipe_config->fdi_lanes > 4) {
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return -EINVAL;
+ }
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ if (pipe_config->fdi_lanes > 2) {
+ drm_dbg_kms(&dev_priv->drm,
+ "only 2 lanes on haswell, required: %i lanes\n",
+ pipe_config->fdi_lanes);
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ }
+
+ if (INTEL_NUM_PIPES(dev_priv) == 2)
+ return 0;
+
+ /* Ivybridge 3 pipe is really complicated */
+ switch (pipe) {
+ case PIPE_A:
+ return 0;
+ case PIPE_B:
+ if (pipe_config->fdi_lanes <= 2)
+ return 0;
+
+ other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
+ other_crtc_state =
+ intel_atomic_get_crtc_state(state, other_crtc);
+ if (IS_ERR(other_crtc_state))
+ return PTR_ERR(other_crtc_state);
+
+ if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid shared fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return -EINVAL;
+ }
+ return 0;
+ case PIPE_C:
+ if (pipe_config->fdi_lanes > 2) {
+ drm_dbg_kms(&dev_priv->drm,
+ "only 2 lanes on pipe %c: required %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return -EINVAL;
+ }
+
+ other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
+ other_crtc_state =
+ intel_atomic_get_crtc_state(state, other_crtc);
+ if (IS_ERR(other_crtc_state))
+ return PTR_ERR(other_crtc_state);
+
+ if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
+ drm_dbg_kms(&dev_priv->drm,
+ "fdi link B uses too many lanes to enable link C\n");
+ return -EINVAL;
+ }
+ return 0;
+ default:
+ BUG();
+ }
+}
+
+int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *i915 = to_i915(dev);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ int lane, link_bw, fdi_dotclock, ret;
+ bool needs_recompute = false;
+
+retry:
+ /* FDI is a binary signal running at ~2.7 GHz, encoding
+ * each output octet as 10 bits. The actual frequency
+ * is stored as a divider into a 100 MHz clock, and the
+ * mode pixel clock is stored in units of 1 kHz.
+ * Hence the bandwidth of each lane in terms of the mode
+ * signal is:
+ */
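+ /*
+ * As a rough illustration: a ~2.7 GHz signal carrying 10 bits per octet
+ * gives about 2.7 * 8/10 = 2.16 Gb/s of payload bandwidth per FDI lane.
+ */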
+ link_bw = intel_fdi_link_freq(i915, pipe_config);
+
+ fdi_dotclock = adjusted_mode->crtc_clock;
+
+ lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
+ pipe_config->pipe_bpp);
+
+ pipe_config->fdi_lanes = lane;
+
+ intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
+ link_bw, &pipe_config->fdi_m_n, false, false);
+
+ ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
+ if (ret == -EDEADLK)
+ return ret;
+
+ if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
+ pipe_config->pipe_bpp -= 2*3;
+ drm_dbg_kms(&i915->drm,
+ "fdi link bw constraint, reducing pipe bpp to %i\n",
+ pipe_config->pipe_bpp);
+ needs_recompute = true;
+ pipe_config->bw_constrained = true;
+
+ goto retry;
+ }
+
+ if (needs_recompute)
+ return I915_DISPLAY_CONFIG_RETRY;
+
+ return ret;
+}
+
+void intel_fdi_normal_train(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* enable normal train */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (IS_IVYBRIDGE(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+ }
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+ /* wait one idle pattern time */
+ intel_de_posting_read(dev_priv, reg);
+ udelay(1000);
+
+ /* IVB wants error correction enabled */
+ if (IS_IVYBRIDGE(dev_priv))
+ intel_de_write(dev_priv, reg,
+ intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
+}
+
+/* The FDI link training functions for ILK/Ibexpeak. */
+static void ilk_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp, tries;
+
+ /* FDI needs bits from pipe first */
+ assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for the training result */
+ reg = FDI_RX_IMR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ intel_de_write(dev_priv, reg, temp);
+ intel_de_read(dev_priv, reg);
+ udelay(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ /* Ironlake workaround, enable clock pointer after FDI enable */
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR);
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if ((temp & FDI_RX_BIT_LOCK)) {
+ drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
+ intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
+ break;
+ }
+ }
+ if (tries == 5)
+ drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (tries == 5)
+ drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
+
+ drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
+
+}
+
+static const int snb_b_fdi_train_param[] = {
+ FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+ FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+ FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp, i, retry;
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for the training result */
+ reg = FDI_RX_IMR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+ intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(500);
+
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_BIT_LOCK) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 done.\n");
+ break;
+ }
+ udelay(50);
+ }
+ if (retry < 5)
+ break;
+ }
+ if (i == 4)
+ drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ if (IS_GEN(dev_priv, 6)) {
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ }
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(500);
+
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 done.\n");
+ break;
+ }
+ udelay(50);
+ }
+ if (retry < 5)
+ break;
+ }
+ if (i == 4)
+ drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
+
+ drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
+}
+
+/* Manual link training for Ivy Bridge A0 parts */
+static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp, i, j;
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for the training result */
+ reg = FDI_RX_IMR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
+ intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
+
+ /* Try each vswing and preemphasis setting twice before moving on */
+ for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
+ /* disable first in case we need to retry */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
+ temp &= ~FDI_TX_ENABLE;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_AUTO;
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp &= ~FDI_RX_ENABLE;
+ intel_de_write(dev_priv, reg, temp);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[j/2];
+ temp |= FDI_COMPOSITE_SYNC;
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+ intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ temp |= FDI_COMPOSITE_SYNC;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(1); /* should be 0.5us */
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_BIT_LOCK ||
+ (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 done, level %i.\n",
+ i);
+ break;
+ }
+ udelay(1); /* should be 0.5us */
+ }
+ if (i == 4) {
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 fail on vswing %d\n", j / 2);
+ continue;
+ }
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(2); /* should be 1.5us */
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK ||
+ (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 done, level %i.\n",
+ i);
+ goto train_done;
+ }
+ udelay(2); /* should be 1.5us */
+ }
+ if (i == 4)
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 fail on vswing %d\n", j / 2);
+ }
+
+train_done:
+ drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
+}
+
+void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ enum pipe pipe = intel_crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(200);
+
+ /* Switch from Rawclk to PCDclk */
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(200);
+
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+ }
+}
+
+void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = intel_crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* Switch from PCDclk to Rawclk */
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
+
+ /* Disable CPU FDI TX PLL */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
+
+ /* Wait for the clocks to turn off. */
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+}
+
+void ilk_fdi_disable(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
+ intel_de_posting_read(dev_priv, reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(0x7 << 16);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev_priv))
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR);
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+}
+
+void
+intel_fdi_init_hook(struct drm_i915_private *dev_priv)
+{
+ if (IS_GEN(dev_priv, 5)) {
+ dev_priv->display.fdi_link_train = ilk_fdi_link_train;
+ } else if (IS_GEN(dev_priv, 6)) {
+ dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+ } else if (IS_IVYBRIDGE(dev_priv)) {
+ /* FIXME: detect B0+ stepping and use auto training */
+ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h
new file mode 100644
index 000000000000..a9cd21663eb8
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fdi.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_FDI_H_
+#define _INTEL_FDI_H_
+
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+
+#define I915_DISPLAY_CONFIG_RETRY 1
+int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *pipe_config);
+void intel_fdi_normal_train(struct intel_crtc *crtc);
+void ilk_fdi_disable(struct intel_crtc *crtc);
+void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc);
+void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state);
+void intel_fdi_init_hook(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index d898b370d7a4..7b38eee9980f 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -225,8 +225,10 @@ static void frontbuffer_release(struct kref *ref)
struct i915_vma *vma;
spin_lock(&obj->vma.lock);
- for_each_ggtt_vma(vma, obj)
+ for_each_ggtt_vma(vma, obj) {
+ i915_vma_clear_scanout(vma);
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+ }
spin_unlock(&obj->vma.lock);
RCU_INIT_POINTER(obj->frontbuffer, NULL);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index b9d8825e2bb1..ae1371c36a32 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -15,6 +15,7 @@
#include <drm/drm_hdcp.h>
#include <drm/i915_component.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
@@ -23,9 +24,78 @@
#include "intel_connector.h"
#define KEY_LOAD_TRIES 5
-#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
#define HDCP2_LC_RETRY_CNT 3
+static int intel_conn_to_vcpi(struct intel_connector *connector)
+{
+ /* For HDMI this is forced to be 0x0. For DP SST this is also 0x0. */
+ return connector->port ? connector->port->vcpi.vcpi : 0;
+}
+
+/*
+ * intel_hdcp_required_content_stream selects the highest common HDCP
+ * content_type for all streams in a DP MST topology, because the security
+ * firmware has no provision to mark a content_type for each stream
+ * separately; it marks all available streams with the content_type provided
+ * at the time of port authentication. This may prevent userspace from using
+ * type1 content on an HDCP 2.2 capable sink when other sinks in the DP MST
+ * topology are not HDCP 2.2 capable. Though it is not compulsory, the
+ * security firmware should change its policy to allow different
+ * content_types for different streams.
+ */
+static int
+intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct intel_digital_port *conn_dig_port;
+ struct intel_connector *connector;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ bool enforce_type0 = false;
+ int k;
+
+ data->k = 0;
+
+ if (dig_port->hdcp_auth_status)
+ return 0;
+
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ if (connector->base.status == connector_status_disconnected)
+ continue;
+
+ if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
+ continue;
+
+ conn_dig_port = intel_attached_dig_port(connector);
+ if (conn_dig_port != dig_port)
+ continue;
+
+ if (!enforce_type0 && !intel_hdcp2_capable(connector))
+ enforce_type0 = true;
+
+ data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
+ data->k++;
+
+ /* if there is only one active stream */
+ if (dig_port->dp.active_mst_links <= 1)
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
+ return -EINVAL;
+
+ /*
+ * Apply common protection level across all streams in DP MST Topology.
+ * Use highest supported content type for all streams in DP MST Topology.
+ */
+ for (k = 0; k < data->k; k++)
+ data->streams[k].stream_type =
+ enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
+
+ return 0;
+}
+
static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
{
@@ -762,15 +832,22 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (intel_de_wait_for_set(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_ENC,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
- /*
- * XXX: If we have MST-connected devices, we need to enable encryption
- * on those as well.
- */
+ /* DP MST Auth Part 1 Step 2.a and Step 2.b */
+ if (shim->stream_encryption) {
+ ret = shim->stream_encryption(connector, true);
+ if (ret) {
+ drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
+ transcoder_name(hdcp->stream_transcoder));
+ }
if (repeater_present)
return intel_hdcp_auth_downstream(connector);
@@ -792,24 +869,29 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
connector->base.name, connector->base.base.id);
- /*
- * If there are other connectors on this port using HDCP, don't disable
- * it. Instead, toggle the HDCP signalling off on that particular
- * connector/pipe and exit.
- */
- if (dig_port->num_hdcp_streams > 0) {
- ret = hdcp->shim->toggle_signalling(dig_port,
- cpu_transcoder, false);
- if (ret)
- DRM_ERROR("Failed to disable HDCP signalling\n");
- return ret;
+ if (hdcp->shim->stream_encryption) {
+ ret = hdcp->shim->stream_encryption(connector, false);
+ if (ret) {
+ drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
+ transcoder_name(hdcp->stream_transcoder));
+ /*
+ * If there are other connectors on this port using HDCP,
+ * don't disable it until HDCP encryption has been disabled
+ * for all connectors in the MST topology.
+ */
+ if (dig_port->num_hdcp_streams > 0)
+ return 0;
}
hdcp->hdcp_encrypted = false;
intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
if (intel_de_wait_for_clear(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
- ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(&dev_priv->drm,
"Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
@@ -1014,7 +1096,8 @@ static int
hdcp2_prepare_ake_init(struct intel_connector *connector,
struct hdcp2_ake_init *ake_data)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1043,7 +1126,8 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
struct hdcp2_ake_no_stored_km *ek_pub_km,
size_t *msg_sz)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1070,7 +1154,8 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
static int hdcp2_verify_hprime(struct intel_connector *connector,
struct hdcp2_ake_send_hprime *rx_hprime)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1095,7 +1180,8 @@ static int
hdcp2_store_pairing_info(struct intel_connector *connector,
struct hdcp2_ake_send_pairing_info *pairing_info)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1121,7 +1207,8 @@ static int
hdcp2_prepare_lc_init(struct intel_connector *connector,
struct hdcp2_lc_init *lc_init)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1147,7 +1234,8 @@ static int
hdcp2_verify_lprime(struct intel_connector *connector,
struct hdcp2_lc_send_lprime *rx_lprime)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1172,7 +1260,8 @@ hdcp2_verify_lprime(struct intel_connector *connector,
static int hdcp2_prepare_skey(struct intel_connector *connector,
struct hdcp2_ske_send_eks *ske_data)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1200,7 +1289,8 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
*rep_topology,
struct hdcp2_rep_send_ack *rep_send_ack)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1228,7 +1318,8 @@ static int
hdcp2_verify_mprime(struct intel_connector *connector,
struct hdcp2_rep_stream_ready *stream_ready)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1251,7 +1342,8 @@ hdcp2_verify_mprime(struct intel_connector *connector,
static int hdcp2_authenticate_port(struct intel_connector *connector)
{
- struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1275,6 +1367,7 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
static int hdcp2_close_mei_session(struct intel_connector *connector)
{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
int ret;
@@ -1288,7 +1381,7 @@ static int hdcp2_close_mei_session(struct intel_connector *connector)
}
ret = comp->ops->close_hdcp_session(comp->mei_dev,
- &connector->hdcp.port_data);
+ &dig_port->hdcp_port_data);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1448,13 +1541,14 @@ static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_rep_stream_manage stream_manage;
struct hdcp2_rep_stream_ready stream_ready;
} msgs;
const struct intel_hdcp_shim *shim = hdcp->shim;
- int ret;
+ int ret, streams_size_delta, i;
if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
return -ERANGE;
@@ -1463,16 +1557,18 @@ int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
- /* K no of streams is fixed as 1. Stored as big-endian. */
- msgs.stream_manage.k = cpu_to_be16(1);
+ msgs.stream_manage.k = cpu_to_be16(data->k);
- /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
- msgs.stream_manage.streams[0].stream_id = 0;
- msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
+ for (i = 0; i < data->k; i++) {
+ msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
+ msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
+ }
+ streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
+ sizeof(struct hdcp2_streamid_type);
/* Send it to Repeater */
ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
- sizeof(msgs.stream_manage));
+ sizeof(msgs.stream_manage) - streams_size_delta);
if (ret < 0)
goto out;
@@ -1481,8 +1577,8 @@ int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
if (ret < 0)
goto out;
- hdcp->port_data.seq_num_m = hdcp->seq_num_m;
- hdcp->port_data.streams[0].stream_type = hdcp->content_type;
+ data->seq_num_m = hdcp->seq_num_m;
+
ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
out:
@@ -1606,6 +1702,36 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
return ret;
}
+static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
+ enum port port = dig_port->base.port;
+ int ret = 0;
+
+ if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS)) {
+ drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
+ connector->base.name, connector->base.base.id);
+ return -EPERM;
+ }
+
+ if (hdcp->shim->stream_2_2_encryption) {
+ ret = hdcp->shim->stream_2_2_encryption(connector, true);
+ if (ret) {
+ drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
+ transcoder_name(hdcp->stream_transcoder));
+ }
+
+ return ret;
+}
+
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -1641,7 +1767,8 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
HDCP2_STATUS(dev_priv, cpu_transcoder,
port),
LINK_ENCRYPTION_STATUS,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ dig_port->hdcp_auth_status = true;
return ret;
}
@@ -1665,7 +1792,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
HDCP2_STATUS(dev_priv, cpu_transcoder,
port),
LINK_ENCRYPTION_STATUS,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");
@@ -1714,11 +1841,11 @@ hdcp2_propagate_stream_management_info(struct intel_connector *connector)
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_hdcp *hdcp = &connector->hdcp;
- int ret, i, tries = 3;
+ int ret = 0, i, tries = 3;
- for (i = 0; i < tries; i++) {
+ for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
ret = hdcp2_authenticate_sink(connector);
if (!ret) {
ret = hdcp2_propagate_stream_management_info(connector);
@@ -1728,8 +1855,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
ret);
break;
}
- hdcp->port_data.streams[0].stream_type =
- hdcp->content_type;
+
ret = hdcp2_authenticate_port(connector);
if (!ret)
break;
@@ -1744,7 +1870,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
}
- if (!ret) {
+ if (!ret && !dig_port->hdcp_auth_status) {
/*
* Ensuring the required 200mSec min time interval between
* Session Key Exchange and encryption.
@@ -1759,12 +1885,16 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
}
}
+ ret = hdcp2_enable_stream_encryption(connector);
+
return ret;
}
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
@@ -1772,6 +1902,16 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
connector->base.name, connector->base.base.id,
hdcp->content_type);
+ /* Stream which requires encryption */
+ if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
+ data->k = 1;
+ data->streams[0].stream_type = hdcp->content_type;
+ } else {
+ ret = intel_hdcp_required_content_stream(dig_port);
+ if (ret)
+ return ret;
+ }
+
ret = hdcp2_authenticate_and_encrypt(connector);
if (ret) {
drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
@@ -1789,18 +1929,37 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
static int _intel_hdcp2_disable(struct intel_connector *connector)
{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
connector->base.name, connector->base.base.id);
+ if (hdcp->shim->stream_2_2_encryption) {
+ ret = hdcp->shim->stream_2_2_encryption(connector, false);
+ if (ret) {
+ drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
+ transcoder_name(hdcp->stream_transcoder));
+
+ if (dig_port->num_hdcp_streams > 0)
+ return 0;
+ }
+
ret = hdcp2_disable_encryption(connector);
if (hdcp2_deauthenticate_port(connector) < 0)
drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
connector->hdcp.hdcp2_encrypted = false;
+ dig_port->hdcp_auth_status = false;
+ data->k = 0;
return ret;
}
@@ -1816,6 +1975,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
int ret = 0;
mutex_lock(&hdcp->mutex);
+ mutex_lock(&dig_port->hdcp_mutex);
cpu_transcoder = hdcp->cpu_transcoder;
/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
@@ -1837,7 +1997,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
- ret = hdcp->shim->check_2_2_link(dig_port);
+ ret = hdcp->shim->check_2_2_link(dig_port, connector);
if (ret == HDCP_LINK_PROTECTED) {
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
intel_hdcp_update_value(connector,
@@ -1893,6 +2053,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
}
out:
+ mutex_unlock(&dig_port->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
return ret;
}
@@ -1968,12 +2129,13 @@ static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
}
static int initialize_hdcp_port_data(struct intel_connector *connector,
- enum port port,
+ struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
- struct hdcp_port_data *data = &hdcp->port_data;
+ enum port port = dig_port->base.port;
if (INTEL_GEN(dev_priv) < 12)
data->fw_ddi = intel_get_mei_fw_ddi_index(port);
@@ -1994,16 +2156,15 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
data->protocol = (u8)shim->protocol;
- data->k = 1;
if (!data->streams)
- data->streams = kcalloc(data->k,
+ data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv),
sizeof(struct hdcp2_streamid_type),
GFP_KERNEL);
if (!data->streams) {
drm_err(&dev_priv->drm, "Out of Memory\n");
return -ENOMEM;
}
-
+ /* For SST */
data->streams[0].stream_id = 0;
data->streams[0].stream_type = hdcp->content_type;
@@ -2046,14 +2207,15 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
}
}
-static void intel_hdcp2_init(struct intel_connector *connector, enum port port,
+static void intel_hdcp2_init(struct intel_connector *connector,
+ struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- ret = initialize_hdcp_port_data(connector, port, shim);
+ ret = initialize_hdcp_port_data(connector, dig_port, shim);
if (ret) {
drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
return;
@@ -2063,7 +2225,7 @@ static void intel_hdcp2_init(struct intel_connector *connector, enum port port,
}
int intel_hdcp_init(struct intel_connector *connector,
- enum port port,
+ struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -2073,15 +2235,15 @@ int intel_hdcp_init(struct intel_connector *connector,
if (!shim)
return -EINVAL;
- if (is_hdcp2_supported(dev_priv) && !connector->mst_port)
- intel_hdcp2_init(connector, port, shim);
+ if (is_hdcp2_supported(dev_priv))
+ intel_hdcp2_init(connector, dig_port, shim);
ret =
drm_connector_attach_content_protection_property(&connector->base,
hdcp->hdcp2_supported);
if (ret) {
hdcp->hdcp2_supported = false;
- kfree(hdcp->port_data.streams);
+ kfree(dig_port->hdcp_port_data.streams);
return ret;
}
@@ -2095,7 +2257,7 @@ int intel_hdcp_init(struct intel_connector *connector,
}
int intel_hdcp_enable(struct intel_connector *connector,
- enum transcoder cpu_transcoder, u8 content_type)
+ const struct intel_crtc_state *pipe_config, u8 content_type)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -2106,15 +2268,28 @@ int intel_hdcp_enable(struct intel_connector *connector,
if (!hdcp->shim)
return -ENOENT;
+ if (!connector->encoder) {
+ drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n",
+ connector->base.name, connector->base.base.id);
+ return -ENODEV;
+ }
+
mutex_lock(&hdcp->mutex);
mutex_lock(&dig_port->hdcp_mutex);
drm_WARN_ON(&dev_priv->drm,
hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
hdcp->content_type = content_type;
- hdcp->cpu_transcoder = cpu_transcoder;
+
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
+ hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
+ hdcp->stream_transcoder = pipe_config->cpu_transcoder;
+ } else {
+ hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
+ hdcp->stream_transcoder = INVALID_TRANSCODER;
+ }
if (INTEL_GEN(dev_priv) >= 12)
- hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
+ dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);
/*
* Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
@@ -2234,7 +2409,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
if (desired_and_not_enabled || content_protection_type_changed)
intel_hdcp_enable(connector,
- crtc_state->cpu_transcoder,
+ crtc_state,
(u8)conn_state->hdcp_content_type);
}
@@ -2284,7 +2459,6 @@ void intel_hdcp_cleanup(struct intel_connector *connector)
drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
mutex_lock(&hdcp->mutex);
- kfree(hdcp->port_data.streams);
hdcp->shim = NULL;
mutex_unlock(&hdcp->mutex);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 1bbf5b67ed0a..8f53b0c7fe5c 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+#define HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
+
struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
@@ -16,16 +18,18 @@ struct intel_connector;
struct intel_crtc_state;
struct intel_encoder;
struct intel_hdcp_shim;
+struct intel_digital_port;
enum port;
enum transcoder;
void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state);
-int intel_hdcp_init(struct intel_connector *connector, enum port port,
+int intel_hdcp_init(struct intel_connector *connector,
+ struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *hdcp_shim);
int intel_hdcp_enable(struct intel_connector *connector,
- enum transcoder cpu_transcoder, u8 content_type);
+ const struct intel_crtc_state *pipe_config, u8 content_type);
int intel_hdcp_disable(struct intel_connector *connector);
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 82674a8853c6..95919d325b0b 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -518,10 +518,10 @@ static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
-static void hsw_write_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- unsigned int type,
- const void *frame, ssize_t len)
+void hsw_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len)
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -555,10 +555,9 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, ctl_reg);
}
-static void hsw_read_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- unsigned int type,
- void *frame, ssize_t len)
+void hsw_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type, void *frame, ssize_t len)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -1495,15 +1494,16 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
usleep_range(25, 50);
}
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
- false);
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
+ false, TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
drm_err(&dev_priv->drm,
"Disable HDCP signalling failed (%d)\n", ret);
return ret;
}
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
- true);
+
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
+ true, TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
drm_err(&dev_priv->drm,
"Enable HDCP signalling failed (%d)\n", ret);
@@ -1526,8 +1526,9 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
if (!enable)
usleep_range(6, 60); /* Bspec says >= 6us */
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
- enable);
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base,
+ cpu_transcoder, enable,
+ TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
enable ? "Enable" : "Disable", ret);
@@ -1732,7 +1733,8 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *dig_port,
}
static
-int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port)
+int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
{
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
int ret;
@@ -2216,7 +2218,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
has_hdmi_sink))
return MODE_CLOCK_HIGH;
- /* BXT DPLL can't generate 223-240 MHz */
+ /* GLK DPLL can't generate 446-480 MHz */
+ if (IS_GEMINILAKE(dev_priv) && clock > 446666 && clock < 480000)
+ return MODE_CLOCK_RANGE;
+
+ /* BXT/GLK DPLL can't generate 223-240 MHz */
if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000)
return MODE_CLOCK_RANGE;
@@ -2950,21 +2956,12 @@ static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_digital_port *dig_port =
- hdmi_to_dig_port(intel_hdmi);
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_attach_aspect_ratio_property(connector);
- /*
- * Attach Colorspace property for Non LSPCON based device
- * ToDo: This needs to be extended for LSPCON implementation
- * as well. Will be implemented separately.
- */
- if (!dig_port->lspcon.active)
- intel_attach_colorspace_property(connector);
-
+ intel_attach_hdmi_colorspace_property(connector);
drm_connector_attach_content_type_property(connector);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -3300,7 +3297,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
intel_hdmi->attached_connector = intel_connector;
if (is_hdcp_supported(dev_priv, port)) {
- int ret = intel_hdcp_init(intel_connector, port,
+ int ret = intel_hdcp_init(intel_connector, dig_port,
&intel_hdmi_hdcp_shim);
if (ret)
drm_dbg_kms(&dev_priv->drm,
@@ -3438,3 +3435,236 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
intel_hdmi_init_connector(dig_port, intel_connector);
}
+
+/*
+ * intel_hdmi_dsc_get_slice_height - get the dsc slice_height
+ * @vactive: Vactive of a display mode
+ *
+ * @return: appropriate dsc slice height for a given mode.
+ */
+int intel_hdmi_dsc_get_slice_height(int vactive)
+{
+ int slice_height;
+
+ /*
+ * Slice Height determination : HDMI2.1 Section 7.7.5.2
+ * Select the smallest slice height >= 96 that results in a valid PPS and
+ * requires the minimum number of padding lines for the final slice.
+ *
+ * Assumption : Vactive is even.
+ */
+ for (slice_height = 96; slice_height <= vactive; slice_height += 2)
+ if (vactive % slice_height == 0)
+ return slice_height;
+
+ return 0;
+}
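
For illustration only (this note and sketch are not part of the patch): a minimal, self-contained user-space version of the slice height search above, assuming a hypothetical vactive of 2160.

#include <stdio.h>

int main(void)
{
	int vactive = 2160;	/* hypothetical 4K mode */
	int slice_height;

	/* Same search as above: smallest even slice height >= 96 that
	 * divides vactive evenly. */
	for (slice_height = 96; slice_height <= vactive; slice_height += 2)
		if (vactive % slice_height == 0)
			break;

	printf("slice height for vactive %d: %d\n", vactive, slice_height); /* prints 108 */
	return 0;
}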
+
+/*
+ * intel_hdmi_dsc_get_num_slices - get no. of dsc slices based on dsc encoder
+ * and dsc decoder capabilities
+ *
+ * @crtc_state: intel crtc_state
+ * @src_max_slices: maximum slices supported by the DSC encoder
+ * @src_max_slice_width: maximum slice width supported by DSC encoder
+ * @hdmi_max_slices: maximum slices supported by sink DSC decoder
+ * @hdmi_throughput: maximum clock per slice (MHz) supported by HDMI sink
+ *
+ * @return: num of dsc slices that can be supported by the dsc encoder
+ * and decoder.
+ */
+int
+intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
+ int src_max_slices, int src_max_slice_width,
+ int hdmi_max_slices, int hdmi_throughput)
+{
+/* Pixel rates in KPixels/sec */
+#define HDMI_DSC_PEAK_PIXEL_RATE 2720000
+/*
+ * Rates at which the source and sink are required to process pixels in each
+ * slice can be at two levels: either at least 340000 kHz or at least 400000 kHz.
+ */
+#define HDMI_DSC_MAX_ENC_THROUGHPUT_0 340000
+#define HDMI_DSC_MAX_ENC_THROUGHPUT_1 400000
+
+/* Spec limits the slice width to 2720 pixels */
+#define MAX_HDMI_SLICE_WIDTH 2720
+ int kslice_adjust;
+ int adjusted_clk_khz;
+ int min_slices;
+ int target_slices;
+ int max_throughput; /* max clock freq. in khz per slice */
+ int max_slice_width;
+ int slice_width;
+ int pixel_clock = crtc_state->hw.adjusted_mode.crtc_clock;
+
+ if (!hdmi_throughput)
+ return 0;
+
+ /*
+ * Slice Width determination : HDMI2.1 Section 7.7.5.1
+ * The kslice_adjust factor for the 4:2:0 and 4:2:2 formats is 0.5, whereas
+ * for 4:4:4 it is 1.0. These factors are multiplied by 10 here and the
+ * adjusted clock value is divided by 10 later.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 ||
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
+ kslice_adjust = 10;
+ else
+ kslice_adjust = 5;
+
+ /*
+ * As per the spec, the rate at which the source and the sink process
+ * the pixels per slice is at one of two levels: at least 340 MHz or 400 MHz.
+ * This depends upon the pixel clock rate and the output format
+ * (kslice adjust).
+ * If pixel clock * kslice adjust <= 2720 MHz, slices are processed at
+ * up to 340 MHz each; otherwise they are processed at up to 400 MHz.
+ */
+
+ adjusted_clk_khz = DIV_ROUND_UP(kslice_adjust * pixel_clock, 10);
+
+ if (adjusted_clk_khz <= HDMI_DSC_PEAK_PIXEL_RATE)
+ max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_0;
+ else
+ max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_1;
+
+ /*
+ * Taking into account the sink's capability for maximum
+ * clock per slice (in MHz) as read from HF-VSDB.
+ */
+ max_throughput = min(max_throughput, hdmi_throughput * 1000);
+
+ min_slices = DIV_ROUND_UP(adjusted_clk_khz, max_throughput);
+ max_slice_width = min(MAX_HDMI_SLICE_WIDTH, src_max_slice_width);
+
+ /*
+ * Keep increasing the number of slices per line, starting from
+ * min_slices, until the resulting slice_width drops just below
+ * max_slice_width. The slices/line selected should be
+ * less than or equal to the max horizontal slices that the combination
+ * of PCON encoder and HDMI decoder can support.
+ */
+ slice_width = max_slice_width;
+
+ do {
+ if (min_slices <= 1 && src_max_slices >= 1 && hdmi_max_slices >= 1)
+ target_slices = 1;
+ else if (min_slices <= 2 && src_max_slices >= 2 && hdmi_max_slices >= 2)
+ target_slices = 2;
+ else if (min_slices <= 4 && src_max_slices >= 4 && hdmi_max_slices >= 4)
+ target_slices = 4;
+ else if (min_slices <= 8 && src_max_slices >= 8 && hdmi_max_slices >= 8)
+ target_slices = 8;
+ else if (min_slices <= 12 && src_max_slices >= 12 && hdmi_max_slices >= 12)
+ target_slices = 12;
+ else if (min_slices <= 16 && src_max_slices >= 16 && hdmi_max_slices >= 16)
+ target_slices = 16;
+ else
+ return 0;
+
+ slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay, target_slices);
+ if (slice_width >= max_slice_width)
+ min_slices = target_slices + 1;
+ } while (slice_width >= max_slice_width);
+
+ return target_slices;
+}
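
For illustration only (not part of the patch): a standalone sketch of the minimum slice count arithmetic described in the comments above, using a hypothetical 594000 kHz (4K@60 RGB) pixel clock and an assumed 400 MHz per-slice sink throughput.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int pixel_clock = 594000;	/* kHz, hypothetical 4K@60 RGB mode */
	int kslice_adjust = 10;		/* RGB / 4:4:4 */
	int hdmi_throughput = 400;	/* MHz per slice, assumed sink capability */
	int adjusted_clk_khz = DIV_ROUND_UP(kslice_adjust * pixel_clock, 10);
	int max_throughput = adjusted_clk_khz <= 2720000 ? 340000 : 400000;
	int min_slices;

	if (max_throughput > hdmi_throughput * 1000)
		max_throughput = hdmi_throughput * 1000;

	min_slices = DIV_ROUND_UP(adjusted_clk_khz, max_throughput);
	printf("minimum slices per line: %d\n", min_slices);	/* prints 2 */
	return 0;
}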
+
+/*
+ * intel_hdmi_dsc_get_bpp - get the appropriate compressed bits_per_pixel based on
+ * source and sink capabilities.
+ *
+ * @src_fractional_bpp: fractional bpp supported by the source
+ * @slice_width: dsc slice width supported by the source and sink
+ * @num_slices: num of slices supported by the source and sink
+ * @output_format: video output format
+ * @hdmi_all_bpp: sink supports decoding of 1/16th bpp setting
+ * @hdmi_max_chunk_bytes: max bytes in a line of chunks supported by sink
+ *
+ * @return: compressed bits_per_pixel in steps of 1/16 of a bit per pixel (i.e. bpp * 16)
+ */
+int
+intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width, int num_slices,
+ int output_format, bool hdmi_all_bpp,
+ int hdmi_max_chunk_bytes)
+{
+ int max_dsc_bpp, min_dsc_bpp;
+ int target_bytes;
+ bool bpp_found = false;
+ int bpp_decrement_x16;
+ int bpp_target;
+ int bpp_target_x16;
+
+ /*
+ * Get the min and max bpp as per Table 7.23 in the HDMI 2.1 spec.
+ * Start with the max bpp and keep decrementing in fractional bpp
+ * steps, if supported by the PCON DSC encoder.
+ *
+ * For each bpp we check whether the number of bytes can be supported by the HDMI sink.
+ */
+
+ /* Assuming bpc of 8 */
+ if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+ min_dsc_bpp = 6;
+ max_dsc_bpp = 3 * 4; /* 3*bpc/2 */
+ } else if (output_format == INTEL_OUTPUT_FORMAT_YCBCR444 ||
+ output_format == INTEL_OUTPUT_FORMAT_RGB) {
+ min_dsc_bpp = 8;
+ max_dsc_bpp = 3 * 8; /* 3*bpc */
+ } else {
+ /* Assuming 4:2:2 encoding */
+ min_dsc_bpp = 7;
+ max_dsc_bpp = 2 * 8; /* 2*bpc */
+ }
+
+ /*
+ * Taking into account whether DSC_all_bpp is supported by the HDMI 2.1 sink.
+ * Section 7.7.34 : Source shall not enable compressed Video
+ * Transport with bpp_target settings above 12 bpp unless
+ * DSC_all_bpp is set to 1.
+ */
+ if (!hdmi_all_bpp)
+ max_dsc_bpp = min(max_dsc_bpp, 12);
+
+ /*
+ * The sink has a per-scanline limit on compressed data in bytes,
+ * described by the max_chunk_bytes field in the HF-VSDB block of the EDID.
+ * The number of bytes depends on the target bits per pixel that the
+ * source configures. So we start with the max_bpp and calculate
+ * the target_chunk_bytes. We keep decrementing the target_bpp
+ * until target_chunk_bytes is just below the sink's max_chunk_bytes,
+ * or until we reach min_dsc_bpp.
+ *
+ * The decrement is according to the fractional support from PCON DSC
+ * encoder. For fractional BPP we use bpp_target as a multiple of 16.
+ *
+ * bpp_target_x16 = bpp_target * 16
+ * So we need to decrement by {1, 2, 4, 8, 16} for fractional bpps
+ * {1/16, 1/8, 1/4, 1/2, 1} respectively.
+ */
+
+ bpp_target = max_dsc_bpp;
+
+ /* If the source does not support fractional bpp, decrement bpp_target_x16 in steps of 16 */
+ if (!src_fractional_bpp)
+ src_fractional_bpp = 1;
+ bpp_decrement_x16 = DIV_ROUND_UP(16, src_fractional_bpp);
+ bpp_target_x16 = (bpp_target * 16) - bpp_decrement_x16;
+
+ while (bpp_target_x16 > (min_dsc_bpp * 16)) {
+ int bpp;
+
+ bpp = DIV_ROUND_UP(bpp_target_x16, 16);
+ target_bytes = DIV_ROUND_UP((num_slices * slice_width * bpp), 8);
+ if (target_bytes <= hdmi_max_chunk_bytes) {
+ bpp_found = true;
+ break;
+ }
+ bpp_target_x16 -= bpp_decrement_x16;
+ }
+ if (bpp_found)
+ return bpp_target_x16;
+
+ return 0;
+}
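
For illustration only (not part of the patch): a standalone sketch of the bpp_target_x16 search implemented above, with hypothetical slice geometry and sink chunk limit.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int num_slices = 4, slice_width = 960;	/* hypothetical 3840-wide mode */
	int hdmi_max_chunk_bytes = 5000;	/* assumed sink limit from HF-VSDB */
	int min_dsc_bpp = 8, max_dsc_bpp = 12;	/* RGB, bpc 8, DSC_all_bpp not set */
	int bpp_decrement_x16 = 16;		/* no fractional bpp support */
	int bpp_target_x16 = max_dsc_bpp * 16 - bpp_decrement_x16;

	while (bpp_target_x16 > min_dsc_bpp * 16) {
		int bpp = DIV_ROUND_UP(bpp_target_x16, 16);
		int target_bytes = DIV_ROUND_UP(num_slices * slice_width * bpp, 8);

		if (target_bytes <= hdmi_max_chunk_bytes) {
			printf("selected bpp_x16 = %d (%d bpp)\n",
			       bpp_target_x16, bpp);	/* prints 160 (10 bpp) */
			return 0;
		}
		bpp_target_x16 -= bpp_decrement_x16;
	}
	printf("no compressed bpp fits the sink limit\n");
	return 0;
}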
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 15eb0ccde76e..fa1a9b030850 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -50,5 +50,12 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, int bpc,
bool has_hdmi_sink, bool ycbcr420_output);
+int intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width,
+ int num_slices, int output_format, bool hdmi_all_bpp,
+ int hdmi_max_chunk_bytes);
+int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
+ int src_max_slices, int src_max_slice_width,
+ int hdmi_max_slices, int hdmi_throughput);
+int intel_hdmi_dsc_get_slice_height(int vactive);
#endif /* __INTEL_HDMI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index e37d45e531df..e4ff533e3a69 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -30,11 +30,15 @@
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_lspcon.h"
+#include "intel_hdmi.h"
/* LSPCON OUI Vendor ID(signatures) */
#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
#define LSPCON_VENDOR_MCA_OUI 0x0060AD
+#define DPCD_MCA_LSPCON_HDR_STATUS 0x70003
+#define DPCD_PARADE_LSPCON_HDR_STATUS 0x00511
+
/* AUX addresses to write MCA AVI IF */
#define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0
#define LSPCON_MCA_AVI_IF_CTRL 0x5DF
@@ -104,6 +108,35 @@ static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
return true;
}
+static u32 get_hdr_status_reg(struct intel_lspcon *lspcon)
+{
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ return DPCD_MCA_LSPCON_HDR_STATUS;
+ else
+ return DPCD_PARADE_LSPCON_HDR_STATUS;
+}
+
+void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
+{
+ struct intel_digital_port *dig_port =
+ container_of(lspcon, struct intel_digital_port, lspcon);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
+ u8 hdr_caps;
+ int ret;
+
+ ret = drm_dp_dpcd_read(&dp->aux, get_hdr_status_reg(lspcon),
+ &hdr_caps, 1);
+
+ if (ret < 0) {
+ drm_dbg_kms(dev, "HDR capability detection failed\n");
+ lspcon->hdr_supported = false;
+ } else if (hdr_caps & 0x1) {
+ drm_dbg_kms(dev, "LSPCON capable of HDR\n");
+ lspcon->hdr_supported = true;
+ }
+}
+
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
enum drm_lspcon_mode current_mode;
@@ -418,27 +451,32 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- bool ret;
+ bool ret = true;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
- /* LSPCON only needs AVI IF */
- if (type != HDMI_INFOFRAME_TYPE_AVI)
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
+ frame, len);
+ else
+ ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
+ frame, len);
+ break;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ drm_dbg_kms(encoder->base.dev, "Update HDR metadata for lspcon\n");
+ /* Use the legacy HSW implementation for this */
+ hsw_write_infoframe(encoder, crtc_state, type, frame, len);
+ break;
+ default:
return;
-
- if (lspcon->vendor == LSPCON_VENDOR_MCA)
- ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
- frame, len);
- else
- ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
- frame, len);
+ }
if (!ret) {
- DRM_ERROR("Failed to write AVI infoframes\n");
+ DRM_ERROR("Failed to write infoframes\n");
return;
}
-
- DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
}
void lspcon_read_infoframe(struct intel_encoder *encoder,
@@ -446,7 +484,10 @@ void lspcon_read_infoframe(struct intel_encoder *encoder,
unsigned int type,
void *frame, ssize_t len)
{
- /* FIXME implement this */
+ /* FIXME implement for AVI Infoframe as well */
+ if (type == HDMI_PACKET_TYPE_GAMUT_METADATA)
+ hsw_read_infoframe(encoder, crtc_state, type,
+ frame, len);
}
void lspcon_set_infoframes(struct intel_encoder *encoder,
@@ -491,12 +532,26 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
else
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
- drm_hdmi_avi_infoframe_quant_range(&frame.avi,
- conn_state->connector,
- adjusted_mode,
- crtc_state->limited_color_range ?
- HDMI_QUANTIZATION_RANGE_LIMITED :
- HDMI_QUANTIZATION_RANGE_FULL);
+ /* Set the Colorspace as per the HDMI spec */
+ drm_hdmi_avi_infoframe_colorspace(&frame.avi, conn_state);
+
+ /* nonsense combination */
+ drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+ conn_state->connector,
+ adjusted_mode,
+ crtc_state->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
+ } else {
+ frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ frame.avi.ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ }
+
+ drm_hdmi_avi_infoframe_content_type(&frame.avi, conn_state);
ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
if (ret < 0) {
@@ -508,11 +563,64 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
buf, ret);
}
+static bool _lspcon_read_avi_infoframe_enabled_mca(struct drm_dp_aux *aux)
+{
+ int ret;
+ u32 val = 0;
+ u16 reg = LSPCON_MCA_AVI_IF_CTRL;
+
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ return val & LSPCON_MCA_AVI_IF_KICKOFF;
+}
+
+static bool _lspcon_read_avi_infoframe_enabled_parade(struct drm_dp_aux *aux)
+{
+ int ret;
+ u32 val = 0;
+ u16 reg = LSPCON_PARADE_AVI_IF_CTRL;
+
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ return val & LSPCON_PARADE_AVI_IF_KICKOFF;
+}
+
u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- /* FIXME actually read this from the hw */
- return 0;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ bool infoframes_enabled;
+ u32 val = 0;
+ u32 mask, tmp;
+
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ infoframes_enabled = _lspcon_read_avi_infoframe_enabled_mca(&intel_dp->aux);
+ else
+ infoframes_enabled = _lspcon_read_avi_infoframe_enabled_parade(&intel_dp->aux);
+
+ if (infoframes_enabled)
+ val |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+ if (lspcon->hdr_supported) {
+ tmp = intel_de_read(dev_priv,
+ HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
+ mask = VIDEO_DIP_ENABLE_GMP_HSW;
+
+ if (tmp & mask)
+ val |= intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
+ }
+
+ return val;
}
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
@@ -520,7 +628,7 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON);
}
-static bool lspcon_init(struct intel_digital_port *dig_port)
+bool lspcon_init(struct intel_digital_port *dig_port)
{
struct intel_dp *dp = &dig_port->dp;
struct intel_lspcon *lspcon = &dig_port->lspcon;
@@ -550,6 +658,14 @@ static bool lspcon_init(struct intel_digital_port *dig_port)
return true;
}
+u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ return dig_port->infoframes_enabled(encoder, pipe_config);
+}
+
void lspcon_resume(struct intel_digital_port *dig_port)
{
struct intel_lspcon *lspcon = &dig_port->lspcon;
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.h b/drivers/gpu/drm/i915/display/intel_lspcon.h
index b03dcb7076d8..e19e10492b05 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.h
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.h
@@ -15,6 +15,8 @@ struct intel_digital_port;
struct intel_encoder;
struct intel_lspcon;
+bool lspcon_init(struct intel_digital_port *dig_port);
+void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon);
void lspcon_resume(struct intel_digital_port *dig_port);
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
void lspcon_write_infoframe(struct intel_encoder *encoder,
@@ -31,5 +33,15 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state);
u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
+u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
+void hsw_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len);
+void hsw_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len);
#endif /* __INTEL_LSPCON_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 52b4f6193b4c..f455040fa989 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -29,6 +29,7 @@
#include <drm/drm_fourcc.h>
#include "gem/i915_gem_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
@@ -182,6 +183,7 @@ struct intel_overlay {
struct intel_crtc *crtc;
struct i915_vma *vma;
struct i915_vma *old_vma;
+ struct intel_frontbuffer *frontbuffer;
bool active;
bool pfit_active;
u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
@@ -282,21 +284,19 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
struct i915_vma *vma)
{
enum pipe pipe = overlay->crtc->pipe;
- struct intel_frontbuffer *from = NULL, *to = NULL;
+ struct intel_frontbuffer *frontbuffer = NULL;
drm_WARN_ON(&overlay->i915->drm, overlay->old_vma);
- if (overlay->vma)
- from = intel_frontbuffer_get(overlay->vma->obj);
if (vma)
- to = intel_frontbuffer_get(vma->obj);
+ frontbuffer = intel_frontbuffer_get(vma->obj);
- intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe));
+ intel_frontbuffer_track(overlay->frontbuffer, frontbuffer,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
- if (to)
- intel_frontbuffer_put(to);
- if (from)
- intel_frontbuffer_put(from);
+ if (overlay->frontbuffer)
+ intel_frontbuffer_put(overlay->frontbuffer);
+ overlay->frontbuffer = frontbuffer;
intel_frontbuffer_flip_prepare(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(pipe));
@@ -359,7 +359,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
intel_frontbuffer_flip_complete(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
i915_vma_put(vma);
}
@@ -860,7 +860,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
return 0;
out_unpin:
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
out_pin_section:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index d64fce1a17cb..5fdf52643150 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -511,40 +511,79 @@ static u32 scale_hw_to_user(struct intel_connector *connector,
0, user_max);
}
-static u32 intel_panel_compute_brightness(struct intel_connector *connector,
- u32 val)
+u32 intel_panel_invert_pwm_level(struct intel_connector *connector, u32 val)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
if (dev_priv->params.invert_brightness < 0)
return val;
if (dev_priv->params.invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
- return panel->backlight.max - val + panel->backlight.min;
+ return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
}
return val;
}
-static u32 lpt_get_backlight(struct intel_connector *connector)
+void intel_panel_set_pwm_level(const struct drm_connector_state *conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val);
+ panel->backlight.pwm_funcs->set(conn_state, val);
+}
+
+u32 intel_panel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
+
+ val = scale(val, panel->backlight.min, panel->backlight.max,
+ panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
+
+ return intel_panel_invert_pwm_level(connector, val);
+}
+
+u32 intel_panel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
+
+ if (dev_priv->params.invert_brightness > 0 ||
+ (dev_priv->params.invert_brightness == 0 && dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS))
+ val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
+
+ return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
+ panel->backlight.min, panel->backlight.max);
+}
+
+static u32 lpt_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
}
-static u32 pch_get_backlight(struct intel_connector *connector)
+static u32 pch_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
}
-static u32 i9xx_get_backlight(struct intel_connector *connector)
+static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
@@ -564,23 +603,17 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
return val;
}
-static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
+static u32 vlv_get_backlight(struct intel_connector *connector, enum pipe pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return 0;
return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}
-static u32 vlv_get_backlight(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum pipe pipe = intel_connector_get_pipe(connector);
-
- return _vlv_get_backlight(dev_priv, pipe);
-}
-
-static u32 bxt_get_backlight(struct intel_connector *connector)
+static u32 bxt_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
@@ -589,7 +622,7 @@ static u32 bxt_get_backlight(struct intel_connector *connector)
BXT_BLC_PWM_DUTY(panel->backlight.controller));
}
-static u32 pwm_get_backlight(struct intel_connector *connector)
+static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe unused)
{
struct intel_panel *panel = &connector->panel;
struct pwm_state state;
@@ -624,12 +657,12 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
struct intel_panel *panel = &connector->panel;
u32 tmp, mask;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
if (panel->backlight.combination_mode) {
u8 lbpc;
- lbpc = level * 0xfe / panel->backlight.max + 1;
+ lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1;
level /= lbpc;
pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
}
@@ -666,7 +699,7 @@ static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32
BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
}
-static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
@@ -681,10 +714,9 @@ intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", level);
+ drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level);
- level = intel_panel_compute_brightness(connector, level);
- panel->backlight.set(conn_state, level);
+ panel->backlight.funcs->set(conn_state, level);
}
/* set backlight brightness to level in range [0..max], assuming hw min is
@@ -726,13 +758,13 @@ void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state
mutex_unlock(&dev_priv->backlight_lock);
}
-static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, level);
/*
* Although we don't support or enable CPU PWM with LPT/SPT based
@@ -754,13 +786,13 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
-static void pch_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
@@ -769,44 +801,44 @@ static void pch_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
-static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
}
-static void i965_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev);
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv, BLC_PWM_CTL2);
intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
}
-static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
tmp & ~BLM_PWM_ENABLE);
}
-static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 tmp, val;
+ u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv,
BXT_BLC_PWM_CTL(panel->backlight.controller));
@@ -820,14 +852,14 @@ static void bxt_disable_backlight(const struct drm_connector_state *old_conn_sta
}
}
-static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 tmp;
- intel_panel_actually_set_backlight(old_conn_state, 0);
+ intel_panel_set_pwm_level(old_conn_state, val);
tmp = intel_de_read(dev_priv,
BXT_BLC_PWM_CTL(panel->backlight.controller));
@@ -835,7 +867,7 @@ static void cnp_disable_backlight(const struct drm_connector_state *old_conn_sta
tmp & ~BXT_BLC_PWM_ENABLE);
}
-static void pwm_disable_backlight(const struct drm_connector_state *old_conn_state)
+static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct intel_panel *panel = &connector->panel;
@@ -870,13 +902,13 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
panel->backlight.enabled = false;
- panel->backlight.disable(old_conn_state);
+ panel->backlight.funcs->disable(old_conn_state, 0);
mutex_unlock(&dev_priv->backlight_lock);
}
static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -906,7 +938,7 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken);
}
- pch_ctl2 = panel->backlight.max << 16;
+ pch_ctl2 = panel->backlight.pwm_level_max << 16;
intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
@@ -923,11 +955,11 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
pch_ctl1 | BLM_PCH_PWM_ENABLE);
/* This won't stick until the above enable. */
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
}
static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -958,9 +990,9 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
/* This won't stick until the above enable. */
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
- pch_ctl2 = panel->backlight.max << 16;
+ pch_ctl2 = panel->backlight.pwm_level_max << 16;
intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
@@ -974,7 +1006,7 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
}
static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -987,7 +1019,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, BLC_PWM_CTL, 0);
}
- freq = panel->backlight.max;
+ freq = panel->backlight.pwm_level_max;
if (panel->backlight.combination_mode)
freq /= 0xff;
@@ -1001,7 +1033,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_posting_read(dev_priv, BLC_PWM_CTL);
/* XXX: combine this into above write? */
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
/*
* Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
@@ -1013,7 +1045,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
}
static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1028,7 +1060,7 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
}
- freq = panel->backlight.max;
+ freq = panel->backlight.pwm_level_max;
if (panel->backlight.combination_mode)
freq /= 0xff;
@@ -1044,11 +1076,11 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_posting_read(dev_priv, BLC_PWM_CTL2);
intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
}
static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1063,11 +1095,11 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
}
- ctl = panel->backlight.max << 16;
+ ctl = panel->backlight.pwm_level_max << 16;
intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl);
/* XXX: combine this into above write? */
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
ctl2 = 0;
if (panel->backlight.active_low_pwm)
@@ -1079,7 +1111,7 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
}
static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1116,9 +1148,9 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv,
BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.max);
+ panel->backlight.pwm_level_max);
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
pwm_ctl = 0;
if (panel->backlight.active_low_pwm)
@@ -1133,7 +1165,7 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
}
static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1152,9 +1184,9 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(dev_priv,
BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.max);
+ panel->backlight.pwm_level_max);
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ intel_panel_set_pwm_level(conn_state, level);
pwm_ctl = 0;
if (panel->backlight.active_low_pwm)
@@ -1168,14 +1200,12 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
pwm_ctl | BXT_BLC_PWM_ENABLE);
}
-static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_panel *panel = &connector->panel;
- int level = panel->backlight.level;
- level = intel_panel_compute_brightness(connector, level);
pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
panel->backlight.pwm_state.enabled = true;
pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
@@ -1198,7 +1228,7 @@ static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_s
panel->backlight.device->props.max_brightness);
}
- panel->backlight.enable(crtc_state, conn_state);
+ panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level);
panel->backlight.enabled = true;
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_UNBLANK;
@@ -1233,10 +1263,8 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
mutex_lock(&dev_priv->backlight_lock);
- if (panel->backlight.enabled) {
- val = panel->backlight.get(connector);
- val = intel_panel_compute_brightness(connector, val);
- }
+ if (panel->backlight.enabled)
+ val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector));
mutex_unlock(&dev_priv->backlight_lock);
@@ -1567,13 +1595,13 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv);
u32 pwm;
- if (!panel->backlight.hz_to_pwm) {
+ if (!panel->backlight.pwm_funcs->hz_to_pwm) {
drm_dbg_kms(&dev_priv->drm,
"backlight frequency conversion not supported\n");
return 0;
}
- pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
+ pwm = panel->backlight.pwm_funcs->hz_to_pwm(connector, pwm_freq_hz);
if (!pwm) {
drm_dbg_kms(&dev_priv->drm,
"backlight frequency conversion failed\n");
@@ -1592,7 +1620,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
struct intel_panel *panel = &connector->panel;
int min;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
/*
* XXX: If the vbt value is 255, it makes min equal to max, which leads
@@ -1609,7 +1637,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
}
/* vbt value is a coefficient in range [0..255] */
- return scale(min, 0, 255, 0, panel->backlight.max);
+ return scale(min, 0, 255, 0, panel->backlight.pwm_level_max);
}
static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
@@ -1629,29 +1657,27 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
- panel->backlight.max = pch_ctl2 >> 16;
+ panel->backlight.pwm_level_max = pch_ctl2 >> 16;
cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
+ panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
- cpu_mode = panel->backlight.enabled && HAS_PCH_LPT(dev_priv) &&
+ cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(dev_priv) &&
!(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
(cpu_ctl2 & BLM_PWM_ENABLE);
- if (cpu_mode)
- val = pch_get_backlight(connector);
- else
- val = lpt_get_backlight(connector);
if (cpu_mode) {
+ val = pch_get_backlight(connector, unused);
+
drm_dbg_kms(&dev_priv->drm,
"CPU backlight register was enabled, switching to PCH override\n");
@@ -1664,10 +1690,6 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
cpu_ctl2 & ~BLM_PWM_ENABLE);
}
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
-
return 0;
}
@@ -1675,29 +1697,24 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2;
pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
- panel->backlight.max = pch_ctl2 >> 16;
+ panel->backlight.pwm_level_max = pch_ctl2 >> 16;
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
-
- val = pch_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
+ panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
(pch_ctl1 & BLM_PCH_PWM_ENABLE);
return 0;
@@ -1717,27 +1734,26 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
if (IS_PINEVIEW(dev_priv))
panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
- panel->backlight.max = ctl >> 17;
+ panel->backlight.pwm_level_max = ctl >> 17;
- if (!panel->backlight.max) {
- panel->backlight.max = get_backlight_max_vbt(connector);
- panel->backlight.max >>= 1;
+ if (!panel->backlight.pwm_level_max) {
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+ panel->backlight.pwm_level_max >>= 1;
}
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
if (panel->backlight.combination_mode)
- panel->backlight.max *= 0xff;
+ panel->backlight.pwm_level_max *= 0xff;
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- val = i9xx_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ val = i9xx_get_backlight(connector, unused);
+ val = intel_panel_invert_pwm_level(connector, val);
+ val = clamp(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
- panel->backlight.enabled = val != 0;
+ panel->backlight.pwm_enabled = val != 0;
return 0;
}
@@ -1746,32 +1762,27 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 ctl, ctl2, val;
+ u32 ctl, ctl2;
ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
- panel->backlight.max = ctl >> 16;
+ panel->backlight.pwm_level_max = ctl >> 16;
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
if (panel->backlight.combination_mode)
- panel->backlight.max *= 0xff;
+ panel->backlight.pwm_level_max *= 0xff;
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- val = i9xx_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
-
- panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
+ panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
return 0;
}
@@ -1780,7 +1791,7 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 ctl, ctl2, val;
+ u32 ctl, ctl2;
if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return -ENODEV;
@@ -1789,22 +1800,17 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe));
- panel->backlight.max = ctl >> 16;
+ panel->backlight.pwm_level_max = ctl >> 16;
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
-
- val = _vlv_get_backlight(dev_priv, pipe);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
+ panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
return 0;
}
@@ -1829,24 +1835,18 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
}
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
- panel->backlight.max =
- intel_de_read(dev_priv,
- BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ panel->backlight.pwm_level_max =
+ intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- val = bxt_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
-
- panel->backlight.enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+ panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
return 0;
}
@@ -1856,7 +1856,7 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 pwm_ctl, val;
+ u32 pwm_ctl;
/*
* CNP has the BXT implementation of backlight, but with only one
@@ -1869,30 +1869,24 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
BXT_BLC_PWM_CTL(panel->backlight.controller));
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
- panel->backlight.max =
- intel_de_read(dev_priv,
- BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ panel->backlight.pwm_level_max =
+ intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
- if (!panel->backlight.max)
- panel->backlight.max = get_backlight_max_vbt(connector);
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- if (!panel->backlight.max)
+ if (!panel->backlight.pwm_level_max)
return -ENODEV;
- panel->backlight.min = get_backlight_min_vbt(connector);
-
- val = bxt_get_backlight(connector);
- val = intel_panel_compute_brightness(connector, val);
- panel->backlight.level = clamp(val, panel->backlight.min,
- panel->backlight.max);
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- panel->backlight.enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+ panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
return 0;
}
-static int pwm_setup_backlight(struct intel_connector *connector,
- enum pipe pipe)
+static int ext_pwm_setup_backlight(struct intel_connector *connector,
+ enum pipe pipe)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1916,8 +1910,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return -ENODEV;
}
- panel->backlight.max = 100; /* 100% */
- panel->backlight.min = get_backlight_min_vbt(connector);
+ panel->backlight.pwm_level_max = 100; /* 100% */
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
if (pwm_is_enabled(panel->backlight.pwm)) {
/* PWM is already enabled, use existing settings */
@@ -1925,10 +1919,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
level = pwm_get_relative_duty_cycle(&panel->backlight.pwm_state,
100);
- level = intel_panel_compute_brightness(connector, level);
- panel->backlight.level = clamp(level, panel->backlight.min,
- panel->backlight.max);
- panel->backlight.enabled = true;
+ level = intel_panel_invert_pwm_level(connector, level);
+ panel->backlight.pwm_enabled = true;
drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
@@ -1944,6 +1936,58 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return 0;
}
+static void intel_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->set(conn_state,
+ intel_panel_invert_pwm_level(connector, level));
+}
+
+static u32 intel_pwm_get_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return intel_panel_invert_pwm_level(connector,
+ panel->backlight.pwm_funcs->get(connector, pipe));
+}
+
+static void intel_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->enable(crtc_state, conn_state,
+ intel_panel_invert_pwm_level(connector, level));
+}
+
+static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->disable(conn_state,
+ intel_panel_invert_pwm_level(connector, level));
+}
+
+static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct intel_panel *panel = &connector->panel;
+ int ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+
+ if (ret < 0)
+ return ret;
+
+ panel->backlight.min = panel->backlight.pwm_level_min;
+ panel->backlight.max = panel->backlight.pwm_level_max;
+ panel->backlight.level = intel_pwm_get_backlight(connector, pipe);
+ panel->backlight.enabled = panel->backlight.pwm_enabled;
+
+ return 0;
+}
+
void intel_panel_update_backlight(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1982,12 +2026,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
}
/* ensure intel_panel has been initialized first */
- if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.setup))
+ if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.funcs))
return -ENODEV;
/* set level and max in panel struct */
mutex_lock(&dev_priv->backlight_lock);
- ret = panel->backlight.setup(intel_connector, pipe);
+ ret = panel->backlight.funcs->setup(intel_connector, pipe);
mutex_unlock(&dev_priv->backlight_lock);
if (ret) {
@@ -2017,6 +2061,94 @@ static void intel_panel_destroy_backlight(struct intel_panel *panel)
panel->backlight.present = false;
}
+static const struct intel_panel_bl_funcs bxt_pwm_funcs = {
+ .setup = bxt_setup_backlight,
+ .enable = bxt_enable_backlight,
+ .disable = bxt_disable_backlight,
+ .set = bxt_set_backlight,
+ .get = bxt_get_backlight,
+ .hz_to_pwm = bxt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs cnp_pwm_funcs = {
+ .setup = cnp_setup_backlight,
+ .enable = cnp_enable_backlight,
+ .disable = cnp_disable_backlight,
+ .set = bxt_set_backlight,
+ .get = bxt_get_backlight,
+ .hz_to_pwm = cnp_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs lpt_pwm_funcs = {
+ .setup = lpt_setup_backlight,
+ .enable = lpt_enable_backlight,
+ .disable = lpt_disable_backlight,
+ .set = lpt_set_backlight,
+ .get = lpt_get_backlight,
+ .hz_to_pwm = lpt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs spt_pwm_funcs = {
+ .setup = lpt_setup_backlight,
+ .enable = lpt_enable_backlight,
+ .disable = lpt_disable_backlight,
+ .set = lpt_set_backlight,
+ .get = lpt_get_backlight,
+ .hz_to_pwm = spt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs pch_pwm_funcs = {
+ .setup = pch_setup_backlight,
+ .enable = pch_enable_backlight,
+ .disable = pch_disable_backlight,
+ .set = pch_set_backlight,
+ .get = pch_get_backlight,
+ .hz_to_pwm = pch_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs ext_pwm_funcs = {
+ .setup = ext_pwm_setup_backlight,
+ .enable = ext_pwm_enable_backlight,
+ .disable = ext_pwm_disable_backlight,
+ .set = ext_pwm_set_backlight,
+ .get = ext_pwm_get_backlight,
+};
+
+static const struct intel_panel_bl_funcs vlv_pwm_funcs = {
+ .setup = vlv_setup_backlight,
+ .enable = vlv_enable_backlight,
+ .disable = vlv_disable_backlight,
+ .set = vlv_set_backlight,
+ .get = vlv_get_backlight,
+ .hz_to_pwm = vlv_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs i965_pwm_funcs = {
+ .setup = i965_setup_backlight,
+ .enable = i965_enable_backlight,
+ .disable = i965_disable_backlight,
+ .set = i9xx_set_backlight,
+ .get = i9xx_get_backlight,
+ .hz_to_pwm = i965_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs i9xx_pwm_funcs = {
+ .setup = i9xx_setup_backlight,
+ .enable = i9xx_enable_backlight,
+ .disable = i9xx_disable_backlight,
+ .set = i9xx_set_backlight,
+ .get = i9xx_get_backlight,
+ .hz_to_pwm = i9xx_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs pwm_bl_funcs = {
+ .setup = intel_pwm_setup_backlight,
+ .enable = intel_pwm_enable_backlight,
+ .disable = intel_pwm_disable_backlight,
+ .set = intel_pwm_set_backlight,
+ .get = intel_pwm_get_backlight,
+};
+
/* Set up chip specific backlight functions */
static void
intel_panel_init_backlight_funcs(struct intel_panel *panel)
@@ -2025,75 +2157,39 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
container_of(panel, struct intel_connector, panel);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
- intel_dp_aux_init_backlight_funcs(connector) == 0)
- return;
-
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
intel_dsi_dcs_init_backlight_funcs(connector) == 0)
return;
if (IS_GEN9_LP(dev_priv)) {
- panel->backlight.setup = bxt_setup_backlight;
- panel->backlight.enable = bxt_enable_backlight;
- panel->backlight.disable = bxt_disable_backlight;
- panel->backlight.set = bxt_set_backlight;
- panel->backlight.get = bxt_get_backlight;
- panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
+ panel->backlight.pwm_funcs = &bxt_pwm_funcs;
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
- panel->backlight.setup = cnp_setup_backlight;
- panel->backlight.enable = cnp_enable_backlight;
- panel->backlight.disable = cnp_disable_backlight;
- panel->backlight.set = bxt_set_backlight;
- panel->backlight.get = bxt_get_backlight;
- panel->backlight.hz_to_pwm = cnp_hz_to_pwm;
+ panel->backlight.pwm_funcs = &cnp_pwm_funcs;
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
- panel->backlight.setup = lpt_setup_backlight;
- panel->backlight.enable = lpt_enable_backlight;
- panel->backlight.disable = lpt_disable_backlight;
- panel->backlight.set = lpt_set_backlight;
- panel->backlight.get = lpt_get_backlight;
if (HAS_PCH_LPT(dev_priv))
- panel->backlight.hz_to_pwm = lpt_hz_to_pwm;
+ panel->backlight.pwm_funcs = &lpt_pwm_funcs;
else
- panel->backlight.hz_to_pwm = spt_hz_to_pwm;
+ panel->backlight.pwm_funcs = &spt_pwm_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- panel->backlight.setup = pch_setup_backlight;
- panel->backlight.enable = pch_enable_backlight;
- panel->backlight.disable = pch_disable_backlight;
- panel->backlight.set = pch_set_backlight;
- panel->backlight.get = pch_get_backlight;
- panel->backlight.hz_to_pwm = pch_hz_to_pwm;
+ panel->backlight.pwm_funcs = &pch_pwm_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
- panel->backlight.setup = pwm_setup_backlight;
- panel->backlight.enable = pwm_enable_backlight;
- panel->backlight.disable = pwm_disable_backlight;
- panel->backlight.set = pwm_set_backlight;
- panel->backlight.get = pwm_get_backlight;
+ panel->backlight.pwm_funcs = &ext_pwm_funcs;
} else {
- panel->backlight.setup = vlv_setup_backlight;
- panel->backlight.enable = vlv_enable_backlight;
- panel->backlight.disable = vlv_disable_backlight;
- panel->backlight.set = vlv_set_backlight;
- panel->backlight.get = vlv_get_backlight;
- panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
+ panel->backlight.pwm_funcs = &vlv_pwm_funcs;
}
} else if (IS_GEN(dev_priv, 4)) {
- panel->backlight.setup = i965_setup_backlight;
- panel->backlight.enable = i965_enable_backlight;
- panel->backlight.disable = i965_disable_backlight;
- panel->backlight.set = i9xx_set_backlight;
- panel->backlight.get = i9xx_get_backlight;
- panel->backlight.hz_to_pwm = i965_hz_to_pwm;
+ panel->backlight.pwm_funcs = &i965_pwm_funcs;
} else {
- panel->backlight.setup = i9xx_setup_backlight;
- panel->backlight.enable = i9xx_enable_backlight;
- panel->backlight.disable = i9xx_disable_backlight;
- panel->backlight.set = i9xx_set_backlight;
- panel->backlight.get = i9xx_get_backlight;
- panel->backlight.hz_to_pwm = i9xx_hz_to_pwm;
+ panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
}
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
+ intel_dp_aux_init_backlight_funcs(connector) == 0)
+ return;
+
+ /* We're using a standard PWM backlight interface */
+ panel->backlight.funcs = &pwm_bl_funcs;
}
enum drm_connector_status
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index 5b813fe90557..1d340f77bffc 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -49,6 +49,10 @@ struct drm_display_mode *
intel_panel_edid_fixed_mode(struct intel_connector *connector);
struct drm_display_mode *
intel_panel_vbt_fixed_mode(struct intel_connector *connector);
+void intel_panel_set_pwm_level(const struct drm_connector_state *conn_state, u32 level);
+u32 intel_panel_invert_pwm_level(struct intel_connector *connector, u32 level);
+u32 intel_panel_backlight_level_to_pwm(struct intel_connector *connector, u32 level);
+u32 intel_panel_backlight_level_from_pwm(struct intel_connector *connector, u32 val);
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
int intel_backlight_device_register(struct intel_connector *connector);
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
new file mode 100644
index 000000000000..c4867a8020a5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -0,0 +1,1406 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_pps.h"
+
+static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+static void pps_init_delays(struct intel_dp *intel_dp);
+static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
+
+intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ /*
	 * See intel_pps_reset_all() for why we need a power domain reference here.
+ */
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
+ mutex_lock(&dev_priv->pps_mutex);
+
+ return wakeref;
+}
+
+intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
+ intel_wakeref_t wakeref)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ mutex_unlock(&dev_priv->pps_mutex);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+ return 0;
+}
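/*
 * The callers below take this lock through a with_intel_pps_lock() scope
 * helper that pairs intel_pps_lock()/intel_pps_unlock(). A minimal sketch of
 * such a helper (assumed to be provided by intel_pps.h) could be:
 *
 *	#define with_intel_pps_lock(dp, wf) \
 *		for ((wf) = intel_pps_lock(dp); (wf); (wf) = intel_pps_unlock((dp), (wf)))
 *
 * intel_pps_unlock() returning 0 is what terminates the loop after a single
 * pass, so the body runs exactly once with pps_mutex and the wakeref held.
 */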
+
+static void
+vlv_power_sequencer_kick(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum pipe pipe = intel_dp->pps.pps_pipe;
+ bool pll_enabled, release_cl_override = false;
+ enum dpio_phy phy = DPIO_PHY(pipe);
+ enum dpio_channel ch = vlv_pipe_to_channel(pipe);
+ u32 DP;
+
+ if (drm_WARN(&dev_priv->drm,
+ intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
+ "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ /* Preserve the BIOS-computed detected bit. This is
+ * supposed to be read-only.
+ */
+ DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
+ DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ DP |= DP_PORT_WIDTH(1);
+ DP |= DP_LINK_TRAIN_PAT_1;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ DP |= DP_PIPE_SEL_CHV(pipe);
+ else
+ DP |= DP_PIPE_SEL(pipe);
+
+ pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
+
+ /*
+ * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
+ */
+ if (!pll_enabled) {
+ release_cl_override = IS_CHERRYVIEW(dev_priv) &&
+ !chv_phy_powergate_ch(dev_priv, phy, ch, true);
+
+ if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
+ drm_err(&dev_priv->drm,
+ "Failed to force on pll for pipe %c!\n",
+ pipe_name(pipe));
+ return;
+ }
+ }
+
+ /*
+ * Similar magic as in intel_dp_enable_port().
+ * We _must_ do this port enable + disable trick
+ * to make this power sequencer lock onto the port.
+ * Otherwise even VDD force bit won't work.
+ */
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ if (!pll_enabled) {
+ vlv_force_pll_off(dev_priv, pipe);
+
+ if (release_cl_override)
+ chv_phy_powergate_ch(dev_priv, phy, ch, false);
+ }
+}
+
+static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+ unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
+
+ /*
	 * We don't have a power sequencer currently.
+ * Pick one that's not used by other ports.
+ */
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (encoder->type == INTEL_OUTPUT_EDP) {
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps.active_pipe != INVALID_PIPE &&
+ intel_dp->pps.active_pipe !=
+ intel_dp->pps.pps_pipe);
+
+ if (intel_dp->pps.pps_pipe != INVALID_PIPE)
+ pipes &= ~(1 << intel_dp->pps.pps_pipe);
+ } else {
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps.pps_pipe != INVALID_PIPE);
+
+ if (intel_dp->pps.active_pipe != INVALID_PIPE)
+ pipes &= ~(1 << intel_dp->pps.active_pipe);
+ }
+ }
+
+ if (pipes == 0)
+ return INVALID_PIPE;
+
+ return ffs(pipes) - 1;
+}
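/*
 * Example of the bookkeeping above (assuming the usual PIPE_A = 0,
 * PIPE_B = 1 numbering): pipes starts as 0b11; if another eDP port already
 * owns the pipe B sequencer, that bit is cleared, pipes becomes 0b01 and
 * ffs(pipes) - 1 returns PIPE_A. Only when both bits have been cleared does
 * the function report INVALID_PIPE.
 */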
+
+static enum pipe
+vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum pipe pipe;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* We should never land here with regular DP ports */
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
+ intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
+
+ if (intel_dp->pps.pps_pipe != INVALID_PIPE)
+ return intel_dp->pps.pps_pipe;
+
+ pipe = vlv_find_free_pps(dev_priv);
+
+ /*
+ * Didn't find one. This should not happen since there
+ * are two power sequencers and up to two eDP ports.
+ */
+ if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
+ pipe = PIPE_A;
+
+ vlv_steal_power_sequencer(dev_priv, pipe);
+ intel_dp->pps.pps_pipe = pipe;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps.pps_pipe),
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ /* init power sequencer on this pipe and port */
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, true);
+
+ /*
+ * Even vdd force doesn't work until we've made
+ * the power sequencer lock in on the port.
+ */
+ vlv_power_sequencer_kick(intel_dp);
+
+ return intel_dp->pps.pps_pipe;
+}
+
+static int
+bxt_power_sequencer_idx(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ int backlight_controller = dev_priv->vbt.backlight.controller;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* We should never land here with regular DP ports */
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+
+ if (!intel_dp->pps.pps_reset)
+ return backlight_controller;
+
+ intel_dp->pps.pps_reset = false;
+
+ /*
+ * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been set up during connector init.
+ */
+ pps_init_registers(intel_dp, false);
+
+ return backlight_controller;
+}
+
+typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
+}
+
+static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+}
+
+static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ return true;
+}
+
+static enum pipe
+vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
+ enum port port,
+ vlv_pipe_check pipe_check)
+{
+ enum pipe pipe;
+
+ for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
+ u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
+ PANEL_PORT_SELECT_MASK;
+
+ if (port_sel != PANEL_PORT_SELECT_VLV(port))
+ continue;
+
+ if (!pipe_check(dev_priv, pipe))
+ continue;
+
+ return pipe;
+ }
+
+ return INVALID_PIPE;
+}
+
+static void
+vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum port port = dig_port->base.port;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* try to find a pipe with this port selected */
+ /* first pick one where the panel is on */
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ vlv_pipe_has_pp_on);
+ /* didn't find one? pick one where vdd is on */
+ if (intel_dp->pps.pps_pipe == INVALID_PIPE)
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ vlv_pipe_has_vdd_on);
+ /* didn't find one? pick one with just the correct port */
+ if (intel_dp->pps.pps_pipe == INVALID_PIPE)
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ vlv_pipe_any);
+
+ /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
+ if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "no initial power sequencer for [ENCODER:%d:%s]\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ return;
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name,
+ pipe_name(intel_dp->pps.pps_pipe));
+}
+
+void intel_pps_reset_all(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ !(IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_GEN9_LP(dev_priv))))
+ return;
+
+ /*
+ * We can't grab pps_mutex here due to deadlock with power_domain
+ * mutex when power_domain functions are called while holding pps_mutex.
+ * That also means that in order to use pps_pipe the code needs to
+ * hold both a power domain reference and pps_mutex, and the power domain
+ * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so they
	 * should always be used.
+ */
+
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps.active_pipe != INVALID_PIPE);
+
+ if (encoder->type != INTEL_OUTPUT_EDP)
+ continue;
+
+ if (IS_GEN9_LP(dev_priv))
+ intel_dp->pps.pps_reset = true;
+ else
+ intel_dp->pps.pps_pipe = INVALID_PIPE;
+ }
+}
+
+struct pps_registers {
+ i915_reg_t pp_ctrl;
+ i915_reg_t pp_stat;
+ i915_reg_t pp_on;
+ i915_reg_t pp_off;
+ i915_reg_t pp_div;
+};
+
+static void intel_pps_get_registers(struct intel_dp *intel_dp,
+ struct pps_registers *regs)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ int pps_idx = 0;
+
+ memset(regs, 0, sizeof(*regs));
+
+ if (IS_GEN9_LP(dev_priv))
+ pps_idx = bxt_power_sequencer_idx(intel_dp);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ pps_idx = vlv_power_sequencer_pipe(intel_dp);
+
+ regs->pp_ctrl = PP_CONTROL(pps_idx);
+ regs->pp_stat = PP_STATUS(pps_idx);
+ regs->pp_on = PP_ON_DELAYS(pps_idx);
+ regs->pp_off = PP_OFF_DELAYS(pps_idx);
+
+ /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
+ if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ regs->pp_div = INVALID_MMIO_REG;
+ else
+ regs->pp_div = PP_DIVISOR(pps_idx);
+}
+
+static i915_reg_t
+_pp_ctrl_reg(struct intel_dp *intel_dp)
+{
+ struct pps_registers regs;
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ return regs.pp_ctrl;
+}
+
+static i915_reg_t
+_pp_stat_reg(struct intel_dp *intel_dp)
+{
+ struct pps_registers regs;
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ return regs.pp_stat;
+}
+
+static bool edp_have_panel_power(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_dp->pps.pps_pipe == INVALID_PIPE)
+ return false;
+
+ return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
+}
+
+static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_dp->pps.pps_pipe == INVALID_PIPE)
+ return false;
+
+ return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
+}
+
+void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
+ drm_WARN(&dev_priv->drm, 1,
+ "eDP powered off while attempting aux channel communication.\n");
+ drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
+ intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
+ intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
+ }
+}
+
+#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
+#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
+
+#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+
+static void intel_pps_verify_state(struct intel_dp *intel_dp);
+
+static void wait_panel_status(struct intel_dp *intel_dp,
+ u32 mask,
+ u32 value)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ i915_reg_t pp_stat_reg, pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ intel_pps_verify_state(intel_dp);
+
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "mask %08x value %08x status %08x control %08x\n",
+ mask, value,
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+
+ if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
+ mask, value, 5000))
+ drm_err(&dev_priv->drm,
+ "Panel status timeout: status %08x control %08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+
+ drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
+}
+
+static void wait_panel_on(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
+ wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+}
+
+static void wait_panel_off(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
+ wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
+
+static void wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ ktime_t panel_power_on_time;
+ s64 panel_power_off_duration;
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
+
	/* take the difference of current time and panel power off time
+ * and then make panel wait for t11_t12 if needed. */
+ panel_power_on_time = ktime_get_boottime();
+ panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
+
+ /* When we disable the VDD override bit last we have to do the manual
+ * wait. */
+ if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
+ wait_remaining_ms_from_jiffies(jiffies,
+ intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
+
+ wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
+
+void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ wait_panel_power_cycle(intel_dp);
+}
+
+static void wait_backlight_on(struct intel_dp *intel_dp)
+{
+ wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
+ intel_dp->pps.backlight_on_delay);
+}
+
+static void edp_wait_backlight_off(struct intel_dp *intel_dp)
+{
+ wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
+ intel_dp->pps.backlight_off_delay);
+}
+
+/* Read the current pp_control value, unlocking the register if it
+ * is locked
+ */
+
+static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 control;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
+ if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
+ (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
+ control &= ~PANEL_UNLOCK_MASK;
+ control |= PANEL_UNLOCK_REGS;
+ }
+ return control;
+}
+
+/*
+ * Must be paired with intel_pps_vdd_off_unlocked().
+ * Must hold pps_mutex around the whole on/off sequence.
+ * Can be nested with intel_pps_vdd_{on,off}() calls.
+ */
+bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 pp;
+ i915_reg_t pp_stat_reg, pp_ctrl_reg;
+ bool need_to_disable = !intel_dp->pps.want_panel_vdd;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return false;
+
+ cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
+ intel_dp->pps.want_panel_vdd = true;
+
+ if (edp_have_panel_vdd(intel_dp))
+ return need_to_disable;
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+ intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
+ intel_aux_power_domain(dig_port));
+
+ drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ if (!edp_have_panel_power(intel_dp))
+ wait_panel_power_cycle(intel_dp);
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp |= EDP_FORCE_VDD;
+
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+ /*
+ * If the panel wasn't on, delay before accessing aux channel
+ */
+ if (!edp_have_panel_power(intel_dp)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] panel power wasn't enabled\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ msleep(intel_dp->pps.panel_power_up_delay);
+ }
+
+ return need_to_disable;
+}
+
+/*
+ * Must be paired with intel_pps_off().
+ * Nested calls to these functions are not allowed since
+ * we drop the lock. Caller must use some higher level
+ * locking to prevent nested calls from other threads.
+ */
+void intel_pps_vdd_on(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+ bool vdd;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ vdd = false;
+ with_intel_pps_lock(intel_dp, wakeref)
+ vdd = intel_pps_vdd_on_unlocked(intel_dp);
+ I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
+}
+
+static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port =
+ dp_to_dig_port(intel_dp);
+ u32 pp;
+ i915_reg_t pp_stat_reg, pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
+
+ if (!edp_have_panel_vdd(intel_dp))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp &= ~EDP_FORCE_VDD;
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+ /* Make sure sequencer is idle before allowing subsequent activity */
+ drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+
+ if ((pp & PANEL_POWER_ON) == 0)
+ intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(dig_port),
+ fetch_and_zero(&intel_dp->pps.vdd_wakeref));
+}
+
+void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
+ /*
+ * vdd might still be enabled due to the delayed vdd off.
+ * Make sure vdd is actually turned off here.
+ */
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+}
+
+static void edp_panel_vdd_work(struct work_struct *__work)
+{
+ struct intel_pps *pps = container_of(to_delayed_work(__work),
+ struct intel_pps, panel_vdd_work);
+ struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
+ intel_wakeref_t wakeref;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ if (!intel_dp->pps.want_panel_vdd)
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+ }
+}
+
+static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
+{
+ unsigned long delay;
+
+ /*
+ * Queue the timer to fire a long time from now (relative to the power
+ * down delay) to keep the panel power up across a sequence of
+ * operations.
+ */
+ delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
+ schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
+}
+
+/*
+ * Must be paired with edp_panel_vdd_on().
+ * Must hold pps_mutex around the whole on/off sequence.
+ * Can be nested with intel_pps_vdd_{on,off}() calls.
+ */
+void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
+
+ intel_dp->pps.want_panel_vdd = false;
+
+ if (sync)
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+ else
+ edp_panel_vdd_schedule_off(intel_dp);
+}
+
+void intel_pps_on_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 pp;
+ i915_reg_t pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
+
+ if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
+ "[ENCODER:%d:%s] panel power already on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name))
+ return;
+
+ wait_panel_power_cycle(intel_dp);
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
+ if (IS_GEN(dev_priv, 5)) {
+ /* ILK workaround: disable reset around power sequence */
+ pp &= ~PANEL_POWER_RESET;
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+
+ pp |= PANEL_POWER_ON;
+ if (!IS_GEN(dev_priv, 5))
+ pp |= PANEL_POWER_RESET;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+ wait_panel_on(intel_dp);
+ intel_dp->pps.last_power_on = jiffies;
+
+ if (IS_GEN(dev_priv, 5)) {
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+}
+
+void intel_pps_on(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_pps_on_unlocked(intel_dp);
+}
+
+void intel_pps_off_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 pp;
+ i915_reg_t pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
+
+ drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
+ "Need [ENCODER:%d:%s] VDD to turn off panel\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
+
+ pp = ilk_get_pp_control(intel_dp);
+ /* We need to switch off panel power _and_ force vdd, for otherwise some
+ * panels get very unhappy and cease to work. */
+ pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
+ EDP_BLC_ENABLE);
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ intel_dp->pps.want_panel_vdd = false;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+ wait_panel_off(intel_dp);
+ intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+
+ /* We got a reference when we enabled the VDD. */
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(dig_port),
+ fetch_and_zero(&intel_dp->pps.vdd_wakeref));
+}
+
+void intel_pps_off(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_pps_off_unlocked(intel_dp);
+}
+
+/* Enable backlight in the panel power control. */
+void intel_pps_backlight_on(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ /*
+ * If we enable the backlight right away following a panel power
+ * on, we may see slight flicker as the panel syncs with the eDP
+ * link. So delay a bit to make sure the image is solid before
+ * allowing it to appear.
+ */
+ wait_backlight_on(intel_dp);
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ u32 pp;
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp |= EDP_BLC_ENABLE;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+}
+
+/* Disable backlight in the panel power control. */
+void intel_pps_backlight_off(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ u32 pp;
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp &= ~EDP_BLC_ENABLE;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+
+ intel_dp->pps.last_backlight_off = jiffies;
+ edp_wait_backlight_off(intel_dp);
+}
+
+/*
+ * Hook for controlling the panel power control backlight through the bl_power
+ * sysfs attribute. Take care to handle multiple calls.
+ */
+void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ intel_wakeref_t wakeref;
+ bool is_enabled;
+
+ is_enabled = false;
+ with_intel_pps_lock(intel_dp, wakeref)
+ is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
+ if (is_enabled == enable)
+ return;
+
+ drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
+ enable ? "enable" : "disable");
+
+ if (enable)
+ intel_pps_backlight_on(intel_dp);
+ else
+ intel_pps_backlight_off(intel_dp);
+}
+
+static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum pipe pipe = intel_dp->pps.pps_pipe;
+ i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+
+ /*
+ * VLV seems to get confused when multiple power sequencers
+ * have the same port selected (even if only one has power/vdd
+ * enabled). The failure manifests as vlv_wait_port_ready() failing.
+ * CHV, on the other hand, doesn't seem to mind having the same port
+ * selected in multiple power sequencers, but let's always clear the
+ * port select when logically disconnecting a power sequencer
+ * from a port.
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ intel_de_write(dev_priv, pp_on_reg, 0);
+ intel_de_posting_read(dev_priv, pp_on_reg);
+
+ intel_dp->pps.pps_pipe = INVALID_PIPE;
+}
+
+static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct intel_encoder *encoder;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
+ "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
+
+ if (intel_dp->pps.pps_pipe != pipe)
+ continue;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
+
+ /* make sure vdd is off before we steal it */
+ vlv_detach_power_sequencer(intel_dp);
+ }
+}
+
+void vlv_pps_init(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+
+ if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
+ intel_dp->pps.pps_pipe != crtc->pipe) {
+ /*
+ * If another power sequencer was being used on this
+ * port previously make sure to turn off vdd there while
+ * we still have control of it.
+ */
+ vlv_detach_power_sequencer(intel_dp);
+ }
+
+ /*
+ * We may be stealing the power
+ * sequencer from another port.
+ */
+ vlv_steal_power_sequencer(dev_priv, crtc->pipe);
+
+ intel_dp->pps.active_pipe = crtc->pipe;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ /* now it's all ours */
+ intel_dp->pps.pps_pipe = crtc->pipe;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps.pps_pipe), encoder->base.base.id,
+ encoder->base.name);
+
+ /* init power sequencer on this pipe and port */
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, true);
+}
+
+static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ if (!edp_have_panel_vdd(intel_dp))
+ return;
+
+ /*
+ * The VDD bit needs a power domain reference, so if the bit is
+ * already enabled when we boot or resume, grab this reference and
+ * schedule a vdd off, so we don't hold on to the reference
+ * indefinitely.
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "VDD left on by BIOS, adjusting state tracking\n");
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+ intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
+ intel_aux_power_domain(dig_port));
+
+ edp_panel_vdd_schedule_off(intel_dp);
+}
+
+bool intel_pps_have_power(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+ bool have_power = false;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ have_power = edp_have_panel_power(intel_dp) &&
+ edp_have_panel_vdd(intel_dp);
+ }
+
+ return have_power;
+}
+
+static void pps_init_timestamps(struct intel_dp *intel_dp)
+{
+ intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+ intel_dp->pps.last_power_on = jiffies;
+ intel_dp->pps.last_backlight_off = jiffies;
+}
+
+static void
+intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 pp_on, pp_off, pp_ctl;
+ struct pps_registers regs;
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ pp_ctl = ilk_get_pp_control(intel_dp);
+
+ /* Ensure PPS is unlocked */
+ if (!HAS_DDI(dev_priv))
+ intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
+
+ pp_on = intel_de_read(dev_priv, regs.pp_on);
+ pp_off = intel_de_read(dev_priv, regs.pp_off);
+
+ /* Pull timing values out of registers */
+ seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
+ seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
+ seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
+ seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
+
+ if (i915_mmio_reg_valid(regs.pp_div)) {
+ u32 pp_div;
+
+ pp_div = intel_de_read(dev_priv, regs.pp_div);
+
+ seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
+ } else {
+ seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
+ }
+}
+
+static void
+intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
+{
+ DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ state_name,
+ seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
+}
+
+static void
+intel_pps_verify_state(struct intel_dp *intel_dp)
+{
+ struct edp_power_seq hw;
+ struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
+
+ intel_pps_readout_hw_state(intel_dp, &hw);
+
+ if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
+ hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
+ DRM_ERROR("PPS state mismatch\n");
+ intel_pps_dump_state("sw", sw);
+ intel_pps_dump_state("hw", &hw);
+ }
+}
+
+static void pps_init_delays(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct edp_power_seq cur, vbt, spec,
+ *final = &intel_dp->pps.pps_delays;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* already initialized? */
+ if (final->t11_t12 != 0)
+ return;
+
+ intel_pps_readout_hw_state(intel_dp, &cur);
+
+ intel_pps_dump_state("cur", &cur);
+
+ vbt = dev_priv->vbt.edp.pps;
+ /* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
+ * of 500ms appears to be too short. Occasionally the panel
+ * just fails to power back on. Increasing the delay to 800ms
+ * seems sufficient to avoid this problem.
+ */
+ if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
+ vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
+ drm_dbg_kms(&dev_priv->drm,
+ "Increasing T12 panel delay as per the quirk to %d\n",
+ vbt.t11_t12);
+ }
+ /* T11_T12 delay is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+ * table multiplies it by 1000 to make it in units of 100usec,
+ * too. */
+ vbt.t11_t12 += 100 * 10;
+
+ /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+ * our hw here, which are all in 100usec. */
+ spec.t1_t3 = 210 * 10;
+ spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+ spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+ spec.t10 = 500 * 10;
+ /* This one is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+ * table multiplies it by 1000 to make it in units of 100usec,
+ * too. */
+ spec.t11_t12 = (510 + 100) * 10;
+
+ intel_pps_dump_state("vbt", &vbt);
+
+ /* Use the max of the register settings and vbt. If both are
+ * unset, fall back to the spec limits. */
+#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
+ spec.field : \
+ max(cur.field, vbt.field))
+ assign_final(t1_t3);
+ assign_final(t8);
+ assign_final(t9);
+ assign_final(t10);
+ assign_final(t11_t12);
+#undef assign_final
+
+#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
+ intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
+ intel_dp->pps.backlight_on_delay = get_delay(t8);
+ intel_dp->pps.backlight_off_delay = get_delay(t9);
+ intel_dp->pps.panel_power_down_delay = get_delay(t10);
+ intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
+
+ drm_dbg_kms(&dev_priv->drm,
+ "panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->pps.panel_power_up_delay,
+ intel_dp->pps.panel_power_down_delay,
+ intel_dp->pps.panel_power_cycle_delay);
+
+ drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
+ intel_dp->pps.backlight_on_delay,
+ intel_dp->pps.backlight_off_delay);
+
+ /*
+ * We override the HW backlight delays to 1 because we do manual waits
+ * on them. For T8, even BSpec recommends doing it. For T9, if we
+ * don't do this, we'll end up waiting for the backlight off delay
+ * twice: once when we do the manual sleep, and once when we disable
+ * the panel and wait for the PP_STATUS bit to become zero.
+ */
+ final->t8 = 1;
+ final->t9 = 1;
+
+ /*
+ * HW has only a 100msec granularity for t11_t12 so round it up
+ * accordingly.
+ */
+ final->t11_t12 = roundup(final->t11_t12, 100 * 10);
+}
+
+static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 pp_on, pp_off, port_sel = 0;
+ int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
+ struct pps_registers regs;
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
+ const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ /*
+ * On some VLV machines the BIOS can leave the VDD
+ * enabled even on power sequencers which aren't
+ * hooked up to any port. This would mess up the
+ * power domain tracking the first time we pick
+ * one of these power sequencers for use since
+ * intel_pps_vdd_on_unlocked() would notice that the VDD was
+ * already on and therefore wouldn't grab the power
+ * domain reference. Disable VDD first to avoid this.
+ * This also avoids spuriously turning the VDD on as
+ * soon as the new power sequencer gets initialized.
+ */
+ if (force_disable_vdd) {
+ u32 pp = ilk_get_pp_control(intel_dp);
+
+ drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
+ "Panel power already on\n");
+
+ if (pp & EDP_FORCE_VDD)
+ drm_dbg_kms(&dev_priv->drm,
+ "VDD already on, disabling first\n");
+
+ pp &= ~EDP_FORCE_VDD;
+
+ intel_de_write(dev_priv, regs.pp_ctrl, pp);
+ }
+
+ pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
+ pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
+
+ /* Haswell doesn't have any port selection bits for the panel
+ * power sequencer any more. */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ port_sel = PANEL_PORT_SELECT_VLV(port);
+ } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ switch (port) {
+ case PORT_A:
+ port_sel = PANEL_PORT_SELECT_DPA;
+ break;
+ case PORT_C:
+ port_sel = PANEL_PORT_SELECT_DPC;
+ break;
+ case PORT_D:
+ port_sel = PANEL_PORT_SELECT_DPD;
+ break;
+ default:
+ MISSING_CASE(port);
+ break;
+ }
+ }
+
+ pp_on |= port_sel;
+
+ intel_de_write(dev_priv, regs.pp_on, pp_on);
+ intel_de_write(dev_priv, regs.pp_off, pp_off);
+
+ /*
+ * Compute the divisor for the pp clock, simply match the Bspec formula.
+ */
+ if (i915_mmio_reg_valid(regs.pp_div)) {
+ intel_de_write(dev_priv, regs.pp_div,
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
+ } else {
+ u32 pp_ctl;
+
+ pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
+ pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
+ pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
+ intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+ intel_de_read(dev_priv, regs.pp_on),
+ intel_de_read(dev_priv, regs.pp_off),
+ i915_mmio_reg_valid(regs.pp_div) ?
+ intel_de_read(dev_priv, regs.pp_div) :
+ (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
+}
+
+void intel_pps_encoder_reset(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ /*
+ * Reinit the power sequencer also on the resume path, in case
+ * BIOS did something nasty with it.
+ */
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ vlv_initial_power_sequencer_setup(intel_dp);
+
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, false);
+
+ intel_pps_vdd_sanitize(intel_dp);
+ }
+}
+
+void intel_pps_init(struct intel_dp *intel_dp)
+{
+ INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
+
+ pps_init_timestamps(intel_dp);
+
+ intel_pps_encoder_reset(intel_dp);
+}
+
+void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
+{
+ int pps_num;
+ int pps_idx;
+
+ if (HAS_DDI(dev_priv))
+ return;
+ /*
+ * This w/a is needed at least on CPT/PPT, but to be sure apply it
+ * everywhere where registers can be write protected.
+ */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ pps_num = 2;
+ else
+ pps_num = 1;
+
+ for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
+ u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
+
+ val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
+ intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
+ }
+}
+
+void intel_pps_setup(struct drm_i915_private *i915)
+{
+ if (HAS_PCH_SPLIT(i915) || IS_GEN9_LP(i915))
+ i915->pps_mmio_base = PCH_PPS_BASE;
+ else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ i915->pps_mmio_base = VLV_PPS_BASE;
+ else
+ i915->pps_mmio_base = PPS_BASE;
+}
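Illustrative sketch, not part of this patch: one way a caller might pair the unlocked VDD helpers above around an AUX access, assuming a hypothetical do_aux_transfer() stands in for the real work.

static void example_aux_access_with_vdd(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool vdd = false;

	with_intel_pps_lock(intel_dp, wakeref) {
		/* Force VDD on; remember whether this call enabled it. */
		vdd = intel_pps_vdd_on_unlocked(intel_dp);

		/* do_aux_transfer(intel_dp); -- hypothetical payload */

		/*
		 * Drop our VDD request; sync=false keeps VDD on for now and
		 * lets the delayed vdd-off work turn it off later.
		 */
		if (vdd)
			intel_pps_vdd_off_unlocked(intel_dp, false);
	}
}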
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
new file mode 100644
index 000000000000..fbbcca782e7b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_PPS_H__
+#define __INTEL_PPS_H__
+
+#include <linux/types.h>
+
+#include "intel_wakeref.h"
+
+struct drm_i915_private;
+struct intel_connector;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+
+intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp);
+intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref);
+
+#define with_intel_pps_lock(dp, wf) \
+ for ((wf) = intel_pps_lock(dp); (wf); (wf) = intel_pps_unlock((dp), (wf)))
+
+void intel_pps_backlight_on(struct intel_dp *intel_dp);
+void intel_pps_backlight_off(struct intel_dp *intel_dp);
+void intel_pps_backlight_power(struct intel_connector *connector, bool enable);
+
+bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp);
+void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync);
+void intel_pps_on_unlocked(struct intel_dp *intel_dp);
+void intel_pps_off_unlocked(struct intel_dp *intel_dp);
+void intel_pps_check_power_unlocked(struct intel_dp *intel_dp);
+
+void intel_pps_vdd_on(struct intel_dp *intel_dp);
+void intel_pps_on(struct intel_dp *intel_dp);
+void intel_pps_off(struct intel_dp *intel_dp);
+void intel_pps_vdd_off_sync(struct intel_dp *intel_dp);
+bool intel_pps_have_power(struct intel_dp *intel_dp);
+void intel_pps_wait_power_cycle(struct intel_dp *intel_dp);
+
+void intel_pps_init(struct intel_dp *intel_dp);
+void intel_pps_encoder_reset(struct intel_dp *intel_dp);
+void intel_pps_reset_all(struct drm_i915_private *i915);
+
+void vlv_pps_init(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+
+void intel_pps_unlock_regs_wa(struct drm_i915_private *i915);
+void intel_pps_setup(struct drm_i915_private *i915);
+
+#endif /* __INTEL_PPS_H__ */
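Illustrative sketch, not part of this patch: with_intel_pps_lock() above runs its body exactly once, assuming intel_pps_unlock() returns 0; open-coded usage is roughly:

	intel_wakeref_t wakeref;

	wakeref = intel_pps_lock(intel_dp);	/* takes pps_mutex + display power */
	/* ... PPS register access ... */
	wakeref = intel_pps_unlock(intel_dp, wakeref);	/* presumably returns 0, ending the loop */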
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index b3631b722de3..850cb7f5b332 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -28,9 +28,10 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_display_types.h"
+#include "intel_dp_aux.h"
+#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_sprite.h"
-#include "intel_hdmi.h"
/**
* DOC: Panel Self Refresh (PSR/SRD)
@@ -305,7 +306,7 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
- if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_NO_PSR)) {
+ if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
drm_dbg_kms(&dev_priv->drm,
"PSR support not currently available for this panel\n");
return;
@@ -811,6 +812,13 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
&crtc_state->hw.adjusted_mode;
int psr_setup_time;
+ /*
+ * Current PSR panels don't work reliably with VRR enabled, so
+ * if VRR is enabled, do not enable PSR.
+ */
+ if (crtc_state->vrr.enable)
+ return;
+
if (!CAN_PSR(dev_priv))
return;
@@ -1185,7 +1193,9 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
- u32 val;
+ const struct drm_rect *clip;
+ u32 val, offset;
+ int ret, x, y;
if (!crtc_state->enable_psr2_sel_fetch)
return;
@@ -1196,16 +1206,25 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
if (!val || plane->id == PLANE_CURSOR)
return;
- val = plane_state->uapi.dst.y1 << 16 | plane_state->uapi.dst.x1;
+ clip = &plane_state->psr2_sel_fetch_area;
+
+ val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
+ val |= plane_state->uapi.dst.x1;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
- val = plane_state->color_plane[color_plane].y << 16;
- val |= plane_state->color_plane[color_plane].x;
+ /* TODO: consider auxiliary surfaces */
+ x = plane_state->uapi.src.x1 >> 16;
+ y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
+ ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
+ if (ret)
+ drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n",
+ ret);
+ val = y << 16 | x;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
val);
/* Sizes are 0 based */
- val = ((drm_rect_height(&plane_state->uapi.src) >> 16) - 1) << 16;
+ val = (drm_rect_height(clip) - 1) << 16;
val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
}
@@ -1237,9 +1256,11 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
if (clip->y1 == -1)
goto exit;
+ drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
+
val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
- val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(DIV_ROUND_UP(clip->y2, 4) + 1);
+ val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
exit:
crtc_state->psr2_man_track_ctl = val;
}
@@ -1264,8 +1285,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
struct intel_plane_state *new_plane_state, *old_plane_state;
- struct drm_rect pipe_clip = { .y1 = -1 };
struct intel_plane *plane;
bool full_update = false;
int i, ret;
@@ -1277,13 +1298,25 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (ret)
return ret;
+ /*
+ * Calculate the minimal selective fetch area of each plane and the
+ * pipe damaged area.
+ * In the next loop the plane selective fetch area will actually be set
+ * using the whole pipe damaged area.
+ */
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
- struct drm_rect temp;
+ struct drm_rect src, damaged_area = { .y1 = -1 };
+ struct drm_mode_rect *damaged_clips;
+ u32 num_clips, j;
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
continue;
+ if (!new_plane_state->uapi.visible &&
+ !old_plane_state->uapi.visible)
+ continue;
+
/*
* TODO: Not clear how to handle planes with negative position,
* also planes are not updated if they have a negative X
@@ -1295,18 +1328,94 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
break;
}
- if (!new_plane_state->uapi.visible)
- continue;
+ num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi);
/*
- * For now doing a selective fetch in the whole plane area,
- * optimizations will come in the future.
+ * If the visibility changed or the plane moved, mark the whole plane
+ * area as damaged, as it needs a complete redraw in both the old and
+ * new positions.
*/
- temp.y1 = new_plane_state->uapi.dst.y1;
- temp.y2 = new_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &temp);
+ if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
+ !drm_rect_equals(&new_plane_state->uapi.dst,
+ &old_plane_state->uapi.dst)) {
+ if (old_plane_state->uapi.visible) {
+ damaged_area.y1 = old_plane_state->uapi.dst.y1;
+ damaged_area.y2 = old_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area);
+ }
+
+ if (new_plane_state->uapi.visible) {
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area);
+ }
+ continue;
+ } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha ||
+ (!num_clips &&
+ new_plane_state->uapi.fb != old_plane_state->uapi.fb)) {
+ /*
+ * If the plane doesn't have damaged areas but the
+ * framebuffer or alpha changed, mark the whole
+ * plane area as damaged.
+ */
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area);
+ continue;
+ }
+
+ drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
+ damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi);
+
+ for (j = 0; j < num_clips; j++) {
+ struct drm_rect clip;
+
+ clip.x1 = damaged_clips[j].x1;
+ clip.y1 = damaged_clips[j].y1;
+ clip.x2 = damaged_clips[j].x2;
+ clip.y2 = damaged_clips[j].y2;
+ if (drm_rect_intersect(&clip, &src))
+ clip_area_update(&damaged_area, &clip);
+ }
+
+ if (damaged_area.y1 == -1)
+ continue;
+
+ damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
+ damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
+ clip_area_update(&pipe_clip, &damaged_area);
+ }
+
+ if (full_update)
+ goto skip_sel_fetch_set_loop;
+
+ /* It must be aligned to 4 lines */
+ pipe_clip.y1 -= pipe_clip.y1 % 4;
+ if (pipe_clip.y2 % 4)
+ pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4;
+
+ /*
+ * Now that we have the pipe damaged area, check if it intersects with
+ * each plane; if it does, set the plane selective fetch area.
+ */
+ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i) {
+ struct drm_rect *sel_fetch_area, inter;
+
+ if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
+ !new_plane_state->uapi.visible)
+ continue;
+
+ inter = pipe_clip;
+ if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
+ continue;
+
+ sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
+ sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
+ sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
}
+skip_sel_fetch_set_loop:
psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
return 0;
}
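Illustrative sketch, not part of this patch: the selective-update region computed above must start and end on 4-line boundaries, so pipe_clip is rounded as follows (hypothetical helper name).

static void example_align_su_region(struct drm_rect *clip)
{
	/* Round the start down to a multiple of 4 lines. */
	clip->y1 -= clip->y1 % 4;

	/* Round the end up to a multiple of 4 lines. */
	if (clip->y2 % 4)
		clip->y2 = ((clip->y2 / 4) + 1) * 4;
}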
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 019a2d6d807a..993543334a1e 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -49,6 +49,8 @@
#include "intel_psr.h"
#include "intel_dsi.h"
#include "intel_sprite.h"
+#include "i9xx_plane.h"
+#include "intel_vrr.h"
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
@@ -61,13 +63,15 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
1000 * adjusted_mode->crtc_htotal);
}
-/* FIXME: We should instead only take spinlocks once for the entire update
- * instead of once per mmio. */
-#if IS_ENABLED(CONFIG_PROVE_LOCKING)
-#define VBLANK_EVASION_TIME_US 250
-#else
-#define VBLANK_EVASION_TIME_US 100
-#endif
+static int intel_mode_vblank_start(const struct drm_display_mode *mode)
+{
+ int vblank_start = mode->crtc_vblank_start;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vblank_start = DIV_ROUND_UP(vblank_start, 2);
+
+ return vblank_start;
+}
/**
* intel_pipe_update_start() - start update of a set of display registers
@@ -97,9 +101,10 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
if (new_crtc_state->uapi.async_flip)
return;
- vblank_start = adjusted_mode->crtc_vblank_start;
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
- vblank_start = DIV_ROUND_UP(vblank_start, 2);
+ if (new_crtc_state->vrr.enable)
+ vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
+ else
+ vblank_start = intel_mode_vblank_start(adjusted_mode);
/* FIXME needs to be calibrated sensibly */
min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
@@ -187,6 +192,36 @@ irq_disable:
local_irq_disable();
}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
+static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
+{
+ u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time));
+ unsigned int h;
+
+ h = ilog2(delta >> 9);
+ if (h >= ARRAY_SIZE(crtc->debug.vbl.times))
+ h = ARRAY_SIZE(crtc->debug.vbl.times) - 1;
+ crtc->debug.vbl.times[h]++;
+
+ crtc->debug.vbl.sum += delta;
+ if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min)
+ crtc->debug.vbl.min = delta;
+ if (delta > crtc->debug.vbl.max)
+ crtc->debug.vbl.max = delta;
+
+ if (delta > 1000 * VBLANK_EVASION_TIME_US) {
+ drm_dbg_kms(crtc->base.dev,
+ "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
+ pipe_name(crtc->pipe),
+ div_u64(delta, 1000),
+ VBLANK_EVASION_TIME_US);
+ crtc->debug.vbl.over++;
+ }
+}
+#else
+static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
+#endif
+
/**
* intel_pipe_update_end() - end update of a set of display registers
* @new_crtc_state: the new crtc state
@@ -235,6 +270,9 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
local_irq_enable();
+ /* Send VRR Push to terminate Vblank */
+ intel_vrr_send_push(new_crtc_state);
+
if (intel_vgpu_active(dev_priv))
return;
@@ -249,15 +287,8 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
crtc->debug.min_vbl, crtc->debug.max_vbl,
crtc->debug.scanline_start, scanline_end);
}
-#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
- else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
- VBLANK_EVASION_TIME_US)
- drm_warn(&dev_priv->drm,
- "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
- pipe_name(pipe),
- ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
- VBLANK_EVASION_TIME_US);
-#endif
+
+ dbg_vblank_evade(crtc, end_vbl_time);
}
int intel_plane_check_stride(const struct intel_plane_state *plane_state)
@@ -618,13 +649,19 @@ skl_program_scaler(struct intel_plane *plane,
/* Preoffset values for YUV to RGB Conversion */
#define PREOFF_YUV_TO_RGB_HI 0x1800
-#define PREOFF_YUV_TO_RGB_ME 0x1F00
+#define PREOFF_YUV_TO_RGB_ME 0x0000
#define PREOFF_YUV_TO_RGB_LO 0x1800
#define ROFF(x) (((x) & 0xffff) << 16)
#define GOFF(x) (((x) & 0xffff) << 0)
#define BOFF(x) (((x) & 0xffff) << 16)
+/*
+ * Programs the input color space conversion stage for ICL HDR planes.
+ * Note that it is assumed that this stage always happens after YUV
+ * range correction. Thus, the input to this stage is assumed to be
+ * in full-range YCbCr.
+ */
static void
icl_program_input_csc(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -672,52 +709,7 @@ icl_program_input_csc(struct intel_plane *plane,
0x0, 0x7800, 0x7F10,
},
};
-
- /* Matrix for Limited Range to Full Range Conversion */
- static const u16 input_csc_matrix_lr[][9] = {
- /*
- * BT.601 Limted range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164384, 0.000, 1.596027,
- * 1.164384, -0.39175, -0.812813,
- * 1.164384, 2.017232, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT601] = {
- 0x7CC8, 0x7950, 0x0,
- 0x8D00, 0x7950, 0x9C88,
- 0x0, 0x7950, 0x6810,
- },
- /*
- * BT.709 Limited range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164384, 0.000, 1.792741,
- * 1.164384, -0.213249, -0.532909,
- * 1.164384, 2.112402, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT709] = {
- 0x7E58, 0x7950, 0x0,
- 0x8888, 0x7950, 0xADA8,
- 0x0, 0x7950, 0x6870,
- },
- /*
- * BT.2020 Limited range YCbCr -> full range RGB
- * The matrix required is :
- * [1.164, 0.000, 1.678,
- * 1.164, -0.1873, -0.6504,
- * 1.164, 2.1417, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT2020] = {
- 0x7D70, 0x7950, 0x0,
- 0x8A68, 0x7950, 0xAC00,
- 0x0, 0x7950, 0x6890,
- },
- };
- const u16 *csc;
-
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- csc = input_csc_matrix[plane_state->hw.color_encoding];
- else
- csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
+ const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding];
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
ROFF(csc[0]) | GOFF(csc[1]));
@@ -734,14 +726,8 @@ icl_program_input_csc(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
PREOFF_YUV_TO_RGB_HI);
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- 0);
- else
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- PREOFF_YUV_TO_RGB_ME);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
PREOFF_YUV_TO_RGB_LO);
intel_de_write_fw(dev_priv,
@@ -755,7 +741,8 @@ icl_program_input_csc(struct intel_plane *plane,
static void
skl_plane_async_flip(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+ const struct intel_plane_state *plane_state,
+ bool async_flip)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
unsigned long irqflags;
@@ -766,6 +753,9 @@ skl_plane_async_flip(struct intel_plane *plane,
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
+ if (async_flip)
+ plane_ctl |= PLANE_CTL_ASYNC_FLIP;
+
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
@@ -851,6 +841,10 @@ skl_program_plane(struct intel_plane *plane,
if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
icl_program_input_csc(plane, crtc_state, plane_state);
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
+ intel_uncore_write64_fw(&dev_priv->uncore,
+ PLANE_CC_VAL(pipe, plane_id), plane_state->ccval);
+
skl_write_plane_wm(plane, crtc_state);
intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id),
@@ -942,6 +936,28 @@ skl_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
+static void
+skl_plane_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+skl_plane_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
static void i9xx_plane_linear_gamma(u16 gamma[8])
{
/* The points are not evenly spaced. */
@@ -1835,7 +1851,26 @@ g4x_sprite_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation)
{
- return 16384;
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return min(4096 * cpp, 16 * 1024);
+ else
+ return 16 * 1024;
+}
+
+static unsigned int
+hsw_sprite_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
+ return min(8192 * cpp, 16 * 1024);
}
static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
@@ -2350,7 +2385,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)) {
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) {
drm_dbg_kms(&dev_priv->drm,
"Y/Yf tiling not supported in IF-ID mode\n");
return -EINVAL;
@@ -2840,6 +2876,7 @@ static const u64 skl_plane_format_modifiers_ccs[] = {
static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
@@ -2848,6 +2885,7 @@ static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
@@ -3038,6 +3076,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
break;
default:
return false;
@@ -3274,7 +3313,13 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->get_hw_state = skl_plane_get_hw_state;
plane->check_plane = skl_plane_check;
plane->min_cdclk = skl_plane_min_cdclk;
- plane->async_flip = skl_plane_async_flip;
+
+ if (plane_id == PLANE_PRIMARY) {
+ plane->need_async_flip_disable_wa = IS_GEN_RANGE(dev_priv, 9, 10);
+ plane->async_flip = skl_plane_async_flip;
+ plane->enable_flip_done = skl_plane_enable_flip_done;
+ plane->disable_flip_done = skl_plane_disable_flip_done;
+ }
if (INTEL_GEN(dev_priv) >= 11)
formats = icl_get_plane_formats(dev_priv, pipe,
@@ -3382,11 +3427,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
return plane;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- plane->max_stride = i9xx_plane_max_stride;
plane->update_plane = vlv_update_plane;
plane->disable_plane = vlv_disable_plane;
plane->get_hw_state = vlv_plane_get_hw_state;
plane->check_plane = vlv_sprite_check;
+ plane->max_stride = i965_plane_max_stride;
plane->min_cdclk = vlv_plane_min_cdclk;
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
@@ -3400,16 +3445,18 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_funcs = &vlv_sprite_funcs;
} else if (INTEL_GEN(dev_priv) >= 7) {
- plane->max_stride = g4x_sprite_max_stride;
plane->update_plane = ivb_update_plane;
plane->disable_plane = ivb_disable_plane;
plane->get_hw_state = ivb_plane_get_hw_state;
plane->check_plane = g4x_sprite_check;
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ plane->max_stride = hsw_sprite_max_stride;
plane->min_cdclk = hsw_plane_min_cdclk;
- else
+ } else {
+ plane->max_stride = g4x_sprite_max_stride;
plane->min_cdclk = ivb_sprite_min_cdclk;
+ }
formats = snb_plane_formats;
num_formats = ARRAY_SIZE(snb_plane_formats);
@@ -3417,11 +3464,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_funcs = &snb_sprite_funcs;
} else {
- plane->max_stride = g4x_sprite_max_stride;
plane->update_plane = g4x_update_plane;
plane->disable_plane = g4x_disable_plane;
plane->get_hw_state = g4x_plane_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ plane->max_stride = g4x_sprite_max_stride;
plane->min_cdclk = g4x_sprite_min_cdclk;
modifiers = i9xx_plane_format_modifiers;
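Illustrative sketch, not part of this patch: how the new sprite stride clamps above work out for a few pixel sizes (cpp = bytes per pixel).

/*
 * g4x_sprite_max_stride(), X-tiled:
 *   cpp = 1: min(4096 * 1, 16 * 1024) =  4096 bytes
 *   cpp = 4: min(4096 * 4, 16 * 1024) = 16384 bytes
 *
 * hsw_sprite_max_stride():
 *   cpp = 1: min(8192 * 1, 16 * 1024) =  8192 bytes
 *   cpp = 4: min(8192 * 4, 16 * 1024) = 16384 bytes
 *
 * The pixel-count limits therefore only bite for small cpp.
 */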
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index cd2104ba1ca1..76126dd8d584 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -17,6 +17,16 @@ struct drm_i915_private;
struct intel_crtc_state;
struct intel_plane_state;
+/*
+ * FIXME: We should instead only take spinlocks once for the entire update
+ * instead of once per mmio.
+ */
+#if IS_ENABLED(CONFIG_PROVE_LOCKING)
+#define VBLANK_EVASION_TIME_US 250
+#else
+#define VBLANK_EVASION_TIME_US 100
+#endif
+
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 4346bc1a747a..2cefc13535a0 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -23,36 +23,6 @@ static const char *tc_port_mode_name(enum tc_port_mode mode)
return names[mode];
}
-static void
-tc_port_load_fia_params(struct drm_i915_private *i915,
- struct intel_digital_port *dig_port)
-{
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(i915, port);
- u32 modular_fia;
-
- if (INTEL_INFO(i915)->display.has_modular_fia) {
- modular_fia = intel_uncore_read(&i915->uncore,
- PORT_TX_DFLEXDPSP(FIA1));
- drm_WARN_ON(&i915->drm, modular_fia == 0xffffffff);
- modular_fia &= MODULAR_FIA_MASK;
- } else {
- modular_fia = 0;
- }
-
- /*
- * Each Modular FIA instance houses 2 TC ports. In SOC that has more
- * than two TC ports, there are multiple instances of Modular FIA.
- */
- if (modular_fia) {
- dig_port->tc_phy_fia = tc_port / 2;
- dig_port->tc_phy_fia_idx = tc_port % 2;
- } else {
- dig_port->tc_phy_fia = FIA1;
- dig_port->tc_phy_fia_idx = tc_port;
- }
-}
-
static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port)
{
@@ -262,7 +232,7 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
mask |= BIT(TC_PORT_LEGACY);
/* The sink can be connected only in a single mode. */
- if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
+ if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1))
tc_port_fixup_legacy_flag(dig_port, mask);
return mask;
@@ -646,6 +616,43 @@ void intel_tc_port_put_link(struct intel_digital_port *dig_port)
mutex_unlock(&dig_port->tc_lock);
}
+static bool
+tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
+{
+ intel_wakeref_t wakeref;
+ u32 val;
+
+ if (!INTEL_INFO(i915)->display.has_modular_fia)
+ return false;
+
+ wakeref = tc_cold_block(dig_port);
+ val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
+ tc_cold_unblock(dig_port, wakeref);
+
+ drm_WARN_ON(&i915->drm, val == 0xffffffff);
+
+ return val & MODULAR_FIA_MASK;
+}
+
+static void
+tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
+{
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(i915, port);
+
+ /*
+ * Each Modular FIA instance houses 2 TC ports. In SOCs that have more
+ * than two TC ports, there are multiple instances of Modular FIA.
+ */
+ if (tc_has_modular_fia(i915, dig_port)) {
+ dig_port->tc_phy_fia = tc_port / 2;
+ dig_port->tc_phy_fia_idx = tc_port % 2;
+ } else {
+ dig_port->tc_phy_fia = FIA1;
+ dig_port->tc_phy_fia_idx = tc_port;
+ }
+}
+
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
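Illustrative sketch, not part of this patch: the port-to-FIA mapping tc_port_load_fia_params() establishes when a modular FIA is detected.

/*
 * tc_phy_fia = tc_port / 2, tc_phy_fia_idx = tc_port % 2:
 *
 *   tc_port 0 -> FIA instance 0, index 0
 *   tc_port 1 -> FIA instance 0, index 1
 *   tc_port 2 -> FIA instance 1, index 0
 *   tc_port 3 -> FIA instance 1, index 1
 *
 * Without modular FIA, every port stays on FIA1 and is indexed by tc_port.
 */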
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 49b4b5fca941..187ec573de59 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -319,6 +319,8 @@ enum vbt_gmbus_ddi {
ICL_DDC_BUS_DDI_A = 0x1,
ICL_DDC_BUS_DDI_B,
TGL_DDC_BUS_DDI_C,
+ RKL_DDC_BUS_DDI_D = 0x3,
+ RKL_DDC_BUS_DDI_E,
ICL_DDC_BUS_PORT_1 = 0x4,
ICL_DDC_BUS_PORT_2,
ICL_DDC_BUS_PORT_3,
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index e2716a67b281..f58cc5700784 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -454,8 +454,6 @@ int intel_dsc_compute_params(struct intel_encoder *encoder,
else if (vdsc_cfg->bits_per_component == 12)
vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
- /* RC_MODEL_SIZE is a constant across all configurations */
- vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
/* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */
vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
(vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
@@ -741,7 +739,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
/* Populate PICTURE_PARAMETER_SET_9 registers */
pps_val = 0;
- pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
+ pps_val |= DSC_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) |
DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
new file mode 100644
index 000000000000..a9c2b2fd9252
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_vrr.h"
+
+bool intel_vrr_is_capable(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp;
+ const struct drm_display_info *info = &connector->display_info;
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+ connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return false;
+
+ intel_dp = intel_attached_dp(to_intel_connector(connector));
+ /*
+ * The DP sink is capable of VRR video timings if the
+ * Ignore MSA bit is set in the DPCD.
+ * The EDID monitor range should also be at least 10 for a reasonable
+ * Adaptive Sync or Variable Refresh Rate end user experience.
+ */
+ return HAS_VRR(i915) &&
+ drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd) &&
+ info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
+}
+
+void
+intel_vrr_check_modeset(struct intel_atomic_state *state)
+{
+ int i;
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (new_crtc_state->uapi.vrr_enabled !=
+ old_crtc_state->uapi.vrr_enabled)
+ new_crtc_state->uapi.mode_changed = true;
+ }
+}
+
+/*
+ * Without VRR, registers get latched at:
+ * vblank_start
+ *
+ * With VRR the earliest registers can get latched is:
+ * intel_vrr_vmin_vblank_start(), which if we want to maintain
+ * the correct min vtotal is >=vblank_start+1
+ *
+ * The latest point registers can get latched is the vmax decision boundary:
+ * intel_vrr_vmax_vblank_start()
+ *
+ * Between those two points the vblank exit starts (and hence registers get
+ * latched) ASAP after a push is sent.
+ *
+ * framestart_delay is programmable 0-3.
+ */
+static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ /* The hw imposes the extra scanline before frame start */
+ return crtc_state->vrr.pipeline_full + i915->framestart_delay + 1;
+}
+
+int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state)
+{
+ /* Min vblank actually determined by flipline that is always >=vmin+1 */
+ return crtc_state->vrr.vmin + 1 - intel_vrr_vblank_exit_length(crtc_state);
+}
+
+int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->vrr.vmax - intel_vrr_vblank_exit_length(crtc_state);
+}
+
+void
+intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ const struct drm_display_info *info = &connector->base.display_info;
+ int vmin, vmax;
+
+ if (!intel_vrr_is_capable(&connector->base))
+ return;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return;
+
+ if (!crtc_state->uapi.vrr_enabled)
+ return;
+
+ vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000,
+ adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq);
+ vmax = adjusted_mode->crtc_clock * 1000 /
+ (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);
+
+ vmin = max_t(int, vmin, adjusted_mode->crtc_vtotal);
+ vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);
+
+ if (vmin >= vmax)
+ return;
+
+ /*
+ * flipline determines the min vblank length the hardware will
+ * generate, and flipline>=vmin+1, hence we reduce vmin by one
+ * to make sure we can get the actual min vblank length.
+ */
+ crtc_state->vrr.vmin = vmin - 1;
+ crtc_state->vrr.vmax = vmax;
+ crtc_state->vrr.enable = true;
+
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin + 1;
+
+ /*
+ * FIXME: s/4/framestart_delay+1/ to get consistent
+ * earliest/latest points for register latching regardless
+ * of the framestart_delay used?
+ *
+ * FIXME: this really needs the extra scanline to provide consistent
+ * behaviour for all framestart_delay values. Otherwise with
+ * framestart_delay==3 we will end up extending the min vblank by
+ * one extra line.
+ */
+ crtc_state->vrr.pipeline_full =
+ min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 4 - 1);
+
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
+
+void intel_vrr_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 trans_vrr_ctl;
+
+ if (!crtc_state->vrr.enable)
+ return;
+
+ trans_vrr_ctl = VRR_CTL_VRR_ENABLE |
+ VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
+ VRR_CTL_PIPELINE_FULL(crtc_state->vrr.pipeline_full) |
+ VRR_CTL_PIPELINE_FULL_OVERRIDE;
+
+ intel_de_write(dev_priv, TRANS_VRR_VMIN(cpu_transcoder), crtc_state->vrr.vmin - 1);
+ intel_de_write(dev_priv, TRANS_VRR_VMAX(cpu_transcoder), crtc_state->vrr.vmax - 1);
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), trans_vrr_ctl);
+ intel_de_write(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder), crtc_state->vrr.flipline - 1);
+ intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN);
+}
+
+void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!crtc_state->vrr.enable)
+ return;
+
+ intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder),
+ TRANS_PUSH_EN | TRANS_PUSH_SEND);
+}
+
+void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+
+ if (!old_crtc_state->vrr.enable)
+ return;
+
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), 0);
+ intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 0);
+}
+
+void intel_vrr_get_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 trans_vrr_ctl;
+
+ trans_vrr_ctl = intel_de_read(dev_priv, TRANS_VRR_CTL(cpu_transcoder));
+ crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
+ if (!crtc_state->vrr.enable)
+ return;
+
+ if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE)
+ crtc_state->vrr.pipeline_full = REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);
+ if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN)
+ crtc_state->vrr.flipline = intel_de_read(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder)) + 1;
+ crtc_state->vrr.vmax = intel_de_read(dev_priv, TRANS_VRR_VMAX(cpu_transcoder)) + 1;
+ crtc_state->vrr.vmin = intel_de_read(dev_priv, TRANS_VRR_VMIN(cpu_transcoder)) + 1;
+
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
new file mode 100644
index 000000000000..fac01bf4ab50
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_VRR_H__
+#define __INTEL_VRR_H__
+
+#include <linux/types.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+struct intel_crtc;
+
+bool intel_vrr_is_capable(struct drm_connector *connector);
+void intel_vrr_check_modeset(struct intel_atomic_state *state);
+void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+void intel_vrr_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_vrr_send_push(const struct intel_crtc_state *crtc_state);
+void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state);
+void intel_vrr_get_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
+int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
+int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_VRR_H__ */
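A rough worked example of the vmin/vmax computation in intel_vrr_compute_config() above (numbers chosen purely for illustration): for a timing with crtc_clock = 594000 kHz, crtc_htotal = 4400 and crtc_vtotal = 1125 (a 120 Hz mode) on a panel advertising a 40-120 Hz monitor range,

	vmin = DIV_ROUND_UP(594000 * 1000, 4400 * 120) = 1125
	vmax = 594000 * 1000 / (4400 * 40)             = 3375

so the transcoder may stretch the frame anywhere from the nominal vtotal up to three times it; vrr.vmin is then stored as 1124 so that flipline (vrr.vmin + 1 = 1125) still yields the true minimum vblank length.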
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
new file mode 100644
index 000000000000..9e508e7d4629
--- /dev/null
+++ b/drivers/gpu/drm/i915/dma_resv_utils.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/dma-resv.h>
+
+#include "dma_resv_utils.h"
+
+void dma_resv_prune(struct dma_resv *resv)
+{
+ if (dma_resv_trylock(resv)) {
+ if (dma_resv_test_signaled_rcu(resv, true))
+ dma_resv_add_excl_fence(resv, NULL);
+ dma_resv_unlock(resv);
+ }
+}
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.h b/drivers/gpu/drm/i915/dma_resv_utils.h
new file mode 100644
index 000000000000..b9d8fb5f8367
--- /dev/null
+++ b/drivers/gpu/drm/i915/dma_resv_utils.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DMA_RESV_UTILS_H
+#define DMA_RESV_UTILS_H
+
+struct dma_resv;
+
+void dma_resv_prune(struct dma_resv *resv);
+
+#endif /* DMA_RESV_UTILS_H */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 4fd38101bb56..4d2f40cf237b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -72,6 +72,8 @@
#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
+#include "gt/intel_execlists_submission.h" /* virtual_engine */
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "i915_gem_context.h"
@@ -333,13 +335,12 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
return e;
}
-static void i915_gem_context_free(struct i915_gem_context *ctx)
+void i915_gem_context_release(struct kref *ref)
{
- GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
- spin_lock(&ctx->i915->gem.contexts.lock);
- list_del(&ctx->link);
- spin_unlock(&ctx->i915->gem.contexts.lock);
+ trace_i915_context_free(ctx);
+ GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
mutex_destroy(&ctx->engines_mutex);
mutex_destroy(&ctx->lut_mutex);
@@ -353,37 +354,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
kfree_rcu(ctx, rcu);
}
-static void contexts_free_all(struct llist_node *list)
-{
- struct i915_gem_context *ctx, *cn;
-
- llist_for_each_entry_safe(ctx, cn, list, free_link)
- i915_gem_context_free(ctx);
-}
-
-static void contexts_flush_free(struct i915_gem_contexts *gc)
-{
- contexts_free_all(llist_del_all(&gc->free_list));
-}
-
-static void contexts_free_worker(struct work_struct *work)
-{
- struct i915_gem_contexts *gc =
- container_of(work, typeof(*gc), free_work);
-
- contexts_flush_free(gc);
-}
-
-void i915_gem_context_release(struct kref *ref)
-{
- struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
- struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
-
- trace_i915_context_free(ctx);
- if (llist_add(&ctx->free_link, &gc->free_list))
- schedule_work(&gc->free_work);
-}
-
static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
{
@@ -438,7 +408,7 @@ __active_engine(struct i915_request *rq, struct intel_engine_cs **active)
}
if (i915_request_is_active(rq)) {
- if (!i915_request_completed(rq))
+ if (!__i915_request_is_complete(rq))
*active = locked;
ret = true;
}
@@ -453,6 +423,9 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
struct intel_engine_cs *engine = NULL;
struct i915_request *rq;
+ if (intel_context_has_inflight(ce))
+ return intel_context_inflight(ce);
+
if (!ce->timeline)
return NULL;
@@ -632,6 +605,10 @@ static void context_close(struct i915_gem_context *ctx)
*/
lut_close(ctx);
+ spin_lock(&ctx->i915->gem.contexts.lock);
+ list_del(&ctx->link);
+ spin_unlock(&ctx->i915->gem.contexts.lock);
+
mutex_unlock(&ctx->mutex);
/*
@@ -740,7 +717,8 @@ err_free:
}
static inline struct i915_gem_engines *
-__context_engines_await(const struct i915_gem_context *ctx)
+__context_engines_await(const struct i915_gem_context *ctx,
+ bool *user_engines)
{
struct i915_gem_engines *engines;
@@ -749,6 +727,10 @@ __context_engines_await(const struct i915_gem_context *ctx)
engines = rcu_dereference(ctx->engines);
GEM_BUG_ON(!engines);
+ if (user_engines)
+ *user_engines = i915_gem_context_user_engines(ctx);
+
+ /* successful await => strong mb */
if (unlikely(!i915_sw_fence_await(&engines->fence)))
continue;
@@ -772,7 +754,7 @@ context_apply_all(struct i915_gem_context *ctx,
struct intel_context *ce;
int err = 0;
- e = __context_engines_await(ctx);
+ e = __context_engines_await(ctx, NULL);
for_each_gem_engine(ce, e, it) {
err = fn(ce, data);
if (err)
@@ -849,9 +831,6 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
!HAS_EXECLISTS(i915))
return ERR_PTR(-EINVAL);
- /* Reap the stale contexts */
- contexts_flush_free(&i915->gem.contexts);
-
ctx = __create_context(i915);
if (IS_ERR(ctx))
return ctx;
@@ -896,23 +875,11 @@ static void init_contexts(struct i915_gem_contexts *gc)
{
spin_lock_init(&gc->lock);
INIT_LIST_HEAD(&gc->list);
-
- INIT_WORK(&gc->free_work, contexts_free_worker);
- init_llist_head(&gc->free_list);
}
void i915_gem_init__contexts(struct drm_i915_private *i915)
{
init_contexts(&i915->gem.contexts);
- drm_dbg(&i915->drm, "%s context support initialized\n",
- DRIVER_CAPS(i915)->has_logical_contexts ?
- "logical" : "fake");
-}
-
-void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
-{
- flush_work(&i915->gem.contexts.free_work);
- rcu_barrier(); /* and flush the left over RCU frees */
}
static int gem_context_register(struct i915_gem_context *ctx,
@@ -988,7 +955,6 @@ err:
void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_private *i915 = file_priv->dev_priv;
struct i915_address_space *vm;
struct i915_gem_context *ctx;
unsigned long idx;
@@ -1000,8 +966,6 @@ void i915_gem_context_close(struct drm_file *file)
xa_for_each(&file_priv->vm_xa, idx, vm)
i915_vm_put(vm);
xa_destroy(&file_priv->vm_xa);
-
- contexts_flush_free(&i915->gem.contexts);
}
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -1116,7 +1080,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
return err;
}
- e = __context_engines_await(ctx);
+ e = __context_engines_await(ctx, NULL);
if (!e) {
i915_active_release(&cb->base);
return -ENOENT;
@@ -1879,27 +1843,6 @@ replace:
return 0;
}
-static struct i915_gem_engines *
-__copy_engines(struct i915_gem_engines *e)
-{
- struct i915_gem_engines *copy;
- unsigned int n;
-
- copy = alloc_engines(e->num_engines);
- if (!copy)
- return ERR_PTR(-ENOMEM);
-
- for (n = 0; n < e->num_engines; n++) {
- if (e->engines[n])
- copy->engines[n] = intel_context_get(e->engines[n]);
- else
- copy->engines[n] = NULL;
- }
- copy->num_engines = n;
-
- return copy;
-}
-
static int
get_engines(struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
@@ -1907,19 +1850,17 @@ get_engines(struct i915_gem_context *ctx,
struct i915_context_param_engines __user *user;
struct i915_gem_engines *e;
size_t n, count, size;
+ bool user_engines;
int err = 0;
- err = mutex_lock_interruptible(&ctx->engines_mutex);
- if (err)
- return err;
+ e = __context_engines_await(ctx, &user_engines);
+ if (!e)
+ return -ENOENT;
- e = NULL;
- if (i915_gem_context_user_engines(ctx))
- e = __copy_engines(i915_gem_context_engines(ctx));
- mutex_unlock(&ctx->engines_mutex);
- if (IS_ERR_OR_NULL(e)) {
+ if (!user_engines) {
+ i915_sw_fence_complete(&e->fence);
args->size = 0;
- return PTR_ERR_OR_ZERO(e);
+ return 0;
}
count = e->num_engines;
@@ -1970,7 +1911,7 @@ get_engines(struct i915_gem_context *ctx,
args->size = size;
err_free:
- free_engines(e);
+ i915_sw_fence_complete(&e->fence);
return err;
}
@@ -2136,11 +2077,14 @@ static int copy_ring_size(struct intel_context *dst,
static int clone_engines(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
- struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
- struct i915_gem_engines *clone;
+ struct i915_gem_engines *clone, *e;
bool user_engines;
unsigned long n;
+ e = __context_engines_await(src, &user_engines);
+ if (!e)
+ return -ENOENT;
+
clone = alloc_engines(e->num_engines);
if (!clone)
goto err_unlock;
@@ -2182,9 +2126,7 @@ static int clone_engines(struct i915_gem_context *dst,
}
}
clone->num_engines = n;
-
- user_engines = i915_gem_context_user_engines(src);
- i915_gem_context_unlock_engines(src);
+ i915_sw_fence_complete(&e->fence);
/* Serialised by constructor */
engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
@@ -2195,7 +2137,7 @@ static int clone_engines(struct i915_gem_context *dst,
return 0;
err_unlock:
- i915_gem_context_unlock_engines(src);
+ i915_sw_fence_complete(&e->fence);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index a133f92bbedb..b5c908f3f4f2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -110,7 +110,6 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
/* i915_gem_context.c */
void i915_gem_init__contexts(struct drm_i915_private *i915);
-void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index ae14ca24a11f..1449f54924e0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -108,7 +108,6 @@ struct i915_gem_context {
/** link: place with &drm_i915_private.context_list */
struct list_head link;
- struct llist_node free_link;
/**
* @ref: reference count
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
new file mode 100644
index 000000000000..45d60e3d98e3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_region.h"
+
+#include "i915_drv.h"
+
+static int
+i915_gem_create(struct drm_file *file,
+ struct intel_memory_region *mr,
+ u64 *size_p,
+ u32 *handle_p)
+{
+ struct drm_i915_gem_object *obj;
+ u32 handle;
+ u64 size;
+ int ret;
+
+ GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
+ size = round_up(*size_p, mr->min_page_size);
+ if (size == 0)
+ return -EINVAL;
+
+ /* For most of the ABI (e.g. mmap) we think in system pages */
+ GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+ /* Allocate the new object */
+ obj = i915_gem_object_create_region(mr, size, 0);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ GEM_BUG_ON(size != obj->base.size);
+
+ ret = drm_gem_handle_create(file, &obj->base, &handle);
+ /* drop reference from allocate - handle holds it now */
+ i915_gem_object_put(obj);
+ if (ret)
+ return ret;
+
+ *handle_p = handle;
+ *size_p = size;
+ return 0;
+}
+
+int
+i915_gem_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ enum intel_memory_type mem_type;
+ int cpp = DIV_ROUND_UP(args->bpp, 8);
+ u32 format;
+
+ switch (cpp) {
+ case 1:
+ format = DRM_FORMAT_C8;
+ break;
+ case 2:
+ format = DRM_FORMAT_RGB565;
+ break;
+ case 4:
+ format = DRM_FORMAT_XRGB8888;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* have to work out size/pitch and return them */
+ args->pitch = ALIGN(args->width * cpp, 64);
+
+ /* align stride to page size so that we can remap */
+ if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
+ DRM_FORMAT_MOD_LINEAR))
+ args->pitch = ALIGN(args->pitch, 4096);
+
+ if (args->pitch < args->width)
+ return -EINVAL;
+
+ args->size = mul_u32_u32(args->pitch, args->height);
+
+ mem_type = INTEL_MEMORY_SYSTEM;
+ if (HAS_LMEM(to_i915(dev)))
+ mem_type = INTEL_MEMORY_LOCAL;
+
+ return i915_gem_create(file,
+ intel_memory_region_by_type(to_i915(dev),
+ mem_type),
+ &args->size, &args->handle);
+}
+
+/**
+ * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_create *args = data;
+
+ i915_gem_flush_free_objects(i915);
+
+ return i915_gem_create(file,
+ intel_memory_region_by_type(i915,
+ INTEL_MEMORY_SYSTEM),
+ &args->size, &args->handle);
+}
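To get a feel for the dumb-buffer arithmetic in i915_gem_dumb_create() above, take a hypothetical 1920x1080 request at 32 bpp: cpp = 4, so

	pitch = ALIGN(1920 * 4, 64) = 7680
	size  = 7680 * 1080 = 8294400 bytes

which i915_gem_create() then rounds up to the memory region's minimum page size before allocating the object and creating the handle; the extra 4096-byte pitch alignment only applies when the computed pitch exceeds the maximum linear framebuffer stride.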
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index fcce6909f201..36f54cedaaeb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -5,6 +5,7 @@
*/
#include "display/intel_frontbuffer.h"
+#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
@@ -15,13 +16,58 @@
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
+static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ return !(obj->cache_level == I915_CACHE_NONE ||
+ obj->cache_level == I915_CACHE_WT);
+}
+
+static void
+flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
+{
+ struct i915_vma *vma;
+
+ assert_object_held(obj);
+
+ if (!(obj->write_domain & flush_domains))
+ return;
+
+ switch (obj->write_domain) {
+ case I915_GEM_DOMAIN_GTT:
+ spin_lock(&obj->vma.lock);
+ for_each_ggtt_vma(vma, obj) {
+ if (i915_vma_unset_ggtt_write(vma))
+ intel_gt_flush_ggtt_writes(vma->vm->gt);
+ }
+ spin_unlock(&obj->vma.lock);
+
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+ break;
+
+ case I915_GEM_DOMAIN_WC:
+ wmb();
+ break;
+
+ case I915_GEM_DOMAIN_CPU:
+ i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
+ break;
+
+ case I915_GEM_DOMAIN_RENDER:
+ if (gpu_write_needs_clflush(obj))
+ obj->cache_dirty = true;
+ break;
+ }
+
+ obj->write_domain = 0;
+}
+
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
/*
* We manually flush the CPU domain so that we can override and
* force the flush for the display, and perform it asynchronously.
*/
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
if (obj->cache_dirty)
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
obj->write_domain = 0;
@@ -80,7 +126,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
@@ -141,7 +187,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
@@ -370,6 +416,7 @@ retry:
}
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+ i915_vma_mark_scanout(vma);
i915_gem_object_flush_if_display_locked(obj);
@@ -387,48 +434,6 @@ err:
return vma;
}
-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_vma *vma;
-
- if (list_empty(&obj->vma.list))
- return;
-
- mutex_lock(&i915->ggtt.vm.mutex);
- spin_lock(&obj->vma.lock);
- for_each_ggtt_vma(vma, obj) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
- list_move_tail(&vma->vm_link, &vma->vm->bound_list);
- }
- spin_unlock(&obj->vma.lock);
- mutex_unlock(&i915->ggtt.vm.mutex);
-
- if (i915_gem_object_is_shrinkable(obj)) {
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
-
- if (obj->mm.madv == I915_MADV_WILLNEED &&
- !atomic_read(&obj->mm.shrink_pin))
- list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
-
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- }
-}
-
-void
-i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
-{
- /* Bump the LRU to try and avoid premature eviction whilst flipping */
- i915_gem_object_bump_inactive_ggtt(vma->obj);
-
- i915_vma_unpin(vma);
-}
-
/**
* Moves a single object to the CPU read, and possibly write domain.
* @obj: object to act on
@@ -451,7 +456,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* Flush the CPU cache if it's still invalid. */
if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
@@ -569,9 +574,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
else
err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
- /* And bump the LRU for this access */
- i915_gem_object_bump_inactive_ggtt(obj);
-
i915_gem_object_unlock(obj);
if (write_domain)
@@ -619,7 +621,7 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
goto out;
}
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* If we're not in the cpu read domain, set ourself into the gtt
* read domain and manually flush cachelines (if required). This
@@ -670,7 +672,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
goto out;
}
- i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* If we're not in the cpu write domain, set ourself into the
* gtt write domain and manually flush cachelines (as required).
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index bd3046e5a934..d70ca36f74f6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -15,6 +15,7 @@
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_pm.h"
@@ -534,8 +535,6 @@ eb_add_vma(struct i915_execbuffer *eb,
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
struct eb_vma *ev = &eb->vma[i];
- GEM_BUG_ON(i915_vma_is_closed(vma));
-
ev->vma = vma;
ev->exec = entry;
ev->flags = entry->flags;
@@ -1277,7 +1276,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
int err;
if (!pool) {
- pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE);
+ pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE,
+ cache->has_llc ?
+ I915_MAP_WB :
+ I915_MAP_WC);
if (IS_ERR(pool))
return PTR_ERR(pool);
}
@@ -1287,10 +1289,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_pool;
- cmd = i915_gem_object_pin_map(pool->obj,
- cache->has_llc ?
- I915_MAP_FORCE_WB :
- I915_MAP_FORCE_WC);
+ cmd = i915_gem_object_pin_map(pool->obj, pool->type);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err_pool;
@@ -2459,7 +2458,8 @@ static int eb_parse(struct i915_execbuffer *eb)
return -EINVAL;
if (!pool) {
- pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
+ pool = intel_gt_get_buffer_pool(eb->engine->gt, len,
+ I915_MAP_WB);
if (IS_ERR(pool))
return PTR_ERR(pool);
eb->batch_pool = pool;
@@ -2535,6 +2535,9 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
{
int err;
+ if (intel_context_nopreempt(eb->context))
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
+
err = eb_move_to_gpu(eb);
if (err)
return err;
@@ -2575,15 +2578,12 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
return err;
}
- if (intel_context_nopreempt(eb->context))
- __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
-
return 0;
}
static int num_vcs_engines(const struct drm_i915_private *i915)
{
- return hweight64(VDBOX_MASK(&i915->gt));
+ return hweight_long(VDBOX_MASK(&i915->gt));
}
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 932ee21e6609..194f35342710 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -31,18 +31,13 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
size, flags);
}
-struct drm_i915_gem_object *
-__i915_gem_lmem_object_create(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags)
{
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
-
- obj = i915_gem_object_alloc();
- if (!obj)
- return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class);
@@ -53,5 +48,5 @@ __i915_gem_lmem_object_create(struct intel_memory_region *mem,
i915_gem_object_init_memory_region(obj, mem, flags);
- return obj;
+ return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index fc3f15580fe3..036d53c01de9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -21,9 +21,9 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
unsigned int flags);
-struct drm_i915_gem_object *
-__i915_gem_lmem_object_create(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags);
+int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags);
#endif /* !__I915_GEM_LMEM_H */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 00d24000b5e8..70f798405f7f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -25,13 +25,13 @@
#include <linux/sched/mm.h>
#include "display/intel_frontbuffer.h"
-#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
+#include "i915_memcpy.h"
#include "i915_trace.h"
static struct i915_global_object {
@@ -313,52 +313,6 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
queue_work(i915->wq, &i915->mm.free_work);
}
-static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
-{
- return !(obj->cache_level == I915_CACHE_NONE ||
- obj->cache_level == I915_CACHE_WT);
-}
-
-void
-i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
- unsigned int flush_domains)
-{
- struct i915_vma *vma;
-
- assert_object_held(obj);
-
- if (!(obj->write_domain & flush_domains))
- return;
-
- switch (obj->write_domain) {
- case I915_GEM_DOMAIN_GTT:
- spin_lock(&obj->vma.lock);
- for_each_ggtt_vma(vma, obj) {
- if (i915_vma_unset_ggtt_write(vma))
- intel_gt_flush_ggtt_writes(vma->vm->gt);
- }
- spin_unlock(&obj->vma.lock);
-
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
- break;
-
- case I915_GEM_DOMAIN_WC:
- wmb();
- break;
-
- case I915_GEM_DOMAIN_CPU:
- i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
- break;
-
- case I915_GEM_DOMAIN_RENDER:
- if (gpu_write_needs_clflush(obj))
- obj->cache_dirty = true;
- break;
- }
-
- obj->write_domain = 0;
-}
-
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
@@ -383,6 +337,70 @@ void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
}
}
+static void
+i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
+{
+ void *src_map;
+ void *src_ptr;
+
+ src_map = kmap_atomic(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT));
+
+ src_ptr = src_map + offset_in_page(offset);
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ drm_clflush_virt_range(src_ptr, size);
+ memcpy(dst, src_ptr, size);
+
+ kunmap_atomic(src_map);
+}
+
+static void
+i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
+{
+ void __iomem *src_map;
+ void __iomem *src_ptr;
+ dma_addr_t dma = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT);
+
+ src_map = io_mapping_map_wc(&obj->mm.region->iomap,
+ dma - obj->mm.region->region.start,
+ PAGE_SIZE);
+
+ src_ptr = src_map + offset_in_page(offset);
+ if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size))
+ memcpy_fromio(dst, src_ptr, size);
+
+ io_mapping_unmap(src_map);
+}
+
+/**
+ * i915_gem_object_read_from_page - read data from the page of a GEM object
+ * @obj: GEM object to read from
+ * @offset: offset within the object
+ * @dst: buffer to store the read data
+ * @size: size to read
+ *
+ * Reads data from @obj at the specified offset. The requested region to read
+ * from can't cross a page boundary. The caller must ensure that @obj pages
+ * are pinned and that @obj is synced wrt. any related writes.
+ *
+ * Returns 0 on success or -ENODEV if the type of @obj's backing store is
+ * unsupported.
+ */
+int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
+{
+ GEM_BUG_ON(offset >= obj->base.size);
+ GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ if (i915_gem_object_has_struct_page(obj))
+ i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
+ else if (i915_gem_object_has_iomem(obj))
+ i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
+ else
+ return -ENODEV;
+
+ return 0;
+}
+
void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index be14486f63a7..d0ae834d787a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -188,6 +188,24 @@ i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
+{
+ return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
+}
+
+static inline void
+i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
+{
+ set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
+}
+
+static inline void
+i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
+{
+ clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
+}
+
+static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
unsigned long flags)
{
@@ -201,6 +219,12 @@ i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
+}
+
+static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
@@ -384,14 +408,6 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
-enum i915_map_type {
- I915_MAP_WB = 0,
- I915_MAP_WC,
-#define I915_MAP_OVERRIDE BIT(31)
- I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
- I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
-};
-
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
* @obj: the object to map into kernel address space
@@ -435,10 +451,6 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
-void
-i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
- unsigned int flush_domains);
-
int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
@@ -486,7 +498,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
const struct i915_ggtt_view *view,
unsigned int flags);
-void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
@@ -512,6 +523,9 @@ static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
obj->cache_dirty = true;
}
+void i915_gem_fence_wait_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr);
+
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout);
@@ -540,4 +554,8 @@ i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
__i915_gem_object_invalidate_frontbuffer(obj, origin);
}
+int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);
+
+bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);
+
#endif
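A minimal sketch of how a caller might use the i915_gem_object_read_from_page() helper declared above (hypothetical caller, error handling abbreviated); the object's pages must stay pinned for the duration of the read, and the read must not cross a page boundary:

	u32 value;
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	/* offset/sizeof(value) must stay within one page; obj must be synced */
	err = i915_gem_object_read_from_page(obj, offset, &value, sizeof(value));

	i915_gem_object_unpin_pages(obj);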
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index aee7ad3cc3c6..d6dac21fce0b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_ring.h"
@@ -34,7 +35,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
count = div_u64(round_up(vma->size, block_size), block_size);
size = (1 + 8 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
+ pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
@@ -54,7 +55,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
if (unlikely(err))
goto out_put;
- cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+ cmd = i915_gem_object_pin_map(pool->obj, pool->type);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out_unpin;
@@ -256,7 +257,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
count = div_u64(round_up(dst->size, block_size), block_size);
size = (1 + 11 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
+ pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
@@ -276,7 +277,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
if (unlikely(err))
goto out_put;
- cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+ cmd = i915_gem_object_pin_map(pool->obj, pool->type);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out_unpin;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index e2d9b7e1e152..0438e00d4ca7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -67,6 +67,14 @@ struct drm_i915_gem_object_ops {
const char *name; /* friendly name for debug, e.g. lockdep classes */
};
+enum i915_map_type {
+ I915_MAP_WB = 0,
+ I915_MAP_WC,
+#define I915_MAP_OVERRIDE BIT(31)
+ I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
+ I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
+};
+
enum i915_mmap_type {
I915_MMAP_TYPE_GTT = 0,
I915_MMAP_TYPE_WC,
@@ -142,8 +150,6 @@ struct drm_i915_gem_object {
*/
struct list_head obj_link;
- /** Stolen memory for this object, instead of being backed by shmem. */
- struct drm_mm_node *stolen;
union {
struct rcu_head rcu;
struct llist_node freed;
@@ -167,6 +173,7 @@ struct drm_i915_gem_object {
#define I915_BO_ALLOC_VOLATILE BIT(1)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
#define I915_BO_READONLY BIT(2)
+#define I915_TILING_QUIRK_BIT 3 /* unknown swizzling; do not release! */
/*
* Is the object to be mapped as read-only to the GPU
@@ -275,12 +282,6 @@ struct drm_i915_gem_object {
* pages were last acquired.
*/
bool dirty:1;
-
- /**
- * This is set if the object has been pinned due to unknown
- * swizzling.
- */
- bool quirked:1;
} mm;
/** Record of address bit 17 of each page at last unbind. */
@@ -295,6 +296,8 @@ struct drm_i915_gem_object {
struct work_struct *work;
} userptr;
+ struct drm_mm_node *stolen;
+
unsigned long scratch;
u64 encode;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index e2c7b2a7895f..43028f3539a6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -16,6 +16,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ bool shrinkable;
int i;
lockdep_assert_held(&obj->mm.lock);
@@ -38,13 +39,6 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
obj->mm.pages = pages;
- if (i915_gem_object_is_tiled(obj) &&
- i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
- }
-
GEM_BUG_ON(!sg_page_sizes);
obj->mm.page_sizes.phys = sg_page_sizes;
@@ -63,7 +57,16 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
}
GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
- if (i915_gem_object_is_shrinkable(obj)) {
+ shrinkable = i915_gem_object_is_shrinkable(obj);
+
+ if (i915_gem_object_is_tiled(obj) &&
+ i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_set_tiling_quirk(obj);
+ shrinkable = false;
+ }
+
+ if (shrinkable) {
struct list_head *list;
unsigned long flags;
@@ -238,7 +241,7 @@ unlock:
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
- enum i915_map_type type)
+ enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
struct page *stack[32], **pages = stack, *page;
@@ -281,7 +284,7 @@ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
/* Too big for stack -- allocate temporary array instead */
pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
if (!pages)
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
i = 0;
@@ -290,11 +293,12 @@ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
vaddr = vmap(pages, n_pages, 0, pgprot);
if (pages != stack)
kvfree(pages);
- return vaddr;
+
+ return vaddr ?: ERR_PTR(-ENOMEM);
}
static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
- enum i915_map_type type)
+ enum i915_map_type type)
{
resource_size_t iomap = obj->mm.region->iomap.base -
obj->mm.region->region.start;
@@ -305,13 +309,13 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
void *vaddr;
if (type != I915_MAP_WC)
- return NULL;
+ return ERR_PTR(-ENODEV);
if (n_pfn > ARRAY_SIZE(stack)) {
/* Too big for stack -- allocate temporary array instead */
pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
if (!pfns)
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
i = 0;
@@ -320,7 +324,8 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
if (pfns != stack)
kvfree(pfns);
- return vaddr;
+
+ return vaddr ?: ERR_PTR(-ENOMEM);
}
/* get, pin, and map the pages of the object into kernel space */
@@ -349,8 +354,10 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
err = ____i915_gem_object_get_pages(obj);
- if (err)
- goto err_unlock;
+ if (err) {
+ ptr = ERR_PTR(err);
+ goto out_unlock;
+ }
smp_mb__before_atomic();
}
@@ -362,7 +369,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
if (ptr && has_type != type) {
if (pinned) {
- err = -EBUSY;
+ ptr = ERR_PTR(-EBUSY);
goto err_unpin;
}
@@ -374,15 +381,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
if (!ptr) {
if (GEM_WARN_ON(type == I915_MAP_WC &&
!static_cpu_has(X86_FEATURE_PAT)))
- ptr = NULL;
+ ptr = ERR_PTR(-ENODEV);
else if (i915_gem_object_has_struct_page(obj))
ptr = i915_gem_object_map_page(obj, type);
else
ptr = i915_gem_object_map_pfn(obj, type);
- if (!ptr) {
- err = -ENOMEM;
+ if (IS_ERR(ptr))
goto err_unpin;
- }
obj->mm.mapping = page_pack_bits(ptr, type);
}
@@ -393,8 +398,6 @@ out_unlock:
err_unpin:
atomic_dec(&obj->mm.pages_pin_count);
-err_unlock:
- ptr = ERR_PTR(err);
goto out_unlock;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 3a4dfe2ef1da..3c0b157e2a35 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -213,7 +213,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
if (obj->ops == &i915_gem_phys_ops)
return 0;
- if (obj->ops != &i915_gem_shmem_ops)
+ if (!i915_gem_object_is_shmem(obj))
return -EINVAL;
err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
@@ -227,7 +227,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
goto err_unlock;
}
- if (obj->mm.quirked) {
+ if (i915_gem_object_has_tiling_quirk(obj)) {
err = -EFAULT;
goto err_unlock;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 40d3e40500fa..000e1cd8e920 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -11,6 +11,13 @@
#include "i915_drv.h"
+#if defined(CONFIG_X86)
+#include <asm/smp.h>
+#else
+#define wbinvd_on_all_cpus() \
+ pr_warn(DRIVER_NAME ": Missing cache flush in %s\n", __func__)
+#endif
+
void i915_gem_suspend(struct drm_i915_private *i915)
{
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
@@ -32,13 +39,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
i915_gem_drain_freed_objects(i915);
}
-static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
-{
- return list_first_entry_or_null(list,
- struct drm_i915_gem_object,
- mm.link);
-}
-
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
@@ -48,6 +48,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
NULL
}, **phase;
unsigned long flags;
+ bool flush = false;
/*
* Neither the BIOS, ourselves or any other kernel
@@ -73,29 +74,56 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
spin_lock_irqsave(&i915->mm.obj_lock, flags);
for (phase = phases; *phase; phase++) {
- LIST_HEAD(keep);
+ list_for_each_entry(obj, *phase, mm.link) {
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ flush |= (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0;
+ __start_cpu_write(obj); /* presume auto-hibernate */
+ }
+ }
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ if (flush)
+ wbinvd_on_all_cpus();
+}
- while ((obj = first_mm_object(*phase))) {
- list_move_tail(&obj->mm.link, &keep);
+int i915_gem_freeze(struct drm_i915_private *i915)
+{
+ /* Discard all purgeable objects, let userspace recover those as
+ * required after resuming.
+ */
+ i915_gem_shrink_all(i915);
- /* Beware the background _i915_gem_free_objects */
- if (!kref_get_unless_zero(&obj->base.refcount))
- continue;
+ return 0;
+}
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+int i915_gem_freeze_late(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
- i915_gem_object_lock(obj, NULL);
- drm_WARN_ON(&i915->drm,
- i915_gem_object_set_to_gtt_domain(obj, false));
- i915_gem_object_unlock(obj);
- i915_gem_object_put(obj);
+ /*
+ * Called just before we write the hibernation image.
+ *
+ * We need to update the domain tracking to reflect that the CPU
+ * will be accessing all the pages to create and restore from the
+ * hibernation, and so upon restoration those pages will be in the
+ * CPU domain.
+ *
+ * To make sure the hibernation image contains the latest state,
+ * we update that state just before writing out the image.
+ *
+ * To try and reduce the hibernation image, we manually shrink
+ * the objects as well, see i915_gem_freeze()
+ */
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
- }
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ i915_gem_shrink(i915, -1UL, NULL, ~0);
+ i915_gem_drain_freed_objects(i915);
- list_splice_tail(&keep, *phase);
- }
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ wbinvd_on_all_cpus();
+ list_for_each_entry(obj, &i915->mm.shrink_list, mm.link)
+ __start_cpu_write(obj);
+
+ return 0;
}
void i915_gem_resume(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
index 26b78dbdc225..c9a66630e92e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
@@ -19,4 +19,7 @@ void i915_gem_idle_work_handler(struct work_struct *work);
void i915_gem_suspend(struct drm_i915_private *i915);
void i915_gem_suspend_late(struct drm_i915_private *i915);
+int i915_gem_freeze(struct drm_i915_private *i915);
+int i915_gem_freeze_late(struct drm_i915_private *i915);
+
#endif /* __I915_GEM_PM_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 1515384d7e0e..3e3dad22a683 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -22,6 +22,7 @@ i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
int
i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
{
+ const u64 max_segment = i915_sg_segment_size();
struct intel_memory_region *mem = obj->mm.region;
struct list_head *blocks = &obj->mm.blocks;
resource_size_t size = obj->base.size;
@@ -37,7 +38,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
if (!st)
return -ENOMEM;
- if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
+ if (sg_alloc_table(st, size >> PAGE_SHIFT, GFP_KERNEL)) {
kfree(st);
return -ENOMEM;
}
@@ -64,27 +65,30 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
i915_buddy_block_size(&mem->mm, block));
offset = i915_buddy_block_offset(block);
- GEM_BUG_ON(overflows_type(block_size, sg->length));
+ while (block_size) {
+ u64 len;
- if (offset != prev_end ||
- add_overflows_t(typeof(sg->length), sg->length, block_size)) {
- if (st->nents) {
- sg_page_sizes |= sg->length;
- sg = __sg_next(sg);
+ if (offset != prev_end || sg->length >= max_segment) {
+ if (st->nents) {
+ sg_page_sizes |= sg->length;
+ sg = __sg_next(sg);
+ }
+
+ sg_dma_address(sg) = mem->region.start + offset;
+ sg_dma_len(sg) = 0;
+ sg->length = 0;
+ st->nents++;
}
- sg_dma_address(sg) = mem->region.start + offset;
- sg_dma_len(sg) = block_size;
+ len = min(block_size, max_segment - sg->length);
+ sg->length += len;
+ sg_dma_len(sg) += len;
- sg->length = block_size;
+ offset += len;
+ block_size -= len;
- st->nents++;
- } else {
- sg->length += block_size;
- sg_dma_len(sg) += block_size;
+ prev_end = offset;
}
-
- prev_end = offset + block_size;
}
sg_page_sizes |= sg->length;
@@ -139,6 +143,7 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
unsigned int flags)
{
struct drm_i915_gem_object *obj;
+ int err;
/*
* NB: Our use of resource_size_t for the size stems from using struct
@@ -169,9 +174,18 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = mem->ops->create_object(mem, size, flags);
- if (!IS_ERR(obj))
- trace_i915_gem_object_create(obj);
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+ err = mem->ops->init_object(mem, obj, size, flags);
+ if (err)
+ goto err_object_free;
+
+ trace_i915_gem_object_create(obj);
return obj;
+
+err_object_free:
+ i915_gem_object_free(obj);
+ return ERR_PTR(err);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 75e8b71c18b9..cf83c208688c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -464,26 +464,21 @@ static int __create_shmem(struct drm_i915_private *i915,
return 0;
}
-static struct drm_i915_gem_object *
-create_shmem(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+static int shmem_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags)
{
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
struct address_space *mapping;
unsigned int cache_level;
gfp_t mask;
int ret;
- obj = i915_gem_object_alloc();
- if (!obj)
- return ERR_PTR(-ENOMEM);
-
ret = __create_shmem(i915, &obj->base, size);
if (ret)
- goto fail;
+ return ret;
mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
if (IS_I965GM(i915) || IS_I965G(i915)) {
@@ -522,11 +517,7 @@ create_shmem(struct intel_memory_region *mem,
i915_gem_object_init_memory_region(obj, mem, 0);
- return obj;
-
-fail:
- i915_gem_object_free(obj);
- return ERR_PTR(ret);
+ return 0;
}
struct drm_i915_gem_object *
@@ -611,7 +602,7 @@ static void release_shmem(struct intel_memory_region *mem)
static const struct intel_memory_region_ops shmem_region_ops = {
.init = init_shmem,
.release = release_shmem,
- .create_object = create_shmem,
+ .init_object = shmem_object_init,
};
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
@@ -621,3 +612,8 @@ struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
PAGE_SIZE, 0,
&shmem_region_ops);
}
+
+bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops == &i915_gem_shmem_ops;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index dc8f052a0ffe..c2dba1cd9532 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -15,6 +15,7 @@
#include "gt/intel_gt_requests.h"
+#include "dma_resv_utils.h"
#include "i915_trace.h"
static bool swap_available(void)
@@ -209,6 +210,8 @@ i915_gem_shrink(struct drm_i915_private *i915,
mutex_unlock(&obj->mm.lock);
}
+ dma_resv_prune(obj->base.resv);
+
scanned += obj->base.size >> PAGE_SHIFT;
i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 29bffc6afcc1..a1e197a6e999 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -608,11 +608,10 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
GEM_BUG_ON(!stolen);
-
- i915_gem_object_release_memory_region(obj);
-
i915_gem_stolen_remove_node(i915, stolen);
kfree(stolen);
+
+ i915_gem_object_release_memory_region(obj);
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
@@ -622,18 +621,13 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.release = i915_gem_object_release_stolen,
};
-static struct drm_i915_gem_object *
-__i915_gem_object_create_stolen(struct intel_memory_region *mem,
- struct drm_mm_node *stolen)
+static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ struct drm_mm_node *stolen)
{
static struct lock_class_key lock_class;
- struct drm_i915_gem_object *obj;
unsigned int cache_level;
- int err = -ENOMEM;
-
- obj = i915_gem_object_alloc();
- if (!obj)
- goto err;
+ int err;
drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);
@@ -645,55 +639,47 @@ __i915_gem_object_create_stolen(struct intel_memory_region *mem,
err = i915_gem_object_pin_pages(obj);
if (err)
- goto cleanup;
+ return err;
i915_gem_object_init_memory_region(obj, mem, 0);
- return obj;
-
-cleanup:
- i915_gem_object_free(obj);
-err:
- return ERR_PTR(err);
+ return 0;
}
-static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags)
{
struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
if (!drm_mm_initialized(&i915->mm.stolen))
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
if (size == 0)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
- if (ret) {
- obj = ERR_PTR(ret);
+ if (ret)
goto err_free;
- }
- obj = __i915_gem_object_create_stolen(mem, stolen);
- if (IS_ERR(obj))
+ ret = __i915_gem_object_create_stolen(mem, obj, stolen);
+ if (ret)
goto err_remove;
- return obj;
+ return 0;
err_remove:
i915_gem_stolen_remove_node(i915, stolen);
err_free:
kfree(stolen);
- return obj;
+ return ret;
}
struct drm_i915_gem_object *
@@ -723,7 +709,7 @@ static void release_stolen(struct intel_memory_region *mem)
static const struct intel_memory_region_ops i915_region_stolen_ops = {
.init = init_stolen,
.release = release_stolen,
- .create_object = _i915_gem_object_create_stolen,
+ .init_object = _i915_gem_object_stolen_init,
};
struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
@@ -767,21 +753,32 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
mutex_lock(&i915->mm.stolen_lock);
ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
mutex_unlock(&i915->mm.stolen_lock);
- if (ret) {
- obj = ERR_PTR(ret);
+ if (ret)
goto err_free;
- }
- obj = __i915_gem_object_create_stolen(mem, stolen);
- if (IS_ERR(obj))
+ obj = i915_gem_object_alloc();
+ if (!obj) {
+ ret = -ENOMEM;
goto err_stolen;
+ }
+
+ ret = __i915_gem_object_create_stolen(mem, obj, stolen);
+ if (ret)
+ goto err_object_free;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
return obj;
+err_object_free:
+ i915_gem_object_free(obj);
err_stolen:
i915_gem_stolen_remove_node(i915, stolen);
err_free:
kfree(stolen);
- return obj;
+ return ERR_PTR(ret);
+}
+
+bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops == &i915_gem_object_stolen_ops;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index 61e028063f9f..b03489706796 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -30,6 +30,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
resource_size_t stolen_offset,
resource_size_t size);
+bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
+
#define I915_GEM_STOLEN_BIAS SZ_128K
#endif /* __I915_GEM_STOLEN_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index ffcaee74a249..d589d3d81085 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -270,14 +270,14 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
obj->mm.madv == I915_MADV_WILLNEED &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (tiling == I915_TILING_NONE) {
- GEM_BUG_ON(!obj->mm.quirked);
- __i915_gem_object_unpin_pages(obj);
- obj->mm.quirked = false;
+ GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_clear_tiling_quirk(obj);
+ i915_gem_object_make_shrinkable(obj);
}
if (!i915_gem_object_is_tiled(obj)) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_make_unshrinkable(obj);
+ i915_gem_object_set_tiling_quirk(obj);
}
}
mutex_unlock(&obj->mm.lock);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 8af55cd3e690..4b9856d5ba14 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -5,10 +5,12 @@
*/
#include <linux/dma-fence-array.h>
+#include <linux/dma-fence-chain.h>
#include <linux/jiffies.h>
#include "gt/intel_engine.h"
+#include "dma_resv_utils.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
@@ -43,8 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
unsigned int count, i;
int ret;
- ret = dma_resv_get_fences_rcu(resv,
- &excl, &count, &shared);
+ ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
if (ret)
return ret;
@@ -84,17 +85,14 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
* Opportunistically prune the fences iff we know they have *all* been
* signaled.
*/
- if (prune_fences && dma_resv_trylock(resv)) {
- if (dma_resv_test_signaled_rcu(resv, true))
- dma_resv_add_excl_fence(resv, NULL);
- dma_resv_unlock(resv);
- }
+ if (prune_fences)
+ dma_resv_prune(resv);
return timeout;
}
-static void __fence_set_priority(struct dma_fence *fence,
- const struct i915_sched_attr *attr)
+static void fence_set_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr)
{
struct i915_request *rq;
struct intel_engine_cs *engine;
@@ -105,27 +103,47 @@ static void __fence_set_priority(struct dma_fence *fence,
rq = to_request(fence);
engine = rq->engine;
- local_bh_disable();
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule)
engine->schedule(rq, attr);
rcu_read_unlock();
- local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}
-static void fence_set_priority(struct dma_fence *fence,
- const struct i915_sched_attr *attr)
+static inline bool __dma_fence_is_chain(const struct dma_fence *fence)
{
+ return fence->ops == &dma_fence_chain_ops;
+}
+
+void i915_gem_fence_wait_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr)
+{
+ if (dma_fence_is_signaled(fence))
+ return;
+
+ local_bh_disable();
+
/* Recurse once into a fence-array */
if (dma_fence_is_array(fence)) {
struct dma_fence_array *array = to_dma_fence_array(fence);
int i;
for (i = 0; i < array->num_fences; i++)
- __fence_set_priority(array->fences[i], attr);
+ fence_set_priority(array->fences[i], attr);
+ } else if (__dma_fence_is_chain(fence)) {
+ struct dma_fence *iter;
+
+ /* The chain is ordered; if we boost the last, we boost all */
+ dma_fence_chain_for_each(iter, fence) {
+ fence_set_priority(to_dma_fence_chain(iter)->fence,
+ attr);
+ break;
+ }
+ dma_fence_put(iter);
} else {
- __fence_set_priority(fence, attr);
+ fence_set_priority(fence, attr);
}
+
+ local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}
int
@@ -141,12 +159,12 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
int ret;
ret = dma_resv_get_fences_rcu(obj->base.resv,
- &excl, &count, &shared);
+ &excl, &count, &shared);
if (ret)
return ret;
for (i = 0; i < count; i++) {
- fence_set_priority(shared[i], attr);
+ i915_gem_fence_wait_priority(shared[i], attr);
dma_fence_put(shared[i]);
}
@@ -156,7 +174,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
}
if (excl) {
- fence_set_priority(excl, attr);
+ i915_gem_fence_wait_priority(excl, attr);
dma_fence_put(excl);
}
return 0;
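The open-coded fence pruning dropped in the first hunk of this file is assumed to have moved into the new dma_resv_prune() helper, per the added dma_resv_utils.h include. A minimal sketch of what that helper is assumed to do, reconstructed from the removed lines:

	static void dma_resv_prune_sketch(struct dma_resv *resv)
	{
		/* Opportunistic: skip if someone else already holds the reservation */
		if (!dma_resv_trylock(resv))
			return;

		/* Once every fence has signaled, drop them all via the exclusive slot */
		if (dma_resv_test_signaled_rcu(resv, true))
			dma_resv_add_excl_fence(resv, NULL);

		dma_resv_unlock(resv);
	}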
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index a768ec61e966..2fb501a78a85 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -27,7 +27,7 @@ static void huge_free_pages(struct drm_i915_gem_object *obj,
static int huge_get_pages(struct drm_i915_gem_object *obj)
{
-#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL)
const unsigned long nreal = obj->scratch / PAGE_SIZE;
const unsigned long npages = obj->base.size / PAGE_SIZE;
struct scatterlist *sg, *src, *end;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 1f35e71429b4..aacf4856ccb4 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -368,6 +368,27 @@ static int igt_check_page_sizes(struct i915_vma *vma)
err = -EINVAL;
}
+ /*
+ * The dma-api is like a box of chocolates when it comes to the
+ * alignment of dma addresses, however for LMEM we have total control
+ * and so can guarantee alignment, likewise when we allocate our blocks
+ * they should appear in descending order, and if we know that we align
+ * to the largest page size for the GTT address, we should be able to
+ * assert that if we see 2M physical pages then we should also get 2M
+ * GTT pages. If we don't then something might be wrong in our
+ * construction of the backing pages.
+ *
+ * Maintaining alignment is required to utilise huge pages in the ppGGT.
+ */
+ if (i915_gem_object_is_lmem(obj) &&
+ IS_ALIGNED(vma->node.start, SZ_2M) &&
+ vma->page_sizes.sg & SZ_2M &&
+ vma->page_sizes.gtt < SZ_2M) {
+ pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
+ vma->page_sizes.sg, vma->page_sizes.gtt);
+ err = -EINVAL;
+ }
+
if (obj->mm.page_sizes.gtt) {
pr_err("obj->page_sizes.gtt(%u) should never be set\n",
obj->mm.page_sizes.gtt);
@@ -1333,6 +1354,7 @@ static int igt_ppgtt_sanity_check(void *arg)
unsigned int flags;
} backends[] = {
{ igt_create_system, 0, },
+ { igt_create_local, 0, },
{ igt_create_local, I915_BO_ALLOC_CONTIGUOUS, },
};
struct {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 4e36d4897ea6..6a674a7994df 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -20,13 +20,11 @@ static int __igt_client_fill(struct intel_engine_cs *engine)
{
struct intel_context *ce = engine->kernel_context;
struct drm_i915_gem_object *obj;
- struct rnd_state prng;
+ I915_RND_STATE(prng);
IGT_TIMEOUT(end);
u32 *vaddr;
int err = 0;
- prandom_seed_state(&prng, i915_selftest.random_seed);
-
intel_engine_pm_get(engine);
do {
const u32 max_block_size = S16_MAX * PAGE_SIZE;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 7049a6bbc03d..1117d2a44518 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index d27d87a678c8..d429c7643ff2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gem/i915_gem_region.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index e21b5023ca7d..d6783061bc72 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -9,6 +9,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "i915_vma.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
index 174a24553322..d4f4452ce5ed 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
+#include "intel_gt_pm.h"
#include "intel_llc.h"
#include "intel_rc6.h"
#include "intel_rps.h"
@@ -403,34 +404,34 @@ static int frequency_show(struct seq_file *m, void *unused)
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+ seq_printf(m, "RP CUR UP EI: %d (%lldns)\n",
rpcurupei,
intel_gt_pm_interval_to_ns(gt, rpcurupei));
- seq_printf(m, "RP CUR UP: %d (%dns)\n",
+ seq_printf(m, "RP CUR UP: %d (%lldns)\n",
rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
- seq_printf(m, "RP PREV UP: %d (%dns)\n",
+ seq_printf(m, "RP PREV UP: %d (%lldns)\n",
rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
seq_printf(m, "Up threshold: %d%%\n",
rps->power.up_threshold);
- seq_printf(m, "RP UP EI: %d (%dns)\n",
+ seq_printf(m, "RP UP EI: %d (%lldns)\n",
rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
- seq_printf(m, "RP UP THRESHOLD: %d (%dns)\n",
+ seq_printf(m, "RP UP THRESHOLD: %d (%lldns)\n",
rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));
- seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+ seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n",
rpcurdownei,
intel_gt_pm_interval_to_ns(gt, rpcurdownei));
- seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+ seq_printf(m, "RP CUR DOWN: %d (%lldns)\n",
rpcurdown,
intel_gt_pm_interval_to_ns(gt, rpcurdown));
- seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+ seq_printf(m, "RP PREV DOWN: %d (%lldns)\n",
rpprevdown,
intel_gt_pm_interval_to_ns(gt, rpprevdown));
seq_printf(m, "Down threshold: %d%%\n",
rps->power.down_threshold);
- seq_printf(m, "RP DOWN EI: %d (%dns)\n",
+ seq_printf(m, "RP DOWN EI: %d (%lldns)\n",
rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
- seq_printf(m, "RP DOWN THRESHOLD: %d (%dns)\n",
+ seq_printf(m, "RP DOWN THRESHOLD: %d (%lldns)\n",
rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));
max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
@@ -558,7 +559,9 @@ static int rps_boost_show(struct seq_file *m, void *data)
seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
- seq_printf(m, "GPU busy? %s\n", yesno(gt->awake));
+ seq_printf(m, "GPU busy? %s, %llums\n",
+ yesno(gt->awake),
+ ktime_to_ms(intel_gt_get_awake_time(gt)));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
@@ -575,7 +578,7 @@ static int rps_boost_show(struct seq_file *m, void *data)
intel_gpu_freq(rps, rps->efficient_freq),
intel_gpu_freq(rps, rps->boost_freq));
- seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
+ seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) {
struct intel_uncore *uncore = gt->uncore;
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 680bd9442eb0..e08dff376339 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -12,9 +12,9 @@
#include "intel_gt.h"
/* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
- const unsigned int pde,
- const struct i915_page_table *pt)
+static void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
+ const unsigned int pde,
+ const struct i915_page_table *pt)
{
dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]);
@@ -27,8 +27,6 @@ void gen7_ppgtt_enable(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
u32 ecochk;
intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
@@ -41,13 +39,6 @@ void gen7_ppgtt_enable(struct intel_gt *gt)
ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
}
intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
-
- for_each_engine(engine, gt, id) {
- /* GFX_MODE is per-ring on gen7+ */
- ENGINE_WRITE(engine,
- RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- }
}
void gen6_ppgtt_enable(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index 94465374ca2f..de575fdb033f 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -40,7 +40,7 @@ struct batch_vals {
u32 size;
};
-static inline int num_primitives(const struct batch_vals *bv)
+static int num_primitives(const struct batch_vals *bv)
{
/*
* We need to saturate the GPU with work in order to dispatch
@@ -240,7 +240,7 @@ gen7_emit_state_base_address(struct batch_chunk *batch,
/* general */
*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
/* surface */
- *cs++ = batch_addr(batch) | surface_state_base | BASE_ADDRESS_MODIFY;
+ *cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY;
/* dynamic */
*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
/* indirect */
@@ -353,19 +353,21 @@ static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
{
- u32 *cs = batch_alloc_items(batch, 0, 8);
+ u32 *cs = batch_alloc_items(batch, 0, 10);
/* ivb: Stall before STATE_CACHE_INVALIDATE */
- *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
*cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
PIPE_CONTROL_CS_STALL;
*cs++ = 0;
*cs++ = 0;
+ *cs++ = 0;
- *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
*cs++ = 0;
*cs++ = 0;
+ *cs++ = 0;
batch_advance(batch, cs);
}
@@ -390,6 +392,18 @@ static void emit_batch(struct i915_vma * const vma,
&cb_kernel_ivb,
desc_count);
+ /* Reset inherited context registers */
+ gen7_emit_pipeline_flush(&cmds);
+ gen7_emit_pipeline_invalidate(&cmds);
+ batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
+ batch_add(&cmds, 0xffff0000);
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
+ batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+ gen7_emit_pipeline_invalidate(&cmds);
+ gen7_emit_pipeline_flush(&cmds);
+
+ /* Switch to the media pipeline and our base address */
gen7_emit_pipeline_invalidate(&cmds);
batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
batch_add(&cmds, MI_NOOP);
@@ -399,9 +413,11 @@ static void emit_batch(struct i915_vma * const vma,
gen7_emit_state_base_address(&cmds, descriptors);
gen7_emit_pipeline_invalidate(&cmds);
+ /* Set the clear-residual kernel state */
gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
+ /* Execute the kernel on all HW threads */
for (i = 0; i < num_primitives(bv); i++)
gen7_emit_media_object(&cmds, i);
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
new file mode 100644
index 000000000000..07ba524da90b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -0,0 +1,635 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#include "gen8_engine_cs.h"
+#include "i915_drv.h"
+#include "intel_lrc.h"
+#include "intel_gpu_commands.h"
+#include "intel_ring.h"
+
+int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ bool vf_flush_wa = false, dc_flush_wa = false;
+ u32 *cs, flags = 0;
+ int len;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ if (mode & EMIT_FLUSH) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ /*
+ * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
+ * pipe control.
+ */
+ if (IS_GEN(rq->engine->i915, 9))
+ vf_flush_wa = true;
+
+ /* WaForGAMHang:kbl */
+ if (IS_KBL_GT_REVID(rq->engine->i915, 0, KBL_REVID_B0))
+ dc_flush_wa = true;
+ }
+
+ len = 6;
+
+ if (vf_flush_wa)
+ len += 6;
+
+ if (dc_flush_wa)
+ len += 12;
+
+ cs = intel_ring_begin(rq, len);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ if (vf_flush_wa)
+ cs = gen8_emit_pipe_control(cs, 0, 0);
+
+ if (dc_flush_wa)
+ cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
+ 0);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+
+ if (dc_flush_wa)
+ cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
+{
+ u32 cmd, *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cmd = MI_FLUSH_DW + 1;
+
+ /*
+ * We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+ if (mode & EMIT_INVALIDATE) {
+ cmd |= MI_INVALIDATE_TLB;
+ if (rq->engine->class == VIDEO_DECODE_CLASS)
+ cmd |= MI_INVALIDATE_BSD;
+ }
+
+ *cs++ = cmd;
+ *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ if (mode & EMIT_FLUSH) {
+ u32 *cs;
+ u32 flags = 0;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(rq, cs);
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ u32 *cs;
+ u32 flags = 0;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(rq, cs);
+ }
+
+ return 0;
+}
+
+static u32 preparser_disable(bool state)
+{
+ return MI_ARB_CHECK | 1 << 8 | state;
+}
+
+static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine)
+{
+ static const i915_reg_t vd[] = {
+ GEN12_VD0_AUX_NV,
+ GEN12_VD1_AUX_NV,
+ GEN12_VD2_AUX_NV,
+ GEN12_VD3_AUX_NV,
+ };
+
+ static const i915_reg_t ve[] = {
+ GEN12_VE0_AUX_NV,
+ GEN12_VE1_AUX_NV,
+ };
+
+ if (engine->class == VIDEO_DECODE_CLASS)
+ return vd[engine->instance];
+
+ if (engine->class == VIDEO_ENHANCEMENT_CLASS)
+ return ve[engine->instance];
+
+ GEM_BUG_ON("unknown aux_inv reg\n");
+ return INVALID_MMIO_REG;
+}
+
+static u32 *gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs)
+{
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(inv_reg);
+ *cs++ = AUX_INV;
+ *cs++ = MI_NOOP;
+
+ return cs;
+}
+
+int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ if (mode & EMIT_FLUSH) {
+ u32 flags = 0;
+ u32 *cs;
+
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_FLUSH_L3;
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /* Wa_1409600907:tgl */
+ flags |= PIPE_CONTROL_DEPTH_STALL;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen12_emit_pipe_control(cs,
+ PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+ flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(rq, cs);
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ u32 flags = 0;
+ u32 *cs;
+
+ flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ cs = intel_ring_begin(rq, 8 + 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Prevent the pre-parser from skipping past the TLB
+ * invalidate and loading a stale page for the batch
+ * buffer / request payload.
+ */
+ *cs++ = preparser_disable(true);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+
+ /* hsdes: 1809175790 */
+ cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs);
+
+ *cs++ = preparser_disable(false);
+ intel_ring_advance(rq, cs);
+ }
+
+ return 0;
+}
+
+int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
+{
+ intel_engine_mask_t aux_inv = 0;
+ u32 cmd, *cs;
+
+ cmd = 4;
+ if (mode & EMIT_INVALIDATE)
+ cmd += 2;
+ if (mode & EMIT_INVALIDATE)
+ aux_inv = rq->engine->mask & ~BIT(BCS0);
+ if (aux_inv)
+ cmd += 2 * hweight8(aux_inv) + 2;
+
+ cs = intel_ring_begin(rq, cmd);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ if (mode & EMIT_INVALIDATE)
+ *cs++ = preparser_disable(true);
+
+ cmd = MI_FLUSH_DW + 1;
+
+ /*
+ * We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+ if (mode & EMIT_INVALIDATE) {
+ cmd |= MI_INVALIDATE_TLB;
+ if (rq->engine->class == VIDEO_DECODE_CLASS)
+ cmd |= MI_INVALIDATE_BSD;
+ }
+
+ *cs++ = cmd;
+ *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
+
+ if (aux_inv) { /* hsdes: 1809175790 */
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv));
+ for_each_engine_masked(engine, rq->engine->gt,
+ aux_inv, tmp) {
+ *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine));
+ *cs++ = AUX_INV;
+ }
+ *cs++ = MI_NOOP;
+ }
+
+ if (mode & EMIT_INVALIDATE)
+ *cs++ = preparser_disable(false);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static u32 preempt_address(struct intel_engine_cs *engine)
+{
+ return (i915_ggtt_offset(engine->status_page.vma) +
+ I915_GEM_HWS_PREEMPT_ADDR);
+}
+
+static u32 hwsp_offset(const struct i915_request *rq)
+{
+ const struct intel_timeline_cacheline *cl;
+
+	/* Before the request is executed, the timeline/cacheline is fixed */
+
+ cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
+ if (cl)
+ return cl->ggtt_offset;
+
+ return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
+}
+
+int gen8_emit_init_breadcrumb(struct i915_request *rq)
+{
+ u32 *cs;
+
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
+ if (!i915_request_timeline(rq)->has_initial_breadcrumb)
+ return 0;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = hwsp_offset(rq);
+ *cs++ = 0;
+ *cs++ = rq->fence.seqno - 1;
+
+ /*
+ * Check if we have been preempted before we even get started.
+ *
+ * After this point i915_request_started() reports true, even if
+ * we get preempted and so are no longer running.
+ *
+ * i915_request_started() is used during preemption processing
+ * to decide if the request is currently inside the user payload
+ * or spinning on a kernel semaphore (or earlier). For no-preemption
+ * requests, we do allow preemption on the semaphore before the user
+ * payload, but do not allow preemption once the request is started.
+ *
+ * i915_request_started() is similarly used during GPU hangs to
+ * determine if the user's payload was guilty, and if so, the
+ * request is banned. Before the request is started, it is assumed
+ * to be unharmed and an innocent victim of another's hang.
+ */
+ *cs++ = MI_NOOP;
+ *cs++ = MI_ARB_CHECK;
+
+ intel_ring_advance(rq, cs);
+
+ /* Record the updated position of the request's payload */
+ rq->infix = intel_ring_offset(rq, cs);
+
+ __set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
+
+ return 0;
+}
+
+int gen8_emit_bb_start_noarb(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * WaDisableCtxRestoreArbitration:bdw,chv
+ *
+ * We don't need to perform MI_ARB_ENABLE as often as we do (in
+ * particular all the gen that do not need the w/a at all!), if we
+ * took care to make sure that on every switch into this context
+	 * (both ordinary and for preemption) that arbitration was enabled
+ * we would be fine. However, for gen8 there is another w/a that
+ * requires us to not preempt inside GPGPU execution, so we keep
+ * arbitration disabled for gen8 batches. Arbitration will be
+ * re-enabled before we close the request
+ * (engine->emit_fini_breadcrumb).
+ */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ /* FIXME(BDW+): Address space and security selectors. */
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen8_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ u32 *cs;
+
+ if (unlikely(i915_request_has_nopreempt(rq)))
+ return gen8_emit_bb_start_noarb(rq, offset, len, flags);
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static void assert_request_valid(struct i915_request *rq)
+{
+ struct intel_ring *ring __maybe_unused = rq->ring;
+
+ /* Can we unwind this request without appearing to go forwards? */
+ GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
+}
+
+/*
+ * Reserve space for 2 NOOPs at the end of each request to be
+ * used as a workaround for not being allowed to do lite
+ * restore with HEAD==TAIL (WaIdleLiteRestore).
+ */
+static u32 *gen8_emit_wa_tail(struct i915_request *rq, u32 *cs)
+{
+ /* Ensure there's always at least one preemption point per-request. */
+ *cs++ = MI_ARB_CHECK;
+ *cs++ = MI_NOOP;
+ rq->wa_tail = intel_ring_offset(rq, cs);
+
+ /* Check that entire request is less than half the ring */
+ assert_request_valid(rq);
+
+ return cs;
+}
+
+static u32 *emit_preempt_busywait(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = preempt_address(rq->engine);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ return cs;
+}
+
+static __always_inline u32*
+gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_USER_INTERRUPT;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(rq->engine))
+ cs = emit_preempt_busywait(rq, cs);
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return gen8_emit_wa_tail(rq, cs);
+}
+
+static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
+}
+
+u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+ return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
+}
+
+u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ cs = gen8_emit_pipe_control(cs,
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE,
+ 0);
+
+ /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ rq->fence.seqno,
+ hwsp_offset(rq),
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL);
+
+ return gen8_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ rq->fence.seqno,
+ hwsp_offset(rq),
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE);
+
+ return gen8_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+/*
+ * Note that the CS instruction pre-parser will not stall on the breadcrumb
+ * flush and will continue pre-fetching the instructions after it before the
+ * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
+ * BB_START/END instructions, so, even though we might pre-fetch the pre-amble
+ * of the next request before the memory has been flushed, we're guaranteed that
+ * we won't access the batch itself too early.
+ * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
+ * so, if the current request is modifying an instruction in the next request on
+ * the same intel_context, we might pre-fetch and then execute the pre-update
+ * instruction. To avoid this, the users of self-modifying code should either
+ * disable the parser around the code emitting the memory writes, via a new flag
+ * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
+ * the in-kernel use-cases we've opted to use a separate context, see
+ * reloc_gpu() as an example.
+ * All the above applies only to the instructions themselves. Non-inline data
+ * used by the instructions is not pre-fetched.
+ */
+
+static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
+ *cs++ = MI_SEMAPHORE_WAIT_TOKEN |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = preempt_address(rq->engine);
+ *cs++ = 0;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static __always_inline u32*
+gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_USER_INTERRUPT;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(rq->engine))
+ cs = gen12_emit_preempt_busywait(rq, cs);
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return gen8_emit_wa_tail(rq, cs);
+}
+
+u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+ /* XXX Stalling flush before seqno write; post-sync not */
+ cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
+ return gen12_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ cs = gen12_emit_ggtt_write_rcs(cs,
+ rq->fence.seqno,
+ hwsp_offset(rq),
+ PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ /* Wa_1409600907:tgl */
+ PIPE_CONTROL_DEPTH_STALL |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE);
+
+ return gen12_emit_fini_breadcrumb_tail(rq, cs);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
new file mode 100644
index 000000000000..cc6e21d3662a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#ifndef __GEN8_ENGINE_CS_H__
+#define __GEN8_ENGINE_CS_H__
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+
+#include "intel_gpu_commands.h"
+
+struct i915_request;
+
+int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode);
+
+int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode);
+int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode);
+
+int gen8_emit_init_breadcrumb(struct i915_request *rq);
+
+int gen8_emit_bb_start_noarb(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+int gen8_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+
+u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+
+u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+
+static inline u32 *
+__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+{
+ memset(batch, 0, 6 * sizeof(u32));
+
+ batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
+ batch[1] = flags1;
+ batch[2] = offset;
+
+ return batch + 6;
+}
+
+static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+{
+ return __gen8_emit_pipe_control(batch, 0, flags, offset);
+}
+
+static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+{
+ return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
+}
+
+static inline u32 *
+__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
+{
+ *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
+ *cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = value;
+ *cs++ = 0; /* We're thrashing one extra dword. */
+
+ return cs;
+}
+
+static inline u32*
+gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ /* We're using qword write, offset should be aligned to 8 bytes. */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_write_rcs(cs,
+ value,
+ gtt_offset,
+ 0,
+ flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
+}
+
+static inline u32*
+gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
+{
+ /* We're using qword write, offset should be aligned to 8 bytes. */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_write_rcs(cs,
+ value,
+ gtt_offset,
+ flags0,
+ flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
+}
+
+static inline u32 *
+__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ *cs++ = (MI_FLUSH_DW + 1) | flags;
+ *cs++ = gtt_offset;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
+}
+
+static inline u32 *
+gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
+ GEM_BUG_ON(gtt_offset & (1 << 5));
+ /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_flush_dw(cs,
+ value,
+ gtt_offset | MI_FLUSH_DW_USE_GTT,
+ flags | MI_FLUSH_DW_OP_STOREDW);
+}
+
+#endif /* __GEN8_ENGINE_CS_H__ */
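As a worked example of the helpers above, a breadcrumb emitted through gen8_emit_ggtt_write_rcs() is a single 6-dword PIPE_CONTROL carrying a post-sync qword write to the GGTT. Illustrative sketch only; seqno, hwsp and flags are placeholder names:

	/* Dwords produced by gen8_emit_ggtt_write_rcs(cs, seqno, hwsp, flags) */
	u32 dw[6] = {
		GFX_OP_PIPE_CONTROL(6),			/* flags0 is 0 on the gen8 path */
		flags | PIPE_CONTROL_GLOBAL_GTT_IVB |
			PIPE_CONTROL_QW_WRITE,		/* cache flags plus the post-sync write */
		hwsp,					/* GGTT offset, must be 8-byte aligned */
		0,					/* upper 32 bits of the address */
		seqno,					/* value stored at that offset */
		0,					/* unused upper half of the qword */
	};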
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index a37c968ef8f7..755522ced60d 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -109,7 +109,7 @@ static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
-static inline unsigned int
+static unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
const int shift = gen8_pd_shift(lvl);
@@ -125,7 +125,7 @@ gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
return i915_pde_index(end, shift) - *idx;
}
-static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
+static bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
@@ -133,7 +133,7 @@ static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
return (start ^ end) & mask && (start & ~mask) == 0;
}
-static inline unsigned int gen8_pt_count(u64 start, u64 end)
+static unsigned int gen8_pt_count(u64 start, u64 end)
{
GEM_BUG_ON(start >= end);
if ((start ^ end) >> gen8_pd_shift(1))
@@ -142,14 +142,13 @@ static inline unsigned int gen8_pt_count(u64 start, u64 end)
return end - start;
}
-static inline unsigned int
-gen8_pd_top_count(const struct i915_address_space *vm)
+static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
{
unsigned int shift = __gen8_pte_shift(vm->top);
return (vm->total + (1ull << shift) - 1) >> shift;
}
-static inline struct i915_page_directory *
+static struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
@@ -160,7 +159,7 @@ gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}
-static inline struct i915_page_directory *
+static struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 0625cbb3b431..34a645d6babd 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -187,18 +187,6 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
intel_engine_add_retire(b->irq_engine, tl);
}
-static bool __signal_request(struct i915_request *rq)
-{
- GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
-
- if (!__dma_fence_signal(&rq->fence)) {
- i915_request_put(rq);
- return false;
- }
-
- return true;
-}
-
static struct llist_node *
slist_add(struct llist_node *node, struct llist_node *head)
{
@@ -246,6 +234,7 @@ static void signal_irq_work(struct irq_work *work)
intel_breadcrumbs_disarm_irq(b);
rcu_read_lock();
+ atomic_inc(&b->signaler_active);
list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
struct i915_request *rq;
@@ -268,17 +257,20 @@ static void signal_irq_work(struct irq_work *work)
list_del_rcu(&rq->signal_link);
release = remove_signaling_context(b, ce);
spin_unlock(&ce->signal_lock);
-
- if (__signal_request(rq))
- /* We own signal_node now, xfer to local list */
- signal = slist_add(&rq->signal_node, signal);
-
if (release) {
- add_retire(b, ce->timeline);
+ if (intel_timeline_is_last(ce->timeline, rq))
+ add_retire(b, ce->timeline);
intel_context_put(ce);
}
+
+ if (__dma_fence_signal(&rq->fence))
+ /* We own signal_node now, xfer to local list */
+ signal = slist_add(&rq->signal_node, signal);
+ else
+ i915_request_put(rq);
}
}
+ atomic_dec(&b->signaler_active);
rcu_read_unlock();
llist_for_each_safe(signal, sn, signal) {
@@ -337,17 +329,19 @@ void intel_breadcrumbs_reset(struct intel_breadcrumbs *b)
spin_unlock_irqrestore(&b->irq_lock, flags);
}
-void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
+void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
{
- /* Kick the work once more to drain the signalers */
+ if (!READ_ONCE(b->irq_armed))
+ return;
+
+ /* Kick the work once more to drain the signalers, and disarm the irq */
irq_work_sync(&b->irq_work);
- while (unlikely(READ_ONCE(b->irq_armed))) {
+ while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
local_irq_disable();
signal_irq_work(&b->irq_work);
local_irq_enable();
cond_resched();
}
- GEM_BUG_ON(!list_empty(&b->signalers));
}
void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
@@ -358,6 +352,17 @@ void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
kfree(b);
}
+static void irq_signal_request(struct i915_request *rq,
+ struct intel_breadcrumbs *b)
+{
+ if (!__dma_fence_signal(&rq->fence))
+ return;
+
+ i915_request_get(rq);
+ if (llist_add(&rq->signal_node, &b->signaled_requests))
+ irq_work_queue(&b->irq_work);
+}
+
static void insert_breadcrumb(struct i915_request *rq)
{
struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
@@ -367,17 +372,13 @@ static void insert_breadcrumb(struct i915_request *rq)
if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
return;
- i915_request_get(rq);
-
/*
* If the request is already completed, we can transfer it
* straight onto a signaled list, and queue the irq worker for
* its signal completion.
*/
if (__i915_request_is_complete(rq)) {
- if (__signal_request(rq) &&
- llist_add(&rq->signal_node, &b->signaled_requests))
- irq_work_queue(&b->irq_work);
+ irq_signal_request(rq, b);
return;
}
@@ -408,6 +409,8 @@ static void insert_breadcrumb(struct i915_request *rq)
break;
}
}
+
+ i915_request_get(rq);
list_add_rcu(&rq->signal_link, pos);
GEM_BUG_ON(!check_signal_order(ce, rq));
GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
@@ -448,22 +451,61 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
+ struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
struct intel_context *ce = rq->context;
bool release;
- if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
+ spin_lock(&ce->signal_lock);
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
+ spin_unlock(&ce->signal_lock);
return;
+ }
- spin_lock(&ce->signal_lock);
list_del_rcu(&rq->signal_link);
- release = remove_signaling_context(rq->engine->breadcrumbs, ce);
+ release = remove_signaling_context(b, ce);
spin_unlock(&ce->signal_lock);
if (release)
intel_context_put(ce);
+ if (__i915_request_is_complete(rq))
+ irq_signal_request(rq, b);
+
i915_request_put(rq);
}
+void intel_context_remove_breadcrumbs(struct intel_context *ce,
+ struct intel_breadcrumbs *b)
+{
+ struct i915_request *rq, *rn;
+ bool release = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ce->signal_lock, flags);
+
+ if (list_empty(&ce->signals))
+ goto unlock;
+
+ list_for_each_entry_safe(rq, rn, &ce->signals, signal_link) {
+ GEM_BUG_ON(!__i915_request_is_complete(rq));
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
+ &rq->fence.flags))
+ continue;
+
+ list_del_rcu(&rq->signal_link);
+ irq_signal_request(rq, b);
+ i915_request_put(rq);
+ }
+ release = remove_signaling_context(b, ce);
+
+unlock:
+ spin_unlock_irqrestore(&ce->signal_lock, flags);
+ if (release)
+ intel_context_put(ce);
+
+ while (atomic_read(&b->signaler_active))
+ cpu_relax();
+}
+
static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
{
struct intel_context *ce;
@@ -476,8 +518,8 @@ static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
list_for_each_entry_rcu(rq, &ce->signals, signal_link)
drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
rq->fence.context, rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
+ __i915_request_is_complete(rq) ? "!" :
+ __i915_request_has_started(rq) ? "*" :
"",
jiffies_to_msecs(jiffies - rq->emitted_jiffies));
}
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
index ed3d1deabfbd..3ce5ce270b04 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
@@ -6,6 +6,7 @@
#ifndef __INTEL_BREADCRUMBS__
#define __INTEL_BREADCRUMBS__
+#include <linux/atomic.h>
#include <linux/irq_work.h>
#include "intel_engine_types.h"
@@ -19,7 +20,18 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine);
void intel_breadcrumbs_free(struct intel_breadcrumbs *b);
void intel_breadcrumbs_reset(struct intel_breadcrumbs *b);
-void intel_breadcrumbs_park(struct intel_breadcrumbs *b);
+void __intel_breadcrumbs_park(struct intel_breadcrumbs *b);
+
+static inline void intel_breadcrumbs_unpark(struct intel_breadcrumbs *b)
+{
+ atomic_inc(&b->active);
+}
+
+static inline void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
+{
+ if (atomic_dec_and_test(&b->active))
+ __intel_breadcrumbs_park(b);
+}
static inline void
intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
@@ -33,4 +45,7 @@ void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
bool i915_request_enable_breadcrumb(struct i915_request *request);
void i915_request_cancel_breadcrumb(struct i915_request *request);
+void intel_context_remove_breadcrumbs(struct intel_context *ce,
+ struct intel_breadcrumbs *b);
+
#endif /* __INTEL_BREADCRUMBS__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
index a74bb3062bd8..3a084ce8ff5e 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
@@ -29,17 +29,20 @@
* the overhead of waking that client is much preferred.
*/
struct intel_breadcrumbs {
- /* Not all breadcrumbs are attached to physical HW */
- struct intel_engine_cs *irq_engine;
+ atomic_t active;
spinlock_t signalers_lock; /* protects the list of signalers */
struct list_head signalers;
struct llist_head signaled_requests;
+ atomic_t signaler_active;
spinlock_t irq_lock; /* protects the interrupt from hardirq context */
struct irq_work irq_work; /* for use from inside irq_lock */
unsigned int irq_enabled;
bool irq_armed;
+
+ /* Not all breadcrumbs are attached to physical HW */
+ struct intel_engine_cs *irq_engine;
};
#endif /* __INTEL_BREADCRUMBS_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index fda2eba81e22..d24ab6fa0ee5 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -191,6 +191,11 @@ static inline bool intel_context_is_closed(const struct intel_context *ce)
return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
}
+static inline bool intel_context_has_inflight(const struct intel_context *ce)
+{
+ return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags);
+}
+
static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
@@ -248,16 +253,14 @@ intel_context_clear_nopreempt(struct intel_context *ce)
static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
{
- const u32 period =
- RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+ const u32 period = ce->engine->gt->clock_period_ns;
return READ_ONCE(ce->runtime.total) * period;
}
static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
{
- const u32 period =
- RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+ const u32 period = ce->engine->gt->clock_period_ns;
return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
index b9c8163978a3..8dfd8f656aaa 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -9,7 +9,6 @@
#include "intel_engine_pm.h"
#include "intel_gpu_commands.h"
#include "intel_lrc.h"
-#include "intel_lrc_reg.h"
#include "intel_ring.h"
#include "intel_sseu.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 52fa9c132746..e10d78601bbd 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -30,6 +30,10 @@ struct intel_context;
struct intel_ring;
struct intel_context_ops {
+ unsigned long flags;
+#define COPS_HAS_INFLIGHT_BIT 0
+#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)
+
int (*alloc)(struct intel_context *ce);
int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
@@ -58,8 +62,12 @@ struct intel_context {
struct intel_engine_cs *engine;
struct intel_engine_cs *inflight;
-#define intel_context_inflight(ce) ptr_mask_bits(READ_ONCE((ce)->inflight), 2)
-#define intel_context_inflight_count(ce) ptr_unmask_bits(READ_ONCE((ce)->inflight), 2)
+#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
+#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
+#define intel_context_inflight(ce) \
+ __intel_context_inflight(READ_ONCE((ce)->inflight))
+#define intel_context_inflight_count(ce) \
+ __intel_context_inflight_count(READ_ONCE((ce)->inflight))
struct i915_address_space *vm;
struct i915_gem_context __rcu *gem_context;
@@ -81,12 +89,13 @@ struct intel_context {
unsigned long flags;
#define CONTEXT_BARRIER_BIT 0
#define CONTEXT_ALLOC_BIT 1
-#define CONTEXT_VALID_BIT 2
-#define CONTEXT_CLOSED_BIT 3
-#define CONTEXT_USE_SEMAPHORES 4
-#define CONTEXT_BANNED 5
-#define CONTEXT_FORCE_SINGLE_SUBMISSION 6
-#define CONTEXT_NOPREEMPT 7
+#define CONTEXT_INIT_BIT 2
+#define CONTEXT_VALID_BIT 3
+#define CONTEXT_CLOSED_BIT 4
+#define CONTEXT_USE_SEMAPHORES 5
+#define CONTEXT_BANNED 6
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 7
+#define CONTEXT_NOPREEMPT 8
u32 *lrc_reg_state;
union {
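The widened inflight encoding above relies on engine pointers being at least 8-byte aligned, so a small submission count can ride in the low bits; a minimal sketch of the packing the new 3-bit macros assume (variable names are illustrative):

	/* ce->inflight: engine pointer in the high bits, request count in the low 3 bits */
	unsigned long v = (unsigned long)READ_ONCE(ce->inflight);
	struct intel_engine_cs *engine = (struct intel_engine_cs *)(v & ~7ul);	/* ptr_mask_bits(v, 3) */
	unsigned int count = v & 7;						/* ptr_unmask_bits(v, 3) */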
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 760fefdfe392..47ee8578e511 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -15,7 +15,6 @@
#include "i915_selftest.h"
#include "gt/intel_timeline.h"
#include "intel_engine_types.h"
-#include "intel_gpu_commands.h"
#include "intel_workarounds.h"
struct drm_printer;
@@ -223,91 +222,6 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
void intel_engine_init_execlists(struct intel_engine_cs *engine);
-static inline u32 *__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
-{
- memset(batch, 0, 6 * sizeof(u32));
-
- batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
- batch[1] = flags1;
- batch[2] = offset;
-
- return batch + 6;
-}
-
-static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
-{
- return __gen8_emit_pipe_control(batch, 0, flags, offset);
-}
-
-static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
-{
- return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
-}
-
-static inline u32 *
-__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
-{
- *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
- *cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
- *cs++ = offset;
- *cs++ = 0;
- *cs++ = value;
- *cs++ = 0; /* We're thrashing one extra dword. */
-
- return cs;
-}
-
-static inline u32*
-gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
-{
- /* We're using qword write, offset should be aligned to 8 bytes. */
- GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
- return __gen8_emit_write_rcs(cs,
- value,
- gtt_offset,
- 0,
- flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
-}
-
-static inline u32*
-gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
-{
- /* We're using qword write, offset should be aligned to 8 bytes. */
- GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
- return __gen8_emit_write_rcs(cs,
- value,
- gtt_offset,
- flags0,
- flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
-}
-
-static inline u32 *
-__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
-{
- *cs++ = (MI_FLUSH_DW + 1) | flags;
- *cs++ = gtt_offset;
- *cs++ = 0;
- *cs++ = value;
-
- return cs;
-}
-
-static inline u32 *
-gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
-{
- /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
- GEM_BUG_ON(gtt_offset & (1 << 5));
- /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
- GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
- return __gen8_emit_flush_dw(cs,
- value,
- gtt_offset | MI_FLUSH_DW_USE_GTT,
- flags | MI_FLUSH_DW_OP_STOREDW);
-}
-
static inline void __intel_engine_reset(struct intel_engine_cs *engine,
bool stalled)
{
@@ -318,7 +232,12 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
bool intel_engines_are_idle(struct intel_gt *gt);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
-void intel_engine_flush_submission(struct intel_engine_cs *engine);
+
+void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync);
+static inline void intel_engine_flush_submission(struct intel_engine_cs *engine)
+{
+ __intel_engine_flush_submission(engine, true);
+}
void intel_engines_reset_default_submission(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 0b31670343f5..fb1b1d096975 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -33,12 +33,14 @@
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_user.h"
+#include "intel_execlists_submission.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_gt_pm.h"
-#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
#include "intel_reset.h"
#include "intel_ring.h"
+#include "uc/intel_guc_submission.h"
/* Haswell does have the CXT_SIZE register however it does not appear to be
* valid. Now, docs explain in dwords what is in the context object. The full
@@ -340,7 +342,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->schedule = NULL;
ewma__engine_latency_init(&engine->latency);
- seqlock_init(&engine->stats.lock);
+ seqcount_init(&engine->stats.lock);
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
@@ -647,6 +649,8 @@ static int init_status_page(struct intel_engine_cs *engine)
void *vaddr;
int ret;
+ INIT_LIST_HEAD(&engine->status_page.timelines);
+
/*
* Though the HWS register does support 36bit addresses, historically
* we have had hangs and corruption reported due to wild writes if
@@ -723,6 +727,9 @@ static int engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_whitelist(engine);
intel_engine_init_ctx_wa(engine);
+ if (INTEL_GEN(engine->i915) >= 12)
+ engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
+
return 0;
err_status:
@@ -829,6 +836,21 @@ create_pinned_context(struct intel_engine_cs *engine,
return ce;
}
+static void destroy_pinned_context(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct i915_vma *hwsp = engine->status_page.vma;
+
+ GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);
+
+ mutex_lock(&hwsp->vm->mutex);
+ list_del(&ce->timeline->engine_link);
+ mutex_unlock(&hwsp->vm->mutex);
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+}
+
static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
@@ -889,7 +911,9 @@ int intel_engines_init(struct intel_gt *gt)
enum intel_engine_id id;
int err;
- if (HAS_EXECLISTS(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ setup = intel_guc_submission_setup;
+ else if (HAS_EXECLISTS(gt->i915))
setup = intel_execlists_submission_setup;
else
setup = intel_ring_submission_setup;
@@ -925,7 +949,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
GEM_BUG_ON(!list_empty(&engine->active.requests));
tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
- cleanup_status_page(engine);
intel_breadcrumbs_free(engine->breadcrumbs);
intel_engine_fini_retire(engine);
@@ -934,11 +957,11 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
fput(engine->default_state);
- if (engine->kernel_context) {
- intel_context_unpin(engine->kernel_context);
- intel_context_put(engine->kernel_context);
- }
+ if (engine->kernel_context)
+ destroy_pinned_context(engine->kernel_context);
+
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
+ cleanup_status_page(engine);
intel_wa_list_free(&engine->ctx_wa_list);
intel_wa_list_free(&engine->wa_list);
@@ -1002,32 +1025,50 @@ static unsigned long stop_timeout(const struct intel_engine_cs *engine)
return READ_ONCE(engine->props.stop_timeout_ms);
}
-int intel_engine_stop_cs(struct intel_engine_cs *engine)
+static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
+ int fast_timeout_us,
+ int slow_timeout_ms)
{
struct intel_uncore *uncore = engine->uncore;
- const u32 base = engine->mmio_base;
- const i915_reg_t mode = RING_MI_MODE(base);
+ const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
int err;
+ intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
+ err = __intel_wait_for_register_fw(engine->uncore, mode,
+ MODE_IDLE, MODE_IDLE,
+ fast_timeout_us,
+ slow_timeout_ms,
+ NULL);
+
+ /* A final mmio read to let GPU writes be hopefully flushed to memory */
+ intel_uncore_posting_read_fw(uncore, mode);
+ return err;
+}
+
+int intel_engine_stop_cs(struct intel_engine_cs *engine)
+{
+ int err = 0;
+
if (INTEL_GEN(engine->i915) < 3)
return -ENODEV;
ENGINE_TRACE(engine, "\n");
+ if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
+ ENGINE_TRACE(engine,
+ "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
+ ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR,
+ ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR);
- intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
-
- err = 0;
- if (__intel_wait_for_register_fw(uncore,
- mode, MODE_IDLE, MODE_IDLE,
- 1000, stop_timeout(engine),
- NULL)) {
- ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n");
- err = -ETIMEDOUT;
+ /*
+ * Sometimes we observe that the idle flag is not
+ * set even though the ring is empty. So double
+ * check before giving up.
+ */
+ if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) !=
+ (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR))
+ err = -ETIMEDOUT;
}
- /* A final mmio read to let GPU writes be hopefully flushed to memory */
- intel_uncore_posting_read_fw(uncore, mode);
-
return err;
}
@@ -1189,17 +1230,13 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
return idle;
}
-void intel_engine_flush_submission(struct intel_engine_cs *engine)
+void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
{
struct tasklet_struct *t = &engine->execlists.tasklet;
if (!t->func)
return;
- /* Synchronise and wait for the tasklet on another CPU */
- tasklet_kill(t);
-
- /* Having cancelled the tasklet, ensure that is run */
local_bh_disable();
if (tasklet_trylock(t)) {
/* Must wait for any GPU reset in progress. */
@@ -1208,6 +1245,10 @@ void intel_engine_flush_submission(struct intel_engine_cs *engine)
tasklet_unlock(t);
}
local_bh_enable();
+
+ /* Synchronise and wait for the tasklet on another CPU */
+ if (sync)
+ tasklet_unlock_wait(t);
}
/**
@@ -1273,8 +1314,12 @@ void intel_engines_reset_default_submission(struct intel_gt *gt)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt, id)
+ for_each_engine(engine, gt, id) {
+ if (engine->sanitize)
+ engine->sanitize(engine);
+
engine->set_default_submission(engine);
+ }
}
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
@@ -1294,44 +1339,6 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
}
}
-static int print_sched_attr(const struct i915_sched_attr *attr,
- char *buf, int x, int len)
-{
- if (attr->priority == I915_PRIORITY_INVALID)
- return x;
-
- x += snprintf(buf + x, len - x,
- " prio=%d", attr->priority);
-
- return x;
-}
-
-static void print_request(struct drm_printer *m,
- struct i915_request *rq,
- const char *prefix)
-{
- const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
- char buf[80] = "";
- int x = 0;
-
- x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
-
- drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
- prefix,
- rq->fence.context, rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
- "",
- test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &rq->fence.flags) ? "+" :
- test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &rq->fence.flags) ? "-" :
- "",
- buf,
- jiffies_to_msecs(jiffies - rq->emitted_jiffies),
- name);
-}
-
static struct intel_timeline *get_timeline(struct i915_request *rq)
{
struct intel_timeline *tl;
@@ -1480,7 +1487,9 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
- if (HAS_EXECLISTS(dev_priv)) {
+ if (intel_engine_in_guc_submission_mode(engine)) {
+ /* nothing to print yet */
+ } else if (HAS_EXECLISTS(dev_priv)) {
struct i915_request * const *port, *rq;
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -1529,7 +1538,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
intel_context_is_banned(rq->context) ? "*" : "");
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
- print_request(m, rq, hdr);
+ i915_request_show(m, rq, hdr, 0);
}
for (port = execlists->pending; (rq = *port); port++) {
char hdr[160];
@@ -1543,7 +1552,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
intel_context_is_banned(rq->context) ? "*" : "");
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
- print_request(m, rq, hdr);
+ i915_request_show(m, rq, hdr, 0);
}
rcu_read_unlock();
execlists_active_unlock_bh(execlists);
@@ -1667,7 +1676,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
ktime_to_ms(intel_engine_get_busy_time(engine,
&dummy)));
drm_printf(m, "\tForcewake: %x domains, %d active\n",
- engine->fw_domain, atomic_read(&engine->fw_active));
+ engine->fw_domain, READ_ONCE(engine->fw_active));
rcu_read_lock();
rq = READ_ONCE(engine->heartbeat.systole);
@@ -1687,7 +1696,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
if (rq) {
struct intel_timeline *tl = get_timeline(rq);
- print_request(m, rq, "\t\tactive ");
+ i915_request_show(m, rq, "\t\tactive ", 0);
drm_printf(m, "\t\tring->start: 0x%08x\n",
i915_ggtt_offset(rq->ring->vma));
@@ -1725,7 +1734,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
- intel_execlists_show_requests(engine, m, print_request, 8);
+ intel_execlists_show_requests(engine, m, i915_request_show, 8);
drm_printf(m, "HWSP:\n");
hexdump(m, engine->status_page.addr, PAGE_SIZE);
@@ -1745,7 +1754,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
* add it to the total.
*/
*now = ktime_get();
- if (atomic_read(&engine->stats.active))
+ if (READ_ONCE(engine->stats.active))
total = ktime_add(total, ktime_sub(*now, engine->stats.start));
return total;
@@ -1764,9 +1773,9 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
ktime_t total;
do {
- seq = read_seqbegin(&engine->stats.lock);
+ seq = read_seqcount_begin(&engine->stats.lock);
total = __intel_engine_get_busy_time(engine, now);
- } while (read_seqretry(&engine->stats.lock, seq));
+ } while (read_seqcount_retry(&engine->stats.lock, seq));
return total;
}
@@ -1802,7 +1811,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
struct intel_timeline *tl = request->context->timeline;
list_for_each_entry_from_reverse(request, &tl->requests, link) {
- if (i915_request_completed(request))
+ if (__i915_request_is_complete(request))
break;
active = request;
@@ -1813,10 +1822,10 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
return active;
list_for_each_entry(request, &engine->active.requests, sched.link) {
- if (i915_request_completed(request))
+ if (__i915_request_is_complete(request))
continue;
- if (!i915_request_started(request))
+ if (!__i915_request_has_started(request))
continue;
/* More than one preemptible request may match! */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 9060385cd69e..d7be2b9339f9 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -37,6 +37,18 @@ static bool next_heartbeat(struct intel_engine_cs *engine)
return true;
}
+static struct i915_request *
+heartbeat_create(struct intel_context *ce, gfp_t gfp)
+{
+ struct i915_request *rq;
+
+ intel_context_enter(ce);
+ rq = __i915_request_create(ce, gfp);
+ intel_context_exit(ce);
+
+ return rq;
+}
+
static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
{
engine->wakeref_serial = READ_ONCE(engine->serial) + 1;
@@ -45,6 +57,15 @@ static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
engine->heartbeat.systole = i915_request_get(rq);
}
+static void heartbeat_commit(struct i915_request *rq,
+ const struct i915_sched_attr *attr)
+{
+ idle_pulse(rq->engine, rq);
+
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, attr);
+}
+
static void show_heartbeat(const struct i915_request *rq,
struct intel_engine_cs *engine)
{
@@ -139,16 +160,11 @@ static void heartbeat(struct work_struct *wrk)
goto out;
}
- intel_context_enter(ce);
- rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
- intel_context_exit(ce);
+ rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN);
if (IS_ERR(rq))
goto unlock;
- idle_pulse(engine, rq);
-
- __i915_request_commit(rq);
- __i915_request_queue(rq, &attr);
+ heartbeat_commit(rq, &attr);
unlock:
mutex_unlock(&ce->timeline->mutex);
@@ -187,17 +203,13 @@ static int __intel_engine_pulse(struct intel_engine_cs *engine)
GEM_BUG_ON(!intel_engine_has_preemption(engine));
GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
- intel_context_enter(ce);
- rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
- intel_context_exit(ce);
+ rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN);
if (IS_ERR(rq))
return PTR_ERR(rq);
__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
- idle_pulse(engine, rq);
- __i915_request_commit(rq);
- __i915_request_queue(rq, &attr);
+ heartbeat_commit(rq, &attr);
GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
return 0;
@@ -273,8 +285,12 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
int intel_engine_flush_barriers(struct intel_engine_cs *engine)
{
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MIN),
+ };
+ struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
- int err = 0;
+ int err;
if (llist_empty(&engine->barrier_tasks))
return 0;
@@ -282,15 +298,22 @@ int intel_engine_flush_barriers(struct intel_engine_cs *engine)
if (!intel_engine_pm_get_if_awake(engine))
return 0;
- rq = i915_request_create(engine->kernel_context);
+ if (mutex_lock_interruptible(&ce->timeline->mutex)) {
+ err = -EINTR;
+ goto out_rpm;
+ }
+
+ rq = heartbeat_create(ce, GFP_KERNEL);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_rpm;
+ goto out_unlock;
}
- idle_pulse(engine, rq);
- i915_request_add(rq);
+ heartbeat_commit(rq, &attr);
+ err = 0;
+out_unlock:
+ mutex_unlock(&ce->timeline->mutex);
out_rpm:
intel_engine_pm_put(engine);
return err;
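
As a rough illustration of how the new heartbeat_create()/heartbeat_commit() helpers compose: intel_engine_flush_barriers() above takes the kernel context's timeline mutex, builds the pulse, then queues it. The sketch below captures that flow under the same assumptions; send_pulse() is a hypothetical wrapper name, not something introduced by this patch.

	static int send_pulse(struct intel_context *ce,
			      const struct i915_sched_attr *attr,
			      gfp_t gfp)
	{
		struct i915_request *rq;
		int err = 0;

		/* Serialise pulses on the kernel context's timeline */
		if (mutex_lock_interruptible(&ce->timeline->mutex))
			return -EINTR;

		/* Enter/exit the context around request construction */
		rq = heartbeat_create(ce, gfp);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_unlock;
		}

		/* Record the pulse and hand the request to the backend */
		heartbeat_commit(rq, attr);

	out_unlock:
		mutex_unlock(&ce->timeline->mutex);
		return err;
	}
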
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 499b09cb4acf..e67d09259dd0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -60,18 +60,26 @@ static int __engine_unpark(struct intel_wakeref *wf)
/* Scrub the context image after our loss of control */
ce->ops->reset(ce);
+
+ CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
+ ce->timeline->seqno,
+ READ_ONCE(*ce->timeline->hwsp_seqno),
+ ce->ring->emit);
+ GEM_BUG_ON(ce->timeline->seqno !=
+ READ_ONCE(*ce->timeline->hwsp_seqno));
}
if (engine->unpark)
engine->unpark(engine);
+ intel_breadcrumbs_unpark(engine->breadcrumbs);
intel_engine_unpark_heartbeat(engine);
return 0;
}
#if IS_ENABLED(CONFIG_LOCKDEP)
-static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
+static unsigned long __timeline_mark_lock(struct intel_context *ce)
{
unsigned long flags;
@@ -81,8 +89,8 @@ static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
return flags;
}
-static inline void __timeline_mark_unlock(struct intel_context *ce,
- unsigned long flags)
+static void __timeline_mark_unlock(struct intel_context *ce,
+ unsigned long flags)
{
mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
local_irq_restore(flags);
@@ -90,13 +98,13 @@ static inline void __timeline_mark_unlock(struct intel_context *ce,
#else
-static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
+static unsigned long __timeline_mark_lock(struct intel_context *ce)
{
return 0;
}
-static inline void __timeline_mark_unlock(struct intel_context *ce,
- unsigned long flags)
+static void __timeline_mark_unlock(struct intel_context *ce,
+ unsigned long flags)
{
}
@@ -136,7 +144,7 @@ __queue_and_release_pm(struct i915_request *rq,
list_add_tail(&tl->link, &timelines->active_list);
/* Hand the request over to HW and so engine_retire() */
- __i915_request_queue(rq, NULL);
+ __i915_request_queue_bh(rq);
/* Let new submissions commence (and maybe retire this timeline) */
__intel_wakeref_defer_park(&engine->wakeref);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_stats.h b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
new file mode 100644
index 000000000000..24fbdd94351a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_ENGINE_STATS_H__
+#define __INTEL_ENGINE_STATS_H__
+
+#include <linux/atomic.h>
+#include <linux/ktime.h>
+#include <linux/seqlock.h>
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+#include "intel_engine.h"
+
+static inline void intel_engine_context_in(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ if (engine->stats.active) {
+ engine->stats.active++;
+ return;
+ }
+
+ /* The writer is serialised; but the pmu reader may be from hardirq */
+ local_irq_save(flags);
+ write_seqcount_begin(&engine->stats.lock);
+
+ engine->stats.start = ktime_get();
+ engine->stats.active++;
+
+ write_seqcount_end(&engine->stats.lock);
+ local_irq_restore(flags);
+
+ GEM_BUG_ON(!engine->stats.active);
+}
+
+static inline void intel_engine_context_out(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ GEM_BUG_ON(!engine->stats.active);
+ if (engine->stats.active > 1) {
+ engine->stats.active--;
+ return;
+ }
+
+ local_irq_save(flags);
+ write_seqcount_begin(&engine->stats.lock);
+
+ engine->stats.active--;
+ engine->stats.total =
+ ktime_add(engine->stats.total,
+ ktime_sub(ktime_get(), engine->stats.start));
+
+ write_seqcount_end(&engine->stats.lock);
+ local_irq_restore(flags);
+}
+
+#endif /* __INTEL_ENGINE_STATS_H__ */
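
The writers above are serialised by the submission backend, so engine->stats.lock only needs to be a seqcount_t and a lockless reader simply retries if it raced with an update. A minimal sketch of that reader, assuming the stats fields declared in intel_engine_types.h, mirrors the __intel_engine_get_busy_time()/intel_engine_get_busy_time() pair shown earlier; read_engine_busy_time() is a hypothetical name.

	static ktime_t read_engine_busy_time(struct intel_engine_cs *engine,
					     ktime_t *now)
	{
		unsigned int seq;
		ktime_t total;

		do {
			seq = read_seqcount_begin(&engine->stats.lock);

			total = engine->stats.total;
			*now = ktime_get();

			/* If a context is scheduled in, add the open interval */
			if (READ_ONCE(engine->stats.active))
				total = ktime_add(total,
						  ktime_sub(*now, engine->stats.start));
		} while (read_seqcount_retry(&engine->stats.lock, seq));

		return total;
	}
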
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index ee6312601c56..d2346b425547 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -68,6 +68,7 @@ typedef u8 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
struct intel_hw_status_page {
+ struct list_head timelines;
struct i915_vma *vma;
u32 *addr;
};
@@ -183,7 +184,8 @@ struct intel_engine_execlists {
* Reserve the upper 16b for tracking internal errors.
*/
u32 error_interrupt;
-#define ERROR_CSB BIT(31)
+#define ERROR_CSB BIT(31)
+#define ERROR_PREEMPT BIT(30)
/**
* @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
@@ -237,16 +239,6 @@ struct intel_engine_execlists {
unsigned int port_mask;
/**
- * @switch_priority_hint: Second context priority.
- *
- * We submit multiple contexts to the HW simultaneously and would
- * like to occasionally switch between them to emulate timeslicing.
- * To know when timeslicing is suitable, we track the priority of
- * the context submitted second.
- */
- int switch_priority_hint;
-
- /**
* @queue_priority_hint: Highest pending priority.
*
* When we add requests into the queue, or adjust the priority of
@@ -327,7 +319,7 @@ struct intel_engine_cs {
* as possible.
*/
enum forcewake_domains fw_domain;
- atomic_t fw_active;
+ unsigned int fw_active;
unsigned long context_tag;
@@ -524,12 +516,12 @@ struct intel_engine_cs {
/**
* @active: Number of contexts currently scheduled in.
*/
- atomic_t active;
+ unsigned int active;
/**
* @lock: Lock protecting the below fields.
*/
- seqlock_t lock;
+ seqcount_t lock;
/**
* @total: Total time this engine was busy.
@@ -559,6 +551,8 @@ struct intel_engine_cs {
unsigned long stop_timeout_ms;
unsigned long timeslice_duration_ms;
} props, defaults;
+
+ I915_SELFTEST_DECLARE(struct fault_attr reset_timeout);
};
static inline bool
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
new file mode 100644
index 000000000000..ac1be7a632d3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -0,0 +1,3896 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+/**
+ * DOC: Logical Rings, Logical Ring Contexts and Execlists
+ *
+ * Motivation:
+ * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
+ * These expanded contexts enable a number of new abilities, especially
+ * "Execlists" (also implemented in this file).
+ *
+ * One of the main differences with the legacy HW contexts is that logical
+ * ring contexts incorporate many more things to the context's state, like
+ * PDPs or ringbuffer control registers:
+ *
+ * The reason why PDPs are included in the context is straightforward: as
+ * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
+ * contained there means you don't need to do a ppgtt->switch_mm yourself,
+ * instead, the GPU will do it for you on the context switch.
+ *
+ * But, what about the ringbuffer control registers (head, tail, etc..)?
+ * shouldn't we just need a set of those per engine command streamer? This is
+ * where the name "Logical Rings" starts to make sense: by virtualizing the
+ * rings, the engine cs shifts to a new "ring buffer" with every context
+ * switch. When you want to submit a workload to the GPU you: A) choose your
+ * context, B) find its appropriate virtualized ring, C) write commands to it
+ * and then, finally, D) tell the GPU to switch to that context.
+ *
+ * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
+ * to a context is via a context execution list, ergo "Execlists".
+ *
+ * LRC implementation:
+ * Regarding the creation of contexts, we have:
+ *
+ * - One global default context.
+ * - One local default context for each opened fd.
+ * - One local extra context for each context create ioctl call.
+ *
+ * Now that ringbuffers belong per-context (and not per-engine, like before)
+ * and that contexts are uniquely tied to a given engine (and not reusable,
+ * like before) we need:
+ *
+ * - One ringbuffer per-engine inside each context.
+ * - One backing object per-engine inside each context.
+ *
+ * The global default context starts its life with these new objects fully
+ * allocated and populated. The local default context for each opened fd is
+ * more complex, because we don't know at creation time which engine is going
+ * to use them. To handle this, we have implemented a deferred creation of LR
+ * contexts:
+ *
+ * The local context starts its life as a hollow or blank holder, that only
+ * gets populated for a given engine once we receive an execbuffer. If later
+ * on we receive another execbuffer ioctl for the same context but a different
+ * engine, we allocate/populate a new ringbuffer and context backing object and
+ * so on.
+ *
+ * Finally, regarding local contexts created using the ioctl call: as they are
+ * only allowed with the render ring, we can allocate & populate them right
+ * away (no need to defer anything, at least for now).
+ *
+ * Execlists implementation:
+ * Execlists are the new method by which, on gen8+ hardware, workloads are
+ * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
+ * This method works as follows:
+ *
+ * When a request is committed, its commands (the BB start and any leading or
+ * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
+ * for the appropriate context. The tail pointer in the hardware context is not
+ * updated at this time, but instead, kept by the driver in the ringbuffer
+ * structure. A structure representing this request is added to a request queue
+ * for the appropriate engine: this structure contains a copy of the context's
+ * tail after the request was written to the ring buffer and a pointer to the
+ * context itself.
+ *
+ * If the engine's request queue was empty before the request was added, the
+ * queue is processed immediately. Otherwise the queue will be processed during
+ * a context switch interrupt. In any case, elements on the queue will get sent
+ * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
+ * globally unique 20-bit submission ID.
+ *
+ * When execution of a request completes, the GPU updates the context status
+ * buffer with a context complete event and generates a context switch interrupt.
+ * During the interrupt handling, the driver examines the events in the buffer:
+ * for each context complete event, if the announced ID matches that on the head
+ * of the request queue, then that request is retired and removed from the queue.
+ *
+ * After processing, if any requests were retired and the queue is not empty
+ * then a new execution list can be submitted. The two requests at the front of
+ * the queue are next to be submitted but since a context may not occur twice in
+ * an execution list, if subsequent requests have the same ID as the first then
+ * the two requests must be combined. This is done simply by discarding requests
+ * at the head of the queue until either only one request is left (in which case
+ * we use a NULL second context) or the first two requests have unique IDs.
+ *
+ * By always executing the first two requests in the queue the driver ensures
+ * that the GPU is kept as busy as possible. In the case where a single context
+ * completes but a second context is still executing, the request for this second
+ * context will be at the head of the queue when we remove the first one. This
+ * request will then be resubmitted along with a new request for a different context,
+ * which will cause the hardware to continue executing the second request and queue
+ * the new request (the GPU detects the condition of a context getting preempted
+ * with the same context and optimizes the context switch flow by not doing
+ * preemption, but just sampling the new tail pointer).
+ *
+ */
+#include <linux/interrupt.h>
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "gen8_engine_cs.h"
+#include "intel_breadcrumbs.h"
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_stats.h"
+#include "intel_execlists_submission.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
+#include "intel_mocs.h"
+#include "intel_reset.h"
+#include "intel_ring.h"
+#include "intel_workarounds.h"
+#include "shmem_utils.h"
+
+#define RING_EXECLIST_QFULL (1 << 0x2)
+#define RING_EXECLIST1_VALID (1 << 0x3)
+#define RING_EXECLIST0_VALID (1 << 0x4)
+#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
+#define RING_EXECLIST1_ACTIVE (1 << 0x11)
+#define RING_EXECLIST0_ACTIVE (1 << 0x12)
+
+#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
+#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
+#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
+#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
+#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
+#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
+
+#define GEN8_CTX_STATUS_COMPLETED_MASK \
+ (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
+
+#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */
+#define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */
+#define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15)
+#define GEN12_IDLE_CTX_ID 0x7FF
+#define GEN12_CSB_CTX_VALID(csb_dw) \
+ (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID)
+
+/* Typical size of the average request (2 pipecontrols and a MI_BB) */
+#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
+
+struct virtual_engine {
+ struct intel_engine_cs base;
+ struct intel_context context;
+ struct rcu_work rcu;
+
+ /*
+ * We allow only a single request through the virtual engine at a time
+ * (each request in the timeline waits for the completion fence of
+ * the previous before being submitted). By restricting ourselves to
+ * only submitting a single request, each request is placed on to a
+ * physical engine to maximise load spreading (by virtue of the late greedy
+ * scheduling -- each real engine takes the next available request
+ * upon idling).
+ */
+ struct i915_request *request;
+
+ /*
+ * We keep a rbtree of available virtual engines inside each physical
+ * engine, sorted by priority. Here we preallocate the nodes we need
+ * for the virtual engine, indexed by physical_engine->id.
+ */
+ struct ve_node {
+ struct rb_node rb;
+ int prio;
+ } nodes[I915_NUM_ENGINES];
+
+ /*
+ * Keep track of bonded pairs -- restrictions upon our selection
+ * of physical engines any particular request may be submitted to.
+ * If we receive a submit-fence from a master engine, we will only
+ * use one of sibling_mask physical engines.
+ */
+ struct ve_bond {
+ const struct intel_engine_cs *master;
+ intel_engine_mask_t sibling_mask;
+ } *bonds;
+ unsigned int num_bonds;
+
+ /* And finally, which physical engines this virtual engine maps onto. */
+ unsigned int num_siblings;
+ struct intel_engine_cs *siblings[];
+};
+
+static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(!intel_engine_is_virtual(engine));
+ return container_of(engine, struct virtual_engine, base);
+}
+
+static struct i915_request *
+__active_request(const struct intel_timeline * const tl,
+ struct i915_request *rq,
+ int error)
+{
+ struct i915_request *active = rq;
+
+ list_for_each_entry_from_reverse(rq, &tl->requests, link) {
+ if (__i915_request_is_complete(rq))
+ break;
+
+ if (error) {
+ i915_request_set_error_once(rq, error);
+ __i915_request_skip(rq);
+ }
+ active = rq;
+ }
+
+ return active;
+}
+
+static struct i915_request *
+active_request(const struct intel_timeline * const tl, struct i915_request *rq)
+{
+ return __active_request(tl, rq, 0);
+}
+
+static void ring_set_paused(const struct intel_engine_cs *engine, int state)
+{
+ /*
+ * We inspect HWS_PREEMPT with a semaphore inside
+ * engine->emit_fini_breadcrumb. If the dword is true,
+ * the ring is paused as the semaphore will busywait
+ * until the dword is false.
+ */
+ engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
+ if (state)
+ wmb();
+}
+
+static struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+ return rb_entry(rb, struct i915_priolist, node);
+}
+
+static int rq_prio(const struct i915_request *rq)
+{
+ return READ_ONCE(rq->sched.attr.priority);
+}
+
+static int effective_prio(const struct i915_request *rq)
+{
+ int prio = rq_prio(rq);
+
+ /*
+ * If this request is special and must not be interrupted at any
+ * cost, so be it. Note we are only checking the most recent request
+ * in the context and so may be masking an earlier vip request. It
+ * is hoped that under the conditions where nopreempt is used, this
+ * will not matter (i.e. all requests to that context will be
+ * nopreempt for as long as desired).
+ */
+ if (i915_request_has_nopreempt(rq))
+ prio = I915_PRIORITY_UNPREEMPTABLE;
+
+ return prio;
+}
+
+static int queue_prio(const struct intel_engine_execlists *execlists)
+{
+ struct i915_priolist *p;
+ struct rb_node *rb;
+
+ rb = rb_first_cached(&execlists->queue);
+ if (!rb)
+ return INT_MIN;
+
+ /*
+ * As the priolist[] are inverted, with the highest priority in [0],
+ * we have to flip the index value to become priority.
+ */
+ p = to_priolist(rb);
+ if (!I915_USER_PRIORITY_SHIFT)
+ return p->priority;
+
+ return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
+}
+
+static int virtual_prio(const struct intel_engine_execlists *el)
+{
+ struct rb_node *rb = rb_first_cached(&el->virtual);
+
+ return rb ? rb_entry(rb, struct ve_node, rb)->prio : INT_MIN;
+}
+
+static bool need_preempt(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ int last_prio;
+
+ if (!intel_engine_has_semaphores(engine))
+ return false;
+
+ /*
+ * Check if the current priority hint merits a preemption attempt.
+ *
+ * We record the highest value priority we saw during rescheduling
+ * prior to this dequeue, therefore we know that if it is strictly
+ * less than the current tail of ELSP[0], we do not need to force
+ * a preempt-to-idle cycle.
+ *
+ * However, the priority hint is a mere hint that we may need to
+ * preempt. If that hint is stale or we may be trying to preempt
+ * ourselves, ignore the request.
+ *
+ * More naturally we would write
+ * prio >= max(0, last);
+ * except that we wish to prevent triggering preemption at the same
+ * priority level: the task that is running should remain running
+ * to preserve FIFO ordering of dependencies.
+ */
+ last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
+ if (engine->execlists.queue_priority_hint <= last_prio)
+ return false;
+
+ /*
+ * Check against the first request in ELSP[1], it will, thanks to the
+ * power of PI, be the highest priority of that context.
+ */
+ if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
+ rq_prio(list_next_entry(rq, sched.link)) > last_prio)
+ return true;
+
+ /*
+ * If the inflight context did not trigger the preemption, then maybe
+ * it was the set of queued requests? Pick the highest priority in
+ * the queue (the first active priolist) and see if it deserves to be
+ * running instead of ELSP[0].
+ *
+ * The highest priority request in the queue can not be either
+ * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
+ * context, its priority would not exceed ELSP[0] aka last_prio.
+ */
+ return max(virtual_prio(&engine->execlists),
+ queue_prio(&engine->execlists)) > last_prio;
+}
+
+__maybe_unused static bool
+assert_priority_queue(const struct i915_request *prev,
+ const struct i915_request *next)
+{
+ /*
+ * Without preemption, the prev may refer to the still active element
+ * which we refuse to let go.
+ *
+ * Even with preemption, there are times when we think it is better not
+ * to preempt and leave an ostensibly lower priority request in flight.
+ */
+ if (i915_request_is_active(prev))
+ return true;
+
+ return rq_prio(prev) >= rq_prio(next);
+}
+
+static struct i915_request *
+__unwind_incomplete_requests(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq, *rn, *active = NULL;
+ struct list_head *pl;
+ int prio = I915_PRIORITY_INVALID;
+
+ lockdep_assert_held(&engine->active.lock);
+
+ list_for_each_entry_safe_reverse(rq, rn,
+ &engine->active.requests,
+ sched.link) {
+ if (__i915_request_is_complete(rq)) {
+ list_del_init(&rq->sched.link);
+ continue;
+ }
+
+ __i915_request_unsubmit(rq);
+
+ GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
+ if (rq_prio(rq) != prio) {
+ prio = rq_prio(rq);
+ pl = i915_sched_lookup_priolist(engine, prio);
+ }
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+
+ list_move(&rq->sched.link, pl);
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+ /* Check in case we rollback so far we wrap [size/2] */
+ if (intel_ring_direction(rq->ring,
+ rq->tail,
+ rq->ring->tail + 8) > 0)
+ rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
+
+ active = rq;
+ }
+
+ return active;
+}
+
+struct i915_request *
+execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
+{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+
+ return __unwind_incomplete_requests(engine);
+}
+
+static void
+execlists_context_status_change(struct i915_request *rq, unsigned long status)
+{
+ /*
+ * Only used when GVT-g is enabled now. When GVT-g is disabled,
+ * The compiler should eliminate this function as dead-code.
+ */
+ if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
+ return;
+
+ atomic_notifier_call_chain(&rq->engine->context_status_notifier,
+ status, rq);
+}
+
+static void reset_active(struct i915_request *rq,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context * const ce = rq->context;
+ u32 head;
+
+ /*
+ * The executing context has been cancelled. We want to prevent
+ * further execution along this context and propagate the error on
+ * to anything depending on its results.
+ *
+ * In __i915_request_submit(), we apply the -EIO and remove the
+ * requests' payloads for any banned requests. But first, we must
+ * rewind the context back to the start of the incomplete request so
+ * that we do not jump back into the middle of the batch.
+ *
+ * We preserve the breadcrumbs and semaphores of the incomplete
+ * requests so that inter-timeline dependencies (i.e. other timelines)
+ * remain correctly ordered. And we defer to __i915_request_submit()
+ * so that all asynchronous waits are correctly handled.
+ */
+ ENGINE_TRACE(engine, "{ reset rq=%llx:%lld }\n",
+ rq->fence.context, rq->fence.seqno);
+
+ /* On resubmission of the active request, payload will be scrubbed */
+ if (__i915_request_is_complete(rq))
+ head = rq->tail;
+ else
+ head = __active_request(ce->timeline, rq, -EIO)->head;
+ head = intel_ring_wrap(ce->ring, head);
+
+ /* Scrub the context image to prevent replaying the previous batch */
+ lrc_init_regs(ce, engine, true);
+
+ /* We've switched away, so this should be a no-op, but intent matters */
+ ce->lrc.lrca = lrc_update_regs(ce, engine, head);
+}
+
+static struct intel_engine_cs *
+__execlists_schedule_in(struct i915_request *rq)
+{
+ struct intel_engine_cs * const engine = rq->engine;
+ struct intel_context * const ce = rq->context;
+
+ intel_context_get(ce);
+
+ if (unlikely(intel_context_is_closed(ce) &&
+ !intel_engine_has_heartbeat(engine)))
+ intel_context_set_banned(ce);
+
+ if (unlikely(intel_context_is_banned(ce)))
+ reset_active(rq, engine);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ lrc_check_regs(ce, engine, "before");
+
+ if (ce->tag) {
+ /* Use a fixed tag for OA and friends */
+ GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
+ ce->lrc.ccid = ce->tag;
+ } else {
+ /* We don't need a strict matching tag, just different values */
+ unsigned int tag = __ffs(engine->context_tag);
+
+ GEM_BUG_ON(tag >= BITS_PER_LONG);
+ __clear_bit(tag, &engine->context_tag);
+ ce->lrc.ccid = (1 + tag) << (GEN11_SW_CTX_ID_SHIFT - 32);
+
+ BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
+ }
+
+ ce->lrc.ccid |= engine->execlists.ccid;
+
+ __intel_gt_pm_get(engine->gt);
+ if (engine->fw_domain && !engine->fw_active++)
+ intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+ intel_engine_context_in(engine);
+
+ CE_TRACE(ce, "schedule-in, ccid:%x\n", ce->lrc.ccid);
+
+ return engine;
+}
+
+static void execlists_schedule_in(struct i915_request *rq, int idx)
+{
+ struct intel_context * const ce = rq->context;
+ struct intel_engine_cs *old;
+
+ GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
+ trace_i915_request_in(rq, idx);
+
+ old = ce->inflight;
+ if (!old)
+ old = __execlists_schedule_in(rq);
+ WRITE_ONCE(ce->inflight, ptr_inc(old));
+
+ GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
+}
+
+static void
+resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
+{
+ struct intel_engine_cs *engine = rq->engine;
+
+ spin_lock_irq(&engine->active.lock);
+
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ WRITE_ONCE(rq->engine, &ve->base);
+ ve->base.submit_request(rq);
+
+ spin_unlock_irq(&engine->active.lock);
+}
+
+static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ struct intel_engine_cs *engine = rq->engine;
+
+ /*
+ * After this point, the rq may be transferred to a new sibling, so
+ * before we clear ce->inflight make sure that the context has been
+ * removed from the b->signalers and furthermore we need to make sure
+ * that the concurrent iterator in signal_irq_work is no longer
+ * following ce->signal_link.
+ */
+ if (!list_empty(&ce->signals))
+ intel_context_remove_breadcrumbs(ce, engine->breadcrumbs);
+
+ /*
+ * This engine is now too busy to run this virtual request, so
+ * see if we can find an alternative engine for it to execute on.
+ * Once a request has become bonded to this engine, we treat it the
+ * same as any other native request.
+ */
+ if (i915_request_in_priority_queue(rq) &&
+ rq->execution_mask != engine->mask)
+ resubmit_virtual_request(rq, ve);
+
+ if (READ_ONCE(ve->request))
+ tasklet_hi_schedule(&ve->base.execlists.tasklet);
+}
+
+static void __execlists_schedule_out(struct i915_request * const rq,
+ struct intel_context * const ce)
+{
+ struct intel_engine_cs * const engine = rq->engine;
+ unsigned int ccid;
+
+ /*
+ * NB process_csb() is not under the engine->active.lock and hence
+ * schedule_out can race with schedule_in meaning that we should
+ * refrain from doing non-trivial work here.
+ */
+
+ CE_TRACE(ce, "schedule-out, ccid:%x\n", ce->lrc.ccid);
+ GEM_BUG_ON(ce->inflight != engine);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ lrc_check_regs(ce, engine, "after");
+
+ /*
+ * If we have just completed this context, the engine may now be
+ * idle and we want to re-enter powersaving.
+ */
+ if (intel_timeline_is_last(ce->timeline, rq) &&
+ __i915_request_is_complete(rq))
+ intel_engine_add_retire(engine, ce->timeline);
+
+ ccid = ce->lrc.ccid;
+ ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
+ ccid &= GEN12_MAX_CONTEXT_HW_ID;
+ if (ccid < BITS_PER_LONG) {
+ GEM_BUG_ON(ccid == 0);
+ GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
+ __set_bit(ccid - 1, &engine->context_tag);
+ }
+
+ lrc_update_runtime(ce);
+ intel_engine_context_out(engine);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+ if (engine->fw_domain && !--engine->fw_active)
+ intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
+ intel_gt_pm_put_async(engine->gt);
+
+ /*
+ * If this is part of a virtual engine, its next request may
+ * have been blocked waiting for access to the active context.
+ * We have to kick all the siblings again in case we need to
+ * switch (e.g. the next request is not runnable on this
+ * engine). Hopefully, we will already have submitted the next
+ * request before the tasklet runs and do not need to rebuild
+ * each virtual tree and kick everyone again.
+ */
+ if (ce->engine != engine)
+ kick_siblings(rq, ce);
+
+ WRITE_ONCE(ce->inflight, NULL);
+ intel_context_put(ce);
+}
+
+static inline void execlists_schedule_out(struct i915_request *rq)
+{
+ struct intel_context * const ce = rq->context;
+
+ trace_i915_request_out(rq);
+
+ GEM_BUG_ON(!ce->inflight);
+ ce->inflight = ptr_dec(ce->inflight);
+ if (!__intel_context_inflight_count(ce->inflight))
+ __execlists_schedule_out(rq, ce);
+
+ i915_request_put(rq);
+}
+
+static u64 execlists_update_context(struct i915_request *rq)
+{
+ struct intel_context *ce = rq->context;
+ u64 desc = ce->lrc.desc;
+ u32 tail, prev;
+
+ /*
+ * WaIdleLiteRestore:bdw,skl
+ *
+ * We should never submit the context with the same RING_TAIL twice
+ * just in case we submit an empty ring, which confuses the HW.
+ *
+ * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
+ * the normal request to be able to always advance the RING_TAIL on
+ * subsequent resubmissions (for lite restore). Should that fail us,
+ * and we try and submit the same tail again, force the context
+ * reload.
+ *
+ * If we need to return to a preempted context, we need to skip the
+ * lite-restore and force it to reload the RING_TAIL. Otherwise, the
+ * HW has a tendency to ignore us rewinding the TAIL to the end of
+ * an earlier request.
+ */
+ GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
+ prev = rq->ring->tail;
+ tail = intel_ring_set_tail(rq->ring, rq->tail);
+ if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
+ desc |= CTX_DESC_FORCE_RESTORE;
+ ce->lrc_reg_state[CTX_RING_TAIL] = tail;
+ rq->tail = rq->wa_tail;
+
+ /*
+ * Make sure the context image is complete before we submit it to HW.
+ *
+ * Ostensibly, writes (including the WCB) should be flushed prior to
+ * an uncached write such as our mmio register access, the empirical
+ * evidence (esp. on Braswell) suggests that the WC write into memory
+ * may not be visible to the HW prior to the completion of the UC
+ * register write and that we may begin execution from the context
+ * before its image is complete leading to invalid PD chasing.
+ */
+ wmb();
+
+ ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
+ return desc;
+}
+
+static void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
+{
+ if (execlists->ctrl_reg) {
+ writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
+ writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
+ } else {
+ writel(upper_32_bits(desc), execlists->submit_reg);
+ writel(lower_32_bits(desc), execlists->submit_reg);
+ }
+}
+
+static __maybe_unused char *
+dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
+{
+ if (!rq)
+ return "";
+
+ snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d",
+ prefix,
+ rq->context->lrc.ccid,
+ rq->fence.context, rq->fence.seqno,
+ __i915_request_is_complete(rq) ? "!" :
+ __i915_request_has_started(rq) ? "*" :
+ "",
+ rq_prio(rq));
+
+ return buf;
+}
+
+static __maybe_unused noinline void
+trace_ports(const struct intel_engine_execlists *execlists,
+ const char *msg,
+ struct i915_request * const *ports)
+{
+ const struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+ char __maybe_unused p0[40], p1[40];
+
+ if (!ports[0])
+ return;
+
+ ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
+ dump_port(p0, sizeof(p0), "", ports[0]),
+ dump_port(p1, sizeof(p1), ", ", ports[1]));
+}
+
+static bool
+reset_in_progress(const struct intel_engine_execlists *execlists)
+{
+ return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
+}
+
+static __maybe_unused noinline bool
+assert_pending_valid(const struct intel_engine_execlists *execlists,
+ const char *msg)
+{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+ struct i915_request * const *port, *rq;
+ struct intel_context *ce = NULL;
+ bool sentinel = false;
+ u32 ccid = -1;
+
+ trace_ports(execlists, msg, execlists->pending);
+
+ /* We may be messing around with the lists during reset, lalala */
+ if (reset_in_progress(execlists))
+ return true;
+
+ if (!execlists->pending[0]) {
+ GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
+ engine->name);
+ return false;
+ }
+
+ if (execlists->pending[execlists_num_ports(execlists)]) {
+ GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
+ engine->name, execlists_num_ports(execlists));
+ return false;
+ }
+
+ for (port = execlists->pending; (rq = *port); port++) {
+ unsigned long flags;
+ bool ok = true;
+
+ GEM_BUG_ON(!kref_read(&rq->fence.refcount));
+ GEM_BUG_ON(!i915_request_is_active(rq));
+
+ if (ce == rq->context) {
+ GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ ce = rq->context;
+
+ if (ccid == ce->lrc.ccid) {
+ GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
+ engine->name,
+ ccid, ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ ccid = ce->lrc.ccid;
+
+ /*
+ * Sentinels are supposed to be the last request so they flush
+ * the current execution off the HW. Check that they are the only
+ * request in the pending submission.
+ */
+ if (sentinel) {
+ GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ sentinel = i915_request_has_sentinel(rq);
+
+ /*
+ * We want virtual requests to only be in the first slot so
+ * that they are never stuck behind a hog and can be immediately
+ * transferred onto the next idle engine.
+ */
+ if (rq->execution_mask != engine->mask &&
+ port != execlists->pending) {
+ GEM_TRACE_ERR("%s: virtual engine:%llx not in prime position[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+
+ /* Hold tightly onto the lock to prevent concurrent retires! */
+ if (!spin_trylock_irqsave(&rq->lock, flags))
+ continue;
+
+ if (__i915_request_is_complete(rq))
+ goto unlock;
+
+ if (i915_active_is_idle(&ce->active) &&
+ !intel_context_is_barrier(ce)) {
+ GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+ if (!i915_vma_is_pinned(ce->state)) {
+ GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+ if (!i915_vma_is_pinned(ce->ring->vma)) {
+ GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&rq->lock, flags);
+ if (!ok)
+ return false;
+ }
+
+ return ce;
+}
+
+static void execlists_submit_ports(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+ unsigned int n;
+
+ GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
+
+ /*
+ * We can skip acquiring intel_runtime_pm_get() here as it was taken
+ * on our behalf by the request (see i915_gem_mark_busy()) and it will
+ * not be relinquished until the device is idle (see
+ * i915_gem_idle_work_handler()). As a precaution, we make sure
+ * that all ELSP are drained i.e. we have processed the CSB,
+ * before allowing ourselves to idle and calling intel_runtime_pm_put().
+ */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+ /*
+ * ELSQ note: the submit queue is not cleared after being submitted
+ * to the HW so we need to make sure we always clean it up. This is
+ * currently ensured by the fact that we always write the same number
+ * of elsq entries, keep this in mind before changing the loop below.
+ */
+ for (n = execlists_num_ports(execlists); n--; ) {
+ struct i915_request *rq = execlists->pending[n];
+
+ write_desc(execlists,
+ rq ? execlists_update_context(rq) : 0,
+ n);
+ }
+
+ /* we need to manually load the submit queue */
+ if (execlists->ctrl_reg)
+ writel(EL_CTRL_LOAD, execlists->ctrl_reg);
+}
+
+static bool ctx_single_port_submission(const struct intel_context *ce)
+{
+ return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
+ intel_context_force_single_submission(ce));
+}
+
+static bool can_merge_ctx(const struct intel_context *prev,
+ const struct intel_context *next)
+{
+ if (prev != next)
+ return false;
+
+ if (ctx_single_port_submission(prev))
+ return false;
+
+ return true;
+}
+
+static unsigned long i915_request_flags(const struct i915_request *rq)
+{
+ return READ_ONCE(rq->fence.flags);
+}
+
+static bool can_merge_rq(const struct i915_request *prev,
+ const struct i915_request *next)
+{
+ GEM_BUG_ON(prev == next);
+ GEM_BUG_ON(!assert_priority_queue(prev, next));
+
+ /*
+ * We do not submit known completed requests. Therefore if the next
+ * request is already completed, we can pretend to merge it in
+ * with the previous context (and we will skip updating the ELSP
+ * and tracking). Thus hopefully keeping the ELSP full with active
+ * contexts, despite the best efforts of preempt-to-busy to confuse
+ * us.
+ */
+ if (__i915_request_is_complete(next))
+ return true;
+
+ if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
+ (BIT(I915_FENCE_FLAG_NOPREEMPT) |
+ BIT(I915_FENCE_FLAG_SENTINEL))))
+ return false;
+
+ if (!can_merge_ctx(prev->context, next->context))
+ return false;
+
+ GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
+ return true;
+}
+
+static bool virtual_matches(const struct virtual_engine *ve,
+ const struct i915_request *rq,
+ const struct intel_engine_cs *engine)
+{
+ const struct intel_engine_cs *inflight;
+
+ if (!rq)
+ return false;
+
+ if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
+ return false;
+
+ /*
+ * We track when the HW has completed saving the context image
+ * (i.e. when we have seen the final CS event switching out of
+ * the context) and must not overwrite the context image before
+ * then. This restricts us to only using the active engine
+ * while the previous virtualized request is inflight (so
+ * we reuse the register offsets). This is a very small
+ * hysteresis on the greedy selection algorithm.
+ */
+ inflight = intel_context_inflight(&ve->context);
+ if (inflight && inflight != engine)
+ return false;
+
+ return true;
+}
+
+static struct virtual_engine *
+first_virtual_engine(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *el = &engine->execlists;
+ struct rb_node *rb = rb_first_cached(&el->virtual);
+
+ while (rb) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq = READ_ONCE(ve->request);
+
+ /* lazily cleanup after another engine handled rq */
+ if (!rq || !virtual_matches(ve, rq, engine)) {
+ rb_erase_cached(rb, &el->virtual);
+ RB_CLEAR_NODE(rb);
+ rb = rb_first_cached(&el->virtual);
+ continue;
+ }
+
+ return ve;
+ }
+
+ return NULL;
+}
+
+static void virtual_xfer_context(struct virtual_engine *ve,
+ struct intel_engine_cs *engine)
+{
+ unsigned int n;
+
+ if (likely(engine == ve->siblings[0]))
+ return;
+
+ GEM_BUG_ON(READ_ONCE(ve->context.inflight));
+ if (!intel_engine_has_relative_mmio(engine))
+ lrc_update_offsets(&ve->context, engine);
+
+ /*
+ * Move the bound engine to the top of the list for
+ * future execution. We then kick this tasklet first
+ * before checking others, so that we preferentially
+ * reuse this set of bound registers.
+ */
+ for (n = 1; n < ve->num_siblings; n++) {
+ if (ve->siblings[n] == engine) {
+ swap(ve->siblings[n], ve->siblings[0]);
+ break;
+ }
+ }
+}
+
+static void defer_request(struct i915_request *rq, struct list_head * const pl)
+{
+ LIST_HEAD(list);
+
+ /*
+ * We want to move the interrupted request to the back of
+ * the round-robin list (i.e. its priority level), but
+ * in doing so, we must then move all requests that were in
+ * flight and were waiting for the interrupted request to
+ * be run after it again.
+ */
+ do {
+ struct i915_dependency *p;
+
+ GEM_BUG_ON(i915_request_is_active(rq));
+ list_move_tail(&rq->sched.link, pl);
+
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+
+ /* No waiter should start before its signaler */
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
+ __i915_request_has_started(w) &&
+ !__i915_request_is_complete(rq));
+
+ GEM_BUG_ON(i915_request_is_active(w));
+ if (!i915_request_is_ready(w))
+ continue;
+
+ if (rq_prio(w) < rq_prio(rq))
+ continue;
+
+ GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static void defer_active(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = __unwind_incomplete_requests(engine);
+ if (!rq)
+ return;
+
+ defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+}
+
+static bool
+timeslice_yield(const struct intel_engine_execlists *el,
+ const struct i915_request *rq)
+{
+ /*
+ * Once bitten, forever smitten!
+ *
+ * If the active context ever busy-waited on a semaphore,
+ * it will be treated as a hog until the end of its timeslice (i.e.
+ * until it is scheduled out and replaced by a new submission,
+ * possibly even its own lite-restore). The HW only sends an interrupt
+ * on the first miss, and we do not know if that semaphore has been
+ * signaled, or even if it is now stuck on another semaphore. Play
+ * safe, yield if it might be stuck -- it will be given a fresh
+ * timeslice in the near future.
+ */
+ return rq->context->lrc.ccid == READ_ONCE(el->yield);
+}
+
+static bool needs_timeslice(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ if (!intel_engine_has_timeslices(engine))
+ return false;
+
+ /* If not currently active, or about to switch, wait for next event */
+ if (!rq || __i915_request_is_complete(rq))
+ return false;
+
+ /* We do not need to start the timeslice until after the ACK */
+ if (READ_ONCE(engine->execlists.pending[0]))
+ return false;
+
+ /* If ELSP[1] is occupied, always check to see if worth slicing */
+ if (!list_is_last_rcu(&rq->sched.link, &engine->active.requests)) {
+ ENGINE_TRACE(engine, "timeslice required for second inflight context\n");
+ return true;
+ }
+
+ /* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */
+ if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)) {
+ ENGINE_TRACE(engine, "timeslice required for queue\n");
+ return true;
+ }
+
+ if (!RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root)) {
+ ENGINE_TRACE(engine, "timeslice required for virtual\n");
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)
+{
+ const struct intel_engine_execlists *el = &engine->execlists;
+
+ if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq))
+ return false;
+
+ if (!needs_timeslice(engine, rq))
+ return false;
+
+ return timer_expired(&el->timer) || timeslice_yield(el, rq);
+}
+
+static unsigned long timeslice(const struct intel_engine_cs *engine)
+{
+ return READ_ONCE(engine->props.timeslice_duration_ms);
+}
+
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *el = &engine->execlists;
+ unsigned long duration;
+
+ /* Disable the timer if there is nothing to switch to */
+ duration = 0;
+ if (needs_timeslice(engine, *el->active)) {
+ /* Avoid continually prolonging an active timeslice */
+ if (timer_active(&el->timer)) {
+ /*
+ * If we just submitted a new ELSP after an old
+ * context, that context may have already consumed
+ * its timeslice, so recheck.
+ */
+ if (!timer_pending(&el->timer))
+ tasklet_hi_schedule(&el->tasklet);
+ return;
+ }
+
+ duration = timeslice(engine);
+ }
+
+ set_timer_ms(&el->timer, duration);
+}
+
+static void record_preemption(struct intel_engine_execlists *execlists)
+{
+ (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
+}
+
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ if (!rq)
+ return 0;
+
+ /* Force a fast reset for terminated contexts (ignoring sysfs!) */
+ if (unlikely(intel_context_is_banned(rq->context)))
+ return 1;
+
+ return READ_ONCE(engine->props.preempt_timeout_ms);
+}
+
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ if (!intel_engine_has_preempt_reset(engine))
+ return;
+
+ set_timer_ms(&engine->execlists.preempt,
+ active_preempt_timeout(engine, rq));
+}
+
+static bool completed(const struct i915_request *rq)
+{
+ if (i915_request_has_sentinel(rq))
+ return false;
+
+ return __i915_request_is_complete(rq);
+}
+
+static void execlists_dequeue(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request **port = execlists->pending;
+ struct i915_request ** const last_port = port + execlists->port_mask;
+ struct i915_request *last, * const *active;
+ struct virtual_engine *ve;
+ struct rb_node *rb;
+ bool submit = false;
+
+ /*
+ * Hardware submission is through 2 ports. Conceptually each port
+ * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
+ * static for a context, and unique to each, so we only execute
+ * requests belonging to a single context from each ring. RING_HEAD
+ * is maintained by the CS in the context image, it marks the place
+ * where it got up to last time, and through RING_TAIL we tell the CS
+ * where we want to execute up to this time.
+ *
+ * In this list the requests are in order of execution. Consecutive
+ * requests from the same context are adjacent in the ringbuffer. We
+ * can combine these requests into a single RING_TAIL update:
+ *
+ * RING_HEAD...req1...req2
+ * ^- RING_TAIL
+ * since to execute req2 the CS must first execute req1.
+ *
+ * Our goal then is to point each port to the end of a consecutive
+ * sequence of requests as being the most optimal (fewest wake ups
+ * and context switches) submission.
+ */
+
+ spin_lock(&engine->active.lock);
+
+ /*
+ * If the queue is higher priority than the last
+ * request in the currently active context, submit afresh.
+ * We will resubmit again afterwards in case we need to split
+ * the active context to interject the preemption request,
+ * i.e. we will retrigger preemption following the ack in case
+ * of trouble.
+ *
+ */
+ active = execlists->active;
+ while ((last = *active) && completed(last))
+ active++;
+
+ if (last) {
+ if (need_preempt(engine, last)) {
+ ENGINE_TRACE(engine,
+ "preempting last=%llx:%lld, prio=%d, hint=%d\n",
+ last->fence.context,
+ last->fence.seqno,
+ last->sched.attr.priority,
+ execlists->queue_priority_hint);
+ record_preemption(execlists);
+
+ /*
+ * Don't let the RING_HEAD advance past the breadcrumb
+ * as we unwind (and until we resubmit) so that we do
+ * not accidentally tell it to go backwards.
+ */
+ ring_set_paused(engine, 1);
+
+ /*
+ * Note that we have not stopped the GPU at this point,
+ * so we are unwinding the incomplete requests as they
+ * remain inflight and so by the time we do complete
+ * the preemption, some of the unwound requests may
+ * complete!
+ */
+ __unwind_incomplete_requests(engine);
+
+ last = NULL;
+ } else if (timeslice_expired(engine, last)) {
+ ENGINE_TRACE(engine,
+ "expired:%s last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
+ yesno(timer_expired(&execlists->timer)),
+ last->fence.context, last->fence.seqno,
+ rq_prio(last),
+ execlists->queue_priority_hint,
+ yesno(timeslice_yield(execlists, last)));
+
+ /*
+ * Consume this timeslice; ensure we start a new one.
+ *
+ * The timeslice expired, and we will unwind the
+ * running contexts and recompute the next ELSP.
+ * If that submit will be the same pair of contexts
+ * (due to dependency ordering), we will skip the
+ * submission. If we don't cancel the timer now,
+ * we will see that the timer has expired and
+ * reschedule the tasklet; continually until the
+ * next context switch or other preemption event.
+ *
+ * Since we have decided to reschedule based on
+ * consumption of this timeslice, if we submit the
+ * same context again, grant it a full timeslice.
+ */
+ cancel_timer(&execlists->timer);
+ ring_set_paused(engine, 1);
+ defer_active(engine);
+
+ /*
+ * Unlike for preemption, if we rewind and continue
+ * executing the same context as previously active,
+ * the order of execution will remain the same and
+ * the tail will only advance. We do not need to
+ * force a full context restore, as a lite-restore
+ * is sufficient to resample the monotonic TAIL.
+ *
+ * If we switch to any other context, similarly we
+ * will not rewind TAIL of current context, and
+ * normal save/restore will preserve state and allow
+ * us to later continue executing the same request.
+ */
+ last = NULL;
+ } else {
+ /*
+ * Otherwise if we already have a request pending
+ * for execution after the current one, we can
+ * just wait until the next CS event before
+ * queuing more. In either case we will force a
+ * lite-restore preemption event, but if we wait
+ * we hopefully coalesce several updates into a single
+ * submission.
+ */
+ if (active[1]) {
+ /*
+ * Even if ELSP[1] is occupied and not worthy
+ * of timeslices, our queue might be.
+ */
+ spin_unlock(&engine->active.lock);
+ return;
+ }
+ }
+ }
+
+ /* XXX virtual is always taking precedence */
+ while ((ve = first_virtual_engine(engine))) {
+ struct i915_request *rq;
+
+ spin_lock(&ve->base.active.lock);
+
+ rq = ve->request;
+ if (unlikely(!virtual_matches(ve, rq, engine)))
+ goto unlock; /* lost the race to a sibling */
+
+ GEM_BUG_ON(rq->engine != &ve->base);
+ GEM_BUG_ON(rq->context != &ve->context);
+
+ if (unlikely(rq_prio(rq) < queue_prio(execlists))) {
+ spin_unlock(&ve->base.active.lock);
+ break;
+ }
+
+ if (last && !can_merge_rq(last, rq)) {
+ spin_unlock(&ve->base.active.lock);
+ spin_unlock(&engine->active.lock);
+ return; /* leave this for another sibling */
+ }
+
+ ENGINE_TRACE(engine,
+ "virtual rq=%llx:%lld%s, new engine? %s\n",
+ rq->fence.context,
+ rq->fence.seqno,
+ __i915_request_is_complete(rq) ? "!" :
+ __i915_request_has_started(rq) ? "*" :
+ "",
+ yesno(engine != ve->siblings[0]));
+
+ WRITE_ONCE(ve->request, NULL);
+ WRITE_ONCE(ve->base.execlists.queue_priority_hint, INT_MIN);
+
+ rb = &ve->nodes[engine->id].rb;
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+
+ GEM_BUG_ON(!(rq->execution_mask & engine->mask));
+ WRITE_ONCE(rq->engine, engine);
+
+ if (__i915_request_submit(rq)) {
+ /*
+ * Only after we confirm that we will submit
+ * this request (i.e. it has not already
+ * completed), do we want to update the context.
+ *
+ * This serves two purposes. It avoids
+ * unnecessary work if we are resubmitting an
+ * already completed request after timeslicing.
+ * But more importantly, it prevents us altering
+ * ve->siblings[] on an idle context, where
+ * we may be using ve->siblings[] in
+ * virtual_context_enter / virtual_context_exit.
+ */
+ virtual_xfer_context(ve, engine);
+ GEM_BUG_ON(ve->siblings[0] != engine);
+
+ submit = true;
+ last = rq;
+ }
+
+ i915_request_put(rq);
+unlock:
+ spin_unlock(&ve->base.active.lock);
+
+ /*
+ * Hmm, we have a bunch of virtual engine requests,
+ * but the first one was already completed (thanks
+ * preempt-to-busy!). Keep looking at the veng queue
+ * until we have no more relevant requests (i.e.
+ * the normal submit queue has higher priority).
+ */
+ if (submit)
+ break;
+ }
+
+ while ((rb = rb_first_cached(&execlists->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ struct i915_request *rq, *rn;
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ bool merge = true;
+
+ /*
+ * Can we combine this request with the current port?
+ * It has to be the same context/ringbuffer and not
+ * have any exceptions (e.g. GVT saying never to
+ * combine contexts).
+ *
+ * If we can combine the requests, we can execute both
+ * by updating the RING_TAIL to point to the end of the
+ * second request, and so we never need to tell the
+ * hardware about the first.
+ */
+ if (last && !can_merge_rq(last, rq)) {
+ /*
+ * If we are on the second port and cannot
+ * combine this request with the last, then we
+ * are done.
+ */
+ if (port == last_port)
+ goto done;
+
+ /*
+ * We must not populate both ELSP[] with the
+ * same LRCA, i.e. we must submit 2 different
+ * contexts if we submit 2 ELSP.
+ */
+ if (last->context == rq->context)
+ goto done;
+
+ if (i915_request_has_sentinel(last))
+ goto done;
+
+ /*
+ * We avoid submitting virtual requests into
+ * the secondary ports so that we can migrate
+ * the request immediately to another engine
+ * rather than wait for the primary request.
+ */
+ if (rq->execution_mask != engine->mask)
+ goto done;
+
+ /*
+ * If GVT overrides us we only ever submit
+ * port[0], leaving port[1] empty. Note that we
+ * also have to be careful that we don't queue
+ * the same context (even though a different
+ * request) to the second port.
+ */
+ if (ctx_single_port_submission(last->context) ||
+ ctx_single_port_submission(rq->context))
+ goto done;
+
+ merge = false;
+ }
+
+ if (__i915_request_submit(rq)) {
+ if (!merge) {
+ *port++ = i915_request_get(last);
+ last = NULL;
+ }
+
+ GEM_BUG_ON(last &&
+ !can_merge_ctx(last->context,
+ rq->context));
+ GEM_BUG_ON(last &&
+ i915_seqno_passed(last->fence.seqno,
+ rq->fence.seqno));
+
+ submit = true;
+ last = rq;
+ }
+ }
+
+ rb_erase_cached(&p->node, &execlists->queue);
+ i915_priolist_free(p);
+ }
+done:
+ *port++ = i915_request_get(last);
+
+ /*
+ * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
+ *
+ * We choose the priority hint such that if we add a request of greater
+ * priority than this, we kick the submission tasklet to decide on
+ * the right order of submitting the requests to hardware. We must
+ * also be prepared to reorder requests as they are in-flight on the
+ * HW. We derive the priority hint then as the first "hole" in
+ * the HW submission ports and if there are no available slots,
+ * the priority of the lowest executing request, i.e. last.
+ *
+ * When we do receive a higher priority request ready to run from the
+ * user, see queue_request(), the priority hint is bumped to that
+ * request triggering preemption on the next dequeue (or subsequent
+ * interrupt for secondary ports).
+ */
+ execlists->queue_priority_hint = queue_prio(execlists);
+ spin_unlock(&engine->active.lock);
+
+ /*
+ * We can skip poking the HW if we ended up with exactly the same set
+ * of requests as currently running, e.g. trying to timeslice a pair
+ * of ordered contexts.
+ */
+ if (submit &&
+ memcmp(active,
+ execlists->pending,
+ (port - execlists->pending) * sizeof(*port))) {
+ *port = NULL;
+ while (port-- != execlists->pending)
+ execlists_schedule_in(*port, port - execlists->pending);
+
+ WRITE_ONCE(execlists->yield, -1);
+ set_preempt_timeout(engine, *active);
+ execlists_submit_ports(engine);
+ } else {
+ ring_set_paused(engine, 0);
+ while (port-- != execlists->pending)
+ i915_request_put(*port);
+ *execlists->pending = NULL;
+ }
+}
+
+static void execlists_dequeue_irq(struct intel_engine_cs *engine)
+{
+ local_irq_disable(); /* Suspend interrupts across request submission */
+ execlists_dequeue(engine);
+ local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */
+}
+
+static void clear_ports(struct i915_request **ports, int count)
+{
+ memset_p((void **)ports, NULL, count);
+}
+
+static void
+copy_ports(struct i915_request **dst, struct i915_request **src, int count)
+{
+ /* A memcpy_p() would be very useful here! */
+ while (count--)
+ WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
+}
+
+static struct i915_request **
+cancel_port_requests(struct intel_engine_execlists * const execlists,
+ struct i915_request **inactive)
+{
+ struct i915_request * const *port;
+
+ for (port = execlists->pending; *port; port++)
+ *inactive++ = *port;
+ clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
+
+ /* Mark the end of active before we overwrite *active */
+ for (port = xchg(&execlists->active, execlists->pending); *port; port++)
+ *inactive++ = *port;
+ clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
+
+ smp_wmb(); /* complete the seqlock for execlists_active() */
+ WRITE_ONCE(execlists->active, execlists->inflight);
+
+ /* Having cancelled all outstanding process_csb(), stop their timers */
+ GEM_BUG_ON(execlists->pending[0]);
+ cancel_timer(&execlists->timer);
+ cancel_timer(&execlists->preempt);
+
+ return inactive;
+}
+
+static void invalidate_csb_entries(const u64 *first, const u64 *last)
+{
+ clflush((void *)first);
+ clflush((void *)last);
+}
+
+/*
+ * Starting with Gen12, the status has a new format:
+ *
+ * bit 0: switched to new queue
+ * bit 1: reserved
+ * bit 2: semaphore wait mode (poll or signal), only valid when
+ * switch detail is set to "wait on semaphore"
+ * bits 3-5: engine class
+ * bits 6-11: engine instance
+ * bits 12-14: reserved
+ * bits 15-25: sw context id of the lrc the GT switched to
+ * bits 26-31: sw counter of the lrc the GT switched to
+ * bits 32-35: context switch detail
+ * - 0: ctx complete
+ * - 1: wait on sync flip
+ * - 2: wait on vblank
+ * - 3: wait on scanline
+ * - 4: wait on semaphore
+ * - 5: context preempted (not on SEMAPHORE_WAIT or
+ * WAIT_FOR_EVENT)
+ * bit 36: reserved
+ * bits 37-43: wait detail (for switch detail 1 to 4)
+ * bits 44-46: reserved
+ * bits 47-57: sw context id of the lrc the GT switched away from
+ * bits 58-63: sw counter of the lrc the GT switched away from
+ */
+static bool gen12_csb_parse(const u64 csb)
+{
+ bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb));
+ bool new_queue =
+ lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
+
+ /*
+ * The context switch detail is not guaranteed to be 5 when a preemption
+ * occurs, so we can't just check for that. The check below works for
+ * all the cases we care about, including preemptions of WAIT
+ * instructions and lite-restore. Preempt-to-idle via the CTRL register
+ * would require some extra handling, but we don't support that.
+ */
+ if (!ctx_away_valid || new_queue) {
+ GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(csb)));
+ return true;
+ }
+
+ /*
+ * switch detail = 5 is covered by the case above and we do not expect a
+ * context switch on an unsuccessful wait instruction since we always
+ * use polling mode.
+ */
+ GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb)));
+ return false;
+}
+
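+/*
+ * Pre-Gen12 CSB events: promote the pending ELSP submission on an
+ * idle->active transition or an explicit preemption event.
+ */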
+static bool gen8_csb_parse(const u64 csb)
+{
+ return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
+}
+
+static noinline u64
+wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb)
+{
+ u64 entry;
+
+ /*
+ * Reading from the HWSP has one particular advantage: we can detect
+ * a stale entry. Since the write into HWSP is broken, we have no reason
+ * to trust the HW at all; the mmio entry may equally be unordered, so
+ * we prefer the path that is self-checking and as a last resort,
+ * return the mmio value.
+ *
+ * tgl,dg1:HSDES#22011327657
+ */
+ preempt_disable();
+ if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) {
+ int idx = csb - engine->execlists.csb_status;
+ int status;
+
+ status = GEN8_EXECLISTS_STATUS_BUF;
+ if (idx >= 6) {
+ status = GEN11_EXECLISTS_STATUS_BUF2;
+ idx -= 6;
+ }
+ status += sizeof(u64) * idx;
+
+ entry = intel_uncore_read64(engine->uncore,
+ _MMIO(engine->mmio_base + status));
+ }
+ preempt_enable();
+
+ return entry;
+}
+
+static u64 csb_read(const struct intel_engine_cs *engine, u64 * const csb)
+{
+ u64 entry = READ_ONCE(*csb);
+
+ /*
+ * Unfortunately, the GPU does not always serialise its write
+ * of the CSB entries before its write of the CSB pointer, at least
+ * from the perspective of the CPU, using what is known as a Global
+ * Observation Point. We may read a new CSB tail pointer, but then
+ * read the stale CSB entries, causing us to misinterpret the
+ * context-switch events, and eventually declare the GPU hung.
+ *
+ * icl:HSDES#1806554093
+ * tgl:HSDES#22011248461
+ */
+ if (unlikely(entry == -1))
+ entry = wa_csb_read(engine, csb);
+
+ /* Consume this entry so that we can spot its future reuse. */
+ WRITE_ONCE(*csb, -1);
+
+ /* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */
+ return entry;
+}
+
+static void new_timeslice(struct intel_engine_execlists *el)
+{
+ /* By cancelling, we will start afresh in start_timeslice() */
+ cancel_timer(&el->timer);
+}
+
+static struct i915_request **
+process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ u64 * const buf = execlists->csb_status;
+ const u8 num_entries = execlists->csb_size;
+ struct i915_request **prev;
+ u8 head, tail;
+
+ /*
+ * As we modify our execlists state tracking we require exclusive
+ * access. Either we are inside the tasklet, or the tasklet is disabled
+ * and we assume that is only inside the reset paths and so serialised.
+ */
+ GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
+ !reset_in_progress(execlists));
+ GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
+
+ /*
+ * Note that csb_write, csb_status may be either in HWSP or mmio.
+ * When reading from the csb_write mmio register, we have to be
+ * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
+ * the low 4bits. As it happens we know the next 4bits are always
+ * zero and so we can simply mask off the low u8 of the register
+ * and treat it identically to reading from the HWSP (without having
+ * to use explicit shifting and masking, and probably bifurcating
+ * the code to handle the legacy mmio read).
+ */
+ head = execlists->csb_head;
+ tail = READ_ONCE(*execlists->csb_write);
+ if (unlikely(head == tail))
+ return inactive;
+
+ /*
+ * We will consume all events from HW, or at least pretend to.
+ *
+ * The sequence of events from the HW is deterministic, and derived
+ * from our writes to the ELSP, with a smidgen of variability for
+ * the arrival of the asynchronous requests wrt the inflight
+ * execution. If the HW sends an event that does not correspond with
+ * the one we are expecting, we have to abandon all hope as we lose
+ * all tracking of what the engine is actually executing. We will
+ * only detect we are out of sequence with the HW when we get an
+ * 'impossible' event because we have already drained our own
+ * preemption/promotion queue. If this occurs, we know that we likely
+ * lost track of execution earlier and must unwind and restart, the
+ * simplest way is to stop processing the event queue and force the
+ * engine to reset.
+ */
+ execlists->csb_head = tail;
+ ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
+
+ /*
+ * Hopefully paired with a wmb() in HW!
+ *
+ * We must complete the read of the write pointer before any reads
+ * from the CSB, so that we do not see stale values. Without an rmb
+ * (lfence) the HW may speculatively perform the CSB[] reads *before*
+ * we perform the READ_ONCE(*csb_write).
+ */
+ rmb();
+
+ /* Remember who was last running under the timer */
+ prev = inactive;
+ *prev = NULL;
+
+ do {
+ bool promote;
+ u64 csb;
+
+ if (++head == num_entries)
+ head = 0;
+
+ /*
+ * We are flying near dragons again.
+ *
+ * We hold a reference to the request in execlist_port[]
+ * but no more than that. We are operating in softirq
+ * context and so cannot hold any mutex or sleep. That
+ * prevents us stopping the requests we are processing
+ * in port[] from being retired simultaneously (the
+ * breadcrumb will be complete before we see the
+ * context-switch). As we only hold the reference to the
+ * request, any pointer chasing underneath the request
+ * is subject to a potential use-after-free. Thus we
+ * store all of the bookkeeping within port[] as
+ * required, and avoid using unguarded pointers beneath
+ * request itself. The same applies to the atomic
+ * status notifier.
+ */
+
+ csb = csb_read(engine, buf + head);
+ ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
+ head, upper_32_bits(csb), lower_32_bits(csb));
+
+ if (INTEL_GEN(engine->i915) >= 12)
+ promote = gen12_csb_parse(csb);
+ else
+ promote = gen8_csb_parse(csb);
+ if (promote) {
+ struct i915_request * const *old = execlists->active;
+
+ if (GEM_WARN_ON(!*execlists->pending)) {
+ execlists->error_interrupt |= ERROR_CSB;
+ break;
+ }
+
+ ring_set_paused(engine, 0);
+
+ /* Point active to the new ELSP; prevent overwriting */
+ WRITE_ONCE(execlists->active, execlists->pending);
+ smp_wmb(); /* notify execlists_active() */
+
+ /* cancel old inflight, prepare for switch */
+ trace_ports(execlists, "preempted", old);
+ while (*old)
+ *inactive++ = *old++;
+
+ /* switch pending to inflight */
+ GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
+ copy_ports(execlists->inflight,
+ execlists->pending,
+ execlists_num_ports(execlists));
+ smp_wmb(); /* complete the seqlock */
+ WRITE_ONCE(execlists->active, execlists->inflight);
+
+ /* XXX Magic delay for tgl */
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ WRITE_ONCE(execlists->pending[0], NULL);
+ } else {
+ if (GEM_WARN_ON(!*execlists->active)) {
+ execlists->error_interrupt |= ERROR_CSB;
+ break;
+ }
+
+ /* port0 completed, advanced to port1 */
+ trace_ports(execlists, "completed", execlists->active);
+
+ /*
+ * We rely on the hardware being strongly
+ * ordered, that the breadcrumb write is
+ * coherent (visible from the CPU) before the
+ * user interrupt is processed. One might assume
+ * that, since the breadcrumb write happens before
+ * the user interrupt and before the CS event for
+ * the context switch, it would therefore be
+ * visible before the CS event itself...
+ */
+ if (GEM_SHOW_DEBUG() &&
+ !__i915_request_is_complete(*execlists->active)) {
+ struct i915_request *rq = *execlists->active;
+ const u32 *regs __maybe_unused =
+ rq->context->lrc_reg_state;
+
+ ENGINE_TRACE(engine,
+ "context completed before request!\n");
+ ENGINE_TRACE(engine,
+ "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
+ ENGINE_READ(engine, RING_START),
+ ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
+ ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_MI_MODE));
+ ENGINE_TRACE(engine,
+ "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ rq->head, rq->tail,
+ rq->fence.context,
+ lower_32_bits(rq->fence.seqno),
+ hwsp_seqno(rq));
+ ENGINE_TRACE(engine,
+ "ctx:{start:%08x, head:%04x, tail:%04x}, ",
+ regs[CTX_RING_START],
+ regs[CTX_RING_HEAD],
+ regs[CTX_RING_TAIL]);
+ }
+
+ *inactive++ = *execlists->active++;
+
+ GEM_BUG_ON(execlists->active - execlists->inflight >
+ execlists_num_ports(execlists));
+ }
+ } while (head != tail);
+
+ /*
+ * Gen11 has proven to fail wrt global observation point between
+ * entry and tail update, failing on the ordering and thus
+ * we see an old entry in the context status buffer.
+ *
+ * Forcibly evict the entries for the next gpu csb update,
+ * to increase the odds that we get fresh entries even with
+ * non-working hardware. The cost of doing so comes out mostly
+ * in the wash, as the hardware, working or not, will need to do
+ * the invalidation beforehand.
+ */
+ invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
+
+ /*
+ * We assume that any event reflects a change in context flow
+ * and merits a fresh timeslice. We reinstall the timer after
+ * inspecting the queue to see if we need to resubmit.
+ */
+ if (*prev != *execlists->active) /* elide lite-restores */
+ new_timeslice(execlists);
+
+ return inactive;
+}
+
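+/* Schedule out every request collected as inactive while processing the CSB. */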
+static void post_process_csb(struct i915_request **port,
+ struct i915_request **last)
+{
+ while (port != last)
+ execlists_schedule_out(*port++);
+}
+
+static void __execlists_hold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ if (i915_request_is_active(rq))
+ __i915_request_unsubmit(rq);
+
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ list_move_tail(&rq->sched.link, &rq->engine->active.hold);
+ i915_request_set_hold(rq);
+ RQ_TRACE(rq, "on hold\n");
+
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_is_ready(w))
+ continue;
+
+ if (__i915_request_is_complete(w))
+ continue;
+
+ if (i915_request_on_hold(w))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static bool execlists_hold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ if (i915_request_on_hold(rq))
+ return false;
+
+ spin_lock_irq(&engine->active.lock);
+
+ if (__i915_request_is_complete(rq)) { /* too late! */
+ rq = NULL;
+ goto unlock;
+ }
+
+ /*
+ * Transfer this request onto the hold queue to prevent it
+ * being resubmitted to HW (and potentially completed) before we have
+ * released it. Since we may have already submitted following
+ * requests, we need to remove those as well.
+ */
+ GEM_BUG_ON(i915_request_on_hold(rq));
+ GEM_BUG_ON(rq->engine != engine);
+ __execlists_hold(rq);
+ GEM_BUG_ON(list_empty(&engine->active.hold));
+
+unlock:
+ spin_unlock_irq(&engine->active.lock);
+ return rq;
+}
+
+static bool hold_request(const struct i915_request *rq)
+{
+ struct i915_dependency *p;
+ bool result = false;
+
+ /*
+ * If one of our ancestors is on hold, we must also be on hold,
+ * otherwise we will bypass it and execute before it.
+ */
+ rcu_read_lock();
+ for_each_signaler(p, rq) {
+ const struct i915_request *s =
+ container_of(p->signaler, typeof(*s), sched);
+
+ if (s->engine != rq->engine)
+ continue;
+
+ result = i915_request_on_hold(s);
+ if (result)
+ break;
+ }
+ rcu_read_unlock();
+
+ return result;
+}
+
+static void __execlists_unhold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ RQ_TRACE(rq, "hold release\n");
+
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+ GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
+
+ i915_request_clear_hold(rq);
+ list_move_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(rq->engine,
+ rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+ /* Also release any children on this engine that are ready */
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
+ /* Propagate any change in error status */
+ if (rq->fence.error)
+ i915_request_set_error_once(w, rq->fence.error);
+
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_on_hold(w))
+ continue;
+
+ /* Check that no other parents are also on hold */
+ if (hold_request(w))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static void execlists_unhold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ spin_lock_irq(&engine->active.lock);
+
+ /*
+ * Move this request back to the priority queue, and all of its
+ * children and grandchildren that were suspended along with it.
+ */
+ __execlists_unhold(rq);
+
+ if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
+ engine->execlists.queue_priority_hint = rq_prio(rq);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
+
+ spin_unlock_irq(&engine->active.lock);
+}
+
+struct execlists_capture {
+ struct work_struct work;
+ struct i915_request *rq;
+ struct i915_gpu_coredump *error;
+};
+
+static void execlists_capture_work(struct work_struct *work)
+{
+ struct execlists_capture *cap = container_of(work, typeof(*cap), work);
+ const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+ struct intel_engine_cs *engine = cap->rq->engine;
+ struct intel_gt_coredump *gt = cap->error->gt;
+ struct intel_engine_capture_vma *vma;
+
+ /* Compress all the objects attached to the request, slow! */
+ vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
+ if (vma) {
+ struct i915_vma_compress *compress =
+ i915_vma_capture_prepare(gt);
+
+ intel_engine_coredump_add_vma(gt->engine, vma, compress);
+ i915_vma_capture_finish(gt, compress);
+ }
+
+ gt->simulated = gt->engine->simulated;
+ cap->error->simulated = gt->simulated;
+
+ /* Publish the error state, and announce it to the world */
+ i915_error_state_store(cap->error);
+ i915_gpu_coredump_put(cap->error);
+
+ /* Return this request and all that depend upon it for signaling */
+ execlists_unhold(engine, cap->rq);
+ i915_request_put(cap->rq);
+
+ kfree(cap);
+}
+
+static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
+{
+ const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+ struct execlists_capture *cap;
+
+ cap = kmalloc(sizeof(*cap), gfp);
+ if (!cap)
+ return NULL;
+
+ cap->error = i915_gpu_coredump_alloc(engine->i915, gfp);
+ if (!cap->error)
+ goto err_cap;
+
+ cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp);
+ if (!cap->error->gt)
+ goto err_gpu;
+
+ cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp);
+ if (!cap->error->gt->engine)
+ goto err_gt;
+
+ cap->error->gt->engine->hung = true;
+
+ return cap;
+
+err_gt:
+ kfree(cap->error->gt);
+err_gpu:
+ kfree(cap->error);
+err_cap:
+ kfree(cap);
+ return NULL;
+}
+
+static struct i915_request *
+active_context(struct intel_engine_cs *engine, u32 ccid)
+{
+ const struct intel_engine_execlists * const el = &engine->execlists;
+ struct i915_request * const *port, *rq;
+
+ /*
+ * Use the most recent result from process_csb(), but just in case
+ * we trigger an error (via interrupt) before the first CS event has
+ * been written, peek at the next submission.
+ */
+
+ for (port = el->active; (rq = *port); port++) {
+ if (rq->context->lrc.ccid == ccid) {
+ ENGINE_TRACE(engine,
+ "ccid:%x found at active:%zd\n",
+ ccid, port - el->active);
+ return rq;
+ }
+ }
+
+ for (port = el->pending; (rq = *port); port++) {
+ if (rq->context->lrc.ccid == ccid) {
+ ENGINE_TRACE(engine,
+ "ccid:%x found at pending:%zd\n",
+ ccid, port - el->pending);
+ return rq;
+ }
+ }
+
+ ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
+ return NULL;
+}
+
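+/*
+ * The upper dword of the EXECLIST_STATUS register reports the CCID of
+ * the context currently executing on the engine.
+ */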
+static u32 active_ccid(struct intel_engine_cs *engine)
+{
+ return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
+}
+
+static void execlists_capture(struct intel_engine_cs *engine)
+{
+ struct execlists_capture *cap;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
+ return;
+
+ /*
+ * We need to _quickly_ capture the engine state before we reset.
+ * We are inside an atomic section (softirq) here and we are delaying
+ * the forced preemption event.
+ */
+ cap = capture_regs(engine);
+ if (!cap)
+ return;
+
+ spin_lock_irq(&engine->active.lock);
+ cap->rq = active_context(engine, active_ccid(engine));
+ if (cap->rq) {
+ cap->rq = active_request(cap->rq->context->timeline, cap->rq);
+ cap->rq = i915_request_get_rcu(cap->rq);
+ }
+ spin_unlock_irq(&engine->active.lock);
+ if (!cap->rq)
+ goto err_free;
+
+ /*
+ * Remove the request from the execlists queue, and take ownership
+ * of the request. We pass it to our worker who will _slowly_ compress
+ * all the pages the _user_ requested for debugging their batch, after
+ * which we return it to the queue for signaling.
+ *
+ * By removing them from the execlists queue, we also remove the
+ * requests from being processed by __unwind_incomplete_requests()
+ * during the intel_engine_reset(), and so they will *not* be replayed
+ * afterwards.
+ *
+ * Note that because we have not yet reset the engine at this point,
+ * it is possible that the request we have identified as being
+ * guilty did in fact complete, and we will then hit an arbitration
+ * point allowing the outstanding preemption to succeed.
+ * of that is very low (as capturing of the engine registers should be
+ * fast enough to run inside an irq-off atomic section!), so we will
+ * simply hold that request accountable for being non-preemptible
+ * long enough to force the reset.
+ */
+ if (!execlists_hold(engine, cap->rq))
+ goto err_rq;
+
+ INIT_WORK(&cap->work, execlists_capture_work);
+ schedule_work(&cap->work);
+ return;
+
+err_rq:
+ i915_request_put(cap->rq);
+err_free:
+ i915_gpu_coredump_put(cap->error);
+ kfree(cap);
+}
+
+static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
+{
+ const unsigned int bit = I915_RESET_ENGINE + engine->id;
+ unsigned long *lock = &engine->gt->reset.flags;
+
+ if (!intel_has_reset_engine(engine->gt))
+ return;
+
+ if (test_and_set_bit(bit, lock))
+ return;
+
+ ENGINE_TRACE(engine, "reset for %s\n", msg);
+
+ /* Mark this tasklet as disabled to avoid waiting for it to complete */
+ tasklet_disable_nosync(&engine->execlists.tasklet);
+
+ ring_set_paused(engine, 1); /* Freeze the current request in place */
+ execlists_capture(engine);
+ intel_engine_reset(engine, msg);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+}
+
+static bool preempt_timeout(const struct intel_engine_cs *const engine)
+{
+ const struct timer_list *t = &engine->execlists.preempt;
+
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+ return false;
+
+ if (!timer_expired(t))
+ return false;
+
+ return engine->execlists.pending[0];
+}
+
+/*
+ * Check the unread Context Status Buffers and manage the submission of new
+ * contexts to the ELSP accordingly.
+ */
+static void execlists_submission_tasklet(unsigned long data)
+{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ struct i915_request *post[2 * EXECLIST_MAX_PORTS];
+ struct i915_request **inactive;
+
+ rcu_read_lock();
+ inactive = process_csb(engine, post);
+ GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
+
+ if (unlikely(preempt_timeout(engine))) {
+ cancel_timer(&engine->execlists.preempt);
+ engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ }
+
+ if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
+ const char *msg;
+
+ /* Generate the error message in priority wrt the user! */
+ if (engine->execlists.error_interrupt & GENMASK(15, 0))
+ msg = "CS error"; /* thrown by a user payload */
+ else if (engine->execlists.error_interrupt & ERROR_CSB)
+ msg = "invalid CSB event";
+ else if (engine->execlists.error_interrupt & ERROR_PREEMPT)
+ msg = "preemption time out";
+ else
+ msg = "internal error";
+
+ engine->execlists.error_interrupt = 0;
+ execlists_reset(engine, msg);
+ }
+
+ if (!engine->execlists.pending[0]) {
+ execlists_dequeue_irq(engine);
+ start_timeslice(engine);
+ }
+
+ post_process_csb(post, inactive);
+ rcu_read_unlock();
+}
+
+static void __execlists_kick(struct intel_engine_execlists *execlists)
+{
+ /* Kick the tasklet for some interrupt coalescing and reset handling */
+ tasklet_hi_schedule(&execlists->tasklet);
+}
+
+#define execlists_kick(t, member) \
+ __execlists_kick(container_of(t, struct intel_engine_execlists, member))
+
+static void execlists_timeslice(struct timer_list *timer)
+{
+ execlists_kick(timer, timer);
+}
+
+static void execlists_preempt(struct timer_list *timer)
+{
+ execlists_kick(timer, preempt);
+}
+
+static void queue_request(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ GEM_BUG_ON(!list_empty(&rq->sched.link));
+ list_add_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(engine, rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
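+/*
+ * Only kick the submission tasklet if the new request has a higher
+ * priority than the current queue_priority_hint; record the new hint.
+ */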
+static bool submit_queue(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+
+ if (rq_prio(rq) <= execlists->queue_priority_hint)
+ return false;
+
+ execlists->queue_priority_hint = rq_prio(rq);
+ return true;
+}
+
+static bool ancestor_on_hold(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ GEM_BUG_ON(i915_request_on_hold(rq));
+ return !list_empty(&engine->active.hold) && hold_request(rq);
+}
+
+static void execlists_submit_request(struct i915_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ if (unlikely(ancestor_on_hold(engine, request))) {
+ RQ_TRACE(request, "ancestor on hold\n");
+ list_add_tail(&request->sched.link, &engine->active.hold);
+ i915_request_set_hold(request);
+ } else {
+ queue_request(engine, request);
+
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+ GEM_BUG_ON(list_empty(&request->sched.link));
+
+ if (submit_queue(engine, request))
+ __execlists_kick(&engine->execlists);
+ }
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+static int execlists_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ return lrc_pre_pin(ce, ce->engine, ww, vaddr);
+}
+
+static int execlists_context_pin(struct intel_context *ce, void *vaddr)
+{
+ return lrc_pin(ce, ce->engine, vaddr);
+}
+
+static int execlists_context_alloc(struct intel_context *ce)
+{
+ return lrc_alloc(ce, ce->engine);
+}
+
+static const struct intel_context_ops execlists_context_ops = {
+ .flags = COPS_HAS_INFLIGHT,
+
+ .alloc = execlists_context_alloc,
+
+ .pre_pin = execlists_context_pre_pin,
+ .pin = execlists_context_pin,
+ .unpin = lrc_unpin,
+ .post_unpin = lrc_post_unpin,
+
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
+ .reset = lrc_reset,
+ .destroy = lrc_destroy,
+};
+
+static int emit_pdps(struct i915_request *rq)
+{
+ const struct intel_engine_cs * const engine = rq->engine;
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
+ int err, i;
+ u32 *cs;
+
+ GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
+
+ /*
+ * Beware ye of the dragons, this sequence is magic!
+ *
+ * Small changes to this sequence can cause anything from
+ * GPU hangs to forcewake errors and machine lockups!
+ */
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* Flush any residual operations from the context load */
+ err = engine->emit_flush(rq, EMIT_FLUSH);
+ if (err)
+ return err;
+
+ /* Magic required to prevent forcewake errors! */
+ err = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ return err;
+
+ cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Ensure the LRI have landed before we invalidate & continue */
+ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
+ for (i = GEN8_3LVL_PDPES; i--; ) {
+ const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+ u32 base = engine->mmio_base;
+
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
+ *cs++ = lower_32_bits(pd_daddr);
+ }
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int execlists_request_alloc(struct i915_request *request)
+{
+ int ret;
+
+ GEM_BUG_ON(!intel_context_is_pinned(request->context));
+
+ /*
+ * Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
+ */
+ request->reserved_space += EXECLISTS_REQUEST_SIZE;
+
+ /*
+ * Note that after this point, we have committed to using
+ * this request as it is being used to both track the
+ * state of engine initialisation and liveness of the
+ * golden renderstate above. Think twice before you try
+ * to cancel/unwind this request now.
+ */
+
+ if (!i915_vm_is_4lvl(request->context->vm)) {
+ ret = emit_pdps(request);
+ if (ret)
+ return ret;
+ }
+
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+ if (ret)
+ return ret;
+
+ request->reserved_space -= EXECLISTS_REQUEST_SIZE;
+ return 0;
+}
+
+static void reset_csb_pointers(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ const unsigned int reset_value = execlists->csb_size - 1;
+
+ ring_set_paused(engine, 0);
+
+ /*
+ * Sometimes Icelake forgets to reset its pointers on a GPU reset.
+ * Bludgeon them with a mmio update to be sure.
+ */
+ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+ 0xffff << 16 | reset_value << 8 | reset_value);
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ /*
+ * After a reset, the HW starts writing into CSB entry [0]. We
+ * therefore have to set our HEAD pointer back one entry so that
+ * the *first* entry we check is entry 0. To complicate this further,
+ * as we don't wait for the first interrupt after reset, we have to
+ * fake the HW write to point back to the last entry so that our
+ * inline comparison of our cached head position against the last HW
+ * write works even before the first interrupt.
+ */
+ execlists->csb_head = reset_value;
+ WRITE_ONCE(*execlists->csb_write, reset_value);
+ wmb(); /* Make sure this is visible to HW (paranoia?) */
+
+ /* Check that the GPU does indeed update the CSB entries! */
+ memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
+ invalidate_csb_entries(&execlists->csb_status[0],
+ &execlists->csb_status[reset_value]);
+
+ /* Once more for luck and our trusty paranoia */
+ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+ 0xffff << 16 | reset_value << 8 | reset_value);
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
+}
+
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+ struct intel_timeline *tl;
+
+ list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+ intel_timeline_reset_seqno(tl);
+}
+
+static void execlists_sanitize(struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(execlists_active(&engine->execlists));
+
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
+ *
+ * We have to assume that across suspend/resume (or other loss
+ * of control) the contents of our pinned buffers have been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
+ */
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+ reset_csb_pointers(engine);
+
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */
+ sanitize_hwsp(engine);
+
+ /* And scrub the dirty cachelines for the HWSP */
+ clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
+static void enable_error_interrupt(struct intel_engine_cs *engine)
+{
+ u32 status;
+
+ engine->execlists.error_interrupt = 0;
+ ENGINE_WRITE(engine, RING_EMR, ~0u);
+ ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
+
+ status = ENGINE_READ(engine, RING_ESR);
+ if (unlikely(status)) {
+ drm_err(&engine->i915->drm,
+ "engine '%s' resumed still in error: %08x\n",
+ engine->name, status);
+ __intel_gt_reset(engine->gt, engine->mask);
+ }
+
+ /*
+ * On current gen8+, we have 2 signals to play with
+ *
+ * - I915_ERROR_INSTRUCTION (bit 0)
+ *
+ * Generate an error if the command parser encounters an invalid
+ * instruction
+ *
+ * This is a fatal error.
+ *
+ * - CP_PRIV (bit 2)
+ *
+ * Generate an error on privilege violation (where the CP replaces
+ * the instruction with a no-op). This also fires for writes into
+ * read-only scratch pages.
+ *
+ * This is a non-fatal error, parsing continues.
+ *
+ * * there are a few others defined for odd HW that we do not use
+ *
+ * Since CP_PRIV fires for cases where we have chosen to ignore the
+ * error (as the HW is validating and suppressing the mistakes), we
+ * only unmask the instruction error bit.
+ */
+ ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION);
+}
+
+static void enable_execlists(struct intel_engine_cs *engine)
+{
+ u32 mode;
+
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
+ intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
+
+ if (INTEL_GEN(engine->i915) >= 11)
+ mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
+ else
+ mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
+ ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
+
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+
+ ENGINE_WRITE_FW(engine,
+ RING_HWS_PGA,
+ i915_ggtt_offset(engine->status_page.vma));
+ ENGINE_POSTING_READ(engine, RING_HWS_PGA);
+
+ enable_error_interrupt(engine);
+}
+
+static bool unexpected_starting_state(struct intel_engine_cs *engine)
+{
+ bool unexpected = false;
+
+ if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
+ drm_dbg(&engine->i915->drm,
+ "STOP_RING still set in RING_MI_MODE\n");
+ unexpected = true;
+ }
+
+ return unexpected;
+}
+
+static int execlists_resume(struct intel_engine_cs *engine)
+{
+ intel_mocs_init_engine(engine);
+
+ intel_breadcrumbs_reset(engine->breadcrumbs);
+
+ if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ intel_engine_dump(engine, &p, NULL);
+ }
+
+ enable_execlists(engine);
+
+ return 0;
+}
+
+static void execlists_reset_prepare(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ ENGINE_TRACE(engine, "depth<-%d\n",
+ atomic_read(&execlists->tasklet.count));
+
+ /*
+ * Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its execlists->tasklet *just* as we are
+ * calling engine->resume() and also writing the ELSP.
+ * Turning off the execlists->tasklet until the reset is over
+ * prevents the race.
+ */
+ __tasklet_disable_sync_once(&execlists->tasklet);
+ GEM_BUG_ON(!reset_in_progress(execlists));
+
+ /*
+ * We stop engines, otherwise we might get failed reset and a
+ * dead gpu (on elk). Even a gpu as modern as kbl can suffer
+ * from a system hang if a batchbuffer is progressing when
+ * the reset is issued, regardless of READY_TO_RESET ack.
+ * Thus assume it is best to stop engines on all gens
+ * where we have a gpu reset.
+ *
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
+ * FIXME: Wa for more modern gens needs to be validated
+ */
+ ring_set_paused(engine, 1);
+ intel_engine_stop_cs(engine);
+
+ engine->execlists.reset_ccid = active_ccid(engine);
+}
+
+static struct i915_request **
+reset_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ mb(); /* paranoia: read the CSB pointers from after the reset */
+ clflush(execlists->csb_write);
+ mb();
+
+ inactive = process_csb(engine, inactive); /* drain preemption events */
+
+ /* Following the reset, we need to reload the CSB read/write pointers */
+ reset_csb_pointers(engine);
+
+ return inactive;
+}
+
+static void
+execlists_reset_active(struct intel_engine_cs *engine, bool stalled)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ u32 head;
+
+ /*
+ * Save the currently executing context, even if we completed
+ * its request, it was still running at the time of the
+ * reset and will have been clobbered.
+ */
+ rq = active_context(engine, engine->execlists.reset_ccid);
+ if (!rq)
+ return;
+
+ ce = rq->context;
+ GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
+
+ if (__i915_request_is_complete(rq)) {
+ /* Idle context; tidy up the ring so we can restart afresh */
+ head = intel_ring_wrap(ce->ring, rq->tail);
+ goto out_replay;
+ }
+
+ /* We still have requests in-flight; the engine should be active */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+ /* Context has requests still in-flight; it should not be idle! */
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+
+ rq = active_request(ce->timeline, rq);
+ head = intel_ring_wrap(ce->ring, rq->head);
+ GEM_BUG_ON(head == ce->ring->tail);
+
+ /*
+ * If this request hasn't started yet, e.g. it is waiting on a
+ * semaphore, we need to avoid skipping the request or else we
+ * break the signaling chain. However, if the context is corrupt
+ * the request will not restart and we will be stuck with a wedged
+ * device. It is quite often the case that if we issue a reset
+ * while the GPU is loading the context image, that the context
+ * image becomes corrupt.
+ *
+ * Otherwise, if we have not started yet, the request should replay
+ * perfectly and we do not need to flag the result as being erroneous.
+ */
+ if (!__i915_request_has_started(rq))
+ goto out_replay;
+
+ /*
+ * If the request was innocent, we leave the request in the ELSP
+ * and will try to replay it on restarting. The context image may
+ * have been corrupted by the reset, in which case we may have
+ * to service a new GPU hang, but more likely we can continue on
+ * without impact.
+ *
+ * If the request was guilty, we presume the context is corrupt
+ * and have to at least restore the RING register in the context
+ * image back to the expected values to skip over the guilty request.
+ */
+ __i915_request_reset(rq, stalled);
+
+ /*
+ * We want a simple context + ring to execute the breadcrumb update.
+ * We cannot rely on the context being intact across the GPU hang,
+ * so clear it and rebuild just what we need for the breadcrumb.
+ * All pending requests for this context will be zapped, and any
+ * future request will be after userspace has had the opportunity
+ * to recreate its own state.
+ */
+out_replay:
+ ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
+ head, ce->ring->tail);
+ lrc_reset_regs(ce, engine);
+ ce->lrc.lrca = lrc_update_regs(ce, engine, head);
+}
+
+static void execlists_reset_csb(struct intel_engine_cs *engine, bool stalled)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *post[2 * EXECLIST_MAX_PORTS];
+ struct i915_request **inactive;
+
+ rcu_read_lock();
+ inactive = reset_csb(engine, post);
+
+ execlists_reset_active(engine, true);
+
+ inactive = cancel_port_requests(execlists, inactive);
+ post_process_csb(post, inactive);
+ rcu_read_unlock();
+}
+
+static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
+{
+ unsigned long flags;
+
+ ENGINE_TRACE(engine, "\n");
+
+ /* Process the csb, find the guilty context and throw away */
+ execlists_reset_csb(engine, stalled);
+
+ /* Push back any incomplete requests for replay after the reset. */
+ rcu_read_lock();
+ spin_lock_irqsave(&engine->active.lock, flags);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+ rcu_read_unlock();
+}
+
+static void nop_submission_tasklet(unsigned long data)
+{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+
+ /* The driver is wedged; don't process any more events. */
+ WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
+}
+
+static void execlists_reset_cancel(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *rq, *rn;
+ struct rb_node *rb;
+ unsigned long flags;
+
+ ENGINE_TRACE(engine, "\n");
+
+ /*
+ * Before we call engine->cancel_requests(), we should have exclusive
+ * access to the submission state. This is arranged for us by the
+ * caller disabling the interrupt generation, the tasklet and other
+ * threads that may then access the same state, giving us a free hand
+ * to reset state. However, we still need to let lockdep be aware that
+ * we know this state may be accessed in hardirq context, so we
+ * disable the irq around this manipulation and we want to keep
+ * the spinlock focused on its duties and not accidentally conflate
+ * coverage to the submission's irq state. (Similarly, although we
+ * shouldn't need to disable irq around the manipulation of the
+ * submission's irq state, we also wish to remind ourselves that
+ * it is irq state.)
+ */
+ execlists_reset_csb(engine, true);
+
+ rcu_read_lock();
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ /* Mark all executing requests as skipped. */
+ list_for_each_entry(rq, &engine->active.requests, sched.link)
+ i915_request_mark_eio(rq);
+ intel_engine_signal_breadcrumbs(engine);
+
+ /* Flush the queued requests to the timeline list (for retiring). */
+ while ((rb = rb_first_cached(&execlists->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ i915_request_mark_eio(rq);
+ __i915_request_submit(rq);
+ }
+
+ rb_erase_cached(&p->node, &execlists->queue);
+ i915_priolist_free(p);
+ }
+
+ /* On-hold requests will be flushed to timeline upon their release */
+ list_for_each_entry(rq, &engine->active.hold, sched.link)
+ i915_request_mark_eio(rq);
+
+ /* Cancel all attached virtual engines */
+ while ((rb = rb_first_cached(&execlists->virtual))) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+
+ spin_lock(&ve->base.active.lock);
+ rq = fetch_and_zero(&ve->request);
+ if (rq) {
+ i915_request_mark_eio(rq);
+
+ rq->engine = engine;
+ __i915_request_submit(rq);
+ i915_request_put(rq);
+
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ }
+ spin_unlock(&ve->base.active.lock);
+ }
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+ execlists->queue_priority_hint = INT_MIN;
+ execlists->queue = RB_ROOT_CACHED;
+
+ GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
+ execlists->tasklet.func = nop_submission_tasklet;
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+ rcu_read_unlock();
+}
+
+static void execlists_reset_finish(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ /*
+ * After a GPU reset, we may have requests to replay. Do so now while
+ * we still have the forcewake to be sure that the GPU is not allowed
+ * to sleep before we restart and reload a context.
+ *
+ * If the GPU reset fails, the engine may still be alive with requests
+ * inflight. We expect those to complete, or for the device to be
+ * reset as the next level of recovery, and as a final resort we
+ * will declare the device wedged.
+ */
+ GEM_BUG_ON(!reset_in_progress(execlists));
+
+ /* And kick in case we missed a new request submission. */
+ if (__tasklet_enable(&execlists->tasklet))
+ __execlists_kick(execlists);
+
+ ENGINE_TRACE(engine, "depth->%d\n",
+ atomic_read(&execlists->tasklet.count));
+}
+
+static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+ ENGINE_POSTING_READ(engine, RING_IMR);
+}
+
+static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
+}
+
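+/*
+ * The engine is being parked (idled): stop the timeslice and
+ * preempt-timeout timers.
+ */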
+static void execlists_park(struct intel_engine_cs *engine)
+{
+ cancel_timer(&engine->execlists.timer);
+ cancel_timer(&engine->execlists.preempt);
+}
+
+static bool can_preempt(struct intel_engine_cs *engine)
+{
+ if (INTEL_GEN(engine->i915) > 8)
+ return true;
+
+ /* GPGPU on bdw requires extra w/a; not implemented */
+ return engine->class != RENDER_CLASS;
+}
+
+static void execlists_set_default_submission(struct intel_engine_cs *engine)
+{
+ engine->submit_request = execlists_submit_request;
+ engine->schedule = i915_schedule;
+ engine->execlists.tasklet.func = execlists_submission_tasklet;
+
+ engine->reset.prepare = execlists_reset_prepare;
+ engine->reset.rewind = execlists_reset_rewind;
+ engine->reset.cancel = execlists_reset_cancel;
+ engine->reset.finish = execlists_reset_finish;
+
+ engine->park = execlists_park;
+ engine->unpark = NULL;
+
+ engine->flags |= I915_ENGINE_SUPPORTS_STATS;
+ if (!intel_vgpu_active(engine->i915)) {
+ engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+ if (can_preempt(engine)) {
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+ if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ }
+ }
+
+ if (intel_engine_has_preemption(engine))
+ engine->emit_bb_start = gen8_emit_bb_start;
+ else
+ engine->emit_bb_start = gen8_emit_bb_start_noarb;
+}
+
+static void execlists_shutdown(struct intel_engine_cs *engine)
+{
+ /* Synchronise with residual timers and any softirq they raise */
+ del_timer_sync(&engine->execlists.timer);
+ del_timer_sync(&engine->execlists.preempt);
+ tasklet_kill(&engine->execlists.tasklet);
+}
+
+static void execlists_release(struct intel_engine_cs *engine)
+{
+ engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+
+ execlists_shutdown(engine);
+
+ intel_engine_cleanup_common(engine);
+ lrc_fini_wa_ctx(engine);
+}
+
+static void
+logical_ring_default_vfuncs(struct intel_engine_cs *engine)
+{
+ /* Default vfuncs which can be overridden by each engine. */
+
+ engine->resume = execlists_resume;
+
+ engine->cops = &execlists_context_ops;
+ engine->request_alloc = execlists_request_alloc;
+
+ engine->emit_flush = gen8_emit_flush_xcs;
+ engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
+ if (INTEL_GEN(engine->i915) >= 12) {
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
+ engine->emit_flush = gen12_emit_flush_xcs;
+ }
+ engine->set_default_submission = execlists_set_default_submission;
+
+ if (INTEL_GEN(engine->i915) < 11) {
+ engine->irq_enable = gen8_logical_ring_enable_irq;
+ engine->irq_disable = gen8_logical_ring_disable_irq;
+ } else {
+ /*
+ * TODO: On Gen11 interrupt masks need to be clear
+ * to allow C6 entry. Keep interrupts enabled
+ * and take the hit of generating extra interrupts
+ * until a more refined solution exists.
+ */
+ }
+}
+
+static void logical_ring_default_irqs(struct intel_engine_cs *engine)
+{
+ unsigned int shift = 0;
+
+ if (INTEL_GEN(engine->i915) < 11) {
+ const u8 irq_shifts[] = {
+ [RCS0] = GEN8_RCS_IRQ_SHIFT,
+ [BCS0] = GEN8_BCS_IRQ_SHIFT,
+ [VCS0] = GEN8_VCS0_IRQ_SHIFT,
+ [VCS1] = GEN8_VCS1_IRQ_SHIFT,
+ [VECS0] = GEN8_VECS_IRQ_SHIFT,
+ };
+
+ shift = irq_shifts[engine->id];
+ }
+
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
+ engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+ engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
+ engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
+}
+
+static void rcs_submission_override(struct intel_engine_cs *engine)
+{
+ switch (INTEL_GEN(engine->i915)) {
+ case 12:
+ engine->emit_flush = gen12_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
+ break;
+ case 11:
+ engine->emit_flush = gen11_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
+ break;
+ default:
+ engine->emit_flush = gen8_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
+ break;
+ }
+}
+
+int intel_execlists_submission_setup(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
+ u32 base = engine->mmio_base;
+
+ tasklet_init(&engine->execlists.tasklet,
+ execlists_submission_tasklet, (unsigned long)engine);
+ timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
+ timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
+
+ logical_ring_default_vfuncs(engine);
+ logical_ring_default_irqs(engine);
+
+ if (engine->class == RENDER_CLASS)
+ rcs_submission_override(engine);
+
+ lrc_init_wa_ctx(engine);
+
+ if (HAS_LOGICAL_RING_ELSQ(i915)) {
+ execlists->submit_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
+ execlists->ctrl_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
+ } else {
+ execlists->submit_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_ELSP(base));
+ }
+
+ execlists->csb_status =
+ (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
+
+ execlists->csb_write =
+ &engine->status_page.addr[intel_hws_csb_write_index(i915)];
+
+ if (INTEL_GEN(i915) < 11)
+ execlists->csb_size = GEN8_CSB_ENTRIES;
+ else
+ execlists->csb_size = GEN11_CSB_ENTRIES;
+
+ engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
+ if (INTEL_GEN(engine->i915) >= 11) {
+ execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
+ execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
+ }
+
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->sanitize = execlists_sanitize;
+ engine->release = execlists_release;
+
+ return 0;
+}
+
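+/*
+ * The virtual engine carries at most one request at a time (ve->request);
+ * the otherwise unused default_priolist request list is reused as the
+ * holding slot for that request's sched.link.
+ */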
+static struct list_head *virtual_queue(struct virtual_engine *ve)
+{
+ return &ve->base.execlists.default_priolist.requests[0];
+}
+
+static void rcu_virtual_context_destroy(struct work_struct *wrk)
+{
+ struct virtual_engine *ve =
+ container_of(wrk, typeof(*ve), rcu.work);
+ unsigned int n;
+
+ GEM_BUG_ON(ve->context.inflight);
+
+ /* Preempt-to-busy may leave a stale request behind. */
+ if (unlikely(ve->request)) {
+ struct i915_request *old;
+
+ spin_lock_irq(&ve->base.active.lock);
+
+ old = fetch_and_zero(&ve->request);
+ if (old) {
+ GEM_BUG_ON(!__i915_request_is_complete(old));
+ __i915_request_submit(old);
+ i915_request_put(old);
+ }
+
+ spin_unlock_irq(&ve->base.active.lock);
+ }
+
+ /*
+ * Flush the tasklet in case it is still running on another core.
+ *
+ * This needs to be done before we remove ourselves from the siblings'
+ * rbtrees: if it is running in parallel, it may reinsert the
+ * rb_node into a sibling.
+ */
+ tasklet_kill(&ve->base.execlists.tasklet);
+
+ /* Decouple ourselves from the siblings, no more access allowed. */
+ for (n = 0; n < ve->num_siblings; n++) {
+ struct intel_engine_cs *sibling = ve->siblings[n];
+ struct rb_node *node = &ve->nodes[sibling->id].rb;
+
+ if (RB_EMPTY_NODE(node))
+ continue;
+
+ spin_lock_irq(&sibling->active.lock);
+
+ /* Detachment is lazily performed in the execlists tasklet */
+ if (!RB_EMPTY_NODE(node))
+ rb_erase_cached(node, &sibling->execlists.virtual);
+
+ spin_unlock_irq(&sibling->active.lock);
+ }
+ GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+
+ lrc_fini(&ve->context);
+ intel_context_fini(&ve->context);
+
+ intel_breadcrumbs_free(ve->base.breadcrumbs);
+ intel_engine_free_request_pool(&ve->base);
+
+ kfree(ve->bonds);
+ kfree(ve);
+}
+
+static void virtual_context_destroy(struct kref *kref)
+{
+ struct virtual_engine *ve =
+ container_of(kref, typeof(*ve), context.ref);
+
+ GEM_BUG_ON(!list_empty(&ve->context.signals));
+
+ /*
+ * When destroying the virtual engine, we have to be aware that
+ * it may still be in use from a hardirq/softirq context, causing
+ * the resubmission of a completed request (background completion
+ * due to preempt-to-busy). Before we can free the engine, we need
+ * to flush the submission code and tasklets that are still potentially
+ * accessing the engine. Flushing the tasklets requires process context,
+ * and since we can guard the resubmit onto the engine with an RCU read
+ * lock, we can delegate the free of the engine to an RCU worker.
+ */
+ INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
+ queue_rcu_work(system_wq, &ve->rcu);
+}
+
+static void virtual_engine_initial_hint(struct virtual_engine *ve)
+{
+ int swp;
+
+ /*
+ * Pick a random sibling on starting to help spread the load around.
+ *
+ * New contexts are typically created with exactly the same order
+ * of siblings, and often started in batches. Due to the way we iterate
+ * the array of siblings when submitting requests, sibling[0] is
+ * prioritised for dequeuing. If we make sure that sibling[0] is fairly
+ * randomised across the system, we also help spread the load by the
+ * first engine we inspect being different each time.
+ *
+ * NB This does not force us to execute on this engine, it will just
+ * typically be the first we inspect for submission.
+ */
+ swp = prandom_u32_max(ve->num_siblings);
+ if (swp)
+ swap(ve->siblings[swp], ve->siblings[0]);
+}
+
+static int virtual_context_alloc(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ return lrc_alloc(ce, ve->siblings[0]);
+}
+
+static int virtual_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ /* Note: we must use a real engine class for setting up reg state */
+ return lrc_pre_pin(ce, ve->siblings[0], ww, vaddr);
+}
+
+static int virtual_context_pin(struct intel_context *ce, void *vaddr)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ return lrc_pin(ce, ve->siblings[0], vaddr);
+}
+
+static void virtual_context_enter(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ unsigned int n;
+
+ for (n = 0; n < ve->num_siblings; n++)
+ intel_engine_pm_get(ve->siblings[n]);
+
+ intel_timeline_enter(ce->timeline);
+}
+
+static void virtual_context_exit(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ unsigned int n;
+
+ intel_timeline_exit(ce->timeline);
+
+ for (n = 0; n < ve->num_siblings; n++)
+ intel_engine_pm_put(ve->siblings[n]);
+}
+
+static const struct intel_context_ops virtual_context_ops = {
+ .flags = COPS_HAS_INFLIGHT,
+
+ .alloc = virtual_context_alloc,
+
+ .pre_pin = virtual_context_pre_pin,
+ .pin = virtual_context_pin,
+ .unpin = lrc_unpin,
+ .post_unpin = lrc_post_unpin,
+
+ .enter = virtual_context_enter,
+ .exit = virtual_context_exit,
+
+ .destroy = virtual_context_destroy,
+};
+
+static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
+{
+ struct i915_request *rq;
+ intel_engine_mask_t mask;
+
+ rq = READ_ONCE(ve->request);
+ if (!rq)
+ return 0;
+
+ /* The rq is ready for submission; rq->execution_mask is now stable. */
+ mask = rq->execution_mask;
+ if (unlikely(!mask)) {
+ /* Invalid selection, submit to a random engine in error */
+ i915_request_set_error_once(rq, -ENODEV);
+ mask = ve->siblings[0]->mask;
+ }
+
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
+ rq->fence.context, rq->fence.seqno,
+ mask, ve->base.execlists.queue_priority_hint);
+
+ return mask;
+}
+
+static void virtual_submission_tasklet(unsigned long data)
+{
+ struct virtual_engine * const ve = (struct virtual_engine *)data;
+ const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
+ intel_engine_mask_t mask;
+ unsigned int n;
+
+ rcu_read_lock();
+ mask = virtual_submission_mask(ve);
+ rcu_read_unlock();
+ if (unlikely(!mask))
+ return;
+
+ for (n = 0; n < ve->num_siblings; n++) {
+ struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
+ struct ve_node * const node = &ve->nodes[sibling->id];
+ struct rb_node **parent, *rb;
+ bool first;
+
+ if (!READ_ONCE(ve->request))
+ break; /* already handled by a sibling's tasklet */
+
+ spin_lock_irq(&sibling->active.lock);
+
+ if (unlikely(!(mask & sibling->mask))) {
+ if (!RB_EMPTY_NODE(&node->rb)) {
+ rb_erase_cached(&node->rb,
+ &sibling->execlists.virtual);
+ RB_CLEAR_NODE(&node->rb);
+ }
+
+ goto unlock_engine;
+ }
+
+ if (unlikely(!RB_EMPTY_NODE(&node->rb))) {
+ /*
+ * Cheat and avoid rebalancing the tree if we can
+ * reuse this node in situ.
+ */
+ first = rb_first_cached(&sibling->execlists.virtual) ==
+ &node->rb;
+ if (prio == node->prio || (prio > node->prio && first))
+ goto submit_engine;
+
+ rb_erase_cached(&node->rb, &sibling->execlists.virtual);
+ }
+
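+ /*
+ * Insert this node into the sibling's rbtree of waiting virtual
+ * engines, ordered by priority; "first" tracks whether we end up
+ * leftmost (highest priority), in which case the sibling's
+ * submission tasklet is kicked below.
+ */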
+ rb = NULL;
+ first = true;
+ parent = &sibling->execlists.virtual.rb_root.rb_node;
+ while (*parent) {
+ struct ve_node *other;
+
+ rb = *parent;
+ other = rb_entry(rb, typeof(*other), rb);
+ if (prio > other->prio) {
+ parent = &rb->rb_left;
+ } else {
+ parent = &rb->rb_right;
+ first = false;
+ }
+ }
+
+ rb_link_node(&node->rb, rb, parent);
+ rb_insert_color_cached(&node->rb,
+ &sibling->execlists.virtual,
+ first);
+
+submit_engine:
+ GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
+ node->prio = prio;
+ if (first && prio > sibling->execlists.queue_priority_hint)
+ tasklet_hi_schedule(&sibling->execlists.tasklet);
+
+unlock_engine:
+ spin_unlock_irq(&sibling->active.lock);
+
+ if (intel_context_inflight(&ve->context))
+ break;
+ }
+}
+
+static void virtual_submit_request(struct i915_request *rq)
+{
+ struct virtual_engine *ve = to_virtual_engine(rq->engine);
+ unsigned long flags;
+
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
+ rq->fence.context,
+ rq->fence.seqno);
+
+ GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
+
+ spin_lock_irqsave(&ve->base.active.lock, flags);
+
+ /* By the time we resubmit a request, it may be completed */
+ if (__i915_request_is_complete(rq)) {
+ __i915_request_submit(rq);
+ goto unlock;
+ }
+
+ if (ve->request) { /* background completion from preempt-to-busy */
+ GEM_BUG_ON(!__i915_request_is_complete(ve->request));
+ __i915_request_submit(ve->request);
+ i915_request_put(ve->request);
+ }
+
+ ve->base.execlists.queue_priority_hint = rq_prio(rq);
+ ve->request = i915_request_get(rq);
+
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+ list_move_tail(&rq->sched.link, virtual_queue(ve));
+
+ tasklet_hi_schedule(&ve->base.execlists.tasklet);
+
+unlock:
+ spin_unlock_irqrestore(&ve->base.active.lock, flags);
+}
+
+static struct ve_bond *
+virtual_find_bond(struct virtual_engine *ve,
+ const struct intel_engine_cs *master)
+{
+ int i;
+
+ for (i = 0; i < ve->num_bonds; i++) {
+ if (ve->bonds[i].master == master)
+ return &ve->bonds[i];
+ }
+
+ return NULL;
+}
+
+static void
+virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
+{
+ struct virtual_engine *ve = to_virtual_engine(rq->engine);
+ intel_engine_mask_t allowed, exec;
+ struct ve_bond *bond;
+
+ allowed = ~to_request(signal)->engine->mask;
+
+ bond = virtual_find_bond(ve, to_request(signal)->engine);
+ if (bond)
+ allowed &= bond->sibling_mask;
+
+ /* Restrict the bonded request to run on only the available engines */
+ exec = READ_ONCE(rq->execution_mask);
+ while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
+ ;
+
+ /* Prevent the master from being re-run on the bonded engines */
+ to_request(signal)->execution_mask &= ~allowed;
+}
+
+struct intel_context *
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
+ unsigned int count)
+{
+ struct virtual_engine *ve;
+ unsigned int n;
+ int err;
+
+ if (count == 0)
+ return ERR_PTR(-EINVAL);
+
+ if (count == 1)
+ return intel_context_create(siblings[0]);
+
+ ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
+ if (!ve)
+ return ERR_PTR(-ENOMEM);
+
+ ve->base.i915 = siblings[0]->i915;
+ ve->base.gt = siblings[0]->gt;
+ ve->base.uncore = siblings[0]->uncore;
+ ve->base.id = -1;
+
+ ve->base.class = OTHER_CLASS;
+ ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
+ ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+ ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+
+ /*
+ * The decision on whether to submit a request using semaphores
+ * depends on the saturated state of the engine. We only compute
+ * this during HW submission of the request, and we need this
+ * state to be globally applied to all requests being submitted
+ * to this engine. Virtual engines encompass more than one physical
+ * engine and so we cannot accurately tell in advance if one of those
+ * engines is already saturated and so cannot afford to use a semaphore
+ * and be pessimized in priority for doing so -- if we are the only
+ * context using semaphores after all other clients have stopped, we
+ * will be starved on the saturated system. Such a global switch for
+ * semaphores is less than ideal, but alas is the current compromise.
+ */
+ ve->base.saturated = ALL_ENGINES;
+
+ snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
+
+ intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
+ intel_engine_init_execlists(&ve->base);
+
+ ve->base.cops = &virtual_context_ops;
+ ve->base.request_alloc = execlists_request_alloc;
+
+ ve->base.schedule = i915_schedule;
+ ve->base.submit_request = virtual_submit_request;
+ ve->base.bond_execute = virtual_bond_execute;
+
+ INIT_LIST_HEAD(virtual_queue(ve));
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ tasklet_init(&ve->base.execlists.tasklet,
+ virtual_submission_tasklet,
+ (unsigned long)ve);
+
+ intel_context_init(&ve->context, &ve->base);
+
+ ve->base.breadcrumbs = intel_breadcrumbs_create(NULL);
+ if (!ve->base.breadcrumbs) {
+ err = -ENOMEM;
+ goto err_put;
+ }
+
+ for (n = 0; n < count; n++) {
+ struct intel_engine_cs *sibling = siblings[n];
+
+ GEM_BUG_ON(!is_power_of_2(sibling->mask));
+ if (sibling->mask & ve->base.mask) {
+ DRM_DEBUG("duplicate %s entry in load balancer\n",
+ sibling->name);
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ /*
+ * The virtual engine implementation is tightly coupled to
+ * the execlists backend -- we push requests directly
+ * into a tree inside each physical engine. We could support
+ * layering if we handle cloning of the requests and
+ * submitting a copy into each backend.
+ */
+ if (sibling->execlists.tasklet.func !=
+ execlists_submission_tasklet) {
+ err = -ENODEV;
+ goto err_put;
+ }
+
+ GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
+ RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);
+
+ ve->siblings[ve->num_siblings++] = sibling;
+ ve->base.mask |= sibling->mask;
+
+ /*
+ * All physical engines must be compatible for their emission
+ * functions (as we build the instructions during request
+ * construction and do not alter them before submission
+ * on the physical engine). We use the engine class as a guide
+ * here, although that could be refined.
+ */
+ if (ve->base.class != OTHER_CLASS) {
+ if (ve->base.class != sibling->class) {
+ DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
+ sibling->class, ve->base.class);
+ err = -EINVAL;
+ goto err_put;
+ }
+ continue;
+ }
+
+ ve->base.class = sibling->class;
+ ve->base.uabi_class = sibling->uabi_class;
+ snprintf(ve->base.name, sizeof(ve->base.name),
+ "v%dx%d", ve->base.class, count);
+ ve->base.context_size = sibling->context_size;
+
+ ve->base.emit_bb_start = sibling->emit_bb_start;
+ ve->base.emit_flush = sibling->emit_flush;
+ ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
+ ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
+ ve->base.emit_fini_breadcrumb_dw =
+ sibling->emit_fini_breadcrumb_dw;
+
+ ve->base.flags = sibling->flags;
+ }
+
+ ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
+
+ virtual_engine_initial_hint(ve);
+ return &ve->context;
+
+err_put:
+ intel_context_put(&ve->context);
+ return ERR_PTR(err);
+}
+
+struct intel_context *
+intel_execlists_clone_virtual(struct intel_engine_cs *src)
+{
+ struct virtual_engine *se = to_virtual_engine(src);
+ struct intel_context *dst;
+
+ dst = intel_execlists_create_virtual(se->siblings,
+ se->num_siblings);
+ if (IS_ERR(dst))
+ return dst;
+
+ if (se->num_bonds) {
+ struct virtual_engine *de = to_virtual_engine(dst->engine);
+
+ de->bonds = kmemdup(se->bonds,
+ sizeof(*se->bonds) * se->num_bonds,
+ GFP_KERNEL);
+ if (!de->bonds) {
+ intel_context_put(dst);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ de->num_bonds = se->num_bonds;
+ }
+
+ return dst;
+}
+
+int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
+ const struct intel_engine_cs *master,
+ const struct intel_engine_cs *sibling)
+{
+ struct virtual_engine *ve = to_virtual_engine(engine);
+ struct ve_bond *bond;
+ int n;
+
+ /* Sanity check the sibling is part of the virtual engine */
+ for (n = 0; n < ve->num_siblings; n++)
+ if (sibling == ve->siblings[n])
+ break;
+ if (n == ve->num_siblings)
+ return -EINVAL;
+
+ bond = virtual_find_bond(ve, master);
+ if (bond) {
+ bond->sibling_mask |= sibling->mask;
+ return 0;
+ }
+
+ bond = krealloc(ve->bonds,
+ sizeof(*bond) * (ve->num_bonds + 1),
+ GFP_KERNEL);
+ if (!bond)
+ return -ENOMEM;
+
+ bond[ve->num_bonds].master = master;
+ bond[ve->num_bonds].sibling_mask = sibling->mask;
+
+ ve->bonds = bond;
+ ve->num_bonds++;
+
+ return 0;
+}
+
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent),
+ unsigned int max)
+{
+ const struct intel_engine_execlists *execlists = &engine->execlists;
+ struct i915_request *rq, *last;
+ unsigned long flags;
+ unsigned int count;
+ struct rb_node *rb;
+
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ last = NULL;
+ count = 0;
+ list_for_each_entry(rq, &engine->active.requests, sched.link) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d executing requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ if (execlists->queue_priority_hint != INT_MIN)
+ drm_printf(m, "\t\tQueue priority hint: %d\n",
+ READ_ONCE(execlists->queue_priority_hint));
+
+ last = NULL;
+ count = 0;
+ for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+ struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+ int i;
+
+ priolist_for_each_request(rq, p, i) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d queued requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ last = NULL;
+ count = 0;
+ for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq = READ_ONCE(ve->request);
+
+ if (rq) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d virtual requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
+{
+ return engine->set_default_submission ==
+ execlists_set_default_submission;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_execlists.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
new file mode 100644
index 000000000000..a8fd7adefd82
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#ifndef __INTEL_EXECLISTS_SUBMISSION_H__
+#define __INTEL_EXECLISTS_SUBMISSION_H__
+
+#include <linux/types.h>
+
+struct drm_printer;
+
+struct i915_request;
+struct intel_context;
+struct intel_engine_cs;
+
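+/*
+ * Context scheduling status events, reported as a context moves through
+ * the execlists (e.g. consumed by context status-change notifiers).
+ */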
+enum {
+ INTEL_CONTEXT_SCHEDULE_IN = 0,
+ INTEL_CONTEXT_SCHEDULE_OUT,
+ INTEL_CONTEXT_SCHEDULE_PREEMPTED,
+};
+
+int intel_execlists_submission_setup(struct intel_engine_cs *engine);
+
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent),
+ unsigned int max);
+
+struct intel_context *
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
+ unsigned int count);
+
+struct intel_context *
+intel_execlists_clone_virtual(struct intel_engine_cs *src);
+
+int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
+ const struct intel_engine_cs *master,
+ const struct intel_engine_cs *sibling);
+
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
+
+#endif /* __INTEL_EXECLISTS_SUBMISSION_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index cf94525be2c1..700588bc9d57 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -101,7 +101,16 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
- return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
+ if (!intel_vtd_active())
+ return false;
+
+ if (IS_GEN(i915, 5) && IS_MOBILE(i915))
+ return true;
+
+ if (IS_GEN(i915, 12))
+ return true; /* XXX DMAR fault reason 7 */
+
+ return false;
}
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
@@ -526,16 +535,39 @@ static int init_ggtt(struct i915_ggtt *ggtt)
mutex_init(&ggtt->error_mutex);
if (ggtt->mappable_end) {
- /* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
- &ggtt->error_capture,
- PAGE_SIZE, 0,
- I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
- if (ret)
- return ret;
+ /*
+ * Reserve a mappable slot for our lockless error capture.
+ *
+ * We strongly prefer taking address 0x0 in order to protect
+ * other critical buffers against accidental overwrites,
+ * as writing to address 0 is a very common mistake.
+ *
+ * Since 0 may already be in use by the system (e.g. the BIOS
+ * framebuffer), we let the reservation fail quietly and hope
+ * 0 remains reserved always.
+ *
+ * If we fail to reserve 0, and then fail to find any space
+ * for an error-capture, remain silent. We can afford not
+ * to reserve an error_capture node as we have fallback
+ * paths, and we trust that 0 will remain reserved. However,
+ * the only likely reason for failure to insert is a driver
+ * bug, which we expect to cause other failures...
+ */
+ ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
+ ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
+ if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
+ drm_mm_insert_node_in_range(&ggtt->vm.mm,
+ &ggtt->error_capture,
+ ggtt->error_capture.size, 0,
+ ggtt->error_capture.color,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
}
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_dbg(&ggtt->vm.i915->drm,
+ "Reserved GGTT:[%llx, %llx] for use by error capture\n",
+ ggtt->error_capture.start,
+ ggtt->error_capture.start + ggtt->error_capture.size);
/*
* The upper portion of the GuC address space has a sizeable hole
@@ -548,9 +580,9 @@ static int init_ggtt(struct i915_ggtt *ggtt)
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
- drm_dbg_kms(&ggtt->vm.i915->drm,
- "clearing unused GTT space: [%lx, %lx]\n",
- hole_start, hole_end);
+ drm_dbg(&ggtt->vm.i915->drm,
+ "clearing unused GTT space: [%lx, %lx]\n",
+ hole_start, hole_end);
ggtt->vm.clear_range(&ggtt->vm, hole_start,
hole_end - hole_start);
}
@@ -1050,7 +1082,12 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
- ggtt->do_idle_maps = needs_idle_maps(i915);
+ if (needs_idle_maps(i915)) {
+ drm_notice(&i915->drm,
+ "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
+ ggtt->do_idle_maps = true;
+ }
+
ggtt->vm.insert_page = i915_ggtt_insert_page;
ggtt->vm.insert_entries = i915_ggtt_insert_entries;
ggtt->vm.clear_range = i915_ggtt_clear_range;
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 7fb36b12fe7a..a357bb431815 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -320,13 +320,31 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
fence_write(fence);
}
+static bool fence_is_active(const struct i915_fence_reg *fence)
+{
+ return fence->vma && i915_vma_is_active(fence->vma);
+}
+
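+/*
+ * fence_find() scans the fence list preferring idle fences: active ones
+ * are rotated to the tail and skipped. The "active" marker records the
+ * first active fence seen; meeting it again means the scan has wrapped,
+ * so we stop skipping and settle for any unpinned fence.
+ */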
static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
- struct i915_fence_reg *fence;
+ struct i915_fence_reg *active = NULL;
+ struct i915_fence_reg *fence, *fn;
- list_for_each_entry(fence, &ggtt->fence_list, link) {
+ list_for_each_entry_safe(fence, fn, &ggtt->fence_list, link) {
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
+ if (fence == active) /* now seen this fence twice */
+ active = ERR_PTR(-EAGAIN);
+
+ /* Prefer idle fences so we do not have to wait on the GPU */
+ if (active != ERR_PTR(-EAGAIN) && fence_is_active(fence)) {
+ if (!active)
+ active = fence;
+
+ list_move_tail(&fence->link, &ggtt->fence_list);
+ continue;
+ }
+
if (atomic_read(&fence->pin_count))
continue;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 44f1d51e5ae5..d8e1ab412634 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -46,6 +46,8 @@ void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
int intel_gt_init_mmio(struct intel_gt *gt)
{
+ intel_gt_init_clock_frequency(gt);
+
intel_uc_init_mmio(&gt->uc);
intel_sseu_info_init(gt);
@@ -546,8 +548,6 @@ int intel_gt_init(struct intel_gt *gt)
*/
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
- intel_gt_init_clock_frequency(gt);
-
err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
if (err)
goto out_fw;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 104cb30e8c13..06d84cf09570 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -145,7 +145,8 @@ static void pool_retire(struct i915_active *ref)
}
static struct intel_gt_buffer_pool_node *
-node_create(struct intel_gt_buffer_pool *pool, size_t sz)
+node_create(struct intel_gt_buffer_pool *pool, size_t sz,
+ enum i915_map_type type)
{
struct intel_gt *gt = to_gt(pool);
struct intel_gt_buffer_pool_node *node;
@@ -169,12 +170,14 @@ node_create(struct intel_gt_buffer_pool *pool, size_t sz)
i915_gem_object_set_readonly(obj);
+ node->type = type;
node->obj = obj;
return node;
}
struct intel_gt_buffer_pool_node *
-intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
+ enum i915_map_type type)
{
struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
struct intel_gt_buffer_pool_node *node;
@@ -191,6 +194,9 @@ intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
if (node->obj->base.size < size)
continue;
+ if (node->type != type)
+ continue;
+
age = READ_ONCE(node->age);
if (!age)
continue;
@@ -205,7 +211,7 @@ intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
rcu_read_unlock();
if (&node->link == list) {
- node = node_create(pool, size);
+ node = node_create(pool, size, type);
if (IS_ERR(node))
return node;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
index 42cbac003e8a..6068f8f1762e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
@@ -15,7 +15,8 @@ struct intel_gt;
struct i915_request;
struct intel_gt_buffer_pool_node *
-intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size);
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
+ enum i915_map_type type);
static inline int
intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
index bcf1658c9633..d8d82c890da8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
@@ -11,10 +11,9 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include "gem/i915_gem_object_types.h"
#include "i915_active_types.h"
-struct drm_i915_gem_object;
-
struct intel_gt_buffer_pool {
spinlock_t lock;
struct list_head cache_list[4];
@@ -31,6 +30,7 @@ struct intel_gt_buffer_pool_node {
struct rcu_head rcu;
};
unsigned long age;
+ enum i915_map_type type;
};
#endif /* INTEL_GT_BUFFER_POOL_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index 999079686846..a4242ca8dcd7 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -7,34 +7,146 @@
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
-#define MHZ_12 12000000 /* 12MHz (24MHz/2), 83.333ns */
-#define MHZ_12_5 12500000 /* 12.5MHz (25MHz/2), 80ns */
-#define MHZ_19_2 19200000 /* 19.2MHz, 52.083ns */
+static u32 read_reference_ts_freq(struct intel_uncore *uncore)
+{
+ u32 ts_override = intel_uncore_read(uncore, GEN9_TIMESTAMP_OVERRIDE);
+ u32 base_freq, frac_freq;
+
+ base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
+ base_freq *= 1000000;
+
+ frac_freq = ((ts_override &
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
+ frac_freq = 1000000 / (frac_freq + 1);
+
+ return base_freq + frac_freq;
+}
+
+static u32 gen10_get_crystal_clock_freq(struct intel_uncore *uncore,
+ u32 rpm_config_reg)
+{
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
+ u32 crystal_clock =
+ (rpm_config_reg & GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
+ GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-static u32 read_clock_frequency(const struct intel_gt *gt)
+ switch (crystal_clock) {
+ case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+ return f19_2_mhz;
+ case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+ return f24_mhz;
+ default:
+ MISSING_CASE(crystal_clock);
+ return 0;
+ }
+}
+
+static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
+ u32 rpm_config_reg)
{
- if (INTEL_GEN(gt->i915) >= 11) {
- u32 config;
-
- config = intel_uncore_read(gt->uncore, RPM_CONFIG0);
- config &= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK;
- config >>= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
- switch (config) {
- case 0: return MHZ_12;
- case 1:
- case 2: return MHZ_19_2;
- default:
- case 3: return MHZ_12_5;
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
+ u32 f25_mhz = 25000000;
+ u32 f38_4_mhz = 38400000;
+ u32 crystal_clock =
+ (rpm_config_reg & GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
+ GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
+
+ switch (crystal_clock) {
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+ return f24_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+ return f19_2_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
+ return f38_4_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
+ return f25_mhz;
+ default:
+ MISSING_CASE(crystal_clock);
+ return 0;
+ }
+}
+
+static u32 read_clock_frequency(struct intel_uncore *uncore)
+{
+ u32 f12_5_mhz = 12500000;
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
+
+ if (INTEL_GEN(uncore->i915) <= 4) {
+ /*
+ * PRMs say:
+ *
+ * "The value in this register increments once every 16
+ * hclks." (through the “Clocking Configurationâ€
+ * (“CLKCFGâ€) MCHBAR register)
+ */
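+ /* rawclk_freq is stored in kHz: scale to Hz, then apply the 16-hclk divider quoted above */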
+ return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
+ } else if (INTEL_GEN(uncore->i915) <= 8) {
+ /*
+ * PRMs say:
+ *
+ * "The PCU TSC counts 10ns increments; this timestamp
+ * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
+ * rolling over every 1.5 hours).
+ */
+ return f12_5_mhz;
+ } else if (INTEL_GEN(uncore->i915) <= 9) {
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+ u32 freq = 0;
+
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(uncore);
+ } else {
+ freq = IS_GEN9_LP(uncore->i915) ? f19_2_mhz : f24_mhz;
+
+ /*
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycles).
+ */
+ freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
+ CTC_SHIFT_PARAMETER_SHIFT);
}
- } else if (INTEL_GEN(gt->i915) >= 9) {
- if (IS_GEN9_LP(gt->i915))
- return MHZ_19_2;
- else
- return MHZ_12;
- } else {
- return MHZ_12_5;
+
+ return freq;
+ } else if (INTEL_GEN(uncore->i915) <= 12) {
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+ u32 freq = 0;
+
+ /*
+ * First figure out the reference frequency. There are 2 ways
+ * we can compute the frequency, either through the
+ * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
+ * tells us which one we should use.
+ */
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(uncore);
+ } else {
+ u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
+
+ if (INTEL_GEN(uncore->i915) <= 10)
+ freq = gen10_get_crystal_clock_freq(uncore, c0);
+ else
+ freq = gen11_get_crystal_clock_freq(uncore, c0);
+
+ /*
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycles).
+ */
+ freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
+ }
+
+ return freq;
}
+
+ MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
+ return 0;
}
void intel_gt_init_clock_frequency(struct intel_gt *gt)
@@ -43,20 +155,27 @@ void intel_gt_init_clock_frequency(struct intel_gt *gt)
* Note that on gen11+, the clock frequency may be reconfigured.
* We do not, and we assume nobody else does.
*/
- gt->clock_frequency = read_clock_frequency(gt);
+ gt->clock_frequency = read_clock_frequency(gt->uncore);
+ if (gt->clock_frequency)
+ gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
+
GT_TRACE(gt,
- "Using clock frequency: %dkHz\n",
- gt->clock_frequency / 1000);
+ "Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
+ gt->clock_frequency / 1000,
+ gt->clock_period_ns,
+ div_u64(mul_u32_u32(gt->clock_period_ns, S32_MAX),
+ USEC_PER_SEC));
+
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void intel_gt_check_clock_frequency(const struct intel_gt *gt)
{
- if (gt->clock_frequency != read_clock_frequency(gt)) {
+ if (gt->clock_frequency != read_clock_frequency(gt->uncore)) {
dev_err(gt->i915->drm.dev,
"GT clock frequency changed, was %uHz, now %uHz!\n",
gt->clock_frequency,
- read_clock_frequency(gt));
+ read_clock_frequency(gt->uncore));
}
}
#endif
@@ -66,26 +185,24 @@ static u64 div_u64_roundup(u64 nom, u32 den)
return div_u64(nom + den - 1, den);
}
-u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count)
+u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count)
{
- return div_u64_roundup(mul_u32_u32(count, 1000 * 1000 * 1000),
- gt->clock_frequency);
+ return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency);
}
-u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count)
+u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
{
return intel_gt_clock_interval_to_ns(gt, 16 * count);
}
-u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns)
+u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns)
{
- return div_u64_roundup(mul_u32_u32(gt->clock_frequency, ns),
- 1000 * 1000 * 1000);
+ return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC);
}
-u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns)
+u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
{
- u32 val;
+ u64 val;
/*
* Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
@@ -94,9 +211,9 @@ u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns)
* EI/thresholds are "bad", leading to a very sluggish or even
* frozen machine.
*/
- val = DIV_ROUND_UP(intel_gt_ns_to_clock_interval(gt, ns), 16);
+ val = div_u64_roundup(intel_gt_ns_to_clock_interval(gt, ns), 16);
if (IS_GEN(gt->i915, 6))
- val = roundup(val, 25);
+ val = div_u64_roundup(val, 25) * 25;
return val;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
index f793c89f2cbd..8b03e97a85df 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
@@ -18,10 +18,10 @@ void intel_gt_check_clock_frequency(const struct intel_gt *gt);
static inline void intel_gt_check_clock_frequency(const struct intel_gt *gt) {}
#endif
-u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count);
-u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count);
+u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count);
+u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count);
-u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns);
-u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns);
+u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns);
+u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns);
#endif /* __INTEL_GT_CLOCK_UTILS_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 257063a57101..9830342aa6f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -11,6 +11,7 @@
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
+#include "intel_lrc_reg.h"
#include "intel_uncore.h"
#include "intel_rps.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 274aa0dd7050..c94e8ac884eb 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -39,6 +39,28 @@ static void user_forcewake(struct intel_gt *gt, bool suspend)
intel_gt_pm_put(gt);
}
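+/*
+ * runtime_begin/runtime_end bracket the GT unpark/park transitions and
+ * accumulate the total awake time under gt->stats.lock (a seqcount), so
+ * intel_gt_get_awake_time() can take a lock-free, retry-based snapshot.
+ */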
+static void runtime_begin(struct intel_gt *gt)
+{
+ local_irq_disable();
+ write_seqcount_begin(&gt->stats.lock);
+ gt->stats.start = ktime_get();
+ gt->stats.active = true;
+ write_seqcount_end(&gt->stats.lock);
+ local_irq_enable();
+}
+
+static void runtime_end(struct intel_gt *gt)
+{
+ local_irq_disable();
+ write_seqcount_begin(&gt->stats.lock);
+ gt->stats.active = false;
+ gt->stats.total =
+ ktime_add(gt->stats.total,
+ ktime_sub(ktime_get(), gt->stats.start));
+ write_seqcount_end(&gt->stats.lock);
+ local_irq_enable();
+}
+
static int __gt_unpark(struct intel_wakeref *wf)
{
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
@@ -67,6 +89,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
i915_pmu_gt_unparked(i915);
intel_gt_unpark_requests(gt);
+ runtime_begin(gt);
return 0;
}
@@ -79,6 +102,7 @@ static int __gt_park(struct intel_wakeref *wf)
GT_TRACE(gt, "\n");
+ runtime_end(gt);
intel_gt_park_requests(gt);
i915_vma_parked(gt);
@@ -106,6 +130,7 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_gt_pm_init_early(struct intel_gt *gt)
{
intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
+ seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}
void intel_gt_pm_init(struct intel_gt *gt)
@@ -339,6 +364,30 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
return intel_uc_runtime_resume(&gt->uc);
}
+static ktime_t __intel_gt_get_awake_time(const struct intel_gt *gt)
+{
+ ktime_t total = gt->stats.total;
+
+ if (gt->stats.active)
+ total = ktime_add(total,
+ ktime_sub(ktime_get(), gt->stats.start));
+
+ return total;
+}
+
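+/*
+ * Sample the stats under the seqcount: retry if a park/unpark transition
+ * raced with us, so the returned awake time is a consistent snapshot.
+ */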
+ktime_t intel_gt_get_awake_time(const struct intel_gt *gt)
+{
+ unsigned int seq;
+ ktime_t total;
+
+ do {
+ seq = read_seqcount_begin(&gt->stats.lock);
+ total = __intel_gt_get_awake_time(gt);
+ } while (read_seqcount_retry(&gt->stats.lock, seq));
+
+ return total;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_gt_pm.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 60f0e2fbe55c..63846a856e7e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -58,6 +58,8 @@ int intel_gt_resume(struct intel_gt *gt);
void intel_gt_runtime_suspend(struct intel_gt *gt);
int intel_gt_runtime_resume(struct intel_gt *gt);
+ktime_t intel_gt_get_awake_time(const struct intel_gt *gt);
+
static inline bool is_mock_gt(const struct intel_gt *gt)
{
return I915_SELFTEST_ONLY(gt->awake == -ENODEV);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 66fcbf9d0fdd..dc06c78c9eeb 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -135,13 +135,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
struct intel_gt_timelines *timelines = &gt->timelines;
struct intel_timeline *tl, *tn;
unsigned long active_count = 0;
- bool interruptible;
LIST_HEAD(free);
- interruptible = true;
- if (unlikely(timeout < 0))
- timeout = -timeout, interruptible = false;
-
flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */
spin_lock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
@@ -163,7 +158,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
mutex_unlock(&tl->mutex);
timeout = dma_fence_wait_timeout(fence,
- interruptible,
+ true,
timeout);
dma_fence_put(fence);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 6d39a4a11bf3..a83d3e18254d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -75,6 +75,7 @@ struct intel_gt {
intel_wakeref_t awake;
u32 clock_frequency;
+ u32 clock_period_ns;
struct intel_llc llc;
struct intel_rc6 rc6;
@@ -87,6 +88,30 @@ struct intel_gt {
u32 pm_guc_events;
+ struct {
+ bool active;
+
+ /**
+ * @lock: Lock protecting the below fields.
+ */
+ seqcount_mutex_t lock;
+
+ /**
+ * @total: Total time the GT has been awake.
+ *
+ * Accumulated time not counting the most recent block in cases
+ * where the GT is currently awake (active is true).
+ */
+ ktime_t total;
+
+ /**
+ * @start: Timestamp of the last idle to active transition.
+ *
+ * Idle is defined as active == false, awake as active == true.
+ */
+ ktime_t start;
+ } stats;
+
struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
[MAX_ENGINE_INSTANCE + 1];
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 7bfe9072be9a..04aa6601e984 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -422,6 +422,35 @@ void setup_private_pat(struct intel_uncore *uncore)
bdw_setup_private_ppat(uncore);
}
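+/*
+ * Allocate, flag cache-coherent for CPU reads, and pin a scratch buffer
+ * that the GPU can write results into for later CPU readback; a GGTT vma
+ * is pinned GLOBAL, a ppGTT vma as USER.
+ */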
+struct i915_vma *
+__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0,
+ i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 8a33940a71f3..29c10fde8ce3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -573,6 +573,9 @@ int i915_vm_pin_pt_stash(struct i915_address_space *vm,
void i915_vm_free_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash);
+struct i915_vma *
+__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);
+
static inline struct sgt_dma {
struct scatterlist *sg;
dma_addr_t dma, max;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 26c7d0a50585..94f485b591af 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1,599 +1,23 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Ben Widawsky <ben@bwidawsk.net>
- * Michel Thierry <michel.thierry@intel.com>
- * Thomas Daniel <thomas.daniel@intel.com>
- * Oscar Mateo <oscar.mateo@intel.com>
- *
- */
-
-/**
- * DOC: Logical Rings, Logical Ring Contexts and Execlists
- *
- * Motivation:
- * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
- * These expanded contexts enable a number of new abilities, especially
- * "Execlists" (also implemented in this file).
- *
- * One of the main differences with the legacy HW contexts is that logical
- * ring contexts incorporate many more things to the context's state, like
- * PDPs or ringbuffer control registers:
- *
- * The reason why PDPs are included in the context is straightforward: as
- * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
- * contained there mean you don't need to do a ppgtt->switch_mm yourself,
- * instead, the GPU will do it for you on the context switch.
- *
- * But, what about the ringbuffer control registers (head, tail, etc..)?
- * shouldn't we just need a set of those per engine command streamer? This is
- * where the name "Logical Rings" starts to make sense: by virtualizing the
- * rings, the engine cs shifts to a new "ring buffer" with every context
- * switch. When you want to submit a workload to the GPU you: A) choose your
- * context, B) find its appropriate virtualized ring, C) write commands to it
- * and then, finally, D) tell the GPU to switch to that context.
- *
- * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
- * to a contexts is via a context execution list, ergo "Execlists".
- *
- * LRC implementation:
- * Regarding the creation of contexts, we have:
- *
- * - One global default context.
- * - One local default context for each opened fd.
- * - One local extra context for each context create ioctl call.
- *
- * Now that ringbuffers belong per-context (and not per-engine, like before)
- * and that contexts are uniquely tied to a given engine (and not reusable,
- * like before) we need:
- *
- * - One ringbuffer per-engine inside each context.
- * - One backing object per-engine inside each context.
- *
- * The global default context starts its life with these new objects fully
- * allocated and populated. The local default context for each opened fd is
- * more complex, because we don't know at creation time which engine is going
- * to use them. To handle this, we have implemented a deferred creation of LR
- * contexts:
- *
- * The local context starts its life as a hollow or blank holder, that only
- * gets populated for a given engine once we receive an execbuffer. If later
- * on we receive another execbuffer ioctl for the same context but a different
- * engine, we allocate/populate a new ringbuffer and context backing object and
- * so on.
- *
- * Finally, regarding local contexts created using the ioctl call: as they are
- * only allowed with the render ring, we can allocate & populate them right
- * away (no need to defer anything, at least for now).
- *
- * Execlists implementation:
- * Execlists are the new method by which, on gen8+ hardware, workloads are
- * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
- * This method works as follows:
- *
- * When a request is committed, its commands (the BB start and any leading or
- * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
- * for the appropriate context. The tail pointer in the hardware context is not
- * updated at this time, but instead, kept by the driver in the ringbuffer
- * structure. A structure representing this request is added to a request queue
- * for the appropriate engine: this structure contains a copy of the context's
- * tail after the request was written to the ring buffer and a pointer to the
- * context itself.
- *
- * If the engine's request queue was empty before the request was added, the
- * queue is processed immediately. Otherwise the queue will be processed during
- * a context switch interrupt. In any case, elements on the queue will get sent
- * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
- * globally unique 20-bits submission ID.
- *
- * When execution of a request completes, the GPU updates the context status
- * buffer with a context complete event and generates a context switch interrupt.
- * During the interrupt handling, the driver examines the events in the buffer:
- * for each context complete event, if the announced ID matches that on the head
- * of the request queue, then that request is retired and removed from the queue.
- *
- * After processing, if any requests were retired and the queue is not empty
- * then a new execution list can be submitted. The two requests at the front of
- * the queue are next to be submitted but since a context may not occur twice in
- * an execution list, if subsequent requests have the same ID as the first then
- * the two requests must be combined. This is done simply by discarding requests
- * at the head of the queue until either only one requests is left (in which case
- * we use a NULL second context) or the first two requests have unique IDs.
- *
- * By always executing the first two requests in the queue the driver ensures
- * that the GPU is kept as busy as possible. In the case where a single context
- * completes but a second context is still executing, the request for this second
- * context will be at the head of the queue when we remove the first one. This
- * request will then be resubmitted along with a new request for a different context,
- * which will cause the hardware to continue executing the second request and queue
- * the new request (the GPU detects the condition of a context getting preempted
- * with the same context and optimizes the context switch flow by not doing
- * preemption, but just sampling the new tail pointer).
- *
*/
-#include <linux/interrupt.h>
+#include "gen8_engine_cs.h"
#include "i915_drv.h"
#include "i915_perf.h"
-#include "i915_trace.h"
-#include "i915_vgpu.h"
-#include "intel_breadcrumbs.h"
-#include "intel_context.h"
-#include "intel_engine_pm.h"
+#include "intel_engine.h"
+#include "intel_gpu_commands.h"
#include "intel_gt.h"
-#include "intel_gt_pm.h"
-#include "intel_gt_requests.h"
+#include "intel_lrc.h"
#include "intel_lrc_reg.h"
-#include "intel_mocs.h"
-#include "intel_reset.h"
#include "intel_ring.h"
-#include "intel_workarounds.h"
#include "shmem_utils.h"
-#define RING_EXECLIST_QFULL (1 << 0x2)
-#define RING_EXECLIST1_VALID (1 << 0x3)
-#define RING_EXECLIST0_VALID (1 << 0x4)
-#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
-#define RING_EXECLIST1_ACTIVE (1 << 0x11)
-#define RING_EXECLIST0_ACTIVE (1 << 0x12)
-
-#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
-#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
-#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
-#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
-#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
-#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
-
-#define GEN8_CTX_STATUS_COMPLETED_MASK \
- (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
-
-#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)
-
-#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */
-#define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */
-#define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15)
-#define GEN12_IDLE_CTX_ID 0x7FF
-#define GEN12_CSB_CTX_VALID(csb_dw) \
- (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID)
-
-/* Typical size of the average request (2 pipecontrols and a MI_BB) */
-#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
-
-struct virtual_engine {
- struct intel_engine_cs base;
- struct intel_context context;
- struct rcu_work rcu;
-
- /*
- * We allow only a single request through the virtual engine at a time
- * (each request in the timeline waits for the completion fence of
- * the previous before being submitted). By restricting ourselves to
- * only submitting a single request, each request is placed on to a
- * physical to maximise load spreading (by virtue of the late greedy
- * scheduling -- each real engine takes the next available request
- * upon idling).
- */
- struct i915_request *request;
-
- /*
- * We keep a rbtree of available virtual engines inside each physical
- * engine, sorted by priority. Here we preallocate the nodes we need
- * for the virtual engine, indexed by physical_engine->id.
- */
- struct ve_node {
- struct rb_node rb;
- int prio;
- } nodes[I915_NUM_ENGINES];
-
- /*
- * Keep track of bonded pairs -- restrictions upon on our selection
- * of physical engines any particular request may be submitted to.
- * If we receive a submit-fence from a master engine, we will only
- * use one of sibling_mask physical engines.
- */
- struct ve_bond {
- const struct intel_engine_cs *master;
- intel_engine_mask_t sibling_mask;
- } *bonds;
- unsigned int num_bonds;
-
- /* And finally, which physical engines this virtual engine maps onto. */
- unsigned int num_siblings;
- struct intel_engine_cs *siblings[];
-};
-
-static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
-{
- GEM_BUG_ON(!intel_engine_is_virtual(engine));
- return container_of(engine, struct virtual_engine, base);
-}
-
-static int __execlists_context_alloc(struct intel_context *ce,
- struct intel_engine_cs *engine);
-
-static void execlists_init_reg_state(u32 *reg_state,
- const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- const struct intel_ring *ring,
- bool close);
-static void
-__execlists_update_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- u32 head);
-
-static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
-{
- if (INTEL_GEN(engine->i915) >= 12)
- return 0x60;
- else if (INTEL_GEN(engine->i915) >= 9)
- return 0x54;
- else if (engine->class == RENDER_CLASS)
- return 0x58;
- else
- return -1;
-}
-
-static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
-{
- if (INTEL_GEN(engine->i915) >= 12)
- return 0x74;
- else if (INTEL_GEN(engine->i915) >= 9)
- return 0x68;
- else if (engine->class == RENDER_CLASS)
- return 0xd8;
- else
- return -1;
-}
-
-static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
-{
- if (INTEL_GEN(engine->i915) >= 12)
- return 0x12;
- else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
- return 0x18;
- else
- return -1;
-}
-
-static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
-{
- int x;
-
- x = lrc_ring_wa_bb_per_ctx(engine);
- if (x < 0)
- return x;
-
- return x + 2;
-}
-
-static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
-{
- int x;
-
- x = lrc_ring_indirect_ptr(engine);
- if (x < 0)
- return x;
-
- return x + 2;
-}
-
-static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
-{
- if (engine->class != RENDER_CLASS)
- return -1;
-
- if (INTEL_GEN(engine->i915) >= 12)
- return 0xb6;
- else if (INTEL_GEN(engine->i915) >= 11)
- return 0xaa;
- else
- return -1;
-}
-
-static u32
-lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
-{
- switch (INTEL_GEN(engine->i915)) {
- default:
- MISSING_CASE(INTEL_GEN(engine->i915));
- fallthrough;
- case 12:
- return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- case 11:
- return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- case 10:
- return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- case 9:
- return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- case 8:
- return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
- }
-}
-
-static void
-lrc_ring_setup_indirect_ctx(u32 *regs,
- const struct intel_engine_cs *engine,
- u32 ctx_bb_ggtt_addr,
- u32 size)
-{
- GEM_BUG_ON(!size);
- GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
- GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
- regs[lrc_ring_indirect_ptr(engine) + 1] =
- ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);
-
- GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
- regs[lrc_ring_indirect_offset(engine) + 1] =
- lrc_ring_indirect_offset_default(engine) << 6;
-}
-
-static u32 intel_context_get_runtime(const struct intel_context *ce)
-{
- /*
- * We can use either ppHWSP[16] which is recorded before the context
- * switch (and so excludes the cost of context switches) or use the
- * value from the context image itself, which is saved/restored earlier
- * and so includes the cost of the save.
- */
- return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
-}
-
-static void mark_eio(struct i915_request *rq)
-{
- if (i915_request_completed(rq))
- return;
-
- GEM_BUG_ON(i915_request_signaled(rq));
-
- i915_request_set_error_once(rq, -EIO);
- i915_request_mark_complete(rq);
-}
-
-static struct i915_request *
-active_request(const struct intel_timeline * const tl, struct i915_request *rq)
-{
- struct i915_request *active = rq;
-
- rcu_read_lock();
- list_for_each_entry_continue_reverse(rq, &tl->requests, link) {
- if (i915_request_completed(rq))
- break;
-
- active = rq;
- }
- rcu_read_unlock();
-
- return active;
-}
-
-static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
-{
- return (i915_ggtt_offset(engine->status_page.vma) +
- I915_GEM_HWS_PREEMPT_ADDR);
-}
-
-static inline void
-ring_set_paused(const struct intel_engine_cs *engine, int state)
-{
- /*
- * We inspect HWS_PREEMPT with a semaphore inside
- * engine->emit_fini_breadcrumb. If the dword is true,
- * the ring is paused as the semaphore will busywait
- * until the dword is false.
- */
- engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
- if (state)
- wmb();
-}
-
-static inline struct i915_priolist *to_priolist(struct rb_node *rb)
-{
- return rb_entry(rb, struct i915_priolist, node);
-}
-
-static inline int rq_prio(const struct i915_request *rq)
-{
- return READ_ONCE(rq->sched.attr.priority);
-}
-
-static int effective_prio(const struct i915_request *rq)
-{
- int prio = rq_prio(rq);
-
- /*
- * If this request is special and must not be interrupted at any
- * cost, so be it. Note we are only checking the most recent request
- * in the context and so may be masking an earlier vip request. It
- * is hoped that under the conditions where nopreempt is used, this
- * will not matter (i.e. all requests to that context will be
- * nopreempt for as long as desired).
- */
- if (i915_request_has_nopreempt(rq))
- prio = I915_PRIORITY_UNPREEMPTABLE;
-
- return prio;
-}
-
-static int queue_prio(const struct intel_engine_execlists *execlists)
-{
- struct i915_priolist *p;
- struct rb_node *rb;
-
- rb = rb_first_cached(&execlists->queue);
- if (!rb)
- return INT_MIN;
-
- /*
- * As the priolist[] are inverted, with the highest priority in [0],
-	 * we have to flip the index value to recover the priority.
- */
- p = to_priolist(rb);
- if (!I915_USER_PRIORITY_SHIFT)
- return p->priority;
-
- return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
-}
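
A rough worked example of the inverted indexing above may help (a sketch only; it assumes, purely for illustration, that I915_USER_PRIORITY_SHIFT is 2 and that p->used is a bitmask of occupied sub-levels with bit 0 the highest):

	/* band 0, highest occupied sub-level: used = 0b001, ffs() == 1 */
	((0 + 1) << 2) - 1;	/* == 3, the top of band 0 */
	/* band 0, a lower sub-level:          used = 0b100, ffs() == 3 */
	((0 + 1) << 2) - 3;	/* == 1, still inside band 0 */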
-
-static inline bool need_preempt(const struct intel_engine_cs *engine,
- const struct i915_request *rq,
- struct rb_node *rb)
-{
- int last_prio;
-
- if (!intel_engine_has_semaphores(engine))
- return false;
-
- /*
- * Check if the current priority hint merits a preemption attempt.
- *
- * We record the highest value priority we saw during rescheduling
- * prior to this dequeue, therefore we know that if it is strictly
-	 * less than the current tail of ELSP[0], we do not need to force
- * a preempt-to-idle cycle.
- *
- * However, the priority hint is a mere hint that we may need to
- * preempt. If that hint is stale or we may be trying to preempt
- * ourselves, ignore the request.
- *
- * More naturally we would write
- * prio >= max(0, last);
- * except that we wish to prevent triggering preemption at the same
- * priority level: the task that is running should remain running
- * to preserve FIFO ordering of dependencies.
- */
- last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
- if (engine->execlists.queue_priority_hint <= last_prio)
- return false;
-
- /*
- * Check against the first request in ELSP[1], it will, thanks to the
- * power of PI, be the highest priority of that context.
- */
- if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
- rq_prio(list_next_entry(rq, sched.link)) > last_prio)
- return true;
-
- if (rb) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- bool preempt = false;
-
- if (engine == ve->siblings[0]) { /* only preempt one sibling */
- struct i915_request *next;
-
- rcu_read_lock();
- next = READ_ONCE(ve->request);
- if (next)
- preempt = rq_prio(next) > last_prio;
- rcu_read_unlock();
- }
-
- if (preempt)
- return preempt;
- }
-
- /*
- * If the inflight context did not trigger the preemption, then maybe
- * it was the set of queued requests? Pick the highest priority in
- * the queue (the first active priolist) and see if it deserves to be
- * running instead of ELSP[0].
- *
-	 * The highest priority request in the queue cannot be either
-	 * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
-	 * context, its priority would not exceed ELSP[0] aka last_prio.
- */
- return queue_prio(&engine->execlists) > last_prio;
-}
-
-__maybe_unused static inline bool
-assert_priority_queue(const struct i915_request *prev,
- const struct i915_request *next)
-{
- /*
- * Without preemption, the prev may refer to the still active element
- * which we refuse to let go.
- *
- * Even with preemption, there are times when we think it is better not
- * to preempt and leave an ostensibly lower priority request in flight.
- */
- if (i915_request_is_active(prev))
- return true;
-
- return rq_prio(prev) >= rq_prio(next);
-}
-
-/*
- * The context descriptor encodes various attributes of a context,
- * including its GTT address and some flags. Because it's fairly
- * expensive to calculate, we'll just do it once and cache the result,
- * which remains valid until the context is unpinned.
- *
- * This is what a descriptor looks like, from LSB to MSB::
- *
- * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
- * bits 12-31: LRCA, GTT address of (the HWSP of) this context
- * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC)
- * bits 53-54: mbz, reserved for use by hardware
- * bits 55-63: group ID, currently unused and set to 0
- *
- * Starting from Gen11, the upper dword of the descriptor has a new format:
- *
- * bits 32-36: reserved
- * bits 37-47: SW context ID
- * bits 48-53: engine instance
- * bit 54: mbz, reserved for use by hardware
- * bits 55-60: SW counter
- * bits 61-63: engine class
- *
- * engine info, SW context ID and SW counter need to form a unique number
- * (Context ID) per lrc.
- */
-static u32
-lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
-{
- u32 desc;
-
- desc = INTEL_LEGACY_32B_CONTEXT;
- if (i915_vm_is_4lvl(ce->vm))
- desc = INTEL_LEGACY_64B_CONTEXT;
- desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
-
- desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
- if (IS_GEN(engine->i915, 8))
- desc |= GEN8_CTX_L3LLC_COHERENT;
-
- return i915_ggtt_offset(ce->state) | desc;
-}
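
As an illustration of the Gen11+ upper-dword layout documented above, the fields could be packed along these lines (a sketch using only the bit positions from the comment; the variable names are illustrative, not the driver's real macros):

	u64 desc = lower_dword;			/* flags | LRCA, as built above */
	desc |= (u64)sw_ctx_id  << 37;		/* bits 37-47: SW context ID */
	desc |= (u64)instance   << 48;		/* bits 48-53: engine instance */
	desc |= (u64)sw_counter << 55;		/* bits 55-60: SW counter */
	desc |= (u64)class      << 61;		/* bits 61-63: engine class */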
-
-static inline unsigned int dword_in_page(void *addr)
-{
- return offset_in_page(addr) / sizeof(u32);
-}
-
static void set_offsets(u32 *regs,
const u8 *data,
const struct intel_engine_cs *engine,
- bool clear)
+ bool close)
#define NOP(x) (BIT(7) | (x))
#define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6)))
#define POSTED BIT(0)
@@ -601,7 +25,7 @@ static void set_offsets(u32 *regs,
#define REG16(x) \
(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
(((x) >> 2) & 0x7f)
-#define END(total_state_size) 0, (total_state_size)
+#define END 0
{
const u32 base = engine->mmio_base;
@@ -610,8 +34,6 @@ static void set_offsets(u32 *regs,
if (*data & BIT(7)) { /* skip */
count = *data++ & ~BIT(7);
- if (clear)
- memset32(regs, MI_NOOP, count);
regs += count;
continue;
}
@@ -639,19 +61,11 @@ static void set_offsets(u32 *regs,
} while (v & BIT(7));
regs[0] = base + (offset << 2);
- if (clear)
- regs[1] = 0;
regs += 2;
} while (--count);
}
- if (clear) {
- u8 count = *++data;
-
- /* Clear past the tail for HW access */
- GEM_BUG_ON(dword_in_page(regs) > count);
- memset32(regs, MI_NOOP, count - dword_in_page(regs));
-
+ if (close) {
/* Close the batch; used mainly by live_lrc_layout() */
*regs = MI_BATCH_BUFFER_END;
if (INTEL_GEN(engine->i915) >= 10)
@@ -691,7 +105,7 @@ static const u8 gen8_xcs_offsets[] = {
REG16(0x200),
REG(0x028),
- END(80)
+ END
};
static const u8 gen9_xcs_offsets[] = {
@@ -775,7 +189,7 @@ static const u8 gen9_xcs_offsets[] = {
REG16(0x67c),
REG(0x068),
- END(176)
+ END
};
static const u8 gen12_xcs_offsets[] = {
@@ -807,7 +221,7 @@ static const u8 gen12_xcs_offsets[] = {
REG16(0x274),
REG16(0x270),
- END(80)
+ END
};
static const u8 gen8_rcs_offsets[] = {
@@ -844,7 +258,7 @@ static const u8 gen8_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END(80)
+ END
};
static const u8 gen9_rcs_offsets[] = {
@@ -928,7 +342,7 @@ static const u8 gen9_rcs_offsets[] = {
REG16(0x67c),
REG(0x68),
- END(176)
+ END
};
static const u8 gen11_rcs_offsets[] = {
@@ -969,7 +383,7 @@ static const u8 gen11_rcs_offsets[] = {
LRI(1, 0),
REG(0x0c8),
- END(80)
+ END
};
static const u8 gen12_rcs_offsets[] = {
@@ -1065,7 +479,7 @@ static const u8 gen12_rcs_offsets[] = {
REG(0x084),
NOP(1),
- END(192)
+ END
};
#undef END
@@ -1104,2284 +518,440 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
}
}
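
For reference, the byte encoding used by the offsets tables above decodes roughly as follows (a sketch derived from the NOP/LRI/REG/REG16 macros and the set_offsets() loop; the example registers are illustrative):

	/*
	 * NOP(x)        - skip x dwords of the context image
	 * LRI(count, f) - emit MI_LOAD_REGISTER_IMM(count), force-posted if
	 *                 POSTED is set in f
	 * REG(0x028)    - one byte holding 0x028 >> 2; set_offsets() writes
	 *                 regs[0] = engine->mmio_base + 0x028
	 * REG16(0x200)  - two bytes, for offsets >= 0x200, same expansion:
	 *                 regs[0] = engine->mmio_base + 0x200
	 * END           - terminates the table (and, for live_lrc_layout(),
	 *                 closes the image with MI_BATCH_BUFFER_END)
	 */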
-static struct i915_request *
-__unwind_incomplete_requests(struct intel_engine_cs *engine)
-{
- struct i915_request *rq, *rn, *active = NULL;
- struct list_head *pl;
- int prio = I915_PRIORITY_INVALID;
-
- lockdep_assert_held(&engine->active.lock);
-
- list_for_each_entry_safe_reverse(rq, rn,
- &engine->active.requests,
- sched.link) {
- if (i915_request_completed(rq))
- continue; /* XXX */
-
- __i915_request_unsubmit(rq);
-
- /*
- * Push the request back into the queue for later resubmission.
- * If this request is not native to this physical engine (i.e.
- * it came from a virtual source), push it back onto the virtual
- * engine so that it can be moved across onto another physical
- * engine as load dictates.
- */
- if (likely(rq->execution_mask == engine->mask)) {
- GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
- if (rq_prio(rq) != prio) {
- prio = rq_prio(rq);
- pl = i915_sched_lookup_priolist(engine, prio);
- }
- GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
-
- list_move(&rq->sched.link, pl);
- set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-
- /* Check in case we rollback so far we wrap [size/2] */
- if (intel_ring_direction(rq->ring,
- rq->tail,
- rq->ring->tail + 8) > 0)
- rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
-
- active = rq;
- } else {
- struct intel_engine_cs *owner = rq->context->engine;
-
- WRITE_ONCE(rq->engine, owner);
- owner->submit_request(rq);
- active = NULL;
- }
- }
-
- return active;
-}
-
-struct i915_request *
-execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
-{
- struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
-
- return __unwind_incomplete_requests(engine);
-}
-
-static inline void
-execlists_context_status_change(struct i915_request *rq, unsigned long status)
+static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
{
- /*
- * Only used when GVT-g is enabled now. When GVT-g is disabled,
- * The compiler should eliminate this function as dead-code.
- */
- if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
- return;
-
- atomic_notifier_call_chain(&rq->engine->context_status_notifier,
- status, rq);
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x60;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x54;
+ else if (engine->class == RENDER_CLASS)
+ return 0x58;
+ else
+ return -1;
}
-static void intel_engine_context_in(struct intel_engine_cs *engine)
+static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
{
- unsigned long flags;
-
- if (atomic_add_unless(&engine->stats.active, 1, 0))
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
- if (!atomic_add_unless(&engine->stats.active, 1, 0)) {
- engine->stats.start = ktime_get();
- atomic_inc(&engine->stats.active);
- }
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x74;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x68;
+ else if (engine->class == RENDER_CLASS)
+ return 0xd8;
+ else
+ return -1;
}
-static void intel_engine_context_out(struct intel_engine_cs *engine)
+static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
{
- unsigned long flags;
-
- GEM_BUG_ON(!atomic_read(&engine->stats.active));
-
- if (atomic_add_unless(&engine->stats.active, -1, 1))
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
- if (atomic_dec_and_test(&engine->stats.active)) {
- engine->stats.total =
- ktime_add(engine->stats.total,
- ktime_sub(ktime_get(), engine->stats.start));
- }
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x12;
+ else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
+ return 0x18;
+ else
+ return -1;
}
-static void
-execlists_check_context(const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- const char *when)
+static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
{
- const struct intel_ring *ring = ce->ring;
- u32 *regs = ce->lrc_reg_state;
- bool valid = true;
int x;
- if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
- pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
- engine->name,
- regs[CTX_RING_START],
- i915_ggtt_offset(ring->vma));
- regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
- valid = false;
- }
-
- if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
- (RING_CTL_SIZE(ring->size) | RING_VALID)) {
- pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
- engine->name,
- regs[CTX_RING_CTL],
- (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
- regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
- valid = false;
- }
-
- x = lrc_ring_mi_mode(engine);
- if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
- pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
- engine->name, regs[x + 1]);
- regs[x + 1] &= ~STOP_RING;
- regs[x + 1] |= STOP_RING << 16;
- valid = false;
- }
+ x = lrc_ring_wa_bb_per_ctx(engine);
+ if (x < 0)
+ return x;
- WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when);
+ return x + 2;
}
-static void restore_default_state(struct intel_context *ce,
- struct intel_engine_cs *engine)
+static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
{
- u32 *regs;
+ int x;
- regs = memset(ce->lrc_reg_state, 0, engine->context_size - PAGE_SIZE);
- execlists_init_reg_state(regs, ce, engine, ce->ring, true);
+ x = lrc_ring_indirect_ptr(engine);
+ if (x < 0)
+ return x;
- ce->runtime.last = intel_context_get_runtime(ce);
+ return x + 2;
}
-static void reset_active(struct i915_request *rq,
- struct intel_engine_cs *engine)
+static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
{
- struct intel_context * const ce = rq->context;
- u32 head;
-
- /*
- * The executing context has been cancelled. We want to prevent
- * further execution along this context and propagate the error on
- * to anything depending on its results.
- *
- * In __i915_request_submit(), we apply the -EIO and remove the
- * requests' payloads for any banned requests. But first, we must
- * rewind the context back to the start of the incomplete request so
- * that we do not jump back into the middle of the batch.
- *
- * We preserve the breadcrumbs and semaphores of the incomplete
- * requests so that inter-timeline dependencies (i.e other timelines)
- * remain correctly ordered. And we defer to __i915_request_submit()
- * so that all asynchronous waits are correctly handled.
- */
- ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n",
- rq->fence.context, rq->fence.seqno);
+ if (engine->class != RENDER_CLASS)
+ return -1;
- /* On resubmission of the active request, payload will be scrubbed */
- if (i915_request_completed(rq))
- head = rq->tail;
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0xb6;
+ else if (INTEL_GEN(engine->i915) >= 11)
+ return 0xaa;
else
- head = active_request(ce->timeline, rq)->head;
- head = intel_ring_wrap(ce->ring, head);
-
- /* Scrub the context image to prevent replaying the previous batch */
- restore_default_state(ce, engine);
- __execlists_update_reg_state(ce, engine, head);
-
- /* We've switched away, so this should be a no-op, but intent matters */
- ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
-}
-
-static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
-{
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
- ce->runtime.num_underflow += dt < 0;
- ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
-#endif
+ return -1;
}
-static void intel_context_update_runtime(struct intel_context *ce)
+static u32
+lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
{
- u32 old;
- s32 dt;
-
- if (intel_context_is_barrier(ce))
- return;
-
- old = ce->runtime.last;
- ce->runtime.last = intel_context_get_runtime(ce);
- dt = ce->runtime.last - old;
-
- if (unlikely(dt <= 0)) {
- CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
- old, ce->runtime.last, dt);
- st_update_runtime_underflow(ce, dt);
- return;
+ switch (INTEL_GEN(engine->i915)) {
+ default:
+ MISSING_CASE(INTEL_GEN(engine->i915));
+ fallthrough;
+ case 12:
+ return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 11:
+ return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 10:
+ return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 9:
+ return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 8:
+ return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
}
-
- ewma_runtime_add(&ce->runtime.avg, dt);
- ce->runtime.total += dt;
}
-static inline struct intel_engine_cs *
-__execlists_schedule_in(struct i915_request *rq)
+static void
+lrc_setup_indirect_ctx(u32 *regs,
+ const struct intel_engine_cs *engine,
+ u32 ctx_bb_ggtt_addr,
+ u32 size)
{
- struct intel_engine_cs * const engine = rq->engine;
- struct intel_context * const ce = rq->context;
-
- intel_context_get(ce);
-
- if (unlikely(intel_context_is_banned(ce)))
- reset_active(rq, engine);
-
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- execlists_check_context(ce, engine, "before");
-
- if (ce->tag) {
- /* Use a fixed tag for OA and friends */
- GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
- ce->lrc.ccid = ce->tag;
- } else {
- /* We don't need a strict matching tag, just different values */
- unsigned int tag = ffs(READ_ONCE(engine->context_tag));
-
- GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG);
- clear_bit(tag - 1, &engine->context_tag);
- ce->lrc.ccid = tag << (GEN11_SW_CTX_ID_SHIFT - 32);
-
- BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
- }
-
- ce->lrc.ccid |= engine->execlists.ccid;
-
- __intel_gt_pm_get(engine->gt);
- if (engine->fw_domain && !atomic_fetch_inc(&engine->fw_active))
- intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
- intel_engine_context_in(engine);
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
+ GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
+ regs[lrc_ring_indirect_ptr(engine) + 1] =
+ ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);
- return engine;
+ GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
+ regs[lrc_ring_indirect_offset(engine) + 1] =
+ lrc_ring_indirect_offset_default(engine) << 6;
}
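
A small worked example of the packing above (a sketch; it assumes CACHELINE_BYTES is 64 and the address is purely illustrative):

	/*
	 * A 128-byte per-context batch at GGTT offset 0x2000 packs as
	 *   0x2000 | (128 / 64) == 0x2002
	 * so the low bits of the INDIRECT_CTX entry carry the size in
	 * cachelines, which is why the buffer must be cacheline aligned
	 * and the size a multiple of CACHELINE_BYTES.
	 */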
-static inline struct i915_request *
-execlists_schedule_in(struct i915_request *rq, int idx)
+static void init_common_regs(u32 * const regs,
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool inhibit)
{
- struct intel_context * const ce = rq->context;
- struct intel_engine_cs *old;
-
- GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
- trace_i915_request_in(rq, idx);
-
- old = READ_ONCE(ce->inflight);
- do {
- if (!old) {
- WRITE_ONCE(ce->inflight, __execlists_schedule_in(rq));
- break;
- }
- } while (!try_cmpxchg(&ce->inflight, &old, ptr_inc(old)));
-
- GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
- return i915_request_get(rq);
-}
+ u32 ctl;
-static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
-{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
- struct i915_request *next = READ_ONCE(ve->request);
+ ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+ ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ if (inhibit)
+ ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
+ if (INTEL_GEN(engine->i915) < 11)
+ ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
+ CTX_CTRL_RS_CTX_ENABLE);
+ regs[CTX_CONTEXT_CONTROL] = ctl;
- if (next == rq || (next && next->execution_mask & ~rq->execution_mask))
- tasklet_hi_schedule(&ve->base.execlists.tasklet);
+ regs[CTX_TIMESTAMP] = ce->runtime.last;
}
-static inline void
-__execlists_schedule_out(struct i915_request *rq,
- struct intel_engine_cs * const engine,
- unsigned int ccid)
+static void init_wa_bb_regs(u32 * const regs,
+ const struct intel_engine_cs *engine)
{
- struct intel_context * const ce = rq->context;
-
- /*
- * NB process_csb() is not under the engine->active.lock and hence
- * schedule_out can race with schedule_in meaning that we should
- * refrain from doing non-trivial work here.
- */
+ const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- execlists_check_context(ce, engine, "after");
+ if (wa_ctx->per_ctx.size) {
+ const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
- /*
- * If we have just completed this context, the engine may now be
- * idle and we want to re-enter powersaving.
- */
- if (list_is_last_rcu(&rq->link, &ce->timeline->requests) &&
- i915_request_completed(rq))
- intel_engine_add_retire(engine, ce->timeline);
-
- ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
- ccid &= GEN12_MAX_CONTEXT_HW_ID;
- if (ccid < BITS_PER_LONG) {
- GEM_BUG_ON(ccid == 0);
- GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
- set_bit(ccid - 1, &engine->context_tag);
+ GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
+ regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
+ (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
}
- intel_context_update_runtime(ce);
- intel_engine_context_out(engine);
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
- if (engine->fw_domain && !atomic_dec_return(&engine->fw_active))
- intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
- intel_gt_pm_put_async(engine->gt);
-
- /*
- * If this is part of a virtual engine, its next request may
- * have been blocked waiting for access to the active context.
- * We have to kick all the siblings again in case we need to
- * switch (e.g. the next request is not runnable on this
- * engine). Hopefully, we will already have submitted the next
- * request before the tasklet runs and do not need to rebuild
- * each virtual tree and kick everyone again.
- */
- if (ce->engine != engine)
- kick_siblings(rq, ce);
-
- intel_context_put(ce);
-}
-
-static inline void
-execlists_schedule_out(struct i915_request *rq)
-{
- struct intel_context * const ce = rq->context;
- struct intel_engine_cs *cur, *old;
- u32 ccid;
-
- trace_i915_request_out(rq);
-
- ccid = rq->context->lrc.ccid;
- old = READ_ONCE(ce->inflight);
- do
- cur = ptr_unmask_bits(old, 2) ? ptr_dec(old) : NULL;
- while (!try_cmpxchg(&ce->inflight, &old, cur));
- if (!cur)
- __execlists_schedule_out(rq, old, ccid);
-
- i915_request_put(rq);
-}
-
-static u64 execlists_update_context(struct i915_request *rq)
-{
- struct intel_context *ce = rq->context;
- u64 desc = ce->lrc.desc;
- u32 tail, prev;
-
- /*
- * WaIdleLiteRestore:bdw,skl
- *
- * We should never submit the context with the same RING_TAIL twice
- * just in case we submit an empty ring, which confuses the HW.
- *
- * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
- * the normal request to be able to always advance the RING_TAIL on
- * subsequent resubmissions (for lite restore). Should that fail us,
- * and we try and submit the same tail again, force the context
- * reload.
- *
- * If we need to return to a preempted context, we need to skip the
- * lite-restore and force it to reload the RING_TAIL. Otherwise, the
- * HW has a tendency to ignore us rewinding the TAIL to the end of
- * an earlier request.
- */
- GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
- prev = rq->ring->tail;
- tail = intel_ring_set_tail(rq->ring, rq->tail);
- if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
- desc |= CTX_DESC_FORCE_RESTORE;
- ce->lrc_reg_state[CTX_RING_TAIL] = tail;
- rq->tail = rq->wa_tail;
-
- /*
- * Make sure the context image is complete before we submit it to HW.
- *
- * Ostensibly, writes (including the WCB) should be flushed prior to
- * an uncached write such as our mmio register access, the empirical
- * evidence (esp. on Braswell) suggests that the WC write into memory
- * may not be visible to the HW prior to the completion of the UC
- * register write and that we may begin execution from the context
- * before its image is complete leading to invalid PD chasing.
- */
- wmb();
-
- ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
- return desc;
-}
-
-static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
-{
- if (execlists->ctrl_reg) {
- writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
- writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
- } else {
- writel(upper_32_bits(desc), execlists->submit_reg);
- writel(lower_32_bits(desc), execlists->submit_reg);
+ if (wa_ctx->indirect_ctx.size) {
+ lrc_setup_indirect_ctx(regs, engine,
+ i915_ggtt_offset(wa_ctx->vma) +
+ wa_ctx->indirect_ctx.offset,
+ wa_ctx->indirect_ctx.size);
}
}
-static __maybe_unused char *
-dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
+static void init_ppgtt_regs(u32 *regs, const struct i915_ppgtt *ppgtt)
{
- if (!rq)
- return "";
-
- snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d",
- prefix,
- rq->context->lrc.ccid,
- rq->fence.context, rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
- "",
- rq_prio(rq));
-
- return buf;
-}
-
-static __maybe_unused void
-trace_ports(const struct intel_engine_execlists *execlists,
- const char *msg,
- struct i915_request * const *ports)
-{
- const struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
- char __maybe_unused p0[40], p1[40];
-
- if (!ports[0])
- return;
-
- ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
- dump_port(p0, sizeof(p0), "", ports[0]),
- dump_port(p1, sizeof(p1), ", ", ports[1]));
-}
-
-static inline bool
-reset_in_progress(const struct intel_engine_execlists *execlists)
-{
- return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
-}
-
-static __maybe_unused bool
-assert_pending_valid(const struct intel_engine_execlists *execlists,
- const char *msg)
-{
- struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
- struct i915_request * const *port, *rq;
- struct intel_context *ce = NULL;
- bool sentinel = false;
- u32 ccid = -1;
-
- trace_ports(execlists, msg, execlists->pending);
-
- /* We may be messing around with the lists during reset, lalala */
- if (reset_in_progress(execlists))
- return true;
-
- if (!execlists->pending[0]) {
- GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
- engine->name);
- return false;
- }
-
- if (execlists->pending[execlists_num_ports(execlists)]) {
- GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
- engine->name, execlists_num_ports(execlists));
- return false;
- }
-
- for (port = execlists->pending; (rq = *port); port++) {
- unsigned long flags;
- bool ok = true;
-
- GEM_BUG_ON(!kref_read(&rq->fence.refcount));
- GEM_BUG_ON(!i915_request_is_active(rq));
-
- if (ce == rq->context) {
- GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- return false;
- }
- ce = rq->context;
-
- if (ccid == ce->lrc.ccid) {
- GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
- engine->name,
- ccid, ce->timeline->fence_context,
- port - execlists->pending);
- return false;
- }
- ccid = ce->lrc.ccid;
-
- /*
- * Sentinels are supposed to be the last request so they flush
- * the current execution off the HW. Check that they are the only
- * request in the pending submission.
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ /* 64b PPGTT (48bit canonical)
+ * PDP0_DESCRIPTOR contains the base address to PML4 and
+ * other PDP Descriptors are ignored.
*/
- if (sentinel) {
- GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- return false;
- }
- sentinel = i915_request_has_sentinel(rq);
-
- /* Hold tightly onto the lock to prevent concurrent retires! */
- if (!spin_trylock_irqsave(&rq->lock, flags))
- continue;
-
- if (i915_request_completed(rq))
- goto unlock;
-
- if (i915_active_is_idle(&ce->active) &&
- !intel_context_is_barrier(ce)) {
- GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- ok = false;
- goto unlock;
- }
-
- if (!i915_vma_is_pinned(ce->state)) {
- GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- ok = false;
- goto unlock;
- }
-
- if (!i915_vma_is_pinned(ce->ring->vma)) {
- GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
- engine->name,
- ce->timeline->fence_context,
- port - execlists->pending);
- ok = false;
- goto unlock;
- }
-
-unlock:
- spin_unlock_irqrestore(&rq->lock, flags);
- if (!ok)
- return false;
- }
-
- return ce;
-}
-
-static void execlists_submit_ports(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists *execlists = &engine->execlists;
- unsigned int n;
-
- GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
-
- /*
- * We can skip acquiring intel_runtime_pm_get() here as it was taken
- * on our behalf by the request (see i915_gem_mark_busy()) and it will
- * not be relinquished until the device is idle (see
- * i915_gem_idle_work_handler()). As a precaution, we make sure
- * that all ELSP are drained i.e. we have processed the CSB,
- * before allowing ourselves to idle and calling intel_runtime_pm_put().
- */
- GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
-
- /*
- * ELSQ note: the submit queue is not cleared after being submitted
- * to the HW so we need to make sure we always clean it up. This is
- * currently ensured by the fact that we always write the same number
- * of elsq entries, keep this in mind before changing the loop below.
- */
- for (n = execlists_num_ports(execlists); n--; ) {
- struct i915_request *rq = execlists->pending[n];
-
- write_desc(execlists,
- rq ? execlists_update_context(rq) : 0,
- n);
- }
-
- /* we need to manually load the submit queue */
- if (execlists->ctrl_reg)
- writel(EL_CTRL_LOAD, execlists->ctrl_reg);
-}
-
-static bool ctx_single_port_submission(const struct intel_context *ce)
-{
- return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
- intel_context_force_single_submission(ce));
-}
-
-static bool can_merge_ctx(const struct intel_context *prev,
- const struct intel_context *next)
-{
- if (prev != next)
- return false;
-
- if (ctx_single_port_submission(prev))
- return false;
-
- return true;
-}
-
-static unsigned long i915_request_flags(const struct i915_request *rq)
-{
- return READ_ONCE(rq->fence.flags);
-}
-
-static bool can_merge_rq(const struct i915_request *prev,
- const struct i915_request *next)
-{
- GEM_BUG_ON(prev == next);
- GEM_BUG_ON(!assert_priority_queue(prev, next));
-
- /*
- * We do not submit known completed requests. Therefore if the next
- * request is already completed, we can pretend to merge it in
- * with the previous context (and we will skip updating the ELSP
- * and tracking). Thus hopefully keeping the ELSP full with active
- * contexts, despite the best efforts of preempt-to-busy to confuse
- * us.
- */
- if (i915_request_completed(next))
- return true;
-
- if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
- (BIT(I915_FENCE_FLAG_NOPREEMPT) |
- BIT(I915_FENCE_FLAG_SENTINEL))))
- return false;
-
- if (!can_merge_ctx(prev->context, next->context))
- return false;
-
- GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
- return true;
-}
-
-static void virtual_update_register_offsets(u32 *regs,
- struct intel_engine_cs *engine)
-{
- set_offsets(regs, reg_offsets(engine), engine, false);
-}
-
-static bool virtual_matches(const struct virtual_engine *ve,
- const struct i915_request *rq,
- const struct intel_engine_cs *engine)
-{
- const struct intel_engine_cs *inflight;
-
- if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
- return false;
-
- /*
- * We track when the HW has completed saving the context image
- * (i.e. when we have seen the final CS event switching out of
- * the context) and must not overwrite the context image before
- * then. This restricts us to only using the active engine
- * while the previous virtualized request is inflight (so
- * we reuse the register offsets). This is a very small
-	 * hysteresis on the greedy selection algorithm.
- */
- inflight = intel_context_inflight(&ve->context);
- if (inflight && inflight != engine)
- return false;
-
- return true;
-}
-
-static void virtual_xfer_context(struct virtual_engine *ve,
- struct intel_engine_cs *engine)
-{
- unsigned int n;
-
- if (likely(engine == ve->siblings[0]))
- return;
-
- GEM_BUG_ON(READ_ONCE(ve->context.inflight));
- if (!intel_engine_has_relative_mmio(engine))
- virtual_update_register_offsets(ve->context.lrc_reg_state,
- engine);
-
- /*
- * Move the bound engine to the top of the list for
- * future execution. We then kick this tasklet first
- * before checking others, so that we preferentially
- * reuse this set of bound registers.
- */
- for (n = 1; n < ve->num_siblings; n++) {
- if (ve->siblings[n] == engine) {
- swap(ve->siblings[n], ve->siblings[0]);
- break;
- }
+ ASSIGN_CTX_PML4(ppgtt, regs);
+ } else {
+ ASSIGN_CTX_PDP(ppgtt, regs, 3);
+ ASSIGN_CTX_PDP(ppgtt, regs, 2);
+ ASSIGN_CTX_PDP(ppgtt, regs, 1);
+ ASSIGN_CTX_PDP(ppgtt, regs, 0);
}
}
-#define for_each_waiter(p__, rq__) \
- list_for_each_entry_lockless(p__, \
- &(rq__)->sched.waiters_list, \
- wait_link)
-
-#define for_each_signaler(p__, rq__) \
- list_for_each_entry_rcu(p__, \
- &(rq__)->sched.signalers_list, \
- signal_link)
-
-static void defer_request(struct i915_request *rq, struct list_head * const pl)
-{
- LIST_HEAD(list);
-
- /*
- * We want to move the interrupted request to the back of
- * the round-robin list (i.e. its priority level), but
- * in doing so, we must then move all requests that were in
- * flight and were waiting for the interrupted request to
- * be run after it again.
- */
- do {
- struct i915_dependency *p;
-
- GEM_BUG_ON(i915_request_is_active(rq));
- list_move_tail(&rq->sched.link, pl);
-
- for_each_waiter(p, rq) {
- struct i915_request *w =
- container_of(p->waiter, typeof(*w), sched);
-
- if (p->flags & I915_DEPENDENCY_WEAK)
- continue;
-
- /* Leave semaphores spinning on the other engines */
- if (w->engine != rq->engine)
- continue;
-
- /* No waiter should start before its signaler */
- GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
- i915_request_started(w) &&
- !i915_request_completed(rq));
-
- GEM_BUG_ON(i915_request_is_active(w));
- if (!i915_request_is_ready(w))
- continue;
-
- if (rq_prio(w) < rq_prio(rq))
- continue;
-
- GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
- list_move_tail(&w->sched.link, &list);
- }
-
- rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
- } while (rq);
-}
-
-static void defer_active(struct intel_engine_cs *engine)
+static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
{
- struct i915_request *rq;
-
- rq = __unwind_incomplete_requests(engine);
- if (!rq)
- return;
-
- defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+ if (i915_is_ggtt(vm))
+ return i915_vm_to_ggtt(vm)->alias;
+ else
+ return i915_vm_to_ppgtt(vm);
}
-static bool
-need_timeslice(const struct intel_engine_cs *engine,
- const struct i915_request *rq,
- const struct rb_node *rb)
+static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
{
- int hint;
-
- if (!intel_engine_has_timeslices(engine))
- return false;
-
- hint = engine->execlists.queue_priority_hint;
-
- if (rb) {
- const struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- const struct intel_engine_cs *inflight =
- intel_context_inflight(&ve->context);
-
- if (!inflight || inflight == engine) {
- struct i915_request *next;
+ int x;
- rcu_read_lock();
- next = READ_ONCE(ve->request);
- if (next)
- hint = max(hint, rq_prio(next));
- rcu_read_unlock();
- }
+ x = lrc_ring_mi_mode(engine);
+ if (x != -1) {
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
}
-
- if (!list_is_last(&rq->sched.link, &engine->active.requests))
- hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
-
- GEM_BUG_ON(hint >= I915_PRIORITY_UNPREEMPTABLE);
- return hint >= effective_prio(rq);
}
-static bool
-timeslice_yield(const struct intel_engine_execlists *el,
- const struct i915_request *rq)
+static void __lrc_init_regs(u32 *regs,
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool inhibit)
{
/*
- * Once bitten, forever smitten!
+ * A context is actually a big batch buffer with several
+ * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
+ * values we are setting here are only for the first context restore:
+ * on a subsequent save, the GPU will recreate this batchbuffer with new
+ * values (including all the missing MI_LOAD_REGISTER_IMM commands that
+ * we are not initializing here).
*
- * If the active context ever busy-waited on a semaphore,
- * it will be treated as a hog until the end of its timeslice (i.e.
- * until it is scheduled out and replaced by a new submission,
- * possibly even its own lite-restore). The HW only sends an interrupt
-	 * on the first miss, and we do not know if that semaphore has been
- * signaled, or even if it is now stuck on another semaphore. Play
- * safe, yield if it might be stuck -- it will be given a fresh
- * timeslice in the near future.
+ * Must keep consistent with virtual_update_register_offsets().
*/
- return rq->context->lrc.ccid == READ_ONCE(el->yield);
-}
-
-static bool
-timeslice_expired(const struct intel_engine_execlists *el,
- const struct i915_request *rq)
-{
- return timer_expired(&el->timer) || timeslice_yield(el, rq);
-}
-
-static int
-switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
-{
- if (list_is_last(&rq->sched.link, &engine->active.requests))
- return engine->execlists.queue_priority_hint;
-
- return rq_prio(list_next_entry(rq, sched.link));
-}
-
-static inline unsigned long
-timeslice(const struct intel_engine_cs *engine)
-{
- return READ_ONCE(engine->props.timeslice_duration_ms);
-}
-static unsigned long active_timeslice(const struct intel_engine_cs *engine)
-{
- const struct intel_engine_execlists *execlists = &engine->execlists;
- const struct i915_request *rq = *execlists->active;
-
- if (!rq || i915_request_completed(rq))
- return 0;
-
- if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq))
- return 0;
-
- return timeslice(engine);
-}
+ if (inhibit)
+ memset(regs, 0, PAGE_SIZE);
-static void set_timeslice(struct intel_engine_cs *engine)
-{
- unsigned long duration;
+ set_offsets(regs, reg_offsets(engine), engine, inhibit);
- if (!intel_engine_has_timeslices(engine))
- return;
+ init_common_regs(regs, ce, engine, inhibit);
+ init_ppgtt_regs(regs, vm_alias(ce->vm));
- duration = active_timeslice(engine);
- ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration);
+ init_wa_bb_regs(regs, engine);
- set_timer_ms(&engine->execlists.timer, duration);
+ __reset_stop_ring(regs, engine);
}
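
To visualise what the comment above describes, the register-state page that set_offsets() and the init_*_regs() helpers fill in starts out roughly like this (a sketch only; the exact order and register set are per-gen and come from the offsets tables):

	/*
	 * MI_LOAD_REGISTER_IMM(N)
	 *   RING_CONTEXT_CONTROL  <- init_common_regs()
	 *   RING_HEAD             <- 0
	 *   RING_TAIL             <- 0
	 *   RING_START            <- filled in when the ring is pinned
	 *   ...                      (PDPs, workaround bb pointers, ...)
	 * MI_BATCH_BUFFER_END        (only when closed for live_lrc_layout())
	 */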
-static void start_timeslice(struct intel_engine_cs *engine, int prio)
+void lrc_init_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool inhibit)
{
- struct intel_engine_execlists *execlists = &engine->execlists;
- unsigned long duration;
-
- if (!intel_engine_has_timeslices(engine))
- return;
-
- WRITE_ONCE(execlists->switch_priority_hint, prio);
- if (prio == INT_MIN)
- return;
-
- if (timer_pending(&execlists->timer))
- return;
-
- duration = timeslice(engine);
- ENGINE_TRACE(engine,
- "start timeslicing, prio:%d, interval:%lu",
- prio, duration);
-
- set_timer_ms(&execlists->timer, duration);
+ __lrc_init_regs(ce->lrc_reg_state, ce, engine, inhibit);
}
-static void record_preemption(struct intel_engine_execlists *execlists)
+void lrc_reset_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
{
- (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
+ __reset_stop_ring(ce->lrc_reg_state, engine);
}
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
- const struct i915_request *rq)
+static void
+set_redzone(void *vaddr, const struct intel_engine_cs *engine)
{
- if (!rq)
- return 0;
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
- /* Force a fast reset for terminated contexts (ignoring sysfs!) */
- if (unlikely(intel_context_is_banned(rq->context)))
- return 1;
+ vaddr += engine->context_size;
- return READ_ONCE(engine->props.preempt_timeout_ms);
+ memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
}
-static void set_preempt_timeout(struct intel_engine_cs *engine,
- const struct i915_request *rq)
+static void
+check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
{
- if (!intel_engine_has_preempt_reset(engine))
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return;
- set_timer_ms(&engine->execlists.preempt,
- active_preempt_timeout(engine, rq));
-}
-
-static inline void clear_ports(struct i915_request **ports, int count)
-{
- memset_p((void **)ports, NULL, count);
-}
+ vaddr += engine->context_size;
-static inline void
-copy_ports(struct i915_request **dst, struct i915_request **src, int count)
-{
- /* A memcpy_p() would be very useful here! */
- while (count--)
- WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
+ if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
+ drm_err_once(&engine->i915->drm,
+ "%s context redzone overwritten!\n",
+ engine->name);
}
-static void execlists_dequeue(struct intel_engine_cs *engine)
+void lrc_init_state(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *state)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request **port = execlists->pending;
- struct i915_request ** const last_port = port + execlists->port_mask;
- struct i915_request * const *active;
- struct i915_request *last;
- struct rb_node *rb;
- bool submit = false;
-
- /*
- * Hardware submission is through 2 ports. Conceptually each port
- * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
- * static for a context, and unique to each, so we only execute
- * requests belonging to a single context from each ring. RING_HEAD
- * is maintained by the CS in the context image, it marks the place
- * where it got up to last time, and through RING_TAIL we tell the CS
- * where we want to execute up to this time.
- *
- * In this list the requests are in order of execution. Consecutive
- * requests from the same context are adjacent in the ringbuffer. We
- * can combine these requests into a single RING_TAIL update:
- *
- * RING_HEAD...req1...req2
- * ^- RING_TAIL
- * since to execute req2 the CS must first execute req1.
- *
- * Our goal then is to point each port to the end of a consecutive
- * sequence of requests as being the most optimal (fewest wake ups
- * and context switches) submission.
- */
-
- for (rb = rb_first_cached(&execlists->virtual); rb; ) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- struct i915_request *rq = READ_ONCE(ve->request);
-
- if (!rq) { /* lazily cleanup after another engine handled rq */
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
- rb = rb_first_cached(&execlists->virtual);
- continue;
- }
-
- if (!virtual_matches(ve, rq, engine)) {
- rb = rb_next(rb);
- continue;
- }
-
- break;
- }
-
- /*
- * If the queue is higher priority than the last
- * request in the currently active context, submit afresh.
- * We will resubmit again afterwards in case we need to split
- * the active context to interject the preemption request,
- * i.e. we will retrigger preemption following the ack in case
- * of trouble.
- */
- active = READ_ONCE(execlists->active);
-
- /*
- * In theory we can skip over completed contexts that have not
- * yet been processed by events (as those events are in flight):
- *
- * while ((last = *active) && i915_request_completed(last))
- * active++;
- *
- * However, the GPU cannot handle this as it will ultimately
- * find itself trying to jump back into a context it has just
- * completed and barf.
- */
-
- if ((last = *active)) {
- if (need_preempt(engine, last, rb)) {
- if (i915_request_completed(last)) {
- tasklet_hi_schedule(&execlists->tasklet);
- return;
- }
-
- ENGINE_TRACE(engine,
- "preempting last=%llx:%lld, prio=%d, hint=%d\n",
- last->fence.context,
- last->fence.seqno,
- last->sched.attr.priority,
- execlists->queue_priority_hint);
- record_preemption(execlists);
-
- /*
- * Don't let the RING_HEAD advance past the breadcrumb
- * as we unwind (and until we resubmit) so that we do
- * not accidentally tell it to go backwards.
- */
- ring_set_paused(engine, 1);
-
- /*
- * Note that we have not stopped the GPU at this point,
- * so we are unwinding the incomplete requests as they
- * remain inflight and so by the time we do complete
- * the preemption, some of the unwound requests may
- * complete!
- */
- __unwind_incomplete_requests(engine);
-
- last = NULL;
- } else if (need_timeslice(engine, last, rb) &&
- timeslice_expired(execlists, last)) {
- if (i915_request_completed(last)) {
- tasklet_hi_schedule(&execlists->tasklet);
- return;
- }
-
- ENGINE_TRACE(engine,
- "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
- last->fence.context,
- last->fence.seqno,
- last->sched.attr.priority,
- execlists->queue_priority_hint,
- yesno(timeslice_yield(execlists, last)));
-
- ring_set_paused(engine, 1);
- defer_active(engine);
-
- /*
- * Unlike for preemption, if we rewind and continue
- * executing the same context as previously active,
- * the order of execution will remain the same and
- * the tail will only advance. We do not need to
- * force a full context restore, as a lite-restore
- * is sufficient to resample the monotonic TAIL.
- *
- * If we switch to any other context, similarly we
- * will not rewind TAIL of current context, and
- * normal save/restore will preserve state and allow
- * us to later continue executing the same request.
- */
- last = NULL;
- } else {
- /*
- * Otherwise if we already have a request pending
- * for execution after the current one, we can
- * just wait until the next CS event before
- * queuing more. In either case we will force a
- * lite-restore preemption event, but if we wait
- * we hopefully coalesce several updates into a single
- * submission.
- */
- if (!list_is_last(&last->sched.link,
- &engine->active.requests)) {
- /*
- * Even if ELSP[1] is occupied and not worthy
- * of timeslices, our queue might be.
- */
- start_timeslice(engine, queue_prio(execlists));
- return;
- }
- }
- }
-
- while (rb) { /* XXX virtual is always taking precedence */
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- struct i915_request *rq;
-
- spin_lock(&ve->base.active.lock);
-
- rq = ve->request;
- if (unlikely(!rq)) { /* lost the race to a sibling */
- spin_unlock(&ve->base.active.lock);
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
- rb = rb_first_cached(&execlists->virtual);
- continue;
- }
+ bool inhibit = true;
- GEM_BUG_ON(rq != ve->request);
- GEM_BUG_ON(rq->engine != &ve->base);
- GEM_BUG_ON(rq->context != &ve->context);
-
- if (rq_prio(rq) >= queue_prio(execlists)) {
- if (!virtual_matches(ve, rq, engine)) {
- spin_unlock(&ve->base.active.lock);
- rb = rb_next(rb);
- continue;
- }
-
- if (last && !can_merge_rq(last, rq)) {
- spin_unlock(&ve->base.active.lock);
- start_timeslice(engine, rq_prio(rq));
- return; /* leave this for another sibling */
- }
-
- ENGINE_TRACE(engine,
- "virtual rq=%llx:%lld%s, new engine? %s\n",
- rq->fence.context,
- rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
- "",
- yesno(engine != ve->siblings[0]));
-
- WRITE_ONCE(ve->request, NULL);
- WRITE_ONCE(ve->base.execlists.queue_priority_hint,
- INT_MIN);
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
-
- GEM_BUG_ON(!(rq->execution_mask & engine->mask));
- WRITE_ONCE(rq->engine, engine);
-
- if (__i915_request_submit(rq)) {
- /*
- * Only after we confirm that we will submit
- * this request (i.e. it has not already
- * completed), do we want to update the context.
- *
- * This serves two purposes. It avoids
- * unnecessary work if we are resubmitting an
- * already completed request after timeslicing.
- * But more importantly, it prevents us altering
- * ve->siblings[] on an idle context, where
- * we may be using ve->siblings[] in
- * virtual_context_enter / virtual_context_exit.
- */
- virtual_xfer_context(ve, engine);
- GEM_BUG_ON(ve->siblings[0] != engine);
-
- submit = true;
- last = rq;
- }
- i915_request_put(rq);
-
- /*
- * Hmm, we have a bunch of virtual engine requests,
- * but the first one was already completed (thanks
- * preempt-to-busy!). Keep looking at the veng queue
- * until we have no more relevant requests (i.e.
- * the normal submit queue has higher priority).
- */
- if (!submit) {
- spin_unlock(&ve->base.active.lock);
- rb = rb_first_cached(&execlists->virtual);
- continue;
- }
- }
+ set_redzone(state, engine);
- spin_unlock(&ve->base.active.lock);
- break;
+ if (engine->default_state) {
+ shmem_read(engine->default_state, 0,
+ state, engine->context_size);
+ __set_bit(CONTEXT_VALID_BIT, &ce->flags);
+ inhibit = false;
}
- while ((rb = rb_first_cached(&execlists->queue))) {
- struct i915_priolist *p = to_priolist(rb);
- struct i915_request *rq, *rn;
- int i;
-
- priolist_for_each_request_consume(rq, rn, p, i) {
- bool merge = true;
-
- /*
- * Can we combine this request with the current port?
- * It has to be the same context/ringbuffer and not
- * have any exceptions (e.g. GVT saying never to
- * combine contexts).
- *
- * If we can combine the requests, we can execute both
- * by updating the RING_TAIL to point to the end of the
- * second request, and so we never need to tell the
- * hardware about the first.
- */
- if (last && !can_merge_rq(last, rq)) {
- /*
- * If we are on the second port and cannot
- * combine this request with the last, then we
- * are done.
- */
- if (port == last_port)
- goto done;
-
- /*
- * We must not populate both ELSP[] with the
- * same LRCA, i.e. we must submit 2 different
- * contexts if we submit 2 ELSP.
- */
- if (last->context == rq->context)
- goto done;
-
- if (i915_request_has_sentinel(last))
- goto done;
-
- /*
- * If GVT overrides us we only ever submit
- * port[0], leaving port[1] empty. Note that we
- * also have to be careful that we don't queue
- * the same context (even though a different
- * request) to the second port.
- */
- if (ctx_single_port_submission(last->context) ||
- ctx_single_port_submission(rq->context))
- goto done;
-
- merge = false;
- }
-
- if (__i915_request_submit(rq)) {
- if (!merge) {
- *port = execlists_schedule_in(last, port - execlists->pending);
- port++;
- last = NULL;
- }
-
- GEM_BUG_ON(last &&
- !can_merge_ctx(last->context,
- rq->context));
- GEM_BUG_ON(last &&
- i915_seqno_passed(last->fence.seqno,
- rq->fence.seqno));
-
- submit = true;
- last = rq;
- }
- }
-
- rb_erase_cached(&p->node, &execlists->queue);
- i915_priolist_free(p);
- }
+ /* Clear the ppHWSP (inc. per-context counters) */
+ memset(state, 0, PAGE_SIZE);
-done:
/*
- * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
- *
- * We choose the priority hint such that if we add a request of greater
- * priority than this, we kick the submission tasklet to decide on
- * the right order of submitting the requests to hardware. We must
- * also be prepared to reorder requests as they are in-flight on the
- * HW. We derive the priority hint then as the first "hole" in
- * the HW submission ports and if there are no available slots,
- * the priority of the lowest executing request, i.e. last.
- *
- * When we do receive a higher priority request ready to run from the
- * user, see queue_request(), the priority hint is bumped to that
- * request triggering preemption on the next dequeue (or subsequent
- * interrupt for secondary ports).
+ * The second page of the context object contains some registers which
+ * must be set up prior to the first execution.
*/
- execlists->queue_priority_hint = queue_prio(execlists);
-
- if (submit) {
- *port = execlists_schedule_in(last, port - execlists->pending);
- execlists->switch_priority_hint =
- switch_prio(engine, *execlists->pending);
-
- /*
- * Skip if we ended up with exactly the same set of requests,
- * e.g. trying to timeslice a pair of ordered contexts
- */
- if (!memcmp(active, execlists->pending,
- (port - execlists->pending + 1) * sizeof(*port))) {
- do
- execlists_schedule_out(fetch_and_zero(port));
- while (port-- != execlists->pending);
-
- goto skip_submit;
- }
- clear_ports(port + 1, last_port - port);
-
- WRITE_ONCE(execlists->yield, -1);
- set_preempt_timeout(engine, *active);
- execlists_submit_ports(engine);
- } else {
- start_timeslice(engine, execlists->queue_priority_hint);
-skip_submit:
- ring_set_paused(engine, 0);
- }
+ __lrc_init_regs(state + LRC_STATE_OFFSET, ce, engine, inhibit);
}
-static void
-cancel_port_requests(struct intel_engine_execlists * const execlists)
+static struct i915_vma *
+__lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
{
- struct i915_request * const *port;
-
- for (port = execlists->pending; *port; port++)
- execlists_schedule_out(*port);
- clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
-
- /* Mark the end of active before we overwrite *active */
- for (port = xchg(&execlists->active, execlists->pending); *port; port++)
- execlists_schedule_out(*port);
- clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
-
- smp_wmb(); /* complete the seqlock for execlists_active() */
- WRITE_ONCE(execlists->active, execlists->inflight);
-}
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 context_size;
-static inline void
-invalidate_csb_entries(const u64 *first, const u64 *last)
-{
- clflush((void *)first);
- clflush((void *)last);
-}
+ context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
-/*
- * Starting with Gen12, the status has a new format:
- *
- * bit 0: switched to new queue
- * bit 1: reserved
- * bit 2: semaphore wait mode (poll or signal), only valid when
- * switch detail is set to "wait on semaphore"
- * bits 3-5: engine class
- * bits 6-11: engine instance
- * bits 12-14: reserved
- * bits 15-25: sw context id of the lrc the GT switched to
- * bits 26-31: sw counter of the lrc the GT switched to
- * bits 32-35: context switch detail
- * - 0: ctx complete
- * - 1: wait on sync flip
- * - 2: wait on vblank
- * - 3: wait on scanline
- * - 4: wait on semaphore
- * - 5: context preempted (not on SEMAPHORE_WAIT or
- * WAIT_FOR_EVENT)
- * bit 36: reserved
- * bits 37-43: wait detail (for switch detail 1 to 4)
- * bits 44-46: reserved
- * bits 47-57: sw context id of the lrc the GT switched away from
- * bits 58-63: sw counter of the lrc the GT switched away from
- */
-static inline bool gen12_csb_parse(const u64 csb)
-{
- bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb));
- bool new_queue =
- lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ context_size += I915_GTT_PAGE_SIZE; /* for redzone */
- /*
- * The context switch detail is not guaranteed to be 5 when a preemption
- * occurs, so we can't just check for that. The check below works for
- * all the cases we care about, including preemptions of WAIT
- * instructions and lite-restore. Preempt-to-idle via the CTRL register
- * would require some extra handling, but we don't support that.
- */
- if (!ctx_away_valid || new_queue) {
- GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(csb)));
- return true;
+ if (INTEL_GEN(engine->i915) == 12) {
+ ce->wa_bb_page = context_size / PAGE_SIZE;
+ context_size += PAGE_SIZE;
}
- /*
- * switch detail = 5 is covered by the case above and we do not expect a
- * context switch on an unsuccessful wait instruction since we always
- * use polling mode.
- */
- GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb)));
- return false;
-}
-
-static inline bool gen8_csb_parse(const u64 csb)
-{
- return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
-}
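
As an aside for readers of the CSB layout comment above: the sketch below is a minimal, userspace-only illustration of pulling those Gen12 fields out of a 64-bit entry with plain shifts and masks. It does not use the driver's GEN12_CSB_*/GEN12_CTX_* helpers; the struct and function names here are made up for the example.

#include <stdint.h>
#include <stdio.h>

struct gen12_csb_fields {
	unsigned int new_queue;     /* bit 0 */
	unsigned int to_ctx_id;     /* bits 15-25 */
	unsigned int to_counter;    /* bits 26-31 */
	unsigned int switch_detail; /* bits 32-35 */
	unsigned int from_ctx_id;   /* bits 47-57 */
	unsigned int from_counter;  /* bits 58-63 */
};

static struct gen12_csb_fields gen12_csb_decode(uint64_t csb)
{
	struct gen12_csb_fields f = {
		.new_queue     = csb & 1,
		.to_ctx_id     = (csb >> 15) & 0x7ff,
		.to_counter    = (csb >> 26) & 0x3f,
		.switch_detail = (csb >> 32) & 0xf,
		.from_ctx_id   = (csb >> 47) & 0x7ff,
		.from_counter  = (csb >> 58) & 0x3f,
	};

	return f;
}

int main(void)
{
	/* bit 0, bit 15 and bit 32 set: new queue, to_ctx_id=1, detail=1 */
	struct gen12_csb_fields f = gen12_csb_decode(0x0000000100008001ull);

	printf("new_queue=%u detail=%u to_ctx=%u\n",
	       f.new_queue, f.switch_detail, f.to_ctx_id);
	return 0;
}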
-
-static noinline u64
-wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb)
-{
- u64 entry;
-
- /*
- * Reading from the HWSP has one particular advantage: we can detect
- * a stale entry. Since the write into HWSP is broken, we have no reason
- * to trust the HW at all, the mmio entry may equally be unordered, so
- * we prefer the path that is self-checking and as a last resort,
- * return the mmio value.
- *
- * tgl,dg1:HSDES#22011327657
- */
- preempt_disable();
- if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) {
- int idx = csb - engine->execlists.csb_status;
- int status;
-
- status = GEN8_EXECLISTS_STATUS_BUF;
- if (idx >= 6) {
- status = GEN11_EXECLISTS_STATUS_BUF2;
- idx -= 6;
- }
- status += sizeof(u64) * idx;
+ obj = i915_gem_object_create_shmem(engine->i915, context_size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
- entry = intel_uncore_read64(engine->uncore,
- _MMIO(engine->mmio_base + status));
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
}
- preempt_enable();
-
- return entry;
-}
-static inline u64
-csb_read(const struct intel_engine_cs *engine, u64 * const csb)
-{
- u64 entry = READ_ONCE(*csb);
-
- /*
- * Unfortunately, the GPU does not always serialise its write
- * of the CSB entries before its write of the CSB pointer, at least
- * from the perspective of the CPU, using what is known as a Global
- * Observation Point. We may read a new CSB tail pointer, but then
- * read the stale CSB entries, causing us to misinterpret the
- * context-switch events, and eventually declare the GPU hung.
- *
- * icl:HSDES#1806554093
- * tgl:HSDES#22011248461
- */
- if (unlikely(entry == -1))
- entry = wa_csb_read(engine, csb);
-
- /* Consume this entry so that we can spot its future reuse. */
- WRITE_ONCE(*csb, -1);
-
- /* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */
- return entry;
+ return vma;
}
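
The wa_csb_read()/csb_read() pair above boils down to a consume-and-self-check pattern: every entry is overwritten with an all-ones sentinel after it is read, so a slot the hardware has not yet rewritten is recognisable, and only then is the slower mmio fallback tried. A small sketch of that shape, with read_mmio_fallback() as an assumed stand-in for the driver's register read:

#include <stdint.h>
#include <stdio.h>

#define CSB_STALE (~0ull)

/* Assumption: stand-in for the driver's slow mmio read of the same entry. */
static uint64_t read_mmio_fallback(int idx)
{
	(void)idx;
	return 0;
}

static uint64_t csb_consume(volatile uint64_t *slot, int idx)
{
	uint64_t entry = *slot;

	if (entry == CSB_STALE)		/* hardware has not (yet) written it */
		entry = read_mmio_fallback(idx);

	*slot = CSB_STALE;		/* consume, so future reuse is detectable */
	return entry;
}

int main(void)
{
	uint64_t slot = 42;

	printf("first read: %llu\n", (unsigned long long)csb_consume(&slot, 0));
	printf("stale read: %llu\n", (unsigned long long)csb_consume(&slot, 0));
	return 0;
}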
-static void process_csb(struct intel_engine_cs *engine)
+static struct intel_timeline *
+pinned_timeline(struct intel_context *ce, struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- u64 * const buf = execlists->csb_status;
- const u8 num_entries = execlists->csb_size;
- u8 head, tail;
-
- /*
- * As we modify our execlists state tracking we require exclusive
- * access. Either we are inside the tasklet, or the tasklet is disabled
- * and we assume that is only inside the reset paths and so serialised.
- */
- GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
- !reset_in_progress(execlists));
- GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
-
- /*
- * Note that csb_write, csb_status may be either in HWSP or mmio.
- * When reading from the csb_write mmio register, we have to be
- * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
- * the low 4bits. As it happens we know the next 4bits are always
- * zero and so we can simply mask off the low u8 of the register
- * and treat it identically to reading from the HWSP (without having
- * to use explicit shifting and masking, and probably bifurcating
- * the code to handle the legacy mmio read).
- */
- head = execlists->csb_head;
- tail = READ_ONCE(*execlists->csb_write);
- if (unlikely(head == tail))
- return;
-
- /*
- * We will consume all events from HW, or at least pretend to.
- *
- * The sequence of events from the HW is deterministic, and derived
- * from our writes to the ELSP, with a smidgen of variability for
- * the arrival of the asynchronous requests wrt the inflight
- * execution. If the HW sends an event that does not correspond with
- * the one we are expecting, we have to abandon all hope as we lose
- * all tracking of what the engine is actually executing. We will
- * only detect we are out of sequence with the HW when we get an
- * 'impossible' event because we have already drained our own
- * preemption/promotion queue. If this occurs, we know that we likely
- * lost track of execution earlier and must unwind and restart; the
- * simplest way is to stop processing the event queue and force the
- * engine to reset.
- */
- execlists->csb_head = tail;
- ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
-
- /*
- * Hopefully paired with a wmb() in HW!
- *
- * We must complete the read of the write pointer before any reads
- * from the CSB, so that we do not see stale values. Without an rmb
- * (lfence) the HW may speculatively perform the CSB[] reads *before*
- * we perform the READ_ONCE(*csb_write).
- */
- rmb();
- do {
- bool promote;
- u64 csb;
-
- if (++head == num_entries)
- head = 0;
-
- /*
- * We are flying near dragons again.
- *
- * We hold a reference to the request in execlist_port[]
- * but no more than that. We are operating in softirq
- * context and so cannot hold any mutex or sleep. That
- * prevents us stopping the requests we are processing
- * in port[] from being retired simultaneously (the
- * breadcrumb will be complete before we see the
- * context-switch). As we only hold the reference to the
- * request, any pointer chasing underneath the request
- * is subject to a potential use-after-free. Thus we
- * store all of the bookkeeping within port[] as
- * required, and avoid using unguarded pointers beneath
- * request itself. The same applies to the atomic
- * status notifier.
- */
-
- csb = csb_read(engine, buf + head);
- ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
- head, upper_32_bits(csb), lower_32_bits(csb));
-
- if (INTEL_GEN(engine->i915) >= 12)
- promote = gen12_csb_parse(csb);
- else
- promote = gen8_csb_parse(csb);
- if (promote) {
- struct i915_request * const *old = execlists->active;
-
- if (GEM_WARN_ON(!*execlists->pending)) {
- execlists->error_interrupt |= ERROR_CSB;
- break;
- }
-
- ring_set_paused(engine, 0);
-
- /* Point active to the new ELSP; prevent overwriting */
- WRITE_ONCE(execlists->active, execlists->pending);
- smp_wmb(); /* notify execlists_active() */
-
- /* cancel old inflight, prepare for switch */
- trace_ports(execlists, "preempted", old);
- while (*old)
- execlists_schedule_out(*old++);
-
- /* switch pending to inflight */
- GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
- copy_ports(execlists->inflight,
- execlists->pending,
- execlists_num_ports(execlists));
- smp_wmb(); /* complete the seqlock */
- WRITE_ONCE(execlists->active, execlists->inflight);
-
- /* XXX Magic delay for tgl */
- ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
- WRITE_ONCE(execlists->pending[0], NULL);
- } else {
- if (GEM_WARN_ON(!*execlists->active)) {
- execlists->error_interrupt |= ERROR_CSB;
- break;
- }
-
- /* port0 completed, advanced to port1 */
- trace_ports(execlists, "completed", execlists->active);
-
- /*
- * We rely on the hardware being strongly
- * ordered, that the breadcrumb write is
- * coherent (visible from the CPU) before the
- * user interrupt is processed. One might assume that,
- * since the breadcrumb write lands before the user
- * interrupt and the CS event for the context switch,
- * it would therefore also be visible before the CS
- * event itself...
- */
- if (GEM_SHOW_DEBUG() &&
- !i915_request_completed(*execlists->active)) {
- struct i915_request *rq = *execlists->active;
- const u32 *regs __maybe_unused =
- rq->context->lrc_reg_state;
-
- ENGINE_TRACE(engine,
- "context completed before request!\n");
- ENGINE_TRACE(engine,
- "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
- ENGINE_READ(engine, RING_START),
- ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
- ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_MI_MODE));
- ENGINE_TRACE(engine,
- "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
- i915_ggtt_offset(rq->ring->vma),
- rq->head, rq->tail,
- rq->fence.context,
- lower_32_bits(rq->fence.seqno),
- hwsp_seqno(rq));
- ENGINE_TRACE(engine,
- "ctx:{start:%08x, head:%04x, tail:%04x}, ",
- regs[CTX_RING_START],
- regs[CTX_RING_HEAD],
- regs[CTX_RING_TAIL]);
- }
-
- execlists_schedule_out(*execlists->active++);
-
- GEM_BUG_ON(execlists->active - execlists->inflight >
- execlists_num_ports(execlists));
- }
- } while (head != tail);
-
- set_timeslice(engine);
-
- /*
- * Gen11 has proven to fail wrt global observation point between
- * entry and tail update, failing on the ordering and thus
- * we see an old entry in the context status buffer.
- *
- * Forcibly evict out entries for the next gpu csb update,
- * to increase the odds that we get fresh entries with non-working
- * hardware. The cost for doing so comes out mostly with
- * the wash as hardware, working or not, will need to do the
- * invalidation before.
- */
- invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
-}
+ struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
-static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
-{
- lockdep_assert_held(&engine->active.lock);
- if (!READ_ONCE(engine->execlists.pending[0])) {
- rcu_read_lock(); /* protect peeking at execlists->active */
- execlists_dequeue(engine);
- rcu_read_unlock();
- }
+ return intel_timeline_create_from_engine(engine, page_unmask_bits(tl));
}
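
Stripped of the hardware specifics, process_csb() above is a single-consumer ring walk: sample the producer's write pointer once, order the entry reads after that sample, then advance head modulo the buffer size until it meets the sampled tail. A compact userspace sketch of the same loop shape, using a C11 acquire load where the driver uses READ_ONCE() plus rmb():

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_ENTRIES 12

struct csb_ring {
	uint64_t entries[NUM_ENTRIES];
	_Atomic uint8_t write;		/* advanced by the producer (the GPU) */
	uint8_t head;			/* advanced by this consumer */
};

static void consume_events(struct csb_ring *r, void (*handle)(uint64_t entry))
{
	uint8_t head = r->head;
	/* acquire: the entry reads below may not be hoisted above this load */
	uint8_t tail = atomic_load_explicit(&r->write, memory_order_acquire);

	if (head == tail)
		return;

	r->head = tail;
	do {
		if (++head == NUM_ENTRIES)
			head = 0;
		handle(r->entries[head]);
	} while (head != tail);
}

static void print_entry(uint64_t entry)
{
	printf("event %llu\n", (unsigned long long)entry);
}

int main(void)
{
	struct csb_ring ring = { .entries = { 0, 10, 20, 30 }, .head = 0 };

	atomic_store(&ring.write, 3);	/* producer published entries 1..3 */
	consume_events(&ring, print_entry);
	return 0;
}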
-static void __execlists_hold(struct i915_request *rq)
+int lrc_alloc(struct intel_context *ce, struct intel_engine_cs *engine)
{
- LIST_HEAD(list);
-
- do {
- struct i915_dependency *p;
-
- if (i915_request_is_active(rq))
- __i915_request_unsubmit(rq);
-
- clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
- list_move_tail(&rq->sched.link, &rq->engine->active.hold);
- i915_request_set_hold(rq);
- RQ_TRACE(rq, "on hold\n");
-
- for_each_waiter(p, rq) {
- struct i915_request *w =
- container_of(p->waiter, typeof(*w), sched);
-
- /* Leave semaphores spinning on the other engines */
- if (w->engine != rq->engine)
- continue;
-
- if (!i915_request_is_ready(w))
- continue;
-
- if (i915_request_completed(w))
- continue;
-
- if (i915_request_on_hold(w))
- continue;
-
- list_move_tail(&w->sched.link, &list);
- }
-
- rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
- } while (rq);
-}
+ struct intel_ring *ring;
+ struct i915_vma *vma;
+ int err;
-static bool execlists_hold(struct intel_engine_cs *engine,
- struct i915_request *rq)
-{
- if (i915_request_on_hold(rq))
- return false;
+ GEM_BUG_ON(ce->state);
- spin_lock_irq(&engine->active.lock);
+ vma = __lrc_alloc_state(ce, engine);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
- if (i915_request_completed(rq)) { /* too late! */
- rq = NULL;
- goto unlock;
+ ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
+ if (IS_ERR(ring)) {
+ err = PTR_ERR(ring);
+ goto err_vma;
}
- if (rq->engine != engine) { /* preempted virtual engine */
- struct virtual_engine *ve = to_virtual_engine(rq->engine);
-
- /*
- * intel_context_inflight() is only protected by virtue
- * of process_csb() being called only by the tasklet (or
- * directly from inside reset while the tasklet is suspended).
- * Assert that neither of those are allowed to run while we
- * poke at the request queues.
- */
- GEM_BUG_ON(!reset_in_progress(&engine->execlists));
+ if (!page_mask_bits(ce->timeline)) {
+ struct intel_timeline *tl;
/*
- * An unsubmitted request along a virtual engine will
- * remain on the active (this) engine until we are able
- * to process the context switch away (and so mark the
- * context as no longer in flight). That cannot have happened
- * yet, otherwise we would not be hanging!
+ * Use the static global HWSP for the kernel context, and
+ * a dynamically allocated cacheline for everyone else.
*/
- spin_lock(&ve->base.active.lock);
- GEM_BUG_ON(intel_context_inflight(rq->context) != engine);
- GEM_BUG_ON(ve->request != rq);
- ve->request = NULL;
- spin_unlock(&ve->base.active.lock);
- i915_request_put(rq);
-
- rq->engine = engine;
- }
-
- /*
- * Transfer this request onto the hold queue to prevent it
- * being resubmitted to HW (and potentially completed) before we have
- * released it. Since we may have already submitted following
- * requests, we need to remove those as well.
- */
- GEM_BUG_ON(i915_request_on_hold(rq));
- GEM_BUG_ON(rq->engine != engine);
- __execlists_hold(rq);
- GEM_BUG_ON(list_empty(&engine->active.hold));
-
-unlock:
- spin_unlock_irq(&engine->active.lock);
- return rq;
-}
-
-static bool hold_request(const struct i915_request *rq)
-{
- struct i915_dependency *p;
- bool result = false;
-
- /*
- * If one of our ancestors is on hold, we must also be on hold,
- * otherwise we will bypass it and execute before it.
- */
- rcu_read_lock();
- for_each_signaler(p, rq) {
- const struct i915_request *s =
- container_of(p->signaler, typeof(*s), sched);
-
- if (s->engine != rq->engine)
- continue;
-
- result = i915_request_on_hold(s);
- if (result)
- break;
- }
- rcu_read_unlock();
-
- return result;
-}
-
-static void __execlists_unhold(struct i915_request *rq)
-{
- LIST_HEAD(list);
-
- do {
- struct i915_dependency *p;
-
- RQ_TRACE(rq, "hold release\n");
-
- GEM_BUG_ON(!i915_request_on_hold(rq));
- GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
-
- i915_request_clear_hold(rq);
- list_move_tail(&rq->sched.link,
- i915_sched_lookup_priolist(rq->engine,
- rq_prio(rq)));
- set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-
- /* Also release any children on this engine that are ready */
- for_each_waiter(p, rq) {
- struct i915_request *w =
- container_of(p->waiter, typeof(*w), sched);
-
- /* Propagate any change in error status */
- if (rq->fence.error)
- i915_request_set_error_once(w, rq->fence.error);
-
- if (w->engine != rq->engine)
- continue;
-
- if (!i915_request_on_hold(w))
- continue;
-
- /* Check that no other parents are also on hold */
- if (hold_request(w))
- continue;
-
- list_move_tail(&w->sched.link, &list);
- }
-
- rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
- } while (rq);
-}
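
__execlists_hold() and __execlists_unhold() above share one traversal shape: act on a request, push the qualifying children onto a local list, and keep draining that list so the whole dependency subtree is visited without recursion. Below is a toy, array-backed version of that worklist walk; none of the driver's list or scheduling types are used.

#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 8

struct node {
	bool on_hold;
	int nr_children;
	int children[MAX_NODES];
};

static void propagate_hold(struct node *nodes, int first)
{
	int list[MAX_NODES];
	int count = 0;

	nodes[first].on_hold = true;
	list[count++] = first;

	while (count) {
		const struct node *n = &nodes[list[--count]];
		int i;

		for (i = 0; i < n->nr_children; i++) {
			int child = n->children[i];

			if (nodes[child].on_hold)	/* already queued or visited */
				continue;

			nodes[child].on_hold = true;
			list[count++] = child;
		}
	}
}

int main(void)
{
	struct node graph[MAX_NODES] = {
		[0] = { .nr_children = 2, .children = { 1, 2 } },
		[1] = { .nr_children = 1, .children = { 3 } },
	};

	propagate_hold(graph, 0);
	printf("node 3 on hold: %d\n", graph[3].on_hold);
	return 0;
}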
-
-static void execlists_unhold(struct intel_engine_cs *engine,
- struct i915_request *rq)
-{
- spin_lock_irq(&engine->active.lock);
-
- /*
- * Move this request back to the priority queue, and all of its
- * children and grandchildren that were suspended along with it.
- */
- __execlists_unhold(rq);
-
- if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
- engine->execlists.queue_priority_hint = rq_prio(rq);
- tasklet_hi_schedule(&engine->execlists.tasklet);
- }
-
- spin_unlock_irq(&engine->active.lock);
-}
-
-struct execlists_capture {
- struct work_struct work;
- struct i915_request *rq;
- struct i915_gpu_coredump *error;
-};
-
-static void execlists_capture_work(struct work_struct *work)
-{
- struct execlists_capture *cap = container_of(work, typeof(*cap), work);
- const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
- struct intel_engine_cs *engine = cap->rq->engine;
- struct intel_gt_coredump *gt = cap->error->gt;
- struct intel_engine_capture_vma *vma;
-
- /* Compress all the objects attached to the request, slow! */
- vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
- if (vma) {
- struct i915_vma_compress *compress =
- i915_vma_capture_prepare(gt);
-
- intel_engine_coredump_add_vma(gt->engine, vma, compress);
- i915_vma_capture_finish(gt, compress);
- }
-
- gt->simulated = gt->engine->simulated;
- cap->error->simulated = gt->simulated;
-
- /* Publish the error state, and announce it to the world */
- i915_error_state_store(cap->error);
- i915_gpu_coredump_put(cap->error);
-
- /* Return this request and all that depend upon it for signaling */
- execlists_unhold(engine, cap->rq);
- i915_request_put(cap->rq);
-
- kfree(cap);
-}
-
-static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
-{
- const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
- struct execlists_capture *cap;
-
- cap = kmalloc(sizeof(*cap), gfp);
- if (!cap)
- return NULL;
-
- cap->error = i915_gpu_coredump_alloc(engine->i915, gfp);
- if (!cap->error)
- goto err_cap;
-
- cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp);
- if (!cap->error->gt)
- goto err_gpu;
-
- cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp);
- if (!cap->error->gt->engine)
- goto err_gt;
-
- cap->error->gt->engine->hung = true;
-
- return cap;
-
-err_gt:
- kfree(cap->error->gt);
-err_gpu:
- kfree(cap->error);
-err_cap:
- kfree(cap);
- return NULL;
-}
-
-static struct i915_request *
-active_context(struct intel_engine_cs *engine, u32 ccid)
-{
- const struct intel_engine_execlists * const el = &engine->execlists;
- struct i915_request * const *port, *rq;
-
- /*
- * Use the most recent result from process_csb(), but just in case
- * we trigger an error (via interrupt) before the first CS event has
- * been written, peek at the next submission.
- */
-
- for (port = el->active; (rq = *port); port++) {
- if (rq->context->lrc.ccid == ccid) {
- ENGINE_TRACE(engine,
- "ccid found at active:%zd\n",
- port - el->active);
- return rq;
- }
- }
-
- for (port = el->pending; (rq = *port); port++) {
- if (rq->context->lrc.ccid == ccid) {
- ENGINE_TRACE(engine,
- "ccid found at pending:%zd\n",
- port - el->pending);
- return rq;
+ if (unlikely(ce->timeline))
+ tl = pinned_timeline(ce, engine);
+ else
+ tl = intel_timeline_create(engine->gt);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto err_ring;
}
- }
-
- ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
- return NULL;
-}
-
-static u32 active_ccid(struct intel_engine_cs *engine)
-{
- return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
-}
-static void execlists_capture(struct intel_engine_cs *engine)
-{
- struct execlists_capture *cap;
-
- if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
- return;
-
- /*
- * We need to _quickly_ capture the engine state before we reset.
- * We are inside an atomic section (softirq) here and we are delaying
- * the forced preemption event.
- */
- cap = capture_regs(engine);
- if (!cap)
- return;
-
- spin_lock_irq(&engine->active.lock);
- cap->rq = active_context(engine, active_ccid(engine));
- if (cap->rq) {
- cap->rq = active_request(cap->rq->context->timeline, cap->rq);
- cap->rq = i915_request_get_rcu(cap->rq);
+ ce->timeline = tl;
}
- spin_unlock_irq(&engine->active.lock);
- if (!cap->rq)
- goto err_free;
-
- /*
- * Remove the request from the execlists queue, and take ownership
- * of the request. We pass it to our worker who will _slowly_ compress
- * all the pages the _user_ requested for debugging their batch, after
- * which we return it to the queue for signaling.
- *
- * By removing them from the execlists queue, we also prevent the
- * requests from being processed by __unwind_incomplete_requests()
- * during the intel_engine_reset(), and so they will *not* be replayed
- * afterwards.
- *
- * Note that because we have not yet reset the engine at this point,
- * it is possible that the request we have identified as being
- * guilty did in fact complete, and we will then hit an arbitration
- * point allowing the outstanding preemption to succeed. The likelihood
- * of that is very low (as capturing of the engine registers should be
- * fast enough to run inside an irq-off atomic section!), so we will
- * simply hold that request accountable for being non-preemptible
- * long enough to force the reset.
- */
- if (!execlists_hold(engine, cap->rq))
- goto err_rq;
-
- INIT_WORK(&cap->work, execlists_capture_work);
- schedule_work(&cap->work);
- return;
-
-err_rq:
- i915_request_put(cap->rq);
-err_free:
- i915_gpu_coredump_put(cap->error);
- kfree(cap);
-}
-
-static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
-{
- const unsigned int bit = I915_RESET_ENGINE + engine->id;
- unsigned long *lock = &engine->gt->reset.flags;
-
- if (!intel_has_reset_engine(engine->gt))
- return;
-
- if (test_and_set_bit(bit, lock))
- return;
-
- ENGINE_TRACE(engine, "reset for %s\n", msg);
- /* Mark this tasklet as disabled to avoid waiting for it to complete */
- tasklet_disable_nosync(&engine->execlists.tasklet);
+ ce->ring = ring;
+ ce->state = vma;
- ring_set_paused(engine, 1); /* Freeze the current request in place */
- execlists_capture(engine);
- intel_engine_reset(engine, msg);
+ return 0;
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(bit, lock);
+err_ring:
+ intel_ring_put(ring);
+err_vma:
+ i915_vma_put(vma);
+ return err;
}
-static bool preempt_timeout(const struct intel_engine_cs *const engine)
+void lrc_reset(struct intel_context *ce)
{
- const struct timer_list *t = &engine->execlists.preempt;
-
- if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
- return false;
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
- if (!timer_expired(t))
- return false;
+ intel_ring_reset(ce->ring, ce->ring->emit);
- return READ_ONCE(engine->execlists.pending[0]);
+ /* Scrub away the garbage */
+ lrc_init_regs(ce, ce->engine, true);
+ ce->lrc.lrca = lrc_update_regs(ce, ce->engine, ce->ring->tail);
}
-/*
- * Check the unread Context Status Buffers and manage the submission of new
- * contexts to the ELSP accordingly.
- */
-static void execlists_submission_tasklet(unsigned long data)
+int
+lrc_pre_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
- bool timeout = preempt_timeout(engine);
-
- process_csb(engine);
-
- if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
- const char *msg;
-
- /* Generate the error message in priority wrt to the user! */
- if (engine->execlists.error_interrupt & GENMASK(15, 0))
- msg = "CS error"; /* thrown by a user payload */
- else if (engine->execlists.error_interrupt & ERROR_CSB)
- msg = "invalid CSB event";
- else
- msg = "internal error";
-
- engine->execlists.error_interrupt = 0;
- execlists_reset(engine, msg);
- }
-
- if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
- unsigned long flags;
+ GEM_BUG_ON(!ce->state);
+ GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
- spin_lock_irqsave(&engine->active.lock, flags);
- __execlists_submission_tasklet(engine);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ *vaddr = i915_gem_object_pin_map(ce->state->obj,
+ i915_coherent_map_type(ce->engine->i915) |
+ I915_MAP_OVERRIDE);
- /* Recheck after serialising with direct-submission */
- if (unlikely(timeout && preempt_timeout(engine))) {
- cancel_timer(&engine->execlists.preempt);
- execlists_reset(engine, "preemption time out");
- }
- }
+ return PTR_ERR_OR_ZERO(*vaddr);
}
-static void __execlists_kick(struct intel_engine_execlists *execlists)
+int
+lrc_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *vaddr)
{
- /* Kick the tasklet for some interrupt coalescing and reset handling */
- tasklet_hi_schedule(&execlists->tasklet);
-}
-
-#define execlists_kick(t, member) \
- __execlists_kick(container_of(t, struct intel_engine_execlists, member))
+ ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
-static void execlists_timeslice(struct timer_list *timer)
-{
- execlists_kick(timer, timer);
-}
+ if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags))
+ lrc_init_state(ce, engine, vaddr);
-static void execlists_preempt(struct timer_list *timer)
-{
- execlists_kick(timer, preempt);
+ ce->lrc.lrca = lrc_update_regs(ce, engine, ce->ring->tail);
+ return 0;
}
-static void queue_request(struct intel_engine_cs *engine,
- struct i915_request *rq)
+void lrc_unpin(struct intel_context *ce)
{
- GEM_BUG_ON(!list_empty(&rq->sched.link));
- list_add_tail(&rq->sched.link,
- i915_sched_lookup_priolist(engine, rq_prio(rq)));
- set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
+ ce->engine);
}
-static void __submit_queue_imm(struct intel_engine_cs *engine)
+void lrc_post_unpin(struct intel_context *ce)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
- if (reset_in_progress(execlists))
- return; /* defer until we restart the engine following reset */
-
- __execlists_submission_tasklet(engine);
+ i915_gem_object_unpin_map(ce->state->obj);
}
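
For orientation, the refactored helpers above are meant to be paired by a submission backend roughly as follows; error handling is elided and the backend_* wrapper names are illustrative, not the actual execlists callbacks:

static int backend_context_pre_pin(struct intel_context *ce,
				   struct i915_gem_ww_ctx *ww, void **vaddr)
{
	return lrc_pre_pin(ce, ce->engine, ww, vaddr);
}

static int backend_context_pin(struct intel_context *ce, void *vaddr)
{
	return lrc_pin(ce, ce->engine, vaddr);
}

static void backend_context_unpin(struct intel_context *ce)
{
	lrc_unpin(ce);		/* verifies the debug redzone */
}

static void backend_context_post_unpin(struct intel_context *ce)
{
	lrc_post_unpin(ce);	/* drops the mapping taken in lrc_pre_pin() */
}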
-static void submit_queue(struct intel_engine_cs *engine,
- const struct i915_request *rq)
+void lrc_fini(struct intel_context *ce)
{
- struct intel_engine_execlists *execlists = &engine->execlists;
-
- if (rq_prio(rq) <= execlists->queue_priority_hint)
+ if (!ce->state)
return;
- execlists->queue_priority_hint = rq_prio(rq);
- __submit_queue_imm(engine);
-}
-
-static bool ancestor_on_hold(const struct intel_engine_cs *engine,
- const struct i915_request *rq)
-{
- GEM_BUG_ON(i915_request_on_hold(rq));
- return !list_empty(&engine->active.hold) && hold_request(rq);
-}
-
-static void flush_csb(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists *el = &engine->execlists;
-
- if (READ_ONCE(el->pending[0]) && tasklet_trylock(&el->tasklet)) {
- if (!reset_in_progress(el))
- process_csb(engine);
- tasklet_unlock(&el->tasklet);
- }
-}
-
-static void execlists_submit_request(struct i915_request *request)
-{
- struct intel_engine_cs *engine = request->engine;
- unsigned long flags;
-
- /* Hopefully we clear execlists->pending[] to let us through */
- flush_csb(engine);
-
- /* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->active.lock, flags);
-
- if (unlikely(ancestor_on_hold(engine, request))) {
- RQ_TRACE(request, "ancestor on hold\n");
- list_add_tail(&request->sched.link, &engine->active.hold);
- i915_request_set_hold(request);
- } else {
- queue_request(engine, request);
-
- GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
- GEM_BUG_ON(list_empty(&request->sched.link));
-
- submit_queue(engine, request);
- }
-
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void __execlists_context_fini(struct intel_context *ce)
-{
- intel_ring_put(ce->ring);
- i915_vma_put(ce->state);
+ intel_ring_put(fetch_and_zero(&ce->ring));
+ i915_vma_put(fetch_and_zero(&ce->state));
}
-static void execlists_context_destroy(struct kref *kref)
+void lrc_destroy(struct kref *kref)
{
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
GEM_BUG_ON(!i915_active_is_idle(&ce->active));
GEM_BUG_ON(intel_context_is_pinned(ce));
- if (ce->state)
- __execlists_context_fini(ce);
+ lrc_fini(ce);
intel_context_fini(ce);
intel_context_free(ce);
}
-static void
-set_redzone(void *vaddr, const struct intel_engine_cs *engine)
-{
- if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- return;
-
- vaddr += engine->context_size;
-
- memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
-}
-
-static void
-check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
-{
- if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- return;
-
- vaddr += engine->context_size;
-
- if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
- drm_err_once(&engine->i915->drm,
- "%s context redzone overwritten!\n",
- engine->name);
-}
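
set_redzone()/check_redzone() above implement a classic redzone check: the allocation is padded, the pad is filled with a sentinel byte, and teardown verifies the sentinel survived. A userspace sketch of the same idea follows; memchr_inv() is kernel-only, so a plain loop stands in for it.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define REDZONE_BYTE 0x5a
#define REDZONE_SIZE 64

static void *alloc_with_redzone(size_t size)
{
	unsigned char *p = malloc(size + REDZONE_SIZE);

	if (p)
		memset(p + size, REDZONE_BYTE, REDZONE_SIZE);	/* poison the pad */
	return p;
}

static int redzone_intact(const void *buf, size_t size)
{
	const unsigned char *rz = (const unsigned char *)buf + size;
	size_t i;

	for (i = 0; i < REDZONE_SIZE; i++)
		if (rz[i] != REDZONE_BYTE)
			return 0;
	return 1;
}

int main(void)
{
	unsigned char *ctx = alloc_with_redzone(128);

	if (!ctx)
		return 1;

	ctx[127] = 1;			/* in-bounds write */
	printf("intact: %d\n", redzone_intact(ctx, 128));

	ctx[130] = 1;			/* overrun into the redzone */
	printf("intact: %d\n", redzone_intact(ctx, 128));

	free(ctx);
	return 0;
}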
-
-static void execlists_context_unpin(struct intel_context *ce)
-{
- check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
- ce->engine);
-}
-
-static void execlists_context_post_unpin(struct intel_context *ce)
-{
- i915_gem_object_unpin_map(ce->state->obj);
-}
-
static u32 *
gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs)
{
@@ -3465,7 +1035,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
return cs;
}
-static inline u32 context_wa_bb_offset(const struct intel_context *ce)
+static u32 context_wa_bb_offset(const struct intel_context *ce)
{
return PAGE_SIZE * ce->wa_bb_page;
}
@@ -3496,16 +1066,57 @@ setup_indirect_ctx_bb(const struct intel_context *ce,
while ((unsigned long)cs % CACHELINE_BYTES)
*cs++ = MI_NOOP;
- lrc_ring_setup_indirect_ctx(ce->lrc_reg_state, engine,
- i915_ggtt_offset(ce->state) +
- context_wa_bb_offset(ce),
- (cs - start) * sizeof(*cs));
+ lrc_setup_indirect_ctx(ce->lrc_reg_state, engine,
+ i915_ggtt_offset(ce->state) +
+ context_wa_bb_offset(ce),
+ (cs - start) * sizeof(*cs));
}
-static void
-__execlists_update_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- u32 head)
+/*
+ * The context descriptor encodes various attributes of a context,
+ * including its GTT address and some flags. Because it's fairly
+ * expensive to calculate, we'll just do it once and cache the result,
+ * which remains valid until the context is unpinned.
+ *
+ * This is what a descriptor looks like, from LSB to MSB::
+ *
+ * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
+ * bits 12-31: LRCA, GTT address of (the HWSP of) this context
+ * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC)
+ * bits 53-54: mbz, reserved for use by hardware
+ * bits 55-63: group ID, currently unused and set to 0
+ *
+ * Starting from Gen11, the upper dword of the descriptor has a new format:
+ *
+ * bits 32-36: reserved
+ * bits 37-47: SW context ID
+ * bits 48:53: engine instance
+ * bit 54: mbz, reserved for use by hardware
+ * bits 55-60: SW counter
+ * bits 61-63: engine class
+ *
+ * engine info, SW context ID and SW counter need to form a unique number
+ * (Context ID) per lrc.
+ */
+static u32 lrc_descriptor(const struct intel_context *ce)
+{
+ u32 desc;
+
+ desc = INTEL_LEGACY_32B_CONTEXT;
+ if (i915_vm_is_4lvl(ce->vm))
+ desc = INTEL_LEGACY_64B_CONTEXT;
+ desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
+
+ desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
+ if (IS_GEN(ce->vm->i915, 8))
+ desc |= GEN8_CTX_L3LLC_COHERENT;
+
+ return i915_ggtt_offset(ce->state) | desc;
+}
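
lrc_descriptor() above relies on the LRCA being page aligned, so its low 12 bits are free to carry the GEN8_CTX_* flags. The sketch below shows that packing with placeholder flag values rather than the real GEN8_CTX_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Placeholder flag bits; the real values are the GEN8_CTX_* definitions. */
#define DESC_VALID		(1u << 0)
#define DESC_PRIVILEGE		(1u << 8)
#define DESC_ADDR_MODE_SHIFT	3

static uint32_t make_descriptor(uint32_t lrca, unsigned int addr_mode)
{
	uint32_t desc = addr_mode << DESC_ADDR_MODE_SHIFT;

	desc |= DESC_VALID | DESC_PRIVILEGE;

	/* lrca is 4KiB aligned, so its low 12 bits are free for the flags */
	return lrca | desc;
}

int main(void)
{
	uint32_t desc = make_descriptor(0x00123000, 3);

	printf("desc=%08x lrca=%08x flags=%03x\n",
	       desc, desc & ~0xfffu, desc & 0xfffu);
	return 0;
}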
+
+u32 lrc_update_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 head)
{
struct intel_ring *ring = ce->ring;
u32 *regs = ce->lrc_reg_state;
@@ -3537,205 +1148,54 @@ __execlists_update_reg_state(const struct intel_context *ce,
GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
setup_indirect_ctx_bb(ce, engine, fn);
}
-}
-static int
-execlists_context_pre_pin(struct intel_context *ce,
- struct i915_gem_ww_ctx *ww, void **vaddr)
-{
- GEM_BUG_ON(!ce->state);
- GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
-
- *vaddr = i915_gem_object_pin_map(ce->state->obj,
- i915_coherent_map_type(ce->engine->i915) |
- I915_MAP_OVERRIDE);
-
- return PTR_ERR_OR_ZERO(*vaddr);
-}
-
-static int
-__execlists_context_pin(struct intel_context *ce,
- struct intel_engine_cs *engine,
- void *vaddr)
-{
- ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
- ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
- __execlists_update_reg_state(ce, engine, ce->ring->tail);
-
- return 0;
+ return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE;
}
-static int execlists_context_pin(struct intel_context *ce, void *vaddr)
+void lrc_update_offsets(struct intel_context *ce,
+ struct intel_engine_cs *engine)
{
- return __execlists_context_pin(ce, ce->engine, vaddr);
+ set_offsets(ce->lrc_reg_state, reg_offsets(engine), engine, false);
}
-static int execlists_context_alloc(struct intel_context *ce)
+void lrc_check_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const char *when)
{
- return __execlists_context_alloc(ce, ce->engine);
-}
-
-static void execlists_context_reset(struct intel_context *ce)
-{
- CE_TRACE(ce, "reset\n");
- GEM_BUG_ON(!intel_context_is_pinned(ce));
-
- intel_ring_reset(ce->ring, ce->ring->emit);
-
- /* Scrub away the garbage */
- execlists_init_reg_state(ce->lrc_reg_state,
- ce, ce->engine, ce->ring, true);
- __execlists_update_reg_state(ce, ce->engine, ce->ring->tail);
-
- ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
-}
-
-static const struct intel_context_ops execlists_context_ops = {
- .alloc = execlists_context_alloc,
-
- .pre_pin = execlists_context_pre_pin,
- .pin = execlists_context_pin,
- .unpin = execlists_context_unpin,
- .post_unpin = execlists_context_post_unpin,
-
- .enter = intel_context_enter_engine,
- .exit = intel_context_exit_engine,
-
- .reset = execlists_context_reset,
- .destroy = execlists_context_destroy,
-};
-
-static u32 hwsp_offset(const struct i915_request *rq)
-{
- const struct intel_timeline_cacheline *cl;
-
- /* Before the request is executed, the timeline/cacheline is fixed */
-
- cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
- if (cl)
- return cl->ggtt_offset;
-
- return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
-}
-
-static int gen8_emit_init_breadcrumb(struct i915_request *rq)
-{
- u32 *cs;
-
- GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
- if (!i915_request_timeline(rq)->has_initial_breadcrumb)
- return 0;
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /*
- * Check if we have been preempted before we even get started.
- *
- * After this point i915_request_started() reports true, even if
- * we get preempted and so are no longer running.
- */
- *cs++ = MI_ARB_CHECK;
- *cs++ = MI_NOOP;
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = hwsp_offset(rq);
- *cs++ = 0;
- *cs++ = rq->fence.seqno - 1;
-
- intel_ring_advance(rq, cs);
-
- /* Record the updated position of the request's payload */
- rq->infix = intel_ring_offset(rq, cs);
-
- __set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
-
- return 0;
-}
-
-static int emit_pdps(struct i915_request *rq)
-{
- const struct intel_engine_cs * const engine = rq->engine;
- struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
- int err, i;
- u32 *cs;
-
- GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
-
- /*
- * Beware ye of the dragons, this sequence is magic!
- *
- * Small changes to this sequence can cause anything from
- * GPU hangs to forcewake errors and machine lockups!
- */
-
- /* Flush any residual operations from the context load */
- err = engine->emit_flush(rq, EMIT_FLUSH);
- if (err)
- return err;
+ const struct intel_ring *ring = ce->ring;
+ u32 *regs = ce->lrc_reg_state;
+ bool valid = true;
+ int x;
- /* Magic required to prevent forcewake errors! */
- err = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (err)
- return err;
-
- cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Ensure the LRI have landed before we invalidate & continue */
- *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
- for (i = GEN8_3LVL_PDPES; i--; ) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- u32 base = engine->mmio_base;
-
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
- *cs++ = upper_32_bits(pd_daddr);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
- *cs++ = lower_32_bits(pd_daddr);
+ if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
+ pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
+ engine->name,
+ regs[CTX_RING_START],
+ i915_ggtt_offset(ring->vma));
+ regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
+ valid = false;
}
- *cs++ = MI_NOOP;
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int execlists_request_alloc(struct i915_request *request)
-{
- int ret;
-
- GEM_BUG_ON(!intel_context_is_pinned(request->context));
- /*
- * Flush enough space to reduce the likelihood of waiting after
- * we start building the request - in which case we will just
- * have to repeat work.
- */
- request->reserved_space += EXECLISTS_REQUEST_SIZE;
-
- /*
- * Note that after this point, we have committed to using
- * this request as it is being used to both track the
- * state of engine initialisation and liveness of the
- * golden renderstate above. Think twice before you try
- * to cancel/unwind this request now.
- */
-
- if (!i915_vm_is_4lvl(request->context->vm)) {
- ret = emit_pdps(request);
- if (ret)
- return ret;
+ if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
+ (RING_CTL_SIZE(ring->size) | RING_VALID)) {
+ pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
+ engine->name,
+ regs[CTX_RING_CTL],
+ (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
+ regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+ valid = false;
}
- /* Unconditionally invalidate GPU caches and TLBs. */
- ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
- if (ret)
- return ret;
+ x = lrc_ring_mi_mode(engine);
+ if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
+ pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
+ engine->name, regs[x + 1]);
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
+ valid = false;
+ }
- request->reserved_space -= EXECLISTS_REQUEST_SIZE;
- return 0;
+ WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when);
}
/*
@@ -3955,7 +1415,7 @@ gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
return batch;
}
-#define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE)
+#define CTX_WA_BB_SIZE (PAGE_SIZE)
static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
{
@@ -3963,7 +1423,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
struct i915_vma *vma;
int err;
- obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_OBJ_SIZE);
+ obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -3985,7 +1445,7 @@ err:
return err;
}
-static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
+void lrc_fini_wa_ctx(struct intel_engine_cs *engine)
{
i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
@@ -3995,23 +1455,24 @@ static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
-static int intel_init_workaround_bb(struct intel_engine_cs *engine)
+void lrc_init_wa_ctx(struct intel_engine_cs *engine)
{
struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
- struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
- &wa_ctx->per_ctx };
- wa_bb_func_t wa_bb_fn[2];
+ struct i915_wa_ctx_bb *wa_bb[] = {
+ &wa_ctx->indirect_ctx, &wa_ctx->per_ctx
+ };
+ wa_bb_func_t wa_bb_fn[ARRAY_SIZE(wa_bb)];
void *batch, *batch_ptr;
unsigned int i;
- int ret;
+ int err;
if (engine->class != RENDER_CLASS)
- return 0;
+ return;
switch (INTEL_GEN(engine->i915)) {
case 12:
case 11:
- return 0;
+ return;
case 10:
wa_bb_fn[0] = gen10_init_indirectctx_bb;
wa_bb_fn[1] = NULL;
@@ -4026,14 +1487,20 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
break;
default:
MISSING_CASE(INTEL_GEN(engine->i915));
- return 0;
+ return;
}
- ret = lrc_setup_wa_ctx(engine);
- if (ret) {
- drm_dbg(&engine->i915->drm,
- "Failed to setup context WA page: %d\n", ret);
- return ret;
+ err = lrc_setup_wa_ctx(engine);
+ if (err) {
+ /*
+ * We continue even if we fail to initialize WA batch
+ * because we only expect rare glitches but nothing
+ * critical to prevent us from using the GPU
+ */
+ drm_err(&engine->i915->drm,
+ "Ignoring context switch w/a allocation error:%d\n",
+ err);
+ return;
}
batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
@@ -4048,2104 +1515,52 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
wa_bb[i]->offset = batch_ptr - batch;
if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
CACHELINE_BYTES))) {
- ret = -EINVAL;
+ err = -EINVAL;
break;
}
if (wa_bb_fn[i])
batch_ptr = wa_bb_fn[i](engine, batch_ptr);
wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
}
- GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
+ GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_SIZE);
__i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
__i915_gem_object_release_map(wa_ctx->vma->obj);
- if (ret)
- lrc_destroy_wa_ctx(engine);
-
- return ret;
-}
-
-static void reset_csb_pointers(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- const unsigned int reset_value = execlists->csb_size - 1;
-
- ring_set_paused(engine, 0);
-
- /*
- * Sometimes Icelake forgets to reset its pointers on a GPU reset.
- * Bludgeon them with a mmio update to be sure.
- */
- ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
- 0xffff << 16 | reset_value << 8 | reset_value);
- ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
- /*
- * After a reset, the HW starts writing into CSB entry [0]. We
- * therefore have to set our HEAD pointer back one entry so that
- * the *first* entry we check is entry 0. To complicate this further,
- * as we don't wait for the first interrupt after reset, we have to
- * fake the HW write to point back to the last entry so that our
- * inline comparison of our cached head position against the last HW
- * write works even before the first interrupt.
- */
- execlists->csb_head = reset_value;
- WRITE_ONCE(*execlists->csb_write, reset_value);
- wmb(); /* Make sure this is visible to HW (paranoia?) */
-
- /* Check that the GPU does indeed update the CSB entries! */
- memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
- invalidate_csb_entries(&execlists->csb_status[0],
- &execlists->csb_status[reset_value]);
-
- /* Once more for luck and our trusty paranoia */
- ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
- 0xffff << 16 | reset_value << 8 | reset_value);
- ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
-
- GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
-}
-
-static void execlists_sanitize(struct intel_engine_cs *engine)
-{
- GEM_BUG_ON(execlists_active(&engine->execlists));
-
- /*
- * Poison residual state on resume, in case the suspend didn't!
- *
- * We have to assume that across suspend/resume (or other loss
- * of control) the contents of our pinned buffers have been
- * lost, replaced by garbage. Since this doesn't always happen,
- * let's poison such state so that we more quickly spot when
- * we falsely assume it has been preserved.
- */
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
-
- reset_csb_pointers(engine);
-
- /*
- * The kernel_context HWSP is stored in the status_page. As above,
- * that may be lost on resume/initialisation, and so we need to
- * reset the value in the HWSP.
- */
- intel_timeline_reset_seqno(engine->kernel_context->timeline);
-
- /* And scrub the dirty cachelines for the HWSP */
- clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
-}
-
-static void enable_error_interrupt(struct intel_engine_cs *engine)
-{
- u32 status;
-
- engine->execlists.error_interrupt = 0;
- ENGINE_WRITE(engine, RING_EMR, ~0u);
- ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
-
- status = ENGINE_READ(engine, RING_ESR);
- if (unlikely(status)) {
- drm_err(&engine->i915->drm,
- "engine '%s' resumed still in error: %08x\n",
- engine->name, status);
- __intel_gt_reset(engine->gt, engine->mask);
- }
-
- /*
- * On current gen8+, we have 2 signals to play with
- *
- * - I915_ERROR_INSTRUCTION (bit 0)
- *
- * Generate an error if the command parser encounters an invalid
- * instruction
- *
- * This is a fatal error.
- *
- * - CP_PRIV (bit 2)
- *
- * Generate an error on privilege violation (where the CP replaces
- * the instruction with a no-op). This also fires for writes into
- * read-only scratch pages.
- *
- * This is a non-fatal error, parsing continues.
- *
- * * there are a few others defined for odd HW that we do not use
- *
- * Since CP_PRIV fires for cases where we have chosen to ignore the
- * error (as the HW is validating and suppressing the mistakes), we
- * only unmask the instruction error bit.
- */
- ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION);
-}
-
-static void enable_execlists(struct intel_engine_cs *engine)
-{
- u32 mode;
-
- assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
-
- intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
-
- if (INTEL_GEN(engine->i915) >= 11)
- mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
- else
- mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
- ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
-
- ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
-
- ENGINE_WRITE_FW(engine,
- RING_HWS_PGA,
- i915_ggtt_offset(engine->status_page.vma));
- ENGINE_POSTING_READ(engine, RING_HWS_PGA);
-
- enable_error_interrupt(engine);
-
- engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
-}
-
-static bool unexpected_starting_state(struct intel_engine_cs *engine)
-{
- bool unexpected = false;
-
- if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
- drm_dbg(&engine->i915->drm,
- "STOP_RING still set in RING_MI_MODE\n");
- unexpected = true;
- }
-
- return unexpected;
-}
-
-static int execlists_resume(struct intel_engine_cs *engine)
-{
- intel_mocs_init_engine(engine);
-
- intel_breadcrumbs_reset(engine->breadcrumbs);
-
- if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
- struct drm_printer p = drm_debug_printer(__func__);
-
- intel_engine_dump(engine, &p, NULL);
- }
-
- enable_execlists(engine);
-
- return 0;
-}
-static void execlists_reset_prepare(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- unsigned long flags;
-
- ENGINE_TRACE(engine, "depth<-%d\n",
- atomic_read(&execlists->tasklet.count));
-
- /*
- * Prevent request submission to the hardware until we have
- * completed the reset in i915_gem_reset_finish(). If a request
- * is completed by one engine, it may then queue a request
- * to a second via its execlists->tasklet *just* as we are
- * calling engine->resume() and also writing the ELSP.
- * Turning off the execlists->tasklet until the reset is over
- * prevents the race.
- */
- __tasklet_disable_sync_once(&execlists->tasklet);
- GEM_BUG_ON(!reset_in_progress(execlists));
-
- /* And flush any current direct submission. */
- spin_lock_irqsave(&engine->active.lock, flags);
- spin_unlock_irqrestore(&engine->active.lock, flags);
-
- /*
- * We stop engines, otherwise we might get failed reset and a
- * dead gpu (on elk). Also, a modern gpu such as kbl can suffer
- * from a system hang if a batchbuffer is progressing when
- * the reset is issued, regardless of READY_TO_RESET ack.
- * Thus assume it is best to stop engines on all gens
- * where we have a gpu reset.
- *
- * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
- *
- * FIXME: Wa for more modern gens needs to be validated
- */
- ring_set_paused(engine, 1);
- intel_engine_stop_cs(engine);
-
- engine->execlists.reset_ccid = active_ccid(engine);
-}
-
-static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
-{
- int x;
-
- x = lrc_ring_mi_mode(engine);
- if (x != -1) {
- regs[x + 1] &= ~STOP_RING;
- regs[x + 1] |= STOP_RING << 16;
- }
-}
-
-static void __execlists_reset_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine)
-{
- u32 *regs = ce->lrc_reg_state;
-
- __reset_stop_ring(regs, engine);
+ /* Verify that we can handle failure to setup the wa_ctx */
+ if (err || i915_inject_probe_error(engine->i915, -ENODEV))
+ lrc_fini_wa_ctx(engine);
}
-static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct intel_context *ce;
- struct i915_request *rq;
- u32 head;
-
- mb(); /* paranoia: read the CSB pointers from after the reset */
- clflush(execlists->csb_write);
- mb();
-
- process_csb(engine); /* drain preemption events */
-
- /* Following the reset, we need to reload the CSB read/write pointers */
- reset_csb_pointers(engine);
-
- /*
- * Save the currently executing context, even if we completed
- * its request, it was still running at the time of the
- * reset and will have been clobbered.
- */
- rq = active_context(engine, engine->execlists.reset_ccid);
- if (!rq)
- goto unwind;
-
- ce = rq->context;
- GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
-
- if (i915_request_completed(rq)) {
- /* Idle context; tidy up the ring so we can restart afresh */
- head = intel_ring_wrap(ce->ring, rq->tail);
- goto out_replay;
- }
-
- /* We still have requests in-flight; the engine should be active */
- GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
-
- /* Context has requests still in-flight; it should not be idle! */
- GEM_BUG_ON(i915_active_is_idle(&ce->active));
-
- rq = active_request(ce->timeline, rq);
- head = intel_ring_wrap(ce->ring, rq->head);
- GEM_BUG_ON(head == ce->ring->tail);
-
- /*
- * If this request hasn't started yet, e.g. it is waiting on a
- * semaphore, we need to avoid skipping the request or else we
- * break the signaling chain. However, if the context is corrupt
- * the request will not restart and we will be stuck with a wedged
- * device. It is quite often the case that if we issue a reset
- * while the GPU is loading the context image, the context
- * image becomes corrupt.
- *
- * Otherwise, if we have not started yet, the request should replay
- * perfectly and we do not need to flag the result as being erroneous.
- */
- if (!i915_request_started(rq))
- goto out_replay;
-
- /*
- * If the request was innocent, we leave the request in the ELSP
- * and will try to replay it on restarting. The context image may
- * have been corrupted by the reset, in which case we may have
- * to service a new GPU hang, but more likely we can continue on
- * without impact.
- *
- * If the request was guilty, we presume the context is corrupt
- * and have to at least restore the RING register in the context
- * image back to the expected values to skip over the guilty request.
- */
- __i915_request_reset(rq, stalled);
-
- /*
- * We want a simple context + ring to execute the breadcrumb update.
- * We cannot rely on the context being intact across the GPU hang,
- * so clear it and rebuild just what we need for the breadcrumb.
- * All pending requests for this context will be zapped, and any
- * future request will be after userspace has had the opportunity
- * to recreate its own state.
- */
-out_replay:
- ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
- head, ce->ring->tail);
- __execlists_reset_reg_state(ce, engine);
- __execlists_update_reg_state(ce, engine, head);
- ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
-
-unwind:
- /* Push back any incomplete requests for replay after the reset. */
- cancel_port_requests(execlists);
- __unwind_incomplete_requests(engine);
-}
-
-static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
-{
- unsigned long flags;
-
- ENGINE_TRACE(engine, "\n");
-
- spin_lock_irqsave(&engine->active.lock, flags);
-
- __execlists_reset(engine, stalled);
-
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void nop_submission_tasklet(unsigned long data)
-{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
-
- /* The driver is wedged; don't process any more events. */
- WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
-}
-
-static void execlists_reset_cancel(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request *rq, *rn;
- struct rb_node *rb;
- unsigned long flags;
-
- ENGINE_TRACE(engine, "\n");
-
- /*
- * Before we call engine->cancel_requests(), we should have exclusive
- * access to the submission state. This is arranged for us by the
- * caller disabling the interrupt generation, the tasklet and other
- * threads that may then access the same state, giving us a free hand
- * to reset state. However, we still need to let lockdep be aware that
- * we know this state may be accessed in hardirq context, so we
- * disable the irq around this manipulation and we want to keep
- * the spinlock focused on its duties and not accidentally conflate
- * coverage to the submission's irq state. (Similarly, although we
- * shouldn't need to disable irq around the manipulation of the
- * submission's irq state, we also wish to remind ourselves that
- * it is irq state.)
- */
- spin_lock_irqsave(&engine->active.lock, flags);
-
- __execlists_reset(engine, true);
-
- /* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->active.requests, sched.link)
- mark_eio(rq);
- intel_engine_signal_breadcrumbs(engine);
-
- /* Flush the queued requests to the timeline list (for retiring). */
- while ((rb = rb_first_cached(&execlists->queue))) {
- struct i915_priolist *p = to_priolist(rb);
- int i;
-
- priolist_for_each_request_consume(rq, rn, p, i) {
- mark_eio(rq);
- __i915_request_submit(rq);
- }
-
- rb_erase_cached(&p->node, &execlists->queue);
- i915_priolist_free(p);
- }
-
- /* On-hold requests will be flushed to timeline upon their release */
- list_for_each_entry(rq, &engine->active.hold, sched.link)
- mark_eio(rq);
-
- /* Cancel all attached virtual engines */
- while ((rb = rb_first_cached(&execlists->virtual))) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
-
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
-
- spin_lock(&ve->base.active.lock);
- rq = fetch_and_zero(&ve->request);
- if (rq) {
- mark_eio(rq);
-
- rq->engine = engine;
- __i915_request_submit(rq);
- i915_request_put(rq);
-
- ve->base.execlists.queue_priority_hint = INT_MIN;
- }
- spin_unlock(&ve->base.active.lock);
- }
-
- /* Remaining _unready_ requests will be nop'ed when submitted */
-
- execlists->queue_priority_hint = INT_MIN;
- execlists->queue = RB_ROOT_CACHED;
-
- GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
- execlists->tasklet.func = nop_submission_tasklet;
-
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void execlists_reset_finish(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
- /*
- * After a GPU reset, we may have requests to replay. Do so now while
- * we still have the forcewake to be sure that the GPU is not allowed
- * to sleep before we restart and reload a context.
- */
- GEM_BUG_ON(!reset_in_progress(execlists));
- if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
- execlists->tasklet.func(execlists->tasklet.data);
-
- if (__tasklet_enable(&execlists->tasklet))
- /* And kick in case we missed a new request submission. */
- tasklet_hi_schedule(&execlists->tasklet);
- ENGINE_TRACE(engine, "depth->%d\n",
- atomic_read(&execlists->tasklet.count));
-}
-
-static int gen8_emit_bb_start_noarb(struct i915_request *rq,
- u64 offset, u32 len,
- const unsigned int flags)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /*
- * WaDisableCtxRestoreArbitration:bdw,chv
- *
- * We don't need to perform MI_ARB_ENABLE as often as we do (in
- * particular all the gen that do not need the w/a at all!), if we
- * took care to make sure that on every switch into this context
- * (both ordinary and for preemption) arbitration was enabled
- * we would be fine. However, for gen8 there is another w/a that
- * requires us to not preempt inside GPGPU execution, so we keep
- * arbitration disabled for gen8 batches. Arbitration will be
- * re-enabled before we close the request
- * (engine->emit_fini_breadcrumb).
- */
- *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
-
- /* FIXME(BDW+): Address space and security selectors. */
- *cs++ = MI_BATCH_BUFFER_START_GEN8 |
- (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int gen8_emit_bb_start(struct i915_request *rq,
- u64 offset, u32 len,
- const unsigned int flags)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
- *cs++ = MI_BATCH_BUFFER_START_GEN8 |
- (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
- *cs++ = MI_NOOP;
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
-{
- ENGINE_WRITE(engine, RING_IMR,
- ~(engine->irq_enable_mask | engine->irq_keep_mask));
- ENGINE_POSTING_READ(engine, RING_IMR);
-}
-
-static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
-{
- ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
-}
-
-static int gen8_emit_flush(struct i915_request *request, u32 mode)
-{
- u32 cmd, *cs;
-
- cs = intel_ring_begin(request, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cmd = MI_FLUSH_DW + 1;
-
- /* We always require a command barrier so that subsequent
- * commands, such as breadcrumb interrupts, are strictly ordered
- * wrt the contents of the write cache being flushed to memory
- * (and thus being coherent from the CPU).
- */
- cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
-
- if (mode & EMIT_INVALIDATE) {
- cmd |= MI_INVALIDATE_TLB;
- if (request->engine->class == VIDEO_DECODE_CLASS)
- cmd |= MI_INVALIDATE_BSD;
- }
-
- *cs++ = cmd;
- *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
- *cs++ = 0; /* upper addr */
- *cs++ = 0; /* value */
- intel_ring_advance(request, cs);
-
- return 0;
-}
-
-static int gen8_emit_flush_render(struct i915_request *request,
- u32 mode)
-{
- bool vf_flush_wa = false, dc_flush_wa = false;
- u32 *cs, flags = 0;
- int len;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- if (mode & EMIT_FLUSH) {
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
- }
-
- if (mode & EMIT_INVALIDATE) {
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-
- /*
- * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
- * pipe control.
- */
- if (IS_GEN(request->engine->i915, 9))
- vf_flush_wa = true;
-
- /* WaForGAMHang:kbl */
- if (IS_KBL_GT_REVID(request->engine->i915, 0, KBL_REVID_B0))
- dc_flush_wa = true;
- }
-
- len = 6;
-
- if (vf_flush_wa)
- len += 6;
-
- if (dc_flush_wa)
- len += 12;
-
- cs = intel_ring_begin(request, len);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- if (vf_flush_wa)
- cs = gen8_emit_pipe_control(cs, 0, 0);
-
- if (dc_flush_wa)
- cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
- 0);
-
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
-
- if (dc_flush_wa)
- cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
-
- intel_ring_advance(request, cs);
-
- return 0;
-}
-
-static int gen11_emit_flush_render(struct i915_request *request,
- u32 mode)
-{
- if (mode & EMIT_FLUSH) {
- u32 *cs;
- u32 flags = 0;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-
- cs = intel_ring_begin(request, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
- intel_ring_advance(request, cs);
- }
-
- if (mode & EMIT_INVALIDATE) {
- u32 *cs;
- u32 flags = 0;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-
- cs = intel_ring_begin(request, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
- intel_ring_advance(request, cs);
- }
-
- return 0;
-}
-
-static u32 preparser_disable(bool state)
-{
- return MI_ARB_CHECK | 1 << 8 | state;
-}
-
-static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine)
-{
- static const i915_reg_t vd[] = {
- GEN12_VD0_AUX_NV,
- GEN12_VD1_AUX_NV,
- GEN12_VD2_AUX_NV,
- GEN12_VD3_AUX_NV,
- };
-
- static const i915_reg_t ve[] = {
- GEN12_VE0_AUX_NV,
- GEN12_VE1_AUX_NV,
- };
-
- if (engine->class == VIDEO_DECODE_CLASS)
- return vd[engine->instance];
-
- if (engine->class == VIDEO_ENHANCEMENT_CLASS)
- return ve[engine->instance];
-
- GEM_BUG_ON("unknown aux_inv_reg\n");
-
- return INVALID_MMIO_REG;
-}
-
-static u32 *
-gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs)
-{
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(inv_reg);
- *cs++ = AUX_INV;
- *cs++ = MI_NOOP;
-
- return cs;
-}
-
-static int gen12_emit_flush_render(struct i915_request *request,
- u32 mode)
-{
- if (mode & EMIT_FLUSH) {
- u32 flags = 0;
- u32 *cs;
-
- flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
- flags |= PIPE_CONTROL_FLUSH_L3;
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- /* Wa_1409600907:tgl */
- flags |= PIPE_CONTROL_DEPTH_STALL;
- flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
-
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
- flags |= PIPE_CONTROL_QW_WRITE;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- cs = intel_ring_begin(request, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cs = gen12_emit_pipe_control(cs,
- PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
- flags, LRC_PPHWSP_SCRATCH_ADDR);
- intel_ring_advance(request, cs);
- }
-
- if (mode & EMIT_INVALIDATE) {
- u32 flags = 0;
- u32 *cs;
-
- flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
-
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
- flags |= PIPE_CONTROL_QW_WRITE;
-
- flags |= PIPE_CONTROL_CS_STALL;
-
- cs = intel_ring_begin(request, 8 + 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /*
- * Prevent the pre-parser from skipping past the TLB
- * invalidate and loading a stale page for the batch
- * buffer / request payload.
- */
- *cs++ = preparser_disable(true);
-
- cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
-
- /* hsdes: 1809175790 */
- cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs);
-
- *cs++ = preparser_disable(false);
- intel_ring_advance(request, cs);
- }
-
- return 0;
-}
-
-static int gen12_emit_flush(struct i915_request *request, u32 mode)
-{
- intel_engine_mask_t aux_inv = 0;
- u32 cmd, *cs;
-
- cmd = 4;
- if (mode & EMIT_INVALIDATE)
- cmd += 2;
- if (mode & EMIT_INVALIDATE)
- aux_inv = request->engine->mask & ~BIT(BCS0);
- if (aux_inv)
- cmd += 2 * hweight8(aux_inv) + 2;
-
- cs = intel_ring_begin(request, cmd);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- if (mode & EMIT_INVALIDATE)
- *cs++ = preparser_disable(true);
-
- cmd = MI_FLUSH_DW + 1;
-
- /* We always require a command barrier so that subsequent
- * commands, such as breadcrumb interrupts, are strictly ordered
- * wrt the contents of the write cache being flushed to memory
- * (and thus being coherent from the CPU).
- */
- cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
-
- if (mode & EMIT_INVALIDATE) {
- cmd |= MI_INVALIDATE_TLB;
- if (request->engine->class == VIDEO_DECODE_CLASS)
- cmd |= MI_INVALIDATE_BSD;
- }
-
- *cs++ = cmd;
- *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
- *cs++ = 0; /* upper addr */
- *cs++ = 0; /* value */
-
- if (aux_inv) { /* hsdes: 1809175790 */
- struct intel_engine_cs *engine;
- unsigned int tmp;
-
- *cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv));
- for_each_engine_masked(engine, request->engine->gt,
- aux_inv, tmp) {
- *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine));
- *cs++ = AUX_INV;
- }
- *cs++ = MI_NOOP;
- }
-
- if (mode & EMIT_INVALIDATE)
- *cs++ = preparser_disable(false);
-
- intel_ring_advance(request, cs);
-
- return 0;
-}
-
-static void assert_request_valid(struct i915_request *rq)
-{
- struct intel_ring *ring __maybe_unused = rq->ring;
-
- /* Can we unwind this request without appearing to go forwards? */
- GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
-}
-
-/*
- * Reserve space for 2 NOOPs at the end of each request to be
- * used as a workaround for not being allowed to do lite
- * restore with HEAD==TAIL (WaIdleLiteRestore).
- */
-static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
-{
- /* Ensure there's always at least one preemption point per-request. */
- *cs++ = MI_ARB_CHECK;
- *cs++ = MI_NOOP;
- request->wa_tail = intel_ring_offset(request, cs);
-
- /* Check that entire request is less than half the ring */
- assert_request_valid(request);
-
- return cs;
-}
-
-static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs)
-{
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_EQ_SDD;
- *cs++ = 0;
- *cs++ = intel_hws_preempt_address(request->engine);
- *cs++ = 0;
-
- return cs;
-}
-
-static __always_inline u32*
-gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
-{
- *cs++ = MI_USER_INTERRUPT;
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- if (intel_engine_has_semaphores(request->engine))
- cs = emit_preempt_busywait(request, cs);
-
- request->tail = intel_ring_offset(request, cs);
- assert_ring_tail_valid(request->ring, request->tail);
-
- return gen8_emit_wa_tail(request, cs);
-}
-
-static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
-}
-
-static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
-}
-
-static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
-{
- cs = gen8_emit_pipe_control(cs,
- PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE,
- 0);
-
- /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
- cs = gen8_emit_ggtt_write_rcs(cs,
- request->fence.seqno,
- hwsp_offset(request),
- PIPE_CONTROL_FLUSH_ENABLE |
- PIPE_CONTROL_CS_STALL);
-
- return gen8_emit_fini_breadcrumb_tail(request, cs);
-}
-
-static u32 *
-gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
-{
- cs = gen8_emit_ggtt_write_rcs(cs,
- request->fence.seqno,
- hwsp_offset(request),
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_TILE_CACHE_FLUSH |
- PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE |
- PIPE_CONTROL_FLUSH_ENABLE);
-
- return gen8_emit_fini_breadcrumb_tail(request, cs);
-}
-
-/*
- * Note that the CS instruction pre-parser will not stall on the breadcrumb
- * flush and will continue pre-fetching the instructions after it before the
- * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
- * BB_START/END instructions, so, even though we might pre-fetch the pre-amble
- * of the next request before the memory has been flushed, we're guaranteed that
- * we won't access the batch itself too early.
- * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
- * so, if the current request is modifying an instruction in the next request on
- * the same intel_context, we might pre-fetch and then execute the pre-update
- * instruction. To avoid this, the users of self-modifying code should either
- * disable the parser around the code emitting the memory writes, via a new flag
- * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
- * the in-kernel use-cases we've opted to use a separate context, see
- * reloc_gpu() as an example.
- * All the above applies only to the instructions themselves. Non-inline data
- * used by the instructions is not pre-fetched.
- */
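As a hedged illustration of the mitigation described in the comment above (this helper and its arguments are hypothetical, not part of the patch), a request that patches instructions consumed by a later request on gen12+ could bracket its memory write with the same pre-parser disable/enable pair used by gen12_emit_flush_render() in this file:

static int emit_patched_store(struct i915_request *rq, u64 ggtt_addr, u32 value)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Keep the pre-parser from racing ahead of the store below. */
	*cs++ = preparser_disable(true);

	/* Hypothetical GGTT store that rewrites an instruction in a later request. */
	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = lower_32_bits(ggtt_addr);
	*cs++ = upper_32_bits(ggtt_addr);
	*cs++ = value;

	*cs++ = preparser_disable(false);

	intel_ring_advance(rq, cs);
	return 0;
}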
-
-static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs)
-{
- *cs++ = MI_SEMAPHORE_WAIT_TOKEN |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_EQ_SDD;
- *cs++ = 0;
- *cs++ = intel_hws_preempt_address(request->engine);
- *cs++ = 0;
- *cs++ = 0;
- *cs++ = MI_NOOP;
-
- return cs;
-}
-
-static __always_inline u32*
-gen12_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
-{
- *cs++ = MI_USER_INTERRUPT;
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- if (intel_engine_has_semaphores(request->engine))
- cs = gen12_emit_preempt_busywait(request, cs);
-
- request->tail = intel_ring_offset(request, cs);
- assert_ring_tail_valid(request->ring, request->tail);
-
- return gen8_emit_wa_tail(request, cs);
-}
-
-static u32 *gen12_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- /* XXX Stalling flush before seqno write; post-sync not */
- cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
- return gen12_emit_fini_breadcrumb_tail(rq, cs);
-}
-
-static u32 *
-gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
-{
- cs = gen12_emit_ggtt_write_rcs(cs,
- request->fence.seqno,
- hwsp_offset(request),
- PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_TILE_CACHE_FLUSH |
- PIPE_CONTROL_FLUSH_L3 |
- PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- /* Wa_1409600907:tgl */
- PIPE_CONTROL_DEPTH_STALL |
- PIPE_CONTROL_DC_FLUSH_ENABLE |
- PIPE_CONTROL_FLUSH_ENABLE);
-
- return gen12_emit_fini_breadcrumb_tail(request, cs);
-}
-
-static void execlists_park(struct intel_engine_cs *engine)
-{
- cancel_timer(&engine->execlists.timer);
- cancel_timer(&engine->execlists.preempt);
-}
-
-void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
-{
- engine->submit_request = execlists_submit_request;
- engine->schedule = i915_schedule;
- engine->execlists.tasklet.func = execlists_submission_tasklet;
-
- engine->reset.prepare = execlists_reset_prepare;
- engine->reset.rewind = execlists_reset_rewind;
- engine->reset.cancel = execlists_reset_cancel;
- engine->reset.finish = execlists_reset_finish;
-
- engine->park = execlists_park;
- engine->unpark = NULL;
-
- engine->flags |= I915_ENGINE_SUPPORTS_STATS;
- if (!intel_vgpu_active(engine->i915)) {
- engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
- if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
- engine->flags |= I915_ENGINE_HAS_PREEMPTION;
- if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- engine->flags |= I915_ENGINE_HAS_TIMESLICES;
- }
- }
-
- if (INTEL_GEN(engine->i915) >= 12)
- engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
-
- if (intel_engine_has_preemption(engine))
- engine->emit_bb_start = gen8_emit_bb_start;
- else
- engine->emit_bb_start = gen8_emit_bb_start_noarb;
-}
-
-static void execlists_shutdown(struct intel_engine_cs *engine)
-{
- /* Synchronise with residual timers and any softirq they raise */
- del_timer_sync(&engine->execlists.timer);
- del_timer_sync(&engine->execlists.preempt);
- tasklet_kill(&engine->execlists.tasklet);
-}
-
-static void execlists_release(struct intel_engine_cs *engine)
-{
- engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
-
- execlists_shutdown(engine);
-
- intel_engine_cleanup_common(engine);
- lrc_destroy_wa_ctx(engine);
-}
-
-static void
-logical_ring_default_vfuncs(struct intel_engine_cs *engine)
-{
- /* Default vfuncs which can be overriden by each engine. */
-
- engine->resume = execlists_resume;
-
- engine->cops = &execlists_context_ops;
- engine->request_alloc = execlists_request_alloc;
-
- engine->emit_flush = gen8_emit_flush;
- engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
- engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;
- if (INTEL_GEN(engine->i915) >= 12) {
- engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb;
- engine->emit_flush = gen12_emit_flush;
- }
- engine->set_default_submission = intel_execlists_set_default_submission;
-
- if (INTEL_GEN(engine->i915) < 11) {
- engine->irq_enable = gen8_logical_ring_enable_irq;
- engine->irq_disable = gen8_logical_ring_disable_irq;
- } else {
- /*
- * TODO: On Gen11 interrupt masks need to be clear
- * to allow C6 entry. Keep interrupts enabled
- * and take the hit of generating extra interrupts
- * until a more refined solution exists.
- */
- }
-}
-
-static inline void
-logical_ring_default_irqs(struct intel_engine_cs *engine)
-{
- unsigned int shift = 0;
-
- if (INTEL_GEN(engine->i915) < 11) {
- const u8 irq_shifts[] = {
- [RCS0] = GEN8_RCS_IRQ_SHIFT,
- [BCS0] = GEN8_BCS_IRQ_SHIFT,
- [VCS0] = GEN8_VCS0_IRQ_SHIFT,
- [VCS1] = GEN8_VCS1_IRQ_SHIFT,
- [VECS0] = GEN8_VECS_IRQ_SHIFT,
- };
-
- shift = irq_shifts[engine->id];
- }
-
- engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
- engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
- engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
- engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
-}
-
-static void rcs_submission_override(struct intel_engine_cs *engine)
-{
- switch (INTEL_GEN(engine->i915)) {
- case 12:
- engine->emit_flush = gen12_emit_flush_render;
- engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
- break;
- case 11:
- engine->emit_flush = gen11_emit_flush_render;
- engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
- break;
- default:
- engine->emit_flush = gen8_emit_flush_render;
- engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
- break;
- }
-}
-
-int intel_execlists_submission_setup(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct drm_i915_private *i915 = engine->i915;
- struct intel_uncore *uncore = engine->uncore;
- u32 base = engine->mmio_base;
-
- tasklet_init(&engine->execlists.tasklet,
- execlists_submission_tasklet, (unsigned long)engine);
- timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
- timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
-
- logical_ring_default_vfuncs(engine);
- logical_ring_default_irqs(engine);
-
- if (engine->class == RENDER_CLASS)
- rcs_submission_override(engine);
-
- if (intel_init_workaround_bb(engine))
- /*
- * We continue even if we fail to initialize the WA batch
- * because we only expect rare glitches, nothing critical
- * enough to prevent us from using the GPU
- */
- drm_err(&i915->drm, "WA batch buffer initialization failed\n");
-
- if (HAS_LOGICAL_RING_ELSQ(i915)) {
- execlists->submit_reg = uncore->regs +
- i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
- execlists->ctrl_reg = uncore->regs +
- i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
- } else {
- execlists->submit_reg = uncore->regs +
- i915_mmio_reg_offset(RING_ELSP(base));
- }
-
- execlists->csb_status =
- (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
-
- execlists->csb_write =
- &engine->status_page.addr[intel_hws_csb_write_index(i915)];
-
- if (INTEL_GEN(i915) < 11)
- execlists->csb_size = GEN8_CSB_ENTRIES;
- else
- execlists->csb_size = GEN11_CSB_ENTRIES;
-
- if (INTEL_GEN(engine->i915) >= 11) {
- execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
- execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
- }
-
- /* Finally, take ownership and responsibility for cleanup! */
- engine->sanitize = execlists_sanitize;
- engine->release = execlists_release;
-
- return 0;
-}
-
-static void init_common_reg_state(u32 * const regs,
- const struct intel_engine_cs *engine,
- const struct intel_ring *ring,
- bool inhibit)
-{
- u32 ctl;
-
- ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
- ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- if (inhibit)
- ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
- if (INTEL_GEN(engine->i915) < 11)
- ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
- CTX_CTRL_RS_CTX_ENABLE);
- regs[CTX_CONTEXT_CONTROL] = ctl;
-
- regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
- regs[CTX_TIMESTAMP] = 0;
-}
-
-static void init_wa_bb_reg_state(u32 * const regs,
- const struct intel_engine_cs *engine)
-{
- const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
-
- if (wa_ctx->per_ctx.size) {
- const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
-
- GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
- regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
- (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
- }
-
- if (wa_ctx->indirect_ctx.size) {
- lrc_ring_setup_indirect_ctx(regs, engine,
- i915_ggtt_offset(wa_ctx->vma) +
- wa_ctx->indirect_ctx.offset,
- wa_ctx->indirect_ctx.size);
- }
-}
-
-static void init_ppgtt_reg_state(u32 *regs, const struct i915_ppgtt *ppgtt)
-{
- if (i915_vm_is_4lvl(&ppgtt->vm)) {
- /* 64b PPGTT (48bit canonical)
- * PDP0_DESCRIPTOR contains the base address to PML4 and
- * other PDP Descriptors are ignored.
- */
- ASSIGN_CTX_PML4(ppgtt, regs);
- } else {
- ASSIGN_CTX_PDP(ppgtt, regs, 3);
- ASSIGN_CTX_PDP(ppgtt, regs, 2);
- ASSIGN_CTX_PDP(ppgtt, regs, 1);
- ASSIGN_CTX_PDP(ppgtt, regs, 0);
- }
-}
-
-static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
-{
- if (i915_is_ggtt(vm))
- return i915_vm_to_ggtt(vm)->alias;
- else
- return i915_vm_to_ppgtt(vm);
-}
-
-static void execlists_init_reg_state(u32 *regs,
- const struct intel_context *ce,
- const struct intel_engine_cs *engine,
- const struct intel_ring *ring,
- bool inhibit)
-{
- /*
- * A context is actually a big batch buffer with several
- * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
- * values we are setting here are only for the first context restore:
- * on a subsequent save, the GPU will recreate this batchbuffer with new
- * values (including all the missing MI_LOAD_REGISTER_IMM commands that
- * we are not initializing here).
- *
- * Must keep consistent with virtual_update_register_offsets().
- */
- set_offsets(regs, reg_offsets(engine), engine, inhibit);
-
- init_common_reg_state(regs, engine, ring, inhibit);
- init_ppgtt_reg_state(regs, vm_alias(ce->vm));
-
- init_wa_bb_reg_state(regs, engine);
-
- __reset_stop_ring(regs, engine);
-}
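To make the comment at the top of execlists_init_reg_state() concrete, here is a hedged, illustrative sketch (not code from this patch) of how the start of such a register-state page is laid out: an MI_LOAD_REGISTER_IMM header followed by (offset, value) pairs. The helper name and register choices are purely illustrative.

static void sketch_reg_state_prefix(u32 *regs, u32 base)
{
	/* One LRI header covering two (reg, value) pairs. */
	*regs++ = MI_LOAD_REGISTER_IMM(2) | MI_LRI_FORCE_POSTED;

	*regs++ = base + 0x244;		/* RING_CONTEXT_CONTROL */
	*regs++ = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);

	*regs++ = base + 0x3c;		/* RING_CTL */
	*regs++ = RING_CTL_SIZE(SZ_16K) | RING_VALID;
}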
-
-static int
-populate_lr_context(struct intel_context *ce,
- struct drm_i915_gem_object *ctx_obj,
- struct intel_engine_cs *engine,
- struct intel_ring *ring)
-{
- bool inhibit = true;
- void *vaddr;
-
- vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- drm_dbg(&engine->i915->drm, "Could not map object pages!\n");
- return PTR_ERR(vaddr);
- }
-
- set_redzone(vaddr, engine);
-
- if (engine->default_state) {
- shmem_read(engine->default_state, 0,
- vaddr, engine->context_size);
- __set_bit(CONTEXT_VALID_BIT, &ce->flags);
- inhibit = false;
- }
-
- /* Clear the ppHWSP (inc. per-context counters) */
- memset(vaddr, 0, PAGE_SIZE);
-
- /*
- * The second page of the context object contains some registers which
- * must be set up prior to the first execution.
- */
- execlists_init_reg_state(vaddr + LRC_STATE_OFFSET,
- ce, engine, ring, inhibit);
-
- __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size);
- i915_gem_object_unpin_map(ctx_obj);
- return 0;
-}
-
-static struct intel_timeline *pinned_timeline(struct intel_context *ce)
-{
- struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
-
- return intel_timeline_create_from_engine(ce->engine,
- page_unmask_bits(tl));
-}
-
-static int __execlists_context_alloc(struct intel_context *ce,
- struct intel_engine_cs *engine)
-{
- struct drm_i915_gem_object *ctx_obj;
- struct intel_ring *ring;
- struct i915_vma *vma;
- u32 context_size;
- int ret;
-
- GEM_BUG_ON(ce->state);
- context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
-
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- context_size += I915_GTT_PAGE_SIZE; /* for redzone */
-
- if (INTEL_GEN(engine->i915) == 12) {
- ce->wa_bb_page = context_size / PAGE_SIZE;
- context_size += PAGE_SIZE;
- }
-
- ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size);
- if (IS_ERR(ctx_obj))
- return PTR_ERR(ctx_obj);
-
- vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto error_deref_obj;
- }
-
- if (!page_mask_bits(ce->timeline)) {
- struct intel_timeline *tl;
-
- /*
- * Use the static global HWSP for the kernel context, and
- * a dynamically allocated cacheline for everyone else.
- */
- if (unlikely(ce->timeline))
- tl = pinned_timeline(ce);
- else
- tl = intel_timeline_create(engine->gt);
- if (IS_ERR(tl)) {
- ret = PTR_ERR(tl);
- goto error_deref_obj;
- }
-
- ce->timeline = tl;
- }
-
- ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
- goto error_deref_obj;
- }
-
- ret = populate_lr_context(ce, ctx_obj, engine, ring);
- if (ret) {
- drm_dbg(&engine->i915->drm,
- "Failed to populate LRC: %d\n", ret);
- goto error_ring_free;
- }
-
- ce->ring = ring;
- ce->state = vma;
-
- return 0;
-
-error_ring_free:
- intel_ring_put(ring);
-error_deref_obj:
- i915_gem_object_put(ctx_obj);
- return ret;
-}
-
-static struct list_head *virtual_queue(struct virtual_engine *ve)
-{
- return &ve->base.execlists.default_priolist.requests[0];
-}
-
-static void rcu_virtual_context_destroy(struct work_struct *wrk)
-{
- struct virtual_engine *ve =
- container_of(wrk, typeof(*ve), rcu.work);
- unsigned int n;
-
- GEM_BUG_ON(ve->context.inflight);
-
- /* Preempt-to-busy may leave a stale request behind. */
- if (unlikely(ve->request)) {
- struct i915_request *old;
-
- spin_lock_irq(&ve->base.active.lock);
-
- old = fetch_and_zero(&ve->request);
- if (old) {
- GEM_BUG_ON(!i915_request_completed(old));
- __i915_request_submit(old);
- i915_request_put(old);
- }
-
- spin_unlock_irq(&ve->base.active.lock);
- }
-
- /*
- * Flush the tasklet in case it is still running on another core.
- *
- * This needs to be done before we remove ourselves from the siblings'
- * rbtrees as, if it is running in parallel, it may reinsert
- * the rb_node into a sibling.
- */
- tasklet_kill(&ve->base.execlists.tasklet);
-
- /* Decouple ourselves from the siblings, no more access allowed. */
- for (n = 0; n < ve->num_siblings; n++) {
- struct intel_engine_cs *sibling = ve->siblings[n];
- struct rb_node *node = &ve->nodes[sibling->id].rb;
-
- if (RB_EMPTY_NODE(node))
- continue;
-
- spin_lock_irq(&sibling->active.lock);
-
- /* Detachment is lazily performed in the execlists tasklet */
- if (!RB_EMPTY_NODE(node))
- rb_erase_cached(node, &sibling->execlists.virtual);
-
- spin_unlock_irq(&sibling->active.lock);
- }
- GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
- GEM_BUG_ON(!list_empty(virtual_queue(ve)));
-
- if (ve->context.state)
- __execlists_context_fini(&ve->context);
- intel_context_fini(&ve->context);
-
- intel_breadcrumbs_free(ve->base.breadcrumbs);
- intel_engine_free_request_pool(&ve->base);
-
- kfree(ve->bonds);
- kfree(ve);
-}
-
-static void virtual_context_destroy(struct kref *kref)
-{
- struct virtual_engine *ve =
- container_of(kref, typeof(*ve), context.ref);
-
- GEM_BUG_ON(!list_empty(&ve->context.signals));
-
- /*
- * When destroying the virtual engine, we have to be aware that
- * it may still be in use from a hardirq/softirq context causing
- * the resubmission of a completed request (background completion
- * due to preempt-to-busy). Before we can free the engine, we need
- * to flush the submission code and tasklets that are still potentially
- * accessing the engine. Flushing the tasklets requires process context,
- * and since we can guard the resubmit onto the engine with an RCU read
- * lock, we can delegate the free of the engine to an RCU worker.
- */
- INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
- queue_rcu_work(system_wq, &ve->rcu);
-}
-
-static void virtual_engine_initial_hint(struct virtual_engine *ve)
-{
- int swp;
-
- /*
- * Pick a random sibling on starting to help spread the load around.
- *
- * New contexts are typically created with exactly the same order
- * of siblings, and often started in batches. Due to the way we iterate
- * the array of siblings when submitting requests, sibling[0] is
- * prioritised for dequeuing. If we make sure that sibling[0] is fairly
- * randomised across the system, we also help spread the load by the
- * first engine we inspect being different each time.
- *
- * NB This does not force us to execute on this engine; it will just
- * typically be the first we inspect for submission.
- */
- swp = prandom_u32_max(ve->num_siblings);
- if (swp)
- swap(ve->siblings[swp], ve->siblings[0]);
-}
-
-static int virtual_context_alloc(struct intel_context *ce)
-{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
-
- return __execlists_context_alloc(ce, ve->siblings[0]);
-}
-
-static int virtual_context_pin(struct intel_context *ce, void *vaddr)
-{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
-
- /* Note: we must use a real engine class for setting up reg state */
- return __execlists_context_pin(ce, ve->siblings[0], vaddr);
-}
-
-static void virtual_context_enter(struct intel_context *ce)
-{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
- unsigned int n;
-
- for (n = 0; n < ve->num_siblings; n++)
- intel_engine_pm_get(ve->siblings[n]);
-
- intel_timeline_enter(ce->timeline);
-}
-
-static void virtual_context_exit(struct intel_context *ce)
+static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
{
- struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
- unsigned int n;
-
- intel_timeline_exit(ce->timeline);
-
- for (n = 0; n < ve->num_siblings; n++)
- intel_engine_pm_put(ve->siblings[n]);
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ ce->runtime.num_underflow++;
+ ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
+#endif
}
-static const struct intel_context_ops virtual_context_ops = {
- .alloc = virtual_context_alloc,
-
- .pre_pin = execlists_context_pre_pin,
- .pin = virtual_context_pin,
- .unpin = execlists_context_unpin,
- .post_unpin = execlists_context_post_unpin,
-
- .enter = virtual_context_enter,
- .exit = virtual_context_exit,
-
- .destroy = virtual_context_destroy,
-};
-
-static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
+void lrc_update_runtime(struct intel_context *ce)
{
- struct i915_request *rq;
- intel_engine_mask_t mask;
-
- rq = READ_ONCE(ve->request);
- if (!rq)
- return 0;
-
- /* The rq is ready for submission; rq->execution_mask is now stable. */
- mask = rq->execution_mask;
- if (unlikely(!mask)) {
- /* Invalid selection, submit to a random engine in error */
- i915_request_set_error_once(rq, -ENODEV);
- mask = ve->siblings[0]->mask;
- }
-
- ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
- rq->fence.context, rq->fence.seqno,
- mask, ve->base.execlists.queue_priority_hint);
-
- return mask;
-}
+ u32 old;
+ s32 dt;
-static void virtual_submission_tasklet(unsigned long data)
-{
- struct virtual_engine * const ve = (struct virtual_engine *)data;
- const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
- intel_engine_mask_t mask;
- unsigned int n;
-
- rcu_read_lock();
- mask = virtual_submission_mask(ve);
- rcu_read_unlock();
- if (unlikely(!mask))
+ if (intel_context_is_barrier(ce))
return;
- local_irq_disable();
- for (n = 0; n < ve->num_siblings; n++) {
- struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
- struct ve_node * const node = &ve->nodes[sibling->id];
- struct rb_node **parent, *rb;
- bool first;
-
- if (!READ_ONCE(ve->request))
- break; /* already handled by a sibling's tasklet */
-
- if (unlikely(!(mask & sibling->mask))) {
- if (!RB_EMPTY_NODE(&node->rb)) {
- spin_lock(&sibling->active.lock);
- rb_erase_cached(&node->rb,
- &sibling->execlists.virtual);
- RB_CLEAR_NODE(&node->rb);
- spin_unlock(&sibling->active.lock);
- }
- continue;
- }
-
- spin_lock(&sibling->active.lock);
-
- if (!RB_EMPTY_NODE(&node->rb)) {
- /*
- * Cheat and avoid rebalancing the tree if we can
- * reuse this node in situ.
- */
- first = rb_first_cached(&sibling->execlists.virtual) ==
- &node->rb;
- if (prio == node->prio || (prio > node->prio && first))
- goto submit_engine;
-
- rb_erase_cached(&node->rb, &sibling->execlists.virtual);
- }
-
- rb = NULL;
- first = true;
- parent = &sibling->execlists.virtual.rb_root.rb_node;
- while (*parent) {
- struct ve_node *other;
-
- rb = *parent;
- other = rb_entry(rb, typeof(*other), rb);
- if (prio > other->prio) {
- parent = &rb->rb_left;
- } else {
- parent = &rb->rb_right;
- first = false;
- }
- }
-
- rb_link_node(&node->rb, rb, parent);
- rb_insert_color_cached(&node->rb,
- &sibling->execlists.virtual,
- first);
-
-submit_engine:
- GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
- node->prio = prio;
- if (first && prio > sibling->execlists.queue_priority_hint)
- tasklet_hi_schedule(&sibling->execlists.tasklet);
-
- spin_unlock(&sibling->active.lock);
- }
- local_irq_enable();
-}
-
-static void virtual_submit_request(struct i915_request *rq)
-{
- struct virtual_engine *ve = to_virtual_engine(rq->engine);
- struct i915_request *old;
- unsigned long flags;
-
- ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
- rq->fence.context,
- rq->fence.seqno);
-
- GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
-
- spin_lock_irqsave(&ve->base.active.lock, flags);
-
- old = ve->request;
- if (old) { /* background completion event from preempt-to-busy */
- GEM_BUG_ON(!i915_request_completed(old));
- __i915_request_submit(old);
- i915_request_put(old);
- }
-
- if (i915_request_completed(rq)) {
- __i915_request_submit(rq);
-
- ve->base.execlists.queue_priority_hint = INT_MIN;
- ve->request = NULL;
- } else {
- ve->base.execlists.queue_priority_hint = rq_prio(rq);
- ve->request = i915_request_get(rq);
-
- GEM_BUG_ON(!list_empty(virtual_queue(ve)));
- list_move_tail(&rq->sched.link, virtual_queue(ve));
-
- tasklet_hi_schedule(&ve->base.execlists.tasklet);
- }
-
- spin_unlock_irqrestore(&ve->base.active.lock, flags);
-}
-
-static struct ve_bond *
-virtual_find_bond(struct virtual_engine *ve,
- const struct intel_engine_cs *master)
-{
- int i;
-
- for (i = 0; i < ve->num_bonds; i++) {
- if (ve->bonds[i].master == master)
- return &ve->bonds[i];
- }
-
- return NULL;
-}
-
-static void
-virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
-{
- struct virtual_engine *ve = to_virtual_engine(rq->engine);
- intel_engine_mask_t allowed, exec;
- struct ve_bond *bond;
-
- allowed = ~to_request(signal)->engine->mask;
-
- bond = virtual_find_bond(ve, to_request(signal)->engine);
- if (bond)
- allowed &= bond->sibling_mask;
-
- /* Restrict the bonded request to run on only the available engines */
- exec = READ_ONCE(rq->execution_mask);
- while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
- ;
-
- /* Prevent the master from being re-run on the bonded engines */
- to_request(signal)->execution_mask &= ~allowed;
-}
-
-struct intel_context *
-intel_execlists_create_virtual(struct intel_engine_cs **siblings,
- unsigned int count)
-{
- struct virtual_engine *ve;
- unsigned int n;
- int err;
-
- if (count == 0)
- return ERR_PTR(-EINVAL);
-
- if (count == 1)
- return intel_context_create(siblings[0]);
-
- ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
- if (!ve)
- return ERR_PTR(-ENOMEM);
-
- ve->base.i915 = siblings[0]->i915;
- ve->base.gt = siblings[0]->gt;
- ve->base.uncore = siblings[0]->uncore;
- ve->base.id = -1;
-
- ve->base.class = OTHER_CLASS;
- ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
- ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
- ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
-
- /*
- * The decision on whether to submit a request using semaphores
- * depends on the saturated state of the engine. We only compute
- * this during HW submission of the request, and we need for this
- * state to be globally applied to all requests being submitted
- * to this engine. Virtual engines encompass more than one physical
- * engine and so we cannot accurately tell in advance if one of those
- * engines is already saturated and so cannot afford to use a semaphore
- * and be pessimized in priority for doing so -- if we are the only
- * context using semaphores after all other clients have stopped, we
- * will be starved on the saturated system. Such a global switch for
- * semaphores is less than ideal, but alas is the current compromise.
- */
- ve->base.saturated = ALL_ENGINES;
-
- snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
-
- intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
- intel_engine_init_execlists(&ve->base);
-
- ve->base.cops = &virtual_context_ops;
- ve->base.request_alloc = execlists_request_alloc;
-
- ve->base.schedule = i915_schedule;
- ve->base.submit_request = virtual_submit_request;
- ve->base.bond_execute = virtual_bond_execute;
-
- INIT_LIST_HEAD(virtual_queue(ve));
- ve->base.execlists.queue_priority_hint = INT_MIN;
- tasklet_init(&ve->base.execlists.tasklet,
- virtual_submission_tasklet,
- (unsigned long)ve);
-
- intel_context_init(&ve->context, &ve->base);
-
- ve->base.breadcrumbs = intel_breadcrumbs_create(NULL);
- if (!ve->base.breadcrumbs) {
- err = -ENOMEM;
- goto err_put;
- }
-
- for (n = 0; n < count; n++) {
- struct intel_engine_cs *sibling = siblings[n];
-
- GEM_BUG_ON(!is_power_of_2(sibling->mask));
- if (sibling->mask & ve->base.mask) {
- DRM_DEBUG("duplicate %s entry in load balancer\n",
- sibling->name);
- err = -EINVAL;
- goto err_put;
- }
-
- /*
- * The virtual engine implementation is tightly coupled to
- * the execlists backend -- we push out requests directly
- * into a tree inside each physical engine. We could support
- * layering if we handle cloning of the requests and
- * submitting a copy into each backend.
- */
- if (sibling->execlists.tasklet.func !=
- execlists_submission_tasklet) {
- err = -ENODEV;
- goto err_put;
- }
-
- GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
- RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);
-
- ve->siblings[ve->num_siblings++] = sibling;
- ve->base.mask |= sibling->mask;
-
- /*
- * All physical engines must be compatible for their emission
- * functions (as we build the instructions during request
- * construction and do not alter them before submission
- * on the physical engine). We use the engine class as a guide
- * here, although that could be refined.
- */
- if (ve->base.class != OTHER_CLASS) {
- if (ve->base.class != sibling->class) {
- DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
- sibling->class, ve->base.class);
- err = -EINVAL;
- goto err_put;
- }
- continue;
- }
-
- ve->base.class = sibling->class;
- ve->base.uabi_class = sibling->uabi_class;
- snprintf(ve->base.name, sizeof(ve->base.name),
- "v%dx%d", ve->base.class, count);
- ve->base.context_size = sibling->context_size;
-
- ve->base.emit_bb_start = sibling->emit_bb_start;
- ve->base.emit_flush = sibling->emit_flush;
- ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
- ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
- ve->base.emit_fini_breadcrumb_dw =
- sibling->emit_fini_breadcrumb_dw;
-
- ve->base.flags = sibling->flags;
- }
-
- ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
-
- virtual_engine_initial_hint(ve);
- return &ve->context;
-
-err_put:
- intel_context_put(&ve->context);
- return ERR_PTR(err);
-}
-
-struct intel_context *
-intel_execlists_clone_virtual(struct intel_engine_cs *src)
-{
- struct virtual_engine *se = to_virtual_engine(src);
- struct intel_context *dst;
-
- dst = intel_execlists_create_virtual(se->siblings,
- se->num_siblings);
- if (IS_ERR(dst))
- return dst;
-
- if (se->num_bonds) {
- struct virtual_engine *de = to_virtual_engine(dst->engine);
-
- de->bonds = kmemdup(se->bonds,
- sizeof(*se->bonds) * se->num_bonds,
- GFP_KERNEL);
- if (!de->bonds) {
- intel_context_put(dst);
- return ERR_PTR(-ENOMEM);
- }
-
- de->num_bonds = se->num_bonds;
- }
-
- return dst;
-}
-
-int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
- const struct intel_engine_cs *master,
- const struct intel_engine_cs *sibling)
-{
- struct virtual_engine *ve = to_virtual_engine(engine);
- struct ve_bond *bond;
- int n;
-
- /* Sanity check the sibling is part of the virtual engine */
- for (n = 0; n < ve->num_siblings; n++)
- if (sibling == ve->siblings[n])
- break;
- if (n == ve->num_siblings)
- return -EINVAL;
-
- bond = virtual_find_bond(ve, master);
- if (bond) {
- bond->sibling_mask |= sibling->mask;
- return 0;
- }
-
- bond = krealloc(ve->bonds,
- sizeof(*bond) * (ve->num_bonds + 1),
- GFP_KERNEL);
- if (!bond)
- return -ENOMEM;
-
- bond[ve->num_bonds].master = master;
- bond[ve->num_bonds].sibling_mask = sibling->mask;
-
- ve->bonds = bond;
- ve->num_bonds++;
-
- return 0;
-}
-
-void intel_execlists_show_requests(struct intel_engine_cs *engine,
- struct drm_printer *m,
- void (*show_request)(struct drm_printer *m,
- struct i915_request *rq,
- const char *prefix),
- unsigned int max)
-{
- const struct intel_engine_execlists *execlists = &engine->execlists;
- struct i915_request *rq, *last;
- unsigned long flags;
- unsigned int count;
- struct rb_node *rb;
-
- spin_lock_irqsave(&engine->active.lock, flags);
-
- last = NULL;
- count = 0;
- list_for_each_entry(rq, &engine->active.requests, sched.link) {
- if (count++ < max - 1)
- show_request(m, rq, "\t\tE ");
- else
- last = rq;
- }
- if (last) {
- if (count > max) {
- drm_printf(m,
- "\t\t...skipping %d executing requests...\n",
- count - max);
- }
- show_request(m, last, "\t\tE ");
- }
-
- if (execlists->switch_priority_hint != INT_MIN)
- drm_printf(m, "\t\tSwitch priority hint: %d\n",
- READ_ONCE(execlists->switch_priority_hint));
- if (execlists->queue_priority_hint != INT_MIN)
- drm_printf(m, "\t\tQueue priority hint: %d\n",
- READ_ONCE(execlists->queue_priority_hint));
-
- last = NULL;
- count = 0;
- for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
- struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
- int i;
-
- priolist_for_each_request(rq, p, i) {
- if (count++ < max - 1)
- show_request(m, rq, "\t\tQ ");
- else
- last = rq;
- }
- }
- if (last) {
- if (count > max) {
- drm_printf(m,
- "\t\t...skipping %d queued requests...\n",
- count - max);
- }
- show_request(m, last, "\t\tQ ");
- }
+ old = ce->runtime.last;
+ ce->runtime.last = lrc_get_runtime(ce);
+ dt = ce->runtime.last - old;
- last = NULL;
- count = 0;
- for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- struct i915_request *rq = READ_ONCE(ve->request);
-
- if (rq) {
- if (count++ < max - 1)
- show_request(m, rq, "\t\tV ");
- else
- last = rq;
- }
- }
- if (last) {
- if (count > max) {
- drm_printf(m,
- "\t\t...skipping %d virtual requests...\n",
- count - max);
- }
- show_request(m, last, "\t\tV ");
+ if (unlikely(dt < 0)) {
+ CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
+ old, ce->runtime.last, dt);
+ st_update_runtime_underflow(ce, dt);
+ return;
}
- spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-void intel_lr_context_reset(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 head,
- bool scrub)
-{
- GEM_BUG_ON(!intel_context_is_pinned(ce));
-
- /*
- * We want a simple context + ring to execute the breadcrumb update.
- * We cannot rely on the context being intact across the GPU hang,
- * so clear it and rebuild just what we need for the breadcrumb.
- * All pending requests for this context will be zapped, and any
- * future request will be after userspace has had the opportunity
- * to recreate its own state.
- */
- if (scrub)
- restore_default_state(ce, engine);
-
- /* Rerun the request; its payload has been neutered (if guilty). */
- __execlists_update_reg_state(ce, engine, head);
-}
-
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
-{
- return engine->set_default_submission ==
- intel_execlists_set_default_submission;
+ ewma_runtime_add(&ce->runtime.avg, dt);
+ ce->runtime.total += dt;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
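A hedged usage sketch for the runtime tracking updated above (not part of the patch): a caller could sample the exponentially weighted average after lrc_update_runtime() has folded in the latest CTX_TIMESTAMP delta. The helper name is hypothetical; ewma_runtime_read() is assumed to come from the DECLARE_EWMA(runtime, ...) declaration in intel_context_types.h.

static unsigned long sketch_sample_avg_runtime(struct intel_context *ce)
{
	/* Fold the most recent CTX_TIMESTAMP value into the running stats... */
	lrc_update_runtime(ce);

	/* ...then read back the EWMA of per-save runtime, in timestamp ticks. */
	return ewma_runtime_read(&ce->runtime.avg);
}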
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index c2d287f25497..7f697845c4cf 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -1,90 +1,20 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
*/
-#ifndef _INTEL_LRC_H_
-#define _INTEL_LRC_H_
+#ifndef __INTEL_LRC_H__
+#define __INTEL_LRC_H__
#include <linux/types.h>
-struct drm_printer;
+#include "intel_context.h"
+#include "intel_lrc_reg.h"
-struct drm_i915_private;
-struct i915_gem_context;
-struct i915_request;
-struct intel_context;
+struct drm_i915_gem_object;
struct intel_engine_cs;
+struct intel_ring;
-/* Execlists regs */
-#define RING_ELSP(base) _MMIO((base) + 0x230)
-#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234)
-#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244)
-#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
-#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
-#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
-#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2)
-#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE (1 << 8)
-#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
-#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
-#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
-
-#define EL_CTRL_LOAD (1 << 0)
-
-/* The docs specify that the write pointer wraps around after 5h, "After status
- * is written out to the last available status QW at offset 5h, this pointer
- * wraps to 0."
- *
- * Therefore, one must infer that even though there are 3 bits available, 6 and
- * 7 appear to be reserved.
- */
-#define GEN8_CSB_ENTRIES 6
-#define GEN8_CSB_PTR_MASK 0x7
-#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
-#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
-
-#define GEN11_CSB_ENTRIES 12
-#define GEN11_CSB_PTR_MASK 0xf
-#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
-#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
-
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
-#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
-#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
-/* in Gen12 ID 0x7FF is reserved to indicate idle */
-#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
-
-enum {
- INTEL_CONTEXT_SCHEDULE_IN = 0,
- INTEL_CONTEXT_SCHEDULE_OUT,
- INTEL_CONTEXT_SCHEDULE_PREEMPTED,
-};
-
-/* Logical Rings */
-void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
-
-int intel_execlists_submission_setup(struct intel_engine_cs *engine);
-
-/* Logical Ring Contexts */
/* At the start of the context image is its per-process HWS page */
#define LRC_PPHWSP_PN (0)
#define LRC_PPHWSP_SZ (1)
@@ -96,32 +26,57 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine);
#define LRC_PPHWSP_SCRATCH 0x34
#define LRC_PPHWSP_SCRATCH_ADDR (LRC_PPHWSP_SCRATCH * sizeof(u32))
-void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
-
-void intel_lr_context_reset(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 head,
- bool scrub);
-
-void intel_execlists_show_requests(struct intel_engine_cs *engine,
- struct drm_printer *m,
- void (*show_request)(struct drm_printer *m,
- struct i915_request *rq,
- const char *prefix),
- unsigned int max);
-
-struct intel_context *
-intel_execlists_create_virtual(struct intel_engine_cs **siblings,
- unsigned int count);
-
-struct intel_context *
-intel_execlists_clone_virtual(struct intel_engine_cs *src);
-
-int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
- const struct intel_engine_cs *master,
- const struct intel_engine_cs *sibling);
-
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
-
-#endif /* _INTEL_LRC_H_ */
+void lrc_init_wa_ctx(struct intel_engine_cs *engine);
+void lrc_fini_wa_ctx(struct intel_engine_cs *engine);
+
+int lrc_alloc(struct intel_context *ce,
+ struct intel_engine_cs *engine);
+void lrc_reset(struct intel_context *ce);
+void lrc_fini(struct intel_context *ce);
+void lrc_destroy(struct kref *kref);
+
+int
+lrc_pre_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr);
+int
+lrc_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *vaddr);
+void lrc_unpin(struct intel_context *ce);
+void lrc_post_unpin(struct intel_context *ce);
+
+void lrc_init_state(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *state);
+
+void lrc_init_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool clear);
+void lrc_reset_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine);
+
+u32 lrc_update_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 head);
+void lrc_update_offsets(struct intel_context *ce,
+ struct intel_engine_cs *engine);
+
+void lrc_check_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const char *when);
+
+void lrc_update_runtime(struct intel_context *ce);
+static inline u32 lrc_get_runtime(const struct intel_context *ce)
+{
+ /*
+ * We can use either ppHWSP[16], which is recorded before the context
+ * switch (and so excludes the cost of context switches), or the
+ * value from the context image itself, which is saved/restored earlier
+ * and so includes the cost of the save.
+ */
+ return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
+
+#endif /* __INTEL_LRC_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 1b51f7b9a5c3..65fe76738335 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -9,6 +9,8 @@
#include <linux/types.h>
+#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)
+
/* GEN8 to GEN12 Reg State Context */
#define CTX_CONTEXT_CONTROL (0x02 + 1)
#define CTX_RING_HEAD (0x04 + 1)
@@ -52,4 +54,43 @@
#define GEN8_EXECLISTS_STATUS_BUF 0x370
#define GEN11_EXECLISTS_STATUS_BUF2 0x3c0
+/* Execlists regs */
+#define RING_ELSP(base) _MMIO((base) + 0x230)
+#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234)
+#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4)
+#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244)
+#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0)
+#define CTX_CTRL_RS_CTX_ENABLE REG_BIT(1)
+#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT REG_BIT(2)
+#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3)
+#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE REG_BIT(8)
+#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
+#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
+#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
+#define EL_CTRL_LOAD REG_BIT(0)
+
+/*
+ * The docs specify that the write pointer wraps around after 5h, "After status
+ * is written out to the last available status QW at offset 5h, this pointer
+ * wraps to 0."
+ *
+ * Therefore, one must infer that even though there are 3 bits available, 6 and
+ * 7 appear to be reserved.
+ */
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x7
+#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
+#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
+
+#define GEN11_CSB_ENTRIES 12
+#define GEN11_CSB_PTR_MASK 0xf
+#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
+#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
+
+#define MAX_CONTEXT_HW_ID (1 << 21) /* exclusive */
+#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
+#define GEN11_MAX_CONTEXT_HW_ID (1 << 11) /* exclusive */
+/* in Gen12 ID 0x7FF is reserved to indicate idle */
+#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
+
#endif /* _INTEL_LRC_REG_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index ab6870242e18..8acb84960cd0 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -24,8 +24,8 @@
#include "intel_engine.h"
#include "intel_gt.h"
+#include "intel_lrc_reg.h"
#include "intel_mocs.h"
-#include "intel_lrc.h"
#include "intel_ring.h"
/* structures required */
@@ -472,7 +472,7 @@ static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
return table->table[I915_MOCS_PTE].l3cc_value;
}
-static inline u32 l3cc_combine(u16 low, u16 high)
+static u32 l3cc_combine(u16 low, u16 high)
{
return low | (u32)high << 16;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 46d9aceda64c..96b85a10ef33 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -80,7 +80,7 @@ void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
kfree(pt);
}
-static inline void
+static void
write_dma_entry(struct drm_i915_gem_object * const pdma,
const unsigned short idx,
const u64 encoded_entry)
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index d7b8e4457fc2..35504c97f11d 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -49,7 +49,7 @@ static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc)
return rc6_to_gt(rc)->i915;
}
-static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
intel_uncore_write_fw(uncore, reg, val);
}
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 40d8f1a95df6..60393ce5614d 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -95,10 +95,10 @@ region_lmem_init(struct intel_memory_region *mem)
return ret;
}
-const struct intel_memory_region_ops intel_region_lmem_ops = {
+static const struct intel_memory_region_ops intel_region_lmem_ops = {
.init = region_lmem_init,
.release = region_lmem_release,
- .create_object = __i915_gem_lmem_object_create,
+ .init_object = __i915_gem_lmem_object_init,
};
struct intel_memory_region *
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.h b/drivers/gpu/drm/i915/gt/intel_region_lmem.h
index 213def7c7b8a..8ea43e538dab 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.h
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.h
@@ -8,8 +8,6 @@
struct drm_i915_private;
-extern const struct intel_memory_region_ops intel_region_lmem_ops;
-
struct intel_memory_region *
intel_setup_fake_lmem(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index ea2a77c7b469..ca816ba22197 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -27,7 +27,8 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
-#include "gt/intel_context.h"
+#include "intel_context.h"
+#include "intel_gpu_commands.h"
#include "intel_ring.h"
static const struct intel_renderstate_rodata *
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 3654c955e6be..61410cd62927 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -40,20 +40,19 @@ static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
intel_uncore_rmw_fw(uncore, reg, clr, 0);
}
-static void engine_skip_context(struct i915_request *rq)
+static void skip_context(struct i915_request *rq)
{
- struct intel_engine_cs *engine = rq->engine;
struct intel_context *hung_ctx = rq->context;
- if (!i915_request_is_active(rq))
- return;
+ list_for_each_entry_from_rcu(rq, &hung_ctx->timeline->requests, link) {
+ if (!i915_request_is_active(rq))
+ return;
- lockdep_assert_held(&engine->active.lock);
- list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
if (rq->context == hung_ctx) {
i915_request_set_error_once(rq, -EIO);
__i915_request_skip(rq);
}
+ }
}
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
@@ -152,15 +151,14 @@ static void mark_innocent(struct i915_request *rq)
void __i915_request_reset(struct i915_request *rq, bool guilty)
{
RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
-
- GEM_BUG_ON(i915_request_completed(rq));
+ GEM_BUG_ON(__i915_request_is_complete(rq));
rcu_read_lock(); /* protect the GEM context */
if (guilty) {
i915_request_set_error_once(rq, -EIO);
__i915_request_skip(rq);
if (mark_guilty(rq))
- engine_skip_context(rq);
+ skip_context(rq);
} else {
i915_request_set_error_once(rq, -EAGAIN);
mark_innocent(rq);
@@ -231,7 +229,7 @@ static int g4x_do_reset(struct intel_gt *gt,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
+ GT_TRACE(gt, "Wait for media reset failed\n");
goto out;
}
@@ -239,7 +237,7 @@ static int g4x_do_reset(struct intel_gt *gt,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
+ GT_TRACE(gt, "Wait for render reset failed\n");
goto out;
}
@@ -265,7 +263,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
5000, 0,
NULL);
if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
+ GT_TRACE(gt, "Wait for render reset failed\n");
goto out;
}
@@ -276,7 +274,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
5000, 0,
NULL);
if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
+ GT_TRACE(gt, "Wait for media reset failed\n");
goto out;
}
@@ -305,9 +303,9 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
500, 0,
NULL);
if (err)
- drm_dbg(&gt->i915->drm,
- "Wait for 0x%08x engines reset failed\n",
- hw_domain_mask);
+ GT_TRACE(gt,
+ "Wait for 0x%08x engines reset failed\n",
+ hw_domain_mask);
return err;
}
@@ -407,8 +405,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
return 0;
if (ret) {
- drm_dbg(&engine->i915->drm,
- "Wait for SFC forced lock ack failed\n");
+ ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
return ret;
}
@@ -499,6 +496,9 @@ static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
u32 request, mask, ack;
int ret;
+ if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
+ return -ETIMEDOUT;
+
ack = intel_uncore_read_fw(uncore, reg);
if (ack & RESET_CTL_CAT_ERROR) {
/*
@@ -754,8 +754,10 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
if (err)
return err;
+ local_bh_disable();
for_each_engine(engine, gt, id)
__intel_engine_reset(engine, stalled_mask & engine->mask);
+ local_bh_enable();
intel_ggtt_restore_fences(gt->ggtt);
@@ -833,9 +835,11 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
set_bit(I915_WEDGED, &gt->reset.flags);
/* Mark all executing requests as skipped */
+ local_bh_disable();
for_each_engine(engine, gt, id)
if (engine->reset.cancel)
engine->reset.cancel(engine);
+ local_bh_enable();
reset_finish(gt, awake);
@@ -1105,25 +1109,12 @@ error:
goto finish;
}
-static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
+static int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
return __intel_gt_reset(engine->gt, engine->mask);
}
-/**
- * intel_engine_reset - reset GPU engine to recover from a hang
- * @engine: engine to reset
- * @msg: reason for GPU reset; or NULL for no drm_notice()
- *
- * Reset a specific GPU engine. Useful if a hang is detected.
- * Returns zero on successful reset or otherwise an error code.
- *
- * Procedure is:
- * - identifies the request that caused the hang and it is dropped
- * - reset engine (which will force the engine to idle)
- * - re-init/configure engine
- */
-int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
+int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
{
struct intel_gt *gt = engine->gt;
bool uses_guc = intel_engine_in_guc_submission_mode(engine);
@@ -1148,8 +1139,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
- drm_dbg(&gt->i915->drm, "%sFailed to reset %s, ret=%d\n",
- uses_guc ? "GuC " : "", engine->name, ret);
+ ENGINE_TRACE(engine, "Failed to reset, err: %d\n", ret);
goto out;
}
@@ -1174,6 +1164,30 @@ out:
return ret;
}
+/**
+ * intel_engine_reset - reset GPU engine to recover from a hang
+ * @engine: engine to reset
+ * @msg: reason for GPU reset; or NULL for no drm_notice()
+ *
+ * Reset a specific GPU engine. Useful if a hang is detected.
+ * Returns zero on successful reset or otherwise an error code.
+ *
+ * Procedure is:
+ * - identify the request that caused the hang and drop it
+ * - reset the engine (which will force the engine to idle)
+ * - re-init/configure the engine
+ */
+int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
+{
+ int err;
+
+ local_bh_disable();
+ err = __intel_engine_reset_bh(engine, msg);
+ local_bh_enable();
+
+ return err;
+}
+
static void intel_gt_reset_global(struct intel_gt *gt,
u32 engine_mask,
const char *reason)
@@ -1186,7 +1200,7 @@ static void intel_gt_reset_global(struct intel_gt *gt,
kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
- drm_dbg(&gt->i915->drm, "resetting chip, engines=%x\n", engine_mask);
+ GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
/* Use a watchdog to ensure that our reset completes */
@@ -1260,18 +1274,20 @@ void intel_gt_handle_error(struct intel_gt *gt,
* single reset fails.
*/
if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
+ local_bh_disable();
for_each_engine_masked(engine, gt, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags))
continue;
- if (intel_engine_reset(engine, msg) == 0)
+ if (__intel_engine_reset_bh(engine, msg) == 0)
engine_mask &= ~engine->mask;
clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags);
}
+ local_bh_enable();
}
if (!engine_mask)
@@ -1380,6 +1396,17 @@ void intel_gt_init_reset(struct intel_gt *gt)
mutex_init(&gt->reset.mutex);
init_srcu_struct(&gt->reset.backoff_srcu);
+ /*
+ * While undesirable to wait inside the shrinker, complain anyway.
+ *
+ * If we have to wait during shrinking, we guarantee forward progress
+ * by forcing the reset. Therefore during the reset we must not
+ * re-enter the shrinker. By declaring that we take the reset mutex
+ * within the shrinker, we forbid ourselves from performing any
+ * fs-reclaim or taking related locks during reset.
+ */
+ i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);
+
/* no GPU until we are ready! */
__set_bit(I915_WEDGED, &gt->reset.flags);
}
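The comment added to intel_gt_init_reset() above declares that the reset mutex is taken from within the shrinker, so fs-reclaim under it becomes a lockdep violation. As a rough sketch of how such a dependency can be primed up front (illustrative helper name, assuming CONFIG_LOCKDEP; this is not the i915_gem_shrinker_taints_mutex() implementation):

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>

/*
 * Hypothetical helper: record a fake "fs-reclaim, then this mutex"
 * dependency. Afterwards, any attempt to enter reclaim (or allocate with
 * GFP_KERNEL) while holding the mutex is reported as a potential deadlock,
 * even if that path is never hit at runtime.
 */
static void taint_mutex_with_reclaim(struct mutex *lock)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);	/* pretend we are inside reclaim */
	mutex_lock(lock);		/* record reclaim -> mutex ordering */
	mutex_unlock(lock);
	fs_reclaim_release(GFP_KERNEL);
}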
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index a0eec7c11c0c..7dbf5cc8a333 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -34,6 +34,8 @@ void intel_gt_reset(struct intel_gt *gt,
const char *reason);
int intel_engine_reset(struct intel_engine_cs *engine,
const char *reason);
+int __intel_engine_reset_bh(struct intel_engine_cs *engine,
+ const char *reason);
void __i915_request_reset(struct i915_request *rq, bool guilty);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 4034a4bac7f0..78d1360caa0f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -5,9 +5,11 @@
*/
#include "gem/i915_gem_object.h"
+
#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_engine.h"
+#include "intel_gpu_commands.h"
#include "intel_ring.h"
#include "intel_timeline.h"
@@ -40,7 +42,7 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
- if (vma->obj->stolen)
+ if (i915_gem_object_is_stolen(vma->obj))
flags |= PIN_MAPPABLE;
else
flags |= PIN_HIGH;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index ecf3a6118a6d..4984ff565424 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -122,31 +122,27 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
hwsp = RING_HWS_PGA(engine->mmio_base);
}
- intel_uncore_write(engine->uncore, hwsp, offset);
- intel_uncore_posting_read(engine->uncore, hwsp);
+ intel_uncore_write_fw(engine->uncore, hwsp, offset);
+ intel_uncore_posting_read_fw(engine->uncore, hwsp);
}
static void flush_cs_tlb(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- if (!IS_GEN_RANGE(dev_priv, 6, 7))
+ if (!IS_GEN_RANGE(engine->i915, 6, 7))
return;
/* ring should be idle before issuing a sync flush */
- drm_WARN_ON(&dev_priv->drm,
- (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
-
- ENGINE_WRITE(engine, RING_INSTPM,
- _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
- INSTPM_SYNC_FLUSH));
- if (intel_wait_for_register(engine->uncore,
- RING_INSTPM(engine->mmio_base),
- INSTPM_SYNC_FLUSH, 0,
- 1000))
- drm_err(&dev_priv->drm,
- "%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
- engine->name);
+ GEM_DEBUG_WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+
+ ENGINE_WRITE_FW(engine, RING_INSTPM,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (__intel_wait_for_register_fw(engine->uncore,
+ RING_INSTPM(engine->mmio_base),
+ INSTPM_SYNC_FLUSH, 0,
+ 2000, 0, NULL))
+ ENGINE_TRACE(engine,
+ "wait for SyncFlush to complete for TLB invalidation timed out\n");
}
static void ring_setup_status_page(struct intel_engine_cs *engine)
@@ -157,44 +153,6 @@ static void ring_setup_status_page(struct intel_engine_cs *engine)
flush_cs_tlb(engine);
}
-static bool stop_ring(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- if (INTEL_GEN(dev_priv) > 2) {
- ENGINE_WRITE(engine,
- RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
- if (intel_wait_for_register(engine->uncore,
- RING_MI_MODE(engine->mmio_base),
- MODE_IDLE,
- MODE_IDLE,
- 1000)) {
- drm_err(&dev_priv->drm,
- "%s : timed out trying to stop ring\n",
- engine->name);
-
- /*
- * Sometimes we observe that the idle flag is not
- * set even though the ring is empty. So double
- * check before giving up.
- */
- if (ENGINE_READ(engine, RING_HEAD) !=
- ENGINE_READ(engine, RING_TAIL))
- return false;
- }
- }
-
- ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));
-
- ENGINE_WRITE(engine, RING_HEAD, 0);
- ENGINE_WRITE(engine, RING_TAIL, 0);
-
- /* The ring must be empty before it is disabled */
- ENGINE_WRITE(engine, RING_CTL, 0);
-
- return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
-}
-
static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
if (i915_is_ggtt(vm))
@@ -212,9 +170,16 @@ static void set_pp_dir(struct intel_engine_cs *engine)
{
struct i915_address_space *vm = vm_alias(engine->gt->vm);
- if (vm) {
- ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
- ENGINE_WRITE(engine, RING_PP_DIR_BASE, pp_dir(vm));
+ if (!vm)
+ return;
+
+ ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
+ ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));
+
+ if (INTEL_GEN(engine->i915) >= 7) {
+ ENGINE_WRITE_FW(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
}
@@ -222,38 +187,10 @@ static int xcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_ring *ring = engine->legacy.ring;
- int ret = 0;
ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
ring->head, ring->tail);
- intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
-
- /* WaClearRingBufHeadRegAtInit:ctg,elk */
- if (!stop_ring(engine)) {
- /* G45 ring initialization often fails to reset head to zero */
- drm_dbg(&dev_priv->drm, "%s head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_HEAD),
- ENGINE_READ(engine, RING_TAIL),
- ENGINE_READ(engine, RING_START));
-
- if (!stop_ring(engine)) {
- drm_err(&dev_priv->drm,
- "failed to set %s head to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_HEAD),
- ENGINE_READ(engine, RING_TAIL),
- ENGINE_READ(engine, RING_START));
- ret = -EIO;
- goto out;
- }
- }
-
if (HWS_NEEDS_PHYSICAL(dev_priv))
ring_setup_phys_status_page(engine);
else
@@ -270,7 +207,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
* also enforces ordering), otherwise the hw might lose the new ring
* register values.
*/
- ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
+ ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma));
/* Check that the ring offsets point within the ring! */
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
@@ -280,53 +217,98 @@ static int xcs_resume(struct intel_engine_cs *engine)
set_pp_dir(engine);
/* First wake the ring up to an empty/idle ring */
- ENGINE_WRITE(engine, RING_HEAD, ring->head);
- ENGINE_WRITE(engine, RING_TAIL, ring->head);
+ ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
+ ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
ENGINE_POSTING_READ(engine, RING_TAIL);
- ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);
+ ENGINE_WRITE_FW(engine, RING_CTL,
+ RING_CTL_SIZE(ring->size) | RING_VALID);
/* If the head is still not zero, the ring is dead */
- if (intel_wait_for_register(engine->uncore,
- RING_CTL(engine->mmio_base),
- RING_VALID, RING_VALID,
- 50)) {
- drm_err(&dev_priv->drm, "%s initialization failed "
- "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_CTL) & RING_VALID,
- ENGINE_READ(engine, RING_HEAD), ring->head,
- ENGINE_READ(engine, RING_TAIL), ring->tail,
- ENGINE_READ(engine, RING_START),
- i915_ggtt_offset(ring->vma));
- ret = -EIO;
- goto out;
+ if (__intel_wait_for_register_fw(engine->uncore,
+ RING_CTL(engine->mmio_base),
+ RING_VALID, RING_VALID,
+ 5000, 0, NULL)) {
+ drm_err(&dev_priv->drm,
+ "%s initialization failed; "
+ "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
+ engine->name,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_CTL) & RING_VALID,
+ ENGINE_READ(engine, RING_HEAD), ring->head,
+ ENGINE_READ(engine, RING_TAIL), ring->tail,
+ ENGINE_READ(engine, RING_START),
+ i915_ggtt_offset(ring->vma));
+ return -EIO;
}
if (INTEL_GEN(dev_priv) > 2)
- ENGINE_WRITE(engine,
- RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine,
+ RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
/* Now awake, let it get started */
if (ring->tail != ring->head) {
- ENGINE_WRITE(engine, RING_TAIL, ring->tail);
+ ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail);
ENGINE_POSTING_READ(engine, RING_TAIL);
}
/* Papering over lost _interrupts_ immediately following the restart */
intel_engine_signal_breadcrumbs(engine);
-out:
- intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
+ return 0;
+}
- return ret;
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+ struct intel_timeline *tl;
+
+ list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+ intel_timeline_reset_seqno(tl);
}
-static void reset_prepare(struct intel_engine_cs *engine)
+static void xcs_sanitize(struct intel_engine_cs *engine)
+{
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
+ *
+ * We have to assume that across suspend/resume (or other loss
+ * of control) the contents of our pinned buffers have been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
+ */
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */
+ sanitize_hwsp(engine);
+
+ /* And scrub the dirty cachelines for the HWSP */
+ clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
+static bool stop_ring(struct intel_engine_cs *engine)
{
- struct intel_uncore *uncore = engine->uncore;
- const u32 base = engine->mmio_base;
+ /* Empty the ring by skipping to the end */
+ ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
+ ENGINE_POSTING_READ(engine, RING_HEAD);
+
+ /* The ring must be empty before it is disabled */
+ ENGINE_WRITE_FW(engine, RING_CTL, 0);
+ ENGINE_POSTING_READ(engine, RING_CTL);
+
+ /* Then reset the disabled ring */
+ ENGINE_WRITE_FW(engine, RING_HEAD, 0);
+ ENGINE_WRITE_FW(engine, RING_TAIL, 0);
+
+ return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
+}
+static void reset_prepare(struct intel_engine_cs *engine)
+{
/*
* We stop engines, otherwise we might get a failed reset and a
* dead gpu (on elk). Even a modern gpu such as kbl can suffer
@@ -338,30 +320,35 @@ static void reset_prepare(struct intel_engine_cs *engine)
* WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
*
* WaMediaResetMainRingCleanup:ctg,elk (presumably)
+ * WaClearRingBufHeadRegAtInit:ctg,elk
*
* FIXME: Wa for more modern gens needs to be validated
*/
ENGINE_TRACE(engine, "\n");
+ intel_engine_stop_cs(engine);
- if (intel_engine_stop_cs(engine))
- ENGINE_TRACE(engine, "timed out on STOP_RING\n");
-
- intel_uncore_write_fw(uncore,
- RING_HEAD(base),
- intel_uncore_read_fw(uncore, RING_TAIL(base)));
- intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
-
- intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
- intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
- intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
-
- /* The ring must be empty before it is disabled */
- intel_uncore_write_fw(uncore, RING_CTL(base), 0);
+ if (!stop_ring(engine)) {
+ /* G45 ring initialization often fails to reset head to zero */
+ drm_dbg(&engine->i915->drm,
+ "%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ engine->name,
+ ENGINE_READ_FW(engine, RING_CTL),
+ ENGINE_READ_FW(engine, RING_HEAD),
+ ENGINE_READ_FW(engine, RING_TAIL),
+ ENGINE_READ_FW(engine, RING_START));
+ }
- /* Check acts as a post */
- if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
- ENGINE_TRACE(engine, "ring head [%x] not parked\n",
- intel_uncore_read_fw(uncore, RING_HEAD(base)));
+ if (!stop_ring(engine)) {
+ drm_err(&engine->i915->drm,
+ "failed to set %s head to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ engine->name,
+ ENGINE_READ_FW(engine, RING_CTL),
+ ENGINE_READ_FW(engine, RING_HEAD),
+ ENGINE_READ_FW(engine, RING_TAIL),
+ ENGINE_READ_FW(engine, RING_START));
+ }
}
static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
@@ -372,12 +359,14 @@ static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
rq = NULL;
spin_lock_irqsave(&engine->active.lock, flags);
+ rcu_read_lock();
list_for_each_entry(pos, &engine->active.requests, sched.link) {
- if (!i915_request_completed(pos)) {
+ if (!__i915_request_is_complete(pos)) {
rq = pos;
break;
}
}
+ rcu_read_unlock();
/*
* The guilty request will get skipped on a hung engine.
@@ -441,10 +430,8 @@ static void reset_cancel(struct intel_engine_cs *engine)
spin_lock_irqsave(&engine->active.lock, flags);
/* Mark all submitted requests as skipped. */
- list_for_each_entry(request, &engine->active.requests, sched.link) {
- i915_request_set_error_once(request, -EIO);
- i915_request_mark_complete(request);
- }
+ list_for_each_entry(request, &engine->active.requests, sched.link)
+ i915_request_mark_eio(request);
intel_engine_signal_breadcrumbs(engine);
/* Remaining _unready_ requests will be nop'ed when submitted */
@@ -603,6 +590,7 @@ static int ring_context_pin(struct intel_context *ce, void *unused)
static void ring_context_reset(struct intel_context *ce)
{
intel_ring_reset(ce->ring, ce->ring->emit);
+ clear_bit(CONTEXT_VALID_BIT, &ce->flags);
}
static const struct intel_context_ops ring_context_ops = {
@@ -654,9 +642,9 @@ static int load_pd_dir(struct i915_request *rq,
return rq->engine->emit_flush(rq, EMIT_FLUSH);
}
-static inline int mi_set_context(struct i915_request *rq,
- struct intel_context *ce,
- u32 flags)
+static int mi_set_context(struct i915_request *rq,
+ struct intel_context *ce,
+ u32 flags)
{
struct intel_engine_cs *engine = rq->engine;
struct drm_i915_private *i915 = engine->i915;
@@ -1071,6 +1059,8 @@ static void setup_common(struct intel_engine_cs *engine)
setup_irq(engine);
engine->resume = xcs_resume;
+ engine->sanitize = xcs_sanitize;
+
engine->reset.prepare = reset_prepare;
engine->reset.rewind = reset_rewind;
engine->reset.cancel = reset_cancel;
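Several of the writes above (STOP_RING, INSTPM_TLB_INVALIDATE) go through _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE(). These target "masked" registers whose upper 16 bits select which of the lower 16 bits the hardware actually updates, so no read-modify-write is needed. A minimal illustration of that encoding, assuming the usual (mask << 16) | value layout (macro names here are illustrative, not the i915 definitions):

#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative re-creation of the masked-register write encoding. */
#define EXAMPLE_MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
#define EXAMPLE_MASKED_BIT_ENABLE(bit)		EXAMPLE_MASKED_FIELD((bit), (bit))
#define EXAMPLE_MASKED_BIT_DISABLE(bit)		EXAMPLE_MASKED_FIELD((bit), 0)

/*
 * EXAMPLE_MASKED_BIT_ENABLE(BIT(8)) == 0x01000100: the upper half tells the
 * hardware that only bit 8 is being written, the lower half supplies its new
 * value, and every other bit of the register is left untouched.
 */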
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index b629eeb14002..ee5835c29c03 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -43,7 +43,7 @@ static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
return mask & ~rps->pm_intrmsk_mbz;
}
-static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
intel_uncore_write_fw(uncore, reg, val);
}
@@ -400,7 +400,7 @@ static unsigned int gen5_invert_freq(struct intel_rps *rps,
return val;
}
-static bool gen5_rps_set(struct intel_rps *rps, u8 val)
+static int __gen5_rps_set(struct intel_rps *rps, u8 val)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
u16 rgvswctl;
@@ -410,7 +410,7 @@ static bool gen5_rps_set(struct intel_rps *rps, u8 val)
rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
if (rgvswctl & MEMCTL_CMD_STS) {
DRM_DEBUG("gpu busy, RCS change rejected\n");
- return false; /* still busy with another command */
+ return -EBUSY; /* still busy with another command */
}
/* Invert the frequency bin into an ips delay */
@@ -426,7 +426,18 @@ static bool gen5_rps_set(struct intel_rps *rps, u8 val)
rgvswctl |= MEMCTL_CMD_STS;
intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
- return true;
+ return 0;
+}
+
+static int gen5_rps_set(struct intel_rps *rps, u8 val)
+{
+ int err;
+
+ spin_lock_irq(&mchdev_lock);
+ err = __gen5_rps_set(rps, val);
+ spin_unlock_irq(&mchdev_lock);
+
+ return err;
}
static unsigned long intel_pxfreq(u32 vidfreq)
@@ -557,7 +568,7 @@ static bool gen5_rps_enable(struct intel_rps *rps)
"stuck trying to change perf mode\n");
mdelay(1);
- gen5_rps_set(rps, rps->cur_freq);
+ __gen5_rps_set(rps, rps->cur_freq);
rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
@@ -599,7 +610,7 @@ static void gen5_rps_disable(struct intel_rps *rps)
intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
/* Go back to the starting frequency */
- gen5_rps_set(rps, rps->idle_freq);
+ __gen5_rps_set(rps, rps->idle_freq);
mdelay(1);
rgvswctl |= MEMCTL_CMD_STS;
intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
@@ -797,20 +808,19 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update)
struct drm_i915_private *i915 = rps_to_i915(rps);
int err;
- if (INTEL_GEN(i915) < 6)
- return 0;
-
if (val == rps->last_freq)
return 0;
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
err = vlv_rps_set(rps, val);
- else
+ else if (INTEL_GEN(i915) >= 6)
err = gen6_rps_set(rps, val);
+ else
+ err = gen5_rps_set(rps, val);
if (err)
return err;
- if (update)
+ if (update && INTEL_GEN(i915) >= 6)
gen6_rps_set_thresholds(rps, val);
rps->last_freq = val;
@@ -852,6 +862,8 @@ void intel_rps_park(struct intel_rps *rps)
{
int adj;
+ GEM_BUG_ON(atomic_read(&rps->num_waiters));
+
if (!intel_rps_clear_active(rps))
return;
@@ -907,28 +919,27 @@ void intel_rps_park(struct intel_rps *rps)
void intel_rps_boost(struct i915_request *rq)
{
- struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
- unsigned long flags;
-
- if (i915_request_signaled(rq) || !intel_rps_is_active(rps))
+ if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
return;
/* Serializes with i915_request_retire() */
- spin_lock_irqsave(&rq->lock, flags);
- if (!i915_request_has_waitboost(rq) &&
- !dma_fence_is_signaled_locked(&rq->fence)) {
- set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
+ if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
+ struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
+
+ if (atomic_fetch_inc(&rps->num_waiters))
+ return;
+
+ if (!intel_rps_is_active(rps))
+ return;
GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
rq->fence.context, rq->fence.seqno);
- if (!atomic_fetch_inc(&rps->num_waiters) &&
- READ_ONCE(rps->cur_freq) < rps->boost_freq)
+ if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
schedule_work(&rps->work);
- atomic_inc(&rps->boosts);
+ WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */
}
- spin_unlock_irqrestore(&rq->lock, flags);
}
int intel_rps_set(struct intel_rps *rps, u8 val)
@@ -1798,7 +1809,7 @@ void gen5_rps_irq_handler(struct intel_rps *rps)
rps->min_freq_softlimit,
rps->max_freq_softlimit);
- if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq))
+ if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq))
rps->cur_freq = new_freq;
spin_unlock(&mchdev_lock);
@@ -2109,7 +2120,7 @@ bool i915_gpu_turbo_disable(void)
spin_lock_irq(&mchdev_lock);
rps->max_freq_softlimit = rps->min_freq;
- ret = gen5_rps_set(&i915->gt.rps, rps->min_freq);
+ ret = !__gen5_rps_set(&i915->gt.rps, rps->min_freq);
spin_unlock_irq(&mchdev_lock);
drm_dev_put(&i915->drm);
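In the rps.c hunks above, __gen5_rps_set() expects mchdev_lock to already be held, while gen5_rps_set() is the self-locking wrapper for external callers; the interrupt handler and the enable/disable paths, which already hold the lock, call the __ variant directly. A minimal sketch of that convention (illustrative names, not the i915 functions), with lockdep_assert_held() making the locking contract checkable:

#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_dev {
	spinlock_t lock;
	u8 hw_val;
};

/* Core helper: the caller must already hold dev->lock. */
static int __example_set(struct example_dev *dev, u8 val)
{
	lockdep_assert_held(&dev->lock);
	dev->hw_val = val;		/* stand-in for the MMIO write */
	return 0;
}

/* Public wrapper: provides the locking for callers outside the lock. */
static int example_set(struct example_dev *dev, u8 val)
{
	int err;

	spin_lock_irq(&dev->lock);
	err = __example_set(dev, val);
	spin_unlock_irq(&dev->lock);

	return err;
}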
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 38083f0402d9..029fe13cf303 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -93,7 +93,7 @@ struct intel_rps {
} power;
atomic_t num_waiters;
- atomic_t boosts;
+ unsigned int boosts;
/* manual wa residency calculations */
struct intel_rps_ei ei;
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 8015964043eb..037b0e3ccbed 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -319,6 +319,25 @@ __intel_timeline_create(struct intel_gt *gt,
return timeline;
}
+struct intel_timeline *
+intel_timeline_create_from_engine(struct intel_engine_cs *engine,
+ unsigned int offset)
+{
+ struct i915_vma *hwsp = engine->status_page.vma;
+ struct intel_timeline *tl;
+
+ tl = __intel_timeline_create(engine->gt, hwsp, offset);
+ if (IS_ERR(tl))
+ return tl;
+
+ /* Borrow a nearby lock; we only create these timelines during init */
+ mutex_lock(&hwsp->vm->mutex);
+ list_add_tail(&tl->engine_link, &engine->status_page.timelines);
+ mutex_unlock(&hwsp->vm->mutex);
+
+ return tl;
+}
+
void __intel_timeline_pin(struct intel_timeline *tl)
{
GEM_BUG_ON(!atomic_read(&tl->pin_count));
@@ -563,11 +582,11 @@ int intel_timeline_read_hwsp(struct i915_request *from,
rcu_read_lock();
cl = rcu_dereference(from->hwsp_cacheline);
- if (i915_request_completed(from)) /* confirm cacheline is valid */
+ if (i915_request_signaled(from)) /* confirm cacheline is valid */
goto unlock;
if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
goto unlock; /* seqno wrapped and completed! */
- if (unlikely(i915_request_completed(from)))
+ if (unlikely(__i915_request_is_complete(from)))
goto release;
rcu_read_unlock();
@@ -615,6 +634,86 @@ void intel_gt_fini_timelines(struct intel_gt *gt)
GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
}
+void intel_gt_show_timelines(struct intel_gt *gt,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent))
+{
+ struct intel_gt_timelines *timelines = &gt->timelines;
+ struct intel_timeline *tl, *tn;
+ LIST_HEAD(free);
+
+ spin_lock(&timelines->lock);
+ list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
+ unsigned long count, ready, inflight;
+ struct i915_request *rq, *rn;
+ struct dma_fence *fence;
+
+ if (!mutex_trylock(&tl->mutex)) {
+ drm_printf(m, "Timeline %llx: busy; skipping\n",
+ tl->fence_context);
+ continue;
+ }
+
+ intel_timeline_get(tl);
+ GEM_BUG_ON(!atomic_read(&tl->active_count));
+ atomic_inc(&tl->active_count); /* pin the list element */
+ spin_unlock(&timelines->lock);
+
+ count = 0;
+ ready = 0;
+ inflight = 0;
+ list_for_each_entry_safe(rq, rn, &tl->requests, link) {
+ if (i915_request_completed(rq))
+ continue;
+
+ count++;
+ if (i915_request_is_ready(rq))
+ ready++;
+ if (i915_request_is_active(rq))
+ inflight++;
+ }
+
+ drm_printf(m, "Timeline %llx: { ", tl->fence_context);
+ drm_printf(m, "count: %lu, ready: %lu, inflight: %lu",
+ count, ready, inflight);
+ drm_printf(m, ", seqno: { current: %d, last: %d }",
+ *tl->hwsp_seqno, tl->seqno);
+ fence = i915_active_fence_get(&tl->last_request);
+ if (fence) {
+ drm_printf(m, ", engine: %s",
+ to_request(fence)->engine->name);
+ dma_fence_put(fence);
+ }
+ drm_printf(m, " }\n");
+
+ if (show_request) {
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
+ show_request(m, rq, "", 2);
+ }
+
+ mutex_unlock(&tl->mutex);
+ spin_lock(&timelines->lock);
+
+ /* Resume list iteration after reacquiring spinlock */
+ list_safe_reset_next(tl, tn, link);
+ if (atomic_dec_and_test(&tl->active_count))
+ list_del(&tl->link);
+
+ /* Defer the final release to after the spinlock */
+ if (refcount_dec_and_test(&tl->kref.refcount)) {
+ GEM_BUG_ON(atomic_read(&tl->active_count));
+ list_add(&tl->link, &free);
+ }
+ }
+ spin_unlock(&timelines->lock);
+
+ list_for_each_entry_safe(tl, tn, &free, link)
+ __intel_timeline_free(&tl->kref);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "gt/selftests/mock_timeline.c"
#include "gt/selftest_timeline.c"
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index 9882cd911d8e..dcdee692a80e 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -31,6 +31,8 @@
#include "i915_syncmap.h"
#include "intel_timeline_types.h"
+struct drm_printer;
+
struct intel_timeline *
__intel_timeline_create(struct intel_gt *gt,
struct i915_vma *global_hwsp,
@@ -42,14 +44,9 @@ intel_timeline_create(struct intel_gt *gt)
return __intel_timeline_create(gt, NULL, 0);
}
-static inline struct intel_timeline *
+struct intel_timeline *
intel_timeline_create_from_engine(struct intel_engine_cs *engine,
- unsigned int offset)
-{
- return __intel_timeline_create(engine->gt,
- engine->status_page.vma,
- offset);
-}
+ unsigned int offset);
static inline struct intel_timeline *
intel_timeline_get(struct intel_timeline *timeline)
@@ -106,4 +103,18 @@ int intel_timeline_read_hwsp(struct i915_request *from,
void intel_gt_init_timelines(struct intel_gt *gt);
void intel_gt_fini_timelines(struct intel_gt *gt);
+void intel_gt_show_timelines(struct intel_gt *gt,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent));
+
+static inline bool
+intel_timeline_is_last(const struct intel_timeline *tl,
+ const struct i915_request *rq)
+{
+ return list_is_last_rcu(&rq->link, &tl->requests);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index 4474f487f589..e360f50706bf 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -84,6 +84,8 @@ struct intel_timeline {
struct list_head link;
struct intel_gt *gt;
+ struct list_head engine_link;
+
struct kref kref;
struct rcu_head rcu;
};
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index adc9a8ea410a..ec366cf9ef56 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
@@ -194,7 +195,7 @@ static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
}
static void
-wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
+wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
wa_add(wal, reg, clear, set, clear);
}
@@ -202,21 +203,32 @@ wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
- wa_write_masked_or(wal, reg, ~0, set);
+ wa_write_clr_set(wal, reg, ~0, set);
}
static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
- wa_write_masked_or(wal, reg, set, set);
+ wa_write_clr_set(wal, reg, set, set);
}
static void
wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
{
- wa_write_masked_or(wal, reg, clr, 0);
+ wa_write_clr_set(wal, reg, clr, 0);
}
+/*
+ * WA operations on "masked register". A masked register has the upper 16 bits
+ * documented as "masked" in b-spec. Its purpose is to allow writing to just a
+ * portion of the register without a rmw: you simply write in the upper 16 bits
+ * the mask of bits you are going to modify.
+ *
+ * The wa_masked_* family of functions already does the necessary operations to
+ * calculate the mask based on the parameters passed, so the user only has to
+ * provide the lower 16 bits of that register.
+ */
+
static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
@@ -229,38 +241,36 @@ wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val);
}
-#define WA_SET_BIT_MASKED(addr, mask) \
- wa_masked_en(wal, (addr), (mask))
-
-#define WA_CLR_BIT_MASKED(addr, mask) \
- wa_masked_dis(wal, (addr), (mask))
-
-#define WA_SET_FIELD_MASKED(addr, mask, value) \
- wa_write_masked_or(wal, (addr), 0, _MASKED_FIELD((mask), (value)))
+static void
+wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
+ u32 mask, u32 val)
+{
+ wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask);
+}
static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+ wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}
static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+ wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+ wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
/* WaDisableAsyncFlipPerfMode:bdw,chv */
- WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+ wa_masked_en(wal, MI_MODE, ASYNC_FLIP_PERF_DISABLE);
/* WaDisablePartialInstShootdown:bdw,chv */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* Use Force Non-Coherent whenever executing a 3D context. This is a
* workaround for a possible hang in the unlikely event a TLB
@@ -268,9 +278,9 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
*/
/* WaForceEnableNonCoherent:bdw,chv */
/* WaHdcDisableFetchWhenMasked:bdw,chv */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_DONOT_FETCH_MEM_WHEN_MASKED |
- HDC_FORCE_NON_COHERENT);
+ wa_masked_en(wal, HDC_CHICKEN0,
+ HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+ HDC_FORCE_NON_COHERENT);
/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
* "The Hierarchical Z RAW Stall Optimization allows non-overlapping
@@ -280,10 +290,10 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
*
* This optimization is off by default for BDW and CHV; turn it on.
*/
- WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+ wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
/* Wa4x4STCOptimizationDisable:bdw,chv */
- WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+ wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
/*
* BSpec recommends 8x4 when MSAA is used,
@@ -293,7 +303,7 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
- WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ wa_masked_field_set(wal, GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
}
@@ -306,24 +316,24 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
gen8_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* WaDisableDopClockGating:bdw
*
* Also see the related UCGTCL1 write in bdw_init_clock_gating()
* to disable EUTC clock gating.
*/
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
- DOP_CLOCK_GATING_DISABLE);
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ DOP_CLOCK_GATING_DISABLE);
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN8_SAMPLER_POWER_BYPASS_DIS);
+ wa_masked_en(wal, HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- /* WaForceContextSaveRestoreNonCoherent:bdw */
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
- /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+ wa_masked_en(wal, HDC_CHICKEN0,
+ /* WaForceContextSaveRestoreNonCoherent:bdw */
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
+ (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}
static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -332,10 +342,10 @@ static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
gen8_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:chv */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* Improve HiZ throughput on CHV. */
- WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
+ wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}
static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -349,38 +359,38 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
* Must match Display Engine. See
* WaCompressedResourceDisplayNewHashMode.
*/
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN9_PBE_COMPRESSED_HASH_SELECTION);
- WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN9_PBE_COMPRESSED_HASH_SELECTION);
+ wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
}
/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- FLOW_CONTROL_ENABLE |
- PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ FLOW_CONTROL_ENABLE |
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_ENABLE_YV12_BUGFIX |
- GEN9_ENABLE_GPGPU_PREEMPTION);
+ wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_ENABLE_YV12_BUGFIX |
+ GEN9_ENABLE_GPGPU_PREEMPTION);
/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(CACHE_MODE_1,
- GEN8_4x4_STC_OPTIMIZATION_DISABLE |
- GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
+ wa_masked_en(wal, CACHE_MODE_1,
+ GEN8_4x4_STC_OPTIMIZATION_DISABLE |
+ GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
- WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
- GEN9_CCS_TLB_PREFETCH_ENABLE);
+ wa_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
+ GEN9_CCS_TLB_PREFETCH_ENABLE);
/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
- HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+ wa_masked_en(wal, HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
* both tied to WaForceContextSaveRestoreNonCoherent
@@ -396,19 +406,19 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
*/
/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_NON_COHERENT);
+ wa_masked_en(wal, HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
if (IS_SKYLAKE(i915) ||
IS_KABYLAKE(i915) ||
IS_COFFEELAKE(i915) ||
IS_COMETLAKE(i915))
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN8_SAMPLER_POWER_BYPASS_DIS);
+ wa_masked_en(wal, HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
+ wa_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
/*
* Supporting preemption with fine-granularity requires changes in the
@@ -422,16 +432,16 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
*/
/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
- WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+ wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
/* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
if (IS_GEN9_LP(i915))
- WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
+ wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}
static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
@@ -465,7 +475,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
return;
/* Tune IZ hashing. See intel_device_info_runtime_init() */
- WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ wa_masked_field_set(wal, GEN7_GT_MODE,
GEN9_IZ_HASHING_MASK(2) |
GEN9_IZ_HASHING_MASK(1) |
GEN9_IZ_HASHING_MASK(0),
@@ -487,12 +497,12 @@ static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:bxt */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- STALL_DOP_GATING_DISABLE);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ STALL_DOP_GATING_DISABLE);
/* WaToEnableHwFixForPushConstHWBug:bxt */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -504,12 +514,12 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
/* WaToEnableHwFixForPushConstHWBug:kbl */
if (IS_KBL_GT_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableSbeCacheDispatchPortSharing:kbl */
- WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -518,8 +528,8 @@ static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:glk */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -528,41 +538,41 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:cfl */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableSbeCacheDispatchPortSharing:cfl */
- WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
/* WaForceContextSaveRestoreNonCoherent:cnl */
- WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
+ wa_masked_en(wal, CNL_HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaPushConstantDereferenceHoldDisable:cnl */
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
/* FtrEnableFastAnisoL1BankingFix:cnl */
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
+ wa_masked_en(wal, HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
/* WaDisable3DMidCmdPreemption:cnl */
- WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+ wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
/* WaDisableGPGPUMidCmdPreemption:cnl */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
/* WaDisableEarlyEOT:cnl */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
+ wa_masked_en(wal, GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
}
static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -580,8 +590,8 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
* Formerly known as WaPushConstantDereferenceHoldDisable
*/
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
- PUSH_CONSTANT_DEREF_DISABLE);
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ PUSH_CONSTANT_DEREF_DISABLE);
/* WaForceEnableNonCoherent:icl
* This is not the same workaround as in early Gen9 platforms, where
@@ -590,40 +600,40 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
* (the register is whitelisted in hardware now, so UMDs can opt in
* for coherency if they have a good reason).
*/
- WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
+ wa_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
/* Wa_2006611047:icl (pre-prod)
* Formerly known as WaDisableImprovedTdlClkGating
*/
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
- GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
/* Wa_2006665173:icl (pre-prod) */
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
- WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
- GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
+ wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
/* WaEnableFloatBlendOptimization:icl */
- wa_write_masked_or(wal,
- GEN10_CACHE_MODE_SS,
- 0, /* write-only, so skip validation */
- _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
+ wa_write_clr_set(wal,
+ GEN10_CACHE_MODE_SS,
+ 0, /* write-only, so skip validation */
+ _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
/* WaDisableGPGPUMidThreadPreemption:icl */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
/* allow headerless messages for preemptible GPGPU context */
- WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
- GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
+ wa_masked_en(wal, GEN10_SAMPLER_MODE,
+ GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
/* Wa_1604278689:icl,ehl */
wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
- wa_write_masked_or(wal, IVB_FBC_RT_BASE_UPPER,
- 0, /* write-only register; skip validation */
- 0xFFFFFFFF);
+ wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
+ 0, /* write-only register; skip validation */
+ 0xFFFFFFFF);
/* Wa_1406306137:icl,ehl */
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
@@ -643,11 +653,11 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
* Wa_14010443199:rkl
* Wa_14010698770:rkl
*/
- WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
- GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
+ wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
/* WaDisableGPGPUMidThreadPreemption:gen12 */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
}
@@ -680,12 +690,22 @@ static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
gen12_ctx_workarounds_init(engine, wal);
/* Wa_1409044764 */
- WA_CLR_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
- DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
+ wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
/* Wa_22010493298 */
- WA_SET_BIT_MASKED(HIZ_CHICKEN,
- DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
+ wa_masked_en(wal, HIZ_CHICKEN,
+ DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
+
+ /*
+ * Wa_16011163337
+ *
+ * Like in tgl_ctx_workarounds_init(), read verification is ignored due
+ * to Wa_1608008084.
+ */
+ wa_add(wal,
+ FF_MODE2,
+ FF_MODE2_GS_TIMER_MASK, FF_MODE2_GS_TIMER_224, 0);
}
static void
@@ -804,57 +824,11 @@ ilk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
static void
snb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
- wa_masked_en(wal,
- _3D_CHICKEN,
- _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
-
- /* WaDisable_RenderCache_OperationalFlush:snb */
- wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- wa_add(wal,
- GEN6_GT_MODE, 0,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
- GEN6_WIZ_HASHING_16x4);
-
- wa_masked_dis(wal, CACHE_MODE_0, CM0_STC_EVICT_DISABLE_LRA_SNB);
-
- wa_masked_en(wal,
- _3D_CHICKEN3,
- /* WaStripsFansDisableFastClipPerformanceFix:snb */
- _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
- /*
- * Bspec says:
- * "This bit must be set if 3DSTATE_CLIP clip mode is set
- * to normal and 3DSTATE_SF number of SF output attributes
- * is more than 16."
- */
- _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
}
static void
ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- /* WaDisableEarlyCull:ivb */
- wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
-
- /* WaDisablePSDDualDispatchEnable:ivb */
- if (IS_IVB_GT1(i915))
- wa_masked_en(wal,
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
-
- /* WaDisable_RenderCache_OperationalFlush:ivb */
- wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
-
/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
wa_masked_dis(wal,
GEN7_COMMON_SLICE_CHICKEN1,
@@ -866,91 +840,15 @@ ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
/* WaForceL3Serialization:ivb */
wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
-
- /*
- * WaVSThreadDispatchOverride:ivb,vlv
- *
- * This actually overrides the dispatch
- * mode for all thread types.
- */
- wa_write_masked_or(wal, GEN7_FF_THREAD_MODE,
- GEN7_FF_SCHED_MASK,
- GEN7_FF_TS_SCHED_HW |
- GEN7_FF_VS_SCHED_HW |
- GEN7_FF_DS_SCHED_HW);
-
- if (0) { /* causes HiZ corruption on ivb:gt1 */
- /* enable HiZ Raw Stall Optimization */
- wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
- }
-
- /* WaDisable4x2SubspanOptimization:ivb */
- wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- wa_add(wal, GEN7_GT_MODE, 0,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
- GEN6_WIZ_HASHING_16x4);
}
static void
vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- /* WaDisableEarlyCull:vlv */
- wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
-
- /* WaPsdDispatchEnable:vlv */
- /* WaDisablePSDDualDispatchEnable:vlv */
- wa_masked_en(wal,
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_MAX_PS_THREAD_DEP |
- GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
-
- /* WaDisable_RenderCache_OperationalFlush:vlv */
- wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
-
/* WaForceL3Serialization:vlv */
wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
/*
- * WaVSThreadDispatchOverride:ivb,vlv
- *
- * This actually overrides the dispatch
- * mode for all thread types.
- */
- wa_write_masked_or(wal,
- GEN7_FF_THREAD_MODE,
- GEN7_FF_SCHED_MASK,
- GEN7_FF_TS_SCHED_HW |
- GEN7_FF_VS_SCHED_HW |
- GEN7_FF_DS_SCHED_HW);
-
- /*
- * BSpec says this must be set, even though
- * WaDisable4x2SubspanOptimization isn't listed for VLV.
- */
- wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- wa_add(wal, GEN7_GT_MODE, 0,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
- GEN6_WIZ_HASHING_16x4);
-
- /*
* WaIncreaseL3CreditsForVLVB0:vlv
* This is the hardware default actually.
*/
@@ -970,31 +868,6 @@ hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
/* WaVSRefCountFullforceMissDisable:hsw */
wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
-
- wa_masked_dis(wal,
- CACHE_MODE_0_GEN7,
- /* WaDisable_RenderCache_OperationalFlush:hsw */
- RC_OP_FLUSH_ENABLE |
- /* enable HiZ Raw Stall Optimization */
- HIZ_RAW_STALL_OPT_DISABLE);
-
- /* WaDisable4x2SubspanOptimization:hsw */
- wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- wa_add(wal, GEN7_GT_MODE, 0,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
- GEN6_WIZ_HASHING_16x4);
-
- /* WaSampleCChickenBitEnable:hsw */
- wa_masked_en(wal, HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
}
static void
@@ -1164,7 +1037,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
drm_dbg(&i915->drm, "MCR slice/subslice = %x\n", mcr);
- wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
+ wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
static void
@@ -1189,10 +1062,10 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaModifyGamTlbPartitioning:icl */
- wa_write_masked_or(wal,
- GEN11_GACB_PERF_CTRL,
- GEN11_HASH_CTRL_MASK,
- GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
+ wa_write_clr_set(wal,
+ GEN11_GACB_PERF_CTRL,
+ GEN11_HASH_CTRL_MASK,
+ GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
/* Wa_1405766107:icl
* Formerly known as WaCL2SFHalfMaxAlloc
@@ -1260,6 +1133,11 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
+
+ /* Wa_1408615072:tgl[a0] */
+ if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ VSUNIT_CLKGATE_DIS_TGL);
}
static void
@@ -1358,9 +1236,9 @@ static bool
wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
{
if ((cur ^ wa->set) & wa->read) {
- DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x)\n",
+ DRM_ERROR("%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
name, from, i915_mmio_reg_offset(wa->reg),
- cur, cur & wa->read, wa->set);
+ cur, cur & wa->read, wa->set & wa->read);
return false;
}
@@ -1425,7 +1303,8 @@ bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
}
-static inline bool is_nonpriv_flags_valid(u32 flags)
+__maybe_unused
+static bool is_nonpriv_flags_valid(u32 flags)
{
/* Check only valid flag bits are set */
if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
@@ -1752,10 +1631,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal,
GEN7_SARCHKMD,
GEN7_DISABLE_SAMPLER_PREFETCH);
-
- /* Wa_1408615072:tgl */
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
- VSUNIT_CLKGATE_DIS_TGL);
}
if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
@@ -1770,6 +1645,19 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
*/
wa_write_or(wal, GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+
+ /*
+ * Wa_1606700617:tgl,dg1
+ * Wa_22010271021:tgl,rkl,dg1
+ */
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
+
+ /* Wa_1406941453:tgl,rkl,dg1 */
+ wa_masked_en(wal,
+ GEN10_SAMPLER_MODE,
+ ENABLE_SMALLPL);
}
if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
@@ -1798,21 +1686,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN6_RC_SLEEP_PSMI_CONTROL,
GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
GEN8_RC_SEMA_IDLE_MSG_DISABLE);
-
- /*
- * Wa_1606700617:tgl
- * Wa_22010271021:tgl,rkl
- */
- wa_masked_en(wal,
- GEN9_CS_DEBUG_MODE1,
- FF_DOP_CLOCK_GATE_DISABLE);
- }
-
- if (IS_GEN(i915, 12)) {
- /* Wa_1406941453:gen12 */
- wa_masked_en(wal,
- GEN10_SAMPLER_MODE,
- ENABLE_SMALLPL);
}
if (IS_GEN(i915, 11)) {
@@ -1838,14 +1711,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* Wa_1604223664:icl
* Formerly known as WaL3BankAddressHashing
*/
- wa_write_masked_or(wal,
- GEN8_GARBCNTL,
- GEN11_HASH_CTRL_EXCL_MASK,
- GEN11_HASH_CTRL_EXCL_BIT0);
- wa_write_masked_or(wal,
- GEN11_GLBLINVL,
- GEN11_BANK_HASH_ADDR_EXCL_MASK,
- GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+ wa_write_clr_set(wal,
+ GEN8_GARBCNTL,
+ GEN11_HASH_CTRL_EXCL_MASK,
+ GEN11_HASH_CTRL_EXCL_BIT0);
+ wa_write_clr_set(wal,
+ GEN11_GLBLINVL,
+ GEN11_BANK_HASH_ADDR_EXCL_MASK,
+ GEN11_BANK_HASH_ADDR_EXCL_BIT0);
/*
* Wa_1405733216:icl
@@ -1874,10 +1747,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_DISABLE_SAMPLER_PREFETCH);
/* Wa_1409178092:icl */
- wa_write_masked_or(wal,
- GEN11_SCRATCH2,
- GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
- 0);
+ wa_write_clr_set(wal,
+ GEN11_SCRATCH2,
+ GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
+ 0);
/* WaEnable32PlaneMode:icl */
wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
@@ -1951,24 +1824,132 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
if (IS_GEN9_LP(i915))
- wa_write_masked_or(wal,
- GEN8_L3SQCREG1,
- L3_PRIO_CREDITS_MASK,
- L3_GENERAL_PRIO_CREDITS(62) |
- L3_HIGH_PRIO_CREDITS(2));
+ wa_write_clr_set(wal,
+ GEN8_L3SQCREG1,
+ L3_PRIO_CREDITS_MASK,
+ L3_GENERAL_PRIO_CREDITS(62) |
+ L3_HIGH_PRIO_CREDITS(2));
/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
wa_write_or(wal,
GEN8_L3SQCREG4,
GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+ /* Disable atomics in L3 to prevent unrecoverable hangs */
+ wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
+ GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
+ wa_write_clr_set(wal, GEN8_L3SQCREG4,
+ GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
+ wa_write_clr_set(wal, GEN9_SCRATCH1,
+ EVICTION_PERF_FIX_ENABLE, 0);
+ }
+
+ if (IS_HASWELL(i915)) {
+ /* WaSampleCChickenBitEnable:hsw */
+ wa_masked_en(wal,
+ HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
+
+ wa_masked_dis(wal,
+ CACHE_MODE_0_GEN7,
+ /* enable HiZ Raw Stall Optimization */
+ HIZ_RAW_STALL_OPT_DISABLE);
+
+ /* WaDisable4x2SubspanOptimization:hsw */
+ wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+ }
+
+ if (IS_VALLEYVIEW(i915)) {
+ /* WaDisableEarlyCull:vlv */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+ /*
+ * WaVSThreadDispatchOverride:ivb,vlv
+ *
+ * This actually overrides the dispatch
+ * mode for all thread types.
+ */
+ wa_write_clr_set(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN7_FF_SCHED_MASK,
+ GEN7_FF_TS_SCHED_HW |
+ GEN7_FF_VS_SCHED_HW |
+ GEN7_FF_DS_SCHED_HW);
+
+ /* WaPsdDispatchEnable:vlv */
+ /* WaDisablePSDDualDispatchEnable:vlv */
+ wa_masked_en(wal,
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_MAX_PS_THREAD_DEP |
+ GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
}
- if (IS_GEN(i915, 7))
+ if (IS_IVYBRIDGE(i915)) {
+ /* WaDisableEarlyCull:ivb */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+ if (0) { /* causes HiZ corruption on ivb:gt1 */
+ /* enable HiZ Raw Stall Optimization */
+ wa_masked_dis(wal,
+ CACHE_MODE_0_GEN7,
+ HIZ_RAW_STALL_OPT_DISABLE);
+ }
+
+ /*
+ * WaVSThreadDispatchOverride:ivb,vlv
+ *
+ * This actually overrides the dispatch
+ * mode for all thread types.
+ */
+ wa_write_clr_set(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN7_FF_SCHED_MASK,
+ GEN7_FF_TS_SCHED_HW |
+ GEN7_FF_VS_SCHED_HW |
+ GEN7_FF_DS_SCHED_HW);
+
+ /* WaDisablePSDDualDispatchEnable:ivb */
+ if (IS_IVB_GT1(i915))
+ wa_masked_en(wal,
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
+ }
+
+ if (IS_GEN(i915, 7)) {
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
wa_masked_en(wal,
GFX_MODE_GEN7,
GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
+ /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
+ wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
+
+ /*
+ * BSpec says this must be set, even though
+ * WaDisable4x2SubspanOptimization:ivb,hsw
+ * WaDisable4x2SubspanOptimization isn't listed for VLV.
+ */
+ wa_masked_en(wal,
+ CACHE_MODE_1,
+ PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ wa_add(wal, GEN7_GT_MODE, 0,
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK,
+ GEN6_WIZ_HASHING_16x4),
+ GEN6_WIZ_HASHING_16x4);
+ }
+
if (IS_GEN_RANGE(i915, 6, 7))
/*
* We need to disable the AsyncFlip performance optimisations in
@@ -1991,6 +1972,39 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GFX_MODE,
GFX_TLB_INVALIDATE_EXPLICIT);
+ /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
+ wa_masked_en(wal,
+ _3D_CHICKEN,
+ _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
+
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ /* WaStripsFansDisableFastClipPerformanceFix:snb */
+ _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
+ /*
+ * Bspec says:
+ * "This bit must be set if 3DSTATE_CLIP clip mode is set
+ * to normal and 3DSTATE_SF number of SF output attributes
+ * is more than 16."
+ */
+ _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ wa_add(wal,
+ GEN6_GT_MODE, 0,
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
+ GEN6_WIZ_HASHING_16x4);
+
+ /* WaDisable_RenderCache_OperationalFlush:snb */
+ wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
+
/*
* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
@@ -2067,39 +2081,6 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
wa_list_apply(engine->uncore, &engine->wa_list);
}
-static struct i915_vma *
-create_scratch(struct i915_address_space *vm, int count)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- unsigned int size;
- int err;
-
- size = round_up(count * sizeof(u32), PAGE_SIZE);
- obj = i915_gem_object_create_internal(vm->i915, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
-
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0,
- i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
- if (err)
- goto err_obj;
-
- return vma;
-
-err_obj:
- i915_gem_object_put(obj);
- return ERR_PTR(err);
-}
-
struct mcr_range {
u32 start;
u32 end;
@@ -2202,7 +2183,8 @@ static int engine_wa_list_verify(struct intel_context *ce,
if (!wal->count)
return 0;
- vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
+ vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
+ wal->count * sizeof(u32));
if (IS_ERR(vma))
return PTR_ERR(vma);
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 2f830017c51d..4b4f03b70df7 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -245,17 +245,6 @@ static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled)
GEM_BUG_ON(stalled);
}
-static void mark_eio(struct i915_request *rq)
-{
- if (i915_request_completed(rq))
- return;
-
- GEM_BUG_ON(i915_request_signaled(rq));
-
- i915_request_set_error_once(rq, -EIO);
- i915_request_mark_complete(rq);
-}
-
static void mock_reset_cancel(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
@@ -269,12 +258,12 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link)
- mark_eio(rq);
+ i915_request_mark_eio(rq);
intel_engine_signal_breadcrumbs(engine);
/* Cancel and submit all pending requests. */
list_for_each_entry(rq, &mock->hw_queue, mock.link) {
- mark_eio(rq);
+ i915_request_mark_eio(rq);
__i915_request_submit(rq);
}
INIT_LIST_HEAD(&mock->hw_queue);
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index 1f4020e906a8..db738d400168 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -25,7 +25,7 @@ static int request_sync(struct i915_request *rq)
/* Opencode i915_request_add() so we can keep the timeline locked. */
__i915_request_commit(rq);
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- __i915_request_queue(rq, NULL);
+ __i915_request_queue_bh(rq);
timeout = i915_request_wait(rq, 0, HZ / 10);
if (timeout < 0)
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 729c3c7b11e2..439c8984f5fa 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -6,6 +6,7 @@
#include <linux/sort.h>
+#include "intel_gpu_commands.h"
#include "intel_gt_pm.h"
#include "intel_rps.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index b88aa35ad75b..223ab88f7e57 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -197,6 +197,7 @@ static int cmp_u32(const void *_a, const void *_b)
static int __live_heartbeat_fast(struct intel_engine_cs *engine)
{
+ const unsigned int error_threshold = max(20000u, jiffies_to_usecs(6));
struct intel_context *ce;
struct i915_request *rq;
ktime_t t0, t1;
@@ -254,12 +255,18 @@ static int __live_heartbeat_fast(struct intel_engine_cs *engine)
times[0],
times[ARRAY_SIZE(times) - 1]);
- /* Min work delay is 2 * 2 (worst), +1 for scheduling, +1 for slack */
- if (times[ARRAY_SIZE(times) / 2] > jiffies_to_usecs(6)) {
+ /*
+ * Ideally, the upper bound on min work delay would be something like
+ * 2 * 2 (worst), +1 for scheduling, +1 for slack. In practice, we
+ * are, even with system_wq_highpri, at the mercy of the CPU scheduler
+	 * and may be stuck behind some slow work for many milliseconds, such
+	 * as our very own display workers.
+ */
+ if (times[ARRAY_SIZE(times) / 2] > error_threshold) {
pr_err("%s: Heartbeat delay was %uus, expected less than %dus\n",
engine->name,
times[ARRAY_SIZE(times) / 2],
- jiffies_to_usecs(6));
+ error_threshold);
err = -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index b08fc5390e8a..c3d965279fc3 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -4,13 +4,215 @@
* Copyright © 2018 Intel Corporation
*/
+#include <linux/sort.h>
+
#include "i915_selftest.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt_clock_utils.h"
#include "selftest_engine.h"
#include "selftest_engine_heartbeat.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_spinner.h"
+#define COUNT 5
+
+static int cmp_u64(const void *A, const void *B)
+{
+ const u64 *a = A, *b = B;
+
+	return *a > *b ? 1 : (*a < *b ? -1 : 0);
+}
+
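+/* Discard the two extreme samples and return a weighted average of the middle three */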
+static u64 trifilter(u64 *a)
+{
+ sort(a, COUNT, sizeof(*a), cmp_u64, NULL);
+ return (a[1] + 2 * a[2] + a[3]) >> 2;
+}
+
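+/* Emit a MI_SEMAPHORE_WAIT that polls the GGTT dword at @offset until it satisfies @op against @value */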
+static u32 *emit_wait(u32 *cs, u32 offset, int op, u32 value)
+{
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ op;
+ *cs++ = value;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *emit_store(u32 *cs, u32 offset, u32 value)
+{
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
+}
+
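+/* Emit a MI_STORE_REGISTER_MEM to copy register @reg into the GGTT dword at @offset */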
+static u32 *emit_srm(u32 *cs, i915_reg_t reg, u32 offset)
+{
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(reg);
+ *cs++ = offset;
+ *cs++ = 0;
+
+ return cs;
+}
+
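+/* Publish a CPU write to the semaphore so the GPU's polling wait can observe it */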
+static void write_semaphore(u32 *x, u32 value)
+{
+ WRITE_ONCE(*x, value);
+ wmb();
+}
+
+static int __measure_timestamps(struct intel_context *ce,
+ u64 *dt, u64 *d_ring, u64 *d_ctx)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ u32 *sema = memset32(engine->status_page.addr + 1000, 0, 5);
+ u32 offset = i915_ggtt_offset(engine->status_page.vma);
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 28);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ /* Signal & wait for start */
+ cs = emit_store(cs, offset + 4008, 1);
+ cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_NEQ_SDD, 1);
+
+ cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4000);
+ cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4004);
+
+ /* Busy wait */
+ cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_EQ_SDD, 1);
+
+ cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4016);
+ cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4012);
+
+ intel_ring_advance(rq, cs);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ intel_engine_flush_submission(engine);
+
+ /* Wait for the request to start executing, that then waits for us */
+ while (READ_ONCE(sema[2]) == 0)
+ cpu_relax();
+
+	/* Run the request for 100us, sampling timestamps before/after */
+ preempt_disable();
+ *dt = local_clock();
+ write_semaphore(&sema[2], 0);
+ udelay(100);
+ *dt = local_clock() - *dt;
+ write_semaphore(&sema[2], 1);
+ preempt_enable();
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ i915_request_put(rq);
+ return -ETIME;
+ }
+ i915_request_put(rq);
+
+ pr_debug("%s CTX_TIMESTAMP: [%x, %x], RING_TIMESTAMP: [%x, %x]\n",
+ engine->name, sema[1], sema[3], sema[0], sema[4]);
+
+ *d_ctx = sema[3] - sema[1];
+ *d_ring = sema[4] - sema[0];
+ return 0;
+}
+
+static int __live_engine_timestamps(struct intel_engine_cs *engine)
+{
+ u64 s_ring[COUNT], s_ctx[COUNT], st[COUNT], d_ring, d_ctx, dt;
+ struct intel_context *ce;
+ int i, err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (i = 0; i < COUNT; i++) {
+ err = __measure_timestamps(ce, &st[i], &s_ring[i], &s_ctx[i]);
+ if (err)
+ break;
+ }
+ intel_context_put(ce);
+ if (err)
+ return err;
+
+ dt = trifilter(st);
+ d_ring = trifilter(s_ring);
+ d_ctx = trifilter(s_ctx);
+
+ pr_info("%s elapsed:%lldns, CTX_TIMESTAMP:%lldns, RING_TIMESTAMP:%lldns\n",
+ engine->name, dt,
+ intel_gt_clock_interval_to_ns(engine->gt, d_ctx),
+ intel_gt_clock_interval_to_ns(engine->gt, d_ring));
+
+ d_ring = intel_gt_clock_interval_to_ns(engine->gt, d_ring);
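+	/* Expect the sampled walltime and the RING_TIMESTAMP delta to agree within a 3:4 ratio */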
+ if (3 * dt > 4 * d_ring || 4 * dt < 3 * d_ring) {
+ pr_err("%s Mismatch between ring timestamp and walltime!\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ d_ring = trifilter(s_ring);
+ d_ctx = trifilter(s_ctx);
+
+ d_ctx *= engine->gt->clock_frequency;
+ if (IS_ICELAKE(engine->i915))
+ d_ring *= 12500000; /* Fixed 80ns for icl ctx timestamp? */
+ else
+ d_ring *= engine->gt->clock_frequency;
+
+ if (3 * d_ctx > 4 * d_ring || 4 * d_ctx < 3 * d_ring) {
+ pr_err("%s Mismatch between ring and context timestamps!\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int live_engine_timestamps(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Check that CS_TIMESTAMP / CTX_TIMESTAMP are in sync, i.e. share
+ * the same CS clock.
+ */
+
+ if (INTEL_GEN(gt->i915) < 8)
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ int err;
+
+ st_engine_heartbeat_disable(engine);
+ err = __live_engine_timestamps(engine);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int live_engine_busy_stats(void *arg)
{
struct intel_gt *gt = arg;
@@ -177,6 +379,7 @@ static int live_engine_pm(void *arg)
int live_engine_pm_selftests(struct intel_gt *gt)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_engine_timestamps),
SUBTEST(live_engine_busy_stats),
SUBTEST(live_engine_pm),
};
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
new file mode 100644
index 000000000000..264b5ebdb021
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -0,0 +1,4741 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gt/intel_reset.h"
+#include "gt/selftest_engine_heartbeat.h"
+
+#include "i915_selftest.h"
+#include "selftests/i915_random.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_live_test.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/lib_sw_fence.h"
+
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+
+#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
+#define NUM_GPR 16
+#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
+
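+/* Treat a request as active once it is in HW, placed on hold, or has already started */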
+static bool is_active(struct i915_request *rq)
+{
+ if (i915_request_is_active(rq))
+ return true;
+
+ if (i915_request_on_hold(rq))
+ return true;
+
+ if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
+ return true;
+
+ return false;
+}
+
+static int wait_for_submit(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ /* Ignore our own attempts to suppress excess tasklets */
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+
+ timeout += jiffies;
+ do {
+ bool done = time_after(jiffies, timeout);
+
+ if (i915_request_completed(rq)) /* that was quick! */
+ return 0;
+
+		/* Wait until the HW has acknowledged the submission (or err) */
+ intel_engine_flush_submission(engine);
+ if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
+ return 0;
+
+ if (done)
+ return -ETIME;
+
+ cond_resched();
+ } while (1);
+}
+
+static int wait_for_reset(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ timeout += jiffies;
+
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ continue;
+
+ if (i915_request_completed(rq))
+ break;
+
+ if (READ_ONCE(rq->fence.error))
+ break;
+ } while (time_before(jiffies, timeout));
+
+ flush_scheduled_work();
+
+ if (rq->fence.error != -EIO) {
+ pr_err("%s: hanging request %llx:%lld not reset\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -EINVAL;
+ }
+
+ /* Give the request a jiffie to complete after flushing the worker */
+ if (i915_request_wait(rq, 0,
+ max(0l, (long)(timeout - jiffies)) + 1) < 0) {
+ pr_err("%s: hanging request %llx:%lld did not complete\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int live_sanitycheck(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ctx;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ GEM_TRACE("spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out_ctx;
+ }
+
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ goto out_ctx;
+ }
+
+out_ctx:
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_unlite_restore(struct intel_gt *gt, int prio)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we can correctly context switch between 2 instances
+ * on the same engine from the same parent context.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return err;
+
+ err = 0;
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq[2];
+ struct igt_live_test t;
+ int n;
+
+ if (prio && !intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ /*
+ * Setup the pair of contexts such that if we
+ * lite-restore using the RING_TAIL from ce[1] it
+ * will execute garbage from ce[0]->ring.
+ */
+ memset(tmp->ring->vaddr,
+ POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
+ tmp->ring->vma->size);
+
+ ce[n] = tmp;
+ }
+ GEM_BUG_ON(!ce[1]->ring->size);
+ intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
+ lrc_update_regs(ce[1], engine, ce[1]->ring->head);
+
+ rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ goto err_ce;
+ }
+
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
+
+ if (!igt_wait_for_spinner(&spin, rq[0])) {
+ i915_request_put(rq[0]);
+ goto err_ce;
+ }
+
+ rq[1] = i915_request_create(ce[1]);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ i915_request_put(rq[0]);
+ goto err_ce;
+ }
+
+ if (!prio) {
+ /*
+ * Ensure we do the switch to ce[1] on completion.
+ *
+ * rq[0] is already submitted, so this should reduce
+ * to a no-op (a wait on a request on the same engine
+ * uses the submit fence, not the completion fence),
+ * but it will install a dependency on rq[1] for rq[0]
+ * that will prevent the pair being reordered by
+ * timeslicing.
+ */
+ i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ }
+
+ i915_request_get(rq[1]);
+ i915_request_add(rq[1]);
+ GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
+ i915_request_put(rq[0]);
+
+ if (prio) {
+ struct i915_sched_attr attr = {
+ .priority = prio,
+ };
+
+ /* Alternatively preempt the spinner with ce[1] */
+ engine->schedule(rq[1], &attr);
+ }
+
+ /* And switch back to ce[0] for good measure */
+ rq[0] = i915_request_create(ce[0]);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ i915_request_put(rq[1]);
+ goto err_ce;
+ }
+
+ i915_request_await_dma_fence(rq[0], &rq[1]->fence);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(&spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_unlite_switch(void *arg)
+{
+ return live_unlite_restore(arg, 0);
+}
+
+static int live_unlite_preempt(void *arg)
+{
+ return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
+}
+
+static int live_unlite_ring(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Setup a preemption event that will cause almost the entire ring
+ * to be unwound, potentially fooling our intel_ring_direction()
+ * into emitting a forward lite-restore instead of the rollback.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int n;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ memset32(tmp->ring->vaddr,
+ 0xdeadbeef, /* trigger a hang if executed */
+ tmp->ring->vma->size / sizeof(u32));
+
+ ce[n] = tmp;
+ }
+
+ /* Create max prio spinner, followed by N low prio nops */
+ rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ i915_request_get(rq);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ /* Fill the ring, until we will cause a wrap */
+ n = 0;
+ while (intel_ring_direction(ce[0]->ring,
+ rq->wa_tail,
+ ce[0]->ring->tail) <= 0) {
+ struct i915_request *tmp;
+
+ tmp = intel_context_create_request(ce[0]);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ i915_request_put(rq);
+ goto err_ce;
+ }
+
+ i915_request_add(tmp);
+ intel_engine_flush_submission(engine);
+ n++;
+ }
+ intel_engine_flush_submission(engine);
+ pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
+ engine->name, n,
+ ce[0]->ring->size,
+ ce[0]->ring->tail,
+ ce[0]->ring->emit,
+ rq->tail);
+ GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
+ rq->tail,
+ ce[0]->ring->tail) <= 0);
+ i915_request_put(rq);
+
+ /* Create a second ring to preempt the first ring after rq[0] */
+ rq = intel_context_create_request(ce[1]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ i915_request_put(rq);
+ if (err) {
+ pr_err("%s: preemption request was not submitted\n",
+ engine->name);
+ err = -ETIME;
+ }
+
+ pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
+ engine->name,
+ ce[0]->ring->tail, ce[0]->ring->emit,
+ ce[1]->ring->tail, ce[1]->ring->emit);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(&spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+ st_engine_heartbeat_enable(engine);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_pin_rewind(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * We have to be careful not to trust intel_ring too much, for example
+ * ring->head is updated upon retire which is out of sync with pinning
+ * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
+ * or else we risk writing an older, stale value.
+ *
+	 * To simulate this, let's apply a bit of deliberate sabotage.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct intel_ring *ring;
+ struct igt_live_test t;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ break;
+ }
+
+ /* Keep the context awake while we play games */
+ err = i915_active_acquire(&ce->active);
+ if (err) {
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ break;
+ }
+ ring = ce->ring;
+
+ /* Poison the ring, and offset the next request from HEAD */
+ memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
+ ring->emit = ring->size / 2;
+ ring->tail = ring->emit;
+ GEM_BUG_ON(ring->head);
+
+ intel_context_unpin(ce);
+
+ /* Submit a simple nop request */
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+ rq = intel_context_create_request(ce);
+ i915_active_release(&ce->active); /* e.g. async retire */
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+ GEM_BUG_ON(!rq->head);
+ i915_request_add(rq);
+
+ /* Expect not to hang! */
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int live_hold_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+	 * descendants are not executed while the capture is in progress.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ st_engine_heartbeat_disable(engine);
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out;
+ }
+
+ /* We have our request executing, now remove it and reset */
+
+ local_bh_disable();
+ if (test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags)) {
+ local_bh_enable();
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ i915_request_get(rq);
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ __intel_engine_reset_bh(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags);
+ local_bh_enable();
+
+ /* Check that we do not resubmit the held request */
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ i915_request_put(rq);
+ err = -EIO;
+ goto out;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+
+out:
+ st_engine_heartbeat_enable(engine);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static const char *error_repr(int err)
+{
+ return err ? "bad" : "good";
+}
+
+static int live_error_interrupt(void *arg)
+{
+ static const struct error_phase {
+ enum { GOOD = 0, BAD = -EIO } error[2];
+ } phases[] = {
+ { { BAD, GOOD } },
+ { { BAD, BAD } },
+ { { BAD, GOOD } },
+ { { GOOD, GOOD } }, /* sentinel */
+ };
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
+ * of invalid commands in user batches that will cause a GPU hang.
+ * This is a faster mechanism than using hangcheck/heartbeats, but
+ * only detects problems the HW knows about -- it will not warn when
+ * we kill the HW!
+ *
+ * To verify our detection and reset, we throw some invalid commands
+ * at the HW and wait for the interrupt.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ const struct error_phase *p;
+ int err = 0;
+
+ st_engine_heartbeat_disable(engine);
+
+ for (p = phases; p->error[0] != GOOD; p++) {
+ struct i915_request *client[ARRAY_SIZE(phases->error)];
+ u32 *cs;
+ int i;
+
+			memset(client, 0, sizeof(client));
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ }
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto out;
+ }
+
+ if (p->error[i]) {
+ *cs++ = 0xdeadbeef;
+ *cs++ = 0xdeadbeef;
+ } else {
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ client[i] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+
+ err = wait_for_submit(engine, client[0], HZ / 2);
+ if (err) {
+ pr_err("%s: first request did not start within time!\n",
+ engine->name);
+ err = -ETIME;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (i915_request_wait(client[i], 0, HZ / 5) < 0)
+ pr_debug("%s: %s request incomplete!\n",
+ engine->name,
+ error_repr(p->error[i]));
+
+ if (!i915_request_started(client[i])) {
+ pr_err("%s: %s request not started!\n",
+ engine->name,
+ error_repr(p->error[i]));
+ err = -ETIME;
+ goto out;
+ }
+
+ /* Kick the tasklet to process the error */
+ intel_engine_flush_submission(engine);
+ if (client[i]->fence.error != p->error[i]) {
+ pr_err("%s: %s request (%s) with wrong error code: %d\n",
+ engine->name,
+ error_repr(p->error[i]),
+ i915_request_completed(client[i]) ? "completed" : "running",
+ client[i]->fence.error);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ for (i = 0; i < ARRAY_SIZE(client); i++)
+ if (client[i])
+ i915_request_put(client[i]);
+ if (err) {
+ pr_err("%s: failed at phase[%zd] { %d, %d }\n",
+ engine->name, p - phases,
+ p->error[0], p->error[1]);
+ break;
+ }
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (err) {
+ intel_gt_set_wedged(gt);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
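+/* Wait on semaphore @idx and, once it is signalled, release semaphore @idx - 1 in turn */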
+static int
+emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 10);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma) + 4 * idx;
+ *cs++ = 0;
+
+ if (idx > 0) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+ *cs++ = 0;
+ *cs++ = 1;
+ } else {
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ intel_ring_advance(rq, cs);
+ return 0;
+}
+
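+/* Create a fresh context on @engine and queue a semaphore-chain request at position @idx */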
+static struct i915_request *
+semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ goto out_ce;
+
+ err = 0;
+ if (rq->engine->emit_init_breadcrumb)
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err == 0)
+ err = emit_semaphore_chain(rq, vma, idx);
+ if (err == 0)
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err)
+ rq = ERR_PTR(err);
+
+out_ce:
+ intel_context_put(ce);
+ return rq;
+}
+
+static int
+release_queue(struct intel_engine_cs *engine,
+ struct i915_vma *vma,
+ int idx, int prio)
+{
+ struct i915_sched_attr attr = {
+ .priority = prio,
+ };
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ local_bh_disable();
+ engine->schedule(rq, &attr);
+ local_bh_enable(); /* kick tasklet */
+
+ i915_request_put(rq);
+
+ return 0;
+}
+
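+/* Emit a store that releases semaphore @idx - 1, then boost its priority to kick the tasklet */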
+static int
+slice_semaphore_queue(struct intel_engine_cs *outer,
+ struct i915_vma *vma,
+ int count)
+{
+ struct intel_engine_cs *engine;
+ struct i915_request *head;
+ enum intel_engine_id id;
+ int err, i, n = 0;
+
+ head = semaphore_queue(outer, vma, n++);
+ if (IS_ERR(head))
+ return PTR_ERR(head);
+
+ for_each_engine(engine, outer->gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ for (i = 0; i < count; i++) {
+ struct i915_request *rq;
+
+ rq = semaphore_queue(engine, vma, n++);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_put(rq);
+ }
+ }
+
+ err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER);
+ if (err)
+ goto out;
+
+ if (i915_request_wait(head, 0,
+ 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
+ pr_err("%s: Failed to slice along semaphore chain of length (%d, %d)!\n",
+ outer->name, count, n);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(outer->gt);
+ err = -EIO;
+ }
+
+out:
+ i915_request_put(head);
+ return err;
+}
+
+static int live_timeslice_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err = 0;
+
+ /*
+ * If a request takes too long, we would like to give other users
+ * a fair go on the GPU. In particular, users may create batches
+ * that wait upon external input, where that input may even be
+ * supplied by another GPU job. To avoid blocking forever, we
+ * need to preempt the current task and replace it with another
+ * ready task.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
+ for_each_engine(engine, gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ memset(vaddr, 0, PAGE_SIZE);
+
+ st_engine_heartbeat_disable(engine);
+ err = slice_semaphore_queue(engine, vma, 5);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ goto err_pin;
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ goto err_pin;
+ }
+ }
+
+err_pin:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
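+/* Queue a request that waits for slot[0] >= @idx, records RING_TIMESTAMP into slot[idx], then sets slot[0] = @idx + 1 */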
+static struct i915_request *
+create_rewinder(struct intel_context *ce,
+ struct i915_request *wait,
+ void *slot, int idx)
+{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ if (wait) {
+ err = i915_request_await_dma_fence(rq, &wait->fence);
+ if (err)
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 14);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = idx;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
+ *cs++ = offset + idx * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = idx + 1;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_MASK;
+ err = 0;
+err:
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+static int live_timeslice_rewind(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * The usual presumption on timeslice expiration is that we replace
+ * the active context with another. However, given a chain of
+	 * dependencies we may end up replacing the context with itself, but
+	 * resubmitting only a few of its requests, forcing us to rewind the
+	 * RING_TAIL of the original request.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ enum { A1, A2, B1 };
+ enum { X = 1, Z, Y };
+ struct i915_request *rq[3] = {};
+ struct intel_context *ce;
+ unsigned long timeslice;
+ int i, err = 0;
+ u32 *slot;
+
+ if (!intel_engine_has_timeslices(engine))
+ continue;
+
+ /*
+ * A:rq1 -- semaphore wait, timestamp X
+ * A:rq2 -- write timestamp Y
+ *
+ * B:rq1 [await A:rq1] -- write timestamp Z
+ *
+ * Force timeslice, release semaphore.
+ *
+ * Expect execution/evaluation order XZY
+ */
+
+ st_engine_heartbeat_disable(engine);
+ timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
+ slot = memset32(engine->status_page.addr + 1000, 0, 4);
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err;
+ }
+
+ rq[A1] = create_rewinder(ce, NULL, slot, X);
+		if (IS_ERR(rq[A1])) {
+			err = PTR_ERR(rq[A1]);
+			intel_context_put(ce);
+			goto err;
+		}
+
+ rq[A2] = create_rewinder(ce, NULL, slot, Y);
+ intel_context_put(ce);
+		if (IS_ERR(rq[A2])) {
+			err = PTR_ERR(rq[A2]);
+			goto err;
+		}
+
+ err = wait_for_submit(engine, rq[A2], HZ / 2);
+ if (err) {
+ pr_err("%s: failed to submit first context\n",
+ engine->name);
+ goto err;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err;
+ }
+
+ rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
+ intel_context_put(ce);
+		if (IS_ERR(rq[B1])) {
+			err = PTR_ERR(rq[B1]);
+			goto err;
+		}
+
+ err = wait_for_submit(engine, rq[B1], HZ / 2);
+ if (err) {
+ pr_err("%s: failed to submit second context\n",
+ engine->name);
+ goto err;
+ }
+
+ /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
+ ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
+ while (i915_request_is_active(rq[A2])) { /* semaphore yield! */
+ /* Wait for the timeslice to kick in */
+ del_timer(&engine->execlists.timer);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ intel_engine_flush_submission(engine);
+ }
+ /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
+ GEM_BUG_ON(!i915_request_is_active(rq[A1]));
+ GEM_BUG_ON(!i915_request_is_active(rq[B1]));
+ GEM_BUG_ON(i915_request_is_active(rq[A2]));
+
+ /* Release the hounds! */
+ slot[0] = 1;
+ wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
+
+ for (i = 1; i <= 3; i++) {
+ unsigned long timeout = jiffies + HZ / 2;
+
+ while (!READ_ONCE(slot[i]) &&
+ time_before(jiffies, timeout))
+ ;
+
+ if (!time_before(jiffies, timeout)) {
+ pr_err("%s: rq[%d] timed out\n",
+ engine->name, i - 1);
+ err = -ETIME;
+ goto err;
+ }
+
+ pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
+ }
+
+ /* XZY: XZ < XY */
+ if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
+ pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
+ engine->name,
+ slot[Z] - slot[X],
+ slot[Y] - slot[X]);
+ err = -EINVAL;
+ }
+
+err:
+ memset32(&slot[0], -1, 4);
+ wmb();
+
+ engine->props.timeslice_duration_ms = timeslice;
+ st_engine_heartbeat_enable(engine);
+ for (i = 0; i < 3; i++)
+ i915_request_put(rq[i]);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return rq;
+}
+
+static long slice_timeout(struct intel_engine_cs *engine)
+{
+ long timeout;
+
+ /* Enough time for a timeslice to kick in, and kick out */
+ timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine));
+
+ /* Enough time for the nop request to complete */
+ timeout += HZ / 5;
+
+ return timeout + 1;
+}
+
+static int live_timeslice_queue(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err = 0;
+
+ /*
+	 * Make sure that even if ELSP[0] and ELSP[1] are filled, with
+	 * timeslicing between them disabled, we *do* enable timeslicing
+ * if the queue demands it. (Normally, we do not submit if
+ * ELSP[1] is already occupied, so must rely on timeslicing to
+ * eject ELSP[0] in favour of the queue.)
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ };
+ struct i915_request *rq, *nop;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ st_engine_heartbeat_disable(engine);
+ memset(vaddr, 0, PAGE_SIZE);
+
+ /* ELSP[0]: semaphore wait */
+ rq = semaphore_queue(engine, vma, 0);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_heartbeat;
+ }
+ engine->schedule(rq, &attr);
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err) {
+ pr_err("%s: Timed out trying to submit semaphores\n",
+ engine->name);
+ goto err_rq;
+ }
+
+ /* ELSP[1]: nop request */
+ nop = nop_request(engine);
+ if (IS_ERR(nop)) {
+ err = PTR_ERR(nop);
+ goto err_rq;
+ }
+ err = wait_for_submit(engine, nop, HZ / 2);
+ i915_request_put(nop);
+ if (err) {
+ pr_err("%s: Timed out trying to submit nop\n",
+ engine->name);
+ goto err_rq;
+ }
+
+ GEM_BUG_ON(i915_request_completed(rq));
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ /* Queue: semaphore signal, matching priority as semaphore */
+ err = release_queue(engine, vma, 1, effective_prio(rq));
+ if (err)
+ goto err_rq;
+
+ /* Wait until we ack the release_queue and start timeslicing */
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+ } while (READ_ONCE(engine->execlists.pending[0]));
+
+ /* Timeslice every jiffy, so within 2 we should signal */
+ if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to timeslice into queue\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ memset(vaddr, 0xff, PAGE_SIZE);
+ err = -EIO;
+ }
+err_rq:
+ i915_request_put(rq);
+err_heartbeat:
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+ }
+
+err_pin:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int live_timeslice_nopreempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * We should not timeslice into a request that is marked with
+ * I915_REQUEST_NOPREEMPT.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+ unsigned long timeslice;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ st_engine_heartbeat_disable(engine);
+ timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
+ /* Create an unpreemptible spinner */
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_heartbeat;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto out_spin;
+ }
+
+ set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
+ i915_request_put(rq);
+
+ /* Followed by a maximum priority barrier (heartbeat) */
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_spin;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ /*
+ * Wait until the barrier is in ELSP, and we know timeslicing
+ * will have been activated.
+ */
+ if (wait_for_submit(engine, rq, HZ / 2)) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto out_spin;
+ }
+
+ /*
+ * Since the ELSP[0] request is unpreemptible, it should not
+ * allow the maximum priority barrier through. Wait long
+ * enough to see if it is timesliced in by mistake.
+ */
+ if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
+ pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
+ engine->name);
+ err = -EINVAL;
+ }
+ i915_request_put(rq);
+
+out_spin:
+ igt_spinner_end(&spin);
+out_heartbeat:
+ xchg(&engine->props.timeslice_duration_ms, timeslice);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_busywait_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+ u32 *map;
+
+ /*
+ * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
+ * preempt the busywaits used to synchronise between rings.
+ */
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ return -ENOMEM;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ctx_lo;
+ }
+
+ map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto err_obj;
+ }
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_map;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_vma;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *lo, *hi;
+ struct igt_live_test t;
+ u32 *cs;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_vma;
+ }
+
+ /*
+ * We create two requests. The low priority request
+ * busywaits on a semaphore (inside the ringbuffer where
+		 * it should be preemptible) and the high priority request
+ * uses a MI_STORE_DWORD_IMM to update the semaphore value
+ * allowing the first request to complete. If preemption
+ * fails, we hang instead.
+ */
+
+ lo = igt_request_alloc(ctx_lo, engine);
+ if (IS_ERR(lo)) {
+ err = PTR_ERR(lo);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(lo, 8);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(lo);
+ goto err_vma;
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ /* XXX Do we need a flush + invalidate here? */
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+
+ intel_ring_advance(lo, cs);
+
+ i915_request_get(lo);
+ i915_request_add(lo);
+
+ if (wait_for(READ_ONCE(*map), 10)) {
+ i915_request_put(lo);
+ err = -ETIMEDOUT;
+ goto err_vma;
+ }
+
+ /* Low priority request should be busywaiting now */
+ if (i915_request_wait(lo, 0, 1) != -ETIME) {
+ i915_request_put(lo);
+ pr_err("%s: Busywaiting request did not!\n",
+ engine->name);
+ err = -EIO;
+ goto err_vma;
+ }
+
+ hi = igt_request_alloc(ctx_hi, engine);
+ if (IS_ERR(hi)) {
+ err = PTR_ERR(hi);
+ i915_request_put(lo);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(hi, 4);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(hi);
+ i915_request_put(lo);
+ goto err_vma;
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 0;
+
+ intel_ring_advance(hi, cs);
+ i915_request_add(hi);
+
+ if (i915_request_wait(lo, 0, HZ / 5) < 0) {
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to preempt semaphore busywait!\n",
+ engine->name);
+
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ GEM_TRACE_DUMP();
+
+ i915_request_put(lo);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_vma;
+ }
+ GEM_BUG_ON(READ_ONCE(*map));
+ i915_request_put(lo);
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_vma;
+ }
+ }
+
+ err = 0;
+err_vma:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+ return err;
+}
+
+static struct i915_request *
+spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arb)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ rq = igt_spinner_create_request(spin, ce, arb);
+ intel_context_put(ce);
+ return rq;
+}
+
+static int live_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_hi, spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ goto err_spin_hi;
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+ for_each_engine(engine, gt, id) {
+ struct igt_live_test t;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ GEM_TRACE("lo spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
+ GEM_TRACE("hi spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
+err_spin_hi:
+ igt_spinner_fini(&spin_hi);
+ return err;
+}
+
+static int live_late_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_hi, spin_lo;
+ struct intel_engine_cs *engine;
+ struct i915_sched_attr attr = {};
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ goto err_spin_hi;
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+
+ /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
+ ctx_lo->sched.priority = I915_USER_PRIORITY(1);
+
+ for_each_engine(engine, gt, id) {
+ struct igt_live_test t;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ pr_err("First context failed to start\n");
+ goto err_wedged;
+ }
+
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (igt_wait_for_spinner(&spin_hi, rq)) {
+ pr_err("Second context overtook first?\n");
+ goto err_wedged;
+ }
+
+ attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+ engine->schedule(rq, &attr);
+
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
+ pr_err("High priority context failed to preempt the low priority context\n");
+ GEM_TRACE_DUMP();
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
+err_spin_hi:
+ igt_spinner_fini(&spin_hi);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+}
+
+struct preempt_client {
+ struct igt_spinner spin;
+ struct i915_gem_context *ctx;
+};
+
+static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
+{
+ c->ctx = kernel_context(gt->i915);
+ if (!c->ctx)
+ return -ENOMEM;
+
+ if (igt_spinner_init(&c->spin, gt))
+ goto err_ctx;
+
+ return 0;
+
+err_ctx:
+ kernel_context_close(c->ctx);
+ return -ENOMEM;
+}
+
+static void preempt_client_fini(struct preempt_client *c)
+{
+ igt_spinner_fini(&c->spin);
+ kernel_context_close(c->ctx);
+}
+
+static int live_nopreempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Verify that we can disable preemption for an individual request
+ * that may be being observed and does not want to be interrupted.
+ */
+
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
+ goto err_client_a;
+ b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq_a, *rq_b;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ engine->execlists.preempt_hang.count = 0;
+
+ rq_a = spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_a)) {
+ err = PTR_ERR(rq_a);
+ goto err_client_b;
+ }
+
+ /* Low priority client, but unpreemptable! */
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
+
+ i915_request_add(rq_a);
+ if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+ pr_err("First client failed to start\n");
+ goto err_wedged;
+ }
+
+ rq_b = spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_b)) {
+ err = PTR_ERR(rq_b);
+ goto err_client_b;
+ }
+
+ i915_request_add(rq_b);
+
+ /* B is much more important than A! (But A is unpreemptable.) */
+ GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
+
+ /* Wait long enough for preemption and timeslicing */
+ if (igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client started too early!\n");
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&a.spin);
+
+ if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client failed to start\n");
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&b.spin);
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("Preemption recorded x%d; should have been suppressed!\n",
+ engine->execlists.preempt_hang.count);
+ err = -EINVAL;
+ goto err_wedged;
+ }
+
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&b);
+err_client_a:
+ preempt_client_fini(&a);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&b.spin);
+ igt_spinner_end(&a.spin);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_client_b;
+}
+
+struct live_preempt_cancel {
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+};
+
+static int __cancel_active0(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP0 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ intel_context_set_banned(rq->context);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_active1(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[2] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP1 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* no preemption */
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ rq[1] = spinner_create_request(&arg->b.spin,
+ arg->b.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
+ intel_context_set_banned(rq[1]->context);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
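+ /* Let the innocent inflight0 spinner complete; only the banned rq[1] should be reset */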
+ igt_spinner_end(&arg->a.spin);
+ err = wait_for_reset(arg->engine, rq[1], HZ / 2);
+ if (err)
+ goto out;
+
+ if (rq[0]->fence.error != 0) {
+ pr_err("Normal inflight0 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[1]->fence.error != -EIO) {
+ pr_err("Cancelled inflight1 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_queued(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[3] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Full ELSP and one in the wings */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
+ rq[2] = spinner_create_request(&arg->b.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[2])) {
+ err = PTR_ERR(rq[2]);
+ goto out;
+ }
+
+ i915_request_get(rq[2]);
+ err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
+ i915_request_add(rq[2]);
+ if (err)
+ goto out;
+
+ intel_context_set_banned(rq[2]->context);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ err = wait_for_reset(arg->engine, rq[2], HZ / 2);
+ if (err)
+ goto out;
+
+ if (rq[0]->fence.error != -EIO) {
+ pr_err("Cancelled inflight0 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[1]->fence.error != 0) {
+ pr_err("Normal inflight1 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[2]->fence.error != -EIO) {
+ pr_err("Cancelled queued request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[2]);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_hostile(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ int err;
+
+ /* Preempt cancel non-preemptible spinner in ELSP0 */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ if (!intel_has_reset_engine(arg->engine->gt))
+ return 0;
+
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ intel_context_set_banned(rq->context);
+ err = intel_engine_pulse(arg->engine); /* force reset */
+ if (err)
+ goto out;
+
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_flush_test(arg->engine->i915))
+ err = -EIO;
+ return err;
+}
+
+static void force_reset_timeout(struct intel_engine_cs *engine)
+{
+ engine->reset_timeout.probability = 999;
+ atomic_set(&engine->reset_timeout.times, -1);
+}
+
+static void cancel_reset_timeout(struct intel_engine_cs *engine)
+{
+ memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
+}
+
+static int __cancel_fail(struct live_preempt_cancel *arg)
+{
+ struct intel_engine_cs *engine = arg->engine;
+ struct i915_request *rq;
+ int err;
+
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ if (!intel_has_reset_engine(engine->gt))
+ return 0;
+
+ GEM_TRACE("%s(%s)\n", __func__, engine->name);
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ intel_context_set_banned(rq->context);
+
+ err = intel_engine_pulse(engine);
+ if (err)
+ goto out;
+
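+ /* Arrange for the preempt-timeout reset to be missed, so the heartbeat must recover the engine */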
+ force_reset_timeout(engine);
+
+ /* force preempt reset [failure] */
+ while (!engine->execlists.pending[0])
+ intel_engine_flush_submission(engine);
+ del_timer_sync(&engine->execlists.preempt);
+ intel_engine_flush_submission(engine);
+
+ cancel_reset_timeout(engine);
+
+ /* after failure, require heartbeats to reset device */
+ intel_engine_set_heartbeat(engine, 1);
+ err = wait_for_reset(engine, rq, HZ / 2);
+ intel_engine_set_heartbeat(engine,
+ engine->defaults.heartbeat_interval_ms);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_flush_test(engine->i915))
+ err = -EIO;
+ return err;
+}
+
+static int live_preempt_cancel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct live_preempt_cancel data;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * To cancel an inflight context, we need to first remove it from the
+ * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
+ */
+
+ if (preempt_client_init(gt, &data.a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &data.b))
+ goto err_client_a;
+
+ for_each_engine(data.engine, gt, id) {
+ if (!intel_engine_has_preemption(data.engine))
+ continue;
+
+ err = __cancel_active0(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_active1(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_queued(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_hostile(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_fail(&data);
+ if (err)
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&data.b);
+err_client_a:
+ preempt_client_fini(&data.a);
+ return err;
+
+err_wedged:
+ GEM_TRACE_DUMP();
+ igt_spinner_end(&data.b.spin);
+ igt_spinner_end(&data.a.spin);
+ intel_gt_set_wedged(gt);
+ goto err_client_b;
+}
+
+static int live_suppress_self_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
+ };
+ struct preempt_client a, b;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Verify that if a preemption request does not cause a change in
+ * the current execution order, the preempt-to-idle injection is
+ * skipped and that we do not accidentally apply it after the CS
+ * completion event.
+ */
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0; /* presume black box */
+
+ if (intel_vgpu_active(gt->i915))
+ return 0; /* GVT forces single port & request submission */
+
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
+ goto err_client_a;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq_a, *rq_b;
+ int depth;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+
+ st_engine_heartbeat_disable(engine);
+ engine->execlists.preempt_hang.count = 0;
+
+ rq_a = spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(rq_a)) {
+ err = PTR_ERR(rq_a);
+ st_engine_heartbeat_enable(engine);
+ goto err_client_b;
+ }
+
+ i915_request_add(rq_a);
+ if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+ pr_err("First client failed to start\n");
+ st_engine_heartbeat_enable(engine);
+ goto err_wedged;
+ }
+
+ /* Keep postponing the timer to avoid premature slicing */
+ mod_timer(&engine->execlists.timer, jiffies + HZ);
+ for (depth = 0; depth < 8; depth++) {
+ rq_b = spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(rq_b)) {
+ err = PTR_ERR(rq_b);
+ st_engine_heartbeat_enable(engine);
+ goto err_client_b;
+ }
+ i915_request_add(rq_b);
+
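+ /* Bump the already-running rq_a to maximum priority; this must not inject a preempt-to-idle */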
+ GEM_BUG_ON(i915_request_completed(rq_a));
+ engine->schedule(rq_a, &attr);
+ igt_spinner_end(&a.spin);
+
+ if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client failed to start\n");
+ st_engine_heartbeat_enable(engine);
+ goto err_wedged;
+ }
+
+ swap(a, b);
+ rq_a = rq_b;
+ }
+ igt_spinner_end(&a.spin);
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
+ engine->name,
+ engine->execlists.preempt_hang.count,
+ depth);
+ st_engine_heartbeat_enable(engine);
+ err = -EINVAL;
+ goto err_client_b;
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&b);
+err_client_a:
+ preempt_client_fini(&a);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&b.spin);
+ igt_spinner_end(&a.spin);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_client_b;
+}
+
+static int live_chain_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct preempt_client hi, lo;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Build a chain AB...BA between two contexts (A, B) and request
+ * preemption of the last request. It should then complete before
+ * the previously submitted spinner in B.
+ */
+
+ if (preempt_client_init(gt, &hi))
+ return -ENOMEM;
+
+ if (preempt_client_init(gt, &lo))
+ goto err_client_hi;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ };
+ struct igt_live_test t;
+ struct i915_request *rq;
+ int ring_size, count, i;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ rq = spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
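+ /* Estimate how many requests fit in the ring from the footprint of this first one */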
+ ring_size = rq->wa_tail - rq->head;
+ if (ring_size < 0)
+ ring_size += rq->ring->size;
+ ring_size = rq->ring->size / ring_size;
+ pr_debug("%s(%s): Using maximum of %d requests\n",
+ __func__, engine->name, ring_size);
+
+ igt_spinner_end(&lo.spin);
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ pr_err("Timed out waiting to flush %s\n", engine->name);
+ i915_request_put(rq);
+ goto err_wedged;
+ }
+ i915_request_put(rq);
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_wedged;
+ }
+
+ for_each_prime_number_from(count, 1, ring_size) {
+ rq = spinner_create_request(&hi.spin,
+ hi.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&hi.spin, rq))
+ goto err_wedged;
+
+ rq = spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+
+ for (i = 0; i < count; i++) {
+ rq = igt_request_alloc(lo.ctx, engine);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+ }
+
+ rq = igt_request_alloc(hi.ctx, engine);
+ if (IS_ERR(rq))
+ goto err_wedged;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ engine->schedule(rq, &attr);
+
+ igt_spinner_end(&hi.spin);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("Failed to preempt over chain of %d\n",
+ count);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ i915_request_put(rq);
+ goto err_wedged;
+ }
+ igt_spinner_end(&lo.spin);
+ i915_request_put(rq);
+
+ rq = igt_request_alloc(lo.ctx, engine);
+ if (IS_ERR(rq))
+ goto err_wedged;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("Failed to flush low priority chain of %d requests\n",
+ count);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ i915_request_put(rq);
+ goto err_wedged;
+ }
+ i915_request_put(rq);
+ }
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_wedged;
+ }
+ }
+
+ err = 0;
+err_client_lo:
+ preempt_client_fini(&lo);
+err_client_hi:
+ preempt_client_fini(&hi);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&hi.spin);
+ igt_spinner_end(&lo.spin);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_client_lo;
+}
+
+static int create_gang(struct intel_engine_cs *engine,
+ struct i915_request **prev)
+{
+ struct drm_i915_gem_object *obj;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ce;
+ }
+
+ vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_obj;
+ }
+
+ /* Semaphore target: spin until zero */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = lower_32_bits(vma->node.start);
+ *cs++ = upper_32_bits(vma->node.start);
+
+ if (*prev) {
+ u64 offset = (*prev)->batch->node.start;
+
+ /* Terminate the spinner in the next lower priority batch. */
+ *cs++ = MI_STORE_DWORD_IMM_GEN4;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = 0;
+ }
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_obj;
+ }
+
+ rq->batch = i915_vma_get(vma);
+ i915_request_get(rq);
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ i915_request_add(rq);
+ if (err)
+ goto err_rq;
+
+ i915_gem_object_put(obj);
+ intel_context_put(ce);
+
+ rq->mock.link.next = &(*prev)->mock.link;
+ *prev = rq;
+ return 0;
+
+err_rq:
+ i915_vma_put(rq->batch);
+ i915_request_put(rq);
+err_obj:
+ i915_gem_object_put(obj);
+err_ce:
+ intel_context_put(ce);
+ return err;
+}
+
+static int __live_preempt_ring(struct intel_engine_cs *engine,
+ struct igt_spinner *spin,
+ int queue_sz, int ring_sz)
+{
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int err = 0;
+ int n;
+
+ if (igt_live_test_begin(&t, engine->i915, __func__, engine->name))
+ return -EIO;
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ tmp->ring = __intel_context_ring_size(ring_sz);
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ memset32(tmp->ring->vaddr,
+ 0xdeadbeef, /* trigger a hang if executed */
+ tmp->ring->vma->size / sizeof(u32));
+
+ ce[n] = tmp;
+ }
+
+ rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ i915_request_get(rq);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(spin, rq)) {
+ intel_gt_set_wedged(engine->gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ /* Fill the ring until we cause a wrap */
+ n = 0;
+ while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
+ struct i915_request *tmp;
+
+ tmp = intel_context_create_request(ce[0]);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ i915_request_put(rq);
+ goto err_ce;
+ }
+
+ i915_request_add(tmp);
+ intel_engine_flush_submission(engine);
+ n++;
+ }
+ intel_engine_flush_submission(engine);
+ pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
+ engine->name, queue_sz, n,
+ ce[0]->ring->size,
+ ce[0]->ring->tail,
+ ce[0]->ring->emit,
+ rq->tail);
+ i915_request_put(rq);
+
+ /* Create a second request to preempt the first ring */
+ rq = intel_context_create_request(ce[1]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ i915_request_put(rq);
+ if (err) {
+ pr_err("%s: preemption request was not submitted\n",
+ engine->name);
+ err = -ETIME;
+ }
+
+ pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
+ engine->name,
+ ce[0]->ring->tail, ce[0]->ring->emit,
+ ce[1]->ring->tail, ce[1]->ring->emit);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int live_preempt_ring(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check that we roll back large chunks of a ring in order to do a
+ * preemption event. Similar to live_unlite_ring, but looking at
+ * ring size rather than the impact of intel_ring_direction().
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ int n;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n <= 3; n++) {
+ err = __live_preempt_ring(engine, &spin,
+ n * SZ_4K / 4, SZ_4K);
+ if (err)
+ break;
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_preempt_gang(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Build as long a chain of preempters as we can, with each
+ * request higher priority than the last. Once we are ready, we release
+ * the last batch which then percolates down the chain, each releasing
+ * the next oldest in turn. The intent is to simply push as hard as we
+ * can with the number of preemptions, trying to exceed narrow HW
+ * limits. At a minimum, we insist that we can sort all the user
+ * high priority levels into execution order.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq = NULL;
+ struct igt_live_test t;
+ IGT_TIMEOUT(end_time);
+ int prio = 0;
+ int err = 0;
+ u32 *cs;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
+ return -EIO;
+
+ do {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(prio++),
+ };
+
+ err = create_gang(engine, &rq);
+ if (err)
+ break;
+
+ /* Submit each spinner at increasing priority */
+ engine->schedule(rq, &attr);
+ } while (prio <= I915_PRIORITY_MAX &&
+ !__igt_timeout(end_time, NULL));
+ pr_debug("%s: Preempt chain of %d requests\n",
+ engine->name, prio);
+
+ /*
+ * Such that the last spinner is the highest priority and
+ * should execute first. When that spinner completes,
+ * it will terminate the next lowest spinner until there
+ * are no more spinners and the gang is complete.
+ */
+ cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
+ if (!IS_ERR(cs)) {
+ *cs = 0;
+ i915_gem_object_unpin_map(rq->batch->obj);
+ } else {
+ err = PTR_ERR(cs);
+ intel_gt_set_wedged(gt);
+ }
+
+ while (rq) { /* wait for each rq from highest to lowest prio */
+ struct i915_request *n = list_next_entry(rq, mock.link);
+
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("Failed to flush chain of %d requests, at %d\n",
+ prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -ETIME;
+ }
+
+ i915_vma_put(rq->batch);
+ i915_request_put(rq);
+ rq = n;
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct i915_vma *
+create_gpr_user(struct intel_engine_cs *engine,
+ struct i915_vma *result,
+ unsigned int offset)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+ int i;
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, result->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(vma);
+ return ERR_CAST(cs);
+ }
+
+ /* All GPR are clear for new contexts. We use GPR(0) as a constant */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = CS_GPR(engine, 0);
+ *cs++ = 1;
+
+ for (i = 1; i < NUM_GPR; i++) {
+ u64 addr;
+
+ /*
+ * Perform: GPR[i]++
+ *
+ * As we read and write into the context saved GPR[i], if
+ * we restart this batch buffer from an earlier point, we
+ * will repeat the increment and store a value > 1.
+ */
+ *cs++ = MI_MATH(4);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
+ *cs++ = MI_MATH_ADD;
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
+
+ addr = result->node.start + offset + i * sizeof(*cs);
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cs++ = CS_GPR(engine, 2 * i);
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+
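+ /* Pause after each increment until the global counter (bumped by preempt_user) reaches i */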
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = i;
+ *cs++ = lower_32_bits(result->node.start);
+ *cs++ = upper_32_bits(result->node.start);
+ }
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ return vma;
+}
+
+static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, sz);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_ggtt_pin(vma, NULL, 0, 0);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static struct i915_request *
+create_gpr_client(struct intel_engine_cs *engine,
+ struct i915_vma *global,
+ unsigned int offset)
+{
+ struct i915_vma *batch, *vma;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ vma = i915_vma_instance(global->obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_ce;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_ce;
+
+ batch = create_gpr_user(engine, vma, offset);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_vma;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_batch;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
+
+ i915_vma_lock(batch);
+ if (!err)
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(batch);
+ i915_vma_unpin(batch);
+
+ if (!err)
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+out_batch:
+ i915_vma_put(batch);
+out_vma:
+ i915_vma_unpin(vma);
+out_ce:
+ intel_context_put(ce);
+ return err ? ERR_PTR(err) : rq;
+}
+
+static int preempt_user(struct intel_engine_cs *engine,
+ struct i915_vma *global,
+ int id)
+{
+ struct i915_sched_attr attr = {
+ .priority = I915_PRIORITY_MAX
+ };
+ struct i915_request *rq;
+ int err = 0;
+ u32 *cs;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
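+ /* Bump the shared counter, releasing every client semaphore waiting for a value <= id */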
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(global);
+ *cs++ = 0;
+ *cs++ = id;
+
+ intel_ring_advance(rq, cs);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ engine->schedule(rq, &attr);
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int live_preempt_user(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_vma *global;
+ enum intel_engine_id id;
+ u32 *result;
+ int err = 0;
+
+ /*
+ * In our other tests, we look at preemption in carefully
+ * controlled conditions in the ringbuffer. Since most of the
+ * time is spent in user batches, most of our preemptions naturally
+ * occur there. We want to verify that when we preempt inside a batch
+ * we continue on from the current instruction and do not roll back
+ * to the start, or another earlier arbitration point.
+ *
+ * To verify this, we create a batch which is a mixture of
+ * MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with
+ * a few preempting contexts thrown into the mix, we look for any
+ * repeated instructions (which show up as incorrect values).
+ */
+
+ global = create_global(gt, 4096);
+ if (IS_ERR(global))
+ return PTR_ERR(global);
+
+ result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
+ if (IS_ERR(result)) {
+ i915_vma_unpin_and_release(&global, 0);
+ return PTR_ERR(result);
+ }
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *client[3] = {};
+ struct igt_live_test t;
+ int i;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
+ continue; /* we need per-context GPR */
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ memset(result, 0, 4096);
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct i915_request *rq;
+
+ rq = create_gpr_client(engine, global,
+ NUM_GPR * i * sizeof(u32));
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto end_test;
+ }
+
+ client[i] = rq;
+ }
+
+ /* Continuously preempt the set of 3 running contexts */
+ for (i = 1; i <= NUM_GPR; i++) {
+ err = preempt_user(engine, global, i);
+ if (err)
+ goto end_test;
+ }
+
+ if (READ_ONCE(result[0]) != NUM_GPR) {
+ pr_err("%s: Failed to release semaphore\n",
+ engine->name);
+ err = -EIO;
+ goto end_test;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ int gpr;
+
+ if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
+ err = -ETIME;
+ goto end_test;
+ }
+
+ for (gpr = 1; gpr < NUM_GPR; gpr++) {
+ if (result[NUM_GPR * i + gpr] != 1) {
+ pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
+ engine->name,
+ i, gpr, result[NUM_GPR * i + gpr]);
+ err = -EINVAL;
+ goto end_test;
+ }
+ }
+ }
+
+end_test:
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (!client[i])
+ break;
+
+ i915_request_put(client[i]);
+ }
+
+ /* Flush the semaphores on error */
+ smp_store_mb(result[0], -1);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
+ return err;
+}
+
+static int live_preempt_timeout(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we force preemption to occur by cancelling the previous
+ * context if it refuses to yield the GPU.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ return -ENOMEM;
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_timeout;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = igt_request_alloc(ctx_hi, engine);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ /* Flush the previous CS ack before changing timeouts */
+ while (READ_ONCE(engine->execlists.pending[0]))
+ cpu_relax();
+
+ saved_timeout = engine->props.preempt_timeout_ms;
+ engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ intel_engine_flush_submission(engine);
+ engine->props.preempt_timeout_ms = saved_timeout;
+
+ if (i915_request_wait(rq, 0, HZ / 10) < 0) {
+ intel_gt_set_wedged(gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ctx_lo;
+ }
+
+ igt_spinner_end(&spin_lo);
+ i915_request_put(rq);
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
+ return err;
+}
+
+static int random_range(struct rnd_state *rnd, int min, int max)
+{
+ return i915_prandom_u32_max_state(max - min, rnd) + min;
+}
+
+static int random_priority(struct rnd_state *rnd)
+{
+ return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
+}
+
+struct preempt_smoke {
+ struct intel_gt *gt;
+ struct i915_gem_context **contexts;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *batch;
+ unsigned int ncontext;
+ struct rnd_state prng;
+ unsigned long count;
+};
+
+static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
+{
+ return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
+ &smoke->prng)];
+}
+
+static int smoke_submit(struct preempt_smoke *smoke,
+ struct i915_gem_context *ctx, int prio,
+ struct drm_i915_gem_object *batch)
+{
+ struct i915_request *rq;
+ struct i915_vma *vma = NULL;
+ int err = 0;
+
+ if (batch) {
+ struct i915_address_space *vm;
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(batch, vm, NULL);
+ i915_vm_put(vm);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+ }
+
+ ctx->sched.priority = prio;
+
+ rq = igt_request_alloc(ctx, smoke->engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto unpin;
+ }
+
+ if (vma) {
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ }
+
+ i915_request_add(rq);
+
+unpin:
+ if (vma)
+ i915_vma_unpin(vma);
+
+ return err;
+}
+
+static int smoke_crescendo_thread(void *arg)
+{
+ struct preempt_smoke *smoke = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ struct i915_gem_context *ctx = smoke_context(smoke);
+ int err;
+
+ err = smoke_submit(smoke,
+ ctx, count % I915_PRIORITY_MAX,
+ smoke->batch);
+ if (err)
+ return err;
+
+ count++;
+ } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
+
+ smoke->count = count;
+ return 0;
+}
+
+static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+#define BATCH BIT(0)
+{
+ struct task_struct *tsk[I915_NUM_ENGINES] = {};
+ struct preempt_smoke arg[I915_NUM_ENGINES];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long count;
+ int err = 0;
+
+ for_each_engine(engine, smoke->gt, id) {
+ arg[id] = *smoke;
+ arg[id].engine = engine;
+ if (!(flags & BATCH))
+ arg[id].batch = NULL;
+ arg[id].count = 0;
+
+ tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
+ "igt/smoke:%d", id);
+ if (IS_ERR(tsk[id])) {
+ err = PTR_ERR(tsk[id]);
+ break;
+ }
+ get_task_struct(tsk[id]);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ count = 0;
+ for_each_engine(engine, smoke->gt, id) {
+ int status;
+
+ if (IS_ERR_OR_NULL(tsk[id]))
+ continue;
+
+ status = kthread_stop(tsk[id]);
+ if (status && !err)
+ err = status;
+
+ count += arg[id].count;
+
+ put_task_struct(tsk[id]);
+ }
+
+ pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+ count, flags, smoke->gt->info.num_engines, smoke->ncontext);
+ return err;
+}
+
+static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
+{
+ enum intel_engine_id id;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ for_each_engine(smoke->engine, smoke->gt, id) {
+ struct i915_gem_context *ctx = smoke_context(smoke);
+ int err;
+
+ err = smoke_submit(smoke,
+ ctx, random_priority(&smoke->prng),
+ flags & BATCH ? smoke->batch : NULL);
+ if (err)
+ return err;
+
+ count++;
+ }
+ } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
+
+ pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
+ count, flags, smoke->gt->info.num_engines, smoke->ncontext);
+ return 0;
+}
+
+static int live_preempt_smoke(void *arg)
+{
+ struct preempt_smoke smoke = {
+ .gt = arg,
+ .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
+ .ncontext = 256,
+ };
+ const unsigned int phase[] = { 0, BATCH };
+ struct igt_live_test t;
+ int err = -ENOMEM;
+ u32 *cs;
+ int n;
+
+ smoke.contexts = kmalloc_array(smoke.ncontext,
+ sizeof(*smoke.contexts),
+ GFP_KERNEL);
+ if (!smoke.contexts)
+ return -ENOMEM;
+
+ smoke.batch =
+ i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
+ if (IS_ERR(smoke.batch)) {
+ err = PTR_ERR(smoke.batch);
+ goto err_free;
+ }
+
+ cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_batch;
+ }
+ for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
+ cs[n] = MI_ARB_CHECK;
+ cs[n] = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(smoke.batch);
+ i915_gem_object_unpin_map(smoke.batch);
+
+ if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
+ err = -EIO;
+ goto err_batch;
+ }
+
+ for (n = 0; n < smoke.ncontext; n++) {
+ smoke.contexts[n] = kernel_context(smoke.gt->i915);
+ if (!smoke.contexts[n])
+ goto err_ctx;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(phase); n++) {
+ err = smoke_crescendo(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+
+ err = smoke_random(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+ }
+
+err_ctx:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+
+ for (n = 0; n < smoke.ncontext; n++) {
+ if (!smoke.contexts[n])
+ break;
+ kernel_context_close(smoke.contexts[n]);
+ }
+
+err_batch:
+ i915_gem_object_put(smoke.batch);
+err_free:
+ kfree(smoke.contexts);
+
+ return err;
+}
+
+static int nop_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling,
+ unsigned int nctx,
+ unsigned int flags)
+#define CHAIN BIT(0)
+{
+ IGT_TIMEOUT(end_time);
+ struct i915_request *request[16] = {};
+ struct intel_context *ve[16];
+ unsigned long n, prime, nc;
+ struct igt_live_test t;
+ ktime_t times[2] = {};
+ int err;
+
+ GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
+
+ for (n = 0; n < nctx; n++) {
+ ve[n] = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve[n])) {
+ err = PTR_ERR(ve[n]);
+ nctx = n;
+ goto out;
+ }
+
+ err = intel_context_pin(ve[n]);
+ if (err) {
+ intel_context_put(ve[n]);
+ nctx = n;
+ goto out;
+ }
+ }
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
+ if (err)
+ goto out;
+
+ for_each_prime_number_from(prime, 1, 8192) {
+ times[1] = ktime_get_raw();
+
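+ /* CHAIN submits the whole batch for one context before the next; otherwise interleave contexts */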
+ if (flags & CHAIN) {
+ for (nc = 0; nc < nctx; nc++) {
+ for (n = 0; n < prime; n++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+ }
+ } else {
+ for (n = 0; n < prime; n++) {
+ for (nc = 0; nc < nctx; nc++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+ }
+ }
+
+ for (nc = 0; nc < nctx; nc++) {
+ if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
+ pr_err("%s(%s): wait for %llx:%lld timed out\n",
+ __func__, ve[0]->engine->name,
+ request[nc]->fence.context,
+ request[nc]->fence.seqno);
+
+ GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+ __func__, ve[0]->engine->name,
+ request[nc]->fence.context,
+ request[nc]->fence.seqno);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ break;
+ }
+ }
+
+ times[1] = ktime_sub(ktime_get_raw(), times[1]);
+ if (prime == 1)
+ times[0] = times[1];
+
+ for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
+ request[nc] = NULL;
+ }
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ }
+
+ err = igt_live_test_end(&t);
+ if (err)
+ goto out;
+
+ pr_info("Request x%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
+ nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
+ prime, div64_u64(ktime_to_ns(times[1]), prime));
+
+out:
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
+ intel_context_unpin(ve[nc]);
+ intel_context_put(ve[nc]);
+ }
+ return err;
+}
+
+static unsigned int
+__select_siblings(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings,
+ bool (*filter)(const struct intel_engine_cs *))
+{
+ unsigned int n = 0;
+ unsigned int inst;
+
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!gt->engine_class[class][inst])
+ continue;
+
+ if (filter && !filter(gt->engine_class[class][inst]))
+ continue;
+
+ siblings[n++] = gt->engine_class[class][inst];
+ }
+
+ return n;
+}
+
+static unsigned int
+select_siblings(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings)
+{
+ return __select_siblings(gt, class, siblings, NULL);
+}
+
+static int live_virtual_engine(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ err = nop_virtual_engine(gt, &engine, 1, 1, 0);
+ if (err) {
+ pr_err("Failed to wrap engine %s: err=%d\n",
+ engine->name, err);
+ return err;
+ }
+ }
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, n;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ for (n = 1; n <= nsibling + 1; n++) {
+ err = nop_virtual_engine(gt, siblings, nsibling,
+ n, 0);
+ if (err)
+ return err;
+ }
+
+ err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mask_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
+ struct intel_context *ve;
+ struct igt_live_test t;
+ unsigned int n;
+ int err;
+
+ /*
+ * Check that by setting the execution mask on a request, we can
+ * restrict it to our desired engine within the virtual engine.
+ */
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_close;
+ }
+
+ err = intel_context_pin(ve);
+ if (err)
+ goto out_put;
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+ if (err)
+ goto out_unpin;
+
+ for (n = 0; n < nsibling; n++) {
+ request[n] = i915_request_create(ve);
+ if (IS_ERR(request[n])) {
+ err = PTR_ERR(request[n]);
+ nsibling = n;
+ goto out;
+ }
+
+ /* Reverse order as it's more likely to be unnatural */
+ request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
+
+ i915_request_get(request[n]);
+ i915_request_add(request[n]);
+ }
+
+ for (n = 0; n < nsibling; n++) {
+ if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
+ pr_err("%s(%s): wait for %llx:%lld timed out\n",
+ __func__, ve->engine->name,
+ request[n]->fence.context,
+ request[n]->fence.seqno);
+
+ GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+ __func__, ve->engine->name,
+ request[n]->fence.context,
+ request[n]->fence.seqno);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out;
+ }
+
+ if (request[n]->engine != siblings[nsibling - n - 1]) {
+ pr_err("Executed on wrong sibling '%s', expected '%s'\n",
+ request[n]->engine->name,
+ siblings[nsibling - n - 1]->name);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ err = igt_live_test_end(&t);
+out:
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ for (n = 0; n < nsibling; n++)
+ i915_request_put(request[n]);
+
+out_unpin:
+ intel_context_unpin(ve);
+out_put:
+ intel_context_put(ve);
+out_close:
+ return err;
+}
+
+static int live_virtual_mask(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ unsigned int nsibling;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ err = mask_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int slicein_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ const long timeout = slice_timeout(siblings[0]);
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct igt_spinner spin;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * Virtual requests must take part in timeslicing on the target engines.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for (n = 0; n < nsibling; n++) {
+ ce = intel_context_create(siblings[n]);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_add(rq);
+ }
+
+ ce = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0) {
+ GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
+ __func__, rq->engine->name);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+
+out:
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int sliceout_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ const long timeout = slice_timeout(siblings[0]);
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct igt_spinner spin;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * Virtual requests must allow others a fair timeslice.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ /* XXX We do not handle oversubscription and fairness with normal rq */
+ for (n = 0; n < nsibling; n++) {
+ ce = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_add(rq);
+ }
+
+ for (n = 0; !err && n < nsibling; n++) {
+ ce = intel_context_create(siblings[n]);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0) {
+ GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
+ __func__, siblings[n]->name);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+ }
+
+out:
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_virtual_slice(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ unsigned int nsibling;
+
+ nsibling = __select_siblings(gt, class, siblings,
+ intel_engine_has_timeslices);
+ if (nsibling < 2)
+ continue;
+
+ err = slicein_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+
+ err = sliceout_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int preserved_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct i915_request *last = NULL;
+ struct intel_context *ve;
+ struct i915_vma *scratch;
+ struct igt_live_test t;
+ unsigned int n;
+ int err = 0;
+ u32 *cs;
+
+ scratch = __vm_create_scratch_for_read(&siblings[0]->gt->ggtt->vm,
+ PAGE_SIZE);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ err = i915_vma_sync(scratch);
+ if (err)
+ goto out_scratch;
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_scratch;
+ }
+
+ err = intel_context_pin(ve);
+ if (err)
+ goto out_put;
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+ if (err)
+ goto out_unpin;
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ struct intel_engine_cs *engine = siblings[n % nsibling];
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_end;
+ }
+
+ i915_request_put(last);
+ last = i915_request_get(rq);
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
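+ /* Read back the GPR written by the previous request, then seed the next GPR for the following one */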
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = CS_GPR(engine, n);
+ *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
+ *cs++ = n + 1;
+
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* Restrict this request to run on a particular engine */
+ rq->execution_mask = engine->mask;
+ i915_request_add(rq);
+ }
+
+ if (i915_request_wait(last, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto out_end;
+ }
+
+ cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ if (cs[n] != n) {
+ pr_err("Incorrect value[%d] found for GPR[%d]\n",
+ cs[n], n);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+out_end:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ i915_request_put(last);
+out_unpin:
+ intel_context_unpin(ve);
+out_put:
+ intel_context_put(ve);
+out_scratch:
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+static int live_virtual_preserved(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+
+ /*
+ * Check that the context image retains non-privileged (user) registers
+ * from one engine to the next. For this we check that the CS_GPR
+ * are preserved.
+ */
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ /* As we use CS_GPR we cannot run before they existed on all engines. */
+ if (INTEL_GEN(gt->i915) < 9)
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ err = preserved_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int bond_virtual_engine(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling,
+ unsigned int flags)
+#define BOND_SCHEDULE BIT(0)
+{
+ struct intel_engine_cs *master;
+ struct i915_request *rq[16];
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ unsigned long n;
+ int err;
+
+ /*
+ * A set of bonded requests is intended to be run concurrently
+ * across a number of engines. We use one request per-engine
+ * and a magic fence to schedule each of the bonded requests
+ * at the same time. A consequence of our current scheduler is that
+ * we only move requests to the HW ready queue when the request
+ * becomes ready, that is when all of its prerequisite fences have
+ * been signaled. As one of those fences is the master submit fence,
+ * there is a delay on all secondary fences as the HW may be
+ * currently busy. Equally, as all the requests are independent,
+ * they may have other fences that delay individual request
+ * submission to HW. Ergo, we do not guarantee that all requests are
+ * immediately submitted to HW at the same time, just that if the
+ * rules are abided by, they are ready at the same time as the
+ * first is submitted. Userspace can embed semaphores in its batch
+ * to ensure parallel execution of its phases as it requires.
+ * Though naturally it gets requested that perhaps the scheduler should
+ * take care of parallel execution, even across preemption events on
+ * different HW. (The proper answer is of course "lalalala".)
+ *
+ * With the submit-fence, we have identified three possible phases
+ * of synchronisation depending on the master fence: queued (not
+ * ready), executing, and signaled. The first two are quite simple
+ * and checked below. However, the signaled master fence handling is
+ * contentious. Currently we do not distinguish between a signaled
+ * fence and an expired fence, as once signaled it does not convey
+ * any information about the previous execution. It may even be freed
+ * and hence, when checked later, it may not exist at all. Ergo we currently
+ * do not apply the bonding constraint for an already signaled fence,
+ * as our expectation is that it should not constrain the secondaries
+ * and is outside of the scope of the bonded request API (i.e. all
+ * userspace requests are meant to be running in parallel). As
+ * it imposes no constraint, and is effectively a no-op, we do not
+ * check below as normal execution flows are checked extensively above.
+ *
+ * XXX Is the degenerate handling of signaled submit fences the
+ * expected behaviour for userspace?
+ */
+
+ GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ err = 0;
+ rq[0] = ERR_PTR(-ENOMEM);
+ for_each_engine(master, gt, id) {
+ struct i915_sw_fence fence = {};
+ struct intel_context *ce;
+
+ if (master->class == class)
+ continue;
+
+ ce = intel_context_create(master);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
+
+ rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ intel_context_put(ce);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ goto out;
+ }
+ i915_request_get(rq[0]);
+
+ if (flags & BOND_SCHEDULE) {
+ onstack_fence_init(&fence);
+ err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
+ &fence,
+ GFP_KERNEL);
+ }
+
+ i915_request_add(rq[0]);
+ if (err < 0)
+ goto out;
+
+ if (!(flags & BOND_SCHEDULE) &&
+ !igt_wait_for_spinner(&spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ for (n = 0; n < nsibling; n++) {
+ struct intel_context *ve;
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ err = intel_virtual_engine_attach_bond(ve->engine,
+ master,
+ siblings[n]);
+ if (err) {
+ intel_context_put(ve);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ err = intel_context_pin(ve);
+ intel_context_put(ve);
+ if (err) {
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ rq[n + 1] = i915_request_create(ve);
+ intel_context_unpin(ve);
+ if (IS_ERR(rq[n + 1])) {
+ err = PTR_ERR(rq[n + 1]);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+ i915_request_get(rq[n + 1]);
+
+ err = i915_request_await_execution(rq[n + 1],
+ &rq[0]->fence,
+ ve->engine->bond_execute);
+ i915_request_add(rq[n + 1]);
+ if (err < 0) {
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+ }
+ onstack_fence_fini(&fence);
+ intel_engine_flush_submission(master);
+ igt_spinner_end(&spin);
+
+ if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
+ pr_err("Master request did not execute (on %s)!\n",
+ rq[0]->engine->name);
+ err = -EIO;
+ goto out;
+ }
+
+ for (n = 0; n < nsibling; n++) {
+ if (i915_request_wait(rq[n + 1], 0,
+ MAX_SCHEDULE_TIMEOUT) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq[n + 1]->engine != siblings[n]) {
+ pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
+ siblings[n]->name,
+ rq[n + 1]->engine->name,
+ rq[0]->engine->name);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ for (n = 0; !IS_ERR(rq[n]); n++)
+ i915_request_put(rq[n]);
+ rq[0] = ERR_PTR(-ENOMEM);
+ }
+
+out:
+ for (n = 0; !IS_ERR(rq[n]); n++)
+ i915_request_put(rq[n]);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_virtual_bond(void *arg)
+{
+ static const struct phase {
+ const char *name;
+ unsigned int flags;
+ } phases[] = {
+ { "", 0 },
+ { "schedule", BOND_SCHEDULE },
+ { },
+ };
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ const struct phase *p;
+ int nsibling;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ for (p = phases; p->name; p++) {
+ err = bond_virtual_engine(gt,
+ class, siblings, nsibling,
+ p->flags);
+ if (err) {
+ pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
+ __func__, p->name, class, nsibling, err);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int reset_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct intel_engine_cs *engine;
+ struct intel_context *ve;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+ * descendants are not executed while the capture is in progress.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_spin;
+ }
+
+ for (n = 0; n < nsibling; n++)
+ st_engine_heartbeat_disable(siblings[n]);
+
+ rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_heartbeat;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out_heartbeat;
+ }
+
+ engine = rq->engine;
+ GEM_BUG_ON(engine == ve->engine);
+
+ /* Take ownership of the reset and tasklet */
+ local_bh_disable();
+ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags)) {
+ local_bh_enable();
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out_heartbeat;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ /* Fake a preemption event; failed of course */
+ spin_lock_irq(&engine->active.lock);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irq(&engine->active.lock);
+ GEM_BUG_ON(rq->engine != engine);
+
+ /* Reset the engine while keeping our active request on hold */
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ __intel_engine_reset_bh(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ /* Release our grasp on the engine, letting CS flow again */
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
+ local_bh_enable();
+
+ /* Check that we do not resubmit the held request */
+ i915_request_get(rq);
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out_rq;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_heartbeat:
+ for (n = 0; n < nsibling; n++)
+ st_engine_heartbeat_enable(siblings[n]);
+
+ intel_context_put(ve);
+out_spin:
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_virtual_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+
+ /*
+ * Check that we handle a reset event within a virtual engine.
+ * Only the physical engine is reset, but we have to check the flow
+ * of the virtual requests around the reset, and make sure it is not
+ * forgotten.
+ */
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ err = reset_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int intel_execlists_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_sanitycheck),
+ SUBTEST(live_unlite_switch),
+ SUBTEST(live_unlite_preempt),
+ SUBTEST(live_unlite_ring),
+ SUBTEST(live_pin_rewind),
+ SUBTEST(live_hold_reset),
+ SUBTEST(live_error_interrupt),
+ SUBTEST(live_timeslice_preempt),
+ SUBTEST(live_timeslice_rewind),
+ SUBTEST(live_timeslice_queue),
+ SUBTEST(live_timeslice_nopreempt),
+ SUBTEST(live_busywait_preempt),
+ SUBTEST(live_preempt),
+ SUBTEST(live_late_preempt),
+ SUBTEST(live_nopreempt),
+ SUBTEST(live_preempt_cancel),
+ SUBTEST(live_suppress_self_preempt),
+ SUBTEST(live_chain_preempt),
+ SUBTEST(live_preempt_ring),
+ SUBTEST(live_preempt_gang),
+ SUBTEST(live_preempt_timeout),
+ SUBTEST(live_preempt_user),
+ SUBTEST(live_preempt_smoke),
+ SUBTEST(live_virtual_engine),
+ SUBTEST(live_virtual_mask),
+ SUBTEST(live_virtual_preserved),
+ SUBTEST(live_virtual_slice),
+ SUBTEST(live_virtual_bond),
+ SUBTEST(live_virtual_reset),
+ };
+
+ if (!HAS_EXECLISTS(i915))
+ return 0;
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 6180a47c1b51..5d911f724ebe 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -71,7 +71,7 @@ static int live_gt_clocks(void *arg)
enum intel_engine_id id;
int err = 0;
- if (!RUNTIME_INFO(gt->i915)->cs_timestamp_frequency_hz) { /* unknown */
+ if (!gt->clock_frequency) { /* unknown */
pr_info("CS_TIMESTAMP frequency unknown\n");
return 0;
}
@@ -112,12 +112,12 @@ static int live_gt_clocks(void *arg)
measure_clocks(engine, &cycles, &dt);
- time = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
- expected = i915_cs_timestamp_ns_to_ticks(engine->i915, dt);
+ time = intel_gt_clock_interval_to_ns(engine->gt, cycles);
+ expected = intel_gt_ns_to_clock_interval(engine->gt, dt);
pr_info("%s: TIMESTAMP %d cycles [%lldns] in %lldns [%d cycles], using CS clock frequency of %uKHz\n",
engine->name, cycles, time, dt, expected,
- RUNTIME_INFO(engine->i915)->cs_timestamp_frequency_hz / 1000);
+ engine->gt->clock_frequency / 1000);
if (9 * time < 8 * dt || 8 * time > 9 * dt) {
pr_err("%s: CS ticks did not match walltime!\n",
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index fb5ebf930ab2..463bb6a700c8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -506,7 +506,8 @@ static int igt_reset_nop_engine(void *arg)
}
err = intel_engine_reset(engine, NULL);
if (err) {
- pr_err("i915_reset_engine failed\n");
+ pr_err("intel_engine_reset(%s) failed, err:%d\n",
+ engine->name, err);
break;
}
@@ -539,6 +540,149 @@ static int igt_reset_nop_engine(void *arg)
return 0;
}
+static void force_reset_timeout(struct intel_engine_cs *engine)
+{
+ engine->reset_timeout.probability = 999;
+ atomic_set(&engine->reset_timeout.times, -1);
+}
+
+static void cancel_reset_timeout(struct intel_engine_cs *engine)
+{
+ memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
+}
+
+static int igt_reset_fail_engine(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* Check that we can recover from engine-reset failures */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ unsigned int count;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ st_engine_heartbeat_disable(engine);
+ set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+
+ force_reset_timeout(engine);
+ err = intel_engine_reset(engine, NULL);
+ cancel_reset_timeout(engine);
+ if (err == 0) /* timeouts only generated on gen8+ */
+ goto skip;
+
+ count = 0;
+ do {
+ struct i915_request *last = NULL;
+ int i;
+
+ if (!wait_for_idle(engine)) {
+ pr_err("%s failed to idle before reset\n",
+ engine->name);
+ err = -EIO;
+ break;
+ }
+
+ for (i = 0; i < count % 15; i++) {
+ struct i915_request *rq;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+ intel_engine_dump(engine, &p,
+ "%s(%s): failed to submit request\n",
+ __func__,
+ engine->name);
+
+ GEM_TRACE("%s(%s): failed to submit request\n",
+ __func__,
+ engine->name);
+ GEM_TRACE_DUMP();
+
+ intel_gt_set_wedged(gt);
+ if (last)
+ i915_request_put(last);
+
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (last)
+ i915_request_put(last);
+ last = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+
+ if (count & 1) {
+ err = intel_engine_reset(engine, NULL);
+ if (err) {
+ GEM_TRACE_ERR("intel_engine_reset(%s) failed, err:%d\n",
+ engine->name, err);
+ GEM_TRACE_DUMP();
+ i915_request_put(last);
+ break;
+ }
+ } else {
+ force_reset_timeout(engine);
+ err = intel_engine_reset(engine, NULL);
+ cancel_reset_timeout(engine);
+ if (err != -ETIMEDOUT) {
+ pr_err("intel_engine_reset(%s) did not fail, err:%d\n",
+ engine->name, err);
+ i915_request_put(last);
+ break;
+ }
+ }
+
+ err = 0;
+ if (last) {
+ if (i915_request_wait(last, 0, HZ / 2) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ intel_engine_dump(engine, &p,
+ "%s(%s): failed to complete request\n",
+ __func__,
+ engine->name);
+
+ GEM_TRACE("%s(%s): failed to complete request\n",
+ __func__,
+ engine->name);
+ GEM_TRACE_DUMP();
+
+ err = -EIO;
+ }
+ i915_request_put(last);
+ }
+ count++;
+ } while (err == 0 && time_before(jiffies, end_time));
+out:
+ pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
+skip:
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ st_engine_heartbeat_enable(engine);
+ intel_context_put(ce);
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int __igt_reset_engine(struct intel_gt *gt, bool active)
{
struct i915_gpu_error *global = &gt->i915->gpu_error;
@@ -560,6 +704,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
for_each_engine(engine, gt, id) {
unsigned int reset_count, reset_engine_count;
+ unsigned long count;
IGT_TIMEOUT(end_time);
if (active && !intel_engine_can_store_dword(engine))
@@ -577,6 +722,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
st_engine_heartbeat_disable(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ count = 0;
do {
if (active) {
struct i915_request *rq;
@@ -608,7 +754,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
err = intel_engine_reset(engine, NULL);
if (err) {
- pr_err("i915_reset_engine failed\n");
+ pr_err("intel_engine_reset(%s) failed, err:%d\n",
+ engine->name, err);
break;
}
@@ -625,9 +772,13 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
err = -EINVAL;
break;
}
+
+ count++;
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
st_engine_heartbeat_enable(engine);
+ pr_info("%s: Completed %lu %s resets\n",
+ engine->name, count, active ? "active" : "idle");
if (err)
break;
@@ -1478,7 +1629,8 @@ static int igt_reset_queue(void *arg)
prev = rq;
count++;
} while (time_before(jiffies, end_time));
- pr_info("%s: Completed %d resets\n", engine->name, count);
+ pr_info("%s: Completed %d queued resets\n",
+ engine->name, count);
*h.batch = MI_BATCH_BUFFER_END;
intel_gt_chipset_flush(engine->gt);
@@ -1575,13 +1727,21 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
engine->name, mode, p->name);
- tasklet_disable(t);
+ if (t->func)
+ tasklet_disable(t);
+ if (strcmp(p->name, "softirq"))
+ local_bh_disable();
p->critical_section_begin();
- err = intel_engine_reset(engine, NULL);
+ err = __intel_engine_reset_bh(engine, NULL);
p->critical_section_end();
- tasklet_enable(t);
+ if (strcmp(p->name, "softirq"))
+ local_bh_enable();
+ if (t->func) {
+ tasklet_enable(t);
+ tasklet_hi_schedule(t);
+ }
if (err)
pr_err("i915_reset_engine(%s:%s) failed under %s\n",
@@ -1687,6 +1847,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_reset_nop_engine),
SUBTEST(igt_reset_idle_engine),
SUBTEST(igt_reset_active_engine),
+ SUBTEST(igt_reset_fail_engine),
SUBTEST(igt_reset_engines),
SUBTEST(igt_reset_engines_atomic),
SUBTEST(igt_reset_queue),
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 95d41c01d0e0..920979a89413 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1,22 +1,22 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2018 Intel Corporation
*/
#include <linux/prime_numbers.h>
-#include "gem/i915_gem_pm.h"
-#include "gt/intel_engine_heartbeat.h"
-#include "gt/intel_reset.h"
-#include "gt/selftest_engine_heartbeat.h"
-
#include "i915_selftest.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_reset.h"
+#include "intel_ring.h"
+#include "selftest_engine_heartbeat.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_spinner.h"
#include "selftests/lib_sw_fence.h"
+#include "shmem_utils.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
@@ -27,29 +27,7 @@
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int err;
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- return vma;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err) {
- i915_gem_object_put(obj);
- return ERR_PTR(err);
- }
-
- return vma;
+ return __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
}
static bool is_active(struct i915_request *rq)
@@ -70,6 +48,9 @@ static int wait_for_submit(struct intel_engine_cs *engine,
struct i915_request *rq,
unsigned long timeout)
{
+ /* Ignore our own attempts to suppress excess tasklets */
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+
timeout += jiffies;
do {
bool done = time_after(jiffies, timeout);
@@ -89,4623 +70,6 @@ static int wait_for_submit(struct intel_engine_cs *engine,
} while (1);
}
-static int wait_for_reset(struct intel_engine_cs *engine,
- struct i915_request *rq,
- unsigned long timeout)
-{
- timeout += jiffies;
-
- do {
- cond_resched();
- intel_engine_flush_submission(engine);
-
- if (READ_ONCE(engine->execlists.pending[0]))
- continue;
-
- if (i915_request_completed(rq))
- break;
-
- if (READ_ONCE(rq->fence.error))
- break;
- } while (time_before(jiffies, timeout));
-
- flush_scheduled_work();
-
- if (rq->fence.error != -EIO) {
- pr_err("%s: hanging request %llx:%lld not reset\n",
- engine->name,
- rq->fence.context,
- rq->fence.seqno);
- return -EINVAL;
- }
-
- /* Give the request a jiffie to complete after flushing the worker */
- if (i915_request_wait(rq, 0,
- max(0l, (long)(timeout - jiffies)) + 1) < 0) {
- pr_err("%s: hanging request %llx:%lld did not complete\n",
- engine->name,
- rq->fence.context,
- rq->fence.seqno);
- return -ETIME;
- }
-
- return 0;
-}
-
-static int live_sanitycheck(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_spinner spin;
- int err = 0;
-
- if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
- return 0;
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- break;
- }
-
- rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_ctx;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin, rq)) {
- GEM_TRACE("spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto out_ctx;
- }
-
- igt_spinner_end(&spin);
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- goto out_ctx;
- }
-
-out_ctx:
- intel_context_put(ce);
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_unlite_restore(struct intel_gt *gt, int prio)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_spinner spin;
- int err = -ENOMEM;
-
- /*
- * Check that we can correctly context switch between 2 instances
- * on the same engine from the same parent context.
- */
-
- if (igt_spinner_init(&spin, gt))
- return err;
-
- err = 0;
- for_each_engine(engine, gt, id) {
- struct intel_context *ce[2] = {};
- struct i915_request *rq[2];
- struct igt_live_test t;
- int n;
-
- if (prio && !intel_engine_has_preemption(engine))
- continue;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- break;
- }
- st_engine_heartbeat_disable(engine);
-
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- struct intel_context *tmp;
-
- tmp = intel_context_create(engine);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- goto err_ce;
- }
-
- err = intel_context_pin(tmp);
- if (err) {
- intel_context_put(tmp);
- goto err_ce;
- }
-
- /*
- * Setup the pair of contexts such that if we
- * lite-restore using the RING_TAIL from ce[1] it
- * will execute garbage from ce[0]->ring.
- */
- memset(tmp->ring->vaddr,
- POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
- tmp->ring->vma->size);
-
- ce[n] = tmp;
- }
- GEM_BUG_ON(!ce[1]->ring->size);
- intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
- __execlists_update_reg_state(ce[1], engine, ce[1]->ring->head);
-
- rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
- if (IS_ERR(rq[0])) {
- err = PTR_ERR(rq[0]);
- goto err_ce;
- }
-
- i915_request_get(rq[0]);
- i915_request_add(rq[0]);
- GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
-
- if (!igt_wait_for_spinner(&spin, rq[0])) {
- i915_request_put(rq[0]);
- goto err_ce;
- }
-
- rq[1] = i915_request_create(ce[1]);
- if (IS_ERR(rq[1])) {
- err = PTR_ERR(rq[1]);
- i915_request_put(rq[0]);
- goto err_ce;
- }
-
- if (!prio) {
- /*
- * Ensure we do the switch to ce[1] on completion.
- *
- * rq[0] is already submitted, so this should reduce
- * to a no-op (a wait on a request on the same engine
- * uses the submit fence, not the completion fence),
- * but it will install a dependency on rq[1] for rq[0]
- * that will prevent the pair being reordered by
- * timeslicing.
- */
- i915_request_await_dma_fence(rq[1], &rq[0]->fence);
- }
-
- i915_request_get(rq[1]);
- i915_request_add(rq[1]);
- GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
- i915_request_put(rq[0]);
-
- if (prio) {
- struct i915_sched_attr attr = {
- .priority = prio,
- };
-
- /* Alternatively preempt the spinner with ce[1] */
- engine->schedule(rq[1], &attr);
- }
-
- /* And switch back to ce[0] for good measure */
- rq[0] = i915_request_create(ce[0]);
- if (IS_ERR(rq[0])) {
- err = PTR_ERR(rq[0]);
- i915_request_put(rq[1]);
- goto err_ce;
- }
-
- i915_request_await_dma_fence(rq[0], &rq[1]->fence);
- i915_request_get(rq[0]);
- i915_request_add(rq[0]);
- GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
- i915_request_put(rq[1]);
- i915_request_put(rq[0]);
-
-err_ce:
- intel_engine_flush_submission(engine);
- igt_spinner_end(&spin);
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- if (IS_ERR_OR_NULL(ce[n]))
- break;
-
- intel_context_unpin(ce[n]);
- intel_context_put(ce[n]);
- }
-
- st_engine_heartbeat_enable(engine);
- if (igt_live_test_end(&t))
- err = -EIO;
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_unlite_switch(void *arg)
-{
- return live_unlite_restore(arg, 0);
-}
-
-static int live_unlite_preempt(void *arg)
-{
- return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
-}
-
-static int live_unlite_ring(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct igt_spinner spin;
- enum intel_engine_id id;
- int err = 0;
-
- /*
- * Setup a preemption event that will cause almost the entire ring
- * to be unwound, potentially fooling our intel_ring_direction()
- * into emitting a forward lite-restore instead of the rollback.
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce[2] = {};
- struct i915_request *rq;
- struct igt_live_test t;
- int n;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- break;
- }
- st_engine_heartbeat_disable(engine);
-
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- struct intel_context *tmp;
-
- tmp = intel_context_create(engine);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- goto err_ce;
- }
-
- err = intel_context_pin(tmp);
- if (err) {
- intel_context_put(tmp);
- goto err_ce;
- }
-
- memset32(tmp->ring->vaddr,
- 0xdeadbeef, /* trigger a hang if executed */
- tmp->ring->vma->size / sizeof(u32));
-
- ce[n] = tmp;
- }
-
- /* Create max prio spinner, followed by N low prio nops */
- rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ce;
- }
-
- i915_request_get(rq);
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
- intel_gt_set_wedged(gt);
- i915_request_put(rq);
- err = -ETIME;
- goto err_ce;
- }
-
- /* Fill the ring, until we will cause a wrap */
- n = 0;
- while (intel_ring_direction(ce[0]->ring,
- rq->wa_tail,
- ce[0]->ring->tail) <= 0) {
- struct i915_request *tmp;
-
- tmp = intel_context_create_request(ce[0]);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- i915_request_put(rq);
- goto err_ce;
- }
-
- i915_request_add(tmp);
- intel_engine_flush_submission(engine);
- n++;
- }
- intel_engine_flush_submission(engine);
- pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
- engine->name, n,
- ce[0]->ring->size,
- ce[0]->ring->tail,
- ce[0]->ring->emit,
- rq->tail);
- GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
- rq->tail,
- ce[0]->ring->tail) <= 0);
- i915_request_put(rq);
-
- /* Create a second ring to preempt the first ring after rq[0] */
- rq = intel_context_create_request(ce[1]);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ce;
- }
-
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_get(rq);
- i915_request_add(rq);
-
- err = wait_for_submit(engine, rq, HZ / 2);
- i915_request_put(rq);
- if (err) {
- pr_err("%s: preemption request was not submitted\n",
- engine->name);
- err = -ETIME;
- }
-
- pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
- engine->name,
- ce[0]->ring->tail, ce[0]->ring->emit,
- ce[1]->ring->tail, ce[1]->ring->emit);
-
-err_ce:
- intel_engine_flush_submission(engine);
- igt_spinner_end(&spin);
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- if (IS_ERR_OR_NULL(ce[n]))
- break;
-
- intel_context_unpin(ce[n]);
- intel_context_put(ce[n]);
- }
- st_engine_heartbeat_enable(engine);
- if (igt_live_test_end(&t))
- err = -EIO;
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_pin_rewind(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = 0;
-
- /*
- * We have to be careful not to trust intel_ring too much, for example
- * ring->head is updated upon retire which is out of sync with pinning
- * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
- * or else we risk writing an older, stale value.
- *
- * To simulate this, let's apply a bit of deliberate sabotague.
- */
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
- struct intel_ring *ring;
- struct igt_live_test t;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- break;
- }
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- break;
- }
-
- err = intel_context_pin(ce);
- if (err) {
- intel_context_put(ce);
- break;
- }
-
- /* Keep the context awake while we play games */
- err = i915_active_acquire(&ce->active);
- if (err) {
- intel_context_unpin(ce);
- intel_context_put(ce);
- break;
- }
- ring = ce->ring;
-
- /* Poison the ring, and offset the next request from HEAD */
- memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
- ring->emit = ring->size / 2;
- ring->tail = ring->emit;
- GEM_BUG_ON(ring->head);
-
- intel_context_unpin(ce);
-
- /* Submit a simple nop request */
- GEM_BUG_ON(intel_context_is_pinned(ce));
- rq = intel_context_create_request(ce);
- i915_active_release(&ce->active); /* e.g. async retire */
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- break;
- }
- GEM_BUG_ON(!rq->head);
- i915_request_add(rq);
-
- /* Expect not to hang! */
- if (igt_live_test_end(&t)) {
- err = -EIO;
- break;
- }
- }
-
- return err;
-}
-
-static int live_hold_reset(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_spinner spin;
- int err = 0;
-
- /*
- * In order to support offline error capture for fast preempt reset,
- * we need to decouple the guilty request and ensure that it and its
- * descendents are not executed while the capture is in progress.
- */
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- break;
- }
-
- st_engine_heartbeat_disable(engine);
-
- rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
- intel_gt_set_wedged(gt);
- err = -ETIME;
- goto out;
- }
-
- /* We have our request executing, now remove it and reset */
-
- if (test_and_set_bit(I915_RESET_ENGINE + id,
- &gt->reset.flags)) {
- intel_gt_set_wedged(gt);
- err = -EBUSY;
- goto out;
- }
- tasklet_disable(&engine->execlists.tasklet);
-
- engine->execlists.tasklet.func(engine->execlists.tasklet.data);
- GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
-
- i915_request_get(rq);
- execlists_hold(engine, rq);
- GEM_BUG_ON(!i915_request_on_hold(rq));
-
- intel_engine_reset(engine, NULL);
- GEM_BUG_ON(rq->fence.error != -EIO);
-
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(I915_RESET_ENGINE + id,
- &gt->reset.flags);
-
- /* Check that we do not resubmit the held request */
- if (!i915_request_wait(rq, 0, HZ / 5)) {
- pr_err("%s: on hold request completed!\n",
- engine->name);
- i915_request_put(rq);
- err = -EIO;
- goto out;
- }
- GEM_BUG_ON(!i915_request_on_hold(rq));
-
- /* But is resubmitted on release */
- execlists_unhold(engine, rq);
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- pr_err("%s: held request did not complete!\n",
- engine->name);
- intel_gt_set_wedged(gt);
- err = -ETIME;
- }
- i915_request_put(rq);
-
-out:
- st_engine_heartbeat_enable(engine);
- intel_context_put(ce);
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static const char *error_repr(int err)
-{
- return err ? "bad" : "good";
-}
-
-static int live_error_interrupt(void *arg)
-{
- static const struct error_phase {
- enum { GOOD = 0, BAD = -EIO } error[2];
- } phases[] = {
- { { BAD, GOOD } },
- { { BAD, BAD } },
- { { BAD, GOOD } },
- { { GOOD, GOOD } }, /* sentinel */
- };
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /*
- * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
- * of invalid commands in user batches that will cause a GPU hang.
- * This is a faster mechanism than using hangcheck/heartbeats, but
- * only detects problems the HW knows about -- it will not warn when
- * we kill the HW!
- *
- * To verify our detection and reset, we throw some invalid commands
- * at the HW and wait for the interrupt.
- */
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- for_each_engine(engine, gt, id) {
- const struct error_phase *p;
- int err = 0;
-
- st_engine_heartbeat_disable(engine);
-
- for (p = phases; p->error[0] != GOOD; p++) {
- struct i915_request *client[ARRAY_SIZE(phases->error)];
- u32 *cs;
- int i;
-
- memset(client, 0, sizeof(*client));
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- struct intel_context *ce;
- struct i915_request *rq;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- if (rq->engine->emit_init_breadcrumb) {
- err = rq->engine->emit_init_breadcrumb(rq);
- if (err) {
- i915_request_add(rq);
- goto out;
- }
- }
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- err = PTR_ERR(cs);
- goto out;
- }
-
- if (p->error[i]) {
- *cs++ = 0xdeadbeef;
- *cs++ = 0xdeadbeef;
- } else {
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- }
-
- client[i] = i915_request_get(rq);
- i915_request_add(rq);
- }
-
- err = wait_for_submit(engine, client[0], HZ / 2);
- if (err) {
- pr_err("%s: first request did not start within time!\n",
- engine->name);
- err = -ETIME;
- goto out;
- }
-
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- if (i915_request_wait(client[i], 0, HZ / 5) < 0)
- pr_debug("%s: %s request incomplete!\n",
- engine->name,
- error_repr(p->error[i]));
-
- if (!i915_request_started(client[i])) {
- pr_err("%s: %s request not started!\n",
- engine->name,
- error_repr(p->error[i]));
- err = -ETIME;
- goto out;
- }
-
- /* Kick the tasklet to process the error */
- intel_engine_flush_submission(engine);
- if (client[i]->fence.error != p->error[i]) {
- pr_err("%s: %s request (%s) with wrong error code: %d\n",
- engine->name,
- error_repr(p->error[i]),
- i915_request_completed(client[i]) ? "completed" : "running",
- client[i]->fence.error);
- err = -EINVAL;
- goto out;
- }
- }
-
-out:
- for (i = 0; i < ARRAY_SIZE(client); i++)
- if (client[i])
- i915_request_put(client[i]);
- if (err) {
- pr_err("%s: failed at phase[%zd] { %d, %d }\n",
- engine->name, p - phases,
- p->error[0], p->error[1]);
- break;
- }
- }
-
- st_engine_heartbeat_enable(engine);
- if (err) {
- intel_gt_set_wedged(gt);
- return err;
- }
- }
-
- return 0;
-}
-
-static int
-emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 10);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_NEQ_SDD;
- *cs++ = 0;
- *cs++ = i915_ggtt_offset(vma) + 4 * idx;
- *cs++ = 0;
-
- if (idx > 0) {
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
- *cs++ = 0;
- *cs++ = 1;
- } else {
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- }
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
-
- intel_ring_advance(rq, cs);
- return 0;
-}
-
-static struct i915_request *
-semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
-{
- struct intel_context *ce;
- struct i915_request *rq;
- int err;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
-
- rq = intel_context_create_request(ce);
- if (IS_ERR(rq))
- goto out_ce;
-
- err = 0;
- if (rq->engine->emit_init_breadcrumb)
- err = rq->engine->emit_init_breadcrumb(rq);
- if (err == 0)
- err = emit_semaphore_chain(rq, vma, idx);
- if (err == 0)
- i915_request_get(rq);
- i915_request_add(rq);
- if (err)
- rq = ERR_PTR(err);
-
-out_ce:
- intel_context_put(ce);
- return rq;
-}
-
-static int
-release_queue(struct intel_engine_cs *engine,
- struct i915_vma *vma,
- int idx, int prio)
-{
- struct i915_sched_attr attr = {
- .priority = prio,
- };
- struct i915_request *rq;
- u32 *cs;
-
- rq = intel_engine_create_kernel_request(engine);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
- *cs++ = 0;
- *cs++ = 1;
-
- intel_ring_advance(rq, cs);
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- local_bh_disable();
- engine->schedule(rq, &attr);
- local_bh_enable(); /* kick tasklet */
-
- i915_request_put(rq);
-
- return 0;
-}
-
-static int
-slice_semaphore_queue(struct intel_engine_cs *outer,
- struct i915_vma *vma,
- int count)
-{
- struct intel_engine_cs *engine;
- struct i915_request *head;
- enum intel_engine_id id;
- int err, i, n = 0;
-
- head = semaphore_queue(outer, vma, n++);
- if (IS_ERR(head))
- return PTR_ERR(head);
-
- for_each_engine(engine, outer->gt, id) {
- for (i = 0; i < count; i++) {
- struct i915_request *rq;
-
- rq = semaphore_queue(engine, vma, n++);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_put(rq);
- }
- }
-
- err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER);
- if (err)
- goto out;
-
- if (i915_request_wait(head, 0,
- 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
- pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
- count, n);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(outer->gt);
- err = -EIO;
- }
-
-out:
- i915_request_put(head);
- return err;
-}
-
-static int live_timeslice_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct i915_vma *vma;
- void *vaddr;
- int err = 0;
-
- /*
- * If a request takes too long, we would like to give other users
- * a fair go on the GPU. In particular, users may create batches
- * that wait upon external input, where that input may even be
- * supplied by another GPU job. To avoid blocking forever, we
- * need to preempt the current task and replace it with another
- * ready task.
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return 0;
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err)
- goto err_map;
-
- err = i915_vma_sync(vma);
- if (err)
- goto err_pin;
-
- for_each_engine(engine, gt, id) {
- if (!intel_engine_has_preemption(engine))
- continue;
-
- memset(vaddr, 0, PAGE_SIZE);
-
- st_engine_heartbeat_disable(engine);
- err = slice_semaphore_queue(engine, vma, 5);
- st_engine_heartbeat_enable(engine);
- if (err)
- goto err_pin;
-
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- goto err_pin;
- }
- }
-
-err_pin:
- i915_vma_unpin(vma);
-err_map:
- i915_gem_object_unpin_map(obj);
-err_obj:
- i915_gem_object_put(obj);
- return err;
-}
-
-static struct i915_request *
-create_rewinder(struct intel_context *ce,
- struct i915_request *wait,
- void *slot, int idx)
-{
- const u32 offset =
- i915_ggtt_offset(ce->engine->status_page.vma) +
- offset_in_page(slot);
- struct i915_request *rq;
- u32 *cs;
- int err;
-
- rq = intel_context_create_request(ce);
- if (IS_ERR(rq))
- return rq;
-
- if (wait) {
- err = i915_request_await_dma_fence(rq, &wait->fence);
- if (err)
- goto err;
- }
-
- cs = intel_ring_begin(rq, 14);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- goto err;
- }
-
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- *cs++ = MI_NOOP;
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_GTE_SDD;
- *cs++ = idx;
- *cs++ = offset;
- *cs++ = 0;
-
- *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
- *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
- *cs++ = offset + idx * sizeof(u32);
- *cs++ = 0;
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = offset;
- *cs++ = 0;
- *cs++ = idx + 1;
-
- intel_ring_advance(rq, cs);
-
- rq->sched.attr.priority = I915_PRIORITY_MASK;
- err = 0;
-err:
- i915_request_get(rq);
- i915_request_add(rq);
- if (err) {
- i915_request_put(rq);
- return ERR_PTR(err);
- }
-
- return rq;
-}
-
-static int live_timeslice_rewind(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /*
- * The usual presumption on timeslice expiration is that we replace
- * the active context with another. However, given a chain of
- * dependencies we may end up with replacing the context with itself,
- * but only a few of those requests, forcing us to rewind the
- * RING_TAIL of the original request.
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return 0;
-
- for_each_engine(engine, gt, id) {
- enum { A1, A2, B1 };
- enum { X = 1, Z, Y };
- struct i915_request *rq[3] = {};
- struct intel_context *ce;
- unsigned long timeslice;
- int i, err = 0;
- u32 *slot;
-
- if (!intel_engine_has_timeslices(engine))
- continue;
-
- /*
- * A:rq1 -- semaphore wait, timestamp X
- * A:rq2 -- write timestamp Y
- *
- * B:rq1 [await A:rq1] -- write timestamp Z
- *
- * Force timeslice, release semaphore.
- *
- * Expect execution/evaluation order XZY
- */
-
- st_engine_heartbeat_disable(engine);
- timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
-
- slot = memset32(engine->status_page.addr + 1000, 0, 4);
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto err;
- }
-
- rq[A1] = create_rewinder(ce, NULL, slot, X);
- if (IS_ERR(rq[A1])) {
- intel_context_put(ce);
- goto err;
- }
-
- rq[A2] = create_rewinder(ce, NULL, slot, Y);
- intel_context_put(ce);
- if (IS_ERR(rq[A2]))
- goto err;
-
- err = wait_for_submit(engine, rq[A2], HZ / 2);
- if (err) {
- pr_err("%s: failed to submit first context\n",
- engine->name);
- goto err;
- }
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto err;
- }
-
- rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
- intel_context_put(ce);
- if (IS_ERR(rq[2]))
- goto err;
-
- err = wait_for_submit(engine, rq[B1], HZ / 2);
- if (err) {
- pr_err("%s: failed to submit second context\n",
- engine->name);
- goto err;
- }
-
- /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
- ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
- if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */
- /* Wait for the timeslice to kick in */
- del_timer(&engine->execlists.timer);
- tasklet_hi_schedule(&engine->execlists.tasklet);
- intel_engine_flush_submission(engine);
- }
- /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
- GEM_BUG_ON(!i915_request_is_active(rq[A1]));
- GEM_BUG_ON(!i915_request_is_active(rq[B1]));
- GEM_BUG_ON(i915_request_is_active(rq[A2]));
-
- /* Release the hounds! */
- slot[0] = 1;
- wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
-
- for (i = 1; i <= 3; i++) {
- unsigned long timeout = jiffies + HZ / 2;
-
- while (!READ_ONCE(slot[i]) &&
- time_before(jiffies, timeout))
- ;
-
- if (!time_before(jiffies, timeout)) {
- pr_err("%s: rq[%d] timed out\n",
- engine->name, i - 1);
- err = -ETIME;
- goto err;
- }
-
- pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
- }
-
- /* XZY: XZ < XY */
- if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
- pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
- engine->name,
- slot[Z] - slot[X],
- slot[Y] - slot[X]);
- err = -EINVAL;
- }
-
-err:
- memset32(&slot[0], -1, 4);
- wmb();
-
- engine->props.timeslice_duration_ms = timeslice;
- st_engine_heartbeat_enable(engine);
- for (i = 0; i < 3; i++)
- i915_request_put(rq[i]);
- if (igt_flush_test(gt->i915))
- err = -EIO;
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static struct i915_request *nop_request(struct intel_engine_cs *engine)
-{
- struct i915_request *rq;
-
- rq = intel_engine_create_kernel_request(engine);
- if (IS_ERR(rq))
- return rq;
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- return rq;
-}
-
-static long slice_timeout(struct intel_engine_cs *engine)
-{
- long timeout;
-
- /* Enough time for a timeslice to kick in, and kick out */
- timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine));
-
- /* Enough time for the nop request to complete */
- timeout += HZ / 5;
-
- return timeout + 1;
-}
-
-static int live_timeslice_queue(void *arg)
-{
- struct intel_gt *gt = arg;
- struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct i915_vma *vma;
- void *vaddr;
- int err = 0;
-
- /*
- * Make sure that even if ELSP[0] and ELSP[1] are filled with
- * timeslicing between them disabled, we *do* enable timeslicing
- * if the queue demands it. (Normally, we do not submit if
- * ELSP[1] is already occupied, so must rely on timeslicing to
- * eject ELSP[0] in favour of the queue.)
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return 0;
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err)
- goto err_map;
-
- err = i915_vma_sync(vma);
- if (err)
- goto err_pin;
-
- for_each_engine(engine, gt, id) {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
- };
- struct i915_request *rq, *nop;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- st_engine_heartbeat_disable(engine);
- memset(vaddr, 0, PAGE_SIZE);
-
- /* ELSP[0]: semaphore wait */
- rq = semaphore_queue(engine, vma, 0);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_heartbeat;
- }
- engine->schedule(rq, &attr);
- err = wait_for_submit(engine, rq, HZ / 2);
- if (err) {
- pr_err("%s: Timed out trying to submit semaphores\n",
- engine->name);
- goto err_rq;
- }
-
- /* ELSP[1]: nop request */
- nop = nop_request(engine);
- if (IS_ERR(nop)) {
- err = PTR_ERR(nop);
- goto err_rq;
- }
- err = wait_for_submit(engine, nop, HZ / 2);
- i915_request_put(nop);
- if (err) {
- pr_err("%s: Timed out trying to submit nop\n",
- engine->name);
- goto err_rq;
- }
-
- GEM_BUG_ON(i915_request_completed(rq));
- GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
-
- /* Queue: semaphore signal, matching priority as semaphore */
- err = release_queue(engine, vma, 1, effective_prio(rq));
- if (err)
- goto err_rq;
-
- /* Wait until we ack the release_queue and start timeslicing */
- do {
- cond_resched();
- intel_engine_flush_submission(engine);
- } while (READ_ONCE(engine->execlists.pending[0]));
-
- /* Timeslice every jiffy, so within 2 we should signal */
- if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
- struct drm_printer p =
- drm_info_printer(gt->i915->drm.dev);
-
- pr_err("%s: Failed to timeslice into queue\n",
- engine->name);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- memset(vaddr, 0xff, PAGE_SIZE);
- err = -EIO;
- }
-err_rq:
- i915_request_put(rq);
-err_heartbeat:
- st_engine_heartbeat_enable(engine);
- if (err)
- break;
- }
-
-err_pin:
- i915_vma_unpin(vma);
-err_map:
- i915_gem_object_unpin_map(obj);
-err_obj:
- i915_gem_object_put(obj);
- return err;
-}
-
-static int live_timeslice_nopreempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_spinner spin;
- int err = 0;
-
- /*
- * We should not timeslice into a request that is marked with
- * I915_REQUEST_NOPREEMPT.
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- return 0;
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
- unsigned long timeslice;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- break;
- }
-
- st_engine_heartbeat_disable(engine);
- timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
-
- /* Create an unpreemptible spinner */
-
- rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_heartbeat;
- }
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
- i915_request_put(rq);
- err = -ETIME;
- goto out_spin;
- }
-
- set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
- i915_request_put(rq);
-
- /* Followed by a maximum priority barrier (heartbeat) */
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out_spin;
- }
-
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_spin;
- }
-
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_get(rq);
- i915_request_add(rq);
-
- /*
- * Wait until the barrier is in ELSP, and we know timeslicing
- * will have been activated.
- */
- if (wait_for_submit(engine, rq, HZ / 2)) {
- i915_request_put(rq);
- err = -ETIME;
- goto out_spin;
- }
-
- /*
- * Since the ELSP[0] request is unpreemptible, it should not
- * allow the maximum priority barrier through. Wait long
- * enough to see if it is timesliced in by mistake.
- */
- if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
- pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
- engine->name);
- err = -EINVAL;
- }
- i915_request_put(rq);
-
-out_spin:
- igt_spinner_end(&spin);
-out_heartbeat:
- xchg(&engine->props.timeslice_duration_ms, timeslice);
- st_engine_heartbeat_enable(engine);
- if (err)
- break;
-
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- break;
- }
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_busywait_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct intel_engine_cs *engine;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- enum intel_engine_id id;
- int err = -ENOMEM;
- u32 *map;
-
- /*
- * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
- * preempt the busywaits used to synchronise between rings.
- */
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- return -ENOMEM;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto err_ctx_lo;
- }
-
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(map)) {
- err = PTR_ERR(map);
- goto err_obj;
- }
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_map;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err)
- goto err_map;
-
- err = i915_vma_sync(vma);
- if (err)
- goto err_vma;
-
- for_each_engine(engine, gt, id) {
- struct i915_request *lo, *hi;
- struct igt_live_test t;
- u32 *cs;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- goto err_vma;
- }
-
- /*
- * We create two requests. The low priority request
- * busywaits on a semaphore (inside the ringbuffer where
- * is should be preemptible) and the high priority requests
- * uses a MI_STORE_DWORD_IMM to update the semaphore value
- * allowing the first request to complete. If preemption
- * fails, we hang instead.
- */
-
- lo = igt_request_alloc(ctx_lo, engine);
- if (IS_ERR(lo)) {
- err = PTR_ERR(lo);
- goto err_vma;
- }
-
- cs = intel_ring_begin(lo, 8);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- i915_request_add(lo);
- goto err_vma;
- }
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(vma);
- *cs++ = 0;
- *cs++ = 1;
-
- /* XXX Do we need a flush + invalidate here? */
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_EQ_SDD;
- *cs++ = 0;
- *cs++ = i915_ggtt_offset(vma);
- *cs++ = 0;
-
- intel_ring_advance(lo, cs);
-
- i915_request_get(lo);
- i915_request_add(lo);
-
- if (wait_for(READ_ONCE(*map), 10)) {
- i915_request_put(lo);
- err = -ETIMEDOUT;
- goto err_vma;
- }
-
- /* Low priority request should be busywaiting now */
- if (i915_request_wait(lo, 0, 1) != -ETIME) {
- i915_request_put(lo);
- pr_err("%s: Busywaiting request did not!\n",
- engine->name);
- err = -EIO;
- goto err_vma;
- }
-
- hi = igt_request_alloc(ctx_hi, engine);
- if (IS_ERR(hi)) {
- err = PTR_ERR(hi);
- i915_request_put(lo);
- goto err_vma;
- }
-
- cs = intel_ring_begin(hi, 4);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- i915_request_add(hi);
- i915_request_put(lo);
- goto err_vma;
- }
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(vma);
- *cs++ = 0;
- *cs++ = 0;
-
- intel_ring_advance(hi, cs);
- i915_request_add(hi);
-
- if (i915_request_wait(lo, 0, HZ / 5) < 0) {
- struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
-
- pr_err("%s: Failed to preempt semaphore busywait!\n",
- engine->name);
-
- intel_engine_dump(engine, &p, "%s\n", engine->name);
- GEM_TRACE_DUMP();
-
- i915_request_put(lo);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_vma;
- }
- GEM_BUG_ON(READ_ONCE(*map));
- i915_request_put(lo);
-
- if (igt_live_test_end(&t)) {
- err = -EIO;
- goto err_vma;
- }
- }
-
- err = 0;
-err_vma:
- i915_vma_unpin(vma);
-err_map:
- i915_gem_object_unpin_map(obj);
-err_obj:
- i915_gem_object_put(obj);
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
- return err;
-}
-
-static struct i915_request *
-spinner_create_request(struct igt_spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u32 arb)
-{
- struct intel_context *ce;
- struct i915_request *rq;
-
- ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
-
- rq = igt_spinner_create_request(spin, ce, arb);
- intel_context_put(ce);
- return rq;
-}
-
-static int live_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct igt_spinner spin_hi, spin_lo;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
- pr_err("Logical preemption supported, but not exposed\n");
-
- if (igt_spinner_init(&spin_hi, gt))
- return -ENOMEM;
-
- if (igt_spinner_init(&spin_lo, gt))
- goto err_spin_hi;
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- goto err_spin_lo;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
- for_each_engine(engine, gt, id) {
- struct igt_live_test t;
- struct i915_request *rq;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_lo, rq)) {
- GEM_TRACE("lo spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- igt_spinner_end(&spin_lo);
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_hi, rq)) {
- GEM_TRACE("hi spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- igt_spinner_end(&spin_hi);
- igt_spinner_end(&spin_lo);
-
- if (igt_live_test_end(&t)) {
- err = -EIO;
- goto err_ctx_lo;
- }
- }
-
- err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
-err_spin_lo:
- igt_spinner_fini(&spin_lo);
-err_spin_hi:
- igt_spinner_fini(&spin_hi);
- return err;
-}
-
-static int live_late_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct igt_spinner spin_hi, spin_lo;
- struct intel_engine_cs *engine;
- struct i915_sched_attr attr = {};
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (igt_spinner_init(&spin_hi, gt))
- return -ENOMEM;
-
- if (igt_spinner_init(&spin_lo, gt))
- goto err_spin_hi;
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- goto err_spin_lo;
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
-
- /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
- ctx_lo->sched.priority = I915_USER_PRIORITY(1);
-
- for_each_engine(engine, gt, id) {
- struct igt_live_test t;
- struct i915_request *rq;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_lo, rq)) {
- pr_err("First context failed to start\n");
- goto err_wedged;
- }
-
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_NOOP);
- if (IS_ERR(rq)) {
- igt_spinner_end(&spin_lo);
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (igt_wait_for_spinner(&spin_hi, rq)) {
- pr_err("Second context overtook first?\n");
- goto err_wedged;
- }
-
- attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
- engine->schedule(rq, &attr);
-
- if (!igt_wait_for_spinner(&spin_hi, rq)) {
- pr_err("High priority context failed to preempt the low priority context\n");
- GEM_TRACE_DUMP();
- goto err_wedged;
- }
-
- igt_spinner_end(&spin_hi);
- igt_spinner_end(&spin_lo);
-
- if (igt_live_test_end(&t)) {
- err = -EIO;
- goto err_ctx_lo;
- }
- }
-
- err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
-err_spin_lo:
- igt_spinner_fini(&spin_lo);
-err_spin_hi:
- igt_spinner_fini(&spin_hi);
- return err;
-
-err_wedged:
- igt_spinner_end(&spin_hi);
- igt_spinner_end(&spin_lo);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
-}
-
-struct preempt_client {
- struct igt_spinner spin;
- struct i915_gem_context *ctx;
-};
-
-static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
-{
- c->ctx = kernel_context(gt->i915);
- if (!c->ctx)
- return -ENOMEM;
-
- if (igt_spinner_init(&c->spin, gt))
- goto err_ctx;
-
- return 0;
-
-err_ctx:
- kernel_context_close(c->ctx);
- return -ENOMEM;
-}
-
-static void preempt_client_fini(struct preempt_client *c)
-{
- igt_spinner_fini(&c->spin);
- kernel_context_close(c->ctx);
-}
-
-static int live_nopreempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct preempt_client a, b;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * Verify that we can disable preemption for an individual request
-	 * that may be under observation and does not want to be interrupted.
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (preempt_client_init(gt, &a))
- return -ENOMEM;
- if (preempt_client_init(gt, &b))
- goto err_client_a;
- b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
-
- for_each_engine(engine, gt, id) {
- struct i915_request *rq_a, *rq_b;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- engine->execlists.preempt_hang.count = 0;
-
- rq_a = spinner_create_request(&a.spin,
- a.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq_a)) {
- err = PTR_ERR(rq_a);
- goto err_client_b;
- }
-
- /* Low priority client, but unpreemptable! */
- __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
-
- i915_request_add(rq_a);
- if (!igt_wait_for_spinner(&a.spin, rq_a)) {
- pr_err("First client failed to start\n");
- goto err_wedged;
- }
-
- rq_b = spinner_create_request(&b.spin,
- b.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq_b)) {
- err = PTR_ERR(rq_b);
- goto err_client_b;
- }
-
- i915_request_add(rq_b);
-
- /* B is much more important than A! (But A is unpreemptable.) */
- GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
-
- /* Wait long enough for preemption and timeslicing */
- if (igt_wait_for_spinner(&b.spin, rq_b)) {
- pr_err("Second client started too early!\n");
- goto err_wedged;
- }
-
- igt_spinner_end(&a.spin);
-
- if (!igt_wait_for_spinner(&b.spin, rq_b)) {
- pr_err("Second client failed to start\n");
- goto err_wedged;
- }
-
- igt_spinner_end(&b.spin);
-
- if (engine->execlists.preempt_hang.count) {
- pr_err("Preemption recorded x%d; should have been suppressed!\n",
- engine->execlists.preempt_hang.count);
- err = -EINVAL;
- goto err_wedged;
- }
-
- if (igt_flush_test(gt->i915))
- goto err_wedged;
- }
-
- err = 0;
-err_client_b:
- preempt_client_fini(&b);
-err_client_a:
- preempt_client_fini(&a);
- return err;
-
-err_wedged:
- igt_spinner_end(&b.spin);
- igt_spinner_end(&a.spin);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_client_b;
-}
-
-struct live_preempt_cancel {
- struct intel_engine_cs *engine;
- struct preempt_client a, b;
-};
-
-static int __cancel_active0(struct live_preempt_cancel *arg)
-{
- struct i915_request *rq;
- struct igt_live_test t;
- int err;
-
- /* Preempt cancel of ELSP0 */
- GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- if (igt_live_test_begin(&t, arg->engine->i915,
- __func__, arg->engine->name))
- return -EIO;
-
- rq = spinner_create_request(&arg->a.spin,
- arg->a.ctx, arg->engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- clear_bit(CONTEXT_BANNED, &rq->context->flags);
- i915_request_get(rq);
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
- err = -EIO;
- goto out;
- }
-
- intel_context_set_banned(rq->context);
- err = intel_engine_pulse(arg->engine);
- if (err)
- goto out;
-
- err = wait_for_reset(arg->engine, rq, HZ / 2);
- if (err) {
- pr_err("Cancelled inflight0 request did not reset\n");
- goto out;
- }
-
-out:
- i915_request_put(rq);
- if (igt_live_test_end(&t))
- err = -EIO;
- return err;
-}
-
-static int __cancel_active1(struct live_preempt_cancel *arg)
-{
- struct i915_request *rq[2] = {};
- struct igt_live_test t;
- int err;
-
- /* Preempt cancel of ELSP1 */
- GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- if (igt_live_test_begin(&t, arg->engine->i915,
- __func__, arg->engine->name))
- return -EIO;
-
- rq[0] = spinner_create_request(&arg->a.spin,
- arg->a.ctx, arg->engine,
- MI_NOOP); /* no preemption */
- if (IS_ERR(rq[0]))
- return PTR_ERR(rq[0]);
-
- clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
- i915_request_get(rq[0]);
- i915_request_add(rq[0]);
- if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
- err = -EIO;
- goto out;
- }
-
- rq[1] = spinner_create_request(&arg->b.spin,
- arg->b.ctx, arg->engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq[1])) {
- err = PTR_ERR(rq[1]);
- goto out;
- }
-
- clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
- i915_request_get(rq[1]);
- err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
- i915_request_add(rq[1]);
- if (err)
- goto out;
-
- intel_context_set_banned(rq[1]->context);
- err = intel_engine_pulse(arg->engine);
- if (err)
- goto out;
-
- igt_spinner_end(&arg->a.spin);
- err = wait_for_reset(arg->engine, rq[1], HZ / 2);
- if (err)
- goto out;
-
- if (rq[0]->fence.error != 0) {
- pr_err("Normal inflight0 request did not complete\n");
- err = -EINVAL;
- goto out;
- }
-
- if (rq[1]->fence.error != -EIO) {
- pr_err("Cancelled inflight1 request did not report -EIO\n");
- err = -EINVAL;
- goto out;
- }
-
-out:
- i915_request_put(rq[1]);
- i915_request_put(rq[0]);
- if (igt_live_test_end(&t))
- err = -EIO;
- return err;
-}
-
-static int __cancel_queued(struct live_preempt_cancel *arg)
-{
- struct i915_request *rq[3] = {};
- struct igt_live_test t;
- int err;
-
- /* Full ELSP and one in the wings */
- GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- if (igt_live_test_begin(&t, arg->engine->i915,
- __func__, arg->engine->name))
- return -EIO;
-
- rq[0] = spinner_create_request(&arg->a.spin,
- arg->a.ctx, arg->engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq[0]))
- return PTR_ERR(rq[0]);
-
- clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
- i915_request_get(rq[0]);
- i915_request_add(rq[0]);
- if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
- err = -EIO;
- goto out;
- }
-
- rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
- if (IS_ERR(rq[1])) {
- err = PTR_ERR(rq[1]);
- goto out;
- }
-
- clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
- i915_request_get(rq[1]);
- err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
- i915_request_add(rq[1]);
- if (err)
- goto out;
-
- rq[2] = spinner_create_request(&arg->b.spin,
- arg->a.ctx, arg->engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq[2])) {
- err = PTR_ERR(rq[2]);
- goto out;
- }
-
- i915_request_get(rq[2]);
- err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
- i915_request_add(rq[2]);
- if (err)
- goto out;
-
- intel_context_set_banned(rq[2]->context);
- err = intel_engine_pulse(arg->engine);
- if (err)
- goto out;
-
- err = wait_for_reset(arg->engine, rq[2], HZ / 2);
- if (err)
- goto out;
-
- if (rq[0]->fence.error != -EIO) {
- pr_err("Cancelled inflight0 request did not report -EIO\n");
- err = -EINVAL;
- goto out;
- }
-
- if (rq[1]->fence.error != 0) {
- pr_err("Normal inflight1 request did not complete\n");
- err = -EINVAL;
- goto out;
- }
-
- if (rq[2]->fence.error != -EIO) {
- pr_err("Cancelled queued request did not report -EIO\n");
- err = -EINVAL;
- goto out;
- }
-
-out:
- i915_request_put(rq[2]);
- i915_request_put(rq[1]);
- i915_request_put(rq[0]);
- if (igt_live_test_end(&t))
- err = -EIO;
- return err;
-}
-
-static int __cancel_hostile(struct live_preempt_cancel *arg)
-{
- struct i915_request *rq;
- int err;
-
- /* Preempt cancel non-preemptible spinner in ELSP0 */
- if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
- return 0;
-
- if (!intel_has_reset_engine(arg->engine->gt))
- return 0;
-
- GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- rq = spinner_create_request(&arg->a.spin,
- arg->a.ctx, arg->engine,
- MI_NOOP); /* preemption disabled */
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- clear_bit(CONTEXT_BANNED, &rq->context->flags);
- i915_request_get(rq);
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
- err = -EIO;
- goto out;
- }
-
- intel_context_set_banned(rq->context);
- err = intel_engine_pulse(arg->engine); /* force reset */
- if (err)
- goto out;
-
- err = wait_for_reset(arg->engine, rq, HZ / 2);
- if (err) {
- pr_err("Cancelled inflight0 request did not reset\n");
- goto out;
- }
-
-out:
- i915_request_put(rq);
- if (igt_flush_test(arg->engine->i915))
- err = -EIO;
- return err;
-}
-
-static int live_preempt_cancel(void *arg)
-{
- struct intel_gt *gt = arg;
- struct live_preempt_cancel data;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * To cancel an inflight context, we need to first remove it from the
- * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (preempt_client_init(gt, &data.a))
- return -ENOMEM;
- if (preempt_client_init(gt, &data.b))
- goto err_client_a;
-
- for_each_engine(data.engine, gt, id) {
- if (!intel_engine_has_preemption(data.engine))
- continue;
-
- err = __cancel_active0(&data);
- if (err)
- goto err_wedged;
-
- err = __cancel_active1(&data);
- if (err)
- goto err_wedged;
-
- err = __cancel_queued(&data);
- if (err)
- goto err_wedged;
-
- err = __cancel_hostile(&data);
- if (err)
- goto err_wedged;
- }
-
- err = 0;
-err_client_b:
- preempt_client_fini(&data.b);
-err_client_a:
- preempt_client_fini(&data.a);
- return err;
-
-err_wedged:
- GEM_TRACE_DUMP();
- igt_spinner_end(&data.b.spin);
- igt_spinner_end(&data.a.spin);
- intel_gt_set_wedged(gt);
- goto err_client_b;
-}
-
-static int live_suppress_self_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
- };
- struct preempt_client a, b;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * Verify that if a preemption request does not cause a change in
- * the current execution order, the preempt-to-idle injection is
- * skipped and that we do not accidentally apply it after the CS
- * completion event.
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
-		return 0; /* presume black box */
-
- if (intel_vgpu_active(gt->i915))
- return 0; /* GVT forces single port & request submission */
-
- if (preempt_client_init(gt, &a))
- return -ENOMEM;
- if (preempt_client_init(gt, &b))
- goto err_client_a;
-
- for_each_engine(engine, gt, id) {
- struct i915_request *rq_a, *rq_b;
- int depth;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (igt_flush_test(gt->i915))
- goto err_wedged;
-
- st_engine_heartbeat_disable(engine);
- engine->execlists.preempt_hang.count = 0;
-
- rq_a = spinner_create_request(&a.spin,
- a.ctx, engine,
- MI_NOOP);
- if (IS_ERR(rq_a)) {
- err = PTR_ERR(rq_a);
- st_engine_heartbeat_enable(engine);
- goto err_client_b;
- }
-
- i915_request_add(rq_a);
- if (!igt_wait_for_spinner(&a.spin, rq_a)) {
- pr_err("First client failed to start\n");
- st_engine_heartbeat_enable(engine);
- goto err_wedged;
- }
-
- /* Keep postponing the timer to avoid premature slicing */
- mod_timer(&engine->execlists.timer, jiffies + HZ);
- for (depth = 0; depth < 8; depth++) {
- rq_b = spinner_create_request(&b.spin,
- b.ctx, engine,
- MI_NOOP);
- if (IS_ERR(rq_b)) {
- err = PTR_ERR(rq_b);
- st_engine_heartbeat_enable(engine);
- goto err_client_b;
- }
- i915_request_add(rq_b);
-
- GEM_BUG_ON(i915_request_completed(rq_a));
- engine->schedule(rq_a, &attr);
- igt_spinner_end(&a.spin);
-
- if (!igt_wait_for_spinner(&b.spin, rq_b)) {
- pr_err("Second client failed to start\n");
- st_engine_heartbeat_enable(engine);
- goto err_wedged;
- }
-
- swap(a, b);
- rq_a = rq_b;
- }
- igt_spinner_end(&a.spin);
-
- if (engine->execlists.preempt_hang.count) {
- pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
- engine->name,
- engine->execlists.preempt_hang.count,
- depth);
- st_engine_heartbeat_enable(engine);
- err = -EINVAL;
- goto err_client_b;
- }
-
- st_engine_heartbeat_enable(engine);
- if (igt_flush_test(gt->i915))
- goto err_wedged;
- }
-
- err = 0;
-err_client_b:
- preempt_client_fini(&b);
-err_client_a:
- preempt_client_fini(&a);
- return err;
-
-err_wedged:
- igt_spinner_end(&b.spin);
- igt_spinner_end(&a.spin);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_client_b;
-}
-
-static int live_chain_preempt(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct preempt_client hi, lo;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * Build a chain AB...BA between two contexts (A, B) and request
- * preemption of the last request. It should then complete before
- * the previously submitted spinner in B.
- */
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (preempt_client_init(gt, &hi))
- return -ENOMEM;
-
- if (preempt_client_init(gt, &lo))
- goto err_client_hi;
-
- for_each_engine(engine, gt, id) {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
- };
- struct igt_live_test t;
- struct i915_request *rq;
- int ring_size, count, i;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- rq = spinner_create_request(&lo.spin,
- lo.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq))
- goto err_wedged;
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- ring_size = rq->wa_tail - rq->head;
- if (ring_size < 0)
- ring_size += rq->ring->size;
- ring_size = rq->ring->size / ring_size;
- pr_debug("%s(%s): Using maximum of %d requests\n",
- __func__, engine->name, ring_size);
-
- igt_spinner_end(&lo.spin);
- if (i915_request_wait(rq, 0, HZ / 2) < 0) {
- pr_err("Timed out waiting to flush %s\n", engine->name);
- i915_request_put(rq);
- goto err_wedged;
- }
- i915_request_put(rq);
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- goto err_wedged;
- }
-
- for_each_prime_number_from(count, 1, ring_size) {
- rq = spinner_create_request(&hi.spin,
- hi.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq))
- goto err_wedged;
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&hi.spin, rq))
- goto err_wedged;
-
- rq = spinner_create_request(&lo.spin,
- lo.ctx, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq))
- goto err_wedged;
- i915_request_add(rq);
-
- for (i = 0; i < count; i++) {
- rq = igt_request_alloc(lo.ctx, engine);
- if (IS_ERR(rq))
- goto err_wedged;
- i915_request_add(rq);
- }
-
- rq = igt_request_alloc(hi.ctx, engine);
- if (IS_ERR(rq))
- goto err_wedged;
-
- i915_request_get(rq);
- i915_request_add(rq);
- engine->schedule(rq, &attr);
-
- igt_spinner_end(&hi.spin);
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- struct drm_printer p =
- drm_info_printer(gt->i915->drm.dev);
-
- pr_err("Failed to preempt over chain of %d\n",
- count);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
- i915_request_put(rq);
- goto err_wedged;
- }
- igt_spinner_end(&lo.spin);
- i915_request_put(rq);
-
- rq = igt_request_alloc(lo.ctx, engine);
- if (IS_ERR(rq))
- goto err_wedged;
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- struct drm_printer p =
- drm_info_printer(gt->i915->drm.dev);
-
- pr_err("Failed to flush low priority chain of %d requests\n",
- count);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- i915_request_put(rq);
- goto err_wedged;
- }
- i915_request_put(rq);
- }
-
- if (igt_live_test_end(&t)) {
- err = -EIO;
- goto err_wedged;
- }
- }
-
- err = 0;
-err_client_lo:
- preempt_client_fini(&lo);
-err_client_hi:
- preempt_client_fini(&hi);
- return err;
-
-err_wedged:
- igt_spinner_end(&hi.spin);
- igt_spinner_end(&lo.spin);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_client_lo;
-}
-
-static int create_gang(struct intel_engine_cs *engine,
- struct i915_request **prev)
-{
- struct drm_i915_gem_object *obj;
- struct intel_context *ce;
- struct i915_request *rq;
- struct i915_vma *vma;
- u32 *cs;
- int err;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce))
- return PTR_ERR(ce);
-
- obj = i915_gem_object_create_internal(engine->i915, 4096);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto err_ce;
- }
-
- vma = i915_vma_instance(obj, ce->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto err_obj;
-
-	cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
-	if (IS_ERR(cs)) {
-		err = PTR_ERR(cs);
-		goto err_obj;
-	}
-
- /* Semaphore target: spin until zero */
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_EQ_SDD;
- *cs++ = 0;
- *cs++ = lower_32_bits(vma->node.start);
- *cs++ = upper_32_bits(vma->node.start);
-
- if (*prev) {
- u64 offset = (*prev)->batch->node.start;
-
- /* Terminate the spinner in the next lower priority batch. */
- *cs++ = MI_STORE_DWORD_IMM_GEN4;
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
- *cs++ = 0;
- }
-
- *cs++ = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
-	rq = intel_context_create_request(ce);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto err_obj;
-	}
-
- rq->batch = i915_vma_get(vma);
- i915_request_get(rq);
-
- i915_vma_lock(vma);
- err = i915_request_await_object(rq, vma->obj, false);
- if (!err)
- err = i915_vma_move_to_active(vma, rq, 0);
- if (!err)
- err = rq->engine->emit_bb_start(rq,
- vma->node.start,
- PAGE_SIZE, 0);
- i915_vma_unlock(vma);
- i915_request_add(rq);
- if (err)
- goto err_rq;
-
- i915_gem_object_put(obj);
- intel_context_put(ce);
-
- rq->mock.link.next = &(*prev)->mock.link;
- *prev = rq;
- return 0;
-
-err_rq:
- i915_vma_put(rq->batch);
- i915_request_put(rq);
-err_obj:
- i915_gem_object_put(obj);
-err_ce:
- intel_context_put(ce);
- return err;
-}
-
-static int __live_preempt_ring(struct intel_engine_cs *engine,
- struct igt_spinner *spin,
- int queue_sz, int ring_sz)
-{
- struct intel_context *ce[2] = {};
- struct i915_request *rq;
- struct igt_live_test t;
- int err = 0;
- int n;
-
- if (igt_live_test_begin(&t, engine->i915, __func__, engine->name))
- return -EIO;
-
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- struct intel_context *tmp;
-
- tmp = intel_context_create(engine);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- goto err_ce;
- }
-
- tmp->ring = __intel_context_ring_size(ring_sz);
-
- err = intel_context_pin(tmp);
- if (err) {
- intel_context_put(tmp);
- goto err_ce;
- }
-
- memset32(tmp->ring->vaddr,
- 0xdeadbeef, /* trigger a hang if executed */
- tmp->ring->vma->size / sizeof(u32));
-
- ce[n] = tmp;
- }
-
- rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ce;
- }
-
- i915_request_get(rq);
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(spin, rq)) {
- intel_gt_set_wedged(engine->gt);
- i915_request_put(rq);
- err = -ETIME;
- goto err_ce;
- }
-
-	/* Fill the ring until we cause a wrap */
- n = 0;
- while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
- struct i915_request *tmp;
-
- tmp = intel_context_create_request(ce[0]);
- if (IS_ERR(tmp)) {
- err = PTR_ERR(tmp);
- i915_request_put(rq);
- goto err_ce;
- }
-
- i915_request_add(tmp);
- intel_engine_flush_submission(engine);
- n++;
- }
- intel_engine_flush_submission(engine);
- pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
- engine->name, queue_sz, n,
- ce[0]->ring->size,
- ce[0]->ring->tail,
- ce[0]->ring->emit,
- rq->tail);
- i915_request_put(rq);
-
- /* Create a second request to preempt the first ring */
- rq = intel_context_create_request(ce[1]);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ce;
- }
-
- rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- i915_request_get(rq);
- i915_request_add(rq);
-
- err = wait_for_submit(engine, rq, HZ / 2);
- i915_request_put(rq);
- if (err) {
-		pr_err("%s: preemption request was not submitted\n",
- engine->name);
- err = -ETIME;
- }
-
- pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
- engine->name,
- ce[0]->ring->tail, ce[0]->ring->emit,
- ce[1]->ring->tail, ce[1]->ring->emit);
-
-err_ce:
- intel_engine_flush_submission(engine);
- igt_spinner_end(spin);
- for (n = 0; n < ARRAY_SIZE(ce); n++) {
- if (IS_ERR_OR_NULL(ce[n]))
- break;
-
- intel_context_unpin(ce[n]);
- intel_context_put(ce[n]);
- }
- if (igt_live_test_end(&t))
- err = -EIO;
- return err;
-}
-
-static int live_preempt_ring(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct igt_spinner spin;
- enum intel_engine_id id;
- int err = 0;
-
- /*
-	 * Check that we roll back large chunks of a ring in order to perform
-	 * a preemption event. Similar to live_unlite_ring, but looking at
- * ring size rather than the impact of intel_ring_direction().
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for_each_engine(engine, gt, id) {
- int n;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- st_engine_heartbeat_disable(engine);
-
- for (n = 0; n <= 3; n++) {
- err = __live_preempt_ring(engine, &spin,
- n * SZ_4K / 4, SZ_4K);
- if (err)
- break;
- }
-
- st_engine_heartbeat_enable(engine);
- if (err)
- break;
- }
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_preempt_gang(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- /*
- * Build as long a chain of preempters as we can, with each
- * request higher priority than the last. Once we are ready, we release
-	 * the last batch which then percolates down the chain, each releasing
- * the next oldest in turn. The intent is to simply push as hard as we
- * can with the number of preemptions, trying to exceed narrow HW
- * limits. At a minimum, we insist that we can sort all the user
- * high priority levels into execution order.
- */
-
- for_each_engine(engine, gt, id) {
- struct i915_request *rq = NULL;
- struct igt_live_test t;
- IGT_TIMEOUT(end_time);
- int prio = 0;
- int err = 0;
- u32 *cs;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
- return -EIO;
-
- do {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(prio++),
- };
-
- err = create_gang(engine, &rq);
- if (err)
- break;
-
- /* Submit each spinner at increasing priority */
- engine->schedule(rq, &attr);
- } while (prio <= I915_PRIORITY_MAX &&
- !__igt_timeout(end_time, NULL));
- pr_debug("%s: Preempt chain of %d requests\n",
- engine->name, prio);
-
- /*
- * Such that the last spinner is the highest priority and
- * should execute first. When that spinner completes,
- * it will terminate the next lowest spinner until there
- * are no more spinners and the gang is complete.
- */
- cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
- if (!IS_ERR(cs)) {
- *cs = 0;
- i915_gem_object_unpin_map(rq->batch->obj);
- } else {
- err = PTR_ERR(cs);
- intel_gt_set_wedged(gt);
- }
-
- while (rq) { /* wait for each rq from highest to lowest prio */
- struct i915_request *n = list_next_entry(rq, mock.link);
-
- if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
- struct drm_printer p =
- drm_info_printer(engine->i915->drm.dev);
-
- pr_err("Failed to flush chain of %d requests, at %d\n",
- prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- err = -ETIME;
- }
-
- i915_vma_put(rq->batch);
- i915_request_put(rq);
- rq = n;
- }
-
- if (igt_live_test_end(&t))
- err = -EIO;
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static struct i915_vma *
-create_gpr_user(struct intel_engine_cs *engine,
- struct i915_vma *result,
- unsigned int offset)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- u32 *cs;
- int err;
- int i;
-
- obj = i915_gem_object_create_internal(engine->i915, 4096);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- vma = i915_vma_instance(obj, result->vm, NULL);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- return vma;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err) {
- i915_vma_put(vma);
- return ERR_PTR(err);
- }
-
- cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(cs)) {
- i915_vma_put(vma);
- return ERR_CAST(cs);
- }
-
- /* All GPR are clear for new contexts. We use GPR(0) as a constant */
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = CS_GPR(engine, 0);
- *cs++ = 1;
-
- for (i = 1; i < NUM_GPR; i++) {
- u64 addr;
-
- /*
- * Perform: GPR[i]++
- *
- * As we read and write into the context saved GPR[i], if
- * we restart this batch buffer from an earlier point, we
- * will repeat the increment and store a value > 1.
- */
- *cs++ = MI_MATH(4);
- *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
- *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
- *cs++ = MI_MATH_ADD;
- *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
-
- addr = result->node.start + offset + i * sizeof(*cs);
- *cs++ = MI_STORE_REGISTER_MEM_GEN8;
- *cs++ = CS_GPR(engine, 2 * i);
- *cs++ = lower_32_bits(addr);
- *cs++ = upper_32_bits(addr);
-
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_GTE_SDD;
- *cs++ = i;
- *cs++ = lower_32_bits(result->node.start);
- *cs++ = upper_32_bits(result->node.start);
- }
-
- *cs++ = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- return vma;
-}
-
-static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int err;
-
- obj = i915_gem_object_create_internal(gt->i915, sz);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- return vma;
- }
-
- err = i915_ggtt_pin(vma, NULL, 0, 0);
- if (err) {
- i915_vma_put(vma);
- return ERR_PTR(err);
- }
-
- return vma;
-}
-
-static struct i915_request *
-create_gpr_client(struct intel_engine_cs *engine,
- struct i915_vma *global,
- unsigned int offset)
-{
- struct i915_vma *batch, *vma;
- struct intel_context *ce;
- struct i915_request *rq;
- int err;
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
-
- vma = i915_vma_instance(global->obj, ce->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_ce;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto out_ce;
-
- batch = create_gpr_user(engine, vma, offset);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto out_vma;
- }
-
- rq = intel_context_create_request(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_batch;
- }
-
- i915_vma_lock(vma);
- err = i915_request_await_object(rq, vma->obj, false);
- if (!err)
- err = i915_vma_move_to_active(vma, rq, 0);
- i915_vma_unlock(vma);
-
- i915_vma_lock(batch);
- if (!err)
- err = i915_request_await_object(rq, batch->obj, false);
- if (!err)
- err = i915_vma_move_to_active(batch, rq, 0);
- if (!err)
- err = rq->engine->emit_bb_start(rq,
- batch->node.start,
- PAGE_SIZE, 0);
- i915_vma_unlock(batch);
- i915_vma_unpin(batch);
-
- if (!err)
- i915_request_get(rq);
- i915_request_add(rq);
-
-out_batch:
- i915_vma_put(batch);
-out_vma:
- i915_vma_unpin(vma);
-out_ce:
- intel_context_put(ce);
- return err ? ERR_PTR(err) : rq;
-}
-
-static int preempt_user(struct intel_engine_cs *engine,
- struct i915_vma *global,
- int id)
-{
- struct i915_sched_attr attr = {
- .priority = I915_PRIORITY_MAX
- };
- struct i915_request *rq;
- int err = 0;
- u32 *cs;
-
- rq = intel_engine_create_kernel_request(engine);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
-
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_ggtt_offset(global);
- *cs++ = 0;
- *cs++ = id;
-
- intel_ring_advance(rq, cs);
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- engine->schedule(rq, &attr);
-
- if (i915_request_wait(rq, 0, HZ / 2) < 0)
- err = -ETIME;
- i915_request_put(rq);
-
- return err;
-}
-
-static int live_preempt_user(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *engine;
- struct i915_vma *global;
- enum intel_engine_id id;
- u32 *result;
- int err = 0;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- /*
- * In our other tests, we look at preemption in carefully
- * controlled conditions in the ringbuffer. Since most of the
- * time is spent in user batches, most of our preemptions naturally
- * occur there. We want to verify that when we preempt inside a batch
- * we continue on from the current instruction and do not roll back
- * to the start, or another earlier arbitration point.
- *
- * To verify this, we create a batch which is a mixture of
- * MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with
- * a few preempting contexts thrown into the mix, we look for any
- * repeated instructions (which show up as incorrect values).
- */
-
- global = create_global(gt, 4096);
- if (IS_ERR(global))
- return PTR_ERR(global);
-
- result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
- if (IS_ERR(result)) {
- i915_vma_unpin_and_release(&global, 0);
- return PTR_ERR(result);
- }
-
- for_each_engine(engine, gt, id) {
- struct i915_request *client[3] = {};
- struct igt_live_test t;
- int i;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
- continue; /* we need per-context GPR */
-
- if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
- err = -EIO;
- break;
- }
-
- memset(result, 0, 4096);
-
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- struct i915_request *rq;
-
- rq = create_gpr_client(engine, global,
- NUM_GPR * i * sizeof(u32));
- if (IS_ERR(rq))
- goto end_test;
-
- client[i] = rq;
- }
-
- /* Continuously preempt the set of 3 running contexts */
- for (i = 1; i <= NUM_GPR; i++) {
- err = preempt_user(engine, global, i);
- if (err)
- goto end_test;
- }
-
- if (READ_ONCE(result[0]) != NUM_GPR) {
- pr_err("%s: Failed to release semaphore\n",
- engine->name);
- err = -EIO;
- goto end_test;
- }
-
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- int gpr;
-
- if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
- err = -ETIME;
- goto end_test;
- }
-
- for (gpr = 1; gpr < NUM_GPR; gpr++) {
- if (result[NUM_GPR * i + gpr] != 1) {
- pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
- engine->name,
- i, gpr, result[NUM_GPR * i + gpr]);
- err = -EINVAL;
- goto end_test;
- }
- }
- }
-
-end_test:
- for (i = 0; i < ARRAY_SIZE(client); i++) {
- if (!client[i])
- break;
-
- i915_request_put(client[i]);
- }
-
- /* Flush the semaphores on error */
- smp_store_mb(result[0], -1);
- if (igt_live_test_end(&t))
- err = -EIO;
- if (err)
- break;
- }
-
- i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
- return err;
-}
-
-static int live_preempt_timeout(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct igt_spinner spin_lo;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- /*
- * Check that we force preemption to occur by cancelling the previous
- * context if it refuses to yield the GPU.
- */
- if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
- return 0;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- if (igt_spinner_init(&spin_lo, gt))
- return -ENOMEM;
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- goto err_spin_lo;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
- for_each_engine(engine, gt, id) {
- unsigned long saved_timeout;
- struct i915_request *rq;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_NOOP); /* preemption disabled */
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_lo, rq)) {
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = igt_request_alloc(ctx_hi, engine);
- if (IS_ERR(rq)) {
- igt_spinner_end(&spin_lo);
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- /* Flush the previous CS ack before changing timeouts */
- while (READ_ONCE(engine->execlists.pending[0]))
- cpu_relax();
-
- saved_timeout = engine->props.preempt_timeout_ms;
- engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */
-
- i915_request_get(rq);
- i915_request_add(rq);
-
- intel_engine_flush_submission(engine);
- engine->props.preempt_timeout_ms = saved_timeout;
-
- if (i915_request_wait(rq, 0, HZ / 10) < 0) {
- intel_gt_set_wedged(gt);
- i915_request_put(rq);
- err = -ETIME;
- goto err_ctx_lo;
- }
-
- igt_spinner_end(&spin_lo);
- i915_request_put(rq);
- }
-
- err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
-err_spin_lo:
- igt_spinner_fini(&spin_lo);
- return err;
-}
-
-static int random_range(struct rnd_state *rnd, int min, int max)
-{
- return i915_prandom_u32_max_state(max - min, rnd) + min;
-}
-
-static int random_priority(struct rnd_state *rnd)
-{
- return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
-}
-
-struct preempt_smoke {
- struct intel_gt *gt;
- struct i915_gem_context **contexts;
- struct intel_engine_cs *engine;
- struct drm_i915_gem_object *batch;
- unsigned int ncontext;
- struct rnd_state prng;
- unsigned long count;
-};
-
-static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
-{
- return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
- &smoke->prng)];
-}
-
-static int smoke_submit(struct preempt_smoke *smoke,
- struct i915_gem_context *ctx, int prio,
- struct drm_i915_gem_object *batch)
-{
- struct i915_request *rq;
- struct i915_vma *vma = NULL;
- int err = 0;
-
- if (batch) {
- struct i915_address_space *vm;
-
- vm = i915_gem_context_get_vm_rcu(ctx);
- vma = i915_vma_instance(batch, vm, NULL);
- i915_vm_put(vm);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- return err;
- }
-
- ctx->sched.priority = prio;
-
- rq = igt_request_alloc(ctx, smoke->engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto unpin;
- }
-
- if (vma) {
- i915_vma_lock(vma);
- err = i915_request_await_object(rq, vma->obj, false);
- if (!err)
- err = i915_vma_move_to_active(vma, rq, 0);
- if (!err)
- err = rq->engine->emit_bb_start(rq,
- vma->node.start,
- PAGE_SIZE, 0);
- i915_vma_unlock(vma);
- }
-
- i915_request_add(rq);
-
-unpin:
- if (vma)
- i915_vma_unpin(vma);
-
- return err;
-}
-
-static int smoke_crescendo_thread(void *arg)
-{
- struct preempt_smoke *smoke = arg;
- IGT_TIMEOUT(end_time);
- unsigned long count;
-
- count = 0;
- do {
- struct i915_gem_context *ctx = smoke_context(smoke);
- int err;
-
- err = smoke_submit(smoke,
- ctx, count % I915_PRIORITY_MAX,
- smoke->batch);
- if (err)
- return err;
-
- count++;
- } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
-
- smoke->count = count;
- return 0;
-}
-
-static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
-#define BATCH BIT(0)
-{
- struct task_struct *tsk[I915_NUM_ENGINES] = {};
- struct preempt_smoke arg[I915_NUM_ENGINES];
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- unsigned long count;
- int err = 0;
-
- for_each_engine(engine, smoke->gt, id) {
- arg[id] = *smoke;
- arg[id].engine = engine;
- if (!(flags & BATCH))
- arg[id].batch = NULL;
- arg[id].count = 0;
-
- tsk[id] = kthread_run(smoke_crescendo_thread, &arg,
- "igt/smoke:%d", id);
- if (IS_ERR(tsk[id])) {
- err = PTR_ERR(tsk[id]);
- break;
- }
- get_task_struct(tsk[id]);
- }
-
- yield(); /* start all threads before we kthread_stop() */
-
- count = 0;
- for_each_engine(engine, smoke->gt, id) {
- int status;
-
- if (IS_ERR_OR_NULL(tsk[id]))
- continue;
-
- status = kthread_stop(tsk[id]);
- if (status && !err)
- err = status;
-
- count += arg[id].count;
-
- put_task_struct(tsk[id]);
- }
-
- pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
- count, flags, smoke->gt->info.num_engines, smoke->ncontext);
- return 0;
-}
-
-static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
-{
- enum intel_engine_id id;
- IGT_TIMEOUT(end_time);
- unsigned long count;
-
- count = 0;
- do {
- for_each_engine(smoke->engine, smoke->gt, id) {
- struct i915_gem_context *ctx = smoke_context(smoke);
- int err;
-
- err = smoke_submit(smoke,
- ctx, random_priority(&smoke->prng),
- flags & BATCH ? smoke->batch : NULL);
- if (err)
- return err;
-
- count++;
- }
- } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
-
- pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
- count, flags, smoke->gt->info.num_engines, smoke->ncontext);
- return 0;
-}
-
-static int live_preempt_smoke(void *arg)
-{
- struct preempt_smoke smoke = {
- .gt = arg,
- .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
- .ncontext = 256,
- };
- const unsigned int phase[] = { 0, BATCH };
- struct igt_live_test t;
- int err = -ENOMEM;
- u32 *cs;
- int n;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(smoke.gt->i915))
- return 0;
-
- smoke.contexts = kmalloc_array(smoke.ncontext,
- sizeof(*smoke.contexts),
- GFP_KERNEL);
- if (!smoke.contexts)
- return -ENOMEM;
-
- smoke.batch =
- i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
- if (IS_ERR(smoke.batch)) {
- err = PTR_ERR(smoke.batch);
- goto err_free;
- }
-
- cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- goto err_batch;
- }
- for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
- cs[n] = MI_ARB_CHECK;
- cs[n] = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(smoke.batch);
- i915_gem_object_unpin_map(smoke.batch);
-
- if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
- err = -EIO;
- goto err_batch;
- }
-
- for (n = 0; n < smoke.ncontext; n++) {
- smoke.contexts[n] = kernel_context(smoke.gt->i915);
- if (!smoke.contexts[n])
- goto err_ctx;
- }
-
- for (n = 0; n < ARRAY_SIZE(phase); n++) {
- err = smoke_crescendo(&smoke, phase[n]);
- if (err)
- goto err_ctx;
-
- err = smoke_random(&smoke, phase[n]);
- if (err)
- goto err_ctx;
- }
-
-err_ctx:
- if (igt_live_test_end(&t))
- err = -EIO;
-
- for (n = 0; n < smoke.ncontext; n++) {
- if (!smoke.contexts[n])
- break;
- kernel_context_close(smoke.contexts[n]);
- }
-
-err_batch:
- i915_gem_object_put(smoke.batch);
-err_free:
- kfree(smoke.contexts);
-
- return err;
-}
-
-static int nop_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling,
- unsigned int nctx,
- unsigned int flags)
-#define CHAIN BIT(0)
-{
- IGT_TIMEOUT(end_time);
- struct i915_request *request[16] = {};
- struct intel_context *ve[16];
- unsigned long n, prime, nc;
- struct igt_live_test t;
- ktime_t times[2] = {};
- int err;
-
- GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
-
- for (n = 0; n < nctx; n++) {
- ve[n] = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve[n])) {
- err = PTR_ERR(ve[n]);
- nctx = n;
- goto out;
- }
-
- err = intel_context_pin(ve[n]);
- if (err) {
- intel_context_put(ve[n]);
- nctx = n;
- goto out;
- }
- }
-
- err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
- if (err)
- goto out;
-
- for_each_prime_number_from(prime, 1, 8192) {
- times[1] = ktime_get_raw();
-
- if (flags & CHAIN) {
- for (nc = 0; nc < nctx; nc++) {
- for (n = 0; n < prime; n++) {
- struct i915_request *rq;
-
- rq = i915_request_create(ve[nc]);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- if (request[nc])
- i915_request_put(request[nc]);
- request[nc] = i915_request_get(rq);
- i915_request_add(rq);
- }
- }
- } else {
- for (n = 0; n < prime; n++) {
- for (nc = 0; nc < nctx; nc++) {
- struct i915_request *rq;
-
- rq = i915_request_create(ve[nc]);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- if (request[nc])
- i915_request_put(request[nc]);
- request[nc] = i915_request_get(rq);
- i915_request_add(rq);
- }
- }
- }
-
- for (nc = 0; nc < nctx; nc++) {
- if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
- pr_err("%s(%s): wait for %llx:%lld timed out\n",
- __func__, ve[0]->engine->name,
- request[nc]->fence.context,
- request[nc]->fence.seqno);
-
- GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
- __func__, ve[0]->engine->name,
- request[nc]->fence.context,
- request[nc]->fence.seqno);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- break;
- }
- }
-
- times[1] = ktime_sub(ktime_get_raw(), times[1]);
- if (prime == 1)
- times[0] = times[1];
-
- for (nc = 0; nc < nctx; nc++) {
- i915_request_put(request[nc]);
- request[nc] = NULL;
- }
-
- if (__igt_timeout(end_time, NULL))
- break;
- }
-
- err = igt_live_test_end(&t);
- if (err)
- goto out;
-
- pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
- nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
- prime, div64_u64(ktime_to_ns(times[1]), prime));
-
-out:
- if (igt_flush_test(gt->i915))
- err = -EIO;
-
- for (nc = 0; nc < nctx; nc++) {
- i915_request_put(request[nc]);
- intel_context_unpin(ve[nc]);
- intel_context_put(ve[nc]);
- }
- return err;
-}
-
-static unsigned int
-__select_siblings(struct intel_gt *gt,
- unsigned int class,
- struct intel_engine_cs **siblings,
- bool (*filter)(const struct intel_engine_cs *))
-{
- unsigned int n = 0;
- unsigned int inst;
-
- for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!gt->engine_class[class][inst])
- continue;
-
- if (filter && !filter(gt->engine_class[class][inst]))
- continue;
-
- siblings[n++] = gt->engine_class[class][inst];
- }
-
- return n;
-}
-
-static unsigned int
-select_siblings(struct intel_gt *gt,
- unsigned int class,
- struct intel_engine_cs **siblings)
-{
- return __select_siblings(gt, class, siblings, NULL);
-}
-
-static int live_virtual_engine(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- unsigned int class;
- int err;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- for_each_engine(engine, gt, id) {
- err = nop_virtual_engine(gt, &engine, 1, 1, 0);
- if (err) {
- pr_err("Failed to wrap engine %s: err=%d\n",
- engine->name, err);
- return err;
- }
- }
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- int nsibling, n;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- for (n = 1; n <= nsibling + 1; n++) {
- err = nop_virtual_engine(gt, siblings, nsibling,
- n, 0);
- if (err)
- return err;
- }
-
- err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int mask_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
- struct intel_context *ve;
- struct igt_live_test t;
- unsigned int n;
- int err;
-
- /*
- * Check that by setting the execution mask on a request, we can
- * restrict it to our desired engine within the virtual engine.
- */
-
- ve = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve)) {
- err = PTR_ERR(ve);
- goto out_close;
- }
-
- err = intel_context_pin(ve);
- if (err)
- goto out_put;
-
- err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
- if (err)
- goto out_unpin;
-
- for (n = 0; n < nsibling; n++) {
- request[n] = i915_request_create(ve);
- if (IS_ERR(request[n])) {
- err = PTR_ERR(request[n]);
- nsibling = n;
- goto out;
- }
-
- /* Reverse order as it's more likely to be unnatural */
- request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
-
- i915_request_get(request[n]);
- i915_request_add(request[n]);
- }
-
- for (n = 0; n < nsibling; n++) {
- if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
- pr_err("%s(%s): wait for %llx:%lld timed out\n",
- __func__, ve->engine->name,
- request[n]->fence.context,
- request[n]->fence.seqno);
-
- GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
- __func__, ve->engine->name,
- request[n]->fence.context,
- request[n]->fence.seqno);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto out;
- }
-
- if (request[n]->engine != siblings[nsibling - n - 1]) {
- pr_err("Executed on wrong sibling '%s', expected '%s'\n",
- request[n]->engine->name,
- siblings[nsibling - n - 1]->name);
- err = -EINVAL;
- goto out;
- }
- }
-
- err = igt_live_test_end(&t);
-out:
- if (igt_flush_test(gt->i915))
- err = -EIO;
-
- for (n = 0; n < nsibling; n++)
- i915_request_put(request[n]);
-
-out_unpin:
- intel_context_unpin(ve);
-out_put:
- intel_context_put(ve);
-out_close:
- return err;
-}
-
-static int live_virtual_mask(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
- int err;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- unsigned int nsibling;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- err = mask_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int slicein_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- const long timeout = slice_timeout(siblings[0]);
- struct intel_context *ce;
- struct i915_request *rq;
- struct igt_spinner spin;
- unsigned int n;
- int err = 0;
-
- /*
- * Virtual requests must take part in timeslicing on the target engines.
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- for (n = 0; n < nsibling; n++) {
- ce = intel_context_create(siblings[n]);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_add(rq);
- }
-
- ce = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_get(rq);
- i915_request_add(rq);
- if (i915_request_wait(rq, 0, timeout) < 0) {
- GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
- __func__, rq->engine->name);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- }
- i915_request_put(rq);
-
-out:
- igt_spinner_end(&spin);
- if (igt_flush_test(gt->i915))
- err = -EIO;
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int sliceout_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- const long timeout = slice_timeout(siblings[0]);
- struct intel_context *ce;
- struct i915_request *rq;
- struct igt_spinner spin;
- unsigned int n;
- int err = 0;
-
- /*
- * Virtual requests must allow others a fair timeslice.
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- /* XXX We do not handle oversubscription and fairness with normal rq */
- for (n = 0; n < nsibling; n++) {
- ce = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_add(rq);
- }
-
- for (n = 0; !err && n < nsibling; n++) {
- ce = intel_context_create(siblings[n]);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- i915_request_get(rq);
- i915_request_add(rq);
- if (i915_request_wait(rq, 0, timeout) < 0) {
- GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
- __func__, siblings[n]->name);
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- }
- i915_request_put(rq);
- }
-
-out:
- igt_spinner_end(&spin);
- if (igt_flush_test(gt->i915))
- err = -EIO;
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_virtual_slice(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
- int err;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- unsigned int nsibling;
-
- nsibling = __select_siblings(gt, class, siblings,
- intel_engine_has_timeslices);
- if (nsibling < 2)
- continue;
-
- err = slicein_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
-
- err = sliceout_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int preserved_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- struct i915_request *last = NULL;
- struct intel_context *ve;
- struct i915_vma *scratch;
- struct igt_live_test t;
- unsigned int n;
- int err = 0;
- u32 *cs;
-
- scratch = create_scratch(siblings[0]->gt);
- if (IS_ERR(scratch))
- return PTR_ERR(scratch);
-
- err = i915_vma_sync(scratch);
- if (err)
- goto out_scratch;
-
- ve = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve)) {
- err = PTR_ERR(ve);
- goto out_scratch;
- }
-
- err = intel_context_pin(ve);
- if (err)
- goto out_put;
-
- err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
- if (err)
- goto out_unpin;
-
- for (n = 0; n < NUM_GPR_DW; n++) {
- struct intel_engine_cs *engine = siblings[n % nsibling];
- struct i915_request *rq;
-
- rq = i915_request_create(ve);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_end;
- }
-
- i915_request_put(last);
- last = i915_request_get(rq);
-
- cs = intel_ring_begin(rq, 8);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- err = PTR_ERR(cs);
- goto out_end;
- }
-
- *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
- *cs++ = CS_GPR(engine, n);
- *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
- *cs++ = 0;
-
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
- *cs++ = n + 1;
-
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- /* Restrict this request to run on a particular engine */
- rq->execution_mask = engine->mask;
- i915_request_add(rq);
- }
-
- if (i915_request_wait(last, 0, HZ / 5) < 0) {
- err = -ETIME;
- goto out_end;
- }
-
- cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- goto out_end;
- }
-
- for (n = 0; n < NUM_GPR_DW; n++) {
- if (cs[n] != n) {
- pr_err("Incorrect value[%d] found for GPR[%d]\n",
- cs[n], n);
- err = -EINVAL;
- break;
- }
- }
-
- i915_gem_object_unpin_map(scratch->obj);
-
-out_end:
- if (igt_live_test_end(&t))
- err = -EIO;
- i915_request_put(last);
-out_unpin:
- intel_context_unpin(ve);
-out_put:
- intel_context_put(ve);
-out_scratch:
- i915_vma_unpin_and_release(&scratch, 0);
- return err;
-}
-
-static int live_virtual_preserved(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
-
- /*
- * Check that the context image retains non-privileged (user) registers
- * from one engine to the next. For this we check that the CS_GPR
- * are preserved.
- */
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- /* As we use CS_GPR we cannot run before they existed on all engines. */
- if (INTEL_GEN(gt->i915) < 9)
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- int nsibling, err;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- err = preserved_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int bond_virtual_engine(struct intel_gt *gt,
- unsigned int class,
- struct intel_engine_cs **siblings,
- unsigned int nsibling,
- unsigned int flags)
-#define BOND_SCHEDULE BIT(0)
-{
- struct intel_engine_cs *master;
- struct i915_request *rq[16];
- enum intel_engine_id id;
- struct igt_spinner spin;
- unsigned long n;
- int err;
-
- /*
- * A set of bonded requests is intended to be run concurrently
- * across a number of engines. We use one request per-engine
- * and a magic fence to schedule each of the bonded requests
- * at the same time. A consequence of our current scheduler is that
- * we only move requests to the HW ready queue when the request
- * becomes ready, that is when all of its prerequisite fences have
- * been signaled. As one of those fences is the master submit fence,
- * there is a delay on all secondary fences as the HW may be
- * currently busy. Equally, as all the requests are independent,
- * they may have other fences that delay individual request
- * submission to HW. Ergo, we do not guarantee that all requests are
- * immediately submitted to HW at the same time, just that if the
- * rules are abided by, they are ready at the same time as the
- * first is submitted. Userspace can embed semaphores in its batch
- * to ensure parallel execution of its phases as it requires.
- * Though naturally it gets requested that perhaps the scheduler should
- * take care of parallel execution, even across preemption events on
- * different HW. (The proper answer is of course "lalalala".)
- *
- * With the submit-fence, we have identified three possible phases
- * of synchronisation depending on the master fence: queued (not
- * ready), executing, and signaled. The first two are quite simple
- * and checked below. However, the signaled master fence handling is
- * contentious. Currently we do not distinguish between a signaled
- * fence and an expired fence, as once signaled it does not convey
- * any information about the previous execution. It may even be freed
- * and hence checking later it may not exist at all. Ergo we currently
- * do not apply the bonding constraint for an already signaled fence,
- * as our expectation is that it should not constrain the secondaries
- * and is outside of the scope of the bonded request API (i.e. all
- * userspace requests are meant to be running in parallel). As
- * it imposes no constraint, and is effectively a no-op, we do not
- * check below as normal execution flows are checked extensively above.
- *
- * XXX Is the degenerate handling of signaled submit fences the
- * expected behaviour for userspace?
- */
-
- GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- err = 0;
- rq[0] = ERR_PTR(-ENOMEM);
- for_each_engine(master, gt, id) {
- struct i915_sw_fence fence = {};
- struct intel_context *ce;
-
- if (master->class == class)
- continue;
-
- ce = intel_context_create(master);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
-
- rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP);
- intel_context_put(ce);
- if (IS_ERR(rq[0])) {
- err = PTR_ERR(rq[0]);
- goto out;
- }
- i915_request_get(rq[0]);
-
- if (flags & BOND_SCHEDULE) {
- onstack_fence_init(&fence);
- err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
- &fence,
- GFP_KERNEL);
- }
-
- i915_request_add(rq[0]);
- if (err < 0)
- goto out;
-
- if (!(flags & BOND_SCHEDULE) &&
- !igt_wait_for_spinner(&spin, rq[0])) {
- err = -EIO;
- goto out;
- }
-
- for (n = 0; n < nsibling; n++) {
- struct intel_context *ve;
-
- ve = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve)) {
- err = PTR_ERR(ve);
- onstack_fence_fini(&fence);
- goto out;
- }
-
- err = intel_virtual_engine_attach_bond(ve->engine,
- master,
- siblings[n]);
- if (err) {
- intel_context_put(ve);
- onstack_fence_fini(&fence);
- goto out;
- }
-
- err = intel_context_pin(ve);
- intel_context_put(ve);
- if (err) {
- onstack_fence_fini(&fence);
- goto out;
- }
-
- rq[n + 1] = i915_request_create(ve);
- intel_context_unpin(ve);
- if (IS_ERR(rq[n + 1])) {
- err = PTR_ERR(rq[n + 1]);
- onstack_fence_fini(&fence);
- goto out;
- }
- i915_request_get(rq[n + 1]);
-
- err = i915_request_await_execution(rq[n + 1],
- &rq[0]->fence,
- ve->engine->bond_execute);
- i915_request_add(rq[n + 1]);
- if (err < 0) {
- onstack_fence_fini(&fence);
- goto out;
- }
- }
- onstack_fence_fini(&fence);
- intel_engine_flush_submission(master);
- igt_spinner_end(&spin);
-
- if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
- pr_err("Master request did not execute (on %s)!\n",
- rq[0]->engine->name);
- err = -EIO;
- goto out;
- }
-
- for (n = 0; n < nsibling; n++) {
- if (i915_request_wait(rq[n + 1], 0,
- MAX_SCHEDULE_TIMEOUT) < 0) {
- err = -EIO;
- goto out;
- }
-
- if (rq[n + 1]->engine != siblings[n]) {
- pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
- siblings[n]->name,
- rq[n + 1]->engine->name,
- rq[0]->engine->name);
- err = -EINVAL;
- goto out;
- }
- }
-
- for (n = 0; !IS_ERR(rq[n]); n++)
- i915_request_put(rq[n]);
- rq[0] = ERR_PTR(-ENOMEM);
- }
-
-out:
- for (n = 0; !IS_ERR(rq[n]); n++)
- i915_request_put(rq[n]);
- if (igt_flush_test(gt->i915))
- err = -EIO;
-
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_virtual_bond(void *arg)
-{
- static const struct phase {
- const char *name;
- unsigned int flags;
- } phases[] = {
- { "", 0 },
- { "schedule", BOND_SCHEDULE },
- { },
- };
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
- int err;
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- const struct phase *p;
- int nsibling;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- for (p = phases; p->name; p++) {
- err = bond_virtual_engine(gt,
- class, siblings, nsibling,
- p->flags);
- if (err) {
- pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
- __func__, p->name, class, nsibling, err);
- return err;
- }
- }
- }
-
- return 0;
-}
-
-static int reset_virtual_engine(struct intel_gt *gt,
- struct intel_engine_cs **siblings,
- unsigned int nsibling)
-{
- struct intel_engine_cs *engine;
- struct intel_context *ve;
- struct igt_spinner spin;
- struct i915_request *rq;
- unsigned int n;
- int err = 0;
-
- /*
- * In order to support offline error capture for fast preempt reset,
- * we need to decouple the guilty request and ensure that it and its
- * descendants are not executed while the capture is in progress.
- */
-
- if (igt_spinner_init(&spin, gt))
- return -ENOMEM;
-
- ve = intel_execlists_create_virtual(siblings, nsibling);
- if (IS_ERR(ve)) {
- err = PTR_ERR(ve);
- goto out_spin;
- }
-
- for (n = 0; n < nsibling; n++)
- st_engine_heartbeat_disable(siblings[n]);
-
- rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_heartbeat;
- }
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
- intel_gt_set_wedged(gt);
- err = -ETIME;
- goto out_heartbeat;
- }
-
- engine = rq->engine;
- GEM_BUG_ON(engine == ve->engine);
-
- /* Take ownership of the reset and tasklet */
- if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &gt->reset.flags)) {
- intel_gt_set_wedged(gt);
- err = -EBUSY;
- goto out_heartbeat;
- }
- tasklet_disable(&engine->execlists.tasklet);
-
- engine->execlists.tasklet.func(engine->execlists.tasklet.data);
- GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
-
- /* Fake a preemption event; failed of course */
- spin_lock_irq(&engine->active.lock);
- __unwind_incomplete_requests(engine);
- spin_unlock_irq(&engine->active.lock);
- GEM_BUG_ON(rq->engine != ve->engine);
-
- /* Reset the engine while keeping our active request on hold */
- execlists_hold(engine, rq);
- GEM_BUG_ON(!i915_request_on_hold(rq));
-
- intel_engine_reset(engine, NULL);
- GEM_BUG_ON(rq->fence.error != -EIO);
-
- /* Release our grasp on the engine, letting CS flow again */
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
-
- /* Check that we do not resubmit the held request */
- i915_request_get(rq);
- if (!i915_request_wait(rq, 0, HZ / 5)) {
- pr_err("%s: on hold request completed!\n",
- engine->name);
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto out_rq;
- }
- GEM_BUG_ON(!i915_request_on_hold(rq));
-
- /* But is resubmitted on release */
- execlists_unhold(engine, rq);
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- pr_err("%s: held request did not complete!\n",
- engine->name);
- intel_gt_set_wedged(gt);
- err = -ETIME;
- }
-
-out_rq:
- i915_request_put(rq);
-out_heartbeat:
- for (n = 0; n < nsibling; n++)
- st_engine_heartbeat_enable(siblings[n]);
-
- intel_context_put(ve);
-out_spin:
- igt_spinner_fini(&spin);
- return err;
-}
-
-static int live_virtual_reset(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- unsigned int class;
-
- /*
- * Check that we handle a reset event within a virtual engine.
- * Only the physical engine is reset, but we have to check the flow
- * of the virtual requests around the reset, and make sure it is not
- * forgotten.
- */
-
- if (intel_uc_uses_guc_submission(&gt->uc))
- return 0;
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
- int nsibling, err;
-
- nsibling = select_siblings(gt, class, siblings);
- if (nsibling < 2)
- continue;
-
- err = reset_virtual_engine(gt, siblings, nsibling);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-int intel_execlists_live_selftests(struct drm_i915_private *i915)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(live_sanitycheck),
- SUBTEST(live_unlite_switch),
- SUBTEST(live_unlite_preempt),
- SUBTEST(live_unlite_ring),
- SUBTEST(live_pin_rewind),
- SUBTEST(live_hold_reset),
- SUBTEST(live_error_interrupt),
- SUBTEST(live_timeslice_preempt),
- SUBTEST(live_timeslice_rewind),
- SUBTEST(live_timeslice_queue),
- SUBTEST(live_timeslice_nopreempt),
- SUBTEST(live_busywait_preempt),
- SUBTEST(live_preempt),
- SUBTEST(live_late_preempt),
- SUBTEST(live_nopreempt),
- SUBTEST(live_preempt_cancel),
- SUBTEST(live_suppress_self_preempt),
- SUBTEST(live_chain_preempt),
- SUBTEST(live_preempt_ring),
- SUBTEST(live_preempt_gang),
- SUBTEST(live_preempt_timeout),
- SUBTEST(live_preempt_user),
- SUBTEST(live_preempt_smoke),
- SUBTEST(live_virtual_engine),
- SUBTEST(live_virtual_mask),
- SUBTEST(live_virtual_preserved),
- SUBTEST(live_virtual_slice),
- SUBTEST(live_virtual_bond),
- SUBTEST(live_virtual_reset),
- };
-
- if (!HAS_EXECLISTS(i915))
- return 0;
-
- if (intel_gt_is_wedged(&i915->gt))
- return 0;
-
- return intel_gt_live_subtests(tests, &i915->gt);
-}
-
static int emit_semaphore_signal(struct intel_context *ce, void *slot)
{
const u32 offset =
@@ -4775,9 +139,10 @@ static int live_lrc_layout(void *arg)
* match the layout saved by HW.
*/
- lrc = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ lrc = (u32 *)__get_free_page(GFP_KERNEL); /* requires page alignment */
if (!lrc)
return -ENOMEM;
+ GEM_BUG_ON(offset_in_page(lrc));
err = 0;
for_each_engine(engine, gt, id) {
@@ -4794,15 +159,12 @@ static int live_lrc_layout(void *arg)
}
hw += LRC_STATE_OFFSET / sizeof(*hw);
- execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE),
- engine->kernel_context,
- engine,
- engine->kernel_context->ring,
- true);
+ __lrc_init_regs(memset(lrc, POISON_INUSE, PAGE_SIZE),
+ engine->kernel_context, engine, true);
dw = 0;
do {
- u32 lri = hw[dw];
+ u32 lri = READ_ONCE(hw[dw]);
if (lri == 0) {
dw++;
@@ -4835,9 +197,11 @@ static int live_lrc_layout(void *arg)
dw++;
while (lri) {
- if (hw[dw] != lrc[dw]) {
+ u32 offset = READ_ONCE(hw[dw]);
+
+ if (offset != lrc[dw]) {
pr_err("%s: Different registers found at dword %d, expected %x, found %x\n",
- engine->name, dw, hw[dw], lrc[dw]);
+ engine->name, dw, offset, lrc[dw]);
err = -EINVAL;
break;
}
@@ -4849,7 +213,7 @@ static int live_lrc_layout(void *arg)
dw += 2;
lri -= 2;
}
- } while ((lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+ } while (!err && (lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
if (err) {
pr_info("%s: HW register image:\n", engine->name);
@@ -4864,7 +228,7 @@ static int live_lrc_layout(void *arg)
break;
}
- kfree(lrc);
+ free_page((unsigned long)lrc);
return err;
}
@@ -5249,6 +613,10 @@ static int __live_lrc_gpr(struct intel_engine_cs *engine,
err = emit_semaphore_signal(engine->kernel_context, slot);
if (err)
goto err_rq;
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err)
+ goto err_rq;
} else {
slot[0] = 1;
wmb();
@@ -6242,16 +1610,17 @@ static void garbage_reset(struct intel_engine_cs *engine,
const unsigned int bit = I915_RESET_ENGINE + engine->id;
unsigned long *lock = &engine->gt->reset.flags;
- if (test_and_set_bit(bit, lock))
- return;
-
- tasklet_disable(&engine->execlists.tasklet);
+ local_bh_disable();
+ if (!test_and_set_bit(bit, lock)) {
+ tasklet_disable(&engine->execlists.tasklet);
- if (!rq->fence.error)
- intel_engine_reset(engine, NULL);
+ if (!rq->fence.error)
+ __intel_engine_reset_bh(engine, NULL);
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(bit, lock);
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+ }
+ local_bh_enable();
}
static struct i915_request *garbage(struct intel_context *ce,
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index b25eba50c88e..cf373c72359e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -5,6 +5,7 @@
*/
#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
#include "i915_selftest.h"
#include "gem/selftests/mock_context.h"
@@ -56,33 +57,6 @@ static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
return err;
}
-static struct i915_vma *create_scratch(struct intel_gt *gt)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int err;
-
- obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
-
- vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- return vma;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
- if (err) {
- i915_gem_object_put(obj);
- return ERR_PTR(err);
- }
-
- return vma;
-}
-
static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
struct drm_i915_mocs_table table;
@@ -101,7 +75,7 @@ static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
arg->mocs = table;
- arg->scratch = create_scratch(gt);
+ arg->scratch = __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
if (IS_ERR(arg->scratch))
return PTR_ERR(arg->scratch);
@@ -125,7 +99,7 @@ static void live_mocs_fini(struct live_mocs *arg)
static int read_regs(struct i915_request *rq,
u32 addr, unsigned int count,
- uint32_t *offset)
+ u32 *offset)
{
unsigned int i;
u32 *cs;
@@ -153,7 +127,7 @@ static int read_regs(struct i915_request *rq,
static int read_mocs_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table,
- uint32_t *offset)
+ u32 *offset)
{
u32 addr;
@@ -167,7 +141,7 @@ static int read_mocs_table(struct i915_request *rq,
static int read_l3cc_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table,
- uint32_t *offset)
+ u32 *offset)
{
u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
@@ -176,7 +150,7 @@ static int read_l3cc_table(struct i915_request *rq,
static int check_mocs_table(struct intel_engine_cs *engine,
const struct drm_i915_mocs_table *table,
- uint32_t **vaddr)
+ u32 **vaddr)
{
unsigned int i;
u32 expect;
@@ -205,7 +179,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
static int check_l3cc_table(struct intel_engine_cs *engine,
const struct drm_i915_mocs_table *table,
- uint32_t **vaddr)
+ u32 **vaddr)
{
/* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
@@ -361,29 +335,34 @@ static int active_engine_reset(struct intel_context *ce,
static int __live_mocs_reset(struct live_mocs *mocs,
struct intel_context *ce)
{
+ struct intel_gt *gt = ce->engine->gt;
int err;
- err = intel_engine_reset(ce->engine, "mocs");
- if (err)
- return err;
+ if (intel_has_reset_engine(gt)) {
+ err = intel_engine_reset(ce->engine, "mocs");
+ if (err)
+ return err;
- err = check_mocs_engine(mocs, ce);
- if (err)
- return err;
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
- err = active_engine_reset(ce, "mocs");
- if (err)
- return err;
+ err = active_engine_reset(ce, "mocs");
+ if (err)
+ return err;
- err = check_mocs_engine(mocs, ce);
- if (err)
- return err;
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+ }
- intel_gt_reset(ce->engine->gt, ce->engine->mask, "mocs");
+ if (intel_has_gpu_reset(gt)) {
+ intel_gt_reset(gt, ce->engine->mask, "mocs");
- err = check_mocs_engine(mocs, ce);
- if (err)
- return err;
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -398,9 +377,6 @@ static int live_mocs_reset(void *arg)
/* Check the mocs setup is retained over per-engine and global resets */
- if (!intel_has_reset_engine(gt))
- return 0;
-
err = live_mocs_init(&mocs, gt);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 64ef5ee5decf..61abc0556601 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -6,6 +6,7 @@
#include "intel_context.h"
#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
#include "selftest_rc6.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index ef5aeebbeeb0..8784257ec808 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -9,6 +9,7 @@
#include "i915_memcpy.h"
#include "i915_selftest.h"
+#include "intel_gpu_commands.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_spinner.h"
@@ -95,10 +96,10 @@ __igt_reset_stolen(struct intel_gt *gt,
if (!__drm_mm_interval_first(&gt->i915->mm.stolen,
page << PAGE_SHIFT,
((page + 1) << PAGE_SHIFT) - 1))
- memset32(s, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+ memset_io(s, STACK_MAGIC, PAGE_SIZE);
- in = s;
- if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE))
+ in = (void __force *)s;
+ if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
in = tmp;
crc[page] = crc32_le(0, in, PAGE_SIZE);
@@ -133,8 +134,8 @@ __igt_reset_stolen(struct intel_gt *gt,
ggtt->error_capture.start,
PAGE_SIZE);
- in = s;
- if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE))
+ in = (void __force *)s;
+ if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
in = tmp;
x = crc32_le(0, in, PAGE_SIZE);
@@ -320,17 +321,25 @@ static int igt_atomic_engine_reset(void *arg)
goto out_unlock;
for_each_engine(engine, gt, id) {
- tasklet_disable(&engine->execlists.tasklet);
+ struct tasklet_struct *t = &engine->execlists.tasklet;
+
+ if (t->func)
+ tasklet_disable(t);
intel_engine_pm_get(engine);
for (p = igt_atomic_phases; p->name; p++) {
GEM_TRACE("intel_engine_reset(%s) under %s\n",
engine->name, p->name);
+ if (strcmp(p->name, "softirq"))
+ local_bh_disable();
p->critical_section_begin();
- err = intel_engine_reset(engine, NULL);
+ err = __intel_engine_reset_bh(engine, NULL);
p->critical_section_end();
+ if (strcmp(p->name, "softirq"))
+ local_bh_enable();
+
if (err) {
pr_err("intel_engine_reset(%s) failed under %s\n",
engine->name, p->name);
@@ -339,7 +348,10 @@ static int igt_atomic_engine_reset(void *arg)
}
intel_engine_pm_put(engine);
- tasklet_enable(&engine->execlists.tasklet);
+ if (t->func) {
+ tasklet_enable(t);
+ tasklet_hi_schedule(t);
+ }
if (err)
break;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index aa5675ecb5cc..967641fee42a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -185,7 +185,10 @@ static u8 rps_set_check(struct intel_rps *rps, u8 freq)
{
mutex_lock(&rps->lock);
GEM_BUG_ON(!intel_rps_is_active(rps));
- intel_rps_set(rps, freq);
+ if (wait_for(!intel_rps_set(rps, freq), 50)) {
+ mutex_unlock(&rps->lock);
+ return 0;
+ }
GEM_BUG_ON(rps->last_freq != freq);
mutex_unlock(&rps->lock);
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 2edf2b15885f..6f3a3687ef0f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -9,6 +9,7 @@
#include "intel_context.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
@@ -1090,12 +1091,6 @@ static int live_hwsp_read(void *arg)
}
count++;
- if (8 * watcher[1].rq->ring->emit >
- 3 * watcher[1].rq->ring->size) {
- i915_request_put(rq);
- break;
- }
-
/* Flush the timeline before manually wrapping again */
if (i915_request_wait(rq,
I915_WAIT_INTERRUPTIBLE,
@@ -1104,9 +1099,14 @@ static int live_hwsp_read(void *arg)
i915_request_put(rq);
goto out;
}
-
retire_requests(tl);
i915_request_put(rq);
+
+ /* Single requests are limited to half a ring at most */
+ if (8 * watcher[1].rq->ring->emit >
+ 3 * watcher[1].rq->ring->size)
+ break;
+
} while (!__igt_timeout(end_time, NULL));
WRITE_ONCE(*(u32 *)tl->hwsp_seqno, 0xdeadbeef);
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 61a0532d0f3d..2070b91cb607 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -95,8 +95,9 @@ reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
}
static struct drm_i915_gem_object *
-read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+read_nonprivs(struct intel_context *ce)
{
+ struct intel_engine_cs *engine = ce->engine;
const u32 base = engine->mmio_base;
struct drm_i915_gem_object *result;
struct i915_request *rq;
@@ -130,7 +131,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
if (err)
goto err_obj;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_pin;
@@ -145,7 +146,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
goto err_req;
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
- if (INTEL_GEN(ctx->i915) >= 8)
+ if (INTEL_GEN(engine->i915) >= 8)
srm++;
cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
@@ -200,16 +201,16 @@ print_results(const struct intel_engine_cs *engine, const u32 *results)
}
}
-static int check_whitelist(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static int check_whitelist(struct intel_context *ce)
{
+ struct intel_engine_cs *engine = ce->engine;
struct drm_i915_gem_object *results;
struct intel_wedge_me wedge;
u32 *vaddr;
int err;
int i;
- results = read_nonprivs(ctx, engine);
+ results = read_nonprivs(ce);
if (IS_ERR(results))
return PTR_ERR(results);
@@ -293,8 +294,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
int (*reset)(struct intel_engine_cs *),
const char *name)
{
- struct drm_i915_private *i915 = engine->i915;
- struct i915_gem_context *ctx, *tmp;
+ struct intel_context *ce, *tmp;
struct igt_spinner spin;
intel_wakeref_t wakeref;
int err;
@@ -302,15 +302,15 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
engine->whitelist.count, engine->name, name);
- ctx = kernel_context(i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
err = igt_spinner_init(&spin, engine->gt);
if (err)
goto out_ctx;
- err = check_whitelist(ctx, engine);
+ err = check_whitelist(ce);
if (err) {
pr_err("Invalid whitelist *before* %s reset!\n", name);
goto out_spin;
@@ -330,22 +330,22 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
goto out_spin;
}
- err = check_whitelist(ctx, engine);
+ err = check_whitelist(ce);
if (err) {
pr_err("Whitelist not preserved in context across %s reset!\n",
name);
goto out_spin;
}
- tmp = kernel_context(i915);
+ tmp = intel_context_create(engine);
if (IS_ERR(tmp)) {
err = PTR_ERR(tmp);
goto out_spin;
}
- kernel_context_close(ctx);
- ctx = tmp;
+ intel_context_put(ce);
+ ce = tmp;
- err = check_whitelist(ctx, engine);
+ err = check_whitelist(ce);
if (err) {
pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
name);
@@ -355,7 +355,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
out_spin:
igt_spinner_fini(&spin);
out_ctx:
- kernel_context_close(ctx);
+ intel_context_put(ce);
return err;
}
@@ -486,10 +486,11 @@ static int check_dirty_whitelist(struct intel_context *ce)
struct intel_engine_cs *engine = ce->engine;
struct i915_vma *scratch;
struct i915_vma *batch;
- int err = 0, i, v;
+ int err = 0, i, v, sz;
u32 *cs, *results;
- scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
+ sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
+ scratch = __vm_create_scratch_for_read(ce->vm, sz);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
@@ -786,15 +787,15 @@ out:
return err;
}
-static int read_whitelisted_registers(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+static int read_whitelisted_registers(struct intel_context *ce,
struct i915_vma *results)
{
+ struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq;
int i, err = 0;
u32 srm, *cs;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -807,7 +808,7 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
goto err_req;
srm = MI_STORE_REGISTER_MEM;
- if (INTEL_GEN(ctx->i915) >= 8)
+ if (INTEL_GEN(engine->i915) >= 8)
srm++;
cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
@@ -834,18 +835,15 @@ err_req:
return request_add_sync(rq, err);
}
-static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static int scrub_whitelisted_registers(struct intel_context *ce)
{
- struct i915_address_space *vm;
+ struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq;
struct i915_vma *batch;
int i, err = 0;
u32 *cs;
- vm = i915_gem_context_get_vm_rcu(ctx);
- batch = create_batch(vm);
- i915_vm_put(vm);
+ batch = create_batch(ce->vm);
if (IS_ERR(batch))
return PTR_ERR(batch);
@@ -873,7 +871,7 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
i915_gem_object_flush_map(batch->obj);
intel_gt_chipset_flush(engine->gt);
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@@ -1016,7 +1014,6 @@ static int live_isolated_whitelist(void *arg)
{
struct intel_gt *gt = arg;
struct {
- struct i915_gem_context *ctx;
struct i915_vma *scratch[2];
} client[2] = {};
struct intel_engine_cs *engine;
@@ -1032,61 +1029,57 @@ static int live_isolated_whitelist(void *arg)
return 0;
for (i = 0; i < ARRAY_SIZE(client); i++) {
- struct i915_address_space *vm;
- struct i915_gem_context *c;
-
- c = kernel_context(gt->i915);
- if (IS_ERR(c)) {
- err = PTR_ERR(c);
- goto err;
- }
-
- vm = i915_gem_context_get_vm_rcu(c);
-
- client[i].scratch[0] = create_scratch(vm, 1024);
+ client[i].scratch[0] =
+ __vm_create_scratch_for_read(gt->vm, 4096);
if (IS_ERR(client[i].scratch[0])) {
err = PTR_ERR(client[i].scratch[0]);
- i915_vm_put(vm);
- kernel_context_close(c);
goto err;
}
- client[i].scratch[1] = create_scratch(vm, 1024);
+ client[i].scratch[1] =
+ __vm_create_scratch_for_read(gt->vm, 4096);
if (IS_ERR(client[i].scratch[1])) {
err = PTR_ERR(client[i].scratch[1]);
i915_vma_unpin_and_release(&client[i].scratch[0], 0);
- i915_vm_put(vm);
- kernel_context_close(c);
goto err;
}
-
- client[i].ctx = c;
- i915_vm_put(vm);
}
for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2];
+
if (!engine->kernel_context->vm)
continue;
if (!whitelist_writable_count(engine))
continue;
+ ce[0] = intel_context_create(engine);
+ if (IS_ERR(ce[0])) {
+ err = PTR_ERR(ce[0]);
+ break;
+ }
+ ce[1] = intel_context_create(engine);
+ if (IS_ERR(ce[1])) {
+ err = PTR_ERR(ce[1]);
+ intel_context_put(ce[0]);
+ break;
+ }
+
/* Read default values */
- err = read_whitelisted_registers(client[0].ctx, engine,
- client[0].scratch[0]);
+ err = read_whitelisted_registers(ce[0], client[0].scratch[0]);
if (err)
- goto err;
+ goto err_ce;
/* Try to overwrite registers (should only affect ctx0) */
- err = scrub_whitelisted_registers(client[0].ctx, engine);
+ err = scrub_whitelisted_registers(ce[0]);
if (err)
- goto err;
+ goto err_ce;
/* Read values from ctx1, we expect these to be defaults */
- err = read_whitelisted_registers(client[1].ctx, engine,
- client[1].scratch[0]);
+ err = read_whitelisted_registers(ce[1], client[1].scratch[0]);
if (err)
- goto err;
+ goto err_ce;
/* Verify that both reads return the same default values */
err = check_whitelisted_registers(engine,
@@ -1094,31 +1087,29 @@ static int live_isolated_whitelist(void *arg)
client[1].scratch[0],
result_eq);
if (err)
- goto err;
+ goto err_ce;
/* Read back the updated values in ctx0 */
- err = read_whitelisted_registers(client[0].ctx, engine,
- client[0].scratch[1]);
+ err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
if (err)
- goto err;
+ goto err_ce;
		/* User should be granted privilege to overwrite regs */
err = check_whitelisted_registers(engine,
client[0].scratch[0],
client[0].scratch[1],
result_neq);
+err_ce:
+ intel_context_put(ce[1]);
+ intel_context_put(ce[0]);
if (err)
- goto err;
+ break;
}
err:
for (i = 0; i < ARRAY_SIZE(client); i++) {
- if (!client[i].ctx)
- break;
-
i915_vma_unpin_and_release(&client[i].scratch[1], 0);
i915_vma_unpin_and_release(&client[i].scratch[0], 0);
- kernel_context_close(client[i].ctx);
}
if (igt_flush_test(gt->i915))
@@ -1128,18 +1119,21 @@ err:
}
static bool
-verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
+verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
const char *str)
{
- struct drm_i915_private *i915 = ctx->i915;
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
bool ok = true;
- ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
+ ok &= wa_list_verify(gt->uncore, &lists->gt_wa_list, str);
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
- for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
- enum intel_engine_id id = ce->engine->id;
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return false;
ok &= engine_wa_list_verify(ce,
&lists->engine[id].wa_list,
@@ -1148,6 +1142,8 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
ok &= engine_wa_list_verify(ce,
&lists->engine[id].ctx_wa_list,
str) == 0;
+
+ intel_context_put(ce);
}
return ok;
@@ -1157,7 +1153,6 @@ static int
live_gpu_reset_workarounds(void *arg)
{
struct intel_gt *gt = arg;
- struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
struct wa_lists lists;
bool ok;
@@ -1165,12 +1160,6 @@ live_gpu_reset_workarounds(void *arg)
if (!intel_has_gpu_reset(gt))
return 0;
- ctx = kernel_context(gt->i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- i915_gem_context_lock_engines(ctx);
-
pr_info("Verifying after GPU reset...\n");
igt_global_reset_lock(gt);
@@ -1178,17 +1167,15 @@ live_gpu_reset_workarounds(void *arg)
reference_lists_init(gt, &lists);
- ok = verify_wa_lists(ctx, &lists, "before reset");
+ ok = verify_wa_lists(gt, &lists, "before reset");
if (!ok)
goto out;
intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");
- ok = verify_wa_lists(ctx, &lists, "after reset");
+ ok = verify_wa_lists(gt, &lists, "after reset");
out:
- i915_gem_context_unlock_engines(ctx);
- kernel_context_close(ctx);
reference_lists_fini(gt, &lists);
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
@@ -1200,8 +1187,8 @@ static int
live_engine_reset_workarounds(void *arg)
{
struct intel_gt *gt = arg;
- struct i915_gem_engines_iter it;
- struct i915_gem_context *ctx;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct intel_context *ce;
struct igt_spinner spin;
struct i915_request *rq;
@@ -1212,30 +1199,30 @@ live_engine_reset_workarounds(void *arg)
if (!intel_has_reset_engine(gt))
return 0;
- ctx = kernel_context(gt->i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
igt_global_reset_lock(gt);
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
reference_lists_init(gt, &lists);
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
- struct intel_engine_cs *engine = ce->engine;
+ for_each_engine(engine, gt, id) {
bool ok;
pr_info("Verifying after %s reset...\n", engine->name);
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ break;
+ }
- ok = verify_wa_lists(ctx, &lists, "before reset");
+ ok = verify_wa_lists(gt, &lists, "before reset");
if (!ok) {
ret = -ESRCH;
goto err;
}
- intel_engine_reset(engine, "live_workarounds");
+ intel_engine_reset(engine, "live_workarounds:idle");
- ok = verify_wa_lists(ctx, &lists, "after idle reset");
+ ok = verify_wa_lists(gt, &lists, "after idle reset");
if (!ok) {
ret = -ESRCH;
goto err;
@@ -1259,23 +1246,26 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
- intel_engine_reset(engine, "live_workarounds");
+ intel_engine_reset(engine, "live_workarounds:active");
igt_spinner_end(&spin);
igt_spinner_fini(&spin);
- ok = verify_wa_lists(ctx, &lists, "after busy reset");
+ ok = verify_wa_lists(gt, &lists, "after busy reset");
if (!ok) {
ret = -ESRCH;
goto err;
}
- }
+
err:
- i915_gem_context_unlock_engines(ctx);
+ intel_context_put(ce);
+ if (ret)
+ break;
+ }
+
reference_lists_fini(gt, &lists);
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
- kernel_context_close(ctx);
igt_flush_test(gt->i915);
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index 5982b62f913d..a4d8fc9e2374 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -33,7 +33,7 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
struct file *file;
void *ptr;
- if (obj->ops == &i915_gem_shmem_ops) {
+ if (i915_gem_object_is_shmem(obj)) {
file = obj->base.filp;
atomic_long_inc(&file->f_count);
return file;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 2a343a977987..4545e90e3bf1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -579,20 +579,8 @@ int intel_guc_reset_engine(struct intel_guc *guc,
*/
int intel_guc_resume(struct intel_guc *guc)
{
- u32 action[] = {
- INTEL_GUC_ACTION_EXIT_S_STATE,
- GUC_POWER_D0,
- };
-
- /*
- * If GuC communication is enabled but submission is not supported,
- * we do not need to resume the GuC but we do need to enable the
- * GuC communication on resume (above).
- */
- if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
- return 0;
-
- return intel_guc_send(guc, action, ARRAY_SIZE(action));
+ /* XXX: to be implemented with submission interface rework */
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index e84ab67b317d..bc2ba7d0626c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -47,13 +47,6 @@ struct intel_guc {
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
- struct i915_vma *workqueue;
- void *workqueue_vaddr;
- spinlock_t wq_lock;
-
- struct i915_vma *proc_desc;
- void *proc_desc_vaddr;
-
/* Control params for fw initialization */
u32 params[GUC_CTL_MAX_DWORDS];
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 5212ff844292..17526717368c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -4,6 +4,7 @@
*/
#include "gt/intel_gt.h"
+#include "gt/intel_lrc.h"
#include "intel_guc_ads.h"
#include "intel_uc.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index f9d0907ea1a5..2270d6b3b272 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -76,7 +76,6 @@ static inline bool guc_ready(struct intel_uncore *uncore, u32 *status)
static int guc_wait_ucode(struct intel_uncore *uncore)
{
- struct drm_device *drm = &uncore->i915->drm;
u32 status;
int ret;
@@ -89,11 +88,11 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
* attempt the ucode load again if this happens.)
*/
ret = wait_for(guc_ready(uncore, &status), 100);
- DRM_DEBUG_DRIVER("GuC status %#x\n", status);
-
if (ret) {
- drm_err(drm, "GuC load failed: status = 0x%08X\n", status);
- drm_err(drm, "GuC load failed: status: Reset = %d, "
+ struct drm_device *drm = &uncore->i915->drm;
+
+ drm_dbg(drm, "GuC load failed: status = 0x%08X\n", status);
+ drm_dbg(drm, "GuC load failed: status: Reset = %d, "
"BootROM = 0x%02X, UKernel = 0x%02X, "
"MIA = 0x%02X, Auth = 0x%02X\n",
REG_FIELD_GET(GS_MIA_IN_RESET, status),
@@ -103,12 +102,12 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
- drm_err(drm, "GuC firmware signature verification failed\n");
+ drm_dbg(drm, "GuC firmware signature verification failed\n");
ret = -ENOEXEC;
}
if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) {
- drm_err(drm, "GuC firmware exception. EIP: %#x\n",
+ drm_dbg(drm, "GuC firmware exception. EIP: %#x\n",
intel_uncore_read(uncore, SOFT_SCRATCH(13)));
ret = -ENXIO;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index fdfeb4b9b0f5..23dc0aeaa0ab 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -6,11 +6,14 @@
#include <linux/circ_buf.h>
#include "gem/i915_gem_context.h"
+#include "gt/gen8_engine_cs.h"
+#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
-#include "gt/intel_lrc_reg.h"
+#include "gt/intel_lrc.h"
+#include "gt/intel_mocs.h"
#include "gt/intel_ring.h"
#include "intel_guc_submission.h"
@@ -54,6 +57,8 @@
*
*/
+#define GUC_REQUEST_SIZE 64 /* bytes */
+
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
@@ -66,58 +71,6 @@ static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
return &base[id];
}
-static int guc_workqueue_create(struct intel_guc *guc)
-{
- return intel_guc_allocate_and_map_vma(guc, GUC_WQ_SIZE, &guc->workqueue,
- &guc->workqueue_vaddr);
-}
-
-static void guc_workqueue_destroy(struct intel_guc *guc)
-{
- i915_vma_unpin_and_release(&guc->workqueue, I915_VMA_RELEASE_MAP);
-}
-
-/*
- * Initialise the process descriptor shared with the GuC firmware.
- */
-static int guc_proc_desc_create(struct intel_guc *guc)
-{
- const u32 size = PAGE_ALIGN(sizeof(struct guc_process_desc));
-
- return intel_guc_allocate_and_map_vma(guc, size, &guc->proc_desc,
- &guc->proc_desc_vaddr);
-}
-
-static void guc_proc_desc_destroy(struct intel_guc *guc)
-{
- i915_vma_unpin_and_release(&guc->proc_desc, I915_VMA_RELEASE_MAP);
-}
-
-static void guc_proc_desc_init(struct intel_guc *guc)
-{
- struct guc_process_desc *desc;
-
- desc = memset(guc->proc_desc_vaddr, 0, sizeof(*desc));
-
- /*
- * XXX: pDoorbell and WQVBaseAddress are pointers in process address
- * space for ring3 clients (set them as in mmap_ioctl) or kernel
- * space for kernel clients (map on demand instead? May make debug
- * easier to have it mapped).
- */
- desc->wq_base_addr = 0;
- desc->db_base_addr = 0;
-
- desc->wq_size_bytes = GUC_WQ_SIZE;
- desc->wq_status = WQ_STATUS_ACTIVE;
- desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
-}
-
-static void guc_proc_desc_fini(struct intel_guc *guc)
-{
- memset(guc->proc_desc_vaddr, 0, sizeof(struct guc_process_desc));
-}
-
static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) *
@@ -153,8 +106,6 @@ static void guc_stage_desc_init(struct intel_guc *guc)
desc->stage_id = 0;
desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
- desc->process_desc = intel_guc_ggtt_offset(guc, guc->proc_desc);
- desc->wq_addr = intel_guc_ggtt_offset(guc, guc->workqueue);
desc->wq_size = GUC_WQ_SIZE;
}
@@ -166,62 +117,9 @@ static void guc_stage_desc_fini(struct intel_guc *guc)
memset(desc, 0, sizeof(*desc));
}
-/* Construct a Work Item and append it to the GuC's Work Queue */
-static void guc_wq_item_append(struct intel_guc *guc,
- u32 target_engine, u32 context_desc,
- u32 ring_tail, u32 fence_id)
-{
- /* wqi_len is in DWords, and does not include the one-word header */
- const size_t wqi_size = sizeof(struct guc_wq_item);
- const u32 wqi_len = wqi_size / sizeof(u32) - 1;
- struct guc_process_desc *desc = guc->proc_desc_vaddr;
- struct guc_wq_item *wqi;
- u32 wq_off;
-
- lockdep_assert_held(&guc->wq_lock);
-
- /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
- * should not have the case where structure wqi is across page, neither
- * wrapped to the beginning. This simplifies the implementation below.
- *
- * XXX: if not the case, we need save data to a temp wqi and copy it to
- * workqueue buffer dw by dw.
- */
- BUILD_BUG_ON(wqi_size != 16);
-
- /* We expect the WQ to be active if we're appending items to it */
- GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE);
-
- /* Free space is guaranteed. */
- wq_off = READ_ONCE(desc->tail);
- GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
- GUC_WQ_SIZE) < wqi_size);
- GEM_BUG_ON(wq_off & (wqi_size - 1));
-
- wqi = guc->workqueue_vaddr + wq_off;
-
- /* Now fill in the 4-word work queue item */
- wqi->header = WQ_TYPE_INORDER |
- (wqi_len << WQ_LEN_SHIFT) |
- (target_engine << WQ_TARGET_SHIFT) |
- WQ_NO_WCFLUSH_WAIT;
- wqi->context_desc = context_desc;
- wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
- GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
- wqi->fence_id = fence_id;
-
- /* Make the update visible to GuC */
- WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
-}
-
static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
- struct intel_engine_cs *engine = rq->engine;
- u32 ctx_desc = rq->context->lrc.ccid;
- u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
-
- guc_wq_item_append(guc, engine->guc_id, ctx_desc,
- ring_tail, rq->fence.seqno);
+ /* Leaving stub as this function will be used in future patches */
}
/*
@@ -244,16 +142,12 @@ static void guc_submit(struct intel_engine_cs *engine,
{
struct intel_guc *guc = &engine->gt->uc.guc;
- spin_lock(&guc->wq_lock);
-
do {
struct i915_request *rq = *out++;
flush_ggtt_writes(rq->ring->vma);
guc_add_request(guc, rq);
} while (out != end);
-
- spin_unlock(&guc->wq_lock);
}
static inline int rq_prio(const struct i915_request *rq)
@@ -388,17 +282,26 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
__tasklet_disable_sync_once(&execlists->tasklet);
}
-static void
-cancel_port_requests(struct intel_engine_execlists * const execlists)
+static void guc_reset_state(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ u32 head,
+ bool scrub)
{
- struct i915_request * const *port, *rq;
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
- /* Note we are only using the inflight and not the pending queue */
+ /*
+ * We want a simple context + ring to execute the breadcrumb update.
+ * We cannot rely on the context being intact across the GPU hang,
+ * so clear it and rebuild just what we need for the breadcrumb.
+ * All pending requests for this context will be zapped, and any
+ * future request will be after userspace has had the opportunity
+ * to recreate its own state.
+ */
+ if (scrub)
+ lrc_init_regs(ce, engine, true);
- for (port = execlists->active; (rq = *port); port++)
- schedule_out(rq);
- execlists->active =
- memset(execlists->inflight, 0, sizeof(execlists->inflight));
+ /* Rerun the request; its payload has been neutered (if guilty). */
+ lrc_update_regs(ce, engine, head);
}
static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
@@ -409,8 +312,6 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
spin_lock_irqsave(&engine->active.lock, flags);
- cancel_port_requests(execlists);
-
/* Push back any incomplete requests for replay after the reset. */
rq = execlists_unwind_incomplete_requests(execlists);
if (!rq)
@@ -420,7 +321,7 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
stalled = false;
__i915_request_reset(rq, stalled);
- intel_lr_context_reset(engine, rq->context, rq->head, stalled);
+ guc_reset_state(rq->context, engine, rq->head, stalled);
out_unlock:
spin_unlock_irqrestore(&engine->active.lock, flags);
@@ -451,9 +352,6 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
*/
spin_lock_irqsave(&engine->active.lock, flags);
- /* Cancel the requests on the HW and clear the ELSP tracker. */
- cancel_port_requests(execlists);
-
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link) {
i915_request_set_error_once(rq, -EIO);
@@ -497,12 +395,6 @@ static void guc_reset_finish(struct intel_engine_cs *engine)
}
/*
- * Everything below here is concerned with setup & teardown, and is
- * therefore not part of the somewhat time-critical batch-submission
- * path of guc_submit() above.
- */
-
-/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
*/
@@ -522,30 +414,12 @@ int intel_guc_submission_init(struct intel_guc *guc)
*/
GEM_BUG_ON(!guc->stage_desc_pool);
- ret = guc_workqueue_create(guc);
- if (ret)
- goto err_pool;
-
- ret = guc_proc_desc_create(guc);
- if (ret)
- goto err_workqueue;
-
- spin_lock_init(&guc->wq_lock);
-
return 0;
-
-err_workqueue:
- guc_workqueue_destroy(guc);
-err_pool:
- guc_stage_desc_pool_destroy(guc);
- return ret;
}
void intel_guc_submission_fini(struct intel_guc *guc)
{
if (guc->stage_desc_pool) {
- guc_proc_desc_destroy(guc);
- guc_workqueue_destroy(guc);
guc_stage_desc_pool_destroy(guc);
}
}
@@ -576,33 +450,186 @@ static void guc_interrupts_release(struct intel_gt *gt)
intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask);
}
-static void guc_set_default_submission(struct intel_engine_cs *engine)
+static int guc_context_alloc(struct intel_context *ce)
+{
+ return lrc_alloc(ce, ce->engine);
+}
+
+static int guc_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ return lrc_pre_pin(ce, ce->engine, ww, vaddr);
+}
+
+static int guc_context_pin(struct intel_context *ce, void *vaddr)
{
+ return lrc_pin(ce, ce->engine, vaddr);
+}
+
+static const struct intel_context_ops guc_context_ops = {
+ .alloc = guc_context_alloc,
+
+ .pre_pin = guc_context_pre_pin,
+ .pin = guc_context_pin,
+ .unpin = lrc_unpin,
+ .post_unpin = lrc_post_unpin,
+
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
+ .reset = lrc_reset,
+ .destroy = lrc_destroy,
+};
+
+static int guc_request_alloc(struct i915_request *request)
+{
+ int ret;
+
+ GEM_BUG_ON(!intel_context_is_pinned(request->context));
+
/*
- * We inherit a bunch of functions from execlists that we'd like
- * to keep using:
- *
- * engine->submit_request = execlists_submit_request;
- * engine->cancel_requests = execlists_cancel_requests;
- * engine->schedule = execlists_schedule;
+ * Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
+ */
+ request->reserved_space += GUC_REQUEST_SIZE;
+
+ /*
+ * Note that after this point, we have committed to using
+ * this request as it is being used to both track the
+ * state of engine initialisation and liveness of the
+ * golden renderstate above. Think twice before you try
+ * to cancel/unwind this request now.
+ */
+
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+ if (ret)
+ return ret;
+
+ request->reserved_space -= GUC_REQUEST_SIZE;
+ return 0;
+}
+
+static inline void queue_request(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ int prio)
+{
+ GEM_BUG_ON(!list_empty(&rq->sched.link));
+ list_add_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(engine, prio));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
+static void guc_submit_request(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->active.lock, flags);
+
+ queue_request(engine, rq, rq_prio(rq));
+
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+ GEM_BUG_ON(list_empty(&rq->sched.link));
+
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+ struct intel_timeline *tl;
+
+ list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+ intel_timeline_reset_seqno(tl);
+}
+
+static void guc_sanitize(struct intel_engine_cs *engine)
+{
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
*
- * But we need to override the actual submission backend in order
- * to talk to the GuC.
+ * We have to assume that across suspend/resume (or other loss
+	 * of control) the contents of our pinned buffers have been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
*/
- intel_execlists_set_default_submission(engine);
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
- engine->execlists.tasklet.func = guc_submission_tasklet;
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */
+ sanitize_hwsp(engine);
- /* do not use execlists park/unpark */
- engine->park = engine->unpark = NULL;
+ /* And scrub the dirty cachelines for the HWSP */
+ clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
+
+static void setup_hwsp(struct intel_engine_cs *engine)
+{
+ intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
+
+ ENGINE_WRITE_FW(engine,
+ RING_HWS_PGA,
+ i915_ggtt_offset(engine->status_page.vma));
+}
+
+static void start_engine(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE_FW(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_POSTING_READ(engine, RING_MI_MODE);
+}
+
+static int guc_resume(struct intel_engine_cs *engine)
+{
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
+ intel_mocs_init_engine(engine);
+
+ intel_breadcrumbs_reset(engine->breadcrumbs);
+
+ setup_hwsp(engine);
+ start_engine(engine);
+
+ return 0;
+}
+
+static void guc_set_default_submission(struct intel_engine_cs *engine)
+{
+ engine->submit_request = guc_submit_request;
+ engine->schedule = i915_schedule;
+ engine->execlists.tasklet.func = guc_submission_tasklet;
engine->reset.prepare = guc_reset_prepare;
engine->reset.rewind = guc_reset_rewind;
engine->reset.cancel = guc_reset_cancel;
engine->reset.finish = guc_reset_finish;
- engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+
+ /*
+ * TODO: GuC supports timeslicing and semaphores as well, but they're
+ * handled by the firmware so some minor tweaks are required before
+ * enabling.
+ *
+ * engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+ */
+
+ engine->emit_bb_start = gen8_emit_bb_start;
/*
* For the breadcrumb irq to work we need the interrupts to stay
@@ -613,35 +640,92 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}
-void intel_guc_submission_enable(struct intel_guc *guc)
+static void guc_release(struct intel_engine_cs *engine)
{
- struct intel_gt *gt = guc_to_gt(guc);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+
+ tasklet_kill(&engine->execlists.tasklet);
+
+ intel_engine_cleanup_common(engine);
+ lrc_fini_wa_ctx(engine);
+}
+
+static void guc_default_vfuncs(struct intel_engine_cs *engine)
+{
+ /* Default vfuncs which can be overridden by each engine. */
+
+ engine->resume = guc_resume;
+
+ engine->cops = &guc_context_ops;
+ engine->request_alloc = guc_request_alloc;
+
+ engine->emit_flush = gen8_emit_flush_xcs;
+ engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
+ if (INTEL_GEN(engine->i915) >= 12) {
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
+ engine->emit_flush = gen12_emit_flush_xcs;
+ }
+ engine->set_default_submission = guc_set_default_submission;
+}
+
+static void rcs_submission_override(struct intel_engine_cs *engine)
+{
+ switch (INTEL_GEN(engine->i915)) {
+ case 12:
+ engine->emit_flush = gen12_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
+ break;
+ case 11:
+ engine->emit_flush = gen11_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
+ break;
+ default:
+ engine->emit_flush = gen8_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
+ break;
+ }
+}
+
+static inline void guc_default_irqs(struct intel_engine_cs *engine)
+{
+ engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
+}
+
+int intel_guc_submission_setup(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
/*
- * We're using GuC work items for submitting work through GuC. Since
- * we're coalescing multiple requests from a single context into a
- * single work item prior to assigning it to execlist_port, we can
- * never have more work items than the total number of ports (for all
- * engines). The GuC firmware is controlling the HEAD of work queue,
- * and it is guaranteed that it will remove the work item from the
- * queue before our request is completed.
+ * The setup relies on several assumptions (e.g. irqs always enabled)
+ * that are only valid on gen11+
*/
- BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) *
- sizeof(struct guc_wq_item) *
- I915_NUM_ENGINES > GUC_WQ_SIZE);
+ GEM_BUG_ON(INTEL_GEN(i915) < 11);
+
+ tasklet_init(&engine->execlists.tasklet,
+ guc_submission_tasklet, (unsigned long)engine);
+
+ guc_default_vfuncs(engine);
+ guc_default_irqs(engine);
- guc_proc_desc_init(guc);
+ if (engine->class == RENDER_CLASS)
+ rcs_submission_override(engine);
+
+ lrc_init_wa_ctx(engine);
+
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->sanitize = guc_sanitize;
+ engine->release = guc_release;
+
+ return 0;
+}
+
+void intel_guc_submission_enable(struct intel_guc *guc)
+{
guc_stage_desc_init(guc);
/* Take over from manual control of ELSP (execlists) */
- guc_interrupts_capture(gt);
-
- for_each_engine(engine, gt, id) {
- engine->set_default_submission = guc_set_default_submission;
- engine->set_default_submission(engine);
- }
+ guc_interrupts_capture(guc_to_gt(guc));
}
void intel_guc_submission_disable(struct intel_guc *guc)
@@ -655,7 +739,6 @@ void intel_guc_submission_disable(struct intel_guc *guc)
guc_interrupts_release(gt);
guc_stage_desc_fini(guc);
- guc_proc_desc_fini(guc);
}
static bool __guc_submission_selected(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index 4cf9d3e50263..5f7b9e6347d0 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -19,6 +19,7 @@ void intel_guc_submission_disable(struct intel_guc *guc);
void intel_guc_submission_fini(struct intel_guc *guc);
int intel_guc_preempt_work_create(struct intel_guc *guc);
void intel_guc_preempt_work_destroy(struct intel_guc *guc);
+int intel_guc_submission_setup(struct intel_engine_cs *engine);
bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine);
static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 4e6070e95fe9..6abb8f2dc33d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -15,6 +15,29 @@
static const struct intel_uc_ops uc_ops_off;
static const struct intel_uc_ops uc_ops_on;
+static void uc_expand_default_options(struct intel_uc *uc)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+
+ if (i915->params.enable_guc != -1)
+ return;
+
+ /* Don't enable GuC/HuC on pre-Gen12 */
+ if (INTEL_GEN(i915) < 12) {
+ i915->params.enable_guc = 0;
+ return;
+ }
+
+ /* Don't enable GuC/HuC on older Gen12 platforms */
+ if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
+ i915->params.enable_guc = 0;
+ return;
+ }
+
+ /* Default: enable HuC authentication only */
+ i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
+}
+
/* Reset GuC providing us with fresh state for both GuC and HuC.
*/
static int __intel_uc_reset_hw(struct intel_uc *uc)
@@ -52,9 +75,6 @@ static void __confirm_options(struct intel_uc *uc)
yesno(intel_uc_wants_guc_submission(uc)),
yesno(intel_uc_wants_huc(uc)));
- if (i915->params.enable_guc == -1)
- return;
-
if (i915->params.enable_guc == 0) {
GEM_BUG_ON(intel_uc_wants_guc(uc));
GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
@@ -79,8 +99,7 @@ static void __confirm_options(struct intel_uc *uc)
"Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "GuC submission is N/A");
- if (i915->params.enable_guc & ~(ENABLE_GUC_SUBMISSION |
- ENABLE_GUC_LOAD_HUC))
+ if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
drm_info(&i915->drm,
"Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "undocumented flag");
@@ -88,6 +107,8 @@ static void __confirm_options(struct intel_uc *uc)
void intel_uc_init_early(struct intel_uc *uc)
{
+ uc_expand_default_options(uc);
+
intel_guc_init_early(&uc->guc);
intel_huc_init_early(&uc->huc);
@@ -175,19 +196,15 @@ static void guc_get_mmio_msg(struct intel_guc *guc)
static void guc_handle_mmio_msg(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
-
/* we need communication to be enabled to reply to GuC */
GEM_BUG_ON(!guc_communication_enabled(guc));
- if (!guc->mmio_msg)
- return;
-
- spin_lock_irq(&i915->irq_lock);
- intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
- spin_unlock_irq(&i915->irq_lock);
-
- guc->mmio_msg = 0;
+ spin_lock_irq(&guc->irq_lock);
+ if (guc->mmio_msg) {
+ intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
+ guc->mmio_msg = 0;
+ }
+ spin_unlock_irq(&guc->irq_lock);
}
static void guc_reset_interrupts(struct intel_guc *guc)
@@ -207,7 +224,8 @@ static void guc_disable_interrupts(struct intel_guc *guc)
static int guc_enable_communication(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = gt->i915;
int ret;
GEM_BUG_ON(guc_communication_enabled(guc));
@@ -227,9 +245,9 @@ static int guc_enable_communication(struct intel_guc *guc)
guc_enable_interrupts(guc);
/* check for CT messages received before we enabled interrupts */
- spin_lock_irq(&i915->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
intel_guc_ct_event_handler(&guc->ct);
- spin_unlock_irq(&i915->irq_lock);
+ spin_unlock_irq(&gt->irq_lock);
drm_dbg(&i915->drm, "GuC communication enabled\n");
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 602f1a0bc587..67b06fde1225 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -152,16 +152,11 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
uc_fw->path = NULL;
}
}
-
- /* We don't want to enable GuC/HuC on pre-Gen11 by default */
- if (i915->params.enable_guc == -1 && p < INTEL_ICELAKE)
- uc_fw->path = NULL;
}
static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
{
- if (i915->params.enable_guc & (ENABLE_GUC_SUBMISSION |
- ENABLE_GUC_LOAD_HUC))
+ if (i915->params.enable_guc & ENABLE_GUC_MASK)
return i915->params.guc_firmware_path;
return "";
}
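The ENABLE_GUC_MASK check above folds the two documented enable_guc modparam bits into a single test. Below is a minimal illustrative sketch of the assumed bit layout and how such a check reads; the authoritative definitions live in i915_params.h and guc_fw_path_overridden() is a hypothetical helper, not part of this patch.

/*
 * Assumed bit layout (illustration only -- see i915_params.h for the
 * real definitions):
 */
#define ENABLE_GUC_SUBMISSION	BIT(0)	/* use GuC for request submission */
#define ENABLE_GUC_LOAD_HUC	BIT(1)	/* load and authenticate HuC via GuC */
#define ENABLE_GUC_MASK		GENMASK(1, 0)

/* any documented bit set means the GuC firmware path override applies */
static bool guc_fw_path_overridden(const struct drm_i915_private *i915)
{
	return i915->params.enable_guc & ENABLE_GUC_MASK;
}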
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 16b582cb97ed..fef1e857cefc 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -37,11 +37,19 @@
#include <linux/slab.h>
#include "i915_drv.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"
+#include "gt/intel_gt_requests.h"
+#include "gt/shmem_utils.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_context.h"
+
#define INVALID_OP (~0U)
#define OP_LEN_MI 9
@@ -454,6 +462,7 @@ enum {
RING_BUFFER_INSTRUCTION,
BATCH_BUFFER_INSTRUCTION,
BATCH_BUFFER_2ND_LEVEL,
+ RING_BUFFER_CTX,
};
enum {
@@ -495,6 +504,7 @@ struct parser_exec_state {
*/
int saved_buf_addr_type;
bool is_ctx_wa;
+ bool is_init_ctx;
const struct cmd_info *info;
@@ -708,6 +718,11 @@ static inline u32 cmd_val(struct parser_exec_state *s, int index)
return *cmd_ptr(s, index);
}
+static inline bool is_init_ctx(struct parser_exec_state *s)
+{
+ return (s->buf_type == RING_BUFFER_CTX && s->is_init_ctx);
+}
+
static void parser_exec_state_dump(struct parser_exec_state *s)
{
int cnt = 0;
@@ -721,7 +736,8 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
s->buf_type == RING_BUFFER_INSTRUCTION ?
- "RING_BUFFER" : "BATCH_BUFFER",
+ "RING_BUFFER" : ((s->buf_type == RING_BUFFER_CTX) ?
+ "CTX_BUFFER" : "BATCH_BUFFER"),
s->buf_addr_type == GTT_BUFFER ?
"GTT" : "PPGTT", s->ip_gma);
@@ -756,7 +772,8 @@ static inline void update_ip_va(struct parser_exec_state *s)
if (WARN_ON(s->ring_head == s->ring_tail))
return;
- if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ if (s->buf_type == RING_BUFFER_INSTRUCTION ||
+ s->buf_type == RING_BUFFER_CTX) {
unsigned long ring_top = s->ring_start + s->ring_size;
if (s->ring_head > s->ring_tail) {
@@ -820,68 +837,12 @@ static inline int cmd_length(struct parser_exec_state *s)
*addr = val; \
} while (0)
-static bool is_shadowed_mmio(unsigned int offset)
-{
- bool ret = false;
-
- if ((offset == 0x2168) || /*BB current head register UDW */
- (offset == 0x2140) || /*BB current header register */
- (offset == 0x211c) || /*second BB header register UDW */
- (offset == 0x2114)) { /*second BB header register UDW */
- ret = true;
- }
- return ret;
-}
-
-static inline bool is_force_nonpriv_mmio(unsigned int offset)
-{
- return (offset >= 0x24d0 && offset < 0x2500);
-}
-
-static int force_nonpriv_reg_handler(struct parser_exec_state *s,
- unsigned int offset, unsigned int index, char *cmd)
-{
- struct intel_gvt *gvt = s->vgpu->gvt;
- unsigned int data;
- u32 ring_base;
- u32 nopid;
-
- if (!strcmp(cmd, "lri"))
- data = cmd_val(s, index + 1);
- else {
- gvt_err("Unexpected forcenonpriv 0x%x write from cmd %s\n",
- offset, cmd);
- return -EINVAL;
- }
-
- ring_base = s->engine->mmio_base;
- nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
-
- if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
- data != nopid) {
- gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
- offset, data);
- patch_value(s, cmd_ptr(s, index), nopid);
- return 0;
- }
- return 0;
-}
-
static inline bool is_mocs_mmio(unsigned int offset)
{
return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
((offset >= 0xb020) && (offset <= 0xb0a0));
}
-static int mocs_cmd_reg_handler(struct parser_exec_state *s,
- unsigned int offset, unsigned int index)
-{
- if (!is_mocs_mmio(offset))
- return -EINVAL;
- vgpu_vreg(s->vgpu, offset) = cmd_val(s, index + 1);
- return 0;
-}
-
static int is_cmd_update_pdps(unsigned int offset,
struct parser_exec_state *s)
{
@@ -929,6 +890,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
struct intel_vgpu *vgpu = s->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
u32 ctx_sr_ctl;
+ u32 *vreg, vreg_old;
if (offset + 4 > gvt->device_info.mmio_size) {
gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
@@ -936,34 +898,101 @@ static int cmd_reg_handler(struct parser_exec_state *s,
return -EFAULT;
}
+ if (is_init_ctx(s)) {
+ struct intel_gvt_mmio_info *mmio_info;
+
+ intel_gvt_mmio_set_cmd_accessible(gvt, offset);
+ mmio_info = intel_gvt_find_mmio_info(gvt, offset);
+ if (mmio_info && mmio_info->write)
+ intel_gvt_mmio_set_cmd_write_patch(gvt, offset);
+ return 0;
+ }
+
if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
gvt_vgpu_err("%s access to non-render register (%x)\n",
cmd, offset);
return -EBADRQC;
}
- if (is_shadowed_mmio(offset)) {
- gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
- return 0;
+ if (!strncmp(cmd, "srm", 3) ||
+ !strncmp(cmd, "lrm", 3)) {
+ if (offset != i915_mmio_reg_offset(GEN8_L3SQCREG4) &&
+ offset != 0x21f0) {
+ gvt_vgpu_err("%s access to register (%x)\n",
+ cmd, offset);
+ return -EPERM;
+ } else
+ return 0;
}
- if (is_mocs_mmio(offset) &&
- mocs_cmd_reg_handler(s, offset, index))
- return -EINVAL;
+ if (!strncmp(cmd, "lrr-src", 7) ||
+ !strncmp(cmd, "lrr-dst", 7)) {
+ gvt_vgpu_err("not allowed cmd %s\n", cmd);
+ return -EPERM;
+ }
+
+ if (!strncmp(cmd, "pipe_ctrl", 9)) {
+ /* TODO: add LRI POST logic here */
+ return 0;
+ }
- if (is_force_nonpriv_mmio(offset) &&
- force_nonpriv_reg_handler(s, offset, index, cmd))
+ if (strncmp(cmd, "lri", 3))
return -EPERM;
+ /* below are all lri handlers */
+ vreg = &vgpu_vreg(s->vgpu, offset);
+ if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
+ gvt_vgpu_err("%s access to non-render register (%x)\n",
+ cmd, offset);
+ return -EBADRQC;
+ }
+
+ if (is_cmd_update_pdps(offset, s) &&
+ cmd_pdp_mmio_update_handler(s, offset, index))
+ return -EINVAL;
+
if (offset == i915_mmio_reg_offset(DERRMR) ||
offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
}
- if (is_cmd_update_pdps(offset, s) &&
- cmd_pdp_mmio_update_handler(s, offset, index))
- return -EINVAL;
+ if (is_mocs_mmio(offset))
+ *vreg = cmd_val(s, index + 1);
+
+ vreg_old = *vreg;
+
+ if (intel_gvt_mmio_is_cmd_write_patch(gvt, offset)) {
+ u32 cmdval_new, cmdval;
+ struct intel_gvt_mmio_info *mmio_info;
+
+ cmdval = cmd_val(s, index + 1);
+
+ mmio_info = intel_gvt_find_mmio_info(gvt, offset);
+ if (!mmio_info) {
+ cmdval_new = cmdval;
+ } else {
+ u64 ro_mask = mmio_info->ro_mask;
+ int ret;
+
+ if (likely(!ro_mask))
+ ret = mmio_info->write(s->vgpu, offset,
+ &cmdval, 4);
+ else {
+ gvt_vgpu_err("try to write RO reg %x\n",
+ offset);
+ ret = -EBADRQC;
+ }
+ if (ret)
+ return ret;
+ cmdval_new = *vreg;
+ }
+ if (cmdval_new != cmdval)
+ patch_value(s, cmd_ptr(s, index+1), cmdval_new);
+ }
+
+ /* only patch the cmd; restore the vreg value if it was changed in the mmio write handler */
+ *vreg = vreg_old;
/* TODO
* In order to let workload with inhibit context to generate
@@ -1215,6 +1244,8 @@ static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
s->buf_type = BATCH_BUFFER_INSTRUCTION;
ret = ip_gma_set(s, s->ret_ip_gma_bb);
s->buf_addr_type = s->saved_buf_addr_type;
+ } else if (s->buf_type == RING_BUFFER_CTX) {
+ ret = ip_gma_set(s, s->ring_tail);
} else {
s->buf_type = RING_BUFFER_INSTRUCTION;
s->buf_addr_type = GTT_BUFFER;
@@ -2763,7 +2794,8 @@ static int command_scan(struct parser_exec_state *s,
gma_bottom = rb_start + rb_len;
while (s->ip_gma != gma_tail) {
- if (s->buf_type == RING_BUFFER_INSTRUCTION) {
+ if (s->buf_type == RING_BUFFER_INSTRUCTION ||
+ s->buf_type == RING_BUFFER_CTX) {
if (!(s->ip_gma >= rb_start) ||
!(s->ip_gma < gma_bottom)) {
gvt_vgpu_err("ip_gma %lx out of ring scope."
@@ -3056,6 +3088,118 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
+/* Generate dummy contexts by sending empty requests to HW, and let
+ * the HW fill the Engine Contexts. These dummy contexts are used for
+ * initialization purposes (updating the reg whitelist), so they are
+ * referred to as init contexts here.
+ */
+void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
+{
+ const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (gvt->is_reg_whitelist_updated)
+ return;
+
+ /* scan init ctx to update cmd accessible list */
+ for_each_engine(engine, gvt->gt, id) {
+ struct parser_exec_state s;
+ void *vaddr;
+ int ret;
+
+ if (!engine->default_state)
+ continue;
+
+ vaddr = shmem_pin_map(engine->default_state);
+ if (IS_ERR(vaddr)) {
+ gvt_err("failed to map %s->default state, err:%zd\n",
+ engine->name, PTR_ERR(vaddr));
+ return;
+ }
+
+ s.buf_type = RING_BUFFER_CTX;
+ s.buf_addr_type = GTT_BUFFER;
+ s.vgpu = vgpu;
+ s.engine = engine;
+ s.ring_start = 0;
+ s.ring_size = engine->context_size - start;
+ s.ring_head = 0;
+ s.ring_tail = s.ring_size;
+ s.rb_va = vaddr + start;
+ s.workload = NULL;
+ s.is_ctx_wa = false;
+ s.is_init_ctx = true;
+
+ /* skip the first RING_CTX_SIZE bytes (0x50 dwords) of ring context */
+ ret = ip_gma_set(&s, RING_CTX_SIZE);
+ if (ret == 0) {
+ ret = command_scan(&s, 0, s.ring_size, 0, s.ring_size);
+ if (ret)
+ gvt_err("Scan init ctx error\n");
+ }
+
+ shmem_unpin_map(engine->default_state, vaddr);
+ if (ret)
+ return;
+ }
+
+ gvt->is_reg_whitelist_updated = true;
+}
+
+int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ unsigned long gma_head, gma_tail, gma_start, ctx_size;
+ struct parser_exec_state s;
+ int ring_id = workload->engine->id;
+ struct intel_context *ce = vgpu->submission.shadow[ring_id];
+ int ret;
+
+ GEM_BUG_ON(atomic_read(&ce->pin_count) < 0);
+
+ ctx_size = workload->engine->context_size - PAGE_SIZE;
+
+ /* Only the ring context is loaded to HW for an inhibit context; no need to
+ * scan engine context
+ */
+ if (is_inhibit_context(ce))
+ return 0;
+
+ gma_start = i915_ggtt_offset(ce->state) + LRC_STATE_PN*PAGE_SIZE;
+ gma_head = 0;
+ gma_tail = ctx_size;
+
+ s.buf_type = RING_BUFFER_CTX;
+ s.buf_addr_type = GTT_BUFFER;
+ s.vgpu = workload->vgpu;
+ s.engine = workload->engine;
+ s.ring_start = gma_start;
+ s.ring_size = ctx_size;
+ s.ring_head = gma_start + gma_head;
+ s.ring_tail = gma_start + gma_tail;
+ s.rb_va = ce->lrc_reg_state;
+ s.workload = workload;
+ s.is_ctx_wa = false;
+ s.is_init_ctx = false;
+
+ /* don't scan the first RING_CTX_SIZE bytes (0x50 dwords), as they are
+ * the ring context
+ */
+ ret = ip_gma_set(&s, gma_start + gma_head + RING_CTX_SIZE);
+ if (ret)
+ goto out;
+
+ ret = command_scan(&s, gma_head, gma_tail,
+ gma_start, ctx_size);
+out:
+ if (ret)
+ gvt_vgpu_err("scan shadow ctx error\n");
+
+ return ret;
+}
+
static int init_cmd_table(struct intel_gvt *gvt)
{
unsigned int gen_type = intel_gvt_get_device_type(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
index ab25d151932a..416d345e2816 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.h
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -40,6 +40,7 @@
struct intel_gvt;
struct intel_shadow_wa_ctx;
+struct intel_vgpu;
struct intel_vgpu_workload;
void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
@@ -50,4 +51,8 @@ int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu);
+
+int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload);
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 158873f269b1..c8dcda6d4f0d 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -522,12 +522,11 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu,
static void clean_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- struct intel_engine_cs *engine;
struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
- for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
+ for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
kfree(s->ring_scan_buffer[engine->id]);
s->ring_scan_buffer[engine->id] = NULL;
s->ring_scan_buffer_size[engine->id] = 0;
@@ -537,11 +536,10 @@ static void clean_execlist(struct intel_vgpu *vgpu,
static void reset_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
- for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
+ for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine);
}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index d62cd14605a3..84ad74b37d66 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -182,7 +182,4 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
const struct intel_engine_cs *engine);
-void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
- intel_engine_mask_t engine_mask);
-
#endif /*_GVT_EXECLIST_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index 67b6ede9e707..0daa3931aef7 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -38,6 +38,10 @@
#include <linux/types.h>
+#include "display/intel_display.h"
+
+struct intel_vgpu;
+
#define _PLANE_CTL_FORMAT_SHIFT 24
#define _PLANE_CTL_TILED_SHIFT 10
#define _PIPE_V_SRCSZ_SHIFT 0
@@ -98,8 +102,6 @@ enum DDI_PORT {
DDI_PORT_E = 4
};
-struct intel_gvt;
-
/* color space conversion and gamma correction are not included */
struct intel_vgpu_primary_plane_format {
u8 enabled; /* plane is enabled */
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index b0e173f2d990..3bf45672ef98 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -34,10 +34,19 @@
#ifndef _GVT_GTT_H_
#define _GVT_GTT_H_
-#define I915_GTT_PAGE_SHIFT 12
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+
+#include "gt/intel_gtt.h"
+struct intel_gvt;
+struct intel_vgpu;
struct intel_vgpu_mm;
+#define I915_GTT_PAGE_SHIFT 12
+
#define INTEL_GVT_INVALID_ADDR (~0UL)
struct intel_gvt_gtt_entry {
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index cf3578e3f4dd..03c993d68f10 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -33,6 +33,10 @@
#ifndef _GVT_H_
#define _GVT_H_
+#include <uapi/linux/pci_regs.h>
+
+#include "i915_drv.h"
+
#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
@@ -244,7 +248,7 @@ struct gvt_mmio_block {
#define INTEL_GVT_MMIO_HASH_BITS 11
struct intel_gvt_mmio {
- u8 *mmio_attribute;
+ u16 *mmio_attribute;
/* Register contains RO bits */
#define F_RO (1 << 0)
/* Register contains graphics address */
@@ -263,6 +267,8 @@ struct intel_gvt_mmio {
* logical context image
*/
#define F_SR_IN_CTX (1 << 7)
+/* Value of command write of this reg needs to be patched */
+#define F_CMD_WRITE_PATCH (1 << 8)
struct gvt_mmio_block *mmio_block;
unsigned int num_mmio_block;
@@ -329,6 +335,7 @@ struct intel_gvt {
u32 *mocs_mmio_offset_list;
u32 mocs_mmio_offset_list_cnt;
} engine_mmio_list;
+ bool is_reg_whitelist_updated;
struct dentry *debugfs_root;
};
@@ -412,6 +419,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
+/* ring context size, i.e. the first 0x50 dwords */
+#define RING_CTX_SIZE 320
+
struct intel_vgpu_creation_params {
__u64 handle;
__u64 low_gm_sz; /* in MB */
@@ -683,6 +693,35 @@ static inline void intel_gvt_mmio_set_sr_in_ctx(
}
void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
+/**
+ * intel_gvt_mmio_set_cmd_write_patch - mark an MMIO if its cmd write needs
+ * to be patched
+ * @gvt: a GVT device
+ * @offset: register offset
+ */
+static inline void intel_gvt_mmio_set_cmd_write_patch(
+ struct intel_gvt *gvt, unsigned int offset)
+{
+ gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_WRITE_PATCH;
+}
+
+/**
+ * intel_gvt_mmio_is_cmd_write_patch - check if an MMIO's cmd write needs to
+ * be patched
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if a GPU command write to this MMIO should be patched
+ */
+static inline bool intel_gvt_mmio_is_cmd_write_patch(
+ struct intel_gvt *gvt, unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH;
+}
+
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index aa7e75cb3e6a..6eeaeecb7f85 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -83,7 +83,7 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
}
-static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
unsigned int offset)
{
struct intel_gvt_mmio_info *e;
@@ -96,7 +96,7 @@ static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
}
static int new_mmio_info(struct intel_gvt *gvt,
- u32 offset, u8 flags, u32 size,
+ u32 offset, u16 flags, u32 size,
u32 addr_mask, u32 ro_mask, u32 device,
gvt_mmio_func read, gvt_mmio_func write)
{
@@ -118,7 +118,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
return -ENOMEM;
info->offset = i;
- p = find_mmio_info(gvt, info->offset);
+ p = intel_gvt_find_mmio_info(gvt, info->offset);
if (p) {
WARN(1, "dup mmio definition offset %x\n",
info->offset);
@@ -1651,7 +1651,7 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
return 0;
}
-/**
+/*
* FixMe:
* If guest fills non-priv batch buffer on ApolloLake/Broxton as Mesa i965 did:
* 717e7539124d (i965: Use a WC map and memcpy for the batch instead of pwrite.)
@@ -1965,7 +1965,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
/* RING MODE */
#define RING_REG(base) _MMIO((base) + 0x29c)
- MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+ MMIO_RING_DFH(RING_REG, D_ALL,
+ F_MODE_MASK | F_CMD_ACCESS | F_CMD_WRITE_PATCH, NULL,
ring_mode_mmio_write);
#undef RING_REG
@@ -2885,8 +2886,8 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(_MMIO(0xb110), D_BDW);
- MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
- NULL, force_nonpriv_write);
+ MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
+ D_BDW_PLUS, NULL, force_nonpriv_write);
MMIO_D(_MMIO(0x44484), D_BDW_PLUS);
MMIO_D(_MMIO(0x4448c), D_BDW_PLUS);
@@ -3626,7 +3627,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
/*
* Normal tracked MMIOs.
*/
- mmio_info = find_mmio_info(gvt, offset);
+ mmio_info = intel_gvt_find_mmio_info(gvt, offset);
if (!mmio_info) {
gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes);
goto default_rw;
@@ -3686,14 +3687,13 @@ void intel_gvt_restore_fence(struct intel_gvt *gvt)
}
}
-static inline int mmio_pm_restore_handler(struct intel_gvt *gvt,
- u32 offset, void *data)
+static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
struct intel_vgpu *vgpu = data;
struct drm_i915_private *dev_priv = gvt->gt->i915;
if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
- I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+ intel_uncore_write(&dev_priv->uncore, _MMIO(offset), vgpu_vreg(vgpu, offset));
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
index fcd663811d37..287cd142629e 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.h
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -32,7 +32,10 @@
#ifndef _GVT_INTERRUPT_H_
#define _GVT_INTERRUPT_H_
-#include <linux/types.h>
+#include <linux/hrtimer.h>
+#include <linux/kernel.h>
+
+#include "i915_reg.h"
enum intel_gvt_event_type {
RCS_MI_USER_INTERRUPT = 0,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 60f1a386dd06..b4348256ae95 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1703,7 +1703,7 @@ static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
return -EINVAL;
}
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
if (kvmgt_gfn_is_write_protected(info, gfn))
goto out;
@@ -1712,7 +1712,7 @@ static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
kvmgt_protect_table_add(info, gfn);
out:
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
return 0;
}
@@ -1737,7 +1737,7 @@ static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
return -EINVAL;
}
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
if (!kvmgt_gfn_is_write_protected(info, gfn))
goto out;
@@ -1746,7 +1746,7 @@ static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
kvmgt_protect_table_del(info, gfn);
out:
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
return 0;
}
@@ -1772,7 +1772,7 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
struct kvmgt_guest_info *info = container_of(node,
struct kvmgt_guest_info, track_node);
- spin_lock(&kvm->mmu_lock);
+ write_lock(&kvm->mmu_lock);
for (i = 0; i < slot->npages; i++) {
gfn = slot->base_gfn + i;
if (kvmgt_gfn_is_write_protected(info, gfn)) {
@@ -1781,7 +1781,7 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
kvmgt_protect_table_del(info, gfn);
}
}
- spin_unlock(&kvm->mmu_lock);
+ write_unlock(&kvm->mmu_lock);
}
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 9e862dc73579..7c26af39fbfc 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -80,6 +80,9 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
void *data);
+struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
+ unsigned int offset);
+
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index afe574d6b3b5..c9589e26af93 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -35,6 +35,7 @@
#include "i915_drv.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "gvt.h"
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 3b25e7fe32f6..b6b69777af49 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -36,6 +36,18 @@
#ifndef __GVT_RENDER_H__
#define __GVT_RENDER_H__
+#include <linux/types.h>
+
+#include "gt/intel_engine_types.h"
+#include "gt/intel_lrc_reg.h"
+#include "i915_reg.h"
+
+struct i915_request;
+struct intel_context;
+struct intel_engine_cs;
+struct intel_gvt;
+struct intel_vgpu;
+
struct engine_mmio {
enum intel_engine_id id;
i915_reg_t reg;
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 6f92cde71971..550a456e936f 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -33,6 +33,8 @@
#ifndef _GVT_MPT_H_
#define _GVT_MPT_H_
+#include "gvt.h"
+
/**
* DOC: Hypervisor Service APIs for GVT-g Core Logic
*
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index b58860dee970..244cc7320b54 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -133,4 +133,6 @@
#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
#define VF_GUARDBAND _MMIO(0x83a4)
+
+#define BCS_TILE_REGISTER_VAL_OFFSET (0x43*4)
#endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index aed2ef6466a2..fc735692f21f 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -37,6 +37,8 @@
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "gt/intel_execlists_submission.h"
+#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
@@ -135,6 +137,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
int i;
bool skip = false;
int ring_id = workload->engine->id;
+ int ret;
GEM_BUG_ON(!intel_context_is_pinned(ctx));
@@ -161,16 +164,24 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
- }
+ } else if (workload->engine->id == BCS0)
+ intel_gvt_hypervisor_read_gpa(vgpu,
+ workload->ring_context_gpa +
+ BCS_TILE_REGISTER_VAL_OFFSET,
+ (void *)shadow_ring_context +
+ BCS_TILE_REGISTER_VAL_OFFSET, 4);
#undef COPY_REG
#undef COPY_REG_MASKED
+ /* don't copy Ring Context (the first 0x50 dwords),
+ * only copy the Engine Context part from guest
+ */
intel_gvt_hypervisor_read_gpa(vgpu,
workload->ring_context_gpa +
- sizeof(*shadow_ring_context),
+ RING_CTX_SIZE,
(void *)shadow_ring_context +
- sizeof(*shadow_ring_context),
- I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+ RING_CTX_SIZE,
+ I915_GTT_PAGE_SIZE - RING_CTX_SIZE);
sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
@@ -237,6 +248,11 @@ read:
gpa_size = I915_GTT_PAGE_SIZE;
dst = context_base + (i << I915_GTT_PAGE_SHIFT);
}
+ ret = intel_gvt_scan_engine_context(workload);
+ if (ret) {
+ gvt_vgpu_err("invalid cmd found in guest context pages\n");
+ return ret;
+ }
s->last_ctx[ring_id].valid = true;
return 0;
}
@@ -396,7 +412,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
if (!wa_ctx->indirect_ctx.obj)
return;
+ i915_gem_object_lock(wa_ctx->indirect_ctx.obj, NULL);
i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
+ i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
i915_gem_object_put(wa_ctx->indirect_ctx.obj);
wa_ctx->indirect_ctx.obj = NULL;
@@ -504,6 +522,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
struct intel_gvt *gvt = workload->vgpu->gvt;
const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
struct intel_vgpu_shadow_bb *bb;
+ struct i915_gem_ww_ctx ww;
int ret;
list_for_each_entry(bb, &workload->shadow_bb, list) {
@@ -528,10 +547,19 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
* directly
*/
if (!bb->ppgtt) {
- bb->vma = i915_gem_object_ggtt_pin(bb->obj,
- NULL, 0, 0, 0);
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ i915_gem_object_lock(bb->obj, &ww);
+
+ bb->vma = i915_gem_object_ggtt_pin_ww(bb->obj, &ww,
+ NULL, 0, 0, 0);
if (IS_ERR(bb->vma)) {
ret = PTR_ERR(bb->vma);
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
goto err;
}
@@ -545,13 +573,15 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
0);
if (ret)
goto err;
- }
- /* No one is going to touch shadow bb from now on. */
- i915_gem_object_flush_map(bb->obj);
+ /* No one is going to touch shadow bb from now on. */
+ i915_gem_object_flush_map(bb->obj);
+ i915_gem_object_unlock(bb->obj);
+ }
}
return 0;
err:
+ i915_gem_ww_ctx_fini(&ww);
release_shadow_batch_buffer(workload);
return ret;
}
@@ -578,14 +608,29 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
unsigned char *per_ctx_va =
(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
wa_ctx->indirect_ctx.size;
+ struct i915_gem_ww_ctx ww;
+ int ret;
if (wa_ctx->indirect_ctx.size == 0)
return 0;
- vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
- 0, CACHELINE_BYTES, 0);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ i915_gem_object_lock(wa_ctx->indirect_ctx.obj, &ww);
+
+ vma = i915_gem_object_ggtt_pin_ww(wa_ctx->indirect_ctx.obj, &ww, NULL,
+ 0, CACHELINE_BYTES, 0);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ return ret;
+ }
+
+ i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
/* FIXME: we are not tracking our pinned VMA leaving it
* up to the core to fix up the stray pin_count upon
@@ -619,12 +664,14 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
if (bb->obj) {
+ i915_gem_object_lock(bb->obj, NULL);
if (bb->va && !IS_ERR(bb->va))
i915_gem_object_unpin_map(bb->obj);
if (bb->vma && !IS_ERR(bb->vma))
i915_vma_unpin(bb->vma);
+ i915_gem_object_unlock(bb->obj);
i915_gem_object_put(bb->obj);
}
list_del(&bb->list);
@@ -999,13 +1046,12 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
intel_engine_mask_t tmp;
/* free the unsubmitted workloads in the queues. */
- for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
+ for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
list_for_each_entry_safe(pos, n,
&s->workload_q_head[engine->id], list) {
list_del_init(&pos->list);
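The scheduler.c hunks above replace plain i915_gem_object_ggtt_pin() calls with the ww-lock aware variant. A minimal sketch of the lock/backoff/retry pattern being adopted follows, assuming the helper names and signatures used in the diff; pin_obj_ww() itself is a hypothetical wrapper, not part of the patch.

static struct i915_vma *pin_obj_ww(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	struct i915_vma *vma = NULL;
	int err;

	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err) {
		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0, 0);
		if (IS_ERR(vma))
			err = PTR_ERR(vma);
	}
	if (err == -EDEADLK) {
		/* contended ww lock: unwind and reacquire in the right order */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return err ? ERR_PTR(err) : vma;
}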
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 64e7a0b791c3..7c86984a842f 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -36,6 +36,11 @@
#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_
+#include "gt/intel_engine_types.h"
+
+#include "execlist.h"
+#include "interrupt.h"
+
struct intel_gvt_workload_scheduler {
struct intel_vgpu *current_vgpu;
struct intel_vgpu *next_vgpu;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index cbe5931906e0..6a16d0ca7cda 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -499,9 +499,11 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
mutex_lock(&gvt->lock);
vgpu = __intel_gvt_create_vgpu(gvt, &param);
- if (!IS_ERR(vgpu))
+ if (!IS_ERR(vgpu)) {
/* calculate left instance change for types */
intel_gvt_update_vgpu_types(gvt);
+ intel_gvt_update_reg_whitelist(vgpu);
+ }
mutex_unlock(&gvt->lock);
return vgpu;
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 10a865f3dc09..3bc616cc1ad2 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -159,8 +159,7 @@ __active_retire(struct i915_active *ref)
GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);
/* Make the cached node available for reuse with any timeline */
- if (IS_ENABLED(CONFIG_64BIT))
- ref->cache->timeline = 0; /* needs cmpxchg(u64) */
+ ref->cache->timeline = 0; /* needs cmpxchg(u64) */
}
spin_unlock_irqrestore(&ref->tree_lock, flags);
@@ -256,7 +255,6 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
if (cached == idx)
return it;
-#ifdef CONFIG_64BIT /* for cmpxchg(u64) */
/*
* An unclaimed cache [.timeline=0] can only be claimed once.
*
@@ -267,9 +265,8 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
* only the winner of that race will cmpxchg return the old
* value of 0).
*/
- if (!cached && !cmpxchg(&it->timeline, 0, idx))
+ if (!cached && !cmpxchg64(&it->timeline, 0, idx))
return it;
-#endif
}
BUILD_BUG_ON(offsetof(typeof(*it), node));
@@ -631,24 +628,26 @@ static int flush_lazy_signals(struct i915_active *ref)
int __i915_active_wait(struct i915_active *ref, int state)
{
- int err;
-
might_sleep();
- if (!i915_active_acquire_if_busy(ref))
- return 0;
-
/* Any fence added after the wait begins will not be auto-signaled */
- err = flush_lazy_signals(ref);
- i915_active_release(ref);
- if (err)
- return err;
+ if (i915_active_acquire_if_busy(ref)) {
+ int err;
- if (!i915_active_is_idle(ref) &&
- ___wait_var_event(ref, i915_active_is_idle(ref),
- state, 0, 0, schedule()))
- return -EINTR;
+ err = flush_lazy_signals(ref);
+ i915_active_release(ref);
+ if (err)
+ return err;
+ if (___wait_var_event(ref, i915_active_is_idle(ref),
+ state, 0, 0, schedule()))
+ return -EINTR;
+ }
+
+ /*
+ * After the wait is complete, the caller may free the active.
+ * We have to flush any concurrent retirement before returning.
+ */
flush_work(&ref->work);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index b0899b665e85..ced9a96d7c34 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -26,6 +26,7 @@
*/
#include "gt/intel_engine.h"
+#include "gt/intel_gpu_commands.h"
#include "i915_drv.h"
#include "i915_memcpy.h"
@@ -1142,7 +1143,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
void *dst, *src;
int ret;
- dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
+ dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
if (IS_ERR(dst))
return dst;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 77e76b665098..88336ff4bf09 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -45,6 +45,7 @@
#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_irq.h"
+#include "i915_scheduler.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
@@ -209,7 +210,7 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
spin_unlock(&obj->vma.lock);
seq_printf(m, " (pinned x %d)", pin_count);
- if (obj->stolen)
+ if (i915_gem_object_is_stolen(obj))
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
if (i915_gem_object_is_framebuffer(obj))
seq_printf(m, " (fb)");
@@ -219,145 +220,6 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%s)", engine->name);
}
-struct file_stats {
- struct i915_address_space *vm;
- unsigned long count;
- u64 total;
- u64 active, inactive;
- u64 closed;
-};
-
-static int per_file_stats(int id, void *ptr, void *data)
-{
- struct drm_i915_gem_object *obj = ptr;
- struct file_stats *stats = data;
- struct i915_vma *vma;
-
- if (IS_ERR_OR_NULL(obj) || !kref_get_unless_zero(&obj->base.refcount))
- return 0;
-
- stats->count++;
- stats->total += obj->base.size;
-
- spin_lock(&obj->vma.lock);
- if (!stats->vm) {
- for_each_ggtt_vma(vma, obj) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- if (i915_vma_is_active(vma))
- stats->active += vma->node.size;
- else
- stats->inactive += vma->node.size;
-
- if (i915_vma_is_closed(vma))
- stats->closed += vma->node.size;
- }
- } else {
- struct rb_node *p = obj->vma.tree.rb_node;
-
- while (p) {
- long cmp;
-
- vma = rb_entry(p, typeof(*vma), obj_node);
- cmp = i915_vma_compare(vma, stats->vm, NULL);
- if (cmp == 0) {
- if (drm_mm_node_allocated(&vma->node)) {
- if (i915_vma_is_active(vma))
- stats->active += vma->node.size;
- else
- stats->inactive += vma->node.size;
-
- if (i915_vma_is_closed(vma))
- stats->closed += vma->node.size;
- }
- break;
- }
- if (cmp < 0)
- p = p->rb_right;
- else
- p = p->rb_left;
- }
- }
- spin_unlock(&obj->vma.lock);
-
- i915_gem_object_put(obj);
- return 0;
-}
-
-#define print_file_stats(m, name, stats) do { \
- if (stats.count) \
- seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \
- name, \
- stats.count, \
- stats.total, \
- stats.active, \
- stats.inactive, \
- stats.closed); \
-} while (0)
-
-static void print_context_stats(struct seq_file *m,
- struct drm_i915_private *i915)
-{
- struct file_stats kstats = {};
- struct i915_gem_context *ctx, *cn;
-
- spin_lock(&i915->gem.contexts.lock);
- list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
-
- if (!kref_get_unless_zero(&ctx->ref))
- continue;
-
- spin_unlock(&i915->gem.contexts.lock);
-
- for_each_gem_engine(ce,
- i915_gem_context_lock_engines(ctx), it) {
- if (intel_context_pin_if_active(ce)) {
- rcu_read_lock();
- if (ce->state)
- per_file_stats(0,
- ce->state->obj, &kstats);
- per_file_stats(0, ce->ring->vma->obj, &kstats);
- rcu_read_unlock();
- intel_context_unpin(ce);
- }
- }
- i915_gem_context_unlock_engines(ctx);
-
- mutex_lock(&ctx->mutex);
- if (!IS_ERR_OR_NULL(ctx->file_priv)) {
- struct file_stats stats = {
- .vm = rcu_access_pointer(ctx->vm),
- };
- struct drm_file *file = ctx->file_priv->file;
- struct task_struct *task;
- char name[80];
-
- rcu_read_lock();
- idr_for_each(&file->object_idr, per_file_stats, &stats);
- rcu_read_unlock();
-
- rcu_read_lock();
- task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
- snprintf(name, sizeof(name), "%s",
- task ? task->comm : "<unknown>");
- rcu_read_unlock();
-
- print_file_stats(m, name, stats);
- }
- mutex_unlock(&ctx->mutex);
-
- spin_lock(&i915->gem.contexts.lock);
- list_safe_reset_next(ctx, cn, link);
- i915_gem_context_put(ctx);
- }
- spin_unlock(&i915->gem.contexts.lock);
-
- print_file_stats(m, "[k]contexts", kstats);
-}
-
static int i915_gem_object_info(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -371,311 +233,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
for_each_memory_region(mr, i915, id)
seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
mr->name, &mr->total, &mr->avail);
- seq_putc(m, '\n');
-
- print_context_stats(m, i915);
-
- return 0;
-}
-
-static void gen8_display_interrupt_info(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe) {
- enum intel_display_power_domain power_domain;
- intel_wakeref_t wakeref;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv,
- power_domain);
- if (!wakeref) {
- seq_printf(m, "Pipe %c power disabled\n",
- pipe_name(pipe));
- continue;
- }
- seq_printf(m, "Pipe %c IMR:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IMR(pipe)));
- seq_printf(m, "Pipe %c IIR:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IIR(pipe)));
- seq_printf(m, "Pipe %c IER:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IER(pipe)));
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
- }
-
- seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IMR));
- seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IIR));
- seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IER));
-
- seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IMR));
- seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IIR));
- seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IER));
-
- seq_printf(m, "PCU interrupt mask:\t%08x\n",
- I915_READ(GEN8_PCU_IMR));
- seq_printf(m, "PCU interrupt identity:\t%08x\n",
- I915_READ(GEN8_PCU_IIR));
- seq_printf(m, "PCU interrupt enable:\t%08x\n",
- I915_READ(GEN8_PCU_IER));
-}
-
-static int i915_interrupt_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
- int i, pipe;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- if (IS_CHERRYVIEW(dev_priv)) {
- intel_wakeref_t pref;
-
- seq_printf(m, "Master Interrupt Control:\t%08x\n",
- I915_READ(GEN8_MASTER_IRQ));
-
- seq_printf(m, "Display IER:\t%08x\n",
- I915_READ(VLV_IER));
- seq_printf(m, "Display IIR:\t%08x\n",
- I915_READ(VLV_IIR));
- seq_printf(m, "Display IIR_RW:\t%08x\n",
- I915_READ(VLV_IIR_RW));
- seq_printf(m, "Display IMR:\t%08x\n",
- I915_READ(VLV_IMR));
- for_each_pipe(dev_priv, pipe) {
- enum intel_display_power_domain power_domain;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- pref = intel_display_power_get_if_enabled(dev_priv,
- power_domain);
- if (!pref) {
- seq_printf(m, "Pipe %c power disabled\n",
- pipe_name(pipe));
- continue;
- }
-
- seq_printf(m, "Pipe %c stat:\t%08x\n",
- pipe_name(pipe),
- I915_READ(PIPESTAT(pipe)));
-
- intel_display_power_put(dev_priv, power_domain, pref);
- }
-
- pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- seq_printf(m, "Port hotplug:\t%08x\n",
- I915_READ(PORT_HOTPLUG_EN));
- seq_printf(m, "DPFLIPSTAT:\t%08x\n",
- I915_READ(VLV_DPFLIPSTAT));
- seq_printf(m, "DPINVGTT:\t%08x\n",
- I915_READ(DPINVGTT));
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
-
- for (i = 0; i < 4; i++) {
- seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IMR(i)));
- seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IIR(i)));
- seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IER(i)));
- }
-
- seq_printf(m, "PCU interrupt mask:\t%08x\n",
- I915_READ(GEN8_PCU_IMR));
- seq_printf(m, "PCU interrupt identity:\t%08x\n",
- I915_READ(GEN8_PCU_IIR));
- seq_printf(m, "PCU interrupt enable:\t%08x\n",
- I915_READ(GEN8_PCU_IER));
- } else if (INTEL_GEN(dev_priv) >= 11) {
- if (HAS_MASTER_UNIT_IRQ(dev_priv))
- seq_printf(m, "Master Unit Interrupt Control: %08x\n",
- I915_READ(DG1_MSTR_UNIT_INTR));
-
- seq_printf(m, "Master Interrupt Control: %08x\n",
- I915_READ(GEN11_GFX_MSTR_IRQ));
-
- seq_printf(m, "Render/Copy Intr Enable: %08x\n",
- I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
- seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
- I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
- seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
- I915_READ(GEN11_GUC_SG_INTR_ENABLE));
- seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
- I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
- seq_printf(m, "Crypto Intr Enable:\t %08x\n",
- I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
- seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
- I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
-
- seq_printf(m, "Display Interrupt Control:\t%08x\n",
- I915_READ(GEN11_DISPLAY_INT_CTL));
-
- gen8_display_interrupt_info(m);
- } else if (INTEL_GEN(dev_priv) >= 8) {
- seq_printf(m, "Master Interrupt Control:\t%08x\n",
- I915_READ(GEN8_MASTER_IRQ));
-
- for (i = 0; i < 4; i++) {
- seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IMR(i)));
- seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IIR(i)));
- seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
- i, I915_READ(GEN8_GT_IER(i)));
- }
-
- gen8_display_interrupt_info(m);
- } else if (IS_VALLEYVIEW(dev_priv)) {
- intel_wakeref_t pref;
-
- seq_printf(m, "Display IER:\t%08x\n",
- I915_READ(VLV_IER));
- seq_printf(m, "Display IIR:\t%08x\n",
- I915_READ(VLV_IIR));
- seq_printf(m, "Display IIR_RW:\t%08x\n",
- I915_READ(VLV_IIR_RW));
- seq_printf(m, "Display IMR:\t%08x\n",
- I915_READ(VLV_IMR));
- for_each_pipe(dev_priv, pipe) {
- enum intel_display_power_domain power_domain;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- pref = intel_display_power_get_if_enabled(dev_priv,
- power_domain);
- if (!pref) {
- seq_printf(m, "Pipe %c power disabled\n",
- pipe_name(pipe));
- continue;
- }
-
- seq_printf(m, "Pipe %c stat:\t%08x\n",
- pipe_name(pipe),
- I915_READ(PIPESTAT(pipe)));
- intel_display_power_put(dev_priv, power_domain, pref);
- }
-
- seq_printf(m, "Master IER:\t%08x\n",
- I915_READ(VLV_MASTER_IER));
-
- seq_printf(m, "Render IER:\t%08x\n",
- I915_READ(GTIER));
- seq_printf(m, "Render IIR:\t%08x\n",
- I915_READ(GTIIR));
- seq_printf(m, "Render IMR:\t%08x\n",
- I915_READ(GTIMR));
-
- seq_printf(m, "PM IER:\t\t%08x\n",
- I915_READ(GEN6_PMIER));
- seq_printf(m, "PM IIR:\t\t%08x\n",
- I915_READ(GEN6_PMIIR));
- seq_printf(m, "PM IMR:\t\t%08x\n",
- I915_READ(GEN6_PMIMR));
-
- pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- seq_printf(m, "Port hotplug:\t%08x\n",
- I915_READ(PORT_HOTPLUG_EN));
- seq_printf(m, "DPFLIPSTAT:\t%08x\n",
- I915_READ(VLV_DPFLIPSTAT));
- seq_printf(m, "DPINVGTT:\t%08x\n",
- I915_READ(DPINVGTT));
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
-
- } else if (!HAS_PCH_SPLIT(dev_priv)) {
- seq_printf(m, "Interrupt enable: %08x\n",
- I915_READ(GEN2_IER));
- seq_printf(m, "Interrupt identity: %08x\n",
- I915_READ(GEN2_IIR));
- seq_printf(m, "Interrupt mask: %08x\n",
- I915_READ(GEN2_IMR));
- for_each_pipe(dev_priv, pipe)
- seq_printf(m, "Pipe %c stat: %08x\n",
- pipe_name(pipe),
- I915_READ(PIPESTAT(pipe)));
- } else {
- seq_printf(m, "North Display Interrupt enable: %08x\n",
- I915_READ(DEIER));
- seq_printf(m, "North Display Interrupt identity: %08x\n",
- I915_READ(DEIIR));
- seq_printf(m, "North Display Interrupt mask: %08x\n",
- I915_READ(DEIMR));
- seq_printf(m, "South Display Interrupt enable: %08x\n",
- I915_READ(SDEIER));
- seq_printf(m, "South Display Interrupt identity: %08x\n",
- I915_READ(SDEIIR));
- seq_printf(m, "South Display Interrupt mask: %08x\n",
- I915_READ(SDEIMR));
- seq_printf(m, "Graphics Interrupt enable: %08x\n",
- I915_READ(GTIER));
- seq_printf(m, "Graphics Interrupt identity: %08x\n",
- I915_READ(GTIIR));
- seq_printf(m, "Graphics Interrupt mask: %08x\n",
- I915_READ(GTIMR));
- }
-
- if (INTEL_GEN(dev_priv) >= 11) {
- seq_printf(m, "RCS Intr Mask:\t %08x\n",
- I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
- seq_printf(m, "BCS Intr Mask:\t %08x\n",
- I915_READ(GEN11_BCS_RSVD_INTR_MASK));
- seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
- I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
- seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
- I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
- seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
- I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
- seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
- I915_READ(GEN11_GUC_SG_INTR_MASK));
- seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
- I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
- seq_printf(m, "Crypto Intr Mask:\t %08x\n",
- I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
- seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
- I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
-
- } else if (INTEL_GEN(dev_priv) >= 6) {
- for_each_uabi_engine(engine, dev_priv) {
- seq_printf(m,
- "Graphics Interrupt mask (%s): %08x\n",
- engine->name, ENGINE_READ(engine, RING_IMR));
- }
- }
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- unsigned int i;
-
- seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
-
- rcu_read_lock();
- for (i = 0; i < i915->ggtt.num_fences; i++) {
- struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
- struct i915_vma *vma = reg->vma;
-
- seq_printf(m, "Fence %d, pin count = %d, object = ",
- i, atomic_read(&reg->pin_count));
- if (!vma)
- seq_puts(m, "unused");
- else
- i915_debugfs_describe_obj(m, vma->obj);
- seq_putc(m, '\n');
- }
- rcu_read_unlock();
return 0;
}
@@ -802,7 +359,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 rpmodectl, freq_sts;
- rpmodectl = I915_READ(GEN6_RP_CONTROL);
+ rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
seq_printf(m, "HW control enabled: %s\n",
@@ -847,19 +404,19 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
int max_freq;
- rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+ rp_state_limits = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_LIMITS);
if (IS_GEN9_LP(dev_priv)) {
- rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
- gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
+ rp_state_cap = intel_uncore_read(&dev_priv->uncore, BXT_RP_STATE_CAP);
+ gt_perf_status = intel_uncore_read(&dev_priv->uncore, BXT_GT_PERF_STATUS);
} else {
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ rp_state_cap = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_CAP);
+ gt_perf_status = intel_uncore_read(&dev_priv->uncore, GEN6_GT_PERF_STATUS);
}
/* RPSTAT1 is in the GT power well */
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- reqf = I915_READ(GEN6_RPNSWREQ);
+ reqf = intel_uncore_read(&dev_priv->uncore, GEN6_RPNSWREQ);
if (INTEL_GEN(dev_priv) >= 9)
reqf >>= 23;
else {
@@ -871,24 +428,24 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
}
reqf = intel_gpu_freq(rps, reqf);
- rpmodectl = I915_READ(GEN6_RP_CONTROL);
- rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
- rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
-
- rpstat = I915_READ(GEN6_RPSTAT1);
- rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
- rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
- rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
- rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
- rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
- rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
+ rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
+ rpinclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_UP_THRESHOLD);
+ rpdeclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_DOWN_THRESHOLD);
+
+ rpstat = intel_uncore_read(&dev_priv->uncore, GEN6_RPSTAT1);
+ rpupei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
+ rpcurup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
+ rpprevup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
+ rpdownei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
+ rpcurdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
+ rpprevdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
cagf = intel_rps_read_actual_frequency(rps);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
if (INTEL_GEN(dev_priv) >= 11) {
- pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
- pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
+ pm_ier = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ pm_imr = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
/*
* The equivalent to the PM ISR & IIR cannot be read
* without affecting the current state of the system
@@ -896,17 +453,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
pm_isr = 0;
pm_iir = 0;
} else if (INTEL_GEN(dev_priv) >= 8) {
- pm_ier = I915_READ(GEN8_GT_IER(2));
- pm_imr = I915_READ(GEN8_GT_IMR(2));
- pm_isr = I915_READ(GEN8_GT_ISR(2));
- pm_iir = I915_READ(GEN8_GT_IIR(2));
+ pm_ier = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IER(2));
+ pm_imr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IMR(2));
+ pm_isr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_ISR(2));
+ pm_iir = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IIR(2));
} else {
- pm_ier = I915_READ(GEN6_PMIER);
- pm_imr = I915_READ(GEN6_PMIMR);
- pm_isr = I915_READ(GEN6_PMISR);
- pm_iir = I915_READ(GEN6_PMIIR);
+ pm_ier = intel_uncore_read(&dev_priv->uncore, GEN6_PMIER);
+ pm_imr = intel_uncore_read(&dev_priv->uncore, GEN6_PMIMR);
+ pm_isr = intel_uncore_read(&dev_priv->uncore, GEN6_PMISR);
+ pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
}
- pm_mask = I915_READ(GEN6_PMINTRMSK);
+ pm_mask = intel_uncore_read(&dev_priv->uncore, GEN6_PMINTRMSK);
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
@@ -936,27 +493,27 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+ seq_printf(m, "RP CUR UP EI: %d (%lldns)\n",
rpupei,
intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei));
- seq_printf(m, "RP CUR UP: %d (%dun)\n",
+ seq_printf(m, "RP CUR UP: %d (%lldun)\n",
rpcurup,
intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup));
- seq_printf(m, "RP PREV UP: %d (%dns)\n",
+ seq_printf(m, "RP PREV UP: %d (%lldns)\n",
rpprevup,
intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup));
seq_printf(m, "Up threshold: %d%%\n",
rps->power.up_threshold);
- seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+ seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n",
rpdownei,
intel_gt_pm_interval_to_ns(&dev_priv->gt,
rpdownei));
- seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+ seq_printf(m, "RP CUR DOWN: %d (%lldns)\n",
rpcurdown,
intel_gt_pm_interval_to_ns(&dev_priv->gt,
rpcurdown));
- seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+ seq_printf(m, "RP PREV DOWN: %d (%lldns)\n",
rpprevdown,
intel_gt_pm_interval_to_ns(&dev_priv->gt,
rpprevdown));
@@ -1011,111 +568,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_ring_freq_table(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_rps *rps = &dev_priv->gt.rps;
- unsigned int max_gpu_freq, min_gpu_freq;
- intel_wakeref_t wakeref;
- int gpu_freq, ia_freq;
-
- if (!HAS_LLC(dev_priv))
- return -ENODEV;
-
- min_gpu_freq = rps->min_freq;
- max_gpu_freq = rps->max_freq;
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- /* Convert GT frequency to 50 HZ units */
- min_gpu_freq /= GEN9_FREQ_SCALER;
- max_gpu_freq /= GEN9_FREQ_SCALER;
- }
-
- seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
- ia_freq = gpu_freq;
- sandybridge_pcode_read(dev_priv,
- GEN6_PCODE_READ_MIN_FREQ_TABLE,
- &ia_freq, NULL);
- seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
- intel_gpu_freq(rps,
- (gpu_freq *
- (IS_GEN9_BC(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10 ?
- GEN9_FREQ_SCALER : 1))),
- ((ia_freq >> 0) & 0xff) * 100,
- ((ia_freq >> 8) & 0xff) * 100);
- }
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
-{
- seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
- ring->space, ring->head, ring->tail, ring->emit);
-}
-
-static int i915_context_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct i915_gem_context *ctx, *cn;
-
- spin_lock(&i915->gem.contexts.lock);
- list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
-
- if (!kref_get_unless_zero(&ctx->ref))
- continue;
-
- spin_unlock(&i915->gem.contexts.lock);
-
- seq_puts(m, "HW context ");
- if (ctx->pid) {
- struct task_struct *task;
-
- task = get_pid_task(ctx->pid, PIDTYPE_PID);
- if (task) {
- seq_printf(m, "(%s [%d]) ",
- task->comm, task->pid);
- put_task_struct(task);
- }
- } else if (IS_ERR(ctx->file_priv)) {
- seq_puts(m, "(deleted) ");
- } else {
- seq_puts(m, "(kernel) ");
- }
-
- seq_putc(m, ctx->remap_slice ? 'R' : 'r');
- seq_putc(m, '\n');
-
- for_each_gem_engine(ce,
- i915_gem_context_lock_engines(ctx), it) {
- if (intel_context_pin_if_active(ce)) {
- seq_printf(m, "%s: ", ce->engine->name);
- if (ce->state)
- i915_debugfs_describe_obj(m, ce->state->obj);
- describe_ctx_ring(m, ce->ring);
- seq_putc(m, '\n');
- intel_context_unpin(ce);
- }
- }
- i915_gem_context_unlock_engines(ctx);
-
- seq_putc(m, '\n');
-
- spin_lock(&i915->gem.contexts.lock);
- list_safe_reset_next(ctx, cn, link);
- i915_gem_context_put(ctx);
- }
- spin_unlock(&i915->gem.contexts.lock);
-
- return 0;
-}
-
static const char *swizzle_string(unsigned swizzle)
{
switch (swizzle) {
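
For reference, the i915_ring_freq_table() reader removed above decoded the GEN6_PCODE_READ_MIN_FREQ_TABLE reply as two 8-bit fields in units of 100 MHz; the arithmetic, lifted from the removed code:

	/* pcode reply: bits 7:0 = effective CPU (IA) freq,
	 * bits 15:8 = effective ring freq, both in 100 MHz units */
	cpu_mhz  = ((ia_freq >> 0) & 0xff) * 100;
	ring_mhz = ((ia_freq >> 8) & 0xff) * 100;
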
@@ -1193,20 +645,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
return 0;
}
-static const char *rps_power_to_str(unsigned int power)
-{
- static const char * const strings[] = {
- [LOW_POWER] = "low power",
- [BETWEEN] = "mixed",
- [HIGH_POWER] = "high power",
- };
-
- if (power >= ARRAY_SIZE(strings) || !strings[power])
- return "unknown";
-
- return strings[power];
-}
-
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1231,42 +669,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_gpu_freq(rps, rps->efficient_freq),
intel_gpu_freq(rps, rps->boost_freq));
- seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
-
- if (INTEL_GEN(dev_priv) >= 6 && intel_rps_is_active(rps)) {
- u32 rpup, rpupei;
- u32 rpdown, rpdownei;
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
- rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
- rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
- rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
- seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
- rps_power_to_str(rps->power.mode));
- seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
- rpup && rpupei ? 100 * rpup / rpupei : 0,
- rps->power.up_threshold);
- seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
- rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
- rps->power.down_threshold);
- } else {
- seq_puts(m, "\nRPS Autotuning inactive\n");
- }
-
- return 0;
-}
-
-static int i915_llc(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const bool edram = INTEL_GEN(dev_priv) > 8;
-
- seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
- seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
- dev_priv->edram_size_mb);
+ seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
return 0;
}
@@ -1280,7 +683,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
seq_puts(m, "Runtime power management not supported\n");
seq_printf(m, "Runtime power status: %s\n",
- enableddisabled(!dev_priv->power_domains.wakeref));
+ enableddisabled(!dev_priv->power_domains.init_wakeref));
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
seq_printf(m, "IRQs disabled: %s\n",
@@ -1306,34 +709,28 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
static int i915_engine_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
struct drm_printer p;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- seq_printf(m, "GT awake? %s [%d]\n",
- yesno(dev_priv->gt.awake),
- atomic_read(&dev_priv->gt.wakeref.count));
- seq_printf(m, "CS timestamp frequency: %u Hz\n",
- RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_hz);
+ seq_printf(m, "GT awake? %s [%d], %llums\n",
+ yesno(i915->gt.awake),
+ atomic_read(&i915->gt.wakeref.count),
+ ktime_to_ms(intel_gt_get_awake_time(&i915->gt)));
+ seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
+ i915->gt.clock_frequency,
+ i915->gt.clock_period_ns);
p = drm_seq_file_printer(m);
- for_each_uabi_engine(engine, dev_priv)
+ for_each_uabi_engine(engine, i915)
intel_engine_dump(engine, &p, "%s\n", engine->name);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_gt_show_timelines(&i915->gt, &p, i915_request_show_with_schedule);
- return 0;
-}
-
-static int i915_shrinker_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
-
- seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
- seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return 0;
}
@@ -1411,7 +808,7 @@ i915_perf_noa_delay_set(void *data, u64 val)
* This would lead to infinite waits as we're doing timestamp
* difference on the CS with only 32bits.
*/
- if (i915_cs_timestamp_ns_to_ticks(i915, val) > U32_MAX)
+ if (intel_gt_ns_to_clock_interval(&i915->gt, val) > U32_MAX)
return -EINVAL;
atomic64_set(&i915->perf.noa_programming_delay, val);
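
The bound check converts the requested delay from nanoseconds to CS timestamp ticks and rejects anything that does not fit in 32 bits, since the timestamp difference on the CS is only 32 bits wide. The conversion itself, as done by the i915_cs_timestamp_ns_to_ticks() helper this patch removes from i915_drv.h (sketch); the replacement intel_gt_ns_to_clock_interval() presumably performs the same computation against the per-gt clock frequency:

	/* ns -> CS timestamp ticks, rounding up */
	ticks = DIV_ROUND_UP_ULL(val * cs_timestamp_frequency_hz, 1000000000);
	if (ticks > U32_MAX)
		return -EINVAL;
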
@@ -1529,55 +926,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
i915_drop_caches_get, i915_drop_caches_set,
"0x%08llx\n");
-static int
-i915_cache_sharing_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
- intel_wakeref_t wakeref;
- u32 snpcr = 0;
-
- if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
- return -ENODEV;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
-
- *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
-
- return 0;
-}
-
-static int
-i915_cache_sharing_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- intel_wakeref_t wakeref;
-
- if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
- return -ENODEV;
-
- if (val > 3)
- return -EINVAL;
-
- drm_dbg(&dev_priv->drm,
- "Manually setting uncore sharing to %llu\n", val);
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- u32 snpcr;
-
- /* Update the cache sharing policy here as well */
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
- snpcr &= ~GEN6_MBC_SNPCR_MASK;
- snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
- I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
- }
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
- i915_cache_sharing_get, i915_cache_sharing_set,
- "%llu\n");
-
static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -1621,16 +969,10 @@ static const struct file_operations i915_forcewake_fops = {
static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
- {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
- {"i915_gem_interrupt", i915_interrupt_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
- {"i915_ring_freq_table", i915_ring_freq_table, 0},
- {"i915_context_status", i915_context_status, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
- {"i915_llc", i915_llc, 0},
{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
{"i915_engine_info", i915_engine_info, 0},
- {"i915_shrinker_info", i915_shrinker_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
{"i915_sseu_status", i915_sseu_status, 0},
{"i915_rps_boost_info", i915_rps_boost_info, 0},
@@ -1643,7 +985,6 @@ static const struct i915_debugfs_files {
} i915_debugfs_files[] = {
{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
{"i915_wedged", &i915_wedged_fops},
- {"i915_cache_sharing", &i915_cache_sharing_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
{"i915_error_state", &i915_error_state_fops},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 99eb0d7bbc44..8e9cb44e66e5 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,12 +58,14 @@
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
+#include "display/intel_pps.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
@@ -410,6 +412,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev_priv);
+ intel_device_info_runtime_init(dev_priv);
ret = intel_gt_init_mmio(&dev_priv->gt);
if (ret)
@@ -516,8 +519,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
- intel_device_info_runtime_init(dev_priv);
-
if (HAS_PPGTT(dev_priv)) {
if (intel_vgpu_active(dev_priv) &&
!intel_vgpu_has_full_ppgtt(dev_priv)) {
@@ -609,14 +610,15 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
goto err_msi;
intel_opregion_setup(dev_priv);
+
+ intel_pcode_init(dev_priv);
+
/*
- * Fill the dram structure to get the system raw bandwidth and
- * dram info. This will be used for memory latency calculation.
+ * Fill the dram structure to get the system dram info. This will be
+ * used for memory latency calculation.
*/
intel_dram_detect(dev_priv);
- intel_pcode_init(dev_priv);
-
intel_bw_init_hw(dev_priv);
return 0;
@@ -733,6 +735,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
* events.
*/
drm_kms_helper_poll_fini(&dev_priv->drm);
+ drm_atomic_helper_shutdown(&dev_priv->drm);
intel_gt_driver_unregister(&dev_priv->gt);
acpi_video_unregister();
@@ -935,8 +938,6 @@ void i915_driver_remove(struct drm_i915_private *i915)
i915_gem_suspend(i915);
- drm_atomic_helper_shutdown(&i915->drm);
-
intel_gvt_driver_remove(i915);
intel_modeset_driver_remove(i915);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 632c713227dc..26d69d06aa6d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -79,9 +79,9 @@
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"
-#include "gt/intel_lrc.h"
#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
+#include "gt/intel_region_lmem.h"
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"
@@ -103,7 +103,6 @@
#include "i915_vma.h"
#include "i915_irq.h"
-#include "intel_region_lmem.h"
/* General customization:
*/
@@ -416,6 +415,7 @@ struct intel_fbc {
u16 gen9_wa_cfb_stride;
u16 interval;
s8 fence_id;
+ bool psr2_active;
} state_cache;
/*
@@ -1122,23 +1122,11 @@ struct drm_i915_private {
* crtc_state->wm.need_postvbl_update.
*/
struct mutex wm_mutex;
-
- /*
- * Set during HW readout of watermarks/DDB. Some platforms
- * need to know when we're still using BIOS-provided values
- * (which we don't fully trust).
- *
- * FIXME get rid of this.
- */
- bool distrust_bios_wm;
} wm;
struct dram_info {
- bool valid;
- bool is_16gb_dimm;
+ bool wm_lv_0_adjust_needed;
u8 num_channels;
- u8 ranks;
- u32 bandwidth_kbps;
bool symmetric_memory;
enum intel_dram_type {
INTEL_DRAM_UNKNOWN,
@@ -1147,6 +1135,7 @@ struct drm_i915_private {
INTEL_DRAM_LPDDR3,
INTEL_DRAM_LPDDR4
} type;
+ u8 num_qgv_points;
} dram_info;
struct intel_bw_info {
@@ -1169,9 +1158,6 @@ struct drm_i915_private {
struct i915_gem_contexts {
spinlock_t lock; /* locks list */
struct list_head list;
-
- struct llist_head free_list;
- struct work_struct free_work;
} contexts;
/*
@@ -1185,6 +1171,8 @@ struct drm_i915_private {
struct file *mmap_singleton;
} gem;
+ u8 framestart_delay;
+
u8 pch_ssc_use;
/* For i915gm/i945gm vblank irq workaround */
@@ -1346,7 +1334,7 @@ intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
const unsigned int pi = __platform_mask_index(info, p);
- return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS;
+ return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1);
}
static __always_inline bool
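
The intel_subplatform() fix above matters because INTEL_SUBPLATFORM_BITS is a bit count, not a bit mask; masking with the raw constant kept the wrong bits. The replacement builds an N-bit mask in the usual way:

	/* e.g. with INTEL_SUBPLATFORM_BITS == 3: (1 << 3) - 1 == 0b111 */
	mask = (1u << INTEL_SUBPLATFORM_BITS) - 1;
	subplatform_bits = info->platform_mask[pi] & mask;
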
@@ -1569,16 +1557,30 @@ enum {
TGL_REVID_D0,
};
-extern const struct i915_rev_steppings tgl_uy_revids[];
-extern const struct i915_rev_steppings tgl_revids[];
+#define TGL_UY_REVIDS_SIZE 4
+#define TGL_REVIDS_SIZE 2
+
+extern const struct i915_rev_steppings tgl_uy_revids[TGL_UY_REVIDS_SIZE];
+extern const struct i915_rev_steppings tgl_revids[TGL_REVIDS_SIZE];
static inline const struct i915_rev_steppings *
tgl_revids_get(struct drm_i915_private *dev_priv)
{
- if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv))
- return &tgl_uy_revids[INTEL_REVID(dev_priv)];
- else
- return &tgl_revids[INTEL_REVID(dev_priv)];
+ u8 revid = INTEL_REVID(dev_priv);
+ u8 size;
+ const struct i915_rev_steppings *tgl_revid_tbl;
+
+ if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
+ tgl_revid_tbl = tgl_uy_revids;
+ size = ARRAY_SIZE(tgl_uy_revids);
+ } else {
+ tgl_revid_tbl = tgl_revids;
+ size = ARRAY_SIZE(tgl_revids);
+ }
+
+ revid = min_t(u8, revid, size - 1);
+
+ return &tgl_revid_tbl[revid];
}
#define IS_TGL_DISP_REVID(p, since, until) \
@@ -1588,14 +1590,14 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define IS_TGL_UY_GT_REVID(p, since, until) \
((IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_uy_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
- tgl_uy_revids[INTEL_REVID(p)].gt_stepping <= (until))
+ tgl_revids_get(p)->gt_stepping >= (since) && \
+ tgl_revids_get(p)->gt_stepping <= (until))
#define IS_TGL_GT_REVID(p, since, until) \
(IS_TIGERLAKE(p) && \
!(IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
- tgl_revids[INTEL_REVID(p)].gt_stepping <= (until))
+ tgl_revids_get(p)->gt_stepping >= (since) && \
+ tgl_revids_get(p)->gt_stepping <= (until))
#define RKL_REVID_A0 0x0
#define RKL_REVID_B0 0x1
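
The reworked tgl_revids_get() clamps the revision id to the last table entry instead of indexing with an arbitrary INTEL_REVID() value, which could otherwise run past the end of the steppings array on newer steppings; the explicit TGL_*_REVIDS_SIZE bounds on the extern declarations are what make ARRAY_SIZE() usable from the header. The IS_TGL_*_GT_REVID() macros then reuse the same clamped lookup rather than indexing the tables directly. The core of the lookup (sketch):

	/* pick the right table, then clamp the revid to its last entry */
	size  = ARRAY_SIZE(tgl_uy_revids);	/* valid: the extern array now has an explicit bound */
	revid = min_t(u8, INTEL_REVID(dev_priv), size - 1);
	step  = &tgl_uy_revids[revid];
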
@@ -1646,8 +1648,6 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
(INTEL_INFO(dev_priv)->has_logical_ring_elsq)
-#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
- (INTEL_INFO(dev_priv)->has_logical_ring_preemption)
#define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq)
@@ -1749,10 +1749,17 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
+#define HAS_VRR(i915) (INTEL_GEN(i915) >= 12)
+
/* Only valid when HAS_DISPLAY() is true */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
(drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
+static inline bool run_as_guest(void)
+{
+ return !hypervisor_is_type(X86_HYPER_NATIVE);
+}
+
static inline bool intel_vtd_active(void)
{
#ifdef CONFIG_INTEL_IOMMU
@@ -1761,7 +1768,7 @@ static inline bool intel_vtd_active(void)
#endif
/* Running as a guest, we assume the host is enforcing VT'd */
- return !hypervisor_is_type(X86_HYPER_NATIVE);
+ return run_as_guest();
}
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
@@ -1793,8 +1800,6 @@ int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
-int i915_gem_freeze(struct drm_i915_private *dev_priv);
-int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915);
@@ -1967,43 +1972,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-#define __I915_REG_OP(op__, dev_priv__, ...) \
- intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
-
-#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__))
-#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
-
-#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__))
-
-/* These are untraced mmio-accessors that are only valid to be used inside
- * critical sections, such as inside IRQ handlers, where forcewake is explicitly
- * controlled.
- *
- * Think twice, and think again, before using these.
- *
- * As an example, these accessors can possibly be used between:
- *
- * spin_lock_irq(&dev_priv->uncore.lock);
- * intel_uncore_forcewake_get__locked();
- *
- * and
- *
- * intel_uncore_forcewake_put__locked();
- * spin_unlock_irq(&dev_priv->uncore.lock);
- *
- *
- * Note: some registers may not need forcewake held, so
- * intel_uncore_forcewake_{get,put} can be omitted, see
- * intel_uncore_forcewake_for_reg().
- *
- * Certain architectures will die if the same cacheline is concurrently accessed
- * by different clients (e.g. on Ivybridge). Access to registers should
- * therefore generally be serialised, by either the dev_priv->uncore.lock or
- * a more localised lock guarding all access to that bank of registers.
- */
-#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
-#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
-
/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
@@ -2026,16 +1994,4 @@ i915_coherent_map_type(struct drm_i915_private *i915)
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
-static inline u64 i915_cs_timestamp_ns_to_ticks(struct drm_i915_private *i915, u64 val)
-{
- return DIV_ROUND_UP_ULL(val * RUNTIME_INFO(i915)->cs_timestamp_frequency_hz,
- 1000000000);
-}
-
-static inline u64 i915_cs_timestamp_ticks_to_ns(struct drm_i915_private *i915, u64 val)
-{
- return div_u64(val * 1000000000,
- RUNTIME_INFO(i915)->cs_timestamp_frequency_hz);
-}
-
#endif
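
The removed I915_READ_FW()/I915_WRITE_FW() comment block above still applies to their explicit replacements: the *_fw accessors skip forcewake handling and tracing, so they are only safe inside a section that holds the uncore lock and has taken forcewake by hand. A sketch of the equivalent open-coded sequence (assuming dev_priv and a register reg in scope):

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(&dev_priv->uncore, FORCEWAKE_ALL);

	val = intel_uncore_read_fw(&dev_priv->uncore, reg);	/* untraced, no forcewake check */
	intel_uncore_write_fw(&dev_priv->uncore, reg, val);

	intel_uncore_forcewake_put__locked(&dev_priv->uncore, FORCEWAKE_ALL);
	spin_unlock_irq(&dev_priv->uncore.lock);
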
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 58276694c848..aa4490934469 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -180,108 +180,6 @@ try_again:
}
static int
-i915_gem_create(struct drm_file *file,
- struct intel_memory_region *mr,
- u64 *size_p,
- u32 *handle_p)
-{
- struct drm_i915_gem_object *obj;
- u32 handle;
- u64 size;
- int ret;
-
- GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
- size = round_up(*size_p, mr->min_page_size);
- if (size == 0)
- return -EINVAL;
-
- /* For most of the ABI (e.g. mmap) we think in system pages */
- GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
-
- /* Allocate the new object */
- obj = i915_gem_object_create_region(mr, size, 0);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- ret = drm_gem_handle_create(file, &obj->base, &handle);
- /* drop reference from allocate - handle holds it now */
- i915_gem_object_put(obj);
- if (ret)
- return ret;
-
- *handle_p = handle;
- *size_p = size;
- return 0;
-}
-
-int
-i915_gem_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- enum intel_memory_type mem_type;
- int cpp = DIV_ROUND_UP(args->bpp, 8);
- u32 format;
-
- switch (cpp) {
- case 1:
- format = DRM_FORMAT_C8;
- break;
- case 2:
- format = DRM_FORMAT_RGB565;
- break;
- case 4:
- format = DRM_FORMAT_XRGB8888;
- break;
- default:
- return -EINVAL;
- }
-
- /* have to work out size/pitch and return them */
- args->pitch = ALIGN(args->width * cpp, 64);
-
- /* align stride to page size so that we can remap */
- if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
- DRM_FORMAT_MOD_LINEAR))
- args->pitch = ALIGN(args->pitch, 4096);
-
- if (args->pitch < args->width)
- return -EINVAL;
-
- args->size = mul_u32_u32(args->pitch, args->height);
-
- mem_type = INTEL_MEMORY_SYSTEM;
- if (HAS_LMEM(to_i915(dev)))
- mem_type = INTEL_MEMORY_LOCAL;
-
- return i915_gem_create(file,
- intel_memory_region_by_type(to_i915(dev),
- mem_type),
- &args->size, &args->handle);
-}
-
-/**
- * Creates a new mm object and returns a handle to it.
- * @dev: drm device pointer
- * @data: ioctl data blob
- * @file: drm file pointer
- */
-int
-i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_private *i915 = to_i915(dev);
- struct drm_i915_gem_create *args = data;
-
- i915_gem_flush_free_objects(i915);
-
- return i915_gem_create(file,
- intel_memory_region_by_type(i915,
- INTEL_MEMORY_SYSTEM),
- &args->size, &args->handle);
-}
-
-static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
bool needs_clflush)
{
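
The dumb-buffer creation path removed above computed the buffer layout from bpp/width/height before allocating the object; the arithmetic, lifted from the removed i915_gem_dumb_create() (max_stride stands in for the intel_plane_fb_max_stride() limit):

	cpp   = DIV_ROUND_UP(args->bpp, 8);		/* bytes per pixel */
	pitch = ALIGN(args->width * cpp, 64);		/* 64-byte aligned stride */
	if (pitch > max_stride)				/* too wide to scan directly: align for remapping */
		pitch = ALIGN(pitch, 4096);
	size  = mul_u32_u32(pitch, args->height);
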
@@ -1059,14 +957,14 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
i915_gem_object_is_tiled(obj) &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->mm.madv == I915_MADV_WILLNEED) {
- GEM_BUG_ON(!obj->mm.quirked);
- __i915_gem_object_unpin_pages(obj);
- obj->mm.quirked = false;
+ GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_clear_tiling_quirk(obj);
+ i915_gem_object_make_shrinkable(obj);
}
if (args->madv == I915_MADV_WILLNEED) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_make_unshrinkable(obj);
+ i915_gem_object_set_tiling_quirk(obj);
}
}
@@ -1207,8 +1105,6 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
- i915_gem_driver_release__contexts(dev_priv);
-
intel_gt_driver_release(&dev_priv->gt);
intel_wa_list_free(&dev_priv->gt_wa_list);
@@ -1249,53 +1145,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}
-int i915_gem_freeze(struct drm_i915_private *dev_priv)
-{
- /* Discard all purgeable objects, let userspace recover those as
- * required after resuming.
- */
- i915_gem_shrink_all(dev_priv);
-
- return 0;
-}
-
-int i915_gem_freeze_late(struct drm_i915_private *i915)
-{
- struct drm_i915_gem_object *obj;
- intel_wakeref_t wakeref;
-
- /*
- * Called just before we write the hibernation image.
- *
- * We need to update the domain tracking to reflect that the CPU
- * will be accessing all the pages to create and restore from the
- * hibernation, and so upon restoration those pages will be in the
- * CPU domain.
- *
- * To make sure the hibernation image contains the latest state,
- * we update that state just before writing out the image.
- *
- * To try and reduce the hibernation image, we manually shrink
- * the objects as well, see i915_gem_freeze()
- */
-
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- i915_gem_shrink(i915, -1UL, NULL, ~0);
- i915_gem_drain_freed_objects(i915);
-
- list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
- i915_gem_object_lock(obj, NULL);
- drm_WARN_ON(&i915->drm,
- i915_gem_object_set_to_cpu_domain(obj, true));
- i915_gem_object_unlock(obj);
- }
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-
- return 0;
-}
-
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
struct drm_i915_file_private *file_priv;
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index a4cad3f154ca..e622aee6e4be 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -38,11 +38,18 @@ struct drm_i915_private;
#define GEM_SHOW_DEBUG() drm_debug_enabled(DRM_UT_DRIVER)
+#ifdef CONFIG_DRM_I915_DEBUG_GEM_ONCE
+#define __GEM_BUG(cond) BUG()
+#else
+#define __GEM_BUG(cond) \
+ WARN(1, "%s:%d GEM_BUG_ON(%s)\n", __func__, __LINE__, __stringify(cond))
+#endif
+
#define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \
__func__, __LINE__, __stringify(condition)); \
GEM_TRACE_DUMP(); \
- BUG(); \
+ __GEM_BUG(condition); \
} \
} while(0)
#define GEM_WARN_ON(expr) WARN_ON(expr)
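
With this change a failed GEM_BUG_ON() only panics the machine when CONFIG_DRM_I915_DEBUG_GEM_ONCE is set; otherwise it degrades to a WARN() that logs the file, line and failed condition and lets the system keep running. Roughly, the assertion now behaves like:

	if (unlikely(condition)) {
		GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", __func__, __LINE__, "condition");
		GEM_TRACE_DUMP();
	#ifdef CONFIG_DRM_I915_DEBUG_GEM_ONCE
		BUG();		/* hard stop */
	#else
		WARN(1, "%s:%d GEM_BUG_ON(%s)\n", __func__, __LINE__, "condition");
	#endif
	}
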
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e1a66c8245b8..4d2d59a9942b 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -61,6 +61,17 @@ mark_free(struct drm_mm_scan *scan,
return drm_mm_scan_add_block(scan, &vma->node);
}
+static bool defer_evict(struct i915_vma *vma)
+{
+ if (i915_vma_is_active(vma))
+ return true;
+
+ if (i915_vma_is_scanout(vma))
+ return true;
+
+ return false;
+}
+
/**
* i915_gem_evict_something - Evict vmas to make room for binding a new one
* @vm: address space to evict from
@@ -150,7 +161,7 @@ search_again:
* To notice when we complete one full cycle, we record the
* first active element seen, before moving it to the tail.
*/
- if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) {
+ if (active != ERR_PTR(-EAGAIN) && defer_evict(vma)) {
if (!active)
active = vma;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c5ee1567f3d1..3ee2f682eff6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -55,22 +55,17 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct device *kdev = &dev_priv->drm.pdev->dev;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
- if (unlikely(ggtt->do_idle_maps)) {
- /* XXX This does not prevent more requests being submitted! */
- if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
- -MAX_SCHEDULE_TIMEOUT)) {
- drm_err(&dev_priv->drm,
- "Failed to wait for idle; VT'd may hang.\n");
- /* Wait a bit, in hopes it avoids the hang */
- udelay(10);
- }
- }
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ /* XXX This does not prevent more requests being submitted! */
+ if (unlikely(ggtt->do_idle_maps))
+ /* Wait a bit, in the hope it avoids the hang */
+ usleep_range(100, 250);
- dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_sg(&i915->drm.pdev->dev,
+ pages->sgl, pages->nents,
+ PCI_DMA_BIDIRECTIONAL);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index f96032c60a12..75c3bfc2486e 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -154,7 +154,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
break;
case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
- value = RUNTIME_INFO(i915)->cs_timestamp_frequency_hz;
+ value = i915->gt.clock_frequency;
break;
case I915_PARAM_MMAP_GTT_COHERENT:
value = INTEL_INFO(i915)->has_coherent_ggtt;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d8cac4c5881f..f962693404b7 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -485,7 +485,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct i915_gem_context_coredump *ctx)
{
- const u32 period = RUNTIME_INFO(m->i915)->cs_timestamp_period_ns;
+ const u32 period = m->i915->gt.clock_period_ns;
err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
@@ -1051,7 +1051,9 @@ i915_vma_coredump_create(const struct intel_gt *gt,
for_each_sgt_daddr(dma, iter, vma->pages) {
void __iomem *s;
- s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE);
+ s = io_mapping_map_wc(&mem->iomap,
+ dma - mem->region.start,
+ PAGE_SIZE);
ret = compress_page(compress,
(void __force *)s, dst,
true);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6cdb052e3850..1a701367a718 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -327,10 +327,10 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
drm_WARN_ON(&dev_priv->drm, bits & ~mask);
- val = I915_READ(PORT_HOTPLUG_EN);
+ val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
val &= ~mask;
val |= bits;
- I915_WRITE(PORT_HOTPLUG_EN, val);
+ intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
}
/**
@@ -376,8 +376,8 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
if (new_val != dev_priv->irq_mask &&
!drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
dev_priv->irq_mask = new_val;
- I915_WRITE(DEIMR, dev_priv->irq_mask);
- POSTING_READ(DEIMR);
+ intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
+ intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
}
}
@@ -401,15 +401,15 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
- old_val = I915_READ(GEN8_DE_PORT_IMR);
+ old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
new_val = old_val;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != old_val) {
- I915_WRITE(GEN8_DE_PORT_IMR, new_val);
- POSTING_READ(GEN8_DE_PORT_IMR);
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
}
}
@@ -440,8 +440,8 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
if (new_val != dev_priv->de_irq_mask[pipe]) {
dev_priv->de_irq_mask[pipe] = new_val;
- I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
- POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
}
}
@@ -455,7 +455,7 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
- u32 sdeimr = I915_READ(SDEIMR);
+ u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
@@ -466,8 +466,8 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
- I915_WRITE(SDEIMR, sdeimr);
- POSTING_READ(SDEIMR);
+ intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
+ intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
@@ -533,8 +533,8 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv,
dev_priv->pipestat_irq_mask[pipe] |= status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
- I915_WRITE(reg, enable_mask | status_mask);
- POSTING_READ(reg);
+ intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
+ intel_uncore_posting_read(&dev_priv->uncore, reg);
}
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
@@ -556,8 +556,8 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
- I915_WRITE(reg, enable_mask | status_mask);
- POSTING_READ(reg);
+ intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
+ intel_uncore_posting_read(&dev_priv->uncore, reg);
}
static bool i915_has_asle(struct drm_i915_private *dev_priv)
@@ -715,28 +715,18 @@ u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
if (!vblank->max_vblank_count)
return 0;
- return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
+ return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
}
-/*
- * On certain encoders on certain platforms, pipe
- * scanline register will not work to get the scanline,
- * since the timings are driven from the PORT or issues
- * with scanline register updates.
- * This function will use Framestamp and current
- * timestamp registers to calculate the scanline.
- */
-static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
+static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_vblank_crtc *vblank =
&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
const struct drm_display_mode *mode = &vblank->hwmode;
- u32 vblank_start = mode->crtc_vblank_start;
- u32 vtotal = mode->crtc_vtotal;
u32 htotal = mode->crtc_htotal;
u32 clock = mode->crtc_clock;
- u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
+ u32 scan_prev_time, scan_curr_time, scan_post_time;
/*
* To avoid the race condition where we might cross into the
@@ -763,8 +753,28 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
PIPE_FRMTMSTMP(crtc->pipe));
} while (scan_post_time != scan_prev_time);
- scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
- clock), 1000 * htotal);
+ return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
+ clock), 1000 * htotal);
+}
+
+/*
+ * On certain encoders on certain platforms, pipe
+ * scanline register will not work to get the scanline,
+ * since the timings are driven from the PORT or issues
+ * with scanline register updates.
+ * This function will use Framestamp and current
+ * timestamp registers to calculate the scanline.
+ */
+static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
+{
+ struct drm_vblank_crtc *vblank =
+ &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
+ const struct drm_display_mode *mode = &vblank->hwmode;
+ u32 vblank_start = mode->crtc_vblank_start;
+ u32 vtotal = mode->crtc_vtotal;
+ u32 scanline;
+
+ scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
scanline = min(scanline, vtotal - 1);
scanline = (scanline + vblank_start) % vtotal;
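
The refactor above splits the estimate so that intel_crtc_scanlines_since_frame_timestamp() only computes how many scanlines have elapsed since the frame timestamp, and the caller then folds in vblank_start/vtotal. The elapsed-scanline arithmetic, as in the code, uses the delta between the frame timestamp and the current pipe timestamp together with the pixel clock and horizontal total; the divide by 1000 suggests the timestamps count microseconds while crtc_clock is in kHz:

	/* delta * clock gives pixels scanned; dividing by htotal gives lines */
	scanlines = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time, clock),
			    1000 * htotal);
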
@@ -883,7 +893,20 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
if (stime)
*stime = ktime_get();
- if (use_scanline_counter) {
+ if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
+ int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);
+
+ position = __intel_get_crtc_scanline(crtc);
+
+ /*
+ * Already exiting vblank? If so, shift our position
+		 * so it looks like we're already approaching the full
+ * vblank end. This should make the generated timestamp
+ * more or less match when the active portion will start.
+ */
+ if (position >= vbl_start && scanlines < position)
+ position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
+ } else if (use_scanline_counter) {
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
@@ -1004,9 +1027,9 @@ static void ivb_parity_work(struct work_struct *work)
if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
goto out;
- misccpctl = I915_READ(GEN7_MISCCPCTL);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
- POSTING_READ(GEN7_MISCCPCTL);
+ misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);
while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
i915_reg_t reg;
@@ -1020,13 +1043,13 @@ static void ivb_parity_work(struct work_struct *work)
reg = GEN7_L3CDERRST1(slice);
- error_status = I915_READ(reg);
+ error_status = intel_uncore_read(&dev_priv->uncore, reg);
row = GEN7_PARITY_ERROR_ROW(error_status);
bank = GEN7_PARITY_ERROR_BANK(error_status);
subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
- I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
- POSTING_READ(reg);
+ intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
+ intel_uncore_posting_read(&dev_priv->uncore, reg);
parity_event[0] = I915_L3_PARITY_UEVENT "=1";
parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
@@ -1047,7 +1070,7 @@ static void ivb_parity_work(struct work_struct *work)
kfree(parity_event[1]);
}
- I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
out:
drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
@@ -1062,17 +1085,12 @@ static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
case HPD_PORT_TC1:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC1);
case HPD_PORT_TC2:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC2);
case HPD_PORT_TC3:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC3);
case HPD_PORT_TC4:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC4);
case HPD_PORT_TC5:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC5);
case HPD_PORT_TC6:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC6);
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
default:
return false;
}
@@ -1096,13 +1114,10 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
case HPD_PORT_A:
- return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_A);
case HPD_PORT_B:
- return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_B);
case HPD_PORT_C:
- return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_C);
case HPD_PORT_D:
- return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_D);
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
default:
return false;
}
@@ -1112,17 +1127,12 @@ static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
case HPD_PORT_TC1:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC1);
case HPD_PORT_TC2:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC2);
case HPD_PORT_TC3:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC3);
case HPD_PORT_TC4:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC4);
case HPD_PORT_TC5:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC5);
case HPD_PORT_TC6:
- return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC6);
+ return val & ICP_TC_HPD_LONG_DETECT(pin);
default:
return false;
}
@@ -1337,7 +1347,7 @@ static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
display_pipe_crc_irq_handler(dev_priv, pipe,
- I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
0, 0, 0, 0);
}
@@ -1345,11 +1355,11 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
display_pipe_crc_irq_handler(dev_priv, pipe,
- I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
- I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
- I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
- I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
- I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
@@ -1358,19 +1368,19 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
u32 res1, res2;
if (INTEL_GEN(dev_priv) >= 3)
- res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
+ res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
else
res1 = 0;
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
+ res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
else
res2 = 0;
display_pipe_crc_irq_handler(dev_priv, pipe,
- I915_READ(PIPE_CRC_RES_RED(pipe)),
- I915_READ(PIPE_CRC_RES_GREEN(pipe)),
- I915_READ(PIPE_CRC_RES_BLUE(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
+ intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
res1, res2);
}
@@ -1379,7 +1389,7 @@ static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(PIPESTAT(pipe),
+ intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS);
@@ -1433,7 +1443,7 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
continue;
reg = PIPESTAT(pipe);
- pipe_stats[pipe] = I915_READ(reg) & status_mask;
+ pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
/*
@@ -1446,8 +1456,8 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
* an interrupt is still pending.
*/
if (pipe_stats[pipe]) {
- I915_WRITE(reg, pipe_stats[pipe]);
- I915_WRITE(reg, enable_mask);
+ intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
+ intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
}
}
spin_unlock(&dev_priv->irq_lock);
@@ -1530,6 +1540,9 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
intel_handle_vblank(dev_priv, pipe);
+ if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
+ flip_done_handler(dev_priv, pipe);
+
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1563,18 +1576,18 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
* bits can itself generate a new hotplug interrupt :(
*/
for (i = 0; i < 10; i++) {
- u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
+ u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
if (tmp == 0)
return hotplug_status;
hotplug_status |= tmp;
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
}
drm_WARN_ONCE(&dev_priv->drm, 1,
"PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
- I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
return hotplug_status;
}
@@ -1623,9 +1636,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 ier = 0;
- gt_iir = I915_READ(GTIIR);
- pm_iir = I915_READ(GEN6_PMIIR);
- iir = I915_READ(VLV_IIR);
+ gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
+ pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
+ iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
if (gt_iir == 0 && pm_iir == 0 && iir == 0)
break;
@@ -1645,14 +1658,14 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
* don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
* bits this time around.
*/
- I915_WRITE(VLV_MASTER_IER, 0);
- ier = I915_READ(VLV_IER);
- I915_WRITE(VLV_IER, 0);
+ intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
+ ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
+ intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
if (gt_iir)
- I915_WRITE(GTIIR, gt_iir);
+ intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
if (pm_iir)
- I915_WRITE(GEN6_PMIIR, pm_iir);
+ intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
@@ -1670,10 +1683,10 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
* from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
*/
if (iir)
- I915_WRITE(VLV_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
- I915_WRITE(VLV_IER, ier);
- I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+ intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
+ intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
if (gt_iir)
gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
@@ -1710,8 +1723,8 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 ier = 0;
- master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
- iir = I915_READ(VLV_IIR);
+ master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
+ iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
if (master_ctl == 0 && iir == 0)
break;
@@ -1731,9 +1744,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
* don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
* bits this time around.
*/
- I915_WRITE(GEN8_MASTER_IRQ, 0);
- ier = I915_READ(VLV_IER);
- I915_WRITE(VLV_IER, 0);
+ intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
+ ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
+ intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
@@ -1754,10 +1767,10 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
* from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
*/
if (iir)
- I915_WRITE(VLV_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
- I915_WRITE(VLV_IER, ier);
- I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+ intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
+ intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -1783,7 +1796,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
* zero. Not acking leads to "The master control interrupt lied (SDE)!"
* errors.
*/
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
if (!hotplug_trigger) {
u32 mask = PORTA_HOTPLUG_STATUS_MASK |
PORTD_HOTPLUG_STATUS_MASK |
@@ -1792,7 +1805,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
dig_hotplug_reg &= ~mask;
}
- I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
if (!hotplug_trigger)
return;
@@ -1837,7 +1850,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
for_each_pipe(dev_priv, pipe)
drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
- I915_READ(FDI_RX_IIR(pipe)));
+ intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
}
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
@@ -1856,7 +1869,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
- u32 err_int = I915_READ(GEN7_ERR_INT);
+ u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
enum pipe pipe;
if (err_int & ERR_INT_POISON)
@@ -1874,12 +1887,12 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
}
}
- I915_WRITE(GEN7_ERR_INT, err_int);
+ intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
}
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
- u32 serr_int = I915_READ(SERR_INT);
+ u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
enum pipe pipe;
if (serr_int & SERR_INT_POISON)
@@ -1889,7 +1902,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
- I915_WRITE(SERR_INT, serr_int);
+ intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
@@ -1922,7 +1935,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
for_each_pipe(dev_priv, pipe)
drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
- I915_READ(FDI_RX_IIR(pipe)));
+ intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
}
if (pch_iir & SDE_ERROR_CPT)
@@ -1938,8 +1951,8 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (ddi_hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
- I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
+ intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
ddi_hotplug_trigger, dig_hotplug_reg,
@@ -1950,8 +1963,8 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (tc_hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
- I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
+ intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
tc_hotplug_trigger, dig_hotplug_reg,
@@ -1976,8 +1989,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
- I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
@@ -1988,8 +2001,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (hotplug2_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
- I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug2_trigger, dig_hotplug_reg,
@@ -2009,8 +2022,8 @@ static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
- I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
+ intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
@@ -2042,6 +2055,9 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
if (de_iir & DE_PIPE_VBLANK(pipe))
intel_handle_vblank(dev_priv, pipe);
+ if (de_iir & DE_PLANE_FLIP_DONE(pipe))
+ flip_done_handler(dev_priv, pipe);
+
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2051,7 +2067,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
- u32 pch_iir = I915_READ(SDEIIR);
+ u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
if (HAS_PCH_CPT(dev_priv))
cpt_irq_handler(dev_priv, pch_iir);
@@ -2059,7 +2075,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
ibx_irq_handler(dev_priv, pch_iir);
/* should clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
+ intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
}
if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
@@ -2079,10 +2095,10 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
ivb_err_int_handler(dev_priv);
if (de_iir & DE_EDP_PSR_INT_HSW) {
- u32 psr_iir = I915_READ(EDP_PSR_IIR);
+ u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR);
intel_psr_irq_handler(dev_priv, psr_iir);
- I915_WRITE(EDP_PSR_IIR, psr_iir);
+ intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir);
}
if (de_iir & DE_AUX_CHANNEL_A_IVB)
@@ -2092,18 +2108,21 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
intel_opregion_asle_intr(dev_priv);
for_each_pipe(dev_priv, pipe) {
- if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
+ if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
intel_handle_vblank(dev_priv, pipe);
+
+ if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
+ flip_done_handler(dev_priv, pipe);
}
/* check event from PCH */
if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
- u32 pch_iir = I915_READ(SDEIIR);
+ u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
cpt_irq_handler(dev_priv, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
+ intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
}
}
@@ -2190,8 +2209,8 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
- I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
@@ -2210,8 +2229,8 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (trigger_tc) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
- I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
+ intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tc, dig_hotplug_reg,
@@ -2222,8 +2241,8 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (trigger_tbt) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
- I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
+ dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
+ intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tbt, dig_hotplug_reg,
@@ -2300,8 +2319,8 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
else
iir_reg = EDP_PSR_IIR;
- psr_iir = I915_READ(iir_reg);
- I915_WRITE(iir_reg, psr_iir);
+ psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
+ intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
if (psr_iir)
found = true;
@@ -2325,7 +2344,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
* Incase of dual link, TE comes from DSI_1
* this is to check if dual link is enabled
*/
- val = I915_READ(TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
+ val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
val &= PORT_SYNC_MODE_ENABLE;
/*
@@ -2337,7 +2356,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
/* Check if DSI configured in command mode */
- val = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
+ val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
val = val & OP_MODE_MASK;
if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
@@ -2346,7 +2365,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
}
/* Get PIPE for handling VBLANK event */
- val = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
switch (val & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
pipe = PIPE_A;
@@ -2366,8 +2385,16 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
/* clear TE in dsi IIR */
port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
- tmp = I915_READ(DSI_INTR_IDENT_REG(port));
- I915_WRITE(DSI_INTR_IDENT_REG(port), tmp);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
+ intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
+}
+
+static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 9)
+ return GEN9_PIPE_PLANE1_FLIP_DONE;
+ else
+ return GEN8_PIPE_PRIMARY_FLIP_DONE;
}
static irqreturn_t
@@ -2378,9 +2405,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
enum pipe pipe;
if (master_ctl & GEN8_DE_MISC_IRQ) {
- iir = I915_READ(GEN8_DE_MISC_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
if (iir) {
- I915_WRITE(GEN8_DE_MISC_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
ret = IRQ_HANDLED;
gen8_de_misc_irq_handler(dev_priv, iir);
} else {
@@ -2390,9 +2417,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
- iir = I915_READ(GEN11_DE_HPD_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
if (iir) {
- I915_WRITE(GEN11_DE_HPD_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
ret = IRQ_HANDLED;
gen11_hpd_irq_handler(dev_priv, iir);
} else {
@@ -2402,11 +2429,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
if (master_ctl & GEN8_DE_PORT_IRQ) {
- iir = I915_READ(GEN8_DE_PORT_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
if (iir) {
bool found = false;
- I915_WRITE(GEN8_DE_PORT_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
ret = IRQ_HANDLED;
if (iir & gen8_de_port_aux_mask(dev_priv)) {
@@ -2459,7 +2486,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
continue;
- iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+ iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
drm_err(&dev_priv->drm,
"The master control interrupt lied (DE PIPE)!\n");
@@ -2467,12 +2494,12 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
ret = IRQ_HANDLED;
- I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
+ intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
intel_handle_vblank(dev_priv, pipe);
- if (iir & GEN9_PIPE_PLANE1_FLIP_DONE)
+ if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
flip_done_handler(dev_priv, pipe);
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
@@ -2496,9 +2523,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
* scheme also closed the SDE interrupt handling race we've seen
* on older pch-split platforms. But this needs testing.
*/
- iir = I915_READ(SDEIIR);
+ iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
if (iir) {
- I915_WRITE(SDEIIR, iir);
+ intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
ret = IRQ_HANDLED;
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
@@ -2741,7 +2768,7 @@ int i915gm_enable_vblank(struct drm_crtc *crtc)
* only when vblank interrupts are actually enabled.
*/
if (dev_priv->vblank_enabled++ == 0)
- I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
return i8xx_enable_vblank(crtc);
}
@@ -2798,16 +2825,16 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
else
port = PORT_A;
- tmp = I915_READ(DSI_INTR_MASK_REG(port));
+ tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port));
if (enable)
tmp &= ~DSI_TE_EVENT;
else
tmp |= DSI_TE_EVENT;
- I915_WRITE(DSI_INTR_MASK_REG(port), tmp);
+ intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp);
- tmp = I915_READ(DSI_INTR_IDENT_REG(port));
- I915_WRITE(DSI_INTR_IDENT_REG(port), tmp);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
+ intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
return true;
}
@@ -2835,19 +2862,6 @@ int bdw_enable_vblank(struct drm_crtc *crtc)
return 0;
}
-void skl_enable_flip_done(struct intel_crtc *crtc)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- unsigned long irqflags;
-
- spin_lock_irqsave(&i915->irq_lock, irqflags);
-
- bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE);
-
- spin_unlock_irqrestore(&i915->irq_lock, irqflags);
-}
-
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
@@ -2869,7 +2883,7 @@ void i915gm_disable_vblank(struct drm_crtc *crtc)
i8xx_disable_vblank(crtc);
if (--dev_priv->vblank_enabled == 0)
- I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
void i965_disable_vblank(struct drm_crtc *crtc)
@@ -2912,19 +2926,6 @@ void bdw_disable_vblank(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-void skl_disable_flip_done(struct intel_crtc *crtc)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- unsigned long irqflags;
-
- spin_lock_irqsave(&i915->irq_lock, irqflags);
-
- bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE);
-
- spin_unlock_irqrestore(&i915->irq_lock, irqflags);
-}
-
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -2935,7 +2936,7 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv)
GEN3_IRQ_RESET(uncore, SDE);
if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
- I915_WRITE(SERR_INT, 0xffffffff);
+ intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -2948,7 +2949,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
- intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
i9xx_pipestat_irq_reset(dev_priv);
@@ -3011,8 +3012,8 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv)
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
- I915_WRITE(VLV_MASTER_IER, 0);
- POSTING_READ(VLV_MASTER_IER);
+ intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
+ intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
gen5_gt_irq_reset(&dev_priv->gt);
@@ -3117,13 +3118,10 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
struct intel_uncore *uncore = &dev_priv->uncore;
-
- u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
+ u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
+ gen8_de_pipe_flip_done_mask(dev_priv);
enum pipe pipe;
- if (INTEL_GEN(dev_priv) >= 9)
- extra_ier |= GEN9_PIPE_PLANE1_FLIP_DONE;
-
spin_lock_irq(&dev_priv->irq_lock);
if (!intel_irqs_enabled(dev_priv)) {
@@ -3165,8 +3163,8 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- I915_WRITE(GEN8_MASTER_IRQ, 0);
- POSTING_READ(GEN8_MASTER_IRQ);
+ intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
gen8_gt_irq_reset(&dev_priv->gt);
@@ -3212,7 +3210,7 @@ static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
* duration to 2ms (which is the minimum in the Display Port spec).
* The pulse duration bits are reserved on LPT+.
*/
- hotplug = I915_READ(PCH_PORT_HOTPLUG);
+ hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
hotplug &= ~(PORTA_HOTPLUG_ENABLE |
PORTB_HOTPLUG_ENABLE |
PORTC_HOTPLUG_ENABLE |
@@ -3221,7 +3219,7 @@ static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
PORTC_PULSE_DURATION_MASK |
PORTD_PULSE_DURATION_MASK);
hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
- I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
}
static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3270,20 +3268,20 @@ static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(SHOTPLUG_CTL_DDI);
+ hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D));
hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
- I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
+ intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug);
}
static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(SHOTPLUG_CTL_TC);
+ hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
@@ -3291,7 +3289,7 @@ static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
ICP_TC_HPD_ENABLE(HPD_PORT_TC6));
hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
- I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
+ intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug);
}
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3302,7 +3300,7 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
- I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+ intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3330,12 +3328,12 @@ static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(SOUTH_CHICKEN1);
+ val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
val |= (INVERT_DDIA_HPD |
INVERT_DDIB_HPD |
INVERT_DDIC_HPD |
INVERT_DDID_HPD);
- I915_WRITE(SOUTH_CHICKEN1, val);
+ intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
icp_hpd_irq_setup(dev_priv);
}
@@ -3344,7 +3342,7 @@ static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
+ hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
@@ -3352,14 +3350,14 @@ static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
- I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
+ intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug);
}
static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
+ hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
@@ -3367,7 +3365,7 @@ static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
- I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
+ intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug);
}
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3378,11 +3376,11 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
- val = I915_READ(GEN11_DE_HPD_IMR);
+ val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
val &= ~hotplug_irqs;
val |= ~enabled_irqs & hotplug_irqs;
- I915_WRITE(GEN11_DE_HPD_IMR, val);
- POSTING_READ(GEN11_DE_HPD_IMR);
+ intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
gen11_tc_hpd_detection_setup(dev_priv);
gen11_tbt_hpd_detection_setup(dev_priv);
@@ -3425,25 +3423,25 @@ static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
/* Display WA #1179 WaHardHangonHotPlug: cnp */
if (HAS_PCH_CNP(dev_priv)) {
- val = I915_READ(SOUTH_CHICKEN1);
+ val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
val |= CHASSIS_CLK_REQ_DURATION(0xf);
- I915_WRITE(SOUTH_CHICKEN1, val);
+ intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
}
/* Enable digital hotplug on the PCH */
- hotplug = I915_READ(PCH_PORT_HOTPLUG);
+ hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
hotplug &= ~(PORTA_HOTPLUG_ENABLE |
PORTB_HOTPLUG_ENABLE |
PORTC_HOTPLUG_ENABLE |
PORTD_HOTPLUG_ENABLE);
hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
- I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
- hotplug = I915_READ(PCH_PORT_HOTPLUG2);
+ hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
hotplug &= ~PORTE_HOTPLUG_ENABLE;
hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
- I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug);
}
static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3451,7 +3449,7 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
u32 hotplug_irqs, enabled_irqs;
if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
- I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+ intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
@@ -3482,11 +3480,11 @@ static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
* duration to 2ms (which is the minimum in the Display Port spec)
* The pulse duration bits are reserved on HSW+.
*/
- hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
+ hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
DIGITAL_PORTA_PULSE_DURATION_MASK);
hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
- I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
+ intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3536,7 +3534,7 @@ static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
- hotplug = I915_READ(PCH_PORT_HOTPLUG);
+ hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
hotplug &= ~(PORTA_HOTPLUG_ENABLE |
PORTB_HOTPLUG_ENABLE |
PORTC_HOTPLUG_ENABLE |
@@ -3544,7 +3542,7 @@ static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
BXT_DDIB_HPD_INVERT |
BXT_DDIC_HPD_INVERT);
hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
- I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+ intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
}
static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3598,6 +3596,9 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
DE_DP_A_HOTPLUG_IVB);
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
@@ -3605,6 +3606,8 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
DE_PIPEA_CRC_DONE | DE_POISON);
extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
+ DE_PLANE_FLIP_DONE(PLANE_A) |
+ DE_PLANE_FLIP_DONE(PLANE_B) |
DE_DP_A_HOTPLUG);
}
@@ -3664,8 +3667,8 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
- POSTING_READ(VLV_MASTER_IER);
+ intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+ intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -3695,11 +3698,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
de_port_masked |= DSI0_TE | DSI1_TE;
}
- de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
- GEN8_PIPE_FIFO_UNDERRUN;
-
- if (INTEL_GEN(dev_priv) >= 9)
- de_pipe_enables |= GEN9_PIPE_PLANE1_FLIP_DONE;
+ de_pipe_enables = de_pipe_masked |
+ GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
+ gen8_de_pipe_flip_done_mask(dev_priv);
de_port_enables = de_port_masked;
if (IS_GEN9_LP(dev_priv))
@@ -3778,14 +3779,14 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
- I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
+ intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
dg1_master_intr_enable(uncore->regs);
- POSTING_READ(DG1_MSTR_UNIT_INTR);
+ intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_UNIT_INTR);
} else {
gen11_master_intr_enable(uncore->regs);
- POSTING_READ(GEN11_GFX_MSTR_IRQ);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}
}
@@ -3798,8 +3799,8 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- POSTING_READ(GEN8_MASTER_IRQ);
+ intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}
static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
@@ -3889,11 +3890,11 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
{
u32 emr;
- *eir = I915_READ(EIR);
+ *eir = intel_uncore_read(&dev_priv->uncore, EIR);
- I915_WRITE(EIR, *eir);
+ intel_uncore_write(&dev_priv->uncore, EIR, *eir);
- *eir_stuck = I915_READ(EIR);
+ *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
if (*eir_stuck == 0)
return;
@@ -3907,9 +3908,9 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
* (or by a GPU reset) so we mask any bit that
* remains set.
*/
- emr = I915_READ(EMR);
- I915_WRITE(EMR, 0xffffffff);
- I915_WRITE(EMR, emr | *eir_stuck);
+ emr = intel_uncore_read(&dev_priv->uncore, EMR);
+ intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
+ intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}
static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
@@ -3975,7 +3976,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv)
if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
}
i9xx_pipestat_irq_reset(dev_priv);
@@ -3989,7 +3990,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
- I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
+ intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
I915_ERROR_MEMORY_REFRESH));
/* Unmask the interrupts that we always want on. */
@@ -4042,7 +4043,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 iir;
- iir = I915_READ(GEN2_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
if (iir == 0)
break;
@@ -4059,7 +4060,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE(GEN2_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
@@ -4085,7 +4086,7 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
i9xx_pipestat_irq_reset(dev_priv);
@@ -4112,7 +4113,7 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
error_mask = ~(I915_ERROR_PAGE_TABLE |
I915_ERROR_MEMORY_REFRESH);
}
- I915_WRITE(EMR, error_mask);
+ intel_uncore_write(&dev_priv->uncore, EMR, error_mask);
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
@@ -4188,7 +4189,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 iir;
- iir = I915_READ(GEN2_IIR);
+ iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
if (iir == 0)
break;
@@ -4204,7 +4205,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE(GEN2_IIR, iir);
+ intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
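The bulk of the i915_irq.c hunks above are a mechanical conversion from the implicit-dev_priv I915_READ()/I915_WRITE()/POSTING_READ() macros to the explicit intel_uncore_*() accessors. A minimal sketch of the pattern, assuming dev_priv is in scope (the register is just an example taken from the hunks above):

	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 iir;

	/* old: iir = I915_READ(GEN8_DE_MISC_IIR); */
	iir = intel_uncore_read(uncore, GEN8_DE_MISC_IIR);

	/* old: I915_WRITE(GEN8_DE_MISC_IIR, iir); */
	intel_uncore_write(uncore, GEN8_DE_MISC_IIR, iir);

	/* old: POSTING_READ(GEN8_DE_MISC_IIR); */
	intel_uncore_posting_read(uncore, GEN8_DE_MISC_IIR);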
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 2efe609519ca..25f25cd95818 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -118,9 +118,6 @@ void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
-void skl_enable_flip_done(struct intel_crtc *crtc);
-void skl_disable_flip_done(struct intel_crtc *crtc);
-
void gen2_irq_reset(struct intel_uncore *uncore);
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
i915_reg_t iir, i915_reg_t ier);
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 43039dc8c607..666808cb3a32 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -62,7 +62,7 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
struct remap_pfn *r = data;
- if (GEM_WARN_ON(!r->sgt.pfn))
+ if (GEM_WARN_ON(!r->sgt.sgp))
return -EINVAL;
/* Special PTE are not associated with any struct page */
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 7f139ea4a90b..6939634e56ed 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -185,7 +185,7 @@ i915_param_named_unsafe(inject_probe_failure, uint, 0400,
i915_param_named(enable_dpcd_backlight, int, 0400,
"Enable support for DPCD backlight control"
- "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enabled)");
+ "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enable, 2=force VESA interface, 3=force Intel interface)");
#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 330c03e2b4f7..f031966af5b7 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -32,6 +32,7 @@ struct drm_printer;
#define ENABLE_GUC_SUBMISSION BIT(0)
#define ENABLE_GUC_LOAD_HUC BIT(1)
+#define ENABLE_GUC_MASK GENMASK(1, 0)
/*
* Invoke param, a function-like macro, for each i915 param, with arguments:
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 11fe790b1969..020b5f561f07 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -455,6 +455,7 @@ static const struct intel_device_info snb_m_gt2_info = {
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
+ .has_reset_engine = true, \
.has_rps = true, \
.dma_mask_size = 40, \
.ppgtt_type = INTEL_PPGTT_ALIASING, \
@@ -513,6 +514,7 @@ static const struct intel_device_info vlv_info = {
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
.has_runtime_pm = 1,
.has_rc6 = 1,
+ .has_reset_engine = true,
.has_rps = true,
.display.has_gmch = 1,
.display.has_hotplug = 1,
@@ -571,8 +573,7 @@ static const struct intel_device_info hsw_gt3_info = {
.dma_mask_size = 39, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 48, \
- .has_64bit_reloc = 1, \
- .has_reset_engine = 1
+ .has_64bit_reloc = 1
#define BDW_PLATFORM \
GEN8_FEATURES, \
@@ -639,7 +640,6 @@ static const struct intel_device_info chv_info = {
GEN8_FEATURES, \
GEN(9), \
GEN9_DEFAULT_PAGE_SIZES, \
- .has_logical_ring_preemption = 1, \
.display.has_csr = 1, \
.has_gt_uc = 1, \
.display.has_hdcp = 1, \
@@ -700,7 +700,6 @@ static const struct intel_device_info skl_gt4_info = {
.has_rps = true, \
.display.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
- .has_logical_ring_preemption = 1, \
.has_gt_uc = 1, \
.dma_mask_size = 39, \
.ppgtt_type = INTEL_PPGTT_FULL, \
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 649c26518d26..112ba5f2ce90 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -198,8 +198,11 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
+#include "gt/intel_execlists_submission.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
-#include "gt/intel_lrc_reg.h"
+#include "gt/intel_gt_clock_utils.h"
+#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
@@ -1635,7 +1638,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
struct drm_i915_gem_object *bo;
struct i915_vma *vma;
const u64 delay_ticks = 0xffffffffffffffff -
- i915_cs_timestamp_ns_to_ticks(i915, atomic64_read(&stream->perf->noa_programming_delay));
+ intel_gt_ns_to_clock_interval(stream->perf->i915->ggtt.vm.gt,
+ atomic64_read(&stream->perf->noa_programming_delay));
const u32 base = stream->engine->mmio_base;
#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
u32 *batch, *ts0, *cs, *jump;
@@ -3516,7 +3520,8 @@ err:
static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
- return i915_cs_timestamp_ticks_to_ns(perf->i915, 2ULL << exponent);
+ return intel_gt_clock_interval_to_ns(perf->i915->ggtt.vm.gt,
+ 2ULL << exponent);
}
/**
@@ -4370,11 +4375,11 @@ void i915_perf_init(struct drm_i915_private *i915)
if (perf->ops.enable_metric_set) {
mutex_init(&perf->lock);
- oa_sample_rate_hard_limit =
- RUNTIME_INFO(i915)->cs_timestamp_frequency_hz / 2;
+ /* Choose a representative limit */
+ oa_sample_rate_hard_limit = i915->gt.clock_frequency / 2;
mutex_init(&perf->metrics_lock);
- idr_init(&perf->metrics_idr);
+ idr_init_base(&perf->metrics_idr, 1);
/* We set up some ratelimit state to potentially throttle any
* _NOTES about spurious, invalid OA reports which we don't
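In the i915_perf.c hunks above, the per-device cs_timestamp conversions are replaced by the GT clock utilities from gt/intel_gt_clock_utils.h. A sketch of both directions, assuming gt is the GT whose clock drives the OA timestamps (the patch reaches it via perf->i915->ggtt.vm.gt):

	/* OA exponent E selects a period of 2^(E+1) clock ticks. */
	u64 period_ns = intel_gt_clock_interval_to_ns(gt, 2ULL << exponent);

	/* ...and back again: a programming delay in ns, as clock ticks. */
	u64 delay_ticks = intel_gt_ns_to_clock_interval(gt, delay_ns);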
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 9856479b56d8..2b88c0baa1bf 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -26,8 +26,6 @@
BIT(I915_SAMPLE_WAIT) | \
BIT(I915_SAMPLE_SEMA))
-#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)
-
static cpumask_t i915_pmu_cpumask;
static unsigned int i915_pmu_target_cpu = -1;
@@ -56,17 +54,42 @@ static bool is_engine_config(u64 config)
return config < __I915_PMU_OTHER(0);
}
-static unsigned int config_enabled_bit(u64 config)
+static unsigned int other_bit(const u64 config)
+{
+ unsigned int val;
+
+ switch (config) {
+ case I915_PMU_ACTUAL_FREQUENCY:
+ val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED;
+ break;
+ case I915_PMU_REQUESTED_FREQUENCY:
+ val = __I915_PMU_REQUESTED_FREQUENCY_ENABLED;
+ break;
+ case I915_PMU_RC6_RESIDENCY:
+ val = __I915_PMU_RC6_RESIDENCY_ENABLED;
+ break;
+ default:
+ /*
+ * Events that do not require sampling, or tracking state
+ * transitions between enabled and disabled can be ignored.
+ */
+ return -1;
+ }
+
+ return I915_ENGINE_SAMPLE_COUNT + val;
+}
+
+static unsigned int config_bit(const u64 config)
{
if (is_engine_config(config))
return engine_config_sample(config);
else
- return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
+ return other_bit(config);
}
-static u64 config_enabled_mask(u64 config)
+static u64 config_mask(u64 config)
{
- return BIT_ULL(config_enabled_bit(config));
+ return BIT_ULL(config_bit(config));
}
static bool is_engine_event(struct perf_event *event)
@@ -74,15 +97,15 @@ static bool is_engine_event(struct perf_event *event)
return is_engine_config(event->attr.config);
}
-static unsigned int event_enabled_bit(struct perf_event *event)
+static unsigned int event_bit(struct perf_event *event)
{
- return config_enabled_bit(event->attr.config);
+ return config_bit(event->attr.config);
}
static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
- u64 enable;
+ u32 enable;
/*
* Only some counters need the sampling timer.
@@ -95,8 +118,8 @@ static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
* Mask out all the ones which do not need the timer, or in
* other words keep all the ones that could need the timer.
*/
- enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
- config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
+ enable &= config_mask(I915_PMU_ACTUAL_FREQUENCY) |
+ config_mask(I915_PMU_REQUESTED_FREQUENCY) |
ENGINE_SAMPLE_MASK;
/*
@@ -137,11 +160,9 @@ static u64 __get_rc6(struct intel_gt *gt)
return val;
}
-#if IS_ENABLED(CONFIG_PM)
-
-static inline s64 ktime_since(const ktime_t kt)
+static inline s64 ktime_since_raw(const ktime_t kt)
{
- return ktime_to_ns(ktime_sub(ktime_get(), kt));
+ return ktime_to_ns(ktime_sub(ktime_get_raw(), kt));
}
static u64 get_rc6(struct intel_gt *gt)
@@ -170,7 +191,7 @@ static u64 get_rc6(struct intel_gt *gt)
* on top of the last known real value, as the approximated RC6
* counter value.
*/
- val = ktime_since(pmu->sleep_last);
+ val = ktime_since_raw(pmu->sleep_last);
val += pmu->sample[__I915_SAMPLE_RC6].cur;
}
@@ -193,7 +214,7 @@ static void init_rc6(struct i915_pmu *pmu)
pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
pmu->sample[__I915_SAMPLE_RC6].cur;
- pmu->sleep_last = ktime_get();
+ pmu->sleep_last = ktime_get_raw();
}
}
@@ -202,21 +223,9 @@ static void park_rc6(struct drm_i915_private *i915)
struct i915_pmu *pmu = &i915->pmu;
pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
- pmu->sleep_last = ktime_get();
-}
-
-#else
-
-static u64 get_rc6(struct intel_gt *gt)
-{
- return __get_rc6(gt);
+ pmu->sleep_last = ktime_get_raw();
}
-static void init_rc6(struct i915_pmu *pmu) { }
-static void park_rc6(struct drm_i915_private *i915) {}
-
-#endif
-
static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
@@ -355,8 +364,8 @@ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
static bool frequency_sampling_enabled(struct i915_pmu *pmu)
{
return pmu->enable &
- (config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
- config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY));
+ (config_mask(I915_PMU_ACTUAL_FREQUENCY) |
+ config_mask(I915_PMU_REQUESTED_FREQUENCY));
}
static void
@@ -374,7 +383,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
if (!intel_gt_pm_get_if_awake(gt))
return;
- if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
+ if (pmu->enable & config_mask(I915_PMU_ACTUAL_FREQUENCY)) {
u32 val;
/*
@@ -396,7 +405,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
intel_gpu_freq(rps, val), period_ns / 1000);
}
- if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
+ if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) {
add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
intel_gpu_freq(rps, rps->cur_freq),
period_ns / 1000);
@@ -483,6 +492,8 @@ config_status(struct drm_i915_private *i915, u64 config)
if (!HAS_RC6(i915))
return -ENODEV;
break;
+ case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
+ break;
default:
return -ENOENT;
}
@@ -590,6 +601,9 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
case I915_PMU_RC6_RESIDENCY:
val = get_rc6(&i915->gt);
break;
+ case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
+ val = ktime_to_ns(intel_gt_get_awake_time(&i915->gt));
+ break;
}
}
@@ -622,9 +636,13 @@ static void i915_pmu_enable(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
- unsigned int bit = event_enabled_bit(event);
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
+ unsigned int bit;
+
+ bit = event_bit(event);
+ if (bit == -1)
+ goto update;
spin_lock_irqsave(&pmu->lock, flags);
@@ -670,6 +688,7 @@ static void i915_pmu_enable(struct perf_event *event)
spin_unlock_irqrestore(&pmu->lock, flags);
+update:
/*
* Store the current counter value so we can report the correct delta
* for all listeners. Even when the event was already enabled and has
@@ -682,10 +701,13 @@ static void i915_pmu_disable(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
- unsigned int bit = event_enabled_bit(event);
+ unsigned int bit = event_bit(event);
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
+ if (bit == -1)
+ return;
+
spin_lock_irqsave(&pmu->lock, flags);
if (is_engine_event(event)) {
@@ -882,6 +904,7 @@ create_event_attributes(struct i915_pmu *pmu)
__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
+ __event(I915_PMU_SOFTWARE_GT_AWAKE_TIME, "software-gt-awake-time", "ns"),
};
static const struct {
enum drm_i915_pmu_engine_sample sample;
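Taken together, the i915_pmu.c changes above decouple the PMU enable mask from the uAPI config numbering: engine sampler events keep the low bits, and only the non-engine events that need state tracking (actual/requested frequency, RC6 residency) get a bit after them, so that, per other_bit() above:

	config_bit(I915_PMU_ACTUAL_FREQUENCY)
		== I915_ENGINE_SAMPLE_COUNT + __I915_PMU_ACTUAL_FREQUENCY_ENABLED

Events with no sampler state, such as I915_PMU_INTERRUPTS, map to -1 and are skipped early in i915_pmu_enable()/i915_pmu_disable(), which is also what allows pmu->enable to shrink from u64 to u32.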
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 8405d6da5b9a..60f9595f902c 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -14,6 +14,21 @@
struct drm_i915_private;
+/**
+ * Non-engine events for which we need to track enabled-disabled transitions
+ * and current state.
+ */
+enum i915_pmu_tracked_events {
+ __I915_PMU_ACTUAL_FREQUENCY_ENABLED = 0,
+ __I915_PMU_REQUESTED_FREQUENCY_ENABLED,
+ __I915_PMU_RC6_RESIDENCY_ENABLED,
+ __I915_PMU_TRACKED_EVENT_COUNT, /* count marker */
+};
+
+/**
+ * Slots used by the sampling timer (non-engine events) with some extras for
+ * convenience.
+ */
enum {
__I915_SAMPLE_FREQ_ACT = 0,
__I915_SAMPLE_FREQ_REQ,
@@ -28,8 +43,7 @@ enum {
* It is also used to know to needed number of event reference counters.
*/
#define I915_PMU_MASK_BITS \
- ((1 << I915_PMU_SAMPLE_BITS) + \
- (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0)))
+ (I915_ENGINE_SAMPLE_COUNT + __I915_PMU_TRACKED_EVENT_COUNT)
#define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1)
@@ -66,18 +80,17 @@ struct i915_pmu {
*/
struct hrtimer timer;
/**
- * @enable: Bitmask of all currently enabled events.
+ * @enable: Bitmask of specific enabled events.
+ *
+ * For some events we need to track their state and do some internal
+ * housekeeping.
*
- * Bits are derived from uAPI event numbers in a way that low 16 bits
- * correspond to engine event _sample_ _type_ (I915_SAMPLE_QUEUED is
- * bit 0), and higher bits correspond to other events (for instance
- * I915_PMU_ACTUAL_FREQUENCY is bit 16 etc).
+ * Each engine event sampler type and event listed in enum
+ * i915_pmu_tracked_events gets a bit in this field.
*
- * In other words, low 16 bits are not per engine but per engine
- * sampler type, while the upper bits are directly mapped to other
- * event types.
+ * Low bits are engine samplers and other events continue from there.
*/
- u64 enable;
+ u32 enable;
/**
* @timer_last:
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5375b219cc3b..7146cd0f3256 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2928,8 +2928,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define HDPORT_STATE _MMIO(0x45050)
#define HDPORT_DPLL_USED_MASK REG_GENMASK(14, 12)
-#define HDPORT_PHY_USED_DP(phy) REG_BIT(2 * (phy) + 2)
-#define HDPORT_PHY_USED_HDMI(phy) REG_BIT(2 * (phy) + 1)
+#define HDPORT_DDI_USED(phy) REG_BIT(2 * (phy) + 1)
#define HDPORT_ENABLED REG_BIT(0)
/* Make render/texture TLB fetches lower priorty than associated data
@@ -4347,12 +4346,13 @@ enum {
#define _TRANS_VRR_CTL_B 0x61420
#define _TRANS_VRR_CTL_C 0x62420
#define _TRANS_VRR_CTL_D 0x63420
-#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(trans, _TRANS_VRR_CTL_A)
-#define VRR_CTL_VRR_ENABLE REG_BIT(31)
-#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
-#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
-#define VRR_CTL_LINE_COUNT_MASK REG_GENMASK(10, 3)
-#define VRR_CTL_SW_FULLLINE_COUNT REG_BIT(0)
+#define TRANS_VRR_CTL(trans) _MMIO_TRANS2(trans, _TRANS_VRR_CTL_A)
+#define VRR_CTL_VRR_ENABLE REG_BIT(31)
+#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30)
+#define VRR_CTL_FLIP_LINE_EN REG_BIT(29)
+#define VRR_CTL_PIPELINE_FULL_MASK REG_GENMASK(10, 3)
+#define VRR_CTL_PIPELINE_FULL(x) REG_FIELD_PREP(VRR_CTL_PIPELINE_FULL_MASK, (x))
+#define VRR_CTL_PIPELINE_FULL_OVERRIDE REG_BIT(0)
#define _TRANS_VRR_VMAX_A 0x60424
#define _TRANS_VRR_VMAX_B 0x61424
@@ -6578,6 +6578,7 @@ enum {
#define TGL_CURSOR_D_OFFSET 0x73080
/* Display A control */
+#define _DSPAADDR_VLV 0x7017C /* vlv/chv */
#define _DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1 << 31)
#define DISPLAY_PLANE_DISABLE 0
@@ -6614,6 +6615,7 @@ enum {
#define DISPPLANE_ROTATE_180 (1 << 15)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1 << 14) /* Ironlake */
#define DISPPLANE_TILED (1 << 10)
+#define DISPPLANE_ASYNC_FLIP (1 << 9) /* g4x+ */
#define DISPPLANE_MIRROR (1 << 8) /* CHV pipe B */
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
@@ -6625,6 +6627,7 @@ enum {
#define _DSPASURFLIVE 0x701AC
#define _DSPAGAMC 0x701E0
+#define DSPADDR_VLV(plane) _MMIO_PIPE2(plane, _DSPAADDR_VLV)
#define DSPCNTR(plane) _MMIO_PIPE2(plane, _DSPACNTR)
#define DSPADDR(plane) _MMIO_PIPE2(plane, _DSPAADDR)
#define DSPSTRIDE(plane) _MMIO_PIPE2(plane, _DSPASTRIDE)
@@ -7070,6 +7073,8 @@ enum {
#define _PLANE_KEYMAX_1_A 0x701a0
#define _PLANE_KEYMAX_2_A 0x702a0
#define PLANE_KEYMAX_ALPHA(a) ((a) << 24)
+#define _PLANE_CC_VAL_1_A 0x701b4
+#define _PLANE_CC_VAL_2_A 0x702b4
#define _PLANE_AUX_DIST_1_A 0x701c0
#define _PLANE_AUX_DIST_2_A 0x702c0
#define _PLANE_AUX_OFFSET_1_A 0x701c4
@@ -7111,6 +7116,13 @@ enum {
#define _PLANE_NV12_BUF_CFG_1_A 0x70278
#define _PLANE_NV12_BUF_CFG_2_A 0x70378
+#define _PLANE_CC_VAL_1_B 0x711b4
+#define _PLANE_CC_VAL_2_B 0x712b4
+#define _PLANE_CC_VAL_1(pipe) _PIPE(pipe, _PLANE_CC_VAL_1_A, _PLANE_CC_VAL_1_B)
+#define _PLANE_CC_VAL_2(pipe) _PIPE(pipe, _PLANE_CC_VAL_2_A, _PLANE_CC_VAL_2_B)
+#define PLANE_CC_VAL(pipe, plane) \
+ _MMIO_PLANE(plane, _PLANE_CC_VAL_1(pipe), _PLANE_CC_VAL_2(pipe))
+
/* Input CSC Register Definitions */
#define _PLANE_INPUT_CSC_RY_GY_1_A 0x701E0
#define _PLANE_INPUT_CSC_RY_GY_2_A 0x702E0
@@ -8213,6 +8225,7 @@ enum {
#define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6)
#define GEN8_LQSC_RO_PERF_DIS (1 << 27)
#define GEN8_LQSC_FLUSH_COHERENT_LINES (1 << 21)
+#define GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22)
/* GEN8 chicken */
#define HDC_CHICKEN0 _MMIO(0x7300)
@@ -9872,6 +9885,7 @@ enum skl_power_gate {
_PORTD_HDCP2_BASE, \
_PORTE_HDCP2_BASE, \
_PORTF_HDCP2_BASE) + (x))
+
#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98)
#define _TRANSA_HDCP2_AUTH 0x66498
#define _TRANSB_HDCP2_AUTH 0x66598
@@ -9911,6 +9925,44 @@ enum skl_power_gate {
TRANS_HDCP2_STATUS(trans) : \
PORT_HDCP2_STATUS(port))
+#define _PIPEA_HDCP2_STREAM_STATUS 0x668C0
+#define _PIPEB_HDCP2_STREAM_STATUS 0x665C0
+#define _PIPEC_HDCP2_STREAM_STATUS 0x666C0
+#define _PIPED_HDCP2_STREAM_STATUS 0x667C0
+#define PIPE_HDCP2_STREAM_STATUS(pipe) _MMIO(_PICK((pipe), \
+ _PIPEA_HDCP2_STREAM_STATUS, \
+ _PIPEB_HDCP2_STREAM_STATUS, \
+ _PIPEC_HDCP2_STREAM_STATUS, \
+ _PIPED_HDCP2_STREAM_STATUS))
+
+#define _TRANSA_HDCP2_STREAM_STATUS 0x664C0
+#define _TRANSB_HDCP2_STREAM_STATUS 0x665C0
+#define TRANS_HDCP2_STREAM_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_STREAM_STATUS, \
+ _TRANSB_HDCP2_STREAM_STATUS)
+#define STREAM_ENCRYPTION_STATUS BIT(31)
+#define STREAM_TYPE_STATUS BIT(30)
+#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_STREAM_STATUS(trans) : \
+ PIPE_HDCP2_STREAM_STATUS(pipe))
+
+#define _PORTA_HDCP2_AUTH_STREAM 0x66F00
+#define _PORTB_HDCP2_AUTH_STREAM 0x66F04
+#define PORT_HDCP2_AUTH_STREAM(port) _MMIO_PORT(port, \
+ _PORTA_HDCP2_AUTH_STREAM, \
+ _PORTB_HDCP2_AUTH_STREAM)
+#define _TRANSA_HDCP2_AUTH_STREAM 0x66F00
+#define _TRANSB_HDCP2_AUTH_STREAM 0x66F04
+#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_AUTH_STREAM, \
+ _TRANSB_HDCP2_AUTH_STREAM)
+#define AUTH_STREAM_TYPE BIT(31)
+#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_AUTH_STREAM(trans) : \
+ PORT_HDCP2_AUTH_STREAM(port))
+
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
@@ -9960,6 +10012,7 @@ enum skl_power_gate {
#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6)
+#define TRANS_DDI_HDCP_SELECT REG_BIT(5)
#define TRANS_DDI_BFI_ENABLE (1 << 4)
#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4)
#define TRANS_DDI_HDMI_SCRAMBLING (1 << 0)
@@ -10851,8 +10904,10 @@ enum skl_power_gate {
#define CNL_DRAM_RANK_3 (0x2 << 9)
#define CNL_DRAM_RANK_4 (0x3 << 9)
-/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
- * since on HSW we can't write to it using I915_WRITE. */
+/*
+ * Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
+ * since on HSW we can't write to it using intel_uncore_write.
+ */
#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
#define D_COMP_BDW _MMIO(0x138144)
#define D_COMP_RCOMP_IN_PROGRESS (1 << 9)
@@ -12053,6 +12108,12 @@ enum skl_power_gate {
#define __GEN11_VCS2_MOCS0 0x10000
#define GEN11_MFX2_MOCS(i) _MMIO(__GEN11_VCS2_MOCS0 + (i) * 4)
+#define GEN9_SCRATCH_LNCF1 _MMIO(0xb008)
+#define GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(0)
+
+#define GEN9_SCRATCH1 _MMIO(0xb11c)
+#define EVICTION_PERF_FIX_ENABLE REG_BIT(8)
+
#define GEN10_SCRATCH_LNCF2 _MMIO(0xb0a0)
#define PMFLUSHDONE_LNICRSDROP (1 << 20)
#define PMFLUSH_GAPL3UNBLOCK (1 << 21)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5385b081a376..22e39d938f17 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -33,6 +33,7 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "gt/intel_rps.h"
@@ -275,7 +276,7 @@ static void remove_from_engine(struct i915_request *rq)
bool i915_request_retire(struct i915_request *rq)
{
- if (!i915_request_completed(rq))
+ if (!__i915_request_is_complete(rq))
return false;
RQ_TRACE(rq, "\n");
@@ -306,10 +307,8 @@ bool i915_request_retire(struct i915_request *rq)
spin_unlock_irq(&rq->lock);
}
- if (i915_request_has_waitboost(rq)) {
- GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
+ if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
atomic_dec(&rq->engine->gt->rps.num_waiters);
- }
/*
* We only loosely track inflight requests across preemption,
@@ -321,7 +320,8 @@ bool i915_request_retire(struct i915_request *rq)
* after removing the breadcrumb and signaling it, so that we do not
* inadvertently attach the breadcrumb to a completed request.
*/
- remove_from_engine(rq);
+ if (!list_empty(&rq->sched.link))
+ remove_from_engine(rq);
GEM_BUG_ON(!llist_empty(&rq->execute_cb));
__list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
@@ -342,8 +342,7 @@ void i915_request_retire_upto(struct i915_request *rq)
struct i915_request *tmp;
RQ_TRACE(rq, "\n");
-
- GEM_BUG_ON(!i915_request_completed(rq));
+ GEM_BUG_ON(!__i915_request_is_complete(rq));
do {
tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
@@ -488,6 +487,8 @@ void __i915_request_skip(struct i915_request *rq)
if (rq->infix == rq->postfix)
return;
+ RQ_TRACE(rq, "error: %d\n", rq->fence.error);
+
/*
* As this request likely depends on state from the lost
* context, clear out all the user operations leaving the
@@ -513,6 +514,17 @@ void i915_request_set_error_once(struct i915_request *rq, int error)
} while (!try_cmpxchg(&rq->fence.error, &old, error));
}
+void i915_request_mark_eio(struct i915_request *rq)
+{
+ if (__i915_request_is_complete(rq))
+ return;
+
+ GEM_BUG_ON(i915_request_signaled(rq));
+
+ i915_request_set_error_once(rq, -EIO);
+ i915_request_mark_complete(rq);
+}
+
bool __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
@@ -539,12 +551,10 @@ bool __i915_request_submit(struct i915_request *request)
* dropped upon retiring. (Otherwise if resubmit a *retired*
* request, this would be a horrible use-after-free.)
*/
- if (i915_request_completed(request))
- goto xfer;
-
- if (unlikely(intel_context_is_closed(request->context) &&
- !intel_engine_has_heartbeat(engine)))
- intel_context_set_banned(request->context);
+ if (__i915_request_is_complete(request)) {
+ list_del_init(&request->sched.link);
+ goto active;
+ }
if (unlikely(intel_context_is_banned(request->context)))
i915_request_set_error_once(request, -EIO);
@@ -579,11 +589,11 @@ bool __i915_request_submit(struct i915_request *request)
engine->serial++;
result = true;
-xfer:
- if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
- list_move_tail(&request->sched.link, &engine->active.requests);
- clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
- }
+ GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
+ list_move_tail(&request->sched.link, &engine->active.requests);
+active:
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
+ set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
/*
* XXX Rollback bonded-execution on __i915_request_unsubmit()?
@@ -643,7 +653,7 @@ void __i915_request_unsubmit(struct i915_request *request)
i915_request_cancel_breadcrumb(request);
/* We've already spun, don't charge on resubmitting. */
- if (request->sched.semaphores && i915_request_started(request))
+ if (request->sched.semaphores && __i915_request_has_started(request))
request->sched.semaphores = 0;
/*
@@ -855,7 +865,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
RCU_INIT_POINTER(rq->timeline, tl);
RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
rq->hwsp_seqno = tl->hwsp_seqno;
- GEM_BUG_ON(i915_request_completed(rq));
+ GEM_BUG_ON(__i915_request_is_complete(rq));
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
@@ -961,15 +971,22 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
if (i915_request_started(signal))
return 0;
+ /*
+ * The caller holds a reference on @signal, but we do not serialise
+ * against it being retired and removed from the lists.
+ *
+ * We do not hold a reference to the request before @signal, and
+ * so must be very careful to ensure that it is not _recycled_ as
+ * we follow the link backwards.
+ */
fence = NULL;
rcu_read_lock();
- spin_lock_irq(&signal->lock);
do {
struct list_head *pos = READ_ONCE(signal->link.prev);
struct i915_request *prev;
/* Confirm signal has not been retired, the link is valid */
- if (unlikely(i915_request_started(signal)))
+ if (unlikely(__i915_request_has_started(signal)))
break;
/* Is signal the earliest request on its timeline? */
@@ -994,7 +1011,6 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
fence = &prev->fence;
} while (0);
- spin_unlock_irq(&signal->lock);
rcu_read_unlock();
if (!fence)
return 0;
@@ -1511,7 +1527,7 @@ __i915_request_add_to_timeline(struct i915_request *rq)
*/
prev = to_request(__i915_active_fence_set(&timeline->last_request,
&rq->fence));
- if (prev && !i915_request_completed(prev)) {
+ if (prev && !__i915_request_is_complete(prev)) {
/*
* The requests are supposed to be kept in order. However,
* we need to be wary in case the timeline->last_request
@@ -1582,6 +1598,12 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
return __i915_request_add_to_timeline(rq);
}
+void __i915_request_queue_bh(struct i915_request *rq)
+{
+ i915_sw_fence_commit(&rq->semaphore);
+ i915_sw_fence_commit(&rq->submit);
+}
+
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr)
{
@@ -1598,8 +1620,10 @@ void __i915_request_queue(struct i915_request *rq,
*/
if (attr && rq->engine->schedule)
rq->engine->schedule(rq, attr);
- i915_sw_fence_commit(&rq->semaphore);
- i915_sw_fence_commit(&rq->submit);
+
+ local_bh_disable();
+ __i915_request_queue_bh(rq);
+ local_bh_enable(); /* kick tasklets */
}
void i915_request_add(struct i915_request *rq)
@@ -1823,7 +1847,7 @@ long i915_request_wait(struct i915_request *rq,
* for unhappy HW.
*/
if (i915_request_is_ready(rq))
- intel_engine_flush_submission(rq->engine);
+ __intel_engine_flush_submission(rq->engine, false);
for (;;) {
set_current_state(state);
@@ -1855,6 +1879,106 @@ out:
return timeout;
}
+static int print_sched_attr(const struct i915_sched_attr *attr,
+ char *buf, int x, int len)
+{
+ if (attr->priority == I915_PRIORITY_INVALID)
+ return x;
+
+ x += snprintf(buf + x, len - x,
+ " prio=%d", attr->priority);
+
+ return x;
+}
+
+static char queue_status(const struct i915_request *rq)
+{
+ if (i915_request_is_active(rq))
+ return 'E';
+
+ if (i915_request_is_ready(rq))
+ return intel_engine_is_virtual(rq->engine) ? 'V' : 'R';
+
+ return 'U';
+}
+
+static const char *run_status(const struct i915_request *rq)
+{
+ if (__i915_request_is_complete(rq))
+ return "!";
+
+ if (__i915_request_has_started(rq))
+ return "*";
+
+ if (!i915_sw_fence_signaled(&rq->semaphore))
+ return "&";
+
+ return "";
+}
+
+static const char *fence_status(const struct i915_request *rq)
+{
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ return "+";
+
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
+ return "-";
+
+ return "";
+}
+
+void i915_request_show(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent)
+{
+ const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
+ char buf[80] = "";
+ int x = 0;
+
+ /*
+ * The prefix is used to show the queue status, for which we use
+ * the following flags:
+ *
+ * U [Unready]
+ * - initial status upon being submitted by the user
+ *
+ * - the request is not ready for execution as it is waiting
+ * for external fences
+ *
+ * R [Ready]
+ * - all fences the request was waiting on have been signaled,
+ * and the request is now ready for execution and will be
+ * in a backend queue
+ *
+ * - a ready request may still need to wait on semaphores
+ * [internal fences]
+ *
+ * V [Ready/virtual]
+ * - same as ready, but queued over multiple backends
+ *
+ * E [Executing]
+ * - the request has been transferred from the backend queue and
+ * submitted for execution on HW
+ *
+ * - a completed request may still be regarded as executing, its
+ * status may not be updated until it is retired and removed
+ * from the lists
+ */
+
+ x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
+
+ drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n",
+ prefix, indent, " ",
+ queue_status(rq),
+ rq->fence.context, rq->fence.seqno,
+ run_status(rq),
+ fence_status(rq),
+ buf,
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies),
+ name);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 92adfee30c7c..1bfe214a47e9 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -43,6 +43,7 @@
struct drm_file;
struct drm_i915_gem_object;
+struct drm_printer;
struct i915_request;
struct i915_capture_list {
@@ -308,12 +309,14 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
-void i915_request_set_error_once(struct i915_request *rq, int error);
void __i915_request_skip(struct i915_request *rq);
+void i915_request_set_error_once(struct i915_request *rq, int error);
+void i915_request_mark_eio(struct i915_request *rq);
struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr);
+void __i915_request_queue_bh(struct i915_request *rq);
bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);
@@ -371,6 +374,11 @@ long i915_request_wait(struct i915_request *rq,
#define I915_WAIT_PRIORITY BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
+void i915_request_show(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent);
+
static inline bool i915_request_signaled(const struct i915_request *rq)
{
/* The request may live longer than its HWSP, so check flags first! */
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index cbb880b10c65..7144239f08df 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -458,14 +458,10 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
if (!dep)
return -ENOMEM;
- local_bh_disable();
-
if (!__i915_sched_node_add_dependency(node, signal, dep,
flags | I915_DEPENDENCY_ALLOC))
i915_dependency_free(dep);
- local_bh_enable(); /* kick submission tasklet */
-
return 0;
}
@@ -504,6 +500,34 @@ void i915_sched_node_fini(struct i915_sched_node *node)
spin_unlock_irq(&schedule_lock);
}
+void i915_request_show_with_schedule(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent)
+{
+ struct i915_dependency *dep;
+
+ i915_request_show(m, rq, prefix, indent);
+ if (i915_request_completed(rq))
+ return;
+
+ rcu_read_lock();
+ for_each_signaler(dep, rq) {
+ const struct i915_request *signaler =
+ node_to_request(dep->signaler);
+
+ /* Dependencies along the same timeline are expected. */
+ if (signaler->timeline == rq->timeline)
+ continue;
+
+ if (__i915_request_is_complete(signaler))
+ continue;
+
+ i915_request_show(m, signaler, prefix, indent + 2);
+ }
+ rcu_read_unlock();
+}
+
static void i915_global_scheduler_shrink(void)
{
kmem_cache_shrink(global.slab_dependencies);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 6f0bf00fc569..4501e5ac2637 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -13,6 +13,8 @@
#include "i915_scheduler_types.h"
+struct drm_printer;
+
#define priolist_for_each_request(it, plist, idx) \
for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
list_for_each_entry(it, &(plist)->requests[idx], sched.link)
@@ -54,4 +56,9 @@ static inline void i915_priolist_free(struct i915_priolist *p)
__i915_priolist_free(p);
}
+void i915_request_show_with_schedule(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent);
+
#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index f72e6c397b08..343ed44d5ed4 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -81,4 +81,14 @@ struct i915_dependency {
#define I915_DEPENDENCY_WEAK BIT(2)
};
+#define for_each_waiter(p__, rq__) \
+ list_for_each_entry_lockless(p__, \
+ &(rq__)->sched.waiters_list, \
+ wait_link)
+
+#define for_each_signaler(p__, rq__) \
+ list_for_each_entry_rcu(p__, \
+ &(rq__)->sched.signalers_list, \
+ signal_link)
+
#endif /* _I915_SCHEDULER_TYPES_H_ */
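The for_each_signaler() iterator added above is an RCU list walk over a request's dependencies; callers are expected to hold rcu_read_lock(), as i915_request_show_with_schedule() does in the i915_scheduler.c hunk. A minimal usage sketch:

	struct i915_dependency *dep;

	rcu_read_lock();
	for_each_signaler(dep, rq)
		i915_request_show(m, node_to_request(dep->signaler), prefix, indent + 2);
	rcu_read_unlock();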
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index db2111fc809e..63212df33c9e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -24,6 +24,7 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include "display/intel_de.h"
#include "display/intel_fbc.h"
#include "display/intel_gmbus.h"
#include "display/intel_vga.h"
@@ -39,21 +40,21 @@ static void intel_save_swf(struct drm_i915_private *dev_priv)
/* Scratch space */
if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
- dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
- dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+ dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv, SWF0(i));
+ dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
}
for (i = 0; i < 3; i++)
- dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
+ dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv, SWF3(i));
} else if (IS_GEN(dev_priv, 2)) {
for (i = 0; i < 7; i++)
- dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+ dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
} else if (HAS_GMCH(dev_priv)) {
for (i = 0; i < 16; i++) {
- dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
- dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
+ dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv, SWF0(i));
+ dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i));
}
for (i = 0; i < 3; i++)
- dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
+ dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv, SWF3(i));
}
}
@@ -64,21 +65,21 @@ static void intel_restore_swf(struct drm_i915_private *dev_priv)
/* Scratch space */
if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
- I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
- I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+ intel_de_write(dev_priv, SWF0(i), dev_priv->regfile.saveSWF0[i]);
+ intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
- I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
+ intel_de_write(dev_priv, SWF3(i), dev_priv->regfile.saveSWF3[i]);
} else if (IS_GEN(dev_priv, 2)) {
for (i = 0; i < 7; i++)
- I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+ intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
} else if (HAS_GMCH(dev_priv)) {
for (i = 0; i < 16; i++) {
- I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
- I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
+ intel_de_write(dev_priv, SWF0(i), dev_priv->regfile.saveSWF0[i]);
+ intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
- I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
+ intel_de_write(dev_priv, SWF3(i), dev_priv->regfile.saveSWF3[i]);
}
}
@@ -88,7 +89,7 @@ void i915_save_display(struct drm_i915_private *dev_priv)
/* Display arbitration control */
if (INTEL_GEN(dev_priv) <= 4)
- dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
+ dev_priv->regfile.saveDSPARB = intel_de_read(dev_priv, DSPARB);
if (IS_GEN(dev_priv, 4))
pci_read_config_word(pdev, GCDGMBUS,
@@ -109,7 +110,7 @@ void i915_restore_display(struct drm_i915_private *dev_priv)
/* Display arbitration */
if (INTEL_GEN(dev_priv) <= 4)
- I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+ intel_de_write(dev_priv, DSPARB, dev_priv->regfile.saveDSPARB);
/* only restore FBC info on the platform that supports FBC*/
intel_fbc_global_disable(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 038d4c6884c5..2744558f3050 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -18,10 +18,15 @@
#define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
-#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
-
static DEFINE_SPINLOCK(i915_sw_fence_lock);
+#define WQ_FLAG_BITS \
+ BITS_PER_TYPE(typeof_member(struct wait_queue_entry, flags))
+
+/* after WQ_FLAG_* for safety */
+#define I915_SW_FENCE_FLAG_FENCE BIT(WQ_FLAG_BITS - 1)
+#define I915_SW_FENCE_FLAG_ALLOC BIT(WQ_FLAG_BITS - 2)
+
enum {
DEBUG_FENCE_IDLE = 0,
DEBUG_FENCE_NOTIFY,
@@ -154,10 +159,10 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
if (continuation) {
list_for_each_entry_safe(pos, next, &x->head, entry) {
- if (pos->func == autoremove_wake_function)
- pos->func(pos, TASK_NORMAL, 0, continuation);
- else
+ if (pos->flags & I915_SW_FENCE_FLAG_FENCE)
list_move_tail(&pos->entry, continuation);
+ else
+ pos->func(pos, TASK_NORMAL, 0, continuation);
}
} else {
LIST_HEAD(extra);
@@ -166,9 +171,9 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
list_for_each_entry_safe(pos, next, &x->head, entry) {
int wake_flags;
- wake_flags = fence->error;
- if (pos->func == autoremove_wake_function)
- wake_flags = 0;
+ wake_flags = 0;
+ if (pos->flags & I915_SW_FENCE_FLAG_FENCE)
+ wake_flags = fence->error;
pos->func(pos, TASK_NORMAL, wake_flags, &extra);
}
@@ -332,8 +337,8 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
struct i915_sw_fence *signaler,
wait_queue_entry_t *wq, gfp_t gfp)
{
+ unsigned int pending;
unsigned long flags;
- int pending;
debug_fence_assert(fence);
might_sleep_if(gfpflags_allow_blocking(gfp));
@@ -349,7 +354,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
return -EINVAL;
- pending = 0;
+ pending = I915_SW_FENCE_FLAG_FENCE;
if (!wq) {
wq = kmalloc(sizeof(*wq), gfp);
if (!wq) {
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 4c305d838016..f9e780dee9de 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -87,7 +87,7 @@ bool i915_error_injected(void)
void cancel_timer(struct timer_list *t)
{
- if (!READ_ONCE(t->expires))
+ if (!timer_active(t))
return;
del_timer(t);
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 54773371e6bd..abd4dcd9f79c 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -438,9 +438,14 @@ static inline void __add_taint_for_CI(unsigned int taint)
void cancel_timer(struct timer_list *t);
void set_timer_ms(struct timer_list *t, unsigned long timeout);
+static inline bool timer_active(const struct timer_list *t)
+{
+ return READ_ONCE(t->expires);
+}
+
static inline bool timer_expired(const struct timer_list *t)
{
- return READ_ONCE(t->expires) && !timer_pending(t);
+ return timer_active(t) && !timer_pending(t);
}
/*
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 5b3a3c653454..a64adc8c883b 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -363,6 +363,21 @@ i915_vma_unpin_fence(struct i915_vma *vma)
void i915_vma_parked(struct intel_gt *gt);
+static inline bool i915_vma_is_scanout(const struct i915_vma *vma)
+{
+ return test_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
+static inline void i915_vma_mark_scanout(struct i915_vma *vma)
+{
+ set_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
+static inline void i915_vma_clear_scanout(struct i915_vma *vma)
+{
+ clear_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
#define for_each_until(cond) if (cond) break; else
/**
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 9e9082dc8f4b..f5cb848b7a7e 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -249,6 +249,9 @@ struct i915_vma {
#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT))
#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT))
+#define I915_VMA_SCANOUT_BIT 18
+#define I915_VMA_SCANOUT ((int)BIT(I915_VMA_SCANOUT_BIT))
+
struct i915_active active;
#define I915_VMA_PAGES_BIAS 24
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index e67cec8fa2aa..f2d5ae59081e 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -104,7 +104,7 @@ void intel_device_info_print_static(const struct intel_device_info *info,
drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size);
-#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name))
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
@@ -117,150 +117,6 @@ void intel_device_info_print_runtime(const struct intel_runtime_info *info,
struct drm_printer *p)
{
drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
- drm_printf(p, "CS timestamp frequency: %u Hz\n",
- info->cs_timestamp_frequency_hz);
-}
-
-static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
-{
- u32 ts_override = intel_uncore_read(&dev_priv->uncore,
- GEN9_TIMESTAMP_OVERRIDE);
- u32 base_freq, frac_freq;
-
- base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
- GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
- base_freq *= 1000000;
-
- frac_freq = ((ts_override &
- GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
- GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
- frac_freq = 1000000 / (frac_freq + 1);
-
- return base_freq + frac_freq;
-}
-
-static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
- u32 rpm_config_reg)
-{
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
- u32 crystal_clock = (rpm_config_reg &
- GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
- GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
- switch (crystal_clock) {
- case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
- return f19_2_mhz;
- case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
- return f24_mhz;
- default:
- MISSING_CASE(crystal_clock);
- return 0;
- }
-}
-
-static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
- u32 rpm_config_reg)
-{
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
- u32 f25_mhz = 25000000;
- u32 f38_4_mhz = 38400000;
- u32 crystal_clock = (rpm_config_reg &
- GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
- GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
- switch (crystal_clock) {
- case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
- return f24_mhz;
- case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
- return f19_2_mhz;
- case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
- return f38_4_mhz;
- case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
- return f25_mhz;
- default:
- MISSING_CASE(crystal_clock);
- return 0;
- }
-}
-
-static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 f12_5_mhz = 12500000;
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
-
- if (INTEL_GEN(dev_priv) <= 4) {
- /* PRMs say:
- *
- * "The value in this register increments once every 16
- * hclks." (through the “Clocking Configuration”
- * (“CLKCFG”) MCHBAR register)
- */
- return RUNTIME_INFO(dev_priv)->rawclk_freq * 1000 / 16;
- } else if (INTEL_GEN(dev_priv) <= 8) {
- /* PRMs say:
- *
- * "The PCU TSC counts 10ns increments; this timestamp
- * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
- * rolling over every 1.5 hours).
- */
- return f12_5_mhz;
- } else if (INTEL_GEN(dev_priv) <= 9) {
- u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
- u32 freq = 0;
-
- if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
- freq = read_reference_ts_freq(dev_priv);
- } else {
- freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;
-
- /* Now figure out how the command stream's timestamp
- * register increments from this frequency (it might
- * increment only every few clock cycle).
- */
- freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
- CTC_SHIFT_PARAMETER_SHIFT);
- }
-
- return freq;
- } else if (INTEL_GEN(dev_priv) <= 12) {
- u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
- u32 freq = 0;
-
- /* First figure out the reference frequency. There are 2 ways
- * we can compute the frequency, either through the
- * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
- * tells us which one we should use.
- */
- if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
- freq = read_reference_ts_freq(dev_priv);
- } else {
- u32 rpm_config_reg = intel_uncore_read(uncore, RPM_CONFIG0);
-
- if (INTEL_GEN(dev_priv) <= 10)
- freq = gen10_get_crystal_clock_freq(dev_priv,
- rpm_config_reg);
- else
- freq = gen11_get_crystal_clock_freq(dev_priv,
- rpm_config_reg);
-
- /* Now figure out how the command stream's timestamp
- * register increments from this frequency (it might
- * increment only every few clock cycle).
- */
- freq >>= 3 - ((rpm_config_reg &
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
- }
-
- return freq;
- }
-
- MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
- return 0;
}
#undef INTEL_VGA_DEVICE
@@ -505,19 +361,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
runtime->rawclk_freq = intel_read_rawclk(dev_priv);
drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);
- /* Initialize command stream timestamp frequency */
- runtime->cs_timestamp_frequency_hz =
- read_timestamp_frequency(dev_priv);
- if (runtime->cs_timestamp_frequency_hz) {
- runtime->cs_timestamp_period_ns =
- i915_cs_timestamp_ticks_to_ns(dev_priv, 1);
- drm_dbg(&dev_priv->drm,
- "CS timestamp wraparound in %lldms\n",
- div_u64(mul_u32_u32(runtime->cs_timestamp_period_ns,
- S32_MAX),
- USEC_PER_SEC));
- }
-
if (!HAS_DISPLAY(dev_priv)) {
dev_priv->drm.driver_features &= ~(DRIVER_MODESET |
DRIVER_ATOMIC);
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index d92fa041c700..cf2d528c6e9b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -123,7 +123,6 @@ enum intel_ppgtt_type {
func(has_llc); \
func(has_logical_ring_contexts); \
func(has_logical_ring_elsq); \
- func(has_logical_ring_preemption); \
func(has_master_unit_irq); \
func(has_pooled_eu); \
func(has_rc6); \
@@ -224,9 +223,6 @@ struct intel_runtime_info {
u8 num_scalers[I915_MAX_PIPES];
u32 rawclk_freq;
-
- u32 cs_timestamp_frequency_hz;
- u32 cs_timestamp_period_ns;
};
struct intel_driver_caps {
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 4754296a250e..73d256fc6830 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "intel_dram.h"
+#include "intel_sideband.h"
struct dram_dimm_info {
u16 size;
@@ -201,22 +202,12 @@ skl_dram_get_channels_info(struct drm_i915_private *i915)
return -EINVAL;
}
- /*
- * If any of the channel is single rank channel, worst case output
- * will be same as if single rank memory, so consider single rank
- * memory.
- */
- if (ch0.ranks == 1 || ch1.ranks == 1)
- dram_info->ranks = 1;
- else
- dram_info->ranks = max(ch0.ranks, ch1.ranks);
-
- if (dram_info->ranks == 0) {
+ if (ch0.ranks == 0 && ch1.ranks == 0) {
drm_info(&i915->drm, "couldn't get memory rank information\n");
return -EINVAL;
}
- dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
+ dram_info->wm_lv_0_adjust_needed = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
@@ -269,16 +260,12 @@ skl_get_dram_info(struct drm_i915_private *i915)
mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
- dram_info->bandwidth_kbps = dram_info->num_channels *
- mem_freq_khz * 8;
-
- if (dram_info->bandwidth_kbps == 0) {
+ if (dram_info->num_channels * mem_freq_khz == 0) {
drm_info(&i915->drm,
"Couldn't get system memory bandwidth\n");
return -EINVAL;
}
- dram_info->valid = true;
return 0;
}
@@ -365,7 +352,7 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
struct dram_info *dram_info = &i915->dram_info;
u32 dram_channels;
u32 mem_freq_khz, val;
- u8 num_active_channels;
+ u8 num_active_channels, valid_ranks = 0;
int i;
val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
@@ -375,10 +362,7 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
num_active_channels = hweight32(dram_channels);
- /* Each active bit represents 4-byte channel */
- dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
-
- if (dram_info->bandwidth_kbps == 0) {
+ if (mem_freq_khz * num_active_channels == 0) {
drm_info(&i915->drm,
"Couldn't get system memory bandwidth\n");
return -EINVAL;
@@ -410,57 +394,125 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
dimm.size, dimm.width, dimm.ranks,
intel_dram_type_str(type));
- /*
- * If any of the channel is single rank channel,
- * worst case output will be same as if single rank
- * memory, so consider single rank memory.
- */
- if (dram_info->ranks == 0)
- dram_info->ranks = dimm.ranks;
- else if (dimm.ranks == 1)
- dram_info->ranks = 1;
+ if (valid_ranks == 0)
+ valid_ranks = dimm.ranks;
if (type != INTEL_DRAM_UNKNOWN)
dram_info->type = type;
}
- if (dram_info->type == INTEL_DRAM_UNKNOWN || dram_info->ranks == 0) {
+ if (dram_info->type == INTEL_DRAM_UNKNOWN || valid_ranks == 0) {
drm_info(&i915->drm, "couldn't get memory information\n");
return -EINVAL;
}
- dram_info->valid = true;
+ return 0;
+}
+
+static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
+{
+ struct dram_info *dram_info = &dev_priv->dram_info;
+ u32 val = 0;
+ int ret;
+
+ ret = sandybridge_pcode_read(dev_priv,
+ ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
+ &val, NULL);
+ if (ret)
+ return ret;
+
+ if (IS_GEN(dev_priv, 12)) {
+ switch (val & 0xf) {
+ case 0:
+ dram_info->type = INTEL_DRAM_DDR4;
+ break;
+ case 3:
+ dram_info->type = INTEL_DRAM_LPDDR4;
+ break;
+ case 4:
+ dram_info->type = INTEL_DRAM_DDR3;
+ break;
+ case 5:
+ dram_info->type = INTEL_DRAM_LPDDR3;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ return -1;
+ }
+ } else {
+ switch (val & 0xf) {
+ case 0:
+ dram_info->type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ dram_info->type = INTEL_DRAM_DDR3;
+ break;
+ case 2:
+ dram_info->type = INTEL_DRAM_LPDDR3;
+ break;
+ case 3:
+ dram_info->type = INTEL_DRAM_LPDDR4;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ return -1;
+ }
+ }
+
+ dram_info->num_channels = (val & 0xf0) >> 4;
+ dram_info->num_qgv_points = (val & 0xf00) >> 8;
return 0;
}
+static int gen11_get_dram_info(struct drm_i915_private *i915)
+{
+ int ret = skl_get_dram_info(i915);
+
+ if (ret)
+ return ret;
+
+ return icl_pcode_read_mem_global_info(i915);
+}
+
+static int gen12_get_dram_info(struct drm_i915_private *i915)
+{
+ /* Always needed for GEN12+ */
+ i915->dram_info.wm_lv_0_adjust_needed = true;
+
+ return icl_pcode_read_mem_global_info(i915);
+}
+
void intel_dram_detect(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
int ret;
/*
- * Assume 16Gb DIMMs are present until proven otherwise.
- * This is only used for the level 0 watermark latency
- * w/a which does not apply to bxt/glk.
+ * Assume level 0 watermark latency adjustment is needed until proven
+ * otherwise, this w/a is not needed by bxt/glk.
*/
- dram_info->is_16gb_dimm = !IS_GEN9_LP(i915);
+ dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915);
if (INTEL_GEN(i915) < 9 || !HAS_DISPLAY(i915))
return;
- if (IS_GEN9_LP(i915))
+ if (INTEL_GEN(i915) >= 12)
+ ret = gen12_get_dram_info(i915);
+ else if (INTEL_GEN(i915) >= 11)
+ ret = gen11_get_dram_info(i915);
+ else if (IS_GEN9_LP(i915))
ret = bxt_get_dram_info(i915);
else
ret = skl_get_dram_info(i915);
if (ret)
return;
- drm_dbg_kms(&i915->drm, "DRAM bandwidth: %u kBps, channels: %u\n",
- dram_info->bandwidth_kbps, dram_info->num_channels);
+ drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
- drm_dbg_kms(&i915->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n",
- dram_info->ranks, yesno(dram_info->is_16gb_dimm));
+ drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
+ yesno(dram_info->wm_lv_0_adjust_needed));
}
static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index b326993a1026..1bfcdd89b241 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -10,7 +10,7 @@
#define REGION_MAP(type, inst) \
BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst)
-const u32 intel_region_map[] = {
+static const u32 intel_region_map[] = {
[INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0),
[INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0),
[INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 232490d89a83..6ffc0673f005 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -51,21 +51,16 @@ enum intel_region_id {
for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \
for_each_if((mr) = (i915)->mm.regions[id])
-/**
- * Memory regions encoded as type | instance
- */
-extern const u32 intel_region_map[];
-
struct intel_memory_region_ops {
unsigned int flags;
int (*init)(struct intel_memory_region *mem);
void (*release)(struct intel_memory_region *mem);
- struct drm_i915_gem_object *
- (*create_object)(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags);
+ int (*init_object)(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags);
};
struct intel_memory_region {
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index f31c0dabd0cc..ecaf314d60b6 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -143,8 +143,9 @@ static bool intel_is_virt_pch(unsigned short id,
sdevice == PCI_SUBDEVICE_ID_QEMU));
}
-static unsigned short
-intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
+static void
+intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
+ unsigned short *pch_id, enum intel_pch *pch_type)
{
unsigned short id = 0;
@@ -181,12 +182,21 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
else
drm_dbg_kms(&dev_priv->drm, "Assuming no PCH\n");
- return id;
+ *pch_type = intel_pch_type(dev_priv, id);
+
+ /* Sanity check virtual PCH id */
+ if (drm_WARN_ON(&dev_priv->drm,
+ id && *pch_type == PCH_NONE))
+ id = 0;
+
+ *pch_id = id;
}
void intel_detect_pch(struct drm_i915_private *dev_priv)
{
struct pci_dev *pch = NULL;
+ unsigned short id;
+ enum intel_pch pch_type;
/* DG1 has south engine display on the same PCI device */
if (IS_DG1(dev_priv)) {
@@ -206,9 +216,6 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
* of only checking the first one.
*/
while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
- unsigned short id;
- enum intel_pch pch_type;
-
if (pch->vendor != PCI_VENDOR_ID_INTEL)
continue;
@@ -221,14 +228,7 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
break;
} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
pch->subsystem_device)) {
- id = intel_virt_detect_pch(dev_priv);
- pch_type = intel_pch_type(dev_priv, id);
-
- /* Sanity check virtual PCH id */
- if (drm_WARN_ON(&dev_priv->drm,
- id && pch_type == PCH_NONE))
- id = 0;
-
+ intel_virt_detect_pch(dev_priv, &id, &pch_type);
dev_priv->pch_type = pch_type;
dev_priv->pch_id = id;
break;
@@ -244,10 +244,15 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
"Display disabled, reverting to NOP PCH\n");
dev_priv->pch_type = PCH_NOP;
dev_priv->pch_id = 0;
+ } else if (!pch) {
+ if (run_as_guest() && HAS_DISPLAY(dev_priv)) {
+ intel_virt_detect_pch(dev_priv, &id, &pch_type);
+ dev_priv->pch_type = pch_type;
+ dev_priv->pch_id = id;
+ } else {
+ drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
+ }
}
- if (!pch)
- drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
-
pci_dev_put(pch);
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a20b5051f18c..0c3e63f27c29 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -82,24 +82,24 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
* Must match Sampler, Pixel Back End, and Media. See
* WaCompressedResourceSamplerPbeMediaNewHashMode.
*/
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) |
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) |
SKL_DE_COMPRESSED_HASH_MODE);
}
/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
- I915_WRITE(GEN8_CHICKEN_DCPR_1,
- I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+ intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
+ intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
/*
* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl
* Display WA #0859: skl,bxt,kbl,glk,cfl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_MEMORY_WAKE);
}
@@ -108,21 +108,21 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WaDisableSDEUnitClockGating:bxt */
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/*
* FIXME:
* GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
*/
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
/*
* Wa: Backlight PWM may stop in the asserted state, causing backlight
* to stay fully on.
*/
- I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+ intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
/*
@@ -131,20 +131,20 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
* is off and a MMIO access is attempted by any privilege
* application, using batch buffers or any other means.
*/
- I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
+ intel_uncore_write(&dev_priv->uncore, RM_TIMEOUT, MMIO_TIMEOUT_US(950));
/*
* WaFbcTurnOffFbcWatermark:bxt
* Display WA #0562: bxt
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS);
/*
* WaFbcHighMemBwCorruptionAvoidance:bxt
* Display WA #0883: bxt
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_DISABLE_DUMMY0);
}
@@ -157,7 +157,7 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
* Backlight PWM may stop in the asserted state, causing backlight
* to stay fully on.
*/
- I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+ intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
}
@@ -165,7 +165,7 @@ static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
{
u32 tmp;
- tmp = I915_READ(CLKCFG);
+ tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
switch (tmp & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_533:
@@ -195,7 +195,7 @@ static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
}
/* detect pineview DDR3 setting */
- tmp = I915_READ(CSHRDDR3CTL);
+ tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
@@ -366,39 +366,39 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
u32 val;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
- POSTING_READ(FW_BLC_SELF_VLV);
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
- was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
- I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
- POSTING_READ(FW_BLC_SELF);
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
} else if (IS_PINEVIEW(dev_priv)) {
- val = I915_READ(DSPFW3);
+ val = intel_uncore_read(&dev_priv->uncore, DSPFW3);
was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
if (enable)
val |= PINEVIEW_SELF_REFRESH_EN;
else
val &= ~PINEVIEW_SELF_REFRESH_EN;
- I915_WRITE(DSPFW3, val);
- POSTING_READ(DSPFW3);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, val);
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW3);
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
- was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
- I915_WRITE(FW_BLC_SELF, val);
- POSTING_READ(FW_BLC_SELF);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
} else if (IS_I915GM(dev_priv)) {
/*
* FIXME can't find a bit like this for 915G, and
* and yet it does have the related watermark in
* FW_BLC_SELF. What's going on?
*/
- was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+ was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
- I915_WRITE(INSTPM, val);
- POSTING_READ(INSTPM);
+ intel_uncore_write(&dev_priv->uncore, INSTPM, val);
+ intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
} else {
return false;
}
@@ -494,20 +494,20 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
switch (pipe) {
case PIPE_A:
- dsparb = I915_READ(DSPARB);
- dsparb2 = I915_READ(DSPARB2);
+ dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
break;
case PIPE_B:
- dsparb = I915_READ(DSPARB);
- dsparb2 = I915_READ(DSPARB2);
+ dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
break;
case PIPE_C:
- dsparb2 = I915_READ(DSPARB2);
- dsparb3 = I915_READ(DSPARB3);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
break;
@@ -525,7 +525,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = I915_READ(DSPARB);
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
int size;
size = dsparb & 0x7f;
@@ -541,7 +541,7 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = I915_READ(DSPARB);
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
int size;
size = dsparb & 0x1ff;
@@ -558,7 +558,7 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = I915_READ(DSPARB);
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
int size;
size = dsparb & 0x7f;
@@ -911,38 +911,38 @@ static void pnv_update_wm(struct intel_crtc *unused_crtc)
wm = intel_calculate_wm(clock, &pnv_display_wm,
pnv_display_wm.fifo_size,
cpp, latency->display_sr);
- reg = I915_READ(DSPFW1);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
reg &= ~DSPFW_SR_MASK;
reg |= FW_WM(wm, SR);
- I915_WRITE(DSPFW1, reg);
+ intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
/* cursor SR */
wm = intel_calculate_wm(clock, &pnv_cursor_wm,
pnv_display_wm.fifo_size,
4, latency->cursor_sr);
- reg = I915_READ(DSPFW3);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
reg |= FW_WM(wm, CURSOR_SR);
- I915_WRITE(DSPFW3, reg);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
/* Display HPLL off SR */
wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
cpp, latency->display_hpll_disable);
- reg = I915_READ(DSPFW3);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
reg |= FW_WM(wm, HPLL_SR);
- I915_WRITE(DSPFW3, reg);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
/* cursor HPLL off SR */
wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
4, latency->cursor_hpll_disable);
- reg = I915_READ(DSPFW3);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= FW_WM(wm, HPLL_CURSOR);
- I915_WRITE(DSPFW3, reg);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
intel_set_memory_cxsr(dev_priv, true);
@@ -976,25 +976,25 @@ static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe)
trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
- I915_WRITE(DSPFW1,
+ intel_uncore_write(&dev_priv->uncore, DSPFW1,
FW_WM(wm->sr.plane, SR) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- I915_WRITE(DSPFW2,
+ intel_uncore_write(&dev_priv->uncore, DSPFW2,
(wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
FW_WM(wm->sr.fbc, FBC_SR) |
FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- I915_WRITE(DSPFW3,
+ intel_uncore_write(&dev_priv->uncore, DSPFW3,
(wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
FW_WM(wm->sr.cursor, CURSOR_SR) |
FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
FW_WM(wm->hpll.plane, HPLL_SR));
- POSTING_READ(DSPFW1);
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
}
#define FW_WM_VLV(value, plane) \
@@ -1008,7 +1008,7 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
- I915_WRITE(VLV_DDL(pipe),
+ intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
(wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
(wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
(wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
@@ -1020,35 +1020,35 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
* high order bits so that there are no out of bounds values
* present in the registers during the reprogramming.
*/
- I915_WRITE(DSPHOWM, 0);
- I915_WRITE(DSPHOWM1, 0);
- I915_WRITE(DSPFW4, 0);
- I915_WRITE(DSPFW5, 0);
- I915_WRITE(DSPFW6, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
- I915_WRITE(DSPFW1,
+ intel_uncore_write(&dev_priv->uncore, DSPFW1,
FW_WM(wm->sr.plane, SR) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- I915_WRITE(DSPFW2,
+ intel_uncore_write(&dev_priv->uncore, DSPFW2,
FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- I915_WRITE(DSPFW3,
+ intel_uncore_write(&dev_priv->uncore, DSPFW3,
FW_WM(wm->sr.cursor, CURSOR_SR));
if (IS_CHERRYVIEW(dev_priv)) {
- I915_WRITE(DSPFW7_CHV,
+ intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- I915_WRITE(DSPFW8_CHV,
+ intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
- I915_WRITE(DSPFW9_CHV,
+ intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
- I915_WRITE(DSPHOWM,
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM,
FW_WM(wm->sr.plane >> 9, SR_HI) |
FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
@@ -1060,10 +1060,10 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
} else {
- I915_WRITE(DSPFW7,
+ intel_uncore_write(&dev_priv->uncore, DSPFW7,
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- I915_WRITE(DSPHOWM,
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM,
FW_WM(wm->sr.plane >> 9, SR_HI) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
@@ -1073,7 +1073,7 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
}
- POSTING_READ(DSPFW1);
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
}
#undef FW_WM_VLV
@@ -2310,14 +2310,14 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
srwm);
/* 965 has limitations... */
- I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
+ intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
FW_WM(8, CURSORB) |
FW_WM(8, PLANEB) |
FW_WM(8, PLANEA));
- I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
+ intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
FW_WM(8, PLANEC_OLD));
/* update cursor SR watermark */
- I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
@@ -2447,10 +2447,10 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
srwm = 1;
if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- I915_WRITE(FW_BLC_SELF,
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
else
- I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
}
drm_dbg_kms(&dev_priv->drm,
@@ -2464,8 +2464,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
fwater_hi = fwater_hi | (1 << 8);
- I915_WRITE(FW_BLC, fwater_lo);
- I915_WRITE(FW_BLC2, fwater_hi);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
if (enabled)
intel_set_memory_cxsr(dev_priv, true);
@@ -2488,13 +2488,13 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
&i845_wm_info,
dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
4, pessimal_latency_ns);
- fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+ fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
drm_dbg_kms(&dev_priv->drm,
"Setting FIFO watermarks - A: %d\n", planea_wm);
- I915_WRITE(FW_BLC, fwater_lo);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
}
/* latency must be in 0.1us units. */
@@ -2930,7 +2930,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
* any underrun. If not able to get Dimm info assume 16GB dimm
* to avoid any underrun.
*/
- if (dev_priv->dram_info.is_16gb_dimm)
+ if (dev_priv->dram_info.wm_lv_0_adjust_needed)
wm[0] += 1;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3534,17 +3534,17 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
previous->wm_lp[2] &= ~WM1_LP_SR_EN;
- I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
changed = true;
}
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
previous->wm_lp[1] &= ~WM1_LP_SR_EN;
- I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
changed = true;
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
previous->wm_lp[0] &= ~WM1_LP_SR_EN;
- I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
changed = true;
}
@@ -3574,56 +3574,56 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
_ilk_disable_lp_wm(dev_priv, dirty);
if (dirty & WM_DIRTY_PIPE(PIPE_A))
- I915_WRITE(WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
if (dirty & WM_DIRTY_PIPE(PIPE_B))
- I915_WRITE(WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
if (dirty & WM_DIRTY_PIPE(PIPE_C))
- I915_WRITE(WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
if (dirty & WM_DIRTY_DDB) {
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- val = I915_READ(WM_MISC);
+ val = intel_uncore_read(&dev_priv->uncore, WM_MISC);
if (results->partitioning == INTEL_DDB_PART_1_2)
val &= ~WM_MISC_DATA_PARTITION_5_6;
else
val |= WM_MISC_DATA_PARTITION_5_6;
- I915_WRITE(WM_MISC, val);
+ intel_uncore_write(&dev_priv->uncore, WM_MISC, val);
} else {
- val = I915_READ(DISP_ARB_CTL2);
+ val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
if (results->partitioning == INTEL_DDB_PART_1_2)
val &= ~DISP_DATA_PARTITION_5_6;
else
val |= DISP_DATA_PARTITION_5_6;
- I915_WRITE(DISP_ARB_CTL2, val);
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
}
}
if (dirty & WM_DIRTY_FBC) {
- val = I915_READ(DISP_ARB_CTL);
+ val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL);
if (results->enable_fbc_wm)
val &= ~DISP_FBC_WM_DIS;
else
val |= DISP_FBC_WM_DIS;
- I915_WRITE(DISP_ARB_CTL, val);
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, val);
}
if (dirty & WM_DIRTY_LP(1) &&
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
- I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
+ intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
if (INTEL_GEN(dev_priv) >= 7) {
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
- I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
+ intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
- I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
+ intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
- I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
- I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
- I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
dev_priv->wm.hw = *results;
}
@@ -3640,7 +3640,7 @@ u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
u8 enabled_slices_mask = 0;
for (i = 0; i < max_slices; i++) {
- if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE)
+ if (intel_uncore_read(&dev_priv->uncore, DBUF_CTL_S(i)) & DBUF_POWER_STATE)
enabled_slices_mask |= BIT(i);
}
@@ -4017,43 +4017,48 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
return 0;
}
-/*
- * Calculate initial DBuf slice offset, based on slice size
- * and mask(i.e if slice size is 1024 and second slice is enabled
- * offset would be 1024)
- */
-static unsigned int
-icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
- u32 slice_size,
- u32 ddb_size)
+static int intel_dbuf_size(struct drm_i915_private *dev_priv)
{
- unsigned int offset = 0;
+ int ddb_size = INTEL_INFO(dev_priv)->ddb_size;
- if (!dbuf_slice_mask)
- return 0;
+ drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
- offset = (ffs(dbuf_slice_mask) - 1) * slice_size;
+ if (INTEL_GEN(dev_priv) < 11)
+ return ddb_size - 4; /* 4 blocks for bypass path allocation */
- WARN_ON(offset >= ddb_size);
- return offset;
+ return ddb_size;
}
-u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
+static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
{
- u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
- drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
+ return intel_dbuf_size(dev_priv) /
+ INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+}
- if (INTEL_GEN(dev_priv) < 11)
- return ddb_size - 4; /* 4 blocks for bypass path allocation */
+static void
+skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask,
+ struct skl_ddb_entry *ddb)
+{
+ int slice_size = intel_dbuf_slice_size(dev_priv);
- return ddb_size;
+ if (!slice_mask) {
+ ddb->start = 0;
+ ddb->end = 0;
+ return;
+ }
+
+ ddb->start = (ffs(slice_mask) - 1) * slice_size;
+ ddb->end = fls(slice_mask) * slice_size;
+
+ WARN_ON(ddb->start >= ddb->end);
+ WARN_ON(ddb->end > intel_dbuf_size(dev_priv));
}
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry)
{
u32 slice_mask = 0;
- u16 ddb_size = intel_get_ddb_size(dev_priv);
+ u16 ddb_size = intel_dbuf_size(dev_priv);
u16 num_supported_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
u16 slice_size = ddb_size / num_supported_slices;
u16 start_slice;
@@ -4077,116 +4082,40 @@ u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
return slice_mask;
}
-static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
- u8 active_pipes);
-
-static int
-skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state,
- const u64 total_data_rate,
- struct skl_ddb_entry *alloc, /* out */
- int *num_active /* out */)
-{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
- const struct intel_crtc *crtc;
- u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
- enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
- struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(intel_state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(intel_state);
- u8 active_pipes = new_dbuf_state->active_pipes;
- u16 ddb_size;
- u32 ddb_range_size;
- u32 i;
- u32 dbuf_slice_mask;
- u32 offset;
- u32 slice_size;
- u32 total_slice_mask;
- u32 start, end;
- int ret;
-
- *num_active = hweight8(active_pipes);
-
- if (!crtc_state->hw.active) {
- alloc->start = 0;
- alloc->end = 0;
- return 0;
- }
-
- ddb_size = intel_get_ddb_size(dev_priv);
-
- slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int hdisplay, vdisplay;
- /*
- * If the state doesn't change the active CRTC's or there is no
- * modeset request, then there's no need to recalculate;
- * the existing pipe allocation limits should remain unchanged.
- * Note that we're safe from racing commits since any racing commit
- * that changes the active CRTC list or do modeset would need to
- * grab _all_ crtc locks, including the one we currently hold.
- */
- if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes &&
- !dev_priv->wm.distrust_bios_wm) {
- /*
- * alloc may be cleared by clear_intel_crtc_state,
- * copy from old state to be sure
- *
- * FIXME get rid of this mess
- */
- *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
+ if (!crtc_state->hw.active)
return 0;
- }
-
- /*
- * Get allowed DBuf slices for correspondent pipe and platform.
- */
- dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes);
-
- /*
- * Figure out at which DBuf slice we start, i.e if we start at Dbuf S2
- * and slice size is 1024, the offset would be 1024
- */
- offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask,
- slice_size, ddb_size);
-
- /*
- * Figure out total size of allowed DBuf slices, which is basically
- * a number of allowed slices for that pipe multiplied by slice size.
- * Inside of this
- * range ddb entries are still allocated in proportion to display width.
- */
- ddb_range_size = hweight8(dbuf_slice_mask) * slice_size;
/*
* Watermark/ddb requirement highly depends upon width of the
* framebuffer, So instead of allocating DDB equally among pipes
* distribute DDB based on resolution/width of the display.
*/
- total_slice_mask = dbuf_slice_mask;
- for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
- const struct drm_display_mode *pipe_mode =
- &crtc_state->hw.pipe_mode;
- enum pipe pipe = crtc->pipe;
- int hdisplay, vdisplay;
- u32 pipe_dbuf_slice_mask;
+ drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
- if (!crtc_state->hw.active)
- continue;
+ return hdisplay;
+}
- pipe_dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state,
- active_pipes);
+static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
+ enum pipe for_pipe,
+ unsigned int *weight_start,
+ unsigned int *weight_end,
+ unsigned int *weight_total)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(dbuf_state->base.state->base.dev);
+ enum pipe pipe;
- /*
- * According to BSpec pipe can share one dbuf slice with another
- * pipes or pipe can use multiple dbufs, in both cases we
- * account for other pipes only if they have exactly same mask.
- * However we need to account how many slices we should enable
- * in total.
- */
- total_slice_mask |= pipe_dbuf_slice_mask;
+ *weight_start = 0;
+ *weight_end = 0;
+ *weight_total = 0;
+
+ for_each_pipe(dev_priv, pipe) {
+ int weight = dbuf_state->weight[pipe];
/*
* Do not account pipes using other slice sets
@@ -4195,42 +4124,78 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
* i.e no partial intersection), so it is enough to check for
* equality for now.
*/
- if (dbuf_slice_mask != pipe_dbuf_slice_mask)
+ if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
continue;
- drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
+ *weight_total += weight;
+ if (pipe < for_pipe) {
+ *weight_start += weight;
+ *weight_end += weight;
+ } else if (pipe == for_pipe) {
+ *weight_end += weight;
+ }
+ }
+}
- total_width_in_range += hdisplay;
+static int
+skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ unsigned int weight_total, weight_start, weight_end;
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ struct intel_crtc_state *crtc_state;
+ struct skl_ddb_entry ddb_slices;
+ enum pipe pipe = crtc->pipe;
+ u32 ddb_range_size;
+ u32 dbuf_slice_mask;
+ u32 start, end;
+ int ret;
- if (pipe < for_pipe)
- width_before_pipe_in_range += hdisplay;
- else if (pipe == for_pipe)
- pipe_width = hdisplay;
+ if (new_dbuf_state->weight[pipe] == 0) {
+ new_dbuf_state->ddb[pipe].start = 0;
+ new_dbuf_state->ddb[pipe].end = 0;
+ goto out;
}
- /*
- * FIXME: For now we always enable slice S1 as per
- * the Bspec display initialization sequence.
- */
- new_dbuf_state->enabled_slices = total_slice_mask | BIT(DBUF_S1);
+ dbuf_slice_mask = new_dbuf_state->slices[pipe];
- if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
- ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
- }
+ skl_ddb_entry_for_slices(dev_priv, dbuf_slice_mask, &ddb_slices);
+ ddb_range_size = skl_ddb_entry_size(&ddb_slices);
+
+ intel_crtc_dbuf_weights(new_dbuf_state, pipe,
+ &weight_start, &weight_end, &weight_total);
+
+ start = ddb_range_size * weight_start / weight_total;
+ end = ddb_range_size * weight_end / weight_total;
+
+ new_dbuf_state->ddb[pipe].start = ddb_slices.start + start;
+ new_dbuf_state->ddb[pipe].end = ddb_slices.start + end;
+
+out:
+ if (skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
+ &new_dbuf_state->ddb[pipe]))
+ return 0;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
- start = ddb_range_size * width_before_pipe_in_range / total_width_in_range;
- end = ddb_range_size *
- (width_before_pipe_in_range + pipe_width) / total_width_in_range;
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
- alloc->start = offset + start;
- alloc->end = offset + end;
+ crtc_state->wm.skl.ddb = new_dbuf_state->ddb[pipe];
drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n",
- for_crtc->base.id, for_crtc->name,
- dbuf_slice_mask, alloc->start, alloc->end, active_pipes);
+ "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
+ old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
+ new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
+ old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
return 0;
}
@@ -4300,12 +4265,12 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
- val = I915_READ(CUR_BUF_CFG(pipe));
+ val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
return;
}
- val = I915_READ(PLANE_CTL(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_CTL(pipe, plane_id));
/* No DDB allocated for disabled planes */
if (val & PLANE_CTL_ENABLE)
@@ -4314,11 +4279,11 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
val & PLANE_CTL_ALPHA_MASK);
if (INTEL_GEN(dev_priv) >= 11) {
- val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
} else {
- val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
- val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
+ val2 = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
if (fourcc &&
drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
@@ -4632,10 +4597,8 @@ static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
}
-static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
- u8 active_pipes)
+static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -4798,55 +4761,30 @@ skl_plane_wm_level(const struct intel_crtc_state *crtc_state,
}
static int
-skl_allocate_pipe_ddb(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+skl_allocate_plane_ddb(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
+ const struct intel_dbuf_state *dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
+ int num_active = hweight8(dbuf_state->active_pipes);
u16 alloc_size, start = 0;
u16 total[I915_MAX_PLANES] = {};
u16 uv_total[I915_MAX_PLANES] = {};
u64 total_data_rate;
enum plane_id plane_id;
- int num_active;
u32 blocks;
int level;
- int ret;
/* Clear the partitioning for disabled planes. */
memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
- if (!crtc_state->hw.active) {
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
- struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
-
- /*
- * FIXME hack to make sure we compute this sensibly when
- * turning off all the pipes. Otherwise we leave it at
- * whatever we had previously, and then runtime PM will
- * mess it up by turning off all but S1. Remove this
- * once the dbuf state computation flow becomes sane.
- */
- if (new_dbuf_state->active_pipes == 0) {
- new_dbuf_state->enabled_slices = BIT(DBUF_S1);
-
- if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
- ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
- }
- }
-
- alloc->start = alloc->end = 0;
+ if (!crtc_state->hw.active)
return 0;
- }
if (INTEL_GEN(dev_priv) >= 11)
total_data_rate =
@@ -4855,12 +4793,6 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
total_data_rate =
skl_get_total_relative_data_rate(state, crtc);
- ret = skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state,
- total_data_rate,
- alloc, &num_active);
- if (ret)
- return ret;
-
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
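
Note on the rewritten allocator above: it now takes both of its inputs from the global dbuf state — the active-pipe count is a popcount of the pipe mask, and the block budget is the size of the pipe's pre-computed [start, end) entry. A minimal standalone sketch of those two computations (hypothetical helpers, not the driver's own):

#include <stdio.h>

struct ddb_range { unsigned int start, end; };

/* Population count of an 8-bit pipe mask, as hweight8() would compute. */
static int count_active_pipes(unsigned char active_pipes)
{
	int n = 0;

	while (active_pipes) {
		n += active_pipes & 1;
		active_pipes >>= 1;
	}
	return n;
}

/* Number of blocks covered by a [start, end) ddb entry. */
static unsigned int ddb_entry_size(const struct ddb_range *r)
{
	return r->end - r->start;
}

int main(void)
{
	struct ddb_range alloc = { 0, 512 };

	printf("%d active pipes, %u blocks\n",
	       count_active_pipes(0x5), ddb_entry_size(&alloc)); /* 2, 512 */
	return 0;
}
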
@@ -5731,6 +5663,18 @@ static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
return a->start < b->end && b->start < a->end;
}
+static void skl_ddb_entry_union(struct skl_ddb_entry *a,
+ const struct skl_ddb_entry *b)
+{
+ if (a->end && b->end) {
+ a->start = min(a->start, b->start);
+ a->end = max(a->end, b->end);
+ } else if (b->end) {
+ a->start = b->start;
+ a->end = b->end;
+ }
+}
+
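
The new skl_ddb_entry_union() treats an entry whose end is 0 as empty, mirroring the overlap test just above it. A small standalone sketch of the same [start, end) union semantics (the struct and helper names here are hypothetical):

#include <stdio.h>

/* Hypothetical stand-in for a { start, end } block range; end == 0 means "empty". */
struct ddb_range { unsigned int start, end; };

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

/* Grow *a so it also covers b; an empty side contributes nothing. */
static void range_union(struct ddb_range *a, const struct ddb_range *b)
{
	if (a->end && b->end) {
		a->start = min_u(a->start, b->start);
		a->end = max_u(a->end, b->end);
	} else if (b->end) {
		*a = *b;
	}
}

int main(void)
{
	struct ddb_range pipe = { 0, 0 };		/* starts empty */
	struct ddb_range y = { 0, 160 }, uv = { 160, 200 };

	range_union(&pipe, &y);
	range_union(&pipe, &uv);
	printf("pipe ddb: %u - %u\n", pipe.start, pipe.end);	/* 0 - 200 */
	return 0;
}
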
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
const struct skl_ddb_entry *entries,
int num_entries, int ignore_idx)
@@ -5775,39 +5719,114 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
return 0;
}
+static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(dbuf_state->base.state->base.dev);
+ u8 enabled_slices;
+ enum pipe pipe;
+
+ /*
+ * FIXME: For now we always enable slice S1 as per
+ * the Bspec display initialization sequence.
+ */
+ enabled_slices = BIT(DBUF_S1);
+
+ for_each_pipe(dev_priv, pipe)
+ enabled_slices |= dbuf_state->slices[pipe];
+
+ return enabled_slices;
+}
+
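
The helper above folds every pipe's slice mask into one enabled-slices mask, keeping S1 on unconditionally per the comment. A standalone model of that aggregation (the bit values are assumptions for illustration):

#include <stdio.h>

#define DBUF_S1_BIT	0x1	/* assumed bit for slice S1, not the driver's define */
#define MAX_PIPES	4

/* OR together every pipe's slice mask; S1 stays enabled regardless. */
static unsigned int enabled_slices(const unsigned char slices[MAX_PIPES])
{
	unsigned int mask = DBUF_S1_BIT;
	int pipe;

	for (pipe = 0; pipe < MAX_PIPES; pipe++)
		mask |= slices[pipe];

	return mask;
}

int main(void)
{
	unsigned char slices[MAX_PIPES] = { 0x1, 0x2, 0x0, 0x0 };

	printf("enabled slices: 0x%x\n", enabled_slices(slices)); /* 0x3 */
	return 0;
}
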
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_dbuf_state *old_dbuf_state;
- const struct intel_dbuf_state *new_dbuf_state;
+ struct intel_dbuf_state *new_dbuf_state = NULL;
const struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- ret = skl_allocate_pipe_ddb(state, crtc);
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ new_dbuf_state = intel_atomic_get_dbuf_state(state);
+ if (IS_ERR(new_dbuf_state))
+ return PTR_ERR(new_dbuf_state);
+
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ break;
+ }
+
+ if (!new_dbuf_state)
+ return 0;
+
+ new_dbuf_state->active_pipes =
+ intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
+
+ if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
if (ret)
return ret;
+ }
- ret = skl_ddb_add_affected_planes(old_crtc_state,
- new_crtc_state);
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ enum pipe pipe = crtc->pipe;
+
+ new_dbuf_state->slices[pipe] =
+ skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes);
+
+ if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
+ continue;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
if (ret)
return ret;
}
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
+
+ if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
+ ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
- if (new_dbuf_state &&
- new_dbuf_state->enabled_slices != old_dbuf_state->enabled_slices)
drm_dbg_kms(&dev_priv->drm,
"Enabled dbuf slices 0x%x -> 0x%x (out of %d dbuf slices)\n",
old_dbuf_state->enabled_slices,
new_dbuf_state->enabled_slices,
INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
+
+ if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
+ continue;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ ret = skl_crtc_allocate_ddb(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_allocate_plane_ddb(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = skl_ddb_add_affected_planes(old_crtc_state,
+ new_crtc_state);
+ if (ret)
+ return ret;
+ }
return 0;
}
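
The rewritten skl_compute_ddb() applies one idiom throughout: recompute each tracked per-pipe value into the new global state, and take the global-state lock only when a value actually differs from the old state (serializing when the hardware-visible slice programming changes). A rough standalone model of that change-detection idiom (the lock call is a stand-in, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

#define MAX_PIPES 4

/* Stand-in for taking the global-state lock; here it only reports. */
static void lock_global_state(bool *locked)
{
	if (!*locked) {
		*locked = true;
		printf("global state locked\n");
	}
}

int main(void)
{
	unsigned int old_weight[MAX_PIPES] = { 64, 64,  0, 0 };
	unsigned int new_weight[MAX_PIPES] = { 64, 128, 0, 0 };
	bool locked = false;
	int pipe;

	for (pipe = 0; pipe < MAX_PIPES; pipe++) {
		if (new_weight[pipe] == old_weight[pipe])
			continue;		/* untouched pipes stay lock-free */
		lock_global_state(&locked);	/* only taken on a real change */
	}
	return 0;
}
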
@@ -5944,83 +5963,6 @@ skl_print_wm_changes(struct intel_atomic_state *state)
}
}
-static int intel_add_affected_pipes(struct intel_atomic_state *state,
- u8 pipe_mask)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state;
-
- if ((pipe_mask & BIT(crtc->pipe)) == 0)
- continue;
-
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
-
- return 0;
-}
-
-static int
-skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
- int i, ret;
-
- if (dev_priv->wm.distrust_bios_wm) {
- /*
- * skl_ddb_get_pipe_allocation_limits() currently requires
- * all active pipes to be included in the state so that
- * it can redistribute the dbuf among them, and it really
- * wants to recompute things when distrust_bios_wm is set
- * so we add all the pipes to the state.
- */
- ret = intel_add_affected_pipes(state, ~0);
- if (ret)
- return ret;
- }
-
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- struct intel_dbuf_state *new_dbuf_state;
- const struct intel_dbuf_state *old_dbuf_state;
-
- new_dbuf_state = intel_atomic_get_dbuf_state(state);
- if (IS_ERR(new_dbuf_state))
- return PTR_ERR(new_dbuf_state);
-
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
-
- new_dbuf_state->active_pipes =
- intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
-
- if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes)
- break;
-
- ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
-
- /*
- * skl_ddb_get_pipe_allocation_limits() currently requires
- * all active pipes to be included in the state so that
- * it can redistribute the dbuf among them.
- */
- ret = intel_add_affected_pipes(state,
- new_dbuf_state->active_pipes);
- if (ret)
- return ret;
-
- break;
- }
-
- return 0;
-}
-
/*
* To make sure the cursor watermark registers are always consistent
* with our computed state the following scenario needs special
@@ -6088,15 +6030,6 @@ skl_compute_wm(struct intel_atomic_state *state)
struct intel_crtc_state *new_crtc_state;
int ret, i;
- ret = skl_ddb_add_affected_pipes(state);
- if (ret)
- return ret;
-
- /*
- * Calculate WM's for all pipes that are part of this transaction.
- * Note that skl_ddb_add_affected_pipes may have added more CRTC's that
- * weren't otherwise being modified if pipe allocations had to change.
- */
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = skl_build_pipe_wm(state, crtc);
if (ret)
@@ -6231,9 +6164,9 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
for (level = 0; level <= max_level; level++) {
if (plane_id != PLANE_CURSOR)
- val = I915_READ(PLANE_WM(pipe, plane_id, level));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_WM(pipe, plane_id, level));
else
- val = I915_READ(CUR_WM(pipe, level));
+ val = intel_uncore_read(&dev_priv->uncore, CUR_WM(pipe, level));
skl_wm_level_from_reg_val(val, &wm->wm[level]);
}
@@ -6242,9 +6175,9 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
wm->sagv_wm0 = wm->wm[0];
if (plane_id != PLANE_CURSOR)
- val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
+ val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id));
else
- val = I915_READ(CUR_WM_TRANS(pipe));
+ val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe));
skl_wm_level_from_reg_val(val, &wm->trans_wm);
}
@@ -6255,20 +6188,49 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(dev_priv->dbuf.obj.state);
struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
for_each_intel_crtc(&dev_priv->drm, crtc) {
- crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
- }
- if (dev_priv->active_pipes) {
- /* Fully recompute DDB on first atomic commit */
- dev_priv->wm.distrust_bios_wm = true;
+ memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ struct skl_ddb_entry *ddb_uv =
+ &crtc_state->wm.skl.plane_ddb_uv[plane_id];
+
+ skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
+ plane_id, ddb_y, ddb_uv);
+
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv);
+ }
+
+ dbuf_state->slices[pipe] =
+ skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes);
+
+ dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
+
+ crtc_state->wm.skl.ddb = dbuf_state->ddb[pipe];
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
+ dbuf_state->ddb[pipe].end, dbuf_state->active_pipes);
}
+
+ dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
}
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
@@ -6280,7 +6242,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
enum pipe pipe = crtc->pipe;
- hw->wm_pipe[pipe] = I915_READ(WM0_PIPE_ILK(pipe));
+ hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
memset(active, 0, sizeof(*active));
@@ -6324,13 +6286,13 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
{
u32 tmp;
- tmp = I915_READ(DSPFW1);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
- tmp = I915_READ(DSPFW2);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
wm->sr.fbc = _FW_WM(tmp, FBC_SR);
wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
@@ -6338,7 +6300,7 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
- tmp = I915_READ(DSPFW3);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
@@ -6352,7 +6314,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
u32 tmp;
for_each_pipe(dev_priv, pipe) {
- tmp = I915_READ(VLV_DDL(pipe));
+ tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
wm->ddl[pipe].plane[PLANE_PRIMARY] =
(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
@@ -6364,34 +6326,34 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
}
- tmp = I915_READ(DSPFW1);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
- tmp = I915_READ(DSPFW2);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
- tmp = I915_READ(DSPFW3);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
if (IS_CHERRYVIEW(dev_priv)) {
- tmp = I915_READ(DSPFW7_CHV);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = I915_READ(DSPFW8_CHV);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
- tmp = I915_READ(DSPFW9_CHV);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
- tmp = I915_READ(DSPHOWM);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
@@ -6403,11 +6365,11 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
} else {
- tmp = I915_READ(DSPFW7);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = I915_READ(DSPHOWM);
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
@@ -6428,7 +6390,7 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
g4x_read_wm_values(dev_priv, wm);
- wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+ wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state =
@@ -6572,7 +6534,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
vlv_read_wm_values(dev_priv, wm);
- wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
wm->level = VLV_WM_LEVEL_PM2;
if (IS_CHERRYVIEW(dev_priv)) {
@@ -6719,9 +6681,9 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
*/
static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
- I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
- I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
- I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM1_LP_SR_EN);
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM1_LP_SR_EN);
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM1_LP_SR_EN);
/*
* Don't touch WM1S_LP_EN here.
@@ -6739,25 +6701,25 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
for_each_intel_crtc(&dev_priv->drm, crtc)
ilk_pipe_wm_get_hw_state(crtc);
- hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
- hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
- hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
+ hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
+ hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
+ hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
- hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
+ hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
if (INTEL_GEN(dev_priv) >= 7) {
- hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
- hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+ hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
+ hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
}
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+ hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
else if (IS_IVYBRIDGE(dev_priv))
- hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
+ hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
hw->enable_fbc_wm =
- !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+ !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
/**
@@ -6808,14 +6770,14 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
if (!HAS_IPC(dev_priv))
return;
- val = I915_READ(DISP_ARB_CTL2);
+ val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
if (dev_priv->ipc_enabled)
val |= DISP_IPC_ENABLE;
else
val &= ~DISP_IPC_ENABLE;
- I915_WRITE(DISP_ARB_CTL2, val);
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
}
static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
@@ -6850,7 +6812,7 @@ static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
@@ -6858,12 +6820,12 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(DSPCNTR(pipe),
- I915_READ(DSPCNTR(pipe)) |
+ intel_uncore_write(&dev_priv->uncore, DSPCNTR(pipe),
+ intel_uncore_read(&dev_priv->uncore, DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
- I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
- POSTING_READ(DSPSURF(pipe));
+ intel_uncore_write(&dev_priv->uncore, DSPSURF(pipe), intel_uncore_read(&dev_priv->uncore, DSPSURF(pipe)));
+ intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe));
}
}
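
Most of the remaining churn in this file is mechanical: each implicit-device register macro becomes an explicit accessor call on the uncore, and read-modify-write sequences keep their shape. A self-contained model of the converted read-modify-write shape (the register map, index, and bit value below are fabricated stand-ins, not the i915 definitions):

#include <stdint.h>
#include <stdio.h>

/* Fake mmio-backed "hardware" so the sketch is self-contained. */
struct fake_uncore { uint32_t regs[16]; };

static uint32_t uncore_read(struct fake_uncore *uncore, unsigned int reg)
{
	return uncore->regs[reg];
}

static void uncore_write(struct fake_uncore *uncore, unsigned int reg, uint32_t val)
{
	uncore->regs[reg] = val;
}

#define FAKE_DISP_ARB_CTL	2		/* hypothetical register index */
#define FAKE_FBC_WM_DIS		(1u << 15)	/* hypothetical bit */

int main(void)
{
	struct fake_uncore uncore = { { 0 } };

	/* Read-modify-write against an explicit uncore, the shape the patch converts to. */
	uncore_write(&uncore, FAKE_DISP_ARB_CTL,
		     uncore_read(&uncore, FAKE_DISP_ARB_CTL) | FAKE_FBC_WM_DIS);

	printf("reg = 0x%08x\n", (unsigned int)uncore_read(&uncore, FAKE_DISP_ARB_CTL));
	return 0;
}
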
@@ -6879,10 +6841,10 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
- I915_WRITE(PCH_3DCGDIS0,
+ intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS0,
MARIUNIT_CLOCK_GATE_DISABLE |
SVSMUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(PCH_3DCGDIS1,
+ intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS1,
VFMUNIT_CLOCK_GATE_DISABLE);
/*
@@ -6892,12 +6854,12 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
* The bit 5 of 0x42020
* The bit 15 of 0x45000
*/
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ (intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));
dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
- I915_WRITE(DISP_ARB_CTL,
- (I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL,
+ (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
/*
@@ -6909,18 +6871,18 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
*/
if (IS_IRONLAKE_M(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:ilk */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE);
}
- I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+ intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
g4x_disable_trickle_feed(dev_priv);
@@ -6938,27 +6900,27 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
PCH_DPLUNIT_CLOCK_GATE_DISABLE |
PCH_CPUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN2, intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
/* The below fixes the weird display corruption, a few pixels shifted
* downward, on (only) LVDS of some HP laptops with IVY.
*/
for_each_pipe(dev_priv, pipe) {
- val = I915_READ(TRANS_CHICKEN2(pipe));
+ val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
if (dev_priv->vbt.fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
- I915_WRITE(TRANS_CHICKEN2(pipe), val);
+ intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN2(pipe), val);
}
/* WADP0ClockGatingDisable */
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(TRANS_CHICKEN1(pipe),
+ intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
}
@@ -6967,7 +6929,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
u32 tmp;
- tmp = I915_READ(MCH_SSKPD);
+ tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD);
if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
drm_dbg_kms(&dev_priv->drm,
"Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
@@ -6978,14 +6940,14 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+ intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
- I915_WRITE(GEN6_UCGCTL1,
- I915_READ(GEN6_UCGCTL1) |
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
+ intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
GEN6_CSUNIT_CLOCK_GATE_DISABLE);
@@ -7002,7 +6964,7 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
* WaDisableRCCUnitClockGating:snb
* WaDisableRCPBUnitClockGating:snb
*/
- I915_WRITE(GEN6_UCGCTL2,
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
@@ -7017,14 +6979,14 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
*
* WaFbcAsynchFlipDisableFbcQueue:snb
*/
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL);
- I915_WRITE(ILK_DSPCLK_GATE_D,
- I915_READ(ILK_DSPCLK_GATE_D) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D,
+ intel_uncore_read(&dev_priv->uncore, ILK_DSPCLK_GATE_D) |
ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
@@ -7042,23 +7004,23 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
* disabled when not needed anymore in order to save power.
*/
if (HAS_PCH_LPT_LP(dev_priv))
- I915_WRITE(SOUTH_DSPCLK_GATE_D,
- I915_READ(SOUTH_DSPCLK_GATE_D) |
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D,
+ intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
PCH_LP_PARTITION_LEVEL_DISABLE);
/* WADPOClockGatingDisable:hsw */
- I915_WRITE(TRANS_CHICKEN1(PIPE_A),
- I915_READ(TRANS_CHICKEN1(PIPE_A)) |
+ intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A),
+ intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A)) |
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_LPT_LP(dev_priv)) {
- u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val);
}
}
@@ -7070,60 +7032,62 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
u32 val;
/* WaTempDisableDOPClkGating:bdw */
- misccpctl = I915_READ(GEN7_MISCCPCTL);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
- val = I915_READ(GEN8_L3SQCREG1);
+ val = intel_uncore_read(&dev_priv->uncore, GEN8_L3SQCREG1);
val &= ~L3_PRIO_CREDITS_MASK;
val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
- I915_WRITE(GEN8_L3SQCREG1, val);
+ intel_uncore_write(&dev_priv->uncore, GEN8_L3SQCREG1, val);
/*
* Wait at least 100 clocks before re-enabling clock gating.
* See the definition of L3SQCREG1 in BSpec.
*/
- POSTING_READ(GEN8_L3SQCREG1);
+ intel_uncore_posting_read(&dev_priv->uncore, GEN8_L3SQCREG1);
udelay(1);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
}
static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* Wa_1409120013:icl,ehl */
- I915_WRITE(ILK_DPFC_CHICKEN,
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/* This is not a Wa. Enable to reduce Sampler power */
- I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
- I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
+ intel_uncore_read(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
/*Wa_14010594013:icl, ehl */
intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
0, CNL_DELAY_PMRSP);
}
-static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
+static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
{
- /* Wa_1409120013:tgl */
- I915_WRITE(ILK_DPFC_CHICKEN,
- ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+ /* Wa_1409120013:tgl,rkl,adl_s,dg1 */
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
+ ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/* Wa_1409825376:tgl (pre-prod)*/
if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1))
- I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
+ intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
TGL_VRH_GATING_DIS);
- /* Wa_14011059788:tgl */
+ /* Wa_14011059788:tgl,rkl,adl_s,dg1 */
intel_uncore_rmw(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
0, DFR_DISABLE);
}
static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
{
+ gen12lp_init_clock_gating(dev_priv);
+
/* Wa_1409836686:dg1[a0] */
if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0))
- I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
+ intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
DPT_GATING_DIS);
}
@@ -7133,7 +7097,7 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
return;
/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
+ intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
CNP_PWM_CGE_GATING_DISABLE);
}
@@ -7143,35 +7107,35 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
cnp_init_clock_gating(dev_priv);
/* This is not a Wa. Enable for better image quality */
- I915_WRITE(_3D_CHICKEN3,
+ intel_uncore_write(&dev_priv->uncore, _3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
/* WaEnableChickenDCPR:cnl */
- I915_WRITE(GEN8_CHICKEN_DCPR_1,
- I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+ intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
+ intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
/*
* WaFbcWakeMemOn:cnl
* Display WA #0859: cnl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_MEMORY_WAKE);
- val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
+ val = intel_uncore_read(&dev_priv->uncore, SLICE_UNIT_LEVEL_CLKGATE);
/* ReadHitWriteOnlyDisable:cnl */
val |= RCCUNIT_CLKGATE_DIS;
- I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
+ intel_uncore_write(&dev_priv->uncore, SLICE_UNIT_LEVEL_CLKGATE, val);
/* Wa_2201832410:cnl */
- val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE);
+ val = intel_uncore_read(&dev_priv->uncore, SUBSLICE_UNIT_LEVEL_CLKGATE);
val |= GWUNIT_CLKGATE_DIS;
- I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val);
+ intel_uncore_write(&dev_priv->uncore, SUBSLICE_UNIT_LEVEL_CLKGATE, val);
/* WaDisableVFclkgate:cnl */
/* WaVFUnitClockGatingDisable:cnl */
- val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
+ val = intel_uncore_read(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE);
val |= VFUNIT_CLKGATE_DIS;
- I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
+ intel_uncore_write(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE, val);
}
static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7180,21 +7144,21 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WAC6entrylatency:cfl */
- I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+ intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
FBC_LLC_FULLY_OPEN);
/*
* WaFbcTurnOffFbcWatermark:cfl
* Display WA #0562: cfl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS);
/*
* WaFbcNukeOnHostModify:cfl
* Display WA #0873: cfl
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
@@ -7203,31 +7167,31 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WAC6entrylatency:kbl */
- I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+ intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
FBC_LLC_FULLY_OPEN);
/* WaDisableSDEUnitClockGating:kbl */
if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/* WaDisableGamClockGating:kbl */
if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
- I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
/*
* WaFbcTurnOffFbcWatermark:kbl
* Display WA #0562: kbl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS);
/*
* WaFbcNukeOnHostModify:kbl
* Display WA #0873: kbl
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
@@ -7236,32 +7200,32 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WaDisableDopClockGating:skl */
- I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL) &
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL) &
~GEN7_DOP_CLOCK_GATE_ENABLE);
/* WAC6entrylatency:skl */
- I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+ intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
FBC_LLC_FULLY_OPEN);
/*
* WaFbcTurnOffFbcWatermark:skl
* Display WA #0562: skl
*/
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
DISP_FBC_WM_DIS);
/*
* WaFbcNukeOnHostModify:skl
* Display WA #0873: skl
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
/*
* WaFbcHighMemBwCorruptionAvoidance:skl
* Display WA #0883: skl
*/
- I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_DISABLE_DUMMY0);
}
@@ -7270,42 +7234,42 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
enum pipe pipe;
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
- I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
HSW_FBCQ_DIS);
/* WaSwitchSolVfFArbitrationPriority:bdw */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+ intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
/* WaPsrDPAMaskVBlankInSRD:bdw */
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(CHICKEN_PIPESL_1(pipe),
- I915_READ(CHICKEN_PIPESL_1(pipe)) |
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) |
BDW_DPRS_MASK_VBLANK_SRD);
}
/* WaVSRefCountFullforceMissDisable:bdw */
/* WaDSRefCountFullforceMissDisable:bdw */
- I915_WRITE(GEN7_FF_THREAD_MODE,
- I915_READ(GEN7_FF_THREAD_MODE) &
+ intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
+ intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
- I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+ intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL,
_MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
/* WaDisableSDEUnitClockGating:bdw */
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/* WaProgramL3SqcReg1Default:bdw */
gen8_set_l3sqc_credits(dev_priv, 30, 2);
/* WaKVMNotificationOnConfigChange:bdw */
- I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR2_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR2_1)
| KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
lpt_init_clock_gating(dev_priv);
@@ -7315,24 +7279,24 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
* Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
* clock gating.
*/
- I915_WRITE(GEN6_UCGCTL1,
- I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
+ intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- I915_WRITE(CHICKEN_PIPESL_1(PIPE_A),
- I915_READ(CHICKEN_PIPESL_1(PIPE_A)) |
+ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
+ intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
HSW_FBCQ_DIS);
/* This is required by WaCatErrorRejectionIssue:hsw */
- I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
/* WaSwitchSolVfFArbitrationPriority:hsw */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+ intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
lpt_init_clock_gating(dev_priv);
}
@@ -7341,26 +7305,26 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
u32 snpcr;
- I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
+ intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
+ intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
/* WaDisableBackToBackFlipFix:ivb */
- I915_WRITE(IVB_CHICKEN3,
+ intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
if (IS_IVB_GT1(dev_priv))
- I915_WRITE(GEN7_ROW_CHICKEN2,
+ intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
else {
/* must write both registers */
- I915_WRITE(GEN7_ROW_CHICKEN2,
+ intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
- I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
+ intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2_GT2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
}
@@ -7368,20 +7332,20 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:ivb workaround.
*/
- I915_WRITE(GEN6_UCGCTL2,
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
/* This is required by WaCatErrorRejectionIssue:ivb */
- I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
g4x_disable_trickle_feed(dev_priv);
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr = intel_uncore_read(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR);
snpcr &= ~GEN6_MBC_SNPCR_MASK;
snpcr |= GEN6_MBC_SNPCR_MED;
- I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+ intel_uncore_write(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR, snpcr);
if (!HAS_PCH_NOP(dev_priv))
cpt_init_clock_gating(dev_priv);
@@ -7392,58 +7356,58 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaDisableBackToBackFlipFix:vlv */
- I915_WRITE(IVB_CHICKEN3,
+ intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
/* WaDisableDopClockGating:vlv */
- I915_WRITE(GEN7_ROW_CHICKEN2,
+ intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
/* This is required by WaCatErrorRejectionIssue:vlv */
- I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
/*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:vlv workaround.
*/
- I915_WRITE(GEN6_UCGCTL2,
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
/* WaDisableL3Bank2xClockGate:vlv
* Disabling L3 clock gating- MMIO 940c[25] = 1
* Set bit 25, to disable L3_BANK_2x_CLK_GATING */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, GEN7_UCGCTL4,
+ intel_uncore_read(&dev_priv->uncore, GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
/*
* WaDisableVLVClockGating_VBIIssue:vlv
* Disable clock gating on the GCFG unit to prevent a delay
* in the reporting of vblank events.
*/
- I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
+ intel_uncore_write(&dev_priv->uncore, VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
- I915_WRITE(GEN7_FF_THREAD_MODE,
- I915_READ(GEN7_FF_THREAD_MODE) &
+ intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
+ intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
/* WaDisableSemaphoreAndSyncFlipWait:chv */
- I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+ intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL,
_MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
/* WaDisableCSUnitClockGating:chv */
- I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
GEN6_CSUNIT_CLOCK_GATE_DISABLE);
/* WaDisableSDEUnitClockGating:chv */
- I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/*
@@ -7458,17 +7422,17 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
u32 dspclk_gate;
- I915_WRITE(RENCLK_GATE_D1, 0);
- I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, 0);
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
GS_UNIT_CLOCK_GATE_DISABLE |
CL_UNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(RAMCLK_GATE_D, 0);
+ intel_uncore_write(&dev_priv->uncore, RAMCLK_GATE_D, 0);
dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
OVRUNIT_CLOCK_GATE_DISABLE |
OVCUNIT_CLOCK_GATE_DISABLE;
if (IS_GM45(dev_priv))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+ intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D, dspclk_gate);
g4x_disable_trickle_feed(dev_priv);
}
@@ -7489,49 +7453,49 @@ static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
I965_RCPB_CLOCK_GATE_DISABLE |
I965_ISC_CLOCK_GATE_DISABLE |
I965_FBC_CLOCK_GATE_DISABLE);
- I915_WRITE(RENCLK_GATE_D2, 0);
- I915_WRITE(MI_ARB_STATE,
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, 0);
+ intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
- u32 dstate = I915_READ(D_STATE);
+ u32 dstate = intel_uncore_read(&dev_priv->uncore, D_STATE);
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
DSTATE_DOT_CLOCK_GATING;
- I915_WRITE(D_STATE, dstate);
+ intel_uncore_write(&dev_priv->uncore, D_STATE, dstate);
if (IS_PINEVIEW(dev_priv))
- I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+ intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
/* IIR "flip pending" means done if this bit is set */
- I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+ intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
/* interrupts should cause a wake up from C3 */
- I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
+ intel_uncore_write(&dev_priv->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
- I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
+ intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
- I915_WRITE(MI_ARB_STATE,
+ intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+ intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
/* interrupts should cause a wake up from C3 */
- I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
+ intel_uncore_write(&dev_priv->uncore, MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
_MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
- I915_WRITE(MEM_MODE,
+ intel_uncore_write(&dev_priv->uncore, MEM_MODE,
_MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
/*
@@ -7541,13 +7505,13 @@ static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
* absolutely nothing) would not allow FBC to recompress
* until a 2D blit occurs.
*/
- I915_WRITE(SCPD0,
+ intel_uncore_write(&dev_priv->uncore, SCPD0,
_MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
}
static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(MEM_MODE,
+ intel_uncore_write(&dev_priv->uncore, MEM_MODE,
_MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
_MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
@@ -7583,7 +7547,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
if (IS_DG1(dev_priv))
dev_priv->display.init_clock_gating = dg1_init_clock_gating;
else if (IS_GEN(dev_priv, 12))
- dev_priv->display.init_clock_gating = tgl_init_clock_gating;
+ dev_priv->display.init_clock_gating = gen12lp_init_clock_gating;
else if (IS_GEN(dev_priv, 11))
dev_priv->display.init_clock_gating = icl_init_clock_gating;
else if (IS_CANNONLAKE(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index eab83e251dd5..97550cf0b6df 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -9,8 +9,10 @@
#include <linux/types.h>
#include "display/intel_bw.h"
+#include "display/intel_display.h"
#include "display/intel_global_state.h"
+#include "i915_drv.h"
#include "i915_reg.h"
struct drm_device;
@@ -40,7 +42,6 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
-u16 intel_get_ddb_size(struct drm_i915_private *dev_priv);
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry);
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
@@ -69,6 +70,10 @@ bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
struct intel_dbuf_state {
struct intel_global_state base;
+ struct skl_ddb_entry ddb[I915_MAX_PIPES];
+ unsigned int weight[I915_MAX_PIPES];
+ u8 slices[I915_MAX_PIPES];
+
u8 enabled_slices;
u8 active_pipes;
};
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 02ebf5a04a9b..0ec0cf191955 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -404,8 +404,8 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
lockdep_assert_held(&i915->sb_lock);
/*
- * GEN6_PCODE_* are outside of the forcewake domain, we can
- * use te fw I915_READ variants to reduce the amount of work
+ * GEN6_PCODE_* are outside of the forcewake domain, we can use
+ * intel_uncore_read/write_fw variants to reduce the amount of work
* required when reading/writing.
*/
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 1c14a07eba7d..9ac501bcfdad 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -2070,7 +2070,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
* This routine waits until the target register @reg contains the expected
* @value after applying the @mask, i.e. it waits until ::
*
- * (I915_READ_FW(reg) & mask) == value
+ * (intel_uncore_read_fw(uncore, reg) & mask) == value
*
* Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
* For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
@@ -2126,7 +2126,7 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore,
* This routine waits until the target register @reg contains the expected
* @value after applying the @mask, i.e. it waits until ::
*
- * (I915_READ(reg) & mask) == value
+ * (intel_uncore_read(uncore, reg) & mask) == value
*
* Otherwise, the wait will timeout after @timeout_ms milliseconds.
*
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index bd2467284295..59f0da8f1fbb 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -216,7 +216,7 @@ void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
/*
* Like above but the caller must manage the uncore.lock itself.
- * Must be used with I915_READ_FW and friends.
+ * Must be used with intel_uncore_read_fw() and friends.
*/
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
enum forcewake_domains domains);
@@ -318,8 +318,8 @@ __uncore_write(write_notrace, 32, l, false)
* will be implemented using 2 32-bit writes in an arbitrary order with
* an arbitrary delay between them. This can cause the hardware to
* act upon the intermediate value, possibly leading to corruption and
- * machine death. For this reason we do not support I915_WRITE64, or
- * uncore->funcs.mmio_writeq.
+ * machine death. For this reason we do not support intel_uncore_write64,
+ * or uncore->funcs.mmio_writeq.
*
* When reading a 64-bit value as two 32-bit values, the delay may cause
* the two reads to mismatch, e.g. a timestamp overflowing. Also note that
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 412e21604a05..dc394fb7ccfa 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -8,6 +8,7 @@
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
+#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index f88473d396f4..f99bb0113726 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -38,8 +38,8 @@ static void quirk_add(struct drm_i915_gem_object *obj,
struct list_head *objects)
{
/* quirk is only for live tiled objects, use it to declare ownership */
- GEM_BUG_ON(obj->mm.quirked);
- obj->mm.quirked = true;
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_set_tiling_quirk(obj);
list_add(&obj->st_link, objects);
}
@@ -85,7 +85,7 @@ static void unpin_ggtt(struct i915_ggtt *ggtt)
struct i915_vma *vma;
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
- if (vma->obj->mm.quirked)
+ if (i915_gem_object_has_tiling_quirk(vma->obj))
i915_vma_unpin(vma);
}
@@ -94,8 +94,8 @@ static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list)
struct drm_i915_gem_object *obj, *on;
list_for_each_entry_safe(obj, on, list, st_link) {
- GEM_BUG_ON(!obj->mm.quirked);
- obj->mm.quirked = false;
+ GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_set_tiling_quirk(obj);
i915_gem_object_put(obj);
}
@@ -442,28 +442,22 @@ static int igt_evict_contexts(void *arg)
/* Overfill the GGTT with context objects and so try to evict one. */
for_each_engine(engine, gt, id) {
struct i915_sw_fence fence;
- struct file *file;
-
- file = mock_file(i915);
- if (IS_ERR(file)) {
- err = PTR_ERR(file);
- break;
- }
count = 0;
onstack_fence_init(&fence);
do {
+ struct intel_context *ce;
struct i915_request *rq;
- struct i915_gem_context *ctx;
- ctx = live_context(i915, file);
- if (IS_ERR(ctx))
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
break;
/* We will need some GGTT space for the rq's context */
igt_evict_ctl.fail_if_busy = true;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
igt_evict_ctl.fail_if_busy = false;
+ intel_context_put(ce);
if (IS_ERR(rq)) {
/* When full, fail_if_busy will trigger EBUSY */
@@ -490,8 +484,6 @@ static int igt_evict_contexts(void *arg)
onstack_fence_fini(&fence);
pr_info("Submitted %lu contexts/requests on %s\n",
count, engine->name);
-
- fput(file);
if (err)
break;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index c53a222e3dec..c1adea8765a9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -28,6 +28,7 @@
#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
#include "i915_random.h"
#include "i915_selftest.h"
@@ -1880,7 +1881,7 @@ static int igt_cs_tlb(void *arg)
vma = i915_vma_instance(out, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto out_put_batch;
+ goto out_put_out;
}
err = i915_vma_pin(vma, 0, 0,
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index debbac660519..e9d86dab8677 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -262,7 +262,7 @@ static int live_noa_delay(void *arg)
delay = intel_read_status_page(stream->engine, 0x102);
delay -= intel_read_status_page(stream->engine, 0x100);
- delay = i915_cs_timestamp_ticks_to_ns(i915, delay);
+ delay = intel_gt_clock_interval_to_ns(stream->engine->gt, delay);
pr_info("GPU delay: %uns, expected %lluns\n",
delay, expected);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index e424a6d1a68c..d2a678a2497e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -33,6 +33,7 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_requests.h"
#include "gt/selftest_engine_heartbeat.h"
@@ -1560,7 +1561,7 @@ static u32 trifilter(u32 *a)
static u64 cycles_to_ns(struct intel_engine_cs *engine, u32 cycles)
{
- u64 ns = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
+ u64 ns = intel_gt_clock_interval_to_ns(engine->gt, cycles);
return DIV_ROUND_CLOSEST(ns, 1 << TF_BIAS);
}
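
The selftest now converts command-streamer ticks via the GT clock helper; the underlying arithmetic is just ticks scaled by the clock period, with the fixed-point bias divided out afterwards. A worked standalone example (the 19.2 MHz clock and the bias value of 2 are assumptions for illustration only):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ull
#define ASSUMED_BIAS	2	/* illustrative fixed-point bias; the selftest's TF_BIAS may differ */

/* ticks -> nanoseconds for a given timestamp clock, rounding to nearest. */
static uint64_t ticks_to_ns(uint64_t ticks, uint64_t clock_hz)
{
	return (ticks * NSEC_PER_SEC + clock_hz / 2) / clock_hz;
}

int main(void)
{
	uint64_t ns = ticks_to_ns(9600, 19200000);	/* assumed 19.2 MHz clock -> 500000 ns */

	/* Divide out the 1 << bias accumulation with round-to-nearest, as DIV_ROUND_CLOSEST would. */
	printf("%llu ns per sample\n",
	       (unsigned long long)((ns + (1u << (ASSUMED_BIAS - 1))) >> ASSUMED_BIAS));
	return 0;
}
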
@@ -1932,9 +1933,7 @@ static int measure_inter_request(struct intel_context *ce)
intel_ring_advance(rq, cs);
i915_request_add(rq);
}
- local_bh_disable();
i915_sw_fence_commit(submit);
- local_bh_enable();
intel_engine_flush_submission(ce->engine);
heap_fence_put(submit);
@@ -2220,11 +2219,9 @@ static int measure_completion(struct intel_context *ce)
intel_ring_advance(rq, cs);
dma_fence_add_callback(&rq->fence, &cb.base, signal_cb);
-
- local_bh_disable();
i915_request_add(rq);
- local_bh_enable();
+ intel_engine_flush_submission(ce->engine);
if (wait_for(READ_ONCE(sema[i]) == -1, 50)) {
err = -EIO;
goto err;
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index ec0ecb4e4ca6..83f6e5f31fb3 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -3,6 +3,7 @@
*
* Copyright © 2018 Intel Corporation
*/
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gem/selftests/igt_gem_utils.h"
@@ -219,6 +220,9 @@ void igt_spinner_fini(struct igt_spinner *spin)
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
+ if (i915_request_is_ready(rq))
+ intel_engine_flush_submission(rq->engine);
+
return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
rq->fence.seqno),
100) &&
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 0aeba8e3af28..ce7adfa3bca0 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -129,6 +129,21 @@ static void igt_object_release(struct drm_i915_gem_object *obj)
i915_gem_object_put(obj);
}
+static bool is_contiguous(struct drm_i915_gem_object *obj)
+{
+ struct scatterlist *sg;
+ dma_addr_t addr = -1;
+
+ for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+ if (addr != -1 && sg_dma_address(sg) != addr)
+ return false;
+
+ addr = sg_dma_address(sg) + sg_dma_len(sg);
+ }
+
+ return true;
+}
+
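The helper replaces the old single-entry check with a walk over DMA addresses; a minimal stand-alone sketch of the same test on plain arrays (names and parameters are illustrative only, not the driver's code) is:

	/* Illustrative only: contiguity means each entry starts where the
	 * previous one ended. */
	static bool demo_is_contiguous(const dma_addr_t *addr, const u32 *len,
				       unsigned int n)
	{
		unsigned int i;

		for (i = 1; i < n; i++) {
			if (addr[i] != addr[i - 1] + len[i - 1])
				return false;
		}

		return true;
	}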
static int igt_mock_contiguous(void *arg)
{
struct intel_memory_region *mem = arg;
@@ -150,8 +165,8 @@ static int igt_mock_contiguous(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- if (obj->mm.pages->nents != 1) {
- pr_err("%s min object spans multiple sg entries\n", __func__);
+ if (!is_contiguous(obj)) {
+ pr_err("%s min object spans disjoint sg entries\n", __func__);
err = -EINVAL;
goto err_close_objects;
}
@@ -163,8 +178,8 @@ static int igt_mock_contiguous(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- if (obj->mm.pages->nents != 1) {
- pr_err("%s max object spans multiple sg entries\n", __func__);
+ if (!is_contiguous(obj)) {
+ pr_err("%s max object spans disjoint sg entries\n", __func__);
err = -EINVAL;
goto err_close_objects;
}
@@ -189,8 +204,8 @@ static int igt_mock_contiguous(void *arg)
goto err_close_objects;
}
- if (obj->mm.pages->nents != 1) {
- pr_err("%s object spans multiple sg entries\n", __func__);
+ if (!is_contiguous(obj)) {
+ pr_err("%s object spans disjoint sg entries\n", __func__);
err = -EINVAL;
goto err_close_objects;
}
@@ -337,6 +352,68 @@ out_put:
return err;
}
+#ifndef SZ_8G
+#define SZ_8G BIT_ULL(33)
+#endif
+
+static int igt_mock_max_segment(void *arg)
+{
+ const unsigned int max_segment = i915_sg_segment_size();
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_buddy_block *block;
+ struct scatterlist *sg;
+ LIST_HEAD(objects);
+ u64 size;
+ int err = 0;
+
+ /*
+ * While we may create very large contiguous blocks, we may need
+ * to break those down for consumption elsewhere. In particular,
+ * dma-mapping of scatterlist elements has an implicit limit of
+ * UINT_MAX on each element.
+ */
+
+ size = SZ_8G;
+ mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ obj = igt_object_create(mem, &objects, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_put;
+ }
+
+ size = 0;
+ list_for_each_entry(block, &obj->mm.blocks, link) {
+ if (i915_buddy_block_size(&mem->mm, block) > size)
+ size = i915_buddy_block_size(&mem->mm, block);
+ }
+ if (size < max_segment) {
+ pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
+ __func__, max_segment, size);
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+ if (sg->length > max_segment) {
+ pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
+ __func__, sg->length, max_segment);
+ err = -EINVAL;
+ goto out_close;
+ }
+ }
+
+out_close:
+ close_objects(mem, &objects);
+out_put:
+ intel_memory_region_put(mem);
+ return err;
+}
+
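The selftest only verifies the result; as a hedged sketch of how such an oversized contiguous block is typically chopped into scatterlist entries no longer than i915_sg_segment_size() (block_dma and block_size are stand-ins, not the driver's variables):

	{
		u64 remaining = block_size;
		dma_addr_t dma = block_dma;

		while (remaining) {
			u32 len = min_t(u64, remaining, max_segment);

			sg_dma_address(sg) = dma;
			sg_dma_len(sg) = len;
			sg->length = len;

			dma += len;
			remaining -= len;
			sg = sg_next(sg);	/* assumes the table has enough entries */
		}
	}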
static int igt_gpu_write_dw(struct intel_context *ce,
struct i915_vma *vma,
u32 dword,
@@ -775,14 +852,22 @@ static int _perf_memcpy(struct intel_memory_region *src_mr,
}
sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+ if (t[0] <= 0) {
+ /* ignore the impossible to protect our sanity */
+ pr_debug("Skipping %s src(%s, %s) -> dst(%s, %s) %14s %4lluKiB copy, unstable measurement [%lld, %lld]\n",
+ __func__,
+ src_mr->name, repr_type(src_type),
+ dst_mr->name, repr_type(dst_type),
+ tests[i].name, size >> 10,
+ t[0], t[4]);
+ continue;
+ }
+
pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
__func__,
- src_mr->name,
- repr_type(src_type),
- dst_mr->name,
- repr_type(dst_type),
- tests[i].name,
- size >> 10,
+ src_mr->name, repr_type(src_type),
+ dst_mr->name, repr_type(dst_type),
+ tests[i].name, size >> 10,
div64_u64(mul_u32_u32(4 * size,
1000 * 1000 * 1000),
t[1] + 2 * t[2] + t[3]) >> 20);
@@ -848,6 +933,7 @@ int intel_memory_region_mock_selftests(void)
SUBTEST(igt_mock_fill),
SUBTEST(igt_mock_contiguous),
SUBTEST(igt_mock_splintered_region),
+ SUBTEST(igt_mock_max_segment),
};
struct intel_memory_region *mem;
struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index e946bd2087d8..0188f877cab2 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -64,8 +64,6 @@ static void mock_device_release(struct drm_device *dev)
mock_device_flush(i915);
intel_gt_driver_remove(&i915->gt);
- i915_gem_driver_release__contexts(i915);
-
i915_gem_drain_workqueue(i915);
i915_gem_drain_freed_objects(i915);
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 979d96f27c43..3c6021415274 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -15,21 +15,16 @@ static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
.release = i915_gem_object_release_memory_region,
};
-static struct drm_i915_gem_object *
-mock_object_create(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+static int mock_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t size,
+ unsigned int flags)
{
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;
- struct drm_i915_gem_object *obj;
if (size > mem->mm.size)
- return ERR_PTR(-E2BIG);
-
- obj = i915_gem_object_alloc();
- if (!obj)
- return ERR_PTR(-ENOMEM);
+ return -E2BIG;
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class);
@@ -40,13 +35,13 @@ mock_object_create(struct intel_memory_region *mem,
i915_gem_object_init_memory_region(obj, mem, flags);
- return obj;
+ return 0;
}
static const struct intel_memory_region_ops mock_region_ops = {
.init = intel_memory_region_init_buddy,
.release = intel_memory_region_release_buddy,
- .create_object = mock_object_create,
+ .init_object = mock_object_init,
};
struct intel_memory_region *
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index 6231048aa5aa..b5fa0e45a839 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -28,6 +28,7 @@ config DRM_IMX_TVE
config DRM_IMX_LDB
tristate "Support for LVDS displays"
depends on DRM_IMX && MFD_SYSCON
+ depends on COMMON_CLK
select DRM_PANEL
help
Choose this to enable the internal LVDS Display Bridge (LDB)
@@ -36,7 +37,7 @@ config DRM_IMX_LDB
config DRM_IMX_HDMI
tristate "Freescale i.MX DRM HDMI"
select DRM_DW_HDMI
- depends on DRM_IMX
+ depends on DRM_IMX && OF
help
Choose this if you want to use HDMI on i.MX6.
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index d07b39b8afd2..87428fb23d9f 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -15,23 +15,32 @@
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_simple_kms_helper.h>
#include "imx-drm.h"
+struct imx_hdmi;
+
+struct imx_hdmi_encoder {
+ struct drm_encoder encoder;
+ struct imx_hdmi *hdmi;
+};
+
struct imx_hdmi {
struct device *dev;
- struct drm_encoder encoder;
+ struct drm_bridge *bridge;
struct dw_hdmi *hdmi;
struct regmap *regmap;
};
static inline struct imx_hdmi *enc_to_imx_hdmi(struct drm_encoder *e)
{
- return container_of(e, struct imx_hdmi, encoder);
+ return container_of(e, struct imx_hdmi_encoder, encoder)->hdmi;
}
static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = {
@@ -98,19 +107,6 @@ static const struct dw_hdmi_phy_config imx_phy_config[] = {
{ ~0UL, 0x0000, 0x0000, 0x0000}
};
-static int dw_hdmi_imx_parse_dt(struct imx_hdmi *hdmi)
-{
- struct device_node *np = hdmi->dev->of_node;
-
- hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
- if (IS_ERR(hdmi->regmap)) {
- dev_err(hdmi->dev, "Unable to get gpr\n");
- return PTR_ERR(hdmi->regmap);
- }
-
- return 0;
-}
-
static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder)
{
struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder);
@@ -195,65 +191,36 @@ MODULE_DEVICE_TABLE(of, dw_hdmi_imx_dt_ids);
static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
- const struct dw_hdmi_plat_data *plat_data;
- const struct of_device_id *match;
struct drm_device *drm = data;
+ struct imx_hdmi_encoder *hdmi_encoder;
struct drm_encoder *encoder;
- struct imx_hdmi *hdmi;
int ret;
- if (!pdev->dev.of_node)
- return -ENODEV;
+ hdmi_encoder = drmm_simple_encoder_alloc(drm, struct imx_hdmi_encoder,
+ encoder, DRM_MODE_ENCODER_TMDS);
+ if (IS_ERR(hdmi_encoder))
+ return PTR_ERR(hdmi_encoder);
- hdmi = dev_get_drvdata(dev);
- memset(hdmi, 0, sizeof(*hdmi));
-
- match = of_match_node(dw_hdmi_imx_dt_ids, pdev->dev.of_node);
- plat_data = match->data;
- hdmi->dev = &pdev->dev;
- encoder = &hdmi->encoder;
+ hdmi_encoder->hdmi = dev_get_drvdata(dev);
+ encoder = &hdmi_encoder->encoder;
ret = imx_drm_encoder_parse_of(drm, encoder, dev->of_node);
if (ret)
return ret;
- ret = dw_hdmi_imx_parse_dt(hdmi);
- if (ret < 0)
- return ret;
-
drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs);
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
-
- hdmi->hdmi = dw_hdmi_bind(pdev, encoder, plat_data);
- /*
- * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
- * which would have called the encoder cleanup. Do it manually.
- */
- if (IS_ERR(hdmi->hdmi)) {
- ret = PTR_ERR(hdmi->hdmi);
- drm_encoder_cleanup(encoder);
- }
-
- return ret;
-}
-
-static void dw_hdmi_imx_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct imx_hdmi *hdmi = dev_get_drvdata(dev);
-
- dw_hdmi_unbind(hdmi->hdmi);
+ return drm_bridge_attach(encoder, hdmi_encoder->hdmi->bridge, NULL, 0);
}
static const struct component_ops dw_hdmi_imx_ops = {
.bind = dw_hdmi_imx_bind,
- .unbind = dw_hdmi_imx_unbind,
};
static int dw_hdmi_imx_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match = of_match_node(dw_hdmi_imx_dt_ids, np);
struct imx_hdmi *hdmi;
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
@@ -261,13 +228,33 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, hdmi);
+ hdmi->dev = &pdev->dev;
+
+ hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
+ if (IS_ERR(hdmi->regmap)) {
+ dev_err(hdmi->dev, "Unable to get gpr\n");
+ return PTR_ERR(hdmi->regmap);
+ }
+
+ hdmi->hdmi = dw_hdmi_probe(pdev, match->data);
+ if (IS_ERR(hdmi->hdmi))
+ return PTR_ERR(hdmi->hdmi);
+
+ hdmi->bridge = of_drm_find_bridge(np);
+ if (!hdmi->bridge) {
+ dev_err(hdmi->dev, "Unable to find bridge\n");
+ return -ENODEV;
+ }
return component_add(&pdev->dev, &dw_hdmi_imx_ops);
}
static int dw_hdmi_imx_remove(struct platform_device *pdev)
{
+ struct imx_hdmi *hdmi = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &dw_hdmi_imx_ops);
+ dw_hdmi_remove(hdmi->hdmi);
return 0;
}
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 41e2978cb1eb..dbfe39e2f7f6 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -22,6 +22,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
@@ -47,12 +48,18 @@
#define LDB_DI1_VS_POL_ACT_LOW (1 << 10)
#define LDB_BGREF_RMODE_INT (1 << 15)
+struct imx_ldb_channel;
+
+struct imx_ldb_encoder {
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+ struct imx_ldb_channel *channel;
+};
+
struct imx_ldb;
struct imx_ldb_channel {
struct imx_ldb *ldb;
- struct drm_connector connector;
- struct drm_encoder encoder;
/* Defines what is connected to the ldb, only one at a time */
struct drm_panel *panel;
@@ -70,12 +77,12 @@ struct imx_ldb_channel {
static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c)
{
- return container_of(c, struct imx_ldb_channel, connector);
+ return container_of(c, struct imx_ldb_encoder, connector)->channel;
}
static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e)
{
- return container_of(e, struct imx_ldb_channel, encoder);
+ return container_of(e, struct imx_ldb_encoder, encoder)->channel;
}
struct bus_mux {
@@ -411,9 +418,20 @@ static int imx_ldb_register(struct drm_device *drm,
struct imx_ldb_channel *imx_ldb_ch)
{
struct imx_ldb *ldb = imx_ldb_ch->ldb;
- struct drm_encoder *encoder = &imx_ldb_ch->encoder;
+ struct imx_ldb_encoder *ldb_encoder;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
int ret;
+ ldb_encoder = drmm_simple_encoder_alloc(drm, struct imx_ldb_encoder,
+ encoder, DRM_MODE_ENCODER_LVDS);
+ if (IS_ERR(ldb_encoder))
+ return PTR_ERR(ldb_encoder);
+
+ ldb_encoder->channel = imx_ldb_ch;
+ connector = &ldb_encoder->connector;
+ encoder = &ldb_encoder->encoder;
+
ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child);
if (ret)
return ret;
@@ -429,11 +447,9 @@ static int imx_ldb_register(struct drm_device *drm,
}
drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_LVDS);
if (imx_ldb_ch->bridge) {
- ret = drm_bridge_attach(&imx_ldb_ch->encoder,
- imx_ldb_ch->bridge, NULL, 0);
+ ret = drm_bridge_attach(encoder, imx_ldb_ch->bridge, NULL, 0);
if (ret) {
DRM_ERROR("Failed to initialize bridge with drm\n");
return ret;
@@ -445,13 +461,13 @@ static int imx_ldb_register(struct drm_device *drm,
* historical reasons, the ldb driver can also work without
* a panel.
*/
- drm_connector_helper_add(&imx_ldb_ch->connector,
- &imx_ldb_connector_helper_funcs);
- drm_connector_init_with_ddc(drm, &imx_ldb_ch->connector,
+ drm_connector_helper_add(connector,
+ &imx_ldb_connector_helper_funcs);
+ drm_connector_init_with_ddc(drm, connector,
&imx_ldb_connector_funcs,
DRM_MODE_CONNECTOR_LVDS,
imx_ldb_ch->ddc);
- drm_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
}
return 0;
@@ -559,17 +575,42 @@ static int imx_ldb_panel_ddc(struct device *dev,
static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = data;
+ struct imx_ldb *imx_ldb = dev_get_drvdata(dev);
+ int ret;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct imx_ldb_channel *channel = &imx_ldb->channel[i];
+
+ if (!channel->ldb)
+ break;
+
+ ret = imx_ldb_register(drm, channel);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct component_ops imx_ldb_ops = {
+ .bind = imx_ldb_bind,
+};
+
+static int imx_ldb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- const struct of_device_id *of_id =
- of_match_device(imx_ldb_dt_ids, dev);
+ const struct of_device_id *of_id = of_match_device(imx_ldb_dt_ids, dev);
struct device_node *child;
struct imx_ldb *imx_ldb;
int dual;
int ret;
int i;
- imx_ldb = dev_get_drvdata(dev);
- memset(imx_ldb, 0, sizeof(*imx_ldb));
+ imx_ldb = devm_kzalloc(dev, sizeof(*imx_ldb), GFP_KERNEL);
+ if (!imx_ldb)
+ return -ENOMEM;
imx_ldb->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
if (IS_ERR(imx_ldb->regmap)) {
@@ -669,25 +710,20 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
}
channel->bus_format = bus_format;
channel->child = child;
-
- ret = imx_ldb_register(drm, channel);
- if (ret) {
- channel->child = NULL;
- goto free_child;
- }
}
- return 0;
+ platform_set_drvdata(pdev, imx_ldb);
+
+ return component_add(&pdev->dev, &imx_ldb_ops);
free_child:
of_node_put(child);
return ret;
}
-static void imx_ldb_unbind(struct device *dev, struct device *master,
- void *data)
+static int imx_ldb_remove(struct platform_device *pdev)
{
- struct imx_ldb *imx_ldb = dev_get_drvdata(dev);
+ struct imx_ldb *imx_ldb = platform_get_drvdata(pdev);
int i;
for (i = 0; i < 2; i++) {
@@ -696,28 +732,7 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
kfree(channel->edid);
i2c_put_adapter(channel->ddc);
}
-}
-
-static const struct component_ops imx_ldb_ops = {
- .bind = imx_ldb_bind,
- .unbind = imx_ldb_unbind,
-};
-
-static int imx_ldb_probe(struct platform_device *pdev)
-{
- struct imx_ldb *imx_ldb;
-
- imx_ldb = devm_kzalloc(&pdev->dev, sizeof(*imx_ldb), GFP_KERNEL);
- if (!imx_ldb)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, imx_ldb);
-
- return component_add(&pdev->dev, &imx_ldb_ops);
-}
-static int imx_ldb_remove(struct platform_device *pdev)
-{
component_del(&pdev->dev, &imx_ldb_ops);
return 0;
}
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 2a8d2e32e7b4..bc8c3f802a15 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -19,6 +19,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -99,9 +100,13 @@ enum {
TVE_MODE_VGA,
};
-struct imx_tve {
+struct imx_tve_encoder {
struct drm_connector connector;
struct drm_encoder encoder;
+ struct imx_tve *tve;
+};
+
+struct imx_tve {
struct device *dev;
int mode;
int di_hsync_pin;
@@ -118,12 +123,12 @@ struct imx_tve {
static inline struct imx_tve *con_to_tve(struct drm_connector *c)
{
- return container_of(c, struct imx_tve, connector);
+ return container_of(c, struct imx_tve_encoder, connector)->tve;
}
static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
{
- return container_of(e, struct imx_tve, encoder);
+ return container_of(e, struct imx_tve_encoder, encoder)->tve;
}
static void tve_enable(struct imx_tve *tve)
@@ -418,7 +423,7 @@ static int tve_clk_init(struct imx_tve *tve, void __iomem *base)
init.parent_names = (const char **)&tve_di_parent;
tve->clk_hw_di.init = &init;
- tve->di_clk = clk_register(tve->dev, &tve->clk_hw_di);
+ tve->di_clk = devm_clk_register(tve->dev, &tve->clk_hw_di);
if (IS_ERR(tve->di_clk)) {
dev_err(tve->dev, "failed to register TVE output clock: %ld\n",
PTR_ERR(tve->di_clk));
@@ -428,33 +433,6 @@ static int tve_clk_init(struct imx_tve *tve, void __iomem *base)
return 0;
}
-static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
-{
- int encoder_type;
- int ret;
-
- encoder_type = tve->mode == TVE_MODE_VGA ?
- DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
-
- ret = imx_drm_encoder_parse_of(drm, &tve->encoder, tve->dev->of_node);
- if (ret)
- return ret;
-
- drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs);
- drm_simple_encoder_init(drm, &tve->encoder, encoder_type);
-
- drm_connector_helper_add(&tve->connector,
- &imx_tve_connector_helper_funcs);
- drm_connector_init_with_ddc(drm, &tve->connector,
- &imx_tve_connector_funcs,
- DRM_MODE_CONNECTOR_VGA,
- tve->ddc);
-
- drm_connector_attach_encoder(&tve->connector, &tve->encoder);
-
- return 0;
-}
-
static void imx_tve_disable_regulator(void *data)
{
struct imx_tve *tve = data;
@@ -502,8 +480,49 @@ static int of_get_tve_mode(struct device_node *np)
static int imx_tve_bind(struct device *dev, struct device *master, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
+ struct imx_tve *tve = dev_get_drvdata(dev);
+ struct imx_tve_encoder *tvee;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int encoder_type;
+ int ret;
+
+ encoder_type = tve->mode == TVE_MODE_VGA ?
+ DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
+
+ tvee = drmm_simple_encoder_alloc(drm, struct imx_tve_encoder, encoder,
+ encoder_type);
+ if (IS_ERR(tvee))
+ return PTR_ERR(tvee);
+
+ tvee->tve = tve;
+ encoder = &tvee->encoder;
+ connector = &tvee->connector;
+
+ ret = imx_drm_encoder_parse_of(drm, encoder, tve->dev->of_node);
+ if (ret)
+ return ret;
+
+ drm_encoder_helper_add(encoder, &imx_tve_encoder_helper_funcs);
+
+ drm_connector_helper_add(connector, &imx_tve_connector_helper_funcs);
+ ret = drm_connector_init_with_ddc(drm, connector,
+ &imx_tve_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA, tve->ddc);
+ if (ret)
+ return ret;
+
+ return drm_connector_attach_encoder(connector, encoder);
+}
+
+static const struct component_ops imx_tve_ops = {
+ .bind = imx_tve_bind,
+};
+
+static int imx_tve_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *ddc_node;
struct imx_tve *tve;
@@ -513,8 +532,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
int irq;
int ret;
- tve = dev_get_drvdata(dev);
- memset(tve, 0, sizeof(*tve));
+ tve = devm_kzalloc(dev, sizeof(*tve), GFP_KERNEL);
+ if (!tve)
+ return -ENOMEM;
tve->dev = dev;
@@ -621,28 +641,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- ret = imx_tve_register(drm, tve);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static const struct component_ops imx_tve_ops = {
- .bind = imx_tve_bind,
-};
-
-static int imx_tve_probe(struct platform_device *pdev)
-{
- struct imx_tve *tve;
-
- tve = devm_kzalloc(&pdev->dev, sizeof(*tve), GFP_KERNEL);
- if (!tve)
- return -ENOMEM;
-
platform_set_drvdata(pdev, tve);
- return component_add(&pdev->dev, &imx_tve_ops);
+ return component_add(dev, &imx_tve_ops);
}
static int imx_tve_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 7ebd99ee3240..e6431a227feb 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -20,6 +20,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -163,7 +164,6 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
static const struct drm_crtc_funcs ipu_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = drm_crtc_cleanup,
.page_flip = drm_atomic_helper_page_flip,
.reset = imx_drm_crtc_reset,
.atomic_duplicate_state = imx_drm_crtc_duplicate_state,
@@ -322,73 +322,73 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
.atomic_enable = ipu_crtc_atomic_enable,
};
-static void ipu_put_resources(struct ipu_crtc *ipu_crtc)
+static void ipu_put_resources(struct drm_device *dev, void *ptr)
{
+ struct ipu_crtc *ipu_crtc = ptr;
+
if (!IS_ERR_OR_NULL(ipu_crtc->dc))
ipu_dc_put(ipu_crtc->dc);
if (!IS_ERR_OR_NULL(ipu_crtc->di))
ipu_di_put(ipu_crtc->di);
}
-static int ipu_get_resources(struct ipu_crtc *ipu_crtc,
- struct ipu_client_platformdata *pdata)
+static int ipu_get_resources(struct drm_device *dev, struct ipu_crtc *ipu_crtc,
+ struct ipu_client_platformdata *pdata)
{
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
int ret;
ipu_crtc->dc = ipu_dc_get(ipu, pdata->dc);
- if (IS_ERR(ipu_crtc->dc)) {
- ret = PTR_ERR(ipu_crtc->dc);
- goto err_out;
- }
+ if (IS_ERR(ipu_crtc->dc))
+ return PTR_ERR(ipu_crtc->dc);
+
+ ret = drmm_add_action_or_reset(dev, ipu_put_resources, ipu_crtc);
+ if (ret)
+ return ret;
ipu_crtc->di = ipu_di_get(ipu, pdata->di);
- if (IS_ERR(ipu_crtc->di)) {
- ret = PTR_ERR(ipu_crtc->di);
- goto err_out;
- }
+ if (IS_ERR(ipu_crtc->di))
+ return PTR_ERR(ipu_crtc->di);
return 0;
-err_out:
- ipu_put_resources(ipu_crtc);
-
- return ret;
}
-static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
- struct ipu_client_platformdata *pdata, struct drm_device *drm)
+static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
{
- struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
- struct drm_crtc *crtc = &ipu_crtc->base;
+ struct ipu_client_platformdata *pdata = dev->platform_data;
+ struct ipu_soc *ipu = dev_get_drvdata(dev->parent);
+ struct drm_device *drm = data;
+ struct ipu_plane *primary_plane;
+ struct ipu_crtc *ipu_crtc;
+ struct drm_crtc *crtc;
int dp = -EINVAL;
int ret;
- ret = ipu_get_resources(ipu_crtc, pdata);
- if (ret) {
- dev_err(ipu_crtc->dev, "getting resources failed with %d.\n",
- ret);
- return ret;
- }
-
if (pdata->dp >= 0)
dp = IPU_DP_FLOW_SYNC_BG;
- ipu_crtc->plane[0] = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
- DRM_PLANE_TYPE_PRIMARY);
- if (IS_ERR(ipu_crtc->plane[0])) {
- ret = PTR_ERR(ipu_crtc->plane[0]);
- goto err_put_resources;
- }
+ primary_plane = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
+ DRM_PLANE_TYPE_PRIMARY);
+ if (IS_ERR(primary_plane))
+ return PTR_ERR(primary_plane);
+
+ ipu_crtc = drmm_crtc_alloc_with_planes(drm, struct ipu_crtc, base,
+ &primary_plane->base, NULL,
+ &ipu_crtc_funcs, NULL);
+ if (IS_ERR(ipu_crtc))
+ return PTR_ERR(ipu_crtc);
+ ipu_crtc->dev = dev;
+ ipu_crtc->plane[0] = primary_plane;
+
+ crtc = &ipu_crtc->base;
crtc->port = pdata->of_node;
drm_crtc_helper_add(crtc, &ipu_helper_funcs);
- drm_crtc_init_with_planes(drm, crtc, &ipu_crtc->plane[0]->base, NULL,
- &ipu_crtc_funcs, NULL);
- ret = ipu_plane_get_resources(ipu_crtc->plane[0]);
+ ret = ipu_get_resources(drm, ipu_crtc, pdata);
if (ret) {
- dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n",
+ dev_err(ipu_crtc->dev, "getting resources failed with %d.\n",
ret);
- goto err_put_resources;
+ return ret;
}
/* If this crtc is using the DP, add an overlay plane */
@@ -397,16 +397,8 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
IPU_DP_FLOW_SYNC_FG,
drm_crtc_mask(&ipu_crtc->base),
DRM_PLANE_TYPE_OVERLAY);
- if (IS_ERR(ipu_crtc->plane[1])) {
+ if (IS_ERR(ipu_crtc->plane[1]))
ipu_crtc->plane[1] = NULL;
- } else {
- ret = ipu_plane_get_resources(ipu_crtc->plane[1]);
- if (ret) {
- dev_err(ipu_crtc->dev, "getting plane 1 "
- "resources failed with %d.\n", ret);
- goto err_put_plane0_res;
- }
- }
}
ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
@@ -414,58 +406,21 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
"imx_drm", ipu_crtc);
if (ret < 0) {
dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
- goto err_put_plane1_res;
+ return ret;
}
/* Only enable IRQ when we actually need it to trigger work. */
disable_irq(ipu_crtc->irq);
return 0;
-
-err_put_plane1_res:
- if (ipu_crtc->plane[1])
- ipu_plane_put_resources(ipu_crtc->plane[1]);
-err_put_plane0_res:
- ipu_plane_put_resources(ipu_crtc->plane[0]);
-err_put_resources:
- ipu_put_resources(ipu_crtc);
-
- return ret;
-}
-
-static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
-{
- struct ipu_client_platformdata *pdata = dev->platform_data;
- struct drm_device *drm = data;
- struct ipu_crtc *ipu_crtc;
-
- ipu_crtc = dev_get_drvdata(dev);
- memset(ipu_crtc, 0, sizeof(*ipu_crtc));
-
- ipu_crtc->dev = dev;
-
- return ipu_crtc_init(ipu_crtc, pdata, drm);
-}
-
-static void ipu_drm_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct ipu_crtc *ipu_crtc = dev_get_drvdata(dev);
-
- ipu_put_resources(ipu_crtc);
- if (ipu_crtc->plane[1])
- ipu_plane_put_resources(ipu_crtc->plane[1]);
- ipu_plane_put_resources(ipu_crtc->plane[0]);
}
static const struct component_ops ipu_crtc_ops = {
.bind = ipu_drm_bind,
- .unbind = ipu_drm_unbind,
};
static int ipu_drm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct ipu_crtc *ipu_crtc;
int ret;
if (!dev->platform_data)
@@ -475,12 +430,6 @@ static int ipu_drm_probe(struct platform_device *pdev)
if (ret)
return ret;
- ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL);
- if (!ipu_crtc)
- return -ENOMEM;
-
- dev_set_drvdata(dev, ipu_crtc);
-
return component_add(dev, &ipu_crtc_ops);
}
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 8a4235d9d9f1..075508051b5f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -11,6 +11,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_plane_helper.h>
#include <video/imx-ipu-v3.h>
@@ -142,8 +143,10 @@ drm_plane_state_to_vbo(struct drm_plane_state *state)
fb->format->cpp[2] * x - eba;
}
-void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
+static void ipu_plane_put_resources(struct drm_device *dev, void *ptr)
{
+ struct ipu_plane *ipu_plane = ptr;
+
if (!IS_ERR_OR_NULL(ipu_plane->dp))
ipu_dp_put(ipu_plane->dp);
if (!IS_ERR_OR_NULL(ipu_plane->dmfc))
@@ -154,7 +157,8 @@ void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
ipu_idmac_put(ipu_plane->alpha_ch);
}
-int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
+static int ipu_plane_get_resources(struct drm_device *dev,
+ struct ipu_plane *ipu_plane)
{
int ret;
int alpha_ch;
@@ -166,6 +170,10 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
return ret;
}
+ ret = drmm_add_action_or_reset(dev, ipu_plane_put_resources, ipu_plane);
+ if (ret)
+ return ret;
+
alpha_ch = ipu_channel_alpha_channel(ipu_plane->dma);
if (alpha_ch >= 0) {
ipu_plane->alpha_ch = ipu_idmac_get(ipu_plane->ipu, alpha_ch);
@@ -181,7 +189,7 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
if (IS_ERR(ipu_plane->dmfc)) {
ret = PTR_ERR(ipu_plane->dmfc);
DRM_ERROR("failed to get dmfc: ret %d\n", ret);
- goto err_out;
+ return ret;
}
if (ipu_plane->dp_flow >= 0) {
@@ -189,15 +197,11 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
if (IS_ERR(ipu_plane->dp)) {
ret = PTR_ERR(ipu_plane->dp);
DRM_ERROR("failed to get dp flow: %d\n", ret);
- goto err_out;
+ return ret;
}
}
return 0;
-err_out:
- ipu_plane_put_resources(ipu_plane);
-
- return ret;
}
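The simplified error handling above relies on drmm_add_action_or_reset() running the release callback immediately when it cannot register it. The general shape of the pattern, with hypothetical my_release()/my_res names, looks roughly like this:

	static void my_release(struct drm_device *drm, void *ptr)
	{
		struct my_res *res = ptr;

		/* undo whatever was acquired for this drm_device */
		release_my_res(res);
	}

	/* after acquiring res, tie its lifetime to the drm_device */
	ret = drmm_add_action_or_reset(drm, my_release, res);
	if (ret)
		return ret;	/* my_release() has already run on failure */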
static bool ipu_plane_separate_alpha(struct ipu_plane *ipu_plane)
@@ -262,16 +266,6 @@ void ipu_plane_disable_deferred(struct drm_plane *plane)
}
EXPORT_SYMBOL_GPL(ipu_plane_disable_deferred);
-static void ipu_plane_destroy(struct drm_plane *plane)
-{
- struct ipu_plane *ipu_plane = to_ipu_plane(plane);
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- drm_plane_cleanup(plane);
- kfree(ipu_plane);
-}
-
static void ipu_plane_state_reset(struct drm_plane *plane)
{
unsigned int zpos = (plane->type == DRM_PLANE_TYPE_PRIMARY) ? 0 : 1;
@@ -336,7 +330,6 @@ static bool ipu_plane_format_mod_supported(struct drm_plane *plane,
static const struct drm_plane_funcs ipu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = ipu_plane_destroy,
.reset = ipu_plane_state_reset,
.atomic_duplicate_state = ipu_plane_duplicate_state,
.atomic_destroy_state = ipu_plane_destroy_state,
@@ -834,10 +827,15 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
DRM_DEBUG_KMS("channel %d, dp flow %d, possible_crtcs=0x%x\n",
dma, dp, possible_crtcs);
- ipu_plane = kzalloc(sizeof(*ipu_plane), GFP_KERNEL);
- if (!ipu_plane) {
- DRM_ERROR("failed to allocate plane\n");
- return ERR_PTR(-ENOMEM);
+ ipu_plane = drmm_universal_plane_alloc(dev, struct ipu_plane, base,
+ possible_crtcs, &ipu_plane_funcs,
+ ipu_plane_formats,
+ ARRAY_SIZE(ipu_plane_formats),
+ modifiers, type, NULL);
+ if (IS_ERR(ipu_plane)) {
+ DRM_ERROR("failed to allocate and initialize %s plane\n",
+ zpos ? "overlay" : "primary");
+ return ipu_plane;
}
ipu_plane->ipu = ipu;
@@ -847,22 +845,23 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
if (ipu_prg_present(ipu))
modifiers = pre_format_modifiers;
- ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs,
- &ipu_plane_funcs, ipu_plane_formats,
- ARRAY_SIZE(ipu_plane_formats),
- modifiers, type, NULL);
- if (ret) {
- DRM_ERROR("failed to initialize plane\n");
- kfree(ipu_plane);
- return ERR_PTR(ret);
- }
-
drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG)
- drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0, 1);
+ ret = drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0,
+ 1);
else
- drm_plane_create_zpos_immutable_property(&ipu_plane->base, 0);
+ ret = drm_plane_create_zpos_immutable_property(&ipu_plane->base,
+ 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = ipu_plane_get_resources(dev, ipu_plane);
+ if (ret) {
+ DRM_ERROR("failed to get %s plane resources: %pe\n",
+ zpos ? "overlay" : "primary", &ret);
+ return ERR_PTR(ret);
+ }
return ipu_plane;
}
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index ffacbcdd2f98..6d544e6ce63f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -41,9 +41,6 @@ int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
uint32_t src_x, uint32_t src_y, uint32_t src_w,
uint32_t src_h, bool interlaced);
-int ipu_plane_get_resources(struct ipu_plane *plane);
-void ipu_plane_put_resources(struct ipu_plane *plane);
-
int ipu_plane_irq(struct ipu_plane *plane);
void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 2eb8df4697df..e0412e694fd9 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -15,6 +15,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
@@ -22,10 +23,14 @@
#include "imx-drm.h"
-struct imx_parallel_display {
+struct imx_parallel_display_encoder {
struct drm_connector connector;
struct drm_encoder encoder;
struct drm_bridge bridge;
+ struct imx_parallel_display *pd;
+};
+
+struct imx_parallel_display {
struct device *dev;
void *edid;
u32 bus_format;
@@ -37,12 +42,12 @@ struct imx_parallel_display {
static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
{
- return container_of(c, struct imx_parallel_display, connector);
+ return container_of(c, struct imx_parallel_display_encoder, connector)->pd;
}
static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
{
- return container_of(b, struct imx_parallel_display, bridge);
+ return container_of(b, struct imx_parallel_display_encoder, bridge)->pd;
}
static int imx_pd_connector_get_modes(struct drm_connector *connector)
@@ -74,7 +79,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
return ret;
drm_mode_copy(mode, &imxpd->mode);
- mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
num_modes++;
}
@@ -253,12 +258,26 @@ static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
.atomic_get_output_bus_fmts = imx_pd_bridge_atomic_get_output_bus_fmts,
};
-static int imx_pd_register(struct drm_device *drm,
- struct imx_parallel_display *imxpd)
+static int imx_pd_bind(struct device *dev, struct device *master, void *data)
{
- struct drm_encoder *encoder = &imxpd->encoder;
+ struct drm_device *drm = data;
+ struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
+ struct imx_parallel_display_encoder *imxpd_encoder;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
int ret;
+ imxpd_encoder = drmm_simple_encoder_alloc(drm, struct imx_parallel_display_encoder,
+ encoder, DRM_MODE_ENCODER_NONE);
+ if (IS_ERR(imxpd_encoder))
+ return PTR_ERR(imxpd_encoder);
+
+ imxpd_encoder->pd = imxpd;
+ connector = &imxpd_encoder->connector;
+ encoder = &imxpd_encoder->encoder;
+ bridge = &imxpd_encoder->bridge;
+
ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
if (ret)
return ret;
@@ -268,39 +287,37 @@ static int imx_pd_register(struct drm_device *drm,
* immediately since the current state is ON
* at this point.
*/
- imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
-
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
-
- imxpd->bridge.funcs = &imx_pd_bridge_funcs;
- drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0);
+ connector->dpms = DRM_MODE_DPMS_OFF;
- if (!imxpd->next_bridge) {
- drm_connector_helper_add(&imxpd->connector,
- &imx_pd_connector_helper_funcs);
- drm_connector_init(drm, &imxpd->connector,
- &imx_pd_connector_funcs,
- DRM_MODE_CONNECTOR_DPI);
- }
+ bridge->funcs = &imx_pd_bridge_funcs;
+ drm_bridge_attach(encoder, bridge, NULL, 0);
if (imxpd->next_bridge) {
- ret = drm_bridge_attach(encoder, imxpd->next_bridge,
- &imxpd->bridge, 0);
+ ret = drm_bridge_attach(encoder, imxpd->next_bridge, bridge, 0);
if (ret < 0) {
dev_err(imxpd->dev, "failed to attach bridge: %d\n",
ret);
return ret;
}
} else {
- drm_connector_attach_encoder(&imxpd->connector, encoder);
+ drm_connector_helper_add(connector,
+ &imx_pd_connector_helper_funcs);
+ drm_connector_init(drm, connector, &imx_pd_connector_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+
+ drm_connector_attach_encoder(connector, encoder);
}
return 0;
}
-static int imx_pd_bind(struct device *dev, struct device *master, void *data)
+static const struct component_ops imx_pd_ops = {
+ .bind = imx_pd_bind,
+};
+
+static int imx_pd_probe(struct platform_device *pdev)
{
- struct drm_device *drm = data;
+ struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
const u8 *edidp;
struct imx_parallel_display *imxpd;
@@ -309,8 +326,9 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
u32 bus_format = 0;
const char *fmt;
- imxpd = dev_get_drvdata(dev);
- memset(imxpd, 0, sizeof(*imxpd));
+ imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
+ if (!imxpd)
+ return -ENOMEM;
/* port@1 is the output port */
ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel,
@@ -337,28 +355,9 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
imxpd->dev = dev;
- ret = imx_pd_register(drm, imxpd);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static const struct component_ops imx_pd_ops = {
- .bind = imx_pd_bind,
-};
-
-static int imx_pd_probe(struct platform_device *pdev)
-{
- struct imx_parallel_display *imxpd;
-
- imxpd = devm_kzalloc(&pdev->dev, sizeof(*imxpd), GFP_KERNEL);
- if (!imxpd)
- return -ENOMEM;
-
platform_set_drvdata(pdev, imxpd);
- return component_add(&pdev->dev, &imx_pd_ops);
+ return component_add(dev, &imx_pd_ops);
}
static int imx_pd_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig
index 477d5387e43e..3b57f8be007c 100644
--- a/drivers/gpu/drm/ingenic/Kconfig
+++ b/drivers/gpu/drm/ingenic/Kconfig
@@ -4,6 +4,7 @@ config DRM_INGENIC
depends on DRM
depends on CMA
depends on OF
+ depends on COMMON_CLK
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 368bfef8b340..7bb31fbee29d 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -14,6 +14,7 @@
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/regmap.h>
#include <drm/drm_atomic.h>
@@ -190,15 +191,15 @@ static void ingenic_drm_crtc_update_timings(struct ingenic_drm *priv,
{
unsigned int vpe, vds, vde, vt, hpe, hds, hde, ht;
- vpe = mode->vsync_end - mode->vsync_start;
- vds = mode->vtotal - mode->vsync_start;
- vde = vds + mode->vdisplay;
- vt = vde + mode->vsync_start - mode->vdisplay;
+ vpe = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vds = mode->crtc_vtotal - mode->crtc_vsync_start;
+ vde = vds + mode->crtc_vdisplay;
+ vt = vde + mode->crtc_vsync_start - mode->crtc_vdisplay;
- hpe = mode->hsync_end - mode->hsync_start;
- hds = mode->htotal - mode->hsync_start;
- hde = hds + mode->hdisplay;
- ht = hde + mode->hsync_start - mode->hdisplay;
+ hpe = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ hds = mode->crtc_htotal - mode->crtc_hsync_start;
+ hde = hds + mode->crtc_hdisplay;
+ ht = hde + mode->crtc_hsync_start - mode->crtc_hdisplay;
regmap_write(priv->map, JZ_REG_LCD_VSYNC,
0 << JZ_LCD_VSYNC_VPS_OFFSET |
@@ -333,7 +334,7 @@ static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_pending_vblank_event *event = crtc_state->event;
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
- ingenic_drm_crtc_update_timings(priv, &crtc_state->mode);
+ ingenic_drm_crtc_update_timings(priv, &crtc_state->adjusted_mode);
priv->update_clk_rate = true;
}
@@ -589,7 +590,7 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct drm_connector *conn = conn_state->connector;
struct drm_display_info *info = &conn->display_info;
- unsigned int cfg;
+ unsigned int cfg, rgbcfg = 0;
priv->panel_is_sharp = info->bus_flags & DRM_BUS_FLAG_SHARP_SIGNALS;
@@ -626,6 +627,9 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
case MEDIA_BUS_FMT_RGB888_1X24:
cfg |= JZ_LCD_CFG_MODE_GENERIC_24BIT;
break;
+ case MEDIA_BUS_FMT_RGB888_3X8_DELTA:
+ rgbcfg = JZ_LCD_RGBC_EVEN_GBR | JZ_LCD_RGBC_ODD_RGB;
+ fallthrough;
case MEDIA_BUS_FMT_RGB888_3X8:
cfg |= JZ_LCD_CFG_MODE_8BIT_SERIAL;
break;
@@ -636,6 +640,7 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
}
regmap_write(priv->map, JZ_REG_LCD_CFG, cfg);
+ regmap_write(priv->map, JZ_REG_LCD_RGBC, rgbcfg);
}
static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
@@ -643,6 +648,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_display_info *info = &conn_state->connector->display_info;
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
if (info->num_bus_formats != 1)
return -EINVAL;
@@ -651,10 +657,23 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
return 0;
switch (*info->bus_formats) {
+ case MEDIA_BUS_FMT_RGB888_3X8:
+ case MEDIA_BUS_FMT_RGB888_3X8_DELTA:
+ /*
+ * The LCD controller expects timing values in dot-clock ticks,
+ * which is 3x the timing values in pixels when using a 3x8-bit
+ * display; but it will count the display area size in pixels
+ * either way. Go figure.
+ */
+ mode->crtc_clock = mode->clock * 3;
+ mode->crtc_hsync_start = mode->hsync_start * 3 - mode->hdisplay * 2;
+ mode->crtc_hsync_end = mode->hsync_end * 3 - mode->hdisplay * 2;
+ mode->crtc_hdisplay = mode->hdisplay;
+ mode->crtc_htotal = mode->htotal * 3 - mode->hdisplay * 2;
+ return 0;
case MEDIA_BUS_FMT_RGB565_1X16:
case MEDIA_BUS_FMT_RGB666_1X18:
case MEDIA_BUS_FMT_RGB888_1X24:
- case MEDIA_BUS_FMT_RGB888_3X8:
return 0;
default:
return -EINVAL;
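A worked example of the comment above, with hypothetical timings rather than anything from the patch: for hdisplay=320, hsync_start=332, hsync_end=342, htotal=352 the new case yields

	crtc_hdisplay    = 320                      (still counted in pixels)
	crtc_hsync_start = 332 * 3 - 320 * 2 = 356
	crtc_hsync_end   = 342 * 3 - 320 * 2 = 386
	crtc_htotal      = 352 * 3 - 320 * 2 = 416

so the 12/10/10-pixel front porch, sync and back porch become 36/30/30 dot-clock ticks while the visible area stays 320.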
@@ -755,8 +774,6 @@ static const struct drm_crtc_funcs ingenic_drm_crtc_funcs = {
.enable_vblank = ingenic_drm_enable_vblank,
.disable_vblank = ingenic_drm_disable_vblank,
-
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
static const struct drm_plane_helper_funcs ingenic_drm_plane_helper_funcs = {
@@ -1167,6 +1184,22 @@ static int ingenic_drm_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused ingenic_drm_suspend(struct device *dev)
+{
+ struct ingenic_drm *priv = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_suspend(&priv->drm);
+}
+
+static int __maybe_unused ingenic_drm_resume(struct device *dev)
+{
+ struct ingenic_drm *priv = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_resume(&priv->drm);
+}
+
+static SIMPLE_DEV_PM_OPS(ingenic_drm_pm_ops, ingenic_drm_suspend, ingenic_drm_resume);
+
static const u32 jz4740_formats[] = {
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
@@ -1246,6 +1279,7 @@ MODULE_DEVICE_TABLE(of, ingenic_drm_of_match);
static struct platform_driver ingenic_drm_driver = {
.driver = {
.name = "ingenic-drm",
+ .pm = pm_ptr(&ingenic_drm_pm_ops),
.of_match_table = of_match_ptr(ingenic_drm_of_match),
},
.probe = ingenic_drm_probe,
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.h b/drivers/gpu/drm/ingenic/ingenic-drm.h
index 9b48ce02803d..1b4347f7f084 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.h
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.h
@@ -31,6 +31,7 @@
#define JZ_REG_LCD_SA1 0x54
#define JZ_REG_LCD_FID1 0x58
#define JZ_REG_LCD_CMD1 0x5C
+#define JZ_REG_LCD_RGBC 0x90
#define JZ_REG_LCD_OSDC 0x100
#define JZ_REG_LCD_OSDCTRL 0x104
#define JZ_REG_LCD_OSDS 0x108
@@ -138,6 +139,19 @@
#define JZ_LCD_STATE_SOF_IRQ BIT(4)
#define JZ_LCD_STATE_DISABLED BIT(0)
+#define JZ_LCD_RGBC_ODD_RGB (0x0 << 4)
+#define JZ_LCD_RGBC_ODD_RBG (0x1 << 4)
+#define JZ_LCD_RGBC_ODD_GRB (0x2 << 4)
+#define JZ_LCD_RGBC_ODD_GBR (0x3 << 4)
+#define JZ_LCD_RGBC_ODD_BRG (0x4 << 4)
+#define JZ_LCD_RGBC_ODD_BGR (0x5 << 4)
+#define JZ_LCD_RGBC_EVEN_RGB (0x0 << 0)
+#define JZ_LCD_RGBC_EVEN_RBG (0x1 << 0)
+#define JZ_LCD_RGBC_EVEN_GRB (0x2 << 0)
+#define JZ_LCD_RGBC_EVEN_GBR (0x3 << 0)
+#define JZ_LCD_RGBC_EVEN_BRG (0x4 << 0)
+#define JZ_LCD_RGBC_EVEN_BGR (0x5 << 0)
+
#define JZ_LCD_OSDC_OSDEN BIT(0)
#define JZ_LCD_OSDC_F0EN BIT(3)
#define JZ_LCD_OSDC_F1EN BIT(4)
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index a31a840ce634..f64e06e1067d 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -400,7 +400,7 @@ static void kmb_irq_reset(struct drm_device *drm)
DEFINE_DRM_GEM_CMA_FOPS(fops);
-static struct drm_driver kmb_driver = {
+static const struct drm_driver kmb_driver = {
.driver_features = DRIVER_GEM |
DRIVER_MODESET | DRIVER_ATOMIC,
.irq_handler = kmb_isr,
@@ -556,7 +556,7 @@ MODULE_DEVICE_TABLE(of, kmb_of_match);
static int __maybe_unused kmb_pm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct kmb_drm_private *kmb = drm ? to_kmb(drm) : NULL;
+ struct kmb_drm_private *kmb = to_kmb(drm);
drm_kms_helper_poll_disable(drm);
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index 8448d1edb553..be8eea3830c1 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -114,6 +114,9 @@ static void kmb_plane_atomic_disable(struct drm_plane *plane,
kmb = to_kmb(plane->dev);
+ if (WARN_ON(plane_id >= KMB_MAX_PLANES))
+ return;
+
switch (plane_id) {
case LAYER_0:
kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL1_ENABLE;
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 63b4c5643f9c..5cc20b403a25 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -201,7 +201,7 @@ static int lima_pm_busy(struct lima_device *ldev)
int ret;
/* resume GPU if it has been suspended by runtime PM */
- ret = pm_runtime_get_sync(ldev->dev);
+ ret = pm_runtime_resume_and_get(ldev->dev);
if (ret < 0)
return ret;
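For context (a sketch, not lima's actual code): pm_runtime_get_sync() raises the usage count even when the resume fails, so a caller of the old API has to drop it by hand, which the new helper does internally:

	/* old pattern: the reference must be dropped despite the failure */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* new pattern: the helper drops it for us on failure */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;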
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index a892edec5563..dc54a7a69005 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -1,10 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
-mediatek-drm-y := mtk_disp_color.o \
+mediatek-drm-y := mtk_disp_ccorr.o \
+ mtk_disp_color.o \
+ mtk_disp_gamma.o \
mtk_disp_ovl.o \
mtk_disp_rdma.o \
mtk_drm_crtc.o \
- mtk_drm_ddp.o \
mtk_drm_ddp_comp.o \
mtk_drm_drv.o \
mtk_drm_gem.o \
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
new file mode 100644
index 000000000000..141cb36b9c07
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
+#include "mtk_disp_drv.h"
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define DISP_CCORR_EN 0x0000
+#define CCORR_EN BIT(0)
+#define DISP_CCORR_CFG 0x0020
+#define CCORR_RELAY_MODE BIT(0)
+#define CCORR_ENGINE_EN BIT(1)
+#define CCORR_GAMMA_OFF BIT(2)
+#define CCORR_WGAMUT_SRC_CLIP BIT(3)
+#define DISP_CCORR_SIZE 0x0030
+#define DISP_CCORR_COEF_0 0x0080
+#define DISP_CCORR_COEF_1 0x0084
+#define DISP_CCORR_COEF_2 0x0088
+#define DISP_CCORR_COEF_3 0x008C
+#define DISP_CCORR_COEF_4 0x0090
+
+struct mtk_disp_ccorr_data {
+ u32 matrix_bits;
+};
+
+/**
+ * struct mtk_disp_ccorr - DISP_CCORR driver structure
+ * @clk: functional clock for the CCORR hardware block
+ * @regs: base of the memory-mapped CCORR registers
+ */
+struct mtk_disp_ccorr {
+ struct clk *clk;
+ void __iomem *regs;
+ struct cmdq_client_reg cmdq_reg;
+ const struct mtk_disp_ccorr_data *data;
+};
+
+int mtk_ccorr_clk_enable(struct device *dev)
+{
+ struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(ccorr->clk);
+}
+
+void mtk_ccorr_clk_disable(struct device *dev)
+{
+ struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(ccorr->clk);
+}
+
+void mtk_ccorr_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+{
+ struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &ccorr->cmdq_reg, ccorr->regs,
+ DISP_CCORR_SIZE);
+ mtk_ddp_write(cmdq_pkt, CCORR_ENGINE_EN, &ccorr->cmdq_reg, ccorr->regs,
+ DISP_CCORR_CFG);
+}
+
+void mtk_ccorr_start(struct device *dev)
+{
+ struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+
+ writel(CCORR_EN, ccorr->regs + DISP_CCORR_EN);
+}
+
+void mtk_ccorr_stop(struct device *dev)
+{
+ struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+
+ writel_relaxed(0x0, ccorr->regs + DISP_CCORR_EN);
+}
+
+/* Converts a DRM S31.32 value to the HW S1.n format. */
+static u16 mtk_ctm_s31_32_to_s1_n(u64 in, u32 n)
+{
+ u16 r;
+
+ /* Sign bit. */
+ r = in & BIT_ULL(63) ? BIT(n + 1) : 0;
+
+ if ((in & GENMASK_ULL(62, 33)) > 0) {
+ /* identity value 0x100000000 -> 0x400 (mt8183), */
+ /* identity value 0x100000000 -> 0x800 (mt8192); */
+ /* anything larger saturates to the maximum. */
+ r |= GENMASK(n, 0);
+ } else {
+ /* take the n+1 most important bits. */
+ r |= (in >> (32 - n)) & GENMASK(n, 0);
+ }
+
+ return r;
+}
+
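A worked example for matrix_bits = 10 (mt8183), using only values already named in the comments above: 1.0 in S31.32 is 0x1_0000_0000; bit 63 and bits 62..33 are clear, so the result is (0x1_0000_0000 >> (32 - 10)) & GENMASK(10, 0) = 0x400. 2.0 is 0x2_0000_0000, which has bit 33 set, so the magnitude saturates to GENMASK(10, 0) = 0x7ff; a negative coefficient additionally sets the sign bit BIT(11).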
+void mtk_ccorr_ctm_set(struct device *dev, struct drm_crtc_state *state)
+{
+ struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
+ struct drm_property_blob *blob = state->ctm;
+ struct drm_color_ctm *ctm;
+ const u64 *input;
+ uint16_t coeffs[9] = { 0 };
+ int i;
+ struct cmdq_pkt *cmdq_pkt = NULL;
+ u32 matrix_bits = ccorr->data->matrix_bits;
+
+ if (!blob)
+ return;
+
+ ctm = (struct drm_color_ctm *)blob->data;
+ input = ctm->matrix;
+
+ for (i = 0; i < ARRAY_SIZE(coeffs); i++)
+ coeffs[i] = mtk_ctm_s31_32_to_s1_n(input[i], matrix_bits);
+
+ mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1],
+ &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_0);
+ mtk_ddp_write(cmdq_pkt, coeffs[2] << 16 | coeffs[3],
+ &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_1);
+ mtk_ddp_write(cmdq_pkt, coeffs[4] << 16 | coeffs[5],
+ &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_2);
+ mtk_ddp_write(cmdq_pkt, coeffs[6] << 16 | coeffs[7],
+ &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_3);
+ mtk_ddp_write(cmdq_pkt, coeffs[8] << 16,
+ &ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_4);
+}
+
+static int mtk_disp_ccorr_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ return 0;
+}
+
+static void mtk_disp_ccorr_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+}
+
+static const struct component_ops mtk_disp_ccorr_component_ops = {
+ .bind = mtk_disp_ccorr_bind,
+ .unbind = mtk_disp_ccorr_unbind,
+};
+
+static int mtk_disp_ccorr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_disp_ccorr *priv;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get ccorr clk\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "failed to ioremap ccorr\n");
+ return PTR_ERR(priv->regs);
+ }
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
+#endif
+
+ priv->data = of_device_get_match_data(dev);
+ platform_set_drvdata(pdev, priv);
+
+ ret = component_add(dev, &mtk_disp_ccorr_component_ops);
+ if (ret)
+ dev_err(dev, "Failed to add component: %d\n", ret);
+
+ return ret;
+}
+
+static int mtk_disp_ccorr_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &mtk_disp_ccorr_component_ops);
+
+ return 0;
+}
+
+static const struct mtk_disp_ccorr_data mt8183_ccorr_driver_data = {
+ .matrix_bits = 10,
+};
+
+static const struct of_device_id mtk_disp_ccorr_driver_dt_match[] = {
+ { .compatible = "mediatek,mt8183-disp-ccorr",
+ .data = &mt8183_ccorr_driver_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_disp_ccorr_driver_dt_match);
+
+struct platform_driver mtk_disp_ccorr_driver = {
+ .probe = mtk_disp_ccorr_probe,
+ .remove = mtk_disp_ccorr_remove,
+ .driver = {
+ .name = "mediatek-disp-ccorr",
+ .owner = THIS_MODULE,
+ .of_match_table = mtk_disp_ccorr_driver_dt_match,
+ },
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 6048cbc9f0ec..63f411ab393b 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -36,64 +37,55 @@ struct mtk_disp_color_data {
* @data: platform colour driver data
*/
struct mtk_disp_color {
- struct mtk_ddp_comp ddp_comp;
struct drm_crtc *crtc;
+ struct clk *clk;
+ void __iomem *regs;
+ struct cmdq_client_reg cmdq_reg;
const struct mtk_disp_color_data *data;
};
-static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
+int mtk_color_clk_enable(struct device *dev)
{
- return container_of(comp, struct mtk_disp_color, ddp_comp);
+ struct mtk_disp_color *color = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(color->clk);
}
-static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh,
- unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+void mtk_color_clk_disable(struct device *dev)
{
- struct mtk_disp_color *color = comp_to_color(comp);
+ struct mtk_disp_color *color = dev_get_drvdata(dev);
- mtk_ddp_write(cmdq_pkt, w, comp, DISP_COLOR_WIDTH(color));
- mtk_ddp_write(cmdq_pkt, h, comp, DISP_COLOR_HEIGHT(color));
+ clk_disable_unprepare(color->clk);
}
-static void mtk_color_start(struct mtk_ddp_comp *comp)
+void mtk_color_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- struct mtk_disp_color *color = comp_to_color(comp);
+ struct mtk_disp_color *color = dev_get_drvdata(dev);
- writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
- comp->regs + DISP_COLOR_CFG_MAIN);
- writel(0x1, comp->regs + DISP_COLOR_START(color));
+ mtk_ddp_write(cmdq_pkt, w, &color->cmdq_reg, color->regs, DISP_COLOR_WIDTH(color));
+ mtk_ddp_write(cmdq_pkt, h, &color->cmdq_reg, color->regs, DISP_COLOR_HEIGHT(color));
}
-static const struct mtk_ddp_comp_funcs mtk_disp_color_funcs = {
- .config = mtk_color_config,
- .start = mtk_color_start,
-};
+void mtk_color_start(struct device *dev)
+{
+ struct mtk_disp_color *color = dev_get_drvdata(dev);
+
+ writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
+ color->regs + DISP_COLOR_CFG_MAIN);
+ writel(0x1, color->regs + DISP_COLOR_START(color));
+}
static int mtk_disp_color_bind(struct device *dev, struct device *master,
void *data)
{
- struct mtk_disp_color *priv = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
- int ret;
-
- ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
- if (ret < 0) {
- dev_err(dev, "Failed to register component %pOF: %d\n",
- dev->of_node, ret);
- return ret;
- }
-
return 0;
}
static void mtk_disp_color_unbind(struct device *dev, struct device *master,
void *data)
{
- struct mtk_disp_color *priv = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
-
- mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp);
}
static const struct component_ops mtk_disp_color_component_ops = {
@@ -105,31 +97,32 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_color *priv;
- int comp_id;
+ struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_COLOR);
- if (comp_id < 0) {
- dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
- return comp_id;
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get color clk\n");
+ return PTR_ERR(priv->clk);
}
- ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
- &mtk_disp_color_funcs);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to initialize component: %d\n",
- ret);
-
- return ret;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "failed to ioremap color\n");
+ return PTR_ERR(priv->regs);
}
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
+#endif
priv->data = of_device_get_match_data(dev);
-
platform_set_drvdata(pdev, priv);
ret = component_add(dev, &mtk_disp_color_component_ops);
@@ -141,8 +134,6 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
static int mtk_disp_color_remove(struct platform_device *pdev)
{
- component_del(&pdev->dev, &mtk_disp_color_component_ops);
-
return 0;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
new file mode 100644
index 000000000000..cafd9df2d63b
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef _MTK_DISP_DRV_H_
+#define _MTK_DISP_DRV_H_
+
+#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_drm_plane.h"
+
+void mtk_ccorr_ctm_set(struct device *dev, struct drm_crtc_state *state);
+int mtk_ccorr_clk_enable(struct device *dev);
+void mtk_ccorr_clk_disable(struct device *dev);
+void mtk_ccorr_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+void mtk_ccorr_start(struct device *dev);
+void mtk_ccorr_stop(struct device *dev);
+
+void mtk_color_bypass_shadow(struct device *dev);
+int mtk_color_clk_enable(struct device *dev);
+void mtk_color_clk_disable(struct device *dev);
+void mtk_color_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+void mtk_color_start(struct device *dev);
+
+void mtk_dither_set_common(void __iomem *regs, struct cmdq_client_reg *cmdq_reg,
+ unsigned int bpc, unsigned int cfg,
+ unsigned int dither_en, struct cmdq_pkt *cmdq_pkt);
+
+void mtk_dpi_start(struct device *dev);
+void mtk_dpi_stop(struct device *dev);
+
+void mtk_dsi_ddp_start(struct device *dev);
+void mtk_dsi_ddp_stop(struct device *dev);
+
+int mtk_gamma_clk_enable(struct device *dev);
+void mtk_gamma_clk_disable(struct device *dev);
+void mtk_gamma_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+void mtk_gamma_set(struct device *dev, struct drm_crtc_state *state);
+void mtk_gamma_set_common(void __iomem *regs, struct drm_crtc_state *state);
+void mtk_gamma_start(struct device *dev);
+void mtk_gamma_stop(struct device *dev);
+
+void mtk_ovl_bgclr_in_on(struct device *dev);
+void mtk_ovl_bgclr_in_off(struct device *dev);
+void mtk_ovl_bypass_shadow(struct device *dev);
+int mtk_ovl_clk_enable(struct device *dev);
+void mtk_ovl_clk_disable(struct device *dev);
+void mtk_ovl_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+int mtk_ovl_layer_check(struct device *dev, unsigned int idx,
+ struct mtk_plane_state *mtk_state);
+void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt);
+unsigned int mtk_ovl_layer_nr(struct device *dev);
+void mtk_ovl_layer_on(struct device *dev, unsigned int idx,
+ struct cmdq_pkt *cmdq_pkt);
+void mtk_ovl_layer_off(struct device *dev, unsigned int idx,
+ struct cmdq_pkt *cmdq_pkt);
+void mtk_ovl_start(struct device *dev);
+void mtk_ovl_stop(struct device *dev);
+unsigned int mtk_ovl_supported_rotations(struct device *dev);
+void mtk_ovl_enable_vblank(struct device *dev,
+ void (*vblank_cb)(void *),
+ void *vblank_cb_data);
+void mtk_ovl_disable_vblank(struct device *dev);
+
+void mtk_rdma_bypass_shadow(struct device *dev);
+int mtk_rdma_clk_enable(struct device *dev);
+void mtk_rdma_clk_disable(struct device *dev);
+void mtk_rdma_config(struct device *dev, unsigned int width,
+ unsigned int height, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+unsigned int mtk_rdma_layer_nr(struct device *dev);
+void mtk_rdma_layer_config(struct device *dev, unsigned int idx,
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt);
+void mtk_rdma_start(struct device *dev);
+void mtk_rdma_stop(struct device *dev);
+void mtk_rdma_enable_vblank(struct device *dev,
+ void (*vblank_cb)(void *),
+ void *vblank_cb_data);
+void mtk_rdma_disable_vblank(struct device *dev);
+
+#endif
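[ Editor's note - not part of the patch. The header above only exports per-device helpers; a plausible (assumed, not taken verbatim from this series) way for the DDP core to consume them is a per-component table of device-based callbacks: ]

struct mtk_ddp_comp_funcs {
	int (*clk_enable)(struct device *dev);
	void (*clk_disable)(struct device *dev);
	void (*config)(struct device *dev, unsigned int w, unsigned int h,
		       unsigned int vrefresh, unsigned int bpc,
		       struct cmdq_pkt *cmdq_pkt);
	void (*start)(struct device *dev);
	void (*stop)(struct device *dev);
};

/* Hypothetical wiring for the gamma block, using the prototypes above. */
static const struct mtk_ddp_comp_funcs ddp_gamma = {
	.clk_enable = mtk_gamma_clk_enable,
	.clk_disable = mtk_gamma_clk_disable,
	.config = mtk_gamma_config,
	.start = mtk_gamma_start,
	.stop = mtk_gamma_stop,
};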
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
new file mode 100644
index 000000000000..3ebf91e0ab41
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
+#include "mtk_disp_drv.h"
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define DISP_GAMMA_EN 0x0000
+#define GAMMA_EN BIT(0)
+#define DISP_GAMMA_CFG 0x0020
+#define GAMMA_LUT_EN BIT(1)
+#define GAMMA_DITHERING BIT(2)
+#define DISP_GAMMA_SIZE 0x0030
+#define DISP_GAMMA_LUT 0x0700
+
+#define LUT_10BIT_MASK 0x03ff
+
+struct mtk_disp_gamma_data {
+ bool has_dither;
+};
+
+/**
+ * struct mtk_disp_gamma - DISP_GAMMA driver structure
+ * @clk: clock of the DISP_GAMMA hardware block
+ * @regs: MMIO register base of the block
+ * @cmdq_reg: CMDQ client register description for this block
+ * @data: platform-specific driver data
+ */
+struct mtk_disp_gamma {
+ struct clk *clk;
+ void __iomem *regs;
+ struct cmdq_client_reg cmdq_reg;
+ const struct mtk_disp_gamma_data *data;
+};
+
+int mtk_gamma_clk_enable(struct device *dev)
+{
+ struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(gamma->clk);
+}
+
+void mtk_gamma_clk_disable(struct device *dev)
+{
+ struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(gamma->clk);
+}
+
+void mtk_gamma_set_common(void __iomem *regs, struct drm_crtc_state *state)
+{
+ unsigned int i, reg;
+ struct drm_color_lut *lut;
+ void __iomem *lut_base;
+ u32 word;
+
+ if (state->gamma_lut) {
+ reg = readl(regs + DISP_GAMMA_CFG);
+ reg = reg | GAMMA_LUT_EN;
+ writel(reg, regs + DISP_GAMMA_CFG);
+ lut_base = regs + DISP_GAMMA_LUT;
+ lut = (struct drm_color_lut *)state->gamma_lut->data;
+ for (i = 0; i < MTK_LUT_SIZE; i++) {
+ word = (((lut[i].red >> 6) & LUT_10BIT_MASK) << 20) +
+ (((lut[i].green >> 6) & LUT_10BIT_MASK) << 10) +
+ ((lut[i].blue >> 6) & LUT_10BIT_MASK);
+ writel(word, (lut_base + i * 4));
+ }
+ }
+}
+
+void mtk_gamma_set(struct device *dev, struct drm_crtc_state *state)
+{
+ struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+ mtk_gamma_set_common(gamma->regs, state);
+}
+
+void mtk_gamma_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+{
+ struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, &gamma->cmdq_reg, gamma->regs,
+ DISP_GAMMA_SIZE);
+ if (gamma->data && gamma->data->has_dither)
+ mtk_dither_set_common(gamma->regs, &gamma->cmdq_reg, bpc,
+ DISP_GAMMA_CFG, GAMMA_DITHERING, cmdq_pkt);
+}
+
+void mtk_gamma_start(struct device *dev)
+{
+ struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+ writel(GAMMA_EN, gamma->regs + DISP_GAMMA_EN);
+}
+
+void mtk_gamma_stop(struct device *dev)
+{
+ struct mtk_disp_gamma *gamma = dev_get_drvdata(dev);
+
+ writel_relaxed(0x0, gamma->regs + DISP_GAMMA_EN);
+}
+
+static int mtk_disp_gamma_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ return 0;
+}
+
+static void mtk_disp_gamma_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+}
+
+static const struct component_ops mtk_disp_gamma_component_ops = {
+ .bind = mtk_disp_gamma_bind,
+ .unbind = mtk_disp_gamma_unbind,
+};
+
+static int mtk_disp_gamma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_disp_gamma *priv;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get gamma clk\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "failed to ioremap gamma\n");
+ return PTR_ERR(priv->regs);
+ }
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
+#endif
+
+ priv->data = of_device_get_match_data(dev);
+ platform_set_drvdata(pdev, priv);
+
+ ret = component_add(dev, &mtk_disp_gamma_component_ops);
+ if (ret)
+ dev_err(dev, "Failed to add component: %d\n", ret);
+
+ return ret;
+}
+
+static int mtk_disp_gamma_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &mtk_disp_gamma_component_ops);
+
+ return 0;
+}
+
+static const struct mtk_disp_gamma_data mt8173_gamma_driver_data = {
+ .has_dither = true,
+};
+
+static const struct of_device_id mtk_disp_gamma_driver_dt_match[] = {
+ { .compatible = "mediatek,mt8173-disp-gamma",
+ .data = &mt8173_gamma_driver_data},
+ { .compatible = "mediatek,mt8183-disp-gamma"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_disp_gamma_driver_dt_match);
+
+struct platform_driver mtk_disp_gamma_driver = {
+ .probe = mtk_disp_gamma_probe,
+ .remove = mtk_disp_gamma_remove,
+ .driver = {
+ .name = "mediatek-disp-gamma",
+ .owner = THIS_MODULE,
+ .of_match_table = mtk_disp_gamma_driver_dt_match,
+ },
+};
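[ Editor's note - not part of the patch, illustration only. The word written to DISP_GAMMA_LUT above packs the three 16-bit DRM LUT channels into one register value: each channel is truncated to 10 bits, with red in bits 29:20, green in 19:10 and blue in 9:0. An equivalent standalone helper, reusing LUT_10BIT_MASK from the file above: ]

/* Same packing as the loop in mtk_gamma_set_common(). */
static u32 mtk_gamma_pack_lut_word(const struct drm_color_lut *e)
{
	return (((e->red   >> 6) & LUT_10BIT_MASK) << 20) |
	       (((e->green >> 6) & LUT_10BIT_MASK) << 10) |
		((e->blue  >> 6) & LUT_10BIT_MASK);
}

[ A mid-grey entry with red = green = blue = 0x8000 becomes 0x200 per channel, i.e. 0x20080200 on the bus; the addition used in the patch and the OR used here are equivalent because the three fields do not overlap. ]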
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 74ef6fc0528b..961f87f8d4d1 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -23,6 +24,7 @@
#define DISP_REG_OVL_RST 0x0014
#define DISP_REG_OVL_ROI_SIZE 0x0020
#define DISP_REG_OVL_DATAPATH_CON 0x0024
+#define OVL_LAYER_SMI_ID_EN BIT(0)
#define OVL_BGCLR_SEL_IN BIT(2)
#define DISP_REG_OVL_ROI_BGCLR 0x0028
#define DISP_REG_OVL_SRC_CON 0x002c
@@ -61,6 +63,7 @@ struct mtk_disp_ovl_data {
unsigned int gmc_bits;
unsigned int layer_nr;
bool fmt_rgb565_is_0;
+ bool smi_id_en;
};
/**
@@ -70,88 +73,124 @@ struct mtk_disp_ovl_data {
* @data: platform data
*/
struct mtk_disp_ovl {
- struct mtk_ddp_comp ddp_comp;
struct drm_crtc *crtc;
+ struct clk *clk;
+ void __iomem *regs;
+ struct cmdq_client_reg cmdq_reg;
const struct mtk_disp_ovl_data *data;
+ void (*vblank_cb)(void *data);
+ void *vblank_cb_data;
};
-static inline struct mtk_disp_ovl *comp_to_ovl(struct mtk_ddp_comp *comp)
-{
- return container_of(comp, struct mtk_disp_ovl, ddp_comp);
-}
-
static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id)
{
struct mtk_disp_ovl *priv = dev_id;
- struct mtk_ddp_comp *ovl = &priv->ddp_comp;
/* Clear frame completion interrupt */
- writel(0x0, ovl->regs + DISP_REG_OVL_INTSTA);
+ writel(0x0, priv->regs + DISP_REG_OVL_INTSTA);
- if (!priv->crtc)
+ if (!priv->vblank_cb)
return IRQ_NONE;
- mtk_crtc_ddp_irq(priv->crtc, ovl);
+ priv->vblank_cb(priv->vblank_cb_data);
return IRQ_HANDLED;
}
-static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp,
- struct drm_crtc *crtc)
+void mtk_ovl_enable_vblank(struct device *dev,
+ void (*vblank_cb)(void *),
+ void *vblank_cb_data)
+{
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+ ovl->vblank_cb = vblank_cb;
+ ovl->vblank_cb_data = vblank_cb_data;
+ writel(0x0, ovl->regs + DISP_REG_OVL_INTSTA);
+ writel_relaxed(OVL_FME_CPL_INT, ovl->regs + DISP_REG_OVL_INTEN);
+}
+
+void mtk_ovl_disable_vblank(struct device *dev)
+{
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+ ovl->vblank_cb = NULL;
+ ovl->vblank_cb_data = NULL;
+ writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_INTEN);
+}
+
+int mtk_ovl_clk_enable(struct device *dev)
{
- struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
- ovl->crtc = crtc;
- writel(0x0, comp->regs + DISP_REG_OVL_INTSTA);
- writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN);
+ return clk_prepare_enable(ovl->clk);
}
-static void mtk_ovl_disable_vblank(struct mtk_ddp_comp *comp)
+void mtk_ovl_clk_disable(struct device *dev)
{
- struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
- ovl->crtc = NULL;
- writel_relaxed(0x0, comp->regs + DISP_REG_OVL_INTEN);
+ clk_disable_unprepare(ovl->clk);
}
-static void mtk_ovl_start(struct mtk_ddp_comp *comp)
+void mtk_ovl_start(struct device *dev)
{
- writel_relaxed(0x1, comp->regs + DISP_REG_OVL_EN);
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+ if (ovl->data->smi_id_en) {
+ unsigned int reg;
+
+ reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = reg | OVL_LAYER_SMI_ID_EN;
+ writel_relaxed(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
+ }
+ writel_relaxed(0x1, ovl->regs + DISP_REG_OVL_EN);
}
-static void mtk_ovl_stop(struct mtk_ddp_comp *comp)
+void mtk_ovl_stop(struct device *dev)
{
- writel_relaxed(0x0, comp->regs + DISP_REG_OVL_EN);
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+ writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_EN);
+ if (ovl->data->smi_id_en) {
+ unsigned int reg;
+
+ reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = reg & ~OVL_LAYER_SMI_ID_EN;
+ writel_relaxed(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
+ }
+
}
-static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh,
- unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+void mtk_ovl_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
if (w != 0 && h != 0)
- mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, comp,
+ mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_ROI_SIZE);
- mtk_ddp_write_relaxed(cmdq_pkt, 0x0, comp, DISP_REG_OVL_ROI_BGCLR);
+ mtk_ddp_write_relaxed(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_ROI_BGCLR);
- mtk_ddp_write(cmdq_pkt, 0x1, comp, DISP_REG_OVL_RST);
- mtk_ddp_write(cmdq_pkt, 0x0, comp, DISP_REG_OVL_RST);
+ mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST);
+ mtk_ddp_write(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST);
}
-static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
+unsigned int mtk_ovl_layer_nr(struct device *dev)
{
- struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
return ovl->data->layer_nr;
}
-static unsigned int mtk_ovl_supported_rotations(struct mtk_ddp_comp *comp)
+unsigned int mtk_ovl_supported_rotations(struct device *dev)
{
return DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
}
-static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx,
- struct mtk_plane_state *mtk_state)
+int mtk_ovl_layer_check(struct device *dev, unsigned int idx,
+ struct mtk_plane_state *mtk_state)
{
struct drm_plane_state *state = &mtk_state->base;
unsigned int rotation = 0;
@@ -178,15 +217,15 @@ static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx,
return 0;
}
-static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx,
- struct cmdq_pkt *cmdq_pkt)
+void mtk_ovl_layer_on(struct device *dev, unsigned int idx,
+ struct cmdq_pkt *cmdq_pkt)
{
unsigned int gmc_thrshd_l;
unsigned int gmc_thrshd_h;
unsigned int gmc_value;
- struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
- mtk_ddp_write(cmdq_pkt, 0x1, comp,
+ mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_RDMA_CTRL(idx));
gmc_thrshd_l = GMC_THRESHOLD_LOW >>
(GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
@@ -198,17 +237,19 @@ static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx,
gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 |
gmc_thrshd_h << 16 | gmc_thrshd_h << 24;
mtk_ddp_write(cmdq_pkt, gmc_value,
- comp, DISP_REG_OVL_RDMA_GMC(idx));
- mtk_ddp_write_mask(cmdq_pkt, BIT(idx), comp,
+ &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RDMA_GMC(idx));
+ mtk_ddp_write_mask(cmdq_pkt, BIT(idx), &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_SRC_CON, BIT(idx));
}
-static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx,
- struct cmdq_pkt *cmdq_pkt)
+void mtk_ovl_layer_off(struct device *dev, unsigned int idx,
+ struct cmdq_pkt *cmdq_pkt)
{
- mtk_ddp_write_mask(cmdq_pkt, 0, comp,
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+ mtk_ddp_write_mask(cmdq_pkt, 0, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_SRC_CON, BIT(idx));
- mtk_ddp_write(cmdq_pkt, 0, comp,
+ mtk_ddp_write(cmdq_pkt, 0, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_RDMA_CTRL(idx));
}
@@ -248,11 +289,11 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
}
}
-static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
- struct mtk_plane_state *state,
- struct cmdq_pkt *cmdq_pkt)
+void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt)
{
- struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
struct mtk_plane_pending_state *pending = &state->pending;
unsigned int addr = pending->addr;
unsigned int pitch = pending->pitch & 0xffff;
@@ -262,12 +303,12 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
unsigned int con;
if (!pending->enable) {
- mtk_ovl_layer_off(comp, idx, cmdq_pkt);
+ mtk_ovl_layer_off(dev, idx, cmdq_pkt);
return;
}
con = ovl_fmt_convert(ovl, fmt);
- if (state->base.fb->format->has_alpha)
+ if (state->base.fb && state->base.fb->format->has_alpha)
con |= OVL_CON_AEN | OVL_CON_ALPHA;
if (pending->rotation & DRM_MODE_REFLECT_Y) {
@@ -280,76 +321,49 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
addr += pending->pitch - 1;
}
- mtk_ddp_write_relaxed(cmdq_pkt, con, comp,
+ mtk_ddp_write_relaxed(cmdq_pkt, con, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_CON(idx));
- mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp,
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_PITCH(idx));
- mtk_ddp_write_relaxed(cmdq_pkt, src_size, comp,
+ mtk_ddp_write_relaxed(cmdq_pkt, src_size, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_SRC_SIZE(idx));
- mtk_ddp_write_relaxed(cmdq_pkt, offset, comp,
+ mtk_ddp_write_relaxed(cmdq_pkt, offset, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_OFFSET(idx));
- mtk_ddp_write_relaxed(cmdq_pkt, addr, comp,
+ mtk_ddp_write_relaxed(cmdq_pkt, addr, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_ADDR(ovl, idx));
- mtk_ovl_layer_on(comp, idx, cmdq_pkt);
+ mtk_ovl_layer_on(dev, idx, cmdq_pkt);
}
-static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp)
+void mtk_ovl_bgclr_in_on(struct device *dev)
{
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
unsigned int reg;
- reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
reg = reg | OVL_BGCLR_SEL_IN;
- writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ writel(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
}
-static void mtk_ovl_bgclr_in_off(struct mtk_ddp_comp *comp)
+void mtk_ovl_bgclr_in_off(struct device *dev)
{
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
unsigned int reg;
- reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
reg = reg & ~OVL_BGCLR_SEL_IN;
- writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ writel(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
}
-static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
- .config = mtk_ovl_config,
- .start = mtk_ovl_start,
- .stop = mtk_ovl_stop,
- .enable_vblank = mtk_ovl_enable_vblank,
- .disable_vblank = mtk_ovl_disable_vblank,
- .supported_rotations = mtk_ovl_supported_rotations,
- .layer_nr = mtk_ovl_layer_nr,
- .layer_check = mtk_ovl_layer_check,
- .layer_config = mtk_ovl_layer_config,
- .bgclr_in_on = mtk_ovl_bgclr_in_on,
- .bgclr_in_off = mtk_ovl_bgclr_in_off,
-};
-
static int mtk_disp_ovl_bind(struct device *dev, struct device *master,
void *data)
{
- struct mtk_disp_ovl *priv = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
- int ret;
-
- ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
- if (ret < 0) {
- dev_err(dev, "Failed to register component %pOF: %d\n",
- dev->of_node, ret);
- return ret;
- }
-
return 0;
}
static void mtk_disp_ovl_unbind(struct device *dev, struct device *master,
void *data)
{
- struct mtk_disp_ovl *priv = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
-
- mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp);
}
static const struct component_ops mtk_disp_ovl_component_ops = {
@@ -361,7 +375,7 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_ovl *priv;
- int comp_id;
+ struct resource *res;
int irq;
int ret;
@@ -373,27 +387,25 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- priv->data = of_device_get_match_data(dev);
-
- comp_id = mtk_ddp_comp_get_id(dev->of_node,
- priv->data->layer_nr == 4 ?
- MTK_DISP_OVL :
- MTK_DISP_OVL_2L);
- if (comp_id < 0) {
- dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
- return comp_id;
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get ovl clk\n");
+ return PTR_ERR(priv->clk);
}
- ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
- &mtk_disp_ovl_funcs);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to initialize component: %d\n",
- ret);
-
- return ret;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "failed to ioremap ovl\n");
+ return PTR_ERR(priv->regs);
}
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
+#endif
+ priv->data = of_device_get_match_data(dev);
platform_set_drvdata(pdev, priv);
ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
@@ -412,8 +424,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
static int mtk_disp_ovl_remove(struct platform_device *pdev)
{
- component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
-
return 0;
}
@@ -431,11 +441,29 @@ static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = {
.fmt_rgb565_is_0 = true,
};
+static const struct mtk_disp_ovl_data mt8183_ovl_driver_data = {
+ .addr = DISP_REG_OVL_ADDR_MT8173,
+ .gmc_bits = 10,
+ .layer_nr = 4,
+ .fmt_rgb565_is_0 = true,
+};
+
+static const struct mtk_disp_ovl_data mt8183_ovl_2l_driver_data = {
+ .addr = DISP_REG_OVL_ADDR_MT8173,
+ .gmc_bits = 10,
+ .layer_nr = 2,
+ .fmt_rgb565_is_0 = true,
+};
+
static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
{ .compatible = "mediatek,mt2701-disp-ovl",
.data = &mt2701_ovl_driver_data},
{ .compatible = "mediatek,mt8173-disp-ovl",
.data = &mt8173_ovl_driver_data},
+ { .compatible = "mediatek,mt8183-disp-ovl",
+ .data = &mt8183_ovl_driver_data},
+ { .compatible = "mediatek,mt8183-disp-ovl-2l",
+ .data = &mt8183_ovl_2l_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
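[ Editor's note - not part of the patch. A hedged sketch of the new vblank plumbing visible above: mtk_ovl_enable_vblank() stores a callback plus opaque data, and the OVL frame-completion interrupt invokes it until mtk_ovl_disable_vblank() clears it. A hypothetical caller: ]

#include <drm/drm_vblank.h>

static void example_vblank_cb(void *data)
{
	struct drm_crtc *crtc = data;

	drm_crtc_handle_vblank(crtc);	/* or any other per-frame work */
}

/* ovl_dev is the OVL component's struct device. */
static void example_enable_vblank(struct device *ovl_dev, struct drm_crtc *crtc)
{
	mtk_ovl_enable_vblank(ovl_dev, example_vblank_cb, crtc);
}

static void example_disable_vblank(struct device *ovl_dev)
{
	mtk_ovl_disable_vblank(ovl_dev);
}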
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index d46b8ae1d080..728aaadfea8c 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -61,83 +62,105 @@ struct mtk_disp_rdma_data {
* @data: local driver data
*/
struct mtk_disp_rdma {
- struct mtk_ddp_comp ddp_comp;
- struct drm_crtc *crtc;
+ struct clk *clk;
+ void __iomem *regs;
+ struct cmdq_client_reg cmdq_reg;
const struct mtk_disp_rdma_data *data;
+ void (*vblank_cb)(void *data);
+ void *vblank_cb_data;
+ u32 fifo_size;
};
-static inline struct mtk_disp_rdma *comp_to_rdma(struct mtk_ddp_comp *comp)
-{
- return container_of(comp, struct mtk_disp_rdma, ddp_comp);
-}
-
static irqreturn_t mtk_disp_rdma_irq_handler(int irq, void *dev_id)
{
struct mtk_disp_rdma *priv = dev_id;
- struct mtk_ddp_comp *rdma = &priv->ddp_comp;
/* Clear frame completion interrupt */
- writel(0x0, rdma->regs + DISP_REG_RDMA_INT_STATUS);
+ writel(0x0, priv->regs + DISP_REG_RDMA_INT_STATUS);
- if (!priv->crtc)
+ if (!priv->vblank_cb)
return IRQ_NONE;
- mtk_crtc_ddp_irq(priv->crtc, rdma);
+ priv->vblank_cb(priv->vblank_cb_data);
return IRQ_HANDLED;
}
-static void rdma_update_bits(struct mtk_ddp_comp *comp, unsigned int reg,
+static void rdma_update_bits(struct device *dev, unsigned int reg,
unsigned int mask, unsigned int val)
{
- unsigned int tmp = readl(comp->regs + reg);
+ struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
+ unsigned int tmp = readl(rdma->regs + reg);
tmp = (tmp & ~mask) | (val & mask);
- writel(tmp, comp->regs + reg);
+ writel(tmp, rdma->regs + reg);
}
-static void mtk_rdma_enable_vblank(struct mtk_ddp_comp *comp,
- struct drm_crtc *crtc)
+void mtk_rdma_enable_vblank(struct device *dev,
+ void (*vblank_cb)(void *),
+ void *vblank_cb_data)
{
- struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+ struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
- rdma->crtc = crtc;
- rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT,
+ rdma->vblank_cb = vblank_cb;
+ rdma->vblank_cb_data = vblank_cb_data;
+ rdma_update_bits(dev, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT,
RDMA_FRAME_END_INT);
}
-static void mtk_rdma_disable_vblank(struct mtk_ddp_comp *comp)
+void mtk_rdma_disable_vblank(struct device *dev)
+{
+ struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
+
+ rdma->vblank_cb = NULL;
+ rdma->vblank_cb_data = NULL;
+ rdma_update_bits(dev, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, 0);
+}
+
+int mtk_rdma_clk_enable(struct device *dev)
{
- struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+ struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
- rdma->crtc = NULL;
- rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, 0);
+ return clk_prepare_enable(rdma->clk);
}
-static void mtk_rdma_start(struct mtk_ddp_comp *comp)
+void mtk_rdma_clk_disable(struct device *dev)
{
- rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN,
+ struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(rdma->clk);
+}
+
+void mtk_rdma_start(struct device *dev)
+{
+ rdma_update_bits(dev, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN,
RDMA_ENGINE_EN);
}
-static void mtk_rdma_stop(struct mtk_ddp_comp *comp)
+void mtk_rdma_stop(struct device *dev)
{
- rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN, 0);
+ rdma_update_bits(dev, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN, 0);
}
-static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
- unsigned int height, unsigned int vrefresh,
- unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+void mtk_rdma_config(struct device *dev, unsigned int width,
+ unsigned int height, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
unsigned int threshold;
unsigned int reg;
- struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+ struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
+ u32 rdma_fifo_size;
- mtk_ddp_write_mask(cmdq_pkt, width, comp,
+ mtk_ddp_write_mask(cmdq_pkt, width, &rdma->cmdq_reg, rdma->regs,
DISP_REG_RDMA_SIZE_CON_0, 0xfff);
- mtk_ddp_write_mask(cmdq_pkt, height, comp,
+ mtk_ddp_write_mask(cmdq_pkt, height, &rdma->cmdq_reg, rdma->regs,
DISP_REG_RDMA_SIZE_CON_1, 0xfffff);
+ if (rdma->fifo_size)
+ rdma_fifo_size = rdma->fifo_size;
+ else
+ rdma_fifo_size = RDMA_FIFO_SIZE(rdma);
+
/*
* Enable FIFO underflow since DSI and DPI can't be blocked.
* Keep the FIFO pseudo size reset default of 8 KiB. Set the
@@ -146,9 +169,9 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
*/
threshold = width * height * vrefresh * 4 * 7 / 1000000;
reg = RDMA_FIFO_UNDERFLOW_EN |
- RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) |
+ RDMA_FIFO_PSEUDO_SIZE(rdma_fifo_size) |
RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold);
- mtk_ddp_write(cmdq_pkt, reg, comp, DISP_REG_RDMA_FIFO_CON);
+ mtk_ddp_write(cmdq_pkt, reg, &rdma->cmdq_reg, rdma->regs, DISP_REG_RDMA_FIFO_CON);
}
static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
@@ -188,16 +211,16 @@ static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
}
}
-static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp)
+unsigned int mtk_rdma_layer_nr(struct device *dev)
{
return 1;
}
-static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
- struct mtk_plane_state *state,
- struct cmdq_pkt *cmdq_pkt)
+void mtk_rdma_layer_config(struct device *dev, unsigned int idx,
+ struct mtk_plane_state *state,
+ struct cmdq_pkt *cmdq_pkt)
{
- struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+ struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);
struct mtk_plane_pending_state *pending = &state->pending;
unsigned int addr = pending->addr;
unsigned int pitch = pending->pitch & 0xffff;
@@ -205,53 +228,34 @@ static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
unsigned int con;
con = rdma_fmt_convert(rdma, fmt);
- mtk_ddp_write_relaxed(cmdq_pkt, con, comp, DISP_RDMA_MEM_CON);
+ mtk_ddp_write_relaxed(cmdq_pkt, con, &rdma->cmdq_reg, rdma->regs, DISP_RDMA_MEM_CON);
if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
- mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_ENABLE, comp,
+ mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_ENABLE, &rdma->cmdq_reg, rdma->regs,
DISP_REG_RDMA_SIZE_CON_0,
RDMA_MATRIX_ENABLE);
mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_INT_MTX_BT601_to_RGB,
- comp, DISP_REG_RDMA_SIZE_CON_0,
+ &rdma->cmdq_reg, rdma->regs, DISP_REG_RDMA_SIZE_CON_0,
RDMA_MATRIX_INT_MTX_SEL);
} else {
- mtk_ddp_write_mask(cmdq_pkt, 0, comp,
+ mtk_ddp_write_mask(cmdq_pkt, 0, &rdma->cmdq_reg, rdma->regs,
DISP_REG_RDMA_SIZE_CON_0,
RDMA_MATRIX_ENABLE);
}
- mtk_ddp_write_relaxed(cmdq_pkt, addr, comp, DISP_RDMA_MEM_START_ADDR);
- mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp, DISP_RDMA_MEM_SRC_PITCH);
- mtk_ddp_write(cmdq_pkt, RDMA_MEM_GMC, comp,
+ mtk_ddp_write_relaxed(cmdq_pkt, addr, &rdma->cmdq_reg, rdma->regs,
+ DISP_RDMA_MEM_START_ADDR);
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch, &rdma->cmdq_reg, rdma->regs,
+ DISP_RDMA_MEM_SRC_PITCH);
+ mtk_ddp_write(cmdq_pkt, RDMA_MEM_GMC, &rdma->cmdq_reg, rdma->regs,
DISP_RDMA_MEM_GMC_SETTING_0);
- mtk_ddp_write_mask(cmdq_pkt, RDMA_MODE_MEMORY, comp,
+ mtk_ddp_write_mask(cmdq_pkt, RDMA_MODE_MEMORY, &rdma->cmdq_reg, rdma->regs,
DISP_REG_RDMA_GLOBAL_CON, RDMA_MODE_MEMORY);
}
-static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
- .config = mtk_rdma_config,
- .start = mtk_rdma_start,
- .stop = mtk_rdma_stop,
- .enable_vblank = mtk_rdma_enable_vblank,
- .disable_vblank = mtk_rdma_disable_vblank,
- .layer_nr = mtk_rdma_layer_nr,
- .layer_config = mtk_rdma_layer_config,
-};
-
static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
void *data)
{
- struct mtk_disp_rdma *priv = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
- int ret;
-
- ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
- if (ret < 0) {
- dev_err(dev, "Failed to register component %pOF: %d\n",
- dev->of_node, ret);
- return ret;
- }
-
return 0;
}
@@ -259,10 +263,6 @@ static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
static void mtk_disp_rdma_unbind(struct device *dev, struct device *master,
void *data)
{
- struct mtk_disp_rdma *priv = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
-
- mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp);
}
static const struct component_ops mtk_disp_rdma_component_ops = {
@@ -274,7 +274,7 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_rdma *priv;
- int comp_id;
+ struct resource *res;
int irq;
int ret;
@@ -286,25 +286,37 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_RDMA);
- if (comp_id < 0) {
- dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
- return comp_id;
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get rdma clk\n");
+ return PTR_ERR(priv->clk);
}
- ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
- &mtk_disp_rdma_funcs);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to initialize component: %d\n",
- ret);
-
- return ret;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "failed to ioremap rdma\n");
+ return PTR_ERR(priv->regs);
+ }
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
+#endif
+
+ if (of_find_property(dev->of_node, "mediatek,rdma-fifo-size", &ret)) {
+ ret = of_property_read_u32(dev->of_node,
+ "mediatek,rdma-fifo-size",
+ &priv->fifo_size);
+ if (ret) {
+ dev_err(dev, "Failed to get rdma fifo size\n");
+ return ret;
+ }
}
/* Disable and clear pending interrupts */
- writel(0x0, priv->ddp_comp.regs + DISP_REG_RDMA_INT_ENABLE);
- writel(0x0, priv->ddp_comp.regs + DISP_REG_RDMA_INT_STATUS);
+ writel(0x0, priv->regs + DISP_REG_RDMA_INT_ENABLE);
+ writel(0x0, priv->regs + DISP_REG_RDMA_INT_STATUS);
ret = devm_request_irq(dev, irq, mtk_disp_rdma_irq_handler,
IRQF_TRIGGER_NONE, dev_name(dev), priv);
@@ -339,11 +351,17 @@ static const struct mtk_disp_rdma_data mt8173_rdma_driver_data = {
.fifo_size = SZ_8K,
};
+static const struct mtk_disp_rdma_data mt8183_rdma_driver_data = {
+ .fifo_size = 5 * SZ_1K,
+};
+
static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
{ .compatible = "mediatek,mt2701-disp-rdma",
.data = &mt2701_rdma_driver_data},
{ .compatible = "mediatek,mt8173-disp-rdma",
.data = &mt8173_rdma_driver_data},
+ { .compatible = "mediatek,mt8183-disp-rdma",
+ .data = &mt8183_rdma_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
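[ Editor's note - not part of the patch. A worked example of the threshold computed in mtk_rdma_config() above: threshold = width * height * vrefresh * 4 * 7 / 1000000, roughly the bytes the pipe consumes in a few microseconds of scanout at 4 bytes per pixel. For a 1920x1080@60 mode the integer arithmetic gives 1920 * 1080 * 60 * 4 * 7 / 1000000 = 3483, which is programmed through RDMA_OUTPUT_VALID_FIFO_THRESHOLD together with the pseudo FIFO size - either RDMA_FIFO_SIZE(rdma) or, new in this patch, the optional mediatek,rdma-fifo-size devicetree override. ]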
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 52f11a63a330..b05f900d9322 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -20,10 +20,12 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
#include <drm/drm_simple_kms_helper.h>
+#include "mtk_disp_drv.h"
#include "mtk_dpi_regs.h"
#include "mtk_drm_ddp_comp.h"
@@ -62,10 +64,10 @@ enum mtk_dpi_out_color_format {
};
struct mtk_dpi {
- struct mtk_ddp_comp ddp_comp;
struct drm_encoder encoder;
struct drm_bridge bridge;
struct drm_bridge *next_bridge;
+ struct drm_connector *connector;
void __iomem *regs;
struct device *dev;
struct clk *engine_clk;
@@ -562,53 +564,50 @@ static const struct drm_bridge_funcs mtk_dpi_bridge_funcs = {
.enable = mtk_dpi_bridge_enable,
};
-static void mtk_dpi_start(struct mtk_ddp_comp *comp)
+void mtk_dpi_start(struct device *dev)
{
- struct mtk_dpi *dpi = container_of(comp, struct mtk_dpi, ddp_comp);
+ struct mtk_dpi *dpi = dev_get_drvdata(dev);
mtk_dpi_power_on(dpi);
}
-static void mtk_dpi_stop(struct mtk_ddp_comp *comp)
+void mtk_dpi_stop(struct device *dev)
{
- struct mtk_dpi *dpi = container_of(comp, struct mtk_dpi, ddp_comp);
+ struct mtk_dpi *dpi = dev_get_drvdata(dev);
mtk_dpi_power_off(dpi);
}
-static const struct mtk_ddp_comp_funcs mtk_dpi_funcs = {
- .start = mtk_dpi_start,
- .stop = mtk_dpi_stop,
-};
-
static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
{
struct mtk_dpi *dpi = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
int ret;
- ret = mtk_ddp_comp_register(drm_dev, &dpi->ddp_comp);
- if (ret < 0) {
- dev_err(dev, "Failed to register component %pOF: %d\n",
- dev->of_node, ret);
- return ret;
- }
-
ret = drm_simple_encoder_init(drm_dev, &dpi->encoder,
DRM_MODE_ENCODER_TMDS);
if (ret) {
dev_err(dev, "Failed to initialize decoder: %d\n", ret);
- goto err_unregister;
+ return ret;
}
- dpi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm_dev, dpi->ddp_comp);
+ dpi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm_dev, dpi->dev);
- ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL, 0);
+ ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret) {
dev_err(dev, "Failed to attach bridge: %d\n", ret);
goto err_cleanup;
}
+ dpi->connector = drm_bridge_connector_init(drm_dev, &dpi->encoder);
+ if (IS_ERR(dpi->connector)) {
+ dev_err(dev, "Unable to create bridge connector\n");
+ ret = PTR_ERR(dpi->connector);
+ goto err_cleanup;
+ }
+ drm_connector_attach_encoder(dpi->connector, &dpi->encoder);
+
dpi->bit_num = MTK_DPI_OUT_BIT_NUM_8BITS;
dpi->channel_swap = MTK_DPI_OUT_CHANNEL_SWAP_RGB;
dpi->yc_map = MTK_DPI_OUT_YC_MAP_RGB;
@@ -618,8 +617,6 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
err_cleanup:
drm_encoder_cleanup(&dpi->encoder);
-err_unregister:
- mtk_ddp_comp_unregister(drm_dev, &dpi->ddp_comp);
return ret;
}
@@ -627,10 +624,8 @@ static void mtk_dpi_unbind(struct device *dev, struct device *master,
void *data)
{
struct mtk_dpi *dpi = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
drm_encoder_cleanup(&dpi->encoder);
- mtk_ddp_comp_unregister(drm_dev, &dpi->ddp_comp);
}
static const struct component_ops mtk_dpi_component_ops = {
@@ -691,7 +686,6 @@ static int mtk_dpi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mtk_dpi *dpi;
struct resource *mem;
- int comp_id;
int ret;
dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
@@ -769,19 +763,6 @@ static int mtk_dpi_probe(struct platform_device *pdev)
dev_info(dev, "Found bridge node: %pOF\n", dpi->next_bridge->of_node);
- comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DPI);
- if (comp_id < 0) {
- dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
- return comp_id;
- }
-
- ret = mtk_ddp_comp_init(dev, dev->of_node, &dpi->ddp_comp, comp_id,
- &mtk_dpi_funcs);
- if (ret) {
- dev_err(dev, "Failed to initialize component: %d\n", ret);
- return ret;
- }
-
platform_set_drvdata(pdev, dpi);
dpi->bridge.funcs = &mtk_dpi_bridge_funcs;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index bdd37eadecd5..8b0de90156c6 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
+#include <linux/soc/mediatek/mtk-mutex.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>
@@ -19,7 +20,6 @@
#include "mtk_drm_drv.h"
#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_gem.h"
#include "mtk_drm_plane.h"
@@ -55,7 +55,7 @@ struct mtk_drm_crtc {
#endif
struct device *mmsys_dev;
- struct mtk_disp_mutex *mutex;
+ struct mtk_mutex *mutex;
unsigned int ddp_comp_nr;
struct mtk_ddp_comp **ddp_comp;
@@ -107,7 +107,7 @@ static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- mtk_disp_mutex_put(mtk_crtc->mutex);
+ mtk_mutex_put(mtk_crtc->mutex);
drm_crtc_cleanup(crtc);
}
@@ -169,31 +169,13 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
state->pending_config = true;
}
-static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
-{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
-
- mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base);
-
- return 0;
-}
-
-static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
-{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
-
- mtk_ddp_comp_disable_vblank(comp);
-}
-
static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
{
int ret;
int i;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
- ret = clk_prepare_enable(mtk_crtc->ddp_comp[i]->clk);
+ ret = mtk_ddp_comp_clk_enable(mtk_crtc->ddp_comp[i]);
if (ret) {
DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
goto err;
@@ -203,7 +185,7 @@ static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
return 0;
err:
while (--i >= 0)
- clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
+ mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
return ret;
}
@@ -212,7 +194,7 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
int i;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
- clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
+ mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
}
static
@@ -283,7 +265,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
return ret;
}
- ret = mtk_disp_mutex_prepare(mtk_crtc->mutex);
+ ret = mtk_mutex_prepare(mtk_crtc->mutex);
if (ret < 0) {
DRM_ERROR("Failed to enable mutex clock: %d\n", ret);
goto err_pm_runtime_put;
@@ -299,11 +281,11 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
mtk_crtc->ddp_comp[i]->id,
mtk_crtc->ddp_comp[i + 1]->id);
- mtk_disp_mutex_add_comp(mtk_crtc->mutex,
+ mtk_mutex_add_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
}
- mtk_disp_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
- mtk_disp_mutex_enable(mtk_crtc->mutex);
+ mtk_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
+ mtk_mutex_enable(mtk_crtc->mutex);
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];
@@ -332,7 +314,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
return 0;
err_mutex_unprepare:
- mtk_disp_mutex_unprepare(mtk_crtc->mutex);
+ mtk_mutex_unprepare(mtk_crtc->mutex);
err_pm_runtime_put:
pm_runtime_put(crtc->dev->dev);
return ret;
@@ -351,19 +333,19 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
}
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
- mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
+ mtk_mutex_remove_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
- mtk_disp_mutex_disable(mtk_crtc->mutex);
+ mtk_mutex_disable(mtk_crtc->mutex);
for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
mtk_crtc->ddp_comp[i]->id,
mtk_crtc->ddp_comp[i + 1]->id);
- mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
+ mtk_mutex_remove_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
}
- mtk_disp_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
+ mtk_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
mtk_crtc_ddp_clk_disable(mtk_crtc);
- mtk_disp_mutex_unprepare(mtk_crtc->mutex);
+ mtk_mutex_unprepare(mtk_crtc->mutex);
pm_runtime_put(drm->dev);
@@ -475,9 +457,9 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
mtk_crtc->pending_async_planes = true;
if (priv->data->shadow_register) {
- mtk_disp_mutex_acquire(mtk_crtc->mutex);
+ mtk_mutex_acquire(mtk_crtc->mutex);
mtk_crtc_ddp_config(crtc, NULL);
- mtk_disp_mutex_release(mtk_crtc->mutex);
+ mtk_mutex_release(mtk_crtc->mutex);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (mtk_crtc->cmdq_client) {
@@ -493,6 +475,40 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
mutex_unlock(&mtk_crtc->hw_lock);
}
+static void mtk_crtc_ddp_irq(void *data)
+{
+ struct drm_crtc *crtc = data;
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_drm_private *priv = crtc->dev->dev_private;
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
+#else
+ if (!priv->data->shadow_register)
+#endif
+ mtk_crtc_ddp_config(crtc, NULL);
+
+ mtk_drm_finish_page_flip(mtk_crtc);
+}
+
+static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
+
+ mtk_ddp_comp_enable_vblank(comp, mtk_crtc_ddp_irq, &mtk_crtc->base);
+
+ return 0;
+}
+
+static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
+
+ mtk_ddp_comp_disable_vblank(comp);
+}
+
int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
struct mtk_plane_state *state)
{
@@ -619,7 +635,6 @@ static const struct drm_crtc_funcs mtk_crtc_funcs = {
.reset = mtk_drm_crtc_reset,
.atomic_duplicate_state = mtk_drm_crtc_duplicate_state,
.atomic_destroy_state = mtk_drm_crtc_destroy_state,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.enable_vblank = mtk_drm_crtc_enable_vblank,
.disable_vblank = mtk_drm_crtc_disable_vblank,
};
@@ -662,21 +677,6 @@ err_cleanup_crtc:
return ret;
}
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
-{
- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_drm_private *priv = crtc->dev->dev_private;
-
-#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
-#else
- if (!priv->data->shadow_register)
-#endif
- mtk_crtc_ddp_config(crtc, NULL);
-
- mtk_drm_finish_page_flip(mtk_crtc);
-}
-
static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
int comp_idx)
{
@@ -772,7 +772,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
if (!mtk_crtc->ddp_comp)
return -ENOMEM;
- mtk_crtc->mutex = mtk_disp_mutex_get(priv->mutex_dev, pipe);
+ mtk_crtc->mutex = mtk_mutex_get(priv->mutex_dev);
if (IS_ERR(mtk_crtc->mutex)) {
ret = PTR_ERR(mtk_crtc->mutex);
dev_err(dev, "Failed to get mutex: %d\n", ret);
@@ -785,7 +785,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
struct device_node *node;
node = priv->comp_node[comp_id];
- comp = priv->ddp_comp[comp_id];
+ comp = &priv->ddp_comp[comp_id];
if (!comp) {
dev_err(dev, "Component %pOF not initialized\n", node);
ret = -ENODEV;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index a2b4677a451c..45cfd0a032de 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -15,7 +15,6 @@
#define MTK_MIN_BPC 3
void mtk_drm_crtc_commit(struct drm_crtc *crtc);
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp);
int mtk_drm_crtc_create(struct drm_device *drm_dev,
const enum mtk_ddp_comp_id *path,
unsigned int path_len);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
deleted file mode 100644
index 6b691a57be4a..000000000000
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015 MediaTek Inc.
- */
-
-#ifndef MTK_DRM_DDP_H
-#define MTK_DRM_DDP_H
-
-#include "mtk_drm_ddp_comp.h"
-
-struct regmap;
-struct device;
-struct mtk_disp_mutex;
-
-struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id);
-int mtk_disp_mutex_prepare(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
- enum mtk_ddp_comp_id id);
-void mtk_disp_mutex_enable(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_disable(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
- enum mtk_ddp_comp_id id);
-void mtk_disp_mutex_unprepare(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_put(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_acquire(struct mtk_disp_mutex *mutex);
-void mtk_disp_mutex_release(struct mtk_disp_mutex *mutex);
-
-#endif /* MTK_DRM_DDP_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 3064eac1a750..75bc00e17fc4 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -9,12 +9,12 @@
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <drm/drm_print.h>
+#include "mtk_disp_drv.h"
#include "mtk_drm_drv.h"
#include "mtk_drm_plane.h"
#include "mtk_drm_ddp_comp.h"
@@ -35,31 +35,13 @@
#define DISP_AAL_EN 0x0000
#define DISP_AAL_SIZE 0x0030
-#define DISP_CCORR_EN 0x0000
-#define CCORR_EN BIT(0)
-#define DISP_CCORR_CFG 0x0020
-#define CCORR_RELAY_MODE BIT(0)
-#define CCORR_ENGINE_EN BIT(1)
-#define CCORR_GAMMA_OFF BIT(2)
-#define CCORR_WGAMUT_SRC_CLIP BIT(3)
-#define DISP_CCORR_SIZE 0x0030
-#define DISP_CCORR_COEF_0 0x0080
-#define DISP_CCORR_COEF_1 0x0084
-#define DISP_CCORR_COEF_2 0x0088
-#define DISP_CCORR_COEF_3 0x008C
-#define DISP_CCORR_COEF_4 0x0090
-
#define DISP_DITHER_EN 0x0000
#define DITHER_EN BIT(0)
#define DISP_DITHER_CFG 0x0020
#define DITHER_RELAY_MODE BIT(0)
+#define DITHER_ENGINE_EN BIT(1)
#define DISP_DITHER_SIZE 0x0030
-#define DISP_GAMMA_EN 0x0000
-#define DISP_GAMMA_CFG 0x0020
-#define DISP_GAMMA_SIZE 0x0030
-#define DISP_GAMMA_LUT 0x0700
-
#define LUT_10BIT_MASK 0x03ff
#define OD_RELAYMODE BIT(0)
@@ -68,9 +50,6 @@
#define AAL_EN BIT(0)
-#define GAMMA_EN BIT(0)
-#define GAMMA_LUT_EN BIT(1)
-
#define DISP_DITHERING BIT(2)
#define DITHER_LSB_ERR_SHIFT_R(x) (((x) & 0x7) << 28)
#define DITHER_OVFLW_BIT_R(x) (((x) & 0x7) << 24)
@@ -86,262 +65,233 @@
#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4)
#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0)
+struct mtk_ddp_comp_dev {
+ struct clk *clk;
+ void __iomem *regs;
+ struct cmdq_client_reg cmdq_reg;
+};
+
void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
- struct mtk_ddp_comp *comp, unsigned int offset)
+ struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
+ unsigned int offset)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (cmdq_pkt)
- cmdq_pkt_write(cmdq_pkt, comp->subsys,
- comp->regs_pa + offset, value);
+ cmdq_pkt_write(cmdq_pkt, cmdq_reg->subsys,
+ cmdq_reg->offset + offset, value);
else
#endif
- writel(value, comp->regs + offset);
+ writel(value, regs + offset);
}
void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value,
- struct mtk_ddp_comp *comp,
+ struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
unsigned int offset)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (cmdq_pkt)
- cmdq_pkt_write(cmdq_pkt, comp->subsys,
- comp->regs_pa + offset, value);
+ cmdq_pkt_write(cmdq_pkt, cmdq_reg->subsys,
+ cmdq_reg->offset + offset, value);
else
#endif
- writel_relaxed(value, comp->regs + offset);
+ writel_relaxed(value, regs + offset);
}
-void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt,
- unsigned int value,
- struct mtk_ddp_comp *comp,
- unsigned int offset,
- unsigned int mask)
+void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value,
+ struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
+ unsigned int offset, unsigned int mask)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (cmdq_pkt) {
- cmdq_pkt_write_mask(cmdq_pkt, comp->subsys,
- comp->regs_pa + offset, value, mask);
+ cmdq_pkt_write_mask(cmdq_pkt, cmdq_reg->subsys,
+ cmdq_reg->offset + offset, value, mask);
} else {
#endif
- u32 tmp = readl(comp->regs + offset);
+ u32 tmp = readl(regs + offset);
tmp = (tmp & ~mask) | (value & mask);
- writel(tmp, comp->regs + offset);
+ writel(tmp, regs + offset);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
}
#endif
}
-void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
- unsigned int CFG, struct cmdq_pkt *cmdq_pkt)
+static int mtk_ddp_clk_enable(struct device *dev)
+{
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(priv->clk);
+}
+
+static void mtk_ddp_clk_disable(struct device *dev)
+{
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clk);
+}
+
+void mtk_dither_set_common(void __iomem *regs, struct cmdq_client_reg *cmdq_reg,
+ unsigned int bpc, unsigned int cfg,
+ unsigned int dither_en, struct cmdq_pkt *cmdq_pkt)
{
/* If bpc equal to 0, the dithering function didn't be enabled */
if (bpc == 0)
return;
if (bpc >= MTK_MIN_BPC) {
- mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_5);
- mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_7);
+ mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_DITHER_5);
+ mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_DITHER_7);
mtk_ddp_write(cmdq_pkt,
DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) |
DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) |
DITHER_NEW_BIT_MODE,
- comp, DISP_DITHER_15);
+ cmdq_reg, regs, DISP_DITHER_15);
mtk_ddp_write(cmdq_pkt,
DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) |
DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) |
DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) |
DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc),
- comp, DISP_DITHER_16);
- mtk_ddp_write(cmdq_pkt, DISP_DITHERING, comp, CFG);
+ cmdq_reg, regs, DISP_DITHER_16);
+ mtk_ddp_write(cmdq_pkt, dither_en, cmdq_reg, regs, cfg);
}
}
-static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh,
- unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+static void mtk_dither_set(struct device *dev, unsigned int bpc,
+ unsigned int cfg, struct cmdq_pkt *cmdq_pkt)
{
- mtk_ddp_write(cmdq_pkt, w << 16 | h, comp, DISP_OD_SIZE);
- mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, comp, DISP_OD_CFG);
- mtk_dither_set(comp, bpc, DISP_OD_CFG, cmdq_pkt);
-}
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-static void mtk_od_start(struct mtk_ddp_comp *comp)
-{
- writel(1, comp->regs + DISP_OD_EN);
+ mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, cfg,
+ DISP_DITHERING, cmdq_pkt);
}
-static void mtk_ufoe_start(struct mtk_ddp_comp *comp)
+static void mtk_od_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- writel(UFO_BYPASS, comp->regs + DISP_REG_UFO_START);
-}
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-static void mtk_aal_config(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh,
- unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
-{
- mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_AAL_SIZE);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_OD_SIZE);
+ mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, &priv->cmdq_reg, priv->regs, DISP_OD_CFG);
+ mtk_dither_set(dev, bpc, DISP_OD_CFG, cmdq_pkt);
}
-static void mtk_aal_start(struct mtk_ddp_comp *comp)
+static void mtk_od_start(struct device *dev)
{
- writel(AAL_EN, comp->regs + DISP_AAL_EN);
-}
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-static void mtk_aal_stop(struct mtk_ddp_comp *comp)
-{
- writel_relaxed(0x0, comp->regs + DISP_AAL_EN);
+ writel(1, priv->regs + DISP_OD_EN);
}
-static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh,
- unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+static void mtk_ufoe_start(struct device *dev)
{
- mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_CCORR_SIZE);
- mtk_ddp_write(cmdq_pkt, CCORR_ENGINE_EN, comp, DISP_CCORR_CFG);
-}
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-static void mtk_ccorr_start(struct mtk_ddp_comp *comp)
-{
- writel(CCORR_EN, comp->regs + DISP_CCORR_EN);
+ writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START);
}
-static void mtk_ccorr_stop(struct mtk_ddp_comp *comp)
+static void mtk_aal_config(struct device *dev, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- writel_relaxed(0x0, comp->regs + DISP_CCORR_EN);
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_SIZE);
}
-/* Converts a DRM S31.32 value to the HW S1.10 format. */
-static u16 mtk_ctm_s31_32_to_s1_10(u64 in)
+static void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state)
{
- u16 r;
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- /* Sign bit. */
- r = in & BIT_ULL(63) ? BIT(11) : 0;
+ mtk_gamma_set_common(priv->regs, state);
+}
- if ((in & GENMASK_ULL(62, 33)) > 0) {
- /* identity value 0x100000000 -> 0x400, */
- /* if bigger this, set it to max 0x7ff. */
- r |= GENMASK(10, 0);
- } else {
- /* take the 11 most important bits. */
- r |= (in >> 22) & GENMASK(10, 0);
- }
+static void mtk_aal_start(struct device *dev)
+{
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- return r;
+ writel(AAL_EN, priv->regs + DISP_AAL_EN);
}
-static void mtk_ccorr_ctm_set(struct mtk_ddp_comp *comp,
- struct drm_crtc_state *state)
+static void mtk_aal_stop(struct device *dev)
{
- struct drm_property_blob *blob = state->ctm;
- struct drm_color_ctm *ctm;
- const u64 *input;
- uint16_t coeffs[9] = { 0 };
- int i;
- struct cmdq_pkt *cmdq_pkt = NULL;
-
- if (!blob)
- return;
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- ctm = (struct drm_color_ctm *)blob->data;
- input = ctm->matrix;
-
- for (i = 0; i < ARRAY_SIZE(coeffs); i++)
- coeffs[i] = mtk_ctm_s31_32_to_s1_10(input[i]);
-
- mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1],
- comp, DISP_CCORR_COEF_0);
- mtk_ddp_write(cmdq_pkt, coeffs[2] << 16 | coeffs[3],
- comp, DISP_CCORR_COEF_1);
- mtk_ddp_write(cmdq_pkt, coeffs[4] << 16 | coeffs[5],
- comp, DISP_CCORR_COEF_2);
- mtk_ddp_write(cmdq_pkt, coeffs[6] << 16 | coeffs[7],
- comp, DISP_CCORR_COEF_3);
- mtk_ddp_write(cmdq_pkt, coeffs[8] << 16,
- comp, DISP_CCORR_COEF_4);
+ writel_relaxed(0x0, priv->regs + DISP_AAL_EN);
}
-static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w,
+static void mtk_dither_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
- mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_DITHER_SIZE);
- mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, comp, DISP_DITHER_CFG);
-}
-
-static void mtk_dither_start(struct mtk_ddp_comp *comp)
-{
- writel(DITHER_EN, comp->regs + DISP_DITHER_EN);
-}
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-static void mtk_dither_stop(struct mtk_ddp_comp *comp)
-{
- writel_relaxed(0x0, comp->regs + DISP_DITHER_EN);
+ mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_DITHER_SIZE);
+ mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs, DISP_DITHER_CFG);
+ mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_DITHER_CFG,
+ DITHER_ENGINE_EN, cmdq_pkt);
}
-static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh,
- unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+static void mtk_dither_start(struct device *dev)
{
- mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_GAMMA_SIZE);
- mtk_dither_set(comp, bpc, DISP_GAMMA_CFG, cmdq_pkt);
-}
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-static void mtk_gamma_start(struct mtk_ddp_comp *comp)
-{
- writel(GAMMA_EN, comp->regs + DISP_GAMMA_EN);
+ writel(DITHER_EN, priv->regs + DISP_DITHER_EN);
}
-static void mtk_gamma_stop(struct mtk_ddp_comp *comp)
+static void mtk_dither_stop(struct device *dev)
{
- writel_relaxed(0x0, comp->regs + DISP_GAMMA_EN);
-}
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
-static void mtk_gamma_set(struct mtk_ddp_comp *comp,
- struct drm_crtc_state *state)
-{
- unsigned int i, reg;
- struct drm_color_lut *lut;
- void __iomem *lut_base;
- u32 word;
-
- if (state->gamma_lut) {
- reg = readl(comp->regs + DISP_GAMMA_CFG);
- reg = reg | GAMMA_LUT_EN;
- writel(reg, comp->regs + DISP_GAMMA_CFG);
- lut_base = comp->regs + DISP_GAMMA_LUT;
- lut = (struct drm_color_lut *)state->gamma_lut->data;
- for (i = 0; i < MTK_LUT_SIZE; i++) {
- word = (((lut[i].red >> 6) & LUT_10BIT_MASK) << 20) +
- (((lut[i].green >> 6) & LUT_10BIT_MASK) << 10) +
- ((lut[i].blue >> 6) & LUT_10BIT_MASK);
- writel(word, (lut_base + i * 4));
- }
- }
+ writel_relaxed(0x0, priv->regs + DISP_DITHER_EN);
}
static const struct mtk_ddp_comp_funcs ddp_aal = {
- .gamma_set = mtk_gamma_set,
+ .clk_enable = mtk_ddp_clk_enable,
+ .clk_disable = mtk_ddp_clk_disable,
+ .gamma_set = mtk_aal_gamma_set,
.config = mtk_aal_config,
.start = mtk_aal_start,
.stop = mtk_aal_stop,
};
static const struct mtk_ddp_comp_funcs ddp_ccorr = {
+ .clk_enable = mtk_ccorr_clk_enable,
+ .clk_disable = mtk_ccorr_clk_disable,
.config = mtk_ccorr_config,
.start = mtk_ccorr_start,
.stop = mtk_ccorr_stop,
.ctm_set = mtk_ccorr_ctm_set,
};
+static const struct mtk_ddp_comp_funcs ddp_color = {
+ .clk_enable = mtk_color_clk_enable,
+ .clk_disable = mtk_color_clk_disable,
+ .config = mtk_color_config,
+ .start = mtk_color_start,
+};
+
static const struct mtk_ddp_comp_funcs ddp_dither = {
+ .clk_enable = mtk_ddp_clk_enable,
+ .clk_disable = mtk_ddp_clk_disable,
.config = mtk_dither_config,
.start = mtk_dither_start,
.stop = mtk_dither_stop,
};
+static const struct mtk_ddp_comp_funcs ddp_dpi = {
+ .start = mtk_dpi_start,
+ .stop = mtk_dpi_stop,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_dsi = {
+ .start = mtk_dsi_ddp_start,
+ .stop = mtk_dsi_ddp_stop,
+};
+
static const struct mtk_ddp_comp_funcs ddp_gamma = {
+ .clk_enable = mtk_gamma_clk_enable,
+ .clk_disable = mtk_gamma_clk_disable,
.gamma_set = mtk_gamma_set,
.config = mtk_gamma_config,
.start = mtk_gamma_start,
@@ -349,11 +299,43 @@ static const struct mtk_ddp_comp_funcs ddp_gamma = {
};
static const struct mtk_ddp_comp_funcs ddp_od = {
+ .clk_enable = mtk_ddp_clk_enable,
+ .clk_disable = mtk_ddp_clk_disable,
.config = mtk_od_config,
.start = mtk_od_start,
};
+static const struct mtk_ddp_comp_funcs ddp_ovl = {
+ .clk_enable = mtk_ovl_clk_enable,
+ .clk_disable = mtk_ovl_clk_disable,
+ .config = mtk_ovl_config,
+ .start = mtk_ovl_start,
+ .stop = mtk_ovl_stop,
+ .enable_vblank = mtk_ovl_enable_vblank,
+ .disable_vblank = mtk_ovl_disable_vblank,
+ .supported_rotations = mtk_ovl_supported_rotations,
+ .layer_nr = mtk_ovl_layer_nr,
+ .layer_check = mtk_ovl_layer_check,
+ .layer_config = mtk_ovl_layer_config,
+ .bgclr_in_on = mtk_ovl_bgclr_in_on,
+ .bgclr_in_off = mtk_ovl_bgclr_in_off,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_rdma = {
+ .clk_enable = mtk_rdma_clk_enable,
+ .clk_disable = mtk_rdma_clk_disable,
+ .config = mtk_rdma_config,
+ .start = mtk_rdma_start,
+ .stop = mtk_rdma_stop,
+ .enable_vblank = mtk_rdma_enable_vblank,
+ .disable_vblank = mtk_rdma_disable_vblank,
+ .layer_nr = mtk_rdma_layer_nr,
+ .layer_config = mtk_rdma_layer_config,
+};
+
static const struct mtk_ddp_comp_funcs ddp_ufoe = {
+ .clk_enable = mtk_ddp_clk_enable,
+ .clk_disable = mtk_ddp_clk_disable,
.start = mtk_ufoe_start,
};
@@ -387,36 +369,37 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal },
[DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
[DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr },
- [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, NULL },
- [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, NULL },
+ [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color },
+ [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color },
[DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither },
- [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
- [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, NULL },
- [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL },
- [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, NULL },
- [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, NULL },
- [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, NULL },
+ [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, &ddp_dpi },
+ [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, &ddp_dpi },
+ [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, &ddp_dsi },
+ [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, &ddp_dsi },
+ [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, &ddp_dsi },
+ [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, &ddp_dsi },
[DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma },
[DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od },
[DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od },
- [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, NULL },
- [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, NULL },
- [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, NULL },
- [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, NULL },
+ [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, &ddp_ovl },
+ [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, &ddp_ovl },
+ [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, &ddp_ovl },
+ [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, &ddp_ovl },
[DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL },
[DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL },
[DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL },
- [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, NULL },
- [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, NULL },
- [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, NULL },
+ [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, &ddp_rdma },
+ [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, &ddp_rdma },
+ [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, &ddp_rdma },
[DDP_COMPONENT_UFOE] = { MTK_DISP_UFOE, 0, &ddp_ufoe },
[DDP_COMPONENT_WDMA0] = { MTK_DISP_WDMA, 0, NULL },
[DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL },
};
-static bool mtk_drm_find_comp_in_ddp(struct mtk_ddp_comp ddp_comp,
+static bool mtk_drm_find_comp_in_ddp(struct device *dev,
const enum mtk_ddp_comp_id *path,
- unsigned int path_len)
+ unsigned int path_len,
+ struct mtk_ddp_comp *ddp_comp)
{
unsigned int i;
@@ -424,7 +407,7 @@ static bool mtk_drm_find_comp_in_ddp(struct mtk_ddp_comp ddp_comp,
return false;
for (i = 0U; i < path_len; i++)
- if (ddp_comp.id == path[i])
+ if (dev == ddp_comp[path[i]].dev)
return true;
return false;
@@ -446,18 +429,19 @@ int mtk_ddp_comp_get_id(struct device_node *node,
}
unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
- struct mtk_ddp_comp ddp_comp)
+ struct device *dev)
{
struct mtk_drm_private *private = drm->dev_private;
unsigned int ret = 0;
- if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->main_path, private->data->main_len))
+ if (mtk_drm_find_comp_in_ddp(dev, private->data->main_path, private->data->main_len,
+ private->ddp_comp))
ret = BIT(0);
- else if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->ext_path,
- private->data->ext_len))
+ else if (mtk_drm_find_comp_in_ddp(dev, private->data->ext_path,
+ private->data->ext_len, private->ddp_comp))
ret = BIT(1);
- else if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->third_path,
- private->data->third_len))
+ else if (mtk_drm_find_comp_in_ddp(dev, private->data->third_path,
+ private->data->third_len, private->ddp_comp))
ret = BIT(2);
else
DRM_INFO("Failed to find comp in ddp table\n");
@@ -465,59 +449,15 @@ unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
return ret;
}
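With the lookup keyed on a struct device, an encoder driver only needs its own device pointer to work out which CRTCs it can be wired to. A short usage sketch, assuming drm, dev and encoder are already in scope (the DSI bind path below does the same thing):

	encoder->possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dev);
	if (!encoder->possible_crtcs)
		DRM_WARN("device %s is not in any configured display path\n",
			 dev_name(dev));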
-int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
- struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id,
- const struct mtk_ddp_comp_funcs *funcs)
+static int mtk_ddp_get_larb_dev(struct device_node *node, struct mtk_ddp_comp *comp,
+ struct device *dev)
{
- enum mtk_ddp_comp_type type;
struct device_node *larb_node;
struct platform_device *larb_pdev;
-#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- struct resource res;
- struct cmdq_client_reg cmdq_reg;
- int ret;
-#endif
-
- if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
- return -EINVAL;
-
- type = mtk_ddp_matches[comp_id].type;
-
- comp->id = comp_id;
- comp->funcs = funcs ?: mtk_ddp_matches[comp_id].funcs;
-
- if (comp_id == DDP_COMPONENT_BLS ||
- comp_id == DDP_COMPONENT_DPI0 ||
- comp_id == DDP_COMPONENT_DPI1 ||
- comp_id == DDP_COMPONENT_DSI0 ||
- comp_id == DDP_COMPONENT_DSI1 ||
- comp_id == DDP_COMPONENT_DSI2 ||
- comp_id == DDP_COMPONENT_DSI3 ||
- comp_id == DDP_COMPONENT_PWM0) {
- comp->regs = NULL;
- comp->clk = NULL;
- comp->irq = 0;
- return 0;
- }
-
- comp->regs = of_iomap(node, 0);
- comp->irq = of_irq_get(node, 0);
- comp->clk = of_clk_get(node, 0);
- if (IS_ERR(comp->clk))
- return PTR_ERR(comp->clk);
-
- /* Only DMA capable components need the LARB property */
- comp->larb_dev = NULL;
- if (type != MTK_DISP_OVL &&
- type != MTK_DISP_OVL_2L &&
- type != MTK_DISP_RDMA &&
- type != MTK_DISP_WDMA)
- return 0;
larb_node = of_parse_phandle(node, "mediatek,larb", 0);
if (!larb_node) {
- dev_err(dev,
- "Missing mediadek,larb phandle in %pOF node\n", node);
+ dev_err(dev, "Missing mediatek,larb phandle in %pOF node\n", node);
return -EINVAL;
}
@@ -528,40 +468,71 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
return -EPROBE_DEFER;
}
of_node_put(larb_node);
-
comp->larb_dev = &larb_pdev->dev;
-#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- if (of_address_to_resource(node, 0, &res) != 0) {
- dev_err(dev, "Missing reg in %s node\n", node->full_name);
- put_device(&larb_pdev->dev);
- return -EINVAL;
- }
- comp->regs_pa = res.start;
-
- ret = cmdq_dev_get_client_reg(dev, &cmdq_reg, 0);
- if (ret)
- dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
- else
- comp->subsys = cmdq_reg.subsys;
-#endif
return 0;
}
-int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp)
+int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
+ enum mtk_ddp_comp_id comp_id)
{
- struct mtk_drm_private *private = drm->dev_private;
+ struct platform_device *comp_pdev;
+ enum mtk_ddp_comp_type type;
+ struct mtk_ddp_comp_dev *priv;
+ int ret;
- if (private->ddp_comp[comp->id])
- return -EBUSY;
+ if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
+ return -EINVAL;
- private->ddp_comp[comp->id] = comp;
- return 0;
-}
+ type = mtk_ddp_matches[comp_id].type;
-void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp)
-{
- struct mtk_drm_private *private = drm->dev_private;
+ comp->id = comp_id;
+ comp->funcs = mtk_ddp_matches[comp_id].funcs;
+ comp_pdev = of_find_device_by_node(node);
+ if (!comp_pdev) {
+ DRM_INFO("Waiting for device %s\n", node->full_name);
+ return -EPROBE_DEFER;
+ }
+ comp->dev = &comp_pdev->dev;
- private->ddp_comp[comp->id] = NULL;
+ /* Only DMA capable components need the LARB property */
+ if (type == MTK_DISP_OVL ||
+ type == MTK_DISP_OVL_2L ||
+ type == MTK_DISP_RDMA ||
+ type == MTK_DISP_WDMA) {
+ ret = mtk_ddp_get_larb_dev(node, comp, comp->dev);
+ if (ret)
+ return ret;
+ }
+
+ if (type == MTK_DISP_BLS ||
+ type == MTK_DISP_CCORR ||
+ type == MTK_DISP_COLOR ||
+ type == MTK_DISP_GAMMA ||
+ type == MTK_DPI ||
+ type == MTK_DSI ||
+ type == MTK_DISP_OVL ||
+ type == MTK_DISP_OVL_2L ||
+ type == MTK_DISP_PWM ||
+ type == MTK_DISP_RDMA)
+ return 0;
+
+ priv = devm_kzalloc(comp->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regs = of_iomap(node, 0);
+ priv->clk = of_clk_get(node, 0);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(comp->dev, &priv->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(comp->dev, "get mediatek,gce-client-reg fail!\n");
+#endif
+
+ platform_set_drvdata(comp_pdev, priv);
+
+ return 0;
}
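For the blocks that keep their own platform drivers (CCORR, COLOR, GAMMA, OVL, RDMA, DSI, DPI), mtk_ddp_comp_init() only records the device; the driver's probe is expected to stash its state in drvdata so the device-based callbacks can reach it. A minimal, hypothetical probe skeleton (my_foo_priv and mtk_disp_foo_component_ops are illustrative names only):

static int mtk_disp_foo_probe(struct platform_device *pdev)
{
	struct my_foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	/* Makes the state visible to dev_get_drvdata() in the ddp callbacks */
	platform_set_drvdata(pdev, priv);

	return component_add(&pdev->dev, &mtk_disp_foo_component_ops);
}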
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 5aa52b7afeec..bb914d976cf5 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -7,6 +7,7 @@
#define MTK_DRM_DDP_COMP_H
#include <linux/io.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
struct device;
@@ -39,79 +40,95 @@ enum mtk_ddp_comp_type {
struct mtk_ddp_comp;
struct cmdq_pkt;
struct mtk_ddp_comp_funcs {
- void (*config)(struct mtk_ddp_comp *comp, unsigned int w,
+ int (*clk_enable)(struct device *dev);
+ void (*clk_disable)(struct device *dev);
+ void (*config)(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
- void (*start)(struct mtk_ddp_comp *comp);
- void (*stop)(struct mtk_ddp_comp *comp);
- void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
- void (*disable_vblank)(struct mtk_ddp_comp *comp);
- unsigned int (*supported_rotations)(struct mtk_ddp_comp *comp);
- unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
- int (*layer_check)(struct mtk_ddp_comp *comp,
+ void (*start)(struct device *dev);
+ void (*stop)(struct device *dev);
+ void (*enable_vblank)(struct device *dev,
+ void (*vblank_cb)(void *),
+ void *vblank_cb_data);
+ void (*disable_vblank)(struct device *dev);
+ unsigned int (*supported_rotations)(struct device *dev);
+ unsigned int (*layer_nr)(struct device *dev);
+ int (*layer_check)(struct device *dev,
unsigned int idx,
struct mtk_plane_state *state);
- void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
+ void (*layer_config)(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt);
- void (*gamma_set)(struct mtk_ddp_comp *comp,
+ void (*gamma_set)(struct device *dev,
struct drm_crtc_state *state);
- void (*bgclr_in_on)(struct mtk_ddp_comp *comp);
- void (*bgclr_in_off)(struct mtk_ddp_comp *comp);
- void (*ctm_set)(struct mtk_ddp_comp *comp,
+ void (*bgclr_in_on)(struct device *dev);
+ void (*bgclr_in_off)(struct device *dev);
+ void (*ctm_set)(struct device *dev,
struct drm_crtc_state *state);
};
struct mtk_ddp_comp {
- struct clk *clk;
- void __iomem *regs;
+ struct device *dev;
int irq;
struct device *larb_dev;
enum mtk_ddp_comp_id id;
const struct mtk_ddp_comp_funcs *funcs;
- resource_size_t regs_pa;
- u8 subsys;
};
+static inline int mtk_ddp_comp_clk_enable(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->clk_enable)
+ return comp->funcs->clk_enable(comp->dev);
+
+ return 0;
+}
+
+static inline void mtk_ddp_comp_clk_disable(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->clk_disable)
+ comp->funcs->clk_disable(comp->dev);
+}
+
static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp,
unsigned int w, unsigned int h,
unsigned int vrefresh, unsigned int bpc,
struct cmdq_pkt *cmdq_pkt)
{
if (comp->funcs && comp->funcs->config)
- comp->funcs->config(comp, w, h, vrefresh, bpc, cmdq_pkt);
+ comp->funcs->config(comp->dev, w, h, vrefresh, bpc, cmdq_pkt);
}
static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->start)
- comp->funcs->start(comp);
+ comp->funcs->start(comp->dev);
}
static inline void mtk_ddp_comp_stop(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->stop)
- comp->funcs->stop(comp);
+ comp->funcs->stop(comp->dev);
}
static inline void mtk_ddp_comp_enable_vblank(struct mtk_ddp_comp *comp,
- struct drm_crtc *crtc)
+ void (*vblank_cb)(void *),
+ void *vblank_cb_data)
{
if (comp->funcs && comp->funcs->enable_vblank)
- comp->funcs->enable_vblank(comp, crtc);
+ comp->funcs->enable_vblank(comp->dev, vblank_cb, vblank_cb_data);
}
static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->disable_vblank)
- comp->funcs->disable_vblank(comp);
+ comp->funcs->disable_vblank(comp->dev);
}
static inline
unsigned int mtk_ddp_comp_supported_rotations(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->supported_rotations)
- return comp->funcs->supported_rotations(comp);
+ return comp->funcs->supported_rotations(comp->dev);
return 0;
}
@@ -119,7 +136,7 @@ unsigned int mtk_ddp_comp_supported_rotations(struct mtk_ddp_comp *comp)
static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->layer_nr)
- return comp->funcs->layer_nr(comp);
+ return comp->funcs->layer_nr(comp->dev);
return 0;
}
@@ -129,7 +146,7 @@ static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp,
struct mtk_plane_state *state)
{
if (comp->funcs && comp->funcs->layer_check)
- return comp->funcs->layer_check(comp, idx, state);
+ return comp->funcs->layer_check(comp->dev, idx, state);
return 0;
}
@@ -139,52 +156,49 @@ static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp,
struct cmdq_pkt *cmdq_pkt)
{
if (comp->funcs && comp->funcs->layer_config)
- comp->funcs->layer_config(comp, idx, state, cmdq_pkt);
+ comp->funcs->layer_config(comp->dev, idx, state, cmdq_pkt);
}
static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp,
struct drm_crtc_state *state)
{
if (comp->funcs && comp->funcs->gamma_set)
- comp->funcs->gamma_set(comp, state);
+ comp->funcs->gamma_set(comp->dev, state);
}
static inline void mtk_ddp_comp_bgclr_in_on(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->bgclr_in_on)
- comp->funcs->bgclr_in_on(comp);
+ comp->funcs->bgclr_in_on(comp->dev);
}
static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->bgclr_in_off)
- comp->funcs->bgclr_in_off(comp);
+ comp->funcs->bgclr_in_off(comp->dev);
}
static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp,
struct drm_crtc_state *state)
{
if (comp->funcs && comp->funcs->ctm_set)
- comp->funcs->ctm_set(comp, state);
+ comp->funcs->ctm_set(comp->dev, state);
}
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type);
unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
- struct mtk_ddp_comp ddp_comp);
-int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
- struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id,
- const struct mtk_ddp_comp_funcs *funcs);
-int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp);
-void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp);
-void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
- unsigned int CFG, struct cmdq_pkt *cmdq_pkt);
+ struct device *dev);
+int mtk_ddp_comp_init(struct device_node *comp_node, struct mtk_ddp_comp *comp,
+ enum mtk_ddp_comp_id comp_id);
enum mtk_ddp_comp_type mtk_ddp_comp_get_type(enum mtk_ddp_comp_id comp_id);
void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
- struct mtk_ddp_comp *comp, unsigned int offset);
+ struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
+ unsigned int offset);
void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value,
- struct mtk_ddp_comp *comp, unsigned int offset);
+ struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
+ unsigned int offset);
void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value,
- struct mtk_ddp_comp *comp, unsigned int offset,
- unsigned int mask);
+ struct cmdq_client_reg *cmdq_reg, void __iomem *regs,
+ unsigned int offset, unsigned int mask);
#endif /* MTK_DRM_DDP_COMP_H */
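The new clk_enable/clk_disable hooks are dispatched through the same NULL-guarded inline wrappers as the other callbacks. A hedged sketch of how a CRTC might bring up the clocks of a whole component chain and unwind on failure (the component array and count are assumed to come from the CRTC's display path):

static int example_crtc_clk_enable_all(struct mtk_ddp_comp **comp, unsigned int n)
{
	unsigned int i;
	int ret;

	for (i = 0; i < n; i++) {
		ret = mtk_ddp_comp_clk_enable(comp[i]);
		if (ret) {
			/* roll back the components already enabled */
			while (i--)
				mtk_ddp_comp_clk_disable(comp[i]);
			return ret;
		}
	}
	return 0;
}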
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 2f717df28a77..b013d56d2777 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -10,7 +10,6 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
-#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/dma-mapping.h>
#include <drm/drm_atomic.h>
@@ -26,7 +25,6 @@
#include <drm/drm_vblank.h>
#include "mtk_drm_crtc.h"
-#include "mtk_drm_ddp.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"
@@ -131,6 +129,24 @@ static const enum mtk_ddp_comp_id mt8173_mtk_ddp_ext[] = {
DDP_COMPONENT_DPI0,
};
+static const enum mtk_ddp_comp_id mt8183_mtk_ddp_main[] = {
+ DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_OVL_2L0,
+ DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_COLOR0,
+ DDP_COMPONENT_CCORR,
+ DDP_COMPONENT_AAL0,
+ DDP_COMPONENT_GAMMA,
+ DDP_COMPONENT_DITHER,
+ DDP_COMPONENT_DSI0,
+};
+
+static const enum mtk_ddp_comp_id mt8183_mtk_ddp_ext[] = {
+ DDP_COMPONENT_OVL_2L1,
+ DDP_COMPONENT_RDMA1,
+ DDP_COMPONENT_DPI0,
+};
+
static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
.main_path = mt2701_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt2701_mtk_ddp_main),
@@ -163,6 +179,13 @@ static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
.ext_len = ARRAY_SIZE(mt8173_mtk_ddp_ext),
};
+static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
+ .main_path = mt8183_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt8183_mtk_ddp_main),
+ .ext_path = mt8183_mtk_ddp_ext,
+ .ext_len = ARRAY_SIZE(mt8183_mtk_ddp_ext),
+};
+
static int mtk_drm_kms_init(struct drm_device *drm)
{
struct mtk_drm_private *private = drm->dev_private;
@@ -377,12 +400,20 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DISP_OVL },
{ .compatible = "mediatek,mt8173-disp-ovl",
.data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8183-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8183-disp-ovl-2l",
+ .data = (void *)MTK_DISP_OVL_2L },
{ .compatible = "mediatek,mt2701-disp-rdma",
.data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8173-disp-rdma",
.data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8183-disp-rdma",
+ .data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8173-disp-wdma",
.data = (void *)MTK_DISP_WDMA },
+ { .compatible = "mediatek,mt8183-disp-ccorr",
+ .data = (void *)MTK_DISP_CCORR },
{ .compatible = "mediatek,mt2701-disp-color",
.data = (void *)MTK_DISP_COLOR },
{ .compatible = "mediatek,mt8173-disp-color",
@@ -391,22 +422,32 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DISP_AAL},
{ .compatible = "mediatek,mt8173-disp-gamma",
.data = (void *)MTK_DISP_GAMMA, },
+ { .compatible = "mediatek,mt8183-disp-gamma",
+ .data = (void *)MTK_DISP_GAMMA, },
+ { .compatible = "mediatek,mt8183-disp-dither",
+ .data = (void *)MTK_DISP_DITHER },
{ .compatible = "mediatek,mt8173-disp-ufoe",
.data = (void *)MTK_DISP_UFOE },
{ .compatible = "mediatek,mt2701-dsi",
.data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8173-dsi",
.data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt8183-dsi",
+ .data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt2701-dpi",
.data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt8173-dpi",
.data = (void *)MTK_DPI },
+ { .compatible = "mediatek,mt8183-dpi",
+ .data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt2701-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt2712-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8173-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt8183-disp-mutex",
+ .data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt2701-disp-pwm",
.data = (void *)MTK_DISP_BLS },
{ .compatible = "mediatek,mt8173-disp-pwm",
@@ -425,6 +466,8 @@ static const struct of_device_id mtk_drm_of_ids[] = {
.data = &mt2712_mmsys_driver_data},
{ .compatible = "mediatek,mt8173-mmsys",
.data = &mt8173_mmsys_driver_data},
+ { .compatible = "mediatek,mt8183-mmsys",
+ .data = &mt8183_mmsys_driver_data},
{ }
};
@@ -488,11 +531,13 @@ static int mtk_drm_probe(struct platform_device *pdev)
private->comp_node[comp_id] = of_node_get(node);
/*
- * Currently only the COLOR, OVL, RDMA, DSI, and DPI blocks have
- * separate component platform drivers and initialize their own
+ * Currently only the CCORR, COLOR, GAMMA, OVL, RDMA, DSI, and DPI
+ * blocks have separate component platform drivers and initialize their own
* DDP component structure. The others are initialized here.
*/
- if (comp_type == MTK_DISP_COLOR ||
+ if (comp_type == MTK_DISP_CCORR ||
+ comp_type == MTK_DISP_COLOR ||
+ comp_type == MTK_DISP_GAMMA ||
comp_type == MTK_DISP_OVL ||
comp_type == MTK_DISP_OVL_2L ||
comp_type == MTK_DISP_RDMA ||
@@ -502,24 +547,12 @@ static int mtk_drm_probe(struct platform_device *pdev)
node);
drm_of_component_match_add(dev, &match, compare_of,
node);
- } else {
- struct mtk_ddp_comp *comp;
-
- comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
- if (!comp) {
- ret = -ENOMEM;
- of_node_put(node);
- goto err_node;
- }
-
- ret = mtk_ddp_comp_init(dev->parent, node, comp,
- comp_id, NULL);
- if (ret) {
- of_node_put(node);
- goto err_node;
- }
-
- private->ddp_comp[comp_id] = comp;
+ }
+
+ ret = mtk_ddp_comp_init(node, &private->ddp_comp[comp_id], comp_id);
+ if (ret) {
+ of_node_put(node);
+ goto err_node;
}
}
@@ -545,10 +578,8 @@ err_node:
of_node_put(private->mutex_node);
for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) {
of_node_put(private->comp_node[i]);
- if (private->ddp_comp[i]) {
- put_device(private->ddp_comp[i]->larb_dev);
- private->ddp_comp[i] = NULL;
- }
+ if (private->ddp_comp[i].larb_dev)
+ put_device(private->ddp_comp[i].larb_dev);
}
return ret;
}
@@ -604,8 +635,9 @@ static struct platform_driver mtk_drm_platform_driver = {
};
static struct platform_driver * const mtk_drm_drivers[] = {
- &mtk_ddp_driver,
+ &mtk_disp_ccorr_driver,
&mtk_disp_color_driver,
+ &mtk_disp_gamma_driver,
&mtk_disp_ovl_driver,
&mtk_disp_rdma_driver,
&mtk_dpi_driver,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 5d771cf0bf25..637f5669e895 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -41,13 +41,14 @@ struct mtk_drm_private {
struct device *mutex_dev;
struct device *mmsys_dev;
struct device_node *comp_node[DDP_COMPONENT_ID_MAX];
- struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX];
+ struct mtk_ddp_comp ddp_comp[DDP_COMPONENT_ID_MAX];
const struct mtk_mmsys_driver_data *data;
struct drm_atomic_state *suspend_state;
};
-extern struct platform_driver mtk_ddp_driver;
+extern struct platform_driver mtk_disp_ccorr_driver;
extern struct platform_driver mtk_disp_color_driver;
+extern struct platform_driver mtk_disp_gamma_driver;
extern struct platform_driver mtk_disp_ovl_driver;
extern struct platform_driver mtk_disp_rdma_driver;
extern struct platform_driver mtk_dpi_driver;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 28a2ee1336ef..280ea0d5e840 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -260,7 +260,7 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
return -ENOMEM;
}
- drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages);
+ drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages);
mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 65fd99c528af..a1ff152ef468 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -25,6 +25,7 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#include "mtk_disp_drv.h"
#include "mtk_drm_ddp_comp.h"
#define DSI_START 0x00
@@ -178,7 +179,6 @@ struct mtk_dsi_driver_data {
};
struct mtk_dsi {
- struct mtk_ddp_comp ddp_comp;
struct device *dev;
struct mipi_dsi_host host;
struct drm_encoder encoder;
@@ -767,25 +767,20 @@ static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
.mode_set = mtk_dsi_bridge_mode_set,
};
-static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
+void mtk_dsi_ddp_start(struct device *dev)
{
- struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);
+ struct mtk_dsi *dsi = dev_get_drvdata(dev);
mtk_dsi_poweron(dsi);
}
-static void mtk_dsi_ddp_stop(struct mtk_ddp_comp *comp)
+void mtk_dsi_ddp_stop(struct device *dev)
{
- struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);
+ struct mtk_dsi *dsi = dev_get_drvdata(dev);
mtk_dsi_poweroff(dsi);
}
-static const struct mtk_ddp_comp_funcs mtk_dsi_funcs = {
- .start = mtk_dsi_ddp_start,
- .stop = mtk_dsi_ddp_stop,
-};
-
static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
@@ -952,7 +947,7 @@ static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
return ret;
}
- dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->ddp_comp);
+ dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
@@ -980,32 +975,17 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm = data;
struct mtk_dsi *dsi = dev_get_drvdata(dev);
- ret = mtk_ddp_comp_register(drm, &dsi->ddp_comp);
- if (ret < 0) {
- dev_err(dev, "Failed to register component %pOF: %d\n",
- dev->of_node, ret);
- return ret;
- }
-
ret = mtk_dsi_encoder_init(drm, dsi);
- if (ret)
- goto err_unregister;
- return 0;
-
-err_unregister:
- mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
return ret;
}
static void mtk_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
- struct drm_device *drm = data;
struct mtk_dsi *dsi = dev_get_drvdata(dev);
drm_encoder_cleanup(&dsi->encoder);
- mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
}
static const struct component_ops mtk_dsi_component_ops = {
@@ -1020,7 +1000,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
struct drm_panel *panel;
struct resource *regs;
int irq_num;
- int comp_id;
int ret;
dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
@@ -1090,20 +1069,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
goto err_unregister_host;
}
- comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI);
- if (comp_id < 0) {
- dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
- ret = comp_id;
- goto err_unregister_host;
- }
-
- ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id,
- &mtk_dsi_funcs);
- if (ret) {
- dev_err(dev, "Failed to initialize component: %d\n", ret);
- goto err_unregister_host;
- }
-
irq_num = platform_get_irq(pdev, 0);
if (irq_num < 0) {
dev_err(&pdev->dev, "failed to get dsi irq_num: %d\n", irq_num);
@@ -1111,9 +1076,8 @@ static int mtk_dsi_probe(struct platform_device *pdev)
goto err_unregister_host;
}
- irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq,
- IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
+ IRQF_TRIGGER_NONE, dev_name(&pdev->dev), dsi);
if (ret) {
dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
goto err_unregister_host;
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 6ccd270789c6..4fd4de16cd32 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file mga_ioc32.c
*
* 32-bit ioctl compatibility routines for the MGA DRM.
@@ -159,13 +159,13 @@ static struct {
};
/**
- * Called whenever a 32-bit process running under a 64-bit kernel
- * performs an ioctl on /dev/dri/card<n>.
+ * mga_compat_ioctl - Called whenever a 32-bit process running under
+ * a 64-bit kernel performs an ioctl on /dev/dri/card<n>.
*
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
+ * @filp: file pointer.
+ * @cmd: command.
+ * @arg: user argument.
+ * Return: zero on success or negative number on failure.
*/
long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index a977c9f49719..4e4c105f9a50 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -47,10 +47,11 @@ static const struct drm_driver mgag200_driver = {
static bool mgag200_has_sgram(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 option;
int ret;
- ret = pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
+ ret = pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
if (drm_WARN(dev, ret, "failed to read PCI config dword: %d\n", ret))
return false;
@@ -60,6 +61,7 @@ static bool mgag200_has_sgram(struct mga_device *mdev)
static int mgag200_regs_init(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 option, option2;
u8 crtcext3;
@@ -99,13 +101,13 @@ static int mgag200_regs_init(struct mga_device *mdev)
}
if (option)
- pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
+ pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
if (option2)
- pci_write_config_dword(dev->pdev, PCI_MGA_OPTION2, option2);
+ pci_write_config_dword(pdev, PCI_MGA_OPTION2, option2);
/* BAR 1 contains registers */
- mdev->rmmio_base = pci_resource_start(dev->pdev, 1);
- mdev->rmmio_size = pci_resource_len(dev->pdev, 1);
+ mdev->rmmio_base = pci_resource_start(pdev, 1);
+ mdev->rmmio_size = pci_resource_len(pdev, 1);
if (!devm_request_mem_region(dev->dev, mdev->rmmio_base,
mdev->rmmio_size, "mgadrmfb_mmio")) {
@@ -113,7 +115,7 @@ static int mgag200_regs_init(struct mga_device *mdev)
return -ENOMEM;
}
- mdev->rmmio = pcim_iomap(dev->pdev, 1, 0);
+ mdev->rmmio = pcim_iomap(pdev, 1, 0);
if (mdev->rmmio == NULL)
return -ENOMEM;
@@ -218,6 +220,7 @@ static void mgag200_g200_interpret_bios(struct mga_device *mdev,
static void mgag200_g200_init_refclk(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned char __iomem *rom;
unsigned char *bios;
size_t size;
@@ -226,7 +229,7 @@ static void mgag200_g200_init_refclk(struct mga_device *mdev)
mdev->model.g200.pclk_max = 230000;
mdev->model.g200.ref_clk = 27050;
- rom = pci_map_rom(dev->pdev, &size);
+ rom = pci_map_rom(pdev, &size);
if (!rom)
return;
@@ -244,7 +247,7 @@ static void mgag200_g200_init_refclk(struct mga_device *mdev)
vfree(bios);
out:
- pci_unmap_rom(dev->pdev, rom);
+ pci_unmap_rom(pdev, rom);
}
static void mgag200_g200se_init_unique_id(struct mga_device *mdev)
@@ -301,7 +304,6 @@ mgag200_device_create(struct pci_dev *pdev, unsigned long flags)
return mdev;
dev = &mdev->base;
- dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
ret = mgag200_device_init(mdev, flags);
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 09731e614e46..ac8e34eef513 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -126,7 +126,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
i2c->clock = clock;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
- i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "mga i2c");
diff --git a/drivers/gpu/drm/mgag200/mgag200_mm.c b/drivers/gpu/drm/mgag200/mgag200_mm.c
index 641f1aa992be..b667371b69a4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mm.c
@@ -78,11 +78,12 @@ static size_t mgag200_probe_vram(struct mga_device *mdev, void __iomem *mem,
static void mgag200_mm_release(struct drm_device *dev, void *ptr)
{
struct mga_device *mdev = to_mga_device(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
mdev->vram_fb_available = 0;
iounmap(mdev->vram);
- arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
+ arch_io_free_memtype_wc(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
arch_phys_wc_del(mdev->fb_mtrr);
mdev->fb_mtrr = 0;
}
@@ -90,6 +91,7 @@ static void mgag200_mm_release(struct drm_device *dev, void *ptr)
int mgag200_mm_init(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 misc;
resource_size_t start, len;
int ret;
@@ -102,8 +104,8 @@ int mgag200_mm_init(struct mga_device *mdev)
WREG8(MGA_MISC_OUT, misc);
/* BAR 0 is VRAM */
- start = pci_resource_start(dev->pdev, 0);
- len = pci_resource_len(dev->pdev, 0);
+ start = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
if (!devm_request_mem_region(dev->dev, start, len, "mgadrmfb_vram")) {
drm_err(dev, "can't reserve VRAM\n");
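The mgag200 hunks all follow one conversion: struct drm_device no longer carries a pdev member, so PCI-only code derives the pci_dev from the embedded struct device instead. A condensed sketch of that pattern, with variable names as in the hunks above:

	struct pci_dev *pdev = to_pci_dev(dev->dev);	/* dev is the drm_device */
	resource_size_t start, len;

	start = pci_resource_start(pdev, 0);		/* BAR 0: VRAM */
	len = pci_resource_len(pdev, 0);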
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index 346cc6ff3a36..7b9fcfe95c04 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -2367,6 +2367,8 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80
+#define REG_A5XX_UCHE_MODE_CNTL 0x00000e81
+
#define REG_A5XX_UCHE_SVM_CNTL 0x00000e82
#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index a5af223eaf50..7e553d3efeb2 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -222,7 +222,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
a5xx_preempt_trigger(gpu);
}
-static const struct {
+static const struct adreno_five_hwcg_regs {
u32 offset;
u32 value;
} a5xx_hwcg[] = {
@@ -318,16 +318,124 @@ static const struct {
{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
+}, a50x_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00FFFFF4},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+}, a512_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
};
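Giving the previously anonymous struct a tag (adreno_five_hwcg_regs) lets the three per-SKU tables be declared in one comma-separated definition and addressed through a common pointer. The same idiom in miniature (use_b, write_reg and the offsets are placeholders, not driver code):

struct reg_val {
	u32 offset;
	u32 value;
};

static const struct reg_val table_a[] = {
	{ 0x100, 0x1 },
}, table_b[] = {
	{ 0x100, 0x2 },
	{ 0x104, 0x3 },
};

/* pick a table at runtime, then walk it with one loop */
const struct reg_val *regs = use_b ? table_b : table_a;
unsigned int i, sz = use_b ? ARRAY_SIZE(table_b) : ARRAY_SIZE(table_a);

for (i = 0; i < sz; i++)
	write_reg(regs[i].offset, regs[i].value);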
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- unsigned int i;
+ const struct adreno_five_hwcg_regs *regs;
+ unsigned int i, sz;
+
+ if (adreno_is_a508(adreno_gpu)) {
+ regs = a50x_hwcg;
+ sz = ARRAY_SIZE(a50x_hwcg);
+ } else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) {
+ regs = a512_hwcg;
+ sz = ARRAY_SIZE(a512_hwcg);
+ } else {
+ regs = a5xx_hwcg;
+ sz = ARRAY_SIZE(a5xx_hwcg);
+ }
- for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
- gpu_write(gpu, a5xx_hwcg[i].offset,
- state ? a5xx_hwcg[i].value : 0);
+ for (i = 0; i < sz; i++)
+ gpu_write(gpu, regs[i].offset,
+ state ? regs[i].value : 0);
if (adreno_is_a540(adreno_gpu)) {
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0);
@@ -538,11 +646,13 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ u32 regbit;
int ret;
gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
- if (adreno_is_a540(adreno_gpu))
+ if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
+ adreno_is_a540(adreno_gpu))
gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
/* Make all blocks contribute to the GPU BUSY perf counter */
@@ -604,29 +714,48 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
0x00100000 + adreno_gpu->gmem - 1);
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
- if (adreno_is_a510(adreno_gpu)) {
+ if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu)) {
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
- gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
+ if (adreno_is_a508(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
+ else
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
- gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
- (0x200 << 11 | 0x200 << 22));
} else {
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
if (adreno_is_a530(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
- if (adreno_is_a540(adreno_gpu))
+ else
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+ }
+
+ if (adreno_is_a508(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+ (0x100 << 11 | 0x100 << 22));
+ else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) ||
+ adreno_is_a512(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+ (0x200 << 11 | 0x200 << 22));
+ else
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
(0x400 << 11 | 0x300 << 22));
- }
if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
- gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
+ /*
+ * Disable the RB sampler datapath DP2 clock gating optimization
+ * for 1-SP GPUs, as it is enabled by default.
+ */
+ if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
+ adreno_is_a512(adreno_gpu))
+ gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9));
+
+ /* Disable UCHE global filter as SP can invalidate/flush independently */
+ gpu_write(gpu, REG_A5XX_UCHE_MODE_CNTL, BIT(29));
/* Enable USE_RETENTION_FLOPS */
gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
@@ -653,10 +782,20 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
/* Set the highest bank bit */
- gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
- gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);
if (adreno_is_a540(adreno_gpu))
- gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, 2);
+ regbit = 2;
+ else
+ regbit = 1;
+
+ gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7);
+ gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1);
+
+ if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
+ adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit);
+
+ /* Disable All flat shading optimization (ALLFLATOPTDIS) */
+ gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10));
/* Protect registers from the CP */
gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
@@ -688,12 +827,14 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
/* VPC */
gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
- gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 16));
/* UCHE */
gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
- if (adreno_is_a530(adreno_gpu) || adreno_is_a510(adreno_gpu))
+ if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
+ adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
+ adreno_is_a530(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
ADRENO_PROTECT_RW(0x10000, 0x8000));
@@ -735,7 +876,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
- if (!adreno_is_a510(adreno_gpu))
+ if (!(adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
+ adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu)))
a5xx_gpmu_ucode_init(gpu);
ret = a5xx_ucode_init(gpu);
@@ -1168,7 +1310,8 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
- if (adreno_is_a510(adreno_gpu)) {
+ /* Adreno 508, 509, 510 and 512 need manual RBBM suspend/resume control */
+ if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) {
/* Halt the sp_input_clk at HM level */
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
a5xx_set_hwcg(gpu, true);
@@ -1210,8 +1353,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
u32 mask = 0xf;
int i, ret;
- /* A510 has 3 XIN ports in VBIF */
- if (adreno_is_a510(adreno_gpu))
+ /* A508, A510 have 3 XIN ports in VBIF */
+ if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu))
mask = 0x7;
/* Clear the VBIF pipe before shutting down */
@@ -1223,10 +1366,12 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
/*
* Reset the VBIF before power collapse to avoid issue with FIFO
- * entries
+ * entries on Adreno A510 and A530 (the others will tend to lock up)
*/
- gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
- gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
+ if (adreno_is_a510(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
+ }
ret = msm_gpu_pm_suspend(gpu);
if (ret)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index f176a6f3eff6..5ccc9da455a1 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -298,7 +298,7 @@ int a5xx_power_init(struct msm_gpu *gpu)
int ret;
/* Not all A5xx chips have a GPMU */
- if (adreno_is_a510(adreno_gpu))
+ if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
return 0;
/* Set up the limits management */
@@ -330,7 +330,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
unsigned int *data, *ptr, *cmds;
unsigned int cmds_size;
- if (adreno_is_a510(adreno_gpu))
+ if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
return;
if (a5xx_gpu->gpmu_bo)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index e6703ae98760..71c917f909af 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -134,7 +134,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
if (!gmu->legacy) {
a6xx_hfi_set_freq(gmu, perf_index);
- dev_pm_opp_set_bw(&gpu->pdev->dev, opp);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
pm_runtime_put(gmu->dev);
return;
}
@@ -158,7 +158,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
if (ret)
dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
- dev_pm_opp_set_bw(&gpu->pdev->dev, opp);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
pm_runtime_put(gmu->dev);
}
@@ -245,37 +245,66 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
return ret;
}
+struct a6xx_gmu_oob_bits {
+ int set, ack, set_new, ack_new;
+ const char *name;
+};
+
+/* These are the interrupt / ack bits for each OOB request that are set
+ * in a6xx_gmu_set_oob and a6xx_gmu_clear_oob
+ */
+static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
+ [GMU_OOB_GPU_SET] = {
+ .name = "GPU_SET",
+ .set = 16,
+ .ack = 24,
+ .set_new = 30,
+ .ack_new = 31,
+ },
+
+ [GMU_OOB_PERFCOUNTER_SET] = {
+ .name = "PERFCOUNTER",
+ .set = 17,
+ .ack = 25,
+ .set_new = 28,
+ .ack_new = 30,
+ },
+
+ [GMU_OOB_BOOT_SLUMBER] = {
+ .name = "BOOT_SLUMBER",
+ .set = 22,
+ .ack = 30,
+ },
+
+ [GMU_OOB_DCVS_SET] = {
+ .name = "GPU_DCVS",
+ .set = 23,
+ .ack = 31,
+ },
+};
+
/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
int ret;
u32 val;
int request, ack;
- const char *name;
- switch (state) {
- case GMU_OOB_GPU_SET:
- if (gmu->legacy) {
- request = GMU_OOB_GPU_SET_REQUEST;
- ack = GMU_OOB_GPU_SET_ACK;
- } else {
- request = GMU_OOB_GPU_SET_REQUEST_NEW;
- ack = GMU_OOB_GPU_SET_ACK_NEW;
- }
- name = "GPU_SET";
- break;
- case GMU_OOB_BOOT_SLUMBER:
- request = GMU_OOB_BOOT_SLUMBER_REQUEST;
- ack = GMU_OOB_BOOT_SLUMBER_ACK;
- name = "BOOT_SLUMBER";
- break;
- case GMU_OOB_DCVS_SET:
- request = GMU_OOB_DCVS_REQUEST;
- ack = GMU_OOB_DCVS_ACK;
- name = "GPU_DCVS";
- break;
- default:
+ if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
return -EINVAL;
+
+ if (gmu->legacy) {
+ request = a6xx_gmu_oob_bits[state].set;
+ ack = a6xx_gmu_oob_bits[state].ack;
+ } else {
+ request = a6xx_gmu_oob_bits[state].set_new;
+ ack = a6xx_gmu_oob_bits[state].ack_new;
+ if (!request || !ack) {
+ DRM_DEV_ERROR(gmu->dev,
+ "Invalid non-legacy GMU request %s\n",
+ a6xx_gmu_oob_bits[state].name);
+ return -EINVAL;
+ }
}
/* Trigger the requested OOB operation */
@@ -288,7 +317,7 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
if (ret)
DRM_DEV_ERROR(gmu->dev,
"Timeout waiting for GMU OOB set %s: 0x%x\n",
- name,
+ a6xx_gmu_oob_bits[state].name,
gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
/* Clear the acknowledge interrupt */
@@ -300,27 +329,17 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
- if (!gmu->legacy) {
- WARN_ON(state != GMU_OOB_GPU_SET);
- gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
- 1 << GMU_OOB_GPU_SET_CLEAR_NEW);
+ int bit;
+
+ if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
return;
- }
- switch (state) {
- case GMU_OOB_GPU_SET:
- gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
- 1 << GMU_OOB_GPU_SET_CLEAR);
- break;
- case GMU_OOB_BOOT_SLUMBER:
- gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
- 1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
- break;
- case GMU_OOB_DCVS_SET:
- gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
- 1 << GMU_OOB_DCVS_CLEAR);
- break;
- }
+ if (gmu->legacy)
+ bit = a6xx_gmu_oob_bits[state].ack;
+ else
+ bit = a6xx_gmu_oob_bits[state].ack_new;
+
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
}
/* Enable CPU control of SPTP power power collapse */
@@ -866,7 +885,7 @@ static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
if (IS_ERR_OR_NULL(gpu_opp))
return;
- dev_pm_opp_set_bw(&gpu->pdev->dev, gpu_opp);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
dev_pm_opp_put(gpu_opp);
}
@@ -1072,7 +1091,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
a6xx_gmu_shutdown(gmu);
/* Remove the bus vote */
- dev_pm_opp_set_bw(&gpu->pdev->dev, NULL);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
/*
* Make sure the GX domain is off before turning off the GMU (CX)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index c6d2bced8e5d..71dfa60070cc 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -153,44 +153,27 @@ static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
*/
enum a6xx_gmu_oob_state {
+ /*
+ * Let the GMU know that a boot or slumber operation has started. The value in
+ * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are
+ * doing
+ */
GMU_OOB_BOOT_SLUMBER = 0,
+ /*
+ * Let the GMU know to not turn off any GPU registers while the CPU is in a
+ * critical section
+ */
GMU_OOB_GPU_SET,
+ /*
+ * Set a new power level for the GPU when the CPU is doing frequency scaling
+ */
GMU_OOB_DCVS_SET,
+ /*
+ * Used to keep the GPU on for CPU-side reads of performance counters.
+ */
+ GMU_OOB_PERFCOUNTER_SET,
};
-/* These are the interrupt / ack bits for each OOB request that are set
- * in a6xx_gmu_set_oob and a6xx_clear_oob
- */
-
-/*
- * Let the GMU know that a boot or slumber operation has started. The value in
- * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are
- * doing
- */
-#define GMU_OOB_BOOT_SLUMBER_REQUEST 22
-#define GMU_OOB_BOOT_SLUMBER_ACK 30
-#define GMU_OOB_BOOT_SLUMBER_CLEAR 30
-
-/*
- * Set a new power level for the GPU when the CPU is doing frequency scaling
- */
-#define GMU_OOB_DCVS_REQUEST 23
-#define GMU_OOB_DCVS_ACK 31
-#define GMU_OOB_DCVS_CLEAR 31
-
-/*
- * Let the GMU know to not turn off any GPU registers while the CPU is in a
- * critical section
- */
-#define GMU_OOB_GPU_SET_REQUEST 16
-#define GMU_OOB_GPU_SET_ACK 24
-#define GMU_OOB_GPU_SET_CLEAR 24
-
-#define GMU_OOB_GPU_SET_REQUEST_NEW 30
-#define GMU_OOB_GPU_SET_ACK_NEW 31
-#define GMU_OOB_GPU_SET_CLEAR_NEW 31
-
-
void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 130661898546..ba8e9d3cf0fe 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -10,6 +10,7 @@
#include <linux/bitfield.h>
#include <linux/devfreq.h>
+#include <linux/nvmem-consumer.h>
#include <linux/soc/qcom/llcc-qcom.h>
#define GPU_PAS_ID 13
@@ -1117,7 +1118,7 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
- if (IS_ERR(a6xx_gpu->llc_slice) && IS_ERR(a6xx_gpu->htw_llc_slice))
+ if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
}
@@ -1169,14 +1170,18 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ static DEFINE_MUTEX(perfcounter_oob);
+
+ mutex_lock(&perfcounter_oob);
/* Force the GPU power on so we can read this register */
- a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
*value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
REG_A6XX_RBBM_PERFCTR_CP_0_HI);
- a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+ mutex_unlock(&perfcounter_oob);
return 0;
}
@@ -1208,6 +1213,10 @@ static void a6xx_destroy(struct msm_gpu *gpu)
a6xx_gmu_remove(a6xx_gpu);
adreno_gpu_cleanup(adreno_gpu);
+
+ if (a6xx_gpu->opp_table)
+ dev_pm_opp_put_supported_hw(a6xx_gpu->opp_table);
+
kfree(a6xx_gpu);
}
@@ -1240,6 +1249,50 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
}
static struct msm_gem_address_space *
+a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct iommu_domain *iommu;
+ struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
+ u64 start, size;
+
+ iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu)
+ return NULL;
+
+ /*
+ * This allows GPU to set the bus attributes required to use system
+ * cache on behalf of the iommu page table walker.
+ */
+ if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
+ adreno_set_llc_attributes(iommu);
+
+ mmu = msm_iommu_new(&pdev->dev, iommu);
+ if (IS_ERR(mmu)) {
+ iommu_domain_free(iommu);
+ return ERR_CAST(mmu);
+ }
+
+ /*
+ * Use the aperture start or SZ_16M, whichever is greater. This will
+ * ensure that we align with the allocated pagetable range while still
+ * allowing room in the lower 32 bits for GMEM and whatnot
+ */
+ start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
+ size = iommu->geometry.aperture_end - start + 1;
+
+ aspace = msm_gem_address_space_create(mmu, "gpu",
+ start & GENMASK_ULL(48, 0), size);
+
+ if (IS_ERR(aspace) && !IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
+
+ return aspace;
+}
+
+static struct msm_gem_address_space *
a6xx_create_private_address_space(struct msm_gpu *gpu)
{
struct msm_mmu *mmu;
@@ -1264,6 +1317,78 @@ static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
}
+static u32 a618_get_speed_bin(u32 fuse)
+{
+ if (fuse == 0)
+ return 0;
+ else if (fuse == 169)
+ return 1;
+ else if (fuse == 174)
+ return 2;
+
+ return UINT_MAX;
+}
+
+static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse)
+{
+ u32 val = UINT_MAX;
+
+ if (revn == 618)
+ val = a618_get_speed_bin(fuse);
+
+ if (val == UINT_MAX) {
+ DRM_DEV_ERROR(dev,
+ "missing support for speed-bin: %u. Some OPPs may not be supported by hardware",
+ fuse);
+ return UINT_MAX;
+ }
+
+ return (1 << val);
+}
+
+static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
+ u32 revn)
+{
+ struct opp_table *opp_table;
+ struct nvmem_cell *cell;
+ u32 supp_hw = UINT_MAX;
+ void *buf;
+
+ cell = nvmem_cell_get(dev, "speed_bin");
+ /*
+ * -ENOENT means that the platform doesn't support speedbin which is
+ * fine
+ */
+ if (PTR_ERR(cell) == -ENOENT)
+ return 0;
+ else if (IS_ERR(cell)) {
+ DRM_DEV_ERROR(dev,
+ "failed to read speed-bin. Some OPPs may not be supported by hardware");
+ goto done;
+ }
+
+ buf = nvmem_cell_read(cell, NULL);
+ if (IS_ERR(buf)) {
+ nvmem_cell_put(cell);
+ DRM_DEV_ERROR(dev,
+ "failed to read speed-bin. Some OPPs may not be supported by hardware");
+ goto done;
+ }
+
+ supp_hw = fuse_to_supp_hw(dev, revn, *((u32 *) buf));
+
+ kfree(buf);
+ nvmem_cell_put(cell);
+
+done:
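+ /* supp_hw is still UINT_MAX (all bits set) if the fuse could not be read, so every OPP stays enabled */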
+ opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1);
+ if (IS_ERR(opp_table))
+ return PTR_ERR(opp_table);
+
+ a6xx_gpu->opp_table = opp_table;
+ return 0;
+}
+
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@@ -1285,7 +1410,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
#endif
- .create_address_space = adreno_iommu_create_address_space,
+ .create_address_space = a6xx_create_address_space,
.create_private_address_space = a6xx_create_private_address_space,
.get_rptr = a6xx_get_rptr,
},
@@ -1325,6 +1450,12 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
a6xx_llc_slices_init(pdev, a6xx_gpu);
+ ret = a6xx_set_supported_hw(&pdev->dev, a6xx_gpu, info->revn);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index e793d329e77b..ce0610c5256f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -33,6 +33,8 @@ struct a6xx_gpu {
void *llc_slice;
void *htw_llc_slice;
bool have_mmu500;
+
+ struct opp_table *opp_table;
};
#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 12e75ba360f9..600d445fabe8 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -134,6 +134,41 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
+ .rev = ADRENO_REV(5, 0, 8, ANY_ID),
+ .revn = 508,
+ .name = "A508",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_128K + SZ_8K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ .zapfw = "a508_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(5, 0, 9, ANY_ID),
+ .revn = 509,
+ .name = "A509",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_256K + SZ_16K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ /* Adreno 509 uses the same ZAP as 512 */
+ .zapfw = "a512_zap.mdt",
+ }, {
.rev = ADRENO_REV(5, 1, 0, ANY_ID),
.revn = 510,
.name = "A510",
@@ -149,6 +184,23 @@ static const struct adreno_info gpulist[] = {
.inactive_period = 250,
.init = a5xx_gpu_init,
}, {
+ .rev = ADRENO_REV(5, 1, 2, ANY_ID),
+ .revn = 512,
+ .name = "A512",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_256K + SZ_16K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ .zapfw = "a512_zap.mdt",
+ }, {
.rev = ADRENO_REV(5, 3, 0, 2),
.revn = 530,
.name = "A530",
@@ -168,7 +220,7 @@ static const struct adreno_info gpulist[] = {
.init = a5xx_gpu_init,
.zapfw = "a530_zap.mdt",
}, {
- .rev = ADRENO_REV(5, 4, 0, 2),
+ .rev = ADRENO_REV(5, 4, 0, ANY_ID),
.revn = 540,
.name = "A540",
.fw = {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f09175698827..0f184c3dd9d9 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -186,11 +186,18 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
+void adreno_set_llc_attributes(struct iommu_domain *iommu)
+{
+ struct io_pgtable_domain_attr pgtbl_cfg;
+
+ pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
+ iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+}
+
struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct iommu_domain *iommu;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
@@ -200,20 +207,6 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
if (!iommu)
return NULL;
-
- if (adreno_is_a6xx(adreno_gpu)) {
- struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- struct io_pgtable_domain_attr pgtbl_cfg;
- /*
- * This allows GPU to set the bus attributes required to use system
- * cache on behalf of the iommu page table walker.
- */
- if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
- pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
- iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
- }
- }
-
mmu = msm_iommu_new(&pdev->dev, iommu);
if (IS_ERR(mmu)) {
iommu_domain_free(iommu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index b3d9a333591b..ccac275aa7a2 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -197,11 +197,26 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
return gpu->revn == 430;
}
+static inline int adreno_is_a508(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 508;
+}
+
+static inline int adreno_is_a509(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 509;
+}
+
static inline int adreno_is_a510(struct adreno_gpu *gpu)
{
return gpu->revn == 510;
}
+static inline int adreno_is_a512(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 512;
+}
+
static inline int adreno_is_a530(struct adreno_gpu *gpu)
{
return gpu->revn == 530;
@@ -212,11 +227,6 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
return gpu->revn == 540;
}
-static inline bool adreno_is_a6xx(struct adreno_gpu *gpu)
-{
- return ((gpu->revn < 700 && gpu->revn > 599));
-}
-
static inline int adreno_is_a618(struct adreno_gpu *gpu)
{
return gpu->revn == 618;
@@ -278,6 +288,8 @@ struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev);
+void adreno_set_llc_attributes(struct iommu_domain *iommu);
+
/*
* For a5xx and a6xx targets load the zap shader that is used to pull the GPU
* out of secure mode
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 5a056c1191df..b2be39b9144e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -4,8 +4,10 @@
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/delay.h>
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
+#include "dpu_hw_pingpong.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
@@ -35,6 +37,8 @@
#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
+#define DPU_ENC_MAX_POLL_TIMEOUT_US 2000
+
static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false;
@@ -368,15 +372,12 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
tc_cfg.vsync_count = vsync_hz /
(mode->vtotal * drm_mode_vrefresh(mode));
- /* enable external TE after kickoff to avoid premature autorefresh */
- tc_cfg.hw_vsync_mode = 0;
-
/*
- * By setting sync_cfg_height to near max register value, we essentially
- * disable dpu hw generated TE signal, since hw TE will arrive first.
- * Only caveat is if due to error, we hit wrap-around.
+ * Set the sync_cfg_height to twice vtotal so that if we lose a
+ * TE event coming from the display TE pin we won't stall immediately
*/
- tc_cfg.sync_cfg_height = 0xFFF0;
+ tc_cfg.hw_vsync_mode = 1;
+ tc_cfg.sync_cfg_height = mode->vtotal * 2;
tc_cfg.vsync_init_val = mode->vdisplay;
tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
@@ -580,6 +581,69 @@ static void dpu_encoder_phys_cmd_prepare_for_kickoff(
atomic_read(&phys_enc->pending_kickoff_cnt));
}
+static bool dpu_encoder_phys_cmd_is_ongoing_pptx(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_pp_vsync_info info;
+
+ if (!phys_enc)
+ return false;
+
+ phys_enc->hw_pp->ops.get_vsync_info(phys_enc->hw_pp, &info);
+ if (info.wr_ptr_line_count > 0 &&
+ info.wr_ptr_line_count < phys_enc->cached_mode.vdisplay)
+ return true;
+
+ return false;
+}
+
+static void dpu_encoder_phys_cmd_prepare_commit(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ int trial = 0;
+
+ if (!phys_enc)
+ return;
+ if (!phys_enc->hw_pp)
+ return;
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return;
+
+ /* If autorefresh is already disabled, we have nothing to do */
+ if (!phys_enc->hw_pp->ops.get_autorefresh(phys_enc->hw_pp, NULL))
+ return;
+
+ /*
+ * If autorefresh is enabled, disable it and make sure it is safe to
+ * proceed with the current frame commit/push. Sequence followed is:
+ * 1. Disable TE
+ * 2. Disable autorefresh config
+ * 3. Poll for frame transfer ongoing to be false
+ * 4. Enable TE back
+ */
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, false);
+ phys_enc->hw_pp->ops.setup_autorefresh(phys_enc->hw_pp, 0, false);
+
+ do {
+ udelay(DPU_ENC_MAX_POLL_TIMEOUT_US);
+ if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US)
+ > (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) {
+ DPU_ERROR_CMDENC(cmd_enc,
+ "disable autorefresh failed\n");
+ break;
+ }
+
+ trial++;
+ } while (dpu_encoder_phys_cmd_is_ongoing_pptx(phys_enc));
+
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, true);
+
+ DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc),
+ "disabled autorefresh\n");
+}
+
static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
struct dpu_encoder_phys *phys_enc)
{
@@ -621,20 +685,15 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete(
static int dpu_encoder_phys_cmd_wait_for_commit_done(
struct dpu_encoder_phys *phys_enc)
{
- int rc = 0;
struct dpu_encoder_phys_cmd *cmd_enc;
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
/* only required for master controller */
- if (dpu_encoder_phys_cmd_is_master(phys_enc))
- rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
-
- /* required for both controllers */
- if (!rc && cmd_enc->serialize_wait4pp)
- dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc);
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return 0;
- return rc;
+ return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
}
static int dpu_encoder_phys_cmd_wait_for_vblank(
@@ -681,6 +740,7 @@ static void dpu_encoder_phys_cmd_trigger_start(
static void dpu_encoder_phys_cmd_init_ops(
struct dpu_encoder_phys_ops *ops)
{
+ ops->prepare_commit = dpu_encoder_phys_cmd_prepare_commit;
ops->is_master = dpu_encoder_phys_cmd_is_master;
ops->mode_set = dpu_encoder_phys_cmd_mode_set;
ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 90393fe9e59c..189f3533525c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -12,14 +12,17 @@
#define VIG_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
- BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
+ BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
#define VIG_SDM845_MASK \
- (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3))
#define VIG_SC7180_MASK \
- (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED4))
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
+
+#define VIG_SM8250_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3LITE))
#define DMA_SDM845_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
@@ -185,7 +188,7 @@ static const struct dpu_caps sm8150_dpu_caps = {
static const struct dpu_caps sm8250_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED3, /* TODO: qseed3 lite */
+ .qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
.ubwc_version = DPU_HW_UBWC_VER_40,
.has_src_split = true,
@@ -444,6 +447,34 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
};
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
+ _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
+ _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
+ _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
+ _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE);
+
+static const struct dpu_sspp_cfg sm8250_sspp[] = {
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK,
+ sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK,
+ sm8250_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
+ SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK,
+ sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
+ SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK,
+ sm8250_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+};
+
/*************************************************************
* MIXER sub blocks config
*************************************************************/
@@ -532,23 +563,28 @@ static const struct dpu_dspp_sub_blks sm8150_dspp_sblk = {
.len = 0x90, .version = 0x40000},
};
-#define DSPP_BLK(_name, _id, _base, _sblk) \
+#define DSPP_BLK(_name, _id, _base, _mask, _sblk) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x1800, \
- .features = DSPP_SC7180_MASK, \
+ .features = _mask, \
.sblk = _sblk \
}
static const struct dpu_dspp_cfg sc7180_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sc7180_dspp_sblk),
+ DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+ &sc7180_dspp_sblk),
};
static const struct dpu_dspp_cfg sm8150_dspp[] = {
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sm8150_dspp_sblk),
- DSPP_BLK("dspp_1", DSPP_1, 0x56000, &sm8150_dspp_sblk),
- DSPP_BLK("dspp_2", DSPP_2, 0x58000, &sm8150_dspp_sblk),
- DSPP_BLK("dspp_3", DSPP_3, 0x5a000, &sm8150_dspp_sblk),
+ DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+ &sm8150_dspp_sblk),
+ DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
+ &sm8150_dspp_sblk),
+ DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
+ &sm8150_dspp_sblk),
+ DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
+ &sm8150_dspp_sblk),
};
/*************************************************************
@@ -624,33 +660,33 @@ static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = {
/*************************************************************
* INTF sub blocks config
*************************************************************/
-#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _features) \
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _progfetch, _features) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x280, \
.features = _features, \
.type = _type, \
.controller_id = _ctrl_id, \
- .prog_fetch_lines_worst_case = 24 \
+ .prog_fetch_lines_worst_case = _progfetch \
}
static const struct dpu_intf_cfg sdm845_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SDM845_MASK),
- INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SDM845_MASK),
- INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SDM845_MASK),
- INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SDM845_MASK),
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SDM845_MASK),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SDM845_MASK),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SDM845_MASK),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SDM845_MASK),
};
static const struct dpu_intf_cfg sc7180_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK),
- INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK),
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK),
};
static const struct dpu_intf_cfg sm8150_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK),
- INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK),
- INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SC7180_MASK),
- INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SC7180_MASK),
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SC7180_MASK),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SC7180_MASK),
};
/*************************************************************
@@ -969,9 +1005,8 @@ static void sm8250_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.mdp = sm8250_mdp,
.ctl_count = ARRAY_SIZE(sm8150_ctl),
.ctl = sm8150_ctl,
- /* TODO: sspp qseed version differs from 845 */
- .sspp_count = ARRAY_SIZE(sdm845_sspp),
- .sspp = sdm845_sspp,
+ .sspp_count = ARRAY_SIZE(sm8250_sspp),
+ .sspp = sm8250_sspp,
.mixer_count = ARRAY_SIZE(sm8150_lm),
.mixer = sm8150_lm,
.dspp_count = ARRAY_SIZE(sm8150_dspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index eaef99db2d2f..ea4647d21a20 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -95,6 +95,7 @@ enum {
* @DPU_SSPP_SRC Src and fetch part of the pipes,
* @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
* @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3LITE, QSEED3 Lite algorithm support
* @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support
* @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
* @DPU_SSPP_CSC, Support of Color space conversion
@@ -114,6 +115,7 @@ enum {
DPU_SSPP_SRC = 0x1,
DPU_SSPP_SCALER_QSEED2,
DPU_SSPP_SCALER_QSEED3,
+ DPU_SSPP_SCALER_QSEED3LITE,
DPU_SSPP_SCALER_QSEED4,
DPU_SSPP_SCALER_RGB,
DPU_SSPP_CSC,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index bea4ab5c58c5..245a7a62b5c6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -23,6 +23,7 @@
#define PP_WR_PTR_IRQ 0x024
#define PP_OUT_LINE_COUNT 0x028
#define PP_LINE_COUNT 0x02C
+#define PP_AUTOREFRESH_CONFIG 0x030
#define PP_FBC_MODE 0x034
#define PP_FBC_BUDGET_CTL 0x038
@@ -120,6 +121,29 @@ static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
return 0;
}
+static void dpu_hw_pp_setup_autorefresh_config(struct dpu_hw_pingpong *pp,
+ u32 frame_count, bool enable)
+{
+ DPU_REG_WRITE(&pp->hw, PP_AUTOREFRESH_CONFIG,
+ enable ? (BIT(31) | frame_count) : 0);
+}
+
+/*
+ * dpu_hw_pp_get_autorefresh_config - Get autorefresh config from HW
+ * @pp: DPU pingpong structure
+ * @frame_count: Used to return the current frame count from hw
+ *
+ * Returns: True if autorefresh enabled, false if disabled.
+ */
+static bool dpu_hw_pp_get_autorefresh_config(struct dpu_hw_pingpong *pp,
+ u32 *frame_count)
+{
+ u32 val = DPU_REG_READ(&pp->hw, PP_AUTOREFRESH_CONFIG);
+ if (frame_count != NULL)
+ *frame_count = val & 0xffff;
+ return !!((val & BIT(31)) >> 31);
+}
+
static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
u32 timeout_us)
{
@@ -228,6 +252,8 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
c->ops.enable_tearcheck = dpu_hw_pp_enable_te;
c->ops.connect_external_te = dpu_hw_pp_connect_external_te;
c->ops.get_vsync_info = dpu_hw_pp_get_vsync_info;
+ c->ops.setup_autorefresh = dpu_hw_pp_setup_autorefresh_config;
+ c->ops.get_autorefresh = dpu_hw_pp_get_autorefresh_config;
c->ops.poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
c->ops.get_line_count = dpu_hw_pp_get_line_count;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 6902b9b95c8e..845b9ce80e31 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -63,6 +63,8 @@ struct dpu_hw_dither_cfg {
* @setup_tearcheck : program tear check values
* @enable_tearcheck : enables tear check
* @get_vsync_info : retrieves timing info of the panel
+ * @setup_autorefresh : configure and enable the autorefresh config
+ * @get_autorefresh : retrieve autorefresh config from hardware
* @setup_dither : function to program the dither hw block
* @get_line_count: obtain current vertical line counter
*/
@@ -95,6 +97,18 @@ struct dpu_hw_pingpong_ops {
struct dpu_hw_pp_vsync_info *info);
/**
+ * configure and enable the autorefresh config
+ */
+ void (*setup_autorefresh)(struct dpu_hw_pingpong *pp,
+ u32 frame_count, bool enable);
+
+ /**
+ * retrieve autorefresh config from hardware
+ */
+ bool (*get_autorefresh)(struct dpu_hw_pingpong *pp,
+ u32 *frame_count);
+
+ /**
* poll until write pointer transmission starts
* @Return: 0 on success, -ETIMEDOUT on timeout
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 2c2ca5335aa8..34d81aa16041 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -673,6 +673,7 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) ||
+ test_bit(DPU_SSPP_SCALER_QSEED3LITE, &features) ||
test_bit(DPU_SSPP_SCALER_QSEED4, &features)) {
c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 85b018a9b03c..fdfd4b46e2c6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -28,6 +28,7 @@ struct dpu_hw_pipe;
#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
(1UL << DPU_SSPP_SCALER_QSEED2) | \
(1UL << DPU_SSPP_SCALER_QSEED3) | \
+ (1UL << DPU_SSPP_SCALER_QSEED3LITE) | \
(1UL << DPU_SSPP_SCALER_QSEED4))
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index 84e9875994a8..f94584c982cd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -59,6 +59,19 @@ static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
#define QSEED3_SEP_LUT_SIZE \
(QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
+/* DPU_SCALER_QSEED3LITE */
+#define QSEED3LITE_COEF_LUT_Y_SEP_BIT 4
+#define QSEED3LITE_COEF_LUT_UV_SEP_BIT 5
+#define QSEED3LITE_COEF_LUT_CTRL 0x4C
+#define QSEED3LITE_COEF_LUT_SWAP_BIT 0
+#define QSEED3LITE_DIR_FILTER_WEIGHT 0x60
+#define QSEED3LITE_FILTERS 2
+#define QSEED3LITE_SEPARABLE_LUTS 10
+#define QSEED3LITE_LUT_SIZE 33
+#define QSEED3LITE_SEP_LUT_SIZE \
+ (QSEED3LITE_LUT_SIZE * QSEED3LITE_SEPARABLE_LUTS * sizeof(u32))
+
+
void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
u32 reg_off,
u32 val,
@@ -156,6 +169,57 @@ static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
}
+static void _dpu_hw_setup_scaler3lite_lut(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
+{
+ int j, filter;
+ int config_lut = 0x0;
+ unsigned long lut_flags;
+ u32 lut_addr, lut_offset;
+ u32 *lut[QSEED3LITE_FILTERS] = {NULL, NULL};
+ static const uint32_t off_tbl[QSEED3_FILTERS] = { 0x000, 0x200 };
+
+ DPU_REG_WRITE(c, QSEED3LITE_DIR_FILTER_WEIGHT + offset, scaler3_cfg->dir_weight);
+
+ if (!scaler3_cfg->sep_lut)
+ return;
+
+ lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+ if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) {
+ lut[0] = scaler3_cfg->sep_lut +
+ scaler3_cfg->y_rgb_sep_lut_idx * QSEED3LITE_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) {
+ lut[1] = scaler3_cfg->sep_lut +
+ scaler3_cfg->uv_sep_lut_idx * QSEED3LITE_LUT_SIZE;
+ config_lut = 1;
+ }
+
+ if (config_lut) {
+ for (filter = 0; filter < QSEED3LITE_FILTERS; filter++) {
+ if (!lut[filter])
+ continue;
+ lut_offset = 0;
+ lut_addr = QSEED3_COEF_LUT + offset + off_tbl[filter];
+ for (j = 0; j < QSEED3LITE_LUT_SIZE; j++) {
+ DPU_REG_WRITE(c,
+ lut_addr,
+ (lut[filter])[lut_offset++]);
+ lut_addr += 4;
+ }
+ }
+ }
+
+ if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+ DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
+
+}
+
static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c,
struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset)
{
@@ -242,9 +306,12 @@ void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
op_mode |= BIT(8);
}
- if (scaler3_cfg->lut_flag)
- _dpu_hw_setup_scaler3_lut(c, scaler3_cfg,
- scaler_offset);
+ if (scaler3_cfg->lut_flag) {
+ if (scaler_version < 0x2004)
+ _dpu_hw_setup_scaler3_lut(c, scaler3_cfg, scaler_offset);
+ else
+ _dpu_hw_setup_scaler3lite_lut(c, scaler3_cfg, scaler_offset);
+ }
if (scaler_version == 0x1002) {
phase_init =
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 234eb7d65753..ff3cffde84cd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -97,6 +97,7 @@ struct dpu_hw_scaler3_de_cfg {
* @ cir_lut: pointer to circular filter LUT
* @ sep_lut: pointer to separable filter LUT
* @ de: detail enhancer configuration
+ * @ dir_weight: Directional weight
*/
struct dpu_hw_scaler3_cfg {
u32 enable;
@@ -137,6 +138,8 @@ struct dpu_hw_scaler3_cfg {
* Detail enhancer settings
*/
struct dpu_hw_scaler3_de_cfg de;
+
+ u32 dir_weight;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
index cf867f3f7c36..b757054e1c23 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -30,7 +30,7 @@
#define VBIF_XIN_HALT_CTRL0 0x0200
#define VBIF_XIN_HALT_CTRL1 0x0204
#define VBIF_XINL_QOS_RP_REMAP_000 0x0550
-#define VBIF_XINL_QOS_LVL_REMAP_000 0x0590
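+/* the QoS level remap registers moved from 0x570 to 0x590 starting with DPU 4.0 */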
+#define VBIF_XINL_QOS_LVL_REMAP_000(v) ((v) < DPU_HW_VER_400 ? 0x570 : 0x0590)
static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
u32 *pnd_errors, u32 *src_errors)
@@ -156,18 +156,19 @@ static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
u32 xin_id, u32 level, u32 remap_level)
{
struct dpu_hw_blk_reg_map *c;
- u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+ u32 reg_lvl, reg_val, reg_val_lvl, mask, reg_high, reg_shift;
if (!vbif)
return;
c = &vbif->hw;
+ reg_lvl = VBIF_XINL_QOS_LVL_REMAP_000(c->hwversion);
reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
reg_shift = (xin_id & 0x7) * 4;
reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
- reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
+ reg_val_lvl = DPU_REG_READ(c, reg_lvl + reg_high);
mask = 0x7 << reg_shift;
@@ -178,7 +179,7 @@ static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
reg_val_lvl |= (remap_level << reg_shift) & mask;
DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
- DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
+ DPU_REG_WRITE(c, reg_lvl + reg_high, reg_val_lvl);
}
static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 374b0e8471e6..5a8e3e1fc48c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -749,7 +749,7 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
case DRM_MODE_ENCODER_TMDS:
info.num_of_h_tiles = 1;
break;
- };
+ }
rc = dpu_encoder_setup(encoder->dev, encoder, &info);
if (rc)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index bc0231a50132..f898a8f67b7f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -1465,6 +1465,7 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
pdpu->debugfs_root, &pdpu->debugfs_src);
if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) ||
cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) {
dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index df10c1ac7591..94ce62a26daf 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -177,7 +177,7 @@ static const struct mdp5_cfg_hw msm8x74v2_config = {
[3] = INTF_HDMI,
},
},
- .max_clk = 200000000,
+ .max_clk = 320000000,
};
static const struct mdp5_cfg_hw apq8084_config = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 0c8f9f88301f..f5d71b274079 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -1180,7 +1180,7 @@ static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
pp_done);
- complete(&mdp5_crtc->pp_completion);
+ complete_all(&mdp5_crtc->pp_completion);
}
static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 19b35ae3e927..1c6e1d2b947c 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -336,7 +336,6 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
ssize_t ret;
int const aux_cmd_native_max = 16;
int const aux_cmd_i2c_max = 128;
- int const retry_count = 5;
struct dp_aux_private *aux = container_of(dp_aux,
struct dp_aux_private, dp_aux);
@@ -378,12 +377,6 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
ret = dp_aux_cmd_fifo_tx(aux, msg);
if (ret < 0) {
- if (aux->native) {
- aux->retry_cnt++;
- if (!(aux->retry_cnt % retry_count))
- dp_catalog_aux_update_cfg(aux->catalog);
- dp_catalog_aux_reset(aux->catalog);
- }
usleep_range(400, 500); /* at least 400us to next try */
goto unlock_exit;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 44f0c57798d0..b1a9b1b98f5f 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -190,6 +190,18 @@ int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog)
return 0;
}
+/**
+ * dp_catalog_aux_reset() - reset AUX controller
+ *
+ * @dp_catalog: DP catalog structure
+ *
+ * return: void
+ *
+ * This function resets the AUX controller
+ *
+ * NOTE: resetting the AUX controller will also clear any pending HPD related interrupts
+ *
+ */
void dp_catalog_aux_reset(struct dp_catalog *dp_catalog)
{
u32 aux_ctrl;
@@ -483,6 +495,18 @@ int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog,
return 0;
}
+/**
+ * dp_catalog_ctrl_reset() - reset DP controller
+ *
+ * @dp_catalog: DP catalog structure
+ *
+ * return: void
+ *
+ * This function resets the DP controller
+ *
+ * NOTE: resetting the DP controller will also clear any pending HPD related interrupts
+ *
+ */
void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog)
{
u32 sw_reset;
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index e3462f5d96d7..1390f3547fde 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -631,7 +631,7 @@ static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in,
tu = kzalloc(sizeof(*tu), GFP_KERNEL);
if (!tu)
- return
+ return;
dp_panel_update_tu_timings(in, tu);
@@ -1158,7 +1158,7 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
default:
ret = -EINVAL;
break;
- };
+ }
if (!ret)
DRM_DEBUG_DP("new rate=0x%x\n", ctrl->link->link_params.rate);
@@ -1296,7 +1296,6 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
* transitioned to PUSH_IDLE. In order to start transmitting
* a link training pattern, we have to first do soft reset.
*/
- dp_catalog_ctrl_reset(ctrl->catalog);
ret = dp_ctrl_link_train(ctrl, cr, training_step);
@@ -1365,7 +1364,7 @@ static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl)
return ret;
}
-int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
+int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset)
{
struct dp_ctrl_private *ctrl;
struct dp_io *dp_io;
@@ -1382,6 +1381,9 @@ int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
ctrl->dp_ctrl.orientation = flip;
+ if (reset)
+ dp_catalog_ctrl_reset(ctrl->catalog);
+
dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_init(phy);
dp_catalog_ctrl_enable_irq(ctrl->catalog, true);
@@ -1420,16 +1422,14 @@ void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
{
u8 *dpcd = ctrl->panel->dpcd;
- u32 edid_quirks = 0;
- edid_quirks = drm_dp_get_edid_quirks(ctrl->panel->edid);
/*
* For better interop experience, used a fixed NVID=0x8000
* whenever connected to a VGA dongle downstream.
*/
if (drm_dp_is_branch(dpcd))
- return (drm_dp_has_quirk(&ctrl->panel->desc, edid_quirks,
- DP_DPCD_QUIRK_CONSTANT_N));
+ return (drm_dp_has_quirk(&ctrl->panel->desc,
+ DP_DPCD_QUIRK_CONSTANT_N));
return false;
}
@@ -1498,7 +1498,6 @@ static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
int training_step = DP_TRAINING_NONE;
dp_ctrl_push_idle(&ctrl->dp_ctrl);
- dp_catalog_ctrl_reset(ctrl->catalog);
ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
@@ -1787,14 +1786,14 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
* Set up transfer unit values and set controller state to send
* video.
*/
+ reinit_completion(&ctrl->video_comp);
+
dp_ctrl_configure_source_params(ctrl);
dp_catalog_ctrl_config_msa(ctrl->catalog,
ctrl->link->link_params.rate,
ctrl->dp_ctrl.pixel_rate, dp_ctrl_use_fixed_nvid(ctrl));
- reinit_completion(&ctrl->video_comp);
-
dp_ctrl_setup_tr_unit(ctrl);
dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index f60ba93c8678..a836bd358447 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -19,7 +19,7 @@ struct dp_ctrl {
u32 pixel_rate;
};
-int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip);
+int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset);
void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 3bc7ed21de28..5a39da6e1eaf 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -350,7 +350,7 @@ end:
return rc;
}
-static void dp_display_host_init(struct dp_display_private *dp)
+static void dp_display_host_init(struct dp_display_private *dp, int reset)
{
bool flip = false;
@@ -365,7 +365,7 @@ static void dp_display_host_init(struct dp_display_private *dp)
dp_display_set_encoder_mode(dp);
dp_power_init(dp->power, flip);
- dp_ctrl_host_init(dp->ctrl, flip);
+ dp_ctrl_host_init(dp->ctrl, flip, reset);
dp_aux_init(dp->aux);
dp->core_initialized = true;
}
@@ -403,7 +403,7 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
goto end;
}
- dp_display_host_init(dp);
+ dp_display_host_init(dp, false);
/*
* set sink to normal operation mode -- D0
@@ -651,8 +651,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
/* signal the disconnect event early to ensure proper teardown */
- dp_display_handle_plugged_change(g_dp_display, false);
reinit_completion(&dp->audio_comp);
+ dp_display_handle_plugged_change(g_dp_display, false);
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
DP_DP_IRQ_HPD_INT_MASK, true);
@@ -700,6 +700,13 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
return 0;
}
+ if (state == ST_CONNECT_PENDING || state == ST_DISCONNECT_PENDING) {
+ /* wait until ST_CONNECTED */
+ dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
if (ret == -ECONNRESET) { /* cable unplugged */
dp->core_initialized = false;
@@ -890,6 +897,9 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
/* wait only if audio was enabled */
if (dp_display->audio_enabled) {
+ /* signal the disconnect event */
+ reinit_completion(&dp->audio_comp);
+ dp_display_handle_plugged_change(dp_display, false);
if (!wait_for_completion_timeout(&dp->audio_comp,
HZ * 5))
DRM_ERROR("audio comp timeout\n");
@@ -1002,7 +1012,7 @@ int dp_display_get_test_bpp(struct msm_dp *dp)
static void dp_display_config_hpd(struct dp_display_private *dp)
{
- dp_display_host_init(dp);
+ dp_display_host_init(dp, true);
dp_catalog_ctrl_hpd_config(dp->catalog);
/* Enable interrupt first time
@@ -1256,7 +1266,7 @@ static int dp_pm_resume(struct device *dev)
dp->hpd_state = ST_DISCONNECTED;
/* turn on dp ctrl/phy */
- dp_display_host_init(dp);
+ dp_display_host_init(dp, true);
dp_catalog_ctrl_hpd_config(dp->catalog);
@@ -1439,7 +1449,7 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
state = dp_display->hpd_state;
if (state == ST_DISPLAY_OFF)
- dp_display_host_init(dp_display);
+ dp_display_host_init(dp_display, true);
dp_display_enable(dp_display, 0);
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index d1780bcac8cc..9cc816663668 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -409,7 +409,6 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel)
int dp_panel_init_panel_info(struct dp_panel *dp_panel)
{
- int rc = 0;
struct drm_display_mode *drm_mode;
drm_mode = &dp_panel->dp_mode.drm_mode;
@@ -436,7 +435,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel)
min_t(u32, dp_panel->dp_mode.bpp, 30));
DRM_DEBUG_DP("updated bpp = %d\n", dp_panel->dp_mode.bpp);
- return rc;
+ return 0;
}
struct dp_panel *dp_panel_get(struct dp_panel_in *in)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index 1afb7c579dbb..eca86bf448f7 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -139,7 +139,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.disable = dsi_20nm_phy_disable,
.init = msm_dsi_phy_init_common,
},
- .io_start = { 0xfd998300, 0xfd9a0300 },
+ .io_start = { 0xfd998500, 0xfd9a0500 },
.num_dsi_phy = 2,
};
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index e4e9bf04b736..de3b802ccd3d 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -172,9 +172,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll)
multiplier = 1 << config->frac_bits;
dec_multiple = div_u64(pll_freq * multiplier, divider);
- div_u64_rem(dec_multiple, multiplier, &frac);
-
- dec = div_u64(dec_multiple, multiplier);
+ dec = div_u64_rem(dec_multiple, multiplier, &frac);
if (pll_freq <= 1900000000UL)
regs->pll_prop_gain_rate = 8;
@@ -306,7 +304,8 @@ static void dsi_pll_commit(struct dsi_pll_10nm *pll)
reg->frac_div_start_mid);
pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
reg->frac_div_start_high);
- pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1,
+ reg->pll_lockdet_rate);
pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
@@ -345,6 +344,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
{
+ struct device *dev = &pll->pdev->dev;
int rc;
u32 status = 0;
u32 const delay_us = 100;
@@ -357,8 +357,8 @@ static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
delay_us,
timeout_us);
if (rc)
- pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
- pll->id, status);
+ DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
+ pll->id, status);
return rc;
}
@@ -405,6 +405,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ struct device *dev = &pll_10nm->pdev->dev;
int rc;
dsi_pll_enable_pll_bias(pll_10nm);
@@ -413,7 +414,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0);
if (rc) {
- pr_err("vco_set_rate failed, rc=%d\n", rc);
+ DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
return rc;
}
@@ -430,7 +431,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
/* Check for PLL lock */
rc = dsi_pll_10nm_lock_status(pll_10nm);
if (rc) {
- pr_err("PLL(%d) lock failed\n", pll_10nm->id);
+ DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->id);
goto error;
}
@@ -483,6 +484,7 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ struct dsi_pll_config *config = &pll_10nm->pll_configuration;
void __iomem *base = pll_10nm->mmio;
u64 ref_clk = pll_10nm->vco_ref_clk_rate;
u64 vco_rate = 0x0;
@@ -503,9 +505,8 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
/*
* TODO:
* 1. Assumes prescaler is disabled
- * 2. Multiplier is 2^18. it should be 2^(num_of_frac_bits)
*/
- multiplier = 1 << 18;
+ multiplier = 1 << config->frac_bits;
pll_freq = dec * (ref_clk * 2);
tmp64 = (ref_clk * 2 * frac);
pll_freq += div_u64(tmp64, multiplier);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 108c405e03dd..94525ac76d4e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -788,9 +788,10 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
struct drm_file *file, struct drm_gem_object *obj,
uint64_t *iova)
{
+ struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx = file->driver_priv;
- if (!ctx->aspace)
+ if (!priv->gpu)
return -EINVAL;
/*
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 9d10739c4eb2..f091c1e164fa 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -987,8 +987,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
*/
- if (msm_obj->pages)
- kvfree(msm_obj->pages);
+ kvfree(msm_obj->pages);
put_iova_vmas(obj);
@@ -1216,7 +1215,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
goto fail;
}
- ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
+ ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
if (ret) {
msm_gem_unlock(obj);
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index d04c349d8112..5480852bdeda 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -198,6 +198,8 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
submit->cmd[i].idx = submit_cmd.submit_idx;
submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;
+ userptr = u64_to_user_ptr(submit_cmd.relocs);
+
sz = array_size(submit_cmd.nr_relocs,
sizeof(struct drm_msm_gem_submit_reloc));
/* check for overflow: */
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index d8151a89e163..4735251a394d 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -157,6 +157,7 @@ struct msm_kms {
* from the crtc's pending_timer close to end of the frame:
*/
struct mutex commit_lock[MAX_CRTCS];
+ struct lock_class_key commit_lock_keys[MAX_CRTCS];
unsigned pending_crtc_mask;
struct msm_pending_timer pending_timers[MAX_CRTCS];
};
@@ -166,8 +167,11 @@ static inline int msm_kms_init(struct msm_kms *kms,
{
unsigned i, ret;
- for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
- mutex_init(&kms->commit_lock[i]);
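+ /* give each per-CRTC commit_lock its own lockdep class */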
+ for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) {
+ lockdep_register_key(&kms->commit_lock_keys[i]);
+ __mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]",
+ &kms->commit_lock_keys[i]);
+ }
kms->funcs = funcs;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 9d4a2d97507e..1d3542d6006b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -200,16 +200,17 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
uint32_t cfg1 = nvif_rd32(device, NV04_PFB_CFG1);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
sim_data.pclk_khz = VClk;
sim_data.mclk_khz = MClk;
sim_data.nvclk_khz = NVClk;
sim_data.bpp = bpp;
sim_data.two_heads = nv_two_heads(dev);
- if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
- (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
+ if ((pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
+ (pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
uint32_t type;
- int domain = pci_domain_nr(dev->pdev->bus);
+ int domain = pci_domain_nr(pdev->bus);
pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 1),
0x7c, &type);
@@ -251,11 +252,12 @@ void
nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_KELVIN)
nv04_update_arb(dev, vclk, bpp, burst, lwm);
- else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
- (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
+ else if ((pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
+ (pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
*burst = 128;
*lwm = 0x0480;
} else
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 42687ea2a4ca..ce3d8c6ef000 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -488,12 +488,13 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
#ifdef __powerpc__
struct drm_device *dev = encoder->dev;
struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
/* BIOS scripts usually take care of the backlight, thanks
* Apple for your consistency.
*/
- if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
- dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
+ if (pdev->device == 0x0174 || pdev->device == 0x0179 ||
+ pdev->device == 0x0189 || pdev->device == 0x0329) {
if (mode == DRM_MODE_DPMS_ON) {
nvif_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
nvif_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 5ace5e906949..f0a24126641a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -130,7 +130,7 @@ static inline bool
nv_two_heads(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- const int impl = dev->pdev->device & 0x0ff0;
+ const int impl = to_pci_dev(dev->dev)->device & 0x0ff0;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 &&
impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
@@ -142,14 +142,14 @@ nv_two_heads(struct drm_device *dev)
static inline bool
nv_gf4_disp_arch(struct drm_device *dev)
{
- return nv_two_heads(dev) && (dev->pdev->device & 0x0ff0) != 0x0110;
+ return nv_two_heads(dev) && (to_pci_dev(dev->dev)->device & 0x0ff0) != 0x0110;
}
static inline bool
nv_two_reg_pll(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- const int impl = dev->pdev->device & 0x0ff0;
+ const int impl = to_pci_dev(dev->dev)->device & 0x0ff0;
if (impl == 0x0310 || impl == 0x0340 || drm->client.device.info.family >= NV_DEVICE_INFO_V0_CURIE)
return true;
@@ -160,9 +160,11 @@ static inline bool
nv_match_device(struct drm_device *dev, unsigned device,
unsigned sub_vendor, unsigned sub_device)
{
- return dev->pdev->device == device &&
- dev->pdev->subsystem_vendor == sub_vendor &&
- dev->pdev->subsystem_device == sub_device;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ return pdev->device == device &&
+ pdev->subsystem_vendor == sub_vendor &&
+ pdev->subsystem_device == sub_device;
}
#include <subdev/bios/init.h>
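The dispnv04 hunks in this series drop uses of the cached drm_device::pdev pointer and instead upcast the generic struct device back to its PCI container wherever the PCI identity is needed. A hedged sketch of the idiom; to_pci_dev() is the standard container_of() wrapper from <linux/pci.h>, and the helper name is hypothetical:

#include <linux/pci.h>
#include <drm/drm_device.h>

/* Only valid when the DRM device's parent really is a PCI device. */
static u16 demo_pci_device_id(struct drm_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	return pdev->device;
}

Callers that can also run on non-PCI platforms would check dev_is_pci(dev->dev) first.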
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index b674d68ef28a..f7d35657aa64 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -214,14 +214,15 @@ nouveau_hw_pllvals_to_clk(struct nvkm_pll_vals *pv)
int
nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct nvkm_pll_vals pllvals;
int ret;
int domain;
- domain = pci_domain_nr(dev->pdev->bus);
+ domain = pci_domain_nr(pdev->bus);
if (plltype == PLL_MEMORY &&
- (dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
+ (pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
uint32_t mpllP;
pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 3),
0x6c, &mpllP);
@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
return 400000 / mpllP;
} else
if (plltype == PLL_MEMORY &&
- (dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) {
+ (pdev->device & 0xff0) == CHIPSET_NFORCE2) {
uint32_t clock;
pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 5),
@@ -309,6 +310,7 @@ void
nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
uint8_t misc, gr4, gr5, gr6, seq2, seq4;
bool graphicsmode;
unsigned plane;
@@ -327,7 +329,7 @@ nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
NV_INFO(drm, "%sing VGA fonts\n", save ? "Sav" : "Restor");
/* map first 64KiB of VRAM, holds VGA fonts etc */
- iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
+ iovram = ioremap(pci_resource_start(pdev, 1), 65536);
if (!iovram) {
NV_ERROR(drm, "Failed to map VRAM, "
"cannot save/restore VGA fonts.\n");
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index 302d4e6fc52f..788db043a342 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -88,7 +88,11 @@ base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
NVVAL(NV507C, SET_CONVERSION, OFS, 0x64));
} else {
PUSH_MTHD(push, NV507C, SET_PROCESSING,
- NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
+ NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV507C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV507C, SET_CONVERSION, OFS, 0));
}
PUSH_MTHD(push, NV507C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
index 18d34096f125..093d4ba6910e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base827c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
@@ -49,7 +49,11 @@ base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
NVVAL(NV827C, SET_CONVERSION, OFS, 0x64));
} else {
PUSH_MTHD(push, NV827C, SET_PROCESSING,
- NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
+ NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
+
+ SET_CONVERSION,
+ NVVAL(NV827C, SET_CONVERSION, GAIN, 0) |
+ NVVAL(NV827C, SET_CONVERSION, OFS, 0));
}
PUSH_MTHD(push, NV827C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index e6f16a7750f0..1a1d806e0b01 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -36,7 +36,7 @@ core507d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
struct nvif_push *push = core->chan.push;
int ret;
- if ((ret = PUSH_WAIT(push, 5)))
+ if ((ret = PUSH_WAIT(push, (ntfy ? 2 : 0) + 3)))
return ret;
if (ntfy) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 9035d3ab062c..42f877f2ced2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -54,7 +54,7 @@ corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
struct nvif_push *push = core->chan.push;
int ret;
- if ((ret = PUSH_WAIT(push, 9)))
+ if ((ret = PUSH_WAIT(push, (ntfy ? 2 * 2 : 0) + 5)))
return ret;
if (ntfy) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index c6367035970e..196612addfd6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -220,6 +220,10 @@ nv50_dmac_wait(struct nvif_push *push, u32 size)
return 0;
}
+MODULE_PARM_DESC(kms_vram_pushbuf, "Place EVO/NVD push buffers in VRAM (default: auto)");
+static int nv50_dmac_vram_pushbuf = -1;
+module_param_named(kms_vram_pushbuf, nv50_dmac_vram_pushbuf, int, 0400);
+
int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
@@ -241,7 +245,8 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
*
* This appears to match NVIDIA's behaviour on Pascal.
*/
- if (device->info.family == NV_DEVICE_INFO_V0_PASCAL)
+ if ((nv50_dmac_vram_pushbuf > 0) ||
+ (nv50_dmac_vram_pushbuf < 0 && device->info.family == NV_DEVICE_INFO_V0_PASCAL))
type |= NVIF_MEM_VRAM;
ret = nvif_mem_ctor_map(&cli->mmu, "kmsChanPush", type, 0x1000,
@@ -305,6 +310,14 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
* Output path helpers
*****************************************************************************/
static void
+nv50_outp_dump_caps(struct nouveau_drm *drm,
+ struct nouveau_encoder *outp)
+{
+ NV_DEBUG(drm, "%s caps: dp_interlace=%d\n",
+ outp->base.base.name, outp->caps.dp_interlace);
+}
+
+static void
nv50_outp_release(struct nouveau_encoder *nv_encoder)
{
struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
@@ -419,8 +432,7 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
}
struct nouveau_connector *
-nv50_outp_get_new_connector(struct nouveau_encoder *outp,
- struct drm_atomic_state *state)
+nv50_outp_get_new_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
{
struct drm_connector *connector;
struct drm_connector_state *connector_state;
@@ -436,8 +448,7 @@ nv50_outp_get_new_connector(struct nouveau_encoder *outp,
}
struct nouveau_connector *
-nv50_outp_get_old_connector(struct nouveau_encoder *outp,
- struct drm_atomic_state *state)
+nv50_outp_get_old_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
{
struct drm_connector *connector;
struct drm_connector_state *connector_state;
@@ -452,27 +463,44 @@ nv50_outp_get_old_connector(struct nouveau_encoder *outp,
return NULL;
}
+static struct nouveau_crtc *
+nv50_outp_get_new_crtc(const struct drm_atomic_state *state, const struct nouveau_encoder *outp)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ const u32 mask = drm_encoder_mask(&outp->base.base);
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->encoder_mask & mask)
+ return nouveau_crtc(crtc);
+ }
+
+ return NULL;
+}
+
/******************************************************************************
* DAC
*****************************************************************************/
static void
-nv50_dac_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_dac_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
const u32 ctrl = NVDEF(NV507D, DAC_SET_CONTROL, OWNER, NONE);
- if (nv_encoder->crtc)
- core->func->dac->ctrl(core, nv_encoder->or, ctrl, NULL);
+
+ core->func->dac->ctrl(core, nv_encoder->or, ctrl, NULL);
nv_encoder->crtc = NULL;
nv50_outp_release(nv_encoder);
}
static void
-nv50_dac_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_dac_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
+ struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
+ struct nv50_head_atom *asyh =
+ nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
struct nv50_core *core = nv50_disp(encoder->dev)->core;
u32 ctrl = 0;
@@ -493,7 +521,7 @@ nv50_dac_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
core->func->dac->ctrl(core, nv_encoder->or, ctrl, asyh);
asyh->or.depth = 0;
- nv_encoder->crtc = encoder->crtc;
+ nv_encoder->crtc = &nv_crtc->base;
}
static enum drm_connector_status
@@ -526,8 +554,8 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
static const struct drm_encoder_helper_funcs
nv50_dac_help = {
.atomic_check = nv50_outp_atomic_check,
- .atomic_enable = nv50_dac_enable,
- .atomic_disable = nv50_dac_disable,
+ .atomic_enable = nv50_dac_atomic_enable,
+ .atomic_disable = nv50_dac_atomic_disable,
.detect = nv50_dac_detect
};
@@ -593,34 +621,27 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct drm_encoder *encoder;
struct nouveau_encoder *nv_encoder;
- struct drm_connector *connector;
struct nouveau_crtc *nv_crtc;
- struct drm_connector_list_iter conn_iter;
int ret = 0;
*enabled = false;
+ mutex_lock(&drm->audio.lock);
+
drm_for_each_encoder(encoder, drm->dev) {
struct nouveau_connector *nv_connector = NULL;
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST)
+ continue; /* TODO */
+
nv_encoder = nouveau_encoder(encoder);
+ nv_connector = nouveau_connector(nv_encoder->audio.connector);
+ nv_crtc = nouveau_crtc(nv_encoder->crtc);
- drm_connector_list_iter_begin(drm_dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->state->best_encoder == encoder) {
- nv_connector = nouveau_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
- if (!nv_connector)
+ if (!nv_crtc || nv_encoder->or != port || nv_crtc->index != dev_id)
continue;
- nv_crtc = nouveau_crtc(encoder->crtc);
- if (!nv_crtc || nv_encoder->or != port ||
- nv_crtc->index != dev_id)
- continue;
- *enabled = nv_encoder->audio;
+ *enabled = nv_encoder->audio.enabled;
if (*enabled) {
ret = drm_eld_size(nv_connector->base.eld);
memcpy(buf, nv_connector->base.eld,
@@ -629,6 +650,8 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
break;
}
+ mutex_unlock(&drm->audio.lock);
+
return ret;
}
@@ -678,17 +701,22 @@ static const struct component_ops nv50_audio_component_bind_ops = {
static void
nv50_audio_component_init(struct nouveau_drm *drm)
{
- if (!component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
- drm->audio.component_registered = true;
+ if (component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
+ return;
+
+ drm->audio.component_registered = true;
+ mutex_init(&drm->audio.lock);
}
static void
nv50_audio_component_fini(struct nouveau_drm *drm)
{
- if (drm->audio.component_registered) {
- component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
- drm->audio.component_registered = false;
- }
+ if (!drm->audio.component_registered)
+ return;
+
+ component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
+ drm->audio.component_registered = false;
+ mutex_destroy(&drm->audio.lock);
}
/******************************************************************************
@@ -711,24 +739,25 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
(0x0100 << nv_crtc->index),
};
- if (!nv_encoder->audio)
- return;
-
- nv_encoder->audio = false;
- nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
+ mutex_lock(&drm->audio.lock);
+ if (nv_encoder->audio.enabled) {
+ nv_encoder->audio.enabled = false;
+ nv_encoder->audio.connector = NULL;
+ nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
+ }
+ mutex_unlock(&drm->audio.lock);
nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
nv_crtc->index);
}
static void
-nv50_audio_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
+nv50_audio_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
+ struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
struct drm_display_mode *mode)
{
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct __packed {
struct {
@@ -744,15 +773,19 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
(0x0100 << nv_crtc->index),
};
- nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
if (!drm_detect_monitor_audio(nv_connector->edid))
return;
+ mutex_lock(&drm->audio.lock);
+
memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
nvif_mthd(&disp->disp->object, 0, &args,
sizeof(args.base) + drm_eld_size(args.data));
- nv_encoder->audio = true;
+ nv_encoder->audio.enabled = true;
+ nv_encoder->audio.connector = &nv_connector->base;
+
+ mutex_unlock(&drm->audio.lock);
nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
nv_crtc->index);
@@ -781,12 +814,12 @@ nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
}
static void
-nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
+nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
+ struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
struct drm_display_mode *mode)
{
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
struct nv50_disp_mthd_v1 base;
@@ -801,7 +834,6 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
.pwr.state = 1,
.pwr.rekey = 56, /* binary driver, and tegra, constant */
};
- struct nouveau_connector *nv_connector;
struct drm_hdmi_info *hdmi;
u32 max_ac_packet;
union hdmi_infoframe avi_frame;
@@ -811,7 +843,6 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
int ret;
int size;
- nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
if (!drm_detect_hdmi_monitor(nv_connector->edid))
return;
@@ -857,7 +888,7 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
+ args.pwr.vendor_infoframe_length;
nvif_mthd(&disp->disp->object, 0, &args, size);
- nv50_audio_enable(encoder, state, mode);
+ nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
/* If SCDC is supported by the downstream monitor, update
* divider / scrambling settings to what we programmed above.
@@ -898,6 +929,7 @@ struct nv50_mstc {
struct nv50_msto {
struct drm_encoder encoder;
+ /* head is statically assigned on msto creation */
struct nv50_head *head;
struct nv50_mstc *mstc;
bool disabled;
@@ -1056,11 +1088,12 @@ nv50_dp_bpc_to_depth(unsigned int bpc)
}
static void
-nv50_msto_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
- struct nv50_head *head = nv50_head(encoder->crtc);
- struct nv50_head_atom *armh = nv50_head_atom(head->base.base.state);
struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_head *head = msto->head;
+ struct nv50_head_atom *asyh =
+ nv50_head_atom(drm_atomic_get_new_crtc_state(state, &head->base.base));
struct nv50_mstc *mstc = NULL;
struct nv50_mstm *mstm = NULL;
struct drm_connector *connector;
@@ -1081,8 +1114,7 @@ nv50_msto_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
if (WARN_ON(!mstc))
return;
- r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, armh->dp.pbn,
- armh->dp.tu);
+ r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, asyh->dp.pbn, asyh->dp.tu);
if (!r)
DRM_DEBUG_KMS("Failed to allocate VCPI\n");
@@ -1094,15 +1126,15 @@ nv50_msto_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
else
proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B;
- mstm->outp->update(mstm->outp, head->base.index, armh, proto,
- nv50_dp_bpc_to_depth(armh->or.bpc));
+ mstm->outp->update(mstm->outp, head->base.index, asyh, proto,
+ nv50_dp_bpc_to_depth(asyh->or.bpc));
msto->mstc = mstc;
mstm->modified = true;
}
static void
-nv50_msto_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_msto_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
@@ -1119,8 +1151,8 @@ nv50_msto_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
static const struct drm_encoder_helper_funcs
nv50_msto_help = {
- .atomic_disable = nv50_msto_disable,
- .atomic_enable = nv50_msto_enable,
+ .atomic_disable = nv50_msto_atomic_disable,
+ .atomic_enable = nv50_msto_atomic_enable,
.atomic_check = nv50_msto_atomic_check,
};
@@ -1616,43 +1648,38 @@ nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
}
static void
-nv50_sor_disable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
+nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
- struct nouveau_connector *nv_connector =
- nv50_outp_get_old_connector(nv_encoder, state);
-
- nv_encoder->crtc = NULL;
-
- if (nv_crtc) {
- struct drm_dp_aux *aux = &nv_connector->aux;
- u8 pwr;
+ struct nouveau_connector *nv_connector = nv50_outp_get_old_connector(state, nv_encoder);
+ struct drm_dp_aux *aux = &nv_connector->aux;
+ u8 pwr;
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- int ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
+ int ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
- if (ret == 0) {
- pwr &= ~DP_SET_POWER_MASK;
- pwr |= DP_SET_POWER_D3;
- drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
- }
+ if (ret == 0) {
+ pwr &= ~DP_SET_POWER_MASK;
+ pwr |= DP_SET_POWER_D3;
+ drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
}
-
- nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
- nv50_audio_disable(encoder, nv_crtc);
- nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
- nv50_outp_release(nv_encoder);
}
+
+ nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
+ nv50_audio_disable(encoder, nv_crtc);
+ nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
+ nv50_outp_release(nv_encoder);
+ nv_encoder->crtc = NULL;
}
static void
-nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
+ struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
+ struct nv50_head_atom *asyh =
+ nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
struct drm_display_mode *mode = &asyh->state.adjusted_mode;
struct {
struct nv50_disp_mthd_v1 base;
@@ -1672,8 +1699,8 @@ nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
u8 proto = NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM;
u8 depth = NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT;
- nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
- nv_encoder->crtc = encoder->crtc;
+ nv_connector = nv50_outp_get_new_connector(state, nv_encoder);
+ nv_encoder->crtc = &nv_crtc->base;
if ((disp->disp->object.oclass == GT214_DISP ||
disp->disp->object.oclass >= GF110_DISP) &&
@@ -1699,7 +1726,7 @@ nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B;
}
- nv50_hdmi_enable(&nv_encoder->base.base, state, mode);
+ nv50_hdmi_enable(&nv_encoder->base.base, nv_crtc, nv_connector, state, mode);
break;
case DCB_OUTPUT_LVDS:
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM;
@@ -1740,7 +1767,7 @@ nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
else
proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B;
- nv50_audio_enable(encoder, state, mode);
+ nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
break;
default:
BUG();
@@ -1753,8 +1780,8 @@ nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
static const struct drm_encoder_helper_funcs
nv50_sor_help = {
.atomic_check = nv50_outp_atomic_check,
- .atomic_enable = nv50_sor_enable,
- .atomic_disable = nv50_sor_disable,
+ .atomic_enable = nv50_sor_atomic_enable,
+ .atomic_disable = nv50_sor_atomic_disable,
};
static void
@@ -1821,6 +1848,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
drm_connector_attach_encoder(connector, encoder);
disp->core->func->sor->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+ nv50_outp_dump_caps(drm, nv_encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
struct nvkm_i2c_aux *aux =
@@ -1875,23 +1903,24 @@ nv50_pior_atomic_check(struct drm_encoder *encoder,
}
static void
-nv50_pior_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_pior_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
const u32 ctrl = NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, NONE);
- if (nv_encoder->crtc)
- core->func->pior->ctrl(core, nv_encoder->or, ctrl, NULL);
+
+ core->func->pior->ctrl(core, nv_encoder->or, ctrl, NULL);
nv_encoder->crtc = NULL;
nv50_outp_release(nv_encoder);
}
static void
-nv50_pior_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
+nv50_pior_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
+ struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
+ struct nv50_head_atom *asyh =
+ nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
struct nv50_core *core = nv50_disp(encoder->dev)->core;
u32 ctrl = 0;
@@ -1929,8 +1958,8 @@ nv50_pior_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
static const struct drm_encoder_helper_funcs
nv50_pior_help = {
.atomic_check = nv50_pior_atomic_check,
- .atomic_enable = nv50_pior_enable,
- .atomic_disable = nv50_pior_disable,
+ .atomic_enable = nv50_pior_atomic_enable,
+ .atomic_disable = nv50_pior_atomic_disable,
};
static void
@@ -1991,6 +2020,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
drm_connector_attach_encoder(connector, encoder);
disp->core->func->pior->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+ nv50_outp_dump_caps(drm, nv_encoder);
return 0;
}
@@ -2663,6 +2693,14 @@ nv50_display_create(struct drm_device *dev)
else
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
+ if (disp->disp->object.oclass >= GK104_DISP) {
+ dev->mode_config.cursor_width = 256;
+ dev->mode_config.cursor_height = 256;
+ } else {
+ dev->mode_config.cursor_width = 64;
+ dev->mode_config.cursor_height = 64;
+ }
+
/* create crtc objects to represent the hw heads */
if (disp->disp->object.oclass >= GV100_DISP)
crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
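The kms_vram_pushbuf parameter introduced near the top of this file follows the usual tristate convention for module options: -1 keeps the driver's per-chipset default, 0 forces system memory, 1 forces VRAM. A small sketch of that convention with hypothetical names; module_param_named() and MODULE_PARM_DESC() are the real kernel macros:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int demo_use_vram = -1;	/* -1 = auto, 0 = never, 1 = always */
module_param_named(use_vram, demo_use_vram, int, 0400);
MODULE_PARM_DESC(use_vram, "Place push buffers in VRAM (1 = yes, 0 = no, -1 = auto, default)");

static bool demo_want_vram(bool chipset_prefers_vram)
{
	if (demo_use_vram >= 0)
		return demo_use_vram;		/* explicit user override */
	return chipset_prefers_vram;		/* auto: keep the per-chipset default */
}

The 0400 permission makes the option read-only through sysfs, matching how it is registered above.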
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 537c1ef2e464..ec361d17e900 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -503,7 +503,6 @@ nv50_head_destroy(struct drm_crtc *crtc)
static const struct drm_crtc_funcs
nv50_head_func = {
.reset = nv50_head_reset,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = nv50_head_destroy,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -518,7 +517,6 @@ nv50_head_func = {
static const struct drm_crtc_funcs
nvd9_head_func = {
.reset = nv50_head_reset,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = nv50_head_destroy,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index 8f860e9c5224..85648d790743 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -322,7 +322,7 @@ head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
const int i = head->base.index;
int ret;
- if ((ret = PUSH_WAIT(push, 14)))
+ if ((ret = PUSH_WAIT(push, 13)))
return ret;
PUSH_MTHD(push, NV907D, HEAD_SET_OVERSCAN_COLOR(i),
@@ -353,14 +353,7 @@ head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
PUSH_MTHD(push, NV907D, HEAD_SET_DEFAULT_BASE_COLOR(i),
NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, RED, 0) |
NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, GREEN, 0) |
- NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, BLUE, 0),
-
- HEAD_SET_CRC_CONTROL(i),
- NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
- NVDEF(NV907D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
- NVDEF(NV907D, HEAD_SET_CRC_CONTROL, TIMESTAMP_MODE, FALSE) |
- NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, NONE) |
- NVDEF(NV907D, HEAD_SET_CRC_CONTROL, SECONDARY_OUTPUT, NONE));
+ NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, BLUE, 0));
PUSH_MTHD(push, NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
NVVAL(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000) |
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
index a5d827403660..ea9f8667305e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -22,6 +22,7 @@
#include "head.h"
#include "core.h"
+#include "nvif/push.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl917d.h>
@@ -73,6 +74,31 @@ head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
return 0;
}
+static int
+head917d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 5);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+ NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND),
+
+ HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
+
+ PUSH_MTHD(push, NV917D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
+ return 0;
+}
+
int
head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
@@ -101,7 +127,7 @@ head917d = {
.core_clr = head907d_core_clr,
.curs_layout = head917d_curs_layout,
.curs_format = head507d_curs_format,
- .curs_set = head907d_curs_set,
+ .curs_set = head917d_curs_set,
.curs_clr = head907d_curs_clr,
.base = head917d_base,
.ovly = head907d_ovly,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index ce451242f79e..271de3a63f21 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -702,6 +702,11 @@ nv50_wndw_init(struct nv50_wndw *wndw)
nvif_notify_get(&wndw->notify);
}
+static const u64 nv50_cursor_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
int
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
enum drm_plane_type type, const char *name, int index,
@@ -713,6 +718,7 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
struct nvif_mmu *mmu = &drm->client.mmu;
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_wndw *wndw;
+ const u64 *format_modifiers;
int nformat;
int ret;
@@ -728,10 +734,13 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
for (nformat = 0; format[nformat]; nformat++);
- ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
- format, nformat,
- nouveau_display(dev)->format_modifiers,
- type, "%s-%d", name, index);
+ if (type == DRM_PLANE_TYPE_CURSOR)
+ format_modifiers = nv50_cursor_format_modifiers;
+ else
+ format_modifiers = nouveau_display(dev)->format_modifiers;
+
+ ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw, format, nformat,
+ format_modifiers, type, "%s-%d", name, index);
if (ret) {
kfree(*pwndw);
*pwndw = NULL;
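The wndw.c change gives cursor planes their own modifier list because the cursor hardware only scans out linear surfaces, while other planes keep the display-wide list of tiled modifiers. A hedged sketch of registering such a plane; drm_universal_plane_init() and the DRM_FORMAT_* macros are real DRM APIs, everything prefixed demo_ is hypothetical:

#include <linux/kernel.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>

/* Modifier arrays passed to drm_universal_plane_init() must end in INVALID. */
static const u64 demo_cursor_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID,
};

static const u32 demo_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static int demo_cursor_plane_init(struct drm_device *dev, struct drm_plane *plane,
				  u32 possible_crtcs,
				  const struct drm_plane_funcs *funcs, int index)
{
	return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
					demo_cursor_formats,
					ARRAY_SIZE(demo_cursor_formats),
					demo_cursor_modifiers,
					DRM_PLANE_TYPE_CURSOR,
					"cursor-%d", index);
}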
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h
index 2a2612d6e1e0..fb223723a38a 100644
--- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h
@@ -66,6 +66,10 @@
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV917D_HEAD_SET_OFFSET_CURSOR(a) (0x00000484 + (a)*0x00000300)
+#define NV917D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0
+#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR(a) (0x0000048C + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0
#define NV917D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300)
#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 57d4f457a7d4..0b86c44878e0 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -60,37 +60,33 @@ struct nv_device_time_v0 {
#define NV_DEVICE_INFO_UNIT (0xffffffffULL << 32)
#define NV_DEVICE_INFO(n) ((n) | (0x00000000ULL << 32))
-#define NV_DEVICE_FIFO(n) ((n) | (0x00000001ULL << 32))
+#define NV_DEVICE_HOST(n) ((n) | (0x00000001ULL << 32))
-/* This will be returned for unsupported queries. */
+/* This will be returned in the mthd field for unsupported queries. */
#define NV_DEVICE_INFO_INVALID ~0ULL
-/* These return a mask of available engines of particular type. */
-#define NV_DEVICE_INFO_ENGINE_SW NV_DEVICE_INFO(0x00000000)
-#define NV_DEVICE_INFO_ENGINE_GR NV_DEVICE_INFO(0x00000001)
-#define NV_DEVICE_INFO_ENGINE_MPEG NV_DEVICE_INFO(0x00000002)
-#define NV_DEVICE_INFO_ENGINE_ME NV_DEVICE_INFO(0x00000003)
-#define NV_DEVICE_INFO_ENGINE_CIPHER NV_DEVICE_INFO(0x00000004)
-#define NV_DEVICE_INFO_ENGINE_BSP NV_DEVICE_INFO(0x00000005)
-#define NV_DEVICE_INFO_ENGINE_VP NV_DEVICE_INFO(0x00000006)
-#define NV_DEVICE_INFO_ENGINE_CE NV_DEVICE_INFO(0x00000007)
-#define NV_DEVICE_INFO_ENGINE_SEC NV_DEVICE_INFO(0x00000008)
-#define NV_DEVICE_INFO_ENGINE_MSVLD NV_DEVICE_INFO(0x00000009)
-#define NV_DEVICE_INFO_ENGINE_MSPDEC NV_DEVICE_INFO(0x0000000a)
-#define NV_DEVICE_INFO_ENGINE_MSPPP NV_DEVICE_INFO(0x0000000b)
-#define NV_DEVICE_INFO_ENGINE_MSENC NV_DEVICE_INFO(0x0000000c)
-#define NV_DEVICE_INFO_ENGINE_VIC NV_DEVICE_INFO(0x0000000d)
-#define NV_DEVICE_INFO_ENGINE_SEC2 NV_DEVICE_INFO(0x0000000e)
-#define NV_DEVICE_INFO_ENGINE_NVDEC NV_DEVICE_INFO(0x0000000f)
-#define NV_DEVICE_INFO_ENGINE_NVENC NV_DEVICE_INFO(0x00000010)
-
+/* Returns the number of available runlists. */
+#define NV_DEVICE_HOST_RUNLISTS NV_DEVICE_HOST(0x00000000)
/* Returns the number of available channels. */
-#define NV_DEVICE_FIFO_CHANNELS NV_DEVICE_FIFO(0x00000000)
-
-/* Returns a mask of available runlists. */
-#define NV_DEVICE_FIFO_RUNLISTS NV_DEVICE_FIFO(0x00000001)
+#define NV_DEVICE_HOST_CHANNELS NV_DEVICE_HOST(0x00000001)
-/* These return a mask of engines available on a particular runlist. */
-#define NV_DEVICE_FIFO_RUNLIST_ENGINES(n) ((n) + NV_DEVICE_FIFO(0x00000010))
-#define NV_DEVICE_FIFO_RUNLIST_ENGINES__SIZE 64
+/* Returns a mask of available engine types on runlist(data). */
+#define NV_DEVICE_HOST_RUNLIST_ENGINES NV_DEVICE_HOST(0x00000100)
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_SW 0x00000001
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_GR 0x00000002
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_MPEG 0x00000004
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_ME 0x00000008
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_CIPHER 0x00000010
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_BSP 0x00000020
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_VP 0x00000040
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_CE 0x00000080
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_SEC 0x00000100
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD 0x00000200
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC 0x00000400
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP 0x00000800
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_MSENC 0x00001000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_VIC 0x00002000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_SEC2 0x00004000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_NVDEC 0x00008000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_NVENC 0x00010000
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/fifo.h b/drivers/gpu/drm/nouveau/include/nvif/fifo.h
index e9468c9f9abf..d351ac890ca1 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/fifo.h
@@ -2,15 +2,15 @@
#define __NVIF_FIFO_H__
#include <nvif/device.h>
-/* Returns mask of runlists that support a NV_DEVICE_INFO_ENGINE_* type. */
+/* Returns mask of runlists that support a NV_DEVICE_INFO_RUNLIST_ENGINES_* type. */
u64 nvif_fifo_runlist(struct nvif_device *, u64 engine);
/* CE-supporting runlists (excluding GRCE, if others exist). */
static inline u64
nvif_fifo_runlist_ce(struct nvif_device *device)
{
- u64 runmgr = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR);
- u64 runmce = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_CE);
+ u64 runmgr = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
+ u64 runmce = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_CE);
if (runmce && !(runmce &= ~runmgr))
runmce = runmgr;
return runmce;
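nvif_fifo_runlist_ce() above prefers copy-engine runlists that are not shared with graphics, and only falls back to the GR runlist when no dedicated CE runlist exists. A worked example of the mask arithmetic with made-up runlist masks:

#include <linux/types.h>

static u64 demo_pick_ce_runlists(void)
{
	u64 runmgr = 0x1;	/* runlist 0 carries GR (and therefore GRCE) */
	u64 runmce = 0x5;	/* runlists 0 and 2 carry CE */

	if (runmce && !(runmce &= ~runmgr))	/* 0x5 & ~0x1 = 0x4, still non-zero */
		runmce = runmgr;		/* only taken when every CE shares a GR runlist */

	return runmce;				/* here: 0x4, i.e. only runlist 2 */
}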
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push.h b/drivers/gpu/drm/nouveau/include/nvif/push.h
index 168d7694ede5..6d3a8a3d2087 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/push.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/push.h
@@ -123,131 +123,131 @@ PUSH_KICK(struct nvif_push *push)
} while(0)
#endif
-#define PUSH_1(X,f,ds,n,c,o,p,s,mA,dA) do { \
- PUSH_##o##_HDR((p), s, mA, (c)+(n)); \
- PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \
+#define PUSH_1(X,f,ds,n,o,p,s,mA,dA) do { \
+ PUSH_##o##_HDR((p), s, mA, (ds)+(n)); \
+ PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \
} while(0)
-#define PUSH_2(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \
- PUSH_1(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_2(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \
+ PUSH_1(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_3(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \
- PUSH_2(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_3(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \
+ PUSH_2(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_4(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \
- PUSH_3(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_4(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \
+ PUSH_3(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_5(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \
- PUSH_4(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_5(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \
+ PUSH_4(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_6(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \
- PUSH_5(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_6(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \
+ PUSH_5(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_7(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \
- PUSH_6(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_7(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \
+ PUSH_6(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_8(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \
- PUSH_7(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_8(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \
+ PUSH_7(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_9(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \
- PUSH_8(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_9(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \
+ PUSH_8(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_10(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \
- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \
- PUSH_9(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
+#define PUSH_10(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \
+ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \
+ PUSH_9(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \
} while(0)
-#define PUSH_1D(X,o,p,s,mA,dA) \
- PUSH_1(X, DATA_, 1, 1, 0, o, (p), s, X##mA, (dA))
-#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \
- PUSH_2(X, DATA_, 1, 1, 0, o, (p), s, X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \
- PUSH_3(X, DATA_, 1, 1, 0, o, (p), s, X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \
- PUSH_4(X, DATA_, 1, 1, 0, o, (p), s, X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
-#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \
- PUSH_5(X, DATA_, 1, 1, 0, o, (p), s, X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+#define PUSH_1D(X,o,p,s,mA,dA) \
+ PUSH_1(X, DATA_, 1, 0, o, (p), s, X##mA, (dA))
+#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \
+ PUSH_2(X, DATA_, 1, 0, o, (p), s, X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \
+ PUSH_3(X, DATA_, 1, 0, o, (p), s, X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \
+ PUSH_4(X, DATA_, 1, 0, o, (p), s, X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
+#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \
+ PUSH_5(X, DATA_, 1, 0, o, (p), s, X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_6D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF) \
- PUSH_6(X, DATA_, 1, 1, 0, o, (p), s, X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_6(X, DATA_, 1, 0, o, (p), s, X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_7D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG) \
- PUSH_7(X, DATA_, 1, 1, 0, o, (p), s, X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_7(X, DATA_, 1, 0, o, (p), s, X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_8D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH) \
- PUSH_8(X, DATA_, 1, 1, 0, o, (p), s, X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_8(X, DATA_, 1, 0, o, (p), s, X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_9D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI) \
- PUSH_9(X, DATA_, 1, 1, 0, o, (p), s, X##mI, (dI), \
- X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_9(X, DATA_, 1, 0, o, (p), s, X##mI, (dI), \
+ X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_10D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI,mJ,dJ) \
- PUSH_10(X, DATA_, 1, 1, 0, o, (p), s, X##mJ, (dJ), \
- X##mI, (dI), \
- X##mH, (dH), \
- X##mG, (dG), \
- X##mF, (dF), \
- X##mE, (dE), \
- X##mD, (dD), \
- X##mC, (dC), \
- X##mB, (dB), \
- X##mA, (dA))
+ PUSH_10(X, DATA_, 1, 0, o, (p), s, X##mJ, (dJ), \
+ X##mI, (dI), \
+ X##mH, (dH), \
+ X##mG, (dG), \
+ X##mF, (dF), \
+ X##mE, (dE), \
+ X##mD, (dD), \
+ X##mC, (dC), \
+ X##mB, (dB), \
+ X##mA, (dA))
-#define PUSH_1P(X,o,p,s,mA,dp,ds) \
- PUSH_1(X, DATAp, ds, ds, 0, o, (p), s, X##mA, (dp))
-#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \
- PUSH_2(X, DATAp, ds, ds, 0, o, (p), s, X##mB, (dp), \
- X##mA, (dA))
-#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \
- PUSH_3(X, DATAp, ds, ds, 0, o, (p), s, X##mC, (dp), \
- X##mB, (dB), \
- X##mA, (dA))
+#define PUSH_1P(X,o,p,s,mA,dp,ds) \
+ PUSH_1(X, DATAp, ds, 0, o, (p), s, X##mA, (dp))
+#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \
+ PUSH_2(X, DATAp, ds, 0, o, (p), s, X##mB, (dp), \
+ X##mA, (dA))
+#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \
+ PUSH_3(X, DATAp, ds, 0, o, (p), s, X##mC, (dp), \
+ X##mB, (dB), \
+ X##mA, (dA))
#define PUSH_(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,IMPL,...) IMPL
#define PUSH(A...) PUSH_(A, PUSH_10P, PUSH_10D, \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index c920939a1467..a18b6cfda07e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -3,79 +3,7 @@
#define __NVKM_DEVICE_H__
#include <core/oclass.h>
#include <core/event.h>
-
-enum nvkm_devidx {
- NVKM_SUBDEV_PCI,
- NVKM_SUBDEV_VBIOS,
- NVKM_SUBDEV_DEVINIT,
- NVKM_SUBDEV_TOP,
- NVKM_SUBDEV_IBUS,
- NVKM_SUBDEV_GPIO,
- NVKM_SUBDEV_I2C,
- NVKM_SUBDEV_FUSE,
- NVKM_SUBDEV_MXM,
- NVKM_SUBDEV_MC,
- NVKM_SUBDEV_BUS,
- NVKM_SUBDEV_TIMER,
- NVKM_SUBDEV_INSTMEM,
- NVKM_SUBDEV_FB,
- NVKM_SUBDEV_LTC,
- NVKM_SUBDEV_MMU,
- NVKM_SUBDEV_BAR,
- NVKM_SUBDEV_FAULT,
- NVKM_SUBDEV_ACR,
- NVKM_SUBDEV_PMU,
- NVKM_SUBDEV_VOLT,
- NVKM_SUBDEV_ICCSENSE,
- NVKM_SUBDEV_THERM,
- NVKM_SUBDEV_CLK,
- NVKM_SUBDEV_GSP,
-
- NVKM_ENGINE_BSP,
-
- NVKM_ENGINE_CE0,
- NVKM_ENGINE_CE1,
- NVKM_ENGINE_CE2,
- NVKM_ENGINE_CE3,
- NVKM_ENGINE_CE4,
- NVKM_ENGINE_CE5,
- NVKM_ENGINE_CE6,
- NVKM_ENGINE_CE7,
- NVKM_ENGINE_CE8,
- NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE8,
-
- NVKM_ENGINE_CIPHER,
- NVKM_ENGINE_DISP,
- NVKM_ENGINE_DMAOBJ,
- NVKM_ENGINE_FIFO,
- NVKM_ENGINE_GR,
- NVKM_ENGINE_IFB,
- NVKM_ENGINE_ME,
- NVKM_ENGINE_MPEG,
- NVKM_ENGINE_MSENC,
- NVKM_ENGINE_MSPDEC,
- NVKM_ENGINE_MSPPP,
- NVKM_ENGINE_MSVLD,
-
- NVKM_ENGINE_NVENC0,
- NVKM_ENGINE_NVENC1,
- NVKM_ENGINE_NVENC2,
- NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC2,
-
- NVKM_ENGINE_NVDEC0,
- NVKM_ENGINE_NVDEC1,
- NVKM_ENGINE_NVDEC2,
- NVKM_ENGINE_NVDEC_LAST = NVKM_ENGINE_NVDEC2,
-
- NVKM_ENGINE_PM,
- NVKM_ENGINE_SEC,
- NVKM_ENGINE_SEC2,
- NVKM_ENGINE_SW,
- NVKM_ENGINE_VIC,
- NVKM_ENGINE_VP,
-
- NVKM_SUBDEV_NR
-};
+enum nvkm_subdev_type;
enum nvkm_device_type {
NVKM_DEVICE_PCI,
@@ -102,7 +30,6 @@ struct nvkm_device {
struct nvkm_event event;
- u64 disable_mask;
u32 debug;
const struct nvkm_device_chip *chip;
@@ -130,58 +57,16 @@ struct nvkm_device {
struct notifier_block nb;
} acpi;
- struct nvkm_acr *acr;
- struct nvkm_bar *bar;
- struct nvkm_bios *bios;
- struct nvkm_bus *bus;
- struct nvkm_clk *clk;
- struct nvkm_devinit *devinit;
- struct nvkm_fault *fault;
- struct nvkm_fb *fb;
- struct nvkm_fuse *fuse;
- struct nvkm_gpio *gpio;
- struct nvkm_gsp *gsp;
- struct nvkm_i2c *i2c;
- struct nvkm_subdev *ibus;
- struct nvkm_iccsense *iccsense;
- struct nvkm_instmem *imem;
- struct nvkm_ltc *ltc;
- struct nvkm_mc *mc;
- struct nvkm_mmu *mmu;
- struct nvkm_subdev *mxm;
- struct nvkm_pci *pci;
- struct nvkm_pmu *pmu;
- struct nvkm_therm *therm;
- struct nvkm_timer *timer;
- struct nvkm_top *top;
- struct nvkm_volt *volt;
-
- struct nvkm_engine *bsp;
- struct nvkm_engine *ce[9];
- struct nvkm_engine *cipher;
- struct nvkm_disp *disp;
- struct nvkm_dma *dma;
- struct nvkm_fifo *fifo;
- struct nvkm_gr *gr;
- struct nvkm_engine *ifb;
- struct nvkm_engine *me;
- struct nvkm_engine *mpeg;
- struct nvkm_engine *msenc;
- struct nvkm_engine *mspdec;
- struct nvkm_engine *msppp;
- struct nvkm_engine *msvld;
- struct nvkm_nvenc *nvenc[3];
- struct nvkm_nvdec *nvdec[3];
- struct nvkm_pm *pm;
- struct nvkm_engine *sec;
- struct nvkm_sec2 *sec2;
- struct nvkm_sw *sw;
- struct nvkm_engine *vic;
- struct nvkm_engine *vp;
+#define NVKM_LAYOUT_ONCE(type,data,ptr) data *ptr;
+#define NVKM_LAYOUT_INST(type,data,ptr,cnt) data *ptr[cnt];
+#include <core/layout.h>
+#undef NVKM_LAYOUT_INST
+#undef NVKM_LAYOUT_ONCE
+ struct list_head subdev;
};
-struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int index);
-struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int index);
+struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int type, int inst);
+struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int type, int inst);
struct nvkm_device_func {
struct nvkm_device_pci *(*pci)(struct nvkm_device *);
@@ -202,55 +87,15 @@ struct nvkm_device_quirk {
struct nvkm_device_chip {
const char *name;
-
- int (*acr )(struct nvkm_device *, int idx, struct nvkm_acr **);
- int (*bar )(struct nvkm_device *, int idx, struct nvkm_bar **);
- int (*bios )(struct nvkm_device *, int idx, struct nvkm_bios **);
- int (*bus )(struct nvkm_device *, int idx, struct nvkm_bus **);
- int (*clk )(struct nvkm_device *, int idx, struct nvkm_clk **);
- int (*devinit )(struct nvkm_device *, int idx, struct nvkm_devinit **);
- int (*fault )(struct nvkm_device *, int idx, struct nvkm_fault **);
- int (*fb )(struct nvkm_device *, int idx, struct nvkm_fb **);
- int (*fuse )(struct nvkm_device *, int idx, struct nvkm_fuse **);
- int (*gpio )(struct nvkm_device *, int idx, struct nvkm_gpio **);
- int (*gsp )(struct nvkm_device *, int idx, struct nvkm_gsp **);
- int (*i2c )(struct nvkm_device *, int idx, struct nvkm_i2c **);
- int (*ibus )(struct nvkm_device *, int idx, struct nvkm_subdev **);
- int (*iccsense)(struct nvkm_device *, int idx, struct nvkm_iccsense **);
- int (*imem )(struct nvkm_device *, int idx, struct nvkm_instmem **);
- int (*ltc )(struct nvkm_device *, int idx, struct nvkm_ltc **);
- int (*mc )(struct nvkm_device *, int idx, struct nvkm_mc **);
- int (*mmu )(struct nvkm_device *, int idx, struct nvkm_mmu **);
- int (*mxm )(struct nvkm_device *, int idx, struct nvkm_subdev **);
- int (*pci )(struct nvkm_device *, int idx, struct nvkm_pci **);
- int (*pmu )(struct nvkm_device *, int idx, struct nvkm_pmu **);
- int (*therm )(struct nvkm_device *, int idx, struct nvkm_therm **);
- int (*timer )(struct nvkm_device *, int idx, struct nvkm_timer **);
- int (*top )(struct nvkm_device *, int idx, struct nvkm_top **);
- int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **);
-
- int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*ce[9] )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*disp )(struct nvkm_device *, int idx, struct nvkm_disp **);
- int (*dma )(struct nvkm_device *, int idx, struct nvkm_dma **);
- int (*fifo )(struct nvkm_device *, int idx, struct nvkm_fifo **);
- int (*gr )(struct nvkm_device *, int idx, struct nvkm_gr **);
- int (*ifb )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*me )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*mpeg )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*msenc )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_nvenc **);
- int (*nvdec[3])(struct nvkm_device *, int idx, struct nvkm_nvdec **);
- int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
- int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*sec2 )(struct nvkm_device *, int idx, struct nvkm_sec2 **);
- int (*sw )(struct nvkm_device *, int idx, struct nvkm_sw **);
- int (*vic )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*vp )(struct nvkm_device *, int idx, struct nvkm_engine **);
+#define NVKM_LAYOUT_ONCE(type,data,ptr,...) \
+ struct { \
+ u32 inst; \
+ int (*ctor)(struct nvkm_device *, enum nvkm_subdev_type, int inst, data **); \
+ } ptr;
+#define NVKM_LAYOUT_INST(A...) NVKM_LAYOUT_ONCE(A)
+#include <core/layout.h>
+#undef NVKM_LAYOUT_INST
+#undef NVKM_LAYOUT_ONCE
};
struct nvkm_device *nvkm_device_find(u64 name);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index c6b401a6ea23..e58923b67d74 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -6,12 +6,18 @@
struct nvkm_fifo_chan;
struct nvkm_fb_tile;
+extern const struct nvkm_subdev_func nvkm_engine;
+
struct nvkm_engine {
const struct nvkm_engine_func *func;
struct nvkm_subdev subdev;
spinlock_t lock;
- int usecount;
+ struct {
+ refcount_t refcount;
+ struct mutex mutex;
+ bool enabled;
+ } use;
};
struct nvkm_engine_func {
@@ -42,9 +48,10 @@ struct nvkm_engine_func {
};
int nvkm_engine_ctor(const struct nvkm_engine_func *, struct nvkm_device *,
- int index, bool enable, struct nvkm_engine *);
+ enum nvkm_subdev_type, int inst, bool enable, struct nvkm_engine *);
int nvkm_engine_new_(const struct nvkm_engine_func *, struct nvkm_device *,
- int index, bool enable, struct nvkm_engine **);
+ enum nvkm_subdev_type, int, bool enable, struct nvkm_engine **);
+
struct nvkm_engine *nvkm_engine_ref(struct nvkm_engine *);
void nvkm_engine_unref(struct nvkm_engine **);
void nvkm_engine_tile(struct nvkm_engine *, int region);
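The engine.h change replaces the plain usecount with a refcount_t plus a mutex that serialises the actual power-up/down of the engine. A hedged sketch of that get/put pattern; the refcount_*() and mutex APIs are the real kernel ones, while the struct and the hardware hooks are hypothetical:

#include <linux/mutex.h>
#include <linux/refcount.h>

struct demo_use {
	refcount_t refcount;
	struct mutex mutex;
	bool enabled;
};

/* First user powers the engine on; later users only take a reference. */
static int demo_use_get(struct demo_use *use)
{
	if (refcount_inc_not_zero(&use->refcount))
		return 0;

	mutex_lock(&use->mutex);
	if (!refcount_read(&use->refcount)) {
		use->enabled = true;		/* hardware enable would go here */
		refcount_set(&use->refcount, 1);
	} else {
		refcount_inc(&use->refcount);	/* raced with another first user */
	}
	mutex_unlock(&use->mutex);
	return 0;
}

/* Last user powers the engine back off, under the same mutex. */
static void demo_use_put(struct demo_use *use)
{
	if (refcount_dec_and_mutex_lock(&use->refcount, &use->mutex)) {
		use->enabled = false;		/* hardware disable would go here */
		mutex_unlock(&use->mutex);
	}
}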
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
index ce98efd4b209..070462be35d9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
@@ -8,6 +8,7 @@ struct nvkm_enum {
const char *name;
const void *data;
u32 data2;
+ int inst;
};
const struct nvkm_enum *nvkm_enum_find(const struct nvkm_enum *, u32 value);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
index 3981cb106aae..fd9a3f9a518e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
@@ -21,11 +21,11 @@ void nvkm_falcon_v1_disable(struct nvkm_falcon *);
void gp102_sec2_flcn_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
int gp102_sec2_flcn_enable(struct nvkm_falcon *);
-#define FLCN_PRINTK(t,f,fmt,a...) do { \
- if (nvkm_subdev_name[(f)->owner->index] != (f)->name) \
- nvkm_##t((f)->owner, "%s: "fmt"\n", (f)->name, ##a); \
- else \
- nvkm_##t((f)->owner, fmt"\n", ##a); \
+#define FLCN_PRINTK(t,f,fmt,a...) do { \
+ if ((f)->owner->name != (f)->name) \
+ nvkm_##t((f)->owner, "%s: "fmt"\n", (f)->name, ##a); \
+ else \
+ nvkm_##t((f)->owner, fmt"\n", ##a); \
} while(0)
#define FLCN_DBG(f,fmt,a...) FLCN_PRINTK(debug, (f), fmt, ##a)
#define FLCN_ERR(f,fmt,a...) FLCN_PRINTK(error, (f), fmt, ##a)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
new file mode 100644
index 000000000000..7afe1579b20f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_PCI , struct nvkm_pci , pci)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VBIOS , struct nvkm_bios , bios)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_DEVINIT , struct nvkm_devinit , devinit)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TOP , struct nvkm_top , top)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_PRIVRING, struct nvkm_subdev , privring)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_GPIO , struct nvkm_gpio , gpio)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_I2C , struct nvkm_i2c , i2c)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FUSE , struct nvkm_fuse , fuse)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_MXM , struct nvkm_subdev , mxm)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_MC , struct nvkm_mc , mc)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_BUS , struct nvkm_bus , bus)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TIMER , struct nvkm_timer , timer)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_INSTMEM , struct nvkm_instmem , imem)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FB , struct nvkm_fb , fb)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_LTC , struct nvkm_ltc , ltc)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_MMU , struct nvkm_mmu , mmu)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_BAR , struct nvkm_bar , bar)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FAULT , struct nvkm_fault , fault)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_ACR , struct nvkm_acr , acr)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_PMU , struct nvkm_pmu , pmu)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VOLT , struct nvkm_volt , volt)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_ICCSENSE, struct nvkm_iccsense, iccsense)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_THERM , struct nvkm_therm , therm)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_CLK , struct nvkm_clk , clk)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_GSP , struct nvkm_gsp , gsp)
+NVKM_LAYOUT_INST(NVKM_SUBDEV_IOCTRL , struct nvkm_subdev , ioctrl, 3)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FLA , struct nvkm_subdev , fla)
+
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_BSP , struct nvkm_engine , bsp)
+NVKM_LAYOUT_INST(NVKM_ENGINE_CE , struct nvkm_engine , ce, 10)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_CIPHER , struct nvkm_engine , cipher)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_DISP , struct nvkm_disp , disp)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_DMAOBJ , struct nvkm_dma , dma)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_FIFO , struct nvkm_fifo , fifo)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_GR , struct nvkm_gr , gr)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_IFB , struct nvkm_engine , ifb)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_ME , struct nvkm_engine , me)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_MPEG , struct nvkm_engine , mpeg)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSENC , struct nvkm_engine , msenc)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPDEC , struct nvkm_engine , mspdec)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPPP , struct nvkm_engine , msppp)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSVLD , struct nvkm_engine , msvld)
+NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC , struct nvkm_nvdec , nvdec, 5)
+NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 3)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_NVJPG , struct nvkm_engine , nvjpg)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_OFA , struct nvkm_engine , ofa)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_PM , struct nvkm_pm , pm)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC , struct nvkm_engine , sec)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC2 , struct nvkm_sec2 , sec2)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_SW , struct nvkm_sw , sw)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_VIC , struct nvkm_engine , vic)
+NVKM_LAYOUT_ONCE(NVKM_ENGINE_VP , struct nvkm_engine , vp)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 76288c682e9e..1665738948fb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -3,13 +3,25 @@
#define __NVKM_SUBDEV_H__
#include <core/device.h>
+enum nvkm_subdev_type {
+#define NVKM_LAYOUT_ONCE(t,s,p,...) t,
+#define NVKM_LAYOUT_INST NVKM_LAYOUT_ONCE
+#include <core/layout.h>
+#undef NVKM_LAYOUT_INST
+#undef NVKM_LAYOUT_ONCE
+ NVKM_SUBDEV_NR
+};
+
struct nvkm_subdev {
const struct nvkm_subdev_func *func;
struct nvkm_device *device;
- enum nvkm_devidx index;
- struct mutex mutex;
+ enum nvkm_subdev_type type;
+ int inst;
+ char name[16];
u32 debug;
+ struct list_head head;
+ void **pself;
bool oneinit;
};
@@ -23,11 +35,12 @@ struct nvkm_subdev_func {
void (*intr)(struct nvkm_subdev *);
};
-extern const char *nvkm_subdev_name[NVKM_SUBDEV_NR];
-int nvkm_subdev_new_(const struct nvkm_subdev_func *, struct nvkm_device *,
- int index, struct nvkm_subdev **);
+extern const char *nvkm_subdev_type[NVKM_SUBDEV_NR];
+int nvkm_subdev_new_(const struct nvkm_subdev_func *, struct nvkm_device *, enum nvkm_subdev_type,
+ int inst, struct nvkm_subdev **);
void nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
- int index, struct nvkm_subdev *);
+ enum nvkm_subdev_type, int inst, struct nvkm_subdev *);
+void nvkm_subdev_disable(struct nvkm_device *, enum nvkm_subdev_type, int inst);
void nvkm_subdev_del(struct nvkm_subdev **);
int nvkm_subdev_preinit(struct nvkm_subdev *);
int nvkm_subdev_init(struct nvkm_subdev *);
@@ -38,10 +51,8 @@ void nvkm_subdev_intr(struct nvkm_subdev *);
/* subdev logging */
#define nvkm_printk_(s,l,p,f,a...) do { \
const struct nvkm_subdev *_subdev = (s); \
- if (CONFIG_NOUVEAU_DEBUG >= (l) && _subdev->debug >= (l)) { \
- dev_##p(_subdev->device->dev, "%s: "f, \
- nvkm_subdev_name[_subdev->index], ##a); \
- } \
+ if (CONFIG_NOUVEAU_DEBUG >= (l) && _subdev->debug >= (l)) \
+ dev_##p(_subdev->device->dev, "%s: "f, _subdev->name, ##a); \
} while(0)
#define nvkm_printk(s,l,p,f,a...) nvkm_printk_((s), NV_DBG_##l, p, f, ##a)
#define nvkm_fatal(s,f,a...) nvkm_printk((s), FATAL, crit, f, ##a)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
index f938f024db81..d5530faf025e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
@@ -2,5 +2,5 @@
#ifndef __NVKM_BSP_H__
#define __NVKM_BSP_H__
#include <engine/xtensa.h>
-int g84_bsp_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g84_bsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index 86f420f4630b..cfd2da8e66fe 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -3,13 +3,13 @@
#define __NVKM_CE_H__
#include <engine/falcon.h>
-int gt215_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gp102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gv100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int tu102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gt215_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gf100_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gk104_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gm107_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gm200_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gp100_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gp102_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gv100_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int tu102_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
index 66c5c5e27520..9da9176bbbbf 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
@@ -2,5 +2,5 @@
#ifndef __NVKM_CIPHER_H__
#define __NVKM_CIPHER_H__
#include <core/engine.h>
-int g84_cipher_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g84_cipher_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index 0f6fa6631a19..d08d3337ba0d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -17,25 +17,28 @@ struct nvkm_disp {
struct nvkm_event hpd;
struct nvkm_event vblank;
- struct nvkm_oproxy *client;
+ struct {
+ spinlock_t lock;
+ struct nvkm_oproxy *object;
+ } client;
};
-int nv04_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int nv50_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int g84_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gt200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int g94_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int mcp77_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gt215_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int mcp89_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gf119_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gv100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int tu102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int ga102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int nv04_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int nv50_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int g84_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gt200_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int g94_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int mcp77_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gt215_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int mcp89_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gf119_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gk104_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gk110_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gm107_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gm200_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gp100_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gp102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int gv100_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int tu102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int ga102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
index 2e12cdb6bb93..a003da39fd13 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -23,9 +23,9 @@ struct nvkm_dma {
struct nvkm_dmaobj *nvkm_dmaobj_search(struct nvkm_client *, u64 object);
-int nv04_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
-int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
-int gf100_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
-int gf119_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
-int gv100_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+int nv04_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **);
+int nv50_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **);
+int gf100_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **);
+int gf119_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **);
+int gv100_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index 27c1f868552c..306125d17ece 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -64,7 +64,7 @@ int nvkm_falcon_get(struct nvkm_falcon *, const struct nvkm_subdev *);
void nvkm_falcon_put(struct nvkm_falcon *, const struct nvkm_subdev *);
int nvkm_falcon_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
- int index, bool enable, u32 addr, struct nvkm_engine **);
+ enum nvkm_subdev_type, int inst, bool enable, u32 addr, struct nvkm_engine **);
struct nvkm_falcon_func {
struct {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index b335f3a1e66d..54fab7cc36c1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -7,6 +7,7 @@
struct nvkm_fault_data;
#define NVKM_FIFO_CHID_NR 4096
+#define NVKM_FIFO_ENGN_NR 16
struct nvkm_fifo_engn {
struct nvkm_object *object;
@@ -17,7 +18,7 @@ struct nvkm_fifo_engn {
struct nvkm_fifo_chan {
const struct nvkm_fifo_chan_func *func;
struct nvkm_fifo *fifo;
- u64 engines;
+ u32 engm;
struct nvkm_object object;
struct list_head head;
@@ -29,7 +30,7 @@ struct nvkm_fifo_chan {
u64 addr;
u32 size;
- struct nvkm_fifo_engn engn[NVKM_SUBDEV_NR];
+ struct nvkm_fifo_engn engn[NVKM_FIFO_ENGN_NR];
};
struct nvkm_fifo {
@@ -40,6 +41,7 @@ struct nvkm_fifo {
int nr;
struct list_head chan;
spinlock_t lock;
+ struct mutex mutex;
struct nvkm_event uevent; /* async user trigger */
struct nvkm_event cevent; /* channel creation event */
@@ -57,22 +59,22 @@ nvkm_fifo_chan_inst(struct nvkm_fifo *, u64 inst, unsigned long *flags);
struct nvkm_fifo_chan *
nvkm_fifo_chan_chid(struct nvkm_fifo *, int chid, unsigned long *flags);
-int nv04_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int nv10_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int nv17_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int nv40_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int nv50_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int g84_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gf100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gk104_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gk110_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gk208_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gp10b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gv100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int tu102_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv04_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int nv10_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int nv17_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int nv40_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int nv50_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int g84_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gf100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gk104_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gk110_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gk208_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gk20a_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gm107_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gm200_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gm20b_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gp100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gp10b_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int gv100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int tu102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 1530c81f86a2..b28b752ffaa2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -14,44 +14,44 @@ int nvkm_gr_ctxsw_pause(struct nvkm_device *);
int nvkm_gr_ctxsw_resume(struct nvkm_device *);
u32 nvkm_gr_ctxsw_inst(struct nvkm_device *);
-int nv04_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv10_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv15_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv17_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv20_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv25_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv2a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv30_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv34_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv35_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv40_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv44_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int nv50_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int g84_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gt200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int mcp79_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gt215_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int mcp89_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf108_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf110_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf117_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gf119_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gk104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gk110_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gk110b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gk208_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp108_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gv100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int tu102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv04_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv10_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv15_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv17_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv20_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv25_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv2a_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv30_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv34_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv35_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv40_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv44_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int nv50_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int g84_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gt200_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int mcp79_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gt215_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int mcp89_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf104_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf108_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf110_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf117_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gf119_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gk104_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gk110_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gk110b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gk208_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gk20a_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gm107_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gm200_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gm20b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp104_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp107_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp108_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gp10b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int gv100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int tu102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
index 8585a31f5943..f137f277935f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
@@ -2,9 +2,9 @@
#ifndef __NVKM_MPEG_H__
#define __NVKM_MPEG_H__
#include <core/engine.h>
-int nv31_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
-int nv40_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
-int nv44_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
-int nv50_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
-int g84_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int nv31_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int nv40_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int nv44_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int nv50_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int g84_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
index 83bb2fcb2cbf..ac8f08ce183c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
@@ -2,8 +2,8 @@
#ifndef __NVKM_MSPDEC_H__
#define __NVKM_MSPDEC_H__
#include <engine/falcon.h>
-int g98_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gt215_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gf100_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gk104_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g98_mspdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gt215_mspdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gf100_mspdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gk104_mspdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
index 69e09fd96e0c..81c2b6f0ad84 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
@@ -2,7 +2,7 @@
#ifndef __NVKM_MSPPP_H__
#define __NVKM_MSPPP_H__
#include <engine/falcon.h>
-int g98_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gt215_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gf100_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g98_msppp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gt215_msppp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gf100_msppp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
index 9e11cefc9649..2d5fa961ba66 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
@@ -2,9 +2,9 @@
#ifndef __NVKM_MSVLD_H__
#define __NVKM_MSVLD_H__
#include <engine/falcon.h>
-int g98_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gt215_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
-int mcp89_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gf100_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gk104_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g98_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gt215_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int mcp89_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gf100_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int gk104_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index 1b3183e31606..97bd3092f68a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -11,5 +11,5 @@ struct nvkm_nvdec {
struct nvkm_falcon falcon;
};
-int gm107_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **);
+int gm107_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
index 33e6ba8adc8d..1a259c5c9a71 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
@@ -11,5 +11,5 @@ struct nvkm_nvenc {
struct nvkm_falcon falcon;
};
-int gm107_nvenc_new(struct nvkm_device *, int, struct nvkm_nvenc **);
+int gm107_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
index 4d754e7650d9..af89d46ea360 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
@@ -7,20 +7,23 @@ struct nvkm_pm {
const struct nvkm_pm_func *func;
struct nvkm_engine engine;
- struct nvkm_object *perfmon;
+ struct {
+ spinlock_t lock;
+ struct nvkm_object *object;
+ } client;
struct list_head domains;
struct list_head sources;
u32 sequence;
};
-int nv40_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int nv50_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int g84_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gt200_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gt215_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gf100_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gf108_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gf117_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
-int gk104_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int nv40_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int nv50_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int g84_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gt200_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gt215_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gf100_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gf108_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gf117_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
+int gk104_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
index f14e98a8a0ca..37ed7ab8d050 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
@@ -2,5 +2,5 @@
#ifndef __NVKM_SEC_H__
#define __NVKM_SEC_H__
#include <engine/falcon.h>
-int g98_sec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g98_sec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
index 34dc765648d5..06264c840eae 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
@@ -18,7 +18,7 @@ struct nvkm_sec2 {
bool initmsg_received;
};
-int gp102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
-int gp108_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
-int tu102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
+int gp102_sec2_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sec2 **);
+int gp108_sec2_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sec2 **);
+int tu102_sec2_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sec2 **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
index 2e91769e3ee2..b1a53ffbfdef 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
@@ -12,8 +12,8 @@ struct nvkm_sw {
bool nvkm_sw_mthd(struct nvkm_sw *sw, int chid, int subc, u32 mthd, u32 data);
-int nv04_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
-int nv10_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
-int nv50_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
-int gf100_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
+int nv04_sw_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sw **);
+int nv10_sw_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sw **);
+int nv50_sw_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sw **);
+int gf100_sw_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sw **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
index 8984415b2a3d..1bab26858538 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
@@ -2,5 +2,5 @@
#ifndef __NVKM_VP_H__
#define __NVKM_VP_H__
#include <engine/xtensa.h>
-int g84_vp_new(struct nvkm_device *, int, struct nvkm_engine **);
+int g84_vp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
index fbf27b2293a9..3083a5866a55 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
@@ -13,7 +13,7 @@ struct nvkm_xtensa {
};
int nvkm_xtensa_new_(const struct nvkm_xtensa_func *, struct nvkm_device *,
- int index, bool enable, u32 addr, struct nvkm_engine **);
+ enum nvkm_subdev_type, int, bool enable, u32 addr, struct nvkm_engine **);
struct nvkm_xtensa_func {
u32 fifo_val;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
index 836d8b932822..c0b254f7f0b5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
@@ -59,12 +59,12 @@ struct nvkm_acr {
bool nvkm_acr_managed_falcon(struct nvkm_device *, enum nvkm_acr_lsf_id);
int nvkm_acr_bootstrap_falcons(struct nvkm_device *, unsigned long mask);
-int gm200_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
-int gm20b_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
-int gp102_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
-int gp108_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
-int gp10b_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
-int tu102_acr_new(struct nvkm_device *, int, struct nvkm_acr **);
+int gm200_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
+int gm20b_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
+int gp102_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
+int gp108_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
+int gp10b_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
+int tu102_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **);
struct nvkm_acr_lsfw {
const struct nvkm_acr_lsf_func *func;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index 14b09f7e46a5..4f07836ab984 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -23,11 +23,11 @@ void nvkm_bar_bar2_reset(struct nvkm_device *);
struct nvkm_vmm *nvkm_bar_bar2_vmm(struct nvkm_device *);
void nvkm_bar_flush(struct nvkm_bar *);
-int nv50_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int g84_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int gm107_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int gm20b_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int tu102_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int nv50_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int g84_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int gf100_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int gk20a_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int gm107_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int gm20b_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+int tu102_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
index f2860f8e0c2e..b61cfb077533 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
@@ -30,5 +30,5 @@ u8 nvbios_rd08(struct nvkm_bios *, u32 addr);
u16 nvbios_rd16(struct nvkm_bios *, u32 addr);
u32 nvbios_rd32(struct nvkm_bios *, u32 addr);
-int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **);
+int nvkm_bios_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_bios **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
index f5f59261ea81..d1beaad0c82b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
@@ -14,6 +14,7 @@ enum dcb_connector_type {
DCB_CONNECTOR_LVDS_SPWG = 0x41,
DCB_CONNECTOR_DP = 0x46,
DCB_CONNECTOR_eDP = 0x47,
+ DCB_CONNECTOR_mDP = 0x48,
DCB_CONNECTOR_HDMI_0 = 0x60,
DCB_CONNECTOR_HDMI_1 = 0x61,
DCB_CONNECTOR_HDMI_C = 0x63,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
index ae9ad6c034fb..2ac03bbc6133 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
@@ -18,9 +18,9 @@ void nvkm_hwsq_wait(struct nvkm_hwsq *, u8 flag, u8 data);
void nvkm_hwsq_wait_vblank(struct nvkm_hwsq *);
void nvkm_hwsq_nsec(struct nvkm_hwsq *, u32 nsec);
-int nv04_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
-int nv31_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
-int nv50_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
-int g94_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
-int gf100_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int nv04_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **);
+int nv31_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **);
+int nv50_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **);
+int g94_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **);
+int gf100_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index bf937e7dfd77..05b99c9e9a26 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -125,14 +125,14 @@ int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
int nvkm_clk_tstate(struct nvkm_clk *, u8 temperature);
-int nv04_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int nv40_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int nv50_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int g84_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int mcp77_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int gt215_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int gf100_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int gk104_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int gk20a_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
-int gm20b_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int nv04_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int nv40_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int nv50_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int g84_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int mcp77_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gt215_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gf100_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gk104_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gk20a_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gm20b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 50cc7c05eac4..848b5d9ce705 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -14,23 +14,22 @@ struct nvkm_devinit {
u32 nvkm_devinit_mmio(struct nvkm_devinit *, u32 addr);
int nvkm_devinit_pll_set(struct nvkm_devinit *, u32 type, u32 khz);
void nvkm_devinit_meminit(struct nvkm_devinit *);
-u64 nvkm_devinit_disable(struct nvkm_devinit *);
-int nvkm_devinit_post(struct nvkm_devinit *, u64 *disable);
+int nvkm_devinit_post(struct nvkm_devinit *);
-int nv04_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int nv05_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int nv10_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int nv1a_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int nv20_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int nv50_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int g84_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int g98_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int gt215_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int mcp89_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int gv100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int tu102_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int ga100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv04_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int nv05_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int nv10_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int nv1a_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int nv20_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int nv50_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int g84_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int g98_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int gt215_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int mcp89_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int gf100_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int gm107_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int gm200_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int gv100_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int tu102_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+int ga100_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
index a513c16ab105..581458ad38e0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -30,8 +30,8 @@ struct nvkm_fault_data {
u8 reason;
};
-int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
-int gp10b_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
-int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
-int tu102_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
+int gp100_fault_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fault **);
+int gp10b_fault_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fault **);
+int gv100_fault_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fault **);
+int tu102_fault_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fault **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 2ecd52aec1d1..ef6a6297148c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -36,7 +36,11 @@ struct nvkm_fb {
struct nvkm_blob vpr_scrubber;
struct nvkm_ram *ram;
- struct nvkm_mm tags;
+
+ struct {
+ struct mutex mutex; /* protects mm and nvkm_memory::tags */
+ struct nvkm_mm mm;
+ } tags;
struct {
struct nvkm_fb_tile region[16];
@@ -54,40 +58,40 @@ void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
void nvkm_fb_tile_fini(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
void nvkm_fb_tile_prog(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
-int nv04_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv10_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv1a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv20_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv25_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv30_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv35_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv36_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv40_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv41_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv44_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv46_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv47_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv49_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv4e_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int nv50_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int g84_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gt215_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int mcp77_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int mcp89_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gf108_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gk110_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gm20b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gp10b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int gv100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int ga100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
-int ga102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv04_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv10_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv1a_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv20_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv25_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv30_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv35_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv36_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv40_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv41_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv44_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv46_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv47_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv49_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv4e_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int nv50_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int g84_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gt215_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int mcp77_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int mcp89_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gf100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gf108_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gk104_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gk110_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gk20a_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gm107_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gm200_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gm20b_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gp100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gp102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gp10b_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gv100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int ga100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int ga102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
@@ -128,6 +132,7 @@ struct nvkm_ram {
#define NVKM_RAM_MM_MIXED (NVKM_MM_HEAP_ANY + 3)
struct nvkm_mm vram;
u64 stolen;
+ struct mutex mutex;
int ranks;
int parts;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
index 00111c34311e..dabbef0ac96c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
@@ -11,7 +11,7 @@ struct nvkm_fuse {
u32 nvkm_fuse_read(struct nvkm_fuse *, u32 addr);
-int nv50_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
-int gf100_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
-int gm107_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
+int nv50_fuse_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fuse **);
+int gf100_fuse_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fuse **);
+int gm107_fuse_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fuse **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
index cdcce5ece6ff..0e46ea1fe972 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
@@ -32,10 +32,10 @@ int nvkm_gpio_find(struct nvkm_gpio *, int idx, u8 tag, u8 line,
int nvkm_gpio_set(struct nvkm_gpio *, int idx, u8 tag, u8 line, int state);
int nvkm_gpio_get(struct nvkm_gpio *, int idx, u8 tag, u8 line);
-int nv10_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
-int nv50_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
-int g94_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
-int gf119_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
-int gk104_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
-int ga102_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int nv10_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
+int nv50_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
+int g94_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
+int gf119_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
+int gk104_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
+int ga102_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 06db67610a50..cf42a59d4e58 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -9,5 +9,5 @@ struct nvkm_gsp {
struct nvkm_falcon falcon;
};
-int gv100_gsp_new(struct nvkm_device *, int, struct nvkm_gsp **);
+int gv100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index 640f649ce497..146e13292203 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -85,15 +85,15 @@ struct nvkm_i2c {
struct nvkm_i2c_bus *nvkm_i2c_bus_find(struct nvkm_i2c *, int);
struct nvkm_i2c_aux *nvkm_i2c_aux_find(struct nvkm_i2c *, int);
-int nv04_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int nv4e_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int nv50_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int g94_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int gk110_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int gm200_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int nv04_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int nv4e_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int nv50_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int g94_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int gf117_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int gf119_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int gk104_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int gk110_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
+int gm200_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **);
static inline int
nvkm_rdi2cr(struct i2c_adapter *adap, u8 addr, u8 reg)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
deleted file mode 100644
index db791411eaa8..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_IBUS_H__
-#define __NVKM_IBUS_H__
-#include <core/subdev.h>
-
-int gf100_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-int gf117_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-int gm200_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-int gp10b_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
index f483dcd7cd1c..7400d62dcbec 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
@@ -14,6 +14,6 @@ struct nvkm_iccsense {
u32 power_w_crit;
};
-int gf100_iccsense_new(struct nvkm_device *, int index, struct nvkm_iccsense **);
+int gf100_iccsense_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_iccsense **);
int nvkm_iccsense_read_all(struct nvkm_iccsense *iccsense);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index c74ab7c31d05..f967b97d163c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -13,6 +13,11 @@ struct nvkm_instmem {
struct list_head boot;
u32 reserved;
+ /* <=nv4x: protects NV_PRAMIN/BAR2 MM
+ * >=nv50: protects BAR2 MM & LRU
+ */
+ struct mutex mutex;
+
struct nvkm_memory *vbios;
struct nvkm_ramht *ramht;
struct nvkm_memory *ramro;
@@ -25,8 +30,8 @@ int nvkm_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero,
struct nvkm_memory **);
-int nv04_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
-int nv40_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
-int nv50_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
-int gk20a_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
+int nv04_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
+int nv40_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
+int nv50_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
+int gk20a_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index d76f60d7d29a..d32a326a9290 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -13,6 +13,7 @@ struct nvkm_ltc {
u32 ltc_nr;
u32 lts_nr;
+ struct mutex mutex; /* serialises CBC operations */
u32 num_tags;
u32 tag_base;
struct nvkm_memory *tag_ram;
@@ -33,12 +34,11 @@ int nvkm_ltc_zbc_stencil_get(struct nvkm_ltc *, int index, const u32);
void nvkm_ltc_invalidate(struct nvkm_ltc *);
void nvkm_ltc_flush(struct nvkm_ltc *);
-int gf100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gp102_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gp10b_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gf100_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gk104_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gm107_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gm200_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gp100_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gp102_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
+int gp10b_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index e45ca4583967..cb86a56e68d4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -8,29 +8,29 @@ struct nvkm_mc {
struct nvkm_subdev subdev;
};
-void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx);
-void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx);
-bool nvkm_mc_enabled(struct nvkm_device *, enum nvkm_devidx);
-void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_enable(struct nvkm_device *, enum nvkm_subdev_type, int);
+void nvkm_mc_disable(struct nvkm_device *, enum nvkm_subdev_type, int);
+bool nvkm_mc_enabled(struct nvkm_device *, enum nvkm_subdev_type, int);
+void nvkm_mc_reset(struct nvkm_device *, enum nvkm_subdev_type, int);
void nvkm_mc_intr(struct nvkm_device *, bool *handled);
void nvkm_mc_intr_unarm(struct nvkm_device *);
void nvkm_mc_intr_rearm(struct nvkm_device *);
-void nvkm_mc_intr_mask(struct nvkm_device *, enum nvkm_devidx, bool enable);
+void nvkm_mc_intr_mask(struct nvkm_device *, enum nvkm_subdev_type, int, bool enable);
void nvkm_mc_unk260(struct nvkm_device *, u32 data);
-int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int nv11_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int nv17_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int nv44_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int nv50_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int g84_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int g98_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gt215_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int tu102_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int ga100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int nv04_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int nv11_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int nv17_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int nv44_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int nv50_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int g84_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int g98_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gt215_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gf100_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gk104_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gk20a_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gp100_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int gp10b_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int tu102_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
+int ga100_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 54cdcb017518..0911e73f7424 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -117,22 +117,24 @@ struct nvkm_mmu {
struct list_head list;
} ptc, ptp;
+ struct mutex mutex; /* serialises mmu invalidations */
+
struct nvkm_device_oclass user;
};
-int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int mcp77_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gm200_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int gv100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int tu102_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int nv04_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int nv41_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int nv44_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int nv50_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int g84_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int mcp77_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gf100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gk104_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gk20a_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gm200_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gm20b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gp100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gp10b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gv100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int tu102_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
index 78df1e9def05..7d4132a17d0f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
@@ -3,5 +3,5 @@
#define __NVKM_MXM_H__
#include <core/subdev.h>
-int nv50_mxm_new(struct nvkm_device *, int, struct nvkm_subdev **);
+int nv50_mxm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
index 4803a4fad4a2..74c19bdfb757 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -39,17 +39,17 @@ void nvkm_pci_wr32(struct nvkm_pci *, u16 addr, u32 data);
u32 nvkm_pci_mask(struct nvkm_pci *, u16 addr, u32 mask, u32 value);
void nvkm_pci_rom_shadow(struct nvkm_pci *, bool shadow);
-int nv04_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int nv40_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int nv46_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int g84_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int g92_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int gk104_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
-int gp100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int nv04_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int nv40_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int nv46_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int nv4c_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int g84_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int g92_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int g94_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int gf100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int gf106_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int gk104_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int gp100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
/* pcie functions */
int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 5ff6d1f8985a..f57a3a5a288d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -18,6 +18,7 @@ struct nvkm_pmu {
struct completion wpr_ready;
struct {
+ struct mutex mutex;
u32 base;
u32 size;
} send;
@@ -39,18 +40,18 @@ int nvkm_pmu_send(struct nvkm_pmu *, u32 reply[2], u32 process,
void nvkm_pmu_pgob(struct nvkm_pmu *, bool enable);
bool nvkm_pmu_fan_controlled(struct nvkm_device *);
-int gt215_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gf100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gf119_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gk104_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gm200_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
-int gp10b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gt215_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gf100_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gf119_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gk104_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gk110_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gk208_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gk20a_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gm107_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gm200_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gm20b_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gp102_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
+int gp10b_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **);
/* interface to MEMX process running on PMU */
struct nvkm_memx;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/privring.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/privring.h
new file mode 100644
index 000000000000..e1399f8a90ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/privring.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_PRIVRING_H__
+#define __NVKM_PRIVRING_H__
+#include <core/subdev.h>
+
+int gf100_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+int gf117_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+int gk104_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+int gk20a_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+int gm200_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+int gp10b_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index 62c34f98c930..bd04f49272a6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -107,13 +107,13 @@ void nvkm_therm_clkgate_init(struct nvkm_therm *,
void nvkm_therm_clkgate_enable(struct nvkm_therm *);
void nvkm_therm_clkgate_fini(struct nvkm_therm *, bool);
-int nv40_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int nv50_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int g84_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gk104_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gm200_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
-int gp100_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int nv40_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int nv50_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int g84_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gt215_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gf119_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gk104_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gm107_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gm200_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
+int gp100_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index d06dcbe1faa6..439a3f72b0d7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -76,8 +76,8 @@ s64 nvkm_timer_wait_test(struct nvkm_timer_wait *);
#define nvkm_wait_msec(d,m,addr,mask,data) \
nvkm_wait_usec((d), (m) * 1000, (addr), (mask), (data))
-int nv04_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
-int nv40_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
-int nv41_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
-int gk20a_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
+int nv04_timer_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_timer **);
+int nv40_timer_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_timer **);
+int nv41_timer_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_timer **);
+int gk20a_timer_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_timer **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
index 7be0e7e7bd77..ee75c5524c43 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
@@ -9,13 +9,24 @@ struct nvkm_top {
struct list_head device;
};
-u32 nvkm_top_addr(struct nvkm_device *, enum nvkm_devidx);
-u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx);
-u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs);
-u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx);
-int nvkm_top_fault_id(struct nvkm_device *, enum nvkm_devidx);
-enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault);
-enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn);
+struct nvkm_top_device {
+ enum nvkm_subdev_type type;
+ int inst;
+ u32 addr;
+ int fault;
+ int engine;
+ int runlist;
+ int reset;
+ int intr;
+ struct list_head head;
+};
+
+u32 nvkm_top_addr(struct nvkm_device *, enum nvkm_subdev_type, int);
+u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_subdev_type, int);
+u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_subdev_type, int);
+int nvkm_top_fault_id(struct nvkm_device *, enum nvkm_subdev_type, int);
+struct nvkm_subdev *nvkm_top_fault(struct nvkm_device *, int fault);
-int gk104_top_new(struct nvkm_device *, int, struct nvkm_top **);
+int gk104_top_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_top **);
+int ga100_top_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_top **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index 45053a280930..0be86d5f0158 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -36,10 +36,10 @@ int nvkm_volt_get(struct nvkm_volt *);
int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp,
int condition);
-int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
-int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
-int gf117_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
-int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
-int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
-int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+int nv40_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
+int gf100_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
+int gf117_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
+int gk104_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
+int gk20a_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
+int gm20b_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 9a5be6f32424..0a9334deffe2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -181,6 +181,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
struct nvif_device *device = &drm->client.device;
struct nvkm_gr *gr = nvxx_gr(device);
struct drm_nouveau_getparam *getparam = data;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
switch (getparam->param) {
case NOUVEAU_GETPARAM_CHIPSET_ID:
@@ -188,13 +189,13 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
break;
case NOUVEAU_GETPARAM_PCI_VENDOR:
if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
- getparam->value = dev->pdev->vendor;
+ getparam->value = pdev->vendor;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_PCI_DEVICE:
if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
- getparam->value = dev->pdev->device;
+ getparam->value = pdev->device;
else
getparam->value = 0;
break;
@@ -205,7 +206,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
case NV_DEVICE_INFO_V0_IGP :
- if (!pci_is_pcie(dev->pdev))
+ if (!pci_is_pcie(pdev))
getparam->value = 1;
else
getparam->value = 2;
@@ -268,19 +269,19 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
if (init->fb_ctxdma_handle == ~0) {
switch (init->tt_ctxdma_handle) {
- case 0x01: engine = NV_DEVICE_INFO_ENGINE_GR ; break;
- case 0x02: engine = NV_DEVICE_INFO_ENGINE_MSPDEC; break;
- case 0x04: engine = NV_DEVICE_INFO_ENGINE_MSPPP ; break;
- case 0x08: engine = NV_DEVICE_INFO_ENGINE_MSVLD ; break;
- case 0x30: engine = NV_DEVICE_INFO_ENGINE_CE ; break;
+ case 0x01: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR ; break;
+ case 0x02: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC; break;
+ case 0x04: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP ; break;
+ case 0x08: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD ; break;
+ case 0x30: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE ; break;
default:
return nouveau_abi16_put(abi16, -ENOSYS);
}
} else {
- engine = NV_DEVICE_INFO_ENGINE_GR;
+ engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
}
- if (engine != NV_DEVICE_INFO_ENGINE_CE)
+ if (engine != NV_DEVICE_HOST_RUNLIST_ENGINES_CE)
engine = nvif_fifo_runlist(device, engine);
else
engine = nvif_fifo_runlist_ce(device);
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 69a84d0197d0..7c15f6448428 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -377,7 +377,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
return NULL;
}
- handle = ACPI_HANDLE(&dev->pdev->dev);
+ handle = ACPI_HANDLE(dev->dev);
if (!handle)
return NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index d204ea8a5618..e8c445eb1100 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -110,6 +110,9 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp
struct nvbios *bios = &drm->vbios;
uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & DCB_OUTPUT_C ? 1 : 0);
uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
+#ifdef __powerpc__
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+#endif
if (!bios->fp.xlated_entry || !sub || !scriptofs)
return -EINVAL;
@@ -123,8 +126,8 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp
#ifdef __powerpc__
/* Powerbook specific quirks */
if (script == LVDS_RESET &&
- (dev->pdev->device == 0x0179 || dev->pdev->device == 0x0189 ||
- dev->pdev->device == 0x0329))
+ (pdev->device == 0x0179 || pdev->device == 0x0189 ||
+ pdev->device == 0x0329))
nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
#endif
@@ -2083,7 +2086,7 @@ nouveau_bios_init(struct drm_device *dev)
int ret;
/* only relevant for PCI devices */
- if (!dev->pdev)
+ if (!dev_is_pci(dev->dev))
return 0;
if (!NVInitVBIOS(dev))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c85b1af06b7b..2375711877cf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -473,10 +473,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- drm->gem.vram_available -= bo->mem.size;
+ drm->gem.vram_available -= bo->base.size;
break;
case TTM_PL_TT:
- drm->gem.gart_available -= bo->mem.size;
+ drm->gem.gart_available -= bo->base.size;
break;
default:
break;
@@ -504,10 +504,10 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
if (!nvbo->bo.pin_count) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- drm->gem.vram_available += bo->mem.size;
+ drm->gem.vram_available += bo->base.size;
break;
case TTM_PL_TT:
- drm->gem.gart_available += bo->mem.size;
+ drm->gem.gart_available += bo->base.size;
break;
default:
break;
@@ -547,7 +547,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
- int i;
+ int i, j;
if (!ttm_dma)
return;
@@ -556,10 +556,21 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
if (nvbo->force_coherent)
return;
- for (i = 0; i < ttm_dma->num_pages; i++)
+ for (i = 0; i < ttm_dma->num_pages; ++i) {
+ struct page *p = ttm_dma->pages[i];
+ size_t num_pages = 1;
+
+ for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+ if (++p != ttm_dma->pages[j])
+ break;
+
+ ++num_pages;
+ }
dma_sync_single_for_device(drm->dev->dev,
ttm_dma->dma_address[i],
- PAGE_SIZE, DMA_TO_DEVICE);
+ num_pages * PAGE_SIZE, DMA_TO_DEVICE);
+ i += num_pages;
+ }
}
void
@@ -567,7 +578,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
- int i;
+ int i, j;
if (!ttm_dma)
return;
@@ -576,9 +587,21 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
if (nvbo->force_coherent)
return;
- for (i = 0; i < ttm_dma->num_pages; i++)
+ for (i = 0; i < ttm_dma->num_pages; ++i) {
+ struct page *p = ttm_dma->pages[i];
+ size_t num_pages = 1;
+
+ for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+ if (++p != ttm_dma->pages[j])
+ break;
+
+ ++num_pages;
+ }
+
dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
- PAGE_SIZE, DMA_FROM_DEVICE);
+ num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
+ i += num_pages;
+ }
}
void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
@@ -774,7 +797,10 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
return ret;
}
- mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
+ if (drm_drv_uses_atomic_modeset(drm->dev))
+ mutex_lock(&cli->mutex);
+ else
+ mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
if (ret == 0) {
ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
@@ -910,7 +936,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
return 0;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
- *new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
+ *new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size,
nvbo->mode, nvbo->zeta);
}
@@ -1232,9 +1258,8 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
return 0;
if (slave && ttm->sg) {
- /* make userspace faulting work */
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- ttm_dma->dma_address, ttm->num_pages);
+ drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address,
+ ttm->num_pages);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 5d191e58edf1..7cfac265fd45 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -533,6 +533,7 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
if (ret) {
NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
nouveau_channel_del(pchan);
+ goto done;
}
ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
@@ -555,7 +556,7 @@ nouveau_channels_init(struct nouveau_drm *drm)
} args = {
.m.version = 1,
.m.count = sizeof(args.v) / sizeof(args.v.channels),
- .v.channels.mthd = NV_DEVICE_FIFO_CHANNELS,
+ .v.channels.mthd = NV_DEVICE_HOST_CHANNELS,
};
struct nvif_object *device = &drm->client.device.object;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 8b4b3688c7ae..61e6d7412505 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -411,6 +411,7 @@ static struct nouveau_encoder *
nouveau_connector_ddc_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
struct drm_encoder *encoder;
int ret;
@@ -438,11 +439,11 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
break;
if (switcheroo_ddc)
- vga_switcheroo_lock_ddc(dev->pdev);
+ vga_switcheroo_lock_ddc(pdev);
if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
found = nv_encoder;
if (switcheroo_ddc)
- vga_switcheroo_unlock_ddc(dev->pdev);
+ vga_switcheroo_unlock_ddc(pdev);
break;
}
@@ -490,6 +491,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct drm_device *dev = connector->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
if (nv_connector->detected_encoder == nv_encoder)
return;
@@ -511,8 +513,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
connector->doublescan_allowed = true;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_KELVIN ||
(drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
- (dev->pdev->device & 0x0ff0) != 0x0100 &&
- (dev->pdev->device & 0x0ff0) != 0x0150))
+ (pdev->device & 0x0ff0) != 0x0100 &&
+ (pdev->device & 0x0ff0) != 0x0150))
/* HW is broken */
connector->interlace_allowed = false;
else
@@ -1210,6 +1212,7 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
case DCB_CONNECTOR_DMS59_DP0:
case DCB_CONNECTOR_DMS59_DP1:
case DCB_CONNECTOR_DP :
+ case DCB_CONNECTOR_mDP :
case DCB_CONNECTOR_USB_C : return DRM_MODE_CONNECTOR_DisplayPort;
case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
case DCB_CONNECTOR_HDMI_0 :
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index bceb48a2dfca..17831ee897ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -286,11 +286,11 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
bl_size = bw * bh * (1 << tile_mode) * gob_size;
- DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%lu\n",
+ DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n",
offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
- nvbo->bo.mem.size);
+ nvbo->bo.base.size);
- if (bl_size + offset > nvbo->bo.mem.size)
+ if (bl_size + offset > nvbo->bo.base.size)
return -ERANGE;
return 0;
@@ -363,7 +363,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
} else {
uint32_t size = mode_cmd->pitches[i] * height;
- if (size + mode_cmd->offsets[i] > nvbo->bo.mem.size)
+ if (size + mode_cmd->offsets[i] > nvbo->bo.base.size)
return -ERANGE;
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index d141a5f004af..885815ea917f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -115,8 +115,8 @@ nouveau_platform_name(struct platform_device *platformdev)
static u64
nouveau_name(struct drm_device *dev)
{
- if (dev->pdev)
- return nouveau_pci_name(dev->pdev);
+ if (dev_is_pci(dev->dev))
+ return nouveau_pci_name(to_pci_dev(dev->dev));
else
return nouveau_platform_name(to_platform_device(dev->dev));
}
@@ -344,7 +344,7 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
/* Allocate channel that has access to the graphics engine. */
if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
- arg0 = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR);
+ arg0 = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
arg1 = 1;
} else {
arg0 = NvDmaFB;
@@ -760,7 +760,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
if (ret)
goto fail_drm;
- drm_dev->pdev = pdev;
pci_set_drvdata(pdev, drm_dev);
ret = nouveau_drm_device_init(drm_dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9d04d1b36434..d28ee6844245 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -55,7 +55,6 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/drm_audio_component.h>
@@ -222,6 +221,7 @@ struct nouveau_drm {
struct {
struct drm_audio_component *component;
+ struct mutex lock;
bool component_registered;
} audio;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 21937f1c7dd9..1ffcc0a491fd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -53,7 +53,12 @@ struct nouveau_encoder {
* actually programmed on the hw, not the proposed crtc */
struct drm_crtc *crtc;
u32 ctrl;
- bool audio;
+
+ /* Protected by nouveau_drm.audio.lock */
+ struct {
+ bool enabled;
+ struct drm_connector *connector;
+ } audio;
struct drm_display_mode mode;
int last_dpms;
@@ -141,11 +146,9 @@ enum drm_mode_status nv50_dp_mode_valid(struct drm_connector *,
unsigned *clock);
struct nouveau_connector *
-nv50_outp_get_new_connector(struct nouveau_encoder *outp,
- struct drm_atomic_state *state);
+nv50_outp_get_new_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp);
struct nouveau_connector *
-nv50_outp_get_old_connector(struct nouveau_encoder *outp,
- struct drm_atomic_state *state);
+nv50_outp_get_old_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp);
int nv50_mstm_detect(struct nouveau_encoder *encoder);
void nv50_mstm_remove(struct nv50_mstm *mstm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 24ec5339efb4..4fc0fa696461 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -396,7 +396,9 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
fb->width, fb->height, nvbo->offset, nvbo);
- vga_switcheroo_client_fb_set(dev->pdev, info);
+ if (dev_is_pci(dev->dev))
+ vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), info);
+
return 0;
out_unlock:
@@ -548,7 +550,7 @@ nouveau_fbcon_init(struct drm_device *dev)
int ret;
if (!dev->mode_config.num_crtc ||
- (dev->pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ (to_pci_dev(dev->dev)->class >> 8) != PCI_CLASS_DISPLAY_VGA)
return 0;
fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 2f16b5249283..347488685f74 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -30,9 +30,9 @@
struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct nouveau_bo *nvbo = nouveau_gem_object(obj);
- int npages = nvbo->bo.num_pages;
- return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages);
+ return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages,
+ nvbo->bo.ttm->num_pages);
}
struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index a2e23fd4906c..1cf52635ea74 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -84,7 +84,7 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
if (!nvbe)
return NULL;
- if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags, caching)) {
+ if (ttm_sg_tt_init(&nvbe->ttm, bo, page_flags, caching)) {
kfree(nvbe);
return NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 4f69e4c3dafd..1c3f890377d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -315,6 +315,10 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
struct drm_nouveau_svm_init *args = data;
int ret;
+ /* We need to fail if svm is disabled */
+ if (!cli->drm->svm)
+ return -ENOSYS;
+
/* Allocate tracking for SVM-enabled VMM. */
if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index c85dd8afa3c3..7c4b374b3eca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -87,18 +87,20 @@ nouveau_vga_init(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
bool runtime = nouveau_pmops_runtime();
+ struct pci_dev *pdev;
/* only relevant for PCI devices */
- if (!dev->pdev)
+ if (!dev_is_pci(dev->dev))
return;
+ pdev = to_pci_dev(dev->dev);
- vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
+ vga_client_register(pdev, dev, NULL, nouveau_vga_set_decode);
/* don't register Thunderbolt eGPU with vga_switcheroo */
- if (pci_is_thunderbolt_attached(dev->pdev))
+ if (pci_is_thunderbolt_attached(pdev))
return;
- vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
+ vga_switcheroo_register_client(pdev, &nouveau_switcheroo_ops, runtime);
if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain);
@@ -109,17 +111,19 @@ nouveau_vga_fini(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
bool runtime = nouveau_pmops_runtime();
+ struct pci_dev *pdev;
/* only relevant for PCI devices */
- if (!dev->pdev)
+ if (!dev_is_pci(dev->dev))
return;
+ pdev = to_pci_dev(dev->dev);
- vga_client_register(dev->pdev, NULL, NULL, NULL);
+ vga_client_register(pdev, NULL, NULL, NULL);
- if (pci_is_thunderbolt_attached(dev->pdev))
+ if (pci_is_thunderbolt_attached(pdev))
return;
- vga_switcheroo_unregister_client(dev->pdev);
+ vga_switcheroo_unregister_client(pdev);
if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
}
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 1253fdec712d..b1cd8d7dd87d 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -80,7 +80,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_chan *fctx;
struct ttm_resource *reg = &priv->bo->bo.mem;
u32 start = reg->start * PAGE_SIZE;
- u32 limit = start + reg->size - 1;
+ u32 limit = start + priv->bo->bo.base.size - 1;
int ret = 0;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 447238e3cbe7..1625826505f6 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -39,7 +39,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_chan *fctx;
struct ttm_resource *reg = &priv->bo->bo.mem;
u32 start = reg->start * PAGE_SIZE;
- u32 limit = start + reg->size - 1;
+ u32 limit = start + priv->bo->bo.base.size - 1;
int ret;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nvif/fifo.c b/drivers/gpu/drm/nouveau/nvif/fifo.c
index e84a2e2ff043..a463289962b2 100644
--- a/drivers/gpu/drm/nouveau/nvif/fifo.c
+++ b/drivers/gpu/drm/nouveau/nvif/fifo.c
@@ -41,9 +41,11 @@ nvif_fifo_runlists(struct nvif_device *device)
return -ENOMEM;
a->m.version = 1;
a->m.count = sizeof(a->v) / sizeof(a->v.runlists);
- a->v.runlists.mthd = NV_DEVICE_FIFO_RUNLISTS;
- for (i = 0; i < ARRAY_SIZE(a->v.runlist); i++)
- a->v.runlist[i].mthd = NV_DEVICE_FIFO_RUNLIST_ENGINES(i);
+ a->v.runlists.mthd = NV_DEVICE_HOST_RUNLISTS;
+ for (i = 0; i < ARRAY_SIZE(a->v.runlist); i++) {
+ a->v.runlist[i].mthd = NV_DEVICE_HOST_RUNLIST_ENGINES;
+ a->v.runlist[i].data = i;
+ }
ret = nvif_object_mthd(object, NV_DEVICE_V0_INFO, a, sizeof(*a));
if (ret)
@@ -58,7 +60,7 @@ nvif_fifo_runlists(struct nvif_device *device)
}
for (i = 0; i < device->runlists; i++) {
- if (a->v.runlists.data & BIT_ULL(i))
+ if (a->v.runlist[i].mthd != NV_DEVICE_INFO_INVALID)
device->runlist[i].engines = a->v.runlist[i].data;
}
@@ -70,29 +72,15 @@ done:
u64
nvif_fifo_runlist(struct nvif_device *device, u64 engine)
{
- struct nvif_object *object = &device->object;
- struct {
- struct nv_device_info_v1 m;
- struct {
- struct nv_device_info_v1_data engine;
- } v;
- } a = {
- .m.version = 1,
- .m.count = sizeof(a.v) / sizeof(a.v.engine),
- .v.engine.mthd = engine,
- };
u64 runm = 0;
int ret, i;
if ((ret = nvif_fifo_runlists(device)))
return runm;
- ret = nvif_object_mthd(object, NV_DEVICE_V0_INFO, &a, sizeof(a));
- if (ret == 0) {
- for (i = 0; i < device->runlists; i++) {
- if (device->runlist[i].engines & a.v.engine.data)
- runm |= BIT_ULL(i);
- }
+ for (i = 0; i < device->runlists; i++) {
+ if (device->runlist[i].engines & engine)
+ runm |= BIT_ULL(i);
}
return runm;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index 1a47c40e171b..e41a39ae1597 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -40,10 +40,11 @@ nvkm_engine_unref(struct nvkm_engine **pengine)
{
struct nvkm_engine *engine = *pengine;
if (engine) {
- mutex_lock(&engine->subdev.mutex);
- if (--engine->usecount == 0)
+ if (refcount_dec_and_mutex_lock(&engine->use.refcount, &engine->use.mutex)) {
nvkm_subdev_fini(&engine->subdev, false);
- mutex_unlock(&engine->subdev.mutex);
+ engine->use.enabled = false;
+ mutex_unlock(&engine->use.mutex);
+ }
*pengine = NULL;
}
}
@@ -51,17 +52,21 @@ nvkm_engine_unref(struct nvkm_engine **pengine)
struct nvkm_engine *
nvkm_engine_ref(struct nvkm_engine *engine)
{
+ int ret;
if (engine) {
- mutex_lock(&engine->subdev.mutex);
- if (++engine->usecount == 1) {
- int ret = nvkm_subdev_init(&engine->subdev);
- if (ret) {
- engine->usecount--;
- mutex_unlock(&engine->subdev.mutex);
- return ERR_PTR(ret);
+ if (!refcount_inc_not_zero(&engine->use.refcount)) {
+ mutex_lock(&engine->use.mutex);
+ if (!refcount_inc_not_zero(&engine->use.refcount)) {
+ engine->use.enabled = true;
+ if ((ret = nvkm_subdev_init(&engine->subdev))) {
+ engine->use.enabled = false;
+ mutex_unlock(&engine->use.mutex);
+ return ERR_PTR(ret);
+ }
+ refcount_set(&engine->use.refcount, 1);
}
+ mutex_unlock(&engine->use.mutex);
}
- mutex_unlock(&engine->subdev.mutex);
}
return engine;
}
@@ -114,7 +119,7 @@ nvkm_engine_init(struct nvkm_subdev *subdev)
int ret = 0, i;
s64 time;
- if (!engine->usecount) {
+ if (!engine->use.enabled) {
nvkm_trace(subdev, "init skipped, engine has no users\n");
return ret;
}
@@ -156,11 +161,12 @@ nvkm_engine_dtor(struct nvkm_subdev *subdev)
struct nvkm_engine *engine = nvkm_engine(subdev);
if (engine->func->dtor)
return engine->func->dtor(engine);
+ mutex_destroy(&engine->use.mutex);
return engine;
}
-static const struct nvkm_subdev_func
-nvkm_engine_func = {
+const struct nvkm_subdev_func
+nvkm_engine = {
.dtor = nvkm_engine_dtor,
.preinit = nvkm_engine_preinit,
.init = nvkm_engine_init,
@@ -170,14 +176,15 @@ nvkm_engine_func = {
};
int
-nvkm_engine_ctor(const struct nvkm_engine_func *func,
- struct nvkm_device *device, int index, bool enable,
- struct nvkm_engine *engine)
+nvkm_engine_ctor(const struct nvkm_engine_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, bool enable, struct nvkm_engine *engine)
{
- nvkm_subdev_ctor(&nvkm_engine_func, device, index, &engine->subdev);
+ nvkm_subdev_ctor(&nvkm_engine, device, type, inst, &engine->subdev);
engine->func = func;
+ refcount_set(&engine->use.refcount, 0);
+ mutex_init(&engine->use.mutex);
- if (!nvkm_boolopt(device->cfgopt, nvkm_subdev_name[index], enable)) {
+ if (!nvkm_boolopt(device->cfgopt, engine->subdev.name, enable)) {
nvkm_debug(&engine->subdev, "disabled\n");
return -ENODEV;
}
@@ -187,11 +194,11 @@ nvkm_engine_ctor(const struct nvkm_engine_func *func,
}
int
-nvkm_engine_new_(const struct nvkm_engine_func *func,
- struct nvkm_device *device, int index, bool enable,
+nvkm_engine_new_(const struct nvkm_engine_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, bool enable,
struct nvkm_engine **pengine)
{
if (!(*pengine = kzalloc(sizeof(**pengine), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_engine_ctor(func, device, index, enable, *pengine);
+ return nvkm_engine_ctor(func, device, type, inst, enable, *pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
index 38130ef272d6..c69daac9bac7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -33,13 +33,13 @@ nvkm_memory_tags_put(struct nvkm_memory *memory, struct nvkm_device *device,
struct nvkm_fb *fb = device->fb;
struct nvkm_tags *tags = *ptags;
if (tags) {
- mutex_lock(&fb->subdev.mutex);
+ mutex_lock(&fb->tags.mutex);
if (refcount_dec_and_test(&tags->refcount)) {
- nvkm_mm_free(&fb->tags, &tags->mn);
+ nvkm_mm_free(&fb->tags.mm, &tags->mn);
kfree(memory->tags);
memory->tags = NULL;
}
- mutex_unlock(&fb->subdev.mutex);
+ mutex_unlock(&fb->tags.mutex);
*ptags = NULL;
}
}
@@ -52,29 +52,29 @@ nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
struct nvkm_fb *fb = device->fb;
struct nvkm_tags *tags;
- mutex_lock(&fb->subdev.mutex);
+ mutex_lock(&fb->tags.mutex);
if ((tags = memory->tags)) {
/* If comptags exist for the memory, but a different amount
* than requested, the buffer is being mapped with settings
* that are incompatible with existing mappings.
*/
if (tags->mn && tags->mn->length != nr) {
- mutex_unlock(&fb->subdev.mutex);
+ mutex_unlock(&fb->tags.mutex);
return -EINVAL;
}
refcount_inc(&tags->refcount);
- mutex_unlock(&fb->subdev.mutex);
+ mutex_unlock(&fb->tags.mutex);
*ptags = tags;
return 0;
}
if (!(tags = kmalloc(sizeof(*tags), GFP_KERNEL))) {
- mutex_unlock(&fb->subdev.mutex);
+ mutex_unlock(&fb->tags.mutex);
return -ENOMEM;
}
- if (!nvkm_mm_head(&fb->tags, 0, 1, nr, nr, 1, &tags->mn)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, nr, nr, 1, &tags->mn)) {
if (clr)
clr(device, tags->mn->offset, tags->mn->length);
} else {
@@ -92,7 +92,7 @@ nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
refcount_set(&tags->refcount, 1);
*ptags = memory->tags = tags;
- mutex_unlock(&fb->subdev.mutex);
+ mutex_unlock(&fb->tags.mutex);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 49d468b45d3f..a74b7acb6832 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -26,69 +26,13 @@
#include <core/option.h>
#include <subdev/mc.h>
-static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR];
-
const char *
-nvkm_subdev_name[NVKM_SUBDEV_NR] = {
- [NVKM_SUBDEV_ACR ] = "acr",
- [NVKM_SUBDEV_BAR ] = "bar",
- [NVKM_SUBDEV_VBIOS ] = "bios",
- [NVKM_SUBDEV_BUS ] = "bus",
- [NVKM_SUBDEV_CLK ] = "clk",
- [NVKM_SUBDEV_DEVINIT ] = "devinit",
- [NVKM_SUBDEV_FAULT ] = "fault",
- [NVKM_SUBDEV_FB ] = "fb",
- [NVKM_SUBDEV_FUSE ] = "fuse",
- [NVKM_SUBDEV_GPIO ] = "gpio",
- [NVKM_SUBDEV_GSP ] = "gsp",
- [NVKM_SUBDEV_I2C ] = "i2c",
- [NVKM_SUBDEV_IBUS ] = "priv",
- [NVKM_SUBDEV_ICCSENSE] = "iccsense",
- [NVKM_SUBDEV_INSTMEM ] = "imem",
- [NVKM_SUBDEV_LTC ] = "ltc",
- [NVKM_SUBDEV_MC ] = "mc",
- [NVKM_SUBDEV_MMU ] = "mmu",
- [NVKM_SUBDEV_MXM ] = "mxm",
- [NVKM_SUBDEV_PCI ] = "pci",
- [NVKM_SUBDEV_PMU ] = "pmu",
- [NVKM_SUBDEV_THERM ] = "therm",
- [NVKM_SUBDEV_TIMER ] = "tmr",
- [NVKM_SUBDEV_TOP ] = "top",
- [NVKM_SUBDEV_VOLT ] = "volt",
- [NVKM_ENGINE_BSP ] = "bsp",
- [NVKM_ENGINE_CE0 ] = "ce0",
- [NVKM_ENGINE_CE1 ] = "ce1",
- [NVKM_ENGINE_CE2 ] = "ce2",
- [NVKM_ENGINE_CE3 ] = "ce3",
- [NVKM_ENGINE_CE4 ] = "ce4",
- [NVKM_ENGINE_CE5 ] = "ce5",
- [NVKM_ENGINE_CE6 ] = "ce6",
- [NVKM_ENGINE_CE7 ] = "ce7",
- [NVKM_ENGINE_CE8 ] = "ce8",
- [NVKM_ENGINE_CIPHER ] = "cipher",
- [NVKM_ENGINE_DISP ] = "disp",
- [NVKM_ENGINE_DMAOBJ ] = "dma",
- [NVKM_ENGINE_FIFO ] = "fifo",
- [NVKM_ENGINE_GR ] = "gr",
- [NVKM_ENGINE_IFB ] = "ifb",
- [NVKM_ENGINE_ME ] = "me",
- [NVKM_ENGINE_MPEG ] = "mpeg",
- [NVKM_ENGINE_MSENC ] = "msenc",
- [NVKM_ENGINE_MSPDEC ] = "mspdec",
- [NVKM_ENGINE_MSPPP ] = "msppp",
- [NVKM_ENGINE_MSVLD ] = "msvld",
- [NVKM_ENGINE_NVENC0 ] = "nvenc0",
- [NVKM_ENGINE_NVENC1 ] = "nvenc1",
- [NVKM_ENGINE_NVENC2 ] = "nvenc2",
- [NVKM_ENGINE_NVDEC0 ] = "nvdec0",
- [NVKM_ENGINE_NVDEC1 ] = "nvdec1",
- [NVKM_ENGINE_NVDEC2 ] = "nvdec2",
- [NVKM_ENGINE_PM ] = "pm",
- [NVKM_ENGINE_SEC ] = "sec",
- [NVKM_ENGINE_SEC2 ] = "sec2",
- [NVKM_ENGINE_SW ] = "sw",
- [NVKM_ENGINE_VIC ] = "vic",
- [NVKM_ENGINE_VP ] = "vp",
+nvkm_subdev_type[NVKM_SUBDEV_NR] = {
+#define NVKM_LAYOUT_ONCE(type,data,ptr,...) [type] = #ptr,
+#define NVKM_LAYOUT_INST(A...) NVKM_LAYOUT_ONCE(A)
+#include <core/layout.h>
+#undef NVKM_LAYOUT_ONCE
+#undef NVKM_LAYOUT_INST
};
void
@@ -125,7 +69,7 @@ nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
}
}
- nvkm_mc_reset(device, subdev->index);
+ nvkm_mc_reset(device, subdev->type, subdev->inst);
time = ktime_to_us(ktime_get()) - time;
nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
@@ -199,6 +143,7 @@ nvkm_subdev_del(struct nvkm_subdev **psubdev)
if (subdev && !WARN_ON(!subdev->func)) {
nvkm_trace(subdev, "destroy running...\n");
time = ktime_to_us(ktime_get());
+ list_del(&subdev->head);
if (subdev->func->dtor)
*psubdev = subdev->func->dtor(subdev);
time = ktime_to_us(ktime_get()) - time;
@@ -209,26 +154,41 @@ nvkm_subdev_del(struct nvkm_subdev **psubdev)
}
void
-nvkm_subdev_ctor(const struct nvkm_subdev_func *func,
- struct nvkm_device *device, int index,
- struct nvkm_subdev *subdev)
+nvkm_subdev_disable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
+{
+ struct nvkm_subdev *subdev;
+ list_for_each_entry(subdev, &device->subdev, head) {
+ if (subdev->type == type && subdev->inst == inst) {
+ *subdev->pself = NULL;
+ nvkm_subdev_del(&subdev);
+ break;
+ }
+ }
+}
+
+void
+nvkm_subdev_ctor(const struct nvkm_subdev_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_subdev *subdev)
{
- const char *name = nvkm_subdev_name[index];
subdev->func = func;
subdev->device = device;
- subdev->index = index;
-
- __mutex_init(&subdev->mutex, name, &nvkm_subdev_lock_class[index]);
- subdev->debug = nvkm_dbgopt(device->dbgopt, name);
+ subdev->type = type;
+ subdev->inst = inst < 0 ? 0 : inst;
+
+ if (inst >= 0)
+ snprintf(subdev->name, sizeof(subdev->name), "%s%d", nvkm_subdev_type[type], inst);
+ else
+ strscpy(subdev->name, nvkm_subdev_type[type], sizeof(subdev->name));
+ subdev->debug = nvkm_dbgopt(device->dbgopt, subdev->name);
+ list_add_tail(&subdev->head, &device->subdev);
}
int
-nvkm_subdev_new_(const struct nvkm_subdev_func *func,
- struct nvkm_device *device, int index,
- struct nvkm_subdev **psubdev)
+nvkm_subdev_new_(const struct nvkm_subdev_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_subdev **psubdev)
{
if (!(*psubdev = kzalloc(sizeof(**psubdev), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(func, device, index, *psubdev);
+ nvkm_subdev_ctor(func, device, type, inst, *psubdev);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index 44e116f7880d..39f6db269c7a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -36,8 +36,9 @@ g84_bsp = {
};
int
-g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
+g84_bsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
{
- return nvkm_xtensa_new_(&g84_bsp, device, index,
+ return nvkm_xtensa_new_(&g84_bsp, device, type, inst,
device->chipset != 0x92, 0x103000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
index ad9f855c9a40..b9cc39565985 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
@@ -29,9 +29,7 @@
static void
gf100_ce_init(struct nvkm_falcon *ce)
{
- struct nvkm_device *device = ce->engine.subdev.device;
- const int index = ce->engine.subdev.index - NVKM_ENGINE_CE0;
- nvkm_wr32(device, ce->addr + 0x084, index);
+ nvkm_wr32(ce->engine.subdev.device, ce->addr + 0x084, ce->engine.subdev.inst);
}
static const struct nvkm_falcon_func
@@ -63,16 +61,9 @@ gf100_ce1 = {
};
int
-gf100_ce_new(struct nvkm_device *device, int index,
+gf100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- if (index == NVKM_ENGINE_CE0) {
- return nvkm_falcon_new_(&gf100_ce0, device, index, true,
- 0x104000, pengine);
- } else
- if (index == NVKM_ENGINE_CE1) {
- return nvkm_falcon_new_(&gf100_ce1, device, index, true,
- 0x105000, pengine);
- }
- return -ENODEV;
+ return nvkm_falcon_new_(inst ? &gf100_ce1 : &gf100_ce0, device, type, inst, true,
+ 0x104000 + (inst * 0x1000), pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
index 9e0b53a10f77..27f29eb0494d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
@@ -58,9 +58,9 @@ gk104_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
void
gk104_ce_intr(struct nvkm_engine *ce)
{
- const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x1000;
struct nvkm_subdev *subdev = &ce->subdev;
struct nvkm_device *device = subdev->device;
+ const u32 base = subdev->inst * 0x1000;
u32 mask = nvkm_rd32(device, 0x104904 + base);
u32 intr = nvkm_rd32(device, 0x104908 + base) & mask;
if (intr & 0x00000001) {
@@ -94,8 +94,8 @@ gk104_ce = {
};
int
-gk104_ce_new(struct nvkm_device *device, int index,
+gk104_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&gk104_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&gk104_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
index c0df7daa85e2..c3c476592c43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
@@ -36,8 +36,8 @@ gm107_ce = {
};
int
-gm107_ce_new(struct nvkm_device *device, int index,
+gm107_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&gm107_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&gm107_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
index c6fa8b20737e..d2db61865371 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
@@ -35,8 +35,8 @@ gm200_ce = {
};
int
-gm200_ce_new(struct nvkm_device *device, int index,
+gm200_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&gm200_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&gm200_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c
index c7710456bc30..a4f08a4472c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c
@@ -59,9 +59,9 @@ gp100_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
void
gp100_ce_intr(struct nvkm_engine *ce)
{
- const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x80;
struct nvkm_subdev *subdev = &ce->subdev;
struct nvkm_device *device = subdev->device;
+ const u32 base = subdev->inst * 0x80;
u32 mask = nvkm_rd32(device, 0x10440c + base);
u32 intr = nvkm_rd32(device, 0x104410 + base) & mask;
if (intr & 0x00000001) { //XXX: guess
@@ -95,8 +95,8 @@ gp100_ce = {
};
int
-gp100_ce_new(struct nvkm_device *device, int index,
+gp100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&gp100_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&gp100_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c
index 985c8f653874..180d497a95eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c
@@ -37,8 +37,8 @@ gp102_ce = {
};
int
-gp102_ce_new(struct nvkm_device *device, int index,
+gp102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&gp102_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&gp102_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
index 63ac51a54fd3..704df0f2d1f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
@@ -44,7 +44,7 @@ gt215_ce_intr(struct nvkm_falcon *ce, struct nvkm_fifo_chan *chan)
{
struct nvkm_subdev *subdev = &ce->engine.subdev;
struct nvkm_device *device = subdev->device;
- const u32 base = (subdev->index - NVKM_ENGINE_CE0) * 0x1000;
+ const u32 base = subdev->inst * 0x1000;
u32 ssta = nvkm_rd32(device, 0x104040 + base) & 0x0000ffff;
u32 addr = nvkm_rd32(device, 0x104040 + base) >> 16;
u32 mthd = (addr & 0x07ff) << 2;
@@ -75,9 +75,9 @@ gt215_ce = {
};
int
-gt215_ce_new(struct nvkm_device *device, int index,
+gt215_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_falcon_new_(&gt215_ce, device, index,
+ return nvkm_falcon_new_(&gt215_ce, device, type, inst,
(device->chipset != 0xaf), 0x104000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
index fcda3de45857..cd5e9cdca1cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
@@ -33,8 +33,8 @@ gv100_ce = {
};
int
-gv100_ce_new(struct nvkm_device *device, int index,
+gv100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&gv100_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&gv100_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
index b4308e2d8c75..e5ff92d9364c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
@@ -33,8 +33,8 @@ tu102_ce = {
};
int
-tu102_ce_new(struct nvkm_device *device, int index,
+tu102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&tu102_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&tu102_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
index 68ffb520531e..be2a7181dc15 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
@@ -127,8 +127,8 @@ g84_cipher = {
};
int
-g84_cipher_new(struct nvkm_device *device, int index,
+g84_cipher_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&g84_cipher, device, index, true, pengine);
+ return nvkm_engine_new_(&g84_cipher, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index cdcc851e06f9..b930f539feec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -71,2640 +71,2557 @@ nvkm_device_list(u64 *name, int size)
static const struct nvkm_device_chip
null_chipset = {
.name = "NULL",
- .bios = nvkm_bios_new,
+ .bios = { 0x00000001, nvkm_bios_new },
};
static const struct nvkm_device_chip
nv4_chipset = {
.name = "NV04",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv04_devinit_new,
- .fb = nv04_fb_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv04_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv04_fifo_new,
- .gr = nv04_gr_new,
- .sw = nv04_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv04_devinit_new },
+ .fb = { 0x00000001, nv04_fb_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv04_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv04_fifo_new },
+ .gr = { 0x00000001, nv04_gr_new },
+ .sw = { 0x00000001, nv04_sw_new },
};
static const struct nvkm_device_chip
nv5_chipset = {
.name = "NV05",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv05_devinit_new,
- .fb = nv04_fb_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv04_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv04_fifo_new,
- .gr = nv04_gr_new,
- .sw = nv04_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv05_devinit_new },
+ .fb = { 0x00000001, nv04_fb_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv04_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv04_fifo_new },
+ .gr = { 0x00000001, nv04_gr_new },
+ .sw = { 0x00000001, nv04_sw_new },
};
static const struct nvkm_device_chip
nv10_chipset = {
.name = "NV10",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv10_devinit_new,
- .fb = nv10_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv04_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .gr = nv10_gr_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv10_devinit_new },
+ .fb = { 0x00000001, nv10_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv04_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .gr = { 0x00000001, nv10_gr_new },
};
static const struct nvkm_device_chip
nv11_chipset = {
.name = "NV11",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv10_devinit_new,
- .fb = nv10_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv11_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv10_fifo_new,
- .gr = nv15_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv10_devinit_new },
+ .fb = { 0x00000001, nv10_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv11_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv10_fifo_new },
+ .gr = { 0x00000001, nv15_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv15_chipset = {
.name = "NV15",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv10_devinit_new,
- .fb = nv10_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv04_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv10_fifo_new,
- .gr = nv15_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv10_devinit_new },
+ .fb = { 0x00000001, nv10_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv04_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv10_fifo_new },
+ .gr = { 0x00000001, nv15_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv17_chipset = {
.name = "NV17",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv10_devinit_new,
- .fb = nv10_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv17_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv10_devinit_new },
+ .fb = { 0x00000001, nv10_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv17_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv18_chipset = {
.name = "NV18",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv10_devinit_new,
- .fb = nv10_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv17_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv10_devinit_new },
+ .fb = { 0x00000001, nv10_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv17_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv1a_chipset = {
.name = "nForce",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv1a_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv04_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv10_fifo_new,
- .gr = nv15_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv1a_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv04_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv10_fifo_new },
+ .gr = { 0x00000001, nv15_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv1f_chipset = {
.name = "nForce2",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv1a_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv17_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv1a_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv17_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv20_chipset = {
.name = "NV20",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv20_devinit_new,
- .fb = nv20_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv20_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv20_devinit_new },
+ .fb = { 0x00000001, nv20_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv20_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv25_chipset = {
.name = "NV25",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv20_devinit_new,
- .fb = nv25_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv25_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv20_devinit_new },
+ .fb = { 0x00000001, nv25_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv25_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv28_chipset = {
.name = "NV28",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv20_devinit_new,
- .fb = nv25_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv25_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv20_devinit_new },
+ .fb = { 0x00000001, nv25_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv25_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv2a_chipset = {
.name = "NV2A",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv20_devinit_new,
- .fb = nv25_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv2a_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv20_devinit_new },
+ .fb = { 0x00000001, nv25_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv2a_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv30_chipset = {
.name = "NV30",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv20_devinit_new,
- .fb = nv30_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv30_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv20_devinit_new },
+ .fb = { 0x00000001, nv30_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv30_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv31_chipset = {
.name = "NV31",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv20_devinit_new,
- .fb = nv30_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv30_gr_new,
- .mpeg = nv31_mpeg_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv20_devinit_new },
+ .fb = { 0x00000001, nv30_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv30_gr_new },
+ .mpeg = { 0x00000001, nv31_mpeg_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv34_chipset = {
.name = "NV34",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv10_devinit_new,
- .fb = nv10_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv34_gr_new,
- .mpeg = nv31_mpeg_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv10_devinit_new },
+ .fb = { 0x00000001, nv10_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv34_gr_new },
+ .mpeg = { 0x00000001, nv31_mpeg_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv35_chipset = {
.name = "NV35",
- .bios = nvkm_bios_new,
- .bus = nv04_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv20_devinit_new,
- .fb = nv35_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv35_gr_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv04_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv20_devinit_new },
+ .fb = { 0x00000001, nv35_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv35_gr_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv36_chipset = {
.name = "NV36",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv04_clk_new,
- .devinit = nv20_devinit_new,
- .fb = nv36_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv04_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv04_pci_new,
- .timer = nv04_timer_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv17_fifo_new,
- .gr = nv35_gr_new,
- .mpeg = nv31_mpeg_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv04_clk_new },
+ .devinit = { 0x00000001, nv20_devinit_new },
+ .fb = { 0x00000001, nv36_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv04_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv04_pci_new },
+ .timer = { 0x00000001, nv04_timer_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv17_fifo_new },
+ .gr = { 0x00000001, nv35_gr_new },
+ .mpeg = { 0x00000001, nv31_mpeg_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv40_chipset = {
.name = "NV40",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv40_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv40_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv40_gr_new,
- .mpeg = nv40_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv40_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv40_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv40_gr_new },
+ .mpeg = { 0x00000001, nv40_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv41_chipset = {
.name = "NV41",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv41_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv41_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv40_gr_new,
- .mpeg = nv40_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv41_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv41_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv40_gr_new },
+ .mpeg = { 0x00000001, nv40_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv42_chipset = {
.name = "NV42",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv41_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv41_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv40_gr_new,
- .mpeg = nv40_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv41_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv41_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv40_gr_new },
+ .mpeg = { 0x00000001, nv40_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv43_chipset = {
.name = "NV43",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv41_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv41_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv40_gr_new,
- .mpeg = nv40_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv41_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv41_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv40_gr_new },
+ .mpeg = { 0x00000001, nv40_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv44_chipset = {
.name = "NV44",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv44_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv44_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv44_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv44_mc_new },
+ .mmu = { 0x00000001, nv44_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv44_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv45_chipset = {
.name = "NV45",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv40_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv40_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv40_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv40_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv46_chipset = {
.name = "G72",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv46_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
- .pci = nv46_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv44_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv46_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv44_mc_new },
+ .mmu = { 0x00000001, nv44_mmu_new },
+ .pci = { 0x00000001, nv46_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv44_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv47_chipset = {
.name = "G70",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv47_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv41_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv40_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv47_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv41_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv40_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv49_chipset = {
.name = "G71",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv49_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv41_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv40_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv49_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv41_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv40_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv4a_chipset = {
.name = "NV44A",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv44_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv44_mc_new,
- .mmu = nv04_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv44_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv44_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv44_mc_new },
+ .mmu = { 0x00000001, nv04_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv44_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv4b_chipset = {
.name = "G73",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv49_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv17_mc_new,
- .mmu = nv41_mmu_new,
- .pci = nv40_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv40_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv49_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv17_mc_new },
+ .mmu = { 0x00000001, nv41_mmu_new },
+ .pci = { 0x00000001, nv40_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv40_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv4c_chipset = {
.name = "C61",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv46_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
- .pci = nv4c_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv44_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv46_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv44_mc_new },
+ .mmu = { 0x00000001, nv44_mmu_new },
+ .pci = { 0x00000001, nv4c_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv44_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv4e_chipset = {
.name = "C51",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv4e_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv4e_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
- .pci = nv4c_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv44_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv4e_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv4e_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv44_mc_new },
+ .mmu = { 0x00000001, nv44_mmu_new },
+ .pci = { 0x00000001, nv4c_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv44_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv50_chipset = {
.name = "G80",
- .bar = nv50_bar_new,
- .bios = nvkm_bios_new,
- .bus = nv50_bus_new,
- .clk = nv50_clk_new,
- .devinit = nv50_devinit_new,
- .fb = nv50_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = nv50_gpio_new,
- .i2c = nv50_i2c_new,
- .imem = nv50_instmem_new,
- .mc = nv50_mc_new,
- .mmu = nv50_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = nv46_pci_new,
- .therm = nv50_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv50_disp_new,
- .dma = nv50_dma_new,
- .fifo = nv50_fifo_new,
- .gr = nv50_gr_new,
- .mpeg = nv50_mpeg_new,
- .pm = nv50_pm_new,
- .sw = nv50_sw_new,
+ .bar = { 0x00000001, nv50_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv50_bus_new },
+ .clk = { 0x00000001, nv50_clk_new },
+ .devinit = { 0x00000001, nv50_devinit_new },
+ .fb = { 0x00000001, nv50_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, nv50_gpio_new },
+ .i2c = { 0x00000001, nv50_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, nv50_mc_new },
+ .mmu = { 0x00000001, nv50_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, nv46_pci_new },
+ .therm = { 0x00000001, nv50_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv50_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, nv50_fifo_new },
+ .gr = { 0x00000001, nv50_gr_new },
+ .mpeg = { 0x00000001, nv50_mpeg_new },
+ .pm = { 0x00000001, nv50_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nv63_chipset = {
.name = "C73",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv46_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
- .pci = nv4c_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv44_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv46_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv44_mc_new },
+ .mmu = { 0x00000001, nv44_mmu_new },
+ .pci = { 0x00000001, nv4c_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv44_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv67_chipset = {
.name = "C67",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv46_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
- .pci = nv4c_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv44_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv46_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv44_mc_new },
+ .mmu = { 0x00000001, nv44_mmu_new },
+ .pci = { 0x00000001, nv4c_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv44_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv68_chipset = {
.name = "C68",
- .bios = nvkm_bios_new,
- .bus = nv31_bus_new,
- .clk = nv40_clk_new,
- .devinit = nv1a_devinit_new,
- .fb = nv46_fb_new,
- .gpio = nv10_gpio_new,
- .i2c = nv04_i2c_new,
- .imem = nv40_instmem_new,
- .mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
- .pci = nv4c_pci_new,
- .therm = nv40_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = nv04_disp_new,
- .dma = nv04_dma_new,
- .fifo = nv40_fifo_new,
- .gr = nv44_gr_new,
- .mpeg = nv44_mpeg_new,
- .pm = nv40_pm_new,
- .sw = nv10_sw_new,
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv31_bus_new },
+ .clk = { 0x00000001, nv40_clk_new },
+ .devinit = { 0x00000001, nv1a_devinit_new },
+ .fb = { 0x00000001, nv46_fb_new },
+ .gpio = { 0x00000001, nv10_gpio_new },
+ .i2c = { 0x00000001, nv04_i2c_new },
+ .imem = { 0x00000001, nv40_instmem_new },
+ .mc = { 0x00000001, nv44_mc_new },
+ .mmu = { 0x00000001, nv44_mmu_new },
+ .pci = { 0x00000001, nv4c_pci_new },
+ .therm = { 0x00000001, nv40_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, nv04_disp_new },
+ .dma = { 0x00000001, nv04_dma_new },
+ .fifo = { 0x00000001, nv40_fifo_new },
+ .gr = { 0x00000001, nv44_gr_new },
+ .mpeg = { 0x00000001, nv44_mpeg_new },
+ .pm = { 0x00000001, nv40_pm_new },
+ .sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv84_chipset = {
.name = "G84",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = nv50_bus_new,
- .clk = g84_clk_new,
- .devinit = g84_devinit_new,
- .fb = g84_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = nv50_gpio_new,
- .i2c = nv50_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g84_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g84_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .bsp = g84_bsp_new,
- .cipher = g84_cipher_new,
- .disp = g84_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = g84_gr_new,
- .mpeg = g84_mpeg_new,
- .pm = g84_pm_new,
- .sw = nv50_sw_new,
- .vp = g84_vp_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv50_bus_new },
+ .clk = { 0x00000001, g84_clk_new },
+ .devinit = { 0x00000001, g84_devinit_new },
+ .fb = { 0x00000001, g84_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, nv50_gpio_new },
+ .i2c = { 0x00000001, nv50_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g84_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g84_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .bsp = { 0x00000001, g84_bsp_new },
+ .cipher = { 0x00000001, g84_cipher_new },
+ .disp = { 0x00000001, g84_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, g84_gr_new },
+ .mpeg = { 0x00000001, g84_mpeg_new },
+ .pm = { 0x00000001, g84_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
+ .vp = { 0x00000001, g84_vp_new },
};
static const struct nvkm_device_chip
nv86_chipset = {
.name = "G86",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = nv50_bus_new,
- .clk = g84_clk_new,
- .devinit = g84_devinit_new,
- .fb = g84_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = nv50_gpio_new,
- .i2c = nv50_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g84_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g84_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .bsp = g84_bsp_new,
- .cipher = g84_cipher_new,
- .disp = g84_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = g84_gr_new,
- .mpeg = g84_mpeg_new,
- .pm = g84_pm_new,
- .sw = nv50_sw_new,
- .vp = g84_vp_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv50_bus_new },
+ .clk = { 0x00000001, g84_clk_new },
+ .devinit = { 0x00000001, g84_devinit_new },
+ .fb = { 0x00000001, g84_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, nv50_gpio_new },
+ .i2c = { 0x00000001, nv50_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g84_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g84_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .bsp = { 0x00000001, g84_bsp_new },
+ .cipher = { 0x00000001, g84_cipher_new },
+ .disp = { 0x00000001, g84_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, g84_gr_new },
+ .mpeg = { 0x00000001, g84_mpeg_new },
+ .pm = { 0x00000001, g84_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
+ .vp = { 0x00000001, g84_vp_new },
};
static const struct nvkm_device_chip
nv92_chipset = {
.name = "G92",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = nv50_bus_new,
- .clk = g84_clk_new,
- .devinit = g84_devinit_new,
- .fb = g84_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = nv50_gpio_new,
- .i2c = nv50_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g84_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g92_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .bsp = g84_bsp_new,
- .cipher = g84_cipher_new,
- .disp = g84_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = g84_gr_new,
- .mpeg = g84_mpeg_new,
- .pm = g84_pm_new,
- .sw = nv50_sw_new,
- .vp = g84_vp_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, nv50_bus_new },
+ .clk = { 0x00000001, g84_clk_new },
+ .devinit = { 0x00000001, g84_devinit_new },
+ .fb = { 0x00000001, g84_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, nv50_gpio_new },
+ .i2c = { 0x00000001, nv50_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g84_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g92_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .bsp = { 0x00000001, g84_bsp_new },
+ .cipher = { 0x00000001, g84_cipher_new },
+ .disp = { 0x00000001, g84_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, g84_gr_new },
+ .mpeg = { 0x00000001, g84_mpeg_new },
+ .pm = { 0x00000001, g84_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
+ .vp = { 0x00000001, g84_vp_new },
};
static const struct nvkm_device_chip
nv94_chipset = {
.name = "G94",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = g84_clk_new,
- .devinit = g84_devinit_new,
- .fb = g84_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g84_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .bsp = g84_bsp_new,
- .cipher = g84_cipher_new,
- .disp = g94_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = g84_gr_new,
- .mpeg = g84_mpeg_new,
- .pm = g84_pm_new,
- .sw = nv50_sw_new,
- .vp = g84_vp_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, g84_clk_new },
+ .devinit = { 0x00000001, g84_devinit_new },
+ .fb = { 0x00000001, g84_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g84_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .bsp = { 0x00000001, g84_bsp_new },
+ .cipher = { 0x00000001, g84_cipher_new },
+ .disp = { 0x00000001, g94_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, g84_gr_new },
+ .mpeg = { 0x00000001, g84_mpeg_new },
+ .pm = { 0x00000001, g84_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
+ .vp = { 0x00000001, g84_vp_new },
};
static const struct nvkm_device_chip
nv96_chipset = {
.name = "G96",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = g84_clk_new,
- .devinit = g84_devinit_new,
- .fb = g84_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g84_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .bsp = g84_bsp_new,
- .cipher = g84_cipher_new,
- .disp = g94_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = g84_gr_new,
- .mpeg = g84_mpeg_new,
- .pm = g84_pm_new,
- .sw = nv50_sw_new,
- .vp = g84_vp_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, g84_clk_new },
+ .devinit = { 0x00000001, g84_devinit_new },
+ .fb = { 0x00000001, g84_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g84_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .bsp = { 0x00000001, g84_bsp_new },
+ .cipher = { 0x00000001, g84_cipher_new },
+ .disp = { 0x00000001, g94_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, g84_gr_new },
+ .mpeg = { 0x00000001, g84_mpeg_new },
+ .pm = { 0x00000001, g84_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
+ .vp = { 0x00000001, g84_vp_new },
};
static const struct nvkm_device_chip
nv98_chipset = {
.name = "G98",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = g84_clk_new,
- .devinit = g98_devinit_new,
- .fb = g84_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g98_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = g94_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = g84_gr_new,
- .mspdec = g98_mspdec_new,
- .msppp = g98_msppp_new,
- .msvld = g98_msvld_new,
- .pm = g84_pm_new,
- .sec = g98_sec_new,
- .sw = nv50_sw_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, g84_clk_new },
+ .devinit = { 0x00000001, g98_devinit_new },
+ .fb = { 0x00000001, g84_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g98_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, g94_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, g84_gr_new },
+ .mspdec = { 0x00000001, g98_mspdec_new },
+ .msppp = { 0x00000001, g98_msppp_new },
+ .msvld = { 0x00000001, g98_msvld_new },
+ .pm = { 0x00000001, g84_pm_new },
+ .sec = { 0x00000001, g98_sec_new },
+ .sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nva0_chipset = {
.name = "GT200",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = g84_clk_new,
- .devinit = g84_devinit_new,
- .fb = g84_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = nv50_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g84_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .bsp = g84_bsp_new,
- .cipher = g84_cipher_new,
- .disp = gt200_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = gt200_gr_new,
- .mpeg = g84_mpeg_new,
- .pm = gt200_pm_new,
- .sw = nv50_sw_new,
- .vp = g84_vp_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, g84_clk_new },
+ .devinit = { 0x00000001, g84_devinit_new },
+ .fb = { 0x00000001, g84_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, nv50_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g84_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .bsp = { 0x00000001, g84_bsp_new },
+ .cipher = { 0x00000001, g84_cipher_new },
+ .disp = { 0x00000001, gt200_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, gt200_gr_new },
+ .mpeg = { 0x00000001, g84_mpeg_new },
+ .pm = { 0x00000001, gt200_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
+ .vp = { 0x00000001, g84_vp_new },
};
static const struct nvkm_device_chip
nva3_chipset = {
.name = "GT215",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = gt215_clk_new,
- .devinit = gt215_devinit_new,
- .fb = gt215_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = gt215_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .pmu = gt215_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .ce[0] = gt215_ce_new,
- .disp = gt215_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = gt215_gr_new,
- .mpeg = g84_mpeg_new,
- .mspdec = gt215_mspdec_new,
- .msppp = gt215_msppp_new,
- .msvld = gt215_msvld_new,
- .pm = gt215_pm_new,
- .sw = nv50_sw_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, gt215_clk_new },
+ .devinit = { 0x00000001, gt215_devinit_new },
+ .fb = { 0x00000001, gt215_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, gt215_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .pmu = { 0x00000001, gt215_pmu_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .ce = { 0x00000001, gt215_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, gt215_gr_new },
+ .mpeg = { 0x00000001, g84_mpeg_new },
+ .mspdec = { 0x00000001, gt215_mspdec_new },
+ .msppp = { 0x00000001, gt215_msppp_new },
+ .msvld = { 0x00000001, gt215_msvld_new },
+ .pm = { 0x00000001, gt215_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nva5_chipset = {
.name = "GT216",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = gt215_clk_new,
- .devinit = gt215_devinit_new,
- .fb = gt215_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = gt215_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .pmu = gt215_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .ce[0] = gt215_ce_new,
- .disp = gt215_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = gt215_gr_new,
- .mspdec = gt215_mspdec_new,
- .msppp = gt215_msppp_new,
- .msvld = gt215_msvld_new,
- .pm = gt215_pm_new,
- .sw = nv50_sw_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, gt215_clk_new },
+ .devinit = { 0x00000001, gt215_devinit_new },
+ .fb = { 0x00000001, gt215_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, gt215_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .pmu = { 0x00000001, gt215_pmu_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .ce = { 0x00000001, gt215_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, gt215_gr_new },
+ .mspdec = { 0x00000001, gt215_mspdec_new },
+ .msppp = { 0x00000001, gt215_msppp_new },
+ .msvld = { 0x00000001, gt215_msvld_new },
+ .pm = { 0x00000001, gt215_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nva8_chipset = {
.name = "GT218",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = gt215_clk_new,
- .devinit = gt215_devinit_new,
- .fb = gt215_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = gt215_mc_new,
- .mmu = g84_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .pmu = gt215_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .ce[0] = gt215_ce_new,
- .disp = gt215_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = gt215_gr_new,
- .mspdec = gt215_mspdec_new,
- .msppp = gt215_msppp_new,
- .msvld = gt215_msvld_new,
- .pm = gt215_pm_new,
- .sw = nv50_sw_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, gt215_clk_new },
+ .devinit = { 0x00000001, gt215_devinit_new },
+ .fb = { 0x00000001, gt215_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, gt215_mc_new },
+ .mmu = { 0x00000001, g84_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .pmu = { 0x00000001, gt215_pmu_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .ce = { 0x00000001, gt215_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, gt215_gr_new },
+ .mspdec = { 0x00000001, gt215_mspdec_new },
+ .msppp = { 0x00000001, gt215_msppp_new },
+ .msvld = { 0x00000001, gt215_msvld_new },
+ .pm = { 0x00000001, gt215_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nvaa_chipset = {
.name = "MCP77/MCP78",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = mcp77_clk_new,
- .devinit = g98_devinit_new,
- .fb = mcp77_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g98_mc_new,
- .mmu = mcp77_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = mcp77_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = gt200_gr_new,
- .mspdec = g98_mspdec_new,
- .msppp = g98_msppp_new,
- .msvld = g98_msvld_new,
- .pm = g84_pm_new,
- .sec = g98_sec_new,
- .sw = nv50_sw_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, mcp77_clk_new },
+ .devinit = { 0x00000001, g98_devinit_new },
+ .fb = { 0x00000001, mcp77_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g98_mc_new },
+ .mmu = { 0x00000001, mcp77_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, mcp77_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, gt200_gr_new },
+ .mspdec = { 0x00000001, g98_mspdec_new },
+ .msppp = { 0x00000001, g98_msppp_new },
+ .msvld = { 0x00000001, g98_msvld_new },
+ .pm = { 0x00000001, g84_pm_new },
+ .sec = { 0x00000001, g98_sec_new },
+ .sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nvac_chipset = {
.name = "MCP79/MCP7A",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = mcp77_clk_new,
- .devinit = g98_devinit_new,
- .fb = mcp77_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = g98_mc_new,
- .mmu = mcp77_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .therm = g84_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .disp = mcp77_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = mcp79_gr_new,
- .mspdec = g98_mspdec_new,
- .msppp = g98_msppp_new,
- .msvld = g98_msvld_new,
- .pm = g84_pm_new,
- .sec = g98_sec_new,
- .sw = nv50_sw_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, mcp77_clk_new },
+ .devinit = { 0x00000001, g98_devinit_new },
+ .fb = { 0x00000001, mcp77_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, g98_mc_new },
+ .mmu = { 0x00000001, mcp77_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .therm = { 0x00000001, g84_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .disp = { 0x00000001, mcp77_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, mcp79_gr_new },
+ .mspdec = { 0x00000001, g98_mspdec_new },
+ .msppp = { 0x00000001, g98_msppp_new },
+ .msvld = { 0x00000001, g98_msvld_new },
+ .pm = { 0x00000001, g84_pm_new },
+ .sec = { 0x00000001, g98_sec_new },
+ .sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nvaf_chipset = {
.name = "MCP89",
- .bar = g84_bar_new,
- .bios = nvkm_bios_new,
- .bus = g94_bus_new,
- .clk = gt215_clk_new,
- .devinit = mcp89_devinit_new,
- .fb = mcp89_fb_new,
- .fuse = nv50_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .imem = nv50_instmem_new,
- .mc = gt215_mc_new,
- .mmu = mcp77_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = g94_pci_new,
- .pmu = gt215_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = nv40_volt_new,
- .ce[0] = gt215_ce_new,
- .disp = mcp89_disp_new,
- .dma = nv50_dma_new,
- .fifo = g84_fifo_new,
- .gr = mcp89_gr_new,
- .mspdec = gt215_mspdec_new,
- .msppp = gt215_msppp_new,
- .msvld = mcp89_msvld_new,
- .pm = gt215_pm_new,
- .sw = nv50_sw_new,
+ .bar = { 0x00000001, g84_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, g94_bus_new },
+ .clk = { 0x00000001, gt215_clk_new },
+ .devinit = { 0x00000001, mcp89_devinit_new },
+ .fb = { 0x00000001, mcp89_fb_new },
+ .fuse = { 0x00000001, nv50_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, gt215_mc_new },
+ .mmu = { 0x00000001, mcp77_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, g94_pci_new },
+ .pmu = { 0x00000001, gt215_pmu_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, nv40_volt_new },
+ .ce = { 0x00000001, gt215_ce_new },
+ .disp = { 0x00000001, mcp89_disp_new },
+ .dma = { 0x00000001, nv50_dma_new },
+ .fifo = { 0x00000001, g84_fifo_new },
+ .gr = { 0x00000001, mcp89_gr_new },
+ .mspdec = { 0x00000001, gt215_mspdec_new },
+ .msppp = { 0x00000001, gt215_msppp_new },
+ .msvld = { 0x00000001, mcp89_msvld_new },
+ .pm = { 0x00000001, gt215_pm_new },
+ .sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nvc0_chipset = {
.name = "GF100",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf100_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .ibus = gf100_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf100_pci_new,
- .pmu = gf100_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = gf100_volt_new,
- .ce[0] = gf100_ce_new,
- .ce[1] = gf100_ce_new,
- .disp = gt215_disp_new,
- .dma = gf100_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf100_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf100_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf100_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf100_pci_new },
+ .pmu = { 0x00000001, gf100_pmu_new },
+ .privring = { 0x00000001, gf100_privring_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf100_volt_new },
+ .ce = { 0x00000003, gf100_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, gf100_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf100_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf100_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvc1_chipset = {
.name = "GF108",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf108_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .ibus = gf100_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf106_pci_new,
- .pmu = gf100_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = gf100_volt_new,
- .ce[0] = gf100_ce_new,
- .disp = gt215_disp_new,
- .dma = gf100_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf108_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf108_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf108_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf106_pci_new },
+ .pmu = { 0x00000001, gf100_pmu_new },
+ .privring = { 0x00000001, gf100_privring_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf100_volt_new },
+ .ce = { 0x00000001, gf100_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, gf100_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf108_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf108_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvc3_chipset = {
.name = "GF106",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf100_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .ibus = gf100_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf106_pci_new,
- .pmu = gf100_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = gf100_volt_new,
- .ce[0] = gf100_ce_new,
- .disp = gt215_disp_new,
- .dma = gf100_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf104_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf100_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf100_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf106_pci_new },
+ .pmu = { 0x00000001, gf100_pmu_new },
+ .privring = { 0x00000001, gf100_privring_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf100_volt_new },
+ .ce = { 0x00000001, gf100_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, gf100_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf104_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf100_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvc4_chipset = {
.name = "GF104",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf100_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .ibus = gf100_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf100_pci_new,
- .pmu = gf100_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = gf100_volt_new,
- .ce[0] = gf100_ce_new,
- .ce[1] = gf100_ce_new,
- .disp = gt215_disp_new,
- .dma = gf100_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf104_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf100_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf100_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf100_pci_new },
+ .pmu = { 0x00000001, gf100_pmu_new },
+ .privring = { 0x00000001, gf100_privring_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf100_volt_new },
+ .ce = { 0x00000003, gf100_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, gf100_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf104_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf100_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvc8_chipset = {
.name = "GF110",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf100_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .ibus = gf100_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf100_pci_new,
- .pmu = gf100_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = gf100_volt_new,
- .ce[0] = gf100_ce_new,
- .ce[1] = gf100_ce_new,
- .disp = gt215_disp_new,
- .dma = gf100_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf110_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf100_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf100_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf100_pci_new },
+ .pmu = { 0x00000001, gf100_pmu_new },
+ .privring = { 0x00000001, gf100_privring_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf100_volt_new },
+ .ce = { 0x00000003, gf100_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, gf100_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf110_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf100_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvce_chipset = {
.name = "GF114",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf100_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .ibus = gf100_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf100_pci_new,
- .pmu = gf100_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = gf100_volt_new,
- .ce[0] = gf100_ce_new,
- .ce[1] = gf100_ce_new,
- .disp = gt215_disp_new,
- .dma = gf100_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf104_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf100_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf100_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf100_pci_new },
+ .pmu = { 0x00000001, gf100_pmu_new },
+ .privring = { 0x00000001, gf100_privring_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf100_volt_new },
+ .ce = { 0x00000003, gf100_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, gf100_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf104_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf100_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvcf_chipset = {
.name = "GF116",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf100_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = g94_gpio_new,
- .i2c = g94_i2c_new,
- .ibus = gf100_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf106_pci_new,
- .pmu = gf100_pmu_new,
- .therm = gt215_therm_new,
- .timer = nv41_timer_new,
- .volt = gf100_volt_new,
- .ce[0] = gf100_ce_new,
- .disp = gt215_disp_new,
- .dma = gf100_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf104_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf100_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf100_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, g94_gpio_new },
+ .i2c = { 0x00000001, g94_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf106_pci_new },
+ .pmu = { 0x00000001, gf100_pmu_new },
+ .privring = { 0x00000001, gf100_privring_new },
+ .therm = { 0x00000001, gt215_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf100_volt_new },
+ .ce = { 0x00000001, gf100_ce_new },
+ .disp = { 0x00000001, gt215_disp_new },
+ .dma = { 0x00000001, gf100_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf104_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf100_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvd7_chipset = {
.name = "GF117",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf100_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gf119_gpio_new,
- .i2c = gf117_i2c_new,
- .ibus = gf117_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf106_pci_new,
- .therm = gf119_therm_new,
- .timer = nv41_timer_new,
- .volt = gf117_volt_new,
- .ce[0] = gf100_ce_new,
- .disp = gf119_disp_new,
- .dma = gf119_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf117_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf117_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf100_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gf119_gpio_new },
+ .i2c = { 0x00000001, gf117_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf106_pci_new },
+ .privring = { 0x00000001, gf117_privring_new },
+ .therm = { 0x00000001, gf119_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf117_volt_new },
+ .ce = { 0x00000001, gf100_ce_new },
+ .disp = { 0x00000001, gf119_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf117_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf117_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvd9_chipset = {
.name = "GF119",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gf100_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gf100_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gf119_gpio_new,
- .i2c = gf119_i2c_new,
- .ibus = gf117_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gf100_ltc_new,
- .mc = gf100_mc_new,
- .mmu = gf100_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gf106_pci_new,
- .pmu = gf119_pmu_new,
- .therm = gf119_therm_new,
- .timer = nv41_timer_new,
- .volt = gf100_volt_new,
- .ce[0] = gf100_ce_new,
- .disp = gf119_disp_new,
- .dma = gf119_dma_new,
- .fifo = gf100_fifo_new,
- .gr = gf119_gr_new,
- .mspdec = gf100_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gf100_msvld_new,
- .pm = gf117_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gf100_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gf100_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gf119_gpio_new },
+ .i2c = { 0x00000001, gf119_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gf100_ltc_new },
+ .mc = { 0x00000001, gf100_mc_new },
+ .mmu = { 0x00000001, gf100_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gf106_pci_new },
+ .pmu = { 0x00000001, gf119_pmu_new },
+ .privring = { 0x00000001, gf117_privring_new },
+ .therm = { 0x00000001, gf119_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .volt = { 0x00000001, gf100_volt_new },
+ .ce = { 0x00000001, gf100_ce_new },
+ .disp = { 0x00000001, gf119_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gf100_fifo_new },
+ .gr = { 0x00000001, gf119_gr_new },
+ .mspdec = { 0x00000001, gf100_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gf100_msvld_new },
+ .pm = { 0x00000001, gf117_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nve4_chipset = {
.name = "GK104",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gk104_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gk104_ltc_new,
- .mc = gk104_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gk104_pmu_new,
- .therm = gk104_therm_new,
- .timer = nv41_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gk104_ce_new,
- .ce[1] = gk104_ce_new,
- .ce[2] = gk104_ce_new,
- .disp = gk104_disp_new,
- .dma = gf119_dma_new,
- .fifo = gk104_fifo_new,
- .gr = gk104_gr_new,
- .mspdec = gk104_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gk104_msvld_new,
- .pm = gk104_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gk104_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk104_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gk104_ltc_new },
+ .mc = { 0x00000001, gk104_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gk104_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gk104_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gk104_ce_new },
+ .disp = { 0x00000001, gk104_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gk104_fifo_new },
+ .gr = { 0x00000001, gk104_gr_new },
+ .mspdec = { 0x00000001, gk104_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gk104_msvld_new },
+ .pm = { 0x00000001, gk104_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nve6_chipset = {
.name = "GK106",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gk104_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gk104_ltc_new,
- .mc = gk104_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gk104_pmu_new,
- .therm = gk104_therm_new,
- .timer = nv41_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gk104_ce_new,
- .ce[1] = gk104_ce_new,
- .ce[2] = gk104_ce_new,
- .disp = gk104_disp_new,
- .dma = gf119_dma_new,
- .fifo = gk104_fifo_new,
- .gr = gk104_gr_new,
- .mspdec = gk104_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gk104_msvld_new,
- .pm = gk104_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gk104_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk104_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gk104_ltc_new },
+ .mc = { 0x00000001, gk104_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gk104_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gk104_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gk104_ce_new },
+ .disp = { 0x00000001, gk104_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gk104_fifo_new },
+ .gr = { 0x00000001, gk104_gr_new },
+ .mspdec = { 0x00000001, gk104_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gk104_msvld_new },
+ .pm = { 0x00000001, gk104_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nve7_chipset = {
.name = "GK107",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gk104_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk104_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gk104_ltc_new,
- .mc = gk104_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gk104_pmu_new,
- .therm = gk104_therm_new,
- .timer = nv41_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gk104_ce_new,
- .ce[1] = gk104_ce_new,
- .ce[2] = gk104_ce_new,
- .disp = gk104_disp_new,
- .dma = gf119_dma_new,
- .fifo = gk104_fifo_new,
- .gr = gk104_gr_new,
- .mspdec = gk104_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gk104_msvld_new,
- .pm = gk104_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gk104_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk104_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gk104_ltc_new },
+ .mc = { 0x00000001, gk104_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gk104_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gk104_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gk104_ce_new },
+ .disp = { 0x00000001, gk104_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gk104_fifo_new },
+ .gr = { 0x00000001, gk104_gr_new },
+ .mspdec = { 0x00000001, gk104_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gk104_msvld_new },
+ .pm = { 0x00000001, gk104_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvea_chipset = {
.name = "GK20A",
- .bar = gk20a_bar_new,
- .bus = gf100_bus_new,
- .clk = gk20a_clk_new,
- .fb = gk20a_fb_new,
- .fuse = gf100_fuse_new,
- .ibus = gk20a_ibus_new,
- .imem = gk20a_instmem_new,
- .ltc = gk104_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gk20a_mmu_new,
- .pmu = gk20a_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .volt = gk20a_volt_new,
- .ce[2] = gk104_ce_new,
- .dma = gf119_dma_new,
- .fifo = gk20a_fifo_new,
- .gr = gk20a_gr_new,
- .pm = gk104_pm_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gk20a_bar_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk20a_clk_new },
+ .fb = { 0x00000001, gk20a_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .imem = { 0x00000001, gk20a_instmem_new },
+ .ltc = { 0x00000001, gk104_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gk20a_mmu_new },
+ .pmu = { 0x00000001, gk20a_pmu_new },
+ .privring = { 0x00000001, gk20a_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk20a_volt_new },
+ .ce = { 0x00000004, gk104_ce_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gk20a_fifo_new },
+ .gr = { 0x00000001, gk20a_gr_new },
+ .pm = { 0x00000001, gk104_pm_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvf0_chipset = {
.name = "GK110",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gk110_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk110_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gk104_ltc_new,
- .mc = gk104_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gk110_pmu_new,
- .therm = gk104_therm_new,
- .timer = nv41_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gk104_ce_new,
- .ce[1] = gk104_ce_new,
- .ce[2] = gk104_ce_new,
- .disp = gk110_disp_new,
- .dma = gf119_dma_new,
- .fifo = gk110_fifo_new,
- .gr = gk110_gr_new,
- .mspdec = gk104_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gk104_msvld_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gk110_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk110_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gk104_ltc_new },
+ .mc = { 0x00000001, gk104_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gk110_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gk104_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gk104_ce_new },
+ .disp = { 0x00000001, gk110_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gk110_fifo_new },
+ .gr = { 0x00000001, gk110_gr_new },
+ .mspdec = { 0x00000001, gk104_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gk104_msvld_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvf1_chipset = {
.name = "GK110B",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gk110_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk110_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gk104_ltc_new,
- .mc = gk104_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gk110_pmu_new,
- .therm = gk104_therm_new,
- .timer = nv41_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gk104_ce_new,
- .ce[1] = gk104_ce_new,
- .ce[2] = gk104_ce_new,
- .disp = gk110_disp_new,
- .dma = gf119_dma_new,
- .fifo = gk110_fifo_new,
- .gr = gk110b_gr_new,
- .mspdec = gk104_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gk104_msvld_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gk110_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk110_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gk104_ltc_new },
+ .mc = { 0x00000001, gk104_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gk110_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gk104_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gk104_ce_new },
+ .disp = { 0x00000001, gk110_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gk110_fifo_new },
+ .gr = { 0x00000001, gk110b_gr_new },
+ .mspdec = { 0x00000001, gk104_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gk104_msvld_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv106_chipset = {
.name = "GK208B",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gk110_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk110_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gk104_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gk208_pmu_new,
- .therm = gk104_therm_new,
- .timer = nv41_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gk104_ce_new,
- .ce[1] = gk104_ce_new,
- .ce[2] = gk104_ce_new,
- .disp = gk110_disp_new,
- .dma = gf119_dma_new,
- .fifo = gk208_fifo_new,
- .gr = gk208_gr_new,
- .mspdec = gk104_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gk104_msvld_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gk110_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk110_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gk104_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gk208_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gk104_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gk104_ce_new },
+ .disp = { 0x00000001, gk110_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gk208_fifo_new },
+ .gr = { 0x00000001, gk208_gr_new },
+ .mspdec = { 0x00000001, gk104_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gk104_msvld_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv108_chipset = {
.name = "GK208",
- .bar = gf100_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gf100_devinit_new,
- .fb = gk110_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk110_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gk104_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gk208_pmu_new,
- .therm = gk104_therm_new,
- .timer = nv41_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gk104_ce_new,
- .ce[1] = gk104_ce_new,
- .ce[2] = gk104_ce_new,
- .disp = gk110_disp_new,
- .dma = gf119_dma_new,
- .fifo = gk208_fifo_new,
- .gr = gk208_gr_new,
- .mspdec = gk104_mspdec_new,
- .msppp = gf100_msppp_new,
- .msvld = gk104_msvld_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gf100_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gf100_devinit_new },
+ .fb = { 0x00000001, gk110_fb_new },
+ .fuse = { 0x00000001, gf100_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk110_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gk104_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gk208_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gk104_therm_new },
+ .timer = { 0x00000001, nv41_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gk104_ce_new },
+ .disp = { 0x00000001, gk110_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gk208_fifo_new },
+ .gr = { 0x00000001, gk208_gr_new },
+ .mspdec = { 0x00000001, gk104_mspdec_new },
+ .msppp = { 0x00000001, gf100_msppp_new },
+ .msvld = { 0x00000001, gk104_msvld_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv117_chipset = {
.name = "GM107",
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gm107_devinit_new,
- .fb = gm107_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk110_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gm107_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gm107_pmu_new,
- .therm = gm107_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gm107_ce_new,
- .ce[2] = gm107_ce_new,
- .disp = gm107_disp_new,
- .dma = gf119_dma_new,
- .fifo = gm107_fifo_new,
- .gr = gm107_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gm107_devinit_new },
+ .fb = { 0x00000001, gm107_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk110_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gm107_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gm107_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gm107_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000005, gm107_ce_new },
+ .disp = { 0x00000001, gm107_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gm107_fifo_new },
+ .gr = { 0x00000001, gm107_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000001, gm107_nvenc_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv118_chipset = {
.name = "GM108",
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .clk = gk104_clk_new,
- .devinit = gm107_devinit_new,
- .fb = gm107_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gk110_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gm107_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gk104_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gm107_pmu_new,
- .therm = gm107_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gm107_ce_new,
- .ce[2] = gm107_ce_new,
- .disp = gm107_disp_new,
- .dma = gf119_dma_new,
- .fifo = gm107_fifo_new,
- .gr = gm107_gr_new,
- .sw = gf100_sw_new,
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gk104_clk_new },
+ .devinit = { 0x00000001, gm107_devinit_new },
+ .fb = { 0x00000001, gm107_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gk110_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gm107_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gk104_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gm107_pmu_new },
+ .privring = { 0x00000001, gk104_privring_new },
+ .therm = { 0x00000001, gm107_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000005, gm107_ce_new },
+ .disp = { 0x00000001, gm107_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gm107_fifo_new },
+ .gr = { 0x00000001, gm107_gr_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv120_chipset = {
.name = "GM200",
- .acr = gm200_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fb = gm200_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gm200_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gm200_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gm200_pmu_new,
- .therm = gm200_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gm200_ce_new,
- .ce[1] = gm200_ce_new,
- .ce[2] = gm200_ce_new,
- .disp = gm200_disp_new,
- .dma = gf119_dma_new,
- .fifo = gm200_fifo_new,
- .gr = gm200_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .nvenc[1] = gm107_nvenc_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gm200_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fb = { 0x00000001, gm200_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gm200_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gm200_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gm200_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gm200_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gm200_ce_new },
+ .disp = { 0x00000001, gm200_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gm200_fifo_new },
+ .gr = { 0x00000001, gm200_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000003, gm107_nvenc_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv124_chipset = {
.name = "GM204",
- .acr = gm200_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fb = gm200_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gm200_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gm200_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gm200_pmu_new,
- .therm = gm200_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gm200_ce_new,
- .ce[1] = gm200_ce_new,
- .ce[2] = gm200_ce_new,
- .disp = gm200_disp_new,
- .dma = gf119_dma_new,
- .fifo = gm200_fifo_new,
- .gr = gm200_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .nvenc[1] = gm107_nvenc_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gm200_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fb = { 0x00000001, gm200_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gm200_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gm200_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gm200_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gm200_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gm200_ce_new },
+ .disp = { 0x00000001, gm200_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gm200_fifo_new },
+ .gr = { 0x00000001, gm200_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000003, gm107_nvenc_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv126_chipset = {
.name = "GM206",
- .acr = gm200_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fb = gm200_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
- .ltc = gm200_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gm200_mmu_new,
- .mxm = nv50_mxm_new,
- .pci = gk104_pci_new,
- .pmu = gm200_pmu_new,
- .therm = gm200_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .volt = gk104_volt_new,
- .ce[0] = gm200_ce_new,
- .ce[1] = gm200_ce_new,
- .ce[2] = gm200_ce_new,
- .disp = gm200_disp_new,
- .dma = gf119_dma_new,
- .fifo = gm200_fifo_new,
- .gr = gm200_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gm200_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fb = { 0x00000001, gm200_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .iccsense = { 0x00000001, gf100_iccsense_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gm200_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gm200_mmu_new },
+ .mxm = { 0x00000001, nv50_mxm_new },
+ .pci = { 0x00000001, gk104_pci_new },
+ .pmu = { 0x00000001, gm200_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gm200_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gk104_volt_new },
+ .ce = { 0x00000007, gm200_ce_new },
+ .disp = { 0x00000001, gm200_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gm200_fifo_new },
+ .gr = { 0x00000001, gm200_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000001, gm107_nvenc_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv12b_chipset = {
.name = "GM20B",
- .acr = gm20b_acr_new,
- .bar = gm20b_bar_new,
- .bus = gf100_bus_new,
- .clk = gm20b_clk_new,
- .fb = gm20b_fb_new,
- .fuse = gm107_fuse_new,
- .ibus = gk20a_ibus_new,
- .imem = gk20a_instmem_new,
- .ltc = gm200_ltc_new,
- .mc = gk20a_mc_new,
- .mmu = gm20b_mmu_new,
- .pmu = gm20b_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[2] = gm200_ce_new,
- .volt = gm20b_volt_new,
- .dma = gf119_dma_new,
- .fifo = gm20b_fifo_new,
- .gr = gm20b_gr_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gm20b_acr_new },
+ .bar = { 0x00000001, gm20b_bar_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gm20b_clk_new },
+ .fb = { 0x00000001, gm20b_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .imem = { 0x00000001, gk20a_instmem_new },
+ .ltc = { 0x00000001, gm200_ltc_new },
+ .mc = { 0x00000001, gk20a_mc_new },
+ .mmu = { 0x00000001, gm20b_mmu_new },
+ .pmu = { 0x00000001, gm20b_pmu_new },
+ .privring = { 0x00000001, gk20a_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .volt = { 0x00000001, gm20b_volt_new },
+ .ce = { 0x00000004, gm200_ce_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gm20b_fifo_new },
+ .gr = { 0x00000001, gm20b_gr_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv130_chipset = {
.name = "GP100",
- .acr = gm200_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fault = gp100_fault_new,
- .fb = gp100_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp100_ltc_new,
- .mc = gp100_mc_new,
- .mmu = gp100_mmu_new,
- .therm = gp100_therm_new,
- .pci = gp100_pci_new,
- .pmu = gm200_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = gp100_ce_new,
- .ce[1] = gp100_ce_new,
- .ce[2] = gp100_ce_new,
- .ce[3] = gp100_ce_new,
- .ce[4] = gp100_ce_new,
- .ce[5] = gp100_ce_new,
- .dma = gf119_dma_new,
- .disp = gp100_disp_new,
- .fifo = gp100_fifo_new,
- .gr = gp100_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .nvenc[1] = gm107_nvenc_new,
- .nvenc[2] = gm107_nvenc_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gm200_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fault = { 0x00000001, gp100_fault_new },
+ .fb = { 0x00000001, gp100_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp100_ltc_new },
+ .mc = { 0x00000001, gp100_mc_new },
+ .mmu = { 0x00000001, gp100_mmu_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gm200_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000003f, gp100_ce_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .disp = { 0x00000001, gp100_disp_new },
+ .fifo = { 0x00000001, gp100_fifo_new },
+ .gr = { 0x00000001, gp100_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000007, gm107_nvenc_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv132_chipset = {
.name = "GP102",
- .acr = gp102_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fault = gp100_fault_new,
- .fb = gp102_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = gp100_mc_new,
- .mmu = gp100_mmu_new,
- .therm = gp100_therm_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = gp102_ce_new,
- .ce[1] = gp102_ce_new,
- .ce[2] = gp102_ce_new,
- .ce[3] = gp102_ce_new,
- .disp = gp102_disp_new,
- .dma = gf119_dma_new,
- .fifo = gp100_fifo_new,
- .gr = gp102_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .nvenc[1] = gm107_nvenc_new,
- .sec2 = gp102_sec2_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gp102_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fault = { 0x00000001, gp100_fault_new },
+ .fb = { 0x00000001, gp102_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, gp100_mc_new },
+ .mmu = { 0x00000001, gp100_mmu_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000000f, gp102_ce_new },
+ .disp = { 0x00000001, gp102_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gp100_fifo_new },
+ .gr = { 0x00000001, gp102_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000003, gm107_nvenc_new },
+ .sec2 = { 0x00000001, gp102_sec2_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv134_chipset = {
.name = "GP104",
- .acr = gp102_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fault = gp100_fault_new,
- .fb = gp102_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = gp100_mc_new,
- .mmu = gp100_mmu_new,
- .therm = gp100_therm_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = gp102_ce_new,
- .ce[1] = gp102_ce_new,
- .ce[2] = gp102_ce_new,
- .ce[3] = gp102_ce_new,
- .disp = gp102_disp_new,
- .dma = gf119_dma_new,
- .fifo = gp100_fifo_new,
- .gr = gp104_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .nvenc[1] = gm107_nvenc_new,
- .sec2 = gp102_sec2_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gp102_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fault = { 0x00000001, gp100_fault_new },
+ .fb = { 0x00000001, gp102_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, gp100_mc_new },
+ .mmu = { 0x00000001, gp100_mmu_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000000f, gp102_ce_new },
+ .disp = { 0x00000001, gp102_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gp100_fifo_new },
+ .gr = { 0x00000001, gp104_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000003, gm107_nvenc_new },
+ .sec2 = { 0x00000001, gp102_sec2_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv136_chipset = {
.name = "GP106",
- .acr = gp102_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fault = gp100_fault_new,
- .fb = gp102_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = gp100_mc_new,
- .mmu = gp100_mmu_new,
- .therm = gp100_therm_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = gp102_ce_new,
- .ce[1] = gp102_ce_new,
- .ce[2] = gp102_ce_new,
- .ce[3] = gp102_ce_new,
- .disp = gp102_disp_new,
- .dma = gf119_dma_new,
- .fifo = gp100_fifo_new,
- .gr = gp104_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .sec2 = gp102_sec2_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gp102_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fault = { 0x00000001, gp100_fault_new },
+ .fb = { 0x00000001, gp102_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, gp100_mc_new },
+ .mmu = { 0x00000001, gp100_mmu_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000000f, gp102_ce_new },
+ .disp = { 0x00000001, gp102_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gp100_fifo_new },
+ .gr = { 0x00000001, gp104_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000001, gm107_nvenc_new },
+ .sec2 = { 0x00000001, gp102_sec2_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv137_chipset = {
.name = "GP107",
- .acr = gp102_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fault = gp100_fault_new,
- .fb = gp102_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = gp100_mc_new,
- .mmu = gp100_mmu_new,
- .therm = gp100_therm_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = gp102_ce_new,
- .ce[1] = gp102_ce_new,
- .ce[2] = gp102_ce_new,
- .ce[3] = gp102_ce_new,
- .disp = gp102_disp_new,
- .dma = gf119_dma_new,
- .fifo = gp100_fifo_new,
- .gr = gp107_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .nvenc[1] = gm107_nvenc_new,
- .sec2 = gp102_sec2_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gp102_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fault = { 0x00000001, gp100_fault_new },
+ .fb = { 0x00000001, gp102_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, gp100_mc_new },
+ .mmu = { 0x00000001, gp100_mmu_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000000f, gp102_ce_new },
+ .disp = { 0x00000001, gp102_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gp100_fifo_new },
+ .gr = { 0x00000001, gp107_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000003, gm107_nvenc_new },
+ .sec2 = { 0x00000001, gp102_sec2_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv138_chipset = {
.name = "GP108",
- .acr = gp108_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gm200_devinit_new,
- .fault = gp100_fault_new,
- .fb = gp102_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = gp100_mc_new,
- .mmu = gp100_mmu_new,
- .therm = gp100_therm_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = gp102_ce_new,
- .ce[1] = gp102_ce_new,
- .ce[2] = gp102_ce_new,
- .ce[3] = gp102_ce_new,
- .disp = gp102_disp_new,
- .dma = gf119_dma_new,
- .fifo = gp100_fifo_new,
- .gr = gp108_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .sec2 = gp108_sec2_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gp108_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gm200_devinit_new },
+ .fault = { 0x00000001, gp100_fault_new },
+ .fb = { 0x00000001, gp102_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, gp100_mc_new },
+ .mmu = { 0x00000001, gp100_mmu_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000000f, gp102_ce_new },
+ .disp = { 0x00000001, gp102_disp_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gp100_fifo_new },
+ .gr = { 0x00000001, gp108_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .sec2 = { 0x00000001, gp108_sec2_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv13b_chipset = {
.name = "GP10B",
- .acr = gp10b_acr_new,
- .bar = gm20b_bar_new,
- .bus = gf100_bus_new,
- .fault = gp10b_fault_new,
- .fb = gp10b_fb_new,
- .fuse = gm107_fuse_new,
- .ibus = gp10b_ibus_new,
- .imem = gk20a_instmem_new,
- .ltc = gp10b_ltc_new,
- .mc = gp10b_mc_new,
- .mmu = gp10b_mmu_new,
- .pmu = gp10b_pmu_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = gp100_ce_new,
- .dma = gf119_dma_new,
- .fifo = gp10b_fifo_new,
- .gr = gp10b_gr_new,
- .sw = gf100_sw_new,
+ .acr = { 0x00000001, gp10b_acr_new },
+ .bar = { 0x00000001, gm20b_bar_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .fault = { 0x00000001, gp10b_fault_new },
+ .fb = { 0x00000001, gp10b_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .imem = { 0x00000001, gk20a_instmem_new },
+ .ltc = { 0x00000001, gp10b_ltc_new },
+ .mc = { 0x00000001, gp10b_mc_new },
+ .mmu = { 0x00000001, gp10b_mmu_new },
+ .pmu = { 0x00000001, gp10b_pmu_new },
+ .privring = { 0x00000001, gp10b_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x00000001, gp100_ce_new },
+ .dma = { 0x00000001, gf119_dma_new },
+ .fifo = { 0x00000001, gp10b_fifo_new },
+ .gr = { 0x00000001, gp10b_gr_new },
+ .sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv140_chipset = {
.name = "GV100",
- .acr = gp108_acr_new,
- .bar = gm107_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = gv100_devinit_new,
- .fault = gv100_fault_new,
- .fb = gv100_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .gsp = gv100_gsp_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = gp100_mc_new,
- .mmu = gv100_mmu_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .therm = gp100_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .disp = gv100_disp_new,
- .ce[0] = gv100_ce_new,
- .ce[1] = gv100_ce_new,
- .ce[2] = gv100_ce_new,
- .ce[3] = gv100_ce_new,
- .ce[4] = gv100_ce_new,
- .ce[5] = gv100_ce_new,
- .ce[6] = gv100_ce_new,
- .ce[7] = gv100_ce_new,
- .ce[8] = gv100_ce_new,
- .dma = gv100_dma_new,
- .fifo = gv100_fifo_new,
- .gr = gv100_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .nvenc[1] = gm107_nvenc_new,
- .nvenc[2] = gm107_nvenc_new,
- .sec2 = gp108_sec2_new,
+ .acr = { 0x00000001, gp108_acr_new },
+ .bar = { 0x00000001, gm107_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, gv100_devinit_new },
+ .fault = { 0x00000001, gv100_fault_new },
+ .fb = { 0x00000001, gv100_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .gsp = { 0x00000001, gv100_gsp_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, gp100_mc_new },
+ .mmu = { 0x00000001, gv100_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x000001ff, gv100_ce_new },
+ .disp = { 0x00000001, gv100_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, gv100_fifo_new },
+ .gr = { 0x00000001, gv100_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000007, gm107_nvenc_new },
+ .sec2 = { 0x00000001, gp108_sec2_new },
};
static const struct nvkm_device_chip
nv162_chipset = {
.name = "TU102",
- .acr = tu102_acr_new,
- .bar = tu102_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = tu102_devinit_new,
- .fault = tu102_fault_new,
- .fb = gv100_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .gsp = gv100_gsp_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = tu102_mc_new,
- .mmu = tu102_mmu_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .therm = gp100_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = tu102_ce_new,
- .ce[1] = tu102_ce_new,
- .ce[2] = tu102_ce_new,
- .ce[3] = tu102_ce_new,
- .ce[4] = tu102_ce_new,
- .disp = tu102_disp_new,
- .dma = gv100_dma_new,
- .fifo = tu102_fifo_new,
- .gr = tu102_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .sec2 = tu102_sec2_new,
+ .acr = { 0x00000001, tu102_acr_new },
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, tu102_devinit_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, gv100_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .gsp = { 0x00000001, gv100_gsp_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, tu102_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000001f, tu102_ce_new },
+ .disp = { 0x00000001, tu102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, tu102_fifo_new },
+ .gr = { 0x00000001, tu102_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000001, gm107_nvenc_new },
+ .sec2 = { 0x00000001, tu102_sec2_new },
};
static const struct nvkm_device_chip
nv164_chipset = {
.name = "TU104",
- .acr = tu102_acr_new,
- .bar = tu102_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = tu102_devinit_new,
- .fault = tu102_fault_new,
- .fb = gv100_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .gsp = gv100_gsp_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = tu102_mc_new,
- .mmu = tu102_mmu_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .therm = gp100_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = tu102_ce_new,
- .ce[1] = tu102_ce_new,
- .ce[2] = tu102_ce_new,
- .ce[3] = tu102_ce_new,
- .ce[4] = tu102_ce_new,
- .disp = tu102_disp_new,
- .dma = gv100_dma_new,
- .fifo = tu102_fifo_new,
- .gr = tu102_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvdec[1] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .sec2 = tu102_sec2_new,
+ .acr = { 0x00000001, tu102_acr_new },
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, tu102_devinit_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, gv100_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .gsp = { 0x00000001, gv100_gsp_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, tu102_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000001f, tu102_ce_new },
+ .disp = { 0x00000001, tu102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, tu102_fifo_new },
+ .gr = { 0x00000001, tu102_gr_new },
+ .nvdec = { 0x00000003, gm107_nvdec_new },
+ .nvenc = { 0x00000001, gm107_nvenc_new },
+ .sec2 = { 0x00000001, tu102_sec2_new },
};
static const struct nvkm_device_chip
nv166_chipset = {
.name = "TU106",
- .acr = tu102_acr_new,
- .bar = tu102_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = tu102_devinit_new,
- .fault = tu102_fault_new,
- .fb = gv100_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .gsp = gv100_gsp_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = tu102_mc_new,
- .mmu = tu102_mmu_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .therm = gp100_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = tu102_ce_new,
- .ce[1] = tu102_ce_new,
- .ce[2] = tu102_ce_new,
- .ce[3] = tu102_ce_new,
- .ce[4] = tu102_ce_new,
- .disp = tu102_disp_new,
- .dma = gv100_dma_new,
- .fifo = tu102_fifo_new,
- .gr = tu102_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvdec[1] = gm107_nvdec_new,
- .nvdec[2] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .sec2 = tu102_sec2_new,
+ .acr = { 0x00000001, tu102_acr_new },
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, tu102_devinit_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, gv100_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .gsp = { 0x00000001, gv100_gsp_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, tu102_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000001f, tu102_ce_new },
+ .disp = { 0x00000001, tu102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, tu102_fifo_new },
+ .gr = { 0x00000001, tu102_gr_new },
+ .nvdec = { 0x00000007, gm107_nvdec_new },
+ .nvenc = { 0x00000001, gm107_nvenc_new },
+ .sec2 = { 0x00000001, tu102_sec2_new },
};
static const struct nvkm_device_chip
nv167_chipset = {
.name = "TU117",
- .acr = tu102_acr_new,
- .bar = tu102_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = tu102_devinit_new,
- .fault = tu102_fault_new,
- .fb = gv100_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .gsp = gv100_gsp_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = tu102_mc_new,
- .mmu = tu102_mmu_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .therm = gp100_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = tu102_ce_new,
- .ce[1] = tu102_ce_new,
- .ce[2] = tu102_ce_new,
- .ce[3] = tu102_ce_new,
- .ce[4] = tu102_ce_new,
- .disp = tu102_disp_new,
- .dma = gv100_dma_new,
- .fifo = tu102_fifo_new,
- .gr = tu102_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .sec2 = tu102_sec2_new,
+ .acr = { 0x00000001, tu102_acr_new },
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, tu102_devinit_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, gv100_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .gsp = { 0x00000001, gv100_gsp_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, tu102_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000001f, tu102_ce_new },
+ .disp = { 0x00000001, tu102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, tu102_fifo_new },
+ .gr = { 0x00000001, tu102_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000001, gm107_nvenc_new },
+ .sec2 = { 0x00000001, tu102_sec2_new },
};
static const struct nvkm_device_chip
nv168_chipset = {
.name = "TU116",
- .acr = tu102_acr_new,
- .bar = tu102_bar_new,
- .bios = nvkm_bios_new,
- .bus = gf100_bus_new,
- .devinit = tu102_devinit_new,
- .fault = tu102_fault_new,
- .fb = gv100_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
- .gsp = gv100_gsp_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .ltc = gp102_ltc_new,
- .mc = tu102_mc_new,
- .mmu = tu102_mmu_new,
- .pci = gp100_pci_new,
- .pmu = gp102_pmu_new,
- .therm = gp100_therm_new,
- .timer = gk20a_timer_new,
- .top = gk104_top_new,
- .ce[0] = tu102_ce_new,
- .ce[1] = tu102_ce_new,
- .ce[2] = tu102_ce_new,
- .ce[3] = tu102_ce_new,
- .ce[4] = tu102_ce_new,
- .disp = tu102_disp_new,
- .dma = gv100_dma_new,
- .fifo = tu102_fifo_new,
- .gr = tu102_gr_new,
- .nvdec[0] = gm107_nvdec_new,
- .nvenc[0] = gm107_nvenc_new,
- .sec2 = tu102_sec2_new,
+ .acr = { 0x00000001, tu102_acr_new },
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .bus = { 0x00000001, gf100_bus_new },
+ .devinit = { 0x00000001, tu102_devinit_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, gv100_fb_new },
+ .fuse = { 0x00000001, gm107_fuse_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .gsp = { 0x00000001, gv100_gsp_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .ltc = { 0x00000001, gp102_ltc_new },
+ .mc = { 0x00000001, tu102_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .pmu = { 0x00000001, gp102_pmu_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .therm = { 0x00000001, gp100_therm_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, gk104_top_new },
+ .ce = { 0x0000001f, tu102_ce_new },
+ .disp = { 0x00000001, tu102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, tu102_fifo_new },
+ .gr = { 0x00000001, tu102_gr_new },
+ .nvdec = { 0x00000001, gm107_nvdec_new },
+ .nvenc = { 0x00000001, gm107_nvenc_new },
+ .sec2 = { 0x00000001, tu102_sec2_new },
};
static const struct nvkm_device_chip
nv170_chipset = {
.name = "GA100",
- .bar = tu102_bar_new,
- .bios = nvkm_bios_new,
- .devinit = ga100_devinit_new,
- .fb = ga100_fb_new,
- .gpio = gk104_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .mc = ga100_mc_new,
- .mmu = tu102_mmu_new,
- .pci = gp100_pci_new,
- .timer = gk20a_timer_new,
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fb = { 0x00000001, ga100_fb_new },
+ .gpio = { 0x00000001, gk104_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, ga100_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, ga100_top_new },
};
static const struct nvkm_device_chip
nv172_chipset = {
.name = "GA102",
- .bar = tu102_bar_new,
- .bios = nvkm_bios_new,
- .devinit = ga100_devinit_new,
- .fb = ga102_fb_new,
- .gpio = ga102_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .mc = ga100_mc_new,
- .mmu = tu102_mmu_new,
- .pci = gp100_pci_new,
- .timer = gk20a_timer_new,
- .disp = ga102_disp_new,
- .dma = gv100_dma_new,
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gpio = { 0x00000001, ga102_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, ga100_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, ga100_top_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
};
static const struct nvkm_device_chip
nv174_chipset = {
.name = "GA104",
- .bar = tu102_bar_new,
- .bios = nvkm_bios_new,
- .devinit = ga100_devinit_new,
- .fb = ga102_fb_new,
- .gpio = ga102_gpio_new,
- .i2c = gm200_i2c_new,
- .ibus = gm200_ibus_new,
- .imem = nv50_instmem_new,
- .mc = ga100_mc_new,
- .mmu = tu102_mmu_new,
- .pci = gp100_pci_new,
- .timer = gk20a_timer_new,
- .disp = ga102_disp_new,
- .dma = gv100_dma_new,
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gpio = { 0x00000001, ga102_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, ga100_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, ga100_top_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
};
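The chipset tables above replace per-index constructor pointers (for example five separate ".ce[n]" lines) with a single { instance-bitmask, constructor } pair per subdev/engine type. Below is a minimal, stand-alone sketch, not code from this patch, of how such a bitmask (e.g. the 0x0000001f used for ".ce" on TU102) maps back onto individual engine instances; the struct and names are illustrative stand-ins only.

/*
 * Minimal sketch: decode a per-engine instance bitmask of the kind used
 * in the nvkm_device_chip tables above.  Illustrative names only.
 */
#include <stdio.h>
#include <stdint.h>

struct chip_entry {
	uint32_t inst;      /* one bit per present instance, e.g. 0x0000001f */
	const char *ctor;   /* stand-in for the constructor function pointer */
};

int main(void)
{
	/* equivalent of ".ce = { 0x0000001f, tu102_ce_new }" */
	struct chip_entry ce = { 0x0000001f, "tu102_ce_new" };
	int i;

	for (i = 0; i < 32; i++) {
		if (ce.inst & (1u << i))
			printf("CE%d -> %s\n", i, ce.ctor);
	}
	return 0;
}

Run against 0x0000001f this prints CE0 through CE4, matching the five ".ce[0]"–".ce[4]" lines removed above; a mask of 0x00000005 (as on GM107) would keep only CE0 and CE2.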
static int
@@ -2726,97 +2643,24 @@ nvkm_device_event_func = {
};
struct nvkm_subdev *
-nvkm_device_subdev(struct nvkm_device *device, int index)
+nvkm_device_subdev(struct nvkm_device *device, int type, int inst)
{
- struct nvkm_engine *engine;
-
- if (device->disable_mask & (1ULL << index))
- return NULL;
-
- switch (index) {
-#define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break
- _(ACR , device->acr , &device->acr->subdev);
- _(BAR , device->bar , &device->bar->subdev);
- _(VBIOS , device->bios , &device->bios->subdev);
- _(BUS , device->bus , &device->bus->subdev);
- _(CLK , device->clk , &device->clk->subdev);
- _(DEVINIT , device->devinit , &device->devinit->subdev);
- _(FAULT , device->fault , &device->fault->subdev);
- _(FB , device->fb , &device->fb->subdev);
- _(FUSE , device->fuse , &device->fuse->subdev);
- _(GPIO , device->gpio , &device->gpio->subdev);
- _(GSP , device->gsp , &device->gsp->subdev);
- _(I2C , device->i2c , &device->i2c->subdev);
- _(IBUS , device->ibus , device->ibus);
- _(ICCSENSE, device->iccsense, &device->iccsense->subdev);
- _(INSTMEM , device->imem , &device->imem->subdev);
- _(LTC , device->ltc , &device->ltc->subdev);
- _(MC , device->mc , &device->mc->subdev);
- _(MMU , device->mmu , &device->mmu->subdev);
- _(MXM , device->mxm , device->mxm);
- _(PCI , device->pci , &device->pci->subdev);
- _(PMU , device->pmu , &device->pmu->subdev);
- _(THERM , device->therm , &device->therm->subdev);
- _(TIMER , device->timer , &device->timer->subdev);
- _(TOP , device->top , &device->top->subdev);
- _(VOLT , device->volt , &device->volt->subdev);
-#undef _
- default:
- engine = nvkm_device_engine(device, index);
- if (engine)
- return &engine->subdev;
- break;
+ struct nvkm_subdev *subdev;
+
+ list_for_each_entry(subdev, &device->subdev, head) {
+ if (subdev->type == type && subdev->inst == inst)
+ return subdev;
}
+
return NULL;
}
struct nvkm_engine *
-nvkm_device_engine(struct nvkm_device *device, int index)
+nvkm_device_engine(struct nvkm_device *device, int type, int inst)
{
- if (device->disable_mask & (1ULL << index))
- return NULL;
-
- switch (index) {
-#define _(n,p,m) case NVKM_ENGINE_##n: if (p) return (m); break
- _(BSP , device->bsp , device->bsp);
- _(CE0 , device->ce[0] , device->ce[0]);
- _(CE1 , device->ce[1] , device->ce[1]);
- _(CE2 , device->ce[2] , device->ce[2]);
- _(CE3 , device->ce[3] , device->ce[3]);
- _(CE4 , device->ce[4] , device->ce[4]);
- _(CE5 , device->ce[5] , device->ce[5]);
- _(CE6 , device->ce[6] , device->ce[6]);
- _(CE7 , device->ce[7] , device->ce[7]);
- _(CE8 , device->ce[8] , device->ce[8]);
- _(CIPHER , device->cipher , device->cipher);
- _(DISP , device->disp , &device->disp->engine);
- _(DMAOBJ , device->dma , &device->dma->engine);
- _(FIFO , device->fifo , &device->fifo->engine);
- _(GR , device->gr , &device->gr->engine);
- _(IFB , device->ifb , device->ifb);
- _(ME , device->me , device->me);
- _(MPEG , device->mpeg , device->mpeg);
- _(MSENC , device->msenc , device->msenc);
- _(MSPDEC , device->mspdec , device->mspdec);
- _(MSPPP , device->msppp , device->msppp);
- _(MSVLD , device->msvld , device->msvld);
- _(NVENC0 , device->nvenc[0], &device->nvenc[0]->engine);
- _(NVENC1 , device->nvenc[1], &device->nvenc[1]->engine);
- _(NVENC2 , device->nvenc[2], &device->nvenc[2]->engine);
- _(NVDEC0 , device->nvdec[0], &device->nvdec[0]->engine);
- _(NVDEC1 , device->nvdec[1], &device->nvdec[1]->engine);
- _(NVDEC2 , device->nvdec[2], &device->nvdec[2]->engine);
- _(PM , device->pm , &device->pm->engine);
- _(SEC , device->sec , device->sec);
- _(SEC2 , device->sec2 , &device->sec2->engine);
- _(SW , device->sw , &device->sw->engine);
- _(VIC , device->vic , device->vic);
- _(VP , device->vp , device->vp);
-#undef _
- default:
- WARN_ON(1);
- break;
- }
+ struct nvkm_subdev *subdev = nvkm_device_subdev(device, type, inst);
+ if (subdev && subdev->func == &nvkm_engine)
+ return container_of(subdev, struct nvkm_engine, subdev);
return NULL;
}
@@ -2825,7 +2669,7 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend)
{
const char *action = suspend ? "suspend" : "fini";
struct nvkm_subdev *subdev;
- int ret, i;
+ int ret;
s64 time;
nvdev_trace(device, "%s running...\n", action);
@@ -2833,12 +2677,10 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend)
nvkm_acpi_fini(device);
- for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) {
- if ((subdev = nvkm_device_subdev(device, i))) {
- ret = nvkm_subdev_fini(subdev, suspend);
- if (ret && suspend)
- goto fail;
- }
+ list_for_each_entry_reverse(subdev, &device->subdev, head) {
+ ret = nvkm_subdev_fini(subdev, suspend);
+ if (ret && suspend)
+ goto fail;
}
nvkm_therm_clkgate_fini(device->therm, suspend);
@@ -2851,13 +2693,11 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend)
return 0;
fail:
- do {
- if ((subdev = nvkm_device_subdev(device, i))) {
- int rret = nvkm_subdev_init(subdev);
- if (rret)
- nvkm_fatal(subdev, "failed restart, %d\n", ret);
- }
- } while (++i < NVKM_SUBDEV_NR);
+ list_for_each_entry_from(subdev, &device->subdev, head) {
+ int rret = nvkm_subdev_init(subdev);
+ if (rret)
+ nvkm_fatal(subdev, "failed restart, %d\n", ret);
+ }
nvdev_trace(device, "%s failed with %d\n", action, ret);
return ret;
@@ -2867,7 +2707,7 @@ static int
nvkm_device_preinit(struct nvkm_device *device)
{
struct nvkm_subdev *subdev;
- int ret, i;
+ int ret;
s64 time;
nvdev_trace(device, "preinit running...\n");
@@ -2879,15 +2719,13 @@ nvkm_device_preinit(struct nvkm_device *device)
goto fail;
}
- for (i = 0; i < NVKM_SUBDEV_NR; i++) {
- if ((subdev = nvkm_device_subdev(device, i))) {
- ret = nvkm_subdev_preinit(subdev);
- if (ret)
- goto fail;
- }
+ list_for_each_entry(subdev, &device->subdev, head) {
+ ret = nvkm_subdev_preinit(subdev);
+ if (ret)
+ goto fail;
}
- ret = nvkm_devinit_post(device->devinit, &device->disable_mask);
+ ret = nvkm_devinit_post(device->devinit);
if (ret)
goto fail;
@@ -2904,7 +2742,7 @@ int
nvkm_device_init(struct nvkm_device *device)
{
struct nvkm_subdev *subdev;
- int ret, i;
+ int ret;
s64 time;
ret = nvkm_device_preinit(device);
@@ -2922,12 +2760,10 @@ nvkm_device_init(struct nvkm_device *device)
goto fail;
}
- for (i = 0; i < NVKM_SUBDEV_NR; i++) {
- if ((subdev = nvkm_device_subdev(device, i))) {
- ret = nvkm_subdev_init(subdev);
- if (ret)
- goto fail_subdev;
- }
+ list_for_each_entry(subdev, &device->subdev, head) {
+ ret = nvkm_subdev_init(subdev);
+ if (ret)
+ goto fail_subdev;
}
nvkm_acpi_init(device);
@@ -2938,11 +2774,8 @@ nvkm_device_init(struct nvkm_device *device)
return 0;
fail_subdev:
- do {
- if ((subdev = nvkm_device_subdev(device, i)))
- nvkm_subdev_fini(subdev, false);
- } while (--i >= 0);
-
+ list_for_each_entry_from(subdev, &device->subdev, head)
+ nvkm_subdev_fini(subdev, false);
fail:
nvkm_device_fini(device, false);
@@ -2954,15 +2787,12 @@ void
nvkm_device_del(struct nvkm_device **pdevice)
{
struct nvkm_device *device = *pdevice;
- int i;
+ struct nvkm_subdev *subdev, *subtmp;
if (device) {
mutex_lock(&nv_devices_mutex);
- device->disable_mask = 0;
- for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) {
- struct nvkm_subdev *subdev =
- nvkm_device_subdev(device, i);
+
+ list_for_each_entry_safe_reverse(subdev, subtmp, &device->subdev, head)
nvkm_subdev_del(&subdev);
- }
nvkm_event_fini(&device->event);
@@ -3021,7 +2851,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
struct nvkm_subdev *subdev;
u64 mmio_base, mmio_size;
u32 boot0, boot1, strap;
- int ret = -EEXIST, i;
+ int ret = -EEXIST, j;
unsigned chipset;
mutex_lock(&nv_devices_mutex);
@@ -3038,6 +2868,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
device->name = name;
list_add_tail(&device->head, &nv_devices);
device->debug = nvkm_dbgopt(device->dbgopt, "device");
+ INIT_LIST_HEAD(&device->subdev);
ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
if (ret)
@@ -3271,88 +3102,46 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
mutex_init(&device->mutex);
- for (i = 0; i < NVKM_SUBDEV_NR; i++) {
-#define _(s,m) case s: \
- if (device->chip->m && (subdev_mask & (1ULL << (s)))) { \
- ret = device->chip->m(device, (s), &device->m); \
- if (ret) { \
- subdev = nvkm_device_subdev(device, (s)); \
- nvkm_subdev_del(&subdev); \
- device->m = NULL; \
- if (ret != -ENODEV) { \
- nvdev_error(device, "%s ctor failed, %d\n", \
- nvkm_subdev_name[s], ret); \
- goto done; \
- } \
- } \
- } \
- break
- switch (i) {
- _(NVKM_SUBDEV_ACR , acr);
- _(NVKM_SUBDEV_BAR , bar);
- _(NVKM_SUBDEV_VBIOS , bios);
- _(NVKM_SUBDEV_BUS , bus);
- _(NVKM_SUBDEV_CLK , clk);
- _(NVKM_SUBDEV_DEVINIT , devinit);
- _(NVKM_SUBDEV_FAULT , fault);
- _(NVKM_SUBDEV_FB , fb);
- _(NVKM_SUBDEV_FUSE , fuse);
- _(NVKM_SUBDEV_GPIO , gpio);
- _(NVKM_SUBDEV_GSP , gsp);
- _(NVKM_SUBDEV_I2C , i2c);
- _(NVKM_SUBDEV_IBUS , ibus);
- _(NVKM_SUBDEV_ICCSENSE, iccsense);
- _(NVKM_SUBDEV_INSTMEM , imem);
- _(NVKM_SUBDEV_LTC , ltc);
- _(NVKM_SUBDEV_MC , mc);
- _(NVKM_SUBDEV_MMU , mmu);
- _(NVKM_SUBDEV_MXM , mxm);
- _(NVKM_SUBDEV_PCI , pci);
- _(NVKM_SUBDEV_PMU , pmu);
- _(NVKM_SUBDEV_THERM , therm);
- _(NVKM_SUBDEV_TIMER , timer);
- _(NVKM_SUBDEV_TOP , top);
- _(NVKM_SUBDEV_VOLT , volt);
- _(NVKM_ENGINE_BSP , bsp);
- _(NVKM_ENGINE_CE0 , ce[0]);
- _(NVKM_ENGINE_CE1 , ce[1]);
- _(NVKM_ENGINE_CE2 , ce[2]);
- _(NVKM_ENGINE_CE3 , ce[3]);
- _(NVKM_ENGINE_CE4 , ce[4]);
- _(NVKM_ENGINE_CE5 , ce[5]);
- _(NVKM_ENGINE_CE6 , ce[6]);
- _(NVKM_ENGINE_CE7 , ce[7]);
- _(NVKM_ENGINE_CE8 , ce[8]);
- _(NVKM_ENGINE_CIPHER , cipher);
- _(NVKM_ENGINE_DISP , disp);
- _(NVKM_ENGINE_DMAOBJ , dma);
- _(NVKM_ENGINE_FIFO , fifo);
- _(NVKM_ENGINE_GR , gr);
- _(NVKM_ENGINE_IFB , ifb);
- _(NVKM_ENGINE_ME , me);
- _(NVKM_ENGINE_MPEG , mpeg);
- _(NVKM_ENGINE_MSENC , msenc);
- _(NVKM_ENGINE_MSPDEC , mspdec);
- _(NVKM_ENGINE_MSPPP , msppp);
- _(NVKM_ENGINE_MSVLD , msvld);
- _(NVKM_ENGINE_NVENC0 , nvenc[0]);
- _(NVKM_ENGINE_NVENC1 , nvenc[1]);
- _(NVKM_ENGINE_NVENC2 , nvenc[2]);
- _(NVKM_ENGINE_NVDEC0 , nvdec[0]);
- _(NVKM_ENGINE_NVDEC1 , nvdec[1]);
- _(NVKM_ENGINE_NVDEC2 , nvdec[2]);
- _(NVKM_ENGINE_PM , pm);
- _(NVKM_ENGINE_SEC , sec);
- _(NVKM_ENGINE_SEC2 , sec2);
- _(NVKM_ENGINE_SW , sw);
- _(NVKM_ENGINE_VIC , vic);
- _(NVKM_ENGINE_VP , vp);
- default:
- WARN_ON(1);
- continue;
- }
-#undef _
+#define NVKM_LAYOUT_ONCE(type,data,ptr) \
+ if (device->chip->ptr.inst && (subdev_mask & (BIT_ULL(type)))) { \
+ WARN_ON(device->chip->ptr.inst != 0x00000001); \
+ ret = device->chip->ptr.ctor(device, (type), -1, &device->ptr); \
+ subdev = nvkm_device_subdev(device, (type), 0); \
+ if (ret) { \
+ nvkm_subdev_del(&subdev); \
+ device->ptr = NULL; \
+ if (ret != -ENODEV) { \
+ nvdev_error(device, "%s ctor failed: %d\n", \
+ nvkm_subdev_type[(type)], ret); \
+ goto done; \
+ } \
+ } else { \
+ subdev->pself = (void **)&device->ptr; \
+ } \
+ }
+#define NVKM_LAYOUT_INST(type,data,ptr,cnt) \
+ WARN_ON(device->chip->ptr.inst & ~((1 << ARRAY_SIZE(device->ptr)) - 1)); \
+ for (j = 0; device->chip->ptr.inst && j < ARRAY_SIZE(device->ptr); j++) { \
+ if ((device->chip->ptr.inst & BIT(j)) && (subdev_mask & BIT_ULL(type))) { \
+ int inst = (device->chip->ptr.inst == 1) ? -1 : (j); \
+ ret = device->chip->ptr.ctor(device, (type), inst, &device->ptr[j]); \
+ subdev = nvkm_device_subdev(device, (type), (j)); \
+ if (ret) { \
+ nvkm_subdev_del(&subdev); \
+ device->ptr[j] = NULL; \
+ if (ret != -ENODEV) { \
+ nvdev_error(device, "%s%d ctor failed: %d\n", \
+ nvkm_subdev_type[(type)], (j), ret); \
+ goto done; \
+ } \
+ } else { \
+ subdev->pself = (void **)&device->ptr[j]; \
+ } \
+ } \
}
+#include <core/layout.h>
+#undef NVKM_LAYOUT_INST
+#undef NVKM_LAYOUT_ONCE
ret = 0;
done:
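The constructor loop in the hunk above is driven by an X-macro header: <core/layout.h> (not shown in this hunk, presumably added elsewhere in the series) is expected to be a flat list of NVKM_LAYOUT_ONCE()/NVKM_LAYOUT_INST() entries that each includer expands with its own definitions of those two macros. A minimal sketch of the same pattern follows, using hypothetical names (EXAMPLE_LAYOUT, LAYOUT_ONCE, LAYOUT_INST) rather than the real header.

/*
 * Illustrative only: the list-of-entries header is faked with a macro
 * here; in the patch it is a real header included after the two
 * NVKM_LAYOUT_* macros have been defined.
 */
#include <stdio.h>

#define EXAMPLE_LAYOUT \
	LAYOUT_ONCE(TIMER, timer) \
	LAYOUT_INST(CE, ce, 9)

int main(void)
{
#define LAYOUT_ONCE(type, ptr)      printf("single subdev: %s\n", #ptr);
#define LAYOUT_INST(type, ptr, cnt) printf("indexed subdev: %s, up to %d instances\n", #ptr, cnt);
	EXAMPLE_LAYOUT
#undef LAYOUT_INST
#undef LAYOUT_ONCE
	return 0;
}

Because each includer supplies its own expansion, the same list can drive several different expansions (here, the per-chipset constructor calls) without repeating the subdev list by hand.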
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 54eab5e04230..93949b3c7214 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -15,7 +15,6 @@
#include <subdev/gpio.h>
#include <subdev/gsp.h>
#include <subdev/i2c.h>
-#include <subdev/ibus.h>
#include <subdev/iccsense.h>
#include <subdev/instmem.h>
#include <subdev/ltc.h>
@@ -24,6 +23,7 @@
#include <subdev/mxm.h>
#include <subdev/pci.h>
#include <subdev/pmu.h>
+#include <subdev/privring.h>
#include <subdev/therm.h>
#include <subdev/timer.h>
#include <subdev/top.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 147894798786..fea9d8f2b10c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -43,15 +43,15 @@ static int
nvkm_udevice_info_subdev(struct nvkm_device *device, u64 mthd, u64 *data)
{
struct nvkm_subdev *subdev;
- enum nvkm_devidx subidx;
+ enum nvkm_subdev_type type;
switch (mthd & NV_DEVICE_INFO_UNIT) {
- case NV_DEVICE_FIFO(0): subidx = NVKM_ENGINE_FIFO; break;
+ case NV_DEVICE_HOST(0): type = NVKM_ENGINE_FIFO; break;
default:
return -EINVAL;
}
- subdev = nvkm_device_subdev(device, subidx);
+ subdev = nvkm_device_subdev(device, type, 0);
if (subdev)
return nvkm_subdev_info(subdev, mthd, data);
return -ENODEV;
@@ -66,37 +66,7 @@ nvkm_udevice_info_v1(struct nvkm_device *device,
args->mthd = NV_DEVICE_INFO_INVALID;
return;
}
-
- switch (args->mthd) {
-#define ENGINE__(A,B,C) NV_DEVICE_INFO_ENGINE_##A: { int _i; \
- for (_i = (B), args->data = 0ULL; _i <= (C); _i++) { \
- if (nvkm_device_engine(device, _i)) \
- args->data |= BIT_ULL(_i); \
- } \
-}
-#define ENGINE_A(A) ENGINE__(A, NVKM_ENGINE_##A , NVKM_ENGINE_##A)
-#define ENGINE_B(A) ENGINE__(A, NVKM_ENGINE_##A##0, NVKM_ENGINE_##A##_LAST)
- case ENGINE_A(SW ); break;
- case ENGINE_A(GR ); break;
- case ENGINE_A(MPEG ); break;
- case ENGINE_A(ME ); break;
- case ENGINE_A(CIPHER); break;
- case ENGINE_A(BSP ); break;
- case ENGINE_A(VP ); break;
- case ENGINE_B(CE ); break;
- case ENGINE_A(SEC ); break;
- case ENGINE_A(MSVLD ); break;
- case ENGINE_A(MSPDEC); break;
- case ENGINE_A(MSPPP ); break;
- case ENGINE_A(MSENC ); break;
- case ENGINE_A(VIC ); break;
- case ENGINE_A(SEC2 ); break;
- case ENGINE_B(NVDEC ); break;
- case ENGINE_B(NVENC ); break;
- default:
- args->mthd = NV_DEVICE_INFO_INVALID;
- break;
- }
+ args->mthd = NV_DEVICE_INFO_INVALID;
}
static int
@@ -357,7 +327,7 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
int i;
for (; i = __ffs64(mask), mask && !sclass; mask &= ~(1ULL << i)) {
- if (!(engine = nvkm_device_engine(device, i)) ||
+ if (!(engine = nvkm_device_engine(device, i, 0)) ||
!(engine->func->base.sclass))
continue;
oclass->engine = engine;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index cbd33e87b799..5daa77755276 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -149,10 +149,10 @@ static void
nvkm_disp_class_del(struct nvkm_oproxy *oproxy)
{
struct nvkm_disp *disp = nvkm_disp(oproxy->base.engine);
- mutex_lock(&disp->engine.subdev.mutex);
- if (disp->client == oproxy)
- disp->client = NULL;
- mutex_unlock(&disp->engine.subdev.mutex);
+ spin_lock(&disp->client.lock);
+ if (disp->client.object == oproxy)
+ disp->client.object = NULL;
+ spin_unlock(&disp->client.lock);
}
static const struct nvkm_oproxy_func
@@ -175,13 +175,13 @@ nvkm_disp_class_new(struct nvkm_device *device,
return ret;
*pobject = &oproxy->base;
- mutex_lock(&disp->engine.subdev.mutex);
- if (disp->client) {
- mutex_unlock(&disp->engine.subdev.mutex);
+ spin_lock(&disp->client.lock);
+ if (disp->client.object) {
+ spin_unlock(&disp->client.lock);
return -EBUSY;
}
- disp->client = oproxy;
- mutex_unlock(&disp->engine.subdev.mutex);
+ disp->client.object = oproxy;
+ spin_unlock(&disp->client.lock);
return sclass->ctor(disp, oclass, data, size, &oproxy->object);
}
@@ -473,21 +473,22 @@ nvkm_disp = {
int
nvkm_disp_ctor(const struct nvkm_disp_func *func, struct nvkm_device *device,
- int index, struct nvkm_disp *disp)
+ enum nvkm_subdev_type type, int inst, struct nvkm_disp *disp)
{
disp->func = func;
INIT_LIST_HEAD(&disp->head);
INIT_LIST_HEAD(&disp->ior);
INIT_LIST_HEAD(&disp->outp);
INIT_LIST_HEAD(&disp->conn);
- return nvkm_engine_ctor(&nvkm_disp, device, index, true, &disp->engine);
+ spin_lock_init(&disp->client.lock);
+ return nvkm_engine_ctor(&nvkm_disp, device, type, inst, true, &disp->engine);
}
int
nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
- int index, struct nvkm_disp **pdisp)
+ enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
{
if (!(*pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_disp_ctor(func, device, index, *pdisp);
+ return nvkm_disp_ctor(func, device, type, inst, *pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 50e3539f33d2..a7a7eb041515 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -278,7 +278,7 @@ nv50_disp_chan_child_get(struct nvkm_object *object, int index,
const struct nvkm_device_oclass *oclass = NULL;
if (chan->func->bind)
- sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ);
+ sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ, 0);
else
sclass->engine = NULL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
index 731f188fc1ee..156bbe8b2de3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
@@ -41,7 +41,8 @@ g84_disp = {
};
int
-g84_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+g84_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&g84_disp, device, index, pdisp);
+ return nv50_disp_new_(&g84_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index def54fe1951e..3425b5d3bc72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -41,7 +41,8 @@ g94_disp = {
};
int
-g94_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+g94_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&g94_disp, device, index, pdisp);
+ return nv50_disp_new_(&g94_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
index aa2e5645fe36..68aa52588d92 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
@@ -40,7 +40,8 @@ ga102_disp = {
};
int
-ga102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+ga102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&ga102_disp, device, index, pdisp);
+ return nv50_disp_new_(&ga102_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index e675d9b9d5d7..a6bafe7fea1f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -266,7 +266,8 @@ gf119_disp = {
};
int
-gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gf119_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gf119_disp, device, index, pdisp);
+ return nv50_disp_new_(&gf119_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index 4c3439b1a62d..3b79cf233ac5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -41,7 +41,8 @@ gk104_disp = {
};
int
-gk104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gk104_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gk104_disp, device, index, pdisp);
+ return nv50_disp_new_(&gk104_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
index bc6f4750c942..988eb12237a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
@@ -41,7 +41,8 @@ gk110_disp = {
};
int
-gk110_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gk110_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gk110_disp, device, index, pdisp);
+ return nv50_disp_new_(&gk110_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index 031cf6b03a76..5d8108feeacd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -41,7 +41,8 @@ gm107_disp = {
};
int
-gm107_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gm107_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gm107_disp, device, index, pdisp);
+ return nv50_disp_new_(&gm107_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
index ec9c33a5162d..f7bb66087476 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
@@ -41,7 +41,8 @@ gm200_disp = {
};
int
-gm200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gm200_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gm200_disp, device, index, pdisp);
+ return nv50_disp_new_(&gm200_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
index 8471de3f3b61..af0ca812a394 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
@@ -40,7 +40,8 @@ gp100_disp = {
};
int
-gp100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gp100_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gp100_disp, device, index, pdisp);
+ return nv50_disp_new_(&gp100_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
index a3779c5046ea..065fea1bdfd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
@@ -67,7 +67,8 @@ gp102_disp = {
};
int
-gp102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gp102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gp102_disp, device, index, pdisp);
+ return nv50_disp_new_(&gp102_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
index f80183701f44..22bc269df64a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
@@ -41,7 +41,8 @@ gt200_disp = {
};
int
-gt200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gt200_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gt200_disp, device, index, pdisp);
+ return nv50_disp_new_(&gt200_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index 7581efc1357e..63a912b174d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -41,7 +41,8 @@ gt215_disp = {
};
int
-gt215_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gt215_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gt215_disp, device, index, pdisp);
+ return nv50_disp_new_(&gt215_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index c1032527f791..53879d5271cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -441,7 +441,8 @@ gv100_disp = {
};
int
-gv100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gv100_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&gv100_disp, device, index, pdisp);
+ return nv50_disp_new_(&gv100_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
index cfdce23ab83a..762a59f24bbb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
@@ -39,7 +39,8 @@ mcp77_disp = {
};
int
-mcp77_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+mcp77_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&mcp77_disp, device, index, pdisp);
+ return nv50_disp_new_(&mcp77_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
index 85d9329cfa0e..e5c58aae15de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
@@ -39,7 +39,8 @@ mcp89_disp = {
};
int
-mcp89_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+mcp89_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&mcp89_disp, device, index, pdisp);
+ return nv50_disp_new_(&mcp89_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
index b780ba1a3bc7..a12097db2c2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
@@ -64,11 +64,12 @@ nv04_disp = {
};
int
-nv04_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+nv04_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
int ret, i;
- ret = nvkm_disp_new_(&nv04_disp, device, index, pdisp);
+ ret = nvkm_disp_new_(&nv04_disp, device, type, inst, pdisp);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index e21556bf2cb1..3f20e49070ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -154,7 +154,7 @@ nv50_disp_ = {
int
nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
- int index, struct nvkm_disp **pdisp)
+ enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
{
struct nv50_disp *disp;
int ret;
@@ -164,7 +164,7 @@ nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
disp->func = func;
*pdisp = &disp->base;
- ret = nvkm_disp_ctor(&nv50_disp_, device, index, &disp->base);
+ ret = nvkm_disp_ctor(&nv50_disp_, device, type, inst, &disp->base);
if (ret)
return ret;
@@ -769,7 +769,8 @@ nv50_disp = {
};
int
-nv50_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+nv50_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&nv50_disp, device, index, pdisp);
+ return nv50_disp_new_(&nv50_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index db31b37752a2..025cacd7c3b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -47,8 +47,8 @@ void nv50_disp_super_2_1(struct nv50_disp *, struct nvkm_head *);
void nv50_disp_super_2_2(struct nv50_disp *, struct nvkm_head *);
void nv50_disp_super_3_0(struct nv50_disp *, struct nvkm_head *);
-int nv50_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
- int index, struct nvkm_disp **);
+int nv50_disp_new_(const struct nv50_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_disp **);
struct nv50_disp_func {
int (*init)(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
index f815a5342880..ec57d8b6bce9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
@@ -4,10 +4,10 @@
#include <engine/disp.h>
#include "outp.h"
-int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *,
- int index, struct nvkm_disp *);
-int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *,
- int index, struct nvkm_disp **);
+int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_disp *);
+int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_disp **);
void nvkm_disp_vblank(struct nvkm_disp *, int head);
struct nvkm_disp_func {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
index 4c85d1d4fbd4..f5f8dc8e8f35 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
@@ -146,7 +146,8 @@ tu102_disp = {
};
int
-tu102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+tu102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&tu102_disp, device, index, pdisp);
+ return nv50_disp_new_(&tu102_disp, device, type, inst, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
index 11b7b8fd5dda..425cde35f128 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
@@ -104,7 +104,7 @@ nvkm_dma = {
int
nvkm_dma_new_(const struct nvkm_dma_func *func, struct nvkm_device *device,
- int index, struct nvkm_dma **pdma)
+ enum nvkm_subdev_type type, int inst, struct nvkm_dma **pdma)
{
struct nvkm_dma *dma;
@@ -112,5 +112,5 @@ nvkm_dma_new_(const struct nvkm_dma_func *func, struct nvkm_device *device,
return -ENOMEM;
dma->func = func;
- return nvkm_engine_ctor(&nvkm_dma, device, index, true, &dma->engine);
+ return nvkm_engine_ctor(&nvkm_dma, device, type, inst, true, &dma->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
index efec5d322179..99a1e07fa204 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
@@ -30,7 +30,8 @@ gf100_dma = {
};
int
-gf100_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+gf100_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_dma **pdma)
{
- return nvkm_dma_new_(&gf100_dma, device, index, pdma);
+ return nvkm_dma_new_(&gf100_dma, device, type, inst, pdma);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
index 34c766039aed..fd1d1fc22dc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
@@ -30,7 +30,8 @@ gf119_dma = {
};
int
-gf119_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+gf119_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_dma **pdma)
{
- return nvkm_dma_new_(&gf119_dma, device, index, pdma);
+ return nvkm_dma_new_(&gf119_dma, device, type, inst, pdma);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c
index c65a4c2ea93d..a5af0df30663 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c
@@ -28,7 +28,8 @@ gv100_dma = {
};
int
-gv100_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+gv100_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_dma **pdma)
{
- return nvkm_dma_new_(&gv100_dma, device, index, pdma);
+ return nvkm_dma_new_(&gv100_dma, device, type, inst, pdma);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
index 30747a0ce488..ea5a889f60c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
@@ -30,7 +30,8 @@ nv04_dma = {
};
int
-nv04_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+nv04_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_dma **pdma)
{
- return nvkm_dma_new_(&nv04_dma, device, index, pdma);
+ return nvkm_dma_new_(&nv04_dma, device, type, inst, pdma);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
index 77aca7b71c83..6e8f79660014 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
@@ -30,7 +30,8 @@ nv50_dma = {
};
int
-nv50_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+nv50_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_dma **pdma)
{
- return nvkm_dma_new_(&nv50_dma, device, index, pdma);
+ return nvkm_dma_new_(&nv50_dma, device, type, inst, pdma);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
index 0c9d9640a59d..d403bedb485a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
@@ -9,8 +9,8 @@ struct nvkm_dmaobj_func {
struct nvkm_gpuobj **);
};
-int nvkm_dma_new_(const struct nvkm_dma_func *, struct nvkm_device *,
- int index, struct nvkm_dma **);
+int nvkm_dma_new_(const struct nvkm_dma_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_dma **);
struct nvkm_dma_func {
int (*class_new)(struct nvkm_dma *, const struct nvkm_oclass *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 8675613e142b..43b7dec45179 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -108,7 +108,7 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
}
}
- if (nvkm_mc_enabled(device, engine->subdev.index)) {
+ if (nvkm_mc_enabled(device, engine->subdev.type, engine->subdev.inst)) {
nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
nvkm_wr32(device, base + 0x014, 0xffffffff);
}
@@ -335,9 +335,9 @@ nvkm_falcon = {
};
int
-nvkm_falcon_new_(const struct nvkm_falcon_func *func,
- struct nvkm_device *device, int index, bool enable,
- u32 addr, struct nvkm_engine **pengine)
+nvkm_falcon_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, bool enable, u32 addr,
+ struct nvkm_engine **pengine)
{
struct nvkm_falcon *falcon;
@@ -351,6 +351,5 @@ nvkm_falcon_new_(const struct nvkm_falcon_func *func,
falcon->data.size = func->data.size;
*pengine = &falcon->engine;
- return nvkm_engine_ctor(&nvkm_falcon, device, index,
- enable, &falcon->engine);
+ return nvkm_engine_ctor(&nvkm_falcon, device, type, inst, enable, &falcon->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index c773caf21f6b..2ed4ff05d207 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -292,7 +292,7 @@ nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
{
struct nvkm_fifo *fifo = nvkm_fifo(engine);
switch (mthd) {
- case NV_DEVICE_FIFO_CHANNELS: *data = fifo->nr; return 0;
+ case NV_DEVICE_HOST_CHANNELS: *data = fifo->nr; return 0;
default:
if (fifo->func->info)
return fifo->func->info(fifo, mthd, data);
@@ -313,7 +313,7 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine)
static void
nvkm_fifo_preinit(struct nvkm_engine *engine)
{
- nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO);
+ nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO, 0);
}
static int
@@ -334,6 +334,7 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
nvkm_event_fini(&fifo->kevent);
nvkm_event_fini(&fifo->cevent);
nvkm_event_fini(&fifo->uevent);
+ mutex_destroy(&fifo->mutex);
return data;
}
@@ -351,13 +352,14 @@ nvkm_fifo = {
int
nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
- int index, int nr, struct nvkm_fifo *fifo)
+ enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo *fifo)
{
int ret;
fifo->func = func;
INIT_LIST_HEAD(&fifo->chan);
spin_lock_init(&fifo->lock);
+ mutex_init(&fifo->mutex);
if (WARN_ON(fifo->nr > NVKM_FIFO_CHID_NR))
fifo->nr = NVKM_FIFO_CHID_NR;
@@ -365,7 +367,7 @@ nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
fifo->nr = nr;
bitmap_clear(fifo->mask, 0, fifo->nr);
- ret = nvkm_engine_ctor(&nvkm_fifo, device, index, true, &fifo->engine);
+ ret = nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index d83485385934..8d957643940a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -35,6 +35,15 @@ struct nvkm_fifo_chan_object {
int hash;
};
+static struct nvkm_fifo_engn *
+nvkm_fifo_chan_engn(struct nvkm_fifo_chan *chan, struct nvkm_engine *engine)
+{
+ int engi = chan->fifo->func->engine_id(chan->fifo, engine);
+ if (engi >= 0)
+ return &chan->engn[engi];
+ return NULL;
+}
+
static int
nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
{
@@ -42,8 +51,8 @@ nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
container_of(base, typeof(*object), oproxy);
struct nvkm_engine *engine = object->oproxy.object->engine;
struct nvkm_fifo_chan *chan = object->chan;
- struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
- const char *name = nvkm_subdev_name[engine->subdev.index];
+ struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
+ const char *name = engine->subdev.name;
int ret = 0;
if (--engn->usecount)
@@ -75,8 +84,8 @@ nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
container_of(base, typeof(*object), oproxy);
struct nvkm_engine *engine = object->oproxy.object->engine;
struct nvkm_fifo_chan *chan = object->chan;
- struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
- const char *name = nvkm_subdev_name[engine->subdev.index];
+ struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
+ const char *name = engine->subdev.name;
int ret;
if (engn->usecount++)
@@ -108,7 +117,7 @@ nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
container_of(base, typeof(*object), oproxy);
struct nvkm_engine *engine = object->oproxy.base.engine;
struct nvkm_fifo_chan *chan = object->chan;
- struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+ struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
if (chan->func->object_dtor)
chan->func->object_dtor(chan, object->hash);
@@ -118,7 +127,7 @@ nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
chan->func->engine_dtor(chan, engine);
nvkm_object_del(&engn->object);
if (chan->vmm)
- atomic_dec(&chan->vmm->engref[engine->subdev.index]);
+ atomic_dec(&chan->vmm->engref[engine->subdev.type]);
}
}
@@ -135,7 +144,7 @@ nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
{
struct nvkm_engine *engine = oclass->engine;
struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
- struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+ struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
struct nvkm_fifo_chan_object *object;
int ret = 0;
@@ -152,7 +161,7 @@ nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
};
if (chan->vmm)
- atomic_inc(&chan->vmm->engref[engine->subdev.index]);
+ atomic_inc(&chan->vmm->engref[engine->subdev.type]);
if (engine->func->fifo.cclass) {
ret = engine->func->fifo.cclass(chan, &cclass,
@@ -203,13 +212,12 @@ nvkm_fifo_chan_child_get(struct nvkm_object *object, int index,
{
struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
struct nvkm_fifo *fifo = chan->fifo;
- struct nvkm_device *device = fifo->engine.subdev.device;
struct nvkm_engine *engine;
- u64 mask = chan->engines;
- int ret, i, c;
+ u32 engm = chan->engm;
+ int engi, ret, c;
- for (; c = 0, i = __ffs64(mask), mask; mask &= ~(1ULL << i)) {
- if (!(engine = nvkm_device_engine(device, i)))
+ for (; c = 0, engi = __ffs(engm), engm; engm &= ~(1ULL << engi)) {
+ if (!(engine = fifo->func->id_engine(fifo, engi)))
continue;
oclass->engine = engine;
oclass->base.oclass = 0;
@@ -352,7 +360,7 @@ nvkm_fifo_chan_func = {
int
nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
- u64 hvmm, u64 push, u64 engines, int bar, u32 base,
+ u64 hvmm, u64 push, u32 engm, int bar, u32 base,
u32 user, const struct nvkm_oclass *oclass,
struct nvkm_fifo_chan *chan)
{
@@ -365,7 +373,7 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
chan->func = func;
chan->fifo = fifo;
- chan->engines = engines;
+ chan->engm = engm;
INIT_LIST_HEAD(&chan->head);
/* instance memory */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
index 177e10562600..e53504354841 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -22,7 +22,7 @@ struct nvkm_fifo_chan_func {
int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *,
u32 size, u32 align, bool zero, u64 vm, u64 push,
- u64 engines, int bar, u32 base, u32 user,
+ u32 engm, int bar, u32 base, u32 user,
const struct nvkm_oclass *, struct nvkm_fifo_chan *);
struct nvkm_fifo_chan_oclass {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
index a5c998fe4485..353b77d9b3dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -45,29 +45,9 @@ g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
}
static int
-g84_fifo_chan_engine(struct nvkm_engine *engine)
-{
- switch (engine->subdev.index) {
- case NVKM_ENGINE_GR : return 0;
- case NVKM_ENGINE_MPEG :
- case NVKM_ENGINE_MSPPP : return 1;
- case NVKM_ENGINE_CE0 : return 2;
- case NVKM_ENGINE_VP :
- case NVKM_ENGINE_MSPDEC: return 3;
- case NVKM_ENGINE_CIPHER:
- case NVKM_ENGINE_SEC : return 4;
- case NVKM_ENGINE_BSP :
- case NVKM_ENGINE_MSVLD : return 5;
- default:
- WARN_ON(1);
- return 0;
- }
-}
-
-static int
g84_fifo_chan_engine_addr(struct nvkm_engine *engine)
{
- switch (engine->subdev.index) {
+ switch (engine->subdev.type) {
case NVKM_ENGINE_DMAOBJ:
case NVKM_ENGINE_SW : return -1;
case NVKM_ENGINE_GR : return 0x0020;
@@ -79,7 +59,7 @@ g84_fifo_chan_engine_addr(struct nvkm_engine *engine)
case NVKM_ENGINE_MSVLD : return 0x0080;
case NVKM_ENGINE_CIPHER:
case NVKM_ENGINE_SEC : return 0x00a0;
- case NVKM_ENGINE_CE0 : return 0x00c0;
+ case NVKM_ENGINE_CE : return 0x00c0;
default:
WARN_ON(1);
return -1;
@@ -102,7 +82,7 @@ g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
if (offset < 0)
return 0;
- engn = g84_fifo_chan_engine(engine);
+ engn = fifo->base.func->engine_id(&fifo->base, engine);
save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
done = nvkm_msec(device, 2000,
@@ -134,7 +114,7 @@ g84_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
- struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
+ struct nvkm_gpuobj *engn = *nv50_fifo_chan_engine(chan, engine);
u64 limit, start;
int offset;
@@ -162,12 +142,11 @@ g84_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object)
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
- int engn = engine->subdev.index;
if (g84_fifo_chan_engine_addr(engine) < 0)
return 0;
- return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+ return nvkm_object_bind(object, NULL, 0, nv50_fifo_chan_engine(chan, engine));
}
static int
@@ -178,14 +157,14 @@ g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
u32 handle = object->handle;
u32 context;
- switch (object->engine->subdev.index) {
+ switch (object->engine->subdev.type) {
case NVKM_ENGINE_DMAOBJ:
case NVKM_ENGINE_SW : context = 0x00000000; break;
case NVKM_ENGINE_GR : context = 0x00100000; break;
case NVKM_ENGINE_MPEG :
case NVKM_ENGINE_MSPPP : context = 0x00200000; break;
case NVKM_ENGINE_ME :
- case NVKM_ENGINE_CE0 : context = 0x00300000; break;
+ case NVKM_ENGINE_CE : context = 0x00300000; break;
case NVKM_ENGINE_VP :
case NVKM_ENGINE_MSPDEC: context = 0x00400000; break;
case NVKM_ENGINE_CIPHER:
@@ -241,20 +220,20 @@ g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
ret = nvkm_fifo_chan_ctor(&g84_fifo_chan_func, &fifo->base,
0x10000, 0x1000, false, vmm, push,
- (1ULL << NVKM_ENGINE_BSP) |
- (1ULL << NVKM_ENGINE_CE0) |
- (1ULL << NVKM_ENGINE_CIPHER) |
- (1ULL << NVKM_ENGINE_DMAOBJ) |
- (1ULL << NVKM_ENGINE_GR) |
- (1ULL << NVKM_ENGINE_ME) |
- (1ULL << NVKM_ENGINE_MPEG) |
- (1ULL << NVKM_ENGINE_MSPDEC) |
- (1ULL << NVKM_ENGINE_MSPPP) |
- (1ULL << NVKM_ENGINE_MSVLD) |
- (1ULL << NVKM_ENGINE_SEC) |
- (1ULL << NVKM_ENGINE_SW) |
- (1ULL << NVKM_ENGINE_VIC) |
- (1ULL << NVKM_ENGINE_VP),
+ BIT(G84_FIFO_ENGN_SW) |
+ BIT(G84_FIFO_ENGN_GR) |
+ BIT(G84_FIFO_ENGN_MPEG) |
+ BIT(G84_FIFO_ENGN_MSPPP) |
+ BIT(G84_FIFO_ENGN_ME) |
+ BIT(G84_FIFO_ENGN_CE0) |
+ BIT(G84_FIFO_ENGN_VP) |
+ BIT(G84_FIFO_ENGN_MSPDEC) |
+ BIT(G84_FIFO_ENGN_CIPHER) |
+ BIT(G84_FIFO_ENGN_SEC) |
+ BIT(G84_FIFO_ENGN_VIC) |
+ BIT(G84_FIFO_ENGN_BSP) |
+ BIT(G84_FIFO_ENGN_MSVLD) |
+ BIT(G84_FIFO_ENGN_DMA),
0, 0xc00000, 0x2000, oclass, &chan->base);
chan->fifo = fifo;
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
index 7c125a15f963..f7ac1061fa84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
@@ -12,10 +12,17 @@ struct gf100_fifo_chan {
struct list_head head;
bool killed;
- struct {
+#define GF100_FIFO_ENGN_GR 0
+#define GF100_FIFO_ENGN_MSPDEC 1
+#define GF100_FIFO_ENGN_MSPPP 2
+#define GF100_FIFO_ENGN_MSVLD 3
+#define GF100_FIFO_ENGN_CE0 4
+#define GF100_FIFO_ENGN_CE1 5
+#define GF100_FIFO_ENGN_SW 15
+ struct gf100_fifo_engn {
struct nvkm_gpuobj *inst;
struct nvkm_vma *vma;
- } engn[NVKM_SUBDEV_NR];
+ } engn[NVKM_FIFO_ENGN_NR];
};
extern const struct nvkm_fifo_chan_oclass gf100_fifo_gpfifo_oclass;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index 22698661aa85..cfbe096e604f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -16,10 +16,11 @@ struct gk104_fifo_chan {
struct nvkm_memory *mthd;
- struct {
+#define GK104_FIFO_ENGN_SW 15
+ struct gk104_fifo_engn {
struct nvkm_gpuobj *inst;
struct nvkm_vma *vma;
- } engn[NVKM_SUBDEV_NR];
+ } engn[NVKM_FIFO_ENGN_NR];
};
extern const struct nvkm_fifo_chan_func gk104_fifo_gpfifo_func;
@@ -29,6 +30,7 @@ int gk104_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
void *gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *);
void gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *);
void gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *);
+struct gk104_fifo_engn *gk104_fifo_gpfifo_engine(struct gk104_fifo_chan *, struct nvkm_engine *);
int gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *, struct nvkm_engine *,
struct nvkm_object *);
void gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
index 60ca79465aff..727bc8976b40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
@@ -9,7 +9,11 @@ struct nv04_fifo_chan {
struct nvkm_fifo_chan base;
struct nv04_fifo *fifo;
u32 ramfc;
- struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
+#define NV04_FIFO_ENGN_SW 0
+#define NV04_FIFO_ENGN_GR 1
+#define NV04_FIFO_ENGN_MPEG 2
+#define NV04_FIFO_ENGN_DMA 3
+ struct nvkm_gpuobj *engn[NVKM_FIFO_ENGN_NR];
};
extern const struct nvkm_fifo_chan_func nv04_fifo_dma_func;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
index 85f7dbf53c99..c44d7c81dd52 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
@@ -31,7 +31,7 @@
static int
nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
{
- switch (engine->subdev.index) {
+ switch (engine->subdev.type) {
case NVKM_ENGINE_DMAOBJ:
case NVKM_ENGINE_SW : return -1;
case NVKM_ENGINE_GR : return 0x0000;
@@ -42,6 +42,15 @@ nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
}
}
+struct nvkm_gpuobj **
+nv50_fifo_chan_engine(struct nv50_fifo_chan *chan, struct nvkm_engine *engine)
+{
+ int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
+ if (engi >= 0)
+ return &chan->engn[engi];
+ return NULL;
+}
+
static int
nv50_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine, bool suspend)
@@ -103,7 +112,7 @@ nv50_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
- struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
+ struct nvkm_gpuobj *engn = *nv50_fifo_chan_engine(chan, engine);
u64 limit, start;
int offset;
@@ -130,7 +139,7 @@ nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
- nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
+ nvkm_gpuobj_del(nv50_fifo_chan_engine(chan, engine));
}
static int
@@ -139,12 +148,11 @@ nv50_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object)
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
- int engn = engine->subdev.index;
if (nv50_fifo_chan_engine_addr(engine) < 0)
return 0;
- return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+ return nvkm_object_bind(object, NULL, 0, nv50_fifo_chan_engine(chan, engine));
}
void
@@ -162,7 +170,7 @@ nv50_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
u32 handle = object->handle;
u32 context;
- switch (object->engine->subdev.index) {
+ switch (object->engine->subdev.type) {
case NVKM_ENGINE_DMAOBJ:
case NVKM_ENGINE_SW : context = 0x00000000; break;
case NVKM_ENGINE_GR : context = 0x00100000; break;
@@ -240,10 +248,10 @@ nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base,
0x10000, 0x1000, false, vmm, push,
- (1ULL << NVKM_ENGINE_DMAOBJ) |
- (1ULL << NVKM_ENGINE_SW) |
- (1ULL << NVKM_ENGINE_GR) |
- (1ULL << NVKM_ENGINE_MPEG),
+ BIT(NV50_FIFO_ENGN_SW) |
+ BIT(NV50_FIFO_ENGN_GR) |
+ BIT(NV50_FIFO_ENGN_MPEG) |
+ BIT(NV50_FIFO_ENGN_DMA),
0, 0xc00000, 0x2000, oclass, &chan->base);
chan->fifo = fifo;
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
index 5735ff72a9d1..af8bdf275552 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -15,13 +15,33 @@ struct nv50_fifo_chan {
struct nvkm_gpuobj *pgd;
struct nvkm_ramht *ramht;
- struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
+#define NV50_FIFO_ENGN_SW 0
+#define NV50_FIFO_ENGN_GR 1
+#define NV50_FIFO_ENGN_MPEG 2
+#define NV50_FIFO_ENGN_DMA 3
+
+#define G84_FIFO_ENGN_SW 0
+#define G84_FIFO_ENGN_GR 1
+#define G84_FIFO_ENGN_MPEG 2
+#define G84_FIFO_ENGN_MSPPP 2
+#define G84_FIFO_ENGN_ME 3
+#define G84_FIFO_ENGN_CE0 3
+#define G84_FIFO_ENGN_VP 4
+#define G84_FIFO_ENGN_MSPDEC 4
+#define G84_FIFO_ENGN_CIPHER 5
+#define G84_FIFO_ENGN_SEC 5
+#define G84_FIFO_ENGN_VIC 5
+#define G84_FIFO_ENGN_BSP 6
+#define G84_FIFO_ENGN_MSVLD 6
+#define G84_FIFO_ENGN_DMA 7
+ struct nvkm_gpuobj *engn[NVKM_FIFO_ENGN_NR];
};
int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
const struct nvkm_oclass *, struct nv50_fifo_chan *);
void *nv50_fifo_chan_dtor(struct nvkm_fifo_chan *);
void nv50_fifo_chan_fini(struct nvkm_fifo_chan *);
+struct nvkm_gpuobj **nv50_fifo_chan_engine(struct nv50_fifo_chan *, struct nvkm_engine *);
void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *);
void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
index c213122cf088..dbcdc5fab990 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -38,9 +38,9 @@ nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
- mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+ mutex_lock(&chan->fifo->base.mutex);
nvkm_ramht_remove(imem->ramht, cookie);
- mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+ mutex_unlock(&chan->fifo->base.mutex);
}
static int
@@ -53,7 +53,7 @@ nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
u32 handle = object->handle;
int hash;
- switch (object->engine->subdev.index) {
+ switch (object->engine->subdev.type) {
case NVKM_ENGINE_DMAOBJ:
case NVKM_ENGINE_SW : context |= 0x00000000; break;
case NVKM_ENGINE_GR : context |= 0x00010000; break;
@@ -63,10 +63,10 @@ nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
return -EINVAL;
}
- mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+ mutex_lock(&chan->fifo->base.mutex);
hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
handle, context);
- mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+ mutex_unlock(&chan->fifo->base.mutex);
return hash;
}
@@ -191,9 +191,9 @@ nv04_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
0x1000, 0x1000, false, 0, args->v0.pushbuf,
- (1ULL << NVKM_ENGINE_DMAOBJ) |
- (1ULL << NVKM_ENGINE_GR) |
- (1ULL << NVKM_ENGINE_SW),
+ BIT(NV04_FIFO_ENGN_SW) |
+ BIT(NV04_FIFO_ENGN_GR) |
+ BIT(NV04_FIFO_ENGN_DMA),
0, 0x800000, 0x10000, oclass, &chan->base);
chan->fifo = fifo;
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
index f5f355ff005d..07d80d54a07c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
@@ -62,9 +62,9 @@ nv10_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
0x1000, 0x1000, false, 0, args->v0.pushbuf,
- (1ULL << NVKM_ENGINE_DMAOBJ) |
- (1ULL << NVKM_ENGINE_GR) |
- (1ULL << NVKM_ENGINE_SW),
+ BIT(NV04_FIFO_ENGN_SW) |
+ BIT(NV04_FIFO_ENGN_GR) |
+ BIT(NV04_FIFO_ENGN_DMA),
0, 0x800000, 0x10000, oclass, &chan->base);
chan->fifo = fifo;
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
index 7edc6a564b5d..edd70a114218 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
@@ -62,10 +62,10 @@ nv17_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
0x1000, 0x1000, false, 0, args->v0.pushbuf,
- (1ULL << NVKM_ENGINE_DMAOBJ) |
- (1ULL << NVKM_ENGINE_GR) |
- (1ULL << NVKM_ENGINE_MPEG) | /* NV31- */
- (1ULL << NVKM_ENGINE_SW),
+ BIT(NV04_FIFO_ENGN_SW) |
+ BIT(NV04_FIFO_ENGN_GR) |
+ BIT(NV04_FIFO_ENGN_MPEG) | /* NV31- */
+ BIT(NV04_FIFO_ENGN_DMA),
0, 0x800000, 0x10000, oclass, &chan->base);
chan->fifo = fifo;
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
index 5f722c6e8a2f..0411fb908457 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
@@ -35,7 +35,7 @@
static bool
nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
{
- switch (engine->subdev.index) {
+ switch (engine->subdev.type) {
case NVKM_ENGINE_DMAOBJ:
case NVKM_ENGINE_SW:
return false;
@@ -55,6 +55,15 @@ nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
}
}
+static struct nvkm_gpuobj **
+nv40_fifo_dma_engn(struct nv04_fifo_chan *chan, struct nvkm_engine *engine)
+{
+ int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
+ if (engi >= 0)
+ return &chan->engn[engi];
+ return NULL;
+}
+
static int
nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine, bool suspend)
@@ -99,7 +108,7 @@ nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base,
if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
return 0;
- inst = chan->engn[engine->subdev.index]->addr >> 4;
+ inst = (*nv40_fifo_dma_engn(chan, engine))->addr >> 4;
spin_lock_irqsave(&fifo->base.lock, flags);
nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
@@ -121,7 +130,7 @@ nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
- nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
+ nvkm_gpuobj_del(nv40_fifo_dma_engn(chan, engine));
}
static int
@@ -130,13 +139,12 @@ nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object)
{
struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
- const int engn = engine->subdev.index;
u32 reg, ctx;
if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
return 0;
- return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+ return nvkm_object_bind(object, NULL, 0, nv40_fifo_dma_engn(chan, engine));
}
static int
@@ -149,7 +157,7 @@ nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
u32 handle = object->handle;
int hash;
- switch (object->engine->subdev.index) {
+ switch (object->engine->subdev.type) {
case NVKM_ENGINE_DMAOBJ:
case NVKM_ENGINE_SW : context |= 0x00000000; break;
case NVKM_ENGINE_GR : context |= 0x00100000; break;
@@ -159,10 +167,10 @@ nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
return -EINVAL;
}
- mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+ mutex_lock(&chan->fifo->base.mutex);
hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
handle, context);
- mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+ mutex_unlock(&chan->fifo->base.mutex);
return hash;
}
@@ -209,10 +217,10 @@ nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
ret = nvkm_fifo_chan_ctor(&nv40_fifo_dma_func, &fifo->base,
0x1000, 0x1000, false, 0, args->v0.pushbuf,
- (1ULL << NVKM_ENGINE_DMAOBJ) |
- (1ULL << NVKM_ENGINE_GR) |
- (1ULL << NVKM_ENGINE_MPEG) |
- (1ULL << NVKM_ENGINE_SW),
+ BIT(NV04_FIFO_ENGN_SW) |
+ BIT(NV04_FIFO_ENGN_GR) |
+ BIT(NV04_FIFO_ENGN_MPEG) |
+ BIT(NV04_FIFO_ENGN_DMA),
0, 0xc00000, 0x1000, oclass, &chan->base);
chan->fifo = fifo;
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
index ff7b529764fe..c0a7d0f21dac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
@@ -38,12 +38,82 @@ g84_fifo_uevent_init(struct nvkm_fifo *fifo)
nvkm_mask(device, 0x002140, 0x40000000, 0x40000000);
}
+static struct nvkm_engine *
+g84_fifo_id_engine(struct nvkm_fifo *fifo, int engi)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_engine *engine;
+ enum nvkm_subdev_type type;
+
+ switch (engi) {
+ case G84_FIFO_ENGN_SW : type = NVKM_ENGINE_SW; break;
+ case G84_FIFO_ENGN_GR : type = NVKM_ENGINE_GR; break;
+ case G84_FIFO_ENGN_MPEG :
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_MSPPP, 0)))
+ return engine;
+ type = NVKM_ENGINE_MPEG;
+ break;
+ case G84_FIFO_ENGN_ME :
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_CE, 0)))
+ return engine;
+ type = NVKM_ENGINE_ME;
+ break;
+ case G84_FIFO_ENGN_VP :
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_MSPDEC, 0)))
+ return engine;
+ type = NVKM_ENGINE_VP;
+ break;
+ case G84_FIFO_ENGN_CIPHER:
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_VIC, 0)))
+ return engine;
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_SEC, 0)))
+ return engine;
+ type = NVKM_ENGINE_CIPHER;
+ break;
+ case G84_FIFO_ENGN_BSP :
+ if ((engine = nvkm_device_engine(device, NVKM_ENGINE_MSVLD, 0)))
+ return engine;
+ type = NVKM_ENGINE_BSP;
+ break;
+ case G84_FIFO_ENGN_DMA : type = NVKM_ENGINE_DMAOBJ; break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return nvkm_device_engine(fifo->engine.subdev.device, type, 0);
+}
+
+static int
+g84_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
+{
+ switch (engine->subdev.type) {
+ case NVKM_ENGINE_SW : return G84_FIFO_ENGN_SW;
+ case NVKM_ENGINE_GR : return G84_FIFO_ENGN_GR;
+ case NVKM_ENGINE_MPEG :
+ case NVKM_ENGINE_MSPPP : return G84_FIFO_ENGN_MPEG;
+ case NVKM_ENGINE_CE : return G84_FIFO_ENGN_CE0;
+ case NVKM_ENGINE_VP :
+ case NVKM_ENGINE_MSPDEC: return G84_FIFO_ENGN_VP;
+ case NVKM_ENGINE_CIPHER:
+ case NVKM_ENGINE_SEC : return G84_FIFO_ENGN_CIPHER;
+ case NVKM_ENGINE_BSP :
+ case NVKM_ENGINE_MSVLD : return G84_FIFO_ENGN_BSP;
+ case NVKM_ENGINE_DMAOBJ: return G84_FIFO_ENGN_DMA;
+ default:
+ WARN_ON(1);
+ return -1;
+ }
+}
+
static const struct nvkm_fifo_func
g84_fifo = {
.dtor = nv50_fifo_dtor,
.oneinit = nv50_fifo_oneinit,
.init = nv50_fifo_init,
.intr = nv04_fifo_intr,
+ .engine_id = g84_fifo_engine_id,
+ .id_engine = g84_fifo_id_engine,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.uevent_init = g84_fifo_uevent_init,
@@ -56,7 +126,8 @@ g84_fifo = {
};
int
-g84_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+g84_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return nv50_fifo_new_(&g84_fifo, device, index, pfifo);
+ return nv50_fifo_new_(&g84_fifo, device, type, inst, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index 5a39e51d42d7..8b4f36b3e34b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -57,7 +57,7 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
int nr = 0;
int target;
- mutex_lock(&subdev->mutex);
+ mutex_lock(&fifo->base.mutex);
cur = fifo->runlist.mem[fifo->runlist.active];
fifo->runlist.active = !fifo->runlist.active;
@@ -73,7 +73,7 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
case NVKM_MEM_TARGET_VRAM: target = 0; break;
case NVKM_MEM_TARGET_NCOH: target = 3; break;
default:
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&fifo->base.mutex);
WARN_ON(1);
return;
}
@@ -86,59 +86,61 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
!(nvkm_rd32(device, 0x00227c) & 0x00100000),
msecs_to_jiffies(2000)) == 0)
nvkm_error(subdev, "runlist update timeout\n");
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&fifo->base.mutex);
}
void
gf100_fifo_runlist_remove(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
{
- mutex_lock(&fifo->base.engine.subdev.mutex);
+ mutex_lock(&fifo->base.mutex);
list_del_init(&chan->head);
- mutex_unlock(&fifo->base.engine.subdev.mutex);
+ mutex_unlock(&fifo->base.mutex);
}
void
gf100_fifo_runlist_insert(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
{
- mutex_lock(&fifo->base.engine.subdev.mutex);
+ mutex_lock(&fifo->base.mutex);
list_add_tail(&chan->head, &fifo->chan);
- mutex_unlock(&fifo->base.engine.subdev.mutex);
+ mutex_unlock(&fifo->base.mutex);
}
-static inline int
-gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
+static struct nvkm_engine *
+gf100_fifo_id_engine(struct nvkm_fifo *fifo, int engi)
{
- switch (engn) {
- case NVKM_ENGINE_GR : engn = 0; break;
- case NVKM_ENGINE_MSVLD : engn = 1; break;
- case NVKM_ENGINE_MSPPP : engn = 2; break;
- case NVKM_ENGINE_MSPDEC: engn = 3; break;
- case NVKM_ENGINE_CE0 : engn = 4; break;
- case NVKM_ENGINE_CE1 : engn = 5; break;
+ enum nvkm_subdev_type type;
+ int inst;
+
+ switch (engi) {
+ case GF100_FIFO_ENGN_GR : type = NVKM_ENGINE_GR ; inst = 0; break;
+ case GF100_FIFO_ENGN_MSPDEC: type = NVKM_ENGINE_MSPDEC; inst = 0; break;
+ case GF100_FIFO_ENGN_MSPPP : type = NVKM_ENGINE_MSPPP ; inst = 0; break;
+ case GF100_FIFO_ENGN_MSVLD : type = NVKM_ENGINE_MSVLD ; inst = 0; break;
+ case GF100_FIFO_ENGN_CE0 : type = NVKM_ENGINE_CE ; inst = 0; break;
+ case GF100_FIFO_ENGN_CE1 : type = NVKM_ENGINE_CE ; inst = 1; break;
+ case GF100_FIFO_ENGN_SW : type = NVKM_ENGINE_SW ; inst = 0; break;
default:
- return -1;
+ WARN_ON(1);
+ return NULL;
}
- return engn;
+ return nvkm_device_engine(fifo->engine.subdev.device, type, inst);
}
-static inline struct nvkm_engine *
-gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
+static int
+gf100_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
{
- struct nvkm_device *device = fifo->base.engine.subdev.device;
-
- switch (engn) {
- case 0: engn = NVKM_ENGINE_GR; break;
- case 1: engn = NVKM_ENGINE_MSVLD; break;
- case 2: engn = NVKM_ENGINE_MSPPP; break;
- case 3: engn = NVKM_ENGINE_MSPDEC; break;
- case 4: engn = NVKM_ENGINE_CE0; break;
- case 5: engn = NVKM_ENGINE_CE1; break;
+ switch (engine->subdev.type) {
+ case NVKM_ENGINE_GR : return GF100_FIFO_ENGN_GR;
+ case NVKM_ENGINE_MSPDEC: return GF100_FIFO_ENGN_MSPDEC;
+ case NVKM_ENGINE_MSPPP : return GF100_FIFO_ENGN_MSPPP;
+ case NVKM_ENGINE_MSVLD : return GF100_FIFO_ENGN_MSVLD;
+ case NVKM_ENGINE_CE : return GF100_FIFO_ENGN_CE0 + engine->subdev.inst;
+ case NVKM_ENGINE_SW : return GF100_FIFO_ENGN_SW;
default:
- return NULL;
+ WARN_ON(1);
+ return -1;
}
-
- return nvkm_device_engine(device, engn);
}
static void
@@ -148,20 +150,17 @@ gf100_fifo_recover_work(struct work_struct *w)
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_engine *engine;
unsigned long flags;
- u32 engn, engm = 0;
- u64 mask, todo;
+ u32 engm, engn, todo;
spin_lock_irqsave(&fifo->base.lock, flags);
- mask = fifo->recover.mask;
+ engm = fifo->recover.mask;
fifo->recover.mask = 0ULL;
spin_unlock_irqrestore(&fifo->base.lock, flags);
- for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn))
- engm |= 1 << gf100_fifo_engidx(fifo, engn);
nvkm_mask(device, 0x002630, engm, engm);
- for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn)) {
- if ((engine = nvkm_device_engine(device, engn))) {
+ for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT_ULL(engn)) {
+ if ((engine = gf100_fifo_id_engine(&fifo->base, engn))) {
nvkm_subdev_fini(&engine->subdev, false);
WARN_ON(nvkm_subdev_init(&engine->subdev));
}
@@ -179,17 +178,18 @@ gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
u32 chid = chan->base.chid;
+ int engi = gf100_fifo_engine_id(&fifo->base, engine);
nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
- nvkm_subdev_name[engine->subdev.index], chid);
+ engine->subdev.name, chid);
assert_spin_locked(&fifo->base.lock);
nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
list_del_init(&chan->head);
chan->killed = true;
- if (engine != &fifo->base.engine)
- fifo->recover.mask |= 1ULL << engine->subdev.index;
+ if (engi >= 0 && engi != GF100_FIFO_ENGN_SW)
+ fifo->recover.mask |= BIT(engi);
schedule_work(&fifo->recover.work);
nvkm_fifo_kevent(&fifo->base, chid);
}
@@ -205,8 +205,8 @@ gf100_fifo_fault_engine[] = {
{ 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
{ 0x13, "PCOUNTER" },
{ 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
- { 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 },
- { 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 },
+ { 0x15, "PCE0", NULL, NVKM_ENGINE_CE, 0 },
+ { 0x16, "PCE1", NULL, NVKM_ENGINE_CE, 1 },
{ 0x17, "PMU" },
{}
};
@@ -286,7 +286,7 @@ gf100_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
break;
default:
- engine = nvkm_device_engine(device, eu->data2);
+ engine = nvkm_device_engine(device, eu->data2, eu->inst);
break;
}
}
@@ -335,7 +335,7 @@ gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
if (busy && unk0 && unk1) {
list_for_each_entry(chan, &fifo->chan, head) {
if (chan->base.chid == chid) {
- engine = gf100_fifo_engine(fifo, engn);
+ engine = gf100_fifo_id_engine(&fifo->base, engn);
if (!engine)
break;
gf100_fifo_recover(fifo, engine, chan);
@@ -673,6 +673,8 @@ gf100_fifo = {
.fini = gf100_fifo_fini,
.intr = gf100_fifo_intr,
.fault = gf100_fifo_fault,
+ .engine_id = gf100_fifo_engine_id,
+ .id_engine = gf100_fifo_id_engine,
.uevent_init = gf100_fifo_uevent_init,
.uevent_fini = gf100_fifo_uevent_fini,
.chan = {
@@ -682,7 +684,8 @@ gf100_fifo = {
};
int
-gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gf100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
struct gf100_fifo *fifo;
@@ -692,5 +695,5 @@ gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
INIT_WORK(&fifo->recover.work, gf100_fifo_recover_work);
*pfifo = &fifo->base;
- return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
+ return nvkm_fifo_ctor(&gf100_fifo, device, type, inst, 128, &fifo->base);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 5d4b695cab8e..e771bd519ee2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -36,19 +36,7 @@
#include <nvif/class.h>
#include <nvif/cl0080.h>
-struct gk104_fifo_engine_status {
- bool busy;
- bool faulted;
- bool chsw;
- bool save;
- bool load;
- struct {
- bool tsg;
- u32 id;
- } prev, next, *chan;
-};
-
-static void
+void
gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
struct gk104_fifo_engine_status *status)
{
@@ -95,7 +83,7 @@ gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
status->chan == &status->next ? "*" : " ");
}
-static int
+int
gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
void *argv, u32 argc, struct nvkm_object **pobject)
{
@@ -112,7 +100,7 @@ gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
return -EINVAL;
}
-static int
+int
gk104_fifo_class_get(struct nvkm_fifo *base, int index,
struct nvkm_oclass *oclass)
{
@@ -134,14 +122,14 @@ gk104_fifo_class_get(struct nvkm_fifo *base, int index,
return c;
}
-static void
+void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}
-static void
+void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
@@ -180,12 +168,11 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
{
const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
struct gk104_fifo_chan *chan;
- struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_memory *mem;
struct nvkm_fifo_cgrp *cgrp;
int nr = 0;
- mutex_lock(&subdev->mutex);
+ mutex_lock(&fifo->base.mutex);
mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
fifo->runlist[runl].next = !fifo->runlist[runl].next;
@@ -203,27 +190,27 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
nvkm_done(mem);
func->commit(fifo, runl, mem, nr);
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&fifo->base.mutex);
}
void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
- mutex_lock(&fifo->base.engine.subdev.mutex);
+ mutex_lock(&fifo->base.mutex);
if (!list_empty(&chan->head)) {
list_del_init(&chan->head);
if (cgrp && !--cgrp->chan_nr)
list_del_init(&cgrp->head);
}
- mutex_unlock(&fifo->base.engine.subdev.mutex);
+ mutex_unlock(&fifo->base.mutex);
}
void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
- mutex_lock(&fifo->base.engine.subdev.mutex);
+ mutex_lock(&fifo->base.mutex);
if (cgrp) {
if (!cgrp->chan_nr++)
list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
@@ -231,7 +218,7 @@ gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
} else {
list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
}
- mutex_unlock(&fifo->base.engine.subdev.mutex);
+ mutex_unlock(&fifo->base.mutex);
}
void
@@ -271,6 +258,33 @@ gk104_fifo_pbdma = {
.init = gk104_fifo_pbdma_init,
};
+struct nvkm_engine *
+gk104_fifo_id_engine(struct nvkm_fifo *base, int engi)
+{
+ if (engi == GK104_FIFO_ENGN_SW)
+ return nvkm_device_engine(base->engine.subdev.device, NVKM_ENGINE_SW, 0);
+
+ return gk104_fifo(base)->engine[engi].engine;
+}
+
+int
+gk104_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ int engn;
+
+ if (engine->subdev.type == NVKM_ENGINE_SW)
+ return GK104_FIFO_ENGN_SW;
+
+ for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
+ if (fifo->engine[engn].engine == engine)
+ return engn;
+ }
+
+ WARN_ON(1);
+ return -1;
+}
+
static void
gk104_fifo_recover_work(struct work_struct *w)
{
@@ -422,11 +436,12 @@ gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
* called from the fault handler already.
*/
if (!status.faulted && engine) {
- mmui = nvkm_top_fault_id(device, engine->subdev.index);
+ mmui = nvkm_top_fault_id(device, engine->subdev.type, engine->subdev.inst);
if (mmui < 0) {
const struct nvkm_enum *en = fifo->func->fault.engine;
for (; en && en->name; en++) {
- if (en->data2 == engine->subdev.index) {
+ if (en->data2 == engine->subdev.type &&
+ en->inst == engine->subdev.inst) {
mmui = en->value;
break;
}
@@ -471,8 +486,8 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
struct nvkm_engine *engine = NULL;
struct nvkm_fifo_chan *chan;
unsigned long flags;
- char ct[8] = "HUB/", en[16] = "";
- int engn;
+ const char *en = "";
+ char ct[8] = "HUB/";
er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
@@ -496,23 +511,20 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
break;
default:
- engine = nvkm_device_engine(device, ee->data2);
+ engine = nvkm_device_engine(device, ee->data2, 0);
break;
}
}
if (ee == NULL) {
- enum nvkm_devidx engidx = nvkm_top_fault(device, info->engine);
- if (engidx < NVKM_SUBDEV_NR) {
- const char *src = nvkm_subdev_name[engidx];
- char *dst = en;
- do {
- *dst++ = toupper(*src++);
- } while(*src);
- engine = nvkm_device_engine(device, engidx);
+ struct nvkm_subdev *subdev = nvkm_top_fault(device, info->engine);
+ if (subdev) {
+ if (subdev->func == &nvkm_engine)
+ engine = container_of(subdev, typeof(*engine), subdev);
+ en = engine->subdev.name;
}
} else {
- snprintf(en, sizeof(en), "%s", ee->name);
+ en = ee->name;
}
spin_lock_irqsave(&fifo->base.lock, flags);
@@ -535,11 +547,10 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
* correct engine(s), but just in case we can't find the channel
* information...
*/
- for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
- if (fifo->engine[engn].engine == engine) {
+ if (engine) {
+ int engn = fifo->base.func->engine_id(&fifo->base, engine);
+ if (engn >= 0 && engn != GK104_FIFO_ENGN_SW)
gk104_fifo_recover_engn(fifo, engn);
- break;
- }
}
spin_unlock_irqrestore(&fifo->base.lock, flags);
@@ -556,7 +567,7 @@ gk104_fifo_bind_reason[] = {
{}
};
-static void
+void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -627,7 +638,7 @@ gk104_fifo_intr_sched(struct gk104_fifo *fifo)
}
}
-static void
+void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -637,7 +648,7 @@ gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
nvkm_wr32(device, 0x00256c, stat);
}
-static void
+void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -680,7 +691,7 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
{}
};
-static void
+void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -729,7 +740,7 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
{}
};
-static void
+void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -750,7 +761,7 @@ gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}
-static void
+void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
@@ -763,7 +774,7 @@ gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
}
}
-static void
+void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
nvkm_fifo_uevent(&fifo->base);
@@ -861,7 +872,7 @@ gk104_fifo_intr(struct nvkm_fifo *base)
}
}
-static void
+void
gk104_fifo_fini(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
@@ -871,24 +882,46 @@ gk104_fifo_fini(struct nvkm_fifo *base)
nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}
-static int
+int
gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
{
struct gk104_fifo *fifo = gk104_fifo(base);
switch (mthd) {
- case NV_DEVICE_FIFO_RUNLISTS:
+ case NV_DEVICE_HOST_RUNLISTS:
*data = (1ULL << fifo->runlist_nr) - 1;
return 0;
- case NV_DEVICE_FIFO_RUNLIST_ENGINES(0)...
- NV_DEVICE_FIFO_RUNLIST_ENGINES(63): {
- int runl = mthd - NV_DEVICE_FIFO_RUNLIST_ENGINES(0), engn;
- if (runl < fifo->runlist_nr) {
- unsigned long engm = fifo->runlist[runl].engm;
+ case NV_DEVICE_HOST_RUNLIST_ENGINES: {
+ if (*data < fifo->runlist_nr) {
+ unsigned long engm = fifo->runlist[*data].engm;
struct nvkm_engine *engine;
+ int engn;
*data = 0;
for_each_set_bit(engn, &engm, fifo->engine_nr) {
- if ((engine = fifo->engine[engn].engine))
- *data |= BIT_ULL(engine->subdev.index);
+ if ((engine = fifo->engine[engn].engine)) {
+#define CASE(n) case NVKM_ENGINE_##n: *data |= NV_DEVICE_HOST_RUNLIST_ENGINES_##n; break
+ switch (engine->subdev.type) {
+ CASE(SW );
+ CASE(GR );
+ CASE(MPEG );
+ CASE(ME );
+ CASE(CIPHER);
+ CASE(BSP );
+ CASE(VP );
+ CASE(CE );
+ CASE(SEC );
+ CASE(MSVLD );
+ CASE(MSPDEC);
+ CASE(MSPPP );
+ CASE(MSENC );
+ CASE(VIC );
+ CASE(SEC2 );
+ CASE(NVDEC );
+ CASE(NVENC );
+ default:
+ WARN_ON(1);
+ break;
+ }
+ }
}
return 0;
}
@@ -899,15 +932,15 @@ gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
}
}
-static int
+int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
- int engn, runl, pbid, ret, i, j;
- enum nvkm_devidx engidx;
+ struct nvkm_top_device *tdev;
+ int pbid, ret, i, j;
u32 *map;
fifo->pbdma_nr = fifo->func->pbdma->nr(fifo);
@@ -921,25 +954,41 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));
/* Determine runlist configuration from topology device info. */
- i = 0;
- while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
+ list_for_each_entry(tdev, &device->top->device, head) {
+ const int engn = tdev->engine;
+ char _en[16], *en;
+
+ if (engn < 0)
+ continue;
+
/* Determine which PBDMA handles requests for this engine. */
for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
- if (map[j] & (1 << runl)) {
+ if (map[j] & BIT(tdev->runlist)) {
pbid = j;
break;
}
}
+ fifo->engine[engn].engine = nvkm_device_engine(device, tdev->type, tdev->inst);
+ if (!fifo->engine[engn].engine) {
+ snprintf(_en, sizeof(_en), "%s, %d",
+ nvkm_subdev_type[tdev->type], tdev->inst);
+ en = _en;
+ } else {
+ en = fifo->engine[engn].engine->subdev.name;
+ }
+
nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
- engn, runl, pbid, nvkm_subdev_name[engidx]);
+ tdev->engine, tdev->runlist, pbid, en);
- fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
- fifo->engine[engn].runl = runl;
+ fifo->engine[engn].runl = tdev->runlist;
fifo->engine[engn].pbid = pbid;
fifo->engine_nr = max(fifo->engine_nr, engn + 1);
- fifo->runlist[runl].engm |= 1 << engn;
- fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
+ fifo->runlist[tdev->runlist].engm |= BIT(engn);
+ fifo->runlist[tdev->runlist].engm_sw |= BIT(engn);
+ if (tdev->type == NVKM_ENGINE_GR)
+ fifo->runlist[tdev->runlist].engm_sw |= BIT(GK104_FIFO_ENGN_SW);
+ fifo->runlist_nr = max(fifo->runlist_nr, tdev->runlist + 1);
}
kfree(map);
@@ -974,7 +1023,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}
-static void
+void
gk104_fifo_init(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
@@ -1006,7 +1055,7 @@ gk104_fifo_init(struct nvkm_fifo *base)
nvkm_wr32(device, 0x002140, 0x7fffffff);
}
-static void *
+void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
@@ -1033,6 +1082,8 @@ gk104_fifo_ = {
.fini = gk104_fifo_fini,
.intr = gk104_fifo_intr,
.fault = gk104_fifo_fault,
+ .engine_id = gk104_fifo_engine_id,
+ .id_engine = gk104_fifo_id_engine,
.uevent_init = gk104_fifo_uevent_init,
.uevent_fini = gk104_fifo_uevent_fini,
.recover_chan = gk104_fifo_recover_chan,
@@ -1042,7 +1093,7 @@ gk104_fifo_ = {
int
gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
- int index, int nr, struct nvkm_fifo **pfifo)
+ enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo **pfifo)
{
struct gk104_fifo *fifo;
@@ -1052,7 +1103,7 @@ gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
*pfifo = &fifo->base;
- return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base);
+ return nvkm_fifo_ctor(&gk104_fifo_, device, type, inst, nr, &fifo->base);
}
const struct nvkm_enum
@@ -1084,12 +1135,12 @@ gk104_fifo_fault_engine[] = {
{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
{ 0x13, "PERF" },
{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
- { 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
- { 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
+ { 0x15, "CE0", NULL, NVKM_ENGINE_CE, 0 },
+ { 0x16, "CE1", NULL, NVKM_ENGINE_CE, 1 },
{ 0x17, "PMU" },
{ 0x18, "PTP" },
{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
- { 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
+ { 0x1b, "CE2", NULL, NVKM_ENGINE_CE, 2 },
{}
};
@@ -1191,7 +1242,8 @@ gk104_fifo = {
};
int
-gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gk104_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
+ return gk104_fifo_new_(&gk104_fifo, device, type, inst, 4096, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 6407a4a174cf..f2d12ae73944 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -35,6 +35,7 @@ struct gk104_fifo {
struct list_head cgrp;
struct list_head chan;
u32 engm;
+ u32 engm_sw;
} runlist[16];
int runlist_nr;
@@ -87,11 +88,43 @@ struct gk104_fifo_func {
bool cgrp_force;
};
-int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *,
+struct gk104_fifo_engine_status {
+ bool busy;
+ bool faulted;
+ bool chsw;
+ bool save;
+ bool load;
+ struct {
+ bool tsg;
+ u32 id;
+ } prev, next, *chan;
+};
+
+int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type,
int index, int nr, struct nvkm_fifo **);
void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
void gk104_fifo_runlist_update(struct gk104_fifo *, int runl);
+void gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
+ struct gk104_fifo_engine_status *status);
+void gk104_fifo_intr_bind(struct gk104_fifo *fifo);
+void gk104_fifo_intr_chsw(struct gk104_fifo *fifo);
+void gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo);
+void gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit);
+void gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit);
+void gk104_fifo_intr_runlist(struct gk104_fifo *fifo);
+void gk104_fifo_intr_engine(struct gk104_fifo *fifo);
+void *gk104_fifo_dtor(struct nvkm_fifo *base);
+int gk104_fifo_oneinit(struct nvkm_fifo *base);
+int gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data);
+void gk104_fifo_init(struct nvkm_fifo *base);
+void gk104_fifo_fini(struct nvkm_fifo *base);
+int gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *argv, u32 argc, struct nvkm_object **pobject);
+int gk104_fifo_class_get(struct nvkm_fifo *base, int index,
+ struct nvkm_oclass *oclass);
+void gk104_fifo_uevent_fini(struct nvkm_fifo *fifo);
+void gk104_fifo_uevent_init(struct nvkm_fifo *fifo);
extern const struct gk104_fifo_pbdma_func gk104_fifo_pbdma;
int gk104_fifo_pbdma_nr(struct gk104_fifo *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
index f820969e4405..915278c7e012 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -60,7 +60,8 @@ gk110_fifo = {
};
int
-gk110_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gk110_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gk110_fifo, device, index, 4096, pfifo);
+ return gk104_fifo_new_(&gk110_fifo, device, type, inst, 4096, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index 2f54787b5fd0..cb703693de52 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -57,7 +57,8 @@ gk208_fifo = {
};
int
-gk208_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gk208_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gk208_fifo, device, index, 1024, pfifo);
+ return gk104_fifo_new_(&gk208_fifo, device, type, inst, 1024, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index a814c4e0ed3e..6e35cf44c640 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -38,7 +38,8 @@ gk20a_fifo = {
};
int
-gk20a_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gk20a_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gk20a_fifo, device, index, 128, pfifo);
+ return gk104_fifo_new_(&gk20a_fifo, device, type, inst, 128, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index c2a2e4572f6c..7af6e687d474 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -106,7 +106,8 @@ gm107_fifo = {
};
int
-gm107_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gm107_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gm107_fifo, device, index, 2048, pfifo);
+ return gk104_fifo_new_(&gm107_fifo, device, type, inst, 2048, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index b8cfe3b28c4f..573658cb6c73 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -54,7 +54,8 @@ gm200_fifo = {
};
int
-gm200_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gm200_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gm200_fifo, device, index, 4096, pfifo);
+ return gk104_fifo_new_(&gm200_fifo, device, type, inst, 4096, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index 70b4feebc1fa..556c97e54f14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -38,7 +38,8 @@ gm20b_fifo = {
};
int
-gm20b_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gm20b_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gm20b_fifo, device, index, 512, pfifo);
+ return gk104_fifo_new_(&gm20b_fifo, device, type, inst, 512, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index 2c7a0176b3c8..6b46b6b65b87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -91,7 +91,8 @@ gp100_fifo = {
};
int
-gp100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gp100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gp100_fifo, device, index, 4096, pfifo);
+ return gk104_fifo_new_(&gp100_fifo, device, type, inst, 4096, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
index 8c65ad4feedb..7a5929cb4d29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -39,7 +39,8 @@ gp10b_fifo = {
};
int
-gp10b_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gp10b_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gp10b_fifo, device, index, 512, pfifo);
+ return gk104_fifo_new_(&gp10b_fifo, device, type, inst, 512, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
index 75f9632789b3..4e78bbe3b94b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -52,11 +52,10 @@ gf100_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
static u32
gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
{
- switch (engine->subdev.index) {
+ switch (engine->subdev.type) {
case NVKM_ENGINE_SW : return 0;
case NVKM_ENGINE_GR : return 0x0210;
- case NVKM_ENGINE_CE0 : return 0x0230;
- case NVKM_ENGINE_CE1 : return 0x0240;
+ case NVKM_ENGINE_CE : return 0x0230 + (engine->subdev.inst * 0x10);
case NVKM_ENGINE_MSPDEC: return 0x0250;
case NVKM_ENGINE_MSPPP : return 0x0260;
case NVKM_ENGINE_MSVLD : return 0x0270;
@@ -66,6 +65,15 @@ gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
}
}
+static struct gf100_fifo_engn *
+gf100_fifo_gpfifo_engine(struct gf100_fifo_chan *chan, struct nvkm_engine *engine)
+{
+ int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
+ if (engi >= 0)
+ return &chan->engn[engi];
+ return NULL;
+}
+
static int
gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine, bool suspend)
@@ -77,7 +85,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_gpuobj *inst = chan->base.inst;
int ret = 0;
- mutex_lock(&subdev->mutex);
+ mutex_lock(&chan->fifo->base.mutex);
nvkm_wr32(device, 0x002634, chan->base.chid);
if (nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x002634) == chan->base.chid)
@@ -87,7 +95,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
chan->base.chid, chan->base.object.client->name);
ret = -ETIMEDOUT;
}
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&chan->fifo->base.mutex);
if (ret && suspend)
return ret;
@@ -108,13 +116,13 @@ gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
{
const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+ struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine);
struct nvkm_gpuobj *inst = chan->base.inst;
if (offset) {
- u64 addr = chan->engn[engine->subdev.index].vma->addr;
nvkm_kmap(inst);
- nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
- nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
+ nvkm_wo32(inst, offset + 0x00, lower_32_bits(engn->vma->addr) | 4);
+ nvkm_wo32(inst, offset + 0x04, upper_32_bits(engn->vma->addr));
nvkm_done(inst);
}
@@ -126,8 +134,9 @@ gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
- nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
- nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
+ struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine);
+ nvkm_vmm_put(chan->base.vmm, &engn->vma);
+ nvkm_gpuobj_del(&engn->inst);
}
static int
@@ -136,23 +145,21 @@ gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object)
{
struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
- int engn = engine->subdev.index;
+ struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine);
int ret;
if (!gf100_fifo_gpfifo_engine_addr(engine))
return 0;
- ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
+ ret = nvkm_object_bind(object, NULL, 0, &engn->inst);
if (ret)
return ret;
- ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
- &chan->engn[engn].vma);
+ ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma);
if (ret)
return ret;
- return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
- chan->engn[engn].vma, NULL, 0);
+ return nvkm_memory_map(engn->inst, 0, chan->base.vmm, engn->vma, NULL, 0);
}
static void
@@ -243,13 +250,13 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base,
0x1000, 0x1000, true, args->v0.vmm, 0,
- (1ULL << NVKM_ENGINE_CE0) |
- (1ULL << NVKM_ENGINE_CE1) |
- (1ULL << NVKM_ENGINE_GR) |
- (1ULL << NVKM_ENGINE_MSPDEC) |
- (1ULL << NVKM_ENGINE_MSPPP) |
- (1ULL << NVKM_ENGINE_MSVLD) |
- (1ULL << NVKM_ENGINE_SW),
+ BIT(GF100_FIFO_ENGN_GR) |
+ BIT(GF100_FIFO_ENGN_MSPDEC) |
+ BIT(GF100_FIFO_ENGN_MSPPP) |
+ BIT(GF100_FIFO_ENGN_MSVLD) |
+ BIT(GF100_FIFO_ENGN_CE0) |
+ BIT(GF100_FIFO_ENGN_CE1) |
+ BIT(GF100_FIFO_ENGN_SW),
1, fifo->user.bar->addr, 0x1000,
oclass, &chan->base);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 728a1edbf98c..b6900a52bcce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -65,19 +65,18 @@ int
gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
{
int ret;
- mutex_lock(&chan->base.fifo->engine.subdev.mutex);
+ mutex_lock(&chan->base.fifo->mutex);
ret = gk104_fifo_gpfifo_kick_locked(chan);
- mutex_unlock(&chan->base.fifo->engine.subdev.mutex);
+ mutex_unlock(&chan->base.fifo->mutex);
return ret;
}
static u32
gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
{
- switch (engine->subdev.index) {
+ switch (engine->subdev.type) {
case NVKM_ENGINE_SW :
- case NVKM_ENGINE_CE0...NVKM_ENGINE_CE_LAST:
- return 0;
+ case NVKM_ENGINE_CE : return 0;
case NVKM_ENGINE_GR : return 0x0210;
case NVKM_ENGINE_SEC : return 0x0220;
case NVKM_ENGINE_MSPDEC: return 0x0250;
@@ -85,15 +84,26 @@ gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
case NVKM_ENGINE_MSVLD : return 0x0270;
case NVKM_ENGINE_VIC : return 0x0280;
case NVKM_ENGINE_MSENC : return 0x0290;
- case NVKM_ENGINE_NVDEC0: return 0x02100270;
- case NVKM_ENGINE_NVENC0: return 0x02100290;
- case NVKM_ENGINE_NVENC1: return 0x0210;
+ case NVKM_ENGINE_NVDEC : return 0x02100270;
+ case NVKM_ENGINE_NVENC :
+ if (engine->subdev.inst)
+ return 0x0210;
+ return 0x02100290;
default:
WARN_ON(1);
return 0;
}
}
+struct gk104_fifo_engn *
+gk104_fifo_gpfifo_engine(struct gk104_fifo_chan *chan, struct nvkm_engine *engine)
+{
+ int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
+ if (engi >= 0)
+ return &chan->engn[engi];
+ return NULL;
+}
+
static int
gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine, bool suspend)
@@ -126,13 +136,13 @@ gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
struct nvkm_gpuobj *inst = chan->base.inst;
u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
if (offset) {
- u64 addr = chan->engn[engine->subdev.index].vma->addr;
- u32 datalo = lower_32_bits(addr) | 0x00000004;
- u32 datahi = upper_32_bits(addr);
+ u32 datalo = lower_32_bits(engn->vma->addr) | 0x00000004;
+ u32 datahi = upper_32_bits(engn->vma->addr);
nvkm_kmap(inst);
nvkm_wo32(inst, (offset & 0xffff) + 0x00, datalo);
nvkm_wo32(inst, (offset & 0xffff) + 0x04, datahi);
@@ -151,8 +161,9 @@ gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
- nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
- nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
+ struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
+ nvkm_vmm_put(chan->base.vmm, &engn->vma);
+ nvkm_gpuobj_del(&engn->inst);
}
int
@@ -161,23 +172,21 @@ gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
struct nvkm_object *object)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
- int engn = engine->subdev.index;
+ struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
int ret;
if (!gk104_fifo_gpfifo_engine_addr(engine))
return 0;
- ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
+ ret = nvkm_object_bind(object, NULL, 0, &engn->inst);
if (ret)
return ret;
- ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
- &chan->engn[engn].vma);
+ ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma);
if (ret)
return ret;
- return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
- chan->engn[engn].vma, NULL, 0);
+ return nvkm_memory_map(engn->inst, 0, chan->base.vmm, engn->vma, NULL, 0);
}
void
@@ -247,23 +256,12 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
{
struct gk104_fifo_chan *chan;
int runlist = ffs(*runlists) -1, ret, i;
- unsigned long engm;
- u64 subdevs = 0;
u64 usermem;
if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
return -EINVAL;
*runlists = BIT_ULL(runlist);
- engm = fifo->runlist[runlist].engm;
- for_each_set_bit(i, &engm, fifo->engine_nr) {
- if (fifo->engine[i].engine)
- subdevs |= BIT_ULL(fifo->engine[i].engine->subdev.index);
- }
-
- if (subdevs & BIT_ULL(NVKM_ENGINE_GR))
- subdevs |= BIT_ULL(NVKM_ENGINE_SW);
-
/* Allocate the channel. */
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
@@ -273,7 +271,7 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
INIT_LIST_HEAD(&chan->head);
ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
- 0x1000, 0x1000, true, vmm, 0, subdevs,
+ 0x1000, 0x1000, true, vmm, 0, fifo->runlist[runlist].engm_sw,
1, fifo->user.bar->addr, 0x200,
oclass, &chan->base);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
index a7462cf59d65..ee4967b706a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
@@ -44,7 +44,7 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid
int ret;
/* Block runlist to prevent the channel from being rescheduled. */
- mutex_lock(&subdev->mutex);
+ mutex_lock(&chan->fifo->base.mutex);
nvkm_mask(device, 0x002630, BIT(chan->runl), BIT(chan->runl));
/* Preempt the channel. */
@@ -58,7 +58,7 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid
/* Resume runlist. */
nvkm_mask(device, 0x002630, BIT(chan->runl), 0);
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&chan->fifo->base.mutex);
return ret;
}
@@ -70,8 +70,7 @@ gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_gpuobj *inst = chan->base.inst;
int ret;
- if (engine->subdev.index >= NVKM_ENGINE_CE0 &&
- engine->subdev.index <= NVKM_ENGINE_CE_LAST)
+ if (engine->subdev.type == NVKM_ENGINE_CE)
return gk104_fifo_gpfifo_kick(chan);
ret = gv100_fifo_gpfifo_engine_valid(chan, false, false);
@@ -90,17 +89,15 @@ gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
struct nvkm_gpuobj *inst = chan->base.inst;
- u64 addr;
- if (engine->subdev.index >= NVKM_ENGINE_CE0 &&
- engine->subdev.index <= NVKM_ENGINE_CE_LAST)
+ if (engine->subdev.type == NVKM_ENGINE_CE)
return 0;
- addr = chan->engn[engine->subdev.index].vma->addr;
nvkm_kmap(inst);
- nvkm_wo32(inst, 0x210, lower_32_bits(addr) | 0x00000004);
- nvkm_wo32(inst, 0x214, upper_32_bits(addr));
+ nvkm_wo32(inst, 0x210, lower_32_bits(engn->vma->addr) | 0x00000004);
+ nvkm_wo32(inst, 0x214, upper_32_bits(engn->vma->addr));
nvkm_done(inst);
return gv100_fifo_gpfifo_engine_valid(chan, false, true);
@@ -129,8 +126,6 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct gk104_fifo_chan *chan;
int runlist = ffs(*runlists) -1, ret, i;
- unsigned long engm;
- u64 subdevs = 0;
u64 usermem, mthd;
u32 size;
@@ -138,12 +133,6 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
return -EINVAL;
*runlists = BIT_ULL(runlist);
- engm = fifo->runlist[runlist].engm;
- for_each_set_bit(i, &engm, fifo->engine_nr) {
- if (fifo->engine[i].engine)
- subdevs |= BIT_ULL(fifo->engine[i].engine->subdev.index);
- }
-
/* Allocate the channel. */
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
@@ -153,7 +142,7 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
INIT_LIST_HEAD(&chan->head);
ret = nvkm_fifo_chan_ctor(func, &fifo->base, 0x1000, 0x1000, true, vmm,
- 0, subdevs, 1, fifo->user.bar->addr, 0x200,
+ 0, fifo->runlist[runlist].engm, 1, fifo->user.bar->addr, 0x200,
oclass, &chan->base);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
index 6ee1bb32a071..70e16a91ac12 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
@@ -301,7 +301,8 @@ gv100_fifo = {
};
int
-gv100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gv100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gv100_fifo, device, index, 4096, pfifo);
+ return gk104_fifo_new_(&gv100_fifo, device, type, inst, 4096, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index c1d1b1aa5bc6..c6730c124769 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -94,6 +94,38 @@ __releases(fifo->base.lock)
spin_unlock_irqrestore(&fifo->base.lock, flags);
}
+struct nvkm_engine *
+nv04_fifo_id_engine(struct nvkm_fifo *fifo, int engi)
+{
+ enum nvkm_subdev_type type;
+
+ switch (engi) {
+ case NV04_FIFO_ENGN_SW : type = NVKM_ENGINE_SW; break;
+ case NV04_FIFO_ENGN_GR : type = NVKM_ENGINE_GR; break;
+ case NV04_FIFO_ENGN_MPEG: type = NVKM_ENGINE_MPEG; break;
+ case NV04_FIFO_ENGN_DMA : type = NVKM_ENGINE_DMAOBJ; break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return nvkm_device_engine(fifo->engine.subdev.device, type, 0);
+}
+
+int
+nv04_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
+{
+ switch (engine->subdev.type) {
+ case NVKM_ENGINE_SW : return NV04_FIFO_ENGN_SW;
+ case NVKM_ENGINE_GR : return NV04_FIFO_ENGN_GR;
+ case NVKM_ENGINE_MPEG : return NV04_FIFO_ENGN_MPEG;
+ case NVKM_ENGINE_DMAOBJ: return NV04_FIFO_ENGN_DMA;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
static const char *
nv_dma_state_err(u32 state)
{
@@ -326,7 +358,7 @@ nv04_fifo_init(struct nvkm_fifo *base)
int
nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
- int index, int nr, const struct nv04_fifo_ramfc *ramfc,
+ enum nvkm_subdev_type type, int inst, int nr, const struct nv04_fifo_ramfc *ramfc,
struct nvkm_fifo **pfifo)
{
struct nv04_fifo *fifo;
@@ -337,7 +369,7 @@ nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
fifo->ramfc = ramfc;
*pfifo = &fifo->base;
- ret = nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
+ ret = nvkm_fifo_ctor(func, device, type, inst, nr, &fifo->base);
if (ret)
return ret;
@@ -349,6 +381,8 @@ static const struct nvkm_fifo_func
nv04_fifo = {
.init = nv04_fifo_init,
.intr = nv04_fifo_intr,
+ .engine_id = nv04_fifo_engine_id,
+ .id_engine = nv04_fifo_id_engine,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.chan = {
@@ -358,8 +392,8 @@ nv04_fifo = {
};
int
-nv04_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+nv04_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return nv04_fifo_new_(&nv04_fifo, device, index, 16,
- nv04_fifo_ramfc, pfifo);
+ return nv04_fifo_new_(&nv04_fifo, device, type, inst, 16, nv04_fifo_ramfc, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
index e5ecceee77ae..3f23bcde4a54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
@@ -17,8 +17,7 @@ struct nv04_fifo {
const struct nv04_fifo_ramfc *ramfc;
};
-int nv04_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
- int index, int nr, const struct nv04_fifo_ramfc *,
- struct nvkm_fifo **);
+int nv04_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ int nr, const struct nv04_fifo_ramfc *, struct nvkm_fifo **);
void nv04_fifo_init(struct nvkm_fifo *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
index f9a87deb2b3d..f8887f0f2f82 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
@@ -43,6 +43,8 @@ static const struct nvkm_fifo_func
nv10_fifo = {
.init = nv04_fifo_init,
.intr = nv04_fifo_intr,
+ .engine_id = nv04_fifo_engine_id,
+ .id_engine = nv04_fifo_id_engine,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.chan = {
@@ -52,8 +54,8 @@ nv10_fifo = {
};
int
-nv10_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+nv10_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return nv04_fifo_new_(&nv10_fifo, device, index, 32,
- nv10_fifo_ramfc, pfifo);
+ return nv04_fifo_new_(&nv10_fifo, device, type, inst, 32, nv10_fifo_ramfc, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
index f6d383a21222..3f94c7b5b054 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
@@ -81,6 +81,8 @@ static const struct nvkm_fifo_func
nv17_fifo = {
.init = nv17_fifo_init,
.intr = nv04_fifo_intr,
+ .engine_id = nv04_fifo_engine_id,
+ .id_engine = nv04_fifo_id_engine,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.chan = {
@@ -90,8 +92,8 @@ nv17_fifo = {
};
int
-nv17_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+nv17_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return nv04_fifo_new_(&nv17_fifo, device, index, 32,
- nv17_fifo_ramfc, pfifo);
+ return nv04_fifo_new_(&nv17_fifo, device, type, inst, 32, nv17_fifo_ramfc, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
index 2d61fd832ddb..f9ea46809bc0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -112,6 +112,8 @@ static const struct nvkm_fifo_func
nv40_fifo = {
.init = nv40_fifo_init,
.intr = nv04_fifo_intr,
+ .engine_id = nv04_fifo_engine_id,
+ .id_engine = nv04_fifo_id_engine,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.chan = {
@@ -121,8 +123,8 @@ nv40_fifo = {
};
int
-nv40_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+nv40_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return nv04_fifo_new_(&nv40_fifo, device, index, 32,
- nv40_fifo_ramfc, pfifo);
+ return nv04_fifo_new_(&nv40_fifo, device, type, inst, 32, nv40_fifo_ramfc, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index fa6e094d8068..be94156ea248 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -51,9 +51,9 @@ nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo)
void
nv50_fifo_runlist_update(struct nv50_fifo *fifo)
{
- mutex_lock(&fifo->base.engine.subdev.mutex);
+ mutex_lock(&fifo->base.mutex);
nv50_fifo_runlist_update_locked(fifo);
- mutex_unlock(&fifo->base.engine.subdev.mutex);
+ mutex_unlock(&fifo->base.mutex);
}
int
@@ -107,7 +107,7 @@ nv50_fifo_dtor(struct nvkm_fifo *base)
int
nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
- int index, struct nvkm_fifo **pfifo)
+ enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
{
struct nv50_fifo *fifo;
int ret;
@@ -116,7 +116,7 @@ nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
return -ENOMEM;
*pfifo = &fifo->base;
- ret = nvkm_fifo_ctor(func, device, index, 128, &fifo->base);
+ ret = nvkm_fifo_ctor(func, device, type, inst, 128, &fifo->base);
if (ret)
return ret;
@@ -131,6 +131,8 @@ nv50_fifo = {
.oneinit = nv50_fifo_oneinit,
.init = nv50_fifo_init,
.intr = nv04_fifo_intr,
+ .engine_id = nv04_fifo_engine_id,
+ .id_engine = nv04_fifo_id_engine,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.chan = {
@@ -141,7 +143,8 @@ nv50_fifo = {
};
int
-nv50_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+nv50_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return nv50_fifo_new_(&nv50_fifo, device, index, pfifo);
+ return nv50_fifo_new_(&nv50_fifo, device, type, inst, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
index 87d30b6bd2ea..0111e7e5a4e3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
@@ -10,8 +10,8 @@ struct nv50_fifo {
int cur_runlist;
};
-int nv50_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
- int index, struct nvkm_fifo **);
+int nv50_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_fifo **);
void *nv50_fifo_dtor(struct nvkm_fifo *);
int nv50_fifo_oneinit(struct nvkm_fifo *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index 0ef8baab513e..899272801a8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -4,8 +4,8 @@
#define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
#include <engine/fifo.h>
-int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *,
- int index, int nr, struct nvkm_fifo *);
+int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ int nr, struct nvkm_fifo *);
void nvkm_fifo_uevent(struct nvkm_fifo *);
void nvkm_fifo_cevent(struct nvkm_fifo *);
void nvkm_fifo_kevent(struct nvkm_fifo *, int chid);
@@ -23,6 +23,8 @@ struct nvkm_fifo_func {
void (*fini)(struct nvkm_fifo *);
void (*intr)(struct nvkm_fifo *);
void (*fault)(struct nvkm_fifo *, struct nvkm_fault_data *);
+ int (*engine_id)(struct nvkm_fifo *, struct nvkm_engine *);
+ struct nvkm_engine *(*id_engine)(struct nvkm_fifo *, int engi);
void (*pause)(struct nvkm_fifo *, unsigned long *);
void (*start)(struct nvkm_fifo *, unsigned long *);
void (*uevent_init)(struct nvkm_fifo *);
@@ -35,8 +37,13 @@ struct nvkm_fifo_func {
};
void nv04_fifo_intr(struct nvkm_fifo *);
+int nv04_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
+struct nvkm_engine *nv04_fifo_id_engine(struct nvkm_fifo *, int);
void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
void gf100_fifo_intr_fault(struct nvkm_fifo *, int);
+
+int gk104_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
+struct nvkm_engine *gk104_fifo_id_engine(struct nvkm_fifo *, int);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
index 005f3e1729b9..e417044cc347 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
@@ -24,7 +24,13 @@
#include "changk104.h"
#include "user.h"
+#include <core/client.h>
#include <core/gpuobj.h>
+#include <subdev/bar.h>
+#include <subdev/fault.h>
+#include <subdev/top.h>
+#include <subdev/timer.h>
+#include <engine/sw.h>
#include <nvif/class.h>
@@ -109,8 +115,363 @@ tu102_fifo = {
.cgrp_force = true,
};
+static void
+tu102_fifo_recover_work(struct work_struct *w)
+{
+ struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_engine *engine;
+ unsigned long flags;
+ u32 engm, runm, todo;
+ int engn, runl;
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ runm = fifo->recover.runm;
+ engm = fifo->recover.engm;
+ fifo->recover.engm = 0;
+ fifo->recover.runm = 0;
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+
+ nvkm_mask(device, 0x002630, runm, runm);
+
+ for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
+ if ((engine = fifo->engine[engn].engine)) {
+ nvkm_subdev_fini(&engine->subdev, false);
+ WARN_ON(nvkm_subdev_init(&engine->subdev));
+ }
+ }
+
+ for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
+ gk104_fifo_runlist_update(fifo, runl);
+
+ nvkm_mask(device, 0x002630, runm, 0x00000000);
+}
+
+static void tu102_fifo_recover_engn(struct gk104_fifo *fifo, int engn);
+
+static void
+tu102_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 runm = BIT(runl);
+
+ assert_spin_locked(&fifo->base.lock);
+ if (fifo->recover.runm & runm)
+ return;
+ fifo->recover.runm |= runm;
+
+ /* Block runlist to prevent channel assignment(s) from changing. */
+ nvkm_mask(device, 0x002630, runm, runm);
+
+ /* Schedule recovery. */
+ nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
+ schedule_work(&fifo->recover.work);
+}
+
+static struct gk104_fifo_chan *
+tu102_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid)
+{
+ struct gk104_fifo_chan *chan;
+ struct nvkm_fifo_cgrp *cgrp;
+
+ list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
+ if (chan->base.chid == chid) {
+ list_del_init(&chan->head);
+ return chan;
+ }
+ }
+
+ list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
+ if (cgrp->id == chid) {
+ chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
+ list_del_init(&chan->head);
+ if (!--cgrp->chan_nr)
+ list_del_init(&cgrp->head);
+ return chan;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+tu102_fifo_recover_chan(struct nvkm_fifo *base, int chid)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
+ const u32 runl = (stat & 0x000f0000) >> 16;
+ const bool used = (stat & 0x00000001);
+ unsigned long engn, engm = fifo->runlist[runl].engm;
+ struct gk104_fifo_chan *chan;
+
+ assert_spin_locked(&fifo->base.lock);
+ if (!used)
+ return;
+
+ /* Lookup SW state for channel, and mark it as dead. */
+ chan = tu102_fifo_recover_chid(fifo, runl, chid);
+ if (chan) {
+ chan->killed = true;
+ nvkm_fifo_kevent(&fifo->base, chid);
+ }
+
+ /* Disable channel. */
+ nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
+ nvkm_warn(subdev, "channel %d: killed\n", chid);
+
+ /* Block channel assignments from changing during recovery. */
+ tu102_fifo_recover_runl(fifo, runl);
+
+ /* Schedule recovery for any engines the channel is on. */
+ for_each_set_bit(engn, &engm, fifo->engine_nr) {
+ struct gk104_fifo_engine_status status;
+
+ gk104_fifo_engine_status(fifo, engn, &status);
+ if (!status.chan || status.chan->id != chid)
+ continue;
+ tu102_fifo_recover_engn(fifo, engn);
+ }
+}
+
+static void
+tu102_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const u32 runl = fifo->engine[engn].runl;
+ const u32 engm = BIT(engn);
+ struct gk104_fifo_engine_status status;
+
+ assert_spin_locked(&fifo->base.lock);
+ if (fifo->recover.engm & engm)
+ return;
+ fifo->recover.engm |= engm;
+
+ /* Block channel assignments from changing during recovery. */
+ tu102_fifo_recover_runl(fifo, runl);
+
+ /* Determine which channel (if any) is currently on the engine. */
+ gk104_fifo_engine_status(fifo, engn, &status);
+ if (status.chan) {
+ /* The channel is no longer viable, kill it. */
+ tu102_fifo_recover_chan(&fifo->base, status.chan->id);
+ }
+
+ /* Preempt the runlist */
+ nvkm_wr32(device, 0x2638, BIT(runl));
+
+ /* Schedule recovery. */
+ nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
+ schedule_work(&fifo->recover.work);
+}
+
+static void
+tu102_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const struct nvkm_enum *er, *ee, *ec, *ea;
+ struct nvkm_engine *engine = NULL;
+ struct nvkm_fifo_chan *chan;
+ unsigned long flags;
+ const char *en = "";
+ char ct[8] = "HUB/";
+ int engn;
+
+ er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
+ ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
+ if (info->hub) {
+ ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client);
+ } else {
+ ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client);
+ snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
+ }
+ ea = nvkm_enum_find(fifo->func->fault.access, info->access);
+
+ if (ee && ee->data2) {
+ switch (ee->data2) {
+ case NVKM_SUBDEV_BAR:
+ nvkm_bar_bar1_reset(device);
+ break;
+ case NVKM_SUBDEV_INSTMEM:
+ nvkm_bar_bar2_reset(device);
+ break;
+ case NVKM_ENGINE_IFB:
+ nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
+ break;
+ default:
+ engine = nvkm_device_engine(device, ee->data2, 0);
+ break;
+ }
+ }
+
+ if (ee == NULL) {
+ struct nvkm_subdev *subdev = nvkm_top_fault(device, info->engine);
+ if (subdev) {
+ if (subdev->func == &nvkm_engine)
+ engine = container_of(subdev, typeof(*engine), subdev);
+ en = engine->subdev.name;
+ }
+ } else {
+ en = ee->name;
+ }
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst);
+
+ nvkm_error(subdev,
+ "fault %02x [%s] at %016llx engine %02x [%s] client %02x "
+ "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
+ info->access, ea ? ea->name : "", info->addr,
+ info->engine, ee ? ee->name : en,
+ info->client, ct, ec ? ec->name : "",
+ info->reason, er ? er->name : "", chan ? chan->chid : -1,
+ info->inst, chan ? chan->object.client->name : "unknown");
+
+ /* Kill the channel that caused the fault. */
+ if (chan)
+ tu102_fifo_recover_chan(&fifo->base, chan->chid);
+
+ /* Channel recovery will probably have already done this for the
+ * correct engine(s), but just in case we can't find the channel
+ * information...
+ */
+ for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
+ if (fifo->engine[engn].engine == engine) {
+ tu102_fifo_recover_engn(fifo, engn);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+static void
+tu102_fifo_intr_ctxsw_timeout(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ unsigned long flags, engm;
+ u32 engn;
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+
+ engm = nvkm_rd32(device, 0x2a30);
+ nvkm_wr32(device, 0x2a30, engm);
+
+ for_each_set_bit(engn, &engm, 32)
+ tu102_fifo_recover_engn(fifo, engn);
+
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+static void
+tu102_fifo_intr_sched(struct gk104_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x00254c);
+ u32 code = intr & 0x000000ff;
+
+ nvkm_error(subdev, "SCHED_ERROR %02x\n", code);
+}
+
+static void
+tu102_fifo_intr(struct nvkm_fifo *base)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x002140);
+ u32 stat = nvkm_rd32(device, 0x002100) & mask;
+
+ if (stat & 0x00000001) {
+ gk104_fifo_intr_bind(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000001);
+ stat &= ~0x00000001;
+ }
+
+ if (stat & 0x00000002) {
+ tu102_fifo_intr_ctxsw_timeout(fifo);
+ stat &= ~0x00000002;
+ }
+
+ if (stat & 0x00000100) {
+ tu102_fifo_intr_sched(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
+ if (stat & 0x00010000) {
+ gk104_fifo_intr_chsw(fifo);
+ nvkm_wr32(device, 0x002100, 0x00010000);
+ stat &= ~0x00010000;
+ }
+
+ if (stat & 0x20000000) {
+ u32 mask = nvkm_rd32(device, 0x0025a0);
+
+ while (mask) {
+ u32 unit = __ffs(mask);
+
+ gk104_fifo_intr_pbdma_0(fifo, unit);
+ gk104_fifo_intr_pbdma_1(fifo, unit);
+ nvkm_wr32(device, 0x0025a0, (1 << unit));
+ mask &= ~(1 << unit);
+ }
+ stat &= ~0x20000000;
+ }
+
+ if (stat & 0x40000000) {
+ gk104_fifo_intr_runlist(fifo);
+ stat &= ~0x40000000;
+ }
+
+ if (stat & 0x80000000) {
+ nvkm_wr32(device, 0x002100, 0x80000000);
+ gk104_fifo_intr_engine(fifo);
+ stat &= ~0x80000000;
+ }
+
+ if (stat) {
+ nvkm_error(subdev, "INTR %08x\n", stat);
+ nvkm_mask(device, 0x002140, stat, 0x00000000);
+ nvkm_wr32(device, 0x002100, stat);
+ }
+}
+
+static const struct nvkm_fifo_func
+tu102_fifo_ = {
+ .dtor = gk104_fifo_dtor,
+ .oneinit = gk104_fifo_oneinit,
+ .info = gk104_fifo_info,
+ .init = gk104_fifo_init,
+ .fini = gk104_fifo_fini,
+ .intr = tu102_fifo_intr,
+ .fault = tu102_fifo_fault,
+ .engine_id = gk104_fifo_engine_id,
+ .id_engine = gk104_fifo_id_engine,
+ .uevent_init = gk104_fifo_uevent_init,
+ .uevent_fini = gk104_fifo_uevent_fini,
+ .recover_chan = tu102_fifo_recover_chan,
+ .class_get = gk104_fifo_class_get,
+ .class_new = gk104_fifo_class_new,
+};
+
int
-tu102_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+tu102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&tu102_fifo, device, index, 4096, pfifo);
+ struct gk104_fifo *fifo;
+
+ if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+ return -ENOMEM;
+ fifo->func = &tu102_fifo;
+ INIT_WORK(&fifo->recover.work, tu102_fifo_recover_work);
+ *pfifo = &fifo->base;
+
+ return nvkm_fifo_ctor(&tu102_fifo_, device, type, inst, 4096, &fifo->base);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
index d41fb94524e9..61759f54406e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
@@ -175,8 +175,8 @@ nvkm_gr = {
int
nvkm_gr_ctor(const struct nvkm_gr_func *func, struct nvkm_device *device,
- int index, bool enable, struct nvkm_gr *gr)
+ enum nvkm_subdev_type type, int inst, bool enable, struct nvkm_gr *gr)
{
gr->func = func;
- return nvkm_engine_ctor(&nvkm_gr, device, index, enable, &gr->engine);
+ return nvkm_engine_ctor(&nvkm_gr, device, type, inst, enable, &gr->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
index da1ba74682b4..65c332118fd6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
@@ -192,7 +192,7 @@ g84_gr = {
};
int
-g84_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+g84_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv50_gr_new_(&g84_gr, device, index, pgr);
+ return nv50_gr_new_(&g84_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 749f73fc45a8..397ff4fe9df8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2087,8 +2087,8 @@ gf100_gr_flcn = {
};
int
-gf100_gr_new_(const struct gf100_gr_fwif *fwif,
- struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf100_gr_new_(const struct gf100_gr_fwif *fwif, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
struct gf100_gr *gr;
int ret;
@@ -2097,7 +2097,7 @@ gf100_gr_new_(const struct gf100_gr_fwif *fwif,
return -ENOMEM;
*pgr = &gr->base;
- ret = nvkm_gr_ctor(&gf100_gr_, device, index, true, &gr->base);
+ ret = nvkm_gr_ctor(&gf100_gr_, device, type, inst, true, &gr->base);
if (ret)
return ret;
@@ -2483,7 +2483,7 @@ gf100_gr_fwif[] = {
};
int
-gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf100_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gf100_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gf100_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index dfd5dd74f0d5..c0038f906135 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -416,6 +416,6 @@ void gm20b_gr_acr_bld_patch(struct nvkm_acr *, u32, s64);
extern const struct nvkm_acr_lsf_func gp108_gr_gpccs_acr;
extern const struct nvkm_acr_lsf_func gp108_gr_fecs_acr;
-int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, int,
+int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 0536fe8b2b92..3acd99c306f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -152,7 +152,7 @@ gf104_gr_fwif[] = {
};
int
-gf104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf104_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gf104_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gf104_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 14284b06112f..030640bb3dca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -151,7 +151,7 @@ gf108_gr_fwif[] = {
};
int
-gf108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf108_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gf108_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gf108_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 280752551a3a..616e2def1865 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -127,7 +127,7 @@ gf110_gr_fwif[] = {
};
int
-gf110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf110_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gf110_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gf110_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 235c3fbe4b95..669e7536970e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -192,7 +192,7 @@ gf117_gr_fwif[] = {
};
int
-gf117_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf117_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gf117_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gf117_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 7eac385ece97..5b09bda8110c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -218,7 +218,7 @@ gf119_gr_fwif[] = {
};
int
-gf119_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gf119_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gf119_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gf119_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 89f51d76082b..b680eaa0f350 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -497,7 +497,7 @@ gk104_gr_fwif[] = {
};
int
-gk104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk104_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gk104_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gk104_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 735f05e54d62..103e06a77e65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -393,7 +393,7 @@ gk110_gr_fwif[] = {
};
int
-gk110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk110_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gk110_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gk110_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index adc971be8f3b..034d0b11a17d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -144,7 +144,8 @@ gk110b_gr_fwif[] = {
};
int
-gk110b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk110b_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gk110b_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gk110b_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index aa0eff6795ac..116d682f9f96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -202,7 +202,7 @@ gk208_gr_fwif[] = {
};
int
-gk208_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk208_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gk208_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gk208_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 6d4d72851610..be0b2cefd8e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -357,7 +357,7 @@ gk20a_gr_fwif[] = {
};
int
-gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gk20a_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gk20a_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gk20a_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 09bb78ba9d00..310987174cb5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -437,7 +437,7 @@ gm107_gr_fwif[] = {
};
int
-gm107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gm107_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gm107_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gm107_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 815137047518..5c38ff0fe7f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -288,7 +288,7 @@ gm200_gr_fwif[] = {
};
int
-gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gm200_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gm200_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gm200_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index 1aab691fa71c..ec1c46e47e00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -181,7 +181,7 @@ gm20b_gr_fwif[] = {
};
int
-gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gm20b_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gm20b_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gm20b_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index ddba7ce937c7..0550dd6f46f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -156,7 +156,7 @@ gp100_gr_fwif[] = {
};
int
-gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp100_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gp100_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gp100_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index c083f3757ff7..5b001f374be0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -152,7 +152,7 @@ gp102_gr_fwif[] = {
};
int
-gp102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gp102_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gp102_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
index f6a31e9a8cc8..2655574ec63b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
@@ -93,7 +93,7 @@ gp104_gr_fwif[] = {
};
int
-gp104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp104_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gp104_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gp104_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 2c80c6a75b56..adabc04d4f3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -82,7 +82,7 @@ gp107_gr_fwif[] = {
};
int
-gp107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp107_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gp107_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gp107_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
index 2be8f416dd6f..7310f0466bb7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
@@ -92,7 +92,7 @@ gp108_gr_fwif[] = {
};
int
-gp108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp108_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gp108_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gp108_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 6edc4bc7ed44..e13683b6e7b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -94,7 +94,7 @@ gp10b_gr_fwif[] = {
};
int
-gp10b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gp10b_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gp10b_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gp10b_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
index c711a55ce392..1dfc65d45b52 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
@@ -43,7 +43,7 @@ gt200_gr = {
};
int
-gt200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gt200_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv50_gr_new_(&gt200_gr, device, index, pgr);
+ return nv50_gr_new_(&gt200_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
index fa103df32ec7..fcb5ead345a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
@@ -44,7 +44,7 @@ gt215_gr = {
};
int
-gt215_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gt215_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv50_gr_new_(&gt215_gr, device, index, pgr);
+ return nv50_gr_new_(&gt215_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
index 2189a8f4e644..4d043c1173ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
@@ -141,7 +141,7 @@ gv100_gr_fwif[] = {
};
int
-gv100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+gv100_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(gv100_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(gv100_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
index eb1a90644752..cf782b64f62e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
@@ -42,7 +42,7 @@ mcp79_gr = {
};
int
-mcp79_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+mcp79_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv50_gr_new_(&mcp79_gr, device, index, pgr);
+ return nv50_gr_new_(&mcp79_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
index c91eb56e9327..6f90a6395453 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
@@ -44,7 +44,7 @@ mcp89_gr = {
};
int
-mcp89_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+mcp89_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv50_gr_new_(&mcp89_gr, device, index, pgr);
+ return nv50_gr_new_(&mcp89_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
index 9c2e985dc079..0bc1a238de43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
@@ -1413,7 +1413,7 @@ nv04_gr = {
};
int
-nv04_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv04_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
struct nv04_gr *gr;
@@ -1422,5 +1422,5 @@ nv04_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
spin_lock_init(&gr->lock);
*pgr = &gr->base;
- return nvkm_gr_ctor(&nv04_gr, device, index, true, &gr->base);
+ return nvkm_gr_ctor(&nv04_gr, device, type, inst, true, &gr->base);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
index 4ebbfbdd8240..942450b33bc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
@@ -1173,7 +1173,7 @@ nv10_gr_init(struct nvkm_gr *base)
int
nv10_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
- int index, struct nvkm_gr **pgr)
+ enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
struct nv10_gr *gr;
@@ -1182,7 +1182,7 @@ nv10_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
spin_lock_init(&gr->lock);
*pgr = &gr->base;
- return nvkm_gr_ctor(func, device, index, true, &gr->base);
+ return nvkm_gr_ctor(func, device, type, inst, true, &gr->base);
}
static const struct nvkm_gr_func
@@ -1215,7 +1215,7 @@ nv10_gr = {
};
int
-nv10_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv10_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv10_gr_new_(&nv10_gr, device, index, pgr);
+ return nv10_gr_new_(&nv10_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
index 4327baea02af..5cfe927c9123 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
@@ -3,7 +3,7 @@
#define __NV10_GR_H__
#include "priv.h"
-int nv10_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+int nv10_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gr **);
int nv10_gr_init(struct nvkm_gr *);
void nv10_gr_intr(struct nvkm_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
index 3e2c6856b4c4..69ece259df86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
@@ -53,7 +53,7 @@ nv15_gr = {
};
int
-nv15_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv15_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv10_gr_new_(&nv15_gr, device, index, pgr);
+ return nv10_gr_new_(&nv15_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
index 12437d085a73..e39dfc7d4077 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
@@ -53,7 +53,7 @@ nv17_gr = {
};
int
-nv17_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv17_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv10_gr_new_(&nv17_gr, device, index, pgr);
+ return nv10_gr_new_(&nv17_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index d837630a3625..6bff10cee71b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -330,7 +330,7 @@ nv20_gr_dtor(struct nvkm_gr *base)
int
nv20_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
- int index, struct nvkm_gr **pgr)
+ enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
struct nv20_gr *gr;
@@ -338,7 +338,7 @@ nv20_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
return -ENOMEM;
*pgr = &gr->base;
- return nvkm_gr_ctor(func, device, index, true, &gr->base);
+ return nvkm_gr_ctor(func, device, type, inst, true, &gr->base);
}
static const struct nvkm_gr_func
@@ -370,7 +370,7 @@ nv20_gr = {
};
int
-nv20_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv20_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv20_gr_new_(&nv20_gr, device, index, pgr);
+ return nv20_gr_new_(&nv20_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
index e57407a8a7c3..c0d2be53413e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
@@ -9,8 +9,8 @@ struct nv20_gr {
struct nvkm_memory *ctxtab;
};
-int nv20_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *,
- int, struct nvkm_gr **);
+int nv20_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_gr **);
void *nv20_gr_dtor(struct nvkm_gr *);
int nv20_gr_oneinit(struct nvkm_gr *);
int nv20_gr_init(struct nvkm_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
index 32d29d3faee0..f3a56f17d94a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
@@ -129,7 +129,7 @@ nv25_gr = {
};
int
-nv25_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv25_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv20_gr_new_(&nv25_gr, device, index, pgr);
+ return nv20_gr_new_(&nv25_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
index f941062c66f0..f268d2642d29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
@@ -120,7 +120,7 @@ nv2a_gr = {
};
int
-nv2a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv2a_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv20_gr_new_(&nv2a_gr, device, index, pgr);
+ return nv20_gr_new_(&nv2a_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index 785ec956df0f..e5737cdf2fa1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -194,7 +194,7 @@ nv30_gr = {
};
int
-nv30_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv30_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv20_gr_new_(&nv30_gr, device, index, pgr);
+ return nv20_gr_new_(&nv30_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index bd610d75c677..1ab2da8ebf4e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -131,7 +131,7 @@ nv34_gr = {
};
int
-nv34_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv34_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv20_gr_new_(&nv34_gr, device, index, pgr);
+ return nv20_gr_new_(&nv34_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 89db7f523037..591260f5676b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -131,7 +131,7 @@ nv35_gr = {
};
int
-nv35_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv35_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv20_gr_new_(&nv35_gr, device, index, pgr);
+ return nv20_gr_new_(&nv35_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index 5f1ad8344ea9..67f3535ff97e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -429,7 +429,7 @@ nv40_gr_init(struct nvkm_gr *base)
int
nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
- int index, struct nvkm_gr **pgr)
+ enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
struct nv40_gr *gr;
@@ -438,7 +438,7 @@ nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
*pgr = &gr->base;
INIT_LIST_HEAD(&gr->chan);
- return nvkm_gr_ctor(func, device, index, true, &gr->base);
+ return nvkm_gr_ctor(func, device, type, inst, true, &gr->base);
}
static const struct nvkm_gr_func
@@ -470,7 +470,7 @@ nv40_gr = {
};
int
-nv40_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv40_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv40_gr_new_(&nv40_gr, device, index, pgr);
+ return nv40_gr_new_(&nv40_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
index e6128791b2d2..f3d3d3a5ae5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
@@ -10,7 +10,7 @@ struct nv40_gr {
struct list_head chan;
};
-int nv40_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+int nv40_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gr **);
int nv40_gr_init(struct nvkm_gr *);
void nv40_gr_intr(struct nvkm_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
index 45ff80254eb4..22b6a38a7031 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
@@ -102,7 +102,7 @@ nv44_gr = {
};
int
-nv44_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv44_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv40_gr_new_(&nv44_gr, device, index, pgr);
+ return nv40_gr_new_(&nv44_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
index df16ffda1749..563a10097e95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
@@ -761,7 +761,7 @@ nv50_gr_init(struct nvkm_gr *base)
int
nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
- int index, struct nvkm_gr **pgr)
+ enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
struct nv50_gr *gr;
@@ -770,7 +770,7 @@ nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
spin_lock_init(&gr->lock);
*pgr = &gr->base;
- return nvkm_gr_ctor(func, device, index, true, &gr->base);
+ return nvkm_gr_ctor(func, device, type, inst, true, &gr->base);
}
static const struct nvkm_gr_func
@@ -790,7 +790,7 @@ nv50_gr = {
};
int
-nv50_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+nv50_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return nv50_gr_new_(&nv50_gr, device, index, pgr);
+ return nv50_gr_new_(&nv50_gr, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
index 465f4da0ddfc..84388c42e5c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
@@ -11,7 +11,7 @@ struct nv50_gr {
u32 size;
};
-int nv50_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+int nv50_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gr **);
int nv50_gr_init(struct nvkm_gr *);
void nv50_gr_intr(struct nvkm_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
index 3b30f24032cc..9b2c66e8be90 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -7,8 +7,8 @@
struct nvkm_fb_tile;
struct nvkm_fifo_chan;
-int nvkm_gr_ctor(const struct nvkm_gr_func *, struct nvkm_device *,
- int index, bool enable, struct nvkm_gr *);
+int nvkm_gr_ctor(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ bool enable, struct nvkm_gr *);
bool nv04_gr_idle(struct nvkm_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
index 6039f9948aa2..1a8a21844e12 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -198,7 +198,7 @@ tu102_gr_fwif[] = {
};
int
-tu102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+tu102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
- return gf100_gr_new_(tu102_gr_fwif, device, index, pgr);
+ return gf100_gr_new_(tu102_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
index c0e11a071843..0fcc0ffa1e40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
@@ -37,7 +37,8 @@ g84_mpeg = {
};
int
-g84_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+g84_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pmpeg)
{
- return nvkm_engine_new_(&g84_mpeg, device, index, true, pmpeg);
+ return nvkm_engine_new_(&g84_mpeg, device, type, inst, true, pmpeg);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index 7fea7d45202f..b1054db4c1b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -274,7 +274,7 @@ nv31_mpeg_ = {
int
nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device,
- int index, struct nvkm_engine **pmpeg)
+ enum nvkm_subdev_type type, int inst, struct nvkm_engine **pmpeg)
{
struct nv31_mpeg *mpeg;
@@ -283,8 +283,7 @@ nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device,
mpeg->func = func;
*pmpeg = &mpeg->engine;
- return nvkm_engine_ctor(&nv31_mpeg_, device, index,
- true, &mpeg->engine);
+ return nvkm_engine_ctor(&nv31_mpeg_, device, type, inst, true, &mpeg->engine);
}
static const struct nv31_mpeg_func
@@ -293,7 +292,8 @@ nv31_mpeg = {
};
int
-nv31_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+nv31_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pmpeg)
{
- return nv31_mpeg_new_(&nv31_mpeg, device, index, pmpeg);
+ return nv31_mpeg_new_(&nv31_mpeg, device, type, inst, pmpeg);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
index b3e131538858..9f30aaaf809e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
@@ -11,8 +11,8 @@ struct nv31_mpeg {
struct nv31_mpeg_chan *chan;
};
-int nv31_mpeg_new_(const struct nv31_mpeg_func *, struct nvkm_device *,
- int index, struct nvkm_engine **);
+int nv31_mpeg_new_(const struct nv31_mpeg_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_engine **);
struct nv31_mpeg_func {
bool (*mthd_dma)(struct nvkm_device *, u32 mthd, u32 data);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
index b5ec7c504dc6..179167484ef1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
@@ -76,7 +76,8 @@ nv40_mpeg = {
};
int
-nv40_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+nv40_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pmpeg)
{
- return nv31_mpeg_new_(&nv40_mpeg, device, index, pmpeg);
+ return nv31_mpeg_new_(&nv40_mpeg, device, type, inst, pmpeg);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index c3cf02ed468e..521ce43a2871 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -203,7 +203,8 @@ nv44_mpeg = {
};
int
-nv44_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+nv44_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pmpeg)
{
struct nv44_mpeg *mpeg;
@@ -212,5 +213,5 @@ nv44_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
INIT_LIST_HEAD(&mpeg->chan);
*pmpeg = &mpeg->engine;
- return nvkm_engine_ctor(&nv44_mpeg, device, index, true, &mpeg->engine);
+ return nvkm_engine_ctor(&nv44_mpeg, device, type, inst, true, &mpeg->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
index 6df880a39019..e6374f36961c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
@@ -129,7 +129,8 @@ nv50_mpeg = {
};
int
-nv50_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+nv50_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pmpeg)
{
- return nvkm_engine_new_(&nv50_mpeg, device, index, true, pmpeg);
+ return nvkm_engine_new_(&nv50_mpeg, device, type, inst, true, pmpeg);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
index 80211f76093b..842fcfbd28b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
@@ -24,9 +24,8 @@
#include "priv.h"
int
-nvkm_mspdec_new_(const struct nvkm_falcon_func *func,
- struct nvkm_device *device, int index,
- struct nvkm_engine **pengine)
+nvkm_mspdec_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
{
- return nvkm_falcon_new_(func, device, index, true, 0x085000, pengine);
+ return nvkm_falcon_new_(func, device, type, inst, true, 0x085000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
index f30cf1dcfb30..ecb06d68f544 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
@@ -43,8 +43,8 @@ g98_mspdec = {
};
int
-g98_mspdec_new(struct nvkm_device *device, int index,
- struct nvkm_engine **pengine)
+g98_mspdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
{
- return nvkm_mspdec_new_(&g98_mspdec, device, index, pengine);
+ return nvkm_mspdec_new_(&g98_mspdec, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
index cfe1aa81bd14..0a69bd767d69 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
@@ -43,8 +43,8 @@ gf100_mspdec = {
};
int
-gf100_mspdec_new(struct nvkm_device *device, int index,
+gf100_mspdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_mspdec_new_(&gf100_mspdec, device, index, pengine);
+ return nvkm_mspdec_new_(&gf100_mspdec, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
index 24272b4927bc..a08991dca428 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
@@ -35,8 +35,8 @@ gk104_mspdec = {
};
int
-gk104_mspdec_new(struct nvkm_device *device, int index,
+gk104_mspdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_mspdec_new_(&gk104_mspdec, device, index, pengine);
+ return nvkm_mspdec_new_(&gk104_mspdec, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
index cf6e59ad6ee2..791fb03a32ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
@@ -35,8 +35,8 @@ gt215_mspdec = {
};
int
-gt215_mspdec_new(struct nvkm_device *device, int index,
- struct nvkm_engine **pengine)
+gt215_mspdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
{
- return nvkm_mspdec_new_(&gt215_mspdec, device, index, pengine);
+ return nvkm_mspdec_new_(&gt215_mspdec, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
index 86445a2600d0..2bc5537d40a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
@@ -3,8 +3,8 @@
#define __NVKM_MSPDEC_PRIV_H__
#include <engine/mspdec.h>
-int nvkm_mspdec_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
- int index, struct nvkm_engine **);
+int nvkm_mspdec_new_(const struct nvkm_falcon_func *, struct nvkm_device *, enum nvkm_subdev_type,
+ int, struct nvkm_engine **);
void g98_mspdec_init(struct nvkm_falcon *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
index bfae5e60e925..45a9411ab2e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
@@ -25,7 +25,7 @@
int
nvkm_msppp_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
- int index, struct nvkm_engine **pengine)
+ enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
{
- return nvkm_falcon_new_(func, device, index, true, 0x086000, pengine);
+ return nvkm_falcon_new_(func, device, type, inst, true, 0x086000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
index c45dbf79d1f9..160120b9bd64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
@@ -43,8 +43,8 @@ g98_msppp = {
};
int
-g98_msppp_new(struct nvkm_device *device, int index,
+g98_msppp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_msppp_new_(&g98_msppp, device, index, pengine);
+ return nvkm_msppp_new_(&g98_msppp, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
index 803c62ab516e..debed9ae8731 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
@@ -43,8 +43,8 @@ gf100_msppp = {
};
int
-gf100_msppp_new(struct nvkm_device *device, int index,
+gf100_msppp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_msppp_new_(&gf100_msppp, device, index, pengine);
+ return nvkm_msppp_new_(&gf100_msppp, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
index 49cbf72cee4b..a2fd736fef94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
@@ -35,8 +35,8 @@ gt215_msppp = {
};
int
-gt215_msppp_new(struct nvkm_device *device, int index,
- struct nvkm_engine **pengine)
+gt215_msppp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
{
- return nvkm_msppp_new_(&gt215_msppp, device, index, pengine);
+ return nvkm_msppp_new_(&gt215_msppp, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
index f20b10915db2..582ab8ce1425 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
@@ -3,8 +3,8 @@
#define __NVKM_MSPPP_PRIV_H__
#include <engine/msppp.h>
-int nvkm_msppp_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
- int index, struct nvkm_engine **);
+int nvkm_msppp_new_(const struct nvkm_falcon_func *, struct nvkm_device *, enum nvkm_subdev_type,
+ int, struct nvkm_engine **);
void g98_msppp_init(struct nvkm_falcon *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
index 745bbb653dc0..7be42b980e57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
@@ -25,7 +25,7 @@
int
nvkm_msvld_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
- int index, struct nvkm_engine **pengine)
+ enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
{
- return nvkm_falcon_new_(func, device, index, true, 0x084000, pengine);
+ return nvkm_falcon_new_(func, device, type, inst, true, 0x084000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
index 4a2a9f0494af..cfa2065319a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
@@ -43,8 +43,8 @@ g98_msvld = {
};
int
-g98_msvld_new(struct nvkm_device *device, int index,
+g98_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_msvld_new_(&g98_msvld, device, index, pengine);
+ return nvkm_msvld_new_(&g98_msvld, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
index 1695e532c081..8d58ad8e04d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
@@ -43,8 +43,8 @@ gf100_msvld = {
};
int
-gf100_msvld_new(struct nvkm_device *device, int index,
+gf100_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_msvld_new_(&gf100_msvld, device, index, pengine);
+ return nvkm_msvld_new_(&gf100_msvld, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
index b640cd63ebe8..b28be28046f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
@@ -35,8 +35,8 @@ gk104_msvld = {
};
int
-gk104_msvld_new(struct nvkm_device *device, int index,
+gk104_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_msvld_new_(&gk104_msvld, device, index, pengine);
+ return nvkm_msvld_new_(&gk104_msvld, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
index 201e8ef3519e..d7489f972c99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
@@ -35,8 +35,8 @@ gt215_msvld = {
};
int
-gt215_msvld_new(struct nvkm_device *device, int index,
- struct nvkm_engine **pengine)
+gt215_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
{
- return nvkm_msvld_new_(&gt215_msvld, device, index, pengine);
+ return nvkm_msvld_new_(&gt215_msvld, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
index a0f540ef257b..16c30b62ab09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
@@ -35,8 +35,8 @@ mcp89_msvld = {
};
int
-mcp89_msvld_new(struct nvkm_device *device, int index,
- struct nvkm_engine **pengine)
+mcp89_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
{
- return nvkm_msvld_new_(&mcp89_msvld, device, index, pengine);
+ return nvkm_msvld_new_(&mcp89_msvld, device, type, inst, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
index 5cd1e83badbb..f729d919b054 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
@@ -3,8 +3,8 @@
#define __NVKM_MSVLD_PRIV_H__
#include <engine/msvld.h>
-int nvkm_msvld_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
- int index, struct nvkm_engine **);
+int nvkm_msvld_new_(const struct nvkm_falcon_func *, struct nvkm_device *, enum nvkm_subdev_type,
+ int, struct nvkm_engine **);
void g98_msvld_init(struct nvkm_falcon *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
index 9b23c1b70ebf..b0181cc5953b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
@@ -37,7 +37,7 @@ nvkm_nvdec = {
int
nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device,
- int index, struct nvkm_nvdec **pnvdec)
+ enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec)
{
struct nvkm_nvdec *nvdec;
int ret;
@@ -45,7 +45,7 @@ nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device,
if (!(nvdec = *pnvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL)))
return -ENOMEM;
- ret = nvkm_engine_ctor(&nvkm_nvdec, device, index, true,
+ ret = nvkm_engine_ctor(&nvkm_nvdec, device, type, inst, true,
&nvdec->engine);
if (ret)
return ret;
@@ -57,5 +57,5 @@ nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device,
nvdec->func = fwif->func;
return nvkm_falcon_ctor(nvdec->func->flcn, &nvdec->engine.subdev,
- nvkm_subdev_name[index], 0, &nvdec->falcon);
+ nvdec->engine.subdev.name, 0, &nvdec->falcon);
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
index 0ab27ab4d8ee..8c44ce44a6d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
@@ -56,8 +56,8 @@ gm107_nvdec_fwif[] = {
};
int
-gm107_nvdec_new(struct nvkm_device *device, int index,
+gm107_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_nvdec **pnvdec)
{
- return nvkm_nvdec_new_(gm107_nvdec_fwif, device, index, pnvdec);
+ return nvkm_nvdec_new_(gm107_nvdec_fwif, device, type, inst, pnvdec);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
index e14da8b000d0..0920f6a887e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -14,6 +14,6 @@ struct nvkm_nvdec_fwif {
const struct nvkm_nvdec_func *func;
};
-int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif,
- struct nvkm_device *, int, struct nvkm_nvdec **);
+int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *,
+ enum nvkm_subdev_type, int, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
index 484100e15668..c39e797dc7c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
@@ -39,7 +39,7 @@ nvkm_nvenc = {
int
nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device,
- int index, struct nvkm_nvenc **pnvenc)
+ enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc)
{
struct nvkm_nvenc *nvenc;
int ret;
@@ -47,7 +47,7 @@ nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device,
if (!(nvenc = *pnvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL)))
return -ENOMEM;
- ret = nvkm_engine_ctor(&nvkm_nvenc, device, index, true,
+ ret = nvkm_engine_ctor(&nvkm_nvenc, device, type, inst, true,
&nvenc->engine);
if (ret)
return ret;
@@ -59,5 +59,5 @@ nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device,
nvenc->func = fwif->func;
return nvkm_falcon_ctor(nvenc->func->flcn, &nvenc->engine.subdev,
- nvkm_subdev_name[index], 0, &nvenc->falcon);
+ nvenc->engine.subdev.name, 0, &nvenc->falcon);
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
index d249c8ffb2d5..f44d41bf2034 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
@@ -56,8 +56,8 @@ gm107_nvenc_fwif[] = {
};
int
-gm107_nvenc_new(struct nvkm_device *device, int index,
+gm107_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_nvenc **pnvenc)
{
- return nvkm_nvenc_new_(gm107_nvenc_fwif, device, index, pnvenc);
+ return nvkm_nvenc_new_(gm107_nvenc_fwif, device, type, inst, pnvenc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
index 100fa5ebbeef..4130a2bfbb4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
@@ -14,6 +14,6 @@ struct nvkm_nvenc_fwif {
const struct nvkm_nvenc_func *func;
};
-int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *,
+int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, enum nvkm_subdev_type,
int, struct nvkm_nvenc **pnvenc);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index b2785bee418e..8fe0444f761e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -628,10 +628,10 @@ nvkm_perfmon_dtor(struct nvkm_object *object)
{
struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
struct nvkm_pm *pm = perfmon->pm;
- mutex_lock(&pm->engine.subdev.mutex);
- if (pm->perfmon == &perfmon->object)
- pm->perfmon = NULL;
- mutex_unlock(&pm->engine.subdev.mutex);
+ spin_lock(&pm->client.lock);
+ if (pm->client.object == &perfmon->object)
+ pm->client.object = NULL;
+ spin_unlock(&pm->client.lock);
return perfmon;
}
@@ -671,11 +671,11 @@ nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
if (ret)
return ret;
- mutex_lock(&pm->engine.subdev.mutex);
- if (pm->perfmon == NULL)
- pm->perfmon = *pobject;
- ret = (pm->perfmon == *pobject) ? 0 : -EBUSY;
- mutex_unlock(&pm->engine.subdev.mutex);
+ spin_lock(&pm->client.lock);
+ if (pm->client.object == NULL)
+ pm->client.object = *pobject;
+ ret = (pm->client.object == *pobject) ? 0 : -EBUSY;
+ spin_unlock(&pm->client.lock);
return ret;
}
@@ -858,10 +858,11 @@ nvkm_pm = {
int
nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
- int index, struct nvkm_pm *pm)
+ enum nvkm_subdev_type type, int inst, struct nvkm_pm *pm)
{
pm->func = func;
INIT_LIST_HEAD(&pm->domains);
INIT_LIST_HEAD(&pm->sources);
- return nvkm_engine_ctor(&nvkm_pm, device, index, true, &pm->engine);
+ spin_lock_init(&pm->client.lock);
+ return nvkm_engine_ctor(&nvkm_pm, device, type, inst, true, &pm->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
index 6e441ddafd86..0086d00eb162 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
@@ -159,7 +159,7 @@ g84_pm[] = {
};
int
-g84_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+g84_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return nv40_pm_new_(g84_pm, device, index, ppm);
+ return nv40_pm_new_(g84_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
index fe2532ee4145..8e02701def8e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
@@ -187,7 +187,7 @@ gf100_pm_ = {
int
gf100_pm_new_(const struct gf100_pm_func *func, struct nvkm_device *device,
- int index, struct nvkm_pm **ppm)
+ enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
struct nvkm_pm *pm;
u32 mask;
@@ -196,7 +196,7 @@ gf100_pm_new_(const struct gf100_pm_func *func, struct nvkm_device *device,
if (!(pm = *ppm = kzalloc(sizeof(*pm), GFP_KERNEL)))
return -ENOMEM;
- ret = nvkm_pm_ctor(&gf100_pm_, device, index, pm);
+ ret = nvkm_pm_ctor(&gf100_pm_, device, type, inst, pm);
if (ret)
return ret;
@@ -237,7 +237,7 @@ gf100_pm = {
};
int
-gf100_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gf100_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return gf100_pm_new_(&gf100_pm, device, index, ppm);
+ return gf100_pm_new_(&gf100_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
index 461bb219b1c0..bc4b014c4e8e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
@@ -9,8 +9,8 @@ struct gf100_pm_func {
const struct nvkm_specdom *doms_part;
};
-int gf100_pm_new_(const struct gf100_pm_func *, struct nvkm_device *,
- int index, struct nvkm_pm **);
+int gf100_pm_new_(const struct gf100_pm_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_pm **);
extern const struct nvkm_funcdom gf100_perfctr_func;
extern const struct nvkm_specdom gf100_pm_gpc[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
index 49b24c98a7f7..505565866b59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
@@ -60,7 +60,7 @@ gf108_pm = {
};
int
-gf108_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gf108_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return gf100_pm_new_(&gf108_pm, device, index, ppm);
+ return gf100_pm_new_(&gf108_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
index 9170025fc988..c61e8c010bb3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
@@ -74,7 +74,7 @@ gf117_pm = {
};
int
-gf117_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gf117_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return gf100_pm_new_(&gf117_pm, device, index, ppm);
+ return gf100_pm_new_(&gf117_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
index 07f946d26ac6..75bf3df1cb18 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
@@ -178,7 +178,7 @@ gk104_pm = {
};
int
-gk104_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gk104_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return gf100_pm_new_(&gk104_pm, device, index, ppm);
+ return gf100_pm_new_(&gk104_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
index 5cf5dd536fd0..25874c541486 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
@@ -151,7 +151,7 @@ gt200_pm[] = {
};
int
-gt200_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gt200_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return nv40_pm_new_(gt200_pm, device, index, ppm);
+ return nv40_pm_new_(gt200_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
index c9227ad41b04..54c23e2b6645 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
@@ -132,7 +132,7 @@ gt215_pm[] = {
};
int
-gt215_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+gt215_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return nv40_pm_new_(gt215_pm, device, index, ppm);
+ return nv40_pm_new_(gt215_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
index 3fda594700e0..eba5b3b79340 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
@@ -80,7 +80,7 @@ nv40_pm_ = {
int
nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device,
- int index, struct nvkm_pm **ppm)
+ enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
struct nv40_pm *pm;
int ret;
@@ -89,7 +89,7 @@ nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device,
return -ENOMEM;
*ppm = &pm->base;
- ret = nvkm_pm_ctor(&nv40_pm_, device, index, &pm->base);
+ ret = nvkm_pm_ctor(&nv40_pm_, device, type, inst, &pm->base);
if (ret)
return ret;
@@ -117,7 +117,7 @@ nv40_pm[] = {
};
int
-nv40_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+nv40_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return nv40_pm_new_(nv40_pm, device, index, ppm);
+ return nv40_pm_new_(nv40_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
index 8ed19320fda1..afb79843723d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
@@ -9,7 +9,7 @@ struct nv40_pm {
u32 sequence;
};
-int nv40_pm_new_(const struct nvkm_specdom *, struct nvkm_device *,
- int index, struct nvkm_pm **);
+int nv40_pm_new_(const struct nvkm_specdom *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_pm **);
extern const struct nvkm_funcdom nv40_perfctr_func;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
index cc5a41d4c6f2..bbd3404901f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
@@ -169,7 +169,7 @@ nv50_pm[] = {
};
int
-nv50_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+nv50_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
- return nv40_pm_new_(nv50_pm, device, index, ppm);
+ return nv40_pm_new_(nv50_pm, device, type, inst, ppm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
index cd6f8f79b235..6ae25d3e7f45 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
@@ -4,8 +4,8 @@
#define nvkm_pm(p) container_of((p), struct nvkm_pm, engine)
#include <engine/pm.h>
-int nvkm_pm_ctor(const struct nvkm_pm_func *, struct nvkm_device *,
- int index, struct nvkm_pm *);
+int nvkm_pm_ctor(const struct nvkm_pm_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_pm *);
struct nvkm_pm_func {
void (*fini)(struct nvkm_pm *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
index 6d2a7f0afbb5..1b87df03c823 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
@@ -74,9 +74,8 @@ g98_sec = {
};
int
-g98_sec_new(struct nvkm_device *device, int index,
+g98_sec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
- return nvkm_falcon_new_(&g98_sec, device, index,
- true, 0x087000, pengine);
+ return nvkm_falcon_new_(&g98_sec, device, type, inst, true, 0x087000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
index 41318aa0d481..092c6d0b8e01 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
@@ -85,7 +85,7 @@ nvkm_sec2 = {
int
nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device,
- int index, u32 addr, struct nvkm_sec2 **psec2)
+ enum nvkm_subdev_type type, int inst, u32 addr, struct nvkm_sec2 **psec2)
{
struct nvkm_sec2 *sec2;
int ret;
@@ -93,7 +93,7 @@ nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device,
if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL)))
return -ENOMEM;
- ret = nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine);
+ ret = nvkm_engine_ctor(&nvkm_sec2, device, type, inst, true, &sec2->engine);
if (ret)
return ret;
@@ -104,7 +104,7 @@ nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device,
sec2->func = fwif->func;
ret = nvkm_falcon_ctor(sec2->func->flcn, &sec2->engine.subdev,
- nvkm_subdev_name[index], addr, &sec2->falcon);
+ sec2->engine.subdev.name, addr, &sec2->falcon);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
index bccf7acb7f98..44e39f5743d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
@@ -343,7 +343,8 @@ gp102_sec2_fwif[] = {
};
int
-gp102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
+gp102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_sec2 **psec2)
{
- return nvkm_sec2_new_(gp102_sec2_fwif, device, index, 0, psec2);
+ return nvkm_sec2_new_(gp102_sec2_fwif, device, type, inst, 0, psec2);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
index e770c9497871..3e9f5c842f3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
@@ -36,7 +36,8 @@ gp108_sec2_fwif[] = {
};
int
-gp108_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
+gp108_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_sec2 **psec2)
{
- return nvkm_sec2_new_(gp108_sec2_fwif, device, index, 0, psec2);
+ return nvkm_sec2_new_(gp108_sec2_fwif, device, type, inst, 0, psec2);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
index 8cbc0b7d0a27..af19229e885d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
@@ -25,6 +25,6 @@ int gp102_sec2_load(struct nvkm_sec2 *, int, const struct nvkm_sec2_fwif *);
extern const struct nvkm_sec2_func gp102_sec2;
extern const struct nvkm_acr_lsf_func gp102_sec2_acr_1;
-int nvkm_sec2_new_(const struct nvkm_sec2_fwif *, struct nvkm_device *,
+int nvkm_sec2_new_(const struct nvkm_sec2_fwif *, struct nvkm_device *, enum nvkm_subdev_type,
int, u32 addr, struct nvkm_sec2 **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
index a231c1c6c0a5..f3faeb705575 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
@@ -72,10 +72,11 @@ tu102_sec2_fwif[] = {
};
int
-tu102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
+tu102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_sec2 **psec2)
{
/* TOP info wasn't updated on Turing to reflect the PRI
* address change for some reason. We override it here.
*/
- return nvkm_sec2_new_(tu102_sec2_fwif, device, index, 0x840000, psec2);
+ return nvkm_sec2_new_(tu102_sec2_fwif, device, type, inst, 0x840000, psec2);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
index 7be3198e11de..14871d0bd746 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
@@ -97,7 +97,7 @@ nvkm_sw = {
int
nvkm_sw_new_(const struct nvkm_sw_func *func, struct nvkm_device *device,
- int index, struct nvkm_sw **psw)
+ enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
{
struct nvkm_sw *sw;
@@ -106,5 +106,5 @@ nvkm_sw_new_(const struct nvkm_sw_func *func, struct nvkm_device *device,
INIT_LIST_HEAD(&sw->chan);
sw->func = func;
- return nvkm_engine_ctor(&nvkm_sw, device, index, true, &sw->engine);
+ return nvkm_engine_ctor(&nvkm_sw, device, type, inst, true, &sw->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
index ea8f4247b628..55abf839f29d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
@@ -149,7 +149,7 @@ gf100_sw = {
};
int
-gf100_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+gf100_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
{
- return nvkm_sw_new_(&gf100_sw, device, index, psw);
+ return nvkm_sw_new_(&gf100_sw, device, type, inst, psw);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
index b6675fe1b0ce..4aa57573869c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
@@ -133,7 +133,7 @@ nv04_sw = {
};
int
-nv04_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+nv04_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
{
- return nvkm_sw_new_(&nv04_sw, device, index, psw);
+ return nvkm_sw_new_(&nv04_sw, device, type, inst, psw);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
index 09d22fcd194c..e79e640ae535 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
@@ -62,7 +62,7 @@ nv10_sw = {
};
int
-nv10_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+nv10_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
{
- return nvkm_sw_new_(&nv10_sw, device, index, psw);
+ return nvkm_sw_new_(&nv10_sw, device, type, inst, psw);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
index 01573d187f2c..1fdd094c8b7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
@@ -142,7 +142,7 @@ nv50_sw = {
};
int
-nv50_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+nv50_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
{
- return nvkm_sw_new_(&nv50_sw, device, index, psw);
+ return nvkm_sw_new_(&nv50_sw, device, type, inst, psw);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
index 6d18fc6180f2..d9d83b1b8849 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
@@ -5,8 +5,8 @@
#include <engine/sw.h>
struct nvkm_sw_chan;
-int nvkm_sw_new_(const struct nvkm_sw_func *, struct nvkm_device *,
- int index, struct nvkm_sw **);
+int nvkm_sw_new_(const struct nvkm_sw_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_sw **);
struct nvkm_sw_chan_sclass {
int (*ctor)(struct nvkm_sw_chan *, const struct nvkm_oclass *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
index 7a96178786c4..b502266c76fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
@@ -36,8 +36,8 @@ g84_vp = {
};
int
-g84_vp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
+g84_vp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_engine **pengine)
{
- return nvkm_xtensa_new_(&g84_vp, device, index,
- true, 0x00f000, pengine);
+ return nvkm_xtensa_new_(&g84_vp, device, type, inst, true, 0x00f000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
index 70549381e082..f7d3ba0afb55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
@@ -175,9 +175,9 @@ nvkm_xtensa = {
};
int
-nvkm_xtensa_new_(const struct nvkm_xtensa_func *func,
- struct nvkm_device *device, int index, bool enable,
- u32 addr, struct nvkm_engine **pengine)
+nvkm_xtensa_new_(const struct nvkm_xtensa_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, bool enable, u32 addr,
+ struct nvkm_engine **pengine)
{
struct nvkm_xtensa *xtensa;
@@ -187,6 +187,5 @@ nvkm_xtensa_new_(const struct nvkm_xtensa_func *func,
xtensa->addr = addr;
*pengine = &xtensa->engine;
- return nvkm_engine_ctor(&nvkm_xtensa, device, index,
- enable, &xtensa->engine);
+ return nvkm_engine_ctor(&nvkm_xtensa, device, type, inst, enable, &xtensa->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index c6a3448180d6..262641a014b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -88,13 +88,12 @@ int
nvkm_falcon_enable(struct nvkm_falcon *falcon)
{
struct nvkm_device *device = falcon->owner->device;
- enum nvkm_devidx id = falcon->owner->index;
int ret;
- nvkm_mc_enable(device, id);
+ nvkm_mc_enable(device, falcon->owner->type, falcon->owner->inst);
ret = falcon->func->enable(falcon);
if (ret) {
- nvkm_mc_disable(device, id);
+ nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);
return ret;
}
@@ -105,15 +104,14 @@ void
nvkm_falcon_disable(struct nvkm_falcon *falcon)
{
struct nvkm_device *device = falcon->owner->device;
- enum nvkm_devidx id = falcon->owner->index;
/* already disabled, return or wait_idle will timeout */
- if (!nvkm_mc_enabled(device, id))
+ if (!nvkm_mc_enabled(device, falcon->owner->type, falcon->owner->inst))
return;
falcon->func->disable(falcon);
- nvkm_mc_disable(device, id);
+ nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);
}
int
@@ -143,7 +141,7 @@ nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
u32 reg;
if (!falcon->addr) {
- falcon->addr = nvkm_top_addr(subdev->device, subdev->index);
+ falcon->addr = nvkm_top_addr(subdev->device, subdev->type, subdev->inst);
if (WARN_ON(!falcon->addr))
return -ENODEV;
}
@@ -188,7 +186,7 @@ nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
mutex_lock(&falcon->mutex);
if (falcon->user) {
nvkm_error(user, "%s falcon already acquired by %s!\n",
- falcon->name, nvkm_subdev_name[falcon->user->index]);
+ falcon->name, falcon->user->name);
mutex_unlock(&falcon->mutex);
return -EBUSY;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index fb4fff1222af..2cb24fff7e32 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -11,7 +11,6 @@ include $(src)/nvkm/subdev/fuse/Kbuild
include $(src)/nvkm/subdev/gpio/Kbuild
include $(src)/nvkm/subdev/gsp/Kbuild
include $(src)/nvkm/subdev/i2c/Kbuild
-include $(src)/nvkm/subdev/ibus/Kbuild
include $(src)/nvkm/subdev/iccsense/Kbuild
include $(src)/nvkm/subdev/instmem/Kbuild
include $(src)/nvkm/subdev/ltc/Kbuild
@@ -20,6 +19,7 @@ include $(src)/nvkm/subdev/mmu/Kbuild
include $(src)/nvkm/subdev/mxm/Kbuild
include $(src)/nvkm/subdev/pci/Kbuild
include $(src)/nvkm/subdev/pmu/Kbuild
+include $(src)/nvkm/subdev/privring/Kbuild
include $(src)/nvkm/subdev/therm/Kbuild
include $(src)/nvkm/subdev/timer/Kbuild
include $(src)/nvkm/subdev/top/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
index c962df9910dd..af6cac696d43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
@@ -410,14 +410,14 @@ nvkm_acr_ctor_wpr(struct nvkm_acr *acr, int ver)
int
nvkm_acr_new_(const struct nvkm_acr_fwif *fwif, struct nvkm_device *device,
- int index, struct nvkm_acr **pacr)
+ enum nvkm_subdev_type type, int inst, struct nvkm_acr **pacr)
{
struct nvkm_acr *acr;
long wprfw;
if (!(acr = *pacr = kzalloc(sizeof(*acr), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_acr, device, index, &acr->subdev);
+ nvkm_subdev_ctor(&nvkm_acr, device, type, inst, &acr->subdev);
INIT_LIST_HEAD(&acr->hsfw);
INIT_LIST_HEAD(&acr->lsfw);
INIT_LIST_HEAD(&acr->hsf);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
index cd41b2e6cc87..cdb1ead26d84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
@@ -262,7 +262,7 @@ gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf,
hsf->func->bld(acr, hsf);
/* Boot the falcon. */
- nvkm_mc_intr_mask(device, falcon->owner->index, false);
+ nvkm_mc_intr_mask(device, falcon->owner->type, falcon->owner->inst, false);
nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
nvkm_falcon_set_start_addr(falcon, hsf->imem_tag << 8);
@@ -279,7 +279,7 @@ gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf,
return -EIO;
nvkm_falcon_clear_interrupt(falcon, intr_clear);
- nvkm_mc_intr_mask(device, falcon->owner->index, true);
+ nvkm_mc_intr_mask(device, falcon->owner->type, falcon->owner->inst, true);
return ret;
}
@@ -478,7 +478,8 @@ gm200_acr_fwif[] = {
};
int
-gm200_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+gm200_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_acr **pacr)
{
- return nvkm_acr_new_(gm200_acr_fwif, device, index, pacr);
+ return nvkm_acr_new_(gm200_acr_fwif, device, type, inst, pacr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
index b1ecc58152cc..54e996f2f630 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
@@ -129,7 +129,8 @@ gm20b_acr_fwif[] = {
};
int
-gm20b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+gm20b_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_acr **pacr)
{
- return nvkm_acr_new_(gm20b_acr_fwif, device, index, pacr);
+ return nvkm_acr_new_(gm20b_acr_fwif, device, type, inst, pacr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
index 80eb9d8dbc80..fb9132a39bb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
@@ -276,7 +276,8 @@ gp102_acr_fwif[] = {
};
int
-gp102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+gp102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_acr **pacr)
{
- return nvkm_acr_new_(gp102_acr_fwif, device, index, pacr);
+ return nvkm_acr_new_(gp102_acr_fwif, device, type, inst, pacr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
index 67a7c141004b..373d638a2177 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
@@ -106,7 +106,8 @@ gp108_acr_fwif[] = {
};
int
-gp108_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+gp108_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_acr **pacr)
{
- return nvkm_acr_new_(gp108_acr_fwif, device, index, pacr);
+ return nvkm_acr_new_(gp108_acr_fwif, device, type, inst, pacr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
index 8249f0d2d81d..f03ba028867b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
@@ -52,7 +52,8 @@ gp10b_acr_fwif[] = {
};
int
-gp10b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+gp10b_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_acr **pacr)
{
- return nvkm_acr_new_(gp10b_acr_fwif, device, index, pacr);
+ return nvkm_acr_new_(gp10b_acr_fwif, device, type, inst, pacr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
index d71af17a169a..c30b841c9d35 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
@@ -135,8 +135,8 @@ int gp102_acr_load_load(struct nvkm_acr *, struct nvkm_acr_hsfw *);
extern const struct nvkm_acr_hsf_func gp108_acr_unload_0;
void gp108_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *);
-int nvkm_acr_new_(const struct nvkm_acr_fwif *, struct nvkm_device *, int,
- struct nvkm_acr **);
+int nvkm_acr_new_(const struct nvkm_acr_fwif *, struct nvkm_device *, enum nvkm_subdev_type,
+ int inst, struct nvkm_acr **);
int nvkm_acr_hsf_boot(struct nvkm_acr *, const char *name);
struct nvkm_acr_lsf {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
index c4981bce9a2b..05a87e77525f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
@@ -224,7 +224,8 @@ tu102_acr_fwif[] = {
};
int
-tu102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
+tu102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_acr **pacr)
{
- return nvkm_acr_new_(tu102_acr_fwif, device, index, pacr);
+ return nvkm_acr_new_(tu102_acr_fwif, device, type, inst, pacr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index 209a6a40834a..d017a1b5e5dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -134,9 +134,9 @@ nvkm_bar = {
void
nvkm_bar_ctor(const struct nvkm_bar_func *func, struct nvkm_device *device,
- int index, struct nvkm_bar *bar)
+ enum nvkm_subdev_type type, int inst, struct nvkm_bar *bar)
{
- nvkm_subdev_ctor(&nvkm_bar, device, index, &bar->subdev);
+ nvkm_subdev_ctor(&nvkm_bar, device, type, inst, &bar->subdev);
bar->func = func;
spin_lock_init(&bar->lock);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
index 87f26f54b481..77a41bcf860e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
@@ -56,7 +56,8 @@ g84_bar_func = {
};
int
-g84_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+g84_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bar **pbar)
{
- return nv50_bar_new_(&g84_bar_func, device, index, 0x200, pbar);
+ return nv50_bar_new_(&g84_bar_func, device, type, inst, 0x200, pbar);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index a3dcb09a40ee..51070b7dda85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -162,12 +162,12 @@ gf100_bar_dtor(struct nvkm_bar *base)
int
gf100_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
- int index, struct nvkm_bar **pbar)
+ enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar)
{
struct gf100_bar *bar;
if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
return -ENOMEM;
- nvkm_bar_ctor(func, device, index, &bar->base);
+ nvkm_bar_ctor(func, device, type, inst, &bar->base);
bar->bar2_halve = nvkm_boolopt(device->cfgopt, "NvBar2Halve", false);
*pbar = &bar->base;
return 0;
@@ -189,7 +189,8 @@ gf100_bar_func = {
};
int
-gf100_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+gf100_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bar **pbar)
{
- return gf100_bar_new_(&gf100_bar_func, device, index, pbar);
+ return gf100_bar_new_(&gf100_bar_func, device, type, inst, pbar);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
index 4ae4c7145712..328a68b418d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
@@ -15,7 +15,7 @@ struct gf100_bar {
struct gf100_barN bar[2];
};
-int gf100_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
+int gf100_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *, enum nvkm_subdev_type,
int, struct nvkm_bar **);
void *gf100_bar_dtor(struct nvkm_bar *);
int gf100_bar_oneinit(struct nvkm_bar *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
index 35878fb538f2..eead8ab88393 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
@@ -32,9 +32,10 @@ gk20a_bar_func = {
};
int
-gk20a_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+gk20a_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bar **pbar)
{
- int ret = gf100_bar_new_(&gk20a_bar_func, device, index, pbar);
+ int ret = gf100_bar_new_(&gk20a_bar_func, device, type, inst, pbar);
if (ret == 0)
(*pbar)->iomap_uncached = true;
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c
index 3ddf9222d935..da95307a7912 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c
@@ -59,7 +59,8 @@ gm107_bar_func = {
};
int
-gm107_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+gm107_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bar **pbar)
{
- return gf100_bar_new_(&gm107_bar_func, device, index, pbar);
+ return gf100_bar_new_(&gm107_bar_func, device, type, inst, pbar);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
index 1ed6170891c4..4acdb4fb0107 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
@@ -32,9 +32,10 @@ gm20b_bar_func = {
};
int
-gm20b_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+gm20b_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bar **pbar)
{
- int ret = gf100_bar_new_(&gm20b_bar_func, device, index, pbar);
+ int ret = gf100_bar_new_(&gm20b_bar_func, device, type, inst, pbar);
if (ret == 0)
(*pbar)->iomap_uncached = true;
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index f23a0ccc2bec..27d8a1be43e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -220,12 +220,12 @@ nv50_bar_dtor(struct nvkm_bar *base)
int
nv50_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
- int index, u32 pgd_addr, struct nvkm_bar **pbar)
+ enum nvkm_subdev_type type, int inst, u32 pgd_addr, struct nvkm_bar **pbar)
{
struct nv50_bar *bar;
if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
return -ENOMEM;
- nvkm_bar_ctor(func, device, index, &bar->base);
+ nvkm_bar_ctor(func, device, type, inst, &bar->base);
bar->pgd_addr = pgd_addr;
*pbar = &bar->base;
return 0;
@@ -248,7 +248,8 @@ nv50_bar_func = {
};
int
-nv50_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+nv50_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bar **pbar)
{
- return nv50_bar_new_(&nv50_bar_func, device, index, 0x1400, pbar);
+ return nv50_bar_new_(&nv50_bar_func, device, type, inst, 0x1400, pbar);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
index e4193deb2e51..dedee9394079 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
@@ -16,7 +16,7 @@ struct nv50_bar {
struct nvkm_gpuobj *bar2;
};
-int nv50_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
+int nv50_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *, enum nvkm_subdev_type,
int, u32 pgd_addr, struct nvkm_bar **);
void *nv50_bar_dtor(struct nvkm_bar *);
int nv50_bar_oneinit(struct nvkm_bar *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
index 869ad184f923..daebfc991c76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
@@ -5,7 +5,7 @@
#include <subdev/bar.h>
void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *,
- int, struct nvkm_bar *);
+ enum nvkm_subdev_type, int, struct nvkm_bar *);
struct nvkm_bar_func {
void *(*dtor)(struct nvkm_bar *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
index 798f65ec3a86..c25ab407b85d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
@@ -92,7 +92,8 @@ tu102_bar = {
};
int
-tu102_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+tu102_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bar **pbar)
{
- return gf100_bar_new_(&tu102_bar, device, index, pbar);
+ return gf100_bar_new_(&tu102_bar, device, type, inst, pbar);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index f3c30b2a788e..d0f52d59fc2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -140,7 +140,8 @@ nvkm_bios = {
};
int
-nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
+nvkm_bios_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bios **pbios)
{
struct nvkm_bios *bios;
struct nvbios_image image;
@@ -149,7 +150,7 @@ nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_bios, device, index, &bios->subdev);
+ nvkm_subdev_ctor(&nvkm_bios, device, type, inst, &bios->subdev);
ret = nvbios_shadow(bios);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
index 52ad73bce5fe..0e5a46db52ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
@@ -53,12 +53,12 @@ nvkm_bus = {
int
nvkm_bus_new_(const struct nvkm_bus_func *func, struct nvkm_device *device,
- int index, struct nvkm_bus **pbus)
+ enum nvkm_subdev_type type, int inst, struct nvkm_bus **pbus)
{
struct nvkm_bus *bus;
if (!(bus = *pbus = kzalloc(sizeof(*bus), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_bus, device, index, &bus->subdev);
+ nvkm_subdev_ctor(&nvkm_bus, device, type, inst, &bus->subdev);
bus->func = func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
index 9700b5c01cc6..a0d6e2d3f804 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
@@ -58,7 +58,8 @@ g94_bus = {
};
int
-g94_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+g94_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bus **pbus)
{
- return nvkm_bus_new_(&g94_bus, device, index, pbus);
+ return nvkm_bus_new_(&g94_bus, device, type, inst, pbus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
index e0930d5fdfb1..53a6651ac225 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
@@ -40,7 +40,7 @@ gf100_bus_intr(struct nvkm_bus *bus)
(addr & 0x00000002) ? "write" : "read", data,
(addr & 0x00fffffc),
(stat & 0x00000002) ? "!ENGINE " : "",
- (stat & 0x00000004) ? "IBUS " : "",
+ (stat & 0x00000004) ? "PRIVRING " : "",
(stat & 0x00000008) ? "TIMEOUT " : "");
nvkm_wr32(device, 0x009084, 0x00000000);
@@ -69,7 +69,8 @@ gf100_bus = {
};
int
-gf100_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+gf100_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bus **pbus)
{
- return nvkm_bus_new_(&gf100_bus, device, index, pbus);
+ return nvkm_bus_new_(&gf100_bus, device, type, inst, pbus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
index 2b44ba5cf4b0..cfed17c062ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
@@ -68,7 +68,8 @@ nv04_bus = {
};
int
-nv04_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+nv04_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bus **pbus)
{
- return nvkm_bus_new_(&nv04_bus, device, index, pbus);
+ return nvkm_bus_new_(&nv04_bus, device, type, inst, pbus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
index 5153d89e1f0b..ad8da523bb22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
@@ -82,7 +82,8 @@ nv31_bus = {
};
int
-nv31_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+nv31_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bus **pbus)
{
- return nvkm_bus_new_(&nv31_bus, device, index, pbus);
+ return nvkm_bus_new_(&nv31_bus, device, type, inst, pbus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
index 19e10fdc9291..3a1e45adeedc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
@@ -99,7 +99,8 @@ nv50_bus = {
};
int
-nv50_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+nv50_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_bus **pbus)
{
- return nvkm_bus_new_(&nv50_bus, device, index, pbus);
+ return nvkm_bus_new_(&nv50_bus, device, type, inst, pbus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
index 76f7ba1c6494..2e9345b17cf8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
@@ -11,7 +11,7 @@ struct nvkm_bus_func {
u32 hwsq_size;
};
-int nvkm_bus_new_(const struct nvkm_bus_func *, struct nvkm_device *, int,
+int nvkm_bus_new_(const struct nvkm_bus_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_bus **);
void nv50_bus_init(struct nvkm_bus *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index dc184e857f85..57199be082fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -649,7 +649,7 @@ nvkm_clk = {
int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
- int index, bool allow_reclock, struct nvkm_clk *clk)
+ enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk *clk)
{
struct nvkm_subdev *subdev = &clk->subdev;
struct nvkm_bios *bios = device->bios;
@@ -657,7 +657,7 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
const char *mode;
struct nvbios_vpstate_header h;
- nvkm_subdev_ctor(&nvkm_clk, device, index, subdev);
+ nvkm_subdev_ctor(&nvkm_clk, device, type, inst, subdev);
if (bios && !nvbios_vpstate_parse(bios, &h)) {
struct nvbios_vpstate_entry base, boost;
@@ -716,9 +716,9 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
int
nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
- int index, bool allow_reclock, struct nvkm_clk **pclk)
+ enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk **pclk)
{
if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
+ return nvkm_clk_ctor(func, device, type, inst, allow_reclock, *pclk);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
index f97e3ec196bb..07157cf53c9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
@@ -41,8 +41,8 @@ g84_clk = {
};
int
-g84_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+g84_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
- return nv50_clk_new_(&g84_clk, device, index,
- (device->chipset >= 0x94), pclk);
+ return nv50_clk_new_(&g84_clk, device, type, inst, (device->chipset >= 0x94), pclk);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 7f67f9f5a550..6eea11aefb70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -468,7 +468,8 @@ gf100_clk = {
};
int
-gf100_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+gf100_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
struct gf100_clk *clk;
@@ -476,5 +477,5 @@ gf100_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
return -ENOMEM;
*pclk = &clk->base;
- return nvkm_clk_ctor(&gf100_clk, device, index, false, &clk->base);
+ return nvkm_clk_ctor(&gf100_clk, device, type, inst, false, &clk->base);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index 0b37e3da7feb..0d8e2ddcc5ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -504,7 +504,8 @@ gk104_clk = {
};
int
-gk104_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+gk104_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
struct gk104_clk *clk;
@@ -512,5 +513,5 @@ gk104_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
return -ENOMEM;
*pclk = &clk->base;
- return nvkm_clk_ctor(&gk104_clk, device, index, true, &clk->base);
+ return nvkm_clk_ctor(&gk104_clk, device, type, inst, true, &clk->base);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index 218893e3e5f9..d573fb0917fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -610,10 +610,9 @@ gk20a_clk = {
};
int
-gk20a_clk_ctor(struct nvkm_device *device, int index,
- const struct nvkm_clk_func *func,
- const struct gk20a_clk_pllg_params *params,
- struct gk20a_clk *clk)
+gk20a_clk_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ const struct nvkm_clk_func *func, const struct gk20a_clk_pllg_params *params,
+ struct gk20a_clk *clk)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
int ret;
@@ -628,7 +627,7 @@ gk20a_clk_ctor(struct nvkm_device *device, int index,
clk->params = params;
clk->parent_rate = clk_get_rate(tdev->clk);
- ret = nvkm_clk_ctor(func, device, index, true, &clk->base);
+ ret = nvkm_clk_ctor(func, device, type, inst, true, &clk->base);
if (ret)
return ret;
@@ -639,7 +638,8 @@ gk20a_clk_ctor(struct nvkm_device *device, int index,
}
int
-gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+gk20a_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
struct gk20a_clk *clk;
int ret;
@@ -649,11 +649,9 @@ gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
return -ENOMEM;
*pclk = &clk->base;
- ret = gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
- clk);
+ ret = gk20a_clk_ctor(device, type, inst, &gk20a_clk, &gk20a_pllg_params, clk);
clk->pl_to_div = pl_to_div;
clk->div_to_pl = div_to_pl;
-
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
index 0d1450972162..286413ff4a9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
@@ -146,8 +146,8 @@ gk20a_pllg_n_lo(struct gk20a_clk *clk, struct gk20a_pll *pll)
clk->parent_rate / KHZ);
}
-int gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
- const struct gk20a_clk_pllg_params *, struct gk20a_clk *);
+int gk20a_clk_ctor(struct nvkm_device *, enum nvkm_subdev_type, int, const struct nvkm_clk_func *,
+ const struct gk20a_clk_pllg_params *, struct gk20a_clk *);
void gk20a_clk_fini(struct nvkm_clk *);
int gk20a_clk_read(struct nvkm_clk *, enum nv_clk_src);
int gk20a_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
index b284e949f732..a139dafffe06 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
@@ -908,7 +908,7 @@ gm20b_clk = {
};
static int
-gm20b_clk_new_speedo0(struct nvkm_device *device, int index,
+gm20b_clk_new_speedo0(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_clk **pclk)
{
struct gk20a_clk *clk;
@@ -919,12 +919,9 @@ gm20b_clk_new_speedo0(struct nvkm_device *device, int index,
return -ENOMEM;
*pclk = &clk->base;
- ret = gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
- &gm20b_pllg_params, clk);
-
+ ret = gk20a_clk_ctor(device, type, inst, &gm20b_clk_speedo0, &gm20b_pllg_params, clk);
clk->pl_to_div = pl_to_div;
clk->div_to_pl = div_to_pl;
-
return ret;
}
@@ -1014,7 +1011,8 @@ gm20b_clk_init_safe_fmax(struct gm20b_clk *clk)
}
int
-gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+gm20b_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
struct gm20b_clk *clk;
@@ -1024,7 +1022,7 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
/* Speedo 0 GPUs cannot use noise-aware PLL */
if (tdev->gpu_speedo_id == 0)
- return gm20b_clk_new_speedo0(device, index, pclk);
+ return gm20b_clk_new_speedo0(device, type, inst, pclk);
/* Speedo >= 1, use NAPLL */
clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL);
@@ -1036,8 +1034,7 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
/* duplicate the clock parameters since we will patch them below */
clk_params = (void *) (clk + 1);
*clk_params = gm20b_pllg_params;
- ret = gk20a_clk_ctor(device, index, &gm20b_clk, clk_params,
- &clk->base);
+ ret = gk20a_clk_ctor(device, type, inst, &gm20b_clk, clk_params, &clk->base);
if (ret)
return ret;
@@ -1050,7 +1047,7 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
if (clk_params->max_m == 0) {
nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n");
kfree(clk);
- return gm20b_clk_new_speedo0(device, index, pclk);
+ return gm20b_clk_new_speedo0(device, type, inst, pclk);
}
clk->base.pl_to_div = pl_to_div;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index f0a26881d9b9..b5f3969727a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -537,7 +537,8 @@ gt215_clk = {
};
int
-gt215_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+gt215_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
struct gt215_clk *clk;
@@ -545,5 +546,5 @@ gt215_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
return -ENOMEM;
*pclk = &clk->base;
- return nvkm_clk_ctor(&gt215_clk, device, index, true, &clk->base);
+ return nvkm_clk_ctor(&gt215_clk, device, type, inst, true, &clk->base);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
index 4884eb4a9221..81f103f88dc8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
@@ -409,7 +409,8 @@ mcp77_clk = {
};
int
-mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+mcp77_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
struct mcp77_clk *clk;
@@ -417,5 +418,5 @@ mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
return -ENOMEM;
*pclk = &clk->base;
- return nvkm_clk_ctor(&mcp77_clk, device, index, true, &clk->base);
+ return nvkm_clk_ctor(&mcp77_clk, device, type, inst, true, &clk->base);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
index b280f85e8827..ca13598c2caa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
@@ -72,9 +72,10 @@ nv04_clk = {
};
int
-nv04_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+nv04_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
- int ret = nvkm_clk_new_(&nv04_clk, device, index, false, pclk);
+ int ret = nvkm_clk_new_(&nv04_clk, device, type, inst, false, pclk);
if (ret == 0) {
(*pclk)->pll_calc = nv04_clk_pll_calc;
(*pclk)->pll_prog = nv04_clk_pll_prog;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
index 2ab9b9b84018..7ddd8cecb805 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
@@ -218,7 +218,8 @@ nv40_clk = {
};
int
-nv40_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+nv40_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
struct nv40_clk *clk;
@@ -228,5 +229,5 @@ nv40_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
clk->base.pll_prog = nv04_clk_pll_prog;
*pclk = &clk->base;
- return nvkm_clk_ctor(&nv40_clk, device, index, true, &clk->base);
+ return nvkm_clk_ctor(&nv40_clk, device, type, inst, true, &clk->base);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
index da1770e47490..83067763c0ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
@@ -507,14 +507,14 @@ nv50_clk_tidy(struct nvkm_clk *base)
int
nv50_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
- int index, bool allow_reclock, struct nvkm_clk **pclk)
+ enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk **pclk)
{
struct nv50_clk *clk;
int ret;
if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
return -ENOMEM;
- ret = nvkm_clk_ctor(func, device, index, allow_reclock, &clk->base);
+ ret = nvkm_clk_ctor(func, device, type, inst, allow_reclock, &clk->base);
*pclk = &clk->base;
if (ret)
return ret;
@@ -555,7 +555,8 @@ nv50_clk = {
};
int
-nv50_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+nv50_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
{
- return nv50_clk_new_(&nv50_clk, device, index, false, pclk);
+ return nv50_clk_new_(&nv50_clk, device, type, inst, false, pclk);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
index 7c7713238ec4..5b4cb7e5cff6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
@@ -20,7 +20,7 @@ struct nv50_clk {
struct nv50_clk_hwsq hwsq;
};
-int nv50_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
+int nv50_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
bool, struct nvkm_clk **);
int nv50_clk_read(struct nvkm_clk *, enum nv_clk_src);
int nv50_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
index 81dfb37480ae..810cc572cd30 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
@@ -16,9 +16,9 @@ struct nvkm_clk_func {
struct nvkm_domain domains[];
};
-int nvkm_clk_ctor(const struct nvkm_clk_func *, struct nvkm_device *, int,
+int nvkm_clk_ctor(const struct nvkm_clk_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
bool allow_reclock, struct nvkm_clk *);
-int nvkm_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
+int nvkm_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
bool allow_reclock, struct nvkm_clk **);
int nv04_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *, int clk,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
index 4756019ddf3f..dd4981708fe4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
@@ -56,12 +56,12 @@ nvkm_devinit_disable(struct nvkm_devinit *init)
}
int
-nvkm_devinit_post(struct nvkm_devinit *init, u64 *disable)
+nvkm_devinit_post(struct nvkm_devinit *init)
{
int ret = 0;
if (init && init->func->post)
ret = init->func->post(init, init->post);
- *disable = nvkm_devinit_disable(init);
+ nvkm_devinit_disable(init);
return ret;
}
@@ -126,11 +126,10 @@ nvkm_devinit = {
};
void
-nvkm_devinit_ctor(const struct nvkm_devinit_func *func,
- struct nvkm_device *device, int index,
- struct nvkm_devinit *init)
+nvkm_devinit_ctor(const struct nvkm_devinit_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_devinit *init)
{
- nvkm_subdev_ctor(&nvkm_devinit, device, index, &init->subdev);
+ nvkm_subdev_ctor(&nvkm_devinit, device, type, inst, &init->subdev);
init->func = func;
init->force_post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
index e895289bf3c1..c224702b7bed 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
@@ -35,18 +35,18 @@ g84_devinit_disable(struct nvkm_devinit *init)
u64 disable = 0ULL;
if (!(r001540 & 0x40000000)) {
- disable |= (1ULL << NVKM_ENGINE_MPEG);
- disable |= (1ULL << NVKM_ENGINE_VP);
- disable |= (1ULL << NVKM_ENGINE_BSP);
- disable |= (1ULL << NVKM_ENGINE_CIPHER);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MPEG, 0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_VP, 0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_BSP, 0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_CIPHER, 0);
}
if (!(r00154c & 0x00000004))
- disable |= (1ULL << NVKM_ENGINE_DISP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
if (!(r00154c & 0x00000020))
- disable |= (1ULL << NVKM_ENGINE_BSP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_BSP, 0);
if (!(r00154c & 0x00000040))
- disable |= (1ULL << NVKM_ENGINE_CIPHER);
+ nvkm_subdev_disable(device, NVKM_ENGINE_CIPHER, 0);
return disable;
}
@@ -61,8 +61,8 @@ g84_devinit = {
};
int
-g84_devinit_new(struct nvkm_device *device, int index,
+g84_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&g84_devinit, device, index, pinit);
+ return nv50_devinit_new_(&g84_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
index a9d45844df5a..05729ca19e9a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
@@ -35,17 +35,17 @@ g98_devinit_disable(struct nvkm_devinit *init)
u64 disable = 0ULL;
if (!(r001540 & 0x40000000)) {
- disable |= (1ULL << NVKM_ENGINE_MSPDEC);
- disable |= (1ULL << NVKM_ENGINE_MSVLD);
- disable |= (1ULL << NVKM_ENGINE_MSPPP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0);
}
if (!(r00154c & 0x00000004))
- disable |= (1ULL << NVKM_ENGINE_DISP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
if (!(r00154c & 0x00000020))
- disable |= (1ULL << NVKM_ENGINE_MSVLD);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
if (!(r00154c & 0x00000040))
- disable |= (1ULL << NVKM_ENGINE_SEC);
+ nvkm_subdev_disable(device, NVKM_ENGINE_SEC, 0);
return disable;
}
@@ -60,8 +60,8 @@ g98_devinit = {
};
int
-g98_devinit_new(struct nvkm_device *device, int index,
+g98_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&g98_devinit, device, index, pinit);
+ return nv50_devinit_new_(&g98_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
index 636a92128f6c..6b280b05c4ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
@@ -70,7 +70,8 @@ ga100_devinit = {
};
int
-ga100_devinit_new(struct nvkm_device *device, int index, struct nvkm_devinit **pinit)
+ga100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&ga100_devinit, device, index, pinit);
+ return nv50_devinit_new_(&ga100_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
index 8b1b34c3ad26..051cfd6a5caf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
@@ -71,21 +71,21 @@ gf100_devinit_disable(struct nvkm_devinit *init)
u64 disable = 0ULL;
if (r022500 & 0x00000001)
- disable |= (1ULL << NVKM_ENGINE_DISP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
if (r022500 & 0x00000002) {
- disable |= (1ULL << NVKM_ENGINE_MSPDEC);
- disable |= (1ULL << NVKM_ENGINE_MSPPP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0);
}
if (r022500 & 0x00000004)
- disable |= (1ULL << NVKM_ENGINE_MSVLD);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
if (r022500 & 0x00000008)
- disable |= (1ULL << NVKM_ENGINE_MSENC);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSENC, 0);
if (r022500 & 0x00000100)
- disable |= (1ULL << NVKM_ENGINE_CE0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
if (r022500 & 0x00000200)
- disable |= (1ULL << NVKM_ENGINE_CE1);
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 1);
return disable;
}
@@ -114,8 +114,8 @@ gf100_devinit = {
};
int
-gf100_devinit_new(struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+gf100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&gf100_devinit, device, index, pinit);
+ return nv50_devinit_new_(&gf100_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index 28ca01be3d38..4323732a3cb2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -35,11 +35,11 @@ gm107_devinit_disable(struct nvkm_devinit *init)
u64 disable = 0ULL;
if (r021c00 & 0x00000001)
- disable |= (1ULL << NVKM_ENGINE_CE0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
if (r021c00 & 0x00000004)
- disable |= (1ULL << NVKM_ENGINE_CE2);
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2);
if (r021c04 & 0x00000001)
- disable |= (1ULL << NVKM_ENGINE_DISP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
return disable;
}
@@ -54,8 +54,8 @@ gm107_devinit = {
};
int
-gm107_devinit_new(struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+gm107_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&gm107_devinit, device, index, pinit);
+ return nv50_devinit_new_(&gm107_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index 59940dacc2ba..a308b9bde449 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -179,8 +179,8 @@ gm200_devinit = {
};
int
-gm200_devinit_new(struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+gm200_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&gm200_devinit, device, index, pinit);
+ return nv50_devinit_new_(&gm200_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
index 9a8522fa9c65..dc026ac1b595 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
@@ -71,16 +71,16 @@ gt215_devinit_disable(struct nvkm_devinit *init)
u64 disable = 0ULL;
if (!(r001540 & 0x40000000)) {
- disable |= (1ULL << NVKM_ENGINE_MSPDEC);
- disable |= (1ULL << NVKM_ENGINE_MSPPP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0);
}
if (!(r00154c & 0x00000004))
- disable |= (1ULL << NVKM_ENGINE_DISP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
if (!(r00154c & 0x00000020))
- disable |= (1ULL << NVKM_ENGINE_MSVLD);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
if (!(r00154c & 0x00000200))
- disable |= (1ULL << NVKM_ENGINE_CE0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
return disable;
}
@@ -146,8 +146,8 @@ gt215_devinit = {
};
int
-gt215_devinit_new(struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+gt215_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&gt215_devinit, device, index, pinit);
+ return nv50_devinit_new_(&gt215_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c
index fbde6828bd38..b4d1688517d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c
@@ -72,8 +72,8 @@ gv100_devinit = {
};
int
-gv100_devinit_new(struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+gv100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&gv100_devinit, device, index, pinit);
+ return nv50_devinit_new_(&gv100_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
index ce4f718e98a1..fb90d47e1225 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
@@ -35,18 +35,18 @@ mcp89_devinit_disable(struct nvkm_devinit *init)
u64 disable = 0;
if (!(r001540 & 0x40000000)) {
- disable |= (1ULL << NVKM_ENGINE_MSPDEC);
- disable |= (1ULL << NVKM_ENGINE_MSPPP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0);
}
if (!(r00154c & 0x00000004))
- disable |= (1ULL << NVKM_ENGINE_DISP);
+ nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
if (!(r00154c & 0x00000020))
- disable |= (1ULL << NVKM_ENGINE_MSVLD);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
if (!(r00154c & 0x00000040))
- disable |= (1ULL << NVKM_ENGINE_VIC);
+ nvkm_subdev_disable(device, NVKM_ENGINE_VIC, 0);
if (!(r00154c & 0x00000200))
- disable |= (1ULL << NVKM_ENGINE_CE0);
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
return disable;
}
@@ -61,8 +61,8 @@ mcp89_devinit = {
};
int
-mcp89_devinit_new(struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+mcp89_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&mcp89_devinit, device, index, pinit);
+ return nv50_devinit_new_(&mcp89_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
index 317ce9fb8225..88bc890f89a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
@@ -434,9 +434,8 @@ nv04_devinit_dtor(struct nvkm_devinit *base)
}
int
-nv04_devinit_new_(const struct nvkm_devinit_func *func,
- struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+nv04_devinit_new_(const struct nvkm_devinit_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit)
{
struct nv04_devinit *init;
@@ -444,7 +443,7 @@ nv04_devinit_new_(const struct nvkm_devinit_func *func,
return -ENOMEM;
*pinit = &init->base;
- nvkm_devinit_ctor(func, device, index, &init->base);
+ nvkm_devinit_ctor(func, device, type, inst, &init->base);
init->owner = -1;
return 0;
}
@@ -459,8 +458,8 @@ nv04_devinit = {
};
int
-nv04_devinit_new(struct nvkm_device *device, int index,
+nv04_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
- return nv04_devinit_new_(&nv04_devinit, device, index, pinit);
+ return nv04_devinit_new_(&nv04_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
index 15b029ddf6df..06ad8a606bb8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
@@ -11,7 +11,7 @@ struct nv04_devinit {
};
int nv04_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *,
- int, struct nvkm_devinit **);
+ enum nvkm_subdev_type, int, struct nvkm_devinit **);
void *nv04_devinit_dtor(struct nvkm_devinit *);
void nv04_devinit_preinit(struct nvkm_devinit *);
void nv04_devinit_fini(struct nvkm_devinit *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
index 9891eadca1ce..1410befd2285 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
@@ -136,8 +136,8 @@ nv05_devinit = {
};
int
-nv05_devinit_new(struct nvkm_device *device, int index,
+nv05_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
- return nv04_devinit_new_(&nv05_devinit, device, index, pinit);
+ return nv04_devinit_new_(&nv05_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
index 570822f83acf..a6aa8786d610 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
@@ -106,8 +106,8 @@ nv10_devinit = {
};
int
-nv10_devinit_new(struct nvkm_device *device, int index,
+nv10_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
- return nv04_devinit_new_(&nv10_devinit, device, index, pinit);
+ return nv04_devinit_new_(&nv10_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
index fefafec7e2a7..4cc5ef9a5a63 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
@@ -35,8 +35,8 @@ nv1a_devinit = {
};
int
-nv1a_devinit_new(struct nvkm_device *device, int index,
+nv1a_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
- return nv04_devinit_new_(&nv1a_devinit, device, index, pinit);
+ return nv04_devinit_new_(&nv1a_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
index 4ef04e0d8826..67f46df723e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
@@ -72,8 +72,8 @@ nv20_devinit = {
};
int
-nv20_devinit_new(struct nvkm_device *device, int index,
+nv20_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
- return nv04_devinit_new_(&nv20_devinit, device, index, pinit);
+ return nv04_devinit_new_(&nv20_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
index d7947c4391dc..380995d398b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
@@ -85,7 +85,7 @@ nv50_devinit_disable(struct nvkm_devinit *init)
u64 disable = 0ULL;
if (!(r001540 & 0x40000000))
- disable |= (1ULL << NVKM_ENGINE_MPEG);
+ nvkm_subdev_disable(device, NVKM_ENGINE_MPEG, 0);
return disable;
}
@@ -101,8 +101,8 @@ nv50_devinit_preinit(struct nvkm_devinit *base)
* missing, assume it's a secondary gpu which requires post
*/
if (!base->post) {
- u64 disable = nvkm_devinit_disable(base);
- if (disable & (1ULL << NVKM_ENGINE_DISP))
+ nvkm_devinit_disable(base);
+ if (!device->disp)
base->post = true;
}
@@ -148,9 +148,8 @@ nv50_devinit_init(struct nvkm_devinit *base)
}
int
-nv50_devinit_new_(const struct nvkm_devinit_func *func,
- struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+nv50_devinit_new_(const struct nvkm_devinit_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit)
{
struct nv50_devinit *init;
@@ -158,7 +157,7 @@ nv50_devinit_new_(const struct nvkm_devinit_func *func,
return -ENOMEM;
*pinit = &init->base;
- nvkm_devinit_ctor(func, device, index, &init->base);
+ nvkm_devinit_ctor(func, device, type, inst, &init->base);
return 0;
}
@@ -172,8 +171,8 @@ nv50_devinit = {
};
int
-nv50_devinit_new(struct nvkm_device *device, int index,
+nv50_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&nv50_devinit, device, index, pinit);
+ return nv50_devinit_new_(&nv50_devinit, device, type, inst, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index e8d37a6145a2..987a7f478b84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -9,7 +9,7 @@ struct nv50_devinit {
u32 r001540;
};
-int nv50_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *,
+int nv50_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *, enum nvkm_subdev_type,
int, struct nvkm_devinit **);
void nv50_devinit_preinit(struct nvkm_devinit *);
void nv50_devinit_init(struct nvkm_devinit *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
index 05961e624264..dd8b038a8cee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
@@ -16,7 +16,8 @@ struct nvkm_devinit_func {
};
void nvkm_devinit_ctor(const struct nvkm_devinit_func *, struct nvkm_device *,
- int index, struct nvkm_devinit *);
+ enum nvkm_subdev_type, int inst, struct nvkm_devinit *);
+u64 nvkm_devinit_disable(struct nvkm_devinit *);
int nv04_devinit_post(struct nvkm_devinit *, bool);
int tu102_devinit_post(struct nvkm_devinit *, bool);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
index 9a469bf482f2..634f64f88fc8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
@@ -82,8 +82,8 @@ tu102_devinit = {
};
int
-tu102_devinit_new(struct nvkm_device *device, int index,
- struct nvkm_devinit **pinit)
+tu102_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&tu102_devinit, device, index, pinit);
+ return nv50_devinit_new_(&tu102_devinit, device, type, inst, pinit);
}
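
Every devinit hunk above applies the same mechanical conversion: each per-chip constructor takes an (enum nvkm_subdev_type, int inst) pair in place of the old linear index and forwards it to the common helper, and the disable() hooks stop OR-ing engine bits into a mask in favour of flagging the missing unit directly via nvkm_subdev_disable() (the nv50_devinit_preinit hunk above now tests device->disp rather than the returned mask). A minimal sketch of the resulting shape; "gxxx" is a placeholder, not a real chip, and only the hooks touched by this series are shown:

	static u64
	gxxx_devinit_disable(struct nvkm_devinit *init)
	{
		struct nvkm_device *device = init->subdev.device;
		u32 r021c00 = nvkm_rd32(device, 0x021c00);
		u64 disable = 0ULL;

		/* mark the absent engine on the device itself instead of
		 * returning a bit for the caller to decode */
		if (r021c00 & 0x00000001)
			nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
		return disable;
	}

	static const struct nvkm_devinit_func
	gxxx_devinit = {
		.disable = gxxx_devinit_disable,
	};

	int
	gxxx_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
			 struct nvkm_devinit **pinit)
	{
		/* the (type, inst) pair replaces the old index argument */
		return nv50_devinit_new_(&gxxx_devinit, device, type, inst, pinit);
	}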
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
index f6dca97140d6..fd54fa504efa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
@@ -170,12 +170,12 @@ nvkm_fault = {
int
nvkm_fault_new_(const struct nvkm_fault_func *func, struct nvkm_device *device,
- int index, struct nvkm_fault **pfault)
+ enum nvkm_subdev_type type, int inst, struct nvkm_fault **pfault)
{
struct nvkm_fault *fault;
if (!(fault = *pfault = kzalloc(sizeof(*fault), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_fault, device, index, &fault->subdev);
+ nvkm_subdev_ctor(&nvkm_fault, device, type, inst, &fault->subdev);
fault->func = func;
fault->user.ctor = nvkm_ufault_new;
fault->user.base = func->user.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
index f6b189cc4330..6af7959e02ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
@@ -30,7 +30,7 @@ void
gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
{
struct nvkm_device *device = buffer->fault->subdev.device;
- nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable);
+ nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, 0, enable);
}
void
@@ -82,8 +82,8 @@ gp100_fault = {
};
int
-gp100_fault_new(struct nvkm_device *device, int index,
+gp100_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fault **pfault)
{
- return nvkm_fault_new_(&gp100_fault, device, index, pfault);
+ return nvkm_fault_new_(&gp100_fault, device, type, inst, pfault);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
index 9e66d1f7654d..89e0bc96fb92 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
@@ -46,8 +46,8 @@ gp10b_fault = {
};
int
-gp10b_fault_new(struct nvkm_device *device, int index,
+gp10b_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fault **pfault)
{
- return nvkm_fault_new_(&gp10b_fault, device, index, pfault);
+ return nvkm_fault_new_(&gp10b_fault, device, type, inst, pfault);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
index 2707be4ffabc..cd9d2ade5ac7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -228,8 +228,8 @@ gv100_fault = {
};
int
-gv100_fault_new(struct nvkm_device *device, int index,
+gv100_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fault **pfault)
{
- return nvkm_fault_new_(&gv100_fault, device, index, pfault);
+ return nvkm_fault_new_(&gv100_fault, device, type, inst, pfault);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
index f6f1dd7eee1f..36681c347fb5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
@@ -18,8 +18,8 @@ struct nvkm_fault_buffer {
u64 addr;
};
-int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *,
- int index, struct nvkm_fault **);
+int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *, enum nvkm_subdev_type,
+ int inst, struct nvkm_fault **);
struct nvkm_fault_func {
int (*oneinit)(struct nvkm_fault *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
index 45a6a68b9f48..91eb6729c84d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
@@ -22,6 +22,7 @@
#include "priv.h"
#include <core/memory.h>
+#include <subdev/mc.h>
#include <subdev/mmu.h>
#include <engine/fifo.h>
@@ -34,6 +35,9 @@ tu102_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
* which don't appear to actually work anymore, but newer
* versions of RM don't appear to touch anything at all..
*/
+ struct nvkm_device *device = buffer->fault->subdev.device;
+
+ nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, 0, enable);
}
static void
@@ -41,6 +45,11 @@ tu102_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
const u32 foff = buffer->id * 0x20;
+
+ /* Disable the fault interrupts */
+ nvkm_wr32(device, 0xb81408, 0x1);
+ nvkm_wr32(device, 0xb81410, 0x10);
+
nvkm_mask(device, 0xb83010 + foff, 0x80000000, 0x00000000);
}
@@ -50,6 +59,10 @@ tu102_fault_buffer_init(struct nvkm_fault_buffer *buffer)
struct nvkm_device *device = buffer->fault->subdev.device;
const u32 foff = buffer->id * 0x20;
+ /* Enable the fault interrupts */
+ nvkm_wr32(device, 0xb81208, 0x1);
+ nvkm_wr32(device, 0xb81210, 0x10);
+
nvkm_mask(device, 0xb83010 + foff, 0xc0000000, 0x40000000);
nvkm_wr32(device, 0xb83004 + foff, upper_32_bits(buffer->addr));
nvkm_wr32(device, 0xb83000 + foff, lower_32_bits(buffer->addr));
@@ -109,14 +122,20 @@ tu102_fault_intr(struct nvkm_fault *fault)
}
if (stat & 0x00000200) {
+ /* Clear the associated interrupt flag */
+ nvkm_wr32(device, 0xb81010, 0x10);
+
if (fault->buffer[0]) {
nvkm_event_send(&fault->event, 1, 0, NULL, 0);
stat &= ~0x00000200;
}
}
- /*XXX: guess, can't confirm until we get fw... */
+ /* Replayable MMU fault */
if (stat & 0x00000100) {
+ /* Clear the associated interrupt flag */
+ nvkm_wr32(device, 0xb81008, 0x1);
+
if (fault->buffer[1]) {
nvkm_event_send(&fault->event, 1, 1, NULL, 0);
stat &= ~0x00000100;
@@ -162,8 +181,8 @@ tu102_fault = {
};
int
-tu102_fault_new(struct nvkm_device *device, int index,
+tu102_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fault **pfault)
{
- return nvkm_fault_new_(&tu102_fault, device, index, pfault);
+ return nvkm_fault_new_(&tu102_fault, device, type, inst, pfault);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 5940e0dea2f8..6faaea948fc4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -122,7 +122,7 @@ nvkm_fb_oneinit(struct nvkm_subdev *subdev)
nvkm_debug(subdev, "%d comptags\n", tags);
}
- return nvkm_mm_init(&fb->tags, 0, 0, tags, 1);
+ return nvkm_mm_init(&fb->tags.mm, 0, 0, tags, 1);
}
static int
@@ -205,7 +205,9 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev)
for (i = 0; i < fb->tile.regions; i++)
fb->func->tile.fini(fb, i, &fb->tile.region[i]);
- nvkm_mm_fini(&fb->tags);
+ nvkm_mm_fini(&fb->tags.mm);
+ mutex_destroy(&fb->tags.mutex);
+
nvkm_ram_del(&fb->ram);
nvkm_blob_dtor(&fb->vpr_scrubber);
@@ -225,21 +227,21 @@ nvkm_fb = {
void
nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
- int index, struct nvkm_fb *fb)
+ enum nvkm_subdev_type type, int inst, struct nvkm_fb *fb)
{
- nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev);
+ nvkm_subdev_ctor(&nvkm_fb, device, type, inst, &fb->subdev);
fb->func = func;
fb->tile.regions = fb->func->tile.regions;
- fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage",
- fb->func->default_bigpage);
+ fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", fb->func->default_bigpage);
+ mutex_init(&fb->tags.mutex);
}
int
nvkm_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
- int index, struct nvkm_fb **pfb)
+ enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
if (!(*pfb = kzalloc(sizeof(**pfb), GFP_KERNEL)))
return -ENOMEM;
- nvkm_fb_ctor(func, device, index, *pfb);
+ nvkm_fb_ctor(func, device, type, inst, *pfb);
return 0;
}
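
The fb/base.c hunks above (nvkm_fb_oneinit, nvkm_fb_dtor, nvkm_fb_ctor) and the nv20-nv40 tile_comp hunks further down switch from the bare &fb->tags allocator to &fb->tags.mm, with mutex_init()/mutex_destroy() added on fb->tags.mutex. The header change is outside this section, but the usage implies the compression-tag allocator now carries its own lock, roughly as follows (inferred member layout, not the literal declaration):

	/* inferred from the hunks; the real declaration lives in the nvkm_fb header */
	struct {
		struct mutex mutex;	/* serialises tag allocation and free */
		struct nvkm_mm mm;	/* was the bare fb->tags allocator */
	} tags;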
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
index 06bf95c0c549..770a4ad39122 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
@@ -32,7 +32,7 @@ g84_fb = {
};
int
-g84_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+g84_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nv50_fb_new_(&g84_fb, device, index, pfb);
+ return nv50_fb_new_(&g84_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
index bf82686851cd..b47bebfbc26f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
@@ -34,7 +34,7 @@ ga100_fb = {
};
int
-ga100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+ga100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gp102_fb_new_(&ga100_fb, device, index, pfb);
+ return gp102_fb_new_(&ga100_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
index bcecf84a6e67..6ea7908f0563 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
@@ -34,7 +34,7 @@ ga102_fb = {
};
int
-ga102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+ga102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gp102_fb_new_(&ga102_fb, device, index, pfb);
+ return gp102_fb_new_(&ga102_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index e8dc4e913494..9dcc40f9ef79 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -117,13 +117,13 @@ gf100_fb_dtor(struct nvkm_fb *base)
int
gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
- int index, struct nvkm_fb **pfb)
+ enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
struct gf100_fb *fb;
if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
- nvkm_fb_ctor(func, device, index, &fb->base);
+ nvkm_fb_ctor(func, device, type, inst, &fb->base);
*pfb = &fb->base;
return 0;
@@ -141,7 +141,7 @@ gf100_fb = {
};
int
-gf100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gf100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gf100_fb, device, index, pfb);
+ return gf100_fb_new_(&gf100_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 2ed7cdaab37c..0cac7b06acc8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -10,8 +10,8 @@ struct gf100_fb {
dma_addr_t r100c10;
};
-int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *,
- int index, struct nvkm_fb **);
+int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_fb **);
void *gf100_fb_dtor(struct nvkm_fb *);
void gf100_fb_init(struct nvkm_fb *);
void gf100_fb_intr(struct nvkm_fb *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
index 4a9f463745b5..76678dd60f93 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
@@ -36,7 +36,7 @@ gf108_fb = {
};
int
-gf108_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gf108_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gf108_fb, device, index, pfb);
+ return gf100_fb_new_(&gf108_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index 48fd98e08baa..f73442ccb424 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -83,7 +83,7 @@ gk104_fb = {
};
int
-gk104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gk104_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gk104_fb, device, index, pfb);
+ return gf100_fb_new_(&gk104_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
index 0695e5dd360e..45d6cdffafee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
@@ -65,7 +65,7 @@ gk110_fb = {
};
int
-gk110_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gk110_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gk110_fb, device, index, pfb);
+ return gf100_fb_new_(&gk110_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index a7e29b125094..6bc42f89d8c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -34,7 +34,7 @@ gk20a_fb = {
};
int
-gk20a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gk20a_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gk20a_fb, device, index, pfb);
+ return gf100_fb_new_(&gk20a_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index 69c876d5d1c1..de52462a92bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -36,7 +36,7 @@ gm107_fb = {
};
int
-gm107_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gm107_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gm107_fb, device, index, pfb);
+ return gf100_fb_new_(&gm107_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
index d3b8c3367152..5acf8d15d06f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -67,7 +67,7 @@ gm200_fb = {
};
int
-gm200_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gm200_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gm200_fb, device, index, pfb);
+ return gf100_fb_new_(&gm200_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
index 12db61e31128..86f61a3f2fea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
@@ -34,7 +34,7 @@ gm20b_fb = {
};
int
-gm20b_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gm20b_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gm20b_fb, device, index, pfb);
+ return gf100_fb_new_(&gm20b_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
index 8205ce436b3e..09e943edc362 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
@@ -71,7 +71,7 @@ gp100_fb = {
};
int
-gp100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gp100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gp100_fb, device, index, pfb);
+ return gf100_fb_new_(&gp100_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index fc8c93aa3da5..0e78b3d734a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -114,9 +114,9 @@ gp102_fb = {
int
gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
- int index, struct nvkm_fb **pfb)
+ enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- int ret = gf100_fb_new_(func, device, index, pfb);
+ int ret = gf100_fb_new_(func, device, type, inst, pfb);
if (ret)
return ret;
@@ -126,9 +126,9 @@ gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
}
int
-gp102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gp102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gp102_fb_new_(&gp102_fb, device, index, pfb);
+ return gp102_fb_new_(&gp102_fb, device, type, inst, pfb);
}
MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
index af8e43979dc1..84c9815a6d48 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
@@ -31,7 +31,7 @@ gp10b_fb = {
};
int
-gp10b_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gp10b_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gf100_fb_new_(&gp10b_fb, device, index, pfb);
+ return gf100_fb_new_(&gp10b_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
index 9266559b45f9..c1ec9758617c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
@@ -32,7 +32,7 @@ gt215_fb = {
};
int
-gt215_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gt215_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nv50_fb_new_(&gt215_fb, device, index, pfb);
+ return nv50_fb_new_(&gt215_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index feda86a5fba8..63daa83ae12d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -42,9 +42,9 @@ gv100_fb = {
};
int
-gv100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+gv100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return gp102_fb_new_(&gv100_fb, device, index, pfb);
+ return gp102_fb_new_(&gv100_fb, device, type, inst, pfb);
}
MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
index 73b3b86a2826..70c7b08ee0a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
@@ -31,7 +31,7 @@ mcp77_fb = {
};
int
-mcp77_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+mcp77_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nv50_fb_new_(&mcp77_fb, device, index, pfb);
+ return nv50_fb_new_(&mcp77_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
index 6d11e32ec7ad..308d955168e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
@@ -31,7 +31,7 @@ mcp89_fb = {
};
int
-mcp89_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+mcp89_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nv50_fb_new_(&mcp89_fb, device, index, pfb);
+ return nv50_fb_new_(&mcp89_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
index c886664533c8..8d5a007ecc47 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
@@ -44,7 +44,7 @@ nv04_fb = {
};
int
-nv04_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv04_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv04_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv04_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
index c998b7e96aa3..7d2c16b27032 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
@@ -64,7 +64,7 @@ nv10_fb = {
};
int
-nv10_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv10_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv10_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv10_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
index 7b9f04f44af8..4bdad2abd56f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
@@ -36,7 +36,7 @@ nv1a_fb = {
};
int
-nv1a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv1a_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv1a_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv1a_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
index a021d21ff153..d254f27f9b37 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
@@ -45,7 +45,7 @@ nv20_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
else tile->zcomp = 0x04000000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;
@@ -63,7 +63,7 @@ nv20_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
tile->limit = 0;
tile->pitch = 0;
tile->zcomp = 0;
- nvkm_mm_free(&fb->tags, &tile->tag);
+ nvkm_mm_free(&fb->tags.mm, &tile->tag);
}
void
@@ -96,7 +96,7 @@ nv20_fb = {
};
int
-nv20_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv20_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv20_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv20_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
index 7709f5fe9a45..47da66dea6e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
@@ -32,7 +32,7 @@ nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
else tile->zcomp = 0x00200000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;
@@ -54,7 +54,7 @@ nv25_fb = {
};
int
-nv25_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv25_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv25_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv25_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
index 8aa782666507..0f87efb636d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
@@ -51,7 +51,7 @@ nv30_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
else tile->zcomp |= 0x02000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -127,7 +127,7 @@ nv30_fb = {
};
int
-nv30_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv30_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv30_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv30_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
index 6e83dcff72e0..0694dcfd107e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
@@ -32,7 +32,7 @@ nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
else tile->zcomp |= 0x08000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -56,7 +56,7 @@ nv35_fb = {
};
int
-nv35_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv35_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv35_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv35_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
index 2a07617bb44c..1a39770372f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
@@ -32,7 +32,7 @@ nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
else tile->zcomp |= 0x20000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -56,7 +56,7 @@ nv36_fb = {
};
int
-nv36_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv36_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv36_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv36_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
index 955160778b5b..77dbb9d6ba48 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
@@ -33,7 +33,7 @@ nv40_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
u32 tiles = DIV_ROUND_UP(size, 0x80);
u32 tags = round_up(tiles / fb->ram->parts, 0x100);
if ( (flags & 2) &&
- !nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ !nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
tile->zcomp |= ((tile->tag->offset ) >> 8);
tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
@@ -62,7 +62,7 @@ nv40_fb = {
};
int
-nv40_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv40_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv40_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv40_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
index b77f08d34cc3..0f9d9e48e7ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
@@ -56,7 +56,7 @@ nv41_fb = {
};
int
-nv41_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv41_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv41_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv41_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
index b59dc486083d..b1046ee9f0ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
@@ -65,7 +65,7 @@ nv44_fb = {
};
int
-nv44_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv44_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv44_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv44_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
index cab7d20fa039..0d78de422dfa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
@@ -51,7 +51,7 @@ nv46_fb = {
};
int
-nv46_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv46_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv46_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv46_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
index a8b0ad4c871d..5cedde29c8ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
@@ -39,7 +39,7 @@ nv47_fb = {
};
int
-nv47_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv47_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv47_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv47_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
index d0b317bb0252..95cc099603d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
@@ -39,7 +39,7 @@ nv49_fb = {
};
int
-nv49_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv49_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv49_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv49_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
index 6a6f0c086071..c9f3148f4e75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
@@ -37,7 +37,7 @@ nv4e_fb = {
};
int
-nv4e_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv4e_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nvkm_fb_new_(&nv4e_fb, device, index, pfb);
+ return nvkm_fb_new_(&nv4e_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index b2f5bf8144ea..95fd8f834010 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -262,16 +262,15 @@ nv50_fb_ = {
int
nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
- int index, struct nvkm_fb **pfb)
+ enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
struct nv50_fb *fb;
if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
- nvkm_fb_ctor(&nv50_fb_, device, index, &fb->base);
+ nvkm_fb_ctor(&nv50_fb_, device, type, inst, &fb->base);
fb->func = func;
*pfb = &fb->base;
-
return 0;
}
@@ -283,7 +282,7 @@ nv50_fb = {
};
int
-nv50_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+nv50_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
- return nv50_fb_new_(&nv50_fb, device, index, pfb);
+ return nv50_fb_new_(&nv50_fb, device, type, inst, pfb);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
index 5e2b0c9539ed..a5e673859a90 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
@@ -17,6 +17,6 @@ struct nv50_fb_func {
u32 trap;
};
-int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, int index,
+int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_fb **pfb);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 66932ac10d15..3f1be9780c65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -38,9 +38,9 @@ struct nvkm_fb_func {
};
void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
- int index, struct nvkm_fb *);
+ enum nvkm_subdev_type type, int inst, struct nvkm_fb *);
int nvkm_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *device,
- int index, struct nvkm_fb **);
+ enum nvkm_subdev_type type, int inst, struct nvkm_fb **);
int nvkm_fb_bios_memtype(struct nvkm_bios *);
void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
@@ -78,7 +78,7 @@ int gm200_fb_init_page(struct nvkm_fb *);
void gp100_fb_init_remapper(struct nvkm_fb *);
void gp100_fb_init_unkn(struct nvkm_fb *);
-int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, int,
+int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_fb **);
bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
int gp102_fb_vpr_scrub(struct nvkm_fb *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
index b11867f682cb..03b1bdb27770 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
@@ -81,12 +81,12 @@ nvkm_vram_dtor(struct nvkm_memory *memory)
struct nvkm_vram *vram = nvkm_vram(memory);
struct nvkm_mm_node *next = vram->mn;
struct nvkm_mm_node *node;
- mutex_lock(&vram->ram->fb->subdev.mutex);
+ mutex_lock(&vram->ram->mutex);
while ((node = next)) {
next = node->next;
nvkm_mm_free(&vram->ram->vram, &node);
}
- mutex_unlock(&vram->ram->fb->subdev.mutex);
+ mutex_unlock(&vram->ram->mutex);
return vram;
}
@@ -126,7 +126,7 @@ nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
vram->page = page;
*pmemory = &vram->memory;
- mutex_lock(&ram->fb->subdev.mutex);
+ mutex_lock(&ram->mutex);
node = &vram->mn;
do {
if (back)
@@ -134,7 +134,7 @@ nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
else
ret = nvkm_mm_head(mm, heap, type, max, min, align, &r);
if (ret) {
- mutex_unlock(&ram->fb->subdev.mutex);
+ mutex_unlock(&ram->mutex);
nvkm_memory_unref(pmemory);
return ret;
}
@@ -143,7 +143,7 @@ nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
node = &r->next;
max -= r->length;
} while (max);
- mutex_unlock(&ram->fb->subdev.mutex);
+ mutex_unlock(&ram->mutex);
return 0;
}
@@ -163,6 +163,7 @@ nvkm_ram_del(struct nvkm_ram **pram)
if (ram->func->dtor)
*pram = ram->func->dtor(ram);
nvkm_mm_fini(&ram->vram);
+ mutex_destroy(&ram->mutex);
kfree(*pram);
*pram = NULL;
}
@@ -196,6 +197,7 @@ nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
ram->fb = fb;
ram->type = type;
ram->size = size;
+ mutex_init(&ram->mutex);
if (!nvkm_mm_initialised(&ram->vram)) {
ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL, 0,
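
The ram.c hunk above moves the lock protecting the VRAM allocator off the shared fb->subdev.mutex and onto the ram object itself; the struct member addition is in a header outside this section. The resulting lifecycle, sketched from the fragments above and assuming the member is a plain struct mutex on struct nvkm_ram:

	/* nvkm_ram_ctor(): the allocator's lock is created with the allocator */
	mutex_init(&ram->mutex);

	/* nvkm_ram_get() / nvkm_vram_dtor(): allocation and free paths now
	 * serialise on ram->mutex rather than the framebuffer subdev mutex */
	mutex_lock(&ram->mutex);
	ret = nvkm_mm_head(&ram->vram, heap, type, max, min, align, &r);
	mutex_unlock(&ram->mutex);

	/* nvkm_ram_del(): torn down alongside the mm it guards */
	nvkm_mm_fini(&ram->vram);
	mutex_destroy(&ram->mutex);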
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index d350d92852d2..2b678b60b4d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -260,7 +260,7 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_block(fuc);
- if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ if (ram->base.fb->subdev.device->disp)
ram_wr32(fuc, 0x62c000, 0x0f0f0000);
/* MR1: turn termination on early, for some reason.. */
@@ -661,7 +661,7 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
ram_unblock(fuc);
- if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ if (ram->base.fb->subdev.device->disp)
ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
if (next->bios.rammap_11_08_01)
@@ -711,7 +711,7 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_block(fuc);
- if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ if (ram->base.fb->subdev.device->disp)
ram_wr32(fuc, 0x62c000, 0x0f0f0000);
if (vc == 1 && ram_have(fuc, gpio2E)) {
@@ -943,7 +943,7 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
ram_unblock(fuc);
- if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP))
+ if (ram->base.fb->subdev.device->disp)
ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
if (next->bios.rammap_11_08_01)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
index 1c3c18ea8ced..375dfce09f84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
@@ -42,12 +42,12 @@ nvkm_fuse = {
int
nvkm_fuse_new_(const struct nvkm_fuse_func *func, struct nvkm_device *device,
- int index, struct nvkm_fuse **pfuse)
+ enum nvkm_subdev_type type, int inst, struct nvkm_fuse **pfuse)
{
struct nvkm_fuse *fuse;
if (!(fuse = *pfuse = kzalloc(sizeof(*fuse), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_fuse, device, index, &fuse->subdev);
+ nvkm_subdev_ctor(&nvkm_fuse, device, type, inst, &fuse->subdev);
fuse->func = func;
spin_lock_init(&fuse->lock);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
index 13671fedc805..01f770654b1d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
@@ -47,7 +47,8 @@ gf100_fuse = {
};
int
-gf100_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
+gf100_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fuse **pfuse)
{
- return nvkm_fuse_new_(&gf100_fuse, device, index, pfuse);
+ return nvkm_fuse_new_(&gf100_fuse, device, type, inst, pfuse);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
index 9aff4ea04506..7dc99492f536 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
@@ -36,7 +36,8 @@ gm107_fuse = {
};
int
-gm107_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
+gm107_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fuse **pfuse)
{
- return nvkm_fuse_new_(&gm107_fuse, device, index, pfuse);
+ return nvkm_fuse_new_(&gm107_fuse, device, type, inst, pfuse);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
index 514c193db25d..2505e8e1c1d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
@@ -45,7 +45,8 @@ nv50_fuse = {
};
int
-nv50_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
+nv50_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fuse **pfuse)
{
- return nvkm_fuse_new_(&nv50_fuse, device, index, pfuse);
+ return nvkm_fuse_new_(&nv50_fuse, device, type, inst, pfuse);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
index 2edc612408dd..e83d0c30dff6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
@@ -8,6 +8,6 @@ struct nvkm_fuse_func {
u32 (*read)(struct nvkm_fuse *, u32 addr);
};
-int nvkm_fuse_new_(const struct nvkm_fuse_func *, struct nvkm_device *,
- int index, struct nvkm_fuse **);
+int nvkm_fuse_new_(const struct nvkm_fuse_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_fuse **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
index 914276410ef8..048bcc70c3f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
@@ -241,14 +241,14 @@ nvkm_gpio = {
int
nvkm_gpio_new_(const struct nvkm_gpio_func *func, struct nvkm_device *device,
- int index, struct nvkm_gpio **pgpio)
+ enum nvkm_subdev_type type, int inst, struct nvkm_gpio **pgpio)
{
struct nvkm_gpio *gpio;
if (!(gpio = *pgpio = kzalloc(sizeof(*gpio), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_gpio, device, index, &gpio->subdev);
+ nvkm_subdev_ctor(&nvkm_gpio, device, type, inst, &gpio->subdev);
gpio->func = func;
return nvkm_event_init(&nvkm_gpio_intr_func, 2, func->lines,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
index 6dcda55fb865..114728ccdf8e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
@@ -68,7 +68,8 @@ g94_gpio = {
};
int
-g94_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+g94_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gpio **pgpio)
{
- return nvkm_gpio_new_(&g94_gpio, device, index, pgpio);
+ return nvkm_gpio_new_(&g94_gpio, device, type, inst, pgpio);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
index 62c791baf400..4a96f926b66d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
@@ -112,7 +112,8 @@ ga102_gpio = {
};
int
-ga102_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+ga102_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gpio **pgpio)
{
- return nvkm_gpio_new_(&ga102_gpio, device, index, pgpio);
+ return nvkm_gpio_new_(&ga102_gpio, device, type, inst, pgpio);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
index bb7400dfaef8..ecb19e4f5c48 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
@@ -80,7 +80,8 @@ gf119_gpio = {
};
int
-gf119_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+gf119_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gpio **pgpio)
{
- return nvkm_gpio_new_(&gf119_gpio, device, index, pgpio);
+ return nvkm_gpio_new_(&gf119_gpio, device, type, inst, pgpio);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
index 2ead515b8530..c0e4cdb45520 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
@@ -68,7 +68,8 @@ gk104_gpio = {
};
int
-gk104_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+gk104_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gpio **pgpio)
{
- return nvkm_gpio_new_(&gk104_gpio, device, index, pgpio);
+ return nvkm_gpio_new_(&gk104_gpio, device, type, inst, pgpio);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
index ae3499b48330..48ad29b5638f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
@@ -112,7 +112,8 @@ nv10_gpio = {
};
int
-nv10_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+nv10_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gpio **pgpio)
{
- return nvkm_gpio_new_(&nv10_gpio, device, index, pgpio);
+ return nvkm_gpio_new_(&nv10_gpio, device, type, inst, pgpio);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
index 73923fd5f7f2..b86c49762f11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
@@ -126,7 +126,8 @@ nv50_gpio = {
};
int
-nv50_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+nv50_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gpio **pgpio)
{
- return nvkm_gpio_new_(&nv50_gpio, device, index, pgpio);
+ return nvkm_gpio_new_(&nv50_gpio, device, type, inst, pgpio);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
index 59e39affe2a0..6590d81164e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
@@ -28,8 +28,8 @@ struct nvkm_gpio_func {
void (*reset)(struct nvkm_gpio *, u8);
};
-int nvkm_gpio_new_(const struct nvkm_gpio_func *, struct nvkm_device *,
- int index, struct nvkm_gpio **);
+int nvkm_gpio_new_(const struct nvkm_gpio_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_gpio **);
void nv50_gpio_reset(struct nvkm_gpio *, u8);
int nv50_gpio_drive(struct nvkm_gpio *, int, int, int);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
index 5a32df0f9992..22574886b819 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -40,20 +40,18 @@ nvkm_gsp = {
int
nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
- int index, struct nvkm_gsp **pgsp)
+ enum nvkm_subdev_type type, int inst, struct nvkm_gsp **pgsp)
{
struct nvkm_gsp *gsp;
if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_gsp, device, index, &gsp->subdev);
+ nvkm_subdev_ctor(&nvkm_gsp, device, type, inst, &gsp->subdev);
fwif = nvkm_firmware_load(&gsp->subdev, fwif, "Gsp", gsp);
if (IS_ERR(fwif))
return PTR_ERR(fwif);
- return nvkm_falcon_ctor(fwif->flcn, &gsp->subdev,
- nvkm_subdev_name[gsp->subdev.index], 0,
- &gsp->falcon);
+ return nvkm_falcon_ctor(fwif->flcn, &gsp->subdev, gsp->subdev.name, 0, &gsp->falcon);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
index 2114f9b00a28..2ac7fc934c09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
@@ -49,7 +49,8 @@ gv100_gsp[] = {
};
int
-gv100_gsp_new(struct nvkm_device *device, int index, struct nvkm_gsp **pgsp)
+gv100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
{
- return nvkm_gsp_new_(gv100_gsp, device, index, pgsp);
+ return nvkm_gsp_new_(gv100_gsp, device, type, inst, pgsp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
index 92820fb997c1..19381ddd38d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -10,6 +10,6 @@ struct nvkm_gsp_fwif {
const struct nvkm_falcon_func *flcn;
};
-int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, int,
+int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gsp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index 719345074711..cb5cb533d91c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -277,7 +277,7 @@ nvkm_i2c_drv[] = {
int
nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
- int index, struct nvkm_i2c **pi2c)
+ enum nvkm_subdev_type type, int inst, struct nvkm_i2c **pi2c)
{
struct nvkm_bios *bios = device->bios;
struct nvkm_i2c *i2c;
@@ -289,7 +289,7 @@ nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
if (!(i2c = *pi2c = kzalloc(sizeof(*i2c), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_i2c, device, index, &i2c->subdev);
+ nvkm_subdev_ctor(&nvkm_i2c, device, type, inst, &i2c->subdev);
i2c->func = func;
INIT_LIST_HEAD(&i2c->pad);
INIT_LIST_HEAD(&i2c->bus);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
index bb2a31d88161..e5bad085c06f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
@@ -66,7 +66,8 @@ g94_i2c = {
};
int
-g94_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+g94_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&g94_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&g94_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
index ae4aad3fcd2e..cda30ee6767d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
@@ -30,7 +30,8 @@ gf117_i2c = {
};
int
-gf117_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+gf117_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&gf117_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&gf117_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
index 6f2b02af42c8..e9c6a6cca09d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
@@ -34,7 +34,8 @@ gf119_i2c = {
};
int
-gf119_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+gf119_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&gf119_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&gf119_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
index f9f6bf4b66c9..d35aa6fe3015 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
@@ -66,7 +66,8 @@ gk104_i2c = {
};
int
-gk104_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+gk104_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&gk104_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&gk104_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c
index 8e3bfa1af52a..9fec6af56e07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c
@@ -39,7 +39,8 @@ gk110_i2c = {
};
int
-gk110_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+gk110_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&gk110_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&gk110_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
index 7b2375bff8a9..46917eb600f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
@@ -41,7 +41,8 @@ gm200_i2c = {
};
int
-gm200_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+gm200_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&gm200_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&gm200_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
index 18776f49355c..ecfcf147c789 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
@@ -30,7 +30,8 @@ nv04_i2c = {
};
int
-nv04_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+nv04_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&nv04_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&nv04_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
index 6b762f7cee9e..ad1d3fd2bcbc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
@@ -30,7 +30,8 @@ nv4e_i2c = {
};
int
-nv4e_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+nv4e_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&nv4e_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&nv4e_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
index 75640ab97d6a..2f94bed2c056 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
@@ -30,7 +30,8 @@ nv50_i2c = {
};
int
-nv50_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+nv50_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&nv50_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&nv50_i2c, device, type, inst, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
index e35f6036fcfc..f9d79f72f7e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
@@ -4,8 +4,8 @@
#define nvkm_i2c(p) container_of((p), struct nvkm_i2c, subdev)
#include <subdev/i2c.h>
-int nvkm_i2c_new_(const struct nvkm_i2c_func *, struct nvkm_device *,
- int index, struct nvkm_i2c **);
+int nvkm_i2c_new_(const struct nvkm_i2c_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_i2c **);
struct nvkm_i2c_func {
int (*pad_x_new)(struct nvkm_i2c *, int id, struct nvkm_i2c_pad **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
deleted file mode 100644
index 127efb51f67d..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/subdev/ibus/gf100.o
-nvkm-y += nvkm/subdev/ibus/gf117.o
-nvkm-y += nvkm/subdev/ibus/gk104.o
-nvkm-y += nvkm/subdev/ibus/gk20a.o
-nvkm-y += nvkm/subdev/ibus/gm200.o
-nvkm-y += nvkm/subdev/ibus/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
deleted file mode 100644
index 302d69e384d8..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_IBUS_PRIV_H__
-#define __NVKM_IBUS_PRIV_H__
-
-#include <subdev/ibus.h>
-
-void gf100_ibus_intr(struct nvkm_subdev *);
-void gk104_ibus_intr(struct nvkm_subdev *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
index fecfa6afcf54..8f0ccd3664eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -312,20 +312,20 @@ iccsense_func = {
};
void
-nvkm_iccsense_ctor(struct nvkm_device *device, int index,
+nvkm_iccsense_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_iccsense *iccsense)
{
- nvkm_subdev_ctor(&iccsense_func, device, index, &iccsense->subdev);
+ nvkm_subdev_ctor(&iccsense_func, device, type, inst, &iccsense->subdev);
}
int
-nvkm_iccsense_new_(struct nvkm_device *device, int index,
+nvkm_iccsense_new_(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_iccsense **iccsense)
{
if (!(*iccsense = kzalloc(sizeof(**iccsense), GFP_KERNEL)))
return -ENOMEM;
INIT_LIST_HEAD(&(*iccsense)->sensors);
INIT_LIST_HEAD(&(*iccsense)->rails);
- nvkm_iccsense_ctor(device, index, *iccsense);
+ nvkm_iccsense_ctor(device, type, inst, *iccsense);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c
index cccff1c8a409..3eabf4944395 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c
@@ -24,8 +24,8 @@
#include "priv.h"
int
-gf100_iccsense_new(struct nvkm_device *device, int index,
+gf100_iccsense_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_iccsense **piccsense)
{
- return nvkm_iccsense_new_(device, index, piccsense);
+ return nvkm_iccsense_new_(device, type, inst, piccsense);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
index cc09c6c504af..c33441124241 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
@@ -22,6 +22,6 @@ struct nvkm_iccsense_rail {
u8 mohm;
};
-void nvkm_iccsense_ctor(struct nvkm_device *, int, struct nvkm_iccsense *);
-int nvkm_iccsense_new_(struct nvkm_device *, int, struct nvkm_iccsense **);
+void nvkm_iccsense_ctor(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_iccsense *);
+int nvkm_iccsense_new_(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_iccsense **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 364ea4492acc..cd8163a52bb6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -218,9 +218,11 @@ static void *
nvkm_instmem_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_instmem *imem = nvkm_instmem(subdev);
+ void *data = imem;
if (imem->func->dtor)
- return imem->func->dtor(imem);
- return imem;
+ data = imem->func->dtor(imem);
+ mutex_destroy(&imem->mutex);
+ return data;
}
static const struct nvkm_subdev_func
@@ -232,13 +234,13 @@ nvkm_instmem = {
};
void
-nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
- struct nvkm_device *device, int index,
- struct nvkm_instmem *imem)
+nvkm_instmem_ctor(const struct nvkm_instmem_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_instmem *imem)
{
- nvkm_subdev_ctor(&nvkm_instmem, device, index, &imem->subdev);
+ nvkm_subdev_ctor(&nvkm_instmem, device, type, inst, &imem->subdev);
imem->func = func;
spin_lock_init(&imem->lock);
INIT_LIST_HEAD(&imem->list);
INIT_LIST_HEAD(&imem->boot);
+ mutex_init(&imem->mutex);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 13d4d7ac0697..648ecf5a8fbc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -568,7 +568,7 @@ gk20a_instmem = {
};
int
-gk20a_instmem_new(struct nvkm_device *device, int index,
+gk20a_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_instmem **pimem)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
@@ -576,7 +576,7 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
- nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
+ nvkm_instmem_ctor(&gk20a_instmem, device, type, inst, &imem->base);
mutex_init(&imem->lock);
*pimem = &imem->base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 6bf0dad46919..25603b01d6f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -99,9 +99,9 @@ static void *
nv04_instobj_dtor(struct nvkm_memory *memory)
{
struct nv04_instobj *iobj = nv04_instobj(memory);
- mutex_lock(&iobj->imem->base.subdev.mutex);
+ mutex_lock(&iobj->imem->base.mutex);
nvkm_mm_free(&iobj->imem->heap, &iobj->node);
- mutex_unlock(&iobj->imem->base.subdev.mutex);
+ mutex_unlock(&iobj->imem->base.mutex);
nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
return iobj;
}
@@ -132,10 +132,9 @@ nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
iobj->base.memory.ptrs = &nv04_instobj_ptrs;
iobj->imem = imem;
- mutex_lock(&imem->base.subdev.mutex);
- ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
- align ? align : 1, &iobj->node);
- mutex_unlock(&imem->base.subdev.mutex);
+ mutex_lock(&imem->base.mutex);
+ ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node);
+ mutex_unlock(&imem->base.mutex);
return ret;
}
@@ -218,14 +217,14 @@ nv04_instmem = {
};
int
-nv04_instmem_new(struct nvkm_device *device, int index,
+nv04_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_instmem **pimem)
{
struct nv04_instmem *imem;
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
- nvkm_instmem_ctor(&nv04_instmem, device, index, &imem->base);
+ nvkm_instmem_ctor(&nv04_instmem, device, type, inst, &imem->base);
*pimem = &imem->base;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index 086c118488ef..6b462f960922 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -99,9 +99,9 @@ static void *
nv40_instobj_dtor(struct nvkm_memory *memory)
{
struct nv40_instobj *iobj = nv40_instobj(memory);
- mutex_lock(&iobj->imem->base.subdev.mutex);
+ mutex_lock(&iobj->imem->base.mutex);
nvkm_mm_free(&iobj->imem->heap, &iobj->node);
- mutex_unlock(&iobj->imem->base.subdev.mutex);
+ mutex_unlock(&iobj->imem->base.mutex);
nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
return iobj;
}
@@ -132,10 +132,9 @@ nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
iobj->base.memory.ptrs = &nv40_instobj_ptrs;
iobj->imem = imem;
- mutex_lock(&imem->base.subdev.mutex);
- ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
- align ? align : 1, &iobj->node);
- mutex_unlock(&imem->base.subdev.mutex);
+ mutex_lock(&imem->base.mutex);
+ ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node);
+ mutex_unlock(&imem->base.mutex);
return ret;
}
@@ -236,7 +235,7 @@ nv40_instmem = {
};
int
-nv40_instmem_new(struct nvkm_device *device, int index,
+nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_instmem **pimem)
{
struct nv40_instmem *imem;
@@ -244,7 +243,7 @@ nv40_instmem_new(struct nvkm_device *device, int index,
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
- nvkm_instmem_ctor(&nv40_instmem, device, index, &imem->base);
+ nvkm_instmem_ctor(&nv40_instmem, device, type, inst, &imem->base);
*pimem = &imem->base;
/* map bar */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 02c4eb28cef4..96aca0edfa3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -133,12 +133,12 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
* into it. The lock has to be dropped while doing this due
* to the possibility of recursion for page table allocation.
*/
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&imem->base.mutex);
while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
 /* Evict unused mappings, and keep retrying until we either
 * succeed, or there are no more objects left on the LRU.
*/
- mutex_lock(&subdev->mutex);
+ mutex_lock(&imem->base.mutex);
eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru);
if (eobj) {
nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n",
@@ -151,7 +151,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
emap = eobj->map;
eobj->map = NULL;
}
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&imem->base.mutex);
if (!eobj)
break;
iounmap(emap);
@@ -160,12 +160,12 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
if (ret == 0)
ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);
- mutex_lock(&subdev->mutex);
+ mutex_lock(&imem->base.mutex);
if (ret || iobj->bar) {
/* We either failed, or another thread beat us. */
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&imem->base.mutex);
nvkm_vmm_put(vmm, &bar);
- mutex_lock(&subdev->mutex);
+ mutex_lock(&imem->base.mutex);
return;
}
@@ -197,7 +197,7 @@ nv50_instobj_release(struct nvkm_memory *memory)
wmb();
nvkm_bar_flush(subdev->device->bar);
- if (refcount_dec_and_mutex_lock(&iobj->maps, &subdev->mutex)) {
+ if (refcount_dec_and_mutex_lock(&iobj->maps, &imem->base.mutex)) {
/* Add the now-unused mapping to the LRU instead of directly
* unmapping it here, in case we need to map it again later.
*/
@@ -208,7 +208,7 @@ nv50_instobj_release(struct nvkm_memory *memory)
/* Switch back to NULL accessors when last map is gone. */
iobj->base.memory.ptrs = NULL;
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&imem->base.mutex);
}
}
@@ -227,9 +227,9 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
/* Take the lock, and re-check that another thread hasn't
* already mapped the object in the meantime.
*/
- mutex_lock(&imem->subdev.mutex);
+ mutex_lock(&imem->mutex);
if (refcount_inc_not_zero(&iobj->maps)) {
- mutex_unlock(&imem->subdev.mutex);
+ mutex_unlock(&imem->mutex);
return iobj->map;
}
@@ -252,7 +252,7 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
refcount_set(&iobj->maps, 1);
}
- mutex_unlock(&imem->subdev.mutex);
+ mutex_unlock(&imem->mutex);
return map;
}
@@ -265,7 +265,7 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
/* Exclude bootstrapped objects (ie. the page tables for the
* instmem BAR itself) from eviction.
*/
- mutex_lock(&imem->subdev.mutex);
+ mutex_lock(&imem->mutex);
if (likely(iobj->lru.next)) {
list_del_init(&iobj->lru);
iobj->lru.next = NULL;
@@ -273,7 +273,7 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
nv50_instobj_kmap(iobj, vmm);
nvkm_instmem_boot(imem);
- mutex_unlock(&imem->subdev.mutex);
+ mutex_unlock(&imem->mutex);
}
static u64
@@ -315,12 +315,12 @@ nv50_instobj_dtor(struct nvkm_memory *memory)
struct nvkm_vma *bar;
void *map = map;
- mutex_lock(&imem->subdev.mutex);
+ mutex_lock(&imem->mutex);
if (likely(iobj->lru.next))
list_del(&iobj->lru);
map = iobj->map;
bar = iobj->bar;
- mutex_unlock(&imem->subdev.mutex);
+ mutex_unlock(&imem->mutex);
if (map) {
struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device);
@@ -386,14 +386,14 @@ nv50_instmem = {
};
int
-nv50_instmem_new(struct nvkm_device *device, int index,
+nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_instmem **pimem)
{
struct nv50_instmem *imem;
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
- nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
+ nvkm_instmem_ctor(&nv50_instmem, device, type, inst, &imem->base);
INIT_LIST_HEAD(&imem->lru);
*pimem = &imem->base;
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index f5da8fcbdde3..56c15e30a5dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -16,7 +16,7 @@ struct nvkm_instmem_func {
};
void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
- int index, struct nvkm_instmem *);
+ enum nvkm_subdev_type, int, struct nvkm_instmem *);
void nvkm_instmem_boot(struct nvkm_instmem *);
#include <core/memory.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 23242179e600..fa683c190795 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -33,10 +33,10 @@ nvkm_ltc_tags_clear(struct nvkm_device *device, u32 first, u32 count)
BUG_ON((first > limit) || (limit >= ltc->num_tags));
- mutex_lock(&ltc->subdev.mutex);
+ mutex_lock(&ltc->mutex);
ltc->func->cbc_clear(ltc, first, limit);
ltc->func->cbc_wait(ltc);
- mutex_unlock(&ltc->subdev.mutex);
+ mutex_unlock(&ltc->mutex);
}
int
@@ -113,6 +113,7 @@ nvkm_ltc_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_ltc *ltc = nvkm_ltc(subdev);
nvkm_memory_unref(&ltc->tag_ram);
+ mutex_destroy(&ltc->mutex);
return ltc;
}
@@ -126,15 +127,16 @@ nvkm_ltc = {
int
nvkm_ltc_new_(const struct nvkm_ltc_func *func, struct nvkm_device *device,
- int index, struct nvkm_ltc **pltc)
+ enum nvkm_subdev_type type, int inst, struct nvkm_ltc **pltc)
{
struct nvkm_ltc *ltc;
if (!(ltc = *pltc = kzalloc(sizeof(*ltc), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_ltc, device, index, &ltc->subdev);
+ nvkm_subdev_ctor(&nvkm_ltc, device, type, inst, &ltc->subdev);
ltc->func = func;
+ mutex_init(&ltc->mutex);
ltc->zbc_min = 1; /* reserve 0 for disabled */
ltc->zbc_max = min(func->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1;
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index a21ef45b8572..fd8aeafc812d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -200,8 +200,8 @@ gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
}
mm_init:
- nvkm_mm_fini(&fb->tags);
- return nvkm_mm_init(&fb->tags, 0, 0, ltc->num_tags, 1);
+ nvkm_mm_fini(&fb->tags.mm);
+ return nvkm_mm_init(&fb->tags.mm, 0, 0, ltc->num_tags, 1);
}
int
@@ -249,7 +249,8 @@ gf100_ltc = {
};
int
-gf100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gf100_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_ltc **pltc)
{
- return nvkm_ltc_new_(&gf100_ltc, device, index, pltc);
+ return nvkm_ltc_new_(&gf100_ltc, device, type, inst, pltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
index b4f6e0034d58..94aa09244d67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
@@ -50,7 +50,8 @@ gk104_ltc = {
};
int
-gk104_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gk104_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_ltc **pltc)
{
- return nvkm_ltc_new_(&gk104_ltc, device, index, pltc);
+ return nvkm_ltc_new_(&gk104_ltc, device, type, inst, pltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index ec0a3844b2d1..54d1d65d5a85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -145,7 +145,8 @@ gm107_ltc = {
};
int
-gm107_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gm107_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_ltc **pltc)
{
- return nvkm_ltc_new_(&gm107_ltc, device, index, pltc);
+ return nvkm_ltc_new_(&gm107_ltc, device, type, inst, pltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
index e18e0dc19ec8..8cfdbbdd8e8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
@@ -57,7 +57,8 @@ gm200_ltc = {
};
int
-gm200_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gm200_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_ltc **pltc)
{
- return nvkm_ltc_new_(&gm200_ltc, device, index, pltc);
+ return nvkm_ltc_new_(&gm200_ltc, device, type, inst, pltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
index e923ed76d37a..a4a6cd9b435a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
@@ -69,7 +69,8 @@ gp100_ltc = {
};
int
-gp100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gp100_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_ltc **pltc)
{
- return nvkm_ltc_new_(&gp100_ltc, device, index, pltc);
+ return nvkm_ltc_new_(&gp100_ltc, device, type, inst, pltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
index 601747ada655..ff05d617e7f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
@@ -45,7 +45,8 @@ gp102_ltc = {
};
int
-gp102_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gp102_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_ltc **pltc)
{
- return nvkm_ltc_new_(&gp102_ltc, device, index, pltc);
+ return nvkm_ltc_new_(&gp102_ltc, device, type, inst, pltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
index c0063c7caa50..dfebd796cb4b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
@@ -59,7 +59,8 @@ gp10b_ltc = {
};
int
-gp10b_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gp10b_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_ltc **pltc)
{
- return nvkm_ltc_new_(&gp10b_ltc, device, index, pltc);
+ return nvkm_ltc_new_(&gp10b_ltc, device, type, inst, pltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index eca5a711b1b8..2bebe139005d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -5,8 +5,8 @@
#include <subdev/ltc.h>
#include <core/enum.h>
-int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *,
- int index, struct nvkm_ltc **);
+int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_ltc **);
struct nvkm_ltc_func {
int (*oneinit)(struct nvkm_ltc *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 0e57ab2a709f..21c4af3f81d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -35,14 +35,14 @@ nvkm_mc_unk260(struct nvkm_device *device, u32 data)
}
void
-nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx, bool en)
+nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, bool en)
{
struct nvkm_mc *mc = device->mc;
const struct nvkm_mc_map *map;
if (likely(mc) && mc->func->intr_mask) {
- u32 mask = nvkm_top_intr_mask(device, devidx);
+ u32 mask = nvkm_top_intr_mask(device, type, inst);
for (map = mc->func->intr; !mask && map->stat; map++) {
- if (map->unit == devidx)
+ if (map->type == type && map->inst == inst)
mask = map->stat;
}
mc->func->intr_mask(mc, mask, en ? mask : 0);
@@ -78,27 +78,34 @@ void
nvkm_mc_intr(struct nvkm_device *device, bool *handled)
{
struct nvkm_mc *mc = device->mc;
+ struct nvkm_top *top = device->top;
+ struct nvkm_top_device *tdev;
struct nvkm_subdev *subdev;
const struct nvkm_mc_map *map;
u32 stat, intr;
- u64 subdevs;
if (unlikely(!mc))
return;
- intr = nvkm_mc_intr_stat(mc);
- stat = nvkm_top_intr(device, intr, &subdevs);
- while (subdevs) {
- enum nvkm_devidx subidx = __ffs64(subdevs);
- subdev = nvkm_device_subdev(device, subidx);
- if (subdev)
- nvkm_subdev_intr(subdev);
- subdevs &= ~BIT_ULL(subidx);
+ stat = intr = nvkm_mc_intr_stat(mc);
+
+ if (top) {
+ list_for_each_entry(tdev, &top->device, head) {
+ if (tdev->intr >= 0 && (stat & BIT(tdev->intr))) {
+ subdev = nvkm_device_subdev(device, tdev->type, tdev->inst);
+ if (subdev) {
+ nvkm_subdev_intr(subdev);
+ stat &= ~BIT(tdev->intr);
+ if (!stat)
+ break;
+ }
+ }
+ }
}
for (map = mc->func->intr; map->stat; map++) {
if (intr & map->stat) {
- subdev = nvkm_device_subdev(device, map->unit);
+ subdev = nvkm_device_subdev(device, map->type, map->inst);
if (subdev)
nvkm_subdev_intr(subdev);
stat &= ~map->stat;
@@ -108,23 +115,19 @@ nvkm_mc_intr(struct nvkm_device *device, bool *handled)
if (stat)
nvkm_error(&mc->subdev, "intr %08x\n", stat);
*handled = intr != 0;
-
- if (mc->func->intr_hack)
- mc->func->intr_hack(mc, handled);
}
static u32
-nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto,
- enum nvkm_devidx devidx)
+nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto, enum nvkm_subdev_type type, int inst)
{
struct nvkm_mc *mc = device->mc;
const struct nvkm_mc_map *map;
u64 pmc_enable = 0;
if (likely(mc)) {
- if (!(pmc_enable = nvkm_top_reset(device, devidx))) {
+ if (!(pmc_enable = nvkm_top_reset(device, type, inst))) {
for (map = mc->func->reset; map && map->stat; map++) {
if (!isauto || !map->noauto) {
- if (map->unit == devidx) {
+ if (map->type == type && map->inst == inst) {
pmc_enable = map->stat;
break;
}
@@ -136,9 +139,9 @@ nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto,
}
void
-nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_mc_reset(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
- u64 pmc_enable = nvkm_mc_reset_mask(device, true, devidx);
+ u64 pmc_enable = nvkm_mc_reset_mask(device, true, type, inst);
if (pmc_enable) {
nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
@@ -147,17 +150,17 @@ nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx)
}
void
-nvkm_mc_disable(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_mc_disable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
- u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+ u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst);
if (pmc_enable)
nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
}
void
-nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_mc_enable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
- u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+ u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst);
if (pmc_enable) {
nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
nvkm_rd32(device, 0x000200);
@@ -165,9 +168,9 @@ nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx)
}
bool
-nvkm_mc_enabled(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_mc_enabled(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
- u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+ u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst);
return (pmc_enable != 0) &&
((nvkm_rd32(device, 0x000200) & pmc_enable) == pmc_enable);
@@ -206,19 +209,19 @@ nvkm_mc = {
void
nvkm_mc_ctor(const struct nvkm_mc_func *func, struct nvkm_device *device,
- int index, struct nvkm_mc *mc)
+ enum nvkm_subdev_type type, int inst, struct nvkm_mc *mc)
{
- nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
+ nvkm_subdev_ctor(&nvkm_mc, device, type, inst, &mc->subdev);
mc->func = func;
}
int
nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
- int index, struct nvkm_mc **pmc)
+ enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
struct nvkm_mc *mc;
if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
return -ENOMEM;
- nvkm_mc_ctor(func, device, index, *pmc);
+ nvkm_mc_ctor(func, device, type, inst, *pmc);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
index 430a61c3df44..4cfc1c984006 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
@@ -62,7 +62,7 @@ g84_mc = {
};
int
-g84_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+g84_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&g84_mc, device, index, pmc);
+ return nvkm_mc_new_(&g84_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
index 93ad4982ce5f..b7e58d75d894 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
@@ -62,7 +62,7 @@ g98_mc = {
};
int
-g98_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+g98_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&g98_mc, device, index, pmc);
+ return nvkm_mc_new_(&g98_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
index 967eb3af11eb..4105175dfccd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
@@ -68,7 +68,7 @@ ga100_mc = {
};
int
-ga100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+ga100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&ga100_mc, device, index, pmc);
+ return nvkm_mc_new_(&ga100_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
index f93766418056..3a589c6f7fad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
@@ -27,11 +27,11 @@ static const struct nvkm_mc_map
gf100_mc_reset[] = {
{ 0x00020000, NVKM_ENGINE_MSPDEC },
{ 0x00008000, NVKM_ENGINE_MSVLD },
- { 0x00002000, NVKM_SUBDEV_PMU, true },
+ { 0x00002000, NVKM_SUBDEV_PMU, 0, true },
{ 0x00001000, NVKM_ENGINE_GR },
{ 0x00000100, NVKM_ENGINE_FIFO },
- { 0x00000080, NVKM_ENGINE_CE1 },
- { 0x00000040, NVKM_ENGINE_CE0 },
+ { 0x00000080, NVKM_ENGINE_CE, 1 },
+ { 0x00000040, NVKM_ENGINE_CE, 0 },
{ 0x00000002, NVKM_ENGINE_MSPPP },
{}
};
@@ -43,10 +43,10 @@ gf100_mc_intr[] = {
{ 0x00008000, NVKM_ENGINE_MSVLD },
{ 0x00001000, NVKM_ENGINE_GR },
{ 0x00000100, NVKM_ENGINE_FIFO },
- { 0x00000040, NVKM_ENGINE_CE1 },
- { 0x00000020, NVKM_ENGINE_CE0 },
+ { 0x00000040, NVKM_ENGINE_CE, 1 },
+ { 0x00000020, NVKM_ENGINE_CE, 0 },
{ 0x00000001, NVKM_ENGINE_MSPPP },
- { 0x40000000, NVKM_SUBDEV_IBUS },
+ { 0x40000000, NVKM_SUBDEV_PRIVRING },
{ 0x10000000, NVKM_SUBDEV_BUS },
{ 0x08000000, NVKM_SUBDEV_FB },
{ 0x02000000, NVKM_SUBDEV_LTC },
@@ -112,7 +112,7 @@ gf100_mc = {
};
int
-gf100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gf100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&gf100_mc, device, index, pmc);
+ return nvkm_mc_new_(&gf100_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
index 7b8c6ecad1a5..d9b9067fa93f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
@@ -26,7 +26,7 @@
const struct nvkm_mc_map
gk104_mc_reset[] = {
{ 0x00000100, NVKM_ENGINE_FIFO },
- { 0x00002000, NVKM_SUBDEV_PMU, true },
+ { 0x00002000, NVKM_SUBDEV_PMU, 0, true },
{}
};
@@ -34,7 +34,7 @@ const struct nvkm_mc_map
gk104_mc_intr[] = {
{ 0x04000000, NVKM_ENGINE_DISP },
{ 0x00000100, NVKM_ENGINE_FIFO },
- { 0x40000000, NVKM_SUBDEV_IBUS },
+ { 0x40000000, NVKM_SUBDEV_PRIVRING },
{ 0x10000000, NVKM_SUBDEV_BUS },
{ 0x08000000, NVKM_SUBDEV_FB },
{ 0x02000000, NVKM_SUBDEV_LTC },
@@ -60,7 +60,7 @@ gk104_mc = {
};
int
-gk104_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gk104_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&gk104_mc, device, index, pmc);
+ return nvkm_mc_new_(&gk104_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
index ca1bf3279dbe..03590292749a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
@@ -35,7 +35,7 @@ gk20a_mc = {
};
int
-gk20a_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gk20a_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&gk20a_mc, device, index, pmc);
+ return nvkm_mc_new_(&gk20a_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
index 43db245eec9a..5fd1a0595c33 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
@@ -80,7 +80,7 @@ gp100_mc_intr[] = {
{ 0x04000000, NVKM_ENGINE_DISP },
{ 0x00000100, NVKM_ENGINE_FIFO },
{ 0x00000200, NVKM_SUBDEV_FAULT },
- { 0x40000000, NVKM_SUBDEV_IBUS },
+ { 0x40000000, NVKM_SUBDEV_PRIVRING },
{ 0x10000000, NVKM_SUBDEV_BUS },
{ 0x08000000, NVKM_SUBDEV_FB },
{ 0x02000000, NVKM_SUBDEV_LTC },
@@ -106,13 +106,13 @@ gp100_mc = {
int
gp100_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
- int index, struct nvkm_mc **pmc)
+ enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
struct gp100_mc *mc;
if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
return -ENOMEM;
- nvkm_mc_ctor(func, device, index, &mc->base);
+ nvkm_mc_ctor(func, device, type, inst, &mc->base);
*pmc = &mc->base;
spin_lock_init(&mc->lock);
@@ -122,7 +122,7 @@ gp100_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
}
int
-gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gp100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return gp100_mc_new_(&gp100_mc, device, index, pmc);
+ return gp100_mc_new_(&gp100_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
index 45c62f5ef782..dd581d030ced 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
@@ -43,7 +43,7 @@ gp10b_mc = {
};
int
-gp10b_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gp10b_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return gp100_mc_new_(&gp10b_mc, device, index, pmc);
+ return gp100_mc_new_(&gp10b_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
index 99d50a3d956f..1b4d43531dba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
@@ -27,7 +27,7 @@ static const struct nvkm_mc_map
gt215_mc_reset[] = {
{ 0x04008000, NVKM_ENGINE_MSVLD },
{ 0x01020000, NVKM_ENGINE_MSPDEC },
- { 0x00802000, NVKM_ENGINE_CE0 },
+ { 0x00802000, NVKM_ENGINE_CE, 0 },
{ 0x00400002, NVKM_ENGINE_MSPPP },
{ 0x00201000, NVKM_ENGINE_GR },
{ 0x00000100, NVKM_ENGINE_FIFO },
@@ -37,7 +37,7 @@ gt215_mc_reset[] = {
static const struct nvkm_mc_map
gt215_mc_intr[] = {
{ 0x04000000, NVKM_ENGINE_DISP },
- { 0x00400000, NVKM_ENGINE_CE0 },
+ { 0x00400000, NVKM_ENGINE_CE, 0 },
{ 0x00020000, NVKM_ENGINE_MSPDEC },
{ 0x00008000, NVKM_ENGINE_MSVLD },
{ 0x00001000, NVKM_ENGINE_GR },
@@ -71,7 +71,7 @@ gt215_mc = {
};
int
-gt215_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gt215_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&gt215_mc, device, index, pmc);
+ return nvkm_mc_new_(&gt215_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
index 6509defd1460..bc0d09bafa99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
@@ -80,7 +80,7 @@ nv04_mc = {
};
int
-nv04_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+nv04_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&nv04_mc, device, index, pmc);
+ return nvkm_mc_new_(&nv04_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
index 9213107901e6..ab59ca1ee068 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
@@ -44,7 +44,7 @@ nv11_mc = {
};
int
-nv11_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+nv11_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&nv11_mc, device, index, pmc);
+ return nvkm_mc_new_(&nv11_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
index 64bf5bbf8146..03d756e26e57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
@@ -53,7 +53,7 @@ nv17_mc = {
};
int
-nv17_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+nv17_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&nv17_mc, device, index, pmc);
+ return nvkm_mc_new_(&nv17_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
index 65fa44a64b98..95f65766e8b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
@@ -48,7 +48,7 @@ nv44_mc = {
};
int
-nv44_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+nv44_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&nv44_mc, device, index, pmc);
+ return nvkm_mc_new_(&nv44_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
index fe93b4fd7100..fce3613cdfa5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
@@ -55,7 +55,7 @@ nv50_mc = {
};
int
-nv50_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+nv50_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return nvkm_mc_new_(&nv50_mc, device, index, pmc);
+ return nvkm_mc_new_(&nv50_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index 4aab753a6040..c8bcabb98f99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -4,14 +4,15 @@
#define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
#include <subdev/mc.h>
-void nvkm_mc_ctor(const struct nvkm_mc_func *, struct nvkm_device *,
- int index, struct nvkm_mc *);
-int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *,
- int index, struct nvkm_mc **);
+void nvkm_mc_ctor(const struct nvkm_mc_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_mc *);
+int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_mc **);
struct nvkm_mc_map {
u32 stat;
- u32 unit;
+ enum nvkm_subdev_type type;
+ int inst;
bool noauto;
};
@@ -26,7 +27,6 @@ struct nvkm_mc_func {
void (*intr_mask)(struct nvkm_mc *, u32 mask, u32 stat);
/* retrieve pending interrupt mask (NV_PMC_INTR) */
u32 (*intr_stat)(struct nvkm_mc *);
- void (*intr_hack)(struct nvkm_mc *, bool *handled);
const struct nvkm_mc_map *reset;
void (*unk260)(struct nvkm_mc *, u32);
};
@@ -53,7 +53,7 @@ void gf100_mc_unk260(struct nvkm_mc *, u32);
void gp100_mc_intr_unarm(struct nvkm_mc *);
void gp100_mc_intr_rearm(struct nvkm_mc *);
void gp100_mc_intr_mask(struct nvkm_mc *, u32, u32);
-int gp100_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, int,
+int gp100_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_mc **);
extern const struct nvkm_mc_map gk104_mc_intr[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
index d098c44a4fcb..58db83ebadc5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
@@ -19,37 +19,118 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#define tu102_mc(p) container_of((p), struct tu102_mc, base)
#include "priv.h"
+struct tu102_mc {
+ struct nvkm_mc base;
+ spinlock_t lock;
+ bool intr;
+ u32 mask;
+};
+
static void
-tu102_mc_intr_hack(struct nvkm_mc *mc, bool *handled)
+tu102_mc_intr_update(struct tu102_mc *mc)
{
- struct nvkm_device *device = mc->subdev.device;
- u32 stat = nvkm_rd32(device, 0xb81010);
- if (stat & 0x00000050) {
- struct nvkm_subdev *subdev =
- nvkm_device_subdev(device, NVKM_SUBDEV_FAULT);
- nvkm_wr32(device, 0xb81010, stat & 0x00000050);
- if (subdev)
- nvkm_subdev_intr(subdev);
- *handled = true;
+ struct nvkm_device *device = mc->base.subdev.device;
+ u32 mask = mc->intr ? mc->mask : 0, i;
+
+ for (i = 0; i < 2; i++) {
+ nvkm_wr32(device, 0x000180 + (i * 0x04), ~mask);
+ nvkm_wr32(device, 0x000160 + (i * 0x04), mask);
}
+
+ if (mask & 0x00000200)
+ nvkm_wr32(device, 0xb81608, 0x6);
+ else
+ nvkm_wr32(device, 0xb81610, 0x6);
}
+void
+tu102_mc_intr_unarm(struct nvkm_mc *base)
+{
+ struct tu102_mc *mc = tu102_mc(base);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->intr = false;
+ tu102_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+void
+tu102_mc_intr_rearm(struct nvkm_mc *base)
+{
+ struct tu102_mc *mc = tu102_mc(base);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->intr = true;
+ tu102_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+void
+tu102_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
+{
+ struct tu102_mc *mc = tu102_mc(base);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->mask = (mc->mask & ~mask) | intr;
+ tu102_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static u32
+tu102_mc_intr_stat(struct nvkm_mc *mc)
+{
+ struct nvkm_device *device = mc->subdev.device;
+ u32 intr0 = nvkm_rd32(device, 0x000100);
+ u32 intr1 = nvkm_rd32(device, 0x000104);
+ u32 intr_top = nvkm_rd32(device, 0xb81600);
+
+ /* Turing and above route the MMU fault interrupts via a different
+ * interrupt tree with different control registers. For the moment remap
+ * them back to the old PMC vector.
+ */
+ if (intr_top & 0x00000006)
+ intr0 |= 0x00000200;
+
+ return intr0 | intr1;
+}
+
+
static const struct nvkm_mc_func
tu102_mc = {
.init = nv50_mc_init,
.intr = gp100_mc_intr,
- .intr_unarm = gp100_mc_intr_unarm,
- .intr_rearm = gp100_mc_intr_rearm,
- .intr_mask = gp100_mc_intr_mask,
- .intr_stat = gf100_mc_intr_stat,
- .intr_hack = tu102_mc_intr_hack,
+ .intr_unarm = tu102_mc_intr_unarm,
+ .intr_rearm = tu102_mc_intr_rearm,
+ .intr_mask = tu102_mc_intr_mask,
+ .intr_stat = tu102_mc_intr_stat,
.reset = gk104_mc_reset,
};
+static int
+tu102_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
+{
+ struct tu102_mc *mc;
+
+ if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_mc_ctor(func, device, type, inst, &mc->base);
+ *pmc = &mc->base;
+
+ spin_lock_init(&mc->lock);
+ mc->intr = false;
+ mc->mask = 0x7fffffff;
+ return 0;
+}
+
int
-tu102_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+tu102_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
- return gp100_mc_new_(&tu102_mc, device, index, pmc);
+ return tu102_mc_new_(&tu102_mc, device, type, inst, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 6d5212ae2fd5..ad3b44a9e0e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -402,6 +402,7 @@ nvkm_mmu_dtor(struct nvkm_subdev *subdev)
nvkm_vmm_unref(&mmu->vmm);
nvkm_mmu_ptc_fini(mmu);
+ mutex_destroy(&mmu->mutex);
return mmu;
}
@@ -414,22 +415,23 @@ nvkm_mmu = {
void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
- int index, struct nvkm_mmu *mmu)
+ enum nvkm_subdev_type type, int inst, struct nvkm_mmu *mmu)
{
- nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
+ nvkm_subdev_ctor(&nvkm_mmu, device, type, inst, &mmu->subdev);
mmu->func = func;
mmu->dma_bits = func->dma_bits;
nvkm_mmu_ptc_init(mmu);
+ mutex_init(&mmu->mutex);
mmu->user.ctor = nvkm_ummu_new;
mmu->user.base = func->mmu.user;
}
int
nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
- int index, struct nvkm_mmu **pmmu)
+ enum nvkm_subdev_type type, int inst, struct nvkm_mmu **pmmu)
{
if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
return -ENOMEM;
- nvkm_mmu_ctor(func, device, index, *pmmu);
+ nvkm_mmu_ctor(func, device, type, inst, *pmmu);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
index 8accda5a772b..ce47a3b97be9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
@@ -35,7 +35,8 @@ g84_mmu = {
};
int
-g84_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+g84_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&g84_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&g84_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 2cd5ec81c0d0..7a28b1d49f7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -84,7 +84,8 @@ gf100_mmu = {
};
int
-gf100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gf100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&gf100_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&gf100_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
index 3d7d1eb1cff9..34c9b2b821f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
@@ -35,7 +35,8 @@ gk104_mmu = {
};
int
-gk104_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gk104_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&gk104_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&gk104_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
index ac74965a60d4..a7db29c429ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
@@ -35,7 +35,8 @@ gk20a_mmu = {
};
int
-gk20a_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gk20a_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&gk20a_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&gk20a_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
index 83990c83f9f8..e1696f637a68 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
@@ -90,9 +90,10 @@ gm200_mmu_fixed = {
};
int
-gm200_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gm200_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
if (device->fb->page)
- return nvkm_mmu_new_(&gm200_mmu_fixed, device, index, pmmu);
- return nvkm_mmu_new_(&gm200_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&gm200_mmu_fixed, device, type, inst, pmmu);
+ return nvkm_mmu_new_(&gm200_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
index 7353a94b4091..e6e1a8ad701e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
@@ -47,9 +47,10 @@ gm20b_mmu_fixed = {
};
int
-gm20b_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gm20b_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
if (device->fb->page)
- return nvkm_mmu_new_(&gm20b_mmu_fixed, device, index, pmmu);
- return nvkm_mmu_new_(&gm20b_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&gm20b_mmu_fixed, device, type, inst, pmmu);
+ return nvkm_mmu_new_(&gm20b_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
index 65cb9d28e60e..daa5ab0f8711 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
@@ -37,9 +37,10 @@ gp100_mmu = {
};
int
-gp100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gp100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true))
- return gm200_mmu_new(device, index, pmmu);
- return nvkm_mmu_new_(&gp100_mmu, device, index, pmmu);
+ return gm200_mmu_new(device, type, inst, pmmu);
+ return nvkm_mmu_new_(&gp100_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
index 0a50be9a785a..edd0bf9a5cd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
@@ -37,9 +37,10 @@ gp10b_mmu = {
};
int
-gp10b_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gp10b_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true))
- return gm20b_mmu_new(device, index, pmmu);
- return nvkm_mmu_new_(&gp10b_mmu, device, index, pmmu);
+ return gm20b_mmu_new(device, type, inst, pmmu);
+ return nvkm_mmu_new_(&gp10b_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
index e0997eedd6d9..fb8bdc88d566 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
@@ -37,7 +37,8 @@ gv100_mmu = {
};
int
-gv100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+gv100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&gv100_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&gv100_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
index 0527b50730d9..514876d6411b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
@@ -35,7 +35,8 @@ mcp77_mmu = {
};
int
-mcp77_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+mcp77_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&mcp77_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&mcp77_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
index d201c887c2cd..0674aa8f68c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
@@ -35,7 +35,8 @@ nv04_mmu = {
};
int
-nv04_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+nv04_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&nv04_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&nv04_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
index adca81895c09..909f92b72847 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -47,11 +47,12 @@ nv41_mmu = {
};
int
-nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+nv41_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
if (device->type == NVKM_DEVICE_AGP ||
!nvkm_boolopt(device->cfgopt, "NvPCIE", true))
- return nv04_mmu_new(device, index, pmmu);
+ return nv04_mmu_new(device, type, inst, pmmu);
- return nvkm_mmu_new_(&nv41_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&nv41_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
index 598c53a27bde..dd2a8d461da3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
@@ -62,11 +62,12 @@ nv44_mmu = {
};
int
-nv44_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+nv44_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
if (device->type == NVKM_DEVICE_AGP ||
!nvkm_boolopt(device->cfgopt, "NvPCIE", true))
- return nv04_mmu_new(device, index, pmmu);
+ return nv04_mmu_new(device, type, inst, pmmu);
- return nvkm_mmu_new_(&nv44_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&nv44_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index c0083ddda65a..78d46e35d0a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -71,7 +71,8 @@ nv50_mmu = {
};
int
-nv50_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+nv50_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&nv50_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&nv50_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index 479b02344271..5265bf4d8366 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -4,10 +4,10 @@
#define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev)
#include <subdev/mmu.h>
-void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *,
- int index, struct nvkm_mmu *);
-int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
- int index, struct nvkm_mmu **);
+void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_mmu *);
+int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_mmu **);
struct nvkm_mmu_func {
void (*init)(struct nvkm_mmu *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
index 94081f35f967..8d060ce47f86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
@@ -51,7 +51,8 @@ tu102_mmu = {
};
int
-tu102_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+tu102_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&tu102_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&tu102_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
index 6a2d9eb8e1ea..5438384d9a67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
@@ -187,12 +187,11 @@ gf100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
void
gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
{
- struct nvkm_subdev *subdev = &vmm->mmu->subdev;
- struct nvkm_device *device = subdev->device;
+ struct nvkm_device *device = vmm->mmu->subdev.device;
struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
u64 addr = 0;
- mutex_lock(&subdev->mutex);
+ mutex_lock(&vmm->mmu->mutex);
/* Looks like maybe a "free flush slots" counter, the
* faster you write to 0x100cbc to more it decreases.
*/
@@ -222,7 +221,7 @@ gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
if (nvkm_rd32(device, 0x100c80) & 0x00008000)
break;
);
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&vmm->mmu->mutex);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
index 1d3369683a21..31984671daf8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
@@ -80,17 +80,16 @@ nv41_vmm_desc_12[] = {
static void
nv41_vmm_flush(struct nvkm_vmm *vmm, int level)
{
- struct nvkm_subdev *subdev = &vmm->mmu->subdev;
- struct nvkm_device *device = subdev->device;
+ struct nvkm_device *device = vmm->mmu->subdev.device;
- mutex_lock(&subdev->mutex);
+ mutex_lock(&vmm->mmu->mutex);
nvkm_wr32(device, 0x100810, 0x00000022);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x100810) & 0x00000020)
break;
);
nvkm_wr32(device, 0x100810, 0x00000000);
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&vmm->mmu->mutex);
}
static const struct nvkm_vmm_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
index 2d89e27e8e9e..b7548dcd72c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -184,7 +184,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
struct nvkm_device *device = subdev->device;
int i, id;
- mutex_lock(&subdev->mutex);
+ mutex_lock(&vmm->mmu->mutex);
for (i = 0; i < NVKM_SUBDEV_NR; i++) {
if (!atomic_read(&vmm->engref[i]))
continue;
@@ -207,7 +207,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
case NVKM_ENGINE_MSVLD : id = 0x09; break;
case NVKM_ENGINE_CIPHER:
case NVKM_ENGINE_SEC : id = 0x0a; break;
- case NVKM_ENGINE_CE0 : id = 0x0d; break;
+ case NVKM_ENGINE_CE : id = 0x0d; break;
default:
continue;
}
@@ -217,10 +217,9 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
break;
) < 0)
- nvkm_error(subdev, "%s mmu invalidate timeout\n",
- nvkm_subdev_name[i]);
+ nvkm_error(subdev, "%s mmu invalidate timeout\n", nvkm_subdev_type[i]);
}
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&vmm->mmu->mutex);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
index b1294d0076c0..6cb5eefa45e9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -26,15 +26,14 @@
static void
tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
- struct nvkm_subdev *subdev = &vmm->mmu->subdev;
- struct nvkm_device *device = subdev->device;
+ struct nvkm_device *device = vmm->mmu->subdev.device;
u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
type |= 0x00000001; /* PAGE_ALL */
if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
type |= 0x00000004; /* HUB_ONLY */
- mutex_lock(&subdev->mutex);
+ mutex_lock(&vmm->mmu->mutex);
nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
nvkm_wr32(device, 0xb830a4, 0x00000000);
@@ -46,7 +45,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
break;
);
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&vmm->mmu->mutex);
}
static const struct nvkm_vmm_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
index f44682d62f75..c1acfe642da3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
@@ -230,7 +230,8 @@ nvkm_mxm = {
};
int
-nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm)
+nvkm_mxm_new_(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mxm **pmxm)
{
struct nvkm_bios *bios = device->bios;
struct nvkm_mxm *mxm;
@@ -240,7 +241,7 @@ nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm)
if (!(mxm = *pmxm = kzalloc(sizeof(*mxm), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_mxm, device, index, &mxm->subdev);
+ nvkm_subdev_ctor(&nvkm_mxm, device, type, inst, &mxm->subdev);
data = mxm_table(bios, &ver, &len);
if (!data || !(ver = nvbios_rd08(bios, data))) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
index 70e2c414bb7b..f3167904dcb0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
@@ -201,12 +201,13 @@ mxm_dcb_sanitise(struct nvkm_mxm *mxm)
}
int
-nv50_mxm_new(struct nvkm_device *device, int index, struct nvkm_subdev **pmxm)
+nv50_mxm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_subdev **pmxm)
{
struct nvkm_mxm *mxm;
int ret;
- ret = nvkm_mxm_new_(device, index, &mxm);
+ ret = nvkm_mxm_new_(device, type, inst, &mxm);
if (mxm)
*pmxm = &mxm->subdev;
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
index fc8f69e6fc64..fcacb6c6a7f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
@@ -12,5 +12,5 @@ struct nvkm_mxm {
u8 *mxms;
};
-int nvkm_mxm_new_(struct nvkm_device *, int index, struct nvkm_mxm **);
+int nvkm_mxm_new_(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_mxm **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index ee2431a7804e..a7d42ea8ba28 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -183,13 +183,13 @@ nvkm_pci_func = {
int
nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
- int index, struct nvkm_pci **ppci)
+ enum nvkm_subdev_type type, int inst, struct nvkm_pci **ppci)
{
struct nvkm_pci *pci;
if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_pci_func, device, index, &pci->subdev);
+ nvkm_subdev_ctor(&nvkm_pci_func, device, type, inst, &pci->subdev);
pci->func = func;
pci->pdev = device->func->pci(device)->pdev;
pci->irq = -1;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
index 62438d892f42..5b29aacedef3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
@@ -150,7 +150,8 @@ g84_pci_func = {
};
int
-g84_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+g84_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&g84_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&g84_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
index 48874359d5f6..a9e0674009c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
@@ -51,7 +51,8 @@ g92_pci_func = {
};
int
-g92_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+g92_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&g92_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&g92_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
index 09adb37a5664..7bacd0693283 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
@@ -43,7 +43,8 @@ g94_pci_func = {
};
int
-g94_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+g94_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&g94_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&g94_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
index 00a5e7d3ee9d..099906092fe1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
@@ -96,7 +96,8 @@ gf100_pci_func = {
};
int
-gf100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+gf100_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&gf100_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&gf100_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
index 11bf419afe3f..bcde609ba866 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
@@ -43,7 +43,8 @@ gf106_pci_func = {
};
int
-gf106_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+gf106_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&gf106_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&gf106_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
index e68030507d88..6be87ecffc89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
@@ -222,7 +222,8 @@ gk104_pci_func = {
};
int
-gk104_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+gk104_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&gk104_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&gk104_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
index 82c5234a06ff..a5fafda0014d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
@@ -38,7 +38,8 @@ gp100_pci_func = {
};
int
-gp100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+gp100_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&gp100_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&gp100_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
index 5b1ed42cb90b..9ab64194b185 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
@@ -52,7 +52,8 @@ nv04_pci_func = {
};
int
-nv04_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+nv04_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&nv04_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&nv04_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
index 6eb417765802..6a3c31cf0200 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
@@ -59,7 +59,8 @@ nv40_pci_func = {
};
int
-nv40_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+nv40_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&nv40_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&nv40_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
index fc617e4c0ab6..9cad17f178ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
@@ -45,7 +45,8 @@ nv46_pci_func = {
};
int
-nv46_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+nv46_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&nv46_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&nv46_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
index 1f1b26b5fa72..741e34bf307c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
@@ -31,7 +31,8 @@ nv4c_pci_func = {
};
int
-nv4c_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+nv4c_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
{
- return nvkm_pci_new_(&nv4c_pci_func, device, index, ppci);
+ return nvkm_pci_new_(&nv4c_pci_func, device, type, inst, ppci);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
index 7009aad86b6e..9b7583532962 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -4,8 +4,8 @@
#define nvkm_pci(p) container_of((p), struct nvkm_pci, subdev)
#include <subdev/pci.h>
-int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *,
- int index, struct nvkm_pci **);
+int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_pci **);
struct nvkm_pci_func {
void (*init)(struct nvkm_pci *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index a0fe607c9c07..24382875fb4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -148,6 +148,7 @@ nvkm_pmu_dtor(struct nvkm_subdev *subdev)
nvkm_falcon_cmdq_del(&pmu->hpq);
nvkm_falcon_qmgr_del(&pmu->qmgr);
nvkm_falcon_dtor(&pmu->falcon);
+ mutex_destroy(&pmu->send.mutex);
return nvkm_pmu(subdev);
}
@@ -162,11 +163,13 @@ nvkm_pmu = {
int
nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
- int index, struct nvkm_pmu *pmu)
+ enum nvkm_subdev_type type, int inst, struct nvkm_pmu *pmu)
{
int ret;
- nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
+ nvkm_subdev_ctor(&nvkm_pmu, device, type, inst, &pmu->subdev);
+
+ mutex_init(&pmu->send.mutex);
INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
init_waitqueue_head(&pmu->recv.wait);
@@ -177,9 +180,8 @@ nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
pmu->func = fwif->func;
- ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev,
- nvkm_subdev_name[pmu->subdev.index], 0x10a000,
- &pmu->falcon);
+ ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev, pmu->subdev.name,
+ 0x10a000, &pmu->falcon);
if (ret)
return ret;
@@ -195,10 +197,10 @@ nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
int
nvkm_pmu_new_(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
- int index, struct nvkm_pmu **ppmu)
+ enum nvkm_subdev_type type, int inst, struct nvkm_pmu **ppmu)
{
struct nvkm_pmu *pmu;
if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_pmu_ctor(fwif, device, index, *ppmu);
+ return nvkm_pmu_ctor(fwif, device, type, inst, *ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index 3ecb3d9cbcf2..f725a3ec5479 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -30,14 +30,14 @@ void
gf100_pmu_reset(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;
- nvkm_mc_disable(device, NVKM_SUBDEV_PMU);
- nvkm_mc_enable(device, NVKM_SUBDEV_PMU);
+ nvkm_mc_disable(device, NVKM_SUBDEV_PMU, 0);
+ nvkm_mc_enable(device, NVKM_SUBDEV_PMU, 0);
}
bool
gf100_pmu_enabled(struct nvkm_pmu *pmu)
{
- return nvkm_mc_enabled(pmu->subdev.device, NVKM_SUBDEV_PMU);
+ return nvkm_mc_enabled(pmu->subdev.device, NVKM_SUBDEV_PMU, 0);
}
static const struct nvkm_pmu_func
@@ -69,7 +69,8 @@ gf100_pmu_fwif[] = {
};
int
-gf100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gf100_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gf100_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gf100_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
index 8dd0271aaaee..0f4b6697a4e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
@@ -47,7 +47,8 @@ gf119_pmu_fwif[] = {
};
int
-gf119_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gf119_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gf119_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gf119_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 8b70cc17a634..9e7631d7aa41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -127,7 +127,8 @@ gk104_pmu_fwif[] = {
};
int
-gk104_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gk104_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gk104_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gk104_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
index 0081f2141b10..dbaefee53e1f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -106,7 +106,8 @@ gk110_pmu_fwif[] = {
};
int
-gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gk110_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gk110_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gk110_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index b227c701a5e7..a08fb049e6d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -48,7 +48,8 @@ gk208_pmu_fwif[] = {
};
int
-gk208_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gk208_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gk208_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gk208_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index 26c1adf8f44c..a67a42e73f08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -210,7 +210,8 @@ gk20a_pmu_fwif[] = {
};
int
-gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gk20a_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
struct gk20a_pmu *pmu;
int ret;
@@ -219,7 +220,7 @@ gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
return -ENOMEM;
*ppmu = &pmu->base;
- ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, index, &pmu->base);
+ ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, type, inst, &pmu->base);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
index 5afb55e58b51..622ee637f97b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
@@ -49,7 +49,8 @@ gm107_pmu_fwif[] = {
};
int
-gm107_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gm107_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gm107_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gm107_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
index 383376addb41..5968c7696596 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
@@ -45,7 +45,8 @@ gm200_pmu_fwif[] = {
};
int
-gm200_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gm200_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gm200_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gm200_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index 8f6ed5373ea1..148706977eec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -240,7 +240,8 @@ gm20b_pmu_fwif[] = {
};
int
-gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gm20b_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gm20b_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gm20b_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
index 3d8ce14dba7b..00da1b873ce8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -51,7 +51,8 @@ gp102_pmu_fwif[] = {
};
int
-gp102_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gp102_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gp102_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gp102_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
index 9c237c426599..461f722656e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
@@ -99,7 +99,8 @@ gp10b_pmu_fwif[] = {
};
int
-gp10b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gp10b_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gp10b_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gp10b_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index 88b909913ff9..b0407b86bc10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -34,7 +34,7 @@ gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
struct nvkm_device *device = subdev->device;
u32 addr;
- mutex_lock(&subdev->mutex);
+ mutex_lock(&pmu->send.mutex);
/* wait for a free slot in the fifo */
addr = nvkm_rd32(device, 0x10a4a0);
if (nvkm_msec(device, 2000,
@@ -42,7 +42,7 @@ gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
if (tmp != (addr ^ 8))
break;
) < 0) {
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&pmu->send.mutex);
return -EBUSY;
}
@@ -79,7 +79,7 @@ gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
reply[1] = pmu->recv.data[1];
}
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&pmu->send.mutex);
return 0;
}
@@ -282,7 +282,8 @@ gt215_pmu_fwif[] = {
};
int
-gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+gt215_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(gt215_pmu_fwif, device, index, ppmu);
+ return nvkm_pmu_new_(gt215_pmu_fwif, device, type, inst, ppmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 276b6d778e53..e7860d177353 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -62,8 +62,8 @@ int gf100_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
int gm200_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
int gm20b_pmu_load(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *);
-int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *,
- int index, struct nvkm_pmu *);
-int nvkm_pmu_new_(const struct nvkm_pmu_fwif *, struct nvkm_device *,
- int index, struct nvkm_pmu **);
+int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_pmu *);
+int nvkm_pmu_new_(const struct nvkm_pmu_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_pmu **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/privring/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/Kbuild
new file mode 100644
index 000000000000..d47d1bdd0f2b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/Kbuild
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/privring/gf100.o
+nvkm-y += nvkm/subdev/privring/gf117.o
+nvkm-y += nvkm/subdev/privring/gk104.o
+nvkm-y += nvkm/subdev/privring/gk20a.o
+nvkm-y += nvkm/subdev/privring/gm200.o
+nvkm-y += nvkm/subdev/privring/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf100.c
index 1115376bc85f..ef7caca70372 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf100.c
@@ -25,39 +25,39 @@
#include <subdev/timer.h>
static void
-gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
+gf100_privring_intr_hub(struct nvkm_subdev *privring, int i)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0400));
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
- nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(privring, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
static void
-gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
+gf100_privring_intr_rop(struct nvkm_subdev *privring, int i)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0400));
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
- nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(privring, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
static void
-gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
+gf100_privring_intr_gpc(struct nvkm_subdev *privring, int i)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0400));
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
- nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(privring, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
void
-gf100_ibus_intr(struct nvkm_subdev *ibus)
+gf100_privring_intr(struct nvkm_subdev *privring)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 intr0 = nvkm_rd32(device, 0x121c58);
u32 intr1 = nvkm_rd32(device, 0x121c5c);
u32 hubnr = nvkm_rd32(device, 0x121c70);
@@ -68,7 +68,7 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
u32 stat = 0x00000100 << i;
if (intr0 & stat) {
- gf100_ibus_intr_hub(ibus, i);
+ gf100_privring_intr_hub(privring, i);
intr0 &= ~stat;
}
}
@@ -76,7 +76,7 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
u32 stat = 0x00010000 << i;
if (intr0 & stat) {
- gf100_ibus_intr_rop(ibus, i);
+ gf100_privring_intr_rop(privring, i);
intr0 &= ~stat;
}
}
@@ -84,7 +84,7 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
for (i = 0; intr1 && i < gpcnr; i++) {
u32 stat = 0x00000001 << i;
if (intr1 & stat) {
- gf100_ibus_intr_gpc(ibus, i);
+ gf100_privring_intr_gpc(privring, i);
intr1 &= ~stat;
}
}
@@ -97,9 +97,9 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
}
static int
-gf100_ibus_init(struct nvkm_subdev *ibus)
+gf100_privring_init(struct nvkm_subdev *privring)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
nvkm_wr32(device, 0x12232c, 0x00100064);
nvkm_wr32(device, 0x122330, 0x00100064);
@@ -109,14 +109,14 @@ gf100_ibus_init(struct nvkm_subdev *ibus)
}
static const struct nvkm_subdev_func
-gf100_ibus = {
- .init = gf100_ibus_init,
- .intr = gf100_ibus_intr,
+gf100_privring = {
+ .init = gf100_privring_init,
+ .intr = gf100_privring_intr,
};
int
-gf100_ibus_new(struct nvkm_device *device, int index,
- struct nvkm_subdev **pibus)
+gf100_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_subdev **pprivring)
{
- return nvkm_subdev_new_(&gf100_ibus, device, index, pibus);
+ return nvkm_subdev_new_(&gf100_privring, device, type, inst, pprivring);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf117.c
index 1124dadac145..c78721fcd729 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf117.c
@@ -24,9 +24,9 @@
#include "priv.h"
static int
-gf117_ibus_init(struct nvkm_subdev *ibus)
+gf117_privring_init(struct nvkm_subdev *privring)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000100);
nvkm_mask(device, 0x1223b0, 0x0003ffff, 0x00000fff);
@@ -34,14 +34,14 @@ gf117_ibus_init(struct nvkm_subdev *ibus)
}
static const struct nvkm_subdev_func
-gf117_ibus = {
- .init = gf117_ibus_init,
- .intr = gf100_ibus_intr,
+gf117_privring = {
+ .init = gf117_privring_init,
+ .intr = gf100_privring_intr,
};
int
-gf117_ibus_new(struct nvkm_device *device, int index,
- struct nvkm_subdev **pibus)
+gf117_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_subdev **pprivring)
{
- return nvkm_subdev_new_(&gf117_ibus, device, index, pibus);
+ return nvkm_subdev_new_(&gf117_privring, device, type, inst, pprivring);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk104.c
index 22e487b493ad..568a4c0997bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk104.c
@@ -25,39 +25,39 @@
#include <subdev/timer.h>
static void
-gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
+gk104_privring_intr_hub(struct nvkm_subdev *privring, int i)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0800));
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
- nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(privring, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
static void
-gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
+gk104_privring_intr_rop(struct nvkm_subdev *privring, int i)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0800));
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
- nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(privring, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
static void
-gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
+gk104_privring_intr_gpc(struct nvkm_subdev *privring, int i)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0800));
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
- nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(privring, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
void
-gk104_ibus_intr(struct nvkm_subdev *ibus)
+gk104_privring_intr(struct nvkm_subdev *privring)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 intr0 = nvkm_rd32(device, 0x120058);
u32 intr1 = nvkm_rd32(device, 0x12005c);
u32 hubnr = nvkm_rd32(device, 0x120070);
@@ -68,7 +68,7 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
u32 stat = 0x00000100 << i;
if (intr0 & stat) {
- gk104_ibus_intr_hub(ibus, i);
+ gk104_privring_intr_hub(privring, i);
intr0 &= ~stat;
}
}
@@ -76,7 +76,7 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
u32 stat = 0x00010000 << i;
if (intr0 & stat) {
- gk104_ibus_intr_rop(ibus, i);
+ gk104_privring_intr_rop(privring, i);
intr0 &= ~stat;
}
}
@@ -84,7 +84,7 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
for (i = 0; intr1 && i < gpcnr; i++) {
u32 stat = 0x00000001 << i;
if (intr1 & stat) {
- gk104_ibus_intr_gpc(ibus, i);
+ gk104_privring_intr_gpc(privring, i);
intr1 &= ~stat;
}
}
@@ -97,9 +97,9 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
}
static int
-gk104_ibus_init(struct nvkm_subdev *ibus)
+gk104_privring_init(struct nvkm_subdev *privring)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
nvkm_mask(device, 0x122318, 0x0003ffff, 0x00001000);
nvkm_mask(device, 0x12231c, 0x0003ffff, 0x00000200);
nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
@@ -111,15 +111,15 @@ gk104_ibus_init(struct nvkm_subdev *ibus)
}
static const struct nvkm_subdev_func
-gk104_ibus = {
- .preinit = gk104_ibus_init,
- .init = gk104_ibus_init,
- .intr = gk104_ibus_intr,
+gk104_privring = {
+ .preinit = gk104_privring_init,
+ .init = gk104_privring_init,
+ .intr = gk104_privring_intr,
};
int
-gk104_ibus_new(struct nvkm_device *device, int index,
- struct nvkm_subdev **pibus)
+gk104_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_subdev **pprivring)
{
- return nvkm_subdev_new_(&gk104_ibus, device, index, pibus);
+ return nvkm_subdev_new_(&gk104_privring, device, type, inst, pprivring);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk20a.c
index 187d544378b0..55e4a60d8770 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk20a.c
@@ -19,13 +19,13 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-#include <subdev/ibus.h>
+#include <subdev/privring.h>
#include <subdev/timer.h>
static void
-gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus)
+gk20a_privring_init_privring_ring(struct nvkm_subdev *privring)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
nvkm_mask(device, 0x137250, 0x3f, 0);
nvkm_mask(device, 0x000200, 0x20, 0);
@@ -46,14 +46,14 @@ gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus)
}
static void
-gk20a_ibus_intr(struct nvkm_subdev *ibus)
+gk20a_privring_intr(struct nvkm_subdev *privring)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
u32 status0 = nvkm_rd32(device, 0x120058);
if (status0 & 0x7) {
- nvkm_debug(ibus, "resetting ibus ring\n");
- gk20a_ibus_init_ibus_ring(ibus);
+ nvkm_debug(privring, "resetting privring ring\n");
+ gk20a_privring_init_privring_ring(privring);
}
/* Acknowledge interrupt */
@@ -65,21 +65,21 @@ gk20a_ibus_intr(struct nvkm_subdev *ibus)
}
static int
-gk20a_ibus_init(struct nvkm_subdev *ibus)
+gk20a_privring_init(struct nvkm_subdev *privring)
{
- gk20a_ibus_init_ibus_ring(ibus);
+ gk20a_privring_init_privring_ring(privring);
return 0;
}
static const struct nvkm_subdev_func
-gk20a_ibus = {
- .init = gk20a_ibus_init,
- .intr = gk20a_ibus_intr,
+gk20a_privring = {
+ .init = gk20a_privring_init,
+ .intr = gk20a_privring_intr,
};
int
-gk20a_ibus_new(struct nvkm_device *device, int index,
- struct nvkm_subdev **pibus)
+gk20a_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_subdev **pprivring)
{
- return nvkm_subdev_new_(&gk20a_ibus, device, index, pibus);
+ return nvkm_subdev_new_(&gk20a_privring, device, type, inst, pprivring);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c
index 0f1f0ad6377e..b4eaf6db36d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c
@@ -24,13 +24,13 @@
#include "priv.h"
static const struct nvkm_subdev_func
-gm200_ibus = {
- .intr = gk104_ibus_intr,
+gm200_privring = {
+ .intr = gk104_privring_intr,
};
int
-gm200_ibus_new(struct nvkm_device *device, int index,
- struct nvkm_subdev **pibus)
+gm200_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_subdev **pprivring)
{
- return nvkm_subdev_new_(&gm200_ibus, device, index, pibus);
+ return nvkm_subdev_new_(&gm200_privring, device, type, inst, pprivring);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gp10b.c
index 0347b367cefe..4534111cf907 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gp10b.c
@@ -19,14 +19,14 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-#include <subdev/ibus.h>
+#include <subdev/privring.h>
#include "priv.h"
static int
-gp10b_ibus_init(struct nvkm_subdev *ibus)
+gp10b_privring_init(struct nvkm_subdev *privring)
{
- struct nvkm_device *device = ibus->device;
+ struct nvkm_device *device = privring->device;
nvkm_wr32(device, 0x1200a8, 0x0);
@@ -42,14 +42,14 @@ gp10b_ibus_init(struct nvkm_subdev *ibus)
}
static const struct nvkm_subdev_func
-gp10b_ibus = {
- .init = gp10b_ibus_init,
- .intr = gk104_ibus_intr,
+gp10b_privring = {
+ .init = gp10b_privring_init,
+ .intr = gk104_privring_intr,
};
int
-gp10b_ibus_new(struct nvkm_device *device, int index,
- struct nvkm_subdev **pibus)
+gp10b_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_subdev **pprivring)
{
- return nvkm_subdev_new_(&gp10b_ibus, device, index, pibus);
+ return nvkm_subdev_new_(&gp10b_privring, device, type, inst, pprivring);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/privring/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/priv.h
new file mode 100644
index 000000000000..b378c14bc8dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/priv.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_PRIVRING_PRIV_H__
+#define __NVKM_PRIVRING_PRIV_H__
+#include <subdev/privring.h>
+
+void gf100_privring_intr(struct nvkm_subdev *);
+void gk104_privring_intr(struct nvkm_subdev *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 4a4d1e224126..fc5ee118e910 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -421,10 +421,10 @@ nvkm_therm = {
};
void
-nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device,
- int index, const struct nvkm_therm_func *func)
+nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device, enum nvkm_subdev_type type,
+ int inst, const struct nvkm_therm_func *func)
{
- nvkm_subdev_ctor(&nvkm_therm, device, index, &therm->subdev);
+ nvkm_subdev_ctor(&nvkm_therm, device, type, inst, &therm->subdev);
therm->func = func;
nvkm_alarm_init(&therm->alarm, nvkm_therm_alarm);
@@ -443,13 +443,13 @@ nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device,
int
nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
- int index, struct nvkm_therm **ptherm)
+ enum nvkm_subdev_type type, int inst, struct nvkm_therm **ptherm)
{
struct nvkm_therm *therm;
if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
return -ENOMEM;
- nvkm_therm_ctor(therm, device, index, func);
+ nvkm_therm_ctor(therm, device, type, inst, func);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
index 96f8da40ac82..4af86f2d3e7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
@@ -223,12 +223,13 @@ g84_therm = {
};
int
-g84_therm_new(struct nvkm_device *device, int index, struct nvkm_therm **ptherm)
+g84_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_therm **ptherm)
{
struct nvkm_therm *therm;
int ret;
- ret = nvkm_therm_new_(&g84_therm, device, index, &therm);
+ ret = nvkm_therm_new_(&g84_therm, device, type, inst, &therm);
*ptherm = therm;
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
index 0981b02790e2..2b031d4eaeb6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
@@ -146,8 +146,8 @@ gf119_therm = {
};
int
-gf119_therm_new(struct nvkm_device *device, int index,
- struct nvkm_therm **ptherm)
+gf119_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_therm **ptherm)
{
- return nvkm_therm_new_(&gf119_therm, device, index, ptherm);
+ return nvkm_therm_new_(&gf119_therm, device, type, inst, ptherm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c
index 4e03971d2e3d..45e295c271fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c
@@ -35,8 +35,8 @@ gk104_clkgate_enable(struct nvkm_therm *base)
int i;
/* Program ENG_MANT, ENG_FILTER */
- for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
- if (!nvkm_device_subdev(dev, order[i].engine))
+ for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) {
+ if (!nvkm_device_subdev(dev, order[i].type, order[i].inst))
continue;
nvkm_mask(dev, 0x20200 + order[i].offset, 0xff00, 0x4500);
@@ -47,8 +47,8 @@ gk104_clkgate_enable(struct nvkm_therm *base)
nvkm_wr32(dev, 0x02028c, therm->idle_filter->hubmmu);
/* Enable clockgating (ENG_CLK = RUN->AUTO) */
- for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
- if (!nvkm_device_subdev(dev, order[i].engine))
+ for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) {
+ if (!nvkm_device_subdev(dev, order[i].type, order[i].inst))
continue;
nvkm_mask(dev, 0x20200 + order[i].offset, 0x00ff, 0x0045);
@@ -64,8 +64,8 @@ gk104_clkgate_fini(struct nvkm_therm *base, bool suspend)
int i;
/* ENG_CLK = AUTO->RUN, ENG_PWR = RUN->AUTO */
- for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
- if (!nvkm_device_subdev(dev, order[i].engine))
+ for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) {
+ if (!nvkm_device_subdev(dev, order[i].type, order[i].inst))
continue;
nvkm_mask(dev, 0x20200 + order[i].offset, 0xff, 0x54);
@@ -73,15 +73,15 @@ gk104_clkgate_fini(struct nvkm_therm *base, bool suspend)
}
const struct gk104_clkgate_engine_info gk104_clkgate_engine_info[] = {
- { NVKM_ENGINE_GR, 0x00 },
- { NVKM_ENGINE_MSPDEC, 0x04 },
- { NVKM_ENGINE_MSPPP, 0x08 },
- { NVKM_ENGINE_MSVLD, 0x0c },
- { NVKM_ENGINE_CE0, 0x10 },
- { NVKM_ENGINE_CE1, 0x14 },
- { NVKM_ENGINE_MSENC, 0x18 },
- { NVKM_ENGINE_CE2, 0x1c },
- { NVKM_SUBDEV_NR, 0 },
+ { NVKM_ENGINE_GR, 0, 0x00 },
+ { NVKM_ENGINE_MSPDEC, 0, 0x04 },
+ { NVKM_ENGINE_MSPPP, 0, 0x08 },
+ { NVKM_ENGINE_MSVLD, 0, 0x0c },
+ { NVKM_ENGINE_CE, 0, 0x10 },
+ { NVKM_ENGINE_CE, 1, 0x14 },
+ { NVKM_ENGINE_MSENC, 0, 0x18 },
+ { NVKM_ENGINE_CE, 2, 0x1c },
+ { NVKM_SUBDEV_NR },
};
const struct gf100_idle_filter gk104_idle_filter = {
@@ -106,9 +106,8 @@ gk104_therm_func = {
};
static int
-gk104_therm_new_(const struct nvkm_therm_func *func,
- struct nvkm_device *device,
- int index,
+gk104_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst,
const struct gk104_clkgate_engine_info *clkgate_order,
const struct gf100_idle_filter *idle_filter,
struct nvkm_therm **ptherm)
@@ -118,19 +117,17 @@ gk104_therm_new_(const struct nvkm_therm_func *func,
if (!therm)
return -ENOMEM;
- nvkm_therm_ctor(&therm->base, device, index, func);
+ nvkm_therm_ctor(&therm->base, device, type, inst, func);
*ptherm = &therm->base;
therm->clkgate_order = clkgate_order;
therm->idle_filter = idle_filter;
-
return 0;
}
int
-gk104_therm_new(struct nvkm_device *device,
- int index, struct nvkm_therm **ptherm)
+gk104_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_therm **ptherm)
{
- return gk104_therm_new_(&gk104_therm_func, device, index,
+ return gk104_therm_new_(&gk104_therm_func, device, type, inst,
gk104_clkgate_engine_info, &gk104_idle_filter,
ptherm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h
index 293e7743b19b..9a8641421038 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h
@@ -31,7 +31,8 @@
#include "gf100.h"
struct gk104_clkgate_engine_info {
- enum nvkm_devidx engine;
+ enum nvkm_subdev_type type;
+ int inst;
u8 offset;
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
index 86848ece4d89..c845fd392f58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
@@ -68,8 +68,8 @@ gm107_therm = {
};
int
-gm107_therm_new(struct nvkm_device *device, int index,
+gm107_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_therm **ptherm)
{
- return nvkm_therm_new_(&gm107_therm, device, index, ptherm);
+ return nvkm_therm_new_(&gm107_therm, device, type, inst, ptherm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c
index 73dc78093d5d..e0cdd12463ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c
@@ -32,8 +32,8 @@ gm200_therm = {
};
int
-gm200_therm_new(struct nvkm_device *device, int index,
+gm200_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_therm **ptherm)
{
- return nvkm_therm_new_(&gm200_therm, device, index, ptherm);
+ return nvkm_therm_new_(&gm200_therm, device, type, inst, ptherm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
index 9f0dea3f61dc..44f021392b95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
@@ -49,8 +49,8 @@ gp100_therm = {
};
int
-gp100_therm_new(struct nvkm_device *device, int index,
+gp100_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_therm **ptherm)
{
- return nvkm_therm_new_(&gp100_therm, device, index, ptherm);
+ return nvkm_therm_new_(&gp100_therm, device, type, inst, ptherm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
index c08097f2aff5..9e451bd9395c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
@@ -68,8 +68,8 @@ gt215_therm = {
};
int
-gt215_therm_new(struct nvkm_device *device, int index,
- struct nvkm_therm **ptherm)
+gt215_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_therm **ptherm)
{
- return nvkm_therm_new_(&gt215_therm, device, index, ptherm);
+ return nvkm_therm_new_(&gt215_therm, device, type, inst, ptherm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
index 2c92ffb5f9d0..c13fee9734df 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
@@ -197,8 +197,8 @@ nv40_therm = {
};
int
-nv40_therm_new(struct nvkm_device *device, int index,
+nv40_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_therm **ptherm)
{
- return nvkm_therm_new_(&nv40_therm, device, index, ptherm);
+ return nvkm_therm_new_(&nv40_therm, device, type, inst, ptherm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
index 9b57b433d4cf..9cf16a75a3cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
@@ -169,8 +169,8 @@ nv50_therm = {
};
int
-nv50_therm_new(struct nvkm_device *device, int index,
+nv50_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_therm **ptherm)
{
- return nvkm_therm_new_(&nv50_therm, device, index, ptherm);
+ return nvkm_therm_new_(&nv50_therm, device, type, inst, ptherm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
index 21659daf1864..54e960589411 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
@@ -30,10 +30,10 @@
#include <subdev/bios/gpio.h>
#include <subdev/bios/perf.h>
-int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *,
- int index, struct nvkm_therm **);
-void nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device,
- int index, const struct nvkm_therm_func *func);
+int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *, enum nvkm_subdev_type,
+ int, struct nvkm_therm **);
+void nvkm_therm_ctor(struct nvkm_therm *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ const struct nvkm_therm_func *);
struct nvkm_fan {
struct nvkm_therm *parent;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index dd922033628c..8b0da0c06268 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -183,14 +183,14 @@ nvkm_timer = {
int
nvkm_timer_new_(const struct nvkm_timer_func *func, struct nvkm_device *device,
- int index, struct nvkm_timer **ptmr)
+ enum nvkm_subdev_type type, int inst, struct nvkm_timer **ptmr)
{
struct nvkm_timer *tmr;
if (!(tmr = *ptmr = kzalloc(sizeof(*tmr), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_timer, device, index, &tmr->subdev);
+ nvkm_subdev_ctor(&nvkm_timer, device, type, inst, &tmr->subdev);
tmr->func = func;
INIT_LIST_HEAD(&tmr->alarms);
spin_lock_init(&tmr->lock);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
index 9ed5f64912d0..73c3776b6b83 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
@@ -33,7 +33,8 @@ gk20a_timer = {
};
int
-gk20a_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+gk20a_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_timer **ptmr)
{
- return nvkm_timer_new_(&gk20a_timer, device, index, ptmr);
+ return nvkm_timer_new_(&gk20a_timer, device, type, inst, ptmr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
index 7f48249f41de..0058e856b378 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
@@ -145,7 +145,8 @@ nv04_timer = {
};
int
-nv04_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+nv04_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_timer **ptmr)
{
- return nvkm_timer_new_(&nv04_timer, device, index, ptmr);
+ return nvkm_timer_new_(&nv04_timer, device, type, inst, ptmr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
index bb99a152f26e..7e1f8c22f2a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
@@ -82,7 +82,8 @@ nv40_timer = {
};
int
-nv40_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+nv40_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_timer **ptmr)
{
- return nvkm_timer_new_(&nv40_timer, device, index, ptmr);
+ return nvkm_timer_new_(&nv40_timer, device, type, inst, ptmr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
index 3cf9ec1b1b57..c2b263721f10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
@@ -79,7 +79,8 @@ nv41_timer = {
};
int
-nv41_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+nv41_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_timer **ptmr)
{
- return nvkm_timer_new_(&nv41_timer, device, index, ptmr);
+ return nvkm_timer_new_(&nv41_timer, device, type, inst, ptmr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
index 89e97294b182..e6debe7e2fa9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
@@ -4,8 +4,8 @@
#define nvkm_timer(p) container_of((p), struct nvkm_timer, subdev)
#include <subdev/timer.h>
-int nvkm_timer_new_(const struct nvkm_timer_func *, struct nvkm_device *,
- int index, struct nvkm_timer **);
+int nvkm_timer_new_(const struct nvkm_timer_func *, struct nvkm_device *, enum nvkm_subdev_type,
+ int, struct nvkm_timer **);
struct nvkm_timer_func {
void (*init)(struct nvkm_timer *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild
index 438d9d78ab52..d5db845195dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/top/base.o
nvkm-y += nvkm/subdev/top/gk104.o
+nvkm-y += nvkm/subdev/top/ga100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
index cce6e4e90ebf..28d0789f50fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
@@ -28,7 +28,8 @@ nvkm_top_device_new(struct nvkm_top *top)
{
struct nvkm_top_device *info = kmalloc(sizeof(*info), GFP_KERNEL);
if (info) {
- info->index = NVKM_SUBDEV_NR;
+ info->type = NVKM_SUBDEV_NR;
+ info->inst = -1;
info->addr = 0;
info->fault = -1;
info->engine = -1;
@@ -41,14 +42,14 @@ nvkm_top_device_new(struct nvkm_top *top)
}
u32
-nvkm_top_addr(struct nvkm_device *device, enum nvkm_devidx index)
+nvkm_top_addr(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
if (top) {
list_for_each_entry(info, &top->device, head) {
- if (info->index == index)
+ if (info->type == type && info->inst == inst)
return info->addr;
}
}
@@ -57,14 +58,14 @@ nvkm_top_addr(struct nvkm_device *device, enum nvkm_devidx index)
}
u32
-nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index)
+nvkm_top_reset(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
if (top) {
list_for_each_entry(info, &top->device, head) {
- if (info->index == index && info->reset >= 0)
+ if (info->type == type && info->inst == inst && info->reset >= 0)
return BIT(info->reset);
}
}
@@ -73,14 +74,14 @@ nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index)
}
u32
-nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
if (top) {
list_for_each_entry(info, &top->device, head) {
- if (info->index == devidx && info->intr >= 0)
+ if (info->type == type && info->inst == inst && info->intr >= 0)
return BIT(info->intr);
}
}
@@ -88,44 +89,21 @@ nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx)
return 0;
}
-u32
-nvkm_top_intr(struct nvkm_device *device, u32 intr, u64 *psubdevs)
-{
- struct nvkm_top *top = device->top;
- struct nvkm_top_device *info;
- u64 subdevs = 0;
- u32 handled = 0;
-
- if (top) {
- list_for_each_entry(info, &top->device, head) {
- if (info->index != NVKM_SUBDEV_NR && info->intr >= 0) {
- if (intr & BIT(info->intr)) {
- subdevs |= BIT_ULL(info->index);
- handled |= BIT(info->intr);
- }
- }
- }
- }
-
- *psubdevs = subdevs;
- return intr & ~handled;
-}
-
int
-nvkm_top_fault_id(struct nvkm_device *device, enum nvkm_devidx devidx)
+nvkm_top_fault_id(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
list_for_each_entry(info, &top->device, head) {
- if (info->index == devidx && info->fault >= 0)
+ if (info->type == type && info->inst == inst && info->fault >= 0)
return info->fault;
}
return -ENOENT;
}
-enum nvkm_devidx
+struct nvkm_subdev *
nvkm_top_fault(struct nvkm_device *device, int fault)
{
struct nvkm_top *top = device->top;
@@ -133,28 +111,10 @@ nvkm_top_fault(struct nvkm_device *device, int fault)
list_for_each_entry(info, &top->device, head) {
if (info->fault == fault)
- return info->index;
- }
-
- return NVKM_SUBDEV_NR;
-}
-
-enum nvkm_devidx
-nvkm_top_engine(struct nvkm_device *device, int index, int *runl, int *engn)
-{
- struct nvkm_top *top = device->top;
- struct nvkm_top_device *info;
- int n = 0;
-
- list_for_each_entry(info, &top->device, head) {
- if (info->engine >= 0 && info->runlist >= 0 && n++ == index) {
- *runl = info->runlist;
- *engn = info->engine;
- return info->index;
- }
+ return nvkm_device_subdev(device, info->type, info->inst);
}
- return -ENODEV;
+ return NULL;
}
static int
@@ -186,12 +146,12 @@ nvkm_top = {
int
nvkm_top_new_(const struct nvkm_top_func *func, struct nvkm_device *device,
- int index, struct nvkm_top **ptop)
+ enum nvkm_subdev_type type, int inst, struct nvkm_top **ptop)
{
struct nvkm_top *top;
if (!(top = *ptop = kzalloc(sizeof(*top), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_top, device, index, &top->subdev);
+ nvkm_subdev_ctor(&nvkm_top, device, type, inst, &top->subdev);
top->func = func;
INIT_LIST_HEAD(&top->device);
return 0;
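
Note (illustration only, not part of this patch): with the rework above, the nvkm_top lookup helpers key on an explicit (enum nvkm_subdev_type, instance) pair instead of a flat nvkm_devidx, and nvkm_top_fault() now resolves a fault ID directly to a subdev pointer. A minimal sketch of a hypothetical caller, using only the signatures visible in this hunk (example_top_queries() itself is invented for illustration):

	/* Hypothetical caller, for illustration only -- not from this patch. */
	static void
	example_top_queries(struct nvkm_device *device, int fault)
	{
		/* Per-instance queries now take a (type, inst) pair. */
		u32 gr_addr  = nvkm_top_addr(device, NVKM_ENGINE_GR, 0);
		int gr_fault = nvkm_top_fault_id(device, NVKM_ENGINE_GR, 0);
		/* nvkm_top_fault() returns the subdev itself rather than a devidx. */
		struct nvkm_subdev *subdev = nvkm_top_fault(device, fault);

		if (subdev)
			nvkm_debug(subdev, "fault %d: GR base %06x, GR fault id %d\n",
				   fault, gr_addr, gr_fault);
	}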
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
new file mode 100644
index 000000000000..31933f3e5a07
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static int
+ga100_top_oneinit(struct nvkm_top *top)
+{
+ struct nvkm_subdev *subdev = &top->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_top_device *info = NULL;
+ u32 data, type, inst;
+ int i, n, size = nvkm_rd32(device, 0x0224fc) >> 20;
+
+ for (i = 0, n = 0; i < size; i++) {
+ if (!info) {
+ if (!(info = nvkm_top_device_new(top)))
+ return -ENOMEM;
+ type = ~0;
+ inst = 0;
+ }
+
+ data = nvkm_rd32(device, 0x022800 + (i * 0x04));
+ nvkm_trace(subdev, "%02x: %08x\n", i, data);
+ if (!data && n == 0)
+ continue;
+
+ switch (n++) {
+ case 0:
+ type = (data & 0x3f000000) >> 24;
+ inst = (data & 0x000f0000) >> 16;
+ info->fault = (data & 0x0000007f);
+ break;
+ case 1:
+ info->addr = (data & 0x00fff000);
+ info->reset = (data & 0x0000001f);
+ break;
+ case 2:
+ info->runlist = (data & 0x0000fc00) >> 10;
+ info->engine = (data & 0x00000003);
+ break;
+ default:
+ break;
+ }
+
+ if (data & 0x80000000)
+ continue;
+ n = 0;
+
+ /* Translate engine type to NVKM engine identifier. */
+#define I_(T,I) do { info->type = (T); info->inst = (I); } while(0)
+#define O_(T,I) do { WARN_ON(inst); I_(T, I); } while (0)
+ switch (type) {
+ case 0x00000000: O_(NVKM_ENGINE_GR , 0); break;
+ case 0x0000000d: O_(NVKM_ENGINE_SEC2 , 0); break;
+ case 0x0000000e: I_(NVKM_ENGINE_NVENC , inst); break;
+ case 0x00000010: I_(NVKM_ENGINE_NVDEC , inst); break;
+ case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break;
+ case 0x00000013: I_(NVKM_ENGINE_CE , inst); break;
+ case 0x00000014: O_(NVKM_SUBDEV_GSP , 0); break;
+ case 0x00000015: O_(NVKM_ENGINE_NVJPG , 0); break;
+ case 0x00000016: O_(NVKM_ENGINE_OFA , 0); break;
+ case 0x00000017: O_(NVKM_SUBDEV_FLA , 0); break;
+ break;
+ default:
+ break;
+ }
+
+ nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
+ "runlist %2d engine %2d reset %2d\n", type, inst,
+ info->type == NVKM_SUBDEV_NR ? "????????" : nvkm_subdev_type[info->type],
+ info->addr, info->fault, info->runlist, info->engine, info->reset);
+ info = NULL;
+ }
+
+ return 0;
+}
+
+static const struct nvkm_top_func
+ga100_top = {
+ .oneinit = ga100_top_oneinit,
+};
+
+int
+ga100_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_top **ptop)
+{
+ return nvkm_top_new_(&ga100_top, device, type, inst, ptop);
+}
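
For reference when reading ga100_top_oneinit() above: each device-info entry spans up to three words, and a set bit 31 means the entry continues in the next word. As a hypothetical worked example (the value is invented, the masks are taken from the code above), a first word of 0x8000100a would decode as:

	type  = (0x8000100a & 0x3f000000) >> 24 = 0x00
	inst  = (0x8000100a & 0x000f0000) >> 16 = 1
	fault =  0x8000100a & 0x0000007f        = 0x0a (10)
	bit 31 set -> the entry continues in the following word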
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
index 1156634533f9..4dcad97bd505 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -70,26 +70,26 @@ gk104_top_oneinit(struct nvkm_top *top)
continue;
/* Translate engine type to NVKM engine identifier. */
-#define A_(A) if (inst == 0) info->index = NVKM_ENGINE_##A
-#define B_(A) if (inst + NVKM_ENGINE_##A##0 < NVKM_ENGINE_##A##_LAST + 1) \
- info->index = NVKM_ENGINE_##A##0 + inst
-#define C_(A) if (inst == 0) info->index = NVKM_SUBDEV_##A
+#define I_(T,I) do { info->type = (T); info->inst = (I); } while(0)
+#define O_(T,I) do { WARN_ON(inst); I_(T, I); } while (0)
switch (type) {
- case 0x00000000: A_(GR ); break;
- case 0x00000001: A_(CE0 ); break;
- case 0x00000002: A_(CE1 ); break;
- case 0x00000003: A_(CE2 ); break;
- case 0x00000008: A_(MSPDEC); break;
- case 0x00000009: A_(MSPPP ); break;
- case 0x0000000a: A_(MSVLD ); break;
- case 0x0000000b: A_(MSENC ); break;
- case 0x0000000c: A_(VIC ); break;
- case 0x0000000d: A_(SEC2 ); break;
- case 0x0000000e: B_(NVENC ); break;
- case 0x0000000f: A_(NVENC1); break;
- case 0x00000010: B_(NVDEC ); break;
- case 0x00000013: B_(CE ); break;
- case 0x00000014: C_(GSP ); break;
+ case 0x00000000: O_(NVKM_ENGINE_GR , 0); break;
+ case 0x00000001: O_(NVKM_ENGINE_CE , 0); break;
+ case 0x00000002: O_(NVKM_ENGINE_CE , 1); break;
+ case 0x00000003: O_(NVKM_ENGINE_CE , 2); break;
+ case 0x00000008: O_(NVKM_ENGINE_MSPDEC, 0); break;
+ case 0x00000009: O_(NVKM_ENGINE_MSPPP , 0); break;
+ case 0x0000000a: O_(NVKM_ENGINE_MSVLD , 0); break;
+ case 0x0000000b: O_(NVKM_ENGINE_MSENC , 0); break;
+ case 0x0000000c: O_(NVKM_ENGINE_VIC , 0); break;
+ case 0x0000000d: O_(NVKM_ENGINE_SEC2 , 0); break;
+ case 0x0000000e: I_(NVKM_ENGINE_NVENC , inst); break;
+ case 0x0000000f: O_(NVKM_ENGINE_NVENC , 1); break;
+ case 0x00000010: I_(NVKM_ENGINE_NVDEC , inst); break;
+ case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break;
+ case 0x00000013: I_(NVKM_ENGINE_CE , inst); break;
+ case 0x00000014: O_(NVKM_SUBDEV_GSP , 0); break;
+ case 0x00000015: O_(NVKM_ENGINE_NVJPG , 0); break;
default:
break;
}
@@ -97,8 +97,7 @@ gk104_top_oneinit(struct nvkm_top *top)
nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
"engine %2d runlist %2d intr %2d "
"reset %2d\n", type, inst,
- info->index == NVKM_SUBDEV_NR ? NULL :
- nvkm_subdev_name[info->index],
+ info->type == NVKM_SUBDEV_NR ? "????????" : nvkm_subdev_type[info->type],
info->addr, info->fault, info->engine, info->runlist,
info->intr, info->reset);
info = NULL;
@@ -113,7 +112,8 @@ gk104_top = {
};
int
-gk104_top_new(struct nvkm_device *device, int index, struct nvkm_top **ptop)
+gk104_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_top **ptop)
{
- return nvkm_top_new_(&gk104_top, device, index, ptop);
+ return nvkm_top_new_(&gk104_top, device, type, inst, ptop);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
index a16baa2941cf..8e103a836705 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
@@ -8,19 +8,8 @@ struct nvkm_top_func {
int (*oneinit)(struct nvkm_top *);
};
-int nvkm_top_new_(const struct nvkm_top_func *, struct nvkm_device *,
- int, struct nvkm_top **);
-
-struct nvkm_top_device {
- enum nvkm_devidx index;
- u32 addr;
- int fault;
- int engine;
- int runlist;
- int reset;
- int intr;
- struct list_head head;
-};
+int nvkm_top_new_(const struct nvkm_top_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_top **);
struct nvkm_top_device *nvkm_top_device_new(struct nvkm_top *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index e344901cfdc7..a17a6dd8d3de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -281,12 +281,12 @@ nvkm_volt = {
void
nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
- int index, struct nvkm_volt *volt)
+ enum nvkm_subdev_type type, int inst, struct nvkm_volt *volt)
{
struct nvkm_bios *bios = device->bios;
int i;
- nvkm_subdev_ctor(&nvkm_volt, device, index, &volt->subdev);
+ nvkm_subdev_ctor(&nvkm_volt, device, type, inst, &volt->subdev);
volt->func = func;
/* Assuming the non-bios device should build the voltage table later */
@@ -319,10 +319,10 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
int
nvkm_volt_new_(const struct nvkm_volt_func *func, struct nvkm_device *device,
- int index, struct nvkm_volt **pvolt)
+ enum nvkm_subdev_type type, int inst, struct nvkm_volt **pvolt)
{
if (!(*pvolt = kzalloc(sizeof(**pvolt), GFP_KERNEL)))
return -ENOMEM;
- nvkm_volt_ctor(func, device, index, *pvolt);
+ nvkm_volt_ctor(func, device, type, inst, *pvolt);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
index d9ed6925ca64..b47a1c0817be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c
@@ -56,12 +56,13 @@ gf100_volt = {
};
int
-gf100_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+gf100_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_volt **pvolt)
{
struct nvkm_volt *volt;
int ret;
- ret = nvkm_volt_new_(&gf100_volt, device, index, &volt);
+ ret = nvkm_volt_new_(&gf100_volt, device, type, inst, &volt);
*pvolt = volt;
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
index 547a58f0aeac..03c8a2c2916c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
@@ -46,12 +46,13 @@ gf117_volt = {
};
int
-gf117_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+gf117_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_volt **pvolt)
{
struct nvkm_volt *volt;
int ret;
- ret = nvkm_volt_new_(&gf117_volt, device, index, &volt);
+ ret = nvkm_volt_new_(&gf117_volt, device, type, inst, &volt);
*pvolt = volt;
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
index 1c744e029454..d1ce4309cfb8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
@@ -95,7 +95,8 @@ gk104_volt_pwm = {
};
int
-gk104_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+gk104_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_volt **pvolt)
{
const struct nvkm_volt_func *volt_func = &gk104_volt_gpio;
struct dcb_gpio_func gpio;
@@ -114,7 +115,7 @@ gk104_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
if (!(volt = kzalloc(sizeof(*volt), GFP_KERNEL)))
return -ENOMEM;
- nvkm_volt_ctor(volt_func, device, index, &volt->base);
+ nvkm_volt_ctor(volt_func, device, type, inst, &volt->base);
*pvolt = &volt->base;
volt->bios = bios;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
index ce5d83cdc7cf..8c2faa964511 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
@@ -144,14 +144,14 @@ gk20a_volt = {
};
int
-gk20a_volt_ctor(struct nvkm_device *device, int index,
+gk20a_volt_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
const struct cvb_coef *coefs, int nb_coefs,
int vmin, struct gk20a_volt *volt)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
int i, uv;
- nvkm_volt_ctor(&gk20a_volt, device, index, &volt->base);
+ nvkm_volt_ctor(&gk20a_volt, device, type, inst, &volt->base);
uv = regulator_get_voltage(tdev->vdd);
nvkm_debug(&volt->base.subdev, "the default voltage is %duV\n", uv);
@@ -172,7 +172,7 @@ gk20a_volt_ctor(struct nvkm_device *device, int index,
}
int
-gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+gk20a_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_volt **pvolt)
{
struct gk20a_volt *volt;
@@ -181,6 +181,6 @@ gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
return -ENOMEM;
*pvolt = &volt->base;
- return gk20a_volt_ctor(device, index, gk20a_cvb_coef,
+ return gk20a_volt_ctor(device, type, inst, gk20a_cvb_coef,
ARRAY_SIZE(gk20a_cvb_coef), 0, volt);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
index 6a6c97f9684e..01f8a5fcf496 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
@@ -37,7 +37,7 @@ struct gk20a_volt {
struct regulator *vdd;
};
-int gk20a_volt_ctor(struct nvkm_device *device, int index,
+int gk20a_volt_ctor(struct nvkm_device *device, enum nvkm_subdev_type, int,
const struct cvb_coef *coefs, int nb_coefs,
int vmin, struct gk20a_volt *volt);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
index 2925b9cae681..c2e9694d333f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
@@ -64,7 +64,8 @@ static const u32 speedo_to_vmin[] = {
};
int
-gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+gm20b_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_volt **pvolt)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
struct gk20a_volt *volt;
@@ -84,9 +85,9 @@ gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
vmin = speedo_to_vmin[tdev->gpu_speedo_id];
if (tdev->gpu_speedo_id >= 1)
- return gk20a_volt_ctor(device, index, gm20b_na_cvb_coef,
- ARRAY_SIZE(gm20b_na_cvb_coef), vmin, volt);
+ return gk20a_volt_ctor(device, type, inst, gm20b_na_cvb_coef,
+ ARRAY_SIZE(gm20b_na_cvb_coef), vmin, volt);
else
- return gk20a_volt_ctor(device, index, gm20b_cvb_coef,
- ARRAY_SIZE(gm20b_cvb_coef), vmin, volt);
+ return gk20a_volt_ctor(device, type, inst, gm20b_cvb_coef,
+ ARRAY_SIZE(gm20b_cvb_coef), vmin, volt);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
index 23409387abb5..d6a587d6082d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
@@ -30,12 +30,13 @@ nv40_volt = {
};
int
-nv40_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+nv40_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_volt **pvolt)
{
struct nvkm_volt *volt;
int ret;
- ret = nvkm_volt_new_(&nv40_volt, device, index, &volt);
+ ret = nvkm_volt_new_(&nv40_volt, device, type, inst, &volt);
*pvolt = volt;
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
index 75f13a34671f..24e2d16d1913 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -4,10 +4,10 @@
#define nvkm_volt(p) container_of((p), struct nvkm_volt, subdev)
#include <subdev/volt.h>
-void nvkm_volt_ctor(const struct nvkm_volt_func *, struct nvkm_device *,
- int index, struct nvkm_volt *);
-int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *,
- int index, struct nvkm_volt **);
+void nvkm_volt_ctor(const struct nvkm_volt_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_volt *);
+int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_volt **);
struct nvkm_volt_func {
int (*oneinit)(struct nvkm_volt *);
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 5417e7a47072..e7281da5bc6a 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -5,13 +5,129 @@ config DRM_OMAP
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
select OMAP2_DSS
select DRM_KMS_HELPER
+ select VIDEOMODE_HELPERS
+ select HDMI
default n
help
DRM display driver for OMAP2/3/4 based boards.
if DRM_OMAP
-source "drivers/gpu/drm/omapdrm/dss/Kconfig"
-source "drivers/gpu/drm/omapdrm/displays/Kconfig"
+config OMAP2_DSS_DEBUG
+ bool "Debug support"
+ default n
+ help
+ This enables printing of debug messages. Alternatively, debug messages
+ can be enabled by setting CONFIG_DYNAMIC_DEBUG and then setting
+ appropriate flags in <debugfs>/dynamic_debug/control.
+
+config OMAP2_DSS_DEBUGFS
+ bool "Debugfs filesystem support"
+ depends on DEBUG_FS
+ default n
+ help
+ This enables debugfs for OMAPDSS at <debugfs>/omapdss. It allows
+ querying the clock and register configuration of dss, dispc, dsi,
+ hdmi and rfbi.
+
+config OMAP2_DSS_COLLECT_IRQ_STATS
+ bool "Collect DSS IRQ statistics"
+ depends on OMAP2_DSS_DEBUGFS
+ default n
+ help
+ Collect DSS IRQ statistics, printable via debugfs.
+
+ The statistics can be found in
+ <debugfs>/omapdss/dispc_irq for DISPC interrupts, and
+ <debugfs>/omapdss/dsi_irq for DSI interrupts.
+
+config OMAP2_DSS_DPI
+ bool "DPI support"
+ default y
+ help
+ DPI Interface. This is the Parallel Display Interface.
+
+config OMAP2_DSS_VENC
+ bool "VENC support"
+ default y
+ help
+ OMAP Video Encoder support for S-Video and composite TV-out.
+
+config OMAP2_DSS_HDMI_COMMON
+ bool
+
+config OMAP4_DSS_HDMI
+ bool "HDMI support for OMAP4"
+ default y
+ select OMAP2_DSS_HDMI_COMMON
+ help
+ HDMI support for OMAP4 based SoCs.
+
+config OMAP4_DSS_HDMI_CEC
+ bool "Enable HDMI CEC support for OMAP4"
+ depends on OMAP4_DSS_HDMI
+ select CEC_CORE
+ default y
+ help
+ When selected, the HDMI transmitter will support the CEC feature.
+
+config OMAP5_DSS_HDMI
+ bool "HDMI support for OMAP5"
+ default n
+ select OMAP2_DSS_HDMI_COMMON
+ help
+ HDMI Interface for OMAP5 and similar cores. This adds support for the
+ High Definition Multimedia Interface. See http://www.hdmi.org/ for the
+ HDMI specification.
+
+config OMAP2_DSS_SDI
+ bool "SDI support"
+ default n
+ help
+ SDI (Serial Display Interface) support.
+
+ SDI is a high speed one-way display serial bus between the host
+ processor and a display.
+
+config OMAP2_DSS_DSI
+ bool "DSI support"
+ default n
+ select DRM_MIPI_DSI
+ help
+ MIPI DSI (Display Serial Interface) support.
+
+ DSI is a high speed half-duplex serial interface between the host
+ processor and a peripheral, such as a display or a framebuffer chip.
+
+ See http://www.mipi.org/ for DSI specifications.
+
+config OMAP2_DSS_MIN_FCK_PER_PCK
+ int "Minimum FCK/PCK ratio (for scaling)"
+ range 0 32
+ default 0
+ help
+ This can be used to adjust the minimum FCK/PCK ratio.
+
+ With this you can make sure that DISPC FCK is at least
+ n x PCK. Video plane scaling requires a higher FCK than
+ normal.
+
+ If this is set to 0, there's no extra constraint on the
+ DISPC FCK. However, the FCK will at minimum be
+ 2xPCK (if active matrix) or 3xPCK (if passive matrix).
+
+ Max FCK is 173MHz, so this doesn't work if your PCK
+ is very high.
+
+config OMAP2_DSS_SLEEP_AFTER_VENC_RESET
+ bool "Sleep 20ms after VENC reset"
+ default y
+ help
+ There is a 20ms sleep after VENC reset which seems to be needed for
+ the reset to complete reliably. The reason for the bug is unclear, and
+ it is also unclear on which platforms it happens.
+
+ This option enables the sleep, and is enabled by default. You can
+ disable the sleep if doing so does not cause problems on your platform.
endif
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index f115253115c5..21e8277ff88f 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -4,16 +4,12 @@
# Direct Rendering Infrastructure (DRI)
#
-obj-y += dss/
-obj-y += displays/
-
omapdrm-y := omap_drv.o \
omap_irq.o \
omap_debugfs.o \
omap_crtc.o \
omap_plane.o \
omap_encoder.o \
- omap_connector.o \
omap_fb.o \
omap_gem.o \
omap_gem_dmabuf.o \
@@ -22,4 +18,17 @@ omapdrm-y := omap_drv.o \
omapdrm-$(CONFIG_DRM_FBDEV_EMULATION) += omap_fbdev.o
-obj-$(CONFIG_DRM_OMAP) += omapdrm.o
+omapdrm-y += dss/base.o dss/output.o dss/dss.o dss/dispc.o \
+ dss/dispc_coefs.o dss/pll.o dss/video-pll.o
+omapdrm-$(CONFIG_OMAP2_DSS_DPI) += dss/dpi.o
+omapdrm-$(CONFIG_OMAP2_DSS_VENC) += dss/venc.o
+omapdrm-$(CONFIG_OMAP2_DSS_SDI) += dss/sdi.o
+omapdrm-$(CONFIG_OMAP2_DSS_DSI) += dss/dsi.o
+omapdrm-$(CONFIG_OMAP2_DSS_HDMI_COMMON) += dss/hdmi_common.o dss/hdmi_wp.o \
+ dss/hdmi_pll.o dss/hdmi_phy.o
+omapdrm-$(CONFIG_OMAP4_DSS_HDMI) += dss/hdmi4.o dss/hdmi4_core.o
+omapdrm-$(CONFIG_OMAP4_DSS_HDMI_CEC) += dss/hdmi4_cec.o
+omapdrm-$(CONFIG_OMAP5_DSS_HDMI) += dss/hdmi5.o dss/hdmi5_core.o
+ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_DRM_OMAP) += omapdrm.o
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
deleted file mode 100644
index f2be594c7eff..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menu "OMAPDRM External Display Device Drivers"
-
-config DRM_OMAP_PANEL_DSI_CM
- tristate "Generic DSI Command Mode Panel"
- depends on BACKLIGHT_CLASS_DEVICE
- help
- Driver for generic DSI command mode panels.
-
-endmenu
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
deleted file mode 100644
index 488ddf153613..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
deleted file mode 100644
index e39ce0c0c9a9..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ /dev/null
@@ -1,1385 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Generic DSI Command Mode panel driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-/* #define DEBUG */
-
-#include <linux/backlight.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sched/signal.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/of_device.h>
-#include <linux/regulator/consumer.h>
-
-#include <drm/drm_connector.h>
-
-#include <video/mipi_display.h>
-#include <video/of_display_timing.h>
-
-#include "../dss/omapdss.h"
-
-/* DSI Virtual channel. Hardcoded for now. */
-#define TCH 0
-
-#define DCS_READ_NUM_ERRORS 0x05
-#define DCS_BRIGHTNESS 0x51
-#define DCS_CTRL_DISPLAY 0x53
-#define DCS_GET_ID1 0xda
-#define DCS_GET_ID2 0xdb
-#define DCS_GET_ID3 0xdc
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
- struct omap_dss_device *src;
-
- struct videomode vm;
-
- struct platform_device *pdev;
-
- struct mutex lock;
-
- struct backlight_device *bldev;
- struct backlight_device *extbldev;
-
- unsigned long hw_guard_end; /* next value of jiffies when we can
- * issue the next sleep in/out command
- */
- unsigned long hw_guard_wait; /* max guard time in jiffies */
-
- /* panel HW configuration from DT or platform data */
- struct gpio_desc *reset_gpio;
- struct gpio_desc *ext_te_gpio;
-
- struct regulator *vpnl;
- struct regulator *vddi;
-
- bool use_dsi_backlight;
-
- int width_mm;
- int height_mm;
-
- struct omap_dsi_pin_config pin_config;
-
- /* runtime variables */
- bool enabled;
-
- bool te_enabled;
-
- atomic_t do_update;
- int channel;
-
- struct delayed_work te_timeout_work;
-
- bool intro_printed;
-
- struct workqueue_struct *workqueue;
-
- bool ulps_enabled;
- unsigned int ulps_timeout;
- struct delayed_work ulps_work;
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static irqreturn_t dsicm_te_isr(int irq, void *data);
-static void dsicm_te_timeout_work_callback(struct work_struct *work);
-static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable);
-
-static int dsicm_panel_reset(struct panel_drv_data *ddata);
-
-static void dsicm_ulps_work(struct work_struct *work);
-
-static void dsicm_bl_power(struct panel_drv_data *ddata, bool enable)
-{
- struct backlight_device *backlight;
-
- if (ddata->bldev)
- backlight = ddata->bldev;
- else if (ddata->extbldev)
- backlight = ddata->extbldev;
- else
- return;
-
- if (enable) {
- backlight->props.fb_blank = FB_BLANK_UNBLANK;
- backlight->props.state = ~(BL_CORE_FBBLANK | BL_CORE_SUSPENDED);
- backlight->props.power = FB_BLANK_UNBLANK;
- } else {
- backlight->props.fb_blank = FB_BLANK_NORMAL;
- backlight->props.power = FB_BLANK_POWERDOWN;
- backlight->props.state |= BL_CORE_FBBLANK | BL_CORE_SUSPENDED;
- }
-
- backlight_update_status(backlight);
-}
-
-static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
-{
- ddata->hw_guard_wait = msecs_to_jiffies(guard_msec);
- ddata->hw_guard_end = jiffies + ddata->hw_guard_wait;
-}
-
-static void hw_guard_wait(struct panel_drv_data *ddata)
-{
- unsigned long wait = ddata->hw_guard_end - jiffies;
-
- if ((long)wait > 0 && wait <= ddata->hw_guard_wait) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(wait);
- }
-}
-
-static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
- u8 buf[1];
-
- r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd, buf, 1);
-
- if (r < 0)
- return r;
-
- *data = buf[0];
-
- return 0;
-}
-
-static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd)
-{
- struct omap_dss_device *src = ddata->src;
-
- return src->ops->dsi.dcs_write(src, ddata->channel, &dcs_cmd, 1);
-}
-
-static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
-{
- struct omap_dss_device *src = ddata->src;
- u8 buf[2] = { dcs_cmd, param };
-
- return src->ops->dsi.dcs_write(src, ddata->channel, buf, 2);
-}
-
-static int dsicm_sleep_in(struct panel_drv_data *ddata)
-
-{
- struct omap_dss_device *src = ddata->src;
- u8 cmd;
- int r;
-
- hw_guard_wait(ddata);
-
- cmd = MIPI_DCS_ENTER_SLEEP_MODE;
- r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, &cmd, 1);
- if (r)
- return r;
-
- hw_guard_start(ddata, 120);
-
- usleep_range(5000, 10000);
-
- return 0;
-}
-
-static int dsicm_sleep_out(struct panel_drv_data *ddata)
-{
- int r;
-
- hw_guard_wait(ddata);
-
- r = dsicm_dcs_write_0(ddata, MIPI_DCS_EXIT_SLEEP_MODE);
- if (r)
- return r;
-
- hw_guard_start(ddata, 120);
-
- usleep_range(5000, 10000);
-
- return 0;
-}
-
-static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3)
-{
- int r;
-
- r = dsicm_dcs_read_1(ddata, DCS_GET_ID1, id1);
- if (r)
- return r;
- r = dsicm_dcs_read_1(ddata, DCS_GET_ID2, id2);
- if (r)
- return r;
- r = dsicm_dcs_read_1(ddata, DCS_GET_ID3, id3);
- if (r)
- return r;
-
- return 0;
-}
-
-static int dsicm_set_update_window(struct panel_drv_data *ddata,
- u16 x, u16 y, u16 w, u16 h)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
- u16 x1 = x;
- u16 x2 = x + w - 1;
- u16 y1 = y;
- u16 y2 = y + h - 1;
-
- u8 buf[5];
- buf[0] = MIPI_DCS_SET_COLUMN_ADDRESS;
- buf[1] = (x1 >> 8) & 0xff;
- buf[2] = (x1 >> 0) & 0xff;
- buf[3] = (x2 >> 8) & 0xff;
- buf[4] = (x2 >> 0) & 0xff;
-
- r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf));
- if (r)
- return r;
-
- buf[0] = MIPI_DCS_SET_PAGE_ADDRESS;
- buf[1] = (y1 >> 8) & 0xff;
- buf[2] = (y1 >> 0) & 0xff;
- buf[3] = (y2 >> 8) & 0xff;
- buf[4] = (y2 >> 0) & 0xff;
-
- r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf));
- if (r)
- return r;
-
- src->ops->dsi.bta_sync(src, ddata->channel);
-
- return r;
-}
-
-static void dsicm_queue_ulps_work(struct panel_drv_data *ddata)
-{
- if (ddata->ulps_timeout > 0)
- queue_delayed_work(ddata->workqueue, &ddata->ulps_work,
- msecs_to_jiffies(ddata->ulps_timeout));
-}
-
-static void dsicm_cancel_ulps_work(struct panel_drv_data *ddata)
-{
- cancel_delayed_work(&ddata->ulps_work);
-}
-
-static int dsicm_enter_ulps(struct panel_drv_data *ddata)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
-
- if (ddata->ulps_enabled)
- return 0;
-
- dsicm_cancel_ulps_work(ddata);
-
- r = _dsicm_enable_te(ddata, false);
- if (r)
- goto err;
-
- if (ddata->ext_te_gpio)
- disable_irq(gpiod_to_irq(ddata->ext_te_gpio));
-
- src->ops->dsi.disable(src, false, true);
-
- ddata->ulps_enabled = true;
-
- return 0;
-
-err:
- dev_err(&ddata->pdev->dev, "enter ULPS failed");
- dsicm_panel_reset(ddata);
-
- ddata->ulps_enabled = false;
-
- dsicm_queue_ulps_work(ddata);
-
- return r;
-}
-
-static int dsicm_exit_ulps(struct panel_drv_data *ddata)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
-
- if (!ddata->ulps_enabled)
- return 0;
-
- src->ops->enable(src);
- src->ops->dsi.enable_hs(src, ddata->channel, true);
-
- r = _dsicm_enable_te(ddata, true);
- if (r) {
- dev_err(&ddata->pdev->dev, "failed to re-enable TE");
- goto err2;
- }
-
- if (ddata->ext_te_gpio)
- enable_irq(gpiod_to_irq(ddata->ext_te_gpio));
-
- dsicm_queue_ulps_work(ddata);
-
- ddata->ulps_enabled = false;
-
- return 0;
-
-err2:
- dev_err(&ddata->pdev->dev, "failed to exit ULPS");
-
- r = dsicm_panel_reset(ddata);
- if (!r) {
- if (ddata->ext_te_gpio)
- enable_irq(gpiod_to_irq(ddata->ext_te_gpio));
- ddata->ulps_enabled = false;
- }
-
- dsicm_queue_ulps_work(ddata);
-
- return r;
-}
-
-static int dsicm_wake_up(struct panel_drv_data *ddata)
-{
- if (ddata->ulps_enabled)
- return dsicm_exit_ulps(ddata);
-
- dsicm_cancel_ulps_work(ddata);
- dsicm_queue_ulps_work(ddata);
- return 0;
-}
-
-static int dsicm_bl_update_status(struct backlight_device *dev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
- struct omap_dss_device *src = ddata->src;
- int r = 0;
- int level;
-
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- level = dev->props.brightness;
- else
- level = 0;
-
- dev_dbg(&ddata->pdev->dev, "update brightness to %d\n", level);
-
- mutex_lock(&ddata->lock);
-
- if (ddata->enabled) {
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (!r)
- r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, level);
-
- src->ops->dsi.bus_unlock(src);
- }
-
- mutex_unlock(&ddata->lock);
-
- return r;
-}
-
-static int dsicm_bl_get_intensity(struct backlight_device *dev)
-{
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- return dev->props.brightness;
-
- return 0;
-}
-
-static const struct backlight_ops dsicm_bl_ops = {
- .get_brightness = dsicm_bl_get_intensity,
- .update_status = dsicm_bl_update_status,
-};
-
-static ssize_t dsicm_num_errors_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- struct omap_dss_device *src = ddata->src;
- u8 errors = 0;
- int r;
-
- mutex_lock(&ddata->lock);
-
- if (ddata->enabled) {
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (!r)
- r = dsicm_dcs_read_1(ddata, DCS_READ_NUM_ERRORS,
- &errors);
-
- src->ops->dsi.bus_unlock(src);
- } else {
- r = -ENODEV;
- }
-
- mutex_unlock(&ddata->lock);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", errors);
-}
-
-static ssize_t dsicm_hw_revision_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- struct omap_dss_device *src = ddata->src;
- u8 id1, id2, id3;
- int r;
-
- mutex_lock(&ddata->lock);
-
- if (ddata->enabled) {
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (!r)
- r = dsicm_get_id(ddata, &id1, &id2, &id3);
-
- src->ops->dsi.bus_unlock(src);
- } else {
- r = -ENODEV;
- }
-
- mutex_unlock(&ddata->lock);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
-}
-
-static ssize_t dsicm_store_ulps(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- struct omap_dss_device *src = ddata->src;
- unsigned long t;
- int r;
-
- r = kstrtoul(buf, 0, &t);
- if (r)
- return r;
-
- mutex_lock(&ddata->lock);
-
- if (ddata->enabled) {
- src->ops->dsi.bus_lock(src);
-
- if (t)
- r = dsicm_enter_ulps(ddata);
- else
- r = dsicm_wake_up(ddata);
-
- src->ops->dsi.bus_unlock(src);
- }
-
- mutex_unlock(&ddata->lock);
-
- if (r)
- return r;
-
- return count;
-}
-
-static ssize_t dsicm_show_ulps(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- unsigned int t;
-
- mutex_lock(&ddata->lock);
- t = ddata->ulps_enabled;
- mutex_unlock(&ddata->lock);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", t);
-}
-
-static ssize_t dsicm_store_ulps_timeout(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- struct omap_dss_device *src = ddata->src;
- unsigned long t;
- int r;
-
- r = kstrtoul(buf, 0, &t);
- if (r)
- return r;
-
- mutex_lock(&ddata->lock);
- ddata->ulps_timeout = t;
-
- if (ddata->enabled) {
- /* dsicm_wake_up will restart the timer */
- src->ops->dsi.bus_lock(src);
- r = dsicm_wake_up(ddata);
- src->ops->dsi.bus_unlock(src);
- }
-
- mutex_unlock(&ddata->lock);
-
- if (r)
- return r;
-
- return count;
-}
-
-static ssize_t dsicm_show_ulps_timeout(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- unsigned int t;
-
- mutex_lock(&ddata->lock);
- t = ddata->ulps_timeout;
- mutex_unlock(&ddata->lock);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", t);
-}
-
-static DEVICE_ATTR(num_dsi_errors, S_IRUGO, dsicm_num_errors_show, NULL);
-static DEVICE_ATTR(hw_revision, S_IRUGO, dsicm_hw_revision_show, NULL);
-static DEVICE_ATTR(ulps, S_IRUGO | S_IWUSR,
- dsicm_show_ulps, dsicm_store_ulps);
-static DEVICE_ATTR(ulps_timeout, S_IRUGO | S_IWUSR,
- dsicm_show_ulps_timeout, dsicm_store_ulps_timeout);
-
-static struct attribute *dsicm_attrs[] = {
- &dev_attr_num_dsi_errors.attr,
- &dev_attr_hw_revision.attr,
- &dev_attr_ulps.attr,
- &dev_attr_ulps_timeout.attr,
- NULL,
-};
-
-static const struct attribute_group dsicm_attr_group = {
- .attrs = dsicm_attrs,
-};
-
-static void dsicm_hw_reset(struct panel_drv_data *ddata)
-{
- gpiod_set_value(ddata->reset_gpio, 1);
- udelay(10);
- /* reset the panel */
- gpiod_set_value(ddata->reset_gpio, 0);
- /* assert reset */
- udelay(10);
- gpiod_set_value(ddata->reset_gpio, 1);
- /* wait after releasing reset */
- usleep_range(5000, 10000);
-}
-
-static int dsicm_power_on(struct panel_drv_data *ddata)
-{
- struct omap_dss_device *src = ddata->src;
- u8 id1, id2, id3;
- int r;
- struct omap_dss_dsi_config dsi_config = {
- .mode = OMAP_DSS_DSI_CMD_MODE,
- .pixel_format = OMAP_DSS_DSI_FMT_RGB888,
- .vm = &ddata->vm,
- .hs_clk_min = 150000000,
- .hs_clk_max = 300000000,
- .lp_clk_min = 7000000,
- .lp_clk_max = 10000000,
- };
-
- if (ddata->vpnl) {
- r = regulator_enable(ddata->vpnl);
- if (r) {
- dev_err(&ddata->pdev->dev,
- "failed to enable VPNL: %d\n", r);
- return r;
- }
- }
-
- if (ddata->vddi) {
- r = regulator_enable(ddata->vddi);
- if (r) {
- dev_err(&ddata->pdev->dev,
- "failed to enable VDDI: %d\n", r);
- goto err_vpnl;
- }
- }
-
- if (ddata->pin_config.num_pins > 0) {
- r = src->ops->dsi.configure_pins(src, &ddata->pin_config);
- if (r) {
- dev_err(&ddata->pdev->dev,
- "failed to configure DSI pins\n");
- goto err_vddi;
- }
- }
-
- r = src->ops->dsi.set_config(src, &dsi_config);
- if (r) {
- dev_err(&ddata->pdev->dev, "failed to configure DSI\n");
- goto err_vddi;
- }
-
- src->ops->enable(src);
-
- dsicm_hw_reset(ddata);
-
- src->ops->dsi.enable_hs(src, ddata->channel, false);
-
- r = dsicm_sleep_out(ddata);
- if (r)
- goto err;
-
- r = dsicm_get_id(ddata, &id1, &id2, &id3);
- if (r)
- goto err;
-
- r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, 0xff);
- if (r)
- goto err;
-
- r = dsicm_dcs_write_1(ddata, DCS_CTRL_DISPLAY,
- (1<<2) | (1<<5)); /* BL | BCTRL */
- if (r)
- goto err;
-
- r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_PIXEL_FORMAT,
- MIPI_DCS_PIXEL_FMT_24BIT);
- if (r)
- goto err;
-
- r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_ON);
- if (r)
- goto err;
-
- r = _dsicm_enable_te(ddata, ddata->te_enabled);
- if (r)
- goto err;
-
- r = src->ops->dsi.enable_video_output(src, ddata->channel);
- if (r)
- goto err;
-
- ddata->enabled = true;
-
- if (!ddata->intro_printed) {
- dev_info(&ddata->pdev->dev, "panel revision %02x.%02x.%02x\n",
- id1, id2, id3);
- ddata->intro_printed = true;
- }
-
- src->ops->dsi.enable_hs(src, ddata->channel, true);
-
- return 0;
-err:
- dev_err(&ddata->pdev->dev, "error while enabling panel, issuing HW reset\n");
-
- dsicm_hw_reset(ddata);
-
- src->ops->dsi.disable(src, true, false);
-err_vddi:
- if (ddata->vddi)
- regulator_disable(ddata->vddi);
-err_vpnl:
- if (ddata->vpnl)
- regulator_disable(ddata->vpnl);
-
- return r;
-}
-
-static void dsicm_power_off(struct panel_drv_data *ddata)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
-
- src->ops->dsi.disable_video_output(src, ddata->channel);
-
- r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_OFF);
- if (!r)
- r = dsicm_sleep_in(ddata);
-
- if (r) {
- dev_err(&ddata->pdev->dev,
- "error disabling panel, issuing HW reset\n");
- dsicm_hw_reset(ddata);
- }
-
- src->ops->dsi.disable(src, true, false);
-
- if (ddata->vddi)
- regulator_disable(ddata->vddi);
- if (ddata->vpnl)
- regulator_disable(ddata->vpnl);
-
- ddata->enabled = false;
-}
-
-static int dsicm_panel_reset(struct panel_drv_data *ddata)
-{
- dev_err(&ddata->pdev->dev, "performing LCD reset\n");
-
- dsicm_power_off(ddata);
- dsicm_hw_reset(ddata);
- return dsicm_power_on(ddata);
-}
-
-static int dsicm_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct panel_drv_data *ddata = to_panel_data(dst);
- struct device *dev = &ddata->pdev->dev;
- int r;
-
- r = src->ops->dsi.request_vc(src, &ddata->channel);
- if (r) {
- dev_err(dev, "failed to get virtual channel\n");
- return r;
- }
-
- r = src->ops->dsi.set_vc_id(src, ddata->channel, TCH);
- if (r) {
- dev_err(dev, "failed to set VC_ID\n");
- src->ops->dsi.release_vc(src, ddata->channel);
- return r;
- }
-
- ddata->src = src;
- return 0;
-}
-
-static void dsicm_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct panel_drv_data *ddata = to_panel_data(dst);
-
- src->ops->dsi.release_vc(src, ddata->channel);
- ddata->src = NULL;
-}
-
-static void dsicm_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
- int r;
-
- mutex_lock(&ddata->lock);
-
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_power_on(ddata);
-
- src->ops->dsi.bus_unlock(src);
-
- if (r)
- goto err;
-
- mutex_unlock(&ddata->lock);
-
- dsicm_bl_power(ddata, true);
-
- return;
-err:
- dev_dbg(&ddata->pdev->dev, "enable failed (%d)\n", r);
- mutex_unlock(&ddata->lock);
-}
-
-static void dsicm_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
- int r;
-
- dsicm_bl_power(ddata, false);
-
- mutex_lock(&ddata->lock);
-
- dsicm_cancel_ulps_work(ddata);
-
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (!r)
- dsicm_power_off(ddata);
-
- src->ops->dsi.bus_unlock(src);
-
- mutex_unlock(&ddata->lock);
-}
-
-static void dsicm_framedone_cb(int err, void *data)
-{
- struct panel_drv_data *ddata = data;
- struct omap_dss_device *src = ddata->src;
-
- dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err);
- src->ops->dsi.bus_unlock(src);
-}
-
-static irqreturn_t dsicm_te_isr(int irq, void *data)
-{
- struct panel_drv_data *ddata = data;
- struct omap_dss_device *src = ddata->src;
- int old;
- int r;
-
- old = atomic_cmpxchg(&ddata->do_update, 1, 0);
-
- if (old) {
- cancel_delayed_work(&ddata->te_timeout_work);
-
- r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb,
- ddata);
- if (r)
- goto err;
- }
-
- return IRQ_HANDLED;
-err:
- dev_err(&ddata->pdev->dev, "start update failed\n");
- src->ops->dsi.bus_unlock(src);
- return IRQ_HANDLED;
-}
-
-static void dsicm_te_timeout_work_callback(struct work_struct *work)
-{
- struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
- te_timeout_work.work);
- struct omap_dss_device *src = ddata->src;
-
- dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n");
-
- atomic_set(&ddata->do_update, 0);
- src->ops->dsi.bus_unlock(src);
-}
-
-static int dsicm_update(struct omap_dss_device *dssdev,
- u16 x, u16 y, u16 w, u16 h)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
- int r;
-
- dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
-
- mutex_lock(&ddata->lock);
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (r)
- goto err;
-
- if (!ddata->enabled) {
- r = 0;
- goto err;
- }
-
- /* XXX no need to send this every frame, but dsi break if not done */
- r = dsicm_set_update_window(ddata, 0, 0, ddata->vm.hactive,
- ddata->vm.vactive);
- if (r)
- goto err;
-
- if (ddata->te_enabled && ddata->ext_te_gpio) {
- schedule_delayed_work(&ddata->te_timeout_work,
- msecs_to_jiffies(250));
- atomic_set(&ddata->do_update, 1);
- } else {
- r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb,
- ddata);
- if (r)
- goto err;
- }
-
- /* note: no bus_unlock here. unlock is src framedone_cb */
- mutex_unlock(&ddata->lock);
- return 0;
-err:
- src->ops->dsi.bus_unlock(src);
- mutex_unlock(&ddata->lock);
- return r;
-}
-
-static int dsicm_sync(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
-
- dev_dbg(&ddata->pdev->dev, "sync\n");
-
- mutex_lock(&ddata->lock);
- src->ops->dsi.bus_lock(src);
- src->ops->dsi.bus_unlock(src);
- mutex_unlock(&ddata->lock);
-
- dev_dbg(&ddata->pdev->dev, "sync done\n");
-
- return 0;
-}
-
-static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
-{
- struct omap_dss_device *src = ddata->src;
- int r;
-
- if (enable)
- r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_TEAR_ON, 0);
- else
- r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_TEAR_OFF);
-
- if (!ddata->ext_te_gpio)
- src->ops->dsi.enable_te(src, enable);
-
- /* possible panel bug */
- msleep(100);
-
- return r;
-}
-
-static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
- int r;
-
- mutex_lock(&ddata->lock);
-
- if (ddata->te_enabled == enable)
- goto end;
-
- src->ops->dsi.bus_lock(src);
-
- if (ddata->enabled) {
- r = dsicm_wake_up(ddata);
- if (r)
- goto err;
-
- r = _dsicm_enable_te(ddata, enable);
- if (r)
- goto err;
- }
-
- ddata->te_enabled = enable;
-
- src->ops->dsi.bus_unlock(src);
-end:
- mutex_unlock(&ddata->lock);
-
- return 0;
-err:
- src->ops->dsi.bus_unlock(src);
- mutex_unlock(&ddata->lock);
-
- return r;
-}
-
-static int dsicm_get_te(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- int r;
-
- mutex_lock(&ddata->lock);
- r = ddata->te_enabled;
- mutex_unlock(&ddata->lock);
-
- return r;
-}
-
-static int dsicm_memory_read(struct omap_dss_device *dssdev,
- void *buf, size_t size,
- u16 x, u16 y, u16 w, u16 h)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = ddata->src;
- int r;
- int first = 1;
- int plen;
- unsigned int buf_used = 0;
-
- if (size < w * h * 3)
- return -ENOMEM;
-
- mutex_lock(&ddata->lock);
-
- if (!ddata->enabled) {
- r = -ENODEV;
- goto err1;
- }
-
- size = min((u32)w * h * 3,
- ddata->vm.hactive * ddata->vm.vactive * 3);
-
- src->ops->dsi.bus_lock(src);
-
- r = dsicm_wake_up(ddata);
- if (r)
- goto err2;
-
- /* plen 1 or 2 goes into short packet. until checksum error is fixed,
- * use short packets. plen 32 works, but bigger packets seem to cause
- * an error. */
- if (size % 2)
- plen = 1;
- else
- plen = 2;
-
- dsicm_set_update_window(ddata, x, y, w, h);
-
- r = src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, plen);
- if (r)
- goto err2;
-
- while (buf_used < size) {
- u8 dcs_cmd = first ? 0x2e : 0x3e;
- first = 0;
-
- r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd,
- buf + buf_used, size - buf_used);
-
- if (r < 0) {
- dev_err(dssdev->dev, "read error\n");
- goto err3;
- }
-
- buf_used += r;
-
- if (r < plen) {
- dev_err(&ddata->pdev->dev, "short read\n");
- break;
- }
-
- if (signal_pending(current)) {
- dev_err(&ddata->pdev->dev, "signal pending, "
- "aborting memory read\n");
- r = -ERESTARTSYS;
- goto err3;
- }
- }
-
- r = buf_used;
-
-err3:
- src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, 1);
-err2:
- src->ops->dsi.bus_unlock(src);
-err1:
- mutex_unlock(&ddata->lock);
- return r;
-}
-
-static void dsicm_ulps_work(struct work_struct *work)
-{
- struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
- ulps_work.work);
- struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *src = ddata->src;
-
- mutex_lock(&ddata->lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !ddata->enabled) {
- mutex_unlock(&ddata->lock);
- return;
- }
-
- src->ops->dsi.bus_lock(src);
-
- dsicm_enter_ulps(ddata);
-
- src->ops->dsi.bus_unlock(src);
- mutex_unlock(&ddata->lock);
-}
-
-static int dsicm_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- connector->display_info.width_mm = ddata->width_mm;
- connector->display_info.height_mm = ddata->height_mm;
-
- return omapdss_display_get_modes(connector, &ddata->vm);
-}
-
-static int dsicm_check_timings(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- int ret = 0;
-
- if (mode->hdisplay != ddata->vm.hactive)
- ret = -EINVAL;
-
- if (mode->vdisplay != ddata->vm.vactive)
- ret = -EINVAL;
-
- if (ret) {
- dev_warn(dssdev->dev, "wrong resolution: %d x %d",
- mode->hdisplay, mode->vdisplay);
- dev_warn(dssdev->dev, "panel resolution: %d x %d",
- ddata->vm.hactive, ddata->vm.vactive);
- }
-
- return ret;
-}
-
-static const struct omap_dss_device_ops dsicm_ops = {
- .connect = dsicm_connect,
- .disconnect = dsicm_disconnect,
-
- .enable = dsicm_enable,
- .disable = dsicm_disable,
-
- .get_modes = dsicm_get_modes,
- .check_timings = dsicm_check_timings,
-};
-
-static const struct omap_dss_driver dsicm_dss_driver = {
- .update = dsicm_update,
- .sync = dsicm_sync,
-
- .enable_te = dsicm_enable_te,
- .get_te = dsicm_get_te,
-
- .memory_read = dsicm_memory_read,
-};
-
-static int dsicm_probe_of(struct platform_device *pdev)
-{
- struct device_node *node = pdev->dev.of_node;
- struct backlight_device *backlight;
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct display_timing timing;
- int err;
-
- ddata->reset_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(ddata->reset_gpio)) {
- err = PTR_ERR(ddata->reset_gpio);
- dev_err(&pdev->dev, "reset gpio request failed: %d", err);
- return err;
- }
-
- ddata->ext_te_gpio = devm_gpiod_get_optional(&pdev->dev, "te",
- GPIOD_IN);
- if (IS_ERR(ddata->ext_te_gpio)) {
- err = PTR_ERR(ddata->ext_te_gpio);
- dev_err(&pdev->dev, "TE gpio request failed: %d", err);
- return err;
- }
-
- err = of_get_display_timing(node, "panel-timing", &timing);
- if (!err) {
- videomode_from_timing(&timing, &ddata->vm);
- if (!ddata->vm.pixelclock)
- ddata->vm.pixelclock =
- ddata->vm.hactive * ddata->vm.vactive * 60;
- } else {
- dev_warn(&pdev->dev,
- "failed to get video timing, using defaults\n");
- }
-
- ddata->width_mm = 0;
- of_property_read_u32(node, "width-mm", &ddata->width_mm);
-
- ddata->height_mm = 0;
- of_property_read_u32(node, "height-mm", &ddata->height_mm);
-
- ddata->vpnl = devm_regulator_get_optional(&pdev->dev, "vpnl");
- if (IS_ERR(ddata->vpnl)) {
- err = PTR_ERR(ddata->vpnl);
- if (err == -EPROBE_DEFER)
- return err;
- ddata->vpnl = NULL;
- }
-
- ddata->vddi = devm_regulator_get_optional(&pdev->dev, "vddi");
- if (IS_ERR(ddata->vddi)) {
- err = PTR_ERR(ddata->vddi);
- if (err == -EPROBE_DEFER)
- return err;
- ddata->vddi = NULL;
- }
-
- backlight = devm_of_find_backlight(&pdev->dev);
- if (IS_ERR(backlight))
- return PTR_ERR(backlight);
-
- /* If no backlight device is found assume native backlight support */
- if (backlight)
- ddata->extbldev = backlight;
- else
- ddata->use_dsi_backlight = true;
-
- /* TODO: ulps */
-
- return 0;
-}
-
-static int dsicm_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct backlight_device *bldev = NULL;
- struct device *dev = &pdev->dev;
- struct omap_dss_device *dssdev;
- int r;
-
- dev_dbg(dev, "probe\n");
-
- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
- ddata->pdev = pdev;
-
- ddata->vm.hactive = 864;
- ddata->vm.vactive = 480;
- ddata->vm.pixelclock = 864 * 480 * 60;
-
- r = dsicm_probe_of(pdev);
- if (r)
- return r;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = dev;
- dssdev->ops = &dsicm_ops;
- dssdev->driver = &dsicm_dss_driver;
- dssdev->type = OMAP_DISPLAY_TYPE_DSI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_port = 0;
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
-
- dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
- OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- mutex_init(&ddata->lock);
-
- atomic_set(&ddata->do_update, 0);
-
- if (ddata->ext_te_gpio) {
- r = devm_request_irq(dev, gpiod_to_irq(ddata->ext_te_gpio),
- dsicm_te_isr,
- IRQF_TRIGGER_RISING,
- "taal vsync", ddata);
-
- if (r) {
- dev_err(dev, "IRQ request failed\n");
- goto err_reg;
- }
-
- INIT_DEFERRABLE_WORK(&ddata->te_timeout_work,
- dsicm_te_timeout_work_callback);
-
- dev_dbg(dev, "Using GPIO TE\n");
- }
-
- ddata->workqueue = create_singlethread_workqueue("dsicm_wq");
- if (!ddata->workqueue) {
- r = -ENOMEM;
- goto err_reg;
- }
- INIT_DELAYED_WORK(&ddata->ulps_work, dsicm_ulps_work);
-
- dsicm_hw_reset(ddata);
-
- if (ddata->use_dsi_backlight) {
- struct backlight_properties props = { 0 };
- props.max_brightness = 255;
- props.type = BACKLIGHT_RAW;
-
- bldev = devm_backlight_device_register(dev, dev_name(dev),
- dev, ddata, &dsicm_bl_ops, &props);
- if (IS_ERR(bldev)) {
- r = PTR_ERR(bldev);
- goto err_bl;
- }
-
- ddata->bldev = bldev;
- }
-
- r = sysfs_create_group(&dev->kobj, &dsicm_attr_group);
- if (r) {
- dev_err(dev, "failed to create sysfs files\n");
- goto err_bl;
- }
-
- return 0;
-
-err_bl:
- destroy_workqueue(ddata->workqueue);
-err_reg:
- if (ddata->extbldev)
- put_device(&ddata->extbldev->dev);
-
- return r;
-}
-
-static int __exit dsicm_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- dev_dbg(&pdev->dev, "remove\n");
-
- omapdss_device_unregister(dssdev);
-
- if (omapdss_device_is_enabled(dssdev))
- dsicm_disable(dssdev);
- omapdss_device_disconnect(ddata->src, dssdev);
-
- sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group);
-
- if (ddata->extbldev)
- put_device(&ddata->extbldev->dev);
-
- dsicm_cancel_ulps_work(ddata);
- destroy_workqueue(ddata->workqueue);
-
- /* reset, to be sure that the panel is in a valid state */
- dsicm_hw_reset(ddata);
-
- return 0;
-}
-
-static const struct of_device_id dsicm_of_match[] = {
- { .compatible = "omapdss,panel-dsi-cm", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, dsicm_of_match);
-
-static struct platform_driver dsicm_driver = {
- .probe = dsicm_probe,
- .remove = __exit_p(dsicm_remove),
- .driver = {
- .name = "panel-dsi-cm",
- .of_match_table = dsicm_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(dsicm_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("Generic DSI Command Mode Panel Driver");
-MODULE_LICENSE("GPL");
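
Illustrative sketch, not part of this patch: with the omapdss-specific panel-dsi-cm driver above removed, and with dsi.c (further down in this diff) gaining mipi_dsi_host support (<drm/drm_mipi_dsi.h>, host_to_omap(), _omap_dsi_host_transfer()), a replacement command-mode panel would typically bind as an ordinary mipi_dsi_device / drm_panel driver instead. Every name below (my_panel_*) and the compatible string are hypothetical; .remove handling and the panel callbacks are omitted to keep the sketch short.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>

struct my_panel {
	struct drm_panel panel;
	struct mipi_dsi_device *dsi;
};

/* .prepare/.enable/.get_modes callbacks omitted in this sketch */
static const struct drm_panel_funcs my_panel_funcs = {
};

static int my_panel_probe(struct mipi_dsi_device *dsi)
{
	struct my_panel *ctx;

	ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dsi = dsi;
	mipi_dsi_set_drvdata(dsi, ctx);

	/* lane count, pixel format and clock behaviour of the panel */
	dsi->lanes = 2;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;

	drm_panel_init(&ctx->panel, &dsi->dev, &my_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);
	drm_panel_add(&ctx->panel);

	return mipi_dsi_attach(dsi);
}

static const struct of_device_id my_panel_of_match[] = {
	{ .compatible = "example,cmd-mode-panel" },	/* hypothetical */
	{ }
};
MODULE_DEVICE_TABLE(of, my_panel_of_match);

static struct mipi_dsi_driver my_panel_driver = {
	.probe = my_panel_probe,
	.driver = {
		.name = "example-cmd-mode-panel",
		.of_match_table = my_panel_of_match,
	},
};
module_mipi_dsi_driver(my_panel_driver);
MODULE_LICENSE("GPL");
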
diff --git a/drivers/gpu/drm/omapdrm/dss/Kconfig b/drivers/gpu/drm/omapdrm/dss/Kconfig
deleted file mode 100644
index e11b258a2294..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/Kconfig
+++ /dev/null
@@ -1,135 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config OMAP2_DSS_INIT
- bool
-
-config OMAP_DSS_BASE
- tristate
-
-menuconfig OMAP2_DSS
- tristate "OMAP2+ Display Subsystem support"
- select OMAP_DSS_BASE
- select VIDEOMODE_HELPERS
- select OMAP2_DSS_INIT
- select HDMI
- help
- OMAP2+ Display Subsystem support.
-
-if OMAP2_DSS
-
-config OMAP2_DSS_DEBUG
- bool "Debug support"
- default n
- help
- This enables printing of debug messages. Alternatively, debug messages
- can also be enabled by setting CONFIG_DYNAMIC_DEBUG and then setting
- appropriate flags in <debugfs>/dynamic_debug/control.
-
-config OMAP2_DSS_DEBUGFS
- bool "Debugfs filesystem support"
- depends on DEBUG_FS
- default n
- help
- This enables debugfs for OMAPDSS at <debugfs>/omapdss. This enables
- querying about clock configuration and register configuration of dss,
- dispc, dsi, hdmi and rfbi.
-
-config OMAP2_DSS_COLLECT_IRQ_STATS
- bool "Collect DSS IRQ statistics"
- depends on OMAP2_DSS_DEBUGFS
- default n
- help
- Collect DSS IRQ statistics, printable via debugfs.
-
- The statistics can be found from
- <debugfs>/omapdss/dispc_irq for DISPC interrupts, and
- <debugfs>/omapdss/dsi_irq for DSI interrupts.
-
-config OMAP2_DSS_DPI
- bool "DPI support"
- default y
- help
- DPI Interface. This is the Parallel Display Interface.
-
-config OMAP2_DSS_VENC
- bool "VENC support"
- default y
- help
- OMAP Video Encoder support for S-Video and composite TV-out.
-
-config OMAP2_DSS_HDMI_COMMON
- bool
-
-config OMAP4_DSS_HDMI
- bool "HDMI support for OMAP4"
- default y
- select OMAP2_DSS_HDMI_COMMON
- help
- HDMI support for OMAP4 based SoCs.
-
-config OMAP4_DSS_HDMI_CEC
- bool "Enable HDMI CEC support for OMAP4"
- depends on OMAP4_DSS_HDMI
- select CEC_CORE
- default y
- help
- When selected the HDMI transmitter will support the CEC feature.
-
-config OMAP5_DSS_HDMI
- bool "HDMI support for OMAP5"
- default n
- select OMAP2_DSS_HDMI_COMMON
- help
- HDMI Interface for OMAP5 and similar cores. This adds the High
- Definition Multimedia Interface. See https://www.hdmi.org/ for HDMI
- specification.
-
-config OMAP2_DSS_SDI
- bool "SDI support"
- default n
- help
- SDI (Serial Display Interface) support.
-
- SDI is a high speed one-way display serial bus between the host
- processor and a display.
-
-config OMAP2_DSS_DSI
- bool "DSI support"
- default n
- help
- MIPI DSI (Display Serial Interface) support.
-
- DSI is a high speed half-duplex serial interface between the host
- processor and a peripheral, such as a display or a framebuffer chip.
-
- See https://www.mipi.org/ for DSI specifications.
-
-config OMAP2_DSS_MIN_FCK_PER_PCK
- int "Minimum FCK/PCK ratio (for scaling)"
- range 0 32
- default 0
- help
- This can be used to adjust the minimum FCK/PCK ratio.
-
- With this you can make sure that DISPC FCK is at least
- n x PCK. Video plane scaling requires higher FCK than
- normally.
-
- If this is set to 0, there's no extra constraint on the
- DISPC FCK. However, the FCK will at minimum be
- 2xPCK (if active matrix) or 3xPCK (if passive matrix).
-
- Max FCK is 173MHz, so this doesn't work if your PCK
- is very high.
-
-config OMAP2_DSS_SLEEP_AFTER_VENC_RESET
- bool "Sleep 20ms after VENC reset"
- default y
- help
- There is a 20ms sleep after VENC reset which seemed to fix the
- reset. The reason for the bug is unclear, and it's also unclear
- on what platforms this happens.
-
- This option enables the sleep, and is enabled by default. You can
- disable the sleep if it doesn't cause problems on your platform.
-
-endif
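
For reference (illustrative, not taken from this patch): boards that exercised the DSI command-mode path enabled it through the symbols defined in this now-removed Kconfig with a .config fragment along these lines. The fragment only shows how the options were consumed; whether and where the symbols reappear is outside this hunk.

CONFIG_OMAP2_DSS=y
CONFIG_OMAP2_DSS_DPI=y
CONFIG_OMAP2_DSS_DSI=y
CONFIG_OMAP4_DSS_HDMI=y
CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0
# CONFIG_OMAP2_DSS_DEBUG is not set
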
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
deleted file mode 100644
index f967e6948f2e..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OMAP2_DSS_INIT) += omapdss-boot-init.o
-
-obj-$(CONFIG_OMAP_DSS_BASE) += omapdss-base.o
-omapdss-base-y := base.o display.o output.o
-
-obj-$(CONFIG_OMAP2_DSS) += omapdss.o
-# Core DSS files
-omapdss-y := dss.o dispc.o dispc_coefs.o \
- pll.o video-pll.o
-omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
-omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
-omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
-omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
-omapdss-$(CONFIG_OMAP2_DSS_HDMI_COMMON) += hdmi_common.o hdmi_wp.o hdmi_pll.o \
- hdmi_phy.o
-omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi4.o hdmi4_core.o
-omapdss-$(CONFIG_OMAP4_DSS_HDMI_CEC) += hdmi4_cec.o
-omapdss-$(CONFIG_OMAP5_DSS_HDMI) += hdmi5.o hdmi5_core.o
-ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index cf50430e6363..050ca7eafac5 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -16,32 +16,10 @@
#include "dss.h"
#include "omapdss.h"
-static struct dss_device *dss_device;
-
-struct dss_device *omapdss_get_dss(void)
-{
- return dss_device;
-}
-EXPORT_SYMBOL(omapdss_get_dss);
-
-void omapdss_set_dss(struct dss_device *dss)
-{
- dss_device = dss;
-}
-EXPORT_SYMBOL(omapdss_set_dss);
-
struct dispc_device *dispc_get_dispc(struct dss_device *dss)
{
return dss->dispc;
}
-EXPORT_SYMBOL(dispc_get_dispc);
-
-const struct dispc_ops *dispc_get_ops(struct dss_device *dss)
-{
- return dss->dispc_ops;
-}
-EXPORT_SYMBOL(dispc_get_ops);
-
/* -----------------------------------------------------------------------------
* OMAP DSS Devices Handling
@@ -56,7 +34,6 @@ void omapdss_device_register(struct omap_dss_device *dssdev)
list_add_tail(&dssdev->list, &omapdss_devices_list);
mutex_unlock(&omapdss_devices_lock);
}
-EXPORT_SYMBOL_GPL(omapdss_device_register);
void omapdss_device_unregister(struct omap_dss_device *dssdev)
{
@@ -64,7 +41,6 @@ void omapdss_device_unregister(struct omap_dss_device *dssdev)
list_del(&dssdev->list);
mutex_unlock(&omapdss_devices_lock);
}
-EXPORT_SYMBOL_GPL(omapdss_device_unregister);
static bool omapdss_device_is_registered(struct device_node *node)
{
@@ -86,24 +62,16 @@ static bool omapdss_device_is_registered(struct device_node *node)
struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev)
{
- if (!try_module_get(dssdev->owner))
- return NULL;
-
- if (get_device(dssdev->dev) == NULL) {
- module_put(dssdev->owner);
+ if (get_device(dssdev->dev) == NULL)
return NULL;
- }
return dssdev;
}
-EXPORT_SYMBOL(omapdss_device_get);
void omapdss_device_put(struct omap_dss_device *dssdev)
{
put_device(dssdev->dev);
- module_put(dssdev->owner);
}
-EXPORT_SYMBOL(omapdss_device_put);
struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node)
{
@@ -149,7 +117,7 @@ struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from)
goto done;
}
- if (dssdev->id && (dssdev->next || dssdev->bridge))
+ if (dssdev->id && dssdev->bridge)
goto done;
}
@@ -164,7 +132,6 @@ done:
mutex_unlock(&omapdss_devices_lock);
return dssdev;
}
-EXPORT_SYMBOL(omapdss_device_next_output);
static bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
{
@@ -175,8 +142,6 @@ int omapdss_device_connect(struct dss_device *dss,
struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int ret;
-
dev_dbg(&dss->pdev->dev, "connect(%s, %s)\n",
src ? dev_name(src->dev) : "NULL",
dst ? dev_name(dst->dev) : "NULL");
@@ -195,17 +160,8 @@ int omapdss_device_connect(struct dss_device *dss,
dst->dss = dss;
- if (dst->ops && dst->ops->connect) {
- ret = dst->ops->connect(src, dst);
- if (ret < 0) {
- dst->dss = NULL;
- return ret;
- }
- }
-
return 0;
}
-EXPORT_SYMBOL_GPL(omapdss_device_connect);
void omapdss_device_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
@@ -222,43 +178,12 @@ void omapdss_device_disconnect(struct omap_dss_device *src,
}
if (!dst->id && !omapdss_device_is_connected(dst)) {
- WARN_ON(!dst->display);
+ WARN_ON(1);
return;
}
- WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED);
-
- if (dst->ops && dst->ops->disconnect)
- dst->ops->disconnect(src, dst);
dst->dss = NULL;
}
-EXPORT_SYMBOL_GPL(omapdss_device_disconnect);
-
-void omapdss_device_enable(struct omap_dss_device *dssdev)
-{
- if (!dssdev)
- return;
-
- if (dssdev->ops && dssdev->ops->enable)
- dssdev->ops->enable(dssdev);
-
- omapdss_device_enable(dssdev->next);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-}
-EXPORT_SYMBOL_GPL(omapdss_device_enable);
-
-void omapdss_device_disable(struct omap_dss_device *dssdev)
-{
- if (!dssdev)
- return;
-
- omapdss_device_disable(dssdev->next);
-
- if (dssdev->ops && dssdev->ops->disable)
- dssdev->ops->disable(dssdev);
-}
-EXPORT_SYMBOL_GPL(omapdss_device_disable);
/* -----------------------------------------------------------------------------
* Components Handling
@@ -344,7 +269,6 @@ void omapdss_gather_components(struct device *dev)
for_each_available_child_of_node(dev->of_node, child)
omapdss_walk_device(dev, child, true);
}
-EXPORT_SYMBOL(omapdss_gather_components);
static bool omapdss_component_is_loaded(struct omapdss_comp_node *comp)
{
@@ -369,8 +293,3 @@ bool omapdss_stack_is_ready(void)
return true;
}
-EXPORT_SYMBOL(omapdss_stack_is_ready);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("OMAP Display Subsystem Base");
-MODULE_LICENSE("GPL v2");
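
A short note on the omapdss_device_get()/omapdss_device_put() change above: with the try_module_get()/module_put() pair dropped, get/put now only pin the underlying struct device. A minimal usage sketch (hypothetical caller, not code from this patch):

	struct omap_dss_device *out;

	out = omapdss_device_get(dssdev);
	if (!out)
		return -ENODEV;		/* device already going away */

	/* ... use out->ops, out->bridge ... */

	omapdss_device_put(out);
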
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 599183879caf..f4cbef8ccace 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -351,8 +351,6 @@ static unsigned long dispc_plane_pclk_rate(struct dispc_device *dispc,
static unsigned long dispc_plane_lclk_rate(struct dispc_device *dispc,
enum omap_plane_id plane);
-static void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask);
-
static inline void dispc_write_reg(struct dispc_device *dispc, u16 idx, u32 val)
{
__raw_writel(val, dispc->base + idx);
@@ -379,12 +377,12 @@ static void mgr_fld_write(struct dispc_device *dispc, enum omap_channel channel,
REG_FLD_MOD(dispc, rfld->reg, val, rfld->high, rfld->low);
}
-static int dispc_get_num_ovls(struct dispc_device *dispc)
+int dispc_get_num_ovls(struct dispc_device *dispc)
{
return dispc->feat->num_ovls;
}
-static int dispc_get_num_mgrs(struct dispc_device *dispc)
+int dispc_get_num_mgrs(struct dispc_device *dispc)
{
return dispc->feat->num_mgrs;
}
@@ -670,13 +668,13 @@ void dispc_runtime_put(struct dispc_device *dispc)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc,
+u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc,
enum omap_channel channel)
{
return mgr_desc[channel].vsync_irq;
}
-static u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
+u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
enum omap_channel channel)
{
if (channel == OMAP_DSS_CHANNEL_DIGIT && dispc->feat->no_framedone_tv)
@@ -685,18 +683,18 @@ static u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
return mgr_desc[channel].framedone_irq;
}
-static u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
+u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
enum omap_channel channel)
{
return mgr_desc[channel].sync_lost_irq;
}
-static u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc)
+u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc)
{
return DISPC_IRQ_FRAMEDONEWB;
}
-static void dispc_mgr_enable(struct dispc_device *dispc,
+void dispc_mgr_enable(struct dispc_device *dispc,
enum omap_channel channel, bool enable)
{
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_ENABLE, enable);
@@ -710,13 +708,13 @@ static bool dispc_mgr_is_enabled(struct dispc_device *dispc,
return !!mgr_fld_read(dispc, channel, DISPC_MGR_FLD_ENABLE);
}
-static bool dispc_mgr_go_busy(struct dispc_device *dispc,
+bool dispc_mgr_go_busy(struct dispc_device *dispc,
enum omap_channel channel)
{
return mgr_fld_read(dispc, channel, DISPC_MGR_FLD_GO) == 1;
}
-static void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel)
+void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel)
{
WARN_ON(!dispc_mgr_is_enabled(dispc, channel));
WARN_ON(dispc_mgr_go_busy(dispc, channel));
@@ -726,12 +724,12 @@ static void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel)
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_GO, 1);
}
-static bool dispc_wb_go_busy(struct dispc_device *dispc)
+bool dispc_wb_go_busy(struct dispc_device *dispc)
{
return REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1;
}
-static void dispc_wb_go(struct dispc_device *dispc)
+void dispc_wb_go(struct dispc_device *dispc)
{
enum omap_plane_id plane = OMAP_DSS_WB;
bool enable, go;
@@ -877,50 +875,62 @@ static void dispc_ovl_write_color_conv_coef(struct dispc_device *dispc,
#undef CVAL
}
-static void dispc_wb_write_color_conv_coef(struct dispc_device *dispc,
- const struct csc_coef_rgb2yuv *ct)
-{
- const enum omap_plane_id plane = OMAP_DSS_WB;
-
-#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0))
+/* YUV -> RGB, ITU-R BT.601, full range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_full = {
+ 256, 0, 358, /* ry, rcb, rcr |1.000 0.000 1.402|*/
+ 256, -88, -182, /* gy, gcb, gcr |1.000 -0.344 -0.714|*/
+ 256, 452, 0, /* by, bcb, bcr |1.000 1.772 0.000|*/
+ true, /* full range */
+};
- dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->yg, ct->yr));
- dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->crr, ct->yb));
- dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->crb, ct->crg));
- dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->cbg, ct->cbr));
- dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->cbb));
+/* YUV -> RGB, ITU-R BT.601, limited range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_lim = {
+ 298, 0, 409, /* ry, rcb, rcr |1.164 0.000 1.596|*/
+ 298, -100, -208, /* gy, gcb, gcr |1.164 -0.392 -0.813|*/
+ 298, 516, 0, /* by, bcb, bcr |1.164 2.017 0.000|*/
+ false, /* limited range */
+};
- REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11);
+/* YUV -> RGB, ITU-R BT.709, full range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt709_full = {
+ 256, 0, 402, /* ry, rcb, rcr |1.000 0.000 1.570|*/
+ 256, -48, -120, /* gy, gcb, gcr |1.000 -0.187 -0.467|*/
+ 256, 475, 0, /* by, bcb, bcr |1.000 1.856 0.000|*/
+ true, /* full range */
+};
-#undef CVAL
-}
+/* YUV -> RGB, ITU-R BT.709, limited range */
+static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt709_lim = {
+ 298, 0, 459, /* ry, rcb, rcr |1.164 0.000 1.793|*/
+ 298, -55, -136, /* gy, gcb, gcr |1.164 -0.213 -0.533|*/
+ 298, 541, 0, /* by, bcb, bcr |1.164 2.112 0.000|*/
+ false, /* limited range */
+};
-static void dispc_setup_color_conv_coef(struct dispc_device *dispc)
+static void dispc_ovl_set_csc(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum drm_color_encoding color_encoding,
+ enum drm_color_range color_range)
{
- int i;
- int num_ovl = dispc_get_num_ovls(dispc);
-
- /* YUV -> RGB, ITU-R BT.601, limited range */
- const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_lim = {
- 298, 0, 409, /* ry, rcb, rcr */
- 298, -100, -208, /* gy, gcb, gcr */
- 298, 516, 0, /* by, bcb, bcr */
- false, /* limited range */
- };
+ const struct csc_coef_yuv2rgb *csc;
- /* RGB -> YUV, ITU-R BT.601, limited range */
- const struct csc_coef_rgb2yuv coefs_rgb2yuv_bt601_lim = {
- 66, 129, 25, /* yr, yg, yb */
- -38, -74, 112, /* cbr, cbg, cbb */
- 112, -94, -18, /* crr, crg, crb */
- false, /* limited range */
- };
-
- for (i = 1; i < num_ovl; i++)
- dispc_ovl_write_color_conv_coef(dispc, i, &coefs_yuv2rgb_bt601_lim);
+ switch (color_encoding) {
+ default:
+ case DRM_COLOR_YCBCR_BT601:
+ if (color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ csc = &coefs_yuv2rgb_bt601_full;
+ else
+ csc = &coefs_yuv2rgb_bt601_lim;
+ break;
+ case DRM_COLOR_YCBCR_BT709:
+ if (color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ csc = &coefs_yuv2rgb_bt709_full;
+ else
+ csc = &coefs_yuv2rgb_bt709_lim;
+ break;
+ }
- if (dispc->feat->has_writeback)
- dispc_wb_write_color_conv_coef(dispc, &coefs_rgb2yuv_bt601_lim);
+ dispc_ovl_write_color_conv_coef(dispc, plane, csc);
}
static void dispc_ovl_set_ba0(struct dispc_device *dispc,
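
Illustrative helper, not part of this patch: the csc_coef_yuv2rgb tables added above hold the conversion-matrix entries scaled by 256 (8-bit fixed point, e.g. 1.164 * 256 ~= 298). For the BT.601 limited-range table, the per-pixel math the coefficients encode is equivalent to the sketch below; the full-range tables use a luma coefficient of 256 and no 16 offset.

static inline int clamp8(int v)
{
	return v < 0 ? 0 : (v > 255 ? 255 : v);
}

static void yuv2rgb_bt601_lim(int y, int cb, int cr, int *r, int *g, int *b)
{
	y  -= 16;	/* limited range: luma offset */
	cb -= 128;
	cr -= 128;

	*r = clamp8((298 * y            + 409 * cr) >> 8);
	*g = clamp8((298 * y - 100 * cb - 208 * cr) >> 8);
	*b = clamp8((298 * y + 516 * cb           ) >> 8);
}
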
@@ -1285,7 +1295,7 @@ static bool dispc_ovl_color_mode_supported(struct dispc_device *dispc,
return false;
}
-static const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
+const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
enum omap_plane_id plane)
{
return dispc->feat->supported_color_modes[plane];
@@ -2601,7 +2611,9 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
u8 pre_mult_alpha, u8 global_alpha,
enum omap_dss_rotation_type rotation_type,
bool replication, const struct videomode *vm,
- bool mem_to_mem)
+ bool mem_to_mem,
+ enum drm_color_encoding color_encoding,
+ enum drm_color_range color_range)
{
bool five_taps = true;
bool fieldmode = false;
@@ -2750,6 +2762,9 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
fieldmode, fourcc, rotation);
dispc_ovl_set_output_size(dispc, plane, out_width, out_height);
dispc_ovl_set_vid_color_conv(dispc, plane, cconv);
+
+ if (plane != OMAP_DSS_WB)
+ dispc_ovl_set_csc(dispc, plane, color_encoding, color_range);
}
dispc_ovl_set_rotation_attrs(dispc, plane, rotation, rotation_type,
@@ -2764,7 +2779,7 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc,
return 0;
}
-static int dispc_ovl_setup(struct dispc_device *dispc,
+int dispc_ovl_setup(struct dispc_device *dispc,
enum omap_plane_id plane,
const struct omap_overlay_info *oi,
const struct videomode *vm, bool mem_to_mem,
@@ -2786,12 +2801,13 @@ static int dispc_ovl_setup(struct dispc_device *dispc,
oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
oi->out_width, oi->out_height, oi->fourcc, oi->rotation,
oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
- oi->rotation_type, replication, vm, mem_to_mem);
+ oi->rotation_type, replication, vm, mem_to_mem,
+ oi->color_encoding, oi->color_range);
return r;
}
-static int dispc_wb_setup(struct dispc_device *dispc,
+int dispc_wb_setup(struct dispc_device *dispc,
const struct omap_dss_writeback_info *wi,
bool mem_to_mem, const struct videomode *vm,
enum dss_writeback_channel channel_in)
@@ -2819,7 +2835,8 @@ static int dispc_wb_setup(struct dispc_device *dispc,
wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
wi->height, wi->fourcc, wi->rotation, zorder,
wi->pre_mult_alpha, global_alpha, wi->rotation_type,
- replication, vm, mem_to_mem);
+ replication, vm, mem_to_mem, DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
if (r)
return r;
@@ -2874,12 +2891,12 @@ static int dispc_wb_setup(struct dispc_device *dispc,
return 0;
}
-static bool dispc_has_writeback(struct dispc_device *dispc)
+bool dispc_has_writeback(struct dispc_device *dispc)
{
return dispc->feat->has_writeback;
}
-static int dispc_ovl_enable(struct dispc_device *dispc,
+int dispc_ovl_enable(struct dispc_device *dispc,
enum omap_plane_id plane, bool enable)
{
DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
@@ -2970,7 +2987,7 @@ static void dispc_mgr_enable_alpha_fixed_zorder(struct dispc_device *dispc,
REG_FLD_MOD(dispc, DISPC_CONFIG, enable, 19, 19);
}
-static void dispc_mgr_setup(struct dispc_device *dispc,
+void dispc_mgr_setup(struct dispc_device *dispc,
enum omap_channel channel,
const struct omap_overlay_manager_info *info)
{
@@ -3049,7 +3066,7 @@ static void dispc_mgr_enable_stallmode(struct dispc_device *dispc,
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_STALLMODE, enable);
}
-static void dispc_mgr_set_lcd_config(struct dispc_device *dispc,
+void dispc_mgr_set_lcd_config(struct dispc_device *dispc,
enum omap_channel channel,
const struct dss_lcd_mgr_config *config)
{
@@ -3098,7 +3115,7 @@ static bool _dispc_mgr_pclk_ok(struct dispc_device *dispc,
return pclk <= dispc->feat->max_tv_pclk;
}
-static int dispc_mgr_check_timings(struct dispc_device *dispc,
+int dispc_mgr_check_timings(struct dispc_device *dispc,
enum omap_channel channel,
const struct videomode *vm)
{
@@ -3191,7 +3208,7 @@ static int vm_flag_to_int(enum display_flags flags, enum display_flags high,
}
/* change name to mode? */
-static void dispc_mgr_set_timings(struct dispc_device *dispc,
+void dispc_mgr_set_timings(struct dispc_device *dispc,
enum omap_channel channel,
const struct videomode *vm)
{
@@ -3735,17 +3752,17 @@ int dispc_mgr_get_clock_div(struct dispc_device *dispc,
return 0;
}
-static u32 dispc_read_irqstatus(struct dispc_device *dispc)
+u32 dispc_read_irqstatus(struct dispc_device *dispc)
{
return dispc_read_reg(dispc, DISPC_IRQSTATUS);
}
-static void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask)
+void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask)
{
dispc_write_reg(dispc, DISPC_IRQSTATUS, mask);
}
-static void dispc_write_irqenable(struct dispc_device *dispc, u32 mask)
+void dispc_write_irqenable(struct dispc_device *dispc, u32 mask)
{
u32 old_mask = dispc_read_reg(dispc, DISPC_IRQENABLE);
@@ -3769,7 +3786,7 @@ void dispc_disable_sidle(struct dispc_device *dispc)
REG_FLD_MOD(dispc, DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */
}
-static u32 dispc_mgr_gamma_size(struct dispc_device *dispc,
+u32 dispc_mgr_gamma_size(struct dispc_device *dispc,
enum omap_channel channel)
{
const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
@@ -3824,7 +3841,7 @@ static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = {
{ .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
};
-static void dispc_mgr_set_gamma(struct dispc_device *dispc,
+void dispc_mgr_set_gamma(struct dispc_device *dispc,
enum omap_channel channel,
const struct drm_color_lut *lut,
unsigned int length)
@@ -3930,8 +3947,6 @@ static void _omap_dispc_initial_config(struct dispc_device *dispc)
dispc->feat->has_gamma_table)
REG_FLD_MOD(dispc, DISPC_CONFIG, 1, 9, 9);
- dispc_setup_color_conv_coef(dispc);
-
dispc_set_loadmode(dispc, OMAP_DSS_LOAD_FRAME_ONLY);
dispc_init_fifos(dispc);
@@ -4482,7 +4497,7 @@ static irqreturn_t dispc_irq_handler(int irq, void *arg)
return dispc->user_handler(irq, dispc->user_data);
}
-static int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
+int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
void *dev_id)
{
int r;
@@ -4506,7 +4521,7 @@ static int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
return r;
}
-static void dispc_free_irq(struct dispc_device *dispc, void *dev_id)
+void dispc_free_irq(struct dispc_device *dispc, void *dev_id)
{
devm_free_irq(&dispc->pdev->dev, dispc->irq, dispc);
@@ -4514,7 +4529,7 @@ static void dispc_free_irq(struct dispc_device *dispc, void *dev_id)
dispc->user_data = NULL;
}
-static u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc)
+u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc)
{
u32 limit = 0;
@@ -4684,47 +4699,6 @@ static void dispc_errata_i734_wa(struct dispc_device *dispc)
REG_FLD_MOD(dispc, DISPC_CONFIG, gatestate, 8, 4);
}
-static const struct dispc_ops dispc_ops = {
- .read_irqstatus = dispc_read_irqstatus,
- .clear_irqstatus = dispc_clear_irqstatus,
- .write_irqenable = dispc_write_irqenable,
-
- .request_irq = dispc_request_irq,
- .free_irq = dispc_free_irq,
-
- .runtime_get = dispc_runtime_get,
- .runtime_put = dispc_runtime_put,
-
- .get_num_ovls = dispc_get_num_ovls,
- .get_num_mgrs = dispc_get_num_mgrs,
-
- .get_memory_bandwidth_limit = dispc_get_memory_bandwidth_limit,
-
- .mgr_enable = dispc_mgr_enable,
- .mgr_is_enabled = dispc_mgr_is_enabled,
- .mgr_get_vsync_irq = dispc_mgr_get_vsync_irq,
- .mgr_get_framedone_irq = dispc_mgr_get_framedone_irq,
- .mgr_get_sync_lost_irq = dispc_mgr_get_sync_lost_irq,
- .mgr_go_busy = dispc_mgr_go_busy,
- .mgr_go = dispc_mgr_go,
- .mgr_set_lcd_config = dispc_mgr_set_lcd_config,
- .mgr_check_timings = dispc_mgr_check_timings,
- .mgr_set_timings = dispc_mgr_set_timings,
- .mgr_setup = dispc_mgr_setup,
- .mgr_gamma_size = dispc_mgr_gamma_size,
- .mgr_set_gamma = dispc_mgr_set_gamma,
-
- .ovl_enable = dispc_ovl_enable,
- .ovl_setup = dispc_ovl_setup,
- .ovl_get_color_modes = dispc_ovl_get_color_modes,
-
- .wb_get_framedone_irq = dispc_wb_get_framedone_irq,
- .wb_setup = dispc_wb_setup,
- .has_writeback = dispc_has_writeback,
- .wb_go_busy = dispc_wb_go_busy,
- .wb_go = dispc_wb_go,
-};
-
/* DISPC HW IP initialisation */
static const struct of_device_id dispc_of_match[] = {
{ .compatible = "ti,omap2-dispc", .data = &omap24xx_dispc_feats },
@@ -4826,7 +4800,6 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
dispc_runtime_put(dispc);
dss->dispc = dispc;
- dss->dispc_ops = &dispc_ops;
dispc->debugfs = dss_debugfs_create_file(dss, "dispc", dispc_dump_regs,
dispc);
@@ -4848,7 +4821,6 @@ static void dispc_unbind(struct device *dev, struct device *master, void *data)
dss_debugfs_remove_file(dispc->debugfs);
dss->dispc = NULL;
- dss->dispc_ops = NULL;
pm_runtime_disable(dev);
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
deleted file mode 100644
index 3b82158b1bfd..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ /dev/null
@@ -1,60 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- */
-
-#define DSS_SUBSYS_NAME "DISPLAY"
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-
-#include <drm/drm_connector.h>
-#include <drm/drm_modes.h>
-
-#include "omapdss.h"
-
-static int disp_num_counter;
-
-void omapdss_display_init(struct omap_dss_device *dssdev)
-{
- int id;
-
- /*
- * Note: this presumes that all displays either have an DT alias, or
- * none has.
- */
- id = of_alias_get_id(dssdev->dev->of_node, "display");
- if (id < 0)
- id = disp_num_counter++;
-
- /* Use 'label' property for name, if it exists */
- of_property_read_string(dssdev->dev->of_node, "label", &dssdev->name);
-
- if (dssdev->name == NULL)
- dssdev->name = devm_kasprintf(dssdev->dev, GFP_KERNEL,
- "display%u", id);
-}
-EXPORT_SYMBOL_GPL(omapdss_display_init);
-
-int omapdss_display_get_modes(struct drm_connector *connector,
- const struct videomode *vm)
-{
- struct drm_display_mode *mode;
-
- mode = drm_mode_create(connector->dev);
- if (!mode)
- return 0;
-
- drm_display_mode_from_videomode(vm, mode);
-
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(omapdss_display_get_modes);
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 1d2992daef40..030f997eccd0 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -641,7 +641,6 @@ static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
out->type = OMAP_DISPLAY_TYPE_DPI;
out->dispc_channel = dpi_get_channel(dpi);
out->of_port = port_num;
- out->owner = THIS_MODULE;
r = omapdss_device_init_output(out, &dpi->bridge);
if (r < 0) {
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 735a4e9027d0..8e11612f5fe1 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -14,7 +14,9 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/semaphore.h>
@@ -33,6 +35,9 @@
#include <linux/component.h>
#include <linux/sys_soc.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
#include <video/mipi_display.h>
#include "omapdss.h"
@@ -40,73 +45,7 @@
#define DSI_CATCH_MISSING_TE
-struct dsi_reg { u16 module; u16 idx; };
-
-#define DSI_REG(mod, idx) ((const struct dsi_reg) { mod, idx })
-
-/* DSI Protocol Engine */
-
-#define DSI_PROTO 0
-#define DSI_PROTO_SZ 0x200
-
-#define DSI_REVISION DSI_REG(DSI_PROTO, 0x0000)
-#define DSI_SYSCONFIG DSI_REG(DSI_PROTO, 0x0010)
-#define DSI_SYSSTATUS DSI_REG(DSI_PROTO, 0x0014)
-#define DSI_IRQSTATUS DSI_REG(DSI_PROTO, 0x0018)
-#define DSI_IRQENABLE DSI_REG(DSI_PROTO, 0x001C)
-#define DSI_CTRL DSI_REG(DSI_PROTO, 0x0040)
-#define DSI_GNQ DSI_REG(DSI_PROTO, 0x0044)
-#define DSI_COMPLEXIO_CFG1 DSI_REG(DSI_PROTO, 0x0048)
-#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(DSI_PROTO, 0x004C)
-#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(DSI_PROTO, 0x0050)
-#define DSI_CLK_CTRL DSI_REG(DSI_PROTO, 0x0054)
-#define DSI_TIMING1 DSI_REG(DSI_PROTO, 0x0058)
-#define DSI_TIMING2 DSI_REG(DSI_PROTO, 0x005C)
-#define DSI_VM_TIMING1 DSI_REG(DSI_PROTO, 0x0060)
-#define DSI_VM_TIMING2 DSI_REG(DSI_PROTO, 0x0064)
-#define DSI_VM_TIMING3 DSI_REG(DSI_PROTO, 0x0068)
-#define DSI_CLK_TIMING DSI_REG(DSI_PROTO, 0x006C)
-#define DSI_TX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0070)
-#define DSI_RX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0074)
-#define DSI_COMPLEXIO_CFG2 DSI_REG(DSI_PROTO, 0x0078)
-#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(DSI_PROTO, 0x007C)
-#define DSI_VM_TIMING4 DSI_REG(DSI_PROTO, 0x0080)
-#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(DSI_PROTO, 0x0084)
-#define DSI_VM_TIMING5 DSI_REG(DSI_PROTO, 0x0088)
-#define DSI_VM_TIMING6 DSI_REG(DSI_PROTO, 0x008C)
-#define DSI_VM_TIMING7 DSI_REG(DSI_PROTO, 0x0090)
-#define DSI_STOPCLK_TIMING DSI_REG(DSI_PROTO, 0x0094)
-#define DSI_VC_CTRL(n) DSI_REG(DSI_PROTO, 0x0100 + (n * 0x20))
-#define DSI_VC_TE(n) DSI_REG(DSI_PROTO, 0x0104 + (n * 0x20))
-#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0108 + (n * 0x20))
-#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(DSI_PROTO, 0x010C + (n * 0x20))
-#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0110 + (n * 0x20))
-#define DSI_VC_IRQSTATUS(n) DSI_REG(DSI_PROTO, 0x0118 + (n * 0x20))
-#define DSI_VC_IRQENABLE(n) DSI_REG(DSI_PROTO, 0x011C + (n * 0x20))
-
-/* DSIPHY_SCP */
-
-#define DSI_PHY 1
-#define DSI_PHY_OFFSET 0x200
-#define DSI_PHY_SZ 0x40
-
-#define DSI_DSIPHY_CFG0 DSI_REG(DSI_PHY, 0x0000)
-#define DSI_DSIPHY_CFG1 DSI_REG(DSI_PHY, 0x0004)
-#define DSI_DSIPHY_CFG2 DSI_REG(DSI_PHY, 0x0008)
-#define DSI_DSIPHY_CFG5 DSI_REG(DSI_PHY, 0x0014)
-#define DSI_DSIPHY_CFG10 DSI_REG(DSI_PHY, 0x0028)
-
-/* DSI_PLL_CTRL_SCP */
-
-#define DSI_PLL 2
-#define DSI_PLL_OFFSET 0x300
-#define DSI_PLL_SZ 0x20
-
-#define DSI_PLL_CONTROL DSI_REG(DSI_PLL, 0x0000)
-#define DSI_PLL_STATUS DSI_REG(DSI_PLL, 0x0004)
-#define DSI_PLL_GO DSI_REG(DSI_PLL, 0x0008)
-#define DSI_PLL_CONFIGURATION1 DSI_REG(DSI_PLL, 0x000C)
-#define DSI_PLL_CONFIGURATION2 DSI_REG(DSI_PLL, 0x0010)
+#include "dsi.h"
#define REG_GET(dsi, idx, start, end) \
FLD_GET(dsi_read_reg(dsi, idx), start, end)
@@ -114,324 +53,36 @@ struct dsi_reg { u16 module; u16 idx; };
#define REG_FLD_MOD(dsi, idx, val, start, end) \
dsi_write_reg(dsi, idx, FLD_MOD(dsi_read_reg(dsi, idx), val, start, end))
-/* Global interrupts */
-#define DSI_IRQ_VC0 (1 << 0)
-#define DSI_IRQ_VC1 (1 << 1)
-#define DSI_IRQ_VC2 (1 << 2)
-#define DSI_IRQ_VC3 (1 << 3)
-#define DSI_IRQ_WAKEUP (1 << 4)
-#define DSI_IRQ_RESYNC (1 << 5)
-#define DSI_IRQ_PLL_LOCK (1 << 7)
-#define DSI_IRQ_PLL_UNLOCK (1 << 8)
-#define DSI_IRQ_PLL_RECALL (1 << 9)
-#define DSI_IRQ_COMPLEXIO_ERR (1 << 10)
-#define DSI_IRQ_HS_TX_TIMEOUT (1 << 14)
-#define DSI_IRQ_LP_RX_TIMEOUT (1 << 15)
-#define DSI_IRQ_TE_TRIGGER (1 << 16)
-#define DSI_IRQ_ACK_TRIGGER (1 << 17)
-#define DSI_IRQ_SYNC_LOST (1 << 18)
-#define DSI_IRQ_LDO_POWER_GOOD (1 << 19)
-#define DSI_IRQ_TA_TIMEOUT (1 << 20)
-#define DSI_IRQ_ERROR_MASK \
- (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
- DSI_IRQ_TA_TIMEOUT)
-#define DSI_IRQ_CHANNEL_MASK 0xf
-
-/* Virtual channel interrupts */
-#define DSI_VC_IRQ_CS (1 << 0)
-#define DSI_VC_IRQ_ECC_CORR (1 << 1)
-#define DSI_VC_IRQ_PACKET_SENT (1 << 2)
-#define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3)
-#define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4)
-#define DSI_VC_IRQ_BTA (1 << 5)
-#define DSI_VC_IRQ_ECC_NO_CORR (1 << 6)
-#define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7)
-#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
-#define DSI_VC_IRQ_ERROR_MASK \
- (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
- DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
- DSI_VC_IRQ_FIFO_TX_UDF)
-
-/* ComplexIO interrupts */
-#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
-#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
-#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
-#define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3)
-#define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4)
-#define DSI_CIO_IRQ_ERRESC1 (1 << 5)
-#define DSI_CIO_IRQ_ERRESC2 (1 << 6)
-#define DSI_CIO_IRQ_ERRESC3 (1 << 7)
-#define DSI_CIO_IRQ_ERRESC4 (1 << 8)
-#define DSI_CIO_IRQ_ERRESC5 (1 << 9)
-#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
-#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
-#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
-#define DSI_CIO_IRQ_ERRCONTROL4 (1 << 13)
-#define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14)
-#define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
-#define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
-#define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
-#define DSI_CIO_IRQ_STATEULPS4 (1 << 18)
-#define DSI_CIO_IRQ_STATEULPS5 (1 << 19)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28)
-#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29)
-#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
-#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
-#define DSI_CIO_IRQ_ERROR_MASK \
- (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
- DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
- DSI_CIO_IRQ_ERRSYNCESC5 | \
- DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
- DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
- DSI_CIO_IRQ_ERRESC5 | \
- DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
- DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
- DSI_CIO_IRQ_ERRCONTROL5 | \
- DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
- DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
- DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
- DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
- DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
-
-typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
-struct dsi_data;
-
-static int dsi_display_init_dispc(struct dsi_data *dsi);
-static void dsi_display_uninit_dispc(struct dsi_data *dsi);
-
-static int dsi_vc_send_null(struct dsi_data *dsi, int channel);
-
-/* DSI PLL HSDIV indices */
-#define HSDIV_DISPC 0
-#define HSDIV_DSI 1
-
-#define DSI_MAX_NR_ISRS 2
-#define DSI_MAX_NR_LANES 5
-
-enum dsi_model {
- DSI_MODEL_OMAP3,
- DSI_MODEL_OMAP4,
- DSI_MODEL_OMAP5,
-};
-
-enum dsi_lane_function {
- DSI_LANE_UNUSED = 0,
- DSI_LANE_CLK,
- DSI_LANE_DATA1,
- DSI_LANE_DATA2,
- DSI_LANE_DATA3,
- DSI_LANE_DATA4,
-};
-
-struct dsi_lane_config {
- enum dsi_lane_function function;
- u8 polarity;
-};
-
-struct dsi_isr_data {
- omap_dsi_isr_t isr;
- void *arg;
- u32 mask;
-};
-
-enum fifo_size {
- DSI_FIFO_SIZE_0 = 0,
- DSI_FIFO_SIZE_32 = 1,
- DSI_FIFO_SIZE_64 = 2,
- DSI_FIFO_SIZE_96 = 3,
- DSI_FIFO_SIZE_128 = 4,
-};
-
-enum dsi_vc_source {
- DSI_VC_SOURCE_L4 = 0,
- DSI_VC_SOURCE_VP,
-};
-
-struct dsi_irq_stats {
- unsigned long last_reset;
- unsigned int irq_count;
- unsigned int dsi_irqs[32];
- unsigned int vc_irqs[4][32];
- unsigned int cio_irqs[32];
-};
-
-struct dsi_isr_tables {
- struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
- struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
- struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
-};
-
-struct dsi_clk_calc_ctx {
- struct dsi_data *dsi;
- struct dss_pll *pll;
-
- /* inputs */
-
- const struct omap_dss_dsi_config *config;
-
- unsigned long req_pck_min, req_pck_nom, req_pck_max;
-
- /* outputs */
-
- struct dss_pll_clock_info dsi_cinfo;
- struct dispc_clock_info dispc_cinfo;
-
- struct videomode vm;
- struct omap_dss_dsi_videomode_timings dsi_vm;
-};
-
-struct dsi_lp_clock_info {
- unsigned long lp_clk;
- u16 lp_clk_div;
-};
-
-struct dsi_module_id_data {
- u32 address;
- int id;
-};
-
-enum dsi_quirks {
- DSI_QUIRK_PLL_PWR_BUG = (1 << 0), /* DSI-PLL power command 0x3 is not working */
- DSI_QUIRK_DCS_CMD_CONFIG_VC = (1 << 1),
- DSI_QUIRK_VC_OCP_WIDTH = (1 << 2),
- DSI_QUIRK_REVERSE_TXCLKESC = (1 << 3),
- DSI_QUIRK_GNQ = (1 << 4),
- DSI_QUIRK_PHY_DCC = (1 << 5),
-};
-
-struct dsi_of_data {
- enum dsi_model model;
- const struct dss_pll_hw *pll_hw;
- const struct dsi_module_id_data *modules;
- unsigned int max_fck_freq;
- unsigned int max_pll_lpdiv;
- enum dsi_quirks quirks;
-};
-
-struct dsi_data {
- struct device *dev;
- void __iomem *proto_base;
- void __iomem *phy_base;
- void __iomem *pll_base;
-
- const struct dsi_of_data *data;
- int module_id;
-
- int irq;
-
- bool is_enabled;
-
- struct clk *dss_clk;
- struct regmap *syscon;
- struct dss_device *dss;
-
- struct dispc_clock_info user_dispc_cinfo;
- struct dss_pll_clock_info user_dsi_cinfo;
+static int dsi_init_dispc(struct dsi_data *dsi);
+static void dsi_uninit_dispc(struct dsi_data *dsi);
- struct dsi_lp_clock_info user_lp_cinfo;
- struct dsi_lp_clock_info current_lp_cinfo;
+static int dsi_vc_send_null(struct dsi_data *dsi, int vc, int channel);
- struct dss_pll pll;
-
- bool vdds_dsi_enabled;
- struct regulator *vdds_dsi_reg;
-
- struct {
- enum dsi_vc_source source;
- struct omap_dss_device *dssdev;
- enum fifo_size tx_fifo_size;
- enum fifo_size rx_fifo_size;
- int vc_id;
- } vc[4];
-
- struct mutex lock;
- struct semaphore bus_lock;
-
- spinlock_t irq_lock;
- struct dsi_isr_tables isr_tables;
- /* space for a copy used by the interrupt handler */
- struct dsi_isr_tables isr_tables_copy;
-
- int update_channel;
-#ifdef DSI_PERF_MEASURE
- unsigned int update_bytes;
-#endif
-
- bool te_enabled;
- bool ulps_enabled;
-
- void (*framedone_callback)(int, void *);
- void *framedone_data;
-
- struct delayed_work framedone_timeout_work;
-
-#ifdef DSI_CATCH_MISSING_TE
- struct timer_list te_timer;
-#endif
-
- unsigned long cache_req_pck;
- unsigned long cache_clk_freq;
- struct dss_pll_clock_info cache_cinfo;
-
- u32 errors;
- spinlock_t errors_lock;
-#ifdef DSI_PERF_MEASURE
- ktime_t perf_setup_time;
- ktime_t perf_start_time;
-#endif
- int debug_read;
- int debug_write;
- struct {
- struct dss_debugfs_entry *irqs;
- struct dss_debugfs_entry *regs;
- struct dss_debugfs_entry *clks;
- } debugfs;
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- spinlock_t irq_stats_lock;
- struct dsi_irq_stats irq_stats;
-#endif
-
- unsigned int num_lanes_supported;
- unsigned int line_buffer_size;
-
- struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
- unsigned int num_lanes_used;
-
- unsigned int scp_clk_refcount;
-
- struct dss_lcd_mgr_config mgr_config;
- struct videomode vm;
- enum omap_dss_dsi_pixel_format pix_fmt;
- enum omap_dss_dsi_mode mode;
- struct omap_dss_dsi_videomode_timings vm_timings;
-
- struct omap_dss_device output;
-};
-
-struct dsi_packet_sent_handler_data {
- struct dsi_data *dsi;
- struct completion *completion;
-};
+static ssize_t _omap_dsi_host_transfer(struct dsi_data *dsi, int vc,
+ const struct mipi_dsi_msg *msg);
#ifdef DSI_PERF_MEASURE
static bool dsi_perf;
module_param(dsi_perf, bool, 0644);
#endif
+/* Note: for some reason video mode seems to work only if VC_VIDEO is 0 */
+#define VC_VIDEO 0
+#define VC_CMD 1
+
+#define drm_bridge_to_dsi(bridge) \
+ container_of(bridge, struct dsi_data, bridge)
+
static inline struct dsi_data *to_dsi_data(struct omap_dss_device *dssdev)
{
return dev_get_drvdata(dssdev->dev);
}
+static inline struct dsi_data *host_to_omap(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct dsi_data, host);
+}
+
static inline void dsi_write_reg(struct dsi_data *dsi,
const struct dsi_reg idx, u32 val)
{
@@ -461,17 +112,13 @@ static inline u32 dsi_read_reg(struct dsi_data *dsi, const struct dsi_reg idx)
return __raw_readl(base + idx.idx);
}
-static void dsi_bus_lock(struct omap_dss_device *dssdev)
+static void dsi_bus_lock(struct dsi_data *dsi)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
down(&dsi->bus_lock);
}
-static void dsi_bus_unlock(struct omap_dss_device *dssdev)
+static void dsi_bus_unlock(struct dsi_data *dsi)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
up(&dsi->bus_lock);
}
@@ -514,22 +161,6 @@ static inline bool wait_for_bit_change(struct dsi_data *dsi,
return false;
}
-static u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
-{
- switch (fmt) {
- case OMAP_DSS_DSI_FMT_RGB888:
- case OMAP_DSS_DSI_FMT_RGB666:
- return 24;
- case OMAP_DSS_DSI_FMT_RGB666_PACKED:
- return 18;
- case OMAP_DSS_DSI_FMT_RGB565:
- return 16;
- default:
- BUG();
- return 0;
- }
-}
-
#ifdef DSI_PERF_MEASURE
static void dsi_perf_mark_setup(struct dsi_data *dsi)
{
@@ -623,7 +254,7 @@ static void print_irq_status(u32 status)
#undef PIS
}
-static void print_irq_status_vc(int channel, u32 status)
+static void print_irq_status_vc(int vc, u32 status)
{
if (status == 0)
return;
@@ -634,7 +265,7 @@ static void print_irq_status_vc(int channel, u32 status)
#define PIS(x) (status & DSI_VC_IRQ_##x) ? (#x " ") : ""
pr_debug("DSI VC(%d) IRQ 0x%x: %s%s%s%s%s%s%s%s%s\n",
- channel,
+ vc,
status,
PIS(CS),
PIS(ECC_CORR),
@@ -1015,7 +646,7 @@ static int dsi_unregister_isr(struct dsi_data *dsi, omap_dsi_isr_t isr,
return r;
}
-static int dsi_register_isr_vc(struct dsi_data *dsi, int channel,
+static int dsi_register_isr_vc(struct dsi_data *dsi, int vc,
omap_dsi_isr_t isr, void *arg, u32 mask)
{
unsigned long flags;
@@ -1024,18 +655,18 @@ static int dsi_register_isr_vc(struct dsi_data *dsi, int channel,
spin_lock_irqsave(&dsi->irq_lock, flags);
r = _dsi_register_isr(isr, arg, mask,
- dsi->isr_tables.isr_table_vc[channel],
- ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
+ dsi->isr_tables.isr_table_vc[vc],
+ ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]));
if (r == 0)
- _omap_dsi_set_irqs_vc(dsi, channel);
+ _omap_dsi_set_irqs_vc(dsi, vc);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
-static int dsi_unregister_isr_vc(struct dsi_data *dsi, int channel,
+static int dsi_unregister_isr_vc(struct dsi_data *dsi, int vc,
omap_dsi_isr_t isr, void *arg, u32 mask)
{
unsigned long flags;
@@ -1044,49 +675,11 @@ static int dsi_unregister_isr_vc(struct dsi_data *dsi, int channel,
spin_lock_irqsave(&dsi->irq_lock, flags);
r = _dsi_unregister_isr(isr, arg, mask,
- dsi->isr_tables.isr_table_vc[channel],
- ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
+ dsi->isr_tables.isr_table_vc[vc],
+ ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]));
if (r == 0)
- _omap_dsi_set_irqs_vc(dsi, channel);
-
- spin_unlock_irqrestore(&dsi->irq_lock, flags);
-
- return r;
-}
-
-static int dsi_register_isr_cio(struct dsi_data *dsi, omap_dsi_isr_t isr,
- void *arg, u32 mask)
-{
- unsigned long flags;
- int r;
-
- spin_lock_irqsave(&dsi->irq_lock, flags);
-
- r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
- ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
-
- if (r == 0)
- _omap_dsi_set_irqs_cio(dsi);
-
- spin_unlock_irqrestore(&dsi->irq_lock, flags);
-
- return r;
-}
-
-static int dsi_unregister_isr_cio(struct dsi_data *dsi, omap_dsi_isr_t isr,
- void *arg, u32 mask)
-{
- unsigned long flags;
- int r;
-
- spin_lock_irqsave(&dsi->irq_lock, flags);
-
- r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
- ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
-
- if (r == 0)
- _omap_dsi_set_irqs_cio(dsi);
+ _omap_dsi_set_irqs_vc(dsi, vc);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
@@ -1819,56 +1412,6 @@ static void dsi_cio_timings(struct dsi_data *dsi)
dsi_write_reg(dsi, DSI_DSIPHY_CFG2, r);
}
-/* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */
-static void dsi_cio_enable_lane_override(struct dsi_data *dsi,
- unsigned int mask_p,
- unsigned int mask_n)
-{
- int i;
- u32 l;
- u8 lptxscp_start = dsi->num_lanes_supported == 3 ? 22 : 26;
-
- l = 0;
-
- for (i = 0; i < dsi->num_lanes_supported; ++i) {
- unsigned int p = dsi->lanes[i].polarity;
-
- if (mask_p & (1 << i))
- l |= 1 << (i * 2 + (p ? 0 : 1));
-
- if (mask_n & (1 << i))
- l |= 1 << (i * 2 + (p ? 1 : 0));
- }
-
- /*
- * Bits in REGLPTXSCPDAT4TO0DXDY:
- * 17: DY0 18: DX0
- * 19: DY1 20: DX1
- * 21: DY2 22: DX2
- * 23: DY3 24: DX3
- * 25: DY4 26: DX4
- */
-
- /* Set the lane override configuration */
-
- /* REGLPTXSCPDAT4TO0DXDY */
- REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
-
- /* Enable lane override */
-
- /* ENLPTXSCPDAT */
- REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 1, 27, 27);
-}
-
-static void dsi_cio_disable_lane_override(struct dsi_data *dsi)
-{
- /* Disable lane override */
- REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
- /* Reset the lane override configuration */
- /* REGLPTXSCPDAT4TO0DXDY */
- REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 0, 22, 17);
-}
-
static int dsi_cio_wait_tx_clk_esc_reset(struct dsi_data *dsi)
{
int t, i;
@@ -2043,32 +1586,6 @@ static int dsi_cio_init(struct dsi_data *dsi)
l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */
dsi_write_reg(dsi, DSI_TIMING1, l);
- if (dsi->ulps_enabled) {
- unsigned int mask_p;
- int i;
-
- DSSDBG("manual ulps exit\n");
-
- /* ULPS is exited by Mark-1 state for 1ms, followed by
- * stop state. DSS HW cannot do this via the normal
- * ULPS exit sequence, as after reset the DSS HW thinks
- * that we are not in ULPS mode, and refuses to send the
- * sequence. So we need to send the ULPS exit sequence
- * manually by setting positive lines high and negative lines
- * low for 1ms.
- */
-
- mask_p = 0;
-
- for (i = 0; i < dsi->num_lanes_supported; ++i) {
- if (dsi->lanes[i].function == DSI_LANE_UNUSED)
- continue;
- mask_p |= 1 << i;
- }
-
- dsi_cio_enable_lane_override(dsi, mask_p, 0);
- }
-
r = dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ON);
if (r)
goto err_cio_pwr;
@@ -2087,29 +1604,15 @@ static int dsi_cio_init(struct dsi_data *dsi)
if (r)
goto err_tx_clk_esc_rst;
- if (dsi->ulps_enabled) {
- /* Keep Mark-1 state for 1ms (as per DSI spec) */
- ktime_t wait = ns_to_ktime(1000 * 1000);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
-
- /* Disable the override. The lanes should be set to Mark-11
- * state by the HW */
- dsi_cio_disable_lane_override(dsi);
- }
-
/* FORCE_TX_STOP_MODE_IO */
REG_FLD_MOD(dsi, DSI_TIMING1, 0, 15, 15);
dsi_cio_timings(dsi);
- if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- /* DDR_CLK_ALWAYS_ON */
- REG_FLD_MOD(dsi, DSI_CLK_CTRL,
- dsi->vm_timings.ddr_clk_always_on, 13, 13);
- }
-
- dsi->ulps_enabled = false;
+ /* DDR_CLK_ALWAYS_ON */
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL,
+ !(dsi->dsidev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS),
+ 13, 13);
DSSDBG("CIO init done\n");
@@ -2120,8 +1623,6 @@ err_tx_clk_esc_rst:
err_cio_pwr_dom:
dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_OFF);
err_cio_pwr:
- if (dsi->ulps_enabled)
- dsi_cio_disable_lane_override(dsi);
err_scp_clk_dom:
dsi_disable_scp_clk(dsi);
dsi_disable_pads(dsi);
@@ -2218,9 +1719,9 @@ static int dsi_force_tx_stop_mode_io(struct dsi_data *dsi)
return 0;
}
-static bool dsi_vc_is_enabled(struct dsi_data *dsi, int channel)
+static bool dsi_vc_is_enabled(struct dsi_data *dsi, int vc)
{
- return REG_GET(dsi, DSI_VC_CTRL(channel), 0, 0);
+ return REG_GET(dsi, DSI_VC_CTRL(vc), 0, 0);
}
static void dsi_packet_sent_handler_vp(void *data, u32 mask)
@@ -2228,14 +1729,14 @@ static void dsi_packet_sent_handler_vp(void *data, u32 mask)
struct dsi_packet_sent_handler_data *vp_data =
(struct dsi_packet_sent_handler_data *) data;
struct dsi_data *dsi = vp_data->dsi;
- const int channel = dsi->update_channel;
+ const int vc = dsi->update_vc;
u8 bit = dsi->te_enabled ? 30 : 31;
- if (REG_GET(dsi, DSI_VC_TE(channel), bit, bit) == 0)
+ if (REG_GET(dsi, DSI_VC_TE(vc), bit, bit) == 0)
complete(vp_data->completion);
}
-static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel)
+static int dsi_sync_vc_vp(struct dsi_data *dsi, int vc)
{
DECLARE_COMPLETION_ONSTACK(completion);
struct dsi_packet_sent_handler_data vp_data = {
@@ -2247,13 +1748,13 @@ static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel)
bit = dsi->te_enabled ? 30 : 31;
- r = dsi_register_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
+ r = dsi_register_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
if (r)
goto err0;
/* Wait for completion only if TE_EN/TE_START is still set */
- if (REG_GET(dsi, DSI_VC_TE(channel), bit, bit)) {
+ if (REG_GET(dsi, DSI_VC_TE(vc), bit, bit)) {
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(10)) == 0) {
DSSERR("Failed to complete previous frame transfer\n");
@@ -2262,12 +1763,12 @@ static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel)
}
}
- dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
+ dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
return 0;
err1:
- dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
+ dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
err0:
return r;
@@ -2278,13 +1779,13 @@ static void dsi_packet_sent_handler_l4(void *data, u32 mask)
struct dsi_packet_sent_handler_data *l4_data =
(struct dsi_packet_sent_handler_data *) data;
struct dsi_data *dsi = l4_data->dsi;
- const int channel = dsi->update_channel;
+ const int vc = dsi->update_vc;
- if (REG_GET(dsi, DSI_VC_CTRL(channel), 5, 5) == 0)
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 5, 5) == 0)
complete(l4_data->completion);
}
-static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel)
+static int dsi_sync_vc_l4(struct dsi_data *dsi, int vc)
{
DECLARE_COMPLETION_ONSTACK(completion);
struct dsi_packet_sent_handler_data l4_data = {
@@ -2293,13 +1794,13 @@ static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel)
};
int r = 0;
- r = dsi_register_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
+ r = dsi_register_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
if (r)
goto err0;
/* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
- if (REG_GET(dsi, DSI_VC_CTRL(channel), 5, 5)) {
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 5, 5)) {
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(10)) == 0) {
DSSERR("Failed to complete previous l4 transfer\n");
@@ -2308,47 +1809,47 @@ static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel)
}
}
- dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
+ dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
return 0;
err1:
- dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
+ dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
err0:
return r;
}
-static int dsi_sync_vc(struct dsi_data *dsi, int channel)
+static int dsi_sync_vc(struct dsi_data *dsi, int vc)
{
WARN_ON(!dsi_bus_is_locked(dsi));
WARN_ON(in_interrupt());
- if (!dsi_vc_is_enabled(dsi, channel))
+ if (!dsi_vc_is_enabled(dsi, vc))
return 0;
- switch (dsi->vc[channel].source) {
+ switch (dsi->vc[vc].source) {
case DSI_VC_SOURCE_VP:
- return dsi_sync_vc_vp(dsi, channel);
+ return dsi_sync_vc_vp(dsi, vc);
case DSI_VC_SOURCE_L4:
- return dsi_sync_vc_l4(dsi, channel);
+ return dsi_sync_vc_l4(dsi, vc);
default:
BUG();
return -EINVAL;
}
}
-static int dsi_vc_enable(struct dsi_data *dsi, int channel, bool enable)
+static int dsi_vc_enable(struct dsi_data *dsi, int vc, bool enable)
{
- DSSDBG("dsi_vc_enable channel %d, enable %d\n",
- channel, enable);
+ DSSDBG("dsi_vc_enable vc %d, enable %d\n",
+ vc, enable);
enable = enable ? 1 : 0;
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 0, 0);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), enable, 0, 0);
- if (!wait_for_bit_change(dsi, DSI_VC_CTRL(channel), 0, enable)) {
+ if (!wait_for_bit_change(dsi, DSI_VC_CTRL(vc), 0, enable)) {
DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
return -EIO;
}
@@ -2356,17 +1857,17 @@ static int dsi_vc_enable(struct dsi_data *dsi, int channel, bool enable)
return 0;
}
-static void dsi_vc_initial_config(struct dsi_data *dsi, int channel)
+static void dsi_vc_initial_config(struct dsi_data *dsi, int vc)
{
u32 r;
- DSSDBG("Initial config of virtual channel %d", channel);
+ DSSDBG("Initial config of VC %d", vc);
- r = dsi_read_reg(dsi, DSI_VC_CTRL(channel));
+ r = dsi_read_reg(dsi, DSI_VC_CTRL(vc));
if (FLD_GET(r, 15, 15)) /* VC_BUSY */
DSSERR("VC(%d) busy when trying to configure it!\n",
- channel);
+ vc);
r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
@@ -2381,74 +1882,39 @@ static void dsi_vc_initial_config(struct dsi_data *dsi, int channel)
r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
- dsi_write_reg(dsi, DSI_VC_CTRL(channel), r);
+ dsi_write_reg(dsi, DSI_VC_CTRL(vc), r);
- dsi->vc[channel].source = DSI_VC_SOURCE_L4;
+ dsi->vc[vc].source = DSI_VC_SOURCE_L4;
}
-static int dsi_vc_config_source(struct dsi_data *dsi, int channel,
- enum dsi_vc_source source)
-{
- if (dsi->vc[channel].source == source)
- return 0;
-
- DSSDBG("Source config of virtual channel %d", channel);
-
- dsi_sync_vc(dsi, channel);
-
- dsi_vc_enable(dsi, channel, 0);
-
- /* VC_BUSY */
- if (!wait_for_bit_change(dsi, DSI_VC_CTRL(channel), 15, 0)) {
- DSSERR("vc(%d) busy when trying to config for VP\n", channel);
- return -EIO;
- }
-
- /* SOURCE, 0 = L4, 1 = video port */
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), source, 1, 1);
-
- /* DCS_CMD_ENABLE */
- if (dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) {
- bool enable = source == DSI_VC_SOURCE_VP;
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 30, 30);
- }
-
- dsi_vc_enable(dsi, channel, 1);
-
- dsi->vc[channel].source = source;
-
- return 0;
-}
-
-static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
+static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int vc,
bool enable)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
- DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
+ DSSDBG("dsi_vc_enable_hs(%d, %d)\n", vc, enable);
+
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 9, 9) == enable)
+ return;
WARN_ON(!dsi_bus_is_locked(dsi));
- dsi_vc_enable(dsi, channel, 0);
+ dsi_vc_enable(dsi, vc, 0);
dsi_if_enable(dsi, 0);
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 9, 9);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), enable, 9, 9);
- dsi_vc_enable(dsi, channel, 1);
+ dsi_vc_enable(dsi, vc, 1);
dsi_if_enable(dsi, 1);
dsi_force_tx_stop_mode_io(dsi);
-
- /* start the DDR clock by sending a NULL packet */
- if (dsi->vm_timings.ddr_clk_always_on && enable)
- dsi_vc_send_null(dsi, channel);
}
-static void dsi_vc_flush_long_data(struct dsi_data *dsi, int channel)
+static void dsi_vc_flush_long_data(struct dsi_data *dsi, int vc)
{
- while (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
+ while (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
u32 val;
- val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
+ val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc));
DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
(val >> 0) & 0xff,
(val >> 8) & 0xff,
@@ -2494,13 +1960,13 @@ static void dsi_show_rx_ack_with_err(u16 err)
DSSERR("\t\tDSI Protocol Violation\n");
}
-static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel)
+static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int vc)
{
/* RX_FIFO_NOT_EMPTY */
- while (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
+ while (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
u32 val;
u8 dt;
- val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
+ val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc));
DSSERR("\trawval %#08x\n", val);
dt = FLD_GET(val, 5, 0);
if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
@@ -2515,7 +1981,7 @@ static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel)
} else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
DSSERR("\tDCS long response, len %d\n",
FLD_GET(val, 23, 8));
- dsi_vc_flush_long_data(dsi, channel);
+ dsi_vc_flush_long_data(dsi, vc);
} else {
DSSERR("\tunknown datatype 0x%02x\n", dt);
}
@@ -2523,35 +1989,35 @@ static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel)
return 0;
}
-static int dsi_vc_send_bta(struct dsi_data *dsi, int channel)
+static int dsi_vc_send_bta(struct dsi_data *dsi, int vc)
{
if (dsi->debug_write || dsi->debug_read)
- DSSDBG("dsi_vc_send_bta %d\n", channel);
+ DSSDBG("dsi_vc_send_bta %d\n", vc);
WARN_ON(!dsi_bus_is_locked(dsi));
/* RX_FIFO_NOT_EMPTY */
- if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
- dsi_vc_flush_receive_data(dsi, channel);
+ dsi_vc_flush_receive_data(dsi, vc);
}
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 1, 6, 6); /* BTA_EN */
/* flush posted write */
- dsi_read_reg(dsi, DSI_VC_CTRL(channel));
+ dsi_read_reg(dsi, DSI_VC_CTRL(vc));
return 0;
}
-static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
+static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int vc)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
DECLARE_COMPLETION_ONSTACK(completion);
int r = 0;
u32 err;
- r = dsi_register_isr_vc(dsi, channel, dsi_completion_handler,
+ r = dsi_register_isr_vc(dsi, vc, dsi_completion_handler,
&completion, DSI_VC_IRQ_BTA);
if (r)
goto err0;
@@ -2561,7 +2027,7 @@ static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
if (r)
goto err1;
- r = dsi_vc_send_bta(dsi, channel);
+ r = dsi_vc_send_bta(dsi, vc);
if (r)
goto err2;
@@ -2582,29 +2048,30 @@ err2:
dsi_unregister_isr(dsi, dsi_completion_handler, &completion,
DSI_IRQ_ERROR_MASK);
err1:
- dsi_unregister_isr_vc(dsi, channel, dsi_completion_handler,
+ dsi_unregister_isr_vc(dsi, vc, dsi_completion_handler,
&completion, DSI_VC_IRQ_BTA);
err0:
return r;
}
-static inline void dsi_vc_write_long_header(struct dsi_data *dsi, int channel,
- u8 data_type, u16 len, u8 ecc)
+static inline void dsi_vc_write_long_header(struct dsi_data *dsi, int vc,
+ int channel, u8 data_type, u16 len,
+ u8 ecc)
{
u32 val;
u8 data_id;
WARN_ON(!dsi_bus_is_locked(dsi));
- data_id = data_type | dsi->vc[channel].vc_id << 6;
+ data_id = data_type | channel << 6;
val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
FLD_VAL(ecc, 31, 24);
- dsi_write_reg(dsi, DSI_VC_LONG_PACKET_HEADER(channel), val);
+ dsi_write_reg(dsi, DSI_VC_LONG_PACKET_HEADER(vc), val);
}
-static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int channel,
+static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int vc,
u8 b1, u8 b2, u8 b3, u8 b4)
{
u32 val;
@@ -2614,33 +2081,31 @@ static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int channel,
/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
b1, b2, b3, b4, val); */
- dsi_write_reg(dsi, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
+ dsi_write_reg(dsi, DSI_VC_LONG_PACKET_PAYLOAD(vc), val);
}
-static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type,
- u8 *data, u16 len, u8 ecc)
+static int dsi_vc_send_long(struct dsi_data *dsi, int vc,
+ const struct mipi_dsi_msg *msg)
{
/*u32 val; */
int i;
- u8 *p;
+ const u8 *p;
int r = 0;
u8 b1, b2, b3, b4;
if (dsi->debug_write)
- DSSDBG("dsi_vc_send_long, %d bytes\n", len);
+ DSSDBG("dsi_vc_send_long, %d bytes\n", msg->tx_len);
/* len + header */
- if (dsi->vc[channel].tx_fifo_size * 32 * 4 < len + 4) {
+ if (dsi->vc[vc].tx_fifo_size * 32 * 4 < msg->tx_len + 4) {
DSSERR("unable to send long packet: packet too long.\n");
return -EINVAL;
}
- dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_L4);
-
- dsi_vc_write_long_header(dsi, channel, data_type, len, ecc);
+ dsi_vc_write_long_header(dsi, vc, msg->channel, msg->type, msg->tx_len, 0);
- p = data;
- for (i = 0; i < len >> 2; i++) {
+ p = msg->tx_buf;
+ for (i = 0; i < msg->tx_len >> 2; i++) {
if (dsi->debug_write)
DSSDBG("\tsending full packet %d\n", i);
@@ -2649,10 +2114,10 @@ static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type,
b3 = *p++;
b4 = *p++;
- dsi_vc_write_long_payload(dsi, channel, b1, b2, b3, b4);
+ dsi_vc_write_long_payload(dsi, vc, b1, b2, b3, b4);
}
- i = len % 4;
+ i = msg->tx_len % 4;
if (i) {
b1 = 0; b2 = 0; b3 = 0;
@@ -2674,194 +2139,89 @@ static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type,
break;
}
- dsi_vc_write_long_payload(dsi, channel, b1, b2, b3, 0);
+ dsi_vc_write_long_payload(dsi, vc, b1, b2, b3, 0);
}
return r;
}
-static int dsi_vc_send_short(struct dsi_data *dsi, int channel, u8 data_type,
- u16 data, u8 ecc)
+static int dsi_vc_send_short(struct dsi_data *dsi, int vc,
+ const struct mipi_dsi_msg *msg)
{
+ struct mipi_dsi_packet pkt;
u32 r;
- u8 data_id;
+
+ r = mipi_dsi_create_packet(&pkt, msg);
+ if (r < 0)
+ return r;
WARN_ON(!dsi_bus_is_locked(dsi));
if (dsi->debug_write)
- DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
- channel,
- data_type, data & 0xff, (data >> 8) & 0xff);
-
- dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_L4);
+ DSSDBG("dsi_vc_send_short(vc%d, dt %#x, b1 %#x, b2 %#x)\n",
+ vc, msg->type, pkt.header[1], pkt.header[2]);
- if (FLD_GET(dsi_read_reg(dsi, DSI_VC_CTRL(channel)), 16, 16)) {
+ if (FLD_GET(dsi_read_reg(dsi, DSI_VC_CTRL(vc)), 16, 16)) {
DSSERR("ERROR FIFO FULL, aborting transfer\n");
return -EINVAL;
}
- data_id = data_type | dsi->vc[channel].vc_id << 6;
-
- r = (data_id << 0) | (data << 8) | (ecc << 24);
+ r = pkt.header[3] << 24 | pkt.header[2] << 16 | pkt.header[1] << 8 |
+ pkt.header[0];
- dsi_write_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel), r);
+ dsi_write_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc), r);
return 0;
}
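Side note on the new short-packet path above: the four header bytes come from mipi_dsi_create_packet() in the DRM MIPI DSI core, and the register value is simply those bytes packed little-endian. A minimal sketch of that packing (illustrative only, not part of the patch; layout as produced by the core helper):

/*
 * header[0] = (msg->channel & 0x3) << 6 | (msg->type & 0x3f)   data id
 * header[1] = first parameter byte (or word-count LSB for long packets)
 * header[2] = second parameter byte (or word-count MSB)
 * header[3] = ECC slot, left zero by the helper
 */
static u32 example_pack_short_header(const struct mipi_dsi_packet *pkt)
{
	return pkt->header[3] << 24 | pkt->header[2] << 16 |
	       pkt->header[1] << 8 | pkt->header[0];
}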
-static int dsi_vc_send_null(struct dsi_data *dsi, int channel)
-{
- return dsi_vc_send_long(dsi, channel, MIPI_DSI_NULL_PACKET, NULL, 0, 0);
-}
-
-static int dsi_vc_write_nosync_common(struct dsi_data *dsi, int channel,
- u8 *data, int len,
- enum dss_dsi_content_type type)
-{
- int r;
-
- if (len == 0) {
- BUG_ON(type == DSS_DSI_CONTENT_DCS);
- r = dsi_vc_send_short(dsi, channel,
- MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, 0, 0);
- } else if (len == 1) {
- r = dsi_vc_send_short(dsi, channel,
- type == DSS_DSI_CONTENT_GENERIC ?
- MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
- MIPI_DSI_DCS_SHORT_WRITE, data[0], 0);
- } else if (len == 2) {
- r = dsi_vc_send_short(dsi, channel,
- type == DSS_DSI_CONTENT_GENERIC ?
- MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
- MIPI_DSI_DCS_SHORT_WRITE_PARAM,
- data[0] | (data[1] << 8), 0);
- } else {
- r = dsi_vc_send_long(dsi, channel,
- type == DSS_DSI_CONTENT_GENERIC ?
- MIPI_DSI_GENERIC_LONG_WRITE :
- MIPI_DSI_DCS_LONG_WRITE, data, len, 0);
- }
-
- return r;
-}
-
-static int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len)
-{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
- return dsi_vc_write_nosync_common(dsi, channel, data, len,
- DSS_DSI_CONTENT_DCS);
-}
-
-static int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len)
+static int dsi_vc_send_null(struct dsi_data *dsi, int vc, int channel)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
+ const struct mipi_dsi_msg msg = {
+ .channel = channel,
+ .type = MIPI_DSI_NULL_PACKET,
+ };
- return dsi_vc_write_nosync_common(dsi, channel, data, len,
- DSS_DSI_CONTENT_GENERIC);
+ return dsi_vc_send_long(dsi, vc, &msg);
}
-static int dsi_vc_write_common(struct omap_dss_device *dssdev,
- int channel, u8 *data, int len,
- enum dss_dsi_content_type type)
+static int dsi_vc_write_common(struct omap_dss_device *dssdev, int vc,
+ const struct mipi_dsi_msg *msg)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- r = dsi_vc_write_nosync_common(dsi, channel, data, len, type);
- if (r)
- goto err;
-
- r = dsi_vc_send_bta_sync(dssdev, channel);
- if (r)
- goto err;
-
- /* RX_FIFO_NOT_EMPTY */
- if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
- DSSERR("rx fifo not empty after write, dumping data:\n");
- dsi_vc_flush_receive_data(dsi, channel);
- r = -EIO;
- goto err;
- }
-
- return 0;
-err:
- DSSERR("dsi_vc_write_common(ch %d, cmd 0x%02x, len %d) failed\n",
- channel, data[0], len);
- return r;
-}
-
-static int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
- int len)
-{
- return dsi_vc_write_common(dssdev, channel, data, len,
- DSS_DSI_CONTENT_DCS);
-}
-
-static int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data,
- int len)
-{
- return dsi_vc_write_common(dssdev, channel, data, len,
- DSS_DSI_CONTENT_GENERIC);
-}
+ if (mipi_dsi_packet_format_is_short(msg->type))
+ r = dsi_vc_send_short(dsi, vc, msg);
+ else
+ r = dsi_vc_send_long(dsi, vc, msg);
-static int dsi_vc_dcs_send_read_request(struct dsi_data *dsi, int channel,
- u8 dcs_cmd)
-{
- int r;
+ if (r < 0)
+ return r;
- if (dsi->debug_read)
- DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n",
- channel, dcs_cmd);
+ /*
+ * TODO: we do not always have to do the BTA sync, for example
+ * we can improve performance by setting the update window
+ * information without sending BTA sync between the commands.
+ * In that case we can return early.
+ */
- r = dsi_vc_send_short(dsi, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
+ r = dsi_vc_send_bta_sync(dssdev, vc);
if (r) {
- DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x)"
- " failed\n", channel, dcs_cmd);
+ DSSERR("bta sync failed\n");
return r;
}
- return 0;
-}
-
-static int dsi_vc_generic_send_read_request(struct dsi_data *dsi, int channel,
- u8 *reqdata, int reqlen)
-{
- u16 data;
- u8 data_type;
- int r;
-
- if (dsi->debug_read)
- DSSDBG("dsi_vc_generic_send_read_request(ch %d, reqlen %d)\n",
- channel, reqlen);
-
- if (reqlen == 0) {
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
- data = 0;
- } else if (reqlen == 1) {
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
- data = reqdata[0];
- } else if (reqlen == 2) {
- data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
- data = reqdata[0] | (reqdata[1] << 8);
- } else {
- BUG();
- return -EINVAL;
- }
-
- r = dsi_vc_send_short(dsi, channel, data_type, data, 0);
- if (r) {
- DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d)"
- " failed\n", channel, reqlen);
- return r;
+ /* RX_FIFO_NOT_EMPTY */
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) {
+ DSSERR("rx fifo not empty after write, dumping data:\n");
+ dsi_vc_flush_receive_data(dsi, vc);
+ return -EIO;
}
return 0;
}
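For context, dsi_vc_write_common() is reached through the standard mipi_dsi host .transfer hook added later in this patch. A minimal client-side sketch (assuming an attached panel device; function name illustrative) of a write that ends up here:

#include <drm/drm_mipi_dsi.h>
#include <video/mipi_display.h>

/* Illustrative only: a one-byte DCS write as a panel driver would issue it. */
static ssize_t example_display_on(struct mipi_dsi_device *dsi)
{
	const u8 payload[] = { MIPI_DCS_SET_DISPLAY_ON };

	/*
	 * The core wraps this in a struct mipi_dsi_msg (type
	 * MIPI_DSI_DCS_SHORT_WRITE, tx_len 1) and calls the host's
	 * .transfer hook, which lands in dsi_vc_write_common() above.
	 */
	return mipi_dsi_dcs_write_buffer(dsi, payload, sizeof(payload));
}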
-static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
+static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int vc, u8 *buf,
int buflen, enum dss_dsi_content_type type)
{
u32 val;
@@ -2869,13 +2229,13 @@ static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
int r;
/* RX_FIFO_NOT_EMPTY */
- if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20) == 0) {
+ if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20) == 0) {
DSSERR("RX fifo empty when trying to read.\n");
r = -EIO;
goto err;
}
- val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
+ val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc));
if (dsi->debug_read)
DSSDBG("\theader: %08x\n", val);
dt = FLD_GET(val, 5, 0);
@@ -2939,7 +2299,7 @@ static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
for (w = 0; w < len + 2;) {
int b;
val = dsi_read_reg(dsi,
- DSI_VC_SHORT_PACKET_HEADER(channel));
+ DSI_VC_SHORT_PACKET_HEADER(vc));
if (dsi->debug_read)
DSSDBG("\t\t%02x %02x %02x %02x\n",
(val >> 0) & 0xff,
@@ -2963,168 +2323,73 @@ static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
}
err:
- DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
+ DSSERR("dsi_vc_read_rx_fifo(vc %d type %s) failed\n", vc,
type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
return r;
}
-static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
- u8 *buf, int buflen)
+static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int vc,
+ const struct mipi_dsi_msg *msg)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
+ u8 cmd = ((u8 *)msg->tx_buf)[0];
int r;
- r = dsi_vc_dcs_send_read_request(dsi, channel, dcs_cmd);
+ if (dsi->debug_read)
+ DSSDBG("%s(vc %d, cmd %x)\n", __func__, vc, cmd);
+
+ r = dsi_vc_send_short(dsi, vc, msg);
if (r)
goto err;
- r = dsi_vc_send_bta_sync(dssdev, channel);
+ r = dsi_vc_send_bta_sync(dssdev, vc);
if (r)
goto err;
- r = dsi_vc_read_rx_fifo(dsi, channel, buf, buflen,
+ r = dsi_vc_read_rx_fifo(dsi, vc, msg->rx_buf, msg->rx_len,
DSS_DSI_CONTENT_DCS);
if (r < 0)
goto err;
- if (r != buflen) {
+ if (r != msg->rx_len) {
r = -EIO;
goto err;
}
return 0;
err:
- DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n", channel, dcs_cmd);
+ DSSERR("%s(vc %d, cmd 0x%02x) failed\n", __func__, vc, cmd);
return r;
}
-static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
- u8 *reqdata, int reqlen, u8 *buf, int buflen)
+static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int vc,
+ const struct mipi_dsi_msg *msg)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- r = dsi_vc_generic_send_read_request(dsi, channel, reqdata, reqlen);
+ r = dsi_vc_send_short(dsi, vc, msg);
if (r)
- return r;
+ goto err;
- r = dsi_vc_send_bta_sync(dssdev, channel);
+ r = dsi_vc_send_bta_sync(dssdev, vc);
if (r)
- return r;
+ goto err;
- r = dsi_vc_read_rx_fifo(dsi, channel, buf, buflen,
+ r = dsi_vc_read_rx_fifo(dsi, vc, msg->rx_buf, msg->rx_len,
DSS_DSI_CONTENT_GENERIC);
if (r < 0)
- return r;
-
- if (r != buflen) {
- r = -EIO;
- return r;
- }
-
- return 0;
-}
-
-static int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
- u16 len)
-{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
- return dsi_vc_send_short(dsi, channel,
- MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0);
-}
-
-static int dsi_enter_ulps(struct dsi_data *dsi)
-{
- DECLARE_COMPLETION_ONSTACK(completion);
- int r, i;
- unsigned int mask;
-
- DSSDBG("Entering ULPS");
-
- WARN_ON(!dsi_bus_is_locked(dsi));
-
- WARN_ON(dsi->ulps_enabled);
-
- if (dsi->ulps_enabled)
- return 0;
-
- /* DDR_CLK_ALWAYS_ON */
- if (REG_GET(dsi, DSI_CLK_CTRL, 13, 13)) {
- dsi_if_enable(dsi, 0);
- REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 13, 13);
- dsi_if_enable(dsi, 1);
- }
-
- dsi_sync_vc(dsi, 0);
- dsi_sync_vc(dsi, 1);
- dsi_sync_vc(dsi, 2);
- dsi_sync_vc(dsi, 3);
-
- dsi_force_tx_stop_mode_io(dsi);
-
- dsi_vc_enable(dsi, 0, false);
- dsi_vc_enable(dsi, 1, false);
- dsi_vc_enable(dsi, 2, false);
- dsi_vc_enable(dsi, 3, false);
-
- if (REG_GET(dsi, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */
- DSSERR("HS busy when enabling ULPS\n");
- return -EIO;
- }
-
- if (REG_GET(dsi, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */
- DSSERR("LP busy when enabling ULPS\n");
- return -EIO;
- }
-
- r = dsi_register_isr_cio(dsi, dsi_completion_handler, &completion,
- DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
- if (r)
- return r;
-
- mask = 0;
-
- for (i = 0; i < dsi->num_lanes_supported; ++i) {
- if (dsi->lanes[i].function == DSI_LANE_UNUSED)
- continue;
- mask |= 1 << i;
- }
- /* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
- /* LANEx_ULPS_SIG2 */
- REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG2, mask, 9, 5);
-
- /* flush posted write and wait for SCP interface to finish the write */
- dsi_read_reg(dsi, DSI_COMPLEXIO_CFG2);
+ goto err;
- if (wait_for_completion_timeout(&completion,
- msecs_to_jiffies(1000)) == 0) {
- DSSERR("ULPS enable timeout\n");
+ if (r != msg->rx_len) {
r = -EIO;
goto err;
}
- dsi_unregister_isr_cio(dsi, dsi_completion_handler, &completion,
- DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
-
- /* Reset LANEx_ULPS_SIG2 */
- REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG2, 0, 9, 5);
-
- /* flush posted write and wait for SCP interface to finish the write */
- dsi_read_reg(dsi, DSI_COMPLEXIO_CFG2);
-
- dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ULPS);
-
- dsi_if_enable(dsi, false);
-
- dsi->ulps_enabled = true;
-
return 0;
-
err:
- dsi_unregister_isr_cio(dsi, dsi_completion_handler, &completion,
- DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
+ DSSERR("%s(vc %d, reqlen %d) failed\n", __func__, vc, msg->tx_len);
return r;
}
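Similarly, the read path above serves the core's DCS/generic read helpers from <drm/drm_mipi_dsi.h>. A hedged caller sketch (illustrative names, assuming an attached client):

/* Illustrative only: reading one byte of DCS status through the core helper. */
static int example_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode)
{
	/*
	 * The core builds a MIPI_DSI_DCS_READ message with rx_buf/rx_len
	 * filled in; dsi_vc_dcs_read() above sends the request, performs
	 * the BTA turnaround and drains the RX FIFO into rx_buf.
	 */
	ssize_t ret = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_POWER_MODE, mode, 1);

	return ret < 0 ? ret : 0;
}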
@@ -3241,7 +2506,7 @@ static void dsi_config_vp_num_line_buffers(struct dsi_data *dsi)
int num_line_buffers;
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+ int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
const struct videomode *vm = &dsi->vm;
/*
* Don't use line buffers if width is greater than the video
@@ -3372,7 +2637,7 @@ static void dsi_config_cmd_mode_interleaving(struct dsi_data *dsi)
int tclk_trail, ths_exit, exiths_clk;
bool ddr_alwon;
const struct videomode *vm = &dsi->vm;
- int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+ int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
int ndl = dsi->num_lanes_used - 1;
int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
@@ -3500,7 +2765,7 @@ static int dsi_proto_config(struct dsi_data *dsi)
dsi_set_lp_rx_timeout(dsi, 0x1fff, true, true);
dsi_set_hs_tx_timeout(dsi, 0x1fff, true, true);
- switch (dsi_get_pixel_size(dsi->pix_fmt)) {
+ switch (mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt)) {
case 16:
buswidth = 0;
break;
@@ -3621,7 +2886,7 @@ static void dsi_proto_timings(struct dsi_data *dsi)
int window_sync = dsi->vm_timings.window_sync;
bool hsync_end;
const struct videomode *vm = &dsi->vm;
- int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+ int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
int tl, t_he, width_bytes;
hsync_end = dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE;
@@ -3659,12 +2924,9 @@ static void dsi_proto_timings(struct dsi_data *dsi)
}
}
-static int dsi_configure_pins(struct omap_dss_device *dssdev,
- const struct omap_dsi_pin_config *pin_cfg)
+static int dsi_configure_pins(struct dsi_data *dsi,
+ int num_pins, const u32 *pins)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
- int num_pins;
- const int *pins;
struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
int num_lanes;
int i;
@@ -3677,9 +2939,6 @@ static int dsi_configure_pins(struct omap_dss_device *dssdev,
DSI_LANE_DATA4,
};
- num_pins = pin_cfg->num_pins;
- pins = pin_cfg->pins;
-
if (num_pins < 4 || num_pins > dsi->num_lanes_supported * 2
|| num_pins % 2 != 0)
return -EINVAL;
@@ -3691,15 +2950,15 @@ static int dsi_configure_pins(struct omap_dss_device *dssdev,
for (i = 0; i < num_pins; i += 2) {
u8 lane, pol;
- int dx, dy;
+ u32 dx, dy;
dx = pins[i];
dy = pins[i + 1];
- if (dx < 0 || dx >= dsi->num_lanes_supported * 2)
+ if (dx >= dsi->num_lanes_supported * 2)
return -EINVAL;
- if (dy < 0 || dy >= dsi->num_lanes_supported * 2)
+ if (dy >= dsi->num_lanes_supported * 2)
return -EINVAL;
if (dx & 1) {
@@ -3725,86 +2984,102 @@ static int dsi_configure_pins(struct omap_dss_device *dssdev,
return 0;
}
-static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
+static int dsi_enable_video_mode(struct dsi_data *dsi, int vc)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
- int bpp = dsi_get_pixel_size(dsi->pix_fmt);
+ int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
u8 data_type;
u16 word_count;
- int r;
- r = dsi_display_init_dispc(dsi);
- if (r)
- return r;
+ switch (dsi->pix_fmt) {
+ case MIPI_DSI_FMT_RGB888:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ break;
+ default:
+ return -EINVAL;
+ }
- if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- switch (dsi->pix_fmt) {
- case OMAP_DSS_DSI_FMT_RGB888:
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
- break;
- case OMAP_DSS_DSI_FMT_RGB666:
- data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
- break;
- case OMAP_DSS_DSI_FMT_RGB666_PACKED:
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
- break;
- case OMAP_DSS_DSI_FMT_RGB565:
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
- break;
- default:
- r = -EINVAL;
- goto err_pix_fmt;
- }
+ dsi_if_enable(dsi, false);
+ dsi_vc_enable(dsi, vc, false);
- dsi_if_enable(dsi, false);
- dsi_vc_enable(dsi, channel, false);
+ /* MODE, 1 = video mode */
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 1, 4, 4);
- /* MODE, 1 = video mode */
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 1, 4, 4);
+ word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);
- word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);
+ dsi_vc_write_long_header(dsi, vc, dsi->dsidev->channel, data_type,
+ word_count, 0);
- dsi_vc_write_long_header(dsi, channel, data_type,
- word_count, 0);
+ dsi_vc_enable(dsi, vc, true);
+ dsi_if_enable(dsi, true);
- dsi_vc_enable(dsi, channel, true);
- dsi_if_enable(dsi, true);
+ return 0;
+}
+
+static void dsi_disable_video_mode(struct dsi_data *dsi, int vc)
+{
+ dsi_if_enable(dsi, false);
+ dsi_vc_enable(dsi, vc, false);
+
+ /* MODE, 0 = command mode */
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 0, 4, 4);
+
+ dsi_vc_enable(dsi, vc, true);
+ dsi_if_enable(dsi, true);
+}
+
+static void dsi_enable_video_output(struct omap_dss_device *dssdev, int vc)
+{
+ struct dsi_data *dsi = to_dsi_data(dssdev);
+ int r;
+
+ r = dsi_init_dispc(dsi);
+ if (r) {
+ dev_err(dsi->dev, "failed to init dispc!\n");
+ return;
+ }
+
+ if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
+ r = dsi_enable_video_mode(dsi, vc);
+ if (r)
+ goto err_video_mode;
}
r = dss_mgr_enable(&dsi->output);
if (r)
goto err_mgr_enable;
- return 0;
+ return;
err_mgr_enable:
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
dsi_if_enable(dsi, false);
- dsi_vc_enable(dsi, channel, false);
+ dsi_vc_enable(dsi, vc, false);
}
-err_pix_fmt:
- dsi_display_uninit_dispc(dsi);
- return r;
+err_video_mode:
+ dsi_uninit_dispc(dsi);
+ dev_err(dsi->dev, "failed to enable DSI encoder!\n");
+ return;
}
-static void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
+static void dsi_disable_video_output(struct omap_dss_device *dssdev, int vc)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
- if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- dsi_if_enable(dsi, false);
- dsi_vc_enable(dsi, channel, false);
-
- /* MODE, 0 = command mode */
- REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 0, 4, 4);
-
- dsi_vc_enable(dsi, channel, true);
- dsi_if_enable(dsi, true);
- }
+ if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE)
+ dsi_disable_video_mode(dsi, vc);
dss_mgr_disable(&dsi->output);
- dsi_display_uninit_dispc(dsi);
+ dsi_uninit_dispc(dsi);
}
static void dsi_update_screen_dispc(struct dsi_data *dsi)
@@ -3817,16 +3092,14 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi)
unsigned int packet_len;
u32 l;
int r;
- const unsigned channel = dsi->update_channel;
+ const unsigned vc = dsi->update_vc;
const unsigned int line_buf_size = dsi->line_buffer_size;
u16 w = dsi->vm.hactive;
u16 h = dsi->vm.vactive;
DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
- dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_VP);
-
- bytespp = dsi_get_pixel_size(dsi->pix_fmt) / 8;
+ bytespp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt) / 8;
bytespl = w * bytespp;
bytespf = bytespl * h;
@@ -3845,16 +3118,16 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi)
total_len += (bytespf % packet_payload) + 1;
l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
- dsi_write_reg(dsi, DSI_VC_TE(channel), l);
+ dsi_write_reg(dsi, DSI_VC_TE(vc), l);
- dsi_vc_write_long_header(dsi, channel, MIPI_DSI_DCS_LONG_WRITE,
+ dsi_vc_write_long_header(dsi, vc, dsi->dsidev->channel, MIPI_DSI_DCS_LONG_WRITE,
packet_len, 0);
if (dsi->te_enabled)
l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
else
l = FLD_MOD(l, 1, 31, 31); /* TE_START */
- dsi_write_reg(dsi, DSI_VC_TE(channel), l);
+ dsi_write_reg(dsi, DSI_VC_TE(vc), l);
/* We put SIDLEMODE to no-idle for the duration of the transfer,
* because DSS interrupts are not capable of waking up the CPU and the
@@ -3877,7 +3150,7 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi)
* for TE is longer than the timer allows */
REG_FLD_MOD(dsi, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
- dsi_vc_send_bta(dsi, channel);
+ dsi_vc_send_bta(dsi, vc);
#ifdef DSI_CATCH_MISSING_TE
mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
@@ -3902,7 +3175,7 @@ static void dsi_handle_framedone(struct dsi_data *dsi, int error)
REG_FLD_MOD(dsi, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
}
- dsi->framedone_callback(error, dsi->framedone_data);
+ dsi_bus_unlock(dsi);
if (!error)
dsi_perf_show(dsi, "DISPC");
@@ -3935,30 +3208,91 @@ static void dsi_framedone_irq_callback(void *data)
cancel_delayed_work(&dsi->framedone_timeout_work);
+ DSSDBG("Framedone received!\n");
+
dsi_handle_framedone(dsi, 0);
}
-static int dsi_update(struct omap_dss_device *dssdev, int channel,
- void (*callback)(int, void *), void *data)
+static int _dsi_update(struct dsi_data *dsi)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
dsi_perf_mark_setup(dsi);
- dsi->update_channel = channel;
-
- dsi->framedone_callback = callback;
- dsi->framedone_data = data;
-
#ifdef DSI_PERF_MEASURE
dsi->update_bytes = dsi->vm.hactive * dsi->vm.vactive *
- dsi_get_pixel_size(dsi->pix_fmt) / 8;
+ mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt) / 8;
#endif
dsi_update_screen_dispc(dsi);
return 0;
}
+static int _dsi_send_nop(struct dsi_data *dsi, int vc, int channel)
+{
+ const u8 payload[] = { MIPI_DCS_NOP };
+ const struct mipi_dsi_msg msg = {
+ .channel = channel,
+ .type = MIPI_DSI_DCS_SHORT_WRITE,
+ .tx_len = 1,
+ .tx_buf = payload,
+ };
+
+ WARN_ON(!dsi_bus_is_locked(dsi));
+
+ return _omap_dsi_host_transfer(dsi, vc, &msg);
+}
+
+static int dsi_update_channel(struct omap_dss_device *dssdev, int vc)
+{
+ struct dsi_data *dsi = to_dsi_data(dssdev);
+ int r;
+
+ dsi_bus_lock(dsi);
+
+ if (!dsi->video_enabled) {
+ r = -EIO;
+ goto err;
+ }
+
+ if (dsi->vm.hactive == 0 || dsi->vm.vactive == 0) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ DSSDBG("dsi_update_channel: %d", vc);
+
+ /*
+ * Send NOP between the frames. If we don't send something here, the
+ * updates stop working. This is probably related to DSI spec stating
+ * that the DSI host should transition to LP at least once per frame.
+ */
+ r = _dsi_send_nop(dsi, VC_CMD, dsi->dsidev->channel);
+ if (r < 0) {
+ DSSWARN("failed to send nop between frames: %d\n", r);
+ goto err;
+ }
+
+ dsi->update_vc = vc;
+
+ if (dsi->te_enabled && dsi->te_gpio) {
+ schedule_delayed_work(&dsi->te_timeout_work,
+ msecs_to_jiffies(250));
+ atomic_set(&dsi->do_ext_te_update, 1);
+ } else {
+ _dsi_update(dsi);
+ }
+
+ return 0;
+
+err:
+ dsi_bus_unlock(dsi);
+ return r;
+}
+
+static int dsi_update_all(struct omap_dss_device *dssdev)
+{
+ return dsi_update_channel(dssdev, VC_VIDEO);
+}
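To summarize the manual-update path these two helpers implement (comment-only sketch, flow reconstructed from this patch):

/*
 * omapdrm display update
 *   -> dsi_ops.update = dsi_update_all()
 *      -> dsi_update_channel(dssdev, VC_VIDEO)
 *         -> _dsi_send_nop(VC_CMD)          one LP transition per frame
 *         -> TE GPIO in use: arm te_timeout_work and wait for the TE
 *            interrupt (omap_dsi_te_irq_handler() -> _dsi_update())
 *         -> otherwise: _dsi_update() immediately
 *            -> dsi_update_screen_dispc(); FRAMEDONE releases the bus lock
 */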
+
/* Display funcs */
static int dsi_configure_dispc_clocks(struct dsi_data *dsi)
@@ -3983,12 +3317,12 @@ static int dsi_configure_dispc_clocks(struct dsi_data *dsi)
return 0;
}
-static int dsi_display_init_dispc(struct dsi_data *dsi)
+static int dsi_init_dispc(struct dsi_data *dsi)
{
- enum omap_channel channel = dsi->output.dispc_channel;
+ enum omap_channel dispc_channel = dsi->output.dispc_channel;
int r;
- dss_select_lcd_clk_source(dsi->dss, channel, dsi->module_id == 0 ?
+ dss_select_lcd_clk_source(dsi->dss, dispc_channel, dsi->module_id == 0 ?
DSS_CLK_SRC_PLL1_1 :
DSS_CLK_SRC_PLL2_1);
@@ -4013,7 +3347,7 @@ static int dsi_display_init_dispc(struct dsi_data *dsi)
dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
dsi->mgr_config.video_port_width =
- dsi_get_pixel_size(dsi->pix_fmt);
+ mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt);
dsi->mgr_config.lcden_sig_polarity = 0;
dss_mgr_set_lcd_config(&dsi->output, &dsi->mgr_config);
@@ -4024,19 +3358,19 @@ err1:
dss_mgr_unregister_framedone_handler(&dsi->output,
dsi_framedone_irq_callback, dsi);
err:
- dss_select_lcd_clk_source(dsi->dss, channel, DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(dsi->dss, dispc_channel, DSS_CLK_SRC_FCK);
return r;
}
-static void dsi_display_uninit_dispc(struct dsi_data *dsi)
+static void dsi_uninit_dispc(struct dsi_data *dsi)
{
- enum omap_channel channel = dsi->output.dispc_channel;
+ enum omap_channel dispc_channel = dsi->output.dispc_channel;
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
dss_mgr_unregister_framedone_handler(&dsi->output,
dsi_framedone_irq_callback, dsi);
- dss_select_lcd_clk_source(dsi->dss, channel, DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(dsi->dss, dispc_channel, DSS_CLK_SRC_FCK);
}
static int dsi_configure_dsi_clocks(struct dsi_data *dsi)
@@ -4055,7 +3389,37 @@ static int dsi_configure_dsi_clocks(struct dsi_data *dsi)
return 0;
}
-static int dsi_display_init_dsi(struct dsi_data *dsi)
+static void dsi_setup_dsi_vcs(struct dsi_data *dsi)
+{
+ /* Setup VC_CMD for LP and cpu transfers */
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_CMD), 0, 9, 9); /* LP */
+
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_CMD), 0, 1, 1); /* SOURCE_L4 */
+ dsi->vc[VC_CMD].source = DSI_VC_SOURCE_L4;
+
+ /* Setup VC_VIDEO for HS and dispc transfers */
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 9, 9); /* HS */
+
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 1, 1); /* SOURCE_VP */
+ dsi->vc[VC_VIDEO].source = DSI_VC_SOURCE_VP;
+
+ if ((dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) &&
+ !(dsi->dsidev->mode_flags & MIPI_DSI_MODE_VIDEO))
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 30, 30); /* DCS_CMD_ENABLE */
+
+ dsi_vc_enable(dsi, VC_CMD, 1);
+ dsi_vc_enable(dsi, VC_VIDEO, 1);
+
+ dsi_if_enable(dsi, 1);
+
+ dsi_force_tx_stop_mode_io(dsi);
+
+ /* start the DDR clock by sending a NULL packet */
+ if (!(dsi->dsidev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ dsi_vc_send_null(dsi, VC_CMD, dsi->dsidev->channel);
+}
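The fixed roles assumed by dsi_setup_dsi_vcs() (VC_CMD and VC_VIDEO are constants from the new dsi.h; the block below is a comment-only restatement, not new code):

/*
 * VC_CMD   - kept in LP, sourced from L4: CPU-driven command/readback
 *            traffic and the inter-frame NOPs.
 * VC_VIDEO - kept in HS, sourced from the video port: pixel data pushed
 *            by DISPC in both video mode and command-mode updates.
 */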
+
+static int dsi_init_dsi(struct dsi_data *dsi)
{
int r;
@@ -4097,13 +3461,7 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
if (r)
goto err3;
- /* enable interface */
- dsi_vc_enable(dsi, 0, 1);
- dsi_vc_enable(dsi, 1, 1);
- dsi_vc_enable(dsi, 2, 1);
- dsi_vc_enable(dsi, 3, 1);
- dsi_if_enable(dsi, 1);
- dsi_force_tx_stop_mode_io(dsi);
+ dsi_setup_dsi_vcs(dsi);
return 0;
err3:
@@ -4119,12 +3477,8 @@ err0:
return r;
}
-static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
- bool enter_ulps)
+static void dsi_uninit_dsi(struct dsi_data *dsi)
{
- if (enter_ulps && !dsi->ulps_enabled)
- dsi_enter_ulps(dsi);
-
/* disable interface */
dsi_if_enable(dsi, 0);
dsi_vc_enable(dsi, 0, 0);
@@ -4136,21 +3490,19 @@ static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
dsi_cio_uninit(dsi);
dss_pll_disable(&dsi->pll);
- if (disconnect_lanes) {
- regulator_disable(dsi->vdds_dsi_reg);
- dsi->vdds_dsi_enabled = false;
- }
+ regulator_disable(dsi->vdds_dsi_reg);
+ dsi->vdds_dsi_enabled = false;
}
-static void dsi_display_enable(struct omap_dss_device *dssdev)
+static void dsi_enable(struct dsi_data *dsi)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- DSSDBG("dsi_display_enable\n");
-
WARN_ON(!dsi_bus_is_locked(dsi));
+ if (WARN_ON(dsi->iface_enabled))
+ return;
+
mutex_lock(&dsi->lock);
r = dsi_runtime_get(dsi);
@@ -4159,10 +3511,12 @@ static void dsi_display_enable(struct omap_dss_device *dssdev)
_dsi_initialize_irq(dsi);
- r = dsi_display_init_dsi(dsi);
+ r = dsi_init_dsi(dsi);
if (r)
goto err_init_dsi;
+ dsi->iface_enabled = true;
+
mutex_unlock(&dsi->lock);
return;
@@ -4171,18 +3525,16 @@ err_init_dsi:
dsi_runtime_put(dsi);
err_get_dsi:
mutex_unlock(&dsi->lock);
- DSSDBG("dsi_display_enable FAILED\n");
+ DSSDBG("dsi_enable FAILED\n");
}
-static void dsi_display_disable(struct omap_dss_device *dssdev,
- bool disconnect_lanes, bool enter_ulps)
+static void dsi_disable(struct dsi_data *dsi)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
- DSSDBG("dsi_display_disable\n");
-
WARN_ON(!dsi_bus_is_locked(dsi));
+ if (WARN_ON(!dsi->iface_enabled))
+ return;
+
mutex_lock(&dsi->lock);
dsi_sync_vc(dsi, 0);
@@ -4190,18 +3542,26 @@ static void dsi_display_disable(struct omap_dss_device *dssdev,
dsi_sync_vc(dsi, 2);
dsi_sync_vc(dsi, 3);
- dsi_display_uninit_dsi(dsi, disconnect_lanes, enter_ulps);
+ dsi_uninit_dsi(dsi);
dsi_runtime_put(dsi);
+ dsi->iface_enabled = false;
+
mutex_unlock(&dsi->lock);
}
-static int dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
+static int dsi_enable_te(struct dsi_data *dsi, bool enable)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
dsi->te_enabled = enable;
+
+ if (dsi->te_gpio) {
+ if (enable)
+ enable_irq(dsi->te_irq);
+ else
+ disable_irq(dsi->te_irq);
+ }
+
return 0;
}
@@ -4351,7 +3711,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
unsigned long pck, txbyteclk;
clkin = clk_get_rate(dsi->pll.clkin);
- bitspp = dsi_get_pixel_size(cfg->pixel_format);
+ bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
ndl = dsi->num_lanes_used - 1;
/*
@@ -4384,7 +3744,7 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
{
struct dsi_data *dsi = ctx->dsi;
const struct omap_dss_dsi_config *cfg = ctx->config;
- int bitspp = dsi_get_pixel_size(cfg->pixel_format);
+ int bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
int ndl = dsi->num_lanes_used - 1;
unsigned long hsclk = ctx->dsi_cinfo.clkdco / 4;
unsigned long byteclk = hsclk / 4;
@@ -4531,7 +3891,6 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
dsi_vm->hfp_blanking_mode = 1;
dsi_vm->hbp_blanking_mode = 1;
- dsi_vm->ddr_clk_always_on = cfg->ddr_clk_always_on;
dsi_vm->window_sync = 4;
/* setup DISPC videomode */
@@ -4651,7 +4010,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
unsigned long pll_min;
unsigned long pll_max;
int ndl = dsi->num_lanes_used - 1;
- int bitspp = dsi_get_pixel_size(cfg->pixel_format);
+ int bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format);
unsigned long byteclk_min;
clkin = clk_get_rate(dsi->pll.clkin);
@@ -4684,39 +4043,62 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
dsi_vm_calc_pll_cb, ctx);
}
-static int dsi_set_config(struct omap_dss_device *dssdev,
- const struct omap_dss_dsi_config *config)
+static bool dsi_is_video_mode(struct omap_dss_device *dssdev)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
- struct dsi_clk_calc_ctx ctx;
+
+ return dsi->mode == OMAP_DSS_DSI_VIDEO_MODE;
+}
+
+static int __dsi_calc_config(struct dsi_data *dsi,
+ const struct drm_display_mode *mode,
+ struct dsi_clk_calc_ctx *ctx)
+{
+ struct omap_dss_dsi_config cfg = dsi->config;
+ struct videomode vm;
bool ok;
int r;
- mutex_lock(&dsi->lock);
+ drm_display_mode_to_videomode(mode, &vm);
- dsi->pix_fmt = config->pixel_format;
- dsi->mode = config->mode;
+ cfg.vm = &vm;
+ cfg.mode = dsi->mode;
+ cfg.pixel_format = dsi->pix_fmt;
- if (config->mode == OMAP_DSS_DSI_VIDEO_MODE)
- ok = dsi_vm_calc(dsi, config, &ctx);
+ if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE)
+ ok = dsi_vm_calc(dsi, &cfg, ctx);
else
- ok = dsi_cm_calc(dsi, config, &ctx);
+ ok = dsi_cm_calc(dsi, &cfg, ctx);
- if (!ok) {
- DSSERR("failed to find suitable DSI clock settings\n");
- r = -EINVAL;
- goto err;
- }
+ if (!ok)
+ return -EINVAL;
+
+ dsi_pll_calc_dsi_fck(dsi, &ctx->dsi_cinfo);
+
+ r = dsi_lp_clock_calc(ctx->dsi_cinfo.clkout[HSDIV_DSI],
+ cfg.lp_clk_min, cfg.lp_clk_max, &ctx->lp_cinfo);
+ if (r)
+ return r;
+
+ return 0;
+}
- dsi_pll_calc_dsi_fck(dsi, &ctx.dsi_cinfo);
+static int dsi_set_config(struct omap_dss_device *dssdev,
+ const struct drm_display_mode *mode)
+{
+ struct dsi_data *dsi = to_dsi_data(dssdev);
+ struct dsi_clk_calc_ctx ctx;
+ int r;
- r = dsi_lp_clock_calc(ctx.dsi_cinfo.clkout[HSDIV_DSI],
- config->lp_clk_min, config->lp_clk_max, &dsi->user_lp_cinfo);
+ mutex_lock(&dsi->lock);
+
+ r = __dsi_calc_config(dsi, mode, &ctx);
if (r) {
- DSSERR("failed to find suitable DSI LP clock settings\n");
+ DSSERR("failed to find suitable DSI clock settings\n");
goto err;
}
+ dsi->user_lp_cinfo = ctx.lp_cinfo;
dsi->user_dsi_cinfo = ctx.dsi_cinfo;
dsi->user_dispc_cinfo = ctx.dispc_cinfo;
@@ -4757,12 +4139,12 @@ err:
}
/*
- * Return a hardcoded channel for the DSI output. This should work for
+ * Return a hardcoded dispc channel for the DSI output. This should work for
* current use cases, but this can be later expanded to either resolve
* the channel in some more dynamic manner, or get the channel as a user
* parameter.
*/
-static enum omap_channel dsi_get_channel(struct dsi_data *dsi)
+static enum omap_channel dsi_get_dispc_channel(struct dsi_data *dsi)
{
switch (dsi->data->model) {
case DSI_MODEL_OMAP3:
@@ -4796,59 +4178,75 @@ static enum omap_channel dsi_get_channel(struct dsi_data *dsi)
}
}
-static int dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
+static ssize_t _omap_dsi_host_transfer(struct dsi_data *dsi, int vc,
+ const struct mipi_dsi_msg *msg)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
- int i;
+ struct omap_dss_device *dssdev = &dsi->output;
+ int r;
- for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
- if (!dsi->vc[i].dssdev) {
- dsi->vc[i].dssdev = dssdev;
- *channel = i;
- return 0;
- }
+ dsi_vc_enable_hs(dssdev, vc, !(msg->flags & MIPI_DSI_MSG_USE_LPM));
+
+ switch (msg->type) {
+ case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+ case MIPI_DSI_GENERIC_LONG_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_DCS_LONG_WRITE:
+ case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
+ case MIPI_DSI_NULL_PACKET:
+ r = dsi_vc_write_common(dssdev, vc, msg);
+ break;
+ case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+ r = dsi_vc_generic_read(dssdev, vc, msg);
+ break;
+ case MIPI_DSI_DCS_READ:
+ r = dsi_vc_dcs_read(dssdev, vc, msg);
+ break;
+ default:
+ r = -EINVAL;
+ break;
}
- DSSERR("cannot get VC for display %s", dssdev->name);
- return -ENOSPC;
-}
-
-static int dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
-{
- struct dsi_data *dsi = to_dsi_data(dssdev);
-
- if (vc_id < 0 || vc_id > 3) {
- DSSERR("VC ID out of range\n");
- return -EINVAL;
- }
+ if (r < 0)
+ return r;
- if (channel < 0 || channel > 3) {
- DSSERR("Virtual Channel out of range\n");
- return -EINVAL;
- }
+ if (msg->type == MIPI_DSI_DCS_SHORT_WRITE ||
+ msg->type == MIPI_DSI_DCS_SHORT_WRITE_PARAM) {
+ u8 cmd = ((u8 *)msg->tx_buf)[0];
- if (dsi->vc[channel].dssdev != dssdev) {
- DSSERR("Virtual Channel not allocated to display %s\n",
- dssdev->name);
- return -EINVAL;
+ if (cmd == MIPI_DCS_SET_TEAR_OFF)
+ dsi_enable_te(dsi, false);
+ else if (cmd == MIPI_DCS_SET_TEAR_ON)
+ dsi_enable_te(dsi, true);
}
- dsi->vc[channel].vc_id = vc_id;
-
return 0;
}
-static void dsi_release_vc(struct omap_dss_device *dssdev, int channel)
+static ssize_t omap_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
+ struct dsi_data *dsi = host_to_omap(host);
+ int r;
+ int vc = VC_CMD;
- if ((channel >= 0 && channel <= 3) &&
- dsi->vc[channel].dssdev == dssdev) {
- dsi->vc[channel].dssdev = NULL;
- dsi->vc[channel].vc_id = 0;
+ dsi_bus_lock(dsi);
+
+ if (!dsi->iface_enabled) {
+ dsi_enable(dsi);
+ schedule_delayed_work(&dsi->dsi_disable_work, msecs_to_jiffies(2000));
}
-}
+ r = _omap_dsi_host_transfer(dsi, vc, msg);
+
+ dsi_bus_unlock(dsi);
+
+ return r;
+}
static int dsi_get_clocks(struct dsi_data *dsi)
{
@@ -4865,57 +4263,167 @@ static int dsi_get_clocks(struct dsi_data *dsi)
return 0;
}
-static int dsi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+static const struct omapdss_dsi_ops dsi_ops = {
+ .update = dsi_update_all,
+ .is_video_mode = dsi_is_video_mode,
+};
+
+static irqreturn_t omap_dsi_te_irq_handler(int irq, void *dev_id)
{
- return omapdss_device_connect(dst->dss, dst, dst->next);
+ struct dsi_data *dsi = (struct dsi_data *)dev_id;
+ int old;
+
+ old = atomic_cmpxchg(&dsi->do_ext_te_update, 1, 0);
+ if (old) {
+ cancel_delayed_work(&dsi->te_timeout_work);
+ _dsi_update(dsi);
+ }
+
+ return IRQ_HANDLED;
}
-static void dsi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+static void omap_dsi_te_timeout_work_callback(struct work_struct *work)
{
- omapdss_device_disconnect(dst, dst->next);
+ struct dsi_data *dsi =
+ container_of(work, struct dsi_data, te_timeout_work.work);
+ int old;
+
+ old = atomic_cmpxchg(&dsi->do_ext_te_update, 1, 0);
+ if (old) {
+ dev_err(dsi->dev, "TE not received for 250ms!\n");
+ _dsi_update(dsi);
+ }
}
-static const struct omap_dss_device_ops dsi_ops = {
- .connect = dsi_connect,
- .disconnect = dsi_disconnect,
- .enable = dsi_display_enable,
+static int omap_dsi_register_te_irq(struct dsi_data *dsi,
+ struct mipi_dsi_device *client)
+{
+ int err;
+ int te_irq;
- .dsi = {
- .bus_lock = dsi_bus_lock,
- .bus_unlock = dsi_bus_unlock,
+ dsi->te_gpio = gpiod_get(&client->dev, "te-gpios", GPIOD_IN);
+ if (IS_ERR(dsi->te_gpio)) {
+ err = PTR_ERR(dsi->te_gpio);
- .disable = dsi_display_disable,
+ if (err == -ENOENT) {
+ dsi->te_gpio = NULL;
+ return 0;
+ }
- .enable_hs = dsi_vc_enable_hs,
+ dev_err(dsi->dev, "Could not get TE gpio: %d\n", err);
+ return err;
+ }
+
+ te_irq = gpiod_to_irq(dsi->te_gpio);
+ if (te_irq < 0) {
+ gpiod_put(dsi->te_gpio);
+ dsi->te_gpio = NULL;
+ return -EINVAL;
+ }
+
+ dsi->te_irq = te_irq;
- .configure_pins = dsi_configure_pins,
- .set_config = dsi_set_config,
+ irq_set_status_flags(te_irq, IRQ_NOAUTOEN);
- .enable_video_output = dsi_enable_video_output,
- .disable_video_output = dsi_disable_video_output,
+ err = request_threaded_irq(te_irq, NULL, omap_dsi_te_irq_handler,
+ IRQF_TRIGGER_RISING, "TE", dsi);
+ if (err) {
+ dev_err(dsi->dev, "request irq failed with %d\n", err);
+ gpiod_put(dsi->te_gpio);
+ dsi->te_gpio = NULL;
+ return err;
+ }
+
+ INIT_DEFERRABLE_WORK(&dsi->te_timeout_work,
+ omap_dsi_te_timeout_work_callback);
- .update = dsi_update,
+ dev_dbg(dsi->dev, "Using GPIO TE\n");
- .enable_te = dsi_enable_te,
+ return 0;
+}
- .request_vc = dsi_request_vc,
- .set_vc_id = dsi_set_vc_id,
- .release_vc = dsi_release_vc,
+static void omap_dsi_unregister_te_irq(struct dsi_data *dsi)
+{
+ if (dsi->te_gpio) {
+ free_irq(dsi->te_irq, dsi);
+ cancel_delayed_work(&dsi->te_timeout_work);
+ gpiod_put(dsi->te_gpio);
+ dsi->te_gpio = NULL;
+ }
+}
- .dcs_write = dsi_vc_dcs_write,
- .dcs_write_nosync = dsi_vc_dcs_write_nosync,
- .dcs_read = dsi_vc_dcs_read,
+static int omap_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *client)
+{
+ struct dsi_data *dsi = host_to_omap(host);
+ int r;
- .gen_write = dsi_vc_generic_write,
- .gen_write_nosync = dsi_vc_generic_write_nosync,
- .gen_read = dsi_vc_generic_read,
+ if (dsi->dsidev) {
+ DSSERR("dsi client already attached\n");
+ return -EBUSY;
+ }
- .bta_sync = dsi_vc_send_bta_sync,
+ if (mipi_dsi_pixel_format_to_bpp(client->format) < 0) {
+ DSSERR("invalid pixel format\n");
+ return -EINVAL;
+ }
- .set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size,
- },
+ atomic_set(&dsi->do_ext_te_update, 0);
+
+ if (client->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ dsi->mode = OMAP_DSS_DSI_VIDEO_MODE;
+ } else {
+ r = omap_dsi_register_te_irq(dsi, client);
+ if (r)
+ return r;
+
+ dsi->mode = OMAP_DSS_DSI_CMD_MODE;
+ }
+
+ dsi->dsidev = client;
+ dsi->pix_fmt = client->format;
+
+ dsi->config.hs_clk_min = 150000000; // TODO: get from client?
+ dsi->config.hs_clk_max = client->hs_rate;
+ dsi->config.lp_clk_min = 7000000; // TODO: get from client?
+ dsi->config.lp_clk_max = client->lp_rate;
+
+ if (client->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ dsi->config.trans_mode = OMAP_DSS_DSI_BURST_MODE;
+ else if (client->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ dsi->config.trans_mode = OMAP_DSS_DSI_PULSE_MODE;
+ else
+ dsi->config.trans_mode = OMAP_DSS_DSI_EVENT_MODE;
+
+ return 0;
+}
+
+static int omap_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *client)
+{
+ struct dsi_data *dsi = host_to_omap(host);
+
+ if (WARN_ON(dsi->dsidev != client))
+ return -EINVAL;
+
+ cancel_delayed_work_sync(&dsi->dsi_disable_work);
+
+ dsi_bus_lock(dsi);
+
+ if (dsi->iface_enabled)
+ dsi_disable(dsi);
+
+ dsi_bus_unlock(dsi);
+
+ omap_dsi_unregister_te_irq(dsi);
+ dsi->dsidev = NULL;
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops omap_dsi_host_ops = {
+ .attach = omap_dsi_host_attach,
+ .detach = omap_dsi_host_detach,
+ .transfer = omap_dsi_host_transfer,
};
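With these host ops in place, a panel client binds through the regular mipi_dsi device model. A hedged sketch (illustrative values and function name) of the fields omap_dsi_host_attach() consumes:

#include <drm/drm_mipi_dsi.h>

/* Illustrative only: what a client sets before attaching to this host. */
static int example_panel_probe(struct mipi_dsi_device *dsi)
{
	dsi->lanes      = 2;                          /* data lanes used   */
	dsi->format     = MIPI_DSI_FMT_RGB888;        /* -> dsi->pix_fmt   */
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |       /* video vs cmd mode */
			  MIPI_DSI_MODE_VIDEO_BURST;  /* -> trans_mode     */
	dsi->hs_rate    = 300000000;                  /* -> hs_clk_max     */
	dsi->lp_rate    = 10000000;                   /* -> lp_clk_max     */

	return mipi_dsi_attach(dsi);   /* ends up in omap_dsi_host_attach() */
}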
/* -----------------------------------------------------------------------------
@@ -5097,6 +4605,106 @@ static const struct component_ops dsi_component_ops = {
};
/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, dsi->output.next_bridge,
+ bridge, flags);
+}
+
+static enum drm_mode_status
+dsi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+ struct dsi_clk_calc_ctx ctx;
+ int r;
+
+ mutex_lock(&dsi->lock);
+ r = __dsi_calc_config(dsi, mode, &ctx);
+ mutex_unlock(&dsi->lock);
+
+ return r ? MODE_CLOCK_RANGE : MODE_OK;
+}
+
+static void dsi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+
+ dsi_set_config(&dsi->output, adjusted_mode);
+}
+
+static void dsi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+ struct omap_dss_device *dssdev = &dsi->output;
+
+ cancel_delayed_work_sync(&dsi->dsi_disable_work);
+
+ dsi_bus_lock(dsi);
+
+ if (!dsi->iface_enabled)
+ dsi_enable(dsi);
+
+ dsi_enable_video_output(dssdev, VC_VIDEO);
+
+ dsi->video_enabled = true;
+
+ dsi_bus_unlock(dsi);
+}
+
+static void dsi_bridge_disable(struct drm_bridge *bridge)
+{
+ struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
+ struct omap_dss_device *dssdev = &dsi->output;
+
+ cancel_delayed_work_sync(&dsi->dsi_disable_work);
+
+ dsi_bus_lock(dsi);
+
+ dsi->video_enabled = false;
+
+ dsi_disable_video_output(dssdev, VC_VIDEO);
+
+ dsi_disable(dsi);
+
+ dsi_bus_unlock(dsi);
+}
+
+static const struct drm_bridge_funcs dsi_bridge_funcs = {
+ .attach = dsi_bridge_attach,
+ .mode_valid = dsi_bridge_mode_valid,
+ .mode_set = dsi_bridge_mode_set,
+ .enable = dsi_bridge_enable,
+ .disable = dsi_bridge_disable,
+};
+
+static void dsi_bridge_init(struct dsi_data *dsi)
+{
+ dsi->bridge.funcs = &dsi_bridge_funcs;
+ dsi->bridge.of_node = dsi->host.dev->of_node;
+ dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
+
+ drm_bridge_add(&dsi->bridge);
+}
+
+static void dsi_bridge_cleanup(struct dsi_data *dsi)
+{
+ drm_bridge_remove(&dsi->bridge);
+}
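The bridge hooks above gate the whole interface; a comment-only summary of the resulting lifecycle as implemented in this patch:

/*
 * dsi_bridge_mode_set()  -> dsi_set_config()            pick DSI/LP clocks
 * dsi_bridge_enable()    -> dsi_enable()                PLL, CIO, VCs up
 *                        -> dsi_enable_video_output(VC_VIDEO)
 * dsi_bridge_disable()   -> dsi_disable_video_output(VC_VIDEO)
 *                        -> dsi_disable()               interface down
 *
 * Command transfers outside enable/disable raise the interface on demand
 * in omap_dsi_host_transfer() and drop it again from dsi_disable_work.
 */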
+
+/* -----------------------------------------------------------------------------
* Probe & Remove, Suspend & Resume
*/
@@ -5105,23 +4713,26 @@ static int dsi_init_output(struct dsi_data *dsi)
struct omap_dss_device *out = &dsi->output;
int r;
+ dsi_bridge_init(dsi);
+
out->dev = dsi->dev;
out->id = dsi->module_id == 0 ?
OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
out->type = OMAP_DISPLAY_TYPE_DSI;
out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
- out->dispc_channel = dsi_get_channel(dsi);
- out->ops = &dsi_ops;
- out->owner = THIS_MODULE;
+ out->dispc_channel = dsi_get_dispc_channel(dsi);
+ out->dsi_ops = &dsi_ops;
out->of_port = 0;
out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE
| DRM_BUS_FLAG_DE_HIGH
| DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
- r = omapdss_device_init_output(out, NULL);
- if (r < 0)
+ r = omapdss_device_init_output(out, &dsi->bridge);
+ if (r < 0) {
+ dsi_bridge_cleanup(dsi);
return r;
+ }
omapdss_device_register(out);
@@ -5134,6 +4745,7 @@ static void dsi_uninit_output(struct dsi_data *dsi)
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
+ dsi_bridge_cleanup(dsi);
}
static int dsi_probe_of(struct dsi_data *dsi)
@@ -5142,9 +4754,8 @@ static int dsi_probe_of(struct dsi_data *dsi)
struct property *prop;
u32 lane_arr[10];
int len, num_pins;
- int r, i;
+ int r;
struct device_node *ep;
- struct omap_dsi_pin_config pin_cfg;
ep = of_graph_get_endpoint_by_regs(node, 0, 0);
if (!ep)
@@ -5172,11 +4783,7 @@ static int dsi_probe_of(struct dsi_data *dsi)
goto err;
}
- pin_cfg.num_pins = num_pins;
- for (i = 0; i < num_pins; ++i)
- pin_cfg.pins[i] = (int)lane_arr[i];
-
- r = dsi_configure_pins(&dsi->output, &pin_cfg);
+ r = dsi_configure_pins(dsi, num_pins, lane_arr);
if (r) {
dev_err(dsi->dev, "failed to configure pins");
goto err;
@@ -5256,6 +4863,18 @@ static const struct soc_device_attribute dsi_soc_devices[] = {
{ /* sentinel */ }
};
+static void omap_dsi_disable_work_callback(struct work_struct *work)
+{
+ struct dsi_data *dsi = container_of(work, struct dsi_data, dsi_disable_work.work);
+
+ dsi_bus_lock(dsi);
+
+ if (dsi->iface_enabled && !dsi->video_enabled)
+ dsi_disable(dsi);
+
+ dsi_bus_unlock(dsi);
+}
+
static int dsi_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *soc;
@@ -5289,6 +4908,8 @@ static int dsi_probe(struct platform_device *pdev)
INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work,
dsi_framedone_timeout_work_callback);
+ INIT_DEFERRABLE_WORK(&dsi->dsi_disable_work, omap_dsi_disable_work_callback);
+
#ifdef DSI_CATCH_MISSING_TE
timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
#endif
@@ -5364,11 +4985,8 @@ static int dsi_probe(struct platform_device *pdev)
}
/* DSI VCs initialization */
- for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
+ for (i = 0; i < ARRAY_SIZE(dsi->vc); i++)
dsi->vc[i].source = DSI_VC_SOURCE_L4;
- dsi->vc[i].dssdev = NULL;
- dsi->vc[i].vc_id = 0;
- }
r = dsi_get_clocks(dsi);
if (r)
@@ -5387,21 +5005,24 @@ static int dsi_probe(struct platform_device *pdev)
dsi->num_lanes_supported = 3;
}
- r = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ dsi->host.ops = &omap_dsi_host_ops;
+ dsi->host.dev = &pdev->dev;
+
+ r = dsi_probe_of(dsi);
if (r) {
- DSSERR("Failed to populate DSI child devices: %d\n", r);
+ DSSERR("Invalid DSI DT data\n");
+ goto err_pm_disable;
+ }
+
+ r = mipi_dsi_host_register(&dsi->host);
+ if (r < 0) {
+ dev_err(&pdev->dev, "failed to register DSI host: %d\n", r);
goto err_pm_disable;
}
r = dsi_init_output(dsi);
if (r)
- goto err_of_depopulate;
-
- r = dsi_probe_of(dsi);
- if (r) {
- DSSERR("Invalid DSI DT data\n");
- goto err_uninit_output;
- }
+ goto err_dsi_host_unregister;
r = component_add(&pdev->dev, &dsi_component_ops);
if (r)
@@ -5411,8 +5032,8 @@ static int dsi_probe(struct platform_device *pdev)
err_uninit_output:
dsi_uninit_output(dsi);
-err_of_depopulate:
- of_platform_depopulate(dev);
+err_dsi_host_unregister:
+ mipi_dsi_host_unregister(&dsi->host);
err_pm_disable:
pm_runtime_disable(dev);
return r;
@@ -5426,7 +5047,7 @@ static int dsi_remove(struct platform_device *pdev)
dsi_uninit_output(dsi);
- of_platform_depopulate(&pdev->dev);
+ mipi_dsi_host_unregister(&dsi->host);
pm_runtime_disable(&pdev->dev);
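The new dsi.h that follows splits the register map into three SCP regions (protocol engine, PHY, PLL at offsets 0x0, 0x200 and 0x300), each register addressed by a (module, offset) pair. How that pair is resolved is not visible in this excerpt; a plausible accessor, stated purely as an assumption, would look like:

/* Assumed sketch only: map a dsi_reg onto one of the ioremapped regions. */
static inline void __iomem *example_dsi_reg_addr(void __iomem *bases[],
						 const struct dsi_reg reg)
{
	return bases[reg.module] + reg.idx;  /* module selects PROTO/PHY/PLL */
}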
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.h b/drivers/gpu/drm/omapdrm/dss/dsi.h
new file mode 100644
index 000000000000..601707c0ecc4
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.h
@@ -0,0 +1,456 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __OMAP_DRM_DSS_DSI_H
+#define __OMAP_DRM_DSS_DSI_H
+
+#include <drm/drm_mipi_dsi.h>
+
+struct dsi_reg {
+ u16 module;
+ u16 idx;
+};
+
+#define DSI_REG(mod, idx) ((const struct dsi_reg) { mod, idx })
+
+/* DSI Protocol Engine */
+
+#define DSI_PROTO 0
+#define DSI_PROTO_SZ 0x200
+
+#define DSI_REVISION DSI_REG(DSI_PROTO, 0x0000)
+#define DSI_SYSCONFIG DSI_REG(DSI_PROTO, 0x0010)
+#define DSI_SYSSTATUS DSI_REG(DSI_PROTO, 0x0014)
+#define DSI_IRQSTATUS DSI_REG(DSI_PROTO, 0x0018)
+#define DSI_IRQENABLE DSI_REG(DSI_PROTO, 0x001C)
+#define DSI_CTRL DSI_REG(DSI_PROTO, 0x0040)
+#define DSI_GNQ DSI_REG(DSI_PROTO, 0x0044)
+#define DSI_COMPLEXIO_CFG1 DSI_REG(DSI_PROTO, 0x0048)
+#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(DSI_PROTO, 0x004C)
+#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(DSI_PROTO, 0x0050)
+#define DSI_CLK_CTRL DSI_REG(DSI_PROTO, 0x0054)
+#define DSI_TIMING1 DSI_REG(DSI_PROTO, 0x0058)
+#define DSI_TIMING2 DSI_REG(DSI_PROTO, 0x005C)
+#define DSI_VM_TIMING1 DSI_REG(DSI_PROTO, 0x0060)
+#define DSI_VM_TIMING2 DSI_REG(DSI_PROTO, 0x0064)
+#define DSI_VM_TIMING3 DSI_REG(DSI_PROTO, 0x0068)
+#define DSI_CLK_TIMING DSI_REG(DSI_PROTO, 0x006C)
+#define DSI_TX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0070)
+#define DSI_RX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0074)
+#define DSI_COMPLEXIO_CFG2 DSI_REG(DSI_PROTO, 0x0078)
+#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(DSI_PROTO, 0x007C)
+#define DSI_VM_TIMING4 DSI_REG(DSI_PROTO, 0x0080)
+#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(DSI_PROTO, 0x0084)
+#define DSI_VM_TIMING5 DSI_REG(DSI_PROTO, 0x0088)
+#define DSI_VM_TIMING6 DSI_REG(DSI_PROTO, 0x008C)
+#define DSI_VM_TIMING7 DSI_REG(DSI_PROTO, 0x0090)
+#define DSI_STOPCLK_TIMING DSI_REG(DSI_PROTO, 0x0094)
+#define DSI_VC_CTRL(n) DSI_REG(DSI_PROTO, 0x0100 + (n * 0x20))
+#define DSI_VC_TE(n) DSI_REG(DSI_PROTO, 0x0104 + (n * 0x20))
+#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0108 + (n * 0x20))
+#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(DSI_PROTO, 0x010C + (n * 0x20))
+#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0110 + (n * 0x20))
+#define DSI_VC_IRQSTATUS(n) DSI_REG(DSI_PROTO, 0x0118 + (n * 0x20))
+#define DSI_VC_IRQENABLE(n) DSI_REG(DSI_PROTO, 0x011C + (n * 0x20))
+
+/* DSIPHY_SCP */
+
+#define DSI_PHY 1
+#define DSI_PHY_OFFSET 0x200
+#define DSI_PHY_SZ 0x40
+
+#define DSI_DSIPHY_CFG0 DSI_REG(DSI_PHY, 0x0000)
+#define DSI_DSIPHY_CFG1 DSI_REG(DSI_PHY, 0x0004)
+#define DSI_DSIPHY_CFG2 DSI_REG(DSI_PHY, 0x0008)
+#define DSI_DSIPHY_CFG5 DSI_REG(DSI_PHY, 0x0014)
+#define DSI_DSIPHY_CFG10 DSI_REG(DSI_PHY, 0x0028)
+
+/* DSI_PLL_CTRL_SCP */
+
+#define DSI_PLL 2
+#define DSI_PLL_OFFSET 0x300
+#define DSI_PLL_SZ 0x20
+
+#define DSI_PLL_CONTROL DSI_REG(DSI_PLL, 0x0000)
+#define DSI_PLL_STATUS DSI_REG(DSI_PLL, 0x0004)
+#define DSI_PLL_GO DSI_REG(DSI_PLL, 0x0008)
+#define DSI_PLL_CONFIGURATION1 DSI_REG(DSI_PLL, 0x000C)
+#define DSI_PLL_CONFIGURATION2 DSI_REG(DSI_PLL, 0x0010)
+
+/* Global interrupts */
+#define DSI_IRQ_VC0 (1 << 0)
+#define DSI_IRQ_VC1 (1 << 1)
+#define DSI_IRQ_VC2 (1 << 2)
+#define DSI_IRQ_VC3 (1 << 3)
+#define DSI_IRQ_WAKEUP (1 << 4)
+#define DSI_IRQ_RESYNC (1 << 5)
+#define DSI_IRQ_PLL_LOCK (1 << 7)
+#define DSI_IRQ_PLL_UNLOCK (1 << 8)
+#define DSI_IRQ_PLL_RECALL (1 << 9)
+#define DSI_IRQ_COMPLEXIO_ERR (1 << 10)
+#define DSI_IRQ_HS_TX_TIMEOUT (1 << 14)
+#define DSI_IRQ_LP_RX_TIMEOUT (1 << 15)
+#define DSI_IRQ_TE_TRIGGER (1 << 16)
+#define DSI_IRQ_ACK_TRIGGER (1 << 17)
+#define DSI_IRQ_SYNC_LOST (1 << 18)
+#define DSI_IRQ_LDO_POWER_GOOD (1 << 19)
+#define DSI_IRQ_TA_TIMEOUT (1 << 20)
+#define DSI_IRQ_ERROR_MASK \
+ (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
+ DSI_IRQ_TA_TIMEOUT)
+#define DSI_IRQ_CHANNEL_MASK 0xf
+
+/* Virtual channel interrupts */
+#define DSI_VC_IRQ_CS (1 << 0)
+#define DSI_VC_IRQ_ECC_CORR (1 << 1)
+#define DSI_VC_IRQ_PACKET_SENT (1 << 2)
+#define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3)
+#define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4)
+#define DSI_VC_IRQ_BTA (1 << 5)
+#define DSI_VC_IRQ_ECC_NO_CORR (1 << 6)
+#define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7)
+#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
+#define DSI_VC_IRQ_ERROR_MASK \
+ (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
+ DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
+ DSI_VC_IRQ_FIFO_TX_UDF)
+
+/* ComplexIO interrupts */
+#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
+#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
+#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
+#define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3)
+#define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4)
+#define DSI_CIO_IRQ_ERRESC1 (1 << 5)
+#define DSI_CIO_IRQ_ERRESC2 (1 << 6)
+#define DSI_CIO_IRQ_ERRESC3 (1 << 7)
+#define DSI_CIO_IRQ_ERRESC4 (1 << 8)
+#define DSI_CIO_IRQ_ERRESC5 (1 << 9)
+#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
+#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
+#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
+#define DSI_CIO_IRQ_ERRCONTROL4 (1 << 13)
+#define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14)
+#define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
+#define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
+#define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
+#define DSI_CIO_IRQ_STATEULPS4 (1 << 18)
+#define DSI_CIO_IRQ_STATEULPS5 (1 << 19)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29)
+#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
+#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
+#define DSI_CIO_IRQ_ERROR_MASK \
+ (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
+ DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
+ DSI_CIO_IRQ_ERRSYNCESC5 | \
+ DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
+ DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
+ DSI_CIO_IRQ_ERRESC5 | \
+ DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
+ DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
+ DSI_CIO_IRQ_ERRCONTROL5 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
+ DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
+
+enum omap_dss_dsi_mode {
+ OMAP_DSS_DSI_CMD_MODE = 0,
+ OMAP_DSS_DSI_VIDEO_MODE,
+};
+
+enum omap_dss_dsi_trans_mode {
+ /* Sync Pulses: both sync start and end packets sent */
+ OMAP_DSS_DSI_PULSE_MODE,
+ /* Sync Events: only sync start packets sent */
+ OMAP_DSS_DSI_EVENT_MODE,
+ /* Burst: only sync start packets sent, pixels are time compressed */
+ OMAP_DSS_DSI_BURST_MODE,
+};
+
+struct omap_dss_dsi_videomode_timings {
+ unsigned long hsclk;
+
+ unsigned int ndl;
+ unsigned int bitspp;
+
+ /* pixels */
+ u16 hact;
+ /* lines */
+ u16 vact;
+
+ /* DSI video mode blanking data */
+ /* Unit: byte clock cycles */
+ u16 hss;
+ u16 hsa;
+ u16 hse;
+ u16 hfp;
+ u16 hbp;
+ /* Unit: line clocks */
+ u16 vsa;
+ u16 vfp;
+ u16 vbp;
+
+ /* DSI blanking modes */
+ int blanking_mode;
+ int hsa_blanking_mode;
+ int hbp_blanking_mode;
+ int hfp_blanking_mode;
+
+ enum omap_dss_dsi_trans_mode trans_mode;
+
+ int window_sync;
+};
+
+struct omap_dss_dsi_config {
+ enum omap_dss_dsi_mode mode;
+ enum mipi_dsi_pixel_format pixel_format;
+ const struct videomode *vm;
+
+ unsigned long hs_clk_min, hs_clk_max;
+ unsigned long lp_clk_min, lp_clk_max;
+
+ enum omap_dss_dsi_trans_mode trans_mode;
+};
+
+/* DSI PLL HSDIV indices */
+#define HSDIV_DISPC 0
+#define HSDIV_DSI 1
+
+#define DSI_MAX_NR_ISRS 2
+#define DSI_MAX_NR_LANES 5
+
+enum dsi_model {
+ DSI_MODEL_OMAP3,
+ DSI_MODEL_OMAP4,
+ DSI_MODEL_OMAP5,
+};
+
+enum dsi_lane_function {
+ DSI_LANE_UNUSED = 0,
+ DSI_LANE_CLK,
+ DSI_LANE_DATA1,
+ DSI_LANE_DATA2,
+ DSI_LANE_DATA3,
+ DSI_LANE_DATA4,
+};
+
+struct dsi_lane_config {
+ enum dsi_lane_function function;
+ u8 polarity;
+};
+
+typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
+
+struct dsi_isr_data {
+ omap_dsi_isr_t isr;
+ void *arg;
+ u32 mask;
+};
+
+enum fifo_size {
+ DSI_FIFO_SIZE_0 = 0,
+ DSI_FIFO_SIZE_32 = 1,
+ DSI_FIFO_SIZE_64 = 2,
+ DSI_FIFO_SIZE_96 = 3,
+ DSI_FIFO_SIZE_128 = 4,
+};
+
+enum dsi_vc_source {
+ DSI_VC_SOURCE_L4 = 0,
+ DSI_VC_SOURCE_VP,
+};
+
+struct dsi_irq_stats {
+ unsigned long last_reset;
+ unsigned int irq_count;
+ unsigned int dsi_irqs[32];
+ unsigned int vc_irqs[4][32];
+ unsigned int cio_irqs[32];
+};
+
+struct dsi_isr_tables {
+ struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
+ struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
+ struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
+};
+
+struct dsi_lp_clock_info {
+ unsigned long lp_clk;
+ u16 lp_clk_div;
+};
+
+struct dsi_clk_calc_ctx {
+ struct dsi_data *dsi;
+ struct dss_pll *pll;
+
+ /* inputs */
+
+ const struct omap_dss_dsi_config *config;
+
+ unsigned long req_pck_min, req_pck_nom, req_pck_max;
+
+ /* outputs */
+
+ struct dss_pll_clock_info dsi_cinfo;
+ struct dispc_clock_info dispc_cinfo;
+ struct dsi_lp_clock_info lp_cinfo;
+
+ struct videomode vm;
+ struct omap_dss_dsi_videomode_timings dsi_vm;
+};
+
+struct dsi_module_id_data {
+ u32 address;
+ int id;
+};
+
+enum dsi_quirks {
+ DSI_QUIRK_PLL_PWR_BUG = (1 << 0), /* DSI-PLL power command 0x3 is not working */
+ DSI_QUIRK_DCS_CMD_CONFIG_VC = (1 << 1),
+ DSI_QUIRK_VC_OCP_WIDTH = (1 << 2),
+ DSI_QUIRK_REVERSE_TXCLKESC = (1 << 3),
+ DSI_QUIRK_GNQ = (1 << 4),
+ DSI_QUIRK_PHY_DCC = (1 << 5),
+};
+
+struct dsi_of_data {
+ enum dsi_model model;
+ const struct dss_pll_hw *pll_hw;
+ const struct dsi_module_id_data *modules;
+ unsigned int max_fck_freq;
+ unsigned int max_pll_lpdiv;
+ enum dsi_quirks quirks;
+};
+
+struct dsi_data {
+ struct device *dev;
+ void __iomem *proto_base;
+ void __iomem *phy_base;
+ void __iomem *pll_base;
+
+ const struct dsi_of_data *data;
+ int module_id;
+
+ int irq;
+
+ bool is_enabled;
+
+ struct clk *dss_clk;
+ struct regmap *syscon;
+ struct dss_device *dss;
+
+ struct mipi_dsi_host host;
+
+ struct dispc_clock_info user_dispc_cinfo;
+ struct dss_pll_clock_info user_dsi_cinfo;
+
+ struct dsi_lp_clock_info user_lp_cinfo;
+ struct dsi_lp_clock_info current_lp_cinfo;
+
+ struct dss_pll pll;
+
+ bool vdds_dsi_enabled;
+ struct regulator *vdds_dsi_reg;
+
+ struct mipi_dsi_device *dsidev;
+
+ struct {
+ enum dsi_vc_source source;
+ enum fifo_size tx_fifo_size;
+ enum fifo_size rx_fifo_size;
+ } vc[4];
+
+ struct mutex lock;
+ struct semaphore bus_lock;
+
+ spinlock_t irq_lock;
+ struct dsi_isr_tables isr_tables;
+ /* space for a copy used by the interrupt handler */
+ struct dsi_isr_tables isr_tables_copy;
+
+ int update_vc;
+#ifdef DSI_PERF_MEASURE
+ unsigned int update_bytes;
+#endif
+
+ /* external TE GPIO */
+ struct gpio_desc *te_gpio;
+ int te_irq;
+ struct delayed_work te_timeout_work;
+ atomic_t do_ext_te_update;
+
+ bool te_enabled;
+ bool iface_enabled;
+ bool video_enabled;
+
+ struct delayed_work framedone_timeout_work;
+
+#ifdef DSI_CATCH_MISSING_TE
+ struct timer_list te_timer;
+#endif
+
+ unsigned long cache_req_pck;
+ unsigned long cache_clk_freq;
+ struct dss_pll_clock_info cache_cinfo;
+
+ u32 errors;
+ spinlock_t errors_lock;
+#ifdef DSI_PERF_MEASURE
+ ktime_t perf_setup_time;
+ ktime_t perf_start_time;
+#endif
+ int debug_read;
+ int debug_write;
+ struct {
+ struct dss_debugfs_entry *irqs;
+ struct dss_debugfs_entry *regs;
+ struct dss_debugfs_entry *clks;
+ } debugfs;
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spinlock_t irq_stats_lock;
+ struct dsi_irq_stats irq_stats;
+#endif
+
+ unsigned int num_lanes_supported;
+ unsigned int line_buffer_size;
+
+ struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
+ unsigned int num_lanes_used;
+
+ unsigned int scp_clk_refcount;
+
+ struct omap_dss_dsi_config config;
+
+ struct dss_lcd_mgr_config mgr_config;
+ struct videomode vm;
+ enum mipi_dsi_pixel_format pix_fmt;
+ enum omap_dss_dsi_mode mode;
+ struct omap_dss_dsi_videomode_timings vm_timings;
+
+ struct omap_dss_device output;
+ struct drm_bridge bridge;
+
+ struct delayed_work dsi_disable_work;
+};
+
+struct dsi_packet_sent_handler_data {
+ struct dsi_data *dsi;
+ struct completion *completion;
+};
+
+#endif /* __OMAP_DRM_DSS_DSI_H */
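The register definitions above address every register as a {module, idx} pair instead of a flat offset; DSI_PROTO, DSI_PHY and DSI_PLL select one of the three ioremapped regions kept in struct dsi_data (proto_base, phy_base, pll_base). A minimal sketch of the expected dispatch, with the helper name and fallback behaviour as illustrative assumptions:

static inline u32 dsi_read_reg_sketch(struct dsi_data *dsi,
				      const struct dsi_reg idx)
{
	void __iomem *base;

	switch (idx.module) {
	case DSI_PROTO:
		base = dsi->proto_base;
		break;
	case DSI_PHY:
		base = dsi->phy_base;
		break;
	case DSI_PLL:
		base = dsi->pll_base;
		break;
	default:
		return 0;
	}

	return readl_relaxed(base + idx.idx);
}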
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index d7b2f5bcac16..d6a5862b4dbf 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1308,6 +1308,7 @@ static int dss_bind(struct device *dev)
{
struct dss_device *dss = dev_get_drvdata(dev);
struct platform_device *drm_pdev;
+ struct dss_pdata pdata;
int r;
r = component_bind_all(dev, NULL);
@@ -1316,9 +1317,9 @@ static int dss_bind(struct device *dev)
pm_set_vt_switch(0);
- omapdss_set_dss(dss);
-
- drm_pdev = platform_device_register_simple("omapdrm", 0, NULL, 0);
+ pdata.dss = dss;
+ drm_pdev = platform_device_register_data(NULL, "omapdrm", 0,
+ &pdata, sizeof(pdata));
if (IS_ERR(drm_pdev)) {
component_unbind_all(dev, NULL);
return PTR_ERR(drm_pdev);
@@ -1335,8 +1336,6 @@ static void dss_unbind(struct device *dev)
platform_device_unregister(dss->drm_pdev);
- omapdss_set_dss(NULL);
-
component_unbind_all(dev, NULL);
}
@@ -1569,15 +1568,7 @@ static int dss_remove(struct platform_device *pdev)
static void dss_shutdown(struct platform_device *pdev)
{
- struct omap_dss_device *dssdev = NULL;
-
DSSDBG("shutdown\n");
-
- for_each_dss_output(dssdev) {
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE &&
- dssdev->ops && dssdev->ops->disable)
- dssdev->ops->disable(dssdev);
- }
}
static int dss_runtime_suspend(struct device *dev)
@@ -1650,21 +1641,14 @@ static struct platform_driver * const omap_dss_drivers[] = {
#endif
};
-static int __init omap_dss_init(void)
+int __init omap_dss_init(void)
{
return platform_register_drivers(omap_dss_drivers,
ARRAY_SIZE(omap_dss_drivers));
}
-static void __exit omap_dss_exit(void)
+void omap_dss_exit(void)
{
platform_unregister_drivers(omap_dss_drivers,
ARRAY_SIZE(omap_dss_drivers));
}
-
-module_init(omap_dss_init);
-module_exit(omap_dss_exit);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("OMAP2/3/4/5 Display Subsystem");
-MODULE_LICENSE("GPL v2");
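With omapdss_set_dss() gone, dss_bind() hands the dss_device to the omapdrm virtual device through platform data (struct dss_pdata, declared in omapdss.h below); platform_device_register_data() copies the structure, so the consumer can read it back from the device. A sketch of that consumer side, assuming the probe simply fetches the attached data:

static int omapdrm_probe_sketch(struct platform_device *pdev)
{
	struct dss_pdata *pdata = dev_get_platdata(&pdev->dev);

	if (!pdata || !pdata->dss)
		return -EINVAL;

	/* priv->dss = pdata->dss; continue with the normal probe */
	return 0;
}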
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 2b404bcb41dd..a547527bb2f3 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -257,8 +257,6 @@ struct dss_device {
struct dss_pll *video2_pll;
struct dispc_device *dispc;
- const struct dispc_ops *dispc_ops;
- const struct dss_mgr_ops *mgr_ops;
struct omap_drm_private *mgr_ops_priv;
};
@@ -393,6 +391,76 @@ void dispc_dump_clocks(struct dispc_device *dispc, struct seq_file *s);
int dispc_runtime_get(struct dispc_device *dispc);
void dispc_runtime_put(struct dispc_device *dispc);
+int dispc_get_num_ovls(struct dispc_device *dispc);
+int dispc_get_num_mgrs(struct dispc_device *dispc);
+
+const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
+ enum omap_plane_id plane);
+
+u32 dispc_read_irqstatus(struct dispc_device *dispc);
+void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask);
+void dispc_write_irqenable(struct dispc_device *dispc, u32 mask);
+
+int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
+ void *dev_id);
+void dispc_free_irq(struct dispc_device *dispc, void *dev_id);
+
+u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc,
+ enum omap_channel channel);
+u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
+ enum omap_channel channel);
+u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
+ enum omap_channel channel);
+u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc);
+
+u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc);
+
+void dispc_mgr_enable(struct dispc_device *dispc,
+ enum omap_channel channel, bool enable);
+
+bool dispc_mgr_go_busy(struct dispc_device *dispc,
+ enum omap_channel channel);
+
+void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel);
+
+void dispc_mgr_set_lcd_config(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config);
+void dispc_mgr_set_timings(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct videomode *vm);
+void dispc_mgr_setup(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct omap_overlay_manager_info *info);
+
+int dispc_mgr_check_timings(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct videomode *vm);
+
+u32 dispc_mgr_gamma_size(struct dispc_device *dispc,
+ enum omap_channel channel);
+void dispc_mgr_set_gamma(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct drm_color_lut *lut,
+ unsigned int length);
+
+int dispc_ovl_setup(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ const struct omap_overlay_info *oi,
+ const struct videomode *vm, bool mem_to_mem,
+ enum omap_channel channel);
+
+int dispc_ovl_enable(struct dispc_device *dispc,
+ enum omap_plane_id plane, bool enable);
+
+bool dispc_has_writeback(struct dispc_device *dispc);
+int dispc_wb_setup(struct dispc_device *dispc,
+ const struct omap_dss_writeback_info *wi,
+ bool mem_to_mem, const struct videomode *vm,
+ enum dss_writeback_channel channel_in);
+bool dispc_wb_go_busy(struct dispc_device *dispc);
+void dispc_wb_go(struct dispc_device *dispc);
+
void dispc_enable_sidle(struct dispc_device *dispc);
void dispc_disable_sidle(struct dispc_device *dispc);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 8de41e74e8f8..35b750cebaeb 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -707,7 +707,6 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi)
out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->owner = THIS_MODULE;
out->of_port = 0;
r = omapdss_device_init_output(out, &hdmi->bridge);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 54e5cb5aa52d..65085d886da5 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -681,7 +681,6 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi)
out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->owner = THIS_MODULE;
out->of_port = 0;
r = omapdss_device_init_output(out, &hdmi->bridge);
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
deleted file mode 100644
index f21b5df31213..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ /dev/null
@@ -1,229 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-/*
- * As omapdss panel drivers are omapdss specific, but we want to define the
- * DT-data in generic manner, we convert the compatible strings of the panel and
- * encoder nodes from "panel-foo" to "omapdss,panel-foo". This way we can have
- * both correct DT data and omapdss specific drivers.
- *
- * When we get generic panel drivers to the kernel, this file will be removed.
- */
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-
-static struct list_head dss_conv_list __initdata;
-
-static const char prefix[] __initconst = "omapdss,";
-
-struct dss_conv_node {
- struct list_head list;
- struct device_node *node;
- bool root;
-};
-
-static int __init omapdss_count_strings(const struct property *prop)
-{
- const char *p = prop->value;
- int l = 0, total = 0;
- int i;
-
- for (i = 0; total < prop->length; total += l, p += l, i++)
- l = strlen(p) + 1;
-
- return i;
-}
-
-static void __init omapdss_update_prop(struct device_node *node, char *compat,
- int len)
-{
- struct property *prop;
-
- prop = kzalloc(sizeof(*prop), GFP_KERNEL);
- if (!prop)
- return;
-
- prop->name = "compatible";
- prop->value = compat;
- prop->length = len;
-
- of_update_property(node, prop);
-}
-
-static void __init omapdss_prefix_strcpy(char *dst, int dst_len,
- const char *src, int src_len)
-{
- size_t total = 0;
-
- while (total < src_len) {
- size_t l = strlen(src) + 1;
-
- strcpy(dst, prefix);
- dst += strlen(prefix);
-
- strcpy(dst, src);
- dst += l;
-
- src += l;
- total += l;
- }
-}
-
-/* prepend compatible property strings with "omapdss," */
-static void __init omapdss_omapify_node(struct device_node *node)
-{
- struct property *prop;
- char *new_compat;
- int num_strs;
- int new_len;
-
- prop = of_find_property(node, "compatible", NULL);
-
- if (!prop || !prop->value)
- return;
-
- if (strnlen(prop->value, prop->length) >= prop->length)
- return;
-
- /* is it already prefixed? */
- if (strncmp(prefix, prop->value, strlen(prefix)) == 0)
- return;
-
- num_strs = omapdss_count_strings(prop);
-
- new_len = prop->length + strlen(prefix) * num_strs;
- new_compat = kmalloc(new_len, GFP_KERNEL);
-
- omapdss_prefix_strcpy(new_compat, new_len, prop->value, prop->length);
-
- omapdss_update_prop(node, new_compat, new_len);
-}
-
-static void __init omapdss_add_to_list(struct device_node *node, bool root)
-{
- struct dss_conv_node *n = kmalloc(sizeof(*n), GFP_KERNEL);
- if (n) {
- n->node = node;
- n->root = root;
- list_add(&n->list, &dss_conv_list);
- }
-}
-
-static bool __init omapdss_list_contains(const struct device_node *node)
-{
- struct dss_conv_node *n;
-
- list_for_each_entry(n, &dss_conv_list, list) {
- if (n->node == node)
- return true;
- }
-
- return false;
-}
-
-static void __init omapdss_walk_device(struct device_node *node, bool root)
-{
- struct device_node *n;
-
- omapdss_add_to_list(node, root);
-
- /*
- * of_graph_get_remote_port_parent() prints an error if there is no
- * port/ports node. To avoid that, check first that there's the node.
- */
- n = of_get_child_by_name(node, "ports");
- if (!n)
- n = of_get_child_by_name(node, "port");
- if (!n)
- return;
-
- of_node_put(n);
-
- n = NULL;
- while ((n = of_graph_get_next_endpoint(node, n)) != NULL) {
- struct device_node *pn;
-
- pn = of_graph_get_remote_port_parent(n);
-
- if (!pn)
- continue;
-
- if (!of_device_is_available(pn) || omapdss_list_contains(pn)) {
- of_node_put(pn);
- continue;
- }
-
- omapdss_walk_device(pn, false);
- }
-}
-
-static const struct of_device_id omapdss_of_match[] __initconst = {
- { .compatible = "ti,omap2-dss", },
- { .compatible = "ti,omap3-dss", },
- { .compatible = "ti,omap4-dss", },
- { .compatible = "ti,omap5-dss", },
- { .compatible = "ti,dra7-dss", },
- {},
-};
-
-static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
- { .compatible = "panel-dsi-cm" },
- {},
-};
-
-static void __init omapdss_find_children(struct device_node *np)
-{
- struct device_node *child;
-
- for_each_available_child_of_node(np, child) {
- if (!of_find_property(child, "compatible", NULL))
- continue;
-
- omapdss_walk_device(child, true);
-
- if (of_device_is_compatible(child, "ti,sysc"))
- omapdss_find_children(child);
- }
-}
-
-static int __init omapdss_boot_init(void)
-{
- struct device_node *dss;
-
- INIT_LIST_HEAD(&dss_conv_list);
-
- dss = of_find_matching_node(NULL, omapdss_of_match);
-
- if (dss == NULL || !of_device_is_available(dss))
- goto put_node;
-
- omapdss_walk_device(dss, true);
- omapdss_find_children(dss);
-
- while (!list_empty(&dss_conv_list)) {
- struct dss_conv_node *n;
-
- n = list_first_entry(&dss_conv_list, struct dss_conv_node,
- list);
-
- if (of_match_node(omapdss_of_fixups_whitelist, n->node))
- omapdss_omapify_node(n->node);
-
- list_del(&n->list);
- of_node_put(n->node);
- kfree(n);
- }
-
-put_node:
- of_node_put(dss);
- return 0;
-}
-
-subsys_initcall(omapdss_boot_init);
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index a48a9a254e33..a40abeafd2e9 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -7,13 +7,14 @@
#ifndef __OMAP_DRM_DSS_H
#define __OMAP_DRM_DSS_H
-#include <linux/list.h>
+#include <drm/drm_color_mgmt.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mode.h>
#include <linux/device.h>
#include <linux/interrupt.h>
-#include <video/videomode.h>
+#include <linux/list.h>
#include <linux/platform_data/omapdss.h>
-#include <uapi/drm/drm_mode.h>
-#include <drm/drm_crtc.h>
+#include <video/videomode.h>
#define DISPC_IRQ_FRAMEDONE (1 << 0)
#define DISPC_IRQ_VSYNC (1 << 1)
@@ -116,28 +117,6 @@ enum omap_dss_venc_type {
OMAP_DSS_VENC_TYPE_SVIDEO,
};
-enum omap_dss_dsi_pixel_format {
- OMAP_DSS_DSI_FMT_RGB888,
- OMAP_DSS_DSI_FMT_RGB666,
- OMAP_DSS_DSI_FMT_RGB666_PACKED,
- OMAP_DSS_DSI_FMT_RGB565,
-};
-
-enum omap_dss_dsi_mode {
- OMAP_DSS_DSI_CMD_MODE = 0,
- OMAP_DSS_DSI_VIDEO_MODE,
-};
-
-enum omap_display_caps {
- OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE = 1 << 0,
- OMAP_DSS_DISPLAY_CAP_TEAR_ELIM = 1 << 1,
-};
-
-enum omap_dss_display_state {
- OMAP_DSS_DISPLAY_DISABLED = 0,
- OMAP_DSS_DISPLAY_ACTIVE,
-};
-
enum omap_dss_rotation_type {
OMAP_DSS_ROT_NONE = 0,
OMAP_DSS_ROT_TILER = 1 << 0,
@@ -162,64 +141,6 @@ enum omap_dss_output_id {
OMAP_DSS_OUTPUT_HDMI = 1 << 6,
};
-/* DSI */
-
-enum omap_dss_dsi_trans_mode {
- /* Sync Pulses: both sync start and end packets sent */
- OMAP_DSS_DSI_PULSE_MODE,
- /* Sync Events: only sync start packets sent */
- OMAP_DSS_DSI_EVENT_MODE,
- /* Burst: only sync start packets sent, pixels are time compressed */
- OMAP_DSS_DSI_BURST_MODE,
-};
-
-struct omap_dss_dsi_videomode_timings {
- unsigned long hsclk;
-
- unsigned int ndl;
- unsigned int bitspp;
-
- /* pixels */
- u16 hact;
- /* lines */
- u16 vact;
-
- /* DSI video mode blanking data */
- /* Unit: byte clock cycles */
- u16 hss;
- u16 hsa;
- u16 hse;
- u16 hfp;
- u16 hbp;
- /* Unit: line clocks */
- u16 vsa;
- u16 vfp;
- u16 vbp;
-
- /* DSI blanking modes */
- int blanking_mode;
- int hsa_blanking_mode;
- int hbp_blanking_mode;
- int hfp_blanking_mode;
-
- enum omap_dss_dsi_trans_mode trans_mode;
-
- bool ddr_clk_always_on;
- int window_sync;
-};
-
-struct omap_dss_dsi_config {
- enum omap_dss_dsi_mode mode;
- enum omap_dss_dsi_pixel_format pixel_format;
- const struct videomode *vm;
-
- unsigned long hs_clk_min, hs_clk_max;
- unsigned long lp_clk_min, lp_clk_max;
-
- bool ddr_clk_always_on;
- enum omap_dss_dsi_trans_mode trans_mode;
-};
-
struct omap_dss_cpr_coefs {
s16 rr, rg, rb;
s16 gr, gg, gb;
@@ -243,6 +164,9 @@ struct omap_overlay_info {
u8 global_alpha;
u8 pre_mult_alpha;
u8 zorder;
+
+ enum drm_color_encoding color_encoding;
+ enum drm_color_range color_range;
};
struct omap_overlay_manager_info {
@@ -258,21 +182,6 @@ struct omap_overlay_manager_info {
struct omap_dss_cpr_coefs cpr_coefs;
};
-/* 22 pins means 1 clk lane and 10 data lanes */
-#define OMAP_DSS_MAX_DSI_PINS 22
-
-struct omap_dsi_pin_config {
- int num_pins;
- /*
- * pin numbers in the following order:
- * clk+, clk-
- * data1+, data1-
- * data2+, data2-
- * ...
- */
- int pins[OMAP_DSS_MAX_DSI_PINS];
-};
-
struct omap_dss_writeback_info {
u32 paddr;
u32 p_uv_addr;
@@ -286,89 +195,14 @@ struct omap_dss_writeback_info {
};
struct omapdss_dsi_ops {
- void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
- bool enter_ulps);
-
- /* bus configuration */
- int (*set_config)(struct omap_dss_device *dssdev,
- const struct omap_dss_dsi_config *cfg);
- int (*configure_pins)(struct omap_dss_device *dssdev,
- const struct omap_dsi_pin_config *pin_cfg);
-
- void (*enable_hs)(struct omap_dss_device *dssdev, int channel,
- bool enable);
- int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
-
- int (*update)(struct omap_dss_device *dssdev, int channel,
- void (*callback)(int, void *), void *data);
-
- void (*bus_lock)(struct omap_dss_device *dssdev);
- void (*bus_unlock)(struct omap_dss_device *dssdev);
-
- int (*enable_video_output)(struct omap_dss_device *dssdev, int channel);
- void (*disable_video_output)(struct omap_dss_device *dssdev,
- int channel);
-
- int (*request_vc)(struct omap_dss_device *dssdev, int *channel);
- int (*set_vc_id)(struct omap_dss_device *dssdev, int channel,
- int vc_id);
- void (*release_vc)(struct omap_dss_device *dssdev, int channel);
-
- /* data transfer */
- int (*dcs_write)(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len);
- int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len);
- int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
- u8 *data, int len);
-
- int (*gen_write)(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len);
- int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len);
- int (*gen_read)(struct omap_dss_device *dssdev, int channel,
- u8 *reqdata, int reqlen,
- u8 *data, int len);
-
- int (*bta_sync)(struct omap_dss_device *dssdev, int channel);
-
- int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev,
- int channel, u16 plen);
-};
-
-struct omap_dss_device_ops {
- int (*connect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
-
- void (*enable)(struct omap_dss_device *dssdev);
- void (*disable)(struct omap_dss_device *dssdev);
-
- int (*check_timings)(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode);
-
- int (*get_modes)(struct omap_dss_device *dssdev,
- struct drm_connector *connector);
-
- const struct omapdss_dsi_ops dsi;
-};
-
-/**
- * enum omap_dss_device_ops_flag - Indicates which device ops are supported
- * @OMAP_DSS_DEVICE_OP_MODES: The device supports reading modes
- */
-enum omap_dss_device_ops_flag {
- OMAP_DSS_DEVICE_OP_MODES = BIT(3),
+ int (*update)(struct omap_dss_device *dssdev);
+ bool (*is_video_mode)(struct omap_dss_device *dssdev);
};
struct omap_dss_device {
struct device *dev;
- struct module *owner;
-
struct dss_device *dss;
- struct omap_dss_device *next;
struct drm_bridge *bridge;
struct drm_bridge *next_bridge;
struct drm_panel *panel;
@@ -382,23 +216,11 @@ struct omap_dss_device {
*/
enum omap_display_type type;
- /*
- * True if the device is a display (panel or connector) at the end of
- * the pipeline, false otherwise.
- */
- bool display;
-
const char *name;
- const struct omap_dss_driver *driver;
- const struct omap_dss_device_ops *ops;
- unsigned long ops_flags;
+ const struct omapdss_dsi_ops *dsi_ops;
u32 bus_flags;
- enum omap_display_caps caps;
-
- enum omap_dss_display_state state;
-
/* OMAP DSS output specific fields */
/* DISPC channel for this output */
@@ -411,30 +233,10 @@ struct omap_dss_device {
unsigned int of_port;
};
-struct omap_dss_driver {
- int (*update)(struct omap_dss_device *dssdev,
- u16 x, u16 y, u16 w, u16 h);
- int (*sync)(struct omap_dss_device *dssdev);
-
- int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
- int (*get_te)(struct omap_dss_device *dssdev);
-
- int (*memory_read)(struct omap_dss_device *dssdev,
- void *buf, size_t size,
- u16 x, u16 y, u16 w, u16 h);
+struct dss_pdata {
+ struct dss_device *dss;
};
-struct dss_device *omapdss_get_dss(void);
-void omapdss_set_dss(struct dss_device *dss);
-static inline bool omapdss_is_initialized(void)
-{
- return !!omapdss_get_dss();
-}
-
-void omapdss_display_init(struct omap_dss_device *dssdev);
-int omapdss_display_get_modes(struct drm_connector *connector,
- const struct videomode *vm);
-
void omapdss_device_register(struct omap_dss_device *dssdev);
void omapdss_device_unregister(struct omap_dss_device *dssdev);
struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev);
@@ -445,8 +247,6 @@ int omapdss_device_connect(struct dss_device *dss,
struct omap_dss_device *dst);
void omapdss_device_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst);
-void omapdss_device_enable(struct omap_dss_device *dssdev);
-void omapdss_device_disable(struct omap_dss_device *dssdev);
int omap_dss_get_num_overlay_managers(void);
@@ -466,11 +266,6 @@ int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
int omapdss_compat_init(void);
void omapdss_compat_uninit(void);
-static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
-{
- return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
-}
-
enum dss_writeback_channel {
DSS_WB_LCD1_MGR = 0,
DSS_WB_LCD2_MGR = 1,
@@ -482,31 +277,23 @@ enum dss_writeback_channel {
DSS_WB_LCD3_MGR = 7,
};
-struct dss_mgr_ops {
- void (*start_update)(struct omap_drm_private *priv,
- enum omap_channel channel);
- int (*enable)(struct omap_drm_private *priv,
- enum omap_channel channel);
- void (*disable)(struct omap_drm_private *priv,
- enum omap_channel channel);
- void (*set_timings)(struct omap_drm_private *priv,
- enum omap_channel channel,
- const struct videomode *vm);
- void (*set_lcd_config)(struct omap_drm_private *priv,
- enum omap_channel channel,
- const struct dss_lcd_mgr_config *config);
- int (*register_framedone_handler)(struct omap_drm_private *priv,
- enum omap_channel channel,
- void (*handler)(void *), void *data);
- void (*unregister_framedone_handler)(struct omap_drm_private *priv,
- enum omap_channel channel,
- void (*handler)(void *), void *data);
-};
-
-int dss_install_mgr_ops(struct dss_device *dss,
- const struct dss_mgr_ops *mgr_ops,
- struct omap_drm_private *priv);
-void dss_uninstall_mgr_ops(struct dss_device *dss);
+void omap_crtc_dss_start_update(struct omap_drm_private *priv,
+ enum omap_channel channel);
+void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable);
+int omap_crtc_dss_enable(struct omap_drm_private *priv, enum omap_channel channel);
+void omap_crtc_dss_disable(struct omap_drm_private *priv, enum omap_channel channel);
+void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
+ enum omap_channel channel,
+ const struct videomode *vm);
+void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
+ enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config);
+int omap_crtc_dss_register_framedone(
+ struct omap_drm_private *priv, enum omap_channel channel,
+ void (*handler)(void *), void *data);
+void omap_crtc_dss_unregister_framedone(
+ struct omap_drm_private *priv, enum omap_channel channel,
+ void (*handler)(void *), void *data);
void dss_mgr_set_timings(struct omap_dss_device *dssdev,
const struct videomode *vm);
@@ -520,80 +307,12 @@ int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev,
void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data);
-/* dispc ops */
-
-struct dispc_ops {
- u32 (*read_irqstatus)(struct dispc_device *dispc);
- void (*clear_irqstatus)(struct dispc_device *dispc, u32 mask);
- void (*write_irqenable)(struct dispc_device *dispc, u32 mask);
-
- int (*request_irq)(struct dispc_device *dispc, irq_handler_t handler,
- void *dev_id);
- void (*free_irq)(struct dispc_device *dispc, void *dev_id);
-
- int (*runtime_get)(struct dispc_device *dispc);
- void (*runtime_put)(struct dispc_device *dispc);
-
- int (*get_num_ovls)(struct dispc_device *dispc);
- int (*get_num_mgrs)(struct dispc_device *dispc);
-
- u32 (*get_memory_bandwidth_limit)(struct dispc_device *dispc);
-
- void (*mgr_enable)(struct dispc_device *dispc,
- enum omap_channel channel, bool enable);
- bool (*mgr_is_enabled)(struct dispc_device *dispc,
- enum omap_channel channel);
- u32 (*mgr_get_vsync_irq)(struct dispc_device *dispc,
- enum omap_channel channel);
- u32 (*mgr_get_framedone_irq)(struct dispc_device *dispc,
- enum omap_channel channel);
- u32 (*mgr_get_sync_lost_irq)(struct dispc_device *dispc,
- enum omap_channel channel);
- bool (*mgr_go_busy)(struct dispc_device *dispc,
- enum omap_channel channel);
- void (*mgr_go)(struct dispc_device *dispc, enum omap_channel channel);
- void (*mgr_set_lcd_config)(struct dispc_device *dispc,
- enum omap_channel channel,
- const struct dss_lcd_mgr_config *config);
- int (*mgr_check_timings)(struct dispc_device *dispc,
- enum omap_channel channel,
- const struct videomode *vm);
- void (*mgr_set_timings)(struct dispc_device *dispc,
- enum omap_channel channel,
- const struct videomode *vm);
- void (*mgr_setup)(struct dispc_device *dispc, enum omap_channel channel,
- const struct omap_overlay_manager_info *info);
- u32 (*mgr_gamma_size)(struct dispc_device *dispc,
- enum omap_channel channel);
- void (*mgr_set_gamma)(struct dispc_device *dispc,
- enum omap_channel channel,
- const struct drm_color_lut *lut,
- unsigned int length);
-
- int (*ovl_enable)(struct dispc_device *dispc, enum omap_plane_id plane,
- bool enable);
- int (*ovl_setup)(struct dispc_device *dispc, enum omap_plane_id plane,
- const struct omap_overlay_info *oi,
- const struct videomode *vm, bool mem_to_mem,
- enum omap_channel channel);
-
- const u32 *(*ovl_get_color_modes)(struct dispc_device *dispc,
- enum omap_plane_id plane);
-
- u32 (*wb_get_framedone_irq)(struct dispc_device *dispc);
- int (*wb_setup)(struct dispc_device *dispc,
- const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct videomode *vm,
- enum dss_writeback_channel channel_in);
- bool (*has_writeback)(struct dispc_device *dispc);
- bool (*wb_go_busy)(struct dispc_device *dispc);
- void (*wb_go)(struct dispc_device *dispc);
-};
-
struct dispc_device *dispc_get_dispc(struct dss_device *dss);
-const struct dispc_ops *dispc_get_ops(struct dss_device *dss);
bool omapdss_stack_is_ready(void);
void omapdss_gather_components(struct device *dev);
+int omap_dss_init(void);
+void omap_dss_exit(void);
+
#endif /* __OMAP_DRM_DSS_H */
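The omap_dss_device_ops/omap_dss_driver pair is reduced to the two-entry omapdss_dsi_ops above (update and is_video_mode), which the CRTC code reaches through dssdev->dsi_ops. A sketch of how a DSI host implementation might provide them; the example_* names are assumptions, not part of the patch:

static int example_dsi_update(struct omap_dss_device *dssdev)
{
	/* kick one manual-update frame; host specific */
	return 0;
}

static bool example_dsi_is_video_mode(struct omap_dss_device *dssdev)
{
	/* command-mode panel in this example */
	return false;
}

static const struct omapdss_dsi_ops example_dsi_ops = {
	.update		= example_dsi_update,
	.is_video_mode	= example_dsi_is_video_mode,
};

static void example_dsi_init_output(struct omap_dss_device *out)
{
	out->dsi_ops = &example_dsi_ops;
}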
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 5affdf078134..7378e855c278 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -30,7 +30,6 @@ int omapdss_device_init_output(struct omap_dss_device *out,
return 0;
}
- out->next = omapdss_find_device_by_node(remote_node);
out->bridge = of_drm_find_bridge(remote_node);
out->panel = of_drm_find_panel(remote_node);
if (IS_ERR(out->panel))
@@ -38,12 +37,6 @@ int omapdss_device_init_output(struct omap_dss_device *out,
of_node_put(remote_node);
- if (out->next && out->type != out->next->type) {
- dev_err(out->dev, "output type and display type don't match\n");
- ret = -EINVAL;
- goto error;
- }
-
if (out->panel) {
struct drm_bridge *bridge;
@@ -69,7 +62,7 @@ int omapdss_device_init_output(struct omap_dss_device *out,
out->bridge = local_bridge;
}
- if (!out->next && !out->bridge) {
+ if (!out->bridge) {
ret = -EPROBE_DEFER;
goto error;
}
@@ -78,98 +71,64 @@ int omapdss_device_init_output(struct omap_dss_device *out,
error:
omapdss_device_cleanup_output(out);
- out->next = NULL;
return ret;
}
-EXPORT_SYMBOL(omapdss_device_init_output);
void omapdss_device_cleanup_output(struct omap_dss_device *out)
{
if (out->bridge && out->panel)
drm_panel_bridge_remove(out->next_bridge ?
out->next_bridge : out->bridge);
-
- if (out->next)
- omapdss_device_put(out->next);
-}
-EXPORT_SYMBOL(omapdss_device_cleanup_output);
-
-int dss_install_mgr_ops(struct dss_device *dss,
- const struct dss_mgr_ops *mgr_ops,
- struct omap_drm_private *priv)
-{
- if (dss->mgr_ops)
- return -EBUSY;
-
- dss->mgr_ops = mgr_ops;
- dss->mgr_ops_priv = priv;
-
- return 0;
-}
-EXPORT_SYMBOL(dss_install_mgr_ops);
-
-void dss_uninstall_mgr_ops(struct dss_device *dss)
-{
- dss->mgr_ops = NULL;
- dss->mgr_ops_priv = NULL;
}
-EXPORT_SYMBOL(dss_uninstall_mgr_ops);
void dss_mgr_set_timings(struct omap_dss_device *dssdev,
const struct videomode *vm)
{
- dssdev->dss->mgr_ops->set_timings(dssdev->dss->mgr_ops_priv,
+ omap_crtc_dss_set_timings(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel, vm);
}
-EXPORT_SYMBOL(dss_mgr_set_timings);
void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev,
const struct dss_lcd_mgr_config *config)
{
- dssdev->dss->mgr_ops->set_lcd_config(dssdev->dss->mgr_ops_priv,
+ omap_crtc_dss_set_lcd_config(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel, config);
}
-EXPORT_SYMBOL(dss_mgr_set_lcd_config);
int dss_mgr_enable(struct omap_dss_device *dssdev)
{
- return dssdev->dss->mgr_ops->enable(dssdev->dss->mgr_ops_priv,
+ return omap_crtc_dss_enable(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel);
}
-EXPORT_SYMBOL(dss_mgr_enable);
void dss_mgr_disable(struct omap_dss_device *dssdev)
{
- dssdev->dss->mgr_ops->disable(dssdev->dss->mgr_ops_priv,
+ omap_crtc_dss_disable(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel);
}
-EXPORT_SYMBOL(dss_mgr_disable);
void dss_mgr_start_update(struct omap_dss_device *dssdev)
{
- dssdev->dss->mgr_ops->start_update(dssdev->dss->mgr_ops_priv,
+ omap_crtc_dss_start_update(dssdev->dss->mgr_ops_priv,
dssdev->dispc_channel);
}
-EXPORT_SYMBOL(dss_mgr_start_update);
int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data)
{
struct dss_device *dss = dssdev->dss;
- return dss->mgr_ops->register_framedone_handler(dss->mgr_ops_priv,
+ return omap_crtc_dss_register_framedone(dss->mgr_ops_priv,
dssdev->dispc_channel,
handler, data);
}
-EXPORT_SYMBOL(dss_mgr_register_framedone_handler);
void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data)
{
struct dss_device *dss = dssdev->dss;
- dss->mgr_ops->unregister_framedone_handler(dss->mgr_ops_priv,
+ omap_crtc_dss_unregister_framedone(dss->mgr_ops_priv,
dssdev->dispc_channel,
handler, data);
}
-EXPORT_SYMBOL(dss_mgr_unregister_framedone_handler);
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index 241a338ace29..4c8246a3ded9 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -222,6 +222,9 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
n_stop = min((unsigned)(clkin / fint_hw_min), hw->n_max);
n_inc = 1;
+ if (n_start > n_stop)
+ return false;
+
if (hw->errata_i886) {
swap(n_start, n_stop);
n_inc = -1;
@@ -239,6 +242,9 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
hw->m_max);
m_inc = 1;
+ if (m_start > m_stop)
+ continue;
+
if (hw->errata_i886) {
swap(m_start, m_stop);
m_inc = -1;
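The two checks added to dss_pll_calc_a() bail out when the requested clkin leaves no valid divider: if clkin / fint_hw_min rounds down below the computed start value, the [start, stop] range is inverted, and (after the errata_i886 swap with a negative increment) the search loops would otherwise walk an invalid range. An illustrative, stand-alone calculation with made-up limits:

#include <stdio.h>

int main(void)
{
	unsigned long clkin = 300000;		/* 300 kHz, hypothetically low */
	unsigned long fint_hw_min = 500000;	/* hypothetical PLL fint limits */
	unsigned long fint_hw_max = 2100000;
	unsigned long n_max = 255;

	unsigned long n_start = (clkin + fint_hw_max - 1) / fint_hw_max;
	unsigned long n_stop = clkin / fint_hw_min;

	if (n_start < 1)
		n_start = 1;
	if (n_stop > n_max)
		n_stop = n_max;

	/* n_start = 1, n_stop = 0: no n satisfies the fint limits, so the
	 * search must return false instead of iterating the range. */
	printf("n_start=%lu n_stop=%lu -> %s\n", n_start, n_stop,
	       n_start > n_stop ? "no solution" : "search");
	return 0;
}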
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 282e4c837cd9..91eaae3b9481 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -312,7 +312,6 @@ static int sdi_init_output(struct sdi_device *sdi)
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
/* We have SDI only on OMAP3, where it's on port 1 */
out->of_port = 1;
- out->owner = THIS_MODULE;
out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE /* 15.5.9.1.2 */
| DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE;
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 94cf50d837b0..e522c17955d0 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -733,9 +733,7 @@ static int venc_init_output(struct venc_device *venc)
out->type = OMAP_DISPLAY_TYPE_VENC;
out->name = "venc.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->owner = THIS_MODULE;
out->of_port = 0;
- out->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
r = omapdss_device_init_output(out, &venc->bridge);
if (r < 0) {
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
deleted file mode 100644
index 47719b92e22b..000000000000
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ /dev/null
@@ -1,157 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
- * Author: Rob Clark <rob@ti.com>
- */
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_probe_helper.h>
-
-#include "omap_drv.h"
-
-/*
- * connector funcs
- */
-
-#define to_omap_connector(x) container_of(x, struct omap_connector, base)
-
-struct omap_connector {
- struct drm_connector base;
- struct omap_dss_device *output;
-};
-
-static enum drm_connector_status omap_connector_detect(
- struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
-static void omap_connector_destroy(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
-
- DBG("%s", connector->name);
-
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-
- omapdss_device_put(omap_connector->output);
-
- kfree(omap_connector);
-}
-
-static int omap_connector_get_modes(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = NULL;
- struct omap_dss_device *d;
-
- DBG("%s", connector->name);
-
- /*
- * If the display pipeline reports modes (e.g. with a fixed resolution
- * panel or an analog TV output), query it.
- */
- for (d = omap_connector->output; d; d = d->next) {
- if (d->ops_flags & OMAP_DSS_DEVICE_OP_MODES)
- dssdev = d;
- }
-
- if (dssdev)
- return dssdev->ops->get_modes(dssdev, connector);
-
- /* We can't retrieve modes. The KMS core will add the default modes. */
- return 0;
-}
-
-enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- int ret;
-
- drm_mode_copy(adjusted_mode, mode);
-
- for (; dssdev; dssdev = dssdev->next) {
- if (!dssdev->ops || !dssdev->ops->check_timings)
- continue;
-
- ret = dssdev->ops->check_timings(dssdev, adjusted_mode);
- if (ret)
- return MODE_BAD;
- }
-
- return MODE_OK;
-}
-
-static enum drm_mode_status omap_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct drm_display_mode new_mode = {};
- enum drm_mode_status status;
-
- status = omap_connector_mode_fixup(omap_connector->output, mode,
- &new_mode);
- if (status != MODE_OK)
- goto done;
-
- /* Check if vrefresh is still valid. */
- if (drm_mode_vrefresh(mode) != drm_mode_vrefresh(&new_mode))
- status = MODE_NOCLOCK;
-
-done:
- DBG("connector: mode %s: " DRM_MODE_FMT,
- (status == MODE_OK) ? "valid" : "invalid",
- DRM_MODE_ARG(mode));
-
- return status;
-}
-
-static const struct drm_connector_funcs omap_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .detect = omap_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = omap_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
- .get_modes = omap_connector_get_modes,
- .mode_valid = omap_connector_mode_valid,
-};
-
-/* initialize connector */
-struct drm_connector *omap_connector_init(struct drm_device *dev,
- struct omap_dss_device *output,
- struct drm_encoder *encoder)
-{
- struct drm_connector *connector = NULL;
- struct omap_connector *omap_connector;
-
- DBG("%s", output->name);
-
- omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL);
- if (!omap_connector)
- goto fail;
-
- omap_connector->output = omapdss_device_get(output);
-
- connector = &omap_connector->base;
- connector->interlace_allowed = 1;
- connector->doublescan_allowed = 0;
-
- drm_connector_init(dev, connector, &omap_connector_funcs,
- DRM_MODE_CONNECTOR_DSI);
- drm_connector_helper_add(connector, &omap_connector_helper_funcs);
-
- return connector;
-
-fail:
- if (connector)
- omap_connector_destroy(connector);
-
- return NULL;
-}
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h
deleted file mode 100644
index 0ecd4f1655b7..000000000000
--- a/drivers/gpu/drm/omapdrm/omap_connector.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * omap_connector.h -- OMAP DRM Connector
- *
- * Copyright (C) 2011 Texas Instruments
- * Author: Rob Clark <rob@ti.com>
- */
-
-#ifndef __OMAPDRM_CONNECTOR_H__
-#define __OMAPDRM_CONNECTOR_H__
-
-#include <linux/types.h>
-
-enum drm_mode_status;
-
-struct drm_connector;
-struct drm_device;
-struct drm_encoder;
-struct omap_dss_device;
-
-struct drm_connector *omap_connector_init(struct drm_device *dev,
- struct omap_dss_device *output,
- struct drm_encoder *encoder);
-enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
-#endif /* __OMAPDRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 7d66269ad998..06a719c104f4 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -100,14 +100,14 @@ int omap_crtc_wait_pending(struct drm_crtc *crtc)
* the upstream part of the video pipe.
*/
-static void omap_crtc_dss_start_update(struct omap_drm_private *priv,
+void omap_crtc_dss_start_update(struct omap_drm_private *priv,
enum omap_channel channel)
{
- priv->dispc_ops->mgr_enable(priv->dispc, channel, true);
+ dispc_mgr_enable(priv->dispc, channel, true);
}
/* Called only from the encoder enable/disable and suspend/resume handlers. */
-static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
+void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
{
struct omap_crtc_state *omap_state = to_omap_crtc_state(crtc->state);
struct drm_device *dev = crtc->dev;
@@ -128,7 +128,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
}
if (omap_crtc->pipe->output->type == OMAP_DISPLAY_TYPE_HDMI) {
- priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
+ dispc_mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
return;
}
@@ -141,9 +141,9 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
omap_crtc->ignore_digit_sync_lost = true;
}
- framedone_irq = priv->dispc_ops->mgr_get_framedone_irq(priv->dispc,
+ framedone_irq = dispc_mgr_get_framedone_irq(priv->dispc,
channel);
- vsync_irq = priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel);
+ vsync_irq = dispc_mgr_get_vsync_irq(priv->dispc, channel);
if (enable) {
wait = omap_irq_wait_init(dev, vsync_irq, 1);
@@ -163,7 +163,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
wait = omap_irq_wait_init(dev, vsync_irq, 2);
}
- priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
+ dispc_mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
@@ -180,21 +180,19 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
}
-static int omap_crtc_dss_enable(struct omap_drm_private *priv,
- enum omap_channel channel)
+int omap_crtc_dss_enable(struct omap_drm_private *priv, enum omap_channel channel)
{
struct drm_crtc *crtc = priv->channels[channel]->crtc;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- priv->dispc_ops->mgr_set_timings(priv->dispc, omap_crtc->channel,
+ dispc_mgr_set_timings(priv->dispc, omap_crtc->channel,
&omap_crtc->vm);
omap_crtc_set_enabled(&omap_crtc->base, true);
return 0;
}
-static void omap_crtc_dss_disable(struct omap_drm_private *priv,
- enum omap_channel channel)
+void omap_crtc_dss_disable(struct omap_drm_private *priv, enum omap_channel channel)
{
struct drm_crtc *crtc = priv->channels[channel]->crtc;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -202,7 +200,7 @@ static void omap_crtc_dss_disable(struct omap_drm_private *priv,
omap_crtc_set_enabled(&omap_crtc->base, false);
}
-static void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
+void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
enum omap_channel channel,
const struct videomode *vm)
{
@@ -213,7 +211,7 @@ static void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
omap_crtc->vm = *vm;
}
-static void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
+void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
enum omap_channel channel,
const struct dss_lcd_mgr_config *config)
{
@@ -221,11 +219,11 @@ static void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s", omap_crtc->name);
- priv->dispc_ops->mgr_set_lcd_config(priv->dispc, omap_crtc->channel,
+ dispc_mgr_set_lcd_config(priv->dispc, omap_crtc->channel,
config);
}
-static int omap_crtc_dss_register_framedone(
+int omap_crtc_dss_register_framedone(
struct omap_drm_private *priv, enum omap_channel channel,
void (*handler)(void *), void *data)
{
@@ -244,7 +242,7 @@ static int omap_crtc_dss_register_framedone(
return 0;
}
-static void omap_crtc_dss_unregister_framedone(
+void omap_crtc_dss_unregister_framedone(
struct omap_drm_private *priv, enum omap_channel channel,
void (*handler)(void *), void *data)
{
@@ -261,16 +259,6 @@ static void omap_crtc_dss_unregister_framedone(
omap_crtc->framedone_handler_data = NULL;
}
-static const struct dss_mgr_ops mgr_ops = {
- .start_update = omap_crtc_dss_start_update,
- .enable = omap_crtc_dss_enable,
- .disable = omap_crtc_dss_disable,
- .set_timings = omap_crtc_dss_set_timings,
- .set_lcd_config = omap_crtc_dss_set_lcd_config,
- .register_framedone_handler = omap_crtc_dss_register_framedone,
- .unregister_framedone_handler = omap_crtc_dss_unregister_framedone,
-};
-
/* -----------------------------------------------------------------------------
* Setup, Flush and Page Flip
*/
@@ -300,7 +288,7 @@ void omap_crtc_vblank_irq(struct drm_crtc *crtc)
* If the dispc is busy we're racing the flush operation. Try again on
* the next vblank interrupt.
*/
- if (priv->dispc_ops->mgr_go_busy(priv->dispc, omap_crtc->channel)) {
+ if (dispc_mgr_go_busy(priv->dispc, omap_crtc->channel)) {
spin_unlock(&crtc->dev->event_lock);
return;
}
@@ -362,27 +350,14 @@ static void omap_crtc_manual_display_update(struct work_struct *data)
{
struct omap_crtc *omap_crtc =
container_of(data, struct omap_crtc, update_work.work);
- struct drm_display_mode *mode = &omap_crtc->pipe->crtc->mode;
- struct omap_dss_device *dssdev = omap_crtc->pipe->output->next;
+ struct omap_dss_device *dssdev = omap_crtc->pipe->output;
struct drm_device *dev = omap_crtc->base.dev;
- const struct omap_dss_driver *dssdrv;
int ret;
- if (!dssdev) {
- dev_err_once(dev->dev, "missing display dssdev!");
+ if (!dssdev || !dssdev->dsi_ops || !dssdev->dsi_ops->update)
return;
- }
-
- dssdrv = dssdev->driver;
- if (!dssdrv || !dssdrv->update) {
- dev_err_once(dev->dev, "missing or incorrect dssdrv!");
- return;
- }
-
- if (dssdrv->sync)
- dssdrv->sync(dssdev);
- ret = dssdrv->update(dssdev, 0, 0, mode->hdisplay, mode->vdisplay);
+ ret = dssdev->dsi_ops->update(dssdev);
if (ret < 0) {
spin_lock_irq(&dev->event_lock);
omap_crtc->pending = false;
@@ -391,6 +366,33 @@ static void omap_crtc_manual_display_update(struct work_struct *data)
}
}
+static s16 omap_crtc_s31_32_to_s2_8(s64 coef)
+{
+ u64 sign_bit = 1ULL << 63;
+ u64 cbits = (u64)coef;
+
+ s16 ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x1ff);
+
+ if (cbits & sign_bit)
+ ret = -ret;
+
+ return ret;
+}
+
+static void omap_crtc_cpr_coefs_from_ctm(const struct drm_color_ctm *ctm,
+ struct omap_dss_cpr_coefs *cpr)
+{
+ cpr->rr = omap_crtc_s31_32_to_s2_8(ctm->matrix[0]);
+ cpr->rg = omap_crtc_s31_32_to_s2_8(ctm->matrix[1]);
+ cpr->rb = omap_crtc_s31_32_to_s2_8(ctm->matrix[2]);
+ cpr->gr = omap_crtc_s31_32_to_s2_8(ctm->matrix[3]);
+ cpr->gg = omap_crtc_s31_32_to_s2_8(ctm->matrix[4]);
+ cpr->gb = omap_crtc_s31_32_to_s2_8(ctm->matrix[5]);
+ cpr->br = omap_crtc_s31_32_to_s2_8(ctm->matrix[6]);
+ cpr->bg = omap_crtc_s31_32_to_s2_8(ctm->matrix[7]);
+ cpr->bb = omap_crtc_s31_32_to_s2_8(ctm->matrix[8]);
+}
+
static void omap_crtc_write_crtc_properties(struct drm_crtc *crtc)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
@@ -402,9 +404,17 @@ static void omap_crtc_write_crtc_properties(struct drm_crtc *crtc)
info.default_color = 0x000000;
info.trans_enabled = false;
info.partial_alpha_enabled = false;
- info.cpr_enable = false;
- priv->dispc_ops->mgr_setup(priv->dispc, omap_crtc->channel, &info);
+ if (crtc->state->ctm) {
+ struct drm_color_ctm *ctm = crtc->state->ctm->data;
+
+ info.cpr_enable = true;
+ omap_crtc_cpr_coefs_from_ctm(ctm, &info.cpr_coefs);
+ } else {
+ info.cpr_enable = false;
+ }
+
+ dispc_mgr_setup(priv->dispc, omap_crtc->channel, &info);
}
/* -----------------------------------------------------------------------------
@@ -445,7 +455,7 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
DBG("%s", omap_crtc->name);
- priv->dispc_ops->runtime_get(priv->dispc);
+ dispc_runtime_get(priv->dispc);
/* manual updated display will not trigger vsync irq */
if (omap_state->manually_updated)
@@ -484,7 +494,7 @@ static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
- priv->dispc_ops->runtime_put(priv->dispc);
+ dispc_runtime_put(priv->dispc);
}
static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
@@ -502,9 +512,8 @@ static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
* valid DISPC mode. DSI will calculate and configure the
* proper DISPC mode later.
*/
- if (omap_crtc->pipe->output->next == NULL ||
- omap_crtc->pipe->output->next->type != OMAP_DISPLAY_TYPE_DSI) {
- r = priv->dispc_ops->mgr_check_timings(priv->dispc,
+ if (omap_crtc->pipe->output->type != OMAP_DISPLAY_TYPE_DSI) {
+ r = dispc_mgr_check_timings(priv->dispc,
omap_crtc->channel,
&vm);
if (r)
@@ -555,17 +564,16 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
static bool omap_crtc_is_manually_updated(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
- struct omap_dss_device *display = omap_crtc->pipe->output->next;
+ struct omap_dss_device *dssdev = omap_crtc->pipe->output;
- if (!display)
+ if (!dssdev || !dssdev->dsi_ops || !dssdev->dsi_ops->is_video_mode)
return false;
- if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
- DBG("detected manually updated display!");
- return true;
- }
+ if (dssdev->dsi_ops->is_video_mode(dssdev))
+ return false;
- return false;
+ DBG("detected manually updated display!");
+ return true;
}
static int omap_crtc_atomic_check(struct drm_crtc *crtc,
@@ -575,8 +583,8 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc,
crtc);
struct drm_plane_state *pri_state;
- if (crtc_state->color_mgmt_changed && crtc_state->gamma_lut) {
- unsigned int length = crtc_state->gamma_lut->length /
+ if (crtc_state->color_mgmt_changed && crtc_state->degamma_lut) {
+ unsigned int length = crtc_state->degamma_lut->length /
sizeof(struct drm_color_lut);
if (length < 2)
@@ -617,13 +625,13 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_color_lut *lut = NULL;
unsigned int length = 0;
- if (crtc->state->gamma_lut) {
+ if (crtc->state->degamma_lut) {
lut = (struct drm_color_lut *)
- crtc->state->gamma_lut->data;
- length = crtc->state->gamma_lut->length /
+ crtc->state->degamma_lut->data;
+ length = crtc->state->degamma_lut->length /
sizeof(*lut);
}
- priv->dispc_ops->mgr_set_gamma(priv->dispc, omap_crtc->channel,
+ dispc_mgr_set_gamma(priv->dispc, omap_crtc->channel,
lut, length);
}
@@ -648,7 +656,7 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
WARN_ON(ret != 0);
spin_lock_irq(&crtc->dev->event_lock);
- priv->dispc_ops->mgr_go(priv->dispc, omap_crtc->channel);
+ dispc_mgr_go(priv->dispc, omap_crtc->channel);
omap_crtc_arm_event(crtc);
spin_unlock_irq(&crtc->dev->event_lock);
}
@@ -741,7 +749,6 @@ static const struct drm_crtc_funcs omap_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = omap_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.atomic_duplicate_state = omap_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.atomic_set_property = omap_crtc_atomic_set_property,
@@ -771,16 +778,6 @@ static const char *channel_names[] = {
[OMAP_DSS_CHANNEL_LCD3] = "lcd3",
};
-void omap_crtc_pre_init(struct omap_drm_private *priv)
-{
- dss_install_mgr_ops(priv->dss, &mgr_ops, priv);
-}
-
-void omap_crtc_pre_uninit(struct omap_drm_private *priv)
-{
- dss_uninstall_mgr_ops(priv->dss);
-}
-
/* initialize crtc */
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct omap_drm_pipeline *pipe,
@@ -839,10 +836,10 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
* extracted with dispc_mgr_gamma_size(). If it returns 0
* gamma table is not supported.
*/
- if (priv->dispc_ops->mgr_gamma_size(priv->dispc, channel)) {
+ if (dispc_mgr_gamma_size(priv->dispc, channel)) {
unsigned int gamma_lut_size = 256;
- drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size);
+ drm_crtc_enable_color_mgmt(crtc, gamma_lut_size, true, 0);
drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
}
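The CTM coefficients arrive from userspace in DRM's signed S31.32 fixed-point format, while the DISPC colour phase rotation (CPR) registers take signed S2.8 values, so omap_crtc_cpr_coefs_from_ctm() narrows each matrix entry through the omap_crtc_s31_32_to_s2_8() helper, which sits outside this hunk. A minimal sketch of such a conversion, assuming the magnitude is clamped to the largest value S2.8 can hold (0x1ff):

static s16 omap_crtc_s31_32_to_s2_8(s64 coef)
{
	u64 sign_bit = 1ULL << 63;
	u64 cbits = (u64)coef;
	/* Keep 2 integer + 8 fractional bits: drop the sign, shift out
	 * 24 of the 32 fractional bits, clamp to the S2.8 maximum. */
	s16 ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x1ff);

	if (cbits & sign_bit)
		ret = -ret;

	return ret;
}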
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.h b/drivers/gpu/drm/omapdrm/omap_crtc.h
index 2fd57751ae2b..a8b9cbee86e0 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.h
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.h
@@ -22,8 +22,6 @@ struct videomode;
struct videomode *omap_crtc_timings(struct drm_crtc *crtc);
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
-void omap_crtc_pre_init(struct omap_drm_private *priv);
-void omap_crtc_pre_uninit(struct omap_drm_private *priv);
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct omap_drm_pipeline *pipe,
struct drm_plane *plane);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 42c2ed752095..28bbad1353ee 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -69,7 +69,7 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
struct drm_device *dev = old_state->dev;
struct omap_drm_private *priv = dev->dev_private;
- priv->dispc_ops->runtime_get(priv->dispc);
+ dispc_runtime_get(priv->dispc);
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
@@ -113,7 +113,7 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_cleanup_planes(dev, old_state);
- priv->dispc_ops->runtime_put(priv->dispc);
+ dispc_runtime_put(priv->dispc);
}
static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = {
@@ -192,7 +192,7 @@ static int omap_compare_pipelines(const void *a, const void *b)
static int omap_modeset_init_properties(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- unsigned int num_planes = priv->dispc_ops->get_num_ovls(priv->dispc);
+ unsigned int num_planes = dispc_get_num_ovls(priv->dispc);
priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0,
num_planes - 1);
@@ -206,14 +206,7 @@ static int omap_display_id(struct omap_dss_device *output)
{
struct device_node *node = NULL;
- if (output->next) {
- struct omap_dss_device *display = output;
-
- while (display->next)
- display = display->next;
-
- node = display->dev->of_node;
- } else if (output->bridge) {
+ if (output->bridge) {
struct drm_bridge *bridge = output->bridge;
while (drm_bridge_get_next_bridge(bridge))
@@ -228,8 +221,8 @@ static int omap_display_id(struct omap_dss_device *output)
static int omap_modeset_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- int num_ovls = priv->dispc_ops->get_num_ovls(priv->dispc);
- int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc);
+ int num_ovls = dispc_get_num_ovls(priv->dispc);
+ int num_mgrs = dispc_get_num_mgrs(priv->dispc);
unsigned int i;
int ret;
u32 plane_crtc_mask;
@@ -332,19 +325,12 @@ static int omap_modeset_init(struct drm_device *dev)
struct drm_encoder *encoder = pipe->encoder;
struct drm_crtc *crtc;
- if (pipe->output->next) {
- pipe->connector = omap_connector_init(dev, pipe->output,
- encoder);
- if (!pipe->connector)
- return -ENOMEM;
- } else {
- pipe->connector = drm_bridge_connector_init(dev, encoder);
- if (IS_ERR(pipe->connector)) {
- dev_err(priv->dev,
- "unable to create bridge connector for %s\n",
- pipe->output->name);
- return PTR_ERR(pipe->connector);
- }
+ pipe->connector = drm_bridge_connector_init(dev, encoder);
+ if (IS_ERR(pipe->connector)) {
+ dev_err(priv->dev,
+ "unable to create bridge connector for %s\n",
+ pipe->output->name);
+ return PTR_ERR(pipe->connector);
}
drm_connector_attach_encoder(pipe->connector, encoder);
@@ -568,6 +554,7 @@ static const struct soc_device_attribute omapdrm_soc_devices[] = {
static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
{
const struct soc_device_attribute *soc;
+ struct dss_pdata *pdata = dev->platform_data;
struct drm_device *ddev;
int ret;
@@ -582,11 +569,10 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
ddev->dev_private = priv;
priv->dev = dev;
- priv->dss = omapdss_get_dss();
+ priv->dss = pdata->dss;
priv->dispc = dispc_get_dispc(priv->dss);
- priv->dispc_ops = dispc_get_ops(priv->dss);
- omap_crtc_pre_init(priv);
+ priv->dss->mgr_ops_priv = priv;
soc = soc_device_match(omapdrm_soc_devices);
priv->omaprev = soc ? (unsigned int)soc->data : 0;
@@ -596,9 +582,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
INIT_LIST_HEAD(&priv->obj_list);
/* Get memory bandwidth limits */
- if (priv->dispc_ops->get_memory_bandwidth_limit)
- priv->max_bandwidth =
- priv->dispc_ops->get_memory_bandwidth_limit(priv->dispc);
+ priv->max_bandwidth = dispc_get_memory_bandwidth_limit(priv->dispc);
omap_gem_init(ddev);
@@ -641,7 +625,6 @@ err_gem_deinit:
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
omap_disconnect_pipelines(ddev);
- omap_crtc_pre_uninit(priv);
drm_dev_put(ddev);
return ret;
}
@@ -667,7 +650,6 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
destroy_workqueue(priv->wq);
omap_disconnect_pipelines(ddev);
- omap_crtc_pre_uninit(priv);
drm_dev_put(ddev);
}
@@ -677,9 +659,6 @@ static int pdev_probe(struct platform_device *pdev)
struct omap_drm_private *priv;
int ret;
- if (omapdss_is_initialized() == false)
- return -EPROBE_DEFER;
-
ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "Failed to set the DMA mask\n");
@@ -748,9 +727,21 @@ static struct platform_driver * const drivers[] = {
static int __init omap_drm_init(void)
{
+ int r;
+
DBG("init");
- return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+ r = omap_dss_init();
+ if (r)
+ return r;
+
+ r = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+ if (r) {
+ omap_dss_exit();
+ return r;
+ }
+
+ return 0;
}
static void __exit omap_drm_fini(void)
@@ -758,13 +749,15 @@ static void __exit omap_drm_fini(void)
DBG("fini");
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+
+ omap_dss_exit();
}
-/* need late_initcall() so we load after dss_driver's are loaded */
-late_initcall(omap_drm_init);
+module_init(omap_drm_init);
module_exit(omap_drm_fini);
MODULE_AUTHOR("Rob Clark <rob@ti.com>");
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
MODULE_DESCRIPTION("OMAP DRM Display Driver");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index ae57e7ada876..d6f136984da9 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -12,11 +12,11 @@
#include <linux/workqueue.h>
#include "dss/omapdss.h"
+#include "dss/dss.h"
#include <drm/drm_gem.h>
#include <drm/omap_drm.h>
-#include "omap_connector.h"
#include "omap_crtc.h"
#include "omap_encoder.h"
#include "omap_fb.h"
@@ -47,7 +47,6 @@ struct omap_drm_private {
struct dss_device *dss;
struct dispc_device *dispc;
- const struct dispc_ops *dispc_ops;
unsigned int num_pipes;
struct omap_drm_pipeline pipes[8];
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 57e92a4d5937..4dd05bc732da 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -75,7 +75,6 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
struct omap_dss_device *output = omap_encoder->output;
- struct omap_dss_device *dssdev;
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
struct drm_bridge *bridge;
@@ -98,9 +97,6 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
*
* A better solution is to use DRM's bus-flags through the whole driver.
*/
- for (dssdev = output; dssdev; dssdev = dssdev->next)
- omap_encoder_update_videomode_flags(&vm, dssdev->bus_flags);
-
for (bridge = output->bridge; bridge;
bridge = drm_bridge_get_next_bridge(bridge)) {
if (!bridge->timings)
@@ -113,65 +109,12 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
bus_flags = connector->display_info.bus_flags;
omap_encoder_update_videomode_flags(&vm, bus_flags);
- /* Set timings for the dss manager. */
+ /* Set timings for all devices in the display pipeline. */
dss_mgr_set_timings(output, &vm);
}
-static void omap_encoder_disable(struct drm_encoder *encoder)
-{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->output;
- struct drm_device *dev = encoder->dev;
-
- dev_dbg(dev->dev, "disable(%s)\n", dssdev->name);
-
- /*
- * Disable the chain of external devices, starting at the one at the
- * internal encoder's output. This is used for DSI outputs only, as
- * dssdev->next is NULL for all other outputs.
- */
- omapdss_device_disable(dssdev->next);
-}
-
-static void omap_encoder_enable(struct drm_encoder *encoder)
-{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->output;
- struct drm_device *dev = encoder->dev;
-
- dev_dbg(dev->dev, "enable(%s)\n", dssdev->name);
-
- /*
- * Enable the chain of external devices, starting at the one at the
- * internal encoder's output. This is used for DSI outputs only, as
- * dssdev->next is NULL for all other outputs.
- */
- omapdss_device_enable(dssdev->next);
-}
-
-static int omap_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- enum drm_mode_status status;
-
- status = omap_connector_mode_fixup(omap_encoder->output,
- &crtc_state->mode,
- &crtc_state->adjusted_mode);
- if (status != MODE_OK) {
- dev_err(encoder->dev->dev, "invalid timings: %d\n", status);
- return -EINVAL;
- }
-
- return 0;
-}
-
static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
.mode_set = omap_encoder_mode_set,
- .disable = omap_encoder_disable,
- .enable = omap_encoder_enable,
- .atomic_check = omap_encoder_atomic_check,
};
/* initialize encoder */
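omap_encoder_mode_set() folds the bus flags reported by each bridge and by the connector into the videomode before calling dss_mgr_set_timings(). The omap_encoder_update_videomode_flags() helper it relies on is not part of this hunk; a reduced sketch of such a mapping, covering only data-enable polarity and the pixel-data sampling edge, and only filling flags the earlier stages left unset:

static void omap_encoder_update_videomode_flags(struct videomode *vm,
						u32 bus_flags)
{
	if (!(vm->flags & (DISPLAY_FLAGS_DE_LOW | DISPLAY_FLAGS_DE_HIGH))) {
		if (bus_flags & DRM_BUS_FLAG_DE_LOW)
			vm->flags |= DISPLAY_FLAGS_DE_LOW;
		else if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
			vm->flags |= DISPLAY_FLAGS_DE_HIGH;
	}

	if (!(vm->flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE |
			   DISPLAY_FLAGS_PIXDATA_NEGEDGE))) {
		if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
			vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
		else if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
			vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
	}
}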
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 30d299ca8795..38af6195d959 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1324,8 +1324,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
}
omap_obj->pages = pages;
- ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL,
- npages);
+ ret = drm_prime_sg_to_page_array(sgt, pages, npages);
if (ret) {
omap_gem_free_object(obj);
obj = ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 97c83b959f7e..15148d4b35b5 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -29,7 +29,7 @@ static void omap_irq_update(struct drm_device *dev)
DBG("irqmask=%08x", irqmask);
- priv->dispc_ops->write_irqenable(priv->dispc, irqmask);
+ dispc_write_irqenable(priv->dispc, irqmask);
}
static void omap_irq_wait_handler(struct omap_irq_wait *wait)
@@ -83,7 +83,7 @@ int omap_irq_enable_framedone(struct drm_crtc *crtc, bool enable)
unsigned long flags;
enum omap_channel channel = omap_crtc_channel(crtc);
int framedone_irq =
- priv->dispc_ops->mgr_get_framedone_irq(priv->dispc, channel);
+ dispc_mgr_get_framedone_irq(priv->dispc, channel);
DBG("dev=%p, crtc=%u, enable=%d", dev, channel, enable);
@@ -120,7 +120,7 @@ int omap_irq_enable_vblank(struct drm_crtc *crtc)
DBG("dev=%p, crtc=%u", dev, channel);
spin_lock_irqsave(&priv->wait_lock, flags);
- priv->irq_mask |= priv->dispc_ops->mgr_get_vsync_irq(priv->dispc,
+ priv->irq_mask |= dispc_mgr_get_vsync_irq(priv->dispc,
channel);
omap_irq_update(dev);
spin_unlock_irqrestore(&priv->wait_lock, flags);
@@ -146,7 +146,7 @@ void omap_irq_disable_vblank(struct drm_crtc *crtc)
DBG("dev=%p, crtc=%u", dev, channel);
spin_lock_irqsave(&priv->wait_lock, flags);
- priv->irq_mask &= ~priv->dispc_ops->mgr_get_vsync_irq(priv->dispc,
+ priv->irq_mask &= ~dispc_mgr_get_vsync_irq(priv->dispc,
channel);
omap_irq_update(dev);
spin_unlock_irqrestore(&priv->wait_lock, flags);
@@ -211,9 +211,9 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
unsigned int id;
u32 irqstatus;
- irqstatus = priv->dispc_ops->read_irqstatus(priv->dispc);
- priv->dispc_ops->clear_irqstatus(priv->dispc, irqstatus);
- priv->dispc_ops->read_irqstatus(priv->dispc); /* flush posted write */
+ irqstatus = dispc_read_irqstatus(priv->dispc);
+ dispc_clear_irqstatus(priv->dispc, irqstatus);
+ dispc_read_irqstatus(priv->dispc); /* flush posted write */
VERB("irqs: %08x", irqstatus);
@@ -221,15 +221,15 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
struct drm_crtc *crtc = priv->pipes[id].crtc;
enum omap_channel channel = omap_crtc_channel(crtc);
- if (irqstatus & priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel)) {
+ if (irqstatus & dispc_mgr_get_vsync_irq(priv->dispc, channel)) {
drm_handle_vblank(dev, id);
omap_crtc_vblank_irq(crtc);
}
- if (irqstatus & priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, channel))
+ if (irqstatus & dispc_mgr_get_sync_lost_irq(priv->dispc, channel))
omap_crtc_error_irq(crtc, irqstatus);
- if (irqstatus & priv->dispc_ops->mgr_get_framedone_irq(priv->dispc, channel))
+ if (irqstatus & dispc_mgr_get_framedone_irq(priv->dispc, channel))
omap_crtc_framedone_irq(crtc, irqstatus);
}
@@ -263,7 +263,7 @@ static const u32 omap_underflow_irqs[] = {
int omap_drm_irq_install(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- unsigned int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc);
+ unsigned int num_mgrs = dispc_get_num_mgrs(priv->dispc);
unsigned int max_planes;
unsigned int i;
int ret;
@@ -281,13 +281,13 @@ int omap_drm_irq_install(struct drm_device *dev)
}
for (i = 0; i < num_mgrs; ++i)
- priv->irq_mask |= priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, i);
+ priv->irq_mask |= dispc_mgr_get_sync_lost_irq(priv->dispc, i);
- priv->dispc_ops->runtime_get(priv->dispc);
- priv->dispc_ops->clear_irqstatus(priv->dispc, 0xffffffff);
- priv->dispc_ops->runtime_put(priv->dispc);
+ dispc_runtime_get(priv->dispc);
+ dispc_clear_irqstatus(priv->dispc, 0xffffffff);
+ dispc_runtime_put(priv->dispc);
- ret = priv->dispc_ops->request_irq(priv->dispc, omap_irq_handler, dev);
+ ret = dispc_request_irq(priv->dispc, omap_irq_handler, dev);
if (ret < 0)
return ret;
@@ -305,5 +305,5 @@ void omap_drm_irq_uninstall(struct drm_device *dev)
dev->irq_enabled = false;
- priv->dispc_ops->free_irq(priv->dispc, dev);
+ dispc_free_irq(priv->dispc, dev);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 21e0b9785599..51dc24acea73 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -59,6 +59,8 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
info.pre_mult_alpha = 1;
else
info.pre_mult_alpha = 0;
+ info.color_encoding = state->color_encoding;
+ info.color_range = state->color_range;
/* update scanout: */
omap_framebuffer_update_scanout(state->fb, state, &info);
@@ -70,17 +72,17 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
&info.paddr, &info.p_uv_addr);
/* and finally, update omapdss: */
- ret = priv->dispc_ops->ovl_setup(priv->dispc, omap_plane->id, &info,
+ ret = dispc_ovl_setup(priv->dispc, omap_plane->id, &info,
omap_crtc_timings(state->crtc), false,
omap_crtc_channel(state->crtc));
if (ret) {
dev_err(plane->dev->dev, "Failed to setup plane %s\n",
omap_plane->name);
- priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, false);
+ dispc_ovl_enable(priv->dispc, omap_plane->id, false);
return;
}
- priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, true);
+ dispc_ovl_enable(priv->dispc, omap_plane->id, true);
}
static void omap_plane_atomic_disable(struct drm_plane *plane,
@@ -93,7 +95,7 @@ static void omap_plane_atomic_disable(struct drm_plane *plane,
plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY
? 0 : omap_plane->id;
- priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, false);
+ dispc_ovl_enable(priv->dispc, omap_plane->id, false);
}
static int omap_plane_atomic_check(struct drm_plane *plane,
@@ -189,6 +191,8 @@ static void omap_plane_reset(struct drm_plane *plane)
*/
plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY
? 0 : omap_plane->id;
+ plane->state->color_encoding = DRM_COLOR_YCBCR_BT601;
+ plane->state->color_range = DRM_COLOR_YCBCR_FULL_RANGE;
}
static int omap_plane_atomic_set_property(struct drm_plane *plane,
@@ -232,6 +236,22 @@ static const struct drm_plane_funcs omap_plane_funcs = {
.atomic_get_property = omap_plane_atomic_get_property,
};
+static bool omap_plane_supports_yuv(struct drm_plane *plane)
+{
+ struct omap_drm_private *priv = plane->dev->dev_private;
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ const u32 *formats = dispc_ovl_get_color_modes(priv->dispc, omap_plane->id);
+ u32 i;
+
+ for (i = 0; formats[i]; i++)
+ if (formats[i] == DRM_FORMAT_YUYV ||
+ formats[i] == DRM_FORMAT_UYVY ||
+ formats[i] == DRM_FORMAT_NV12)
+ return true;
+
+ return false;
+}
+
static const char *plane_id_to_name[] = {
[OMAP_DSS_GFX] = "gfx",
[OMAP_DSS_VIDEO1] = "vid1",
@@ -252,7 +272,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
u32 possible_crtcs)
{
struct omap_drm_private *priv = dev->dev_private;
- unsigned int num_planes = priv->dispc_ops->get_num_ovls(priv->dispc);
+ unsigned int num_planes = dispc_get_num_ovls(priv->dispc);
struct drm_plane *plane;
struct omap_plane *omap_plane;
enum omap_plane_id id;
@@ -271,7 +291,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
if (!omap_plane)
return ERR_PTR(-ENOMEM);
- formats = priv->dispc_ops->ovl_get_color_modes(priv->dispc, id);
+ formats = dispc_ovl_get_color_modes(priv->dispc, id);
for (nformats = 0; formats[nformats]; ++nformats)
;
omap_plane->id = id;
@@ -293,6 +313,15 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
drm_plane_create_blend_mode_property(plane, BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
+ if (omap_plane_supports_yuv(plane))
+ drm_plane_create_color_properties(plane,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
+ DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_FULL_RANGE);
+
return plane;
error:
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
index 9e1acbd2c7aa..8338dc665301 100644
--- a/drivers/gpu/drm/omapdrm/tcm-sita.c
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
@@ -254,6 +254,5 @@ struct tcm *sita_init(u16 width, u16 height)
return tcm;
error:
- kfree(tcm);
return NULL;
}
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index b4e021ea30f9..4894913936e9 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -57,6 +57,15 @@ config DRM_PANEL_BOE_TV101WUM_NL6
Say Y here if you want support for the BOE TV101WUM and AUO
KD101N80-45NA WUXGA DSI video mode panels.
+config DRM_PANEL_DSI_CM
+ tristate "Generic DSI command mode panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ DRM panel driver for DSI command mode panels with support for
+ embedded and external backlights.
+
config DRM_PANEL_LVDS
tristate "Generic LVDS panel driver"
depends on OF
@@ -145,6 +154,17 @@ config DRM_PANEL_JDI_LT070ME05000
The panel has a 1200(RGB)×1920 (WUXGA) resolution and uses
24 bits per pixel.
+config DRM_PANEL_KHADAS_TS050
+ tristate "Khadas TS050 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Khadas TS050 TFT-LCD
+ panel module. The panel has a 1080x1920 resolution and uses
+ 24-bit RGB per pixel. It provides a MIPI DSI interface to the host,
+ a built-in LED backlight, and a touch controller.
+
config DRM_PANEL_KINGDISPLAY_KD097D04
tristate "Kingdisplay kd097d04 panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index ebbf488c7eac..cae4d976c069 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
+obj-$(CONFIG_DRM_PANEL_DSI_CM) += panel-dsi-cm.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
@@ -13,6 +14,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
+obj-$(CONFIG_DRM_PANEL_KHADAS_TS050) += panel-khadas-ts050.o
obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o
obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c
new file mode 100644
index 000000000000..af381d756ac1
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-dsi-cm.c
@@ -0,0 +1,665 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic DSI Command Mode panel driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#define DCS_GET_ID1 0xda
+#define DCS_GET_ID2 0xdb
+#define DCS_GET_ID3 0xdc
+
+#define DCS_REGULATOR_SUPPLY_NUM 2
+
+static const struct of_device_id dsicm_of_match[];
+
+struct dsic_panel_data {
+ u32 xres;
+ u32 yres;
+ u32 refresh;
+ u32 width_mm;
+ u32 height_mm;
+ u32 max_hs_rate;
+ u32 max_lp_rate;
+};
+
+struct panel_drv_data {
+ struct mipi_dsi_device *dsi;
+ struct drm_panel panel;
+ struct drm_display_mode mode;
+
+ struct mutex lock;
+
+ struct backlight_device *bldev;
+ struct backlight_device *extbldev;
+
+ unsigned long hw_guard_end; /* next value of jiffies when we can
+ * issue the next sleep in/out command
+ */
+ unsigned long hw_guard_wait; /* max guard time in jiffies */
+
+ const struct dsic_panel_data *panel_data;
+
+ struct gpio_desc *reset_gpio;
+
+ struct regulator_bulk_data supplies[DCS_REGULATOR_SUPPLY_NUM];
+
+ bool use_dsi_backlight;
+
+ /* runtime variables */
+ bool enabled;
+
+ bool intro_printed;
+};
+
+static inline struct panel_drv_data *panel_to_ddata(struct drm_panel *panel)
+{
+ return container_of(panel, struct panel_drv_data, panel);
+}
+
+static void dsicm_bl_power(struct panel_drv_data *ddata, bool enable)
+{
+ struct backlight_device *backlight;
+
+ if (ddata->bldev)
+ backlight = ddata->bldev;
+ else if (ddata->extbldev)
+ backlight = ddata->extbldev;
+ else
+ return;
+
+ if (enable) {
+ backlight->props.fb_blank = FB_BLANK_UNBLANK;
+ backlight->props.state &= ~(BL_CORE_FBBLANK | BL_CORE_SUSPENDED);
+ backlight->props.power = FB_BLANK_UNBLANK;
+ } else {
+ backlight->props.fb_blank = FB_BLANK_NORMAL;
+ backlight->props.power = FB_BLANK_POWERDOWN;
+ backlight->props.state |= BL_CORE_FBBLANK | BL_CORE_SUSPENDED;
+ }
+
+ backlight_update_status(backlight);
+}
+
+static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
+{
+ ddata->hw_guard_wait = msecs_to_jiffies(guard_msec);
+ ddata->hw_guard_end = jiffies + ddata->hw_guard_wait;
+}
+
+static void hw_guard_wait(struct panel_drv_data *ddata)
+{
+ unsigned long wait = ddata->hw_guard_end - jiffies;
+
+ if ((long)wait > 0 && wait <= ddata->hw_guard_wait) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(wait);
+ }
+}
+
+static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
+{
+ return mipi_dsi_dcs_read(ddata->dsi, dcs_cmd, data, 1);
+}
+
+static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
+{
+ return mipi_dsi_dcs_write(ddata->dsi, dcs_cmd, &param, 1);
+}
+
+static int dsicm_sleep_in(struct panel_drv_data *ddata)
+{
+ int r;
+
+ hw_guard_wait(ddata);
+
+ r = mipi_dsi_dcs_enter_sleep_mode(ddata->dsi);
+ if (r)
+ return r;
+
+ hw_guard_start(ddata, 120);
+
+ usleep_range(5000, 10000);
+
+ return 0;
+}
+
+static int dsicm_sleep_out(struct panel_drv_data *ddata)
+{
+ int r;
+
+ hw_guard_wait(ddata);
+
+ r = mipi_dsi_dcs_exit_sleep_mode(ddata->dsi);
+ if (r)
+ return r;
+
+ hw_guard_start(ddata, 120);
+
+ usleep_range(5000, 10000);
+
+ return 0;
+}
+
+static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3)
+{
+ int r;
+
+ r = dsicm_dcs_read_1(ddata, DCS_GET_ID1, id1);
+ if (r)
+ return r;
+ r = dsicm_dcs_read_1(ddata, DCS_GET_ID2, id2);
+ if (r)
+ return r;
+ r = dsicm_dcs_read_1(ddata, DCS_GET_ID3, id3);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int dsicm_set_update_window(struct panel_drv_data *ddata)
+{
+ struct mipi_dsi_device *dsi = ddata->dsi;
+ int r;
+
+ r = mipi_dsi_dcs_set_column_address(dsi, 0, ddata->mode.hdisplay - 1);
+ if (r < 0)
+ return r;
+
+ r = mipi_dsi_dcs_set_page_address(dsi, 0, ddata->mode.vdisplay - 1);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+static int dsicm_bl_update_status(struct backlight_device *dev)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
+ int r = 0;
+ int level;
+
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ level = dev->props.brightness;
+ else
+ level = 0;
+
+ dev_dbg(&ddata->dsi->dev, "update brightness to %d\n", level);
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->enabled)
+ r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ level);
+
+ mutex_unlock(&ddata->lock);
+
+ return r;
+}
+
+static int dsicm_bl_get_intensity(struct backlight_device *dev)
+{
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ return dev->props.brightness;
+
+ return 0;
+}
+
+static const struct backlight_ops dsicm_bl_ops = {
+ .get_brightness = dsicm_bl_get_intensity,
+ .update_status = dsicm_bl_update_status,
+};
+
+static ssize_t num_dsi_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
+ u8 errors = 0;
+ int r = -ENODEV;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->enabled)
+ r = dsicm_dcs_read_1(ddata, MIPI_DCS_GET_ERROR_COUNT_ON_DSI, &errors);
+
+ mutex_unlock(&ddata->lock);
+
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", errors);
+}
+
+static ssize_t hw_revision_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
+ u8 id1, id2, id3;
+ int r = -ENODEV;
+
+ mutex_lock(&ddata->lock);
+
+ if (ddata->enabled)
+ r = dsicm_get_id(ddata, &id1, &id2, &id3);
+
+ mutex_unlock(&ddata->lock);
+
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
+}
+
+static DEVICE_ATTR_RO(num_dsi_errors);
+static DEVICE_ATTR_RO(hw_revision);
+
+static struct attribute *dsicm_attrs[] = {
+ &dev_attr_num_dsi_errors.attr,
+ &dev_attr_hw_revision.attr,
+ NULL,
+};
+
+static const struct attribute_group dsicm_attr_group = {
+ .attrs = dsicm_attrs,
+};
+
+static void dsicm_hw_reset(struct panel_drv_data *ddata)
+{
+ gpiod_set_value(ddata->reset_gpio, 1);
+ udelay(10);
+ /* reset the panel */
+ gpiod_set_value(ddata->reset_gpio, 0);
+ /* assert reset */
+ udelay(10);
+ gpiod_set_value(ddata->reset_gpio, 1);
+ /* wait after releasing reset */
+ usleep_range(5000, 10000);
+}
+
+static int dsicm_power_on(struct panel_drv_data *ddata)
+{
+ u8 id1, id2, id3;
+ int r;
+
+ dsicm_hw_reset(ddata);
+
+ ddata->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ r = dsicm_sleep_out(ddata);
+ if (r)
+ goto err;
+
+ r = dsicm_get_id(ddata, &id1, &id2, &id3);
+ if (r)
+ goto err;
+
+ r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0xff);
+ if (r)
+ goto err;
+
+ r = dsicm_dcs_write_1(ddata, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ (1<<2) | (1<<5)); /* BL | BCTRL */
+ if (r)
+ goto err;
+
+ r = mipi_dsi_dcs_set_pixel_format(ddata->dsi, MIPI_DCS_PIXEL_FMT_24BIT);
+ if (r)
+ goto err;
+
+ r = dsicm_set_update_window(ddata);
+ if (r)
+ goto err;
+
+ r = mipi_dsi_dcs_set_display_on(ddata->dsi);
+ if (r)
+ goto err;
+
+ r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (r)
+ goto err;
+
+ /* possible panel bug */
+ msleep(100);
+
+ ddata->enabled = true;
+
+ if (!ddata->intro_printed) {
+ dev_info(&ddata->dsi->dev, "panel revision %02x.%02x.%02x\n",
+ id1, id2, id3);
+ ddata->intro_printed = true;
+ }
+
+ ddata->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ return 0;
+err:
+ dev_err(&ddata->dsi->dev, "error while enabling panel, issuing HW reset\n");
+
+ dsicm_hw_reset(ddata);
+
+ return r;
+}
+
+static int dsicm_power_off(struct panel_drv_data *ddata)
+{
+ int r;
+
+ ddata->enabled = false;
+
+ r = mipi_dsi_dcs_set_display_off(ddata->dsi);
+ if (!r)
+ r = dsicm_sleep_in(ddata);
+
+ if (r) {
+ dev_err(&ddata->dsi->dev,
+ "error disabling panel, issuing HW reset\n");
+ dsicm_hw_reset(ddata);
+ }
+
+ return r;
+}
+
+static int dsicm_prepare(struct drm_panel *panel)
+{
+ struct panel_drv_data *ddata = panel_to_ddata(panel);
+ int r;
+
+ r = regulator_bulk_enable(ARRAY_SIZE(ddata->supplies), ddata->supplies);
+ if (r)
+ dev_err(&ddata->dsi->dev, "failed to enable supplies: %d\n", r);
+
+ return r;
+}
+
+static int dsicm_enable(struct drm_panel *panel)
+{
+ struct panel_drv_data *ddata = panel_to_ddata(panel);
+ int r;
+
+ mutex_lock(&ddata->lock);
+
+ r = dsicm_power_on(ddata);
+ if (r)
+ goto err;
+
+ mutex_unlock(&ddata->lock);
+
+ dsicm_bl_power(ddata, true);
+
+ return 0;
+err:
+ dev_err(&ddata->dsi->dev, "enable failed (%d)\n", r);
+ mutex_unlock(&ddata->lock);
+ return r;
+}
+
+static int dsicm_unprepare(struct drm_panel *panel)
+{
+ struct panel_drv_data *ddata = panel_to_ddata(panel);
+ int r;
+
+ r = regulator_bulk_disable(ARRAY_SIZE(ddata->supplies), ddata->supplies);
+ if (r)
+ dev_err(&ddata->dsi->dev, "failed to disable supplies: %d\n", r);
+
+ return r;
+}
+
+static int dsicm_disable(struct drm_panel *panel)
+{
+ struct panel_drv_data *ddata = panel_to_ddata(panel);
+ int r;
+
+ dsicm_bl_power(ddata, false);
+
+ mutex_lock(&ddata->lock);
+
+ r = dsicm_power_off(ddata);
+
+ mutex_unlock(&ddata->lock);
+
+ return r;
+}
+
+static int dsicm_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct panel_drv_data *ddata = panel_to_ddata(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &ddata->mode);
+ if (!mode) {
+ dev_err(&ddata->dsi->dev, "failed to add mode %ux%u@%u kHz\n",
+ ddata->mode.hdisplay, ddata->mode.vdisplay,
+ ddata->mode.clock);
+ return -ENOMEM;
+ }
+
+ connector->display_info.width_mm = ddata->panel_data->width_mm;
+ connector->display_info.height_mm = ddata->panel_data->height_mm;
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs dsicm_panel_funcs = {
+ .unprepare = dsicm_unprepare,
+ .disable = dsicm_disable,
+ .prepare = dsicm_prepare,
+ .enable = dsicm_enable,
+ .get_modes = dsicm_get_modes,
+};
+
+static int dsicm_probe_of(struct mipi_dsi_device *dsi)
+{
+ struct backlight_device *backlight;
+ struct panel_drv_data *ddata = mipi_dsi_get_drvdata(dsi);
+ int err;
+ struct drm_display_mode *mode = &ddata->mode;
+
+ ddata->reset_gpio = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ddata->reset_gpio)) {
+ err = PTR_ERR(ddata->reset_gpio);
+ dev_err(&dsi->dev, "reset gpio request failed: %d", err);
+ return err;
+ }
+
+ mode->hdisplay = mode->hsync_start = mode->hsync_end = mode->htotal =
+ ddata->panel_data->xres;
+ mode->vdisplay = mode->vsync_start = mode->vsync_end = mode->vtotal =
+ ddata->panel_data->yres;
+ mode->clock = ddata->panel_data->xres * ddata->panel_data->yres *
+ ddata->panel_data->refresh / 1000;
+ mode->width_mm = ddata->panel_data->width_mm;
+ mode->height_mm = ddata->panel_data->height_mm;
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+
+ ddata->supplies[0].supply = "vpnl";
+ ddata->supplies[1].supply = "vddi";
+ err = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ddata->supplies),
+ ddata->supplies);
+ if (err)
+ return err;
+
+ backlight = devm_of_find_backlight(&dsi->dev);
+ if (IS_ERR(backlight))
+ return PTR_ERR(backlight);
+
+ /* If no backlight device is found assume native backlight support */
+ if (backlight)
+ ddata->extbldev = backlight;
+ else
+ ddata->use_dsi_backlight = true;
+
+ return 0;
+}
+
+static int dsicm_probe(struct mipi_dsi_device *dsi)
+{
+ struct panel_drv_data *ddata;
+ struct backlight_device *bldev = NULL;
+ struct device *dev = &dsi->dev;
+ int r;
+
+ dev_dbg(dev, "probe\n");
+
+ ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ddata);
+ ddata->dsi = dsi;
+
+ ddata->panel_data = of_device_get_match_data(dev);
+ if (!ddata->panel_data)
+ return -ENODEV;
+
+ r = dsicm_probe_of(dsi);
+ if (r)
+ return r;
+
+ mutex_init(&ddata->lock);
+
+ dsicm_hw_reset(ddata);
+
+ drm_panel_init(&ddata->panel, dev, &dsicm_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (ddata->use_dsi_backlight) {
+ struct backlight_properties props = { 0 };
+ props.max_brightness = 255;
+ props.type = BACKLIGHT_RAW;
+
+ bldev = devm_backlight_device_register(dev, dev_name(dev),
+ dev, ddata, &dsicm_bl_ops, &props);
+ if (IS_ERR(bldev)) {
+ r = PTR_ERR(bldev);
+ goto err_bl;
+ }
+
+ ddata->bldev = bldev;
+ }
+
+ r = sysfs_create_group(&dev->kobj, &dsicm_attr_group);
+ if (r) {
+ dev_err(dev, "failed to create sysfs files\n");
+ goto err_bl;
+ }
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_EOT_PACKET;
+ dsi->hs_rate = ddata->panel_data->max_hs_rate;
+ dsi->lp_rate = ddata->panel_data->max_lp_rate;
+
+ drm_panel_add(&ddata->panel);
+
+ r = mipi_dsi_attach(dsi);
+ if (r < 0)
+ goto err_dsi_attach;
+
+ return 0;
+
+err_dsi_attach:
+ drm_panel_remove(&ddata->panel);
+ sysfs_remove_group(&dsi->dev.kobj, &dsicm_attr_group);
+err_bl:
+ if (ddata->extbldev)
+ put_device(&ddata->extbldev->dev);
+
+ return r;
+}
+
+static int dsicm_remove(struct mipi_dsi_device *dsi)
+{
+ struct panel_drv_data *ddata = mipi_dsi_get_drvdata(dsi);
+
+ dev_dbg(&dsi->dev, "remove\n");
+
+ mipi_dsi_detach(dsi);
+
+ drm_panel_remove(&ddata->panel);
+
+ sysfs_remove_group(&dsi->dev.kobj, &dsicm_attr_group);
+
+ if (ddata->extbldev)
+ put_device(&ddata->extbldev->dev);
+
+ return 0;
+}
+
+static const struct dsic_panel_data taal_data = {
+ .xres = 864,
+ .yres = 480,
+ .refresh = 60,
+ .width_mm = 0,
+ .height_mm = 0,
+ .max_hs_rate = 300000000,
+ .max_lp_rate = 10000000,
+};
+
+static const struct dsic_panel_data himalaya_data = {
+ .xres = 480,
+ .yres = 864,
+ .refresh = 60,
+ .width_mm = 49,
+ .height_mm = 88,
+ .max_hs_rate = 300000000,
+ .max_lp_rate = 10000000,
+};
+
+static const struct dsic_panel_data droid4_data = {
+ .xres = 540,
+ .yres = 960,
+ .refresh = 60,
+ .width_mm = 50,
+ .height_mm = 89,
+ .max_hs_rate = 300000000,
+ .max_lp_rate = 10000000,
+};
+
+static const struct of_device_id dsicm_of_match[] = {
+ { .compatible = "tpo,taal", .data = &taal_data },
+ { .compatible = "nokia,himalaya", &himalaya_data },
+ { .compatible = "motorola,droid4-panel", &droid4_data },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, dsicm_of_match);
+
+static struct mipi_dsi_driver dsicm_driver = {
+ .probe = dsicm_probe,
+ .remove = dsicm_remove,
+ .driver = {
+ .name = "panel-dsi-cm",
+ .of_match_table = dsicm_of_match,
+ },
+};
+module_mipi_dsi_driver(dsicm_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("Generic DSI Command Mode Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index bc36aa3c1123..fe5ac3ef9018 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -265,7 +265,8 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 1;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
- MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs,
DRM_MODE_CONNECTOR_DSI);
diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c
new file mode 100644
index 000000000000..8f6ac1a40c31
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c
@@ -0,0 +1,870 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct khadas_ts050_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *link;
+
+ struct regulator *supply;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *enable_gpio;
+
+ bool prepared;
+ bool enabled;
+};
+
+struct khadas_ts050_panel_cmd {
+ u8 cmd;
+ u8 data;
+};
+
+/* Only the CMD1 User Command set is documented */
+static const struct khadas_ts050_panel_cmd init_code[] = {
+ /* Select Unknown CMD Page (Undocumented) */
+ {0xff, 0xee},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ {0x1f, 0x45},
+ {0x24, 0x4f},
+ {0x38, 0xc8},
+ {0x39, 0x27},
+ {0x1e, 0x77},
+ {0x1d, 0x0f},
+ {0x7e, 0x71},
+ {0x7c, 0x03},
+ {0xff, 0x00},
+ {0xfb, 0x01},
+ {0x35, 0x01},
+ /* Select CMD2 Page0 (Undocumented) */
+ {0xff, 0x01},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ {0x00, 0x01},
+ {0x01, 0x55},
+ {0x02, 0x40},
+ {0x05, 0x40},
+ {0x06, 0x4a},
+ {0x07, 0x24},
+ {0x08, 0x0c},
+ {0x0b, 0x7d},
+ {0x0c, 0x7d},
+ {0x0e, 0xb0},
+ {0x0f, 0xae},
+ {0x11, 0x10},
+ {0x12, 0x10},
+ {0x13, 0x03},
+ {0x14, 0x4a},
+ {0x15, 0x12},
+ {0x16, 0x12},
+ {0x18, 0x00},
+ {0x19, 0x77},
+ {0x1a, 0x55},
+ {0x1b, 0x13},
+ {0x1c, 0x00},
+ {0x1d, 0x00},
+ {0x1e, 0x13},
+ {0x1f, 0x00},
+ {0x23, 0x00},
+ {0x24, 0x00},
+ {0x25, 0x00},
+ {0x26, 0x00},
+ {0x27, 0x00},
+ {0x28, 0x00},
+ {0x35, 0x00},
+ {0x66, 0x00},
+ {0x58, 0x82},
+ {0x59, 0x02},
+ {0x5a, 0x02},
+ {0x5b, 0x02},
+ {0x5c, 0x82},
+ {0x5d, 0x82},
+ {0x5e, 0x02},
+ {0x5f, 0x02},
+ {0x72, 0x31},
+ /* Select CMD2 Page4 (Undocumented) */
+ {0xff, 0x05},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ {0x00, 0x01},
+ {0x01, 0x0b},
+ {0x02, 0x0c},
+ {0x03, 0x09},
+ {0x04, 0x0a},
+ {0x05, 0x00},
+ {0x06, 0x0f},
+ {0x07, 0x10},
+ {0x08, 0x00},
+ {0x09, 0x00},
+ {0x0a, 0x00},
+ {0x0b, 0x00},
+ {0x0c, 0x00},
+ {0x0d, 0x13},
+ {0x0e, 0x15},
+ {0x0f, 0x17},
+ {0x10, 0x01},
+ {0x11, 0x0b},
+ {0x12, 0x0c},
+ {0x13, 0x09},
+ {0x14, 0x0a},
+ {0x15, 0x00},
+ {0x16, 0x0f},
+ {0x17, 0x10},
+ {0x18, 0x00},
+ {0x19, 0x00},
+ {0x1a, 0x00},
+ {0x1b, 0x00},
+ {0x1c, 0x00},
+ {0x1d, 0x13},
+ {0x1e, 0x15},
+ {0x1f, 0x17},
+ {0x20, 0x00},
+ {0x21, 0x03},
+ {0x22, 0x01},
+ {0x23, 0x40},
+ {0x24, 0x40},
+ {0x25, 0xed},
+ {0x29, 0x58},
+ {0x2a, 0x12},
+ {0x2b, 0x01},
+ {0x4b, 0x06},
+ {0x4c, 0x11},
+ {0x4d, 0x20},
+ {0x4e, 0x02},
+ {0x4f, 0x02},
+ {0x50, 0x20},
+ {0x51, 0x61},
+ {0x52, 0x01},
+ {0x53, 0x63},
+ {0x54, 0x77},
+ {0x55, 0xed},
+ {0x5b, 0x00},
+ {0x5c, 0x00},
+ {0x5d, 0x00},
+ {0x5e, 0x00},
+ {0x5f, 0x15},
+ {0x60, 0x75},
+ {0x61, 0x00},
+ {0x62, 0x00},
+ {0x63, 0x00},
+ {0x64, 0x00},
+ {0x65, 0x00},
+ {0x66, 0x00},
+ {0x67, 0x00},
+ {0x68, 0x04},
+ {0x69, 0x00},
+ {0x6a, 0x00},
+ {0x6c, 0x40},
+ {0x75, 0x01},
+ {0x76, 0x01},
+ {0x7a, 0x80},
+ {0x7b, 0xa3},
+ {0x7c, 0xd8},
+ {0x7d, 0x60},
+ {0x7f, 0x15},
+ {0x80, 0x81},
+ {0x83, 0x05},
+ {0x93, 0x08},
+ {0x94, 0x10},
+ {0x8a, 0x00},
+ {0x9b, 0x0f},
+ {0xea, 0xff},
+ {0xec, 0x00},
+ /* Select CMD2 Page0 (Undocumented) */
+ {0xff, 0x01},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ {0x75, 0x00},
+ {0x76, 0xdf},
+ {0x77, 0x00},
+ {0x78, 0xe4},
+ {0x79, 0x00},
+ {0x7a, 0xed},
+ {0x7b, 0x00},
+ {0x7c, 0xf6},
+ {0x7d, 0x00},
+ {0x7e, 0xff},
+ {0x7f, 0x01},
+ {0x80, 0x07},
+ {0x81, 0x01},
+ {0x82, 0x10},
+ {0x83, 0x01},
+ {0x84, 0x18},
+ {0x85, 0x01},
+ {0x86, 0x20},
+ {0x87, 0x01},
+ {0x88, 0x3d},
+ {0x89, 0x01},
+ {0x8a, 0x56},
+ {0x8b, 0x01},
+ {0x8c, 0x84},
+ {0x8d, 0x01},
+ {0x8e, 0xab},
+ {0x8f, 0x01},
+ {0x90, 0xec},
+ {0x91, 0x02},
+ {0x92, 0x22},
+ {0x93, 0x02},
+ {0x94, 0x23},
+ {0x95, 0x02},
+ {0x96, 0x55},
+ {0x97, 0x02},
+ {0x98, 0x8b},
+ {0x99, 0x02},
+ {0x9a, 0xaf},
+ {0x9b, 0x02},
+ {0x9c, 0xdf},
+ {0x9d, 0x03},
+ {0x9e, 0x01},
+ {0x9f, 0x03},
+ {0xa0, 0x2c},
+ {0xa2, 0x03},
+ {0xa3, 0x39},
+ {0xa4, 0x03},
+ {0xa5, 0x47},
+ {0xa6, 0x03},
+ {0xa7, 0x56},
+ {0xa9, 0x03},
+ {0xaa, 0x66},
+ {0xab, 0x03},
+ {0xac, 0x76},
+ {0xad, 0x03},
+ {0xae, 0x85},
+ {0xaf, 0x03},
+ {0xb0, 0x90},
+ {0xb1, 0x03},
+ {0xb2, 0xcb},
+ {0xb3, 0x00},
+ {0xb4, 0xdf},
+ {0xb5, 0x00},
+ {0xb6, 0xe4},
+ {0xb7, 0x00},
+ {0xb8, 0xed},
+ {0xb9, 0x00},
+ {0xba, 0xf6},
+ {0xbb, 0x00},
+ {0xbc, 0xff},
+ {0xbd, 0x01},
+ {0xbe, 0x07},
+ {0xbf, 0x01},
+ {0xc0, 0x10},
+ {0xc1, 0x01},
+ {0xc2, 0x18},
+ {0xc3, 0x01},
+ {0xc4, 0x20},
+ {0xc5, 0x01},
+ {0xc6, 0x3d},
+ {0xc7, 0x01},
+ {0xc8, 0x56},
+ {0xc9, 0x01},
+ {0xca, 0x84},
+ {0xcb, 0x01},
+ {0xcc, 0xab},
+ {0xcd, 0x01},
+ {0xce, 0xec},
+ {0xcf, 0x02},
+ {0xd0, 0x22},
+ {0xd1, 0x02},
+ {0xd2, 0x23},
+ {0xd3, 0x02},
+ {0xd4, 0x55},
+ {0xd5, 0x02},
+ {0xd6, 0x8b},
+ {0xd7, 0x02},
+ {0xd8, 0xaf},
+ {0xd9, 0x02},
+ {0xda, 0xdf},
+ {0xdb, 0x03},
+ {0xdc, 0x01},
+ {0xdd, 0x03},
+ {0xde, 0x2c},
+ {0xdf, 0x03},
+ {0xe0, 0x39},
+ {0xe1, 0x03},
+ {0xe2, 0x47},
+ {0xe3, 0x03},
+ {0xe4, 0x56},
+ {0xe5, 0x03},
+ {0xe6, 0x66},
+ {0xe7, 0x03},
+ {0xe8, 0x76},
+ {0xe9, 0x03},
+ {0xea, 0x85},
+ {0xeb, 0x03},
+ {0xec, 0x90},
+ {0xed, 0x03},
+ {0xee, 0xcb},
+ {0xef, 0x00},
+ {0xf0, 0xbb},
+ {0xf1, 0x00},
+ {0xf2, 0xc0},
+ {0xf3, 0x00},
+ {0xf4, 0xcc},
+ {0xf5, 0x00},
+ {0xf6, 0xd6},
+ {0xf7, 0x00},
+ {0xf8, 0xe1},
+ {0xf9, 0x00},
+ {0xfa, 0xea},
+ /* Select CMD2 Page2 (Undocumented) */
+ {0xff, 0x02},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ {0x00, 0x00},
+ {0x01, 0xf4},
+ {0x02, 0x00},
+ {0x03, 0xef},
+ {0x04, 0x01},
+ {0x05, 0x07},
+ {0x06, 0x01},
+ {0x07, 0x28},
+ {0x08, 0x01},
+ {0x09, 0x44},
+ {0x0a, 0x01},
+ {0x0b, 0x76},
+ {0x0c, 0x01},
+ {0x0d, 0xa0},
+ {0x0e, 0x01},
+ {0x0f, 0xe7},
+ {0x10, 0x02},
+ {0x11, 0x1f},
+ {0x12, 0x02},
+ {0x13, 0x22},
+ {0x14, 0x02},
+ {0x15, 0x54},
+ {0x16, 0x02},
+ {0x17, 0x8b},
+ {0x18, 0x02},
+ {0x19, 0xaf},
+ {0x1a, 0x02},
+ {0x1b, 0xe0},
+ {0x1c, 0x03},
+ {0x1d, 0x01},
+ {0x1e, 0x03},
+ {0x1f, 0x2d},
+ {0x20, 0x03},
+ {0x21, 0x39},
+ {0x22, 0x03},
+ {0x23, 0x47},
+ {0x24, 0x03},
+ {0x25, 0x57},
+ {0x26, 0x03},
+ {0x27, 0x65},
+ {0x28, 0x03},
+ {0x29, 0x77},
+ {0x2a, 0x03},
+ {0x2b, 0x85},
+ {0x2d, 0x03},
+ {0x2f, 0x8f},
+ {0x30, 0x03},
+ {0x31, 0xcb},
+ {0x32, 0x00},
+ {0x33, 0xbb},
+ {0x34, 0x00},
+ {0x35, 0xc0},
+ {0x36, 0x00},
+ {0x37, 0xcc},
+ {0x38, 0x00},
+ {0x39, 0xd6},
+ {0x3a, 0x00},
+ {0x3b, 0xe1},
+ {0x3d, 0x00},
+ {0x3f, 0xea},
+ {0x40, 0x00},
+ {0x41, 0xf4},
+ {0x42, 0x00},
+ {0x43, 0xfe},
+ {0x44, 0x01},
+ {0x45, 0x07},
+ {0x46, 0x01},
+ {0x47, 0x28},
+ {0x48, 0x01},
+ {0x49, 0x44},
+ {0x4a, 0x01},
+ {0x4b, 0x76},
+ {0x4c, 0x01},
+ {0x4d, 0xa0},
+ {0x4e, 0x01},
+ {0x4f, 0xe7},
+ {0x50, 0x02},
+ {0x51, 0x1f},
+ {0x52, 0x02},
+ {0x53, 0x22},
+ {0x54, 0x02},
+ {0x55, 0x54},
+ {0x56, 0x02},
+ {0x58, 0x8b},
+ {0x59, 0x02},
+ {0x5a, 0xaf},
+ {0x5b, 0x02},
+ {0x5c, 0xe0},
+ {0x5d, 0x03},
+ {0x5e, 0x01},
+ {0x5f, 0x03},
+ {0x60, 0x2d},
+ {0x61, 0x03},
+ {0x62, 0x39},
+ {0x63, 0x03},
+ {0x64, 0x47},
+ {0x65, 0x03},
+ {0x66, 0x57},
+ {0x67, 0x03},
+ {0x68, 0x65},
+ {0x69, 0x03},
+ {0x6a, 0x77},
+ {0x6b, 0x03},
+ {0x6c, 0x85},
+ {0x6d, 0x03},
+ {0x6e, 0x8f},
+ {0x6f, 0x03},
+ {0x70, 0xcb},
+ {0x71, 0x00},
+ {0x72, 0x00},
+ {0x73, 0x00},
+ {0x74, 0x21},
+ {0x75, 0x00},
+ {0x76, 0x4c},
+ {0x77, 0x00},
+ {0x78, 0x6b},
+ {0x79, 0x00},
+ {0x7a, 0x85},
+ {0x7b, 0x00},
+ {0x7c, 0x9a},
+ {0x7d, 0x00},
+ {0x7e, 0xad},
+ {0x7f, 0x00},
+ {0x80, 0xbe},
+ {0x81, 0x00},
+ {0x82, 0xcd},
+ {0x83, 0x01},
+ {0x84, 0x01},
+ {0x85, 0x01},
+ {0x86, 0x29},
+ {0x87, 0x01},
+ {0x88, 0x68},
+ {0x89, 0x01},
+ {0x8a, 0x98},
+ {0x8b, 0x01},
+ {0x8c, 0xe5},
+ {0x8d, 0x02},
+ {0x8e, 0x1e},
+ {0x8f, 0x02},
+ {0x90, 0x30},
+ {0x91, 0x02},
+ {0x92, 0x52},
+ {0x93, 0x02},
+ {0x94, 0x88},
+ {0x95, 0x02},
+ {0x96, 0xaa},
+ {0x97, 0x02},
+ {0x98, 0xd7},
+ {0x99, 0x02},
+ {0x9a, 0xf7},
+ {0x9b, 0x03},
+ {0x9c, 0x21},
+ {0x9d, 0x03},
+ {0x9e, 0x2e},
+ {0x9f, 0x03},
+ {0xa0, 0x3d},
+ {0xa2, 0x03},
+ {0xa3, 0x4c},
+ {0xa4, 0x03},
+ {0xa5, 0x5e},
+ {0xa6, 0x03},
+ {0xa7, 0x71},
+ {0xa9, 0x03},
+ {0xaa, 0x86},
+ {0xab, 0x03},
+ {0xac, 0x94},
+ {0xad, 0x03},
+ {0xae, 0xfa},
+ {0xaf, 0x00},
+ {0xb0, 0x00},
+ {0xb1, 0x00},
+ {0xb2, 0x21},
+ {0xb3, 0x00},
+ {0xb4, 0x4c},
+ {0xb5, 0x00},
+ {0xb6, 0x6b},
+ {0xb7, 0x00},
+ {0xb8, 0x85},
+ {0xb9, 0x00},
+ {0xba, 0x9a},
+ {0xbb, 0x00},
+ {0xbc, 0xad},
+ {0xbd, 0x00},
+ {0xbe, 0xbe},
+ {0xbf, 0x00},
+ {0xc0, 0xcd},
+ {0xc1, 0x01},
+ {0xc2, 0x01},
+ {0xc3, 0x01},
+ {0xc4, 0x29},
+ {0xc5, 0x01},
+ {0xc6, 0x68},
+ {0xc7, 0x01},
+ {0xc8, 0x98},
+ {0xc9, 0x01},
+ {0xca, 0xe5},
+ {0xcb, 0x02},
+ {0xcc, 0x1e},
+ {0xcd, 0x02},
+ {0xce, 0x20},
+ {0xcf, 0x02},
+ {0xd0, 0x52},
+ {0xd1, 0x02},
+ {0xd2, 0x88},
+ {0xd3, 0x02},
+ {0xd4, 0xaa},
+ {0xd5, 0x02},
+ {0xd6, 0xd7},
+ {0xd7, 0x02},
+ {0xd8, 0xf7},
+ {0xd9, 0x03},
+ {0xda, 0x21},
+ {0xdb, 0x03},
+ {0xdc, 0x2e},
+ {0xdd, 0x03},
+ {0xde, 0x3d},
+ {0xdf, 0x03},
+ {0xe0, 0x4c},
+ {0xe1, 0x03},
+ {0xe2, 0x5e},
+ {0xe3, 0x03},
+ {0xe4, 0x71},
+ {0xe5, 0x03},
+ {0xe6, 0x86},
+ {0xe7, 0x03},
+ {0xe8, 0x94},
+ {0xe9, 0x03},
+ {0xea, 0xfa},
+ /* Select CMD2 Page0 (Undocumented) */
+ {0xff, 0x01},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ /* Select CMD2 Page1 (Undocumented) */
+ {0xff, 0x02},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ /* Select CMD2 Page3 (Undocumented) */
+ {0xff, 0x04},
+ /* Reload CMD1: Don't reload default value to register */
+ {0xfb, 0x01},
+ /* Select CMD1 */
+ {0xff, 0x00},
+ {0xd3, 0x05}, /* RGBMIPICTRL: VSYNC back porch = 5 */
+ {0xd4, 0x04}, /* RGBMIPICTRL: VSYNC front porch = 4 */
+};
+
+static inline
+struct khadas_ts050_panel *to_khadas_ts050_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct khadas_ts050_panel, base);
+}
+
+static int khadas_ts050_panel_prepare(struct drm_panel *panel)
+{
+ struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel);
+ unsigned int i;
+ int err;
+
+ if (khadas_ts050->prepared)
+ return 0;
+
+ gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 0);
+
+ err = regulator_enable(khadas_ts050->supply);
+ if (err < 0)
+ return err;
+
+ gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 1);
+
+ msleep(60);
+
+ gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 1);
+
+ usleep_range(10000, 11000);
+
+ gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 0);
+
+ /* Select CMD2 page 4 (Undocumented) */
+ mipi_dsi_dcs_write(khadas_ts050->link, 0xff, (u8[]){ 0x05 }, 1);
+
+ /* Reload CMD1: Don't reload default value to register */
+ mipi_dsi_dcs_write(khadas_ts050->link, 0xfb, (u8[]){ 0x01 }, 1);
+
+ mipi_dsi_dcs_write(khadas_ts050->link, 0xc5, (u8[]){ 0x01 }, 1);
+
+ msleep(100);
+
+ for (i = 0; i < ARRAY_SIZE(init_code); i++) {
+ err = mipi_dsi_dcs_write(khadas_ts050->link,
+ init_code[i].cmd,
+ &init_code[i].data, 1);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to write init cmds: %d\n", err);
+ goto poweroff;
+ }
+ }
+
+ err = mipi_dsi_dcs_exit_sleep_mode(khadas_ts050->link);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
+ goto poweroff;
+ }
+
+ msleep(120);
+
+ /* Select CMD1 */
+ mipi_dsi_dcs_write(khadas_ts050->link, 0xff, (u8[]){ 0x00 }, 1);
+
+ err = mipi_dsi_dcs_set_tear_on(khadas_ts050->link,
+ MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to set tear on: %d\n", err);
+ goto poweroff;
+ }
+
+ err = mipi_dsi_dcs_set_display_on(khadas_ts050->link);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to set display on: %d\n", err);
+ goto poweroff;
+ }
+
+ usleep_range(10000, 11000);
+
+ khadas_ts050->prepared = true;
+
+ return 0;
+
+poweroff:
+ gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 0);
+ gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 1);
+
+ regulator_disable(khadas_ts050->supply);
+
+ return err;
+}
+
+static int khadas_ts050_panel_unprepare(struct drm_panel *panel)
+{
+ struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel);
+ int err;
+
+ if (!khadas_ts050->prepared)
+ return 0;
+
+ khadas_ts050->prepared = false;
+
+ err = mipi_dsi_dcs_enter_sleep_mode(khadas_ts050->link);
+ if (err < 0)
+ dev_err(panel->dev, "failed to enter sleep mode: %d\n", err);
+
+ msleep(150);
+
+ gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 0);
+ gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 1);
+
+ err = regulator_disable(khadas_ts050->supply);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int khadas_ts050_panel_enable(struct drm_panel *panel)
+{
+ struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel);
+
+ khadas_ts050->enabled = true;
+
+ return 0;
+}
+
+static int khadas_ts050_panel_disable(struct drm_panel *panel)
+{
+ struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel);
+ int err;
+
+ if (!khadas_ts050->enabled)
+ return 0;
+
+ err = mipi_dsi_dcs_set_display_off(khadas_ts050->link);
+ if (err < 0)
+ dev_err(panel->dev, "failed to set display off: %d\n", err);
+
+ usleep_range(10000, 11000);
+
+ khadas_ts050->enabled = false;
+
+ return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = 120000,
+ .hdisplay = 1088,
+ .hsync_start = 1088 + 104,
+ .hsync_end = 1088 + 104 + 4,
+ .htotal = 1088 + 104 + 4 + 127,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 4,
+ .vsync_end = 1920 + 4 + 2,
+ .vtotal = 1920 + 4 + 2 + 3,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static int khadas_ts050_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
+ if (!mode) {
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ drm_mode_vrefresh(&default_mode));
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = 64;
+ connector->display_info.height_mm = 118;
+ connector->display_info.bpc = 8;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs khadas_ts050_panel_funcs = {
+ .prepare = khadas_ts050_panel_prepare,
+ .unprepare = khadas_ts050_panel_unprepare,
+ .enable = khadas_ts050_panel_enable,
+ .disable = khadas_ts050_panel_disable,
+ .get_modes = khadas_ts050_panel_get_modes,
+};
+
+static const struct of_device_id khadas_ts050_of_match[] = {
+ { .compatible = "khadas,ts050", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, khadas_ts050_of_match);
+
+static int khadas_ts050_panel_add(struct khadas_ts050_panel *khadas_ts050)
+{
+ struct device *dev = &khadas_ts050->link->dev;
+ int err;
+
+ khadas_ts050->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(khadas_ts050->supply))
+ return dev_err_probe(dev, PTR_ERR(khadas_ts050->supply),
+ "failed to get power supply");
+
+ khadas_ts050->reset_gpio = devm_gpiod_get(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(khadas_ts050->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(khadas_ts050->reset_gpio),
+ "failed to get reset gpio");
+
+ khadas_ts050->enable_gpio = devm_gpiod_get(dev, "enable",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(khadas_ts050->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(khadas_ts050->enable_gpio),
+ "failed to get enable gpio");
+
+ drm_panel_init(&khadas_ts050->base, &khadas_ts050->link->dev,
+ &khadas_ts050_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ err = drm_panel_of_backlight(&khadas_ts050->base);
+ if (err)
+ return err;
+
+ drm_panel_add(&khadas_ts050->base);
+
+ return 0;
+}
+
+static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct khadas_ts050_panel *khadas_ts050;
+ int err;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+ khadas_ts050 = devm_kzalloc(&dsi->dev, sizeof(*khadas_ts050),
+ GFP_KERNEL);
+ if (!khadas_ts050)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, khadas_ts050);
+ khadas_ts050->link = dsi;
+
+ err = khadas_ts050_panel_add(khadas_ts050);
+ if (err < 0)
+ return err;
+
+ err = mipi_dsi_attach(dsi);
+ if (err)
+ drm_panel_remove(&khadas_ts050->base);
+
+ return err;
+}
+
+static int khadas_ts050_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct khadas_ts050_panel *khadas_ts050 = mipi_dsi_get_drvdata(dsi);
+ int err;
+
+ err = mipi_dsi_detach(dsi);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+ drm_panel_remove(&khadas_ts050->base);
+ drm_panel_disable(&khadas_ts050->base);
+ drm_panel_unprepare(&khadas_ts050->base);
+
+ return 0;
+}
+
+static void khadas_ts050_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct khadas_ts050_panel *khadas_ts050 = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_disable(&khadas_ts050->base);
+ drm_panel_unprepare(&khadas_ts050->base);
+}
+
+static struct mipi_dsi_driver khadas_ts050_panel_driver = {
+ .driver = {
+ .name = "panel-khadas-ts050",
+ .of_match_table = khadas_ts050_of_match,
+ },
+ .probe = khadas_ts050_panel_probe,
+ .remove = khadas_ts050_panel_remove,
+ .shutdown = khadas_ts050_panel_shutdown,
+};
+module_mipi_dsi_driver(khadas_ts050_panel_driver);
+
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Khadas TS050 panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index 0c5f22e95c2d..30f28ad4df6b 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
@@ -22,6 +23,7 @@
/* Manufacturer specific Commands send via DSI */
#define MANTIX_CMD_OTP_STOP_RELOAD_MIPI 0x41
#define MANTIX_CMD_INT_CANCEL 0x4C
+#define MANTIX_CMD_SPI_FINISH 0x90
struct mantix {
struct device *dev;
@@ -33,6 +35,8 @@ struct mantix {
struct regulator *avdd;
struct regulator *avee;
struct regulator *vddi;
+
+ const struct drm_display_mode *default_mode;
};
static inline struct mantix *panel_to_mantix(struct drm_panel *panel)
@@ -66,6 +70,10 @@ static int mantix_init_sequence(struct mantix *ctx)
dsi_generic_write_seq(dsi, 0x80, 0x64, 0x00, 0x64, 0x00, 0x00);
msleep(20);
+ dsi_generic_write_seq(dsi, MANTIX_CMD_SPI_FINISH, 0xA5);
+ dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x00, 0x2F);
+ msleep(20);
+
dev_dbg(dev, "Panel init sequence done\n");
return 0;
}
@@ -182,7 +190,7 @@ static int mantix_prepare(struct drm_panel *panel)
return 0;
}
-static const struct drm_display_mode default_mode = {
+static const struct drm_display_mode default_mode_mantix = {
.hdisplay = 720,
.hsync_start = 720 + 45,
.hsync_end = 720 + 45 + 14,
@@ -197,17 +205,32 @@ static const struct drm_display_mode default_mode = {
.height_mm = 130,
};
+static const struct drm_display_mode default_mode_ys = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 45,
+ .hsync_end = 720 + 45 + 14,
+ .htotal = 720 + 45 + 14 + 25,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 175,
+ .vsync_end = 1440 + 175 + 8,
+ .vtotal = 1440 + 175 + 8 + 50,
+ .clock = 85298,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 65,
+ .height_mm = 130,
+};
+
static int mantix_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct mantix *ctx = panel_to_mantix(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(connector->dev, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, ctx->default_mode);
if (!mode) {
dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode));
+ ctx->default_mode->hdisplay, ctx->default_mode->vdisplay,
+ drm_mode_vrefresh(ctx->default_mode));
return -ENOMEM;
}
@@ -238,6 +261,7 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ ctx->default_mode = of_device_get_match_data(dev);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio)) {
@@ -288,8 +312,8 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
}
dev_info(dev, "%ux%u@%u %ubpp dsi %udl - ready\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode),
+ ctx->default_mode->hdisplay, ctx->default_mode->vdisplay,
+ drm_mode_vrefresh(ctx->default_mode),
mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
return 0;
@@ -316,7 +340,8 @@ static int mantix_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id mantix_of_match[] = {
- { .compatible = "mantix,mlaf057we51-x" },
+ { .compatible = "mantix,mlaf057we51-x", .data = &default_mode_mantix },
+ { .compatible = "ys,ys57pss36bh5gq", .data = &default_mode_ys },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mantix_of_match);
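The mantix change above picks the panel timings from the OF match entry instead of a single global mode. A minimal userspace sketch of that table-driven selection follows; get_match_data() is a stand-in for of_device_get_match_data() and the timing values are placeholders, not the panel's real numbers.

#include <stdio.h>
#include <string.h>

struct mode { int hdisplay, vdisplay; };	/* placeholder timings */

static const struct mode mode_mantix = { 720, 1440 };
static const struct mode mode_ys     = { 720, 1440 };

struct match { const char *compatible; const void *data; };

static const struct match of_match[] = {
	{ "mantix,mlaf057we51-x", &mode_mantix },
	{ "ys,ys57pss36bh5gq",    &mode_ys },
	{ NULL, NULL }				/* sentinel */
};

/* Stand-in for of_device_get_match_data(): walk the table by string. */
static const void *get_match_data(const char *compatible)
{
	const struct match *m;

	for (m = of_match; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;
}

int main(void)
{
	const struct mode *mode = get_match_data("ys,ys57pss36bh5gq");

	if (mode)
		printf("default mode %dx%d\n", mode->hdisplay, mode->vdisplay);
	return 0;
}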
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index 6b4e97bfd46e..603c5dfe8768 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -25,6 +25,14 @@
/* Manufacturer Command Set */
#define MCS_ELVSS_ON 0xb1
#define MCS_TEMP_SWIRE 0xb2
+#define MCS_PENTILE_1 0xb3
+#define MCS_PENTILE_2 0xb4
+#define MCS_GAMMA_DELTA_Y_RED 0xb5
+#define MCS_GAMMA_DELTA_X_RED 0xb6
+#define MCS_GAMMA_DELTA_Y_GREEN 0xb7
+#define MCS_GAMMA_DELTA_X_GREEN 0xb8
+#define MCS_GAMMA_DELTA_Y_BLUE 0xb9
+#define MCS_GAMMA_DELTA_X_BLUE 0xba
#define MCS_MIECTL1 0xc0
#define MCS_BCMODE 0xc1
#define MCS_ERROR_CHECK 0xd5
@@ -281,6 +289,7 @@ struct s6e63m0 {
struct backlight_device *bl_dev;
u8 lcd_type;
u8 elvss_pulse;
+ bool dsi_mode;
struct regulator_bulk_data supplies[2];
struct gpio_desc *reset_gpio;
@@ -395,9 +404,21 @@ static int s6e63m0_check_lcd_type(struct s6e63m0 *ctx)
static void s6e63m0_init(struct s6e63m0 *ctx)
{
- s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
- 0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f,
- 0x63, 0x8f, 0x1a, 0x33, 0x0d, 0x00, 0x00);
+ /*
+ * We do not know why there is a difference in the DSI mode.
+ * (No datasheet.)
+ *
+ * In the vendor driver this sequence is called
+ * "SEQ_PANEL_CONDITION_SET" or "DCS_CMD_SEQ_PANEL_COND_SET".
+ */
+ if (ctx->dsi_mode)
+ s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
+ 0x01, 0x2c, 0x2c, 0x07, 0x07, 0x5f, 0xb3,
+ 0x6d, 0x97, 0x1d, 0x3a, 0x0f, 0x00, 0x00);
+ else
+ s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
+ 0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f,
+ 0x63, 0x8f, 0x1a, 0x33, 0x0d, 0x00, 0x00);
s6e63m0_dcs_write_seq_static(ctx, MCS_DISCTL,
0x02, 0x03, 0x1c, 0x10, 0x10);
@@ -414,40 +435,40 @@ static void s6e63m0_init(struct s6e63m0 *ctx)
s6e63m0_dcs_write_seq_static(ctx, MCS_SRCCTL,
0x00, 0x8e, 0x07);
- s6e63m0_dcs_write_seq_static(ctx, 0xb3, 0x6c);
+ s6e63m0_dcs_write_seq_static(ctx, MCS_PENTILE_1, 0x6c);
- s6e63m0_dcs_write_seq_static(ctx, 0xb5,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_RED,
0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
0x21, 0x20, 0x1e, 0x1e);
- s6e63m0_dcs_write_seq_static(ctx, 0xb6,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_RED,
0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
0x66, 0x66);
- s6e63m0_dcs_write_seq_static(ctx, 0xb7,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_GREEN,
0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
0x21, 0x20, 0x1e, 0x1e);
- s6e63m0_dcs_write_seq_static(ctx, 0xb8,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_GREEN,
0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
0x66, 0x66);
- s6e63m0_dcs_write_seq_static(ctx, 0xb9,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_BLUE,
0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
0x21, 0x20, 0x1e, 0x1e);
- s6e63m0_dcs_write_seq_static(ctx, 0xba,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_BLUE,
0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
0x66, 0x66);
@@ -671,12 +692,12 @@ static const struct backlight_ops s6e63m0_backlight_ops = {
.update_status = s6e63m0_set_brightness,
};
-static int s6e63m0_backlight_register(struct s6e63m0 *ctx)
+static int s6e63m0_backlight_register(struct s6e63m0 *ctx, u32 max_brightness)
{
struct backlight_properties props = {
.type = BACKLIGHT_RAW,
- .brightness = MAX_BRIGHTNESS,
- .max_brightness = MAX_BRIGHTNESS
+ .brightness = max_brightness,
+ .max_brightness = max_brightness,
};
struct device *dev = ctx->dev;
int ret = 0;
@@ -698,12 +719,14 @@ int s6e63m0_probe(struct device *dev,
bool dsi_mode)
{
struct s6e63m0 *ctx;
+ u32 max_brightness;
int ret;
ctx = devm_kzalloc(dev, sizeof(struct s6e63m0), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ ctx->dsi_mode = dsi_mode;
ctx->dcs_read = dcs_read;
ctx->dcs_write = dcs_write;
dev_set_drvdata(dev, ctx);
@@ -712,6 +735,14 @@ int s6e63m0_probe(struct device *dev,
ctx->enabled = false;
ctx->prepared = false;
+ ret = device_property_read_u32(dev, "max-brightness", &max_brightness);
+ if (ret)
+ max_brightness = MAX_BRIGHTNESS;
+ if (max_brightness > MAX_BRIGHTNESS) {
+ dev_err(dev, "illegal max brightness specified\n");
+ max_brightness = MAX_BRIGHTNESS;
+ }
+
ctx->supplies[0].supply = "vdd3";
ctx->supplies[1].supply = "vci";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
@@ -731,7 +762,7 @@ int s6e63m0_probe(struct device *dev,
dsi_mode ? DRM_MODE_CONNECTOR_DSI :
DRM_MODE_CONNECTOR_DPI);
- ret = s6e63m0_backlight_register(ctx);
+ ret = s6e63m0_backlight_register(ctx, max_brightness);
if (ret < 0)
return ret;
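The s6e63m0 hunks read an optional max-brightness property, fall back to the driver default when it is absent, and clamp anything above the ceiling. A small standalone sketch of that resolve-and-clamp step; read_property() and the value 24 are placeholders for device_property_read_u32() and the driver's MAX_BRIGHTNESS.

#include <stdio.h>

#define MAX_BRIGHTNESS 24	/* placeholder for the driver's own ceiling */

/* Pretend property lookup: 0 on success, nonzero when the property is absent. */
static int read_property(const char *name, unsigned int *val)
{
	(void)name;
	*val = 100;		/* deliberately larger than the ceiling */
	return 0;
}

static unsigned int resolve_max_brightness(void)
{
	unsigned int max;

	if (read_property("max-brightness", &max))
		max = MAX_BRIGHTNESS;		/* property missing: use default */
	if (max > MAX_BRIGHTNESS) {
		fprintf(stderr, "illegal max brightness specified\n");
		max = MAX_BRIGHTNESS;		/* clamp to the panel limit */
	}
	return max;
}

int main(void)
{
	printf("max_brightness = %u\n", resolve_max_brightness());
	return 0;
}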
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 41bbec72b2da..4e2dad314c79 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -39,72 +39,145 @@
#include <drm/drm_panel.h>
/**
- * struct panel_desc
- * @modes: Pointer to array of fixed modes appropriate for this panel. If
- * only one mode then this can just be the address of this the mode.
- * NOTE: cannot be used with "timings" and also if this is specified
- * then you cannot override the mode in the device tree.
- * @num_modes: Number of elements in modes array.
- * @timings: Pointer to array of display timings. NOTE: cannot be used with
- * "modes" and also these will be used to validate a device tree
- * override if one is present.
- * @num_timings: Number of elements in timings array.
- * @bpc: Bits per color.
- * @size: Structure containing the physical size of this panel.
- * @delay: Structure containing various delay values for this panel.
- * @bus_format: See MEDIA_BUS_FMT_... defines.
- * @bus_flags: See DRM_BUS_FLAG_... defines.
- * @connector_type: LVDS, eDP, DSI, DPI, etc.
+ * struct panel_desc - Describes a simple panel.
*/
struct panel_desc {
+ /**
+ * @modes: Pointer to array of fixed modes appropriate for this panel.
+ *
+ * If only one mode then this can just be the address of the mode.
+ * NOTE: cannot be used with "timings" and also if this is specified
+ * then you cannot override the mode in the device tree.
+ */
const struct drm_display_mode *modes;
+
+ /** @num_modes: Number of elements in modes array. */
unsigned int num_modes;
+
+ /**
+ * @timings: Pointer to array of display timings
+ *
+ * NOTE: cannot be used with "modes" and also these will be used to
+ * validate a device tree override if one is present.
+ */
const struct display_timing *timings;
+
+ /** @num_timings: Number of elements in timings array. */
unsigned int num_timings;
+ /** @bpc: Bits per color. */
unsigned int bpc;
- /**
- * @width: width (in millimeters) of the panel's active display area
- * @height: height (in millimeters) of the panel's active display area
- */
+ /** @size: Structure containing the physical size of this panel. */
struct {
+ /**
+ * @size.width: Width (in mm) of the active display area.
+ */
unsigned int width;
+
+ /**
+ * @size.height: Height (in mm) of the active display area.
+ */
unsigned int height;
} size;
- /**
- * @prepare: the time (in milliseconds) that it takes for the panel to
- * become ready and start receiving video data
- * @hpd_absent_delay: Add this to the prepare delay if we know Hot
- * Plug Detect isn't used.
- * @enable: the time (in milliseconds) that it takes for the panel to
- * display the first valid frame after starting to receive
- * video data
- * @disable: the time (in milliseconds) that it takes for the panel to
- * turn the display off (no content is visible)
- * @unprepare: the time (in milliseconds) that it takes for the panel
- * to power itself down completely
- */
+ /** @delay: Structure containing various delay values for this panel. */
struct {
+ /**
+ * @delay.prepare: Time for the panel to become ready.
+ *
+ * The time (in milliseconds) that it takes for the panel to
+ * become ready and start receiving video data
+ */
unsigned int prepare;
+
+ /**
+ * @delay.hpd_absent_delay: Time to wait if HPD isn't hooked up.
+ *
+ * Add this to the prepare delay if we know Hot Plug Detect
+ * isn't used.
+ */
unsigned int hpd_absent_delay;
+
+ /**
+ * @delay.prepare_to_enable: Time between prepare and enable.
+ *
+ * The minimum time, in milliseconds, that needs to have passed
+ * between when prepare finished and enable may begin. If at
+ * enable time less time has passed since prepare finished,
+ * the driver waits for the remaining time.
+ *
+ * If a fixed enable delay is also specified, we'll start
+ * counting before delaying for the fixed delay.
+ *
+ * If a fixed prepare delay is also specified, we won't start
+ * counting until after the fixed delay. We can't overlap this
+ * fixed delay with the min time because the fixed delay
+ * doesn't happen at the end of the function if a HPD GPIO was
+ * specified.
+ *
+ * In other words:
+ * prepare()
+ * ...
+ * // do fixed prepare delay
+ * // wait for HPD GPIO if applicable
+ * // start counting for prepare_to_enable
+ *
+ * enable()
+ * // do fixed enable delay
+ * // enforce prepare_to_enable min time
+ */
+ unsigned int prepare_to_enable;
+
+ /**
+ * @delay.enable: Time for the panel to display a valid frame.
+ *
+ * The time (in milliseconds) that it takes for the panel to
+ * display the first valid frame after starting to receive
+ * video data.
+ */
unsigned int enable;
+
+ /**
+ * @delay.disable: Time for the panel to turn the display off.
+ *
+ * The time (in milliseconds) that it takes for the panel to
+ * turn the display off (no content is visible).
+ */
unsigned int disable;
+
+ /**
+ * @delay.unprepare: Time to power down completely.
+ *
+ * The time (in milliseconds) that it takes for the panel
+ * to power itself down completely.
+ *
+ * This time is used to prevent a future "prepare" from
+ * starting until at least this many milliseconds has passed.
+ * If at prepare time less time has passed since unprepare
+ * finished, the driver waits for the remaining time.
+ */
unsigned int unprepare;
} delay;
+ /** @bus_format: See MEDIA_BUS_FMT_... defines. */
u32 bus_format;
+
+ /** @bus_flags: See DRM_BUS_FLAG_... defines. */
u32 bus_flags;
+
+ /** @connector_type: LVDS, eDP, DSI, DPI, etc. */
int connector_type;
};
struct panel_simple {
struct drm_panel base;
- bool prepared;
bool enabled;
bool no_hpd;
+ ktime_t prepared_time;
+ ktime_t unprepared_time;
+
const struct panel_desc *desc;
struct regulator *supply;
@@ -232,6 +305,20 @@ static int panel_simple_get_non_edid_modes(struct panel_simple *panel,
return num;
}
+static void panel_simple_wait(ktime_t start_ktime, unsigned int min_ms)
+{
+ ktime_t now_ktime, min_ktime;
+
+ if (!min_ms)
+ return;
+
+ min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
+ now_ktime = ktime_get();
+
+ if (ktime_before(now_ktime, min_ktime))
+ msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
+}
+
static int panel_simple_disable(struct drm_panel *panel)
{
struct panel_simple *p = to_panel_simple(panel);
@@ -251,17 +338,15 @@ static int panel_simple_unprepare(struct drm_panel *panel)
{
struct panel_simple *p = to_panel_simple(panel);
- if (!p->prepared)
+ if (p->prepared_time == 0)
return 0;
gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
- if (p->desc->delay.unprepare)
- msleep(p->desc->delay.unprepare);
-
- p->prepared = false;
+ p->prepared_time = 0;
+ p->unprepared_time = ktime_get();
return 0;
}
@@ -298,9 +383,11 @@ static int panel_simple_prepare(struct drm_panel *panel)
int err;
int hpd_asserted;
- if (p->prepared)
+ if (p->prepared_time != 0)
return 0;
+ panel_simple_wait(p->unprepared_time, p->desc->delay.unprepare);
+
err = regulator_enable(p->supply);
if (err < 0) {
dev_err(panel->dev, "failed to enable supply: %d\n", err);
@@ -335,7 +422,7 @@ static int panel_simple_prepare(struct drm_panel *panel)
}
}
- p->prepared = true;
+ p->prepared_time = ktime_get();
return 0;
}
@@ -350,6 +437,8 @@ static int panel_simple_enable(struct drm_panel *panel)
if (p->desc->delay.enable)
msleep(p->desc->delay.enable);
+ panel_simple_wait(p->prepared_time, p->desc->delay.prepare_to_enable);
+
p->enabled = true;
return 0;
@@ -516,7 +605,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return -ENOMEM;
panel->enabled = false;
- panel->prepared = false;
+ panel->prepared_time = 0;
panel->desc = desc;
panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
@@ -1318,6 +1407,51 @@ static const struct panel_desc boe_nv101wxmn51 = {
},
};
+static const struct drm_display_mode boe_nv110wtm_n61_modes[] = {
+ {
+ .clock = 207800,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 100,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 6,
+ .vtotal = 1440 + 3 + 6 + 31,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+ {
+ .clock = 138500,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 100,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 6,
+ .vtotal = 1440 + 3 + 6 + 31,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+};
+
+static const struct panel_desc boe_nv110wtm_n61 = {
+ .modes = boe_nv110wtm_n61_modes,
+ .num_modes = ARRAY_SIZE(boe_nv110wtm_n61_modes),
+ .bpc = 8,
+ .size = {
+ .width = 233,
+ .height = 155,
+ },
+ .delay = {
+ .hpd_absent_delay = 200,
+ .prepare_to_enable = 80,
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
/* Also used for boe_nv133fhm_n62 */
static const struct drm_display_mode boe_nv133fhm_n61_modes = {
.clock = 147840,
@@ -2265,6 +2399,8 @@ static const struct panel_desc innolux_n116bge = {
.width = 256,
.height = 144,
},
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
};
static const struct drm_display_mode innolux_n125hce_gn1_mode = {
@@ -4034,6 +4170,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "boe,nv101wxmn51",
.data = &boe_nv101wxmn51,
}, {
+ .compatible = "boe,nv110wtm-n61",
+ .data = &boe_nv110wtm_n61,
+ }, {
.compatible = "boe,nv133fhm-n61",
.data = &boe_nv133fhm_n61,
}, {
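panel_simple_wait() above enforces a minimum spacing between unprepare and the next prepare (and between prepare and enable) by comparing a stored ktime against the current time and sleeping only for the remainder. A userspace sketch of the same arithmetic, with POSIX clocks standing in for ktime_get() and msleep():

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <time.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Sleep only for whatever part of min_ms has not already elapsed. */
static void wait_min(long long start_ms, unsigned int min_ms)
{
	long long remaining;

	if (!min_ms)
		return;

	remaining = start_ms + min_ms - now_ms();
	if (remaining > 0) {
		struct timespec req = {
			.tv_sec = remaining / 1000,
			.tv_nsec = (remaining % 1000) * 1000000,
		};
		nanosleep(&req, NULL);
	}
}

int main(void)
{
	long long unprepared = now_ms();	/* analogous to unprepared_time */

	/* ... power-down settling happens here ... */
	wait_min(unprepared, 500);		/* enforce a 500 ms unprepare gap */
	printf("prepare may start now\n");
	return 0;
}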
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index b30510b1696a..a2c303e5732c 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -530,10 +530,8 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
return -ENOMEM;
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(ctx->reset_gpio)) {
- dev_err(dev, "cannot get reset gpio\n");
- return PTR_ERR(ctx->reset_gpio);
- }
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), "Failed to get reset gpio\n");
mipi_dsi_set_drvdata(dsi, ctx);
@@ -545,19 +543,13 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
dsi->lanes = ctx->desc->lanes;
ctx->vcc = devm_regulator_get(dev, "vcc");
- if (IS_ERR(ctx->vcc)) {
- ret = PTR_ERR(ctx->vcc);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request vcc regulator: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(ctx->vcc))
+ return dev_err_probe(dev, PTR_ERR(ctx->vcc), "Failed to request vcc regulator\n");
+
ctx->iovcc = devm_regulator_get(dev, "iovcc");
- if (IS_ERR(ctx->iovcc)) {
- ret = PTR_ERR(ctx->iovcc);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request iovcc regulator: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(ctx->iovcc))
+ return dev_err_probe(dev, PTR_ERR(ctx->iovcc),
+ "Failed to request iovcc regulator\n");
drm_panel_init(&ctx->panel, dev, &st7703_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index f44d28fad085..56b3f5935703 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -76,6 +76,7 @@ static int panfrost_devfreq_get_dev_status(struct device *dev,
}
static struct devfreq_dev_profile panfrost_devfreq_profile = {
+ .timer = DEVFREQ_TIMER_DELAYED,
.polling_ms = 50, /* ~3 frames */
.target = panfrost_devfreq_target,
.get_dev_status = panfrost_devfreq_get_dev_status,
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 40e6708fbbe2..fa0a737e9dea 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -228,7 +228,7 @@ static const struct drm_driver pl111_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = pl111_gem_import_sg_table,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = pl111_debugfs_init,
@@ -320,7 +320,7 @@ dev_put:
return ret;
}
-static int pl111_amba_remove(struct amba_device *amba_dev)
+static void pl111_amba_remove(struct amba_device *amba_dev)
{
struct device *dev = &amba_dev->dev;
struct drm_device *drm = amba_get_drvdata(amba_dev);
@@ -331,8 +331,6 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
drm_panel_bridge_remove(priv->bridge);
drm_dev_put(drm);
of_reserved_mem_device_release(dev);
-
- return 0;
}
/*
diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h
index a7bc31f6d565..06caa61b5d66 100644
--- a/drivers/gpu/drm/qxl/qxl_dev.h
+++ b/drivers/gpu/drm/qxl/qxl_dev.h
@@ -271,7 +271,7 @@ struct qxl_mode {
/* qxl-1 compat: fixed */
struct qxl_modes {
uint32_t n_modes;
- struct qxl_mode modes[0];
+ struct qxl_mode modes[];
};
/* qxl-1 compat: append only */
@@ -382,12 +382,12 @@ struct qxl_data_chunk {
uint32_t data_size;
QXLPHYSICAL prev_chunk;
QXLPHYSICAL next_chunk;
- uint8_t data[0];
+ uint8_t data[];
};
struct qxl_message {
union qxl_release_info release_info;
- uint8_t data[0];
+ uint8_t data[];
};
struct qxl_compat_update_cmd {
@@ -469,7 +469,7 @@ struct qxl_raster_glyph {
struct qxl_point glyph_origin;
uint16_t width;
uint16_t height;
- uint8_t data[0];
+ uint8_t data[];
};
struct qxl_string {
@@ -768,7 +768,7 @@ enum {
struct qxl_path_seg {
uint32_t flags;
uint32_t count;
- struct qxl_point_fix points[0];
+ struct qxl_point_fix points[];
};
struct qxl_path {
@@ -819,7 +819,7 @@ struct qxl_image_descriptor {
struct qxl_palette {
uint64_t unique;
uint16_t num_ents;
- uint32_t ents[0];
+ uint32_t ents[];
};
struct qxl_bitmap {
@@ -838,7 +838,7 @@ struct qxl_surface_id {
struct qxl_encoder_data {
uint32_t data_size;
- uint8_t data[0];
+ uint8_t data[];
};
struct qxl_image {
@@ -868,7 +868,7 @@ struct qxl_monitors_config {
uint16_t count;
uint16_t max_allowed; /* If it is 0 no fixed limit is given by the
driver */
- struct qxl_head heads[0];
+ struct qxl_head heads[];
};
#pragma pack(pop)
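The qxl_dev.h hunks convert zero-length trailing arrays to C99 flexible array members, which changes nothing at runtime but lets the compiler and bounds checks see the real extent. A standalone example of how such a structure is sized and allocated; the struct below is trimmed to two fields for brevity.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct qxl_mode { uint32_t x_res, y_res; };	/* trimmed for brevity */

struct qxl_modes {
	uint32_t n_modes;
	struct qxl_mode modes[];	/* flexible array member */
};

int main(void)
{
	uint32_t n = 4;
	/* sizeof(*m) covers only the header; the array is sized explicitly. */
	struct qxl_modes *m = malloc(sizeof(*m) + n * sizeof(m->modes[0]));

	if (!m)
		return 1;
	m->n_modes = n;
	m->modes[0] = (struct qxl_mode){ 1024, 768 };
	printf("%u modes, first %ux%u\n", m->n_modes,
	       m->modes[0].x_res, m->modes[0].y_res);
	free(m);
	return 0;
}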
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 6e7f16f4cec7..1864467f1063 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -141,7 +141,7 @@ static void qxl_drm_release(struct drm_device *dev)
/*
* TODO: qxl_device_fini() call should be in qxl_pci_remove(),
- * reodering qxl_modeset_fini() + qxl_device_fini() calls is
+ * reordering qxl_modeset_fini() + qxl_device_fini() calls is
* non-trivial though.
*/
qxl_modeset_fini(qdev);
@@ -163,7 +163,7 @@ DEFINE_DRM_GEM_FOPS(qxl_fops);
static int qxl_drm_freeze(struct drm_device *dev)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct qxl_device *qdev = to_qxl(dev);
int ret;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 8bd0f916dfbc..83b54f0dad61 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -46,7 +46,6 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_execbuf_util.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>
#include "qxl_dev.h"
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 16e1e589508e..b6075f452b9e 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -370,13 +370,14 @@ static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct qxl_device *qdev = to_qxl(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct drm_qxl_clientcap *param = data;
int byte, idx;
byte = param->index / 8;
idx = param->index % 8;
- if (dev->pdev->revision < 4)
+ if (pdev->revision < 4)
return -ENOSYS;
if (byte >= 58)
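The qxl hunks above and the radeon hunks that follow replace the removed drm_device::pdev pointer with to_pci_dev(dev->dev), which in the kernel is a container_of() from the embedded struct device back to the PCI device. A self-contained illustration of that pointer recovery with stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { const char *name; };	/* stand-in for struct device */

struct pci_dev {
	unsigned short device;		/* PCI device ID */
	struct device dev;		/* embedded generic device */
};

static struct pci_dev *to_pci_dev(struct device *d)
{
	return container_of(d, struct pci_dev, dev);
}

int main(void)
{
	struct pci_dev pdev = { .device = 0x71C5, .dev = { "qxl" } };
	struct device *d = &pdev.dev;	/* what drm_device->dev points at */

	printf("device id 0x%04x\n", to_pci_dev(d)->device);
	return 0;
}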
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 1ba5a702d763..ddf6588a2a38 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -81,6 +81,7 @@ static void qxl_client_monitors_config_work_func(struct work_struct *work)
int qxl_irq_init(struct qxl_device *qdev)
{
+ struct pci_dev *pdev = to_pci_dev(qdev->ddev.dev);
int ret;
init_waitqueue_head(&qdev->display_event);
@@ -93,7 +94,7 @@ int qxl_irq_init(struct qxl_device *qdev)
atomic_set(&qdev->irq_received_cursor, 0);
atomic_set(&qdev->irq_received_io_cmd, 0);
qdev->irq_received_error = 0;
- ret = drm_irq_install(&qdev->ddev, qdev->ddev.pdev->irq);
+ ret = drm_irq_install(&qdev->ddev, pdev->irq);
qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
if (unlikely(ret != 0)) {
DRM_ERROR("Failed installing irq: %d\n", ret);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 228e2b9198f1..4a60a52ab62e 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -111,7 +111,6 @@ int qxl_device_init(struct qxl_device *qdev,
{
int r, sb;
- qdev->ddev.pdev = pdev;
pci_set_drvdata(pdev, &qdev->ddev);
mutex_init(&qdev->gem.mutex);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index ebf24c9d2bf2..e60a8f88e226 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -50,7 +50,7 @@ static inline void qxl_bo_unreserve(struct qxl_bo *bo)
static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
{
- return bo->tbo.num_pages << PAGE_SHIFT;
+ return bo->tbo.base.size;
}
static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index e75e364655b8..0fcfc952d5e9 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -456,7 +456,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
bo = entry->bo;
dma_resv_add_shared_fence(bo->base.resv, &release->base);
- ttm_bo_move_to_lru_tail(bo, NULL);
+ ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&ttm_bo_glob.lru_lock);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 7dd0c69baa47..33c09dc94f8b 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -31,7 +31,6 @@
#include <drm/qxl_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>
#include "qxl_drv.h"
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index 6ac71755c22d..cdeb1db87222 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file r128_ioc32.c
*
* 32-bit ioctl compatibility routines for the R128 DRM.
@@ -170,13 +170,13 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
};
/**
- * Called whenever a 32-bit process running under a 64-bit kernel
- * performs an ioctl on /dev/dri/card<n>.
+ * r128_compat_ioctl - Called whenever a 32-bit process running under
+ * a 64-bit kernel performs an ioctl on /dev/dri/card<n>.
*
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
+ * @filp: file pointer.
+ * @cmd: command.
+ * @arg: user argument.
+ * return: zero on success or negative number on failure.
*/
long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 683de198e18d..0fce73b9a646 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2062,9 +2062,9 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
/* Funky macbooks */
- if ((dev->pdev->device == 0x71C5) &&
- (dev->pdev->subsystem_vendor == 0x106b) &&
- (dev->pdev->subsystem_device == 0x0080)) {
+ if ((rdev->pdev->device == 0x71C5) &&
+ (rdev->pdev->subsystem_vendor == 0x106b) &&
+ (rdev->pdev->subsystem_device == 0x0080)) {
if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index aef4efc692b1..2955bb32d5ad 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2612,7 +2612,6 @@ int r100_asic_reset(struct radeon_device *rdev, bool hard)
void r100_set_common_regs(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
bool force_dac2 = false;
u32 tmp;
@@ -2630,7 +2629,7 @@ void r100_set_common_regs(struct radeon_device *rdev)
* don't report it in the bios connector
* table.
*/
- switch (dev->pdev->device) {
+ switch (rdev->pdev->device) {
/* RN50 */
case 0x515e:
case 0x5969:
@@ -2640,17 +2639,17 @@ void r100_set_common_regs(struct radeon_device *rdev)
case 0x5159:
case 0x515a:
/* DELL triple head servers */
- if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
- ((dev->pdev->subsystem_device == 0x016c) ||
- (dev->pdev->subsystem_device == 0x016d) ||
- (dev->pdev->subsystem_device == 0x016e) ||
- (dev->pdev->subsystem_device == 0x016f) ||
- (dev->pdev->subsystem_device == 0x0170) ||
- (dev->pdev->subsystem_device == 0x017d) ||
- (dev->pdev->subsystem_device == 0x017e) ||
- (dev->pdev->subsystem_device == 0x0183) ||
- (dev->pdev->subsystem_device == 0x018a) ||
- (dev->pdev->subsystem_device == 0x019a)))
+ if ((rdev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
+ ((rdev->pdev->subsystem_device == 0x016c) ||
+ (rdev->pdev->subsystem_device == 0x016d) ||
+ (rdev->pdev->subsystem_device == 0x016e) ||
+ (rdev->pdev->subsystem_device == 0x016f) ||
+ (rdev->pdev->subsystem_device == 0x0170) ||
+ (rdev->pdev->subsystem_device == 0x017d) ||
+ (rdev->pdev->subsystem_device == 0x017e) ||
+ (rdev->pdev->subsystem_device == 0x0183) ||
+ (rdev->pdev->subsystem_device == 0x018a) ||
+ (rdev->pdev->subsystem_device == 0x019a)))
force_dac2 = true;
break;
}
@@ -2798,7 +2797,7 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
rdev->mc.real_vram_size = 8192 * 1024;
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
}
- /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
+ /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
* Novell bug 204882 + along with lots of ubuntu ones
*/
if (rdev->mc.aper_size > config_aper_size)
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index dc68e538d5a9..34b7c6f16479 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -220,7 +220,7 @@ int r600_fmt_get_nblocksx(u32 format, u32 w)
if (bw == 0)
return 0;
- return (w + bw - 1) / bw;
+ return DIV_ROUND_UP(w, bw);
}
int r600_fmt_get_nblocksy(u32 format, u32 h)
@@ -234,7 +234,7 @@ int r600_fmt_get_nblocksy(u32 format, u32 h)
if (bh == 0)
return 0;
- return (h + bh - 1) / bh;
+ return DIV_ROUND_UP(h, bh);
}
struct array_mode_checker {
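The r600_cs.c hunks replace the open-coded ceiling division with DIV_ROUND_UP(). A quick standalone check that the macro matches a reference ceiling over a range of block widths:

#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int w, bw;

	for (bw = 1; bw <= 8; bw++)
		for (w = 0; w <= 64; w++) {
			unsigned int ref = w / bw + (w % bw ? 1 : 0);

			assert(DIV_ROUND_UP(w, bw) == ref);
		}

	printf("960 pixels in 4-wide blocks -> %u blocks\n",
	       DIV_ROUND_UP(960u, 4u));
	return 0;
}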
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5f3adba43e47..f09989bdce98 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -75,7 +75,6 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/drm_gem.h>
@@ -2314,6 +2313,9 @@ struct radeon_device {
struct device *dev;
struct drm_device *ddev;
struct pci_dev *pdev;
+#ifdef __alpha__
+ struct pci_controller *hose;
+#endif
struct rw_semaphore exclusive_lock;
/* ASIC */
union radeon_asic_config config;
@@ -2623,14 +2625,14 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
(rdev->family == CHIP_RV410) || \
(rdev->family == CHIP_RS400) || \
(rdev->family == CHIP_RS480))
-#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
- (rdev->ddev->pdev->device == 0x9443) || \
- (rdev->ddev->pdev->device == 0x944B) || \
- (rdev->ddev->pdev->device == 0x9506) || \
- (rdev->ddev->pdev->device == 0x9509) || \
- (rdev->ddev->pdev->device == 0x950F) || \
- (rdev->ddev->pdev->device == 0x689C) || \
- (rdev->ddev->pdev->device == 0x689D))
+#define ASIC_IS_X2(rdev) ((rdev->pdev->device == 0x9441) || \
+ (rdev->pdev->device == 0x9443) || \
+ (rdev->pdev->device == 0x944B) || \
+ (rdev->pdev->device == 0x9506) || \
+ (rdev->pdev->device == 0x9509) || \
+ (rdev->pdev->device == 0x950F) || \
+ (rdev->pdev->device == 0x689C) || \
+ (rdev->pdev->device == 0x689D))
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
(rdev->family == CHIP_RS690) || \
@@ -2653,14 +2655,14 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
(rdev->family == CHIP_MULLINS))
-#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
- (rdev->ddev->pdev->device == 0x6850) || \
- (rdev->ddev->pdev->device == 0x6858) || \
- (rdev->ddev->pdev->device == 0x6859) || \
- (rdev->ddev->pdev->device == 0x6840) || \
- (rdev->ddev->pdev->device == 0x6841) || \
- (rdev->ddev->pdev->device == 0x6842) || \
- (rdev->ddev->pdev->device == 0x6843))
+#define ASIC_IS_LOMBOK(rdev) ((rdev->pdev->device == 0x6849) || \
+ (rdev->pdev->device == 0x6850) || \
+ (rdev->pdev->device == 0x6858) || \
+ (rdev->pdev->device == 0x6859) || \
+ (rdev->pdev->device == 0x6840) || \
+ (rdev->pdev->device == 0x6841) || \
+ (rdev->pdev->device == 0x6842) || \
+ (rdev->pdev->device == 0x6843))
/*
* BIOS helpers.
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 8becbe09af2f..bfacf8fe5cc1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2478,6 +2478,9 @@ int radeon_asic_init(struct radeon_device *rdev)
if (rdev->family == CHIP_HAINAN) {
rdev->has_uvd = false;
rdev->has_vce = false;
+ } else if (rdev->family == CHIP_OLAND) {
+ rdev->has_uvd = true;
+ rdev->has_vce = false;
} else {
rdev->has_uvd = true;
rdev->has_vce = true;
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index be96d9b64e43..42301b4e56f5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -284,46 +284,47 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
uint16_t *line_mux,
struct radeon_hpd *hpd)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
- if ((dev->pdev->device == 0x791e) &&
- (dev->pdev->subsystem_vendor == 0x1043) &&
- (dev->pdev->subsystem_device == 0x826d)) {
+ if ((pdev->device == 0x791e) &&
+ (pdev->subsystem_vendor == 0x1043) &&
+ (pdev->subsystem_device == 0x826d)) {
if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
(supported_device == ATOM_DEVICE_DFP3_SUPPORT))
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
/* Asrock RS600 board lists the DVI port as HDMI */
- if ((dev->pdev->device == 0x7941) &&
- (dev->pdev->subsystem_vendor == 0x1849) &&
- (dev->pdev->subsystem_device == 0x7941)) {
+ if ((pdev->device == 0x7941) &&
+ (pdev->subsystem_vendor == 0x1849) &&
+ (pdev->subsystem_device == 0x7941)) {
if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
(supported_device == ATOM_DEVICE_DFP3_SUPPORT))
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
/* MSI K9A2GM V2/V3 board has no HDMI or DVI */
- if ((dev->pdev->device == 0x796e) &&
- (dev->pdev->subsystem_vendor == 0x1462) &&
- (dev->pdev->subsystem_device == 0x7302)) {
+ if ((pdev->device == 0x796e) &&
+ (pdev->subsystem_vendor == 0x1462) &&
+ (pdev->subsystem_device == 0x7302)) {
if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
(supported_device == ATOM_DEVICE_DFP3_SUPPORT))
return false;
}
/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
- if ((dev->pdev->device == 0x7941) &&
- (dev->pdev->subsystem_vendor == 0x147b) &&
- (dev->pdev->subsystem_device == 0x2412)) {
+ if ((pdev->device == 0x7941) &&
+ (pdev->subsystem_vendor == 0x147b) &&
+ (pdev->subsystem_device == 0x2412)) {
if (*connector_type == DRM_MODE_CONNECTOR_DVII)
return false;
}
/* Falcon NW laptop lists vga ddc line for LVDS */
- if ((dev->pdev->device == 0x5653) &&
- (dev->pdev->subsystem_vendor == 0x1462) &&
- (dev->pdev->subsystem_device == 0x0291)) {
+ if ((pdev->device == 0x5653) &&
+ (pdev->subsystem_vendor == 0x1462) &&
+ (pdev->subsystem_device == 0x0291)) {
if (*connector_type == DRM_MODE_CONNECTOR_LVDS) {
i2c_bus->valid = false;
*line_mux = 53;
@@ -331,26 +332,26 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
/* HIS X1300 is DVI+VGA, not DVI+DVI */
- if ((dev->pdev->device == 0x7146) &&
- (dev->pdev->subsystem_vendor == 0x17af) &&
- (dev->pdev->subsystem_device == 0x2058)) {
+ if ((pdev->device == 0x7146) &&
+ (pdev->subsystem_vendor == 0x17af) &&
+ (pdev->subsystem_device == 0x2058)) {
if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
return false;
}
/* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
- if ((dev->pdev->device == 0x7142) &&
- (dev->pdev->subsystem_vendor == 0x1458) &&
- (dev->pdev->subsystem_device == 0x2134)) {
+ if ((pdev->device == 0x7142) &&
+ (pdev->subsystem_vendor == 0x1458) &&
+ (pdev->subsystem_device == 0x2134)) {
if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
return false;
}
/* Funky macbooks */
- if ((dev->pdev->device == 0x71C5) &&
- (dev->pdev->subsystem_vendor == 0x106b) &&
- (dev->pdev->subsystem_device == 0x0080)) {
+ if ((pdev->device == 0x71C5) &&
+ (pdev->subsystem_vendor == 0x106b) &&
+ (pdev->subsystem_device == 0x0080)) {
if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
(supported_device == ATOM_DEVICE_DFP2_SUPPORT))
return false;
@@ -366,27 +367,27 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
/* ASUS HD 3600 XT board lists the DVI port as HDMI */
- if ((dev->pdev->device == 0x9598) &&
- (dev->pdev->subsystem_vendor == 0x1043) &&
- (dev->pdev->subsystem_device == 0x01da)) {
+ if ((pdev->device == 0x9598) &&
+ (pdev->subsystem_vendor == 0x1043) &&
+ (pdev->subsystem_device == 0x01da)) {
if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
*connector_type = DRM_MODE_CONNECTOR_DVII;
}
}
/* ASUS HD 3600 board lists the DVI port as HDMI */
- if ((dev->pdev->device == 0x9598) &&
- (dev->pdev->subsystem_vendor == 0x1043) &&
- (dev->pdev->subsystem_device == 0x01e4)) {
+ if ((pdev->device == 0x9598) &&
+ (pdev->subsystem_vendor == 0x1043) &&
+ (pdev->subsystem_device == 0x01e4)) {
if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
*connector_type = DRM_MODE_CONNECTOR_DVII;
}
}
/* ASUS HD 3450 board lists the DVI port as HDMI */
- if ((dev->pdev->device == 0x95C5) &&
- (dev->pdev->subsystem_vendor == 0x1043) &&
- (dev->pdev->subsystem_device == 0x01e2)) {
+ if ((pdev->device == 0x95C5) &&
+ (pdev->subsystem_vendor == 0x1043) &&
+ (pdev->subsystem_device == 0x01e2)) {
if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
*connector_type = DRM_MODE_CONNECTOR_DVII;
}
@@ -411,9 +412,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
* with different crtcs which isn't possible on the hardware
* side and leaves no crtcs for LVDS or VGA.
*/
- if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) &&
- (dev->pdev->subsystem_vendor == 0x1025) &&
- (dev->pdev->subsystem_device == 0x013c)) {
+ if (((pdev->device == 0x95c4) || (pdev->device == 0x9591)) &&
+ (pdev->subsystem_vendor == 0x1025) &&
+ (pdev->subsystem_device == 0x013c)) {
if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
(supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
/* actually it's a DVI-D port not DVI-I */
@@ -425,9 +426,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
/* XFX Pine Group device rv730 reports no VGA DDC lines
* even though they are wired up to record 0x93
*/
- if ((dev->pdev->device == 0x9498) &&
- (dev->pdev->subsystem_vendor == 0x1682) &&
- (dev->pdev->subsystem_device == 0x2452) &&
+ if ((pdev->device == 0x9498) &&
+ (pdev->subsystem_vendor == 0x1682) &&
+ (pdev->subsystem_device == 0x2452) &&
(i2c_bus->valid == false) &&
!(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
struct radeon_device *rdev = dev->dev_private;
@@ -435,11 +436,11 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
- if (((dev->pdev->device == 0x9802) ||
- (dev->pdev->device == 0x9805) ||
- (dev->pdev->device == 0x9806)) &&
- (dev->pdev->subsystem_vendor == 0x1734) &&
- (dev->pdev->subsystem_device == 0x11bd)) {
+ if (((pdev->device == 0x9802) ||
+ (pdev->device == 0x9805) ||
+ (pdev->device == 0x9806)) &&
+ (pdev->subsystem_vendor == 0x1734) &&
+ (pdev->subsystem_device == 0x11bd)) {
if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
*connector_type = DRM_MODE_CONNECTOR_DVII;
*line_mux = 0x3103;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index bb29cf02974d..33121655d50b 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -205,7 +205,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
continue;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
- if (!ACPI_FAILURE(status)) {
+ if (ACPI_SUCCESS(status)) {
found = true;
break;
}
@@ -218,7 +218,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
continue;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
- if (!ACPI_FAILURE(status)) {
+ if (ACPI_SUCCESS(status)) {
found = true;
break;
}
@@ -528,7 +528,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
fp2_gen_cntl = 0;
- if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
}
@@ -565,7 +565,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
(RADEON_CRTC_SYNC_TRISTAT |
RADEON_CRTC_DISPLAY_DIS)));
- if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
}
@@ -583,7 +583,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
}
WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
- if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
}
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index ff2135059c07..783a6b8802d5 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -894,13 +894,13 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
/* quirks */
/* Radeon 7000 (RV100) */
- if (((dev->pdev->device == 0x5159) &&
- (dev->pdev->subsystem_vendor == 0x174B) &&
- (dev->pdev->subsystem_device == 0x7c28)) ||
+ if (((rdev->pdev->device == 0x5159) &&
+ (rdev->pdev->subsystem_vendor == 0x174B) &&
+ (rdev->pdev->subsystem_device == 0x7c28)) ||
/* Radeon 9100 (R200) */
- ((dev->pdev->device == 0x514D) &&
- (dev->pdev->subsystem_vendor == 0x174B) &&
- (dev->pdev->subsystem_device == 0x7149))) {
+ ((rdev->pdev->device == 0x514D) &&
+ (rdev->pdev->subsystem_vendor == 0x174B) &&
+ (rdev->pdev->subsystem_device == 0x7149))) {
/* vbios value is bad, use the default */
found = 0;
}
@@ -2221,20 +2221,21 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
struct radeon_i2c_bus_rec *ddc_i2c,
struct radeon_hpd *hpd)
{
+ struct radeon_device *rdev = dev->dev_private;
/* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
- if (dev->pdev->device == 0x515e &&
- dev->pdev->subsystem_vendor == 0x1014) {
+ if (rdev->pdev->device == 0x515e &&
+ rdev->pdev->subsystem_vendor == 0x1014) {
if (*legacy_connector == CONNECTOR_CRT_LEGACY &&
ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
return false;
}
/* X300 card with extra non-existent DVI port */
- if (dev->pdev->device == 0x5B60 &&
- dev->pdev->subsystem_vendor == 0x17af &&
- dev->pdev->subsystem_device == 0x201e && bios_index == 2) {
+ if (rdev->pdev->device == 0x5B60 &&
+ rdev->pdev->subsystem_vendor == 0x17af &&
+ rdev->pdev->subsystem_device == 0x201e && bios_index == 2) {
if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
return false;
}
@@ -2244,22 +2245,24 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev)
{
+ struct radeon_device *rdev = dev->dev_private;
+
/* Acer 5102 has non-existent TV port */
- if (dev->pdev->device == 0x5975 &&
- dev->pdev->subsystem_vendor == 0x1025 &&
- dev->pdev->subsystem_device == 0x009f)
+ if (rdev->pdev->device == 0x5975 &&
+ rdev->pdev->subsystem_vendor == 0x1025 &&
+ rdev->pdev->subsystem_device == 0x009f)
return false;
/* HP dc5750 has non-existent TV port */
- if (dev->pdev->device == 0x5974 &&
- dev->pdev->subsystem_vendor == 0x103c &&
- dev->pdev->subsystem_device == 0x280a)
+ if (rdev->pdev->device == 0x5974 &&
+ rdev->pdev->subsystem_vendor == 0x103c &&
+ rdev->pdev->subsystem_device == 0x280a)
return false;
/* MSI S270 has non-existent TV port */
- if (dev->pdev->device == 0x5955 &&
- dev->pdev->subsystem_vendor == 0x1462 &&
- dev->pdev->subsystem_device == 0x0131)
+ if (rdev->pdev->device == 0x5955 &&
+ rdev->pdev->subsystem_vendor == 0x1462 &&
+ rdev->pdev->subsystem_device == 0x0131)
return false;
return true;
@@ -2413,9 +2416,9 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
/* RV100 board with external TDMS bit mis-set.
* Actually uses internal TMDS, clear the bit.
*/
- if (dev->pdev->device == 0x5159 &&
- dev->pdev->subsystem_vendor == 0x1014 &&
- dev->pdev->subsystem_device == 0x029A) {
+ if (rdev->pdev->device == 0x5159 &&
+ rdev->pdev->subsystem_vendor == 0x1014 &&
+ rdev->pdev->subsystem_device == 0x029A) {
tmp &= ~(1 << 4);
}
if ((tmp >> 4) & 0x1) {
@@ -2707,9 +2710,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
/* boards with a thermal chip, but no overdrive table */
/* Asus 9600xt has an f75375 on the monid bus */
- if ((dev->pdev->device == 0x4152) &&
- (dev->pdev->subsystem_vendor == 0x1043) &&
- (dev->pdev->subsystem_device == 0xc002)) {
+ if ((rdev->pdev->device == 0x4152) &&
+ (rdev->pdev->subsystem_vendor == 0x1043) &&
+ (rdev->pdev->subsystem_device == 0xc002)) {
i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
if (rdev->pm.i2c_bus) {
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index c6262fce7440..35e937d39b51 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -130,8 +130,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
* IGP chips to avoid image corruptions
*/
if (p->ring == R600_RING_TYPE_UVD_INDEX &&
- (i <= 0 || pci_find_capability(p->rdev->ddev->pdev,
- PCI_CAP_ID_AGP) ||
+ (i <= 0 || pci_find_capability(p->rdev->pdev, PCI_CAP_ID_AGP) ||
p->rdev->family == CHIP_RS780 ||
p->rdev->family == CHIP_RS880)) {
@@ -401,7 +400,8 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
/* Sort A before B if A is smaller. */
- return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
+ return (int)la->robj->tbo.mem.num_pages -
+ (int)lb->robj->tbo.mem.num_pages;
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ebccaa5b2d0e..2cbf14fc6ece 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1562,6 +1562,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
bool fbcon, bool freeze)
{
struct radeon_device *rdev;
+ struct pci_dev *pdev;
struct drm_crtc *crtc;
struct drm_connector *connector;
int i, r;
@@ -1571,6 +1572,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
}
rdev = dev->dev_private;
+ pdev = to_pci_dev(dev->dev);
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -1636,14 +1638,14 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
radeon_agp_suspend(rdev);
- pci_save_state(dev->pdev);
+ pci_save_state(pdev);
if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
rdev->asic->asic_reset(rdev, true);
- pci_restore_state(dev->pdev);
+ pci_restore_state(pdev);
} else if (suspend) {
/* Shut down the device */
- pci_disable_device(dev->pdev);
- pci_set_power_state(dev->pdev, PCI_D3hot);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
}
if (fbcon) {
@@ -1665,6 +1667,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct drm_crtc *crtc;
int r;
@@ -1675,9 +1678,9 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
console_lock();
}
if (resume) {
- pci_set_power_state(dev->pdev, PCI_D0);
- pci_restore_state(dev->pdev);
- if (pci_enable_device(dev->pdev)) {
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ if (pci_enable_device(pdev)) {
if (fbcon)
console_unlock();
return -1;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 3a6fedad002d..652af7a134bd 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1317,7 +1317,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
- dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
+ dev_err(dev->dev, "No GEM object associated to handle 0x%08X, "
"can't create framebuffer\n", mode_cmd->handles[0]);
return ERR_PTR(-ENOENT);
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e45d7344ac2b..efeb115ae70e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -342,14 +342,9 @@ static int radeon_pci_probe(struct pci_dev *pdev,
if (ret)
goto err_free;
- dev->pdev = pdev;
-#ifdef __alpha__
- dev->hose = pdev->sysdata;
-#endif
-
pci_set_drvdata(pdev, dev);
- if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
+ if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
dev->agp = drm_agp_init(dev);
if (dev->agp) {
dev->agp->agp_mtrr = arch_phys_wc_add(
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index fc4212633bdf..0b206b052972 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -290,7 +290,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
DRM_INFO("fb depth is %d\n", fb->format->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
- vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
+ vga_switcheroo_client_fb_set(rdev->pdev, info);
return 0;
out:
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index b6b21d2e7262..941826923247 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -651,7 +651,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
}
if (args->offset < RADEON_VA_RESERVED_SIZE) {
- dev_err(&dev->pdev->dev,
+ dev_err(dev->dev,
"offset 0x%lX is in reserved area 0x%X\n",
(unsigned long)args->offset,
RADEON_VA_RESERVED_SIZE);
@@ -665,7 +665,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
*/
invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
if ((args->flags & invalid_flags)) {
- dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
+ dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
args->flags, invalid_flags);
args->operation = RADEON_VA_RESULT_ERROR;
return -EINVAL;
@@ -676,7 +676,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
case RADEON_VA_UNMAP:
break;
default:
- dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+ dev_err(dev->dev, "unsupported operation %d\n",
args->operation);
args->operation = RADEON_VA_RESULT_ERROR;
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index e543d993f73e..314d066e68e9 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -919,7 +919,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
- i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
mutex_init(&i2c->mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index b8b7f627f0a9..84d0b1a3355f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -314,7 +314,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
rdev->irq.installed = true;
- r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
+ r = drm_irq_install(rdev->ddev, rdev->pdev->irq);
if (r) {
rdev->irq.installed = false;
flush_delayed_work(&rdev->hotplug_work);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 50cee4880bb4..2479d6ab7a36 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -76,7 +76,7 @@ void radeon_driver_unload_kms(struct drm_device *dev)
}
radeon_acpi_fini(rdev);
-
+
radeon_modeset_fini(rdev);
radeon_device_fini(rdev);
@@ -105,6 +105,7 @@ done_free:
*/
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct radeon_device *rdev;
int r, acpi_status;
@@ -114,10 +115,14 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
dev->dev_private = (void *)rdev;
+#ifdef __alpha__
+ rdev->hose = pdev->sysdata;
+#endif
+
/* update BUS flag */
- if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) {
+ if (pci_find_capability(pdev, PCI_CAP_ID_AGP)) {
flags |= RADEON_IS_AGP;
- } else if (pci_is_pcie(dev->pdev)) {
+ } else if (pci_is_pcie(pdev)) {
flags |= RADEON_IS_PCIE;
} else {
flags |= RADEON_IS_PCI;
@@ -126,7 +131,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
if ((radeon_runtime_pm != 0) &&
radeon_has_atpx() &&
((flags & RADEON_IS_IGP) == 0) &&
- !pci_is_thunderbolt_attached(dev->pdev))
+ !pci_is_thunderbolt_attached(pdev))
flags |= RADEON_IS_PX;
/* radeon_device_init should report only fatal error
@@ -135,9 +140,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
* properly initialize the GPU MC controller and permit
* VRAM allocation
*/
- r = radeon_device_init(rdev, dev, dev->pdev, flags);
+ r = radeon_device_init(rdev, dev, pdev, flags);
if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+ dev_err(dev->dev, "Fatal error during GPU init\n");
goto out;
}
@@ -147,7 +152,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
*/
r = radeon_modeset_init(rdev);
if (r)
- dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
+ dev_err(dev->dev, "Fatal error during modeset init\n");
/* Call ACPI methods: require modeset init
* but failure is not fatal
@@ -155,8 +160,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
if (!r) {
acpi_status = radeon_acpi_init(rdev);
if (acpi_status)
- dev_dbg(&dev->pdev->dev,
- "Error during ACPI methods call\n");
+ dev_dbg(dev->dev, "Error during ACPI methods call\n");
}
if (radeon_is_px(dev)) {
@@ -239,7 +243,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
- *value = dev->pdev->device;
+ *value = to_pci_dev(dev->dev)->device;
break;
case RADEON_INFO_NUM_GB_PIPES:
*value = rdev->num_gb_pipes;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index e64fd0ce6707..7fdb77d48d6a 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -974,9 +974,9 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
/* XXX: these are oem specific */
if (ASIC_IS_R300(rdev)) {
- if ((dev->pdev->device == 0x4850) &&
- (dev->pdev->subsystem_vendor == 0x1028) &&
- (dev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */
+ if ((rdev->pdev->device == 0x4850) &&
+ (rdev->pdev->subsystem_vendor == 0x1028) &&
+ (rdev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */
fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE;
else
fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 8bc5ad1d6585..9b81786782de 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -53,20 +53,19 @@ static void radeon_update_memory_usage(struct radeon_bo *bo,
unsigned mem_type, int sign)
{
struct radeon_device *rdev = bo->rdev;
- u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
switch (mem_type) {
case TTM_PL_TT:
if (sign > 0)
- atomic64_add(size, &rdev->gtt_usage);
+ atomic64_add(bo->tbo.base.size, &rdev->gtt_usage);
else
- atomic64_sub(size, &rdev->gtt_usage);
+ atomic64_sub(bo->tbo.base.size, &rdev->gtt_usage);
break;
case TTM_PL_VRAM:
if (sign > 0)
- atomic64_add(size, &rdev->vram_usage);
+ atomic64_add(bo->tbo.base.size, &rdev->vram_usage);
else
- atomic64_sub(size, &rdev->vram_usage);
+ atomic64_sub(bo->tbo.base.size, &rdev->vram_usage);
break;
}
}
@@ -255,7 +254,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
}
return 0;
}
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
if (r) {
return r;
}
@@ -609,7 +608,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
out:
radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
bo->tbo.mem.start << PAGE_SHIFT,
- bo->tbo.num_pages << PAGE_SHIFT);
+ bo->tbo.base.size);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index d606e9a935e3..9896d8231fe5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -109,12 +109,12 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
{
- return bo->tbo.num_pages << PAGE_SHIFT;
+ return bo->tbo.base.size;
}
static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
{
- return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+ return bo->tbo.base.size / RADEON_GPU_PAGE_SIZE;
}
static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index dd482edc819c..ab29eb9e8667 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -35,9 +35,9 @@
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct radeon_bo *bo = gem_to_radeon_bo(obj);
- int npages = bo->tbo.num_pages;
- return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages);
+ return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages,
+ bo->tbo.ttm->num_pages);
}
struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index c93f3ab3c4e3..1729cb9a95c5 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create,
TP_fast_assign(
__entry->bo = bo;
- __entry->pages = bo->tbo.num_pages;
+ __entry->pages = bo->tbo.mem.num_pages;
),
TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 23195d5d4e91..e8c66d10478f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -46,7 +46,6 @@
#include <drm/radeon_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>
#include "radeon_reg.h"
@@ -276,7 +275,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
out:
/* update statistics */
- atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
+ atomic64_add(bo->base.size, &rdev->num_bytes_moved);
radeon_bo_move_notify(bo, evict, new_mem);
return 0;
}
@@ -325,7 +324,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
* access, as done in ttm_bo_vm_fault().
*/
mem->bus.offset = (mem->bus.offset & 0x0ffffffffUL) +
- rdev->ddev->hose->dense_mem_base;
+ rdev->hose->dense_mem_base;
#endif
break;
default:
@@ -396,8 +395,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *
if (r)
goto release_sg;
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- gtt->ttm.dma_address, ttm->num_pages);
+ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+ ttm->num_pages);
return 0;
@@ -535,7 +534,7 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
else
caching = ttm_cached;
- if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags, caching)) {
+ if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
kfree(gtt);
return NULL;
}
@@ -573,8 +572,8 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
}
if (slave && ttm->sg) {
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- gtt->ttm.dma_address, ttm->num_pages);
+ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+ ttm->num_pages);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 39c1c339be7b..dfa9fdbe98da 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -781,7 +781,7 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
RADEON_GPU_PAGE_SIZE;
- uint32_t *msg = rdev->uvd.cpu_addr + offs;
+ uint32_t __iomem *msg = (void __iomem *)(rdev->uvd.cpu_addr + offs);
uint64_t addr = rdev->uvd.gpu_addr + offs;
int r, i;
@@ -791,19 +791,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
return r;
/* stitch together an UVD create msg */
- msg[0] = cpu_to_le32(0x00000de4);
- msg[1] = cpu_to_le32(0x00000000);
- msg[2] = cpu_to_le32(handle);
- msg[3] = cpu_to_le32(0x00000000);
- msg[4] = cpu_to_le32(0x00000000);
- msg[5] = cpu_to_le32(0x00000000);
- msg[6] = cpu_to_le32(0x00000000);
- msg[7] = cpu_to_le32(0x00000780);
- msg[8] = cpu_to_le32(0x00000440);
- msg[9] = cpu_to_le32(0x00000000);
- msg[10] = cpu_to_le32(0x01b37000);
+ writel(cpu_to_le32(0x00000de4), &msg[0]);
+ writel(0x0, (void __iomem *)&msg[1]);
+ writel(cpu_to_le32(handle), &msg[2]);
+ writel(0x0, &msg[3]);
+ writel(0x0, &msg[4]);
+ writel(0x0, &msg[5]);
+ writel(0x0, &msg[6]);
+ writel(cpu_to_le32(0x00000780), &msg[7]);
+ writel(cpu_to_le32(0x00000440), &msg[8]);
+ writel(0x0, &msg[9]);
+ writel(cpu_to_le32(0x01b37000), &msg[10]);
for (i = 11; i < 1024; ++i)
- msg[i] = cpu_to_le32(0x0);
+ writel(0x0, &msg[i]);
r = radeon_uvd_send_msg(rdev, ring, addr, fence);
radeon_bo_unreserve(rdev->uvd.vcpu_bo);
@@ -817,7 +817,7 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
RADEON_GPU_PAGE_SIZE;
- uint32_t *msg = rdev->uvd.cpu_addr + offs;
+ uint32_t __iomem *msg = (void __iomem *)(rdev->uvd.cpu_addr + offs);
uint64_t addr = rdev->uvd.gpu_addr + offs;
int r, i;
@@ -827,12 +827,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
return r;
/* stitch together an UVD destroy msg */
- msg[0] = cpu_to_le32(0x00000de4);
- msg[1] = cpu_to_le32(0x00000002);
- msg[2] = cpu_to_le32(handle);
- msg[3] = cpu_to_le32(0x00000000);
+ writel(cpu_to_le32(0x00000de4), &msg[0]);
+ writel(cpu_to_le32(0x00000002), &msg[1]);
+ writel(cpu_to_le32(handle), &msg[2]);
+ writel(0x0, &msg[3]);
for (i = 4; i < 1024; ++i)
- msg[i] = cpu_to_le32(0x0);
+ writel(0x0, &msg[i]);
r = radeon_uvd_send_msg(rdev, ring, addr, fence);
radeon_bo_unreserve(rdev->uvd.vcpu_bo);
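[Editor's aside, not part of the patch: the UVD hunks above switch from plain array stores to writel() once the VCPU buffer's CPU pointer is annotated __iomem. The sketch below, with hypothetical names, shows the accessor pattern being adopted; it is an illustration, not the driver's actual helper.]

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical helper: fill a message buffer that sits behind an __iomem
 * mapping. Every store goes through writel(); plain assignments through an
 * __iomem pointer are flagged by sparse and are not guaranteed to reach the
 * device correctly on all architectures.
 */
static void example_fill_msg(u32 __iomem *msg, u32 handle)
{
	unsigned int i;

	writel(0x00000de4, &msg[0]);	/* message length */
	writel(0x00000000, &msg[1]);
	writel(handle, &msg[2]);
	for (i = 3; i < 1024; ++i)
		writel(0x00000000, &msg[i]);
}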
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index a450497368b2..511a942e851d 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -68,7 +68,6 @@ int radeon_vce_init(struct radeon_device *rdev)
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
- case CHIP_OLAND:
case CHIP_ARUBA:
fw_name = FIRMWARE_TAHITI;
break;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index c296f94f9700..7bc302a89232 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -187,7 +187,7 @@ static void rs690_mc_init(struct radeon_device *rdev)
/* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
* memory is present.
*/
- if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+ if (!rdev->mc.igp_sideport_enabled && radeon_fastfb == 1) {
DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
(unsigned long long)rdev->mc.aper_base, k8_addr);
rdev->mc.aper_base = (resource_size_t)k8_addr;
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 17390074277a..24ad12409120 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -223,16 +223,15 @@ static void rs780_preset_starting_fbdiv(struct radeon_device *rdev)
static void rs780_voltage_scaling_init(struct radeon_device *rdev)
{
struct igp_power_info *pi = rs780_get_pi(rdev);
- struct drm_device *dev = rdev->ddev;
u32 fv_throt_pwm_fb_div_range[3];
u32 fv_throt_pwm_range[4];
- if (dev->pdev->device == 0x9614) {
+ if (rdev->pdev->device == 0x9614) {
fv_throt_pwm_fb_div_range[0] = RS780D_FVTHROTPWMFBDIVRANGEREG0_DFLT;
fv_throt_pwm_fb_div_range[1] = RS780D_FVTHROTPWMFBDIVRANGEREG1_DFLT;
fv_throt_pwm_fb_div_range[2] = RS780D_FVTHROTPWMFBDIVRANGEREG2_DFLT;
- } else if ((dev->pdev->device == 0x9714) ||
- (dev->pdev->device == 0x9715)) {
+ } else if ((rdev->pdev->device == 0x9714) ||
+ (rdev->pdev->device == 0x9715)) {
fv_throt_pwm_fb_div_range[0] = RS880D_FVTHROTPWMFBDIVRANGEREG0_DFLT;
fv_throt_pwm_fb_div_range[1] = RS880D_FVTHROTPWMFBDIVRANGEREG1_DFLT;
fv_throt_pwm_fb_div_range[2] = RS880D_FVTHROTPWMFBDIVRANGEREG2_DFLT;
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
index 70c5da2141d7..bdfbcf14b864 100644
--- a/drivers/gpu/drm/radeon/vce_v1_0.c
+++ b/drivers/gpu/drm/radeon/vce_v1_0.c
@@ -169,7 +169,6 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
chip_id = 0x01000015;
break;
case CHIP_PITCAIRN:
- case CHIP_OLAND:
chip_id = 0x01000016;
break;
case CHIP_ARUBA:
diff --git a/drivers/gpu/drm/rcar-du/rcar_cmm.c b/drivers/gpu/drm/rcar-du/rcar_cmm.c
index c578095b09a5..382d53f8a22e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_cmm.c
+++ b/drivers/gpu/drm/rcar-du/rcar_cmm.c
@@ -122,7 +122,7 @@ int rcar_cmm_enable(struct platform_device *pdev)
{
int ret;
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;
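[Editor's aside, not part of the patch: the rcar_cmm hunk replaces pm_runtime_get_sync() with pm_runtime_resume_and_get(). A minimal sketch of the difference, with illustrative names:]

#include <linux/pm_runtime.h>

/* Sketch only: pm_runtime_resume_and_get() drops the usage count itself when
 * the resume fails, so the error path does not need the
 * pm_runtime_put_noidle() that a failed pm_runtime_get_sync() would require.
 */
static int example_enable(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;		/* usage count already balanced */

	/* ... program the hardware, pm_runtime_put() later on disable ... */

	return 0;
}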
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index b5fb941e0f53..ea7e39d03545 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -730,13 +730,10 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
*/
if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
- struct rcar_du_encoder *encoder =
- rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
+ struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
const struct drm_display_mode *mode =
&crtc->state->adjusted_mode;
- struct drm_bridge *bridge;
- bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
rcar_lvds_clk_enable(bridge, mode->clock * 1000);
}
@@ -764,15 +761,12 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
- struct rcar_du_encoder *encoder =
- rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
- struct drm_bridge *bridge;
+ struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
/*
* Disable the LVDS clock output, see
* rcar_du_crtc_atomic_enable().
*/
- bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
rcar_lvds_clk_disable(bridge);
}
@@ -1144,7 +1138,6 @@ static const struct drm_crtc_funcs crtc_funcs_gen3 = {
.set_crc_source = rcar_du_crtc_set_crc_source,
.verify_crc_source = rcar_du_crtc_verify_crc_source,
.get_crc_sources = rcar_du_crtc_get_crc_sources,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
/* -----------------------------------------------------------------------------
@@ -1257,7 +1250,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
else
primary = &rgrp->planes[swindex % 2].plane;
- ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, primary, NULL,
+ ret = drm_crtc_init_with_planes(&rcdu->ddev, crtc, primary, NULL,
rcdu->info->gen <= 2 ?
&crtc_funcs_gen2 : &crtc_funcs_gen3,
NULL);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 600056dff374..bfbff90588cb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -18,10 +18,11 @@
#include <linux/wait.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "rcar_du_drv.h"
@@ -527,14 +528,14 @@ static int rcar_du_pm_suspend(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
- return drm_mode_config_helper_suspend(rcdu->ddev);
+ return drm_mode_config_helper_suspend(&rcdu->ddev);
}
static int rcar_du_pm_resume(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
- return drm_mode_config_helper_resume(rcdu->ddev);
+ return drm_mode_config_helper_resume(&rcdu->ddev);
}
#endif
@@ -549,7 +550,7 @@ static const struct dev_pm_ops rcar_du_pm_ops = {
static int rcar_du_remove(struct platform_device *pdev)
{
struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
- struct drm_device *ddev = rcdu->ddev;
+ struct drm_device *ddev = &rcdu->ddev;
drm_dev_unregister(ddev);
@@ -563,14 +564,14 @@ static int rcar_du_remove(struct platform_device *pdev)
static int rcar_du_probe(struct platform_device *pdev)
{
struct rcar_du_device *rcdu;
- struct drm_device *ddev;
struct resource *mem;
int ret;
/* Allocate and initialize the R-Car device structure. */
- rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
- if (rcdu == NULL)
- return -ENOMEM;
+ rcdu = devm_drm_dev_alloc(&pdev->dev, &rcar_du_driver,
+ struct rcar_du_device, ddev);
+ if (IS_ERR(rcdu))
+ return PTR_ERR(rcdu);
rcdu->dev = &pdev->dev;
rcdu->info = of_device_get_match_data(rcdu->dev);
@@ -584,13 +585,6 @@ static int rcar_du_probe(struct platform_device *pdev)
return PTR_ERR(rcdu->mmio);
/* DRM/KMS objects */
- ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
- if (IS_ERR(ddev))
- return PTR_ERR(ddev);
-
- rcdu->ddev = ddev;
- ddev->dev_private = rcdu;
-
ret = rcar_du_modeset_init(rcdu);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
@@ -599,25 +593,24 @@ static int rcar_du_probe(struct platform_device *pdev)
goto error;
}
- ddev->irq_enabled = 1;
+ rcdu->ddev.irq_enabled = 1;
/*
* Register the DRM device with the core and the connectors with
* sysfs.
*/
- ret = drm_dev_register(ddev, 0);
+ ret = drm_dev_register(&rcdu->ddev, 0);
if (ret)
goto error;
DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
- drm_fbdev_generic_setup(ddev, 32);
+ drm_fbdev_generic_setup(&rcdu->ddev, 32);
return 0;
error:
- rcar_du_remove(pdev);
-
+ drm_kms_helper_poll_fini(&rcdu->ddev);
return ret;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 61504c54e2ec..02ca2d0e1b55 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -13,6 +13,8 @@
#include <linux/kernel.h>
#include <linux/wait.h>
+#include <drm/drm_device.h>
+
#include "rcar_cmm.h"
#include "rcar_du_crtc.h"
#include "rcar_du_group.h"
@@ -20,10 +22,9 @@
struct clk;
struct device;
-struct drm_device;
+struct drm_bridge;
struct drm_property;
struct rcar_du_device;
-struct rcar_du_encoder;
#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */
#define RCAR_DU_FEATURE_VSP1_SOURCE BIT(1) /* Has inputs from VSP1 */
@@ -71,6 +72,7 @@ struct rcar_du_device_info {
#define RCAR_DU_MAX_CRTCS 4
#define RCAR_DU_MAX_GROUPS DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2)
#define RCAR_DU_MAX_VSPS 4
+#define RCAR_DU_MAX_LVDS 2
struct rcar_du_device {
struct device *dev;
@@ -78,16 +80,15 @@ struct rcar_du_device {
void __iomem *mmio;
- struct drm_device *ddev;
+ struct drm_device ddev;
struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS];
unsigned int num_crtcs;
- struct rcar_du_encoder *encoders[RCAR_DU_OUTPUT_MAX];
-
struct rcar_du_group groups[RCAR_DU_MAX_GROUPS];
struct platform_device *cmms[RCAR_DU_MAX_CRTCS];
struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
+ struct drm_bridge *lvds[RCAR_DU_MAX_LVDS];
struct {
struct drm_property *colorkey;
@@ -98,6 +99,11 @@ struct rcar_du_device {
unsigned int vspd1_sink;
};
+static inline struct rcar_du_device *to_rcar_du_device(struct drm_device *dev)
+{
+ return container_of(dev, struct rcar_du_device, ddev);
+}
+
static inline bool rcar_du_has(struct rcar_du_device *rcdu,
unsigned int feature)
{
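[Editor's aside, not part of the patch: rcar_du_drv.c/.h above convert the driver to an embedded drm_device. The sketch below shows that general pattern with made-up names (example_device, example_driver); the real driver's fields differ.]

#include <linux/platform_device.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>

/* Embedded-drm_device pattern: the drm_device is a member of the driver
 * structure, allocated with devm_drm_dev_alloc(), and recovered from a plain
 * struct drm_device pointer with container_of() instead of dev_private.
 */
static const struct drm_driver example_driver = {
	/* fops, name, major/minor, ... elided for brevity */
};

struct example_device {
	struct device *dev;
	struct drm_device ddev;
};

static inline struct example_device *to_example_device(struct drm_device *dev)
{
	return container_of(dev, struct example_device, ddev);
}

static int example_probe(struct platform_device *pdev)
{
	struct example_device *edev;

	edev = devm_drm_dev_alloc(&pdev->dev, &example_driver,
				  struct example_device, ddev);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	edev->dev = &pdev->dev;
	return drm_dev_register(&edev->ddev, 0);
}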
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index b0335da0c161..ba8c6038cd63 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -8,12 +8,13 @@
*/
#include <linux/export.h>
+#include <linux/slab.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_panel.h>
-#include <drm/drm_simple_kms_helper.h>
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
@@ -44,26 +45,25 @@ static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
return num_ports;
}
+static const struct drm_encoder_funcs rcar_du_encoder_funcs = {
+};
+
+static void rcar_du_encoder_release(struct drm_device *dev, void *res)
+{
+ struct rcar_du_encoder *renc = res;
+
+ drm_encoder_cleanup(&renc->base);
+ kfree(renc);
+}
+
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_output output,
struct device_node *enc_node)
{
struct rcar_du_encoder *renc;
- struct drm_encoder *encoder;
struct drm_bridge *bridge;
int ret;
- renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
- if (renc == NULL)
- return -ENOMEM;
-
- rcdu->encoders[output] = renc;
- renc->output = output;
- encoder = rcar_encoder_to_drm_encoder(renc);
-
- dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
- enc_node, output);
-
/*
* Locate the DRM bridge from the DT node. For the DPAD outputs, if the
* DT node has a single port, assume that it describes a panel and
@@ -74,57 +74,57 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
rcar_du_encoder_count_ports(enc_node) == 1) {
struct drm_panel *panel = of_drm_find_panel(enc_node);
- if (IS_ERR(panel)) {
- ret = PTR_ERR(panel);
- goto done;
- }
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
bridge = devm_drm_panel_bridge_add_typed(rcdu->dev, panel,
DRM_MODE_CONNECTOR_DPI);
- if (IS_ERR(bridge)) {
- ret = PTR_ERR(bridge);
- goto done;
- }
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
} else {
bridge = of_drm_find_bridge(enc_node);
- if (!bridge) {
- ret = -EPROBE_DEFER;
- goto done;
- }
+ if (!bridge)
+ return -EPROBE_DEFER;
+
+ if (output == RCAR_DU_OUTPUT_LVDS0 ||
+ output == RCAR_DU_OUTPUT_LVDS1)
+ rcdu->lvds[output - RCAR_DU_OUTPUT_LVDS0] = bridge;
}
/*
- * On Gen3 skip the LVDS1 output if the LVDS1 encoder is used as a
- * companion for LVDS0 in dual-link mode.
+ * Create and initialize the encoder. On Gen3 skip the LVDS1 output if
+ * the LVDS1 encoder is used as a companion for LVDS0 in dual-link
+ * mode.
*/
if (rcdu->info->gen >= 3 && output == RCAR_DU_OUTPUT_LVDS1) {
- if (rcar_lvds_dual_link(bridge)) {
- ret = -ENOLINK;
- goto done;
- }
+ if (rcar_lvds_dual_link(bridge))
+ return -ENOLINK;
}
- ret = drm_simple_encoder_init(rcdu->ddev, encoder,
- DRM_MODE_ENCODER_NONE);
- if (ret < 0)
- goto done;
+ renc = kzalloc(sizeof(*renc), GFP_KERNEL);
+ if (renc == NULL)
+ return -ENOMEM;
- /*
- * Attach the bridge to the encoder. The bridge will create the
- * connector.
- */
- ret = drm_bridge_attach(encoder, bridge, NULL, 0);
- if (ret) {
- drm_encoder_cleanup(encoder);
- return ret;
- }
+ renc->output = output;
+
+ dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
+ enc_node, output);
-done:
+ ret = drm_encoder_init(&rcdu->ddev, &renc->base, &rcar_du_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
if (ret < 0) {
- if (encoder->name)
- encoder->funcs->destroy(encoder);
- devm_kfree(rcdu->dev, renc);
+ kfree(renc);
+ return ret;
}
- return ret;
+ ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_encoder_release,
+ renc);
+ if (ret)
+ return ret;
+
+ /*
+ * Attach the bridge to the encoder. The bridge will create the
+ * connector.
+ */
+ return drm_bridge_attach(&renc->base, bridge, NULL, 0);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index df9be4524301..73560563fb31 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -22,8 +22,6 @@ struct rcar_du_encoder {
#define to_rcar_encoder(e) \
container_of(e, struct rcar_du_encoder, base)
-#define rcar_encoder_to_drm_encoder(e) (&(e)->base)
-
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_output output,
struct device_node *enc_node);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 72dda446355f..fdb8a0d127ad 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -14,6 +14,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -327,7 +328,7 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct rcar_du_device *rcdu = dev->dev_private;
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
unsigned int align;
@@ -349,7 +350,7 @@ static struct drm_framebuffer *
rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct rcar_du_device *rcdu = dev->dev_private;
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
const struct rcar_du_format_info *format;
unsigned int chroma_pitch;
unsigned int max_pitch;
@@ -421,7 +422,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
static int rcar_du_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct rcar_du_device *rcdu = dev->dev_private;
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
int ret;
ret = drm_atomic_helper_check(dev, state);
@@ -437,7 +438,7 @@ static int rcar_du_atomic_check(struct drm_device *dev,
static void rcar_du_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
- struct rcar_du_device *rcdu = dev->dev_private;
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
unsigned int i;
@@ -583,7 +584,7 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu)
* or enable source color keying (1).
*/
rcdu->props.colorkey =
- drm_property_create_range(rcdu->ddev, 0, "colorkey",
+ drm_property_create_range(&rcdu->ddev, 0, "colorkey",
0, 0x01ffffff);
if (rcdu->props.colorkey == NULL)
return -ENOMEM;
@@ -700,10 +701,10 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
int ret;
cmm = of_parse_phandle(np, "renesas,cmms", i);
- if (IS_ERR(cmm)) {
+ if (!cmm) {
dev_err(rcdu->dev,
"Failed to parse 'renesas,cmms' property\n");
- return PTR_ERR(cmm);
+ return -EINVAL;
}
if (!of_device_is_available(cmm)) {
@@ -713,10 +714,10 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
}
pdev = of_find_device_by_node(cmm);
- if (IS_ERR(pdev)) {
+ if (!pdev) {
dev_err(rcdu->dev, "No device found for CMM%u\n", i);
of_node_put(cmm);
- return PTR_ERR(pdev);
+ return -EINVAL;
}
of_node_put(cmm);
@@ -726,8 +727,12 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
* disabled: return 0 and let the DU continue probing.
*/
ret = rcar_cmm_init(pdev);
- if (ret)
+ if (ret) {
+ platform_device_put(pdev);
return ret == -ENODEV ? 0 : ret;
+ }
+
+ rcdu->cmms[i] = pdev;
/*
* Enforce suspend/resume ordering by making the CMM a provider
@@ -739,20 +744,27 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
"Failed to create device link to CMM%u\n", i);
return -EINVAL;
}
-
- rcdu->cmms[i] = pdev;
}
return 0;
}
+static void rcar_du_modeset_cleanup(struct drm_device *dev, void *res)
+{
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rcdu->cmms); ++i)
+ platform_device_put(rcdu->cmms[i]);
+}
+
int rcar_du_modeset_init(struct rcar_du_device *rcdu)
{
static const unsigned int mmio_offsets[] = {
DU0_REG_OFFSET, DU2_REG_OFFSET
};
- struct drm_device *dev = rcdu->ddev;
+ struct drm_device *dev = &rcdu->ddev;
struct drm_encoder *encoder;
unsigned int dpad0_sources;
unsigned int num_encoders;
@@ -766,6 +778,10 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
if (ret)
return ret;
+ ret = drmm_add_action(&rcdu->ddev, rcar_du_modeset_cleanup, NULL);
+ if (ret)
+ return ret;
+
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.normalize_zpos = true;
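[Editor's aside, not part of the patch: the CMM hunks above fix the error handling around of_parse_phandle()/of_find_device_by_node(), which return NULL rather than ERR_PTR, and make sure the platform-device reference is dropped on failure. A minimal sketch of that lookup, with illustrative names:]

#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_platform.h>

/* Both lookups return NULL on failure; the reference taken by
 * of_find_device_by_node() must be dropped by the caller once it is done
 * with the device (platform_device_put()).
 */
static struct platform_device *example_get_companion(struct device_node *np,
						     unsigned int index)
{
	struct device_node *node;
	struct platform_device *pdev;

	node = of_parse_phandle(np, "renesas,cmms", index);
	if (!node)
		return ERR_PTR(-EINVAL);

	pdev = of_find_device_by_node(node);
	of_node_put(node);
	if (!pdev)
		return ERR_PTR(-EINVAL);

	return pdev;	/* caller owns the reference */
}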
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index a0021fc25b27..02e5f11f38eb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -128,7 +128,7 @@ static int rcar_du_plane_hwalloc(struct rcar_du_plane *plane,
int rcar_du_atomic_check_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct rcar_du_device *rcdu = dev->dev_private;
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
bool needs_realloc = false;
@@ -773,9 +773,9 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
plane->group = rgrp;
- ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
- &rcar_du_plane_funcs, formats,
- ARRAY_SIZE(formats),
+ ret = drm_universal_plane_init(&rcdu->ddev, &plane->plane,
+ crtcs, &rcar_du_plane_funcs,
+ formats, ARRAY_SIZE(formats),
NULL, type, NULL);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index f6a69aa116e6..53221d8473c1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -21,6 +21,7 @@
#include <linux/dma-mapping.h>
#include <linux/of_platform.h>
#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/vsp1.h>
@@ -344,6 +345,15 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
static void rcar_du_vsp_cleanup(struct drm_device *dev, void *res)
{
struct rcar_du_vsp *vsp = res;
+ unsigned int i;
+
+ for (i = 0; i < vsp->num_planes; ++i) {
+ struct rcar_du_vsp_plane *plane = &vsp->planes[i];
+
+ drm_plane_cleanup(&plane->plane);
+ }
+
+ kfree(vsp->planes);
put_device(vsp->vsp);
}
@@ -354,6 +364,7 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
struct rcar_du_device *rcdu = vsp->dev;
struct platform_device *pdev;
unsigned int num_crtcs = hweight32(crtcs);
+ unsigned int num_planes;
unsigned int i;
int ret;
@@ -364,7 +375,7 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
vsp->vsp = &pdev->dev;
- ret = drmm_add_action(rcdu->ddev, rcar_du_vsp_cleanup, vsp);
+ ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_vsp_cleanup, vsp);
if (ret < 0)
return ret;
@@ -376,14 +387,13 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
* The VSP2D (Gen3) has 5 RPFs, but the VSP1D (Gen2) is limited to
* 4 RPFs.
*/
- vsp->num_planes = rcdu->info->gen >= 3 ? 5 : 4;
+ num_planes = rcdu->info->gen >= 3 ? 5 : 4;
- vsp->planes = devm_kcalloc(rcdu->dev, vsp->num_planes,
- sizeof(*vsp->planes), GFP_KERNEL);
+ vsp->planes = kcalloc(num_planes, sizeof(*vsp->planes), GFP_KERNEL);
if (!vsp->planes)
return -ENOMEM;
- for (i = 0; i < vsp->num_planes; ++i) {
+ for (i = 0; i < num_planes; ++i) {
enum drm_plane_type type = i < num_crtcs
? DRM_PLANE_TYPE_PRIMARY
: DRM_PLANE_TYPE_OVERLAY;
@@ -392,8 +402,8 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
plane->vsp = vsp;
plane->index = i;
- ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
- &rcar_du_vsp_plane_funcs,
+ ret = drm_universal_plane_init(&rcdu->ddev, &plane->plane,
+ crtcs, &rcar_du_vsp_plane_funcs,
rcar_du_vsp_formats,
ARRAY_SIZE(rcar_du_vsp_formats),
NULL, type, NULL);
@@ -409,8 +419,10 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
} else {
drm_plane_create_alpha_property(&plane->plane);
drm_plane_create_zpos_property(&plane->plane, 1, 1,
- vsp->num_planes - 1);
+ num_planes - 1);
}
+
+ vsp->num_planes++;
}
return 0;
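[Editor's aside, not part of the patch: in the VSP hunks above, num_planes is now incremented one plane at a time and the managed cleanup action tears down only the planes that were actually created. A sketch of the shape of that loop, with hypothetical names (example_vsp, example_plane_init):]

/* vsp->num_planes is only bumped after a plane is fully initialized, so a
 * cleanup callback that iterates num_planes never touches a plane that
 * drm_universal_plane_init() failed to create.
 */
static int example_init_planes(struct example_vsp *vsp, unsigned int count)
{
	unsigned int i;
	int ret;

	vsp->planes = kcalloc(count, sizeof(*vsp->planes), GFP_KERNEL);
	if (!vsp->planes)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		ret = example_plane_init(&vsp->planes[i]);
		if (ret)
			return ret;	/* cleanup only walks num_planes */

		vsp->num_planes++;
	}

	return 0;
}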
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
index 04efa78d70b6..c79d1259e49b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
@@ -204,7 +204,7 @@ int rcar_du_writeback_init(struct rcar_du_device *rcdu,
drm_connector_helper_add(&wb_conn->base,
&rcar_du_wb_conn_helper_funcs);
- return drm_writeback_connector_init(rcdu->ddev, wb_conn,
+ return drm_writeback_connector_init(&rcdu->ddev, wb_conn,
&rcar_du_wb_conn_funcs,
&rcar_du_wb_enc_helper_funcs,
writeback_formats,
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 310aa1546893..cb25c0e8fc9b 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -49,7 +49,7 @@ config ROCKCHIP_DW_MIPI_DSI
select GENERIC_PHY_MIPI_DPHY
help
This selects support for Rockchip SoC specific extensions
- for the Synopsys DesignWare HDMI driver. If you want to
+ for the Synopsys DesignWare DSI driver. If you want to
enable MIPI DSI on RK3288 or RK3399 based SoC, you should
select this option.
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index e84325e56d98..24a71091759c 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -1089,7 +1089,7 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
dsi->grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(dsi->grf_regmap)) {
- DRM_DEV_ERROR(dsi->dev, "Unable to get rockchip,grf\n");
+ DRM_DEV_ERROR(dev, "Unable to get rockchip,grf\n");
return PTR_ERR(dsi->grf_regmap);
}
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 23de359a1dec..830bdd5e9b7c 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -202,7 +202,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
} else if (PTR_ERR(hdmi->vpll_clk) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (IS_ERR(hdmi->vpll_clk)) {
- DRM_DEV_ERROR(hdmi->dev, "failed to get grf clock\n");
+ DRM_DEV_ERROR(hdmi->dev, "failed to get vpll clock\n");
return PTR_ERR(hdmi->vpll_clk);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index d1e05482641b..8d15cabdcb02 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1643,7 +1643,6 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
.disable_vblank = vop_crtc_disable_vblank,
.set_crc_source = vop_crtc_set_crc_source,
.verify_crc_source = vop_crtc_verify_crc_source,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 4a2099cb582e..857d97cdc67c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -17,9 +17,20 @@
#define NUM_YUV2YUV_COEFFICIENTS 12
+/* AFBC supports a number of configurable modes. Relevant to us are block size
+ * (16x16 or 32x8), storage modifiers (SPARSE, SPLIT), and the YUV-like
+ * colourspace transform (YTR). 16x16 SPARSE mode is always used. SPLIT mode
+ * could be enabled via the hreg_block_split register, but is not currently
+ * handled. The colourspace transform is implicitly always assumed by the
+ * decoder, so consumers must use this transform as well.
+ *
+ * Failure to match modifiers will cause errors displaying AFBC buffers
+ * produced by conformant AFBC producers, including Mesa.
+ */
#define ROCKCHIP_AFBC_MOD \
DRM_FORMAT_MOD_ARM_AFBC( \
AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE \
+ | AFBC_FORMAT_MOD_YTR \
)
enum vop_data_format {
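[Editor's aside, not part of the patch: per the comment added above, the AFBC modifier must match exactly between producer and consumer or scan-out of AFBC buffers will fail. The snippet below just shows how the same modifier value would be spelled on the producer side; it is illustrative, not driver code.]

#include <drm/drm_fourcc.h>

/* A buffer exporter advertising the combination the VOP decodes:
 * 16x16 superblocks, sparse layout, and the YUV-like colourspace
 * transform (YTR).
 */
static const u64 example_afbc_modifier =
	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
				AFBC_FORMAT_MOD_SPARSE |
				AFBC_FORMAT_MOD_YTR);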
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index b498d474ef9e..92637b70c9bf 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -60,8 +60,6 @@
#define to_drm_sched_job(sched_job) \
container_of((sched_job), struct drm_sched_job, queue_node)
-static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
-
/**
* drm_sched_rq_init - initialize a given run queue struct
*
@@ -164,6 +162,40 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
}
/**
+ * drm_sched_job_done - complete a job
+ * @s_job: pointer to the job which is done
+ *
+ * Finish the job's fence and wake up the worker thread.
+ */
+static void drm_sched_job_done(struct drm_sched_job *s_job)
+{
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+ struct drm_gpu_scheduler *sched = s_fence->sched;
+
+ atomic_dec(&sched->hw_rq_count);
+ atomic_dec(&sched->score);
+
+ trace_drm_sched_process_job(s_fence);
+
+ dma_fence_get(&s_fence->finished);
+ drm_sched_fence_finished(s_fence);
+ dma_fence_put(&s_fence->finished);
+ wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * drm_sched_job_done_cb - the callback for a done job
+ * @f: fence
+ * @cb: fence callbacks
+ */
+static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
+
+ drm_sched_job_done(s_job);
+}
+
+/**
* drm_sched_dependency_optimized
*
* @fence: the dependency fence
@@ -199,7 +231,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
- !list_empty(&sched->ring_mirror_list))
+ !list_empty(&sched->pending_list))
schedule_delayed_work(&sched->work_tdr, sched->timeout);
}
@@ -259,7 +291,7 @@ void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
{
spin_lock(&sched->job_list_lock);
- if (list_empty(&sched->ring_mirror_list))
+ if (list_empty(&sched->pending_list))
cancel_delayed_work(&sched->work_tdr);
else
mod_delayed_work(system_wq, &sched->work_tdr, remaining);
@@ -273,7 +305,7 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
struct drm_gpu_scheduler *sched = s_job->sched;
spin_lock(&sched->job_list_lock);
- list_add_tail(&s_job->node, &sched->ring_mirror_list);
+ list_add_tail(&s_job->list, &sched->pending_list);
drm_sched_start_timeout(sched);
spin_unlock(&sched->job_list_lock);
}
@@ -287,8 +319,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
spin_lock(&sched->job_list_lock);
- job = list_first_entry_or_null(&sched->ring_mirror_list,
- struct drm_sched_job, node);
+ job = list_first_entry_or_null(&sched->pending_list,
+ struct drm_sched_job, list);
if (job) {
/*
@@ -296,7 +328,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
* drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
* is parked at which point it's safe.
*/
- list_del_init(&job->node);
+ list_del_init(&job->list);
spin_unlock(&sched->job_list_lock);
job->sched->ops->timedout_job(job);
@@ -372,7 +404,7 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
* Stop the scheduler and also removes and frees all completed jobs.
* Note: bad job will not be freed as it might be used later and so it's
* callers responsibility to release it manually if it's not part of the
- * mirror list any more.
+ * pending list any more.
*
*/
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
@@ -393,26 +425,27 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
* Add at the head of the queue to reflect it was the earliest
* job extracted.
*/
- list_add(&bad->node, &sched->ring_mirror_list);
+ list_add(&bad->list, &sched->pending_list);
/*
* Iterate the job list from later to earlier one and either deactive
- * their HW callbacks or remove them from mirror list if they already
+ * their HW callbacks or remove them from pending list if they already
* signaled.
* This iteration is thread safe as sched thread is stopped.
*/
- list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
+ list) {
if (s_job->s_fence->parent &&
dma_fence_remove_callback(s_job->s_fence->parent,
&s_job->cb)) {
atomic_dec(&sched->hw_rq_count);
} else {
/*
- * remove job from ring_mirror_list.
+ * remove job from pending_list.
* Locking here is for concurrent resume timeout
*/
spin_lock(&sched->job_list_lock);
- list_del_init(&s_job->node);
+ list_del_init(&s_job->list);
spin_unlock(&sched->job_list_lock);
/*
@@ -463,7 +496,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
* so no new jobs are being inserted or removed. Also concurrent
* GPU recovers can't run in parallel.
*/
- list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct dma_fence *fence = s_job->s_fence->parent;
atomic_inc(&sched->hw_rq_count);
@@ -473,14 +506,14 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
if (fence) {
r = dma_fence_add_callback(fence, &s_job->cb,
- drm_sched_process_job);
+ drm_sched_job_done_cb);
if (r == -ENOENT)
- drm_sched_process_job(fence, &s_job->cb);
+ drm_sched_job_done(s_job);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
} else
- drm_sched_process_job(NULL, &s_job->cb);
+ drm_sched_job_done(s_job);
}
if (full_recovery) {
@@ -494,7 +527,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
EXPORT_SYMBOL(drm_sched_start);
/**
- * drm_sched_resubmit_jobs - helper to relunch job from mirror ring list
+ * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
*
* @sched: scheduler instance
*
@@ -506,7 +539,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
bool found_guilty = false;
struct dma_fence *fence;
- list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct drm_sched_fence *s_fence = s_job->s_fence;
if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
@@ -566,7 +599,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
return -ENOMEM;
job->id = atomic64_inc_return(&sched->job_id_count);
- INIT_LIST_HEAD(&job->node);
+ INIT_LIST_HEAD(&job->list);
return 0;
}
@@ -636,36 +669,11 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
}
/**
- * drm_sched_process_job - process a job
- *
- * @f: fence
- * @cb: fence callbacks
- *
- * Called after job has finished execution.
- */
-static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
-{
- struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
- struct drm_sched_fence *s_fence = s_job->s_fence;
- struct drm_gpu_scheduler *sched = s_fence->sched;
-
- atomic_dec(&sched->hw_rq_count);
- atomic_dec(&sched->score);
-
- trace_drm_sched_process_job(s_fence);
-
- dma_fence_get(&s_fence->finished);
- drm_sched_fence_finished(s_fence);
- dma_fence_put(&s_fence->finished);
- wake_up_interruptible(&sched->wake_up_worker);
-}
-
-/**
* drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
*
* @sched: scheduler instance
*
- * Returns the next finished job from the mirror list (if there is one)
+ * Returns the next finished job from the pending list (if there is one)
* ready for it to be destroyed.
*/
static struct drm_sched_job *
@@ -675,7 +683,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
/*
* Don't destroy jobs while the timeout worker is running OR thread
- * is being parked and hence assumed to not touch ring_mirror_list
+ * is being parked and hence assumed to not touch pending_list
*/
if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
!cancel_delayed_work(&sched->work_tdr)) ||
@@ -684,12 +692,12 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
spin_lock(&sched->job_list_lock);
- job = list_first_entry_or_null(&sched->ring_mirror_list,
- struct drm_sched_job, node);
+ job = list_first_entry_or_null(&sched->pending_list,
+ struct drm_sched_job, list);
if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
- /* remove job from ring_mirror_list */
- list_del_init(&job->node);
+ /* remove job from pending_list */
+ list_del_init(&job->list);
} else {
job = NULL;
/* queue timeout for next job */
@@ -809,9 +817,9 @@ static int drm_sched_main(void *param)
if (!IS_ERR_OR_NULL(fence)) {
s_fence->parent = dma_fence_get(fence);
r = dma_fence_add_callback(fence, &sched_job->cb,
- drm_sched_process_job);
+ drm_sched_job_done_cb);
if (r == -ENOENT)
- drm_sched_process_job(fence, &sched_job->cb);
+ drm_sched_job_done(sched_job);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
@@ -820,7 +828,7 @@ static int drm_sched_main(void *param)
if (IS_ERR(fence))
dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
- drm_sched_process_job(NULL, &sched_job->cb);
+ drm_sched_job_done(sched_job);
}
wake_up(&sched->job_scheduled);
@@ -858,7 +866,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
- INIT_LIST_HEAD(&sched->ring_mirror_list);
+ INIT_LIST_HEAD(&sched->pending_list);
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
@@ -891,6 +899,9 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
if (sched->thread)
kthread_stop(sched->thread);
+ /* Confirm no work left behind accessing device structures */
+ cancel_delayed_work_sync(&sched->work_tdr);
+
sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);
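[Editor's aside, not part of the patch: the scheduler rework above folds the old drm_sched_process_job() callback into drm_sched_job_done()/drm_sched_job_done_cb(). The sketch below only restates the attach-or-complete pattern both call sites use; it is not a new scheduler API.]

#include <linux/dma-fence.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

/* Attach the completion callback to the hardware fence; if the fence has
 * already signaled, dma_fence_add_callback() returns -ENOENT and the
 * completion handler is invoked directly instead.
 */
static void example_attach_done(struct dma_fence *fence,
				struct drm_sched_job *s_job)
{
	int r;

	r = dma_fence_add_callback(fence, &s_job->cb, drm_sched_job_done_cb);
	if (r == -ENOENT)
		drm_sched_job_done(s_job);
	else if (r)
		DRM_ERROR("fence add callback failed (%d)\n", r);
}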
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index a98057431023..7476301d7142 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -330,13 +330,6 @@ static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
.atomic_disable = sti_cursor_atomic_disable,
};
-static void sti_cursor_destroy(struct drm_plane *drm_plane)
-{
- DRM_DEBUG_DRIVER("\n");
-
- drm_plane_cleanup(drm_plane);
-}
-
static int sti_cursor_late_register(struct drm_plane *drm_plane)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
@@ -350,7 +343,7 @@ static int sti_cursor_late_register(struct drm_plane *drm_plane)
static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = sti_cursor_destroy,
+ .destroy = drm_plane_cleanup,
.reset = sti_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 2d5a2b5b78b8..2f4a34f14d33 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -884,13 +884,6 @@ static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
.atomic_disable = sti_gdp_atomic_disable,
};
-static void sti_gdp_destroy(struct drm_plane *drm_plane)
-{
- DRM_DEBUG_DRIVER("\n");
-
- drm_plane_cleanup(drm_plane);
-}
-
static int sti_gdp_late_register(struct drm_plane *drm_plane)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
@@ -902,7 +895,7 @@ static int sti_gdp_late_register(struct drm_plane *drm_plane)
static const struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = sti_gdp_destroy,
+ .destroy = drm_plane_cleanup,
.reset = sti_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 5a4e12194a77..62f824cd5f21 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1262,13 +1262,6 @@ static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
.atomic_disable = sti_hqvdp_atomic_disable,
};
-static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
-{
- DRM_DEBUG_DRIVER("\n");
-
- drm_plane_cleanup(drm_plane);
-}
-
static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
@@ -1282,7 +1275,7 @@ static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = sti_hqvdp_destroy,
+ .destroy = drm_plane_cleanup,
.reset = sti_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 3980677435cb..7812094f93d6 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -713,7 +713,6 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.enable_vblank = ltdc_crtc_enable_vblank,
.disable_vblank = ltdc_crtc_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
/*
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index eaaf5d70e352..9f06dec0fc61 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -569,30 +569,13 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
if (info->bus_flags & DRM_BUS_FLAG_DE_LOW)
val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE;
- /*
- * On A20 and similar SoCs, the only way to achieve Positive Edge
- * (Rising Edge), is setting dclk clock phase to 2/3(240°).
- * By default TCON works in Negative Edge(Falling Edge),
- * this is why phase is set to 0 in that case.
- * Unfortunately there's no way to logically invert dclk through
- * IO_POL register.
- * The only acceptable way to work, triple checked with scope,
- * is using clock phase set to 0° for Negative Edge and set to 240°
- * for Positive Edge.
- * On A33 and similar SoCs there would be a 90° phase option,
- * but it divides also dclk by 2.
- * Following code is a way to avoid quirks all around TCON
- * and DOTCLOCK drivers.
- */
- if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
- clk_set_phase(tcon->dclk, 240);
-
if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
- clk_set_phase(tcon->dclk, 0);
+ val |= SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE;
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
SUN4I_TCON0_IO_POL_HSYNC_POSITIVE |
SUN4I_TCON0_IO_POL_VSYNC_POSITIVE |
+ SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE |
SUN4I_TCON0_IO_POL_DE_NEGATIVE,
val);
@@ -689,6 +672,30 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
SUN4I_TCON1_BASIC5_V_SYNC(vsync) |
SUN4I_TCON1_BASIC5_H_SYNC(hsync));
+ /* Setup the polarity of multiple signals */
+ if (tcon->quirks->polarity_in_ch0) {
+ val = 0;
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+
+ regmap_write(tcon->regs, SUN4I_TCON0_IO_POL_REG, val);
+ } else {
+ /* according to the vendor driver, this bit must always be set */
+ val = SUN4I_TCON1_IO_POL_UNKNOWN;
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ val |= SUN4I_TCON1_IO_POL_HSYNC_POSITIVE;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ val |= SUN4I_TCON1_IO_POL_VSYNC_POSITIVE;
+
+ regmap_write(tcon->regs, SUN4I_TCON1_IO_POL_REG, val);
+ }
+
/* Map output pins to channel 1 */
regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
SUN4I_TCON_GCTL_IOMAP_MASK,
@@ -1517,6 +1524,7 @@ static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = {
.has_channel_1 = true,
+ .polarity_in_ch0 = true,
.set_mux = sun8i_r40_tcon_tv_set_mux,
};
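[Editor's aside, not part of the patch: the TCON1 hunk above derives the sync polarities from the DRM mode flags and, depending on the new polarity_in_ch0 quirk, writes them to the channel-0 or channel-1 IO_POL register. A small sketch of the channel-1 value computation, using the register bits added in this series:]

/* Illustrative only; the driver builds this value inline. */
static u32 example_tcon1_io_pol(const struct drm_display_mode *mode)
{
	u32 val = SUN4I_TCON1_IO_POL_UNKNOWN;	/* vendor driver always sets it */

	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
		val |= SUN4I_TCON1_IO_POL_HSYNC_POSITIVE;
	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
		val |= SUN4I_TCON1_IO_POL_VSYNC_POSITIVE;

	return val;
}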
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index cfbf4e6c1679..e624f6977eb8 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -113,6 +113,7 @@
#define SUN4I_TCON0_IO_POL_REG 0x88
#define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase) ((phase & 3) << 28)
#define SUN4I_TCON0_IO_POL_DE_NEGATIVE BIT(27)
+#define SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE BIT(26)
#define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25)
#define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE BIT(24)
@@ -153,6 +154,11 @@
#define SUN4I_TCON1_BASIC5_V_SYNC(height) (((height) - 1) & 0x3ff)
#define SUN4I_TCON1_IO_POL_REG 0xf0
+/* there is no documentation about this bit */
+#define SUN4I_TCON1_IO_POL_UNKNOWN BIT(26)
+#define SUN4I_TCON1_IO_POL_HSYNC_POSITIVE BIT(25)
+#define SUN4I_TCON1_IO_POL_VSYNC_POSITIVE BIT(24)
+
#define SUN4I_TCON1_IO_TRI_REG 0xf4
#define SUN4I_TCON_ECC_FIFO_REG 0xf8
@@ -235,6 +241,7 @@ struct sun4i_tcon_quirks {
bool needs_de_be_mux; /* sun6i needs mux to select backend */
bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */
bool supports_lvds; /* Does the TCON support an LVDS output? */
+ bool polarity_in_ch0; /* some tcon1 channels have polarity bits in tcon0 pol register */
u8 dclk_min_div; /* minimum divider for TCON0 DCLK */
/* callback to handle tcon muxing options */
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.c b/drivers/gpu/drm/sun4i/sun8i_csc.c
index 781955dd4995..9bd62de0c288 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.c
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.c
@@ -46,33 +46,6 @@ static const u32 yuv2rgb[2][2][12] = {
},
};
-static const u32 yvu2rgb[2][2][12] = {
- [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
- [DRM_COLOR_YCBCR_BT601] = {
- 0x000004A8, 0x00000662, 0x00000000, 0xFFFC8451,
- 0x000004A8, 0xFFFFFCC0, 0xFFFFFE6F, 0x00021E4D,
- 0x000004A8, 0x00000000, 0x00000811, 0xFFFBACA9,
- },
- [DRM_COLOR_YCBCR_BT709] = {
- 0x000004A8, 0x0000072B, 0x00000000, 0xFFFC1F99,
- 0x000004A8, 0xFFFFFDDF, 0xFFFFFF26, 0x00013383,
- 0x000004A8, 0x00000000, 0x00000873, 0xFFFB7BEF,
- }
- },
- [DRM_COLOR_YCBCR_FULL_RANGE] = {
- [DRM_COLOR_YCBCR_BT601] = {
- 0x00000400, 0x0000059B, 0x00000000, 0xFFFD322E,
- 0x00000400, 0xFFFFFD25, 0xFFFFFEA0, 0x00021DD5,
- 0x00000400, 0x00000000, 0x00000716, 0xFFFC74BD,
- },
- [DRM_COLOR_YCBCR_BT709] = {
- 0x00000400, 0x0000064C, 0x00000000, 0xFFFCD9B4,
- 0x00000400, 0xFFFFFE21, 0xFFFFFF41, 0x00014F96,
- 0x00000400, 0x00000000, 0x0000076C, 0xFFFC49EF,
- }
- },
-};
-
/*
* DE3 has a bit different CSC units. Factors are in two's complement format.
* First three factors in a row are multiplication factors which have 17 bits
@@ -96,7 +69,7 @@ static const u32 yvu2rgb[2][2][12] = {
* c20 c21 c22 [d2 const2]
*/
-static const u32 yuv2rgb_de3[2][2][12] = {
+static const u32 yuv2rgb_de3[2][3][12] = {
[DRM_COLOR_YCBCR_LIMITED_RANGE] = {
[DRM_COLOR_YCBCR_BT601] = {
0x0002542A, 0x00000000, 0x0003312A, 0xFFC00000,
@@ -107,6 +80,11 @@ static const u32 yuv2rgb_de3[2][2][12] = {
0x0002542A, 0x00000000, 0x000395E2, 0xFFC00000,
0x0002542A, 0xFFFF92D2, 0xFFFEEF27, 0xFE000000,
0x0002542A, 0x0004398C, 0x00000000, 0xFE000000,
+ },
+ [DRM_COLOR_YCBCR_BT2020] = {
+ 0x0002542A, 0x00000000, 0x00035B7B, 0xFFC00000,
+ 0x0002542A, 0xFFFFA017, 0xFFFEB2FC, 0xFE000000,
+ 0x0002542A, 0x00044896, 0x00000000, 0xFE000000,
}
},
[DRM_COLOR_YCBCR_FULL_RANGE] = {
@@ -119,33 +97,11 @@ static const u32 yuv2rgb_de3[2][2][12] = {
0x00020000, 0x00000000, 0x0003264C, 0x00000000,
0x00020000, 0xFFFFA018, 0xFFFF1053, 0xFE000000,
0x00020000, 0x0003B611, 0x00000000, 0xFE000000,
- }
- },
-};
-
-static const u32 yvu2rgb_de3[2][2][12] = {
- [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
- [DRM_COLOR_YCBCR_BT601] = {
- 0x0002542A, 0x0003312A, 0x00000000, 0xFFC00000,
- 0x0002542A, 0xFFFE5FC3, 0xFFFF376B, 0xFE000000,
- 0x0002542A, 0x00000000, 0x000408D2, 0xFE000000,
},
- [DRM_COLOR_YCBCR_BT709] = {
- 0x0002542A, 0x000395E2, 0x00000000, 0xFFC00000,
- 0x0002542A, 0xFFFEEF27, 0xFFFF92D2, 0xFE000000,
- 0x0002542A, 0x00000000, 0x0004398C, 0xFE000000,
- }
- },
- [DRM_COLOR_YCBCR_FULL_RANGE] = {
- [DRM_COLOR_YCBCR_BT601] = {
- 0x00020000, 0x0002CDD2, 0x00000000, 0x00000000,
- 0x00020000, 0xFFFE925D, 0xFFFF4FCE, 0xFE000000,
- 0x00020000, 0x00000000, 0x00038B43, 0xFE000000,
- },
- [DRM_COLOR_YCBCR_BT709] = {
- 0x00020000, 0x0003264C, 0x00000000, 0x00000000,
- 0x00020000, 0xFFFF1053, 0xFFFFA018, 0xFE000000,
- 0x00020000, 0x00000000, 0x0003B611, 0xFE000000,
+ [DRM_COLOR_YCBCR_BT2020] = {
+ 0x00020000, 0x00000000, 0x0002F2FE, 0x00000000,
+ 0x00020000, 0xFFFFABC0, 0xFFFEDB78, 0xFE000000,
+ 0x00020000, 0x0003C346, 0x00000000, 0xFE000000,
}
},
};
@@ -157,21 +113,30 @@ static void sun8i_csc_set_coefficients(struct regmap *map, u32 base,
{
const u32 *table;
u32 base_reg;
+ int i;
+
+ table = yuv2rgb[range][encoding];
switch (mode) {
case SUN8I_CSC_MODE_YUV2RGB:
- table = yuv2rgb[range][encoding];
+ base_reg = SUN8I_CSC_COEFF(base, 0);
+ regmap_bulk_write(map, base_reg, table, 12);
break;
case SUN8I_CSC_MODE_YVU2RGB:
- table = yvu2rgb[range][encoding];
+ for (i = 0; i < 12; i++) {
+ if ((i & 3) == 1)
+ base_reg = SUN8I_CSC_COEFF(base, i + 1);
+ else if ((i & 3) == 2)
+ base_reg = SUN8I_CSC_COEFF(base, i - 1);
+ else
+ base_reg = SUN8I_CSC_COEFF(base, i);
+ regmap_write(map, base_reg, table[i]);
+ }
break;
default:
DRM_WARN("Wrong CSC mode specified.\n");
return;
}
-
- base_reg = SUN8I_CSC_COEFF(base, 0);
- regmap_bulk_write(map, base_reg, table, 12);
}
static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer,
@@ -180,22 +145,36 @@ static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer,
enum drm_color_range range)
{
const u32 *table;
- u32 base_reg;
+ u32 addr;
+ int i;
+
+ table = yuv2rgb_de3[range][encoding];
switch (mode) {
case SUN8I_CSC_MODE_YUV2RGB:
- table = yuv2rgb_de3[range][encoding];
+ addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, layer, 0);
+ regmap_bulk_write(map, addr, table, 12);
break;
case SUN8I_CSC_MODE_YVU2RGB:
- table = yvu2rgb_de3[range][encoding];
+ for (i = 0; i < 12; i++) {
+ if ((i & 3) == 1)
+ addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE,
+ layer,
+ i + 1);
+ else if ((i & 3) == 2)
+ addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE,
+ layer,
+ i - 1);
+ else
+ addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE,
+ layer, i);
+ regmap_write(map, addr, table[i]);
+ }
break;
default:
DRM_WARN("Wrong CSC mode specified.\n");
return;
}
-
- base_reg = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, layer, 0, 0);
- regmap_bulk_write(map, base_reg, table, 12);
}
static void sun8i_csc_enable(struct regmap *map, u32 base, bool enable)
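[Editor's aside, not part of the patch: the CSC hunks above drop the separate yvu2rgb tables and instead reuse the yuv2rgb coefficients, swapping the U and V columns while writing the registers. The helper below only illustrates that index swizzle; it is not a function in the driver.]

/* Within each row of four words (Y, Cb, Cr, constant), entries 1 and 2
 * trade places for YVU layouts; the Y coefficient and constant stay put.
 */
static u32 example_yvu_register_index(unsigned int i)
{
	switch (i & 3) {
	case 1:
		return i + 1;	/* Cb coefficient lands in the Cr slot */
	case 2:
		return i - 1;	/* Cr coefficient lands in the Cb slot */
	default:
		return i;
	}
}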
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 92add2cef2e7..bbdfd5e26ec8 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -21,8 +21,7 @@ static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder,
{
struct sun8i_dw_hdmi *hdmi = encoder_to_sun8i_dw_hdmi(encoder);
- if (hdmi->quirks->set_rate)
- clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
+ clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
}
static const struct drm_encoder_helper_funcs
@@ -48,11 +47,9 @@ sun8i_dw_hdmi_mode_valid_h6(struct dw_hdmi *hdmi, void *data,
{
/*
* Controller supports a maximum of 594 MHz, which corresponds to
- * 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than
- * 340 MHz scrambling has to be enabled. Because scrambling is
- * not yet implemented, just limit to 340 MHz for now.
+ * 4K@60Hz 4:4:4 or RGB.
*/
- if (mode->clock > 340000)
+ if (mode->clock > 594000)
return MODE_CLOCK_HIGH;
return MODE_OK;
@@ -295,7 +292,6 @@ static int sun8i_dw_hdmi_remove(struct platform_device *pdev)
static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = {
.mode_valid = sun8i_dw_hdmi_mode_valid_a83t,
- .set_rate = true,
};
static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index d983746fa194..d4b55af0592f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -179,7 +179,6 @@ struct sun8i_dw_hdmi_quirks {
enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data,
const struct drm_display_info *info,
const struct drm_display_mode *mode);
- unsigned int set_rate : 1;
unsigned int use_drm_infoframe : 1;
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 35c2133724e2..9994edf67509 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -104,29 +104,21 @@ static const struct dw_hdmi_mpll_config sun50i_h6_mpll_cfg[] = {
static const struct dw_hdmi_curr_ctrl sun50i_h6_cur_ctr[] = {
/* pixelclk bpp8 bpp10 bpp12 */
- { 25175000, { 0x0000, 0x0000, 0x0000 }, },
{ 27000000, { 0x0012, 0x0000, 0x0000 }, },
- { 59400000, { 0x0008, 0x0008, 0x0008 }, },
- { 72000000, { 0x0008, 0x0008, 0x001b }, },
- { 74250000, { 0x0013, 0x0013, 0x0013 }, },
- { 90000000, { 0x0008, 0x001a, 0x001b }, },
- { 118800000, { 0x001b, 0x001a, 0x001b }, },
- { 144000000, { 0x001b, 0x001a, 0x0034 }, },
- { 180000000, { 0x001b, 0x0033, 0x0034 }, },
- { 216000000, { 0x0036, 0x0033, 0x0034 }, },
- { 237600000, { 0x0036, 0x0033, 0x001b }, },
- { 288000000, { 0x0036, 0x001b, 0x001b }, },
- { 297000000, { 0x0019, 0x001b, 0x0019 }, },
- { 330000000, { 0x0036, 0x001b, 0x001b }, },
- { 594000000, { 0x003f, 0x001b, 0x001b }, },
+ { 74250000, { 0x0013, 0x001a, 0x001b }, },
+ { 148500000, { 0x0019, 0x0033, 0x0034 }, },
+ { 297000000, { 0x0019, 0x001b, 0x001b }, },
+ { 594000000, { 0x0010, 0x001b, 0x001b }, },
{ ~0UL, { 0x0000, 0x0000, 0x0000 }, }
};
static const struct dw_hdmi_phy_config sun50i_h6_phy_config[] = {
/*pixelclk symbol term vlev*/
- { 74250000, 0x8009, 0x0004, 0x0232},
- { 148500000, 0x8029, 0x0004, 0x0273},
- { 594000000, 0x8039, 0x0004, 0x014a},
+ { 27000000, 0x8009, 0x0007, 0x02b0 },
+ { 74250000, 0x8009, 0x0006, 0x022d },
+ { 148500000, 0x8029, 0x0006, 0x0270 },
+ { 297000000, 0x8039, 0x0005, 0x01ab },
+ { 594000000, 0x8029, 0x0000, 0x008a },
{ ~0UL, 0x0000, 0x0000, 0x0000}
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 7576b523fdbb..145833a9d82d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -50,10 +50,8 @@
#define SUN8I_MIXER_BLEND_CK_MIN(base, x) ((base) + 0xe0 + 0x04 * (x))
#define SUN8I_MIXER_BLEND_OUTCTL(base) ((base) + 0xfc)
#define SUN50I_MIXER_BLEND_CSC_CTL(base) ((base) + 0x100)
-#define SUN50I_MIXER_BLEND_CSC_COEFF(base, layer, x, y) \
- ((base) + 0x110 + (layer) * 0x30 + (x) * 0x10 + 4 * (y))
-#define SUN50I_MIXER_BLEND_CSC_CONST(base, layer, i) \
- ((base) + 0x110 + (layer) * 0x30 + (i) * 0x10 + 0x0c)
+#define SUN50I_MIXER_BLEND_CSC_COEFF(base, layer, x) \
+ ((base) + 0x110 + (layer) * 0x30 + (x) * 4)
#define SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK GENMASK(12, 8)
#define SUN8I_MIXER_BLEND_PIPE_CTL_EN(pipe) BIT(8 + pipe)
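With the simplified SUN50I_MIXER_BLEND_CSC_COEFF() above, the twelve table entries of a layer (three rows of three coefficients plus an offset each) occupy consecutive 32-bit registers at base + 0x110 + layer * 0x30 + x * 4, which is what lets sun8i_de3_ccsc_set_coefficients() program a matrix with a single regmap_bulk_write() of 12 words. The address math restated as a standalone sketch; the base value is hypothetical and the code is illustration only:

#include <stdio.h>

/* Mirror of the new SUN50I_MIXER_BLEND_CSC_COEFF() layout: entry x of
 * a layer lives at base + 0x110 + layer * 0x30 + x * 4. */
static unsigned int csc_coeff_addr(unsigned int base, unsigned int layer,
				   unsigned int x)
{
	return base + 0x110 + layer * 0x30 + x * 4;
}

int main(void)
{
	unsigned int base = 0x1000;	/* hypothetical blender base offset */

	for (unsigned int x = 0; x < 12; x++)
		printf("layer 0, entry %2u -> 0x%04x\n",
		       x, csc_coeff_addr(base, 0, x));
	return 0;
}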
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 76393fc976fe..8cc294a9969d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -543,6 +543,8 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709);
+ if (mixer->cfg->is_de3)
+ supported_encodings |= BIT(DRM_COLOR_YCBCR_BT2020);
supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
BIT(DRM_COLOR_YCBCR_FULL_RANGE);
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index ab699bf0ac5c..58c185c299f4 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -56,7 +56,7 @@ static const struct file_operations tdfx_driver_fops = {
.llseek = noop_llseek,
};
-static struct drm_driver driver = {
+static const struct drm_driver driver = {
.driver_features = DRIVER_LEGACY,
.fops = &tdfx_driver_fops,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 85dd7131553a..0ae3a025efe9 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -2186,7 +2186,7 @@ static int tegra_dc_runtime_resume(struct host1x_client *client)
struct device *dev = client->dev;
int err;
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
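The pm_runtime_get_sync() to pm_runtime_resume_and_get() conversions repeated across these Tegra hunks follow the usual motivation: pm_runtime_get_sync() raises the usage count even when the resume fails, so every error path needs a matching pm_runtime_put_noidle(), while pm_runtime_resume_and_get() drops the reference itself on failure and its return value can simply be propagated. A hedged sketch of the two call shapes; it assumes a kernel build environment and is not code from this patch:

#include <linux/pm_runtime.h>

/* Sketch only: old-style call, where a failed resume still leaves the
 * usage count raised and must be balanced explicitly. */
static int resume_old_style(struct device *dev)
{
	int err = pm_runtime_get_sync(dev);

	if (err < 0) {
		pm_runtime_put_noidle(dev);
		return err;
	}
	return 0;
}

/* Sketch only: new-style call, which keeps the reference only when the
 * resume actually succeeded. */
static int resume_new_style(struct device *dev)
{
	return pm_runtime_resume_and_get(dev);
}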
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index e45c8414e2a3..e9ce7d6992d2 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1301,8 +1301,10 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra30-hdmi", },
{ .compatible = "nvidia,tegra30-gr2d", },
{ .compatible = "nvidia,tegra30-gr3d", },
+ { .compatible = "nvidia,tegra114-dc", },
{ .compatible = "nvidia,tegra114-dsi", },
{ .compatible = "nvidia,tegra114-hdmi", },
+ { .compatible = "nvidia,tegra114-gr2d", },
{ .compatible = "nvidia,tegra114-gr3d", },
{ .compatible = "nvidia,tegra124-dc", },
{ .compatible = "nvidia,tegra124-sor", },
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 5691ef1b0e58..f46d377f0c30 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1111,7 +1111,7 @@ static int tegra_dsi_runtime_resume(struct host1x_client *client)
struct device *dev = client->dev;
int err;
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index 56edef06c48e..223ab2ceb7e6 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -72,7 +72,7 @@ static int falcon_parse_firmware_image(struct falcon *falcon)
struct falcon_fw_os_header_v1 *os;
/* endian problems would show up right here */
- if (bin->magic != PCI_VENDOR_ID_NVIDIA) {
+ if (bin->magic != PCI_VENDOR_ID_NVIDIA && bin->magic != 0x10fe) {
dev_err(falcon->dev, "incorrect firmware magic\n");
return -EINVAL;
}
@@ -178,9 +178,10 @@ int falcon_boot(struct falcon *falcon)
falcon->firmware.data.offset + offset,
offset, FALCON_MEMORY_DATA);
- /* copy the first code segment into Falcon internal memory */
- falcon_copy_chunk(falcon, falcon->firmware.code.offset,
- 0, FALCON_MEMORY_IMEM);
+ /* copy the code segment into Falcon internal memory */
+ for (offset = 0; offset < falcon->firmware.code.size; offset += 256)
+ falcon_copy_chunk(falcon, falcon->firmware.code.offset + offset,
+ offset, FALCON_MEMORY_IMEM);
/* setup falcon interrupts */
falcon_writel(falcon, FALCON_IRQMSET_EXT(0xff) |
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 1a0d3ba6e525..adbe2ddcda19 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -161,9 +161,14 @@ static const struct gr2d_soc tegra30_gr2d_soc = {
.version = 0x30,
};
+static const struct gr2d_soc tegra114_gr2d_soc = {
+ .version = 0x35,
+};
+
static const struct of_device_id gr2d_match[] = {
- { .compatible = "nvidia,tegra30-gr2d", .data = &tegra20_gr2d_soc },
- { .compatible = "nvidia,tegra20-gr2d", .data = &tegra30_gr2d_soc },
+ { .compatible = "nvidia,tegra114-gr2d", .data = &tegra114_gr2d_soc },
+ { .compatible = "nvidia,tegra30-gr2d", .data = &tegra30_gr2d_soc },
+ { .compatible = "nvidia,tegra20-gr2d", .data = &tegra20_gr2d_soc },
{ },
};
MODULE_DEVICE_TABLE(of, gr2d_match);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index d09a24931c87..e5d2a4026028 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1510,7 +1510,7 @@ static int tegra_hdmi_runtime_resume(struct host1x_client *client)
struct device *dev = client->dev;
int err;
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 22a03f7ffdc1..5ce771cba133 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -789,7 +789,7 @@ static int tegra_display_hub_runtime_resume(struct host1x_client *client)
unsigned int i;
int err;
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index cc2aa2308a51..f02a035dda45 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -3218,7 +3218,7 @@ static int tegra_sor_runtime_resume(struct host1x_client *client)
struct device *dev = client->dev;
int err;
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get runtime PM: %d\n", err);
return err;
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index ade56b860cf9..77e128832920 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -117,7 +117,19 @@ static int vic_boot(struct vic *vic)
if (spec->num_ids > 0) {
value = spec->ids[0] & 0xffff;
+ /*
+ * STREAMID0 is used for input/output buffers.
+ * Initialize it to SID_VIC in case context isolation
+ * is not enabled, and SID_VIC is used for both firmware
+ * and data buffers.
+ *
+ * If context isolation is enabled, it will be
+ * overridden by the SETSTREAMID opcode as part of
+ * each job.
+ */
vic_writel(vic, value, VIC_THI_STREAMID0);
+
+ /* STREAMID1 is used for firmware loading. */
vic_writel(vic, value, VIC_THI_STREAMID1);
}
}
@@ -135,16 +147,21 @@ static int vic_boot(struct vic *vic)
hdr = vic->falcon.firmware.virt;
fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
- hdr = vic->falcon.firmware.virt +
- *(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
- fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
falcon_execute_method(&vic->falcon, VIC_SET_APPLICATION_ID, 1);
- falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
- fce_ucode_size);
- falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
- (vic->falcon.firmware.iova + fce_bin_data_offset)
- >> 8);
+
+ /* Old VIC firmware needs kernel help with setting up FCE microcode. */
+ if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
+ hdr = vic->falcon.firmware.virt +
+ *(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
+ fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
+
+ falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
+ fce_ucode_size);
+ falcon_execute_method(
+ &vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
+ (vic->falcon.firmware.iova + fce_bin_data_offset) >> 8);
+ }
err = falcon_wait_idle(&vic->falcon);
if (err < 0) {
@@ -314,7 +331,7 @@ static int vic_open_channel(struct tegra_drm_client *client,
struct vic *vic = to_vic(client);
int err;
- err = pm_runtime_get_sync(vic->dev);
+ err = pm_runtime_resume_and_get(vic->dev);
if (err < 0)
return err;
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
index 662bf3a348c9..f5190477de72 100644
--- a/drivers/gpu/drm/tilcdc/Makefile
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
+ifeq (, $(findstring -W,$(KCFLAGS)))
ccflags-y += -Werror
endif
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index 561c49d8657a..a043e602199e 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -602,7 +602,6 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
drm_mode_config_reset(dev);
- dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
ret = drm_dev_register(dev, 0);
if (ret)
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 03c86628e4ac..8f9fa4188897 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -32,7 +32,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/agp_backend.h>
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9a03c7834b1e..20a25660b35b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -31,7 +31,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
@@ -43,6 +42,8 @@
#include <linux/atomic.h>
#include <linux/dma-resv.h>
+#include "ttm_module.h"
+
static void ttm_bo_global_kobj_release(struct kobject *kobj);
/*
@@ -71,9 +72,9 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man;
int i, mem_type;
- drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
- bo, bo->mem.num_pages, bo->mem.size >> 10,
- bo->mem.size >> 20);
+ drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
+ bo, bo->mem.num_pages, bo->base.size >> 10,
+ bo->base.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
mem_type = placement->placement[i].mem_type;
drm_printf(&p, " placement[%d]=0x%08X (%d)\n",
@@ -109,40 +110,14 @@ static struct kobj_type ttm_bo_glob_kobj_type = {
.default_attrs = ttm_bo_global_attrs
};
-static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
- struct ttm_resource *mem)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_resource_manager *man;
-
- if (!list_empty(&bo->lru) || bo->pin_count)
- return;
-
- man = ttm_manager_type(bdev, mem->mem_type);
- list_add_tail(&bo->lru, &man->lru[bo->priority]);
-
- if (man->use_tt && bo->ttm &&
- !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
- TTM_PAGE_FLAG_SWAPPED))) {
- list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
- }
-}
-
static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- bool notify = false;
- if (!list_empty(&bo->swap)) {
- list_del_init(&bo->swap);
- notify = true;
- }
- if (!list_empty(&bo->lru)) {
- list_del_init(&bo->lru);
- notify = true;
- }
+ list_del_init(&bo->swap);
+ list_del_init(&bo->lru);
- if (notify && bdev->driver->del_from_lru_notify)
+ if (bdev->driver->del_from_lru_notify)
bdev->driver->del_from_lru_notify(bo);
}
@@ -155,12 +130,32 @@ static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
}
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
+ struct ttm_resource *mem,
struct ttm_lru_bulk_move *bulk)
{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_resource_manager *man;
+
dma_resv_assert_held(bo->base.resv);
- ttm_bo_del_from_lru(bo);
- ttm_bo_add_mem_to_lru(bo, &bo->mem);
+ if (bo->pin_count) {
+ ttm_bo_del_from_lru(bo);
+ return;
+ }
+
+ man = ttm_manager_type(bdev, mem->mem_type);
+ list_move_tail(&bo->lru, &man->lru[bo->priority]);
+ if (man->use_tt && bo->ttm &&
+ !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
+ TTM_PAGE_FLAG_SWAPPED))) {
+ struct list_head *swap;
+
+ swap = &ttm_bo_glob.swap_lru[bo->priority];
+ list_move_tail(&bo->swap, swap);
+ }
+
+ if (bdev->driver->del_from_lru_notify)
+ bdev->driver->del_from_lru_notify(bo);
if (bulk && !bo->pin_count) {
switch (bo->mem.mem_type) {
@@ -267,7 +262,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
goto out_err;
}
- ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
+ ctx->bytes_moved += bo->base.size;
return 0;
out_err:
@@ -514,10 +509,9 @@ static void ttm_bo_release(struct kref *kref)
* shrinkers, now that they are queued for
* destruction.
*/
- if (bo->pin_count) {
+ if (WARN_ON(bo->pin_count)) {
bo->pin_count = 0;
- ttm_bo_del_from_lru(bo);
- ttm_bo_add_mem_to_lru(bo, &bo->mem);
+ ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
}
kref_init(&bo->kref);
@@ -859,8 +853,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
mem->placement = place->flags;
spin_lock(&ttm_bo_glob.lru_lock);
- ttm_bo_del_from_lru(bo);
- ttm_bo_add_mem_to_lru(bo, mem);
+ ttm_bo_move_to_lru_tail(bo, mem, NULL);
spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
@@ -937,9 +930,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
error:
- if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
+ if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count)
ttm_bo_move_to_lru_tail_unlocked(bo);
- }
return ret;
}
@@ -967,8 +959,10 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
return ret;
/* move to the bounce domain */
ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
- if (ret)
+ if (ret) {
+ ttm_resource_free(bo, &hop_mem);
return ret;
+ }
return 0;
}
@@ -984,8 +978,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
memset(&hop, 0, sizeof(hop));
- mem.num_pages = bo->num_pages;
- mem.size = mem.num_pages << PAGE_SHIFT;
+ mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
mem.bus.offset = 0;
mem.bus.addr = NULL;
@@ -1000,18 +993,19 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* stop and the driver will be called to make
* the second hop.
*/
-bounce:
ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
if (ret)
return ret;
+bounce:
ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
if (ret == -EMULTIHOP) {
ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
if (ret)
- return ret;
+ goto out;
/* try and move to final place now. */
goto bounce;
}
+out:
if (ret)
ttm_resource_free(bo, &mem);
return ret;
@@ -1101,7 +1095,7 @@ EXPORT_SYMBOL(ttm_bo_validate);
int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct ttm_buffer_object *bo,
- unsigned long size,
+ size_t size,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
@@ -1112,9 +1106,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
void (*destroy) (struct ttm_buffer_object *))
{
struct ttm_mem_global *mem_glob = &ttm_mem_glob;
- int ret = 0;
- unsigned long num_pages;
bool locked;
+ int ret = 0;
ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
if (ret) {
@@ -1126,16 +1119,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
return -ENOMEM;
}
- num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (num_pages == 0) {
- pr_err("Illegal buffer object size\n");
- if (destroy)
- (*destroy)(bo);
- else
- kfree(bo);
- ttm_mem_global_free(mem_glob, acc_size);
- return -EINVAL;
- }
bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
kref_init(&bo->kref);
@@ -1144,10 +1127,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->swap);
bo->bdev = bdev;
bo->type = type;
- bo->num_pages = num_pages;
- bo->mem.size = num_pages << PAGE_SHIFT;
bo->mem.mem_type = TTM_PL_SYSTEM;
- bo->mem.num_pages = bo->num_pages;
+ bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
bo->mem.mm_node = NULL;
bo->mem.page_alignment = page_alignment;
bo->mem.bus.offset = 0;
@@ -1165,9 +1146,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
}
if (!ttm_bo_uses_embedded_gem_object(bo)) {
/*
- * bo.gem is not initialized, so we have to setup the
+ * bo.base is not initialized, so we have to setup the
* struct elements we want to use regardless.
*/
+ bo->base.size = size;
dma_resv_init(&bo->base._resv);
drm_vma_node_reset(&bo->base.vma_node);
}
@@ -1209,7 +1191,7 @@ EXPORT_SYMBOL(ttm_bo_init_reserved);
int ttm_bo_init(struct ttm_bo_device *bdev,
struct ttm_buffer_object *bo,
- unsigned long size,
+ size_t size,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
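Several of the ttm_bo.c hunks above stop caching bo->num_pages and instead derive the page count from bo->base.size, e.g. mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT. The conversion itself, restated as a userspace sketch with a fixed 4 KiB page size, purely for illustration:

#include <stddef.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE  4096UL
#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PAGE_ALIGN(x) \
	(((x) + SKETCH_PAGE_SIZE - 1) & ~(SKETCH_PAGE_SIZE - 1))

int main(void)
{
	size_t sizes[] = { 1, 4096, 5000, 65536 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		size_t size = sizes[i];
		size_t num_pages = SKETCH_PAGE_ALIGN(size) >> SKETCH_PAGE_SHIFT;

		printf("size %6zu -> %zu page(s)\n", size, num_pages);
	}
	return 0;
}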
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7ccb2295cac1..398d5013fc39 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -310,7 +310,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
- fbo->base.pin_count = 1;
+ fbo->base.pin_count = 0;
if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
@@ -319,6 +319,8 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
+ ttm_bo_move_to_lru_tail_unlocked(&fbo->base);
+
*new_obj = &fbo->base;
return 0;
}
@@ -429,9 +431,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
map->virtual = NULL;
map->bo = bo;
- if (num_pages > bo->num_pages)
+ if (num_pages > bo->mem.num_pages)
return -EINVAL;
- if (start_page > bo->num_pages)
+ if ((start_page + num_pages) > bo->mem.num_pages)
return -EINVAL;
ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
@@ -483,14 +485,14 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
if (mem->bus.is_iomem) {
void __iomem *vaddr_iomem;
- size_t size = bo->num_pages << PAGE_SHIFT;
if (mem->bus.addr)
vaddr_iomem = (void __iomem *)mem->bus.addr;
else if (mem->bus.caching == ttm_write_combined)
- vaddr_iomem = ioremap_wc(mem->bus.offset, size);
+ vaddr_iomem = ioremap_wc(mem->bus.offset,
+ bo->base.size);
else
- vaddr_iomem = ioremap(mem->bus.offset, size);
+ vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
if (!vaddr_iomem)
return -ENOMEM;
@@ -515,7 +517,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
* or to make the buffer object look contiguous.
*/
prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
- vaddr = vmap(ttm->pages, bo->num_pages, 0, prot);
+ vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
if (!vaddr)
return -ENOMEM;
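The ttm_bo_kmap() hunk above tightens the bounds check: the old code only compared start_page and num_pages individually against the page count, so a request whose start fit but whose length ran past the end slipped through; the new check rejects start_page + num_pages beyond bo->mem.num_pages. A standalone illustration of the difference, not code from the patch:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the stricter check from the hunk above: both the length
 * and the end of the requested range must fit within 'total' pages. */
static bool kmap_range_ok(unsigned long start, unsigned long num,
			  unsigned long total)
{
	return num <= total && start + num <= total;
}

int main(void)
{
	/* total = 8 pages: start 6, len 4 passed the old per-field tests
	 * but runs past the end; the combined test rejects it. */
	printf("%d\n", kmap_range_ok(6, 4, 8)); /* 0: rejected */
	printf("%d\n", kmap_range_ok(4, 4, 8)); /* 1: fits exactly */
	return 0;
}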
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 2944fa0af493..6dc96cf66744 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -31,7 +31,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
@@ -199,7 +198,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
/* Fault should not cross bo boundary. */
page_offset &= ~(fault_page_size - 1);
- if (page_offset + fault_page_size > bo->num_pages)
+ if (page_offset + fault_page_size > bo->mem.num_pages)
goto out_fallback;
if (bo->mem.bus.is_iomem)
@@ -307,7 +306,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
page_last = vma_pages(vma) + vma->vm_pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (unlikely(page_offset >= bo->num_pages))
+ if (unlikely(page_offset >= bo->mem.num_pages))
return VM_FAULT_SIGBUS;
prot = ttm_io_prot(bo, &bo->mem, prot);
@@ -470,7 +469,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
<< PAGE_SHIFT);
int ret;
- if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
+ if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->mem.num_pages)
return -EIO;
ret = ttm_bo_reserve(bo, true, false, NULL);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 8a8f1a6a83a6..9fa36ed59429 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -55,7 +55,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- ttm_bo_move_to_lru_tail(bo, NULL);
+ ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&ttm_bo_glob.lru_lock);
@@ -162,7 +162,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
dma_resv_add_shared_fence(bo->base.resv, fence);
else
dma_resv_add_excl_fence(bo->base.resv, fence);
- ttm_bo_move_to_lru_tail(bo, NULL);
+ ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&ttm_bo_glob.lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 5ed1fc8f2ace..a3bfbd9cea68 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -29,7 +29,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
#include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
@@ -39,6 +38,8 @@
#include <linux/swap.h>
#include <drm/ttm/ttm_pool.h>
+#include "ttm_module.h"
+
#define TTM_MEMORY_ALLOC_RETRIES 4
struct ttm_mem_global ttm_mem_glob;
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 6ff40c041d79..c0906437cb1c 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -32,9 +32,10 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sched.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/drm_sysfs.h>
+#include "ttm_module.h"
+
static DECLARE_WAIT_QUEUE_HEAD(exit_q);
static atomic_t device_released;
diff --git a/include/drm/ttm/ttm_module.h b/drivers/gpu/drm/ttm/ttm_module.h
index 45fa318c1585..45fa318c1585 100644
--- a/include/drm/ttm/ttm_module.h
+++ b/drivers/gpu/drm/ttm/ttm_module.h
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 11e0313db0ea..6e27cb1bf48b 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
@@ -84,7 +85,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
* put_page() on a TTM allocated page is illegal.
*/
if (order)
- gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY |
+ gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
__GFP_KSWAPD_RECLAIM;
if (!pool->use_dma_alloc) {
@@ -218,6 +219,15 @@ static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
+ unsigned int i, num_pages = 1 << pt->order;
+
+ for (i = 0; i < num_pages; ++i) {
+ if (PageHighMem(p))
+ clear_highpage(p + i);
+ else
+ clear_page(page_address(p + i));
+ }
+
spin_lock(&pt->lock);
list_add(&p->lru, &pt->pages);
spin_unlock(&pt->lock);
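ttm_pool_type_give() above now wipes every page of a (possibly order > 0) allocation before parking it on the pool list, using clear_highpage() for highmem pages and clear_page() on the direct mapping otherwise, apparently so that pages recycled from the pool do not hand stale contents to the next user. The loop shape, restated as a standalone sketch with memset standing in for the kernel helpers, illustration only:

#include <stddef.h>
#include <string.h>

/* Sketch: zero a block of 2^order pages before it is reused.  In the
 * kernel the per-page clear is clear_highpage()/clear_page(); memset
 * on a flat buffer stands in here, purely for illustration. */
#define SKETCH_PAGE_SIZE 4096u

static void clear_block_before_reuse(void *block, unsigned int order)
{
	unsigned int num_pages = 1u << order;

	for (unsigned int i = 0; i < num_pages; i++)
		memset((char *)block + (size_t)i * SKETCH_PAGE_SIZE, 0,
		       SKETCH_PAGE_SIZE);
}

int main(void)
{
	static char buf[SKETCH_PAGE_SIZE];	/* order-0 example */

	clear_block_before_reuse(buf, 0);
	return 0;
}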
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index e0952444cea9..a39305f742da 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -29,7 +29,6 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_mm.h>
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index da9eeffe0c6d..7f75a13163f0 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -129,7 +129,7 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
uint32_t page_flags,
enum ttm_caching caching)
{
- ttm->num_pages = bo->num_pages;
+ ttm->num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
ttm->caching = ttm_cached;
ttm->page_flags = page_flags;
ttm->dma_address = NULL;
@@ -162,19 +162,6 @@ void ttm_tt_fini(struct ttm_tt *ttm)
}
EXPORT_SYMBOL(ttm_tt_fini);
-int ttm_dma_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
- uint32_t page_flags, enum ttm_caching caching)
-{
- ttm_tt_init_fields(ttm, bo, page_flags, caching);
-
- if (ttm_dma_tt_alloc_page_directory(ttm)) {
- pr_err("Failed allocating page table\n");
- return -ENOMEM;
- }
- return 0;
-}
-EXPORT_SYMBOL(ttm_dma_tt_init);
-
int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
uint32_t page_flags, enum ttm_caching caching)
{
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index 17ff24d999d1..cb0e837d3dba 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -11,7 +11,6 @@
*/
#include <linux/clk.h>
-#include <linux/version.h>
#include <linux/dma-buf.h>
#include <linux/of_graph.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 07140e0b90a3..7fa71c8bb828 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -35,7 +35,6 @@
#include <linux/platform_device.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 42d401fd244e..99e22beea90b 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -232,8 +232,8 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
return ret;
mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
- dev->coherent_dma_mask =
- DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
+ dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH)));
v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH);
ident1 = V3D_READ(V3D_HUB_IDENT1);
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index c88686489b88..e714d5318f30 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -178,10 +178,7 @@ v3d_hub_irq(int irq, void *arg)
};
const char *client = "?";
- V3D_WRITE(V3D_MMU_CTL,
- V3D_READ(V3D_MMU_CTL) & (V3D_MMU_CTL_CAP_EXCEEDED |
- V3D_MMU_CTL_PT_INVALID |
- V3D_MMU_CTL_WRITE_VIOLATION));
+ V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL));
if (v3d->ver >= 41) {
axi_id = axi_id >> 5;
@@ -217,7 +214,7 @@ v3d_irq_init(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
- irq1 = platform_get_irq(v3d_to_pdev(v3d), 1);
+ irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1);
if (irq1 == -EPROBE_DEFER)
return irq1;
if (irq1 > 0) {
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index f3eac72cb46e..e534896b6cfd 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -51,7 +51,6 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ERR(vbox))
return PTR_ERR(vbox);
- vbox->ddev.pdev = pdev;
pci_set_drvdata(pdev, vbox);
mutex_init(&vbox->hw_mutex);
@@ -109,15 +108,16 @@ static void vbox_pci_remove(struct pci_dev *pdev)
static int vbox_pm_suspend(struct device *dev)
{
struct vbox_private *vbox = dev_get_drvdata(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
int error;
error = drm_mode_config_helper_suspend(&vbox->ddev);
if (error)
return error;
- pci_save_state(vbox->ddev.pdev);
- pci_disable_device(vbox->ddev.pdev);
- pci_set_power_state(vbox->ddev.pdev, PCI_D3hot);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
@@ -125,8 +125,9 @@ static int vbox_pm_suspend(struct device *dev)
static int vbox_pm_resume(struct device *dev)
{
struct vbox_private *vbox = dev_get_drvdata(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
- if (pci_enable_device(vbox->ddev.pdev))
+ if (pci_enable_device(pdev))
return -EIO;
return drm_mode_config_helper_resume(&vbox->ddev);
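The vboxvideo suspend/resume hunks above stop going through the removed drm_device::pdev pointer and instead recover the PCI device from the generic struct device that the PM callback already receives. The retrieval pattern as a hedged standalone sketch; it assumes a kernel build environment and the callback name is illustrative:

#include <linux/pci.h>
#include <linux/pm.h>

/* Sketch only: a dev_pm_ops callback gets a struct device; the owning
 * PCI device is reached with to_pci_dev(). */
static int example_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}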
diff --git a/drivers/gpu/drm/vboxvideo/vbox_irq.c b/drivers/gpu/drm/vboxvideo/vbox_irq.c
index 631657fa554f..b3ded68603ba 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_irq.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_irq.c
@@ -170,10 +170,12 @@ static void vbox_hotplug_worker(struct work_struct *work)
int vbox_irq_init(struct vbox_private *vbox)
{
+ struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev);
+
INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker);
vbox_update_mode_hints(vbox);
- return drm_irq_install(&vbox->ddev, vbox->ddev.pdev->irq);
+ return drm_irq_install(&vbox->ddev, pdev->irq);
}
void vbox_irq_fini(struct vbox_private *vbox)
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index d68d9bad7674..f28779715ccd 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -8,7 +8,9 @@
* Hans de Goede <hdegoede@redhat.com>
*/
+#include <linux/pci.h>
#include <linux/vbox_err.h>
+
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
@@ -30,6 +32,7 @@ void vbox_report_caps(struct vbox_private *vbox)
static int vbox_accel_init(struct vbox_private *vbox)
{
+ struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev);
struct vbva_buffer *vbva;
unsigned int i;
@@ -41,7 +44,7 @@ static int vbox_accel_init(struct vbox_private *vbox)
/* Take a command buffer for each screen from the end of usable VRAM. */
vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
- vbox->vbva_buffers = pci_iomap_range(vbox->ddev.pdev, 0,
+ vbox->vbva_buffers = pci_iomap_range(pdev, 0,
vbox->available_vram_size,
vbox->num_crtcs *
VBVA_MIN_BUFFER_SIZE);
@@ -106,6 +109,7 @@ bool vbox_check_supported(u16 id)
int vbox_hw_init(struct vbox_private *vbox)
{
+ struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev);
int ret = -ENOMEM;
vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
@@ -115,7 +119,7 @@ int vbox_hw_init(struct vbox_private *vbox)
/* Map guest-heap at end of vram */
vbox->guest_heap =
- pci_iomap_range(vbox->ddev.pdev, 0, GUEST_HEAP_OFFSET(vbox),
+ pci_iomap_range(pdev, 0, GUEST_HEAP_OFFSET(vbox),
GUEST_HEAP_SIZE);
if (!vbox->guest_heap)
return -ENOMEM;
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index f5a06675da43..0066a3c1dfc9 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -15,8 +15,9 @@ int vbox_mm_init(struct vbox_private *vbox)
struct drm_vram_mm *vmm;
int ret;
struct drm_device *dev = &vbox->ddev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
- vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0),
+ vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(pdev, 0),
vbox->available_vram_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
@@ -24,8 +25,8 @@ int vbox_mm_init(struct vbox_private *vbox)
return ret;
}
- vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
+ vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 469d1b4f2643..fddaeb0b09c1 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -21,7 +21,7 @@
#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"
-static vm_fault_t vc4_fault(struct vm_fault *vmf);
+static const struct drm_gem_object_funcs vc4_gem_object_funcs;
static const char * const bo_type_names[] = {
"kernel",
@@ -376,20 +376,6 @@ out:
return bo;
}
-static const struct vm_operations_struct vc4_vm_ops = {
- .fault = vc4_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
-static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
- .free = vc4_free_object,
- .export = vc4_prime_export,
- .get_sg_table = drm_gem_cma_prime_get_sg_table,
- .vmap = vc4_prime_vmap,
- .vm_ops = &vc4_vm_ops,
-};
-
/**
* vc4_create_object - Implementation of driver->gem_create_object.
* @dev: DRM device
@@ -538,7 +524,7 @@ static void vc4_bo_cache_free_old(struct drm_device *dev)
/* Called on the last userspace/kernel unreference of the BO. Returns
* it to the BO cache if possible, otherwise frees it.
*/
-void vc4_free_object(struct drm_gem_object *gem_bo)
+static void vc4_free_object(struct drm_gem_object *gem_bo)
{
struct drm_device *dev = gem_bo->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -673,7 +659,7 @@ static void vc4_bo_cache_time_timer(struct timer_list *t)
schedule_work(&vc4->bo_cache.time_work);
}
-struct dma_buf * vc4_prime_export(struct drm_gem_object *obj, int flags)
+static struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
{
struct vc4_bo *bo = to_vc4_bo(obj);
struct dma_buf *dmabuf;
@@ -718,19 +704,9 @@ static vm_fault_t vc4_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
-int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
+static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
- struct drm_gem_object *gem_obj;
- unsigned long vm_pgoff;
- struct vc4_bo *bo;
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- gem_obj = vma->vm_private_data;
- bo = to_vc4_bo(gem_obj);
+ struct vc4_bo *bo = to_vc4_bo(obj);
if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
@@ -744,72 +720,23 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
}
- /*
- * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
- * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
- * the whole buffer.
- */
- vma->vm_flags &= ~VM_PFNMAP;
-
- /* This ->vm_pgoff dance is needed to make all parties happy:
- * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
- * mem-region, hence the need to set it to zero (the value set by
- * the DRM core is a virtual offset encoding the GEM object-id)
- * - the mmap() core logic needs ->vm_pgoff to be restored to its
- * initial value before returning from this function because it
- * encodes the offset of this GEM in the dev->anon_inode pseudo-file
- * and this information will be used when we invalidate userspace
- * mappings with drm_vma_node_unmap() (called from vc4_gem_purge()).
- */
- vm_pgoff = vma->vm_pgoff;
- vma->vm_pgoff = 0;
- ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
- bo->base.paddr, vma->vm_end - vma->vm_start);
- vma->vm_pgoff = vm_pgoff;
-
- if (ret)
- drm_gem_vm_close(vma);
-
- return ret;
-}
-
-int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- struct vc4_bo *bo = to_vc4_bo(obj);
-
- if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
- DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
- return -EINVAL;
- }
-
- return drm_gem_cma_prime_mmap(obj, vma);
-}
-
-int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
-{
- struct vc4_bo *bo = to_vc4_bo(obj);
-
- if (bo->validated_shader) {
- DRM_DEBUG("mmaping of shader BOs not allowed.\n");
- return -EINVAL;
- }
-
- return drm_gem_cma_prime_vmap(obj, map);
+ return drm_gem_cma_mmap(obj, vma);
}
-struct drm_gem_object *
-vc4_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt)
-{
- struct drm_gem_object *obj;
-
- obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
- if (IS_ERR(obj))
- return obj;
+static const struct vm_operations_struct vc4_vm_ops = {
+ .fault = vc4_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
- return obj;
-}
+static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
+ .free = vc4_free_object,
+ .export = vc4_prime_export,
+ .get_sg_table = drm_gem_cma_get_sg_table,
+ .vmap = drm_gem_cma_vmap,
+ .mmap = vc4_gem_object_mmap,
+ .vm_ops = &vc4_vm_ops,
+};
static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file)
{
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index ea710beb8e00..269390bc586e 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -403,7 +403,9 @@ static void require_hvs_enabled(struct drm_device *dev)
SCALER_DISPCTRL_ENABLE);
}
-static int vc4_crtc_disable(struct drm_crtc *crtc, unsigned int channel)
+static int vc4_crtc_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state,
+ unsigned int channel)
{
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
@@ -435,13 +437,13 @@ static int vc4_crtc_disable(struct drm_crtc *crtc, unsigned int channel)
mdelay(20);
if (vc4_encoder && vc4_encoder->post_crtc_disable)
- vc4_encoder->post_crtc_disable(encoder);
+ vc4_encoder->post_crtc_disable(encoder, state);
vc4_crtc_pixelvalve_reset(crtc);
vc4_hvs_stop_channel(dev, channel);
if (vc4_encoder && vc4_encoder->post_crtc_powerdown)
- vc4_encoder->post_crtc_powerdown(encoder);
+ vc4_encoder->post_crtc_powerdown(encoder, state);
return 0;
}
@@ -468,7 +470,7 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
if (channel < 0)
return 0;
- return vc4_crtc_disable(crtc, channel);
+ return vc4_crtc_disable(crtc, NULL, channel);
}
static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -484,7 +486,7 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
/* Disable vblank irq handling before crtc is disabled. */
drm_crtc_vblank_off(crtc);
- vc4_crtc_disable(crtc, old_vc4_state->assigned_channel);
+ vc4_crtc_disable(crtc, state, old_vc4_state->assigned_channel);
/*
* Make sure we issue a vblank event after disabling the CRTC if
@@ -503,8 +505,6 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
- crtc);
struct drm_device *dev = crtc->dev;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
@@ -517,17 +517,17 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
*/
drm_crtc_vblank_on(crtc);
- vc4_hvs_atomic_enable(crtc, old_state);
+ vc4_hvs_atomic_enable(crtc, state);
if (vc4_encoder->pre_crtc_configure)
- vc4_encoder->pre_crtc_configure(encoder);
+ vc4_encoder->pre_crtc_configure(encoder, state);
vc4_crtc_config_pv(crtc);
CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_EN);
if (vc4_encoder->pre_crtc_enable)
- vc4_encoder->pre_crtc_enable(encoder);
+ vc4_encoder->pre_crtc_enable(encoder, state);
/* When feeding the transposer block the pixelvalve is unneeded and
* should not be enabled.
@@ -536,7 +536,7 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
if (vc4_encoder->post_crtc_enable)
- vc4_encoder->post_crtc_enable(encoder);
+ vc4_encoder->post_crtc_enable(encoder, state);
}
static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc,
@@ -593,7 +593,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_connector_state *conn_state;
int ret, i;
- ret = vc4_hvs_atomic_check(crtc, crtc_state);
+ ret = vc4_hvs_atomic_check(crtc, state);
if (ret)
return ret;
@@ -697,7 +697,6 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
container_of(cb, struct vc4_async_flip_state, cb);
struct drm_crtc *crtc = flip_state->crtc;
struct drm_device *dev = crtc->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane = crtc->primary;
vc4_plane_async_set_fb(plane, flip_state->fb);
@@ -729,8 +728,6 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
}
kfree(flip_state);
-
- up(&vc4->async_modeset);
}
/* Implements async (non-vblank-synced) page flips.
@@ -745,7 +742,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
uint32_t flags)
{
struct drm_device *dev = crtc->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane = crtc->primary;
int ret = 0;
struct vc4_async_flip_state *flip_state;
@@ -774,15 +770,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
flip_state->crtc = crtc;
flip_state->event = event;
- /* Make sure all other async modesetes have landed. */
- ret = down_interruptible(&vc4->async_modeset);
- if (ret) {
- drm_framebuffer_put(fb);
- vc4_bo_dec_usecnt(bo);
- kfree(flip_state);
- return ret;
- }
-
/* Save the current FB before it's replaced by the new one in
* drm_atomic_set_fb_for_plane(). We'll need the old FB in
* vc4_async_page_flip_complete() to decrement the BO usecnt and keep
@@ -884,7 +871,6 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
.reset = vc4_crtc_reset,
.atomic_duplicate_state = vc4_crtc_duplicate_state,
.atomic_destroy_state = vc4_crtc_destroy_state,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 2cd97a39c286..556ad0f02a0d 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -140,17 +140,7 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file)
kfree(vc4file);
}
-static const struct file_operations vc4_drm_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = vc4_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_FOPS(vc4_drm_fops);
static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW),
@@ -190,12 +180,7 @@ static struct drm_driver vc4_drm_driver = {
.gem_create_object = vc4_create_object,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import_sg_table = vc4_prime_import_sg_table,
- .gem_prime_mmap = vc4_prime_mmap,
-
- .dumb_create = vc4_dumb_create,
+ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_dumb_create),
.ioctls = vc4_drm_ioctls,
.num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 43a1af110b3e..a7500716cf3f 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -77,7 +77,6 @@ struct vc4_dev {
struct vc4_hvs *hvs;
struct vc4_v3d *v3d;
struct vc4_dpi *dpi;
- struct vc4_dsi *dsi1;
struct vc4_vec *vec;
struct vc4_txp *txp;
@@ -215,8 +214,6 @@ struct vc4_dev {
struct work_struct reset_work;
} hangcheck;
- struct semaphore async_modeset;
-
struct drm_modeset_lock ctm_state_lock;
struct drm_private_obj ctm_manager;
struct drm_private_obj hvs_channels;
@@ -444,12 +441,12 @@ struct vc4_encoder {
enum vc4_encoder_type type;
u32 clock_select;
- void (*pre_crtc_configure)(struct drm_encoder *encoder);
- void (*pre_crtc_enable)(struct drm_encoder *encoder);
- void (*post_crtc_enable)(struct drm_encoder *encoder);
+ void (*pre_crtc_configure)(struct drm_encoder *encoder, struct drm_atomic_state *state);
+ void (*pre_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
+ void (*post_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
- void (*post_crtc_disable)(struct drm_encoder *encoder);
- void (*post_crtc_powerdown)(struct drm_encoder *encoder);
+ void (*post_crtc_disable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
+ void (*post_crtc_powerdown)(struct drm_encoder *encoder, struct drm_atomic_state *state);
};
static inline struct vc4_encoder *
@@ -785,13 +782,11 @@ struct vc4_validated_shader_info {
/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
-void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
@@ -806,12 +801,6 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
-int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
int vc4_bo_cache_init(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
@@ -916,11 +905,10 @@ void vc4_irq_reset(struct drm_device *dev);
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output);
-int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
-void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
-void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
-void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state);
+int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_dump_state(struct drm_device *dev);
void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel);
void vc4_hvs_mask_underrun(struct drm_device *dev, int channel);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 19aab4e7e209..a55256ed0955 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -306,11 +306,11 @@
# define DSI0_PHY_AFEC0_RESET BIT(11)
# define DSI1_PHY_AFEC0_PD_BG BIT(11)
# define DSI0_PHY_AFEC0_PD BIT(10)
-# define DSI1_PHY_AFEC0_PD_DLANE3 BIT(10)
+# define DSI1_PHY_AFEC0_PD_DLANE1 BIT(10)
# define DSI0_PHY_AFEC0_PD_BG BIT(9)
# define DSI1_PHY_AFEC0_PD_DLANE2 BIT(9)
# define DSI0_PHY_AFEC0_PD_DLANE1 BIT(8)
-# define DSI1_PHY_AFEC0_PD_DLANE1 BIT(8)
+# define DSI1_PHY_AFEC0_PD_DLANE3 BIT(8)
# define DSI_PHY_AFEC0_PTATADJ_MASK VC4_MASK(7, 4)
# define DSI_PHY_AFEC0_PTATADJ_SHIFT 4
# define DSI_PHY_AFEC0_CTATADJ_MASK VC4_MASK(3, 0)
@@ -493,6 +493,18 @@
*/
#define DSI1_ID 0x8c
+struct vc4_dsi_variant {
+ /* Whether we're on bcm2835's DSI0 or DSI1. */
+ unsigned int port;
+
+ bool broken_axi_workaround;
+
+ const char *debugfs_name;
+ const struct debugfs_reg32 *regs;
+ size_t nregs;
+
+};
+
/* General DSI hardware state. */
struct vc4_dsi {
struct platform_device *pdev;
@@ -509,8 +521,7 @@ struct vc4_dsi {
u32 *reg_dma_mem;
dma_addr_t reg_paddr;
- /* Whether we're on bcm2835's DSI0 or DSI1. */
- int port;
+ const struct vc4_dsi_variant *variant;
/* DSI channel for the panel we're connected to. */
u32 channel;
@@ -586,10 +597,10 @@ dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
#define DSI_READ(offset) readl(dsi->regs + (offset))
#define DSI_WRITE(offset, val) dsi_dma_workaround_write(dsi, offset, val)
#define DSI_PORT_READ(offset) \
- DSI_READ(dsi->port ? DSI1_##offset : DSI0_##offset)
+ DSI_READ(dsi->variant->port ? DSI1_##offset : DSI0_##offset)
#define DSI_PORT_WRITE(offset, val) \
- DSI_WRITE(dsi->port ? DSI1_##offset : DSI0_##offset, val)
-#define DSI_PORT_BIT(bit) (dsi->port ? DSI1_##bit : DSI0_##bit)
+ DSI_WRITE(dsi->variant->port ? DSI1_##offset : DSI0_##offset, val)
+#define DSI_PORT_BIT(bit) (dsi->variant->port ? DSI1_##bit : DSI0_##bit)
/* VC4 DSI encoder KMS struct */
struct vc4_dsi_encoder {
@@ -837,7 +848,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
ret = pm_runtime_get_sync(dev);
if (ret) {
- DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->port);
+ DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
return;
}
@@ -871,7 +882,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_PORT_WRITE(STAT, DSI_PORT_READ(STAT));
/* Set AFE CTR00/CTR1 to release powerdown of analog. */
- if (dsi->port == 0) {
+ if (dsi->variant->port == 0) {
u32 afec0 = (VC4_SET_FIELD(7, DSI_PHY_AFEC0_PTATADJ) |
VC4_SET_FIELD(7, DSI_PHY_AFEC0_CTATADJ));
@@ -1017,7 +1028,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_PORT_BIT(PHYC_CLANE_ENABLE) |
((dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ?
0 : DSI_PORT_BIT(PHYC_HS_CLK_CONTINUOUS)) |
- (dsi->port == 0 ?
+ (dsi->variant->port == 0 ?
VC4_SET_FIELD(lpx - 1, DSI0_PHYC_ESC_CLK_LPDT) :
VC4_SET_FIELD(lpx - 1, DSI1_PHYC_ESC_CLK_LPDT)));
@@ -1043,13 +1054,13 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_DISP1_ENABLE);
/* Ungate the block. */
- if (dsi->port == 0)
+ if (dsi->variant->port == 0)
DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI0_CTRL_CTRL0);
else
DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI1_CTRL_EN);
/* Bring AFE out of reset. */
- if (dsi->port == 0) {
+ if (dsi->variant->port == 0) {
} else {
DSI_PORT_WRITE(PHY_AFEC0,
DSI_PORT_READ(PHY_AFEC0) &
@@ -1313,8 +1324,32 @@ static const struct drm_encoder_helper_funcs vc4_dsi_encoder_helper_funcs = {
.mode_fixup = vc4_dsi_encoder_mode_fixup,
};
+static const struct vc4_dsi_variant bcm2711_dsi1_variant = {
+ .port = 1,
+ .debugfs_name = "dsi1_regs",
+ .regs = dsi1_regs,
+ .nregs = ARRAY_SIZE(dsi1_regs),
+};
+
+static const struct vc4_dsi_variant bcm2835_dsi0_variant = {
+ .port = 0,
+ .debugfs_name = "dsi0_regs",
+ .regs = dsi0_regs,
+ .nregs = ARRAY_SIZE(dsi0_regs),
+};
+
+static const struct vc4_dsi_variant bcm2835_dsi1_variant = {
+ .port = 1,
+ .broken_axi_workaround = true,
+ .debugfs_name = "dsi1_regs",
+ .regs = dsi1_regs,
+ .nregs = ARRAY_SIZE(dsi1_regs),
+};
+
static const struct of_device_id vc4_dsi_dt_match[] = {
- { .compatible = "brcm,bcm2835-dsi1", (void *)(uintptr_t)1 },
+ { .compatible = "brcm,bcm2711-dsi1", &bcm2711_dsi1_variant },
+ { .compatible = "brcm,bcm2835-dsi0", &bcm2835_dsi0_variant },
+ { .compatible = "brcm,bcm2835-dsi1", &bcm2835_dsi1_variant },
{}
};
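The vc4_dsi changes above move the per-port differences into vc4_dsi_variant structures and attach them to the compatible strings through of_device_id.data, so the bind path can pick up a variant pointer (dsi->variant = match->data) instead of switching on a raw port number. A generic, hedged sketch of that pattern; the names are illustrative, not from this driver:

#include <linux/mod_devicetable.h>
#include <linux/of_device.h>

/* Sketch: per-compatible variant data hung off of_device_id.data.
 * Struct and names are illustrative, not from the patch above. */
struct example_variant {
	unsigned int port;
	bool broken_axi_workaround;
};

static const struct example_variant example_port0_variant = {
	.port = 0,
};

static const struct of_device_id example_dt_match[] = {
	{ .compatible = "vendor,example-port0", .data = &example_port0_variant },
	{ }
};

/* In probe/bind the matched entry's .data is typically recovered with
 * something like:
 *	match = of_match_device(example_dt_match, dev);
 *	variant = match->data;
 */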
@@ -1325,7 +1360,7 @@ static void dsi_handle_error(struct vc4_dsi *dsi,
if (!(stat & bit))
return;
- DRM_ERROR("DSI%d: %s error\n", dsi->port, type);
+ DRM_ERROR("DSI%d: %s error\n", dsi->variant->port, type);
*ret = IRQ_HANDLED;
}
@@ -1398,12 +1433,12 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
struct device *dev = &dsi->pdev->dev;
const char *parent_name = __clk_get_name(dsi->pll_phy_clock);
static const struct {
- const char *dsi0_name, *dsi1_name;
+ const char *name;
int div;
} phy_clocks[] = {
- { "dsi0_byte", "dsi1_byte", 8 },
- { "dsi0_ddr2", "dsi1_ddr2", 4 },
- { "dsi0_ddr", "dsi1_ddr", 2 },
+ { "byte", 8 },
+ { "ddr2", 4 },
+ { "ddr", 2 },
};
int i;
@@ -1419,8 +1454,12 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
for (i = 0; i < ARRAY_SIZE(phy_clocks); i++) {
struct clk_fixed_factor *fix = &dsi->phy_clocks[i];
struct clk_init_data init;
+ char clk_name[16];
int ret;
+ snprintf(clk_name, sizeof(clk_name),
+ "dsi%u_%s", dsi->variant->port, phy_clocks[i].name);
+
/* We just use core fixed factor clock ops for the PHY
* clocks. The clocks are actually gated by the
* PHY_AFEC0_DDRCLK_EN bits, which we should be
@@ -1437,10 +1476,7 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
memset(&init, 0, sizeof(init));
init.parent_names = &parent_name;
init.num_parents = 1;
- if (dsi->port == 1)
- init.name = phy_clocks[i].dsi1_name;
- else
- init.name = phy_clocks[i].dsi0_name;
+ init.name = clk_name;
init.ops = &clk_fixed_factor_ops;
ret = devm_clk_hw_register(dev, &fix->hw);
@@ -1459,7 +1495,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
struct vc4_dsi_encoder *vc4_dsi_encoder;
struct drm_panel *panel;
@@ -1471,7 +1506,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (!match)
return -ENODEV;
- dsi->port = (uintptr_t)match->data;
+ dsi->variant = match->data;
vc4_dsi_encoder = devm_kzalloc(dev, sizeof(*vc4_dsi_encoder),
GFP_KERNEL);
@@ -1488,13 +1523,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(dsi->regs);
dsi->regset.base = dsi->regs;
- if (dsi->port == 0) {
- dsi->regset.regs = dsi0_regs;
- dsi->regset.nregs = ARRAY_SIZE(dsi0_regs);
- } else {
- dsi->regset.regs = dsi1_regs;
- dsi->regset.nregs = ARRAY_SIZE(dsi1_regs);
- }
+ dsi->regset.regs = dsi->variant->regs;
+ dsi->regset.nregs = dsi->variant->nregs;
if (DSI_PORT_READ(ID) != DSI_ID_VALUE) {
dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
@@ -1502,11 +1532,11 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return -ENODEV;
}
- /* DSI1 has a broken AXI slave that doesn't respond to writes
- * from the ARM. It does handle writes from the DMA engine,
+ /* DSI1 on BCM2835/6/7 has a broken AXI slave that doesn't respond to
+ * writes from the ARM. It does handle writes from the DMA engine,
* so set up a channel for talking to it.
*/
- if (dsi->port == 1) {
+ if (dsi->variant->broken_axi_workaround) {
dsi->reg_dma_mem = dma_alloc_coherent(dev, 4,
&dsi->reg_dma_paddr,
GFP_KERNEL);
@@ -1612,9 +1642,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- if (dsi->port == 1)
- vc4->dsi1 = dsi;
-
drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI);
drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
@@ -1630,10 +1657,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
*/
list_splice_init(&dsi->encoder->bridge_chain, &dsi->bridge_chain);
- if (dsi->port == 0)
- vc4_debugfs_add_regset32(drm, "dsi0_regs", &dsi->regset);
- else
- vc4_debugfs_add_regset32(drm, "dsi1_regs", &dsi->regset);
+ vc4_debugfs_add_regset32(drm, dsi->variant->debugfs_name, &dsi->regset);
pm_runtime_enable(dev);
@@ -1643,8 +1667,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
static void vc4_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
- struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
if (dsi->bridge)
@@ -1656,9 +1678,6 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
*/
list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
drm_encoder_cleanup(dsi->encoder);
-
- if (dsi->port == 1)
- vc4->dsi1 = NULL;
}
static const struct component_ops vc4_dsi_ops = {
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index b641252939d8..445d3bab89e0 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -1026,7 +1026,6 @@ int vc4_queue_seqno_cb(struct drm_device *dev,
void (*func)(struct vc4_seqno_cb *cb))
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int ret = 0;
unsigned long irqflags;
cb->func = func;
@@ -1041,7 +1040,7 @@ int vc4_queue_seqno_cb(struct drm_device *dev,
}
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
- return ret;
+ return 0;
}
/* Scheduled when any job has been completed, this walks the list of
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 98cab0bbe92d..1fda574579af 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -76,12 +76,25 @@
#define VC5_HDMI_VERTB_VSPO_SHIFT 16
#define VC5_HDMI_VERTB_VSPO_MASK VC4_MASK(29, 16)
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_SHIFT 8
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_MASK VC4_MASK(10, 8)
+
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_SHIFT 0
+#define VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_MASK VC4_MASK(3, 0)
+
+#define VC5_HDMI_GCP_CONFIG_GCP_ENABLE BIT(31)
+
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_SHIFT 8
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK VC4_MASK(15, 8)
+
# define VC4_HD_M_SW_RST BIT(2)
# define VC4_HD_M_ENABLE BIT(0)
#define CEC_CLOCK_FREQ 40000
#define VC4_HSM_MID_CLOCK 149985000
+#define HDMI_14_MAX_TMDS_CLK (340 * 1000 * 1000)
+
static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
@@ -119,24 +132,57 @@ static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
HDMI_READ(HDMI_CLOCK_STOP) | VC4_DVP_HT_CLOCK_STOP_PIXEL);
}
+#ifdef CONFIG_DRM_VC4_HDMI_CEC
+static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi)
+{
+ u16 clk_cnt;
+ u32 value;
+
+ value = HDMI_READ(HDMI_CEC_CNTRL_1);
+ value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
+
+ /*
+ * Set the clock divider: the hsm_clock rate and this divider
+ * setting will give a 40 kHz CEC clock.
+ */
+ clk_cnt = clk_get_rate(vc4_hdmi->cec_clock) / CEC_CLOCK_FREQ;
+ value |= clk_cnt << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT;
+ HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
+}
+#else
+static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) {}
+#endif
+
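Editor's note: a minimal standalone sketch (userspace C, hypothetical names) of the divider arithmetic the new helper programs into CEC_CNTRL_1; the driver itself reads the rate from the clk framework.

  #include <stdint.h>

  #define CEC_CLOCK_FREQ 40000 /* 40 kHz CEC tick, as in the driver */

  /* Hypothetical helper mirroring vc4_hdmi_cec_update_clk_div(): the
   * DIV_CLK_CNT field is simply the CEC reference clock rate (hsm_clock
   * on VC4, the dedicated "cec" clock on BCM2711) divided by 40 kHz. */
  static uint16_t cec_clk_divider(unsigned long cec_clock_rate)
  {
          return (uint16_t)(cec_clock_rate / CEC_CLOCK_FREQ);
  }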
static enum drm_connector_status
vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
+ bool connected = false;
if (vc4_hdmi->hpd_gpio) {
if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^
vc4_hdmi->hpd_active_low)
- return connector_status_connected;
- cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
- return connector_status_disconnected;
+ connected = true;
+ } else if (drm_probe_ddc(vc4_hdmi->ddc)) {
+ connected = true;
+ } else if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) {
+ connected = true;
}
- if (drm_probe_ddc(vc4_hdmi->ddc))
- return connector_status_connected;
+ if (connected) {
+ if (connector->status != connector_status_connected) {
+ struct edid *edid = drm_get_edid(connector, vc4_hdmi->ddc);
+
+ if (edid) {
+ cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
+ vc4_hdmi->encoder.hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ kfree(edid);
+ }
+ }
- if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED)
return connector_status_connected;
+ }
+
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
return connector_status_disconnected;
}
@@ -170,16 +216,48 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
static void vc4_hdmi_connector_reset(struct drm_connector *connector)
{
- drm_atomic_helper_connector_reset(connector);
+ struct vc4_hdmi_connector_state *old_state =
+ conn_state_to_vc4_hdmi_conn_state(connector->state);
+ struct vc4_hdmi_connector_state *new_state =
+ kzalloc(sizeof(*new_state), GFP_KERNEL);
+
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+
+ kfree(old_state);
+ __drm_atomic_helper_connector_reset(connector, &new_state->base);
+
+ if (!new_state)
+ return;
+
+ new_state->base.max_bpc = 8;
+ new_state->base.max_requested_bpc = 8;
drm_atomic_helper_connector_tv_reset(connector);
}
+static struct drm_connector_state *
+vc4_hdmi_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct drm_connector_state *conn_state = connector->state;
+ struct vc4_hdmi_connector_state *vc4_state = conn_state_to_vc4_hdmi_conn_state(conn_state);
+ struct vc4_hdmi_connector_state *new_state;
+
+ new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
+ if (!new_state)
+ return NULL;
+
+ new_state->pixel_rate = vc4_state->pixel_rate;
+ __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
+
+ return &new_state->base;
+}
+
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
.detect = vc4_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = vc4_hdmi_connector_destroy,
.reset = vc4_hdmi_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_duplicate_state = vc4_hdmi_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -200,12 +278,20 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
vc4_hdmi->ddc);
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
+ /*
+ * Some of the properties below require access to state, like bpc.
+ * Allocate some default initial connector state with our reset helper.
+ */
+ if (connector->funcs->reset)
+ connector->funcs->reset(connector);
+
/* Create and attach TV margin props to this connector. */
ret = drm_mode_create_tv_margin_properties(dev);
if (ret)
return ret;
drm_connector_attach_tv_margin_properties(connector);
+ drm_connector_attach_max_bpc_property(connector, 8, 12);
connector->polled = (DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT);
@@ -219,7 +305,8 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
}
static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
- enum hdmi_infoframe_type type)
+ enum hdmi_infoframe_type type,
+ bool poll)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
u32 packet_id = type - 0x80;
@@ -227,6 +314,9 @@ static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id));
+ if (!poll)
+ return 0;
+
return wait_for(!(HDMI_READ(HDMI_RAM_PACKET_STATUS) &
BIT(packet_id)), 100);
}
@@ -253,7 +343,7 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
if (len < 0)
return;
- ret = vc4_hdmi_stop_packet(encoder, frame->any.type);
+ ret = vc4_hdmi_stop_packet(encoder, frame->any.type, true);
if (ret) {
DRM_ERROR("Failed to wait for infoframe to go idle: %d\n", ret);
return;
@@ -356,7 +446,8 @@ static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder)
vc4_hdmi_set_audio_infoframe(encoder);
}
-static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
@@ -369,7 +460,8 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder)
HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX);
}
-static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
int ret;
@@ -468,6 +560,7 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable)
}
static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *state,
struct drm_display_mode *mode)
{
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -511,7 +604,9 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_VERTB0, vertb_even);
HDMI_WRITE(HDMI_VERTB1, vertb);
}
+
static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *state,
struct drm_display_mode *mode)
{
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -531,6 +626,9 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
mode->crtc_vsync_end -
interlaced,
VC4_HDMI_VERTB_VBP));
+ unsigned char gcp;
+ bool gcp_en;
+ u32 reg;
HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, 0x354021);
HDMI_WRITE(HDMI_HORZA,
@@ -556,6 +654,39 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_VERTB0, vertb_even);
HDMI_WRITE(HDMI_VERTB1, vertb);
+ switch (state->max_bpc) {
+ case 12:
+ gcp = 6;
+ gcp_en = true;
+ break;
+ case 10:
+ gcp = 5;
+ gcp_en = true;
+ break;
+ case 8:
+ default:
+ gcp = 4;
+ gcp_en = false;
+ break;
+ }
+
+ reg = HDMI_READ(HDMI_DEEP_COLOR_CONFIG_1);
+ reg &= ~(VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_MASK |
+ VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_MASK);
+ reg |= VC4_SET_FIELD(2, VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE) |
+ VC4_SET_FIELD(gcp, VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH);
+ HDMI_WRITE(HDMI_DEEP_COLOR_CONFIG_1, reg);
+
+ reg = HDMI_READ(HDMI_GCP_WORD_1);
+ reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK;
+ reg |= VC4_SET_FIELD(gcp, VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1);
+ HDMI_WRITE(HDMI_GCP_WORD_1, reg);
+
+ reg = HDMI_READ(HDMI_GCP_CONFIG);
+ reg &= ~VC5_HDMI_GCP_CONFIG_GCP_ENABLE;
+ reg |= gcp_en ? VC5_HDMI_GCP_CONFIG_GCP_ENABLE : 0;
+ HDMI_WRITE(HDMI_GCP_CONFIG, reg);
+
HDMI_WRITE(HDMI_CLOCK_STOP, 0);
}
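Editor's note: a short sketch (hypothetical names, plain C) of the max_bpc to General Control Packet mapping used in the switch above; CD=4 is 24 bpp (8 bpc), CD=5 is 30 bpp (10 bpc), CD=6 is 36 bpp (12 bpc), and the GCP is only enabled for the deep-color cases.

  struct gcp_cfg {
          unsigned char cd;   /* Color Depth field of the GCP */
          int enable;         /* send the General Control Packet? */
  };

  /* Sketch of the bpc -> GCP mapping; not the driver's API. */
  static struct gcp_cfg gcp_for_bpc(unsigned int max_bpc)
  {
          switch (max_bpc) {
          case 12:
                  return (struct gcp_cfg){ .cd = 6, .enable = 1 };
          case 10:
                  return (struct gcp_cfg){ .cd = 5, .enable = 1 };
          case 8:
          default:
                  return (struct gcp_cfg){ .cd = 4, .enable = 0 };
          }
  }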
@@ -583,8 +714,29 @@ static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi)
"VC4_HDMI_FIFO_CTL_RECENTER_DONE");
}
-static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
+static struct drm_connector_state *
+vc4_hdmi_encoder_get_connector_state(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ unsigned int i;
+
+ for_each_new_connector_in_state(state, connector, conn_state, i) {
+ if (conn_state->best_encoder == encoder)
+ return conn_state;
+ }
+
+ return NULL;
+}
+
+static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *conn_state =
+ vc4_hdmi_encoder_get_connector_state(encoder, state);
+ struct vc4_hdmi_connector_state *vc4_conn_state =
+ conn_state_to_vc4_hdmi_conn_state(conn_state);
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
unsigned long pixel_rate, hsm_rate;
@@ -596,7 +748,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
return;
}
- pixel_rate = mode->clock * 1000 * ((mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1);
+ pixel_rate = vc4_conn_state->pixel_rate;
ret = clk_set_rate(vc4_hdmi->pixel_clock, pixel_rate);
if (ret) {
DRM_ERROR("Failed to set pixel clock rate: %d\n", ret);
@@ -639,6 +791,8 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
return;
}
+ vc4_hdmi_cec_update_clk_div(vc4_hdmi);
+
/*
* FIXME: When the pixel freq is 594MHz (4k60), this needs to be setup
* at 300MHz.
@@ -660,11 +814,8 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
return;
}
- if (vc4_hdmi->variant->reset)
- vc4_hdmi->variant->reset(vc4_hdmi);
-
if (vc4_hdmi->variant->phy_init)
- vc4_hdmi->variant->phy_init(vc4_hdmi, mode);
+ vc4_hdmi->variant->phy_init(vc4_hdmi, vc4_conn_state);
HDMI_WRITE(HDMI_SCHEDULER_CONTROL,
HDMI_READ(HDMI_SCHEDULER_CONTROL) |
@@ -672,10 +823,11 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS);
if (vc4_hdmi->variant->set_timings)
- vc4_hdmi->variant->set_timings(vc4_hdmi, mode);
+ vc4_hdmi->variant->set_timings(vc4_hdmi, conn_state, mode);
}
-static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
@@ -697,7 +849,8 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder)
HDMI_WRITE(HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
}
-static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
@@ -766,6 +919,7 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct vc4_hdmi_connector_state *vc4_state = conn_state_to_vc4_hdmi_conn_state(conn_state);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
unsigned long long pixel_rate = mode->clock * 1000;
@@ -790,9 +944,22 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
pixel_rate = mode->clock * 1000;
}
+ if (conn_state->max_bpc == 12) {
+ pixel_rate = pixel_rate * 150;
+ do_div(pixel_rate, 100);
+ } else if (conn_state->max_bpc == 10) {
+ pixel_rate = pixel_rate * 125;
+ do_div(pixel_rate, 100);
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ pixel_rate = pixel_rate * 2;
+
if (pixel_rate > vc4_hdmi->variant->max_pixel_clock)
return -EINVAL;
+ vc4_state->pixel_rate = pixel_rate;
+
return 0;
}
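Editor's note: the TMDS character rate scales with the chosen bit depth (x1.25 for 10 bpc, x1.5 for 12 bpc) and doubles for double-clocked modes, and the result must stay under the variant limit (340 MHz for HDMI 1.4 on BCM2711). A minimal standalone sketch of that check, with assumed names:

  /* Sketch of the rate check performed in atomic_check above. */
  static int check_pixel_rate(unsigned long long mode_clock_hz,
                              unsigned int max_bpc, int dblclk,
                              unsigned long long max_rate_hz)
  {
          unsigned long long rate = mode_clock_hz;

          if (max_bpc == 12)
                  rate = rate * 150 / 100;
          else if (max_bpc == 10)
                  rate = rate * 125 / 100;

          if (dblclk)
                  rate *= 2;

          return rate > max_rate_hz ? -1 : 0;
  }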
@@ -936,7 +1103,7 @@ static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi)
int ret;
vc4_hdmi->audio.streaming = false;
- ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO);
+ ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO, false);
if (ret)
dev_err(dev, "Failed to stop audio infoframe: %d\n", ret);
@@ -1288,15 +1455,22 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
}
#ifdef CONFIG_DRM_VC4_HDMI_CEC
-static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv)
+static irqreturn_t vc4_cec_irq_handler_rx_thread(int irq, void *priv)
+{
+ struct vc4_hdmi *vc4_hdmi = priv;
+
+ if (vc4_hdmi->cec_rx_msg.len)
+ cec_received_msg(vc4_hdmi->cec_adap,
+ &vc4_hdmi->cec_rx_msg);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vc4_cec_irq_handler_tx_thread(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
- if (vc4_hdmi->cec_irq_was_rx) {
- if (vc4_hdmi->cec_rx_msg.len)
- cec_received_msg(vc4_hdmi->cec_adap,
- &vc4_hdmi->cec_rx_msg);
- } else if (vc4_hdmi->cec_tx_ok) {
+ if (vc4_hdmi->cec_tx_ok) {
cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_OK,
0, 0, 0, 0);
} else {
@@ -1310,15 +1484,35 @@ static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv)
return IRQ_HANDLED;
}
+static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv)
+{
+ struct vc4_hdmi *vc4_hdmi = priv;
+ irqreturn_t ret;
+
+ if (vc4_hdmi->cec_irq_was_rx)
+ ret = vc4_cec_irq_handler_rx_thread(irq, priv);
+ else
+ ret = vc4_cec_irq_handler_tx_thread(irq, priv);
+
+ return ret;
+}
+
static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1)
{
+ struct drm_device *dev = vc4_hdmi->connector.dev;
struct cec_msg *msg = &vc4_hdmi->cec_rx_msg;
unsigned int i;
msg->len = 1 + ((cntrl1 & VC4_HDMI_CEC_REC_WRD_CNT_MASK) >>
VC4_HDMI_CEC_REC_WRD_CNT_SHIFT);
+
+ if (msg->len > 16) {
+ drm_err(dev, "Attempting to read too much data (%d)\n", msg->len);
+ return;
+ }
+
for (i = 0; i < msg->len; i += 4) {
- u32 val = HDMI_READ(HDMI_CEC_RX_DATA_1 + i);
+ u32 val = HDMI_READ(HDMI_CEC_RX_DATA_1 + (i >> 2));
msg->msg[i] = val & 0xff;
msg->msg[i + 1] = (val >> 8) & 0xff;
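Editor's note: each 32-bit CEC RX_DATA register carries four message bytes, so the register index advances once per four payload bytes — which is what the (i >> 2) fix above expresses. A standalone sketch of the unpacking, with hypothetical names:

  #include <stdint.h>

  /* Sketch of the unpacking done in vc4_cec_read_msg(). */
  static void unpack_cec_words(const uint32_t *rx_words, uint8_t *msg,
                               unsigned int len)
  {
          unsigned int i;

          for (i = 0; i < len; i += 4) {
                  uint32_t val = rx_words[i >> 2];

                  msg[i] = val & 0xff;
                  if (i + 1 < len)
                          msg[i + 1] = (val >> 8) & 0xff;
                  if (i + 2 < len)
                          msg[i + 2] = (val >> 16) & 0xff;
                  if (i + 3 < len)
                          msg[i + 3] = (val >> 24) & 0xff;
          }
  }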
@@ -1327,31 +1521,55 @@ static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1)
}
}
+static irqreturn_t vc4_cec_irq_handler_tx_bare(int irq, void *priv)
+{
+ struct vc4_hdmi *vc4_hdmi = priv;
+ u32 cntrl1;
+
+ cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
+ vc4_hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD;
+ cntrl1 &= ~VC4_HDMI_CEC_START_XMIT_BEGIN;
+ HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t vc4_cec_irq_handler_rx_bare(int irq, void *priv)
+{
+ struct vc4_hdmi *vc4_hdmi = priv;
+ u32 cntrl1;
+
+ vc4_hdmi->cec_rx_msg.len = 0;
+ cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
+ vc4_cec_read_msg(vc4_hdmi, cntrl1);
+ cntrl1 |= VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
+ HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
+ cntrl1 &= ~VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
+
+ HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
+
+ return IRQ_WAKE_THREAD;
+}
+
static irqreturn_t vc4_cec_irq_handler(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
u32 stat = HDMI_READ(HDMI_CEC_CPU_STATUS);
- u32 cntrl1, cntrl5;
+ irqreturn_t ret;
+ u32 cntrl5;
if (!(stat & VC4_HDMI_CPU_CEC))
return IRQ_NONE;
- vc4_hdmi->cec_rx_msg.len = 0;
- cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
+
cntrl5 = HDMI_READ(HDMI_CEC_CNTRL_5);
vc4_hdmi->cec_irq_was_rx = cntrl5 & VC4_HDMI_CEC_RX_CEC_INT;
- if (vc4_hdmi->cec_irq_was_rx) {
- vc4_cec_read_msg(vc4_hdmi, cntrl1);
- cntrl1 |= VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
- HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
- cntrl1 &= ~VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
- } else {
- vc4_hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD;
- cntrl1 &= ~VC4_HDMI_CEC_START_XMIT_BEGIN;
- }
- HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
- HDMI_WRITE(HDMI_CEC_CPU_CLEAR, VC4_HDMI_CPU_CEC);
+ if (vc4_hdmi->cec_irq_was_rx)
+ ret = vc4_cec_irq_handler_rx_bare(irq, priv);
+ else
+ ret = vc4_cec_irq_handler_tx_bare(irq, priv);
- return IRQ_WAKE_THREAD;
+ HDMI_WRITE(HDMI_CEC_CPU_CLEAR, VC4_HDMI_CPU_CEC);
+ return ret;
}
static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
@@ -1388,9 +1606,11 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) |
((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT));
- HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC);
+ if (!vc4_hdmi->variant->external_irq_controller)
+ HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC);
} else {
- HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC);
+ if (!vc4_hdmi->variant->external_irq_controller)
+ HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC);
HDMI_WRITE(HDMI_CEC_CNTRL_5, val |
VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
}
@@ -1411,11 +1631,17 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct drm_device *dev = vc4_hdmi->connector.dev;
u32 val;
unsigned int i;
+ if (msg->len > 16) {
+ drm_err(dev, "Attempting to transmit too much data (%d)\n", msg->len);
+ return -ENOMEM;
+ }
+
for (i = 0; i < msg->len; i += 4)
- HDMI_WRITE(HDMI_CEC_TX_DATA_1 + i,
+ HDMI_WRITE(HDMI_CEC_TX_DATA_1 + (i >> 2),
(msg->msg[i]) |
(msg->msg[i + 1] << 8) |
(msg->msg[i + 2] << 16) |
@@ -1442,11 +1668,14 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
{
struct cec_connector_info conn_info;
struct platform_device *pdev = vc4_hdmi->pdev;
+ struct device *dev = &pdev->dev;
u32 value;
int ret;
- if (!vc4_hdmi->variant->cec_available)
+ if (!of_find_property(dev->of_node, "interrupts", NULL)) {
+ dev_warn(dev, "'interrupts' DT property is missing, no CEC\n");
return 0;
+ }
vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
vc4_hdmi, "vc4",
@@ -1459,23 +1688,39 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector);
cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info);
- HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
value = HDMI_READ(HDMI_CEC_CNTRL_1);
- value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
- /*
- * Set the logical address to Unregistered and set the clock
- * divider: the hsm_clock rate and this divider setting will
- * give a 40 kHz CEC clock.
- */
- value |= VC4_HDMI_CEC_ADDR_MASK |
- (4091 << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT);
+ /* Set the logical address to Unregistered */
+ value |= VC4_HDMI_CEC_ADDR_MASK;
HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
- ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0),
- vc4_cec_irq_handler,
- vc4_cec_irq_handler_thread, 0,
- "vc4 hdmi cec", vc4_hdmi);
- if (ret)
- goto err_delete_cec_adap;
+
+ vc4_hdmi_cec_update_clk_div(vc4_hdmi);
+
+ if (vc4_hdmi->variant->external_irq_controller) {
+ ret = devm_request_threaded_irq(&pdev->dev,
+ platform_get_irq_byname(pdev, "cec-rx"),
+ vc4_cec_irq_handler_rx_bare,
+ vc4_cec_irq_handler_rx_thread, 0,
+ "vc4 hdmi cec rx", vc4_hdmi);
+ if (ret)
+ goto err_delete_cec_adap;
+
+ ret = devm_request_threaded_irq(&pdev->dev,
+ platform_get_irq_byname(pdev, "cec-tx"),
+ vc4_cec_irq_handler_tx_bare,
+ vc4_cec_irq_handler_tx_thread, 0,
+ "vc4 hdmi cec tx", vc4_hdmi);
+ if (ret)
+ goto err_delete_cec_adap;
+ } else {
+ HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
+
+ ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0),
+ vc4_cec_irq_handler,
+ vc4_cec_irq_handler_thread, 0,
+ "vc4 hdmi cec", vc4_hdmi);
+ if (ret)
+ goto err_delete_cec_adap;
+ }
ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev);
if (ret < 0)
@@ -1575,6 +1820,7 @@ static int vc4_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return PTR_ERR(vc4_hdmi->hsm_clock);
}
vc4_hdmi->audio_clock = vc4_hdmi->hsm_clock;
+ vc4_hdmi->cec_clock = vc4_hdmi->hsm_clock;
return 0;
}
@@ -1668,6 +1914,12 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return PTR_ERR(vc4_hdmi->audio_clock);
}
+ vc4_hdmi->cec_clock = devm_clk_get(dev, "cec");
+ if (IS_ERR(vc4_hdmi->cec_clock)) {
+ DRM_ERROR("Failed to get CEC clock\n");
+ return PTR_ERR(vc4_hdmi->cec_clock);
+ }
+
vc4_hdmi->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(vc4_hdmi->reset)) {
DRM_ERROR("Failed to get HDMI reset line\n");
@@ -1740,6 +1992,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
vc4_hdmi->disable_wifi_frequencies =
of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence");
+ if (vc4_hdmi->variant->reset)
+ vc4_hdmi->variant->reset(vc4_hdmi);
+
pm_runtime_enable(dev);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
@@ -1835,7 +2090,6 @@ static const struct vc4_hdmi_variant bcm2835_variant = {
.debugfs_name = "hdmi_regs",
.card_name = "vc4-hdmi",
.max_pixel_clock = 162000000,
- .cec_available = true,
.registers = vc4_hdmi_fields,
.num_registers = ARRAY_SIZE(vc4_hdmi_fields),
@@ -1854,7 +2108,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
.encoder_type = VC4_ENCODER_TYPE_HDMI0,
.debugfs_name = "hdmi0_regs",
.card_name = "vc4-hdmi-0",
- .max_pixel_clock = 297000000,
+ .max_pixel_clock = HDMI_14_MAX_TMDS_CLK,
.registers = vc5_hdmi_hdmi0_fields,
.num_registers = ARRAY_SIZE(vc5_hdmi_hdmi0_fields),
.phy_lane_mapping = {
@@ -1864,6 +2118,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
PHY_LANE_CK,
},
.unsupported_odd_h_timings = true,
+ .external_irq_controller = true,
.init_resources = vc5_hdmi_init_resources,
.csc_setup = vc5_hdmi_csc_setup,
@@ -1880,7 +2135,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
.encoder_type = VC4_ENCODER_TYPE_HDMI1,
.debugfs_name = "hdmi1_regs",
.card_name = "vc4-hdmi-1",
- .max_pixel_clock = 297000000,
+ .max_pixel_clock = HDMI_14_MAX_TMDS_CLK,
.registers = vc5_hdmi_hdmi1_fields,
.num_registers = ARRAY_SIZE(vc5_hdmi_hdmi1_fields),
.phy_lane_mapping = {
@@ -1890,6 +2145,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
PHY_LANE_2,
},
.unsupported_odd_h_timings = true,
+ .external_irq_controller = true,
.init_resources = vc5_hdmi_init_resources,
.csc_setup = vc5_hdmi_csc_setup,
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 0526a9cf608a..3cebd1fd00fc 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -21,10 +21,9 @@ to_vc4_hdmi_encoder(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_hdmi_encoder, base.base);
}
-struct drm_display_mode;
-
struct vc4_hdmi;
struct vc4_hdmi_register;
+struct vc4_hdmi_connector_state;
enum vc4_hdmi_phy_channel {
PHY_LANE_0 = 0,
@@ -43,9 +42,6 @@ struct vc4_hdmi_variant {
/* Filename to expose the registers in debugfs */
const char *debugfs_name;
- /* Set to true when the CEC support is available */
- bool cec_available;
-
/* Maximum pixel clock supported by the controller (in Hz) */
unsigned long long max_pixel_clock;
@@ -65,6 +61,13 @@ struct vc4_hdmi_variant {
/* The BCM2711 cannot deal with odd horizontal pixel timings */
bool unsupported_odd_h_timings;
+ /*
+ * The BCM2711 CEC/hotplug IRQ controller is shared between the
+ * two HDMI controllers, and we have a proper irqchip driver for
+ * it.
+ */
+ bool external_irq_controller;
+
/* Callback to get the resources (memory region, interrupts,
* clocks, etc) for that variant.
*/
@@ -78,11 +81,12 @@ struct vc4_hdmi_variant {
/* Callback to configure the video timings in the HDMI block */
void (*set_timings)(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *state,
struct drm_display_mode *mode);
- /* Callback to initialize the PHY according to the mode */
+ /* Callback to initialize the PHY according to the connector state */
void (*phy_init)(struct vc4_hdmi *vc4_hdmi,
- struct drm_display_mode *mode);
+ struct vc4_hdmi_connector_state *vc4_conn_state);
/* Callback to disable the PHY */
void (*phy_disable)(struct vc4_hdmi *vc4_hdmi);
@@ -155,6 +159,7 @@ struct vc4_hdmi {
bool cec_tx_ok;
bool cec_irq_was_rx;
+ struct clk *cec_clock;
struct clk *pixel_clock;
struct clk *hsm_clock;
struct clk *audio_clock;
@@ -180,14 +185,25 @@ encoder_to_vc4_hdmi(struct drm_encoder *encoder)
return container_of(_encoder, struct vc4_hdmi, encoder);
}
+struct vc4_hdmi_connector_state {
+ struct drm_connector_state base;
+ unsigned long long pixel_rate;
+};
+
+static inline struct vc4_hdmi_connector_state *
+conn_state_to_vc4_hdmi_conn_state(struct drm_connector_state *conn_state)
+{
+ return container_of(conn_state, struct vc4_hdmi_connector_state, base);
+}
+
void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
- struct drm_display_mode *mode);
+ struct vc4_hdmi_connector_state *vc4_conn_state);
void vc4_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
void vc4_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi);
void vc4_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
- struct drm_display_mode *mode);
+ struct vc4_hdmi_connector_state *vc4_conn_state);
void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi);
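Editor's note: vc4_hdmi_connector_state above follows the usual subclassed-state pattern — the driver struct embeds the core drm_connector_state as its first member and recovers the wrapper with container_of(), which is all conn_state_to_vc4_hdmi_conn_state() does. A generic standalone illustration with hypothetical types:

  #include <stddef.h>

  #define container_of(ptr, type, member) \
          ((type *)((char *)(ptr) - offsetof(type, member)))

  struct core_state { int dummy; };

  struct wrapped_state {
          struct core_state base;         /* must stay first */
          unsigned long long pixel_rate;  /* driver-private addition */
  };

  static struct wrapped_state *to_wrapped(struct core_state *s)
  {
          return container_of(s, struct wrapped_state, base);
  }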
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
index 057796b54c51..36535480f8e2 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
@@ -127,7 +127,8 @@
#define OSCILLATOR_FREQUENCY 54000000
-void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode)
+void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+ struct vc4_hdmi_connector_state *conn_state)
{
/* PHY should be in reset, like
* vc4_hdmi_encoder_disable() does.
@@ -339,11 +340,12 @@ static void vc5_hdmi_reset_phy(struct vc4_hdmi *vc4_hdmi)
HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, BIT(10));
}
-void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode)
+void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+ struct vc4_hdmi_connector_state *conn_state)
{
const struct phy_lane_settings *chan0_settings, *chan1_settings, *chan2_settings, *clock_settings;
const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
- unsigned long long pixel_freq = mode->clock * 1000;
+ unsigned long long pixel_freq = conn_state->pixel_rate;
unsigned long long vco_freq;
unsigned char word_sel;
u8 vco_sel, vco_div;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
index 96d764ebfe67..e1b58eac766f 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
@@ -29,6 +29,7 @@ enum vc4_hdmi_field {
HDMI_CEC_CPU_MASK_SET,
HDMI_CEC_CPU_MASK_STATUS,
HDMI_CEC_CPU_STATUS,
+ HDMI_CEC_CPU_SET,
/*
* Transmit data, first byte is low byte of the 32-bit reg.
@@ -59,9 +60,12 @@ enum vc4_hdmi_field {
*/
HDMI_CTS_0,
HDMI_CTS_1,
+ HDMI_DEEP_COLOR_CONFIG_1,
HDMI_DVP_CTL,
HDMI_FIFO_CTL,
HDMI_FRAME_COUNT,
+ HDMI_GCP_CONFIG,
+ HDMI_GCP_WORD_1,
HDMI_HORZA,
HDMI_HORZB,
HDMI_HOTPLUG,
@@ -196,9 +200,10 @@ static const struct vc4_hdmi_register __maybe_unused vc4_hdmi_fields[] = {
VC4_HDMI_REG(HDMI_TX_PHY_RESET_CTL, 0x02c0),
VC4_HDMI_REG(HDMI_TX_PHY_CTL_0, 0x02c4),
VC4_HDMI_REG(HDMI_CEC_CPU_STATUS, 0x0340),
+ VC4_HDMI_REG(HDMI_CEC_CPU_SET, 0x0344),
VC4_HDMI_REG(HDMI_CEC_CPU_CLEAR, 0x0348),
VC4_HDMI_REG(HDMI_CEC_CPU_MASK_STATUS, 0x034c),
- VC4_HDMI_REG(HDMI_CEC_CPU_MASK_SET, 0x034c),
+ VC4_HDMI_REG(HDMI_CEC_CPU_MASK_SET, 0x0350),
VC4_HDMI_REG(HDMI_CEC_CPU_MASK_CLEAR, 0x0354),
VC4_HDMI_REG(HDMI_RAM_PACKET_START, 0x0400),
};
@@ -229,6 +234,9 @@ static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi0_fields[] = {
VC4_HDMI_REG(HDMI_VERTB1, 0x0f8),
VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c),
VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0),
+ VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x170),
+ VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x178),
+ VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x17c),
VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8),
VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
@@ -305,6 +313,9 @@ static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi1_fields[] = {
VC4_HDMI_REG(HDMI_VERTB1, 0x0f8),
VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c),
VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0),
+ VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x170),
+ VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x178),
+ VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x17c),
VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8),
VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index cccd341e5d67..c239045e05d6 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -326,10 +326,10 @@ void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int chan)
SCALER_DISPSTATX_EMPTY);
}
-int vc4_hvs_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
- struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane;
@@ -341,10 +341,10 @@ int vc4_hvs_atomic_check(struct drm_crtc *crtc,
/* The pixelvalve can only feed one encoder (and encoders are
* 1:1 with connectors.)
*/
- if (hweight32(state->connector_mask) > 1)
+ if (hweight32(crtc_state->connector_mask) > 1)
return -EINVAL;
- drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state)
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state)
dlist_count += vc4_plane_dlist_size(plane_state);
dlist_count++; /* Account for SCALER_CTL0_END. */
@@ -391,11 +391,12 @@ static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
}
void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(new_crtc_state);
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
bool oneshot = vc4_state->feed_txp;
@@ -404,9 +405,10 @@ void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
}
void vc4_hvs_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(old_state);
unsigned int chan = vc4_state->assigned_channel;
@@ -620,11 +622,11 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
* for now we just allocate globally.
*/
if (!hvs->hvs5)
- /* 96kB */
- drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024);
+ /* 48k words of 2x12-bit pixels */
+ drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
else
- /* 70k words */
- drm_mm_init(&hvs->lbm_mm, 0, 70 * 2 * 1024);
+ /* 60k words of 4x12-bit pixels */
+ drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);
/* Upload filter kernels. We only have the one for now, so we
* keep it around for the lifetime of the driver.
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index ba310c0ab5f6..f09254c2497d 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -39,7 +39,11 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
struct vc4_hvs_state {
struct drm_private_state base;
- unsigned int unassigned_channels;
+
+ struct {
+ unsigned in_use: 1;
+ struct drm_crtc_commit *pending_commit;
+ } fifo_state[HVS_NUM_CHANNELS];
};
static struct vc4_hvs_state *
@@ -183,6 +187,32 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
}
static struct vc4_hvs_state *
+vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_vc4_hvs_state(priv_state);
+}
+
+static struct vc4_hvs_state *
+vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_vc4_hvs_state(priv_state);
+}
+
+static struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(state->dev);
@@ -302,14 +332,15 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
}
}
-static void
-vc4_atomic_complete_commit(struct drm_atomic_state *state)
+static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
+ struct vc4_hvs_state *old_hvs_state;
int i;
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -325,9 +356,35 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
if (vc4->hvs->hvs5)
clk_set_min_rate(hvs->core_clk, 500000000);
- drm_atomic_helper_wait_for_fences(dev, state, false);
+ old_hvs_state = vc4_hvs_get_old_global_state(state);
+ if (!old_hvs_state)
+ return;
+
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
+ struct vc4_crtc_state *vc4_crtc_state =
+ to_vc4_crtc_state(old_crtc_state);
+ struct drm_crtc_commit *commit;
+ unsigned int channel = vc4_crtc_state->assigned_channel;
+ unsigned long done;
+
+ if (channel == VC4_HVS_CHANNEL_DISABLED)
+ continue;
+
+ if (!old_hvs_state->fifo_state[channel].in_use)
+ continue;
+
+ commit = old_hvs_state->fifo_state[i].pending_commit;
+ commit = old_hvs_state->fifo_state[channel].pending_commit;
+ if (!commit)
+ continue;
- drm_atomic_helper_wait_for_dependencies(state);
+ done = wait_for_completion_timeout(&commit->hw_done, 10 * HZ);
+ if (!done)
+ drm_err(dev, "Timed out waiting for hw_done\n");
+
+ done = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
+ if (!done)
+ drm_err(dev, "Timed out waiting for flip_done\n");
+ }
drm_atomic_helper_commit_modeset_disables(dev, state);
@@ -350,125 +407,37 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
drm_atomic_helper_cleanup_planes(dev, state);
- drm_atomic_helper_commit_cleanup_done(state);
-
if (vc4->hvs->hvs5)
clk_set_min_rate(hvs->core_clk, 0);
-
- drm_atomic_state_put(state);
-
- up(&vc4->async_modeset);
-}
-
-static void commit_work(struct work_struct *work)
-{
- struct drm_atomic_state *state = container_of(work,
- struct drm_atomic_state,
- commit_work);
- vc4_atomic_complete_commit(state);
}
-/**
- * vc4_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails. For
- * now this doesn't implement asynchronous commits.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-static int vc4_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
+static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- int ret;
-
- if (state->async_update) {
- ret = down_interruptible(&vc4->async_modeset);
- if (ret)
- return ret;
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret) {
- up(&vc4->async_modeset);
- return ret;
- }
-
- drm_atomic_helper_async_commit(dev, state);
-
- drm_atomic_helper_cleanup_planes(dev, state);
-
- up(&vc4->async_modeset);
-
- return 0;
- }
+ struct drm_crtc_state *crtc_state;
+ struct vc4_hvs_state *hvs_state;
+ struct drm_crtc *crtc;
+ unsigned int i;
- /* We know for sure we don't want an async update here. Set
- * state->legacy_cursor_update to false to prevent
- * drm_atomic_helper_setup_commit() from auto-completing
- * commit->flip_done.
- */
- state->legacy_cursor_update = false;
- ret = drm_atomic_helper_setup_commit(state, nonblock);
- if (ret)
- return ret;
+ hvs_state = vc4_hvs_get_new_global_state(state);
+ if (!hvs_state)
+ return -EINVAL;
- INIT_WORK(&state->commit_work, commit_work);
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ struct vc4_crtc_state *vc4_crtc_state =
+ to_vc4_crtc_state(crtc_state);
+ unsigned int channel =
+ vc4_crtc_state->assigned_channel;
- ret = down_interruptible(&vc4->async_modeset);
- if (ret)
- return ret;
+ if (channel == VC4_HVS_CHANNEL_DISABLED)
+ continue;
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret) {
- up(&vc4->async_modeset);
- return ret;
- }
+ if (!hvs_state->fifo_state[channel].in_use)
+ continue;
- if (!nonblock) {
- ret = drm_atomic_helper_wait_for_fences(dev, state, true);
- if (ret) {
- drm_atomic_helper_cleanup_planes(dev, state);
- up(&vc4->async_modeset);
- return ret;
- }
+ hvs_state->fifo_state[channel].pending_commit =
+ drm_crtc_commit_get(crtc_state->commit);
}
- /*
- * This is the point of no return - everything below never fails except
- * when the hw goes bonghits. Which means we can commit the new state on
- * the software side now.
- */
-
- BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
-
- /*
- * Everything below can be run asynchronously without the need to grab
- * any modeset locks at all under one condition: It must be guaranteed
- * that the asynchronous work has either been cancelled (if the driver
- * supports it, which at least requires that the framebuffers get
- * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
- * before the new state gets committed on the software side with
- * drm_atomic_helper_swap_state().
- *
- * This scheme allows new atomic state updates to be prepared and
- * checked in parallel to the asynchronous completion of the previous
- * update. Which is important since compositors need to figure out the
- * composition of the next frame right after having submitted the
- * current layout.
- */
-
- drm_atomic_state_get(state);
- if (nonblock)
- queue_work(system_unbound_wq, &state->commit_work);
- else
- vc4_atomic_complete_commit(state);
-
return 0;
}
@@ -697,6 +666,7 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
struct vc4_hvs_state *state;
+ unsigned int i;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
@@ -704,7 +674,16 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
- state->unassigned_channels = old_state->unassigned_channels;
+
+ for (i = 0; i < HVS_NUM_CHANNELS; i++) {
+ state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
+
+ if (!old_state->fifo_state[i].pending_commit)
+ continue;
+
+ state->fifo_state[i].pending_commit =
+ drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
+ }
return &state->base;
}
@@ -713,6 +692,14 @@ static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
+ unsigned int i;
+
+ for (i = 0; i < HVS_NUM_CHANNELS; i++) {
+ if (!hvs_state->fifo_state[i].pending_commit)
+ continue;
+
+ drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
+ }
kfree(hvs_state);
}
@@ -737,7 +724,6 @@ static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
if (!state)
return -ENOMEM;
- state->unassigned_channels = GENMASK(HVS_NUM_CHANNELS - 1, 0);
drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
&state->base,
&vc4_hvs_state_funcs);
@@ -781,12 +767,17 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
struct vc4_hvs_state *hvs_new_state;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
+ unsigned int unassigned_channels = 0;
unsigned int i;
hvs_new_state = vc4_hvs_get_global_state(state);
if (!hvs_new_state)
return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
+ if (!hvs_new_state->fifo_state[i].in_use)
+ unassigned_channels |= BIT(i);
+
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct vc4_crtc_state *old_vc4_crtc_state =
to_vc4_crtc_state(old_crtc_state);
@@ -794,6 +785,7 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
to_vc4_crtc_state(new_crtc_state);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
unsigned int matching_channels;
+ unsigned int channel;
/* Nothing to do here, let's skip it */
if (old_crtc_state->enable == new_crtc_state->enable)
@@ -804,7 +796,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
/* If we're disabling our CRTC, we put back our channel */
if (!new_crtc_state->enable) {
- hvs_new_state->unassigned_channels |= BIT(old_vc4_crtc_state->assigned_channel);
+ channel = old_vc4_crtc_state->assigned_channel;
+ hvs_new_state->fifo_state[channel].in_use = false;
new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
continue;
}
@@ -833,15 +826,14 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
* the future, we will need to have something smarter,
* but it works so far.
*/
- matching_channels = hvs_new_state->unassigned_channels & vc4_crtc->data->hvs_available_channels;
- if (matching_channels) {
- unsigned int channel = ffs(matching_channels) - 1;
-
- new_vc4_crtc_state->assigned_channel = channel;
- hvs_new_state->unassigned_channels &= ~BIT(channel);
- } else {
+ matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
+ if (!matching_channels)
return -EINVAL;
- }
+
+ channel = ffs(matching_channels) - 1;
+ new_vc4_crtc_state->assigned_channel = channel;
+ unassigned_channels &= ~BIT(channel);
+ hvs_new_state->fifo_state[channel].in_use = true;
}
return 0;
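Editor's note: with unassigned_channels rebuilt from the per-FIFO in_use flags, each enabling CRTC still takes the lowest-numbered free channel it can reach. A minimal sketch of that selection with assumed names:

  #include <strings.h> /* ffs() */

  /* Intersect the free-channel bitmask with the channels this CRTC can
   * use and take the lowest set bit, or fail if none match. */
  static int pick_hvs_channel(unsigned int unassigned, unsigned int available)
  {
          unsigned int matching = unassigned & available;

          if (!matching)
                  return -1;

          return ffs(matching) - 1;
  }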
@@ -867,9 +859,14 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
return vc4_load_tracker_atomic_check(state);
}
+static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
+ .atomic_commit_setup = vc4_atomic_commit_setup,
+ .atomic_commit_tail = vc4_atomic_commit_tail,
+};
+
static const struct drm_mode_config_funcs vc4_mode_funcs = {
.atomic_check = vc4_atomic_check,
- .atomic_commit = vc4_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
.fb_create = vc4_fb_create,
};
@@ -889,8 +886,6 @@ int vc4_kms_load(struct drm_device *dev)
vc4->load_tracker_enabled = true;
}
- sema_init(&vc4->async_modeset, 1);
-
/* Set support for vblank irq fast disable, before drm_vblank_init() */
dev->vblank_disable_immediate = true;
@@ -910,6 +905,7 @@ int vc4_kms_load(struct drm_device *dev)
}
dev->mode_config.funcs = &vc4_mode_funcs;
+ dev->mode_config.helper_private = &vc4_mode_config_helpers;
dev->mode_config.preferred_depth = 24;
dev->mode_config.async_page_flip = true;
dev->mode_config.allow_fb_modifiers = true;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 6b39cc2ca18d..7322169c0682 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -220,7 +220,7 @@ static void vc4_plane_reset(struct drm_plane *plane)
__drm_atomic_helper_plane_reset(plane, &vc4_state->base);
}
-static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
+static void vc4_dlist_counter_increment(struct vc4_plane_state *vc4_state)
{
if (vc4_state->dlist_count == vc4_state->dlist_size) {
u32 new_size = max(4u, vc4_state->dlist_count * 2);
@@ -235,7 +235,15 @@ static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
vc4_state->dlist_size = new_size;
}
- vc4_state->dlist[vc4_state->dlist_count++] = val;
+ vc4_state->dlist_count++;
+}
+
+static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
+{
+ unsigned int idx = vc4_state->dlist_count;
+
+ vc4_dlist_counter_increment(vc4_state);
+ vc4_state->dlist[idx] = val;
}
/* Returns the scl0/scl1 field based on whether the dimensions need to
@@ -437,6 +445,7 @@ static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
static u32 vc4_lbm_size(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
u32 pix_per_line;
u32 lbm;
@@ -472,7 +481,11 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
lbm = pix_per_line * 16;
}
- lbm = roundup(lbm, 32);
+ /* Align it to 64 or 128 (hvs5) bytes */
+ lbm = roundup(lbm, vc4->hvs->hvs5 ? 128 : 64);
+
+ /* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
+ lbm /= vc4->hvs->hvs5 ? 4 : 2;
return lbm;
}
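Editor's note: the LBM requirement is now expressed in words rather than bytes — rounded up to the allocation granularity (64 bytes on HVS4, 128 on HVS5) and then divided by the pixels held per LBM word (2 or 4), matching the smaller word-sized pools created in vc4_hvs.c above. A sketch of that conversion with a hypothetical helper:

  /* Sketch only; the driver computes this inline in vc4_lbm_size(). */
  static unsigned int lbm_to_words(unsigned int lbm, int is_hvs5)
  {
          unsigned int align = is_hvs5 ? 128 : 64;
          unsigned int rounded = (lbm + align - 1) / align * align;

          return rounded / (is_hvs5 ? 4 : 2);
  }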
@@ -912,9 +925,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
if (!vc4_state->is_unity) {
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(vc4_state->crtc_w,
- SCALER_POS1_SCL_WIDTH) |
+ SCALER5_POS1_SCL_WIDTH) |
VC4_SET_FIELD(vc4_state->crtc_h,
- SCALER_POS1_SCL_HEIGHT));
+ SCALER5_POS1_SCL_HEIGHT));
}
/* Position Word 2: Source Image Size */
@@ -973,8 +986,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
* be set when calling vc4_plane_allocate_lbm().
*/
if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
- vc4_state->y_scaling[1] != VC4_SCALING_NONE)
- vc4_state->lbm_offset = vc4_state->dlist_count++;
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
+ vc4_state->lbm_offset = vc4_state->dlist_count;
+ vc4_dlist_counter_increment(vc4_state);
+ }
if (num_planes > 1) {
/* Emit Cb/Cr as channel 0 and Y as channel
@@ -1268,11 +1283,6 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
.atomic_async_update = vc4_plane_atomic_async_update,
};
-static void vc4_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_cleanup(plane);
-}
-
static bool vc4_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
@@ -1323,7 +1333,7 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
static const struct drm_plane_funcs vc4_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vc4_plane_destroy,
+ .destroy = drm_plane_cleanup,
.set_property = NULL,
.reset = vc4_plane_reset,
.atomic_duplicate_state = vc4_plane_duplicate_state,
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 8aa5220885f4..c0122d83b651 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -382,7 +382,6 @@ static const struct drm_crtc_funcs vc4_txp_crtc_funcs = {
.reset = vc4_crtc_reset,
.atomic_duplicate_state = vc4_crtc_duplicate_state,
.atomic_destroy_state = vc4_crtc_destroy_state,
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
.enable_vblank = vc4_txp_enable_vblank,
.disable_vblank = vc4_txp_disable_vblank,
};
@@ -395,7 +394,7 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc,
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
int ret;
- ret = vc4_hvs_atomic_check(crtc, crtc_state);
+ ret = vc4_hvs_atomic_check(crtc, state);
if (ret)
return ret;
@@ -408,23 +407,19 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc,
static void vc4_txp_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
- crtc);
drm_crtc_vblank_on(crtc);
- vc4_hvs_atomic_enable(crtc, old_state);
+ vc4_hvs_atomic_enable(crtc, state);
}
static void vc4_txp_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
- crtc);
struct drm_device *dev = crtc->dev;
/* Disable vblank irq handling before crtc is disabled. */
drm_crtc_vblank_off(crtc);
- vc4_hvs_atomic_disable(crtc, old_state);
+ vc4_hvs_atomic_disable(crtc, state);
/*
* Make sure we issue a vblank event after disabling the CRTC if
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index f8635ccaf9a1..a0e75f1d5d01 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -356,8 +356,7 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
}
obj->pages_pin_count++; /* perma-pinned */
- drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
- npages);
+ drm_prime_sg_to_page_array(obj->table, obj->pages, npages);
return &obj->base;
}
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index a3e0fb5b8671..faeae5d881fb 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -308,7 +308,7 @@ int via_driver_irq_postinstall(struct drm_device *dev)
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
- DRM_DEBUG("via_driver_irq_postinstall\n");
+ DRM_DEBUG("fun: %s\n", __func__);
if (!dev_priv)
return -EINVAL;
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index b925b8b1da16..51ec7c3240c9 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
- depends on DRM && VIRTIO && VIRTIO_MENU && MMU
+ depends on DRM && VIRTIO_MENU && MMU
+ select VIRTIO
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
select VIRTIO_DMA_SHARED_BUFFER
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 5fefc88d47e4..c2b20e0ee030 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -28,14 +28,13 @@
#include "virtgpu_drv.h"
-static void virtio_add_bool(struct seq_file *m, const char *name,
- bool value)
+static void virtio_gpu_add_bool(struct seq_file *m, const char *name,
+ bool value)
{
seq_printf(m, "%-16s : %s\n", name, value ? "yes" : "no");
}
-static void virtio_add_int(struct seq_file *m, const char *name,
- int value)
+static void virtio_gpu_add_int(struct seq_file *m, const char *name, int value)
{
seq_printf(m, "%-16s : %d\n", name, value);
}
@@ -45,13 +44,16 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
- virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
- virtio_add_bool(m, "edid", vgdev->has_edid);
- virtio_add_bool(m, "indirect", vgdev->has_indirect);
- virtio_add_bool(m, "resource uuid", vgdev->has_resource_assign_uuid);
- virtio_add_bool(m, "blob resources", vgdev->has_resource_blob);
- virtio_add_int(m, "cap sets", vgdev->num_capsets);
- virtio_add_int(m, "scanouts", vgdev->num_scanouts);
+ virtio_gpu_add_bool(m, "virgl", vgdev->has_virgl_3d);
+ virtio_gpu_add_bool(m, "edid", vgdev->has_edid);
+ virtio_gpu_add_bool(m, "indirect", vgdev->has_indirect);
+
+ virtio_gpu_add_bool(m, "resource uuid",
+ vgdev->has_resource_assign_uuid);
+
+ virtio_gpu_add_bool(m, "blob resources", vgdev->has_resource_blob);
+ virtio_gpu_add_int(m, "cap sets", vgdev->num_capsets);
+ virtio_gpu_add_int(m, "scanouts", vgdev->num_scanouts);
if (vgdev->host_visible_region.len) {
seq_printf(m, "%-16s : 0x%lx +0x%lx\n", "host visible region",
(unsigned long)vgdev->host_visible_region.addr,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 27f13bd29c13..a21dc3ad6f88 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -54,7 +54,6 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vd
DRM_INFO("pci: %s detected at %s\n",
vga ? "virtio-vga" : "virtio-gpu-pci",
pname);
- dev->pdev = pdev;
if (vga)
drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
"virtiodrmfb");
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 6a232553c99b..d9dbc4f258f3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -136,6 +136,7 @@ struct virtio_gpu_fence_driver {
struct virtio_gpu_fence {
struct dma_fence f;
+ uint64_t fence_id;
struct virtio_gpu_fence_driver *drv;
struct list_head node;
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 728ca36f6327..d28e25e8409b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -27,51 +27,48 @@
#include "virtgpu_drv.h"
-#define to_virtio_fence(x) \
+#define to_virtio_gpu_fence(x) \
container_of(x, struct virtio_gpu_fence, f)
-static const char *virtio_get_driver_name(struct dma_fence *f)
+static const char *virtio_gpu_get_driver_name(struct dma_fence *f)
{
return "virtio_gpu";
}
-static const char *virtio_get_timeline_name(struct dma_fence *f)
+static const char *virtio_gpu_get_timeline_name(struct dma_fence *f)
{
return "controlq";
}
-static bool virtio_fence_signaled(struct dma_fence *f)
+static bool virtio_gpu_fence_signaled(struct dma_fence *f)
{
- struct virtio_gpu_fence *fence = to_virtio_fence(f);
-
- if (WARN_ON_ONCE(fence->f.seqno == 0))
- /* leaked fence outside driver before completing
- * initialization with virtio_gpu_fence_emit */
- return false;
- if (atomic64_read(&fence->drv->last_fence_id) >= fence->f.seqno)
- return true;
+ /* A fence leaked outside the driver before completing
+ * initialization with virtio_gpu_fence_emit.
+ */
+ WARN_ON_ONCE(f->seqno == 0);
return false;
}
-static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
+static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
{
- snprintf(str, size, "%llu", f->seqno);
+ snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
}
-static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
+static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
+ int size)
{
- struct virtio_gpu_fence *fence = to_virtio_fence(f);
+ struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
snprintf(str, size, "%llu",
(u64)atomic64_read(&fence->drv->last_fence_id));
}
-static const struct dma_fence_ops virtio_fence_ops = {
- .get_driver_name = virtio_get_driver_name,
- .get_timeline_name = virtio_get_timeline_name,
- .signaled = virtio_fence_signaled,
- .fence_value_str = virtio_fence_value_str,
- .timeline_value_str = virtio_timeline_value_str,
+static const struct dma_fence_ops virtio_gpu_fence_ops = {
+ .get_driver_name = virtio_gpu_get_driver_name,
+ .get_timeline_name = virtio_gpu_get_timeline_name,
+ .signaled = virtio_gpu_fence_signaled,
+ .fence_value_str = virtio_gpu_fence_value_str,
+ .timeline_value_str = virtio_gpu_timeline_value_str,
};
struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
@@ -88,7 +85,8 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
* unknown yet. The fence must not be used outside of the driver
* until virtio_gpu_fence_emit is called.
*/
- dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);
+ dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock, drv->context,
+ 0);
return fence;
}
@@ -101,7 +99,7 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
unsigned long irq_flags;
spin_lock_irqsave(&drv->lock, irq_flags);
- fence->f.seqno = ++drv->current_fence_id;
+ fence->fence_id = fence->f.seqno = ++drv->current_fence_id;
dma_fence_get(&fence->f);
list_add_tail(&fence->node, &drv->fences);
spin_unlock_irqrestore(&drv->lock, irq_flags);
@@ -109,24 +107,45 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
trace_dma_fence_emit(&fence->f);
cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
- cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno);
+ cmd_hdr->fence_id = cpu_to_le64(fence->fence_id);
}
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
u64 fence_id)
{
struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
- struct virtio_gpu_fence *fence, *tmp;
+ struct virtio_gpu_fence *signaled, *curr, *tmp;
unsigned long irq_flags;
spin_lock_irqsave(&drv->lock, irq_flags);
atomic64_set(&vgdev->fence_drv.last_fence_id, fence_id);
- list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
- if (fence_id < fence->f.seqno)
+ list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
+ if (fence_id != curr->fence_id)
continue;
- dma_fence_signal_locked(&fence->f);
- list_del(&fence->node);
- dma_fence_put(&fence->f);
+
+ signaled = curr;
+
+ /*
+ * Signal any fences with a strictly smaller sequence number
+ * than the current signaled fence.
+ */
+ list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
+ /* dma-fence contexts must match */
+ if (signaled->f.context != curr->f.context)
+ continue;
+
+ if (!dma_fence_is_later(&signaled->f, &curr->f))
+ continue;
+
+ dma_fence_signal_locked(&curr->f);
+ list_del(&curr->node);
+ dma_fence_put(&curr->f);
+ }
+
+ dma_fence_signal_locked(&signaled->f);
+ list_del(&signaled->node);
+ dma_fence_put(&signaled->f);
+ break;
}
spin_unlock_irqrestore(&drv->lock, irq_flags);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index c30c75ee83fc..8502400b2f9c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -39,9 +39,6 @@ static int virtio_gpu_gem_create(struct drm_file *file,
int ret;
u32 handle;
- if (vgdev->has_virgl_3d)
- virtio_gpu_create_context(dev, file);
-
ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
if (ret < 0)
return ret;
@@ -119,6 +116,11 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
if (!vgdev->has_virgl_3d)
goto out_notify;
+ /* The context might still be missing when the first ioctl is
+ * DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE.
+ */
+ virtio_gpu_create_context(obj->dev, file);
+
objs = virtio_gpu_array_alloc(1);
if (!objs)
return -ENOMEM;
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index b4ec479c32cd..b375394193be 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -163,6 +163,7 @@ int virtio_gpu_init(struct drm_device *dev)
vgdev->host_visible_region.len,
dev_name(&vgdev->vdev->dev))) {
DRM_ERROR("Could not reserve host visible region\n");
+ ret = -EBUSY;
goto err_vqs;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
index 23c21bc4d01e..5cc34e7330fa 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vram.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -69,6 +69,7 @@ static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
.close = virtio_gpu_gem_object_close,
.free = virtio_gpu_vram_free,
.mmap = virtio_gpu_vram_mmap,
+ .export = virtgpu_gem_prime_export,
};
bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
@@ -134,6 +135,8 @@ int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
obj = &vram->base.base.base;
obj->funcs = &virtio_gpu_vram_funcs;
+
+ params->size = PAGE_ALIGN(params->size);
drm_gem_private_object_init(vgdev->ddev, obj, params->size);
/* Create fake offset */
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index d4d39227f2ed..2173b82606f6 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -34,12 +34,16 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-static struct vkms_device *vkms_device;
+static struct vkms_config *default_config;
-bool enable_cursor = true;
+static bool enable_cursor = true;
module_param_named(enable_cursor, enable_cursor, bool, 0444);
MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support");
+static bool enable_writeback = true;
+module_param_named(enable_writeback, enable_writeback, bool, 0444);
+MODULE_PARM_DESC(enable_writeback, "Enable/Disable writeback connector support");
+
DEFINE_DRM_GEM_FOPS(vkms_driver_fops);
static void vkms_release(struct drm_device *dev)
@@ -113,16 +117,20 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
dev->mode_config.max_height = YRES_MAX;
dev->mode_config.cursor_width = 512;
dev->mode_config.cursor_height = 512;
- dev->mode_config.preferred_depth = 32;
+ /* FIXME: There's a confusion between bpp and depth between this and
+ * the fbdev helpers. We have to go with 0, meaning "pick the default",
+ * which is XRGB8888 in all cases. */
+ dev->mode_config.preferred_depth = 0;
dev->mode_config.helper_private = &vkms_mode_config_helpers;
return vkms_output_init(vkmsdev, 0);
}
-static int __init vkms_init(void)
+static int vkms_create(struct vkms_config *config)
{
int ret;
struct platform_device *pdev;
+ struct vkms_device *vkms_device;
pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
if (IS_ERR(pdev))
@@ -140,6 +148,8 @@ static int __init vkms_init(void)
goto out_devres;
}
vkms_device->platform = pdev;
+ vkms_device->config = config;
+ config->dev = vkms_device;
ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev,
DMA_BIT_MASK(64));
@@ -176,21 +186,47 @@ out_unregister:
return ret;
}
-static void __exit vkms_exit(void)
+static int __init vkms_init(void)
+{
+ struct vkms_config *config;
+
+ config = kmalloc(sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ default_config = config;
+
+ config->cursor = enable_cursor;
+ config->writeback = enable_writeback;
+
+ return vkms_create(config);
+}
+
+static void vkms_destroy(struct vkms_config *config)
{
struct platform_device *pdev;
- if (!vkms_device) {
+ if (!config->dev) {
DRM_INFO("vkms_device is NULL.\n");
return;
}
- pdev = vkms_device->platform;
+ pdev = config->dev->platform;
- drm_dev_unregister(&vkms_device->drm);
- drm_atomic_helper_shutdown(&vkms_device->drm);
+ drm_dev_unregister(&config->dev->drm);
+ drm_atomic_helper_shutdown(&config->dev->drm);
devres_release_group(&pdev->dev, NULL);
platform_device_unregister(pdev);
+
+ config->dev = NULL;
+}
+
+static void __exit vkms_exit(void)
+{
+ if (default_config->dev)
+ vkms_destroy(default_config);
+
+ kfree(default_config);
}
module_init(vkms_init);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 5ed91ff08cb3..35540c7c4416 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -19,8 +19,6 @@
#define XRES_MAX 8192
#define YRES_MAX 8192
-extern bool enable_cursor;
-
struct vkms_composer {
struct drm_framebuffer fb;
struct drm_rect src, dst;
@@ -82,10 +80,20 @@ struct vkms_output {
spinlock_t composer_lock;
};
+struct vkms_device;
+
+struct vkms_config {
+ bool writeback;
+ bool cursor;
+ /* only set when instantiated */
+ struct vkms_device *dev;
+};
+
struct vkms_device {
struct drm_device drm;
struct platform_device *platform;
struct vkms_output output;
+ const struct vkms_config *config;
};
#define drm_crtc_to_vkms_output(target) \
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 4a1848b0318f..f5f6f15c362c 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -41,12 +41,13 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
struct drm_crtc *crtc = &output->crtc;
struct drm_plane *primary, *cursor = NULL;
int ret;
+ int writeback;
primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index);
if (IS_ERR(primary))
return PTR_ERR(primary);
- if (enable_cursor) {
+ if (vkmsdev->config->cursor) {
cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index);
if (IS_ERR(cursor)) {
ret = PTR_ERR(cursor);
@@ -80,9 +81,11 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
goto err_attach;
}
- ret = vkms_enable_writeback_connector(vkmsdev);
- if (ret)
- DRM_ERROR("Failed to init writeback connector\n");
+ if (vkmsdev->config->writeback) {
+ writeback = vkms_enable_writeback_connector(vkmsdev);
+ if (writeback)
+ DRM_ERROR("Failed to init writeback connector\n");
+ }
drm_mode_config_reset(dev);
@@ -98,7 +101,7 @@ err_connector:
drm_crtc_cleanup(crtc);
err_crtc:
- if (enable_cursor)
+ if (vkmsdev->config->cursor)
drm_plane_cleanup(cursor);
err_cursor:
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 31f85f09f1fc..cc4cdca7176e 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
- vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
- vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
- vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
+ vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
+ vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \
+ vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 16077785ad47..0fe869d0fad1 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -59,7 +59,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index f41550797970..180f6dbc9460 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -555,7 +555,7 @@ static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdSetShader body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -564,7 +564,7 @@ static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
cmd->body.cid = bi->ctx->id;
cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -587,7 +587,7 @@ static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
SVGA3dCmdSetRenderTarget body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -598,7 +598,7 @@ static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
cmd->body.target.face = 0;
cmd->body.target.mipmap = 0;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -626,7 +626,7 @@ static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
} body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -636,7 +636,7 @@ static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
cmd->body.s1.stage = binding->texture_stage;
cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -657,7 +657,7 @@ static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetShader body;
} *cmd;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -665,7 +665,7 @@ static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
cmd->header.size = sizeof(cmd->body);
cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -686,7 +686,7 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetSingleConstantBuffer body;
} *cmd;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -703,7 +703,7 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
cmd->body.sizeInBytes = 0;
cmd->body.sid = SVGA3D_INVALID_ID;
}
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -810,7 +810,7 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -821,7 +821,7 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
cbs->bind_first_slot, cbs->bind_cmd_count);
@@ -846,7 +846,7 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -860,7 +860,7 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
return 0;
@@ -930,7 +930,7 @@ static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
cmd_size = sizeof(*cmd) + so_target_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -938,7 +938,7 @@ static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
cmd->header.size = sizeof(cmd->body) + so_target_size;
memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
return 0;
@@ -1044,7 +1044,7 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
cmd_size = sizeof(*cmd) + set_vb_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -1054,7 +1054,7 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
bitmap_clear(cbs->dirty_vb,
cbs->bind_first_slot, cbs->bind_cmd_count);
@@ -1074,7 +1074,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (!cmd)
return -ENOMEM;
@@ -1086,7 +1086,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
return 0;
}
@@ -1104,7 +1104,7 @@ static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id);
if (!cmd)
return -ENOMEM;
@@ -1116,7 +1116,7 @@ static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
- vmw_fifo_commit(ctx->dev_priv, cmd_size);
+ vmw_cmd_commit(ctx->dev_priv, cmd_size);
return 0;
}
@@ -1263,7 +1263,7 @@ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetIndexBuffer body;
} *cmd;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -1279,7 +1279,7 @@ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
cmd->body.offset = 0;
}
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -1315,14 +1315,14 @@ static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetStreamOutput body;
} *cmd;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id);
if (!cmd)
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_DX_SET_STREAMOUTPUT;
cmd->header.size = sizeof(cmd->body);
cmd->body.soid = rebind ? bi->res->id : SVGA3D_INVALID_ID;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index f21881e087db..9f2779ddcf08 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -482,8 +482,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
d.src_addr = NULL;
d.dst_pages = dst->ttm->pages;
d.src_pages = src->ttm->pages;
- d.dst_num_pages = dst->num_pages;
- d.src_num_pages = src->num_pages;
+ d.dst_num_pages = dst->mem.num_pages;
+ d.src_num_pages = src->mem.num_pages;
d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
d.diff = diff;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 263d76ae43f0..63dbc44eebe0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -223,7 +223,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
uint32_t new_flags;
place = vmw_vram_placement.placement[0];
- place.lpfn = bo->num_pages;
+ place.lpfn = bo->mem.num_pages;
placement.num_placement = 1;
placement.placement = &place;
placement.num_busy_placement = 1;
@@ -244,7 +244,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
* that situation.
*/
if (bo->mem.mem_type == TTM_PL_VRAM &&
- bo->mem.start < bo->num_pages &&
+ bo->mem.start < bo->mem.num_pages &&
bo->mem.start > 0 &&
buf->base.pin_count == 0) {
ctx.interruptible = false;
@@ -391,7 +391,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
if (virtual)
return virtual;
- ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+ ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map);
if (ret)
DRM_ERROR("Buffer object map failed: %d.\n", ret);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index a95156fc5db7..7400d617ae3c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2020 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -36,9 +36,8 @@ struct vmw_temp_set_context {
SVGA3dCmdDXTempSetContext body;
};
-bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
+bool vmw_supports_3d(struct vmw_private *dev_priv)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
@@ -62,15 +61,15 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
- fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+ fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
- hwversion = vmw_mmio_read(fifo_mem +
- ((fifo->capabilities &
- SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
- SVGA_FIFO_3D_HWVERSION_REVISED :
- SVGA_FIFO_3D_HWVERSION));
+ hwversion = vmw_fifo_mem_read(dev_priv,
+ ((fifo->capabilities &
+ SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+ SVGA_FIFO_3D_HWVERSION_REVISED :
+ SVGA_FIFO_3D_HWVERSION));
if (hwversion == 0)
return false;
@@ -87,13 +86,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t caps;
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
- caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
+ caps = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
if (caps & SVGA_FIFO_CAP_PITCHLOCK)
return true;
@@ -102,7 +100,6 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
@@ -129,6 +126,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
SVGA_REG_ENABLE_HIDE);
+
vmw_write(dev_priv, SVGA_REG_TRACES, 0);
min = 4;
@@ -139,19 +137,19 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
if (min < PAGE_SIZE)
min = PAGE_SIZE;
- vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
- vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MIN, min);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MAX, dev_priv->fifo_mem_size);
wmb();
- vmw_mmio_write(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
- vmw_mmio_write(min, fifo_mem + SVGA_FIFO_STOP);
- vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_BUSY, 0);
mb();
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
- max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
- min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
- fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
+ max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
+ min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
+ fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
(unsigned int) max,
@@ -159,15 +157,14 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
(unsigned int) fifo->capabilities);
atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
- vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
- vmw_marker_queue_init(&fifo->marker_queue);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, dev_priv->last_read_seqno);
return 0;
}
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
+ u32 *fifo_mem = dev_priv->fifo_mem;
if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
@@ -175,13 +172,11 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
-
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
;
- dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+ dev_priv->last_read_seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
dev_priv->config_done_state);
@@ -190,8 +185,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
vmw_write(dev_priv, SVGA_REG_TRACES,
dev_priv->traces_state);
- vmw_marker_queue_takedown(&fifo->marker_queue);
-
if (likely(fifo->static_buffer != NULL)) {
vfree(fifo->static_buffer);
fifo->static_buffer = NULL;
@@ -205,11 +198,10 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
- uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
- uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
- uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
- uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
+ uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
+ uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
+ uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
+ uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
return ((max - next_cmd) + (stop - min) <= bytes);
}
@@ -298,7 +290,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
- u32 *fifo_mem = dev_priv->mmio_virt;
+ u32 *fifo_mem = dev_priv->fifo_mem;
uint32_t max;
uint32_t min;
uint32_t next_cmd;
@@ -306,9 +298,9 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
int ret;
mutex_lock(&fifo_state->fifo_mutex);
- max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
- min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
- next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
+ max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
+ min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
+ next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
if (unlikely(bytes >= (max - min)))
goto out_err;
@@ -319,7 +311,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
fifo_state->reserved_size = bytes;
while (1) {
- uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
+ uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
bool need_bounce = false;
bool reserve_in_place = false;
@@ -353,8 +345,9 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
fifo_state->using_bounce_buffer = false;
if (reserveable)
- vmw_mmio_write(bytes, fifo_mem +
- SVGA_FIFO_RESERVED);
+ vmw_fifo_mem_write(dev_priv,
+ SVGA_FIFO_RESERVED,
+ bytes);
return (void __force *) (fifo_mem +
(next_cmd >> 2));
} else {
@@ -381,7 +374,7 @@ out_err:
return NULL;
}
-void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
+void *vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes,
int ctx_id)
{
void *ret;
@@ -402,10 +395,11 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
}
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
- u32 *fifo_mem,
+ struct vmw_private *vmw,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
+ u32 *fifo_mem = vmw->fifo_mem;
uint32_t chunk_size = max - next_cmd;
uint32_t rest;
uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
@@ -414,7 +408,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
if (bytes < chunk_size)
chunk_size = bytes;
- vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
+ vmw_fifo_mem_write(vmw, SVGA_FIFO_RESERVED, bytes);
mb();
memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
rest = bytes - chunk_size;
@@ -423,7 +417,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
}
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
- u32 *fifo_mem,
+ struct vmw_private *vmw,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
@@ -431,12 +425,12 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
fifo_state->dynamic_buffer : fifo_state->static_buffer;
while (bytes > 0) {
- vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
+ vmw_fifo_mem_write(vmw, (next_cmd >> 2), *buffer++);
next_cmd += sizeof(uint32_t);
if (unlikely(next_cmd == max))
next_cmd = min;
mb();
- vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+ vmw_fifo_mem_write(vmw, SVGA_FIFO_NEXT_CMD, next_cmd);
mb();
bytes -= sizeof(uint32_t);
}
@@ -445,10 +439,9 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
- u32 *fifo_mem = dev_priv->mmio_virt;
- uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
- uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
- uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+ uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
+ uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
+ uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
if (fifo_state->dx)
@@ -462,10 +455,10 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
if (fifo_state->using_bounce_buffer) {
if (reserveable)
- vmw_fifo_res_copy(fifo_state, fifo_mem,
+ vmw_fifo_res_copy(fifo_state, dev_priv,
next_cmd, max, min, bytes);
else
- vmw_fifo_slow_copy(fifo_state, fifo_mem,
+ vmw_fifo_slow_copy(fifo_state, dev_priv,
next_cmd, max, min, bytes);
if (fifo_state->dynamic_buffer) {
@@ -481,18 +474,18 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
if (next_cmd >= max)
next_cmd -= max - min;
mb();
- vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, next_cmd);
}
if (reserveable)
- vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_RESERVED, 0);
mb();
up_write(&fifo_state->rwsem);
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
mutex_unlock(&fifo_state->fifo_mutex);
}
-void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
if (dev_priv->cman)
vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
@@ -507,7 +500,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
* @dev_priv: Pointer to device private structure.
* @bytes: Number of bytes to commit.
*/
-void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
+void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
if (dev_priv->cman)
vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
@@ -522,7 +515,7 @@ void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
* @dev_priv: Pointer to device private structure.
* @interruptible: Whether to wait interruptible if function needs to sleep.
*/
-int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
+int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible)
{
might_sleep();
@@ -532,7 +525,7 @@ int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
return 0;
}
-int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct svga_fifo_cmd_fence *cmd_fence;
@@ -540,7 +533,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
int ret = 0;
uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
- fm = VMW_FIFO_RESERVE(dev_priv, bytes);
+ fm = VMW_CMD_RESERVE(dev_priv, bytes);
if (unlikely(fm == NULL)) {
*seqno = atomic_read(&dev_priv->marker_seq);
ret = -ENOMEM;
@@ -560,15 +553,14 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
* waiting code in vmwgfx_irq.c will emulate this.
*/
- vmw_fifo_commit(dev_priv, 0);
+ vmw_cmd_commit(dev_priv, 0);
return 0;
}
*fm++ = SVGA_CMD_FENCE;
cmd_fence = (struct svga_fifo_cmd_fence *) fm;
cmd_fence->fence = *seqno;
- vmw_fifo_commit_flush(dev_priv, bytes);
- (void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
+ vmw_cmd_commit_flush(dev_priv, bytes);
vmw_update_seqno(dev_priv, fifo_state);
out_err:
@@ -599,7 +591,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForQuery body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -616,7 +608,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
cmd->body.guestResult.offset = 0;
}
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -645,7 +637,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForGBQuery body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -657,7 +649,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
cmd->body.mobid = bo->mem.start;
cmd->body.offset = 0;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -681,7 +673,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
*
* Returns -ENOMEM on failure to reserve fifo space.
*/
-int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
uint32_t cid)
{
if (dev_priv->has_mob)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 9a9fe10d829b..45fbc41440f1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -610,7 +610,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
/* Send a new fence in case one was removed */
if (send_fence) {
- vmw_fifo_send_fence(man->dev_priv, &dummy);
+ vmw_cmd_send_fence(man->dev_priv, &dummy);
wake_up_all(&man->idle_queue);
}
@@ -1208,18 +1208,14 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
*
* @man: The command buffer manager.
* @size: The size of the main space pool.
- * @default_size: The default size of the command buffer for small kernel
- * submissions.
*
- * Set the size and allocate the main command buffer space pool,
- * as well as the default size of the command buffer for
- * small kernel submissions. If successful, this enables large command
- * submissions. Note that this function requires that rudimentary command
+ * Set the size and allocate the main command buffer space pool.
+ * If successful, this enables large command submissions.
+ * Note that this function requires that rudimentary command
* submission is already available and that the MOB memory manager is alive.
* Returns 0 on success. Negative error code on failure.
*/
-int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
- size_t size, size_t default_size)
+int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{
struct vmw_private *dev_priv = man->dev_priv;
bool dummy;
@@ -1230,7 +1226,7 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
/* First, try to allocate a huge chunk of DMA memory */
size = PAGE_ALIGN(size);
- man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
+ man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
&man->handle, GFP_KERNEL);
if (man->map) {
man->using_mob = false;
@@ -1313,7 +1309,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
2 : 1;
man->headers = dma_pool_create("vmwgfx cmdbuf",
- &dev_priv->dev->pdev->dev,
+ dev_priv->drm.dev,
sizeof(SVGACBHeader),
64, PAGE_SIZE);
if (!man->headers) {
@@ -1322,7 +1318,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
}
man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
- &dev_priv->dev->pdev->dev,
+ dev_priv->drm.dev,
sizeof(struct vmw_cmdbuf_dheader),
64, PAGE_SIZE);
if (!man->dheaders) {
@@ -1387,7 +1383,7 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
ttm_bo_put(man->cmd_space);
man->cmd_space = NULL;
} else {
- dma_free_coherent(&man->dev_priv->dev->pdev->dev,
+ dma_free_coherent(man->dev_priv->drm.dev,
man->size, man->map, man->handle);
}
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 61c246335e66..6f4d0da11ad8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -163,7 +163,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
}
vmw_execbuf_release_pinned_bo(dev_priv);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return;
@@ -171,7 +171,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_dec(dev_priv);
}
@@ -265,7 +265,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
return -ENOMEM;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
vmw_resource_unreference(&res);
return -ENOMEM;
@@ -275,7 +275,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
res->hw_destroy = vmw_hw_context_destroy;
return 0;
@@ -316,7 +316,7 @@ static int vmw_gb_context_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
ret = -ENOMEM;
goto out_no_fifo;
@@ -325,7 +325,7 @@ static int vmw_gb_context_create(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
return 0;
@@ -348,7 +348,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -358,7 +358,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
cmd->body.mobid = bo->mem.start;
cmd->body.validContents = res->backup_dirty;
res->backup_dirty = false;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -392,7 +392,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
@@ -411,7 +411,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
cmd2->body.cid = res->id;
cmd2->body.mobid = SVGA3D_INVALID_ID;
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
mutex_unlock(&dev_priv->binding_mutex);
/*
@@ -440,14 +440,14 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
if (likely(res->id == -1))
return 0;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
if (dev_priv->query_cid == res->id)
dev_priv->query_cid_valid = false;
vmw_resource_release_id(res);
@@ -483,7 +483,7 @@ static int vmw_dx_context_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
ret = -ENOMEM;
goto out_no_fifo;
@@ -492,7 +492,7 @@ static int vmw_dx_context_create(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
return 0;
@@ -515,7 +515,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -525,7 +525,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
cmd->body.mobid = bo->mem.start;
cmd->body.validContents = res->backup_dirty;
res->backup_dirty = false;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
@@ -608,7 +608,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
@@ -627,7 +627,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
cmd2->body.cid = res->id;
cmd2->body.mobid = SVGA3D_INVALID_ID;
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
mutex_unlock(&dev_priv->binding_mutex);
/*
@@ -656,14 +656,14 @@ static int vmw_dx_context_destroy(struct vmw_resource *res)
if (likely(res->id == -1))
return 0;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
if (dev_priv->query_cid == res->id)
dev_priv->query_cid_valid = false;
vmw_resource_release_id(res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 984d8884357d..ba658fa9cf6c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -175,7 +175,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
dma_resv_assert_held(bo->base.resv);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (!cmd)
return -ENOMEM;
@@ -188,7 +188,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
cmd->body.mobid = bo->mem.start;
cmd->body.validSizeInBytes = vcotbl->size_read_back;
- vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));
vcotbl->scrubbed = false;
return 0;
@@ -263,7 +263,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
if (readback)
submit_size += sizeof(*cmd0);
- cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
if (!cmd1)
return -ENOMEM;
@@ -283,7 +283,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
cmd1->body.type = vcotbl->type;
cmd1->body.mobid = SVGA3D_INVALID_ID;
cmd1->body.validSizeInBytes = 0;
- vmw_fifo_commit_flush(dev_priv, submit_size);
+ vmw_cmd_commit_flush(dev_priv, submit_size);
vcotbl->scrubbed = true;
/* Trigger a create() on next validate. */
@@ -349,7 +349,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
struct vmw_fence_obj *fence;
if (!vcotbl->scrubbed) {
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (!cmd)
return -ENOMEM;
@@ -358,7 +358,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
cmd->body.cid = vcotbl->ctx->id;
cmd->body.type = vcotbl->type;
vcotbl->size_read_back = res->backup_size;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
}
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
@@ -430,7 +430,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* Do a page by page copy of COTables. This eliminates slow vmap()s.
* This should really be a TTM utility.
*/
- for (i = 0; i < old_bo->num_pages; ++i) {
+ for (i = 0; i < old_bo->mem.num_pages; ++i) {
bool dummy;
ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 216daf93022c..dd69b51c40e4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -32,10 +32,10 @@
#include <linux/mem_encrypt.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>
#include "ttm_object.h"
@@ -43,8 +43,6 @@
#include "vmwgfx_drv.h"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
-#define VMWGFX_CHIP_SVGAII 0
-#define VMW_FB_RESERVATION 0
#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600
@@ -253,8 +251,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
};
static const struct pci_device_id vmw_pci_id_list[] = {
- {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
- {0, 0, 0}
+ { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) },
+ { }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
@@ -425,8 +423,7 @@ static int vmw_request_device_late(struct vmw_private *dev_priv)
}
if (dev_priv->cman) {
- ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
- 256*4096, 2*4096);
+ ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096);
if (ret) {
struct vmw_cmdbuf_man *man = dev_priv->cman;
@@ -609,7 +606,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
*/
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int ret = 0;
ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
@@ -644,26 +641,84 @@ static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
#endif
}
-static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+static int vmw_setup_pci_resources(struct vmw_private *dev,
+ unsigned long pci_id)
{
- struct vmw_private *dev_priv;
+ resource_size_t fifo_start;
+ resource_size_t fifo_size;
int ret;
+ struct pci_dev *pdev = to_pci_dev(dev->drm.dev);
+
+ pci_set_master(pdev);
+
+ ret = pci_request_regions(pdev, "vmwgfx probe");
+ if (ret)
+ return ret;
+
+ dev->io_start = pci_resource_start(pdev, 0);
+ dev->vram_start = pci_resource_start(pdev, 1);
+ dev->vram_size = pci_resource_len(pdev, 1);
+ fifo_start = pci_resource_start(pdev, 2);
+ fifo_size = pci_resource_len(pdev, 2);
+
+ DRM_INFO("FIFO at %pa size is %llu kiB\n",
+ &fifo_start, (uint64_t)fifo_size / 1024);
+ dev->fifo_mem = devm_memremap(dev->drm.dev,
+ fifo_start,
+ fifo_size,
+ MEMREMAP_WB);
+
+ if (IS_ERR(dev->fifo_mem)) {
+ DRM_ERROR("Failed mapping FIFO memory.\n");
+ pci_release_regions(pdev);
+ return PTR_ERR(dev->fifo_mem);
+ }
+
+ /*
+ * This is the approximate size of the VRAM; the exact size will only
+ * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource size
+ * will be equal to or larger than the size reported by
+ * SVGA_REG_VRAM_SIZE.
+ */
+ DRM_INFO("VRAM at %pa size is %llu kiB\n",
+ &dev->vram_start, (uint64_t)dev->vram_size / 1024);
+
+ return 0;
+}
+
+static int vmw_detect_version(struct vmw_private *dev)
+{
uint32_t svga_id;
+
+ vmw_write(dev, SVGA_REG_ID, SVGA_ID_2);
+ svga_id = vmw_read(dev, SVGA_REG_ID);
+ if (svga_id != SVGA_ID_2) {
+ DRM_ERROR("Unsupported SVGA ID 0x%x on chipset 0x%x\n",
+ svga_id, dev->vmw_chipset);
+ return -ENOSYS;
+ }
+ return 0;
+}
+
+static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
+{
+ int ret;
enum vmw_res_type i;
bool refuse_dma = false;
char host_log[100] = {0};
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
- if (unlikely(!dev_priv)) {
- DRM_ERROR("Failed allocating a device private struct.\n");
- return -ENOMEM;
- }
+ dev_priv->vmw_chipset = pci_id;
+ dev_priv->last_read_seqno = (uint32_t) -100;
+ dev_priv->drm.dev_private = dev_priv;
- pci_set_master(dev->pdev);
+ ret = vmw_setup_pci_resources(dev_priv, pci_id);
+ if (ret)
+ return ret;
+ ret = vmw_detect_version(dev_priv);
+ if (ret)
+ goto out_no_pci_or_version;
- dev_priv->dev = dev;
- dev_priv->vmw_chipset = chipset;
- dev_priv->last_read_seqno = (uint32_t) -100;
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
@@ -673,7 +728,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
spin_lock_init(&dev_priv->hw_lock);
spin_lock_init(&dev_priv->waiter_lock);
spin_lock_init(&dev_priv->cap_lock);
- spin_lock_init(&dev_priv->svga_lock);
spin_lock_init(&dev_priv->cursor_lock);
for (i = vmw_res_context; i < vmw_res_max; ++i) {
@@ -688,21 +742,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->used_memory_size = 0;
- dev_priv->io_start = pci_resource_start(dev->pdev, 0);
- dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
- dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
-
dev_priv->assume_16bpp = !!vmw_assume_16bpp;
dev_priv->enable_fb = enable_fbdev;
- vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
- svga_id = vmw_read(dev_priv, SVGA_REG_ID);
- if (svga_id != SVGA_ID_2) {
- ret = -ENOSYS;
- DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
- goto out_err0;
- }
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
@@ -720,7 +763,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
- dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
+ dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
@@ -794,7 +837,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (unlikely(ret != 0))
goto out_err0;
- dma_set_max_seg_size(dev->dev, U32_MAX);
+ dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
DRM_INFO("Max GMR ids is %u\n",
@@ -804,21 +847,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
(unsigned)dev_priv->memory_size / 1024);
}
- DRM_INFO("Maximum display memory size is %u kiB\n",
- dev_priv->prim_bb_mem / 1024);
- DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
- dev_priv->vram_start, dev_priv->vram_size / 1024);
- DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
- dev_priv->mmio_start, dev_priv->mmio_size / 1024);
-
- dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
- dev_priv->mmio_size, MEMREMAP_WB);
-
- if (unlikely(dev_priv->mmio_virt == NULL)) {
- ret = -ENOMEM;
- DRM_ERROR("Failed mapping MMIO.\n");
- goto out_err0;
- }
+ DRM_INFO("Maximum display memory size is %llu kiB\n",
+ (uint64_t)dev_priv->prim_bb_mem / 1024);
/* Need mmio memory to check for fifo pitchlock cap. */
if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
@@ -826,7 +856,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
!vmw_fifo_have_pitchlock(dev_priv)) {
ret = -ENOSYS;
DRM_ERROR("Hardware has no pitchlock\n");
- goto out_err4;
+ goto out_err0;
}
dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
@@ -835,29 +865,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (unlikely(dev_priv->tdev == NULL)) {
DRM_ERROR("Unable to initialize TTM object management.\n");
ret = -ENOMEM;
- goto out_err4;
- }
-
- dev->dev_private = dev_priv;
-
- ret = pci_request_regions(dev->pdev, "vmwgfx probe");
- dev_priv->stealth = (ret != 0);
- if (dev_priv->stealth) {
- /**
- * Request at least the mmio PCI resource.
- */
-
- DRM_INFO("It appears like vesafb is loaded. "
- "Ignore above error if any.\n");
- ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
- goto out_no_device;
- }
+ goto out_err0;
}
if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
- ret = vmw_irq_install(dev, dev->pdev->irq);
+ ret = vmw_irq_install(&dev_priv->drm, pdev->irq);
if (ret != 0) {
DRM_ERROR("Failed installing irq: %d\n", ret);
goto out_no_irq;
@@ -874,8 +886,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver,
- dev_priv->dev->dev,
- dev->anon_inode->i_mapping,
+ dev_priv->drm.dev,
+ dev_priv->drm.anon_inode->i_mapping,
&dev_priv->vma_manager,
dev_priv->map_mode == vmw_dma_alloc_coherent,
false);
@@ -955,7 +967,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (ret)
goto out_no_fifo;
- DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
+ DRM_INFO("Atomic: %s\n", (dev_priv->drm.driver->driver_features & DRIVER_ATOMIC)
? "yes." : "no.");
if (dev_priv->sm_type == VMW_SM_5)
DRM_INFO("SM5 support available.\n");
@@ -1000,29 +1012,24 @@ out_no_bdev:
vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- vmw_irq_uninstall(dev_priv->dev);
+ vmw_irq_uninstall(&dev_priv->drm);
out_no_irq:
- if (dev_priv->stealth)
- pci_release_region(dev->pdev, 2);
- else
- pci_release_regions(dev->pdev);
-out_no_device:
ttm_object_device_release(&dev_priv->tdev);
-out_err4:
- memunmap(dev_priv->mmio_virt);
out_err0:
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
- kfree(dev_priv);
+out_no_pci_or_version:
+ pci_release_regions(pdev);
return ret;
}
static void vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
enum vmw_res_type i;
unregister_pm_notifier(&dev_priv->pm_nb);
@@ -1052,21 +1059,16 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- vmw_irq_uninstall(dev_priv->dev);
- if (dev_priv->stealth)
- pci_release_region(dev->pdev, 2);
- else
- pci_release_regions(dev->pdev);
+ vmw_irq_uninstall(&dev_priv->drm);
ttm_object_device_release(&dev_priv->tdev);
- memunmap(dev_priv->mmio_virt);
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
- kfree(dev_priv);
+ pci_release_regions(pdev);
}
static void vmw_postclose(struct drm_device *dev,
@@ -1190,12 +1192,10 @@ static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
- spin_lock(&dev_priv->svga_lock);
if (!ttm_resource_manager_used(man)) {
vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
ttm_resource_manager_set_used(man, true);
}
- spin_unlock(&dev_priv->svga_lock);
}
/**
@@ -1221,14 +1221,12 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
- spin_lock(&dev_priv->svga_lock);
if (ttm_resource_manager_used(man)) {
ttm_resource_manager_set_used(man, false);
vmw_write(dev_priv, SVGA_REG_ENABLE,
SVGA_REG_ENABLE_HIDE |
SVGA_REG_ENABLE_ENABLE);
}
- spin_unlock(&dev_priv->svga_lock);
}
/**
@@ -1253,19 +1251,16 @@ void vmw_svga_disable(struct vmw_private *dev_priv)
* to be inconsistent with the device, causing modesetting problems.
*
*/
- vmw_kms_lost_device(dev_priv->dev);
+ vmw_kms_lost_device(&dev_priv->drm);
ttm_write_lock(&dev_priv->reservation_sem, false);
- spin_lock(&dev_priv->svga_lock);
if (ttm_resource_manager_used(man)) {
- ttm_resource_manager_set_used(man, false);
- spin_unlock(&dev_priv->svga_lock);
if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
DRM_ERROR("Failed evicting VRAM buffers.\n");
+ ttm_resource_manager_set_used(man, false);
vmw_write(dev_priv, SVGA_REG_ENABLE,
SVGA_REG_ENABLE_HIDE |
SVGA_REG_ENABLE_ENABLE);
- } else
- spin_unlock(&dev_priv->svga_lock);
+ }
ttm_write_unlock(&dev_priv->reservation_sem);
}
@@ -1275,8 +1270,6 @@ static void vmw_remove(struct pci_dev *pdev)
drm_dev_unregister(dev);
vmw_driver_unload(dev);
- drm_dev_put(dev);
- pci_disable_device(pdev);
}
static unsigned long
@@ -1377,7 +1370,7 @@ static int vmw_pm_freeze(struct device *kdev)
* No user-space processes should be running now.
*/
ttm_suspend_unlock(&dev_priv->reservation_sem);
- ret = vmw_kms_suspend(dev_priv->dev);
+ ret = vmw_kms_suspend(&dev_priv->drm);
if (ret) {
ttm_suspend_lock(&dev_priv->reservation_sem);
DRM_ERROR("Failed to freeze modesetting.\n");
@@ -1409,7 +1402,7 @@ static int vmw_pm_freeze(struct device *kdev)
vmw_fence_fifo_down(dev_priv->fman);
__vmw_svga_disable(dev_priv);
-
+
vmw_release_device_late(dev_priv);
return 0;
}
@@ -1438,7 +1431,7 @@ static int vmw_pm_restore(struct device *kdev)
dev_priv->suspend_locked = false;
ttm_suspend_unlock(&dev_priv->reservation_sem);
if (dev_priv->suspend_state)
- vmw_kms_resume(dev_priv->dev);
+ vmw_kms_resume(&dev_priv->drm);
if (dev_priv->enable_fb)
vmw_fb_on(dev_priv);
@@ -1507,39 +1500,36 @@ static struct pci_driver vmw_pci_driver = {
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct drm_device *dev;
+ struct vmw_private *vmw;
int ret;
- ret = pci_enable_device(pdev);
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb");
if (ret)
return ret;
- dev = drm_dev_alloc(&driver, &pdev->dev);
- if (IS_ERR(dev)) {
- ret = PTR_ERR(dev);
- goto err_pci_disable_device;
- }
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
- dev->pdev = pdev;
- pci_set_drvdata(pdev, dev);
+ vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
+ struct vmw_private, drm);
+ if (IS_ERR(vmw))
+ return PTR_ERR(vmw);
- ret = vmw_driver_load(dev, ent->driver_data);
- if (ret)
- goto err_drm_dev_put;
+ vmw->drm.pdev = pdev;
+ pci_set_drvdata(pdev, &vmw->drm);
- ret = drm_dev_register(dev, ent->driver_data);
+ ret = vmw_driver_load(vmw, ent->device);
if (ret)
- goto err_vmw_driver_unload;
+ return ret;
- return 0;
+ ret = drm_dev_register(&vmw->drm, 0);
+ if (ret) {
+ vmw_driver_unload(&vmw->drm);
+ return ret;
+ }
-err_vmw_driver_unload:
- vmw_driver_unload(dev);
-err_drm_dev_put:
- drm_dev_put(dev);
-err_pci_disable_device:
- pci_disable_device(pdev);
- return ret;
+ return 0;
}
static int __init vmwgfx_init(void)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index b45becbb00f8..5fa5bcd20cc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -39,7 +39,6 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_execbuf_util.h>
-#include <drm/ttm/ttm_module.h>
#include "ttm_lock.h"
#include "ttm_object.h"
@@ -67,6 +66,8 @@
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
+#define VMWGFX_PCI_ID_SVGA2 0x0405
+
/*
* Perhaps we should have sysfs entries for these.
*/
@@ -275,13 +276,6 @@ struct vmw_surface {
struct list_head view_list;
};
-struct vmw_marker_queue {
- struct list_head head;
- u64 lag;
- u64 lag_time;
- spinlock_t lock;
-};
-
struct vmw_fifo_state {
unsigned long reserved_size;
u32 *dynamic_buffer;
@@ -291,7 +285,6 @@ struct vmw_fifo_state {
uint32_t capabilities;
struct mutex fifo_mutex;
struct rw_semaphore rwsem;
- struct vmw_marker_queue marker_queue;
bool dx;
};
@@ -490,19 +483,19 @@ enum vmw_sm_type {
};
struct vmw_private {
+ struct drm_device drm;
struct ttm_bo_device bdev;
struct vmw_fifo_state fifo;
- struct drm_device *dev;
struct drm_vma_offset_manager vma_manager;
- unsigned long vmw_chipset;
- unsigned int io_start;
- uint32_t vram_start;
- uint32_t vram_size;
- uint32_t prim_bb_mem;
- uint32_t mmio_start;
- uint32_t mmio_size;
+ u32 vmw_chipset;
+ resource_size_t io_start;
+ resource_size_t vram_start;
+ resource_size_t vram_size;
+ resource_size_t prim_bb_mem;
+ u32 *fifo_mem;
+ resource_size_t fifo_mem_size;
uint32_t fb_max_width;
uint32_t fb_max_height;
uint32_t texture_max_width;
@@ -511,7 +504,6 @@ struct vmw_private {
uint32_t stdu_max_height;
uint32_t initial_width;
uint32_t initial_height;
- u32 *mmio_virt;
uint32_t capabilities;
uint32_t capabilities2;
uint32_t max_gmr_ids;
@@ -591,13 +583,7 @@ struct vmw_private {
struct mutex cmdbuf_mutex;
struct mutex binding_mutex;
- /**
- * Operating mode.
- */
-
- bool stealth;
bool enable_fb;
- spinlock_t svga_lock;
/**
* PM management.
@@ -967,30 +953,29 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv,
extern void vmw_fifo_release(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo);
extern void *
-vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
-extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
-extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
-extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
- uint32_t *seqno);
+vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
+extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
+extern int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno);
+extern bool vmw_supports_3d(struct vmw_private *dev_priv);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
-extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
-extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
- uint32_t cid);
-extern int vmw_fifo_flush(struct vmw_private *dev_priv,
- bool interruptible);
+extern int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
+ uint32_t cid);
+extern int vmw_cmd_flush(struct vmw_private *dev_priv,
+ bool interruptible);
-#define VMW_FIFO_RESERVE_DX(__priv, __bytes, __ctx_id) \
+#define VMW_CMD_CTX_RESERVE(__priv, __bytes, __ctx_id) \
({ \
- vmw_fifo_reserve_dx(__priv, __bytes, __ctx_id) ? : ({ \
+ vmw_cmd_ctx_reserve(__priv, __bytes, __ctx_id) ? : ({ \
DRM_ERROR("FIFO reserve failed at %s for %u bytes\n", \
__func__, (unsigned int) __bytes); \
NULL; \
}); \
})
-#define VMW_FIFO_RESERVE(__priv, __bytes) \
- VMW_FIFO_RESERVE_DX(__priv, __bytes, SVGA3D_INVALID_ID)
+#define VMW_CMD_RESERVE(__priv, __bytes) \
+ VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID)
/**
* TTM glue - vmwgfx_ttm_glue.c
@@ -1125,19 +1110,6 @@ extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count);
-/**
- * Rudimentary fence-like objects currently used only for throttling -
- * vmwgfx_marker.c
- */
-
-extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
-extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
-extern int vmw_marker_push(struct vmw_marker_queue *queue,
- uint32_t seqno);
-extern int vmw_marker_pull(struct vmw_marker_queue *queue,
- uint32_t signaled_seqno);
-extern int vmw_wait_lag(struct vmw_private *dev_priv,
- struct vmw_marker_queue *queue, uint32_t us);
/**
* Kernel framebuffer - vmwgfx_fb.c
@@ -1411,8 +1383,7 @@ struct vmw_cmdbuf_header;
extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
-extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
- size_t size, size_t default_size);
+extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
@@ -1581,28 +1552,29 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
}
/**
- * vmw_mmio_read - Perform a MMIO read from volatile memory
+ * vmw_fifo_mem_read - Perform a MMIO read from the fifo memory
*
- * @addr: The address to read from
+ * @fifo_reg: The fifo register to read from
*
* This function is intended to be equivalent to ioread32() on
* memremap'd memory, but without byteswapping.
*/
-static inline u32 vmw_mmio_read(u32 *addr)
+static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
{
- return READ_ONCE(*addr);
+ return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
}
/**
- * vmw_mmio_write - Perform a MMIO write to volatile memory
+ * vmw_fifo_mem_write - Perform a MMIO write to the fifo memory
*
- * @addr: The address to write to
+ * @fifo_reg: The fifo register to write to
*
* This function is intended to be equivalent to iowrite32 on
* memremap'd memory, but without byteswapping.
*/
-static inline void vmw_mmio_write(u32 value, u32 *addr)
+static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
+ u32 value)
{
- WRITE_ONCE(*addr, value);
+ WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
}
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index e67e2e8f6e6f..462f17320708 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -724,7 +724,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
if (!dx_query_mob || dx_query_mob->dx_query_ctx)
return 0;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
if (cmd == NULL)
return -ENOMEM;
@@ -732,7 +732,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = ctx_res->id;
cmd->body.mobid = dx_query_mob->base.mem.start;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_context_bind_dx_query(ctx_res, dx_query_mob);
@@ -1042,7 +1042,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
- if (unlikely(new_query_bo->base.num_pages > 4)) {
+ if (unlikely(new_query_bo->base.mem.num_pages > 4)) {
VMW_DEBUG_USER("Query buffer too large.\n");
return -EINVAL;
}
@@ -1100,7 +1100,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
BUG_ON(!ctx_entry->valid);
ctx = ctx_entry->res;
- ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
+ ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);
if (unlikely(ret != 0))
VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
@@ -1541,7 +1541,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
return ret;
/* Make sure DMA doesn't cross BO boundaries. */
- bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+ bo_size = vmw_bo->base.base.size;
if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
VMW_DEBUG_USER("Invalid DMA offset.\n");
return -EINVAL;
@@ -3762,7 +3762,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
/* p_handle implies file_priv. */
BUG_ON(p_handle != NULL && file_priv == NULL);
- ret = vmw_fifo_send_fence(dev_priv, &sequence);
+ ret = vmw_cmd_send_fence(dev_priv, &sequence);
if (unlikely(ret != 0)) {
VMW_DEBUG_USER("Fence submission error. Syncing.\n");
synced = true;
@@ -3876,10 +3876,10 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
void *cmd;
if (sw_context->dx_ctx_node)
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
sw_context->dx_ctx_node->ctx->id);
else
- cmd = VMW_FIFO_RESERVE(dev_priv, command_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, command_size);
if (!cmd)
return -ENOMEM;
@@ -3888,7 +3888,7 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
memcpy(cmd, kernel_commands, command_size);
vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
vmw_resource_relocations_free(&sw_context->res_relocations);
- vmw_fifo_commit(dev_priv, command_size);
+ vmw_cmd_commit(dev_priv, command_size);
return 0;
}
@@ -4046,11 +4046,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
}
if (throttle_us) {
- ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
- throttle_us);
-
- if (ret)
- goto out_free_fence_fd;
+ VMW_DEBUG_USER("Throttling is no longer supported.\n");
}
kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
@@ -4329,7 +4325,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
if (dev_priv->query_cid_valid) {
BUG_ON(fence != NULL);
- ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+ ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
if (ret)
goto out_no_emit;
dev_priv->query_cid_valid = false;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 4d60201037d1..33f07abfc3ae 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -258,7 +258,7 @@ out_unreserve:
if (w && h) {
WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
&clip, 1));
- vmw_fifo_flush(vmw_priv, false);
+ vmw_cmd_flush(vmw_priv, false);
}
out_unlock:
mutex_unlock(&par->bo_mutex);
@@ -481,7 +481,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
DRM_ERROR("Could not unset a mode.\n");
return ret;
}
- drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
+ drm_mode_destroy(&par->vmw_priv->drm, par->set_mode);
par->set_mode = NULL;
}
@@ -567,7 +567,7 @@ static int vmw_fb_set_par(struct fb_info *info)
struct drm_display_mode *mode;
int ret;
- mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
+ mode = drm_mode_duplicate(&vmw_priv->drm, &new_mode);
if (!mode) {
DRM_ERROR("Could not create new fb mode.\n");
return -ENOMEM;
@@ -581,7 +581,7 @@ static int vmw_fb_set_par(struct fb_info *info)
mode->hdisplay *
DIV_ROUND_UP(var->bits_per_pixel, 8),
mode->vdisplay)) {
- drm_mode_destroy(vmw_priv->dev, mode);
+ drm_mode_destroy(&vmw_priv->drm, mode);
return -EINVAL;
}
@@ -615,7 +615,7 @@ static int vmw_fb_set_par(struct fb_info *info)
out_unlock:
if (par->set_mode)
- drm_mode_destroy(vmw_priv->dev, par->set_mode);
+ drm_mode_destroy(&vmw_priv->drm, par->set_mode);
par->set_mode = mode;
mutex_unlock(&par->bo_mutex);
@@ -638,7 +638,7 @@ static const struct fb_ops vmw_fb_ops = {
int vmw_fb_init(struct vmw_private *vmw_priv)
{
- struct device *device = &vmw_priv->dev->pdev->dev;
+ struct device *device = vmw_priv->drm.dev;
struct vmw_fb_par *par;
struct fb_info *info;
unsigned fb_width, fb_height;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 0f8d29397157..378ec7600154 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -141,8 +141,7 @@ static bool vmw_fence_enable_signaling(struct dma_fence *f)
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_private *dev_priv = fman->dev_priv;
- u32 *fifo_mem = dev_priv->mmio_virt;
- u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+ u32 seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false;
@@ -401,14 +400,12 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
u32 passed_seqno)
{
u32 goal_seqno;
- u32 *fifo_mem;
struct vmw_fence_obj *fence;
if (likely(!fman->seqno_valid))
return false;
- fifo_mem = fman->dev_priv->mmio_virt;
- goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
return false;
@@ -416,8 +413,9 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
list_for_each_entry(fence, &fman->fence_list, head) {
if (!list_empty(&fence->seq_passed_actions)) {
fman->seqno_valid = true;
- vmw_mmio_write(fence->base.seqno,
- fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ vmw_fifo_mem_write(fman->dev_priv,
+ SVGA_FIFO_FENCE_GOAL,
+ fence->base.seqno);
break;
}
}
@@ -445,18 +443,17 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
u32 goal_seqno;
- u32 *fifo_mem;
if (dma_fence_is_signaled_locked(&fence->base))
return false;
- fifo_mem = fman->dev_priv->mmio_virt;
- goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
if (likely(fman->seqno_valid &&
goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
return false;
- vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
+ fence->base.seqno);
fman->seqno_valid = true;
return true;
@@ -468,9 +465,8 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
struct list_head action_list;
bool needs_rerun;
uint32_t seqno, new_seqno;
- u32 *fifo_mem = fman->dev_priv->mmio_virt;
- seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+ seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
rerun:
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
@@ -492,7 +488,7 @@ rerun:
needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
if (unlikely(needs_rerun)) {
- new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+ new_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
if (new_seqno != seqno) {
seqno = new_seqno;
goto rerun;
@@ -1033,7 +1029,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
eaction->action.type = VMW_ACTION_EVENT;
eaction->fence = vmw_fence_obj_reference(fence);
- eaction->dev = fman->dev_priv->dev;
+ eaction->dev = &fman->dev_priv->drm;
eaction->tv_sec = tv_sec;
eaction->tv_usec = tv_usec;
@@ -1055,7 +1051,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
{
struct vmw_event_fence_pending *event;
struct vmw_fence_manager *fman = fman_from_fence(fence);
- struct drm_device *dev = fman->dev_priv->dev;
+ struct drm_device *dev = &fman->dev_priv->drm;
int ret;
event = kzalloc(sizeof(*event), GFP_KERNEL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 83c0d5a3e4fd..964ddf1ca57a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -51,7 +51,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
uint32_t cmd_size = define_size + remap_size;
uint32_t i;
- cmd_orig = cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
+ cmd_orig = cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -98,7 +98,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
- vmw_fifo_commit(dev_priv, cmd_size);
+ vmw_cmd_commit(dev_priv, cmd_size);
return 0;
}
@@ -110,7 +110,7 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
uint32_t define_size = sizeof(define_cmd) + 4;
uint32_t *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, define_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, define_size);
if (unlikely(cmd == NULL))
return;
@@ -120,7 +120,7 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
*cmd++ = SVGA_CMD_DEFINE_GMR2;
memcpy(cmd, &define_cmd, sizeof(define_cmd));
- vmw_fifo_commit(dev_priv, define_size);
+ vmw_cmd_commit(dev_priv, define_size);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index be325a62c178..1774960d1b89 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -29,7 +29,6 @@
*/
#include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/idr.h>
@@ -65,20 +64,19 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
spin_lock(&gman->lock);
if (gman->max_gmr_pages > 0) {
- gman->used_gmr_pages += bo->num_pages;
+ gman->used_gmr_pages += mem->num_pages;
if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
goto nospace;
}
mem->mm_node = gman;
mem->start = id;
- mem->num_pages = bo->num_pages;
spin_unlock(&gman->lock);
return 0;
nospace:
- gman->used_gmr_pages -= bo->num_pages;
+ gman->used_gmr_pages -= mem->num_pages;
spin_unlock(&gman->lock);
ida_free(&gman->gmr_ida, id);
return -ENOSPC;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index f681b7b4df1b..80af8772b8c2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -51,7 +51,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
param->value = vmw_overlay_num_free_overlays(dev_priv);
break;
case DRM_VMW_PARAM_3D:
- param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
+ param->value = vmw_supports_3d(dev_priv) ? 1 : 0;
break;
case DRM_VMW_PARAM_HW_CAPS:
param->value = dev_priv->capabilities;
@@ -67,7 +67,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
break;
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
- u32 *fifo_mem = dev_priv->mmio_virt;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
@@ -76,11 +75,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
}
param->value =
- vmw_mmio_read(fifo_mem +
- ((fifo->capabilities &
- SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
- SVGA_FIFO_3D_HWVERSION_REVISED :
- SVGA_FIFO_3D_HWVERSION));
+ vmw_fifo_mem_read(dev_priv,
+ ((fifo->capabilities &
+ SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+ SVGA_FIFO_3D_HWVERSION_REVISED :
+ SVGA_FIFO_3D_HWVERSION));
break;
}
case DRM_VMW_PARAM_MAX_SURF_MEMORY:
@@ -235,7 +234,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out_err;
} else {
- fifo_mem = dev_priv->mmio_virt;
+ fifo_mem = dev_priv->fifo_mem;
memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 75f3efee21a4..6c2a569f1fcb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -117,12 +117,10 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
void vmw_update_seqno(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
- uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+ uint32_t seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
if (dev_priv->last_read_seqno != seqno) {
dev_priv->last_read_seqno = seqno;
- vmw_marker_pull(&fifo_state->marker_queue, seqno);
vmw_fences_update(dev_priv->fman);
}
}
@@ -222,11 +220,9 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
}
}
finish_wait(&dev_priv->fence_queue, &__wait);
- if (ret == 0 && fifo_idle) {
- u32 *fifo_mem = dev_priv->mmio_virt;
+ if (ret == 0 && fifo_idle)
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, signal_seq);
- vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
- }
wake_up_all(&dev_priv->fence_queue);
out_err:
if (fifo_idle)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bc67f2b930e1..9a89f658e501 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -36,9 +36,6 @@
#include "vmwgfx_kms.h"
-/* Might need a hrtimer here? */
-#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
-
void vmw_du_cleanup(struct vmw_display_unit *du)
{
drm_plane_cleanup(&du->primary);
@@ -68,7 +65,7 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
if (!image)
return -EINVAL;
- cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -83,7 +80,7 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
cmd->cursor.hotspotX = hotspotX;
cmd->cursor.hotspotY = hotspotY;
- vmw_fifo_commit_flush(dev_priv, cmd_size);
+ vmw_cmd_commit_flush(dev_priv, cmd_size);
return 0;
}
@@ -128,15 +125,14 @@ err_unreserve:
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y)
{
- u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t count;
spin_lock(&dev_priv->cursor_lock);
- vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
- vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
- vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
- count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
- vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, show ? 1 : 0);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
+ count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
spin_unlock(&dev_priv->cursor_lock);
}
@@ -236,7 +232,7 @@ err_unreserve:
*/
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct vmw_display_unit *du;
struct drm_crtc *crtc;
@@ -252,7 +248,7 @@ void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct vmw_display_unit *du;
struct drm_crtc *crtc;
@@ -891,7 +887,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
bool is_bo_proxy)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
int ret;
@@ -1003,11 +999,11 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
struct drm_clip_rect norect;
int ret, increment = 1;
- drm_modeset_lock_all(dev_priv->dev);
+ drm_modeset_lock_all(&dev_priv->drm);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0)) {
- drm_modeset_unlock_all(dev_priv->dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
return ret;
}
@@ -1033,10 +1029,10 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
break;
}
- vmw_fifo_flush(dev_priv, false);
+ vmw_cmd_flush(dev_priv, false);
ttm_read_unlock(&dev_priv->reservation_sem);
- drm_modeset_unlock_all(dev_priv->dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
return ret;
}
@@ -1213,14 +1209,14 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
*mode_cmd)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct vmw_framebuffer_bo *vfbd;
unsigned int requested_size;
struct drm_format_name_buf format_name;
int ret;
requested_size = mode_cmd->height * mode_cmd->pitches[0];
- if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
+ if (unlikely(requested_size > bo->base.base.size)) {
DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n");
return -EINVAL;
@@ -1319,7 +1315,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
bo && only_2d &&
mode_cmd->width > 64 && /* Don't create a proxy for cursor */
dev_priv->active_display_unit == vmw_du_screen_target) {
- ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
+ ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
bo, &surface);
if (ret)
return ERR_PTR(ret);
@@ -1768,7 +1764,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
if (ret)
return ret;
- vmw_fifo_flush(dev_priv, false);
+ vmw_cmd_flush(dev_priv, false);
return 0;
}
@@ -1780,7 +1776,7 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
return;
dev_priv->hotplug_mode_update_property =
- drm_property_create_range(dev_priv->dev,
+ drm_property_create_range(&dev_priv->drm,
DRM_MODE_PROP_IMMUTABLE,
"hotplug_mode_update", 0, 1);
@@ -1791,7 +1787,7 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
int vmw_kms_init(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int ret;
drm_mode_config_init(dev);
@@ -1823,7 +1819,7 @@ int vmw_kms_close(struct vmw_private *dev_priv)
* but since it destroys encoders and our destructor calls
* drm_encoder_cleanup which takes the lock we deadlock.
*/
- drm_mode_config_cleanup(dev_priv->dev);
+ drm_mode_config_cleanup(&dev_priv->drm);
if (dev_priv->active_display_unit == vmw_du_legacy)
ret = vmw_kms_ldu_close_display(dev_priv);
@@ -1876,11 +1872,11 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
else if (vmw_fifo_have_pitchlock(vmw_priv))
- vmw_mmio_write(pitch, vmw_priv->mmio_virt +
- SVGA_FIFO_PITCHLOCK);
+ vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
+ if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
@@ -1934,7 +1930,7 @@ void vmw_disable_vblank(struct drm_crtc *crtc)
static int vmw_du_update_layout(struct vmw_private *dev_priv,
unsigned int num_rects, struct drm_rect *rects)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct vmw_display_unit *du;
struct drm_connector *con;
struct drm_connector_list_iter conn_iter;
@@ -2366,7 +2362,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
if (dirty->crtc) {
units[num_units++] = vmw_crtc_to_du(dirty->crtc);
} else {
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
+ list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
head) {
struct drm_plane *plane = crtc->primary;
@@ -2386,7 +2382,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
dirty->unit = unit;
if (dirty->fifo_reserve_size > 0) {
- dirty->cmd = VMW_FIFO_RESERVE(dev_priv,
+ dirty->cmd = VMW_CMD_RESERVE(dev_priv,
dirty->fifo_reserve_size);
if (!dirty->cmd)
return -ENOMEM;
@@ -2520,7 +2516,7 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
if (!clips)
return 0;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
if (!cmd)
return -ENOMEM;
@@ -2549,7 +2545,7 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
copy_size += sizeof(*cmd);
}
- vmw_fifo_commit(dev_priv, copy_size);
+ vmw_cmd_commit(dev_priv, copy_size);
return 0;
}
@@ -2568,8 +2564,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
int i = 0;
int ret = 0;
- mutex_lock(&dev_priv->dev->mode_config.mutex);
- list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
+ mutex_lock(&dev_priv->drm.mode_config.mutex);
+ list_for_each_entry(con, &dev_priv->drm.mode_config.connector_list,
head) {
if (i == unit)
break;
@@ -2577,7 +2573,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
++i;
}
- if (&con->head == &dev_priv->dev->mode_config.connector_list) {
+ if (&con->head == &dev_priv->drm.mode_config.connector_list) {
DRM_ERROR("Could not find initial display unit.\n");
ret = -EINVAL;
goto out_unlock;
@@ -2611,7 +2607,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
}
out_unlock:
- mutex_unlock(&dev_priv->dev->mode_config.mutex);
+ mutex_unlock(&dev_priv->drm.mode_config.mutex);
return ret;
}
@@ -2631,7 +2627,7 @@ vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
return;
dev_priv->implicit_placement_property =
- drm_property_create_range(dev_priv->dev,
+ drm_property_create_range(&dev_priv->drm,
DRM_MODE_PROP_IMMUTABLE,
"implicit_placement", 0, 1);
}
@@ -2752,7 +2748,7 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
goto out_unref;
reserved_size = update->calc_fifo_size(update, num_hits);
- cmd_start = VMW_FIFO_RESERVE(update->dev_priv, reserved_size);
+ cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
if (!cmd_start) {
ret = -ENOMEM;
goto out_revert;
@@ -2801,7 +2797,7 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
if (reserved_size < submit_size)
submit_size = 0;
- vmw_fifo_commit(update->dev_priv, submit_size);
+ vmw_cmd_commit(update->dev_priv, submit_size);
vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
update->out_fence, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 9d1de5b5cc6a..9a9508edbc9e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -125,7 +125,6 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
- vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
i++;
}
@@ -355,7 +354,7 @@ static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = {
static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
{
struct vmw_legacy_display_unit *ldu;
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_plane *primary, *cursor;
@@ -479,7 +478,7 @@ err_free:
int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int i, ret;
if (dev_priv->ldu_priv) {
@@ -554,7 +553,7 @@ int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
} *cmd;
fifo_size = sizeof(*cmd) * num_clips;
- cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, fifo_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -567,6 +566,6 @@ int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
cmd[i].body.height = clips->y2 - clips->y1;
}
- vmw_fifo_commit(dev_priv, fifo_size);
+ vmw_cmd_commit(dev_priv, fifo_size);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
deleted file mode 100644
index e53bc639a754..000000000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/**************************************************************************
- *
- * Copyright 2010 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-
-#include "vmwgfx_drv.h"
-
-struct vmw_marker {
- struct list_head head;
- uint32_t seqno;
- u64 submitted;
-};
-
-void vmw_marker_queue_init(struct vmw_marker_queue *queue)
-{
- INIT_LIST_HEAD(&queue->head);
- queue->lag = 0;
- queue->lag_time = ktime_get_raw_ns();
- spin_lock_init(&queue->lock);
-}
-
-void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
-{
- struct vmw_marker *marker, *next;
-
- spin_lock(&queue->lock);
- list_for_each_entry_safe(marker, next, &queue->head, head) {
- kfree(marker);
- }
- spin_unlock(&queue->lock);
-}
-
-int vmw_marker_push(struct vmw_marker_queue *queue,
- uint32_t seqno)
-{
- struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);
-
- if (unlikely(!marker))
- return -ENOMEM;
-
- marker->seqno = seqno;
- marker->submitted = ktime_get_raw_ns();
- spin_lock(&queue->lock);
- list_add_tail(&marker->head, &queue->head);
- spin_unlock(&queue->lock);
-
- return 0;
-}
-
-int vmw_marker_pull(struct vmw_marker_queue *queue,
- uint32_t signaled_seqno)
-{
- struct vmw_marker *marker, *next;
- bool updated = false;
- u64 now;
-
- spin_lock(&queue->lock);
- now = ktime_get_raw_ns();
-
- if (list_empty(&queue->head)) {
- queue->lag = 0;
- queue->lag_time = now;
- updated = true;
- goto out_unlock;
- }
-
- list_for_each_entry_safe(marker, next, &queue->head, head) {
- if (signaled_seqno - marker->seqno > (1 << 30))
- continue;
-
- queue->lag = now - marker->submitted;
- queue->lag_time = now;
- updated = true;
- list_del(&marker->head);
- kfree(marker);
- }
-
-out_unlock:
- spin_unlock(&queue->lock);
-
- return (updated) ? 0 : -EBUSY;
-}
-
-static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
-{
- u64 now;
-
- spin_lock(&queue->lock);
- now = ktime_get_raw_ns();
- queue->lag += now - queue->lag_time;
- queue->lag_time = now;
- spin_unlock(&queue->lock);
- return queue->lag;
-}
-
-
-static bool vmw_lag_lt(struct vmw_marker_queue *queue,
- uint32_t us)
-{
- u64 cond = (u64) us * NSEC_PER_USEC;
-
- return vmw_fifo_lag(queue) <= cond;
-}
-
-int vmw_wait_lag(struct vmw_private *dev_priv,
- struct vmw_marker_queue *queue, uint32_t us)
-{
- struct vmw_marker *marker;
- uint32_t seqno;
- int ret;
-
- while (!vmw_lag_lt(queue, us)) {
- spin_lock(&queue->lock);
- if (list_empty(&queue->head))
- seqno = atomic_read(&dev_priv->marker_seq);
- else {
- marker = list_first_entry(&queue->head,
- struct vmw_marker, head);
- seqno = marker->seqno;
- }
- spin_unlock(&queue->lock);
-
- ret = vmw_wait_seqno(dev_priv, false, seqno, true,
- 3*HZ);
-
- if (unlikely(ret != 0))
- return ret;
-
- (void) vmw_marker_pull(queue, seqno);
- }
- return 0;
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 7f95ed6aa224..a372980fe6a5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -148,7 +148,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
ret = -ENOMEM;
goto out_no_fifo;
@@ -170,7 +170,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
*/
BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
otable->page_table = mob;
return 0;
@@ -203,7 +203,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
return;
bo = otable->page_table->pt_bo;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return;
@@ -215,7 +215,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
cmd->body.sizeInBytes = 0;
cmd->body.validSizeInBytes = 0;
cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
if (bo) {
int ret;
@@ -558,12 +558,12 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
BUG_ON(ret != 0);
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (cmd) {
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
cmd->header.size = sizeof(cmd->body);
cmd->body.mobid = mob->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
}
if (bo) {
@@ -625,7 +625,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
vmw_fifo_resource_inc(dev_priv);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
goto out_no_cmd_space;
@@ -636,7 +636,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index cd7ed1650d60..d6d282c13b7f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -122,7 +122,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;
- cmds = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ cmds = VMW_CMD_RESERVE(dev_priv, fifo_size);
/* hardware has hung, can't do anything here */
if (!cmds)
return -ENOMEM;
@@ -169,7 +169,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
fill_flush(flush, arg->stream_id);
- vmw_fifo_commit(dev_priv, fifo_size);
+ vmw_cmd_commit(dev_priv, fifo_size);
return 0;
}
@@ -192,7 +192,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
int ret;
for (;;) {
- cmds = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmds));
+ cmds = VMW_CMD_RESERVE(dev_priv, sizeof(*cmds));
if (cmds)
break;
@@ -211,7 +211,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
cmds->body.items[0].value = false;
fill_flush(&cmds->flush, stream_id);
- vmw_fifo_commit(dev_priv, sizeof(*cmds));
+ vmw_cmd_commit(dev_priv, sizeof(*cmds));
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 0b76b3d17d4c..0a900afc66ff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -232,7 +232,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- pgoff_t num_pages = vbo->base.num_pages;
+ pgoff_t num_pages = vbo->base.mem.num_pages;
size_t size, acc_size;
int ret;
static struct ttm_operation_ctx ctx = {
@@ -413,7 +413,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
return ret;
page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
- if (unlikely(page_offset >= bo->num_pages)) {
+ if (unlikely(page_offset >= bo->mem.num_pages)) {
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
@@ -456,7 +456,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
page_offset = vmf->pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (page_offset >= bo->num_pages ||
+ if (page_offset >= bo->mem.num_pages ||
vmw_resources_clean(vbo, page_offset,
page_offset + PAGE_SIZE,
&allowed_prefault)) {
@@ -531,7 +531,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
page_offset = vmf->pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (page_offset >= bo->num_pages ||
+ if (page_offset >= bo->mem.num_pages ||
vmw_resources_clean(vbo, page_offset,
page_offset + PAGE_SIZE,
&allowed_prefault)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 00b535831a7a..d1e7b9608145 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -360,7 +360,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
int ret;
if (likely(res->backup)) {
- BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+ BUG_ON(res->backup->base.base.size < size);
return 0;
}
@@ -827,7 +827,7 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
dx_query_ctx = dx_query_mob->dx_query_ctx;
dev_priv = dx_query_ctx->dev_priv;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -835,7 +835,7 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = dx_query_ctx->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
/* Triggers a rebind the next time affected context is bound */
dx_query_mob->dx_query_ctx = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 4bdad2f2d130..b0db059b8cfb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -132,7 +132,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
BUG_ON(!sou->buffer);
fifo_size = sizeof(*cmd);
- cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, fifo_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -153,7 +153,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
cmd->obj.backingStore.pitch = mode->hdisplay * 4;
- vmw_fifo_commit(dev_priv, fifo_size);
+ vmw_cmd_commit(dev_priv, fifo_size);
sou->defined = true;
@@ -181,7 +181,7 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
return 0;
fifo_size = sizeof(*cmd);
- cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, fifo_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -189,7 +189,7 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
cmd->body.screenId = sou->base.unit;
- vmw_fifo_commit(dev_priv, fifo_size);
+ vmw_cmd_commit(dev_priv, fifo_size);
/* Force sync */
ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
@@ -829,7 +829,7 @@ static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
struct vmw_screen_object_unit *sou;
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_plane *primary, *cursor;
@@ -946,7 +946,7 @@ err_free:
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int i, ret;
if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
@@ -992,7 +992,7 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
if (depth == 32)
depth = 24;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (!cmd)
return -ENOMEM;
@@ -1003,7 +1003,7 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
cmd->body.bytesPerLine = framebuffer->base.pitches[0];
/* Buffer is reserved in vram or GMR */
vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -1029,7 +1029,7 @@ static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
int i;
if (!dirty->num_hits) {
- vmw_fifo_commit(dirty->dev_priv, 0);
+ vmw_cmd_commit(dirty->dev_priv, 0);
return;
}
@@ -1061,7 +1061,7 @@ static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
blit->bottom -= sdirty->top;
}
- vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));
+ vmw_cmd_commit(dirty->dev_priv, region_size + sizeof(*cmd));
sdirty->left = sdirty->top = S32_MAX;
sdirty->right = sdirty->bottom = S32_MIN;
@@ -1185,11 +1185,11 @@ out_unref:
static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty)
{
if (!dirty->num_hits) {
- vmw_fifo_commit(dirty->dev_priv, 0);
+ vmw_cmd_commit(dirty->dev_priv, 0);
return;
}
- vmw_fifo_commit(dirty->dev_priv,
+ vmw_cmd_commit(dirty->dev_priv,
sizeof(struct vmw_kms_sou_bo_blit) *
dirty->num_hits);
}
@@ -1295,11 +1295,11 @@ out_unref:
static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
{
if (!dirty->num_hits) {
- vmw_fifo_commit(dirty->dev_priv, 0);
+ vmw_cmd_commit(dirty->dev_priv, 0);
return;
}
- vmw_fifo_commit(dirty->dev_priv,
+ vmw_cmd_commit(dirty->dev_priv,
sizeof(struct vmw_kms_sou_readback_blit) *
dirty->num_hits);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index f328aa5839a2..905ae50aaa2a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -222,7 +222,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
ret = -ENOMEM;
goto out_no_fifo;
@@ -233,7 +233,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
cmd->body.shid = res->id;
cmd->body.type = shader->type;
cmd->body.sizeInBytes = shader->size;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
return 0;
@@ -256,7 +256,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -266,7 +266,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
cmd->body.mobid = bo->mem.start;
cmd->body.offsetInBytes = res->backup_offset;
res->backup_dirty = false;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -284,7 +284,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -293,7 +293,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
cmd->body.shid = res->id;
cmd->body.mobid = SVGA3D_INVALID_ID;
cmd->body.offsetInBytes = 0;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
/*
* Create a fence object and fence the backup buffer.
@@ -324,7 +324,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
mutex_lock(&dev_priv->binding_mutex);
vmw_binding_res_list_scrub(&res->binding_head);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
@@ -333,7 +333,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
mutex_unlock(&dev_priv->binding_mutex);
vmw_resource_release_id(res);
vmw_fifo_resource_dec(dev_priv);
@@ -394,7 +394,7 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
if (!list_empty(&shader->cotable_head) || !shader->committed)
return 0;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), shader->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), shader->ctx->id);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -404,7 +404,7 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
cmd->body.shid = shader->id;
cmd->body.mobid = res->backup->base.mem.start;
cmd->body.offsetInBytes = res->backup_offset;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
@@ -481,7 +481,7 @@ static int vmw_dx_shader_scrub(struct vmw_resource *res)
return 0;
WARN_ON_ONCE(!shader->committed);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -491,7 +491,7 @@ static int vmw_dx_shader_scrub(struct vmw_resource *res)
cmd->body.shid = res->id;
cmd->body.mobid = SVGA3D_INVALID_ID;
cmd->body.offsetInBytes = 0;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
res->id = -1;
list_del_init(&shader->cotable_head);
@@ -856,8 +856,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
return ret;
}
- if ((u64)buffer->base.num_pages * PAGE_SIZE <
- (u64)size + (u64)offset) {
+ if ((u64)buffer->base.base.size < (u64)size + (u64)offset) {
VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
ret = -EINVAL;
goto out_bad_arg;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index 3f97b61dd5d8..7369dd86d3a9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -170,7 +170,7 @@ static int vmw_view_create(struct vmw_resource *res)
return 0;
}
- cmd = VMW_FIFO_RESERVE_DX(res->dev_priv, view->cmd_size, view->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(res->dev_priv, view->cmd_size, view->ctx->id);
if (!cmd) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
@@ -181,7 +181,7 @@ static int vmw_view_create(struct vmw_resource *res)
/* Sid may have changed due to surface eviction. */
WARN_ON(view->srf->id == SVGA3D_INVALID_ID);
cmd->body.sid = view->srf->id;
- vmw_fifo_commit(res->dev_priv, view->cmd_size);
+ vmw_cmd_commit(res->dev_priv, view->cmd_size);
res->id = view->view_id;
list_add_tail(&view->srf_head, &srf->view_list);
vmw_cotable_add_resource(view->cotable, &view->cotable_head);
@@ -213,14 +213,14 @@ static int vmw_view_destroy(struct vmw_resource *res)
if (!view->committed || res->id == -1)
return 0;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), view->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), view->ctx->id);
if (!cmd)
return -ENOMEM;
cmd->header.id = vmw_view_destroy_cmds[view->view_type];
cmd->header.size = sizeof(cmd->body);
cmd->body.view_id = view->view_id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
res->id = -1;
list_del_init(&view->cotable_head);
list_del_init(&view->srf_head);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 5b04ec047ef3..fbe977881364 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -170,7 +170,7 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
SVGA3dCmdDefineGBScreenTarget body;
} *cmd;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -188,7 +188,7 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
stdu->base.set_gui_x = cmd->body.xRoot;
stdu->base.set_gui_y = cmd->body.yRoot;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
stdu->defined = true;
stdu->display_width = mode->hdisplay;
@@ -229,7 +229,7 @@ static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
memset(&image, 0, sizeof(image));
image.sid = res ? res->id : SVGA3D_INVALID_ID;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -239,7 +239,7 @@ static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
cmd->body.stid = stdu->base.unit;
cmd->body.image = image;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -293,7 +293,7 @@ static int vmw_stdu_update_st(struct vmw_private *dev_priv,
return -EINVAL;
}
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -301,7 +301,7 @@ static int vmw_stdu_update_st(struct vmw_private *dev_priv,
0, stdu->display_width,
0, stdu->display_height);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
}
@@ -329,7 +329,7 @@ static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
if (unlikely(!stdu->defined))
return 0;
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -338,7 +338,7 @@ static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
cmd->body.stid = stdu->base.unit;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
/* Force sync */
ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
@@ -499,7 +499,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix);
if (!dirty->num_hits) {
- vmw_fifo_commit(dirty->dev_priv, 0);
+ vmw_cmd_commit(dirty->dev_priv, 0);
return;
}
@@ -512,7 +512,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
cmd->body.host.mipmap = 0;
cmd->body.transfer = ddirty->transfer;
suffix->suffixSize = sizeof(*suffix);
- suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
+ suffix->maximumOffset = ddirty->buf->base.base.size;
if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
blit_size += sizeof(struct vmw_stdu_update);
@@ -522,7 +522,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
ddirty->top, ddirty->bottom);
}
- vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
+ vmw_cmd_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
stdu->display_srf->res.res_dirty = true;
ddirty->left = ddirty->top = S32_MAX;
@@ -628,7 +628,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
dev_priv = vmw_priv(stdu->base.crtc.dev);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (!cmd)
goto out_cleanup;
@@ -636,7 +636,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
region.x1, region.x2,
region.y1, region.y2);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
}
out_cleanup:
@@ -795,7 +795,7 @@ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
size_t commit_size;
if (!dirty->num_hits) {
- vmw_fifo_commit(dirty->dev_priv, 0);
+ vmw_cmd_commit(dirty->dev_priv, 0);
return;
}
@@ -817,7 +817,7 @@ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
vmw_stdu_populate_update(update, stdu->base.unit, sdirty->left,
sdirty->right, sdirty->top, sdirty->bottom);
- vmw_fifo_commit(dirty->dev_priv, commit_size);
+ vmw_cmd_commit(dirty->dev_priv, commit_size);
sdirty->left = sdirty->top = S32_MAX;
sdirty->right = sdirty->bottom = S32_MIN;
@@ -1238,7 +1238,7 @@ static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane *update,
vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
suffix->suffixSize = sizeof(*suffix);
- suffix->maximumOffset = vfbbo->buffer->base.num_pages * PAGE_SIZE;
+ suffix->maximumOffset = vfbbo->buffer->base.base.size;
vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
bb->y1, bb->y2);
@@ -1713,7 +1713,7 @@ static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = {
static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
{
struct vmw_screen_target_display_unit *stdu;
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_plane *primary, *cursor;
@@ -1861,7 +1861,7 @@ static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu)
*/
int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int i, ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
index 193192456663..1dd042a20a66 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
@@ -99,7 +99,7 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
if (!list_empty(&so->cotable_head) || !so->committed )
return 0;
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id);
if (!cmd)
return -ENOMEM;
@@ -109,7 +109,7 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
cmd->body.mobid = res->backup->base.mem.start;
cmd->body.offsetInBytes = res->backup_offset;
cmd->body.sizeInBytes = so->size;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_cotable_add_resource(so->cotable, &so->cotable_head);
@@ -172,7 +172,7 @@ static int vmw_dx_streamoutput_scrub(struct vmw_resource *res)
WARN_ON_ONCE(!so->committed);
- cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id);
+ cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id);
if (!cmd)
return -ENOMEM;
@@ -182,7 +182,7 @@ static int vmw_dx_streamoutput_scrub(struct vmw_resource *res)
cmd->body.mobid = SVGA3D_INVALID_ID;
cmd->body.offsetInBytes = 0;
cmd->body.sizeInBytes = so->size;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
res->id = -1;
list_del_init(&so->cotable_head);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3914bfee0533..f6cab77075a0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -372,12 +372,12 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
if (res->id != -1) {
- cmd = VMW_FIFO_RESERVE(dev_priv, vmw_surface_destroy_size());
+ cmd = VMW_CMD_RESERVE(dev_priv, vmw_surface_destroy_size());
if (unlikely(!cmd))
return;
vmw_surface_destroy_encode(res->id, cmd);
- vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+ vmw_cmd_commit(dev_priv, vmw_surface_destroy_size());
/*
* used_memory_size_atomic, or separate lock
@@ -440,14 +440,14 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
*/
submit_size = vmw_surface_define_size(srf);
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd)) {
ret = -ENOMEM;
goto out_no_fifo;
}
vmw_surface_define_encode(srf, cmd);
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
vmw_fifo_resource_inc(dev_priv);
/*
@@ -492,14 +492,14 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
BUG_ON(!val_buf->bo);
submit_size = vmw_surface_dma_size(srf);
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd))
return -ENOMEM;
vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
vmw_surface_dma_encode(srf, cmd, &ptr, bind);
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
/*
* Create a fence object and fence the backup buffer.
@@ -578,12 +578,12 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
*/
submit_size = vmw_surface_destroy_size();
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd))
return -ENOMEM;
vmw_surface_destroy_encode(res->id, cmd);
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
/*
* Surface memory usage accounting.
@@ -1121,7 +1121,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
submit_len = sizeof(*cmd);
}
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_len);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
cmd3 = (typeof(cmd3))cmd;
cmd4 = (typeof(cmd4))cmd;
@@ -1188,7 +1188,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd->body.size.depth = metadata->base_size.depth;
}
- vmw_fifo_commit(dev_priv, submit_len);
+ vmw_cmd_commit(dev_priv, submit_len);
return 0;
@@ -1219,7 +1219,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
- cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd1))
return -ENOMEM;
@@ -1233,7 +1233,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
cmd2->header.size = sizeof(cmd2->body);
cmd2->body.sid = res->id;
}
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
if (res->backup->dirty && res->backup_dirty) {
/* We've just made a full upload. Clear dirty regions. */
@@ -1272,7 +1272,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
- cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd))
return -ENOMEM;
@@ -1295,7 +1295,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
cmd3->body.sid = res->id;
cmd3->body.mobid = SVGA3D_INVALID_ID;
- vmw_fifo_commit(dev_priv, submit_size);
+ vmw_cmd_commit(dev_priv, submit_size);
/*
* Create a fence object and fence the backup buffer.
@@ -1328,7 +1328,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
vmw_binding_res_list_scrub(&res->binding_head);
- cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(!cmd)) {
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
@@ -1337,7 +1337,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
cmd->header.size = sizeof(cmd->body);
cmd->body.sid = res->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
mutex_unlock(&dev_priv->binding_mutex);
vmw_resource_release_id(res);
vmw_fifo_resource_dec(dev_priv);
@@ -1550,8 +1550,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
&res->backup,
&user_srf->backup_base);
if (ret == 0) {
- if (res->backup->base.num_pages * PAGE_SIZE <
- res->backup_size) {
+ if (res->backup->base.base.size < res->backup_size) {
VMW_DEBUG_USER("Surface backup buffer too small.\n");
vmw_bo_unreference(&res->backup);
ret = -EINVAL;
@@ -1614,7 +1613,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
if (res->backup) {
rep->buffer_map_handle =
drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
- rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+ rep->buffer_size = res->backup->base.base.size;
rep->buffer_handle = backup_handle;
} else {
rep->buffer_map_handle = 0;
@@ -1692,7 +1691,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
rep->crep.buffer_handle = backup_handle;
rep->crep.buffer_map_handle =
drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
- rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+ rep->crep.buffer_size = srf->res.backup->base.base.size;
rep->creq.version = drm_vmw_gb_surface_v1;
rep->creq.svga3d_flags_upper_32_bits =
@@ -1896,7 +1895,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res)
goto out;
alloc_size = num_dirty * ((has_dx) ? sizeof(*cmd1) : sizeof(*cmd2));
- cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
if (!cmd)
return -ENOMEM;
@@ -1932,7 +1931,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res)
}
}
- vmw_fifo_commit(dev_priv, alloc_size);
+ vmw_cmd_commit(dev_priv, alloc_size);
out:
memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
dirty->num_subres);
@@ -2032,14 +2031,14 @@ static int vmw_surface_clean(struct vmw_resource *res)
} *cmd;
alloc_size = sizeof(*cmd);
- cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
+ cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
if (!cmd)
return -ENOMEM;
cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
cmd->header.size = sizeof(cmd->body);
cmd->body.sid = res->id;
- vmw_fifo_commit(dev_priv, alloc_size);
+ vmw_cmd_commit(dev_priv, alloc_size);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index 155ca3a5c7e5..e8e79de255cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -5,7 +5,6 @@
* Copyright (C) 2007-2019 Vmware, Inc. All rights reserved.
*/
#include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 6a04261ce760..dbb068830d80 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -309,7 +309,7 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
*/
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
- struct device *dev = vmw_tt->dev_priv->dev->dev;
+ struct device *dev = vmw_tt->dev_priv->drm.dev;
dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
@@ -330,7 +330,7 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
*/
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
- struct device *dev = vmw_tt->dev_priv->dev->dev;
+ struct device *dev = vmw_tt->dev_priv->drm.dev;
return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}
@@ -385,7 +385,7 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
vsgt->num_pages, 0,
(unsigned long) vsgt->num_pages << PAGE_SHIFT,
- dma_get_max_seg_size(dev_priv->dev->dev),
+ dma_get_max_seg_size(dev_priv->drm.dev),
NULL, 0, GFP_KERNEL);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
@@ -611,8 +611,8 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
vmw_be->mob = NULL;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
- ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags,
- ttm_cached);
+ ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
+ ttm_cached);
else
ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
ttm_cached);
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 74db5a840bed..b293c67230ef 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -220,8 +220,8 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
xen_obj->sgt_imported = sgt;
- ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
- NULL, xen_obj->num_pages);
+ ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages,
+ xen_obj->num_pages);
if (ret < 0)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index c685d94409b0..148add0ca1d6 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -1396,19 +1396,11 @@ static void zynqmp_disp_enable(struct zynqmp_disp *disp)
*/
static void zynqmp_disp_disable(struct zynqmp_disp *disp)
{
- struct drm_crtc *crtc = &disp->crtc;
-
zynqmp_disp_audio_disable(&disp->audio);
zynqmp_disp_avbuf_disable_audio(&disp->avbuf);
zynqmp_disp_avbuf_disable_channels(&disp->avbuf);
zynqmp_disp_avbuf_disable(&disp->avbuf);
-
- /* Mark the flip is done as crtc is disabled anyway */
- if (crtc->state->event) {
- complete_all(crtc->state->event->base.completion);
- crtc->state->event = NULL;
- }
}
static inline struct zynqmp_disp *crtc_to_disp(struct drm_crtc *crtc)
@@ -1499,6 +1491,13 @@ zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(&disp->crtc);
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
clk_disable_unprepare(disp->pclk);
pm_runtime_put_sync(disp->dev);
}
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index c8f7b21fa09e..78d787afe594 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -438,15 +438,10 @@ static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
.atomic_disable = zx_plane_atomic_disable,
};
-static void zx_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_cleanup(plane);
-}
-
static const struct drm_plane_funcs zx_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = zx_plane_destroy,
+ .destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index b4a31d506fcc..e617f60afeea 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -310,10 +310,6 @@ static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
/* unused */
} , {
/* unused */
- } , {
- /* unused */
- } , {
- /* unused */
},
};
/* can't use #7 and #8 for line active and pixel active counters */
diff --git a/drivers/greybus/es2.c b/drivers/greybus/es2.c
index 1df6ab5d339d..48ad154df3a7 100644
--- a/drivers/greybus/es2.c
+++ b/drivers/greybus/es2.c
@@ -567,12 +567,9 @@ static int cport_enable(struct gb_host_device *hd, u16 cport_id,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_INTERFACE, cport_id, 0,
req, sizeof(*req), ES2_USB_CTRL_TIMEOUT);
- if (ret != sizeof(*req)) {
+ if (ret < 0) {
dev_err(&udev->dev, "failed to set cport flags for port %d\n",
cport_id);
- if (ret >= 0)
- ret = -EIO;
-
goto out;
}
@@ -961,12 +958,10 @@ static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout)
0, 0,
rpc->req, le16_to_cpu(rpc->req->size),
ES2_USB_CTRL_TIMEOUT);
- if (retval != le16_to_cpu(rpc->req->size)) {
+ if (retval < 0) {
dev_err(&udev->dev,
"failed to send ARPC request %d: %d\n",
rpc->req->type, retval);
- if (retval > 0)
- retval = -EIO;
return retval;
}
diff --git a/drivers/greybus/greybus_trace.h b/drivers/greybus/greybus_trace.h
index 1bc9f1275c65..616a3bd61aa6 100644
--- a/drivers/greybus/greybus_trace.h
+++ b/drivers/greybus/greybus_trace.h
@@ -40,7 +40,7 @@ DECLARE_EVENT_CLASS(gb_message,
__entry->result = message->header->result;
),
- TP_printk("size=%hu operation_id=0x%04x type=0x%02x result=0x%02x",
+ TP_printk("size=%u operation_id=0x%04x type=0x%02x result=0x%02x",
__entry->size, __entry->operation_id,
__entry->type, __entry->result)
);
@@ -317,7 +317,7 @@ DECLARE_EVENT_CLASS(gb_interface,
__entry->mode_switch = intf->mode_switch;
),
- TP_printk("intf_id=%hhu device_id=%hhu module_id=%hhu D=%d J=%d A=%d E=%d M=%d",
+ TP_printk("intf_id=%u device_id=%u module_id=%u D=%d J=%d A=%d E=%d M=%d",
__entry->id, __entry->device_id, __entry->module_id,
__entry->disconnected, __entry->ejected, __entry->active,
__entry->enabled, __entry->mode_switch)
@@ -391,7 +391,7 @@ DECLARE_EVENT_CLASS(gb_module,
__entry->disconnected = module->disconnected;
),
- TP_printk("hd_bus_id=%d module_id=%hhu num_interfaces=%zu disconnected=%d",
+ TP_printk("hd_bus_id=%d module_id=%u num_interfaces=%zu disconnected=%d",
__entry->hd_bus_id, __entry->module_id,
__entry->num_interfaces, __entry->disconnected)
);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 09fa75a2b289..786b71ef7738 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -853,6 +853,24 @@ config HID_PLANTRONICS
Say M here if you may ever plug in a Plantronics USB audio device.
+config HID_PLAYSTATION
+ tristate "PlayStation HID Driver"
+ depends on HID
+ select CRC32
+ select POWER_SUPPLY
+ help
+ Provides support for Sony PS5 controllers, including support for
+ their special functionalities, e.g. touchpad, lights and motion
+ sensors.
+
+config PLAYSTATION_FF
+ bool "PlayStation force feedback support"
+ depends on HID_PLAYSTATION
+ select INPUT_FF_MEMLESS
+ help
+ Say Y here if you would like to enable force feedback support for
+ PlayStation game controllers.
+
config HID_PRIMAX
tristate "Primax non-fully HID-compliant devices"
depends on HID
@@ -909,6 +927,7 @@ config HID_SONY
* Sony PS3 Blue-ray Disk Remote Control (Bluetooth)
* Logitech Harmony adapter for Sony Playstation 3 (Bluetooth)
* Guitar Hero Live PS3 and Wii U guitar dongles
+ * Guitar Hero PS3 and PC guitar dongles
config SONY_FF
bool "Sony PS2/3/4 accessories force feedback support"
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 014d21fe7dac..c4f6d5c613dc 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -94,6 +94,7 @@ hid-picolcd-$(CONFIG_HID_PICOLCD_CIR) += hid-picolcd_cir.o
hid-picolcd-$(CONFIG_DEBUG_FS) += hid-picolcd_debugfs.o
obj-$(CONFIG_HID_PLANTRONICS) += hid-plantronics.o
+obj-$(CONFIG_HID_PLAYSTATION) += hid-playstation.o
obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
obj-$(CONFIG_HID_REDRAGON) += hid-redragon.o
obj-$(CONFIG_HID_RETRODE) += hid-retrode.o
@@ -138,7 +139,7 @@ obj-$(CONFIG_USB_HID) += usbhid/
obj-$(CONFIG_USB_MOUSE) += usbhid/
obj-$(CONFIG_USB_KBD) += usbhid/
-obj-$(CONFIG_I2C_HID) += i2c-hid/
+obj-$(CONFIG_I2C_HID_CORE) += i2c-hid/
obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/
obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index 3f0ed6a95223..ca556d39da2a 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -21,6 +21,39 @@
#include "hid-ids.h"
+#define CH_WIRELESS_CTL_REPORT_ID 0x11
+
+static int ch_report_wireless(struct hid_report *report, u8 *data, int size)
+{
+ struct hid_device *hdev = report->device;
+ struct input_dev *input;
+
+ if (report->id != CH_WIRELESS_CTL_REPORT_ID || report->maxfield != 1)
+ return 0;
+
+ input = report->field[0]->hidinput->input;
+ if (!input) {
+ hid_warn(hdev, "can't find wireless radio control's input\n");
+ return 0;
+ }
+
+ input_report_key(input, KEY_RFKILL, 1);
+ input_sync(input);
+ input_report_key(input, KEY_RFKILL, 0);
+ input_sync(input);
+
+ return 1;
+}
+
+static int ch_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ if (report->application == HID_GD_WIRELESS_RADIO_CTLS)
+ return ch_report_wireless(report, data, size);
+
+ return 0;
+}
+
#define ch_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
EV_KEY, (c))
static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
@@ -77,10 +110,30 @@ static __u8 *ch_switch12_report_fixup(struct hid_device *hdev, __u8 *rdesc,
return rdesc;
}
+static int ch_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+
+ hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "Chicony hid parse failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "Chicony hw start failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
static const struct hid_device_id ch_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
{ }
};
@@ -91,6 +144,8 @@ static struct hid_driver ch_driver = {
.id_table = ch_devices,
.report_fixup = ch_switch12_report_fixup,
.input_mapping = ch_input_mapping,
+ .probe = ch_probe,
+ .raw_event = ch_raw_event,
};
module_hid_driver(ch_driver);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 56172fe6995c..097cb1ee3126 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -90,7 +90,7 @@ EXPORT_SYMBOL_GPL(hid_register_report);
* Register a new field for this report.
*/
-static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values)
+static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
{
struct hid_field *field;
@@ -101,7 +101,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
field = kzalloc((sizeof(struct hid_field) +
usages * sizeof(struct hid_usage) +
- values * sizeof(unsigned)), GFP_KERNEL);
+ usages * sizeof(unsigned)), GFP_KERNEL);
if (!field)
return NULL;
@@ -300,7 +300,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
usages = max_t(unsigned, parser->local.usage_index,
parser->global.report_count);
- field = hid_register_field(report, usages, parser->global.report_count);
+ field = hid_register_field(report, usages);
if (!field)
return 0;
@@ -1307,6 +1307,9 @@ EXPORT_SYMBOL_GPL(hid_open_report);
static s32 snto32(__u32 value, unsigned n)
{
+ if (!value || !n)
+ return 0;
+
switch (n) {
case 8: return ((__s8)value);
case 16: return ((__s16)value);
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 85a054f1ce38..d9319622da44 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -392,30 +392,34 @@ static int hammer_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
-static int hammer_event(struct hid_device *hid, struct hid_field *field,
- struct hid_usage *usage, __s32 value)
+static void hammer_folded_event(struct hid_device *hdev, bool folded)
{
unsigned long flags;
- if (usage->hid == HID_USAGE_KBD_FOLDED) {
- spin_lock_irqsave(&cbas_ec_lock, flags);
+ spin_lock_irqsave(&cbas_ec_lock, flags);
- /*
- * If we are getting events from Whiskers that means that it
- * is attached to the lid.
- */
- cbas_ec.base_present = true;
- cbas_ec.base_folded = value;
- hid_dbg(hid, "%s: base: %d, folded: %d\n", __func__,
- cbas_ec.base_present, cbas_ec.base_folded);
-
- if (cbas_ec.input) {
- input_report_switch(cbas_ec.input,
- SW_TABLET_MODE, value);
- input_sync(cbas_ec.input);
- }
+ /*
+ * If we are getting events from Whiskers that means that it
+ * is attached to the lid.
+ */
+ cbas_ec.base_present = true;
+ cbas_ec.base_folded = folded;
+ hid_dbg(hdev, "%s: base: %d, folded: %d\n", __func__,
+ cbas_ec.base_present, cbas_ec.base_folded);
- spin_unlock_irqrestore(&cbas_ec_lock, flags);
+ if (cbas_ec.input) {
+ input_report_switch(cbas_ec.input, SW_TABLET_MODE, folded);
+ input_sync(cbas_ec.input);
+ }
+
+ spin_unlock_irqrestore(&cbas_ec_lock, flags);
+}
+
+static int hammer_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ if (usage->hid == HID_USAGE_KBD_FOLDED) {
+ hammer_folded_event(hid, value);
return 1; /* We handled this event */
}
@@ -457,6 +461,47 @@ static bool hammer_has_backlight_control(struct hid_device *hdev)
HID_GD_KEYBOARD, HID_AD_BRIGHTNESS);
}
+static void hammer_get_folded_state(struct hid_device *hdev)
+{
+ struct hid_report *report;
+ char *buf;
+ int len, rlen;
+ int a;
+
+ report = hdev->report_enum[HID_INPUT_REPORT].report_id_hash[0x0];
+
+ if (!report || report->maxfield < 1)
+ return;
+
+ len = hid_report_len(report) + 1;
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ rlen = hid_hw_raw_request(hdev, report->id, buf, len, report->type, HID_REQ_GET_REPORT);
+
+ if (rlen != len) {
+ hid_warn(hdev, "Unable to read base folded state: %d (expected %d)\n", rlen, len);
+ goto out;
+ }
+
+ for (a = 0; a < report->maxfield; a++) {
+ struct hid_field *field = report->field[a];
+
+ if (field->usage->hid == HID_USAGE_KBD_FOLDED) {
+ u32 value = hid_field_extract(hdev, buf+1,
+ field->report_offset, field->report_size);
+
+ hammer_folded_event(hdev, value);
+ break;
+ }
+ }
+
+out:
+ kfree(buf);
+}
+
static int hammer_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -481,6 +526,8 @@ static int hammer_probe(struct hid_device *hdev,
error = hid_hw_open(hdev);
if (error)
return error;
+
+ hammer_get_folded_state(hdev);
}
if (hammer_has_backlight_control(hdev)) {
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5ba0aa1d2335..e42aaae3138f 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -40,6 +40,9 @@
#define USB_VENDOR_ID_ACTIONSTAR 0x2101
#define USB_DEVICE_ID_ACTIONSTAR_1011 0x1011
+#define USB_VENDOR_ID_ACTIVISION 0x1430
+#define USB_DEVICE_ID_ACTIVISION_GUITAR_DONGLE 0x474c
+
#define USB_VENDOR_ID_ADS_TECH 0x06e1
#define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155
@@ -270,6 +273,7 @@
#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE 0x1053
#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2 0x0939
#define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
+#define USB_DEVICE_ID_CHICONY_WIRELESS3 0x1236
#define USB_DEVICE_ID_ASUS_AK1D 0x1125
#define USB_DEVICE_ID_CHICONY_TOSHIBA_WT10A 0x1408
#define USB_DEVICE_ID_CHICONY_ACER_SWITCH12 0x1421
@@ -389,6 +393,7 @@
#define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401
#define USB_DEVICE_ID_HP_X2 0x074d
#define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define USB_VENDOR_ID_ELECOM 0x056e
@@ -641,6 +646,8 @@
#define USB_DEVICE_ID_INNEX_GENESIS_ATARI 0x4745
#define USB_VENDOR_ID_ITE 0x048d
+#define I2C_VENDOR_ID_ITE 0x103c
+#define I2C_DEVICE_ID_ITE_VOYO_WINPAD_A15 0x184f
#define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386
#define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350
#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a
@@ -1073,13 +1080,15 @@
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0
+#define USB_DEVICE_ID_SONY_PS5_CONTROLLER 0x0ce6
#define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5
#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
#define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002
#define USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER 0x1000
-#define USB_VENDOR_ID_SONY_GHLIVE 0x12ba
+#define USB_VENDOR_ID_SONY_RHYTHM 0x12ba
#define USB_DEVICE_ID_SONY_PS3WIIU_GHLIVE_DONGLE 0x074b
+#define USB_DEVICE_ID_SONY_PS3_GUITAR_DONGLE 0x0100
#define USB_VENDOR_ID_SINO_LITE 0x1345
#define USB_DEVICE_ID_SINO_LITE_CONTROLLER 0x3008
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index f23027d2795b..236bccd37760 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -324,6 +324,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
@@ -1854,6 +1856,16 @@ static struct hid_input *hidinput_match_application(struct hid_report *report)
list_for_each_entry(hidinput, &hid->inputs, list) {
if (hidinput->application == report->application)
return hidinput;
+
+ /*
+ * Keep SystemControl and ConsumerControl applications together
+ * with the main keyboard, if present.
+ */
+ if ((report->application == HID_GD_SYSTEM_CONTROL ||
+ report->application == HID_CP_CONSUMER_CONTROL) &&
+ hidinput->application == HID_GD_KEYBOARD) {
+ return hidinput;
+ }
}
return NULL;
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index 22bfbebceaf4..14fc068affad 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -23,11 +23,16 @@ static __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int
hid_info(hdev, "Fixing up Acer Sw5-012 ITE keyboard report descriptor\n");
rdesc[163] = HID_MAIN_ITEM_RELATIVE;
}
- /* For Acer One S1002 keyboard-dock */
+ /* For Acer One S1002/S1003 keyboard-dock */
if (*rsize == 188 && rdesc[185] == 0x81 && rdesc[186] == 0x02) {
- hid_info(hdev, "Fixing up Acer S1002 ITE keyboard report descriptor\n");
+ hid_info(hdev, "Fixing up Acer S1002/S1003 ITE keyboard report descriptor\n");
rdesc[186] = HID_MAIN_ITEM_RELATIVE;
}
+ /* For Acer Aspire Switch 10E (SW3-016) keyboard-dock */
+ if (*rsize == 210 && rdesc[184] == 0x81 && rdesc[185] == 0x02) {
+ hid_info(hdev, "Fixing up Acer Aspire Switch 10E (SW3-016) ITE keyboard report descriptor\n");
+ rdesc[185] = HID_MAIN_ITEM_RELATIVE;
+ }
}
return rdesc;
@@ -114,7 +119,8 @@ static const struct hid_device_id ite_devices[] = {
/* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_SYNAPTICS,
- USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) },
+ USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003),
+ .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
{ }
};
MODULE_DEVICE_TABLE(hid, ite_devices);
diff --git a/drivers/hid/hid-lg-g15.c b/drivers/hid/hid-lg-g15.c
index fcaf8466e627..bfbba0d41933 100644
--- a/drivers/hid/hid-lg-g15.c
+++ b/drivers/hid/hid-lg-g15.c
@@ -647,7 +647,7 @@ static void lg_g15_input_close(struct input_dev *dev)
static int lg_g15_register_led(struct lg_g15_data *g15, int i)
{
- const char * const led_names[] = {
+ static const char * const led_names[] = {
"g15::kbd_backlight",
"g15::lcd_backlight",
"g15::macro_preset1",
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 45e7e0bdd382..271bd8d24339 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -980,6 +980,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
case 0x07:
device_type = "eQUAD step 4 Gaming";
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
+ workitem.reports_supported |= STD_KEYBOARD;
break;
case 0x08:
device_type = "eQUAD step 4 for gamepads";
@@ -994,7 +995,12 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
workitem.reports_supported |= STD_KEYBOARD;
break;
case 0x0d:
- device_type = "eQUAD Lightspeed 1_1";
+ device_type = "eQUAD Lightspeed 1.1";
+ logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
+ workitem.reports_supported |= STD_KEYBOARD;
+ break;
+ case 0x0f:
+ device_type = "eQUAD Lightspeed 1.2";
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
workitem.reports_supported |= STD_KEYBOARD;
break;
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 7eb9a6ddb46a..d459e2dbe647 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -92,6 +92,8 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_CAPABILITY_BATTERY_MILEAGE BIT(2)
#define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS BIT(3)
#define HIDPP_CAPABILITY_BATTERY_VOLTAGE BIT(4)
+#define HIDPP_CAPABILITY_BATTERY_PERCENTAGE BIT(5)
+#define HIDPP_CAPABILITY_UNIFIED_BATTERY BIT(6)
#define lg_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
@@ -152,6 +154,7 @@ struct hidpp_battery {
int voltage;
int charge_type;
bool online;
+ u8 supported_levels_1004;
};
/**
@@ -1171,7 +1174,7 @@ static int hidpp20_batterylevel_get_battery_info(struct hidpp_device *hidpp,
return 0;
}
-static int hidpp20_query_battery_info(struct hidpp_device *hidpp)
+static int hidpp20_query_battery_info_1000(struct hidpp_device *hidpp)
{
u8 feature_type;
int ret;
@@ -1208,7 +1211,7 @@ static int hidpp20_query_battery_info(struct hidpp_device *hidpp)
return 0;
}
-static int hidpp20_battery_event(struct hidpp_device *hidpp,
+static int hidpp20_battery_event_1000(struct hidpp_device *hidpp,
u8 *data, int size)
{
struct hidpp_report *report = (struct hidpp_report *)data;
@@ -1380,6 +1383,224 @@ static int hidpp20_battery_voltage_event(struct hidpp_device *hidpp,
return 0;
}
+/* -------------------------------------------------------------------------- */
+/* 0x1004: Unified battery */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_UNIFIED_BATTERY 0x1004
+
+#define CMD_UNIFIED_BATTERY_GET_CAPABILITIES 0x00
+#define CMD_UNIFIED_BATTERY_GET_STATUS 0x10
+
+#define EVENT_UNIFIED_BATTERY_STATUS_EVENT 0x00
+
+#define FLAG_UNIFIED_BATTERY_LEVEL_CRITICAL BIT(0)
+#define FLAG_UNIFIED_BATTERY_LEVEL_LOW BIT(1)
+#define FLAG_UNIFIED_BATTERY_LEVEL_GOOD BIT(2)
+#define FLAG_UNIFIED_BATTERY_LEVEL_FULL BIT(3)
+
+#define FLAG_UNIFIED_BATTERY_FLAGS_RECHARGEABLE BIT(0)
+#define FLAG_UNIFIED_BATTERY_FLAGS_STATE_OF_CHARGE BIT(1)
+
+static int hidpp20_unifiedbattery_get_capabilities(struct hidpp_device *hidpp,
+ u8 feature_index)
+{
+ struct hidpp_report response;
+ int ret;
+ u8 *params = (u8 *)response.fap.params;
+
+ if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS ||
+ hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_PERCENTAGE) {
+ /* we have already set the device capabilities, so let's skip */
+ return 0;
+ }
+
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_UNIFIED_BATTERY_GET_CAPABILITIES,
+ NULL, 0, &response);
+ /* Ignore these intermittent errors */
+ if (ret == HIDPP_ERROR_RESOURCE_ERROR)
+ return -EIO;
+ if (ret > 0) {
+ hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+ __func__, ret);
+ return -EPROTO;
+ }
+ if (ret)
+ return ret;
+
+ /*
+ * If the device supports state of charge (battery percentage), we won't
+ * export the battery level information. There are 4 possible battery
+ * levels and they are all optional; this means the device might not
+ * support any of them, so we are just better off with the battery
+ * percentage.
+ */
+ if (params[1] & FLAG_UNIFIED_BATTERY_FLAGS_STATE_OF_CHARGE) {
+ hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_PERCENTAGE;
+ hidpp->battery.supported_levels_1004 = 0;
+ } else {
+ hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS;
+ hidpp->battery.supported_levels_1004 = params[0];
+ }
+
+ return 0;
+}
+
+static int hidpp20_unifiedbattery_map_status(struct hidpp_device *hidpp,
+ u8 charging_status,
+ u8 external_power_status)
+{
+ int status;
+
+ switch (charging_status) {
+ case 0: /* discharging */
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ case 1: /* charging */
+ case 2: /* charging slow */
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case 3: /* complete */
+ status = POWER_SUPPLY_STATUS_FULL;
+ break;
+ case 4: /* error */
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ hid_info(hidpp->hid_dev, "%s: charging error",
+ hidpp->name);
+ break;
+ default:
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ }
+
+ return status;
+}
+
+static int hidpp20_unifiedbattery_map_level(struct hidpp_device *hidpp,
+ u8 battery_level)
+{
+ /* clear unsupported level bits */
+ battery_level &= hidpp->battery.supported_levels_1004;
+
+ if (battery_level & FLAG_UNIFIED_BATTERY_LEVEL_FULL)
+ return POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ else if (battery_level & FLAG_UNIFIED_BATTERY_LEVEL_GOOD)
+ return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ else if (battery_level & FLAG_UNIFIED_BATTERY_LEVEL_LOW)
+ return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else if (battery_level & FLAG_UNIFIED_BATTERY_LEVEL_CRITICAL)
+ return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+
+ return POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+}
+
+static int hidpp20_unifiedbattery_get_status(struct hidpp_device *hidpp,
+ u8 feature_index,
+ u8 *state_of_charge,
+ int *status,
+ int *level)
+{
+ struct hidpp_report response;
+ int ret;
+ u8 *params = (u8 *)response.fap.params;
+
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_UNIFIED_BATTERY_GET_STATUS,
+ NULL, 0, &response);
+ /* Ignore these intermittent errors */
+ if (ret == HIDPP_ERROR_RESOURCE_ERROR)
+ return -EIO;
+ if (ret > 0) {
+ hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+ __func__, ret);
+ return -EPROTO;
+ }
+ if (ret)
+ return ret;
+
+ *state_of_charge = params[0];
+ *status = hidpp20_unifiedbattery_map_status(hidpp, params[2], params[3]);
+ *level = hidpp20_unifiedbattery_map_level(hidpp, params[1]);
+
+ return 0;
+}
+
+static int hidpp20_query_battery_info_1004(struct hidpp_device *hidpp)
+{
+ u8 feature_type;
+ int ret;
+ u8 state_of_charge;
+ int status, level;
+
+ if (hidpp->battery.feature_index == 0xff) {
+ ret = hidpp_root_get_feature(hidpp,
+ HIDPP_PAGE_UNIFIED_BATTERY,
+ &hidpp->battery.feature_index,
+ &feature_type);
+ if (ret)
+ return ret;
+ }
+
+ ret = hidpp20_unifiedbattery_get_capabilities(hidpp,
+ hidpp->battery.feature_index);
+ if (ret)
+ return ret;
+
+ ret = hidpp20_unifiedbattery_get_status(hidpp,
+ hidpp->battery.feature_index,
+ &state_of_charge,
+ &status,
+ &level);
+ if (ret)
+ return ret;
+
+ hidpp->capabilities |= HIDPP_CAPABILITY_UNIFIED_BATTERY;
+ hidpp->battery.capacity = state_of_charge;
+ hidpp->battery.status = status;
+ hidpp->battery.level = level;
+ hidpp->battery.online = true;
+
+ return 0;
+}
+
+static int hidpp20_battery_event_1004(struct hidpp_device *hidpp,
+ u8 *data, int size)
+{
+ struct hidpp_report *report = (struct hidpp_report *)data;
+ u8 *params = (u8 *)report->fap.params;
+ int state_of_charge, status, level;
+ bool changed;
+
+ if (report->fap.feature_index != hidpp->battery.feature_index ||
+ report->fap.funcindex_clientid != EVENT_UNIFIED_BATTERY_STATUS_EVENT)
+ return 0;
+
+ state_of_charge = params[0];
+ status = hidpp20_unifiedbattery_map_status(hidpp, params[2], params[3]);
+ level = hidpp20_unifiedbattery_map_level(hidpp, params[1]);
+
+ changed = status != hidpp->battery.status ||
+ (state_of_charge != hidpp->battery.capacity &&
+ hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_PERCENTAGE) ||
+ (level != hidpp->battery.level &&
+ hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS);
+
+ if (changed) {
+ hidpp->battery.capacity = state_of_charge;
+ hidpp->battery.status = status;
+ hidpp->battery.level = level;
+ if (hidpp->battery.ps)
+ power_supply_changed(hidpp->battery.ps);
+ }
+
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* Battery feature helpers */
+/* -------------------------------------------------------------------------- */
+
static enum power_supply_property hidpp_battery_props[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_STATUS,
@@ -3307,7 +3528,10 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
}
if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) {
- ret = hidpp20_battery_event(hidpp, data, size);
+ ret = hidpp20_battery_event_1000(hidpp, data, size);
+ if (ret != 0)
+ return ret;
+ ret = hidpp20_battery_event_1004(hidpp, data, size);
if (ret != 0)
return ret;
ret = hidpp_solar_battery_event(hidpp, data, size);
@@ -3443,9 +3667,14 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
if (hidpp->quirks & HIDPP_QUIRK_CLASS_K750)
ret = hidpp_solar_request_battery_event(hidpp);
else {
- ret = hidpp20_query_battery_voltage_info(hidpp);
+ /* we only support one battery feature right now, so let's
+ check the ones that report battery level first
+ and leave voltage for last */
+ ret = hidpp20_query_battery_info_1000(hidpp);
+ if (ret)
+ ret = hidpp20_query_battery_info_1004(hidpp);
if (ret)
- ret = hidpp20_query_battery_info(hidpp);
+ ret = hidpp20_query_battery_voltage_info(hidpp);
}
if (ret)
@@ -3473,7 +3702,8 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 3;
- if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE)
+ if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE ||
+ hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_PERCENTAGE)
battery_props[num_battery_props++] =
POWER_SUPPLY_PROP_CAPACITY;
@@ -3650,8 +3880,10 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
} else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) {
if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE)
hidpp20_query_battery_voltage_info(hidpp);
+ else if (hidpp->capabilities & HIDPP_CAPABILITY_UNIFIED_BATTERY)
+ hidpp20_query_battery_info_1004(hidpp);
else
- hidpp20_query_battery_info(hidpp);
+ hidpp20_query_battery_info_1000(hidpp);
}
if (hidpp->battery.ps)
power_supply_changed(hidpp->battery.ps);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 0743ef51d3b2..9d9f3e1bd5f4 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -758,7 +758,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
MT_STORE_FIELD(inrange_state);
return 1;
case HID_DG_CONFIDENCE:
- if (cls->name == MT_CLS_WIN_8 &&
+ if ((cls->name == MT_CLS_WIN_8 ||
+ cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
(field->application == HID_DG_TOUCHPAD ||
field->application == HID_DG_TOUCHSCREEN))
app->quirks |= MT_QUIRK_CONFIDENCE;
@@ -1746,6 +1747,13 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
#ifdef CONFIG_PM
+static int mt_suspend(struct hid_device *hdev, pm_message_t state)
+{
+ /* High latency is desirable for power savings during S3/S0ix */
+ mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
+ return 0;
+}
+
static int mt_reset_resume(struct hid_device *hdev)
{
mt_release_contacts(hdev);
@@ -1761,6 +1769,8 @@ static int mt_resume(struct hid_device *hdev)
hid_hw_idle(hdev, 0, 0, HID_REQ_SET_IDLE);
+ mt_set_modes(hdev, HID_LATENCY_NORMAL, true, true);
+
return 0;
}
#endif
@@ -2154,6 +2164,7 @@ static struct hid_driver mt_driver = {
.event = mt_event,
.report = mt_report,
#ifdef CONFIG_PM
+ .suspend = mt_suspend,
.reset_resume = mt_reset_resume,
.resume = mt_resume,
#endif
diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c
new file mode 100644
index 000000000000..ab7c82c2e886
--- /dev/null
+++ b/drivers/hid/hid-playstation.c
@@ -0,0 +1,1351 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * HID driver for Sony DualSense(TM) controller.
+ *
+ * Copyright (c) 2020 Sony Interactive Entertainment
+ */
+
+#include <linux/bits.h>
+#include <linux/crc32.h>
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/idr.h>
+#include <linux/input/mt.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+#include "hid-ids.h"
+
+/* List of connected playstation devices. */
+static DEFINE_MUTEX(ps_devices_lock);
+static LIST_HEAD(ps_devices_list);
+
+static DEFINE_IDA(ps_player_id_allocator);
+
+#define HID_PLAYSTATION_VERSION_PATCH 0x8000
+
+/* Base class for playstation devices. */
+struct ps_device {
+ struct list_head list;
+ struct hid_device *hdev;
+ spinlock_t lock;
+
+ uint32_t player_id;
+
+ struct power_supply_desc battery_desc;
+ struct power_supply *battery;
+ uint8_t battery_capacity;
+ int battery_status;
+
+ uint8_t mac_address[6]; /* Note: stored in little endian order. */
+ uint32_t hw_version;
+ uint32_t fw_version;
+
+ int (*parse_report)(struct ps_device *dev, struct hid_report *report, u8 *data, int size);
+};
+
+/* Calibration data for playstation motion sensors. */
+struct ps_calibration_data {
+ int abs_code;
+ short bias;
+ int sens_numer;
+ int sens_denom;
+};
+
+/* Seed values for DualShock4 / DualSense CRC32 for different report types. */
+#define PS_INPUT_CRC32_SEED 0xA1
+#define PS_OUTPUT_CRC32_SEED 0xA2
+#define PS_FEATURE_CRC32_SEED 0xA3
+
+#define DS_INPUT_REPORT_USB 0x01
+#define DS_INPUT_REPORT_USB_SIZE 64
+#define DS_INPUT_REPORT_BT 0x31
+#define DS_INPUT_REPORT_BT_SIZE 78
+#define DS_OUTPUT_REPORT_USB 0x02
+#define DS_OUTPUT_REPORT_USB_SIZE 63
+#define DS_OUTPUT_REPORT_BT 0x31
+#define DS_OUTPUT_REPORT_BT_SIZE 78
+
+#define DS_FEATURE_REPORT_CALIBRATION 0x05
+#define DS_FEATURE_REPORT_CALIBRATION_SIZE 41
+#define DS_FEATURE_REPORT_PAIRING_INFO 0x09
+#define DS_FEATURE_REPORT_PAIRING_INFO_SIZE 20
+#define DS_FEATURE_REPORT_FIRMWARE_INFO 0x20
+#define DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE 64
+
+/* Button masks for DualSense input report. */
+#define DS_BUTTONS0_HAT_SWITCH GENMASK(3, 0)
+#define DS_BUTTONS0_SQUARE BIT(4)
+#define DS_BUTTONS0_CROSS BIT(5)
+#define DS_BUTTONS0_CIRCLE BIT(6)
+#define DS_BUTTONS0_TRIANGLE BIT(7)
+#define DS_BUTTONS1_L1 BIT(0)
+#define DS_BUTTONS1_R1 BIT(1)
+#define DS_BUTTONS1_L2 BIT(2)
+#define DS_BUTTONS1_R2 BIT(3)
+#define DS_BUTTONS1_CREATE BIT(4)
+#define DS_BUTTONS1_OPTIONS BIT(5)
+#define DS_BUTTONS1_L3 BIT(6)
+#define DS_BUTTONS1_R3 BIT(7)
+#define DS_BUTTONS2_PS_HOME BIT(0)
+#define DS_BUTTONS2_TOUCHPAD BIT(1)
+#define DS_BUTTONS2_MIC_MUTE BIT(2)
+
+/* Status field of DualSense input report. */
+#define DS_STATUS_BATTERY_CAPACITY GENMASK(3, 0)
+#define DS_STATUS_CHARGING GENMASK(7, 4)
+#define DS_STATUS_CHARGING_SHIFT 4
+
+/*
+ * Status of a DualSense touch point contact.
+ * Contact IDs with the highest bit set are 'inactive'
+ * and any associated data is then invalid.
+ */
+#define DS_TOUCH_POINT_INACTIVE BIT(7)
+
+ /* Magic value required in tag field of Bluetooth output report. */
+#define DS_OUTPUT_TAG 0x10
+/* Flags for DualSense output report. */
+#define DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION BIT(0)
+#define DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT BIT(1)
+#define DS_OUTPUT_VALID_FLAG1_MIC_MUTE_LED_CONTROL_ENABLE BIT(0)
+#define DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE BIT(1)
+#define DS_OUTPUT_VALID_FLAG1_LIGHTBAR_CONTROL_ENABLE BIT(2)
+#define DS_OUTPUT_VALID_FLAG1_RELEASE_LEDS BIT(3)
+#define DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE BIT(4)
+#define DS_OUTPUT_VALID_FLAG2_LIGHTBAR_SETUP_CONTROL_ENABLE BIT(1)
+#define DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE BIT(4)
+#define DS_OUTPUT_LIGHTBAR_SETUP_LIGHT_OUT BIT(1)
+
+/* DualSense hardware limits */
+#define DS_ACC_RES_PER_G 8192
+#define DS_ACC_RANGE (4*DS_ACC_RES_PER_G)
+#define DS_GYRO_RES_PER_DEG_S 1024
+#define DS_GYRO_RANGE (2048*DS_GYRO_RES_PER_DEG_S)
+#define DS_TOUCHPAD_WIDTH 1920
+#define DS_TOUCHPAD_HEIGHT 1080
+
+struct dualsense {
+ struct ps_device base;
+ struct input_dev *gamepad;
+ struct input_dev *sensors;
+ struct input_dev *touchpad;
+
+ /* Calibration data for accelerometer and gyroscope. */
+ struct ps_calibration_data accel_calib_data[3];
+ struct ps_calibration_data gyro_calib_data[3];
+
+ /* Timestamp for sensor data */
+ bool sensor_timestamp_initialized;
+ uint32_t prev_sensor_timestamp;
+ uint32_t sensor_timestamp_us;
+
+ /* Compatible rumble state */
+ bool update_rumble;
+ uint8_t motor_left;
+ uint8_t motor_right;
+
+ /* RGB lightbar */
+ bool update_lightbar;
+ uint8_t lightbar_red;
+ uint8_t lightbar_green;
+ uint8_t lightbar_blue;
+
+ /* Microphone */
+ bool update_mic_mute;
+ bool mic_muted;
+ bool last_btn_mic_state;
+
+ /* Player leds */
+ bool update_player_leds;
+ uint8_t player_leds_state;
+ struct led_classdev player_leds[5];
+
+ struct work_struct output_worker;
+ void *output_report_dmabuf;
+ uint8_t output_seq; /* Sequence number for output report. */
+};
+
+struct dualsense_touch_point {
+ uint8_t contact;
+ uint8_t x_lo;
+ uint8_t x_hi:4, y_lo:4;
+ uint8_t y_hi;
+} __packed;
+static_assert(sizeof(struct dualsense_touch_point) == 4);
+
+/* Main DualSense input report excluding any BT/USB specific headers. */
+struct dualsense_input_report {
+ uint8_t x, y;
+ uint8_t rx, ry;
+ uint8_t z, rz;
+ uint8_t seq_number;
+ uint8_t buttons[4];
+ uint8_t reserved[4];
+
+ /* Motion sensors */
+ __le16 gyro[3]; /* x, y, z */
+ __le16 accel[3]; /* x, y, z */
+ __le32 sensor_timestamp;
+ uint8_t reserved2;
+
+ /* Touchpad */
+ struct dualsense_touch_point points[2];
+
+ uint8_t reserved3[12];
+ uint8_t status;
+ uint8_t reserved4[10];
+} __packed;
+/* The common input report size equals the size of the USB report minus 1 byte for the ReportID. */
+static_assert(sizeof(struct dualsense_input_report) == DS_INPUT_REPORT_USB_SIZE - 1);
+
+/* Common data between DualSense BT/USB main output report. */
+struct dualsense_output_report_common {
+ uint8_t valid_flag0;
+ uint8_t valid_flag1;
+
+ /* For DualShock 4 compatibility mode. */
+ uint8_t motor_right;
+ uint8_t motor_left;
+
+ /* Audio controls */
+ uint8_t reserved[4];
+ uint8_t mute_button_led;
+
+ uint8_t power_save_control;
+ uint8_t reserved2[28];
+
+ /* LEDs and lightbar */
+ uint8_t valid_flag2;
+ uint8_t reserved3[2];
+ uint8_t lightbar_setup;
+ uint8_t led_brightness;
+ uint8_t player_leds;
+ uint8_t lightbar_red;
+ uint8_t lightbar_green;
+ uint8_t lightbar_blue;
+} __packed;
+static_assert(sizeof(struct dualsense_output_report_common) == 47);
+
+struct dualsense_output_report_bt {
+ uint8_t report_id; /* 0x31 */
+ uint8_t seq_tag;
+ uint8_t tag;
+ struct dualsense_output_report_common common;
+ uint8_t reserved[24];
+ __le32 crc32;
+} __packed;
+static_assert(sizeof(struct dualsense_output_report_bt) == DS_OUTPUT_REPORT_BT_SIZE);
+
+struct dualsense_output_report_usb {
+ uint8_t report_id; /* 0x02 */
+ struct dualsense_output_report_common common;
+ uint8_t reserved[15];
+} __packed;
+static_assert(sizeof(struct dualsense_output_report_usb) == DS_OUTPUT_REPORT_USB_SIZE);
+
+/*
+ * The DualSense has a main output report used to control most features. It is
+ * largely the same between Bluetooth and USB except for different headers and CRC.
+ * This structure hides the differences between the two to simplify sending output reports.
+ */
+struct dualsense_output_report {
+ uint8_t *data; /* Start of data */
+ uint8_t len; /* Size of output report */
+
+ /* Points to the Bluetooth data payload in the case of a Bluetooth report, else NULL. */
+ struct dualsense_output_report_bt *bt;
+ /* Points to the USB data payload in the case of a USB report, else NULL. */
+ struct dualsense_output_report_usb *usb;
+ /* Points to common section of report, so past any headers. */
+ struct dualsense_output_report_common *common;
+};
+
+/*
+ * Common gamepad buttons across DualShock 3 / 4 and DualSense.
+ * Note: for devices with a touchpad, the touchpad button is not included
+ * as it will be part of the touchpad device.
+ */
+static const int ps_gamepad_buttons[] = {
+ BTN_WEST, /* Square */
+ BTN_NORTH, /* Triangle */
+ BTN_EAST, /* Circle */
+ BTN_SOUTH, /* Cross */
+ BTN_TL, /* L1 */
+ BTN_TR, /* R1 */
+ BTN_TL2, /* L2 */
+ BTN_TR2, /* R2 */
+ BTN_SELECT, /* Create (PS5) / Share (PS4) */
+ BTN_START, /* Option */
+ BTN_THUMBL, /* L3 */
+ BTN_THUMBR, /* R3 */
+ BTN_MODE, /* PS Home */
+};
+
+static const struct { int x; int y; } ps_gamepad_hat_mapping[] = {
+ {0, -1}, {1, -1}, {1, 0}, {1, 1}, {0, 1}, {-1, 1}, {-1, 0}, {-1, -1},
+ {0, 0},
+};
+
+/*
+ * Add a new ps_device to ps_devices if it doesn't exist.
+ * Return error on duplicate device, which can happen if the same
+ * device is connected using both Bluetooth and USB.
+ */
+static int ps_devices_list_add(struct ps_device *dev)
+{
+ struct ps_device *entry;
+
+ mutex_lock(&ps_devices_lock);
+ list_for_each_entry(entry, &ps_devices_list, list) {
+ if (!memcmp(entry->mac_address, dev->mac_address, sizeof(dev->mac_address))) {
+ hid_err(dev->hdev, "Duplicate device found for MAC address %pMR.\n",
+ dev->mac_address);
+ mutex_unlock(&ps_devices_lock);
+ return -EEXIST;
+ }
+ }
+
+ list_add_tail(&dev->list, &ps_devices_list);
+ mutex_unlock(&ps_devices_lock);
+ return 0;
+}
+
+static int ps_devices_list_remove(struct ps_device *dev)
+{
+ mutex_lock(&ps_devices_lock);
+ list_del(&dev->list);
+ mutex_unlock(&ps_devices_lock);
+ return 0;
+}
+
+static int ps_device_set_player_id(struct ps_device *dev)
+{
+ int ret = ida_alloc(&ps_player_id_allocator, GFP_KERNEL);
+
+ if (ret < 0)
+ return ret;
+
+ dev->player_id = ret;
+ return 0;
+}
+
+static void ps_device_release_player_id(struct ps_device *dev)
+{
+ ida_free(&ps_player_id_allocator, dev->player_id);
+
+ dev->player_id = U32_MAX;
+}
+
+static struct input_dev *ps_allocate_input_dev(struct hid_device *hdev, const char *name_suffix)
+{
+ struct input_dev *input_dev;
+
+ input_dev = devm_input_allocate_device(&hdev->dev);
+ if (!input_dev)
+ return ERR_PTR(-ENOMEM);
+
+ input_dev->id.bustype = hdev->bus;
+ input_dev->id.vendor = hdev->vendor;
+ input_dev->id.product = hdev->product;
+ input_dev->id.version = hdev->version;
+ input_dev->uniq = hdev->uniq;
+
+ if (name_suffix) {
+ input_dev->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s", hdev->name,
+ name_suffix);
+ if (!input_dev->name)
+ return ERR_PTR(-ENOMEM);
+ } else {
+ input_dev->name = hdev->name;
+ }
+
+ input_set_drvdata(input_dev, hdev);
+
+ return input_dev;
+}
+
+static enum power_supply_property ps_power_supply_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_SCOPE,
+};
+
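+/*
+ * Report the battery state last cached by the input report parser. The cached
+ * values are read under the device spinlock since they are updated from the
+ * raw_event path.
+ */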
+static int ps_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ps_device *dev = power_supply_get_drvdata(psy);
+ uint8_t battery_capacity;
+ int battery_status;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ battery_capacity = dev->battery_capacity;
+ battery_status = dev->battery_status;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = battery_status;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = battery_capacity;
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ps_device_register_battery(struct ps_device *dev)
+{
+ struct power_supply *battery;
+ struct power_supply_config battery_cfg = { .drv_data = dev };
+ int ret;
+
+ dev->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY;
+ dev->battery_desc.properties = ps_power_supply_props;
+ dev->battery_desc.num_properties = ARRAY_SIZE(ps_power_supply_props);
+ dev->battery_desc.get_property = ps_battery_get_property;
+ dev->battery_desc.name = devm_kasprintf(&dev->hdev->dev, GFP_KERNEL,
+ "ps-controller-battery-%pMR", dev->mac_address);
+ if (!dev->battery_desc.name)
+ return -ENOMEM;
+
+ battery = devm_power_supply_register(&dev->hdev->dev, &dev->battery_desc, &battery_cfg);
+ if (IS_ERR(battery)) {
+ ret = PTR_ERR(battery);
+ hid_err(dev->hdev, "Unable to register battery device: %d\n", ret);
+ return ret;
+ }
+ dev->battery = battery;
+
+ ret = power_supply_powers(dev->battery, &dev->hdev->dev);
+ if (ret) {
+ hid_err(dev->hdev, "Unable to activate battery device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Compute crc32 of HID data and compare against expected CRC. */
+static bool ps_check_crc32(uint8_t seed, uint8_t *data, size_t len, uint32_t report_crc)
+{
+ uint32_t crc;
+
+ crc = crc32_le(0xFFFFFFFF, &seed, 1);
+ crc = ~crc32_le(crc, data, len);
+
+ return crc == report_crc;
+}
+
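+/*
+ * Create the gamepad input node: analog sticks on ABS_X/Y and ABS_RX/RY,
+ * analog triggers on ABS_Z/RZ, the d-pad on ABS_HAT0X/Y and the buttons from
+ * ps_gamepad_buttons. Rumble is exposed as memless force feedback when a
+ * play_effect callback is supplied.
+ */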
+static struct input_dev *ps_gamepad_create(struct hid_device *hdev,
+ int (*play_effect)(struct input_dev *, void *, struct ff_effect *))
+{
+ struct input_dev *gamepad;
+ unsigned int i;
+ int ret;
+
+ gamepad = ps_allocate_input_dev(hdev, NULL);
+ if (IS_ERR(gamepad))
+ return ERR_CAST(gamepad);
+
+ input_set_abs_params(gamepad, ABS_X, 0, 255, 0, 0);
+ input_set_abs_params(gamepad, ABS_Y, 0, 255, 0, 0);
+ input_set_abs_params(gamepad, ABS_Z, 0, 255, 0, 0);
+ input_set_abs_params(gamepad, ABS_RX, 0, 255, 0, 0);
+ input_set_abs_params(gamepad, ABS_RY, 0, 255, 0, 0);
+ input_set_abs_params(gamepad, ABS_RZ, 0, 255, 0, 0);
+
+ input_set_abs_params(gamepad, ABS_HAT0X, -1, 1, 0, 0);
+ input_set_abs_params(gamepad, ABS_HAT0Y, -1, 1, 0, 0);
+
+ for (i = 0; i < ARRAY_SIZE(ps_gamepad_buttons); i++)
+ input_set_capability(gamepad, EV_KEY, ps_gamepad_buttons[i]);
+
+#if IS_ENABLED(CONFIG_PLAYSTATION_FF)
+ if (play_effect) {
+ input_set_capability(gamepad, EV_FF, FF_RUMBLE);
+ input_ff_create_memless(gamepad, NULL, play_effect);
+ }
+#endif
+
+ ret = input_register_device(gamepad);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return gamepad;
+}
+
+static int ps_get_report(struct hid_device *hdev, uint8_t report_id, uint8_t *buf, size_t size)
+{
+ int ret;
+
+ ret = hid_hw_raw_request(hdev, report_id, buf, size, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret < 0) {
+ hid_err(hdev, "Failed to retrieve feature with reportID %d: %d\n", report_id, ret);
+ return ret;
+ }
+
+ if (ret != size) {
+ hid_err(hdev, "Invalid byte count transferred, expected %zu got %d\n", size, ret);
+ return -EINVAL;
+ }
+
+ if (buf[0] != report_id) {
+ hid_err(hdev, "Invalid reportID received, expected %d got %d\n", report_id, buf[0]);
+ return -EINVAL;
+ }
+
+ if (hdev->bus == BUS_BLUETOOTH) {
+ /* Last 4 bytes contain the crc32. */
+ uint8_t crc_offset = size - 4;
+ uint32_t report_crc = get_unaligned_le32(&buf[crc_offset]);
+
+ if (!ps_check_crc32(PS_FEATURE_CRC32_SEED, buf, crc_offset, report_crc)) {
+ hid_err(hdev, "CRC check failed for reportID=%d\n", report_id);
+ return -EILSEQ;
+ }
+ }
+
+ return 0;
+}
+
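+/*
+ * Create the motion sensors input node, marked INPUT_PROP_ACCELEROMETER.
+ * Accelerometer data is reported on ABS_X/Y/Z and gyroscope data on
+ * ABS_RX/RY/RZ, with hardware timestamps delivered through MSC_TIMESTAMP.
+ */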
+static struct input_dev *ps_sensors_create(struct hid_device *hdev, int accel_range, int accel_res,
+ int gyro_range, int gyro_res)
+{
+ struct input_dev *sensors;
+ int ret;
+
+ sensors = ps_allocate_input_dev(hdev, "Motion Sensors");
+ if (IS_ERR(sensors))
+ return ERR_CAST(sensors);
+
+ __set_bit(INPUT_PROP_ACCELEROMETER, sensors->propbit);
+ __set_bit(EV_MSC, sensors->evbit);
+ __set_bit(MSC_TIMESTAMP, sensors->mscbit);
+
+ /* Accelerometer */
+ input_set_abs_params(sensors, ABS_X, -accel_range, accel_range, 16, 0);
+ input_set_abs_params(sensors, ABS_Y, -accel_range, accel_range, 16, 0);
+ input_set_abs_params(sensors, ABS_Z, -accel_range, accel_range, 16, 0);
+ input_abs_set_res(sensors, ABS_X, accel_res);
+ input_abs_set_res(sensors, ABS_Y, accel_res);
+ input_abs_set_res(sensors, ABS_Z, accel_res);
+
+ /* Gyroscope */
+ input_set_abs_params(sensors, ABS_RX, -gyro_range, gyro_range, 16, 0);
+ input_set_abs_params(sensors, ABS_RY, -gyro_range, gyro_range, 16, 0);
+ input_set_abs_params(sensors, ABS_RZ, -gyro_range, gyro_range, 16, 0);
+ input_abs_set_res(sensors, ABS_RX, gyro_res);
+ input_abs_set_res(sensors, ABS_RY, gyro_res);
+ input_abs_set_res(sensors, ABS_RZ, gyro_res);
+
+ ret = input_register_device(sensors);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return sensors;
+}
+
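+/*
+ * Create the touchpad input node as a multi-touch pointer device. The
+ * physical click underneath the touchpad is reported as BTN_LEFT
+ * (INPUT_PROP_BUTTONPAD).
+ */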
+static struct input_dev *ps_touchpad_create(struct hid_device *hdev, int width, int height,
+ unsigned int num_contacts)
+{
+ struct input_dev *touchpad;
+ int ret;
+
+ touchpad = ps_allocate_input_dev(hdev, "Touchpad");
+ if (IS_ERR(touchpad))
+ return ERR_CAST(touchpad);
+
+ /* Map button underneath touchpad to BTN_LEFT. */
+ input_set_capability(touchpad, EV_KEY, BTN_LEFT);
+ __set_bit(INPUT_PROP_BUTTONPAD, touchpad->propbit);
+
+ input_set_abs_params(touchpad, ABS_MT_POSITION_X, 0, width - 1, 0, 0);
+ input_set_abs_params(touchpad, ABS_MT_POSITION_Y, 0, height - 1, 0, 0);
+
+ ret = input_mt_init_slots(touchpad, num_contacts, INPUT_MT_POINTER);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = input_register_device(touchpad);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return touchpad;
+}
+
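+/*
+ * sysfs attributes exposing the hardware and firmware versions as hex values,
+ * as read from the controller's firmware info feature report.
+ */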
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute
+ *attr, char *buf)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct ps_device *ps_dev = hid_get_drvdata(hdev);
+
+ return sysfs_emit(buf, "0x%08x\n", ps_dev->fw_version);
+}
+
+static DEVICE_ATTR_RO(firmware_version);
+
+static ssize_t hardware_version_show(struct device *dev,
+ struct device_attribute
+ *attr, char *buf)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct ps_device *ps_dev = hid_get_drvdata(hdev);
+
+ return sysfs_emit(buf, "0x%08x\n", ps_dev->hw_version);
+}
+
+static DEVICE_ATTR_RO(hardware_version);
+
+static struct attribute *ps_device_attributes[] = {
+ &dev_attr_firmware_version.attr,
+ &dev_attr_hardware_version.attr,
+ NULL
+};
+
+static const struct attribute_group ps_device_attribute_group = {
+ .attrs = ps_device_attributes,
+};
+
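+/*
+ * Read the calibration feature report, which provides per-axis bias and
+ * plus/minus range values. These are stored as numerator/denominator pairs
+ * and later applied to raw samples with mult_frac() in the report parser.
+ */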
+static int dualsense_get_calibration_data(struct dualsense *ds)
+{
+ short gyro_pitch_bias, gyro_pitch_plus, gyro_pitch_minus;
+ short gyro_yaw_bias, gyro_yaw_plus, gyro_yaw_minus;
+ short gyro_roll_bias, gyro_roll_plus, gyro_roll_minus;
+ short gyro_speed_plus, gyro_speed_minus;
+ short acc_x_plus, acc_x_minus;
+ short acc_y_plus, acc_y_minus;
+ short acc_z_plus, acc_z_minus;
+ int speed_2x;
+ int range_2g;
+ int ret = 0;
+ uint8_t *buf;
+
+ buf = kzalloc(DS_FEATURE_REPORT_CALIBRATION_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_CALIBRATION, buf,
+ DS_FEATURE_REPORT_CALIBRATION_SIZE);
+ if (ret) {
+ hid_err(ds->base.hdev, "Failed to retrieve DualSense calibration info: %d\n", ret);
+ goto err_free;
+ }
+
+ gyro_pitch_bias = get_unaligned_le16(&buf[1]);
+ gyro_yaw_bias = get_unaligned_le16(&buf[3]);
+ gyro_roll_bias = get_unaligned_le16(&buf[5]);
+ gyro_pitch_plus = get_unaligned_le16(&buf[7]);
+ gyro_pitch_minus = get_unaligned_le16(&buf[9]);
+ gyro_yaw_plus = get_unaligned_le16(&buf[11]);
+ gyro_yaw_minus = get_unaligned_le16(&buf[13]);
+ gyro_roll_plus = get_unaligned_le16(&buf[15]);
+ gyro_roll_minus = get_unaligned_le16(&buf[17]);
+ gyro_speed_plus = get_unaligned_le16(&buf[19]);
+ gyro_speed_minus = get_unaligned_le16(&buf[21]);
+ acc_x_plus = get_unaligned_le16(&buf[23]);
+ acc_x_minus = get_unaligned_le16(&buf[25]);
+ acc_y_plus = get_unaligned_le16(&buf[27]);
+ acc_y_minus = get_unaligned_le16(&buf[29]);
+ acc_z_plus = get_unaligned_le16(&buf[31]);
+ acc_z_minus = get_unaligned_le16(&buf[33]);
+
+ /*
+ * Set gyroscope calibration and normalization parameters.
+ * Data values will be normalized to 1/DS_GYRO_RES_PER_DEG_S degree/s.
+ */
+ speed_2x = (gyro_speed_plus + gyro_speed_minus);
+ ds->gyro_calib_data[0].abs_code = ABS_RX;
+ ds->gyro_calib_data[0].bias = gyro_pitch_bias;
+ ds->gyro_calib_data[0].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S;
+ ds->gyro_calib_data[0].sens_denom = gyro_pitch_plus - gyro_pitch_minus;
+
+ ds->gyro_calib_data[1].abs_code = ABS_RY;
+ ds->gyro_calib_data[1].bias = gyro_yaw_bias;
+ ds->gyro_calib_data[1].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S;
+ ds->gyro_calib_data[1].sens_denom = gyro_yaw_plus - gyro_yaw_minus;
+
+ ds->gyro_calib_data[2].abs_code = ABS_RZ;
+ ds->gyro_calib_data[2].bias = gyro_roll_bias;
+ ds->gyro_calib_data[2].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S;
+ ds->gyro_calib_data[2].sens_denom = gyro_roll_plus - gyro_roll_minus;
+
+ /*
+ * Set accelerometer calibration and normalization parameters.
+ * Data values will be normalized to 1/DS_ACC_RES_PER_G g.
+ */
+ range_2g = acc_x_plus - acc_x_minus;
+ ds->accel_calib_data[0].abs_code = ABS_X;
+ ds->accel_calib_data[0].bias = acc_x_plus - range_2g / 2;
+ ds->accel_calib_data[0].sens_numer = 2*DS_ACC_RES_PER_G;
+ ds->accel_calib_data[0].sens_denom = range_2g;
+
+ range_2g = acc_y_plus - acc_y_minus;
+ ds->accel_calib_data[1].abs_code = ABS_Y;
+ ds->accel_calib_data[1].bias = acc_y_plus - range_2g / 2;
+ ds->accel_calib_data[1].sens_numer = 2*DS_ACC_RES_PER_G;
+ ds->accel_calib_data[1].sens_denom = range_2g;
+
+ range_2g = acc_z_plus - acc_z_minus;
+ ds->accel_calib_data[2].abs_code = ABS_Z;
+ ds->accel_calib_data[2].bias = acc_z_plus - range_2g / 2;
+ ds->accel_calib_data[2].sens_numer = 2*DS_ACC_RES_PER_G;
+ ds->accel_calib_data[2].sens_denom = range_2g;
+
+err_free:
+ kfree(buf);
+ return ret;
+}
+
+static int dualsense_get_firmware_info(struct dualsense *ds)
+{
+ uint8_t *buf;
+ int ret;
+
+ buf = kzalloc(DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_FIRMWARE_INFO, buf,
+ DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE);
+ if (ret) {
+ hid_err(ds->base.hdev, "Failed to retrieve DualSense firmware info: %d\n", ret);
+ goto err_free;
+ }
+
+ ds->base.hw_version = get_unaligned_le32(&buf[24]);
+ ds->base.fw_version = get_unaligned_le32(&buf[28]);
+
+err_free:
+ kfree(buf);
+ return ret;
+}
+
+static int dualsense_get_mac_address(struct dualsense *ds)
+{
+ uint8_t *buf;
+ int ret = 0;
+
+ buf = kzalloc(DS_FEATURE_REPORT_PAIRING_INFO_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_PAIRING_INFO, buf,
+ DS_FEATURE_REPORT_PAIRING_INFO_SIZE);
+ if (ret) {
+ hid_err(ds->base.hdev, "Failed to retrieve DualSense pairing info: %d\n", ret);
+ goto err_free;
+ }
+
+ memcpy(ds->base.mac_address, &buf[1], sizeof(ds->base.mac_address));
+
+err_free:
+ kfree(buf);
+ return ret;
+}
+
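+/*
+ * Initialize an output report buffer for the current transport. The Bluetooth
+ * variant carries an extra header with a per-report sequence number, while
+ * rp->common points at the payload shared by both variants.
+ */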
+static void dualsense_init_output_report(struct dualsense *ds, struct dualsense_output_report *rp,
+ void *buf)
+{
+ struct hid_device *hdev = ds->base.hdev;
+
+ if (hdev->bus == BUS_BLUETOOTH) {
+ struct dualsense_output_report_bt *bt = buf;
+
+ memset(bt, 0, sizeof(*bt));
+ bt->report_id = DS_OUTPUT_REPORT_BT;
+ bt->tag = DS_OUTPUT_TAG; /* Tag must be set. Exact meaning is unclear. */
+
+ /*
+ * The highest 4 bits are a sequence number, which needs to be incremented
+ * with every report. The lowest 4 bits are a tag and can be zero for now.
+ */
+ bt->seq_tag = (ds->output_seq << 4) | 0x0;
+ if (++ds->output_seq == 16)
+ ds->output_seq = 0;
+
+ rp->data = buf;
+ rp->len = sizeof(*bt);
+ rp->bt = bt;
+ rp->usb = NULL;
+ rp->common = &bt->common;
+ } else { /* USB */
+ struct dualsense_output_report_usb *usb = buf;
+
+ memset(usb, 0, sizeof(*usb));
+ usb->report_id = DS_OUTPUT_REPORT_USB;
+
+ rp->data = buf;
+ rp->len = sizeof(*usb);
+ rp->bt = NULL;
+ rp->usb = usb;
+ rp->common = &usb->common;
+ }
+}
+
+/*
+ * Helper function to send DualSense output reports. For Bluetooth reports,
+ * a CRC covering the rest of the report is stored in the last 4 bytes.
+ */
+static void dualsense_send_output_report(struct dualsense *ds,
+ struct dualsense_output_report *report)
+{
+ struct hid_device *hdev = ds->base.hdev;
+
+ /* Bluetooth packets need to be signed with a CRC in the last 4 bytes. */
+ if (report->bt) {
+ uint32_t crc;
+ uint8_t seed = PS_OUTPUT_CRC32_SEED;
+
+ crc = crc32_le(0xFFFFFFFF, &seed, 1);
+ crc = ~crc32_le(crc, report->data, report->len - 4);
+
+ report->bt->crc32 = cpu_to_le32(crc);
+ }
+
+ hid_hw_output_report(hdev, report->data, report->len);
+}
+
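+/*
+ * Worker used to defer output report transmission out of atomic context.
+ * Pending changes (rumble, lightbar, player LEDs, mic mute) are collected
+ * under the device spinlock and sent as a single output report.
+ */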
+static void dualsense_output_worker(struct work_struct *work)
+{
+ struct dualsense *ds = container_of(work, struct dualsense, output_worker);
+ struct dualsense_output_report report;
+ struct dualsense_output_report_common *common;
+ unsigned long flags;
+
+ dualsense_init_output_report(ds, &report, ds->output_report_dmabuf);
+ common = report.common;
+
+ spin_lock_irqsave(&ds->base.lock, flags);
+
+ if (ds->update_rumble) {
+ /* Select classic rumble style haptics and enable it. */
+ common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT;
+ common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION;
+ common->motor_left = ds->motor_left;
+ common->motor_right = ds->motor_right;
+ ds->update_rumble = false;
+ }
+
+ if (ds->update_lightbar) {
+ common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_LIGHTBAR_CONTROL_ENABLE;
+ common->lightbar_red = ds->lightbar_red;
+ common->lightbar_green = ds->lightbar_green;
+ common->lightbar_blue = ds->lightbar_blue;
+
+ ds->update_lightbar = false;
+ }
+
+ if (ds->update_player_leds) {
+ common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE;
+ common->player_leds = ds->player_leds_state;
+
+ ds->update_player_leds = false;
+ }
+
+ if (ds->update_mic_mute) {
+ common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_MIC_MUTE_LED_CONTROL_ENABLE;
+ common->mute_button_led = ds->mic_muted;
+
+ if (ds->mic_muted) {
+ /* Disable microphone */
+ common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE;
+ common->power_save_control |= DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE;
+ } else {
+ /* Enable microphone */
+ common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE;
+ common->power_save_control &= ~DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE;
+ }
+
+ ds->update_mic_mute = false;
+ }
+
+ spin_unlock_irqrestore(&ds->base.lock, flags);
+
+ dualsense_send_output_report(ds, &report);
+}
+
+static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct hid_device *hdev = ps_dev->hdev;
+ struct dualsense *ds = container_of(ps_dev, struct dualsense, base);
+ struct dualsense_input_report *ds_report;
+ uint8_t battery_data, battery_capacity, charging_status, value;
+ int battery_status;
+ uint32_t sensor_timestamp;
+ bool btn_mic_state;
+ unsigned long flags;
+ int i;
+
+ /*
+ * DualSense in USB uses the full HID report for reportID 1, but
+ * Bluetooth uses a minimal HID report for reportID 1 and reports
+ * the full report using reportID 49.
+ */
+ if (hdev->bus == BUS_USB && report->id == DS_INPUT_REPORT_USB &&
+ size == DS_INPUT_REPORT_USB_SIZE) {
+ ds_report = (struct dualsense_input_report *)&data[1];
+ } else if (hdev->bus == BUS_BLUETOOTH && report->id == DS_INPUT_REPORT_BT &&
+ size == DS_INPUT_REPORT_BT_SIZE) {
+ /* Last 4 bytes of input report contain crc32 */
+ uint32_t report_crc = get_unaligned_le32(&data[size - 4]);
+
+ if (!ps_check_crc32(PS_INPUT_CRC32_SEED, data, size - 4, report_crc)) {
+ hid_err(hdev, "DualSense input CRC's check failed\n");
+ return -EILSEQ;
+ }
+
+ ds_report = (struct dualsense_input_report *)&data[2];
+ } else {
+ hid_err(hdev, "Unhandled reportID=%d\n", report->id);
+ return -1;
+ }
+
+ input_report_abs(ds->gamepad, ABS_X, ds_report->x);
+ input_report_abs(ds->gamepad, ABS_Y, ds_report->y);
+ input_report_abs(ds->gamepad, ABS_RX, ds_report->rx);
+ input_report_abs(ds->gamepad, ABS_RY, ds_report->ry);
+ input_report_abs(ds->gamepad, ABS_Z, ds_report->z);
+ input_report_abs(ds->gamepad, ABS_RZ, ds_report->rz);
+
+ value = ds_report->buttons[0] & DS_BUTTONS0_HAT_SWITCH;
+ if (value >= ARRAY_SIZE(ps_gamepad_hat_mapping))
+ value = 8; /* center */
+ input_report_abs(ds->gamepad, ABS_HAT0X, ps_gamepad_hat_mapping[value].x);
+ input_report_abs(ds->gamepad, ABS_HAT0Y, ps_gamepad_hat_mapping[value].y);
+
+ input_report_key(ds->gamepad, BTN_WEST, ds_report->buttons[0] & DS_BUTTONS0_SQUARE);
+ input_report_key(ds->gamepad, BTN_SOUTH, ds_report->buttons[0] & DS_BUTTONS0_CROSS);
+ input_report_key(ds->gamepad, BTN_EAST, ds_report->buttons[0] & DS_BUTTONS0_CIRCLE);
+ input_report_key(ds->gamepad, BTN_NORTH, ds_report->buttons[0] & DS_BUTTONS0_TRIANGLE);
+ input_report_key(ds->gamepad, BTN_TL, ds_report->buttons[1] & DS_BUTTONS1_L1);
+ input_report_key(ds->gamepad, BTN_TR, ds_report->buttons[1] & DS_BUTTONS1_R1);
+ input_report_key(ds->gamepad, BTN_TL2, ds_report->buttons[1] & DS_BUTTONS1_L2);
+ input_report_key(ds->gamepad, BTN_TR2, ds_report->buttons[1] & DS_BUTTONS1_R2);
+ input_report_key(ds->gamepad, BTN_SELECT, ds_report->buttons[1] & DS_BUTTONS1_CREATE);
+ input_report_key(ds->gamepad, BTN_START, ds_report->buttons[1] & DS_BUTTONS1_OPTIONS);
+ input_report_key(ds->gamepad, BTN_THUMBL, ds_report->buttons[1] & DS_BUTTONS1_L3);
+ input_report_key(ds->gamepad, BTN_THUMBR, ds_report->buttons[1] & DS_BUTTONS1_R3);
+ input_report_key(ds->gamepad, BTN_MODE, ds_report->buttons[2] & DS_BUTTONS2_PS_HOME);
+ input_sync(ds->gamepad);
+
+ /*
+ * The DualSense has an internal microphone, which can be muted through a mute button
+ * on the device. The driver is expected to read the button state and program the device
+ * to mute/unmute audio at the hardware level.
+ */
+ btn_mic_state = !!(ds_report->buttons[2] & DS_BUTTONS2_MIC_MUTE);
+ if (btn_mic_state && !ds->last_btn_mic_state) {
+ spin_lock_irqsave(&ps_dev->lock, flags);
+ ds->update_mic_mute = true;
+ ds->mic_muted = !ds->mic_muted; /* toggle */
+ spin_unlock_irqrestore(&ps_dev->lock, flags);
+
+ /* Schedule updating of microphone state at hardware level. */
+ schedule_work(&ds->output_worker);
+ }
+ ds->last_btn_mic_state = btn_mic_state;
+
+ /* Parse and calibrate gyroscope data. */
+ for (i = 0; i < ARRAY_SIZE(ds_report->gyro); i++) {
+ int raw_data = (short)le16_to_cpu(ds_report->gyro[i]);
+ int calib_data = mult_frac(ds->gyro_calib_data[i].sens_numer,
+ raw_data - ds->gyro_calib_data[i].bias,
+ ds->gyro_calib_data[i].sens_denom);
+
+ input_report_abs(ds->sensors, ds->gyro_calib_data[i].abs_code, calib_data);
+ }
+
+ /* Parse and calibrate accelerometer data. */
+ for (i = 0; i < ARRAY_SIZE(ds_report->accel); i++) {
+ int raw_data = (short)le16_to_cpu(ds_report->accel[i]);
+ int calib_data = mult_frac(ds->accel_calib_data[i].sens_numer,
+ raw_data - ds->accel_calib_data[i].bias,
+ ds->accel_calib_data[i].sens_denom);
+
+ input_report_abs(ds->sensors, ds->accel_calib_data[i].abs_code, calib_data);
+ }
+
+ /*
+ * Convert the sensor timestamp (reported in 0.33 us units) to microseconds,
+ * accumulating deltas to handle wraparound of the 32-bit hardware counter.
+ */
+ sensor_timestamp = le32_to_cpu(ds_report->sensor_timestamp);
+ if (!ds->sensor_timestamp_initialized) {
+ ds->sensor_timestamp_us = DIV_ROUND_CLOSEST(sensor_timestamp, 3);
+ ds->sensor_timestamp_initialized = true;
+ } else {
+ uint32_t delta;
+
+ if (ds->prev_sensor_timestamp > sensor_timestamp)
+ delta = (U32_MAX - ds->prev_sensor_timestamp + sensor_timestamp + 1);
+ else
+ delta = sensor_timestamp - ds->prev_sensor_timestamp;
+ ds->sensor_timestamp_us += DIV_ROUND_CLOSEST(delta, 3);
+ }
+ ds->prev_sensor_timestamp = sensor_timestamp;
+ input_event(ds->sensors, EV_MSC, MSC_TIMESTAMP, ds->sensor_timestamp_us);
+ input_sync(ds->sensors);
+
+ for (i = 0; i < ARRAY_SIZE(ds_report->points); i++) {
+ struct dualsense_touch_point *point = &ds_report->points[i];
+ bool active = (point->contact & DS_TOUCH_POINT_INACTIVE) ? false : true;
+
+ input_mt_slot(ds->touchpad, i);
+ input_mt_report_slot_state(ds->touchpad, MT_TOOL_FINGER, active);
+
+ if (active) {
+ int x = (point->x_hi << 8) | point->x_lo;
+ int y = (point->y_hi << 4) | point->y_lo;
+
+ input_report_abs(ds->touchpad, ABS_MT_POSITION_X, x);
+ input_report_abs(ds->touchpad, ABS_MT_POSITION_Y, y);
+ }
+ }
+ input_mt_sync_frame(ds->touchpad);
+ input_report_key(ds->touchpad, BTN_LEFT, ds_report->buttons[2] & DS_BUTTONS2_TOUCHPAD);
+ input_sync(ds->touchpad);
+
+ battery_data = ds_report->status & DS_STATUS_BATTERY_CAPACITY;
+ charging_status = (ds_report->status & DS_STATUS_CHARGING) >> DS_STATUS_CHARGING_SHIFT;
+
+ switch (charging_status) {
+ case 0x0:
+ /*
+ * Each unit of battery data corresponds to 10%:
+ * 0 = 0-9%, 1 = 10-19%, ... and 10 = 100%.
+ */
+ battery_capacity = min(battery_data * 10 + 5, 100);
+ battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ case 0x1:
+ battery_capacity = min(battery_data * 10 + 5, 100);
+ battery_status = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case 0x2:
+ battery_capacity = 100;
+ battery_status = POWER_SUPPLY_STATUS_FULL;
+ break;
+ case 0xa: /* voltage or temperature out of range */
+ case 0xb: /* temperature error */
+ battery_capacity = 0;
+ battery_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case 0xf: /* charging error */
+ default:
+ battery_capacity = 0;
+ battery_status = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ spin_lock_irqsave(&ps_dev->lock, flags);
+ ps_dev->battery_capacity = battery_capacity;
+ ps_dev->battery_status = battery_status;
+ spin_unlock_irqrestore(&ps_dev->lock, flags);
+
+ return 0;
+}
+
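+/*
+ * Force feedback callback: FF_RUMBLE magnitudes are 16-bit, so scale them
+ * down to the 8-bit motor values used by the output report.
+ */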
+static int dualsense_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect)
+{
+ struct hid_device *hdev = input_get_drvdata(dev);
+ struct dualsense *ds = hid_get_drvdata(hdev);
+ unsigned long flags;
+
+ if (effect->type != FF_RUMBLE)
+ return 0;
+
+ spin_lock_irqsave(&ds->base.lock, flags);
+ ds->update_rumble = true;
+ ds->motor_left = effect->u.rumble.strong_magnitude / 256;
+ ds->motor_right = effect->u.rumble.weak_magnitude / 256;
+ spin_unlock_irqrestore(&ds->base.lock, flags);
+
+ schedule_work(&ds->output_worker);
+ return 0;
+}
+
+static int dualsense_reset_leds(struct dualsense *ds)
+{
+ struct dualsense_output_report report;
+ uint8_t *buf;
+
+ buf = kzalloc(sizeof(struct dualsense_output_report_bt), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ dualsense_init_output_report(ds, &report, buf);
+ /*
+ * On Bluetooth the DualSense outputs an animation on the lightbar
+ * during startup and maintains a color afterwards. We need to explicitly
+ * reconfigure the lightbar before we can do any programming later on.
+ * In USB the lightbar is not on by default, but redoing the setup there
+ * doesn't hurt.
+ */
+ report.common->valid_flag2 = DS_OUTPUT_VALID_FLAG2_LIGHTBAR_SETUP_CONTROL_ENABLE;
+ report.common->lightbar_setup = DS_OUTPUT_LIGHTBAR_SETUP_LIGHT_OUT; /* Fade light out. */
+ dualsense_send_output_report(ds, &report);
+
+ kfree(buf);
+ return 0;
+}
+
+static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t green, uint8_t blue)
+{
+ ds->update_lightbar = true;
+ ds->lightbar_red = red;
+ ds->lightbar_green = green;
+ ds->lightbar_blue = blue;
+
+ schedule_work(&ds->output_worker);
+}
+
+static void dualsense_set_player_leds(struct dualsense *ds)
+{
+ /*
+ * The DualSense controller has a row of 5 LEDs used for player ids.
+ * Behavior on the PlayStation 5 console is to center the player id
+ * across the LEDs, so e.g. player 1 would be "--x--" with x being 'on'.
+ * Follow a similar mapping here.
+ */
+ static const int player_ids[5] = {
+ BIT(2),
+ BIT(3) | BIT(1),
+ BIT(4) | BIT(2) | BIT(0),
+ BIT(4) | BIT(3) | BIT(1) | BIT(0),
+ BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0)
+ };
+
+ uint8_t player_id = ds->base.player_id % ARRAY_SIZE(player_ids);
+
+ ds->update_player_leds = true;
+ ds->player_leds_state = player_ids[player_id];
+ schedule_work(&ds->output_worker);
+}
+
+static struct ps_device *dualsense_create(struct hid_device *hdev)
+{
+ struct dualsense *ds;
+ struct ps_device *ps_dev;
+ uint8_t max_output_report_size;
+ int ret;
+
+ ds = devm_kzalloc(&hdev->dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Patch version to allow userspace to distinguish between
+ * hid-generic vs hid-playstation axis and button mapping.
+ */
+ hdev->version |= HID_PLAYSTATION_VERSION_PATCH;
+
+ ps_dev = &ds->base;
+ ps_dev->hdev = hdev;
+ spin_lock_init(&ps_dev->lock);
+ ps_dev->battery_capacity = 100; /* initial value until parse_report. */
+ ps_dev->battery_status = POWER_SUPPLY_STATUS_UNKNOWN;
+ ps_dev->parse_report = dualsense_parse_report;
+ INIT_WORK(&ds->output_worker, dualsense_output_worker);
+ hid_set_drvdata(hdev, ds);
+
+ max_output_report_size = sizeof(struct dualsense_output_report_bt);
+ ds->output_report_dmabuf = devm_kzalloc(&hdev->dev, max_output_report_size, GFP_KERNEL);
+ if (!ds->output_report_dmabuf)
+ return ERR_PTR(-ENOMEM);
+
+ ret = dualsense_get_mac_address(ds);
+ if (ret) {
+ hid_err(hdev, "Failed to get MAC address from DualSense\n");
+ return ERR_PTR(ret);
+ }
+ snprintf(hdev->uniq, sizeof(hdev->uniq), "%pMR", ds->base.mac_address);
+
+ ret = dualsense_get_firmware_info(ds);
+ if (ret) {
+ hid_err(hdev, "Failed to get firmware info from DualSense\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = ps_devices_list_add(ps_dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = dualsense_get_calibration_data(ds);
+ if (ret) {
+ hid_err(hdev, "Failed to get calibration data from DualSense\n");
+ goto err;
+ }
+
+ ds->gamepad = ps_gamepad_create(hdev, dualsense_play_effect);
+ if (IS_ERR(ds->gamepad)) {
+ ret = PTR_ERR(ds->gamepad);
+ goto err;
+ }
+
+ ds->sensors = ps_sensors_create(hdev, DS_ACC_RANGE, DS_ACC_RES_PER_G,
+ DS_GYRO_RANGE, DS_GYRO_RES_PER_DEG_S);
+ if (IS_ERR(ds->sensors)) {
+ ret = PTR_ERR(ds->sensors);
+ goto err;
+ }
+
+ ds->touchpad = ps_touchpad_create(hdev, DS_TOUCHPAD_WIDTH, DS_TOUCHPAD_HEIGHT, 2);
+ if (IS_ERR(ds->touchpad)) {
+ ret = PTR_ERR(ds->touchpad);
+ goto err;
+ }
+
+ ret = ps_device_register_battery(ps_dev);
+ if (ret)
+ goto err;
+
+ /*
+ * The hardware may have control over the LEDs (e.g. in Bluetooth on startup).
+ * Reset the LEDs (lightbar, mute, player leds), so we can control them
+ * from software.
+ */
+ ret = dualsense_reset_leds(ds);
+ if (ret)
+ goto err;
+
+ dualsense_set_lightbar(ds, 0, 0, 128); /* blue */
+
+ ret = ps_device_set_player_id(ps_dev);
+ if (ret) {
+ hid_err(hdev, "Failed to assign player id for DualSense: %d\n", ret);
+ goto err;
+ }
+
+ /* Set player LEDs to our player id. */
+ dualsense_set_player_leds(ds);
+
+ /*
+ * Reporting the hardware and firmware versions is important as there are frequent updates, which
+ * can change behavior.
+ */
+ hid_info(hdev, "Registered DualSense controller hw_version=0x%08x fw_version=0x%08x\n",
+ ds->base.hw_version, ds->base.fw_version);
+
+ return &ds->base;
+
+err:
+ ps_devices_list_remove(ps_dev);
+ return ERR_PTR(ret);
+}
+
+static int ps_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct ps_device *dev = hid_get_drvdata(hdev);
+
+ if (dev && dev->parse_report)
+ return dev->parse_report(dev, report, data, size);
+
+ return 0;
+}
+
+static int ps_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct ps_device *dev;
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "Parse failed\n");
+ return ret;
+ }
+
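+ /*
+ * Start the HID hardware with hidraw only: the driver creates its own
+ * input devices (gamepad, touchpad, motion sensors), so the generic
+ * hid-input connect is not used.
+ */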
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret) {
+ hid_err(hdev, "Failed to start HID device\n");
+ return ret;
+ }
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ hid_err(hdev, "Failed to open HID device\n");
+ goto err_stop;
+ }
+
+ if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER) {
+ dev = dualsense_create(hdev);
+ if (IS_ERR(dev)) {
+ hid_err(hdev, "Failed to create dualsense.\n");
+ ret = PTR_ERR(dev);
+ goto err_close;
+ }
+ }
+
+ ret = devm_device_add_group(&hdev->dev, &ps_device_attribute_group);
+ if (ret) {
+ hid_err(hdev, "Failed to register sysfs nodes.\n");
+ goto err_close;
+ }
+
+ return ret;
+
+err_close:
+ hid_hw_close(hdev);
+err_stop:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
+static void ps_remove(struct hid_device *hdev)
+{
+ struct ps_device *dev = hid_get_drvdata(hdev);
+
+ ps_devices_list_remove(dev);
+ ps_device_release_player_id(dev);
+
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id ps_devices[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, ps_devices);
+
+static struct hid_driver ps_driver = {
+ .name = "playstation",
+ .id_table = ps_devices,
+ .probe = ps_probe,
+ .remove = ps_remove,
+ .raw_event = ps_raw_event,
+};
+
+static int __init ps_init(void)
+{
+ return hid_register_driver(&ps_driver);
+}
+
+static void __exit ps_exit(void)
+{
+ hid_unregister_driver(&ps_driver);
+ ida_destroy(&ps_player_id_allocator);
+}
+
+module_init(ps_init);
+module_exit(ps_exit);
+
+MODULE_AUTHOR("Sony Interactive Entertainment");
+MODULE_DESCRIPTION("HID Driver for PlayStation peripherals.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index d9ca874dffac..1a9daf03dbfa 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -180,7 +180,6 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883), HID_QUIRK_NOGET },
- { HID_USB_DEVICE(USB_VENDOR_ID_TRUST, USB_DEVICE_ID_TRUST_PANORA_TABLET), HID_QUIRK_MULTI_INPUT | HID_QUIRK_HIDINPUT_FORCE },
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
@@ -1029,7 +1028,7 @@ static DEFINE_MUTEX(dquirks_lock);
/* Runtime ("dynamic") quirks manipulation functions */
/**
- * hid_exists_dquirk: find any dynamic quirks for a HID device
+ * hid_exists_dquirk - find any dynamic quirks for a HID device
* @hdev: the HID device to match
*
* Description:
@@ -1037,7 +1036,7 @@ static DEFINE_MUTEX(dquirks_lock);
* the pointer to the relevant struct hid_device_id if found.
* Must be called with a read lock held on dquirks_lock.
*
- * Returns: NULL if no quirk found, struct hid_device_id * if found.
+ * Return: NULL if no quirk found, struct hid_device_id * if found.
*/
static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
{
@@ -1061,7 +1060,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
/**
- * hid_modify_dquirk: add/replace a HID quirk
+ * hid_modify_dquirk - add/replace a HID quirk
* @id: the HID device to match
* @quirks: the unsigned long quirks value to add/replace
*
@@ -1070,7 +1069,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
* quirks value with what was provided. Otherwise, add the quirk
* to the dynamic quirks list.
*
- * Returns: 0 OK, -error on failure.
+ * Return: 0 OK, -error on failure.
*/
static int hid_modify_dquirk(const struct hid_device_id *id,
const unsigned long quirks)
@@ -1122,7 +1121,7 @@ static int hid_modify_dquirk(const struct hid_device_id *id,
}
/**
- * hid_remove_all_dquirks: remove all runtime HID quirks from memory
+ * hid_remove_all_dquirks - remove all runtime HID quirks from memory
* @bus: bus to match against. Use HID_BUS_ANY if all need to be removed.
*
* Description:
@@ -1146,7 +1145,10 @@ static void hid_remove_all_dquirks(__u16 bus)
}
/**
- * hid_quirks_init: apply HID quirks specified at module load time
+ * hid_quirks_init - apply HID quirks specified at module load time
+ * @quirks_param: array of quirks strings (vendor:product:quirks)
+ * @bus: bus type
+ * @count: number of quirks to check
*/
int hid_quirks_init(char **quirks_param, __u16 bus, int count)
{
@@ -1177,7 +1179,7 @@ int hid_quirks_init(char **quirks_param, __u16 bus, int count)
EXPORT_SYMBOL_GPL(hid_quirks_init);
/**
- * hid_quirks_exit: release memory associated with dynamic_quirks
+ * hid_quirks_exit - release memory associated with dynamic_quirks
* @bus: a bus to match against
*
* Description:
@@ -1194,14 +1196,14 @@ void hid_quirks_exit(__u16 bus)
EXPORT_SYMBOL_GPL(hid_quirks_exit);
/**
- * hid_gets_squirk: return any static quirks for a HID device
+ * hid_gets_squirk - return any static quirks for a HID device
* @hdev: the HID device to match
*
* Description:
* Given a HID device, return a pointer to the quirked hid_device_id entry
* associated with that device.
*
- * Returns: the quirks.
+ * Return: the quirks.
*/
static unsigned long hid_gets_squirk(const struct hid_device *hdev)
{
@@ -1225,13 +1227,13 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
}
/**
- * hid_lookup_quirk: return any quirks associated with a HID device
+ * hid_lookup_quirk - return any quirks associated with a HID device
* @hdev: the HID device to look for
*
* Description:
* Given a HID device, return any quirks associated with that device.
*
- * Returns: an unsigned long quirks value.
+ * Return: an unsigned long quirks value.
*/
unsigned long hid_lookup_quirk(const struct hid_device *hdev)
{
diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
index ffcd444ae2ba..4556d2a50f75 100644
--- a/drivers/hid/hid-roccat-arvo.c
+++ b/drivers/hid/hid-roccat-arvo.c
@@ -42,7 +42,7 @@ static ssize_t arvo_sysfs_show_mode_key(struct device *dev,
if (retval)
return retval;
- return snprintf(buf, PAGE_SIZE, "%d\n", temp_buf.state);
+ return sysfs_emit(buf, "%d\n", temp_buf.state);
}
static ssize_t arvo_sysfs_set_mode_key(struct device *dev,
@@ -92,7 +92,7 @@ static ssize_t arvo_sysfs_show_key_mask(struct device *dev,
if (retval)
return retval;
- return snprintf(buf, PAGE_SIZE, "%d\n", temp_buf.key_mask);
+ return sysfs_emit(buf, "%d\n", temp_buf.key_mask);
}
static ssize_t arvo_sysfs_set_key_mask(struct device *dev,
@@ -146,7 +146,7 @@ static ssize_t arvo_sysfs_show_actual_profile(struct device *dev,
struct arvo_device *arvo =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", arvo->actual_profile);
+ return sysfs_emit(buf, "%d\n", arvo->actual_profile);
}
static ssize_t arvo_sysfs_set_actual_profile(struct device *dev,
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 4d25577a8573..2628bc53ed80 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -4,6 +4,7 @@
* Copyright (c) 2015, Intel Corporation.
*/
+#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -21,6 +22,7 @@
#define HID_CUSTOM_TOTAL_ATTRS (HID_CUSTOM_MAX_CORE_ATTRS + 1)
#define HID_CUSTOM_FIFO_SIZE 4096
#define HID_CUSTOM_MAX_FEATURE_BYTES 64
+#define HID_SENSOR_USAGE_LENGTH (4 + 1)
struct hid_sensor_custom_field {
int report_id;
@@ -50,6 +52,7 @@ struct hid_sensor_custom {
struct kfifo data_fifo;
unsigned long misc_opened;
wait_queue_head_t wait;
+ struct platform_device *custom_pdev;
};
/* Header for each sample to user space via dev interface */
@@ -746,11 +749,130 @@ static void hid_sensor_custom_dev_if_remove(struct hid_sensor_custom
}
+/* luid defined in FW (e.g. ISH). May be used to identify the sensor. */
+static const char *const known_sensor_luid[] = { "020B000000000000" };
+
+static int get_luid_table_index(unsigned char *usage_str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(known_sensor_luid); i++) {
+ if (!strncmp(usage_str, known_sensor_luid[i],
+ strlen(known_sensor_luid[i])))
+ return i;
+ }
+
+ return -ENODEV;
+}
+
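+/*
+ * Check whether this sensor hub device is a known custom sensor: verify the
+ * manufacturer string reports an Intel (ISH) hub, then look up the luid from
+ * the serial number property in the known_sensor_luid table.
+ */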
+static int get_known_custom_sensor_index(struct hid_sensor_hub_device *hsdev)
+{
+ struct hid_sensor_hub_attribute_info sensor_manufacturer = { 0 };
+ struct hid_sensor_hub_attribute_info sensor_luid_info = { 0 };
+ int report_size;
+ int ret;
+ static u16 w_buf[HID_CUSTOM_MAX_FEATURE_BYTES];
+ static char buf[HID_CUSTOM_MAX_FEATURE_BYTES];
+ int i;
+
+ memset(w_buf, 0, sizeof(w_buf));
+ memset(buf, 0, sizeof(buf));
+
+ /* get manufacturer info */
+ ret = sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, hsdev->usage,
+ HID_USAGE_SENSOR_PROP_MANUFACTURER, &sensor_manufacturer);
+ if (ret < 0)
+ return ret;
+
+ report_size =
+ sensor_hub_get_feature(hsdev, sensor_manufacturer.report_id,
+ sensor_manufacturer.index, sizeof(w_buf),
+ w_buf);
+ if (report_size <= 0) {
+ hid_err(hsdev->hdev,
+ "Failed to get sensor manufacturer info %d\n",
+ report_size);
+ return -ENODEV;
+ }
+
+ /* convert from wide char to char */
+ for (i = 0; i < ARRAY_SIZE(buf) - 1 && w_buf[i]; i++)
+ buf[i] = (char)w_buf[i];
+
+ /* ensure it's an ISH sensor */
+ if (strncmp(buf, "INTEL", strlen("INTEL")))
+ return -ENODEV;
+
+ memset(w_buf, 0, sizeof(w_buf));
+ memset(buf, 0, sizeof(buf));
+
+ /* get real usage id */
+ ret = sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, hsdev->usage,
+ HID_USAGE_SENSOR_PROP_SERIAL_NUM, &sensor_luid_info);
+ if (ret < 0)
+ return ret;
+
+ report_size = sensor_hub_get_feature(hsdev, sensor_luid_info.report_id,
+ sensor_luid_info.index, sizeof(w_buf),
+ w_buf);
+ if (report_size <= 0) {
+ hid_err(hsdev->hdev, "Failed to get real usage info %d\n",
+ report_size);
+ return -ENODEV;
+ }
+
+ /* convert from wide char to char */
+ for (i = 0; i < ARRAY_SIZE(buf) - 1 && w_buf[i]; i++)
+ buf[i] = (char)w_buf[i];
+
+ if (strlen(buf) != strlen(known_sensor_luid[0]) + 5) {
+ hid_err(hsdev->hdev,
+ "%s luid length not match %zu != (%zu + 5)\n", __func__,
+ strlen(buf), strlen(known_sensor_luid[0]));
+ return -ENODEV;
+ }
+
+ /* get table index with luid (the leading 'LUID: ' part is not matched) */
+ return get_luid_table_index(&buf[5]);
+}
+
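+/*
+ * Register a dedicated platform device named "HID-SENSOR-INT-<usage>" for a
+ * known custom sensor, so a sensor specific driver can bind to it instead of
+ * the generic custom sensor interface.
+ */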
+static struct platform_device *
+hid_sensor_register_platform_device(struct platform_device *pdev,
+ struct hid_sensor_hub_device *hsdev,
+ int index)
+{
+ char real_usage[HID_SENSOR_USAGE_LENGTH] = { 0 };
+ struct platform_device *custom_pdev;
+ const char *dev_name;
+ char *c;
+
+ /* copy real usage id */
+ memcpy(real_usage, known_sensor_luid[index], 4);
+
+ /* usage ids are all lowercase */
+ for (c = real_usage; *c != '\0'; c++)
+ *c = tolower(*c);
+
+ /* HID-SENSOR-INT-REAL_USAGE_ID */
+ dev_name = kasprintf(GFP_KERNEL, "HID-SENSOR-INT-%s", real_usage);
+ if (!dev_name)
+ return ERR_PTR(-ENOMEM);
+
+ custom_pdev = platform_device_register_data(pdev->dev.parent, dev_name,
+ PLATFORM_DEVID_NONE, hsdev,
+ sizeof(*hsdev));
+ kfree(dev_name);
+ return custom_pdev;
+}
+
static int hid_sensor_custom_probe(struct platform_device *pdev)
{
struct hid_sensor_custom *sensor_inst;
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
int ret;
+ int index;
sensor_inst = devm_kzalloc(&pdev->dev, sizeof(*sensor_inst),
GFP_KERNEL);
@@ -764,6 +886,22 @@ static int hid_sensor_custom_probe(struct platform_device *pdev)
sensor_inst->pdev = pdev;
mutex_init(&sensor_inst->mutex);
platform_set_drvdata(pdev, sensor_inst);
+
+ index = get_known_custom_sensor_index(hsdev);
+ if (index >= 0 && index < ARRAY_SIZE(known_sensor_luid)) {
+ sensor_inst->custom_pdev =
+ hid_sensor_register_platform_device(pdev, hsdev, index);
+
+ ret = PTR_ERR_OR_ZERO(sensor_inst->custom_pdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "register_platform_device failed\n");
+ return ret;
+ }
+
+ return 0;
+ }
+
ret = sensor_hub_register_callback(hsdev, hsdev->usage,
&sensor_inst->callbacks);
if (ret < 0) {
@@ -802,6 +940,11 @@ static int hid_sensor_custom_remove(struct platform_device *pdev)
struct hid_sensor_custom *sensor_inst = platform_get_drvdata(pdev);
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ if (sensor_inst->custom_pdev) {
+ platform_device_unregister(sensor_inst->custom_pdev);
+ return 0;
+ }
+
hid_sensor_custom_dev_if_remove(sensor_inst);
hid_sensor_custom_remove_attributes(sensor_inst);
sysfs_remove_group(&sensor_inst->pdev->dev.kobj,
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index e3a557dc9ffd..8319b0ce385a 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -12,6 +12,7 @@
* Copyright (c) 2014-2016 Frank Praznik <frank.praznik@gmail.com>
* Copyright (c) 2018 Todd Kelner
* Copyright (c) 2020 Pascal Giard <pascal.giard@etsmtl.ca>
+ * Copyright (c) 2020 Sanjay Govind <sanjay.govind9@gmail.com>
*/
/*
@@ -59,7 +60,8 @@
#define NSG_MR5U_REMOTE_BT BIT(14)
#define NSG_MR7U_REMOTE_BT BIT(15)
#define SHANWAN_GAMEPAD BIT(16)
-#define GHL_GUITAR_PS3WIIU BIT(17)
+#define GH_GUITAR_CONTROLLER BIT(17)
+#define GHL_GUITAR_PS3WIIU BIT(18)
#define SIXAXIS_CONTROLLER (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT)
#define MOTION_CONTROLLER (MOTION_CONTROLLER_USB | MOTION_CONTROLLER_BT)
@@ -84,7 +86,7 @@
#define NSG_MRXU_MAX_Y 1868
#define GHL_GUITAR_POKE_INTERVAL 10 /* In seconds */
-#define GHL_GUITAR_TILT_USAGE 44
+#define GUITAR_TILT_USAGE 44
/* Magic value and data taken from GHLtarUtility:
* https://github.com/ghlre/GHLtarUtility/blob/master/PS3Guitar.cs
@@ -692,7 +694,7 @@ static int guitar_mapping(struct hid_device *hdev, struct hid_input *hi,
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR) {
unsigned int abs = usage->hid & HID_USAGE;
- if (abs == GHL_GUITAR_TILT_USAGE) {
+ if (abs == GUITAR_TILT_USAGE) {
hid_map_usage_clear(hi, usage, bit, max, EV_ABS, ABS_RY);
return 1;
}
@@ -1481,7 +1483,7 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
if (sc->quirks & DUALSHOCK4_CONTROLLER)
return ds4_mapping(hdev, hi, field, usage, bit, max);
- if (sc->quirks & GHL_GUITAR_PS3WIIU)
+ if (sc->quirks & GH_GUITAR_CONTROLLER)
return guitar_mapping(hdev, hi, field, usage, bit, max);
/* Let hid-core decide for the others */
@@ -3167,8 +3169,14 @@ static const struct hid_device_id sony_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_NSG_MR7U_REMOTE),
.driver_data = NSG_MR7U_REMOTE_BT },
/* Guitar Hero Live PS3 and Wii U guitar dongles */
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY_GHLIVE, USB_DEVICE_ID_SONY_PS3WIIU_GHLIVE_DONGLE),
- .driver_data = GHL_GUITAR_PS3WIIU},
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY_RHYTHM, USB_DEVICE_ID_SONY_PS3WIIU_GHLIVE_DONGLE),
+ .driver_data = GHL_GUITAR_PS3WIIU | GH_GUITAR_CONTROLLER },
+ /* Guitar Hero PC Guitar Dongle */
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACTIVISION, USB_DEVICE_ID_ACTIVISION_GUITAR_DONGLE),
+ .driver_data = GH_GUITAR_CONTROLLER },
+ /* Guitar Hero PS3 World Tour Guitar Dongle */
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY_RHYTHM, USB_DEVICE_ID_SONY_PS3_GUITAR_DONGLE),
+ .driver_data = GH_GUITAR_CONTROLLER },
{ }
};
MODULE_DEVICE_TABLE(hid, sony_devices);
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index 8e9c9e646cb7..6a9865dd703c 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -371,6 +371,8 @@ static const struct hid_device_id uclogic_devices[] = {
USB_DEVICE_ID_HUION_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HUION,
USB_DEVICE_ID_HUION_HS64) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TRUST,
+ USB_DEVICE_ID_TRUST_PANORA_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
USB_DEVICE_ID_HUION_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 56406cee401f..6af25c38b9cc 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -1045,6 +1045,8 @@ int uclogic_params_init(struct uclogic_params *params,
uclogic_params_init_with_pen_unused(&p);
}
break;
+ case VID_PID(USB_VENDOR_ID_TRUST,
+ USB_DEVICE_ID_TRUST_PANORA_TABLET):
case VID_PID(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_TABLET_G5):
/* Ignore non-pen interfaces */
diff --git a/drivers/hid/i2c-hid/Kconfig b/drivers/hid/i2c-hid/Kconfig
index c4e5dfeab2bd..a16c6a69680b 100644
--- a/drivers/hid/i2c-hid/Kconfig
+++ b/drivers/hid/i2c-hid/Kconfig
@@ -2,18 +2,55 @@
menu "I2C HID support"
depends on I2C
-config I2C_HID
- tristate "HID over I2C transport layer"
+config I2C_HID_ACPI
+ tristate "HID over I2C transport layer ACPI driver"
default n
- depends on I2C && INPUT
- select HID
+ depends on I2C && INPUT && ACPI
+ help
+ Say Y here if you use a keyboard, a touchpad, a touchscreen, or any
+ other HID based device which is connected to your computer via I2C.
+ This driver supports ACPI-based systems.
+
+ If unsure, say N.
+
+ This support is also available as a module. If so, the module
+ will be called i2c-hid-acpi. It will also build/depend on the
+ module i2c-hid.
+
+config I2C_HID_OF
+ tristate "HID over I2C transport layer Open Firmware driver"
+ default n
+ depends on I2C && INPUT && OF
help
Say Y here if you use a keyboard, a touchpad, a touchscreen, or any
other HID based devices which is connected to your computer via I2C.
+ This driver supports Open Firmware (Device Tree)-based systems.
If unsure, say N.
This support is also available as a module. If so, the module
- will be called i2c-hid.
+ will be called i2c-hid-of. It will also build/depend on the
+ module i2c-hid.
+
+config I2C_HID_OF_GOODIX
+ tristate "Driver for Goodix hid-i2c based devices on OF systems"
+ default n
+ depends on I2C && INPUT && OF
+ help
+ Say Y here if you want support for Goodix i2c devices that use
+ the i2c-hid protocol on Open Firmware (Device Tree)-based
+ systems.
+
+ If unsure, say N.
+
+ This support is also available as a module. If so, the module
+ will be called i2c-hid-of-goodix. It will also build/depend on
+ the module i2c-hid.
endmenu
+
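+# I2C_HID_CORE is not directly user selectable; it is enabled whenever one of
+# the transport drivers above (ACPI, OF, OF_GOODIX) is built in or as a module.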
+config I2C_HID_CORE
+ tristate
+ default y if I2C_HID_ACPI=y || I2C_HID_OF=y || I2C_HID_OF_GOODIX=y
+ default m if I2C_HID_ACPI=m || I2C_HID_OF=m || I2C_HID_OF_GOODIX=m
+ select HID
diff --git a/drivers/hid/i2c-hid/Makefile b/drivers/hid/i2c-hid/Makefile
index 681b3896898e..302545a771f3 100644
--- a/drivers/hid/i2c-hid/Makefile
+++ b/drivers/hid/i2c-hid/Makefile
@@ -3,7 +3,11 @@
# Makefile for the I2C input drivers
#
-obj-$(CONFIG_I2C_HID) += i2c-hid.o
+obj-$(CONFIG_I2C_HID_CORE) += i2c-hid.o
i2c-hid-objs = i2c-hid-core.o
i2c-hid-$(CONFIG_DMI) += i2c-hid-dmi-quirks.o
+
+obj-$(CONFIG_I2C_HID_ACPI) += i2c-hid-acpi.o
+obj-$(CONFIG_I2C_HID_OF) += i2c-hid-of.o
+obj-$(CONFIG_I2C_HID_OF_GOODIX) += i2c-hid-of-goodix.o
diff --git a/drivers/hid/i2c-hid/i2c-hid-acpi.c b/drivers/hid/i2c-hid/i2c-hid-acpi.c
new file mode 100644
index 000000000000..bb8c00e6be78
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid-acpi.c
@@ -0,0 +1,143 @@
+/*
+ * HID over I2C ACPI Subclass
+ *
+ * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
+ * Copyright (c) 2012 Red Hat, Inc
+ *
+ * This code was forked out of the core code, which was partly based on
+ * "USB HID support for Linux":
+ *
+ * Copyright (c) 1999 Andreas Gal
+ * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
+ * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
+ * Copyright (c) 2007-2008 Oliver Neukum
+ * Copyright (c) 2006-2010 Jiri Kosina
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+
+#include "i2c-hid.h"
+
+struct i2c_hid_acpi {
+ struct i2chid_ops ops;
+ struct i2c_client *client;
+};
+
+static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
+ /*
+ * The CHPN0001 ACPI device, which is used to describe the Chipone
+ * ICN8505 controller, has a _CID of PNP0C50 but is not HID compatible.
+ */
+ {"CHPN0001", 0 },
+ { },
+};
+
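+/*
+ * Evaluate the HID-over-I2C _DSM (function index 1) to obtain the address of
+ * the HID descriptor register. The GUID below is defined by the HID over I2C
+ * ACPI binding.
+ */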
+static int i2c_hid_acpi_get_descriptor(struct i2c_client *client)
+{
+ static guid_t i2c_hid_guid =
+ GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
+ 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
+ union acpi_object *obj;
+ struct acpi_device *adev;
+ acpi_handle handle;
+ u16 hid_descriptor_address;
+
+ handle = ACPI_HANDLE(&client->dev);
+ if (!handle || acpi_bus_get_device(handle, &adev)) {
+ dev_err(&client->dev, "Error could not get ACPI device\n");
+ return -ENODEV;
+ }
+
+ if (acpi_match_device_ids(adev, i2c_hid_acpi_blacklist) == 0)
+ return -ENODEV;
+
+ obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL,
+ ACPI_TYPE_INTEGER);
+ if (!obj) {
+ dev_err(&client->dev, "Error _DSM call to get HID descriptor address failed\n");
+ return -ENODEV;
+ }
+
+ hid_descriptor_address = obj->integer.value;
+ ACPI_FREE(obj);
+
+ return hid_descriptor_address;
+}
+
+static void i2c_hid_acpi_shutdown_tail(struct i2chid_ops *ops)
+{
+ struct i2c_hid_acpi *ihid_acpi =
+ container_of(ops, struct i2c_hid_acpi, ops);
+ struct device *dev = &ihid_acpi->client->dev;
+ acpi_device_set_power(ACPI_COMPANION(dev), ACPI_STATE_D3_COLD);
+}
+
+static int i2c_hid_acpi_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ struct device *dev = &client->dev;
+ struct i2c_hid_acpi *ihid_acpi;
+ struct acpi_device *adev;
+ u16 hid_descriptor_address;
+ int ret;
+
+ ihid_acpi = devm_kzalloc(&client->dev, sizeof(*ihid_acpi), GFP_KERNEL);
+ if (!ihid_acpi)
+ return -ENOMEM;
+
+ ihid_acpi->client = client;
+ ihid_acpi->ops.shutdown_tail = i2c_hid_acpi_shutdown_tail;
+
+ ret = i2c_hid_acpi_get_descriptor(client);
+ if (ret < 0)
+ return ret;
+ hid_descriptor_address = ret;
+
+ adev = ACPI_COMPANION(dev);
+ if (adev)
+ acpi_device_fix_up_power(adev);
+
+ if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
+ device_set_wakeup_capable(dev, true);
+ device_set_wakeup_enable(dev, false);
+ }
+
+ return i2c_hid_core_probe(client, &ihid_acpi->ops,
+ hid_descriptor_address);
+}
+
+static const struct acpi_device_id i2c_hid_acpi_match[] = {
+ {"ACPI0C50", 0 },
+ {"PNP0C50", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match);
+
+static struct i2c_driver i2c_hid_acpi_driver = {
+ .driver = {
+ .name = "i2c_hid_acpi",
+ .pm = &i2c_hid_core_pm,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
+ },
+
+ .probe = i2c_hid_acpi_probe,
+ .remove = i2c_hid_core_remove,
+ .shutdown = i2c_hid_core_shutdown,
+};
+
+module_i2c_driver(i2c_hid_acpi_driver);
+
+MODULE_DESCRIPTION("HID over I2C ACPI driver");
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index bfe716d7ea44..9993133989a5 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -35,11 +35,6 @@
#include <linux/kernel.h>
#include <linux/hid.h>
#include <linux/mutex.h>
-#include <linux/acpi.h>
-#include <linux/of.h>
-#include <linux/regulator/consumer.h>
-
-#include <linux/platform_data/i2c-hid.h>
#include "../hid-ids.h"
#include "i2c-hid.h"
@@ -156,10 +151,10 @@ struct i2c_hid {
wait_queue_head_t wait; /* For waiting the interrupt */
- struct i2c_hid_platform_data pdata;
-
bool irq_wake_enabled;
struct mutex reset_lock;
+
+ struct i2chid_ops *ops;
};
static const struct i2c_hid_quirks {
@@ -171,6 +166,8 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
+ { I2C_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_VOYO_WINPAD_A15,
+ I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
{ I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118,
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
@@ -884,144 +881,36 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
return 0;
}
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
- /*
- * The CHPN0001 ACPI device, which is used to describe the Chipone
- * ICN8505 controller, has a _CID of PNP0C50 but is not HID compatible.
- */
- {"CHPN0001", 0 },
- { },
-};
-
-static int i2c_hid_acpi_pdata(struct i2c_client *client,
- struct i2c_hid_platform_data *pdata)
-{
- static guid_t i2c_hid_guid =
- GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
- 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
- union acpi_object *obj;
- struct acpi_device *adev;
- acpi_handle handle;
-
- handle = ACPI_HANDLE(&client->dev);
- if (!handle || acpi_bus_get_device(handle, &adev)) {
- dev_err(&client->dev, "Error could not get ACPI device\n");
- return -ENODEV;
- }
-
- if (acpi_match_device_ids(adev, i2c_hid_acpi_blacklist) == 0)
- return -ENODEV;
-
- obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL,
- ACPI_TYPE_INTEGER);
- if (!obj) {
- dev_err(&client->dev, "Error _DSM call to get HID descriptor address failed\n");
- return -ENODEV;
- }
-
- pdata->hid_descriptor_address = obj->integer.value;
- ACPI_FREE(obj);
-
- return 0;
-}
-
-static void i2c_hid_acpi_fix_up_power(struct device *dev)
-{
- struct acpi_device *adev;
-
- adev = ACPI_COMPANION(dev);
- if (adev)
- acpi_device_fix_up_power(adev);
-}
-
-static void i2c_hid_acpi_enable_wakeup(struct device *dev)
-{
- if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
- device_set_wakeup_capable(dev, true);
- device_set_wakeup_enable(dev, false);
- }
-}
-
-static void i2c_hid_acpi_shutdown(struct device *dev)
+static int i2c_hid_core_power_up(struct i2c_hid *ihid)
{
- acpi_device_set_power(ACPI_COMPANION(dev), ACPI_STATE_D3_COLD);
-}
+ if (!ihid->ops->power_up)
+ return 0;
-static const struct acpi_device_id i2c_hid_acpi_match[] = {
- {"ACPI0C50", 0 },
- {"PNP0C50", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match);
-#else
-static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
- struct i2c_hid_platform_data *pdata)
-{
- return -ENODEV;
+ return ihid->ops->power_up(ihid->ops);
}
-static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
-
-static inline void i2c_hid_acpi_enable_wakeup(struct device *dev) {}
-
-static inline void i2c_hid_acpi_shutdown(struct device *dev) {}
-#endif
-
-#ifdef CONFIG_OF
-static int i2c_hid_of_probe(struct i2c_client *client,
- struct i2c_hid_platform_data *pdata)
+static void i2c_hid_core_power_down(struct i2c_hid *ihid)
{
- struct device *dev = &client->dev;
- u32 val;
- int ret;
-
- ret = of_property_read_u32(dev->of_node, "hid-descr-addr", &val);
- if (ret) {
- dev_err(&client->dev, "HID register address not provided\n");
- return -ENODEV;
- }
- if (val >> 16) {
- dev_err(&client->dev, "Bad HID register address: 0x%08x\n",
- val);
- return -EINVAL;
- }
- pdata->hid_descriptor_address = val;
-
- return 0;
-}
+ if (!ihid->ops->power_down)
+ return;
-static const struct of_device_id i2c_hid_of_match[] = {
- { .compatible = "hid-over-i2c" },
- {},
-};
-MODULE_DEVICE_TABLE(of, i2c_hid_of_match);
-#else
-static inline int i2c_hid_of_probe(struct i2c_client *client,
- struct i2c_hid_platform_data *pdata)
-{
- return -ENODEV;
+ ihid->ops->power_down(ihid->ops);
}
-#endif
-static void i2c_hid_fwnode_probe(struct i2c_client *client,
- struct i2c_hid_platform_data *pdata)
+static void i2c_hid_core_shutdown_tail(struct i2c_hid *ihid)
{
- u32 val;
+ if (!ihid->ops->shutdown_tail)
+ return;
- if (!device_property_read_u32(&client->dev, "post-power-on-delay-ms",
- &val))
- pdata->post_power_delay_ms = val;
+ ihid->ops->shutdown_tail(ihid->ops);
}
-static int i2c_hid_probe(struct i2c_client *client,
- const struct i2c_device_id *dev_id)
+int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
+ u16 hid_descriptor_address)
{
int ret;
struct i2c_hid *ihid;
struct hid_device *hid;
- __u16 hidRegister;
- struct i2c_hid_platform_data *platform_data = client->dev.platform_data;
dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
@@ -1042,44 +931,17 @@ static int i2c_hid_probe(struct i2c_client *client,
if (!ihid)
return -ENOMEM;
- if (client->dev.of_node) {
- ret = i2c_hid_of_probe(client, &ihid->pdata);
- if (ret)
- return ret;
- } else if (!platform_data) {
- ret = i2c_hid_acpi_pdata(client, &ihid->pdata);
- if (ret)
- return ret;
- } else {
- ihid->pdata = *platform_data;
- }
-
- /* Parse platform agnostic common properties from ACPI / device tree */
- i2c_hid_fwnode_probe(client, &ihid->pdata);
+ ihid->ops = ops;
- ihid->pdata.supplies[0].supply = "vdd";
- ihid->pdata.supplies[1].supply = "vddl";
-
- ret = devm_regulator_bulk_get(&client->dev,
- ARRAY_SIZE(ihid->pdata.supplies),
- ihid->pdata.supplies);
+ ret = i2c_hid_core_power_up(ihid);
if (ret)
return ret;
- ret = regulator_bulk_enable(ARRAY_SIZE(ihid->pdata.supplies),
- ihid->pdata.supplies);
- if (ret < 0)
- return ret;
-
- if (ihid->pdata.post_power_delay_ms)
- msleep(ihid->pdata.post_power_delay_ms);
-
i2c_set_clientdata(client, ihid);
ihid->client = client;
- hidRegister = ihid->pdata.hid_descriptor_address;
- ihid->wHIDDescRegister = cpu_to_le16(hidRegister);
+ ihid->wHIDDescRegister = cpu_to_le16(hid_descriptor_address);
init_waitqueue_head(&ihid->wait);
mutex_init(&ihid->reset_lock);
@@ -1089,11 +951,7 @@ static int i2c_hid_probe(struct i2c_client *client,
* real computation later. */
ret = i2c_hid_alloc_buffers(ihid, HID_MIN_BUFFER_SIZE);
if (ret < 0)
- goto err_regulator;
-
- i2c_hid_acpi_fix_up_power(&client->dev);
-
- i2c_hid_acpi_enable_wakeup(&client->dev);
+ goto err_powered;
device_enable_async_suspend(&client->dev);
@@ -1102,19 +960,19 @@ static int i2c_hid_probe(struct i2c_client *client,
if (ret < 0) {
dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
ret = -ENXIO;
- goto err_regulator;
+ goto err_powered;
}
ret = i2c_hid_fetch_hid_descriptor(ihid);
if (ret < 0) {
dev_err(&client->dev,
"Failed to fetch the HID Descriptor\n");
- goto err_regulator;
+ goto err_powered;
}
ret = i2c_hid_init_irq(client);
if (ret < 0)
- goto err_regulator;
+ goto err_powered;
hid = hid_allocate_device();
if (IS_ERR(hid)) {
@@ -1153,14 +1011,14 @@ err_mem_free:
err_irq:
free_irq(client->irq, ihid);
-err_regulator:
- regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
- ihid->pdata.supplies);
+err_powered:
+ i2c_hid_core_power_down(ihid);
i2c_hid_free_buffers(ihid);
return ret;
}
+EXPORT_SYMBOL_GPL(i2c_hid_core_probe);
-static int i2c_hid_remove(struct i2c_client *client)
+int i2c_hid_core_remove(struct i2c_client *client)
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
struct hid_device *hid;
@@ -1173,24 +1031,25 @@ static int i2c_hid_remove(struct i2c_client *client)
if (ihid->bufsize)
i2c_hid_free_buffers(ihid);
- regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
- ihid->pdata.supplies);
+ i2c_hid_core_power_down(ihid);
return 0;
}
+EXPORT_SYMBOL_GPL(i2c_hid_core_remove);
-static void i2c_hid_shutdown(struct i2c_client *client)
+void i2c_hid_core_shutdown(struct i2c_client *client)
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
free_irq(client->irq, ihid);
- i2c_hid_acpi_shutdown(&client->dev);
+ i2c_hid_core_shutdown_tail(ihid);
}
+EXPORT_SYMBOL_GPL(i2c_hid_core_shutdown);
#ifdef CONFIG_PM_SLEEP
-static int i2c_hid_suspend(struct device *dev)
+static int i2c_hid_core_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct i2c_hid *ihid = i2c_get_clientdata(client);
@@ -1217,14 +1076,13 @@ static int i2c_hid_suspend(struct device *dev)
hid_warn(hid, "Failed to enable irq wake: %d\n",
wake_status);
} else {
- regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
- ihid->pdata.supplies);
+ i2c_hid_core_power_down(ihid);
}
return 0;
}
-static int i2c_hid_resume(struct device *dev)
+static int i2c_hid_core_resume(struct device *dev)
{
int ret;
struct i2c_client *client = to_i2c_client(dev);
@@ -1233,13 +1091,7 @@ static int i2c_hid_resume(struct device *dev)
int wake_status;
if (!device_may_wakeup(&client->dev)) {
- ret = regulator_bulk_enable(ARRAY_SIZE(ihid->pdata.supplies),
- ihid->pdata.supplies);
- if (ret)
- hid_warn(hid, "Failed to enable supplies: %d\n", ret);
-
- if (ihid->pdata.post_power_delay_ms)
- msleep(ihid->pdata.post_power_delay_ms);
+ i2c_hid_core_power_up(ihid);
} else if (ihid->irq_wake_enabled) {
wake_status = disable_irq_wake(client->irq);
if (!wake_status)
@@ -1276,34 +1128,10 @@ static int i2c_hid_resume(struct device *dev)
}
#endif
-static const struct dev_pm_ops i2c_hid_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_suspend, i2c_hid_resume)
+const struct dev_pm_ops i2c_hid_core_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_core_suspend, i2c_hid_core_resume)
};
-
-static const struct i2c_device_id i2c_hid_id_table[] = {
- { "hid", 0 },
- { "hid-over-i2c", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(i2c, i2c_hid_id_table);
-
-
-static struct i2c_driver i2c_hid_driver = {
- .driver = {
- .name = "i2c_hid",
- .pm = &i2c_hid_pm,
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
- .of_match_table = of_match_ptr(i2c_hid_of_match),
- },
-
- .probe = i2c_hid_probe,
- .remove = i2c_hid_remove,
- .shutdown = i2c_hid_shutdown,
- .id_table = i2c_hid_id_table,
-};
-
-module_i2c_driver(i2c_hid_driver);
+EXPORT_SYMBOL_GPL(i2c_hid_core_pm);
MODULE_DESCRIPTION("HID over I2C core driver");
MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
new file mode 100644
index 000000000000..ee0225982a82
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Goodix touchscreens that use the i2c-hid protocol.
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+
+#include "i2c-hid.h"
+
+struct goodix_i2c_hid_timing_data {
+ unsigned int post_gpio_reset_delay_ms;
+ unsigned int post_power_delay_ms;
+};
+
+struct i2c_hid_of_goodix {
+ struct i2chid_ops ops;
+
+ struct regulator *vdd;
+ struct gpio_desc *reset_gpio;
+ const struct goodix_i2c_hid_timing_data *timings;
+};
+
+static int goodix_i2c_hid_power_up(struct i2chid_ops *ops)
+{
+ struct i2c_hid_of_goodix *ihid_goodix =
+ container_of(ops, struct i2c_hid_of_goodix, ops);
+ int ret;
+
+ ret = regulator_enable(ihid_goodix->vdd);
+ if (ret)
+ return ret;
+
+ if (ihid_goodix->timings->post_power_delay_ms)
+ msleep(ihid_goodix->timings->post_power_delay_ms);
+
+ gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 0);
+ if (ihid_goodix->timings->post_gpio_reset_delay_ms)
+ msleep(ihid_goodix->timings->post_gpio_reset_delay_ms);
+
+ return 0;
+}
+
+static void goodix_i2c_hid_power_down(struct i2chid_ops *ops)
+{
+ struct i2c_hid_of_goodix *ihid_goodix =
+ container_of(ops, struct i2c_hid_of_goodix, ops);
+
+ gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
+ regulator_disable(ihid_goodix->vdd);
+}
+
+static int i2c_hid_of_goodix_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_hid_of_goodix *ihid_goodix;
+
+ ihid_goodix = devm_kzalloc(&client->dev, sizeof(*ihid_goodix),
+ GFP_KERNEL);
+ if (!ihid_goodix)
+ return -ENOMEM;
+
+ ihid_goodix->ops.power_up = goodix_i2c_hid_power_up;
+ ihid_goodix->ops.power_down = goodix_i2c_hid_power_down;
+
+ /* Start out with reset asserted */
+ ihid_goodix->reset_gpio =
+ devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ihid_goodix->reset_gpio))
+ return PTR_ERR(ihid_goodix->reset_gpio);
+
+ ihid_goodix->vdd = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(ihid_goodix->vdd))
+ return PTR_ERR(ihid_goodix->vdd);
+
+ ihid_goodix->timings = device_get_match_data(&client->dev);
+
+ return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001);
+}
+
+static const struct goodix_i2c_hid_timing_data goodix_gt7375p_timing_data = {
+ .post_power_delay_ms = 10,
+ .post_gpio_reset_delay_ms = 180,
+};
+
+static const struct of_device_id goodix_i2c_hid_of_match[] = {
+ { .compatible = "goodix,gt7375p", .data = &goodix_gt7375p_timing_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, goodix_i2c_hid_of_match);
+
+static struct i2c_driver goodix_i2c_hid_ts_driver = {
+ .driver = {
+ .name = "i2c_hid_of_goodix",
+ .pm = &i2c_hid_core_pm,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = of_match_ptr(goodix_i2c_hid_of_match),
+ },
+ .probe = i2c_hid_of_goodix_probe,
+ .remove = i2c_hid_core_remove,
+ .shutdown = i2c_hid_core_shutdown,
+};
+module_i2c_driver(goodix_i2c_hid_ts_driver);
+
+MODULE_AUTHOR("Douglas Anderson <dianders@chromium.org>");
+MODULE_DESCRIPTION("Goodix i2c-hid touchscreen driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/i2c-hid/i2c-hid-of.c b/drivers/hid/i2c-hid/i2c-hid-of.c
new file mode 100644
index 000000000000..4bf7cea92637
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid-of.c
@@ -0,0 +1,143 @@
+/*
+ * HID over I2C Open Firmware Subclass
+ *
+ * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
+ * Copyright (c) 2012 Red Hat, Inc
+ *
+ * This code was forked out of the core code, which was partly based on
+ * "USB HID support for Linux":
+ *
+ * Copyright (c) 1999 Andreas Gal
+ * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
+ * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
+ * Copyright (c) 2007-2008 Oliver Neukum
+ * Copyright (c) 2006-2010 Jiri Kosina
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+
+#include "i2c-hid.h"
+
+struct i2c_hid_of {
+ struct i2chid_ops ops;
+
+ struct i2c_client *client;
+ struct regulator_bulk_data supplies[2];
+ int post_power_delay_ms;
+};
+
+static int i2c_hid_of_power_up(struct i2chid_ops *ops)
+{
+ struct i2c_hid_of *ihid_of = container_of(ops, struct i2c_hid_of, ops);
+ struct device *dev = &ihid_of->client->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ihid_of->supplies),
+ ihid_of->supplies);
+ if (ret) {
+ dev_warn(dev, "Failed to enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ if (ihid_of->post_power_delay_ms)
+ msleep(ihid_of->post_power_delay_ms);
+
+ return 0;
+}
+
+static void i2c_hid_of_power_down(struct i2chid_ops *ops)
+{
+ struct i2c_hid_of *ihid_of = container_of(ops, struct i2c_hid_of, ops);
+
+ regulator_bulk_disable(ARRAY_SIZE(ihid_of->supplies),
+ ihid_of->supplies);
+}
+
+static int i2c_hid_of_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ struct device *dev = &client->dev;
+ struct i2c_hid_of *ihid_of;
+ u16 hid_descriptor_address;
+ int ret;
+ u32 val;
+
+ ihid_of = devm_kzalloc(&client->dev, sizeof(*ihid_of), GFP_KERNEL);
+ if (!ihid_of)
+ return -ENOMEM;
+
+ ihid_of->ops.power_up = i2c_hid_of_power_up;
+ ihid_of->ops.power_down = i2c_hid_of_power_down;
+
+ ret = of_property_read_u32(dev->of_node, "hid-descr-addr", &val);
+ if (ret) {
+ dev_err(&client->dev, "HID register address not provided\n");
+ return -ENODEV;
+ }
+ if (val >> 16) {
+ dev_err(&client->dev, "Bad HID register address: 0x%08x\n",
+ val);
+ return -EINVAL;
+ }
+ hid_descriptor_address = val;
+
+ if (!device_property_read_u32(&client->dev, "post-power-on-delay-ms",
+ &val))
+ ihid_of->post_power_delay_ms = val;
+
+ ihid_of->supplies[0].supply = "vdd";
+ ihid_of->supplies[1].supply = "vddl";
+ ret = devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(ihid_of->supplies),
+ ihid_of->supplies);
+ if (ret)
+ return ret;
+
+ return i2c_hid_core_probe(client, &ihid_of->ops,
+ hid_descriptor_address);
+}
+
+static const struct of_device_id i2c_hid_of_match[] = {
+ { .compatible = "hid-over-i2c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_hid_of_match);
+
+static const struct i2c_device_id i2c_hid_of_id_table[] = {
+ { "hid", 0 },
+ { "hid-over-i2c", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, i2c_hid_of_id_table);
+
+static struct i2c_driver i2c_hid_of_driver = {
+ .driver = {
+ .name = "i2c_hid_of",
+ .pm = &i2c_hid_core_pm,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = of_match_ptr(i2c_hid_of_match),
+ },
+
+ .probe = i2c_hid_of_probe,
+ .remove = i2c_hid_core_remove,
+ .shutdown = i2c_hid_core_shutdown,
+ .id_table = i2c_hid_of_id_table,
+};
+
+module_i2c_driver(i2c_hid_of_driver);
+
+MODULE_DESCRIPTION("HID over I2C OF driver");
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
index a8c19aef5824..05a7827d211a 100644
--- a/drivers/hid/i2c-hid/i2c-hid.h
+++ b/drivers/hid/i2c-hid/i2c-hid.h
@@ -3,6 +3,7 @@
#ifndef I2C_HID_H
#define I2C_HID_H
+#include <linux/i2c.h>
#ifdef CONFIG_DMI
struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name);
@@ -17,4 +18,25 @@ static inline char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
{ return NULL; }
#endif
+/**
+ * struct i2chid_ops - Ops provided to the core.
+ *
+ * @power_up: do sequencing to power up the device.
+ * @power_down: do sequencing to power down the device.
+ * @shutdown_tail: called at the end of shutdown.
+ */
+struct i2chid_ops {
+ int (*power_up)(struct i2chid_ops *ops);
+ void (*power_down)(struct i2chid_ops *ops);
+ void (*shutdown_tail)(struct i2chid_ops *ops);
+};
+
+int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
+ u16 hid_descriptor_address);
+int i2c_hid_core_remove(struct i2c_client *client);
+
+void i2c_hid_core_shutdown(struct i2c_client *client);
+
+extern const struct dev_pm_ops i2c_hid_core_pm;
+
#endif
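
The struct i2chid_ops vtable above is the seam between the shared i2c-hid core and its transport/power back-ends: a back-end embeds the ops struct, fills in only the callbacks it needs, and hands it to i2c_hid_core_probe() together with the HID descriptor register. As a minimal sketch only (not part of this series; the "acme" identifiers, the single "vdd" supply and the 0x0001 register are hypothetical), a new back-end would look roughly like this, with driver registration done the same way as in the Goodix and OF drivers below:

	/* Illustrative sketch of an i2chid_ops back-end; all "acme" names are
	 * made up and the descriptor register is a placeholder.
	 */
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/i2c.h>
	#include <linux/kernel.h>
	#include <linux/regulator/consumer.h>

	#include "i2c-hid.h"

	struct acme_i2c_hid {
		struct i2chid_ops ops;	/* embedded so container_of() can recover us */
		struct regulator *vdd;
	};

	static int acme_power_up(struct i2chid_ops *ops)
	{
		struct acme_i2c_hid *ihid = container_of(ops, struct acme_i2c_hid, ops);

		return regulator_enable(ihid->vdd);
	}

	static void acme_power_down(struct i2chid_ops *ops)
	{
		struct acme_i2c_hid *ihid = container_of(ops, struct acme_i2c_hid, ops);

		regulator_disable(ihid->vdd);
	}

	static int acme_probe(struct i2c_client *client, const struct i2c_device_id *id)
	{
		struct acme_i2c_hid *ihid;

		ihid = devm_kzalloc(&client->dev, sizeof(*ihid), GFP_KERNEL);
		if (!ihid)
			return -ENOMEM;

		ihid->ops.power_up = acme_power_up;
		ihid->ops.power_down = acme_power_down;

		ihid->vdd = devm_regulator_get(&client->dev, "vdd");
		if (IS_ERR(ihid->vdd))
			return PTR_ERR(ihid->vdd);

		/* 0x0001 is only a placeholder; the register is device specific. */
		return i2c_hid_core_probe(client, &ihid->ops, 0x0001);
	}
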
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 1fb294ca463e..21b87e4003af 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -27,6 +27,7 @@
#define CMP_H_DEVICE_ID 0x06FC
#define EHL_Ax_DEVICE_ID 0x4BB3
#define TGL_LP_DEVICE_ID 0xA0FC
+#define TGL_H_DEVICE_ID 0x43FC
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
@@ -81,5 +82,6 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev);
int ish_hw_start(struct ishtp_device *dev);
void ish_device_disable(struct ishtp_device *dev);
int ish_disable_dma(struct ishtp_device *dev);
+void ish_set_host_ready(struct ishtp_device *dev);
#endif /* _ISHTP_HW_ISH_H_ */
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index a45ac7fa417b..47bbeb8b492b 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -193,6 +193,33 @@ static void ish_clr_host_rdy(struct ishtp_device *dev)
ish_reg_write(dev, IPC_REG_HOST_COMM, host_status);
}
+static bool ish_chk_host_rdy(struct ishtp_device *dev)
+{
+ uint32_t host_status = ish_reg_read(dev, IPC_REG_HOST_COMM);
+
+ return (host_status & IPC_HOSTCOMM_READY_BIT);
+}
+
+/**
+ * ish_set_host_ready() - reconfigure IPC host registers
+ * @dev: ishtp device pointer
+ *
+ * Set the host to the ready state. This is needed when the firmware is
+ * still running but the IPC has been powered down, for example in the
+ * out-of-band (OOB) case.
+ *
+ * Return: none
+ */
+void ish_set_host_ready(struct ishtp_device *dev)
+{
+ if (ish_chk_host_rdy(dev))
+ return;
+
+ ish_set_host_rdy(dev);
+ set_host_ready(dev);
+}
+
/**
* _ishtp_read_hdr() - Read message header
* @dev: ISHTP device pointer
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index c6d48a8648b7..06081cf9b85a 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -5,6 +5,7 @@
* Copyright (c) 2014-2016, Intel Corporation.
*/
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -37,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CMP_H_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_H_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
@@ -111,6 +113,42 @@ static inline bool ish_should_leave_d0i3(struct pci_dev *pdev)
return !pm_resume_via_firmware() || pdev->device == CHV_DEVICE_ID;
}
+static int enable_gpe(struct device *dev)
+{
+#ifdef CONFIG_ACPI
+ acpi_status acpi_sts;
+ struct acpi_device *adev;
+ struct acpi_device_wakeup *wakeup;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev) {
+ dev_err(dev, "get acpi handle failed\n");
+ return -ENODEV;
+ }
+ wakeup = &adev->wakeup;
+
+ acpi_sts = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ if (ACPI_FAILURE(acpi_sts)) {
+ dev_err(dev, "enable ose_gpe failed\n");
+ return -EIO;
+ }
+
+ return 0;
+#else
+ return -ENODEV;
+#endif
+}
+
+static void enable_pme_wake(struct pci_dev *pdev)
+{
+ if ((pci_pme_capable(pdev, PCI_D0) ||
+ pci_pme_capable(pdev, PCI_D3hot) ||
+ pci_pme_capable(pdev, PCI_D3cold)) && !enable_gpe(&pdev->dev)) {
+ pci_pme_active(pdev, true);
+ dev_dbg(&pdev->dev, "ish ipc driver pme wake enabled\n");
+ }
+}
+
/**
* ish_probe() - PCI driver probe callback
* @pdev: pci device
@@ -179,6 +217,10 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
init_waitqueue_head(&ishtp->suspend_wait);
init_waitqueue_head(&ishtp->resume_wait);
+ /* Enable PME for EHL */
+ if (pdev->device == EHL_Ax_DEVICE_ID)
+ enable_pme_wake(pdev);
+
ret = ish_init(ishtp);
if (ret)
return ret;
@@ -218,11 +260,15 @@ static void __maybe_unused ish_resume_handler(struct work_struct *work)
{
struct pci_dev *pdev = to_pci_dev(ish_resume_device);
struct ishtp_device *dev = pci_get_drvdata(pdev);
+ uint32_t fwsts = dev->ops->get_fw_status(dev);
int ret;
- if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag) {
+ if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag
+ && IPC_IS_ISH_ILUP(fwsts)) {
disable_irq_wake(pdev->irq);
+ ish_set_host_ready(dev);
+
ishtp_send_resume(dev);
/* Waiting to get resume response */
@@ -317,6 +363,13 @@ static int __maybe_unused ish_resume(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct ishtp_device *dev = pci_get_drvdata(pdev);
+ /* add this to finish power flow for EHL */
+ if (dev->pdev->device == EHL_Ax_DEVICE_ID) {
+ pci_set_power_state(pdev, PCI_D0);
+ enable_pme_wake(pdev);
+ dev_dbg(dev->devc, "set power state to D0 for ehl\n");
+ }
+
ish_resume_device = device;
dev->resume_flag = 1;
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index e8acd235db2a..8328ef155c46 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -147,9 +147,9 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
}
if (flush)
- wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
+ wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo);
else if (insert)
- wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
+ wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo,
raw_data, report_size);
return insert && !flush;
@@ -1280,7 +1280,7 @@ static void wacom_devm_kfifo_release(struct device *dev, void *res)
static int wacom_devm_kfifo_alloc(struct wacom *wacom)
{
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo;
+ struct kfifo_rec_ptr_2 *pen_fifo;
int error;
pen_fifo = devres_alloc(wacom_devm_kfifo_release,
@@ -1297,6 +1297,7 @@ static int wacom_devm_kfifo_alloc(struct wacom *wacom)
}
devres_add(&wacom->hdev->dev, pen_fifo);
+ wacom_wac->pen_fifo = pen_fifo;
return 0;
}
@@ -1824,7 +1825,7 @@ static ssize_t wacom_show_speed(struct device *dev,
struct hid_device *hdev = to_hid_device(dev);
struct wacom *wacom = hid_get_drvdata(hdev);
- return snprintf(buf, PAGE_SIZE, "%i\n", wacom->wacom_wac.bt_high_speed);
+ return sysfs_emit(buf, "%i\n", wacom->wacom_wac.bt_high_speed);
}
static ssize_t wacom_store_speed(struct device *dev,
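
The wacom_sys.c hunks above switch pen_fifo from an embedded kfifo to one allocated through devres, so its lifetime follows the HID device rather than the containing struct. A generic sketch of that allocate-and-register pattern (illustrative only, not taken from this commit):

	/* Illustrative devres pattern: the resource is freed automatically when
	 * the owning device is unbound, via the release callback registered here.
	 */
	#include <linux/device.h>
	#include <linux/kfifo.h>
	#include <linux/slab.h>

	static void example_kfifo_release(struct device *dev, void *res)
	{
		struct kfifo_rec_ptr_2 *fifo = res;

		kfifo_free(fifo);
	}

	static struct kfifo_rec_ptr_2 *example_kfifo_alloc(struct device *dev,
							   unsigned int size)
	{
		struct kfifo_rec_ptr_2 *fifo;

		fifo = devres_alloc(example_kfifo_release, sizeof(*fifo), GFP_KERNEL);
		if (!fifo)
			return NULL;

		if (kfifo_alloc(fifo, size, GFP_KERNEL)) {
			devres_free(fifo);
			return NULL;
		}

		devres_add(dev, fifo);	/* released by example_kfifo_release() */
		return fifo;
	}
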
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 1bd0eb71559c..44d715c12f6a 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2600,7 +2600,12 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
wacom_wac->is_invalid_bt_frame = !value;
return;
case HID_DG_CONTACTMAX:
- features->touch_max = value;
+ if (!features->touch_max) {
+ features->touch_max = value;
+ } else {
+ hid_warn(hdev, "%s: ignoring attempt to overwrite non-zero touch_max "
+ "%d -> %d\n", __func__, features->touch_max, value);
+ }
return;
}
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index da612b6e9c77..195910dd2154 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -342,7 +342,7 @@ struct wacom_wac {
struct input_dev *pen_input;
struct input_dev *touch_input;
struct input_dev *pad_input;
- struct kfifo_rec_ptr_2 pen_fifo;
+ struct kfifo_rec_ptr_2 *pen_fifo;
int pid;
int num_contacts_left;
u8 bt_features;
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index 7596dc164648..44a3f5660c10 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -424,7 +424,7 @@ static int ssi_hw_init(struct hsi_controller *ssi)
struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
int err;
- err = pm_runtime_get_sync(ssi->device.parent);
+ err = pm_runtime_resume_and_get(ssi->device.parent);
if (err < 0) {
dev_err(&ssi->device, "runtime PM failed %d\n", err);
return err;
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 6fb0c76bfbf8..0bd202de7960 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -618,7 +618,7 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
goto error_clean_ring;
/* Create and init the channel open message */
- open_info = kmalloc(sizeof(*open_info) +
+ open_info = kzalloc(sizeof(*open_info) +
sizeof(struct vmbus_channel_open_channel),
GFP_KERNEL);
if (!open_info) {
@@ -745,7 +745,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
unsigned long flags;
int ret;
- info = kmalloc(sizeof(*info) +
+ info = kzalloc(sizeof(*info) +
sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 1d44bb635bb8..f0ed730e2e4e 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -31,101 +31,118 @@ const struct vmbus_device vmbus_devs[] = {
{ .dev_type = HV_IDE,
HV_IDE_GUID,
.perf_device = true,
+ .allowed_in_isolated = false,
},
/* SCSI */
{ .dev_type = HV_SCSI,
HV_SCSI_GUID,
.perf_device = true,
+ .allowed_in_isolated = true,
},
/* Fibre Channel */
{ .dev_type = HV_FC,
HV_SYNTHFC_GUID,
.perf_device = true,
+ .allowed_in_isolated = false,
},
/* Synthetic NIC */
{ .dev_type = HV_NIC,
HV_NIC_GUID,
.perf_device = true,
+ .allowed_in_isolated = true,
},
/* Network Direct */
{ .dev_type = HV_ND,
HV_ND_GUID,
.perf_device = true,
+ .allowed_in_isolated = false,
},
/* PCIE */
{ .dev_type = HV_PCIE,
HV_PCIE_GUID,
.perf_device = false,
+ .allowed_in_isolated = false,
},
/* Synthetic Frame Buffer */
{ .dev_type = HV_FB,
HV_SYNTHVID_GUID,
.perf_device = false,
+ .allowed_in_isolated = false,
},
/* Synthetic Keyboard */
{ .dev_type = HV_KBD,
HV_KBD_GUID,
.perf_device = false,
+ .allowed_in_isolated = false,
},
/* Synthetic MOUSE */
{ .dev_type = HV_MOUSE,
HV_MOUSE_GUID,
.perf_device = false,
+ .allowed_in_isolated = false,
},
/* KVP */
{ .dev_type = HV_KVP,
HV_KVP_GUID,
.perf_device = false,
+ .allowed_in_isolated = false,
},
/* Time Synch */
{ .dev_type = HV_TS,
HV_TS_GUID,
.perf_device = false,
+ .allowed_in_isolated = true,
},
/* Heartbeat */
{ .dev_type = HV_HB,
HV_HEART_BEAT_GUID,
.perf_device = false,
+ .allowed_in_isolated = true,
},
/* Shutdown */
{ .dev_type = HV_SHUTDOWN,
HV_SHUTDOWN_GUID,
.perf_device = false,
+ .allowed_in_isolated = true,
},
/* File copy */
{ .dev_type = HV_FCOPY,
HV_FCOPY_GUID,
.perf_device = false,
+ .allowed_in_isolated = false,
},
/* Backup */
{ .dev_type = HV_BACKUP,
HV_VSS_GUID,
.perf_device = false,
+ .allowed_in_isolated = false,
},
/* Dynamic Memory */
{ .dev_type = HV_DM,
HV_DM_GUID,
.perf_device = false,
+ .allowed_in_isolated = false,
},
/* Unknown GUID */
{ .dev_type = HV_UNKNOWN,
.perf_device = false,
+ .allowed_in_isolated = false,
},
};
@@ -190,6 +207,7 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
* vmbus_prep_negotiate_resp() - Create default response for Negotiate message
* @icmsghdrp: Pointer to msg header structure
* @buf: Raw buffer channel data
+ * @buflen: Length of the raw buffer channel data.
* @fw_version: The framework versions we can support.
* @fw_vercnt: The size of @fw_version.
* @srv_version: The service versions we can support.
@@ -202,8 +220,8 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
* Set up and fill in default negotiate response message.
* Mainly used by Hyper-V drivers.
*/
-bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
- u8 *buf, const int *fw_version, int fw_vercnt,
+bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
+ u32 buflen, const int *fw_version, int fw_vercnt,
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version)
{
@@ -215,10 +233,14 @@ bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
bool found_match = false;
struct icmsg_negotiate *negop;
+ /* Check that there's enough space for icframe_vercnt, icmsg_vercnt */
+ if (buflen < ICMSG_HDR + offsetof(struct icmsg_negotiate, reserved)) {
+ pr_err_ratelimited("Invalid icmsg negotiate\n");
+ return false;
+ }
+
icmsghdrp->icmsgsize = 0x10;
- negop = (struct icmsg_negotiate *)&buf[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
+ negop = (struct icmsg_negotiate *)&buf[ICMSG_HDR];
icframe_major = negop->icframe_vercnt;
icframe_minor = 0;
@@ -226,6 +248,15 @@ bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
icmsg_major = negop->icmsg_vercnt;
icmsg_minor = 0;
+ /* Validate negop packet */
+ if (icframe_major > IC_VERSION_NEGOTIATION_MAX_VER_COUNT ||
+ icmsg_major > IC_VERSION_NEGOTIATION_MAX_VER_COUNT ||
+ ICMSG_NEGOTIATE_PKT_SIZE(icframe_major, icmsg_major) > buflen) {
+ pr_err_ratelimited("Invalid icmsg negotiate - icframe_major: %u, icmsg_major: %u\n",
+ icframe_major, icmsg_major);
+ goto fw_error;
+ }
+
/*
* Select the framework version number we will
* support.
@@ -889,6 +920,20 @@ find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer)
return channel;
}
+static bool vmbus_is_valid_device(const guid_t *guid)
+{
+ u16 i;
+
+ if (!hv_is_isolation_supported())
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(vmbus_devs); i++) {
+ if (guid_equal(guid, &vmbus_devs[i].guid))
+ return vmbus_devs[i].allowed_in_isolated;
+ }
+ return false;
+}
+
/*
* vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
*
@@ -903,6 +948,13 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
trace_vmbus_onoffer(offer);
+ if (!vmbus_is_valid_device(&offer->offer.if_type)) {
+ pr_err_ratelimited("Invalid offer %d from the host supporting isolation\n",
+ offer->child_relid);
+ atomic_dec(&vmbus_connection.offer_in_progress);
+ return;
+ }
+
oldchannel = find_primary_channel_by_offer(offer);
if (oldchannel != NULL) {
@@ -1049,6 +1101,18 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
mutex_lock(&vmbus_connection.channel_mutex);
channel = relid2channel(rescind->child_relid);
+ if (channel != NULL) {
+ /*
+ * Guarantee that no other instance of vmbus_onoffer_rescind()
+ * has got a reference to the channel object. Synchronize on
+ * &vmbus_connection.channel_mutex.
+ */
+ if (channel->rescind_ref) {
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ return;
+ }
+ channel->rescind_ref = true;
+ }
mutex_unlock(&vmbus_connection.channel_mutex);
if (channel == NULL) {
@@ -1102,8 +1166,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
vmbus_device_unregister(channel->device_obj);
put_device(dev);
}
- }
- if (channel->primary_channel != NULL) {
+ } else if (channel->primary_channel != NULL) {
/*
* Sub-channel is being rescinded. Following is the channel
 * close sequence when initiated from the driver (refer to
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 11170d9a2e1a..c83612cddb99 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -244,6 +244,13 @@ int vmbus_connect(void)
break;
}
+ if (hv_is_isolation_supported() && version < VERSION_WIN10_V5_2) {
+ pr_err("Invalid VMBus version %d.%d (expected >= %d.%d) from the host supporting isolation\n",
+ version >> 16, version & 0xFFFF, VERSION_WIN10_V5_2 >> 16, VERSION_WIN10_V5_2 & 0xFFFF);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
vmbus_proto_version = version;
pr_info("Vmbus version:%d.%d\n",
version >> 16, version & 0xFFFF);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 8c471823a5af..2f776d78e3c1 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -726,7 +726,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
ret = add_memory(nid, PFN_PHYS((start_pfn)),
- (HA_CHUNK << PAGE_SHIFT), MEMHP_MERGE_RESOURCE);
+ (HA_CHUNK << PAGE_SHIFT), MHP_MERGE_RESOURCE);
if (ret) {
pr_err("hot_add memory failed error is %d\n", ret);
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 5040d7e0cd9e..59ce85e00a02 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -235,15 +235,27 @@ void hv_fcopy_onchannelcallback(void *context)
if (fcopy_transaction.state > HVUTIL_READY)
return;
- vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen,
- &requestid);
- if (recvlen <= 0)
+ if (vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen, &requestid)) {
+ pr_err_ratelimited("Fcopy request received. Could not read into recv buf\n");
return;
+ }
+
+ if (!recvlen)
+ return;
+
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("Fcopy request received. Packet length too small: %d\n",
+ recvlen);
+ return;
+ }
icmsghdr = (struct icmsg_hdr *)&recv_buffer[
sizeof(struct vmbuspipe_hdr)];
+
if (icmsghdr->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- if (vmbus_prep_negotiate_resp(icmsghdr, recv_buffer,
+ if (vmbus_prep_negotiate_resp(icmsghdr,
+ recv_buffer, recvlen,
fw_versions, FW_VER_COUNT,
fcopy_versions, FCOPY_VER_COUNT,
NULL, &fcopy_srv_version)) {
@@ -252,10 +264,14 @@ void hv_fcopy_onchannelcallback(void *context)
fcopy_srv_version >> 16,
fcopy_srv_version & 0xFFFF);
}
- } else {
- fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
+ } else if (icmsghdr->icmsgtype == ICMSGTYPE_FCOPY) {
+ /* Ensure recvlen is big enough to contain hv_fcopy_hdr */
+ if (recvlen < ICMSG_HDR + sizeof(struct hv_fcopy_hdr)) {
+ pr_err_ratelimited("Invalid Fcopy hdr. Packet length too small: %u\n",
+ recvlen);
+ return;
+ }
+ fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[ICMSG_HDR];
/*
* Stash away this global state for completing the
@@ -280,6 +296,10 @@ void hv_fcopy_onchannelcallback(void *context)
schedule_delayed_work(&fcopy_timeout_work,
HV_UTIL_TIMEOUT * HZ);
return;
+ } else {
+ pr_err_ratelimited("Fcopy request received. Invalid msg type: %d\n",
+ icmsghdr->icmsgtype);
+ return;
}
icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
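
The same validation shape recurs in the kvp, vss, shutdown, timesync and heartbeat callbacks below: reject transport errors and zero-length packets, require at least the pipe and icmsg headers before reading the message type, then bound-check again before casting to the message-specific structure. Reduced to a sketch (illustrative, not from this series; ICMSG_HDR is assumed to be the header-size macro introduced by the hyperv header changes in this series):

	/* Illustrative helper capturing the repeated bound check: a host-supplied
	 * buffer may only be cast to a payload structure when it holds at least
	 * ICMSG_HDR plus that structure.
	 */
	#include <linux/types.h>

	static inline bool example_icmsg_payload_fits(u32 recvlen, size_t payload_size)
	{
		return recvlen >= ICMSG_HDR + payload_size;
	}

	/*
	 * Typical use, mirroring the fcopy hunk above:
	 *
	 *	if (!example_icmsg_payload_fits(recvlen, sizeof(struct hv_fcopy_hdr)))
	 *		return;
	 *	fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[ICMSG_HDR];
	 */
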
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 754d35a25a1c..b49962d312ce 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -662,71 +662,87 @@ void hv_kvp_onchannelcallback(void *context)
if (kvp_transaction.state > HVUTIL_READY)
return;
- vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 4, &recvlen,
- &requestid);
-
- if (recvlen > 0) {
- icmsghdrp = (struct icmsg_hdr *)&recv_buffer[
- sizeof(struct vmbuspipe_hdr)];
-
- if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- if (vmbus_prep_negotiate_resp(icmsghdrp,
- recv_buffer, fw_versions, FW_VER_COUNT,
- kvp_versions, KVP_VER_COUNT,
- NULL, &kvp_srv_version)) {
- pr_info("KVP IC version %d.%d\n",
- kvp_srv_version >> 16,
- kvp_srv_version & 0xFFFF);
- }
- } else {
- kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
+ if (vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 4, &recvlen, &requestid)) {
+ pr_err_ratelimited("KVP request received. Could not read into recv buf\n");
+ return;
+ }
- /*
- * Stash away this global state for completing the
- * transaction; note transactions are serialized.
- */
+ if (!recvlen)
+ return;
- kvp_transaction.recv_len = recvlen;
- kvp_transaction.recv_req_id = requestid;
- kvp_transaction.kvp_msg = kvp_msg;
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("KVP request received. Packet length too small: %d\n",
+ recvlen);
+ return;
+ }
- if (kvp_transaction.state < HVUTIL_READY) {
- /* Userspace is not registered yet */
- kvp_respond_to_host(NULL, HV_E_FAIL);
- return;
- }
- kvp_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
+ icmsghdrp = (struct icmsg_hdr *)&recv_buffer[sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ if (vmbus_prep_negotiate_resp(icmsghdrp,
+ recv_buffer, recvlen,
+ fw_versions, FW_VER_COUNT,
+ kvp_versions, KVP_VER_COUNT,
+ NULL, &kvp_srv_version)) {
+ pr_info("KVP IC version %d.%d\n",
+ kvp_srv_version >> 16,
+ kvp_srv_version & 0xFFFF);
+ }
+ } else if (icmsghdrp->icmsgtype == ICMSGTYPE_KVPEXCHANGE) {
+ /*
+ * recvlen is not checked against sizeof(struct kvp_msg) because kvp_msg contains
+ * a union of structs and the msg type received is not known. Code using this
+ * struct should provide validation when accessing its fields.
+ */
+ kvp_msg = (struct hv_kvp_msg *)&recv_buffer[ICMSG_HDR];
- /*
- * Get the information from the
- * user-mode component.
- * component. This transaction will be
- * completed when we get the value from
- * the user-mode component.
- * Set a timeout to deal with
- * user-mode not responding.
- */
- schedule_work(&kvp_sendkey_work);
- schedule_delayed_work(&kvp_timeout_work,
- HV_UTIL_TIMEOUT * HZ);
+ /*
+ * Stash away this global state for completing the
+ * transaction; note transactions are serialized.
+ */
- return;
+ kvp_transaction.recv_len = recvlen;
+ kvp_transaction.recv_req_id = requestid;
+ kvp_transaction.kvp_msg = kvp_msg;
+ if (kvp_transaction.state < HVUTIL_READY) {
+ /* Userspace is not registered yet */
+ kvp_respond_to_host(NULL, HV_E_FAIL);
+ return;
}
+ kvp_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
- icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
- | ICMSGHDRFLAG_RESPONSE;
+ /*
+ * Get the information from the
+ * user-mode component. This transaction will be
+ * completed when we get the value from
+ * the user-mode component.
+ * Set a timeout to deal with
+ * user-mode not responding.
+ */
+ schedule_work(&kvp_sendkey_work);
+ schedule_delayed_work(&kvp_timeout_work,
+ HV_UTIL_TIMEOUT * HZ);
- vmbus_sendpacket(channel, recv_buffer,
- recvlen, requestid,
- VM_PKT_DATA_INBAND, 0);
+ return;
- host_negotiatied = NEGO_FINISHED;
- hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
+ } else {
+ pr_err_ratelimited("KVP request received. Invalid msg type: %d\n",
+ icmsghdrp->icmsgtype);
+ return;
}
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
+ | ICMSGHDRFLAG_RESPONSE;
+
+ vmbus_sendpacket(channel, recv_buffer,
+ recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
+
+ host_negotiatied = NEGO_FINISHED;
+ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
}
static void kvp_on_reset(void)
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 783779e4cc1a..2267bd4c3472 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -298,49 +298,64 @@ void hv_vss_onchannelcallback(void *context)
if (vss_transaction.state > HVUTIL_READY)
return;
- vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen,
- &requestid);
-
- if (recvlen > 0) {
- icmsghdrp = (struct icmsg_hdr *)&recv_buffer[
- sizeof(struct vmbuspipe_hdr)];
-
- if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- if (vmbus_prep_negotiate_resp(icmsghdrp,
- recv_buffer, fw_versions, FW_VER_COUNT,
- vss_versions, VSS_VER_COUNT,
- NULL, &vss_srv_version)) {
-
- pr_info("VSS IC version %d.%d\n",
- vss_srv_version >> 16,
- vss_srv_version & 0xFFFF);
- }
- } else {
- vss_msg = (struct hv_vss_msg *)&recv_buffer[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
-
- /*
- * Stash away this global state for completing the
- * transaction; note transactions are serialized.
- */
-
- vss_transaction.recv_len = recvlen;
- vss_transaction.recv_req_id = requestid;
- vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
-
- schedule_work(&vss_handle_request_work);
+ if (vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen, &requestid)) {
+ pr_err_ratelimited("VSS request received. Could not read into recv buf\n");
+ return;
+ }
+
+ if (!recvlen)
+ return;
+
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("VSS request received. Packet length too small: %d\n",
+ recvlen);
+ return;
+ }
+
+ icmsghdrp = (struct icmsg_hdr *)&recv_buffer[sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ if (vmbus_prep_negotiate_resp(icmsghdrp,
+ recv_buffer, recvlen,
+ fw_versions, FW_VER_COUNT,
+ vss_versions, VSS_VER_COUNT,
+ NULL, &vss_srv_version)) {
+
+ pr_info("VSS IC version %d.%d\n",
+ vss_srv_version >> 16,
+ vss_srv_version & 0xFFFF);
+ }
+ } else if (icmsghdrp->icmsgtype == ICMSGTYPE_VSS) {
+ /* Ensure recvlen is big enough to contain hv_vss_msg */
+ if (recvlen < ICMSG_HDR + sizeof(struct hv_vss_msg)) {
+ pr_err_ratelimited("Invalid VSS msg. Packet length too small: %u\n",
+ recvlen);
return;
}
+ vss_msg = (struct hv_vss_msg *)&recv_buffer[ICMSG_HDR];
+
+ /*
+ * Stash away this global state for completing the
+ * transaction; note transactions are serialized.
+ */
- icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
- | ICMSGHDRFLAG_RESPONSE;
+ vss_transaction.recv_len = recvlen;
+ vss_transaction.recv_req_id = requestid;
+ vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
- vmbus_sendpacket(channel, recv_buffer,
- recvlen, requestid,
- VM_PKT_DATA_INBAND, 0);
+ schedule_work(&vss_handle_request_work);
+ return;
+ } else {
+ pr_err_ratelimited("VSS request received. Invalid msg type: %d\n",
+ icmsghdrp->icmsgtype);
+ return;
}
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION |
+ ICMSGHDRFLAG_RESPONSE;
+ vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
}
static void vss_on_reset(void)
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 05566ecdbe4b..e4aefeb330da 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -195,73 +195,88 @@ static void shutdown_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
- vmbus_recvpacket(channel, shut_txf_buf,
- HV_HYP_PAGE_SIZE, &recvlen, &requestid);
+ if (vmbus_recvpacket(channel, shut_txf_buf, HV_HYP_PAGE_SIZE, &recvlen, &requestid)) {
+ pr_err_ratelimited("Shutdown request received. Could not read into shut txf buf\n");
+ return;
+ }
- if (recvlen > 0) {
- icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[
- sizeof(struct vmbuspipe_hdr)];
+ if (!recvlen)
+ return;
- if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- if (vmbus_prep_negotiate_resp(icmsghdrp, shut_txf_buf,
- fw_versions, FW_VER_COUNT,
- sd_versions, SD_VER_COUNT,
- NULL, &sd_srv_version)) {
- pr_info("Shutdown IC version %d.%d\n",
- sd_srv_version >> 16,
- sd_srv_version & 0xFFFF);
- }
- } else {
- shutdown_msg =
- (struct shutdown_msg_data *)&shut_txf_buf[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("Shutdown request received. Packet length too small: %d\n",
+ recvlen);
+ return;
+ }
- /*
- * shutdown_msg->flags can be 0(shut down), 2(reboot),
- * or 4(hibernate). It may bitwise-OR 1, which means
- * performing the request by force. Linux always tries
- * to perform the request by force.
- */
- switch (shutdown_msg->flags) {
- case 0:
- case 1:
- icmsghdrp->status = HV_S_OK;
- work = &shutdown_work;
- pr_info("Shutdown request received -"
- " graceful shutdown initiated\n");
- break;
- case 2:
- case 3:
- icmsghdrp->status = HV_S_OK;
- work = &restart_work;
- pr_info("Restart request received -"
- " graceful restart initiated\n");
- break;
- case 4:
- case 5:
- pr_info("Hibernation request received\n");
- icmsghdrp->status = hibernation_supported ?
- HV_S_OK : HV_E_FAIL;
- if (hibernation_supported)
- work = &hibernate_context.work;
- break;
- default:
- icmsghdrp->status = HV_E_FAIL;
- pr_info("Shutdown request received -"
- " Invalid request\n");
- break;
- }
+ icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ if (vmbus_prep_negotiate_resp(icmsghdrp,
+ shut_txf_buf, recvlen,
+ fw_versions, FW_VER_COUNT,
+ sd_versions, SD_VER_COUNT,
+ NULL, &sd_srv_version)) {
+ pr_info("Shutdown IC version %d.%d\n",
+ sd_srv_version >> 16,
+ sd_srv_version & 0xFFFF);
+ }
+ } else if (icmsghdrp->icmsgtype == ICMSGTYPE_SHUTDOWN) {
+ /* Ensure recvlen is big enough to contain shutdown_msg_data struct */
+ if (recvlen < ICMSG_HDR + sizeof(struct shutdown_msg_data)) {
+ pr_err_ratelimited("Invalid shutdown msg data. Packet length too small: %u\n",
+ recvlen);
+ return;
}
- icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
- | ICMSGHDRFLAG_RESPONSE;
-
- vmbus_sendpacket(channel, shut_txf_buf,
- recvlen, requestid,
- VM_PKT_DATA_INBAND, 0);
+ shutdown_msg = (struct shutdown_msg_data *)&shut_txf_buf[ICMSG_HDR];
+
+ /*
+ * shutdown_msg->flags can be 0(shut down), 2(reboot),
+ * or 4(hibernate). It may bitwise-OR 1, which means
+ * performing the request by force. Linux always tries
+ * to perform the request by force.
+ */
+ switch (shutdown_msg->flags) {
+ case 0:
+ case 1:
+ icmsghdrp->status = HV_S_OK;
+ work = &shutdown_work;
+ pr_info("Shutdown request received - graceful shutdown initiated\n");
+ break;
+ case 2:
+ case 3:
+ icmsghdrp->status = HV_S_OK;
+ work = &restart_work;
+ pr_info("Restart request received - graceful restart initiated\n");
+ break;
+ case 4:
+ case 5:
+ pr_info("Hibernation request received\n");
+ icmsghdrp->status = hibernation_supported ?
+ HV_S_OK : HV_E_FAIL;
+ if (hibernation_supported)
+ work = &hibernate_context.work;
+ break;
+ default:
+ icmsghdrp->status = HV_E_FAIL;
+ pr_info("Shutdown request received - Invalid request\n");
+ break;
+ }
+ } else {
+ icmsghdrp->status = HV_E_FAIL;
+ pr_err_ratelimited("Shutdown request received. Invalid msg type: %d\n",
+ icmsghdrp->icmsgtype);
}
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
+ | ICMSGHDRFLAG_RESPONSE;
+
+ vmbus_sendpacket(channel, shut_txf_buf,
+ recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
+
if (work)
schedule_work(work);
}
@@ -396,19 +411,27 @@ static void timesync_onchannelcallback(void *context)
HV_HYP_PAGE_SIZE, &recvlen,
&requestid);
if (ret) {
- pr_warn_once("TimeSync IC pkt recv failed (Err: %d)\n",
- ret);
+ pr_err_ratelimited("TimeSync IC pkt recv failed (Err: %d)\n",
+ ret);
break;
}
if (!recvlen)
break;
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("Timesync request received. Packet length too small: %d\n",
+ recvlen);
+ break;
+ }
+
icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- if (vmbus_prep_negotiate_resp(icmsghdrp, time_txf_buf,
+ if (vmbus_prep_negotiate_resp(icmsghdrp,
+ time_txf_buf, recvlen,
fw_versions, FW_VER_COUNT,
ts_versions, TS_VER_COUNT,
NULL, &ts_srv_version)) {
@@ -416,33 +439,44 @@ static void timesync_onchannelcallback(void *context)
ts_srv_version >> 16,
ts_srv_version & 0xFFFF);
}
- } else {
+ } else if (icmsghdrp->icmsgtype == ICMSGTYPE_TIMESYNC) {
if (ts_srv_version > TS_VERSION_3) {
- refdata = (struct ictimesync_ref_data *)
- &time_txf_buf[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
+ /* Ensure recvlen is big enough to read ictimesync_ref_data */
+ if (recvlen < ICMSG_HDR + sizeof(struct ictimesync_ref_data)) {
+ pr_err_ratelimited("Invalid ictimesync ref data. Length too small: %u\n",
+ recvlen);
+ break;
+ }
+ refdata = (struct ictimesync_ref_data *)&time_txf_buf[ICMSG_HDR];
adj_guesttime(refdata->parenttime,
refdata->vmreferencetime,
refdata->flags);
} else {
- timedatap = (struct ictimesync_data *)
- &time_txf_buf[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
+ /* Ensure recvlen is big enough to read ictimesync_data */
+ if (recvlen < ICMSG_HDR + sizeof(struct ictimesync_data)) {
+ pr_err_ratelimited("Invalid ictimesync data. Length too small: %u\n",
+ recvlen);
+ break;
+ }
+ timedatap = (struct ictimesync_data *)&time_txf_buf[ICMSG_HDR];
+
adj_guesttime(timedatap->parenttime,
hv_read_reference_counter(),
timedatap->flags);
}
+ } else {
+ icmsghdrp->status = HV_E_FAIL;
+ pr_err_ratelimited("Timesync request received. Invalid msg type: %d\n",
+ icmsghdrp->icmsgtype);
}
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
| ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, time_txf_buf,
- recvlen, requestid,
- VM_PKT_DATA_INBAND, 0);
+ recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
}
}
@@ -462,18 +496,28 @@ static void heartbeat_onchannelcallback(void *context)
while (1) {
- vmbus_recvpacket(channel, hbeat_txf_buf,
- HV_HYP_PAGE_SIZE, &recvlen, &requestid);
+ if (vmbus_recvpacket(channel, hbeat_txf_buf, HV_HYP_PAGE_SIZE,
+ &recvlen, &requestid)) {
+ pr_err_ratelimited("Heartbeat request received. Could not read into hbeat txf buf\n");
+ return;
+ }
if (!recvlen)
break;
+ /* Ensure recvlen is big enough to read header data */
+ if (recvlen < ICMSG_HDR) {
+ pr_err_ratelimited("Heartbeat request received. Packet length too small: %d\n",
+ recvlen);
+ break;
+ }
+
icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
if (vmbus_prep_negotiate_resp(icmsghdrp,
- hbeat_txf_buf,
+ hbeat_txf_buf, recvlen,
fw_versions, FW_VER_COUNT,
hb_versions, HB_VER_COUNT,
NULL, &hb_srv_version)) {
@@ -482,21 +526,31 @@ static void heartbeat_onchannelcallback(void *context)
hb_srv_version >> 16,
hb_srv_version & 0xFFFF);
}
- } else {
- heartbeat_msg =
- (struct heartbeat_msg_data *)&hbeat_txf_buf[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
+ } else if (icmsghdrp->icmsgtype == ICMSGTYPE_HEARTBEAT) {
+ /*
+ * Ensure recvlen is big enough to read seq_num. Reserved area is not
+ * included in the check as the host may not fill it up entirely
+ */
+ if (recvlen < ICMSG_HDR + sizeof(u64)) {
+ pr_err_ratelimited("Invalid heartbeat msg data. Length too small: %u\n",
+ recvlen);
+ break;
+ }
+ heartbeat_msg = (struct heartbeat_msg_data *)&hbeat_txf_buf[ICMSG_HDR];
heartbeat_msg->seq_num += 1;
+ } else {
+ icmsghdrp->status = HV_E_FAIL;
+ pr_err_ratelimited("Heartbeat request received. Invalid msg type: %d\n",
+ icmsghdrp->icmsgtype);
}
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
| ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, hbeat_txf_buf,
- recvlen, requestid,
- VM_PKT_DATA_INBAND, 0);
+ recvlen, requestid,
+ VM_PKT_DATA_INBAND, 0);
}
}
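
The flags handling above folds each forced variant (bit 0 set) into the same case as its graceful counterpart, which is why the cases come in 0/1, 2/3 and 4/5 pairs. Written out as a small helper (illustrative only, not part of this commit):

	/* Illustrative decoding of shutdown_msg->flags as described in the comment
	 * above: bit 0 marks a forced request, the remaining value picks the action.
	 */
	#include <linux/types.h>

	static const char *example_shutdown_action(u32 flags)
	{
		switch (flags & ~1u) {	/* ignore the "forced" bit */
		case 0:
			return "shutdown";
		case 2:
			return "reboot";
		case 4:
			return "hibernate";
		default:
			return "invalid";
		}
	}
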
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index d491fdcee61f..10dce9f91216 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -678,6 +678,23 @@ static const struct attribute_group vmbus_dev_group = {
};
__ATTRIBUTE_GROUPS(vmbus_dev);
+/* Set up the attribute for /sys/bus/vmbus/hibernation */
+static ssize_t hibernation_show(struct bus_type *bus, char *buf)
+{
+ return sprintf(buf, "%d\n", !!hv_is_hibernation_supported());
+}
+
+static BUS_ATTR_RO(hibernation);
+
+static struct attribute *vmbus_bus_attrs[] = {
+ &bus_attr_hibernation.attr,
+ NULL,
+};
+static const struct attribute_group vmbus_bus_group = {
+ .attrs = vmbus_bus_attrs,
+};
+__ATTRIBUTE_GROUPS(vmbus_bus);
+
/*
* vmbus_uevent - add uevent for our device
*
@@ -1024,6 +1041,7 @@ static struct bus_type hv_bus = {
.uevent = vmbus_uevent,
.dev_groups = vmbus_dev_groups,
.drv_groups = vmbus_drv_groups,
+ .bus_groups = vmbus_bus_groups,
.pm = &vmbus_pm,
};
@@ -1054,12 +1072,14 @@ void vmbus_on_msg_dpc(unsigned long data)
{
struct hv_per_cpu_context *hv_cpu = (void *)data;
void *page_addr = hv_cpu->synic_message_page;
- struct hv_message *msg = (struct hv_message *)page_addr +
+ struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
VMBUS_MESSAGE_SINT;
struct vmbus_channel_message_header *hdr;
+ enum vmbus_channel_message_type msgtype;
const struct vmbus_channel_message_table_entry *entry;
struct onmessage_work_context *ctx;
- u32 message_type = msg->header.message_type;
+ __u8 payload_size;
+ u32 message_type;
/*
* 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
@@ -1068,45 +1088,52 @@ void vmbus_on_msg_dpc(unsigned long data)
*/
BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));
+ /*
+ * Since the message is in memory shared with the host, an erroneous or
+ * malicious Hyper-V could modify the message while vmbus_on_msg_dpc()
+ * or individual message handlers are executing; to prevent this, copy
+ * the message into private memory.
+ */
+ memcpy(&msg_copy, msg, sizeof(struct hv_message));
+
+ message_type = msg_copy.header.message_type;
if (message_type == HVMSG_NONE)
/* no msg */
return;
- hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+ hdr = (struct vmbus_channel_message_header *)msg_copy.u.payload;
+ msgtype = hdr->msgtype;
trace_vmbus_on_msg_dpc(hdr);
- if (hdr->msgtype >= CHANNELMSG_COUNT) {
- WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
+ if (msgtype >= CHANNELMSG_COUNT) {
+ WARN_ONCE(1, "unknown msgtype=%d\n", msgtype);
goto msg_handled;
}
- if (msg->header.payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
- WARN_ONCE(1, "payload size is too large (%d)\n",
- msg->header.payload_size);
+ payload_size = msg_copy.header.payload_size;
+ if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
+ WARN_ONCE(1, "payload size is too large (%d)\n", payload_size);
goto msg_handled;
}
- entry = &channel_message_table[hdr->msgtype];
+ entry = &channel_message_table[msgtype];
if (!entry->message_handler)
goto msg_handled;
- if (msg->header.payload_size < entry->min_payload_len) {
- WARN_ONCE(1, "message too short: msgtype=%d len=%d\n",
- hdr->msgtype, msg->header.payload_size);
+ if (payload_size < entry->min_payload_len) {
+ WARN_ONCE(1, "message too short: msgtype=%d len=%d\n", msgtype, payload_size);
goto msg_handled;
}
if (entry->handler_type == VMHT_BLOCKING) {
- ctx = kmalloc(sizeof(*ctx) + msg->header.payload_size,
- GFP_ATOMIC);
+ ctx = kmalloc(sizeof(*ctx) + payload_size, GFP_ATOMIC);
if (ctx == NULL)
return;
INIT_WORK(&ctx->work, vmbus_onmessage_work);
- memcpy(&ctx->msg, msg, sizeof(msg->header) +
- msg->header.payload_size);
+ memcpy(&ctx->msg, &msg_copy, sizeof(msg->header) + payload_size);
/*
* The host can generate a rescind message while we
@@ -1115,7 +1142,7 @@ void vmbus_on_msg_dpc(unsigned long data)
* by offer_in_progress and by channel_mutex. See also the
* inline comments in vmbus_onoffer_rescind().
*/
- switch (hdr->msgtype) {
+ switch (msgtype) {
case CHANNELMSG_RESCIND_CHANNELOFFER:
/*
* If we are handling the rescind message;
@@ -2618,6 +2645,9 @@ static int __init hv_acpi_init(void)
if (!hv_is_hyperv_initialized())
return -ENODEV;
+ if (hv_root_partition)
+ return 0;
+
init_completion(&probe_event);
/*
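
The msg_copy change above is a copy-before-validate defence: the message page is shared with the host, so the message is snapshotted into private memory once and every check and handler then operates on the snapshot. In miniature (an illustrative sketch, not this driver's code; struct hv_message and HV_MESSAGE_PAYLOAD_BYTE_COUNT come from the existing Hyper-V headers):

	/* Illustrative copy-before-validate pattern for data in memory writable by
	 * another party (here the hypervisor): validate and parse only the private
	 * snapshot, never the shared buffer.
	 */
	#include <linux/string.h>

	static void example_handle_shared(const volatile struct hv_message *shared)
	{
		struct hv_message snap;

		memcpy(&snap, (const void *)shared, sizeof(snap));	/* one snapshot */

		if (snap.header.payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
			return;						/* reject before use */

		/* ... parse snap.u.payload; the shared page is never re-read ... */
	}
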
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 1ecf697d8d99..54f04e61fb83 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -38,19 +38,6 @@ config HWMON_DEBUG_CHIP
comment "Native drivers"
-config SENSORS_AB8500
- tristate "AB8500 thermal monitoring"
- depends on AB8500_GPADC && AB8500_BM && (IIO = y)
- default n
- help
- If you say yes here you get support for the thermal sensor part
- of the AB8500 chip. The driver includes thermal management for
- AB8500 die and two GPADC channels. The GPADC channel are preferably
- used to access sensors outside the AB8500 chip.
-
- This driver can also be built as a module. If so, the module
- will be called abx500-temp.
-
config SENSORS_ABITUGURU
tristate "Abit uGuru (rev 1 & 2)"
depends on X86 && DMI
@@ -257,6 +244,16 @@ config SENSORS_ADT7475
This driver can also be built as a module. If so, the module
will be called adt7475.
+config SENSORS_AHT10
+ tristate "Aosong AHT10"
+ depends on I2C
+ help
+ If you say yes here, you get support for the Aosong AHT10
+ temperature and humidity sensors
+
+ This driver can also be built as a module. If so, the module
+ will be called aht10.
+
config SENSORS_AS370
tristate "Synaptics AS370 SoC hardware monitoring driver"
help
@@ -1136,6 +1133,17 @@ config SENSORS_TC654
This driver can also be built as a module. If so, the module
will be called tc654.
+config SENSORS_TPS23861
+ tristate "Texas Instruments TPS23861 PoE PSE"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for Texas Instruments
+ TPS23861 802.3at PoE PSE chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called tps23861.
+
config SENSORS_MENF21BMC_HWMON
tristate "MEN 14F021P00 BMC Hardware Monitoring"
depends on MFD_MENF21BMC
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 09a86c5e1d29..fe38e8a5c979 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_SENSORS_W83795) += w83795.o
obj-$(CONFIG_SENSORS_W83781D) += w83781d.o
obj-$(CONFIG_SENSORS_W83791D) += w83791d.o
-obj-$(CONFIG_SENSORS_AB8500) += abx500.o ab8500.o
obj-$(CONFIG_SENSORS_ABITUGURU) += abituguru.o
obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o
obj-$(CONFIG_SENSORS_AD7314) += ad7314.o
@@ -45,6 +44,7 @@ obj-$(CONFIG_SENSORS_ADT7411) += adt7411.o
obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o
obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o
obj-$(CONFIG_SENSORS_ADT7475) += adt7475.o
+obj-$(CONFIG_SENSORS_AHT10) += aht10.o
obj-$(CONFIG_SENSORS_AMD_ENERGY) += amd_energy.o
obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o
obj-$(CONFIG_SENSORS_ARM_SCMI) += scmi-hwmon.o
@@ -144,6 +144,7 @@ obj-$(CONFIG_SENSORS_MAX31790) += max31790.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
obj-$(CONFIG_SENSORS_TC654) += tc654.o
+obj-$(CONFIG_SENSORS_TPS23861) += tps23861.o
obj-$(CONFIG_SENSORS_MLXREG_FAN) += mlxreg-fan.o
obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
obj-$(CONFIG_SENSORS_MR75203) += mr75203.o
diff --git a/drivers/hwmon/ab8500.c b/drivers/hwmon/ab8500.c
deleted file mode 100644
index 53f3379d799d..000000000000
--- a/drivers/hwmon/ab8500.c
+++ /dev/null
@@ -1,224 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson 2010 - 2013
- * Author: Martin Persson <martin.persson@stericsson.com>
- * Hongbo Zhang <hongbo.zhang@linaro.org>
- *
- * When the AB8500 thermal warning temperature is reached (threshold cannot
- * be changed by SW), an interrupt is set, and if no further action is taken
- * within a certain time frame, kernel_power_off will be called.
- *
- * When AB8500 thermal shutdown temperature is reached a hardware shutdown of
- * the AB8500 will occur.
- */
-
-#include <linux/err.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/mfd/abx500.h>
-#include <linux/mfd/abx500/ab8500-bm.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/power/ab8500.h>
-#include <linux/reboot.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/iio/consumer.h>
-#include "abx500.h"
-
-#define DEFAULT_POWER_OFF_DELAY (HZ * 10)
-#define THERMAL_VCC 1800
-#define PULL_UP_RESISTOR 47000
-
-#define AB8500_SENSOR_AUX1 0
-#define AB8500_SENSOR_AUX2 1
-#define AB8500_SENSOR_BTEMP_BALL 2
-#define AB8500_SENSOR_BAT_CTRL 3
-#define NUM_MONITORED_SENSORS 4
-
-struct ab8500_gpadc_cfg {
- const struct abx500_res_to_temp *temp_tbl;
- int tbl_sz;
- int vcc;
- int r_up;
-};
-
-struct ab8500_temp {
- struct iio_channel *aux1;
- struct iio_channel *aux2;
- struct ab8500_btemp *btemp;
- struct delayed_work power_off_work;
- struct ab8500_gpadc_cfg cfg;
- struct abx500_temp *abx500_data;
-};
-
-/*
- * The hardware connection is like this:
- * VCC----[ R_up ]-----[ NTC ]----GND
- * where R_up is pull-up resistance, and GPADC measures voltage on NTC.
- * and res_to_temp table is strictly sorted by falling resistance values.
- */
-static int ab8500_voltage_to_temp(struct ab8500_gpadc_cfg *cfg,
- int v_ntc, int *temp)
-{
- int r_ntc, i = 0, tbl_sz = cfg->tbl_sz;
- const struct abx500_res_to_temp *tbl = cfg->temp_tbl;
-
- if (cfg->vcc < 0 || v_ntc >= cfg->vcc)
- return -EINVAL;
-
- r_ntc = v_ntc * cfg->r_up / (cfg->vcc - v_ntc);
- if (r_ntc > tbl[0].resist || r_ntc < tbl[tbl_sz - 1].resist)
- return -EINVAL;
-
- while (!(r_ntc <= tbl[i].resist && r_ntc > tbl[i + 1].resist) &&
- i < tbl_sz - 2)
- i++;
-
- /* return milli-Celsius */
- *temp = tbl[i].temp * 1000 + ((tbl[i + 1].temp - tbl[i].temp) * 1000 *
- (r_ntc - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
-
- return 0;
-}
-
-static int ab8500_read_sensor(struct abx500_temp *data, u8 sensor, int *temp)
-{
- int voltage, ret;
- struct ab8500_temp *ab8500_data = data->plat_data;
-
- if (sensor == AB8500_SENSOR_BTEMP_BALL) {
- *temp = ab8500_btemp_get_temp(ab8500_data->btemp);
- } else if (sensor == AB8500_SENSOR_BAT_CTRL) {
- *temp = ab8500_btemp_get_batctrl_temp(ab8500_data->btemp);
- } else if (sensor == AB8500_SENSOR_AUX1) {
- ret = iio_read_channel_processed(ab8500_data->aux1, &voltage);
- if (ret < 0)
- return ret;
- ret = ab8500_voltage_to_temp(&ab8500_data->cfg, voltage, temp);
- if (ret < 0)
- return ret;
- } else if (sensor == AB8500_SENSOR_AUX2) {
- ret = iio_read_channel_processed(ab8500_data->aux2, &voltage);
- if (ret < 0)
- return ret;
- ret = ab8500_voltage_to_temp(&ab8500_data->cfg, voltage, temp);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-static void ab8500_thermal_power_off(struct work_struct *work)
-{
- struct ab8500_temp *ab8500_data = container_of(work,
- struct ab8500_temp, power_off_work.work);
- struct abx500_temp *abx500_data = ab8500_data->abx500_data;
-
- dev_warn(&abx500_data->pdev->dev, "Power off due to critical temp\n");
-
- kernel_power_off();
-}
-
-static ssize_t ab8500_show_name(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- return sprintf(buf, "ab8500\n");
-}
-
-static ssize_t ab8500_show_label(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- char *label;
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- int index = attr->index;
-
- switch (index) {
- case 1:
- label = "ext_adc1";
- break;
- case 2:
- label = "ext_adc2";
- break;
- case 3:
- label = "bat_temp";
- break;
- case 4:
- label = "bat_ctrl";
- break;
- default:
- return -EINVAL;
- }
-
- return sprintf(buf, "%s\n", label);
-}
-
-static int ab8500_temp_irq_handler(int irq, struct abx500_temp *data)
-{
- struct ab8500_temp *ab8500_data = data->plat_data;
-
- dev_warn(&data->pdev->dev, "Power off in %d s\n",
- DEFAULT_POWER_OFF_DELAY / HZ);
-
- schedule_delayed_work(&ab8500_data->power_off_work,
- DEFAULT_POWER_OFF_DELAY);
- return 0;
-}
-
-int abx500_hwmon_init(struct abx500_temp *data)
-{
- struct ab8500_temp *ab8500_data;
-
- ab8500_data = devm_kzalloc(&data->pdev->dev, sizeof(*ab8500_data),
- GFP_KERNEL);
- if (!ab8500_data)
- return -ENOMEM;
-
- ab8500_data->btemp = ab8500_btemp_get();
- if (IS_ERR(ab8500_data->btemp))
- return PTR_ERR(ab8500_data->btemp);
-
- INIT_DELAYED_WORK(&ab8500_data->power_off_work,
- ab8500_thermal_power_off);
-
- ab8500_data->cfg.vcc = THERMAL_VCC;
- ab8500_data->cfg.r_up = PULL_UP_RESISTOR;
- ab8500_data->cfg.temp_tbl = ab8500_temp_tbl_a_thermistor;
- ab8500_data->cfg.tbl_sz = ab8500_temp_tbl_a_size;
-
- data->plat_data = ab8500_data;
- ab8500_data->aux1 = devm_iio_channel_get(&data->pdev->dev, "aux1");
- if (IS_ERR(ab8500_data->aux1)) {
- if (PTR_ERR(ab8500_data->aux1) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&data->pdev->dev, "failed to get AUX1 ADC channel\n");
- return PTR_ERR(ab8500_data->aux1);
- }
- ab8500_data->aux2 = devm_iio_channel_get(&data->pdev->dev, "aux2");
- if (IS_ERR(ab8500_data->aux2)) {
- if (PTR_ERR(ab8500_data->aux2) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&data->pdev->dev, "failed to get AUX2 ADC channel\n");
- return PTR_ERR(ab8500_data->aux2);
- }
-
- data->gpadc_addr[0] = AB8500_SENSOR_AUX1;
- data->gpadc_addr[1] = AB8500_SENSOR_AUX2;
- data->gpadc_addr[2] = AB8500_SENSOR_BTEMP_BALL;
- data->gpadc_addr[3] = AB8500_SENSOR_BAT_CTRL;
- data->monitored_sensors = NUM_MONITORED_SENSORS;
-
- data->ops.read_sensor = ab8500_read_sensor;
- data->ops.irq_handler = ab8500_temp_irq_handler;
- data->ops.show_name = ab8500_show_name;
- data->ops.show_label = ab8500_show_label;
- data->ops.is_visible = NULL;
-
- return 0;
-}
-EXPORT_SYMBOL(abx500_hwmon_init);
-
-MODULE_AUTHOR("Hongbo Zhang <hongbo.zhang@linaro.org>");
-MODULE_DESCRIPTION("AB8500 temperature driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
deleted file mode 100644
index 4b9648819836..000000000000
--- a/drivers/hwmon/abx500.c
+++ /dev/null
@@ -1,487 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson 2010 - 2013
- * Author: Martin Persson <martin.persson@stericsson.com>
- * Hongbo Zhang <hongbo.zhang@linaro.org>
- *
- * ABX500 does not provide auto ADC, so to monitor the required temperatures,
- * a periodic work is used. It is more important to not wake up the CPU than
- * to perform this job, hence the use of a deferred delay.
- *
- * A deferred delay for thermal monitor is considered safe because:
- * If the chip gets too hot during a sleep state it's most likely due to
- * external factors, such as the surrounding temperature. I.e. no SW decisions
- * will make any difference.
- */
-
-#include <linux/err.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/workqueue.h>
-#include "abx500.h"
-
-#define DEFAULT_MONITOR_DELAY HZ
-#define DEFAULT_MAX_TEMP 130
-
-static inline void schedule_monitor(struct abx500_temp *data)
-{
- data->work_active = true;
- schedule_delayed_work(&data->work, DEFAULT_MONITOR_DELAY);
-}
-
-static void threshold_updated(struct abx500_temp *data)
-{
- int i;
- for (i = 0; i < data->monitored_sensors; i++)
- if (data->max[i] != 0 || data->min[i] != 0) {
- schedule_monitor(data);
- return;
- }
-
- dev_dbg(&data->pdev->dev, "No active thresholds.\n");
- cancel_delayed_work_sync(&data->work);
- data->work_active = false;
-}
-
-static void gpadc_monitor(struct work_struct *work)
-{
- int temp, i, ret;
- char alarm_node[30];
- bool updated_min_alarm, updated_max_alarm;
- struct abx500_temp *data;
-
- data = container_of(work, struct abx500_temp, work.work);
- mutex_lock(&data->lock);
-
- for (i = 0; i < data->monitored_sensors; i++) {
- /* Thresholds are considered inactive if set to 0 */
- if (data->max[i] == 0 && data->min[i] == 0)
- continue;
-
- if (data->max[i] < data->min[i])
- continue;
-
- ret = data->ops.read_sensor(data, data->gpadc_addr[i], &temp);
- if (ret < 0) {
- dev_err(&data->pdev->dev, "GPADC read failed\n");
- continue;
- }
-
- updated_min_alarm = false;
- updated_max_alarm = false;
-
- if (data->min[i] != 0) {
- if (temp < data->min[i]) {
- if (data->min_alarm[i] == false) {
- data->min_alarm[i] = true;
- updated_min_alarm = true;
- }
- } else {
- if (data->min_alarm[i] == true) {
- data->min_alarm[i] = false;
- updated_min_alarm = true;
- }
- }
- }
- if (data->max[i] != 0) {
- if (temp > data->max[i]) {
- if (data->max_alarm[i] == false) {
- data->max_alarm[i] = true;
- updated_max_alarm = true;
- }
- } else if (temp < data->max[i] - data->max_hyst[i]) {
- if (data->max_alarm[i] == true) {
- data->max_alarm[i] = false;
- updated_max_alarm = true;
- }
- }
- }
-
- if (updated_min_alarm) {
- ret = sprintf(alarm_node, "temp%d_min_alarm", i + 1);
- sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node);
- }
- if (updated_max_alarm) {
- ret = sprintf(alarm_node, "temp%d_max_alarm", i + 1);
- sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node);
- }
- }
-
- schedule_monitor(data);
- mutex_unlock(&data->lock);
-}
-
-/* HWMON sysfs interfaces */
-static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct abx500_temp *data = dev_get_drvdata(dev);
- /* Show chip name */
- return data->ops.show_name(dev, devattr, buf);
-}
-
-static ssize_t label_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct abx500_temp *data = dev_get_drvdata(dev);
- /* Show each sensor label */
- return data->ops.show_label(dev, devattr, buf);
-}
-
-static ssize_t input_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- int ret, temp;
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- u8 gpadc_addr = data->gpadc_addr[attr->index];
-
- ret = data->ops.read_sensor(data, gpadc_addr, &temp);
- if (ret < 0)
- return ret;
-
- return sprintf(buf, "%d\n", temp);
-}
-
-/* Set functions (RW nodes) */
-static ssize_t min_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- unsigned long val;
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- int res = kstrtol(buf, 10, &val);
- if (res < 0)
- return res;
-
- val = clamp_val(val, 0, DEFAULT_MAX_TEMP);
-
- mutex_lock(&data->lock);
- data->min[attr->index] = val;
- threshold_updated(data);
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-static ssize_t max_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- unsigned long val;
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- int res = kstrtol(buf, 10, &val);
- if (res < 0)
- return res;
-
- val = clamp_val(val, 0, DEFAULT_MAX_TEMP);
-
- mutex_lock(&data->lock);
- data->max[attr->index] = val;
- threshold_updated(data);
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-static ssize_t max_hyst_store(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- unsigned long val;
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- int res = kstrtoul(buf, 10, &val);
- if (res < 0)
- return res;
-
- val = clamp_val(val, 0, DEFAULT_MAX_TEMP);
-
- mutex_lock(&data->lock);
- data->max_hyst[attr->index] = val;
- threshold_updated(data);
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-/* Show functions (RO nodes) */
-static ssize_t min_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-
- return sprintf(buf, "%lu\n", data->min[attr->index]);
-}
-
-static ssize_t max_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-
- return sprintf(buf, "%lu\n", data->max[attr->index]);
-}
-
-static ssize_t max_hyst_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-
- return sprintf(buf, "%lu\n", data->max_hyst[attr->index]);
-}
-
-static ssize_t min_alarm_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-
- return sprintf(buf, "%d\n", data->min_alarm[attr->index]);
-}
-
-static ssize_t max_alarm_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct abx500_temp *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-
- return sprintf(buf, "%d\n", data->max_alarm[attr->index]);
-}
-
-static umode_t abx500_attrs_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct abx500_temp *data = dev_get_drvdata(dev);
-
- if (data->ops.is_visible)
- return data->ops.is_visible(attr, n);
-
- return attr->mode;
-}
-
-/* Chip name, required by hwmon */
-static SENSOR_DEVICE_ATTR_RO(name, name, 0);
-
-/* GPADC - SENSOR1 */
-static SENSOR_DEVICE_ATTR_RO(temp1_label, label, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_input, input, 0);
-static SENSOR_DEVICE_ATTR_RW(temp1_min, min, 0);
-static SENSOR_DEVICE_ATTR_RW(temp1_max, max, 0);
-static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, max_hyst, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, min_alarm, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, max_alarm, 0);
-
-/* GPADC - SENSOR2 */
-static SENSOR_DEVICE_ATTR_RO(temp2_label, label, 1);
-static SENSOR_DEVICE_ATTR_RO(temp2_input, input, 1);
-static SENSOR_DEVICE_ATTR_RW(temp2_min, min, 1);
-static SENSOR_DEVICE_ATTR_RW(temp2_max, max, 1);
-static SENSOR_DEVICE_ATTR_RW(temp2_max_hyst, max_hyst, 1);
-static SENSOR_DEVICE_ATTR_RO(temp2_min_alarm, min_alarm, 1);
-static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, max_alarm, 1);
-
-/* GPADC - SENSOR3 */
-static SENSOR_DEVICE_ATTR_RO(temp3_label, label, 2);
-static SENSOR_DEVICE_ATTR_RO(temp3_input, input, 2);
-static SENSOR_DEVICE_ATTR_RW(temp3_min, min, 2);
-static SENSOR_DEVICE_ATTR_RW(temp3_max, max, 2);
-static SENSOR_DEVICE_ATTR_RW(temp3_max_hyst, max_hyst, 2);
-static SENSOR_DEVICE_ATTR_RO(temp3_min_alarm, min_alarm, 2);
-static SENSOR_DEVICE_ATTR_RO(temp3_max_alarm, max_alarm, 2);
-
-/* GPADC - SENSOR4 */
-static SENSOR_DEVICE_ATTR_RO(temp4_label, label, 3);
-static SENSOR_DEVICE_ATTR_RO(temp4_input, input, 3);
-static SENSOR_DEVICE_ATTR_RW(temp4_min, min, 3);
-static SENSOR_DEVICE_ATTR_RW(temp4_max, max, 3);
-static SENSOR_DEVICE_ATTR_RW(temp4_max_hyst, max_hyst, 3);
-static SENSOR_DEVICE_ATTR_RO(temp4_min_alarm, min_alarm, 3);
-static SENSOR_DEVICE_ATTR_RO(temp4_max_alarm, max_alarm, 3);
-
-static struct attribute *abx500_temp_attributes[] = {
- &sensor_dev_attr_name.dev_attr.attr,
-
- &sensor_dev_attr_temp1_label.dev_attr.attr,
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
-
- &sensor_dev_attr_temp2_label.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp2_min.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
-
- &sensor_dev_attr_temp3_label.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp3_min.dev_attr.attr,
- &sensor_dev_attr_temp3_max.dev_attr.attr,
- &sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
-
- &sensor_dev_attr_temp4_label.dev_attr.attr,
- &sensor_dev_attr_temp4_input.dev_attr.attr,
- &sensor_dev_attr_temp4_min.dev_attr.attr,
- &sensor_dev_attr_temp4_max.dev_attr.attr,
- &sensor_dev_attr_temp4_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
- &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group abx500_temp_group = {
- .attrs = abx500_temp_attributes,
- .is_visible = abx500_attrs_visible,
-};
-
-static irqreturn_t abx500_temp_irq_handler(int irq, void *irq_data)
-{
- struct platform_device *pdev = irq_data;
- struct abx500_temp *data = platform_get_drvdata(pdev);
-
- data->ops.irq_handler(irq, data);
- return IRQ_HANDLED;
-}
-
-static int setup_irqs(struct platform_device *pdev)
-{
- int ret;
- int irq = platform_get_irq_byname(pdev, "ABX500_TEMP_WARM");
-
- if (irq < 0) {
- dev_err(&pdev->dev, "Get irq by name failed\n");
- return irq;
- }
-
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
- abx500_temp_irq_handler, 0, "abx500-temp", pdev);
- if (ret < 0)
- dev_err(&pdev->dev, "Request threaded irq failed (%d)\n", ret);
-
- return ret;
-}
-
-static int abx500_temp_probe(struct platform_device *pdev)
-{
- struct abx500_temp *data;
- int err;
-
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->pdev = pdev;
- mutex_init(&data->lock);
-
- /* Chip specific initialization */
- err = abx500_hwmon_init(data);
- if (err < 0 || !data->ops.read_sensor || !data->ops.show_name ||
- !data->ops.show_label)
- return err;
-
- INIT_DEFERRABLE_WORK(&data->work, gpadc_monitor);
-
- platform_set_drvdata(pdev, data);
-
- err = sysfs_create_group(&pdev->dev.kobj, &abx500_temp_group);
- if (err < 0) {
- dev_err(&pdev->dev, "Create sysfs group failed (%d)\n", err);
- return err;
- }
-
- data->hwmon_dev = hwmon_device_register(&pdev->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
- goto exit_sysfs_group;
- }
-
- if (data->ops.irq_handler) {
- err = setup_irqs(pdev);
- if (err < 0)
- goto exit_hwmon_reg;
- }
- return 0;
-
-exit_hwmon_reg:
- hwmon_device_unregister(data->hwmon_dev);
-exit_sysfs_group:
- sysfs_remove_group(&pdev->dev.kobj, &abx500_temp_group);
- return err;
-}
-
-static int abx500_temp_remove(struct platform_device *pdev)
-{
- struct abx500_temp *data = platform_get_drvdata(pdev);
-
- cancel_delayed_work_sync(&data->work);
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&pdev->dev.kobj, &abx500_temp_group);
-
- return 0;
-}
-
-static int abx500_temp_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- struct abx500_temp *data = platform_get_drvdata(pdev);
-
- if (data->work_active)
- cancel_delayed_work_sync(&data->work);
-
- return 0;
-}
-
-static int abx500_temp_resume(struct platform_device *pdev)
-{
- struct abx500_temp *data = platform_get_drvdata(pdev);
-
- if (data->work_active)
- schedule_monitor(data);
-
- return 0;
-}
-
-#ifdef CONFIG_OF
-static const struct of_device_id abx500_temp_match[] = {
- { .compatible = "stericsson,abx500-temp" },
- {},
-};
-MODULE_DEVICE_TABLE(of, abx500_temp_match);
-#endif
-
-static struct platform_driver abx500_temp_driver = {
- .driver = {
- .name = "abx500-temp",
- .of_match_table = of_match_ptr(abx500_temp_match),
- },
- .suspend = abx500_temp_suspend,
- .resume = abx500_temp_resume,
- .probe = abx500_temp_probe,
- .remove = abx500_temp_remove,
-};
-
-module_platform_driver(abx500_temp_driver);
-
-MODULE_AUTHOR("Martin Persson <martin.persson@stericsson.com>");
-MODULE_DESCRIPTION("ABX500 temperature driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/abx500.h b/drivers/hwmon/abx500.h
deleted file mode 100644
index 4517594260f2..000000000000
--- a/drivers/hwmon/abx500.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson 2010 - 2013
- * Author: Martin Persson <martin.persson@stericsson.com>
- * Hongbo Zhang <hongbo.zhang@linaro.com>
- */
-
-#ifndef _ABX500_H
-#define _ABX500_H
-
-#define NUM_SENSORS 5
-
-struct abx500_temp;
-
-/*
- * struct abx500_temp_ops - abx500 chip specific ops
- * @read_sensor: reads gpadc output
- * @irq_handler: irq handler
- * @show_name: hwmon device name
- * @show_label: hwmon attribute label
- * @is_visible: is attribute visible
- */
-struct abx500_temp_ops {
- int (*read_sensor)(struct abx500_temp *, u8, int *);
- int (*irq_handler)(int, struct abx500_temp *);
- ssize_t (*show_name)(struct device *,
- struct device_attribute *, char *);
- ssize_t (*show_label) (struct device *,
- struct device_attribute *, char *);
- int (*is_visible)(struct attribute *, int);
-};
-
-/*
- * struct abx500_temp - representation of temp mon device
- * @pdev: platform device
- * @hwmon_dev: hwmon device
- * @ops: abx500 chip specific ops
- * @gpadc_addr: gpadc channel address
- * @min: sensor temperature min value
- * @max: sensor temperature max value
- * @max_hyst: sensor temperature hysteresis value for max limit
- * @min_alarm: sensor temperature min alarm
- * @max_alarm: sensor temperature max alarm
- * @work: delayed work scheduled to monitor temperature periodically
- * @work_active: True if work is active
- * @lock: mutex
- * @monitored_sensors: number of monitored sensors
- * @plat_data: private usage, usually points to platform specific data
- */
-struct abx500_temp {
- struct platform_device *pdev;
- struct device *hwmon_dev;
- struct abx500_temp_ops ops;
- u8 gpadc_addr[NUM_SENSORS];
- unsigned long min[NUM_SENSORS];
- unsigned long max[NUM_SENSORS];
- unsigned long max_hyst[NUM_SENSORS];
- bool min_alarm[NUM_SENSORS];
- bool max_alarm[NUM_SENSORS];
- struct delayed_work work;
- bool work_active;
- struct mutex lock;
- int monitored_sensors;
- void *plat_data;
-};
-
-int abx500_hwmon_init(struct abx500_temp *data);
-
-#endif /* _ABX500_H */
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 848718ab7312..7d3ddcba34ce 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -161,7 +161,7 @@ static ssize_t set_avg_interval(struct device *dev,
mutex_lock(&resource->lock);
status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PAI",
&args, &data);
- if (!ACPI_FAILURE(status))
+ if (ACPI_SUCCESS(status))
resource->avg_interval = temp;
mutex_unlock(&resource->lock);
@@ -232,7 +232,7 @@ static ssize_t set_cap(struct device *dev, struct device_attribute *devattr,
mutex_lock(&resource->lock);
status = acpi_evaluate_integer(resource->acpi_dev->handle, "_SHL",
&args, &data);
- if (!ACPI_FAILURE(status))
+ if (ACPI_SUCCESS(status))
resource->cap = temp;
mutex_unlock(&resource->lock);
diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c
new file mode 100644
index 000000000000..2d9770cb4401
--- /dev/null
+++ b/drivers/hwmon/aht10.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * aht10.c - Linux hwmon driver for AHT10 Temperature and Humidity sensor
+ * Copyright (C) 2020 Johannes Cornelis Draaijer
+ */
+
+#include <linux/delay.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+
+#define AHT10_MEAS_SIZE 6
+
+/*
+ * Poll intervals (in milliseconds)
+ */
+#define AHT10_DEFAULT_MIN_POLL_INTERVAL 2000
+#define AHT10_MIN_POLL_INTERVAL 2000
+
+/*
+ * I2C command delays (in microseconds)
+ */
+#define AHT10_MEAS_DELAY 80000
+#define AHT10_CMD_DELAY 350000
+#define AHT10_DELAY_EXTRA 100000
+
+/*
+ * Command bytes
+ */
+#define AHT10_CMD_INIT 0b11100001
+#define AHT10_CMD_MEAS 0b10101100
+#define AHT10_CMD_RST 0b10111010
+
+/*
+ * Flags in the answer byte/command
+ */
+#define AHT10_CAL_ENABLED BIT(3)
+#define AHT10_BUSY BIT(7)
+#define AHT10_MODE_NOR (BIT(5) | BIT(6))
+#define AHT10_MODE_CYC BIT(5)
+#define AHT10_MODE_CMD BIT(6)
+
+#define AHT10_MAX_POLL_INTERVAL_LEN 30
+
+/**
+ * struct aht10_data - All the data required to operate an AHT10 chip
+ * @client: the i2c client associated with the AHT10
+ * @lock: a mutex that is used to prevent parallel access to the
+ * i2c client
+ * @min_poll_interval: the minimum poll interval
+ * While the poll rate limit is not 100% necessary,
+ * the datasheet recommends that a measurement
+ * is not performed too often to prevent
+ * the chip from warming up due to the heat it generates.
+ *                     If it's unwanted, it can be disabled by setting
+ *                     it to 0. The default value is 2000 ms.
+ * @previous_poll_time: the previous time that the AHT10
+ * was polled
+ * @temperature: the latest temperature value received from
+ * the AHT10
+ * @humidity: the latest humidity value received from the
+ * AHT10
+ */
+
+struct aht10_data {
+ struct i2c_client *client;
+ /*
+ * Prevent simultaneous access to the i2c
+ * client and previous_poll_time
+ */
+ struct mutex lock;
+ ktime_t min_poll_interval;
+ ktime_t previous_poll_time;
+ int temperature;
+ int humidity;
+};
+
+/**
+ * aht10_init() - Initialize an AHT10 chip
+ * @data: the data associated with this AHT10 chip
+ * Return: 0 if successful, a negative error code otherwise
+ */
+static int aht10_init(struct aht10_data *data)
+{
+ const u8 cmd_init[] = {AHT10_CMD_INIT, AHT10_CAL_ENABLED | AHT10_MODE_CYC,
+ 0x00};
+ int res;
+ u8 status;
+ struct i2c_client *client = data->client;
+
+ res = i2c_master_send(client, cmd_init, 3);
+ if (res < 0)
+ return res;
+
+ usleep_range(AHT10_CMD_DELAY, AHT10_CMD_DELAY +
+ AHT10_DELAY_EXTRA);
+
+ res = i2c_master_recv(client, &status, 1);
+ if (res != 1)
+ return -ENODATA;
+
+ if (status & AHT10_BUSY)
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
+ * aht10_polltime_expired() - check if the minimum poll interval has
+ * expired
+ * @data: the data containing the time to compare
+ * Return: 1 if the minimum poll interval has expired, 0 if not
+ */
+static int aht10_polltime_expired(struct aht10_data *data)
+{
+ ktime_t current_time = ktime_get_boottime();
+ ktime_t difference = ktime_sub(current_time, data->previous_poll_time);
+
+ return ktime_after(difference, data->min_poll_interval);
+}
+
+/**
+ * aht10_read_values() - read and parse the raw data from the AHT10
+ * @data: the struct aht10_data holding the i2c client and lock
+ * Return: 0 if successful, a negative error code otherwise
+ */
+static int aht10_read_values(struct aht10_data *data)
+{
+ const u8 cmd_meas[] = {AHT10_CMD_MEAS, 0x33, 0x00};
+ u32 temp, hum;
+ int res;
+ u8 raw_data[AHT10_MEAS_SIZE];
+ struct i2c_client *client = data->client;
+
+ mutex_lock(&data->lock);
+ if (aht10_polltime_expired(data)) {
+ res = i2c_master_send(client, cmd_meas, sizeof(cmd_meas));
+ if (res < 0) {
+ mutex_unlock(&data->lock);
+ return res;
+ }
+
+ usleep_range(AHT10_MEAS_DELAY,
+ AHT10_MEAS_DELAY + AHT10_DELAY_EXTRA);
+
+ res = i2c_master_recv(client, raw_data, AHT10_MEAS_SIZE);
+ if (res != AHT10_MEAS_SIZE) {
+ mutex_unlock(&data->lock);
+ if (res >= 0)
+ return -ENODATA;
+ else
+ return res;
+ }
+
+ hum = ((u32)raw_data[1] << 12u) |
+ ((u32)raw_data[2] << 4u) |
+ ((raw_data[3] & 0xF0u) >> 4u);
+
+ temp = ((u32)(raw_data[3] & 0x0Fu) << 16u) |
+ ((u32)raw_data[4] << 8u) |
+ raw_data[5];
+
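+		/*
+		 * Both readings are 20 bit raw values. Scale them to
+		 * millipercent RH and millidegrees C:
+		 *   hum:  ((raw * 625) >> 16) * 10 == raw * 100000 / 2^20
+		 *   temp: ((raw * 625) >> 15) * 10 == raw * 200000 / 2^20
+		 * The temperature is then offset by -50000 (i.e. -50 degrees C).
+		 */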
+ temp = ((temp * 625) >> 15u) * 10;
+ hum = ((hum * 625) >> 16u) * 10;
+
+ data->temperature = (int)temp - 50000;
+ data->humidity = hum;
+ data->previous_poll_time = ktime_get_boottime();
+ }
+ mutex_unlock(&data->lock);
+ return 0;
+}
+
+/**
+ * aht10_interval_write() - store the given minimum poll interval.
+ * Values below AHT10_MIN_POLL_INTERVAL are clamped up to it.
+ * Return: always 0
+ */
+static ssize_t aht10_interval_write(struct aht10_data *data,
+ long val)
+{
+ data->min_poll_interval = ms_to_ktime(clamp_val(val, 2000, LONG_MAX));
+ return 0;
+}
+
+/**
+ * aht10_interval_read() - read the minimum poll interval
+ * in milliseconds
+ */
+static ssize_t aht10_interval_read(struct aht10_data *data,
+ long *val)
+{
+ *val = ktime_to_ms(data->min_poll_interval);
+ return 0;
+}
+
+/**
+ * aht10_temperature1_read() - read the temperature in millidegrees
+ */
+static int aht10_temperature1_read(struct aht10_data *data, long *val)
+{
+ int res;
+
+ res = aht10_read_values(data);
+ if (res < 0)
+ return res;
+
+ *val = data->temperature;
+ return 0;
+}
+
+/**
+ * aht10_humidity1_read() - read the relative humidity in millipercent
+ */
+static int aht10_humidity1_read(struct aht10_data *data, long *val)
+{
+ int res;
+
+ res = aht10_read_values(data);
+ if (res < 0)
+ return res;
+
+ *val = data->humidity;
+ return 0;
+}
+
+static umode_t aht10_hwmon_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ case hwmon_humidity:
+ return 0444;
+ case hwmon_chip:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int aht10_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct aht10_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_temp:
+ return aht10_temperature1_read(data, val);
+ case hwmon_humidity:
+ return aht10_humidity1_read(data, val);
+ case hwmon_chip:
+ return aht10_interval_read(data, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int aht10_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct aht10_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_chip:
+ return aht10_interval_write(data, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info *aht10_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ HWMON_CHANNEL_INFO(humidity, HWMON_H_INPUT),
+ NULL,
+};
+
+static const struct hwmon_ops aht10_hwmon_ops = {
+ .is_visible = aht10_hwmon_visible,
+ .read = aht10_hwmon_read,
+ .write = aht10_hwmon_write,
+};
+
+static const struct hwmon_chip_info aht10_chip_info = {
+ .ops = &aht10_hwmon_ops,
+ .info = aht10_info,
+};
+
+static int aht10_probe(struct i2c_client *client,
+ const struct i2c_device_id *aht10_id)
+{
+ struct device *device = &client->dev;
+ struct device *hwmon_dev;
+ struct aht10_data *data;
+ int res;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENOENT;
+
+ data = devm_kzalloc(device, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->min_poll_interval = ms_to_ktime(AHT10_DEFAULT_MIN_POLL_INTERVAL);
+ data->client = client;
+
+ mutex_init(&data->lock);
+
+ res = aht10_init(data);
+ if (res < 0)
+ return res;
+
+ res = aht10_read_values(data);
+ if (res < 0)
+ return res;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(device,
+ client->name,
+ data,
+ &aht10_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id aht10_id[] = {
+ { "aht10", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, aht10_id);
+
+static struct i2c_driver aht10_driver = {
+ .driver = {
+ .name = "aht10",
+ },
+ .probe = aht10_probe,
+ .id_table = aht10_id,
+};
+
+module_i2c_driver(aht10_driver);
+
+MODULE_AUTHOR("Johannes Cornelis Draaijer <jcdra1@gmail.com>");
+MODULE_DESCRIPTION("AHT10 Temperature and Humidity sensor driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/amd_energy.c b/drivers/hwmon/amd_energy.c
index 822c2e74b98d..a86cc8d6d93d 100644
--- a/drivers/hwmon/amd_energy.c
+++ b/drivers/hwmon/amd_energy.c
@@ -333,6 +333,7 @@ static struct platform_device *amd_energy_platdev;
static const struct x86_cpu_id cpu_ids[] __initconst = {
X86_MATCH_VENDOR_FAM_MODEL(AMD, 0x17, 0x31, NULL),
X86_MATCH_VENDOR_FAM_MODEL(AMD, 0x19, 0x01, NULL),
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 0x19, 0x30, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, cpu_ids);
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 89207af81c48..28b137eedf2e 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -565,7 +565,7 @@ static int applesmc_init_index(struct applesmc_registers *s)
static int applesmc_init_smcreg_try(void)
{
struct applesmc_registers *s = &smcreg;
- bool left_light_sensor = 0, right_light_sensor = 0;
+ bool left_light_sensor = false, right_light_sensor = false;
unsigned int count;
u8 tmp[1];
int ret;
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 3d8239fd66ed..3cb88d6fbec0 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -620,7 +620,7 @@ static ssize_t rpm_show(struct device *dev, struct device_attribute *attr,
static umode_t pwm_is_visible(struct kobject *kobj,
struct attribute *a, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct aspeed_pwm_tacho_data *priv = dev_get_drvdata(dev);
if (!priv->pwm_present[index])
@@ -631,7 +631,7 @@ static umode_t pwm_is_visible(struct kobject *kobj,
static umode_t fan_dev_is_visible(struct kobject *kobj,
struct attribute *a, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct aspeed_pwm_tacho_data *priv = dev_get_drvdata(dev);
if (!priv->fan_tach_present[index])
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index 4af2fc309c28..ed6c5df94fdf 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -299,7 +299,7 @@ static ssize_t label_show(struct device *dev,
static umode_t da9052_channel_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
struct device_attribute *dattr = container_of(attr,
struct device_attribute, attr);
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index ec448f5f2dc3..73b9db9e3aab 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1159,6 +1159,13 @@ static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
},
},
+ {
+ .ident = "Dell XPS 15 L502X",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L502X"),
+ },
+ },
{ }
};
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 3ea4021f267c..befe989ca7b9 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -299,7 +299,7 @@ static DEVICE_ATTR(fan1_target, 0644, fan1_input_show, set_rpm);
static umode_t gpio_fan_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct gpio_fan_data *data = dev_get_drvdata(dev);
if (index == 0 && !data->alarm_gpio)
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 3bc2551577a3..5ff3669c2b60 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -448,7 +448,8 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
data->is_zen = true;
switch (boot_cpu_data.x86_model) {
- case 0x0 ... 0x1: /* Zen3 */
+ case 0x0 ... 0x1: /* Zen3 SP3/TR */
+ case 0x21: /* Zen3 Ryzen Desktop */
k10temp_get_ccd_support(pdev, data, 8);
break;
}
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index ae2b84263a44..40eab3349904 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -22,9 +22,9 @@
#include <linux/hwmon.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/of_device.h>
#include <linux/acpi.h>
#define DRVNAME "lm70"
@@ -173,25 +173,15 @@ MODULE_DEVICE_TABLE(acpi, lm70_acpi_ids);
static int lm70_probe(struct spi_device *spi)
{
- const struct of_device_id *of_match;
struct device *hwmon_dev;
struct lm70 *p_lm70;
int chip;
- of_match = of_match_device(lm70_of_ids, &spi->dev);
- if (of_match)
- chip = (int)(uintptr_t)of_match->data;
- else {
-#ifdef CONFIG_ACPI
- const struct acpi_device_id *acpi_match;
+ if (dev_fwnode(&spi->dev))
+ chip = (int)(uintptr_t)device_get_match_data(&spi->dev);
+ else
+ chip = spi_get_device_id(spi)->driver_data;
- acpi_match = acpi_match_device(lm70_acpi_ids, &spi->dev);
- if (acpi_match)
- chip = (int)(uintptr_t)acpi_match->driver_data;
- else
-#endif
- chip = spi_get_device_id(spi)->driver_data;
- }
/* signaling is SPI_MODE_0 */
if (spi->mode & (SPI_CPOL | SPI_CPHA))
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index cc7f2980fe83..f8d4534ce172 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -321,7 +321,7 @@ static SENSOR_DEVICE_ATTR_RO(gpio2_alarm, alarm, MAX6650_ALRM_GPIO2);
static umode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct max6650_data *data = dev_get_drvdata(dev);
struct device_attribute *devattr;
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index 7f7e30f0de7b..a23047a3bfe2 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -169,6 +169,7 @@ superio_exit(int ioreg)
#define NCT6683_CUSTOMER_ID_INTEL 0x805
#define NCT6683_CUSTOMER_ID_MITAC 0xa0e
#define NCT6683_CUSTOMER_ID_MSI 0x201
+#define NCT6683_CUSTOMER_ID_ASROCK 0xe2c
#define NCT6683_REG_BUILD_YEAR 0x604
#define NCT6683_REG_BUILD_MONTH 0x605
@@ -1225,6 +1226,8 @@ static int nct6683_probe(struct platform_device *pdev)
break;
case NCT6683_CUSTOMER_ID_MSI:
break;
+ case NCT6683_CUSTOMER_ID_ASROCK:
+ break;
default:
if (!force)
return -ENODEV;
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 94f4b8b4a2ba..6a9ba23cd302 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -1700,8 +1700,8 @@ static int __init pc87360_device_add(unsigned short address)
continue;
res[res_count].start = extra_isa[i];
res[res_count].end = extra_isa[i] + PC87360_EXTENT - 1;
- res[res_count].name = "pc87360",
- res[res_count].flags = IORESOURCE_IO,
+ res[res_count].name = "pc87360";
+ res[res_count].flags = IORESOURCE_IO;
err = acpi_check_resource_conflict(&res[res_count]);
if (err)
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 03606d4298a4..32d2fc850621 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -158,10 +158,10 @@ config SENSORS_MAX16064
be called max16064.
config SENSORS_MAX16601
- tristate "Maxim MAX16601"
+ tristate "Maxim MAX16508, MAX16601"
help
If you say yes here you get hardware monitoring support for Maxim
- MAX16601.
+ MAX16508 and MAX16601.
This driver can also be built as a module. If so, the module will
be called max16601.
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index d6bbbb223871..ffde5aaa5036 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -472,7 +472,7 @@ static struct pmbus_driver_info ibm_cffps_info[] = {
};
static struct pmbus_platform_data ibm_cffps_pdata = {
- .flags = PMBUS_SKIP_STATUS_CHECK,
+ .flags = PMBUS_SKIP_STATUS_CHECK | PMBUS_NO_CAPABILITY,
};
static int ibm_cffps_probe(struct i2c_client *client)
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index c75a6bf39641..e9a66fd9e144 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -371,21 +371,18 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
case PMBUS_VIN_OV_WARN_LIMIT:
word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0, reg, word);
- pmbus_clear_cache(client);
break;
case PMBUS_IIN_OC_WARN_LIMIT:
word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0,
LM25066_MFR_IIN_OC_WARN_LIMIT,
word);
- pmbus_clear_cache(client);
break;
case PMBUS_PIN_OP_WARN_LIMIT:
word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0,
LM25066_MFR_PIN_OP_WARN_LIMIT,
word);
- pmbus_clear_cache(client);
break;
case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
/* Adjust from VIN coefficients (for LM25056) */
@@ -393,7 +390,6 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0,
LM25056_VAUX_UV_WARN_LIMIT, word);
- pmbus_clear_cache(client);
break;
case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
/* Adjust from VIN coefficients (for LM25056) */
@@ -401,7 +397,6 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
ret = pmbus_write_word_data(client, 0,
LM25056_VAUX_OV_WARN_LIMIT, word);
- pmbus_clear_cache(client);
break;
case PMBUS_VIRT_RESET_PIN_HISTORY:
ret = pmbus_write_byte(client, 0, LM25066_CLEAR_PIN_PEAK);
diff --git a/drivers/hwmon/pmbus/max16601.c b/drivers/hwmon/pmbus/max16601.c
index a960b86e72d2..0d1204c2dd54 100644
--- a/drivers/hwmon/pmbus/max16601.c
+++ b/drivers/hwmon/pmbus/max16601.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Hardware monitoring driver for Maxim MAX16601
+ * Hardware monitoring driver for Maxim MAX16508 and MAX16601.
*
* Implementation notes:
*
- * Ths chip supports two rails, VCORE and VSA. Telemetry information for the
- * two rails is reported in two subsequent I2C addresses. The driver
+ * This chip series supports two rails, VCORE and VSA. Telemetry information
+ * for the two rails is reported in two subsequent I2C addresses. The driver
* instantiates a dummy I2C client at the second I2C address to report
* information for the VSA rail in a single instance of the driver.
* Telemetry for the VSA rail is reported to the PMBus core in PMBus page 2.
@@ -31,6 +31,9 @@
#include "pmbus.h"
+enum chips { max16508, max16601 };
+
+#define REG_DEFAULT_NUM_POP 0xc4
#define REG_SETPT_DVID 0xd1
#define DAC_10MV_MODE BIT(4)
#define REG_IOUT_AVG_PK 0xee
@@ -40,7 +43,10 @@
#define CORE_RAIL_INDICATOR BIT(7)
#define REG_PHASE_REPORTING 0xf4
+#define MAX16601_NUM_PHASES 8
+
struct max16601_data {
+ enum chips id;
struct pmbus_driver_info info;
struct i2c_client *vsa;
int iout_avg_pkg;
@@ -185,6 +191,7 @@ static int max16601_write_word(struct i2c_client *client, int page, int reg,
static int max16601_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
+ struct max16601_data *data = to_max16601_data(info);
int reg;
reg = i2c_smbus_read_byte_data(client, REG_SETPT_DVID);
@@ -195,6 +202,21 @@ static int max16601_identify(struct i2c_client *client,
else
info->vrm_version[0] = vr12;
+ if (data->id != max16601)
+ return 0;
+
+ reg = i2c_smbus_read_byte_data(client, REG_DEFAULT_NUM_POP);
+ if (reg < 0)
+ return reg;
+
+ /*
+ * If REG_DEFAULT_NUM_POP returns 0, we don't know how many phases
+ * are populated. Stick with the default in that case.
+ */
+ reg &= 0x0f;
+ if (reg && reg <= MAX16601_NUM_PHASES)
+ info->phases[0] = reg;
+
return 0;
}
@@ -216,7 +238,7 @@ static struct pmbus_driver_info max16601_info = {
.func[2] = PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_INPUT |
PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_PAGE_VIRTUAL,
- .phases[0] = 8,
+ .phases[0] = MAX16601_NUM_PHASES,
.pfunc[0] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP,
.pfunc[1] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT,
.pfunc[2] = PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP,
@@ -239,28 +261,61 @@ static void max16601_remove(void *_data)
i2c_unregister_device(data->vsa);
}
-static int max16601_probe(struct i2c_client *client)
+static const struct i2c_device_id max16601_id[] = {
+ {"max16508", max16508},
+ {"max16601", max16601},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, max16601_id);
+
+static int max16601_get_id(struct i2c_client *client)
{
struct device *dev = &client->dev;
u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
- struct max16601_data *data;
+ enum chips id;
int ret;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_READ_BYTE_DATA |
- I2C_FUNC_SMBUS_READ_BLOCK_DATA))
- return -ENODEV;
-
ret = i2c_smbus_read_block_data(client, PMBUS_IC_DEVICE_ID, buf);
- if (ret < 0)
+ if (ret < 0 || ret < 11)
return -ENODEV;
- /* PMBUS_IC_DEVICE_ID is expected to return "MAX16601y.xx" */
- if (ret < 11 || strncmp(buf, "MAX16601", 8)) {
+ /*
+ * PMBUS_IC_DEVICE_ID is expected to return "MAX16601y.xx"
+ * or "MAX16500y.xx".
+ */
+ if (!strncmp(buf, "MAX16500", 8)) {
+ id = max16508;
+ } else if (!strncmp(buf, "MAX16601", 8)) {
+ id = max16601;
+ } else {
buf[ret] = '\0';
dev_err(dev, "Unsupported chip '%s'\n", buf);
return -ENODEV;
}
+ return id;
+}
+
+static int max16601_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ const struct i2c_device_id *id;
+ struct max16601_data *data;
+ int ret, chip_id;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA))
+ return -ENODEV;
+
+ chip_id = max16601_get_id(client);
+ if (chip_id < 0)
+ return chip_id;
+
+ id = i2c_match_id(max16601_id, client);
+ if (chip_id != id->driver_data)
+ dev_warn(&client->dev,
+ "Device mismatch: Configured %s (%d), detected %d\n",
+ id->name, (int) id->driver_data, chip_id);
ret = i2c_smbus_read_byte_data(client, REG_PHASE_ID);
if (ret < 0)
@@ -275,6 +330,7 @@ static int max16601_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
+ data->id = chip_id;
data->iout_avg_pkg = 0xfc00;
data->vsa = i2c_new_dummy_device(client->adapter, client->addr + 1);
if (IS_ERR(data->vsa)) {
@@ -290,13 +346,6 @@ static int max16601_probe(struct i2c_client *client)
return pmbus_do_probe(client, &data->info);
}
-static const struct i2c_device_id max16601_id[] = {
- {"max16601", 0},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, max16601_id);
-
static struct i2c_driver max16601_driver = {
.driver = {
.name = "max16601",
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
index e5a9f4019cd5..17489abc49d5 100644
--- a/drivers/hwmon/pmbus/max31785.c
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -17,6 +17,7 @@ enum max31785_regs {
#define MAX31785 0x3030
#define MAX31785A 0x3040
+#define MAX31785B 0x3061
#define MFR_FAN_CONFIG_DUAL_TACH BIT(12)
@@ -329,7 +330,7 @@ static int max31785_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct pmbus_driver_info *info;
bool dual_tach = false;
- s64 ret;
+ int ret;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA |
@@ -350,12 +351,14 @@ static int max31785_probe(struct i2c_client *client)
if (ret < 0)
return ret;
- if (ret == MAX31785A) {
+ if (ret == MAX31785A || ret == MAX31785B) {
dual_tach = true;
} else if (ret == MAX31785) {
- if (!strcmp("max31785a", client->name))
- dev_warn(dev, "Expected max3175a, found max31785: cannot provide secondary tachometer readings\n");
+ if (!strcmp("max31785a", client->name) ||
+ !strcmp("max31785b", client->name))
+ dev_warn(dev, "Expected max31785a/b, found max31785: cannot provide secondary tachometer readings\n");
} else {
+ dev_err(dev, "Unrecognized MAX31785 revision: %x\n", ret);
return -ENODEV;
}
@@ -371,6 +374,7 @@ static int max31785_probe(struct i2c_client *client)
static const struct i2c_device_id max31785_id[] = {
{ "max31785", 0 },
{ "max31785a", 0 },
+ { "max31785b", 0 },
{ },
};
@@ -379,6 +383,7 @@ MODULE_DEVICE_TABLE(i2c, max31785_id);
static const struct of_device_id max31785_of_match[] = {
{ .compatible = "maxim,max31785" },
{ .compatible = "maxim,max31785a" },
+ { .compatible = "maxim,max31785b" },
{ },
};
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 192442b3b7a2..aadea85fe630 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -974,7 +974,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
if (ret < 0)
rv = ret;
else
- sensor->data = regval;
+ sensor->data = -ENODATA;
mutex_unlock(&data->update_lock);
return rv;
}
@@ -1262,7 +1262,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
* which global bit is set) for this page is accessible.
*/
if (!ret && attr->gbit &&
- (!upper || (upper && data->has_status_word)) &&
+ (!upper || data->has_status_word) &&
pmbus_check_status_register(client, page)) {
ret = pmbus_add_boolean(data, name, "alarm", index,
NULL, NULL,
@@ -2204,9 +2204,11 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
}
/* Enable PEC if the controller supports it */
- ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
- if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
- client->flags |= I2C_CLIENT_PEC;
+ if (!(data->flags & PMBUS_NO_CAPABILITY)) {
+ ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
+ if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
+ client->flags |= I2C_CLIENT_PEC;
+ }
/*
* Check if the chip is write protected. If it is, we can not clear
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 111a91dc6b79..17518b4cab1b 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -21,15 +21,21 @@
#define MAX_PWM 255
+struct pwm_fan_tach {
+ int irq;
+ atomic_t pulses;
+ unsigned int rpm;
+ u8 pulses_per_revolution;
+};
+
struct pwm_fan_ctx {
struct mutex lock;
struct pwm_device *pwm;
+ struct pwm_state pwm_state;
struct regulator *reg_en;
- int irq;
- atomic_t pulses;
- unsigned int rpm;
- u8 pulses_per_revolution;
+ int tach_count;
+ struct pwm_fan_tach *tachs;
ktime_t sample_start;
struct timer_list rpm_timer;
@@ -40,6 +46,7 @@ struct pwm_fan_ctx {
struct thermal_cooling_device *cdev;
struct hwmon_chip_info info;
+ struct hwmon_channel_info fan_channel;
};
static const u32 pwm_fan_channel_config_pwm[] = {
@@ -52,22 +59,12 @@ static const struct hwmon_channel_info pwm_fan_channel_pwm = {
.config = pwm_fan_channel_config_pwm,
};
-static const u32 pwm_fan_channel_config_fan[] = {
- HWMON_F_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info pwm_fan_channel_fan = {
- .type = hwmon_fan,
- .config = pwm_fan_channel_config_fan,
-};
-
/* This handler assumes self resetting edge triggered interrupt. */
static irqreturn_t pulse_handler(int irq, void *dev_id)
{
- struct pwm_fan_ctx *ctx = dev_id;
+ struct pwm_fan_tach *tach = dev_id;
- atomic_inc(&ctx->pulses);
+ atomic_inc(&tach->pulses);
return IRQ_HANDLED;
}
@@ -76,13 +73,18 @@ static void sample_timer(struct timer_list *t)
{
struct pwm_fan_ctx *ctx = from_timer(ctx, t, rpm_timer);
unsigned int delta = ktime_ms_delta(ktime_get(), ctx->sample_start);
- int pulses;
+ int i;
if (delta) {
- pulses = atomic_read(&ctx->pulses);
- atomic_sub(pulses, &ctx->pulses);
- ctx->rpm = (unsigned int)(pulses * 1000 * 60) /
- (ctx->pulses_per_revolution * delta);
+ for (i = 0; i < ctx->tach_count; i++) {
+ struct pwm_fan_tach *tach = &ctx->tachs[i];
+ int pulses;
+
+ pulses = atomic_read(&tach->pulses);
+ atomic_sub(pulses, &tach->pulses);
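+			/*
+			 * delta is in milliseconds: pulses * 1000 * 60 / delta is
+			 * pulses per minute, divided by pulses_per_revolution to
+			 * get RPM.
+			 */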
+ tach->rpm = (unsigned int)(pulses * 1000 * 60) /
+ (tach->pulses_per_revolution * delta);
+ }
ctx->sample_start = ktime_get();
}
@@ -94,18 +96,17 @@ static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
{
unsigned long period;
int ret = 0;
- struct pwm_state state = { };
+ struct pwm_state *state = &ctx->pwm_state;
mutex_lock(&ctx->lock);
if (ctx->pwm_value == pwm)
goto exit_set_pwm_err;
- pwm_init_state(ctx->pwm, &state);
- period = ctx->pwm->args.period;
- state.duty_cycle = DIV_ROUND_UP(pwm * (period - 1), MAX_PWM);
- state.enabled = pwm ? true : false;
+ period = state->period;
+ state->duty_cycle = DIV_ROUND_UP(pwm * (period - 1), MAX_PWM);
+ state->enabled = pwm ? true : false;
- ret = pwm_apply_state(ctx->pwm, &state);
+ ret = pwm_apply_state(ctx->pwm, state);
if (!ret)
ctx->pwm_value = pwm;
exit_set_pwm_err:
@@ -152,7 +153,7 @@ static int pwm_fan_read(struct device *dev, enum hwmon_sensor_types type,
return 0;
case hwmon_fan:
- *val = ctx->rpm;
+ *val = ctx->tachs[channel].rpm;
return 0;
default:
@@ -287,7 +288,9 @@ static void pwm_fan_regulator_disable(void *data)
static void pwm_fan_pwm_disable(void *__ctx)
{
struct pwm_fan_ctx *ctx = __ctx;
- pwm_disable(ctx->pwm);
+
+ ctx->pwm_state.enabled = false;
+ pwm_apply_state(ctx->pwm, &ctx->pwm_state);
del_timer_sync(&ctx->rpm_timer);
}
@@ -298,9 +301,10 @@ static int pwm_fan_probe(struct platform_device *pdev)
struct pwm_fan_ctx *ctx;
struct device *hwmon;
int ret;
- struct pwm_state state = { };
- int tach_count;
const struct hwmon_channel_info **channels;
+ u32 *fan_channel_config;
+ int channel_count = 1; /* We always have a PWM channel. */
+ int i;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -334,22 +338,20 @@ static int pwm_fan_probe(struct platform_device *pdev)
ctx->pwm_value = MAX_PWM;
- pwm_init_state(ctx->pwm, &state);
+ pwm_init_state(ctx->pwm, &ctx->pwm_state);
+
/*
* __set_pwm assumes that MAX_PWM * (period - 1) fits into an unsigned
* long. Check this here to prevent the fan running at a too low
* frequency.
*/
- if (state.period > ULONG_MAX / MAX_PWM + 1) {
+ if (ctx->pwm_state.period > ULONG_MAX / MAX_PWM + 1) {
dev_err(dev, "Configured period too big\n");
return -EINVAL;
}
/* Set duty cycle to maximum allowed and enable PWM output */
- state.duty_cycle = ctx->pwm->args.period - 1;
- state.enabled = true;
-
- ret = pwm_apply_state(ctx->pwm, &state);
+ ret = __set_pwm(ctx, MAX_PWM);
if (ret) {
dev_err(dev, "Failed to configure PWM: %d\n", ret);
return ret;
@@ -359,27 +361,46 @@ static int pwm_fan_probe(struct platform_device *pdev)
if (ret)
return ret;
- tach_count = platform_irq_count(pdev);
- if (tach_count < 0)
- return dev_err_probe(dev, tach_count,
+ ctx->tach_count = platform_irq_count(pdev);
+ if (ctx->tach_count < 0)
+ return dev_err_probe(dev, ctx->tach_count,
"Could not get number of fan tachometer inputs\n");
+ dev_dbg(dev, "%d fan tachometer inputs\n", ctx->tach_count);
+
+ if (ctx->tach_count) {
+ channel_count++; /* We also have a FAN channel. */
+
+ ctx->tachs = devm_kcalloc(dev, ctx->tach_count,
+ sizeof(struct pwm_fan_tach),
+ GFP_KERNEL);
+ if (!ctx->tachs)
+ return -ENOMEM;
+
+ ctx->fan_channel.type = hwmon_fan;
+ fan_channel_config = devm_kcalloc(dev, ctx->tach_count + 1,
+ sizeof(u32), GFP_KERNEL);
+ if (!fan_channel_config)
+ return -ENOMEM;
+ ctx->fan_channel.config = fan_channel_config;
+ }
- channels = devm_kcalloc(dev, tach_count + 2,
+ channels = devm_kcalloc(dev, channel_count + 1,
sizeof(struct hwmon_channel_info *), GFP_KERNEL);
if (!channels)
return -ENOMEM;
channels[0] = &pwm_fan_channel_pwm;
- if (tach_count > 0) {
+ for (i = 0; i < ctx->tach_count; i++) {
+ struct pwm_fan_tach *tach = &ctx->tachs[i];
u32 ppr = 2;
- ctx->irq = platform_get_irq(pdev, 0);
- if (ctx->irq == -EPROBE_DEFER)
- return ctx->irq;
- if (ctx->irq > 0) {
- ret = devm_request_irq(dev, ctx->irq, pulse_handler, 0,
- pdev->name, ctx);
+ tach->irq = platform_get_irq(pdev, i);
+ if (tach->irq == -EPROBE_DEFER)
+ return tach->irq;
+ if (tach->irq > 0) {
+ ret = devm_request_irq(dev, tach->irq, pulse_handler, 0,
+ pdev->name, tach);
if (ret) {
dev_err(dev,
"Failed to request interrupt: %d\n",
@@ -388,22 +409,27 @@ static int pwm_fan_probe(struct platform_device *pdev)
}
}
- of_property_read_u32(dev->of_node,
- "pulses-per-revolution",
- &ppr);
- ctx->pulses_per_revolution = ppr;
- if (!ctx->pulses_per_revolution) {
+ of_property_read_u32_index(dev->of_node,
+ "pulses-per-revolution",
+ i,
+ &ppr);
+ tach->pulses_per_revolution = ppr;
+ if (!tach->pulses_per_revolution) {
dev_err(dev, "pulses-per-revolution can't be zero.\n");
return -EINVAL;
}
- dev_dbg(dev, "tach: irq=%d, pulses_per_revolution=%d\n",
- ctx->irq, ctx->pulses_per_revolution);
+ fan_channel_config[i] = HWMON_F_INPUT;
+ dev_dbg(dev, "tach%d: irq=%d, pulses_per_revolution=%d\n",
+ i, tach->irq, tach->pulses_per_revolution);
+ }
+
+ if (ctx->tach_count > 0) {
ctx->sample_start = ktime_get();
mod_timer(&ctx->rpm_timer, jiffies + HZ);
- channels[1] = &pwm_fan_channel_fan;
+ channels[1] = &ctx->fan_channel;
}
ctx->info.ops = &pwm_fan_hwmon_ops;
@@ -432,7 +458,6 @@ static int pwm_fan_probe(struct platform_device *pdev)
return ret;
}
ctx->cdev = cdev;
- thermal_cdev_update(cdev);
}
return 0;
@@ -441,17 +466,17 @@ static int pwm_fan_probe(struct platform_device *pdev)
static int pwm_fan_disable(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
- struct pwm_args args;
int ret;
- pwm_get_args(ctx->pwm, &args);
-
if (ctx->pwm_value) {
- ret = pwm_config(ctx->pwm, 0, args.period);
+ /* keep ctx->pwm_state unmodified for pwm_fan_resume() */
+ struct pwm_state state = ctx->pwm_state;
+
+ state.duty_cycle = 0;
+ state.enabled = false;
+ ret = pwm_apply_state(ctx->pwm, &state);
if (ret < 0)
return ret;
-
- pwm_disable(ctx->pwm);
}
if (ctx->reg_en) {
@@ -479,8 +504,6 @@ static int pwm_fan_suspend(struct device *dev)
static int pwm_fan_resume(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
- struct pwm_args pargs;
- unsigned long duty;
int ret;
if (ctx->reg_en) {
@@ -494,12 +517,7 @@ static int pwm_fan_resume(struct device *dev)
if (ctx->pwm_value == 0)
return 0;
- pwm_get_args(ctx->pwm, &pargs);
- duty = DIV_ROUND_UP_ULL(ctx->pwm_value * (pargs.period - 1), MAX_PWM);
- ret = pwm_config(ctx->pwm, duty, pargs.period);
- if (ret)
- return ret;
- return pwm_enable(ctx->pwm);
+ return pwm_apply_state(ctx->pwm, &ctx->pwm_state);
}
#endif
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index b637836b58a1..37531b5c8254 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -682,7 +682,7 @@ static int __init smsc47m1_handle_resources(unsigned short address,
/* Request the resources */
if (!devm_request_region(dev, start, len, DRVNAME)) {
dev_err(dev,
- "Region 0x%hx-0x%hx already in use!\n",
+ "Region 0x%x-0x%x already in use!\n",
start, start + len);
return -EBUSY;
}
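For illustration: an unsigned short passed through a printf-style variadic call is promoted to int, so the plain %x conversion already prints it correctly and the "h" length modifier adds nothing, which is why the specifier above can be simplified. A minimal userspace example:

#include <stdio.h>

int main(void)
{
	unsigned short start = 0x290, len = 0x8;

	/* default argument promotion turns both values into int */
	printf("Region 0x%x-0x%x\n", start, start + len);
	return 0;
}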
diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
new file mode 100644
index 000000000000..c2484f15298b
--- /dev/null
+++ b/drivers/hwmon/tps23861.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020 Sartura Ltd.
+ *
+ * Driver for the TI TPS23861 PoE PSE.
+ *
+ * Author: Robert Marko <robert.marko@sartura.hr>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define TEMPERATURE 0x2c
+#define INPUT_VOLTAGE_LSB 0x2e
+#define INPUT_VOLTAGE_MSB 0x2f
+#define PORT_1_CURRENT_LSB 0x30
+#define PORT_1_CURRENT_MSB 0x31
+#define PORT_1_VOLTAGE_LSB 0x32
+#define PORT_1_VOLTAGE_MSB 0x33
+#define PORT_2_CURRENT_LSB 0x34
+#define PORT_2_CURRENT_MSB 0x35
+#define PORT_2_VOLTAGE_LSB 0x36
+#define PORT_2_VOLTAGE_MSB 0x37
+#define PORT_3_CURRENT_LSB 0x38
+#define PORT_3_CURRENT_MSB 0x39
+#define PORT_3_VOLTAGE_LSB 0x3a
+#define PORT_3_VOLTAGE_MSB 0x3b
+#define PORT_4_CURRENT_LSB 0x3c
+#define PORT_4_CURRENT_MSB 0x3d
+#define PORT_4_VOLTAGE_LSB 0x3e
+#define PORT_4_VOLTAGE_MSB 0x3f
+#define PORT_N_CURRENT_LSB_OFFSET 0x04
+#define PORT_N_VOLTAGE_LSB_OFFSET 0x04
+#define VOLTAGE_CURRENT_MASK GENMASK(13, 0)
+#define PORT_1_RESISTANCE_LSB 0x60
+#define PORT_1_RESISTANCE_MSB 0x61
+#define PORT_2_RESISTANCE_LSB 0x62
+#define PORT_2_RESISTANCE_MSB 0x63
+#define PORT_3_RESISTANCE_LSB 0x64
+#define PORT_3_RESISTANCE_MSB 0x65
+#define PORT_4_RESISTANCE_LSB 0x66
+#define PORT_4_RESISTANCE_MSB 0x67
+#define PORT_N_RESISTANCE_LSB_OFFSET 0x02
+#define PORT_RESISTANCE_MASK GENMASK(13, 0)
+#define PORT_RESISTANCE_RSN_MASK GENMASK(15, 14)
+#define PORT_RESISTANCE_RSN_OTHER 0
+#define PORT_RESISTANCE_RSN_LOW 1
+#define PORT_RESISTANCE_RSN_OPEN 2
+#define PORT_RESISTANCE_RSN_SHORT 3
+#define PORT_1_STATUS 0x0c
+#define PORT_2_STATUS 0x0d
+#define PORT_3_STATUS 0x0e
+#define PORT_4_STATUS 0x0f
+#define PORT_STATUS_CLASS_MASK GENMASK(7, 4)
+#define PORT_STATUS_DETECT_MASK GENMASK(3, 0)
+#define PORT_CLASS_UNKNOWN 0
+#define PORT_CLASS_1 1
+#define PORT_CLASS_2 2
+#define PORT_CLASS_3 3
+#define PORT_CLASS_4 4
+#define PORT_CLASS_RESERVED 5
+#define PORT_CLASS_0 6
+#define PORT_CLASS_OVERCURRENT 7
+#define PORT_CLASS_MISMATCH 8
+#define PORT_DETECT_UNKNOWN 0
+#define PORT_DETECT_SHORT 1
+#define PORT_DETECT_RESERVED 2
+#define PORT_DETECT_RESISTANCE_LOW 3
+#define PORT_DETECT_RESISTANCE_OK 4
+#define PORT_DETECT_RESISTANCE_HIGH 5
+#define PORT_DETECT_OPEN_CIRCUIT 6
+#define PORT_DETECT_RESERVED_2 7
+#define PORT_DETECT_MOSFET_FAULT 8
+#define PORT_DETECT_LEGACY 9
+/* Measurement beyond clamp voltage */
+#define PORT_DETECT_CAPACITANCE_INVALID_BEYOND 10
+/* Insufficient voltage delta */
+#define PORT_DETECT_CAPACITANCE_INVALID_DELTA 11
+#define PORT_DETECT_CAPACITANCE_OUT_OF_RANGE 12
+#define POE_PLUS 0x40
+#define OPERATING_MODE 0x12
+#define OPERATING_MODE_OFF 0
+#define OPERATING_MODE_MANUAL 1
+#define OPERATING_MODE_SEMI 2
+#define OPERATING_MODE_AUTO 3
+#define OPERATING_MODE_PORT_1_MASK GENMASK(1, 0)
+#define OPERATING_MODE_PORT_2_MASK GENMASK(3, 2)
+#define OPERATING_MODE_PORT_3_MASK GENMASK(5, 4)
+#define OPERATING_MODE_PORT_4_MASK GENMASK(7, 6)
+
+#define DETECT_CLASS_RESTART 0x18
+#define POWER_ENABLE 0x19
+#define TPS23861_NUM_PORTS 4
+
+#define TEMPERATURE_LSB 652 /* 0.652 degrees Celsius */
+#define VOLTAGE_LSB 3662 /* 3.662 mV */
+#define SHUNT_RESISTOR_DEFAULT 255000 /* 255 mOhm */
+#define CURRENT_LSB_255 62260 /* 62.260 uA */
+#define CURRENT_LSB_250 61039 /* 61.039 uA */
+#define RESISTANCE_LSB 110966 /* 11.0966 Ohm */
+#define RESISTANCE_LSB_LOW 157216 /* 15.7216 Ohm */
+
+struct tps23861_data {
+ struct regmap *regmap;
+ u32 shunt_resistor;
+ struct i2c_client *client;
+ struct dentry *debugfs_dir;
+};
+
+static struct regmap_config tps23861_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int tps23861_read_temp(struct tps23861_data *data, long *val)
+{
+ unsigned int regval;
+ int err;
+
+ err = regmap_read(data->regmap, TEMPERATURE, &regval);
+ if (err < 0)
+ return err;
+
+ *val = (regval * TEMPERATURE_LSB) - 20000;
+
+ return 0;
+}
+
+static int tps23861_read_voltage(struct tps23861_data *data, int channel,
+ long *val)
+{
+ unsigned int regval;
+ int err;
+
+ if (channel < TPS23861_NUM_PORTS) {
+ err = regmap_bulk_read(data->regmap,
+ PORT_1_VOLTAGE_LSB + channel * PORT_N_VOLTAGE_LSB_OFFSET,
+ &regval, 2);
+ } else {
+ err = regmap_bulk_read(data->regmap,
+ INPUT_VOLTAGE_LSB,
+ &regval, 2);
+ }
+ if (err < 0)
+ return err;
+
+ *val = (FIELD_GET(VOLTAGE_CURRENT_MASK, regval) * VOLTAGE_LSB) / 1000;
+
+ return 0;
+}
+
+static int tps23861_read_current(struct tps23861_data *data, int channel,
+ long *val)
+{
+ unsigned int current_lsb;
+ unsigned int regval;
+ int err;
+
+ if (data->shunt_resistor == SHUNT_RESISTOR_DEFAULT)
+ current_lsb = CURRENT_LSB_255;
+ else
+ current_lsb = CURRENT_LSB_250;
+
+ err = regmap_bulk_read(data->regmap,
+ PORT_1_CURRENT_LSB + channel * PORT_N_CURRENT_LSB_OFFSET,
+ &regval, 2);
+ if (err < 0)
+ return err;
+
+ *val = (FIELD_GET(VOLTAGE_CURRENT_MASK, regval) * current_lsb) / 1000000;
+
+ return 0;
+}
+
+static int tps23861_port_disable(struct tps23861_data *data, int channel)
+{
+ unsigned int regval = 0;
+ int err;
+
+ regval |= BIT(channel + 4);
+ err = regmap_write(data->regmap, POWER_ENABLE, regval);
+
+ return err;
+}
+
+static int tps23861_port_enable(struct tps23861_data *data, int channel)
+{
+ unsigned int regval = 0;
+ int err;
+
+ regval |= BIT(channel);
+ regval |= BIT(channel + 4);
+ err = regmap_write(data->regmap, DETECT_CLASS_RESTART, regval);
+
+ return err;
+}
+
+static umode_t tps23861_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_label:
+ return 0444;
+ case hwmon_in_enable:
+ return 0200;
+ default:
+ return 0;
+ }
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_label:
+ return 0444;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static int tps23861_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct tps23861_data *data = dev_get_drvdata(dev);
+ int err;
+
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_enable:
+ if (val == 0)
+ err = tps23861_port_disable(data, channel);
+ else if (val == 1)
+ err = tps23861_port_enable(data, channel);
+ else
+ err = -EINVAL;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int tps23861_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct tps23861_data *data = dev_get_drvdata(dev);
+ int err;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ err = tps23861_read_temp(data, val);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ err = tps23861_read_voltage(data, channel, val);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ err = tps23861_read_current(data, channel, val);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static const char * const tps23861_port_label[] = {
+ "Port1",
+ "Port2",
+ "Port3",
+ "Port4",
+ "Input",
+};
+
+static int tps23861_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_in:
+ case hwmon_curr:
+ *str = tps23861_port_label[channel];
+ break;
+ case hwmon_temp:
+ *str = "Die";
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_channel_info *tps23861_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops tps23861_hwmon_ops = {
+ .is_visible = tps23861_is_visible,
+ .write = tps23861_write,
+ .read = tps23861_read,
+ .read_string = tps23861_read_string,
+};
+
+static const struct hwmon_chip_info tps23861_chip_info = {
+ .ops = &tps23861_hwmon_ops,
+ .info = tps23861_info,
+};
+
+static char *tps23861_port_operating_mode(struct tps23861_data *data, int port)
+{
+ unsigned int regval;
+ int mode;
+
+ regmap_read(data->regmap, OPERATING_MODE, &regval);
+
+ switch (port) {
+ case 1:
+ mode = FIELD_GET(OPERATING_MODE_PORT_1_MASK, regval);
+ break;
+ case 2:
+ mode = FIELD_GET(OPERATING_MODE_PORT_2_MASK, regval);
+ break;
+ case 3:
+ mode = FIELD_GET(OPERATING_MODE_PORT_3_MASK, regval);
+ break;
+ case 4:
+ mode = FIELD_GET(OPERATING_MODE_PORT_4_MASK, regval);
+ break;
+ default:
+ mode = -EINVAL;
+ }
+
+ switch (mode) {
+ case OPERATING_MODE_OFF:
+ return "Off";
+ case OPERATING_MODE_MANUAL:
+ return "Manual";
+ case OPERATING_MODE_SEMI:
+ return "Semi-Auto";
+ case OPERATING_MODE_AUTO:
+ return "Auto";
+ default:
+ return "Invalid";
+ }
+}
+
+static char *tps23861_port_detect_status(struct tps23861_data *data, int port)
+{
+ unsigned int regval;
+
+ regmap_read(data->regmap,
+ PORT_1_STATUS + (port - 1),
+ &regval);
+
+ switch (FIELD_GET(PORT_STATUS_DETECT_MASK, regval)) {
+ case PORT_DETECT_UNKNOWN:
+ return "Unknown device";
+ case PORT_DETECT_SHORT:
+ return "Short circuit";
+ case PORT_DETECT_RESISTANCE_LOW:
+ return "Too low resistance";
+ case PORT_DETECT_RESISTANCE_OK:
+ return "Valid resistance";
+ case PORT_DETECT_RESISTANCE_HIGH:
+ return "Too high resistance";
+ case PORT_DETECT_OPEN_CIRCUIT:
+ return "Open circuit";
+ case PORT_DETECT_MOSFET_FAULT:
+ return "MOSFET fault";
+ case PORT_DETECT_LEGACY:
+ return "Legacy device";
+ case PORT_DETECT_CAPACITANCE_INVALID_BEYOND:
+ return "Invalid capacitance, beyond clamp voltage";
+ case PORT_DETECT_CAPACITANCE_INVALID_DELTA:
+ return "Invalid capacitance, insufficient voltage delta";
+ case PORT_DETECT_CAPACITANCE_OUT_OF_RANGE:
+ return "Valid capacitance, outside of legacy range";
+ case PORT_DETECT_RESERVED:
+ case PORT_DETECT_RESERVED_2:
+ default:
+ return "Invalid";
+ }
+}
+
+static char *tps23861_port_class_status(struct tps23861_data *data, int port)
+{
+ unsigned int regval;
+
+ regmap_read(data->regmap,
+ PORT_1_STATUS + (port - 1),
+ &regval);
+
+ switch (FIELD_GET(PORT_STATUS_CLASS_MASK, regval)) {
+ case PORT_CLASS_UNKNOWN:
+ return "Unknown";
+ case PORT_CLASS_RESERVED:
+ case PORT_CLASS_0:
+ return "0";
+ case PORT_CLASS_1:
+ return "1";
+ case PORT_CLASS_2:
+ return "2";
+ case PORT_CLASS_3:
+ return "3";
+ case PORT_CLASS_4:
+ return "4";
+ case PORT_CLASS_OVERCURRENT:
+ return "Overcurrent";
+ case PORT_CLASS_MISMATCH:
+ return "Mismatch";
+ default:
+ return "Invalid";
+ }
+}
+
+static char *tps23861_port_poe_plus_status(struct tps23861_data *data, int port)
+{
+ unsigned int regval;
+
+ regmap_read(data->regmap, POE_PLUS, &regval);
+
+ if (BIT(port + 3) & regval)
+ return "Yes";
+ else
+ return "No";
+}
+
+static int tps23861_port_resistance(struct tps23861_data *data, int port)
+{
+ u16 regval;
+
+ regmap_bulk_read(data->regmap,
+ PORT_1_RESISTANCE_LSB + PORT_N_RESISTANCE_LSB_OFFSET * (port - 1),
+ &regval,
+ 2);
+
+ switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, regval)) {
+ case PORT_RESISTANCE_RSN_OTHER:
+ return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB) / 10000;
+ case PORT_RESISTANCE_RSN_LOW:
+ return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB_LOW) / 10000;
+ case PORT_RESISTANCE_RSN_SHORT:
+ case PORT_RESISTANCE_RSN_OPEN:
+ default:
+ return 0;
+ }
+}
+
+static int tps23861_port_status_show(struct seq_file *s, void *data)
+{
+ struct tps23861_data *priv = s->private;
+ int i;
+
+ for (i = 1; i < TPS23861_NUM_PORTS + 1; i++) {
+ seq_printf(s, "Port: \t\t%d\n", i);
+ seq_printf(s, "Operating mode: %s\n", tps23861_port_operating_mode(priv, i));
+ seq_printf(s, "Detected: \t%s\n", tps23861_port_detect_status(priv, i));
+ seq_printf(s, "Class: \t\t%s\n", tps23861_port_class_status(priv, i));
+ seq_printf(s, "PoE Plus: \t%s\n", tps23861_port_poe_plus_status(priv, i));
+ seq_printf(s, "Resistance: \t%d\n", tps23861_port_resistance(priv, i));
+ seq_putc(s, '\n');
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(tps23861_port_status);
+
+static void tps23861_init_debugfs(struct tps23861_data *data)
+{
+ data->debugfs_dir = debugfs_create_dir(data->client->name, NULL);
+
+ debugfs_create_file("port_status",
+ 0400,
+ data->debugfs_dir,
+ data,
+ &tps23861_port_status_fops);
+}
+
+static int tps23861_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct tps23861_data *data;
+ struct device *hwmon_dev;
+ u32 shunt_resistor;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ i2c_set_clientdata(client, data);
+
+ data->regmap = devm_regmap_init_i2c(client, &tps23861_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ if (!of_property_read_u32(dev->of_node, "shunt-resistor-micro-ohms", &shunt_resistor))
+ data->shunt_resistor = shunt_resistor;
+ else
+ data->shunt_resistor = SHUNT_RESISTOR_DEFAULT;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data, &tps23861_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ tps23861_init_debugfs(data);
+
+ return 0;
+}
+
+static int tps23861_remove(struct i2c_client *client)
+{
+ struct tps23861_data *data = i2c_get_clientdata(client);
+
+ debugfs_remove_recursive(data->debugfs_dir);
+
+ return 0;
+}
+
+static const struct of_device_id __maybe_unused tps23861_of_match[] = {
+ { .compatible = "ti,tps23861", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tps23861_of_match);
+
+static struct i2c_driver tps23861_driver = {
+ .probe_new = tps23861_probe,
+ .remove = tps23861_remove,
+ .driver = {
+ .name = "tps23861",
+ .of_match_table = of_match_ptr(tps23861_of_match),
+ },
+};
+module_i2c_driver(tps23861_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Marko <robert.marko@sartura.hr>");
+MODULE_DESCRIPTION("TI TPS23861 PoE PSE");
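For illustration, a minimal userspace sketch of the scaling performed by the read helpers in this driver: only bits [13:0] of each 16-bit reading carry the measurement, and the LSB constants convert the raw value to millivolts or milliamps. The mask name and the sample raw values are made up for this sketch.

#include <stdint.h>
#include <stdio.h>

#define MEAS_MASK	0x3fffu	/* bits [13:0], as in VOLTAGE_CURRENT_MASK */
#define VOLTAGE_LSB	3662	/* 3.662 mV per LSB */
#define CURRENT_LSB_255	62260	/* 62.260 uA per LSB with the default shunt */

static long raw_to_millivolts(uint16_t raw)
{
	return ((raw & MEAS_MASK) * (long)VOLTAGE_LSB) / 1000;
}

static long raw_to_milliamps(uint16_t raw)
{
	return ((raw & MEAS_MASK) * (long)CURRENT_LSB_255) / 1000000;
}

int main(void)
{
	printf("%ld mV, %ld mA\n", raw_to_millivolts(0x3516), raw_to_milliamps(0x0400));
	return 0;
}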
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 3964ceab2817..8618aaf32350 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1110,7 +1110,7 @@ clear_caseopen(struct device *dev, struct w83627ehf_data *data, int channel,
static umode_t w83627ehf_attrs_visible(struct kobject *kobj,
struct attribute *a, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct w83627ehf_data *data = dev_get_drvdata(dev);
struct device_attribute *devattr;
struct sensor_device_attribute *sda;
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index 3b05560456ea..33eeff94fc2a 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -2,11 +2,12 @@
/*
* OMAP hardware spinlock driver
*
- * Copyright (C) 2010-2015 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2010-2021 Texas Instruments Incorporated - https://www.ti.com
*
* Contact: Simon Que <sque@ti.com>
* Hari Kanigeri <h-kanigeri2@ti.com>
* Ohad Ben-Cohen <ohad@wizery.com>
+ * Suman Anna <s-anna@ti.com>
*/
#include <linux/kernel.h>
@@ -164,6 +165,7 @@ static int omap_hwspinlock_remove(struct platform_device *pdev)
static const struct of_device_id omap_hwspinlock_of_match[] = {
{ .compatible = "ti,omap4-hwspinlock", },
+ { .compatible = "ti,am64-hwspinlock", },
{ .compatible = "ti,am654-hwspinlock", },
{ /* end */ },
};
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index a61313f320bd..e0740c6dbd54 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -401,8 +401,9 @@ static const struct attribute_group *catu_groups[] = {
static inline int catu_wait_for_ready(struct catu_drvdata *drvdata)
{
- return coresight_timeout(drvdata->base,
- CATU_STATUS, CATU_STATUS_READY, 1);
+ struct csdev_access *csa = &drvdata->csdev->access;
+
+ return coresight_timeout(csa, CATU_STATUS, CATU_STATUS_READY, 1);
}
static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
@@ -411,6 +412,7 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
u32 control, mode;
struct etr_buf *etr_buf = data;
struct device *dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
if (catu_wait_for_ready(drvdata))
dev_warn(dev, "Timeout while waiting for READY\n");
@@ -421,7 +423,7 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
return -EBUSY;
}
- rc = coresight_claim_device_unlocked(drvdata->base);
+ rc = coresight_claim_device_unlocked(csdev);
if (rc)
return rc;
@@ -465,9 +467,10 @@ static int catu_disable_hw(struct catu_drvdata *drvdata)
{
int rc = 0;
struct device *dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
catu_write_control(drvdata, 0);
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
if (catu_wait_for_ready(drvdata)) {
dev_info(dev, "Timeout while waiting for READY\n");
rc = -EAGAIN;
@@ -551,6 +554,7 @@ static int catu_probe(struct amba_device *adev, const struct amba_id *id)
dev->platform_data = pdata;
drvdata->base = base;
+ catu_desc.access = CSDEV_ACCESS_IOMEM(base);
catu_desc.pdata = pdata;
catu_desc.dev = dev;
catu_desc.groups = catu_groups;
@@ -567,12 +571,11 @@ out:
return ret;
}
-static int catu_remove(struct amba_device *adev)
+static void catu_remove(struct amba_device *adev)
{
struct catu_drvdata *drvdata = dev_get_drvdata(&adev->dev);
coresight_unregister(drvdata->csdev);
- return 0;
}
static struct amba_id catu_ids[] = {
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 4ba801dffcb7..0062c8935653 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -145,30 +145,32 @@ static int coresight_find_link_outport(struct coresight_device *csdev,
return -ENODEV;
}
-static inline u32 coresight_read_claim_tags(void __iomem *base)
+static inline u32 coresight_read_claim_tags(struct coresight_device *csdev)
{
- return readl_relaxed(base + CORESIGHT_CLAIMCLR);
+ return csdev_access_relaxed_read32(&csdev->access, CORESIGHT_CLAIMCLR);
}
-static inline bool coresight_is_claimed_self_hosted(void __iomem *base)
+static inline bool coresight_is_claimed_self_hosted(struct coresight_device *csdev)
{
- return coresight_read_claim_tags(base) == CORESIGHT_CLAIM_SELF_HOSTED;
+ return coresight_read_claim_tags(csdev) == CORESIGHT_CLAIM_SELF_HOSTED;
}
-static inline bool coresight_is_claimed_any(void __iomem *base)
+static inline bool coresight_is_claimed_any(struct coresight_device *csdev)
{
- return coresight_read_claim_tags(base) != 0;
+ return coresight_read_claim_tags(csdev) != 0;
}
-static inline void coresight_set_claim_tags(void __iomem *base)
+static inline void coresight_set_claim_tags(struct coresight_device *csdev)
{
- writel_relaxed(CORESIGHT_CLAIM_SELF_HOSTED, base + CORESIGHT_CLAIMSET);
+ csdev_access_relaxed_write32(&csdev->access, CORESIGHT_CLAIM_SELF_HOSTED,
+ CORESIGHT_CLAIMSET);
isb();
}
-static inline void coresight_clear_claim_tags(void __iomem *base)
+static inline void coresight_clear_claim_tags(struct coresight_device *csdev)
{
- writel_relaxed(CORESIGHT_CLAIM_SELF_HOSTED, base + CORESIGHT_CLAIMCLR);
+ csdev_access_relaxed_write32(&csdev->access, CORESIGHT_CLAIM_SELF_HOSTED,
+ CORESIGHT_CLAIMCLR);
isb();
}
@@ -182,27 +184,33 @@ static inline void coresight_clear_claim_tags(void __iomem *base)
* Called with CS_UNLOCKed for the component.
* Returns : 0 on success
*/
-int coresight_claim_device_unlocked(void __iomem *base)
+int coresight_claim_device_unlocked(struct coresight_device *csdev)
{
- if (coresight_is_claimed_any(base))
+ if (WARN_ON(!csdev))
+ return -EINVAL;
+
+ if (coresight_is_claimed_any(csdev))
return -EBUSY;
- coresight_set_claim_tags(base);
- if (coresight_is_claimed_self_hosted(base))
+ coresight_set_claim_tags(csdev);
+ if (coresight_is_claimed_self_hosted(csdev))
return 0;
/* There was a race setting the tags, clean up and fail */
- coresight_clear_claim_tags(base);
+ coresight_clear_claim_tags(csdev);
return -EBUSY;
}
EXPORT_SYMBOL_GPL(coresight_claim_device_unlocked);
-int coresight_claim_device(void __iomem *base)
+int coresight_claim_device(struct coresight_device *csdev)
{
int rc;
- CS_UNLOCK(base);
- rc = coresight_claim_device_unlocked(base);
- CS_LOCK(base);
+ if (WARN_ON(!csdev))
+ return -EINVAL;
+
+ CS_UNLOCK(csdev->access.base);
+ rc = coresight_claim_device_unlocked(csdev);
+ CS_LOCK(csdev->access.base);
return rc;
}
@@ -212,11 +220,14 @@ EXPORT_SYMBOL_GPL(coresight_claim_device);
* coresight_disclaim_device_unlocked : Clear the claim tags for the device.
* Called with CS_UNLOCKed for the component.
*/
-void coresight_disclaim_device_unlocked(void __iomem *base)
+void coresight_disclaim_device_unlocked(struct coresight_device *csdev)
{
- if (coresight_is_claimed_self_hosted(base))
- coresight_clear_claim_tags(base);
+ if (WARN_ON(!csdev))
+ return;
+
+ if (coresight_is_claimed_self_hosted(csdev))
+ coresight_clear_claim_tags(csdev);
else
/*
* The external agent may have not honoured our claim
@@ -227,11 +238,14 @@ void coresight_disclaim_device_unlocked(void __iomem *base)
}
EXPORT_SYMBOL_GPL(coresight_disclaim_device_unlocked);
-void coresight_disclaim_device(void __iomem *base)
+void coresight_disclaim_device(struct coresight_device *csdev)
{
- CS_UNLOCK(base);
- coresight_disclaim_device_unlocked(base);
- CS_LOCK(base);
+ if (WARN_ON(!csdev))
+ return;
+
+ CS_UNLOCK(csdev->access.base);
+ coresight_disclaim_device_unlocked(csdev);
+ CS_LOCK(csdev->access.base);
}
EXPORT_SYMBOL_GPL(coresight_disclaim_device);
@@ -1418,23 +1432,24 @@ static void coresight_remove_conns(struct coresight_device *csdev)
}
/**
- * coresight_timeout - loop until a bit has changed to a specific state.
- * @addr: base address of the area of interest.
- * @offset: address of a register, starting from @addr.
+ * coresight_timeout - loop until a bit in a register has reached a specific
+ * state.
+ * @csa: coresight device access for the device
+ * @offset: Offset of the register from the base of the device.
* @position: the position of the bit of interest.
* @value: the value the bit should have.
*
* Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
 * TIMEOUT_US has elapsed, whichever happens first.
*/
-
-int coresight_timeout(void __iomem *addr, u32 offset, int position, int value)
+int coresight_timeout(struct csdev_access *csa, u32 offset,
+ int position, int value)
{
int i;
u32 val;
for (i = TIMEOUT_US; i > 0; i--) {
- val = __raw_readl(addr + offset);
+ val = csdev_access_read32(csa, offset);
/* waiting on the bit to go from 0 to 1 */
if (value) {
if (val & BIT(position))
@@ -1458,6 +1473,48 @@ int coresight_timeout(void __iomem *addr, u32 offset, int position, int value)
}
EXPORT_SYMBOL_GPL(coresight_timeout);
+u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset)
+{
+ return csdev_access_relaxed_read32(&csdev->access, offset);
+}
+
+u32 coresight_read32(struct coresight_device *csdev, u32 offset)
+{
+ return csdev_access_read32(&csdev->access, offset);
+}
+
+void coresight_relaxed_write32(struct coresight_device *csdev,
+ u32 val, u32 offset)
+{
+ csdev_access_relaxed_write32(&csdev->access, val, offset);
+}
+
+void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset)
+{
+ csdev_access_write32(&csdev->access, val, offset);
+}
+
+u64 coresight_relaxed_read64(struct coresight_device *csdev, u32 offset)
+{
+ return csdev_access_relaxed_read64(&csdev->access, offset);
+}
+
+u64 coresight_read64(struct coresight_device *csdev, u32 offset)
+{
+ return csdev_access_read64(&csdev->access, offset);
+}
+
+void coresight_relaxed_write64(struct coresight_device *csdev,
+ u64 val, u32 offset)
+{
+ csdev_access_relaxed_write64(&csdev->access, val, offset);
+}
+
+void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset)
+{
+ csdev_access_write64(&csdev->access, val, offset);
+}
+
/*
* coresight_release_platform_data: Release references to the devices connected
* to the output port of this device.
@@ -1522,6 +1579,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
csdev->type = desc->type;
csdev->subtype = desc->subtype;
csdev->ops = desc->ops;
+ csdev->access = desc->access;
csdev->orphan = false;
csdev->dev.type = &coresight_dev_type[desc->type];
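For illustration, a minimal userspace sketch of the self-hosted claim-tag handshake that coresight_claim_device_unlocked() implements above, modelled on a plain variable instead of the CLAIMSET/CLAIMCLR registers; the tag value is an assumption made only for this sketch.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define CLAIM_SELF_HOSTED	0x2u	/* illustrative tag value */

static uint32_t claim_tags;		/* read view, as through CLAIMCLR */

static int claim_device(void)
{
	if (claim_tags)			/* already claimed, e.g. by an external debugger */
		return -EBUSY;
	claim_tags |= CLAIM_SELF_HOSTED;	/* write through CLAIMSET */
	if (claim_tags == CLAIM_SELF_HOSTED)
		return 0;		/* we own the device */
	claim_tags &= ~CLAIM_SELF_HOSTED;	/* lost a race: clean up and fail */
	return -EBUSY;
}

static void disclaim_device(void)
{
	if (claim_tags == CLAIM_SELF_HOSTED)	/* only clear our own tag */
		claim_tags &= ~CLAIM_SELF_HOSTED;
}

int main(void)
{
	printf("first claim:    %d\n", claim_device());
	printf("second claim:   %d\n", claim_device());
	disclaim_device();
	printf("after disclaim: %d\n", claim_device());
	return 0;
}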
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index e1d232411d8d..2dcf13de751f 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -627,7 +627,7 @@ err:
return ret;
}
-static int debug_remove(struct amba_device *adev)
+static void debug_remove(struct amba_device *adev)
{
struct device *dev = &adev->dev;
struct debug_drvdata *drvdata = amba_get_drvdata(adev);
@@ -642,8 +642,6 @@ static int debug_remove(struct amba_device *adev)
if (!--debug_count)
debug_func_exit();
-
- return 0;
}
static const struct amba_cs_uci_id uci_id_debug[] = {
diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
index 61dbc1afd8da..e2a3620cbf48 100644
--- a/drivers/hwtracing/coresight/coresight-cti-core.c
+++ b/drivers/hwtracing/coresight/coresight-cti-core.c
@@ -102,7 +102,7 @@ static int cti_enable_hw(struct cti_drvdata *drvdata)
goto cti_state_unchanged;
/* claim the device */
- rc = coresight_claim_device(drvdata->base);
+ rc = coresight_claim_device(drvdata->csdev);
if (rc)
goto cti_err_not_enabled;
@@ -136,7 +136,7 @@ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
goto cti_hp_not_enabled;
/* try to claim the device */
- if (coresight_claim_device(drvdata->base))
+ if (coresight_claim_device(drvdata->csdev))
goto cti_hp_not_enabled;
cti_write_all_hw_regs(drvdata);
@@ -154,6 +154,7 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
{
struct cti_config *config = &drvdata->config;
struct device *dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
spin_lock(&drvdata->spinlock);
@@ -171,7 +172,7 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
writel_relaxed(0, drvdata->base + CTICONTROL);
config->hw_enabled = false;
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
spin_unlock(&drvdata->spinlock);
pm_runtime_put(dev);
@@ -655,6 +656,7 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
void *v)
{
struct cti_drvdata *drvdata;
+ struct coresight_device *csdev;
unsigned int cpu = smp_processor_id();
int notify_res = NOTIFY_OK;
@@ -662,6 +664,7 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
return NOTIFY_OK;
drvdata = cti_cpu_drvdata[cpu];
+ csdev = drvdata->csdev;
if (WARN_ON_ONCE(drvdata->ctidev.cpu != cpu))
return NOTIFY_BAD;
@@ -673,13 +676,13 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
/* CTI regs all static - we have a copy & nothing to save */
drvdata->config.hw_powered = false;
if (drvdata->config.hw_enabled)
- coresight_disclaim_device(drvdata->base);
+ coresight_disclaim_device(csdev);
break;
case CPU_PM_ENTER_FAILED:
drvdata->config.hw_powered = true;
if (drvdata->config.hw_enabled) {
- if (coresight_claim_device(drvdata->base))
+ if (coresight_claim_device(csdev))
drvdata->config.hw_enabled = false;
}
break;
@@ -692,7 +695,7 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
/* check enable reference count to enable HW */
if (atomic_read(&drvdata->config.enable_req_count)) {
/* check we can claim the device as we re-power */
- if (coresight_claim_device(drvdata->base))
+ if (coresight_claim_device(csdev))
goto cti_notify_exit;
drvdata->config.hw_enabled = true;
@@ -736,7 +739,7 @@ static int cti_dying_cpu(unsigned int cpu)
spin_lock(&drvdata->spinlock);
drvdata->config.hw_powered = false;
if (drvdata->config.hw_enabled)
- coresight_disclaim_device(drvdata->base);
+ coresight_disclaim_device(drvdata->csdev);
spin_unlock(&drvdata->spinlock);
return 0;
}
@@ -836,7 +839,7 @@ static void cti_device_release(struct device *dev)
if (drvdata->csdev_release)
drvdata->csdev_release(dev);
}
-static int cti_remove(struct amba_device *adev)
+static void cti_remove(struct amba_device *adev)
{
struct cti_drvdata *drvdata = dev_get_drvdata(&adev->dev);
@@ -845,8 +848,6 @@ static int cti_remove(struct amba_device *adev)
mutex_unlock(&ect_mutex);
coresight_unregister(drvdata->csdev);
-
- return 0;
}
static int cti_probe(struct amba_device *adev, const struct amba_id *id)
@@ -870,6 +871,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(base);
drvdata->base = base;
+ cti_desc.access = CSDEV_ACCESS_IOMEM(base);
dev_set_drvdata(dev, drvdata);
diff --git a/drivers/hwtracing/coresight/coresight-cti-platform.c b/drivers/hwtracing/coresight/coresight-cti-platform.c
index 98f830c6ed50..ccef04f27f12 100644
--- a/drivers/hwtracing/coresight/coresight-cti-platform.c
+++ b/drivers/hwtracing/coresight/coresight-cti-platform.c
@@ -343,7 +343,6 @@ static int cti_plat_create_connection(struct device *dev,
{
struct cti_trig_con *tc = NULL;
int cpuid = -1, err = 0;
- struct fwnode_handle *cs_fwnode = NULL;
struct coresight_device *csdev = NULL;
const char *assoc_name = "unknown";
char cpu_name_str[16];
@@ -397,8 +396,9 @@ static int cti_plat_create_connection(struct device *dev,
assoc_name = cpu_name_str;
} else {
/* associated device ? */
- cs_fwnode = fwnode_find_reference(fwnode,
- CTI_DT_CSDEV_ASSOC, 0);
+ struct fwnode_handle *cs_fwnode = fwnode_find_reference(fwnode,
+ CTI_DT_CSDEV_ASSOC,
+ 0);
if (!IS_ERR(cs_fwnode)) {
assoc_name = cti_plat_get_csdev_or_node_name(cs_fwnode,
&csdev);
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 0cf6f0b947b6..f775cbee12b8 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -132,7 +132,7 @@ static void __etb_enable_hw(struct etb_drvdata *drvdata)
static int etb_enable_hw(struct etb_drvdata *drvdata)
{
- int rc = coresight_claim_device(drvdata->base);
+ int rc = coresight_claim_device(drvdata->csdev);
if (rc)
return rc;
@@ -252,6 +252,7 @@ static void __etb_disable_hw(struct etb_drvdata *drvdata)
{
u32 ffcr;
struct device *dev = &drvdata->csdev->dev;
+ struct csdev_access *csa = &drvdata->csdev->access;
CS_UNLOCK(drvdata->base);
@@ -263,7 +264,7 @@ static void __etb_disable_hw(struct etb_drvdata *drvdata)
ffcr |= ETB_FFCR_FON_MAN;
writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
- if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0)) {
+ if (coresight_timeout(csa, ETB_FFCR, ETB_FFCR_BIT, 0)) {
dev_err(dev,
"timeout while waiting for completion of Manual Flush\n");
}
@@ -271,7 +272,7 @@ static void __etb_disable_hw(struct etb_drvdata *drvdata)
/* disable trace capture */
writel_relaxed(0x0, drvdata->base + ETB_CTL_REG);
- if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1)) {
+ if (coresight_timeout(csa, ETB_FFSR, ETB_FFSR_BIT, 1)) {
dev_err(dev,
"timeout while waiting for Formatter to Stop\n");
}
@@ -344,7 +345,7 @@ static void etb_disable_hw(struct etb_drvdata *drvdata)
{
__etb_disable_hw(drvdata);
etb_dump_hw(drvdata);
- coresight_disclaim_device(drvdata->base);
+ coresight_disclaim_device(drvdata->csdev);
}
static int etb_disable(struct coresight_device *csdev)
@@ -757,6 +758,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(base);
drvdata->base = base;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
spin_lock_init(&drvdata->spinlock);
@@ -803,7 +805,7 @@ err_misc_register:
return ret;
}
-static int etb_remove(struct amba_device *adev)
+static void etb_remove(struct amba_device *adev)
{
struct etb_drvdata *drvdata = dev_get_drvdata(&adev->dev);
@@ -814,8 +816,6 @@ static int etb_remove(struct amba_device *adev)
*/
misc_deregister(&drvdata->miscdev);
coresight_unregister(drvdata->csdev);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index bdc34ca449f7..0f603b4094f2 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -27,17 +27,45 @@ static bool etm_perf_up;
static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
-/* ETMv3.5/PTM's ETMCR is 'config' */
+/*
+ * The PMU formats were originally for ETMv3.5/PTM's ETMCR 'config';
+ * they are now treated as general formats and applied to all ETMs.
+ */
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
-PMU_FORMAT_ATTR(contextid, "config:" __stringify(ETM_OPT_CTXTID));
+/* contextid1 enables tracing CONTEXTIDR_EL1 for ETMv4 */
+PMU_FORMAT_ATTR(contextid1, "config:" __stringify(ETM_OPT_CTXTID));
+/* contextid2 enables tracing CONTEXTIDR_EL2 for ETMv4 */
+PMU_FORMAT_ATTR(contextid2, "config:" __stringify(ETM_OPT_CTXTID2));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
/* Sink ID - same for all ETMs */
PMU_FORMAT_ATTR(sinkid, "config2:0-31");
+/*
+ * contextid always traces the "PID". The PID is in CONTEXTIDR_EL1
+ * when the kernel is running at EL1; when the kernel is at EL2,
+ * the PID is in CONTEXTIDR_EL2.
+ */
+static ssize_t format_attr_contextid_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ int pid_fmt = ETM_OPT_CTXTID;
+
+#if defined(CONFIG_CORESIGHT_SOURCE_ETM4X)
+ pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
+#endif
+ return sprintf(page, "config:%d\n", pid_fmt);
+}
+
+struct device_attribute format_attr_contextid =
+ __ATTR(contextid, 0444, format_attr_contextid_show, NULL);
+
static struct attribute *etm_config_formats_attr[] = {
&format_attr_cycacc.attr,
&format_attr_contextid.attr,
+ &format_attr_contextid1.attr,
+ &format_attr_contextid2.attr,
&format_attr_timestamp.attr,
&format_attr_retstack.attr,
&format_attr_sinkid.attr,
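For illustration, a minimal userspace sketch of the decision format_attr_contextid_show() makes above: the contextid alias resolves to the CONTEXTIDR_EL1 config bit normally, and to the CONTEXTIDR_EL2 bit when the kernel runs at EL2 (VHE). The bit positions below are placeholders standing in for ETM_OPT_CTXTID and ETM_OPT_CTXTID2.

#include <stdbool.h>
#include <stdio.h>

#define OPT_CTXTID	14	/* placeholder for ETM_OPT_CTXTID */
#define OPT_CTXTID2	15	/* placeholder for ETM_OPT_CTXTID2 */

int main(void)
{
	bool kernel_in_hyp_mode = true;	/* pretend the kernel booted at EL2 */
	int pid_fmt = kernel_in_hyp_mode ? OPT_CTXTID2 : OPT_CTXTID;

	/* this is what perf reads back from .../format/contextid */
	printf("config:%d\n", pid_fmt);
	return 0;
}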
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 5bf5a5a4ce6d..cf64ce73a741 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -358,10 +358,11 @@ static int etm_enable_hw(struct etm_drvdata *drvdata)
int i, rc;
u32 etmcr;
struct etm_config *config = &drvdata->config;
+ struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
- rc = coresight_claim_device_unlocked(drvdata->base);
+ rc = coresight_claim_device_unlocked(csdev);
if (rc)
goto done;
@@ -566,6 +567,7 @@ static void etm_disable_hw(void *info)
int i;
struct etm_drvdata *drvdata = info;
struct etm_config *config = &drvdata->config;
+ struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
etm_set_prog(drvdata);
@@ -577,7 +579,7 @@ static void etm_disable_hw(void *info)
config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
etm_set_pwrdwn(drvdata);
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
@@ -602,7 +604,7 @@ static void etm_disable_perf(struct coresight_device *csdev)
* power down the tracer.
*/
etm_set_pwrdwn(drvdata);
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
}
@@ -839,6 +841,7 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(base);
drvdata->base = base;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
spin_lock_init(&drvdata->spinlock);
@@ -909,7 +912,7 @@ static void clear_etmdrvdata(void *info)
etmdrvdata[cpu] = NULL;
}
-static int etm_remove(struct amba_device *adev)
+static void etm_remove(struct amba_device *adev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
@@ -932,8 +935,6 @@ static int etm_remove(struct amba_device *adev)
cpus_read_unlock();
coresight_unregister(drvdata->csdev);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index b20b6ff17cf6..15016f757828 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -27,6 +27,7 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
@@ -59,32 +60,99 @@ static u64 etm4_get_access_type(struct etmv4_config *config);
static enum cpuhp_state hp_online;
-static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
+struct etm4_init_arg {
+ unsigned int pid;
+ struct etmv4_drvdata *drvdata;
+ struct csdev_access *csa;
+};
+
+/*
+ * Check if TRCSSPCICRn(i) is implemented for a given instance.
+ *
+ * TRCSSPCICR<n> is present only if all of the following are true:
+ * TRCIDR4.NUMSSCC > n.
+ * TRCIDR4.NUMPC > 0b0000.
+ * TRCSSCSR<n>.PC == 0b1.
+ */
+static inline bool etm4x_sspcicrn_present(struct etmv4_drvdata *drvdata, int n)
+{
+ return (n < drvdata->nr_ss_cmp) &&
+ drvdata->nr_pe &&
+ (drvdata->config.ss_status[n] & TRCSSCSRn_PC);
+}
+
+u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
+{
+ u64 res = 0;
+
+ switch (offset) {
+ ETM4x_READ_SYSREG_CASES(res)
+ default:
+ pr_warn_ratelimited("etm4x: trying to read unsupported register @%x\n",
+ offset);
+ }
+
+ if (!_relaxed)
+ __iormb(res); /* Imitate the !relaxed I/O helpers */
+
+ return res;
+}
+
+void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
+{
+ if (!_relaxed)
+ __iowmb(); /* Imitate the !relaxed I/O helpers */
+ if (!_64bit)
+ val &= GENMASK(31, 0);
+
+ switch (offset) {
+ ETM4x_WRITE_SYSREG_CASES(val)
+ default:
+ pr_warn_ratelimited("etm4x: trying to write to unsupported register @%x\n",
+ offset);
+ }
+}
+
+static void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata, struct csdev_access *csa)
{
/* Writing 0 to TRCOSLAR unlocks the trace registers */
- writel_relaxed(0x0, drvdata->base + TRCOSLAR);
+ etm4x_relaxed_write32(csa, 0x0, TRCOSLAR);
drvdata->os_unlock = true;
isb();
}
+static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
+{
+ if (!WARN_ON(!drvdata->csdev))
+ etm4_os_unlock_csa(drvdata, &drvdata->csdev->access);
+
+}
+
static void etm4_os_lock(struct etmv4_drvdata *drvdata)
{
+ if (WARN_ON(!drvdata->csdev))
+ return;
+
/* Writing 0x1 to TRCOSLAR locks the trace registers */
- writel_relaxed(0x1, drvdata->base + TRCOSLAR);
+ etm4x_relaxed_write32(&drvdata->csdev->access, 0x1, TRCOSLAR);
drvdata->os_unlock = false;
isb();
}
-static bool etm4_arch_supported(u8 arch)
+static void etm4_cs_lock(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
{
- /* Mask out the minor version number */
- switch (arch & 0xf0) {
- case ETM_ARCH_V4:
- break;
- default:
- return false;
- }
- return true;
+ /* Software Lock is only accessible via memory mapped interface */
+ if (csa->io_mem)
+ CS_LOCK(csa->base);
+}
+
+static void etm4_cs_unlock(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ if (csa->io_mem)
+ CS_UNLOCK(csa->base);
}
static int etm4_cpu_id(struct coresight_device *csdev)
@@ -201,57 +269,64 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
{
int i, rc;
struct etmv4_config *config = &drvdata->config;
- struct device *etm_dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
+ struct device *etm_dev = &csdev->dev;
+ struct csdev_access *csa = &csdev->access;
+
- CS_UNLOCK(drvdata->base);
+ etm4_cs_unlock(drvdata, csa);
etm4_enable_arch_specific(drvdata);
etm4_os_unlock(drvdata);
- rc = coresight_claim_device_unlocked(drvdata->base);
+ rc = coresight_claim_device_unlocked(csdev);
if (rc)
goto done;
/* Disable the trace unit before programming trace registers */
- writel_relaxed(0, drvdata->base + TRCPRGCTLR);
+ etm4x_relaxed_write32(csa, 0, TRCPRGCTLR);
+
+ /*
+ * If we use system instructions, we need to synchronize the
+ * write to the TRCPRGCTLR, before accessing the TRCSTATR.
+ * See ARM IHI0064F, section
+ * "4.3.7 Synchronization of register updates"
+ */
+ if (!csa->io_mem)
+ isb();
/* wait for TRCSTATR.IDLE to go up */
- if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
+ if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
if (drvdata->nr_pe)
- writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR);
- writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR);
+ etm4x_relaxed_write32(csa, config->pe_sel, TRCPROCSELR);
+ etm4x_relaxed_write32(csa, config->cfg, TRCCONFIGR);
/* nothing specific implemented */
- writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
- writel_relaxed(config->eventctrl0, drvdata->base + TRCEVENTCTL0R);
- writel_relaxed(config->eventctrl1, drvdata->base + TRCEVENTCTL1R);
- writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR);
- writel_relaxed(config->ts_ctrl, drvdata->base + TRCTSCTLR);
- writel_relaxed(config->syncfreq, drvdata->base + TRCSYNCPR);
- writel_relaxed(config->ccctlr, drvdata->base + TRCCCCTLR);
- writel_relaxed(config->bb_ctrl, drvdata->base + TRCBBCTLR);
- writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
- writel_relaxed(config->vinst_ctrl, drvdata->base + TRCVICTLR);
- writel_relaxed(config->viiectlr, drvdata->base + TRCVIIECTLR);
- writel_relaxed(config->vissctlr,
- drvdata->base + TRCVISSCTLR);
+ etm4x_relaxed_write32(csa, 0x0, TRCAUXCTLR);
+ etm4x_relaxed_write32(csa, config->eventctrl0, TRCEVENTCTL0R);
+ etm4x_relaxed_write32(csa, config->eventctrl1, TRCEVENTCTL1R);
+ if (drvdata->stallctl)
+ etm4x_relaxed_write32(csa, config->stall_ctrl, TRCSTALLCTLR);
+ etm4x_relaxed_write32(csa, config->ts_ctrl, TRCTSCTLR);
+ etm4x_relaxed_write32(csa, config->syncfreq, TRCSYNCPR);
+ etm4x_relaxed_write32(csa, config->ccctlr, TRCCCCTLR);
+ etm4x_relaxed_write32(csa, config->bb_ctrl, TRCBBCTLR);
+ etm4x_relaxed_write32(csa, drvdata->trcid, TRCTRACEIDR);
+ etm4x_relaxed_write32(csa, config->vinst_ctrl, TRCVICTLR);
+ etm4x_relaxed_write32(csa, config->viiectlr, TRCVIIECTLR);
+ etm4x_relaxed_write32(csa, config->vissctlr, TRCVISSCTLR);
if (drvdata->nr_pe_cmp)
- writel_relaxed(config->vipcssctlr,
- drvdata->base + TRCVIPCSSCTLR);
+ etm4x_relaxed_write32(csa, config->vipcssctlr, TRCVIPCSSCTLR);
for (i = 0; i < drvdata->nrseqstate - 1; i++)
- writel_relaxed(config->seq_ctrl[i],
- drvdata->base + TRCSEQEVRn(i));
- writel_relaxed(config->seq_rst, drvdata->base + TRCSEQRSTEVR);
- writel_relaxed(config->seq_state, drvdata->base + TRCSEQSTR);
- writel_relaxed(config->ext_inp, drvdata->base + TRCEXTINSELR);
+ etm4x_relaxed_write32(csa, config->seq_ctrl[i], TRCSEQEVRn(i));
+ etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
+ etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
+ etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
- writel_relaxed(config->cntrldvr[i],
- drvdata->base + TRCCNTRLDVRn(i));
- writel_relaxed(config->cntr_ctrl[i],
- drvdata->base + TRCCNTCTLRn(i));
- writel_relaxed(config->cntr_val[i],
- drvdata->base + TRCCNTVRn(i));
+ etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i));
+ etm4x_relaxed_write32(csa, config->cntr_ctrl[i], TRCCNTCTLRn(i));
+ etm4x_relaxed_write32(csa, config->cntr_val[i], TRCCNTVRn(i));
}
/*
@@ -259,54 +334,52 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
* such start at 2.
*/
for (i = 2; i < drvdata->nr_resource * 2; i++)
- writel_relaxed(config->res_ctrl[i],
- drvdata->base + TRCRSCTLRn(i));
+ etm4x_relaxed_write32(csa, config->res_ctrl[i], TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
/* always clear status bit on restart if using single-shot */
if (config->ss_ctrl[i] || config->ss_pe_cmp[i])
config->ss_status[i] &= ~BIT(31);
- writel_relaxed(config->ss_ctrl[i],
- drvdata->base + TRCSSCCRn(i));
- writel_relaxed(config->ss_status[i],
- drvdata->base + TRCSSCSRn(i));
- writel_relaxed(config->ss_pe_cmp[i],
- drvdata->base + TRCSSPCICRn(i));
+ etm4x_relaxed_write32(csa, config->ss_ctrl[i], TRCSSCCRn(i));
+ etm4x_relaxed_write32(csa, config->ss_status[i], TRCSSCSRn(i));
+ if (etm4x_sspcicrn_present(drvdata, i))
+ etm4x_relaxed_write32(csa, config->ss_pe_cmp[i], TRCSSPCICRn(i));
}
for (i = 0; i < drvdata->nr_addr_cmp; i++) {
- writeq_relaxed(config->addr_val[i],
- drvdata->base + TRCACVRn(i));
- writeq_relaxed(config->addr_acc[i],
- drvdata->base + TRCACATRn(i));
+ etm4x_relaxed_write64(csa, config->addr_val[i], TRCACVRn(i));
+ etm4x_relaxed_write64(csa, config->addr_acc[i], TRCACATRn(i));
}
for (i = 0; i < drvdata->numcidc; i++)
- writeq_relaxed(config->ctxid_pid[i],
- drvdata->base + TRCCIDCVRn(i));
- writel_relaxed(config->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
+ etm4x_relaxed_write64(csa, config->ctxid_pid[i], TRCCIDCVRn(i));
+ etm4x_relaxed_write32(csa, config->ctxid_mask0, TRCCIDCCTLR0);
if (drvdata->numcidc > 4)
- writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
+ etm4x_relaxed_write32(csa, config->ctxid_mask1, TRCCIDCCTLR1);
for (i = 0; i < drvdata->numvmidc; i++)
- writeq_relaxed(config->vmid_val[i],
- drvdata->base + TRCVMIDCVRn(i));
- writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
+ etm4x_relaxed_write64(csa, config->vmid_val[i], TRCVMIDCVRn(i));
+ etm4x_relaxed_write32(csa, config->vmid_mask0, TRCVMIDCCTLR0);
if (drvdata->numvmidc > 4)
- writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
+ etm4x_relaxed_write32(csa, config->vmid_mask1, TRCVMIDCCTLR1);
if (!drvdata->skip_power_up) {
+ u32 trcpdcr = etm4x_relaxed_read32(csa, TRCPDCR);
+
/*
* Request to keep the trace unit powered and also
* emulation of powerdown
*/
- writel_relaxed(readl_relaxed(drvdata->base + TRCPDCR) |
- TRCPDCR_PU, drvdata->base + TRCPDCR);
+ etm4x_relaxed_write32(csa, trcpdcr | TRCPDCR_PU, TRCPDCR);
}
/* Enable the trace unit */
- writel_relaxed(1, drvdata->base + TRCPRGCTLR);
+ etm4x_relaxed_write32(csa, 1, TRCPRGCTLR);
+
+ /* Synchronize the register updates for sysreg access */
+ if (!csa->io_mem)
+ isb();
/* wait for TRCSTATR.IDLE to go back down to '0' */
- if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
+ if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
@@ -318,7 +391,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
isb();
done:
- CS_LOCK(drvdata->base);
+ etm4_cs_lock(drvdata, csa);
dev_dbg(etm_dev, "cpu: %d enable smp call done: %d\n",
drvdata->cpu, rc);
@@ -477,6 +550,19 @@ static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
/* bit[6], Context ID tracing bit */
config->cfg |= BIT(ETM4_CFG_BIT_CTXTID);
+ /*
+ * If bit ETM_OPT_CTXTID2 is set in the perf config, the user asks to
+ * trace VMID in order to record CONTEXTIDR_EL2. Do not enable VMID
+ * tracing if the kernel is not running in EL2.
+ */
+ if (attr->config & BIT(ETM_OPT_CTXTID2)) {
+ if (!is_kernel_in_hyp_mode()) {
+ ret = -EINVAL;
+ goto out;
+ }
+ config->cfg |= BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT);
+ }
+
/* return stack - enable if selected and supported */
if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
/* bit[12], Return stack enable bit */
@@ -570,20 +656,22 @@ static void etm4_disable_hw(void *info)
u32 control;
struct etmv4_drvdata *drvdata = info;
struct etmv4_config *config = &drvdata->config;
- struct device *etm_dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
+ struct device *etm_dev = &csdev->dev;
+ struct csdev_access *csa = &csdev->access;
int i;
- CS_UNLOCK(drvdata->base);
+ etm4_cs_unlock(drvdata, csa);
etm4_disable_arch_specific(drvdata);
if (!drvdata->skip_power_up) {
/* power can be removed from the trace unit now */
- control = readl_relaxed(drvdata->base + TRCPDCR);
+ control = etm4x_relaxed_read32(csa, TRCPDCR);
control &= ~TRCPDCR_PU;
- writel_relaxed(control, drvdata->base + TRCPDCR);
+ etm4x_relaxed_write32(csa, control, TRCPDCR);
}
- control = readl_relaxed(drvdata->base + TRCPRGCTLR);
+ control = etm4x_relaxed_read32(csa, TRCPRGCTLR);
/* EN, bit[0] Trace unit enable bit */
control &= ~0x1;
@@ -595,29 +683,27 @@ static void etm4_disable_hw(void *info)
*/
dsb(sy);
isb();
- writel_relaxed(control, drvdata->base + TRCPRGCTLR);
+ etm4x_relaxed_write32(csa, control, TRCPRGCTLR);
/* wait for TRCSTATR.PMSTABLE to go to '1' */
- if (coresight_timeout(drvdata->base, TRCSTATR,
- TRCSTATR_PMSTABLE_BIT, 1))
+ if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1))
dev_err(etm_dev,
"timeout while waiting for PM stable Trace Status\n");
/* read the status of the single shot comparators */
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
config->ss_status[i] =
- readl_relaxed(drvdata->base + TRCSSCSRn(i));
+ etm4x_relaxed_read32(csa, TRCSSCSRn(i));
}
/* read back the current counter values */
for (i = 0; i < drvdata->nr_cntr; i++) {
config->cntr_val[i] =
- readl_relaxed(drvdata->base + TRCCNTVRn(i));
+ etm4x_relaxed_read32(csa, TRCCNTVRn(i));
}
- coresight_disclaim_device_unlocked(drvdata->base);
-
- CS_LOCK(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
+ etm4_cs_lock(drvdata, csa);
dev_dbg(&drvdata->csdev->dev,
"cpu: %d disable smp call done\n", drvdata->cpu);
@@ -641,7 +727,7 @@ static int etm4_disable_perf(struct coresight_device *csdev,
* scheduled again. Configuration of the start/stop logic happens in
* function etm4_set_event_filters().
*/
- control = readl_relaxed(drvdata->base + TRCVICTLR);
+ control = etm4x_relaxed_read32(&csdev->access, TRCVICTLR);
/* TRCVICTLR::SSSTATUS, bit[9] */
filters->ssstatus = (control & BIT(9));
@@ -712,24 +798,136 @@ static const struct coresight_ops etm4_cs_ops = {
.source_ops = &etm4_source_ops,
};
+static inline bool cpu_supports_sysreg_trace(void)
+{
+ u64 dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
+
+ return ((dfr0 >> ID_AA64DFR0_TRACEVER_SHIFT) & 0xfUL) > 0;
+}
+
+static bool etm4_init_sysreg_access(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ u32 devarch;
+
+ if (!cpu_supports_sysreg_trace())
+ return false;
+
+ /*
+ * ETMs implementing sysreg access must implement TRCDEVARCH.
+ */
+ devarch = read_etm4x_sysreg_const_offset(TRCDEVARCH);
+ if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH)
+ return false;
+ *csa = (struct csdev_access) {
+ .io_mem = false,
+ .read = etm4x_sysreg_read,
+ .write = etm4x_sysreg_write,
+ };
+
+ drvdata->arch = etm_devarch_to_arch(devarch);
+ return true;
+}
+
+static bool etm4_init_iomem_access(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ u32 devarch = readl_relaxed(drvdata->base + TRCDEVARCH);
+ u32 idr1 = readl_relaxed(drvdata->base + TRCIDR1);
+
+ /*
+ * All ETMs must implement TRCDEVARCH to indicate that
+ * the component is an ETMv4. To support any broken
+ * implementations we fall back to TRCIDR1 check, which
+ * is not really reliable.
+ */
+ if ((devarch & ETM_DEVARCH_ID_MASK) == ETM_DEVARCH_ETMv4x_ARCH) {
+ drvdata->arch = etm_devarch_to_arch(devarch);
+ } else {
+ pr_warn("CPU%d: ETM4x incompatible TRCDEVARCH: %x, falling back to TRCIDR1\n",
+ smp_processor_id(), devarch);
+
+ if (ETM_TRCIDR1_ARCH_MAJOR(idr1) != ETM_TRCIDR1_ARCH_ETMv4)
+ return false;
+ drvdata->arch = etm_trcidr_to_arch(idr1);
+ }
+
+ *csa = CSDEV_ACCESS_IOMEM(drvdata->base);
+ return true;
+}
+
+static bool etm4_init_csdev_access(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ /*
+ * Always choose memory-mapped I/O if a memory map is
+ * provided, to prevent sysreg access on broken
+ * systems.
+ */
+ if (drvdata->base)
+ return etm4_init_iomem_access(drvdata, csa);
+
+ if (etm4_init_sysreg_access(drvdata, csa))
+ return true;
+
+ return false;
+}
+
+static void cpu_enable_tracing(void)
+{
+ u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
+ u64 trfcr;
+
+ if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRACE_FILT_SHIFT))
+ return;
+
+ /*
+ * If the CPU supports v8.4 Self-Hosted Tracing, enable
+ * tracing at the kernel EL and EL0, forcing use of the
+ * virtual time as the timestamp.
+ */
+ trfcr = (TRFCR_ELx_TS_VIRTUAL |
+ TRFCR_ELx_ExTRE |
+ TRFCR_ELx_E0TRE);
+
+ /* If we are running at EL2, allow tracing the CONTEXTIDR_EL2. */
+ if (is_kernel_in_hyp_mode())
+ trfcr |= TRFCR_EL2_CX;
+
+ write_sysreg_s(trfcr, SYS_TRFCR_EL1);
+}
+
static void etm4_init_arch_data(void *info)
{
u32 etmidr0;
- u32 etmidr1;
u32 etmidr2;
u32 etmidr3;
u32 etmidr4;
u32 etmidr5;
- struct etmv4_drvdata *drvdata = info;
+ struct etm4_init_arg *init_arg = info;
+ struct etmv4_drvdata *drvdata;
+ struct csdev_access *csa;
int i;
+ drvdata = init_arg->drvdata;
+ csa = init_arg->csa;
+
+ /*
+ * If we are unable to detect the access mechanism,
+ * or unable to detect the trace unit type, fail
+ * early.
+ */
+ if (!etm4_init_csdev_access(drvdata, csa))
+ return;
+
/* Make sure all registers are accessible */
- etm4_os_unlock(drvdata);
+ etm4_os_unlock_csa(drvdata, csa);
+ etm4_cs_unlock(drvdata, csa);
- CS_UNLOCK(drvdata->base);
+ etm4_check_arch_features(drvdata, init_arg->pid);
/* find all capabilities of the tracing unit */
- etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);
+ etmidr0 = etm4x_relaxed_read32(csa, TRCIDR0);
/* INSTP0, bits[2:1] P0 tracing support field */
if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
@@ -768,17 +966,8 @@ static void etm4_init_arch_data(void *info)
/* TSSIZE, bits[28:24] Global timestamp size field */
drvdata->ts_size = BMVAL(etmidr0, 24, 28);
- /* base architecture of trace unit */
- etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
- /*
- * TRCARCHMIN, bits[7:4] architecture the minor version number
- * TRCARCHMAJ, bits[11:8] architecture major versin number
- */
- drvdata->arch = BMVAL(etmidr1, 4, 11);
- drvdata->config.arch = drvdata->arch;
-
/* maximum size of resources */
- etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
+ etmidr2 = etm4x_relaxed_read32(csa, TRCIDR2);
/* CIDSIZE, bits[9:5] Indicates the Context ID size */
drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
/* VMIDSIZE, bits[14:10] Indicates the VMID size */
@@ -786,11 +975,12 @@ static void etm4_init_arch_data(void *info)
/* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
drvdata->ccsize = BMVAL(etmidr2, 25, 28);
- etmidr3 = readl_relaxed(drvdata->base + TRCIDR3);
+ etmidr3 = etm4x_relaxed_read32(csa, TRCIDR3);
/* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
/* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
+ drvdata->config.s_ex_level = drvdata->s_ex_level;
/* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);
@@ -836,7 +1026,7 @@ static void etm4_init_arch_data(void *info)
drvdata->nooverflow = false;
/* number of resources trace unit supports */
- etmidr4 = readl_relaxed(drvdata->base + TRCIDR4);
+ etmidr4 = etm4x_relaxed_read32(csa, TRCIDR4);
/* NUMACPAIRS, bits[0:3] number of addr comparator pairs for tracing */
drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
/* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
@@ -852,7 +1042,7 @@ static void etm4_init_arch_data(void *info)
* Otherwise for values 0x1 and above the number is N + 1 as per v4.2.
*/
drvdata->nr_resource = BMVAL(etmidr4, 16, 19);
- if ((drvdata->arch < ETM4X_ARCH_4V3) || (drvdata->nr_resource > 0))
+ if ((drvdata->arch < ETM_ARCH_V4_3) || (drvdata->nr_resource > 0))
drvdata->nr_resource += 1;
/*
* NUMSSCC, bits[23:20] the number of single-shot
@@ -862,14 +1052,14 @@ static void etm4_init_arch_data(void *info)
drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
drvdata->config.ss_status[i] =
- readl_relaxed(drvdata->base + TRCSSCSRn(i));
+ etm4x_relaxed_read32(csa, TRCSSCSRn(i));
}
/* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
drvdata->numcidc = BMVAL(etmidr4, 24, 27);
/* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
drvdata->numvmidc = BMVAL(etmidr4, 28, 31);
- etmidr5 = readl_relaxed(drvdata->base + TRCIDR5);
+ etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5);
/* NUMEXTIN, bits[8:0] number of external inputs implemented */
drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
@@ -891,23 +1081,20 @@ static void etm4_init_arch_data(void *info)
drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
/* NUMCNTR, bits[30:28] number of counters available for tracing */
drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
- CS_LOCK(drvdata->base);
+ etm4_cs_lock(drvdata, csa);
+ cpu_enable_tracing();
+}
+
+static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config)
+{
+ return etm4_get_access_type(config) << TRCVICTLR_EXLEVEL_SHIFT;
}
/* Set ELx trace filter access in the TRCVICTLR register */
static void etm4_set_victlr_access(struct etmv4_config *config)
{
- u64 access_type;
-
- config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK | ETM_EXLEVEL_NS_VICTLR_MASK);
-
- /*
- * TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering
- * bits in vinst_ctrl, same bit pattern as TRCACATRn values returned by
- * etm4_get_access_type() but with a relative shift in this register.
- */
- access_type = etm4_get_access_type(config) << ETM_EXLEVEL_LSHIFT_TRCVICTLR;
- config->vinst_ctrl |= (u32)access_type;
+ config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_MASK;
+ config->vinst_ctrl |= etm4_get_victlr_access_type(config);
}
static void etm4_set_default_config(struct etmv4_config *config)
@@ -937,12 +1124,9 @@ static u64 etm4_get_ns_access_type(struct etmv4_config *config)
u64 access_type = 0;
/*
- * EXLEVEL_NS, bits[15:12]
- * The Exception levels are:
- * Bit[12] Exception level 0 - Application
- * Bit[13] Exception level 1 - OS
- * Bit[14] Exception level 2 - Hypervisor
- * Bit[15] Never implemented
+ * EXLEVEL_NS, for Non-secure Exception levels.
+ * The mask here is a generic value and must be
+ * shifted to the corresponding register field before use.
*/
if (!is_kernel_in_hyp_mode()) {
/* Stay away from hypervisor mode for non-VHE */
@@ -959,27 +1143,26 @@ static u64 etm4_get_ns_access_type(struct etmv4_config *config)
return access_type;
}
+/*
+ * Construct the exception level masks for a given config.
+ * The result must be shifted to the corresponding register
+ * field before use.
+ */
static u64 etm4_get_access_type(struct etmv4_config *config)
{
- u64 access_type = etm4_get_ns_access_type(config);
- u64 s_hyp = (config->arch & 0x0f) >= 0x4 ? ETM_EXLEVEL_S_HYP : 0;
-
- /*
- * EXLEVEL_S, bits[11:8], don't trace anything happening
- * in secure state.
- */
- access_type |= (ETM_EXLEVEL_S_APP |
- ETM_EXLEVEL_S_OS |
- s_hyp |
- ETM_EXLEVEL_S_MON);
+ /* All Secure exception levels are excluded from the trace */
+ return etm4_get_ns_access_type(config) | (u64)config->s_ex_level;
+}
- return access_type;
+static u64 etm4_get_comparator_access_type(struct etmv4_config *config)
+{
+ return etm4_get_access_type(config) << TRCACATR_EXLEVEL_SHIFT;
}
static void etm4_set_comparator_filter(struct etmv4_config *config,
u64 start, u64 stop, int comparator)
{
- u64 access_type = etm4_get_access_type(config);
+ u64 access_type = etm4_get_comparator_access_type(config);
/* First half of default address comparator */
config->addr_val[comparator] = start;
@@ -1014,7 +1197,7 @@ static void etm4_set_start_stop_filter(struct etmv4_config *config,
enum etm_addr_type type)
{
int shift;
- u64 access_type = etm4_get_access_type(config);
+ u64 access_type = etm4_get_comparator_access_type(config);
/* Configure the comparator */
config->addr_val[comparator] = address;
@@ -1255,7 +1438,15 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
{
int i, ret = 0;
struct etmv4_save_state *state;
- struct device *etm_dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
+ struct csdev_access *csa;
+ struct device *etm_dev;
+
+ if (WARN_ON(!csdev))
+ return -ENODEV;
+
+ etm_dev = &csdev->dev;
+ csa = &csdev->access;
/*
* As recommended by 3.4.1 ("The procedure when powering down the PE")
@@ -1264,14 +1455,12 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
dsb(sy);
isb();
- CS_UNLOCK(drvdata->base);
-
+ etm4_cs_unlock(drvdata, csa);
/* Lock the OS lock to disable trace and external debugger access */
etm4_os_lock(drvdata);
/* wait for TRCSTATR.PMSTABLE to go up */
- if (coresight_timeout(drvdata->base, TRCSTATR,
- TRCSTATR_PMSTABLE_BIT, 1)) {
+ if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) {
dev_err(etm_dev,
"timeout while waiting for PM Stable Status\n");
etm4_os_unlock(drvdata);
@@ -1281,55 +1470,57 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
state = drvdata->save_state;
- state->trcprgctlr = readl(drvdata->base + TRCPRGCTLR);
+ state->trcprgctlr = etm4x_read32(csa, TRCPRGCTLR);
if (drvdata->nr_pe)
- state->trcprocselr = readl(drvdata->base + TRCPROCSELR);
- state->trcconfigr = readl(drvdata->base + TRCCONFIGR);
- state->trcauxctlr = readl(drvdata->base + TRCAUXCTLR);
- state->trceventctl0r = readl(drvdata->base + TRCEVENTCTL0R);
- state->trceventctl1r = readl(drvdata->base + TRCEVENTCTL1R);
- state->trcstallctlr = readl(drvdata->base + TRCSTALLCTLR);
- state->trctsctlr = readl(drvdata->base + TRCTSCTLR);
- state->trcsyncpr = readl(drvdata->base + TRCSYNCPR);
- state->trcccctlr = readl(drvdata->base + TRCCCCTLR);
- state->trcbbctlr = readl(drvdata->base + TRCBBCTLR);
- state->trctraceidr = readl(drvdata->base + TRCTRACEIDR);
- state->trcqctlr = readl(drvdata->base + TRCQCTLR);
-
- state->trcvictlr = readl(drvdata->base + TRCVICTLR);
- state->trcviiectlr = readl(drvdata->base + TRCVIIECTLR);
- state->trcvissctlr = readl(drvdata->base + TRCVISSCTLR);
+ state->trcprocselr = etm4x_read32(csa, TRCPROCSELR);
+ state->trcconfigr = etm4x_read32(csa, TRCCONFIGR);
+ state->trcauxctlr = etm4x_read32(csa, TRCAUXCTLR);
+ state->trceventctl0r = etm4x_read32(csa, TRCEVENTCTL0R);
+ state->trceventctl1r = etm4x_read32(csa, TRCEVENTCTL1R);
+ if (drvdata->stallctl)
+ state->trcstallctlr = etm4x_read32(csa, TRCSTALLCTLR);
+ state->trctsctlr = etm4x_read32(csa, TRCTSCTLR);
+ state->trcsyncpr = etm4x_read32(csa, TRCSYNCPR);
+ state->trcccctlr = etm4x_read32(csa, TRCCCCTLR);
+ state->trcbbctlr = etm4x_read32(csa, TRCBBCTLR);
+ state->trctraceidr = etm4x_read32(csa, TRCTRACEIDR);
+ state->trcqctlr = etm4x_read32(csa, TRCQCTLR);
+
+ state->trcvictlr = etm4x_read32(csa, TRCVICTLR);
+ state->trcviiectlr = etm4x_read32(csa, TRCVIIECTLR);
+ state->trcvissctlr = etm4x_read32(csa, TRCVISSCTLR);
if (drvdata->nr_pe_cmp)
- state->trcvipcssctlr = readl(drvdata->base + TRCVIPCSSCTLR);
- state->trcvdctlr = readl(drvdata->base + TRCVDCTLR);
- state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR);
- state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR);
+ state->trcvipcssctlr = etm4x_read32(csa, TRCVIPCSSCTLR);
+ state->trcvdctlr = etm4x_read32(csa, TRCVDCTLR);
+ state->trcvdsacctlr = etm4x_read32(csa, TRCVDSACCTLR);
+ state->trcvdarcctlr = etm4x_read32(csa, TRCVDARCCTLR);
for (i = 0; i < drvdata->nrseqstate - 1; i++)
- state->trcseqevr[i] = readl(drvdata->base + TRCSEQEVRn(i));
+ state->trcseqevr[i] = etm4x_read32(csa, TRCSEQEVRn(i));
- state->trcseqrstevr = readl(drvdata->base + TRCSEQRSTEVR);
- state->trcseqstr = readl(drvdata->base + TRCSEQSTR);
- state->trcextinselr = readl(drvdata->base + TRCEXTINSELR);
+ state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
+ state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
+ state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
- state->trccntrldvr[i] = readl(drvdata->base + TRCCNTRLDVRn(i));
- state->trccntctlr[i] = readl(drvdata->base + TRCCNTCTLRn(i));
- state->trccntvr[i] = readl(drvdata->base + TRCCNTVRn(i));
+ state->trccntrldvr[i] = etm4x_read32(csa, TRCCNTRLDVRn(i));
+ state->trccntctlr[i] = etm4x_read32(csa, TRCCNTCTLRn(i));
+ state->trccntvr[i] = etm4x_read32(csa, TRCCNTVRn(i));
}
for (i = 0; i < drvdata->nr_resource * 2; i++)
- state->trcrsctlr[i] = readl(drvdata->base + TRCRSCTLRn(i));
+ state->trcrsctlr[i] = etm4x_read32(csa, TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
- state->trcssccr[i] = readl(drvdata->base + TRCSSCCRn(i));
- state->trcsscsr[i] = readl(drvdata->base + TRCSSCSRn(i));
- state->trcsspcicr[i] = readl(drvdata->base + TRCSSPCICRn(i));
+ state->trcssccr[i] = etm4x_read32(csa, TRCSSCCRn(i));
+ state->trcsscsr[i] = etm4x_read32(csa, TRCSSCSRn(i));
+ if (etm4x_sspcicrn_present(drvdata, i))
+ state->trcsspcicr[i] = etm4x_read32(csa, TRCSSPCICRn(i));
}
for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
- state->trcacvr[i] = readq(drvdata->base + TRCACVRn(i));
- state->trcacatr[i] = readq(drvdata->base + TRCACATRn(i));
+ state->trcacvr[i] = etm4x_read64(csa, TRCACVRn(i));
+ state->trcacatr[i] = etm4x_read64(csa, TRCACATRn(i));
}
/*
@@ -1340,25 +1531,26 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
*/
for (i = 0; i < drvdata->numcidc; i++)
- state->trccidcvr[i] = readq(drvdata->base + TRCCIDCVRn(i));
+ state->trccidcvr[i] = etm4x_read64(csa, TRCCIDCVRn(i));
for (i = 0; i < drvdata->numvmidc; i++)
- state->trcvmidcvr[i] = readq(drvdata->base + TRCVMIDCVRn(i));
+ state->trcvmidcvr[i] = etm4x_read64(csa, TRCVMIDCVRn(i));
- state->trccidcctlr0 = readl(drvdata->base + TRCCIDCCTLR0);
+ state->trccidcctlr0 = etm4x_read32(csa, TRCCIDCCTLR0);
if (drvdata->numcidc > 4)
- state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
+ state->trccidcctlr1 = etm4x_read32(csa, TRCCIDCCTLR1);
- state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0);
+ state->trcvmidcctlr0 = etm4x_read32(csa, TRCVMIDCCTLR0);
if (drvdata->numvmidc > 4)
- state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1);
+ state->trcvmidcctlr1 = etm4x_read32(csa, TRCVMIDCCTLR1);
- state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR);
+ state->trcclaimset = etm4x_read32(csa, TRCCLAIMCLR);
- state->trcpdcr = readl(drvdata->base + TRCPDCR);
+ if (!drvdata->skip_power_up)
+ state->trcpdcr = etm4x_read32(csa, TRCPDCR);
/* wait for TRCSTATR.IDLE to go up */
- if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
+ if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
etm4_os_unlock(drvdata);
@@ -1373,11 +1565,11 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
* potentially save power on systems that respect the TRCPDCR_PU
* despite requesting software to save/restore state.
*/
- writel_relaxed((state->trcpdcr & ~TRCPDCR_PU),
- drvdata->base + TRCPDCR);
-
+ if (!drvdata->skip_power_up)
+ etm4x_relaxed_write32(csa, (state->trcpdcr & ~TRCPDCR_PU),
+ TRCPDCR);
out:
- CS_LOCK(drvdata->base);
+ etm4_cs_lock(drvdata, csa);
return ret;
}
@@ -1385,91 +1577,83 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
{
int i;
struct etmv4_save_state *state = drvdata->save_state;
+ struct csdev_access tmp_csa = CSDEV_ACCESS_IOMEM(drvdata->base);
+ struct csdev_access *csa = &tmp_csa;
- CS_UNLOCK(drvdata->base);
-
- writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
+ etm4_cs_unlock(drvdata, csa);
+ etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);
- writel_relaxed(state->trcprgctlr, drvdata->base + TRCPRGCTLR);
+ etm4x_relaxed_write32(csa, state->trcprgctlr, TRCPRGCTLR);
if (drvdata->nr_pe)
- writel_relaxed(state->trcprocselr, drvdata->base + TRCPROCSELR);
- writel_relaxed(state->trcconfigr, drvdata->base + TRCCONFIGR);
- writel_relaxed(state->trcauxctlr, drvdata->base + TRCAUXCTLR);
- writel_relaxed(state->trceventctl0r, drvdata->base + TRCEVENTCTL0R);
- writel_relaxed(state->trceventctl1r, drvdata->base + TRCEVENTCTL1R);
- writel_relaxed(state->trcstallctlr, drvdata->base + TRCSTALLCTLR);
- writel_relaxed(state->trctsctlr, drvdata->base + TRCTSCTLR);
- writel_relaxed(state->trcsyncpr, drvdata->base + TRCSYNCPR);
- writel_relaxed(state->trcccctlr, drvdata->base + TRCCCCTLR);
- writel_relaxed(state->trcbbctlr, drvdata->base + TRCBBCTLR);
- writel_relaxed(state->trctraceidr, drvdata->base + TRCTRACEIDR);
- writel_relaxed(state->trcqctlr, drvdata->base + TRCQCTLR);
-
- writel_relaxed(state->trcvictlr, drvdata->base + TRCVICTLR);
- writel_relaxed(state->trcviiectlr, drvdata->base + TRCVIIECTLR);
- writel_relaxed(state->trcvissctlr, drvdata->base + TRCVISSCTLR);
+ etm4x_relaxed_write32(csa, state->trcprocselr, TRCPROCSELR);
+ etm4x_relaxed_write32(csa, state->trcconfigr, TRCCONFIGR);
+ etm4x_relaxed_write32(csa, state->trcauxctlr, TRCAUXCTLR);
+ etm4x_relaxed_write32(csa, state->trceventctl0r, TRCEVENTCTL0R);
+ etm4x_relaxed_write32(csa, state->trceventctl1r, TRCEVENTCTL1R);
+ if (drvdata->stallctl)
+ etm4x_relaxed_write32(csa, state->trcstallctlr, TRCSTALLCTLR);
+ etm4x_relaxed_write32(csa, state->trctsctlr, TRCTSCTLR);
+ etm4x_relaxed_write32(csa, state->trcsyncpr, TRCSYNCPR);
+ etm4x_relaxed_write32(csa, state->trcccctlr, TRCCCCTLR);
+ etm4x_relaxed_write32(csa, state->trcbbctlr, TRCBBCTLR);
+ etm4x_relaxed_write32(csa, state->trctraceidr, TRCTRACEIDR);
+ etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR);
+
+ etm4x_relaxed_write32(csa, state->trcvictlr, TRCVICTLR);
+ etm4x_relaxed_write32(csa, state->trcviiectlr, TRCVIIECTLR);
+ etm4x_relaxed_write32(csa, state->trcvissctlr, TRCVISSCTLR);
if (drvdata->nr_pe_cmp)
- writel_relaxed(state->trcvipcssctlr, drvdata->base + TRCVIPCSSCTLR);
- writel_relaxed(state->trcvdctlr, drvdata->base + TRCVDCTLR);
- writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR);
- writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR);
+ etm4x_relaxed_write32(csa, state->trcvipcssctlr, TRCVIPCSSCTLR);
+ etm4x_relaxed_write32(csa, state->trcvdctlr, TRCVDCTLR);
+ etm4x_relaxed_write32(csa, state->trcvdsacctlr, TRCVDSACCTLR);
+ etm4x_relaxed_write32(csa, state->trcvdarcctlr, TRCVDARCCTLR);
for (i = 0; i < drvdata->nrseqstate - 1; i++)
- writel_relaxed(state->trcseqevr[i],
- drvdata->base + TRCSEQEVRn(i));
+ etm4x_relaxed_write32(csa, state->trcseqevr[i], TRCSEQEVRn(i));
- writel_relaxed(state->trcseqrstevr, drvdata->base + TRCSEQRSTEVR);
- writel_relaxed(state->trcseqstr, drvdata->base + TRCSEQSTR);
- writel_relaxed(state->trcextinselr, drvdata->base + TRCEXTINSELR);
+ etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
+ etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
+ etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
- writel_relaxed(state->trccntrldvr[i],
- drvdata->base + TRCCNTRLDVRn(i));
- writel_relaxed(state->trccntctlr[i],
- drvdata->base + TRCCNTCTLRn(i));
- writel_relaxed(state->trccntvr[i],
- drvdata->base + TRCCNTVRn(i));
+ etm4x_relaxed_write32(csa, state->trccntrldvr[i], TRCCNTRLDVRn(i));
+ etm4x_relaxed_write32(csa, state->trccntctlr[i], TRCCNTCTLRn(i));
+ etm4x_relaxed_write32(csa, state->trccntvr[i], TRCCNTVRn(i));
}
for (i = 0; i < drvdata->nr_resource * 2; i++)
- writel_relaxed(state->trcrsctlr[i],
- drvdata->base + TRCRSCTLRn(i));
+ etm4x_relaxed_write32(csa, state->trcrsctlr[i], TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
- writel_relaxed(state->trcssccr[i],
- drvdata->base + TRCSSCCRn(i));
- writel_relaxed(state->trcsscsr[i],
- drvdata->base + TRCSSCSRn(i));
- writel_relaxed(state->trcsspcicr[i],
- drvdata->base + TRCSSPCICRn(i));
+ etm4x_relaxed_write32(csa, state->trcssccr[i], TRCSSCCRn(i));
+ etm4x_relaxed_write32(csa, state->trcsscsr[i], TRCSSCSRn(i));
+ if (etm4x_sspcicrn_present(drvdata, i))
+ etm4x_relaxed_write32(csa, state->trcsspcicr[i], TRCSSPCICRn(i));
}
for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
- writeq_relaxed(state->trcacvr[i],
- drvdata->base + TRCACVRn(i));
- writeq_relaxed(state->trcacatr[i],
- drvdata->base + TRCACATRn(i));
+ etm4x_relaxed_write64(csa, state->trcacvr[i], TRCACVRn(i));
+ etm4x_relaxed_write64(csa, state->trcacatr[i], TRCACATRn(i));
}
for (i = 0; i < drvdata->numcidc; i++)
- writeq_relaxed(state->trccidcvr[i],
- drvdata->base + TRCCIDCVRn(i));
+ etm4x_relaxed_write64(csa, state->trccidcvr[i], TRCCIDCVRn(i));
for (i = 0; i < drvdata->numvmidc; i++)
- writeq_relaxed(state->trcvmidcvr[i],
- drvdata->base + TRCVMIDCVRn(i));
+ etm4x_relaxed_write64(csa, state->trcvmidcvr[i], TRCVMIDCVRn(i));
- writel_relaxed(state->trccidcctlr0, drvdata->base + TRCCIDCCTLR0);
+ etm4x_relaxed_write32(csa, state->trccidcctlr0, TRCCIDCCTLR0);
if (drvdata->numcidc > 4)
- writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1);
+ etm4x_relaxed_write32(csa, state->trccidcctlr1, TRCCIDCCTLR1);
- writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0);
+ etm4x_relaxed_write32(csa, state->trcvmidcctlr0, TRCVMIDCCTLR0);
if (drvdata->numvmidc > 4)
- writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1);
+ etm4x_relaxed_write32(csa, state->trcvmidcctlr1, TRCVMIDCCTLR1);
- writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
+ etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);
- writel_relaxed(state->trcpdcr, drvdata->base + TRCPDCR);
+ if (!drvdata->skip_power_up)
+ etm4x_relaxed_write32(csa, state->trcpdcr, TRCPDCR);
drvdata->state_needs_restore = false;
@@ -1482,7 +1666,7 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
/* Unlock the OS lock to re-enable trace and external debug access */
etm4_os_unlock(drvdata);
- CS_LOCK(drvdata->base);
+ etm4_cs_lock(drvdata, csa);
}
static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
@@ -1569,15 +1753,13 @@ static void etm4_pm_clear(void)
}
}
-static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
+static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
{
int ret;
- void __iomem *base;
- struct device *dev = &adev->dev;
struct coresight_platform_data *pdata = NULL;
struct etmv4_drvdata *drvdata;
- struct resource *res = &adev->res;
struct coresight_desc desc = { 0 };
+ struct etm4_init_arg init_arg = { 0 };
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
@@ -1596,14 +1778,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
return -ENOMEM;
}
- if (fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
- drvdata->skip_power_up = true;
-
- /* Validity for the resource is already checked by the AMBA core */
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
drvdata->base = base;
spin_lock_init(&drvdata->spinlock);
@@ -1616,13 +1790,22 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
if (!desc.name)
return -ENOMEM;
+ init_arg.drvdata = drvdata;
+ init_arg.csa = &desc.access;
+ init_arg.pid = etm_pid;
+
if (smp_call_function_single(drvdata->cpu,
- etm4_init_arch_data, drvdata, 1))
+ etm4_init_arch_data, &init_arg, 1))
dev_err(dev, "ETM arch init failed\n");
- if (etm4_arch_supported(drvdata->arch) == false)
+ if (!drvdata->arch)
return -EINVAL;
+ /* TRCPDCR is not accessible with system instructions. */
+ if (!desc.access.io_mem ||
+ fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
+ drvdata->skip_power_up = true;
+
etm4_init_trace_id(drvdata);
etm4_set_default(&drvdata->config);
@@ -1630,7 +1813,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- adev->dev.platform_data = pdata;
+ dev->platform_data = pdata;
desc.type = CORESIGHT_DEV_TYPE_SOURCE;
desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
@@ -1650,25 +1833,61 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
etmdrvdata[drvdata->cpu] = drvdata;
- pm_runtime_put(&adev->dev);
dev_info(&drvdata->csdev->dev, "CPU%d: ETM v%d.%d initialized\n",
- drvdata->cpu, drvdata->arch >> 4, drvdata->arch & 0xf);
+ drvdata->cpu, ETM_ARCH_MAJOR_VERSION(drvdata->arch),
+ ETM_ARCH_MINOR_VERSION(drvdata->arch));
if (boot_enable) {
coresight_enable(drvdata->csdev);
drvdata->boot_enable = true;
}
- etm4_check_arch_features(drvdata, id->id);
-
return 0;
}
+static int etm4_probe_amba(struct amba_device *adev, const struct amba_id *id)
+{
+ void __iomem *base;
+ struct device *dev = &adev->dev;
+ struct resource *res = &adev->res;
+ int ret;
+
+ /* Validity for the resource is already checked by the AMBA core */
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ret = etm4_probe(dev, base, id->id);
+ if (!ret)
+ pm_runtime_put(&adev->dev);
+
+ return ret;
+}
+
+static int etm4_probe_platform_dev(struct platform_device *pdev)
+{
+ int ret;
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ /*
+ * System register based devices can identify the trace
+ * unit by reading the appropriate ID registers directly,
+ * so the PID can be skipped here.
+ */
+ ret = etm4_probe(&pdev->dev, NULL, 0);
+
+ pm_runtime_put(&pdev->dev);
+ return ret;
+}
+
static struct amba_cs_uci_id uci_id_etm4[] = {
{
/* ETMv4 UCI data */
- .devarch = 0x47704a13,
- .devarch_mask = 0xfff0ffff,
+ .devarch = ETM_DEVARCH_ETMv4x_ARCH,
+ .devarch_mask = ETM_DEVARCH_ID_MASK,
.devtype = 0x00000013,
}
};
@@ -1680,15 +1899,12 @@ static void clear_etmdrvdata(void *info)
etmdrvdata[cpu] = NULL;
}
-static int etm4_remove(struct amba_device *adev)
+static int __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
{
- struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);
-
etm_perf_symlink(drvdata->csdev, false);
-
/*
- * Taking hotplug lock here to avoid racing between etm4_remove and
- * CPU hotplug call backs.
+ * Taking hotplug lock here to avoid racing between etm4_remove_dev()
+ * and CPU hotplug call backs.
*/
cpus_read_lock();
/*
@@ -1707,12 +1923,33 @@ static int etm4_remove(struct amba_device *adev)
return 0;
}
+static void __exit etm4_remove_amba(struct amba_device *adev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+
+ if (drvdata)
+ etm4_remove_dev(drvdata);
+}
+
+static int __exit etm4_remove_platform_dev(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
+
+ if (drvdata)
+ ret = etm4_remove_dev(drvdata);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
static const struct amba_id etm4_ids[] = {
CS_AMBA_ID(0x000bb95d), /* Cortex-A53 */
CS_AMBA_ID(0x000bb95e), /* Cortex-A57 */
CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */
CS_AMBA_ID(0x000bb959), /* Cortex-A73 */
CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4),/* Cortex-A35 */
+ CS_AMBA_UCI_ID(0x000bbd05, uci_id_etm4),/* Cortex-A55 */
+ CS_AMBA_UCI_ID(0x000bbd0a, uci_id_etm4),/* Cortex-A75 */
CS_AMBA_UCI_ID(0x000bbd0c, uci_id_etm4),/* Neoverse N1 */
CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */
CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
@@ -1728,17 +1965,32 @@ static const struct amba_id etm4_ids[] = {
MODULE_DEVICE_TABLE(amba, etm4_ids);
-static struct amba_driver etm4x_driver = {
+static struct amba_driver etm4x_amba_driver = {
.drv = {
.name = "coresight-etm4x",
.owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
- .probe = etm4_probe,
- .remove = etm4_remove,
+ .probe = etm4_probe_amba,
+ .remove = etm4_remove_amba,
.id_table = etm4_ids,
};
+static const struct of_device_id etm4_sysreg_match[] = {
+ { .compatible = "arm,coresight-etm4x-sysreg" },
+ {}
+};
+
+static struct platform_driver etm4_platform_driver = {
+ .probe = etm4_probe_platform_dev,
+ .remove = etm4_remove_platform_dev,
+ .driver = {
+ .name = "coresight-etm4x",
+ .of_match_table = etm4_sysreg_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
static int __init etm4x_init(void)
{
int ret;
@@ -1749,18 +2001,28 @@ static int __init etm4x_init(void)
if (ret)
return ret;
- ret = amba_driver_register(&etm4x_driver);
+ ret = amba_driver_register(&etm4x_amba_driver);
if (ret) {
- pr_err("Error registering etm4x driver\n");
- etm4_pm_clear();
+ pr_err("Error registering etm4x AMBA driver\n");
+ goto clear_pm;
}
+ ret = platform_driver_register(&etm4_platform_driver);
+ if (!ret)
+ return 0;
+
+ pr_err("Error registering etm4x platform driver\n");
+ amba_driver_unregister(&etm4x_amba_driver);
+
+clear_pm:
+ etm4_pm_clear();
return ret;
}
static void __exit etm4x_exit(void)
{
- amba_driver_unregister(&etm4x_driver);
+ amba_driver_unregister(&etm4x_amba_driver);
+ platform_driver_unregister(&etm4_platform_driver);
etm4_pm_clear();
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 989ce7b8ade7..0995a10790f4 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -389,7 +389,7 @@ static ssize_t mode_store(struct device *dev,
config->eventctrl1 &= ~BIT(12);
/* bit[8], Instruction stall bit */
- if (config->mode & ETM_MODE_ISTALL_EN)
+ if ((config->mode & ETM_MODE_ISTALL_EN) && drvdata->stallctl)
config->stall_ctrl |= BIT(8);
else
config->stall_ctrl &= ~BIT(8);
@@ -743,7 +743,7 @@ static ssize_t s_exlevel_vinst_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- val = (config->vinst_ctrl & ETM_EXLEVEL_S_VICTLR_MASK) >> 16;
+ val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_S_MASK) >> TRCVICTLR_EXLEVEL_S_SHIFT;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -760,10 +760,10 @@ static ssize_t s_exlevel_vinst_store(struct device *dev,
spin_lock(&drvdata->spinlock);
/* clear all EXLEVEL_S bits */
- config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK);
+ config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_S_MASK);
/* enable instruction tracing for corresponding exception level */
val &= drvdata->s_ex_level;
- config->vinst_ctrl |= (val << 16);
+ config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_S_SHIFT);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -778,7 +778,7 @@ static ssize_t ns_exlevel_vinst_show(struct device *dev,
struct etmv4_config *config = &drvdata->config;
/* EXLEVEL_NS, bits[23:20] */
- val = (config->vinst_ctrl & ETM_EXLEVEL_NS_VICTLR_MASK) >> 20;
+ val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_NS_MASK) >> TRCVICTLR_EXLEVEL_NS_SHIFT;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -795,10 +795,10 @@ static ssize_t ns_exlevel_vinst_store(struct device *dev,
spin_lock(&drvdata->spinlock);
/* clear EXLEVEL_NS bits */
- config->vinst_ctrl &= ~(ETM_EXLEVEL_NS_VICTLR_MASK);
+ config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_NS_MASK);
/* enable instruction tracing for corresponding exception level */
val &= drvdata->ns_ex_level;
- config->vinst_ctrl |= (val << 20);
+ config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_NS_SHIFT);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -2319,7 +2319,8 @@ static struct attribute *coresight_etmv4_attrs[] = {
};
struct etmv4_reg {
- void __iomem *addr;
+ struct coresight_device *csdev;
+ u32 offset;
u32 data;
};
@@ -2327,15 +2328,16 @@ static void do_smp_cross_read(void *data)
{
struct etmv4_reg *reg = data;
- reg->data = readl_relaxed(reg->addr);
+ reg->data = etm4x_relaxed_read32(&reg->csdev->access, reg->offset);
}
-static u32 etmv4_cross_read(const struct device *dev, u32 offset)
+static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset)
{
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
struct etmv4_reg reg;
- reg.addr = drvdata->base + offset;
+ reg.offset = offset;
+ reg.csdev = drvdata->csdev;
+
/*
* smp cross call ensures the CPU will be powered up before
* accessing the ETMv4 trace core registers
@@ -2344,72 +2346,120 @@ static u32 etmv4_cross_read(const struct device *dev, u32 offset)
return reg.data;
}
-#define coresight_etm4x_reg(name, offset) \
- coresight_simple_reg32(struct etmv4_drvdata, name, offset)
+static inline u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ return (u32)(unsigned long)eattr->var;
+}
+
+static ssize_t coresight_etm4x_reg_show(struct device *dev,
+ struct device_attribute *d_attr,
+ char *buf)
+{
+ u32 val, offset;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-#define coresight_etm4x_cross_read(name, offset) \
- coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read, \
- name, offset)
+ offset = coresight_etm4x_attr_to_offset(d_attr);
-coresight_etm4x_reg(trcpdcr, TRCPDCR);
-coresight_etm4x_reg(trcpdsr, TRCPDSR);
-coresight_etm4x_reg(trclsr, TRCLSR);
-coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
-coresight_etm4x_reg(trcdevid, TRCDEVID);
-coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
-coresight_etm4x_reg(trcpidr0, TRCPIDR0);
-coresight_etm4x_reg(trcpidr1, TRCPIDR1);
-coresight_etm4x_reg(trcpidr2, TRCPIDR2);
-coresight_etm4x_reg(trcpidr3, TRCPIDR3);
-coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
-coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
-coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
+ pm_runtime_get_sync(dev->parent);
+ val = etmv4_cross_read(drvdata, offset);
+ pm_runtime_put_sync(dev->parent);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
+}
+
+static inline bool
+etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
+{
+ switch (offset) {
+ ETM4x_SYSREG_LIST_CASES
+ /*
+ * Registers accessible via system instructions are always
+ * implemented.
+ */
+ return true;
+ ETM4x_MMAP_LIST_CASES
+ /*
+ * Registers accessible only via the memory-mapped
+ * interface must not be accessed via system instructions.
+ * We cannot use drvdata->csdev here, as this function is
+ * called during device creation, via coresight_register(),
+ * before the csdev is initialized. So rely on
+ * drvdata->base to detect whether we have memory-mapped
+ * access.
+ */
+ return !!drvdata->base;
+ }
+
+ return false;
+}
+
+/*
+ * Hide the ETM4x registers that may not be available on the
+ * hardware. Certain management registers are inaccessible via
+ * system instructions; hide the corresponding sysfs attributes
+ * on such systems.
+ */
+static umode_t
+coresight_etm4x_attr_reg_implemented(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct device_attribute *d_attr;
+ u32 offset;
+
+ d_attr = container_of(attr, struct device_attribute, attr);
+ offset = coresight_etm4x_attr_to_offset(d_attr);
+
+ if (etm4x_register_implemented(drvdata, offset))
+ return attr->mode;
+ return 0;
+}
+
+#define coresight_etm4x_reg(name, offset) \
+ &((struct dev_ext_attribute[]) { \
+ { \
+ __ATTR(name, 0444, coresight_etm4x_reg_show, NULL), \
+ (void *)(unsigned long)offset \
+ } \
+ })[0].attr.attr
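+/*
+ * As an illustration of the macro above: coresight_etm4x_reg(trcpdcr,
+ * TRCPDCR) creates a read-only "trcpdcr" attribute whose ->var field
+ * carries the TRCPDCR register offset. coresight_etm4x_reg_show()
+ * recovers that offset via coresight_etm4x_attr_to_offset() and reads
+ * the register on the owning CPU through etmv4_cross_read().
+ */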
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
- &dev_attr_trcoslsr.attr,
- &dev_attr_trcpdcr.attr,
- &dev_attr_trcpdsr.attr,
- &dev_attr_trclsr.attr,
- &dev_attr_trcconfig.attr,
- &dev_attr_trctraceid.attr,
- &dev_attr_trcauthstatus.attr,
- &dev_attr_trcdevid.attr,
- &dev_attr_trcdevtype.attr,
- &dev_attr_trcpidr0.attr,
- &dev_attr_trcpidr1.attr,
- &dev_attr_trcpidr2.attr,
- &dev_attr_trcpidr3.attr,
+ coresight_etm4x_reg(trcpdcr, TRCPDCR),
+ coresight_etm4x_reg(trcpdsr, TRCPDSR),
+ coresight_etm4x_reg(trclsr, TRCLSR),
+ coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS),
+ coresight_etm4x_reg(trcdevid, TRCDEVID),
+ coresight_etm4x_reg(trcdevtype, TRCDEVTYPE),
+ coresight_etm4x_reg(trcpidr0, TRCPIDR0),
+ coresight_etm4x_reg(trcpidr1, TRCPIDR1),
+ coresight_etm4x_reg(trcpidr2, TRCPIDR2),
+ coresight_etm4x_reg(trcpidr3, TRCPIDR3),
+ coresight_etm4x_reg(trcoslsr, TRCOSLSR),
+ coresight_etm4x_reg(trcconfig, TRCCONFIGR),
+ coresight_etm4x_reg(trctraceid, TRCTRACEIDR),
+ coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
NULL,
};
-coresight_etm4x_cross_read(trcidr0, TRCIDR0);
-coresight_etm4x_cross_read(trcidr1, TRCIDR1);
-coresight_etm4x_cross_read(trcidr2, TRCIDR2);
-coresight_etm4x_cross_read(trcidr3, TRCIDR3);
-coresight_etm4x_cross_read(trcidr4, TRCIDR4);
-coresight_etm4x_cross_read(trcidr5, TRCIDR5);
-/* trcidr[6,7] are reserved */
-coresight_etm4x_cross_read(trcidr8, TRCIDR8);
-coresight_etm4x_cross_read(trcidr9, TRCIDR9);
-coresight_etm4x_cross_read(trcidr10, TRCIDR10);
-coresight_etm4x_cross_read(trcidr11, TRCIDR11);
-coresight_etm4x_cross_read(trcidr12, TRCIDR12);
-coresight_etm4x_cross_read(trcidr13, TRCIDR13);
-
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
- &dev_attr_trcidr0.attr,
- &dev_attr_trcidr1.attr,
- &dev_attr_trcidr2.attr,
- &dev_attr_trcidr3.attr,
- &dev_attr_trcidr4.attr,
- &dev_attr_trcidr5.attr,
+ coresight_etm4x_reg(trcidr0, TRCIDR0),
+ coresight_etm4x_reg(trcidr1, TRCIDR1),
+ coresight_etm4x_reg(trcidr2, TRCIDR2),
+ coresight_etm4x_reg(trcidr3, TRCIDR3),
+ coresight_etm4x_reg(trcidr4, TRCIDR4),
+ coresight_etm4x_reg(trcidr5, TRCIDR5),
/* trcidr[6,7] are reserved */
- &dev_attr_trcidr8.attr,
- &dev_attr_trcidr9.attr,
- &dev_attr_trcidr10.attr,
- &dev_attr_trcidr11.attr,
- &dev_attr_trcidr12.attr,
- &dev_attr_trcidr13.attr,
+ coresight_etm4x_reg(trcidr8, TRCIDR8),
+ coresight_etm4x_reg(trcidr9, TRCIDR9),
+ coresight_etm4x_reg(trcidr10, TRCIDR10),
+ coresight_etm4x_reg(trcidr11, TRCIDR11),
+ coresight_etm4x_reg(trcidr12, TRCIDR12),
+ coresight_etm4x_reg(trcidr13, TRCIDR13),
NULL,
};
@@ -2418,6 +2468,7 @@ static const struct attribute_group coresight_etmv4_group = {
};
static const struct attribute_group coresight_etmv4_mgmt_group = {
+ .is_visible = coresight_etm4x_attr_reg_implemented,
.attrs = coresight_etmv4_mgmt_attrs,
.name = "mgmt",
};
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 3dd3e0633328..0af60571aa23 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -45,13 +45,13 @@
#define TRCVDSACCTLR 0x0A4
#define TRCVDARCCTLR 0x0A8
/* Derived resources registers */
-#define TRCSEQEVRn(n) (0x100 + (n * 4))
+#define TRCSEQEVRn(n) (0x100 + (n * 4)) /* n = 0-2 */
#define TRCSEQRSTEVR 0x118
#define TRCSEQSTR 0x11C
#define TRCEXTINSELR 0x120
-#define TRCCNTRLDVRn(n) (0x140 + (n * 4))
-#define TRCCNTCTLRn(n) (0x150 + (n * 4))
-#define TRCCNTVRn(n) (0x160 + (n * 4))
+#define TRCCNTRLDVRn(n) (0x140 + (n * 4)) /* n = 0-3 */
+#define TRCCNTCTLRn(n) (0x150 + (n * 4)) /* n = 0-3 */
+#define TRCCNTVRn(n) (0x160 + (n * 4)) /* n = 0-3 */
/* ID registers */
#define TRCIDR8 0x180
#define TRCIDR9 0x184
@@ -60,7 +60,7 @@
#define TRCIDR12 0x190
#define TRCIDR13 0x194
#define TRCIMSPEC0 0x1C0
-#define TRCIMSPECn(n) (0x1C0 + (n * 4))
+#define TRCIMSPECn(n) (0x1C0 + (n * 4)) /* n = 1-7 */
#define TRCIDR0 0x1E0
#define TRCIDR1 0x1E4
#define TRCIDR2 0x1E8
@@ -69,9 +69,12 @@
#define TRCIDR5 0x1F4
#define TRCIDR6 0x1F8
#define TRCIDR7 0x1FC
-/* Resource selection registers */
+/*
+ * Resource selection registers, n = 2-31.
+ * First pair (regs 0, 1) is always present and is reserved.
+ */
#define TRCRSCTLRn(n) (0x200 + (n * 4))
-/* Single-shot comparator registers */
+/* Single-shot comparator registers, n = 0-7 */
#define TRCSSCCRn(n) (0x280 + (n * 4))
#define TRCSSCSRn(n) (0x2A0 + (n * 4))
#define TRCSSPCICRn(n) (0x2C0 + (n * 4))
@@ -81,11 +84,13 @@
#define TRCPDCR 0x310
#define TRCPDSR 0x314
/* Trace registers (0x318-0xEFC) */
-/* Comparator registers */
+/* Address Comparator registers n = 0-15 */
#define TRCACVRn(n) (0x400 + (n * 8))
#define TRCACATRn(n) (0x480 + (n * 8))
+/* Data Value Comparator Value registers, n = 0-7 */
#define TRCDVCVRn(n) (0x500 + (n * 16))
#define TRCDVCMRn(n) (0x580 + (n * 16))
+/* ContextID/Virtual ContextID comparators, n = 0-7 */
#define TRCCIDCVRn(n) (0x600 + (n * 8))
#define TRCVMIDCVRn(n) (0x640 + (n * 8))
#define TRCCIDCCTLR0 0x680
@@ -121,6 +126,332 @@
#define TRCCIDR2 0xFF8
#define TRCCIDR3 0xFFC
+/*
+ * System instructions to access ETM registers.
+ * See ETMv4.4 spec ARM IHI0064F section 4.3.6 System instructions
+ */
+#define ETM4x_OFFSET_TO_REG(x) ((x) >> 2)
+
+#define ETM4x_CRn(n) (((n) >> 7) & 0x7)
+#define ETM4x_Op2(n) (((n) >> 4) & 0x7)
+#define ETM4x_CRm(n) ((n) & 0xf)
+
+#include <asm/sysreg.h>
+#define ETM4x_REG_NUM_TO_SYSREG(n) \
+ sys_reg(2, 1, ETM4x_CRn(n), ETM4x_CRm(n), ETM4x_Op2(n))
+
+#define READ_ETM4x_REG(reg) \
+ read_sysreg_s(ETM4x_REG_NUM_TO_SYSREG((reg)))
+#define WRITE_ETM4x_REG(val, reg) \
+ write_sysreg_s(val, ETM4x_REG_NUM_TO_SYSREG((reg)))
+
+#define read_etm4x_sysreg_const_offset(offset) \
+ READ_ETM4x_REG(ETM4x_OFFSET_TO_REG(offset))
+
+#define write_etm4x_sysreg_const_offset(val, offset) \
+ WRITE_ETM4x_REG(val, ETM4x_OFFSET_TO_REG(offset))
+
+#define CASE_READ(res, x) \
+ case (x): { (res) = read_etm4x_sysreg_const_offset((x)); break; }
+
+#define CASE_WRITE(val, x) \
+ case (x): { write_etm4x_sysreg_const_offset((val), (x)); break; }
+
+#define CASE_NOP(__unused, x) \
+ case (x): /* fall through */
+
+/* List of registers accessible via System instructions */
+#define ETM_SYSREG_LIST(op, val) \
+ CASE_##op((val), TRCPRGCTLR) \
+ CASE_##op((val), TRCPROCSELR) \
+ CASE_##op((val), TRCSTATR) \
+ CASE_##op((val), TRCCONFIGR) \
+ CASE_##op((val), TRCAUXCTLR) \
+ CASE_##op((val), TRCEVENTCTL0R) \
+ CASE_##op((val), TRCEVENTCTL1R) \
+ CASE_##op((val), TRCSTALLCTLR) \
+ CASE_##op((val), TRCTSCTLR) \
+ CASE_##op((val), TRCSYNCPR) \
+ CASE_##op((val), TRCCCCTLR) \
+ CASE_##op((val), TRCBBCTLR) \
+ CASE_##op((val), TRCTRACEIDR) \
+ CASE_##op((val), TRCQCTLR) \
+ CASE_##op((val), TRCVICTLR) \
+ CASE_##op((val), TRCVIIECTLR) \
+ CASE_##op((val), TRCVISSCTLR) \
+ CASE_##op((val), TRCVIPCSSCTLR) \
+ CASE_##op((val), TRCVDCTLR) \
+ CASE_##op((val), TRCVDSACCTLR) \
+ CASE_##op((val), TRCVDARCCTLR) \
+ CASE_##op((val), TRCSEQEVRn(0)) \
+ CASE_##op((val), TRCSEQEVRn(1)) \
+ CASE_##op((val), TRCSEQEVRn(2)) \
+ CASE_##op((val), TRCSEQRSTEVR) \
+ CASE_##op((val), TRCSEQSTR) \
+ CASE_##op((val), TRCEXTINSELR) \
+ CASE_##op((val), TRCCNTRLDVRn(0)) \
+ CASE_##op((val), TRCCNTRLDVRn(1)) \
+ CASE_##op((val), TRCCNTRLDVRn(2)) \
+ CASE_##op((val), TRCCNTRLDVRn(3)) \
+ CASE_##op((val), TRCCNTCTLRn(0)) \
+ CASE_##op((val), TRCCNTCTLRn(1)) \
+ CASE_##op((val), TRCCNTCTLRn(2)) \
+ CASE_##op((val), TRCCNTCTLRn(3)) \
+ CASE_##op((val), TRCCNTVRn(0)) \
+ CASE_##op((val), TRCCNTVRn(1)) \
+ CASE_##op((val), TRCCNTVRn(2)) \
+ CASE_##op((val), TRCCNTVRn(3)) \
+ CASE_##op((val), TRCIDR8) \
+ CASE_##op((val), TRCIDR9) \
+ CASE_##op((val), TRCIDR10) \
+ CASE_##op((val), TRCIDR11) \
+ CASE_##op((val), TRCIDR12) \
+ CASE_##op((val), TRCIDR13) \
+ CASE_##op((val), TRCIMSPECn(0)) \
+ CASE_##op((val), TRCIMSPECn(1)) \
+ CASE_##op((val), TRCIMSPECn(2)) \
+ CASE_##op((val), TRCIMSPECn(3)) \
+ CASE_##op((val), TRCIMSPECn(4)) \
+ CASE_##op((val), TRCIMSPECn(5)) \
+ CASE_##op((val), TRCIMSPECn(6)) \
+ CASE_##op((val), TRCIMSPECn(7)) \
+ CASE_##op((val), TRCIDR0) \
+ CASE_##op((val), TRCIDR1) \
+ CASE_##op((val), TRCIDR2) \
+ CASE_##op((val), TRCIDR3) \
+ CASE_##op((val), TRCIDR4) \
+ CASE_##op((val), TRCIDR5) \
+ CASE_##op((val), TRCIDR6) \
+ CASE_##op((val), TRCIDR7) \
+ CASE_##op((val), TRCRSCTLRn(2)) \
+ CASE_##op((val), TRCRSCTLRn(3)) \
+ CASE_##op((val), TRCRSCTLRn(4)) \
+ CASE_##op((val), TRCRSCTLRn(5)) \
+ CASE_##op((val), TRCRSCTLRn(6)) \
+ CASE_##op((val), TRCRSCTLRn(7)) \
+ CASE_##op((val), TRCRSCTLRn(8)) \
+ CASE_##op((val), TRCRSCTLRn(9)) \
+ CASE_##op((val), TRCRSCTLRn(10)) \
+ CASE_##op((val), TRCRSCTLRn(11)) \
+ CASE_##op((val), TRCRSCTLRn(12)) \
+ CASE_##op((val), TRCRSCTLRn(13)) \
+ CASE_##op((val), TRCRSCTLRn(14)) \
+ CASE_##op((val), TRCRSCTLRn(15)) \
+ CASE_##op((val), TRCRSCTLRn(16)) \
+ CASE_##op((val), TRCRSCTLRn(17)) \
+ CASE_##op((val), TRCRSCTLRn(18)) \
+ CASE_##op((val), TRCRSCTLRn(19)) \
+ CASE_##op((val), TRCRSCTLRn(20)) \
+ CASE_##op((val), TRCRSCTLRn(21)) \
+ CASE_##op((val), TRCRSCTLRn(22)) \
+ CASE_##op((val), TRCRSCTLRn(23)) \
+ CASE_##op((val), TRCRSCTLRn(24)) \
+ CASE_##op((val), TRCRSCTLRn(25)) \
+ CASE_##op((val), TRCRSCTLRn(26)) \
+ CASE_##op((val), TRCRSCTLRn(27)) \
+ CASE_##op((val), TRCRSCTLRn(28)) \
+ CASE_##op((val), TRCRSCTLRn(29)) \
+ CASE_##op((val), TRCRSCTLRn(30)) \
+ CASE_##op((val), TRCRSCTLRn(31)) \
+ CASE_##op((val), TRCSSCCRn(0)) \
+ CASE_##op((val), TRCSSCCRn(1)) \
+ CASE_##op((val), TRCSSCCRn(2)) \
+ CASE_##op((val), TRCSSCCRn(3)) \
+ CASE_##op((val), TRCSSCCRn(4)) \
+ CASE_##op((val), TRCSSCCRn(5)) \
+ CASE_##op((val), TRCSSCCRn(6)) \
+ CASE_##op((val), TRCSSCCRn(7)) \
+ CASE_##op((val), TRCSSCSRn(0)) \
+ CASE_##op((val), TRCSSCSRn(1)) \
+ CASE_##op((val), TRCSSCSRn(2)) \
+ CASE_##op((val), TRCSSCSRn(3)) \
+ CASE_##op((val), TRCSSCSRn(4)) \
+ CASE_##op((val), TRCSSCSRn(5)) \
+ CASE_##op((val), TRCSSCSRn(6)) \
+ CASE_##op((val), TRCSSCSRn(7)) \
+ CASE_##op((val), TRCSSPCICRn(0)) \
+ CASE_##op((val), TRCSSPCICRn(1)) \
+ CASE_##op((val), TRCSSPCICRn(2)) \
+ CASE_##op((val), TRCSSPCICRn(3)) \
+ CASE_##op((val), TRCSSPCICRn(4)) \
+ CASE_##op((val), TRCSSPCICRn(5)) \
+ CASE_##op((val), TRCSSPCICRn(6)) \
+ CASE_##op((val), TRCSSPCICRn(7)) \
+ CASE_##op((val), TRCOSLAR) \
+ CASE_##op((val), TRCOSLSR) \
+ CASE_##op((val), TRCACVRn(0)) \
+ CASE_##op((val), TRCACVRn(1)) \
+ CASE_##op((val), TRCACVRn(2)) \
+ CASE_##op((val), TRCACVRn(3)) \
+ CASE_##op((val), TRCACVRn(4)) \
+ CASE_##op((val), TRCACVRn(5)) \
+ CASE_##op((val), TRCACVRn(6)) \
+ CASE_##op((val), TRCACVRn(7)) \
+ CASE_##op((val), TRCACVRn(8)) \
+ CASE_##op((val), TRCACVRn(9)) \
+ CASE_##op((val), TRCACVRn(10)) \
+ CASE_##op((val), TRCACVRn(11)) \
+ CASE_##op((val), TRCACVRn(12)) \
+ CASE_##op((val), TRCACVRn(13)) \
+ CASE_##op((val), TRCACVRn(14)) \
+ CASE_##op((val), TRCACVRn(15)) \
+ CASE_##op((val), TRCACATRn(0)) \
+ CASE_##op((val), TRCACATRn(1)) \
+ CASE_##op((val), TRCACATRn(2)) \
+ CASE_##op((val), TRCACATRn(3)) \
+ CASE_##op((val), TRCACATRn(4)) \
+ CASE_##op((val), TRCACATRn(5)) \
+ CASE_##op((val), TRCACATRn(6)) \
+ CASE_##op((val), TRCACATRn(7)) \
+ CASE_##op((val), TRCACATRn(8)) \
+ CASE_##op((val), TRCACATRn(9)) \
+ CASE_##op((val), TRCACATRn(10)) \
+ CASE_##op((val), TRCACATRn(11)) \
+ CASE_##op((val), TRCACATRn(12)) \
+ CASE_##op((val), TRCACATRn(13)) \
+ CASE_##op((val), TRCACATRn(14)) \
+ CASE_##op((val), TRCACATRn(15)) \
+ CASE_##op((val), TRCDVCVRn(0)) \
+ CASE_##op((val), TRCDVCVRn(1)) \
+ CASE_##op((val), TRCDVCVRn(2)) \
+ CASE_##op((val), TRCDVCVRn(3)) \
+ CASE_##op((val), TRCDVCVRn(4)) \
+ CASE_##op((val), TRCDVCVRn(5)) \
+ CASE_##op((val), TRCDVCVRn(6)) \
+ CASE_##op((val), TRCDVCVRn(7)) \
+ CASE_##op((val), TRCDVCMRn(0)) \
+ CASE_##op((val), TRCDVCMRn(1)) \
+ CASE_##op((val), TRCDVCMRn(2)) \
+ CASE_##op((val), TRCDVCMRn(3)) \
+ CASE_##op((val), TRCDVCMRn(4)) \
+ CASE_##op((val), TRCDVCMRn(5)) \
+ CASE_##op((val), TRCDVCMRn(6)) \
+ CASE_##op((val), TRCDVCMRn(7)) \
+ CASE_##op((val), TRCCIDCVRn(0)) \
+ CASE_##op((val), TRCCIDCVRn(1)) \
+ CASE_##op((val), TRCCIDCVRn(2)) \
+ CASE_##op((val), TRCCIDCVRn(3)) \
+ CASE_##op((val), TRCCIDCVRn(4)) \
+ CASE_##op((val), TRCCIDCVRn(5)) \
+ CASE_##op((val), TRCCIDCVRn(6)) \
+ CASE_##op((val), TRCCIDCVRn(7)) \
+ CASE_##op((val), TRCVMIDCVRn(0)) \
+ CASE_##op((val), TRCVMIDCVRn(1)) \
+ CASE_##op((val), TRCVMIDCVRn(2)) \
+ CASE_##op((val), TRCVMIDCVRn(3)) \
+ CASE_##op((val), TRCVMIDCVRn(4)) \
+ CASE_##op((val), TRCVMIDCVRn(5)) \
+ CASE_##op((val), TRCVMIDCVRn(6)) \
+ CASE_##op((val), TRCVMIDCVRn(7)) \
+ CASE_##op((val), TRCCIDCCTLR0) \
+ CASE_##op((val), TRCCIDCCTLR1) \
+ CASE_##op((val), TRCVMIDCCTLR0) \
+ CASE_##op((val), TRCVMIDCCTLR1) \
+ CASE_##op((val), TRCCLAIMSET) \
+ CASE_##op((val), TRCCLAIMCLR) \
+ CASE_##op((val), TRCAUTHSTATUS) \
+ CASE_##op((val), TRCDEVARCH) \
+ CASE_##op((val), TRCDEVID)
+
+/* List of registers only accessible via memory-mapped interface */
+#define ETM_MMAP_LIST(op, val) \
+ CASE_##op((val), TRCDEVTYPE) \
+ CASE_##op((val), TRCPDCR) \
+ CASE_##op((val), TRCPDSR) \
+ CASE_##op((val), TRCDEVAFF0) \
+ CASE_##op((val), TRCDEVAFF1) \
+ CASE_##op((val), TRCLAR) \
+ CASE_##op((val), TRCLSR) \
+ CASE_##op((val), TRCITCTRL) \
+ CASE_##op((val), TRCPIDR4) \
+ CASE_##op((val), TRCPIDR0) \
+ CASE_##op((val), TRCPIDR1) \
+ CASE_##op((val), TRCPIDR2) \
+ CASE_##op((val), TRCPIDR3)
+
+#define ETM4x_READ_SYSREG_CASES(res) ETM_SYSREG_LIST(READ, (res))
+#define ETM4x_WRITE_SYSREG_CASES(val) ETM_SYSREG_LIST(WRITE, (val))
+
+#define ETM4x_SYSREG_LIST_CASES ETM_SYSREG_LIST(NOP, __unused)
+#define ETM4x_MMAP_LIST_CASES ETM_MMAP_LIST(NOP, __unused)
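+/*
+ * These lists expand into switch() cases. The READ/WRITE variants
+ * generate one "case offset:" arm per register, accessing the matching
+ * system register for etm4x_sysreg_read()/etm4x_sysreg_write(), while
+ * the NOP variants generate bare case labels so that helpers such as
+ * etm4x_register_implemented() can classify an offset without touching
+ * the hardware.
+ */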
+
+#define read_etm4x_sysreg_offset(offset, _64bit) \
+ ({ \
+ u64 __val; \
+ \
+ if (__builtin_constant_p((offset))) \
+ __val = read_etm4x_sysreg_const_offset((offset)); \
+ else \
+ __val = etm4x_sysreg_read((offset), true, (_64bit)); \
+ __val; \
+ })
+
+#define write_etm4x_sysreg_offset(val, offset, _64bit) \
+ do { \
+ if (__builtin_constant_p((offset))) \
+ write_etm4x_sysreg_const_offset((val), \
+ (offset)); \
+ else \
+ etm4x_sysreg_write((val), (offset), true, \
+ (_64bit)); \
+ } while (0)
+
+
+#define etm4x_relaxed_read32(csa, offset) \
+ ((u32)((csa)->io_mem ? \
+ readl_relaxed((csa)->base + (offset)) : \
+ read_etm4x_sysreg_offset((offset), false)))
+
+#define etm4x_relaxed_read64(csa, offset) \
+ ((u64)((csa)->io_mem ? \
+ readq_relaxed((csa)->base + (offset)) : \
+ read_etm4x_sysreg_offset((offset), true)))
+
+#define etm4x_read32(csa, offset) \
+ ({ \
+ u32 __val = etm4x_relaxed_read32((csa), (offset)); \
+ __iormb(__val); \
+ __val; \
+ })
+
+#define etm4x_read64(csa, offset) \
+ ({ \
+ u64 __val = etm4x_relaxed_read64((csa), (offset)); \
+ __iormb(__val); \
+ __val; \
+ })
+
+#define etm4x_relaxed_write32(csa, val, offset) \
+ do { \
+ if ((csa)->io_mem) \
+ writel_relaxed((val), (csa)->base + (offset)); \
+ else \
+ write_etm4x_sysreg_offset((val), (offset), \
+ false); \
+ } while (0)
+
+#define etm4x_relaxed_write64(csa, val, offset) \
+ do { \
+ if ((csa)->io_mem) \
+ writeq_relaxed((val), (csa)->base + (offset)); \
+ else \
+ write_etm4x_sysreg_offset((val), (offset), \
+ true); \
+ } while (0)
+
+#define etm4x_write32(csa, val, offset) \
+ do { \
+ __iowmb(); \
+ etm4x_relaxed_write32((csa), (val), (offset)); \
+ } while (0)
+
+#define etm4x_write64(csa, val, offset) \
+ do { \
+ __iowmb(); \
+ etm4x_relaxed_write64((csa), (val), (offset)); \
+ } while (0)
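+/*
+ * Usage sketch for the accessors above: a call such as
+ * etm4x_relaxed_write32(csa, state->trcconfigr, TRCCONFIGR) issues
+ * writel_relaxed() against csa->base + TRCCONFIGR when csa->io_mem is
+ * set, and otherwise falls back to a TRCCONFIGR system register write
+ * via write_etm4x_sysreg_offset(). Callers never need to know which
+ * access mechanism the trace unit provides.
+ */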
+
+
/* ETMv4 resources */
#define ETM_MAX_NR_PE 8
#define ETMv4_MAX_CNTR 4
@@ -137,7 +468,6 @@
#define ETM_MAX_RES_SEL 32
#define ETM_MAX_SS_CMP 8
-#define ETM_ARCH_V4 0x40
#define ETMv4_SYNC_MASK 0x1F
#define ETM_CYC_THRESHOLD_MASK 0xFFF
#define ETM_CYC_THRESHOLD_DEFAULT 0x100
@@ -175,34 +505,150 @@
ETM_MODE_EXCL_KERN | \
ETM_MODE_EXCL_USER)
+/*
+ * TRCDEVARCH Bit field definitions
+ * Bits[31:21] - ARCHITECT = Always Arm Ltd.
+ * * Bits[31:28] = 0x4
+ * * Bits[27:21] = 0b0111011
+ * Bit[20] - PRESENT, Indicates the presence of this register.
+ *
+ * Bits[19:16] - REVISION, Revision of the architecture.
+ *
+ * Bits[15:0] - ARCHID, Identifies this component as an ETM
+ * * Bits[15:12] - architecture version of ETM
+ * * = 4 for ETMv4
+ * * Bits[11:0] = 0xA13, architecture part number for ETM.
+ */
+#define ETM_DEVARCH_ARCHITECT_MASK GENMASK(31, 21)
+#define ETM_DEVARCH_ARCHITECT_ARM ((0x4 << 28) | (0b0111011 << 21))
+#define ETM_DEVARCH_PRESENT BIT(20)
+#define ETM_DEVARCH_REVISION_SHIFT 16
+#define ETM_DEVARCH_REVISION_MASK GENMASK(19, 16)
+#define ETM_DEVARCH_REVISION(x) \
+ (((x) & ETM_DEVARCH_REVISION_MASK) >> ETM_DEVARCH_REVISION_SHIFT)
+#define ETM_DEVARCH_ARCHID_MASK GENMASK(15, 0)
+#define ETM_DEVARCH_ARCHID_ARCH_VER_SHIFT 12
+#define ETM_DEVARCH_ARCHID_ARCH_VER_MASK GENMASK(15, 12)
+#define ETM_DEVARCH_ARCHID_ARCH_VER(x) \
+ (((x) & ETM_DEVARCH_ARCHID_ARCH_VER_MASK) >> ETM_DEVARCH_ARCHID_ARCH_VER_SHIFT)
+
+#define ETM_DEVARCH_MAKE_ARCHID_ARCH_VER(ver) \
+ (((ver) << ETM_DEVARCH_ARCHID_ARCH_VER_SHIFT) & ETM_DEVARCH_ARCHID_ARCH_VER_MASK)
+
+#define ETM_DEVARCH_ARCHID_ARCH_PART(x) ((x) & 0xfffUL)
+
+#define ETM_DEVARCH_MAKE_ARCHID(major) \
+ ((ETM_DEVARCH_MAKE_ARCHID_ARCH_VER(major)) | ETM_DEVARCH_ARCHID_ARCH_PART(0xA13))
+
+#define ETM_DEVARCH_ARCHID_ETMv4x ETM_DEVARCH_MAKE_ARCHID(0x4)
+
+#define ETM_DEVARCH_ID_MASK \
+ (ETM_DEVARCH_ARCHITECT_MASK | ETM_DEVARCH_ARCHID_MASK | ETM_DEVARCH_PRESENT)
+#define ETM_DEVARCH_ETMv4x_ARCH \
+ (ETM_DEVARCH_ARCHITECT_ARM | ETM_DEVARCH_ARCHID_ETMv4x | ETM_DEVARCH_PRESENT)
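+/*
+ * For illustration, the DEVARCH value previously hard-coded in the AMBA
+ * UCI table, 0x47704a13, decodes as ARCHITECT = Arm (0x4 in bits[31:28],
+ * 0b0111011 in bits[27:21]), PRESENT = 1, REVISION = 0 and
+ * ARCHID = 0x4A13 (ETM architecture version 4, part 0xA13); this equals
+ * ETM_DEVARCH_ETMv4x_ARCH.
+ */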
+
#define TRCSTATR_IDLE_BIT 0
#define TRCSTATR_PMSTABLE_BIT 1
#define ETM_DEFAULT_ADDR_COMP 0
+#define TRCSSCSRn_PC BIT(3)
+
/* PowerDown Control Register bits */
#define TRCPDCR_PU BIT(3)
-/* secure state access levels - TRCACATRn */
-#define ETM_EXLEVEL_S_APP BIT(8)
-#define ETM_EXLEVEL_S_OS BIT(9)
-#define ETM_EXLEVEL_S_HYP BIT(10)
-#define ETM_EXLEVEL_S_MON BIT(11)
-/* non-secure state access levels - TRCACATRn */
-#define ETM_EXLEVEL_NS_APP BIT(12)
-#define ETM_EXLEVEL_NS_OS BIT(13)
-#define ETM_EXLEVEL_NS_HYP BIT(14)
-#define ETM_EXLEVEL_NS_NA BIT(15)
+#define TRCACATR_EXLEVEL_SHIFT 8
+
+/*
+ * Exception level mask for Secure and Non-Secure ELs.
+ * ETM defines the bits for EL control (e.g., TRCVICTLR, TRCACATRn).
+ * The Secure and Non-secure ELs are always grouped together.
+ * Non-secure EL3 is never implemented.
+ * The following generic masks match how the bits appear in these
+ * registers; shift them to the appropriate field of the target
+ * register as required.
+ */
+#define ETM_EXLEVEL_S_APP BIT(0) /* Secure EL0 */
+#define ETM_EXLEVEL_S_OS BIT(1) /* Secure EL1 */
+#define ETM_EXLEVEL_S_HYP BIT(2) /* Secure EL2 */
+#define ETM_EXLEVEL_S_MON BIT(3) /* Secure EL3/Monitor */
+#define ETM_EXLEVEL_NS_APP BIT(4) /* NonSecure EL0 */
+#define ETM_EXLEVEL_NS_OS BIT(5) /* NonSecure EL1 */
+#define ETM_EXLEVEL_NS_HYP BIT(6) /* NonSecure EL2 */
+
+#define ETM_EXLEVEL_MASK (GENMASK(6, 0))
+#define ETM_EXLEVEL_S_MASK (GENMASK(3, 0))
+#define ETM_EXLEVEL_NS_MASK (GENMASK(6, 4))
-/* access level control in TRCVICTLR - same bits as TRCACATRn but shifted */
-#define ETM_EXLEVEL_LSHIFT_TRCVICTLR 8
+/* access level controls in TRCACATRn */
+#define TRCACATR_EXLEVEL_SHIFT 8
+
+/* access level control in TRCVICTLR */
+#define TRCVICTLR_EXLEVEL_SHIFT 16
+#define TRCVICTLR_EXLEVEL_S_SHIFT 16
+#define TRCVICTLR_EXLEVEL_NS_SHIFT 20
/* secure / non secure masks - TRCVICTLR, IDR3 */
-#define ETM_EXLEVEL_S_VICTLR_MASK GENMASK(19, 16)
-/* NS MON (EL3) mode never implemented */
-#define ETM_EXLEVEL_NS_VICTLR_MASK GENMASK(22, 20)
+#define TRCVICTLR_EXLEVEL_MASK (ETM_EXLEVEL_MASK << TRCVICTLR_EXLEVEL_SHIFT)
+#define TRCVICTLR_EXLEVEL_S_MASK (ETM_EXLEVEL_S_MASK << TRCVICTLR_EXLEVEL_SHIFT)
+#define TRCVICTLR_EXLEVEL_NS_MASK (ETM_EXLEVEL_NS_MASK << TRCVICTLR_EXLEVEL_SHIFT)
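+/*
+ * With the generic masks above, TRCVICTLR_EXLEVEL_S_MASK evaluates to
+ * GENMASK(19, 16) and TRCVICTLR_EXLEVEL_NS_MASK to GENMASK(22, 20),
+ * matching the ETM_EXLEVEL_S_VICTLR_MASK / ETM_EXLEVEL_NS_VICTLR_MASK
+ * values they replace.
+ */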
+
+#define ETM_TRCIDR1_ARCH_MAJOR_SHIFT 8
+#define ETM_TRCIDR1_ARCH_MAJOR_MASK (0xfU << ETM_TRCIDR1_ARCH_MAJOR_SHIFT)
+#define ETM_TRCIDR1_ARCH_MAJOR(x) \
+ (((x) & ETM_TRCIDR1_ARCH_MAJOR_MASK) >> ETM_TRCIDR1_ARCH_MAJOR_SHIFT)
+#define ETM_TRCIDR1_ARCH_MINOR_SHIFT 4
+#define ETM_TRCIDR1_ARCH_MINOR_MASK (0xfU << ETM_TRCIDR1_ARCH_MINOR_SHIFT)
+#define ETM_TRCIDR1_ARCH_MINOR(x) \
+ (((x) & ETM_TRCIDR1_ARCH_MINOR_MASK) >> ETM_TRCIDR1_ARCH_MINOR_SHIFT)
+#define ETM_TRCIDR1_ARCH_SHIFT ETM_TRCIDR1_ARCH_MINOR_SHIFT
+#define ETM_TRCIDR1_ARCH_MASK \
+ (ETM_TRCIDR1_ARCH_MAJOR_MASK | ETM_TRCIDR1_ARCH_MINOR_MASK)
+#define ETM_TRCIDR1_ARCH_ETMv4 0x4
+
+/*
+ * Driver representation of the ETM architecture.
+ * The version of an ETM component can be detected from
+ *
+ * TRCDEVARCH - CoreSight architected register
+ * - Bits[15:12] - Major version
+ * - Bits[19:16] - Minor version
+ * TRCIDR1 - ETM architected register
+ * - Bits[11:8] - Major version
+ * - Bits[7:4] - Minor version
+ * We must rely on TRCDEVARCH for the version information, but we
+ * don't want to break support for older implementations which
+ * might not implement it. Thus we fall back to TRCIDR1 if
+ * TRCDEVARCH is not implemented for memory mapped components.
+ * To make version-based decisions easier, the driver uses an
+ * internal representation of the version, as follows:
+ *
+ * ETM_ARCH_VERSION[7:0], where:
+ * Bits[7:4] - Major version
+ * Bits[3:0] - Minor version
+ */
+#define ETM_ARCH_VERSION(major, minor) \
+ ((((major) & 0xfU) << 4) | (((minor) & 0xfU)))
+#define ETM_ARCH_MAJOR_VERSION(arch) (((arch) >> 4) & 0xfU)
+#define ETM_ARCH_MINOR_VERSION(arch) ((arch) & 0xfU)
+
+#define ETM_ARCH_V4 ETM_ARCH_VERSION(4, 0)
/* Interpretation of resource numbers change at ETM v4.3 architecture */
-#define ETM4X_ARCH_4V3 0x43
+#define ETM_ARCH_V4_3 ETM_ARCH_VERSION(4, 3)
+
+static inline u8 etm_devarch_to_arch(u32 devarch)
+{
+ return ETM_ARCH_VERSION(ETM_DEVARCH_ARCHID_ARCH_VER(devarch),
+ ETM_DEVARCH_REVISION(devarch));
+}
+
+static inline u8 etm_trcidr_to_arch(u32 trcidr1)
+{
+ return ETM_ARCH_VERSION(ETM_TRCIDR1_ARCH_MAJOR(trcidr1),
+ ETM_TRCIDR1_ARCH_MINOR(trcidr1));
+}
enum etm_impdef_type {
ETM4_IMPDEF_HISI_CORE_COMMIT,
@@ -256,7 +702,7 @@ enum etm_impdef_type {
* @vmid_mask0: VM ID comparator mask for comparator 0-3.
* @vmid_mask1: VM ID comparator mask for comparator 4-7.
* @ext_inp: External input selection.
- * @arch: ETM architecture version (for arch dependent config).
+ * @s_ex_level: Secure ELs where tracing is supported.
*/
struct etmv4_config {
u32 mode;
@@ -300,7 +746,7 @@ struct etmv4_config {
u32 vmid_mask0;
u32 vmid_mask1;
u32 ext_inp;
- u8 arch;
+ u8 s_ex_level;
};
/**
@@ -369,7 +815,7 @@ struct etmv4_save_state {
* @spinlock: Only one at a time pls.
* @mode: This tracer's mode, i.e sysFS, Perf or disabled.
* @cpu: The cpu this component is affined to.
- * @arch: ETM version number.
+ * @arch: ETM architecture version.
* @nr_pe: The number of processing entity available for tracing.
* @nr_pe_cmp: The number of processing entity comparator inputs that are
* available for tracing.
@@ -491,4 +937,7 @@ enum etm_addr_ctxtype {
extern const struct attribute_group *coresight_etmv4_groups[];
void etm4_config_trace_mode(struct etmv4_config *config);
+
+u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit);
+void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit);
#endif
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 071c723227db..b363dd6bc510 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -52,13 +52,14 @@ static int dynamic_funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
{
u32 functl;
int rc = 0;
+ struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
/* Claim the device only when we enable the first slave */
if (!(functl & FUNNEL_ENSx_MASK)) {
- rc = coresight_claim_device_unlocked(drvdata->base);
+ rc = coresight_claim_device_unlocked(csdev);
if (rc)
goto done;
}
@@ -101,6 +102,7 @@ static void dynamic_funnel_disable_hw(struct funnel_drvdata *drvdata,
int inport)
{
u32 functl;
+ struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
@@ -110,7 +112,7 @@ static void dynamic_funnel_disable_hw(struct funnel_drvdata *drvdata,
/* Disclaim the device if none of the slaves are now active */
if (!(functl & FUNNEL_ENSx_MASK))
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
}
@@ -242,6 +244,7 @@ static int funnel_probe(struct device *dev, struct resource *res)
}
drvdata->base = base;
desc.groups = coresight_funnel_groups;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
}
dev_set_drvdata(dev, drvdata);
@@ -370,9 +373,9 @@ static int dynamic_funnel_probe(struct amba_device *adev,
return funnel_probe(&adev->dev, &adev->res);
}
-static int dynamic_funnel_remove(struct amba_device *adev)
+static void dynamic_funnel_remove(struct amba_device *adev)
{
- return funnel_remove(&adev->dev);
+ funnel_remove(&adev->dev);
}
static const struct amba_id dynamic_funnel_ids[] = {
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 7e2a2b7f503f..b86acbc74cf0 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -45,12 +45,14 @@ struct replicator_drvdata {
static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
{
+ struct coresight_device *csdev = drvdata->csdev;
+
CS_UNLOCK(drvdata->base);
- if (!coresight_claim_device_unlocked(drvdata->base)) {
+ if (!coresight_claim_device_unlocked(csdev)) {
writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
}
CS_LOCK(drvdata->base);
@@ -70,6 +72,7 @@ static int dynamic_replicator_enable(struct replicator_drvdata *drvdata,
{
int rc = 0;
u32 id0val, id1val;
+ struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
@@ -84,7 +87,7 @@ static int dynamic_replicator_enable(struct replicator_drvdata *drvdata,
id0val = id1val = 0xff;
if (id0val == 0xff && id1val == 0xff)
- rc = coresight_claim_device_unlocked(drvdata->base);
+ rc = coresight_claim_device_unlocked(csdev);
if (!rc) {
switch (outport) {
@@ -140,6 +143,7 @@ static void dynamic_replicator_disable(struct replicator_drvdata *drvdata,
int inport, int outport)
{
u32 reg;
+ struct coresight_device *csdev = drvdata->csdev;
switch (outport) {
case 0:
@@ -160,7 +164,7 @@ static void dynamic_replicator_disable(struct replicator_drvdata *drvdata,
if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
(readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
}
@@ -254,6 +258,7 @@ static int replicator_probe(struct device *dev, struct resource *res)
}
drvdata->base = base;
desc.groups = replicator_groups;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
}
if (fwnode_property_present(dev_fwnode(dev),
@@ -388,9 +393,9 @@ static int dynamic_replicator_probe(struct amba_device *adev,
return replicator_probe(&adev->dev, &adev->res);
}
-static int dynamic_replicator_remove(struct amba_device *adev)
+static void dynamic_replicator_remove(struct amba_device *adev)
{
- return replicator_remove(&adev->dev);
+ replicator_remove(&adev->dev);
}
static const struct amba_id dynamic_replicator_ids[] = {
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 99791773f682..58062a5a8238 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -258,6 +258,7 @@ static void stm_disable(struct coresight_device *csdev,
struct perf_event *event)
{
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct csdev_access *csa = &csdev->access;
/*
* For as long as the tracer isn't disabled another entity can't
@@ -270,7 +271,7 @@ static void stm_disable(struct coresight_device *csdev,
spin_unlock(&drvdata->spinlock);
/* Wait until the engine has completely stopped */
- coresight_timeout(drvdata->base, STMTCSR, STMTCSR_BUSY_BIT, 0);
+ coresight_timeout(csa, STMTCSR, STMTCSR_BUSY_BIT, 0);
pm_runtime_put(csdev->dev.parent);
@@ -884,6 +885,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
if (IS_ERR(base))
return PTR_ERR(base);
drvdata->base = base;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
ret = stm_get_stimulus_area(dev, &ch_res);
if (ret)
@@ -951,15 +953,13 @@ stm_unregister:
return ret;
}
-static int stm_remove(struct amba_device *adev)
+static void stm_remove(struct amba_device *adev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
coresight_unregister(drvdata->csdev);
stm_unregister_device(&drvdata->stm);
-
- return 0;
}
#ifdef CONFIG_PM
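
The stm_remove() change above (and the matching funnel, replicator, TMC and TPIU hunks) reflects the AMBA bus API where the driver .remove callback no longer returns a value. A minimal sketch of a driver skeleton written against the void-returning callback, with hypothetical names:

/* Sketch: an amba_driver after the void-remove conversion (hypothetical driver). */
static int example_probe(struct amba_device *adev, const struct amba_id *id)
{
	/* allocate and register resources here */
	return 0;
}

static void example_remove(struct amba_device *adev)
{
	/* tear down; there is no error code to return any more */
}

static struct amba_driver example_driver = {
	.drv = {
		.name = "example",
	},
	.probe  = example_probe,
	.remove = example_remove,
};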
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 8169dff5a9f6..74c6323d4d6a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -33,16 +33,20 @@ DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");
void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
{
+ struct coresight_device *csdev = drvdata->csdev;
+ struct csdev_access *csa = &csdev->access;
+
/* Ensure formatter, unformatter and hardware fifo are empty */
- if (coresight_timeout(drvdata->base,
- TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
- dev_err(&drvdata->csdev->dev,
+ if (coresight_timeout(csa, TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
+ dev_err(&csdev->dev,
"timeout while waiting for TMC to be Ready\n");
}
}
void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
+ struct coresight_device *csdev = drvdata->csdev;
+ struct csdev_access *csa = &csdev->access;
u32 ffcr;
ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
@@ -51,9 +55,8 @@ void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
/* Ensure flush completes */
- if (coresight_timeout(drvdata->base,
- TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
- dev_err(&drvdata->csdev->dev,
+ if (coresight_timeout(csa, TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
+ dev_err(&csdev->dev,
"timeout while waiting for completion of Manual Flush\n");
}
@@ -456,6 +459,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
}
drvdata->base = base;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
spin_lock_init(&drvdata->spinlock);
@@ -559,7 +563,7 @@ out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
-static int tmc_remove(struct amba_device *adev)
+static void tmc_remove(struct amba_device *adev)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev);
@@ -570,8 +574,6 @@ static int tmc_remove(struct amba_device *adev)
*/
misc_deregister(&drvdata->miscdev);
coresight_unregister(drvdata->csdev);
-
- return 0;
}
static const struct amba_id tmc_ids[] = {
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 989d965f3d90..45b85edfc690 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -37,7 +37,7 @@ static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
- int rc = coresight_claim_device(drvdata->base);
+ int rc = coresight_claim_device(drvdata->csdev);
if (rc)
return rc;
@@ -88,7 +88,7 @@ static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
__tmc_etb_disable_hw(drvdata);
- coresight_disclaim_device(drvdata->base);
+ coresight_disclaim_device(drvdata->csdev);
}
static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
@@ -109,7 +109,7 @@ static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
- int rc = coresight_claim_device(drvdata->base);
+ int rc = coresight_claim_device(drvdata->csdev);
if (rc)
return rc;
@@ -120,11 +120,13 @@ static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
+ struct coresight_device *csdev = drvdata->csdev;
+
CS_UNLOCK(drvdata->base);
tmc_flush_and_stop(drvdata);
tmc_disable_hw(drvdata);
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index bf5230e39c5b..acdb59e0e661 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1040,7 +1040,7 @@ static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
rc = tmc_etr_enable_catu(drvdata, etr_buf);
if (rc)
return rc;
- rc = coresight_claim_device(drvdata->base);
+ rc = coresight_claim_device(drvdata->csdev);
if (!rc) {
drvdata->etr_buf = etr_buf;
__tmc_etr_enable_hw(drvdata);
@@ -1134,7 +1134,7 @@ void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
__tmc_etr_disable_hw(drvdata);
/* Disable CATU device if this ETR is connected to one */
tmc_etr_disable_catu(drvdata);
- coresight_disclaim_device(drvdata->base);
+ coresight_disclaim_device(drvdata->csdev);
/* Reset the ETR buf used by hardware */
drvdata->etr_buf = NULL;
}
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index d5dfee9ee556..34d37abd2c8d 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -60,49 +60,45 @@ struct tpiu_drvdata {
struct coresight_device *csdev;
};
-static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
+static void tpiu_enable_hw(struct csdev_access *csa)
{
- CS_UNLOCK(drvdata->base);
+ CS_UNLOCK(csa->base);
/* TODO: fill this up */
- CS_LOCK(drvdata->base);
+ CS_LOCK(csa->base);
}
static int tpiu_enable(struct coresight_device *csdev, u32 mode, void *__unused)
{
- struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- tpiu_enable_hw(drvdata);
+ tpiu_enable_hw(&csdev->access);
atomic_inc(csdev->refcnt);
dev_dbg(&csdev->dev, "TPIU enabled\n");
return 0;
}
-static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
+static void tpiu_disable_hw(struct csdev_access *csa)
{
- CS_UNLOCK(drvdata->base);
+ CS_UNLOCK(csa->base);
/* Clear formatter and stop on flush */
- writel_relaxed(FFCR_STOP_FI, drvdata->base + TPIU_FFCR);
+ csdev_access_relaxed_write32(csa, FFCR_STOP_FI, TPIU_FFCR);
/* Generate manual flush */
- writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
+ csdev_access_relaxed_write32(csa, FFCR_STOP_FI | FFCR_FON_MAN, TPIU_FFCR);
/* Wait for flush to complete */
- coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN_BIT, 0);
+ coresight_timeout(csa, TPIU_FFCR, FFCR_FON_MAN_BIT, 0);
/* Wait for formatter to stop */
- coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED_BIT, 1);
+ coresight_timeout(csa, TPIU_FFSR, FFSR_FT_STOPPED_BIT, 1);
- CS_LOCK(drvdata->base);
+ CS_LOCK(csa->base);
}
static int tpiu_disable(struct coresight_device *csdev)
{
- struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
if (atomic_dec_return(csdev->refcnt))
return -EBUSY;
- tpiu_disable_hw(drvdata);
+ tpiu_disable_hw(&csdev->access);
dev_dbg(&csdev->dev, "TPIU disabled\n");
return 0;
@@ -149,9 +145,10 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(base);
drvdata->base = base;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
/* Disable tpiu to support older devices */
- tpiu_disable_hw(drvdata);
+ tpiu_disable_hw(&desc.access);
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata))
@@ -173,13 +170,11 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(drvdata->csdev);
}
-static int tpiu_remove(struct amba_device *adev)
+static void tpiu_remove(struct amba_device *adev)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(&adev->dev);
coresight_unregister(drvdata->csdev);
-
- return 0;
}
#ifdef CONFIG_PM
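
The TPIU changes route register writes and polling through struct csdev_access (csdev_access_relaxed_write32(), coresight_timeout()) and register the accessor at probe time via desc.access = CSDEV_ACCESS_IOMEM(base). A hedged sketch of a flush-and-stop sequence for a hypothetical sink; the EXAMPLE_* register names and bit positions are made up for illustration.

/* Sketch only: hypothetical sink stop sequence using the csdev_access helpers. */
#define EXAMPLE_FFCR		0x304		/* hypothetical register offset */
#define EXAMPLE_FFCR_FLUSH	BIT(6)		/* hypothetical manual-flush bit */
#define EXAMPLE_FLUSH_BIT	6

static void example_sink_stop(struct csdev_access *csa)
{
	CS_UNLOCK(csa->base);

	/* Request a manual flush through the abstracted accessor. */
	csdev_access_relaxed_write32(csa, EXAMPLE_FFCR_FLUSH, EXAMPLE_FFCR);

	/* Poll until the flush bit clears: (accessor, offset, bit, expected value). */
	coresight_timeout(csa, EXAMPLE_FFCR, EXAMPLE_FLUSH_BIT, 0);

	CS_LOCK(csa->base);
}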
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 913db013fe90..fc90293afcbf 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -622,9 +622,7 @@ static int bit_xfer_atomic(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[],
static u32 bit_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_NOSTART | I2C_FUNC_SMBUS_EMUL |
- I2C_FUNC_SMBUS_READ_BLOCK_DATA |
- I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ return I2C_FUNC_I2C | I2C_FUNC_NOSTART | I2C_FUNC_SMBUS_EMUL_ALL |
I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING;
}
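
The bit-banging algorithm's functionality mask now advertises I2C_FUNC_SMBUS_EMUL_ALL, which — assuming the flag matches its name — folds the block-read and block-process-call bits previously listed individually into the emulated-SMBus set. A short sketch of how a client driver might test an adapter for one of those capabilities:

/* Sketch: a client checking whether an adapter can emulate SMBus block reads. */
#include <linux/i2c.h>

static int example_check_adapter(struct i2c_adapter *adap)
{
	if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_READ_BLOCK_DATA))
		return -EOPNOTSUPP;	/* block reads not available on this bus */
	return 0;
}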
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index ab1f39ac39f4..05ebf7546e3f 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -586,13 +586,6 @@ config I2C_DIGICOLOR
This driver can also be built as a module. If so, the module
will be called i2c-digicolor.
-config I2C_EFM32
- tristate "EFM32 I2C controller"
- depends on ARCH_EFM32 || COMPILE_TEST
- help
- This driver supports the i2c block found in Energy Micro's EFM32
- SoCs.
-
config I2C_EG20T
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) I2C"
depends on PCI && (X86_32 || MIPS || COMPILE_TEST)
@@ -1000,16 +993,6 @@ config I2C_SIMTEC
This driver can also be built as a module. If so, the module
will be called i2c-simtec.
-config I2C_SIRF
- tristate "CSR SiRFprimaII I2C interface"
- depends on ARCH_SIRF || COMPILE_TEST
- help
- If you say yes to this option, support will be included for the
- CSR SiRFprimaII I2C interface.
-
- This driver can also be built as a module. If so, the module
- will be called i2c-sirf.
-
config I2C_SPRD
tristate "Spreadtrum I2C interface"
depends on I2C=y && (ARCH_SPRD || COMPILE_TEST)
@@ -1050,19 +1033,6 @@ config I2C_STM32F7
This driver can also be built as module. If so, the module
will be called i2c-stm32f7.
-config I2C_STU300
- tristate "ST Microelectronics DDC I2C interface"
- depends on MACH_U300 || COMPILE_TEST
- default y if MACH_U300
- help
- If you say yes to this option, support will be included for the
- I2C interface from ST Microelectronics simply called "DDC I2C"
- supporting both I2C and DDC, used in e.g. the U300 series
- mobile platforms.
-
- This driver can also be built as a module. If so, the module
- will be called i2c-stu300.
-
config I2C_SUN6I_P2WI
tristate "Allwinner sun6i internal P2WI controller"
depends on RESET_CONTROLLER
@@ -1175,8 +1145,8 @@ config I2C_XILINX
will be called xilinx_i2c.
config I2C_XLR
- tristate "Netlogic XLR and Sigma Designs I2C support"
- depends on CPU_XLR || ARCH_TANGO || COMPILE_TEST
+ tristate "Netlogic XLR I2C support"
+ depends on CPU_XLR || COMPILE_TEST
help
This driver enables support for the on-chip I2C interface of
the Netlogic XLR/XLS MIPS processors and Sigma Designs SOCs.
@@ -1401,15 +1371,6 @@ config I2C_OPAL
This driver can also be built as a module. If so, the module will be
called as i2c-opal.
-config I2C_ZX2967
- tristate "ZTE ZX2967 I2C support"
- depends on ARCH_ZX
- default y
- help
- Selecting this option will add ZX2967 I2C driver.
- This driver can also be built as a module. If so, the module will be
- called i2c-zx2967.
-
config I2C_FSI
tristate "FSI I2C driver"
depends on FSI
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 683c49faca05..615f35e3e31f 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -58,7 +58,6 @@ i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-bayt
obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o
i2c-designware-pci-y := i2c-designware-pcidrv.o
obj-$(CONFIG_I2C_DIGICOLOR) += i2c-digicolor.o
-obj-$(CONFIG_I2C_EFM32) += i2c-efm32.o
obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o
obj-$(CONFIG_I2C_EMEV2) += i2c-emev2.o
obj-$(CONFIG_I2C_EXYNOS5) += i2c-exynos5.o
@@ -99,13 +98,11 @@ obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
-obj-$(CONFIG_I2C_SIRF) += i2c-sirf.o
obj-$(CONFIG_I2C_SPRD) += i2c-sprd.o
obj-$(CONFIG_I2C_ST) += i2c-st.o
obj-$(CONFIG_I2C_STM32F4) += i2c-stm32f4.o
i2c-stm32f7-drv-objs := i2c-stm32f7.o i2c-stm32.o
obj-$(CONFIG_I2C_STM32F7) += i2c-stm32f7-drv.o
-obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
obj-$(CONFIG_I2C_SUN6I_P2WI) += i2c-sun6i-p2wi.o
obj-$(CONFIG_I2C_SYNQUACER) += i2c-synquacer.o
obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o
@@ -122,7 +119,6 @@ obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
obj-$(CONFIG_I2C_XLR) += i2c-xlr.o
obj-$(CONFIG_I2C_XLP9XX) += i2c-xlp9xx.o
obj-$(CONFIG_I2C_RCAR) += i2c-rcar.o
-obj-$(CONFIG_I2C_ZX2967) += i2c-zx2967.o
# External I2C/SMBus adapter drivers
obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o
diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c
index cd3fd5ee5f65..ce130a821ea5 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-pci.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c
@@ -30,7 +30,7 @@ static void amd_mp2_c2p_mutex_unlock(struct amd_i2c_common *i2c_common)
struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
if (unlikely(privdata->c2p_lock_busid != i2c_common->bus_id)) {
- dev_warn(ndev_dev(privdata),
+ pci_warn(privdata->pci_dev,
"bus %d attempting to unlock C2P locked by bus %d\n",
i2c_common->bus_id, privdata->c2p_lock_busid);
return;
@@ -59,8 +59,7 @@ int amd_mp2_bus_enable_set(struct amd_i2c_common *i2c_common, bool enable)
struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
union i2c_cmd_base i2c_cmd_base;
- dev_dbg(ndev_dev(privdata), "%s id: %d\n", __func__,
- i2c_common->bus_id);
+ pci_dbg(privdata->pci_dev, "id: %d\n", i2c_common->bus_id);
i2c_cmd_base.ul = 0;
i2c_cmd_base.s.i2c_cmd = enable ? i2c_enable : i2c_disable;
@@ -111,20 +110,19 @@ EXPORT_SYMBOL_GPL(amd_mp2_rw);
static void amd_mp2_pci_check_rw_event(struct amd_i2c_common *i2c_common)
{
struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ struct pci_dev *pdev = privdata->pci_dev;
int len = i2c_common->eventval.r.length;
u32 slave_addr = i2c_common->eventval.r.slave_addr;
bool err = false;
if (unlikely(len != i2c_common->msg->len)) {
- dev_err(ndev_dev(privdata),
- "length %d in event doesn't match buffer length %d!\n",
+ pci_err(pdev, "length %d in event doesn't match buffer length %d!\n",
len, i2c_common->msg->len);
err = true;
}
if (unlikely(slave_addr != i2c_common->msg->addr)) {
- dev_err(ndev_dev(privdata),
- "unexpected slave address %x (expected: %x)!\n",
+ pci_err(pdev, "unexpected slave address %x (expected: %x)!\n",
slave_addr, i2c_common->msg->addr);
err = true;
}
@@ -136,13 +134,14 @@ static void amd_mp2_pci_check_rw_event(struct amd_i2c_common *i2c_common)
static void __amd_mp2_process_event(struct amd_i2c_common *i2c_common)
{
struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ struct pci_dev *pdev = privdata->pci_dev;
enum status_type sts = i2c_common->eventval.r.status;
enum response_type res = i2c_common->eventval.r.response;
int len = i2c_common->eventval.r.length;
if (res != command_success) {
if (res != command_failed)
- dev_err(ndev_dev(privdata), "invalid response to i2c command!\n");
+ pci_err(pdev, "invalid response to i2c command!\n");
return;
}
@@ -155,32 +154,26 @@ static void __amd_mp2_process_event(struct amd_i2c_common *i2c_common)
privdata->mmio + AMD_C2P_MSG2,
len);
} else if (sts != i2c_readfail_event) {
- dev_err(ndev_dev(privdata),
- "invalid i2c status after read (%d)!\n", sts);
+ pci_err(pdev, "invalid i2c status after read (%d)!\n", sts);
}
break;
case i2c_write:
if (sts == i2c_writecomplete_event)
amd_mp2_pci_check_rw_event(i2c_common);
else if (sts != i2c_writefail_event)
- dev_err(ndev_dev(privdata),
- "invalid i2c status after write (%d)!\n", sts);
+ pci_err(pdev, "invalid i2c status after write (%d)!\n", sts);
break;
case i2c_enable:
if (sts == i2c_busenable_complete)
i2c_common->cmd_success = true;
else if (sts != i2c_busenable_failed)
- dev_err(ndev_dev(privdata),
- "invalid i2c status after bus enable (%d)!\n",
- sts);
+ pci_err(pdev, "invalid i2c status after bus enable (%d)!\n", sts);
break;
case i2c_disable:
if (sts == i2c_busdisable_complete)
i2c_common->cmd_success = true;
else if (sts != i2c_busdisable_failed)
- dev_err(ndev_dev(privdata),
- "invalid i2c status after bus disable (%d)!\n",
- sts);
+ pci_err(pdev, "invalid i2c status after bus disable (%d)!\n", sts);
break;
default:
break;
@@ -190,10 +183,10 @@ static void __amd_mp2_process_event(struct amd_i2c_common *i2c_common)
void amd_mp2_process_event(struct amd_i2c_common *i2c_common)
{
struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ struct pci_dev *pdev = privdata->pci_dev;
if (unlikely(i2c_common->reqcmd == i2c_none)) {
- dev_warn(ndev_dev(privdata),
- "received msg but no cmd was sent (bus = %d)!\n",
+ pci_warn(pdev, "received msg but no cmd was sent (bus = %d)!\n",
i2c_common->bus_id);
return;
}
@@ -208,6 +201,7 @@ EXPORT_SYMBOL_GPL(amd_mp2_process_event);
static irqreturn_t amd_mp2_irq_isr(int irq, void *dev)
{
struct amd_mp2_dev *privdata = dev;
+ struct pci_dev *pdev = privdata->pci_dev;
struct amd_i2c_common *i2c_common;
u32 val;
unsigned int bus_id;
@@ -236,8 +230,7 @@ static irqreturn_t amd_mp2_irq_isr(int irq, void *dev)
val = readl(privdata->mmio + AMD_P2C_MSG_INTEN);
if (val != 0) {
writel(0, privdata->mmio + AMD_P2C_MSG_INTEN);
- dev_warn(ndev_dev(privdata),
- "received irq without message\n");
+ pci_warn(pdev, "received irq without message\n");
ret = IRQ_HANDLED;
}
}
@@ -255,13 +248,13 @@ EXPORT_SYMBOL_GPL(amd_mp2_rw_timeout);
int amd_mp2_register_cb(struct amd_i2c_common *i2c_common)
{
struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ struct pci_dev *pdev = privdata->pci_dev;
if (i2c_common->bus_id > 1)
return -EINVAL;
if (privdata->busses[i2c_common->bus_id]) {
- dev_err(ndev_dev(privdata),
- "Bus %d already taken!\n", i2c_common->bus_id);
+ pci_err(pdev, "Bus %d already taken!\n", i2c_common->bus_id);
return -EINVAL;
}
@@ -301,13 +294,13 @@ static int amd_mp2_pci_init(struct amd_mp2_dev *privdata,
rc = pcim_enable_device(pci_dev);
if (rc) {
- dev_err(ndev_dev(privdata), "Failed to enable MP2 PCI device\n");
+ pci_err(pci_dev, "Failed to enable MP2 PCI device\n");
goto err_pci_enable;
}
rc = pcim_iomap_regions(pci_dev, 1 << 2, pci_name(pci_dev));
if (rc) {
- dev_err(ndev_dev(privdata), "I/O memory remapping failed\n");
+ pci_err(pci_dev, "I/O memory remapping failed\n");
goto err_pci_enable;
}
privdata->mmio = pcim_iomap_table(pci_dev)[2];
@@ -327,7 +320,7 @@ static int amd_mp2_pci_init(struct amd_mp2_dev *privdata,
rc = devm_request_irq(&pci_dev->dev, pci_dev->irq, amd_mp2_irq_isr,
IRQF_SHARED, dev_name(&pci_dev->dev), privdata);
if (rc)
- dev_err(&pci_dev->dev, "Failure requesting irq %i: %d\n",
+ pci_err(pci_dev, "Failure requesting irq %i: %d\n",
pci_dev->irq, rc);
return rc;
@@ -363,7 +356,7 @@ static int amd_mp2_pci_probe(struct pci_dev *pci_dev,
privdata->probed = true;
- dev_info(&pci_dev->dev, "MP2 device registered.\n");
+ pci_info(pci_dev, "MP2 device registered.\n");
return 0;
}
@@ -397,8 +390,7 @@ static int amd_mp2_pci_suspend(struct device *dev)
ret = pci_save_state(pci_dev);
if (ret) {
- dev_err(ndev_dev(privdata),
- "pci_save_state failed = %d\n", ret);
+ pci_err(pci_dev, "pci_save_state failed = %d\n", ret);
return ret;
}
@@ -417,8 +409,7 @@ static int amd_mp2_pci_resume(struct device *dev)
pci_restore_state(pci_dev);
ret = pci_enable_device(pci_dev);
if (ret < 0) {
- dev_err(ndev_dev(privdata),
- "pci_enable_device failed = %d\n", ret);
+ pci_err(pci_dev, "pci_enable_device failed = %d\n", ret);
return ret;
}
diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c
index 506433bc0ff2..de058671f9b8 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-plat.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c
@@ -88,8 +88,7 @@ static void i2c_amd_cmd_completion(struct amd_i2c_common *i2c_common)
union i2c_event *event = &i2c_common->eventval;
if (event->r.status == i2c_readcomplete_event)
- dev_dbg(&i2c_dev->pdev->dev, "%s readdata:%*ph\n",
- __func__, event->r.length,
+ dev_dbg(&i2c_dev->pdev->dev, "readdata:%*ph\n", event->r.length,
i2c_common->msg->buf);
complete(&i2c_dev->cmd_complete);
diff --git a/drivers/i2c/busses/i2c-amd-mp2.h b/drivers/i2c/busses/i2c-amd-mp2.h
index 058362edebaa..ddecd0c88656 100644
--- a/drivers/i2c/busses/i2c-amd-mp2.h
+++ b/drivers/i2c/busses/i2c-amd-mp2.h
@@ -185,12 +185,6 @@ struct amd_mp2_dev {
unsigned int probed;
};
-#define ndev_pdev(ndev) ((ndev)->pci_dev)
-#define ndev_name(ndev) pci_name(ndev_pdev(ndev))
-#define ndev_dev(ndev) (&ndev_pdev(ndev)->dev)
-#define work_amd_i2c_common(__work) \
- container_of(__work, struct amd_i2c_common, work.work)
-
/* PCIe communication driver */
int amd_mp2_rw(struct amd_i2c_common *i2c_common, enum i2c_cmd reqcmd);
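
With the driver-private ndev_pdev()/ndev_dev() macros removed above, the AMD MP2 code logs directly against the struct pci_dev using the generic pci_*() helpers. A minimal sketch of the pattern, with a hypothetical function name:

/* Sketch: logging against the PCI device instead of a private wrapper macro. */
#include <linux/pci.h>

static void example_report(struct pci_dev *pdev, int err)
{
	if (err)
		pci_err(pdev, "operation failed: %d\n", err);
	else
		pci_dbg(pdev, "operation completed\n");
}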
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index d8295b1c379d..cceaf69279a9 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -93,6 +93,7 @@
#define S_CMD_STATUS_MASK 0x07
#define S_CMD_STATUS_SUCCESS 0x0
#define S_CMD_STATUS_TIMEOUT 0x5
+#define S_CMD_STATUS_MASTER_ABORT 0x7
#define IE_OFFSET 0x38
#define IE_M_RX_FIFO_FULL_SHIFT 31
@@ -159,6 +160,11 @@
#define IE_S_ALL_INTERRUPT_SHIFT 21
#define IE_S_ALL_INTERRUPT_MASK 0x3f
+/*
+ * It takes ~18us to read 10 bytes of data, so to keep the tasklet
+ * running time short, the max slave read per tasklet is set to 10 bytes.
+ */
+#define MAX_SLAVE_RX_PER_INT 10
enum i2c_slave_read_status {
I2C_SLAVE_RX_FIFO_EMPTY = 0,
@@ -205,8 +211,18 @@ struct bcm_iproc_i2c_dev {
/* bytes that have been read */
unsigned int rx_bytes;
unsigned int thld_bytes;
+
+ bool slave_rx_only;
+ bool rx_start_rcvd;
+ bool slave_read_complete;
+ u32 tx_underrun;
+ u32 slave_int_mask;
+ struct tasklet_struct slave_rx_tasklet;
};
+/* tasklet to process slave rx data */
+static void slave_rx_tasklet_fn(unsigned long);
+
/*
* Can be expanded in the future if more interrupt status bits are utilized
*/
@@ -215,7 +231,8 @@ struct bcm_iproc_i2c_dev {
#define ISR_MASK_SLAVE (BIT(IS_S_START_BUSY_SHIFT)\
| BIT(IS_S_RX_EVENT_SHIFT) | BIT(IS_S_RD_EVENT_SHIFT)\
- | BIT(IS_S_TX_UNDERRUN_SHIFT))
+ | BIT(IS_S_TX_UNDERRUN_SHIFT) | BIT(IS_S_RX_FIFO_FULL_SHIFT)\
+ | BIT(IS_S_RX_THLD_SHIFT))
static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave);
static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave);
@@ -259,6 +276,7 @@ static void bcm_iproc_i2c_slave_init(
{
u32 val;
+ iproc_i2c->tx_underrun = 0;
if (need_reset) {
/* put controller in reset */
val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
@@ -295,8 +313,13 @@ static void bcm_iproc_i2c_slave_init(
/* Enable interrupt register to indicate a valid byte in receive fifo */
val = BIT(IE_S_RX_EVENT_SHIFT);
+ /* Enable interrupt register to indicate Slave Rx FIFO Full */
+ val |= BIT(IE_S_RX_FIFO_FULL_SHIFT);
+ /* Enable interrupt register to indicate a Master read transaction */
+ val |= BIT(IE_S_RD_EVENT_SHIFT);
/* Enable interrupt register for the Slave BUSY command */
val |= BIT(IE_S_START_BUSY_SHIFT);
+ iproc_i2c->slave_int_mask = val;
iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
}
@@ -311,9 +334,10 @@ static void bcm_iproc_i2c_check_slave_status(
return;
val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
- if (val == S_CMD_STATUS_TIMEOUT) {
- dev_err(iproc_i2c->device, "slave random stretch time timeout\n");
-
+ if (val == S_CMD_STATUS_TIMEOUT || val == S_CMD_STATUS_MASTER_ABORT) {
+ dev_err(iproc_i2c->device, (val == S_CMD_STATUS_TIMEOUT) ?
+ "slave random stretch time timeout\n" :
+ "Master aborted read transaction\n");
/* re-initialize i2c for recovery */
bcm_iproc_i2c_enable_disable(iproc_i2c, false);
bcm_iproc_i2c_slave_init(iproc_i2c, true);
@@ -321,76 +345,187 @@ static void bcm_iproc_i2c_check_slave_status(
}
}
-static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
- u32 status)
+static void bcm_iproc_i2c_slave_read(struct bcm_iproc_i2c_dev *iproc_i2c)
{
+ u8 rx_data, rx_status;
+ u32 rx_bytes = 0;
u32 val;
- u8 value, rx_status;
- /* Slave RX byte receive */
- if (status & BIT(IS_S_RX_EVENT_SHIFT)) {
+ while (rx_bytes < MAX_SLAVE_RX_PER_INT) {
val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET);
rx_status = (val >> S_RX_STATUS_SHIFT) & S_RX_STATUS_MASK;
- if (rx_status == I2C_SLAVE_RX_START) {
- /* Start of SMBUS for Master write */
- i2c_slave_event(iproc_i2c->slave,
- I2C_SLAVE_WRITE_REQUESTED, &value);
+ rx_data = ((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
- val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET);
- value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
+ if (rx_status == I2C_SLAVE_RX_START) {
+ /* Start of SMBUS Master write */
i2c_slave_event(iproc_i2c->slave,
- I2C_SLAVE_WRITE_RECEIVED, &value);
- } else if (status & BIT(IS_S_RD_EVENT_SHIFT)) {
- /* Start of SMBUS for Master Read */
+ I2C_SLAVE_WRITE_REQUESTED, &rx_data);
+ iproc_i2c->rx_start_rcvd = true;
+ iproc_i2c->slave_read_complete = false;
+ } else if (rx_status == I2C_SLAVE_RX_DATA &&
+ iproc_i2c->rx_start_rcvd) {
+ /* Middle of SMBUS Master write */
i2c_slave_event(iproc_i2c->slave,
- I2C_SLAVE_READ_REQUESTED, &value);
- iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value);
+ I2C_SLAVE_WRITE_RECEIVED, &rx_data);
+ } else if (rx_status == I2C_SLAVE_RX_END &&
+ iproc_i2c->rx_start_rcvd) {
+ /* End of SMBUS Master write */
+ if (iproc_i2c->slave_rx_only)
+ i2c_slave_event(iproc_i2c->slave,
+ I2C_SLAVE_WRITE_RECEIVED,
+ &rx_data);
+
+ i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP,
+ &rx_data);
+ } else if (rx_status == I2C_SLAVE_RX_FIFO_EMPTY) {
+ iproc_i2c->rx_start_rcvd = false;
+ iproc_i2c->slave_read_complete = true;
+ break;
+ }
- val = BIT(S_CMD_START_BUSY_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
+ rx_bytes++;
+ }
+}
- /*
- * Enable interrupt for TX FIFO becomes empty and
- * less than PKT_LENGTH bytes were output on the SMBUS
- */
- val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
- val |= BIT(IE_S_TX_UNDERRUN_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
- } else {
- /* Master write other than start */
- value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
+static void slave_rx_tasklet_fn(unsigned long data)
+{
+ struct bcm_iproc_i2c_dev *iproc_i2c = (struct bcm_iproc_i2c_dev *)data;
+ u32 int_clr;
+
+ bcm_iproc_i2c_slave_read(iproc_i2c);
+
+ /* clear pending IS_S_RX_EVENT_SHIFT interrupt */
+ int_clr = BIT(IS_S_RX_EVENT_SHIFT);
+
+ if (!iproc_i2c->slave_rx_only && iproc_i2c->slave_read_complete) {
+ /*
+ * For a single-byte master-read request, the
+ * IS_S_TX_UNDERRUN_SHIFT event is generated before the
+ * IS_S_START_BUSY_SHIFT event, so slave data transmission starts
+ * from the first IS_S_TX_UNDERRUN_SHIFT event.
+ *
+ * This means no data must be sent from the slave when the
+ * IS_S_RD_EVENT_SHIFT event is generated, otherwise the read pointer
+ * of the EEPROM or other backend slave driver would be incremented twice.
+ */
+ iproc_i2c->tx_underrun = 0;
+ iproc_i2c->slave_int_mask |= BIT(IE_S_TX_UNDERRUN_SHIFT);
+
+ /* clear IS_S_RD_EVENT_SHIFT interrupt */
+ int_clr |= BIT(IS_S_RD_EVENT_SHIFT);
+ }
+
+ /* clear slave interrupt */
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, int_clr);
+ /* enable slave interrupts */
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, iproc_i2c->slave_int_mask);
+}
+
+static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 status)
+{
+ u32 val;
+ u8 value;
+
+ /*
+ * Slave events in case of master-write, master-write-read and
+ * master-read
+ *
+ * Master-write : only IS_S_RX_EVENT_SHIFT event
+ * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+ * events
+ * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+ * events or only IS_S_RD_EVENT_SHIFT
+ *
+ * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt
+ * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes
+ * full. This can happen if Master issues write requests of more than
+ * 64 bytes.
+ */
+ if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
+ status & BIT(IS_S_RD_EVENT_SHIFT) ||
+ status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
+ /* disable slave interrupts */
+ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ val &= ~iproc_i2c->slave_int_mask;
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+
+ if (status & BIT(IS_S_RD_EVENT_SHIFT))
+ /* Master-write-read request */
+ iproc_i2c->slave_rx_only = false;
+ else
+ /* Master-write request only */
+ iproc_i2c->slave_rx_only = true;
+
+ /* schedule tasklet to read data later */
+ tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
+
+ /*
+ * clear only IS_S_RX_EVENT_SHIFT and
+ * IS_S_RX_FIFO_FULL_SHIFT interrupt.
+ */
+ val = BIT(IS_S_RX_EVENT_SHIFT);
+ if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT))
+ val |= BIT(IS_S_RX_FIFO_FULL_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
+ }
+
+ if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
+ iproc_i2c->tx_underrun++;
+ if (iproc_i2c->tx_underrun == 1)
+ /* Start of SMBUS for Master Read */
i2c_slave_event(iproc_i2c->slave,
- I2C_SLAVE_WRITE_RECEIVED, &value);
- if (rx_status == I2C_SLAVE_RX_END)
- i2c_slave_event(iproc_i2c->slave,
- I2C_SLAVE_STOP, &value);
- }
- } else if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
- /* Master read other than start */
- i2c_slave_event(iproc_i2c->slave,
- I2C_SLAVE_READ_PROCESSED, &value);
+ I2C_SLAVE_READ_REQUESTED,
+ &value);
+ else
+ /* Master read other than start */
+ i2c_slave_event(iproc_i2c->slave,
+ I2C_SLAVE_READ_PROCESSED,
+ &value);
iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value);
+ /* start transfer */
val = BIT(S_CMD_START_BUSY_SHIFT);
iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
+
+ /* clear interrupt */
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET,
+ BIT(IS_S_TX_UNDERRUN_SHIFT));
}
- /* Stop */
+ /* Stop received from master in case of master read transaction */
if (status & BIT(IS_S_START_BUSY_SHIFT)) {
- i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &value);
/*
- * Enable interrupt for TX FIFO becomes empty and
+ * Disable interrupt for TX FIFO becomes empty and
* less than PKT_LENGTH bytes were output on the SMBUS
*/
- val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
- val &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+ iproc_i2c->slave_int_mask &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET,
+ iproc_i2c->slave_int_mask);
+
+ /* End of SMBUS for Master Read */
+ val = BIT(S_TX_WR_STATUS_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, val);
+
+ val = BIT(S_CMD_START_BUSY_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
+
+ /* flush TX FIFOs */
+ val = iproc_i2c_rd_reg(iproc_i2c, S_FIFO_CTRL_OFFSET);
+ val |= (BIT(S_FIFO_TX_FLUSH_SHIFT));
+ iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, val);
+
+ i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &value);
+
+ /* clear interrupt */
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET,
+ BIT(IS_S_START_BUSY_SHIFT));
}
- /* clear interrupt status */
- iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, status);
+ /* check slave transmit status only if slave is transmitting */
+ if (!iproc_i2c->slave_rx_only)
+ bcm_iproc_i2c_check_slave_status(iproc_i2c);
- bcm_iproc_i2c_check_slave_status(iproc_i2c);
return true;
}
@@ -505,12 +640,17 @@ static void bcm_iproc_i2c_process_m_event(struct bcm_iproc_i2c_dev *iproc_i2c,
static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
{
struct bcm_iproc_i2c_dev *iproc_i2c = data;
- u32 status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET);
+ u32 slave_status;
+ u32 status;
bool ret;
- u32 sl_status = status & ISR_MASK_SLAVE;
- if (sl_status) {
- ret = bcm_iproc_i2c_slave_isr(iproc_i2c, sl_status);
+ status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET);
+ /* process only slave interrupt which are enabled */
+ slave_status = status & iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET) &
+ ISR_MASK_SLAVE;
+
+ if (slave_status) {
+ ret = bcm_iproc_i2c_slave_isr(iproc_i2c, slave_status);
if (ret)
return IRQ_HANDLED;
else
@@ -1066,6 +1206,10 @@ static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
return -EAFNOSUPPORT;
iproc_i2c->slave = slave;
+
+ tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn,
+ (unsigned long)iproc_i2c);
+
bcm_iproc_i2c_slave_init(iproc_i2c, false);
return 0;
}
@@ -1086,6 +1230,8 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
IE_S_ALL_INTERRUPT_SHIFT);
iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
+ tasklet_kill(&iproc_i2c->slave_rx_tasklet);
+
/* Erase the slave address programmed */
tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
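
The iProc slave path above defers FIFO draining to a tasklet that is set up at slave registration and killed at unregistration, so the hard-IRQ handler only acknowledges and reschedules. A self-contained sketch of that life cycle with a hypothetical device structure; only the tasklet_init()/tasklet_schedule()/tasklet_kill() calls mirror the diff.

/* Sketch: the tasklet life cycle used for deferred slave RX handling. */
#include <linux/interrupt.h>

struct example_dev {
	struct tasklet_struct rx_tasklet;
};

static void example_rx_tasklet_fn(unsigned long data)
{
	struct example_dev *dev = (struct example_dev *)data;

	/* Drain the RX FIFO here, outside hard-IRQ context. */
	(void)dev;
}

static void example_setup(struct example_dev *dev)
{
	tasklet_init(&dev->rx_tasklet, example_rx_tasklet_fn,
		     (unsigned long)dev);
}

static irqreturn_t example_isr(int irq, void *data)
{
	struct example_dev *dev = data;

	tasklet_schedule(&dev->rx_tasklet);	/* defer the heavy work */
	return IRQ_HANDLED;
}

static void example_teardown(struct example_dev *dev)
{
	tasklet_kill(&dev->rx_tasklet);		/* wait for any pending run */
}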
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index d4e0a0f6732a..ba766d24219e 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -316,7 +316,7 @@ static int brcmstb_send_i2c_cmd(struct brcmstb_i2c_dev *dev,
goto cmd_out;
}
- if ((CMD_RD || CMD_WR) &&
+ if ((cmd == CMD_RD || cmd == CMD_WR) &&
bsc_readl(dev, iic_enable) & BSC_IIC_EN_NOACK_MASK) {
rc = -EREMOTEIO;
dev_dbg(dev->device, "controller received NOACK intr for %s\n",
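
The brcmstb fix replaces `(CMD_RD || CMD_WR)`, which tests the constants themselves and is therefore always true, with a comparison against the current command. A tiny standalone illustration of the bug class, using hypothetical values:

/* Sketch: why (CMD_RD || CMD_WR) is always true. Hypothetical values only. */
#include <stdio.h>

#define CMD_RD 1
#define CMD_WR 2

int main(void)
{
	int cmd = 0;	/* neither a read nor a write */

	if (CMD_RD || CMD_WR)			/* tests the constants: always true */
		printf("buggy check fires\n");

	if (cmd == CMD_RD || cmd == CMD_WR)	/* tests the variable: correct */
		printf("fixed check fires\n");

	return 0;
}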
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 85307cfa7109..5392b82f68a4 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -38,6 +38,8 @@
#define DW_IC_CON_TX_EMPTY_CTRL BIT(8)
#define DW_IC_CON_RX_FIFO_FULL_HLD_CTRL BIT(9)
+#define DW_IC_DATA_CMD_DAT GENMASK(7, 0)
+
/*
* Registers offset
*/
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index d6425ad6e6a3..dd27b9dbe931 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -432,7 +432,7 @@ i2c_dw_read(struct dw_i2c_dev *dev)
regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
/* Ensure length byte is a valid value */
if (flags & I2C_M_RECV_LEN &&
- tmp <= I2C_SMBUS_BLOCK_MAX && tmp > 0) {
+ (tmp & DW_IC_DATA_CMD_DAT) <= I2C_SMBUS_BLOCK_MAX && tmp > 0) {
len = i2c_dw_recv_len(dev, tmp);
}
*buf++ = tmp;
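
The DesignWare DATA_CMD register carries the received byte in bits [7:0] alongside control/status bits, so the length byte of an I2C_M_RECV_LEN transfer has to be masked with DW_IC_DATA_CMD_DAT before the range check, as the hunk above does. A minimal sketch of the masking idiom with a hypothetical register layout:

/* Sketch: extracting the data byte from a wider register word. */
#include <linux/bits.h>

#define EXAMPLE_DATA_MASK	GENMASK(7, 0)	/* data lives in bits [7:0] */

static unsigned int example_extract_len(unsigned int regval)
{
	/* Ignore status/control bits above bit 7 before using the value. */
	return regval & EXAMPLE_DATA_MASK;
}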
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index f67639dc74b7..60c838c7c454 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -160,12 +160,11 @@ static irqreturn_t dc_i2c_irq(int irq, void *dev_id)
{
struct dc_i2c *i2c = dev_id;
int cmd_status = dc_i2c_cmd_status(i2c);
- unsigned long flags;
u8 addr_cmd;
writeb_relaxed(1, i2c->regs + II_INTFLAG_CLEAR);
- spin_lock_irqsave(&i2c->lock, flags);
+ spin_lock(&i2c->lock);
if (cmd_status == II_CMD_STATUS_ACK_BAD
|| cmd_status == II_CMD_STATUS_ABORT) {
@@ -207,7 +206,7 @@ static irqreturn_t dc_i2c_irq(int irq, void *dev_id)
}
out:
- spin_unlock_irqrestore(&i2c->lock, flags);
+ spin_unlock(&i2c->lock);
return IRQ_HANDLED;
}
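
Since dc_i2c_irq() runs in hard interrupt context with interrupts already disabled, the _irqsave/_irqrestore variants add nothing and plain spin_lock()/spin_unlock() suffices, which is what the hunk above switches to. A short sketch of the two contexts, with a hypothetical device structure:

/* Sketch: locking inside a hard-IRQ handler vs. process context. */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct example_dev {
	spinlock_t lock;
};

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;

	spin_lock(&dev->lock);		/* IRQs are already off here */
	/* ... touch state shared with process context ... */
	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}

static void example_process_context(struct example_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);	/* must mask IRQs here */
	/* ... same shared state ... */
	spin_unlock_irqrestore(&dev->lock, flags);
}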
diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c
deleted file mode 100644
index f6e13ceeb2b3..000000000000
--- a/drivers/i2c/busses/i2c-efm32.c
+++ /dev/null
@@ -1,469 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2014 Uwe Kleine-Koenig for Pengutronix
- */
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-
-#define DRIVER_NAME "efm32-i2c"
-
-#define MASK_VAL(mask, val) ((val << __ffs(mask)) & mask)
-
-#define REG_CTRL 0x00
-#define REG_CTRL_EN 0x00001
-#define REG_CTRL_SLAVE 0x00002
-#define REG_CTRL_AUTOACK 0x00004
-#define REG_CTRL_AUTOSE 0x00008
-#define REG_CTRL_AUTOSN 0x00010
-#define REG_CTRL_ARBDIS 0x00020
-#define REG_CTRL_GCAMEN 0x00040
-#define REG_CTRL_CLHR__MASK 0x00300
-#define REG_CTRL_BITO__MASK 0x03000
-#define REG_CTRL_BITO_OFF 0x00000
-#define REG_CTRL_BITO_40PCC 0x01000
-#define REG_CTRL_BITO_80PCC 0x02000
-#define REG_CTRL_BITO_160PCC 0x03000
-#define REG_CTRL_GIBITO 0x08000
-#define REG_CTRL_CLTO__MASK 0x70000
-#define REG_CTRL_CLTO_OFF 0x00000
-
-#define REG_CMD 0x04
-#define REG_CMD_START 0x00001
-#define REG_CMD_STOP 0x00002
-#define REG_CMD_ACK 0x00004
-#define REG_CMD_NACK 0x00008
-#define REG_CMD_CONT 0x00010
-#define REG_CMD_ABORT 0x00020
-#define REG_CMD_CLEARTX 0x00040
-#define REG_CMD_CLEARPC 0x00080
-
-#define REG_STATE 0x08
-#define REG_STATE_BUSY 0x00001
-#define REG_STATE_MASTER 0x00002
-#define REG_STATE_TRANSMITTER 0x00004
-#define REG_STATE_NACKED 0x00008
-#define REG_STATE_BUSHOLD 0x00010
-#define REG_STATE_STATE__MASK 0x000e0
-#define REG_STATE_STATE_IDLE 0x00000
-#define REG_STATE_STATE_WAIT 0x00020
-#define REG_STATE_STATE_START 0x00040
-#define REG_STATE_STATE_ADDR 0x00060
-#define REG_STATE_STATE_ADDRACK 0x00080
-#define REG_STATE_STATE_DATA 0x000a0
-#define REG_STATE_STATE_DATAACK 0x000c0
-
-#define REG_STATUS 0x0c
-#define REG_STATUS_PSTART 0x00001
-#define REG_STATUS_PSTOP 0x00002
-#define REG_STATUS_PACK 0x00004
-#define REG_STATUS_PNACK 0x00008
-#define REG_STATUS_PCONT 0x00010
-#define REG_STATUS_PABORT 0x00020
-#define REG_STATUS_TXC 0x00040
-#define REG_STATUS_TXBL 0x00080
-#define REG_STATUS_RXDATAV 0x00100
-
-#define REG_CLKDIV 0x10
-#define REG_CLKDIV_DIV__MASK 0x001ff
-#define REG_CLKDIV_DIV(div) MASK_VAL(REG_CLKDIV_DIV__MASK, (div))
-
-#define REG_SADDR 0x14
-#define REG_SADDRMASK 0x18
-#define REG_RXDATA 0x1c
-#define REG_RXDATAP 0x20
-#define REG_TXDATA 0x24
-#define REG_IF 0x28
-#define REG_IF_START 0x00001
-#define REG_IF_RSTART 0x00002
-#define REG_IF_ADDR 0x00004
-#define REG_IF_TXC 0x00008
-#define REG_IF_TXBL 0x00010
-#define REG_IF_RXDATAV 0x00020
-#define REG_IF_ACK 0x00040
-#define REG_IF_NACK 0x00080
-#define REG_IF_MSTOP 0x00100
-#define REG_IF_ARBLOST 0x00200
-#define REG_IF_BUSERR 0x00400
-#define REG_IF_BUSHOLD 0x00800
-#define REG_IF_TXOF 0x01000
-#define REG_IF_RXUF 0x02000
-#define REG_IF_BITO 0x04000
-#define REG_IF_CLTO 0x08000
-#define REG_IF_SSTOP 0x10000
-
-#define REG_IFS 0x2c
-#define REG_IFC 0x30
-#define REG_IFC__MASK 0x1ffcf
-
-#define REG_IEN 0x34
-
-#define REG_ROUTE 0x38
-#define REG_ROUTE_SDAPEN 0x00001
-#define REG_ROUTE_SCLPEN 0x00002
-#define REG_ROUTE_LOCATION__MASK 0x00700
-#define REG_ROUTE_LOCATION(n) MASK_VAL(REG_ROUTE_LOCATION__MASK, (n))
-
-struct efm32_i2c_ddata {
- struct i2c_adapter adapter;
-
- struct clk *clk;
- void __iomem *base;
- unsigned int irq;
- u8 location;
- unsigned long frequency;
-
- /* transfer data */
- struct completion done;
- struct i2c_msg *msgs;
- size_t num_msgs;
- size_t current_word, current_msg;
- int retval;
-};
-
-static u32 efm32_i2c_read32(struct efm32_i2c_ddata *ddata, unsigned offset)
-{
- return readl(ddata->base + offset);
-}
-
-static void efm32_i2c_write32(struct efm32_i2c_ddata *ddata,
- unsigned offset, u32 value)
-{
- writel(value, ddata->base + offset);
-}
-
-static void efm32_i2c_send_next_msg(struct efm32_i2c_ddata *ddata)
-{
- struct i2c_msg *cur_msg = &ddata->msgs[ddata->current_msg];
-
- efm32_i2c_write32(ddata, REG_CMD, REG_CMD_START);
- efm32_i2c_write32(ddata, REG_TXDATA, i2c_8bit_addr_from_msg(cur_msg));
-}
-
-static void efm32_i2c_send_next_byte(struct efm32_i2c_ddata *ddata)
-{
- struct i2c_msg *cur_msg = &ddata->msgs[ddata->current_msg];
-
- if (ddata->current_word >= cur_msg->len) {
- /* cur_msg completely transferred */
- ddata->current_word = 0;
- ddata->current_msg += 1;
-
- if (ddata->current_msg >= ddata->num_msgs) {
- efm32_i2c_write32(ddata, REG_CMD, REG_CMD_STOP);
- complete(&ddata->done);
- } else {
- efm32_i2c_send_next_msg(ddata);
- }
- } else {
- efm32_i2c_write32(ddata, REG_TXDATA,
- cur_msg->buf[ddata->current_word++]);
- }
-}
-
-static void efm32_i2c_recv_next_byte(struct efm32_i2c_ddata *ddata)
-{
- struct i2c_msg *cur_msg = &ddata->msgs[ddata->current_msg];
-
- cur_msg->buf[ddata->current_word] = efm32_i2c_read32(ddata, REG_RXDATA);
- ddata->current_word += 1;
- if (ddata->current_word >= cur_msg->len) {
- /* cur_msg completely transferred */
- ddata->current_word = 0;
- ddata->current_msg += 1;
-
- efm32_i2c_write32(ddata, REG_CMD, REG_CMD_NACK);
-
- if (ddata->current_msg >= ddata->num_msgs) {
- efm32_i2c_write32(ddata, REG_CMD, REG_CMD_STOP);
- complete(&ddata->done);
- } else {
- efm32_i2c_send_next_msg(ddata);
- }
- } else {
- efm32_i2c_write32(ddata, REG_CMD, REG_CMD_ACK);
- }
-}
-
-static irqreturn_t efm32_i2c_irq(int irq, void *dev_id)
-{
- struct efm32_i2c_ddata *ddata = dev_id;
- struct i2c_msg *cur_msg = &ddata->msgs[ddata->current_msg];
- u32 irqflag = efm32_i2c_read32(ddata, REG_IF);
- u32 state = efm32_i2c_read32(ddata, REG_STATE);
-
- efm32_i2c_write32(ddata, REG_IFC, irqflag & REG_IFC__MASK);
-
- switch (state & REG_STATE_STATE__MASK) {
- case REG_STATE_STATE_IDLE:
- /* arbitration lost? */
- ddata->retval = -EAGAIN;
- complete(&ddata->done);
- break;
- case REG_STATE_STATE_WAIT:
- /*
- * huh, this shouldn't happen.
- * Reset hardware state and get out
- */
- ddata->retval = -EIO;
- efm32_i2c_write32(ddata, REG_CMD,
- REG_CMD_STOP | REG_CMD_ABORT |
- REG_CMD_CLEARTX | REG_CMD_CLEARPC);
- complete(&ddata->done);
- break;
- case REG_STATE_STATE_START:
- /* "caller" is expected to send an address */
- break;
- case REG_STATE_STATE_ADDR:
- /* wait for Ack or NAck of slave */
- break;
- case REG_STATE_STATE_ADDRACK:
- if (state & REG_STATE_NACKED) {
- efm32_i2c_write32(ddata, REG_CMD, REG_CMD_STOP);
- ddata->retval = -ENXIO;
- complete(&ddata->done);
- } else if (cur_msg->flags & I2C_M_RD) {
- /* wait for slave to send first data byte */
- } else {
- efm32_i2c_send_next_byte(ddata);
- }
- break;
- case REG_STATE_STATE_DATA:
- if (cur_msg->flags & I2C_M_RD) {
- efm32_i2c_recv_next_byte(ddata);
- } else {
- /* wait for Ack or Nack of slave */
- }
- break;
- case REG_STATE_STATE_DATAACK:
- if (state & REG_STATE_NACKED) {
- efm32_i2c_write32(ddata, REG_CMD, REG_CMD_STOP);
- complete(&ddata->done);
- } else {
- efm32_i2c_send_next_byte(ddata);
- }
- }
-
- return IRQ_HANDLED;
-}
-
-static int efm32_i2c_master_xfer(struct i2c_adapter *adap,
- struct i2c_msg *msgs, int num)
-{
- struct efm32_i2c_ddata *ddata = i2c_get_adapdata(adap);
- int ret;
-
- if (ddata->msgs)
- return -EBUSY;
-
- ddata->msgs = msgs;
- ddata->num_msgs = num;
- ddata->current_word = 0;
- ddata->current_msg = 0;
- ddata->retval = -EIO;
-
- reinit_completion(&ddata->done);
-
- dev_dbg(&ddata->adapter.dev, "state: %08x, status: %08x\n",
- efm32_i2c_read32(ddata, REG_STATE),
- efm32_i2c_read32(ddata, REG_STATUS));
-
- efm32_i2c_send_next_msg(ddata);
-
- wait_for_completion(&ddata->done);
-
- if (ddata->current_msg >= ddata->num_msgs)
- ret = ddata->num_msgs;
- else
- ret = ddata->retval;
-
- return ret;
-}
-
-static u32 efm32_i2c_functionality(struct i2c_adapter *adap)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-static const struct i2c_algorithm efm32_i2c_algo = {
- .master_xfer = efm32_i2c_master_xfer,
- .functionality = efm32_i2c_functionality,
-};
-
-static u32 efm32_i2c_get_configured_location(struct efm32_i2c_ddata *ddata)
-{
- u32 reg = efm32_i2c_read32(ddata, REG_ROUTE);
-
- return (reg & REG_ROUTE_LOCATION__MASK) >>
- __ffs(REG_ROUTE_LOCATION__MASK);
-}
-
-static int efm32_i2c_probe(struct platform_device *pdev)
-{
- struct efm32_i2c_ddata *ddata;
- struct resource *res;
- unsigned long rate;
- struct device_node *np = pdev->dev.of_node;
- u32 location, frequency;
- int ret;
- u32 clkdiv;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
- platform_set_drvdata(pdev, ddata);
-
- init_completion(&ddata->done);
- strlcpy(ddata->adapter.name, pdev->name, sizeof(ddata->adapter.name));
- ddata->adapter.owner = THIS_MODULE;
- ddata->adapter.algo = &efm32_i2c_algo;
- ddata->adapter.dev.parent = &pdev->dev;
- ddata->adapter.dev.of_node = pdev->dev.of_node;
- i2c_set_adapdata(&ddata->adapter, ddata);
-
- ddata->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(ddata->clk)) {
- ret = PTR_ERR(ddata->clk);
- dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
- return ret;
- }
-
- ddata->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
- if (IS_ERR(ddata->base))
- return PTR_ERR(ddata->base);
-
- if (resource_size(res) < 0x42) {
- dev_err(&pdev->dev, "memory resource too small\n");
- return -EINVAL;
- }
-
- ret = platform_get_irq(pdev, 0);
- if (ret <= 0) {
- if (!ret)
- ret = -EINVAL;
- return ret;
- }
-
- ddata->irq = ret;
-
- ret = clk_prepare_enable(ddata->clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to enable clock (%d)\n", ret);
- return ret;
- }
-
-
- ret = of_property_read_u32(np, "energymicro,location", &location);
-
- if (ret)
- /* fall back to wrongly namespaced property */
- ret = of_property_read_u32(np, "efm32,location", &location);
-
- if (!ret) {
- dev_dbg(&pdev->dev, "using location %u\n", location);
- } else {
- /* default to location configured in hardware */
- location = efm32_i2c_get_configured_location(ddata);
-
- dev_info(&pdev->dev, "fall back to location %u\n", location);
- }
-
- ddata->location = location;
-
- ret = of_property_read_u32(np, "clock-frequency", &frequency);
- if (!ret) {
- dev_dbg(&pdev->dev, "using frequency %u\n", frequency);
- } else {
- frequency = I2C_MAX_STANDARD_MODE_FREQ;
- dev_info(&pdev->dev, "defaulting to 100 kHz\n");
- }
- ddata->frequency = frequency;
-
- rate = clk_get_rate(ddata->clk);
- if (!rate) {
- dev_err(&pdev->dev, "there is no input clock available\n");
- ret = -EINVAL;
- goto err_disable_clk;
- }
- clkdiv = DIV_ROUND_UP(rate, 8 * ddata->frequency) - 1;
- if (clkdiv >= 0x200) {
- dev_err(&pdev->dev,
- "input clock too fast (%lu) to divide down to bus freq (%lu)",
- rate, ddata->frequency);
- ret = -EINVAL;
- goto err_disable_clk;
- }
-
- dev_dbg(&pdev->dev, "input clock = %lu, bus freq = %lu, clkdiv = %lu\n",
- rate, ddata->frequency, (unsigned long)clkdiv);
- efm32_i2c_write32(ddata, REG_CLKDIV, REG_CLKDIV_DIV(clkdiv));
-
- efm32_i2c_write32(ddata, REG_ROUTE, REG_ROUTE_SDAPEN |
- REG_ROUTE_SCLPEN |
- REG_ROUTE_LOCATION(ddata->location));
-
- efm32_i2c_write32(ddata, REG_CTRL, REG_CTRL_EN |
- REG_CTRL_BITO_160PCC | 0 * REG_CTRL_GIBITO);
-
- efm32_i2c_write32(ddata, REG_IFC, REG_IFC__MASK);
- efm32_i2c_write32(ddata, REG_IEN, REG_IF_TXC | REG_IF_ACK | REG_IF_NACK
- | REG_IF_ARBLOST | REG_IF_BUSERR | REG_IF_RXDATAV);
-
- /* to make bus idle */
- efm32_i2c_write32(ddata, REG_CMD, REG_CMD_ABORT);
-
- ret = request_irq(ddata->irq, efm32_i2c_irq, 0, DRIVER_NAME, ddata);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to request irq (%d)\n", ret);
- goto err_disable_clk;
- }
-
- ret = i2c_add_adapter(&ddata->adapter);
- if (ret) {
- free_irq(ddata->irq, ddata);
-
-err_disable_clk:
- clk_disable_unprepare(ddata->clk);
- }
- return ret;
-}
-
-static int efm32_i2c_remove(struct platform_device *pdev)
-{
- struct efm32_i2c_ddata *ddata = platform_get_drvdata(pdev);
-
- i2c_del_adapter(&ddata->adapter);
- free_irq(ddata->irq, ddata);
- clk_disable_unprepare(ddata->clk);
-
- return 0;
-}
-
-static const struct of_device_id efm32_i2c_dt_ids[] = {
- {
- .compatible = "energymicro,efm32-i2c",
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(of, efm32_i2c_dt_ids);
-
-static struct platform_driver efm32_i2c_driver = {
- .probe = efm32_i2c_probe,
- .remove = efm32_i2c_remove,
-
- .driver = {
- .name = DRIVER_NAME,
- .of_match_table = efm32_i2c_dt_ids,
- },
-};
-module_platform_driver(efm32_i2c_driver);
-
-MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
-MODULE_DESCRIPTION("EFM32 i2c driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 140426db28df..b0f50dce9d0f 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -49,7 +49,7 @@ static int mmapped;
static wait_queue_head_t pcf_wait;
static int pcf_pending;
-static spinlock_t lock;
+static DEFINE_SPINLOCK(lock);
static struct i2c_adapter pcf_isa_ops;
@@ -132,7 +132,6 @@ static irqreturn_t pcf_isa_handler(int this_irq, void *dev_id) {
static int pcf_isa_init(void)
{
- spin_lock_init(&lock);
if (!mmapped) {
if (!request_region(base, 2, pcf_isa_ops.name)) {
printk(KERN_ERR "%s: requested I/O region (%#x:2) is "
@@ -282,7 +281,7 @@ static int elektor_probe(struct device *dev, unsigned int id)
return -ENODEV;
}
-static int elektor_remove(struct device *dev, unsigned int id)
+static void elektor_remove(struct device *dev, unsigned int id)
{
i2c_del_adapter(&pcf_isa_ops);
@@ -298,8 +297,6 @@ static int elektor_remove(struct device *dev, unsigned int id)
iounmap(base_iomem);
release_mem_region(base, 2);
}
-
- return 0;
}
static struct isa_driver i2c_elektor_driver = {
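
The elektor change replaces a bare `static spinlock_t lock;` plus a runtime spin_lock_init() with DEFINE_SPINLOCK(), which initializes the lock at compile time and removes any window where the lock could be taken before initialization. A brief sketch contrasting the two forms:

/* Sketch: compile-time vs. runtime spinlock initialization. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* file-scope lock, usable immediately */

struct example_ctx {
	spinlock_t lock;		/* locks embedded in dynamic objects... */
};

static void example_ctx_init(struct example_ctx *ctx)
{
	spin_lock_init(&ctx->lock);	/* ...still need explicit runtime init */
}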
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index 20a9881a0d6c..5ac30d95650c 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -606,6 +606,7 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
u32 i2c_ctl;
u32 int_en = 0;
u32 i2c_auto_conf = 0;
+ u32 i2c_addr = 0;
u32 fifo_ctl;
unsigned long flags;
unsigned short trig_lvl;
@@ -640,7 +641,12 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
int_en |= HSI2C_INT_TX_ALMOSTEMPTY_EN;
}
- writel(HSI2C_SLV_ADDR_MAS(i2c->msg->addr), i2c->regs + HSI2C_ADDR);
+ i2c_addr = HSI2C_SLV_ADDR_MAS(i2c->msg->addr);
+
+ if (i2c->op_clock >= I2C_MAX_FAST_MODE_PLUS_FREQ)
+ i2c_addr |= HSI2C_MASTER_ID(MASTER_ID(i2c->adap.nr));
+
+ writel(i2c_addr, i2c->regs + HSI2C_ADDR);
writel(fifo_ctl, i2c->regs + HSI2C_FIFO_CTL);
writel(i2c_ctl, i2c->regs + HSI2C_CTL);
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index a4a6825c8758..7a048abbf92b 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -520,5 +520,5 @@ module_exit(i2c_gpio_exit);
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_DESCRIPTION("Platform-independent bitbanging I2C driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:i2c-gpio");
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index ab15b1ec2ab3..c45f226c2b85 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -413,10 +413,8 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
return PTR_ERR(priv->regs);
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(&pdev->dev, "cannot find HS-I2C IRQ\n");
+ if (irq <= 0)
return irq;
- }
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 877fe3733a42..4acee6f9e5a3 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -72,6 +72,7 @@
* Jasper Lake (SOC) 0x4da3 32 hard yes yes yes
* Comet Lake-V (PCH) 0xa3a3 32 hard yes yes yes
* Alder Lake-S (PCH) 0x7aa3 32 hard yes yes yes
+ * Alder Lake-P (PCH) 0x51a3 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -228,6 +229,7 @@
#define PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS 0x43a3
#define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS 0x4b23
#define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS 0x51a3
#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS 0x7aa3
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
@@ -1084,6 +1086,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS) },
{ 0, }
};
@@ -1433,7 +1436,7 @@ static int i801_add_mux(struct i801_priv *priv)
const struct i801_mux_config *mux_config;
struct i2c_mux_gpio_platform_data gpio_data;
struct gpiod_lookup_table *lookup;
- int err, i;
+ int i;
if (!priv->mux_drvdata)
return 0;
@@ -1473,22 +1476,17 @@ static int i801_add_mux(struct i801_priv *priv)
PLATFORM_DEVID_NONE, &gpio_data,
sizeof(struct i2c_mux_gpio_platform_data));
if (IS_ERR(priv->mux_pdev)) {
- err = PTR_ERR(priv->mux_pdev);
gpiod_remove_lookup_table(lookup);
- priv->mux_pdev = NULL;
dev_err(dev, "Failed to register i2c-mux-gpio device\n");
- return err;
}
- return 0;
+ return PTR_ERR_OR_ZERO(priv->mux_pdev);
}
static void i801_del_mux(struct i801_priv *priv)
{
- if (priv->mux_pdev)
- platform_device_unregister(priv->mux_pdev);
- if (priv->lookup)
- gpiod_remove_lookup_table(priv->lookup);
+ platform_device_unregister(priv->mux_pdev);
+ gpiod_remove_lookup_table(priv->lookup);
}
static unsigned int i801_get_adapter_class(struct i801_priv *priv)
@@ -1772,6 +1770,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS:
case PCI_DEVICE_ID_INTEL_EBG_SMBUS:
case PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS:
+ case PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS:
priv->features |= FEATURE_BLOCK_PROC;
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
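
The i801 mux registration above now returns PTR_ERR_OR_ZERO(priv->mux_pdev) and relies on the cleanup helpers tolerating an unset argument, so the explicit NULL checks can go. A minimal sketch of the PTR_ERR_OR_ZERO() idiom; example_create_child() is a hypothetical helper introduced only for illustration.

/* Sketch: collapsing an error-pointer check with PTR_ERR_OR_ZERO(). */
#include <linux/err.h>
#include <linux/platform_device.h>

struct example_priv {
	struct platform_device *child;
};

/* Hypothetical helper returning a device pointer or an ERR_PTR. */
static struct platform_device *example_create_child(struct example_priv *priv);

static int example_register(struct example_priv *priv)
{
	priv->child = example_create_child(priv);

	/* Returns the negative error if child is an ERR_PTR, else 0. */
	return PTR_ERR_OR_ZERO(priv->child);
}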
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index a8e8af57e33f..b80fdc1f0092 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -209,6 +209,7 @@ struct imx_i2c_struct {
struct imx_i2c_dma *dma;
struct i2c_client *slave;
+ enum i2c_slave_event last_slave_event;
};
static const struct imx_i2c_hwdata imx1_i2c_hwdata = {
@@ -550,7 +551,7 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
i2c_imx->cur_clk = i2c_clk_rate;
- div = (i2c_clk_rate + i2c_imx->bitrate - 1) / i2c_imx->bitrate;
+ div = DIV_ROUND_UP(i2c_clk_rate, i2c_imx->bitrate);
if (div < i2c_clk_div[0].div)
i = 0;
else if (div > i2c_clk_div[i2c_imx->hwdata->ndivs - 1].div)
@@ -568,8 +569,8 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
* This delay is used in I2C bus disable function
* to fix chip hardware bug.
*/
- i2c_imx->disable_delay = (500000U * i2c_clk_div[i].div
- + (i2c_clk_rate / 2) - 1) / (i2c_clk_rate / 2);
+ i2c_imx->disable_delay = DIV_ROUND_UP(500000U * i2c_clk_div[i].div,
+ i2c_clk_rate / 2);
#ifdef CONFIG_I2C_DEBUG_BUS
dev_dbg(&i2c_imx->adapter.dev, "I2C_CLK=%d, REQ DIV=%d\n",
@@ -675,6 +676,36 @@ static void i2c_imx_enable_bus_idle(struct imx_i2c_struct *i2c_imx)
}
}
+static void i2c_imx_slave_event(struct imx_i2c_struct *i2c_imx,
+ enum i2c_slave_event event, u8 *val)
+{
+ i2c_slave_event(i2c_imx->slave, event, val);
+ i2c_imx->last_slave_event = event;
+}
+
+static void i2c_imx_slave_finish_op(struct imx_i2c_struct *i2c_imx)
+{
+ u8 val;
+
+ while (i2c_imx->last_slave_event != I2C_SLAVE_STOP) {
+ switch (i2c_imx->last_slave_event) {
+ case I2C_SLAVE_READ_REQUESTED:
+ i2c_imx_slave_event(i2c_imx, I2C_SLAVE_READ_PROCESSED,
+ &val);
+ break;
+
+ case I2C_SLAVE_WRITE_REQUESTED:
+ case I2C_SLAVE_READ_PROCESSED:
+ case I2C_SLAVE_WRITE_RECEIVED:
+ i2c_imx_slave_event(i2c_imx, I2C_SLAVE_STOP, &val);
+ break;
+
+ case I2C_SLAVE_STOP:
+ break;
+ }
+ }
+}
+
static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx,
unsigned int status, unsigned int ctl)
{
@@ -687,9 +718,11 @@ static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx,
}
if (status & I2SR_IAAS) { /* Addressed as a slave */
+ i2c_imx_slave_finish_op(i2c_imx);
if (status & I2SR_SRW) { /* Master wants to read from us*/
dev_dbg(&i2c_imx->adapter.dev, "read requested");
- i2c_slave_event(i2c_imx->slave, I2C_SLAVE_READ_REQUESTED, &value);
+ i2c_imx_slave_event(i2c_imx,
+ I2C_SLAVE_READ_REQUESTED, &value);
/* Slave transmit */
ctl |= I2CR_MTX;
@@ -699,7 +732,8 @@ static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx,
imx_i2c_write_reg(value, i2c_imx, IMX_I2C_I2DR);
} else { /* Master wants to write to us */
dev_dbg(&i2c_imx->adapter.dev, "write requested");
- i2c_slave_event(i2c_imx->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ i2c_imx_slave_event(i2c_imx,
+ I2C_SLAVE_WRITE_REQUESTED, &value);
/* Slave receive */
ctl &= ~I2CR_MTX;
@@ -710,17 +744,20 @@ static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx,
} else if (!(ctl & I2CR_MTX)) { /* Receive mode */
if (status & I2SR_IBB) { /* No STOP signal detected */
value = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
- i2c_slave_event(i2c_imx->slave, I2C_SLAVE_WRITE_RECEIVED, &value);
+ i2c_imx_slave_event(i2c_imx,
+ I2C_SLAVE_WRITE_RECEIVED, &value);
} else { /* STOP signal is detected */
dev_dbg(&i2c_imx->adapter.dev,
"STOP signal detected");
- i2c_slave_event(i2c_imx->slave, I2C_SLAVE_STOP, &value);
+ i2c_imx_slave_event(i2c_imx,
+ I2C_SLAVE_STOP, &value);
}
} else if (!(status & I2SR_RXAK)) { /* Transmit mode received ACK */
ctl |= I2CR_MTX;
imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR);
- i2c_slave_event(i2c_imx->slave, I2C_SLAVE_READ_PROCESSED, &value);
+ i2c_imx_slave_event(i2c_imx,
+ I2C_SLAVE_READ_PROCESSED, &value);
imx_i2c_write_reg(value, i2c_imx, IMX_I2C_I2DR);
} else { /* Transmit mode received NAK */
@@ -761,6 +798,7 @@ static int i2c_imx_reg_slave(struct i2c_client *client)
return -EBUSY;
i2c_imx->slave = client;
+ i2c_imx->last_slave_event = I2C_SLAVE_STOP;
/* Resume */
ret = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
@@ -813,10 +851,17 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
status = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
ctl = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+
if (status & I2SR_IIF) {
i2c_imx_clear_irq(i2c_imx, I2SR_IIF);
- if (i2c_imx->slave && !(ctl & I2CR_MSTA))
- return i2c_imx_slave_isr(i2c_imx, status, ctl);
+ if (i2c_imx->slave) {
+ if (!(ctl & I2CR_MSTA)) {
+ return i2c_imx_slave_isr(i2c_imx, status, ctl);
+ } else if (i2c_imx->last_slave_event !=
+ I2C_SLAVE_STOP) {
+ i2c_imx_slave_finish_op(i2c_imx);
+ }
+ }
return i2c_imx_master_isr(i2c_imx, status);
}
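
The i2c-imx clock hunks above replace two open-coded round-up divisions with DIV_ROUND_UP(). The macro is plain integer arithmetic; the small userspace check below (illustrative only, not kernel code, with assumed example values) shows the two forms agree for the disable_delay computation.

#include <assert.h>
#include <stdio.h>

/* Mirrors the kernel's DIV_ROUND_UP definition. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int clk_rate = 66000000;	/* example module clock in Hz (assumed) */
	unsigned int div = 768;			/* example divider from i2c_clk_div[] (assumed) */

	unsigned int open_coded = (500000U * div + (clk_rate / 2) - 1) / (clk_rate / 2);
	unsigned int macro = DIV_ROUND_UP(500000U * div, clk_rate / 2);

	/* Both forms round up to the next whole microsecond. */
	assert(open_coded == macro);
	printf("disable_delay = %u us\n", macro);
	return 0;
}
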
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index cb4a25ebb890..8509c5f11356 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -437,9 +437,8 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id)
unsigned short intst;
unsigned short intmsk;
struct jz4780_i2c *i2c = dev_id;
- unsigned long flags;
- spin_lock_irqsave(&i2c->lock, flags);
+ spin_lock(&i2c->lock);
intmsk = jz4780_i2c_readw(i2c, JZ4780_I2C_INTM);
intst = jz4780_i2c_readw(i2c, JZ4780_I2C_INTST);
@@ -551,7 +550,7 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id)
}
done:
- spin_unlock_irqrestore(&i2c->lock, flags);
+ spin_unlock(&i2c->lock);
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
index 71d7bae2cbca..4e0b7c2882ce 100644
--- a/drivers/i2c/busses/i2c-mlxcpld.c
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -1,34 +1,8 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
+ * Mellanox i2c driver
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (C) 2016-2020 Mellanox Technologies
*/
#include <linux/delay.h>
@@ -37,7 +11,9 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
/* General defines */
#define MLXPLAT_CPLD_LPC_I2C_BASE_ADDR 0x2000
@@ -51,7 +27,7 @@
#define MLXCPLD_I2C_MAX_ADDR_LEN 4
#define MLXCPLD_I2C_RETR_NUM 2
#define MLXCPLD_I2C_XFER_TO 500000 /* usec */
-#define MLXCPLD_I2C_POLL_TIME 2000 /* usec */
+#define MLXCPLD_I2C_POLL_TIME 400 /* usec */
/* LPC I2C registers */
#define MLXCPLD_LPCI2C_CPBLTY_REG 0x0
@@ -72,6 +48,16 @@
#define MLXCPLD_LPCI2C_ACK_IND 1
#define MLXCPLD_LPCI2C_NACK_IND 2
+#define MLXCPLD_I2C_FREQ_1000KHZ_SET 0x04
+#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0f
+#define MLXCPLD_I2C_FREQ_100KHZ_SET 0x42
+
+enum mlxcpld_i2c_frequency {
+ MLXCPLD_I2C_FREQ_1000KHZ = 1,
+ MLXCPLD_I2C_FREQ_400KHZ = 2,
+ MLXCPLD_I2C_FREQ_100KHZ = 3,
+};
+
struct mlxcpld_i2c_curr_xfer {
u8 cmd;
u8 addr_width;
@@ -489,8 +475,45 @@ static struct i2c_adapter mlxcpld_i2c_adapter = {
.nr = MLXCPLD_I2C_BUS_NUM,
};
+static int
+mlxcpld_i2c_set_frequency(struct mlxcpld_i2c_priv *priv,
+ struct mlxreg_core_hotplug_platform_data *pdata)
+{
+ struct mlxreg_core_item *item = pdata->items;
+ struct mlxreg_core_data *data;
+ u32 regval;
+ u8 freq;
+ int err;
+
+ if (!item)
+ return 0;
+
+ /* Read frequency setting. */
+ data = item->data;
+ err = regmap_read(pdata->regmap, data->reg, &regval);
+ if (err)
+ return err;
+
+ /* Set frequency only if it is not 100KHz, which is default. */
+ switch ((data->reg & data->mask) >> data->bit) {
+ case MLXCPLD_I2C_FREQ_1000KHZ:
+ freq = MLXCPLD_I2C_FREQ_1000KHZ_SET;
+ break;
+ case MLXCPLD_I2C_FREQ_400KHZ:
+ freq = MLXCPLD_I2C_FREQ_400KHZ_SET;
+ break;
+ default:
+ return 0;
+ }
+
+ mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_HALF_CYC_REG, &freq, 1);
+
+ return 0;
+}
+
static int mlxcpld_i2c_probe(struct platform_device *pdev)
{
+ struct mlxreg_core_hotplug_platform_data *pdata;
struct mlxcpld_i2c_priv *priv;
int err;
u8 val;
@@ -505,6 +528,14 @@ static int mlxcpld_i2c_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
priv->base_addr = MLXPLAT_CPLD_LPC_I2C_BASE_ADDR;
+ /* Set I2C bus frequency if platform data provides this info. */
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ err = mlxcpld_i2c_set_frequency(priv, pdata);
+ if (err)
+ goto mlxcpld_i2_probe_failed;
+ }
+
/* Register with i2c layer */
mlxcpld_i2c_adapter.timeout = usecs_to_jiffies(MLXCPLD_I2C_XFER_TO);
/* Read capability register */
@@ -523,8 +554,12 @@ static int mlxcpld_i2c_probe(struct platform_device *pdev)
err = i2c_add_numbered_adapter(&priv->adap);
if (err)
- mutex_destroy(&priv->lock);
+ goto mlxcpld_i2_probe_failed;
+ return 0;
+
+mlxcpld_i2_probe_failed:
+ mutex_destroy(&priv->lock);
return err;
}
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 0818d3e50734..2ffd2f354d0a 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -1275,7 +1275,8 @@ static int mtk_i2c_probe(struct platform_device *pdev)
mtk_i2c_clock_disable(i2c);
ret = devm_request_irq(&pdev->dev, irq, mtk_i2c_irq,
- IRQF_TRIGGER_NONE, I2C_DRV_NAME, i2c);
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+ I2C_DRV_NAME, i2c);
if (ret < 0) {
dev_err(&pdev->dev,
"Request I2C IRQ %d fail\n", irq);
@@ -1302,7 +1303,16 @@ static int mtk_i2c_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int mtk_i2c_resume(struct device *dev)
+static int mtk_i2c_suspend_noirq(struct device *dev)
+{
+ struct mtk_i2c *i2c = dev_get_drvdata(dev);
+
+ i2c_mark_adapter_suspended(&i2c->adap);
+
+ return 0;
+}
+
+static int mtk_i2c_resume_noirq(struct device *dev)
{
int ret;
struct mtk_i2c *i2c = dev_get_drvdata(dev);
@@ -1317,12 +1327,15 @@ static int mtk_i2c_resume(struct device *dev)
mtk_i2c_clock_disable(i2c);
+ i2c_mark_adapter_resumed(&i2c->adap);
+
return 0;
}
#endif
static const struct dev_pm_ops mtk_i2c_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(NULL, mtk_i2c_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_i2c_suspend_noirq,
+ mtk_i2c_resume_noirq)
};
static struct platform_driver mtk_i2c_driver = {
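
The i2c-mt65xx change above marks the adapter suspended/resumed in noirq callbacks (and keeps the IRQ armed across suspend via IRQF_NO_SUSPEND), so transfers attempted while the controller is down are rejected by the I2C core rather than touching powered-down hardware. A minimal sketch of that wiring, assuming a hypothetical driver whose private struct has an i2c_adapter member named adap (illustrative, not part of the patch):

/* Sketch only: mirrors the noirq suspend/resume wiring added above. */
static int foo_i2c_suspend_noirq(struct device *dev)
{
	struct foo_i2c *i2c = dev_get_drvdata(dev);

	/* Tell the I2C core to fail transfers until resume completes. */
	i2c_mark_adapter_suspended(&i2c->adap);
	return 0;
}

static int foo_i2c_resume_noirq(struct device *dev)
{
	struct foo_i2c *i2c = dev_get_drvdata(dev);

	/* Re-initialise the controller here if needed, then re-open the adapter. */
	i2c_mark_adapter_resumed(&i2c->adap);
	return 0;
}

static const struct dev_pm_ops foo_i2c_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_i2c_suspend_noirq, foo_i2c_resume_noirq)
};
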
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 5cfe70aedced..c590d36b5fd1 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -18,6 +18,7 @@
#include <linux/mv643xx_i2c.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -717,6 +718,10 @@ mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
struct mv64xxx_i2c_data *drv_data = i2c_get_adapdata(adap);
int rc, ret = num;
+ rc = pm_runtime_resume_and_get(&adap->dev);
+ if (rc)
+ return rc;
+
BUG_ON(drv_data->msgs != NULL);
drv_data->msgs = msgs;
drv_data->num_msgs = num;
@@ -732,6 +737,9 @@ mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
drv_data->num_msgs = 0;
drv_data->msgs = NULL;
+ pm_runtime_mark_last_busy(&adap->dev);
+ pm_runtime_put_autosuspend(&adap->dev);
+
return ret;
}
@@ -805,7 +813,7 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
* need to know tclk in order to calculate bus clock
* factors.
*/
- if (IS_ERR(drv_data->clk)) {
+ if (!drv_data->clk) {
rc = -ENODEV;
goto out;
}
@@ -828,7 +836,6 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
rc = PTR_ERR(drv_data->rstc);
goto out;
}
- reset_control_deassert(drv_data->rstc);
/* Its not yet defined how timeouts will be specified in device tree.
* So hard code the value to 1 second.
@@ -894,6 +901,32 @@ static int mv64xxx_i2c_init_recovery_info(struct mv64xxx_i2c_data *drv_data,
}
static int
+mv64xxx_i2c_runtime_suspend(struct device *dev)
+{
+ struct mv64xxx_i2c_data *drv_data = dev_get_drvdata(dev);
+
+ reset_control_assert(drv_data->rstc);
+ clk_disable_unprepare(drv_data->reg_clk);
+ clk_disable_unprepare(drv_data->clk);
+
+ return 0;
+}
+
+static int
+mv64xxx_i2c_runtime_resume(struct device *dev)
+{
+ struct mv64xxx_i2c_data *drv_data = dev_get_drvdata(dev);
+
+ clk_prepare_enable(drv_data->clk);
+ clk_prepare_enable(drv_data->reg_clk);
+ reset_control_reset(drv_data->rstc);
+
+ mv64xxx_i2c_hw_init(drv_data);
+
+ return 0;
+}
+
+static int
mv64xxx_i2c_probe(struct platform_device *pd)
{
struct mv64xxx_i2c_data *drv_data;
@@ -920,18 +953,22 @@ mv64xxx_i2c_probe(struct platform_device *pd)
/* Not all platforms have clocks */
drv_data->clk = devm_clk_get(&pd->dev, NULL);
- if (PTR_ERR(drv_data->clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (!IS_ERR(drv_data->clk))
- clk_prepare_enable(drv_data->clk);
+ if (IS_ERR(drv_data->clk)) {
+ if (PTR_ERR(drv_data->clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ drv_data->clk = NULL;
+ }
drv_data->reg_clk = devm_clk_get(&pd->dev, "reg");
- if (PTR_ERR(drv_data->reg_clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (!IS_ERR(drv_data->reg_clk))
- clk_prepare_enable(drv_data->reg_clk);
+ if (IS_ERR(drv_data->reg_clk)) {
+ if (PTR_ERR(drv_data->reg_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ drv_data->reg_clk = NULL;
+ }
drv_data->irq = platform_get_irq(pd, 0);
+ if (drv_data->irq < 0)
+ return drv_data->irq;
if (pdata) {
drv_data->freq_m = pdata->freq_m;
@@ -942,16 +979,12 @@ mv64xxx_i2c_probe(struct platform_device *pd)
} else if (pd->dev.of_node) {
rc = mv64xxx_of_config(drv_data, &pd->dev);
if (rc)
- goto exit_clk;
- }
- if (drv_data->irq < 0) {
- rc = drv_data->irq;
- goto exit_reset;
+ return rc;
}
rc = mv64xxx_i2c_init_recovery_info(drv_data, &pd->dev);
if (rc == -EPROBE_DEFER)
- goto exit_reset;
+ return rc;
drv_data->adapter.dev.parent = &pd->dev;
drv_data->adapter.algo = &mv64xxx_i2c_algo;
@@ -962,7 +995,14 @@ mv64xxx_i2c_probe(struct platform_device *pd)
platform_set_drvdata(pd, drv_data);
i2c_set_adapdata(&drv_data->adapter, drv_data);
- mv64xxx_i2c_hw_init(drv_data);
+ pm_runtime_set_autosuspend_delay(&pd->dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(&pd->dev);
+ pm_runtime_enable(&pd->dev);
+ if (!pm_runtime_enabled(&pd->dev)) {
+ rc = mv64xxx_i2c_runtime_resume(&pd->dev);
+ if (rc)
+ goto exit_disable_pm;
+ }
rc = request_irq(drv_data->irq, mv64xxx_i2c_intr, 0,
MV64XXX_I2C_CTLR_NAME, drv_data);
@@ -970,7 +1010,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
dev_err(&drv_data->adapter.dev,
"mv64xxx: Can't register intr handler irq%d: %d\n",
drv_data->irq, rc);
- goto exit_reset;
+ goto exit_disable_pm;
} else if ((rc = i2c_add_numbered_adapter(&drv_data->adapter)) != 0) {
dev_err(&drv_data->adapter.dev,
"mv64xxx: Can't add i2c adapter, rc: %d\n", -rc);
@@ -981,54 +1021,50 @@ mv64xxx_i2c_probe(struct platform_device *pd)
exit_free_irq:
free_irq(drv_data->irq, drv_data);
-exit_reset:
- reset_control_assert(drv_data->rstc);
-exit_clk:
- clk_disable_unprepare(drv_data->reg_clk);
- clk_disable_unprepare(drv_data->clk);
+exit_disable_pm:
+ pm_runtime_disable(&pd->dev);
+ if (!pm_runtime_status_suspended(&pd->dev))
+ mv64xxx_i2c_runtime_suspend(&pd->dev);
return rc;
}
static int
-mv64xxx_i2c_remove(struct platform_device *dev)
+mv64xxx_i2c_remove(struct platform_device *pd)
{
- struct mv64xxx_i2c_data *drv_data = platform_get_drvdata(dev);
+ struct mv64xxx_i2c_data *drv_data = platform_get_drvdata(pd);
i2c_del_adapter(&drv_data->adapter);
free_irq(drv_data->irq, drv_data);
- reset_control_assert(drv_data->rstc);
- clk_disable_unprepare(drv_data->reg_clk);
- clk_disable_unprepare(drv_data->clk);
+ pm_runtime_disable(&pd->dev);
+ if (!pm_runtime_status_suspended(&pd->dev))
+ mv64xxx_i2c_runtime_suspend(&pd->dev);
return 0;
}
-#ifdef CONFIG_PM
-static int mv64xxx_i2c_resume(struct device *dev)
+static void
+mv64xxx_i2c_shutdown(struct platform_device *pd)
{
- struct mv64xxx_i2c_data *drv_data = dev_get_drvdata(dev);
-
- mv64xxx_i2c_hw_init(drv_data);
-
- return 0;
+ pm_runtime_disable(&pd->dev);
+ if (!pm_runtime_status_suspended(&pd->dev))
+ mv64xxx_i2c_runtime_suspend(&pd->dev);
}
-static const struct dev_pm_ops mv64xxx_i2c_pm = {
- .resume = mv64xxx_i2c_resume,
+static const struct dev_pm_ops mv64xxx_i2c_pm_ops = {
+ SET_RUNTIME_PM_OPS(mv64xxx_i2c_runtime_suspend,
+ mv64xxx_i2c_runtime_resume, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
-#define mv64xxx_i2c_pm_ops (&mv64xxx_i2c_pm)
-#else
-#define mv64xxx_i2c_pm_ops NULL
-#endif
-
static struct platform_driver mv64xxx_i2c_driver = {
.probe = mv64xxx_i2c_probe,
.remove = mv64xxx_i2c_remove,
+ .shutdown = mv64xxx_i2c_shutdown,
.driver = {
.name = MV64XXX_I2C_CTLR_NAME,
- .pm = mv64xxx_i2c_pm_ops,
+ .pm = &mv64xxx_i2c_pm_ops,
.of_match_table = mv64xxx_i2c_of_match_table,
},
};
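
The mv64xxx conversion above moves clock and reset handling into runtime-PM callbacks and brackets each transfer with a get/put pair so the controller is only powered while a message is in flight. A condensed sketch of that xfer-path pattern (names are placeholders, not part of the patch):

/* Sketch only: the runtime-PM bracketing used in the xfer path above. */
static int foo_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	int rc, ret;

	/* Power the controller up (runtime-resume) before touching it. */
	rc = pm_runtime_resume_and_get(&adap->dev);
	if (rc)
		return rc;

	ret = foo_i2c_do_transfer(adap, msgs, num);	/* hypothetical helper */

	/* Arm the autosuspend timer instead of suspending immediately. */
	pm_runtime_mark_last_busy(&adap->dev);
	pm_runtime_put_autosuspend(&adap->dev);

	return ret;
}

Using pm_runtime_resume_and_get() rather than pm_runtime_get_sync() avoids leaving the usage counter raised when the resume itself fails.
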
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index d4b1b0865f67..a3363b20f168 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -1055,7 +1055,7 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
return ret;
}
-static int nmk_i2c_remove(struct amba_device *adev)
+static void nmk_i2c_remove(struct amba_device *adev)
{
struct resource *res = &adev->res;
struct nmk_i2c_dev *dev = amba_get_drvdata(adev);
@@ -1068,8 +1068,6 @@ static int nmk_i2c_remove(struct amba_device *adev)
i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
clk_disable_unprepare(dev->clk);
release_mem_region(res->start, resource_size(res));
-
- return 0;
}
static struct i2c_vendor_data vendor_stn8815 = {
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index f27bc1e55385..85e8cf58e8bf 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -161,7 +161,7 @@ static int pca_isa_probe(struct device *dev, unsigned int id)
return -ENODEV;
}
-static int pca_isa_remove(struct device *dev, unsigned int id)
+static void pca_isa_remove(struct device *dev, unsigned int id)
{
i2c_del_adapter(&pca_isa_ops);
@@ -170,8 +170,6 @@ static int pca_isa_remove(struct device *dev, unsigned int id)
free_irq(irq, &pca_isa_ops);
}
release_region(base, IO_SIZE);
-
- return 0;
}
static struct isa_driver pca_isa_driver = {
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 046d241183c5..214b4c913a13 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -86,6 +86,9 @@ struct geni_i2c_dev {
u32 clk_freq_out;
const struct geni_i2c_clk_fld *clk_fld;
int suspended;
+ void *dma_buf;
+ size_t xfer_len;
+ dma_addr_t dma_addr;
};
struct geni_i2c_err_log {
@@ -348,14 +351,39 @@ static void geni_i2c_tx_fsm_rst(struct geni_i2c_dev *gi2c)
dev_err(gi2c->se.dev, "Timeout resetting TX_FSM\n");
}
+static void geni_i2c_rx_msg_cleanup(struct geni_i2c_dev *gi2c,
+ struct i2c_msg *cur)
+{
+ gi2c->cur_rd = 0;
+ if (gi2c->dma_buf) {
+ if (gi2c->err)
+ geni_i2c_rx_fsm_rst(gi2c);
+ geni_se_rx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
+ i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
+ }
+}
+
+static void geni_i2c_tx_msg_cleanup(struct geni_i2c_dev *gi2c,
+ struct i2c_msg *cur)
+{
+ gi2c->cur_wr = 0;
+ if (gi2c->dma_buf) {
+ if (gi2c->err)
+ geni_i2c_tx_fsm_rst(gi2c);
+ geni_se_tx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
+ i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
+ }
+}
+
static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
u32 m_param)
{
- dma_addr_t rx_dma;
+ dma_addr_t rx_dma = 0;
unsigned long time_left;
void *dma_buf;
struct geni_se *se = &gi2c->se;
size_t len = msg->len;
+ struct i2c_msg *cur;
dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
if (dma_buf)
@@ -370,19 +398,18 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
geni_se_select_mode(se, GENI_SE_FIFO);
i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
dma_buf = NULL;
+ } else {
+ gi2c->xfer_len = len;
+ gi2c->dma_addr = rx_dma;
+ gi2c->dma_buf = dma_buf;
}
+ cur = gi2c->cur;
time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
if (!time_left)
geni_i2c_abort_xfer(gi2c);
- gi2c->cur_rd = 0;
- if (dma_buf) {
- if (gi2c->err)
- geni_i2c_rx_fsm_rst(gi2c);
- geni_se_rx_dma_unprep(se, rx_dma, len);
- i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
- }
+ geni_i2c_rx_msg_cleanup(gi2c, cur);
return gi2c->err;
}
@@ -390,11 +417,12 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
u32 m_param)
{
- dma_addr_t tx_dma;
+ dma_addr_t tx_dma = 0;
unsigned long time_left;
void *dma_buf;
struct geni_se *se = &gi2c->se;
size_t len = msg->len;
+ struct i2c_msg *cur;
dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
if (dma_buf)
@@ -409,22 +437,21 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
geni_se_select_mode(se, GENI_SE_FIFO);
i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
dma_buf = NULL;
+ } else {
+ gi2c->xfer_len = len;
+ gi2c->dma_addr = tx_dma;
+ gi2c->dma_buf = dma_buf;
}
if (!dma_buf) /* Get FIFO IRQ */
writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);
+ cur = gi2c->cur;
time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
if (!time_left)
geni_i2c_abort_xfer(gi2c);
- gi2c->cur_wr = 0;
- if (dma_buf) {
- if (gi2c->err)
- geni_i2c_tx_fsm_rst(gi2c);
- geni_se_tx_dma_unprep(se, tx_dma, len);
- i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
- }
+ geni_i2c_tx_msg_cleanup(gi2c, cur);
return gi2c->err;
}
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 5a47915869ae..61dc20fd1191 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1603,7 +1603,7 @@ out:
static u32 qup_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL_ALL & ~I2C_FUNC_SMBUS_QUICK);
}
static const struct i2c_algorithm qup_i2c_algo = {
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 217def2d7cb4..12f6d452c0f7 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -91,7 +91,6 @@
#define RCAR_BUS_PHASE_START (MDBS | MIE | ESG)
#define RCAR_BUS_PHASE_DATA (MDBS | MIE)
-#define RCAR_BUS_MASK_DATA (~(ESG | FSB) & 0xFF)
#define RCAR_BUS_PHASE_STOP (MDBS | MIE | FSB)
#define RCAR_IRQ_SEND (MNR | MAL | MST | MAT | MDE)
@@ -120,6 +119,7 @@ enum rcar_i2c_type {
};
struct rcar_i2c_priv {
+ u32 flags;
void __iomem *io;
struct i2c_adapter adap;
struct i2c_msg *msg;
@@ -130,7 +130,6 @@ struct rcar_i2c_priv {
int pos;
u32 icccr;
- u32 flags;
u8 recovery_icmcr; /* protected by adapter lock */
enum rcar_i2c_type devtype;
struct i2c_client *slave;
@@ -621,27 +620,16 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
/*
* This driver has a lock-free design because there are IP cores (at least
* R-Car Gen2) which have an inherent race condition in their hardware design.
- * There, we need to clear RCAR_BUS_MASK_DATA bits as soon as possible after
+ * There, we need to switch to RCAR_BUS_PHASE_DATA as soon as possible after
* the interrupt was generated, otherwise an unwanted repeated message gets
* generated. It turned out that taking a spinlock at the beginning of the ISR
* was already causing repeated messages. Thus, this driver was converted to
* the now lockless behaviour. Please keep this in mind when hacking the driver.
+ * R-Car Gen3 seems to have this fixed but earlier versions than R-Car Gen2 are
+ * likely affected. Therefore, we have different interrupt handler entries.
*/
-static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
+static irqreturn_t rcar_i2c_irq(int irq, struct rcar_i2c_priv *priv, u32 msr)
{
- struct rcar_i2c_priv *priv = ptr;
- u32 msr, val;
-
- /* Clear START or STOP immediately, except for REPSTART after read */
- if (likely(!(priv->flags & ID_P_REP_AFTER_RD))) {
- val = rcar_i2c_read(priv, ICMCR);
- rcar_i2c_write(priv, ICMCR, val & RCAR_BUS_MASK_DATA);
- }
-
- msr = rcar_i2c_read(priv, ICMSR);
-
- /* Only handle interrupts that are currently enabled */
- msr &= rcar_i2c_read(priv, ICMIER);
if (!msr) {
if (rcar_i2c_slave_irq(priv))
return IRQ_HANDLED;
@@ -685,6 +673,41 @@ out:
return IRQ_HANDLED;
}
+static irqreturn_t rcar_i2c_gen2_irq(int irq, void *ptr)
+{
+ struct rcar_i2c_priv *priv = ptr;
+ u32 msr;
+
+ /* Clear START or STOP immediately, except for REPSTART after read */
+ if (likely(!(priv->flags & ID_P_REP_AFTER_RD)))
+ rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
+
+ /* Only handle interrupts that are currently enabled */
+ msr = rcar_i2c_read(priv, ICMSR);
+ msr &= rcar_i2c_read(priv, ICMIER);
+
+ return rcar_i2c_irq(irq, priv, msr);
+}
+
+static irqreturn_t rcar_i2c_gen3_irq(int irq, void *ptr)
+{
+ struct rcar_i2c_priv *priv = ptr;
+ u32 msr;
+
+ /* Only handle interrupts that are currently enabled */
+ msr = rcar_i2c_read(priv, ICMSR);
+ msr &= rcar_i2c_read(priv, ICMIER);
+
+ /*
+ * Clear START or STOP immediately, except for REPSTART after read or
+ * if a spurious interrupt was detected.
+ */
+ if (likely(!(priv->flags & ID_P_REP_AFTER_RD) && msr))
+ rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
+
+ return rcar_i2c_irq(irq, priv, msr);
+}
+
static struct dma_chan *rcar_i2c_request_dma_chan(struct device *dev,
enum dma_transfer_direction dir,
dma_addr_t port_addr)
@@ -931,6 +954,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
struct rcar_i2c_priv *priv;
struct i2c_adapter *adap;
struct device *dev = &pdev->dev;
+ unsigned long irqflags = 0;
+ irqreturn_t (*irqhandler)(int irq, void *ptr) = rcar_i2c_gen3_irq;
int ret;
/* Otherwise logic will break because some bytes must always use PIO */
@@ -979,6 +1004,11 @@ static int rcar_i2c_probe(struct platform_device *pdev)
rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
+ if (priv->devtype < I2C_RCAR_GEN3) {
+ irqflags |= IRQF_NO_THREAD;
+ irqhandler = rcar_i2c_gen2_irq;
+ }
+
if (priv->devtype == I2C_RCAR_GEN3) {
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (!IS_ERR(priv->rstc)) {
@@ -998,7 +1028,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
priv->flags |= ID_P_HOST_NOTIFY;
priv->irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv);
+ ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv);
if (ret < 0) {
dev_err(dev, "cannot get irq %d\n", priv->irq);
goto out_pm_disable;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 3eafe0eb3e4c..62a903fbe912 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -781,7 +781,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
/* declare our i2c functionality */
static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART |
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL_ALL | I2C_FUNC_NOSTART |
I2C_FUNC_PROTOCOL_MANGLING;
}
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
deleted file mode 100644
index 30db8fafe078..000000000000
--- a/drivers/i2c/busses/i2c-sirf.c
+++ /dev/null
@@ -1,475 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * I2C bus driver for CSR SiRFprimaII
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
-#define SIRFSOC_I2C_CLK_CTRL 0x00
-#define SIRFSOC_I2C_STATUS 0x0C
-#define SIRFSOC_I2C_CTRL 0x10
-#define SIRFSOC_I2C_IO_CTRL 0x14
-#define SIRFSOC_I2C_SDA_DELAY 0x18
-#define SIRFSOC_I2C_CMD_START 0x1C
-#define SIRFSOC_I2C_CMD_BUF 0x30
-#define SIRFSOC_I2C_DATA_BUF 0x80
-
-#define SIRFSOC_I2C_CMD_BUF_MAX 16
-#define SIRFSOC_I2C_DATA_BUF_MAX 16
-
-#define SIRFSOC_I2C_CMD(x) (SIRFSOC_I2C_CMD_BUF + (x)*0x04)
-#define SIRFSOC_I2C_DATA_MASK(x) (0xFF<<(((x)&3)*8))
-#define SIRFSOC_I2C_DATA_SHIFT(x) (((x)&3)*8)
-
-#define SIRFSOC_I2C_DIV_MASK (0xFFFF)
-
-/* I2C status flags */
-#define SIRFSOC_I2C_STAT_BUSY BIT(0)
-#define SIRFSOC_I2C_STAT_TIP BIT(1)
-#define SIRFSOC_I2C_STAT_NACK BIT(2)
-#define SIRFSOC_I2C_STAT_TR_INT BIT(4)
-#define SIRFSOC_I2C_STAT_STOP BIT(6)
-#define SIRFSOC_I2C_STAT_CMD_DONE BIT(8)
-#define SIRFSOC_I2C_STAT_ERR BIT(9)
-#define SIRFSOC_I2C_CMD_INDEX (0x1F<<16)
-
-/* I2C control flags */
-#define SIRFSOC_I2C_RESET BIT(0)
-#define SIRFSOC_I2C_CORE_EN BIT(1)
-#define SIRFSOC_I2C_MASTER_MODE BIT(2)
-#define SIRFSOC_I2C_CMD_DONE_EN BIT(11)
-#define SIRFSOC_I2C_ERR_INT_EN BIT(12)
-
-#define SIRFSOC_I2C_SDA_DELAY_MASK (0xFF)
-#define SIRFSOC_I2C_SCLF_FILTER (3<<8)
-
-#define SIRFSOC_I2C_START_CMD BIT(0)
-
-#define SIRFSOC_I2C_CMD_RP(x) ((x)&0x7)
-#define SIRFSOC_I2C_NACK BIT(3)
-#define SIRFSOC_I2C_WRITE BIT(4)
-#define SIRFSOC_I2C_READ BIT(5)
-#define SIRFSOC_I2C_STOP BIT(6)
-#define SIRFSOC_I2C_START BIT(7)
-
-#define SIRFSOC_I2C_ERR_NOACK 1
-#define SIRFSOC_I2C_ERR_TIMEOUT 2
-
-struct sirfsoc_i2c {
- void __iomem *base;
- struct clk *clk;
- u32 cmd_ptr; /* Current position in CMD buffer */
- u8 *buf; /* Buffer passed by user */
- u32 msg_len; /* Message length */
- u32 finished_len; /* number of bytes read/written */
- u32 read_cmd_len; /* number of read cmd sent */
- int msg_read; /* 1 indicates a read message */
- int err_status; /* 1 indicates an error on bus */
-
- u32 sda_delay; /* For suspend/resume */
- u32 clk_div;
- int last; /* Last message in transfer, STOP cmd can be sent */
-
- struct completion done; /* indicates completion of message transfer */
- struct i2c_adapter adapter;
-};
-
-static void i2c_sirfsoc_read_data(struct sirfsoc_i2c *siic)
-{
- u32 data = 0;
- int i;
-
- for (i = 0; i < siic->read_cmd_len; i++) {
- if (!(i & 0x3))
- data = readl(siic->base + SIRFSOC_I2C_DATA_BUF + i);
- siic->buf[siic->finished_len++] =
- (u8)((data & SIRFSOC_I2C_DATA_MASK(i)) >>
- SIRFSOC_I2C_DATA_SHIFT(i));
- }
-}
-
-static void i2c_sirfsoc_queue_cmd(struct sirfsoc_i2c *siic)
-{
- u32 regval;
- int i = 0;
-
- if (siic->msg_read) {
- while (((siic->finished_len + i) < siic->msg_len)
- && (siic->cmd_ptr < SIRFSOC_I2C_CMD_BUF_MAX)) {
- regval = SIRFSOC_I2C_READ | SIRFSOC_I2C_CMD_RP(0);
- if (((siic->finished_len + i) ==
- (siic->msg_len - 1)) && siic->last)
- regval |= SIRFSOC_I2C_STOP | SIRFSOC_I2C_NACK;
- writel(regval,
- siic->base + SIRFSOC_I2C_CMD(siic->cmd_ptr++));
- i++;
- }
-
- siic->read_cmd_len = i;
- } else {
- while ((siic->cmd_ptr < SIRFSOC_I2C_CMD_BUF_MAX - 1)
- && (siic->finished_len < siic->msg_len)) {
- regval = SIRFSOC_I2C_WRITE | SIRFSOC_I2C_CMD_RP(0);
- if ((siic->finished_len == (siic->msg_len - 1))
- && siic->last)
- regval |= SIRFSOC_I2C_STOP;
- writel(regval,
- siic->base + SIRFSOC_I2C_CMD(siic->cmd_ptr++));
- writel(siic->buf[siic->finished_len++],
- siic->base + SIRFSOC_I2C_CMD(siic->cmd_ptr++));
- }
- }
- siic->cmd_ptr = 0;
-
- /* Trigger the transfer */
- writel(SIRFSOC_I2C_START_CMD, siic->base + SIRFSOC_I2C_CMD_START);
-}
-
-static irqreturn_t i2c_sirfsoc_irq(int irq, void *dev_id)
-{
- struct sirfsoc_i2c *siic = (struct sirfsoc_i2c *)dev_id;
- u32 i2c_stat = readl(siic->base + SIRFSOC_I2C_STATUS);
-
- if (i2c_stat & SIRFSOC_I2C_STAT_ERR) {
- /* Error conditions */
- siic->err_status = SIRFSOC_I2C_ERR_NOACK;
- writel(SIRFSOC_I2C_STAT_ERR, siic->base + SIRFSOC_I2C_STATUS);
-
- if (i2c_stat & SIRFSOC_I2C_STAT_NACK)
- dev_dbg(&siic->adapter.dev, "ACK not received\n");
- else
- dev_err(&siic->adapter.dev, "I2C error\n");
-
- /*
- * Due to hardware ANOMALY, we need to reset I2C earlier after
- * we get NOACK while accessing non-existing clients, otherwise
- * we will get errors even we access existing clients later
- */
- writel(readl(siic->base + SIRFSOC_I2C_CTRL) | SIRFSOC_I2C_RESET,
- siic->base + SIRFSOC_I2C_CTRL);
- while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET)
- cpu_relax();
-
- complete(&siic->done);
- } else if (i2c_stat & SIRFSOC_I2C_STAT_CMD_DONE) {
- /* CMD buffer execution complete */
- if (siic->msg_read)
- i2c_sirfsoc_read_data(siic);
- if (siic->finished_len == siic->msg_len)
- complete(&siic->done);
- else /* Fill a new CMD buffer for left data */
- i2c_sirfsoc_queue_cmd(siic);
-
- writel(SIRFSOC_I2C_STAT_CMD_DONE, siic->base + SIRFSOC_I2C_STATUS);
- }
-
- return IRQ_HANDLED;
-}
-
-static void i2c_sirfsoc_set_address(struct sirfsoc_i2c *siic,
- struct i2c_msg *msg)
-{
- unsigned char addr;
- u32 regval = SIRFSOC_I2C_START | SIRFSOC_I2C_CMD_RP(0) | SIRFSOC_I2C_WRITE;
-
- /* no data and last message -> add STOP */
- if (siic->last && (msg->len == 0))
- regval |= SIRFSOC_I2C_STOP;
-
- writel(regval, siic->base + SIRFSOC_I2C_CMD(siic->cmd_ptr++));
-
- addr = i2c_8bit_addr_from_msg(msg);
-
- /* Reverse direction bit */
- if (msg->flags & I2C_M_REV_DIR_ADDR)
- addr ^= 1;
-
- writel(addr, siic->base + SIRFSOC_I2C_CMD(siic->cmd_ptr++));
-}
-
-static int i2c_sirfsoc_xfer_msg(struct sirfsoc_i2c *siic, struct i2c_msg *msg)
-{
- u32 regval = readl(siic->base + SIRFSOC_I2C_CTRL);
- /* timeout waiting for the xfer to finish or fail */
- int timeout = msecs_to_jiffies((msg->len + 1) * 50);
-
- i2c_sirfsoc_set_address(siic, msg);
-
- writel(regval | SIRFSOC_I2C_CMD_DONE_EN | SIRFSOC_I2C_ERR_INT_EN,
- siic->base + SIRFSOC_I2C_CTRL);
- i2c_sirfsoc_queue_cmd(siic);
-
- if (wait_for_completion_timeout(&siic->done, timeout) == 0) {
- siic->err_status = SIRFSOC_I2C_ERR_TIMEOUT;
- dev_err(&siic->adapter.dev, "Transfer timeout\n");
- }
-
- writel(regval & ~(SIRFSOC_I2C_CMD_DONE_EN | SIRFSOC_I2C_ERR_INT_EN),
- siic->base + SIRFSOC_I2C_CTRL);
- writel(0, siic->base + SIRFSOC_I2C_CMD_START);
-
- /* i2c control doesn't response, reset it */
- if (siic->err_status == SIRFSOC_I2C_ERR_TIMEOUT) {
- writel(readl(siic->base + SIRFSOC_I2C_CTRL) | SIRFSOC_I2C_RESET,
- siic->base + SIRFSOC_I2C_CTRL);
- while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET)
- cpu_relax();
- }
- return siic->err_status ? -EAGAIN : 0;
-}
-
-static u32 i2c_sirfsoc_func(struct i2c_adapter *adap)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-static int i2c_sirfsoc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num)
-{
- struct sirfsoc_i2c *siic = adap->algo_data;
- int i, ret;
-
- clk_enable(siic->clk);
-
- for (i = 0; i < num; i++) {
- siic->buf = msgs[i].buf;
- siic->msg_len = msgs[i].len;
- siic->msg_read = !!(msgs[i].flags & I2C_M_RD);
- siic->err_status = 0;
- siic->cmd_ptr = 0;
- siic->finished_len = 0;
- siic->last = (i == (num - 1));
-
- ret = i2c_sirfsoc_xfer_msg(siic, &msgs[i]);
- if (ret) {
- clk_disable(siic->clk);
- return ret;
- }
- }
-
- clk_disable(siic->clk);
- return num;
-}
-
-/* I2C algorithms associated with this master controller driver */
-static const struct i2c_algorithm i2c_sirfsoc_algo = {
- .master_xfer = i2c_sirfsoc_xfer,
- .functionality = i2c_sirfsoc_func,
-};
-
-static int i2c_sirfsoc_probe(struct platform_device *pdev)
-{
- struct sirfsoc_i2c *siic;
- struct i2c_adapter *adap;
- struct clk *clk;
- int bitrate;
- int ctrl_speed;
- int irq;
-
- int err;
- u32 regval;
-
- clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- err = PTR_ERR(clk);
- dev_err(&pdev->dev, "Clock get failed\n");
- goto err_get_clk;
- }
-
- err = clk_prepare(clk);
- if (err) {
- dev_err(&pdev->dev, "Clock prepare failed\n");
- goto err_clk_prep;
- }
-
- err = clk_enable(clk);
- if (err) {
- dev_err(&pdev->dev, "Clock enable failed\n");
- goto err_clk_en;
- }
-
- ctrl_speed = clk_get_rate(clk);
-
- siic = devm_kzalloc(&pdev->dev, sizeof(*siic), GFP_KERNEL);
- if (!siic) {
- err = -ENOMEM;
- goto out;
- }
- adap = &siic->adapter;
- adap->class = I2C_CLASS_DEPRECATED;
-
- siic->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(siic->base)) {
- err = PTR_ERR(siic->base);
- goto out;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = irq;
- goto out;
- }
- err = devm_request_irq(&pdev->dev, irq, i2c_sirfsoc_irq, 0,
- dev_name(&pdev->dev), siic);
- if (err)
- goto out;
-
- adap->algo = &i2c_sirfsoc_algo;
- adap->algo_data = siic;
- adap->retries = 3;
-
- adap->dev.of_node = pdev->dev.of_node;
- adap->dev.parent = &pdev->dev;
- adap->nr = pdev->id;
-
- strlcpy(adap->name, "sirfsoc-i2c", sizeof(adap->name));
-
- platform_set_drvdata(pdev, adap);
- init_completion(&siic->done);
-
- /* Controller initialisation */
-
- writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL);
- while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET)
- cpu_relax();
- writel(SIRFSOC_I2C_CORE_EN | SIRFSOC_I2C_MASTER_MODE,
- siic->base + SIRFSOC_I2C_CTRL);
-
- siic->clk = clk;
-
- err = of_property_read_u32(pdev->dev.of_node,
- "clock-frequency", &bitrate);
- if (err < 0)
- bitrate = I2C_MAX_STANDARD_MODE_FREQ;
-
- /*
- * Due to some hardware design issues, we need to tune the formula.
- * Since i2c is open drain interface that allows the slave to
- * stall the transaction by holding the SCL line at '0', the RTL
- * implementation is waiting for SCL feedback from the pin after
- * setting it to High-Z ('1'). This wait adds to the high-time
- * interval counter few cycles of the input synchronization
- * (depending on the SCL_FILTER_REG field), and also the time it
- * takes for the board pull-up resistor to rise the SCL line.
- * For slow SCL settings these additions are negligible,
- * but they start to affect the speed when clock is set to faster
- * frequencies.
- * Through the actual tests, use the different user_div value(which
- * in the divider formula 'Fio / (Fi2c * user_div)') to adapt
- * the different ranges of i2c bus clock frequency, to make the SCL
- * more accurate.
- */
- if (bitrate <= 30000)
- regval = ctrl_speed / (bitrate * 5);
- else if (bitrate > 30000 && bitrate <= 280000)
- regval = (2 * ctrl_speed) / (bitrate * 11);
- else
- regval = ctrl_speed / (bitrate * 6);
-
- writel(regval, siic->base + SIRFSOC_I2C_CLK_CTRL);
- if (regval > 0xFF)
- writel(0xFF, siic->base + SIRFSOC_I2C_SDA_DELAY);
- else
- writel(regval, siic->base + SIRFSOC_I2C_SDA_DELAY);
-
- err = i2c_add_numbered_adapter(adap);
- if (err < 0)
- goto out;
-
- clk_disable(clk);
-
- dev_info(&pdev->dev, " I2C adapter ready to operate\n");
-
- return 0;
-
-out:
- clk_disable(clk);
-err_clk_en:
- clk_unprepare(clk);
-err_clk_prep:
- clk_put(clk);
-err_get_clk:
- return err;
-}
-
-static int i2c_sirfsoc_remove(struct platform_device *pdev)
-{
- struct i2c_adapter *adapter = platform_get_drvdata(pdev);
- struct sirfsoc_i2c *siic = adapter->algo_data;
-
- writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL);
- i2c_del_adapter(adapter);
- clk_unprepare(siic->clk);
- clk_put(siic->clk);
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int i2c_sirfsoc_suspend(struct device *dev)
-{
- struct i2c_adapter *adapter = dev_get_drvdata(dev);
- struct sirfsoc_i2c *siic = adapter->algo_data;
-
- clk_enable(siic->clk);
- siic->sda_delay = readl(siic->base + SIRFSOC_I2C_SDA_DELAY);
- siic->clk_div = readl(siic->base + SIRFSOC_I2C_CLK_CTRL);
- clk_disable(siic->clk);
- return 0;
-}
-
-static int i2c_sirfsoc_resume(struct device *dev)
-{
- struct i2c_adapter *adapter = dev_get_drvdata(dev);
- struct sirfsoc_i2c *siic = adapter->algo_data;
-
- clk_enable(siic->clk);
- writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL);
- while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET)
- cpu_relax();
- writel(SIRFSOC_I2C_CORE_EN | SIRFSOC_I2C_MASTER_MODE,
- siic->base + SIRFSOC_I2C_CTRL);
- writel(siic->clk_div, siic->base + SIRFSOC_I2C_CLK_CTRL);
- writel(siic->sda_delay, siic->base + SIRFSOC_I2C_SDA_DELAY);
- clk_disable(siic->clk);
- return 0;
-}
-
-static const struct dev_pm_ops i2c_sirfsoc_pm_ops = {
- .suspend = i2c_sirfsoc_suspend,
- .resume = i2c_sirfsoc_resume,
-};
-#endif
-
-static const struct of_device_id sirfsoc_i2c_of_match[] = {
- { .compatible = "sirf,prima2-i2c", },
- {},
-};
-MODULE_DEVICE_TABLE(of, sirfsoc_i2c_of_match);
-
-static struct platform_driver i2c_sirfsoc_driver = {
- .driver = {
- .name = "sirfsoc_i2c",
-#ifdef CONFIG_PM
- .pm = &i2c_sirfsoc_pm_ops,
-#endif
- .of_match_table = sirfsoc_i2c_of_match,
- },
- .probe = i2c_sirfsoc_probe,
- .remove = i2c_sirfsoc_remove,
-};
-module_platform_driver(i2c_sirfsoc_driver);
-
-MODULE_DESCRIPTION("SiRF SoC I2C master controller driver");
-MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
-MODULE_AUTHOR("Xiangzhen Ye <Xiangzhen.Ye@csr.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 9aa8e65b511e..c62c815b88eb 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -57,6 +57,8 @@
#define STM32F7_I2C_CR1_RXDMAEN BIT(15)
#define STM32F7_I2C_CR1_TXDMAEN BIT(14)
#define STM32F7_I2C_CR1_ANFOFF BIT(12)
+#define STM32F7_I2C_CR1_DNF_MASK GENMASK(11, 8)
+#define STM32F7_I2C_CR1_DNF(n) (((n) & 0xf) << 8)
#define STM32F7_I2C_CR1_ERRIE BIT(7)
#define STM32F7_I2C_CR1_TCIE BIT(6)
#define STM32F7_I2C_CR1_STOPIE BIT(5)
@@ -160,7 +162,7 @@ enum {
};
#define STM32F7_I2C_DNF_DEFAULT 0
-#define STM32F7_I2C_DNF_MAX 16
+#define STM32F7_I2C_DNF_MAX 15
#define STM32F7_I2C_ANALOG_FILTER_ENABLE 1
#define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */
@@ -725,6 +727,13 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
else
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_ANFOFF);
+
+ /* Program the Digital Filter */
+ stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
+ STM32F7_I2C_CR1_DNF_MASK);
+ stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
+ STM32F7_I2C_CR1_DNF(i2c_dev->setup.dnf));
+
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_PE);
}
@@ -2026,12 +2035,8 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
}
irq_error = platform_get_irq(pdev, 1);
- if (irq_error <= 0) {
- if (irq_error != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get IRQ error: %d\n",
- irq_error);
+ if (irq_error <= 0)
return irq_error ? : -ENOENT;
- }
i2c_dev->wakeup_src = of_property_read_bool(pdev->dev.of_node,
"wakeup-source");
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
deleted file mode 100644
index 64d739baf480..000000000000
--- a/drivers/i2c/busses/i2c-stu300.c
+++ /dev/null
@@ -1,1008 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2007-2012 ST-Ericsson AB
- * ST DDC I2C master mode driver, used in e.g. U300 series platforms.
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/spinlock.h>
-#include <linux/completion.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-
-/* the name of this kernel module */
-#define NAME "stu300"
-
-/* CR (Control Register) 8bit (R/W) */
-#define I2C_CR (0x00000000)
-#define I2C_CR_RESET_VALUE (0x00)
-#define I2C_CR_RESET_UMASK (0x00)
-#define I2C_CR_DDC1_ENABLE (0x80)
-#define I2C_CR_TRANS_ENABLE (0x40)
-#define I2C_CR_PERIPHERAL_ENABLE (0x20)
-#define I2C_CR_DDC2B_ENABLE (0x10)
-#define I2C_CR_START_ENABLE (0x08)
-#define I2C_CR_ACK_ENABLE (0x04)
-#define I2C_CR_STOP_ENABLE (0x02)
-#define I2C_CR_INTERRUPT_ENABLE (0x01)
-/* SR1 (Status Register 1) 8bit (R/-) */
-#define I2C_SR1 (0x00000004)
-#define I2C_SR1_RESET_VALUE (0x00)
-#define I2C_SR1_RESET_UMASK (0x00)
-#define I2C_SR1_EVF_IND (0x80)
-#define I2C_SR1_ADD10_IND (0x40)
-#define I2C_SR1_TRA_IND (0x20)
-#define I2C_SR1_BUSY_IND (0x10)
-#define I2C_SR1_BTF_IND (0x08)
-#define I2C_SR1_ADSL_IND (0x04)
-#define I2C_SR1_MSL_IND (0x02)
-#define I2C_SR1_SB_IND (0x01)
-/* SR2 (Status Register 2) 8bit (R/-) */
-#define I2C_SR2 (0x00000008)
-#define I2C_SR2_RESET_VALUE (0x00)
-#define I2C_SR2_RESET_UMASK (0x40)
-#define I2C_SR2_MASK (0xBF)
-#define I2C_SR2_SCLFAL_IND (0x80)
-#define I2C_SR2_ENDAD_IND (0x20)
-#define I2C_SR2_AF_IND (0x10)
-#define I2C_SR2_STOPF_IND (0x08)
-#define I2C_SR2_ARLO_IND (0x04)
-#define I2C_SR2_BERR_IND (0x02)
-#define I2C_SR2_DDC2BF_IND (0x01)
-/* CCR (Clock Control Register) 8bit (R/W) */
-#define I2C_CCR (0x0000000C)
-#define I2C_CCR_RESET_VALUE (0x00)
-#define I2C_CCR_RESET_UMASK (0x00)
-#define I2C_CCR_MASK (0xFF)
-#define I2C_CCR_FMSM (0x80)
-#define I2C_CCR_CC_MASK (0x7F)
-/* OAR1 (Own Address Register 1) 8bit (R/W) */
-#define I2C_OAR1 (0x00000010)
-#define I2C_OAR1_RESET_VALUE (0x00)
-#define I2C_OAR1_RESET_UMASK (0x00)
-#define I2C_OAR1_ADD_MASK (0xFF)
-/* OAR2 (Own Address Register 2) 8bit (R/W) */
-#define I2C_OAR2 (0x00000014)
-#define I2C_OAR2_RESET_VALUE (0x40)
-#define I2C_OAR2_RESET_UMASK (0x19)
-#define I2C_OAR2_MASK (0xE6)
-#define I2C_OAR2_FR_25_10MHZ (0x00)
-#define I2C_OAR2_FR_10_1667MHZ (0x20)
-#define I2C_OAR2_FR_1667_2667MHZ (0x40)
-#define I2C_OAR2_FR_2667_40MHZ (0x60)
-#define I2C_OAR2_FR_40_5333MHZ (0x80)
-#define I2C_OAR2_FR_5333_66MHZ (0xA0)
-#define I2C_OAR2_FR_66_80MHZ (0xC0)
-#define I2C_OAR2_FR_80_100MHZ (0xE0)
-#define I2C_OAR2_FR_MASK (0xE0)
-#define I2C_OAR2_ADD_MASK (0x06)
-/* DR (Data Register) 8bit (R/W) */
-#define I2C_DR (0x00000018)
-#define I2C_DR_RESET_VALUE (0x00)
-#define I2C_DR_RESET_UMASK (0xFF)
-#define I2C_DR_D_MASK (0xFF)
-/* ECCR (Extended Clock Control Register) 8bit (R/W) */
-#define I2C_ECCR (0x0000001C)
-#define I2C_ECCR_RESET_VALUE (0x00)
-#define I2C_ECCR_RESET_UMASK (0xE0)
-#define I2C_ECCR_MASK (0x1F)
-#define I2C_ECCR_CC_MASK (0x1F)
-
-/*
- * These events are more or less responses to commands
- * sent into the hardware, presumably reflecting the state
- * of an internal state machine.
- */
-enum stu300_event {
- STU300_EVENT_NONE = 0,
- STU300_EVENT_1,
- STU300_EVENT_2,
- STU300_EVENT_3,
- STU300_EVENT_4,
- STU300_EVENT_5,
- STU300_EVENT_6,
- STU300_EVENT_7,
- STU300_EVENT_8,
- STU300_EVENT_9
-};
-
-enum stu300_error {
- STU300_ERROR_NONE = 0,
- STU300_ERROR_ACKNOWLEDGE_FAILURE,
- STU300_ERROR_BUS_ERROR,
- STU300_ERROR_ARBITRATION_LOST,
- STU300_ERROR_UNKNOWN
-};
-
-/* timeout waiting for the controller to respond */
-#define STU300_TIMEOUT (msecs_to_jiffies(1000))
-
-/*
- * The number of address send athemps tried before giving up.
- * If the first one fails it seems like 5 to 8 attempts are required.
- */
-#define NUM_ADDR_RESEND_ATTEMPTS 12
-
-/* I2C clock speed, in Hz 0-400kHz*/
-static unsigned int scl_frequency = I2C_MAX_STANDARD_MODE_FREQ;
-module_param(scl_frequency, uint, 0644);
-
-/**
- * struct stu300_dev - the stu300 driver state holder
- * @pdev: parent platform device
- * @adapter: corresponding I2C adapter
- * @clk: hardware block clock
- * @irq: assigned interrupt line
- * @cmd_issue_lock: this locks the following cmd_ variables
- * @cmd_complete: acknowledge completion for an I2C command
- * @cmd_event: expected event coming in as a response to a command
- * @cmd_err: error code as response to a command
- * @speed: current bus speed in Hz
- * @msg_index: index of current message
- * @msg_len: length of current message
- */
-
-struct stu300_dev {
- struct platform_device *pdev;
- struct i2c_adapter adapter;
- void __iomem *virtbase;
- struct clk *clk;
- int irq;
- spinlock_t cmd_issue_lock;
- struct completion cmd_complete;
- enum stu300_event cmd_event;
- enum stu300_error cmd_err;
- unsigned int speed;
- int msg_index;
- int msg_len;
-};
-
-/* Local forward function declarations */
-static int stu300_init_hw(struct stu300_dev *dev);
-
-/*
- * The block needs writes in both MSW and LSW in order
- * for all data lines to reach their destination.
- */
-static inline void stu300_wr8(u32 value, void __iomem *address)
-{
- writel((value << 16) | value, address);
-}
-
-/*
- * This merely masks off the duplicates which appear
- * in bytes 1-3. You _MUST_ use 32-bit bus access on this
- * device, else it will not work.
- */
-static inline u32 stu300_r8(void __iomem *address)
-{
- return readl(address) & 0x000000FFU;
-}
-
-static void stu300_irq_enable(struct stu300_dev *dev)
-{
- u32 val;
- val = stu300_r8(dev->virtbase + I2C_CR);
- val |= I2C_CR_INTERRUPT_ENABLE;
- /* Twice paranoia (possible HW glitch) */
- stu300_wr8(val, dev->virtbase + I2C_CR);
- stu300_wr8(val, dev->virtbase + I2C_CR);
-}
-
-static void stu300_irq_disable(struct stu300_dev *dev)
-{
- u32 val;
- val = stu300_r8(dev->virtbase + I2C_CR);
- val &= ~I2C_CR_INTERRUPT_ENABLE;
- /* Twice paranoia (possible HW glitch) */
- stu300_wr8(val, dev->virtbase + I2C_CR);
- stu300_wr8(val, dev->virtbase + I2C_CR);
-}
-
-
-/*
- * Tells whether a certain event or events occurred in
- * response to a command. The events represent states in
- * the internal state machine of the hardware. The events
- * are not very well described in the hardware
- * documentation and can only be treated as abstract state
- * machine states.
- *
- * @ret 0 = event has not occurred or unknown error, any
- * other value means the correct event occurred or an error.
- */
-
-static int stu300_event_occurred(struct stu300_dev *dev,
- enum stu300_event mr_event) {
- u32 status1;
- u32 status2;
-
- /* What event happened? */
- status1 = stu300_r8(dev->virtbase + I2C_SR1);
-
- if (!(status1 & I2C_SR1_EVF_IND))
- /* No event at all */
- return 0;
-
- status2 = stu300_r8(dev->virtbase + I2C_SR2);
-
- /* Block any multiple interrupts */
- stu300_irq_disable(dev);
-
- /* Check for errors first */
- if (status2 & I2C_SR2_AF_IND) {
- dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE;
- return 1;
- } else if (status2 & I2C_SR2_BERR_IND) {
- dev->cmd_err = STU300_ERROR_BUS_ERROR;
- return 1;
- } else if (status2 & I2C_SR2_ARLO_IND) {
- dev->cmd_err = STU300_ERROR_ARBITRATION_LOST;
- return 1;
- }
-
- switch (mr_event) {
- case STU300_EVENT_1:
- if (status1 & I2C_SR1_ADSL_IND)
- return 1;
- break;
- case STU300_EVENT_2:
- case STU300_EVENT_3:
- case STU300_EVENT_7:
- case STU300_EVENT_8:
- if (status1 & I2C_SR1_BTF_IND) {
- return 1;
- }
- break;
- case STU300_EVENT_4:
- if (status2 & I2C_SR2_STOPF_IND)
- return 1;
- break;
- case STU300_EVENT_5:
- if (status1 & I2C_SR1_SB_IND)
- /* Clear start bit */
- return 1;
- break;
- case STU300_EVENT_6:
- if (status2 & I2C_SR2_ENDAD_IND) {
- /* First check for any errors */
- return 1;
- }
- break;
- case STU300_EVENT_9:
- if (status1 & I2C_SR1_ADD10_IND)
- return 1;
- break;
- default:
- break;
- }
- /* If we get here, we're on thin ice.
- * Here we are in a status where we have
- * gotten a response that does not match
- * what we requested.
- */
- dev->cmd_err = STU300_ERROR_UNKNOWN;
- dev_err(&dev->pdev->dev,
- "Unhandled interrupt! %d sr1: 0x%x sr2: 0x%x\n",
- mr_event, status1, status2);
- return 0;
-}
-
-static irqreturn_t stu300_irh(int irq, void *data)
-{
- struct stu300_dev *dev = data;
- int res;
-
- /* Just make sure that the block is clocked */
- clk_enable(dev->clk);
-
- /* See if this was what we were waiting for */
- spin_lock(&dev->cmd_issue_lock);
-
- res = stu300_event_occurred(dev, dev->cmd_event);
- if (res || dev->cmd_err != STU300_ERROR_NONE)
- complete(&dev->cmd_complete);
-
- spin_unlock(&dev->cmd_issue_lock);
-
- clk_disable(dev->clk);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Sends a command and then waits for the bits masked by *flagmask*
- * to go high or low by IRQ awaiting.
- */
-static int stu300_start_and_await_event(struct stu300_dev *dev,
- u8 cr_value,
- enum stu300_event mr_event)
-{
- int ret;
-
- /* Lock command issue, fill in an event we wait for */
- spin_lock_irq(&dev->cmd_issue_lock);
- init_completion(&dev->cmd_complete);
- dev->cmd_err = STU300_ERROR_NONE;
- dev->cmd_event = mr_event;
- spin_unlock_irq(&dev->cmd_issue_lock);
-
- /* Turn on interrupt, send command and wait. */
- cr_value |= I2C_CR_INTERRUPT_ENABLE;
- stu300_wr8(cr_value, dev->virtbase + I2C_CR);
- ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
- STU300_TIMEOUT);
- if (ret < 0) {
- dev_err(&dev->pdev->dev,
- "wait_for_completion_interruptible_timeout() "
- "returned %d waiting for event %04x\n", ret, mr_event);
- return ret;
- }
-
- if (ret == 0) {
- dev_err(&dev->pdev->dev, "controller timed out "
- "waiting for event %d, reinit hardware\n", mr_event);
- (void) stu300_init_hw(dev);
- return -ETIMEDOUT;
- }
-
- if (dev->cmd_err != STU300_ERROR_NONE) {
- dev_err(&dev->pdev->dev, "controller (start) "
- "error %d waiting for event %d, reinit hardware\n",
- dev->cmd_err, mr_event);
- (void) stu300_init_hw(dev);
- return -EIO;
- }
-
- return 0;
-}
-
-/*
- * This waits for a flag to be set, if it is not set on entry, an interrupt is
- * configured to wait for the flag using a completion.
- */
-static int stu300_await_event(struct stu300_dev *dev,
- enum stu300_event mr_event)
-{
- int ret;
-
- /* Is it already here? */
- spin_lock_irq(&dev->cmd_issue_lock);
- dev->cmd_err = STU300_ERROR_NONE;
- dev->cmd_event = mr_event;
-
- init_completion(&dev->cmd_complete);
-
- /* Turn on the I2C interrupt for current operation */
- stu300_irq_enable(dev);
-
- /* Unlock the command block and wait for the event to occur */
- spin_unlock_irq(&dev->cmd_issue_lock);
-
- ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
- STU300_TIMEOUT);
- if (ret < 0) {
- dev_err(&dev->pdev->dev,
- "wait_for_completion_interruptible_timeout()"
- "returned %d waiting for event %04x\n", ret, mr_event);
- return ret;
- }
-
- if (ret == 0) {
- if (mr_event != STU300_EVENT_6) {
- dev_err(&dev->pdev->dev, "controller "
- "timed out waiting for event %d, reinit "
- "hardware\n", mr_event);
- (void) stu300_init_hw(dev);
- }
- return -ETIMEDOUT;
- }
-
- if (dev->cmd_err != STU300_ERROR_NONE) {
- if (mr_event != STU300_EVENT_6) {
- dev_err(&dev->pdev->dev, "controller "
- "error (await_event) %d waiting for event %d, "
- "reinit hardware\n", dev->cmd_err, mr_event);
- (void) stu300_init_hw(dev);
- }
- return -EIO;
- }
-
- return 0;
-}
-
-/*
- * Waits for the busy bit to go low by repeated polling.
- */
-#define BUSY_RELEASE_ATTEMPTS 10
-static int stu300_wait_while_busy(struct stu300_dev *dev)
-{
- unsigned long timeout;
- int i;
-
- for (i = 0; i < BUSY_RELEASE_ATTEMPTS; i++) {
- timeout = jiffies + STU300_TIMEOUT;
-
- while (!time_after(jiffies, timeout)) {
- /* Is not busy? */
- if ((stu300_r8(dev->virtbase + I2C_SR1) &
- I2C_SR1_BUSY_IND) == 0)
- return 0;
- msleep(1);
- }
-
- dev_err(&dev->pdev->dev, "transaction timed out "
- "waiting for device to be free (not busy). "
- "Attempt: %d\n", i+1);
-
- dev_err(&dev->pdev->dev, "base address = "
- "0x%p, reinit hardware\n", dev->virtbase);
-
- (void) stu300_init_hw(dev);
- }
-
- dev_err(&dev->pdev->dev, "giving up after %d attempts "
- "to reset the bus.\n", BUSY_RELEASE_ATTEMPTS);
-
- return -ETIMEDOUT;
-}
-
-struct stu300_clkset {
- unsigned long rate;
- u32 setting;
-};
-
-static const struct stu300_clkset stu300_clktable[] = {
- { 0, 0xFFU },
- { 2500000, I2C_OAR2_FR_25_10MHZ },
- { 10000000, I2C_OAR2_FR_10_1667MHZ },
- { 16670000, I2C_OAR2_FR_1667_2667MHZ },
- { 26670000, I2C_OAR2_FR_2667_40MHZ },
- { 40000000, I2C_OAR2_FR_40_5333MHZ },
- { 53330000, I2C_OAR2_FR_5333_66MHZ },
- { 66000000, I2C_OAR2_FR_66_80MHZ },
- { 80000000, I2C_OAR2_FR_80_100MHZ },
- { 100000000, 0xFFU },
-};
-
-
-static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate)
-{
-
- u32 val;
- int i = 0;
-
- /* Locate the appropriate clock setting */
- while (i < ARRAY_SIZE(stu300_clktable) - 1 &&
- stu300_clktable[i].rate < clkrate)
- i++;
-
- if (stu300_clktable[i].setting == 0xFFU) {
- dev_err(&dev->pdev->dev, "too %s clock rate requested "
- "(%lu Hz).\n", i ? "high" : "low", clkrate);
- return -EINVAL;
- }
-
- stu300_wr8(stu300_clktable[i].setting,
- dev->virtbase + I2C_OAR2);
-
- dev_dbg(&dev->pdev->dev, "Clock rate %lu Hz, I2C bus speed %d Hz "
- "virtbase %p\n", clkrate, dev->speed, dev->virtbase);
-
- if (dev->speed > I2C_MAX_STANDARD_MODE_FREQ)
- /* Fast Mode I2C */
- val = ((clkrate/dev->speed) - 9)/3 + 1;
- else
- /* Standard Mode I2C */
- val = ((clkrate/dev->speed) - 7)/2 + 1;
-
- /* According to spec the divider must be > 2 */
- if (val < 0x002) {
- dev_err(&dev->pdev->dev, "too low clock rate (%lu Hz).\n",
- clkrate);
- return -EINVAL;
- }
-
- /* We have 12 bits clock divider only! */
- if (val & 0xFFFFF000U) {
- dev_err(&dev->pdev->dev, "too high clock rate (%lu Hz).\n",
- clkrate);
- return -EINVAL;
- }
-
- if (dev->speed > I2C_MAX_STANDARD_MODE_FREQ) {
- /* CC6..CC0 */
- stu300_wr8((val & I2C_CCR_CC_MASK) | I2C_CCR_FMSM,
- dev->virtbase + I2C_CCR);
- dev_dbg(&dev->pdev->dev, "set clock divider to 0x%08x, "
- "Fast Mode I2C\n", val);
- } else {
- /* CC6..CC0 */
- stu300_wr8((val & I2C_CCR_CC_MASK),
- dev->virtbase + I2C_CCR);
- dev_dbg(&dev->pdev->dev, "set clock divider to "
- "0x%08x, Standard Mode I2C\n", val);
- }
-
- /* CC11..CC7 */
- stu300_wr8(((val >> 7) & 0x1F),
- dev->virtbase + I2C_ECCR);
-
- return 0;
-}
-
-
-static int stu300_init_hw(struct stu300_dev *dev)
-{
- u32 dummy;
- unsigned long clkrate;
- int ret;
-
- /* Disable controller */
- stu300_wr8(0x00, dev->virtbase + I2C_CR);
- /*
- * Set own address to some default value (0x00).
- * We do not support slave mode anyway.
- */
- stu300_wr8(0x00, dev->virtbase + I2C_OAR1);
- /*
- * The I2C controller only operates properly in 26 MHz but we
- * program this driver as if we didn't know. This will also set the two
- * high bits of the own address to zero as well.
- * There is no known hardware issue with running in 13 MHz
- * However, speeds over 200 kHz are not used.
- */
- clkrate = clk_get_rate(dev->clk);
- ret = stu300_set_clk(dev, clkrate);
-
- if (ret)
- return ret;
- /*
- * Enable block, do it TWICE (hardware glitch)
- * Setting bit 7 can enable DDC mode. (Not used currently.)
- */
- stu300_wr8(I2C_CR_PERIPHERAL_ENABLE,
- dev->virtbase + I2C_CR);
- stu300_wr8(I2C_CR_PERIPHERAL_ENABLE,
- dev->virtbase + I2C_CR);
- /* Make a dummy read of the status register SR1 & SR2 */
- dummy = stu300_r8(dev->virtbase + I2C_SR2);
- dummy = stu300_r8(dev->virtbase + I2C_SR1);
-
- return 0;
-}
-
-
-
-/* Send slave address. */
-static int stu300_send_address(struct stu300_dev *dev,
- struct i2c_msg *msg, int resend)
-{
- u32 val;
- int ret;
-
- if (msg->flags & I2C_M_TEN) {
- /* This is probably how 10 bit addresses look */
- val = (0xf0 | (((u32) msg->addr & 0x300) >> 7)) &
- I2C_DR_D_MASK;
- if (msg->flags & I2C_M_RD)
- /* This is the direction bit */
- val |= 0x01;
- } else {
- val = i2c_8bit_addr_from_msg(msg);
- }
-
- if (resend) {
- if (msg->flags & I2C_M_RD)
- dev_dbg(&dev->pdev->dev, "read resend\n");
- else
- dev_dbg(&dev->pdev->dev, "write resend\n");
- }
-
- stu300_wr8(val, dev->virtbase + I2C_DR);
-
- /* For 10bit addressing, await 10bit request (EVENT 9) */
- if (msg->flags & I2C_M_TEN) {
- ret = stu300_await_event(dev, STU300_EVENT_9);
- /*
- * The slave device wants a 10bit address, send the rest
- * of the bits (the LSBits)
- */
- val = msg->addr & I2C_DR_D_MASK;
- /* This clears "event 9" */
- stu300_wr8(val, dev->virtbase + I2C_DR);
- if (ret != 0)
- return ret;
- }
- /* FIXME: Why no else here? two events for 10bit?
- * Await event 6 (normal) or event 9 (10bit)
- */
-
- if (resend)
- dev_dbg(&dev->pdev->dev, "await event 6\n");
- ret = stu300_await_event(dev, STU300_EVENT_6);
-
- /*
- * Clear any pending EVENT 6 no matter what happened during
- * await_event.
- */
- val = stu300_r8(dev->virtbase + I2C_CR);
- val |= I2C_CR_PERIPHERAL_ENABLE;
- stu300_wr8(val, dev->virtbase + I2C_CR);
-
- return ret;
-}
-
-static int stu300_xfer_msg(struct i2c_adapter *adap,
- struct i2c_msg *msg, int stop)
-{
- u32 cr;
- u32 val;
- u32 i;
- int ret;
- int attempts = 0;
- struct stu300_dev *dev = i2c_get_adapdata(adap);
-
- clk_enable(dev->clk);
-
- /* Remove this if (0) to trace each and every message. */
- if (0) {
- dev_dbg(&dev->pdev->dev, "I2C message to: 0x%04x, len: %d, "
- "flags: 0x%04x, stop: %d\n",
- msg->addr, msg->len, msg->flags, stop);
- }
-
- /*
- * For some reason, sending the address sometimes fails when running
- * on the 13 MHz clock. No interrupt arrives. This is a work around,
- * which tries to restart and send the address up to 10 times before
- * really giving up. Usually 5 to 8 attempts are enough.
- */
- do {
- if (attempts)
- dev_dbg(&dev->pdev->dev, "wait while busy\n");
- /* Check that the bus is free, or wait until some timeout */
- ret = stu300_wait_while_busy(dev);
- if (ret != 0)
- goto exit_disable;
-
- if (attempts)
- dev_dbg(&dev->pdev->dev, "re-int hw\n");
- /*
- * According to ST, there is no problem if the clock is
- * changed between 13 and 26 MHz during a transfer.
- */
- ret = stu300_init_hw(dev);
- if (ret)
- goto exit_disable;
-
- /* Send a start condition */
- cr = I2C_CR_PERIPHERAL_ENABLE;
- /* Setting the START bit puts the block in master mode */
- if (!(msg->flags & I2C_M_NOSTART))
- cr |= I2C_CR_START_ENABLE;
- if ((msg->flags & I2C_M_RD) && (msg->len > 1))
- /* On read more than 1 byte, we need ack. */
- cr |= I2C_CR_ACK_ENABLE;
- /* Check that it gets through */
- if (!(msg->flags & I2C_M_NOSTART)) {
- if (attempts)
- dev_dbg(&dev->pdev->dev, "send start event\n");
- ret = stu300_start_and_await_event(dev, cr,
- STU300_EVENT_5);
- }
-
- if (attempts)
- dev_dbg(&dev->pdev->dev, "send address\n");
-
- if (ret == 0)
- /* Send address */
- ret = stu300_send_address(dev, msg, attempts != 0);
-
- if (ret != 0) {
- attempts++;
- dev_dbg(&dev->pdev->dev, "failed sending address, "
- "retrying. Attempt: %d msg_index: %d/%d\n",
- attempts, dev->msg_index, dev->msg_len);
- }
-
- } while (ret != 0 && attempts < NUM_ADDR_RESEND_ATTEMPTS);
-
- if (attempts < NUM_ADDR_RESEND_ATTEMPTS && attempts > 0) {
- dev_dbg(&dev->pdev->dev, "managed to get address "
- "through after %d attempts\n", attempts);
- } else if (attempts == NUM_ADDR_RESEND_ATTEMPTS) {
- dev_dbg(&dev->pdev->dev, "I give up, tried %d times "
- "to resend address.\n",
- NUM_ADDR_RESEND_ATTEMPTS);
- goto exit_disable;
- }
-
-
- if (msg->flags & I2C_M_RD) {
- /* READ: we read the actual bytes one at a time */
- for (i = 0; i < msg->len; i++) {
- if (i == msg->len-1) {
- /*
- * Disable ACK and set STOP condition before
- * reading last byte
- */
- val = I2C_CR_PERIPHERAL_ENABLE;
-
- if (stop)
- val |= I2C_CR_STOP_ENABLE;
-
- stu300_wr8(val,
- dev->virtbase + I2C_CR);
- }
- /* Wait for this byte... */
- ret = stu300_await_event(dev, STU300_EVENT_7);
- if (ret != 0)
- goto exit_disable;
- /* This clears event 7 */
- msg->buf[i] = (u8) stu300_r8(dev->virtbase + I2C_DR);
- }
- } else {
- /* WRITE: we send the actual bytes one at a time */
- for (i = 0; i < msg->len; i++) {
- /* Write the byte */
- stu300_wr8(msg->buf[i],
- dev->virtbase + I2C_DR);
- /* Check status */
- ret = stu300_await_event(dev, STU300_EVENT_8);
- /* Next write to DR will clear event 8 */
- if (ret != 0) {
- dev_err(&dev->pdev->dev, "error awaiting "
- "event 8 (%d)\n", ret);
- goto exit_disable;
- }
- }
- /* Check NAK */
- if (!(msg->flags & I2C_M_IGNORE_NAK)) {
- if (stu300_r8(dev->virtbase + I2C_SR2) &
- I2C_SR2_AF_IND) {
- dev_err(&dev->pdev->dev, "I2C payload "
- "send returned NAK!\n");
- ret = -EIO;
- goto exit_disable;
- }
- }
- if (stop) {
- /* Send stop condition */
- val = I2C_CR_PERIPHERAL_ENABLE;
- val |= I2C_CR_STOP_ENABLE;
- stu300_wr8(val, dev->virtbase + I2C_CR);
- }
- }
-
- /* Check that the bus is free, or wait until some timeout occurs */
- ret = stu300_wait_while_busy(dev);
- if (ret != 0) {
- dev_err(&dev->pdev->dev, "timeout waiting for transfer "
- "to commence.\n");
- goto exit_disable;
- }
-
- /* Dummy read status registers */
- val = stu300_r8(dev->virtbase + I2C_SR2);
- val = stu300_r8(dev->virtbase + I2C_SR1);
- ret = 0;
-
- exit_disable:
- /* Disable controller */
- stu300_wr8(0x00, dev->virtbase + I2C_CR);
- clk_disable(dev->clk);
- return ret;
-}
-
-static int stu300_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num)
-{
- int ret = -1;
- int i;
-
- struct stu300_dev *dev = i2c_get_adapdata(adap);
- dev->msg_len = num;
-
- for (i = 0; i < num; i++) {
- /*
- * Another driver appears to send stop for each message,
- * here we only do that for the last message. Possibly some
- * peripherals require this behaviour, then their drivers
- * have to send single messages in order to get "stop" for
- * each message.
- */
- dev->msg_index = i;
-
- ret = stu300_xfer_msg(adap, &msgs[i], (i == (num - 1)));
-
- if (ret != 0) {
- num = ret;
- break;
- }
- }
-
- return num;
-}
-
-static int stu300_xfer_todo(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
-{
- /* TODO: implement polling for this case if need be. */
- WARN(1, "%s: atomic transfers not implemented\n", dev_name(&adap->dev));
- return -EOPNOTSUPP;
-}
-
-static u32 stu300_func(struct i2c_adapter *adap)
-{
- /* This is the simplest thing you can think of... */
- return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
-}
-
-static const struct i2c_algorithm stu300_algo = {
- .master_xfer = stu300_xfer,
- .master_xfer_atomic = stu300_xfer_todo,
- .functionality = stu300_func,
-};
-
-static const struct i2c_adapter_quirks stu300_quirks = {
- .flags = I2C_AQ_NO_ZERO_LEN,
-};
-
-static int stu300_probe(struct platform_device *pdev)
-{
- struct stu300_dev *dev;
- struct i2c_adapter *adap;
- int bus_nr;
- int ret = 0;
-
- dev = devm_kzalloc(&pdev->dev, sizeof(struct stu300_dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- bus_nr = pdev->id;
- dev->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(dev->clk)) {
- dev_err(&pdev->dev, "could not retrieve i2c bus clock\n");
- return PTR_ERR(dev->clk);
- }
-
- dev->pdev = pdev;
- dev->virtbase = devm_platform_ioremap_resource(pdev, 0);
- dev_dbg(&pdev->dev, "initialize bus device I2C%d on virtual "
- "base %p\n", bus_nr, dev->virtbase);
- if (IS_ERR(dev->virtbase))
- return PTR_ERR(dev->virtbase);
-
- dev->irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(&pdev->dev, dev->irq, stu300_irh, 0, NAME, dev);
- if (ret < 0)
- return ret;
-
- dev->speed = scl_frequency;
-
- clk_prepare_enable(dev->clk);
- ret = stu300_init_hw(dev);
- clk_disable(dev->clk);
- if (ret != 0) {
- dev_err(&dev->pdev->dev, "error initializing hardware.\n");
- return -EIO;
- }
-
- /* IRQ event handling initialization */
- spin_lock_init(&dev->cmd_issue_lock);
- dev->cmd_event = STU300_EVENT_NONE;
- dev->cmd_err = STU300_ERROR_NONE;
-
- adap = &dev->adapter;
- adap->owner = THIS_MODULE;
- /* DDC class but actually often used for more generic I2C */
- adap->class = I2C_CLASS_DEPRECATED;
- strlcpy(adap->name, "ST Microelectronics DDC I2C adapter",
- sizeof(adap->name));
- adap->nr = bus_nr;
- adap->algo = &stu300_algo;
- adap->dev.parent = &pdev->dev;
- adap->dev.of_node = pdev->dev.of_node;
- adap->quirks = &stu300_quirks;
-
- i2c_set_adapdata(adap, dev);
-
- /* i2c device drivers may be active on return from add_adapter() */
- ret = i2c_add_numbered_adapter(adap);
- if (ret)
- return ret;
-
- platform_set_drvdata(pdev, dev);
- dev_info(&pdev->dev, "ST DDC I2C @ %p, irq %d\n",
- dev->virtbase, dev->irq);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int stu300_suspend(struct device *device)
-{
- struct stu300_dev *dev = dev_get_drvdata(device);
-
- /* Turn off everything */
- stu300_wr8(0x00, dev->virtbase + I2C_CR);
- return 0;
-}
-
-static int stu300_resume(struct device *device)
-{
- int ret = 0;
- struct stu300_dev *dev = dev_get_drvdata(device);
-
- clk_enable(dev->clk);
- ret = stu300_init_hw(dev);
- clk_disable(dev->clk);
-
- if (ret != 0)
- dev_err(device, "error re-initializing hardware.\n");
- return ret;
-}
-
-static SIMPLE_DEV_PM_OPS(stu300_pm, stu300_suspend, stu300_resume);
-#define STU300_I2C_PM (&stu300_pm)
-#else
-#define STU300_I2C_PM NULL
-#endif
-
-static int stu300_remove(struct platform_device *pdev)
-{
- struct stu300_dev *dev = platform_get_drvdata(pdev);
-
- i2c_del_adapter(&dev->adapter);
- /* Turn off everything */
- stu300_wr8(0x00, dev->virtbase + I2C_CR);
- return 0;
-}
-
-static const struct of_device_id stu300_dt_match[] = {
- { .compatible = "st,ddci2c" },
- {},
-};
-MODULE_DEVICE_TABLE(of, stu300_dt_match);
-
-static struct platform_driver stu300_i2c_driver = {
- .driver = {
- .name = NAME,
- .pm = STU300_I2C_PM,
- .of_match_table = stu300_dt_match,
- },
- .probe = stu300_probe,
- .remove = stu300_remove,
-
-};
-
-static int __init stu300_init(void)
-{
- return platform_driver_register(&stu300_i2c_driver);
-}
-
-static void __exit stu300_exit(void)
-{
- platform_driver_unregister(&stu300_i2c_driver);
-}
-
-/*
- * The systems using this bus often have very basic devices such
- * as regulators on the I2C bus, so this needs to be loaded early.
- * Therefore it is registered in the subsys_initcall().
- */
-subsys_initcall(stu300_init);
-module_exit(stu300_exit);
-
-MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
-MODULE_DESCRIPTION("ST Micro DDC I2C adapter (" NAME ")");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" NAME);
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 8b113ae32dc7..c883044715f3 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -550,7 +550,7 @@ static int tegra_i2c_poll_register(struct tegra_i2c_dev *i2c_dev,
void __iomem *addr = i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg);
u32 val;
- if (!i2c_dev->atomic_mode && !in_irq())
+ if (!i2c_dev->atomic_mode)
return readl_relaxed_poll_timeout(addr, val, !(val & mask),
delay_us, timeout_us);
@@ -1739,9 +1739,10 @@ static int tegra_i2c_probe(struct platform_device *pdev)
	/* interrupt will be enabled during the transfer */
irq_set_status_flags(i2c_dev->irq, IRQ_NOAUTOEN);
- err = devm_request_irq(i2c_dev->dev, i2c_dev->irq, tegra_i2c_isr,
- IRQF_NO_SUSPEND, dev_name(i2c_dev->dev),
- i2c_dev);
+ err = devm_request_threaded_irq(i2c_dev->dev, i2c_dev->irq,
+ NULL, tegra_i2c_isr,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(i2c_dev->dev), i2c_dev);
if (err)
return err;
diff --git a/drivers/i2c/busses/i2c-zx2967.c b/drivers/i2c/busses/i2c-zx2967.c
deleted file mode 100644
index 8db9519695a6..000000000000
--- a/drivers/i2c/busses/i2c-zx2967.c
+++ /dev/null
@@ -1,602 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2017 Sanechips Technology Co., Ltd.
- * Copyright 2017 Linaro Ltd.
- *
- * Author: Baoyou Xie <baoyou.xie@linaro.org>
- */
-
-#include <linux/clk.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#define REG_CMD 0x04
-#define REG_DEVADDR_H 0x0C
-#define REG_DEVADDR_L 0x10
-#define REG_CLK_DIV_FS 0x14
-#define REG_CLK_DIV_HS 0x18
-#define REG_WRCONF 0x1C
-#define REG_RDCONF 0x20
-#define REG_DATA 0x24
-#define REG_STAT 0x28
-
-#define I2C_STOP 0
-#define I2C_MASTER BIT(0)
-#define I2C_ADDR_MODE_TEN BIT(1)
-#define I2C_IRQ_MSK_ENABLE BIT(3)
-#define I2C_RW_READ BIT(4)
-#define I2C_CMB_RW_EN BIT(5)
-#define I2C_START BIT(6)
-
-#define I2C_ADDR_LOW_MASK GENMASK(6, 0)
-#define I2C_ADDR_LOW_SHIFT 0
-#define I2C_ADDR_HI_MASK GENMASK(2, 0)
-#define I2C_ADDR_HI_SHIFT 7
-
-#define I2C_WFIFO_RESET BIT(7)
-#define I2C_RFIFO_RESET BIT(7)
-
-#define I2C_IRQ_ACK_CLEAR BIT(7)
-#define I2C_INT_MASK GENMASK(6, 0)
-
-#define I2C_TRANS_DONE BIT(0)
-#define I2C_SR_EDEVICE BIT(1)
-#define I2C_SR_EDATA BIT(2)
-
-#define I2C_FIFO_MAX 16
-
-#define I2C_TIMEOUT msecs_to_jiffies(1000)
-
-#define DEV(i2c) ((i2c)->adap.dev.parent)
-
-struct zx2967_i2c {
- struct i2c_adapter adap;
- struct clk *clk;
- struct completion complete;
- u32 clk_freq;
- void __iomem *reg_base;
- size_t residue;
- int irq;
- int msg_rd;
- u8 *cur_trans;
- u8 access_cnt;
- int error;
-};
-
-static void zx2967_i2c_writel(struct zx2967_i2c *i2c,
- u32 val, unsigned long reg)
-{
- writel_relaxed(val, i2c->reg_base + reg);
-}
-
-static u32 zx2967_i2c_readl(struct zx2967_i2c *i2c, unsigned long reg)
-{
- return readl_relaxed(i2c->reg_base + reg);
-}
-
-static void zx2967_i2c_writesb(struct zx2967_i2c *i2c,
- void *data, unsigned long reg, int len)
-{
- writesb(i2c->reg_base + reg, data, len);
-}
-
-static void zx2967_i2c_readsb(struct zx2967_i2c *i2c,
- void *data, unsigned long reg, int len)
-{
- readsb(i2c->reg_base + reg, data, len);
-}
-
-static void zx2967_i2c_start_ctrl(struct zx2967_i2c *i2c)
-{
- u32 status;
- u32 ctl;
-
- status = zx2967_i2c_readl(i2c, REG_STAT);
- status |= I2C_IRQ_ACK_CLEAR;
- zx2967_i2c_writel(i2c, status, REG_STAT);
-
- ctl = zx2967_i2c_readl(i2c, REG_CMD);
- if (i2c->msg_rd)
- ctl |= I2C_RW_READ;
- else
- ctl &= ~I2C_RW_READ;
- ctl &= ~I2C_CMB_RW_EN;
- ctl |= I2C_START;
- zx2967_i2c_writel(i2c, ctl, REG_CMD);
-}
-
-static void zx2967_i2c_flush_fifos(struct zx2967_i2c *i2c)
-{
- u32 offset;
- u32 val;
-
- if (i2c->msg_rd) {
- offset = REG_RDCONF;
- val = I2C_RFIFO_RESET;
- } else {
- offset = REG_WRCONF;
- val = I2C_WFIFO_RESET;
- }
-
- val |= zx2967_i2c_readl(i2c, offset);
- zx2967_i2c_writel(i2c, val, offset);
-}
-
-static int zx2967_i2c_empty_rx_fifo(struct zx2967_i2c *i2c, u32 size)
-{
- u8 val[I2C_FIFO_MAX] = {0};
- int i;
-
- if (size > I2C_FIFO_MAX) {
- dev_err(DEV(i2c), "fifo size %d over the max value %d\n",
- size, I2C_FIFO_MAX);
- return -EINVAL;
- }
-
- zx2967_i2c_readsb(i2c, val, REG_DATA, size);
- for (i = 0; i < size; i++) {
- *i2c->cur_trans++ = val[i];
- i2c->residue--;
- }
-
- barrier();
-
- return 0;
-}
-
-static int zx2967_i2c_fill_tx_fifo(struct zx2967_i2c *i2c)
-{
- size_t residue = i2c->residue;
- u8 *buf = i2c->cur_trans;
-
- if (residue == 0) {
- dev_err(DEV(i2c), "residue is %d\n", (int)residue);
- return -EINVAL;
- }
-
- if (residue <= I2C_FIFO_MAX) {
- zx2967_i2c_writesb(i2c, buf, REG_DATA, residue);
-
- /* Again update before writing to FIFO to make sure isr sees. */
- i2c->residue = 0;
- i2c->cur_trans = NULL;
- } else {
- zx2967_i2c_writesb(i2c, buf, REG_DATA, I2C_FIFO_MAX);
- i2c->residue -= I2C_FIFO_MAX;
- i2c->cur_trans += I2C_FIFO_MAX;
- }
-
- barrier();
-
- return 0;
-}
-
-static int zx2967_i2c_reset_hardware(struct zx2967_i2c *i2c)
-{
- u32 val;
- u32 clk_div;
-
- val = I2C_MASTER | I2C_IRQ_MSK_ENABLE;
- zx2967_i2c_writel(i2c, val, REG_CMD);
-
- clk_div = clk_get_rate(i2c->clk) / i2c->clk_freq - 1;
- zx2967_i2c_writel(i2c, clk_div, REG_CLK_DIV_FS);
- zx2967_i2c_writel(i2c, clk_div, REG_CLK_DIV_HS);
-
- zx2967_i2c_writel(i2c, I2C_FIFO_MAX - 1, REG_WRCONF);
- zx2967_i2c_writel(i2c, I2C_FIFO_MAX - 1, REG_RDCONF);
- zx2967_i2c_writel(i2c, 1, REG_RDCONF);
-
- zx2967_i2c_flush_fifos(i2c);
-
- return 0;
-}
-
-static void zx2967_i2c_isr_clr(struct zx2967_i2c *i2c)
-{
- u32 status;
-
- status = zx2967_i2c_readl(i2c, REG_STAT);
- status |= I2C_IRQ_ACK_CLEAR;
- zx2967_i2c_writel(i2c, status, REG_STAT);
-}
-
-static irqreturn_t zx2967_i2c_isr(int irq, void *dev_id)
-{
- u32 status;
- struct zx2967_i2c *i2c = (struct zx2967_i2c *)dev_id;
-
- status = zx2967_i2c_readl(i2c, REG_STAT) & I2C_INT_MASK;
- zx2967_i2c_isr_clr(i2c);
-
- if (status & I2C_SR_EDEVICE)
- i2c->error = -ENXIO;
- else if (status & I2C_SR_EDATA)
- i2c->error = -EIO;
- else if (status & I2C_TRANS_DONE)
- i2c->error = 0;
- else
- goto done;
-
- complete(&i2c->complete);
-done:
- return IRQ_HANDLED;
-}
-
-static void zx2967_set_addr(struct zx2967_i2c *i2c, u16 addr)
-{
- u16 val;
-
- val = (addr >> I2C_ADDR_LOW_SHIFT) & I2C_ADDR_LOW_MASK;
- zx2967_i2c_writel(i2c, val, REG_DEVADDR_L);
-
- val = (addr >> I2C_ADDR_HI_SHIFT) & I2C_ADDR_HI_MASK;
- zx2967_i2c_writel(i2c, val, REG_DEVADDR_H);
- if (val)
- val = zx2967_i2c_readl(i2c, REG_CMD) | I2C_ADDR_MODE_TEN;
- else
- val = zx2967_i2c_readl(i2c, REG_CMD) & ~I2C_ADDR_MODE_TEN;
- zx2967_i2c_writel(i2c, val, REG_CMD);
-}
-
-static int zx2967_i2c_xfer_bytes(struct zx2967_i2c *i2c, u32 bytes)
-{
- unsigned long time_left;
- int rd = i2c->msg_rd;
- int ret;
-
- reinit_completion(&i2c->complete);
-
- if (rd) {
- zx2967_i2c_writel(i2c, bytes - 1, REG_RDCONF);
- } else {
- ret = zx2967_i2c_fill_tx_fifo(i2c);
- if (ret)
- return ret;
- }
-
- zx2967_i2c_start_ctrl(i2c);
-
- time_left = wait_for_completion_timeout(&i2c->complete,
- I2C_TIMEOUT);
- if (time_left == 0)
- return -ETIMEDOUT;
-
- if (i2c->error)
- return i2c->error;
-
- return rd ? zx2967_i2c_empty_rx_fifo(i2c, bytes) : 0;
-}
-
-static int zx2967_i2c_xfer_msg(struct zx2967_i2c *i2c,
- struct i2c_msg *msg)
-{
- int ret;
- int i;
-
- zx2967_i2c_flush_fifos(i2c);
-
- i2c->cur_trans = msg->buf;
- i2c->residue = msg->len;
- i2c->access_cnt = msg->len / I2C_FIFO_MAX;
- i2c->msg_rd = msg->flags & I2C_M_RD;
-
- for (i = 0; i < i2c->access_cnt; i++) {
- ret = zx2967_i2c_xfer_bytes(i2c, I2C_FIFO_MAX);
- if (ret)
- return ret;
- }
-
- if (i2c->residue > 0) {
- ret = zx2967_i2c_xfer_bytes(i2c, i2c->residue);
- if (ret)
- return ret;
- }
-
- i2c->residue = 0;
- i2c->access_cnt = 0;
-
- return 0;
-}
-
-static int zx2967_i2c_xfer(struct i2c_adapter *adap,
- struct i2c_msg *msgs, int num)
-{
- struct zx2967_i2c *i2c = i2c_get_adapdata(adap);
- int ret;
- int i;
-
- zx2967_set_addr(i2c, msgs->addr);
-
- for (i = 0; i < num; i++) {
- ret = zx2967_i2c_xfer_msg(i2c, &msgs[i]);
- if (ret)
- return ret;
- }
-
- return num;
-}
-
-static void
-zx2967_smbus_xfer_prepare(struct zx2967_i2c *i2c, u16 addr,
- char read_write, u8 command, int size,
- union i2c_smbus_data *data)
-{
- u32 val;
-
- val = zx2967_i2c_readl(i2c, REG_RDCONF);
- val |= I2C_RFIFO_RESET;
- zx2967_i2c_writel(i2c, val, REG_RDCONF);
- zx2967_set_addr(i2c, addr);
- val = zx2967_i2c_readl(i2c, REG_CMD);
- val &= ~I2C_RW_READ;
- zx2967_i2c_writel(i2c, val, REG_CMD);
-
- switch (size) {
- case I2C_SMBUS_BYTE:
- zx2967_i2c_writel(i2c, command, REG_DATA);
- break;
- case I2C_SMBUS_BYTE_DATA:
- zx2967_i2c_writel(i2c, command, REG_DATA);
- if (read_write == I2C_SMBUS_WRITE)
- zx2967_i2c_writel(i2c, data->byte, REG_DATA);
- break;
- case I2C_SMBUS_WORD_DATA:
- zx2967_i2c_writel(i2c, command, REG_DATA);
- if (read_write == I2C_SMBUS_WRITE) {
- zx2967_i2c_writel(i2c, (data->word >> 8), REG_DATA);
- zx2967_i2c_writel(i2c, (data->word & 0xff),
- REG_DATA);
- }
- break;
- }
-}
-
-static int zx2967_smbus_xfer_read(struct zx2967_i2c *i2c, int size,
- union i2c_smbus_data *data)
-{
- unsigned long time_left;
- u8 buf[2];
- u32 val;
-
- reinit_completion(&i2c->complete);
-
- val = zx2967_i2c_readl(i2c, REG_CMD);
- val |= I2C_CMB_RW_EN;
- zx2967_i2c_writel(i2c, val, REG_CMD);
-
- val = zx2967_i2c_readl(i2c, REG_CMD);
- val |= I2C_START;
- zx2967_i2c_writel(i2c, val, REG_CMD);
-
- time_left = wait_for_completion_timeout(&i2c->complete,
- I2C_TIMEOUT);
- if (time_left == 0)
- return -ETIMEDOUT;
-
- if (i2c->error)
- return i2c->error;
-
- switch (size) {
- case I2C_SMBUS_BYTE:
- case I2C_SMBUS_BYTE_DATA:
- val = zx2967_i2c_readl(i2c, REG_DATA);
- data->byte = val;
- break;
- case I2C_SMBUS_WORD_DATA:
- case I2C_SMBUS_PROC_CALL:
- buf[0] = zx2967_i2c_readl(i2c, REG_DATA);
- buf[1] = zx2967_i2c_readl(i2c, REG_DATA);
- data->word = (buf[0] << 8) | buf[1];
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int zx2967_smbus_xfer_write(struct zx2967_i2c *i2c)
-{
- unsigned long time_left;
- u32 val;
-
- reinit_completion(&i2c->complete);
- val = zx2967_i2c_readl(i2c, REG_CMD);
- val |= I2C_START;
- zx2967_i2c_writel(i2c, val, REG_CMD);
-
- time_left = wait_for_completion_timeout(&i2c->complete,
- I2C_TIMEOUT);
- if (time_left == 0)
- return -ETIMEDOUT;
-
- if (i2c->error)
- return i2c->error;
-
- return 0;
-}
-
-static int zx2967_smbus_xfer(struct i2c_adapter *adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size, union i2c_smbus_data *data)
-{
- struct zx2967_i2c *i2c = i2c_get_adapdata(adap);
-
- if (size == I2C_SMBUS_QUICK)
- read_write = I2C_SMBUS_WRITE;
-
- switch (size) {
- case I2C_SMBUS_QUICK:
- case I2C_SMBUS_BYTE:
- case I2C_SMBUS_BYTE_DATA:
- case I2C_SMBUS_WORD_DATA:
- zx2967_smbus_xfer_prepare(i2c, addr, read_write,
- command, size, data);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- if (read_write == I2C_SMBUS_READ)
- return zx2967_smbus_xfer_read(i2c, size, data);
-
- return zx2967_smbus_xfer_write(i2c);
-}
-
-static u32 zx2967_i2c_func(struct i2c_adapter *adap)
-{
- return I2C_FUNC_I2C |
- I2C_FUNC_SMBUS_QUICK |
- I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_BLOCK_DATA |
- I2C_FUNC_SMBUS_PROC_CALL |
- I2C_FUNC_SMBUS_I2C_BLOCK;
-}
-
-static int __maybe_unused zx2967_i2c_suspend(struct device *dev)
-{
- struct zx2967_i2c *i2c = dev_get_drvdata(dev);
-
- i2c_mark_adapter_suspended(&i2c->adap);
- clk_disable_unprepare(i2c->clk);
-
- return 0;
-}
-
-static int __maybe_unused zx2967_i2c_resume(struct device *dev)
-{
- struct zx2967_i2c *i2c = dev_get_drvdata(dev);
-
- clk_prepare_enable(i2c->clk);
- i2c_mark_adapter_resumed(&i2c->adap);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(zx2967_i2c_dev_pm_ops,
- zx2967_i2c_suspend, zx2967_i2c_resume);
-
-static const struct i2c_algorithm zx2967_i2c_algo = {
- .master_xfer = zx2967_i2c_xfer,
- .smbus_xfer = zx2967_smbus_xfer,
- .functionality = zx2967_i2c_func,
-};
-
-static const struct i2c_adapter_quirks zx2967_i2c_quirks = {
- .flags = I2C_AQ_NO_ZERO_LEN,
-};
-
-static const struct of_device_id zx2967_i2c_of_match[] = {
- { .compatible = "zte,zx296718-i2c", },
- { },
-};
-MODULE_DEVICE_TABLE(of, zx2967_i2c_of_match);
-
-static int zx2967_i2c_probe(struct platform_device *pdev)
-{
- struct zx2967_i2c *i2c;
- void __iomem *reg_base;
- struct clk *clk;
- int ret;
-
- i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
- if (!i2c)
- return -ENOMEM;
-
- reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(reg_base))
- return PTR_ERR(reg_base);
-
- clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "missing controller clock");
- return PTR_ERR(clk);
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(&pdev->dev, "failed to enable i2c_clk\n");
- return ret;
- }
-
- ret = device_property_read_u32(&pdev->dev, "clock-frequency",
- &i2c->clk_freq);
- if (ret) {
- dev_err(&pdev->dev, "missing clock-frequency");
- return ret;
- }
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- return ret;
-
- i2c->irq = ret;
- i2c->reg_base = reg_base;
- i2c->clk = clk;
-
- init_completion(&i2c->complete);
- platform_set_drvdata(pdev, i2c);
-
- ret = zx2967_i2c_reset_hardware(i2c);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize i2c controller\n");
- goto err_clk_unprepare;
- }
-
- ret = devm_request_irq(&pdev->dev, i2c->irq,
- zx2967_i2c_isr, 0, dev_name(&pdev->dev), i2c);
- if (ret) {
- dev_err(&pdev->dev, "failed to request irq %i\n", i2c->irq);
- goto err_clk_unprepare;
- }
-
- i2c_set_adapdata(&i2c->adap, i2c);
- strlcpy(i2c->adap.name, "zx2967 i2c adapter",
- sizeof(i2c->adap.name));
- i2c->adap.algo = &zx2967_i2c_algo;
- i2c->adap.quirks = &zx2967_i2c_quirks;
- i2c->adap.nr = pdev->id;
- i2c->adap.dev.parent = &pdev->dev;
- i2c->adap.dev.of_node = pdev->dev.of_node;
-
- ret = i2c_add_numbered_adapter(&i2c->adap);
- if (ret)
- goto err_clk_unprepare;
-
- return 0;
-
-err_clk_unprepare:
- clk_disable_unprepare(i2c->clk);
- return ret;
-}
-
-static int zx2967_i2c_remove(struct platform_device *pdev)
-{
- struct zx2967_i2c *i2c = platform_get_drvdata(pdev);
-
- i2c_del_adapter(&i2c->adap);
- clk_disable_unprepare(i2c->clk);
-
- return 0;
-}
-
-static struct platform_driver zx2967_i2c_driver = {
- .probe = zx2967_i2c_probe,
- .remove = zx2967_i2c_remove,
- .driver = {
- .name = "zx2967_i2c",
- .of_match_table = zx2967_i2c_of_match,
- .pm = &zx2967_i2c_dev_pm_ops,
- },
-};
-module_platform_driver(zx2967_i2c_driver);
-
-MODULE_AUTHOR("Baoyou Xie <baoyou.xie@linaro.org>");
-MODULE_DESCRIPTION("ZTE ZX2967 I2C Bus Controller driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 37c510d9347a..8ceaa88dd78f 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -225,12 +225,8 @@ static void i2c_acpi_register_device(struct i2c_adapter *adapter,
adev->power.flags.ignore_parent = true;
acpi_device_set_enumerated(adev);
- if (IS_ERR(i2c_new_client_device(adapter, info))) {
+ if (IS_ERR(i2c_new_client_device(adapter, info)))
adev->power.flags.ignore_parent = false;
- dev_err(&adapter->dev,
- "failed to add I2C device %s from ACPI\n",
- dev_name(&adev->dev));
- }
}
static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level,
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index f5c9787992e9..d2d32c0fd8c3 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -323,8 +323,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
*/
unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX+3];
unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX+2];
- int num = read_write == I2C_SMBUS_READ ? 2 : 1;
- int i;
+ int nmsgs = read_write == I2C_SMBUS_READ ? 2 : 1;
u8 partial_pec = 0;
int status;
struct i2c_msg msg[2] = {
@@ -340,6 +339,8 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
.buf = msgbuf1,
},
};
+ bool wants_pec = ((flags & I2C_CLIENT_PEC) && size != I2C_SMBUS_QUICK
+ && size != I2C_SMBUS_I2C_BLOCK_DATA);
msgbuf0[0] = command;
switch (size) {
@@ -348,13 +349,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
/* Special case: The read/write field is used as data */
msg[0].flags = flags | (read_write == I2C_SMBUS_READ ?
I2C_M_RD : 0);
- num = 1;
+ nmsgs = 1;
break;
case I2C_SMBUS_BYTE:
if (read_write == I2C_SMBUS_READ) {
/* Special case: only a read! */
msg[0].flags = I2C_M_RD | flags;
- num = 1;
+ nmsgs = 1;
}
break;
case I2C_SMBUS_BYTE_DATA:
@@ -375,7 +376,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
}
break;
case I2C_SMBUS_PROC_CALL:
- num = 2; /* Special case */
+ nmsgs = 2; /* Special case */
read_write = I2C_SMBUS_READ;
msg[0].len = 3;
msg[1].len = 2;
@@ -398,12 +399,11 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
}
i2c_smbus_try_get_dmabuf(&msg[0], command);
- for (i = 1; i < msg[0].len; i++)
- msg[0].buf[i] = data->block[i - 1];
+ memcpy(msg[0].buf + 1, data->block, msg[0].len - 1);
}
break;
case I2C_SMBUS_BLOCK_PROC_CALL:
- num = 2; /* Another special case */
+ nmsgs = 2; /* Another special case */
read_write = I2C_SMBUS_READ;
if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
dev_err(&adapter->dev,
@@ -414,8 +414,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
msg[0].len = data->block[0] + 2;
i2c_smbus_try_get_dmabuf(&msg[0], command);
- for (i = 1; i < msg[0].len; i++)
- msg[0].buf[i] = data->block[i - 1];
+ memcpy(msg[0].buf + 1, data->block, msg[0].len - 1);
msg[1].flags |= I2C_M_RECV_LEN;
msg[1].len = 1; /* block length will be added by
@@ -437,8 +436,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
msg[0].len = data->block[0] + 1;
i2c_smbus_try_get_dmabuf(&msg[0], command);
- for (i = 1; i <= data->block[0]; i++)
- msg[0].buf[i] = data->block[i];
+ memcpy(msg[0].buf + 1, data->block + 1, data->block[0]);
}
break;
default:
@@ -446,33 +444,31 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
return -EOPNOTSUPP;
}
- i = ((flags & I2C_CLIENT_PEC) && size != I2C_SMBUS_QUICK
- && size != I2C_SMBUS_I2C_BLOCK_DATA);
- if (i) {
+ if (wants_pec) {
/* Compute PEC if first message is a write */
if (!(msg[0].flags & I2C_M_RD)) {
- if (num == 1) /* Write only */
+ if (nmsgs == 1) /* Write only */
i2c_smbus_add_pec(&msg[0]);
else /* Write followed by read */
partial_pec = i2c_smbus_msg_pec(0, &msg[0]);
}
/* Ask for PEC if last message is a read */
- if (msg[num-1].flags & I2C_M_RD)
- msg[num-1].len++;
+ if (msg[nmsgs - 1].flags & I2C_M_RD)
+ msg[nmsgs - 1].len++;
}
- status = __i2c_transfer(adapter, msg, num);
+ status = __i2c_transfer(adapter, msg, nmsgs);
if (status < 0)
goto cleanup;
- if (status != num) {
+ if (status != nmsgs) {
status = -EIO;
goto cleanup;
}
status = 0;
/* Check PEC if last message is a read */
- if (i && (msg[num-1].flags & I2C_M_RD)) {
- status = i2c_smbus_check_pec(partial_pec, &msg[num-1]);
+ if (wants_pec && (msg[nmsgs - 1].flags & I2C_M_RD)) {
+ status = i2c_smbus_check_pec(partial_pec, &msg[nmsgs - 1]);
if (status < 0)
goto cleanup;
}
@@ -490,8 +486,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
data->word = msgbuf1[0] | (msgbuf1[1] << 8);
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
- for (i = 0; i < data->block[0]; i++)
- data->block[i + 1] = msg[1].buf[i];
+ memcpy(data->block + 1, msg[1].buf, data->block[0]);
break;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_BLOCK_PROC_CALL:
@@ -502,8 +497,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
status = -EPROTO;
goto cleanup;
}
- for (i = 0; i < msg[1].buf[0] + 1; i++)
- data->block[i] = msg[1].buf[i];
+ memcpy(data->block, msg[1].buf, msg[1].buf[0] + 1);
break;
}
diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
index c288102de324..56dae08dfd48 100644
--- a/drivers/i2c/i2c-slave-testunit.c
+++ b/drivers/i2c/i2c-slave-testunit.c
@@ -19,6 +19,7 @@
enum testunit_cmds {
TU_CMD_READ_BYTES = 1, /* save 0 for ABORT, RESET or similar */
TU_CMD_HOST_NOTIFY,
+ TU_CMD_SMBUS_BLOCK_PROC_CALL,
TU_NUM_CMDS
};
@@ -88,6 +89,8 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
enum i2c_slave_event event, u8 *val)
{
struct testunit_data *tu = i2c_get_clientdata(client);
+ bool is_proc_call = tu->reg_idx == 3 && tu->regs[TU_REG_DATAL] == 1 &&
+ tu->regs[TU_REG_CMD] == TU_CMD_SMBUS_BLOCK_PROC_CALL;
int ret = 0;
switch (event) {
@@ -118,12 +121,17 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
fallthrough;
case I2C_SLAVE_WRITE_REQUESTED:
+ memset(tu->regs, 0, TU_NUM_REGS);
tu->reg_idx = 0;
break;
- case I2C_SLAVE_READ_REQUESTED:
case I2C_SLAVE_READ_PROCESSED:
- *val = TU_CUR_VERSION;
+ if (is_proc_call && tu->regs[TU_REG_DATAH])
+ tu->regs[TU_REG_DATAH]--;
+ fallthrough;
+
+ case I2C_SLAVE_READ_REQUESTED:
+ *val = is_proc_call ? tu->regs[TU_REG_DATAH] : TU_CUR_VERSION;
break;
}
diff --git a/drivers/i2c/i2c-stub.c b/drivers/i2c/i2c-stub.c
index 537a598e22db..d642cad219d9 100644
--- a/drivers/i2c/i2c-stub.c
+++ b/drivers/i2c/i2c-stub.c
@@ -7,7 +7,6 @@
*/
-#define DEBUG 1
#define pr_fmt(fmt) "i2c-stub: " fmt
#include <linux/errno.h>
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 4effe563e9e8..bac415a52b78 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -49,60 +49,112 @@ static int i2c_mux_gpio_deselect(struct i2c_mux_core *muxc, u32 chan)
return 0;
}
-#ifdef CONFIG_OF
-static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
- struct platform_device *pdev)
+#ifdef CONFIG_ACPI
+
+static int i2c_mux_gpio_get_acpi_adr(struct device *dev,
+ struct fwnode_handle *fwdev,
+ unsigned int *adr)
+
+{
+ unsigned long long adr64;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(ACPI_HANDLE_FWNODE(fwdev),
+ METHOD_NAME__ADR,
+ NULL, &adr64);
+
+ if (!ACPI_SUCCESS(status)) {
+ dev_err(dev, "Cannot get address\n");
+ return -EINVAL;
+ }
+
+ *adr = adr64;
+ if (*adr != adr64) {
+ dev_err(dev, "Address out of range\n");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+#else
+
+static int i2c_mux_gpio_get_acpi_adr(struct device *dev,
+ struct fwnode_handle *fwdev,
+ unsigned int *adr)
+{
+ return -EINVAL;
+}
+
+#endif
+
+static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
+ struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
- struct device_node *adapter_np, *child;
- struct i2c_adapter *adapter;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *adapter_np;
+ struct i2c_adapter *adapter = NULL;
+ struct fwnode_handle *child;
unsigned *values;
- int i = 0;
+ int rc, i = 0;
+
+ if (is_of_node(dev->fwnode)) {
+ if (!np)
+ return -ENODEV;
- if (!np)
- return -ENODEV;
+ adapter_np = of_parse_phandle(np, "i2c-parent", 0);
+ if (!adapter_np) {
+ dev_err(&pdev->dev, "Cannot parse i2c-parent\n");
+ return -ENODEV;
+ }
+ adapter = of_find_i2c_adapter_by_node(adapter_np);
+ of_node_put(adapter_np);
+
+ } else if (is_acpi_node(dev->fwnode)) {
+ /*
+ * In ACPI land the mux should be a direct child of the i2c
+ * bus it muxes.
+ */
+ acpi_handle dev_handle = ACPI_HANDLE(dev->parent);
- adapter_np = of_parse_phandle(np, "i2c-parent", 0);
- if (!adapter_np) {
- dev_err(&pdev->dev, "Cannot parse i2c-parent\n");
- return -ENODEV;
+ adapter = i2c_acpi_find_adapter_by_handle(dev_handle);
}
- adapter = of_find_i2c_adapter_by_node(adapter_np);
- of_node_put(adapter_np);
+
if (!adapter)
return -EPROBE_DEFER;
mux->data.parent = i2c_adapter_id(adapter);
put_device(&adapter->dev);
- mux->data.n_values = of_get_child_count(np);
-
- values = devm_kcalloc(&pdev->dev,
+ mux->data.n_values = device_get_child_node_count(dev);
+ values = devm_kcalloc(dev,
mux->data.n_values, sizeof(*mux->data.values),
GFP_KERNEL);
if (!values) {
- dev_err(&pdev->dev, "Cannot allocate values array");
+ dev_err(dev, "Cannot allocate values array");
return -ENOMEM;
}
- for_each_child_of_node(np, child) {
- of_property_read_u32(child, "reg", values + i);
+ device_for_each_child_node(dev, child) {
+ if (is_of_node(child)) {
+ fwnode_property_read_u32(child, "reg", values + i);
+
+ } else if (is_acpi_node(child)) {
+ rc = i2c_mux_gpio_get_acpi_adr(dev, child, values + i);
+ if (rc)
+ return rc;
+ }
+
i++;
}
mux->data.values = values;
- if (of_property_read_u32(np, "idle-state", &mux->data.idle))
+ if (fwnode_property_read_u32(dev->fwnode, "idle-state", &mux->data.idle))
mux->data.idle = I2C_MUX_GPIO_NO_IDLE;
return 0;
}
-#else
-static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
- struct platform_device *pdev)
-{
- return 0;
-}
-#endif
static int i2c_mux_gpio_probe(struct platform_device *pdev)
{
@@ -118,7 +170,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
if (!dev_get_platdata(&pdev->dev)) {
- ret = i2c_mux_gpio_probe_dt(mux, pdev);
+ ret = i2c_mux_gpio_probe_fw(mux, pdev);
if (ret < 0)
return ret;
} else {
diff --git a/drivers/i2c/muxes/i2c-mux-mlxcpld.c b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
index 5ed55ca4fe93..1a879f6a31ef 100644
--- a/drivers/i2c/muxes/i2c-mux-mlxcpld.c
+++ b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
@@ -1,35 +1,8 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*
- * drivers/i2c/muxes/i2c-mux-mlxcpld.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
+ * Mellanox i2c mux driver
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (C) 2016-2020 Mellanox Technologies
*/
#include <linux/device.h>
@@ -38,19 +11,19 @@
#include <linux/io.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/platform_data/x86/mlxcpld.h>
+#include <linux/platform_data/mlxcpld.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#define CPLD_MUX_MAX_NCHANS 8
-
/* mlxcpld_mux - mux control structure:
- * @last_chan - last register value
+ * @last_val - last selected register value or -1 if mux deselected
* @client - I2C device client
+ * @pdata: platform data
*/
struct mlxcpld_mux {
- u8 last_chan;
+ int last_val;
struct i2c_client *client;
+ struct mlxcpld_mux_plat_data pdata;
};
/* MUX logic description.
@@ -81,37 +54,50 @@ struct mlxcpld_mux {
*
*/
-static const struct i2c_device_id mlxcpld_mux_id[] = {
- { "mlxcpld_mux_module", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, mlxcpld_mux_id);
-
/* Write to mux register. Don't use i2c_transfer() and i2c_smbus_xfer()
* for this as they will try to lock adapter a second time.
*/
static int mlxcpld_mux_reg_write(struct i2c_adapter *adap,
- struct i2c_client *client, u8 val)
+ struct mlxcpld_mux *mux, u32 val)
{
- struct mlxcpld_mux_plat_data *pdata = dev_get_platdata(&client->dev);
- union i2c_smbus_data data = { .byte = val };
-
- return __i2c_smbus_xfer(adap, client->addr, client->flags,
- I2C_SMBUS_WRITE, pdata->sel_reg_addr,
- I2C_SMBUS_BYTE_DATA, &data);
+ struct i2c_client *client = mux->client;
+ union i2c_smbus_data data;
+ struct i2c_msg msg;
+ u8 buf[3];
+
+ switch (mux->pdata.reg_size) {
+ case 1:
+ data.byte = val;
+ return __i2c_smbus_xfer(adap, client->addr, client->flags,
+ I2C_SMBUS_WRITE, mux->pdata.sel_reg_addr,
+ I2C_SMBUS_BYTE_DATA, &data);
+ case 2:
+ buf[0] = mux->pdata.sel_reg_addr >> 8;
+ buf[1] = mux->pdata.sel_reg_addr;
+ buf[2] = val;
+ msg.addr = client->addr;
+ msg.buf = buf;
+ msg.len = mux->pdata.reg_size + 1;
+ msg.flags = 0;
+ return __i2c_transfer(adap, &msg, 1);
+ default:
+ return -EINVAL;
+ }
}
static int mlxcpld_mux_select_chan(struct i2c_mux_core *muxc, u32 chan)
{
- struct mlxcpld_mux *data = i2c_mux_priv(muxc);
- struct i2c_client *client = data->client;
- u8 regval = chan + 1;
+ struct mlxcpld_mux *mux = i2c_mux_priv(muxc);
+ u32 regval = chan;
int err = 0;
+ if (mux->pdata.reg_size == 1)
+ regval += 1;
+
	/* Only select the channel if it's different from the last channel */
- if (data->last_chan != regval) {
- err = mlxcpld_mux_reg_write(muxc->parent, client, regval);
- data->last_chan = err < 0 ? 0 : regval;
+ if (mux->last_val != regval) {
+ err = mlxcpld_mux_reg_write(muxc->parent, mux, regval);
+ mux->last_val = err < 0 ? -1 : regval;
}
return err;
@@ -119,56 +105,64 @@ static int mlxcpld_mux_select_chan(struct i2c_mux_core *muxc, u32 chan)
static int mlxcpld_mux_deselect(struct i2c_mux_core *muxc, u32 chan)
{
- struct mlxcpld_mux *data = i2c_mux_priv(muxc);
- struct i2c_client *client = data->client;
+ struct mlxcpld_mux *mux = i2c_mux_priv(muxc);
/* Deselect active channel */
- data->last_chan = 0;
+ mux->last_val = -1;
- return mlxcpld_mux_reg_write(muxc->parent, client, data->last_chan);
+ return mlxcpld_mux_reg_write(muxc->parent, mux, 0);
}
/* Probe/remove functions */
-static int mlxcpld_mux_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int mlxcpld_mux_probe(struct platform_device *pdev)
{
- struct i2c_adapter *adap = client->adapter;
- struct mlxcpld_mux_plat_data *pdata = dev_get_platdata(&client->dev);
+ struct mlxcpld_mux_plat_data *pdata = dev_get_platdata(&pdev->dev);
+ struct i2c_client *client = to_i2c_client(pdev->dev.parent);
struct i2c_mux_core *muxc;
- int num, force;
struct mlxcpld_mux *data;
- int err;
+ int num, err;
+ u32 func;
if (!pdata)
return -EINVAL;
- if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
+ switch (pdata->reg_size) {
+ case 1:
+ func = I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
+ break;
+ case 2:
+ func = I2C_FUNC_I2C;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!i2c_check_functionality(client->adapter, func))
return -ENODEV;
- muxc = i2c_mux_alloc(adap, &client->dev, CPLD_MUX_MAX_NCHANS,
+ muxc = i2c_mux_alloc(client->adapter, &pdev->dev, pdata->num_adaps,
sizeof(*data), 0, mlxcpld_mux_select_chan,
mlxcpld_mux_deselect);
if (!muxc)
return -ENOMEM;
+ platform_set_drvdata(pdev, muxc);
data = i2c_mux_priv(muxc);
- i2c_set_clientdata(client, muxc);
data->client = client;
- data->last_chan = 0; /* force the first selection */
+ memcpy(&data->pdata, pdata, sizeof(*pdata));
+ data->last_val = -1; /* force the first selection */
/* Create an adapter for each channel. */
- for (num = 0; num < CPLD_MUX_MAX_NCHANS; num++) {
- if (num >= pdata->num_adaps)
- /* discard unconfigured channels */
- break;
-
- force = pdata->adap_ids[num];
-
- err = i2c_mux_add_adapter(muxc, force, num, 0);
+ for (num = 0; num < pdata->num_adaps; num++) {
+ err = i2c_mux_add_adapter(muxc, 0, pdata->chan_ids[num], 0);
if (err)
goto virt_reg_failed;
}
+ /* Notify caller when all channels' adapters are created. */
+ if (pdata->completion_notify)
+ pdata->completion_notify(pdata->handle, muxc->parent, muxc->adapter);
+
return 0;
virt_reg_failed:
@@ -176,24 +170,23 @@ virt_reg_failed:
return err;
}
-static int mlxcpld_mux_remove(struct i2c_client *client)
+static int mlxcpld_mux_remove(struct platform_device *pdev)
{
- struct i2c_mux_core *muxc = i2c_get_clientdata(client);
+ struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
i2c_mux_del_adapters(muxc);
return 0;
}
-static struct i2c_driver mlxcpld_mux_driver = {
- .driver = {
- .name = "mlxcpld-mux",
+static struct platform_driver mlxcpld_mux_driver = {
+ .driver = {
+ .name = "i2c-mux-mlxcpld",
},
- .probe = mlxcpld_mux_probe,
- .remove = mlxcpld_mux_remove,
- .id_table = mlxcpld_mux_id,
+ .probe = mlxcpld_mux_probe,
+ .remove = mlxcpld_mux_remove,
};
-module_i2c_driver(mlxcpld_mux_driver);
+module_platform_driver(mlxcpld_mux_driver);
MODULE_AUTHOR("Michael Shych (michaels@mellanox.com)");
MODULE_DESCRIPTION("Mellanox I2C-CPLD-MUX driver");
diff --git a/drivers/i3c/device.c b/drivers/i3c/device.c
index bb8e60dff988..e92d3e9a52bd 100644
--- a/drivers/i3c/device.c
+++ b/drivers/i3c/device.c
@@ -262,6 +262,11 @@ int i3c_driver_register_with_owner(struct i3c_driver *drv, struct module *owner)
drv->driver.owner = owner;
drv->driver.bus = &i3c_bus_type;
+ if (!drv->probe) {
+ pr_err("Trying to register an i3c driver without probe callback\n");
+ return -EINVAL;
+ }
+
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(i3c_driver_register_with_owner);
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index b61bf53ec07a..f8e9b7305c13 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -326,15 +326,13 @@ static int i3c_device_remove(struct device *dev)
{
struct i3c_device *i3cdev = dev_to_i3cdev(dev);
struct i3c_driver *driver = drv_to_i3cdrv(dev->driver);
- int ret;
- ret = driver->remove(i3cdev);
- if (ret)
- return ret;
+ if (driver->remove)
+ driver->remove(i3cdev);
i3c_device_free_ibi(i3cdev);
- return ret;
+ return 0;
}
struct bus_type i3c_bus_type = {
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
index e68f15f4b4d0..3b8f95916f46 100644
--- a/drivers/i3c/master/Kconfig
+++ b/drivers/i3c/master/Kconfig
@@ -22,9 +22,18 @@ config DW_I3C_MASTER
This driver can also be built as a module. If so, the module
will be called dw-i3c-master.
+config SVC_I3C_MASTER
+ tristate "Silvaco I3C Dual-Role Master driver"
+ depends on I3C
+ depends on HAS_IOMEM
+ depends on !(ALPHA || PARISC)
+ help
+ Support for Silvaco I3C Dual-Role Master Controller.
+
config MIPI_I3C_HCI
tristate "MIPI I3C Host Controller Interface driver (EXPERIMENTAL)"
depends on I3C
+ depends on HAS_IOMEM
help
	  Support for hardware following the MIPI Alliance's I3C Host Controller
Interface specification.
diff --git a/drivers/i3c/master/Makefile b/drivers/i3c/master/Makefile
index b892fd4cafad..b3fee0f690b2 100644
--- a/drivers/i3c/master/Makefile
+++ b/drivers/i3c/master/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CDNS_I3C_MASTER) += i3c-master-cdns.o
obj-$(CONFIG_DW_I3C_MASTER) += dw-i3c-master.o
+obj-$(CONFIG_SVC_I3C_MASTER) += svc-i3c-master.o
obj-$(CONFIG_MIPI_I3C_HCI) += mipi-i3c-hci/
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 8513bd353c05..03a368da51b9 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -816,11 +816,6 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
dw_i3c_master_free_xfer(xfer);
- i3c_master_disec_locked(m, I3C_BROADCAST_ADDR,
- I3C_CCC_EVENT_HJ |
- I3C_CCC_EVENT_MR |
- I3C_CCC_EVENT_SIR);
-
return 0;
}
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index 500abd27fb22..1b73647cc3b1 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -777,7 +777,7 @@ static int i3c_hci_remove(struct platform_device *pdev)
return 0;
}
-static const struct __maybe_unused of_device_id i3c_hci_of_match[] = {
+static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
{ .compatible = "mipi-i3c-hci", },
{},
};
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
new file mode 100644
index 000000000000..8d990696676e
--- /dev/null
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -0,0 +1,1478 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Silvaco dual-role I3C master driver
+ *
+ * Copyright (C) 2020 Silvaco
+ * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
+ * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/* Master Mode Registers */
+#define SVC_I3C_MCONFIG 0x000
+#define SVC_I3C_MCONFIG_MASTER_EN BIT(0)
+#define SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
+#define SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
+#define SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
+#define SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
+#define SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
+#define SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
+#define SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
+#define SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
+#define SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
+
+#define SVC_I3C_MCTRL 0x084
+#define SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
+#define SVC_I3C_MCTRL_REQUEST_NONE 0
+#define SVC_I3C_MCTRL_REQUEST_START_ADDR 1
+#define SVC_I3C_MCTRL_REQUEST_STOP 2
+#define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
+#define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
+#define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
+#define SVC_I3C_MCTRL_TYPE_I3C 0
+#define SVC_I3C_MCTRL_TYPE_I2C BIT(4)
+#define SVC_I3C_MCTRL_IBIRESP_AUTO 0
+#define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
+#define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
+#define SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
+#define SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
+#define SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
+#define SVC_I3C_MCTRL_DIR_WRITE 0
+#define SVC_I3C_MCTRL_DIR_READ 1
+#define SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
+#define SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
+
+#define SVC_I3C_MSTATUS 0x088
+#define SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
+#define SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
+#define SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
+#define SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
+#define SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
+#define SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
+#define SVC_I3C_MSTATUS_IBITYPE_IBI 1
+#define SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
+#define SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
+#define SVC_I3C_MINT_SLVSTART BIT(8)
+#define SVC_I3C_MINT_MCTRLDONE BIT(9)
+#define SVC_I3C_MINT_COMPLETE BIT(10)
+#define SVC_I3C_MINT_RXPEND BIT(11)
+#define SVC_I3C_MINT_TXNOTFULL BIT(12)
+#define SVC_I3C_MINT_IBIWON BIT(13)
+#define SVC_I3C_MINT_ERRWARN BIT(15)
+#define SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
+#define SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
+#define SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
+#define SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
+#define SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
+#define SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
+#define SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
+#define SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
+
+#define SVC_I3C_IBIRULES 0x08C
+#define SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
+ ((addr) & 0x3F) << ((slot) * 6))
+#define SVC_I3C_IBIRULES_ADDRS 5
+#define SVC_I3C_IBIRULES_MSB0 BIT(30)
+#define SVC_I3C_IBIRULES_NOBYTE BIT(31)
+#define SVC_I3C_IBIRULES_MANDBYTE 0
+#define SVC_I3C_MINTSET 0x090
+#define SVC_I3C_MINTCLR 0x094
+#define SVC_I3C_MINTMASKED 0x098
+#define SVC_I3C_MERRWARN 0x09C
+#define SVC_I3C_MDMACTRL 0x0A0
+#define SVC_I3C_MDATACTRL 0x0AC
+#define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
+#define SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
+#define SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
+#define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
+#define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
+#define SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
+#define SVC_I3C_MDATACTRL_TXFULL BIT(30)
+#define SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
+
+#define SVC_I3C_MWDATAB 0x0B0
+#define SVC_I3C_MWDATAB_END BIT(8)
+
+#define SVC_I3C_MWDATABE 0x0B4
+#define SVC_I3C_MWDATAH 0x0B8
+#define SVC_I3C_MWDATAHE 0x0BC
+#define SVC_I3C_MRDATAB 0x0C0
+#define SVC_I3C_MRDATAH 0x0C8
+#define SVC_I3C_MWMSG_SDR 0x0D0
+#define SVC_I3C_MRMSG_SDR 0x0D4
+#define SVC_I3C_MWMSG_DDR 0x0D8
+#define SVC_I3C_MRMSG_DDR 0x0DC
+
+#define SVC_I3C_MDYNADDR 0x0E4
+#define SVC_MDYNADDR_VALID BIT(0)
+#define SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
+
+#define SVC_I3C_MAX_DEVS 32
+
+/* This parameter depends on the implementation and may be tuned */
+#define SVC_I3C_FIFO_SIZE 16
+
+struct svc_i3c_cmd {
+ u8 addr;
+ bool rnw;
+ u8 *in;
+ const void *out;
+ unsigned int len;
+ unsigned int read_len;
+ bool continued;
+};
+
+struct svc_i3c_xfer {
+ struct list_head node;
+ struct completion comp;
+ int ret;
+ unsigned int type;
+ unsigned int ncmds;
+ struct svc_i3c_cmd cmds[];
+};
+
+/**
+ * struct svc_i3c_master - Silvaco I3C Master structure
+ * @base: I3C master controller
+ * @dev: Corresponding device
+ * @regs: Memory mapping
+ * @free_slots: Bit array of available slots
+ * @addrs: Array containing the dynamic addresses of each attached device
+ * @descs: Array of descriptors, one per attached device
+ * @hj_work: Hot-join work
+ * @ibi_work: IBI work
+ * @irq: Main interrupt
+ * @pclk: System clock
+ * @fclk: Fast clock (bus)
+ * @sclk: Slow clock (other events)
+ * @xferqueue: Transfer queue structure
+ * @xferqueue.list: List member
+ * @xferqueue.cur: Current ongoing transfer
+ * @xferqueue.lock: Queue lock
+ * @ibi: IBI structure
+ * @ibi.num_slots: Number of slots available in @ibi.slots
+ * @ibi.slots: Available IBI slots
+ * @ibi.tbq_slot: To be queued IBI slot
+ * @ibi.lock: IBI lock
+ */
+struct svc_i3c_master {
+ struct i3c_master_controller base;
+ struct device *dev;
+ void __iomem *regs;
+ u32 free_slots;
+ u8 addrs[SVC_I3C_MAX_DEVS];
+ struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
+ struct work_struct hj_work;
+ struct work_struct ibi_work;
+ int irq;
+ struct clk *pclk;
+ struct clk *fclk;
+ struct clk *sclk;
+ struct {
+ struct list_head list;
+ struct svc_i3c_xfer *cur;
+ /* Prevent races between transfers */
+ spinlock_t lock;
+ } xferqueue;
+ struct {
+ unsigned int num_slots;
+ struct i3c_dev_desc **slots;
+ struct i3c_ibi_slot *tbq_slot;
+ /* Prevent races within IBI handlers */
+ spinlock_t lock;
+ } ibi;
+};
+
+/**
+ * struct svc_i3c_i2c_dev_data - Device specific data
+ * @index: Index in the master tables corresponding to this device
+ * @ibi: IBI slot index in the master structure
+ * @ibi_pool: IBI pool associated to this device
+ */
+struct svc_i3c_i2c_dev_data {
+ u8 index;
+ int ibi;
+ struct i3c_generic_ibi_pool *ibi_pool;
+};
+
+static bool svc_i3c_master_error(struct svc_i3c_master *master)
+{
+ u32 mstatus, merrwarn;
+
+ mstatus = readl(master->regs + SVC_I3C_MSTATUS);
+ if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
+ merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
+ writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
+ dev_err(master->dev,
+ "Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
+ mstatus, merrwarn);
+
+ return true;
+ }
+
+ return false;
+}
+
+static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
+{
+ writel(mask, master->regs + SVC_I3C_MINTSET);
+}
+
+static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
+{
+ u32 mask = readl(master->regs + SVC_I3C_MINTSET);
+
+ writel(mask, master->regs + SVC_I3C_MINTCLR);
+}
+
+static inline struct svc_i3c_master *
+to_svc_i3c_master(struct i3c_master_controller *master)
+{
+ return container_of(master, struct svc_i3c_master, base);
+}
+
+static void svc_i3c_master_hj_work(struct work_struct *work)
+{
+ struct svc_i3c_master *master;
+
+ master = container_of(work, struct svc_i3c_master, hj_work);
+ i3c_master_do_daa(&master->base);
+}
+
+static struct i3c_dev_desc *
+svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
+ unsigned int ibiaddr)
+{
+ int i;
+
+ for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
+ if (master->addrs[i] == ibiaddr)
+ break;
+
+ if (i == SVC_I3C_MAX_DEVS)
+ return NULL;
+
+ return master->descs[i];
+}
+
+static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
+{
+ writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);
+
+ /*
+ * This delay is necessary after the emission of a stop, otherwise e.g.
+ * repeating IBIs do not get detected. There is a note in the manual
+ * about it, stating that the stop condition might not be settled
+ * correctly if a start condition follows too rapidly.
+ */
+ udelay(1);
+}
+
+static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
+{
+ writel(readl(master->regs + SVC_I3C_MERRWARN),
+ master->regs + SVC_I3C_MERRWARN);
+}
+
+static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
+ struct i3c_dev_desc *dev)
+{
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_ibi_slot *slot;
+ unsigned int count;
+ u32 mdatactrl;
+ u8 *buf;
+
+ slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+ if (!slot)
+ return -ENOSPC;
+
+ slot->len = 0;
+ buf = slot->data;
+
+ while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
+ slot->len < SVC_I3C_FIFO_SIZE) {
+ mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
+ count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
+ readsl(master->regs + SVC_I3C_MRDATAB, buf, count);
+ slot->len += count;
+ buf += count;
+ }
+
+ master->ibi.tbq_slot = slot;
+
+ return 0;
+}
+
+static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
+ bool mandatory_byte)
+{
+ unsigned int ibi_ack_nack;
+
+ ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
+ if (mandatory_byte)
+ ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
+ else
+ ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;
+
+ writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
+}
+
+static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
+{
+ writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
+ SVC_I3C_MCTRL_IBIRESP_NACK,
+ master->regs + SVC_I3C_MCTRL);
+}
+
+static void svc_i3c_master_ibi_work(struct work_struct *work)
+{
+ struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
+ struct svc_i3c_i2c_dev_data *data;
+ unsigned int ibitype, ibiaddr;
+ struct i3c_dev_desc *dev;
+ u32 status, val;
+ int ret;
+
+ /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
+ writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
+ SVC_I3C_MCTRL_IBIRESP_AUTO,
+ master->regs + SVC_I3C_MCTRL);
+
+ /* Wait for IBIWON, should take approximately 100us */
+ ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
+ SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
+ if (ret) {
+ dev_err(master->dev, "Timeout when polling for IBIWON\n");
+ goto reenable_ibis;
+ }
+
+ /* Clear the interrupt status */
+ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
+
+ status = readl(master->regs + SVC_I3C_MSTATUS);
+ ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
+ ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
+
+ /* Handle the critical responses to IBIs */
+ switch (ibitype) {
+ case SVC_I3C_MSTATUS_IBITYPE_IBI:
+ dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
+ if (!dev)
+ svc_i3c_master_nack_ibi(master);
+ else
+ svc_i3c_master_handle_ibi(master, dev);
+ break;
+ case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
+ svc_i3c_master_ack_ibi(master, false);
+ break;
+ case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
+ svc_i3c_master_nack_ibi(master);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * If an error happened, we probably got interrupted and the exchange
+ * timed out. In this case we just drop everything, emit a stop and wait
+ * for the slave to interrupt again.
+ */
+ if (svc_i3c_master_error(master)) {
+ if (master->ibi.tbq_slot) {
+ data = i3c_dev_get_master_data(dev);
+ i3c_generic_ibi_recycle_slot(data->ibi_pool,
+ master->ibi.tbq_slot);
+ master->ibi.tbq_slot = NULL;
+ }
+
+ svc_i3c_master_emit_stop(master);
+
+ goto reenable_ibis;
+ }
+
+ /* Handle the non critical tasks */
+ switch (ibitype) {
+ case SVC_I3C_MSTATUS_IBITYPE_IBI:
+ if (dev) {
+ i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
+ master->ibi.tbq_slot = NULL;
+ }
+ svc_i3c_master_emit_stop(master);
+ break;
+ case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
+ queue_work(master->base.wq, &master->hj_work);
+ break;
+ case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
+ default:
+ break;
+ }
+
+reenable_ibis:
+ svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
+}
+
+static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
+{
+ struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
+ u32 active = readl(master->regs + SVC_I3C_MINTMASKED);
+
+ if (!SVC_I3C_MSTATUS_SLVSTART(active))
+ return IRQ_NONE;
+
+ /* Clear the interrupt status */
+ writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
+
+ svc_i3c_master_disable_interrupts(master);
+
+ /* Handle the interrupt in a non atomic context */
+ queue_work(master->base.wq, &master->ibi_work);
+
+ return IRQ_HANDLED;
+}
+
+static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ struct i3c_device_info info = {};
+ unsigned long fclk_rate, fclk_period_ns;
+ unsigned int high_period_ns, od_low_period_ns;
+ u32 ppbaud, pplow, odhpp, odbaud, i2cbaud, reg;
+ int ret;
+
+ /* Timings derivation */
+ fclk_rate = clk_get_rate(master->fclk);
+ if (!fclk_rate)
+ return -EINVAL;
+
+ fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
+
+ /*
+ * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
+ * Simplest configuration is using a 50% duty-cycle of 40ns.
+ */
+ ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
+ pplow = 0;
+
+ /*
+ * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
+ * duty-cycle tuned so that high levels are filtered out by
+ * the 50ns filter (target being 40ns).
+ */
+ odhpp = 1;
+ high_period_ns = (ppbaud + 1) * fclk_period_ns;
+ odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
+ od_low_period_ns = (odbaud + 1) * high_period_ns;
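+ /*
+ * Worked example, assuming a hypothetical 100MHz fclk (10ns period):
+ * ppbaud = DIV_ROUND_UP(40, 10) - 1 = 3, so the push-pull high and low
+ * times are both (3 + 1) * 10 = 40ns, i.e. an 80ns/12.5MHz period.
+ * Then odbaud = DIV_ROUND_UP(240 - 40, 40) - 1 = 4, giving an
+ * open-drain low time of (4 + 1) * 40 = 200ns and a 240ns (~4.17MHz)
+ * period, matching the targets above.
+ */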
+
+ switch (bus->mode) {
+ case I3C_BUS_MODE_PURE:
+ i2cbaud = 0;
+ break;
+ case I3C_BUS_MODE_MIXED_FAST:
+ case I3C_BUS_MODE_MIXED_LIMITED:
+ /*
+ * Using I2C Fm+ mode, target is 1MHz/1000ns; the difference
+ * between the high and low periods does not really matter.
+ */
+ i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
+ break;
+ case I3C_BUS_MODE_MIXED_SLOW:
+ /*
+ * Using I2C Fm mode, target is 0.4MHz/2500ns, with the same
+ * constraints as the Fm+ mode.
+ */
+ i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
+ break;
+ default:
+ return -EINVAL;
+ }
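+ /*
+ * Continuing the hypothetical 100MHz fclk example above
+ * (od_low_period_ns = 200ns): mixed fast mode gives
+ * i2cbaud = DIV_ROUND_UP(1000, 200) - 2 = 3 and mixed slow mode gives
+ * i2cbaud = DIV_ROUND_UP(2500, 200) - 2 = 11, approximating the 1MHz
+ * and 0.4MHz I2C targets respectively.
+ */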
+
+ reg = SVC_I3C_MCONFIG_MASTER_EN |
+ SVC_I3C_MCONFIG_DISTO(0) |
+ SVC_I3C_MCONFIG_HKEEP(0) |
+ SVC_I3C_MCONFIG_ODSTOP(0) |
+ SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
+ SVC_I3C_MCONFIG_PPLOW(pplow) |
+ SVC_I3C_MCONFIG_ODBAUD(odbaud) |
+ SVC_I3C_MCONFIG_ODHPP(odhpp) |
+ SVC_I3C_MCONFIG_SKEW(0) |
+ SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
+ writel(reg, master->regs + SVC_I3C_MCONFIG);
+
+ /* Master core's registration */
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+
+ info.dyn_addr = ret;
+
+ writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
+ master->regs + SVC_I3C_MDYNADDR);
+
+ ret = i3c_master_set_info(&master->base, &info);
+ if (ret)
+ return ret;
+
+ svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
+
+ return 0;
+}
+
+static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+
+ svc_i3c_master_disable_interrupts(master);
+
+ /* Disable master */
+ writel(0, master->regs + SVC_I3C_MCONFIG);
+}
+
+static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
+{
+ unsigned int slot;
+
+ if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
+ return -ENOSPC;
+
+ slot = ffs(master->free_slots) - 1;
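+ /* e.g. with free_slots = 0xfffffffc, ffs() returns 3 and slot 2 is reserved */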
+
+ master->free_slots &= ~BIT(slot);
+
+ return slot;
+}
+
+static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
+ unsigned int slot)
+{
+ master->free_slots |= BIT(slot);
+}
+
+static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data;
+ int slot;
+
+ slot = svc_i3c_master_reserve_slot(master);
+ if (slot < 0)
+ return slot;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ svc_i3c_master_release_slot(master, slot);
+ return -ENOMEM;
+ }
+
+ data->ibi = -1;
+ data->index = slot;
+ master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
+ dev->info.static_addr;
+ master->descs[slot] = dev;
+
+ i3c_dev_set_master_data(dev, data);
+
+ return 0;
+}
+
+static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
+ u8 old_dyn_addr)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
+ dev->info.static_addr;
+
+ return 0;
+}
+
+static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+
+ master->addrs[data->index] = 0;
+ svc_i3c_master_release_slot(master, data->index);
+
+ kfree(data);
+}
+
+static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data;
+ int slot;
+
+ slot = svc_i3c_master_reserve_slot(master);
+ if (slot < 0)
+ return slot;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ svc_i3c_master_release_slot(master, slot);
+ return -ENOMEM;
+ }
+
+ data->index = slot;
+ master->addrs[slot] = dev->addr;
+
+ i2c_dev_set_master_data(dev, data);
+
+ return 0;
+}
+
+static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+
+ svc_i3c_master_release_slot(master, data->index);
+
+ kfree(data);
+}
+
+static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
+ unsigned int len)
+{
+ int ret, i;
+ u32 reg;
+
+ for (i = 0; i < len; i++) {
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_RXPEND(reg), 0, 1000);
+ if (ret)
+ return ret;
+
+ dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
+ }
+
+ return 0;
+}
+
+static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
+ u8 *addrs, unsigned int *count)
+{
+ u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
+ unsigned int dev_nb = 0, last_addr = 0;
+ u32 reg;
+ int ret, i;
+
+ while (true) {
+ /* Enter/proceed with DAA */
+ writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
+ SVC_I3C_MCTRL_TYPE_I3C |
+ SVC_I3C_MCTRL_IBIRESP_NACK |
+ SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
+ master->regs + SVC_I3C_MCTRL);
+
+ /*
+ * Either one slave will send its ID, or the assignment process
+ * is done.
+ */
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_RXPEND(reg) |
+ SVC_I3C_MSTATUS_MCTRLDONE(reg),
+ 1, 1000);
+ if (ret)
+ return ret;
+
+ if (SVC_I3C_MSTATUS_RXPEND(reg)) {
+ u8 data[6];
+
+ /*
+ * For now we only care about the 48-bit provisional ID,
+ * just to be sure a device does not nack an address twice.
+ * Otherwise, we would just need to flush the RX FIFO.
+ */
+ ret = svc_i3c_master_readb(master, data, 6);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 6; i++)
+ prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));
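+ /*
+ * For example, hypothetical raw bytes {0x01, 0x23, 0x45, 0x67,
+ * 0x89, 0xab} would be assembled MSB first into
+ * prov_id = 0x0123456789ab.
+ */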
+
+ /* We do not care about the BCR and DCR yet */
+ ret = svc_i3c_master_readb(master, data, 2);
+ if (ret)
+ return ret;
+ } else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
+ if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
+ SVC_I3C_MSTATUS_COMPLETE(reg)) {
+ /*
+ * All devices received and acked their dynamic
+ * address; this is the natural end of the DAA
+ * procedure.
+ */
+ break;
+ } else if (SVC_I3C_MSTATUS_NACKED(reg)) {
+ /*
+ * A slave device nacked the address; this is
+ * allowed only once. DAA will be stopped and
+ * then resumed. The same device is supposed to
+ * answer again immediately and shall ack the
+ * address this time.
+ */
+ if (prov_id[dev_nb] == nacking_prov_id)
+ return -EIO;
+
+ dev_nb--;
+ nacking_prov_id = prov_id[dev_nb];
+ svc_i3c_master_emit_stop(master);
+
+ continue;
+ } else {
+ return -EIO;
+ }
+ }
+
+ /* Wait for the slave to be ready to receive its address */
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
+ SVC_I3C_MSTATUS_STATE_DAA(reg) &&
+ SVC_I3C_MSTATUS_BETWEEN(reg),
+ 0, 1000);
+ if (ret)
+ return ret;
+
+ /* Give the slave device a suitable dynamic address */
+ ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
+ if (ret < 0)
+ return ret;
+
+ addrs[dev_nb] = ret;
+ dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
+ dev_nb, addrs[dev_nb]);
+
+ writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
+ last_addr = addrs[dev_nb++];
+ }
+
+ *count = dev_nb;
+
+ return 0;
+}
+
+static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
+{
+ struct i3c_dev_desc *dev;
+ u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
+ unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
+ nobyte_addr_ko = 0;
+ bool list_mbyte = false, list_nobyte = false;
+
+ /* Create the IBIRULES register for both cases */
+ i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
+ if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
+ continue;
+
+ if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
+ reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
+ dev->info.dyn_addr);
+
+ /* IBI rules cannot be applied to devices with MSb=1 */
+ if (dev->info.dyn_addr & BIT(7))
+ mbyte_addr_ko++;
+ else
+ mbyte_addr_ok++;
+ } else {
+ reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
+ dev->info.dyn_addr);
+
+ /* IBI rules cannot be applied to devices with MSb=1 */
+ if (dev->info.dyn_addr & BIT(7))
+ nobyte_addr_ko++;
+ else
+ nobyte_addr_ok++;
+ }
+ }
+
+ /* Check whether each device list can be handled by the hardware */
+ if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
+ list_mbyte = true;
+
+ if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
+ list_nobyte = true;
+
+ /* No list can be properly handled, return an error */
+ if (!list_mbyte && !list_nobyte)
+ return -ERANGE;
+
+ /* Pick the first list the hardware can handle, preferring the mandatory-byte one */
+ if (list_mbyte)
+ writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
+ else
+ writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);
+
+ return 0;
+}
+
+static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ u8 addrs[SVC_I3C_MAX_DEVS];
+ unsigned long flags;
+ unsigned int dev_nb;
+ int ret, i;
+
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+ if (ret)
+ goto emit_stop;
+
+ /* Register all devices that took part in the DAA with the core */
+ for (i = 0; i < dev_nb; i++) {
+ ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Configure IBI auto-rules */
+ ret = svc_i3c_update_ibirules(master);
+ if (ret) {
+ dev_err(master->dev, "Cannot handle such a list of devices\n");
+ return ret;
+ }
+
+ return 0;
+
+emit_stop:
+ svc_i3c_master_emit_stop(master);
+ svc_i3c_master_clear_merrwarn(master);
+
+ return ret;
+}
+
+static int svc_i3c_master_read(struct svc_i3c_master *master,
+ u8 *in, unsigned int len)
+{
+ int offset = 0, i, ret;
+ u32 mdctrl;
+
+ while (offset < len) {
+ unsigned int count;
+
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
+ mdctrl,
+ !(mdctrl & SVC_I3C_MDATACTRL_RXEMPTY),
+ 0, 1000);
+ if (ret)
+ return ret;
+
+ count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
+ for (i = 0; i < count; i++)
+ in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);
+
+ offset += count;
+ }
+
+ return 0;
+}
+
+static int svc_i3c_master_write(struct svc_i3c_master *master,
+ const u8 *out, unsigned int len)
+{
+ int offset = 0, ret;
+ u32 mdctrl;
+
+ while (offset < len) {
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
+ mdctrl,
+ !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
+ 0, 1000);
+ if (ret)
+ return ret;
+
+ /*
+ * The last byte to be sent over the bus must either have the
+ * "end" bit set or be written in MWDATABE.
+ */
+ if (likely(offset < (len - 1)))
+ writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
+ else
+ writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
+ }
+
+ return 0;
+}
+
+static int svc_i3c_master_xfer(struct svc_i3c_master *master,
+ bool rnw, unsigned int xfer_type, u8 addr,
+ u8 *in, const u8 *out, unsigned int xfer_len,
+ unsigned int read_len, bool continued)
+{
+ u32 reg;
+ int ret;
+
+ writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+ xfer_type |
+ SVC_I3C_MCTRL_IBIRESP_NACK |
+ SVC_I3C_MCTRL_DIR(rnw) |
+ SVC_I3C_MCTRL_ADDR(addr) |
+ SVC_I3C_MCTRL_RDTERM(read_len),
+ master->regs + SVC_I3C_MCTRL);
+
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
+ if (ret)
+ goto emit_stop;
+
+ if (rnw)
+ ret = svc_i3c_master_read(master, in, xfer_len);
+ else
+ ret = svc_i3c_master_write(master, out, xfer_len);
+ if (ret)
+ goto emit_stop;
+
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
+ if (ret)
+ goto emit_stop;
+
+ if (!continued)
+ svc_i3c_master_emit_stop(master);
+
+ return 0;
+
+emit_stop:
+ svc_i3c_master_emit_stop(master);
+ svc_i3c_master_clear_merrwarn(master);
+
+ return ret;
+}
+
+static struct svc_i3c_xfer *
+svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
+{
+ struct svc_i3c_xfer *xfer;
+
+ xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
+ if (!xfer)
+ return NULL;
+
+ INIT_LIST_HEAD(&xfer->node);
+ xfer->ncmds = ncmds;
+ xfer->ret = -ETIMEDOUT;
+
+ return xfer;
+}
+
+static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
+{
+ kfree(xfer);
+}
+
+static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
+ struct svc_i3c_xfer *xfer)
+{
+ if (master->xferqueue.cur == xfer)
+ master->xferqueue.cur = NULL;
+ else
+ list_del_init(&xfer->node);
+}
+
+static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
+ struct svc_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ svc_i3c_master_dequeue_xfer_locked(master, xfer);
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+}
+
+static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
+{
+ struct svc_i3c_xfer *xfer = master->xferqueue.cur;
+ int ret, i;
+
+ if (!xfer)
+ return;
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct svc_i3c_cmd *cmd = &xfer->cmds[i];
+
+ ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
+ cmd->addr, cmd->in, cmd->out,
+ cmd->len, cmd->read_len,
+ cmd->continued);
+ if (ret)
+ break;
+ }
+
+ xfer->ret = ret;
+ complete(&xfer->comp);
+
+ if (ret < 0)
+ svc_i3c_master_dequeue_xfer_locked(master, xfer);
+
+ xfer = list_first_entry_or_null(&master->xferqueue.list,
+ struct svc_i3c_xfer,
+ node);
+ if (xfer)
+ list_del_init(&xfer->node);
+
+ master->xferqueue.cur = xfer;
+ svc_i3c_master_start_xfer_locked(master);
+}
+
+static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
+ struct svc_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ init_completion(&xfer->comp);
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ if (master->xferqueue.cur) {
+ list_add_tail(&xfer->node, &master->xferqueue.list);
+ } else {
+ master->xferqueue.cur = xfer;
+ svc_i3c_master_start_xfer_locked(master);
+ }
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+}
+
+static bool
+svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
+ const struct i3c_ccc_cmd *cmd)
+{
+ /* No software support for CCC commands targeting more than one slave */
+ return (cmd->ndests == 1);
+}
+
+static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
+ struct i3c_ccc_cmd *ccc)
+{
+ unsigned int xfer_len = ccc->dests[0].payload.len + 1;
+ struct svc_i3c_xfer *xfer;
+ struct svc_i3c_cmd *cmd;
+ u8 *buf;
+ int ret;
+
+ xfer = svc_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ buf = kmalloc(xfer_len, GFP_KERNEL);
+ if (!buf) {
+ svc_i3c_master_free_xfer(xfer);
+ return -ENOMEM;
+ }
+
+ buf[0] = ccc->id;
+ memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);
+
+ xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
+
+ cmd = &xfer->cmds[0];
+ cmd->addr = ccc->dests[0].addr;
+ cmd->rnw = ccc->rnw;
+ cmd->in = NULL;
+ cmd->out = buf;
+ cmd->len = xfer_len;
+ cmd->read_len = 0;
+ cmd->continued = false;
+
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ kfree(buf);
+ svc_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
+ struct i3c_ccc_cmd *ccc)
+{
+ unsigned int xfer_len = ccc->dests[0].payload.len;
+ unsigned int read_len = ccc->rnw ? xfer_len : 0;
+ struct svc_i3c_xfer *xfer;
+ struct svc_i3c_cmd *cmd;
+ int ret;
+
+ xfer = svc_i3c_master_alloc_xfer(master, 2);
+ if (!xfer)
+ return -ENOMEM;
+
+ xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
+
+ /* Broadcasted message */
+ cmd = &xfer->cmds[0];
+ cmd->addr = I3C_BROADCAST_ADDR;
+ cmd->rnw = 0;
+ cmd->in = NULL;
+ cmd->out = &ccc->id;
+ cmd->len = 1;
+ cmd->read_len = 0;
+ cmd->continued = true;
+
+ /* Directed message */
+ cmd = &xfer->cmds[1];
+ cmd->addr = ccc->dests[0].addr;
+ cmd->rnw = ccc->rnw;
+ cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
+ cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
+ cmd->len = xfer_len;
+ cmd->read_len = read_len;
+ cmd->continued = false;
+
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ svc_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *cmd)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ bool broadcast = cmd->id < 0x80;
+
+ if (broadcast)
+ return svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
+ else
+ return svc_i3c_master_send_direct_ccc_cmd(master, cmd);
+}
+
+static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct svc_i3c_xfer *xfer;
+ int ret, i;
+
+ xfer = svc_i3c_master_alloc_xfer(master, nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
+
+ for (i = 0; i < nxfers; i++) {
+ struct svc_i3c_cmd *cmd = &xfer->cmds[i];
+
+ cmd->addr = master->addrs[data->index];
+ cmd->rnw = xfers[i].rnw;
+ cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
+ cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
+ cmd->len = xfers[i].len;
+ cmd->read_len = xfers[i].rnw ? xfers[i].len : 0;
+ cmd->continued = (i + 1) < nxfers;
+ }
+
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ svc_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ const struct i2c_msg *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+ struct svc_i3c_xfer *xfer;
+ int ret, i;
+
+ xfer = svc_i3c_master_alloc_xfer(master, nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
+
+ for (i = 0; i < nxfers; i++) {
+ struct svc_i3c_cmd *cmd = &xfer->cmds[i];
+
+ cmd->addr = master->addrs[data->index];
+ cmd->rnw = xfers[i].flags & I2C_M_RD;
+ cmd->in = cmd->rnw ? xfers[i].buf : NULL;
+ cmd->out = cmd->rnw ? NULL : xfers[i].buf;
+ cmd->len = xfers[i].len;
+ cmd->read_len = cmd->rnw ? xfers[i].len : 0;
+ cmd->continued = (i + 1 < nxfers);
+ }
+
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ svc_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+ unsigned int i;
+
+ if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
+ dev_err(master->dev, "IBI max payload %d should be <= %d\n",
+ dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
+ return -ERANGE;
+ }
+
+ data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(data->ibi_pool))
+ return PTR_ERR(data->ibi_pool);
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ for (i = 0; i < master->ibi.num_slots; i++) {
+ if (!master->ibi.slots[i]) {
+ data->ibi = i;
+ master->ibi.slots[i] = dev;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ if (i < master->ibi.num_slots)
+ return 0;
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+ data->ibi_pool = NULL;
+
+ return -ENOSPC;
+}
+
+static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ master->ibi.slots[data->ibi] = NULL;
+ data->ibi = -1;
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+}
+
+static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+
+ return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+}
+
+static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+
+ return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+}
+
+static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
+}
+
+static const struct i3c_master_controller_ops svc_i3c_master_ops = {
+ .bus_init = svc_i3c_master_bus_init,
+ .bus_cleanup = svc_i3c_master_bus_cleanup,
+ .attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
+ .detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
+ .reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
+ .attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
+ .detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
+ .do_daa = svc_i3c_master_do_daa,
+ .supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
+ .send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
+ .priv_xfers = svc_i3c_master_priv_xfers,
+ .i2c_xfers = svc_i3c_master_i2c_xfers,
+ .request_ibi = svc_i3c_master_request_ibi,
+ .free_ibi = svc_i3c_master_free_ibi,
+ .recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
+ .enable_ibi = svc_i3c_master_enable_ibi,
+ .disable_ibi = svc_i3c_master_disable_ibi,
+};
+
+static void svc_i3c_master_reset(struct svc_i3c_master *master)
+{
+ u32 reg;
+
+ /* Clear pending warnings */
+ writel(readl(master->regs + SVC_I3C_MERRWARN),
+ master->regs + SVC_I3C_MERRWARN);
+
+ /* Set RX and TX trigger levels, flush FIFOs */
+ reg = SVC_I3C_MDATACTRL_FLUSHTB |
+ SVC_I3C_MDATACTRL_FLUSHRB |
+ SVC_I3C_MDATACTRL_UNLOCK_TRIG |
+ SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
+ SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
+ writel(reg, master->regs + SVC_I3C_MDATACTRL);
+
+ svc_i3c_master_disable_interrupts(master);
+}
+
+static int svc_i3c_master_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct svc_i3c_master *master;
+ int ret;
+
+ master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(master->regs))
+ return PTR_ERR(master->regs);
+
+ master->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(master->pclk))
+ return PTR_ERR(master->pclk);
+
+ master->fclk = devm_clk_get(dev, "fast_clk");
+ if (IS_ERR(master->fclk))
+ return PTR_ERR(master->fclk);
+
+ master->sclk = devm_clk_get(dev, "slow_clk");
+ if (IS_ERR(master->sclk))
+ return PTR_ERR(master->sclk);
+
+ master->irq = platform_get_irq(pdev, 0);
+ if (master->irq <= 0)
+ return -ENOENT;
+
+ master->dev = dev;
+
+ svc_i3c_master_reset(master);
+
+ ret = clk_prepare_enable(master->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(master->fclk);
+ if (ret)
+ goto err_disable_pclk;
+
+ ret = clk_prepare_enable(master->sclk);
+ if (ret)
+ goto err_disable_fclk;
+
+ INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
+ INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
+ ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
+ IRQF_NO_SUSPEND, "svc-i3c-irq", master);
+ if (ret)
+ goto err_disable_sclk;
+
+ master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);
+
+ spin_lock_init(&master->xferqueue.lock);
+ INIT_LIST_HEAD(&master->xferqueue.list);
+
+ spin_lock_init(&master->ibi.lock);
+ master->ibi.num_slots = SVC_I3C_MAX_DEVS;
+ master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
+ sizeof(*master->ibi.slots),
+ GFP_KERNEL);
+ if (!master->ibi.slots) {
+ ret = -ENOMEM;
+ goto err_disable_sclk;
+ }
+
+ platform_set_drvdata(pdev, master);
+
+ /* Register the master */
+ ret = i3c_master_register(&master->base, &pdev->dev,
+ &svc_i3c_master_ops, false);
+ if (ret)
+ goto err_disable_sclk;
+
+ return 0;
+
+err_disable_sclk:
+ clk_disable_unprepare(master->sclk);
+
+err_disable_fclk:
+ clk_disable_unprepare(master->fclk);
+
+err_disable_pclk:
+ clk_disable_unprepare(master->pclk);
+
+ return ret;
+}
+
+static int svc_i3c_master_remove(struct platform_device *pdev)
+{
+ struct svc_i3c_master *master = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i3c_master_unregister(&master->base);
+ if (ret)
+ return ret;
+
+ free_irq(master->irq, master);
+ clk_disable_unprepare(master->pclk);
+ clk_disable_unprepare(master->fclk);
+ clk_disable_unprepare(master->sclk);
+
+ return 0;
+}
+
+static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
+ { .compatible = "silvaco,i3c-master" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver svc_i3c_master = {
+ .probe = svc_i3c_master_probe,
+ .remove = svc_i3c_master_remove,
+ .driver = {
+ .name = "silvaco-i3c-master",
+ .of_match_table = svc_i3c_master_of_match_tbl,
+ },
+};
+module_platform_driver(svc_i3c_master);
+
+MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c
index 77af4c1a3f38..bb86d84558d9 100644
--- a/drivers/ide/falconide.c
+++ b/drivers/ide/falconide.c
@@ -164,6 +164,7 @@ static int __init falconide_init(struct platform_device *pdev)
if (rc)
goto err_free;
+ platform_set_drvdata(pdev, host);
return 0;
err_free:
ide_host_free(host);
@@ -174,7 +175,7 @@ err:
static int falconide_remove(struct platform_device *pdev)
{
- struct ide_host *host = dev_get_drvdata(&pdev->dev);
+ struct ide_host *host = platform_get_drvdata(pdev);
ide_host_remove(host);
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 013ad33fbbc8..a1ce9f5ac3aa 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -107,7 +107,7 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
memcpy(scsi_req(rq)->cmd, pc->c, 12);
if (drive->media == ide_tape)
scsi_req(rq)->cmd[13] = REQ_IDETAPE_PC1;
- blk_execute_rq(drive->queue, disk, rq, 0);
+ blk_execute_rq(disk, rq, 0);
error = scsi_req(rq)->result ? -EIO : 0;
put_req:
blk_put_request(rq);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 25d2d88e82ad..cffbcc27a34c 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -467,7 +467,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
}
}
- blk_execute_rq(drive->queue, info->disk, rq, 0);
+ blk_execute_rq(info->disk, rq, 0);
error = scsi_req(rq)->result ? -EIO : 0;
if (buffer)
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 46f2df288c6a..011eab9c69b7 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -299,7 +299,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
ide_req(rq)->type = ATA_PRIV_MISC;
rq->rq_flags = RQF_QUIET;
- blk_execute_rq(drive->queue, cd->disk, rq, 0);
+ blk_execute_rq(cd->disk, rq, 0);
ret = scsi_req(rq)->result ? -EIO : 0;
blk_put_request(rq);
/*
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index f2f93ed40356..ca1d4b3d3878 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -173,7 +173,7 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
*(int *)&scsi_req(rq)->cmd[1] = arg;
ide_req(rq)->special = setting->set;
- blk_execute_rq(q, NULL, rq, 0);
+ blk_execute_rq(NULL, rq, 0);
ret = scsi_req(rq)->result;
blk_put_request(rq);
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 34b9441084f8..8413731c6259 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -482,7 +482,7 @@ static int set_multcount(ide_drive_t *drive, int arg)
drive->mult_req = arg;
drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
- blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_execute_rq(NULL, rq, 0);
blk_put_request(rq);
return (drive->mult_count == arg) ? 0 : -EIO;
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index 58994da10c06..43fbc37d85c3 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -137,7 +137,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, void __user *argp)
rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
ide_req(rq)->type = ATA_PRIV_TASKFILE;
- blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_execute_rq(NULL, rq, 0);
err = scsi_req(rq)->result ? -EIO : 0;
blk_put_request(rq);
@@ -235,7 +235,7 @@ static int generic_drive_reset(ide_drive_t *drive)
ide_req(rq)->type = ATA_PRIV_MISC;
scsi_req(rq)->cmd_len = 1;
scsi_req(rq)->cmd[0] = REQ_DRIVE_RESET;
- blk_execute_rq(drive->queue, NULL, rq, 1);
+ blk_execute_rq(NULL, rq, 1);
ret = scsi_req(rq)->result;
blk_put_request(rq);
return ret;
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 8af7af6001eb..a80a0f28f7b9 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -37,7 +37,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
scsi_req(rq)->cmd_len = 1;
ide_req(rq)->type = ATA_PRIV_MISC;
ide_req(rq)->special = &timeout;
- blk_execute_rq(q, NULL, rq, 1);
+ blk_execute_rq(NULL, rq, 1);
rc = scsi_req(rq)->result ? -EIO : 0;
blk_put_request(rq);
if (rc)
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 82ab308f1aaf..d680b3e3295f 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -27,7 +27,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
mesg.event = PM_EVENT_FREEZE;
rqpm.pm_state = mesg.event;
- blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_execute_rq(NULL, rq, 0);
ret = scsi_req(rq)->result ? -EIO : 0;
blk_put_request(rq);
@@ -50,7 +50,7 @@ static int ide_pm_execute_rq(struct request *rq)
blk_mq_end_request(rq, BLK_STS_OK);
return -ENXIO;
}
- blk_execute_rq(q, NULL, rq, true);
+ blk_execute_rq(NULL, rq, true);
return scsi_req(rq)->result ? -EIO : 0;
}
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 88b96437b22e..fa05e7e7d609 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -868,7 +868,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
goto out_put;
}
- blk_execute_rq(drive->queue, tape->disk, rq, 0);
+ blk_execute_rq(tape->disk, rq, 0);
/* calculate the number of transferred bytes and update buffer state */
size -= scsi_req(rq)->resid_len;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index d016cbe68cba..6665fc4724b9 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -443,7 +443,7 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
ide_req(rq)->special = cmd;
cmd->rq = rq;
- blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_execute_rq(NULL, rq, 0);
error = scsi_req(rq)->result ? -EIO : 0;
put_req:
blk_put_request(rq);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 28f93b9aa51b..3273360f30f7 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -37,7 +37,7 @@
*/
/* un-comment DEBUG to enable pr_debug() statements */
-#define DEBUG
+/* #define DEBUG */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 4c5e594024f8..5d63ed19e6e2 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -23,6 +23,7 @@ enum accel_3d_channel {
ACCEL_3D_CHANNEL_MAX,
};
+#define CHANNEL_SCAN_INDEX_TIMESTAMP ACCEL_3D_CHANNEL_MAX
struct accel_3d_state {
struct hid_sensor_hub_callbacks callbacks;
struct hid_sensor_common common_attributes;
@@ -75,7 +76,7 @@ static const struct iio_chan_spec accel_3d_channels[] = {
BIT(IIO_CHAN_INFO_HYSTERESIS),
.scan_index = CHANNEL_SCAN_INDEX_Z,
},
- IIO_CHAN_SOFT_TIMESTAMP(3)
+ IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP)
};
/* Channel definitions */
@@ -110,7 +111,8 @@ static const struct iio_chan_spec gravity_channels[] = {
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS),
.scan_index = CHANNEL_SCAN_INDEX_Z,
- }
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP),
};
/* Adjust channel real bits based on report descriptor */
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index e92c7e6766e1..2fadafc860fd 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -14,6 +14,7 @@
#include <linux/acpi.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
@@ -133,6 +134,7 @@ enum kx_acpi_type {
};
struct kxcjk1013_data {
+ struct regulator_bulk_data regulators[2];
struct i2c_client *client;
struct iio_trigger *dready_trig;
struct iio_trigger *motion_trig;
@@ -1300,6 +1302,13 @@ static const char *kxcjk1013_match_acpi_device(struct device *dev,
return dev_name(dev);
}
+static void kxcjk1013_disable_regulators(void *d)
+{
+ struct kxcjk1013_data *data = d;
+
+ regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators);
+}
+
static int kxcjk1013_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1330,6 +1339,29 @@ static int kxcjk1013_probe(struct i2c_client *client,
return ret;
}
+ data->regulators[0].supply = "vdd";
+ data->regulators[1].supply = "vddio";
+ ret = devm_regulator_bulk_get(&client->dev, ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to get regulators\n");
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&client->dev, kxcjk1013_disable_regulators, data);
+ if (ret)
+ return ret;
+
+ /*
+ * A typical delay of 10ms is required for powering up
+ * according to the data sheets of supported chips.
+ * Hence double that to play safe.
+ */
+ msleep(20);
+
if (id) {
data->chipset = (enum kx_chipset)(id->driver_data);
name = id->name;
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 15587a1bc80d..bf7d22fa4be2 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -1228,8 +1228,15 @@ config XILINX_XADC
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- Say yes here to have support for the Xilinx XADC. The driver does support
- both the ZYNQ interface to the XADC as well as the AXI-XADC interface.
+ Say yes here to have support for the Xilinx 7 Series XADC or
+ UltraScale/UltraScale+ System Management Wizard.
+
+ For the 7 Series the driver supports both the ZYNQ interface
+ to the XADC and the AXI-XADC interface.
+
+ The driver also supports the Xilinx System Management Wizard IP core
+ that can be used to access the System Monitor ADC on the Xilinx
+ UltraScale and UltraScale+ FPGAs.
The driver can also be build as a module. If so, the module will be called
xilinx-xadc.
diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c
index 1bb987a4acba..6f9a3e2d5533 100644
--- a/drivers/iio/adc/ab8500-gpadc.c
+++ b/drivers/iio/adc/ab8500-gpadc.c
@@ -1108,10 +1108,14 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
return gpadc->irq_sw;
}
- gpadc->irq_hw = platform_get_irq_byname(pdev, "HW_CONV_END");
- if (gpadc->irq_hw < 0) {
- dev_err(dev, "failed to get platform hw_conv_end irq\n");
- return gpadc->irq_hw;
+ if (is_ab8500(gpadc->ab8500)) {
+ gpadc->irq_hw = platform_get_irq_byname(pdev, "HW_CONV_END");
+ if (gpadc->irq_hw < 0) {
+ dev_err(dev, "failed to get platform hw_conv_end irq\n");
+ return gpadc->irq_hw;
+ }
+ } else {
+ gpadc->irq_hw = 0;
}
/* Initialize completion used to notify completion of conversion */
@@ -1128,14 +1132,16 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_request_threaded_irq(dev, gpadc->irq_hw, NULL,
- ab8500_bm_gpadcconvend_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT,
- "ab8500-gpadc-hw", gpadc);
- if (ret < 0) {
- dev_err(dev,
- "Failed to request hw conversion irq: %d\n",
- gpadc->irq_hw);
- return ret;
+ if (gpadc->irq_hw) {
+ ret = devm_request_threaded_irq(dev, gpadc->irq_hw, NULL,
+ ab8500_bm_gpadcconvend_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ "ab8500-gpadc-hw", gpadc);
+ if (ret < 0) {
+ dev_err(dev,
+ "Failed to request hw conversion irq: %d\n",
+ gpadc->irq_hw);
+ return ret;
+ }
}
/* The VTVout LDO used to power the AB8500 GPADC */
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index 66c55ae67791..17402714b387 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -67,6 +67,7 @@ enum ad7476_supported_device_ids {
ID_ADS7866,
ID_ADS7867,
ID_ADS7868,
+ ID_LTC2314_14,
};
static void ad7091_convst(struct ad7476_state *st)
@@ -250,6 +251,10 @@ static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
.channel[0] = ADS786X_CHAN(8),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
+ [ID_LTC2314_14] = {
+ .channel[0] = AD7940_CHAN(14),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ },
};
static const struct iio_info ad7476_info = {
@@ -365,6 +370,7 @@ static const struct spi_device_id ad7476_id[] = {
{"ads7866", ID_ADS7866},
{"ads7867", ID_ADS7867},
{"ads7868", ID_ADS7868},
+ {"ltc2314-14", ID_LTC2314_14},
{}
};
MODULE_DEVICE_TABLE(spi, ad7476_id);
diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
index 7e108da7d255..0610bf254771 100644
--- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c
+++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
@@ -10,6 +10,7 @@
* Author: Linus Walleij <linus.walleij@linaro.org>
*/
+#include <linux/iio/adc/qcom-vadc-common.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/module.h>
@@ -21,8 +22,6 @@
#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>
-#include "qcom-vadc-common.h"
-
/*
* Definitions for the "user processor" registers lifted from the v3.4
* Qualcomm tree. Their kernel has two out-of-tree drivers for the ADC:
diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
index c10aa28be70a..87438d1e5c0b 100644
--- a/drivers/iio/adc/qcom-spmi-adc5.c
+++ b/drivers/iio/adc/qcom-spmi-adc5.c
@@ -7,6 +7,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/iio/adc/qcom-vadc-common.h>
#include <linux/iio/iio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -14,12 +15,12 @@
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <dt-bindings/iio/qcom,spmi-vadc.h>
-#include "qcom-vadc-common.h"
#define ADC5_USR_REVISION1 0x0
#define ADC5_USR_STATUS1 0x8
@@ -154,18 +155,6 @@ struct adc5_chip {
const struct adc5_data *data;
};
-static const struct vadc_prescale_ratio adc5_prescale_ratios[] = {
- {.num = 1, .den = 1},
- {.num = 1, .den = 3},
- {.num = 1, .den = 4},
- {.num = 1, .den = 6},
- {.num = 1, .den = 20},
- {.num = 1, .den = 8},
- {.num = 10, .den = 81},
- {.num = 1, .den = 10},
- {.num = 1, .den = 16}
-};
-
static int adc5_read(struct adc5_chip *adc, u16 offset, u8 *data, int len)
{
return regmap_bulk_read(adc->regmap, adc->base + offset, data, len);
@@ -181,55 +170,6 @@ static int adc5_masked_write(struct adc5_chip *adc, u16 offset, u8 mask, u8 val)
return regmap_update_bits(adc->regmap, adc->base + offset, mask, val);
}
-static int adc5_prescaling_from_dt(u32 num, u32 den)
-{
- unsigned int pre;
-
- for (pre = 0; pre < ARRAY_SIZE(adc5_prescale_ratios); pre++)
- if (adc5_prescale_ratios[pre].num == num &&
- adc5_prescale_ratios[pre].den == den)
- break;
-
- if (pre == ARRAY_SIZE(adc5_prescale_ratios))
- return -EINVAL;
-
- return pre;
-}
-
-static int adc5_hw_settle_time_from_dt(u32 value,
- const unsigned int *hw_settle)
-{
- unsigned int i;
-
- for (i = 0; i < VADC_HW_SETTLE_SAMPLES_MAX; i++) {
- if (value == hw_settle[i])
- return i;
- }
-
- return -EINVAL;
-}
-
-static int adc5_avg_samples_from_dt(u32 value)
-{
- if (!is_power_of_2(value) || value > ADC5_AVG_SAMPLES_MAX)
- return -EINVAL;
-
- return __ffs(value);
-}
-
-static int adc5_decimation_from_dt(u32 value,
- const unsigned int *decimation)
-{
- unsigned int i;
-
- for (i = 0; i < ADC5_DECIMATION_SAMPLES_MAX; i++) {
- if (value == decimation[i])
- return i;
- }
-
- return -EINVAL;
-}
-
static int adc5_read_voltage_data(struct adc5_chip *adc, u16 *data)
{
int ret;
@@ -511,7 +451,7 @@ static int adc_read_raw_common(struct iio_dev *indio_dev,
return ret;
ret = qcom_adc5_hw_scale(prop->scale_fn_type,
- &adc5_prescale_ratios[prop->prescale],
+ prop->prescale,
adc->data,
adc_code_volt, val);
if (ret)
@@ -717,7 +657,7 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
ret = of_property_read_u32(node, "qcom,decimation", &value);
if (!ret) {
- ret = adc5_decimation_from_dt(value, data->decimation);
+ ret = qcom_adc5_decimation_from_dt(value, data->decimation);
if (ret < 0) {
dev_err(dev, "%02x invalid decimation %d\n",
chan, value);
@@ -730,7 +670,7 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
ret = of_property_read_u32_array(node, "qcom,pre-scaling", varr, 2);
if (!ret) {
- ret = adc5_prescaling_from_dt(varr[0], varr[1]);
+ ret = qcom_adc5_prescaling_from_dt(varr[0], varr[1]);
if (ret < 0) {
dev_err(dev, "%02x invalid pre-scaling <%d %d>\n",
chan, varr[0], varr[1]);
@@ -759,11 +699,9 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
if ((dig_version[0] >= ADC5_HW_SETTLE_DIFF_MINOR &&
dig_version[1] >= ADC5_HW_SETTLE_DIFF_MAJOR) ||
adc->data->info == &adc7_info)
- ret = adc5_hw_settle_time_from_dt(value,
- data->hw_settle_2);
+ ret = qcom_adc5_hw_settle_time_from_dt(value, data->hw_settle_2);
else
- ret = adc5_hw_settle_time_from_dt(value,
- data->hw_settle_1);
+ ret = qcom_adc5_hw_settle_time_from_dt(value, data->hw_settle_1);
if (ret < 0) {
dev_err(dev, "%02x invalid hw-settle-time %d us\n",
@@ -777,7 +715,7 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
ret = of_property_read_u32(node, "qcom,avg-samples", &value);
if (!ret) {
- ret = adc5_avg_samples_from_dt(value);
+ ret = qcom_adc5_avg_samples_from_dt(value);
if (ret < 0) {
dev_err(dev, "%02x invalid avg-samples %d\n",
chan, value);
@@ -870,8 +808,6 @@ static int adc5_get_dt_data(struct adc5_chip *adc, struct device_node *node)
struct adc5_channel_prop prop, *chan_props;
struct device_node *child;
unsigned int index = 0;
- const struct of_device_id *id;
- const struct adc5_data *data;
int ret;
adc->nchannels = of_get_available_child_count(node);
@@ -890,24 +826,21 @@ static int adc5_get_dt_data(struct adc5_chip *adc, struct device_node *node)
chan_props = adc->chan_props;
iio_chan = adc->iio_chans;
- id = of_match_node(adc5_match_table, node);
- if (id)
- data = id->data;
- else
- data = &adc5_data_pmic;
- adc->data = data;
+ adc->data = of_device_get_match_data(adc->dev);
+ if (!adc->data)
+ adc->data = &adc5_data_pmic;
for_each_available_child_of_node(node, child) {
- ret = adc5_get_dt_channel_data(adc, &prop, child, data);
+ ret = adc5_get_dt_channel_data(adc, &prop, child, adc->data);
if (ret) {
of_node_put(child);
return ret;
}
prop.scale_fn_type =
- data->adc_chans[prop.channel].scale_fn_type;
+ adc->data->adc_chans[prop.channel].scale_fn_type;
*chan_props = prop;
- adc_chan = &data->adc_chans[prop.channel];
+ adc_chan = &adc->data->adc_chans[prop.channel];
iio_chan->channel = prop.channel;
iio_chan->datasheet_name = prop.datasheet_name;
diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
index b0388f8a69f4..05ff948372b3 100644
--- a/drivers/iio/adc/qcom-spmi-vadc.c
+++ b/drivers/iio/adc/qcom-spmi-vadc.c
@@ -7,6 +7,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/iio/adc/qcom-vadc-common.h>
#include <linux/iio/iio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -20,8 +21,6 @@
#include <dt-bindings/iio/qcom,spmi-vadc.h>
-#include "qcom-vadc-common.h"
-
/* VADC register and bit definitions */
#define VADC_REVISION2 0x1
#define VADC_REVISION2_SUPPORTED_VADC 1
diff --git a/drivers/iio/adc/qcom-vadc-common.c b/drivers/iio/adc/qcom-vadc-common.c
index 5113aaa6ba67..14723896aab2 100644
--- a/drivers/iio/adc/qcom-vadc-common.c
+++ b/drivers/iio/adc/qcom-vadc-common.c
@@ -2,50 +2,61 @@
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
+#include <linux/fixp-arith.h>
+#include <linux/iio/adc/qcom-vadc-common.h>
#include <linux/math64.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/units.h>
-#include "qcom-vadc-common.h"
+/**
+ * struct vadc_map_pt - Map the graph representation for ADC channel
+ * @x: Represents the ADC digitized code.
+ * @y: Represents the physical data, which can be temperature, voltage,
+ * or resistance.
+ */
+struct vadc_map_pt {
+ s32 x;
+ s32 y;
+};
/* Voltage to temperature */
static const struct vadc_map_pt adcmap_100k_104ef_104fb[] = {
- {1758, -40},
- {1742, -35},
- {1719, -30},
- {1691, -25},
- {1654, -20},
- {1608, -15},
- {1551, -10},
- {1483, -5},
- {1404, 0},
- {1315, 5},
- {1218, 10},
- {1114, 15},
- {1007, 20},
- {900, 25},
- {795, 30},
- {696, 35},
- {605, 40},
- {522, 45},
- {448, 50},
- {383, 55},
- {327, 60},
- {278, 65},
- {237, 70},
- {202, 75},
- {172, 80},
- {146, 85},
- {125, 90},
- {107, 95},
- {92, 100},
- {79, 105},
- {68, 110},
- {59, 115},
- {51, 120},
- {44, 125}
+ {1758, -40000 },
+ {1742, -35000 },
+ {1719, -30000 },
+ {1691, -25000 },
+ {1654, -20000 },
+ {1608, -15000 },
+ {1551, -10000 },
+ {1483, -5000 },
+ {1404, 0 },
+ {1315, 5000 },
+ {1218, 10000 },
+ {1114, 15000 },
+ {1007, 20000 },
+ {900, 25000 },
+ {795, 30000 },
+ {696, 35000 },
+ {605, 40000 },
+ {522, 45000 },
+ {448, 50000 },
+ {383, 55000 },
+ {327, 60000 },
+ {278, 65000 },
+ {237, 70000 },
+ {202, 75000 },
+ {172, 80000 },
+ {146, 85000 },
+ {125, 90000 },
+ {107, 95000 },
+ {92, 100000 },
+ {79, 105000 },
+ {68, 110000 },
+ {59, 115000 },
+ {51, 120000 },
+ {44, 125000 }
};
/*
@@ -90,18 +101,18 @@ static const struct vadc_map_pt adcmap_100k_104ef_104fb_1875_vref[] = {
};
static const struct vadc_map_pt adcmap7_die_temp[] = {
- { 433700, 1967},
- { 473100, 1964},
- { 512400, 1957},
- { 551500, 1949},
- { 590500, 1940},
- { 629300, 1930},
- { 667900, 1921},
- { 706400, 1910},
- { 744600, 1896},
- { 782500, 1878},
- { 820100, 1859},
- { 857300, 0},
+ { 857300, 160000 },
+ { 820100, 140000 },
+ { 782500, 120000 },
+ { 744600, 100000 },
+ { 706400, 80000 },
+ { 667900, 60000 },
+ { 629300, 40000 },
+ { 590500, 20000 },
+ { 551500, 0 },
+ { 512400, -20000 },
+ { 473100, -40000 },
+ { 433700, -60000 },
};
/*
@@ -278,6 +289,18 @@ static const struct vadc_map_pt adcmap7_100k[] = {
{ 2420, 130048 }
};
+static const struct vadc_prescale_ratio adc5_prescale_ratios[] = {
+ {.num = 1, .den = 1},
+ {.num = 1, .den = 3},
+ {.num = 1, .den = 4},
+ {.num = 1, .den = 6},
+ {.num = 1, .den = 20},
+ {.num = 1, .den = 8},
+ {.num = 10, .den = 81},
+ {.num = 1, .den = 10},
+ {.num = 1, .den = 16}
+};
+
static int qcom_vadc_scale_hw_calib_volt(
const struct vadc_prescale_ratio *prescale,
const struct adc5_data *data,
@@ -323,48 +346,50 @@ static struct qcom_adc5_scale_type scale_adc5_fn[] = {
static int qcom_vadc_map_voltage_temp(const struct vadc_map_pt *pts,
u32 tablesize, s32 input, int *output)
{
- bool descending = 1;
u32 i = 0;
if (!pts)
return -EINVAL;
- /* Check if table is descending or ascending */
- if (tablesize > 1) {
- if (pts[0].x < pts[1].x)
- descending = 0;
- }
-
- while (i < tablesize) {
- if ((descending) && (pts[i].x < input)) {
- /* table entry is less than measured*/
- /* value and table is descending, stop */
- break;
- } else if ((!descending) &&
- (pts[i].x > input)) {
- /* table entry is greater than measured*/
- /*value and table is ascending, stop */
- break;
- }
+ while (i < tablesize && pts[i].x > input)
i++;
- }
if (i == 0) {
*output = pts[0].y;
} else if (i == tablesize) {
*output = pts[tablesize - 1].y;
} else {
- /* result is between search_index and search_index-1 */
/* interpolate linearly */
- *output = (((s32)((pts[i].y - pts[i - 1].y) *
- (input - pts[i - 1].x)) /
- (pts[i].x - pts[i - 1].x)) +
- pts[i - 1].y);
+ *output = fixp_linear_interpolate(pts[i - 1].x, pts[i - 1].y,
+ pts[i].x, pts[i].y,
+ input);
}
return 0;
}
+static s32 qcom_vadc_map_temp_voltage(const struct vadc_map_pt *pts,
+ u32 tablesize, int input)
+{
+ u32 i = 0;
+
+ /*
+ * The table must be sorted; find the interval of 'y' which contains
+ * 'input' and map it to the proper 'x' value.
+ */
+ while (i < tablesize && pts[i].y < input)
+ i++;
+
+ if (i == 0)
+ return pts[0].x;
+ if (i == tablesize)
+ return pts[tablesize - 1].x;
+
+ /* interpolate linearly */
+ return fixp_linear_interpolate(pts[i - 1].y, pts[i - 1].x,
+ pts[i].y, pts[i].x, input);
+}
+
static void qcom_vadc_scale_calib(const struct vadc_linear_graph *calib_graph,
u16 adc_code,
bool absolute,
@@ -415,8 +440,6 @@ static int qcom_vadc_scale_therm(const struct vadc_linear_graph *calib_graph,
if (ret)
return ret;
- *result_mdec *= 1000;
-
return 0;
}
@@ -462,6 +485,21 @@ static int qcom_vadc_scale_chg_temp(const struct vadc_linear_graph *calib_graph,
return 0;
}
+/* convert voltage to ADC code, using 1.875V reference */
+static u16 qcom_vadc_scale_voltage_code(s32 voltage,
+ const struct vadc_prescale_ratio *prescale,
+ const u32 full_scale_code_volt,
+ unsigned int factor)
+{
+ s64 volt = voltage;
+ s64 adc_vdd_ref_mv = 1875; /* reference voltage */
+
+ volt *= prescale->num * factor * full_scale_code_volt;
+ volt = div64_s64(volt, (s64)prescale->den * adc_vdd_ref_mv * 1000);
+
+ return volt;
+}
+
static int qcom_vadc_scale_code_voltage_factor(u16 adc_code,
const struct vadc_prescale_ratio *prescale,
const struct adc5_data *data,
@@ -563,33 +601,13 @@ static int qcom_vadc7_scale_hw_calib_die_temp(
u16 adc_code, int *result_mdec)
{
- int voltage, vtemp0, temp, i;
+ int voltage;
voltage = qcom_vadc_scale_code_voltage_factor(adc_code,
prescale, data, 1);
- if (adcmap7_die_temp[0].x > voltage) {
- *result_mdec = DIE_TEMP_ADC7_SCALE_1;
- return 0;
- }
-
- if (adcmap7_die_temp[ARRAY_SIZE(adcmap7_die_temp) - 1].x <= voltage) {
- *result_mdec = DIE_TEMP_ADC7_MAX;
- return 0;
- }
-
- for (i = 0; i < ARRAY_SIZE(adcmap7_die_temp); i++)
- if (adcmap7_die_temp[i].x > voltage)
- break;
-
- vtemp0 = adcmap7_die_temp[i - 1].x;
- voltage = voltage - vtemp0;
- temp = div64_s64(voltage * DIE_TEMP_ADC7_SCALE_FACTOR,
- adcmap7_die_temp[i - 1].y);
- temp += DIE_TEMP_ADC7_SCALE_1 + (DIE_TEMP_ADC7_SCALE_2 * (i - 1));
- *result_mdec = temp;
-
- return 0;
+ return qcom_vadc_map_voltage_temp(adcmap7_die_temp, ARRAY_SIZE(adcmap7_die_temp),
+ voltage, result_mdec);
}
static int qcom_vadc_scale_hw_smb_temp(
@@ -646,11 +664,26 @@ int qcom_vadc_scale(enum vadc_scale_fn_type scaletype,
}
EXPORT_SYMBOL(qcom_vadc_scale);
+u16 qcom_adc_tm5_temp_volt_scale(unsigned int prescale_ratio,
+ u32 full_scale_code_volt, int temp)
+{
+ const struct vadc_prescale_ratio *prescale = &adc5_prescale_ratios[prescale_ratio];
+ s32 voltage;
+
+ voltage = qcom_vadc_map_temp_voltage(adcmap_100k_104ef_104fb_1875_vref,
+ ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
+ temp);
+ return qcom_vadc_scale_voltage_code(voltage, prescale, full_scale_code_volt, 1000);
+}
+EXPORT_SYMBOL(qcom_adc_tm5_temp_volt_scale);
+
int qcom_adc5_hw_scale(enum vadc_scale_fn_type scaletype,
- const struct vadc_prescale_ratio *prescale,
+ unsigned int prescale_ratio,
const struct adc5_data *data,
u16 adc_code, int *result)
{
+ const struct vadc_prescale_ratio *prescale = &adc5_prescale_ratios[prescale_ratio];
+
if (!(scaletype >= SCALE_HW_CALIB_DEFAULT &&
scaletype < SCALE_HW_CALIB_INVALID)) {
pr_err("Invalid scale type %d\n", scaletype);
@@ -662,6 +695,58 @@ int qcom_adc5_hw_scale(enum vadc_scale_fn_type scaletype,
}
EXPORT_SYMBOL(qcom_adc5_hw_scale);
+int qcom_adc5_prescaling_from_dt(u32 num, u32 den)
+{
+ unsigned int pre;
+
+ for (pre = 0; pre < ARRAY_SIZE(adc5_prescale_ratios); pre++)
+ if (adc5_prescale_ratios[pre].num == num &&
+ adc5_prescale_ratios[pre].den == den)
+ break;
+
+ if (pre == ARRAY_SIZE(adc5_prescale_ratios))
+ return -EINVAL;
+
+ return pre;
+}
+EXPORT_SYMBOL(qcom_adc5_prescaling_from_dt);
+
+int qcom_adc5_hw_settle_time_from_dt(u32 value,
+ const unsigned int *hw_settle)
+{
+ unsigned int i;
+
+ for (i = 0; i < VADC_HW_SETTLE_SAMPLES_MAX; i++) {
+ if (value == hw_settle[i])
+ return i;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(qcom_adc5_hw_settle_time_from_dt);
+
+int qcom_adc5_avg_samples_from_dt(u32 value)
+{
+ if (!is_power_of_2(value) || value > ADC5_AVG_SAMPLES_MAX)
+ return -EINVAL;
+
+ return __ffs(value);
+}
+EXPORT_SYMBOL(qcom_adc5_avg_samples_from_dt);
+
+int qcom_adc5_decimation_from_dt(u32 value, const unsigned int *decimation)
+{
+ unsigned int i;
+
+ for (i = 0; i < ADC5_DECIMATION_SAMPLES_MAX; i++) {
+ if (value == decimation[i])
+ return i;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(qcom_adc5_decimation_from_dt);
+
int qcom_vadc_decimation_from_dt(u32 value)
{
if (!is_power_of_2(value) || value < VADC_DECIMATION_MIN ||
diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
index aa32a1f385e2..301cf66de695 100644
--- a/drivers/iio/adc/sc27xx_adc.c
+++ b/drivers/iio/adc/sc27xx_adc.c
@@ -307,7 +307,7 @@ static int sc27xx_adc_convert_volt(struct sc27xx_adc_data *data, int channel,
sc27xx_adc_volt_ratio(data, channel, scale, &numerator, &denominator);
- return (volt * denominator + numerator / 2) / numerator;
+ return DIV_ROUND_CLOSEST(volt * denominator, numerator);
}
static int sc27xx_adc_read_processed(struct sc27xx_adc_data *data,
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 9d1ad6e38e85..c088cb990193 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -535,20 +535,16 @@ static int stm32_adc_core_hw_start(struct device *dev)
goto err_switches_dis;
}
- if (priv->bclk) {
- ret = clk_prepare_enable(priv->bclk);
- if (ret < 0) {
- dev_err(dev, "bus clk enable failed\n");
- goto err_regulator_disable;
- }
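+ /* bclk and aclk are optional; clk_prepare_enable() treats NULL as a no-op */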
+ ret = clk_prepare_enable(priv->bclk);
+ if (ret < 0) {
+ dev_err(dev, "bus clk enable failed\n");
+ goto err_regulator_disable;
}
- if (priv->aclk) {
- ret = clk_prepare_enable(priv->aclk);
- if (ret < 0) {
- dev_err(dev, "adc clk enable failed\n");
- goto err_bclk_disable;
- }
+ ret = clk_prepare_enable(priv->aclk);
+ if (ret < 0) {
+ dev_err(dev, "adc clk enable failed\n");
+ goto err_bclk_disable;
}
writel_relaxed(priv->ccr_bak, priv->common.base + priv->cfg->regs->ccr);
@@ -556,8 +552,7 @@ static int stm32_adc_core_hw_start(struct device *dev)
return 0;
err_bclk_disable:
- if (priv->bclk)
- clk_disable_unprepare(priv->bclk);
+ clk_disable_unprepare(priv->bclk);
err_regulator_disable:
regulator_disable(priv->vref);
err_switches_dis:
@@ -575,10 +570,8 @@ static void stm32_adc_core_hw_stop(struct device *dev)
/* Backup CCR that may be lost (depends on power state to achieve) */
priv->ccr_bak = readl_relaxed(priv->common.base + priv->cfg->regs->ccr);
- if (priv->aclk)
- clk_disable_unprepare(priv->aclk);
- if (priv->bclk)
- clk_disable_unprepare(priv->bclk);
+ clk_disable_unprepare(priv->aclk);
+ clk_disable_unprepare(priv->bclk);
regulator_disable(priv->vref);
stm32_adc_core_switches_supply_dis(priv);
regulator_disable(priv->vdda);
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index c067c994dae2..f7c53cea509a 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -546,8 +546,7 @@ static int stm32_adc_hw_stop(struct device *dev)
if (adc->cfg->unprepare)
adc->cfg->unprepare(indio_dev);
- if (adc->clk)
- clk_disable_unprepare(adc->clk);
+ clk_disable_unprepare(adc->clk);
return 0;
}
@@ -558,11 +557,9 @@ static int stm32_adc_hw_start(struct device *dev)
struct stm32_adc *adc = iio_priv(indio_dev);
int ret;
- if (adc->clk) {
- ret = clk_prepare_enable(adc->clk);
- if (ret)
- return ret;
- }
+ ret = clk_prepare_enable(adc->clk);
+ if (ret)
+ return ret;
stm32_adc_set_res(adc);
@@ -575,8 +572,7 @@ static int stm32_adc_hw_start(struct device *dev)
return 0;
err_clk_dis:
- if (adc->clk)
- clk_disable_unprepare(adc->clk);
+ clk_disable_unprepare(adc->clk);
return ret;
}
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
index 42a7377704a4..bb925a11c8ae 100644
--- a/drivers/iio/adc/stm32-dfsdm-core.c
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -117,8 +117,7 @@ static void stm32_dfsdm_clk_disable_unprepare(struct stm32_dfsdm *dfsdm)
{
struct dfsdm_priv *priv = to_stm32_dfsdm_priv(dfsdm);
- if (priv->aclk)
- clk_disable_unprepare(priv->aclk);
+ clk_disable_unprepare(priv->aclk);
clk_disable_unprepare(priv->clk);
}
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index f93c34fe5873..34800dccbf69 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
@@ -92,7 +93,12 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
#define XADC_AXI_REG_GIER 0x5c
#define XADC_AXI_REG_IPISR 0x60
#define XADC_AXI_REG_IPIER 0x68
-#define XADC_AXI_ADC_REG_OFFSET 0x200
+
+/* 7 Series */
+#define XADC_7S_AXI_ADC_REG_OFFSET 0x200
+
+/* UltraScale */
+#define XADC_US_AXI_ADC_REG_OFFSET 0x400
#define XADC_AXI_RESET_MAGIC 0xa
#define XADC_AXI_GIER_ENABLE BIT(31)
@@ -447,6 +453,12 @@ static const struct xadc_ops xadc_zynq_ops = {
.get_dclk_rate = xadc_zynq_get_dclk_rate,
.interrupt_handler = xadc_zynq_interrupt_handler,
.update_alarm = xadc_zynq_update_alarm,
+ .type = XADC_TYPE_S7,
+};
+
+static const unsigned int xadc_axi_reg_offsets[] = {
+ [XADC_TYPE_S7] = XADC_7S_AXI_ADC_REG_OFFSET,
+ [XADC_TYPE_US] = XADC_US_AXI_ADC_REG_OFFSET,
};
static int xadc_axi_read_adc_reg(struct xadc *xadc, unsigned int reg,
@@ -454,7 +466,8 @@ static int xadc_axi_read_adc_reg(struct xadc *xadc, unsigned int reg,
{
uint32_t val32;
- xadc_read_reg(xadc, XADC_AXI_ADC_REG_OFFSET + reg * 4, &val32);
+ xadc_read_reg(xadc, xadc_axi_reg_offsets[xadc->ops->type] + reg * 4,
+ &val32);
*val = val32 & 0xffff;
return 0;
@@ -463,7 +476,8 @@ static int xadc_axi_read_adc_reg(struct xadc *xadc, unsigned int reg,
static int xadc_axi_write_adc_reg(struct xadc *xadc, unsigned int reg,
uint16_t val)
{
- xadc_write_reg(xadc, XADC_AXI_ADC_REG_OFFSET + reg * 4, val);
+ xadc_write_reg(xadc, xadc_axi_reg_offsets[xadc->ops->type] + reg * 4,
+ val);
return 0;
}
@@ -541,7 +555,7 @@ static unsigned long xadc_axi_get_dclk(struct xadc *xadc)
return clk_get_rate(xadc->clk);
}
-static const struct xadc_ops xadc_axi_ops = {
+static const struct xadc_ops xadc_7s_axi_ops = {
.read = xadc_axi_read_adc_reg,
.write = xadc_axi_write_adc_reg,
.setup = xadc_axi_setup,
@@ -549,6 +563,18 @@ static const struct xadc_ops xadc_axi_ops = {
.update_alarm = xadc_axi_update_alarm,
.interrupt_handler = xadc_axi_interrupt_handler,
.flags = XADC_FLAGS_BUFFERED,
+ .type = XADC_TYPE_S7,
+};
+
+static const struct xadc_ops xadc_us_axi_ops = {
+ .read = xadc_axi_read_adc_reg,
+ .write = xadc_axi_write_adc_reg,
+ .setup = xadc_axi_setup,
+ .get_dclk_rate = xadc_axi_get_dclk,
+ .update_alarm = xadc_axi_update_alarm,
+ .interrupt_handler = xadc_axi_interrupt_handler,
+ .flags = XADC_FLAGS_BUFFERED,
+ .type = XADC_TYPE_US,
};
static int _xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
@@ -585,15 +611,22 @@ static int xadc_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *mask)
{
struct xadc *xadc = iio_priv(indio_dev);
- unsigned int n;
+ size_t new_size, n;
+ void *data;
n = bitmap_weight(mask, indio_dev->masklength);
- kfree(xadc->data);
- xadc->data = kcalloc(n, sizeof(*xadc->data), GFP_KERNEL);
- if (!xadc->data)
+ if (check_mul_overflow(n, sizeof(*xadc->data), &new_size))
+ return -ENOMEM;
+
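+ /*
+ * Resize the scan buffer for the new channel count; devm_krealloc()
+ * keeps the buffer device-managed, so no explicit kfree() is needed.
+ */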
+ data = devm_krealloc(indio_dev->dev.parent, xadc->data,
+ new_size, GFP_KERNEL);
+ if (!data)
return -ENOMEM;
+ memset(data, 0, new_size);
+ xadc->data = data;
+
return 0;
}
@@ -705,11 +738,12 @@ static const struct iio_trigger_ops xadc_trigger_ops = {
static struct iio_trigger *xadc_alloc_trigger(struct iio_dev *indio_dev,
const char *name)
{
+ struct device *dev = indio_dev->dev.parent;
struct iio_trigger *trig;
int ret;
- trig = iio_trigger_alloc("%s%d-%s", indio_dev->name,
- indio_dev->id, name);
+ trig = devm_iio_trigger_alloc(dev, "%s%d-%s", indio_dev->name,
+ indio_dev->id, name);
if (trig == NULL)
return ERR_PTR(-ENOMEM);
@@ -717,21 +751,26 @@ static struct iio_trigger *xadc_alloc_trigger(struct iio_dev *indio_dev,
trig->ops = &xadc_trigger_ops;
iio_trigger_set_drvdata(trig, iio_priv(indio_dev));
- ret = iio_trigger_register(trig);
+ ret = devm_iio_trigger_register(dev, trig);
if (ret)
- goto error_free_trig;
+ return ERR_PTR(ret);
return trig;
-
-error_free_trig:
- iio_trigger_free(trig);
- return ERR_PTR(ret);
}
static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode)
{
uint16_t val;
+ /*
+ * As per the datasheet the power-down bits are don't-care on the
+ * UltraScale, but in practice setting the power-down bit for the
+ * non-existent ADC-B powers down the main ADC, so just return without
+ * doing anything.
+ */
+ if (xadc->ops->type == XADC_TYPE_US)
+ return 0;
+
/* Powerdown the ADC-B when it is not needed. */
switch (seq_mode) {
case XADC_CONF1_SEQ_SIMULTANEOUS:
@@ -751,6 +790,10 @@ static int xadc_get_seq_mode(struct xadc *xadc, unsigned long scan_mode)
{
unsigned int aux_scan_mode = scan_mode >> 16;
+ /* UltraScale has only one ADC and supports only continuous mode */
+ if (xadc->ops->type == XADC_TYPE_US)
+ return XADC_CONF1_SEQ_CONTINUOUS;
+
if (xadc->external_mux_mode == XADC_EXTERNAL_MUX_DUAL)
return XADC_CONF1_SEQ_SIMULTANEOUS;
@@ -863,6 +906,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long info)
{
struct xadc *xadc = iio_priv(indio_dev);
+ unsigned int bits = chan->scan_type.realbits;
uint16_t val16;
int ret;
@@ -874,17 +918,17 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- val16 >>= 4;
+ val16 >>= chan->scan_type.shift;
if (chan->scan_type.sign == 'u')
*val = val16;
else
- *val = sign_extend32(val16, 11);
+ *val = sign_extend32(val16, bits - 1);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
- /* V = (val * 3.0) / 4096 */
+ /* V = (val * 3.0) / 2**bits */
switch (chan->address) {
case XADC_REG_VCCINT:
case XADC_REG_VCCAUX:
@@ -900,19 +944,19 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
*val = 1000;
break;
}
- *val2 = 12;
+ *val2 = chan->scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_TEMP:
- /* Temp in C = (val * 503.975) / 4096 - 273.15 */
+ /* Temp in C = (val * 503.975) / 2**bits - 273.15 */
*val = 503975;
- *val2 = 12;
+ *val2 = bits;
return IIO_VAL_FRACTIONAL_LOG2;
default:
return -EINVAL;
}
case IIO_CHAN_INFO_OFFSET:
/* Only the temperature channel has an offset */
- *val = -((273150 << 12) / 503975);
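+ /*
+ * Temp (mC) = raw * 503975 / 2**bits - 273150, so the offset in
+ * raw units is -273150 * 2**bits / 503975.
+ */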
+ *val = -((273150 << bits) / 503975);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
ret = xadc_read_samplerate(xadc);
@@ -1001,7 +1045,7 @@ static const struct iio_event_spec xadc_voltage_events[] = {
},
};
-#define XADC_CHAN_TEMP(_chan, _scan_index, _addr) { \
+#define XADC_CHAN_TEMP(_chan, _scan_index, _addr, _bits) { \
.type = IIO_TEMP, \
.indexed = 1, \
.channel = (_chan), \
@@ -1015,14 +1059,14 @@ static const struct iio_event_spec xadc_voltage_events[] = {
.scan_index = (_scan_index), \
.scan_type = { \
.sign = 'u', \
- .realbits = 12, \
+ .realbits = (_bits), \
.storagebits = 16, \
- .shift = 4, \
+ .shift = 16 - (_bits), \
.endianness = IIO_CPU, \
}, \
}
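+/*
+ * Samples are MSB-aligned in the 16-bit registers, hence shift is
+ * 16 - realbits: 4 on the 12-bit 7 Series, 6 on the 10-bit UltraScale.
+ */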
-#define XADC_CHAN_VOLTAGE(_chan, _scan_index, _addr, _ext, _alarm) { \
+#define XADC_CHAN_VOLTAGE(_chan, _scan_index, _addr, _bits, _ext, _alarm) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = (_chan), \
@@ -1035,41 +1079,82 @@ static const struct iio_event_spec xadc_voltage_events[] = {
.scan_index = (_scan_index), \
.scan_type = { \
.sign = ((_addr) == XADC_REG_VREFN) ? 's' : 'u', \
- .realbits = 12, \
+ .realbits = (_bits), \
.storagebits = 16, \
- .shift = 4, \
+ .shift = 16 - (_bits), \
.endianness = IIO_CPU, \
}, \
.extend_name = _ext, \
}
-static const struct iio_chan_spec xadc_channels[] = {
- XADC_CHAN_TEMP(0, 8, XADC_REG_TEMP),
- XADC_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
- XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
- XADC_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
- XADC_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true),
- XADC_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true),
- XADC_CHAN_VOLTAGE(5, 7, XADC_REG_VCCO_DDR, "vccoddr", true),
- XADC_CHAN_VOLTAGE(6, 12, XADC_REG_VREFP, "vrefp", false),
- XADC_CHAN_VOLTAGE(7, 13, XADC_REG_VREFN, "vrefn", false),
- XADC_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, NULL, false),
- XADC_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), NULL, false),
- XADC_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), NULL, false),
- XADC_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), NULL, false),
- XADC_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), NULL, false),
- XADC_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), NULL, false),
- XADC_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), NULL, false),
- XADC_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), NULL, false),
- XADC_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), NULL, false),
- XADC_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), NULL, false),
- XADC_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), NULL, false),
- XADC_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), NULL, false),
- XADC_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), NULL, false),
- XADC_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), NULL, false),
- XADC_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), NULL, false),
- XADC_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), NULL, false),
- XADC_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), NULL, false),
+/* 7 Series */
+#define XADC_7S_CHAN_TEMP(_chan, _scan_index, _addr) \
+ XADC_CHAN_TEMP(_chan, _scan_index, _addr, 12)
+#define XADC_7S_CHAN_VOLTAGE(_chan, _scan_index, _addr, _ext, _alarm) \
+ XADC_CHAN_VOLTAGE(_chan, _scan_index, _addr, 12, _ext, _alarm)
+
+static const struct iio_chan_spec xadc_7s_channels[] = {
+ XADC_7S_CHAN_TEMP(0, 8, XADC_REG_TEMP),
+ XADC_7S_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
+ XADC_7S_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
+ XADC_7S_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
+ XADC_7S_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true),
+ XADC_7S_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true),
+ XADC_7S_CHAN_VOLTAGE(5, 7, XADC_REG_VCCO_DDR, "vccoddr", true),
+ XADC_7S_CHAN_VOLTAGE(6, 12, XADC_REG_VREFP, "vrefp", false),
+ XADC_7S_CHAN_VOLTAGE(7, 13, XADC_REG_VREFN, "vrefn", false),
+ XADC_7S_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, NULL, false),
+ XADC_7S_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), NULL, false),
+ XADC_7S_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), NULL, false),
+};
+
+/* UltraScale */
+#define XADC_US_CHAN_TEMP(_chan, _scan_index, _addr) \
+ XADC_CHAN_TEMP(_chan, _scan_index, _addr, 10)
+#define XADC_US_CHAN_VOLTAGE(_chan, _scan_index, _addr, _ext, _alarm) \
+ XADC_CHAN_VOLTAGE(_chan, _scan_index, _addr, 10, _ext, _alarm)
+
+static const struct iio_chan_spec xadc_us_channels[] = {
+ XADC_US_CHAN_TEMP(0, 8, XADC_REG_TEMP),
+ XADC_US_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
+ XADC_US_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
+ XADC_US_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
+ XADC_US_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpsintlp", true),
+ XADC_US_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpsintfp", true),
+ XADC_US_CHAN_VOLTAGE(5, 7, XADC_REG_VCCO_DDR, "vccpsaux", true),
+ XADC_US_CHAN_VOLTAGE(6, 12, XADC_REG_VREFP, "vrefp", false),
+ XADC_US_CHAN_VOLTAGE(7, 13, XADC_REG_VREFN, "vrefn", false),
+ XADC_US_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, NULL, false),
+ XADC_US_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), NULL, false),
+ XADC_US_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), NULL, false),
+ XADC_US_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), NULL, false),
+ XADC_US_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), NULL, false),
+ XADC_US_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), NULL, false),
+ XADC_US_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), NULL, false),
+ XADC_US_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), NULL, false),
+ XADC_US_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), NULL, false),
+ XADC_US_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), NULL, false),
+ XADC_US_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), NULL, false),
+ XADC_US_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), NULL, false),
+ XADC_US_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), NULL, false),
+ XADC_US_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), NULL, false),
+ XADC_US_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), NULL, false),
+ XADC_US_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), NULL, false),
+ XADC_US_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), NULL, false),
};
static const struct iio_info xadc_info = {
@@ -1083,8 +1168,16 @@ static const struct iio_info xadc_info = {
};
static const struct of_device_id xadc_of_match_table[] = {
- { .compatible = "xlnx,zynq-xadc-1.00.a", (void *)&xadc_zynq_ops },
- { .compatible = "xlnx,axi-xadc-1.00.a", (void *)&xadc_axi_ops },
+ {
+ .compatible = "xlnx,zynq-xadc-1.00.a",
+ .data = &xadc_zynq_ops
+ }, {
+ .compatible = "xlnx,axi-xadc-1.00.a",
+ .data = &xadc_7s_axi_ops
+ }, {
+ .compatible = "xlnx,system-management-wiz-1.3",
+ .data = &xadc_us_axi_ops
+ },
{ },
};
MODULE_DEVICE_TABLE(of, xadc_of_match_table);
@@ -1094,8 +1187,10 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
{
struct device *dev = indio_dev->dev.parent;
struct xadc *xadc = iio_priv(indio_dev);
+ const struct iio_chan_spec *channel_templates;
struct iio_chan_spec *channels, *chan;
struct device_node *chan_node, *child;
+ unsigned int max_channels;
unsigned int num_channels;
const char *external_mux;
u32 ext_mux_chan;
@@ -1136,9 +1231,15 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
*conf |= XADC_CONF0_MUX | XADC_CONF0_CHAN(ext_mux_chan);
}
-
- channels = devm_kmemdup(dev, xadc_channels,
- sizeof(xadc_channels), GFP_KERNEL);
+ if (xadc->ops->type == XADC_TYPE_S7) {
+ channel_templates = xadc_7s_channels;
+ max_channels = ARRAY_SIZE(xadc_7s_channels);
+ } else {
+ channel_templates = xadc_us_channels;
+ max_channels = ARRAY_SIZE(xadc_us_channels);
+ }
+ channels = devm_kmemdup(dev, channel_templates,
+ sizeof(channels[0]) * max_channels, GFP_KERNEL);
if (!channels)
return -ENOMEM;
@@ -1148,7 +1249,7 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
chan_node = of_get_child_by_name(np, "xlnx,channels");
if (chan_node) {
for_each_child_of_node(chan_node, child) {
- if (num_channels >= ARRAY_SIZE(xadc_channels)) {
+ if (num_channels >= max_channels) {
of_node_put(child);
break;
}
@@ -1184,8 +1285,28 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
return 0;
}
+static const char * const xadc_type_names[] = {
+ [XADC_TYPE_S7] = "xadc",
+ [XADC_TYPE_US] = "xilinx-system-monitor",
+};
+
+static void xadc_clk_disable_unprepare(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
+static void xadc_cancel_delayed_work(void *data)
+{
+ struct delayed_work *work = data;
+
+ cancel_delayed_work_sync(work);
+}
+
static int xadc_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
const struct of_device_id *id;
struct iio_dev *indio_dev;
unsigned int bipolar_mask;
@@ -1195,10 +1316,10 @@ static int xadc_probe(struct platform_device *pdev)
int irq;
int i;
- if (!pdev->dev.of_node)
+ if (!dev->of_node)
return -ENODEV;
- id = of_match_node(xadc_of_match_table, pdev->dev.of_node);
+ id = of_match_node(xadc_of_match_table, dev->of_node);
if (!id)
return -EINVAL;
@@ -1206,7 +1327,7 @@ static int xadc_probe(struct platform_device *pdev)
if (irq <= 0)
return -ENXIO;
- indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*xadc));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*xadc));
if (!indio_dev)
return -ENOMEM;
@@ -1222,43 +1343,44 @@ static int xadc_probe(struct platform_device *pdev)
if (IS_ERR(xadc->base))
return PTR_ERR(xadc->base);
- indio_dev->name = "xadc";
+ indio_dev->name = xadc_type_names[xadc->ops->type];
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &xadc_info;
- ret = xadc_parse_dt(indio_dev, pdev->dev.of_node, &conf0);
+ ret = xadc_parse_dt(indio_dev, dev->of_node, &conf0);
if (ret)
return ret;
if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
- ret = iio_triggered_buffer_setup(indio_dev,
- &iio_pollfunc_store_time, &xadc_trigger_handler,
- &xadc_buffer_ops);
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &xadc_trigger_handler,
+ &xadc_buffer_ops);
if (ret)
return ret;
xadc->convst_trigger = xadc_alloc_trigger(indio_dev, "convst");
- if (IS_ERR(xadc->convst_trigger)) {
- ret = PTR_ERR(xadc->convst_trigger);
- goto err_triggered_buffer_cleanup;
- }
+ if (IS_ERR(xadc->convst_trigger))
+ return PTR_ERR(xadc->convst_trigger);
+
xadc->samplerate_trigger = xadc_alloc_trigger(indio_dev,
"samplerate");
- if (IS_ERR(xadc->samplerate_trigger)) {
- ret = PTR_ERR(xadc->samplerate_trigger);
- goto err_free_convst_trigger;
- }
+ if (IS_ERR(xadc->samplerate_trigger))
+ return PTR_ERR(xadc->samplerate_trigger);
}
- xadc->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(xadc->clk)) {
- ret = PTR_ERR(xadc->clk);
- goto err_free_samplerate_trigger;
- }
+ xadc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(xadc->clk))
+ return PTR_ERR(xadc->clk);
ret = clk_prepare_enable(xadc->clk);
if (ret)
- goto err_free_samplerate_trigger;
+ return ret;
+
+ ret = devm_add_action_or_reset(dev,
+ xadc_clk_disable_unprepare, xadc->clk);
+ if (ret)
+ return ret;
/*
* Make sure not to exceed the maximum samplerate since otherwise the
@@ -1267,22 +1389,28 @@ static int xadc_probe(struct platform_device *pdev)
if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
ret = xadc_read_samplerate(xadc);
if (ret < 0)
- goto err_free_samplerate_trigger;
+ return ret;
+
if (ret > XADC_MAX_SAMPLERATE) {
ret = xadc_write_samplerate(xadc, XADC_MAX_SAMPLERATE);
if (ret < 0)
- goto err_free_samplerate_trigger;
+ return ret;
}
}
- ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0,
- dev_name(&pdev->dev), indio_dev);
+ ret = devm_request_irq(dev, xadc->irq, xadc->ops->interrupt_handler, 0,
+ dev_name(dev), indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, xadc_cancel_delayed_work,
+ &xadc->zynq_unmask_work);
if (ret)
- goto err_clk_disable_unprepare;
+ return ret;
ret = xadc->ops->setup(pdev, indio_dev, xadc->irq);
if (ret)
- goto err_free_irq;
+ return ret;
for (i = 0; i < 16; i++)
xadc_read_adc_reg(xadc, XADC_REG_THRESHOLD(i),
@@ -1290,7 +1418,7 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc_write_adc_reg(xadc, XADC_REG_CONF0, conf0);
if (ret)
- goto err_free_irq;
+ return ret;
bipolar_mask = 0;
for (i = 0; i < indio_dev->num_channels; i++) {
@@ -1300,17 +1428,18 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(0), bipolar_mask);
if (ret)
- goto err_free_irq;
+ return ret;
+
ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(1),
bipolar_mask >> 16);
if (ret)
- goto err_free_irq;
+ return ret;
/* Disable all alarms */
ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
XADC_CONF1_ALARM_MASK);
if (ret)
- goto err_free_irq;
+ return ret;
/* Set thresholds to min/max */
for (i = 0; i < 16; i++) {
@@ -1325,60 +1454,17 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
xadc->threshold[i]);
if (ret)
- goto err_free_irq;
+ return ret;
}
/* Go to non-buffered mode */
xadc_postdisable(indio_dev);
- ret = iio_device_register(indio_dev);
- if (ret)
- goto err_free_irq;
-
- platform_set_drvdata(pdev, indio_dev);
-
- return 0;
-
-err_free_irq:
- free_irq(xadc->irq, indio_dev);
- cancel_delayed_work_sync(&xadc->zynq_unmask_work);
-err_clk_disable_unprepare:
- clk_disable_unprepare(xadc->clk);
-err_free_samplerate_trigger:
- if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
- iio_trigger_free(xadc->samplerate_trigger);
-err_free_convst_trigger:
- if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
- iio_trigger_free(xadc->convst_trigger);
-err_triggered_buffer_cleanup:
- if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
- iio_triggered_buffer_cleanup(indio_dev);
-
- return ret;
-}
-
-static int xadc_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct xadc *xadc = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
- iio_trigger_free(xadc->samplerate_trigger);
- iio_trigger_free(xadc->convst_trigger);
- iio_triggered_buffer_cleanup(indio_dev);
- }
- free_irq(xadc->irq, indio_dev);
- cancel_delayed_work_sync(&xadc->zynq_unmask_work);
- clk_disable_unprepare(xadc->clk);
- kfree(xadc->data);
-
- return 0;
+ return devm_iio_device_register(dev, indio_dev);
}
static struct platform_driver xadc_driver = {
.probe = xadc_probe,
- .remove = xadc_remove,
.driver = {
.name = "xadc",
.of_match_table = xadc_of_match_table,
diff --git a/drivers/iio/adc/xilinx-xadc-events.c b/drivers/iio/adc/xilinx-xadc-events.c
index 2357f585720a..1bd375fb10e0 100644
--- a/drivers/iio/adc/xilinx-xadc-events.c
+++ b/drivers/iio/adc/xilinx-xadc-events.c
@@ -155,9 +155,6 @@ err_out:
return ret;
}
-/* Register value is msb aligned, the lower 4 bits are ignored */
-#define XADC_THRESHOLD_VALUE_SHIFT 4
-
int xadc_read_event_value(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, enum iio_event_type type,
enum iio_event_direction dir, enum iio_event_info info,
@@ -177,7 +174,8 @@ int xadc_read_event_value(struct iio_dev *indio_dev,
return -EINVAL;
}
- *val >>= XADC_THRESHOLD_VALUE_SHIFT;
+ /* MSB aligned */
+ *val >>= 16 - chan->scan_type.realbits;
return IIO_VAL_INT;
}
@@ -191,7 +189,8 @@ int xadc_write_event_value(struct iio_dev *indio_dev,
struct xadc *xadc = iio_priv(indio_dev);
int ret = 0;
- val <<= XADC_THRESHOLD_VALUE_SHIFT;
+ /* MSB aligned */
+ val <<= 16 - chan->scan_type.realbits;
if (val < 0 || val > 0xffff)
return -EINVAL;
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
index 25abed9c0285..8b80195725e9 100644
--- a/drivers/iio/adc/xilinx-xadc.h
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -70,6 +70,11 @@ struct xadc {
int irq;
};
+enum xadc_type {
+ XADC_TYPE_S7, /* Series 7 */
+ XADC_TYPE_US, /* UltraScale and UltraScale+ */
+};
+
struct xadc_ops {
int (*read)(struct xadc *xadc, unsigned int reg, uint16_t *val);
int (*write)(struct xadc *xadc, unsigned int reg, uint16_t val);
@@ -80,6 +85,7 @@ struct xadc_ops {
irqreturn_t (*interrupt_handler)(int irq, void *devid);
unsigned int flags;
+ enum xadc_type type;
};
static inline int _xadc_read_adc_reg(struct xadc *xadc, unsigned int reg,
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index 6ea99e4cbf92..bf23cc7eb99e 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -479,7 +479,7 @@ static u8 bme680_calc_heater_res(struct bme680_data *data, u16 temp)
var4 = (var3 / (calib->res_heat_range + 4));
var5 = 131 * calib->res_heat_val + 65536;
heatr_res_x100 = ((var4 / var5) - 250) * 34;
- heatr_res = (heatr_res_x100 + 50) / 100;
+ heatr_res = DIV_ROUND_CLOSEST(heatr_res_x100, 100);
return heatr_res;
}
diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c
index e9d4405654bc..e9857d93b307 100644
--- a/drivers/iio/chemical/pms7003.c
+++ b/drivers/iio/chemical/pms7003.c
@@ -282,7 +282,7 @@ static int pms7003_probe(struct serdev_device *serdev)
state->serdev = serdev;
indio_dev->info = &pms7003_info;
indio_dev->name = PMS7003_DRIVER_NAME;
- indio_dev->channels = pms7003_channels,
+ indio_dev->channels = pms7003_channels;
indio_dev->num_channels = ARRAY_SIZE(pms7003_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->available_scan_masks = pms7003_scan_masks;
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index 442ff787f7af..5b822a4298a0 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -71,6 +71,8 @@ static struct {
{HID_USAGE_SENSOR_TEMPERATURE, HID_USAGE_SENSOR_UNITS_DEGREES, 1000, 0},
{HID_USAGE_SENSOR_HUMIDITY, 0, 1000, 0},
+ {HID_USAGE_SENSOR_HINGE, 0, 0, 17453293},
+ {HID_USAGE_SENSOR_HINGE, HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293},
};
static void simple_div(int dividend, int divisor, int *whole,
diff --git a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
index b9e2038d05ef..16ea697e945c 100644
--- a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
+++ b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
@@ -488,24 +488,20 @@ int ms_sensors_ht_read_humidity(struct ms_ht_dev *dev_data,
EXPORT_SYMBOL(ms_sensors_ht_read_humidity);
/**
- * ms_sensors_tp_crc_valid() - CRC check function for
+ * ms_sensors_tp_crc4() - Calculate PROM CRC for
* Temperature and pressure devices.
* This function is only used when reading PROM coefficients
*
* @prom: pointer to PROM coefficients array
- * @len: length of PROM coefficients array
*
- * Return: True if CRC is ok.
+ * Return: CRC.
*/
-static bool ms_sensors_tp_crc_valid(u16 *prom, u8 len)
+static u8 ms_sensors_tp_crc4(u16 *prom)
{
unsigned int cnt, n_bit;
- u16 n_rem = 0x0000, crc_read = prom[0], crc = (*prom & 0xF000) >> 12;
-
- prom[len - 1] = 0;
- prom[0] &= 0x0FFF; /* Clear the CRC computation part */
+ u16 n_rem = 0x0000;
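+
+ /* Fold each PROM byte into n_rem; the 4-bit CRC lands in bits 15:12 */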
- for (cnt = 0; cnt < len * 2; cnt++) {
+ for (cnt = 0; cnt < MS_SENSORS_TP_PROM_WORDS_NB * 2; cnt++) {
if (cnt % 2 == 1)
n_rem ^= prom[cnt >> 1] & 0x00FF;
else
@@ -518,10 +514,55 @@ static bool ms_sensors_tp_crc_valid(u16 *prom, u8 len)
n_rem <<= 1;
}
}
- n_rem >>= 12;
- prom[0] = crc_read;
- return n_rem == crc;
+ return n_rem >> 12;
+}
+
+/**
+ * ms_sensors_tp_crc_valid_112() - CRC check function for
+ * Temperature and pressure devices with a 112-bit PROM.
+ * This function is only used when reading PROM coefficients
+ *
+ * @prom: pointer to PROM coefficients array
+ *
+ * Return: True if CRC is ok.
+ */
+static bool ms_sensors_tp_crc_valid_112(u16 *prom)
+{
+ u16 w0 = prom[0], crc_read = (w0 & 0xF000) >> 12;
+ u8 crc;
+
+ prom[0] &= 0x0FFF; /* Clear the CRC computation part */
+ prom[MS_SENSORS_TP_PROM_WORDS_NB - 1] = 0;
+
+ crc = ms_sensors_tp_crc4(prom);
+
+ prom[0] = w0;
+
+ return crc == crc_read;
+}
+
+/**
+ * ms_sensors_tp_crc_valid_128() - CRC check function for
+ * Temperature and pressure devices with a 128-bit PROM.
+ * This function is only used when reading PROM coefficients
+ *
+ * @prom: pointer to PROM coefficients array
+ *
+ * Return: True if CRC is ok.
+ */
+static bool ms_sensors_tp_crc_valid_128(u16 *prom)
+{
+ u16 w7 = prom[7], crc_read = w7 & 0x000F;
+ u8 crc;
+
+ prom[7] &= 0xFF00; /* Clear the CRC and LSB part */
+
+ crc = ms_sensors_tp_crc4(prom);
+
+ prom[7] = w7;
+
+ return crc == crc_read;
}
/**
@@ -536,8 +577,9 @@ static bool ms_sensors_tp_crc_valid(u16 *prom, u8 len)
int ms_sensors_tp_read_prom(struct ms_tp_dev *dev_data)
{
int i, ret;
+ bool valid;
- for (i = 0; i < MS_SENSORS_TP_PROM_WORDS_NB; i++) {
+ for (i = 0; i < dev_data->hw->prom_len; i++) {
ret = ms_sensors_read_prom_word(
dev_data->client,
MS_SENSORS_TP_PROM_READ + (i << 1),
@@ -547,8 +589,12 @@ int ms_sensors_tp_read_prom(struct ms_tp_dev *dev_data)
return ret;
}
- if (!ms_sensors_tp_crc_valid(dev_data->prom,
- MS_SENSORS_TP_PROM_WORDS_NB + 1)) {
+ if (dev_data->hw->prom_len == 8)
+ valid = ms_sensors_tp_crc_valid_128(dev_data->prom);
+ else
+ valid = ms_sensors_tp_crc_valid_112(dev_data->prom);
+
+ if (!valid) {
dev_err(&dev_data->client->dev,
"Calibration coefficients crc check error\n");
return -ENODEV;
diff --git a/drivers/iio/common/ms_sensors/ms_sensors_i2c.h b/drivers/iio/common/ms_sensors/ms_sensors_i2c.h
index bad09c80e47a..f15b973f27c6 100644
--- a/drivers/iio/common/ms_sensors/ms_sensors_i2c.h
+++ b/drivers/iio/common/ms_sensors/ms_sensors_i2c.h
@@ -11,7 +11,7 @@
#include <linux/i2c.h>
#include <linux/mutex.h>
-#define MS_SENSORS_TP_PROM_WORDS_NB 7
+#define MS_SENSORS_TP_PROM_WORDS_NB 8
/**
* struct ms_ht_dev - Humidity/Temperature sensor device structure
@@ -26,6 +26,16 @@ struct ms_ht_dev {
};
/**
+ * struct ms_tp_hw_data - Temperature/Pressure sensor hardware data
+ * @prom_len: number of words in the PROM
+ * @max_res_index: maximum sensor resolution index
+ */
+struct ms_tp_hw_data {
+ u8 prom_len;
+ u8 max_res_index;
+};
+
+/**
* struct ms_tp_dev - Temperature/Pressure sensor device structure
* @client: i2c client
* @lock: lock protecting the i2c conversion
@@ -36,7 +46,8 @@ struct ms_ht_dev {
struct ms_tp_dev {
struct i2c_client *client;
struct mutex lock;
- u16 prom[MS_SENSORS_TP_PROM_WORDS_NB + 1];
+ const struct ms_tp_hw_data *hw;
+ u16 prom[MS_SENSORS_TP_PROM_WORDS_NB];
u8 res_index;
};
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 6f6074a5d3db..cea07b4cced1 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -189,6 +189,16 @@ config AD5764
To compile this driver as a module, choose M here: the
module will be called ad5764.
+config AD5766
+ tristate "Analog Devices AD5766/AD5767 DAC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5766, AD5767
+ Digital to Analog Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5766.
+
config AD5770R
tristate "Analog Devices AD5770R IDAC driver"
depends on SPI_MASTER
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 2fc481167724..33e16f14902a 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_AD5755) += ad5755.o
obj-$(CONFIG_AD5755) += ad5758.o
obj-$(CONFIG_AD5761) += ad5761.o
obj-$(CONFIG_AD5764) += ad5764.o
+obj-$(CONFIG_AD5766) += ad5766.o
obj-$(CONFIG_AD5770R) += ad5770r.o
obj-$(CONFIG_AD5791) += ad5791.o
obj-$(CONFIG_AD5686) += ad5686.o
diff --git a/drivers/iio/dac/ad5766.c b/drivers/iio/dac/ad5766.c
new file mode 100644
index 000000000000..ef1618ea6a20
--- /dev/null
+++ b/drivers/iio/dac/ad5766.c
@@ -0,0 +1,643 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices AD5766, AD5767
+ * Digital to Analog Converters driver
+ * Copyright 2019-2020 Analog Devices Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <asm/unaligned.h>
+
+#define AD5766_UPPER_WORD_SPI_MASK GENMASK(31, 16)
+#define AD5766_LOWER_WORD_SPI_MASK GENMASK(15, 0)
+#define AD5766_DITHER_SOURCE_MASK(ch) GENMASK(((2 * ch) + 1), (2 * ch))
+#define AD5766_DITHER_SOURCE(ch, source) BIT((ch * 2) + source)
+#define AD5766_DITHER_SCALE_MASK(x) AD5766_DITHER_SOURCE_MASK(x)
+#define AD5766_DITHER_SCALE(ch, scale) (scale << (ch * 2))
+#define AD5766_DITHER_ENABLE_MASK(ch) BIT(ch)
+#define AD5766_DITHER_ENABLE(ch, state) ((!state) << ch)
+#define AD5766_DITHER_INVERT_MASK(ch) BIT(ch)
+#define AD5766_DITHER_INVERT(ch, state) (state << ch)
+
+#define AD5766_CMD_NOP_MUX_OUT 0x00
+#define AD5766_CMD_SDO_CNTRL 0x01
+#define AD5766_CMD_WR_IN_REG(x) (0x10 | ((x) & GENMASK(3, 0)))
+#define AD5766_CMD_WR_DAC_REG(x) (0x20 | ((x) & GENMASK(3, 0)))
+#define AD5766_CMD_SW_LDAC 0x30
+#define AD5766_CMD_SPAN_REG 0x40
+#define AD5766_CMD_WR_PWR_DITHER 0x51
+#define AD5766_CMD_WR_DAC_REG_ALL 0x60
+#define AD5766_CMD_SW_FULL_RESET 0x70
+#define AD5766_CMD_READBACK_REG(x) (0x80 | ((x) & GENMASK(3, 0)))
+#define AD5766_CMD_DITHER_SIG_1 0x90
+#define AD5766_CMD_DITHER_SIG_2 0xA0
+#define AD5766_CMD_INV_DITHER 0xB0
+#define AD5766_CMD_DITHER_SCALE_1 0xC0
+#define AD5766_CMD_DITHER_SCALE_2 0xD0
+
+#define AD5766_FULL_RESET_CODE 0x1234
+
+enum ad5766_type {
+ ID_AD5766,
+ ID_AD5767,
+};
+
+enum ad5766_voltage_range {
+ AD5766_VOLTAGE_RANGE_M20V_0V,
+ AD5766_VOLTAGE_RANGE_M16V_to_0V,
+ AD5766_VOLTAGE_RANGE_M10V_to_0V,
+ AD5766_VOLTAGE_RANGE_M12V_to_14V,
+ AD5766_VOLTAGE_RANGE_M16V_to_10V,
+ AD5766_VOLTAGE_RANGE_M10V_to_6V,
+ AD5766_VOLTAGE_RANGE_M5V_to_5V,
+ AD5766_VOLTAGE_RANGE_M10V_to_10V,
+};
+
+/**
+ * struct ad5766_chip_info - chip specific information
+ * @num_channels: number of channels
+ * @channels: channel specification
+ */
+struct ad5766_chip_info {
+ unsigned int num_channels;
+ const struct iio_chan_spec *channels;
+};
+
+enum {
+ AD5766_DITHER_ENABLE,
+ AD5766_DITHER_INVERT,
+ AD5766_DITHER_SOURCE,
+};
+
+/*
+ * Dither signal can also be scaled.
+ * Available dither scale strings corresponding to "dither_scale" field in
+ * "struct ad5766_state".
+ */
+static const char * const ad5766_dither_scales[] = {
+ "1",
+ "0.75",
+ "0.5",
+ "0.25",
+};
+
+/**
+ * struct ad5766_state - driver instance specific data
+ * @spi: SPI device
+ * @lock: Lock used to restrict concurrent access to SPI device
+ * @chip_info: Chip model specific constants
+ * @gpio_reset: Reset GPIO, used to reset the device
+ * @crt_range: Current selected output range
+ * @dither_enable: Power enable bit for each channel dither block (for
+ * example, D15 = DAC 15, D8 = DAC 8, and D0 = DAC 0)
+ * 0 - Normal operation, 1 - Power down
+ * @dither_invert: Inverts the dither signal applied to the selected DAC
+ * outputs
+ * @dither_source: Selects between 2 possible sources:
+ * 1: N0, 2: N1
+ * Two bits are used for each channel
+ * @dither_scale: Two bits are used for each of the 16 channels:
+ * 0: 1 SCALING, 1: 0.75 SCALING, 2: 0.5 SCALING,
+ * 3: 0.25 SCALING.
+ * @data: SPI transfer buffers
+ */
+struct ad5766_state {
+ struct spi_device *spi;
+ struct mutex lock;
+ const struct ad5766_chip_info *chip_info;
+ struct gpio_desc *gpio_reset;
+ enum ad5766_voltage_range crt_range;
+ u16 dither_enable;
+ u16 dither_invert;
+ u32 dither_source;
+ u32 dither_scale;
+ union {
+ u32 d32;
+ u16 w16[2];
+ u8 b8[4];
+ } data[3] ____cacheline_aligned;
+};
+
+struct ad5766_span_tbl {
+ int min;
+ int max;
+};
+
+static const struct ad5766_span_tbl ad5766_span_tbl[] = {
+ [AD5766_VOLTAGE_RANGE_M20V_0V] = {-20, 0},
+ [AD5766_VOLTAGE_RANGE_M16V_to_0V] = {-16, 0},
+ [AD5766_VOLTAGE_RANGE_M10V_to_0V] = {-10, 0},
+ [AD5766_VOLTAGE_RANGE_M12V_to_14V] = {-12, 14},
+ [AD5766_VOLTAGE_RANGE_M16V_to_10V] = {-16, 10},
+ [AD5766_VOLTAGE_RANGE_M10V_to_6V] = {-10, 6},
+ [AD5766_VOLTAGE_RANGE_M5V_to_5V] = {-5, 5},
+ [AD5766_VOLTAGE_RANGE_M10V_to_10V] = {-10, 10},
+};
+
+static int __ad5766_spi_read(struct ad5766_state *st, u8 dac, int *val)
+{
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = &st->data[0].d32,
+ .bits_per_word = 8,
+ .len = 3,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &st->data[1].d32,
+ .rx_buf = &st->data[2].d32,
+ .bits_per_word = 8,
+ .len = 3,
+ },
+ };
+
+ st->data[0].d32 = AD5766_CMD_READBACK_REG(dac);
+ st->data[1].d32 = AD5766_CMD_NOP_MUX_OUT;
+
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
+ if (ret)
+ return ret;
+
+ *val = st->data[2].w16[1];
+
+ return ret;
+}
+
+static int __ad5766_spi_write(struct ad5766_state *st, u8 command, u16 data)
+{
+ st->data[0].b8[0] = command;
+ put_unaligned_be16(data, &st->data[0].b8[1]);
+
+ return spi_write(st->spi, &st->data[0].b8[0], 3);
+}
+
+static int ad5766_read(struct iio_dev *indio_dev, u8 dac, int *val)
+{
+ struct ad5766_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __ad5766_spi_read(st, dac, val);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ad5766_write(struct iio_dev *indio_dev, u8 dac, u16 data)
+{
+ struct ad5766_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __ad5766_spi_write(st, AD5766_CMD_WR_DAC_REG(dac), data);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ad5766_reset(struct ad5766_state *st)
+{
+ int ret;
+
+ if (st->gpio_reset) {
+ gpiod_set_value_cansleep(st->gpio_reset, 1);
+ ndelay(100); /* t_reset >= 100ns */
+ gpiod_set_value_cansleep(st->gpio_reset, 0);
+ } else {
+ ret = __ad5766_spi_write(st, AD5766_CMD_SW_FULL_RESET,
+ AD5766_FULL_RESET_CODE);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Minimum time between a reset and the subsequent successful write is
+ * typically 25 ns
+ */
+ ndelay(25);
+
+ return 0;
+}
+
+static int ad5766_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad5766_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ ret = ad5766_read(indio_dev, chan->address, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = ad5766_span_tbl[st->crt_range].min;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = ad5766_span_tbl[st->crt_range].max -
+ ad5766_span_tbl[st->crt_range].min;
+ *val2 = st->chip_info->channels[0].scan_type.realbits;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad5766_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ {
+ const int max_val = GENMASK(chan->scan_type.realbits - 1, 0);
+
+ if (val > max_val || val < 0)
+ return -EINVAL;
+ val <<= chan->scan_type.shift;
+ return ad5766_write(indio_dev, chan->address, val);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info ad5766_info = {
+ .read_raw = ad5766_read_raw,
+ .write_raw = ad5766_write_raw,
+};
+
+static int ad5766_get_dither_source(struct iio_dev *dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad5766_state *st = iio_priv(dev);
+ u32 source;
+
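+ /*
+ * Each channel uses a two-bit field: 01 selects N0 and 10 selects N1,
+ * hence the conversion to a 0-based source index below.
+ */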
+ source = st->dither_source & AD5766_DITHER_SOURCE_MASK(chan->channel);
+ source = source >> (chan->channel * 2);
+ source -= 1;
+
+ return source;
+}
+
+static int ad5766_set_dither_source(struct iio_dev *dev,
+ const struct iio_chan_spec *chan,
+ unsigned int source)
+{
+ struct ad5766_state *st = iio_priv(dev);
+ uint16_t val;
+ int ret;
+
+ st->dither_source &= ~AD5766_DITHER_SOURCE_MASK(chan->channel);
+ st->dither_source |= AD5766_DITHER_SOURCE(chan->channel, source);
+
+ val = FIELD_GET(AD5766_LOWER_WORD_SPI_MASK, st->dither_source);
+ ret = ad5766_write(dev, AD5766_CMD_DITHER_SIG_1, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(AD5766_UPPER_WORD_SPI_MASK, st->dither_source);
+
+ return ad5766_write(dev, AD5766_CMD_DITHER_SIG_2, val);
+}
+
+static int ad5766_get_dither_scale(struct iio_dev *dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad5766_state *st = iio_priv(dev);
+ u32 scale;
+
+ scale = st->dither_scale & AD5766_DITHER_SCALE_MASK(chan->channel);
+
+ return (scale >> (chan->channel * 2));
+}
+
+static int ad5766_set_dither_scale(struct iio_dev *dev,
+ const struct iio_chan_spec *chan,
+ unsigned int scale)
+{
+ int ret;
+ struct ad5766_state *st = iio_priv(dev);
+ uint16_t val;
+
+ st->dither_scale &= ~AD5766_DITHER_SCALE_MASK(chan->channel);
+ st->dither_scale |= AD5766_DITHER_SCALE(chan->channel, scale);
+
+ val = FIELD_GET(AD5766_LOWER_WORD_SPI_MASK, st->dither_scale);
+ ret = ad5766_write(dev, AD5766_CMD_DITHER_SCALE_1, val);
+ if (ret)
+ return ret;
+ val = FIELD_GET(AD5766_UPPER_WORD_SPI_MASK, st->dither_scale);
+
+ return ad5766_write(dev, AD5766_CMD_DITHER_SCALE_2, val);
+}
+
+static const struct iio_enum ad5766_dither_scale_enum = {
+ .items = ad5766_dither_scales,
+ .num_items = ARRAY_SIZE(ad5766_dither_scales),
+ .set = ad5766_set_dither_scale,
+ .get = ad5766_get_dither_scale,
+};
+
+static ssize_t ad5766_read_ext(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ad5766_state *st = iio_priv(indio_dev);
+
+ switch (private) {
+ case AD5766_DITHER_ENABLE:
+ return sprintf(buf, "%u\n",
+ !(st->dither_enable & BIT(chan->channel)));
+ case AD5766_DITHER_INVERT:
+ return sprintf(buf, "%u\n",
+ !!(st->dither_invert & BIT(chan->channel)));
+ case AD5766_DITHER_SOURCE:
+ return sprintf(buf, "%d\n",
+ ad5766_get_dither_source(indio_dev, chan));
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t ad5766_write_ext(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ad5766_state *st = iio_priv(indio_dev);
+ bool readin;
+ int ret;
+
+ ret = kstrtobool(buf, &readin);
+ if (ret)
+ return ret;
+
+ switch (private) {
+ case AD5766_DITHER_ENABLE:
+ st->dither_enable &= ~AD5766_DITHER_ENABLE_MASK(chan->channel);
+ st->dither_enable |= AD5766_DITHER_ENABLE(chan->channel,
+ readin);
+ ret = ad5766_write(indio_dev, AD5766_CMD_WR_PWR_DITHER,
+ st->dither_enable);
+ break;
+ case AD5766_DITHER_INVERT:
+ st->dither_invert &= ~AD5766_DITHER_INVERT_MASK(chan->channel);
+ st->dither_invert |= AD5766_DITHER_INVERT(chan->channel,
+ readin);
+ ret = ad5766_write(indio_dev, AD5766_CMD_INV_DITHER,
+ st->dither_invert);
+ break;
+ case AD5766_DITHER_SOURCE:
+ ret = ad5766_set_dither_source(indio_dev, chan, readin);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret ? ret : len;
+}
+
+#define _AD5766_CHAN_EXT_INFO(_name, _what, _shared) { \
+ .name = _name, \
+ .read = ad5766_read_ext, \
+ .write = ad5766_write_ext, \
+ .private = _what, \
+ .shared = _shared, \
+}
+
+#define IIO_ENUM_AVAILABLE_SHARED(_name, _shared, _e) \
+{ \
+ .name = (_name "_available"), \
+ .shared = _shared, \
+ .read = iio_enum_available_read, \
+ .private = (uintptr_t)(_e), \
+}
+
+static const struct iio_chan_spec_ext_info ad5766_ext_info[] = {
+
+ _AD5766_CHAN_EXT_INFO("dither_enable", AD5766_DITHER_ENABLE,
+ IIO_SEPARATE),
+ _AD5766_CHAN_EXT_INFO("dither_invert", AD5766_DITHER_INVERT,
+ IIO_SEPARATE),
+ _AD5766_CHAN_EXT_INFO("dither_source", AD5766_DITHER_SOURCE,
+ IIO_SEPARATE),
+ IIO_ENUM("dither_scale", IIO_SEPARATE, &ad5766_dither_scale_enum),
+ IIO_ENUM_AVAILABLE_SHARED("dither_scale",
+ IIO_SEPARATE,
+ &ad5766_dither_scale_enum),
+ {}
+};
+
+#define AD576x_CHANNEL(_chan, _bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (_chan), \
+ .address = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 16, \
+ .shift = 16 - (_bits), \
+ }, \
+ .ext_info = ad5766_ext_info, \
+}
+
+#define DECLARE_AD576x_CHANNELS(_name, _bits) \
+const struct iio_chan_spec _name[] = { \
+ AD576x_CHANNEL(0, (_bits)), \
+ AD576x_CHANNEL(1, (_bits)), \
+ AD576x_CHANNEL(2, (_bits)), \
+ AD576x_CHANNEL(3, (_bits)), \
+ AD576x_CHANNEL(4, (_bits)), \
+ AD576x_CHANNEL(5, (_bits)), \
+ AD576x_CHANNEL(6, (_bits)), \
+ AD576x_CHANNEL(7, (_bits)), \
+ AD576x_CHANNEL(8, (_bits)), \
+ AD576x_CHANNEL(9, (_bits)), \
+ AD576x_CHANNEL(10, (_bits)), \
+ AD576x_CHANNEL(11, (_bits)), \
+ AD576x_CHANNEL(12, (_bits)), \
+ AD576x_CHANNEL(13, (_bits)), \
+ AD576x_CHANNEL(14, (_bits)), \
+ AD576x_CHANNEL(15, (_bits)), \
+}
+
+static DECLARE_AD576x_CHANNELS(ad5766_channels, 16);
+static DECLARE_AD576x_CHANNELS(ad5767_channels, 12);
+
+static const struct ad5766_chip_info ad5766_chip_infos[] = {
+ [ID_AD5766] = {
+ .num_channels = ARRAY_SIZE(ad5766_channels),
+ .channels = ad5766_channels,
+ },
+ [ID_AD5767] = {
+ .num_channels = ARRAY_SIZE(ad5767_channels),
+ .channels = ad5767_channels,
+ },
+};
+
+static int ad5766_get_output_range(struct ad5766_state *st)
+{
+ int i, ret, min, max, tmp[2];
+
+ ret = device_property_read_u32_array(&st->spi->dev,
+ "output-range-voltage",
+ tmp, 2);
+ if (ret)
+ return ret;
+
+ min = tmp[0] / 1000;
+ max = tmp[1] / 1000;
+ for (i = 0; i < ARRAY_SIZE(ad5766_span_tbl); i++) {
+ if (ad5766_span_tbl[i].min != min ||
+ ad5766_span_tbl[i].max != max)
+ continue;
+
+ st->crt_range = i;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ad5766_default_setup(struct ad5766_state *st)
+{
+ uint16_t val;
+ int ret, i;
+
+ /* Always issue a reset before writing to the span register. */
+ ret = ad5766_reset(st);
+ if (ret)
+ return ret;
+
+ ret = ad5766_get_output_range(st);
+ if (ret)
+ return ret;
+
+ /* Dither power down */
+ st->dither_enable = GENMASK(15, 0);
+ ret = __ad5766_spi_write(st, AD5766_CMD_WR_PWR_DITHER,
+ st->dither_enable);
+ if (ret)
+ return ret;
+
+ st->dither_source = 0;
+ for (i = 0; i < ARRAY_SIZE(ad5766_channels); i++)
+ st->dither_source |= AD5766_DITHER_SOURCE(i, 0);
+ val = FIELD_GET(AD5766_LOWER_WORD_SPI_MASK, st->dither_source);
+ ret = __ad5766_spi_write(st, AD5766_CMD_DITHER_SIG_1, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(AD5766_UPPER_WORD_SPI_MASK, st->dither_source);
+ ret = __ad5766_spi_write(st, AD5766_CMD_DITHER_SIG_2, val);
+ if (ret)
+ return ret;
+
+ st->dither_scale = 0;
+ val = FIELD_GET(AD5766_LOWER_WORD_SPI_MASK, st->dither_scale);
+ ret = __ad5766_spi_write(st, AD5766_CMD_DITHER_SCALE_1, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(AD5766_UPPER_WORD_SPI_MASK, st->dither_scale);
+ ret = __ad5766_spi_write(st, AD5766_CMD_DITHER_SCALE_2, val);
+ if (ret)
+ return ret;
+
+ st->dither_invert = 0;
+ ret = __ad5766_spi_write(st, AD5766_CMD_INV_DITHER, st->dither_invert);
+ if (ret)
+ return ret;
+
+ return __ad5766_spi_write(st, AD5766_CMD_SPAN_REG, st->crt_range);
+}
+
+static int ad5766_probe(struct spi_device *spi)
+{
+ enum ad5766_type type;
+ struct iio_dev *indio_dev;
+ struct ad5766_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ mutex_init(&st->lock);
+
+ st->spi = spi;
+ type = spi_get_device_id(spi)->driver_data;
+ st->chip_info = &ad5766_chip_infos[type];
+
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = st->chip_info->num_channels;
+ indio_dev->info = &ad5766_info;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ st->gpio_reset = devm_gpiod_get_optional(&st->spi->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->gpio_reset))
+ return PTR_ERR(st->gpio_reset);
+
+ ret = ad5766_default_setup(st);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct of_device_id ad5766_dt_match[] = {
+ { .compatible = "adi,ad5766" },
+ { .compatible = "adi,ad5767" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ad5766_dt_match);
+
+static const struct spi_device_id ad5766_spi_ids[] = {
+ { "ad5766", ID_AD5766 },
+ { "ad5767", ID_AD5767 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5766_spi_ids);
+
+static struct spi_driver ad5766_driver = {
+ .driver = {
+ .name = "ad5766",
+ .of_match_table = ad5766_dt_match,
+ },
+ .probe = ad5766_probe,
+ .id_table = ad5766_spi_ids,
+};
+module_spi_driver(ad5766_driver);
+
+MODULE_AUTHOR("Denis-Gabriel Gheorghescu <denis.gheorghescu@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD5766/AD5767 DACs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 82c050a3899d..1462a6a5bc6d 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -582,8 +582,7 @@ error_disable_reg:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
error_disable_clk:
- if (clk)
- clk_disable_unprepare(clk);
+ clk_disable_unprepare(clk);
return ret;
}
@@ -599,8 +598,7 @@ static int adf4350_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
- if (st->clk)
- clk_disable_unprepare(st->clk);
+ clk_disable_unprepare(st->clk);
if (!IS_ERR(reg))
regulator_disable(reg);
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 2d5015801a75..029ef4c34604 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -19,6 +19,7 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include "bmg160.h"
#define BMG160_IRQ_NAME "bmg160_event"
@@ -92,6 +93,7 @@
struct bmg160_data {
struct regmap *regmap;
+ struct regulator_bulk_data regulators[2];
struct iio_trigger *dready_trig;
struct iio_trigger *motion_trig;
struct iio_mount_matrix orientation;
@@ -1061,6 +1063,13 @@ static const char *bmg160_match_acpi_device(struct device *dev)
return dev_name(dev);
}
+static void bmg160_disable_regulators(void *d)
+{
+ struct bmg160_data *data = d;
+
+ regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators);
+}
+
int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
const char *name)
{
@@ -1077,6 +1086,22 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
data->irq = irq;
data->regmap = regmap;
+ data->regulators[0].supply = "vdd";
+ data->regulators[1].supply = "vddio";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, bmg160_disable_regulators, data);
+ if (ret)
+ return ret;
+
ret = iio_read_mount_matrix(dev, "mount-matrix",
&data->orientation);
if (ret)
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 6698f5f535f6..fb0d678ece1a 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -23,15 +23,20 @@ enum gyro_3d_channel {
GYRO_3D_CHANNEL_MAX,
};
+#define CHANNEL_SCAN_INDEX_TIMESTAMP GYRO_3D_CHANNEL_MAX
struct gyro_3d_state {
struct hid_sensor_hub_callbacks callbacks;
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info gyro[GYRO_3D_CHANNEL_MAX];
- u32 gyro_val[GYRO_3D_CHANNEL_MAX];
+ struct {
+ u32 gyro_val[GYRO_3D_CHANNEL_MAX];
+ u64 timestamp __aligned(8);
+ } scan;
int scale_pre_decml;
int scale_post_decml;
int scale_precision;
int value_offset;
+ s64 timestamp;
};
static const u32 gyro_3d_addresses[GYRO_3D_CHANNEL_MAX] = {
@@ -72,7 +77,8 @@ static const struct iio_chan_spec gyro_3d_channels[] = {
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS),
.scan_index = CHANNEL_SCAN_INDEX_Z,
- }
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP)
};
/* Adjust channel real bits based on report descriptor */
@@ -178,14 +184,6 @@ static const struct iio_info gyro_3d_info = {
.write_raw = &gyro_3d_write_raw,
};
-/* Function to push data to buffer */
-static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data,
- int len)
-{
- dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
- iio_push_to_buffers(indio_dev, data);
-}
-
/* Callback handler to send event after all samples are received and captured */
static int gyro_3d_proc_event(struct hid_sensor_hub_device *hsdev,
unsigned usage_id,
@@ -195,10 +193,15 @@ static int gyro_3d_proc_event(struct hid_sensor_hub_device *hsdev,
struct gyro_3d_state *gyro_state = iio_priv(indio_dev);
dev_dbg(&indio_dev->dev, "gyro_3d_proc_event\n");
- if (atomic_read(&gyro_state->common_attributes.data_ready))
- hid_sensor_push_data(indio_dev,
- gyro_state->gyro_val,
- sizeof(gyro_state->gyro_val));
+ if (atomic_read(&gyro_state->common_attributes.data_ready)) {
+ if (!gyro_state->timestamp)
+ gyro_state->timestamp = iio_get_time_ns(indio_dev);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &gyro_state->scan,
+ gyro_state->timestamp);
+
+ gyro_state->timestamp = 0;
+ }
return 0;
}
@@ -219,10 +222,15 @@ static int gyro_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
case HID_USAGE_SENSOR_ANGL_VELOCITY_Y_AXIS:
case HID_USAGE_SENSOR_ANGL_VELOCITY_Z_AXIS:
offset = usage_id - HID_USAGE_SENSOR_ANGL_VELOCITY_X_AXIS;
- gyro_state->gyro_val[CHANNEL_SCAN_INDEX_X + offset] =
- *(u32 *)raw_data;
+ gyro_state->scan.gyro_val[CHANNEL_SCAN_INDEX_X + offset] =
+ *(u32 *)raw_data;
ret = 0;
break;
+ case HID_USAGE_SENSOR_TIME_TIMESTAMP:
+ gyro_state->timestamp =
+ hid_sensor_convert_timestamp(&gyro_state->common_attributes,
+ *(s64 *)raw_data);
+ break;
default:
break;
}
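
The gyro driver now gathers its samples in a struct whose trailing timestamp is forced onto an 8-byte boundary, which is the layout iio_push_to_buffers_with_timestamp() expects. A minimal sketch of that layout and push, with hypothetical names:

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>

struct foo_state {
	struct {
		u32 channels[3];
		u64 timestamp __aligned(8);	/* written by the push helper */
	} scan;
	s64 timestamp;
};

static void foo_push_sample(struct iio_dev *indio_dev, struct foo_state *st)
{
	/* Fall back to the local clock if the hub supplied no timestamp */
	if (!st->timestamp)
		st->timestamp = iio_get_time_ns(indio_dev);

	iio_push_to_buffers_with_timestamp(indio_dev, &st->scan, st->timestamp);
	st->timestamp = 0;
}
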
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 7137ea6f25db..9c625517173a 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -16,8 +16,8 @@ config INV_MPU6050_I2C
select REGMAP_I2C
help
This driver supports the Invensense MPU6050/9150,
- MPU6500/6515/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690 and
- IAM20680 motion tracking devices over I2C.
+ MPU6500/6515/6880/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690
+ and IAM20680 motion tracking devices over I2C.
This driver can be built as a module. The module will be called
inv-mpu6050-i2c.
@@ -28,7 +28,7 @@ config INV_MPU6050_SPI
select REGMAP_SPI
help
This driver supports the Invensense MPU6000,
- MPU6500/6515/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690 and
- IAM20680 motion tracking devices over SPI.
+ MPU6500/6515/6880/9250/9255, ICM20608/20609/20689, ICM20602/ICM20690
+ and IAM20680 motion tracking devices over SPI.
This driver can be built as a module. The module will be called
inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 18a1898e3e34..453c51c79655 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -161,6 +161,14 @@ static const struct inv_mpu6050_hw hw_info[] = {
.temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE},
},
{
+ .whoami = INV_MPU6880_WHOAMI_VALUE,
+ .name = "MPU6880",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6500,
+ .fifo_size = 4096,
+ .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE},
+ },
+ {
.whoami = INV_MPU6000_WHOAMI_VALUE,
.name = "MPU6000",
.reg = &reg_set_6050,
@@ -1323,6 +1331,7 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
case INV_MPU6000:
case INV_MPU6500:
case INV_MPU6515:
+ case INV_MPU6880:
case INV_MPU9250:
case INV_MPU9255:
/* reset signal path (required for spi connection) */
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 28cfae1e61cf..95f16951c8f4 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -177,6 +177,7 @@ static const struct i2c_device_id inv_mpu_id[] = {
{"mpu6050", INV_MPU6050},
{"mpu6500", INV_MPU6500},
{"mpu6515", INV_MPU6515},
+ {"mpu6880", INV_MPU6880},
{"mpu9150", INV_MPU9150},
{"mpu9250", INV_MPU9250},
{"mpu9255", INV_MPU9255},
@@ -205,6 +206,10 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_MPU6515
},
{
+ .compatible = "invensense,mpu6880",
+ .data = (void *)INV_MPU6880
+ },
+ {
.compatible = "invensense,mpu9150",
.data = (void *)INV_MPU9150
},
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index eb522b38acf3..58188dc0dd13 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -70,6 +70,7 @@ enum inv_devices {
INV_MPU6050,
INV_MPU6500,
INV_MPU6515,
+ INV_MPU6880,
INV_MPU6000,
INV_MPU9150,
INV_MPU9250,
@@ -373,6 +374,7 @@ struct inv_mpu6050_state {
#define INV_MPU6000_WHOAMI_VALUE 0x68
#define INV_MPU6050_WHOAMI_VALUE 0x68
#define INV_MPU6500_WHOAMI_VALUE 0x70
+#define INV_MPU6880_WHOAMI_VALUE 0x78
#define INV_MPU9150_WHOAMI_VALUE 0x68
#define INV_MPU9250_WHOAMI_VALUE 0x71
#define INV_MPU9255_WHOAMI_VALUE 0x73
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 6f968ce687e1..b056f3fe2561 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -70,6 +70,7 @@ static const struct spi_device_id inv_mpu_id[] = {
{"mpu6000", INV_MPU6000},
{"mpu6500", INV_MPU6500},
{"mpu6515", INV_MPU6515},
+ {"mpu6880", INV_MPU6880},
{"mpu9250", INV_MPU9250},
{"mpu9255", INV_MPU9255},
{"icm20608", INV_ICM20608},
@@ -97,6 +98,10 @@ static const struct of_device_id inv_of_match[] = {
.data = (void *)INV_MPU6515
},
{
+ .compatible = "invensense,mpu6880",
+ .data = (void *)INV_MPU6880
+ },
+ {
.compatible = "invensense,mpu9250",
.data = (void *)INV_MPU9250
},
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index c2e4c267c36b..7db761afa578 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -169,6 +169,36 @@ static const char * const iio_chan_info_postfix[] = {
[IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
};
+/**
+ * iio_sysfs_match_string_with_gaps - matches given string in an array with gaps
+ * @array: array of strings
+ * @n: number of strings in the array
+ * @str: string to match with
+ *
+ * Returns index of @str in the @array or -EINVAL, similar to match_string().
+ * Uses sysfs_streq instead of strcmp for matching.
+ *
+ * This routine will look for a string in an array of strings.
+ * The search will continue until the element is found or the n-th element
+ * is reached, regardless of any NULL elements in the array.
+ */
+static int iio_sysfs_match_string_with_gaps(const char * const *array, size_t n,
+ const char *str)
+{
+ const char *item;
+ int index;
+
+ for (index = 0; index < n; index++) {
+ item = array[index];
+ if (!item)
+ continue;
+ if (sysfs_streq(item, str))
+ return index;
+ }
+
+ return -EINVAL;
+}
+
#if defined(CONFIG_DEBUG_FS)
/*
* There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for
@@ -470,8 +500,11 @@ ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
if (!e->num_items)
return 0;
- for (i = 0; i < e->num_items; ++i)
+ for (i = 0; i < e->num_items; ++i) {
+ if (!e->items[i])
+ continue;
len += scnprintf(buf + len, PAGE_SIZE - len, "%s ", e->items[i]);
+ }
/* replace last space with a newline */
buf[len - 1] = '\n';
@@ -492,7 +525,7 @@ ssize_t iio_enum_read(struct iio_dev *indio_dev,
i = e->get(indio_dev, chan);
if (i < 0)
return i;
- else if (i >= e->num_items)
+ else if (i >= e->num_items || !e->items[i])
return -EINVAL;
return snprintf(buf, PAGE_SIZE, "%s\n", e->items[i]);
@@ -509,7 +542,7 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
if (!e->set)
return -EINVAL;
- ret = __sysfs_match_string(e->items, e->num_items, buf);
+ ret = iio_sysfs_match_string_with_gaps(e->items, e->num_items, buf);
if (ret < 0)
return ret;
@@ -1473,11 +1506,14 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
goto error_clear_attrs;
}
/* Copy across original attributes */
- if (indio_dev->info->attrs)
+ if (indio_dev->info->attrs) {
memcpy(iio_dev_opaque->chan_attr_group.attrs,
indio_dev->info->attrs->attrs,
sizeof(iio_dev_opaque->chan_attr_group.attrs[0])
*attrcount_orig);
+ iio_dev_opaque->chan_attr_group.is_visible =
+ indio_dev->info->attrs->is_visible;
+ }
attrn = attrcount_orig;
/* Add all elements from the list. */
list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l)
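
With the gap-tolerant matcher, an iio_enum items table may contain NULL holes for reserved hardware values; __sysfs_match_string() stops at the first NULL, whereas the new helper and the read/available paths simply skip it. A hedged sketch of such a table (all foo_* names are illustrative, the callbacks are stubs):

#include <linux/iio/iio.h>
#include <linux/kernel.h>

static int foo_get_mode(struct iio_dev *indio_dev,
			const struct iio_chan_spec *chan)
{
	return 0;	/* stub: always report "normal" */
}

static int foo_set_mode(struct iio_dev *indio_dev,
			const struct iio_chan_spec *chan, unsigned int mode)
{
	return 0;	/* stub */
}

static const char * const foo_modes[] = {
	[0] = "normal",
	[1] = NULL,		/* reserved hardware value, not exposed */
	[2] = "low_power",
};

static const struct iio_enum foo_mode_enum = {
	.items = foo_modes,
	.num_items = ARRAY_SIZE(foo_modes),
	.get = foo_get_mode,
	.set = foo_set_mode,
};
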
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index fe30bcb6a57b..db77a2d4a56b 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -191,8 +191,8 @@ err_free_channel:
return ERR_PTR(err);
}
-static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
- const char *name)
+struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
+ const char *name)
{
struct iio_channel *chan = NULL;
@@ -230,6 +230,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
return chan;
}
+EXPORT_SYMBOL_GPL(of_iio_channel_get_by_name);
static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
@@ -272,12 +273,6 @@ error_free_chans:
#else /* CONFIG_OF */
-static inline struct iio_channel *
-of_iio_channel_get_by_name(struct device_node *np, const char *name)
-{
- return NULL;
-}
-
static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
return NULL;
@@ -393,6 +388,29 @@ struct iio_channel *devm_iio_channel_get(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
+struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev,
+ struct device_node *np,
+ const char *channel_name)
+{
+ struct iio_channel **ptr, *channel;
+
+ ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ channel = of_iio_channel_get_by_name(np, channel_name);
+ if (IS_ERR(channel)) {
+ devres_free(ptr);
+ return channel;
+ }
+
+ *ptr = channel;
+ devres_add(dev, ptr);
+
+ return channel;
+}
+EXPORT_SYMBOL_GPL(devm_of_iio_channel_get_by_name);
+
struct iio_channel *iio_channel_get_all(struct device *dev)
{
const char *name;
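
devm_of_iio_channel_get_by_name() lets a consumer grab a named channel from an arbitrary device node (not just dev->of_node) with managed lifetime. A short usage sketch; the node and "temp" channel name are placeholders:

#include <linux/err.h>
#include <linux/iio/consumer.h>
#include <linux/of.h>

static int foo_get_temp_channel(struct device *dev,
				struct device_node *battery_np,
				struct iio_channel **chan)
{
	*chan = devm_of_iio_channel_get_by_name(dev, battery_np, "temp");
	if (IS_ERR(*chan))
		return PTR_ERR(*chan);

	/* Released automatically when dev is unbound */
	return 0;
}
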
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 547e7f9d6920..df0647856e5d 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -8,6 +8,7 @@
* TODO: gesture + proximity calib offsets
*/
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -1113,6 +1114,12 @@ static const struct i2c_device_id apds9960_id[] = {
};
MODULE_DEVICE_TABLE(i2c, apds9960_id);
+static const struct acpi_device_id apds9960_acpi_match[] = {
+ { "MSHW0184" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, apds9960_acpi_match);
+
static const struct of_device_id apds9960_of_match[] = {
{ .compatible = "avago,apds9960" },
{ }
@@ -1124,6 +1131,7 @@ static struct i2c_driver apds9960_driver = {
.name = APDS9960_DRV_NAME,
.of_match_table = apds9960_of_match,
.pm = &apds9960_pm_ops,
+ .acpi_match_table = apds9960_acpi_match,
},
.probe = apds9960_probe,
.remove = apds9960_remove,
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index a21c827e4953..4093f2353d95 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -22,15 +22,21 @@ enum {
CHANNEL_SCAN_INDEX_MAX
};
+#define CHANNEL_SCAN_INDEX_TIMESTAMP CHANNEL_SCAN_INDEX_MAX
+
struct als_state {
struct hid_sensor_hub_callbacks callbacks;
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info als_illum;
- u32 illum[CHANNEL_SCAN_INDEX_MAX];
+ struct {
+ u32 illum[CHANNEL_SCAN_INDEX_MAX];
+ u64 timestamp __aligned(8);
+ } scan;
int scale_pre_decml;
int scale_post_decml;
int scale_precision;
int value_offset;
+ s64 timestamp;
};
/* Channel definitions */
@@ -54,7 +60,8 @@ static const struct iio_chan_spec als_channels[] = {
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS),
.scan_index = CHANNEL_SCAN_INDEX_ILLUM,
- }
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP)
};
/* Adjust channel real bits based on report descriptor */
@@ -168,14 +175,6 @@ static const struct iio_info als_info = {
.write_raw = &als_write_raw,
};
-/* Function to push data to buffer */
-static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data,
- int len)
-{
- dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
- iio_push_to_buffers(indio_dev, data);
-}
-
/* Callback handler to send event after all samples are received and captured */
static int als_proc_event(struct hid_sensor_hub_device *hsdev,
unsigned usage_id,
@@ -185,10 +184,14 @@ static int als_proc_event(struct hid_sensor_hub_device *hsdev,
struct als_state *als_state = iio_priv(indio_dev);
dev_dbg(&indio_dev->dev, "als_proc_event\n");
- if (atomic_read(&als_state->common_attributes.data_ready))
- hid_sensor_push_data(indio_dev,
- &als_state->illum,
- sizeof(als_state->illum));
+ if (atomic_read(&als_state->common_attributes.data_ready)) {
+ if (!als_state->timestamp)
+ als_state->timestamp = iio_get_time_ns(indio_dev);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &als_state->scan,
+ als_state->timestamp);
+ als_state->timestamp = 0;
+ }
return 0;
}
@@ -206,10 +209,14 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev,
switch (usage_id) {
case HID_USAGE_SENSOR_LIGHT_ILLUM:
- als_state->illum[CHANNEL_SCAN_INDEX_INTENSITY] = sample_data;
- als_state->illum[CHANNEL_SCAN_INDEX_ILLUM] = sample_data;
+ als_state->scan.illum[CHANNEL_SCAN_INDEX_INTENSITY] = sample_data;
+ als_state->scan.illum[CHANNEL_SCAN_INDEX_ILLUM] = sample_data;
ret = 0;
break;
+ case HID_USAGE_SENSOR_TIME_TIMESTAMP:
+ als_state->timestamp = hid_sensor_convert_timestamp(&als_state->common_attributes,
+ *(s64 *)raw_data);
+ break;
default:
break;
}
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index 9e5490b7473b..0f787bfc88fc 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -285,7 +285,7 @@ static int tsl2583_get_lux(struct iio_dev *indio_dev)
lux64 = lux64 * chip->als_settings.als_gain_trim;
lux64 >>= 13;
lux = lux64;
- lux = (lux + 500) / 1000;
+ lux = DIV_ROUND_CLOSEST(lux, 1000);
if (lux > TSL2583_LUX_CALC_OVER_FLOW) { /* check for overflow */
return_max:
@@ -361,12 +361,12 @@ static int tsl2583_set_als_time(struct tsl2583_chip *chip)
u8 val;
/* determine als integration register */
- als_count = (chip->als_settings.als_time * 100 + 135) / 270;
+ als_count = DIV_ROUND_CLOSEST(chip->als_settings.als_time * 100, 270);
if (!als_count)
als_count = 1; /* ensure at least one cycle */
/* convert back to time (encompasses overrides) */
- als_time = (als_count * 27 + 5) / 10;
+ als_time = DIV_ROUND_CLOSEST(als_count * 27, 10);
val = 256 - als_count;
ret = i2c_smbus_write_byte_data(chip->client,
@@ -380,7 +380,7 @@ static int tsl2583_set_als_time(struct tsl2583_chip *chip)
/* set chip struct re scaling and saturation */
chip->als_saturation = als_count * 922; /* 90% of full scale */
- chip->als_time_scale = (als_time + 25) / 50;
+ chip->als_time_scale = DIV_ROUND_CLOSEST(als_time, 50);
return ret;
}
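
For non-negative operands DIV_ROUND_CLOSEST() expands to the same arithmetic as the open-coded (x + 500) / 1000 form it replaces, but states the intent and also copes with negative dividends. A tiny worked sketch:

#include <linux/kernel.h>

/* (1499 + 500) / 1000 == 1 and DIV_ROUND_CLOSEST(1499, 1000) == 1;
 * (1500 + 500) / 1000 == 2 and DIV_ROUND_CLOSEST(1500, 1000) == 2.
 */
static inline unsigned int example_round_to_thousands(unsigned int val)
{
	return DIV_ROUND_CLOSEST(val, 1000);
}
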
diff --git a/drivers/iio/light/vl6180.c b/drivers/iio/light/vl6180.c
index 4775bd785e50..d47a4f6f4e87 100644
--- a/drivers/iio/light/vl6180.c
+++ b/drivers/iio/light/vl6180.c
@@ -392,7 +392,7 @@ static int vl6180_set_it(struct vl6180_data *data, int val, int val2)
{
int ret, it_ms;
- it_ms = (val2 + 500) / 1000; /* round to ms */
+ it_ms = DIV_ROUND_CLOSEST(val2, 1000); /* round to ms */
if (val != 0 || it_ms < 1 || it_ms > 512)
return -EINVAL;
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 1697a8c03506..5d4ffd66032e 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -205,4 +205,19 @@ config SENSORS_RM3100_SPI
To compile this driver as a module, choose M here: the module
will be called rm3100-spi.
+config YAMAHA_YAS530
+ tristate "Yamaha YAS530 family of 3-Axis Magnetometers (I2C)"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say Y here to add support for the Yamaha YAS530 series of
+ 3-Axis Magnetometers. Right now YAS530, YAS532 and YAS533 are
+ fully supported.
+
+ This driver can also be compiled as a module.
+ To compile this driver as a module, choose M here: the module
+ will be called yamaha-yas.
+
endmenu
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index ba1bc34b82fa..b9f45b7fafc3 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -28,3 +28,5 @@ obj-$(CONFIG_SENSORS_HMC5843_SPI) += hmc5843_spi.o
obj-$(CONFIG_SENSORS_RM3100) += rm3100-core.o
obj-$(CONFIG_SENSORS_RM3100_I2C) += rm3100-i2c.o
obj-$(CONFIG_SENSORS_RM3100_SPI) += rm3100-spi.o
+
+obj-$(CONFIG_YAMAHA_YAS530) += yamaha-yas530.o
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index fa09fcab620a..b2f3129e1b4f 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -25,6 +25,7 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include "bmc150_magn.h"
@@ -135,6 +136,7 @@ struct bmc150_magn_data {
*/
struct mutex mutex;
struct regmap *regmap;
+ struct regulator_bulk_data regulators[2];
struct iio_mount_matrix orientation;
/* 4 x 32 bits for x, y z, 4 bytes align, 64 bits timestamp */
s32 buffer[6];
@@ -692,12 +694,24 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
int ret, chip_id;
struct bmc150_magn_preset preset;
+ ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret < 0) {
+ dev_err(data->dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+ /*
+ * The datasheet specifies a 3 ms power-on time; to be on the
+ * safe side, set this delay to 5 ms.
+ */
+ msleep(5);
+
ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND,
false);
if (ret < 0) {
dev_err(data->dev,
"Failed to bring up device from suspend mode\n");
- return ret;
+ goto err_regulator_disable;
}
ret = regmap_read(data->regmap, BMC150_MAGN_REG_CHIP_ID, &chip_id);
@@ -752,6 +766,8 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
err_poweroff:
bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND, true);
+err_regulator_disable:
+ regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators);
return ret;
}
@@ -867,6 +883,13 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap,
data->irq = irq;
data->dev = dev;
+ data->regulators[0].supply = "vdd";
+ data->regulators[1].supply = "vddio";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->regulators),
+ data->regulators);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
ret = iio_read_mount_matrix(dev, "mount-matrix",
&data->orientation);
if (ret)
@@ -984,6 +1007,7 @@ int bmc150_magn_remove(struct device *dev)
bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND, true);
mutex_unlock(&data->mutex);
+ regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators);
return 0;
}
EXPORT_SYMBOL(bmc150_magn_remove);
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 97642ebd9168..fa48044b7f5b 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -24,6 +24,7 @@ enum magn_3d_channel {
CHANNEL_SCAN_INDEX_NORTH_TRUE_TILT_COMP,
CHANNEL_SCAN_INDEX_NORTH_MAGN,
CHANNEL_SCAN_INDEX_NORTH_TRUE,
+ CHANNEL_SCAN_INDEX_TIMESTAMP,
MAGN_3D_CHANNEL_MAX,
};
@@ -47,6 +48,7 @@ struct magn_3d_state {
struct common_attributes magn_flux_attr;
struct common_attributes rot_attr;
+ s64 timestamp;
};
static const u32 magn_3d_addresses[MAGN_3D_CHANNEL_MAX] = {
@@ -57,6 +59,7 @@ static const u32 magn_3d_addresses[MAGN_3D_CHANNEL_MAX] = {
HID_USAGE_SENSOR_ORIENT_COMP_TRUE_NORTH,
HID_USAGE_SENSOR_ORIENT_MAGN_NORTH,
HID_USAGE_SENSOR_ORIENT_TRUE_NORTH,
+ HID_USAGE_SENSOR_TIME_TIMESTAMP,
};
/* Channel definitions */
@@ -124,7 +127,8 @@ static const struct iio_chan_spec magn_3d_channels[] = {
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS),
- }
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(7)
};
/* Adjust channel real bits based on report descriptor */
@@ -273,13 +277,6 @@ static const struct iio_info magn_3d_info = {
.write_raw = &magn_3d_write_raw,
};
-/* Function to push data to buffer */
-static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data)
-{
- dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
- iio_push_to_buffers(indio_dev, data);
-}
-
/* Callback handler to send event after all samples are received and captured */
static int magn_3d_proc_event(struct hid_sensor_hub_device *hsdev,
unsigned usage_id,
@@ -289,8 +286,15 @@ static int magn_3d_proc_event(struct hid_sensor_hub_device *hsdev,
struct magn_3d_state *magn_state = iio_priv(indio_dev);
dev_dbg(&indio_dev->dev, "magn_3d_proc_event\n");
- if (atomic_read(&magn_state->magn_flux_attributes.data_ready))
- hid_sensor_push_data(indio_dev, magn_state->iio_vals);
+ if (atomic_read(&magn_state->magn_flux_attributes.data_ready)) {
+ if (!magn_state->timestamp)
+ magn_state->timestamp = iio_get_time_ns(indio_dev);
+
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ magn_state->iio_vals,
+ magn_state->timestamp);
+ magn_state->timestamp = 0;
+ }
return 0;
}
@@ -321,6 +325,11 @@ static int magn_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
offset = (usage_id - HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH)
+ CHANNEL_SCAN_INDEX_NORTH_MAGN_TILT_COMP;
break;
+ case HID_USAGE_SENSOR_TIME_TIMESTAMP:
+ magn_state->timestamp =
+ hid_sensor_convert_timestamp(&magn_state->magn_flux_attributes,
+ *(s64 *)raw_data);
+ return ret;
default:
return -EINVAL;
}
@@ -386,9 +395,10 @@ static int magn_3d_parse_report(struct platform_device *pdev,
return -ENOMEM;
}
- st->iio_vals = devm_kcalloc(&pdev->dev, attr_count,
- sizeof(u32),
- GFP_KERNEL);
+ /* attr_count includes the timestamp channel, and iio_vals must be 8-byte aligned */
+ st->iio_vals = devm_kcalloc(&pdev->dev,
+ ((attr_count + 1) % 2 + (attr_count + 1) / 2) * 2,
+ sizeof(u32), GFP_KERNEL);
if (!st->iio_vals) {
dev_err(&pdev->dev,
"failed to allocate space for iio values array\n");
@@ -404,11 +414,13 @@ static int magn_3d_parse_report(struct platform_device *pdev,
(_channels[*chan_count]).scan_index = *chan_count;
(_channels[*chan_count]).address = i;
- /* Set magn_val_addr to iio value address */
- st->magn_val_addr[i] = &(st->iio_vals[*chan_count]);
- magn_3d_adjust_channel_bit_mask(_channels,
- *chan_count,
- st->magn[i].size);
+ if (i != CHANNEL_SCAN_INDEX_TIMESTAMP) {
+ /* Set magn_val_addr to iio value address */
+ st->magn_val_addr[i] = &st->iio_vals[*chan_count];
+ magn_3d_adjust_channel_bit_mask(_channels,
+ *chan_count,
+ st->magn[i].size);
+ }
(*chan_count)++;
}
}
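
The reworked allocation rounds the channel count (now including the software timestamp channel) up to an even number of u32 entries, so that the s64 timestamp written by iio_push_to_buffers_with_timestamp() lands on an 8-byte boundary. The expression is a round-up to a multiple of two, sketched here with a hypothetical helper:

#include <linux/kernel.h>

/* ((attr_count + 1) % 2 + (attr_count + 1) / 2) * 2 rounds the number of
 * u32 slots (values plus timestamp channel) up to the next even count,
 * i.e. it is equivalent to round_up(attr_count + 1, 2).
 */
static inline size_t example_scan_slots(unsigned int attr_count)
{
	return round_up(attr_count + 1, 2);
}
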
diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
new file mode 100644
index 000000000000..d46f23d82b3d
--- /dev/null
+++ b/drivers/iio/magnetometer/yamaha-yas530.c
@@ -0,0 +1,1049 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the Yamaha YAS magnetic sensors, often used in Samsung
+ * mobile phones. Not all variants are handled yet for lack of
+ * hardware; expand this driver to handle the different variants:
+ *
+ * YAS530 MS-3E (2011 Samsung Galaxy S Advance)
+ * YAS532 MS-3R (2011 Samsung Galaxy S4)
+ * YAS533 MS-3F (Vivo 1633, 1707, V3, Y21L)
+ * (YAS534 is a magnetic switch, not handled)
+ * YAS535 MS-6C
+ * YAS536 MS-3W
+ * YAS537 MS-3T (2015 Samsung Galaxy S6, Note 5, Xiaomi)
+ * YAS539 MS-3S (2018 Samsung Galaxy A7 SM-A750FN)
+ *
+ * Based on code functions found in the MPU3050 YAS530 and YAS532 drivers
+ * named "inv_compass" in the Tegra Android kernel tree.
+ * Copyright (C) 2012 InvenSense Corporation
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ */
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/random.h>
+#include <linux/unaligned/be_byteshift.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+/* This register map covers YAS530 and YAS532 but differs in YAS537 and YAS539 */
+#define YAS5XX_DEVICE_ID 0x80
+#define YAS5XX_ACTUATE_INIT_COIL 0x81
+#define YAS5XX_MEASURE 0x82
+#define YAS5XX_CONFIG 0x83
+#define YAS5XX_MEASURE_INTERVAL 0x84
+#define YAS5XX_OFFSET_X 0x85 /* [-31 .. 31] */
+#define YAS5XX_OFFSET_Y1 0x86 /* [-31 .. 31] */
+#define YAS5XX_OFFSET_Y2 0x87 /* [-31 .. 31] */
+#define YAS5XX_TEST1 0x88
+#define YAS5XX_TEST2 0x89
+#define YAS5XX_CAL 0x90
+#define YAS5XX_MEASURE_DATA 0xB0
+
+/* Bits in the YAS5xx config register */
+#define YAS5XX_CONFIG_INTON BIT(0) /* Interrupt on? */
+#define YAS5XX_CONFIG_INTHACT BIT(1) /* Interrupt active high? */
+#define YAS5XX_CONFIG_CCK_MASK GENMASK(4, 2)
+#define YAS5XX_CONFIG_CCK_SHIFT 2
+
+/* Bits in the measure command register */
+#define YAS5XX_MEASURE_START BIT(0)
+#define YAS5XX_MEASURE_LDTC BIT(1)
+#define YAS5XX_MEASURE_FORS BIT(2)
+#define YAS5XX_MEASURE_DLYMES BIT(4)
+
+/* Bits in the measure data register */
+#define YAS5XX_MEASURE_DATA_BUSY BIT(7)
+
+#define YAS530_DEVICE_ID 0x01 /* YAS530 (MS-3E) */
+#define YAS530_VERSION_A 0 /* YAS530 (MS-3E A) */
+#define YAS530_VERSION_B 1 /* YAS530B (MS-3E B) */
+#define YAS530_VERSION_A_COEF 380
+#define YAS530_VERSION_B_COEF 550
+#define YAS530_DATA_BITS 12
+#define YAS530_DATA_CENTER BIT(YAS530_DATA_BITS - 1)
+#define YAS530_DATA_OVERFLOW (BIT(YAS530_DATA_BITS) - 1)
+
+#define YAS532_DEVICE_ID 0x02 /* YAS532/YAS533 (MS-3R/F) */
+#define YAS532_VERSION_AB 0 /* YAS532/533 AB (MS-3R/F AB) */
+#define YAS532_VERSION_AC 1 /* YAS532/533 AC (MS-3R/F AC) */
+#define YAS532_VERSION_AB_COEF 1800
+#define YAS532_VERSION_AC_COEF_X 850
+#define YAS532_VERSION_AC_COEF_Y1 750
+#define YAS532_VERSION_AC_COEF_Y2 750
+#define YAS532_DATA_BITS 13
+#define YAS532_DATA_CENTER BIT(YAS532_DATA_BITS - 1)
+#define YAS532_DATA_OVERFLOW (BIT(YAS532_DATA_BITS) - 1)
+#define YAS532_20DEGREES 390 /* Looks like Kelvin */
+
+/* These variant IDs are known from code dumps */
+#define YAS537_DEVICE_ID 0x07 /* YAS537 (MS-3T) */
+#define YAS539_DEVICE_ID 0x08 /* YAS539 (MS-3S) */
+
+/* Turn off device regulators etc after 5 seconds of inactivity */
+#define YAS5XX_AUTOSUSPEND_DELAY_MS 5000
+
+struct yas5xx_calibration {
+ /* Linearization calibration x, y1, y2 */
+ s32 r[3];
+ u32 f[3];
+ /* Temperature compensation calibration */
+ s32 Cx, Cy1, Cy2;
+ /* Misc calibration coefficients */
+ s32 a2, a3, a4, a5, a6, a7, a8, a9, k;
+ /* clock divider */
+ u8 dck;
+};
+
+/**
+ * struct yas5xx - state container for the YAS5xx driver
+ * @dev: parent device pointer
+ * @devid: device ID number
+ * @version: device version
+ * @name: device name
+ * @calibration: calibration settings from the OTP storage
+ * @hard_offsets: offsets for each axis measured with initcoil actuated
+ * @orientation: mounting matrix, flipped axis etc
+ * @map: regmap to access the YAX5xx registers over I2C
+ * @regs: the vdd and iovdd power regulators
+ * @reset: optional GPIO line used for handling RESET
+ * @lock: locks the magnetometer for exclusive use during a measurement (which
+ * involves several register transactions so the regmap lock is not enough)
+ * so that measurements get serialized in a first-come, first-served manner
+ * @scan: naturally aligned measurements
+ */
+struct yas5xx {
+ struct device *dev;
+ unsigned int devid;
+ unsigned int version;
+ char name[16];
+ struct yas5xx_calibration calibration;
+ u8 hard_offsets[3];
+ struct iio_mount_matrix orientation;
+ struct regmap *map;
+ struct regulator_bulk_data regs[2];
+ struct gpio_desc *reset;
+ struct mutex lock;
+ /*
+ * The scanout is 4 x 32 bits in CPU endianness.
+ * Ensure timestamp is naturally aligned
+ */
+ struct {
+ s32 channels[4];
+ s64 ts __aligned(8);
+ } scan;
+};
+
+/* On YAS530 the x, y1 and y2 values are 12 bits */
+static u16 yas530_extract_axis(u8 *data)
+{
+ u16 val;
+
+ /*
+ * These are the bits used in a 16bit word:
+ * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ * x x x x x x x x x x x x
+ */
+ val = get_unaligned_be16(&data[0]);
+ val = FIELD_GET(GENMASK(14, 3), val);
+ return val;
+}
+
+/* On YAS532 the x, y1 and y2 values are 13 bits */
+static u16 yas532_extract_axis(u8 *data)
+{
+ u16 val;
+
+ /*
+ * These are the bits used in a 16bit word:
+ * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ * x x x x x x x x x x x x x
+ */
+ val = get_unaligned_be16(&data[0]);
+ val = FIELD_GET(GENMASK(14, 2), val);
+ return val;
+}
+
+/**
+ * yas5xx_measure() - Make a measure from the hardware
+ * @yas5xx: The device state
+ * @t: the raw temperature measurement
+ * @x: the raw x axis measurement
+ * @y1: the y1 axis measurement
+ * @y2: the y2 axis measurement
+ * @return: 0 on success or error code
+ */
+static int yas5xx_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y2)
+{
+ unsigned int busy;
+ u8 data[8];
+ int ret;
+ u16 val;
+
+ mutex_lock(&yas5xx->lock);
+ ret = regmap_write(yas5xx->map, YAS5XX_MEASURE, YAS5XX_MEASURE_START);
+ if (ret < 0)
+ goto out_unlock;
+
+ /*
+ * Typical measurement time is 1500 us, max 2000 us, so wait at
+ * least 500 us and at most 20000 us (an order of magnitude more
+ * than the datasheet max) before timing out.
+ */
+ ret = regmap_read_poll_timeout(yas5xx->map, YAS5XX_MEASURE_DATA, busy,
+ !(busy & YAS5XX_MEASURE_DATA_BUSY),
+ 500, 20000);
+ if (ret) {
+ dev_err(yas5xx->dev, "timeout waiting for measurement\n");
+ goto out_unlock;
+ }
+
+ ret = regmap_bulk_read(yas5xx->map, YAS5XX_MEASURE_DATA,
+ data, sizeof(data));
+ if (ret)
+ goto out_unlock;
+
+ mutex_unlock(&yas5xx->lock);
+
+ switch (yas5xx->devid) {
+ case YAS530_DEVICE_ID:
+ /*
+ * The t value is 9 bits in big endian format
+ * These are the bits used in a 16bit word:
+ * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ * x x x x x x x x x
+ */
+ val = get_unaligned_be16(&data[0]);
+ val = FIELD_GET(GENMASK(14, 6), val);
+ *t = val;
+ *x = yas530_extract_axis(&data[2]);
+ *y1 = yas530_extract_axis(&data[4]);
+ *y2 = yas530_extract_axis(&data[6]);
+ break;
+ case YAS532_DEVICE_ID:
+ /*
+ * The t value is 10 bits in big endian format
+ * These are the bits used in a 16bit word:
+ * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ * x x x x x x x x x x
+ */
+ val = get_unaligned_be16(&data[0]);
+ val = FIELD_GET(GENMASK(14, 5), val);
+ *t = val;
+ *x = yas532_extract_axis(&data[2]);
+ *y1 = yas532_extract_axis(&data[4]);
+ *y2 = yas532_extract_axis(&data[6]);
+ break;
+ default:
+ dev_err(yas5xx->dev, "unknown data format\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+
+out_unlock:
+ mutex_unlock(&yas5xx->lock);
+ return ret;
+}
+
+static s32 yas5xx_linearize(struct yas5xx *yas5xx, u16 val, int axis)
+{
+ struct yas5xx_calibration *c = &yas5xx->calibration;
+ static const s32 yas532ac_coef[] = {
+ YAS532_VERSION_AC_COEF_X,
+ YAS532_VERSION_AC_COEF_Y1,
+ YAS532_VERSION_AC_COEF_Y2,
+ };
+ s32 coef;
+
+ /* Select coefficients */
+ switch (yas5xx->devid) {
+ case YAS530_DEVICE_ID:
+ if (yas5xx->version == YAS530_VERSION_A)
+ coef = YAS530_VERSION_A_COEF;
+ else
+ coef = YAS530_VERSION_B_COEF;
+ break;
+ case YAS532_DEVICE_ID:
+ if (yas5xx->version == YAS532_VERSION_AB)
+ coef = YAS532_VERSION_AB_COEF;
+ else
+ /* Elaborate coefficients */
+ coef = yas532ac_coef[axis];
+ break;
+ default:
+ dev_err(yas5xx->dev, "unknown device type\n");
+ return val;
+ }
+ /*
+ * Linearization formula:
+ *
+ * x' = x - (3721 + 50 * f) + (xoffset - r) * c
+ *
+ * Where f and r are calibration values, c is a per-device
+ * and sometimes per-axis coefficient.
+ */
+ return val - (3721 + 50 * c->f[axis]) +
+ (yas5xx->hard_offsets[axis] - c->r[axis]) * coef;
+}
+
+/**
+ * yas5xx_get_measure() - Measure a sample of all axis and process
+ * @yas5xx: The device state
+ * @to: Temperature out
+ * @xo: X axis out
+ * @yo: Y axis out
+ * @zo: Z axis out
+ * @return: 0 on success or error code
+ *
+ * Returned values are in nanotesla according to some code.
+ */
+static int yas5xx_get_measure(struct yas5xx *yas5xx, s32 *to, s32 *xo, s32 *yo, s32 *zo)
+{
+ struct yas5xx_calibration *c = &yas5xx->calibration;
+ u16 t, x, y1, y2;
+ /* These are "signed x, signed y1 etc */
+ s32 sx, sy1, sy2, sy, sz;
+ int ret;
+
+ /* We first get raw data that needs to be translated to [x,y,z] */
+ ret = yas5xx_measure(yas5xx, &t, &x, &y1, &y2);
+ if (ret)
+ return ret;
+
+ /* Do some linearization if available */
+ sx = yas5xx_linearize(yas5xx, x, 0);
+ sy1 = yas5xx_linearize(yas5xx, y1, 1);
+ sy2 = yas5xx_linearize(yas5xx, y2, 2);
+
+ /*
+ * Temperature compensation for x, y1, y2 respectively:
+ *
+ * Cx * t
+ * x' = x - ------
+ * 100
+ */
+ sx = sx - (c->Cx * t) / 100;
+ sy1 = sy1 - (c->Cy1 * t) / 100;
+ sy2 = sy2 - (c->Cy2 * t) / 100;
+
+ /*
+ * Break y1 and y2 into y and z, y1 and y2 are apparently encoding
+ * y and z.
+ */
+ sy = sy1 - sy2;
+ sz = -sy1 - sy2;
+
+ /*
+ * FIXME: convert to Celsius? Just guessing this is given
+ * as tenths of a degree, so multiply by 100 to get millidegrees.
+ */
+ *to = t * 100;
+ /*
+ * Calibrate [x,y,z] with some formulas like this:
+ *
+ * 100 * x + a_2 * y + a_3 * z
+ * x' = k * ---------------------------
+ * 10
+ *
+ * a_4 * x + a_5 * y + a_6 * z
+ * y' = k * ---------------------------
+ * 10
+ *
+ * a_7 * x + a_8 * y + a_9 * z
+ * z' = k * ---------------------------
+ * 10
+ */
+ *xo = c->k * ((100 * sx + c->a2 * sy + c->a3 * sz) / 10);
+ *yo = c->k * ((c->a4 * sx + c->a5 * sy + c->a6 * sz) / 10);
+ *zo = c->k * ((c->a7 * sx + c->a8 * sy + c->a9 * sz) / 10);
+
+ return 0;
+}
+
+static int yas5xx_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct yas5xx *yas5xx = iio_priv(indio_dev);
+ s32 t, x, y, z;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ pm_runtime_get_sync(yas5xx->dev);
+ ret = yas5xx_get_measure(yas5xx, &t, &x, &y, &z);
+ pm_runtime_mark_last_busy(yas5xx->dev);
+ pm_runtime_put_autosuspend(yas5xx->dev);
+ if (ret)
+ return ret;
+ switch (chan->address) {
+ case 0:
+ *val = t;
+ break;
+ case 1:
+ *val = x;
+ break;
+ case 2:
+ *val = y;
+ break;
+ case 3:
+ *val = z;
+ break;
+ default:
+ dev_err(yas5xx->dev, "unknown channel\n");
+ return -EINVAL;
+ }
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->address == 0) {
+ /* Temperature is unscaled */
+ *val = 1;
+ return IIO_VAL_INT;
+ }
+ /*
+ * The axis values are in nanotesla according to the vendor
+ * drivers, but are clearly in microtesla according to
+ * experiments. Since 1 uT = 0.01 Gauss, we need to divide
+ * by 100000000 (10^8) to get to Gauss from the raw value.
+ */
+ *val = 1;
+ *val2 = 100000000;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ /* Unknown request */
+ return -EINVAL;
+ }
+}
+
+static void yas5xx_fill_buffer(struct iio_dev *indio_dev)
+{
+ struct yas5xx *yas5xx = iio_priv(indio_dev);
+ s32 t, x, y, z;
+ int ret;
+
+ pm_runtime_get_sync(yas5xx->dev);
+ ret = yas5xx_get_measure(yas5xx, &t, &x, &y, &z);
+ pm_runtime_mark_last_busy(yas5xx->dev);
+ pm_runtime_put_autosuspend(yas5xx->dev);
+ if (ret) {
+ dev_err(yas5xx->dev, "error refilling buffer\n");
+ return;
+ }
+ yas5xx->scan.channels[0] = t;
+ yas5xx->scan.channels[1] = x;
+ yas5xx->scan.channels[2] = y;
+ yas5xx->scan.channels[3] = z;
+ iio_push_to_buffers_with_timestamp(indio_dev, &yas5xx->scan,
+ iio_get_time_ns(indio_dev));
+}
+
+static irqreturn_t yas5xx_handle_trigger(int irq, void *p)
+{
+ const struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+
+ yas5xx_fill_buffer(indio_dev);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+
+static const struct iio_mount_matrix *
+yas5xx_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct yas5xx *yas5xx = iio_priv(indio_dev);
+
+ return &yas5xx->orientation;
+}
+
+static const struct iio_chan_spec_ext_info yas5xx_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, yas5xx_get_mount_matrix),
+ { }
+};
+
+#define YAS5XX_AXIS_CHANNEL(axis, index) \
+ { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = yas5xx_ext_info, \
+ .address = index, \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_CPU, \
+ }, \
+ }
+
+static const struct iio_chan_spec yas5xx_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .address = 0,
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ },
+ YAS5XX_AXIS_CHANNEL(X, 1),
+ YAS5XX_AXIS_CHANNEL(Y, 2),
+ YAS5XX_AXIS_CHANNEL(Z, 3),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static const unsigned long yas5xx_scan_masks[] = { GENMASK(3, 0), 0 };
+
+static const struct iio_info yas5xx_info = {
+ .read_raw = &yas5xx_read_raw,
+};
+
+static bool yas5xx_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return reg == YAS5XX_ACTUATE_INIT_COIL ||
+ reg == YAS5XX_MEASURE ||
+ (reg >= YAS5XX_MEASURE_DATA && reg <= YAS5XX_MEASURE_DATA + 8);
+}
+
+/* TODO: enable regmap cache, using mark dirty and sync at runtime resume */
+static const struct regmap_config yas5xx_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .volatile_reg = yas5xx_volatile_reg,
+};
+
+/**
+ * yas53x_extract_calibration() - extracts the a2-a9 and k calibration
+ * @data: the bitfield to use
+ * @c: the calibration to populate
+ */
+static void yas53x_extract_calibration(u8 *data, struct yas5xx_calibration *c)
+{
+ u64 val = get_unaligned_be64(data);
+
+ /*
+ * Bitfield layout for the axis calibration data, for factor
+ * a2 = 2 etc, k = k, c = clock divider
+ *
+ * n 7 6 5 4 3 2 1 0
+ * 0 [ 2 2 2 2 2 2 3 3 ] bits 63 .. 56
+ * 1 [ 3 3 4 4 4 4 4 4 ] bits 55 .. 48
+ * 2 [ 5 5 5 5 5 5 6 6 ] bits 47 .. 40
+ * 3 [ 6 6 6 6 7 7 7 7 ] bits 39 .. 32
+ * 4 [ 7 7 7 8 8 8 8 8 ] bits 31 .. 24
+ * 5 [ 8 9 9 9 9 9 9 9 ] bits 23 .. 16
+ * 6 [ 9 k k k k k c c ] bits 15 .. 8
+ * 7 [ c x x x x x x x ] bits 7 .. 0
+ */
+ c->a2 = FIELD_GET(GENMASK_ULL(63, 58), val) - 32;
+ c->a3 = FIELD_GET(GENMASK_ULL(57, 54), val) - 8;
+ c->a4 = FIELD_GET(GENMASK_ULL(53, 48), val) - 32;
+ c->a5 = FIELD_GET(GENMASK_ULL(47, 42), val) + 38;
+ c->a6 = FIELD_GET(GENMASK_ULL(41, 36), val) - 32;
+ c->a7 = FIELD_GET(GENMASK_ULL(35, 29), val) - 64;
+ c->a8 = FIELD_GET(GENMASK_ULL(28, 23), val) - 32;
+ c->a9 = FIELD_GET(GENMASK_ULL(22, 15), val);
+ c->k = FIELD_GET(GENMASK_ULL(14, 10), val) + 10;
+ c->dck = FIELD_GET(GENMASK_ULL(9, 7), val);
+}
+
+static int yas530_get_calibration_data(struct yas5xx *yas5xx)
+{
+ struct yas5xx_calibration *c = &yas5xx->calibration;
+ u8 data[16];
+ u32 val;
+ int ret;
+
+ /* Dummy read, first read is ALWAYS wrong */
+ ret = regmap_bulk_read(yas5xx->map, YAS5XX_CAL, data, sizeof(data));
+ if (ret)
+ return ret;
+
+ /* Actual calibration readout */
+ ret = regmap_bulk_read(yas5xx->map, YAS5XX_CAL, data, sizeof(data));
+ if (ret)
+ return ret;
+ dev_dbg(yas5xx->dev, "calibration data: %*ph\n", 14, data);
+
+ add_device_randomness(data, sizeof(data));
+ yas5xx->version = data[15] & GENMASK(1, 0);
+
+ /* Extract the calibration from the bitfield */
+ c->Cx = data[0] * 6 - 768;
+ c->Cy1 = data[1] * 6 - 768;
+ c->Cy2 = data[2] * 6 - 768;
+ yas53x_extract_calibration(&data[3], c);
+
+ /*
+ * Extract linearization:
+ * Linearization layout in the 32 bits at byte 11:
+ * The r factors are 6 bit values where bit 5 is the sign
+ *
+ * n 7 6 5 4 3 2 1 0
+ * 0 [ xx xx xx r0 r0 r0 r0 r0 ] bits 31 .. 24
+ * 1 [ r0 f0 f0 r1 r1 r1 r1 r1 ] bits 23 .. 16
+ * 2 [ r1 f1 f1 r2 r2 r2 r2 r2 ] bits 15 .. 8
+ * 3 [ r2 f2 f2 xx xx xx xx xx ] bits 7 .. 0
+ */
+ val = get_unaligned_be32(&data[11]);
+ c->f[0] = FIELD_GET(GENMASK(22, 21), val);
+ c->f[1] = FIELD_GET(GENMASK(14, 13), val);
+ c->f[2] = FIELD_GET(GENMASK(6, 5), val);
+ c->r[0] = sign_extend32(FIELD_GET(GENMASK(28, 23), val), 5);
+ c->r[1] = sign_extend32(FIELD_GET(GENMASK(20, 15), val), 5);
+ c->r[2] = sign_extend32(FIELD_GET(GENMASK(12, 7), val), 5);
+ return 0;
+}
+
+static int yas532_get_calibration_data(struct yas5xx *yas5xx)
+{
+ struct yas5xx_calibration *c = &yas5xx->calibration;
+ u8 data[14];
+ u32 val;
+ int ret;
+
+ /* Dummy read, first read is ALWAYS wrong */
+ ret = regmap_bulk_read(yas5xx->map, YAS5XX_CAL, data, sizeof(data));
+ if (ret)
+ return ret;
+ /* Actual calibration readout */
+ ret = regmap_bulk_read(yas5xx->map, YAS5XX_CAL, data, sizeof(data));
+ if (ret)
+ return ret;
+ dev_dbg(yas5xx->dev, "calibration data: %*ph\n", 14, data);
+
+ /* Sanity check, is this all zeroes? */
+ if (!memchr_inv(data, 0x00, 13)) {
+ if (!(data[13] & BIT(7)))
+ dev_warn(yas5xx->dev, "calibration is blank!\n");
+ }
+
+ add_device_randomness(data, sizeof(data));
+ /* Only one bit of version info reserved here as far as we know */
+ yas5xx->version = data[13] & BIT(0);
+
+ /* Extract calibration from the bitfield */
+ c->Cx = data[0] * 10 - 1280;
+ c->Cy1 = data[1] * 10 - 1280;
+ c->Cy2 = data[2] * 10 - 1280;
+ yas53x_extract_calibration(&data[3], c);
+ /*
+ * Extract linearization:
+ * Linearization layout in the 32 bits at byte 10:
+ * The r factors are 6 bit values where bit 5 is the sign
+ *
+ * n 7 6 5 4 3 2 1 0
+ * 0 [ xx r0 r0 r0 r0 r0 r0 f0 ] bits 31 .. 24
+ * 1 [ f0 r1 r1 r1 r1 r1 r1 f1 ] bits 23 .. 16
+ * 2 [ f1 r2 r2 r2 r2 r2 r2 f2 ] bits 15 .. 8
+ * 3 [ f2 xx xx xx xx xx xx xx ] bits 7 .. 0
+ */
+ val = get_unaligned_be32(&data[10]);
+ c->f[0] = FIELD_GET(GENMASK(24, 23), val);
+ c->f[1] = FIELD_GET(GENMASK(16, 15), val);
+ c->f[2] = FIELD_GET(GENMASK(8, 7), val);
+ c->r[0] = sign_extend32(FIELD_GET(GENMASK(30, 25), val), 5);
+ c->r[1] = sign_extend32(FIELD_GET(GENMASK(22, 17), val), 5);
+ c->r[2] = sign_extend32(FIELD_GET(GENMASK(14, 9), val), 5);
+
+ return 0;
+}
+
+static void yas5xx_dump_calibration(struct yas5xx *yas5xx)
+{
+ struct yas5xx_calibration *c = &yas5xx->calibration;
+
+ dev_dbg(yas5xx->dev, "f[] = [%d, %d, %d]\n",
+ c->f[0], c->f[1], c->f[2]);
+ dev_dbg(yas5xx->dev, "r[] = [%d, %d, %d]\n",
+ c->r[0], c->r[1], c->r[2]);
+ dev_dbg(yas5xx->dev, "Cx = %d\n", c->Cx);
+ dev_dbg(yas5xx->dev, "Cy1 = %d\n", c->Cy1);
+ dev_dbg(yas5xx->dev, "Cy2 = %d\n", c->Cy2);
+ dev_dbg(yas5xx->dev, "a2 = %d\n", c->a2);
+ dev_dbg(yas5xx->dev, "a3 = %d\n", c->a3);
+ dev_dbg(yas5xx->dev, "a4 = %d\n", c->a4);
+ dev_dbg(yas5xx->dev, "a5 = %d\n", c->a5);
+ dev_dbg(yas5xx->dev, "a6 = %d\n", c->a6);
+ dev_dbg(yas5xx->dev, "a7 = %d\n", c->a7);
+ dev_dbg(yas5xx->dev, "a8 = %d\n", c->a8);
+ dev_dbg(yas5xx->dev, "a9 = %d\n", c->a9);
+ dev_dbg(yas5xx->dev, "k = %d\n", c->k);
+ dev_dbg(yas5xx->dev, "dck = %d\n", c->dck);
+}
+
+static int yas5xx_set_offsets(struct yas5xx *yas5xx, s8 ox, s8 oy1, s8 oy2)
+{
+ int ret;
+
+ ret = regmap_write(yas5xx->map, YAS5XX_OFFSET_X, ox);
+ if (ret)
+ return ret;
+ ret = regmap_write(yas5xx->map, YAS5XX_OFFSET_Y1, oy1);
+ if (ret)
+ return ret;
+ return regmap_write(yas5xx->map, YAS5XX_OFFSET_Y2, oy2);
+}
+
+static s8 yas5xx_adjust_offset(s8 old, int bit, u16 center, u16 measure)
+{
+ if (measure > center)
+ return old + BIT(bit);
+ if (measure < center)
+ return old - BIT(bit);
+ return old;
+}
+
+static int yas5xx_meaure_offsets(struct yas5xx *yas5xx)
+{
+ int ret;
+ u16 center;
+ u16 t, x, y1, y2;
+ s8 ox, oy1, oy2;
+ int i;
+
+ /* Actuate the init coil and measure offsets */
+ ret = regmap_write(yas5xx->map, YAS5XX_ACTUATE_INIT_COIL, 0);
+ if (ret)
+ return ret;
+
+ /* When the initcoil is active this should be around the center */
+ switch (yas5xx->devid) {
+ case YAS530_DEVICE_ID:
+ center = YAS530_DATA_CENTER;
+ break;
+ case YAS532_DEVICE_ID:
+ center = YAS532_DATA_CENTER;
+ break;
+ default:
+ dev_err(yas5xx->dev, "unknown device type\n");
+ return -EINVAL;
+ }
+
+ /*
+ * We set offsets in the interval +-31 by iterating
+ * +-16, +-8, +-4, +-2, +-1 adjusting the offsets each
+ * time, then writing the final offsets into the
+ * registers.
+ *
+ * NOTE: these offsets are NOT in the same unit or magnitude
+ * as the values for [x, y1, y2]. The value is +/-31
+ * but the effect on the raw values is much larger.
+ * The effect of the offset is to bring the measurement
+ * roughly to the center.
+ */
+ ox = 0;
+ oy1 = 0;
+ oy2 = 0;
+
+ for (i = 4; i >= 0; i--) {
+ ret = yas5xx_set_offsets(yas5xx, ox, oy1, oy2);
+ if (ret)
+ return ret;
+
+ ret = yas5xx_measure(yas5xx, &t, &x, &y1, &y2);
+ if (ret)
+ return ret;
+ dev_dbg(yas5xx->dev, "measurement %d: x=%d, y1=%d, y2=%d\n",
+ 5-i, x, y1, y2);
+
+ ox = yas5xx_adjust_offset(ox, i, center, x);
+ oy1 = yas5xx_adjust_offset(oy1, i, center, y1);
+ oy2 = yas5xx_adjust_offset(oy2, i, center, y2);
+ }
+
+ /* Needed for calibration algorithm */
+ yas5xx->hard_offsets[0] = ox;
+ yas5xx->hard_offsets[1] = oy1;
+ yas5xx->hard_offsets[2] = oy2;
+ ret = yas5xx_set_offsets(yas5xx, ox, oy1, oy2);
+ if (ret)
+ return ret;
+
+ dev_info(yas5xx->dev, "discovered hard offsets: x=%d, y1=%d, y2=%d\n",
+ ox, oy1, oy2);
+ return 0;
+}
+
+static int yas5xx_power_on(struct yas5xx *yas5xx)
+{
+ unsigned int val;
+ int ret;
+
+ /* Zero the test registers */
+ ret = regmap_write(yas5xx->map, YAS5XX_TEST1, 0);
+ if (ret)
+ return ret;
+ ret = regmap_write(yas5xx->map, YAS5XX_TEST2, 0);
+ if (ret)
+ return ret;
+
+ /* Set up for no interrupts, calibrated clock divider */
+ val = FIELD_PREP(YAS5XX_CONFIG_CCK_MASK, yas5xx->calibration.dck);
+ ret = regmap_write(yas5xx->map, YAS5XX_CONFIG, val);
+ if (ret)
+ return ret;
+
+ /* Measure interval 0 (back-to-back?) */
+ return regmap_write(yas5xx->map, YAS5XX_MEASURE_INTERVAL, 0);
+}
+
+static int yas5xx_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct device *dev = &i2c->dev;
+ struct yas5xx *yas5xx;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*yas5xx));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ yas5xx = iio_priv(indio_dev);
+ i2c_set_clientdata(i2c, indio_dev);
+ yas5xx->dev = dev;
+ mutex_init(&yas5xx->lock);
+
+ ret = iio_read_mount_matrix(dev, "mount-matrix", &yas5xx->orientation);
+ if (ret)
+ return ret;
+
+ yas5xx->regs[0].supply = "vdd";
+ yas5xx->regs[1].supply = "iovdd";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(yas5xx->regs),
+ yas5xx->regs);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot get regulators\n");
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
+ if (ret) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ /* See comment in runtime resume callback */
+ usleep_range(31000, 40000);
+
+ /* This will take the device out of reset if need be */
+ yas5xx->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(yas5xx->reset)) {
+ ret = dev_err_probe(dev, PTR_ERR(yas5xx->reset),
+ "failed to get reset line\n");
+ goto reg_off;
+ }
+
+ yas5xx->map = devm_regmap_init_i2c(i2c, &yas5xx_regmap_config);
+ if (IS_ERR(yas5xx->map)) {
+ dev_err(dev, "failed to allocate register map\n");
+ ret = PTR_ERR(yas5xx->map);
+ goto assert_reset;
+ }
+
+ ret = regmap_read(yas5xx->map, YAS5XX_DEVICE_ID, &yas5xx->devid);
+ if (ret)
+ goto assert_reset;
+
+ switch (yas5xx->devid) {
+ case YAS530_DEVICE_ID:
+ ret = yas530_get_calibration_data(yas5xx);
+ if (ret)
+ goto assert_reset;
+ dev_info(dev, "detected YAS530 MS-3E %s",
+ yas5xx->version ? "B" : "A");
+ strncpy(yas5xx->name, "yas530", sizeof(yas5xx->name));
+ break;
+ case YAS532_DEVICE_ID:
+ ret = yas532_get_calibration_data(yas5xx);
+ if (ret)
+ goto assert_reset;
+ dev_info(dev, "detected YAS532/YAS533 MS-3R/F %s",
+ yas5xx->version ? "AC" : "AB");
+ strncpy(yas5xx->name, "yas532", sizeof(yas5xx->name));
+ break;
+ default:
+ dev_err(dev, "unhandled device ID %02x\n", yas5xx->devid);
+ goto assert_reset;
+ }
+
+ yas5xx_dump_calibration(yas5xx);
+ ret = yas5xx_power_on(yas5xx);
+ if (ret)
+ goto assert_reset;
+ ret = yas5xx_meaure_offsets(yas5xx);
+ if (ret)
+ goto assert_reset;
+
+ indio_dev->info = &yas5xx_info;
+ indio_dev->available_scan_masks = yas5xx_scan_masks;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = yas5xx->name;
+ indio_dev->channels = yas5xx_channels;
+ indio_dev->num_channels = ARRAY_SIZE(yas5xx_channels);
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ yas5xx_handle_trigger,
+ NULL);
+ if (ret) {
+ dev_err(dev, "triggered buffer setup failed\n");
+ goto assert_reset;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "device register failed\n");
+ goto cleanup_buffer;
+ }
+
+ /* Take runtime PM online */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ pm_runtime_set_autosuspend_delay(dev, YAS5XX_AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put(dev);
+
+ return 0;
+
+cleanup_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+assert_reset:
+ gpiod_set_value_cansleep(yas5xx->reset, 1);
+reg_off:
+ regulator_bulk_disable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
+
+ return ret;
+}
+
+static int yas5xx_remove(struct i2c_client *i2c)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
+ struct yas5xx *yas5xx = iio_priv(indio_dev);
+ struct device *dev = &i2c->dev;
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ /*
+ * Now we can't get any more reads from the device, which would
+ * also call pm_runtime* functions and race with our disable
+ * code. Disable PM runtime in orderly fashion and power down.
+ */
+ pm_runtime_get_sync(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ gpiod_set_value_cansleep(yas5xx->reset, 1);
+ regulator_bulk_disable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
+
+ return 0;
+}
+
+static int __maybe_unused yas5xx_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct yas5xx *yas5xx = iio_priv(indio_dev);
+
+ gpiod_set_value_cansleep(yas5xx->reset, 1);
+ regulator_bulk_disable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
+
+ return 0;
+}
+
+static int __maybe_unused yas5xx_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct yas5xx *yas5xx = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
+ if (ret) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
+ }
+
+ /*
+ * The YAS530 datasheet says TVSKW is up to 30 ms, after that 1 ms
+ * for all voltages to settle. The YAS532 needs 10 ms, then 4 ms for the
+ * I2C to come online. Let's keep it safe and put this at 31ms.
+ */
+ usleep_range(31000, 40000);
+ gpiod_set_value_cansleep(yas5xx->reset, 0);
+
+ ret = yas5xx_power_on(yas5xx);
+ if (ret) {
+ dev_err(dev, "cannot power on\n");
+ goto out_reset;
+ }
+
+ return 0;
+
+out_reset:
+ gpiod_set_value_cansleep(yas5xx->reset, 1);
+ regulator_bulk_disable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
+
+ return ret;
+}
+
+static const struct dev_pm_ops yas5xx_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(yas5xx_runtime_suspend,
+ yas5xx_runtime_resume, NULL)
+};
+
+static const struct i2c_device_id yas5xx_id[] = {
+ {"yas530", },
+ {"yas532", },
+ {"yas533", },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, yas5xx_id);
+
+static const struct of_device_id yas5xx_of_match[] = {
+ { .compatible = "yamaha,yas530", },
+ { .compatible = "yamaha,yas532", },
+ { .compatible = "yamaha,yas533", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, yas5xx_of_match);
+
+static struct i2c_driver yas5xx_driver = {
+ .driver = {
+ .name = "yas5xx",
+ .of_match_table = yas5xx_of_match,
+ .pm = &yas5xx_dev_pm_ops,
+ },
+ .probe = yas5xx_probe,
+ .remove = yas5xx_remove,
+ .id_table = yas5xx_id,
+};
+module_i2c_driver(yas5xx_driver);
+
+MODULE_DESCRIPTION("Yamaha YAS53x 3-axis magnetometer driver");
+MODULE_AUTHOR("Linus Walleij");
+MODULE_LICENSE("GPL v2");
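
As a worked example of the YAS530 measurement unpacking in the new driver: the two raw bytes are combined big-endian and the twelve significant bits sit in bits 14..3, so the bytes { 0xAB, 0xCD } decode to 0x579 (1401). A standalone sketch of the same extraction (the include path for the unaligned helpers may differ by kernel version):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <asm/unaligned.h>

static u16 example_yas530_axis(const u8 *data)
{
	/* { 0xAB, 0xCD } -> 0xABCD -> bits 14..3 -> 0x579 */
	u16 val = get_unaligned_be16(data);

	return FIELD_GET(GENMASK(14, 3), val);
}
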
diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
index ae132a93bcae..52ebef30f9be 100644
--- a/drivers/iio/orientation/hid-sensor-incl-3d.c
+++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
@@ -24,15 +24,21 @@ enum incl_3d_channel {
INCLI_3D_CHANNEL_MAX,
};
+#define CHANNEL_SCAN_INDEX_TIMESTAMP INCLI_3D_CHANNEL_MAX
+
struct incl_3d_state {
struct hid_sensor_hub_callbacks callbacks;
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info incl[INCLI_3D_CHANNEL_MAX];
- u32 incl_val[INCLI_3D_CHANNEL_MAX];
+ struct {
+ u32 incl_val[INCLI_3D_CHANNEL_MAX];
+ u64 timestamp __aligned(8);
+ } scan;
int scale_pre_decml;
int scale_post_decml;
int scale_precision;
int value_offset;
+ s64 timestamp;
};
static const u32 incl_3d_addresses[INCLI_3D_CHANNEL_MAX] = {
@@ -73,7 +79,8 @@ static const struct iio_chan_spec incl_3d_channels[] = {
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS),
.scan_index = CHANNEL_SCAN_INDEX_Z,
- }
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP),
};
/* Adjust channel real bits based on report descriptor */
@@ -178,13 +185,6 @@ static const struct iio_info incl_3d_info = {
.write_raw = &incl_3d_write_raw,
};
-/* Function to push data to buffer */
-static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
-{
- dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
- iio_push_to_buffers(indio_dev, (u8 *)data);
-}
-
/* Callback handler to send event after all samples are received and captured */
static int incl_3d_proc_event(struct hid_sensor_hub_device *hsdev,
unsigned usage_id,
@@ -194,10 +194,16 @@ static int incl_3d_proc_event(struct hid_sensor_hub_device *hsdev,
struct incl_3d_state *incl_state = iio_priv(indio_dev);
dev_dbg(&indio_dev->dev, "incl_3d_proc_event\n");
- if (atomic_read(&incl_state->common_attributes.data_ready))
- hid_sensor_push_data(indio_dev,
- (u8 *)incl_state->incl_val,
- sizeof(incl_state->incl_val));
+ if (atomic_read(&incl_state->common_attributes.data_ready)) {
+ if (!incl_state->timestamp)
+ incl_state->timestamp = iio_get_time_ns(indio_dev);
+
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ &incl_state->scan,
+ incl_state->timestamp);
+
+ incl_state->timestamp = 0;
+ }
return 0;
}
@@ -214,13 +220,18 @@ static int incl_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
switch (usage_id) {
case HID_USAGE_SENSOR_ORIENT_TILT_X:
- incl_state->incl_val[CHANNEL_SCAN_INDEX_X] = *(u32 *)raw_data;
+ incl_state->scan.incl_val[CHANNEL_SCAN_INDEX_X] = *(u32 *)raw_data;
break;
case HID_USAGE_SENSOR_ORIENT_TILT_Y:
- incl_state->incl_val[CHANNEL_SCAN_INDEX_Y] = *(u32 *)raw_data;
+ incl_state->scan.incl_val[CHANNEL_SCAN_INDEX_Y] = *(u32 *)raw_data;
break;
case HID_USAGE_SENSOR_ORIENT_TILT_Z:
- incl_state->incl_val[CHANNEL_SCAN_INDEX_Z] = *(u32 *)raw_data;
+ incl_state->scan.incl_val[CHANNEL_SCAN_INDEX_Z] = *(u32 *)raw_data;
+ break;
+ case HID_USAGE_SENSOR_TIME_TIMESTAMP:
+ incl_state->timestamp =
+ hid_sensor_convert_timestamp(&incl_state->common_attributes,
+ *(s64 *)raw_data);
break;
default:
ret = -EINVAL;
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
index 23bc61a7f018..18e4ef060096 100644
--- a/drivers/iio/orientation/hid-sensor-rotation.c
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -20,11 +20,15 @@ struct dev_rot_state {
struct hid_sensor_hub_callbacks callbacks;
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info quaternion;
- u32 sampled_vals[4];
+ struct {
+ u32 sampled_vals[4] __aligned(16);
+ u64 timestamp __aligned(8);
+ } scan;
int scale_pre_decml;
int scale_post_decml;
int scale_precision;
int value_offset;
+ s64 timestamp;
};
/* Channel definitions */
@@ -37,8 +41,10 @@ static const struct iio_chan_spec dev_rot_channels[] = {
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_HYSTERESIS)
- }
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = 0
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1)
};
/* Adjust channel real bits based on report descriptor */
@@ -70,7 +76,7 @@ static int dev_rot_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
if (size >= 4) {
for (i = 0; i < 4; ++i)
- vals[i] = rot_state->sampled_vals[i];
+ vals[i] = rot_state->scan.sampled_vals[i];
ret_type = IIO_VAL_INT_MULTIPLE;
*val_len = 4;
} else
@@ -132,15 +138,6 @@ static const struct iio_info dev_rot_info = {
.write_raw = &dev_rot_write_raw,
};
-/* Function to push data to buffer */
-static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
-{
- dev_dbg(&indio_dev->dev, "hid_sensor_push_data >>\n");
- iio_push_to_buffers(indio_dev, (u8 *)data);
- dev_dbg(&indio_dev->dev, "hid_sensor_push_data <<\n");
-
-}
-
/* Callback handler to send event after all samples are received and captured */
static int dev_rot_proc_event(struct hid_sensor_hub_device *hsdev,
unsigned usage_id,
@@ -150,10 +147,15 @@ static int dev_rot_proc_event(struct hid_sensor_hub_device *hsdev,
struct dev_rot_state *rot_state = iio_priv(indio_dev);
dev_dbg(&indio_dev->dev, "dev_rot_proc_event\n");
- if (atomic_read(&rot_state->common_attributes.data_ready))
- hid_sensor_push_data(indio_dev,
- (u8 *)rot_state->sampled_vals,
- sizeof(rot_state->sampled_vals));
+ if (atomic_read(&rot_state->common_attributes.data_ready)) {
+ if (!rot_state->timestamp)
+ rot_state->timestamp = iio_get_time_ns(indio_dev);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &rot_state->scan,
+ rot_state->timestamp);
+
+ rot_state->timestamp = 0;
+ }
return 0;
}
@@ -168,10 +170,14 @@ static int dev_rot_capture_sample(struct hid_sensor_hub_device *hsdev,
struct dev_rot_state *rot_state = iio_priv(indio_dev);
if (usage_id == HID_USAGE_SENSOR_ORIENT_QUATERNION) {
- memcpy(rot_state->sampled_vals, raw_data,
- sizeof(rot_state->sampled_vals));
+ memcpy(&rot_state->scan.sampled_vals, raw_data,
+ sizeof(rot_state->scan.sampled_vals));
+
dev_dbg(&indio_dev->dev, "Recd Quat len:%zu::%zu\n", raw_len,
- sizeof(rot_state->sampled_vals));
+ sizeof(rot_state->scan.sampled_vals));
+ } else if (usage_id == HID_USAGE_SENSOR_TIME_TIMESTAMP) {
+ rot_state->timestamp = hid_sensor_convert_timestamp(&rot_state->common_attributes,
+ *(s64 *)raw_data);
}
return 0;
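
Both orientation drivers above drop the old hid_sensor_push_data() helper in favour of iio_push_to_buffers_with_timestamp(), which expects the pushed buffer to end in a 64-bit timestamp slot aligned to 8 bytes. A minimal sketch of that pattern, with hypothetical names and a three-value payload (a sketch only, not driver code from this series):

/* Hedged sketch: names and channel count are hypothetical. */
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>

struct example_state {
	/*
	 * iio_push_to_buffers_with_timestamp() copies this whole struct;
	 * the trailing timestamp must be naturally aligned to 8 bytes.
	 */
	struct {
		u32 vals[3];
		u64 timestamp __aligned(8);
	} scan;
	s64 timestamp;	/* latched from the HID timestamp usage, 0 if absent */
};

static void example_push(struct iio_dev *indio_dev, struct example_state *st)
{
	/*
	 * Fall back to the local clock when the sensor hub did not deliver
	 * HID_USAGE_SENSOR_TIME_TIMESTAMP for this sample.
	 */
	if (!st->timestamp)
		st->timestamp = iio_get_time_ns(indio_dev);

	iio_push_to_buffers_with_timestamp(indio_dev, &st->scan, st->timestamp);
	st->timestamp = 0;
}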
diff --git a/drivers/iio/position/Kconfig b/drivers/iio/position/Kconfig
index eda67f008c5b..1576a6380b53 100644
--- a/drivers/iio/position/Kconfig
+++ b/drivers/iio/position/Kconfig
@@ -16,4 +16,20 @@ config IQS624_POS
To compile this driver as a module, choose M here: the module
will be called iqs624-pos.
+config HID_SENSOR_CUSTOM_INTEL_HINGE
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ tristate "HID Hinge"
+ help
+	  This sensor presents three angles: the hinge angle, plus the
+	  screen and keyboard angles relative to the horizon (ground).
+	  Say yes here to build support for the HID custom
+	  Intel hinge sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hid-sensor-custom-hinge.
+
endmenu
diff --git a/drivers/iio/position/Makefile b/drivers/iio/position/Makefile
index 3cbe7a734352..d70902f2979d 100644
--- a/drivers/iio/position/Makefile
+++ b/drivers/iio/position/Makefile
@@ -4,4 +4,5 @@
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE) += hid-sensor-custom-intel-hinge.o
obj-$(CONFIG_IQS624_POS) += iqs624-pos.o
diff --git a/drivers/iio/position/hid-sensor-custom-intel-hinge.c b/drivers/iio/position/hid-sensor-custom-intel-hinge.c
new file mode 100644
index 000000000000..64a7fa7db6af
--- /dev/null
+++ b/drivers/iio/position/hid-sensor-custom-intel-hinge.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2020, Intel Corporation.
+ */
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/platform_device.h>
+
+#include "../common/hid-sensors/hid-sensor-trigger.h"
+
+enum hinge_channel {
+ CHANNEL_SCAN_INDEX_HINGE_ANGLE,
+ CHANNEL_SCAN_INDEX_SCREEN_ANGLE,
+ CHANNEL_SCAN_INDEX_KEYBOARD_ANGLE,
+ CHANNEL_SCAN_INDEX_MAX,
+};
+
+#define CHANNEL_SCAN_INDEX_TIMESTAMP CHANNEL_SCAN_INDEX_MAX
+
+static const u32 hinge_addresses[CHANNEL_SCAN_INDEX_MAX] = {
+ HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(1),
+ HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(2),
+ HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(3)
+};
+
+static const char *const hinge_labels[CHANNEL_SCAN_INDEX_MAX] = { "hinge",
+ "screen",
+ "keyboard" };
+
+struct hinge_state {
+ struct iio_dev *indio_dev;
+ struct hid_sensor_hub_attribute_info hinge[CHANNEL_SCAN_INDEX_MAX];
+ struct hid_sensor_hub_callbacks callbacks;
+ struct hid_sensor_common common_attributes;
+ const char *labels[CHANNEL_SCAN_INDEX_MAX];
+ struct {
+ u32 hinge_val[3];
+ u64 timestamp __aligned(8);
+ } scan;
+
+ int scale_pre_decml;
+ int scale_post_decml;
+ int scale_precision;
+ int value_offset;
+ u64 timestamp;
+};
+
+/* Channel definitions */
+static const struct iio_chan_spec hinge_channels[] = {
+ {
+ .type = IIO_ANGL,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type =
+ BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_HINGE_ANGLE,
+ .scan_type = {
+ .sign = 's',
+ .storagebits = 32,
+ },
+ }, {
+ .type = IIO_ANGL,
+ .indexed = 1,
+ .channel = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type =
+ BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_SCREEN_ANGLE,
+ .scan_type = {
+ .sign = 's',
+ .storagebits = 32,
+ },
+ }, {
+ .type = IIO_ANGL,
+ .indexed = 1,
+ .channel = 2,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type =
+ BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_KEYBOARD_ANGLE,
+ .scan_type = {
+ .sign = 's',
+ .storagebits = 32,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP)
+};
+
+/* Adjust channel real bits based on report descriptor */
+static void hinge_adjust_channel_realbits(struct iio_chan_spec *channels,
+ int channel, int size)
+{
+ channels[channel].scan_type.realbits = size * 8;
+}
+
+/* Channel read_raw handler */
+static int hinge_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask)
+{
+ struct hinge_state *st = iio_priv(indio_dev);
+ struct hid_sensor_hub_device *hsdev;
+ int report_id;
+ s32 min;
+
+ hsdev = st->common_attributes.hsdev;
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ hid_sensor_power_state(&st->common_attributes, true);
+ report_id = st->hinge[chan->scan_index].report_id;
+ min = st->hinge[chan->scan_index].logical_minimum;
+ if (report_id < 0) {
+ hid_sensor_power_state(&st->common_attributes, false);
+ return -EINVAL;
+ }
+
+ *val = sensor_hub_input_attr_get_raw_value(st->common_attributes.hsdev,
+ hsdev->usage,
+ hinge_addresses[chan->scan_index],
+ report_id,
+ SENSOR_HUB_SYNC, min < 0);
+
+ hid_sensor_power_state(&st->common_attributes, false);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->scale_pre_decml;
+ *val2 = st->scale_post_decml;
+ return st->scale_precision;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = st->value_offset;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return hid_sensor_read_samp_freq_value(&st->common_attributes,
+ val, val2);
+ case IIO_CHAN_INFO_HYSTERESIS:
+ return hid_sensor_read_raw_hyst_value(&st->common_attributes,
+ val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Channel write_raw handler */
+static int hinge_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2,
+ long mask)
+{
+ struct hinge_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return hid_sensor_write_samp_freq_value(&st->common_attributes,
+ val, val2);
+ case IIO_CHAN_INFO_HYSTERESIS:
+ return hid_sensor_write_raw_hyst_value(&st->common_attributes,
+ val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hinge_read_label(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, char *label)
+{
+ struct hinge_state *st = iio_priv(indio_dev);
+
+ return sprintf(label, "%s\n", st->labels[chan->channel]);
+}
+
+static const struct iio_info hinge_info = {
+ .read_raw = hinge_read_raw,
+ .write_raw = hinge_write_raw,
+ .read_label = hinge_read_label,
+};
+
+/*
+ * Callback handler to send event after all samples are received
+ * and captured.
+ */
+static int hinge_proc_event(struct hid_sensor_hub_device *hsdev,
+ unsigned int usage_id, void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct hinge_state *st = iio_priv(indio_dev);
+
+ if (atomic_read(&st->common_attributes.data_ready)) {
+ if (!st->timestamp)
+ st->timestamp = iio_get_time_ns(indio_dev);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
+ st->timestamp);
+
+ st->timestamp = 0;
+ }
+ return 0;
+}
+
+/* Capture samples in local storage */
+static int hinge_capture_sample(struct hid_sensor_hub_device *hsdev,
+ unsigned int usage_id, size_t raw_len,
+ char *raw_data, void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct hinge_state *st = iio_priv(indio_dev);
+ int offset;
+
+ switch (usage_id) {
+ case HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(1):
+ case HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(2):
+ case HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(3):
+ offset = usage_id - HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(1);
+ st->scan.hinge_val[offset] = *(u32 *)raw_data;
+ return 0;
+ case HID_USAGE_SENSOR_TIME_TIMESTAMP:
+ st->timestamp = hid_sensor_convert_timestamp(&st->common_attributes,
+ *(int64_t *)raw_data);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Parse report which is specific to a usage id */
+static int hinge_parse_report(struct platform_device *pdev,
+ struct hid_sensor_hub_device *hsdev,
+ struct iio_chan_spec *channels,
+ unsigned int usage_id, struct hinge_state *st)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < CHANNEL_SCAN_INDEX_MAX; ++i) {
+ ret = sensor_hub_input_get_attribute_info(hsdev,
+ HID_INPUT_REPORT,
+ usage_id,
+ hinge_addresses[i],
+ &st->hinge[i]);
+ if (ret < 0)
+ return ret;
+
+ hinge_adjust_channel_realbits(channels, i, st->hinge[i].size);
+ }
+
+ st->scale_precision = hid_sensor_format_scale(HID_USAGE_SENSOR_HINGE,
+ &st->hinge[CHANNEL_SCAN_INDEX_HINGE_ANGLE],
+ &st->scale_pre_decml, &st->scale_post_decml);
+
+ /* Set Sensitivity field ids, when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(1),
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
+
+ return ret;
+}
+
+/* Function to initialize the processing for usage id */
+static int hid_hinge_probe(struct platform_device *pdev)
+{
+ struct hinge_state *st;
+ struct iio_dev *indio_dev;
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ int ret;
+ int i;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ st = iio_priv(indio_dev);
+ st->common_attributes.hsdev = hsdev;
+ st->common_attributes.pdev = pdev;
+ st->indio_dev = indio_dev;
+ for (i = 0; i < CHANNEL_SCAN_INDEX_MAX; i++)
+ st->labels[i] = hinge_labels[i];
+
+ ret = hid_sensor_parse_common_attributes(hsdev, hsdev->usage,
+ &st->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup common attributes\n");
+ return ret;
+ }
+
+ indio_dev->num_channels = ARRAY_SIZE(hinge_channels);
+ indio_dev->channels = devm_kmemdup(&indio_dev->dev, hinge_channels,
+ sizeof(hinge_channels), GFP_KERNEL);
+ if (!indio_dev->channels)
+ return -ENOMEM;
+
+ ret = hinge_parse_report(pdev, hsdev,
+ (struct iio_chan_spec *)indio_dev->channels,
+ hsdev->usage, st);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup attributes\n");
+ return ret;
+ }
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &hinge_info;
+ indio_dev->name = "hinge";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ atomic_set(&st->common_attributes.data_ready, 0);
+ ret = hid_sensor_setup_trigger(indio_dev, indio_dev->name,
+ &st->common_attributes);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "trigger setup failed\n");
+ return ret;
+ }
+
+ st->callbacks.send_event = hinge_proc_event;
+ st->callbacks.capture_sample = hinge_capture_sample;
+ st->callbacks.pdev = pdev;
+ ret = sensor_hub_register_callback(hsdev, hsdev->usage, &st->callbacks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "callback reg failed\n");
+ goto error_remove_trigger;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "device register failed\n");
+ goto error_remove_callback;
+ }
+
+ return ret;
+
+error_remove_callback:
+ sensor_hub_remove_callback(hsdev, hsdev->usage);
+error_remove_trigger:
+ hid_sensor_remove_trigger(indio_dev, &st->common_attributes);
+ return ret;
+}
+
+/* Function to deinitialize the processing for usage id */
+static int hid_hinge_remove(struct platform_device *pdev)
+{
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct hinge_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ sensor_hub_remove_callback(hsdev, hsdev->usage);
+ hid_sensor_remove_trigger(indio_dev, &st->common_attributes);
+
+ return 0;
+}
+
+static const struct platform_device_id hid_hinge_ids[] = {
+ {
+ /* Format: HID-SENSOR-INT-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-INT-020b",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_hinge_ids);
+
+static struct platform_driver hid_hinge_platform_driver = {
+ .id_table = hid_hinge_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .pm = &hid_sensor_pm_ops,
+ },
+ .probe = hid_hinge_probe,
+ .remove = hid_hinge_remove,
+};
+module_platform_driver(hid_hinge_platform_driver);
+
+MODULE_DESCRIPTION("HID Sensor INTEL Hinge");
+MODULE_AUTHOR("Ye Xiang <xiang.ye@intel.com>");
+MODULE_LICENSE("GPL");
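
The new hinge driver registers three labelled IIO_ANGL channels ("hinge", "screen" and "keyboard"). A hedged userspace sketch for reading the first raw angle follows; the sysfs path and attribute name are assumptions for illustration, and in practice the iio:deviceN index has to be discovered by matching the device's "name" file against "hinge":

/* Hedged userspace sketch; paths are assumptions, not taken from the patch. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/iio/devices/iio:device0/in_angl0_raw";
	FILE *f = fopen(path, "r");
	long raw;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%ld", &raw) == 1)
		printf("hinge angle (raw): %ld\n", raw);
	fclose(f);
	return 0;
}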
diff --git a/drivers/iio/pressure/ms5637.c b/drivers/iio/pressure/ms5637.c
index 5b59a4137d32..81f683321b23 100644
--- a/drivers/iio/pressure/ms5637.c
+++ b/drivers/iio/pressure/ms5637.c
@@ -30,9 +30,25 @@
#include "../common/ms_sensors/ms_sensors_i2c.h"
+struct ms_tp_data {
+ const char *name;
+ const struct ms_tp_hw_data *hw;
+};
+
static const int ms5637_samp_freq[6] = { 960, 480, 240, 120, 60, 30 };
-/* String copy of the above const for readability purpose */
-static const char ms5637_show_samp_freq[] = "960 480 240 120 60 30";
+
+static ssize_t ms5637_show_samp_freq(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ms_tp_dev *dev_data = iio_priv(indio_dev);
+ int i, len = 0;
+
+ for (i = 0; i <= dev_data->hw->max_res_index; i++)
+ len += sysfs_emit_at(buf, len, "%u ", ms5637_samp_freq[i]);
+ sysfs_emit_at(buf, len - 1, "\n");
+
+ return len;
+}
static int ms5637_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *channel, int *val,
@@ -109,10 +125,10 @@ static const struct iio_chan_spec ms5637_channels[] = {
}
};
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(ms5637_show_samp_freq);
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(ms5637_show_samp_freq);
static struct attribute *ms5637_attributes[] = {
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
NULL,
};
@@ -129,6 +145,7 @@ static const struct iio_info ms5637_info = {
static int ms5637_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const struct ms_tp_data *data;
struct ms_tp_dev *dev_data;
struct iio_dev *indio_dev;
int ret;
@@ -142,17 +159,25 @@ static int ms5637_probe(struct i2c_client *client,
return -EOPNOTSUPP;
}
+ if (id)
+ data = (const struct ms_tp_data *)id->driver_data;
+ else
+ data = device_get_match_data(&client->dev);
+ if (!data)
+ return -EINVAL;
+
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*dev_data));
if (!indio_dev)
return -ENOMEM;
dev_data = iio_priv(indio_dev);
dev_data->client = client;
- dev_data->res_index = 5;
+ dev_data->res_index = data->hw->max_res_index;
+ dev_data->hw = data->hw;
mutex_init(&dev_data->lock);
indio_dev->info = &ms5637_info;
- indio_dev->name = id->name;
+ indio_dev->name = data->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = ms5637_channels;
indio_dev->num_channels = ARRAY_SIZE(ms5637_channels);
@@ -170,20 +195,44 @@ static int ms5637_probe(struct i2c_client *client,
return devm_iio_device_register(&client->dev, indio_dev);
}
+static const struct ms_tp_hw_data ms5637_hw_data = {
+ .prom_len = 7,
+ .max_res_index = 5
+};
+
+static const struct ms_tp_hw_data ms5803_hw_data = {
+ .prom_len = 8,
+ .max_res_index = 4
+};
+
+static const struct ms_tp_data ms5637_data = { .name = "ms5637", .hw = &ms5637_hw_data };
+
+static const struct ms_tp_data ms5803_data = { .name = "ms5803", .hw = &ms5803_hw_data };
+
+static const struct ms_tp_data ms5805_data = { .name = "ms5805", .hw = &ms5637_hw_data };
+
+static const struct ms_tp_data ms5837_data = { .name = "ms5837", .hw = &ms5637_hw_data };
+
+static const struct ms_tp_data ms8607_data = {
+ .name = "ms8607-temppressure",
+ .hw = &ms5637_hw_data,
+};
+
static const struct i2c_device_id ms5637_id[] = {
- {"ms5637", 0},
- {"ms5805", 0},
- {"ms5837", 0},
- {"ms8607-temppressure", 0},
+ {"ms5637", (kernel_ulong_t)&ms5637_data },
+ {"ms5805", (kernel_ulong_t)&ms5805_data },
+ {"ms5837", (kernel_ulong_t)&ms5837_data },
+ {"ms8607-temppressure", (kernel_ulong_t)&ms8607_data },
{}
};
MODULE_DEVICE_TABLE(i2c, ms5637_id);
static const struct of_device_id ms5637_of_match[] = {
- { .compatible = "meas,ms5637", },
- { .compatible = "meas,ms5805", },
- { .compatible = "meas,ms5837", },
- { .compatible = "meas,ms8607-temppressure", },
+ { .compatible = "meas,ms5637", .data = &ms5637_data },
+ { .compatible = "meas,ms5803", .data = &ms5803_data },
+ { .compatible = "meas,ms5805", .data = &ms5805_data },
+ { .compatible = "meas,ms5837", .data = &ms5837_data },
+ { .compatible = "meas,ms8607-temppressure", .data = &ms8607_data },
{ },
};
MODULE_DEVICE_TABLE(of, ms5637_of_match);
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 9325e189a215..04a78d9f8fe3 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -41,6 +41,7 @@ config INFINIBAND_USER_MEM
bool
depends on INFINIBAND_USER_ACCESS != n
depends on MMU
+ select DMA_SHARED_BUFFER
default y
config INFINIBAND_ON_DEMAND_PAGING
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index ccf2670ef45e..8ab4eea5a0a5 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -40,5 +40,5 @@ ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \
uverbs_std_types_srq.o \
uverbs_std_types_wq.o \
uverbs_std_types_qp.o
-ib_uverbs-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
+ib_uverbs-$(CONFIG_INFINIBAND_USER_MEM) += umem.o umem_dmabuf.o
ib_uverbs-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 7989b7e1d1c0..5c9fac7cf420 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -669,11 +669,10 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
* rdma_find_gid_by_port - Returns the GID entry attributes when it finds
* a valid GID entry for given search parameters. It searches for the specified
* GID value in the local software cache.
- * @device: The device to query.
+ * @ib_dev: The device to query.
* @gid: The GID value to search for.
* @gid_type: The GID type to search for.
- * @port_num: The port number of the device where the GID value should be
- * searched.
+ * @port: The port number of the device where the GID value should be searched.
* @ndev: In RoCE, the net device of the device. NULL means ignore.
*
* Returns sgid attributes if the GID is found with valid reference or
@@ -719,7 +718,7 @@ EXPORT_SYMBOL(rdma_find_gid_by_port);
/**
* rdma_find_gid_by_filter - Returns the GID table attribute where a
* specified GID value occurs
- * @device: The device to query.
+ * @ib_dev: The device to query.
* @gid: The GID value to search for.
* @port: The port number of the device where the GID value could be
* searched.
@@ -728,6 +727,7 @@ EXPORT_SYMBOL(rdma_find_gid_by_port);
* otherwise, we continue searching the GID table. It's guaranteed that
* while filter is executed, ndev field is valid and the structure won't
* change. filter is executed in an atomic context. filter must not be NULL.
+ * @context: Private data to pass into the call-back.
*
* rdma_find_gid_by_filter() searches for the specified GID value
* of which the filter function returns true in the port's GID table.
@@ -1253,7 +1253,6 @@ EXPORT_SYMBOL(rdma_get_gid_attr);
* @entries: Entries where GID entries are returned.
* @max_entries: Maximum number of entries that can be returned.
* Entries array must be allocated to hold max_entries number of entries.
- * @num_entries: Updated to the number of entries that were successfully read.
*
* Returns number of entries on success or appropriate error code.
*/
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 98165589c8ab..3d194bb60840 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3651,6 +3651,7 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
struct ib_cm_sidr_rep_param *param)
{
struct ib_mad_send_buf *msg;
+ unsigned long flags;
int ret;
lockdep_assert_held(&cm_id_priv->lock);
@@ -3676,12 +3677,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
return ret;
}
cm_id_priv->id.state = IB_CM_IDLE;
- spin_lock_irq(&cm.lock);
+ spin_lock_irqsave(&cm.lock, flags);
if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
}
- spin_unlock_irq(&cm.lock);
+ spin_unlock_irqrestore(&cm.lock, flags);
return 0;
}
@@ -4333,7 +4334,7 @@ static int cm_add_one(struct ib_device *ib_device)
unsigned long flags;
int ret;
int count = 0;
- u8 i;
+ unsigned int i;
cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
GFP_KERNEL);
@@ -4345,7 +4346,7 @@ static int cm_add_one(struct ib_device *ib_device)
cm_dev->going_down = 0;
set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
- for (i = 1; i <= ib_device->phys_port_cnt; i++) {
+ rdma_for_each_port (ib_device, i) {
if (!rdma_cap_ib_cm(ib_device, i))
continue;
@@ -4431,7 +4432,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
.clr_port_cap_mask = IB_PORT_CM_SUP
};
unsigned long flags;
- int i;
+ unsigned int i;
write_lock_irqsave(&cm.device_lock, flags);
list_del(&cm_dev->list);
@@ -4441,7 +4442,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
cm_dev->going_down = 1;
spin_unlock_irq(&cm.lock);
- for (i = 1; i <= ib_device->phys_port_cnt; i++) {
+ rdma_for_each_port (ib_device, i) {
if (!rdma_cap_ib_cm(ib_device, i))
continue;
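
cm_add_one() and cm_remove_one() above switch from an open-coded 1..phys_port_cnt loop to rdma_for_each_port(), with the iterator widened to unsigned int. A minimal sketch of the same iteration pattern, using a hypothetical helper name:

/* Hedged sketch of the iteration pattern only, not driver code. */
#include <rdma/ib_verbs.h>

static void example_count_cm_ports(struct ib_device *ib_device)
{
	unsigned int i, n = 0;

	rdma_for_each_port(ib_device, i) {
		if (rdma_cap_ib_cm(ib_device, i))
			n++;
	}
	dev_info(&ib_device->dev, "%u CM-capable ports\n", n);
}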
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index c51b84b2d2f3..94096511599f 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -352,7 +352,13 @@ struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
struct cma_multicast {
struct rdma_id_private *id_priv;
- struct ib_sa_multicast *sa_mc;
+ union {
+ struct ib_sa_multicast *sa_mc;
+ struct {
+ struct work_struct work;
+ struct rdma_cm_event event;
+ } iboe_join;
+ };
struct list_head list;
void *context;
struct sockaddr_storage addr;
@@ -1823,6 +1829,8 @@ static void destroy_mc(struct rdma_id_private *id_priv,
cma_igmp_send(ndev, &mgid, false);
dev_put(ndev);
}
+
+ cancel_work_sync(&mc->iboe_join.work);
}
kfree(mc);
}
@@ -2683,6 +2691,28 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv,
return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
+static void cma_iboe_join_work_handler(struct work_struct *work)
+{
+ struct cma_multicast *mc =
+ container_of(work, struct cma_multicast, iboe_join.work);
+ struct rdma_cm_event *event = &mc->iboe_join.event;
+ struct rdma_id_private *id_priv = mc->id_priv;
+ int ret;
+
+ mutex_lock(&id_priv->handler_mutex);
+ if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
+ READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
+ goto out_unlock;
+
+ ret = cma_cm_event_handler(id_priv, event);
+ WARN_ON(ret);
+
+out_unlock:
+ mutex_unlock(&id_priv->handler_mutex);
+ if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN)
+ rdma_destroy_ah_attr(&event->param.ud.ah_attr);
+}
+
static void cma_work_handler(struct work_struct *_work)
{
struct cma_work *work = container_of(_work, struct cma_work, work);
@@ -4478,10 +4508,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
cma_make_mc_event(status, id_priv, multicast, &event, mc);
ret = cma_cm_event_handler(id_priv, &event);
rdma_destroy_ah_attr(&event.param.ud.ah_attr);
- if (ret) {
- destroy_id_handler_unlock(id_priv);
- return 0;
- }
+ WARN_ON(ret);
out:
mutex_unlock(&id_priv->handler_mutex);
@@ -4542,17 +4569,6 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
rec.join_state = mc->join_state;
- if ((rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) &&
- (!ib_sa_sendonly_fullmem_support(&sa_client,
- id_priv->id.device,
- id_priv->id.port_num))) {
- dev_warn(
- &id_priv->id.device->dev,
- "RDMA CM: port %u Unable to multicast join: SM doesn't support Send Only Full Member option\n",
- id_priv->id.port_num);
- return -EOPNOTSUPP;
- }
-
comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
@@ -4604,7 +4620,6 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
struct cma_multicast *mc)
{
- struct cma_work *work;
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
int err = 0;
struct sockaddr *addr = (struct sockaddr *)&mc->addr;
@@ -4618,10 +4633,6 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
if (cma_zero_addr(addr))
return -EINVAL;
- work = kzalloc(sizeof *work, GFP_KERNEL);
- if (!work)
- return -ENOMEM;
-
gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
@@ -4632,10 +4643,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
if (dev_addr->bound_dev_if)
ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
- if (!ndev) {
- err = -ENODEV;
- goto err_free;
- }
+ if (!ndev)
+ return -ENODEV;
+
ib.rec.rate = iboe_get_rate(ndev);
ib.rec.hop_limit = 1;
ib.rec.mtu = iboe_get_mtu(ndev->mtu);
@@ -4653,24 +4663,15 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
err = -ENOTSUPP;
}
dev_put(ndev);
- if (err || !ib.rec.mtu) {
- if (!err)
- err = -EINVAL;
- goto err_free;
- }
+ if (err || !ib.rec.mtu)
+ return err ?: -EINVAL;
+
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
&ib.rec.port_gid);
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- cma_make_mc_event(0, id_priv, &ib, &work->event, mc);
- /* Balances with cma_id_put() in cma_work_handler */
- cma_id_get(id_priv);
- queue_work(cma_wq, &work->work);
+ INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
+ cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc);
+ queue_work(cma_wq, &mc->iboe_join.work);
return 0;
-
-err_free:
- kfree(work);
- return err;
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 97a77ea8d3c9..e0d5e3bae458 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -204,7 +204,6 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
unsigned int i;
unsigned int ports_num;
struct cma_dev_port_group *ports;
- int err;
ibdev = cma_get_ib_dev(cma_dev);
@@ -215,10 +214,8 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
ports = kcalloc(ports_num, sizeof(*cma_dev_group->ports),
GFP_KERNEL);
- if (!ports) {
- err = -ENOMEM;
- goto free;
- }
+ if (!ports)
+ return -ENOMEM;
for (i = 0; i < ports_num; i++) {
char port_str[10];
@@ -234,12 +231,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
}
cma_dev_group->ports = ports;
-
return 0;
-free:
- kfree(ports);
- cma_dev_group->ports = NULL;
- return err;
}
static void release_cma_dev(struct config_item *item)
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 92745522250e..f3a7c1f404af 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -10,30 +10,35 @@
#define ALL_AUTO_MODE_MASKS (RDMA_COUNTER_MASK_QP_TYPE | RDMA_COUNTER_MASK_PID)
-static int __counter_set_mode(struct rdma_counter_mode *curr,
+static int __counter_set_mode(struct rdma_port_counter *port_counter,
enum rdma_nl_counter_mode new_mode,
enum rdma_nl_counter_mask new_mask)
{
- if ((new_mode == RDMA_COUNTER_MODE_AUTO) &&
- ((new_mask & (~ALL_AUTO_MODE_MASKS)) ||
- (curr->mode != RDMA_COUNTER_MODE_NONE)))
- return -EINVAL;
+ if (new_mode == RDMA_COUNTER_MODE_AUTO && port_counter->num_counters)
+ if (new_mask & ~ALL_AUTO_MODE_MASKS ||
+ port_counter->mode.mode != RDMA_COUNTER_MODE_NONE)
+ return -EINVAL;
- curr->mode = new_mode;
- curr->mask = new_mask;
+ port_counter->mode.mode = new_mode;
+ port_counter->mode.mask = new_mask;
return 0;
}
-/**
+/*
* rdma_counter_set_auto_mode() - Turn on/off per-port auto mode
*
- * When @on is true, the @mask must be set; When @on is false, it goes
- * into manual mode if there's any counter, so that the user is able to
- * manually access them.
+ * @dev: Device to operate
+ * @port: Port to use
+ * @mask: Mask to configure
+ * @extack: Message to the user
+ *
+ * Return 0 on success.
*/
int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
- bool on, enum rdma_nl_counter_mask mask)
+ enum rdma_nl_counter_mask mask,
+ struct netlink_ext_ack *extack)
{
+ enum rdma_nl_counter_mode mode = RDMA_COUNTER_MODE_AUTO;
struct rdma_port_counter *port_counter;
int ret;
@@ -42,23 +47,23 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
return -EOPNOTSUPP;
mutex_lock(&port_counter->lock);
- if (on) {
- ret = __counter_set_mode(&port_counter->mode,
- RDMA_COUNTER_MODE_AUTO, mask);
- } else {
- if (port_counter->mode.mode != RDMA_COUNTER_MODE_AUTO) {
- ret = -EINVAL;
- goto out;
- }
+ if (mask) {
+ ret = __counter_set_mode(port_counter, mode, mask);
+ if (ret)
+ NL_SET_ERR_MSG(
+ extack,
+ "Turning on auto mode is not allowed when there is bound QP");
+ goto out;
+ }
- if (port_counter->num_counters)
- ret = __counter_set_mode(&port_counter->mode,
- RDMA_COUNTER_MODE_MANUAL, 0);
- else
- ret = __counter_set_mode(&port_counter->mode,
- RDMA_COUNTER_MODE_NONE, 0);
+ if (port_counter->mode.mode != RDMA_COUNTER_MODE_AUTO) {
+ ret = -EINVAL;
+ goto out;
}
+ mode = (port_counter->num_counters) ? RDMA_COUNTER_MODE_MANUAL :
+ RDMA_COUNTER_MODE_NONE;
+ ret = __counter_set_mode(port_counter, mode, 0);
out:
mutex_unlock(&port_counter->lock);
return ret;
@@ -122,8 +127,8 @@ static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u8 port,
mutex_lock(&port_counter->lock);
switch (mode) {
case RDMA_COUNTER_MODE_MANUAL:
- ret = __counter_set_mode(&port_counter->mode,
- RDMA_COUNTER_MODE_MANUAL, 0);
+ ret = __counter_set_mode(port_counter, RDMA_COUNTER_MODE_MANUAL,
+ 0);
if (ret) {
mutex_unlock(&port_counter->lock);
goto err_mode;
@@ -170,8 +175,7 @@ static void rdma_counter_free(struct rdma_counter *counter)
port_counter->num_counters--;
if (!port_counter->num_counters &&
(port_counter->mode.mode == RDMA_COUNTER_MODE_MANUAL))
- __counter_set_mode(&port_counter->mode, RDMA_COUNTER_MODE_NONE,
- 0);
+ __counter_set_mode(port_counter, RDMA_COUNTER_MODE_NONE, 0);
mutex_unlock(&port_counter->lock);
@@ -227,7 +231,7 @@ static void counter_history_stat_update(struct rdma_counter *counter)
port_counter->hstats->value[i] += counter->stats->value[i];
}
-/**
+/*
* rdma_get_counter_auto_mode - Find the counter that @qp should be bound
* with in auto mode
*
@@ -274,7 +278,7 @@ static void counter_release(struct kref *kref)
rdma_counter_free(counter);
}
-/**
+/*
* rdma_counter_bind_qp_auto - Check and bind the QP to a counter base on
* the auto-mode rule
*/
@@ -311,7 +315,7 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port)
return 0;
}
-/**
+/*
* rdma_counter_unbind_qp - Unbind a qp from a counter
* @force:
* true - Decrease the counter ref-count anyway (e.g., qp destroy)
@@ -380,7 +384,7 @@ next:
return sum;
}
-/**
+/*
* rdma_counter_get_hwstat_value() - Get the sum value of all counters on a
* specific port, including the running ones and history data
*/
@@ -436,7 +440,7 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev,
return counter;
}
-/**
+/*
* rdma_counter_bind_qpn() - Bind QP @qp_num to counter @counter_id
*/
int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
@@ -485,7 +489,7 @@ err:
return ret;
}
-/**
+/*
* rdma_counter_bind_qpn_alloc() - Alloc a counter and bind QP @qp_num to it
* The id of new counter is returned in @counter_id
*/
@@ -533,7 +537,7 @@ err:
return ret;
}
-/**
+/*
* rdma_counter_unbind_qpn() - Unbind QP @qp_num from a counter
*/
int rdma_counter_unbind_qpn(struct ib_device *dev, u8 port,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index e96f979e6d52..aac0fe14e1d9 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -848,6 +848,20 @@ static int setup_port_data(struct ib_device *device)
return 0;
}
+/**
+ * ib_port_immutable_read() - Read rdma port's immutable data
+ * @dev: IB device
+ * @port: port number whose immutable data to read. It starts with index 1 and
+ *	 is valid up to and including rdma_end_port().
+ */
+const struct ib_port_immutable*
+ib_port_immutable_read(struct ib_device *dev, unsigned int port)
+{
+ WARN_ON(!rdma_is_port_valid(dev, port));
+ return &dev->port_data[port].immutable;
+}
+EXPORT_SYMBOL(ib_port_immutable_read);
+
void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
if (dev->ops.get_dev_fw_str)
@@ -1887,9 +1901,9 @@ static int __ib_get_client_nl_info(struct ib_device *ibdev,
/**
* ib_get_client_nl_info - Fetch the nl_info from a client
- * @device - IB device
- * @client_name - Name of the client
- * @res - Result of the query
+ * @ibdev: IB device
+ * @client_name: Name of the client
+ * @res: Result of the query
*/
int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name,
struct ib_client_nl_info *res)
@@ -2317,7 +2331,7 @@ void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
up_read(&devices_rwsem);
}
-/**
+/*
* ib_enum_all_devs - enumerate all ib_devices
* @cb: Callback to call for each found ib_device
*
@@ -2681,6 +2695,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, read_counters);
SET_DEVICE_OP(dev_ops, reg_dm_mr);
SET_DEVICE_OP(dev_ops, reg_user_mr);
+ SET_DEVICE_OP(dev_ops, reg_user_mr_dmabuf);
SET_DEVICE_OP(dev_ops, req_ncomp_notif);
SET_DEVICE_OP(dev_ops, req_notify_cq);
SET_DEVICE_OP(dev_ops, rereg_user_mr);
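
ib_port_immutable_read() above exposes the cached per-port immutable data to callers that already hold a valid port number. A hedged sketch of a caller; the field and capability names come from struct ib_port_immutable and the RDMA_CORE_CAP_* flags in <rdma/ib_verbs.h>, while the helper name itself is hypothetical:

/* Hedged sketch of a possible caller. */
#include <rdma/ib_verbs.h>

static bool example_port_speaks_iwarp(struct ib_device *dev, unsigned int port)
{
	const struct ib_port_immutable *immutable;

	if (!rdma_is_port_valid(dev, port))
		return false;

	immutable = ib_port_immutable_read(dev, port);
	return immutable->core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
}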
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 46686990a827..30a0ff76b332 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -392,7 +392,7 @@ static const struct nla_policy resp_reg_policy[IWPM_NLA_RREG_PID_MAX] = {
/**
* iwpm_register_pid_cb - Process the port mapper response to
* iwpm_register_pid query
- * @skb:
+ * @skb: The socket buffer
* @cb: Contains the received message (payload and netlink header)
*
* If successful, the function receives the userspace port mapper pid
@@ -468,7 +468,7 @@ static const struct nla_policy resp_add_policy[IWPM_NLA_RMANAGE_MAPPING_MAX] = {
/**
* iwpm_add_mapping_cb - Process the port mapper response to
* iwpm_add_mapping request
- * @skb:
+ * @skb: The socket buffer
* @cb: Contains the received message (payload and netlink header)
*/
int iwpm_add_mapping_cb(struct sk_buff *skb, struct netlink_callback *cb)
@@ -545,7 +545,7 @@ static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] =
/**
* iwpm_add_and_query_mapping_cb - Process the port mapper response to
* iwpm_add_and_query_mapping request
- * @skb:
+ * @skb: The socket buffer
* @cb: Contains the received message (payload and netlink header)
*/
int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,
@@ -627,7 +627,7 @@ query_mapping_response_exit:
/**
* iwpm_remote_info_cb - Process remote connecting peer address info, which
* the port mapper has received from the connecting peer
- * @skb:
+ * @skb: The socket buffer
* @cb: Contains the received message (payload and netlink header)
*
* Stores the IPv4/IPv6 address info in a hash table
@@ -706,7 +706,7 @@ static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = {
/**
* iwpm_mapping_info_cb - Process a notification that the userspace
* port mapper daemon is started
- * @skb:
+ * @skb: The socket buffer
* @cb: Contains the received message (payload and netlink header)
*
* Using the received port mapper pid, send all the local mapping
@@ -766,7 +766,7 @@ static const struct nla_policy ack_mapinfo_policy[IWPM_NLA_MAPINFO_NUM_MAX] = {
/**
* iwpm_ack_mapping_info_cb - Process the port mapper ack for
* the provided local mapping info records
- * @skb:
+ * @skb: The socket buffer
* @cb: Contains the received message (payload and netlink header)
*/
int iwpm_ack_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
@@ -796,7 +796,7 @@ static const struct nla_policy map_error_policy[IWPM_NLA_ERR_MAX] = {
/**
* iwpm_mapping_error_cb - Process port mapper notification for error
*
- * @skb:
+ * @skb: The socket buffer
* @cb: Contains the received message (payload and netlink header)
*/
int iwpm_mapping_error_cb(struct sk_buff *skb, struct netlink_callback *cb)
@@ -841,7 +841,7 @@ static const struct nla_policy hello_policy[IWPM_NLA_HELLO_MAX] = {
/**
* iwpm_hello_cb - Process a hello message from iwpmd
*
- * @skb:
+ * @skb: The socket buffer
* @cb: Contains the received message (payload and netlink header)
*
* Using the received port mapper pid, send the kernel's abi_version
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 13495b43dbc1..f80e5550b51f 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -127,8 +127,8 @@ static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage *,
/**
* iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address
* info in a hash table
- * @local_addr: Local ip/tcp address
- * @mapped_addr: Mapped local ip/tcp address
+ * @local_sockaddr: Local ip/tcp address
+ * @mapped_sockaddr: Mapped local ip/tcp address
* @nl_client: The index of the netlink client
* @map_flags: IWPM mapping flags
*/
@@ -174,7 +174,7 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
/**
* iwpm_remove_mapinfo - Remove local and mapped IPv4/IPv6 address
* info from the hash table
- * @local_addr: Local ip/tcp address
+ * @local_sockaddr: Local ip/tcp address
* @mapped_local_addr: Mapped local ip/tcp address
*
* Returns err code if mapping info is not found in the hash table,
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 740f03ecc05d..57519ca6cd2c 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -721,6 +721,7 @@ EXPORT_SYMBOL(ib_sa_get_mcmember_rec);
* member record and gid of the device.
* @device: RDMA device
* @port_num: Port of the rdma device to consider
+ * @rec: Multicast member record to use
* @ndev: Optional netdevice, applicable only for RoCE
* @gid_type: GID type to consider
* @ah_attr: AH attribute to fillup on successful completion
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 08366e254b1d..d306049c22a2 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -1768,9 +1768,7 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
mask = nla_get_u32(
tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
-
- ret = rdma_counter_set_auto_mode(device, port,
- mask ? true : false, mask);
+ ret = rdma_counter_set_auto_mode(device, port, mask, extack);
if (ret)
goto err_msg;
} else {
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index ff1551b3cf61..ffabaf327242 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -201,8 +201,8 @@ EXPORT_SYMBOL(rdma_restrack_parent_name);
/**
* rdma_restrack_new() - Initializes new restrack entry to allow _put() interface
* to release memory in fully automatic way.
- * @res - Entry to initialize
- * @type - REstrack type
+ * @res: Entry to initialize
+ * @type: REstrack type
*/
void rdma_restrack_new(struct rdma_restrack_entry *res,
enum rdma_restrack_type type)
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 6b8364bb032d..34fff94eaa38 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -505,7 +505,7 @@ static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
* rdma_roce_rescan_device - Rescan all of the network devices in the system
* and add their gids, as needed, to the relevant RoCE devices.
*
- * @device: the rdma device
+ * @ib_dev: the rdma device
*/
void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index a96030b784eb..31156e22d3e7 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -410,7 +410,7 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
ctx->type = RDMA_RW_SIG_MR;
ctx->nr_ops = 1;
- ctx->reg = kcalloc(1, sizeof(*ctx->reg), GFP_KERNEL);
+ ctx->reg = kzalloc(sizeof(*ctx->reg), GFP_KERNEL);
if (!ctx->reg) {
ret = -ENOMEM;
goto out_unmap_prot_sg;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 89a831fa1885..9ef1a355131b 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1434,7 +1434,7 @@ enum opa_pr_supported {
PR_IB_SUPPORTED
};
-/**
+/*
* opa_pr_query_possible - Check if current PR query can be an OPA query.
*
 * Returns PR_NOT_SUPPORTED if a path record query is not
@@ -1951,30 +1951,6 @@ err1:
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
-bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
- struct ib_device *device,
- u8 port_num)
-{
- struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
- struct ib_sa_port *port;
- bool ret = false;
- unsigned long flags;
-
- if (!sa_dev)
- return ret;
-
- port = &sa_dev->port[port_num - sa_dev->start_port];
-
- spin_lock_irqsave(&port->classport_lock, flags);
- if ((port->classport_info.valid) &&
- (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_IB))
- ret = ib_get_cpi_capmask2(&port->classport_info.data.ib)
- & IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT;
- spin_unlock_irqrestore(&port->classport_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(ib_sa_sendonly_fullmem_support);
-
struct ib_classport_info_context {
struct completion done;
struct ib_sa_query *sa_query;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 917338db7ac1..2dde99a9ba07 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -2,6 +2,7 @@
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2020 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -278,6 +279,8 @@ void ib_umem_release(struct ib_umem *umem)
{
if (!umem)
return;
+ if (umem->is_dmabuf)
+ return ib_umem_dmabuf_release(to_ib_umem_dmabuf(umem));
if (umem->is_odp)
return ib_umem_odp_release(to_ib_umem_odp(umem));
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
new file mode 100644
index 000000000000..f9b5162d9260
--- /dev/null
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright (c) 2020 Intel Corporation. All rights reserved.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
+#include <linux/dma-mapping.h>
+
+#include "uverbs.h"
+
+int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
+{
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ struct dma_fence *fence;
+ unsigned long start, end, cur = 0;
+ unsigned int nmap = 0;
+ int i;
+
+ dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
+
+ if (umem_dmabuf->sgt)
+ goto wait_fence;
+
+ sgt = dma_buf_map_attachment(umem_dmabuf->attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ /* modify the sg list in-place to match umem address and length */
+
+ start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
+ end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
+ PAGE_SIZE);
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ if (start < cur + sg_dma_len(sg) && cur < end)
+ nmap++;
+ if (cur <= start && start < cur + sg_dma_len(sg)) {
+ unsigned long offset = start - cur;
+
+ umem_dmabuf->first_sg = sg;
+ umem_dmabuf->first_sg_offset = offset;
+ sg_dma_address(sg) += offset;
+ sg_dma_len(sg) -= offset;
+ cur += offset;
+ }
+ if (cur < end && end <= cur + sg_dma_len(sg)) {
+ unsigned long trim = cur + sg_dma_len(sg) - end;
+
+ umem_dmabuf->last_sg = sg;
+ umem_dmabuf->last_sg_trim = trim;
+ sg_dma_len(sg) -= trim;
+ break;
+ }
+ cur += sg_dma_len(sg);
+ }
+
+ umem_dmabuf->umem.sg_head.sgl = umem_dmabuf->first_sg;
+ umem_dmabuf->umem.sg_head.nents = nmap;
+ umem_dmabuf->umem.nmap = nmap;
+ umem_dmabuf->sgt = sgt;
+
+wait_fence:
+ /*
+ * Although the sg list is valid now, the content of the pages
+	 * may not be up-to-date. Wait for the exporter to finish
+ * the migration.
+ */
+ fence = dma_resv_get_excl(umem_dmabuf->attach->dmabuf->resv);
+ if (fence)
+ return dma_fence_wait(fence, false);
+
+ return 0;
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
+
+void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
+{
+ dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
+
+ if (!umem_dmabuf->sgt)
+ return;
+
+	/* restore the original sg list */
+ if (umem_dmabuf->first_sg) {
+ sg_dma_address(umem_dmabuf->first_sg) -=
+ umem_dmabuf->first_sg_offset;
+ sg_dma_len(umem_dmabuf->first_sg) +=
+ umem_dmabuf->first_sg_offset;
+ umem_dmabuf->first_sg = NULL;
+ umem_dmabuf->first_sg_offset = 0;
+ }
+ if (umem_dmabuf->last_sg) {
+ sg_dma_len(umem_dmabuf->last_sg) +=
+ umem_dmabuf->last_sg_trim;
+ umem_dmabuf->last_sg = NULL;
+ umem_dmabuf->last_sg_trim = 0;
+ }
+
+ dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
+ DMA_BIDIRECTIONAL);
+
+ umem_dmabuf->sgt = NULL;
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);
+
+struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
+ unsigned long offset, size_t size,
+ int fd, int access,
+ const struct dma_buf_attach_ops *ops)
+{
+ struct dma_buf *dmabuf;
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct ib_umem *umem;
+ unsigned long end;
+ struct ib_umem_dmabuf *ret = ERR_PTR(-EINVAL);
+
+ if (check_add_overflow(offset, (unsigned long)size, &end))
+ return ret;
+
+ if (unlikely(!ops || !ops->move_notify))
+ return ret;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return ERR_CAST(dmabuf);
+
+ if (dmabuf->size < end)
+ goto out_release_dmabuf;
+
+ umem_dmabuf = kzalloc(sizeof(*umem_dmabuf), GFP_KERNEL);
+ if (!umem_dmabuf) {
+ ret = ERR_PTR(-ENOMEM);
+ goto out_release_dmabuf;
+ }
+
+ umem = &umem_dmabuf->umem;
+ umem->ibdev = device;
+ umem->length = size;
+ umem->address = offset;
+ umem->writable = ib_access_writable(access);
+ umem->is_dmabuf = 1;
+
+ if (!ib_umem_num_pages(umem))
+ goto out_free_umem;
+
+ umem_dmabuf->attach = dma_buf_dynamic_attach(
+ dmabuf,
+ device->dma_device,
+ ops,
+ umem_dmabuf);
+ if (IS_ERR(umem_dmabuf->attach)) {
+ ret = ERR_CAST(umem_dmabuf->attach);
+ goto out_free_umem;
+ }
+ return umem_dmabuf;
+
+out_free_umem:
+ kfree(umem_dmabuf);
+
+out_release_dmabuf:
+ dma_buf_put(dmabuf);
+ return ret;
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_get);
+
+void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
+{
+ struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;
+
+ dma_buf_detach(dmabuf, umem_dmabuf->attach);
+ dma_buf_put(dmabuf);
+ kfree(umem_dmabuf);
+}
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 19104a675691..dd7f3b437c6b 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -379,6 +379,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
mutex_lock(&file->mutex);
+ if (file->agents_dead) {
+ mutex_unlock(&file->mutex);
+ return -EIO;
+ }
+
while (list_empty(&file->recv_list)) {
mutex_unlock(&file->mutex);
@@ -392,6 +397,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
mutex_lock(&file->mutex);
}
+ if (file->agents_dead) {
+ mutex_unlock(&file->mutex);
+ return -EIO;
+ }
+
packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
list_del(&packet->list);
@@ -524,7 +534,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
agent = __get_agent(file, packet->mad.hdr.id);
if (!agent) {
- ret = -EINVAL;
+ ret = -EIO;
goto err_up;
}
@@ -653,10 +663,14 @@ static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
/* we will always be able to post a MAD send */
__poll_t mask = EPOLLOUT | EPOLLWRNORM;
+ mutex_lock(&file->mutex);
poll_wait(filp, &file->recv_wait, wait);
if (!list_empty(&file->recv_list))
mask |= EPOLLIN | EPOLLRDNORM;
+ if (file->agents_dead)
+ mask = EPOLLERR;
+ mutex_unlock(&file->mutex);
return mask;
}
@@ -1336,6 +1350,7 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
list_for_each_entry(file, &port->file_list, port_list) {
mutex_lock(&file->mutex);
file->agents_dead = 1;
+ wake_up_interruptible(&file->recv_wait);
mutex_unlock(&file->mutex);
for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 98a5d36813ff..f5b8be3bedde 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1382,7 +1382,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
if (has_sq)
scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
cmd->send_cq_handle, attrs);
- if (!ind_tbl)
+ if (!ind_tbl && cmd->qp_type != IB_QPT_XRC_INI)
rcq = rcq ?: scq;
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
attrs);
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index e47c5949013f..ff047eb024ab 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -91,7 +91,7 @@ void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
}
/**
- * uverbs_alloc() - Quickly allocate memory for use with a bundle
+ * _uverbs_alloc() - Quickly allocate memory for use with a bundle
* @bundle: The bundle
* @size: Number of bytes to allocate
* @flags: Allocator flags
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c
index dd4e76b26c74..f782d5e1aa25 100644
--- a/drivers/infiniband/core/uverbs_std_types_mr.c
+++ b/drivers/infiniband/core/uverbs_std_types_mr.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -182,6 +183,86 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_MR)(
return IS_UVERBS_COPY_ERR(ret) ? ret : 0;
}
+static int UVERBS_HANDLER(UVERBS_METHOD_REG_DMABUF_MR)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_REG_DMABUF_MR_HANDLE);
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DMABUF_MR_PD_HANDLE);
+ struct ib_device *ib_dev = pd->device;
+
+ u64 offset, length, iova;
+ u32 fd, access_flags;
+ struct ib_mr *mr;
+ int ret;
+
+ if (!ib_dev->ops.reg_user_mr_dmabuf)
+ return -EOPNOTSUPP;
+
+ ret = uverbs_copy_from(&offset, attrs,
+ UVERBS_ATTR_REG_DMABUF_MR_OFFSET);
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_from(&length, attrs,
+ UVERBS_ATTR_REG_DMABUF_MR_LENGTH);
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_from(&iova, attrs,
+ UVERBS_ATTR_REG_DMABUF_MR_IOVA);
+ if (ret)
+ return ret;
+
+ if ((offset & ~PAGE_MASK) != (iova & ~PAGE_MASK))
+ return -EINVAL;
+
+ ret = uverbs_copy_from(&fd, attrs,
+ UVERBS_ATTR_REG_DMABUF_MR_FD);
+ if (ret)
+ return ret;
+
+ ret = uverbs_get_flags32(&access_flags, attrs,
+ UVERBS_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
+ IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_ATOMIC |
+ IB_ACCESS_RELAXED_ORDERING);
+ if (ret)
+ return ret;
+
+ ret = ib_check_mr_access(ib_dev, access_flags);
+ if (ret)
+ return ret;
+
+ mr = pd->device->ops.reg_user_mr_dmabuf(pd, offset, length, iova, fd,
+ access_flags,
+ &attrs->driver_udata);
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
+
+ mr->device = pd->device;
+ mr->pd = pd;
+ mr->type = IB_MR_TYPE_USER;
+ mr->uobject = uobj;
+ atomic_inc(&pd->usecnt);
+
+ uobj->object = mr;
+
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DMABUF_MR_HANDLE);
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DMABUF_MR_RESP_LKEY,
+ &mr->lkey, sizeof(mr->lkey));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DMABUF_MR_RESP_RKEY,
+ &mr->rkey, sizeof(mr->rkey));
+ return ret;
+}
+
DECLARE_UVERBS_NAMED_METHOD(
UVERBS_METHOD_ADVISE_MR,
UVERBS_ATTR_IDR(UVERBS_ATTR_ADVISE_MR_PD_HANDLE,
@@ -247,6 +328,37 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_TYPE(u32),
UA_MANDATORY));
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_REG_DMABUF_MR,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DMABUF_MR_HANDLE,
+ UVERBS_OBJECT_MR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DMABUF_MR_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_LENGTH,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_IOVA,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_FD,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
+ enum ib_access_flags),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DMABUF_MR_RESP_LKEY,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DMABUF_MR_RESP_RKEY,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UVERBS_METHOD_MR_DESTROY,
UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_MR_HANDLE,
@@ -257,10 +369,11 @@ DECLARE_UVERBS_NAMED_METHOD_DESTROY(
DECLARE_UVERBS_NAMED_OBJECT(
UVERBS_OBJECT_MR,
UVERBS_TYPE_ALLOC_IDR(uverbs_free_mr),
+ &UVERBS_METHOD(UVERBS_METHOD_ADVISE_MR),
&UVERBS_METHOD(UVERBS_METHOD_DM_MR_REG),
&UVERBS_METHOD(UVERBS_METHOD_MR_DESTROY),
- &UVERBS_METHOD(UVERBS_METHOD_ADVISE_MR),
- &UVERBS_METHOD(UVERBS_METHOD_QUERY_MR));
+ &UVERBS_METHOD(UVERBS_METHOD_QUERY_MR),
+ &UVERBS_METHOD(UVERBS_METHOD_REG_DMABUF_MR));
const struct uapi_definition uverbs_def_obj_mr[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_MR,
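
For reference, a minimal userspace sketch of exercising the new REG_DMABUF_MR method added above, assuming the matching rdma-core verb ibv_reg_dmabuf_mr() and an already-exported dma-buf file descriptor; the fd, length and iova below are illustrative and not part of this patch:

#include <stdint.h>
#include <infiniband/verbs.h>

/* Register a dma-buf backed MR; offset and iova must share the same page offset. */
static struct ibv_mr *reg_dmabuf_mr(struct ibv_pd *pd, int dmabuf_fd,
                                    size_t length, uint64_t iova)
{
        return ibv_reg_dmabuf_mr(pd, 0 /* offset */, length, iova, dmabuf_fd,
                                 IBV_ACCESS_LOCAL_WRITE |
                                 IBV_ACCESS_REMOTE_READ |
                                 IBV_ACCESS_REMOTE_WRITE);
}
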
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 9137a25bb521..28464c58738c 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -2248,7 +2248,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
struct ib_qp_init_attr init_attr = {};
struct ib_qp_attr attr = {};
int num_eth_ports = 0;
- int port;
+ unsigned int port;
/* If QP state >= init, it is assigned to a port and we can check this
* port only.
@@ -2263,7 +2263,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
}
/* Can't get a quick answer, iterate over all ports */
- for (port = 0; port < qp->device->phys_port_cnt; port++)
+ rdma_for_each_port(qp->device, port)
if (rdma_port_get_link_layer(qp->device, port) !=
IB_LINK_LAYER_INFINIBAND)
num_eth_ports++;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 401bdc9e931e..ba515efd4fdc 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -469,7 +469,6 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
struct bnxt_re_mr *mr = NULL;
dma_addr_t dma_addr = 0;
struct ib_mw *mw;
- u64 pbl_tbl;
int rc;
dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
@@ -504,9 +503,8 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
mr->ib_mr.lkey = mr->qplib_mr.lkey;
mr->qplib_mr.va = (u64)(unsigned long)fence->va;
mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
- pbl_tbl = dma_addr;
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
- BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
+ BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
goto fail;
@@ -3589,7 +3587,6 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_re_mr *mr;
- u64 pbl = 0;
int rc;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
@@ -3608,7 +3605,7 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
mr->qplib_mr.hwq.level = PBL_LVL_MAX;
mr->qplib_mr.total_size = -1; /* Infinite length */
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
PAGE_SIZE);
if (rc)
goto fail_mr;
@@ -3779,19 +3776,6 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
return rc;
}
-static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
- int page_shift)
-{
- u64 *pbl_tbl = pbl_tbl_orig;
- u64 page_size = BIT_ULL(page_shift);
- struct ib_block_iter biter;
-
- rdma_umem_for_each_dma_block(umem, &biter, page_size)
- *pbl_tbl++ = rdma_block_iter_dma_address(&biter);
-
- return pbl_tbl - pbl_tbl_orig;
-}
-
/* uverbs */
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
@@ -3801,7 +3785,6 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_re_mr *mr;
struct ib_umem *umem;
- u64 *pbl_tbl = NULL;
unsigned long page_size;
int umem_pgs, rc;
@@ -3846,39 +3829,19 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
}
mr->qplib_mr.total_size = length;
- if (page_size == BNXT_RE_PAGE_SIZE_4K &&
- length > BNXT_RE_MAX_MR_SIZE_LOW) {
- ibdev_err(&rdev->ibdev, "Requested MR Sz:%llu Max sup:%llu",
- length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
- rc = -EINVAL;
- goto free_umem;
- }
-
umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
- pbl_tbl = kcalloc(umem_pgs, sizeof(*pbl_tbl), GFP_KERNEL);
- if (!pbl_tbl) {
- rc = -ENOMEM;
- goto free_umem;
- }
-
- /* Map umem buf ptrs to the PBL */
- umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, order_base_2(page_size));
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
- umem_pgs, false, page_size);
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
+ umem_pgs, page_size);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to register user MR");
- goto fail;
+ goto free_umem;
}
- kfree(pbl_tbl);
-
mr->ib_mr.lkey = mr->qplib_mr.lkey;
mr->ib_mr.rkey = mr->qplib_mr.lkey;
atomic_inc(&rdev->mr_count);
return &mr->ib_mr;
-fail:
- kfree(pbl_tbl);
free_umem:
ib_umem_release(umem);
free_mrw:
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 6316179583a6..049b3576302b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -650,42 +650,32 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
}
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
- u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size)
+ struct ib_umem *umem, int num_pbls, u32 buf_pg_size)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_sg_info sginfo = {};
struct creq_register_mr_resp resp;
struct cmdq_register_mr req;
- int pg_ptrs, pages, i, rc;
u16 cmd_flags = 0, level;
- dma_addr_t **pbl_ptr;
+ int pages, rc;
u32 pg_size;
if (num_pbls) {
+ pages = roundup_pow_of_two(num_pbls);
/* Allocate memory for the non-leaf pages to store buf ptrs.
* Non-leaf pages always use the system PAGE_SIZE
*/
- pg_ptrs = roundup_pow_of_two(num_pbls);
- pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
- if (!pages)
- pages++;
-
- if (pages > MAX_PBL_LVL_1_PGS) {
- dev_err(&res->pdev->dev,
- "SP: Reg MR: pages requested (0x%x) exceeded max (0x%x)\n",
- pages, MAX_PBL_LVL_1_PGS);
- return -ENOMEM;
- }
/* Free the hwq if it already exist, must be a rereg */
if (mr->hwq.max_elements)
bnxt_qplib_free_hwq(res, &mr->hwq);
/* Use system PAGE_SIZE */
hwq_attr.res = res;
hwq_attr.depth = pages;
- hwq_attr.stride = PAGE_SIZE;
+ hwq_attr.stride = buf_pg_size;
hwq_attr.type = HWQ_TYPE_MR;
hwq_attr.sginfo = &sginfo;
+ hwq_attr.sginfo->umem = umem;
hwq_attr.sginfo->npages = pages;
hwq_attr.sginfo->pgsize = PAGE_SIZE;
hwq_attr.sginfo->pgshft = PAGE_SHIFT;
@@ -695,11 +685,6 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
"SP: Reg MR memory allocation failed\n");
return -ENOMEM;
}
- /* Write to the hwq */
- pbl_ptr = (dma_addr_t **)mr->hwq.pbl_ptr;
- for (i = 0; i < num_pbls; i++)
- pbl_ptr[PTR_PG(i)][PTR_IDX(i)] =
- (pbl_tbl[i] & PAGE_MASK) | PTU_PTE_VALID;
}
RCFW_CMD_PREP(req, REGISTER_MR, cmd_flags);
@@ -711,7 +696,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
req.pbl = 0;
pg_size = PAGE_SIZE;
} else {
- level = mr->hwq.level + 1;
+ level = mr->hwq.level;
req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
}
pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE;
@@ -728,7 +713,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
req.mr_size = cpu_to_le64(mr->total_size);
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, block);
+ (void *)&resp, NULL, false);
if (rc)
goto fail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 967890cd81f2..bc228340684f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -254,7 +254,7 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res,
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
bool block);
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
- u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size);
+ struct ib_umem *umem, int num_pbls, u32 buf_pg_size);
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr);
int bnxt_qplib_alloc_fast_reg_mr(struct bnxt_qplib_res *res,
struct bnxt_qplib_mrw *mr, int max);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index a7401398cb34..d109bb3822a5 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2474,7 +2474,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
- init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
+ init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c
index b32e6516d65f..ff645b955a08 100644
--- a/drivers/infiniband/hw/cxgb4/restrack.c
+++ b/drivers/infiniband/hw/cxgb4/restrack.c
@@ -209,7 +209,7 @@ int c4iw_fill_res_cm_id_entry(struct sk_buff *msg,
epcp = (struct c4iw_ep_common *)iw_cm_id->provider_data;
if (!epcp)
return 0;
- uep = kcalloc(1, sizeof(*uep), GFP_KERNEL);
+ uep = kzalloc(sizeof(*uep), GFP_KERNEL);
if (!uep)
return 0;
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index b199e4ac6cf9..fa38b34eddb8 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_ADMIN_CMDS_H_
@@ -161,8 +161,8 @@ struct efa_admin_create_qp_resp {
u32 qp_handle;
/*
- * QP number in the given EFA virtual device. Least-significant bits
- * (as needed according to max_qp) carry unique QP ID
+ * QP number in the given EFA virtual device. Least-significant bits (as
+ * needed according to max_qp) carry unique QP ID
*/
u16 qp_num;
@@ -465,7 +465,7 @@ struct efa_admin_create_cq_cmd {
/*
* number of sub cqs - must be equal to sub_cqs_per_cq of queue
- * attributes.
+ * attributes.
*/
u16 num_sub_cqs;
@@ -563,12 +563,8 @@ struct efa_admin_acq_get_stats_resp {
};
struct efa_admin_get_set_feature_common_desc {
- /*
- * 1:0 : select - 0x1 - current value; 0x3 - default
- * value
- * 7:3 : reserved3 - MBZ
- */
- u8 flags;
+ /* MBZ */
+ u8 reserved0;
/* as appears in efa_admin_aq_feature_id */
u8 feature_id;
@@ -823,12 +819,6 @@ enum efa_admin_aenq_group {
EFA_ADMIN_AENQ_GROUPS_NUM = 5,
};
-enum efa_admin_aenq_notification_syndrom {
- EFA_ADMIN_SUSPEND = 0,
- EFA_ADMIN_RESUME = 1,
- EFA_ADMIN_UPDATE_HINTS = 2,
-};
-
struct efa_admin_mmio_req_read_less_resp {
u16 req_id;
@@ -909,9 +899,6 @@ struct efa_admin_host_info {
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
-/* get_set_feature_common_desc */
-#define EFA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
-
/* feature_device_attr_desc */
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK BIT(0)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK BIT(1)
diff --git a/drivers/infiniband/hw/efa/efa_admin_defs.h b/drivers/infiniband/hw/efa/efa_admin_defs.h
index 29d53ed63b3e..78ff9389ae25 100644
--- a/drivers/infiniband/hw/efa/efa_admin_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_defs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_ADMIN_H_
@@ -82,7 +82,7 @@ struct efa_admin_acq_common_desc {
/*
* indicates to the driver which AQ entry has been consumed by the
- * device and could be reused
+ * device and could be reused
*/
u16 sq_head_indx;
};
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index 336bc2c57bb1..0d523ad736c7 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "efa_com.h"
@@ -20,9 +20,6 @@
#define EFA_CTRL_MINOR 0
#define EFA_CTRL_SUB_MINOR 1
-#define EFA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
-#define EFA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
-
enum efa_cmd_status {
EFA_CMD_SUBMITTED,
EFA_CMD_COMPLETED,
@@ -33,8 +30,6 @@ struct efa_comp_ctx {
struct efa_admin_acq_entry *user_cqe;
u32 comp_size;
enum efa_cmd_status status;
- /* status from the device */
- u8 comp_status;
u8 cmd_opcode;
u8 occupied;
};
@@ -140,8 +135,8 @@ static int efa_com_admin_init_sq(struct efa_com_dev *edev)
sq->db_addr = (u32 __iomem *)(edev->reg_bar + EFA_REGS_AQ_PROD_DB_OFF);
- addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(sq->dma_addr);
- addr_low = EFA_DMA_ADDR_TO_UINT32_LOW(sq->dma_addr);
+ addr_high = upper_32_bits(sq->dma_addr);
+ addr_low = lower_32_bits(sq->dma_addr);
writel(addr_low, edev->reg_bar + EFA_REGS_AQ_BASE_LO_OFF);
writel(addr_high, edev->reg_bar + EFA_REGS_AQ_BASE_HI_OFF);
@@ -174,8 +169,8 @@ static int efa_com_admin_init_cq(struct efa_com_dev *edev)
cq->cc = 0;
cq->phase = 1;
- addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(cq->dma_addr);
- addr_low = EFA_DMA_ADDR_TO_UINT32_LOW(cq->dma_addr);
+ addr_high = upper_32_bits(cq->dma_addr);
+ addr_low = lower_32_bits(cq->dma_addr);
writel(addr_low, edev->reg_bar + EFA_REGS_ACQ_BASE_LO_OFF);
writel(addr_high, edev->reg_bar + EFA_REGS_ACQ_BASE_HI_OFF);
@@ -215,8 +210,8 @@ static int efa_com_admin_init_aenq(struct efa_com_dev *edev,
aenq->cc = 0;
aenq->phase = 1;
- addr_low = EFA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
- addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
+ addr_low = lower_32_bits(aenq->dma_addr);
+ addr_high = upper_32_bits(aenq->dma_addr);
writel(addr_low, edev->reg_bar + EFA_REGS_AENQ_BASE_LO_OFF);
writel(addr_high, edev->reg_bar + EFA_REGS_AENQ_BASE_HI_OFF);
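
The ad-hoc EFA_DMA_ADDR_TO_UINT32_HIGH/LOW macros are replaced by the generic upper_32_bits()/lower_32_bits() helpers; a tiny sketch of what the two halves look like (the address value is made up):

#include <linux/kernel.h>
#include <linux/types.h>

static void example_split_dma_addr(void)
{
        u64 addr = 0x0000001234abcd00ULL;

        /* hi == 0x00000012 (written to *_HI), lo == 0x34abcd00 (written to *_LO) */
        pr_info("hi=%#x lo=%#x\n", upper_32_bits(addr), lower_32_bits(addr));
}
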
@@ -421,9 +416,7 @@ static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *a
}
comp_ctx->status = EFA_CMD_COMPLETED;
- comp_ctx->comp_status = cqe->acq_common_descriptor.status;
- if (comp_ctx->user_cqe)
- memcpy(comp_ctx->user_cqe, cqe, comp_ctx->comp_size);
+ memcpy(comp_ctx->user_cqe, cqe, comp_ctx->comp_size);
if (!test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
complete(&comp_ctx->wait_event);
@@ -521,7 +514,7 @@ static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_c
msleep(aq->poll_interval);
}
- err = efa_com_comp_status_to_errno(comp_ctx->comp_status);
+ err = efa_com_comp_status_to_errno(comp_ctx->user_cqe->acq_common_descriptor.status);
out:
efa_com_put_comp_ctx(aq, comp_ctx);
return err;
@@ -569,7 +562,7 @@ static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *com
goto out;
}
- err = efa_com_comp_status_to_errno(comp_ctx->comp_status);
+ err = efa_com_comp_status_to_errno(comp_ctx->user_cqe->acq_common_descriptor.status);
out:
efa_com_put_comp_ctx(aq, comp_ctx);
return err;
@@ -641,8 +634,8 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
aq->efa_dev,
"Failed to process command %s (opcode %u) comp_status %d err %d\n",
efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
- cmd->aq_common_descriptor.opcode, comp_ctx->comp_status,
- err);
+ cmd->aq_common_descriptor.opcode,
+ comp_ctx->user_cqe->acq_common_descriptor.status, err);
atomic64_inc(&aq->stats.cmd_err);
}
@@ -795,7 +788,7 @@ err_destroy_comp_ctxt:
* This method goes over the admin completion queue and wakes up
* all the pending threads that wait on the commands wait event.
*
- * @note: Should be called after MSI-X interrupt.
+ * Note: Should be called after MSI-X interrupt.
*/
void efa_com_admin_q_comp_intr_handler(struct efa_com_dev *edev)
{
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index c87b94ea2939..993cbf37e0b9 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1323,8 +1323,8 @@ CNTR_ELEM(#name, \
/**
* hfi_addr_from_offset - return addr for readq/writeq
- * @dd - the dd device
- * @offset - the offset of the CSR within bar0
+ * @dd: the dd device
+ * @offset: the offset of the CSR within bar0
*
* This routine selects the appropriate base address
* based on the indicated offset.
@@ -1340,8 +1340,8 @@ static inline void __iomem *hfi1_addr_from_offset(
/**
* read_csr - read CSR at the indicated offset
- * @dd - the dd device
- * @offset - the offset of the CSR within bar0
+ * @dd: the dd device
+ * @offset: the offset of the CSR within bar0
*
* Return: the value read or all FF's if there
* is no mapping
@@ -1355,9 +1355,9 @@ u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
/**
* write_csr - write CSR at the indicated offset
- * @dd - the dd device
- * @offset - the offset of the CSR within bar0
- * @value - value to write
+ * @dd: the dd device
+ * @offset: the offset of the CSR within bar0
+ * @value: value to write
*/
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
@@ -1373,8 +1373,8 @@ void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
/**
* get_csr_addr - return the iomem address for offset
- * @dd - the dd device
- * @offset - the offset of the CSR within bar0
+ * @dd: the dd device
+ * @offset: the offset of the CSR within bar0
*
* Return: The iomem address to use in subsequent
* writeq/readq operations.
@@ -8433,7 +8433,7 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
return hfi1_rcd_head(rcd) != tail;
}
-/**
+/*
* Common code for receive contexts interrupt handlers.
* Update traces, increment kernel IRQ counter and
* setup ASPM when needed.
@@ -8447,7 +8447,7 @@ static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
aspm_ctx_disable(rcd);
}
-/**
+/*
* __hfi1_rcd_eoi_intr() - Make HW issue receive interrupt
* when there are packets present in the queue. When calling
* with interrupts enabled please use hfi1_rcd_eoi_intr.
@@ -8484,8 +8484,8 @@ static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
/**
* hfi1_netdev_rx_napi - napi poll function to move eoi inline
- * @napi - pointer to napi object
- * @budget - netdev budget
+ * @napi: pointer to napi object
+ * @budget: netdev budget
*/
int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget)
{
@@ -10142,7 +10142,7 @@ u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
/*
* Set Send Length
- * @ppd - per port data
+ * @ppd: per port data
*
* Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
* registers compare against LRH.PktLen, so use the max bytes included
@@ -14200,9 +14200,9 @@ u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
/**
* init_qpmap_table
- * @dd - device data
- * @first_ctxt - first context
- * @last_ctxt - first context
+ * @dd: device data
+ * @first_ctxt: first context
+ * @last_ctxt: last context
*
* This routine sets the qpn mapping table that
* is indexed by qpn[8:1].
@@ -14383,8 +14383,8 @@ no_qos:
/**
* init_qos - init RX qos
- * @dd - device data
- * @rmt - RSM map table
+ * @dd: device data
+ * @rmt: RSM map table
*
* This routine initializes Rule 0 and the RSM map table to implement
* quality of service (qos).
@@ -15022,8 +15022,7 @@ err_exit:
/**
* hfi1_init_dd() - Initialize most of the dd structure.
- * @dev: the pci_dev for hfi1_ib device
- * @ent: pci_device_id struct for this dev
+ * @dd: the dd device
*
* This is global, and is called directly at init to set up the
* chip-specific function pointers for later use.
@@ -15378,10 +15377,11 @@ static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
/**
* create_pbc - build a pbc for transmission
+ * @ppd: info of physical Hfi port
* @flags: special case flags or-ed in built pbc
- * @srate: static rate
+ * @srate_mbs: static rate
* @vl: vl
- * @dwlen: dword length (header words + data words + pbc words)
+ * @dw_len: dword length (header words + data words + pbc words)
*
* Create a PBC with the given flags, rate, VL, and length.
*
diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.c b/drivers/infiniband/hw/hfi1/exp_rcv.c
index e9d5cc8b771a..91f13140ddf2 100644
--- a/drivers/infiniband/hw/hfi1/exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/exp_rcv.c
@@ -50,7 +50,7 @@
/**
* exp_tid_group_init - initialize exp_tid_set
- * @set - the set
+ * @set: the set
*/
static void hfi1_exp_tid_set_init(struct exp_tid_set *set)
{
@@ -60,7 +60,7 @@ static void hfi1_exp_tid_set_init(struct exp_tid_set *set)
/**
* hfi1_exp_tid_group_init - initialize rcd expected receive
- * @rcd - the rcd
+ * @rcd: the rcd
*/
void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd)
{
@@ -71,7 +71,7 @@ void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd)
/**
* alloc_ctxt_rcv_groups - initialize expected receive groups
- * @rcd - the context to add the groupings to
+ * @rcd: the context to add the groupings to
*/
int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
{
@@ -101,7 +101,7 @@ int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
/**
* free_ctxt_rcv_groups - free expected receive groups
- * @rcd - the context to free
+ * @rcd: the context to free
*
* The routine dismantles the expected receive linked
* list and clears any tids associated with the receive
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 329ee4f48d95..3b7bbc7b9d10 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -1522,7 +1522,7 @@ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
* manage_rcvq - manage a context's receive queue
* @uctxt: the context
* @subctxt: the sub-context
- * @start_stop: action to carry out
+ * @arg: start/stop action to carry out
*
* start_stop == 0 disables receive on the context, for use in queue
* overflow conditions. start_stop==1 re-enables, to be used to
diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
index 387305b768e9..5ba5c11459e7 100644
--- a/drivers/infiniband/hw/hfi1/intr.c
+++ b/drivers/infiniband/hw/hfi1/intr.c
@@ -91,9 +91,9 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
/**
* format_hwmsg - format a single hwerror message
- * @msg message buffer
- * @msgl length of message buffer
- * @hwmsg message to add to message buffer
+ * @msg: message buffer
+ * @msgl: length of message buffer
+ * @hwmsg: message to add to message buffer
*/
static void format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
{
@@ -104,11 +104,11 @@ static void format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
/**
* hfi1_format_hwerrors - format hardware error messages for display
- * @hwerrs hardware errors bit vector
- * @hwerrmsgs hardware error descriptions
- * @nhwerrmsgs number of hwerrmsgs
- * @msg message buffer
- * @msgl message buffer length
+ * @hwerrs: hardware errors bit vector
+ * @hwerrmsgs: hardware error descriptions
+ * @nhwerrmsgs: number of hwerrmsgs
+ * @msg: message buffer
+ * @msgl: message buffer length
*/
void hfi1_format_hwerrors(u64 hwerrs, const struct hfi1_hwerror_msgs *hwerrmsgs,
size_t nhwerrmsgs, char *msg, size_t msgl)
diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c
index 5836fe7b2817..111489802614 100644
--- a/drivers/infiniband/hw/hfi1/iowait.c
+++ b/drivers/infiniband/hw/hfi1/iowait.c
@@ -26,7 +26,7 @@ inline void iowait_clear_flag(struct iowait *wait, u32 flag)
clear_bit(flag, &wait->flags);
}
-/**
+/*
* iowait_init() - initialize wait structure
* @wait: wait struct to initialize
* @tx_limit: limit for overflow queuing
@@ -88,7 +88,7 @@ void iowait_cancel_work(struct iowait *w)
/**
* iowait_set_work_flag - set work flag based on leg
- * @w - the iowait work struct
+ * @w: the iowait work struct
*/
int iowait_set_work_flag(struct iowait_work *w)
{
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 3222e3acb79c..e2f2f7847aed 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -1341,7 +1341,7 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
return 0;
}
-/**
+/*
* subn_set_opa_portinfo - set port information
* @smp: the incoming SM packet
* @ibdev: the infiniband device
@@ -4902,6 +4902,8 @@ static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
* @in_grh: the global route header for this packet
* @in_mad: the incoming MAD
* @out_mad: any outgoing MAD reply
+ * @out_mad_size: size of the outgoing MAD reply
+ * @out_mad_pkey_index: used to pass back the packet key index
*
* Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
* interested in processing.
diff --git a/drivers/infiniband/hw/hfi1/msix.c b/drivers/infiniband/hw/hfi1/msix.c
index d61ee853d215..cf3040bb177f 100644
--- a/drivers/infiniband/hw/hfi1/msix.c
+++ b/drivers/infiniband/hw/hfi1/msix.c
@@ -103,8 +103,8 @@ int msix_initialize(struct hfi1_devdata *dd)
* @arg: context information for the IRQ
* @handler: IRQ handler
* @thread: IRQ thread handler (could be NULL)
- * @idx: zero base idx if multiple devices are needed
* @type: affinity IRQ type
+ * @name: IRQ name
*
* Allocates an MSIx vector if available, and then creates the appropriate
* meta data needed to keep track of the pci IRQ request.
diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
index 6d263c9749b3..1fb6e1a0e4e1 100644
--- a/drivers/infiniband/hw/hfi1/netdev_rx.c
+++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -467,7 +467,7 @@ void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
* hfi1_netdev_get_first_data - Gets the first entry with a greater or equal id.
*
* @dd: hfi1 dev data
- * @id: requested integer id up to INT_MAX
+ * @start_id: requested integer id up to INT_MAX
*/
void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
{
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 18d32f053d26..6f06e9920503 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -334,7 +334,7 @@ int pcie_speeds(struct hfi1_devdata *dd)
return 0;
}
-/**
+/*
* Restore command and BARs after a reset has wiped them out
*
* Returns 0 on success, otherwise a negative error value
@@ -393,7 +393,7 @@ error:
return pcibios_err_to_errno(ret);
}
-/**
+/*
* Save BARs and command to rewrite after device reset
*
* Returns 0 on success, otherwise a negative error value
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index 4a4ec2397857..14bfd8287f4a 100644
--- a/drivers/infiniband/hw/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
@@ -55,6 +55,7 @@
/**
* pio_copy - copy data block to MMIO space
+ * @dd: hfi1 dev data
* @pbuf: a number of blocks allocated within a PIO send context
* @pbc: PBC to send
* @from: source, must be 8 byte aligned
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 681bb4e918c9..e037df911512 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -186,7 +186,7 @@ static void flush_iowait(struct rvt_qp *qp)
write_sequnlock_irqrestore(lock, flags);
}
-/**
+/*
* This function is what we would push to the core layer if we wanted to be a
* "first class citizen". Instead we hide this here and rely on Verbs ULPs
* to blindly pass the MTU enum value from the PathRecord to us.
@@ -289,9 +289,9 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
/**
* hfi1_setup_wqe - set up the wqe
- * @qp - The qp
- * @wqe - The built wqe
- * @call_send - Determine if the send should be posted or scheduled.
+ * @qp: The qp
+ * @wqe: The built wqe
+ * @call_send: Determine if the send should be posted or scheduled.
*
* Perform setup of the wqe. This is called
* prior to inserting the wqe into the ring but after
@@ -595,7 +595,7 @@ struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
return sde;
}
-/*
+/**
* qp_to_send_context - map a qp to a send context
* @qp: the QP
* @sc5: the 5 bit sc
@@ -912,8 +912,8 @@ void notify_error_qp(struct rvt_qp *qp)
/**
* hfi1_qp_iter_cb - callback for iterator
- * @qp - the qp
- * @v - the sl in low bits of v
+ * @qp: the qp
+ * @v: the sl in low bits of v
*
* This is called from the iterator callback to work
* on an individual qp.
diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
index 8386c84c2d92..38f311f855b5 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.c
+++ b/drivers/infiniband/hw/hfi1/qsfp.c
@@ -242,7 +242,7 @@ static int i2c_bus_write(struct hfi1_devdata *dd, struct hfi1_i2c_bus *i2c,
msgs[0].buf = offset_bytes;
msgs[1].addr = slave_addr;
- msgs[1].flags = I2C_M_NOSTART,
+ msgs[1].flags = I2C_M_NOSTART;
msgs[1].len = len;
msgs[1].buf = data;
break;
@@ -290,7 +290,7 @@ static int i2c_bus_read(struct hfi1_devdata *dd, struct hfi1_i2c_bus *bus,
msgs[0].buf = offset_bytes;
msgs[1].addr = slave_addr;
- msgs[1].flags = I2C_M_RD,
+ msgs[1].flags = I2C_M_RD;
msgs[1].len = len;
msgs[1].buf = data;
break;
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 1bb5f57152d3..0174b8ee9f00 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -421,6 +421,7 @@ bail:
/**
* hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
* @qp: a pointer to the QP
+ * @ps: the current packet state
*
* Assumes s_lock is held.
*
@@ -1375,9 +1376,8 @@ static const hfi1_make_rc_ack hfi1_make_rc_ack_tbl[2] = {
[HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B
};
-/**
+/*
* hfi1_send_rc_ack - Construct an ACK packet and send it
- * @qp: a pointer to the QP
*
* This is called from hfi1_rc_rcv() and handle_receive_interrupt().
* Note that RDMA reads and atomics are handled in the
@@ -1992,7 +1992,7 @@ static void update_qp_retry_state(struct rvt_qp *qp, u32 psn, u32 spsn,
}
}
-/**
+/*
* do_rc_ack - process an incoming RC ACK
* @qp: the QP the ACK came in on
* @psn: the packet sequence number of the ACK
@@ -2541,6 +2541,7 @@ static inline void rc_cancel_ack(struct rvt_qp *qp)
* @opcode: the opcode for this packet
* @psn: the packet sequence number for this packet
* @diff: the difference between the PSN and the expected PSN
+ * @rcd: the receive context
*
* This is called from hfi1_rc_rcv() to process an unexpected
* incoming RC packet for the given QP.
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 23ac6057b211..c3fa1814c6a8 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -260,6 +260,7 @@ static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
* @qp: the queue pair
* @ohdr: a pointer to the destination header memory
* @bth0: bth0 passed in from the RC/UC builder
+ * @bth1: bth1 passed in from the RC/UC builder
* @bth2: bth2 passed in from the RC/UC builder
* @middle: non zero indicates ahg "could" be used
* @ps: the current packet state
@@ -348,6 +349,7 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
* @qp: the queue pair
* @ohdr: a pointer to the destination header memory
* @bth0: bth0 passed in from the RC/UC builder
+ * @bth1: bth1 passed in from the RC/UC builder
* @bth2: bth2 passed in from the RC/UC builder
* @middle: non zero indicates ahg "could" be used
* @ps: the current packet state
@@ -455,11 +457,10 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
/**
* hfi1_schedule_send_yield - test for a yield required for QP
* send engine
- * @timeout: Final time for timeout slice for jiffies
* @qp: a pointer to QP
* @ps: a pointer to a structure with commonly looked-up values for
* the send engine progress
- * @tid - true if it is the tid leg
+ * @tid: true if it is the tid leg
*
* This routine checks if the time slice for the QP has expired
* for RC QPs, if so an additional work entry is queued. At this
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index a307d4c8b15a..46b5290b2839 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1740,7 +1740,7 @@ retry:
sane = (hwhead == swhead);
if (unlikely(!sane)) {
- dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
+ dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%u swhd=%u swtl=%u cnt=%u\n",
sde->this_idx,
use_dmahead ? "dma" : "kreg",
hwhead, swhead, swtail, cnt);
@@ -2448,11 +2448,11 @@ nodesc:
* @sde: sdma engine to use
* @wait: SE wait structure to use when full (may be NULL)
* @tx_list: list of sdma_txreqs to submit
- * @count: pointer to a u16 which, after return will contain the total number of
- * sdma_txreqs removed from the tx_list. This will include sdma_txreqs
- * whose SDMA descriptors are submitted to the ring and the sdma_txreqs
- * which are added to SDMA engine flush list if the SDMA engine state is
- * not running.
+ * @count_out: pointer to a u16 which, after return will contain the total number of
+ * sdma_txreqs removed from the tx_list. This will include sdma_txreqs
+ * whose SDMA descriptors are submitted to the ring and the sdma_txreqs
+ * which are added to SDMA engine flush list if the SDMA engine state is
+ * not running.
*
* The call submits the list into the ring.
*
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 92aa2a9b3b5a..0b1f9e4d038b 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -309,7 +309,8 @@ int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit)
/**
* qp_to_rcd - determine the receive context used by a qp
- * @qp - the qp
+ * @rdi: rvt dev struct
+ * @qp: the qp
*
* This routine returns the receive context associated
* with a qp's qpn.
@@ -484,6 +485,7 @@ static struct rvt_qp *first_qp(struct hfi1_ctxtdata *rcd,
/**
* kernel_tid_waiters - determine rcd wait
* @rcd: the receive context
+ * @queue: the queue to operate on
* @qp: the head of the qp being processed
*
* This routine will return false IFF
@@ -517,7 +519,9 @@ static bool kernel_tid_waiters(struct hfi1_ctxtdata *rcd,
/**
* dequeue_tid_waiter - dequeue the qp from the list
- * @qp - the qp to remove the wait list
+ * @rcd: the receive context
+ * @queue: the queue to operate on
+ * @qp: the qp to remove from the wait list
*
* This routine removes the indicated qp from the
* wait list if it is there.
@@ -549,6 +553,7 @@ static void dequeue_tid_waiter(struct hfi1_ctxtdata *rcd,
/**
* queue_qp_for_tid_wait - suspend QP on tid space
* @rcd: the receive context
+ * @queue: the queue to operate on
* @qp: the qp
*
* The qp is inserted at the tail of the rcd
@@ -593,7 +598,7 @@ static void __trigger_tid_waiter(struct rvt_qp *qp)
/**
* tid_rdma_schedule_tid_wakeup - schedule wakeup for a qp
- * @qp - the qp
+ * @qp: the qp
*
* trigger a schedule or a waiting qp in a deadlock
* safe manner. The qp reference is held prior
@@ -630,7 +635,7 @@ static void tid_rdma_schedule_tid_wakeup(struct rvt_qp *qp)
/**
* tid_rdma_trigger_resume - field a trigger work request
- * @work - the work item
+ * @work: the work item
*
* Complete the off qp trigger processing by directly
* calling the progress routine.
@@ -654,7 +659,7 @@ static void tid_rdma_trigger_resume(struct work_struct *work)
rvt_put_qp(qp);
}
-/**
+/*
* tid_rdma_flush_wait - unwind any tid space wait
*
* This is called when resetting a qp to
@@ -693,8 +698,8 @@ void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp)
/* Flow functions */
/**
* kern_reserve_flow - allocate a hardware flow
- * @rcd - the context to use for allocation
- * @last - the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
+ * @rcd: the context to use for allocation
+ * @last: the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
* signify "don't care".
*
* Use a bit mask based allocation to reserve a hardware
@@ -860,9 +865,10 @@ static u8 trdma_pset_order(struct tid_rdma_pageset *s)
/**
* tid_rdma_find_phys_blocks_4k - get groups base on mr info
- * @npages - number of pages
- * @pages - pointer to an array of page structs
- * @list - page set array to return
+ * @flow: overall info for a TID RDMA segment
+ * @pages: pointer to an array of page structs
+ * @npages: number of pages
+ * @list: page set array to return
*
* This routine returns the number of groups associated with
* the current sge information. This implementation is based
@@ -949,10 +955,10 @@ static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow,
/**
* tid_flush_pages - dump out pages into pagesets
- * @list - list of pagesets
- * @idx - pointer to current page index
- * @pages - number of pages to dump
- * @sets - current number of pagesset
+ * @list: list of pagesets
+ * @idx: pointer to current page index
+ * @pages: number of pages to dump
+ * @sets: current number of pagesets
*
* This routine flushes out accumulated pages.
*
@@ -990,9 +996,10 @@ static u32 tid_flush_pages(struct tid_rdma_pageset *list,
/**
* tid_rdma_find_phys_blocks_8k - get groups base on mr info
- * @pages - pointer to an array of page structs
- * @npages - number of pages
- * @list - page set array to return
+ * @flow: overall info for a TID RDMA segment
+ * @pages: pointer to an array of page structs
+ * @npages: number of pages
+ * @list: page set array to return
*
* This routine parses an array of pages to compute pagesets
* in an 8k compatible way.
@@ -1064,7 +1071,7 @@ static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow,
return sets;
}
-/**
+/*
* Find pages for one segment of a sge array represented by @ss. The function
* does not check the sge, the sge must have been checked for alignment with a
* prior call to hfi1_kern_trdma_ok. Other sge checking is done as part of
@@ -1598,7 +1605,7 @@ void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req)
/**
* hfi1_kern_exp_rcv_free_flows - free previously allocated flow information
- * @req - the tid rdma request to be cleaned
+ * @req: the tid rdma request to be cleaned
*/
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req)
{
@@ -3435,7 +3442,7 @@ static u32 hfi1_compute_tid_rnr_timeout(struct rvt_qp *qp, u32 to_seg)
return 0;
}
-/**
+/*
* Central place for resource allocation at TID write responder,
* is called from write_req and write_data interrupt handlers as
* well as the send thread when a queued QP is scheduled for
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 1fb918399da0..5b0f536b34e0 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -55,6 +55,7 @@
/**
* hfi1_make_uc_req - construct a request packet (SEND, RDMA write)
* @qp: a pointer to the QP
+ * @ps: the current packet state
*
* Assume s_lock is held.
*
@@ -291,12 +292,7 @@ bail_no_tx:
/**
* hfi1_uc_rcv - handle an incoming UC packet
- * @ibp: the port the packet came in on
- * @hdr: the header of the packet
- * @rcv_flags: flags relevant to rcv processing
- * @data: the packet data
- * @tlen: the length of the packet
- * @qp: the QP for this packet.
+ * @packet: the packet structure
*
* This is called from qp_rcv() to process an incoming UC packet
* for the given QP.
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index e804af71b629..6ecb984c85fa 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -468,6 +468,7 @@ void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
/**
* hfi1_make_ud_req - construct a UD request packet
* @qp: the QP
+ * @ps: the current packet state
*
* Assume s_lock is held.
*
@@ -840,12 +841,7 @@ static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
/**
* hfi1_ud_rcv - receive an incoming UD packet
- * @ibp: the port the packet came in on
- * @hdr: the packet header
- * @rcv_flags: flags relevant to rcv processing
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP the packet came on
+ * @packet: the packet structure
*
* This is called from qp_rcv() to process an incoming UD packet
* for the given QP.
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index b94fc7fd75a9..58dcab2679d9 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -154,12 +154,12 @@ void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
fd->entry_to_rb = NULL;
}
-/**
+/*
* Release pinned receive buffer pages.
*
- * @mapped - true if the pages have been DMA mapped. false otherwise.
- * @idx - Index of the first page to unpin.
- * @npages - No of pages to unpin.
+ * @mapped: true if the pages have been DMA mapped. false otherwise.
+ * @idx: Index of the first page to unpin.
+ * @npages: No of pages to unpin.
*
* If the pages have been DMA mapped (indicated by mapped parameter), their
* info will be passed via a struct tid_rb_node. If they haven't been mapped,
@@ -189,7 +189,7 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
fd->tid_n_pinned -= npages;
}
-/**
+/*
* Pin receive buffer pages.
*/
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 3591923abebb..0dd4bb0a5a7e 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -729,7 +729,7 @@ bail_txadd:
/**
* update_tx_opstats - record stats by opcode
- * @qp; the qp
+ * @qp: the qp
* @ps: transmit packet state
* @plen: the plen in dwords
*
@@ -1145,7 +1145,7 @@ static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
* egress_pkey_check - check P_KEY of a packet
* @ppd: Physical IB port data
* @slid: SLID for packet
- * @bkey: PKEY for header
+ * @pkey: PKEY for header
* @sc5: SC for packet
* @s_pkey_index: It will be used for look up optimization for kernel contexts
* only. If it is negative value, then it means user contexts is calling this
@@ -1206,7 +1206,7 @@ bad:
return 1;
}
-/**
+/*
* get_send_routine - choose an egress routine
*
* Choose an egress routine based on QP type
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 5afee04fb02c..23c438cef40d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -32,6 +32,7 @@
#ifndef _HNS_ROCE_COMMON_H
#define _HNS_ROCE_COMMON_H
+#include <linux/bitfield.h>
#define roce_write(dev, reg, val) writel((val), (dev)->reg_base + (reg))
#define roce_read(dev, reg) readl((dev)->reg_base + (reg))
@@ -65,6 +66,27 @@
#define hr_reg_enable(ptr, field) _hr_reg_enable(ptr, field)
+#define _hr_reg_clear(ptr, field_type, field_h, field_l) \
+ ({ \
+ const field_type *_ptr = ptr; \
+ *((__le32 *)_ptr + (field_h) / 32) &= \
+ cpu_to_le32( \
+ ~GENMASK((field_h) % 32, (field_l) % 32)) + \
+ BUILD_BUG_ON_ZERO(((field_h) / 32) != \
+ ((field_l) / 32)); \
+ })
+
+#define hr_reg_clear(ptr, field) _hr_reg_clear(ptr, field)
+
+#define _hr_reg_write(ptr, field_type, field_h, field_l, val) \
+ ({ \
+ _hr_reg_clear(ptr, field_type, field_h, field_l); \
+ *((__le32 *)ptr + (field_h) / 32) |= cpu_to_le32(FIELD_PREP( \
+ GENMASK((field_h) % 32, (field_l) % 32), val)); \
+ })
+
+#define hr_reg_write(ptr, field, val) _hr_reg_write(ptr, field, val)
+
#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
@@ -342,8 +364,8 @@
#define ROCEE_TX_CMQ_BASEADDR_L_REG 0x07000
#define ROCEE_TX_CMQ_BASEADDR_H_REG 0x07004
#define ROCEE_TX_CMQ_DEPTH_REG 0x07008
-#define ROCEE_TX_CMQ_TAIL_REG 0x07010
-#define ROCEE_TX_CMQ_HEAD_REG 0x07014
+#define ROCEE_TX_CMQ_HEAD_REG 0x07010
+#define ROCEE_TX_CMQ_TAIL_REG 0x07014
#define ROCEE_RX_CMQ_BASEADDR_L_REG 0x07018
#define ROCEE_RX_CMQ_BASEADDR_H_REG 0x0701c
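
A short sketch of how the new hr_reg_write()/hr_reg_clear() helpers above are meant to be used; the context struct and field descriptor are hypothetical, only their (type, high bit, low bit) shape matters:

struct example_ctx {
        __le32 data[4];
};

/* Hypothetical field descriptor: bits [9:8] of the first __le32 word. */
#define EXAMPLE_CTX_STATE       struct example_ctx, 9, 8

static void example_set_state(struct example_ctx *ctx, u32 state)
{
        /* hr_reg_write() clears the field first, then ORs in the new value */
        hr_reg_write(ctx, EXAMPLE_CTX_STATE, state);
}

static void example_reset_state(struct example_ctx *ctx)
{
        hr_reg_clear(ctx, EXAMPLE_CTX_STATE);
}
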
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 8533fc2d8df2..74fc4940b03a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -38,11 +38,74 @@
#include "hns_roce_hem.h"
#include "hns_roce_common.h"
+static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
+{
+ u32 least_load = bank[0].inuse;
+ u8 bankid = 0;
+ u32 bankcnt;
+ u8 i;
+
+ for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
+ bankcnt = bank[i].inuse;
+ if (bankcnt < least_load) {
+ least_load = bankcnt;
+ bankid = i;
+ }
+ }
+
+ return bankid;
+}
+
+static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+{
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ struct hns_roce_bank *bank;
+ u8 bankid;
+ int id;
+
+ mutex_lock(&cq_table->bank_mutex);
+ bankid = get_least_load_bankid_for_cq(cq_table->bank);
+ bank = &cq_table->bank[bankid];
+
+ id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
+ if (id < 0) {
+ mutex_unlock(&cq_table->bank_mutex);
+ return id;
+ }
+
+ /* the lower 2 bits are the bankid */
+ hr_cq->cqn = (id << CQ_BANKID_SHIFT) | bankid;
+ bank->inuse++;
+ mutex_unlock(&cq_table->bank_mutex);
+
+ return 0;
+}
+
+static inline u8 get_cq_bankid(unsigned long cqn)
+{
+ /* The lower 2 bits of CQN are used to hash to different banks */
+ return (u8)(cqn & GENMASK(1, 0));
+}
+
+static void free_cqn(struct hns_roce_dev *hr_dev, unsigned long cqn)
+{
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ struct hns_roce_bank *bank;
+
+ bank = &cq_table->bank[get_cq_bankid(cqn)];
+
+ ida_free(&bank->ida, cqn >> CQ_BANKID_SHIFT);
+
+ mutex_lock(&cq_table->bank_mutex);
+ bank->inuse--;
+ mutex_unlock(&cq_table->bank_mutex);
+}
+
static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_cmd_mailbox *mailbox;
- struct hns_roce_cq_table *cq_table;
u64 mtts[MTT_MIN_COUNT] = { 0 };
dma_addr_t dma_handle;
int ret;
@@ -54,13 +117,6 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
return -EINVAL;
}
- cq_table = &hr_dev->cq_table;
- ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
- if (ret) {
- ibdev_err(ibdev, "failed to alloc CQ bitmap, ret = %d.\n", ret);
- return ret;
- }
-
/* Get CQC memory HEM(Hardware Entry Memory) table */
ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
if (ret) {
@@ -110,7 +166,6 @@ err_put:
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
err_out:
- hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
return ret;
}
@@ -138,7 +193,6 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
wait_for_completion(&hr_cq->free);
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
- hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
@@ -152,7 +206,6 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
buf_attr.region_count = 1;
- buf_attr.fixed_page = true;
ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
hr_dev->caps.cqe_ba_pg_sz + HNS_HW_PAGE_SHIFT,
@@ -298,11 +351,17 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
goto err_cq_buf;
}
+ ret = alloc_cqn(hr_dev, hr_cq);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
+ goto err_cq_db;
+ }
+
ret = alloc_cqc(hr_dev, hr_cq);
if (ret) {
ibdev_err(ibdev,
"failed to alloc CQ context, ret = %d.\n", ret);
- goto err_cq_db;
+ goto err_cqn;
}
/*
@@ -326,6 +385,8 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
err_cqc:
free_cqc(hr_dev, hr_cq);
+err_cqn:
+ free_cqn(hr_dev, hr_cq->cqn);
err_cq_db:
free_cq_db(hr_dev, hr_cq, udata);
err_cq_buf:
@@ -341,9 +402,11 @@ int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
if (hr_dev->hw->destroy_cq)
hr_dev->hw->destroy_cq(ib_cq, udata);
- free_cq_buf(hr_dev, hr_cq);
- free_cq_db(hr_dev, hr_cq, udata);
free_cqc(hr_dev, hr_cq);
+ free_cqn(hr_dev, hr_cq->cqn);
+ free_cq_db(hr_dev, hr_cq, udata);
+ free_cq_buf(hr_dev, hr_cq);
+
return 0;
}
@@ -402,18 +465,33 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
complete(&hr_cq->free);
}
-int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
+void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ unsigned int reserved_from_bot;
+ unsigned int i;
+ mutex_init(&cq_table->bank_mutex);
xa_init(&cq_table->array);
- return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
- hr_dev->caps.num_cqs - 1,
- hr_dev->caps.reserved_cqs, 0);
+ reserved_from_bot = hr_dev->caps.reserved_cqs;
+
+ for (i = 0; i < reserved_from_bot; i++) {
+ cq_table->bank[get_cq_bankid(i)].inuse++;
+ cq_table->bank[get_cq_bankid(i)].min++;
+ }
+
+ for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++) {
+ ida_init(&cq_table->bank[i].ida);
+ cq_table->bank[i].max = hr_dev->caps.num_cqs /
+ HNS_ROCE_CQ_BANK_NUM - 1;
+ }
}
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
- hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap);
+ int i;
+
+ for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
+ ida_destroy(&hr_dev->cq_table.bank[i].ida);
}
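
The CQN allocator above keeps a separate IDA per bank and encodes the bank id in the low CQ_BANKID_SHIFT bits of the CQN; a short worked example of the encoding (the numbers are illustrative only):

static void example_cqn_encoding(void)
{
        unsigned long cqn = (5UL << CQ_BANKID_SHIFT) | 3; /* IDA id 5 in bank 3 -> 0x17 */
        u8 bankid = cqn & GENMASK(1, 0);                  /* 3, as in get_cq_bankid() */
        unsigned long id = cqn >> CQ_BANKID_SHIFT;        /* 5, what ida_free() gets back */
}
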
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 55d538625e36..3d6b7a2db496 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -54,6 +54,7 @@
/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM 0x40
#define HNS_ROCE_MIN_WQE_NUM 0x20
+#define HNS_ROCE_MIN_SRQ_WQE_NUM 1
/* Hardware specification only for v1 engine */
#define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7
@@ -65,6 +66,8 @@
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
#define HNS_ROCE_MIN_CQE_CNT 16
+#define HNS_ROCE_RESERVED_SGE 1
+
#define HNS_ROCE_MAX_IRQ_NUM 128
#define HNS_ROCE_SGE_IN_WQE 2
@@ -90,6 +93,7 @@
#define HNS_ROCE_MAX_PORTS 6
#define HNS_ROCE_GID_SIZE 16
#define HNS_ROCE_SGE_SIZE 16
+#define HNS_ROCE_DWQE_SIZE 65536
#define HNS_ROCE_HOP_NUM_0 0xff
@@ -119,6 +123,9 @@
#define SRQ_DB_REG 0x230
#define HNS_ROCE_QP_BANK_NUM 8
+#define HNS_ROCE_CQ_BANK_NUM 4
+
+#define CQ_BANKID_SHIFT 2
/* The chip implementation of the consumer index is calculated
* according to twice the actual EQ depth
@@ -163,44 +170,6 @@ enum hns_roce_event {
HNS_ROCE_EVENT_TYPE_FLR = 0x15,
};
-/* Local Work Queue Catastrophic Error,SUBTYPE 0x5 */
-enum {
- HNS_ROCE_LWQCE_QPC_ERROR = 1,
- HNS_ROCE_LWQCE_MTU_ERROR = 2,
- HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR = 3,
- HNS_ROCE_LWQCE_WQE_ADDR_ERROR = 4,
- HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR = 5,
- HNS_ROCE_LWQCE_SL_ERROR = 6,
- HNS_ROCE_LWQCE_PORT_ERROR = 7,
-};
-
-/* Local Access Violation Work Queue Error,SUBTYPE 0x7 */
-enum {
- HNS_ROCE_LAVWQE_R_KEY_VIOLATION = 1,
- HNS_ROCE_LAVWQE_LENGTH_ERROR = 2,
- HNS_ROCE_LAVWQE_VA_ERROR = 3,
- HNS_ROCE_LAVWQE_PD_ERROR = 4,
- HNS_ROCE_LAVWQE_RW_ACC_ERROR = 5,
- HNS_ROCE_LAVWQE_KEY_STATE_ERROR = 6,
- HNS_ROCE_LAVWQE_MR_OPERATION_ERROR = 7,
-};
-
-/* DOORBELL overflow subtype */
-enum {
- HNS_ROCE_DB_SUBTYPE_SDB_OVF = 1,
- HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF = 2,
- HNS_ROCE_DB_SUBTYPE_ODB_OVF = 3,
- HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF = 4,
- HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP = 5,
- HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP = 6,
-};
-
-enum {
- /* RQ&SRQ related operations */
- HNS_ROCE_OPCODE_SEND_DATA_RECEIVE = 0x06,
- HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE = 0x07,
-};
-
#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12
enum {
@@ -253,9 +222,6 @@ enum {
#define HNS_ROCE_CMD_SUCCESS 1
-#define HNS_ROCE_PORT_DOWN 0
-#define HNS_ROCE_PORT_UP 1
-
/* The minimum page size is 4K for hardware */
#define HNS_HW_PAGE_SHIFT 12
#define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT)
@@ -332,7 +298,6 @@ struct hns_roce_buf_attr {
} region[HNS_ROCE_MAX_BT_REGION];
unsigned int region_count; /* valid region count */
unsigned int page_shift; /* buffer page shift */
- bool fixed_page; /* decide page shift is fixed-size or maximum size */
unsigned int user_access; /* umem access flag */
bool mtt_only; /* only alloc buffer-required MTT memory */
};
@@ -393,6 +358,7 @@ struct hns_roce_wq {
spinlock_t lock;
u32 wqe_cnt; /* WQE num */
u32 max_gs;
+ u32 rsv_sge;
int offset;
int wqe_shift; /* WQE size */
u32 head;
@@ -489,6 +455,8 @@ struct hns_roce_idx_que {
struct hns_roce_mtr mtr;
int entry_shift;
unsigned long *bitmap;
+ u32 head;
+ u32 tail;
};
struct hns_roce_srq {
@@ -496,7 +464,9 @@ struct hns_roce_srq {
unsigned long srqn;
u32 wqe_cnt;
int max_gs;
+ u32 rsv_sge;
int wqe_shift;
+ u32 cqn;
void __iomem *db_reg_l;
atomic_t refcount;
@@ -507,8 +477,6 @@ struct hns_roce_srq {
u64 *wrid;
struct hns_roce_idx_que idx_que;
spinlock_t lock;
- u16 head;
- u16 tail;
struct mutex mutex;
void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
};
@@ -532,13 +500,14 @@ struct hns_roce_qp_table {
struct hns_roce_hem_table sccc_table;
struct mutex scc_mutex;
struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
- spinlock_t bank_lock;
+ struct mutex bank_mutex;
};
struct hns_roce_cq_table {
- struct hns_roce_bitmap bitmap;
struct xarray array;
struct hns_roce_hem_table table;
+ struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
+ struct mutex bank_mutex;
};
struct hns_roce_srq_table {
@@ -640,6 +609,10 @@ struct hns_roce_work {
u32 queue_num;
};
+enum {
+ HNS_ROCE_QP_CAP_DIRECT_WQE = BIT(5),
+};
+
struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_wq rq;
@@ -647,7 +620,7 @@ struct hns_roce_qp {
struct hns_roce_db sdb;
unsigned long en_flags;
u32 doorbell_qpn;
- u32 sq_signal_bits;
+ enum ib_sig_type sq_signal_bits;
struct hns_roce_wq sq;
struct hns_roce_mtr mtr;
@@ -779,7 +752,7 @@ struct hns_roce_caps {
u32 max_cqes;
u32 min_cqes;
u32 min_wqes;
- int reserved_cqs;
+ u32 reserved_cqs;
int reserved_srqs;
int num_aeq_vectors;
int num_comp_vectors;
@@ -911,8 +884,7 @@ struct hns_roce_hw {
int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
struct hns_roce_mr *mr, unsigned long mtpt_idx);
int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
- struct hns_roce_mr *mr, int flags, u32 pdn,
- int mr_access_flags, u64 iova, u64 size,
+ struct hns_roce_mr *mr, int flags,
void *mb_buf);
int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
struct hns_roce_mr *mr);
@@ -945,11 +917,7 @@ struct hns_roce_hw {
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int (*init_eq)(struct hns_roce_dev *hr_dev);
void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
- void (*write_srqc)(struct hns_roce_dev *hr_dev,
- struct hns_roce_srq *srq, u32 pdn, u16 xrcd, u32 cqn,
- void *mb_buf, u64 *mtts_wqe, u64 *mtts_idx,
- dma_addr_t dma_handle_wqe,
- dma_addr_t dma_handle_idx);
+ int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
int (*modify_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata);
@@ -982,6 +950,7 @@ struct hns_roce_dev {
struct mutex pgdir_mutex;
int irq[HNS_ROCE_MAX_IRQ_NUM];
u8 __iomem *reg_base;
+ void __iomem *mem_base;
struct hns_roce_caps caps;
struct xarray qp_table_xa;
@@ -1067,7 +1036,7 @@ static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
- __raw_writeq(*(u64 *) val, dest);
+ writeq(*(u64 *)val, dest);
}
static inline struct hns_roce_qp
@@ -1164,7 +1133,7 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
-int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
+void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
@@ -1281,7 +1250,6 @@ u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
-
int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
struct ib_cq *ib_cq);
#endif /* _HNS_ROCE_DEVICE_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index edc9d6b98d95..cfd2e1b60c7f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -1075,9 +1075,8 @@ static struct roce_hem_item *hem_list_alloc_item(struct hns_roce_dev *hr_dev,
return NULL;
if (exist_bt) {
- hem->addr = dma_alloc_coherent(hr_dev->dev,
- count * BA_BYTE_LEN,
- &hem->dma_addr, GFP_KERNEL);
+ hem->addr = dma_alloc_coherent(hr_dev->dev, count * BA_BYTE_LEN,
+ &hem->dma_addr, GFP_KERNEL);
if (!hem->addr) {
kfree(hem);
return NULL;
@@ -1336,6 +1335,10 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
if (ba_num < 1)
return -ENOMEM;
+ if (ba_num > unit)
+ return -ENOBUFS;
+
+ ba_num = min_t(int, ba_num, unit);
INIT_LIST_HEAD(&temp_root);
offset = r->offset;
/* indicate to last region */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index f68585ff8e8a..5346fdca9473 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -43,6 +43,22 @@
#include "hns_roce_hem.h"
#include "hns_roce_hw_v1.h"
+/**
+ * hns_get_gid_index - Get gid index.
+ * @hr_dev: pointer to structure hns_roce_dev.
+ * @port: port, value range: 0 ~ MAX
+ * @gid_index: gid_index, value range: 0 ~ MAX
+ * Description:
+ * N ports share GIDs; allocation method as follows:
+ * GID[0][0], GID[1][0],.....GID[N - 1][0],
+ * GID[0][1], GID[1][1],.....GID[N - 1][1],
+ * And so on
+ */
+u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
+{
+ return gid_index * hr_dev->caps.num_ports + port;
+}
+
static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
{
dseg->lkey = cpu_to_le32(sg->lkey);
@@ -314,8 +330,6 @@ out:
/* Set DB return */
if (likely(nreq)) {
qp->sq.head += nreq;
- /* Memory barrier */
- wmb();
roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
SQ_DOORBELL_U32_4_SQ_HEAD_S,
@@ -395,8 +409,6 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
out:
if (likely(nreq)) {
hr_qp->rq.head += nreq;
- /* Memory barrier */
- wmb();
if (ibqp->qp_type == IB_QPT_GSI) {
__le32 tmp;
@@ -1391,7 +1403,7 @@ static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
/**
* hns_roce_v1_reset - reset RoCE
* @hr_dev: RoCE device struct pointer
- * @enable: true -- drop reset, false -- reset
+ * @dereset: true -- drop reset, false -- reset
* return 0 - success , negative --fail
*/
static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
@@ -1968,12 +1980,6 @@ static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
if (nfreed) {
hr_cq->cons_index += nfreed;
- /*
- * Make sure update of buffer contents is done before
- * updating consumer index.
- */
- wmb();
-
hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
}
}
@@ -2314,8 +2320,6 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
*hr_cq->tptr_addr = hr_cq->cons_index &
((hr_cq->cq_depth << 1) - 1);
- /* Memroy barrier */
- wmb();
hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
}
@@ -3204,9 +3208,6 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
* need to hw to flash RQ HEAD by DB again
*/
if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
- /* Memory barrier */
- wmb();
-
roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 46ab0a321d21..84383236e47d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -193,6 +193,49 @@
#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0
#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M GENMASK(4, 0)
+/* Local Work Queue Catastrophic Error, SUBTYPE 0x5 */
+enum {
+ HNS_ROCE_LWQCE_QPC_ERROR = 1,
+ HNS_ROCE_LWQCE_MTU_ERROR,
+ HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR,
+ HNS_ROCE_LWQCE_WQE_ADDR_ERROR,
+ HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR,
+ HNS_ROCE_LWQCE_SL_ERROR,
+ HNS_ROCE_LWQCE_PORT_ERROR,
+};
+
+/* Local Access Violation Work Queue Error, SUBTYPE 0x7 */
+enum {
+ HNS_ROCE_LAVWQE_R_KEY_VIOLATION = 1,
+ HNS_ROCE_LAVWQE_LENGTH_ERROR,
+ HNS_ROCE_LAVWQE_VA_ERROR,
+ HNS_ROCE_LAVWQE_PD_ERROR,
+ HNS_ROCE_LAVWQE_RW_ACC_ERROR,
+ HNS_ROCE_LAVWQE_KEY_STATE_ERROR,
+ HNS_ROCE_LAVWQE_MR_OPERATION_ERROR,
+};
+
+/* DOORBELL overflow subtype */
+enum {
+ HNS_ROCE_DB_SUBTYPE_SDB_OVF = 1,
+ HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF,
+ HNS_ROCE_DB_SUBTYPE_ODB_OVF,
+ HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF,
+ HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP,
+ HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP,
+};
+
+enum {
+ /* RQ&SRQ related operations */
+ HNS_ROCE_OPCODE_SEND_DATA_RECEIVE = 0x06,
+ HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE,
+};
+
+enum {
+ HNS_ROCE_PORT_DOWN = 0,
+ HNS_ROCE_PORT_UP,
+};
+
struct hns_roce_cq_context {
__le32 cqc_byte_4;
__le32 cq_bt_l;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 833e1f259936..c3934abeb260 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -48,8 +48,8 @@
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
-static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
- struct ib_sge *sg)
+static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
+ struct ib_sge *sg)
{
dseg->lkey = cpu_to_le32(sg->lkey);
dseg->addr = cpu_to_le64(sg->addr);
@@ -99,16 +99,16 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
u64 pbl_ba;
/* use ib_access_flags */
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
- wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
- wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RR_S,
- wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RW_S,
- wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_LW_S,
- wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
+ roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_BIND_EN_S,
+ !!(wr->access & IB_ACCESS_MW_BIND));
+ roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_ATOMIC_S,
+ !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
+ roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_RR_S,
+ !!(wr->access & IB_ACCESS_REMOTE_READ));
+ roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_RW_S,
+ !!(wr->access & IB_ACCESS_REMOTE_WRITE));
+ roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_LW_S,
+ !!(wr->access & IB_ACCESS_LOCAL_WRITE));
/* Data structure reuse may lead to confusion */
pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
@@ -121,12 +121,10 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
fseg->pbl_size = cpu_to_le32(mr->npages);
- roce_set_field(fseg->mode_buf_pg_sz,
- V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
+ roce_set_field(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
- roce_set_bit(fseg->mode_buf_pg_sz,
- V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
+ roce_set_bit(fseg->byte_40, V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}
static void set_atomic_seg(const struct ib_send_wr *wr,
@@ -361,7 +359,7 @@ static int check_send_valid(struct hns_roce_dev *hr_dev,
} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
hr_qp->state == IB_QPS_INIT ||
hr_qp->state == IB_QPS_RTR)) {
- ibdev_err(ibdev, "failed to post WQE, QP state %hhu!\n",
+ ibdev_err(ibdev, "failed to post WQE, QP state %u!\n",
hr_qp->state);
return -EINVAL;
} else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
@@ -469,7 +467,6 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
int ret;
valid_num_sge = calc_wr_sge_num(wr, &msg_len);
- memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
ret = set_ud_opcode(ud_sq_wqe, wr);
if (WARN_ON(ret))
@@ -503,6 +500,8 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
if (ret)
return ret;
+ qp->sl = to_hr_ah(ud_wr(wr)->ah)->av.sl;
+
set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);
/*
@@ -521,10 +520,12 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
return 0;
}
-static int set_rc_opcode(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+static int set_rc_opcode(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
const struct ib_send_wr *wr)
{
u32 ib_op = wr->opcode;
+ int ret = 0;
rc_sq_wqe->immtdata = get_immtdata(wr);
@@ -544,7 +545,10 @@ static int set_rc_opcode(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
break;
case IB_WR_REG_MR:
- set_frmr_seg(rc_sq_wqe, reg_wr(wr));
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ set_frmr_seg(rc_sq_wqe, reg_wr(wr));
+ else
+ ret = -EOPNOTSUPP;
break;
case IB_WR_LOCAL_INV:
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
@@ -553,19 +557,23 @@ static int set_rc_opcode(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
+ if (unlikely(ret))
+ return ret;
+
roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));
- return 0;
+ return ret;
}
static inline int set_rc_wqe(struct hns_roce_qp *qp,
const struct ib_send_wr *wr,
void *wqe, unsigned int *sge_idx,
unsigned int owner_bit)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
unsigned int curr_idx = *sge_idx;
unsigned int valid_num_sge;
@@ -573,11 +581,10 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
int ret;
valid_num_sge = calc_wr_sge_num(wr, &msg_len);
- memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
- ret = set_rc_opcode(rc_sq_wqe, wr);
+ ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
if (WARN_ON(ret))
return ret;
@@ -635,6 +642,8 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
+ /* indicates data on new BAR, 0 : SQ doorbell, 1 : DWQE */
+ roce_set_bit(sq_db.byte_4, V2_DB_FLAG_S, 0);
roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
V2_DB_PARAMETER_IDX_S, qp->sq.head);
roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
@@ -644,6 +653,38 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
}
}
+static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
+ u64 __iomem *dest)
+{
+#define HNS_ROCE_WRITE_TIMES 8
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ int i;
+
+ if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
+ for (i = 0; i < HNS_ROCE_WRITE_TIMES; i++)
+ writeq_relaxed(*(val + i), dest + i);
+}
+
+static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
+ void *wqe)
+{
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
+
+ /* All kinds of DirectWQE have the same header field layout */
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FLAG_S, 1);
+ roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M,
+ V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl);
+ roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M,
+ V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, qp->sl >> 2);
+ roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M,
+ V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head);
+
+ hns_roce_write512(hr_dev, wqe, hr_dev->mem_base +
+ HNS_ROCE_DWQE_SIZE * qp->ibqp.qp_num);
+}
+
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr)
@@ -708,9 +749,12 @@ out:
if (likely(nreq)) {
qp->sq.head += nreq;
qp->next_sge = sge_idx;
- /* Memory barrier */
- wmb();
- update_sq_db(hr_dev, qp);
+
+ if (nreq == 1 && qp->sq.head == qp->sq.tail + 1 &&
+ (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
+ write_dwqe(hr_dev, qp, wqe);
+ else
+ update_sq_db(hr_dev, qp);
}
spin_unlock_irqrestore(&qp->sq.lock, flags);
@@ -721,14 +765,74 @@ out:
static int check_recv_valid(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp)
{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct ib_qp *ibqp = &hr_qp->ibqp;
+
+ if (unlikely(ibqp->qp_type != IB_QPT_RC &&
+ ibqp->qp_type != IB_QPT_GSI &&
+ ibqp->qp_type != IB_QPT_UD)) {
+ ibdev_err(ibdev, "unsupported qp type, qp_type = %d.\n",
+ ibqp->qp_type);
+ return -EOPNOTSUPP;
+ }
+
if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
return -EIO;
- else if (hr_qp->state == IB_QPS_RESET)
+
+ if (hr_qp->state == IB_QPS_RESET)
return -EINVAL;
return 0;
}
+static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
+ u32 max_sge, bool rsv)
+{
+ struct hns_roce_v2_wqe_data_seg *dseg = wqe;
+ u32 i, cnt;
+
+ for (i = 0, cnt = 0; i < wr->num_sge; i++) {
+ /* Skip zero-length sge */
+ if (!wr->sg_list[i].length)
+ continue;
+ set_data_seg_v2(dseg + cnt, wr->sg_list + i);
+ cnt++;
+ }
+
+ /* Fill a reserved sge to make hw stop reading remaining segments */
+ if (rsv) {
+ dseg[cnt].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+ dseg[cnt].addr = 0;
+ dseg[cnt].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
+ } else {
+ /* Clear remaining segments to make ROCEE ignore sges */
+ if (cnt < max_sge)
+ memset(dseg + cnt, 0,
+ (max_sge - cnt) * HNS_ROCE_SGE_SIZE);
+ }
+}
+
+static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
+ u32 wqe_idx, u32 max_sge)
+{
+ struct hns_roce_rinl_sge *sge_list;
+ void *wqe = NULL;
+ u32 i;
+
+ wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
+ fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
+
+ /* RQ supports inline data */
+ if (hr_qp->rq_inl_buf.wqe_cnt) {
+ sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
+ hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt = (u32)wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++) {
+ sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
+ sge_list[i].len = wr->sg_list[i].length;
+ }
+ }
+}
+
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
@@ -736,14 +840,9 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_v2_wqe_data_seg *dseg;
- struct hns_roce_rinl_sge *sge_list;
+ u32 wqe_idx, nreq, max_sge;
unsigned long flags;
- void *wqe = NULL;
- u32 wqe_idx;
- int nreq;
int ret;
- int i;
spin_lock_irqsave(&hr_qp->rq.lock, flags);
@@ -754,6 +853,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
goto out;
}
+ max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
hr_qp->ibqp.recv_cq))) {
@@ -762,50 +862,22 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
goto out;
}
- wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
-
- if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
+ if (unlikely(wr->num_sge > max_sge)) {
ibdev_err(ibdev, "num_sge = %d >= max_sge = %u.\n",
- wr->num_sge, hr_qp->rq.max_gs);
+ wr->num_sge, max_sge);
ret = -EINVAL;
*bad_wr = wr;
goto out;
}
- wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
- dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
- for (i = 0; i < wr->num_sge; i++) {
- if (!wr->sg_list[i].length)
- continue;
- set_data_seg_v2(dseg, wr->sg_list + i);
- dseg++;
- }
-
- if (wr->num_sge < hr_qp->rq.max_gs) {
- dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
- dseg->addr = 0;
- }
-
- /* rq support inline data */
- if (hr_qp->rq_inl_buf.wqe_cnt) {
- sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
- hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
- (u32)wr->num_sge;
- for (i = 0; i < wr->num_sge; i++) {
- sge_list[i].addr =
- (void *)(u64)wr->sg_list[i].addr;
- sge_list[i].len = wr->sg_list[i].length;
- }
- }
-
+ wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
+ fill_rq_wqe(hr_qp, wr, wqe_idx, max_sge);
hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
}
out:
if (likely(nreq)) {
hr_qp->rq.head += nreq;
- /* Memory barrier */
- wmb();
/*
* Hip08 hardware cannot flush the WQEs in RQ if the QP state
@@ -829,41 +901,82 @@ out:
return ret;
}
-static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
+static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n)
{
return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
}
-static void *get_idx_buf(struct hns_roce_idx_que *idx_que, unsigned int n)
+static void *get_idx_buf(struct hns_roce_idx_que *idx_que, u32 n)
{
return hns_roce_buf_offset(idx_que->mtr.kmem,
n << idx_que->entry_shift);
}
-static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
+static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, u32 wqe_index)
{
/* always called with interrupts disabled. */
spin_lock(&srq->lock);
bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
- srq->tail++;
+ srq->idx_que.tail++;
spin_unlock(&srq->lock);
}
-static int find_empty_entry(struct hns_roce_idx_que *idx_que,
- unsigned long size)
+static int hns_roce_srqwq_overflow(struct hns_roce_srq *srq)
{
- int wqe_idx;
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
- if (unlikely(bitmap_full(idx_que->bitmap, size)))
+ return idx_que->head - idx_que->tail >= srq->wqe_cnt;
+}
+
+static int check_post_srq_valid(struct hns_roce_srq *srq, u32 max_sge,
+ const struct ib_recv_wr *wr)
+{
+ struct ib_device *ib_dev = srq->ibsrq.device;
+
+ if (unlikely(wr->num_sge > max_sge)) {
+ ibdev_err(ib_dev,
+ "failed to check sge, wr->num_sge = %d, max_sge = %u.\n",
+ wr->num_sge, max_sge);
+ return -EINVAL;
+ }
+
+ if (unlikely(hns_roce_srqwq_overflow(srq))) {
+ ibdev_err(ib_dev,
+ "failed to check srqwq status, srqwq is full.\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int get_srq_wqe_idx(struct hns_roce_srq *srq, u32 *wqe_idx)
+{
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+ u32 pos;
+
+ pos = find_first_zero_bit(idx_que->bitmap, srq->wqe_cnt);
+ if (unlikely(pos == srq->wqe_cnt))
return -ENOSPC;
- wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
+ bitmap_set(idx_que->bitmap, pos, 1);
+ *wqe_idx = pos;
+ return 0;
+}
- bitmap_set(idx_que->bitmap, wqe_idx, 1);
+static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
+{
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+ unsigned int head;
+ __le32 *buf;
- return wqe_idx;
+ head = idx_que->head & (srq->wqe_cnt - 1);
+
+ buf = get_idx_buf(idx_que, head);
+ *buf = cpu_to_le32(wqe_idx);
+
+ idx_que->head++;
}
static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
@@ -872,77 +985,42 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
- struct hns_roce_v2_wqe_data_seg *dseg;
struct hns_roce_v2_db srq_db;
unsigned long flags;
- unsigned int ind;
- __le32 *srq_idx;
int ret = 0;
- int wqe_idx;
+ u32 max_sge;
+ u32 wqe_idx;
void *wqe;
- int nreq;
- int i;
+ u32 nreq;
spin_lock_irqsave(&srq->lock, flags);
- ind = srq->head & (srq->wqe_cnt - 1);
-
+ max_sge = srq->max_gs - srq->rsv_sge;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (unlikely(wr->num_sge >= srq->max_gs)) {
- ret = -EINVAL;
- *bad_wr = wr;
- break;
- }
-
- if (unlikely(srq->head == srq->tail)) {
- ret = -ENOMEM;
+ ret = check_post_srq_valid(srq, max_sge, wr);
+ if (ret) {
*bad_wr = wr;
break;
}
- wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
- if (unlikely(wqe_idx < 0)) {
- ret = -ENOMEM;
+ ret = get_srq_wqe_idx(srq, &wqe_idx);
+ if (unlikely(ret)) {
*bad_wr = wr;
break;
}
- wqe = get_srq_wqe(srq, wqe_idx);
- dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
-
- for (i = 0; i < wr->num_sge; ++i) {
- dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
- dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
- dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
- }
-
- if (wr->num_sge < srq->max_gs) {
- dseg[i].len = 0;
- dseg[i].lkey = cpu_to_le32(0x100);
- dseg[i].addr = 0;
- }
-
- srq_idx = get_idx_buf(&srq->idx_que, ind);
- *srq_idx = cpu_to_le32(wqe_idx);
-
+ wqe = get_srq_wqe_buf(srq, wqe_idx);
+ fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
+ fill_wqe_idx(srq, wqe_idx);
srq->wrid[wqe_idx] = wr->wr_id;
- ind = (ind + 1) & (srq->wqe_cnt - 1);
}
if (likely(nreq)) {
- srq->head += nreq;
-
- /*
- * Make sure that descriptors are written before
- * doorbell record.
- */
- wmb();
-
srq_db.byte_4 =
cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
(srq->srqn & V2_DB_BYTE_4_TAG_M));
srq_db.parameter =
- cpu_to_le32(srq->head & V2_DB_PARAMETER_IDX_M);
+ cpu_to_le32(srq->idx_que.head & V2_DB_PARAMETER_IDX_M);
hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
}
@@ -1059,15 +1137,6 @@ static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
return 0;
}
-static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
-{
- int ntu = ring->next_to_use;
- int ntc = ring->next_to_clean;
- int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
-
- return ring->desc_num - used - 1;
-}
-
static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
struct hns_roce_v2_cmq_ring *ring)
{
@@ -1107,8 +1176,7 @@ static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
&priv->cmq.csq : &priv->cmq.crq;
ring->flag = ring_type;
- ring->next_to_clean = 0;
- ring->next_to_use = 0;
+ ring->head = 0;
return hns_roce_alloc_cmq_desc(hr_dev, ring);
}
@@ -1207,34 +1275,10 @@ static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
- u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
+ u32 tail = roce_read(hr_dev, ROCEE_TX_CMQ_TAIL_REG);
struct hns_roce_v2_priv *priv = hr_dev->priv;
- return head == priv->cmq.csq.next_to_use;
-}
-
-static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
- struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
- struct hns_roce_cmq_desc *desc;
- u16 ntc = csq->next_to_clean;
- u32 head;
- int clean = 0;
-
- desc = &csq->desc[ntc];
- head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
- while (head != ntc) {
- memset(desc, 0, sizeof(*desc));
- ntc++;
- if (ntc == csq->desc_num)
- ntc = 0;
- desc = &csq->desc[ntc];
- clean++;
- }
- csq->next_to_clean = ntc;
-
- return clean;
+ return tail == priv->cmq.csq.head;
}
static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
@@ -1242,42 +1286,26 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
- struct hns_roce_cmq_desc *desc_to_use;
- bool complete = false;
u32 timeout = 0;
- int handle = 0;
u16 desc_ret;
- int ret = 0;
- int ntc;
+ u32 tail;
+ int ret;
+ int i;
spin_lock_bh(&csq->lock);
- if (num > hns_roce_cmq_space(csq)) {
- spin_unlock_bh(&csq->lock);
- return -EBUSY;
- }
-
- /*
- * Record the location of desc in the cmq for this time
- * which will be use for hardware to write back
- */
- ntc = csq->next_to_use;
+ tail = csq->head;
- while (handle < num) {
- desc_to_use = &csq->desc[csq->next_to_use];
- *desc_to_use = desc[handle];
- dev_dbg(hr_dev->dev, "set cmq desc:\n");
- csq->next_to_use++;
- if (csq->next_to_use == csq->desc_num)
- csq->next_to_use = 0;
- handle++;
+ for (i = 0; i < num; i++) {
+ csq->desc[csq->head++] = desc[i];
+ if (csq->head == csq->desc_num)
+ csq->head = 0;
}
/* Write to hardware */
- roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);
+ roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, csq->head);
- /*
- * If the command is sync, wait for the firmware to write back,
+ /* If the command is sync, wait for the firmware to write back,
* if multi descriptors to be sent, use the first one to check
*/
if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
@@ -1285,39 +1313,34 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
if (hns_roce_cmq_csq_done(hr_dev))
break;
udelay(1);
- timeout++;
- } while (timeout < priv->cmq.tx_timeout);
+ } while (++timeout < priv->cmq.tx_timeout);
}
if (hns_roce_cmq_csq_done(hr_dev)) {
- complete = true;
- handle = 0;
- while (handle < num) {
- /* get the result of hardware write back */
- desc_to_use = &csq->desc[ntc];
- desc[handle] = *desc_to_use;
- dev_dbg(hr_dev->dev, "Get cmq desc:\n");
- desc_ret = le16_to_cpu(desc[handle].retval);
- if (desc_ret == CMD_EXEC_SUCCESS)
- ret = 0;
- else
- ret = -EIO;
- priv->cmq.last_status = desc_ret;
- ntc++;
- handle++;
- if (ntc == csq->desc_num)
- ntc = 0;
+ for (ret = 0, i = 0; i < num; i++) {
+ /* check the result of hardware write back */
+ desc[i] = csq->desc[tail++];
+ if (tail == csq->desc_num)
+ tail = 0;
+
+ desc_ret = le16_to_cpu(desc[i].retval);
+ if (likely(desc_ret == CMD_EXEC_SUCCESS))
+ continue;
+
+ dev_err_ratelimited(hr_dev->dev,
+ "Cmdq IO error, opcode = %x, return = %x\n",
+ desc->opcode, desc_ret);
+ ret = -EIO;
}
- }
+ } else {
+ /* FW/HW reset or incorrect number of desc */
+ tail = roce_read(hr_dev, ROCEE_TX_CMQ_TAIL_REG);
+ dev_warn(hr_dev->dev, "CMDQ move tail from %d to %d\n",
+ csq->head, tail);
+ csq->head = tail;
- if (!complete)
ret = -EAGAIN;
-
- /* clean the command send queue */
- handle = hns_roce_cmq_csq_clean(hr_dev);
- if (handle != num)
- dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
- handle, num);
+ }
spin_unlock_bh(&csq->lock);
@@ -1530,7 +1553,8 @@ static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
roce_set_field(req->time_cfg_udp_port,
CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
- CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);
+ CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S,
+ ROCE_V2_UDP_DPORT);
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
@@ -1541,17 +1565,13 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
struct hns_roce_pf_res_a *req_a;
struct hns_roce_pf_res_b *req_b;
int ret;
- int i;
- for (i = 0; i < 2; i++) {
- hns_roce_cmq_setup_basic_desc(&desc[i],
- HNS_ROCE_OPC_QUERY_PF_RES, true);
+ hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_QUERY_PF_RES,
+ true);
+ desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- if (i == 0)
- desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- else
- desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- }
+ hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_QUERY_PF_RES,
+ true);
ret = hns_roce_cmq_send(hr_dev, desc, 2);
if (ret)
@@ -1644,19 +1664,16 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
struct hns_roce_cmq_desc desc[2];
struct hns_roce_vf_res_a *req_a;
struct hns_roce_vf_res_b *req_b;
- int i;
req_a = (struct hns_roce_vf_res_a *)desc[0].data;
req_b = (struct hns_roce_vf_res_b *)desc[1].data;
- for (i = 0; i < 2; i++) {
- hns_roce_cmq_setup_basic_desc(&desc[i],
- HNS_ROCE_OPC_ALLOC_VF_RES, false);
- if (i == 0)
- desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- else
- desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- }
+ hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_ALLOC_VF_RES,
+ false);
+ desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+
+ hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_ALLOC_VF_RES,
+ false);
roce_set_field(req_a->vf_qpc_bt_idx_num,
VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
@@ -1866,7 +1883,6 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
- HNS_ROCE_CAP_FLAG_RQ_INLINE |
HNS_ROCE_CAP_FLAG_RECORD_DB |
HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
@@ -1999,10 +2015,12 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
+ caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer);
caps->num_cqc_timer = le16_to_cpu(resp_a->num_cqc_timer);
caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
+ caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
caps->num_aeq_vectors = resp_a->num_aeq_vectors;
caps->num_other_vectors = resp_a->num_other_vectors;
caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
@@ -2336,7 +2354,6 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
struct hns_roce_link_table_entry *entry;
enum hns_roce_opcode_type opcode;
u32 page_num;
- int i;
switch (type) {
case TSQ_LINK_TABLE:
@@ -2354,14 +2371,10 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
page_num = link_tbl->npages;
entry = link_tbl->table.buf;
- for (i = 0; i < 2; i++) {
- hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
+ hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
+ desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- if (i == 0)
- desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- else
- desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- }
+ hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
req_a->base_addr_l = cpu_to_le32(link_tbl->table.map & 0xffffffff);
req_a->base_addr_h = cpu_to_le32(link_tbl->table.map >> 32);
@@ -2880,36 +2893,20 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
mpt_entry = mb_buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));
- roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
- V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
- roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
- V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
- HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
- roce_set_field(mpt_entry->byte_4_pd_hop_st,
- V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
- V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
- to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
- roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
- V2_MPT_BYTE_4_PD_S, mr->pd);
-
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
- (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
- mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
- (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
- (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
- (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
-
- roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
- mr->type == MR_TYPE_MR ? 0 : 1);
- roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
- 1);
+ hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
+ hr_reg_write(mpt_entry, MPT_PD, mr->pd);
+ hr_reg_enable(mpt_entry, MPT_L_INV_EN);
+
+ hr_reg_write(mpt_entry, MPT_BIND_EN,
+ !!(mr->access & IB_ACCESS_MW_BIND));
+ hr_reg_write(mpt_entry, MPT_ATOMIC_EN,
+ !!(mr->access & IB_ACCESS_REMOTE_ATOMIC));
+ hr_reg_write(mpt_entry, MPT_RR_EN,
+ !!(mr->access & IB_ACCESS_REMOTE_READ));
+ hr_reg_write(mpt_entry, MPT_RW_EN,
+ !!(mr->access & IB_ACCESS_REMOTE_WRITE));
+ hr_reg_write(mpt_entry, MPT_LW_EN,
+ !!((mr->access & IB_ACCESS_LOCAL_WRITE)));
mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
@@ -2917,9 +2914,19 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
+ if (mr->type != MR_TYPE_MR)
+ hr_reg_enable(mpt_entry, MPT_PA);
+
if (mr->type == MR_TYPE_DMA)
return 0;
+ if (mr->pbl_hop_num != HNS_ROCE_HOP_NUM_0)
+ hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, mr->pbl_hop_num);
+
+ hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
+ hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD);
+
ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
return ret;
@@ -2927,20 +2934,17 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, int flags,
- u32 pdn, int mr_access_flags, u64 iova,
- u64 size, void *mb_buf)
+ void *mb_buf)
{
struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
+ u32 mr_access_flags = mr->access;
int ret = 0;
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
- if (flags & IB_MR_REREG_PD) {
- roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
- V2_MPT_BYTE_4_PD_S, pdn);
- mr->pd = pdn;
- }
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+ V2_MPT_BYTE_4_PD_S, mr->pd);
if (flags & IB_MR_REREG_ACCESS) {
roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
@@ -2958,13 +2962,10 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
}
if (flags & IB_MR_REREG_TRANS) {
- mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
- mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
- mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
- mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
-
- mr->iova = iova;
- mr->size = size;
+ mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
+ mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
+ mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
+ mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
}
@@ -3126,11 +3127,6 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
if (nfreed) {
hr_cq->cons_index += nfreed;
- /*
- * Make sure update of buffer contents is done before
- * updating consumer index.
- */
- wmb();
hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
}
}
@@ -3639,11 +3635,8 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
break;
}
- if (npolled) {
- /* Memory barrier */
- wmb();
+ if (npolled)
hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
- }
out:
spin_unlock_irqrestore(&hr_cq->lock, flags);
@@ -4235,7 +4228,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
- const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct ib_device *ibdev = &hr_dev->ib_dev;
@@ -4243,7 +4235,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
dma_addr_t irrl_ba;
enum ib_mtu mtu;
u8 lp_pktn_ini;
- u8 port_num;
u64 *mtts;
u8 *dmac;
u8 *smac;
@@ -4324,15 +4315,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
}
- /* Configure GID index */
- port_num = rdma_ah_get_port_num(&attr->ah_attr);
- roce_set_field(context->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
- hns_get_gid_index(hr_dev, port_num - 1,
- grh->sgid_index));
- roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
-
memcpy(&(context->dmac), dmac, sizeof(u32));
roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
@@ -5083,7 +5065,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
done:
qp_attr->cur_qp_state = qp_attr->qp_state;
qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
- qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
+ qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
if (!ibqp->uobject) {
qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
@@ -5174,6 +5156,9 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc desc;
int ret, i;
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ return 0;
+
mutex_lock(&hr_dev->qp_table.scc_mutex);
/* set scc ctx clear done flag */
@@ -5220,98 +5205,96 @@ out:
return ret;
}
-static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
- struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
- u32 cqn, void *mb_buf, u64 *mtts_wqe,
- u64 *mtts_idx, dma_addr_t dma_handle_wqe,
- dma_addr_t dma_handle_idx)
+#define DMA_IDX_SHIFT 3
+#define DMA_WQE_SHIFT 3
+
+static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
+ struct hns_roce_srq_context *ctx)
{
- struct hns_roce_srq_context *srq_context;
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+ struct ib_device *ibdev = srq->ibsrq.device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+ u64 mtts_idx[MTT_MIN_COUNT] = {};
+ dma_addr_t dma_handle_idx = 0;
+ int ret;
+
+ /* Get physical address of idx que buf */
+ ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx,
+ ARRAY_SIZE(mtts_idx), &dma_handle_idx);
+ if (ret < 1) {
+ ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
+ ret);
+ return -ENOBUFS;
+ }
+
+ hr_reg_write(ctx, SRQC_IDX_HOP_NUM,
+ to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt));
+
+ hr_reg_write(ctx, SRQC_IDX_BT_BA_L, dma_handle_idx >> DMA_IDX_SHIFT);
+ hr_reg_write(ctx, SRQC_IDX_BT_BA_H,
+ upper_32_bits(dma_handle_idx >> DMA_IDX_SHIFT));
+
+ hr_reg_write(ctx, SRQC_IDX_BA_PG_SZ,
+ to_hr_hw_page_shift(idx_que->mtr.hem_cfg.ba_pg_shift));
+ hr_reg_write(ctx, SRQC_IDX_BUF_PG_SZ,
+ to_hr_hw_page_shift(idx_que->mtr.hem_cfg.buf_pg_shift));
+
+ hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_L,
+ to_hr_hw_page_addr(mtts_idx[0]));
+ hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
+
+ hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_L,
+ to_hr_hw_page_addr(mtts_idx[1]));
+ hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
+
+ return 0;
+}
- srq_context = mb_buf;
- memset(srq_context, 0, sizeof(*srq_context));
-
- roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
- SRQC_BYTE_4_SRQ_ST_S, 1);
-
- roce_set_field(srq_context->byte_4_srqn_srqst,
- SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
- SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
- to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
- srq->wqe_cnt));
- roce_set_field(srq_context->byte_4_srqn_srqst,
- SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
- ilog2(srq->wqe_cnt));
-
- roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
- SRQC_BYTE_4_SRQN_S, srq->srqn);
-
- roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
- SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
-
- roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
- SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
-
- srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
-
- roce_set_field(srq_context->byte_24_wqe_bt_ba,
- SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
- SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
- dma_handle_wqe >> 35);
-
- roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
- SRQC_BYTE_28_PD_S, pdn);
- roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
- SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
- fls(srq->max_gs - 1));
-
- srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
- roce_set_field(srq_context->rsv_idx_bt_ba,
- SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
- SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
- dma_handle_idx >> 35);
-
- srq_context->idx_cur_blk_addr =
- cpu_to_le32(to_hr_hw_page_addr(mtts_idx[0]));
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
- SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
- upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
- SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
- to_hr_hem_hopnum(hr_dev->caps.idx_hop_num,
- srq->wqe_cnt));
-
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
- SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
- to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.ba_pg_shift));
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
- SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
- to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.buf_pg_shift));
-
- srq_context->idx_nxt_blk_addr =
- cpu_to_le32(to_hr_hw_page_addr(mtts_idx[1]));
- roce_set_field(srq_context->rsv_idxnxtblkaddr,
- SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
- SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
- upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
- roce_set_field(srq_context->byte_56_xrc_cqn,
- SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
- cqn);
- roce_set_field(srq_context->byte_56_xrc_cqn,
- SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
- SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
- to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
- roce_set_field(srq_context->byte_56_xrc_cqn,
- SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
- SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
- to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
-
- roce_set_bit(srq_context->db_record_addr_record_en,
- SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
+static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
+{
+ struct ib_device *ibdev = srq->ibsrq.device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+ struct hns_roce_srq_context *ctx = mb_buf;
+ u64 mtts_wqe[MTT_MIN_COUNT] = {};
+ dma_addr_t dma_handle_wqe = 0;
+ int ret;
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ /* Get the physical address of srq buf */
+ ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
+ ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
+ if (ret < 1) {
+ ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
+ ret);
+ return -ENOBUFS;
+ }
+
+ hr_reg_write(ctx, SRQC_SRQ_ST, 1);
+ hr_reg_write(ctx, SRQC_PD, to_hr_pd(srq->ibsrq.pd)->pdn);
+ hr_reg_write(ctx, SRQC_SRQN, srq->srqn);
+ hr_reg_write(ctx, SRQC_XRCD, 0);
+ hr_reg_write(ctx, SRQC_XRC_CQN, srq->cqn);
+ hr_reg_write(ctx, SRQC_SHIFT, ilog2(srq->wqe_cnt));
+ hr_reg_write(ctx, SRQC_RQWS,
+ srq->max_gs <= 0 ? 0 : fls(srq->max_gs - 1));
+
+ hr_reg_write(ctx, SRQC_WQE_HOP_NUM,
+ to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
+ srq->wqe_cnt));
+
+ hr_reg_write(ctx, SRQC_WQE_BT_BA_L, dma_handle_wqe >> DMA_WQE_SHIFT);
+ hr_reg_write(ctx, SRQC_WQE_BT_BA_H,
+ upper_32_bits(dma_handle_wqe >> DMA_WQE_SHIFT));
+
+ hr_reg_write(ctx, SRQC_WQE_BA_PG_SZ,
+ to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
+ hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ,
+ to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
+
+ return hns_roce_v2_write_srqc_index_queue(srq, ctx);
}
static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
@@ -5331,7 +5314,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
return -EINVAL;
if (srq_attr_mask & IB_SRQ_LIMIT) {
- if (srq_attr->srq_limit >= srq->wqe_cnt)
+ if (srq_attr->srq_limit > srq->wqe_cnt)
return -EINVAL;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
@@ -5394,8 +5377,8 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
SRQC_BYTE_8_SRQ_LIMIT_WL_S);
attr->srq_limit = limit_wl;
- attr->max_wr = srq->wqe_cnt - 1;
- attr->max_sge = srq->max_gs;
+ attr->max_wr = srq->wqe_cnt;
+ attr->max_sge = srq->max_gs - srq->rsv_sge;
out:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
@@ -5626,9 +5609,6 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
++eq->cons_index;
aeqe_found = 1;
- if (eq->cons_index > (2 * eq->entries - 1))
- eq->cons_index = 0;
-
hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);
aeqe = next_aeqe_sw_v2(eq);
@@ -5671,9 +5651,6 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
++eq->cons_index;
ceqe_found = 1;
- if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1))
- eq->cons_index = 0;
-
ceqe = next_ceqe_sw_v2(eq);
}
@@ -5948,7 +5925,6 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
buf_attr.region[0].size = eq->entries * eq->eqe_size;
buf_attr.region[0].hopnum = eq->hop_num;
buf_attr.region_count = 1;
- buf_attr.fixed_page = true;
err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
hr_dev->caps.eqe_ba_pg_sz +
@@ -6286,6 +6262,7 @@ static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
/* Get info from NIC driver. */
hr_dev->reg_base = handle->rinfo.roce_io_base;
+ hr_dev->mem_base = handle->rinfo.roce_mem_base;
hr_dev->caps.num_ports = 1;
hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
hr_dev->iboe.phy_port[0] = 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index bdaccf86460d..39621fb6ec16 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -96,7 +96,8 @@
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
-#define HNS_ROCE_INVALID_LKEY 0x100
+#define HNS_ROCE_INVALID_LKEY 0x0
+#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
#define HNS_ROCE_V2_RSV_QPS 8
@@ -366,24 +367,61 @@ struct hns_roce_v2_cq_context {
#define CQC_STASH CQC_FIELD_LOC(63, 63)
struct hns_roce_srq_context {
- __le32 byte_4_srqn_srqst;
- __le32 byte_8_limit_wl;
- __le32 byte_12_xrcd;
- __le32 byte_16_pi_ci;
- __le32 wqe_bt_ba;
- __le32 byte_24_wqe_bt_ba;
- __le32 byte_28_rqws_pd;
- __le32 idx_bt_ba;
- __le32 rsv_idx_bt_ba;
- __le32 idx_cur_blk_addr;
- __le32 byte_44_idxbufpgsz_addr;
- __le32 idx_nxt_blk_addr;
- __le32 rsv_idxnxtblkaddr;
- __le32 byte_56_xrc_cqn;
- __le32 db_record_addr_record_en;
- __le32 db_record_addr;
+ __le32 byte_4_srqn_srqst;
+ __le32 byte_8_limit_wl;
+ __le32 byte_12_xrcd;
+ __le32 byte_16_pi_ci;
+ __le32 wqe_bt_ba;
+ __le32 byte_24_wqe_bt_ba;
+ __le32 byte_28_rqws_pd;
+ __le32 idx_bt_ba;
+ __le32 rsv_idx_bt_ba;
+ __le32 idx_cur_blk_addr;
+ __le32 byte_44_idxbufpgsz_addr;
+ __le32 idx_nxt_blk_addr;
+ __le32 rsv_idxnxtblkaddr;
+ __le32 byte_56_xrc_cqn;
+ __le32 db_record_addr_record_en;
+ __le32 db_record_addr;
};
+#define SRQC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_srq_context, h, l)
+
+#define SRQC_SRQ_ST SRQC_FIELD_LOC(1, 0)
+#define SRQC_WQE_HOP_NUM SRQC_FIELD_LOC(3, 2)
+#define SRQC_SHIFT SRQC_FIELD_LOC(7, 4)
+#define SRQC_SRQN SRQC_FIELD_LOC(31, 8)
+#define SRQC_LIMIT_WL SRQC_FIELD_LOC(47, 32)
+#define SRQC_RSV0 SRQC_FIELD_LOC(63, 48)
+#define SRQC_XRCD SRQC_FIELD_LOC(87, 64)
+#define SRQC_RSV1 SRQC_FIELD_LOC(95, 88)
+#define SRQC_PRODUCER_IDX SRQC_FIELD_LOC(111, 96)
+#define SRQC_CONSUMER_IDX SRQC_FIELD_LOC(127, 112)
+#define SRQC_WQE_BT_BA_L SRQC_FIELD_LOC(159, 128)
+#define SRQC_WQE_BT_BA_H SRQC_FIELD_LOC(188, 160)
+#define SRQC_RSV2 SRQC_FIELD_LOC(191, 189)
+#define SRQC_PD SRQC_FIELD_LOC(215, 192)
+#define SRQC_RQWS SRQC_FIELD_LOC(219, 216)
+#define SRQC_RSV3 SRQC_FIELD_LOC(223, 220)
+#define SRQC_IDX_BT_BA_L SRQC_FIELD_LOC(255, 224)
+#define SRQC_IDX_BT_BA_H SRQC_FIELD_LOC(284, 256)
+#define SRQC_RSV4 SRQC_FIELD_LOC(287, 285)
+#define SRQC_IDX_CUR_BLK_ADDR_L SRQC_FIELD_LOC(319, 288)
+#define SRQC_IDX_CUR_BLK_ADDR_H SRQC_FIELD_LOC(339, 320)
+#define SRQC_RSV5 SRQC_FIELD_LOC(341, 340)
+#define SRQC_IDX_HOP_NUM SRQC_FIELD_LOC(343, 342)
+#define SRQC_IDX_BA_PG_SZ SRQC_FIELD_LOC(347, 344)
+#define SRQC_IDX_BUF_PG_SZ SRQC_FIELD_LOC(351, 348)
+#define SRQC_IDX_NXT_BLK_ADDR_L SRQC_FIELD_LOC(383, 352)
+#define SRQC_IDX_NXT_BLK_ADDR_H SRQC_FIELD_LOC(403, 384)
+#define SRQC_RSV6 SRQC_FIELD_LOC(415, 404)
+#define SRQC_XRC_CQN SRQC_FIELD_LOC(439, 416)
+#define SRQC_WQE_BA_PG_SZ SRQC_FIELD_LOC(443, 440)
+#define SRQC_WQE_BUF_PG_SZ SRQC_FIELD_LOC(447, 444)
+#define SRQC_DB_RECORD_EN SRQC_FIELD_LOC(448, 448)
+#define SRQC_DB_RECORD_ADDR_L SRQC_FIELD_LOC(479, 449)
+#define SRQC_DB_RECORD_ADDR_H SRQC_FIELD_LOC(511, 480)
+
#define SRQC_BYTE_4_SRQ_ST_S 0
#define SRQC_BYTE_4_SRQ_ST_M GENMASK(1, 0)
@@ -993,6 +1031,45 @@ struct hns_roce_v2_mpt_entry {
__le32 byte_64_buf_pa1;
};
+#define MPT_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_mpt_entry, h, l)
+
+#define MPT_ST MPT_FIELD_LOC(1, 0)
+#define MPT_PBL_HOP_NUM MPT_FIELD_LOC(3, 2)
+#define MPT_PBL_BA_PG_SZ MPT_FIELD_LOC(7, 4)
+#define MPT_PD MPT_FIELD_LOC(31, 8)
+#define MPT_RA_EN MPT_FIELD_LOC(32, 32)
+#define MPT_R_INV_EN MPT_FIELD_LOC(33, 33)
+#define MPT_L_INV_EN MPT_FIELD_LOC(34, 34)
+#define MPT_BIND_EN MPT_FIELD_LOC(35, 35)
+#define MPT_ATOMIC_EN MPT_FIELD_LOC(36, 36)
+#define MPT_RR_EN MPT_FIELD_LOC(37, 37)
+#define MPT_RW_EN MPT_FIELD_LOC(38, 38)
+#define MPT_LW_EN MPT_FIELD_LOC(39, 39)
+#define MPT_MW_CNT MPT_FIELD_LOC(63, 40)
+#define MPT_FRE MPT_FIELD_LOC(64, 64)
+#define MPT_PA MPT_FIELD_LOC(65, 65)
+#define MPT_ZBVA MPT_FIELD_LOC(66, 66)
+#define MPT_SHARE MPT_FIELD_LOC(67, 67)
+#define MPT_MR_MW MPT_FIELD_LOC(68, 68)
+#define MPT_BPD MPT_FIELD_LOC(69, 69)
+#define MPT_BQP MPT_FIELD_LOC(70, 70)
+#define MPT_INNER_PA_VLD MPT_FIELD_LOC(71, 71)
+#define MPT_MW_BIND_QPN MPT_FIELD_LOC(95, 72)
+#define MPT_BOUND_LKEY MPT_FIELD_LOC(127, 96)
+#define MPT_LEN MPT_FIELD_LOC(191, 128)
+#define MPT_LKEY MPT_FIELD_LOC(223, 192)
+#define MPT_VA MPT_FIELD_LOC(287, 224)
+#define MPT_PBL_SIZE MPT_FIELD_LOC(319, 288)
+#define MPT_PBL_BA MPT_FIELD_LOC(380, 320)
+#define MPT_BLK_MODE MPT_FIELD_LOC(381, 381)
+#define MPT_RSV0 MPT_FIELD_LOC(383, 382)
+#define MPT_PA0 MPT_FIELD_LOC(441, 384)
+#define MPT_BOUND_VA MPT_FIELD_LOC(447, 442)
+#define MPT_PA1 MPT_FIELD_LOC(505, 448)
+#define MPT_PERSIST_EN MPT_FIELD_LOC(506, 506)
+#define MPT_RSV2 MPT_FIELD_LOC(507, 507)
+#define MPT_PBL_BUF_PG_SZ MPT_FIELD_LOC(511, 508)
+
#define V2_MPT_BYTE_4_MPT_ST_S 0
#define V2_MPT_BYTE_4_MPT_ST_M GENMASK(1, 0)
@@ -1059,6 +1136,8 @@ struct hns_roce_v2_mpt_entry {
#define V2_DB_BYTE_4_CMD_S 24
#define V2_DB_BYTE_4_CMD_M GENMASK(27, 24)
+#define V2_DB_FLAG_S 31
+
#define V2_DB_PARAMETER_IDX_S 0
#define V2_DB_PARAMETER_IDX_M GENMASK(15, 0)
@@ -1155,6 +1234,15 @@ struct hns_roce_v2_rc_send_wqe {
#define V2_RC_SEND_WQE_BYTE_4_OPCODE_S 0
#define V2_RC_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0)
+#define V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S 5
+#define V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M GENMASK(6, 5)
+
+#define V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S 13
+#define V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M GENMASK(14, 13)
+
+#define V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S 15
+#define V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M GENMASK(30, 15)
+
#define V2_RC_SEND_WQE_BYTE_4_OWNER_S 7
#define V2_RC_SEND_WQE_BYTE_4_CQE_S 8
@@ -1167,15 +1255,17 @@ struct hns_roce_v2_rc_send_wqe {
#define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12
-#define V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S 19
+#define V2_RC_FRMR_WQE_BYTE_40_BIND_EN_S 10
+
+#define V2_RC_FRMR_WQE_BYTE_40_ATOMIC_S 11
-#define V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S 20
+#define V2_RC_FRMR_WQE_BYTE_40_RR_S 12
-#define V2_RC_FRMR_WQE_BYTE_4_RR_S 21
+#define V2_RC_FRMR_WQE_BYTE_40_RW_S 13
-#define V2_RC_FRMR_WQE_BYTE_4_RW_S 22
+#define V2_RC_FRMR_WQE_BYTE_40_LW_S 14
-#define V2_RC_FRMR_WQE_BYTE_4_LW_S 23
+#define V2_RC_SEND_WQE_BYTE_4_FLAG_S 31
#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0
#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_M GENMASK(23, 0)
@@ -1190,7 +1280,7 @@ struct hns_roce_v2_rc_send_wqe {
struct hns_roce_wqe_frmr_seg {
__le32 pbl_size;
- __le32 mode_buf_pg_sz;
+ __le32 byte_40;
};
#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S 4
@@ -1786,12 +1876,8 @@ struct hns_roce_v2_cmq_ring {
dma_addr_t desc_dma_addr;
struct hns_roce_cmq_desc *desc;
u32 head;
- u32 tail;
-
u16 buf_size;
u16 desc_num;
- int next_to_use;
- int next_to_clean;
u8 flag;
spinlock_t lock; /* command queue lock */
};
@@ -1800,7 +1886,6 @@ struct hns_roce_v2_cmq {
struct hns_roce_v2_cmq_ring csq;
struct hns_roce_v2_cmq_ring crq;
u16 tx_timeout;
- u16 last_status;
};
enum hns_roce_link_table_type {
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index d9179bae4989..c9c0836394a2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -42,22 +42,6 @@
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
-/**
- * hns_get_gid_index - Get gid index.
- * @hr_dev: pointer to structure hns_roce_dev.
- * @port: port, value range: 0 ~ MAX
- * @gid_index: gid_index, value range: 0 ~ MAX
- * Description:
- * N ports shared gids, allocation method as follow:
- * GID[0][0], GID[1][0],.....GID[N - 1][0],
- * GID[0][0], GID[1][0],.....GID[N - 1][0],
- * And so on
- */
-u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
-{
- return gid_index * hr_dev->caps.num_ports + port;
-}
-
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
u8 phy_port;
@@ -217,7 +201,8 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_srq_sge = hr_dev->caps.max_srq_sges;
}
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR &&
+ hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
}
@@ -748,11 +733,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
goto err_pd_table_free;
}
- ret = hns_roce_init_cq_table(hr_dev);
- if (ret) {
- dev_err(dev, "Failed to init completion queue table.\n");
- goto err_mr_table_free;
- }
+ hns_roce_init_cq_table(hr_dev);
ret = hns_roce_init_qp_table(hr_dev);
if (ret) {
@@ -772,13 +753,10 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
return 0;
err_qp_table_free:
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
- hns_roce_cleanup_qp_table(hr_dev);
+ hns_roce_cleanup_qp_table(hr_dev);
err_cq_table_free:
hns_roce_cleanup_cq_table(hr_dev);
-
-err_mr_table_free:
hns_roce_cleanup_mr_table(hr_dev);
err_pd_table_free:
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 1bcffd93ff3e..79b3c3023fe7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -66,8 +66,7 @@ int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
- u32 pd, u64 iova, u64 size, u32 access)
+static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
unsigned long obj = 0;
@@ -82,11 +81,6 @@ static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
return -ENOMEM;
}
- mr->iova = iova; /* MR va starting addr */
- mr->size = size; /* MR addr range */
- mr->pd = pd; /* MR num */
- mr->access = access; /* MR access permit */
- mr->enabled = 0; /* MR active status */
mr->key = hw_index_to_key(obj); /* MR key */
err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
@@ -110,8 +104,7 @@ static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
}
static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
- size_t length, struct ib_udata *udata, u64 start,
- int access)
+ struct ib_udata *udata, u64 start)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
bool is_fast = mr->type == MR_TYPE_FRMR;
@@ -121,11 +114,10 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
buf_attr.page_shift = is_fast ? PAGE_SHIFT :
hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
- buf_attr.region[0].size = length;
+ buf_attr.region[0].size = mr->size;
buf_attr.region[0].hopnum = mr->pbl_hop_num;
buf_attr.region_count = 1;
- buf_attr.fixed_page = true;
- buf_attr.user_access = access;
+ buf_attr.user_access = mr->access;
/* fast MR's buffer is alloced before mapping, not at creation */
buf_attr.mtt_only = is_fast;
@@ -197,9 +189,6 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
}
mr->enabled = 1;
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-
- return 0;
err_page:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
@@ -237,14 +226,16 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
return ERR_PTR(-ENOMEM);
mr->type = MR_TYPE_DMA;
+ mr->pd = to_hr_pd(pd)->pdn;
+ mr->access = acc;
/* Allocate memory region key */
hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
- ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, 0, 0, acc);
+ ret = alloc_mr_key(hr_dev, mr);
if (ret)
goto err_free;
- ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
+ ret = hns_roce_mr_enable(hr_dev, mr);
if (ret)
goto err_mr;
@@ -271,13 +262,17 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
+ mr->iova = virt_addr;
+ mr->size = length;
+ mr->pd = to_hr_pd(pd)->pdn;
+ mr->access = access_flags;
mr->type = MR_TYPE_MR;
- ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, virt_addr, length,
- access_flags);
+
+ ret = alloc_mr_key(hr_dev, mr);
if (ret)
goto err_alloc_mr;
- ret = alloc_mr_pbl(hr_dev, mr, length, udata, start, access_flags);
+ ret = alloc_mr_pbl(hr_dev, mr, udata, start);
if (ret)
goto err_alloc_key;
@@ -299,35 +294,6 @@ err_alloc_mr:
return ERR_PTR(ret);
}
-static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
- u64 start, u64 length,
- u64 virt_addr, int mr_access_flags,
- struct hns_roce_cmd_mailbox *mailbox,
- u32 pdn, struct ib_udata *udata)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_mr *mr = to_hr_mr(ibmr);
- int ret;
-
- free_mr_pbl(hr_dev, mr);
- ret = alloc_mr_pbl(hr_dev, mr, length, udata, start, mr_access_flags);
- if (ret) {
- ibdev_err(ibdev, "failed to create mr PBL, ret = %d.\n", ret);
- return ret;
- }
-
- ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
- mr_access_flags, virt_addr,
- length, mailbox->buf);
- if (ret) {
- ibdev_err(ibdev, "failed to write mtpt, ret = %d.\n", ret);
- free_mr_pbl(hr_dev, mr);
- }
-
- return ret;
-}
-
struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
u64 length, u64 virt_addr,
int mr_access_flags, struct ib_pd *pd,
@@ -338,7 +304,6 @@ struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
struct hns_roce_mr *mr = to_hr_mr(ibmr);
struct hns_roce_cmd_mailbox *mailbox;
unsigned long mtpt_idx;
- u32 pdn = 0;
int ret;
if (!mr->enabled)
@@ -360,23 +325,29 @@ struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);
mr->enabled = 0;
+ mr->iova = virt_addr;
+ mr->size = length;
if (flags & IB_MR_REREG_PD)
- pdn = to_hr_pd(pd)->pdn;
+ mr->pd = to_hr_pd(pd)->pdn;
+
+ if (flags & IB_MR_REREG_ACCESS)
+ mr->access = mr_access_flags;
if (flags & IB_MR_REREG_TRANS) {
- ret = rereg_mr_trans(ibmr, flags,
- start, length,
- virt_addr, mr_access_flags,
- mailbox, pdn, udata);
- if (ret)
- goto free_cmd_mbox;
- } else {
- ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
- mr_access_flags, virt_addr,
- length, mailbox->buf);
- if (ret)
+ free_mr_pbl(hr_dev, mr);
+ ret = alloc_mr_pbl(hr_dev, mr, udata, start);
+ if (ret) {
+ ibdev_err(ib_dev, "failed to alloc mr PBL, ret = %d.\n",
+ ret);
goto free_cmd_mbox;
+ }
+ }
+
+ ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, mailbox->buf);
+ if (ret) {
+ ibdev_err(ib_dev, "failed to write mtpt, ret = %d.\n", ret);
+ goto free_cmd_mbox;
}
ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx);
@@ -386,12 +357,6 @@ struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
}
mr->enabled = 1;
- if (flags & IB_MR_REREG_ACCESS)
- mr->access = mr_access_flags;
-
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-
- return NULL;
free_cmd_mbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
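
With rereg_mr_trans() removed, hns_roce_rereg_user_mr() follows one sequence for every rereg variant, and rereg_write_mtpt() only needs the flags and the mailbox buffer since the updated attributes already live on the mr. A condensed view of the new sequence (sketch; mailbox handling and error paths trimmed):

        /* 1. MPT destroyed, mr->enabled = 0 (see hunk above) */

        /* 2. refresh the cached attributes on the mr itself */
        mr->iova = virt_addr;
        mr->size = length;
        if (flags & IB_MR_REREG_PD)
                mr->pd = to_hr_pd(pd)->pdn;
        if (flags & IB_MR_REREG_ACCESS)
                mr->access = mr_access_flags;

        /* 3. rebuild the PBL only when the translation changes */
        if (flags & IB_MR_REREG_TRANS) {
                free_mr_pbl(hr_dev, mr);
                ret = alloc_mr_pbl(hr_dev, mr, udata, start);
        }

        /* 4. one MTPT rewrite for every variant, then re-create the MPT */
        ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, mailbox->buf);
        ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx);
        mr->enabled = 1;
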
@@ -421,7 +386,6 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct device *dev = hr_dev->dev;
struct hns_roce_mr *mr;
- u64 length;
int ret;
if (mr_type != IB_MR_TYPE_MEM_REG)
@@ -438,14 +402,15 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
return ERR_PTR(-ENOMEM);
mr->type = MR_TYPE_FRMR;
+ mr->pd = to_hr_pd(pd)->pdn;
+ mr->size = max_num_sg * (1 << PAGE_SHIFT);
/* Allocate memory region key */
- length = max_num_sg * (1 << PAGE_SHIFT);
- ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, 0, length, 0);
+ ret = alloc_mr_key(hr_dev, mr);
if (ret)
goto err_free;
- ret = alloc_mr_pbl(hr_dev, mr, length, NULL, 0, 0);
+ ret = alloc_mr_pbl(hr_dev, mr, NULL, 0);
if (ret)
goto err_key;
@@ -454,7 +419,7 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
goto err_pbl;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
- mr->ibmr.length = length;
+ mr->ibmr.length = mr->size;
return &mr->ibmr;
@@ -631,30 +596,26 @@ int hns_roce_dealloc_mw(struct ib_mw *ibmw)
}
static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- dma_addr_t *pages, struct hns_roce_buf_region *region)
+ struct hns_roce_buf_region *region, dma_addr_t *pages,
+ int max_count)
{
+ int count, npage;
+ int offset, end;
__le64 *mtts;
- int offset;
- int count;
- int npage;
u64 addr;
- int end;
int i;
- /* if hopnum is 0, buffer cannot store BAs, so skip write mtt */
- if (!region->hopnum)
- return 0;
-
offset = region->offset;
end = offset + region->count;
npage = 0;
- while (offset < end) {
+ while (offset < end && npage < max_count) {
+ count = 0;
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
offset, &count, NULL);
if (!mtts)
return -ENOBUFS;
- for (i = 0; i < count; i++) {
+ for (i = 0; i < count && npage < max_count; i++) {
if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
addr = to_hr_hw_page_addr(pages[npage]);
else
@@ -666,7 +627,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
offset += count;
}
- return 0;
+ return npage;
}
static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
@@ -729,25 +690,15 @@ static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
}
static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- struct hns_roce_buf_attr *buf_attr, bool is_direct,
+ struct hns_roce_buf_attr *buf_attr,
struct ib_udata *udata, unsigned long user_addr)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
- unsigned int best_pg_shift;
- int all_pg_count = 0;
size_t total_size;
- int ret;
total_size = mtr_bufs_size(buf_attr);
- if (total_size < 1) {
- ibdev_err(ibdev, "failed to check mtr size\n.");
- return -EINVAL;
- }
if (udata) {
- unsigned long pgsz_bitmap;
- unsigned long page_size;
-
mtr->kmem = NULL;
mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
buf_attr->user_access);
@@ -756,76 +707,67 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
PTR_ERR(mtr->umem));
return -ENOMEM;
}
- if (buf_attr->fixed_page)
- pgsz_bitmap = 1 << buf_attr->page_shift;
- else
- pgsz_bitmap = GENMASK(buf_attr->page_shift, PAGE_SHIFT);
-
- page_size = ib_umem_find_best_pgsz(mtr->umem, pgsz_bitmap,
- user_addr);
- if (!page_size)
- return -EINVAL;
- best_pg_shift = order_base_2(page_size);
- all_pg_count = ib_umem_num_dma_blocks(mtr->umem, page_size);
- ret = 0;
} else {
mtr->umem = NULL;
- mtr->kmem =
- hns_roce_buf_alloc(hr_dev, total_size,
- buf_attr->page_shift,
- is_direct ? HNS_ROCE_BUF_DIRECT : 0);
+ mtr->kmem = hns_roce_buf_alloc(hr_dev, total_size,
+ buf_attr->page_shift,
+ mtr->hem_cfg.is_direct ?
+ HNS_ROCE_BUF_DIRECT : 0);
if (IS_ERR(mtr->kmem)) {
ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
PTR_ERR(mtr->kmem));
return PTR_ERR(mtr->kmem);
}
-
- best_pg_shift = buf_attr->page_shift;
- all_pg_count = mtr->kmem->npages;
- }
-
- /* must bigger than minimum hardware page shift */
- if (best_pg_shift < HNS_HW_PAGE_SHIFT || all_pg_count < 1) {
- ret = -EINVAL;
- ibdev_err(ibdev,
- "failed to check mtr, page shift = %u count = %d.\n",
- best_pg_shift, all_pg_count);
- goto err_alloc_mem;
}
- mtr->hem_cfg.buf_pg_shift = best_pg_shift;
- mtr->hem_cfg.buf_pg_count = all_pg_count;
-
return 0;
-err_alloc_mem:
- mtr_free_bufs(hr_dev, mtr);
- return ret;
}
-static int mtr_get_pages(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- dma_addr_t *pages, int count, unsigned int page_shift)
+static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ int page_count, unsigned int page_shift)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
+ dma_addr_t *pages;
int npage;
- int err;
+ int ret;
+
+ /* alloc a tmp array to store buffer's dma address */
+ pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
if (mtr->umem)
- npage = hns_roce_get_umem_bufs(hr_dev, pages, count, 0,
+ npage = hns_roce_get_umem_bufs(hr_dev, pages, page_count, 0,
mtr->umem, page_shift);
else
- npage = hns_roce_get_kmem_bufs(hr_dev, pages, count, 0,
+ npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count, 0,
mtr->kmem);
+ if (npage != page_count) {
+ ibdev_err(ibdev, "failed to get mtr page %d != %d.\n", npage,
+ page_count);
+ ret = -ENOBUFS;
+ goto err_alloc_list;
+ }
+
if (mtr->hem_cfg.is_direct && npage > 1) {
- err = mtr_check_direct_pages(pages, npage, page_shift);
- if (err) {
- ibdev_err(ibdev, "Failed to check %s direct page-%d\n",
- mtr->umem ? "user" : "kernel", err);
- npage = err;
+ ret = mtr_check_direct_pages(pages, npage, page_shift);
+ if (ret) {
+ ibdev_err(ibdev, "failed to check %s mtr, idx = %d.\n",
+ mtr->umem ? "user" : "kernel", ret);
+ ret = -ENOBUFS;
+ goto err_alloc_list;
}
}
- return npage;
+ ret = hns_roce_mtr_map(hr_dev, mtr, pages, page_count);
+ if (ret)
+ ibdev_err(ibdev, "failed to map mtr pages, ret = %d.\n", ret);
+
+err_alloc_list:
+ kvfree(pages);
+
+ return ret;
}
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
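
mtr_map_bufs() above replaces the old mtr_get_pages() plus the open-coded mapping in hns_roce_mtr_create(): it collects the DMA address of every hardware page of either the umem or the kmem into a temporary array, checks that the count matches, and hands the whole array to hns_roce_mtr_map(). kvcalloc() is a reasonable fit here because the array holds one dma_addr_t per page, can grow large for big registrations, and does not need to be physically contiguous. A sketch of that pattern under those assumptions:

        dma_addr_t *pages;

        /* one dma_addr_t per hardware page; may be vmalloc-backed if large */
        pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;
        /* fill and map as in mtr_map_bufs() above */
        kvfree(pages);          /* kvfree() handles both backing types */
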
@@ -833,8 +775,8 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
{
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_buf_region *r;
- unsigned int i;
- int err;
+ unsigned int i, mapped_cnt;
+ int ret;
/*
* Only use the first page address as root ba when hopnum is 0, this
@@ -845,26 +787,42 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
return 0;
}
- for (i = 0; i < mtr->hem_cfg.region_count; i++) {
+ for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
+ mapped_cnt < page_cnt; i++) {
r = &mtr->hem_cfg.region[i];
+ /* if hopnum is 0, no need to map pages in this region */
+ if (!r->hopnum) {
+ mapped_cnt += r->count;
+ continue;
+ }
+
if (r->offset + r->count > page_cnt) {
- err = -EINVAL;
+ ret = -EINVAL;
ibdev_err(ibdev,
"failed to check mtr%u end %u + %u, max %u.\n",
i, r->offset, r->count, page_cnt);
- return err;
+ return ret;
}
- err = mtr_map_region(hr_dev, mtr, &pages[r->offset], r);
- if (err) {
+ ret = mtr_map_region(hr_dev, mtr, r, &pages[r->offset],
+ page_cnt - mapped_cnt);
+ if (ret < 0) {
ibdev_err(ibdev,
"failed to map mtr%u offset %u, ret = %d.\n",
- i, r->offset, err);
- return err;
+ i, r->offset, ret);
+ return ret;
}
+ mapped_cnt += ret;
+ ret = 0;
}
- return 0;
+ if (mapped_cnt < page_cnt) {
+ ret = -ENOBUFS;
+ ibdev_err(ibdev, "failed to map mtr pages count: %u < %u.\n",
+ mapped_cnt, page_cnt);
+ }
+
+ return ret;
}
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
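
Two behavioural points of the reworked mapping loop above: mtr_map_region() now returns the number of page entries it actually wrote (bounded by the space left in pages[]), and regions with hopnum 0 are counted as mapped without writing any MTT entry because only their root BA is used. A small worked example with assumed numbers:

        /* assumed: page_cnt = 16
         *   region[0]: offset 0, count 4,  hopnum 0  -> skipped, mapped_cnt = 4
         *   region[1]: offset 4, count 12, hopnum 2  -> mtr_map_region() returns 12,
         *                                               mapped_cnt = 16
         * mapped_cnt == page_cnt, so hns_roce_mtr_map() returns 0; anything short
         * of page_cnt ends in -ENOBUFS with the error message above.
         */
        ret = mtr_map_region(hr_dev, mtr, r, &pages[r->offset],
                             page_cnt - mapped_cnt);
        if (ret < 0)
                return ret;
        mapped_cnt += ret;
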
@@ -928,68 +886,92 @@ done:
static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
struct hns_roce_buf_attr *attr,
struct hns_roce_hem_cfg *cfg,
- unsigned int *buf_page_shift)
+ unsigned int *buf_page_shift, int unalinged_size)
{
struct hns_roce_buf_region *r;
+ int first_region_padding;
+ int page_cnt, region_cnt;
unsigned int page_shift;
- int page_cnt = 0;
size_t buf_size;
- int region_cnt;
+ /* If mtt is disabled, all pages must be within a continuous range */
+ cfg->is_direct = !mtr_has_mtt(attr);
+ buf_size = mtr_bufs_size(attr);
if (cfg->is_direct) {
- buf_size = cfg->buf_pg_count << cfg->buf_pg_shift;
- page_cnt = DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE);
- /*
- * When HEM buffer use level-0 addressing, the page size equals
- * the buffer size, and the the page size = 4K * 2^N.
+ /* When HEM buffer uses 0-level addressing, the page size is
+ * equal to the whole buffer size, and we split the buffer into
+ * small pages which is used to check whether the adjacent
+ * units are in the continuous space and its size is fixed to
+ * 4K based on hns ROCEE's requirement.
*/
- cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT + order_base_2(page_cnt);
- if (attr->region_count > 1) {
- cfg->buf_pg_count = page_cnt;
- page_shift = HNS_HW_PAGE_SHIFT;
- } else {
- cfg->buf_pg_count = 1;
- page_shift = cfg->buf_pg_shift;
- if (buf_size != 1 << page_shift) {
- ibdev_err(&hr_dev->ib_dev,
- "failed to check direct size %zu shift %d.\n",
- buf_size, page_shift);
- return -EINVAL;
- }
- }
+ page_shift = HNS_HW_PAGE_SHIFT;
+
+ /* The ROCEE requires the page size to be 4K * 2 ^ N. */
+ cfg->buf_pg_count = 1;
+ cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT +
+ order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE));
+ first_region_padding = 0;
} else {
- page_shift = cfg->buf_pg_shift;
+ page_shift = attr->page_shift;
+ cfg->buf_pg_count = DIV_ROUND_UP(buf_size + unalinged_size,
+ 1 << page_shift);
+ cfg->buf_pg_shift = page_shift;
+ first_region_padding = unalinged_size;
}
- /* convert buffer size to page index and page count */
- for (page_cnt = 0, region_cnt = 0; page_cnt < cfg->buf_pg_count &&
- region_cnt < attr->region_count &&
+ /* Convert buffer size to page index and page count for each region and
+ * the buffer's offset needs to be appended to the first region.
+ */
+ for (page_cnt = 0, region_cnt = 0; region_cnt < attr->region_count &&
region_cnt < ARRAY_SIZE(cfg->region); region_cnt++) {
r = &cfg->region[region_cnt];
r->offset = page_cnt;
- buf_size = hr_hw_page_align(attr->region[region_cnt].size);
+ buf_size = hr_hw_page_align(attr->region[region_cnt].size +
+ first_region_padding);
r->count = DIV_ROUND_UP(buf_size, 1 << page_shift);
+ first_region_padding = 0;
page_cnt += r->count;
r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum,
r->count);
}
- if (region_cnt < 1) {
- ibdev_err(&hr_dev->ib_dev,
- "failed to check mtr region count, pages = %d.\n",
- cfg->buf_pg_count);
- return -ENOBUFS;
- }
-
cfg->region_count = region_cnt;
*buf_page_shift = page_shift;
return page_cnt;
}
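
The new unalinged_size parameter carries the in-page offset of a user buffer (the caller below passes user_addr & ~PAGE_MASK, or 0 for kernel buffers); it is added to the total page count and folded into the first region only, so a buffer that starts mid-page still gets enough pages. A small worked example with assumed sizes, using ALIGN() as a stand-in for hr_hw_page_align():

        /* assumed: one 16 KB region, 4 KB pages, user buffer starting
         * 2 KB (0x800) into its first page
         */
        unsigned int page_shift = 12;                           /* 4 KB */
        size_t region_size = 16 * 1024;
        int unaligned = 0x800;                                  /* user_addr & ~PAGE_MASK */

        int buf_pg_count = DIV_ROUND_UP(region_size + unaligned,
                                        1 << page_shift);       /* 5 pages, not 4 */
        int region0_count = DIV_ROUND_UP(ALIGN(region_size + unaligned, 4096),
                                         1 << page_shift);      /* also 5 */
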
+static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ unsigned int ba_page_shift)
+{
+ struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
+ int ret;
+
+ hns_roce_hem_list_init(&mtr->hem_list);
+ if (!cfg->is_direct) {
+ ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
+ cfg->region, cfg->region_count,
+ ba_page_shift);
+ if (ret)
+ return ret;
+ cfg->root_ba = mtr->hem_list.root_ba;
+ cfg->ba_pg_shift = ba_page_shift;
+ } else {
+ cfg->ba_pg_shift = cfg->buf_pg_shift;
+ }
+
+ return 0;
+}
+
+static void mtr_free_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
+{
+ hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
+}
+
/**
* hns_roce_mtr_create - Create hns memory translate region.
*
+ * @hr_dev: RoCE device struct pointer
* @mtr: memory translate region
* @buf_attr: buffer attribute for creating mtr
* @ba_page_shift: page shift for multi-hop base address table
@@ -1001,95 +983,51 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
unsigned int ba_page_shift, struct ib_udata *udata,
unsigned long user_addr)
{
- struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
struct ib_device *ibdev = &hr_dev->ib_dev;
unsigned int buf_page_shift = 0;
- dma_addr_t *pages = NULL;
- int all_pg_cnt;
- int get_pg_cnt;
- int ret = 0;
-
- /* if disable mtt, all pages must in a continuous address range */
- cfg->is_direct = !mtr_has_mtt(buf_attr);
-
- /* if buffer only need mtt, just init the hem cfg */
- if (buf_attr->mtt_only) {
- cfg->buf_pg_shift = buf_attr->page_shift;
- cfg->buf_pg_count = mtr_bufs_size(buf_attr) >>
- buf_attr->page_shift;
- mtr->umem = NULL;
- mtr->kmem = NULL;
- } else {
- ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, cfg->is_direct,
- udata, user_addr);
- if (ret) {
- ibdev_err(ibdev,
- "failed to alloc mtr bufs, ret = %d.\n", ret);
- return ret;
- }
- }
+ int buf_page_cnt;
+ int ret;
- all_pg_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, cfg, &buf_page_shift);
- if (all_pg_cnt < 1) {
- ret = -ENOBUFS;
- ibdev_err(ibdev, "failed to init mtr buf cfg.\n");
- goto err_alloc_bufs;
+ buf_page_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg,
+ &buf_page_shift,
+ udata ? user_addr & ~PAGE_MASK : 0);
+ if (buf_page_cnt < 1 || buf_page_shift < HNS_HW_PAGE_SHIFT) {
+ ibdev_err(ibdev, "failed to init mtr cfg, count %d shift %d.\n",
+ buf_page_cnt, buf_page_shift);
+ return -EINVAL;
}
- hns_roce_hem_list_init(&mtr->hem_list);
- if (!cfg->is_direct) {
- ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
- cfg->region, cfg->region_count,
- ba_page_shift);
- if (ret) {
- ibdev_err(ibdev, "failed to request mtr hem, ret = %d.\n",
- ret);
- goto err_alloc_bufs;
- }
- cfg->root_ba = mtr->hem_list.root_ba;
- cfg->ba_pg_shift = ba_page_shift;
- } else {
- cfg->ba_pg_shift = cfg->buf_pg_shift;
+ ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret);
+ return ret;
}
- /* no buffer to map */
- if (buf_attr->mtt_only)
+ /* The caller has its own buffer list and invokes the hns_roce_mtr_map()
+ * to finish the MTT configuration.
+ */
+ if (buf_attr->mtt_only) {
+ mtr->umem = NULL;
+ mtr->kmem = NULL;
return 0;
-
- /* alloc a tmp array to store buffer's dma address */
- pages = kvcalloc(all_pg_cnt, sizeof(dma_addr_t), GFP_KERNEL);
- if (!pages) {
- ret = -ENOMEM;
- ibdev_err(ibdev, "failed to alloc mtr page list %d.\n",
- all_pg_cnt);
- goto err_alloc_hem_list;
- }
-
- get_pg_cnt = mtr_get_pages(hr_dev, mtr, pages, all_pg_cnt,
- buf_page_shift);
- if (get_pg_cnt != all_pg_cnt) {
- ibdev_err(ibdev, "failed to get mtr page %d != %d.\n",
- get_pg_cnt, all_pg_cnt);
- ret = -ENOBUFS;
- goto err_alloc_page_list;
}
- /* write buffer's dma address to BA table */
- ret = hns_roce_mtr_map(hr_dev, mtr, pages, all_pg_cnt);
+ ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);
if (ret) {
- ibdev_err(ibdev, "failed to map mtr pages, ret = %d.\n", ret);
- goto err_alloc_page_list;
+ ibdev_err(ibdev, "failed to alloc mtr bufs, ret = %d.\n", ret);
+ goto err_alloc_mtt;
}
- /* drop tmp array */
- kvfree(pages);
- return 0;
-err_alloc_page_list:
- kvfree(pages);
-err_alloc_hem_list:
- hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
-err_alloc_bufs:
+ /* Write buffer's dma address to MTT */
+ ret = mtr_map_bufs(hr_dev, mtr, buf_page_cnt, buf_page_shift);
+ if (ret)
+ ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
+ else
+ return 0;
+
mtr_free_bufs(hr_dev, mtr);
+err_alloc_mtt:
+ mtr_free_mtt(hr_dev, mtr);
return ret;
}
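
After the refactor, hns_roce_mtr_create() reduces to a straight-line sequence over the helpers introduced above, and buffer allocation plus MTT mapping are skipped entirely for mtt_only callers. A condensed view of the new order (error unwinding reduced to the two labels shown above):

        /* 1. describe the buffer: regions, page counts, is_direct */
        buf_page_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg,
                                        &buf_page_shift,
                                        udata ? user_addr & ~PAGE_MASK : 0);

        /* 2. build the BA table (no hem_list request for 0-level addressing) */
        ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);

        if (buf_attr->mtt_only)
                return 0;       /* caller maps its own pages via hns_roce_mtr_map() */

        /* 3. pin the user buffer or allocate the kernel buffer */
        ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);

        /* 4. write every page's DMA address into the MTT */
        ret = mtr_map_bufs(hr_dev, mtr, buf_page_cnt, buf_page_shift);
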
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index d8e2fe5558d2..004aca9086ab 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -209,7 +209,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
hr_qp->doorbell_qpn = 1;
} else {
- spin_lock(&qp_table->bank_lock);
+ mutex_lock(&qp_table->bank_mutex);
bankid = get_least_load_bankid_for_qp(qp_table->bank);
ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
@@ -217,12 +217,12 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
if (ret) {
ibdev_err(&hr_dev->ib_dev,
"failed to alloc QPN, ret = %d\n", ret);
- spin_unlock(&qp_table->bank_lock);
+ mutex_unlock(&qp_table->bank_mutex);
return ret;
}
qp_table->bank[bankid].inuse++;
- spin_unlock(&qp_table->bank_lock);
+ mutex_unlock(&qp_table->bank_mutex);
hr_qp->doorbell_qpn = (u32)num;
}
@@ -408,14 +408,37 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);
- spin_lock(&hr_dev->qp_table.bank_lock);
+ mutex_lock(&hr_dev->qp_table.bank_mutex);
hr_dev->qp_table.bank[bankid].inuse--;
- spin_unlock(&hr_dev->qp_table.bank_lock);
+ mutex_unlock(&hr_dev->qp_table.bank_mutex);
+}
+
+static u32 proc_rq_sge(struct hns_roce_dev *dev, struct hns_roce_qp *hr_qp,
+ bool user)
+{
+ u32 max_sge = dev->caps.max_rq_sg;
+
+ if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ return max_sge;
+
+ /* Reserve SGEs only for HIP08 in kernel; The userspace driver will
+ * calculate number of max_sge with reserved SGEs when allocating wqe
+ * buf, so there is no need to do this again in kernel. But the number
+ * may exceed the capacity of SGEs recorded in the firmware, so the
+ * kernel driver should just adapt the value accordingly.
+ */
+ if (user)
+ max_sge = roundup_pow_of_two(max_sge + 1);
+ else
+ hr_qp->rq.rsv_sge = 1;
+
+ return max_sge;
}
static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
- struct hns_roce_qp *hr_qp, int has_rq)
+ struct hns_roce_qp *hr_qp, int has_rq, bool user)
{
+ u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user);
u32 cnt;
/* If srq exist, set zero for relative number of rq */
@@ -431,8 +454,9 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
/* Check the validity of QP support capacity */
if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
- cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
- ibdev_err(&hr_dev->ib_dev, "RQ config error, depth=%u, sge=%d\n",
+ cap->max_recv_sge > max_sge) {
+ ibdev_err(&hr_dev->ib_dev,
+ "RQ config error, depth = %u, sge = %u\n",
cap->max_recv_wr, cap->max_recv_sge);
return -EINVAL;
}
@@ -444,7 +468,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
return -EINVAL;
}
- hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
+ hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
+ hr_qp->rq.rsv_sge);
if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
@@ -459,7 +484,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
hr_qp->rq_inl_buf.wqe_cnt = 0;
cap->max_recv_wr = cnt;
- cap->max_recv_sge = hr_qp->rq.max_gs;
+ cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
return 0;
}
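
proc_rq_sge() and the updated set_rq_size() implement the reserved-SGE rule for HIP08: kernel QPs reserve one SGE (rq.rsv_sge = 1) while user QPs only have the limit rounded up, since the userspace driver does its own reservation. The arithmetic for an assumed kernel QP on HIP08 asking for 6 receive SGEs:

        u32 requested = 6;                        /* cap->max_recv_sge from the caller */
        u32 rsv_sge = 1;                          /* set by proc_rq_sge() for kernel QPs */

        u32 max_gs = roundup_pow_of_two(max(1U, requested) + rsv_sge);  /* 8 */
        u32 reported = max_gs - rsv_sge;          /* 7 written back to cap->max_recv_sge */
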
@@ -599,7 +624,6 @@ static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
return -EINVAL;
buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
- buf_attr->fixed_page = true;
buf_attr->region_count = idx;
return 0;
@@ -919,7 +943,7 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
- hns_roce_qp_has_rq(init_attr));
+ hns_roce_qp_has_rq(init_attr), !!udata);
if (ret) {
ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
ret);
@@ -1371,6 +1395,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
unsigned int i;
mutex_init(&qp_table->scc_mutex);
+ mutex_init(&qp_table->bank_mutex);
xa_init(&hr_dev->qp_table_xa);
reserved_from_bot = hr_dev->caps.reserved_qps;
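
The QPN bank lock is converted from a spinlock to a mutex and initialised in hns_roce_init_qp_table(). The allocation path it protects runs in process context, and an ID allocation with GFP_KERNEL may sleep, which is not permitted under a spinlock; this reading of the change is inferred from the surrounding code rather than stated in the hunks. A sketch of a sleepable allocation of that kind:

#include <linux/idr.h>

/* GFP_KERNEL allocations may block on reclaim, so the caller must hold a
 * mutex (or no lock), never a spinlock, around a call like this one.
 */
static int pick_qpn(struct ida *ida, unsigned int lo, unsigned int hi)
{
        return ida_alloc_range(ida, lo, hi, GFP_KERNEL);
}
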
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index c4ae57e4173a..d5a6de0e7095 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -3,6 +3,7 @@
* Copyright (c) 2018 Hisilicon Limited.
*/
+#include <linux/pci.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
@@ -76,40 +77,16 @@ static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
- u32 pdn, u32 cqn, u16 xrcd, u64 db_rec_addr)
+static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_cmd_mailbox *mailbox;
- u64 mtts_wqe[MTT_MIN_COUNT] = { 0 };
- u64 mtts_idx[MTT_MIN_COUNT] = { 0 };
- dma_addr_t dma_handle_wqe = 0;
- dma_addr_t dma_handle_idx = 0;
int ret;
- /* Get the physical address of srq buf */
- ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
- ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
- if (ret < 1) {
- ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
- ret);
- return -ENOBUFS;
- }
-
- /* Get physical address of idx que buf */
- ret = hns_roce_mtr_find(hr_dev, &srq->idx_que.mtr, 0, mtts_idx,
- ARRAY_SIZE(mtts_idx), &dma_handle_idx);
- if (ret < 1) {
- ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
- ret);
- return -ENOBUFS;
- }
-
ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
if (ret) {
- ibdev_err(ibdev,
- "failed to alloc SRQ number, ret = %d.\n", ret);
+ ibdev_err(ibdev, "failed to alloc SRQ number.\n");
return -ENOMEM;
}
@@ -127,34 +104,36 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR_OR_NULL(mailbox)) {
- ret = -ENOMEM;
ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n");
+ ret = -ENOMEM;
goto err_xa;
}
- hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf,
- mtts_wqe, mtts_idx, dma_handle_wqe,
- dma_handle_idx);
+ ret = hr_dev->hw->write_srqc(srq, mailbox->buf);
+ if (ret) {
+ ibdev_err(ibdev, "failed to write SRQC.\n");
+ goto err_mbox;
+ }
ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret);
- goto err_xa;
+ goto err_mbox;
}
- atomic_set(&srq->refcount, 1);
- init_completion(&srq->free);
- return ret;
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return 0;
+err_mbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
err_xa:
xa_erase(&srq_table->xa, srq->srqn);
-
err_put:
hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
-
err_out:
hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
+
return ret;
}
@@ -178,46 +157,13 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}
-static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
- struct ib_udata *udata, unsigned long addr)
-{
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_buf_attr buf_attr = {};
- int err;
-
- srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
- HNS_ROCE_SGE_SIZE *
- srq->max_gs)));
-
- buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
- buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
- srq->wqe_shift);
- buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
- buf_attr.region_count = 1;
- buf_attr.fixed_page = true;
-
- err = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
- hr_dev->caps.srqwqe_ba_pg_sz +
- HNS_HW_PAGE_SHIFT, udata, addr);
- if (err)
- ibdev_err(ibdev,
- "failed to alloc SRQ buf mtr, ret = %d.\n", err);
-
- return err;
-}
-
-static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
-{
- hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
-}
-
static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
struct ib_udata *udata, unsigned long addr)
{
struct hns_roce_idx_que *idx_que = &srq->idx_que;
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_buf_attr buf_attr = {};
- int err;
+ int ret;
srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);
@@ -226,31 +172,33 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
srq->idx_que.entry_shift);
buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
buf_attr.region_count = 1;
- buf_attr.fixed_page = true;
- err = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
+ ret = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
udata, addr);
- if (err) {
+ if (ret) {
ibdev_err(ibdev,
- "failed to alloc SRQ idx mtr, ret = %d.\n", err);
- return err;
+ "failed to alloc SRQ idx mtr, ret = %d.\n", ret);
+ return ret;
}
if (!udata) {
idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
if (!idx_que->bitmap) {
ibdev_err(ibdev, "failed to alloc SRQ idx bitmap.\n");
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_idx_mtr;
}
}
+ idx_que->head = 0;
+ idx_que->tail = 0;
+
return 0;
err_idx_mtr:
hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
- return err;
+ return ret;
}
static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
@@ -262,10 +210,42 @@ static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
}
+static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_srq *srq,
+ struct ib_udata *udata, unsigned long addr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_attr buf_attr = {};
+ int ret;
+
+ srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
+ HNS_ROCE_SGE_SIZE *
+ srq->max_gs)));
+
+ buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
+ srq->wqe_shift);
+ buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
+ buf_attr.region_count = 1;
+
+ ret = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
+ hr_dev->caps.srqwqe_ba_pg_sz +
+ HNS_HW_PAGE_SHIFT, udata, addr);
+ if (ret)
+ ibdev_err(ibdev,
+ "failed to alloc SRQ buf mtr, ret = %d.\n", ret);
+
+ return ret;
+}
+
+static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_srq *srq)
+{
+ hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
+}
+
static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
- srq->head = 0;
- srq->tail = srq->wqe_cnt - 1;
srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
if (!srq->wrid)
return -ENOMEM;
@@ -279,96 +259,171 @@ static void free_srq_wrid(struct hns_roce_srq *srq)
srq->wrid = NULL;
}
-int hns_roce_create_srq(struct ib_srq *ib_srq,
- struct ib_srq_init_attr *init_attr,
- struct ib_udata *udata)
+static u32 proc_srq_sge(struct hns_roce_dev *dev, struct hns_roce_srq *hr_srq,
+ bool user)
+{
+ u32 max_sge = dev->caps.max_srq_sges;
+
+ if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ return max_sge;
+
+ /* Reserve SGEs only for HIP08 in kernel; The userspace driver will
+ * calculate number of max_sge with reserved SGEs when allocating wqe
+ * buf, so there is no need to do this again in kernel. But the number
+ * may exceed the capacity of SGEs recorded in the firmware, so the
+ * kernel driver should just adapt the value accordingly.
+ */
+ if (user)
+ max_sge = roundup_pow_of_two(max_sge + 1);
+ else
+ hr_srq->rsv_sge = 1;
+
+ return max_sge;
+}
+
+static int set_srq_basic_param(struct hns_roce_srq *srq,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
+ struct ib_srq_attr *attr = &init_attr->attr;
+ u32 max_sge;
+
+ max_sge = proc_srq_sge(hr_dev, srq, !!udata);
+ if (attr->max_wr > hr_dev->caps.max_srq_wrs ||
+ attr->max_sge > max_sge) {
+ ibdev_err(&hr_dev->ib_dev,
+ "invalid SRQ attr, depth = %u, sge = %u.\n",
+ attr->max_wr, attr->max_sge);
+ return -EINVAL;
+ }
+
+ attr->max_wr = max_t(u32, attr->max_wr, HNS_ROCE_MIN_SRQ_WQE_NUM);
+ srq->wqe_cnt = roundup_pow_of_two(attr->max_wr);
+ srq->max_gs = roundup_pow_of_two(attr->max_sge + srq->rsv_sge);
+
+ attr->max_wr = srq->wqe_cnt;
+ attr->max_sge = srq->max_gs - srq->rsv_sge;
+ attr->srq_limit = 0;
+
+ return 0;
+}
+
+static void set_srq_ext_param(struct hns_roce_srq *srq,
+ struct ib_srq_init_attr *init_attr)
+{
+ srq->cqn = ib_srq_has_cq(init_attr->srq_type) ?
+ to_hr_cq(init_attr->ext.cq)->cqn : 0;
+}
+
+static int set_srq_param(struct hns_roce_srq *srq,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
- struct hns_roce_ib_create_srq_resp resp = {};
- struct hns_roce_srq *srq = to_hr_srq(ib_srq);
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_ib_create_srq ucmd = {};
int ret;
- u32 cqn;
- if (init_attr->srq_type != IB_SRQT_BASIC &&
- init_attr->srq_type != IB_SRQT_XRC)
- return -EOPNOTSUPP;
+ ret = set_srq_basic_param(srq, init_attr, udata);
+ if (ret)
+ return ret;
- /* Check the actual SRQ wqe and SRQ sge num */
- if (init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
- init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
- return -EINVAL;
+ set_srq_ext_param(srq, init_attr);
- mutex_init(&srq->mutex);
- spin_lock_init(&srq->lock);
+ return 0;
+}
- srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
- srq->max_gs = init_attr->attr.max_sge;
+static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ struct ib_udata *udata)
+{
+ struct hns_roce_ib_create_srq ucmd = {};
+ int ret;
if (udata) {
ret = ib_copy_from_udata(&ucmd, udata,
min(udata->inlen, sizeof(ucmd)));
if (ret) {
- ibdev_err(ibdev, "failed to copy SRQ udata, ret = %d.\n",
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to copy SRQ udata, ret = %d.\n",
ret);
return ret;
}
}
- ret = alloc_srq_buf(hr_dev, srq, udata, ucmd.buf_addr);
- if (ret) {
- ibdev_err(ibdev,
- "failed to alloc SRQ buffer, ret = %d.\n", ret);
+ ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
+ if (ret)
return ret;
- }
- ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
- if (ret) {
- ibdev_err(ibdev, "failed to alloc SRQ idx, ret = %d.\n", ret);
- goto err_buf_alloc;
- }
+ ret = alloc_srq_wqe_buf(hr_dev, srq, udata, ucmd.buf_addr);
+ if (ret)
+ goto err_idx;
if (!udata) {
ret = alloc_srq_wrid(hr_dev, srq);
- if (ret) {
- ibdev_err(ibdev, "failed to alloc SRQ wrid, ret = %d.\n",
- ret);
- goto err_idx_alloc;
- }
+ if (ret)
+ goto err_wqe_buf;
}
- cqn = ib_srq_has_cq(init_attr->srq_type) ?
- to_hr_cq(init_attr->ext.cq)->cqn : 0;
- srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
+ return 0;
- ret = alloc_srqc(hr_dev, srq, to_hr_pd(ib_srq->pd)->pdn, cqn, 0, 0);
- if (ret) {
- ibdev_err(ibdev,
- "failed to alloc SRQ context, ret = %d.\n", ret);
- goto err_wrid_alloc;
- }
+err_wqe_buf:
+ free_srq_wqe_buf(hr_dev, srq);
+err_idx:
+ free_srq_idx(hr_dev, srq);
- srq->event = hns_roce_ib_srq_event;
- resp.srqn = srq->srqn;
+ return ret;
+}
+
+static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+{
+ free_srq_wrid(srq);
+ free_srq_wqe_buf(hr_dev, srq);
+ free_srq_idx(hr_dev, srq);
+}
+
+int hns_roce_create_srq(struct ib_srq *ib_srq,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
+ struct hns_roce_ib_create_srq_resp resp = {};
+ struct hns_roce_srq *srq = to_hr_srq(ib_srq);
+ int ret;
+
+ mutex_init(&srq->mutex);
+ spin_lock_init(&srq->lock);
+
+ ret = set_srq_param(srq, init_attr, udata);
+ if (ret)
+ return ret;
+
+ ret = alloc_srq_buf(hr_dev, srq, udata);
+ if (ret)
+ return ret;
+
+ ret = alloc_srqc(hr_dev, srq);
+ if (ret)
+ goto err_srq_buf;
if (udata) {
- ret = ib_copy_to_udata(udata, &resp,
- min(udata->outlen, sizeof(resp)));
- if (ret)
- goto err_srqc_alloc;
+ resp.srqn = srq->srqn;
+ if (ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)))) {
+ ret = -EFAULT;
+ goto err_srqc;
+ }
}
+ srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
+ srq->event = hns_roce_ib_srq_event;
+ atomic_set(&srq->refcount, 1);
+ init_completion(&srq->free);
+
return 0;
-err_srqc_alloc:
+err_srqc:
free_srqc(hr_dev, srq);
-err_wrid_alloc:
- free_srq_wrid(srq);
-err_idx_alloc:
- free_srq_idx(hr_dev, srq);
-err_buf_alloc:
+err_srq_buf:
free_srq_buf(hr_dev, srq);
+
return ret;
}
@@ -378,8 +433,6 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
free_srqc(hr_dev, srq);
- free_srq_idx(hr_dev, srq);
- free_srq_wrid(srq);
free_srq_buf(hr_dev, srq);
return 0;
}
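
The SRQ path is now split the same way as the MR and QP paths: set_srq_param() validates and rounds the sizes (including the HIP08 reserved SGE via proc_srq_sge()), alloc_srq_buf() sets up the idx queue, the WQE buffer and, for kernel SRQs, the wrid array, and alloc_srqc() allocates the SRQ number and writes the context. Teardown mirrors the setup, so both the error path and hns_roce_destroy_srq() only need the two free helpers. A condensed view of the create path (sketch, udata copy-out omitted):

        ret = set_srq_param(srq, init_attr, udata);   /* wqe_cnt, max_gs, cqn */
        if (ret)
                return ret;

        ret = alloc_srq_buf(hr_dev, srq, udata);      /* idx que + WQE buf + wrid */
        if (ret)
                return ret;

        ret = alloc_srqc(hr_dev, srq);                /* srqn + SRQ context */
        if (ret)
                goto err_srq_buf;

        srq->event = hns_roce_ib_srq_event;
        atomic_set(&srq->refcount, 1);
        init_completion(&srq->free);
        return 0;

err_srq_buf:
        free_srq_buf(hr_dev, srq);                    /* wrid, WQE buf, idx que */
        return ret;
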
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 9acc0ecc9a43..ac65c8237b2e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -70,7 +70,7 @@ static void i40iw_disconnect_worker(struct work_struct *work);
/**
* i40iw_free_sqbuf - put back puda buffer if refcount = 0
* @vsi: pointer to vsi structure
- * @buf: puda buffer to free
+ * @bufp: puda buffer to free
*/
void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)
{
@@ -729,6 +729,7 @@ static int i40iw_handle_tcp_options(struct i40iw_cm_node *cm_node,
/**
* i40iw_build_mpa_v1 - build a MPA V1 frame
* @cm_node: connection's node
+ * @start_addr: MPA frame start address
* @mpa_key: to do read0 or write0
*/
static void i40iw_build_mpa_v1(struct i40iw_cm_node *cm_node,
@@ -1040,7 +1041,7 @@ negotiate_done:
/**
* i40iw_schedule_cm_timer
- * @@cm_node: connection's node
+ * @cm_node: connection's node
* @sqbuf: buffer to send
* @type: if it is send or close
* @send_retrans: if rexmits to be done
@@ -1205,7 +1206,7 @@ static void i40iw_build_timer_list(struct list_head *timer_list,
/**
* i40iw_cm_timer_tick - system's timer expired callback
- * @pass: Pointing to cm_core
+ * @t: Timer instance to fetch the cm_core pointer from
*/
static void i40iw_cm_timer_tick(struct timer_list *t)
{
@@ -1463,6 +1464,7 @@ struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
* @cm_core: cm's core
* @dst_port: listener tcp port num
* @dst_addr: listener ip addr
+ * @vlan_id: vlan id for the given address
* @listener_state: state to match with listen node's
*/
static struct i40iw_cm_listener *i40iw_find_listener(
@@ -1521,7 +1523,7 @@ static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
/**
* i40iw_find_port - find port that matches reference port
* @hte: ptr to accelerated or non-accelerated list
- * @accelerated_list: flag for accelerated vs non-accelerated list
+ * @port: port number to locate
*/
static bool i40iw_find_port(struct list_head *hte, u16 port)
{
@@ -1834,6 +1836,7 @@ exit:
/**
* i40iw_dec_refcnt_listen - delete listener and associated cm nodes
* @cm_core: cm's core
+ * @listener: passive connection's listener
* @free_hanging_nodes: to free associated cm_nodes
* @apbvt_del: flag to delete the apbvt
*/
@@ -2029,7 +2032,7 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
return rc;
}
-/**
+/*
* i40iw_get_dst_ipv6
*/
static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
@@ -2051,7 +2054,8 @@ static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
/**
* i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
* @iwdev: iwarp device structure
- * @dst_ip: remote ip address
+ * @src: source ip address
+ * @dest: remote ip address
* @arpindex: if there is an arp entry
*/
static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
@@ -3004,7 +3008,7 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
/**
* i40iw_cm_reject - reject and teardown a connection
* @cm_node: connection's node
- * @pdate: ptr to private data for reject
+ * @pdata: ptr to private data for reject
* @plen: size of private data
*/
static int i40iw_cm_reject(struct i40iw_cm_node *cm_node, const void *pdata, u8 plen)
@@ -4302,7 +4306,7 @@ set_qhash:
* i40iw_cm_teardown_connections - teardown QPs
* @iwdev: device pointer
* @ipaddr: Pointer to IPv4 or IPv6 address
- * @ipv4: flag indicating IPv4 when true
+ * @nfo: cm info node
* @disconnect_all: flag indicating disconnect all QPs
* teardown QPs where source or destination addr matches ip addr
*/
@@ -4358,6 +4362,7 @@ void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
/**
* i40iw_ifdown_notify - process an ifdown on an interface
* @iwdev: device pointer
+ * @netdev: network interface device structure
* @ipaddr: Pointer to IPv4 or IPv6 address
* @ipv4: flag indicating IPv4 when true
* @ifup: flag indicating interface up when true
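
The i40iw hunks from here on are kernel-doc repairs rather than functional changes: parameter names are made to match the prototypes, stray "@ name" spacing and ";" separators are corrected, and comments that do not document a function are demoted from the kernel-doc opener to a plain comment so that scripts/kernel-doc does not treat them as documentation. The convention these fixes restore, shown on a hypothetical function:

/**
 * example_handler - one-line summary of what the function does
 * @dev: device the handler operates on ("@name:" with no space, and the name
 *       must match the parameter exactly)
 * @flags: behaviour flags
 *
 * Optional longer description. Comments that do not follow this layout
 * should open with a plain comment marker, which is what several hunks
 * below switch to.
 */
static int example_handler(struct device *dev, unsigned long flags);
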
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index c943d491b72b..eaea5d545eb8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -181,7 +181,7 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
* i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
* @buf: ptr to fpm query buffer
* @buf_idx: index into buf
- * @info: ptr to i40iw_hmc_obj_info struct
+ * @obj_info: ptr to i40iw_hmc_obj_info struct
* @rsrc_idx: resource index into info
*
* Decode a 64 bit value from fpm query buffer into max count and size
@@ -205,7 +205,7 @@ static u64 i40iw_sc_decode_fpm_query(u64 *buf,
/**
* i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
* @buf: ptr to fpm query buffer
- * @info: ptr to i40iw_hmc_obj_info struct
+ * @hmc_info: ptr to i40iw_hmc_obj_info struct
* @hmc_fpm_misc: ptr to fpm data
*
* parses fpm query buffer and copy max_cnt and
@@ -775,7 +775,7 @@ static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
* i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
* @cqp: struct for cqp hw
* @op_code: cqp opcode for completion
- * @info: completion q entry to return
+ * @compl_info: completion q entry to return
*/
static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(
struct i40iw_sc_cqp *cqp,
@@ -933,7 +933,7 @@ static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cq
* @cqp: struct for cqp hw
* @scratch: u64 saved to be used during cqp completion
* @hmc_fn_id: hmc function id
- * @commit_fpm_mem; Memory for fpm values
+ * @commit_fpm_mem: Memory for fpm values
* @post_sq: flag for cqp db to ring
* @wait_type: poll ccq or cqp registers for cqp completion
*/
@@ -1026,7 +1026,7 @@ i40iw_sc_query_rdma_features(struct i40iw_sc_cqp *cqp,
/**
* i40iw_get_rdma_features - get RDMA features
- * @dev - sc device struct
+ * @dev: sc device struct
*/
enum i40iw_status_code i40iw_get_rdma_features(struct i40iw_sc_dev *dev)
{
@@ -1456,7 +1456,7 @@ static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry(
* @cqp: struct for cqp hw
* @scratch: u64 saved to be used during cqp completion
* @entry_idx: index of mac entry
- * @ ignore_ref_count: to force mac adde delete
+ * @ignore_ref_count: to force mac adde delete
* @post_sq: flag for cqp db to ring
*/
static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry(
@@ -2304,7 +2304,7 @@ static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq,
* i40iw_sc_cq_modify - modify a Completion Queue
* @cq: cq struct
* @info: modification info struct
- * @scratch:
+ * @scratch: u64 saved to be used during cqp completion
* @post_sq: flag to post to sq
*/
static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,
@@ -3673,7 +3673,7 @@ static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev
/**
* cqp_sds_wqe_fill - fill cqp wqe doe sd
* @cqp: struct for cqp hw
- * @info; sd info for wqe
+ * @info: sd info for wqe
* @scratch: u64 saved to be used during cqp completion
*/
static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
@@ -4884,7 +4884,7 @@ void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 fcn_idx, bool is_pf)
/**
* i40iw_hw_stats_read_32 - Read 32-bit HW stats counters and accommodates for roll-overs.
- * @stat: pestat struct
+ * @stats: pestat struct
* @index: index in HW stats table which contains offset reg-addr
* @value: hw stats value
*/
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.c b/drivers/infiniband/hw/i40iw/i40iw_hmc.c
index 5484cbf55f0f..8bd72af9e099 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hmc.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hmc.c
@@ -46,7 +46,7 @@
* i40iw_find_sd_index_limit - finds segment descriptor index limit
* @hmc_info: pointer to the HMC configuration information structure
* @type: type of HMC resources we're searching
- * @index: starting index for the object
+ * @idx: starting index for the object
* @cnt: number of objects we're trying to create
* @sd_idx: pointer to return index of the segment descriptor in question
* @sd_limit: pointer to return the maximum number of segment descriptors
@@ -78,7 +78,7 @@ static inline void i40iw_find_sd_index_limit(struct i40iw_hmc_info *hmc_info,
* @type: HMC resource type we're examining
* @idx: starting index for the object
* @cnt: number of objects we're trying to create
- * @pd_index: pointer to return page descriptor index
+ * @pd_idx: pointer to return page descriptor index
* @pd_limit: pointer to return page descriptor index limit
*
* Calculates the page descriptor index and index limit for the resource
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 56fdc161f6f8..d167ac10c751 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -165,7 +165,7 @@ static void i40iw_cqp_ce_handler(struct i40iw_device *iwdev, struct i40iw_sc_cq
/**
* i40iw_iwarp_ce_handler - handle iwarp completions
* @iwdev: iwarp device
- * @iwcp: iwarp cq receiving event
+ * @iwcq: iwarp cq receiving event
*/
static void i40iw_iwarp_ce_handler(struct i40iw_device *iwdev,
struct i40iw_sc_cq *iwcq)
@@ -519,6 +519,7 @@ enum i40iw_status_code i40iw_manage_apbvt(struct i40iw_device *iwdev,
* @iwdev: iwarp device
* @mac_addr: mac address ptr
* @ip_addr: ip addr for arp cache
+ * @ipv4: flag indicating IPv4 when true
* @action: add, delete or modify
*/
void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
@@ -581,7 +582,6 @@ static void i40iw_send_syn_cqp_callback(struct i40iw_cqp_request *cqp_request, u
* @mtype: type of qhash
* @cmnode: cmnode associated with connection
* @wait: wait for completion
- * @user_pri:user pri of the connection
*/
enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
struct i40iw_cm_info *cminfo,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 584932d3cc44..ab4cb11950dc 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -186,7 +186,7 @@ static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
/**
* i40iw_dpc - tasklet for aeq and ceq 0
- * @data: iwarp device
+ * @t: Timer context to fetch pointer to iwarp device
*/
static void i40iw_dpc(struct tasklet_struct *t)
{
@@ -200,7 +200,7 @@ static void i40iw_dpc(struct tasklet_struct *t)
/**
* i40iw_ceq_dpc - dpc handler for CEQ
- * @data: data points to CEQ
+ * @t: Timer context to fetch pointer to CEQ data
*/
static void i40iw_ceq_dpc(struct tasklet_struct *t)
{
@@ -227,7 +227,7 @@ static irqreturn_t i40iw_irq_handler(int irq, void *data)
/**
* i40iw_destroy_cqp - destroy control qp
* @iwdev: iwarp device
- * @create_done: 1 if cqp create poll was success
+ * @free_hwcqp: 1 if CQP should be destroyed
*
* Issue destroy cqp request and
* free the resources associated with the cqp
@@ -253,7 +253,7 @@ static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
/**
* i40iw_disable_irqs - disable device interrupts
* @dev: hardware control device structure
- * @msic_vec: msix vector to disable irq
+ * @msix_vec: msix vector to disable irq
* @dev_id: parameter to pass to free_irq (used during irq setup)
*
* The function is called when destroying aeq/ceq
@@ -394,8 +394,9 @@ static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
/**
* i40iw_close_hmc_objects_type - delete hmc objects of a given type
- * @iwdev: iwarp device
+ * @dev: iwarp device
* @obj_type: the hmc object type to be deleted
+ * @hmc_info: pointer to the HMC configuration information
* @is_pf: true if the function is PF otherwise false
* @reset: true if called before reset
*/
@@ -437,6 +438,7 @@ static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
/**
* i40iw_ceq_handler - interrupt handler for ceq
+ * @irq: interrupt request number
* @data: ceq pointer
*/
static irqreturn_t i40iw_ceq_handler(int irq, void *data)
@@ -1777,6 +1779,7 @@ static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *cli
/**
* i40iw_close - client interface operation close for iwarp/uda device
* @ldev: lan device information
+ * @reset: true if called before reset
* @client: client to close
*
* Called by the lan driver during the processing of client unregister
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
index 5f97643e22e5..53e5cd1a2bd6 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
@@ -54,6 +54,7 @@ static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chun
/**
* i40iw_destroy_pble_pool - destroy pool during module unload
+ * @dev: i40iw_sc_dev struct
* @pble_rsrc: pble resources
*/
void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc)
@@ -112,8 +113,8 @@ enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
/**
* get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
- * @ pble_rsrc: structure containing fpm address
- * @ idx: where to return indexes
+ * @pble_rsrc: structure containing fpm address
+ * @idx: where to return indexes
*/
static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc,
struct sd_pd_idx *idx)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index 924be4b03c9a..d1c8cc0a6236 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -511,7 +511,8 @@ static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
/**
* i40iw_puda_qp_wqe - setup wqe for qp create
- * @rsrc: resource for qp
+ * @dev: iwarp device
+ * @qp: resource for qp
*/
static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
@@ -623,7 +624,8 @@ static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
/**
* i40iw_puda_cq_wqe - setup wqe for cq create
- * @rsrc: resource for cq
+ * @dev: iwarp device
+ * @cq: cq to setup
*/
static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
{
@@ -782,7 +784,7 @@ static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
/**
* i40iw_puda_dele_resources - delete all resources during close
- * @dev: iwarp device
+ * @vsi: pointer to vsi structure
* @type: type of resource to dele
* @reset: true if reset chip
*/
@@ -876,7 +878,7 @@ static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
/**
* i40iw_puda_create_rsrc - create resouce (ilq or ieq)
- * @dev: iwarp device
+ * @vsi: pointer to vsi structure
* @info: resource information
*/
enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
@@ -1121,6 +1123,7 @@ static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
/**
* i40iw_ieq_create_pbufl - create buffer list for single fpdu
+ * @pfpdu: partial management per user qp
* @rxlist: resource list for receive ieq buffes
* @pbufl: temp. list for buffers for fpddu
* @buf: first receive buffer
@@ -1434,7 +1437,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
/**
* i40iw_ieq_receive - received exception buffer
- * @dev: iwarp device
+ * @vsi: pointer to vsi structure
* @buf: exception buffer received
*/
static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index c3633c9944db..f521be16bf31 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -119,6 +119,8 @@ void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
* @qp: hw qp ptr
* @wqe_idx: return wqe index
* @wqe_size: size of sq wqe
+ * @total_size: work request length
+ * @wr_id: work request id
*/
u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
u32 *wqe_idx,
@@ -717,7 +719,6 @@ static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
* i40iw_cq_poll_completion - get cq completion info
* @cq: hw cq
* @info: cq poll information returned
- * @post_cq: update cq tail
*/
static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
struct i40iw_cq_poll_info *info)
@@ -1051,7 +1052,7 @@ void i40iw_device_init_uk(struct i40iw_dev_uk *dev)
/**
* i40iw_clean_cq - clean cq entries
- * @ queue completion context
+ * @queue: completion context
* @cq: cq to clean
*/
void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 644f8c641aa0..76f052b12c14 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -55,6 +55,7 @@
* i40iw_arp_table - manage arp table
* @iwdev: iwarp device
* @ip_addr: ip address for device
+ * @ipv4: flag indicating IPv4 when true
* @mac_addr: mac address ptr
* @action: modify, delete or add
*/
@@ -138,7 +139,7 @@ inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg)
/**
* i40iw_inetaddr_event - system notifier for ipv4 addr events
- * @notfier: not used
+ * @notifier: not used
* @event: event for notifier
* @ptr: if address
*/
@@ -214,7 +215,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
/**
* i40iw_inet6addr_event - system notifier for ipv6 addr events
- * @notfier: not used
+ * @notifier: not used
* @event: event for notifier
* @ptr: if address
*/
@@ -265,7 +266,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
/**
* i40iw_net_event - system notifier for netevents
- * @notfier: not used
+ * @notifier: not used
* @event: event for notifier
* @ptr: neighbor
*/
@@ -310,7 +311,7 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
/**
* i40iw_netdevice_event - system notifier for netdev events
- * @notfier: not used
+ * @notifier: not used
* @event: event for notifier
* @ptr: netdev
*/
@@ -652,6 +653,7 @@ struct ib_qp *i40iw_get_qp(struct ib_device *device, int qpn)
* i40iw_debug_buf - print debug msg and buffer is mask set
* @dev: hardware control device structure
* @mask: mask to compare if to print debug buffer
+ * @desc: identifying string
* @buf: points buffer addr
* @size: saize of buffer to print
*/
@@ -784,7 +786,7 @@ enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
/**
* i40iw_cqp_sds_cmd - create cqp command for sd
* @dev: hardware control device structure
- * @sd_info: information for sd cqp
+ * @sdinfo: information for sd cqp
*
*/
enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
@@ -889,7 +891,7 @@ void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
/**
* i40iw_terminate_imeout - timeout happened
- * @context: points to iwarp qp
+ * @t: points to iwarp qp
*/
static void i40iw_terminate_timeout(struct timer_list *t)
{
@@ -943,7 +945,7 @@ static void i40iw_cqp_generic_worker(struct work_struct *work)
/**
* i40iw_cqp_spawn_worker - spawn worket thread
- * @iwdev: device struct pointer
+ * @dev: device struct pointer
* @work_info: work request info
* @iw_vf_idx: virtual function index
*/
@@ -1048,7 +1050,7 @@ enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
/**
* i40iw_cqp_query_fpm_values_cmd - send cqp command for fpm
- * @iwdev: function device struct
+ * @dev: function device struct
* @values_mem: buffer for fpm
* @hmc_fn_id: function id for fpm
*/
@@ -1114,7 +1116,7 @@ enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
/**
* i40iw_vf_wait_vchnl_resp - wait for channel msg
- * @iwdev: function's device struct
+ * @dev: function's device struct
*/
enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
{
@@ -1461,7 +1463,7 @@ enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_in
/**
* i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
- * @vsi: pointer to the vsi structure
+ * @t: Timer context containing pointer to the vsi structure
*/
static void i40iw_hw_stats_timeout(struct timer_list *t)
{
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 65aedfe57e77..f18d146a6079 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -265,9 +265,7 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
/**
* i40iw_free_qp_resources - free up memory resources for qp
- * @iwdev: iwarp device
* @iwqp: qp ptr (user or kernel)
- * @qp_num: qp number assigned
*/
void i40iw_free_qp_resources(struct i40iw_qp *iwqp)
{
@@ -302,6 +300,7 @@ static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
/**
* i40iw_destroy_qp - destroy qp
* @ibqp: qp's ib pointer also to get to device's qp address
+ * @udata: user data
*/
static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
@@ -338,8 +337,8 @@ static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
/**
* i40iw_setup_virt_qp - setup for allocation of virtual qp
- * @dev: iwarp device
- * @qp: qp ptr
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr
* @init_info: initialize info to return
*/
static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
@@ -1241,7 +1240,7 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
* i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
* @arr: lvl1 pbl array
* @npages: page count
- * pg_size: page size
+ * @pg_size: page size
*
*/
static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
@@ -1258,7 +1257,7 @@ static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
/**
* i40iw_check_mr_contiguous - check if MR is physically contiguous
* @palloc: pbl allocation struct
- * pg_size: page size
+ * @pg_size: page size
*/
static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
{
@@ -1533,6 +1532,7 @@ static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
* @ibmr: ib mem to access iwarp mr pointer
* @sg: scatter gather list for fmr
* @sg_nents: number of sg pages
+ * @sg_offset: scatter gather offset
*/
static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
@@ -1881,6 +1881,7 @@ static void i40iw_del_memlist(struct i40iw_mr *iwmr,
/**
* i40iw_dereg_mr - deregister mr
* @ib_mr: mr ptr for dereg
+ * @udata: user data
*/
static int i40iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
@@ -1945,7 +1946,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
return 0;
}
-/**
+/*
* hw_rev_show
*/
static ssize_t hw_rev_show(struct device *dev,
@@ -1959,7 +1960,7 @@ static ssize_t hw_rev_show(struct device *dev,
}
static DEVICE_ATTR_RO(hw_rev);
-/**
+/*
* hca_type_show
*/
static ssize_t hca_type_show(struct device *dev,
@@ -1969,7 +1970,7 @@ static ssize_t hca_type_show(struct device *dev,
}
static DEVICE_ATTR_RO(hca_type);
-/**
+/*
* board_id_show
*/
static ssize_t board_id_show(struct device *dev,
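
The i40iw_verbs.c hunks are kernel-doc cleanups: each documented parameter gets an @name: line (the missing @udata, @pg_size and @sg_offset), stale parameters are dropped, and comment blocks that are not kernel-doc (the sysfs *_show helpers) lose the double-star opener so the documentation build stops warning about them. A minimal illustration of the expected header shape, with an invented function:

#include <stdio.h>

/**
 * example_map_sg - map a scatter-gather list (illustrative only)
 * @nents: number of entries in the list
 * @sg_offset: byte offset into the first entry, may be zero
 *
 * Return: number of entries mapped, or a negative error code.
 */
static int example_map_sg(int nents, unsigned int sg_offset)
{
	if (sg_offset % 4096)
		return -1;	/* stand-in for -EINVAL */
	return nents;
}

int main(void)
{
	printf("mapped %d entries\n", example_map_sg(3, 0));
	return 0;
}
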
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
index 48fd327f876b..aca9061688ae 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
@@ -119,7 +119,7 @@ static enum i40iw_status_code vchnl_vf_send_get_pe_stats_req(struct i40iw_sc_dev
return ret_code;
}
-/**
+/*
* vchnl_vf_send_add_hmc_objs_req - Add HMC objects
* @dev: IWARP device pointer
* @vchnl_req: Virtual channel message request pointer
@@ -158,9 +158,9 @@ static enum i40iw_status_code vchnl_vf_send_add_hmc_objs_req(struct i40iw_sc_dev
* vchnl_vf_send_del_hmc_objs_req - del HMC objects
* @dev: IWARP device pointer
* @vchnl_req: Virtual channel message request pointer
- * @ rsrc_type - resource type to delete
- * @ start_index - starting index for resource
- * @ rsrc_count - number of resource type to delete
+ * @rsrc_type: resource type to delete
+ * @start_index: starting index for resource
+ * @rsrc_count: number of resource type to delete
*/
static enum i40iw_status_code vchnl_vf_send_del_hmc_objs_req(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_req *vchnl_req,
@@ -222,6 +222,7 @@ static void vchnl_pf_send_get_ver_resp(struct i40iw_sc_dev *dev,
* @dev: IWARP device pointer
* @vf_id: Virtual function ID associated with the message
* @vchnl_msg: Virtual channel message buffer pointer
+ * @hmc_fcn: HMC function index pointer
*/
static void vchnl_pf_send_get_hmc_fcn_resp(struct i40iw_sc_dev *dev,
u32 vf_id,
@@ -276,6 +277,7 @@ static void vchnl_pf_send_get_pe_stats_resp(struct i40iw_sc_dev *dev,
* @dev: IWARP device pointer
* @vf_id: Virtual function ID associated with the message
* @vchnl_msg: Virtual channel message buffer pointer
+ * @op_ret_code: I40IW_ERR_* status code
*/
static void vchnl_pf_send_error_resp(struct i40iw_sc_dev *dev, u32 vf_id,
struct i40iw_virtchnl_op_buf *vchnl_msg,
@@ -297,8 +299,9 @@ static void vchnl_pf_send_error_resp(struct i40iw_sc_dev *dev, u32 vf_id,
/**
* pf_cqp_get_hmc_fcn_callback - Callback for Get HMC Fcn
- * @cqp_req_param: CQP Request param value
- * @not_used: unused CQP callback parameter
+ * @dev: IWARP device pointer
+ * @callback_param: unused CQP callback parameter
+ * @cqe_info: CQE information pointer
*/
static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback_param,
struct i40iw_ccq_cqe_info *cqe_info)
@@ -331,7 +334,7 @@ static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback
/**
* pf_add_hmc_obj - Callback for Add HMC Object
- * @vf_dev: pointer to the VF Device
+ * @work_vf_dev: pointer to the VF Device
*/
static void pf_add_hmc_obj_callback(void *work_vf_dev)
{
@@ -404,7 +407,7 @@ del_out:
/**
* i40iw_vf_init_pestat - Initialize stats for VF
- * @devL pointer to the VF Device
+ * @dev: pointer to the VF Device
* @stats: Statistics structure pointer
* @index: Stats index
*/
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index e3cd402c079a..f26a0d920842 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1699,7 +1699,7 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
int is_bonded = mlx4_is_bonded(dev);
- if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
+ if (!rdma_is_port_valid(qp->device, flow_attr->port))
return ERR_PTR(-EINVAL);
if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index 1b5891130aab..24ee79aa2122 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -798,7 +798,7 @@ static void unregister_pkey_tree(struct mlx4_ib_dev *device)
int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev)
{
- int i;
+ unsigned int i;
int ret = 0;
if (!mlx4_is_master(dev->dev))
@@ -817,7 +817,7 @@ int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev)
goto err_ports;
}
- for (i = 1; i <= dev->ib_dev.phys_port_cnt; ++i) {
+ rdma_for_each_port(&dev->ib_dev, i) {
ret = add_port_entries(dev, i);
if (ret)
goto err_add_entries;
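
Both mlx4 hunks replace open-coded port arithmetic with the core helpers: rdma_is_port_valid() for the range check in mlx4_ib_create_flow() and rdma_for_each_port() for the sysfs registration loop, keeping the valid range (1..phys_port_cnt) in one place and the index unsigned. The userspace sketch below models only the iteration idiom; fake_ib_device and fake_for_each_port are stand-ins, not RDMA core names.

#include <stdio.h>

struct fake_ib_device {
	unsigned int phys_port_cnt;
};

/* Hide the 1-based, inclusive port range behind one macro so callers
 * cannot get the bounds wrong. */
#define fake_for_each_port(dev, port) \
	for ((port) = 1; (port) <= (dev)->phys_port_cnt; (port)++)

int main(void)
{
	struct fake_ib_device dev = { .phys_port_cnt = 2 };
	unsigned int port;

	fake_for_each_port(&dev, port)
		printf("registering sysfs entries for port %u\n", port);
	return 0;
}
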
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 819c142857d6..de3c2fc6f361 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -94,13 +94,13 @@ struct devx_umem {
struct mlx5_core_dev *mdev;
struct ib_umem *umem;
u32 dinlen;
- u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
+ u32 dinbox[MLX5_ST_SZ_DW(destroy_umem_in)];
};
struct devx_umem_reg_cmd {
void *in;
u32 inlen;
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ u32 out[MLX5_ST_SZ_DW(create_umem_out)];
};
static struct mlx5_ib_ucontext *
@@ -111,8 +111,8 @@ devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
{
- u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
void *uctx;
int err;
u16 uid;
@@ -138,14 +138,14 @@ int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
if (err)
return err;
- uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ uid = MLX5_GET(create_uctx_out, out, uid);
return uid;
}
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
MLX5_SET(destroy_uctx_in, in, uid, uid);
@@ -288,6 +288,80 @@ static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
return ((u64)opcode << 32) | obj_id;
}
+static u32 devx_get_created_obj_id(const void *in, const void *out, u16 opcode)
+{
+ switch (opcode) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ return MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ case MLX5_CMD_OP_CREATE_UMEM:
+ return MLX5_GET(create_umem_out, out, umem_id);
+ case MLX5_CMD_OP_CREATE_MKEY:
+ return MLX5_GET(create_mkey_out, out, mkey_index);
+ case MLX5_CMD_OP_CREATE_CQ:
+ return MLX5_GET(create_cq_out, out, cqn);
+ case MLX5_CMD_OP_ALLOC_PD:
+ return MLX5_GET(alloc_pd_out, out, pd);
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ return MLX5_GET(alloc_transport_domain_out, out,
+ transport_domain);
+ case MLX5_CMD_OP_CREATE_RMP:
+ return MLX5_GET(create_rmp_out, out, rmpn);
+ case MLX5_CMD_OP_CREATE_SQ:
+ return MLX5_GET(create_sq_out, out, sqn);
+ case MLX5_CMD_OP_CREATE_RQ:
+ return MLX5_GET(create_rq_out, out, rqn);
+ case MLX5_CMD_OP_CREATE_RQT:
+ return MLX5_GET(create_rqt_out, out, rqtn);
+ case MLX5_CMD_OP_CREATE_TIR:
+ return MLX5_GET(create_tir_out, out, tirn);
+ case MLX5_CMD_OP_CREATE_TIS:
+ return MLX5_GET(create_tis_out, out, tisn);
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ return MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ return MLX5_GET(create_flow_table_out, out, table_id);
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ return MLX5_GET(create_flow_group_out, out, group_id);
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ return MLX5_GET(set_fte_in, in, flow_index);
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ return MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
+ case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
+ return MLX5_GET(alloc_packet_reformat_context_out, out,
+ packet_reformat_id);
+ case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
+ return MLX5_GET(alloc_modify_header_context_out, out,
+ modify_header_id);
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ return MLX5_GET(create_scheduling_element_out, out,
+ scheduling_element_id);
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ return MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ return MLX5_GET(set_l2_table_entry_in, in, table_index);
+ case MLX5_CMD_OP_CREATE_QP:
+ return MLX5_GET(create_qp_out, out, qpn);
+ case MLX5_CMD_OP_CREATE_SRQ:
+ return MLX5_GET(create_srq_out, out, srqn);
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ return MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
+ case MLX5_CMD_OP_CREATE_DCT:
+ return MLX5_GET(create_dct_out, out, dctn);
+ case MLX5_CMD_OP_CREATE_XRQ:
+ return MLX5_GET(create_xrq_out, out, xrqn);
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ return MLX5_GET(attach_to_mcg_in, in, qpn);
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ return MLX5_GET(alloc_xrcd_out, out, xrcd);
+ case MLX5_CMD_OP_CREATE_PSV:
+ return MLX5_GET(create_psv_out, out, psv0_index);
+ default:
+ /* The entry must match to one of the devx_is_obj_create_cmd */
+ WARN_ON(true);
+ return 0;
+ }
+}
+
static u64 devx_get_obj_id(const void *in)
{
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
@@ -399,8 +473,8 @@ static u64 devx_get_obj_id(const void *in)
break;
case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
- MLX5_GET(general_obj_in_cmd_hdr, in,
- obj_id));
+ MLX5_GET(query_modify_header_context_in,
+ in, modify_header_id));
break;
case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
@@ -1019,63 +1093,76 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
u32 *dinlen,
u32 *obj_id)
{
- u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);
- *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ *obj_id = devx_get_created_obj_id(in, out, opcode);
*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
-
- MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);
- switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
+ switch (opcode) {
case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
+ MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, obj_type,
+ MLX5_GET(general_obj_in_cmd_hdr, in, obj_type));
break;
case MLX5_CMD_OP_CREATE_UMEM:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(destroy_umem_in, din, opcode,
MLX5_CMD_OP_DESTROY_UMEM);
+ MLX5_SET(destroy_umem_in, din, umem_id, *obj_id);
break;
case MLX5_CMD_OP_CREATE_MKEY:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
+ MLX5_SET(destroy_mkey_in, din, opcode,
+ MLX5_CMD_OP_DESTROY_MKEY);
+ MLX5_SET(destroy_mkey_in, in, mkey_index, *obj_id);
break;
case MLX5_CMD_OP_CREATE_CQ:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
+ MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
+ MLX5_SET(destroy_cq_in, din, cqn, *obj_id);
break;
case MLX5_CMD_OP_ALLOC_PD:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ MLX5_SET(dealloc_pd_in, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ MLX5_SET(dealloc_pd_in, din, pd, *obj_id);
break;
case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(dealloc_transport_domain_in, din, opcode,
MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(dealloc_transport_domain_in, din, transport_domain,
+ *obj_id);
break;
case MLX5_CMD_OP_CREATE_RMP:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ MLX5_SET(destroy_rmp_in, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ MLX5_SET(destroy_rmp_in, din, rmpn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_SQ:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ MLX5_SET(destroy_sq_in, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ MLX5_SET(destroy_sq_in, din, sqn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_RQ:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, din, rqn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_RQT:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, din, rqtn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_TIR:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_TIS:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, din, tisn, *obj_id);
break;
case MLX5_CMD_OP_ALLOC_Q_COUNTER:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(dealloc_q_counter_in, din, opcode,
MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ MLX5_SET(dealloc_q_counter_in, din, counter_set_id, *obj_id);
break;
case MLX5_CMD_OP_CREATE_FLOW_TABLE:
*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
- *obj_id = MLX5_GET(create_flow_table_out, out, table_id);
MLX5_SET(destroy_flow_table_in, din, other_vport,
MLX5_GET(create_flow_table_in, in, other_vport));
MLX5_SET(destroy_flow_table_in, din, vport_number,
@@ -1083,12 +1170,11 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
MLX5_SET(destroy_flow_table_in, din, table_type,
MLX5_GET(create_flow_table_in, in, table_type));
MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(destroy_flow_table_in, din, opcode,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
break;
case MLX5_CMD_OP_CREATE_FLOW_GROUP:
*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
- *obj_id = MLX5_GET(create_flow_group_out, out, group_id);
MLX5_SET(destroy_flow_group_in, din, other_vport,
MLX5_GET(create_flow_group_in, in, other_vport));
MLX5_SET(destroy_flow_group_in, din, vport_number,
@@ -1098,12 +1184,11 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
MLX5_SET(destroy_flow_group_in, din, table_id,
MLX5_GET(create_flow_group_in, in, table_id));
MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(destroy_flow_group_in, din, opcode,
MLX5_CMD_OP_DESTROY_FLOW_GROUP);
break;
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
- *obj_id = MLX5_GET(set_fte_in, in, flow_index);
MLX5_SET(delete_fte_in, din, other_vport,
MLX5_GET(set_fte_in, in, other_vport));
MLX5_SET(delete_fte_in, din, vport_number,
@@ -1113,63 +1198,70 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
MLX5_SET(delete_fte_in, din, table_id,
MLX5_GET(set_fte_in, in, table_id));
MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(delete_fte_in, din, opcode,
MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
break;
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(dealloc_flow_counter_in, din, opcode,
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
+ MLX5_SET(dealloc_flow_counter_in, din, flow_counter_id,
+ *obj_id);
break;
case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(dealloc_packet_reformat_context_in, din, opcode,
MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_SET(dealloc_packet_reformat_context_in, din,
+ packet_reformat_id, *obj_id);
break;
case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(dealloc_modify_header_context_in, din, opcode,
MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_SET(dealloc_modify_header_context_in, din,
+ modify_header_id, *obj_id);
break;
case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
- *obj_id = MLX5_GET(create_scheduling_element_out, out,
- scheduling_element_id);
MLX5_SET(destroy_scheduling_element_in, din,
scheduling_hierarchy,
MLX5_GET(create_scheduling_element_in, in,
scheduling_hierarchy));
MLX5_SET(destroy_scheduling_element_in, din,
scheduling_element_id, *obj_id);
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(destroy_scheduling_element_in, din, opcode,
MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
break;
case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
- *obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(delete_vxlan_udp_dport_in, din, opcode,
MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
break;
case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
- *obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(delete_l2_table_entry_in, din, opcode,
MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
break;
case MLX5_CMD_OP_CREATE_QP:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, din, qpn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_SRQ:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
+ MLX5_SET(destroy_srq_in, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
+ MLX5_SET(destroy_srq_in, din, srqn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_XRC_SRQ:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(destroy_xrc_srq_in, din, opcode,
MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, din, xrc_srqn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_DCT:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, din, dctn, *obj_id);
break;
case MLX5_CMD_OP_CREATE_XRQ:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
+ MLX5_SET(destroy_xrq_in, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
+ MLX5_SET(destroy_xrq_in, din, xrqn, *obj_id);
break;
case MLX5_CMD_OP_ATTACH_TO_MCG:
*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
@@ -1178,16 +1270,19 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
+ MLX5_SET(detach_from_mcg_in, din, opcode,
+ MLX5_CMD_OP_DETACH_FROM_MCG);
+ MLX5_SET(detach_from_mcg_in, din, qpn, *obj_id);
break;
case MLX5_CMD_OP_ALLOC_XRCD:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+ MLX5_SET(dealloc_xrcd_in, din, opcode,
+ MLX5_CMD_OP_DEALLOC_XRCD);
+ MLX5_SET(dealloc_xrcd_in, din, xrcd, *obj_id);
break;
case MLX5_CMD_OP_CREATE_PSV:
- MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+ MLX5_SET(destroy_psv_in, din, opcode,
MLX5_CMD_OP_DESTROY_PSV);
- MLX5_SET(destroy_psv_in, din, psvn,
- MLX5_GET(create_psv_out, out, psv0_index));
+ MLX5_SET(destroy_psv_in, din, psvn, *obj_id);
break;
default:
/* The entry must match to one of the devx_is_obj_create_cmd */
@@ -1215,9 +1310,9 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
mkey->size = MLX5_GET64(mkc, mkc, len);
mkey->pd = MLX5_GET(mkc, mkc, pd);
devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
+ init_waitqueue_head(&mkey->wait);
- return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mkey->key), mkey,
- GFP_KERNEL));
+ return mlx5r_store_odp_mkey(dev, mkey);
}
static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
@@ -1290,16 +1385,15 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
int ret;
dev = mlx5_udata_to_mdev(&attrs->driver_udata);
- if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
+ if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
+ xa_erase(&obj->ib_dev->odp_mkeys,
+ mlx5_base_mkey(obj->devx_mr.mmkey.key)))
/*
* The pagefault_single_data_segment() does commands against
* the mmkey, we must wait for that to stop before freeing the
* mkey, as another allocation could get the same mkey #.
*/
- xa_erase(&obj->ib_dev->odp_mkeys,
- mlx5_base_mkey(obj->devx_mr.mmkey.key));
- synchronize_srcu(&dev->odp_srcu);
- }
+ mlx5r_deref_wait_odp_mkey(&obj->devx_mr.mmkey);
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
@@ -1345,6 +1439,16 @@ out:
rcu_read_unlock();
}
+static bool is_apu_thread_cq(struct mlx5_ib_dev *dev, const void *in)
+{
+ if (!MLX5_CAP_GEN(dev->mdev, apu) ||
+ !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
+ apu_thread_cq))
+ return false;
+
+ return true;
+}
+
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
struct uverbs_attr_bundle *attrs)
{
@@ -1398,7 +1502,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
obj->flags |= DEVX_OBJ_FLAGS_DCT;
err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
cmd_in_len, cmd_out, cmd_out_len);
- } else if (opcode == MLX5_CMD_OP_CREATE_CQ) {
+ } else if (opcode == MLX5_CMD_OP_CREATE_CQ &&
+ !is_apu_thread_cq(dev, cmd_in)) {
obj->flags |= DEVX_OBJ_FLAGS_CQ;
obj->core_cq.comp = devx_cq_comp;
err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
@@ -1968,8 +2073,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
num_alloc_xa_entries++;
event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
- if (!event_sub)
+ if (!event_sub) {
+ err = -ENOMEM;
goto err;
+ }
list_add_tail(&event_sub->event_list, &sub_list);
uverbs_uobject_get(&ev_file->uobj);
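
The devx.c change above pulls the "which field of the firmware reply holds the new object id" knowledge into devx_get_created_obj_id(), and devx_obj_build_destroy_cmd() then fills the matching destroy command through its command-specific layout instead of poking everything through the generic object header. The standalone toy below mirrors that create-opcode to destroy-command mapping; every opcode, field and name in it is invented for illustration.

#include <stdint.h>
#include <stdio.h>

enum toy_create_opcode { TOY_CREATE_CQ = 1, TOY_CREATE_QP = 2 };

struct toy_destroy_cmd {
	int opcode;
	uint32_t obj_id;
};

/* Each create command reports its id in a command-specific output field;
 * the toy just forwards the raw value. */
static uint32_t toy_created_obj_id(enum toy_create_opcode op, uint32_t fw_out)
{
	(void)op;
	return fw_out;
}

static struct toy_destroy_cmd toy_build_destroy(enum toy_create_opcode op,
						uint32_t fw_out)
{
	struct toy_destroy_cmd din = {
		.obj_id = toy_created_obj_id(op, fw_out),
	};

	switch (op) {
	case TOY_CREATE_CQ:
		din.opcode = 101;	/* toy DESTROY_CQ */
		break;
	case TOY_CREATE_QP:
		din.opcode = 102;	/* toy DESTROY_QP */
		break;
	}
	return din;
}

int main(void)
{
	struct toy_destroy_cmd d = toy_build_destroy(TOY_CREATE_CQ, 7);

	printf("destroy opcode %d for object id %u\n", d.opcode, d.obj_id);
	return 0;
}
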
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 9bb9bb058932..652c6ccf1881 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -48,7 +48,7 @@ static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u8 port_num,
if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED &&
in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
return true;
- return dev->mdev->port_caps[port_num - 1].has_smi;
+ return dev->port_caps[port_num - 1].has_smi;
}
static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
@@ -279,7 +279,7 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
-int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
+int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
@@ -299,7 +299,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
packet_error = be16_to_cpu(out_mad->status);
- dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
+ dev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
out:
@@ -308,8 +308,8 @@ out:
return err;
}
-int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
- struct ib_smp *out_mad)
+static int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
+ struct ib_smp *out_mad)
{
struct ib_smp *in_mad = NULL;
int err = -ENOMEM;
@@ -549,7 +549,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
props->gid_tbl_len = out_mad->data[50];
props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
- props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len;
+ props->pkey_tbl_len = dev->pkey_table_len;
props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
props->active_width = out_mad->data[31] & 0xf;
@@ -589,7 +589,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
/* If reported active speed is QDR, check if is FDR-10 */
if (props->active_speed == 4) {
- if (mdev->port_caps[port - 1].ext_port_cap &
+ if (dev->port_caps[port - 1].ext_port_cap &
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
init_query_mad(in_mad);
in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d26f3f3e0462..0d69a697d75f 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
*/
#include <linux/debugfs.h>
@@ -461,7 +462,6 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
struct net_device *ndev, *upper;
enum ib_mtu ndev_ib_mtu;
bool put_mdev = true;
- u16 qkey_viol_cntr;
u32 eth_prot_oper;
u8 mdev_port_num;
bool ext;
@@ -499,20 +499,22 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
&props->active_width, ext);
- props->port_cap_flags |= IB_PORT_CM_SUP;
- props->ip_gids = true;
+ if (!dev->is_rep && mlx5_is_roce_enabled(mdev)) {
+ u16 qkey_viol_cntr;
- props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
- roce_address_table_size);
+ props->port_cap_flags |= IB_PORT_CM_SUP;
+ props->ip_gids = true;
+ props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
+ roce_address_table_size);
+ mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
+ props->qkey_viol_cntr = qkey_viol_cntr;
+ }
props->max_mtu = IB_MTU_4096;
props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
props->pkey_tbl_len = 1;
props->state = IB_PORT_DOWN;
props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
- mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
- props->qkey_viol_cntr = qkey_viol_cntr;
-
/* If this is a stub query for an unaffiliated port stop here */
if (!put_mdev)
goto out;
@@ -815,9 +817,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (err)
return err;
- err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
- if (err)
- return err;
+ props->max_pkeys = dev->pkey_table_len;
err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
if (err)
@@ -1384,19 +1384,17 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
- int ret;
+ return mlx5_query_port_roce(ibdev, port, props);
+}
- /* Only link layer == ethernet is valid for representors
- * and we always use port 1
+static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ /* Default special Pkey for representor device port as per the
+ * IB specification 1.3 section 10.9.1.2.
*/
- ret = mlx5_query_port_roce(ibdev, port, props);
- if (ret || !props)
- return ret;
-
- /* We don't support GIDS */
- props->gid_tbl_len = 0;
-
- return ret;
+ *pkey = 0xffff;
+ return 0;
}
static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
@@ -2935,8 +2933,8 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
int err;
int port;
- for (port = 1; port <= ARRAY_SIZE(dev->mdev->port_caps); port++) {
- dev->mdev->port_caps[port - 1].has_smi = false;
+ for (port = 1; port <= ARRAY_SIZE(dev->port_caps); port++) {
+ dev->port_caps[port - 1].has_smi = false;
if (MLX5_CAP_GEN(dev->mdev, port_type) ==
MLX5_CAP_PORT_TYPE_IB) {
if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
@@ -2948,10 +2946,10 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
port, err);
return err;
}
- dev->mdev->port_caps[port - 1].has_smi =
+ dev->port_caps[port - 1].has_smi =
vport_ctx.has_smi;
} else {
- dev->mdev->port_caps[port - 1].has_smi = true;
+ dev->port_caps[port - 1].has_smi = true;
}
}
}
@@ -2960,63 +2958,12 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
- int port;
+ unsigned int port;
- for (port = 1; port <= dev->num_ports; port++)
+ rdma_for_each_port (&dev->ib_dev, port)
mlx5_query_ext_port_caps(dev, port);
}
-static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
-{
- struct ib_device_attr *dprops = NULL;
- struct ib_port_attr *pprops = NULL;
- int err = -ENOMEM;
-
- pprops = kzalloc(sizeof(*pprops), GFP_KERNEL);
- if (!pprops)
- goto out;
-
- dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
- if (!dprops)
- goto out;
-
- err = mlx5_ib_query_device(&dev->ib_dev, dprops, NULL);
- if (err) {
- mlx5_ib_warn(dev, "query_device failed %d\n", err);
- goto out;
- }
-
- err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
- if (err) {
- mlx5_ib_warn(dev, "query_port %d failed %d\n",
- port, err);
- goto out;
- }
-
- dev->mdev->port_caps[port - 1].pkey_table_len =
- dprops->max_pkeys;
- dev->mdev->port_caps[port - 1].gid_table_len =
- pprops->gid_tbl_len;
- mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n",
- port, dprops->max_pkeys, pprops->gid_tbl_len);
-
-out:
- kfree(pprops);
- kfree(dprops);
-
- return err;
-}
-
-static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
-{
- /* For representors use port 1, is this is the only native
- * port
- */
- if (dev->is_rep)
- return __get_port_caps(dev, 1);
- return __get_port_caps(dev, port);
-}
-
static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
{
switch (umr_fence_cap) {
@@ -3311,8 +3258,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
int err;
dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev),
- &dev->port[port_num].roce.nb);
+ err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
if (err) {
dev->port[port_num].roce.nb.notifier_call = NULL;
return err;
@@ -3324,8 +3270,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
if (dev->port[port_num].roce.nb.notifier_call) {
- unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev),
- &dev->port[port_num].roce.nb);
+ unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
dev->port[port_num].roce.nb.notifier_call = NULL;
}
}
@@ -3490,10 +3435,6 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
if (err)
goto unbind;
- err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
- if (err)
- goto unbind;
-
err = mlx5_add_netdev_notifier(ibdev, port_num);
if (err) {
mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
@@ -3571,11 +3512,9 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
break;
}
}
- if (!bound) {
- get_port_caps(dev, i + 1);
+ if (!bound)
mlx5_ib_dbg(dev, "no free port found for port %d\n",
i + 1);
- }
}
list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
@@ -3928,8 +3867,7 @@ static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_ib_cleanup_multiport_master(dev);
WARN_ON(!xa_empty(&dev->odp_mkeys));
- cleanup_srcu_struct(&dev->odp_srcu);
-
+ mutex_destroy(&dev->cap_mask_mutex);
WARN_ON(!xa_empty(&dev->sig_mrs));
WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
}
@@ -3940,6 +3878,12 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
int err;
int i;
+ dev->ib_dev.node_type = RDMA_NODE_IB_CA;
+ dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
+ dev->ib_dev.phys_port_cnt = dev->num_ports;
+ dev->ib_dev.dev.parent = mdev->device;
+ dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
+
for (i = 0; i < dev->num_ports; i++) {
spin_lock_init(&dev->port[i].mp.mpi_lock);
rwlock_init(&dev->port[i].roce.netdev_lock);
@@ -3958,27 +3902,14 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
if (err)
goto err_mp;
- if (!mlx5_core_mp_enabled(mdev)) {
- for (i = 1; i <= dev->num_ports; i++) {
- err = get_port_caps(dev, i);
- if (err)
- break;
- }
- } else {
- err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
- }
+ err = mlx5_query_max_pkeys(&dev->ib_dev, &dev->pkey_table_len);
if (err)
goto err_mp;
if (mlx5_use_mad_ifc(dev))
get_ext_port_caps(dev);
- dev->ib_dev.node_type = RDMA_NODE_IB_CA;
- dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
- dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
- dev->ib_dev.dev.parent = mdev->device;
- dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list);
@@ -3989,17 +3920,11 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
-
- err = init_srcu_struct(&dev->odp_srcu);
- if (err)
- goto err_mp;
-
return 0;
err_mp:
mlx5_ib_cleanup_multiport_master(dev);
-
- return -ENOMEM;
+ return err;
}
static int mlx5_ib_enable_driver(struct ib_device *dev)
@@ -4069,6 +3994,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.query_srq = mlx5_ib_query_srq,
.query_ucontext = mlx5_ib_query_ucontext,
.reg_user_mr = mlx5_ib_reg_user_mr,
+ .reg_user_mr_dmabuf = mlx5_ib_reg_user_mr_dmabuf,
.req_notify_cq = mlx5_ib_arm_cq,
.rereg_user_mr = mlx5_ib_rereg_user_mr,
.resize_cq = mlx5_ib_resize_cq,
@@ -4209,6 +4135,7 @@ static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
.get_port_immutable = mlx5_port_rep_immutable,
.query_port = mlx5_ib_rep_query_port,
+ .query_pkey = mlx5_ib_rep_query_pkey,
};
static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
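
mlx5_ib_rep_query_pkey() hard-codes the reply to 0xffff, the default full-membership P_Key, for representor ports. In the P_Key encoding the top bit is the membership type (1 = full member), so 0xffff is the catch-all key a port can always present; a short check of that encoding, with invented names:

#include <stdint.h>
#include <stdio.h>

static int pkey_is_full_member(uint16_t pkey)
{
	return (pkey & 0x8000) != 0;	/* bit 15: membership type */
}

int main(void)
{
	uint16_t rep_default_pkey = 0xffff;

	printf("pkey 0x%04x full member: %d\n", rep_default_pkey,
	       pkey_is_full_member(rep_default_pkey));
	return 0;
}
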
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index b0fdc1b08e06..88cc26e008fc 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
*/
#ifndef MLX5_IB_H
@@ -683,11 +684,8 @@ struct mlx5_ib_mr {
u64 pi_iova;
/* For ODP and implicit */
- atomic_t num_deferred_work;
- wait_queue_head_t q_deferred_work;
struct xarray implicit_children;
union {
- struct rcu_head rcu;
struct list_head elm;
struct work_struct work;
} odp_destroy;
@@ -703,6 +701,12 @@ static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
mr->umem->is_odp;
}
+static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
+{
+ return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
+ mr->umem->is_dmabuf;
+}
+
struct mlx5_ib_mw {
struct ib_mw ibmw;
struct mlx5_core_mkey mmkey;
@@ -1029,6 +1033,11 @@ struct mlx5_var_table {
u64 num_var_hw_entries;
};
+struct mlx5_port_caps {
+ bool has_smi;
+ u8 ext_port_cap;
+};
+
struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
@@ -1056,11 +1065,6 @@ struct mlx5_ib_dev {
u64 odp_max_size;
struct mlx5_ib_pf_eq odp_pf_eq;
- /*
- * Sleepable RCU that prevents destruction of MRs while they are still
- * being used by a page fault handler.
- */
- struct srcu_struct odp_srcu;
struct xarray odp_mkeys;
u32 null_mkey;
@@ -1089,6 +1093,8 @@ struct mlx5_ib_dev {
struct mlx5_var_table var_table;
struct xarray sig_mrs;
+ struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
+ u16 pkey_table_len;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1243,6 +1249,10 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
+struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags,
+ struct ib_udata *udata);
int mlx5_ib_advise_mr(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 flags,
@@ -1253,11 +1263,13 @@ int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
int page_shift, int flags);
+int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct ib_udata *udata,
int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
+void mlx5_ib_fence_dmabuf_mr(struct mlx5_ib_mr *mr);
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
u64 length, u64 virt_addr, int access_flags,
struct ib_pd *pd, struct ib_udata *udata);
@@ -1279,9 +1291,7 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
-int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
-int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
- struct ib_smp *out_mad);
+int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
@@ -1345,6 +1355,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 flags, struct ib_sge *sg_list, u32 num_sge);
int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
+int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
@@ -1370,6 +1381,10 @@ static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
{
return -EOPNOTSUPP;
}
+static inline int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
extern const struct mmu_interval_notifier_ops mlx5_mn_ops;
@@ -1576,6 +1591,29 @@ static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev,
return true;
}
+static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
+ struct mlx5_core_mkey *mmkey)
+{
+ refcount_set(&mmkey->usecount, 1);
+
+ return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key),
+ mmkey, GFP_KERNEL));
+}
+
+/* deref an mkey that can participate in ODP flow */
+static inline void mlx5r_deref_odp_mkey(struct mlx5_core_mkey *mmkey)
+{
+ if (refcount_dec_and_test(&mmkey->usecount))
+ wake_up(&mmkey->wait);
+}
+
+/* deref an mkey that can participate in ODP flow and wait for release */
+static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_core_mkey *mmkey)
+{
+ mlx5r_deref_odp_mkey(mmkey);
+ wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
+}
+
int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
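
The new mlx5r_store_odp_mkey()/mlx5r_deref_odp_mkey()/mlx5r_deref_wait_odp_mkey() helpers replace the per-device odp_srcu with a per-mkey refcount plus waitqueue: lookups take a reference before touching the mkey and drop it when done, while teardown drops the creation reference and waits for the count to reach zero. Below is a userspace analogue of that lifetime scheme, using a mutex and condition variable as stand-ins for the kernel refcount_t and wait_event machinery; all names are invented, compile with -pthread.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_mkey {
	int usecount;
	pthread_mutex_t lock;
	pthread_cond_t released;
};

static void toy_mkey_init(struct toy_mkey *k)
{
	k->usecount = 1;	/* creation reference */
	pthread_mutex_init(&k->lock, NULL);
	pthread_cond_init(&k->released, NULL);
}

/* Lookup path: refuse the reference once teardown has started. */
static bool toy_mkey_get(struct toy_mkey *k)
{
	bool ok;

	pthread_mutex_lock(&k->lock);
	ok = k->usecount > 0;
	if (ok)
		k->usecount++;
	pthread_mutex_unlock(&k->lock);
	return ok;
}

static void toy_mkey_put(struct toy_mkey *k)
{
	pthread_mutex_lock(&k->lock);
	if (--k->usecount == 0)
		pthread_cond_broadcast(&k->released);
	pthread_mutex_unlock(&k->lock);
}

/* Teardown path: drop the creation reference and wait for all users. */
static void toy_mkey_put_and_wait(struct toy_mkey *k)
{
	pthread_mutex_lock(&k->lock);
	if (--k->usecount == 0)
		pthread_cond_broadcast(&k->released);
	while (k->usecount > 0)
		pthread_cond_wait(&k->released, &k->lock);
	pthread_mutex_unlock(&k->lock);
}

int main(void)
{
	struct toy_mkey k;

	toy_mkey_init(&k);
	if (toy_mkey_get(&k)) {		/* page-fault handler side */
		printf("mkey busy, usecount %d\n", k.usecount);
		toy_mkey_put(&k);
	}
	toy_mkey_put_and_wait(&k);	/* destroy side */
	return 0;
}
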
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 24f8d59a42ea..db05b0e0a8d7 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -36,6 +37,8 @@
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
@@ -155,6 +158,7 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
mr->mmkey.type = MLX5_MKEY_MR;
mr->mmkey.key |= mlx5_idx_to_mkey(
MLX5_GET(create_mkey_out, mr->out, mkey_index));
+ init_waitqueue_head(&mr->mmkey.wait);
WRITE_ONCE(dev->cache.last_add, jiffies);
@@ -935,6 +939,17 @@ static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
mr->access_flags = access_flags;
}
+static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
+ u64 iova)
+{
+ /*
+ * The alignment of iova has already been checked upon entering
+ * UVERBS_METHOD_REG_DMABUF_MR
+ */
+ umem->iova = iova;
+ return PAGE_SIZE;
+}
+
static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
struct ib_umem *umem, u64 iova,
int access_flags)
@@ -944,7 +959,11 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
struct mlx5_ib_mr *mr;
unsigned int page_size;
- page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size, 0, iova);
+ if (umem->is_dmabuf)
+ page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
+ else
+ page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size,
+ 0, iova);
if (WARN_ON(!page_size))
return ERR_PTR(-EINVAL);
ent = mr_cache_ent_from_order(
@@ -980,7 +999,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
mr->mmkey.size = umem->length;
mr->mmkey.pd = to_mpd(pd)->pdn;
mr->page_shift = order_base_2(page_size);
- mr->umem = umem;
set_mr_fields(dev, mr, umem->length, access_flags);
return mr;
@@ -1201,8 +1219,10 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
/*
* Send the DMA list to the HW for a normal MR using UMR.
+ * Dmabuf MR is handled in a similar way, except that the MLX5_IB_UPD_XLT_ZAP
+ * flag may be used.
*/
-static int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
+int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
{
struct mlx5_ib_dev *dev = mr_to_mdev(mr);
struct device *ddev = &dev->mdev->pdev->dev;
@@ -1244,6 +1264,10 @@ static int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
cur_mtt->ptag =
cpu_to_be64(rdma_block_iter_dma_address(&biter) |
MLX5_IB_MTT_PRESENT);
+
+ if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP))
+ cur_mtt->ptag = 0;
+
cur_mtt++;
}
@@ -1528,10 +1552,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
}
odp->private = mr;
- init_waitqueue_head(&mr->q_deferred_work);
- atomic_set(&mr->num_deferred_work, 0);
- err = xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
- &mr->mmkey, GFP_KERNEL));
+ err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
if (err)
goto err_dereg_mr;
@@ -1567,6 +1588,81 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return create_real_mr(pd, umem, iova, access_flags);
}
+static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
+{
+ struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
+ struct mlx5_ib_mr *mr = umem_dmabuf->private;
+
+ dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
+
+ if (!umem_dmabuf->sgt)
+ return;
+
+ mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
+ ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+}
+
+static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = {
+ .allow_peer2peer = 1,
+ .move_notify = mlx5_ib_dmabuf_invalidate_cb,
+};
+
+struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_mr *mr = NULL;
+ struct ib_umem_dmabuf *umem_dmabuf;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
+ !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mlx5_ib_dbg(dev,
+ "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x\n",
+ offset, virt_addr, length, fd, access_flags);
+
+ /* dmabuf requires xlt update via umr to work. */
+ if (!mlx5_ib_can_load_pas_with_umr(dev, length))
+ return ERR_PTR(-EINVAL);
+
+ umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd,
+ access_flags,
+ &mlx5_ib_dmabuf_attach_ops);
+ if (IS_ERR(umem_dmabuf)) {
+ mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n",
+ PTR_ERR(umem_dmabuf));
+ return ERR_CAST(umem_dmabuf);
+ }
+
+ mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
+ access_flags);
+ if (IS_ERR(mr)) {
+ ib_umem_release(&umem_dmabuf->umem);
+ return ERR_CAST(mr);
+ }
+
+ mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
+
+ atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
+ umem_dmabuf->private = mr;
+ err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
+ if (err)
+ goto err_dereg_mr;
+
+ err = mlx5_ib_init_dmabuf_mr(mr);
+ if (err)
+ goto err_dereg_mr;
+ return &mr->ibmr;
+
+err_dereg_mr:
+ dereg_mr(dev, mr);
+ return ERR_PTR(err);
+}
+
/**
* mlx5_mr_cache_invalidate - Fence all DMA on the MR
* @mr: The MR to fence
@@ -1740,8 +1836,8 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
return ERR_PTR(err);
return NULL;
}
- /* DM or ODP MR's don't have a umem so we can't re-use it */
- if (!mr->umem || is_odp_mr(mr))
+ /* DM or ODP MR's don't have a normal umem so we can't re-use it */
+ if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
goto recreate;
/*
@@ -1760,10 +1856,10 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
}
/*
- * DM doesn't have a PAS list so we can't re-use it, odp does but the
- * logic around releasing the umem is different
+ * DM doesn't have a PAS list so we can't re-use it, odp/dmabuf does
+ * but the logic around releasing the umem is different
*/
- if (!mr->umem || is_odp_mr(mr))
+ if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
goto recreate;
if (!(new_access_flags & IB_ACCESS_ON_DEMAND) &&
@@ -1876,6 +1972,8 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
/* Stop all DMA */
if (is_odp_mr(mr))
mlx5_ib_fence_odp_mr(mr);
+ else if (is_dmabuf_mr(mr))
+ mlx5_ib_fence_dmabuf_mr(mr);
else
clean_mr(dev, mr);
@@ -2227,9 +2325,7 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
}
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- err = xa_err(xa_store(&dev->odp_mkeys,
- mlx5_base_mkey(mw->mmkey.key), &mw->mmkey,
- GFP_KERNEL));
+ err = mlx5r_store_odp_mkey(dev, &mw->mmkey);
if (err)
goto free_mkey;
}
@@ -2249,14 +2345,13 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
struct mlx5_ib_dev *dev = to_mdev(mw->device);
struct mlx5_ib_mw *mmw = to_mmw(mw);
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key));
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
+ xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key)))
/*
- * pagefault_single_data_segment() may be accessing mmw under
- * SRCU if the user bound an ODP MR to this MW.
+ * pagefault_single_data_segment() may be accessing mmw
+ * if the user bound an ODP MR to this MW.
*/
- synchronize_srcu(&dev->odp_srcu);
- }
+ mlx5r_deref_wait_odp_mkey(&mmw->mmkey);
return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
}
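
mlx5_ib_reg_user_mr_dmabuf() wires dmabuf registration into the cacheable-MR path: the pages are mapped lazily, move_notify zaps the MTTs via mlx5_ib_update_mr_pas(..., MLX5_IB_UPD_XLT_ZAP), and the next page fault remaps them. From userspace this surfaces as ibv_reg_dmabuf_mr() in rdma-core (assumed present in the installed libibverbs, roughly v34 or later); the sketch below only demonstrates the call shape, uses a placeholder fd of -1, and fails cleanly on systems without a suitable device. Link with -libverbs.

#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	int num = 0;
	struct ibv_device **list = ibv_get_device_list(&num);
	struct ibv_context *ctx;
	struct ibv_pd *pd;
	struct ibv_mr *mr;

	if (!list || num == 0) {
		fprintf(stderr, "no RDMA devices found\n");
		return 1;
	}
	ctx = ibv_open_device(list[0]);
	if (!ctx)
		return 1;
	pd = ibv_alloc_pd(ctx);
	if (!pd)
		return 1;

	/* offset 0, 4 KiB, iova 0, dmabuf fd -1 (placeholder), local write */
	mr = ibv_reg_dmabuf_mr(pd, 0, 4096, 0, -1, IBV_ACCESS_LOCAL_WRITE);
	if (!mr)
		perror("ibv_reg_dmabuf_mr");
	else
		ibv_dereg_mr(mr);

	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(list);
	return 0;
}
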
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index aa2413b50adc..b103555b1f5d 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -33,6 +33,8 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <linux/kernel.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
#include "mlx5_ib.h"
#include "cmd.h"
@@ -113,7 +115,6 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
* xarray would be protected by the umem_mutex, however that is not
* possible. Instead this uses a weaker update-then-lock pattern:
*
- * srcu_read_lock()
* xa_store()
* mutex_lock(umem_mutex)
* mlx5_ib_update_xlt()
@@ -124,12 +125,9 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
* before destroying.
*
* The umem_mutex provides the acquire/release semantic needed to make
- * the xa_store() visible to a racing thread. While SRCU is not
- * technically required, using it gives consistent use of the SRCU
- * locking around the xarray.
+ * the xa_store() visible to a racing thread.
*/
lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);
- lockdep_assert_held(&mr_to_mdev(imr)->odp_srcu);
for (; pklm != end; pklm++, idx++) {
struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);
@@ -205,8 +203,8 @@ static void dma_fence_odp_mr(struct mlx5_ib_mr *mr)
}
/*
- * This must be called after the mr has been removed from implicit_children
- * and the SRCU synchronized. NOTE: The MR does not necessarily have to be
+ * This must be called after the mr has been removed from implicit_children.
+ * NOTE: The MR does not necessarily have to be
* empty here, parallel page faults could have raced with the free process and
* added pages to it.
*/
@@ -216,19 +214,15 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
- int srcu_key;
- /* implicit_child_mr's are not allowed to have deferred work */
- WARN_ON(atomic_read(&mr->num_deferred_work));
+ mlx5r_deref_wait_odp_mkey(&mr->mmkey);
if (need_imr_xlt) {
- srcu_key = srcu_read_lock(&mr_to_mdev(mr)->odp_srcu);
mutex_lock(&odp_imr->umem_mutex);
mlx5_ib_update_xlt(mr->parent, idx, 1, 0,
MLX5_IB_UPD_XLT_INDIRECT |
MLX5_IB_UPD_XLT_ATOMIC);
mutex_unlock(&odp_imr->umem_mutex);
- srcu_read_unlock(&mr_to_mdev(mr)->odp_srcu, srcu_key);
}
dma_fence_odp_mr(mr);
@@ -236,26 +230,16 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
mr->parent = NULL;
mlx5_mr_cache_free(mr_to_mdev(mr), mr);
ib_umem_odp_release(odp);
- if (atomic_dec_and_test(&imr->num_deferred_work))
- wake_up(&imr->q_deferred_work);
}
static void free_implicit_child_mr_work(struct work_struct *work)
{
struct mlx5_ib_mr *mr =
container_of(work, struct mlx5_ib_mr, odp_destroy.work);
+ struct mlx5_ib_mr *imr = mr->parent;
free_implicit_child_mr(mr, true);
-}
-
-static void free_implicit_child_mr_rcu(struct rcu_head *head)
-{
- struct mlx5_ib_mr *mr =
- container_of(head, struct mlx5_ib_mr, odp_destroy.rcu);
-
- /* Freeing a MR is a sleeping operation, so bounce to a work queue */
- INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
- queue_work(system_unbound_wq, &mr->odp_destroy.work);
+ mlx5r_deref_odp_mkey(&imr->mmkey);
}
static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
@@ -264,21 +248,14 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
struct mlx5_ib_mr *imr = mr->parent;
- xa_lock(&imr->implicit_children);
- /*
- * This can race with mlx5_ib_free_implicit_mr(), the first one to
- * reach the xa lock wins the race and destroys the MR.
- */
- if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_ATOMIC) !=
- mr)
- goto out_unlock;
+ if (!refcount_inc_not_zero(&imr->mmkey.usecount))
+ return;
- atomic_inc(&imr->num_deferred_work);
- call_srcu(&mr_to_mdev(mr)->odp_srcu, &mr->odp_destroy.rcu,
- free_implicit_child_mr_rcu);
+ xa_erase(&imr->implicit_children, idx);
-out_unlock:
- xa_unlock(&imr->implicit_children);
+ /* Freeing a MR is a sleeping operation, so bounce to a work queue */
+ INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
+ queue_work(system_unbound_wq, &mr->odp_destroy.work);
}
static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
@@ -490,6 +467,12 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
mr->parent = imr;
odp->private = mr;
+ /*
+	 * First refcount is owned by the xarray and second refcount
+ * is returned to the caller.
+ */
+ refcount_set(&mr->mmkey.usecount, 2);
+
err = mlx5_ib_update_xlt(mr, 0,
MLX5_IMR_MTT_ENTRIES,
PAGE_SHIFT,
@@ -500,27 +483,28 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
goto out_mr;
}
- /*
- * Once the store to either xarray completes any error unwind has to
- * use synchronize_srcu(). Avoid this with xa_reserve()
- */
- ret = xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
- GFP_KERNEL);
+ xa_lock(&imr->implicit_children);
+ ret = __xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
+ GFP_KERNEL);
if (unlikely(ret)) {
if (xa_is_err(ret)) {
ret = ERR_PTR(xa_err(ret));
- goto out_mr;
+ goto out_lock;
}
/*
* Another thread beat us to creating the child mr, use
* theirs.
*/
- goto out_mr;
+ refcount_inc(&ret->mmkey.usecount);
+ goto out_lock;
}
+ xa_unlock(&imr->implicit_children);
mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
return mr;
+out_lock:
+ xa_unlock(&imr->implicit_children);
out_mr:
mlx5_mr_cache_free(mr_to_mdev(imr), mr);
out_umem:
@@ -559,8 +543,6 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
imr->ibmr.device = &dev->ib_dev;
imr->umem = &umem_odp->umem;
imr->is_odp_implicit = true;
- atomic_set(&imr->num_deferred_work, 0);
- init_waitqueue_head(&imr->q_deferred_work);
xa_init(&imr->implicit_children);
err = mlx5_ib_update_xlt(imr, 0,
@@ -572,8 +554,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
if (err)
goto out_mr;
- err = xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key),
- &imr->mmkey, GFP_KERNEL));
+ err = mlx5r_store_odp_mkey(dev, &imr->mmkey);
if (err)
goto out_mr;
@@ -591,60 +572,35 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
{
struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
struct mlx5_ib_dev *dev = mr_to_mdev(imr);
- struct list_head destroy_list;
struct mlx5_ib_mr *mtt;
- struct mlx5_ib_mr *tmp;
unsigned long idx;
- INIT_LIST_HEAD(&destroy_list);
-
xa_erase(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key));
/*
- * This stops the SRCU protected page fault path from touching either
- * the imr or any children. The page fault path can only reach the
- * children xarray via the imr.
- */
- synchronize_srcu(&dev->odp_srcu);
-
- /*
* All work on the prefetch list must be completed, xa_erase() prevented
* new work from being created.
*/
- wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
-
+ mlx5r_deref_wait_odp_mkey(&imr->mmkey);
/*
* At this point it is forbidden for any other thread to enter
* pagefault_mr() on this imr. It is already forbidden to call
* pagefault_mr() on an implicit child. Due to this additions to
* implicit_children are prevented.
+ * In addition, any new call to destroy_unused_implicit_child_mr()
+ * may return immediately.
*/
/*
- * Block destroy_unused_implicit_child_mr() from incrementing
- * num_deferred_work.
- */
- xa_lock(&imr->implicit_children);
- xa_for_each (&imr->implicit_children, idx, mtt) {
- __xa_erase(&imr->implicit_children, idx);
- list_add(&mtt->odp_destroy.elm, &destroy_list);
- }
- xa_unlock(&imr->implicit_children);
-
- /*
- * Wait for any concurrent destroy_unused_implicit_child_mr() to
- * complete.
- */
- wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
-
- /*
* Fence the imr before we destroy the children. This allows us to
* skip updating the XLT of the imr during destroy of the child mkey
* the imr points to.
*/
mlx5_mr_cache_invalidate(imr);
- list_for_each_entry_safe (mtt, tmp, &destroy_list, odp_destroy.elm)
+ xa_for_each(&imr->implicit_children, idx, mtt) {
+ xa_erase(&imr->implicit_children, idx);
free_implicit_child_mr(mtt, false);
+ }
mlx5_mr_cache_free(dev, imr);
ib_umem_odp_release(odp_imr);
@@ -663,13 +619,39 @@ void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
/* Wait for all running page-fault handlers to finish. */
- synchronize_srcu(&mr_to_mdev(mr)->odp_srcu);
-
- wait_event(mr->q_deferred_work, !atomic_read(&mr->num_deferred_work));
+ mlx5r_deref_wait_odp_mkey(&mr->mmkey);
dma_fence_odp_mr(mr);
}
+/**
+ * mlx5_ib_fence_dmabuf_mr - Stop all access to the dmabuf MR
+ * @mr: to fence
+ *
+ * On return no parallel threads will be touching this MR and no DMA will be
+ * active.
+ */
+void mlx5_ib_fence_dmabuf_mr(struct mlx5_ib_mr *mr)
+{
+ struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
+
+ /* Prevent new page faults and prefetch requests from succeeding */
+ xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
+
+ mlx5r_deref_wait_odp_mkey(&mr->mmkey);
+
+ dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
+ mlx5_mr_cache_invalidate(mr);
+ umem_dmabuf->private = NULL;
+ ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+ dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+
+ if (!mr->cache_ent) {
+ mlx5_core_destroy_mkey(mr_to_mdev(mr)->mdev, &mr->mmkey);
+ WARN_ON(mr->descs);
+ }
+}
+
#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
#define MLX5_PF_FLAGS_SNAPSHOT BIT(2)
#define MLX5_PF_FLAGS_ENABLE BIT(3)
@@ -747,8 +729,10 @@ static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
struct mlx5_ib_mr *mtt;
u64 len;
+ xa_lock(&imr->implicit_children);
mtt = xa_load(&imr->implicit_children, idx);
if (unlikely(!mtt)) {
+ xa_unlock(&imr->implicit_children);
mtt = implicit_get_child_mr(imr, idx);
if (IS_ERR(mtt)) {
ret = PTR_ERR(mtt);
@@ -756,6 +740,9 @@ static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
}
upd_start_idx = min(upd_start_idx, idx);
upd_len = idx - upd_start_idx + 1;
+ } else {
+ refcount_inc(&mtt->mmkey.usecount);
+ xa_unlock(&imr->implicit_children);
}
umem_odp = to_ib_umem_odp(mtt->umem);
@@ -764,6 +751,9 @@ static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
ret = pagefault_real_mr(mtt, umem_odp, user_va, len,
bytes_mapped, flags);
+
+ mlx5r_deref_odp_mkey(&mtt->mmkey);
+
if (ret < 0)
goto out;
user_va += len;
@@ -803,6 +793,44 @@ out:
return ret;
}
+static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
+ u32 *bytes_mapped, u32 flags)
+{
+ struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
+ u32 xlt_flags = 0;
+ int err;
+ unsigned int page_size;
+
+ if (flags & MLX5_PF_FLAGS_ENABLE)
+ xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
+
+ dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
+ err = ib_umem_dmabuf_map_pages(umem_dmabuf);
+ if (err) {
+ dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+ return err;
+ }
+
+ page_size = mlx5_umem_find_best_pgsz(&umem_dmabuf->umem, mkc,
+ log_page_size, 0,
+ umem_dmabuf->umem.iova);
+ if (unlikely(page_size < PAGE_SIZE)) {
+ ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+ err = -EINVAL;
+ } else {
+ err = mlx5_ib_update_mr_pas(mr, xlt_flags);
+ }
+ dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+
+ if (err)
+ return err;
+
+ if (bytes_mapped)
+ *bytes_mapped += bcnt;
+
+ return ib_umem_num_pages(mr->umem);
+}
+
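pagefault_dmabuf_mr() and mlx5_ib_fence_dmabuf_mr() bracket every dma-buf map/unmap with the buffer's reservation lock. A stand-alone sketch of that importer-side pattern using the generic dma-buf API, where dma_buf_map_attachment() stands in for ib_umem_dmabuf_map_pages() and the eventual unmap would happen on invalidation or teardown rather than immediately:

	#include <linux/dma-buf.h>
	#include <linux/dma-resv.h>
	#include <linux/err.h>

	static int map_dmabuf_sketch(struct dma_buf_attachment *attach,
				     struct sg_table **sgt_out)
	{
		struct sg_table *sgt;

		dma_resv_lock(attach->dmabuf->resv, NULL);
		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
		if (IS_ERR(sgt)) {
			dma_resv_unlock(attach->dmabuf->resv);
			return PTR_ERR(sgt);
		}
		/* ... program the device translation table from sgt ... */
		*sgt_out = sgt;	/* kept mapped; unmapped later under the same lock */
		dma_resv_unlock(attach->dmabuf->resv);
		return 0;
	}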
/*
* Returns:
* -EFAULT: The io_virt->bcnt is not within the MR, it covers pages that are
@@ -817,10 +845,12 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- lockdep_assert_held(&mr_to_mdev(mr)->odp_srcu);
if (unlikely(io_virt < mr->mmkey.iova))
return -EFAULT;
+ if (mr->umem->is_dmabuf)
+ return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags);
+
if (!odp->is_implicit_odp) {
u64 user_va;
@@ -847,6 +877,16 @@ int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
return ret >= 0 ? 0 : ret;
}
+int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
+{
+ int ret;
+
+ ret = pagefault_dmabuf_mr(mr, mr->umem->length, NULL,
+ MLX5_PF_FLAGS_ENABLE);
+
+ return ret >= 0 ? 0 : ret;
+}
+
struct pf_frame {
struct pf_frame *next;
u32 key;
@@ -896,7 +936,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
u32 *bytes_committed,
u32 *bytes_mapped)
{
- int npages = 0, srcu_key, ret, i, outlen, cur_outlen = 0, depth = 0;
+ int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
struct pf_frame *head = NULL, *frame;
struct mlx5_core_mkey *mmkey;
struct mlx5_ib_mr *mr;
@@ -905,14 +945,14 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
size_t offset;
int ndescs;
- srcu_key = srcu_read_lock(&dev->odp_srcu);
-
io_virt += *bytes_committed;
bcnt -= *bytes_committed;
next_mr:
+ xa_lock(&dev->odp_mkeys);
mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
if (!mmkey) {
+ xa_unlock(&dev->odp_mkeys);
mlx5_ib_dbg(
dev,
"skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
@@ -925,12 +965,15 @@ next_mr:
* faulted.
*/
ret = 0;
- goto srcu_unlock;
+ goto end;
}
+ refcount_inc(&mmkey->usecount);
+ xa_unlock(&dev->odp_mkeys);
+
if (!mkey_is_eq(mmkey, key)) {
mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
ret = -EFAULT;
- goto srcu_unlock;
+ goto end;
}
switch (mmkey->type) {
@@ -939,7 +982,7 @@ next_mr:
ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);
if (ret < 0)
- goto srcu_unlock;
+ goto end;
mlx5_update_odp_stats(mr, faults, ret);
@@ -954,7 +997,7 @@ next_mr:
if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
mlx5_ib_dbg(dev, "indirection level exceeded\n");
ret = -EFAULT;
- goto srcu_unlock;
+ goto end;
}
outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
@@ -965,7 +1008,7 @@ next_mr:
out = kzalloc(outlen, GFP_KERNEL);
if (!out) {
ret = -ENOMEM;
- goto srcu_unlock;
+ goto end;
}
cur_outlen = outlen;
}
@@ -975,7 +1018,7 @@ next_mr:
ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen);
if (ret)
- goto srcu_unlock;
+ goto end;
offset = io_virt - MLX5_GET64(query_mkey_out, out,
memory_key_mkey_entry.start_addr);
@@ -989,7 +1032,7 @@ next_mr:
frame = kzalloc(sizeof(*frame), GFP_KERNEL);
if (!frame) {
ret = -ENOMEM;
- goto srcu_unlock;
+ goto end;
}
frame->key = be32_to_cpu(pklm->key);
@@ -1008,7 +1051,7 @@ next_mr:
default:
mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type);
ret = -EFAULT;
- goto srcu_unlock;
+ goto end;
}
if (head) {
@@ -1021,10 +1064,13 @@ next_mr:
depth = frame->depth;
kfree(frame);
+ mlx5r_deref_odp_mkey(mmkey);
goto next_mr;
}
-srcu_unlock:
+end:
+ if (mmkey)
+ mlx5r_deref_odp_mkey(mmkey);
while (head) {
frame = head;
head = frame->next;
@@ -1032,24 +1078,25 @@ srcu_unlock:
}
kfree(out);
- srcu_read_unlock(&dev->odp_srcu, srcu_key);
*bytes_committed = 0;
return ret ? ret : npages;
}
-/**
+/*
* Parse a series of data segments for page fault handling.
*
- * @pfault contains page fault information.
- * @wqe points at the first data segment in the WQE.
- * @wqe_end points after the end of the WQE.
- * @bytes_mapped receives the number of bytes that the function was able to
- * map. This allows the caller to decide intelligently whether
- * enough memory was mapped to resolve the page fault
- * successfully (e.g. enough for the next MTU, or the entire
- * WQE).
- * @total_wqe_bytes receives the total data size of this WQE in bytes (minus
- * the committed bytes).
+ * @dev: Pointer to mlx5 IB device
+ * @pfault: contains page fault information.
+ * @wqe: points at the first data segment in the WQE.
+ * @wqe_end: points after the end of the WQE.
+ * @bytes_mapped: receives the number of bytes that the function was able to
+ * map. This allows the caller to decide intelligently whether
+ * enough memory was mapped to resolve the page fault
+ * successfully (e.g. enough for the next MTU, or the entire
+ * WQE).
+ * @total_wqe_bytes: receives the total data size of this WQE in bytes (minus
+ * the committed bytes).
+ * @receive_queue: true if this WQE is from the receive queue
*
* Returns the number of pages loaded if positive, zero for an empty WQE, or a
* negative error code.
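The hunk above converts pagefault_single_data_segment() from an SRCU read section to looking the mkey up under the xarray lock, pinning it with a reference before the lock is dropped, and releasing it at the new "end" label. A hedged sketch of that discipline with generic names (the patch itself uses a plain refcount_inc(), since teardown erases the entry before waiting; refcount_inc_not_zero() is the more defensive variant shown here):

	#include <linux/refcount.h>
	#include <linux/xarray.h>

	struct mkey_obj {
		refcount_t usecount;
	};

	/* Look up an entry and take a reference before the lock is dropped. */
	static struct mkey_obj *lookup_mkey(struct xarray *xa, unsigned long index)
	{
		struct mkey_obj *obj;

		xa_lock(xa);
		obj = xa_load(xa, index);
		if (obj && !refcount_inc_not_zero(&obj->usecount))
			obj = NULL;	/* entry is being torn down */
		xa_unlock(xa);
		return obj;		/* caller pairs this with a deref/put */
	}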
@@ -1738,8 +1785,8 @@ static void destroy_prefetch_work(struct prefetch_mr_work *work)
u32 i;
for (i = 0; i < work->num_sge; ++i)
- if (atomic_dec_and_test(&work->frags[i].mr->num_deferred_work))
- wake_up(&work->frags[i].mr->q_deferred_work);
+ mlx5r_deref_odp_mkey(&work->frags[i].mr->mmkey);
+
kvfree(work);
}
@@ -1749,27 +1796,30 @@ get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_core_mkey *mmkey;
- struct ib_umem_odp *odp;
- struct mlx5_ib_mr *mr;
-
- lockdep_assert_held(&dev->odp_srcu);
+ struct mlx5_ib_mr *mr = NULL;
+ xa_lock(&dev->odp_mkeys);
mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
- return NULL;
+ goto end;
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- if (mr->ibmr.pd != pd)
- return NULL;
-
- odp = to_ib_umem_odp(mr->umem);
+ if (mr->ibmr.pd != pd) {
+ mr = NULL;
+ goto end;
+ }
/* prefetch with write-access must be supported by the MR */
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
- !odp->umem.writable)
- return NULL;
+ !mr->umem->writable) {
+ mr = NULL;
+ goto end;
+ }
+ refcount_inc(&mmkey->usecount);
+end:
+ xa_unlock(&dev->odp_mkeys);
return mr;
}
@@ -1777,17 +1827,12 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
{
struct prefetch_mr_work *work =
container_of(w, struct prefetch_mr_work, work);
- struct mlx5_ib_dev *dev;
u32 bytes_mapped = 0;
- int srcu_key;
int ret;
u32 i;
/* We rely on IB/core that work is executed if we have num_sge != 0 only. */
WARN_ON(!work->num_sge);
- dev = mr_to_mdev(work->frags[0].mr);
- /* SRCU should be held when calling to mlx5_odp_populate_xlt() */
- srcu_key = srcu_read_lock(&dev->odp_srcu);
for (i = 0; i < work->num_sge; ++i) {
ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
work->frags[i].length, &bytes_mapped,
@@ -1796,7 +1841,6 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
continue;
mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
}
- srcu_read_unlock(&dev->odp_srcu, srcu_key);
destroy_prefetch_work(work);
}
@@ -1820,9 +1864,6 @@ static bool init_prefetch_work(struct ib_pd *pd,
work->num_sge = i;
return false;
}
-
- /* Keep the MR pointer will valid outside the SRCU */
- atomic_inc(&work->frags[i].mr->num_deferred_work);
}
work->num_sge = num_sge;
return true;
@@ -1833,42 +1874,35 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
u32 pf_flags, struct ib_sge *sg_list,
u32 num_sge)
{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
u32 bytes_mapped = 0;
- int srcu_key;
int ret = 0;
u32 i;
- srcu_key = srcu_read_lock(&dev->odp_srcu);
for (i = 0; i < num_sge; ++i) {
struct mlx5_ib_mr *mr;
mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
- if (!mr) {
- ret = -ENOENT;
- goto out;
- }
+ if (!mr)
+ return -ENOENT;
ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
&bytes_mapped, pf_flags);
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ mlx5r_deref_odp_mkey(&mr->mmkey);
+ return ret;
+ }
mlx5_update_odp_stats(mr, prefetch, ret);
+ mlx5r_deref_odp_mkey(&mr->mmkey);
}
- ret = 0;
-out:
- srcu_read_unlock(&dev->odp_srcu, srcu_key);
- return ret;
+ return 0;
}
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 flags, struct ib_sge *sg_list, u32 num_sge)
{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
u32 pf_flags = 0;
struct prefetch_mr_work *work;
- int srcu_key;
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
@@ -1884,13 +1918,10 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
if (!work)
return -ENOMEM;
- srcu_key = srcu_read_lock(&dev->odp_srcu);
if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
- srcu_read_unlock(&dev->odp_srcu, srcu_key);
destroy_prefetch_work(work);
return -EINVAL;
}
queue_work(system_unbound_wq, &work->work);
- srcu_read_unlock(&dev->odp_srcu, srcu_key);
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 0cb7cc642d87..ec4b3f6a8222 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1078,6 +1078,7 @@ static int _create_kernel_qp(struct mlx5_ib_dev *dev,
qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
MLX5_SET(qpc, qpc, uar_page, uar_index);
+ MLX5_SET(qpc, qpc, ts_format, MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT);
MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
/* Set "fast registration enabled" for all kernel QPs */
@@ -1172,10 +1173,72 @@ static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
sq->flow_rule = NULL;
}
+static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
+{
+ bool fr_supported =
+ MLX5_CAP_GEN(dev->mdev, rq_ts_format) ==
+ MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
+ MLX5_CAP_GEN(dev->mdev, rq_ts_format) ==
+ MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+
+ if (send_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
+ if (!fr_supported) {
+ mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ return MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING;
+ }
+ return MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT;
+}
+
+static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
+{
+ bool fr_supported =
+ MLX5_CAP_GEN(dev->mdev, sq_ts_format) ==
+ MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
+ MLX5_CAP_GEN(dev->mdev, sq_ts_format) ==
+ MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+
+ if (send_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
+ if (!fr_supported) {
+ mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ return MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING;
+ }
+ return MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT;
+}
+
+static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
+ struct mlx5_ib_cq *recv_cq)
+{
+ bool fr_supported =
+ MLX5_CAP_ROCE(dev->mdev, qp_ts_format) ==
+ MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
+ MLX5_CAP_ROCE(dev->mdev, qp_ts_format) ==
+ MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+ int ts_format = MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
+
+ if (recv_cq &&
+ recv_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
+ ts_format = MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING;
+
+ if (send_cq &&
+ send_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
+ ts_format = MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING;
+
+ if (ts_format == MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING &&
+ !fr_supported) {
+ mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ return ts_format;
+}
+
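get_rq_ts_format(), get_sq_ts_format() and get_qp_ts_format() above all encode the same rule: a CQ created with IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION requires free-running timestamp support, otherwise the default format is used. Purely as an illustration (not part of this patch), the shared check could be written once, reusing the driver types already visible in the hunk and taking the per-object format constants from the caller:

	static int get_ts_format_common(struct mlx5_ib_dev *dev,
					struct mlx5_ib_cq *cq, bool fr_supported,
					int fr_fmt, int default_fmt)
	{
		if (cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
			if (!fr_supported) {
				mlx5_ib_dbg(dev, "Free running TS format is not supported\n");
				return -EOPNOTSUPP;
			}
			return fr_fmt;
		}
		return default_fmt;
	}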
static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
struct ib_udata *udata,
struct mlx5_ib_sq *sq, void *qpin,
- struct ib_pd *pd)
+ struct ib_pd *pd, struct mlx5_ib_cq *cq)
{
struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
__be64 *pas;
@@ -1187,6 +1250,11 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
int err;
unsigned int page_offset_quantized;
unsigned long page_size;
+ int ts_format;
+
+ ts_format = get_sq_ts_format(dev, cq);
+ if (ts_format < 0)
+ return ts_format;
sq->ubuffer.umem = ib_umem_get(&dev->ib_dev, ubuffer->buf_addr,
ubuffer->buf_size, 0);
@@ -1215,6 +1283,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, ts_format, ts_format);
MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
MLX5_SET(sqc, sqc, tis_lst_sz, 1);
@@ -1263,7 +1332,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, void *qpin,
- struct ib_pd *pd)
+ struct ib_pd *pd, struct mlx5_ib_cq *cq)
{
struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
__be64 *pas;
@@ -1274,9 +1343,14 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct ib_umem *umem = rq->base.ubuffer.umem;
unsigned int page_offset_quantized;
unsigned long page_size = 0;
+ int ts_format;
size_t inlen;
int err;
+ ts_format = get_rq_ts_format(dev, cq);
+ if (ts_format < 0)
+ return ts_format;
+
page_size = mlx5_umem_find_best_quantized_pgoff(umem, wq, log_wq_pg_sz,
MLX5_ADAPTER_PAGE_SHIFT,
page_offset, 64,
@@ -1296,6 +1370,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
MLX5_SET(rqc, rqc, vsd, 1);
MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, ts_format, ts_format);
MLX5_SET(rqc, rqc, flush_in_error_en, 1);
MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));
@@ -1393,10 +1468,10 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
}
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
- u32 *in, size_t inlen,
- struct ib_pd *pd,
+ u32 *in, size_t inlen, struct ib_pd *pd,
struct ib_udata *udata,
- struct mlx5_ib_create_qp_resp *resp)
+ struct mlx5_ib_create_qp_resp *resp,
+ struct ib_qp_init_attr *init_attr)
{
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
@@ -1415,7 +1490,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (err)
return err;
- err = create_raw_packet_qp_sq(dev, udata, sq, in, pd);
+ err = create_raw_packet_qp_sq(dev, udata, sq, in, pd,
+ to_mcq(init_attr->send_cq));
if (err)
goto err_destroy_tis;
@@ -1437,7 +1513,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING)
rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
- err = create_raw_packet_qp_rq(dev, rq, in, pd);
+ err = create_raw_packet_qp_rq(dev, rq, in, pd,
+ to_mcq(init_attr->recv_cq));
if (err)
goto err_destroy_sq;
@@ -1907,6 +1984,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_cq *recv_cq;
unsigned long flags;
struct mlx5_ib_qp_base *base;
+ int ts_format;
int mlx5_st;
void *qpc;
u32 *in;
@@ -1944,6 +2022,13 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
return -EINVAL;
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ ts_format = get_qp_ts_format(dev, to_mcq(init_attr->send_cq),
+ to_mcq(init_attr->recv_cq));
+ if (ts_format < 0)
+ return ts_format;
+ }
+
err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
&inlen, base, ucmd);
if (err)
@@ -1992,6 +2077,9 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
}
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET)
+ MLX5_SET(qpc, qpc, ts_format, ts_format);
+
MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
if (qp->sq.wqe_cnt) {
@@ -2046,7 +2134,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr;
raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
- &params->resp);
+ &params->resp, init_attr);
} else
err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
@@ -2432,9 +2520,6 @@ static int check_qp_type(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
case MLX5_IB_QPT_HW_GSI:
case IB_QPT_DRIVER:
case IB_QPT_GSI:
- if (dev->profile == &raw_eth_profile)
- goto out;
- fallthrough;
case IB_QPT_RAW_PACKET:
case IB_QPT_UD:
case MLX5_IB_QPT_REG_UMR:
@@ -2629,10 +2714,6 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
int create_flags = attr->create_flags;
bool cond;
- if (qp->type == IB_QPT_UD && dev->profile == &raw_eth_profile)
- if (create_flags & ~MLX5_IB_QP_CREATE_WC_TEST)
- return -EINVAL;
-
if (qp_type == MLX5_IB_QPT_DCT)
return (create_flags) ? -EINVAL : 0;
@@ -3076,6 +3157,8 @@ static int ib_to_mlx5_rate_map(u8 rate)
return 4;
case IB_RATE_50_GBPS:
return 5;
+ case IB_RATE_400_GBPS:
+ return 6;
default:
return rate + MLX5_STAT_RATE_OFFSET;
}
@@ -3183,11 +3266,13 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
alt ? attr->alt_pkey_index : attr->pkey_index);
if (ah_flags & IB_AH_GRH) {
- if (grh->sgid_index >=
- dev->mdev->port_caps[port - 1].gid_table_len) {
+ const struct ib_port_immutable *immutable;
+
+ immutable = ib_port_immutable_read(&dev->ib_dev, port);
+ if (grh->sgid_index >= immutable->gid_tbl_len) {
pr_err("sgid_index (%u) too large. max is %d\n",
grh->sgid_index,
- dev->mdev->port_caps[port - 1].gid_table_len);
+ immutable->gid_tbl_len);
return -EINVAL;
}
}
@@ -4211,6 +4296,23 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return 0;
}
+static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ enum ib_qp_type qp_type)
+{
+ if (dev->profile != &raw_eth_profile)
+ return true;
+
+ if (qp_type == IB_QPT_RAW_PACKET || qp_type == MLX5_IB_QPT_REG_UMR)
+ return true;
+
+ /* Internal QP used for wc testing, with NOPs in wq */
+ if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST)
+ return true;
+
+ return false;
+}
+
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
@@ -4221,7 +4323,9 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
enum ib_qp_type qp_type;
enum ib_qp_state cur_state, new_state;
int err = -EINVAL;
- int port;
+
+ if (!mlx5_ib_modify_qp_allowed(dev, qp, ibqp->qp_type))
+ return -EOPNOTSUPP;
if (attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
return -EOPNOTSUPP;
@@ -4263,10 +4367,6 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) {
- port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- }
-
if (qp->flags & IB_QP_CREATE_SOURCE_QPN) {
if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
@@ -4295,14 +4395,10 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
}
- if (attr_mask & IB_QP_PKEY_INDEX) {
- port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- if (attr->pkey_index >=
- dev->mdev->port_caps[port - 1].pkey_table_len) {
- mlx5_ib_dbg(dev, "invalid pkey index %d\n",
- attr->pkey_index);
- goto out;
- }
+ if ((attr_mask & IB_QP_PKEY_INDEX) &&
+ attr->pkey_index >= dev->pkey_table_len) {
+ mlx5_ib_dbg(dev, "invalid pkey index %d\n", attr->pkey_index);
+ goto out;
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
@@ -5376,7 +5472,7 @@ void mlx5_ib_drain_rq(struct ib_qp *qp)
handle_drain_completion(cq, &rdrain, dev);
}
-/**
+/*
* Bind a qp to a counter. If @counter is NULL then bind the qp to
* the default counter
*/
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
index d6038fb6c50c..cf2852cba45c 100644
--- a/drivers/infiniband/hw/mlx5/wr.c
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -1369,7 +1369,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
handle_qpt_uc(wr, &seg, &size);
break;
case IB_QPT_SMI:
- if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) {
+ if (unlikely(!dev->port_caps[qp->port - 1].has_smi)) {
mlx5_ib_warn(dev, "Send SMP MADs is not allowed\n");
err = -EPERM;
*bad_wr = wr;
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 9dde70373a55..3cb4febaad0f 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -617,18 +617,18 @@ static inline bool qedr_qp_has_srq(struct qedr_qp *qp)
static inline bool qedr_qp_has_sq(struct qedr_qp *qp)
{
if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_XRC_TGT)
- return 0;
+ return false;
- return 1;
+ return true;
}
static inline bool qedr_qp_has_rq(struct qedr_qp *qp)
{
if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_XRC_INI ||
qp->qp_type == IB_QPT_XRC_TGT || qedr_qp_has_srq(qp))
- return 0;
+ return false;
- return 1;
+ return true;
}
static inline struct qedr_user_mmap_entry *
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.c b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
index f5542d703ef9..13e5e6bbec99 100644
--- a/drivers/infiniband/hw/qedr/qedr_roce_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
@@ -586,8 +586,8 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
qedr_inc_sw_prod(&qp->sq);
DP_DEBUG(qp->dev, QEDR_MSG_GSI,
- "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
- wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
+ "gsi post send: opcode=%d, wr_id=%llx\n", wr->opcode,
+ wr->wr_id);
} else {
DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
rc = -EAGAIN;
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 92eeea5679e2..84fc4dcc5399 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -151,7 +151,7 @@ int qib_count_units(int *npresentp, int *nupp)
/**
* qib_wait_linkstate - wait for an IB link state change to occur
- * @dd: the qlogic_ib device
+ * @ppd: the qlogic_ib device
* @state: the state to wait for
* @msecs: the number of milliseconds to wait
*
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
index 5838b3bf34b9..bf660c001b6d 100644
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -47,7 +47,7 @@
* qib_eeprom_read - receives bytes from the eeprom via I2C
* @dd: the qlogic_ib device
* @eeprom_offset: address to read from
- * @buffer: where to store result
+ * @buff: where to store result
* @len: number of bytes to receive
*/
int qib_eeprom_read(struct qib_devdata *dd, u8 eeprom_offset,
@@ -94,7 +94,7 @@ static int eeprom_write_with_enable(struct qib_devdata *dd, u8 offset,
* qib_eeprom_write - writes data to the eeprom via I2C
* @dd: the qlogic_ib device
* @eeprom_offset: where to place data
- * @buffer: data to write
+ * @buff: data to write
* @len: number of bytes to write
*/
int qib_eeprom_write(struct qib_devdata *dd, u8 eeprom_offset,
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 44150be215bf..b35e1174be22 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1223,7 +1223,7 @@ static void qib_set_ib_6120_lstate(struct qib_pportdata *ppd, u16 linkcmd,
/**
* qib_6120_bringup_serdes - bring up the serdes
- * @dd: the qlogic_ib device
+ * @ppd: the qlogic_ib device
*/
static int qib_6120_bringup_serdes(struct qib_pportdata *ppd)
{
@@ -1412,7 +1412,7 @@ static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
/**
* qib_6120_setup_setextled - set the state of the two external LEDs
- * @dd: the qlogic_ib device
+ * @ppd: the qlogic_ib device
* @on: whether the link is up or not
*
* The exact combo of LEDs if on is true is determined by looking
@@ -1823,7 +1823,7 @@ bail:
* qib_6120_put_tid - write a TID in chip
* @dd: the qlogic_ib device
* @tidptr: pointer to the expected TID (in chip) to update
- * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
+ * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
* for expected
* @pa: physical address of in memory buffer; tidinvalid if freeing
*
@@ -1890,7 +1890,7 @@ static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
* qib_6120_put_tid_2 - write a TID in chip, Revision 2 or higher
* @dd: the qlogic_ib device
* @tidptr: pointer to the expected TID (in chip) to update
- * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
+ * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
* for expected
* @pa: physical address of in memory buffer; tidinvalid if freeing
*
@@ -1932,7 +1932,7 @@ static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
/**
* qib_6120_clear_tids - clear all TID entries for a context, expected and eager
* @dd: the qlogic_ib device
- * @ctxt: the context
+ * @rcd: the context
*
* clear all TID entries for a context, expected and eager.
* Used from qib_close(). On this chip, TIDs are only 32 bits,
@@ -2008,7 +2008,7 @@ int __attribute__((weak)) qib_unordered_wc(void)
/**
* qib_6120_get_base_info - set chip-specific flags for user code
* @rcd: the qlogic_ib ctxt
- * @kbase: qib_base_info pointer
+ * @kinfo: qib_base_info pointer
*
* We set the PCIE flag because the lower bandwidth on PCIe vs
* HyperTransport can affect some user packet algorithms.
@@ -2270,8 +2270,8 @@ static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op)
/**
* qib_portcntr_6120 - read a per-port counter
- * @dd: the qlogic_ib device
- * @creg: the counter to snapshot
+ * @ppd: the qlogic_ib device
+ * @reg: the counter to snapshot
*/
static u64 qib_portcntr_6120(struct qib_pportdata *ppd, u32 reg)
{
@@ -2610,7 +2610,7 @@ static void qib_chk_6120_errormask(struct qib_devdata *dd)
/**
* qib_get_faststats - get word counters from chip before they overflow
- * @opaque - contains a pointer to the qlogic_ib device qib_devdata
+ * @t: contains a pointer to the qlogic_ib device qib_devdata
*
* This needs more work; in particular, decision on whether we really
* need traffic_wds done the way it is
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 0a6f26d4cb31..229dcd6ead95 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1701,7 +1701,7 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
/**
* qib_setup_7220_setextled - set the state of the two external LEDs
- * @dd: the qlogic_ib device
+ * @ppd: the qlogic_ib device
* @on: whether the link is up or not
*
* The exact combo of LEDs if on is true is determined by looking
@@ -2146,7 +2146,7 @@ bail:
* qib_7220_put_tid - write a TID to the chip
* @dd: the qlogic_ib device
* @tidptr: pointer to the expected TID (in chip) to update
- * @tidtype: 0 for eager, 1 for expected
+ * @type: 0 for eager, 1 for expected
* @pa: physical address of in memory buffer; tidinvalid if freeing
*/
static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
@@ -2180,7 +2180,7 @@ static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
/**
* qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
* @dd: the qlogic_ib device
- * @ctxt: the ctxt
+ * @rcd: the ctxt
*
* clear all TID entries for a ctxt, expected and eager.
* Used from qib_close(). On this chip, TIDs are only 32 bits,
@@ -2238,7 +2238,7 @@ static void qib_7220_tidtemplate(struct qib_devdata *dd)
/**
* qib_init_7220_get_base_info - set chip-specific flags for user code
* @rcd: the qlogic_ib ctxt
- * @kbase: qib_base_info pointer
+ * @kinfo: qib_base_info pointer
*
* We set the PCIE flag because the lower bandwidth on PCIe vs
 * HyperTransport can affect some user packet algorithms.
@@ -2896,8 +2896,8 @@ static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op)
/**
* qib_portcntr_7220 - read a per-port counter
- * @dd: the qlogic_ib device
- * @creg: the counter to snapshot
+ * @ppd: the qlogic_ib device
+ * @reg: the counter to snapshot
*/
static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg)
{
@@ -3232,7 +3232,7 @@ done:
/**
* qib_get_7220_faststats - get word counters from chip before they overflow
- * @opaque - contains a pointer to the qlogic_ib device qib_devdata
+ * @t: contains a pointer to the qlogic_ib device qib_devdata
*
* This needs more work; in particular, decision on whether we really
* need traffic_wds done the way it is
@@ -4468,7 +4468,7 @@ static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
/**
* qib_init_iba7220_funcs - set up the chip-specific function pointers
- * @dev: the pci_dev for qlogic_ib device
+ * @pdev: the pci_dev for qlogic_ib device
* @ent: pci_device_id struct for this dev
*
* This is global, and is called directly at init to set up the
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 189a0ce6056a..9fe6ea75b45e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2514,7 +2514,7 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
/**
* qib_7322_quiet_serdes - set serdes to txidle
- * @dd: the qlogic_ib device
+ * @ppd: the qlogic_ib device
* Called when driver is being unloaded
*/
static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
@@ -3760,7 +3760,7 @@ bail:
* qib_7322_put_tid - write a TID to the chip
* @dd: the qlogic_ib device
* @tidptr: pointer to the expected TID (in chip) to update
- * @tidtype: 0 for eager, 1 for expected
+ * @type: 0 for eager, 1 for expected
* @pa: physical address of in memory buffer; tidinvalid if freeing
*/
static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
@@ -3796,7 +3796,7 @@ static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
/**
* qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
* @dd: the qlogic_ib device
- * @ctxt: the ctxt
+ * @rcd: the ctxt
*
* clear all TID entries for a ctxt, expected and eager.
* Used from qib_close().
@@ -3861,7 +3861,7 @@ static void qib_7322_tidtemplate(struct qib_devdata *dd)
/**
* qib_init_7322_get_base_info - set chip-specific flags for user code
* @rcd: the qlogic_ib ctxt
- * @kbase: qib_base_info pointer
+ * @kinfo: qib_base_info pointer
*
* We set the PCIE flag because the lower bandwidth on PCIe vs
 * HyperTransport can affect some user packet algorithms.
@@ -4724,7 +4724,7 @@ static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
/**
* qib_portcntr_7322 - read a per-port chip counter
* @ppd: the qlogic_ib pport
- * @creg: the counter to read (not a chip offset)
+ * @reg: the counter to read (not a chip offset)
*/
static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
{
@@ -5096,7 +5096,7 @@ done:
/**
* qib_get_7322_faststats - get word counters from chip before they overflow
- * @opaque - contains a pointer to the qlogic_ib device qib_devdata
+ * @t: contains a pointer to the qlogic_ib device qib_devdata
*
* VESTIGIAL IBA7322 has no "small fast counters", so the only
* real purpose of this function is to maintain the notion of
@@ -7175,7 +7175,7 @@ static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
/**
* qib_init_iba7322_funcs - set up the chip-specific function pointers
- * @dev: the pci_dev for qlogic_ib device
+ * @pdev: the pci_dev for qlogic_ib device
* @ent: pci_device_id struct for this dev
*
* Also allocates, inits, and returns the devdata struct for this
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index 65c3b964ad1b..85c3187d796d 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -40,9 +40,9 @@
/**
* qib_format_hwmsg - format a single hwerror message
- * @msg message buffer
- * @msgl length of message buffer
- * @hwmsg message to add to message buffer
+ * @msg: message buffer
+ * @msgl: length of message buffer
+ * @hwmsg: message to add to message buffer
*/
static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
{
@@ -53,11 +53,11 @@ static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
/**
* qib_format_hwerrors - format hardware error messages for display
- * @hwerrs hardware errors bit vector
- * @hwerrmsgs hardware error descriptions
- * @nhwerrmsgs number of hwerrmsgs
- * @msg message buffer
- * @msgl message buffer length
+ * @hwerrs: hardware errors bit vector
+ * @hwerrmsgs: hardware error descriptions
+ * @nhwerrmsgs: number of hwerrmsgs
+ * @msg: message buffer
+ * @msgl: message buffer length
*/
void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs,
size_t nhwerrmsgs, char *msg, size_t msgl)
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index f83e331977f8..44e2f813024a 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -886,7 +886,7 @@ done:
/**
 * rm_pkey - decrement the reference count for the given PKEY
- * @dd: the qlogic_ib device
+ * @ppd: the qlogic_ib device
* @key: the PKEY index
*
* Return true if this was the last reference and the hardware table entry
@@ -916,7 +916,7 @@ bail:
/**
* add_pkey - add the given PKEY to the hardware table
- * @dd: the qlogic_ib device
+ * @ppd: the qlogic_ib device
* @key: the PKEY
*
* Return an error code if unable to add the entry, zero if no change,
@@ -2346,8 +2346,10 @@ static int process_cc(struct ib_device *ibdev, int mad_flags,
* @port: the port number this packet came in on
* @in_wc: the work completion entry for this packet
* @in_grh: the global route header for this packet
- * @in_mad: the incoming MAD
- * @out_mad: any outgoing MAD reply
+ * @in: the incoming MAD
+ * @out: any outgoing MAD reply
+ * @out_mad_size: size of the outgoing MAD reply
+ * @out_mad_pkey_index: unused
*
* Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
* interested in processing.
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 2e07b3749b88..cb2a02d671e2 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -181,7 +181,7 @@ void qib_pcie_ddcleanup(struct qib_devdata *dd)
pci_set_drvdata(dd->pcidev, NULL);
}
-/**
+/*
* We save the msi lo and hi values, so we can restore them after
* chip reset (the kernel PCI infrastructure doesn't yet handle that
* correctly.
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 8d0563ef5be1..ca39a029e4af 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -207,7 +207,7 @@ bail:
return ret;
}
-/**
+/*
* qib_free_all_qps - check for QPs still in use
*/
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
@@ -376,9 +376,9 @@ void qib_flush_qp_waiters(struct rvt_qp *qp)
/**
* qib_check_send_wqe - validate wr/wqe
- * @qp - The qp
- * @wqe - The built wqe
- * @call_send - Determine if the send should be posted or scheduled
+ * @qp: The qp
+ * @wqe: The built wqe
+ * @call_send: Determine if the send should be posted or scheduled
*
* Returns 0 on success, -EINVAL on failure
*/
@@ -418,8 +418,8 @@ static const char * const qp_type_str[] = {
/**
* qib_qp_iter_print - print information to seq_file
- * @s - the seq_file
- * @iter - the iterator
+ * @s: the seq_file
+ * @iter: the iterator
*/
void qib_qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
{
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 3915e5b4a9bc..a1c20ffb4490 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -207,6 +207,7 @@ bail:
/**
* qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
* @qp: a pointer to the QP
+ * @flags: unused
*
* Assumes the s_lock is held.
*
@@ -992,7 +993,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
return wqe;
}
-/**
+/*
* do_rc_ack - process an incoming RC ACK
* @qp: the QP the ACK came in on
* @psn: the packet sequence number of the ACK
@@ -1259,6 +1260,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
* @psn: the packet sequence number for this packet
* @hdrsize: the header length
* @pmtu: the path MTU
+ * @rcd: the context pointer
*
* This is called from qib_rc_rcv() to process an incoming RC response
* packet for the given QP.
@@ -1480,6 +1482,7 @@ bail:
* @opcode: the opcode for this packet
* @psn: the packet sequence number for this packet
* @diff: the difference between the PSN and the expected PSN
+ * @rcd: the context pointer
*
* This is called from qib_rc_rcv() to process an unexpected
* incoming RC packet for the given QP.
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c
index f5698664419b..97b8a2bf5c69 100644
--- a/drivers/infiniband/hw/qib/qib_twsi.c
+++ b/drivers/infiniband/hw/qib/qib_twsi.c
@@ -168,6 +168,7 @@ static void stop_cmd(struct qib_devdata *dd);
/**
* rd_byte - read a byte, sending STOP on last, else ACK
* @dd: the qlogic_ib device
+ * @last: identifies the last read
*
* Returns byte shifted out of device
*/
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index 29785eb84646..6a8148851f21 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -377,6 +377,7 @@ void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
* @start: the starting send buffer number
* @len: the number of send buffers
* @avail: true if the buffers are available for kernel use, false otherwise
+ * @rcd: the context pointer
*/
void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
unsigned len, u32 avail, struct qib_ctxtdata *rcd)
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 554af4273a13..8e2bda77d8b9 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -40,6 +40,7 @@
/**
* qib_make_uc_req - construct a request packet (SEND, RDMA write)
* @qp: a pointer to the QP
+ * @flags: unused
*
* Assumes the s_lock is held.
*
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 93ca21347959..81eda94bd279 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -222,6 +222,7 @@ drop:
/**
* qib_make_ud_req - construct a UD request packet
* @qp: the QP
+ * @flags: flags to modify and pass back to caller
*
* Assumes the s_lock is held.
*
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 4c24e83f3175..5d6cf7427431 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -43,7 +43,7 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages,
unpin_user_pages_dirty_lock(p, num_pages, dirty);
}
-/**
+/*
* qib_map_page - a safety wrapper around pci_map_page()
*
* A dma_addr of all 0's is interpreted by the chip as "disabled".
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index f6c01bad5a74..8e0de265ad57 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1067,7 +1067,7 @@ bail:
/**
* qib_get_counters - get various chip counters
- * @dd: the qlogic_ib device
+ * @ppd: the qlogic_ib device
* @cntrs: counters are placed here
*
* Return the counters needed by recv_pma_get_portcounters().
@@ -1675,7 +1675,7 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
/**
* _qib_schedule_send - schedule progress
- * @qp - the qp
+ * @qp: the qp
*
* This schedules progress w/o regard to the s_flags.
*
@@ -1694,7 +1694,7 @@ bool _qib_schedule_send(struct rvt_qp *qp)
/**
* qib_schedule_send - schedule progress
- * @qp - the qp
+ * @qp: the qp
*
* This schedules qp progress. The s_lock
* should be held.
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index e59615a4c9d9..586b0e52ba7f 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -214,7 +214,7 @@ static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
struct usnic_vnic_res *vnic_res;
int len;
- len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu ",
+ len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu",
qp_grp->ibqp.qp_num,
usnic_ib_qp_grp_state_to_string(qp_grp->state),
qp_grp->owner_pid,
@@ -224,14 +224,13 @@ static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
res_chunk = qp_grp->res_chunk_list[i];
for (j = 0; j < res_chunk->cnt; j++) {
vnic_res = res_chunk->res[j];
- len += sysfs_emit_at(
- buf, len, "%s[%d] ",
+ len += sysfs_emit_at(buf, len, " %s[%d]",
usnic_vnic_res_type_to_str(vnic_res->type),
vnic_res->vnic_idx);
}
}
- len = sysfs_emit_at(buf, len, "\n");
+ len += sysfs_emit_at(buf, len, "\n");
return len;
}
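The fix above matters because sysfs_emit() writes at the start of the PAGE_SIZE sysfs buffer and returns a byte count, while each subsequent chunk must go through sysfs_emit_at(buf, len, ...) and be added to len; assigning instead of adding, as the old code did for the final newline, discards everything emitted before it. A minimal show() callback following the corrected pattern, with made-up attribute and field names:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t items_show(struct device *dev, struct device_attribute *attr,
				  char *buf)
	{
		ssize_t len;
		int i;

		len = sysfs_emit(buf, "items:");
		for (i = 0; i < 3; i++)
			len += sysfs_emit_at(buf, len, " %d", i);
		len += sysfs_emit_at(buf, len, "\n");
		return len;
	}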
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index c142f5e7f25f..de57f2fed743 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -509,6 +509,20 @@ static inline int ib_send_flags_to_pvrdma(int flags)
return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
}
+static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
+{
+ switch (type) {
+ case PVRDMA_NETWORK_ROCE_V1:
+ return RDMA_NETWORK_ROCE_V1;
+ case PVRDMA_NETWORK_IPV4:
+ return RDMA_NETWORK_IPV4;
+ case PVRDMA_NETWORK_IPV6:
+ return RDMA_NETWORK_IPV6;
+ default:
+ return RDMA_NETWORK_IPV6;
+ }
+}
+
void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
const struct pvrdma_qp_cap *src);
void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index a119ac3e103c..6aa40bd2fd52 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -367,7 +367,7 @@ retry:
wc->dlid_path_bits = cqe->dlid_path_bits;
wc->port_num = cqe->port_num;
wc->vendor_err = cqe->vendor_err;
- wc->network_hdr_type = cqe->network_hdr_type;
+ wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type);
/* Update shared ring state */
pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 00a330909bb3..4b6019e7de67 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -474,7 +474,6 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
sizeof(struct pvrdma_cqne);
unsigned int head;
- unsigned long flags;
dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");
@@ -483,11 +482,11 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
struct pvrdma_cq *cq;
cqne = get_cqne(dev, head);
- spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+ spin_lock(&dev->cq_tbl_lock);
cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
if (cq)
refcount_inc(&cq->refcnt);
- spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+ spin_unlock(&dev->cq_tbl_lock);
if (cq && cq->ibcq.comp_handler)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
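The switch from spin_lock_irqsave() to spin_lock() above leans on pvrdma_intrx_handler() running in hard-IRQ context, where interrupts are already disabled on the local CPU, so saving and restoring the flags word is redundant. A generic sketch of the pattern, with hypothetical names:

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	struct example_dev {
		spinlock_t tbl_lock;
	};

	static irqreturn_t example_irq_handler(int irq, void *dev_id)
	{
		struct example_dev *dev = dev_id;

		/* Hard-IRQ context: interrupts are off, plain spin_lock() suffices. */
		spin_lock(&dev->tbl_lock);
		/* ... look up and reference the object signalled by the event ... */
		spin_unlock(&dev->tbl_lock);

		return IRQ_HANDLED;
	}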
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 20cc0799ac4b..5138afca067f 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -371,7 +371,7 @@ int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
return ret;
}
-/**
+/*
* rvt_resize_cq - change the size of the CQ
* @ibcq: the completion queue
*
diff --git a/drivers/infiniband/sw/rdmavt/mad.c b/drivers/infiniband/sw/rdmavt/mad.c
index 108c71e3ac23..fa5be13a4394 100644
--- a/drivers/infiniband/sw/rdmavt/mad.c
+++ b/drivers/infiniband/sw/rdmavt/mad.c
@@ -56,8 +56,11 @@
* @port_num: the port number this packet came in on, 1 based from ib core
* @in_wc: the work completion entry for this packet
* @in_grh: the global route header for this packet
- * @in_mad: the incoming MAD
- * @out_mad: any outgoing MAD reply
+ * @in: the incoming MAD
+ * @in_mad_size: size of the incoming MAD
+ * @out: any outgoing MAD reply
+ * @out_mad_size: size of the outgoing MAD reply
+ * @out_mad_pkey_index: unused
*
* Note that the verbs framework has already done the MAD sanity checks,
* and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
diff --git a/drivers/infiniband/sw/rdmavt/mcast.c b/drivers/infiniband/sw/rdmavt/mcast.c
index 5233a63d99a6..951abac13dbb 100644
--- a/drivers/infiniband/sw/rdmavt/mcast.c
+++ b/drivers/infiniband/sw/rdmavt/mcast.c
@@ -180,7 +180,7 @@ struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
}
EXPORT_SYMBOL(rvt_mcast_find);
-/**
+/*
* rvt_mcast_add - insert mcast GID into table and attach QP struct
* @mcast: the mcast GID table
* @mqp: the QP to attach
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 90fc234f489a..601d18dda1f5 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -369,6 +369,7 @@ bail:
* @pd: protection domain for this memory region
* @start: starting userspace address
* @length: length of region to register
+ * @virt_addr: associated virtual address
* @mr_access_flags: access flags for this memory region
* @udata: unused by the driver
*
@@ -438,8 +439,8 @@ bail_umem:
/**
* rvt_dereg_clean_qp_cb - callback from iterator
- * @qp - the qp
- * @v - the mregion (as u64)
+ * @qp: the qp
+ * @v: the mregion (as u64)
*
* This routine fields the callback for all QPs and
* for QPs in the same PD as the MR will call the
@@ -457,7 +458,7 @@ static void rvt_dereg_clean_qp_cb(struct rvt_qp *qp, u64 v)
/**
* rvt_dereg_clean_qps - find QPs for reference cleanup
- * @mr - the MR that is being deregistered
+ * @mr: the MR that is being deregistered
*
* This routine iterates RC QPs looking for references
* to the lkey noted in mr.
@@ -471,8 +472,8 @@ static void rvt_dereg_clean_qps(struct rvt_mregion *mr)
/**
* rvt_check_refs - check references
- * @mr - the megion
- * @t - the caller identification
+ * @mr: the mregion
+ * @t: the caller identification
*
* This routine checks MRs holding a reference during
* when being de-registered.
@@ -506,8 +507,8 @@ static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
/**
* rvt_mr_has_lkey - is MR
- * @mr - the mregion
- * @lkey - the lkey
+ * @mr: the mregion
+ * @lkey: the lkey
*/
bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey)
{
@@ -516,8 +517,8 @@ bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey)
/**
* rvt_ss_has_lkey - is mr in sge tests
- * @ss - the sge state
- * @lkey
+ * @ss: the sge state
+ * @lkey: the lkey
*
* This code tests for an MR in the indicated
* sge state.
@@ -540,7 +541,7 @@ bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey)
/**
* rvt_dereg_mr - unregister and free a memory region
* @ibmr: the memory region to free
- *
+ * @udata: unused by the driver
*
* Note that this is called to free MRs created by rvt_get_dma_mr()
* or rvt_reg_user_mr().
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 22fa9bde5419..9d13db68283c 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -156,7 +156,7 @@ void rvt_wss_exit(struct rvt_dev_info *rdi)
rdi->wss = NULL;
}
-/**
+/*
* rvt_wss_init - Init wss data structures
*
* Return: 0 on success
@@ -323,6 +323,7 @@ static void get_map_page(struct rvt_qpn_table *qpt,
/**
* init_qpn_table - initialize the QP number table for a device
+ * @rdi: rvt dev struct
* @qpt: the QPN table
*/
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
@@ -524,6 +525,7 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
* IB_QPT_SMI/IB_QPT_GSI
* @rdi: rvt device info structure
* @qpt: queue pair number table pointer
+ * @type: the QP type
* @port_num: IB port number, 1 based, comes from core
* @exclude_prefix: prefix of special queue pair number being allocated
*
@@ -655,8 +657,8 @@ static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
/**
* rvt_swqe_has_lkey - return true if lkey is used by swqe
- * @wqe - the send wqe
- * @lkey - the lkey
+ * @wqe: the send wqe
+ * @lkey: the lkey
*
* Test the swqe for using lkey
*/
@@ -675,8 +677,8 @@ static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
/**
* rvt_qp_sends_has_lkey - return true is qp sends use lkey
- * @qp - the rvt_qp
- * @lkey - the lkey
+ * @qp: the rvt_qp
+ * @lkey: the lkey
*/
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
{
@@ -699,8 +701,8 @@ static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
/**
* rvt_qp_acks_has_lkey - return true if acks have lkey
- * @qp - the qp
- * @lkey - the lkey
+ * @qp: the qp
+ * @lkey: the lkey
*/
static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
{
@@ -716,10 +718,10 @@ static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
return false;
}
-/*
+/**
* rvt_qp_mr_clean - clean up remote ops for lkey
- * @qp - the qp
- * @lkey - the lkey that is being de-registered
+ * @qp: the qp
+ * @lkey: the lkey that is being de-registered
*
* This routine checks if the lkey is being used by
* the qp.
@@ -853,6 +855,7 @@ bail:
/**
* rvt_init_qp - initialize the QP state to the reset state
+ * @rdi: rvt dev struct
* @qp: the QP to init or reinit
* @type: the QP type
*
@@ -907,6 +910,7 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
/**
* _rvt_reset_qp - initialize the QP state to the reset state
+ * @rdi: rvt dev struct
* @qp: the QP to reset
* @type: the QP type
*
@@ -1726,6 +1730,7 @@ inval:
/**
* rvt_destroy_qp - destroy a queue pair
* @ibqp: the queue pair to destroy
+ * @udata: unused by the driver
*
* Note that this can be called while the QP is actively sending or
* receiving!
@@ -1901,9 +1906,9 @@ int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
/**
* rvt_qp_valid_operation - validate post send wr request
- * @qp - the qp
- * @post-parms - the post send table for the driver
- * @wr - the work request
+ * @qp: the qp
+ * @post_parms: the post send table for the driver
+ * @wr: the work request
*
* The routine validates the operation based on the
 * validation table and returns the length of the operation
@@ -2013,6 +2018,7 @@ static inline int rvt_qp_is_avail(
* rvt_post_one_wr - post one RC, UC, or UD send work request
* @qp: the QP to post on
* @wr: the work request to send
+ * @call_send: kick the send engine into gear
*/
static int rvt_post_one_wr(struct rvt_qp *qp,
const struct ib_send_wr *wr,
@@ -2612,7 +2618,7 @@ EXPORT_SYMBOL(rvt_stop_rc_timers);
/**
* rvt_stop_rnr_timer - stop an rnr timer
- * @qp - the QP
+ * @qp: the QP
*
* stop an rnr timer and return if the timer
* had been pending.
diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
index 64d98bf238ab..2a7c2f12d372 100644
--- a/drivers/infiniband/sw/rdmavt/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -67,7 +67,7 @@ void rvt_driver_srq_init(struct rvt_dev_info *rdi)
/**
* rvt_create_srq - create a shared receive queue
- * @ibpd: the protection domain of the SRQ to create
+ * @ibsrq: the SRQ to create
* @srq_init_attr: the attributes of the SRQ
* @udata: data from libibverbs when creating a user SRQ
*
@@ -311,7 +311,8 @@ bail_free:
return ret;
}
-/** rvt_query_srq - query srq data
+/**
+ * rvt_query_srq - query srq data
* @ibsrq: srq to query
* @attr: return info in attr
*
@@ -330,7 +331,7 @@ int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
/**
 * rvt_destroy_srq - destroy an srq
* @ibsrq: srq object to destroy
- *
+ * @udata: user data for libibverbs.so
*/
int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 49cec85a372a..8fd0128a9336 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -294,7 +294,7 @@ static int rvt_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
/**
* rvt_dealloc_ucontext - Free a user context
- * @context - Free this
+ * @context: Unused
*/
static void rvt_dealloc_ucontext(struct ib_ucontext *context)
{
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index 452149066792..06b8dc5093f7 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -4,6 +4,7 @@ config RDMA_RXE
depends on INET && PCI && INFINIBAND
depends on INFINIBAND_VIRT_DMA
select NET_UDP_TUNNEL
+ select CRYPTO
select CRYPTO_CRC32
help
This driver implements the InfiniBand RDMA transport over
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 0a1e6393250b..17a361b8dbb1 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -515,6 +515,7 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
while ((skb = skb_dequeue(&qp->resp_pkts))) {
rxe_drop_ref(qp);
kfree_skb(skb);
+ ib_device_put(qp->ibqp.device);
}
while ((wqe = queue_head(qp->sq.queue))) {
@@ -527,6 +528,17 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
}
}
+static void free_pkt(struct rxe_pkt_info *pkt)
+{
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
+ struct rxe_qp *qp = pkt->qp;
+ struct ib_device *dev = qp->ibqp.device;
+
+ kfree_skb(skb);
+ rxe_drop_ref(qp);
+ ib_device_put(dev);
+}
+
int rxe_completer(void *arg)
{
struct rxe_qp *qp = (struct rxe_qp *)arg;
@@ -535,6 +547,7 @@ int rxe_completer(void *arg)
struct sk_buff *skb = NULL;
struct rxe_pkt_info *pkt = NULL;
enum comp_state state;
+ int ret = 0;
rxe_add_ref(qp);
@@ -542,7 +555,8 @@ int rxe_completer(void *arg)
qp->req.state == QP_STATE_RESET) {
rxe_drain_resp_pkts(qp, qp->valid &&
qp->req.state == QP_STATE_ERROR);
- goto exit;
+ ret = -EAGAIN;
+ goto done;
}
if (qp->comp.timeout) {
@@ -552,8 +566,10 @@ int rxe_completer(void *arg)
qp->comp.timeout_retry = 0;
}
- if (qp->req.need_retry)
- goto exit;
+ if (qp->req.need_retry) {
+ ret = -EAGAIN;
+ goto done;
+ }
state = COMPST_GET_ACK;
@@ -624,11 +640,6 @@ int rxe_completer(void *arg)
break;
case COMPST_DONE:
- if (pkt) {
- rxe_drop_ref(pkt->qp);
- kfree_skb(skb);
- skb = NULL;
- }
goto done;
case COMPST_EXIT:
@@ -651,7 +662,8 @@ int rxe_completer(void *arg)
qp->qp_timeout_jiffies)
mod_timer(&qp->retrans_timer,
jiffies + qp->qp_timeout_jiffies);
- goto exit;
+ ret = -EAGAIN;
+ goto done;
case COMPST_ERROR_RETRY:
/* we come here if the retry timer fired and we did
@@ -663,22 +675,18 @@ int rxe_completer(void *arg)
*/
/* there is nothing to retry in this case */
- if (!wqe || (wqe->state == wqe_state_posted))
- goto exit;
+ if (!wqe || (wqe->state == wqe_state_posted)) {
+ pr_warn("Retry attempted without a valid wqe\n");
+ ret = -EAGAIN;
+ goto done;
+ }
/* if we've started a retry, don't start another
* retry sequence, unless this is a timeout.
*/
if (qp->comp.started_retry &&
- !qp->comp.timeout_retry) {
- if (pkt) {
- rxe_drop_ref(pkt->qp);
- kfree_skb(skb);
- skb = NULL;
- }
-
+ !qp->comp.timeout_retry)
goto done;
- }
if (qp->comp.retry_cnt > 0) {
if (qp->comp.retry_cnt != 7)
@@ -699,13 +707,6 @@ int rxe_completer(void *arg)
qp->comp.started_retry = 1;
rxe_run_task(&qp->req.task, 0);
}
-
- if (pkt) {
- rxe_drop_ref(pkt->qp);
- kfree_skb(skb);
- skb = NULL;
- }
-
goto done;
} else {
@@ -726,10 +727,8 @@ int rxe_completer(void *arg)
mod_timer(&qp->rnr_nak_timer,
jiffies + rnrnak_jiffies(aeth_syn(pkt)
& ~AETH_TYPE_MASK));
- rxe_drop_ref(pkt->qp);
- kfree_skb(skb);
- skb = NULL;
- goto exit;
+ ret = -EAGAIN;
+ goto done;
} else {
rxe_counter_inc(rxe,
RXE_CNT_RNR_RETRY_EXCEEDED);
@@ -742,30 +741,15 @@ int rxe_completer(void *arg)
WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
do_complete(qp, wqe);
rxe_qp_error(qp);
-
- if (pkt) {
- rxe_drop_ref(pkt->qp);
- kfree_skb(skb);
- skb = NULL;
- }
-
- goto exit;
+ ret = -EAGAIN;
+ goto done;
}
}
-exit:
- /* we come here if we are done with processing and want the task to
- * exit from the loop calling us
- */
- WARN_ON_ONCE(skb);
- rxe_drop_ref(qp);
- return -EAGAIN;
-
done:
- /* we come here if we have processed a packet we want the task to call
- * us again to see if there is anything else to do
- */
- WARN_ON_ONCE(skb);
+ if (pkt)
+ free_pkt(pkt);
rxe_drop_ref(qp);
- return 0;
+
+ return ret;
}
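/* A simplified sketch of how the consolidated return value is consumed,
 * assuming the rxe task loop re-runs its work function while it returns 0
 * and backs off once it returns non-zero (e.g. -EAGAIN); the loop below is
 * illustrative only, not the actual rxe_task.c code.
 */
static void example_run_completer(struct rxe_task *task)
{
	int ret;

	do {
		ret = task->func(task->arg);	/* e.g. rxe_completer(qp) */
	} while (ret == 0);			/* -EAGAIN: wait for the next reschedule */
}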
diff --git a/drivers/infiniband/sw/rxe/rxe_hdr.h b/drivers/infiniband/sw/rxe/rxe_hdr.h
index 3b483b75dfe3..e432f9e37795 100644
--- a/drivers/infiniband/sw/rxe/rxe_hdr.h
+++ b/drivers/infiniband/sw/rxe/rxe_hdr.h
@@ -22,7 +22,6 @@ struct rxe_pkt_info {
u16 paylen; /* length of bth - icrc */
u8 port_num; /* port pkt received on */
u8 opcode; /* bth opcode of packet */
- u8 offset; /* bth offset from pkt->hdr */
};
/* Macros should be used only for received skb */
@@ -280,134 +279,134 @@ static inline void __bth_set_psn(void *arg, u32 psn)
static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
{
- return __bth_opcode(pkt->hdr + pkt->offset);
+ return __bth_opcode(pkt->hdr);
}
static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
{
- __bth_set_opcode(pkt->hdr + pkt->offset, opcode);
+ __bth_set_opcode(pkt->hdr, opcode);
}
static inline u8 bth_se(struct rxe_pkt_info *pkt)
{
- return __bth_se(pkt->hdr + pkt->offset);
+ return __bth_se(pkt->hdr);
}
static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
{
- __bth_set_se(pkt->hdr + pkt->offset, se);
+ __bth_set_se(pkt->hdr, se);
}
static inline u8 bth_mig(struct rxe_pkt_info *pkt)
{
- return __bth_mig(pkt->hdr + pkt->offset);
+ return __bth_mig(pkt->hdr);
}
static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
{
- __bth_set_mig(pkt->hdr + pkt->offset, mig);
+ __bth_set_mig(pkt->hdr, mig);
}
static inline u8 bth_pad(struct rxe_pkt_info *pkt)
{
- return __bth_pad(pkt->hdr + pkt->offset);
+ return __bth_pad(pkt->hdr);
}
static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
{
- __bth_set_pad(pkt->hdr + pkt->offset, pad);
+ __bth_set_pad(pkt->hdr, pad);
}
static inline u8 bth_tver(struct rxe_pkt_info *pkt)
{
- return __bth_tver(pkt->hdr + pkt->offset);
+ return __bth_tver(pkt->hdr);
}
static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
{
- __bth_set_tver(pkt->hdr + pkt->offset, tver);
+ __bth_set_tver(pkt->hdr, tver);
}
static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
{
- return __bth_pkey(pkt->hdr + pkt->offset);
+ return __bth_pkey(pkt->hdr);
}
static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
{
- __bth_set_pkey(pkt->hdr + pkt->offset, pkey);
+ __bth_set_pkey(pkt->hdr, pkey);
}
static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
{
- return __bth_qpn(pkt->hdr + pkt->offset);
+ return __bth_qpn(pkt->hdr);
}
static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
{
- __bth_set_qpn(pkt->hdr + pkt->offset, qpn);
+ __bth_set_qpn(pkt->hdr, qpn);
}
static inline int bth_fecn(struct rxe_pkt_info *pkt)
{
- return __bth_fecn(pkt->hdr + pkt->offset);
+ return __bth_fecn(pkt->hdr);
}
static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
{
- __bth_set_fecn(pkt->hdr + pkt->offset, fecn);
+ __bth_set_fecn(pkt->hdr, fecn);
}
static inline int bth_becn(struct rxe_pkt_info *pkt)
{
- return __bth_becn(pkt->hdr + pkt->offset);
+ return __bth_becn(pkt->hdr);
}
static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
{
- __bth_set_becn(pkt->hdr + pkt->offset, becn);
+ __bth_set_becn(pkt->hdr, becn);
}
static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
{
- return __bth_resv6a(pkt->hdr + pkt->offset);
+ return __bth_resv6a(pkt->hdr);
}
static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
{
- __bth_set_resv6a(pkt->hdr + pkt->offset);
+ __bth_set_resv6a(pkt->hdr);
}
static inline int bth_ack(struct rxe_pkt_info *pkt)
{
- return __bth_ack(pkt->hdr + pkt->offset);
+ return __bth_ack(pkt->hdr);
}
static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
{
- __bth_set_ack(pkt->hdr + pkt->offset, ack);
+ __bth_set_ack(pkt->hdr, ack);
}
static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
{
- __bth_set_resv7(pkt->hdr + pkt->offset);
+ __bth_set_resv7(pkt->hdr);
}
static inline u32 bth_psn(struct rxe_pkt_info *pkt)
{
- return __bth_psn(pkt->hdr + pkt->offset);
+ return __bth_psn(pkt->hdr);
}
static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
{
- __bth_set_psn(pkt->hdr + pkt->offset, psn);
+ __bth_set_psn(pkt->hdr, psn);
}
static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
int mig, int pad, u16 pkey, u32 qpn, int ack_req,
u32 psn)
{
- struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr + pkt->offset);
+ struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr);
bth->opcode = opcode;
bth->flags = (pad << 4) & BTH_PAD_MASK;
@@ -448,14 +447,14 @@ static inline void __rdeth_set_een(void *arg, u32 een)
static inline u8 rdeth_een(struct rxe_pkt_info *pkt)
{
- return __rdeth_een(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
+ return __rdeth_een(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
}
static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
{
- __rdeth_set_een(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
+ __rdeth_set_een(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
}
/******************************************************************************
@@ -499,26 +498,26 @@ static inline void __deth_set_sqp(void *arg, u32 sqp)
static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
{
- return __deth_qkey(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_DETH]);
+ return __deth_qkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}
static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
{
- __deth_set_qkey(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
+ __deth_set_qkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
}
static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
{
- return __deth_sqp(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_DETH]);
+ return __deth_sqp(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}
static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
{
- __deth_set_sqp(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
+ __deth_set_sqp(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
}
/******************************************************************************
@@ -574,38 +573,38 @@ static inline void __reth_set_len(void *arg, u32 len)
static inline u64 reth_va(struct rxe_pkt_info *pkt)
{
- return __reth_va(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+ return __reth_va(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}
static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
- __reth_set_va(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
+ __reth_set_va(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
}
static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
{
- return __reth_rkey(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+ return __reth_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}
static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
- __reth_set_rkey(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
+ __reth_set_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
}
static inline u32 reth_len(struct rxe_pkt_info *pkt)
{
- return __reth_len(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+ return __reth_len(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}
static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
{
- __reth_set_len(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
+ __reth_set_len(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
}
/******************************************************************************
@@ -676,50 +675,50 @@ static inline void __atmeth_set_comp(void *arg, u64 comp)
static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
{
- return __atmeth_va(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+ return __atmeth_va(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}
static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
- __atmeth_set_va(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
+ __atmeth_set_va(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
}
static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
{
- return __atmeth_rkey(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+ return __atmeth_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}
static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
- __atmeth_set_rkey(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
+ __atmeth_set_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
}
static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
{
- return __atmeth_swap_add(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+ return __atmeth_swap_add(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}
static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
{
- __atmeth_set_swap_add(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
+ __atmeth_set_swap_add(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
}
static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
{
- return __atmeth_comp(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+ return __atmeth_comp(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}
static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
{
- __atmeth_set_comp(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
+ __atmeth_set_comp(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
}
/******************************************************************************
@@ -780,26 +779,26 @@ static inline void __aeth_set_msn(void *arg, u32 msn)
static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
{
- return __aeth_syn(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_AETH]);
+ return __aeth_syn(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}
static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
{
- __aeth_set_syn(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
+ __aeth_set_syn(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
}
static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
{
- return __aeth_msn(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_AETH]);
+ return __aeth_msn(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}
static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
{
- __aeth_set_msn(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
+ __aeth_set_msn(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
}
/******************************************************************************
@@ -825,14 +824,14 @@ static inline void __atmack_set_orig(void *arg, u64 orig)
static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
{
- return __atmack_orig(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
+ return __atmack_orig(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
}
static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
{
- __atmack_set_orig(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
+ __atmack_set_orig(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
}
/******************************************************************************
@@ -858,14 +857,14 @@ static inline void __immdt_set_imm(void *arg, __be32 imm)
static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
{
- return __immdt_imm(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
+ return __immdt_imm(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
}
static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
{
- __immdt_set_imm(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
+ __immdt_set_imm(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
}
/******************************************************************************
@@ -891,14 +890,14 @@ static inline void __ieth_set_rkey(void *arg, u32 rkey)
static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
{
- return __ieth_rkey(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_IETH]);
+ return __ieth_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_IETH]);
}
static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
- __ieth_set_rkey(pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
+ __ieth_set_rkey(pkt->hdr +
+ rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
}
enum rxe_hdr_length {
@@ -915,13 +914,12 @@ enum rxe_hdr_length {
static inline size_t header_size(struct rxe_pkt_info *pkt)
{
- return pkt->offset + rxe_opcode[pkt->opcode].length;
+ return rxe_opcode[pkt->opcode].length;
}
static inline void *payload_addr(struct rxe_pkt_info *pkt)
{
- return pkt->hdr + pkt->offset
- + rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
+ return pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
}
static inline size_t payload_size(struct rxe_pkt_info *pkt)
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index c02315aed8d1..0ea9a5aa4ec0 100644
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -7,45 +7,61 @@
#include "rxe.h"
#include "rxe_loc.h"
+/* caller should hold mc_grp_pool->pool_lock */
+static struct rxe_mc_grp *create_grp(struct rxe_dev *rxe,
+ struct rxe_pool *pool,
+ union ib_gid *mgid)
+{
+ int err;
+ struct rxe_mc_grp *grp;
+
+ grp = rxe_alloc_locked(&rxe->mc_grp_pool);
+ if (!grp)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&grp->qp_list);
+ spin_lock_init(&grp->mcg_lock);
+ grp->rxe = rxe;
+ rxe_add_key_locked(grp, mgid);
+
+ err = rxe_mcast_add(rxe, mgid);
+ if (unlikely(err)) {
+ rxe_drop_key_locked(grp);
+ rxe_drop_ref(grp);
+ return ERR_PTR(err);
+ }
+
+ return grp;
+}
+
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
struct rxe_mc_grp **grp_p)
{
int err;
struct rxe_mc_grp *grp;
+ struct rxe_pool *pool = &rxe->mc_grp_pool;
+ unsigned long flags;
- if (rxe->attr.max_mcast_qp_attach == 0) {
- err = -EINVAL;
- goto err1;
- }
+ if (rxe->attr.max_mcast_qp_attach == 0)
+ return -EINVAL;
- grp = rxe_pool_get_key(&rxe->mc_grp_pool, mgid);
+ write_lock_irqsave(&pool->pool_lock, flags);
+
+ grp = rxe_pool_get_key_locked(pool, mgid);
if (grp)
goto done;
- grp = rxe_alloc(&rxe->mc_grp_pool);
- if (!grp) {
- err = -ENOMEM;
- goto err1;
+ grp = create_grp(rxe, pool, mgid);
+ if (IS_ERR(grp)) {
+ write_unlock_irqrestore(&pool->pool_lock, flags);
+ err = PTR_ERR(grp);
+ return err;
}
- INIT_LIST_HEAD(&grp->qp_list);
- spin_lock_init(&grp->mcg_lock);
- grp->rxe = rxe;
-
- rxe_add_key(grp, mgid);
-
- err = rxe_mcast_add(rxe, mgid);
- if (err)
- goto err2;
-
done:
+ write_unlock_irqrestore(&pool->pool_lock, flags);
*grp_p = grp;
return 0;
-
-err2:
- rxe_drop_ref(grp);
-err1:
- return err;
}
int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index c4b06ced30a7..01662727dca0 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -8,6 +8,7 @@
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/if.h>
+#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/sch_generic.h>
#include <linux/netfilter.h>
@@ -152,10 +153,16 @@ static struct dst_entry *rxe_find_route(struct net_device *ndev,
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct udphdr *udph;
+ struct rxe_dev *rxe;
struct net_device *ndev = skb->dev;
- struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ /* takes a reference on rxe->ib_dev
+ * drop when skb is freed
+ */
+ rxe = rxe_get_dev_from_net(ndev);
+ if (!rxe && is_vlan_dev(ndev))
+ rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
if (!rxe)
goto drop;
@@ -174,12 +181,6 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
rxe_rcv(skb);
- /*
- * FIXME: this is in the wrong place, it needs to be done when pkt is
- * destroyed
- */
- ib_device_put(&rxe->ib_dev);
-
return 0;
drop:
kfree_skb(skb);
@@ -406,9 +407,22 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
return 0;
}
+/* fix up a send packet to match the packets
+ * received from UDP before looping them back
+ */
void rxe_loopback(struct sk_buff *skb)
{
- rxe_rcv(skb);
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+
+ if (skb->protocol == htons(ETH_P_IP))
+ skb_pull(skb, sizeof(struct iphdr));
+ else
+ skb_pull(skb, sizeof(struct ipv6hdr));
+
+ if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev)))
+ kfree_skb(skb);
+ else
+ rxe_rcv(skb);
}
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index b374eb53e2fe..307d8986e7c9 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -15,21 +15,25 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_UC] = {
.name = "rxe-uc",
.size = sizeof(struct rxe_ucontext),
+ .elem_offset = offsetof(struct rxe_ucontext, pelem),
.flags = RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_PD] = {
.name = "rxe-pd",
.size = sizeof(struct rxe_pd),
+ .elem_offset = offsetof(struct rxe_pd, pelem),
.flags = RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_AH] = {
.name = "rxe-ah",
.size = sizeof(struct rxe_ah),
- .flags = RXE_POOL_ATOMIC | RXE_POOL_NO_ALLOC,
+ .elem_offset = offsetof(struct rxe_ah, pelem),
+ .flags = RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_SRQ] = {
.name = "rxe-srq",
.size = sizeof(struct rxe_srq),
+ .elem_offset = offsetof(struct rxe_srq, pelem),
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_SRQ_INDEX,
.max_index = RXE_MAX_SRQ_INDEX,
@@ -37,6 +41,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_QP] = {
.name = "rxe-qp",
.size = sizeof(struct rxe_qp),
+ .elem_offset = offsetof(struct rxe_qp, pelem),
.cleanup = rxe_qp_cleanup,
.flags = RXE_POOL_INDEX,
.min_index = RXE_MIN_QP_INDEX,
@@ -45,12 +50,14 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_CQ] = {
.name = "rxe-cq",
.size = sizeof(struct rxe_cq),
+ .elem_offset = offsetof(struct rxe_cq, pelem),
.flags = RXE_POOL_NO_ALLOC,
.cleanup = rxe_cq_cleanup,
},
[RXE_TYPE_MR] = {
.name = "rxe-mr",
.size = sizeof(struct rxe_mem),
+ .elem_offset = offsetof(struct rxe_mem, pelem),
.cleanup = rxe_mem_cleanup,
.flags = RXE_POOL_INDEX,
.max_index = RXE_MAX_MR_INDEX,
@@ -59,6 +66,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_MW] = {
.name = "rxe-mw",
.size = sizeof(struct rxe_mem),
+ .elem_offset = offsetof(struct rxe_mem, pelem),
.flags = RXE_POOL_INDEX,
.max_index = RXE_MAX_MW_INDEX,
.min_index = RXE_MIN_MW_INDEX,
@@ -66,6 +74,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_MC_GRP] = {
.name = "rxe-mc_grp",
.size = sizeof(struct rxe_mc_grp),
+ .elem_offset = offsetof(struct rxe_mc_grp, pelem),
.cleanup = rxe_mc_cleanup,
.flags = RXE_POOL_KEY,
.key_offset = offsetof(struct rxe_mc_grp, mgid),
@@ -74,7 +83,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_MC_ELEM] = {
.name = "rxe-mc_elem",
.size = sizeof(struct rxe_mc_elem),
- .flags = RXE_POOL_ATOMIC,
+ .elem_offset = offsetof(struct rxe_mc_elem, pelem),
},
};
@@ -94,18 +103,18 @@ static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
goto out;
}
- pool->max_index = max;
- pool->min_index = min;
+ pool->index.max_index = max;
+ pool->index.min_index = min;
size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
- pool->table = kmalloc(size, GFP_KERNEL);
- if (!pool->table) {
+ pool->index.table = kmalloc(size, GFP_KERNEL);
+ if (!pool->index.table) {
err = -ENOMEM;
goto out;
}
- pool->table_size = size;
- bitmap_zero(pool->table, max - min + 1);
+ pool->index.table_size = size;
+ bitmap_zero(pool->index.table, max - min + 1);
out:
return err;
@@ -127,13 +136,12 @@ int rxe_pool_init(
pool->max_elem = max_elem;
pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
pool->flags = rxe_type_info[type].flags;
- pool->tree = RB_ROOT;
+ pool->index.tree = RB_ROOT;
+ pool->key.tree = RB_ROOT;
pool->cleanup = rxe_type_info[type].cleanup;
atomic_set(&pool->num_elem, 0);
- kref_init(&pool->ref_cnt);
-
rwlock_init(&pool->pool_lock);
if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
@@ -145,67 +153,47 @@ int rxe_pool_init(
}
if (rxe_type_info[type].flags & RXE_POOL_KEY) {
- pool->key_offset = rxe_type_info[type].key_offset;
- pool->key_size = rxe_type_info[type].key_size;
+ pool->key.key_offset = rxe_type_info[type].key_offset;
+ pool->key.key_size = rxe_type_info[type].key_size;
}
- pool->state = RXE_POOL_STATE_VALID;
-
out:
return err;
}
-static void rxe_pool_release(struct kref *kref)
-{
- struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);
-
- pool->state = RXE_POOL_STATE_INVALID;
- kfree(pool->table);
-}
-
-static void rxe_pool_put(struct rxe_pool *pool)
-{
- kref_put(&pool->ref_cnt, rxe_pool_release);
-}
-
void rxe_pool_cleanup(struct rxe_pool *pool)
{
- unsigned long flags;
-
- write_lock_irqsave(&pool->pool_lock, flags);
- pool->state = RXE_POOL_STATE_INVALID;
if (atomic_read(&pool->num_elem) > 0)
pr_warn("%s pool destroyed with unfree'd elem\n",
pool_name(pool));
- write_unlock_irqrestore(&pool->pool_lock, flags);
- rxe_pool_put(pool);
+ kfree(pool->index.table);
}
static u32 alloc_index(struct rxe_pool *pool)
{
u32 index;
- u32 range = pool->max_index - pool->min_index + 1;
+ u32 range = pool->index.max_index - pool->index.min_index + 1;
- index = find_next_zero_bit(pool->table, range, pool->last);
+ index = find_next_zero_bit(pool->index.table, range, pool->index.last);
if (index >= range)
- index = find_first_zero_bit(pool->table, range);
+ index = find_first_zero_bit(pool->index.table, range);
WARN_ON_ONCE(index >= range);
- set_bit(index, pool->table);
- pool->last = index;
- return index + pool->min_index;
+ set_bit(index, pool->index.table);
+ pool->index.last = index;
+ return index + pool->index.min_index;
}
static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
- struct rb_node **link = &pool->tree.rb_node;
+ struct rb_node **link = &pool->index.tree.rb_node;
struct rb_node *parent = NULL;
struct rxe_pool_entry *elem;
while (*link) {
parent = *link;
- elem = rb_entry(parent, struct rxe_pool_entry, node);
+ elem = rb_entry(parent, struct rxe_pool_entry, index_node);
if (elem->index == new->index) {
pr_warn("element already exists!\n");
@@ -218,25 +206,25 @@ static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
link = &(*link)->rb_right;
}
- rb_link_node(&new->node, parent, link);
- rb_insert_color(&new->node, &pool->tree);
+ rb_link_node(&new->index_node, parent, link);
+ rb_insert_color(&new->index_node, &pool->index.tree);
out:
return;
}
static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
- struct rb_node **link = &pool->tree.rb_node;
+ struct rb_node **link = &pool->key.tree.rb_node;
struct rb_node *parent = NULL;
struct rxe_pool_entry *elem;
int cmp;
while (*link) {
parent = *link;
- elem = rb_entry(parent, struct rxe_pool_entry, node);
+ elem = rb_entry(parent, struct rxe_pool_entry, key_node);
- cmp = memcmp((u8 *)elem + pool->key_offset,
- (u8 *)new + pool->key_offset, pool->key_size);
+ cmp = memcmp((u8 *)elem + pool->key.key_offset,
+ (u8 *)new + pool->key.key_offset, pool->key.key_size);
if (cmp == 0) {
pr_warn("key already exists!\n");
@@ -249,116 +237,135 @@ static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
link = &(*link)->rb_right;
}
- rb_link_node(&new->node, parent, link);
- rb_insert_color(&new->node, &pool->tree);
+ rb_link_node(&new->key_node, parent, link);
+ rb_insert_color(&new->key_node, &pool->key.tree);
out:
return;
}
-void rxe_add_key(void *arg, void *key)
+void __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
{
- struct rxe_pool_entry *elem = arg;
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
- write_lock_irqsave(&pool->pool_lock, flags);
- memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
+ memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
insert_key(pool, elem);
- write_unlock_irqrestore(&pool->pool_lock, flags);
}
-void rxe_drop_key(void *arg)
+void __rxe_add_key(struct rxe_pool_entry *elem, void *key)
{
- struct rxe_pool_entry *elem = arg;
struct rxe_pool *pool = elem->pool;
unsigned long flags;
write_lock_irqsave(&pool->pool_lock, flags);
- rb_erase(&elem->node, &pool->tree);
+ __rxe_add_key_locked(elem, key);
write_unlock_irqrestore(&pool->pool_lock, flags);
}
-void rxe_add_index(void *arg)
+void __rxe_drop_key_locked(struct rxe_pool_entry *elem)
+{
+ struct rxe_pool *pool = elem->pool;
+
+ rb_erase(&elem->key_node, &pool->key.tree);
+}
+
+void __rxe_drop_key(struct rxe_pool_entry *elem)
{
- struct rxe_pool_entry *elem = arg;
struct rxe_pool *pool = elem->pool;
unsigned long flags;
write_lock_irqsave(&pool->pool_lock, flags);
+ __rxe_drop_key_locked(elem);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
+}
+
+void __rxe_add_index_locked(struct rxe_pool_entry *elem)
+{
+ struct rxe_pool *pool = elem->pool;
+
elem->index = alloc_index(pool);
insert_index(pool, elem);
- write_unlock_irqrestore(&pool->pool_lock, flags);
}
-void rxe_drop_index(void *arg)
+void __rxe_add_index(struct rxe_pool_entry *elem)
{
- struct rxe_pool_entry *elem = arg;
struct rxe_pool *pool = elem->pool;
unsigned long flags;
write_lock_irqsave(&pool->pool_lock, flags);
- clear_bit(elem->index - pool->min_index, pool->table);
- rb_erase(&elem->node, &pool->tree);
+ __rxe_add_index_locked(elem);
write_unlock_irqrestore(&pool->pool_lock, flags);
}
-void *rxe_alloc(struct rxe_pool *pool)
+void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
{
- struct rxe_pool_entry *elem;
- unsigned long flags;
+ struct rxe_pool *pool = elem->pool;
- might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
+ clear_bit(elem->index - pool->index.min_index, pool->index.table);
+ rb_erase(&elem->index_node, &pool->index.tree);
+}
- read_lock_irqsave(&pool->pool_lock, flags);
- if (pool->state != RXE_POOL_STATE_VALID) {
- read_unlock_irqrestore(&pool->pool_lock, flags);
- return NULL;
- }
- kref_get(&pool->ref_cnt);
- read_unlock_irqrestore(&pool->pool_lock, flags);
+void __rxe_drop_index(struct rxe_pool_entry *elem)
+{
+ struct rxe_pool *pool = elem->pool;
+ unsigned long flags;
+
+ write_lock_irqsave(&pool->pool_lock, flags);
+ __rxe_drop_index_locked(elem);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
+}
- if (!ib_device_try_get(&pool->rxe->ib_dev))
- goto out_put_pool;
+void *rxe_alloc_locked(struct rxe_pool *pool)
+{
+ struct rxe_type_info *info = &rxe_type_info[pool->type];
+ struct rxe_pool_entry *elem;
+ u8 *obj;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
- elem = kzalloc(rxe_type_info[pool->type].size,
- (pool->flags & RXE_POOL_ATOMIC) ?
- GFP_ATOMIC : GFP_KERNEL);
- if (!elem)
+ obj = kzalloc(info->size, GFP_ATOMIC);
+ if (!obj)
goto out_cnt;
+ elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+
elem->pool = pool;
kref_init(&elem->ref_cnt);
- return elem;
+ return obj;
out_cnt:
atomic_dec(&pool->num_elem);
- ib_device_put(&pool->rxe->ib_dev);
-out_put_pool:
- rxe_pool_put(pool);
return NULL;
}
-int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
+void *rxe_alloc(struct rxe_pool *pool)
{
- unsigned long flags;
+ struct rxe_type_info *info = &rxe_type_info[pool->type];
+ struct rxe_pool_entry *elem;
+ u8 *obj;
+
+ if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
+ goto out_cnt;
- might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
+ obj = kzalloc(info->size, GFP_KERNEL);
+ if (!obj)
+ goto out_cnt;
- read_lock_irqsave(&pool->pool_lock, flags);
- if (pool->state != RXE_POOL_STATE_VALID) {
- read_unlock_irqrestore(&pool->pool_lock, flags);
- return -EINVAL;
- }
- kref_get(&pool->ref_cnt);
- read_unlock_irqrestore(&pool->pool_lock, flags);
+ elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
- if (!ib_device_try_get(&pool->rxe->ib_dev))
- goto out_put_pool;
+ elem->pool = pool;
+ kref_init(&elem->ref_cnt);
+
+ return obj;
+
+out_cnt:
+ atomic_dec(&pool->num_elem);
+ return NULL;
+}
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
+{
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
@@ -369,9 +376,6 @@ int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
out_cnt:
atomic_dec(&pool->num_elem);
- ib_device_put(&pool->rxe->ib_dev);
-out_put_pool:
- rxe_pool_put(pool);
return -EINVAL;
}
@@ -380,67 +384,77 @@ void rxe_elem_release(struct kref *kref)
struct rxe_pool_entry *elem =
container_of(kref, struct rxe_pool_entry, ref_cnt);
struct rxe_pool *pool = elem->pool;
+ struct rxe_type_info *info = &rxe_type_info[pool->type];
+ u8 *obj;
if (pool->cleanup)
pool->cleanup(elem);
- if (!(pool->flags & RXE_POOL_NO_ALLOC))
- kfree(elem);
+ if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
+ obj = (u8 *)elem - info->elem_offset;
+ kfree(obj);
+ }
+
atomic_dec(&pool->num_elem);
- ib_device_put(&pool->rxe->ib_dev);
- rxe_pool_put(pool);
}
-void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
+void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
{
- struct rb_node *node = NULL;
- struct rxe_pool_entry *elem = NULL;
- unsigned long flags;
-
- read_lock_irqsave(&pool->pool_lock, flags);
-
- if (pool->state != RXE_POOL_STATE_VALID)
- goto out;
+ struct rxe_type_info *info = &rxe_type_info[pool->type];
+ struct rb_node *node;
+ struct rxe_pool_entry *elem;
+ u8 *obj;
- node = pool->tree.rb_node;
+ node = pool->index.tree.rb_node;
while (node) {
- elem = rb_entry(node, struct rxe_pool_entry, node);
+ elem = rb_entry(node, struct rxe_pool_entry, index_node);
if (elem->index > index)
node = node->rb_left;
else if (elem->index < index)
node = node->rb_right;
- else {
- kref_get(&elem->ref_cnt);
+ else
break;
- }
}
-out:
- read_unlock_irqrestore(&pool->pool_lock, flags);
- return node ? elem : NULL;
+ if (node) {
+ kref_get(&elem->ref_cnt);
+ obj = (u8 *)elem - info->elem_offset;
+ } else {
+ obj = NULL;
+ }
+
+ return obj;
}
-void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
+void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
- struct rb_node *node = NULL;
- struct rxe_pool_entry *elem = NULL;
- int cmp;
+ u8 *obj;
unsigned long flags;
read_lock_irqsave(&pool->pool_lock, flags);
+ obj = rxe_pool_get_index_locked(pool, index);
+ read_unlock_irqrestore(&pool->pool_lock, flags);
- if (pool->state != RXE_POOL_STATE_VALID)
- goto out;
+ return obj;
+}
- node = pool->tree.rb_node;
+void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
+{
+ struct rxe_type_info *info = &rxe_type_info[pool->type];
+ struct rb_node *node;
+ struct rxe_pool_entry *elem;
+ u8 *obj;
+ int cmp;
+
+ node = pool->key.tree.rb_node;
while (node) {
- elem = rb_entry(node, struct rxe_pool_entry, node);
+ elem = rb_entry(node, struct rxe_pool_entry, key_node);
- cmp = memcmp((u8 *)elem + pool->key_offset,
- key, pool->key_size);
+ cmp = memcmp((u8 *)elem + pool->key.key_offset,
+ key, pool->key.key_size);
if (cmp > 0)
node = node->rb_left;
@@ -450,10 +464,24 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
break;
}
- if (node)
+ if (node) {
kref_get(&elem->ref_cnt);
+ obj = (u8 *)elem - info->elem_offset;
+ } else {
+ obj = NULL;
+ }
-out:
+ return obj;
+}
+
+void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
+{
+ u8 *obj;
+ unsigned long flags;
+
+ read_lock_irqsave(&pool->pool_lock, flags);
+ obj = rxe_pool_get_key_locked(pool, key);
read_unlock_irqrestore(&pool->pool_lock, flags);
- return node ? elem : NULL;
+
+ return obj;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 432745ffc8d4..61210b300a78 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -11,7 +11,6 @@
#define RXE_POOL_CACHE_FLAGS (0)
enum rxe_pool_flags {
- RXE_POOL_ATOMIC = BIT(0),
RXE_POOL_INDEX = BIT(1),
RXE_POOL_KEY = BIT(2),
RXE_POOL_NO_ALLOC = BIT(4),
@@ -36,6 +35,7 @@ struct rxe_pool_entry;
struct rxe_type_info {
const char *name;
size_t size;
+ size_t elem_offset;
void (*cleanup)(struct rxe_pool_entry *obj);
enum rxe_pool_flags flags;
u32 max_index;
@@ -46,18 +46,16 @@ struct rxe_type_info {
extern struct rxe_type_info rxe_type_info[];
-enum rxe_pool_state {
- RXE_POOL_STATE_INVALID,
- RXE_POOL_STATE_VALID,
-};
-
struct rxe_pool_entry {
struct rxe_pool *pool;
struct kref ref_cnt;
struct list_head list;
- /* only used if indexed or keyed */
- struct rb_node node;
+ /* only used if keyed */
+ struct rb_node key_node;
+
+ /* only used if indexed */
+ struct rb_node index_node;
u32 index;
};
@@ -65,24 +63,29 @@ struct rxe_pool {
struct rxe_dev *rxe;
rwlock_t pool_lock; /* protects pool add/del/search */
size_t elem_size;
- struct kref ref_cnt;
void (*cleanup)(struct rxe_pool_entry *obj);
- enum rxe_pool_state state;
enum rxe_pool_flags flags;
enum rxe_elem_type type;
unsigned int max_elem;
atomic_t num_elem;
- /* only used if indexed or keyed */
- struct rb_root tree;
- unsigned long *table;
- size_t table_size;
- u32 max_index;
- u32 min_index;
- u32 last;
- size_t key_offset;
- size_t key_size;
+ /* only used if indexed */
+ struct {
+ struct rb_root tree;
+ unsigned long *table;
+ size_t table_size;
+ u32 last;
+ u32 max_index;
+ u32 min_index;
+ } index;
+
+ /* only used if keyed */
+ struct {
+ struct rb_root tree;
+ size_t key_offset;
+ size_t key_size;
+ } key;
};
/* initialize a pool of objects with given limit on
@@ -95,32 +98,70 @@ int rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
/* free resources from object pool */
void rxe_pool_cleanup(struct rxe_pool *pool);
-/* allocate an object from pool */
+/* allocate an object from pool, with and without holding the pool_lock */
+void *rxe_alloc_locked(struct rxe_pool *pool);
+
void *rxe_alloc(struct rxe_pool *pool);
/* connect already allocated object to pool */
-int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
+
+#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->pelem)
/* assign an index to an indexed object and insert object into
- * pool's rb tree
+ * pool's rb tree, with and without holding the pool_lock
*/
-void rxe_add_index(void *elem);
+void __rxe_add_index_locked(struct rxe_pool_entry *elem);
+
+#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->pelem)
-/* drop an index and remove object from rb tree */
-void rxe_drop_index(void *elem);
+void __rxe_add_index(struct rxe_pool_entry *elem);
+
+#define rxe_add_index(obj) __rxe_add_index(&(obj)->pelem)
+
+/* drop an index and remove object from rb tree
+ * with and without holding the pool_lock
+ */
+void __rxe_drop_index_locked(struct rxe_pool_entry *elem);
+
+#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->pelem)
+
+void __rxe_drop_index(struct rxe_pool_entry *elem);
+
+#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->pelem)
/* assign a key to a keyed object and insert object into
- * pool's rb tree
+ * pool's rb tree, with and without holding the pool_lock
*/
-void rxe_add_key(void *elem, void *key);
+void __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key);
+
+#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->pelem, key)
+
+void __rxe_add_key(struct rxe_pool_entry *elem, void *key);
+
+#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->pelem, key)
+
+/* remove elem from rb tree, with and without holding the pool_lock */
+void __rxe_drop_key_locked(struct rxe_pool_entry *elem);
-/* remove elem from rb tree */
-void rxe_drop_key(void *elem);
+#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->pelem)
+
+void __rxe_drop_key(struct rxe_pool_entry *elem);
+
+#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->pelem)
+
+/* lookup an indexed object from its index, with and without holding the pool_lock.
+ * takes a reference on the object
+ */
+void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index);
-/* lookup an indexed object from index. takes a reference on object */
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index);
-/* lookup keyed object from key. takes a reference on the object */
+/* lookup a keyed object from its key, with and without holding the pool_lock.
+ * takes a reference on the object
+ */
+void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key);
+
void *rxe_pool_get_key(struct rxe_pool *pool, void *key);
/* cleanup an object when all references are dropped */
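/* An illustrative sketch of the calling pattern the reworked pool API
 * supports (distilled from rxe_mcast_get_grp() in this series, not new
 * code): objects embed a struct rxe_pool_entry named pelem, the macro
 * wrappers pass &obj->pelem on the caller's behalf, and the *_locked
 * variants allow an atomic lookup-or-create under pool->pool_lock.
 */
static struct rxe_mc_grp *example_get_or_create(struct rxe_dev *rxe,
						union ib_gid *mgid)
{
	struct rxe_pool *pool = &rxe->mc_grp_pool;
	struct rxe_mc_grp *grp;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	grp = rxe_pool_get_key_locked(pool, mgid);	/* takes a reference */
	if (!grp) {
		grp = rxe_alloc_locked(pool);		/* GFP_ATOMIC, lock held */
		if (grp)
			rxe_add_key_locked(grp, mgid);
	}
	write_unlock_irqrestore(&pool->pool_lock, flags);

	return grp;
}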
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 656a5b4be847..34ae957a315c 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -62,6 +62,17 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
struct rxe_port *port;
int port_num = init->port_num;
+ switch (init->qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ case IB_QPT_RC:
+ case IB_QPT_UC:
+ case IB_QPT_UD:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
if (!init->recv_cq || !init->send_cq) {
pr_warn("missing cq\n");
goto err1;
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index c9984a28eecc..7a49e27da23a 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -9,21 +9,26 @@
#include "rxe.h"
#include "rxe_loc.h"
+/* check that QP matches packet opcode type and is in a valid state */
static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct rxe_qp *qp)
{
+ unsigned int pkt_type;
+
if (unlikely(!qp->valid))
goto err1;
+ pkt_type = pkt->opcode & 0xe0;
+
switch (qp_type(qp)) {
case IB_QPT_RC:
- if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) {
+ if (unlikely(pkt_type != IB_OPCODE_RC)) {
pr_warn_ratelimited("bad qp type\n");
goto err1;
}
break;
case IB_QPT_UC:
- if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) {
+ if (unlikely(pkt_type != IB_OPCODE_UC)) {
pr_warn_ratelimited("bad qp type\n");
goto err1;
}
@@ -31,7 +36,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
case IB_QPT_UD:
case IB_QPT_SMI:
case IB_QPT_GSI:
- if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) {
+ if (unlikely(pkt_type != IB_OPCODE_UD)) {
pr_warn_ratelimited("bad qp type\n");
goto err1;
}
@@ -85,8 +90,7 @@ static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
goto err1;
}
- if ((qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) &&
- pkt->mask) {
+ if (qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) {
u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;
if (unlikely(deth_qkey(pkt) != qkey)) {
@@ -233,8 +237,6 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
struct rxe_mc_elem *mce;
struct rxe_qp *qp;
union ib_gid dgid;
- struct sk_buff *per_qp_skb;
- struct rxe_pkt_info *per_qp_pkt;
int err;
if (skb->protocol == htons(ETH_P_IP))
@@ -246,13 +248,17 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
/* lookup mcast group corresponding to mgid, takes a ref */
mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid);
if (!mcg)
- goto err1; /* mcast group not registered */
+ goto drop; /* mcast group not registered */
spin_lock_bh(&mcg->mcg_lock);
+ /* this is an unreliable datagram service, so we let
+ * failures to deliver a multicast packet to a
+ * single QP happen and just move on and try
+ * the rest of the QPs on the list
+ */
list_for_each_entry(mce, &mcg->qp_list, qp_list) {
qp = mce->qp;
- pkt = SKB_TO_PKT(skb);
/* validate qp for incoming packet */
err = check_type_state(rxe, pkt, qp);
@@ -263,31 +269,49 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
if (err)
continue;
- /* for all but the last qp create a new clone of the
- * skb and pass to the qp.
+ /* for all but the last QP create a new clone of the
+ * skb and pass to the QP. Pass the original skb to
+ * the last QP in the list.
*/
- if (mce->qp_list.next != &mcg->qp_list)
- per_qp_skb = skb_clone(skb, GFP_ATOMIC);
- else
- per_qp_skb = skb;
-
- if (unlikely(!per_qp_skb))
- continue;
-
- per_qp_pkt = SKB_TO_PKT(per_qp_skb);
- per_qp_pkt->qp = qp;
- rxe_add_ref(qp);
- rxe_rcv_pkt(per_qp_pkt, per_qp_skb);
+ if (mce->qp_list.next != &mcg->qp_list) {
+ struct sk_buff *cskb;
+ struct rxe_pkt_info *cpkt;
+
+ cskb = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!cskb))
+ continue;
+
+ if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
+ kfree_skb(cskb);
+ break;
+ }
+
+ cpkt = SKB_TO_PKT(cskb);
+ cpkt->qp = qp;
+ rxe_add_ref(qp);
+ rxe_rcv_pkt(cpkt, cskb);
+ } else {
+ pkt->qp = qp;
+ rxe_add_ref(qp);
+ rxe_rcv_pkt(pkt, skb);
+ skb = NULL; /* mark consumed */
+ }
}
spin_unlock_bh(&mcg->mcg_lock);
rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */
- return;
+ if (likely(!skb))
+ return;
-err1:
+ /* This only occurs if one of the checks fails on the last
+ * QP in the list above
+ */
+
+drop:
kfree_skb(skb);
+ ib_device_put(&rxe->ib_dev);
}
/**
@@ -340,9 +364,7 @@ void rxe_rcv(struct sk_buff *skb)
__be32 *icrcp;
u32 calc_icrc, pack_icrc;
- pkt->offset = 0;
-
- if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES))
+ if (unlikely(skb->len < RXE_BTH_BYTES))
goto drop;
if (rxe_chk_dgid(rxe, skb) < 0) {
@@ -397,4 +419,5 @@ drop:
rxe_drop_ref(pkt->qp);
kfree_skb(skb);
+ ib_device_put(&rxe->ib_dev);
}
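/* A minimal sketch of the reference pairing this series establishes; the
 * helper is hypothetical, the real code open-codes these drops (free_pkt()
 * in rxe_comp.c, the drain loops in rxe_resp.c, and the error paths above).
 * Every skb passed to rxe_rcv() carries one reference on the ib_device,
 * taken in rxe_udp_encap_recv() or rxe_loopback(), and one on pkt->qp once
 * the packet has been matched to a QP.
 */
static inline void example_consume_pkt(struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);
	struct ib_device *dev = pkt->qp->ibqp.device;

	rxe_drop_ref(pkt->qp);	/* pairs with rxe_add_ref(qp) on receive */
	ib_device_put(dev);	/* pairs with the reference taken at receive time */
	kfree_skb(skb);
}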
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index d4917646641a..889290793d75 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -375,7 +375,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
pkt->psn = qp->req.psn;
pkt->mask = rxe_opcode[opcode].mask;
pkt->paylen = paylen;
- pkt->offset = 0;
pkt->wqe = wqe;
/* init skb */
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 5a098083a9d2..142f3d8014d8 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -99,6 +99,7 @@ static inline enum resp_states get_req(struct rxe_qp *qp,
while ((skb = skb_dequeue(&qp->req_pkts))) {
rxe_drop_ref(qp);
kfree_skb(skb);
+ ib_device_put(qp->ibqp.device);
}
/* go drain recv wr queue */
@@ -585,11 +586,10 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
ack->qp = qp;
ack->opcode = opcode;
ack->mask = rxe_opcode[opcode].mask;
- ack->offset = pkt->offset;
ack->paylen = paylen;
/* fill in bth using the request packet headers */
- memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);
+ memcpy(ack->hdr, pkt->hdr, RXE_BTH_BYTES);
bth_set_opcode(ack, opcode);
bth_set_qpn(ack, qp->attr.dest_qp_num);
@@ -872,6 +872,11 @@ static enum resp_states do_complete(struct rxe_qp *qp,
else
wc->network_hdr_type = RDMA_NETWORK_IPV6;
+ if (is_vlan_dev(skb->dev)) {
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ wc->vlan_id = vlan_dev_vlan_id(skb->dev);
+ }
+
if (pkt->mask & RXE_IMMDT_MASK) {
wc->wc_flags |= IB_WC_WITH_IMM;
wc->ex.imm_data = immdt_imm(pkt);
@@ -1012,6 +1017,7 @@ static enum resp_states cleanup(struct rxe_qp *qp,
skb = skb_dequeue(&qp->req_pkts);
rxe_drop_ref(qp);
kfree_skb(skb);
+ ib_device_put(qp->ibqp.device);
}
if (qp->resp.mr) {
@@ -1176,6 +1182,7 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
while ((skb = skb_dequeue(&qp->req_pkts))) {
rxe_drop_ref(qp);
kfree_skb(skb);
+ ib_device_put(qp->ibqp.device);
}
if (notify)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index a031514e2f41..dee5e0e919d2 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -106,12 +106,12 @@ static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
return IB_LINK_LAYER_ETHERNET;
}
-static int rxe_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
+static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
{
- struct rxe_dev *rxe = to_rdev(uctx->device);
- struct rxe_ucontext *uc = to_ruc(uctx);
+ struct rxe_dev *rxe = to_rdev(ibuc->device);
+ struct rxe_ucontext *uc = to_ruc(ibuc);
- return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
+ return rxe_add_to_pool(&rxe->uc_pool, uc);
}
static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
@@ -145,7 +145,7 @@ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
- return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
+ return rxe_add_to_pool(&rxe->pd_pool, pd);
}
static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
@@ -169,7 +169,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
if (err)
return err;
- err = rxe_add_to_pool(&rxe->ah_pool, &ah->pelem);
+ err = rxe_add_to_pool(&rxe->ah_pool, ah);
if (err)
return err;
@@ -273,7 +273,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
if (err)
goto err1;
- err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
+ err = rxe_add_to_pool(&rxe->srq_pool, srq);
if (err)
goto err1;
@@ -555,37 +555,42 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
}
}
-static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
+ const struct ib_send_wr *ibwr)
+{
+ struct ib_sge *sge = ibwr->sg_list;
+ u8 *p = wqe->dma.inline_data;
+ int i;
+
+ for (i = 0; i < ibwr->num_sge; i++, sge++) {
+ memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
+ p += sge->length;
+ }
+}
+
+static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
unsigned int mask, unsigned int length,
struct rxe_send_wqe *wqe)
{
int num_sge = ibwr->num_sge;
- struct ib_sge *sge;
- int i;
- u8 *p;
init_send_wr(qp, &wqe->wr, ibwr);
+ /* local operation */
+ if (unlikely(mask & WR_REG_MASK)) {
+ wqe->mask = mask;
+ wqe->state = wqe_state_posted;
+ return;
+ }
+
if (qp_type(qp) == IB_QPT_UD ||
qp_type(qp) == IB_QPT_SMI ||
qp_type(qp) == IB_QPT_GSI)
memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));
- if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
- p = wqe->dma.inline_data;
-
- sge = ibwr->sg_list;
- for (i = 0; i < num_sge; i++, sge++) {
- memcpy(p, (void *)(uintptr_t)sge->addr,
- sge->length);
-
- p += sge->length;
- }
- } else if (mask & WR_REG_MASK) {
- wqe->mask = mask;
- wqe->state = wqe_state_posted;
- return 0;
- } else
+ if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
+ copy_inline_data_to_wqe(wqe, ibwr);
+ else
memcpy(wqe->dma.sge, ibwr->sg_list,
num_sge * sizeof(struct ib_sge));
@@ -599,8 +604,6 @@ static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
wqe->dma.sge_offset = 0;
wqe->state = wqe_state_posted;
wqe->ssn = atomic_add_return(1, &qp->ssn);
-
- return 0;
}
static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
@@ -623,10 +626,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
}
send_wqe = producer_addr(sq->queue);
-
- err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
- if (unlikely(err))
- goto err1;
+ init_send_wqe(qp, ibwr, mask, length, send_wqe);
advance_producer(sq->queue);
spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
@@ -774,7 +774,7 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (err)
return err;
- return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
+ return rxe_add_to_pool(&rxe->cq_pool, cq);
}
static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
@@ -1118,7 +1118,7 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
struct ib_device *dev = &rxe->ib_dev;
struct crypto_shash *tfm;
- strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
+ strscpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
dev->node_type = RDMA_NODE_IB_CA;
dev->phys_port_cnt = 1;
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index adda78996219..368959ae9a8c 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -653,7 +653,7 @@ static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
{
struct siw_sqe *orq_e = orq_get_tail(qp);
- if (orq_e && READ_ONCE(orq_e->flags) == 0)
+ if (READ_ONCE(orq_e->flags) == 0)
return orq_e;
return NULL;
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index ee95cf29179d..cf55326f2ab4 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -135,7 +135,7 @@ static struct {
static int siw_init_cpulist(void)
{
- int i, num_nodes = num_possible_nodes();
+ int i, num_nodes = nr_node_ids;
memset(siw_tx_thread, 0, sizeof(siw_tx_thread));
@@ -357,7 +357,7 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
sizeof(base_dev->iw_ifname));
/* Disable TCP port mapping */
- base_dev->iw_driver_flags = IW_F_NO_PORT_MAP,
+ base_dev->iw_driver_flags = IW_F_NO_PORT_MAP;
sdev->attrs.max_qp = SIW_MAX_QP;
sdev->attrs.max_qp_wr = SIW_MAX_QP_WR;
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 875d36d4b1c6..ddb2e66f9f13 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -199,26 +199,26 @@ void siw_qp_llp_write_space(struct sock *sk)
static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
{
- irq_size = roundup_pow_of_two(irq_size);
- orq_size = roundup_pow_of_two(orq_size);
-
- qp->attrs.irq_size = irq_size;
- qp->attrs.orq_size = orq_size;
-
- qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe));
- if (!qp->irq) {
- siw_dbg_qp(qp, "irq malloc for %d failed\n", irq_size);
- qp->attrs.irq_size = 0;
- return -ENOMEM;
+ if (irq_size) {
+ irq_size = roundup_pow_of_two(irq_size);
+ qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe));
+ if (!qp->irq) {
+ qp->attrs.irq_size = 0;
+ return -ENOMEM;
+ }
}
- qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe));
- if (!qp->orq) {
- siw_dbg_qp(qp, "orq malloc for %d failed\n", orq_size);
- qp->attrs.orq_size = 0;
- qp->attrs.irq_size = 0;
- vfree(qp->irq);
- return -ENOMEM;
+ if (orq_size) {
+ orq_size = roundup_pow_of_two(orq_size);
+ qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe));
+ if (!qp->orq) {
+ qp->attrs.orq_size = 0;
+ qp->attrs.irq_size = 0;
+ vfree(qp->irq);
+ return -ENOMEM;
+ }
}
+ qp->attrs.irq_size = irq_size;
+ qp->attrs.orq_size = orq_size;
siw_dbg_qp(qp, "ORD %d, IRD %d\n", orq_size, irq_size);
return 0;
}
@@ -288,13 +288,14 @@ int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl)
if (ctrl & MPA_V2_RDMA_WRITE_RTR)
wqe->sqe.opcode = SIW_OP_WRITE;
else if (ctrl & MPA_V2_RDMA_READ_RTR) {
- struct siw_sqe *rreq;
+ struct siw_sqe *rreq = NULL;
wqe->sqe.opcode = SIW_OP_READ;
spin_lock(&qp->orq_lock);
- rreq = orq_get_free(qp);
+ if (qp->attrs.orq_size)
+ rreq = orq_get_free(qp);
if (rreq) {
siw_read_to_orq(rreq, &wqe->sqe);
qp->orq_put++;
@@ -877,135 +878,88 @@ void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe)
rreq->num_sge = 1;
}
-/*
- * Must be called with SQ locked.
- * To avoid complete SQ starvation by constant inbound READ requests,
- * the active IRQ will not be served after qp->irq_burst, if the
- * SQ has pending work.
- */
-int siw_activate_tx(struct siw_qp *qp)
+static int siw_activate_tx_from_sq(struct siw_qp *qp)
{
- struct siw_sqe *irqe, *sqe;
+ struct siw_sqe *sqe;
struct siw_wqe *wqe = tx_wqe(qp);
int rv = 1;
- irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];
-
- if (irqe->flags & SIW_WQE_VALID) {
- sqe = sq_get_next(qp);
-
- /*
- * Avoid local WQE processing starvation in case
- * of constant inbound READ request stream
- */
- if (sqe && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
- qp->irq_burst = 0;
- goto skip_irq;
- }
- memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
- wqe->wr_status = SIW_WR_QUEUED;
-
- /* start READ RESPONSE */
- wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
- wqe->sqe.flags = 0;
- if (irqe->num_sge) {
- wqe->sqe.num_sge = 1;
- wqe->sqe.sge[0].length = irqe->sge[0].length;
- wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
- wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
- } else {
- wqe->sqe.num_sge = 0;
- }
-
- /* Retain original RREQ's message sequence number for
- * potential error reporting cases.
- */
- wqe->sqe.sge[1].length = irqe->sge[1].length;
-
- wqe->sqe.rkey = irqe->rkey;
- wqe->sqe.raddr = irqe->raddr;
+ sqe = sq_get_next(qp);
+ if (!sqe)
+ return 0;
- wqe->processed = 0;
- qp->irq_get++;
+ memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
+ wqe->wr_status = SIW_WR_QUEUED;
- /* mark current IRQ entry free */
- smp_store_mb(irqe->flags, 0);
+ /* First copy SQE to kernel private memory */
+ memcpy(&wqe->sqe, sqe, sizeof(*sqe));
+ if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
+ rv = -EINVAL;
goto out;
}
- sqe = sq_get_next(qp);
- if (sqe) {
-skip_irq:
- memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
- wqe->wr_status = SIW_WR_QUEUED;
-
- /* First copy SQE to kernel private memory */
- memcpy(&wqe->sqe, sqe, sizeof(*sqe));
-
- if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
+ if (wqe->sqe.flags & SIW_WQE_INLINE) {
+ if (wqe->sqe.opcode != SIW_OP_SEND &&
+ wqe->sqe.opcode != SIW_OP_WRITE) {
rv = -EINVAL;
goto out;
}
- if (wqe->sqe.flags & SIW_WQE_INLINE) {
- if (wqe->sqe.opcode != SIW_OP_SEND &&
- wqe->sqe.opcode != SIW_OP_WRITE) {
- rv = -EINVAL;
- goto out;
- }
- if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
- rv = -EINVAL;
- goto out;
- }
- wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
- wqe->sqe.sge[0].lkey = 0;
- wqe->sqe.num_sge = 1;
+ if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
+ rv = -EINVAL;
+ goto out;
}
- if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
- /* A READ cannot be fenced */
- if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
- wqe->sqe.opcode ==
- SIW_OP_READ_LOCAL_INV)) {
- siw_dbg_qp(qp, "cannot fence read\n");
- rv = -EINVAL;
- goto out;
- }
- spin_lock(&qp->orq_lock);
+ wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
+ wqe->sqe.sge[0].lkey = 0;
+ wqe->sqe.num_sge = 1;
+ }
+ if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
+ /* A READ cannot be fenced */
+ if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
+ wqe->sqe.opcode ==
+ SIW_OP_READ_LOCAL_INV)) {
+ siw_dbg_qp(qp, "cannot fence read\n");
+ rv = -EINVAL;
+ goto out;
+ }
+ spin_lock(&qp->orq_lock);
- if (!siw_orq_empty(qp)) {
- qp->tx_ctx.orq_fence = 1;
- rv = 0;
- }
- spin_unlock(&qp->orq_lock);
+ if (qp->attrs.orq_size && !siw_orq_empty(qp)) {
+ qp->tx_ctx.orq_fence = 1;
+ rv = 0;
+ }
+ spin_unlock(&qp->orq_lock);
- } else if (wqe->sqe.opcode == SIW_OP_READ ||
- wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
- struct siw_sqe *rreq;
+ } else if (wqe->sqe.opcode == SIW_OP_READ ||
+ wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
+ struct siw_sqe *rreq;
- wqe->sqe.num_sge = 1;
+ if (unlikely(!qp->attrs.orq_size)) {
+ /* We negotiated not to send READ req's */
+ rv = -EINVAL;
+ goto out;
+ }
+ wqe->sqe.num_sge = 1;
- spin_lock(&qp->orq_lock);
+ spin_lock(&qp->orq_lock);
- rreq = orq_get_free(qp);
- if (rreq) {
- /*
- * Make an immediate copy in ORQ to be ready
- * to process loopback READ reply
- */
- siw_read_to_orq(rreq, &wqe->sqe);
- qp->orq_put++;
- } else {
- qp->tx_ctx.orq_fence = 1;
- rv = 0;
- }
- spin_unlock(&qp->orq_lock);
+ rreq = orq_get_free(qp);
+ if (rreq) {
+ /*
+ * Make an immediate copy in ORQ to be ready
+ * to process loopback READ reply
+ */
+ siw_read_to_orq(rreq, &wqe->sqe);
+ qp->orq_put++;
+ } else {
+ qp->tx_ctx.orq_fence = 1;
+ rv = 0;
}
-
- /* Clear SQE, can be re-used by application */
- smp_store_mb(sqe->flags, 0);
- qp->sq_get++;
- } else {
- rv = 0;
+ spin_unlock(&qp->orq_lock);
}
+
+ /* Clear SQE, can be re-used by application */
+ smp_store_mb(sqe->flags, 0);
+ qp->sq_get++;
out:
if (unlikely(rv < 0)) {
siw_dbg_qp(qp, "error %d\n", rv);
@@ -1015,6 +969,65 @@ out:
}
/*
+ * Must be called with SQ locked.
+ * To avoid complete SQ starvation by a constant inbound READ request
+ * stream, the active IRQ is skipped in favour of the SQ once
+ * qp->irq_burst reaches SIW_IRQ_MAXBURST_SQ_ACTIVE and the SQ has
+ * pending work.
+ */
+int siw_activate_tx(struct siw_qp *qp)
+{
+ struct siw_sqe *irqe;
+ struct siw_wqe *wqe = tx_wqe(qp);
+
+ if (!qp->attrs.irq_size)
+ return siw_activate_tx_from_sq(qp);
+
+ irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];
+
+ if (!(irqe->flags & SIW_WQE_VALID))
+ return siw_activate_tx_from_sq(qp);
+
+ /*
+ * Avoid local WQE processing starvation in case
+ * of constant inbound READ request stream
+ */
+ if (sq_get_next(qp) && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
+ qp->irq_burst = 0;
+ return siw_activate_tx_from_sq(qp);
+ }
+ memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
+ wqe->wr_status = SIW_WR_QUEUED;
+
+ /* start READ RESPONSE */
+ wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
+ wqe->sqe.flags = 0;
+ if (irqe->num_sge) {
+ wqe->sqe.num_sge = 1;
+ wqe->sqe.sge[0].length = irqe->sge[0].length;
+ wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
+ wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
+ } else {
+ wqe->sqe.num_sge = 0;
+ }
+
+ /* Retain original RREQ's message sequence number for
+ * potential error reporting cases.
+ */
+ wqe->sqe.sge[1].length = irqe->sge[1].length;
+
+ wqe->sqe.rkey = irqe->rkey;
+ wqe->sqe.raddr = irqe->raddr;
+
+ wqe->processed = 0;
+ qp->irq_get++;
+
+ /* mark current IRQ entry free */
+ smp_store_mb(irqe->flags, 0);
+
+ return 1;
+}
+
+/*
* Check if current CQ state qualifies for calling CQ completion
* handler. Must be called with CQ lock held.
*/
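/*
 * Hedged sketch of the fairness rule the new siw_activate_tx() above
 * applies: prefer the inbound READ-response queue (IRQ), but after a
 * fixed burst of IRQ entries yield one slot to the local send queue (SQ)
 * so it is never starved.  Standalone illustration only; the names below
 * are made up and do not exist in the siw driver.
 */
#include <stdbool.h>

#define MAX_IRQ_BURST 8

struct fair_picker {
	unsigned int irq_burst;	/* consecutive IRQ entries served */
};

enum pick { PICK_NONE, PICK_IRQ, PICK_SQ };

static enum pick pick_next(struct fair_picker *p, bool irq_pending, bool sq_pending)
{
	if (irq_pending) {
		/* Yield one slot to the SQ once the burst limit is reached. */
		if (sq_pending && ++p->irq_burst >= MAX_IRQ_BURST) {
			p->irq_burst = 0;
			return PICK_SQ;
		}
		return PICK_IRQ;
	}
	p->irq_burst = 0;
	return sq_pending ? PICK_SQ : PICK_NONE;
}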
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 4bd1f1f84057..60116f20653c 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -680,6 +680,10 @@ static int siw_init_rresp(struct siw_qp *qp, struct siw_rx_stream *srx)
}
spin_lock_irqsave(&qp->sq_lock, flags);
+ if (unlikely(!qp->attrs.irq_size)) {
+ run_sq = 0;
+ goto error_irq;
+ }
if (tx_work->wr_status == SIW_WR_IDLE) {
/*
* immediately schedule READ response w/o
@@ -712,8 +716,9 @@ static int siw_init_rresp(struct siw_qp *qp, struct siw_rx_stream *srx)
/* RRESP now valid as current TX wqe or placed into IRQ */
smp_store_mb(resp->flags, SIW_WQE_VALID);
} else {
- pr_warn("siw: [QP %u]: irq %d exceeded %d\n", qp_id(qp),
- qp->irq_put % qp->attrs.irq_size, qp->attrs.irq_size);
+error_irq:
+ pr_warn("siw: [QP %u]: IRQ exceeded or null, size %d\n",
+ qp_id(qp), qp->attrs.irq_size);
siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
RDMAP_ETYPE_REMOTE_OPERATION,
@@ -740,6 +745,9 @@ static int siw_orqe_start_rx(struct siw_qp *qp)
struct siw_sqe *orqe;
struct siw_wqe *wqe = NULL;
+ if (unlikely(!qp->attrs.orq_size))
+ return -EPROTO;
+
/* make sure ORQ indices are current */
smp_mb();
@@ -796,8 +804,8 @@ int siw_proc_rresp(struct siw_qp *qp)
*/
rv = siw_orqe_start_rx(qp);
if (rv) {
- pr_warn("siw: [QP %u]: ORQ empty at idx %d\n",
- qp_id(qp), qp->orq_get % qp->attrs.orq_size);
+ pr_warn("siw: [QP %u]: ORQ empty, size %d\n",
+ qp_id(qp), qp->attrs.orq_size);
goto error_term;
}
rv = siw_rresp_check_ntoh(srx, frx);
@@ -1290,11 +1298,13 @@ static int siw_rdmap_complete(struct siw_qp *qp, int error)
wc_status);
siw_wqe_put_mem(wqe, SIW_OP_READ);
- if (!error)
+ if (!error) {
rv = siw_check_tx_fence(qp);
- else
- /* Disable current ORQ eleement */
- WRITE_ONCE(orq_get_current(qp)->flags, 0);
+ } else {
+ /* Disable current ORQ element */
+ if (qp->attrs.orq_size)
+ WRITE_ONCE(orq_get_current(qp)->flags, 0);
+ }
break;
case RDMAP_RDMA_READ_REQ:
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index d19d8325588b..7989c4043db4 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -1107,8 +1107,8 @@ next_wqe:
/*
* RREQ may have already been completed by inbound RRESP!
*/
- if (tx_type == SIW_OP_READ ||
- tx_type == SIW_OP_READ_LOCAL_INV) {
+ if ((tx_type == SIW_OP_READ ||
+ tx_type == SIW_OP_READ_LOCAL_INV) && qp->attrs.orq_size) {
/* Cleanup pending entry in ORQ */
qp->orq_put--;
qp->orq[qp->orq_put % qp->attrs.orq_size].flags = 0;
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 68fd053fc774..e389d44e5591 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -365,13 +365,23 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
if (rv)
goto err_out;
+ num_sqe = attrs->cap.max_send_wr;
+ num_rqe = attrs->cap.max_recv_wr;
+
/* All queue indices are derived from modulo operations
* on a free running 'get' (consumer) and 'put' (producer)
* unsigned counter. Having queue sizes at power of two
* avoids handling counter wrap around.
*/
- num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr);
- num_rqe = roundup_pow_of_two(attrs->cap.max_recv_wr);
+ if (num_sqe)
+ num_sqe = roundup_pow_of_two(num_sqe);
+ else {
+ /* Zero sized SQ is not supported */
+ rv = -EINVAL;
+ goto err_out;
+ }
+ if (num_rqe)
+ num_rqe = roundup_pow_of_two(num_rqe);
if (udata)
qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
@@ -379,7 +389,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
if (qp->sendq == NULL) {
- siw_dbg(base_dev, "SQ size %d alloc failed\n", num_sqe);
rv = -ENOMEM;
goto err_out_xa;
}
@@ -413,7 +422,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
if (qp->recvq == NULL) {
- siw_dbg(base_dev, "RQ size %d alloc failed\n", num_rqe);
rv = -ENOMEM;
goto err_out_xa;
}
@@ -966,9 +974,9 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
unsigned long flags;
int rv = 0;
- if (qp->srq) {
+ if (qp->srq || qp->attrs.rq_size == 0) {
*bad_wr = wr;
- return -EOPNOTSUPP; /* what else from errno.h? */
+ return -EINVAL;
}
if (!rdma_is_kernel_res(&qp->base_qp.res)) {
siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
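/*
 * Hedged sketch of the free-running-counter ring indexing that the
 * comment in siw_create_qp() above relies on: with a power-of-two queue
 * size the unsigned 'get'/'put' counters may wrap freely, because the
 * size divides 2^32 and the modulo (or mask) still selects the right
 * slot.  A zero size must be rejected up front, as the patch now does
 * for the SQ.  Purely illustrative; not siw code.
 */
#include <stdbool.h>
#include <stdint.h>

struct ring {
	uint32_t get;	/* consumer counter, free running */
	uint32_t put;	/* producer counter, free running */
	uint32_t size;	/* power of two, > 0 */
	int *slot;
};

static inline bool ring_full(const struct ring *r)
{
	return r->put - r->get == r->size;	/* wrap-safe unsigned math */
}

static inline bool ring_empty(const struct ring *r)
{
	return r->put == r->get;
}

static bool ring_push(struct ring *r, int v)
{
	if (ring_full(r))
		return false;
	r->slot[r->put % r->size] = v;	/* same as & (size - 1) */
	r->put++;
	return true;
}

static bool ring_pop(struct ring *r, int *v)
{
	if (ring_empty(r))
		return false;
	*v = r->slot[r->get % r->size];
	r->get++;
	return true;
}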
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 3440dc48d02c..179ff1d068e5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -413,7 +413,6 @@ struct ipoib_dev_priv {
u64 hca_caps;
struct ipoib_ethtool_st ethtool;
unsigned int max_send_sge;
- bool sm_fullmember_sendonly_support;
const struct net_device_ops *rn_ops;
};
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index a6f413491321..e16b40c09f82 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -141,8 +141,6 @@ int ipoib_open(struct net_device *dev)
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- priv->sm_fullmember_sendonly_support = false;
-
if (ipoib_ib_dev_open(dev)) {
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
return 0;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 86e4ed64e4e2..5b3154503bf4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -275,7 +275,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
memset(&av, 0, sizeof(av));
av.type = rdma_ah_find_type(priv->ca, priv->port);
- rdma_ah_set_dlid(&av, be16_to_cpu(mcast->mcmember.mlid)),
+ rdma_ah_set_dlid(&av, be16_to_cpu(mcast->mcmember.mlid));
rdma_ah_set_port_num(&av, priv->port);
rdma_ah_set_sl(&av, mcast->mcmember.sl);
rdma_ah_set_static_rate(&av, mcast->mcmember.rate);
@@ -334,15 +334,6 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
return;
}
/*
- * Check if can send sendonly MCG's with sendonly-fullmember join state.
- * It done here after the successfully join to the broadcast group,
- * because the broadcast group must always be joined first and is always
- * re-joined if the SM changes substantially.
- */
- priv->sm_fullmember_sendonly_support =
- ib_sa_sendonly_fullmem_support(&ipoib_sa_client,
- priv->ca, priv->port);
- /*
* Take rtnl_lock to avoid racing with ipoib_stop() and
* turning the carrier back on while a device is being
* removed. However, ipoib_stop() will attempt to flush
@@ -537,9 +528,7 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
* most closely emulates the behavior, from a user space
* application perspective, of Ethernet multicast operation.
*/
- if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
- priv->sm_fullmember_sendonly_support)
- /* SM supports sendonly-fullmember, otherwise fallback to full-member */
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
rec.join_state = SENDONLY_FULLMEMBER_JOIN;
}
spin_unlock_irq(&priv->lock);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 4792b9bf400f..8fcaa1136f2c 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -89,13 +89,20 @@ int iser_debug_level = 0;
module_param_named(debug_level, iser_debug_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
+static int iscsi_iser_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops iscsi_iser_size_ops = {
+ .set = iscsi_iser_set,
+ .get = param_get_uint,
+};
+
static unsigned int iscsi_max_lun = 512;
-module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
-MODULE_PARM_DESC(max_lun, "Max LUNs to allow per session (default:512");
+module_param_cb(max_lun, &iscsi_iser_size_ops, &iscsi_max_lun, S_IRUGO);
+MODULE_PARM_DESC(max_lun, "Max LUNs to allow per session, must be > 0 (default:512)");
unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS;
-module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024");
+module_param_cb(max_sectors, &iscsi_iser_size_ops, &iser_max_sectors,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command, must be > 0 (default:1024)");
bool iser_always_reg = true;
module_param_named(always_register, iser_always_reg, bool, S_IRUGO);
@@ -110,6 +117,18 @@ int iser_pi_guard;
module_param_named(pi_guard, iser_pi_guard, int, S_IRUGO);
MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]");
+static int iscsi_iser_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+ unsigned int n = 0;
+
+ ret = kstrtouint(val, 10, &n);
+ if (ret != 0 || n == 0)
+ return -EINVAL;
+
+ return param_set_uint(val, kp);
+}
+
/*
* iscsi_iser_recv() - Process a successful recv completion
* @conn: iscsi connection
@@ -571,13 +590,20 @@ iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
static inline unsigned int
iser_dif_prot_caps(int prot_caps)
{
- return ((prot_caps & IB_PROT_T10DIF_TYPE_1) ?
- SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE0_PROTECTION |
- SHOST_DIX_TYPE1_PROTECTION : 0) |
- ((prot_caps & IB_PROT_T10DIF_TYPE_2) ?
- SHOST_DIF_TYPE2_PROTECTION | SHOST_DIX_TYPE2_PROTECTION : 0) |
- ((prot_caps & IB_PROT_T10DIF_TYPE_3) ?
- SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE3_PROTECTION : 0);
+ int ret = 0;
+
+ if (prot_caps & IB_PROT_T10DIF_TYPE_1)
+ ret |= SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION;
+ if (prot_caps & IB_PROT_T10DIF_TYPE_2)
+ ret |= SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIX_TYPE2_PROTECTION;
+ if (prot_caps & IB_PROT_T10DIF_TYPE_3)
+ ret |= SHOST_DIF_TYPE3_PROTECTION |
+ SHOST_DIX_TYPE3_PROTECTION;
+
+ return ret;
}
/**
@@ -1009,11 +1035,6 @@ static int __init iser_init(void)
iser_dbg("Starting iSER datamover...\n");
- if (iscsi_max_lun < 1) {
- iser_err("Invalid max_lun value of %u\n", iscsi_max_lun);
- return -EINVAL;
- }
-
memset(&ig, 0, sizeof(struct iser_global));
ig.desc_cache = kmem_cache_create("iser_descriptors",
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index d4e057fac219..afec40da9b58 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -169,7 +169,7 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
domain->sig.dif.ref_escape = true;
if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
domain->sig.dif.ref_remap = true;
-};
+}
static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
@@ -390,4 +390,3 @@ err_reg:
return err;
}
-
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 2bd18b006893..136f6c4492e0 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -685,7 +685,7 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
iser_disconnected_handler(cma_id);
iser_free_ib_conn_res(iser_conn, destroy);
complete(&iser_conn->ib_completion);
-};
+}
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 2ba27221ea85..7305ed8976c2 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -71,7 +71,6 @@ static int isert_sg_tablesize_set(const char *val, const struct kernel_param *kp
return param_set_int(val, kp);
}
-
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
@@ -79,7 +78,6 @@ isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
cmd->prot_op != TARGET_PROT_NORMAL);
}
-
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
@@ -232,8 +230,10 @@ isert_create_device_ib_res(struct isert_device *device)
}
/* Check signature cap */
- device->pi_capable = ib_dev->attrs.device_cap_flags &
- IB_DEVICE_INTEGRITY_HANDOVER ? true : false;
+ if (ib_dev->attrs.device_cap_flags & IB_DEVICE_INTEGRITY_HANDOVER)
+ device->pi_capable = true;
+ else
+ device->pi_capable = false;
return 0;
}
@@ -1993,7 +1993,7 @@ isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_domain *domain)
if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
domain->sig.dif.ref_remap = true;
-};
+}
static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 4933085a864a..cecf0f7cadf9 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -233,7 +233,7 @@ static void vema_get_class_port_info(struct opa_vnic_vema_port *port,
port_info = (struct opa_class_port_info *)rsp_mad->data;
memcpy(port_info, &port->class_port_info, sizeof(*port_info));
- port_info->base_version = OPA_MGMT_BASE_VERSION,
+ port_info->base_version = OPA_MGMT_BASE_VERSION;
port_info->class_version = OPA_EMA_CLASS_VERSION;
/*
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
index ba00f0de14ca..b6a0abf40589 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -408,6 +408,7 @@ int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
"%s", str);
if (err) {
pr_err("kobject_init_and_add: %d\n", err);
+ kobject_put(&sess->kobj);
return err;
}
err = sysfs_create_group(&sess->kobj, &rtrs_clt_sess_attr_group);
@@ -419,6 +420,7 @@ int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
&sess->kobj, "stats");
if (err) {
pr_err("kobject_init_and_add: %d\n", err);
+ kobject_put(&sess->stats->kobj_stats);
goto remove_group;
}
@@ -469,15 +471,12 @@ int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt)
return sysfs_create_group(&clt->dev.kobj, &rtrs_clt_attr_group);
}
-void rtrs_clt_destroy_sysfs_root_folders(struct rtrs_clt *clt)
+void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt)
{
+ sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group);
+
if (clt->kobj_paths) {
kobject_del(clt->kobj_paths);
kobject_put(clt->kobj_paths);
}
}
-
-void rtrs_clt_destroy_sysfs_root_files(struct rtrs_clt *clt)
-{
- sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group);
-}
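/*
 * Hedged sketch of the error-handling rule behind the two kobject_put()
 * calls added above: once kobject_init_and_add() has run, the kobject
 * holds a reference even when the call fails, so the error path must
 * drop that reference with kobject_put() and let the ktype release()
 * free the memory rather than calling kfree() directly.  Illustrative
 * only; 'example_obj' and 'example_ktype' are made up.
 */
#include <linux/kobject.h>
#include <linux/slab.h>

struct example_obj {
	struct kobject kobj;
};

static void example_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct example_obj, kobj));
}

static struct kobj_type example_ktype = {
	.release = example_release,
};

static int example_create(struct kobject *parent)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	int err;

	if (!obj)
		return -ENOMEM;

	err = kobject_init_and_add(&obj->kobj, &example_ktype, parent, "example");
	if (err) {
		/* Never kfree() here: drop the reference instead. */
		kobject_put(&obj->kobj);
		return err;
	}
	return 0;
}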
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 67f86c405a26..0a08b4b742a3 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -31,6 +31,8 @@
*/
#define RTRS_RECONNECT_SEED 8
+#define FIRST_CONN 0x01
+
MODULE_DESCRIPTION("RDMA Transport Client");
MODULE_LICENSE("GPL");
@@ -178,18 +180,18 @@ struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
}
/**
- * __rtrs_clt_change_state() - change the session state through session state
+ * rtrs_clt_change_state() - change the session state through session state
* machine.
*
* @sess: client session to change the state of.
* @new_state: state to change to.
*
- * returns true if successful, false if the requested state can not be set.
+ * returns true if sess's state is changed to new state, otherwise returns false.
*
* Locks:
* state_wq lock must be hold.
*/
-static bool __rtrs_clt_change_state(struct rtrs_clt_sess *sess,
+static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
enum rtrs_clt_state new_state)
{
enum rtrs_clt_state old_state;
@@ -286,7 +288,7 @@ static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess,
spin_lock_irq(&sess->state_wq.lock);
if (sess->state == old_state)
- changed = __rtrs_clt_change_state(sess, new_state);
+ changed = rtrs_clt_change_state(sess, new_state);
spin_unlock_irq(&sess->state_wq.lock);
return changed;
@@ -494,7 +496,7 @@ static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
int err;
struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- WARN_ON(sess->flags != RTRS_MSG_NEW_RKEY_F);
+ WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
iu = container_of(wc->wr_cqe, struct rtrs_iu,
cqe);
err = rtrs_iu_post_recv(&con->c, iu);
@@ -514,7 +516,7 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
u32 buf_id;
int err;
- WARN_ON(sess->flags != RTRS_MSG_NEW_RKEY_F);
+ WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
@@ -621,12 +623,12 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
} else if (imm_type == RTRS_HB_MSG_IMM) {
WARN_ON(con->c.cid);
rtrs_send_hb_ack(&sess->s);
- if (sess->flags == RTRS_MSG_NEW_RKEY_F)
+ if (sess->flags & RTRS_MSG_NEW_RKEY_F)
return rtrs_clt_recv_done(con, wc);
} else if (imm_type == RTRS_HB_ACK_IMM) {
WARN_ON(con->c.cid);
sess->s.hb_missed_cnt = 0;
- if (sess->flags == RTRS_MSG_NEW_RKEY_F)
+ if (sess->flags & RTRS_MSG_NEW_RKEY_F)
return rtrs_clt_recv_done(con, wc);
} else {
rtrs_wrn(con->c.sess, "Unknown IMM type %u\n",
@@ -654,7 +656,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
wc->wc_flags & IB_WC_WITH_IMM));
WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
- if (sess->flags == RTRS_MSG_NEW_RKEY_F) {
+ if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
return rtrs_clt_recv_done(con, wc);
@@ -664,7 +666,6 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
case IB_WC_RDMA_WRITE:
/*
* post_send() RDMA write completions of IO reqs (read/write)
- * and hb
*/
break;
@@ -680,7 +681,7 @@ static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
for (i = 0; i < q_size; i++) {
- if (sess->flags == RTRS_MSG_NEW_RKEY_F) {
+ if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
struct rtrs_iu *iu = &con->rsp_ius[i];
err = rtrs_iu_post_recv(&con->c, iu);
@@ -1318,6 +1319,12 @@ out_err:
static void free_permits(struct rtrs_clt *clt)
{
+ if (clt->permits_map) {
+ size_t sz = clt->queue_depth;
+
+ wait_event(clt->permits_wait,
+ find_first_bit(clt->permits_map, sz) >= sz);
+ }
kfree(clt->permits_map);
clt->permits_map = NULL;
kfree(clt->permits);
@@ -1353,21 +1360,14 @@ static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess,
bool changed;
spin_lock_irq(&sess->state_wq.lock);
- *old_state = sess->state;
- changed = __rtrs_clt_change_state(sess, new_state);
+ if (old_state)
+ *old_state = sess->state;
+ changed = rtrs_clt_change_state(sess, new_state);
spin_unlock_irq(&sess->state_wq.lock);
return changed;
}
-static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
- enum rtrs_clt_state new_state)
-{
- enum rtrs_clt_state old_state;
-
- return rtrs_clt_change_state_get_old(sess, new_state, &old_state);
-}
-
static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
{
struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
@@ -1511,7 +1511,7 @@ static void destroy_con(struct rtrs_clt_con *con)
static int create_con_cq_qp(struct rtrs_clt_con *con)
{
struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- u16 wr_queue_size;
+ u32 max_send_wr, max_recv_wr, cq_size;
int err, cq_vector;
struct rtrs_msg_rkey_rsp *rsp;
@@ -1523,7 +1523,8 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
* + 2 for drain and heartbeat
* in case qp gets into error state
*/
- wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
+ max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
+ max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
/* We must be the first here */
if (WARN_ON(sess->s.dev))
return -EINVAL;
@@ -1555,25 +1556,29 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
/* Shared between connections */
sess->s.dev_ref++;
- wr_queue_size =
+ max_send_wr =
min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
/* QD * (REQ + RSP + FR REGS or INVS) + drain */
sess->queue_depth * 3 + 1);
+ max_recv_wr =
+ min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
+ sess->queue_depth * 3 + 1);
}
/* alloc iu to recv new rkey reply when server reports flags set */
- if (sess->flags == RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
- con->rsp_ius = rtrs_iu_alloc(wr_queue_size, sizeof(*rsp),
+ if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
+ con->rsp_ius = rtrs_iu_alloc(max_recv_wr, sizeof(*rsp),
GFP_KERNEL, sess->s.dev->ib_dev,
DMA_FROM_DEVICE,
rtrs_clt_rdma_done);
if (!con->rsp_ius)
return -ENOMEM;
- con->queue_size = wr_queue_size;
+ con->queue_size = max_recv_wr;
}
+ cq_size = max_send_wr + max_recv_wr;
cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
- cq_vector, wr_queue_size, wr_queue_size,
- IB_POLL_SOFTIRQ);
+ cq_vector, cq_size, max_send_wr,
+ max_recv_wr, IB_POLL_SOFTIRQ);
/*
* In case of error we do not bother to clean previous allocations,
* since destroy_con_cq_qp() must be called.
@@ -1657,6 +1662,7 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
.cid_num = cpu_to_le16(sess->s.con_num),
.recon_cnt = cpu_to_le16(sess->s.recon_cnt),
};
+ msg.first_conn = sess->for_new_clt ? FIRST_CONN : 0;
uuid_copy(&msg.sess_uuid, &sess->s.uuid);
uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
@@ -1742,6 +1748,8 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
scnprintf(sess->hca_name, sizeof(sess->hca_name),
sess->s.dev->ib_dev->name);
sess->s.src_addr = con->c.cm_id->route.addr.src_addr;
+ /* set for_new_clt, to allow future reconnect on any path */
+ sess->for_new_clt = 1;
}
return 0;
@@ -1788,7 +1796,7 @@ static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
static void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
{
- if (rtrs_clt_change_state(sess, RTRS_CLT_CLOSING))
+ if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSING, NULL))
queue_work(rtrs_wq, &sess->close_work);
if (wait)
flush_work(&sess->close_work);
@@ -2174,7 +2182,7 @@ static void rtrs_clt_close_work(struct work_struct *work)
cancel_delayed_work_sync(&sess->reconnect_dwork);
rtrs_clt_stop_and_destroy_conns(sess);
- rtrs_clt_change_state(sess, RTRS_CLT_CLOSED);
+ rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSED, NULL);
}
static int init_conns(struct rtrs_clt_sess *sess)
@@ -2226,7 +2234,7 @@ destroy:
* doing rdma_resolve_addr(), switch to CONNECTION_ERR state
* manually to keep reconnecting.
*/
- rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+ rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
return err;
}
@@ -2243,7 +2251,7 @@ static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
if (unlikely(wc->status != IB_WC_SUCCESS)) {
rtrs_err(sess->clt, "Sess info request send failed: %s\n",
ib_wc_status_msg(wc->status));
- rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+ rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
return;
}
@@ -2367,7 +2375,7 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
out:
rtrs_clt_update_wc_stats(con);
rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
- rtrs_clt_change_state(sess, state);
+ rtrs_clt_change_state_get_old(sess, state, NULL);
}
static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
@@ -2423,7 +2431,6 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
err = -ECONNRESET;
else
err = -ETIMEDOUT;
- goto out;
}
out:
@@ -2433,7 +2440,7 @@ out:
rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
if (unlikely(err))
/* If we've never taken async path because of malloc problems */
- rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+ rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
return err;
}
@@ -2490,7 +2497,7 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
/* Stop everything */
rtrs_clt_stop_and_destroy_conns(sess);
msleep(RTRS_RECONNECT_BACKOFF);
- if (rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING)) {
+ if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING, NULL)) {
err = init_sess(sess);
if (err)
goto reconnect_again;
@@ -2499,7 +2506,7 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
return;
reconnect_again:
- if (rtrs_clt_change_state(sess, RTRS_CLT_RECONNECTING)) {
+ if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING, NULL)) {
sess->stats->reconnects.fail_cnt++;
delay_ms = clt->reconnect_delay_sec * 1000;
queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
@@ -2565,11 +2572,8 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
clt->dev.class = rtrs_clt_dev_class;
clt->dev.release = rtrs_clt_dev_release;
err = dev_set_name(&clt->dev, "%s", sessname);
- if (err) {
- free_percpu(clt->pcpu_path);
- kfree(clt);
- return ERR_PTR(err);
- }
+ if (err)
+ goto err;
/*
* Suppress user space notification until
* sysfs files are created
@@ -2577,44 +2581,35 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
dev_set_uevent_suppress(&clt->dev, true);
err = device_register(&clt->dev);
if (err) {
- free_percpu(clt->pcpu_path);
put_device(&clt->dev);
- return ERR_PTR(err);
+ goto err;
}
clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
if (!clt->kobj_paths) {
- free_percpu(clt->pcpu_path);
- device_unregister(&clt->dev);
- return NULL;
+ err = -ENOMEM;
+ goto err_dev;
}
err = rtrs_clt_create_sysfs_root_files(clt);
if (err) {
- free_percpu(clt->pcpu_path);
kobject_del(clt->kobj_paths);
kobject_put(clt->kobj_paths);
- device_unregister(&clt->dev);
- return ERR_PTR(err);
+ goto err_dev;
}
dev_set_uevent_suppress(&clt->dev, false);
kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
return clt;
-}
-
-static void wait_for_inflight_permits(struct rtrs_clt *clt)
-{
- if (clt->permits_map) {
- size_t sz = clt->queue_depth;
-
- wait_event(clt->permits_wait,
- find_first_bit(clt->permits_map, sz) >= sz);
- }
+err_dev:
+ device_unregister(&clt->dev);
+err:
+ free_percpu(clt->pcpu_path);
+ kfree(clt);
+ return ERR_PTR(err);
}
static void free_clt(struct rtrs_clt *clt)
{
- wait_for_inflight_permits(clt);
free_permits(clt);
free_percpu(clt->pcpu_path);
mutex_destroy(&clt->paths_ev_mutex);
@@ -2672,6 +2667,8 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
err = PTR_ERR(sess);
goto close_all_sess;
}
+ if (!i)
+ sess->for_new_clt = 1;
list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
err = init_sess(sess);
@@ -2702,8 +2699,7 @@ close_all_sess:
rtrs_clt_close_conns(sess, true);
kobject_put(&sess->kobj);
}
- rtrs_clt_destroy_sysfs_root_files(clt);
- rtrs_clt_destroy_sysfs_root_folders(clt);
+ rtrs_clt_destroy_sysfs_root(clt);
free_clt(clt);
out:
@@ -2720,8 +2716,7 @@ void rtrs_clt_close(struct rtrs_clt *clt)
struct rtrs_clt_sess *sess, *tmp;
/* Firstly forbid sysfs access */
- rtrs_clt_destroy_sysfs_root_files(clt);
- rtrs_clt_destroy_sysfs_root_folders(clt);
+ rtrs_clt_destroy_sysfs_root(clt);
/* Now it is safe to iterate over all paths without locks */
list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
index b8dbd701b3cb..692bc83e1f09 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -143,6 +143,7 @@ struct rtrs_clt_sess {
int max_send_sge;
u32 flags;
struct kobject kobj;
+ u8 for_new_clt;
struct rtrs_clt_stats *stats;
/* cache hca_port and hca_name to display in sysfs */
u8 hca_port;
@@ -243,8 +244,7 @@ ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *stats,
/* rtrs-clt-sysfs.c */
int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt);
-void rtrs_clt_destroy_sysfs_root_folders(struct rtrs_clt *clt);
-void rtrs_clt_destroy_sysfs_root_files(struct rtrs_clt *clt);
+void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt);
int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess);
void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index 3f2918671dbe..8caad0a2322b 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -188,7 +188,9 @@ struct rtrs_msg_conn_req {
__le16 recon_cnt;
uuid_t sess_uuid;
uuid_t paths_uuid;
- u8 reserved[12];
+ u8 first_conn : 1;
+ u8 reserved_bits : 7;
+ u8 reserved[11];
};
/**
@@ -303,8 +305,9 @@ int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
struct ib_send_wr *head);
int rtrs_cq_qp_create(struct rtrs_sess *rtrs_sess, struct rtrs_con *con,
- u32 max_send_sge, int cq_vector, u16 cq_size,
- u16 wr_queue_size, enum ib_poll_context poll_ctx);
+ u32 max_send_sge, int cq_vector, int cq_size,
+ u32 max_send_wr, u32 max_recv_wr,
+ enum ib_poll_context poll_ctx);
void rtrs_cq_qp_destroy(struct rtrs_con *con);
void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index d2edff3b8f0d..126a96e75c62 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -51,6 +51,8 @@ static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj,
sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, str, sizeof(str));
rtrs_info(s, "disconnect for path %s requested\n", str);
+ /* first remove sysfs itself to avoid deadlock */
+ sysfs_remove_file_self(&sess->kobj, &attr->attr);
close_sess(sess);
return count;
@@ -181,6 +183,7 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
err = -ENOMEM;
pr_err("kobject_create_and_add(): %d\n", err);
device_del(&srv->dev);
+ put_device(&srv->dev);
goto unlock;
}
dev_set_uevent_suppress(&srv->dev, false);
@@ -206,6 +209,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
kobject_put(srv->kobj_paths);
mutex_unlock(&srv->paths_mutex);
device_del(&srv->dev);
+ put_device(&srv->dev);
} else {
mutex_unlock(&srv->paths_mutex);
}
@@ -234,6 +238,7 @@ static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess)
&sess->kobj, "stats");
if (err) {
rtrs_err(s, "kobject_init_and_add(): %d\n", err);
+ kobject_put(&sess->stats->kobj_stats);
return err;
}
err = sysfs_create_group(&sess->stats->kobj_stats,
@@ -290,8 +295,8 @@ remove_group:
sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
put_kobj:
kobject_del(&sess->kobj);
- kobject_put(&sess->kobj);
destroy_root:
+ kobject_put(&sess->kobj);
rtrs_srv_destroy_once_sysfs_root_folders(sess);
return err;
@@ -302,7 +307,7 @@ void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess)
if (sess->kobj.state_in_sysfs) {
kobject_del(&sess->stats->kobj_stats);
kobject_put(&sess->stats->kobj_stats);
- kobject_del(&sess->kobj);
+ sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
kobject_put(&sess->kobj);
rtrs_srv_destroy_once_sysfs_root_folders(sess);
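/*
 * Hedged sketch of the self-removal pattern added to the disconnect
 * 'store' handler above: a sysfs write that tears down its own parent
 * object first detaches its own attribute with sysfs_remove_file_self(),
 * otherwise the teardown path would wait on the very sysfs file whose
 * store callback is still running.  Names below are illustrative;
 * example_teardown() is a hypothetical helper.
 */
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t shutdown_store(struct kobject *kobj, struct kobj_attribute *attr,
			      const char *buf, size_t count)
{
	/* Detach this attribute first so teardown cannot deadlock on it. */
	if (sysfs_remove_file_self(kobj, &attr->attr))
		example_teardown(kobj);	/* hypothetical teardown helper */
	return count;
}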
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index c42fd470c4eb..d071809e3ed2 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -222,7 +222,8 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
dma_addr_t dma_addr = sess->dma_addr[id->msg_id];
struct rtrs_srv_mr *srv_mr;
struct rtrs_srv *srv = sess->srv;
- struct ib_send_wr inv_wr, imm_wr;
+ struct ib_send_wr inv_wr;
+ struct ib_rdma_wr imm_wr;
struct ib_rdma_wr *wr = NULL;
enum ib_send_flags flags;
size_t sg_cnt;
@@ -267,21 +268,22 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
WARN_ON_ONCE(rkey != wr->rkey);
wr->wr.opcode = IB_WR_RDMA_WRITE;
+ wr->wr.wr_cqe = &io_comp_cqe;
wr->wr.ex.imm_data = 0;
wr->wr.send_flags = 0;
if (need_inval && always_invalidate) {
wr->wr.next = &rwr.wr;
rwr.wr.next = &inv_wr;
- inv_wr.next = &imm_wr;
+ inv_wr.next = &imm_wr.wr;
} else if (always_invalidate) {
wr->wr.next = &rwr.wr;
- rwr.wr.next = &imm_wr;
+ rwr.wr.next = &imm_wr.wr;
} else if (need_inval) {
wr->wr.next = &inv_wr;
- inv_wr.next = &imm_wr;
+ inv_wr.next = &imm_wr.wr;
} else {
- wr->wr.next = &imm_wr;
+ wr->wr.next = &imm_wr.wr;
}
/*
* From time to time we have to post signaled sends,
@@ -294,16 +296,18 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
inv_wr.sg_list = NULL;
inv_wr.num_sge = 0;
inv_wr.opcode = IB_WR_SEND_WITH_INV;
+ inv_wr.wr_cqe = &io_comp_cqe;
inv_wr.send_flags = 0;
inv_wr.ex.invalidate_rkey = rkey;
}
- imm_wr.next = NULL;
+ imm_wr.wr.next = NULL;
if (always_invalidate) {
struct rtrs_msg_rkey_rsp *msg;
srv_mr = &sess->mrs[id->msg_id];
rwr.wr.opcode = IB_WR_REG_MR;
+ rwr.wr.wr_cqe = &local_reg_cqe;
rwr.wr.num_sge = 0;
rwr.mr = srv_mr->mr;
rwr.wr.send_flags = 0;
@@ -318,22 +322,22 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
list.addr = srv_mr->iu->dma_addr;
list.length = sizeof(*msg);
list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
- imm_wr.sg_list = &list;
- imm_wr.num_sge = 1;
- imm_wr.opcode = IB_WR_SEND_WITH_IMM;
+ imm_wr.wr.sg_list = &list;
+ imm_wr.wr.num_sge = 1;
+ imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
srv_mr->iu->dma_addr,
srv_mr->iu->size, DMA_TO_DEVICE);
} else {
- imm_wr.sg_list = NULL;
- imm_wr.num_sge = 0;
- imm_wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ imm_wr.wr.sg_list = NULL;
+ imm_wr.wr.num_sge = 0;
+ imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
}
- imm_wr.send_flags = flags;
- imm_wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
+ imm_wr.wr.send_flags = flags;
+ imm_wr.wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
0, need_inval));
- imm_wr.wr_cqe = &io_comp_cqe;
+ imm_wr.wr.wr_cqe = &io_comp_cqe;
ib_dma_sync_single_for_device(sess->s.dev->ib_dev, dma_addr,
offset, DMA_BIDIRECTIONAL);
@@ -360,7 +364,8 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
{
struct rtrs_sess *s = con->c.sess;
struct rtrs_srv_sess *sess = to_srv_sess(s);
- struct ib_send_wr inv_wr, imm_wr, *wr = NULL;
+ struct ib_send_wr inv_wr, *wr = NULL;
+ struct ib_rdma_wr imm_wr;
struct ib_reg_wr rwr;
struct rtrs_srv *srv = sess->srv;
struct rtrs_srv_mr *srv_mr;
@@ -379,6 +384,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
if (need_inval) {
if (likely(sg_cnt)) {
+ inv_wr.wr_cqe = &io_comp_cqe;
inv_wr.sg_list = NULL;
inv_wr.num_sge = 0;
inv_wr.opcode = IB_WR_SEND_WITH_INV;
@@ -396,15 +402,15 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
if (need_inval && always_invalidate) {
wr = &inv_wr;
inv_wr.next = &rwr.wr;
- rwr.wr.next = &imm_wr;
+ rwr.wr.next = &imm_wr.wr;
} else if (always_invalidate) {
wr = &rwr.wr;
- rwr.wr.next = &imm_wr;
+ rwr.wr.next = &imm_wr.wr;
} else if (need_inval) {
wr = &inv_wr;
- inv_wr.next = &imm_wr;
+ inv_wr.next = &imm_wr.wr;
} else {
- wr = &imm_wr;
+ wr = &imm_wr.wr;
}
/*
* From time to time we have to post signalled sends,
@@ -413,14 +419,15 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
flags = (atomic_inc_return(&con->wr_cnt) % srv->queue_depth) ?
0 : IB_SEND_SIGNALED;
imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
- imm_wr.next = NULL;
+ imm_wr.wr.next = NULL;
if (always_invalidate) {
struct ib_sge list;
struct rtrs_msg_rkey_rsp *msg;
srv_mr = &sess->mrs[id->msg_id];
- rwr.wr.next = &imm_wr;
+ rwr.wr.next = &imm_wr.wr;
rwr.wr.opcode = IB_WR_REG_MR;
+ rwr.wr.wr_cqe = &local_reg_cqe;
rwr.wr.num_sge = 0;
rwr.wr.send_flags = 0;
rwr.mr = srv_mr->mr;
@@ -435,21 +442,21 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
list.addr = srv_mr->iu->dma_addr;
list.length = sizeof(*msg);
list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
- imm_wr.sg_list = &list;
- imm_wr.num_sge = 1;
- imm_wr.opcode = IB_WR_SEND_WITH_IMM;
+ imm_wr.wr.sg_list = &list;
+ imm_wr.wr.num_sge = 1;
+ imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
srv_mr->iu->dma_addr,
srv_mr->iu->size, DMA_TO_DEVICE);
} else {
- imm_wr.sg_list = NULL;
- imm_wr.num_sge = 0;
- imm_wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ imm_wr.wr.sg_list = NULL;
+ imm_wr.wr.num_sge = 0;
+ imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
}
- imm_wr.send_flags = flags;
- imm_wr.wr_cqe = &io_comp_cqe;
+ imm_wr.wr.send_flags = flags;
+ imm_wr.wr.wr_cqe = &io_comp_cqe;
- imm_wr.ex.imm_data = cpu_to_be32(imm);
+ imm_wr.wr.ex.imm_data = cpu_to_be32(imm);
err = ib_post_send(id->con->c.qp, wr, NULL);
if (unlikely(err))
@@ -651,7 +658,7 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
if (!srv_mr->iu) {
err = -ENOMEM;
rtrs_err(ss, "rtrs_iu_alloc(), err: %d\n", err);
- goto free_iu;
+ goto dereg_mr;
}
}
/* Eventually dma addr for each chunk can be cached */
@@ -667,7 +674,6 @@ err:
srv_mr = &sess->mrs[mri];
sgt = &srv_mr->sgt;
mr = srv_mr->mr;
-free_iu:
rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1);
dereg_mr:
ib_dereg_mr(mr);
@@ -814,7 +820,7 @@ static int process_info_req(struct rtrs_srv_con *con,
rwr[mri].wr.opcode = IB_WR_REG_MR;
rwr[mri].wr.wr_cqe = &local_reg_cqe;
rwr[mri].wr.num_sge = 0;
- rwr[mri].wr.send_flags = mri ? 0 : IB_SEND_SIGNALED;
+ rwr[mri].wr.send_flags = 0;
rwr[mri].mr = mr;
rwr[mri].key = mr->rkey;
rwr[mri].access = (IB_ACCESS_LOCAL_WRITE |
@@ -1238,7 +1244,6 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
case IB_WC_SEND:
/*
* post_send() RDMA write completions of IO reqs (read/write)
- * and hb
*/
atomic_add(srv->queue_depth, &con->sq_wr_avail);
@@ -1328,7 +1333,8 @@ static void free_srv(struct rtrs_srv *srv)
}
static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
- const uuid_t *paths_uuid)
+ const uuid_t *paths_uuid,
+ bool first_conn)
{
struct rtrs_srv *srv;
int i;
@@ -1341,13 +1347,18 @@ static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
return srv;
}
}
+ mutex_unlock(&ctx->srv_mutex);
+ /*
+ * If this request is not the first connection request from the
+ * client for this session then fail and return error.
+ */
+ if (!first_conn)
+ return ERR_PTR(-ENXIO);
/* need to allocate a new srv */
srv = kzalloc(sizeof(*srv), GFP_KERNEL);
- if (!srv) {
- mutex_unlock(&ctx->srv_mutex);
- return NULL;
- }
+ if (!srv)
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&srv->paths_list);
mutex_init(&srv->paths_mutex);
@@ -1357,8 +1368,6 @@ static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
srv->ctx = ctx;
device_initialize(&srv->dev);
srv->dev.release = rtrs_srv_dev_release;
- list_add(&srv->ctx_list, &ctx->srv_list);
- mutex_unlock(&ctx->srv_mutex);
srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
GFP_KERNEL);
@@ -1371,6 +1380,9 @@ static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
goto err_free_chunks;
}
refcount_set(&srv->refcount, 1);
+ mutex_lock(&ctx->srv_mutex);
+ list_add(&srv->ctx_list, &ctx->srv_list);
+ mutex_unlock(&ctx->srv_mutex);
return srv;
@@ -1381,7 +1393,7 @@ err_free_chunks:
err_free_srv:
kfree(srv);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
static void put_srv(struct rtrs_srv *srv)
@@ -1461,10 +1473,12 @@ static bool __is_path_w_addr_exists(struct rtrs_srv *srv,
static void free_sess(struct rtrs_srv_sess *sess)
{
- if (sess->kobj.state_in_sysfs)
+ if (sess->kobj.state_in_sysfs) {
+ kobject_del(&sess->kobj);
kobject_put(&sess->kobj);
- else
+ } else {
kfree(sess);
+ }
}
static void rtrs_srv_close_work(struct work_struct *work)
@@ -1586,7 +1600,7 @@ static int create_con(struct rtrs_srv_sess *sess,
struct rtrs_sess *s = &sess->s;
struct rtrs_srv_con *con;
- u16 cq_size, wr_queue_size;
+ u32 cq_size, wr_queue_size;
int err, cq_vector;
con = kzalloc(sizeof(*con), GFP_KERNEL);
@@ -1600,7 +1614,7 @@ static int create_con(struct rtrs_srv_sess *sess,
con->c.cm_id = cm_id;
con->c.sess = &sess->s;
con->c.cid = cid;
- atomic_set(&con->wr_cnt, 0);
+ atomic_set(&con->wr_cnt, 1);
if (con->c.cid == 0) {
/*
@@ -1630,7 +1644,8 @@ static int create_con(struct rtrs_srv_sess *sess,
/* TODO: SOFTIRQ can be faster, but be careful with softirq context */
err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_size,
- wr_queue_size, IB_POLL_WORKQUEUE);
+ wr_queue_size, wr_queue_size,
+ IB_POLL_WORKQUEUE);
if (err) {
rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
goto free_con;
@@ -1781,13 +1796,9 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
goto reject_w_econnreset;
}
recon_cnt = le16_to_cpu(msg->recon_cnt);
- srv = get_or_create_srv(ctx, &msg->paths_uuid);
- /*
- * "refcount == 0" happens if a previous thread calls get_or_create_srv
- * allocate srv, but chunks of srv are not allocated yet.
- */
- if (!srv || refcount_read(&srv->refcount) == 0) {
- err = -ENOMEM;
+ srv = get_or_create_srv(ctx, &msg->paths_uuid, msg->first_conn);
+ if (IS_ERR(srv)) {
+ err = PTR_ERR(srv);
goto reject_w_err;
}
mutex_lock(&srv->paths_mutex);
@@ -1862,8 +1873,8 @@ reject_w_econnreset:
return rtrs_rdma_do_reject(cm_id, -ECONNRESET);
close_and_return_err:
- close_sess(sess);
mutex_unlock(&srv->paths_mutex);
+ close_sess(sess);
return err;
}
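/*
 * Hedged sketch of the signalled-send throttling used around the
 * 'wr_cnt' counter in this file: only every queue_depth-th work request
 * is posted signalled, which is enough to drain the send queue without
 * paying for a completion per request.  Standalone illustration only;
 * not rtrs code, and 'signaled_flag' stands in for IB_SEND_SIGNALED.
 */
#include <stdatomic.h>

static unsigned int next_send_flags(atomic_uint *wr_cnt, unsigned int queue_depth,
				    unsigned int signaled_flag)
{
	unsigned int v = atomic_fetch_add(wr_cnt, 1) + 1;	/* like atomic_inc_return() */

	/* One signalled send per queue_depth posted requests. */
	return (v % queue_depth) ? 0 : signaled_flag;
}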
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index 2e3a849e0a77..d13aff0aa816 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -182,16 +182,16 @@ int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
u32 imm_data, enum ib_send_flags flags,
struct ib_send_wr *head)
{
- struct ib_send_wr wr;
+ struct ib_rdma_wr wr;
- wr = (struct ib_send_wr) {
- .wr_cqe = cqe,
- .send_flags = flags,
- .opcode = IB_WR_RDMA_WRITE_WITH_IMM,
- .ex.imm_data = cpu_to_be32(imm_data),
+ wr = (struct ib_rdma_wr) {
+ .wr.wr_cqe = cqe,
+ .wr.send_flags = flags,
+ .wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM,
+ .wr.ex.imm_data = cpu_to_be32(imm_data),
};
- return rtrs_post_send(con->qp, head, &wr);
+ return rtrs_post_send(con->qp, head, &wr.wr);
}
EXPORT_SYMBOL_GPL(rtrs_post_rdma_write_imm_empty);
@@ -231,14 +231,14 @@ static int create_cq(struct rtrs_con *con, int cq_vector, u16 cq_size,
}
static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
- u16 wr_queue_size, u32 max_sge)
+ u32 max_send_wr, u32 max_recv_wr, u32 max_sge)
{
struct ib_qp_init_attr init_attr = {NULL};
struct rdma_cm_id *cm_id = con->cm_id;
int ret;
- init_attr.cap.max_send_wr = wr_queue_size;
- init_attr.cap.max_recv_wr = wr_queue_size;
+ init_attr.cap.max_send_wr = max_send_wr;
+ init_attr.cap.max_recv_wr = max_recv_wr;
init_attr.cap.max_recv_sge = 1;
init_attr.event_handler = qp_event_handler;
init_attr.qp_context = con;
@@ -260,8 +260,9 @@ static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
}
int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
- u32 max_send_sge, int cq_vector, u16 cq_size,
- u16 wr_queue_size, enum ib_poll_context poll_ctx)
+ u32 max_send_sge, int cq_vector, int cq_size,
+ u32 max_send_wr, u32 max_recv_wr,
+ enum ib_poll_context poll_ctx)
{
int err;
@@ -269,7 +270,8 @@ int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
if (err)
return err;
- err = create_qp(con, sess->dev->ib_pd, wr_queue_size, max_send_sge);
+ err = create_qp(con, sess->dev->ib_pd, max_send_wr, max_recv_wr,
+ max_send_sge);
if (err) {
ib_free_cq(con->cq);
con->cq = NULL;
@@ -308,7 +310,7 @@ void rtrs_send_hb_ack(struct rtrs_sess *sess)
imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0);
err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
- IB_SEND_SIGNALED, NULL);
+ 0, NULL);
if (err) {
sess->hb_err_handler(usr_con);
return;
@@ -337,7 +339,7 @@ static void hb_work(struct work_struct *work)
}
imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
- IB_SEND_SIGNALED, NULL);
+ 0, NULL);
if (err) {
sess->hb_err_handler(usr_con);
return;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 5492b66a8153..31f8aa2c40ed 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -3628,7 +3628,7 @@ static ssize_t srp_create_target(struct device *dev,
struct srp_rdma_ch *ch;
struct srp_device *srp_dev = host->srp_dev;
struct ib_device *ibdev = srp_dev->dev;
- int ret, node_idx, node, cpu, i;
+ int ret, i, ch_idx;
unsigned int max_sectors_per_mr, mr_per_cmd = 0;
bool multich = false;
uint32_t max_iu_len;
@@ -3753,81 +3753,61 @@ static ssize_t srp_create_target(struct device *dev,
goto out;
ret = -ENOMEM;
- if (target->ch_count == 0)
+ if (target->ch_count == 0) {
target->ch_count =
- max_t(unsigned int, num_online_nodes(),
- min(ch_count ?:
- min(4 * num_online_nodes(),
- ibdev->num_comp_vectors),
- num_online_cpus()));
+ min(ch_count ?:
+ max(4 * num_online_nodes(),
+ ibdev->num_comp_vectors),
+ num_online_cpus());
+ }
+
target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
GFP_KERNEL);
if (!target->ch)
goto out;
- node_idx = 0;
- for_each_online_node(node) {
- const int ch_start = (node_idx * target->ch_count /
- num_online_nodes());
- const int ch_end = ((node_idx + 1) * target->ch_count /
- num_online_nodes());
- const int cv_start = node_idx * ibdev->num_comp_vectors /
- num_online_nodes();
- const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
- num_online_nodes();
- int cpu_idx = 0;
-
- for_each_online_cpu(cpu) {
- if (cpu_to_node(cpu) != node)
- continue;
- if (ch_start + cpu_idx >= ch_end)
- continue;
- ch = &target->ch[ch_start + cpu_idx];
- ch->target = target;
- ch->comp_vector = cv_start == cv_end ? cv_start :
- cv_start + cpu_idx % (cv_end - cv_start);
- spin_lock_init(&ch->lock);
- INIT_LIST_HEAD(&ch->free_tx);
- ret = srp_new_cm_id(ch);
- if (ret)
- goto err_disconnect;
+ for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
+ ch = &target->ch[ch_idx];
+ ch->target = target;
+ ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
+ spin_lock_init(&ch->lock);
+ INIT_LIST_HEAD(&ch->free_tx);
+ ret = srp_new_cm_id(ch);
+ if (ret)
+ goto err_disconnect;
- ret = srp_create_ch_ib(ch);
- if (ret)
- goto err_disconnect;
+ ret = srp_create_ch_ib(ch);
+ if (ret)
+ goto err_disconnect;
- ret = srp_alloc_req_data(ch);
- if (ret)
- goto err_disconnect;
+ ret = srp_alloc_req_data(ch);
+ if (ret)
+ goto err_disconnect;
- ret = srp_connect_ch(ch, max_iu_len, multich);
- if (ret) {
- char dst[64];
-
- if (target->using_rdma_cm)
- snprintf(dst, sizeof(dst), "%pIS",
- &target->rdma_cm.dst);
- else
- snprintf(dst, sizeof(dst), "%pI6",
- target->ib_cm.orig_dgid.raw);
- shost_printk(KERN_ERR, target->scsi_host,
- PFX "Connection %d/%d to %s failed\n",
- ch_start + cpu_idx,
- target->ch_count, dst);
- if (node_idx == 0 && cpu_idx == 0) {
- goto free_ch;
- } else {
- srp_free_ch_ib(target, ch);
- srp_free_req_data(target, ch);
- target->ch_count = ch - target->ch;
- goto connected;
- }
- }
+ ret = srp_connect_ch(ch, max_iu_len, multich);
+ if (ret) {
+ char dst[64];
- multich = true;
- cpu_idx++;
+ if (target->using_rdma_cm)
+ snprintf(dst, sizeof(dst), "%pIS",
+ &target->rdma_cm.dst);
+ else
+ snprintf(dst, sizeof(dst), "%pI6",
+ target->ib_cm.orig_dgid.raw);
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "Connection %d/%d to %s failed\n",
+ ch_idx,
+ target->ch_count, dst);
+ if (ch_idx == 0) {
+ goto free_ch;
+ } else {
+ srp_free_ch_ib(target, ch);
+ srp_free_req_data(target, ch);
+ target->ch_count = ch - target->ch;
+ goto connected;
+ }
}
- node_idx++;
+ multich = true;
}
connected:
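/*
 * Hedged sketch of the two simplifications in the srp_create_target()
 * hunk above: the channel count defaults to
 * min(requested ?: max(4 * nodes, completion vectors), online CPUs),
 * and channels are then spread round-robin over the device's completion
 * vectors.  Standalone illustration only; names are made up.
 */
static unsigned int default_ch_count(unsigned int requested, unsigned int online_nodes,
				     unsigned int comp_vectors, unsigned int online_cpus)
{
	unsigned int n = requested;

	if (!n)
		n = (4 * online_nodes > comp_vectors) ? 4 * online_nodes : comp_vectors;
	return (n < online_cpus) ? n : online_cpus;
}

static void assign_comp_vectors(unsigned int *comp_vector, unsigned int ch_count,
				unsigned int comp_vectors)
{
	unsigned int i;

	for (i = 0; i < ch_count; i++)
		comp_vector[i] = i % comp_vectors;	/* round robin over vectors */
}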
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index a2b5fbba2d3b..430dc6975004 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -456,7 +456,7 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev,
if (IS_ERR(abspam))
return PTR_ERR(abspam);
- for (i = 0; i < joydev->nabs; i++) {
+ for (i = 0; i < len && i < joydev->nabs; i++) {
if (abspam[i] > ABS_MAX) {
retval = -EINVAL;
goto out;
@@ -480,6 +480,9 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
int i;
int retval = 0;
+ if (len % sizeof(*keypam))
+ return -EINVAL;
+
len = min(len, sizeof(joydev->keypam));
/* Validate the map. */
@@ -487,7 +490,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
if (IS_ERR(keypam))
return PTR_ERR(keypam);
- for (i = 0; i < joydev->nkey; i++) {
+ for (i = 0; i < (len / 2) && i < joydev->nkey; i++) {
if (keypam[i] > KEY_MAX || keypam[i] < BTN_MISC) {
retval = -EINVAL;
goto out;
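/*
 * Hedged sketch of the bound the two joydev loops above now enforce:
 * validate only the entries the user actually supplied (len bytes),
 * never the full table, and reject lengths that are not a multiple of
 * the entry size.  Illustrative only; not joydev code.
 */
#include <stddef.h>
#include <stdint.h>

static int validate_button_map(const uint16_t *map, size_t len_bytes,
			       size_t table_entries, uint16_t min_code, uint16_t max_code)
{
	size_t i, n;

	if (len_bytes % sizeof(*map))
		return -1;			/* partial entry: reject */

	n = len_bytes / sizeof(*map);
	if (n > table_entries)
		n = table_entries;		/* never walk past the table */

	for (i = 0; i < n; i++) {
		if (map[i] < min_code || map[i] > max_code)
			return -1;
	}
	return 0;
}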
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index b080f0cfb068..5e38899058c1 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -382,4 +382,11 @@ config JOYSTICK_FSIA6B
To compile this driver as a module, choose M here: the
module will be called fsia6b.
+config JOYSTICK_N64
+ bool "N64 controller"
+ depends on MACH_NINTENDO64
+ help
+ Say Y here if you want to enable support for the four
+ built-in controller ports on the Nintendo 64 console.
+
endif
diff --git a/drivers/input/joystick/Makefile b/drivers/input/joystick/Makefile
index 58232b3057d3..31d720c9e493 100644
--- a/drivers/input/joystick/Makefile
+++ b/drivers/input/joystick/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_JOYSTICK_INTERACT) += interact.o
obj-$(CONFIG_JOYSTICK_JOYDUMP) += joydump.o
obj-$(CONFIG_JOYSTICK_MAGELLAN) += magellan.o
obj-$(CONFIG_JOYSTICK_MAPLE) += maplecontrol.o
+obj-$(CONFIG_JOYSTICK_N64) += n64joy.o
obj-$(CONFIG_JOYSTICK_PSXPAD_SPI) += psxpad-spi.o
obj-$(CONFIG_JOYSTICK_PXRC) += pxrc.o
obj-$(CONFIG_JOYSTICK_SIDEWINDER) += sidewinder.o
@@ -37,4 +38,3 @@ obj-$(CONFIG_JOYSTICK_WARRIOR) += warrior.o
obj-$(CONFIG_JOYSTICK_WALKERA0701) += walkera0701.o
obj-$(CONFIG_JOYSTICK_XPAD) += xpad.o
obj-$(CONFIG_JOYSTICK_ZHENHUA) += zhenhua.o
-
diff --git a/drivers/input/joystick/n64joy.c b/drivers/input/joystick/n64joy.c
new file mode 100644
index 000000000000..8bcc529942bc
--- /dev/null
+++ b/drivers/input/joystick/n64joy.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for the four N64 controllers.
+ *
+ * Copyright (c) 2021 Lauri Kasanen
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/limits.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+MODULE_AUTHOR("Lauri Kasanen <cand@gmx.com>");
+MODULE_DESCRIPTION("Driver for N64 controllers");
+MODULE_LICENSE("GPL");
+
+#define PIF_RAM 0x1fc007c0
+
+#define SI_DRAM_REG 0
+#define SI_READ_REG 1
+#define SI_WRITE_REG 4
+#define SI_STATUS_REG 6
+
+#define SI_STATUS_DMA_BUSY BIT(0)
+#define SI_STATUS_IO_BUSY BIT(1)
+
+#define N64_CONTROLLER_ID 0x0500
+
+#define MAX_CONTROLLERS 4
+
+static const char *n64joy_phys[MAX_CONTROLLERS] = {
+ "n64joy/port0",
+ "n64joy/port1",
+ "n64joy/port2",
+ "n64joy/port3",
+};
+
+struct n64joy_priv {
+ u64 si_buf[8] ____cacheline_aligned;
+ struct timer_list timer;
+ struct mutex n64joy_mutex;
+ struct input_dev *n64joy_dev[MAX_CONTROLLERS];
+ u32 __iomem *reg_base;
+ u8 n64joy_opened;
+};
+
+struct joydata {
+ unsigned int: 16; /* unused */
+ unsigned int err: 2;
+ unsigned int: 14; /* unused */
+
+ union {
+ u32 data;
+
+ struct {
+ unsigned int a: 1;
+ unsigned int b: 1;
+ unsigned int z: 1;
+ unsigned int start: 1;
+ unsigned int up: 1;
+ unsigned int down: 1;
+ unsigned int left: 1;
+ unsigned int right: 1;
+ unsigned int: 2; /* unused */
+ unsigned int l: 1;
+ unsigned int r: 1;
+ unsigned int c_up: 1;
+ unsigned int c_down: 1;
+ unsigned int c_left: 1;
+ unsigned int c_right: 1;
+ signed int x: 8;
+ signed int y: 8;
+ };
+ };
+};
+
+static void n64joy_write_reg(u32 __iomem *reg_base, const u8 reg, const u32 value)
+{
+ writel(value, reg_base + reg);
+}
+
+static u32 n64joy_read_reg(u32 __iomem *reg_base, const u8 reg)
+{
+ return readl(reg_base + reg);
+}
+
+static void n64joy_wait_si_dma(u32 __iomem *reg_base)
+{
+ while (n64joy_read_reg(reg_base, SI_STATUS_REG) &
+ (SI_STATUS_DMA_BUSY | SI_STATUS_IO_BUSY))
+ cpu_relax();
+}
+
+static void n64joy_exec_pif(struct n64joy_priv *priv, const u64 in[8])
+{
+ unsigned long flags;
+
+ dma_cache_wback_inv((unsigned long) in, 8 * 8);
+ dma_cache_inv((unsigned long) priv->si_buf, 8 * 8);
+
+ local_irq_save(flags);
+
+ n64joy_wait_si_dma(priv->reg_base);
+
+ barrier();
+ n64joy_write_reg(priv->reg_base, SI_DRAM_REG, virt_to_phys(in));
+ barrier();
+ n64joy_write_reg(priv->reg_base, SI_WRITE_REG, PIF_RAM);
+ barrier();
+
+ n64joy_wait_si_dma(priv->reg_base);
+
+ barrier();
+ n64joy_write_reg(priv->reg_base, SI_DRAM_REG, virt_to_phys(priv->si_buf));
+ barrier();
+ n64joy_write_reg(priv->reg_base, SI_READ_REG, PIF_RAM);
+ barrier();
+
+ n64joy_wait_si_dma(priv->reg_base);
+
+ local_irq_restore(flags);
+}
+
+static const u64 polldata[] ____cacheline_aligned = {
+ 0xff010401ffffffff,
+ 0xff010401ffffffff,
+ 0xff010401ffffffff,
+ 0xff010401ffffffff,
+ 0xfe00000000000000,
+ 0,
+ 0,
+ 1
+};
+
+static void n64joy_poll(struct timer_list *t)
+{
+ const struct joydata *data;
+ struct n64joy_priv *priv = container_of(t, struct n64joy_priv, timer);
+ struct input_dev *dev;
+ u32 i;
+
+ n64joy_exec_pif(priv, polldata);
+
+ data = (struct joydata *) priv->si_buf;
+
+ for (i = 0; i < MAX_CONTROLLERS; i++) {
+ if (!priv->n64joy_dev[i])
+ continue;
+
+ dev = priv->n64joy_dev[i];
+
+ /* d-pad */
+ input_report_key(dev, BTN_DPAD_UP, data[i].up);
+ input_report_key(dev, BTN_DPAD_DOWN, data[i].down);
+ input_report_key(dev, BTN_DPAD_LEFT, data[i].left);
+ input_report_key(dev, BTN_DPAD_RIGHT, data[i].right);
+
+ /* c buttons */
+ input_report_key(dev, BTN_FORWARD, data[i].c_up);
+ input_report_key(dev, BTN_BACK, data[i].c_down);
+ input_report_key(dev, BTN_LEFT, data[i].c_left);
+ input_report_key(dev, BTN_RIGHT, data[i].c_right);
+
+ /* matching buttons */
+ input_report_key(dev, BTN_START, data[i].start);
+ input_report_key(dev, BTN_Z, data[i].z);
+
+ /* remaining ones: a, b, l, r */
+ input_report_key(dev, BTN_0, data[i].a);
+ input_report_key(dev, BTN_1, data[i].b);
+ input_report_key(dev, BTN_2, data[i].l);
+ input_report_key(dev, BTN_3, data[i].r);
+
+ input_report_abs(dev, ABS_X, data[i].x);
+ input_report_abs(dev, ABS_Y, data[i].y);
+
+ input_sync(dev);
+ }
+
+ mod_timer(&priv->timer, jiffies + msecs_to_jiffies(16));
+}
+
+static int n64joy_open(struct input_dev *dev)
+{
+ struct n64joy_priv *priv = input_get_drvdata(dev);
+ int err;
+
+ err = mutex_lock_interruptible(&priv->n64joy_mutex);
+ if (err)
+ return err;
+
+ if (!priv->n64joy_opened) {
+ /*
+ * We could use the vblank irq, but it's not important if
+ * the poll point slightly changes.
+ */
+ timer_setup(&priv->timer, n64joy_poll, 0);
+ mod_timer(&priv->timer, jiffies + msecs_to_jiffies(16));
+ }
+
+ priv->n64joy_opened++;
+
+ mutex_unlock(&priv->n64joy_mutex);
+ return err;
+}
+
+static void n64joy_close(struct input_dev *dev)
+{
+ struct n64joy_priv *priv = input_get_drvdata(dev);
+
+ mutex_lock(&priv->n64joy_mutex);
+ if (!--priv->n64joy_opened)
+ del_timer_sync(&priv->timer);
+ mutex_unlock(&priv->n64joy_mutex);
+}
+
+static const u64 __initconst scandata[] ____cacheline_aligned = {
+ 0xff010300ffffffff,
+ 0xff010300ffffffff,
+ 0xff010300ffffffff,
+ 0xff010300ffffffff,
+ 0xfe00000000000000,
+ 0,
+ 0,
+ 1
+};
+
+/*
+ * The target device is embedded and RAM-constrained. We save RAM
+ * by initializing in __init code that gets dropped late in boot.
+ * For the same reason there is no module or unloading support.
+ */
+static int __init n64joy_probe(struct platform_device *pdev)
+{
+ const struct joydata *data;
+ struct n64joy_priv *priv;
+ struct input_dev *dev;
+ int err = 0;
+ u32 i, j, found = 0;
+
+ priv = kzalloc(sizeof(struct n64joy_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ mutex_init(&priv->n64joy_mutex);
+
+ priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->reg_base)) {
+ err = PTR_ERR(priv->reg_base);
+ goto fail;
+ }
+
+ /* The controllers are not hotpluggable, so we can scan in init */
+ n64joy_exec_pif(priv, scandata);
+
+ data = (struct joydata *) priv->si_buf;
+
+ for (i = 0; i < MAX_CONTROLLERS; i++) {
+ if (!data[i].err && data[i].data >> 16 == N64_CONTROLLER_ID) {
+ found++;
+
+ dev = priv->n64joy_dev[i] = input_allocate_device();
+ if (!priv->n64joy_dev[i]) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ input_set_drvdata(dev, priv);
+
+ dev->name = "N64 controller";
+ dev->phys = n64joy_phys[i];
+ dev->id.bustype = BUS_HOST;
+ dev->id.vendor = 0;
+ dev->id.product = data[i].data >> 16;
+ dev->id.version = 0;
+ dev->dev.parent = &pdev->dev;
+
+ dev->open = n64joy_open;
+ dev->close = n64joy_close;
+
+ /* d-pad */
+ input_set_capability(dev, EV_KEY, BTN_DPAD_UP);
+ input_set_capability(dev, EV_KEY, BTN_DPAD_DOWN);
+ input_set_capability(dev, EV_KEY, BTN_DPAD_LEFT);
+ input_set_capability(dev, EV_KEY, BTN_DPAD_RIGHT);
+ /* c buttons */
+ input_set_capability(dev, EV_KEY, BTN_LEFT);
+ input_set_capability(dev, EV_KEY, BTN_RIGHT);
+ input_set_capability(dev, EV_KEY, BTN_FORWARD);
+ input_set_capability(dev, EV_KEY, BTN_BACK);
+ /* matching buttons */
+ input_set_capability(dev, EV_KEY, BTN_START);
+ input_set_capability(dev, EV_KEY, BTN_Z);
+ /* remaining ones: a, b, l, r */
+ input_set_capability(dev, EV_KEY, BTN_0);
+ input_set_capability(dev, EV_KEY, BTN_1);
+ input_set_capability(dev, EV_KEY, BTN_2);
+ input_set_capability(dev, EV_KEY, BTN_3);
+
+ for (j = 0; j < 2; j++)
+ input_set_abs_params(dev, ABS_X + j,
+ S8_MIN, S8_MAX, 0, 0);
+
+ err = input_register_device(dev);
+ if (err) {
+ input_free_device(dev);
+ goto fail;
+ }
+ }
+ }
+
+ pr_info("%u controller(s) connected\n", found);
+
+ if (!found)
+ return -ENODEV;
+
+ return 0;
+fail:
+ for (i = 0; i < MAX_CONTROLLERS; i++) {
+ if (!priv->n64joy_dev[i])
+ continue;
+ input_unregister_device(priv->n64joy_dev[i]);
+ }
+ return err;
+}
+
+static struct platform_driver n64joy_driver = {
+ .driver = {
+ .name = "n64joy",
+ },
+};
+
+static int __init n64joy_init(void)
+{
+ return platform_driver_probe(&n64joy_driver, n64joy_probe);
+}
+
+module_init(n64joy_init);
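
The struct joydata above overlays each controller's 8-byte PIF reply: the first word carries two error flags and the second word is the 32-bit button/axis state. The driver can express it with C bitfields because its big-endian MIPS target allocates bitfields from the most significant bit down. Below is a stand-alone sketch of the same decoding with explicit shifts; the bit positions are inferred from the struct's field order and all names here are hypothetical, not part of the driver.

#include <stdint.h>
#include <stdio.h>

struct n64_state {
	unsigned a, b, z, start;
	unsigned d_up, d_down, d_left, d_right;
	unsigned l, r, c_up, c_down, c_left, c_right;
	int8_t x, y;			/* signed analog stick */
};

static struct n64_state n64_decode(uint32_t w)
{
	struct n64_state s = {
		.a = (w >> 31) & 1, .b = (w >> 30) & 1,
		.z = (w >> 29) & 1, .start = (w >> 28) & 1,
		.d_up = (w >> 27) & 1, .d_down = (w >> 26) & 1,
		.d_left = (w >> 25) & 1, .d_right = (w >> 24) & 1,
		/* bits 23-22 unused */
		.l = (w >> 21) & 1, .r = (w >> 20) & 1,
		.c_up = (w >> 19) & 1, .c_down = (w >> 18) & 1,
		.c_left = (w >> 17) & 1, .c_right = (w >> 16) & 1,
		.x = (int8_t)(w >> 8), .y = (int8_t)w,
	};
	return s;
}

int main(void)
{
	/* Example word: A pressed, stick at (+40, +5). */
	struct n64_state s = n64_decode(0x80002805);

	printf("a=%u x=%d y=%d\n", s.a, s.x, s.y);
	return 0;
}
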
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 0687f0ed60b8..9f0d07dcbf06 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -215,9 +215,17 @@ static const struct xpad_device {
{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -296,6 +304,10 @@ static const struct xpad_device {
{ 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
+ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
+ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
@@ -429,8 +441,12 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
+ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
+ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
{ }
};
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 2b321c17054a..32d15809ae58 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -446,7 +446,7 @@ config KEYBOARD_MPR121
config KEYBOARD_SNVS_PWRKEY
tristate "IMX SNVS Power Key Driver"
- depends on ARCH_MXC || COMPILE_TEST
+ depends on ARCH_MXC || (COMPILE_TEST && HAS_IOMEM)
depends on OF
help
This is the snvs powerkey driver for the Freescale i.MX application
@@ -685,7 +685,7 @@ config KEYBOARD_OMAP
config KEYBOARD_OMAP4
tristate "TI OMAP4+ keypad support"
- depends on OF || ARCH_OMAP2PLUS
+ depends on (OF && HAS_IOMEM) || ARCH_OMAP2PLUS
select INPUT_MATRIXKMAP
help
Say Y here if you want to use the OMAP4+ keypad.
@@ -773,7 +773,7 @@ config KEYBOARD_CAP11XX
config KEYBOARD_BCM
tristate "Broadcom keypad driver"
- depends on OF && HAVE_CLK
+ depends on OF && HAVE_CLK && HAS_IOMEM
select INPUT_MATRIXKMAP
default ARCH_BCM_CYGNUS
help
diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c
index d22223154177..eda1b23002b5 100644
--- a/drivers/input/keyboard/applespi.c
+++ b/drivers/input/keyboard/applespi.c
@@ -48,6 +48,7 @@
#include <linux/efi.h>
#include <linux/input.h>
#include <linux/input/mt.h>
+#include <linux/ktime.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/spinlock.h>
@@ -409,7 +410,7 @@ struct applespi_data {
unsigned int cmd_msg_cntr;
/* lock to protect the above parameters and flags below */
spinlock_t cmd_msg_lock;
- bool cmd_msg_queued;
+ ktime_t cmd_msg_queued;
enum applespi_evt_type cmd_evt_type;
struct led_classdev backlight_info;
@@ -729,7 +730,7 @@ static void applespi_msg_complete(struct applespi_data *applespi,
wake_up_all(&applespi->drain_complete);
if (is_write_msg) {
- applespi->cmd_msg_queued = false;
+ applespi->cmd_msg_queued = 0;
applespi_send_cmd_msg(applespi);
}
@@ -748,6 +749,8 @@ static void applespi_async_write_complete(void *context)
applespi->tx_status,
APPLESPI_STATUS_SIZE);
+ udelay(SPI_RW_CHG_DELAY_US);
+
if (!applespi_check_write_status(applespi, applespi->wr_m.status)) {
/*
* If we got an error, we presumably won't get the expected
@@ -771,8 +774,16 @@ static int applespi_send_cmd_msg(struct applespi_data *applespi)
return 0;
/* check whether send is in progress */
- if (applespi->cmd_msg_queued)
- return 0;
+ if (applespi->cmd_msg_queued) {
+ if (ktime_ms_delta(ktime_get(), applespi->cmd_msg_queued) < 1000)
+ return 0;
+
+ dev_warn(&applespi->spi->dev, "Command %d timed out\n",
+ applespi->cmd_evt_type);
+
+ applespi->cmd_msg_queued = 0;
+ applespi->write_active = false;
+ }
/* set up packet */
memset(packet, 0, APPLESPI_PACKET_SIZE);
@@ -869,7 +880,7 @@ static int applespi_send_cmd_msg(struct applespi_data *applespi)
return sts;
}
- applespi->cmd_msg_queued = true;
+ applespi->cmd_msg_queued = ktime_get_coarse();
applespi->write_active = true;
return 0;
@@ -1921,7 +1932,7 @@ static int __maybe_unused applespi_resume(struct device *dev)
applespi->drain = false;
applespi->have_cl_led_on = false;
applespi->have_bl_level = 0;
- applespi->cmd_msg_queued = false;
+ applespi->cmd_msg_queued = 0;
applespi->read_active = false;
applespi->write_active = false;
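
The applespi change above turns cmd_msg_queued from a flag into a ktime_t timestamp so that a command stuck in flight for more than a second is warned about and dropped instead of blocking all further writes. The pattern in isolation looks like the minimal sketch below; the struct and function names are hypothetical, while ktime_get(), ktime_get_coarse() and ktime_ms_delta() are the kernel helpers the patch actually uses.

#include <linux/ktime.h>
#include <linux/types.h>

#define DEMO_CMD_TIMEOUT_MS	1000

struct demo_ctx {
	ktime_t queued;			/* 0 means no command in flight */
};

/* Decide whether a new command may be queued now. */
static bool demo_may_send(struct demo_ctx *ctx)
{
	if (!ctx->queued)
		return true;

	if (ktime_ms_delta(ktime_get(), ctx->queued) < DEMO_CMD_TIMEOUT_MS)
		return false;		/* previous command still pending */

	ctx->queued = 0;		/* previous command timed out: drop it */
	return true;
}

static void demo_mark_queued(struct demo_ctx *ctx)
{
	/* The coarse clock is cheap and accurate enough for a 1 s timeout. */
	ctx->queued = ktime_get_coarse();
}
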
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index b379ed762878..38457d9641bd 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -27,6 +27,8 @@
#include <asm/unaligned.h>
+#define MAX_NUM_TOP_ROW_KEYS 15
+
/**
* struct cros_ec_keyb - Structure representing EC keyboard device
*
@@ -42,6 +44,9 @@
* @idev: The input device for the matrix keys.
* @bs_idev: The input device for non-matrix buttons and switches (or NULL).
* @notifier: interrupt event notifier for transport devices
+ * @function_row_physmap: An array of the encoded rows/columns for the top
+ * row function keys, in order from left to right
+ * @num_function_row_keys: The number of top row keys in a custom keyboard
*/
struct cros_ec_keyb {
unsigned int rows;
@@ -58,6 +63,9 @@ struct cros_ec_keyb {
struct input_dev *idev;
struct input_dev *bs_idev;
struct notifier_block notifier;
+
+ u16 function_row_physmap[MAX_NUM_TOP_ROW_KEYS];
+ size_t num_function_row_keys;
};
/**
@@ -527,6 +535,11 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
struct input_dev *idev;
const char *phys;
int err;
+ struct property *prop;
+ const __be32 *p;
+ u16 *physmap;
+ u32 key_pos;
+ int row, col;
err = matrix_keypad_parse_properties(dev, &ckdev->rows, &ckdev->cols);
if (err)
@@ -578,6 +591,21 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
ckdev->idev = idev;
cros_ec_keyb_compute_valid_keys(ckdev);
+ physmap = ckdev->function_row_physmap;
+ of_property_for_each_u32(dev->of_node, "function-row-physmap",
+ prop, p, key_pos) {
+ if (ckdev->num_function_row_keys == MAX_NUM_TOP_ROW_KEYS) {
+ dev_warn(dev, "Only support up to %d top row keys\n",
+ MAX_NUM_TOP_ROW_KEYS);
+ break;
+ }
+ row = KEY_ROW(key_pos);
+ col = KEY_COL(key_pos);
+ *physmap = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
+ physmap++;
+ ckdev->num_function_row_keys++;
+ }
+
err = input_register_device(ckdev->idev);
if (err) {
dev_err(dev, "cannot register input device\n");
@@ -587,6 +615,51 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
return 0;
}
+static ssize_t function_row_physmap_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t size = 0;
+ int i;
+ struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
+ u16 *physmap = ckdev->function_row_physmap;
+
+ for (i = 0; i < ckdev->num_function_row_keys; i++)
+ size += scnprintf(buf + size, PAGE_SIZE - size,
+ "%s%02X", size ? " " : "", physmap[i]);
+ if (size)
+ size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+
+ return size;
+}
+
+static DEVICE_ATTR_RO(function_row_physmap);
+
+static struct attribute *cros_ec_keyb_attrs[] = {
+ &dev_attr_function_row_physmap.attr,
+ NULL,
+};
+
+static umode_t cros_ec_keyb_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct cros_ec_keyb *ckdev = dev_get_drvdata(dev);
+
+ if (attr == &dev_attr_function_row_physmap.attr &&
+ !ckdev->num_function_row_keys)
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group cros_ec_keyb_attr_group = {
+ .is_visible = cros_ec_keyb_attr_is_visible,
+ .attrs = cros_ec_keyb_attrs,
+};
+
+
static int cros_ec_keyb_probe(struct platform_device *pdev)
{
struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
@@ -617,6 +690,12 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
return err;
}
+ err = devm_device_add_group(dev, &cros_ec_keyb_attr_group);
+ if (err) {
+ dev_err(dev, "failed to create attributes. err=%d\n", err);
+ return err;
+ }
+
ckdev->notifier.notifier_call = cros_ec_keyb_work;
err = blocking_notifier_chain_register(&ckdev->ec->event_notifier,
&ckdev->notifier);
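
The new function-row-physmap parsing assumes the standard matrix-keypad cell encoding (row in bits 31-24, column in bits 23-16) and converts each cell into the scan code that the sysfs attribute later prints as space-separated hex. A stand-alone sketch under that assumption, with made-up cells and an 8-column keyboard:

#include <stdint.h>
#include <stdio.h>

/* Restated here so the sketch is self-contained; this matches what the
 * driver gets from include/linux/input/matrix_keypad.h. */
#define KEY_ROW(k)			(((k) >> 24) & 0xff)
#define KEY_COL(k)			(((k) >> 16) & 0xff)
#define MATRIX_SCAN_CODE(row, col, sh)	(((row) << (sh)) + (col))

int main(void)
{
	/* Hypothetical DT cells, listed left to right across the top row. */
	const uint32_t physmap[] = { 0x00020000, 0x01050000, 0x03010000 };
	const unsigned int row_shift = 3;	/* get_count_order(8 columns) */
	unsigned int i;

	for (i = 0; i < sizeof(physmap) / sizeof(physmap[0]); i++)
		printf("%s%02X", i ? " " : "",
		       MATRIX_SCAN_CODE(KEY_ROW(physmap[i]),
					KEY_COL(physmap[i]), row_shift));
	printf("\n");		/* prints "02 0D 19" for the cells above */
	return 0;
}
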
diff --git a/drivers/input/keyboard/locomokbd.c b/drivers/input/keyboard/locomokbd.c
index daf6a753ca61..dae053596572 100644
--- a/drivers/input/keyboard/locomokbd.c
+++ b/drivers/input/keyboard/locomokbd.c
@@ -304,7 +304,7 @@ static int locomokbd_probe(struct locomo_dev *dev)
return err;
}
-static int locomokbd_remove(struct locomo_dev *dev)
+static void locomokbd_remove(struct locomo_dev *dev)
{
struct locomokbd *locomokbd = locomo_get_drvdata(dev);
@@ -318,8 +318,6 @@ static int locomokbd_remove(struct locomo_dev *dev)
release_mem_region((unsigned long) dev->mapbase, dev->length);
kfree(locomokbd);
-
- return 0;
}
static struct locomo_driver keyboard_driver = {
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index b17ac2a295b9..43375b38ee59 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -60,6 +60,8 @@
((((dbms) * 1000) / ((1 << ((ptv) + 1)) * (1000000 / 32768))) - 1)
#define OMAP4_VAL_DEBOUNCINGTIME_16MS \
OMAP4_KEYPAD_DEBOUNCINGTIME_MS(16, OMAP4_KEYPAD_PTV_DIV_128)
+#define OMAP4_KEYPAD_AUTOIDLE_MS 50 /* Approximate measured time */
+#define OMAP4_KEYPAD_IDLE_CHECK_MS (OMAP4_KEYPAD_AUTOIDLE_MS / 2)
enum {
KBD_REVISION_OMAP4 = 0,
@@ -71,6 +73,7 @@ struct omap4_keypad {
void __iomem *base;
unsigned int irq;
+ struct mutex lock; /* for key scan */
unsigned int rows;
unsigned int cols;
@@ -78,7 +81,7 @@ struct omap4_keypad {
u32 irqreg_offset;
unsigned int row_shift;
bool no_autorepeat;
- unsigned char key_state[8];
+ u64 keys;
unsigned short *keymap;
};
@@ -107,6 +110,55 @@ static void kbd_write_irqreg(struct omap4_keypad *keypad_data,
keypad_data->base + keypad_data->irqreg_offset + offset);
}
+static int omap4_keypad_report_keys(struct omap4_keypad *keypad_data,
+ u64 keys, bool down)
+{
+ struct input_dev *input_dev = keypad_data->input;
+ unsigned int col, row, code;
+ DECLARE_BITMAP(mask, 64);
+ unsigned long bit;
+ int events = 0;
+
+ bitmap_from_u64(mask, keys);
+
+ for_each_set_bit(bit, mask, keypad_data->rows * BITS_PER_BYTE) {
+ row = bit / BITS_PER_BYTE;
+ col = bit % BITS_PER_BYTE;
+ code = MATRIX_SCAN_CODE(row, col, keypad_data->row_shift);
+
+ input_event(input_dev, EV_MSC, MSC_SCAN, code);
+ input_report_key(input_dev, keypad_data->keymap[code], down);
+
+ events++;
+ }
+
+ if (events)
+ input_sync(input_dev);
+
+ return events;
+}
+
+static void omap4_keypad_scan_keys(struct omap4_keypad *keypad_data, u64 keys)
+{
+ u64 changed;
+
+ mutex_lock(&keypad_data->lock);
+
+ changed = keys ^ keypad_data->keys;
+
+ /*
+ * Report key-up events separately and first. This matters in case we
+ * lost a key-up interrupt and are only now catching up.
+ */
+ omap4_keypad_report_keys(keypad_data, changed & ~keys, false);
+
+ /* Report key down events */
+ omap4_keypad_report_keys(keypad_data, changed & keys, true);
+
+ keypad_data->keys = keys;
+
+ mutex_unlock(&keypad_data->lock);
+}
/* Interrupt handlers */
static irqreturn_t omap4_keypad_irq_handler(int irq, void *dev_id)
@@ -122,48 +174,44 @@ static irqreturn_t omap4_keypad_irq_handler(int irq, void *dev_id)
static irqreturn_t omap4_keypad_irq_thread_fn(int irq, void *dev_id)
{
struct omap4_keypad *keypad_data = dev_id;
- struct input_dev *input_dev = keypad_data->input;
- unsigned char key_state[ARRAY_SIZE(keypad_data->key_state)];
- unsigned int col, row, code, changed;
- u32 *new_state = (u32 *) key_state;
-
- *new_state = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE31_0);
- *(new_state + 1) = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE63_32);
-
- for (row = 0; row < keypad_data->rows; row++) {
- changed = key_state[row] ^ keypad_data->key_state[row];
- if (!changed)
- continue;
-
- for (col = 0; col < keypad_data->cols; col++) {
- if (changed & (1 << col)) {
- code = MATRIX_SCAN_CODE(row, col,
- keypad_data->row_shift);
- input_event(input_dev, EV_MSC, MSC_SCAN, code);
- input_report_key(input_dev,
- keypad_data->keymap[code],
- key_state[row] & (1 << col));
- }
- }
+ struct device *dev = keypad_data->input->dev.parent;
+ u32 low, high;
+ int error;
+ u64 keys;
+
+ error = pm_runtime_get_sync(dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(dev);
+ return IRQ_NONE;
}
- input_sync(input_dev);
+ low = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE31_0);
+ high = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE63_32);
+ keys = low | (u64)high << 32;
- memcpy(keypad_data->key_state, key_state,
- sizeof(keypad_data->key_state));
+ omap4_keypad_scan_keys(keypad_data, keys);
/* clear pending interrupts */
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return IRQ_HANDLED;
}
static int omap4_keypad_open(struct input_dev *input)
{
struct omap4_keypad *keypad_data = input_get_drvdata(input);
+ struct device *dev = input->dev.parent;
+ int error;
- pm_runtime_get_sync(input->dev.parent);
+ error = pm_runtime_get_sync(dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(dev);
+ return error;
+ }
disable_irq(keypad_data->irq);
@@ -176,13 +224,15 @@ static int omap4_keypad_open(struct input_dev *input)
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
- OMAP4_DEF_IRQENABLE_EVENTEN |
- OMAP4_DEF_IRQENABLE_LONGKEY);
+ OMAP4_DEF_IRQENABLE_EVENTEN);
kbd_writel(keypad_data, OMAP4_KBD_WAKEUPENABLE,
- OMAP4_DEF_WUP_EVENT_ENA | OMAP4_DEF_WUP_LONG_KEY_ENA);
+ OMAP4_DEF_WUP_EVENT_ENA);
enable_irq(keypad_data->irq);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return 0;
}
@@ -200,14 +250,20 @@ static void omap4_keypad_stop(struct omap4_keypad *keypad_data)
static void omap4_keypad_close(struct input_dev *input)
{
- struct omap4_keypad *keypad_data;
+ struct omap4_keypad *keypad_data = input_get_drvdata(input);
+ struct device *dev = input->dev.parent;
+ int error;
+
+ error = pm_runtime_get_sync(dev);
+ if (error < 0)
+ pm_runtime_put_noidle(dev);
- keypad_data = input_get_drvdata(input);
disable_irq(keypad_data->irq);
omap4_keypad_stop(keypad_data);
enable_irq(keypad_data->irq);
- pm_runtime_put_sync(input->dev.parent);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
static int omap4_keypad_parse_dt(struct device *dev,
@@ -252,8 +308,41 @@ static int omap4_keypad_check_revision(struct device *dev,
return 0;
}
+/*
+ * Errata ID i689 "1.32 Keyboard Key Up Event Can Be Missed".
+ * An interrupt may not fire for key-up events, so keys can be left
+ * reported as stuck down; release them once the hardware has auto-idled.
+ */
+static int __maybe_unused omap4_keypad_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct omap4_keypad *keypad_data = platform_get_drvdata(pdev);
+ u32 active;
+
+ active = kbd_readl(keypad_data, OMAP4_KBD_STATEMACHINE);
+ if (active) {
+ pm_runtime_mark_last_busy(dev);
+ return -EBUSY;
+ }
+
+ omap4_keypad_scan_keys(keypad_data, 0);
+
+ return 0;
+}
+
+static const struct dev_pm_ops omap4_keypad_pm_ops = {
+ SET_RUNTIME_PM_OPS(omap4_keypad_runtime_suspend, NULL, NULL)
+};
+
+static void omap4_disable_pm(void *d)
+{
+ pm_runtime_dont_use_autosuspend(d);
+ pm_runtime_disable(d);
+}
+
static int omap4_keypad_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct omap4_keypad *keypad_data;
struct input_dev *input_dev;
struct resource *res;
@@ -271,63 +360,62 @@ static int omap4_keypad_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- keypad_data = kzalloc(sizeof(struct omap4_keypad), GFP_KERNEL);
+ keypad_data = devm_kzalloc(dev, sizeof(*keypad_data), GFP_KERNEL);
if (!keypad_data) {
- dev_err(&pdev->dev, "keypad_data memory allocation failed\n");
+ dev_err(dev, "keypad_data memory allocation failed\n");
return -ENOMEM;
}
keypad_data->irq = irq;
+ mutex_init(&keypad_data->lock);
+ platform_set_drvdata(pdev, keypad_data);
- error = omap4_keypad_parse_dt(&pdev->dev, keypad_data);
+ error = omap4_keypad_parse_dt(dev, keypad_data);
if (error)
- goto err_free_keypad;
+ return error;
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (!res) {
- dev_err(&pdev->dev, "can't request mem region\n");
- error = -EBUSY;
- goto err_free_keypad;
- }
+ keypad_data->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(keypad_data->base))
+ return PTR_ERR(keypad_data->base);
- keypad_data->base = ioremap(res->start, resource_size(res));
- if (!keypad_data->base) {
- dev_err(&pdev->dev, "can't ioremap mem resource\n");
- error = -ENOMEM;
- goto err_release_mem;
- }
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, OMAP4_KEYPAD_IDLE_CHECK_MS);
+ pm_runtime_enable(dev);
- pm_runtime_enable(&pdev->dev);
+ error = devm_add_action_or_reset(dev, omap4_disable_pm, dev);
+ if (error) {
+ dev_err(dev, "unable to register cleanup action\n");
+ return error;
+ }
/*
* Enable clocks for the keypad module so that we can read
* revision register.
*/
- error = pm_runtime_get_sync(&pdev->dev);
+ error = pm_runtime_get_sync(dev);
if (error) {
- dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n");
- pm_runtime_put_noidle(&pdev->dev);
- } else {
- error = omap4_keypad_check_revision(&pdev->dev,
- keypad_data);
- if (!error) {
- /* Ensure device does not raise interrupts */
- omap4_keypad_stop(keypad_data);
- }
- pm_runtime_put_sync(&pdev->dev);
+ dev_err(dev, "pm_runtime_get_sync() failed\n");
+ pm_runtime_put_noidle(dev);
+ return error;
+ }
+
+ error = omap4_keypad_check_revision(dev, keypad_data);
+ if (!error) {
+ /* Ensure device does not raise interrupts */
+ omap4_keypad_stop(keypad_data);
}
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
if (error)
- goto err_pm_disable;
+ return error;
/* input device allocation */
- keypad_data->input = input_dev = input_allocate_device();
- if (!input_dev) {
- error = -ENOMEM;
- goto err_pm_disable;
- }
+ keypad_data->input = input_dev = devm_input_allocate_device(dev);
+ if (!input_dev)
+ return -ENOMEM;
input_dev->name = pdev->name;
- input_dev->dev.parent = &pdev->dev;
input_dev->id.bustype = BUS_HOST;
input_dev->id.vendor = 0x0001;
input_dev->id.product = 0x0001;
@@ -344,84 +432,51 @@ static int omap4_keypad_probe(struct platform_device *pdev)
keypad_data->row_shift = get_count_order(keypad_data->cols);
max_keys = keypad_data->rows << keypad_data->row_shift;
- keypad_data->keymap = kcalloc(max_keys,
- sizeof(keypad_data->keymap[0]),
- GFP_KERNEL);
+ keypad_data->keymap = devm_kcalloc(dev,
+ max_keys,
+ sizeof(keypad_data->keymap[0]),
+ GFP_KERNEL);
if (!keypad_data->keymap) {
- dev_err(&pdev->dev, "Not enough memory for keymap\n");
- error = -ENOMEM;
- goto err_free_input;
+ dev_err(dev, "Not enough memory for keymap\n");
+ return -ENOMEM;
}
error = matrix_keypad_build_keymap(NULL, NULL,
keypad_data->rows, keypad_data->cols,
keypad_data->keymap, input_dev);
if (error) {
- dev_err(&pdev->dev, "failed to build keymap\n");
- goto err_free_keymap;
+ dev_err(dev, "failed to build keymap\n");
+ return error;
}
- error = request_threaded_irq(keypad_data->irq, omap4_keypad_irq_handler,
- omap4_keypad_irq_thread_fn, IRQF_ONESHOT,
- "omap4-keypad", keypad_data);
+ error = devm_request_threaded_irq(dev, keypad_data->irq,
+ omap4_keypad_irq_handler,
+ omap4_keypad_irq_thread_fn,
+ IRQF_ONESHOT,
+ "omap4-keypad", keypad_data);
if (error) {
- dev_err(&pdev->dev, "failed to register interrupt\n");
- goto err_free_keymap;
+ dev_err(dev, "failed to register interrupt\n");
+ return error;
}
error = input_register_device(keypad_data->input);
- if (error < 0) {
- dev_err(&pdev->dev, "failed to register input device\n");
- goto err_free_irq;
+ if (error) {
+ dev_err(dev, "failed to register input device\n");
+ return error;
}
- device_init_wakeup(&pdev->dev, true);
- error = dev_pm_set_wake_irq(&pdev->dev, keypad_data->irq);
+ device_init_wakeup(dev, true);
+ error = dev_pm_set_wake_irq(dev, keypad_data->irq);
if (error)
- dev_warn(&pdev->dev,
- "failed to set up wakeup irq: %d\n", error);
-
- platform_set_drvdata(pdev, keypad_data);
+ dev_warn(dev, "failed to set up wakeup irq: %d\n", error);
return 0;
-
-err_free_irq:
- free_irq(keypad_data->irq, keypad_data);
-err_free_keymap:
- kfree(keypad_data->keymap);
-err_free_input:
- input_free_device(input_dev);
-err_pm_disable:
- pm_runtime_disable(&pdev->dev);
- iounmap(keypad_data->base);
-err_release_mem:
- release_mem_region(res->start, resource_size(res));
-err_free_keypad:
- kfree(keypad_data);
- return error;
}
static int omap4_keypad_remove(struct platform_device *pdev)
{
- struct omap4_keypad *keypad_data = platform_get_drvdata(pdev);
- struct resource *res;
-
dev_pm_clear_wake_irq(&pdev->dev);
- free_irq(keypad_data->irq, keypad_data);
-
- pm_runtime_disable(&pdev->dev);
-
- input_unregister_device(keypad_data->input);
-
- iounmap(keypad_data->base);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- kfree(keypad_data->keymap);
- kfree(keypad_data);
-
return 0;
}
@@ -437,6 +492,7 @@ static struct platform_driver omap4_keypad_driver = {
.driver = {
.name = "omap4-keypad",
.of_match_table = omap_keypad_dt_match,
+ .pm = &omap4_keypad_pm_ops,
},
};
module_platform_driver(omap4_keypad_driver);
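
The reworked omap4 interrupt thread collapses the whole matrix into one u64, and omap4_keypad_scan_keys() reports only the changed bits, releases first and presses second, so a lost key-up interrupt (errata i689) cannot leave a key stuck once the runtime-suspend path feeds in an all-zero bitmap. A tiny worked example of that split, with arbitrary values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t prev = 0x06;		/* keys 1 and 2 were down */
	uint64_t keys = 0x03;		/* keys 0 and 1 are down now */
	uint64_t changed = keys ^ prev;

	/* Same masks as omap4_keypad_scan_keys(): key 2 released, key 0 pressed. */
	printf("release mask %#llx\n", (unsigned long long)(changed & ~keys));
	printf("press mask   %#llx\n", (unsigned long long)(changed & keys));
	return 0;
}
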
diff --git a/drivers/input/misc/ariel-pwrbutton.c b/drivers/input/misc/ariel-pwrbutton.c
index eda86ab552b9..17bbaac8b80c 100644
--- a/drivers/input/misc/ariel-pwrbutton.c
+++ b/drivers/input/misc/ariel-pwrbutton.c
@@ -149,12 +149,6 @@ static const struct of_device_id ariel_pwrbutton_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ariel_pwrbutton_of_match);
-static const struct spi_device_id ariel_pwrbutton_id_table[] = {
- { "wyse-ariel-ec-input", 0 },
- {}
-};
-MODULE_DEVICE_TABLE(spi, ariel_pwrbutton_id_table);
-
static struct spi_driver ariel_pwrbutton_driver = {
.driver = {
.name = "dell-wyse-ariel-ec-input",
diff --git a/drivers/input/misc/da7280.c b/drivers/input/misc/da7280.c
index 37568b00873d..b08610d6e575 100644
--- a/drivers/input/misc/da7280.c
+++ b/drivers/input/misc/da7280.c
@@ -863,6 +863,7 @@ static void da7280_parse_properties(struct device *dev,
gpi_str3[7] = '0' + i;
haptics->gpi_ctl[i].polarity = 0;
error = device_property_read_string(dev, gpi_str3, &str);
+ if (!error)
haptics->gpi_ctl[i].polarity =
da7280_haptic_of_gpi_pol_str(dev, str);
}
@@ -1299,11 +1300,13 @@ static int __maybe_unused da7280_resume(struct device *dev)
return retval;
}
+#ifdef CONFIG_OF
static const struct of_device_id da7280_of_match[] = {
{ .compatible = "dlg,da7280", },
{ }
};
MODULE_DEVICE_TABLE(of, da7280_of_match);
+#endif
static const struct i2c_device_id da7280_i2c_id[] = {
{ "da7280", },
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index b067bfd2699c..4a6b33bbe7ea 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -986,7 +986,7 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
case V7_PACKET_ID_TWO:
mt[1].x &= ~0x000F;
mt[1].y |= 0x000F;
- /* Detect false-postive touches where x & y report max value */
+ /* Detect false-positive touches where x & y report max value */
if (mt[1].y == 0x7ff && mt[1].x == 0xff0) {
mt[1].x = 0;
/* y gets set to 0 at the end of this function */
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 8fb7b4385ded..ffad142801b3 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1106,8 +1106,11 @@ static void synaptics_process_packet(struct psmouse *psmouse)
num_fingers = hw.w + 2;
break;
case 2:
- if (SYN_MODEL_PEN(info->model_id))
- ; /* Nothing, treat a pen as a single finger */
+ /*
+ * SYN_MODEL_PEN(info->model_id): even if
+ * the device supports a pen, we treat the
+ * pen as a single finger.
+ */
break;
case 4 ... 15:
if (SYN_CAP_PALMDETECT(info->capabilities))
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 0754744b9ce5..f39b7b3f7942 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -255,7 +255,7 @@ config SERIO_ARC_PS2
config SERIO_APBPS2
tristate "GRLIB APBPS2 PS/2 keyboard/mouse controller"
- depends on OF
+ depends on OF && HAS_IOMEM
help
Say Y here if you want support for GRLIB APBPS2 peripherals used
to connect to PS/2 keyboard and/or mouse.
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index ecdeca147ed7..4408245b61d2 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -159,7 +159,7 @@ static int amba_kmi_probe(struct amba_device *dev,
return ret;
}
-static int amba_kmi_remove(struct amba_device *dev)
+static void amba_kmi_remove(struct amba_device *dev)
{
struct amba_kmi_port *kmi = amba_get_drvdata(dev);
@@ -168,7 +168,6 @@ static int amba_kmi_remove(struct amba_device *dev)
iounmap(kmi->base);
kfree(kmi);
amba_release_regions(dev);
- return 0;
}
static int __maybe_unused amba_kmi_resume(struct device *dev)
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 3a2dcf0805f1..9119e12a5778 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -219,6 +219,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
},
+ },
+ {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
@@ -586,6 +588,12 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
},
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
+ },
},
{ }
};
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index 7b8ceb702a74..68fac4801e2e 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -344,7 +344,7 @@ static int ps2_probe(struct sa1111_dev *dev)
/*
* Remove one device from this driver.
*/
-static int ps2_remove(struct sa1111_dev *dev)
+static void ps2_remove(struct sa1111_dev *dev)
{
struct ps2if *ps2if = sa1111_get_drvdata(dev);
@@ -353,8 +353,6 @@ static int ps2_remove(struct sa1111_dev *dev)
sa1111_set_drvdata(dev, NULL);
kfree(ps2if);
-
- return 0;
}
/*
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 8ac970a423de..33e9d9bfd036 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -156,7 +156,9 @@ out:
* returning 0 characters.
*/
-static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, unsigned char __user * buf, size_t nr)
+static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file,
+ unsigned char *kbuf, size_t nr,
+ void **cookie, unsigned long offset)
{
struct serport *serport = (struct serport*) tty->disc_data;
struct serio *serio;
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index e08b0ef078e8..fcb1b646436a 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1036,9 +1036,9 @@ static ssize_t show_tabletSize(struct device *dev, struct device_attribute *attr
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%dx%d\n",
- input_abs_get_max(aiptek->inputdev, ABS_X) + 1,
- input_abs_get_max(aiptek->inputdev, ABS_Y) + 1);
+ return sysfs_emit(buf, "%dx%d\n",
+ input_abs_get_max(aiptek->inputdev, ABS_X) + 1,
+ input_abs_get_max(aiptek->inputdev, ABS_Y) + 1);
}
/* These structs define the sysfs files, param #1 is the name of the
@@ -1064,9 +1064,8 @@ static ssize_t show_tabletPointerMode(struct device *dev, struct device_attribut
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- map_val_to_str(pointer_mode_map,
- aiptek->curSetting.pointerMode));
+ return sysfs_emit(buf, "%s\n", map_val_to_str(pointer_mode_map,
+ aiptek->curSetting.pointerMode));
}
static ssize_t
@@ -1101,9 +1100,8 @@ static ssize_t show_tabletCoordinateMode(struct device *dev, struct device_attri
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- map_val_to_str(coordinate_mode_map,
- aiptek->curSetting.coordinateMode));
+ return sysfs_emit(buf, "%s\n", map_val_to_str(coordinate_mode_map,
+ aiptek->curSetting.coordinateMode));
}
static ssize_t
@@ -1143,9 +1141,8 @@ static ssize_t show_tabletToolMode(struct device *dev, struct device_attribute *
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- map_val_to_str(tool_mode_map,
- aiptek->curSetting.toolMode));
+ return sysfs_emit(buf, "%s\n", map_val_to_str(tool_mode_map,
+ aiptek->curSetting.toolMode));
}
static ssize_t
@@ -1174,10 +1171,9 @@ static ssize_t show_tabletXtilt(struct device *dev, struct device_attribute *att
struct aiptek *aiptek = dev_get_drvdata(dev);
if (aiptek->curSetting.xTilt == AIPTEK_TILT_DISABLE) {
- return snprintf(buf, PAGE_SIZE, "disable\n");
+ return sysfs_emit(buf, "disable\n");
} else {
- return snprintf(buf, PAGE_SIZE, "%d\n",
- aiptek->curSetting.xTilt);
+ return sysfs_emit(buf, "%d\n", aiptek->curSetting.xTilt);
}
}
@@ -1216,10 +1212,9 @@ static ssize_t show_tabletYtilt(struct device *dev, struct device_attribute *att
struct aiptek *aiptek = dev_get_drvdata(dev);
if (aiptek->curSetting.yTilt == AIPTEK_TILT_DISABLE) {
- return snprintf(buf, PAGE_SIZE, "disable\n");
+ return sysfs_emit(buf, "disable\n");
} else {
- return snprintf(buf, PAGE_SIZE, "%d\n",
- aiptek->curSetting.yTilt);
+ return sysfs_emit(buf, "%d\n", aiptek->curSetting.yTilt);
}
}
@@ -1257,7 +1252,7 @@ static ssize_t show_tabletJitterDelay(struct device *dev, struct device_attribut
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", aiptek->curSetting.jitterDelay);
+ return sysfs_emit(buf, "%d\n", aiptek->curSetting.jitterDelay);
}
static ssize_t
@@ -1286,8 +1281,7 @@ static ssize_t show_tabletProgrammableDelay(struct device *dev, struct device_at
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n",
- aiptek->curSetting.programmableDelay);
+ return sysfs_emit(buf, "%d\n", aiptek->curSetting.programmableDelay);
}
static ssize_t
@@ -1316,7 +1310,7 @@ static ssize_t show_tabletEventsReceived(struct device *dev, struct device_attri
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%ld\n", aiptek->eventCount);
+ return sysfs_emit(buf, "%ld\n", aiptek->eventCount);
}
static DEVICE_ATTR(event_count, S_IRUGO, show_tabletEventsReceived, NULL);
@@ -1355,7 +1349,7 @@ static ssize_t show_tabletDiagnosticMessage(struct device *dev, struct device_at
default:
return 0;
}
- return snprintf(buf, PAGE_SIZE, retMsg);
+ return sysfs_emit(buf, "%s", retMsg);
}
static DEVICE_ATTR(diagnostic, S_IRUGO, show_tabletDiagnosticMessage, NULL);
@@ -1375,9 +1369,8 @@ static ssize_t show_tabletStylusUpper(struct device *dev, struct device_attribut
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- map_val_to_str(stylus_button_map,
- aiptek->curSetting.stylusButtonUpper));
+ return sysfs_emit(buf, "%s\n", map_val_to_str(stylus_button_map,
+ aiptek->curSetting.stylusButtonUpper));
}
static ssize_t
@@ -1406,9 +1399,8 @@ static ssize_t show_tabletStylusLower(struct device *dev, struct device_attribut
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- map_val_to_str(stylus_button_map,
- aiptek->curSetting.stylusButtonLower));
+ return sysfs_emit(buf, "%s\n", map_val_to_str(stylus_button_map,
+ aiptek->curSetting.stylusButtonLower));
}
static ssize_t
@@ -1444,9 +1436,8 @@ static ssize_t show_tabletMouseLeft(struct device *dev, struct device_attribute
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- map_val_to_str(mouse_button_map,
- aiptek->curSetting.mouseButtonLeft));
+ return sysfs_emit(buf, "%s\n", map_val_to_str(mouse_button_map,
+ aiptek->curSetting.mouseButtonLeft));
}
static ssize_t
@@ -1474,9 +1465,8 @@ static ssize_t show_tabletMouseMiddle(struct device *dev, struct device_attribut
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- map_val_to_str(mouse_button_map,
- aiptek->curSetting.mouseButtonMiddle));
+ return sysfs_emit(buf, "%s\n", map_val_to_str(mouse_button_map,
+ aiptek->curSetting.mouseButtonMiddle));
}
static ssize_t
@@ -1504,9 +1494,8 @@ static ssize_t show_tabletMouseRight(struct device *dev, struct device_attribute
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- map_val_to_str(mouse_button_map,
- aiptek->curSetting.mouseButtonRight));
+ return sysfs_emit(buf, "%s\n", map_val_to_str(mouse_button_map,
+ aiptek->curSetting.mouseButtonRight));
}
static ssize_t
@@ -1535,10 +1524,9 @@ static ssize_t show_tabletWheel(struct device *dev, struct device_attribute *att
struct aiptek *aiptek = dev_get_drvdata(dev);
if (aiptek->curSetting.wheel == AIPTEK_WHEEL_DISABLE) {
- return snprintf(buf, PAGE_SIZE, "disable\n");
+ return sysfs_emit(buf, "disable\n");
} else {
- return snprintf(buf, PAGE_SIZE, "%d\n",
- aiptek->curSetting.wheel);
+ return sysfs_emit(buf, "%d\n", aiptek->curSetting.wheel);
}
}
@@ -1568,8 +1556,7 @@ static ssize_t show_tabletExecute(struct device *dev, struct device_attribute *a
/* There is nothing useful to display, so a one-line manual
* is in order...
*/
- return snprintf(buf, PAGE_SIZE,
- "Write anything to this file to program your tablet.\n");
+ return sysfs_emit(buf, "Write anything to this file to program your tablet.\n");
}
static ssize_t
@@ -1600,7 +1587,7 @@ static ssize_t show_tabletODMCode(struct device *dev, struct device_attribute *a
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "0x%04x\n", aiptek->features.odmCode);
+ return sysfs_emit(buf, "0x%04x\n", aiptek->features.odmCode);
}
static DEVICE_ATTR(odm_code, S_IRUGO, show_tabletODMCode, NULL);
@@ -1613,7 +1600,7 @@ static ssize_t show_tabletModelCode(struct device *dev, struct device_attribute
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "0x%04x\n", aiptek->features.modelCode);
+ return sysfs_emit(buf, "0x%04x\n", aiptek->features.modelCode);
}
static DEVICE_ATTR(model_code, S_IRUGO, show_tabletModelCode, NULL);
@@ -1626,8 +1613,7 @@ static ssize_t show_firmwareCode(struct device *dev, struct device_attribute *at
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%04x\n",
- aiptek->features.firmwareCode);
+ return sysfs_emit(buf, "%04x\n", aiptek->features.firmwareCode);
}
static DEVICE_ATTR(firmware_code, S_IRUGO, show_firmwareCode, NULL);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index cc18f54ea887..529614d364fe 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -608,7 +608,7 @@ config TOUCHSCREEN_MTOUCH
config TOUCHSCREEN_IMX6UL_TSC
tristate "Freescale i.MX6UL touchscreen controller"
- depends on (OF && GPIOLIB) || COMPILE_TEST
+ depends on ((OF && GPIOLIB) || COMPILE_TEST) && HAS_IOMEM
help
Say Y here if you have a Freescale i.MX6UL, and want to
use the internal touchscreen controller.
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index a703870ca7bd..f113a27aeb1e 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -64,24 +64,13 @@
struct ads7846_buf {
u8 cmd;
- /*
- * This union is a temporary hack. The driver does an in-place
- * endianness conversion. This will be cleaned up in the next
- * patch.
- */
- union {
- __be16 data_be16;
- u16 data;
- };
+ __be16 data;
} __packed;
-
-struct ts_event {
- bool ignore;
- struct ads7846_buf x;
- struct ads7846_buf y;
- struct ads7846_buf z1;
- struct ads7846_buf z2;
+struct ads7846_buf_layout {
+ unsigned int offset;
+ unsigned int count;
+ unsigned int skip;
};
/*
@@ -90,12 +79,18 @@ struct ts_event {
* systems where main memory is not DMA-coherent (most non-x86 boards).
*/
struct ads7846_packet {
- struct ts_event tc;
- struct ads7846_buf read_x_cmd;
- struct ads7846_buf read_y_cmd;
- struct ads7846_buf read_z1_cmd;
- struct ads7846_buf read_z2_cmd;
+ unsigned int count;
+ unsigned int count_skip;
+ unsigned int cmds;
+ unsigned int last_cmd_idx;
+ struct ads7846_buf_layout l[5];
+ struct ads7846_buf *rx;
+ struct ads7846_buf *tx;
+
struct ads7846_buf pwrdown_cmd;
+
+ bool ignore;
+ u16 x, y, z1, z2;
};
struct ads7846 {
@@ -194,7 +189,6 @@ struct ads7846 {
#define READ_Y(vref) (READ_12BIT_DFR(y, 1, vref))
#define READ_Z1(vref) (READ_12BIT_DFR(z1, 1, vref))
#define READ_Z2(vref) (READ_12BIT_DFR(z2, 1, vref))
-
#define READ_X(vref) (READ_12BIT_DFR(x, 1, vref))
#define PWRDOWN (READ_12BIT_DFR(y, 0, 0)) /* LAST */
@@ -207,6 +201,21 @@ struct ads7846 {
#define REF_ON (READ_12BIT_DFR(x, 1, 1))
#define REF_OFF (READ_12BIT_DFR(y, 0, 0))
+/* Order commands in the optimal way to reduce Vref switching and
+ * settling time:
+ * Measure: X; Vref: X+, X-; IN: Y+
+ * Measure: Y; Vref: Y+, Y-; IN: X+
+ * Measure: Z1; Vref: Y+, X-; IN: X+
+ * Measure: Z2; Vref: Y+, X-; IN: Y-
+ */
+enum ads7846_cmds {
+ ADS7846_X,
+ ADS7846_Y,
+ ADS7846_Z1,
+ ADS7846_Z2,
+ ADS7846_PWDOWN,
+};
+
static int get_pendown_state(struct ads7846 *ts)
{
if (ts->get_pendown_state)
@@ -689,26 +698,109 @@ static int ads7846_no_filter(void *ads, int data_idx, int *val)
return ADS7846_FILTER_OK;
}
-static int ads7846_get_value(struct ads7846 *ts, struct spi_message *m)
+static int ads7846_get_value(struct ads7846_buf *buf)
{
int value;
- struct spi_transfer *t =
- list_entry(m->transfers.prev, struct spi_transfer, transfer_list);
- struct ads7846_buf *buf = t->rx_buf;
- value = be16_to_cpup(&buf->data_be16);
+ value = be16_to_cpup(&buf->data);
/* enforce ADC output is 12 bits width */
return (value >> 3) & 0xfff;
}
-static void ads7846_update_value(struct spi_message *m, int val)
+static void ads7846_set_cmd_val(struct ads7846 *ts, enum ads7846_cmds cmd_idx,
+ u16 val)
+{
+ struct ads7846_packet *packet = ts->packet;
+
+ switch (cmd_idx) {
+ case ADS7846_Y:
+ packet->y = val;
+ break;
+ case ADS7846_X:
+ packet->x = val;
+ break;
+ case ADS7846_Z1:
+ packet->z1 = val;
+ break;
+ case ADS7846_Z2:
+ packet->z2 = val;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
+static u8 ads7846_get_cmd(enum ads7846_cmds cmd_idx, int vref)
+{
+ switch (cmd_idx) {
+ case ADS7846_Y:
+ return READ_Y(vref);
+ case ADS7846_X:
+ return READ_X(vref);
+
+ /* 7846 specific commands */
+ case ADS7846_Z1:
+ return READ_Z1(vref);
+ case ADS7846_Z2:
+ return READ_Z2(vref);
+ case ADS7846_PWDOWN:
+ return PWRDOWN;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ return 0;
+}
+
+static bool ads7846_cmd_need_settle(enum ads7846_cmds cmd_idx)
+{
+ switch (cmd_idx) {
+ case ADS7846_X:
+ case ADS7846_Y:
+ case ADS7846_Z1:
+ case ADS7846_Z2:
+ return true;
+ case ADS7846_PWDOWN:
+ return false;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ return false;
+}
+
+static int ads7846_filter(struct ads7846 *ts)
{
- struct spi_transfer *t =
- list_entry(m->transfers.prev, struct spi_transfer, transfer_list);
- struct ads7846_buf *buf = t->rx_buf;
+ struct ads7846_packet *packet = ts->packet;
+ int action;
+ int val;
+ unsigned int cmd_idx, b;
- buf->data = val;
+ packet->ignore = false;
+ for (cmd_idx = packet->last_cmd_idx; cmd_idx < packet->cmds - 1; cmd_idx++) {
+ struct ads7846_buf_layout *l = &packet->l[cmd_idx];
+
+ packet->last_cmd_idx = cmd_idx;
+
+ for (b = l->skip; b < l->count; b++) {
+ val = ads7846_get_value(&packet->rx[l->offset + b]);
+
+ action = ts->filter(ts->filter_data, cmd_idx, &val);
+ if (action == ADS7846_FILTER_REPEAT) {
+ if (b == l->count - 1)
+ return -EAGAIN;
+ } else if (action == ADS7846_FILTER_OK) {
+ ads7846_set_cmd_val(ts, cmd_idx, val);
+ break;
+ } else {
+ packet->ignore = true;
+ return 0;
+ }
+ }
+ }
+
+ return 0;
}
static void ads7846_read_state(struct ads7846 *ts)
@@ -716,52 +808,26 @@ static void ads7846_read_state(struct ads7846 *ts)
struct ads7846_packet *packet = ts->packet;
struct spi_message *m;
int msg_idx = 0;
- int val;
- int action;
int error;
- while (msg_idx < ts->msg_count) {
+ packet->last_cmd_idx = 0;
+ while (true) {
ts->wait_for_sync();
m = &ts->msg[msg_idx];
error = spi_sync(ts->spi, m);
if (error) {
dev_err(&ts->spi->dev, "spi_sync --> %d\n", error);
- packet->tc.ignore = true;
+ packet->ignore = true;
return;
}
- /*
- * Last message is power down request, no need to convert
- * or filter the value.
- */
- if (msg_idx < ts->msg_count - 1) {
-
- val = ads7846_get_value(ts, m);
-
- action = ts->filter(ts->filter_data, msg_idx, &val);
- switch (action) {
- case ADS7846_FILTER_REPEAT:
- continue;
-
- case ADS7846_FILTER_IGNORE:
- packet->tc.ignore = true;
- msg_idx = ts->msg_count - 1;
- continue;
-
- case ADS7846_FILTER_OK:
- ads7846_update_value(m, val);
- packet->tc.ignore = false;
- msg_idx++;
- break;
+ error = ads7846_filter(ts);
+ if (error)
+ continue;
- default:
- BUG();
- }
- } else {
- msg_idx++;
- }
+ return;
}
}
@@ -771,19 +837,14 @@ static void ads7846_report_state(struct ads7846 *ts)
unsigned int Rt;
u16 x, y, z1, z2;
- /*
- * ads7846_get_value() does in-place conversion (including byte swap)
- * from on-the-wire format as part of debouncing to get stable
- * readings.
- */
- x = packet->tc.x.data;
- y = packet->tc.y.data;
+ x = packet->x;
+ y = packet->y;
if (ts->model == 7845) {
z1 = 0;
z2 = 0;
} else {
- z1 = packet->tc.z1.data;
- z2 = packet->tc.z2.data;
+ z1 = packet->z1;
+ z2 = packet->z2;
}
/* range filtering */
@@ -816,9 +877,9 @@ static void ads7846_report_state(struct ads7846 *ts)
* the maximum. Don't report it to user space, repeat at least
* once more the measurement
*/
- if (packet->tc.ignore || Rt > ts->pressure_max) {
+ if (packet->ignore || Rt > ts->pressure_max) {
dev_vdbg(&ts->spi->dev, "ignored %d pressure %d\n",
- packet->tc.ignore, Rt);
+ packet->ignore, Rt);
return;
}
@@ -979,13 +1040,59 @@ static int ads7846_setup_pendown(struct spi_device *spi,
* Set up the transfers to read touchscreen state; this assumes we
* use formula #2 for pressure, not #3.
*/
-static void ads7846_setup_spi_msg(struct ads7846 *ts,
+static int ads7846_setup_spi_msg(struct ads7846 *ts,
const struct ads7846_platform_data *pdata)
{
struct spi_message *m = &ts->msg[0];
struct spi_transfer *x = ts->xfer;
struct ads7846_packet *packet = ts->packet;
int vref = pdata->keep_vref_on;
+ unsigned int count, offset = 0;
+ unsigned int cmd_idx, b;
+ unsigned long time;
+ size_t size = 0;
+
+ /* time per bit */
+ time = NSEC_PER_SEC / ts->spi->max_speed_hz;
+
+ count = pdata->settle_delay_usecs * NSEC_PER_USEC / time;
+ packet->count_skip = DIV_ROUND_UP(count, 24);
+
+ if (ts->debounce_max && ts->debounce_rep)
+ /* ads7846_debounce_filter() makes ts->debounce_rep + 2 reads,
+ * so we need to get all samples for the normal case. */
+ packet->count = ts->debounce_rep + 2;
+ else
+ packet->count = 1;
+
+ if (ts->model == 7846)
+ packet->cmds = 5; /* x, y, z1, z2, pwdown */
+ else
+ packet->cmds = 3; /* x, y, pwdown */
+
+ for (cmd_idx = 0; cmd_idx < packet->cmds; cmd_idx++) {
+ struct ads7846_buf_layout *l = &packet->l[cmd_idx];
+ unsigned int max_count;
+
+ if (ads7846_cmd_need_settle(cmd_idx))
+ max_count = packet->count + packet->count_skip;
+ else
+ max_count = packet->count;
+
+ l->offset = offset;
+ offset += max_count;
+ l->count = max_count;
+ l->skip = packet->count_skip;
+ size += sizeof(*packet->tx) * max_count;
+ }
+
+ packet->tx = devm_kzalloc(&ts->spi->dev, size, GFP_KERNEL);
+ if (!packet->tx)
+ return -ENOMEM;
+
+ packet->rx = devm_kzalloc(&ts->spi->dev, size, GFP_KERNEL);
+ if (!packet->rx)
+ return -ENOMEM;
if (ts->model == 7873) {
/*
@@ -1001,117 +1108,20 @@ static void ads7846_setup_spi_msg(struct ads7846 *ts,
spi_message_init(m);
m->context = ts;
- packet->read_y_cmd.cmd = READ_Y(vref);
- x->tx_buf = &packet->read_y_cmd;
- x->rx_buf = &packet->tc.y;
- x->len = 3;
- spi_message_add_tail(x, m);
+ for (cmd_idx = 0; cmd_idx < packet->cmds; cmd_idx++) {
+ struct ads7846_buf_layout *l = &packet->l[cmd_idx];
+ u8 cmd = ads7846_get_cmd(cmd_idx, vref);
- /*
- * The first sample after switching drivers can be low quality;
- * optionally discard it, using a second one after the signals
- * have had enough time to stabilize.
- */
- if (pdata->settle_delay_usecs) {
- x->delay.value = pdata->settle_delay_usecs;
- x->delay.unit = SPI_DELAY_UNIT_USECS;
- x++;
-
- x->tx_buf = &packet->read_y_cmd;
- x->rx_buf = &packet->tc.y;
- x->len = 3;
- spi_message_add_tail(x, m);
+ for (b = 0; b < l->count; b++)
+ packet->tx[l->offset + b].cmd = cmd;
}
- ts->msg_count++;
- m++;
- spi_message_init(m);
- m->context = ts;
-
- /* turn y- off, x+ on, then leave in lowpower */
- x++;
- packet->read_x_cmd.cmd = READ_X(vref);
- x->tx_buf = &packet->read_x_cmd;
- x->rx_buf = &packet->tc.x;
- x->len = 3;
+ x->tx_buf = packet->tx;
+ x->rx_buf = packet->rx;
+ x->len = size;
spi_message_add_tail(x, m);
- /* ... maybe discard first sample ... */
- if (pdata->settle_delay_usecs) {
- x->delay.value = pdata->settle_delay_usecs;
- x->delay.unit = SPI_DELAY_UNIT_USECS;
-
- x++;
- x->tx_buf = &packet->read_x_cmd;
- x->rx_buf = &packet->tc.x;
- x->len = 3;
- spi_message_add_tail(x, m);
- }
-
- /* turn y+ off, x- on; we'll use formula #2 */
- if (ts->model == 7846) {
- ts->msg_count++;
- m++;
- spi_message_init(m);
- m->context = ts;
-
- x++;
- packet->read_z1_cmd.cmd = READ_Z1(vref);
- x->tx_buf = &packet->read_z1_cmd;
- x->rx_buf = &packet->tc.z1;
- x->len = 3;
- spi_message_add_tail(x, m);
-
- /* ... maybe discard first sample ... */
- if (pdata->settle_delay_usecs) {
- x->delay.value = pdata->settle_delay_usecs;
- x->delay.unit = SPI_DELAY_UNIT_USECS;
-
- x++;
- x->tx_buf = &packet->read_z1_cmd;
- x->rx_buf = &packet->tc.z1;
- x->len = 3;
- spi_message_add_tail(x, m);
- }
-
- ts->msg_count++;
- m++;
- spi_message_init(m);
- m->context = ts;
-
- x++;
- packet->read_z2_cmd.cmd = READ_Z2(vref);
- x->tx_buf = &packet->read_z2_cmd;
- x->rx_buf = &packet->tc.z2;
- x->len = 3;
- spi_message_add_tail(x, m);
-
- /* ... maybe discard first sample ... */
- if (pdata->settle_delay_usecs) {
- x->delay.value = pdata->settle_delay_usecs;
- x->delay.unit = SPI_DELAY_UNIT_USECS;
-
- x++;
- x->tx_buf = &packet->read_z2_cmd;
- x->rx_buf = &packet->tc.z2;
- x->len = 3;
- spi_message_add_tail(x, m);
- }
- }
-
- /* power down */
- ts->msg_count++;
- m++;
- spi_message_init(m);
- m->context = ts;
-
- x++;
- packet->pwrdown_cmd.cmd = PWRDOWN;
- x->tx_buf = &packet->pwrdown_cmd;
- x->len = 3;
-
- CS_CHANGE(*x);
- spi_message_add_tail(x, m);
+ return 0;
}
#ifdef CONFIG_OF
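
The settle-delay handling above replaces per-transfer delays with extra leading samples: one ads7846_buf is 3 bytes (24 bits) on the wire, so the number of sample slots to skip per measured channel is the settle time divided by 24 bit-times. A worked example of that arithmetic with hypothetical board values:

#include <stdio.h>

#define NSEC_PER_SEC		1000000000UL
#define NSEC_PER_USEC		1000UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long max_speed_hz = 2000000;		/* 2 MHz SPI clock */
	unsigned long settle_delay_usecs = 150;		/* from platform data */

	unsigned long time = NSEC_PER_SEC / max_speed_hz;		 /* 500 ns per bit */
	unsigned long count = settle_delay_usecs * NSEC_PER_USEC / time; /* 300 bit-times */
	unsigned long count_skip = DIV_ROUND_UP(count, 24);		 /* 13 samples */

	printf("skip %lu leading samples per channel\n", count_skip);
	return 0;
}
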
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index d51cb910fba1..4c2b579f6c8b 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -56,6 +56,7 @@
#define QUEUE_HEADER_SINGLE 0x62
#define QUEUE_HEADER_NORMAL 0X63
#define QUEUE_HEADER_WAIT 0x64
+#define QUEUE_HEADER_NORMAL2 0x66
/* Command header definition */
#define CMD_HEADER_WRITE 0x54
@@ -69,6 +70,7 @@
#define CMD_HEADER_REK 0x66
/* FW position data */
+#define PACKET_SIZE_OLD 40
#define PACKET_SIZE 55
#define MAX_CONTACT_NUM 10
#define FW_POS_HEADER 0
@@ -90,6 +92,8 @@
/* FW read command, 0x53 0x?? 0x0, 0x01 */
#define E_ELAN_INFO_FW_VER 0x00
#define E_ELAN_INFO_BC_VER 0x10
+#define E_ELAN_INFO_X_RES 0x60
+#define E_ELAN_INFO_Y_RES 0x63
#define E_ELAN_INFO_REK 0xD0
#define E_ELAN_INFO_TEST_VER 0xE0
#define E_ELAN_INFO_FW_ID 0xF0
@@ -112,6 +116,11 @@
#define ELAN_POWERON_DELAY_USEC 500
#define ELAN_RESET_DELAY_MSEC 20
+enum elants_chip_id {
+ EKTH3500,
+ EKTF3624,
+};
+
enum elants_state {
ELAN_STATE_NORMAL,
ELAN_WAIT_QUEUE_HEADER,
@@ -143,9 +152,12 @@ struct elants_data {
unsigned int y_res;
unsigned int x_max;
unsigned int y_max;
+ unsigned int phy_x;
+ unsigned int phy_y;
struct touchscreen_properties prop;
enum elants_state state;
+ enum elants_chip_id chip_id;
enum elants_iap_mode iap_mode;
/* Guards against concurrent access to the device via sysfs */
@@ -433,7 +445,51 @@ static int elants_i2c_query_bc_version(struct elants_data *ts)
return 0;
}
-static int elants_i2c_query_ts_info(struct elants_data *ts)
+static int elants_i2c_query_ts_info_ektf(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ int error;
+ u8 resp[4];
+ u16 phy_x, phy_y;
+ const u8 get_xres_cmd[] = {
+ CMD_HEADER_READ, E_ELAN_INFO_X_RES, 0x00, 0x00
+ };
+ const u8 get_yres_cmd[] = {
+ CMD_HEADER_READ, E_ELAN_INFO_Y_RES, 0x00, 0x00
+ };
+
+ /* Get X/Y size in mm */
+ error = elants_i2c_execute_command(client, get_xres_cmd,
+ sizeof(get_xres_cmd),
+ resp, sizeof(resp), 1,
+ "get X size");
+ if (error)
+ return error;
+
+ phy_x = resp[2] | ((resp[3] & 0xF0) << 4);
+
+ error = elants_i2c_execute_command(client, get_yres_cmd,
+ sizeof(get_yres_cmd),
+ resp, sizeof(resp), 1,
+ "get Y size");
+ if (error)
+ return error;
+
+ phy_y = resp[2] | ((resp[3] & 0xF0) << 4);
+
+ dev_dbg(&client->dev, "phy_x=%d, phy_y=%d\n", phy_x, phy_y);
+
+ ts->phy_x = phy_x;
+ ts->phy_y = phy_y;
+
+ /* eKTF doesn't report max size, set it to default values */
+ ts->x_max = 2240 - 1;
+ ts->y_max = 1408 - 1;
+
+ return 0;
+}
+
+static int elants_i2c_query_ts_info_ekth(struct elants_data *ts)
{
struct i2c_client *client = ts->client;
int error;
@@ -508,6 +564,8 @@ static int elants_i2c_query_ts_info(struct elants_data *ts)
ts->x_res = DIV_ROUND_CLOSEST(ts->x_max, phy_x);
ts->y_max = ELAN_TS_RESOLUTION(cols, osr);
ts->y_res = DIV_ROUND_CLOSEST(ts->y_max, phy_y);
+ ts->phy_x = phy_x;
+ ts->phy_y = phy_y;
}
return 0;
@@ -587,8 +645,19 @@ static int elants_i2c_initialize(struct elants_data *ts)
error = elants_i2c_query_fw_version(ts);
if (!error)
error = elants_i2c_query_test_version(ts);
- if (!error)
- error = elants_i2c_query_ts_info(ts);
+
+ switch (ts->chip_id) {
+ case EKTH3500:
+ if (!error)
+ error = elants_i2c_query_ts_info_ekth(ts);
+ break;
+ case EKTF3624:
+ if (!error)
+ error = elants_i2c_query_ts_info_ektf(ts);
+ break;
+ default:
+ BUG();
+ }
if (error)
ts->iap_mode = ELAN_IAP_RECOVERY;
@@ -853,7 +922,8 @@ out:
* Event reporting.
*/
-static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
+static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf,
+ size_t packet_size)
{
struct input_dev *input = ts->input;
unsigned int n_fingers;
@@ -880,8 +950,24 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
pos = &buf[FW_POS_XY + i * 3];
x = (((u16)pos[0] & 0xf0) << 4) | pos[1];
y = (((u16)pos[0] & 0x0f) << 8) | pos[2];
- p = buf[FW_POS_PRESSURE + i];
- w = buf[FW_POS_WIDTH + i];
+
+ /*
+ * eKTF3624 may use the "old" touch-report format,
+ * depending on the device and TS firmware version.
+ * For example, ASUS Transformer devices use the "old"
+ * format, while the ASUS Nexus 7 uses the "new" format.
+ */
+ if (packet_size == PACKET_SIZE_OLD &&
+ ts->chip_id == EKTF3624) {
+ w = buf[FW_POS_WIDTH + i / 2];
+ w >>= 4 * (~i & 1);
+ w |= w << 4;
+ w |= !w;
+ p = w;
+ } else {
+ p = buf[FW_POS_PRESSURE + i];
+ w = buf[FW_POS_WIDTH + i];
+ }
dev_dbg(&ts->client->dev, "i=%d x=%d y=%d p=%d w=%d\n",
i, x, y, p, w);
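
For reference, the "old"-format width handling above packs two 4-bit contact widths per byte (even-numbered contacts in the high nibble, odd ones in the low nibble), widens the nibble to a full byte, forces it non-zero, and reuses the result as the pressure value. A stand-alone sketch of that expansion; the explicit mask is only there to keep the demo self-contained:

#include <stdio.h>

int main(void)
{
	unsigned int i, byte = 0x93;	/* widths of contacts 0 and 1 */

	for (i = 0; i < 2; i++) {
		unsigned int w = byte;

		w >>= 4 * (~i & 1);	/* even contact: high nibble */
		w &= 0x0f;		/* isolate the nibble */
		w |= w << 4;		/* scale 0..15 to 0..255 */
		w |= !w;		/* never report a zero width */
		printf("contact %u: width 0x%02x\n", i, w);	/* 0x99, 0x33 */
	}
	return 0;
}
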
@@ -913,7 +999,8 @@ static u8 elants_i2c_calculate_checksum(u8 *buf)
return checksum;
}
-static void elants_i2c_event(struct elants_data *ts, u8 *buf)
+static void elants_i2c_event(struct elants_data *ts, u8 *buf,
+ size_t packet_size)
{
u8 checksum = elants_i2c_calculate_checksum(buf);
@@ -927,7 +1014,7 @@ static void elants_i2c_event(struct elants_data *ts, u8 *buf)
"%s: unknown packet type: %02x\n",
__func__, buf[FW_POS_HEADER]);
else
- elants_i2c_mt_event(ts, buf);
+ elants_i2c_mt_event(ts, buf, packet_size);
}
static irqreturn_t elants_i2c_irq(int irq, void *_dev)
@@ -970,7 +1057,6 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
switch (ts->buf[FW_HDR_TYPE]) {
case CMD_HEADER_HELLO:
case CMD_HEADER_RESP:
- case CMD_HEADER_REK:
break;
case QUEUE_HEADER_WAIT:
@@ -985,9 +1071,24 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
break;
case QUEUE_HEADER_SINGLE:
- elants_i2c_event(ts, &ts->buf[HEADER_SIZE]);
+ elants_i2c_event(ts, &ts->buf[HEADER_SIZE],
+ ts->buf[FW_HDR_LENGTH]);
break;
+ case QUEUE_HEADER_NORMAL2: /* CMD_HEADER_REK */
+ /*
+ * Depending on firmware version, eKTF3624 touchscreens
+ * may utilize one of these opcodes for the touch events:
+ * 0x63 (NORMAL) and 0x66 (NORMAL2). The 0x63 opcode is used
+ * by older firmware versions and differs from 0x66 in that
+ * the touch pressure value needs to be adjusted. The 0x66
+ * opcode of newer firmware is equivalent to 0x63 of eKTH3500.
+ */
+ if (ts->chip_id != EKTF3624)
+ break;
+
+ fallthrough;
+
case QUEUE_HEADER_NORMAL:
report_count = ts->buf[FW_HDR_COUNT];
if (report_count == 0 || report_count > 3) {
@@ -998,7 +1099,12 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
}
report_len = ts->buf[FW_HDR_LENGTH] / report_count;
- if (report_len != PACKET_SIZE) {
+
+ if (report_len == PACKET_SIZE_OLD &&
+ ts->chip_id == EKTF3624) {
+ dev_dbg_once(&client->dev,
+ "using old report format\n");
+ } else if (report_len != PACKET_SIZE) {
dev_err(&client->dev,
"mismatching report length: %*ph\n",
HEADER_SIZE, ts->buf);
@@ -1007,8 +1113,8 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
for (i = 0; i < report_count; i++) {
u8 *buf = ts->buf + HEADER_SIZE +
- i * PACKET_SIZE;
- elants_i2c_event(ts, buf);
+ i * report_len;
+ elants_i2c_event(ts, buf, report_len);
}
break;
@@ -1250,6 +1356,7 @@ static int elants_i2c_probe(struct i2c_client *client,
init_completion(&ts->cmd_done);
ts->client = client;
+ ts->chip_id = (enum elants_chip_id)id->driver_data;
i2c_set_clientdata(client, ts);
ts->vcc33 = devm_regulator_get(&client->dev, "vcc33");
@@ -1331,13 +1438,20 @@ static int elants_i2c_probe(struct i2c_client *client,
input_set_abs_params(ts->input, ABS_MT_PRESSURE, 0, 255, 0, 0);
input_set_abs_params(ts->input, ABS_MT_TOOL_TYPE,
0, MT_TOOL_PALM, 0, 0);
+
+ touchscreen_parse_properties(ts->input, true, &ts->prop);
+
+ if (ts->chip_id == EKTF3624) {
+ /* calculate resolution from size */
+ ts->x_res = DIV_ROUND_CLOSEST(ts->prop.max_x, ts->phy_x);
+ ts->y_res = DIV_ROUND_CLOSEST(ts->prop.max_y, ts->phy_y);
+ }
+
input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
if (ts->major_res > 0)
input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res);
- touchscreen_parse_properties(ts->input, true, &ts->prop);
-
error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
if (error) {
@@ -1466,14 +1580,16 @@ static SIMPLE_DEV_PM_OPS(elants_i2c_pm_ops,
elants_i2c_suspend, elants_i2c_resume);
static const struct i2c_device_id elants_i2c_id[] = {
- { DEVICE_NAME, 0 },
+ { DEVICE_NAME, EKTH3500 },
+ { "ekth3500", EKTH3500 },
+ { "ektf3624", EKTF3624 },
{ }
};
MODULE_DEVICE_TABLE(i2c, elants_i2c_id);
#ifdef CONFIG_ACPI
static const struct acpi_device_id elants_acpi_id[] = {
- { "ELAN0001", 0 },
+ { "ELAN0001", EKTH3500 },
{ }
};
MODULE_DEVICE_TABLE(acpi, elants_acpi_id);
@@ -1482,6 +1598,7 @@ MODULE_DEVICE_TABLE(acpi, elants_acpi_id);
#ifdef CONFIG_OF
static const struct of_device_id elants_of_match[] = {
{ .compatible = "elan,ekth3500" },
+ { .compatible = "elan,ektf3624" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, elants_of_match);
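The elants_i2c hunks above pack two 4-bit contact widths per byte in the old
eKTF3624 report, and split the physical X/Y size across a low byte plus a high
nibble. A minimal user-space sketch of both decodings (helper names are
illustrative and not part of the driver; the explicit nibble mask is added here
for clarity):

#include <stdint.h>
#include <stdio.h>

/* Expand one packed 4-bit width (old eKTF3624 report) to an 8-bit value. */
static uint8_t decode_old_width(const uint8_t *width_bytes, int finger)
{
	uint8_t w = width_bytes[finger / 2];	/* two contacts per byte */

	w >>= 4 * (~finger & 1);		/* even contact: high nibble */
	w &= 0x0f;
	w |= w << 4;				/* scale 4 bits up to 8 bits */
	w |= !w;				/* never report zero width */
	return w;
}

/* Combine the low byte and the high nibble of a size reply into 12 bits. */
static uint16_t decode_phy_size(const uint8_t resp[4])
{
	return resp[2] | ((resp[3] & 0xF0) << 4);
}

int main(void)
{
	const uint8_t widths[] = { 0x3a };	/* contact 0 -> 0x3, contact 1 -> 0xa */
	const uint8_t resp[4] = { 0x52, 0x63, 0x34, 0x10 };

	printf("w0=%u w1=%u phy=%u\n",
	       decode_old_width(widths, 0), decode_old_width(widths, 1),
	       decode_phy_size(resp));
	return 0;
}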
diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
index e0bacd34866a..96173232e53f 100644
--- a/drivers/input/touchscreen/elo.c
+++ b/drivers/input/touchscreen/elo.c
@@ -341,8 +341,10 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv)
switch (elo->id) {
case 0: /* 10-byte protocol */
- if (elo_setup_10(elo))
+ if (elo_setup_10(elo)) {
+ err = -EIO;
goto fail3;
+ }
break;
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 19765f1c04f7..c682b028f0a2 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -157,6 +157,7 @@ static const struct goodix_chip_id goodix_chip_ids[] = {
{ .id = "5663", .data = &gt1x_chip_data },
{ .id = "5688", .data = &gt1x_chip_data },
{ .id = "917S", .data = &gt1x_chip_data },
+ { .id = "9286", .data = &gt1x_chip_data },
{ .id = "911", .data = &gt911_chip_data },
{ .id = "9271", .data = &gt911_chip_data },
@@ -1448,6 +1449,7 @@ static const struct of_device_id goodix_of_match[] = {
{ .compatible = "goodix,gt927" },
{ .compatible = "goodix,gt9271" },
{ .compatible = "goodix,gt928" },
+ { .compatible = "goodix,gt9286" },
{ .compatible = "goodix,gt967" },
{ }
};
diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
index 2f261a34f9c2..056ba76087e8 100644
--- a/drivers/input/touchscreen/htcpen.c
+++ b/drivers/input/touchscreen/htcpen.c
@@ -171,7 +171,7 @@ static int htcpen_isa_probe(struct device *dev, unsigned int id)
return err;
}
-static int htcpen_isa_remove(struct device *dev, unsigned int id)
+static void htcpen_isa_remove(struct device *dev, unsigned int id)
{
struct input_dev *htcpen_dev = dev_get_drvdata(dev);
@@ -182,8 +182,6 @@ static int htcpen_isa_remove(struct device *dev, unsigned int id)
release_region(HTCPEN_PORT_INDEX, 2);
release_region(HTCPEN_PORT_INIT, 1);
release_region(HTCPEN_PORT_IRQ_CLEAR, 1);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 199cf3daec10..d8fccf048bf4 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -29,11 +29,13 @@ struct ili2xxx_chip {
void *buf, size_t len);
int (*get_touch_data)(struct i2c_client *client, u8 *data);
bool (*parse_touch_data)(const u8 *data, unsigned int finger,
- unsigned int *x, unsigned int *y);
+ unsigned int *x, unsigned int *y,
+ unsigned int *z);
bool (*continue_polling)(const u8 *data, bool touch);
unsigned int max_touches;
unsigned int resolution;
bool has_calibrate_reg;
+ bool has_pressure_reg;
};
struct ili210x {
@@ -82,7 +84,8 @@ static int ili210x_read_touch_data(struct i2c_client *client, u8 *data)
static bool ili210x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
if (touchdata[0] & BIT(finger))
return false;
@@ -137,7 +140,8 @@ static int ili211x_read_touch_data(struct i2c_client *client, u8 *data)
static bool ili211x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
u32 data;
@@ -169,7 +173,8 @@ static const struct ili2xxx_chip ili211x_chip = {
static bool ili212x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
u16 val;
@@ -235,7 +240,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
static bool ili251x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
- unsigned int *x, unsigned int *y)
+ unsigned int *x, unsigned int *y,
+ unsigned int *z)
{
u16 val;
@@ -245,6 +251,7 @@ static bool ili251x_touchdata_to_coords(const u8 *touchdata,
*x = val & 0x3fff;
*y = get_unaligned_be16(touchdata + 1 + (finger * 5) + 2);
+ *z = touchdata[1 + (finger * 5) + 4];
return true;
}
@@ -261,6 +268,7 @@ static const struct ili2xxx_chip ili251x_chip = {
.continue_polling = ili251x_check_continue_polling,
.max_touches = 10,
.has_calibrate_reg = true,
+ .has_pressure_reg = true,
};
static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
@@ -268,14 +276,16 @@ static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
struct input_dev *input = priv->input;
int i;
bool contact = false, touch;
- unsigned int x = 0, y = 0;
+ unsigned int x = 0, y = 0, z = 0;
for (i = 0; i < priv->chip->max_touches; i++) {
- touch = priv->chip->parse_touch_data(touchdata, i, &x, &y);
+ touch = priv->chip->parse_touch_data(touchdata, i, &x, &y, &z);
input_mt_slot(input, i);
if (input_mt_report_slot_state(input, MT_TOOL_FINGER, touch)) {
touchscreen_report_pos(input, &priv->prop, x, y, true);
+ if (priv->chip->has_pressure_reg)
+ input_report_abs(input, ABS_MT_PRESSURE, z);
contact = true;
}
}
@@ -437,6 +447,8 @@ static int ili210x_i2c_probe(struct i2c_client *client,
max_xy = (chip->resolution ?: SZ_64K) - 1;
input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_xy, 0, 0);
input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0);
+ if (priv->chip->has_pressure_reg)
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xa, 0, 0);
touchscreen_parse_properties(input, true, &priv->prop);
error = input_mt_init_slots(input, priv->chip->max_touches,
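The ili210x hunks above thread an extra z out-parameter through each chip's
parse_touch_data() callback and report ABS_MT_PRESSURE only when the chip
declares has_pressure_reg. A small stand-alone sketch of that dispatch, using a
finger layout modelled on the ili251x hunk (structure and function names here
are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct chip_ops {
	bool (*parse_touch_data)(const uint8_t *data, unsigned int finger,
				 unsigned int *x, unsigned int *y,
				 unsigned int *z);
	bool has_pressure_reg;
};

static uint16_t be16(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

/* 5 bytes per finger: flags+x (big endian), y (big endian), pressure. */
static bool parse_251x_like(const uint8_t *data, unsigned int finger,
			    unsigned int *x, unsigned int *y, unsigned int *z)
{
	const uint8_t *f = data + 1 + finger * 5;

	if (!(f[0] & 0x80))		/* contact-valid bit (assumed layout) */
		return false;

	*x = be16(f) & 0x3fff;
	*y = be16(f + 2);
	*z = f[4];
	return true;
}

static void report(const struct chip_ops *ops, const uint8_t *data,
		   unsigned int max_touches)
{
	unsigned int i, x, y, z;

	for (i = 0; i < max_touches; i++) {
		if (!ops->parse_touch_data(data, i, &x, &y, &z))
			continue;
		if (ops->has_pressure_reg)
			printf("slot %u: x=%u y=%u pressure=%u\n", i, x, y, z);
		else
			printf("slot %u: x=%u y=%u\n", i, x, y);
	}
}

int main(void)
{
	const struct chip_ops ops = { parse_251x_like, true };
	const uint8_t data[] = { 0x01, 0x81, 0x23, 0x02, 0x10, 0x07,
				 0x00, 0x00, 0x00, 0x00, 0x00 };

	report(&ops, data, 2);
	return 0;
}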
diff --git a/drivers/input/touchscreen/iqs5xx.c b/drivers/input/touchscreen/iqs5xx.c
index 4fd21bc3ce0f..54f30038dca4 100644
--- a/drivers/input/touchscreen/iqs5xx.c
+++ b/drivers/input/touchscreen/iqs5xx.c
@@ -2,8 +2,7 @@
/*
* Azoteq IQS550/572/525 Trackpad/Touchscreen Controller
*
- * Copyright (C) 2018
- * Author: Jeff LaBundy <jeff@labundy.com>
+ * Copyright (C) 2018 Jeff LaBundy <jeff@labundy.com>
*
* These devices require firmware exported from a PC-based configuration tool
* made available by the vendor. Firmware files may be pushed to the device's
@@ -12,6 +11,7 @@
* Link to PC-based configuration tool and data sheet: http://www.azoteq.com/
*/
+#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -30,9 +30,9 @@
#define IQS5XX_FW_FILE_LEN 64
#define IQS5XX_NUM_RETRIES 10
-#define IQS5XX_NUM_POINTS 256
#define IQS5XX_NUM_CONTACTS 5
#define IQS5XX_WR_BYTES_MAX 2
+#define IQS5XX_XY_RES_MAX 0xFFFE
#define IQS5XX_PROD_NUM_IQS550 40
#define IQS5XX_PROD_NUM_IQS572 58
@@ -41,28 +41,27 @@
#define IQS5XX_PROJ_NUM_B000 15
#define IQS5XX_MAJOR_VER_MIN 2
-#define IQS5XX_RESUME 0x00
-#define IQS5XX_SUSPEND 0x01
+#define IQS5XX_SHOW_RESET BIT(7)
+#define IQS5XX_ACK_RESET BIT(7)
-#define IQS5XX_SW_INPUT_EVENT 0x10
-#define IQS5XX_SETUP_COMPLETE 0x40
-#define IQS5XX_EVENT_MODE 0x01
-#define IQS5XX_TP_EVENT 0x04
+#define IQS5XX_SUSPEND BIT(0)
+#define IQS5XX_RESUME 0
-#define IQS5XX_FLIP_X 0x01
-#define IQS5XX_FLIP_Y 0x02
-#define IQS5XX_SWITCH_XY_AXIS 0x04
+#define IQS5XX_SETUP_COMPLETE BIT(6)
+#define IQS5XX_WDT BIT(5)
+#define IQS5XX_ALP_REATI BIT(3)
+#define IQS5XX_REATI BIT(2)
+
+#define IQS5XX_TP_EVENT BIT(2)
+#define IQS5XX_EVENT_MODE BIT(0)
#define IQS5XX_PROD_NUM 0x0000
-#define IQS5XX_ABS_X 0x0016
-#define IQS5XX_ABS_Y 0x0018
+#define IQS5XX_SYS_INFO0 0x000F
+#define IQS5XX_SYS_INFO1 0x0010
#define IQS5XX_SYS_CTRL0 0x0431
#define IQS5XX_SYS_CTRL1 0x0432
#define IQS5XX_SYS_CFG0 0x058E
#define IQS5XX_SYS_CFG1 0x058F
-#define IQS5XX_TOTAL_RX 0x063D
-#define IQS5XX_TOTAL_TX 0x063E
-#define IQS5XX_XY_CFG0 0x0669
#define IQS5XX_X_RES 0x066E
#define IQS5XX_Y_RES 0x0670
#define IQS5XX_CHKSM 0x83C0
@@ -99,6 +98,7 @@ struct iqs5xx_private {
struct i2c_client *client;
struct input_dev *input;
struct gpio_desc *reset_gpio;
+ struct touchscreen_properties prop;
struct mutex lock;
u8 bl_status;
};
@@ -126,6 +126,14 @@ struct iqs5xx_touch_data {
u8 area;
} __packed;
+struct iqs5xx_status {
+ u8 sys_info[2];
+ u8 num_active;
+ __be16 rel_x;
+ __be16 rel_y;
+ struct iqs5xx_touch_data touch_data[IQS5XX_NUM_CONTACTS];
+} __packed;
+
static int iqs5xx_read_burst(struct i2c_client *client,
u16 reg, void *val, u16 len)
{
@@ -182,11 +190,6 @@ static int iqs5xx_read_word(struct i2c_client *client, u16 reg, u16 *val)
return 0;
}
-static int iqs5xx_read_byte(struct i2c_client *client, u16 reg, u8 *val)
-{
- return iqs5xx_read_burst(client, reg, val, sizeof(*val));
-}
-
static int iqs5xx_write_burst(struct i2c_client *client,
u16 reg, const void *val, u16 len)
{
@@ -337,11 +340,16 @@ static int iqs5xx_bl_open(struct i2c_client *client)
*/
for (i = 0; i < IQS5XX_BL_ATTEMPTS; i++) {
iqs5xx_reset(client);
+ usleep_range(350, 400);
for (j = 0; j < IQS5XX_NUM_RETRIES; j++) {
error = iqs5xx_bl_cmd(client, IQS5XX_BL_CMD_VER, 0);
- if (!error || error == -EINVAL)
- return error;
+ if (!error)
+ usleep_range(10000, 10100);
+ else if (error != -EINVAL)
+ continue;
+
+ return error;
}
}
@@ -481,12 +489,10 @@ static void iqs5xx_close(struct input_dev *input)
static int iqs5xx_axis_init(struct i2c_client *client)
{
struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
- struct touchscreen_properties prop;
+ struct touchscreen_properties *prop = &iqs5xx->prop;
struct input_dev *input;
+ u16 max_x, max_y;
int error;
- u16 max_x, max_x_hw;
- u16 max_y, max_y_hw;
- u8 val;
if (!iqs5xx->input) {
input = devm_input_allocate_device(&client->dev);
@@ -506,89 +512,39 @@ static int iqs5xx_axis_init(struct i2c_client *client)
iqs5xx->input = input;
}
- touchscreen_parse_properties(iqs5xx->input, true, &prop);
-
- error = iqs5xx_read_byte(client, IQS5XX_TOTAL_RX, &val);
- if (error)
- return error;
- max_x_hw = (val - 1) * IQS5XX_NUM_POINTS;
-
- error = iqs5xx_read_byte(client, IQS5XX_TOTAL_TX, &val);
+ error = iqs5xx_read_word(client, IQS5XX_X_RES, &max_x);
if (error)
return error;
- max_y_hw = (val - 1) * IQS5XX_NUM_POINTS;
- error = iqs5xx_read_byte(client, IQS5XX_XY_CFG0, &val);
+ error = iqs5xx_read_word(client, IQS5XX_Y_RES, &max_y);
if (error)
return error;
- if (val & IQS5XX_SWITCH_XY_AXIS)
- swap(max_x_hw, max_y_hw);
+ input_abs_set_max(iqs5xx->input, ABS_MT_POSITION_X, max_x);
+ input_abs_set_max(iqs5xx->input, ABS_MT_POSITION_Y, max_y);
- if (prop.swap_x_y)
- val ^= IQS5XX_SWITCH_XY_AXIS;
-
- if (prop.invert_x)
- val ^= prop.swap_x_y ? IQS5XX_FLIP_Y : IQS5XX_FLIP_X;
-
- if (prop.invert_y)
- val ^= prop.swap_x_y ? IQS5XX_FLIP_X : IQS5XX_FLIP_Y;
-
- error = iqs5xx_write_byte(client, IQS5XX_XY_CFG0, val);
- if (error)
- return error;
+ touchscreen_parse_properties(iqs5xx->input, true, prop);
- if (prop.max_x > max_x_hw) {
+ if (prop->max_x > IQS5XX_XY_RES_MAX) {
dev_err(&client->dev, "Invalid maximum x-coordinate: %u > %u\n",
- prop.max_x, max_x_hw);
+ prop->max_x, IQS5XX_XY_RES_MAX);
return -EINVAL;
- } else if (prop.max_x == 0) {
- error = iqs5xx_read_word(client, IQS5XX_X_RES, &max_x);
+ } else if (prop->max_x != max_x) {
+ error = iqs5xx_write_word(client, IQS5XX_X_RES, prop->max_x);
if (error)
return error;
-
- input_abs_set_max(iqs5xx->input,
- prop.swap_x_y ? ABS_MT_POSITION_Y :
- ABS_MT_POSITION_X,
- max_x);
- } else {
- max_x = (u16)prop.max_x;
}
- if (prop.max_y > max_y_hw) {
+ if (prop->max_y > IQS5XX_XY_RES_MAX) {
dev_err(&client->dev, "Invalid maximum y-coordinate: %u > %u\n",
- prop.max_y, max_y_hw);
+ prop->max_y, IQS5XX_XY_RES_MAX);
return -EINVAL;
- } else if (prop.max_y == 0) {
- error = iqs5xx_read_word(client, IQS5XX_Y_RES, &max_y);
+ } else if (prop->max_y != max_y) {
+ error = iqs5xx_write_word(client, IQS5XX_Y_RES, prop->max_y);
if (error)
return error;
-
- input_abs_set_max(iqs5xx->input,
- prop.swap_x_y ? ABS_MT_POSITION_X :
- ABS_MT_POSITION_Y,
- max_y);
- } else {
- max_y = (u16)prop.max_y;
}
- /*
- * Write horizontal and vertical resolution to the device in case its
- * original defaults were overridden or swapped as per the properties
- * specified in the device tree.
- */
- error = iqs5xx_write_word(client,
- prop.swap_x_y ? IQS5XX_Y_RES : IQS5XX_X_RES,
- max_x);
- if (error)
- return error;
-
- error = iqs5xx_write_word(client,
- prop.swap_x_y ? IQS5XX_X_RES : IQS5XX_Y_RES,
- max_y);
- if (error)
- return error;
-
error = input_mt_init_slots(iqs5xx->input, IQS5XX_NUM_CONTACTS,
INPUT_MT_DIRECT);
if (error)
@@ -603,7 +559,6 @@ static int iqs5xx_dev_init(struct i2c_client *client)
struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
struct iqs5xx_dev_id_info *dev_id_info;
int error;
- u8 val;
u8 buf[sizeof(*dev_id_info) + 1];
error = iqs5xx_read_burst(client, IQS5XX_PROD_NUM,
@@ -666,18 +621,18 @@ static int iqs5xx_dev_init(struct i2c_client *client)
if (error)
return error;
- error = iqs5xx_read_byte(client, IQS5XX_SYS_CFG0, &val);
+ error = iqs5xx_write_byte(client, IQS5XX_SYS_CTRL0, IQS5XX_ACK_RESET);
if (error)
return error;
- val |= IQS5XX_SETUP_COMPLETE;
- val &= ~IQS5XX_SW_INPUT_EVENT;
- error = iqs5xx_write_byte(client, IQS5XX_SYS_CFG0, val);
+ error = iqs5xx_write_byte(client, IQS5XX_SYS_CFG0,
+ IQS5XX_SETUP_COMPLETE | IQS5XX_WDT |
+ IQS5XX_ALP_REATI | IQS5XX_REATI);
if (error)
return error;
- val = IQS5XX_TP_EVENT | IQS5XX_EVENT_MODE;
- error = iqs5xx_write_byte(client, IQS5XX_SYS_CFG1, val);
+ error = iqs5xx_write_byte(client, IQS5XX_SYS_CFG1,
+ IQS5XX_TP_EVENT | IQS5XX_EVENT_MODE);
if (error)
return error;
@@ -688,13 +643,12 @@ static int iqs5xx_dev_init(struct i2c_client *client)
iqs5xx->bl_status = dev_id_info->bl_status;
/*
- * Closure of the first communication window that appears following the
- * release of reset appears to kick off an initialization period during
- * which further communication is met with clock stretching. The return
- * from this function is delayed so that further communication attempts
- * avoid this period.
+ * The following delay allows ATI to complete before the open and close
+ * callbacks are free to elicit I2C communication. Any attempts to read
+ * from or write to the device during this time may face extended clock
+ * stretching and prompt the I2C controller to report an error.
*/
- msleep(100);
+ msleep(250);
return 0;
}
@@ -702,7 +656,7 @@ static int iqs5xx_dev_init(struct i2c_client *client)
static irqreturn_t iqs5xx_irq(int irq, void *data)
{
struct iqs5xx_private *iqs5xx = data;
- struct iqs5xx_touch_data touch_data[IQS5XX_NUM_CONTACTS];
+ struct iqs5xx_status status;
struct i2c_client *client = iqs5xx->client;
struct input_dev *input = iqs5xx->input;
int error, i;
@@ -715,21 +669,35 @@ static irqreturn_t iqs5xx_irq(int irq, void *data)
if (iqs5xx->bl_status == IQS5XX_BL_STATUS_RESET)
return IRQ_NONE;
- error = iqs5xx_read_burst(client, IQS5XX_ABS_X,
- touch_data, sizeof(touch_data));
+ error = iqs5xx_read_burst(client, IQS5XX_SYS_INFO0,
+ &status, sizeof(status));
if (error)
return IRQ_NONE;
- for (i = 0; i < ARRAY_SIZE(touch_data); i++) {
- u16 pressure = be16_to_cpu(touch_data[i].strength);
+ if (status.sys_info[0] & IQS5XX_SHOW_RESET) {
+ dev_err(&client->dev, "Unexpected device reset\n");
+
+ error = iqs5xx_dev_init(client);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to re-initialize device: %d\n", error);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(status.touch_data); i++) {
+ struct iqs5xx_touch_data *touch_data = &status.touch_data[i];
+ u16 pressure = be16_to_cpu(touch_data->strength);
input_mt_slot(input, i);
if (input_mt_report_slot_state(input, MT_TOOL_FINGER,
pressure != 0)) {
- input_report_abs(input, ABS_MT_POSITION_X,
- be16_to_cpu(touch_data[i].abs_x));
- input_report_abs(input, ABS_MT_POSITION_Y,
- be16_to_cpu(touch_data[i].abs_y));
+ touchscreen_report_pos(iqs5xx->input, &iqs5xx->prop,
+ be16_to_cpu(touch_data->abs_x),
+ be16_to_cpu(touch_data->abs_y),
+ true);
input_report_abs(input, ABS_MT_PRESSURE, pressure);
}
}
@@ -884,7 +852,7 @@ static int iqs5xx_fw_file_parse(struct i2c_client *client,
static int iqs5xx_fw_file_write(struct i2c_client *client, const char *fw_file)
{
struct iqs5xx_private *iqs5xx = i2c_get_clientdata(client);
- int error;
+ int error, error_bl = 0;
u8 *pmap;
if (iqs5xx->bl_status == IQS5XX_BL_STATUS_NONE)
@@ -938,6 +906,7 @@ err_reset:
usleep_range(10000, 10100);
}
+ error_bl = error;
error = iqs5xx_dev_init(client);
if (!error && iqs5xx->bl_status == IQS5XX_BL_STATUS_RESET)
error = -EINVAL;
@@ -949,11 +918,15 @@ err_reset:
err_kfree:
kfree(pmap);
+ if (error_bl)
+ return error_bl;
+
return error;
}
-static ssize_t fw_file_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fw_file_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct iqs5xx_private *iqs5xx = dev_get_drvdata(dev);
struct i2c_client *client = iqs5xx->client;
@@ -1012,7 +985,7 @@ static int __maybe_unused iqs5xx_suspend(struct device *dev)
struct input_dev *input = iqs5xx->input;
int error = 0;
- if (!input)
+ if (!input || device_may_wakeup(dev))
return error;
mutex_lock(&input->mutex);
@@ -1031,7 +1004,7 @@ static int __maybe_unused iqs5xx_resume(struct device *dev)
struct input_dev *input = iqs5xx->input;
int error = 0;
- if (!input)
+ if (!input || device_may_wakeup(dev))
return error;
mutex_lock(&input->mutex);
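The iqs5xx hunks above replace the per-contact read with a single burst that
starts at IQS5XX_SYS_INFO0 and is mapped onto struct iqs5xx_status, so the
interrupt handler can detect an unexpected reset (IQS5XX_SHOW_RESET in
sys_info[0]) and walk the contact array in one transfer. A user-space sketch of
decoding such a buffer, assuming the field layout declared in the hunk (two
sys_info bytes, a contact count, two 16-bit relative values, then five 7-byte
contacts of x/y/strength/area, all big endian):

#include <stdint.h>
#include <stdio.h>

#define NUM_CONTACTS	5
#define SHOW_RESET	0x80	/* BIT(7) of sys_info[0] */

static uint16_t be16(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

static void decode_status(const uint8_t *buf)
{
	const uint8_t *contact;
	unsigned int i;

	if (buf[0] & SHOW_RESET) {
		printf("unexpected device reset\n");
		return;
	}

	/* contacts start after sys_info[2], num_active, rel_x and rel_y */
	for (i = 0; i < NUM_CONTACTS; i++) {
		contact = buf + 2 + 1 + 2 + 2 + i * 7;
		if (!be16(contact + 4))		/* strength == 0: no touch */
			continue;
		printf("slot %u: x=%u y=%u pressure=%u area=%u\n", i,
		       be16(contact), be16(contact + 2), be16(contact + 4),
		       contact[6]);
	}
}

int main(void)
{
	uint8_t buf[2 + 1 + 2 + 2 + NUM_CONTACTS * 7] = { 0 };

	buf[7] = 0x01; buf[8] = 0x40;	/* contact 0: x = 320 */
	buf[9] = 0x00; buf[10] = 0xc8;	/* contact 0: y = 200 */
	buf[11] = 0x00; buf[12] = 0x50;	/* contact 0: strength = 80 */
	decode_status(buf);
	return 0;
}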
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index c0050044a5a9..225796a3f546 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -465,13 +465,13 @@ static void mip4_report_keys(struct mip4_ts *ts, u8 *packet)
static void mip4_report_touch(struct mip4_ts *ts, u8 *packet)
{
int id;
- bool hover;
- bool palm;
+ bool __always_unused hover;
+ bool __always_unused palm;
bool state;
u16 x, y;
- u8 pressure_stage = 0;
+ u8 __always_unused pressure_stage = 0;
u8 pressure;
- u8 size;
+ u8 __always_unused size;
u8 touch_major;
u8 touch_minor;
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index 603a948460d6..4d2d22a86977 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -445,6 +445,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
enum raydium_bl_ack state)
{
int error;
+ static const u8 cmd[] = { 0xFF, 0x39 };
error = raydium_i2c_send(client, RM_CMD_BOOT_WRT, data, len);
if (error) {
@@ -453,7 +454,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
return error;
}
- error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, NULL, 0);
+ error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, cmd, sizeof(cmd));
if (error) {
dev_err(&client->dev, "Ack obj command failed: %d\n", error);
return error;
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index bda96762744e..6abae665ca71 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -26,6 +26,20 @@
#define ST1232_TS_NAME "st1232-ts"
#define ST1633_TS_NAME "st1633-ts"
+#define REG_STATUS 0x01 /* Device Status | Error Code */
+
+#define STATUS_NORMAL 0x00
+#define STATUS_INIT 0x01
+#define STATUS_ERROR 0x02
+#define STATUS_AUTO_TUNING 0x03
+#define STATUS_IDLE 0x04
+#define STATUS_POWER_DOWN 0x05
+
+#define ERROR_NONE 0x00
+#define ERROR_INVALID_ADDRESS 0x10
+#define ERROR_INVALID_VALUE 0x20
+#define ERROR_INVALID_PLATFORM 0x30
+
#define REG_XY_RESOLUTION 0x04
#define REG_XY_COORDINATES 0x12
#define ST_TS_MAX_FINGERS 10
@@ -47,7 +61,8 @@ struct st1232_ts_data {
u8 *read_buf;
};
-static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg)
+static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg,
+ unsigned int n)
{
struct i2c_client *client = ts->client;
struct i2c_msg msg[] = {
@@ -59,7 +74,7 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg)
{
.addr = client->addr,
.flags = I2C_M_RD | I2C_M_DMA_SAFE,
- .len = ts->read_buf_len,
+ .len = n,
.buf = ts->read_buf,
}
};
@@ -72,6 +87,27 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg)
return 0;
}
+static int st1232_ts_wait_ready(struct st1232_ts_data *ts)
+{
+ unsigned int retries;
+ int error;
+
+ for (retries = 10; retries; retries--) {
+ error = st1232_ts_read_data(ts, REG_STATUS, 1);
+ if (!error) {
+ switch (ts->read_buf[0]) {
+ case STATUS_NORMAL | ERROR_NONE:
+ case STATUS_IDLE | ERROR_NONE:
+ return 0;
+ }
+ }
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ENXIO;
+}
+
static int st1232_ts_read_resolution(struct st1232_ts_data *ts, u16 *max_x,
u16 *max_y)
{
@@ -79,14 +115,14 @@ static int st1232_ts_read_resolution(struct st1232_ts_data *ts, u16 *max_x,
int error;
/* select resolution register */
- error = st1232_ts_read_data(ts, REG_XY_RESOLUTION);
+ error = st1232_ts_read_data(ts, REG_XY_RESOLUTION, 3);
if (error)
return error;
buf = ts->read_buf;
- *max_x = ((buf[0] & 0x0070) << 4) | buf[1];
- *max_y = ((buf[0] & 0x0007) << 8) | buf[2];
+ *max_x = (((buf[0] & 0x0070) << 4) | buf[1]) - 1;
+ *max_y = (((buf[0] & 0x0007) << 8) | buf[2]) - 1;
return 0;
}
@@ -140,7 +176,7 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
int count;
int error;
- error = st1232_ts_read_data(ts, REG_XY_COORDINATES);
+ error = st1232_ts_read_data(ts, REG_XY_COORDINATES, ts->read_buf_len);
if (error)
goto out;
@@ -251,6 +287,11 @@ static int st1232_ts_probe(struct i2c_client *client,
input_dev->name = "st1232-touchscreen";
input_dev->id.bustype = BUS_I2C;
+ /* Wait until device is ready */
+ error = st1232_ts_wait_ready(ts);
+ if (error)
+ return error;
+
/* Read resolution from the chip */
error = st1232_ts_read_resolution(ts, &max_x, &max_y);
if (error) {
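In the st1232 hunk above, the three-byte read at REG_XY_RESOLUTION packs the
high bits of both axes into the first byte: bits [6:4] extend buf[1] into the X
maximum and bits [2:0] extend buf[2] into the Y maximum, with 1 subtracted so
the values can be used directly as ABS_MT maxima. A tiny stand-alone sketch of
that decode (a user-space illustration, not driver code):

#include <stdint.h>
#include <stdio.h>

static void decode_resolution(const uint8_t buf[3],
			      uint16_t *max_x, uint16_t *max_y)
{
	*max_x = (uint16_t)((((buf[0] & 0x70) << 4) | buf[1]) - 1);
	*max_y = (uint16_t)((((buf[0] & 0x07) << 8) | buf[2]) - 1);
}

int main(void)
{
	const uint8_t buf[3] = { 0x32, 0x20, 0x40 };	/* example register bytes */
	uint16_t max_x, max_y;

	decode_resolution(buf, &max_x, &max_y);
	printf("max_x=%u max_y=%u\n", max_x, max_y);	/* 799 and 575 */
	return 0;
}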
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index cd747725589b..25c45c3a3561 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -52,6 +52,7 @@
* @idev: registered input device
* @work: a work item used to scan the device
* @dev: a pointer back to the MFD cell struct device*
+ * @prop: Touchscreen properties
* @ave_ctrl: Sample average control
* (0 -> 1 sample, 1 -> 2 samples, 2 -> 4 samples, 3 -> 8 samples)
* @touch_det_delay: Touch detect interrupt delay
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 620cdd7d214a..12f2562b0141 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -787,6 +787,7 @@ static int sur40_probe(struct usb_interface *interface,
dev_err(&interface->dev,
"Unable to register video controls.");
v4l2_ctrl_handler_free(&sur40->hdl);
+ error = sur40->hdl.error;
goto err_unreg_v4l2;
}
diff --git a/drivers/input/touchscreen/surface3_spi.c b/drivers/input/touchscreen/surface3_spi.c
index 731454599fce..1da23e5585a0 100644
--- a/drivers/input/touchscreen/surface3_spi.c
+++ b/drivers/input/touchscreen/surface3_spi.c
@@ -94,9 +94,7 @@ static void surface3_spi_report_touch(struct surface3_ts_data *ts_data,
static void surface3_spi_process_touch(struct surface3_ts_data *ts_data, u8 *data)
{
- u16 timestamp;
unsigned int i;
- timestamp = get_unaligned_le16(&data[15]);
for (i = 0; i < 13; i++) {
struct surface3_ts_data_finger *finger;
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 397cb1d3f481..c847453a03c2 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -1044,6 +1044,7 @@ static void nexio_exit(struct usbtouch_usb *usbtouch)
static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
{
+ struct device *dev = &usbtouch->interface->dev;
struct nexio_touch_packet *packet = (void *) pkt;
struct nexio_priv *priv = usbtouch->priv;
unsigned int data_len = be16_to_cpu(packet->data_len);
@@ -1062,6 +1063,8 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
/* send ACK */
ret = usb_submit_urb(priv->ack, GFP_ATOMIC);
+ if (ret)
+ dev_warn(dev, "Failed to submit ACK URB: %d\n", ret);
if (!usbtouch->type->max_xc) {
usbtouch->type->max_xc = 2 * x_len;
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
index a3e3adbabc67..3b636beb583c 100644
--- a/drivers/input/touchscreen/zinitix.c
+++ b/drivers/input/touchscreen/zinitix.c
@@ -161,7 +161,7 @@ static int zinitix_read_data(struct i2c_client *client,
ret = i2c_master_recv(client, (u8 *)values, length);
if (ret != length)
- return ret < 0 ? ret : -EIO; ;
+ return ret < 0 ? ret : -EIO;
return 0;
}
@@ -190,7 +190,7 @@ static int zinitix_write_cmd(struct i2c_client *client, u16 reg)
return 0;
}
-static bool zinitix_init_touch(struct bt541_ts_data *bt541)
+static int zinitix_init_touch(struct bt541_ts_data *bt541)
{
struct i2c_client *client = bt541->client;
int i;
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index b3fb5b02bcf1..ca52647f8955 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -17,6 +17,15 @@ config INTERCONNECT_QCOM_MSM8916
This is a driver for the Qualcomm Network-on-Chip on msm8916-based
platforms.
+config INTERCONNECT_QCOM_MSM8939
+ tristate "Qualcomm MSM8939 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on msm8939-based
+ platforms.
+
config INTERCONNECT_QCOM_MSM8974
tristate "Qualcomm MSM8974 interconnect driver"
depends on INTERCONNECT_QCOM
@@ -74,6 +83,15 @@ config INTERCONNECT_QCOM_SDM845
This is a driver for the Qualcomm Network-on-Chip on sdm845-based
platforms.
+config INTERCONNECT_QCOM_SDX55
+ tristate "Qualcomm SDX55 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sdx55-based
+ platforms.
+
config INTERCONNECT_QCOM_SM8150
tristate "Qualcomm SM8150 interconnect driver"
depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index cf628f7990cd..c6a735df067e 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -2,24 +2,28 @@
icc-bcm-voter-objs := bcm-voter.o
qnoc-msm8916-objs := msm8916.o
+qnoc-msm8939-objs := msm8939.o
qnoc-msm8974-objs := msm8974.o
icc-osm-l3-objs := osm-l3.o
qnoc-qcs404-objs := qcs404.o
icc-rpmh-obj := icc-rpmh.o
qnoc-sc7180-objs := sc7180.o
qnoc-sdm845-objs := sdm845.o
+qnoc-sdx55-objs := sdx55.o
qnoc-sm8150-objs := sm8150.o
qnoc-sm8250-objs := sm8250.o
-icc-smd-rpm-objs := smd-rpm.o
+icc-smd-rpm-objs := smd-rpm.o icc-rpm.o
obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o
+obj-$(CONFIG_INTERCONNECT_QCOM_MSM8939) += qnoc-msm8939.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o
obj-$(CONFIG_INTERCONNECT_QCOM_OSM_L3) += icc-osm-l3.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
obj-$(CONFIG_INTERCONNECT_QCOM_RPMH) += icc-rpmh.o
obj-$(CONFIG_INTERCONNECT_QCOM_SC7180) += qnoc-sc7180.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o
obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
new file mode 100644
index 000000000000..cc6095492cbe
--- /dev/null
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Linaro Ltd
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "smd-rpm.h"
+#include "icc-rpm.h"
+
+static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct qcom_icc_provider *qp;
+ struct qcom_icc_node *qn;
+ struct icc_provider *provider;
+ struct icc_node *n;
+ u64 sum_bw;
+ u64 max_peak_bw;
+ u64 rate;
+ u32 agg_avg = 0;
+ u32 agg_peak = 0;
+ int ret, i;
+
+ qn = src->data;
+ provider = src->provider;
+ qp = to_qcom_provider(provider);
+
+ list_for_each_entry(n, &provider->nodes, node_list)
+ provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
+
+ sum_bw = icc_units_to_bps(agg_avg);
+ max_peak_bw = icc_units_to_bps(agg_peak);
+
+ /* send bandwidth request message to the RPM processor */
+ if (qn->mas_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+ RPM_BUS_MASTER_REQ,
+ qn->mas_rpm_id,
+ sum_bw);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
+ qn->mas_rpm_id, ret);
+ return ret;
+ }
+ }
+
+ if (qn->slv_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+ RPM_BUS_SLAVE_REQ,
+ qn->slv_rpm_id,
+ sum_bw);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send slv error %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ rate = max(sum_bw, max_peak_bw);
+
+ do_div(rate, qn->buswidth);
+
+ if (qn->rate == rate)
+ return 0;
+
+ for (i = 0; i < qp->num_clks; i++) {
+ ret = clk_set_rate(qp->bus_clks[i].clk, rate);
+ if (ret) {
+ pr_err("%s clk_set_rate error: %d\n",
+ qp->bus_clks[i].id, ret);
+ return ret;
+ }
+ }
+
+ qn->rate = rate;
+
+ return 0;
+}
+
+int qnoc_probe(struct platform_device *pdev, size_t cd_size, int cd_num,
+ const struct clk_bulk_data *cd)
+{
+ struct device *dev = &pdev->dev;
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ /* wait for the RPM proxy */
+ if (!qcom_icc_rpm_smd_available())
+ return -EPROBE_DEFER;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ qp->bus_clks = devm_kmemdup(dev, cd, cd_size,
+ GFP_KERNEL);
+ if (!qp->bus_clks)
+ return -ENOMEM;
+
+ qp->num_clks = cd_num;
+ ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ provider = &qp->provider;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->dev = dev;
+ provider->set = qcom_icc_set;
+ provider->aggregate = icc_std_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ provider->data = data;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(dev, "error adding interconnect provider: %d\n", ret);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ return ret;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+err:
+ icc_nodes_remove(provider);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ icc_provider_del(provider);
+
+ return ret;
+}
+EXPORT_SYMBOL(qnoc_probe);
+
+int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ return icc_provider_del(&qp->provider);
+}
+EXPORT_SYMBOL(qnoc_remove);
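qcom_icc_set() above turns the aggregated bandwidth into a bus clock rate: the
framework's kBps figures are scaled to bytes per second, the larger of the
average and peak values is taken, and dividing by the node's bus width in bytes
yields the frequency programmed into every bus clock. A worked sketch of that
arithmetic (plain C, outside the kernel; the *1000 scaling mirrors how
icc_units_to_bps() is used here):

#include <stdint.h>
#include <stdio.h>

/* Return the bus clock rate in Hz for the given aggregate bandwidths. */
static uint64_t bus_rate_hz(uint32_t agg_avg_kbps, uint32_t agg_peak_kbps,
			    uint16_t buswidth_bytes)
{
	uint64_t sum_bw = (uint64_t)agg_avg_kbps * 1000;	/* bytes/s */
	uint64_t max_peak_bw = (uint64_t)agg_peak_kbps * 1000;
	uint64_t rate = sum_bw > max_peak_bw ? sum_bw : max_peak_bw;

	return rate / buswidth_bytes;
}

int main(void)
{
	/* 1 GB/s average, 1.6 GB/s peak, 8-byte-wide port -> 200 MHz */
	printf("%llu Hz\n",
	       (unsigned long long)bus_rate_hz(1000000, 1600000, 8));
	return 0;
}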
diff --git a/drivers/interconnect/qcom/icc-rpm.h b/drivers/interconnect/qcom/icc-rpm.h
new file mode 100644
index 000000000000..79a6f68249c1
--- /dev/null
+++ b/drivers/interconnect/qcom/icc-rpm.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Linaro Ltd
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_ICC_RPM_H
+#define __DRIVERS_INTERCONNECT_QCOM_ICC_RPM_H
+
+#define RPM_BUS_MASTER_REQ 0x73616d62
+#define RPM_BUS_SLAVE_REQ 0x766c7362
+
+#define QCOM_MAX_LINKS 12
+
+#define to_qcom_provider(_provider) \
+ container_of(_provider, struct qcom_icc_provider, provider)
+
+/**
+ * struct qcom_icc_provider - Qualcomm specific interconnect provider
+ * @provider: generic interconnect provider
+ * @bus_clks: the clk_bulk_data table of bus clocks
+ * @num_clks: the total number of clk_bulk_data entries
+ */
+struct qcom_icc_provider {
+ struct icc_provider provider;
+ struct clk_bulk_data *bus_clks;
+ int num_clks;
+};
+
+/**
+ * struct qcom_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @id: a unique node identifier
+ * @links: an array of nodes where we can go next while traversing
+ * @num_links: the total number of @links
+ * @buswidth: width of the interconnect between a node and the bus (bytes)
+ * @mas_rpm_id: RPM ID for devices that are bus masters
+ * @slv_rpm_id: RPM ID for devices that are bus slaves
+ * @rate: current bus clock rate in Hz
+ */
+struct qcom_icc_node {
+ unsigned char *name;
+ u16 id;
+ u16 links[QCOM_MAX_LINKS];
+ u16 num_links;
+ u16 buswidth;
+ int mas_rpm_id;
+ int slv_rpm_id;
+ u64 rate;
+};
+
+struct qcom_icc_desc {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+};
+
+#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
+ ...) \
+ static struct qcom_icc_node _name = { \
+ .name = #_name, \
+ .id = _id, \
+ .buswidth = _buswidth, \
+ .mas_rpm_id = _mas_rpm_id, \
+ .slv_rpm_id = _slv_rpm_id, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+
+int qnoc_probe(struct platform_device *pdev, size_t cd_size, int cd_num,
+ const struct clk_bulk_data *cd);
+int qnoc_remove(struct platform_device *pdev);
+
+#endif
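The DEFINE_QNODE() macro in icc-rpm.h above is what the msm8916 and msm8939
node tables are built from: it emits one static struct qcom_icc_node whose
num_links is derived from the variadic link list. For instance, the first
msm8939 node below expands to roughly the following (hand-expanded sketch;
field order follows the macro, initializer values taken from that line):

static struct qcom_icc_node bimc_snoc_mas = {
	.name = "bimc_snoc_mas",
	.id = MSM8939_BIMC_SNOC_MAS,
	.buswidth = 8,
	.mas_rpm_id = -1,
	.slv_rpm_id = -1,
	.num_links = 1,			/* ARRAY_SIZE of the link list */
	.links = { MSM8939_BIMC_SNOC_SLV },
};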
diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c
index e8371d40ab8d..fc3689c8947a 100644
--- a/drivers/interconnect/qcom/msm8916.c
+++ b/drivers/interconnect/qcom/msm8916.c
@@ -15,9 +15,7 @@
#include <dt-bindings/interconnect/qcom,msm8916.h>
#include "smd-rpm.h"
-
-#define RPM_BUS_MASTER_REQ 0x73616d62
-#define RPM_BUS_SLAVE_REQ 0x766c7362
+#include "icc-rpm.h"
enum {
MSM8916_BIMC_SNOC_MAS = 1,
@@ -107,67 +105,11 @@ enum {
MSM8916_SNOC_PNOC_SLV,
};
-#define to_msm8916_provider(_provider) \
- container_of(_provider, struct msm8916_icc_provider, provider)
-
static const struct clk_bulk_data msm8916_bus_clocks[] = {
{ .id = "bus" },
{ .id = "bus_a" },
};
-/**
- * struct msm8916_icc_provider - Qualcomm specific interconnect provider
- * @provider: generic interconnect provider
- * @bus_clks: the clk_bulk_data table of bus clocks
- * @num_clks: the total number of clk_bulk_data entries
- */
-struct msm8916_icc_provider {
- struct icc_provider provider;
- struct clk_bulk_data *bus_clks;
- int num_clks;
-};
-
-#define MSM8916_MAX_LINKS 8
-
-/**
- * struct msm8916_icc_node - Qualcomm specific interconnect nodes
- * @name: the node name used in debugfs
- * @id: a unique node identifier
- * @links: an array of nodes where we can go next while traversing
- * @num_links: the total number of @links
- * @buswidth: width of the interconnect between a node and the bus (bytes)
- * @mas_rpm_id: RPM ID for devices that are bus masters
- * @slv_rpm_id: RPM ID for devices that are bus slaves
- * @rate: current bus clock rate in Hz
- */
-struct msm8916_icc_node {
- unsigned char *name;
- u16 id;
- u16 links[MSM8916_MAX_LINKS];
- u16 num_links;
- u16 buswidth;
- int mas_rpm_id;
- int slv_rpm_id;
- u64 rate;
-};
-
-struct msm8916_icc_desc {
- struct msm8916_icc_node **nodes;
- size_t num_nodes;
-};
-
-#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
- ...) \
- static struct msm8916_icc_node _name = { \
- .name = #_name, \
- .id = _id, \
- .buswidth = _buswidth, \
- .mas_rpm_id = _mas_rpm_id, \
- .slv_rpm_id = _slv_rpm_id, \
- .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
- .links = { __VA_ARGS__ }, \
- }
-
DEFINE_QNODE(bimc_snoc_mas, MSM8916_BIMC_SNOC_MAS, 8, -1, -1, MSM8916_BIMC_SNOC_SLV);
DEFINE_QNODE(bimc_snoc_slv, MSM8916_BIMC_SNOC_SLV, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_1);
DEFINE_QNODE(mas_apss, MSM8916_MASTER_AMPSS_M0, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
@@ -254,7 +196,7 @@ DEFINE_QNODE(snoc_int_bimc, MSM8916_SNOC_INT_BIMC, 8, 101, 132, MSM8916_SNOC_BIM
DEFINE_QNODE(snoc_pcnoc_mas, MSM8916_SNOC_PNOC_MAS, 8, -1, -1, MSM8916_SNOC_PNOC_SLV);
DEFINE_QNODE(snoc_pcnoc_slv, MSM8916_SNOC_PNOC_SLV, 8, -1, -1, MSM8916_PNOC_INT_0);
-static struct msm8916_icc_node *msm8916_snoc_nodes[] = {
+static struct qcom_icc_node *msm8916_snoc_nodes[] = {
[BIMC_SNOC_SLV] = &bimc_snoc_slv,
[MASTER_JPEG] = &mas_jpeg,
[MASTER_MDP_PORT0] = &mas_mdp,
@@ -283,12 +225,12 @@ static struct msm8916_icc_node *msm8916_snoc_nodes[] = {
[SNOC_QDSS_INT] = &qdss_int,
};
-static struct msm8916_icc_desc msm8916_snoc = {
+static struct qcom_icc_desc msm8916_snoc = {
.nodes = msm8916_snoc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_snoc_nodes),
};
-static struct msm8916_icc_node *msm8916_bimc_nodes[] = {
+static struct qcom_icc_node *msm8916_bimc_nodes[] = {
[BIMC_SNOC_MAS] = &bimc_snoc_mas,
[MASTER_AMPSS_M0] = &mas_apss,
[MASTER_GRAPHICS_3D] = &mas_gfx,
@@ -300,12 +242,12 @@ static struct msm8916_icc_node *msm8916_bimc_nodes[] = {
[SNOC_BIMC_1_SLV] = &snoc_bimc_1_slv,
};
-static struct msm8916_icc_desc msm8916_bimc = {
+static struct qcom_icc_desc msm8916_bimc = {
.nodes = msm8916_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_bimc_nodes),
};
-static struct msm8916_icc_node *msm8916_pcnoc_nodes[] = {
+static struct qcom_icc_node *msm8916_pcnoc_nodes[] = {
[MASTER_BLSP_1] = &mas_blsp_1,
[MASTER_DEHR] = &mas_dehr,
[MASTER_LPASS] = &mas_audio,
@@ -358,178 +300,15 @@ static struct msm8916_icc_node *msm8916_pcnoc_nodes[] = {
[SNOC_PCNOC_SLV] = &snoc_pcnoc_slv,
};
-static struct msm8916_icc_desc msm8916_pcnoc = {
+static struct qcom_icc_desc msm8916_pcnoc = {
.nodes = msm8916_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_pcnoc_nodes),
};
-static int msm8916_icc_set(struct icc_node *src, struct icc_node *dst)
-{
- struct msm8916_icc_provider *qp;
- struct msm8916_icc_node *qn;
- u64 sum_bw, max_peak_bw, rate;
- u32 agg_avg = 0, agg_peak = 0;
- struct icc_provider *provider;
- struct icc_node *n;
- int ret, i;
-
- qn = src->data;
- provider = src->provider;
- qp = to_msm8916_provider(provider);
-
- list_for_each_entry(n, &provider->nodes, node_list)
- provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
- &agg_avg, &agg_peak);
-
- sum_bw = icc_units_to_bps(agg_avg);
- max_peak_bw = icc_units_to_bps(agg_peak);
-
- /* send bandwidth request message to the RPM processor */
- if (qn->mas_rpm_id != -1) {
- ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
- RPM_BUS_MASTER_REQ,
- qn->mas_rpm_id,
- sum_bw);
- if (ret) {
- pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
- qn->mas_rpm_id, ret);
- return ret;
- }
- }
-
- if (qn->slv_rpm_id != -1) {
- ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
- RPM_BUS_SLAVE_REQ,
- qn->slv_rpm_id,
- sum_bw);
- if (ret) {
- pr_err("qcom_icc_rpm_smd_send slv error %d\n",
- ret);
- return ret;
- }
- }
-
- rate = max(sum_bw, max_peak_bw);
-
- do_div(rate, qn->buswidth);
-
- if (qn->rate == rate)
- return 0;
-
- for (i = 0; i < qp->num_clks; i++) {
- ret = clk_set_rate(qp->bus_clks[i].clk, rate);
- if (ret) {
- pr_err("%s clk_set_rate error: %d\n",
- qp->bus_clks[i].id, ret);
- return ret;
- }
- }
-
- qn->rate = rate;
-
- return 0;
-}
-
static int msm8916_qnoc_probe(struct platform_device *pdev)
{
- const struct msm8916_icc_desc *desc;
- struct msm8916_icc_node **qnodes;
- struct msm8916_icc_provider *qp;
- struct device *dev = &pdev->dev;
- struct icc_onecell_data *data;
- struct icc_provider *provider;
- struct icc_node *node;
- size_t num_nodes, i;
- int ret;
-
- /* wait for the RPM proxy */
- if (!qcom_icc_rpm_smd_available())
- return -EPROBE_DEFER;
-
- desc = of_device_get_match_data(dev);
- if (!desc)
- return -EINVAL;
-
- qnodes = desc->nodes;
- num_nodes = desc->num_nodes;
-
- qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return -ENOMEM;
-
- data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- qp->bus_clks = devm_kmemdup(dev, msm8916_bus_clocks,
- sizeof(msm8916_bus_clocks), GFP_KERNEL);
- if (!qp->bus_clks)
- return -ENOMEM;
-
- qp->num_clks = ARRAY_SIZE(msm8916_bus_clocks);
- ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
- if (ret)
- return ret;
-
- ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
- if (ret)
- return ret;
-
- provider = &qp->provider;
- INIT_LIST_HEAD(&provider->nodes);
- provider->dev = dev;
- provider->set = msm8916_icc_set;
- provider->aggregate = icc_std_aggregate;
- provider->xlate = of_icc_xlate_onecell;
- provider->data = data;
-
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err(dev, "error adding interconnect provider: %d\n", ret);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- return ret;
- }
-
- for (i = 0; i < num_nodes; i++) {
- size_t j;
-
- node = icc_node_create(qnodes[i]->id);
- if (IS_ERR(node)) {
- ret = PTR_ERR(node);
- goto err;
- }
-
- node->name = qnodes[i]->name;
- node->data = qnodes[i];
- icc_node_add(node, provider);
-
- for (j = 0; j < qnodes[i]->num_links; j++)
- icc_link_create(node, qnodes[i]->links[j]);
-
- data->nodes[i] = node;
- }
- data->num_nodes = num_nodes;
-
- platform_set_drvdata(pdev, qp);
-
- return 0;
-
-err:
- icc_nodes_remove(provider);
- icc_provider_del(provider);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
-
- return ret;
-}
-
-static int msm8916_qnoc_remove(struct platform_device *pdev)
-{
- struct msm8916_icc_provider *qp = platform_get_drvdata(pdev);
-
- icc_nodes_remove(&qp->provider);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- return icc_provider_del(&qp->provider);
+ return qnoc_probe(pdev, sizeof(msm8916_bus_clocks),
+ ARRAY_SIZE(msm8916_bus_clocks), msm8916_bus_clocks);
}
static const struct of_device_id msm8916_noc_of_match[] = {
@@ -542,7 +321,7 @@ MODULE_DEVICE_TABLE(of, msm8916_noc_of_match);
static struct platform_driver msm8916_noc_driver = {
.probe = msm8916_qnoc_probe,
- .remove = msm8916_qnoc_remove,
+ .remove = qnoc_remove,
.driver = {
.name = "qnoc-msm8916",
.of_match_table = msm8916_noc_of_match,
diff --git a/drivers/interconnect/qcom/msm8939.c b/drivers/interconnect/qcom/msm8939.c
new file mode 100644
index 000000000000..dfbec30ed149
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8939.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Linaro Ltd
+ * Author: Jun Nie <jun.nie@linaro.org>
+ * Based on the msm8916 interconnect driver by Georgi Djakov.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+
+#include <dt-bindings/interconnect/qcom,msm8939.h>
+
+#include "smd-rpm.h"
+#include "icc-rpm.h"
+
+enum {
+ MSM8939_BIMC_SNOC_MAS = 1,
+ MSM8939_BIMC_SNOC_SLV,
+ MSM8939_MASTER_AMPSS_M0,
+ MSM8939_MASTER_LPASS,
+ MSM8939_MASTER_BLSP_1,
+ MSM8939_MASTER_DEHR,
+ MSM8939_MASTER_GRAPHICS_3D,
+ MSM8939_MASTER_JPEG,
+ MSM8939_MASTER_MDP_PORT0,
+ MSM8939_MASTER_MDP_PORT1,
+ MSM8939_MASTER_CPP,
+ MSM8939_MASTER_CRYPTO_CORE0,
+ MSM8939_MASTER_SDCC_1,
+ MSM8939_MASTER_SDCC_2,
+ MSM8939_MASTER_QDSS_BAM,
+ MSM8939_MASTER_QDSS_ETR,
+ MSM8939_MASTER_SNOC_CFG,
+ MSM8939_MASTER_SPDM,
+ MSM8939_MASTER_TCU0,
+ MSM8939_MASTER_USB_HS1,
+ MSM8939_MASTER_USB_HS2,
+ MSM8939_MASTER_VFE,
+ MSM8939_MASTER_VIDEO_P0,
+ MSM8939_SNOC_MM_INT_0,
+ MSM8939_SNOC_MM_INT_1,
+ MSM8939_SNOC_MM_INT_2,
+ MSM8939_PNOC_INT_0,
+ MSM8939_PNOC_INT_1,
+ MSM8939_PNOC_MAS_0,
+ MSM8939_PNOC_MAS_1,
+ MSM8939_PNOC_SLV_0,
+ MSM8939_PNOC_SLV_1,
+ MSM8939_PNOC_SLV_2,
+ MSM8939_PNOC_SLV_3,
+ MSM8939_PNOC_SLV_4,
+ MSM8939_PNOC_SLV_8,
+ MSM8939_PNOC_SLV_9,
+ MSM8939_PNOC_SNOC_MAS,
+ MSM8939_PNOC_SNOC_SLV,
+ MSM8939_SNOC_QDSS_INT,
+ MSM8939_SLAVE_AMPSS_L2,
+ MSM8939_SLAVE_APSS,
+ MSM8939_SLAVE_LPASS,
+ MSM8939_SLAVE_BIMC_CFG,
+ MSM8939_SLAVE_BLSP_1,
+ MSM8939_SLAVE_BOOT_ROM,
+ MSM8939_SLAVE_CAMERA_CFG,
+ MSM8939_SLAVE_CATS_128,
+ MSM8939_SLAVE_OCMEM_64,
+ MSM8939_SLAVE_CLK_CTL,
+ MSM8939_SLAVE_CRYPTO_0_CFG,
+ MSM8939_SLAVE_DEHR_CFG,
+ MSM8939_SLAVE_DISPLAY_CFG,
+ MSM8939_SLAVE_EBI_CH0,
+ MSM8939_SLAVE_GRAPHICS_3D_CFG,
+ MSM8939_SLAVE_IMEM_CFG,
+ MSM8939_SLAVE_IMEM,
+ MSM8939_SLAVE_MPM,
+ MSM8939_SLAVE_MSG_RAM,
+ MSM8939_SLAVE_MSS,
+ MSM8939_SLAVE_PDM,
+ MSM8939_SLAVE_PMIC_ARB,
+ MSM8939_SLAVE_PNOC_CFG,
+ MSM8939_SLAVE_PRNG,
+ MSM8939_SLAVE_QDSS_CFG,
+ MSM8939_SLAVE_QDSS_STM,
+ MSM8939_SLAVE_RBCPR_CFG,
+ MSM8939_SLAVE_SDCC_1,
+ MSM8939_SLAVE_SDCC_2,
+ MSM8939_SLAVE_SECURITY,
+ MSM8939_SLAVE_SNOC_CFG,
+ MSM8939_SLAVE_SPDM,
+ MSM8939_SLAVE_SRVC_SNOC,
+ MSM8939_SLAVE_TCSR,
+ MSM8939_SLAVE_TLMM,
+ MSM8939_SLAVE_USB_HS1,
+ MSM8939_SLAVE_USB_HS2,
+ MSM8939_SLAVE_VENUS_CFG,
+ MSM8939_SNOC_BIMC_0_MAS,
+ MSM8939_SNOC_BIMC_0_SLV,
+ MSM8939_SNOC_BIMC_1_MAS,
+ MSM8939_SNOC_BIMC_1_SLV,
+ MSM8939_SNOC_BIMC_2_MAS,
+ MSM8939_SNOC_BIMC_2_SLV,
+ MSM8939_SNOC_INT_0,
+ MSM8939_SNOC_INT_1,
+ MSM8939_SNOC_INT_BIMC,
+ MSM8939_SNOC_PNOC_MAS,
+ MSM8939_SNOC_PNOC_SLV,
+};
+
+static const struct clk_bulk_data msm8939_bus_clocks[] = {
+ { .id = "bus" },
+ { .id = "bus_a" },
+};
+
+DEFINE_QNODE(bimc_snoc_mas, MSM8939_BIMC_SNOC_MAS, 8, -1, -1, MSM8939_BIMC_SNOC_SLV);
+DEFINE_QNODE(bimc_snoc_slv, MSM8939_BIMC_SNOC_SLV, 16, -1, 2, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_1);
+DEFINE_QNODE(mas_apss, MSM8939_MASTER_AMPSS_M0, 16, -1, -1, MSM8939_SLAVE_EBI_CH0, MSM8939_BIMC_SNOC_MAS, MSM8939_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_audio, MSM8939_MASTER_LPASS, 4, -1, -1, MSM8939_PNOC_MAS_0);
+DEFINE_QNODE(mas_blsp_1, MSM8939_MASTER_BLSP_1, 4, -1, -1, MSM8939_PNOC_MAS_1);
+DEFINE_QNODE(mas_dehr, MSM8939_MASTER_DEHR, 4, -1, -1, MSM8939_PNOC_MAS_0);
+DEFINE_QNODE(mas_gfx, MSM8939_MASTER_GRAPHICS_3D, 16, -1, -1, MSM8939_SLAVE_EBI_CH0, MSM8939_BIMC_SNOC_MAS, MSM8939_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_jpeg, MSM8939_MASTER_JPEG, 16, -1, -1, MSM8939_SNOC_MM_INT_0, MSM8939_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_mdp0, MSM8939_MASTER_MDP_PORT0, 16, -1, -1, MSM8939_SNOC_MM_INT_1, MSM8939_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_mdp1, MSM8939_MASTER_MDP_PORT1, 16, -1, -1, MSM8939_SNOC_MM_INT_0, MSM8939_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_cpp, MSM8939_MASTER_CPP, 16, -1, -1, MSM8939_SNOC_MM_INT_0, MSM8939_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_pcnoc_crypto_0, MSM8939_MASTER_CRYPTO_CORE0, 8, -1, -1, MSM8939_PNOC_INT_1);
+DEFINE_QNODE(mas_pcnoc_sdcc_1, MSM8939_MASTER_SDCC_1, 8, -1, -1, MSM8939_PNOC_INT_1);
+DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8939_MASTER_SDCC_2, 8, -1, -1, MSM8939_PNOC_INT_1);
+DEFINE_QNODE(mas_qdss_bam, MSM8939_MASTER_QDSS_BAM, 8, -1, -1, MSM8939_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_qdss_etr, MSM8939_MASTER_QDSS_ETR, 8, -1, -1, MSM8939_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_snoc_cfg, MSM8939_MASTER_SNOC_CFG, 4, 20, -1, MSM8939_SLAVE_SRVC_SNOC);
+DEFINE_QNODE(mas_spdm, MSM8939_MASTER_SPDM, 4, -1, -1, MSM8939_PNOC_MAS_0);
+DEFINE_QNODE(mas_tcu0, MSM8939_MASTER_TCU0, 16, -1, -1, MSM8939_SLAVE_EBI_CH0, MSM8939_BIMC_SNOC_MAS, MSM8939_SLAVE_AMPSS_L2);
+DEFINE_QNODE(mas_usb_hs1, MSM8939_MASTER_USB_HS1, 4, -1, -1, MSM8939_PNOC_MAS_1);
+DEFINE_QNODE(mas_usb_hs2, MSM8939_MASTER_USB_HS2, 4, -1, -1, MSM8939_PNOC_MAS_1);
+DEFINE_QNODE(mas_vfe, MSM8939_MASTER_VFE, 16, -1, -1, MSM8939_SNOC_MM_INT_1, MSM8939_SNOC_MM_INT_2);
+DEFINE_QNODE(mas_video, MSM8939_MASTER_VIDEO_P0, 16, -1, -1, MSM8939_SNOC_MM_INT_0, MSM8939_SNOC_MM_INT_2);
+DEFINE_QNODE(mm_int_0, MSM8939_SNOC_MM_INT_0, 16, -1, -1, MSM8939_SNOC_BIMC_2_MAS);
+DEFINE_QNODE(mm_int_1, MSM8939_SNOC_MM_INT_1, 16, -1, -1, MSM8939_SNOC_BIMC_1_MAS);
+DEFINE_QNODE(mm_int_2, MSM8939_SNOC_MM_INT_2, 16, -1, -1, MSM8939_SNOC_INT_0);
+DEFINE_QNODE(pcnoc_int_0, MSM8939_PNOC_INT_0, 8, -1, -1, MSM8939_PNOC_SNOC_MAS, MSM8939_PNOC_SLV_0, MSM8939_PNOC_SLV_1, MSM8939_PNOC_SLV_2, MSM8939_PNOC_SLV_3, MSM8939_PNOC_SLV_4, MSM8939_PNOC_SLV_8, MSM8939_PNOC_SLV_9);
+DEFINE_QNODE(pcnoc_int_1, MSM8939_PNOC_INT_1, 8, -1, -1, MSM8939_PNOC_SNOC_MAS);
+DEFINE_QNODE(pcnoc_m_0, MSM8939_PNOC_MAS_0, 8, -1, -1, MSM8939_PNOC_INT_0);
+DEFINE_QNODE(pcnoc_m_1, MSM8939_PNOC_MAS_1, 8, -1, -1, MSM8939_PNOC_SNOC_MAS);
+DEFINE_QNODE(pcnoc_s_0, MSM8939_PNOC_SLV_0, 4, -1, -1, MSM8939_SLAVE_CLK_CTL, MSM8939_SLAVE_TLMM, MSM8939_SLAVE_TCSR, MSM8939_SLAVE_SECURITY, MSM8939_SLAVE_MSS);
+DEFINE_QNODE(pcnoc_s_1, MSM8939_PNOC_SLV_1, 4, -1, -1, MSM8939_SLAVE_IMEM_CFG, MSM8939_SLAVE_CRYPTO_0_CFG, MSM8939_SLAVE_MSG_RAM, MSM8939_SLAVE_PDM, MSM8939_SLAVE_PRNG);
+DEFINE_QNODE(pcnoc_s_2, MSM8939_PNOC_SLV_2, 4, -1, -1, MSM8939_SLAVE_SPDM, MSM8939_SLAVE_BOOT_ROM, MSM8939_SLAVE_BIMC_CFG, MSM8939_SLAVE_PNOC_CFG, MSM8939_SLAVE_PMIC_ARB);
+DEFINE_QNODE(pcnoc_s_3, MSM8939_PNOC_SLV_3, 4, -1, -1, MSM8939_SLAVE_MPM, MSM8939_SLAVE_SNOC_CFG, MSM8939_SLAVE_RBCPR_CFG, MSM8939_SLAVE_QDSS_CFG, MSM8939_SLAVE_DEHR_CFG);
+DEFINE_QNODE(pcnoc_s_4, MSM8939_PNOC_SLV_4, 4, -1, -1, MSM8939_SLAVE_VENUS_CFG, MSM8939_SLAVE_CAMERA_CFG, MSM8939_SLAVE_DISPLAY_CFG);
+DEFINE_QNODE(pcnoc_s_8, MSM8939_PNOC_SLV_8, 4, -1, -1, MSM8939_SLAVE_USB_HS1, MSM8939_SLAVE_SDCC_1, MSM8939_SLAVE_BLSP_1);
+DEFINE_QNODE(pcnoc_s_9, MSM8939_PNOC_SLV_9, 4, -1, -1, MSM8939_SLAVE_SDCC_2, MSM8939_SLAVE_LPASS, MSM8939_SLAVE_USB_HS2);
+DEFINE_QNODE(pcnoc_snoc_mas, MSM8939_PNOC_SNOC_MAS, 8, 29, -1, MSM8939_PNOC_SNOC_SLV);
+DEFINE_QNODE(pcnoc_snoc_slv, MSM8939_PNOC_SNOC_SLV, 8, -1, 45, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC, MSM8939_SNOC_INT_1);
+DEFINE_QNODE(qdss_int, MSM8939_SNOC_QDSS_INT, 8, -1, -1, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC);
+DEFINE_QNODE(slv_apps_l2, MSM8939_SLAVE_AMPSS_L2, 16, -1, -1, 0);
+DEFINE_QNODE(slv_apss, MSM8939_SLAVE_APSS, 4, -1, 20, 0);
+DEFINE_QNODE(slv_audio, MSM8939_SLAVE_LPASS, 4, -1, -1, 0);
+DEFINE_QNODE(slv_bimc_cfg, MSM8939_SLAVE_BIMC_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_blsp_1, MSM8939_SLAVE_BLSP_1, 4, -1, -1, 0);
+DEFINE_QNODE(slv_boot_rom, MSM8939_SLAVE_BOOT_ROM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_camera_cfg, MSM8939_SLAVE_CAMERA_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_cats_0, MSM8939_SLAVE_CATS_128, 16, -1, 106, 0);
+DEFINE_QNODE(slv_cats_1, MSM8939_SLAVE_OCMEM_64, 8, -1, 107, 0);
+DEFINE_QNODE(slv_clk_ctl, MSM8939_SLAVE_CLK_CTL, 4, -1, -1, 0);
+DEFINE_QNODE(slv_crypto_0_cfg, MSM8939_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_dehr_cfg, MSM8939_SLAVE_DEHR_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_display_cfg, MSM8939_SLAVE_DISPLAY_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_ebi_ch0, MSM8939_SLAVE_EBI_CH0, 16, -1, 0, 0);
+DEFINE_QNODE(slv_gfx_cfg, MSM8939_SLAVE_GRAPHICS_3D_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_imem_cfg, MSM8939_SLAVE_IMEM_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_imem, MSM8939_SLAVE_IMEM, 8, -1, 26, 0);
+DEFINE_QNODE(slv_mpm, MSM8939_SLAVE_MPM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_msg_ram, MSM8939_SLAVE_MSG_RAM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_mss, MSM8939_SLAVE_MSS, 4, -1, -1, 0);
+DEFINE_QNODE(slv_pdm, MSM8939_SLAVE_PDM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_pmic_arb, MSM8939_SLAVE_PMIC_ARB, 4, -1, -1, 0);
+DEFINE_QNODE(slv_pcnoc_cfg, MSM8939_SLAVE_PNOC_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_prng, MSM8939_SLAVE_PRNG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_qdss_cfg, MSM8939_SLAVE_QDSS_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_qdss_stm, MSM8939_SLAVE_QDSS_STM, 4, -1, 30, 0);
+DEFINE_QNODE(slv_rbcpr_cfg, MSM8939_SLAVE_RBCPR_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_sdcc_1, MSM8939_SLAVE_SDCC_1, 4, -1, -1, 0);
+DEFINE_QNODE(slv_sdcc_2, MSM8939_SLAVE_SDCC_2, 4, -1, -1, 0);
+DEFINE_QNODE(slv_security, MSM8939_SLAVE_SECURITY, 4, -1, -1, 0);
+DEFINE_QNODE(slv_snoc_cfg, MSM8939_SLAVE_SNOC_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_spdm, MSM8939_SLAVE_SPDM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_srvc_snoc, MSM8939_SLAVE_SRVC_SNOC, 8, -1, 29, 0);
+DEFINE_QNODE(slv_tcsr, MSM8939_SLAVE_TCSR, 4, -1, -1, 0);
+DEFINE_QNODE(slv_tlmm, MSM8939_SLAVE_TLMM, 4, -1, -1, 0);
+DEFINE_QNODE(slv_usb_hs1, MSM8939_SLAVE_USB_HS1, 4, -1, -1, 0);
+DEFINE_QNODE(slv_usb_hs2, MSM8939_SLAVE_USB_HS2, 4, -1, -1, 0);
+DEFINE_QNODE(slv_venus_cfg, MSM8939_SLAVE_VENUS_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(snoc_bimc_0_mas, MSM8939_SNOC_BIMC_0_MAS, 16, 3, -1, MSM8939_SNOC_BIMC_0_SLV);
+DEFINE_QNODE(snoc_bimc_0_slv, MSM8939_SNOC_BIMC_0_SLV, 16, -1, 24, MSM8939_SLAVE_EBI_CH0);
+DEFINE_QNODE(snoc_bimc_1_mas, MSM8939_SNOC_BIMC_1_MAS, 16, 76, -1, MSM8939_SNOC_BIMC_1_SLV);
+DEFINE_QNODE(snoc_bimc_1_slv, MSM8939_SNOC_BIMC_1_SLV, 16, -1, 104, MSM8939_SLAVE_EBI_CH0);
+DEFINE_QNODE(snoc_bimc_2_mas, MSM8939_SNOC_BIMC_2_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_2_SLV);
+DEFINE_QNODE(snoc_bimc_2_slv, MSM8939_SNOC_BIMC_2_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0);
+DEFINE_QNODE(snoc_int_0, MSM8939_SNOC_INT_0, 8, 99, 130, MSM8939_SLAVE_QDSS_STM, MSM8939_SLAVE_IMEM, MSM8939_SNOC_PNOC_MAS);
+DEFINE_QNODE(snoc_int_1, MSM8939_SNOC_INT_1, 8, 100, 131, MSM8939_SLAVE_APSS, MSM8939_SLAVE_CATS_128, MSM8939_SLAVE_OCMEM_64);
+DEFINE_QNODE(snoc_int_bimc, MSM8939_SNOC_INT_BIMC, 8, 101, 132, MSM8939_SNOC_BIMC_1_MAS);
+DEFINE_QNODE(snoc_pcnoc_mas, MSM8939_SNOC_PNOC_MAS, 8, -1, -1, MSM8939_SNOC_PNOC_SLV);
+DEFINE_QNODE(snoc_pcnoc_slv, MSM8939_SNOC_PNOC_SLV, 8, -1, -1, MSM8939_PNOC_INT_0);
+
+static struct qcom_icc_node *msm8939_snoc_nodes[] = {
+ [BIMC_SNOC_SLV] = &bimc_snoc_slv,
+ [MASTER_QDSS_BAM] = &mas_qdss_bam,
+ [MASTER_QDSS_ETR] = &mas_qdss_etr,
+ [MASTER_SNOC_CFG] = &mas_snoc_cfg,
+ [PCNOC_SNOC_SLV] = &pcnoc_snoc_slv,
+ [SLAVE_APSS] = &slv_apss,
+ [SLAVE_CATS_128] = &slv_cats_0,
+ [SLAVE_OCMEM_64] = &slv_cats_1,
+ [SLAVE_IMEM] = &slv_imem,
+ [SLAVE_QDSS_STM] = &slv_qdss_stm,
+ [SLAVE_SRVC_SNOC] = &slv_srvc_snoc,
+ [SNOC_BIMC_0_MAS] = &snoc_bimc_0_mas,
+ [SNOC_BIMC_1_MAS] = &snoc_bimc_1_mas,
+ [SNOC_BIMC_2_MAS] = &snoc_bimc_2_mas,
+ [SNOC_INT_0] = &snoc_int_0,
+ [SNOC_INT_1] = &snoc_int_1,
+ [SNOC_INT_BIMC] = &snoc_int_bimc,
+ [SNOC_PCNOC_MAS] = &snoc_pcnoc_mas,
+ [SNOC_QDSS_INT] = &qdss_int,
+};
+
+static struct qcom_icc_desc msm8939_snoc = {
+ .nodes = msm8939_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8939_snoc_nodes),
+};
+
+static struct qcom_icc_node *msm8939_snoc_mm_nodes[] = {
+ [MASTER_VIDEO_P0] = &mas_video,
+ [MASTER_JPEG] = &mas_jpeg,
+ [MASTER_VFE] = &mas_vfe,
+ [MASTER_MDP_PORT0] = &mas_mdp0,
+ [MASTER_MDP_PORT1] = &mas_mdp1,
+ [MASTER_CPP] = &mas_cpp,
+ [SNOC_MM_INT_0] = &mm_int_0,
+ [SNOC_MM_INT_1] = &mm_int_1,
+ [SNOC_MM_INT_2] = &mm_int_2,
+};
+
+static struct qcom_icc_desc msm8939_snoc_mm = {
+ .nodes = msm8939_snoc_mm_nodes,
+ .num_nodes = ARRAY_SIZE(msm8939_snoc_mm_nodes),
+};
+
+static struct qcom_icc_node *msm8939_bimc_nodes[] = {
+ [BIMC_SNOC_MAS] = &bimc_snoc_mas,
+ [MASTER_AMPSS_M0] = &mas_apss,
+ [MASTER_GRAPHICS_3D] = &mas_gfx,
+ [MASTER_TCU0] = &mas_tcu0,
+ [SLAVE_AMPSS_L2] = &slv_apps_l2,
+ [SLAVE_EBI_CH0] = &slv_ebi_ch0,
+ [SNOC_BIMC_0_SLV] = &snoc_bimc_0_slv,
+ [SNOC_BIMC_1_SLV] = &snoc_bimc_1_slv,
+ [SNOC_BIMC_2_SLV] = &snoc_bimc_2_slv,
+};
+
+static struct qcom_icc_desc msm8939_bimc = {
+ .nodes = msm8939_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8939_bimc_nodes),
+};
+
+static struct qcom_icc_node *msm8939_pcnoc_nodes[] = {
+ [MASTER_BLSP_1] = &mas_blsp_1,
+ [MASTER_DEHR] = &mas_dehr,
+ [MASTER_LPASS] = &mas_audio,
+ [MASTER_CRYPTO_CORE0] = &mas_pcnoc_crypto_0,
+ [MASTER_SDCC_1] = &mas_pcnoc_sdcc_1,
+ [MASTER_SDCC_2] = &mas_pcnoc_sdcc_2,
+ [MASTER_SPDM] = &mas_spdm,
+ [MASTER_USB_HS1] = &mas_usb_hs1,
+ [MASTER_USB_HS2] = &mas_usb_hs2,
+ [PCNOC_INT_0] = &pcnoc_int_0,
+ [PCNOC_INT_1] = &pcnoc_int_1,
+ [PCNOC_MAS_0] = &pcnoc_m_0,
+ [PCNOC_MAS_1] = &pcnoc_m_1,
+ [PCNOC_SLV_0] = &pcnoc_s_0,
+ [PCNOC_SLV_1] = &pcnoc_s_1,
+ [PCNOC_SLV_2] = &pcnoc_s_2,
+ [PCNOC_SLV_3] = &pcnoc_s_3,
+ [PCNOC_SLV_4] = &pcnoc_s_4,
+ [PCNOC_SLV_8] = &pcnoc_s_8,
+ [PCNOC_SLV_9] = &pcnoc_s_9,
+ [PCNOC_SNOC_MAS] = &pcnoc_snoc_mas,
+ [SLAVE_BIMC_CFG] = &slv_bimc_cfg,
+ [SLAVE_BLSP_1] = &slv_blsp_1,
+ [SLAVE_BOOT_ROM] = &slv_boot_rom,
+ [SLAVE_CAMERA_CFG] = &slv_camera_cfg,
+ [SLAVE_CLK_CTL] = &slv_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [SLAVE_DEHR_CFG] = &slv_dehr_cfg,
+ [SLAVE_DISPLAY_CFG] = &slv_display_cfg,
+ [SLAVE_GRAPHICS_3D_CFG] = &slv_gfx_cfg,
+ [SLAVE_IMEM_CFG] = &slv_imem_cfg,
+ [SLAVE_LPASS] = &slv_audio,
+ [SLAVE_MPM] = &slv_mpm,
+ [SLAVE_MSG_RAM] = &slv_msg_ram,
+ [SLAVE_MSS] = &slv_mss,
+ [SLAVE_PDM] = &slv_pdm,
+ [SLAVE_PMIC_ARB] = &slv_pmic_arb,
+ [SLAVE_PCNOC_CFG] = &slv_pcnoc_cfg,
+ [SLAVE_PRNG] = &slv_prng,
+ [SLAVE_QDSS_CFG] = &slv_qdss_cfg,
+ [SLAVE_RBCPR_CFG] = &slv_rbcpr_cfg,
+ [SLAVE_SDCC_1] = &slv_sdcc_1,
+ [SLAVE_SDCC_2] = &slv_sdcc_2,
+ [SLAVE_SECURITY] = &slv_security,
+ [SLAVE_SNOC_CFG] = &slv_snoc_cfg,
+ [SLAVE_SPDM] = &slv_spdm,
+ [SLAVE_TCSR] = &slv_tcsr,
+ [SLAVE_TLMM] = &slv_tlmm,
+ [SLAVE_USB_HS1] = &slv_usb_hs1,
+ [SLAVE_USB_HS2] = &slv_usb_hs2,
+ [SLAVE_VENUS_CFG] = &slv_venus_cfg,
+ [SNOC_PCNOC_SLV] = &snoc_pcnoc_slv,
+};
+
+static struct qcom_icc_desc msm8939_pcnoc = {
+ .nodes = msm8939_pcnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8939_pcnoc_nodes),
+};
+
+static int msm8939_qnoc_probe(struct platform_device *pdev)
+{
+ return qnoc_probe(pdev, sizeof(msm8939_bus_clocks),
+ ARRAY_SIZE(msm8939_bus_clocks), msm8939_bus_clocks);
+}
+
+static const struct of_device_id msm8939_noc_of_match[] = {
+ { .compatible = "qcom,msm8939-bimc", .data = &msm8939_bimc },
+ { .compatible = "qcom,msm8939-pcnoc", .data = &msm8939_pcnoc },
+ { .compatible = "qcom,msm8939-snoc", .data = &msm8939_snoc },
+ { .compatible = "qcom,msm8939-snoc-mm", .data = &msm8939_snoc_mm },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm8939_noc_of_match);
+
+static struct platform_driver msm8939_noc_driver = {
+ .probe = msm8939_qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-msm8939",
+ .of_match_table = msm8939_noc_of_match,
+ },
+};
+module_platform_driver(msm8939_noc_driver);
+MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm MSM8939 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
index 9820709b43db..36a7e30a00be 100644
--- a/drivers/interconnect/qcom/qcs404.c
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -9,15 +9,12 @@
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/slab.h>
+#include <linux/of_device.h>
-#include "smd-rpm.h"
-#define RPM_BUS_MASTER_REQ 0x73616d62
-#define RPM_BUS_SLAVE_REQ 0x766c7362
+#include "smd-rpm.h"
+#include "icc-rpm.h"
enum {
QCS404_MASTER_AMPSS_M0 = 1,
@@ -95,67 +92,11 @@ enum {
QCS404_SLAVE_LPASS,
};
-#define to_qcom_provider(_provider) \
- container_of(_provider, struct qcom_icc_provider, provider)
-
-static const struct clk_bulk_data bus_clocks[] = {
+static const struct clk_bulk_data qcs404_bus_clocks[] = {
{ .id = "bus" },
{ .id = "bus_a" },
};
-/**
- * struct qcom_icc_provider - Qualcomm specific interconnect provider
- * @provider: generic interconnect provider
- * @bus_clks: the clk_bulk_data table of bus clocks
- * @num_clks: the total number of clk_bulk_data entries
- */
-struct qcom_icc_provider {
- struct icc_provider provider;
- struct clk_bulk_data *bus_clks;
- int num_clks;
-};
-
-#define QCS404_MAX_LINKS 12
-
-/**
- * struct qcom_icc_node - Qualcomm specific interconnect nodes
- * @name: the node name used in debugfs
- * @id: a unique node identifier
- * @links: an array of nodes where we can go next while traversing
- * @num_links: the total number of @links
- * @buswidth: width of the interconnect between a node and the bus (bytes)
- * @mas_rpm_id: RPM id for devices that are bus masters
- * @slv_rpm_id: RPM id for devices that are bus slaves
- * @rate: current bus clock rate in Hz
- */
-struct qcom_icc_node {
- unsigned char *name;
- u16 id;
- u16 links[QCS404_MAX_LINKS];
- u16 num_links;
- u16 buswidth;
- int mas_rpm_id;
- int slv_rpm_id;
- u64 rate;
-};
-
-struct qcom_icc_desc {
- struct qcom_icc_node **nodes;
- size_t num_nodes;
-};
-
-#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
- ...) \
- static struct qcom_icc_node _name = { \
- .name = #_name, \
- .id = _id, \
- .buswidth = _buswidth, \
- .mas_rpm_id = _mas_rpm_id, \
- .slv_rpm_id = _slv_rpm_id, \
- .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
- .links = { __VA_ARGS__ }, \
- }
-
DEFINE_QNODE(mas_apps_proc, QCS404_MASTER_AMPSS_M0, 8, 0, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
DEFINE_QNODE(mas_oxili, QCS404_MASTER_GRAPHICS_3D, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
DEFINE_QNODE(mas_mdp, QCS404_MASTER_MDP_PORT0, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
@@ -327,178 +268,11 @@ static struct qcom_icc_desc qcs404_snoc = {
.num_nodes = ARRAY_SIZE(qcs404_snoc_nodes),
};
-static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
-{
- struct qcom_icc_provider *qp;
- struct qcom_icc_node *qn;
- struct icc_provider *provider;
- struct icc_node *n;
- u64 sum_bw;
- u64 max_peak_bw;
- u64 rate;
- u32 agg_avg = 0;
- u32 agg_peak = 0;
- int ret, i;
-
- qn = src->data;
- provider = src->provider;
- qp = to_qcom_provider(provider);
-
- list_for_each_entry(n, &provider->nodes, node_list)
- provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
- &agg_avg, &agg_peak);
-
- sum_bw = icc_units_to_bps(agg_avg);
- max_peak_bw = icc_units_to_bps(agg_peak);
-
- /* send bandwidth request message to the RPM processor */
- if (qn->mas_rpm_id != -1) {
- ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
- RPM_BUS_MASTER_REQ,
- qn->mas_rpm_id,
- sum_bw);
- if (ret) {
- pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
- qn->mas_rpm_id, ret);
- return ret;
- }
- }
-
- if (qn->slv_rpm_id != -1) {
- ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
- RPM_BUS_SLAVE_REQ,
- qn->slv_rpm_id,
- sum_bw);
- if (ret) {
- pr_err("qcom_icc_rpm_smd_send slv error %d\n",
- ret);
- return ret;
- }
- }
-
- rate = max(sum_bw, max_peak_bw);
-
- do_div(rate, qn->buswidth);
-
- if (qn->rate == rate)
- return 0;
-
- for (i = 0; i < qp->num_clks; i++) {
- ret = clk_set_rate(qp->bus_clks[i].clk, rate);
- if (ret) {
- pr_err("%s clk_set_rate error: %d\n",
- qp->bus_clks[i].id, ret);
- return ret;
- }
- }
-
- qn->rate = rate;
- return 0;
-}
-
-static int qnoc_probe(struct platform_device *pdev)
+static int qcs404_qnoc_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- const struct qcom_icc_desc *desc;
- struct icc_onecell_data *data;
- struct icc_provider *provider;
- struct qcom_icc_node **qnodes;
- struct qcom_icc_provider *qp;
- struct icc_node *node;
- size_t num_nodes, i;
- int ret;
-
- /* wait for the RPM proxy */
- if (!qcom_icc_rpm_smd_available())
- return -EPROBE_DEFER;
-
- desc = of_device_get_match_data(dev);
- if (!desc)
- return -EINVAL;
-
- qnodes = desc->nodes;
- num_nodes = desc->num_nodes;
-
- qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return -ENOMEM;
-
- data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- qp->bus_clks = devm_kmemdup(dev, bus_clocks, sizeof(bus_clocks),
- GFP_KERNEL);
- if (!qp->bus_clks)
- return -ENOMEM;
-
- qp->num_clks = ARRAY_SIZE(bus_clocks);
- ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
- if (ret)
- return ret;
-
- ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
- if (ret)
- return ret;
-
- provider = &qp->provider;
- INIT_LIST_HEAD(&provider->nodes);
- provider->dev = dev;
- provider->set = qcom_icc_set;
- provider->aggregate = icc_std_aggregate;
- provider->xlate = of_icc_xlate_onecell;
- provider->data = data;
-
- ret = icc_provider_add(provider);
- if (ret) {
- dev_err(dev, "error adding interconnect provider: %d\n", ret);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- return ret;
- }
-
- for (i = 0; i < num_nodes; i++) {
- size_t j;
-
- node = icc_node_create(qnodes[i]->id);
- if (IS_ERR(node)) {
- ret = PTR_ERR(node);
- goto err;
- }
-
- node->name = qnodes[i]->name;
- node->data = qnodes[i];
- icc_node_add(node, provider);
-
- dev_dbg(dev, "registered node %s\n", node->name);
-
- /* populate links */
- for (j = 0; j < qnodes[i]->num_links; j++)
- icc_link_create(node, qnodes[i]->links[j]);
-
- data->nodes[i] = node;
- }
- data->num_nodes = num_nodes;
-
- platform_set_drvdata(pdev, qp);
-
- return 0;
-err:
- icc_nodes_remove(provider);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- icc_provider_del(provider);
-
- return ret;
-}
-
-static int qnoc_remove(struct platform_device *pdev)
-{
- struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
-
- icc_nodes_remove(&qp->provider);
- clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- return icc_provider_del(&qp->provider);
+ return qnoc_probe(pdev, sizeof(qcs404_bus_clocks),
+ ARRAY_SIZE(qcs404_bus_clocks), qcs404_bus_clocks);
}
static const struct of_device_id qcs404_noc_of_match[] = {
@@ -510,7 +284,7 @@ static const struct of_device_id qcs404_noc_of_match[] = {
MODULE_DEVICE_TABLE(of, qcs404_noc_of_match);
static struct platform_driver qcs404_noc_driver = {
- .probe = qnoc_probe,
+ .probe = qcs404_qnoc_probe,
.remove = qnoc_remove,
.driver = {
.name = "qnoc-qcs404",
diff --git a/drivers/interconnect/qcom/sdx55.c b/drivers/interconnect/qcom/sdx55.c
new file mode 100644
index 000000000000..a5a122ee3d21
--- /dev/null
+++ b/drivers/interconnect/qcom/sdx55.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm SDX55 interconnect driver
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ *
+ * Copyright (c) 2021, Linaro Ltd.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sdx55.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sdx55.h"
+
+DEFINE_QNODE(ipa_core_master, SDX55_MASTER_IPA_CORE, 1, 8, SDX55_SLAVE_IPA_CORE);
+DEFINE_QNODE(llcc_mc, SDX55_MASTER_LLCC, 4, 4, SDX55_SLAVE_EBI_CH0);
+DEFINE_QNODE(acm_tcu, SDX55_MASTER_TCU_0, 1, 8, SDX55_SLAVE_LLCC, SDX55_SLAVE_MEM_NOC_SNOC, SDX55_SLAVE_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qnm_snoc_gc, SDX55_MASTER_SNOC_GC_MEM_NOC, 1, 8, SDX55_SLAVE_LLCC);
+DEFINE_QNODE(xm_apps_rdwr, SDX55_MASTER_AMPSS_M0, 1, 16, SDX55_SLAVE_LLCC, SDX55_SLAVE_MEM_NOC_SNOC, SDX55_SLAVE_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qhm_audio, SDX55_MASTER_AUDIO, 1, 4, SDX55_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(qhm_blsp1, SDX55_MASTER_BLSP_1, 1, 4, SDX55_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(qhm_qdss_bam, SDX55_MASTER_QDSS_BAM, 1, 4, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_EMAC_CFG, SDX55_SLAVE_USB3, SDX55_SLAVE_TLMM, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_SNOC_MEM_NOC_GC, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_SPMI_VGI_COEX, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_AUDIO, SDX55_SLAVE_AOSS, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_TCU, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
+DEFINE_QNODE(qhm_qpic, SDX55_MASTER_QPIC, 1, 4, SDX55_SLAVE_AOSS, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_ANOC_SNOC, SDX55_SLAVE_AOP, SDX55_SLAVE_AUDIO);
+DEFINE_QNODE(qhm_snoc_cfg, SDX55_MASTER_SNOC_CFG, 1, 4, SDX55_SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qhm_spmi_fetcher1, SDX55_MASTER_SPMI_FETCHER, 1, 4, SDX55_SLAVE_AOSS, SDX55_SLAVE_ANOC_SNOC, SDX55_SLAVE_AOP);
+DEFINE_QNODE(qnm_aggre_noc, SDX55_MASTER_ANOC_SNOC, 1, 8, SDX55_SLAVE_PCIE_0, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_TLMM, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_SNOC_MEM_NOC_GC, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_SPMI_VGI_COEX, SDX55_SLAVE_QDSS_STM, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_USB3, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_APPSS, SDX55_SLAVE_AUDIO, SDX55_SLAVE_AOSS, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_TCU, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
+DEFINE_QNODE(qnm_ipa, SDX55_MASTER_IPA, 1, 8, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_EMAC_CFG, SDX55_SLAVE_USB3, SDX55_SLAVE_AOSS, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_SNOC_MEM_NOC_GC, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_QDSS_STM, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_AUDIO, SDX55_SLAVE_TLMM, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
+DEFINE_QNODE(qnm_memnoc, SDX55_MASTER_MEM_NOC_SNOC, 1, 8, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_EMAC_CFG, SDX55_SLAVE_USB3, SDX55_SLAVE_TLMM, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_SPMI_VGI_COEX, SDX55_SLAVE_QDSS_STM, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_APPSS, SDX55_SLAVE_AUDIO, SDX55_SLAVE_AOSS, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_TCU, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
+DEFINE_QNODE(qnm_memnoc_pcie, SDX55_MASTER_MEM_NOC_PCIE_SNOC, 1, 8, SDX55_SLAVE_PCIE_0);
+DEFINE_QNODE(qxm_crypto, SDX55_MASTER_CRYPTO_CORE_0, 1, 8, SDX55_SLAVE_AOSS, SDX55_SLAVE_ANOC_SNOC, SDX55_SLAVE_AOP);
+DEFINE_QNODE(xm_emac, SDX55_MASTER_EMAC, 1, 8, SDX55_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(xm_ipa2pcie_slv, SDX55_MASTER_IPA_PCIE, 1, 8, SDX55_SLAVE_PCIE_0);
+DEFINE_QNODE(xm_pcie, SDX55_MASTER_PCIE, 1, 8, SDX55_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(xm_qdss_etr, SDX55_MASTER_QDSS_ETR, 1, 8, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_EMAC_CFG, SDX55_SLAVE_USB3, SDX55_SLAVE_AOSS, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_SNOC_MEM_NOC_GC, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_SPMI_VGI_COEX, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_AUDIO, SDX55_SLAVE_AOSS, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_TCU, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
+DEFINE_QNODE(xm_sdc1, SDX55_MASTER_SDCC_1, 1, 8, SDX55_SLAVE_AOSS, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_ANOC_SNOC, SDX55_SLAVE_AOP, SDX55_SLAVE_AUDIO);
+DEFINE_QNODE(xm_usb3, SDX55_MASTER_USB3, 1, 8, SDX55_SLAVE_ANOC_SNOC);
+DEFINE_QNODE(ipa_core_slave, SDX55_SLAVE_IPA_CORE, 1, 8);
+DEFINE_QNODE(ebi, SDX55_SLAVE_EBI_CH0, 1, 4);
+DEFINE_QNODE(qns_llcc, SDX55_SLAVE_LLCC, 1, 16, SDX55_SLAVE_EBI_CH0);
+DEFINE_QNODE(qns_memnoc_snoc, SDX55_SLAVE_MEM_NOC_SNOC, 1, 8, SDX55_MASTER_MEM_NOC_SNOC);
+DEFINE_QNODE(qns_sys_pcie, SDX55_SLAVE_MEM_NOC_PCIE_SNOC, 1, 8, SDX55_MASTER_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qhs_aop, SDX55_SLAVE_AOP, 1, 4);
+DEFINE_QNODE(qhs_aoss, SDX55_SLAVE_AOSS, 1, 4);
+DEFINE_QNODE(qhs_apss, SDX55_SLAVE_APPSS, 1, 4);
+DEFINE_QNODE(qhs_audio, SDX55_SLAVE_AUDIO, 1, 4);
+DEFINE_QNODE(qhs_blsp1, SDX55_SLAVE_BLSP_1, 1, 4);
+DEFINE_QNODE(qhs_clk_ctl, SDX55_SLAVE_CLK_CTL, 1, 4);
+DEFINE_QNODE(qhs_crypto0_cfg, SDX55_SLAVE_CRYPTO_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_ddrss_cfg, SDX55_SLAVE_CNOC_DDRSS, 1, 4);
+DEFINE_QNODE(qhs_ecc_cfg, SDX55_SLAVE_ECC_CFG, 1, 4);
+DEFINE_QNODE(qhs_emac_cfg, SDX55_SLAVE_EMAC_CFG, 1, 4);
+DEFINE_QNODE(qhs_imem_cfg, SDX55_SLAVE_IMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipa, SDX55_SLAVE_IPA_CFG, 1, 4);
+DEFINE_QNODE(qhs_mss_cfg, SDX55_SLAVE_CNOC_MSS, 1, 4);
+DEFINE_QNODE(qhs_pcie_parf, SDX55_SLAVE_PCIE_PARF, 1, 4);
+DEFINE_QNODE(qhs_pdm, SDX55_SLAVE_PDM, 1, 4);
+DEFINE_QNODE(qhs_prng, SDX55_SLAVE_PRNG, 1, 4);
+DEFINE_QNODE(qhs_qdss_cfg, SDX55_SLAVE_QDSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_qpic, SDX55_SLAVE_QPIC, 1, 4);
+DEFINE_QNODE(qhs_sdc1, SDX55_SLAVE_SDCC_1, 1, 4);
+DEFINE_QNODE(qhs_snoc_cfg, SDX55_SLAVE_SNOC_CFG, 1, 4, SDX55_MASTER_SNOC_CFG);
+DEFINE_QNODE(qhs_spmi_fetcher, SDX55_SLAVE_SPMI_FETCHER, 1, 4);
+DEFINE_QNODE(qhs_spmi_vgi_coex, SDX55_SLAVE_SPMI_VGI_COEX, 1, 4);
+DEFINE_QNODE(qhs_tcsr, SDX55_SLAVE_TCSR, 1, 4);
+DEFINE_QNODE(qhs_tlmm, SDX55_SLAVE_TLMM, 1, 4);
+DEFINE_QNODE(qhs_usb3, SDX55_SLAVE_USB3, 1, 4);
+DEFINE_QNODE(qhs_usb3_phy, SDX55_SLAVE_USB3_PHY_CFG, 1, 4);
+DEFINE_QNODE(qns_aggre_noc, SDX55_SLAVE_ANOC_SNOC, 1, 8, SDX55_MASTER_ANOC_SNOC);
+DEFINE_QNODE(qns_snoc_memnoc, SDX55_SLAVE_SNOC_MEM_NOC_GC, 1, 8, SDX55_MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qxs_imem, SDX55_SLAVE_OCIMEM, 1, 8);
+DEFINE_QNODE(srvc_snoc, SDX55_SLAVE_SERVICE_SNOC, 1, 4);
+DEFINE_QNODE(xs_pcie, SDX55_SLAVE_PCIE_0, 1, 8);
+DEFINE_QNODE(xs_qdss_stm, SDX55_SLAVE_QDSS_STM, 1, 4);
+DEFINE_QNODE(xs_sys_tcu_cfg, SDX55_SLAVE_TCU, 1, 8);
+
+DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
+DEFINE_QBCM(bcm_pn0, "PN0", false, &qhm_snoc_cfg);
+DEFINE_QBCM(bcm_sh3, "SH3", false, &xm_apps_rdwr);
+DEFINE_QBCM(bcm_sh4, "SH4", false, &qns_memnoc_snoc, &qns_sys_pcie);
+DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_snoc_memnoc);
+DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+DEFINE_QBCM(bcm_pn1, "PN1", false, &xm_sdc1);
+DEFINE_QBCM(bcm_pn2, "PN2", false, &qhm_audio, &qhm_spmi_fetcher1);
+DEFINE_QBCM(bcm_sn3, "SN3", false, &xs_qdss_stm);
+DEFINE_QBCM(bcm_pn3, "PN3", false, &qhm_blsp1, &qhm_qpic);
+DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_sys_tcu_cfg);
+DEFINE_QBCM(bcm_pn5, "PN5", false, &qxm_crypto);
+DEFINE_QBCM(bcm_sn6, "SN6", false, &xs_pcie);
+DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre_noc, &xm_emac, &xm_emac, &xm_usb3,
+ &qns_aggre_noc);
+DEFINE_QBCM(bcm_sn8, "SN8", false, &qhm_qdss_bam, &xm_qdss_etr);
+DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_memnoc);
+DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_memnoc_pcie);
+DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_ipa, &xm_ipa2pcie_slv);
+
+static struct qcom_icc_bcm *mc_virt_bcms[] = {
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node *mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI_CH0] = &ebi,
+};
+
+static const struct qcom_icc_desc sdx55_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm *mem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh3,
+ &bcm_sh4,
+};
+
+static struct qcom_icc_node *mem_noc_nodes[] = {
+ [MASTER_TCU_0] = &acm_tcu,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_AMPSS_M0] = &xm_apps_rdwr,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
+ [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_sys_pcie,
+};
+
+static const struct qcom_icc_desc sdx55_mem_noc = {
+ .nodes = mem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mem_noc_nodes),
+ .bcms = mem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mem_noc_bcms),
+};
+
+static struct qcom_icc_bcm *system_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_pn0,
+ &bcm_pn1,
+ &bcm_pn2,
+ &bcm_pn3,
+ &bcm_pn5,
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn3,
+ &bcm_sn4,
+ &bcm_sn6,
+ &bcm_sn7,
+ &bcm_sn8,
+ &bcm_sn9,
+ &bcm_sn10,
+ &bcm_sn11,
+};
+
+static struct qcom_icc_node *system_noc_nodes[] = {
+ [MASTER_AUDIO] = &qhm_audio,
+ [MASTER_BLSP_1] = &qhm_blsp1,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QPIC] = &qhm_qpic,
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [MASTER_SPMI_FETCHER] = &qhm_spmi_fetcher1,
+ [MASTER_ANOC_SNOC] = &qnm_aggre_noc,
+ [MASTER_IPA] = &qnm_ipa,
+ [MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
+ [MASTER_MEM_NOC_PCIE_SNOC] = &qnm_memnoc_pcie,
+ [MASTER_CRYPTO_CORE_0] = &qxm_crypto,
+ [MASTER_EMAC] = &xm_emac,
+ [MASTER_IPA_PCIE] = &xm_ipa2pcie_slv,
+ [MASTER_PCIE] = &xm_pcie,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_SDCC_1] = &xm_sdc1,
+ [MASTER_USB3] = &xm_usb3,
+ [SLAVE_AOP] = &qhs_aop,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_AUDIO] = &qhs_audio,
+ [SLAVE_BLSP_1] = &qhs_blsp1,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
+ [SLAVE_ECC_CFG] = &qhs_ecc_cfg,
+ [SLAVE_EMAC_CFG] = &qhs_emac_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_PCIE_PARF] = &qhs_pcie_parf,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QPIC] = &qhs_qpic,
+ [SLAVE_SDCC_1] = &qhs_sdc1,
+ [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+ [SLAVE_SPMI_FETCHER] = &qhs_spmi_fetcher,
+ [SLAVE_SPMI_VGI_COEX] = &qhs_spmi_vgi_coex,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_USB3] = &qhs_usb3,
+ [SLAVE_USB3_PHY_CFG] = &qhs_usb3_phy,
+ [SLAVE_ANOC_SNOC] = &qns_aggre_noc,
+ [SLAVE_SNOC_MEM_NOC_GC] = &qns_snoc_memnoc,
+ [SLAVE_OCIMEM] = &qxs_imem,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_PCIE_0] = &xs_pcie,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sdx55_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static struct qcom_icc_bcm *ipa_virt_bcms[] = {
+ &bcm_ip0,
+};
+
+static struct qcom_icc_node *ipa_virt_nodes[] = {
+ [MASTER_IPA_CORE] = &ipa_core_master,
+ [SLAVE_IPA_CORE] = &ipa_core_slave,
+};
+
+static const struct qcom_icc_desc sdx55_ipa_virt = {
+ .nodes = ipa_virt_nodes,
+ .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
+ .bcms = ipa_virt_bcms,
+ .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
+};
+
+static int qnoc_probe(struct platform_device *pdev)
+{
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider = &qp->provider;
+ provider->dev = &pdev->dev;
+ provider->set = qcom_icc_set;
+ provider->pre_aggregate = qcom_icc_pre_aggregate;
+ provider->aggregate = qcom_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ qp->dev = &pdev->dev;
+ qp->bcms = desc->bcms;
+ qp->num_bcms = desc->num_bcms;
+
+ qp->voter = of_bcm_voter_get(qp->dev, NULL);
+ if (IS_ERR(qp->voter))
+ return PTR_ERR(qp->voter);
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ if (!qnodes[i])
+ continue;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+err:
+ icc_nodes_remove(provider);
+ icc_provider_del(provider);
+ return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ return icc_provider_del(&qp->provider);
+}
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sdx55-mc-virt",
+ .data = &sdx55_mc_virt},
+ { .compatible = "qcom,sdx55-mem-noc",
+ .data = &sdx55_mem_noc},
+ { .compatible = "qcom,sdx55-system-noc",
+ .data = &sdx55_system_noc},
+ { .compatible = "qcom,sdx55-ipa-virt",
+ .data = &sdx55_ipa_virt},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-sdx55",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_DESCRIPTION("Qualcomm SDX55 NoC driver");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sdx55.h b/drivers/interconnect/qcom/sdx55.h
new file mode 100644
index 000000000000..deff8afe0631
--- /dev/null
+++ b/drivers/interconnect/qcom/sdx55.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021, Linaro Ltd.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SDX55_H
+#define __DRIVERS_INTERCONNECT_QCOM_SDX55_H
+
+#define SDX55_MASTER_IPA_CORE 0
+#define SDX55_MASTER_LLCC 1
+#define SDX55_MASTER_TCU_0 2
+#define SDX55_MASTER_SNOC_GC_MEM_NOC 3
+#define SDX55_MASTER_AMPSS_M0 4
+#define SDX55_MASTER_AUDIO 5
+#define SDX55_MASTER_BLSP_1 6
+#define SDX55_MASTER_QDSS_BAM 7
+#define SDX55_MASTER_QPIC 8
+#define SDX55_MASTER_SNOC_CFG 9
+#define SDX55_MASTER_SPMI_FETCHER 10
+#define SDX55_MASTER_ANOC_SNOC 11
+#define SDX55_MASTER_IPA 12
+#define SDX55_MASTER_MEM_NOC_SNOC 13
+#define SDX55_MASTER_MEM_NOC_PCIE_SNOC 14
+#define SDX55_MASTER_CRYPTO_CORE_0 15
+#define SDX55_MASTER_EMAC 16
+#define SDX55_MASTER_IPA_PCIE 17
+#define SDX55_MASTER_PCIE 18
+#define SDX55_MASTER_QDSS_ETR 19
+#define SDX55_MASTER_SDCC_1 20
+#define SDX55_MASTER_USB3 21
+#define SDX55_SLAVE_IPA_CORE 22
+#define SDX55_SLAVE_EBI_CH0 23
+#define SDX55_SLAVE_LLCC 24
+#define SDX55_SLAVE_MEM_NOC_SNOC 25
+#define SDX55_SLAVE_MEM_NOC_PCIE_SNOC 26
+#define SDX55_SLAVE_ANOC_SNOC 27
+#define SDX55_SLAVE_SNOC_CFG 28
+#define SDX55_SLAVE_EMAC_CFG 29
+#define SDX55_SLAVE_USB3 30
+#define SDX55_SLAVE_TLMM 31
+#define SDX55_SLAVE_SPMI_FETCHER 32
+#define SDX55_SLAVE_QDSS_CFG 33
+#define SDX55_SLAVE_PDM 34
+#define SDX55_SLAVE_SNOC_MEM_NOC_GC 35
+#define SDX55_SLAVE_TCSR 36
+#define SDX55_SLAVE_CNOC_DDRSS 37
+#define SDX55_SLAVE_SPMI_VGI_COEX 38
+#define SDX55_SLAVE_QPIC 39
+#define SDX55_SLAVE_OCIMEM 40
+#define SDX55_SLAVE_IPA_CFG 41
+#define SDX55_SLAVE_USB3_PHY_CFG 42
+#define SDX55_SLAVE_AOP 43
+#define SDX55_SLAVE_BLSP_1 44
+#define SDX55_SLAVE_SDCC_1 45
+#define SDX55_SLAVE_CNOC_MSS 46
+#define SDX55_SLAVE_PCIE_PARF 47
+#define SDX55_SLAVE_ECC_CFG 48
+#define SDX55_SLAVE_AUDIO 49
+#define SDX55_SLAVE_AOSS 51
+#define SDX55_SLAVE_PRNG 52
+#define SDX55_SLAVE_CRYPTO_0_CFG 53
+#define SDX55_SLAVE_TCU 54
+#define SDX55_SLAVE_CLK_CTL 55
+#define SDX55_SLAVE_IMEM_CFG 56
+#define SDX55_SLAVE_SERVICE_SNOC 57
+#define SDX55_SLAVE_PCIE_0 58
+#define SDX55_SLAVE_QDSS_STM 59
+#define SDX55_SLAVE_APPSS 60
+
+#endif
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
index 626b97d0dd21..a3cbafb603f5 100644
--- a/drivers/iommu/amd/Kconfig
+++ b/drivers/iommu/amd/Kconfig
@@ -10,6 +10,7 @@ config AMD_IOMMU
select IOMMU_API
select IOMMU_IOVA
select IOMMU_DMA
+ select IOMMU_IO_PGTABLE
depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
help
With this option you can enable support for AMD IOMMU hardware in
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
index dc5a2fa4fd37..a935f8f4b974 100644
--- a/drivers/iommu/amd/Makefile
+++ b/drivers/iommu/amd/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o
+obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += iommu_v2.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 6b8cbdf71714..026ce7f8d993 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -36,6 +36,7 @@ extern void amd_iommu_disable(void);
extern int amd_iommu_reenable(int);
extern int amd_iommu_enable_faulting(void);
extern int amd_iommu_guest_ir;
+extern enum io_pgtable_fmt amd_iommu_pgtable;
/* IOMMUv2 specific functions */
struct iommu_domain;
@@ -56,6 +57,10 @@ extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
extern int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
u64 address);
+extern void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
+extern void amd_iommu_domain_update(struct protection_domain *domain);
+extern void amd_iommu_domain_flush_complete(struct protection_domain *domain);
+extern void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain);
extern int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
unsigned long cr3);
@@ -84,12 +89,9 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev)
(pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
}
-static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
+static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask)
{
- if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
- return false;
-
- return !!(iommu->features & f);
+ return !!(iommu->features & mask);
}
static inline u64 iommu_virt_to_phys(void *vaddr)
@@ -102,6 +104,21 @@ static inline void *iommu_phys_to_virt(unsigned long paddr)
return phys_to_virt(__sme_clr(paddr));
}
+static inline
+void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
+{
+ atomic64_set(&domain->iop.pt_root, root);
+ domain->iop.root = (u64 *)(root & PAGE_MASK);
+ domain->iop.mode = root & 7; /* lowest 3 bits encode pgtable mode */
+}
+
+static inline
+void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
+{
+ amd_iommu_domain_set_pt_root(domain, 0);
+}
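+
+/*
+ * A rough worked example of the pt_root encoding above (illustrative values,
+ * not part of the interface): for a page-aligned root at 0xffff888012345000
+ * and PAGE_MODE_3_LEVEL (3), the packed value is 0xffff888012345003.
+ * Unpacking is symmetric:
+ *
+ *	u64 *root = (u64 *)(pt_root & PAGE_MASK);	// 0xffff888012345000
+ *	int  mode = pt_root & 7;			// 3
+ */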
+
+
extern bool translation_pre_enabled(struct amd_iommu *iommu);
extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
struct device *dev);
@@ -114,4 +131,6 @@ void amd_iommu_apply_ivrs_quirks(void);
static inline void amd_iommu_apply_ivrs_quirks(void) { }
#endif
+extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
+ u64 *root, int mode);
#endif
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 553587827771..6937e3674a16 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -15,6 +15,7 @@
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/irqreturn.h>
+#include <linux/io-pgtable.h>
/*
* Maximum number of IOMMUs supported
@@ -252,6 +253,19 @@
#define GA_GUEST_NR 0x1
+#define IOMMU_IN_ADDR_BIT_SIZE 52
+#define IOMMU_OUT_ADDR_BIT_SIZE 52
+
+/*
+ * This bitmap is used to advertise the page sizes our hardware supports
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * 512GB Pages are not supported due to a hardware bug
+ */
+#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
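+
+/*
+ * A rough reading of the bitmap above (illustrative only): ~0xFFFUL sets
+ * every bit from 2^12 upwards, i.e. advertises all power-of-two sizes from
+ * 4K up, and ~(2ULL << 38) then clears bit 39, dropping only the 512GB
+ * size. So 4K, 8K, ..., 256GB and 1TB and larger remain advertised.
+ */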
+
/* Bit value definition for dte irq remapping fields*/
#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
#define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60)
@@ -387,6 +401,10 @@
#define IOMMU_CAP_NPCACHE 26
#define IOMMU_CAP_EFR 27
+/* IOMMU IVINFO */
+#define IOMMU_IVINFO_OFFSET 36
+#define IOMMU_IVINFO_EFRSUP BIT(0)
+
/* IOMMU Feature Reporting Field (for IVHD type 10h */
#define IOMMU_FEAT_GASUP_SHIFT 6
@@ -466,6 +484,27 @@ struct amd_irte_ops;
#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED (1 << 0)
+#define io_pgtable_to_data(x) \
+ container_of((x), struct amd_io_pgtable, iop)
+
+#define io_pgtable_ops_to_data(x) \
+ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+
+#define io_pgtable_ops_to_domain(x) \
+ container_of(io_pgtable_ops_to_data(x), \
+ struct protection_domain, iop)
+
+#define io_pgtable_cfg_to_data(x) \
+ container_of((x), struct amd_io_pgtable, pgtbl_cfg)
+
+struct amd_io_pgtable {
+ struct io_pgtable_cfg pgtbl_cfg;
+ struct io_pgtable iop;
+ int mode;
+ u64 *root;
+ atomic64_t pt_root; /* pgtable root and pgtable mode */
+};
+
/*
* This structure contains generic data for IOMMU protection domains
* independent of their use.
@@ -474,9 +513,9 @@ struct protection_domain {
struct list_head dev_list; /* List of all devices in this domain */
struct iommu_domain domain; /* generic domain handle used by
iommu core code */
+ struct amd_io_pgtable iop;
spinlock_t lock; /* mostly used to lock the page table*/
u16 id; /* the domain id written to the device table */
- atomic64_t pt_root; /* pgtable root and pgtable mode */
int glx; /* Number of levels for GCR3 table */
u64 *gcr3_tbl; /* Guest CR3 table */
unsigned long flags; /* flags to find out type of domain */
@@ -484,12 +523,6 @@ struct protection_domain {
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
};
-/* For decocded pt_root */
-struct domain_pgtable {
- int mode;
- u64 *root;
-};
-
/*
* Structure where we save information about one hardware AMD IOMMU in the
* system.
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 6a1f7048dacc..9126efcbaf2c 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -12,6 +12,7 @@
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
+#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
@@ -147,6 +148,8 @@ struct ivmd_header {
bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;
+enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;
+
int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
@@ -254,9 +257,13 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);
+static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+ u8 fxn, u64 *value, bool is_write);
static bool amd_iommu_pre_enabled = true;
+static u32 amd_iommu_ivinfo __initdata;
+
bool translation_pre_enabled(struct amd_iommu *iommu)
{
return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
@@ -296,6 +303,18 @@ int amd_iommu_get_num_iommus(void)
return amd_iommus_present;
}
+/*
+ * For IVHD type 0x11/0x40, EFR is also available via IVHD.
+ * Default to IVHD EFR since it is available sooner
+ * (i.e. before PCI init).
+ */
+static void __init early_iommu_features_init(struct amd_iommu *iommu,
+ struct ivhd_header *h)
+{
+ if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
+ iommu->features = h->efr_reg;
+}
+
/* Access to l1 and l2 indexed register spaces */
static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
@@ -1577,6 +1596,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+
+ early_iommu_features_init(iommu, h);
+
break;
default:
return -EINVAL;
@@ -1695,13 +1717,11 @@ static int __init init_iommu_all(struct acpi_table_header *table)
return 0;
}
-static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
- u8 fxn, u64 *value, bool is_write);
-
-static void init_iommu_perf_ctr(struct amd_iommu *iommu)
+static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
{
+ int retry;
struct pci_dev *pdev = iommu->dev;
- u64 val = 0xabcd, val2 = 0, save_reg = 0;
+ u64 val = 0xabcd, val2 = 0, save_reg, save_src;
if (!iommu_feature(iommu, FEATURE_PC))
return;
@@ -1709,17 +1729,39 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
amd_iommu_pc_present = true;
/* save the value to restore, if writable */
- if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) ||
+ iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false))
goto pc_false;
- /* Check if the performance counters can be written to */
- if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
- (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
- (val != val2))
+ /*
+ * Disable power gating by programming the performance counter
+ * source to 20 (i.e. counting reads and writes from/to the IOMMU
+ * Reserved Register [MMIO Offset 1FF8h], which are ignored),
+ * an event that never gets incremented during this init phase.
+ * (Note: The event is also deprecated.)
+ */
+ val = 20;
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true))
goto pc_false;
+ /* Check if the performance counters can be written to */
+ val = 0xabcd;
+ for (retry = 5; retry; retry--) {
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) ||
+ iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) ||
+ val2)
+ break;
+
+ /* Wait about 20 msec for power gating to disable and retry. */
+ msleep(20);
+ }
+
/* restore */
- if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) ||
+ iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true))
+ goto pc_false;
+
+ if (val != val2)
goto pc_false;
pci_info(pdev, "IOMMU performance counters supported\n");
@@ -1770,6 +1812,35 @@ static const struct attribute_group *amd_iommu_groups[] = {
NULL,
};
+/*
+ * Note: IVHD 0x11 and 0x40 also contain an exact copy
+ * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
+ * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
+ */
+static void __init late_iommu_features_init(struct amd_iommu *iommu)
+{
+ u64 features;
+
+ if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
+ return;
+
+ /* read extended feature bits */
+ features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+
+ if (!iommu->features) {
+ iommu->features = features;
+ return;
+ }
+
+ /*
+ * Sanity check and warn if EFR values from
+ * IVHD and MMIO conflict.
+ */
+ if (features != iommu->features)
+ pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
+ features, iommu->features);
+}
+
static int __init iommu_init_pci(struct amd_iommu *iommu)
{
int cap_ptr = iommu->cap_ptr;
@@ -1789,8 +1860,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
amd_iommu_iotlb_sup = false;
- /* read extended feature bits */
- iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+ late_iommu_features_init(iommu);
if (iommu_feature(iommu, FEATURE_GT)) {
int glxval;
@@ -1883,7 +1953,7 @@ static void print_iommu_info(void)
struct pci_dev *pdev = iommu->dev;
int i;
- pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
+ pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
pci_info(pdev, "Extended features (%#llx):",
@@ -1911,7 +1981,7 @@ static void print_iommu_info(void)
static int __init amd_iommu_init_pci(void)
{
struct amd_iommu *iommu;
- int ret = 0;
+ int ret;
for_each_iommu(iommu) {
ret = iommu_init_pci(iommu);
@@ -2607,6 +2677,11 @@ static void __init free_dma_resources(void)
free_unity_maps();
}
+static void __init ivinfo_init(void *ivrs)
+{
+ amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
+}
+
/*
* This is the hardware init function for AMD IOMMU in the system.
* This function is called either from amd_iommu_init or from the interrupt
@@ -2637,8 +2712,8 @@ static void __init free_dma_resources(void)
static int __init early_amd_iommu_init(void)
{
struct acpi_table_header *ivrs_base;
+ int i, remap_cache_sz, ret;
acpi_status status;
- int i, remap_cache_sz, ret = 0;
u32 pci_id;
if (!amd_iommu_detected)
@@ -2661,6 +2736,8 @@ static int __init early_amd_iommu_init(void)
if (ret)
goto out;
+ ivinfo_init(ivrs_base);
+
amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
@@ -2780,7 +2857,6 @@ static int __init early_amd_iommu_init(void)
out:
/* Don't leak any ACPI memory */
acpi_put_table(ivrs_base);
- ivrs_base = NULL;
return ret;
}
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
new file mode 100644
index 000000000000..bb0ee5c9fde7
--- /dev/null
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -0,0 +1,560 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPU-agnostic AMD IO page table allocator.
+ *
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+ */
+
+#define pr_fmt(fmt) "AMD-Vi: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/io-pgtable.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/barrier.h>
+
+#include "amd_iommu_types.h"
+#include "amd_iommu.h"
+
+static void v1_tlb_flush_all(void *cookie)
+{
+}
+
+static void v1_tlb_flush_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+}
+
+static void v1_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+}
+
+static const struct iommu_flush_ops v1_flush_ops = {
+ .tlb_flush_all = v1_tlb_flush_all,
+ .tlb_flush_walk = v1_tlb_flush_walk,
+ .tlb_add_page = v1_tlb_add_page,
+};
+
+/*
+ * Helper function to get the first pte of a large mapping
+ */
+static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
+ unsigned long *count)
+{
+ unsigned long pte_mask, pg_size, cnt;
+ u64 *fpte;
+
+ pg_size = PTE_PAGE_SIZE(*pte);
+ cnt = PAGE_SIZE_PTE_COUNT(pg_size);
+ pte_mask = ~((cnt << 3) - 1);
+ fpte = (u64 *)(((unsigned long)pte) & pte_mask);
+
+ if (page_size)
+ *page_size = pg_size;
+
+ if (count)
+ *count = cnt;
+
+ return fpte;
+}
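+
+/*
+ * A rough worked example of the mask arithmetic above (illustrative values):
+ * for a 32K level-7 mapping, PTE_PAGE_SIZE(*pte) is 0x8000, so cnt = 8
+ * replicated PTEs and pte_mask = ~((8 << 3) - 1) = ~0x3F, which rounds the
+ * PTE pointer down to the 64-byte-aligned start of that 8-entry run.
+ */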
+
+/****************************************************************************
+ *
+ * The functions below are used to create the page table mappings for
+ * unity mapped regions.
+ *
+ ****************************************************************************/
+
+static void free_page_list(struct page *freelist)
+{
+ while (freelist != NULL) {
+ unsigned long p = (unsigned long)page_address(freelist);
+
+ freelist = freelist->freelist;
+ free_page(p);
+ }
+}
+
+static struct page *free_pt_page(unsigned long pt, struct page *freelist)
+{
+ struct page *p = virt_to_page((void *)pt);
+
+ p->freelist = freelist;
+
+ return p;
+}
+
+#define DEFINE_FREE_PT_FN(LVL, FN) \
+static struct page *free_pt_##LVL (unsigned long __pt, struct page *freelist) \
+{ \
+ unsigned long p; \
+ u64 *pt; \
+ int i; \
+ \
+ pt = (u64 *)__pt; \
+ \
+ for (i = 0; i < 512; ++i) { \
+ /* PTE present? */ \
+ if (!IOMMU_PTE_PRESENT(pt[i])) \
+ continue; \
+ \
+ /* Large PTE? */ \
+ if (PM_PTE_LEVEL(pt[i]) == 0 || \
+ PM_PTE_LEVEL(pt[i]) == 7) \
+ continue; \
+ \
+ p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
+ freelist = FN(p, freelist); \
+ } \
+ \
+ return free_pt_page((unsigned long)pt, freelist); \
+}
+
+DEFINE_FREE_PT_FN(l2, free_pt_page)
+DEFINE_FREE_PT_FN(l3, free_pt_l2)
+DEFINE_FREE_PT_FN(l4, free_pt_l3)
+DEFINE_FREE_PT_FN(l5, free_pt_l4)
+DEFINE_FREE_PT_FN(l6, free_pt_l5)
+
+static struct page *free_sub_pt(unsigned long root, int mode,
+ struct page *freelist)
+{
+ switch (mode) {
+ case PAGE_MODE_NONE:
+ case PAGE_MODE_7_LEVEL:
+ break;
+ case PAGE_MODE_1_LEVEL:
+ freelist = free_pt_page(root, freelist);
+ break;
+ case PAGE_MODE_2_LEVEL:
+ freelist = free_pt_l2(root, freelist);
+ break;
+ case PAGE_MODE_3_LEVEL:
+ freelist = free_pt_l3(root, freelist);
+ break;
+ case PAGE_MODE_4_LEVEL:
+ freelist = free_pt_l4(root, freelist);
+ break;
+ case PAGE_MODE_5_LEVEL:
+ freelist = free_pt_l5(root, freelist);
+ break;
+ case PAGE_MODE_6_LEVEL:
+ freelist = free_pt_l6(root, freelist);
+ break;
+ default:
+ BUG();
+ }
+
+ return freelist;
+}
+
+void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
+ u64 *root, int mode)
+{
+ u64 pt_root;
+
+ /* lowest 3 bits encode pgtable mode */
+ pt_root = mode & 7;
+ pt_root |= (u64)root;
+
+ amd_iommu_domain_set_pt_root(domain, pt_root);
+}
+
+/*
+ * This function is used to add another level to an IO page table. Adding
+ * another level increases the size of the address space by 9 bits, up to a
+ * maximum of 64 bits.
+ */
+static bool increase_address_space(struct protection_domain *domain,
+ unsigned long address,
+ gfp_t gfp)
+{
+ unsigned long flags;
+ bool ret = true;
+ u64 *pte;
+
+ pte = (void *)get_zeroed_page(gfp);
+ if (!pte)
+ return false;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ if (address <= PM_LEVEL_SIZE(domain->iop.mode))
+ goto out;
+
+ ret = false;
+ if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL))
+ goto out;
+
+ *pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root));
+
+ domain->iop.root = pte;
+ domain->iop.mode += 1;
+ amd_iommu_update_and_flush_device_table(domain);
+ amd_iommu_domain_flush_complete(domain);
+
+ /*
+ * Device Table needs to be updated and flushed before the new root can
+ * be published.
+ */
+ amd_iommu_domain_set_pgtable(domain, pte, domain->iop.mode);
+
+ pte = NULL;
+ ret = true;
+
+out:
+ spin_unlock_irqrestore(&domain->lock, flags);
+ free_page((unsigned long)pte);
+
+ return ret;
+}
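+
+/*
+ * A rough worked example of the growth above: a 3-level table covers IOVAs
+ * up to PM_LEVEL_SIZE(3) = 2^39 - 1 (512GB). One successful call moves the
+ * old root down a level behind a fresh top-level page, giving a 4-level
+ * table that covers 2^48 - 1, and so on up to the 6-level limit.
+ */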
+
+static u64 *alloc_pte(struct protection_domain *domain,
+ unsigned long address,
+ unsigned long page_size,
+ u64 **pte_page,
+ gfp_t gfp,
+ bool *updated)
+{
+ int level, end_lvl;
+ u64 *pte, *page;
+
+ BUG_ON(!is_power_of_2(page_size));
+
+ while (address > PM_LEVEL_SIZE(domain->iop.mode)) {
+ /*
+ * Return an error if there is no memory to update the
+ * page-table.
+ */
+ if (!increase_address_space(domain, address, gfp))
+ return NULL;
+ }
+
+
+ level = domain->iop.mode - 1;
+ pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
+ address = PAGE_SIZE_ALIGN(address, page_size);
+ end_lvl = PAGE_SIZE_LEVEL(page_size);
+
+ while (level > end_lvl) {
+ u64 __pte, __npte;
+ int pte_level;
+
+ __pte = *pte;
+ pte_level = PM_PTE_LEVEL(__pte);
+
+ /*
+ * If we replace a series of large PTEs, we need
+ * to tear down all of them.
+ */
+ if (IOMMU_PTE_PRESENT(__pte) &&
+ pte_level == PAGE_MODE_7_LEVEL) {
+ unsigned long count, i;
+ u64 *lpte;
+
+ lpte = first_pte_l7(pte, NULL, &count);
+
+ /*
+ * Unmap the replicated PTEs that still match the
+ * original large mapping
+ */
+ for (i = 0; i < count; ++i)
+ cmpxchg64(&lpte[i], __pte, 0ULL);
+
+ *updated = true;
+ continue;
+ }
+
+ if (!IOMMU_PTE_PRESENT(__pte) ||
+ pte_level == PAGE_MODE_NONE) {
+ page = (u64 *)get_zeroed_page(gfp);
+
+ if (!page)
+ return NULL;
+
+ __npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
+
+ /* pte could have been changed somewhere. */
+ if (cmpxchg64(pte, __pte, __npte) != __pte)
+ free_page((unsigned long)page);
+ else if (IOMMU_PTE_PRESENT(__pte))
+ *updated = true;
+
+ continue;
+ }
+
+ /* No level skipping support yet */
+ if (pte_level != level)
+ return NULL;
+
+ level -= 1;
+
+ pte = IOMMU_PTE_PAGE(__pte);
+
+ if (pte_page && level == end_lvl)
+ *pte_page = pte;
+
+ pte = &pte[PM_LEVEL_INDEX(level, address)];
+ }
+
+ return pte;
+}
+
+/*
+ * This function checks if there is a PTE for a given dma address. If
+ * there is one, it returns the pointer to it.
+ */
+static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
+ unsigned long address,
+ unsigned long *page_size)
+{
+ int level;
+ u64 *pte;
+
+ *page_size = 0;
+
+ if (address > PM_LEVEL_SIZE(pgtable->mode))
+ return NULL;
+
+ level = pgtable->mode - 1;
+ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ *page_size = PTE_LEVEL_PAGE_SIZE(level);
+
+ while (level > 0) {
+
+ /* Not Present */
+ if (!IOMMU_PTE_PRESENT(*pte))
+ return NULL;
+
+ /* Large PTE */
+ if (PM_PTE_LEVEL(*pte) == 7 ||
+ PM_PTE_LEVEL(*pte) == 0)
+ break;
+
+ /* No level skipping support yet */
+ if (PM_PTE_LEVEL(*pte) != level)
+ return NULL;
+
+ level -= 1;
+
+ /* Walk to the next level */
+ pte = IOMMU_PTE_PAGE(*pte);
+ pte = &pte[PM_LEVEL_INDEX(level, address)];
+ *page_size = PTE_LEVEL_PAGE_SIZE(level);
+ }
+
+ /*
+ * If we have a series of large PTEs, make
+ * sure to return a pointer to the first one.
+ */
+ if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
+ pte = first_pte_l7(pte, page_size, NULL);
+
+ return pte;
+}
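+
+/*
+ * A rough worked example: for an IOVA backed by a 2M mapping, the walk stops
+ * at level 1 (PM_PTE_LEVEL() is 0 there), *page_size comes back as 0x200000
+ * and the returned pointer is that single level-1 PTE; for a level-7 mapping
+ * the pointer is additionally rewound to the first replicated entry.
+ */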
+
+static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
+{
+ unsigned long pt;
+ int mode;
+
+ while (cmpxchg64(pte, pteval, 0) != pteval) {
+ pr_warn("AMD-Vi: IOMMU pte changed since we read it\n");
+ pteval = *pte;
+ }
+
+ if (!IOMMU_PTE_PRESENT(pteval))
+ return freelist;
+
+ pt = (unsigned long)IOMMU_PTE_PAGE(pteval);
+ mode = IOMMU_PTE_MODE(pteval);
+
+ return free_sub_pt(pt, mode, freelist);
+}
+
+/*
+ * Generic mapping function. It maps a physical address into a DMA
+ * address space and allocates the page table pages if necessary.
+ * In the future it can be extended to a generic mapping function
+ * supporting all features of AMD IOMMU page tables like level skipping
+ * and full 64 bit address spaces.
+ */
+static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+ struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
+ struct page *freelist = NULL;
+ bool updated = false;
+ u64 __pte, *pte;
+ int ret, i, count;
+
+ BUG_ON(!IS_ALIGNED(iova, size));
+ BUG_ON(!IS_ALIGNED(paddr, size));
+
+ ret = -EINVAL;
+ if (!(prot & IOMMU_PROT_MASK))
+ goto out;
+
+ count = PAGE_SIZE_PTE_COUNT(size);
+ pte = alloc_pte(dom, iova, size, NULL, gfp, &updated);
+
+ ret = -ENOMEM;
+ if (!pte)
+ goto out;
+
+ for (i = 0; i < count; ++i)
+ freelist = free_clear_pte(&pte[i], pte[i], freelist);
+
+ if (freelist != NULL)
+ updated = true;
+
+ if (count > 1) {
+ __pte = PAGE_SIZE_PTE(__sme_set(paddr), size);
+ __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+ } else
+ __pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+
+ if (prot & IOMMU_PROT_IR)
+ __pte |= IOMMU_PTE_IR;
+ if (prot & IOMMU_PROT_IW)
+ __pte |= IOMMU_PTE_IW;
+
+ for (i = 0; i < count; ++i)
+ pte[i] = __pte;
+
+ ret = 0;
+
+out:
+ if (updated) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dom->lock, flags);
+ /*
+ * Flush domain TLB(s) and wait for completion. Any Device-Table
+ * Updates and flushing already happened in
+ * increase_address_space().
+ */
+ amd_iommu_domain_flush_tlb_pde(dom);
+ amd_iommu_domain_flush_complete(dom);
+ spin_unlock_irqrestore(&dom->lock, flags);
+ }
+
+ /* Everything flushed out, free pages now */
+ free_page_list(freelist);
+
+ return ret;
+}
+
+static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
+ unsigned long iova,
+ size_t size,
+ struct iommu_iotlb_gather *gather)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+ unsigned long long unmapped;
+ unsigned long unmap_size;
+ u64 *pte;
+
+ BUG_ON(!is_power_of_2(size));
+
+ unmapped = 0;
+
+ while (unmapped < size) {
+ pte = fetch_pte(pgtable, iova, &unmap_size);
+ if (pte) {
+ int i, count;
+
+ count = PAGE_SIZE_PTE_COUNT(unmap_size);
+ for (i = 0; i < count; i++)
+ pte[i] = 0ULL;
+ }
+
+ iova = (iova & ~(unmap_size - 1)) + unmap_size;
+ unmapped += unmap_size;
+ }
+
+ BUG_ON(unmapped && !is_power_of_2(unmapped));
+
+ return unmapped;
+}
+
+static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+ unsigned long offset_mask, pte_pgsize;
+ u64 *pte, __pte;
+
+ if (pgtable->mode == PAGE_MODE_NONE)
+ return iova;
+
+ pte = fetch_pte(pgtable, iova, &pte_pgsize);
+
+ if (!pte || !IOMMU_PTE_PRESENT(*pte))
+ return 0;
+
+ offset_mask = pte_pgsize - 1;
+ __pte = __sme_clr(*pte & PM_ADDR_MASK);
+
+ return (__pte & ~offset_mask) | (iova & offset_mask);
+}
+
+/*
+ * ----------------------------------------------------
+ */
+static void v1_free_pgtable(struct io_pgtable *iop)
+{
+ struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
+ struct protection_domain *dom;
+ struct page *freelist = NULL;
+ unsigned long root;
+
+ if (pgtable->mode == PAGE_MODE_NONE)
+ return;
+
+ dom = container_of(pgtable, struct protection_domain, iop);
+
+ /* Update data structure */
+ amd_iommu_domain_clr_pt_root(dom);
+
+ /* Make changes visible to IOMMUs */
+ amd_iommu_domain_update(dom);
+
+ /* Page-table is not visible to IOMMU anymore, so free it */
+ BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
+ pgtable->mode > PAGE_MODE_6_LEVEL);
+
+ root = (unsigned long)pgtable->root;
+ freelist = free_sub_pt(root, pgtable->mode, freelist);
+
+ free_page_list(freelist);
+}
+
+static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
+
+ cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES;
+ cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
+ cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE;
+ cfg->tlb = &v1_flush_ops;
+
+ pgtable->iop.ops.map = iommu_v1_map_page;
+ pgtable->iop.ops.unmap = iommu_v1_unmap_page;
+ pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
+
+ return &pgtable->iop;
+}
+
+struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = {
+ .alloc = v1_alloc_pgtable,
+ .free = v1_free_pgtable,
+};
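+
+/*
+ * A rough usage sketch (illustrative, assumed caller code rather than part
+ * of this file): the domain code is expected to instantiate this format
+ * roughly as
+ *
+ *	struct io_pgtable_ops *ops =
+ *		alloc_io_pgtable_ops(AMD_IOMMU_V1, &domain->iop.pgtbl_cfg,
+ *				     domain);
+ *
+ * after which ops->map(), ops->unmap() and ops->iova_to_phys() dispatch to
+ * the iommu_v1_* helpers above.
+ */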
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index f0adbc48fd17..a69a8b573e40 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -31,6 +31,7 @@
#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/iova.h>
+#include <linux/io-pgtable.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
@@ -57,16 +58,6 @@
#define HT_RANGE_START (0xfd00000000ULL)
#define HT_RANGE_END (0xffffffffffULL)
-/*
- * This bitmap is used to advertise the page sizes our hardware support
- * to the IOMMU core, which will then use this information to split
- * physically contiguous memory regions it is mapping into page sizes
- * that we support.
- *
- * 512GB Pages are not supported due to a hardware bug
- */
-#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
-
#define DEFAULT_PGTABLE_LEVEL PAGE_MODE_3_LEVEL
static DEFINE_SPINLOCK(pd_bitmap_lock);
@@ -96,10 +87,7 @@ struct iommu_cmd {
struct kmem_cache *amd_iommu_irq_cache;
-static void update_domain(struct protection_domain *domain);
static void detach_device(struct device *dev);
-static void update_and_flush_device_table(struct protection_domain *domain,
- struct domain_pgtable *pgtable);
/****************************************************************************
*
@@ -151,37 +139,6 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
-static void amd_iommu_domain_get_pgtable(struct protection_domain *domain,
- struct domain_pgtable *pgtable)
-{
- u64 pt_root = atomic64_read(&domain->pt_root);
-
- pgtable->root = (u64 *)(pt_root & PAGE_MASK);
- pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */
-}
-
-static void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
-{
- atomic64_set(&domain->pt_root, root);
-}
-
-static void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
-{
- amd_iommu_domain_set_pt_root(domain, 0);
-}
-
-static void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
- u64 *root, int mode)
-{
- u64 pt_root;
-
- /* lowest 3 bits encode pgtable mode */
- pt_root = mode & 7;
- pt_root |= (u64)root;
-
- amd_iommu_domain_set_pt_root(domain, pt_root);
-}
-
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -437,29 +394,6 @@ static void amd_iommu_uninit_device(struct device *dev)
*/
}
-/*
- * Helper function to get the first pte of a large mapping
- */
-static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
- unsigned long *count)
-{
- unsigned long pte_mask, pg_size, cnt;
- u64 *fpte;
-
- pg_size = PTE_PAGE_SIZE(*pte);
- cnt = PAGE_SIZE_PTE_COUNT(pg_size);
- pte_mask = ~((cnt << 3) - 1);
- fpte = (u64 *)(((unsigned long)pte) & pte_mask);
-
- if (page_size)
- *page_size = pg_size;
-
- if (count)
- *count = cnt;
-
- return fpte;
-}
-
/****************************************************************************
*
* Interrupt handling functions
@@ -1335,12 +1269,12 @@ static void domain_flush_pages(struct protection_domain *domain,
}
/* Flush the whole IO/TLB for a given protection domain - including PDE */
-static void domain_flush_tlb_pde(struct protection_domain *domain)
+void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain)
{
__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}
-static void domain_flush_complete(struct protection_domain *domain)
+void amd_iommu_domain_flush_complete(struct protection_domain *domain)
{
int i;
@@ -1365,7 +1299,7 @@ static void domain_flush_np_cache(struct protection_domain *domain,
spin_lock_irqsave(&domain->lock, flags);
domain_flush_pages(domain, iova, size);
- domain_flush_complete(domain);
+ amd_iommu_domain_flush_complete(domain);
spin_unlock_irqrestore(&domain->lock, flags);
}
}
@@ -1384,443 +1318,6 @@ static void domain_flush_devices(struct protection_domain *domain)
/****************************************************************************
*
- * The functions below are used the create the page table mappings for
- * unity mapped regions.
- *
- ****************************************************************************/
-
-static void free_page_list(struct page *freelist)
-{
- while (freelist != NULL) {
- unsigned long p = (unsigned long)page_address(freelist);
- freelist = freelist->freelist;
- free_page(p);
- }
-}
-
-static struct page *free_pt_page(unsigned long pt, struct page *freelist)
-{
- struct page *p = virt_to_page((void *)pt);
-
- p->freelist = freelist;
-
- return p;
-}
-
-#define DEFINE_FREE_PT_FN(LVL, FN) \
-static struct page *free_pt_##LVL (unsigned long __pt, struct page *freelist) \
-{ \
- unsigned long p; \
- u64 *pt; \
- int i; \
- \
- pt = (u64 *)__pt; \
- \
- for (i = 0; i < 512; ++i) { \
- /* PTE present? */ \
- if (!IOMMU_PTE_PRESENT(pt[i])) \
- continue; \
- \
- /* Large PTE? */ \
- if (PM_PTE_LEVEL(pt[i]) == 0 || \
- PM_PTE_LEVEL(pt[i]) == 7) \
- continue; \
- \
- p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
- freelist = FN(p, freelist); \
- } \
- \
- return free_pt_page((unsigned long)pt, freelist); \
-}
-
-DEFINE_FREE_PT_FN(l2, free_pt_page)
-DEFINE_FREE_PT_FN(l3, free_pt_l2)
-DEFINE_FREE_PT_FN(l4, free_pt_l3)
-DEFINE_FREE_PT_FN(l5, free_pt_l4)
-DEFINE_FREE_PT_FN(l6, free_pt_l5)
-
-static struct page *free_sub_pt(unsigned long root, int mode,
- struct page *freelist)
-{
- switch (mode) {
- case PAGE_MODE_NONE:
- case PAGE_MODE_7_LEVEL:
- break;
- case PAGE_MODE_1_LEVEL:
- freelist = free_pt_page(root, freelist);
- break;
- case PAGE_MODE_2_LEVEL:
- freelist = free_pt_l2(root, freelist);
- break;
- case PAGE_MODE_3_LEVEL:
- freelist = free_pt_l3(root, freelist);
- break;
- case PAGE_MODE_4_LEVEL:
- freelist = free_pt_l4(root, freelist);
- break;
- case PAGE_MODE_5_LEVEL:
- freelist = free_pt_l5(root, freelist);
- break;
- case PAGE_MODE_6_LEVEL:
- freelist = free_pt_l6(root, freelist);
- break;
- default:
- BUG();
- }
-
- return freelist;
-}
-
-static void free_pagetable(struct domain_pgtable *pgtable)
-{
- struct page *freelist = NULL;
- unsigned long root;
-
- if (pgtable->mode == PAGE_MODE_NONE)
- return;
-
- BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
- pgtable->mode > PAGE_MODE_6_LEVEL);
-
- root = (unsigned long)pgtable->root;
- freelist = free_sub_pt(root, pgtable->mode, freelist);
-
- free_page_list(freelist);
-}
-
-/*
- * This function is used to add another level to an IO page table. Adding
- * another level increases the size of the address space by 9 bits to a size up
- * to 64 bits.
- */
-static bool increase_address_space(struct protection_domain *domain,
- unsigned long address,
- gfp_t gfp)
-{
- struct domain_pgtable pgtable;
- unsigned long flags;
- bool ret = true;
- u64 *pte;
-
- spin_lock_irqsave(&domain->lock, flags);
-
- amd_iommu_domain_get_pgtable(domain, &pgtable);
-
- if (address <= PM_LEVEL_SIZE(pgtable.mode))
- goto out;
-
- ret = false;
- if (WARN_ON_ONCE(pgtable.mode == PAGE_MODE_6_LEVEL))
- goto out;
-
- pte = (void *)get_zeroed_page(gfp);
- if (!pte)
- goto out;
-
- *pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root));
-
- pgtable.root = pte;
- pgtable.mode += 1;
- update_and_flush_device_table(domain, &pgtable);
- domain_flush_complete(domain);
-
- /*
- * Device Table needs to be updated and flushed before the new root can
- * be published.
- */
- amd_iommu_domain_set_pgtable(domain, pte, pgtable.mode);
-
- ret = true;
-
-out:
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-static u64 *alloc_pte(struct protection_domain *domain,
- unsigned long address,
- unsigned long page_size,
- u64 **pte_page,
- gfp_t gfp,
- bool *updated)
-{
- struct domain_pgtable pgtable;
- int level, end_lvl;
- u64 *pte, *page;
-
- BUG_ON(!is_power_of_2(page_size));
-
- amd_iommu_domain_get_pgtable(domain, &pgtable);
-
- while (address > PM_LEVEL_SIZE(pgtable.mode)) {
- /*
- * Return an error if there is no memory to update the
- * page-table.
- */
- if (!increase_address_space(domain, address, gfp))
- return NULL;
-
- /* Read new values to check if update was successful */
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- }
-
-
- level = pgtable.mode - 1;
- pte = &pgtable.root[PM_LEVEL_INDEX(level, address)];
- address = PAGE_SIZE_ALIGN(address, page_size);
- end_lvl = PAGE_SIZE_LEVEL(page_size);
-
- while (level > end_lvl) {
- u64 __pte, __npte;
- int pte_level;
-
- __pte = *pte;
- pte_level = PM_PTE_LEVEL(__pte);
-
- /*
- * If we replace a series of large PTEs, we need
- * to tear down all of them.
- */
- if (IOMMU_PTE_PRESENT(__pte) &&
- pte_level == PAGE_MODE_7_LEVEL) {
- unsigned long count, i;
- u64 *lpte;
-
- lpte = first_pte_l7(pte, NULL, &count);
-
- /*
- * Unmap the replicated PTEs that still match the
- * original large mapping
- */
- for (i = 0; i < count; ++i)
- cmpxchg64(&lpte[i], __pte, 0ULL);
-
- *updated = true;
- continue;
- }
-
- if (!IOMMU_PTE_PRESENT(__pte) ||
- pte_level == PAGE_MODE_NONE) {
- page = (u64 *)get_zeroed_page(gfp);
-
- if (!page)
- return NULL;
-
- __npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
-
- /* pte could have been changed somewhere. */
- if (cmpxchg64(pte, __pte, __npte) != __pte)
- free_page((unsigned long)page);
- else if (IOMMU_PTE_PRESENT(__pte))
- *updated = true;
-
- continue;
- }
-
- /* No level skipping support yet */
- if (pte_level != level)
- return NULL;
-
- level -= 1;
-
- pte = IOMMU_PTE_PAGE(__pte);
-
- if (pte_page && level == end_lvl)
- *pte_page = pte;
-
- pte = &pte[PM_LEVEL_INDEX(level, address)];
- }
-
- return pte;
-}
-
-/*
- * This function checks if there is a PTE for a given dma address. If
- * there is one, it returns the pointer to it.
- */
-static u64 *fetch_pte(struct protection_domain *domain,
- unsigned long address,
- unsigned long *page_size)
-{
- struct domain_pgtable pgtable;
- int level;
- u64 *pte;
-
- *page_size = 0;
-
- amd_iommu_domain_get_pgtable(domain, &pgtable);
-
- if (address > PM_LEVEL_SIZE(pgtable.mode))
- return NULL;
-
- level = pgtable.mode - 1;
- pte = &pgtable.root[PM_LEVEL_INDEX(level, address)];
- *page_size = PTE_LEVEL_PAGE_SIZE(level);
-
- while (level > 0) {
-
- /* Not Present */
- if (!IOMMU_PTE_PRESENT(*pte))
- return NULL;
-
- /* Large PTE */
- if (PM_PTE_LEVEL(*pte) == 7 ||
- PM_PTE_LEVEL(*pte) == 0)
- break;
-
- /* No level skipping support yet */
- if (PM_PTE_LEVEL(*pte) != level)
- return NULL;
-
- level -= 1;
-
- /* Walk to the next level */
- pte = IOMMU_PTE_PAGE(*pte);
- pte = &pte[PM_LEVEL_INDEX(level, address)];
- *page_size = PTE_LEVEL_PAGE_SIZE(level);
- }
-
- /*
- * If we have a series of large PTEs, make
- * sure to return a pointer to the first one.
- */
- if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
- pte = first_pte_l7(pte, page_size, NULL);
-
- return pte;
-}
-
-static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
-{
- unsigned long pt;
- int mode;
-
- while (cmpxchg64(pte, pteval, 0) != pteval) {
- pr_warn("AMD-Vi: IOMMU pte changed since we read it\n");
- pteval = *pte;
- }
-
- if (!IOMMU_PTE_PRESENT(pteval))
- return freelist;
-
- pt = (unsigned long)IOMMU_PTE_PAGE(pteval);
- mode = IOMMU_PTE_MODE(pteval);
-
- return free_sub_pt(pt, mode, freelist);
-}
-
-/*
- * Generic mapping functions. It maps a physical address into a DMA
- * address space. It allocates the page table pages if necessary.
- * In the future it can be extended to a generic mapping function
- * supporting all features of AMD IOMMU page tables like level skipping
- * and full 64 bit address spaces.
- */
-static int iommu_map_page(struct protection_domain *dom,
- unsigned long bus_addr,
- unsigned long phys_addr,
- unsigned long page_size,
- int prot,
- gfp_t gfp)
-{
- struct page *freelist = NULL;
- bool updated = false;
- u64 __pte, *pte;
- int ret, i, count;
-
- BUG_ON(!IS_ALIGNED(bus_addr, page_size));
- BUG_ON(!IS_ALIGNED(phys_addr, page_size));
-
- ret = -EINVAL;
- if (!(prot & IOMMU_PROT_MASK))
- goto out;
-
- count = PAGE_SIZE_PTE_COUNT(page_size);
- pte = alloc_pte(dom, bus_addr, page_size, NULL, gfp, &updated);
-
- ret = -ENOMEM;
- if (!pte)
- goto out;
-
- for (i = 0; i < count; ++i)
- freelist = free_clear_pte(&pte[i], pte[i], freelist);
-
- if (freelist != NULL)
- updated = true;
-
- if (count > 1) {
- __pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
- __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
- } else
- __pte = __sme_set(phys_addr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
-
- if (prot & IOMMU_PROT_IR)
- __pte |= IOMMU_PTE_IR;
- if (prot & IOMMU_PROT_IW)
- __pte |= IOMMU_PTE_IW;
-
- for (i = 0; i < count; ++i)
- pte[i] = __pte;
-
- ret = 0;
-
-out:
- if (updated) {
- unsigned long flags;
-
- spin_lock_irqsave(&dom->lock, flags);
- /*
- * Flush domain TLB(s) and wait for completion. Any Device-Table
- * Updates and flushing already happened in
- * increase_address_space().
- */
- domain_flush_tlb_pde(dom);
- domain_flush_complete(dom);
- spin_unlock_irqrestore(&dom->lock, flags);
- }
-
- /* Everything flushed out, free pages now */
- free_page_list(freelist);
-
- return ret;
-}
-
-static unsigned long iommu_unmap_page(struct protection_domain *dom,
- unsigned long bus_addr,
- unsigned long page_size)
-{
- unsigned long long unmapped;
- unsigned long unmap_size;
- u64 *pte;
-
- BUG_ON(!is_power_of_2(page_size));
-
- unmapped = 0;
-
- while (unmapped < page_size) {
-
- pte = fetch_pte(dom, bus_addr, &unmap_size);
-
- if (pte) {
- int i, count;
-
- count = PAGE_SIZE_PTE_COUNT(unmap_size);
- for (i = 0; i < count; i++)
- pte[i] = 0ULL;
- }
-
- bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size;
- unmapped += unmap_size;
- }
-
- BUG_ON(unmapped && !is_power_of_2(unmapped));
-
- return unmapped;
-}
-
-/****************************************************************************
- *
* The next functions belong to the domain allocation. A domain is
* allocated for every IOMMU as the default domain. If device isolation
* is enabled, every device get its own domain. The most important thing
@@ -1896,17 +1393,16 @@ static void free_gcr3_table(struct protection_domain *domain)
}
static void set_dte_entry(u16 devid, struct protection_domain *domain,
- struct domain_pgtable *pgtable,
bool ats, bool ppr)
{
u64 pte_root = 0;
u64 flags = 0;
u32 old_domid;
- if (pgtable->mode != PAGE_MODE_NONE)
- pte_root = iommu_virt_to_phys(pgtable->root);
+ if (domain->iop.mode != PAGE_MODE_NONE)
+ pte_root = iommu_virt_to_phys(domain->iop.root);
- pte_root |= (pgtable->mode & DEV_ENTRY_MODE_MASK)
+ pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;
@@ -1979,7 +1475,6 @@ static void clear_dte_entry(u16 devid)
static void do_attach(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
- struct domain_pgtable pgtable;
struct amd_iommu *iommu;
bool ats;
@@ -1995,8 +1490,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_cnt += 1;
/* Update device table */
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- set_dte_entry(dev_data->devid, domain, &pgtable,
+ set_dte_entry(dev_data->devid, domain,
ats, dev_data->iommu_v2);
clone_aliases(dev_data->pdev);
@@ -2020,10 +1514,10 @@ static void do_detach(struct iommu_dev_data *dev_data)
device_flush_dte(dev_data);
/* Flush IOTLB */
- domain_flush_tlb_pde(domain);
+ amd_iommu_domain_flush_tlb_pde(domain);
/* Wait for the flushes to finish */
- domain_flush_complete(domain);
+ amd_iommu_domain_flush_complete(domain);
/* decrease reference counters - needs to happen after the flushes */
domain->dev_iommu[iommu->index] -= 1;
@@ -2156,9 +1650,9 @@ skip_ats_check:
* left the caches in the IOMMU dirty. So we have to flush
* here to evict all dirty stuff.
*/
- domain_flush_tlb_pde(domain);
+ amd_iommu_domain_flush_tlb_pde(domain);
- domain_flush_complete(domain);
+ amd_iommu_domain_flush_complete(domain);
out:
spin_unlock(&dev_data->lock);
@@ -2303,36 +1797,31 @@ static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
*
*****************************************************************************/
-static void update_device_table(struct protection_domain *domain,
- struct domain_pgtable *pgtable)
+static void update_device_table(struct protection_domain *domain)
{
struct iommu_dev_data *dev_data;
list_for_each_entry(dev_data, &domain->dev_list, list) {
- set_dte_entry(dev_data->devid, domain, pgtable,
+ set_dte_entry(dev_data->devid, domain,
dev_data->ats.enabled, dev_data->iommu_v2);
clone_aliases(dev_data->pdev);
}
}
-static void update_and_flush_device_table(struct protection_domain *domain,
- struct domain_pgtable *pgtable)
+void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
{
- update_device_table(domain, pgtable);
+ update_device_table(domain);
domain_flush_devices(domain);
}
-static void update_domain(struct protection_domain *domain)
+void amd_iommu_domain_update(struct protection_domain *domain)
{
- struct domain_pgtable pgtable;
-
/* Update device table */
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- update_and_flush_device_table(domain, &pgtable);
+ amd_iommu_update_and_flush_device_table(domain);
/* Flush domain TLB(s) and wait for completion */
- domain_flush_tlb_pde(domain);
- domain_flush_complete(domain);
+ amd_iommu_domain_flush_tlb_pde(domain);
+ amd_iommu_domain_flush_complete(domain);
}
int __init amd_iommu_init_api(void)
@@ -2400,22 +1889,19 @@ static void cleanup_domain(struct protection_domain *domain)
static void protection_domain_free(struct protection_domain *domain)
{
- struct domain_pgtable pgtable;
-
if (!domain)
return;
if (domain->id)
domain_id_free(domain->id);
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- amd_iommu_domain_clr_pt_root(domain);
- free_pagetable(&pgtable);
+ if (domain->iop.pgtbl_cfg.tlb)
+ free_io_pgtable_ops(&domain->iop.iop.ops);
kfree(domain);
}
-static int protection_domain_init(struct protection_domain *domain, int mode)
+static int protection_domain_init_v1(struct protection_domain *domain, int mode)
{
u64 *pt_root = NULL;
@@ -2438,34 +1924,55 @@ static int protection_domain_init(struct protection_domain *domain, int mode)
return 0;
}
-static struct protection_domain *protection_domain_alloc(int mode)
+static struct protection_domain *protection_domain_alloc(unsigned int type)
{
+ struct io_pgtable_ops *pgtbl_ops;
struct protection_domain *domain;
+ int pgtable = amd_iommu_pgtable;
+ int mode = DEFAULT_PGTABLE_LEVEL;
+ int ret;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
- if (protection_domain_init(domain, mode))
+ /*
+ * Force IOMMU v1 page table when iommu=pt and
+ * when allocating domain for pass-through devices.
+ */
+ if (type == IOMMU_DOMAIN_IDENTITY) {
+ pgtable = AMD_IOMMU_V1;
+ mode = PAGE_MODE_NONE;
+ } else if (type == IOMMU_DOMAIN_UNMANAGED) {
+ pgtable = AMD_IOMMU_V1;
+ }
+
+ switch (pgtable) {
+ case AMD_IOMMU_V1:
+ ret = protection_domain_init_v1(domain, mode);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret)
goto out_err;
- return domain;
+ pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
+ if (!pgtbl_ops)
+ goto out_err;
+ return domain;
out_err:
kfree(domain);
-
return NULL;
}
static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
struct protection_domain *domain;
- int mode = DEFAULT_PGTABLE_LEVEL;
-
- if (type == IOMMU_DOMAIN_IDENTITY)
- mode = PAGE_MODE_NONE;
- domain = protection_domain_alloc(mode);
+ domain = protection_domain_alloc(type);
if (!domain)
return NULL;
@@ -2580,12 +2087,12 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
gfp_t gfp)
{
struct protection_domain *domain = to_pdomain(dom);
- struct domain_pgtable pgtable;
+ struct io_pgtable_ops *ops = &domain->iop.iop.ops;
int prot = 0;
- int ret;
+ int ret = -EINVAL;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode == PAGE_MODE_NONE)
+ if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ (domain->iop.mode == PAGE_MODE_NONE))
return -EINVAL;
if (iommu_prot & IOMMU_READ)
@@ -2593,9 +2100,10 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
if (iommu_prot & IOMMU_WRITE)
prot |= IOMMU_PROT_IW;
- ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);
-
- domain_flush_np_cache(domain, iova, page_size);
+ if (ops->map) {
+ ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
+ domain_flush_np_cache(domain, iova, page_size);
+ }
return ret;
}
@@ -2605,36 +2113,22 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
- struct domain_pgtable pgtable;
+ struct io_pgtable_ops *ops = &domain->iop.iop.ops;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode == PAGE_MODE_NONE)
+ if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ (domain->iop.mode == PAGE_MODE_NONE))
return 0;
- return iommu_unmap_page(domain, iova, page_size);
+ return (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
dma_addr_t iova)
{
struct protection_domain *domain = to_pdomain(dom);
- unsigned long offset_mask, pte_pgsize;
- struct domain_pgtable pgtable;
- u64 *pte, __pte;
-
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode == PAGE_MODE_NONE)
- return iova;
+ struct io_pgtable_ops *ops = &domain->iop.iop.ops;
- pte = fetch_pte(domain, iova, &pte_pgsize);
-
- if (!pte || !IOMMU_PTE_PRESENT(*pte))
- return 0;
-
- offset_mask = pte_pgsize - 1;
- __pte = __sme_clr(*pte & PM_ADDR_MASK);
-
- return (__pte & ~offset_mask) | (iova & offset_mask);
+ return ops->iova_to_phys(ops, iova);
}
static bool amd_iommu_capable(enum iommu_cap cap)
@@ -2720,8 +2214,8 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
unsigned long flags;
spin_lock_irqsave(&dom->lock, flags);
- domain_flush_tlb_pde(dom);
- domain_flush_complete(dom);
+ amd_iommu_domain_flush_tlb_pde(dom);
+ amd_iommu_domain_flush_complete(dom);
spin_unlock_irqrestore(&dom->lock, flags);
}
@@ -2799,22 +2293,12 @@ EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
void amd_iommu_domain_direct_map(struct iommu_domain *dom)
{
struct protection_domain *domain = to_pdomain(dom);
- struct domain_pgtable pgtable;
unsigned long flags;
spin_lock_irqsave(&domain->lock, flags);
- /* First save pgtable configuration*/
- amd_iommu_domain_get_pgtable(domain, &pgtable);
-
- /* Remove page-table from domain */
- amd_iommu_domain_clr_pt_root(domain);
-
- /* Make changes visible to IOMMUs */
- update_domain(domain);
-
- /* Page-table is not visible to IOMMU anymore, so free it */
- free_pagetable(&pgtable);
+ if (domain->iop.pgtbl_cfg.tlb)
+ free_io_pgtable_ops(&domain->iop.iop.ops);
spin_unlock_irqrestore(&domain->lock, flags);
}
@@ -2855,7 +2339,7 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
domain->glx = levels;
domain->flags |= PD_IOMMUV2_MASK;
- update_domain(domain);
+ amd_iommu_domain_update(domain);
ret = 0;
@@ -2892,7 +2376,7 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
}
/* Wait until IOMMU TLB flushes are complete */
- domain_flush_complete(domain);
+ amd_iommu_domain_flush_complete(domain);
/* Now flush device TLBs */
list_for_each_entry(dev_data, &domain->dev_list, list) {
@@ -2918,7 +2402,7 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
}
/* Wait until all device TLBs are flushed */
- domain_flush_complete(domain);
+ amd_iommu_domain_flush_complete(domain);
ret = 0;
@@ -3003,11 +2487,9 @@ static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
static int __set_gcr3(struct protection_domain *domain, u32 pasid,
unsigned long cr3)
{
- struct domain_pgtable pgtable;
u64 *pte;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode != PAGE_MODE_NONE)
+ if (domain->iop.mode != PAGE_MODE_NONE)
return -EINVAL;
pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
@@ -3021,11 +2503,9 @@ static int __set_gcr3(struct protection_domain *domain, u32 pasid,
static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
{
- struct domain_pgtable pgtable;
u64 *pte;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode != PAGE_MODE_NONE)
+ if (domain->iop.mode != PAGE_MODE_NONE)
return -EINVAL;
pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
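
Editor's note: the helpers removed from iommu.c above packed the domain page-table root and paging mode into a single 64-bit word, with the mode in the lowest three bits. A minimal sketch of that encoding, equivalent to the dropped amd_iommu_domain_set_pgtable()/amd_iommu_domain_get_pgtable() pair (the function names here are invented for illustration):

	/* Illustration only: pt_root = page-aligned root pointer | 3-bit mode. */
	static inline u64 example_pack_pt_root(u64 *root, int mode)
	{
		return (u64)root | (mode & 7);
	}

	static inline void example_unpack_pt_root(u64 pt_root, u64 **root, int *mode)
	{
		*root = (u64 *)(pt_root & PAGE_MASK);
		*mode = pt_root & 7;	/* lowest 3 bits encode the pgtable mode */
	}
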
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index 5ecc0bc608ec..f8d4ad421e07 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -77,7 +77,7 @@ struct fault {
};
static LIST_HEAD(state_list);
-static spinlock_t state_lock;
+static DEFINE_SPINLOCK(state_lock);
static struct workqueue_struct *iommu_wq;
@@ -938,8 +938,6 @@ static int __init amd_iommu_v2_init(void)
return 0;
}
- spin_lock_init(&state_lock);
-
ret = -ENOMEM;
iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
if (iommu_wq == NULL)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index e13b092e6004..bb251cab61f3 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -182,9 +182,13 @@ static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
unsigned long start, unsigned long end)
{
struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
+ struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
+ size_t size = end - start + 1;
- arm_smmu_atc_inv_domain(smmu_mn->domain, mm->pasid, start,
- end - start + 1);
+ if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
+ arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
+ PAGE_SIZE, false, smmu_domain);
+ arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
}
static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -391,7 +395,7 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
unsigned long reg, fld;
unsigned long oas;
unsigned long asid_bits;
- u32 feat_mask = ARM_SMMU_FEAT_BTM | ARM_SMMU_FEAT_COHERENCY;
+ u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;
if (vabits_actual == 52)
feat_mask |= ARM_SMMU_FEAT_VAX;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 8ca7415d785d..8594b4a83043 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -88,15 +88,6 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
{ 0, NULL},
};
-static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset,
- struct arm_smmu_device *smmu)
-{
- if (offset > SZ_64K)
- return smmu->page1 + offset - SZ_64K;
-
- return smmu->base + offset;
-}
-
static void parse_driver_options(struct arm_smmu_device *smmu)
{
int i = 0;
@@ -272,9 +263,11 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
break;
case CMDQ_OP_TLBI_NH_VA:
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
+ fallthrough;
+ case CMDQ_OP_TLBI_EL2_VA:
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
- cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
@@ -296,6 +289,9 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
case CMDQ_OP_TLBI_S12_VMALL:
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
break;
+ case CMDQ_OP_TLBI_EL2_ASID:
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
+ break;
case CMDQ_OP_ATC_INV:
cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
cmd[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL, ent->atc.global);
@@ -886,7 +882,8 @@ static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
{
struct arm_smmu_cmdq_ent cmd = {
- .opcode = CMDQ_OP_TLBI_NH_ASID,
+ .opcode = smmu->features & ARM_SMMU_FEAT_E2H ?
+ CMDQ_OP_TLBI_EL2_ASID : CMDQ_OP_TLBI_NH_ASID,
.tlbi.asid = asid,
};
@@ -1269,13 +1266,16 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
}
if (s1_cfg) {
+ u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
+ STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
+
BUG_ON(ste_live);
dst[1] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
- FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));
+ FIELD_PREP(STRTAB_STE_1_STRW, strw));
if (smmu->features & ARM_SMMU_FEAT_STALLS &&
!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
@@ -1667,40 +1667,28 @@ static void arm_smmu_tlb_inv_context(void *cookie)
arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
}
-static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
- size_t granule, bool leaf,
- struct arm_smmu_domain *smmu_domain)
+static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
+ unsigned long iova, size_t size,
+ size_t granule,
+ struct arm_smmu_domain *smmu_domain)
{
struct arm_smmu_device *smmu = smmu_domain->smmu;
- unsigned long start = iova, end = iova + size, num_pages = 0, tg = 0;
+ unsigned long end = iova + size, num_pages = 0, tg = 0;
size_t inv_range = granule;
struct arm_smmu_cmdq_batch cmds = {};
- struct arm_smmu_cmdq_ent cmd = {
- .tlbi = {
- .leaf = leaf,
- },
- };
if (!size)
return;
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
- cmd.opcode = CMDQ_OP_TLBI_NH_VA;
- cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
- } else {
- cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
- cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
- }
-
if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
/* Get the leaf page size */
tg = __ffs(smmu_domain->domain.pgsize_bitmap);
/* Convert page size of 12,14,16 (log2) to 1,2,3 */
- cmd.tlbi.tg = (tg - 10) / 2;
+ cmd->tlbi.tg = (tg - 10) / 2;
/* Determine what level the granule is at */
- cmd.tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
+ cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
num_pages = size >> tg;
}
@@ -1718,11 +1706,11 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
/* Determine the power of 2 multiple number of pages */
scale = __ffs(num_pages);
- cmd.tlbi.scale = scale;
+ cmd->tlbi.scale = scale;
/* Determine how many chunks of 2^scale size we have */
num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;
- cmd.tlbi.num = num - 1;
+ cmd->tlbi.num = num - 1;
/* range is num * 2^scale * pgsize */
inv_range = num << (scale + tg);
@@ -1731,17 +1719,54 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
num_pages -= num << scale;
}
- cmd.tlbi.addr = iova;
- arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
+ cmd->tlbi.addr = iova;
+ arm_smmu_cmdq_batch_add(smmu, &cmds, cmd);
iova += inv_range;
}
arm_smmu_cmdq_batch_submit(smmu, &cmds);
+}
+
+static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
+ size_t granule, bool leaf,
+ struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_cmdq_ent cmd = {
+ .tlbi = {
+ .leaf = leaf,
+ },
+ };
+
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
+ CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA;
+ cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
+ } else {
+ cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
+ cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
+ }
+ __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
/*
* Unfortunately, this can't be leaf-only since we may have
* zapped an entire table.
*/
- arm_smmu_atc_inv_domain(smmu_domain, 0, start, size);
+ arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size);
+}
+
+void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
+ size_t granule, bool leaf,
+ struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_cmdq_ent cmd = {
+ .opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
+ CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA,
+ .tlbi = {
+ .asid = asid,
+ .leaf = leaf,
+ },
+ };
+
+ __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
}
static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
@@ -1757,7 +1782,7 @@ static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
- arm_smmu_tlb_inv_range(iova, size, granule, false, cookie);
+ arm_smmu_tlb_inv_range_domain(iova, size, granule, false, cookie);
}
static const struct iommu_flush_ops arm_smmu_flush_ops = {
@@ -2280,8 +2305,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- arm_smmu_tlb_inv_range(gather->start, gather->end - gather->start,
- gather->pgsize, true, smmu_domain);
+ arm_smmu_tlb_inv_range_domain(gather->start,
+ gather->end - gather->start + 1,
+ gather->pgsize, true, smmu_domain);
}
static phys_addr_t
@@ -2611,6 +2637,7 @@ static struct iommu_ops arm_smmu_ops = {
/* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
struct arm_smmu_queue *q,
+ void __iomem *page,
unsigned long prod_off,
unsigned long cons_off,
size_t dwords, const char *name)
@@ -2639,8 +2666,8 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
1 << q->llq.max_n_shift, name);
}
- q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu);
- q->cons_reg = arm_smmu_page1_fixup(cons_off, smmu);
+ q->prod_reg = page + prod_off;
+ q->cons_reg = page + cons_off;
q->ent_dwords = dwords;
q->q_base = Q_BASE_RWA;
@@ -2684,9 +2711,9 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
int ret;
/* cmdq */
- ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
- ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS,
- "cmdq");
+ ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, smmu->base,
+ ARM_SMMU_CMDQ_PROD, ARM_SMMU_CMDQ_CONS,
+ CMDQ_ENT_DWORDS, "cmdq");
if (ret)
return ret;
@@ -2695,9 +2722,9 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
return ret;
/* evtq */
- ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
- ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS,
- "evtq");
+ ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, smmu->page1,
+ ARM_SMMU_EVTQ_PROD, ARM_SMMU_EVTQ_CONS,
+ EVTQ_ENT_DWORDS, "evtq");
if (ret)
return ret;
@@ -2705,9 +2732,9 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
if (!(smmu->features & ARM_SMMU_FEAT_PRI))
return 0;
- return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
- ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS,
- "priq");
+ return arm_smmu_init_one_queue(smmu, &smmu->priq.q, smmu->page1,
+ ARM_SMMU_PRIQ_PROD, ARM_SMMU_PRIQ_CONS,
+ PRIQ_ENT_DWORDS, "priq");
}
static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
@@ -3060,7 +3087,11 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
/* CR2 (random crap) */
- reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
+ reg = CR2_PTM | CR2_RECINVSID;
+
+ if (smmu->features & ARM_SMMU_FEAT_E2H)
+ reg |= CR2_E2H;
+
writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
/* Stream table */
@@ -3099,10 +3130,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
/* Event queue */
writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
- writel_relaxed(smmu->evtq.q.llq.prod,
- arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu));
- writel_relaxed(smmu->evtq.q.llq.cons,
- arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu));
+ writel_relaxed(smmu->evtq.q.llq.prod, smmu->page1 + ARM_SMMU_EVTQ_PROD);
+ writel_relaxed(smmu->evtq.q.llq.cons, smmu->page1 + ARM_SMMU_EVTQ_CONS);
enables |= CR0_EVTQEN;
ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
@@ -3117,9 +3146,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
writeq_relaxed(smmu->priq.q.q_base,
smmu->base + ARM_SMMU_PRIQ_BASE);
writel_relaxed(smmu->priq.q.llq.prod,
- arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu));
+ smmu->page1 + ARM_SMMU_PRIQ_PROD);
writel_relaxed(smmu->priq.q.llq.cons,
- arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu));
+ smmu->page1 + ARM_SMMU_PRIQ_CONS);
enables |= CR0_PRIQEN;
ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
@@ -3221,8 +3250,11 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
smmu->options |= ARM_SMMU_OPT_MSIPOLL;
}
- if (reg & IDR0_HYP)
+ if (reg & IDR0_HYP) {
smmu->features |= ARM_SMMU_FEAT_HYP;
+ if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
+ smmu->features |= ARM_SMMU_FEAT_E2H;
+ }
/*
* The coherency feature as set by FW is used in preference to the ID
@@ -3489,11 +3521,7 @@ err_reset_pci_ops: __maybe_unused;
static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
resource_size_t size)
{
- struct resource res = {
- .flags = IORESOURCE_MEM,
- .start = start,
- .end = start + size - 1,
- };
+ struct resource res = DEFINE_RES_MEM(start, size);
return devm_ioremap_resource(dev, &res);
}
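
Editor's sketch: the TG/TTL/SCALE/NUM arithmetic in __arm_smmu_tlb_inv_range() above is compact, so the helper below repeats the same formulas outside the command-batching loop to make the field encoding easier to follow. The function name is invented, it assumes ARM_SMMU_FEAT_RANGE_INV is set, and it only reflects the first iteration of the batching loop.

	/* Sketch of the range-TLBI field computation used above (illustrative). */
	static void example_encode_range(size_t size, size_t granule,
					 unsigned long pgsize_bitmap,
					 struct arm_smmu_cmdq_ent *cmd)
	{
		unsigned long tg = __ffs(pgsize_bitmap);	/* leaf page size, log2 */
		unsigned long num_pages = size >> tg;
		unsigned long scale, num;

		cmd->tlbi.tg  = (tg - 10) / 2;			/* 12/14/16 -> 1/2/3 */
		cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));

		/* Largest power-of-two run of pages, capped by the NUM field. */
		scale = __ffs(num_pages);
		num   = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;
		cmd->tlbi.scale = scale;
		cmd->tlbi.num   = num - 1;	/* range = num << (scale + tg) pages */
	}
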
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 96c2e9565e00..f985817c967a 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -139,15 +139,15 @@
#define ARM_SMMU_CMDQ_CONS 0x9c
#define ARM_SMMU_EVTQ_BASE 0xa0
-#define ARM_SMMU_EVTQ_PROD 0x100a8
-#define ARM_SMMU_EVTQ_CONS 0x100ac
+#define ARM_SMMU_EVTQ_PROD 0xa8
+#define ARM_SMMU_EVTQ_CONS 0xac
#define ARM_SMMU_EVTQ_IRQ_CFG0 0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1 0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2 0xbc
#define ARM_SMMU_PRIQ_BASE 0xc0
-#define ARM_SMMU_PRIQ_PROD 0x100c8
-#define ARM_SMMU_PRIQ_CONS 0x100cc
+#define ARM_SMMU_PRIQ_PROD 0xc8
+#define ARM_SMMU_PRIQ_CONS 0xcc
#define ARM_SMMU_PRIQ_IRQ_CFG0 0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1 0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2 0xdc
@@ -430,6 +430,8 @@ struct arm_smmu_cmdq_ent {
#define CMDQ_OP_TLBI_NH_ASID 0x11
#define CMDQ_OP_TLBI_NH_VA 0x12
#define CMDQ_OP_TLBI_EL2_ALL 0x20
+ #define CMDQ_OP_TLBI_EL2_ASID 0x21
+ #define CMDQ_OP_TLBI_EL2_VA 0x22
#define CMDQ_OP_TLBI_S12_VMALL 0x28
#define CMDQ_OP_TLBI_S2_IPA 0x2a
#define CMDQ_OP_TLBI_NSNH_ALL 0x30
@@ -604,6 +606,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_RANGE_INV (1 << 15)
#define ARM_SMMU_FEAT_BTM (1 << 16)
#define ARM_SMMU_FEAT_SVA (1 << 17)
+#define ARM_SMMU_FEAT_E2H (1 << 18)
u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
@@ -694,6 +697,9 @@ extern struct arm_smmu_ctx_desc quiet_cd;
int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
struct arm_smmu_ctx_desc *cd);
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
+void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
+ size_t granule, bool leaf,
+ struct arm_smmu_domain *smmu_domain);
bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd);
int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
unsigned long iova, size_t size);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index bcda17012aee..98b3a1c2a181 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -166,6 +166,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,sc7180-mdss" },
{ .compatible = "qcom,sc7180-mss-pil" },
+ { .compatible = "qcom,sc8180x-mdss" },
{ .compatible = "qcom,sdm845-mdss" },
{ .compatible = "qcom,sdm845-mss-pil" },
{ }
@@ -206,6 +207,8 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
+ /* Ignore valid bit for SMR mask extraction. */
+ smr &= ~ARM_SMMU_SMR_VALID;
smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
smmu->smrs[i].valid = true;
@@ -327,10 +330,12 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,msm8998-smmu-v2" },
{ .compatible = "qcom,sc7180-smmu-500" },
+ { .compatible = "qcom,sc8180x-smmu-500" },
{ .compatible = "qcom,sdm630-smmu-v2" },
{ .compatible = "qcom,sdm845-smmu-500" },
{ .compatible = "qcom,sm8150-smmu-500" },
{ .compatible = "qcom,sm8250-smmu-500" },
+ { .compatible = "qcom,sm8350-smmu-500" },
{ }
};
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4078358ed66e..af765c813cc8 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -51,6 +51,8 @@ struct iommu_dma_cookie {
struct iommu_domain *fq_domain;
};
+static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
+
void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
struct iommu_domain *domain)
{
@@ -309,6 +311,11 @@ static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
domain->ops->flush_iotlb_all(domain);
}
+static bool dev_is_untrusted(struct device *dev)
+{
+ return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
+}
+
/**
* iommu_dma_init_domain - Initialise a DMA mapping domain
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -363,8 +370,9 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
init_iova_domain(iovad, 1UL << order, base_pfn);
- if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
- DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
+ if (!cookie->fq_domain && (!dev || !dev_is_untrusted(dev)) &&
+ !iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) &&
+ attr) {
if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
iommu_dma_entry_dtor))
pr_warn("iova flush queue initialization failed\n");
@@ -378,21 +386,6 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
return iova_reserve_iommu_regions(dev, domain);
}
-static int iommu_dma_deferred_attach(struct device *dev,
- struct iommu_domain *domain)
-{
- const struct iommu_ops *ops = domain->ops;
-
- if (!is_kdump_kernel())
- return 0;
-
- if (unlikely(ops->is_attach_deferred &&
- ops->is_attach_deferred(domain, dev)))
- return iommu_attach_device(domain, dev);
-
- return 0;
-}
-
/**
* dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
* page flags.
@@ -521,11 +514,6 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
iova_align(iovad, size), dir, attrs);
}
-static bool dev_is_untrusted(struct device *dev)
-{
- return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
-}
-
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t size, int prot, u64 dma_mask)
{
@@ -535,7 +523,8 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t iova_off = iova_offset(iovad, phys);
dma_addr_t iova;
- if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+ iommu_deferred_attach(dev, domain))
return DMA_MAPPING_ERROR;
size = iova_align(iovad, size + iova_off);
@@ -693,7 +682,8 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
*dma_handle = DMA_MAPPING_ERROR;
- if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+ iommu_deferred_attach(dev, domain))
return NULL;
min_size = alloc_sizes & -alloc_sizes;
@@ -976,7 +966,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
unsigned long mask = dma_get_seg_boundary(dev);
int i;
- if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+ iommu_deferred_attach(dev, domain))
return 0;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
@@ -1197,34 +1188,6 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
return cpu_addr;
}
-#ifdef CONFIG_DMA_REMAP
-static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
-{
- if (!gfpflags_allow_blocking(gfp)) {
- struct page *page;
-
- page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
- if (!page)
- return NULL;
- return page_address(page);
- }
-
- return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
- PAGE_KERNEL, 0);
-}
-
-static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
-{
- __iommu_dma_unmap(dev, handle, size);
- __iommu_dma_free(dev, size, cpu_addr);
-}
-#else
-#define iommu_dma_alloc_noncoherent NULL
-#define iommu_dma_free_noncoherent NULL
-#endif /* CONFIG_DMA_REMAP */
-
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
@@ -1295,8 +1258,6 @@ static const struct dma_map_ops iommu_dma_ops = {
.free = iommu_dma_free,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
- .alloc_noncoherent = iommu_dma_alloc_noncoherent,
- .free_noncoherent = iommu_dma_free_noncoherent,
.mmap = iommu_dma_mmap,
.get_sgtable = iommu_dma_get_sgtable,
.map_page = iommu_dma_map_page,
@@ -1424,6 +1385,9 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
static int iommu_dma_init(void)
{
+ if (is_kdump_kernel())
+ static_branch_enable(&iommu_deferred_attach_enabled);
+
return iova_cache_get();
}
arch_initcall(iommu_dma_init);
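
Editor's sketch of the pattern introduced above: the kdump decision is made once at init time by flipping a static key, so the mapping fast paths only pay for a patched branch instead of calling is_kdump_kernel() on every mapping. Symbol names other than iommu_deferred_attach() are invented for the example.

	/* Generic static-key pattern illustrated by the change above (sketch). */
	static DEFINE_STATIC_KEY_FALSE(example_deferred_attach_enabled);

	static int __init example_init(void)
	{
		if (is_kdump_kernel())		/* decided once, at boot */
			static_branch_enable(&example_deferred_attach_enabled);
		return 0;
	}

	static int example_hot_path(struct device *dev, struct iommu_domain *domain)
	{
		/* Compiles to a no-op branch unless the key was enabled. */
		if (static_branch_unlikely(&example_deferred_attach_enabled) &&
		    iommu_deferred_attach(dev, domain))
			return -ENODEV;
		return 0;
	}
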
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 1d21a0b5f724..e285a220c913 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -20,6 +20,7 @@
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/hypervisor.h>
+#include <asm/mshyperv.h>
#include "irq_remapping.h"
@@ -115,30 +116,43 @@ static const struct irq_domain_ops hyperv_ir_domain_ops = {
.free = hyperv_irq_remapping_free,
};
+static const struct irq_domain_ops hyperv_root_ir_domain_ops;
static int __init hyperv_prepare_irq_remapping(void)
{
struct fwnode_handle *fn;
int i;
+ const char *name;
+ const struct irq_domain_ops *ops;
if (!hypervisor_is_type(X86_HYPER_MS_HYPERV) ||
x86_init.hyper.msi_ext_dest_id() ||
!x2apic_supported())
return -ENODEV;
- fn = irq_domain_alloc_named_id_fwnode("HYPERV-IR", 0);
+ if (hv_root_partition) {
+ name = "HYPERV-ROOT-IR";
+ ops = &hyperv_root_ir_domain_ops;
+ } else {
+ name = "HYPERV-IR";
+ ops = &hyperv_ir_domain_ops;
+ }
+
+ fn = irq_domain_alloc_named_id_fwnode(name, 0);
if (!fn)
return -ENOMEM;
ioapic_ir_domain =
irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
- 0, IOAPIC_REMAPPING_ENTRY, fn,
- &hyperv_ir_domain_ops, NULL);
+ 0, IOAPIC_REMAPPING_ENTRY, fn, ops, NULL);
if (!ioapic_ir_domain) {
irq_domain_free_fwnode(fn);
return -ENOMEM;
}
+ if (hv_root_partition)
+ return 0; /* The rest is only relevant to guests */
+
/*
* Hyper-V doesn't provide irq remapping function for
* IO-APIC and so IO-APIC only accepts 8-bit APIC ID.
@@ -166,4 +180,161 @@ struct irq_remap_ops hyperv_irq_remap_ops = {
.enable = hyperv_enable_irq_remapping,
};
+/* IRQ remapping domain when Linux runs as the root partition */
+struct hyperv_root_ir_data {
+ u8 ioapic_id;
+ bool is_level;
+ struct hv_interrupt_entry entry;
+};
+
+static void
+hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
+{
+ u64 status;
+ u32 vector;
+ struct irq_cfg *cfg;
+ int ioapic_id;
+ struct cpumask *affinity;
+ int cpu;
+ struct hv_interrupt_entry entry;
+ struct hyperv_root_ir_data *data = irq_data->chip_data;
+ struct IO_APIC_route_entry e;
+
+ cfg = irqd_cfg(irq_data);
+ affinity = irq_data_get_effective_affinity_mask(irq_data);
+ cpu = cpumask_first_and(affinity, cpu_online_mask);
+
+ vector = cfg->vector;
+ ioapic_id = data->ioapic_id;
+
+ if (data->entry.source == HV_DEVICE_TYPE_IOAPIC
+ && data->entry.ioapic_rte.as_uint64) {
+ entry = data->entry;
+
+ status = hv_unmap_ioapic_interrupt(ioapic_id, &entry);
+
+ if (status != HV_STATUS_SUCCESS)
+ pr_debug("%s: unexpected unmap status %lld\n", __func__, status);
+
+ data->entry.ioapic_rte.as_uint64 = 0;
+ data->entry.source = 0; /* Invalid source */
+ }
+
+
+ status = hv_map_ioapic_interrupt(ioapic_id, data->is_level, cpu,
+ vector, &entry);
+
+ if (status != HV_STATUS_SUCCESS) {
+ pr_err("%s: map hypercall failed, status %lld\n", __func__, status);
+ return;
+ }
+
+ data->entry = entry;
+
+ /* Turn it into an IO_APIC_route_entry, and generate MSI MSG. */
+ e.w1 = entry.ioapic_rte.low_uint32;
+ e.w2 = entry.ioapic_rte.high_uint32;
+
+ memset(msg, 0, sizeof(*msg));
+ msg->arch_data.vector = e.vector;
+ msg->arch_data.delivery_mode = e.delivery_mode;
+ msg->arch_addr_lo.dest_mode_logical = e.dest_mode_logical;
+ msg->arch_addr_lo.dmar_format = e.ir_format;
+ msg->arch_addr_lo.dmar_index_0_14 = e.ir_index_0_14;
+}
+
+static int hyperv_root_ir_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+{
+ struct irq_data *parent = data->parent_data;
+ struct irq_cfg *cfg = irqd_cfg(data);
+ int ret;
+
+ ret = parent->chip->irq_set_affinity(parent, mask, force);
+ if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
+ return ret;
+
+ send_cleanup_vector(cfg);
+
+ return 0;
+}
+
+static struct irq_chip hyperv_root_ir_chip = {
+ .name = "HYPERV-ROOT-IR",
+ .irq_ack = apic_ack_irq,
+ .irq_set_affinity = hyperv_root_ir_set_affinity,
+ .irq_compose_msi_msg = hyperv_root_ir_compose_msi_msg,
+};
+
+static int hyperv_root_irq_remapping_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *arg)
+{
+ struct irq_alloc_info *info = arg;
+ struct irq_data *irq_data;
+ struct hyperv_root_ir_data *data;
+ int ret = 0;
+
+ if (!info || info->type != X86_IRQ_ALLOC_TYPE_IOAPIC || nr_irqs > 1)
+ return -EINVAL;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+ if (ret < 0)
+ return ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+ return -ENOMEM;
+ }
+
+ irq_data = irq_domain_get_irq_data(domain, virq);
+ if (!irq_data) {
+ kfree(data);
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+ return -EINVAL;
+ }
+
+ data->ioapic_id = info->devid;
+ data->is_level = info->ioapic.is_level;
+
+ irq_data->chip = &hyperv_root_ir_chip;
+ irq_data->chip_data = data;
+
+ return 0;
+}
+
+static void hyperv_root_irq_remapping_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *irq_data;
+ struct hyperv_root_ir_data *data;
+ struct hv_interrupt_entry *e;
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_data = irq_domain_get_irq_data(domain, virq + i);
+
+ if (irq_data && irq_data->chip_data) {
+ data = irq_data->chip_data;
+ e = &data->entry;
+
+ if (e->source == HV_DEVICE_TYPE_IOAPIC
+ && e->ioapic_rte.as_uint64)
+ hv_unmap_ioapic_interrupt(data->ioapic_id,
+ &data->entry);
+
+ kfree(data);
+ }
+ }
+
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops hyperv_root_ir_domain_ops = {
+ .select = hyperv_irq_remapping_select,
+ .alloc = hyperv_root_irq_remapping_alloc,
+ .free = hyperv_root_irq_remapping_free,
+};
+
#endif
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index fb8e1e8c8029..ae236ec7d219 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o
-obj-$(CONFIG_INTEL_IOMMU) += trace.o
+obj-$(CONFIG_DMAR_TABLE) += trace.o cap_audit.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
diff --git a/drivers/iommu/intel/cap_audit.c b/drivers/iommu/intel/cap_audit.c
new file mode 100644
index 000000000000..b12e421a2f1a
--- /dev/null
+++ b/drivers/iommu/intel/cap_audit.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * cap_audit.c - audit iommu capabilities for boot time and hot plug
+ *
+ * Copyright (C) 2021 Intel Corporation
+ *
+ * Author: Kyung Min Park <kyung.min.park@intel.com>
+ * Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+#define pr_fmt(fmt) "DMAR: " fmt
+
+#include <linux/intel-iommu.h>
+#include "cap_audit.h"
+
+static u64 intel_iommu_cap_sanity;
+static u64 intel_iommu_ecap_sanity;
+
+static inline void check_irq_capabilities(struct intel_iommu *a,
+ struct intel_iommu *b)
+{
+ CHECK_FEATURE_MISMATCH(a, b, cap, pi_support, CAP_PI_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, eim_support, ECAP_EIM_MASK);
+}
+
+static inline void check_dmar_capabilities(struct intel_iommu *a,
+ struct intel_iommu *b)
+{
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_MAMV_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_NFR_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_SLLPS_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_FRO_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_MGAW_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_SAGAW_MASK);
+ MINIMAL_FEATURE_IOMMU(b, cap, CAP_NDOMS_MASK);
+ MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_PSS_MASK);
+ MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_MHMV_MASK);
+ MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_IRO_MASK);
+
+ CHECK_FEATURE_MISMATCH(a, b, cap, 5lp_support, CAP_FL5LP_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, fl1gp_support, CAP_FL1GP_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, read_drain, CAP_RD_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, write_drain, CAP_WD_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, pgsel_inv, CAP_PSI_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, zlr, CAP_ZLR_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, caching_mode, CAP_CM_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, phmr, CAP_PHMR_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, plmr, CAP_PLMR_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, rwbf, CAP_RWBF_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, afl, CAP_AFL_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, rps, ECAP_RPS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, smpwc, ECAP_SMPWC_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, flts, ECAP_FLTS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, slts, ECAP_SLTS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, nwfs, ECAP_NWFS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, slads, ECAP_SLADS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, vcs, ECAP_VCS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, smts, ECAP_SMTS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, pds, ECAP_PDS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, dit, ECAP_DIT_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, pasid, ECAP_PASID_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, eafs, ECAP_EAFS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, srs, ECAP_SRS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, ers, ECAP_ERS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, prs, ECAP_PRS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, nest, ECAP_NEST_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, mts, ECAP_MTS_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, sc_support, ECAP_SC_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, pass_through, ECAP_PT_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, dev_iotlb_support, ECAP_DT_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, qis, ECAP_QI_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, ecap, coherent, ECAP_C_MASK);
+}
+
+static int cap_audit_hotplug(struct intel_iommu *iommu, enum cap_audit_type type)
+{
+ bool mismatch = false;
+ u64 old_cap = intel_iommu_cap_sanity;
+ u64 old_ecap = intel_iommu_ecap_sanity;
+
+ if (type == CAP_AUDIT_HOTPLUG_IRQR) {
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, pi_support, CAP_PI_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, eim_support, ECAP_EIM_MASK);
+ goto out;
+ }
+
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, 5lp_support, CAP_FL5LP_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl1gp_support, CAP_FL1GP_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, read_drain, CAP_RD_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, write_drain, CAP_WD_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, pgsel_inv, CAP_PSI_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, zlr, CAP_ZLR_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, caching_mode, CAP_CM_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, phmr, CAP_PHMR_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, plmr, CAP_PLMR_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, rwbf, CAP_RWBF_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, afl, CAP_AFL_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, rps, ECAP_RPS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, smpwc, ECAP_SMPWC_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, flts, ECAP_FLTS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, slts, ECAP_SLTS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, nwfs, ECAP_NWFS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, slads, ECAP_SLADS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, vcs, ECAP_VCS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, smts, ECAP_SMTS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pds, ECAP_PDS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, dit, ECAP_DIT_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pasid, ECAP_PASID_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, eafs, ECAP_EAFS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, srs, ECAP_SRS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, ers, ECAP_ERS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, prs, ECAP_PRS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, nest, ECAP_NEST_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, mts, ECAP_MTS_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, sc_support, ECAP_SC_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pass_through, ECAP_PT_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, dev_iotlb_support, ECAP_DT_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, qis, ECAP_QI_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, coherent, ECAP_C_MASK);
+
+ /* Abort hot plug if the hot plug iommu feature is smaller than global */
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, max_amask_val, CAP_MAMV_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, num_fault_regs, CAP_NFR_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, super_page_val, CAP_SLLPS_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, fault_reg_offset, CAP_FRO_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, mgaw, CAP_MGAW_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, sagaw, CAP_SAGAW_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, cap, ndoms, CAP_NDOMS_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, ecap, pss, ECAP_PSS_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, ecap, max_handle_mask, ECAP_MHMV_MASK, mismatch);
+ MINIMAL_FEATURE_HOTPLUG(iommu, ecap, iotlb_offset, ECAP_IRO_MASK, mismatch);
+
+out:
+ if (mismatch) {
+ intel_iommu_cap_sanity = old_cap;
+ intel_iommu_ecap_sanity = old_ecap;
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type)
+{
+ struct dmar_drhd_unit *d;
+ struct intel_iommu *i;
+
+ rcu_read_lock();
+ if (list_empty(&dmar_drhd_units))
+ goto out;
+
+ for_each_active_iommu(i, d) {
+ if (!iommu) {
+ intel_iommu_ecap_sanity = i->ecap;
+ intel_iommu_cap_sanity = i->cap;
+ iommu = i;
+ continue;
+ }
+
+ if (type == CAP_AUDIT_STATIC_DMAR)
+ check_dmar_capabilities(iommu, i);
+ else
+ check_irq_capabilities(iommu, i);
+ }
+
+out:
+ rcu_read_unlock();
+ return 0;
+}
+
+int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu)
+{
+ switch (type) {
+ case CAP_AUDIT_STATIC_DMAR:
+ case CAP_AUDIT_STATIC_IRQR:
+ return cap_audit_static(iommu, type);
+ case CAP_AUDIT_HOTPLUG_DMAR:
+ case CAP_AUDIT_HOTPLUG_IRQR:
+ return cap_audit_hotplug(iommu, type);
+ default:
+ break;
+ }
+
+ return -EFAULT;
+}
+
+bool intel_cap_smts_sanity(void)
+{
+ return ecap_smts(intel_iommu_ecap_sanity);
+}
+
+bool intel_cap_pasid_sanity(void)
+{
+ return ecap_pasid(intel_iommu_ecap_sanity);
+}
+
+bool intel_cap_nest_sanity(void)
+{
+ return ecap_nest(intel_iommu_ecap_sanity);
+}
+
+bool intel_cap_flts_sanity(void)
+{
+ return ecap_flts(intel_iommu_ecap_sanity);
+}
diff --git a/drivers/iommu/intel/cap_audit.h b/drivers/iommu/intel/cap_audit.h
new file mode 100644
index 000000000000..74cfccae0e81
--- /dev/null
+++ b/drivers/iommu/intel/cap_audit.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * cap_audit.h - audit iommu capabilities header
+ *
+ * Copyright (C) 2021 Intel Corporation
+ *
+ * Author: Kyung Min Park <kyung.min.park@intel.com>
+ */
+
+/*
+ * Capability Register Mask
+ */
+#define CAP_FL5LP_MASK BIT_ULL(60)
+#define CAP_PI_MASK BIT_ULL(59)
+#define CAP_FL1GP_MASK BIT_ULL(56)
+#define CAP_RD_MASK BIT_ULL(55)
+#define CAP_WD_MASK BIT_ULL(54)
+#define CAP_MAMV_MASK GENMASK_ULL(53, 48)
+#define CAP_NFR_MASK GENMASK_ULL(47, 40)
+#define CAP_PSI_MASK BIT_ULL(39)
+#define CAP_SLLPS_MASK GENMASK_ULL(37, 34)
+#define CAP_FRO_MASK GENMASK_ULL(33, 24)
+#define CAP_ZLR_MASK BIT_ULL(22)
+#define CAP_MGAW_MASK GENMASK_ULL(21, 16)
+#define CAP_SAGAW_MASK GENMASK_ULL(12, 8)
+#define CAP_CM_MASK BIT_ULL(7)
+#define CAP_PHMR_MASK BIT_ULL(6)
+#define CAP_PLMR_MASK BIT_ULL(5)
+#define CAP_RWBF_MASK BIT_ULL(4)
+#define CAP_AFL_MASK BIT_ULL(3)
+#define CAP_NDOMS_MASK GENMASK_ULL(2, 0)
+
+/*
+ * Extended Capability Register Mask
+ */
+#define ECAP_RPS_MASK BIT_ULL(49)
+#define ECAP_SMPWC_MASK BIT_ULL(48)
+#define ECAP_FLTS_MASK BIT_ULL(47)
+#define ECAP_SLTS_MASK BIT_ULL(46)
+#define ECAP_SLADS_MASK BIT_ULL(45)
+#define ECAP_VCS_MASK BIT_ULL(44)
+#define ECAP_SMTS_MASK BIT_ULL(43)
+#define ECAP_PDS_MASK BIT_ULL(42)
+#define ECAP_DIT_MASK BIT_ULL(41)
+#define ECAP_PASID_MASK BIT_ULL(40)
+#define ECAP_PSS_MASK GENMASK_ULL(39, 35)
+#define ECAP_EAFS_MASK BIT_ULL(34)
+#define ECAP_NWFS_MASK BIT_ULL(33)
+#define ECAP_SRS_MASK BIT_ULL(31)
+#define ECAP_ERS_MASK BIT_ULL(30)
+#define ECAP_PRS_MASK BIT_ULL(29)
+#define ECAP_NEST_MASK BIT_ULL(26)
+#define ECAP_MTS_MASK BIT_ULL(25)
+#define ECAP_MHMV_MASK GENMASK_ULL(23, 20)
+#define ECAP_IRO_MASK GENMASK_ULL(17, 8)
+#define ECAP_SC_MASK BIT_ULL(7)
+#define ECAP_PT_MASK BIT_ULL(6)
+#define ECAP_EIM_MASK BIT_ULL(4)
+#define ECAP_DT_MASK BIT_ULL(2)
+#define ECAP_QI_MASK BIT_ULL(1)
+#define ECAP_C_MASK BIT_ULL(0)
+
+/*
+ * u64 intel_iommu_cap_sanity, intel_iommu_ecap_sanity will be adjusted as each
+ * IOMMU gets audited.
+ */
+#define DO_CHECK_FEATURE_MISMATCH(a, b, cap, feature, MASK) \
+do { \
+ if (cap##_##feature(a) != cap##_##feature(b)) { \
+ intel_iommu_##cap##_sanity &= ~(MASK); \
+ pr_info("IOMMU feature %s inconsistent", #feature); \
+ } \
+} while (0)
+
+#define CHECK_FEATURE_MISMATCH(a, b, cap, feature, MASK) \
+ DO_CHECK_FEATURE_MISMATCH((a)->cap, (b)->cap, cap, feature, MASK)
+
+#define CHECK_FEATURE_MISMATCH_HOTPLUG(b, cap, feature, MASK) \
+do { \
+ if (cap##_##feature(intel_iommu_##cap##_sanity)) \
+ DO_CHECK_FEATURE_MISMATCH(intel_iommu_##cap##_sanity, \
+ (b)->cap, cap, feature, MASK); \
+} while (0)
+
+#define MINIMAL_FEATURE_IOMMU(iommu, cap, MASK) \
+do { \
+ u64 min_feature = intel_iommu_##cap##_sanity & (MASK); \
+ min_feature = min_t(u64, min_feature, (iommu)->cap & (MASK)); \
+ intel_iommu_##cap##_sanity = (intel_iommu_##cap##_sanity & ~(MASK)) | \
+ min_feature; \
+} while (0)
+
+#define MINIMAL_FEATURE_HOTPLUG(iommu, cap, feature, MASK, mismatch) \
+do { \
+ if ((intel_iommu_##cap##_sanity & (MASK)) > \
+ (cap##_##feature((iommu)->cap))) \
+ mismatch = true; \
+ else \
+ (iommu)->cap = ((iommu)->cap & ~(MASK)) | \
+ (intel_iommu_##cap##_sanity & (MASK)); \
+} while (0)
+
+enum cap_audit_type {
+ CAP_AUDIT_STATIC_DMAR,
+ CAP_AUDIT_STATIC_IRQR,
+ CAP_AUDIT_HOTPLUG_DMAR,
+ CAP_AUDIT_HOTPLUG_IRQR,
+};
+
+bool intel_cap_smts_sanity(void);
+bool intel_cap_pasid_sanity(void);
+bool intel_cap_nest_sanity(void);
+bool intel_cap_flts_sanity(void);
+
+static inline bool scalable_mode_support(void)
+{
+ return (intel_iommu_sm && intel_cap_smts_sanity());
+}
+
+static inline bool pasid_mode_support(void)
+{
+ return scalable_mode_support() && intel_cap_pasid_sanity();
+}
+
+static inline bool nested_mode_support(void)
+{
+ return scalable_mode_support() && intel_cap_nest_sanity();
+}
+
+int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu);
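For orientation, a minimal userspace sketch of the narrowing these audit macros implement: per-unit comparisons only ever clear bits in the shared sanity value, so a capability survives the audit only if every IOMMU reports it. The mask value, sample register contents, and demo_-prefixed names below are local stand-ins, not the driver's real ECAP layout.

#include <stdint.h>
#include <stdio.h>

#define DEMO_SMTS_MASK (1ULL << 43)   /* stand-in for one extended-capability bit */

static uint64_t demo_ecap_sanity;     /* plays the role of intel_iommu_ecap_sanity */

/* Mirrors the DO_CHECK_FEATURE_MISMATCH idea: a differing unit drops the bit. */
static void demo_check_mismatch(uint64_t baseline, uint64_t ecap)
{
	if ((baseline & DEMO_SMTS_MASK) != (ecap & DEMO_SMTS_MASK)) {
		demo_ecap_sanity &= ~DEMO_SMTS_MASK;
		printf("feature smts inconsistent\n");
	}
}

int main(void)
{
	/* Two fake units: the first supports the feature, the second does not. */
	uint64_t ecaps[] = { DEMO_SMTS_MASK, 0 };

	demo_ecap_sanity = ecaps[0];          /* first unit seeds the baseline */
	for (int i = 1; i < 2; i++)
		demo_check_mismatch(ecaps[0], ecaps[i]);

	printf("smts usable on all units: %s\n",
	       (demo_ecap_sanity & DEMO_SMTS_MASK) ? "yes" : "no");
	return 0;
}

Callers such as scalable_mode_support() above can then test the single audited value instead of re-walking the DRHD list for every query.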
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 004feaed3c72..d5c51b5c20af 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -31,6 +31,7 @@
#include <linux/limits.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>
+#include <trace/events/intel_iommu.h>
#include "../irq_remapping.h"
@@ -525,6 +526,7 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
struct acpi_dmar_reserved_memory *rmrr;
struct acpi_dmar_atsr *atsr;
struct acpi_dmar_rhsa *rhsa;
+ struct acpi_dmar_satc *satc;
switch (header->type) {
case ACPI_DMAR_TYPE_HARDWARE_UNIT:
@@ -554,6 +556,10 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
/* We don't print this here because we need to sanity-check
it first. So print it in dmar_parse_one_andd() instead. */
break;
+ case ACPI_DMAR_TYPE_SATC:
+ satc = container_of(header, struct acpi_dmar_satc, header);
+ pr_info("SATC flags: 0x%x\n", satc->flags);
+ break;
}
}
@@ -641,6 +647,7 @@ parse_dmar_table(void)
.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
+ .cb[ACPI_DMAR_TYPE_SATC] = &dmar_parse_one_satc,
};
/*
@@ -1307,6 +1314,8 @@ restart:
offset = ((index + i) % QI_LENGTH) << shift;
memcpy(qi->desc + offset, &desc[i], 1 << shift);
qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE;
+ trace_qi_submit(iommu, desc[i].qw0, desc[i].qw1,
+ desc[i].qw2, desc[i].qw3);
}
qi->desc_status[wait_index] = QI_IN_USE;
@@ -1496,7 +1505,7 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
* Max Invs Pending (MIP) is set to 0 for now until we have DIT in
* ECAP.
*/
- if (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0))
+ if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
addr, size_order);
@@ -2074,6 +2083,7 @@ static guid_t dmar_hp_guid =
#define DMAR_DSM_FUNC_DRHD 1
#define DMAR_DSM_FUNC_ATSR 2
#define DMAR_DSM_FUNC_RHSA 3
+#define DMAR_DSM_FUNC_SATC 4
static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
@@ -2091,6 +2101,7 @@ static int dmar_walk_dsm_resource(acpi_handle handle, int func,
[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
+ [DMAR_DSM_FUNC_SATC] = ACPI_DMAR_TYPE_SATC,
};
if (!dmar_detect_dsm(handle, func))
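The qi_flush_dev_iotlb_pasid() hunk above replaces an open-coded mask test with IS_ALIGNED(). A standalone illustration of the difference, assuming VTD_PAGE_SHIFT is 12 and with GENMASK_ULL()/IS_ALIGNED() re-created locally so the example compiles outside the kernel:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l)  (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define IS_ALIGNED(x, a)   (((x) & ((a) - 1)) == 0)

#define VTD_PAGE_SHIFT 12
#define VTD_PAGE_SIZE  (1ULL << VTD_PAGE_SHIFT)

int main(void)
{
	uint64_t addr = 0x1000;		/* 4 KiB aligned, bit 12 set */
	int size_order = 0;		/* invalidation covers a single 4 KiB page */

	/* Old check: the mask includes bit (size_order + 12) as well. */
	int old_warns = !!(addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0));
	/* New check: only the bits below the invalidation size must be zero. */
	int new_warns = !IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order);

	printf("old check warns: %d, new check warns: %d\n",
	       old_warns, new_warns);	/* prints: 1, 0 */
	return 0;
}

The old expression tested bits 0 through size_order + 12 inclusive, one bit more than the alignment actually requires, so a correctly aligned address whose bit just above the invalidation size happened to be set was reported as misaligned.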
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index f665322a0991..ee0932307d64 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -44,10 +44,10 @@
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
-#include <trace/events/intel_iommu.h>
#include "../irq_remapping.h"
#include "pasid.h"
+#include "cap_audit.h"
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
@@ -316,8 +316,18 @@ struct dmar_atsr_unit {
u8 include_all:1; /* include all ports */
};
+struct dmar_satc_unit {
+ struct list_head list; /* list of SATC units */
+ struct acpi_dmar_header *hdr; /* ACPI header */
+ struct dmar_dev_scope *devices; /* target devices */
+ struct intel_iommu *iommu; /* the corresponding iommu */
+ int devices_cnt; /* target device count */
+ u8 atc_required:1; /* ATS is required */
+};
+
static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);
+static LIST_HEAD(dmar_satc_units);
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
@@ -1017,8 +1027,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
- if (domain_use_first_level(domain))
+ if (domain_use_first_level(domain)) {
pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
+ if (domain->domain.type == IOMMU_DOMAIN_DMA)
+ pteval |= DMA_FL_PTE_ACCESS;
+ }
if (cmpxchg64(&pte->val, 0ULL, pteval))
/* Someone else set it while we were thinking; use theirs. */
free_pgtable_page(tmp_page);
@@ -1861,25 +1874,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
*/
static bool first_level_by_default(void)
{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- static int first_level_support = -1;
-
- if (likely(first_level_support != -1))
- return first_level_support;
-
- first_level_support = 1;
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) {
- first_level_support = 0;
- break;
- }
- }
- rcu_read_unlock();
-
- return first_level_support;
+ return scalable_mode_support() && intel_cap_flts_sanity();
}
static struct dmar_domain *alloc_domain(int flags)
@@ -2298,9 +2293,9 @@ static int
__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long phys_pfn, unsigned long nr_pages, int prot)
{
- struct dma_pte *first_pte = NULL, *pte = NULL;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
+ struct dma_pte *pte = NULL;
phys_addr_t pteval;
u64 attr;
@@ -2310,9 +2305,16 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
return -EINVAL;
attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
- if (domain_use_first_level(domain))
+ if (domain_use_first_level(domain)) {
attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
+ if (domain->domain.type == IOMMU_DOMAIN_DMA) {
+ attr |= DMA_FL_PTE_ACCESS;
+ if (prot & DMA_PTE_WRITE)
+ attr |= DMA_FL_PTE_DIRTY;
+ }
+ }
+
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
while (nr_pages > 0) {
@@ -2322,7 +2324,7 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
phys_pfn, nr_pages);
- first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
+ pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
if (!pte)
return -ENOMEM;
/* It is large page*/
@@ -2383,34 +2385,14 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
* recalculate 'pte' and switch back to smaller pages for the
* end of the mapping, if the trailing size is not enough to
* use another superpage (i.e. nr_pages < lvl_pages).
+ *
+ * We leave clflush for the leaf pte changes to iotlb_sync_map()
+ * callback.
*/
pte++;
if (!nr_pages || first_pte_in_page(pte) ||
- (largepage_lvl > 1 && nr_pages < lvl_pages)) {
- domain_flush_cache(domain, first_pte,
- (void *)pte - (void *)first_pte);
+ (largepage_lvl > 1 && nr_pages < lvl_pages))
pte = NULL;
- }
- }
-
- return 0;
-}
-
-static int
-domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- unsigned long phys_pfn, unsigned long nr_pages, int prot)
-{
- int iommu_id, ret;
- struct intel_iommu *iommu;
-
- /* Do the real mapping first */
- ret = __domain_mapping(domain, iov_pfn, phys_pfn, nr_pages, prot);
- if (ret)
- return ret;
-
- for_each_domain_iommu(iommu_id, domain) {
- iommu = g_iommus[iommu_id];
- __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
}
return 0;
@@ -3197,6 +3179,10 @@ static int __init init_dmars(void)
goto error;
}
+ ret = intel_cap_audit(CAP_AUDIT_STATIC_DMAR, NULL);
+ if (ret)
+ goto free_iommu;
+
for_each_iommu(iommu, drhd) {
if (drhd->ignored) {
iommu_disable_translation(iommu);
@@ -3740,6 +3726,57 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
return 0;
}
+static struct dmar_satc_unit *dmar_find_satc(struct acpi_dmar_satc *satc)
+{
+ struct dmar_satc_unit *satcu;
+ struct acpi_dmar_satc *tmp;
+
+ list_for_each_entry_rcu(satcu, &dmar_satc_units, list,
+ dmar_rcu_check()) {
+ tmp = (struct acpi_dmar_satc *)satcu->hdr;
+ if (satc->segment != tmp->segment)
+ continue;
+ if (satc->header.length != tmp->header.length)
+ continue;
+ if (memcmp(satc, tmp, satc->header.length) == 0)
+ return satcu;
+ }
+
+ return NULL;
+}
+
+int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg)
+{
+ struct acpi_dmar_satc *satc;
+ struct dmar_satc_unit *satcu;
+
+ if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
+ return 0;
+
+ satc = container_of(hdr, struct acpi_dmar_satc, header);
+ satcu = dmar_find_satc(satc);
+ if (satcu)
+ return 0;
+
+ satcu = kzalloc(sizeof(*satcu) + hdr->length, GFP_KERNEL);
+ if (!satcu)
+ return -ENOMEM;
+
+ satcu->hdr = (void *)(satcu + 1);
+ memcpy(satcu->hdr, hdr, hdr->length);
+ satcu->atc_required = satc->flags & 0x1;
+ satcu->devices = dmar_alloc_dev_scope((void *)(satc + 1),
+ (void *)satc + satc->header.length,
+ &satcu->devices_cnt);
+ if (satcu->devices_cnt && !satcu->devices) {
+ kfree(satcu);
+ return -ENOMEM;
+ }
+ list_add_rcu(&satcu->list, &dmar_satc_units);
+
+ return 0;
+}
+
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
int sp, ret;
@@ -3748,6 +3785,10 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
if (g_iommus[iommu->seq_id])
return 0;
+ ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu);
+ if (ret)
+ goto out;
+
if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
pr_warn("%s: Doesn't support hardware pass through.\n",
iommu->name);
@@ -3843,6 +3884,7 @@ static void intel_iommu_free_dmars(void)
{
struct dmar_rmrr_unit *rmrru, *rmrr_n;
struct dmar_atsr_unit *atsru, *atsr_n;
+ struct dmar_satc_unit *satcu, *satc_n;
list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
list_del(&rmrru->list);
@@ -3854,6 +3896,11 @@ static void intel_iommu_free_dmars(void)
list_del(&atsru->list);
intel_iommu_free_atsr(atsru);
}
+ list_for_each_entry_safe(satcu, satc_n, &dmar_satc_units, list) {
+ list_del(&satcu->list);
+ dmar_free_dev_scope(&satcu->devices, &satcu->devices_cnt);
+ kfree(satcu);
+ }
}
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
@@ -3905,8 +3952,10 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
int ret;
struct dmar_rmrr_unit *rmrru;
struct dmar_atsr_unit *atsru;
+ struct dmar_satc_unit *satcu;
struct acpi_dmar_atsr *atsr;
struct acpi_dmar_reserved_memory *rmrr;
+ struct acpi_dmar_satc *satc;
if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
return 0;
@@ -3947,6 +3996,23 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
break;
}
}
+ list_for_each_entry(satcu, &dmar_satc_units, list) {
+ satc = container_of(satcu->hdr, struct acpi_dmar_satc, header);
+ if (info->event == BUS_NOTIFY_ADD_DEVICE) {
+ ret = dmar_insert_dev_scope(info, (void *)(satc + 1),
+ (void *)satc + satc->header.length,
+ satc->segment, satcu->devices,
+ satcu->devices_cnt);
+ if (ret > 0)
+ break;
+ else if (ret < 0)
+ return ret;
+ } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
+ if (dmar_remove_dev_scope(info, satc->segment,
+ satcu->devices, satcu->devices_cnt))
+ break;
+ }
+ }
return 0;
}
@@ -4290,6 +4356,9 @@ int __init intel_iommu_init(void)
if (list_empty(&dmar_atsr_units))
pr_info("No ATSR found\n");
+ if (list_empty(&dmar_satc_units))
+ pr_info("No SATC found\n");
+
if (dmar_map_gfx)
intel_iommu_gfx_mapped = 1;
@@ -4943,7 +5012,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
u64 max_addr;
int prot = 0;
- int ret;
if (iommu_prot & IOMMU_READ)
prot |= DMA_PTE_READ;
@@ -4969,9 +5037,8 @@ static int intel_iommu_map(struct iommu_domain *domain,
/* Round up size to next multiple of PAGE_SIZE, if it and
the low bits of hpa would take us onto the next page */
size = aligned_nrpages(hpa, size);
- ret = domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
- hpa >> VTD_PAGE_SHIFT, size, prot);
- return ret;
+ return __domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
+ hpa >> VTD_PAGE_SHIFT, size, prot);
}
static size_t intel_iommu_unmap(struct iommu_domain *domain,
@@ -5040,60 +5107,6 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
return phys;
}
-static inline bool scalable_mode_support(void)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- bool ret = true;
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!sm_supported(iommu)) {
- ret = false;
- break;
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
-static inline bool iommu_pasid_support(void)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- bool ret = true;
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!pasid_supported(iommu)) {
- ret = false;
- break;
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
-static inline bool nested_mode_support(void)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- bool ret = true;
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!sm_supported(iommu) || !ecap_nest(iommu->ecap)) {
- ret = false;
- break;
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
static bool intel_iommu_capable(enum iommu_cap cap)
{
if (cap == IOMMU_CAP_CACHE_COHERENCY)
@@ -5334,7 +5347,7 @@ intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
int ret;
if (!dev_is_pci(dev) || dmar_disabled ||
- !scalable_mode_support() || !iommu_pasid_support())
+ !scalable_mode_support() || !pasid_mode_support())
return false;
ret = pci_pasid_features(to_pci_dev(dev));
@@ -5440,6 +5453,36 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain,
return ret;
}
+static bool domain_use_flush_queue(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ bool r = true;
+
+ if (intel_iommu_strict)
+ return false;
+
+ /*
+ * The flush queue implementation does not perform page-selective
+ * invalidations that are required for efficient TLB flushes in virtual
+ * environments. The benefit of batching is likely to be much lower than
+ * the overhead of synchronizing the virtual and physical IOMMU
+ * page-tables.
+ */
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!cap_caching_mode(iommu->cap))
+ continue;
+
+ pr_warn_once("IOMMU batching is disabled due to virtualization");
+ r = false;
+ break;
+ }
+ rcu_read_unlock();
+
+ return r;
+}
+
static int
intel_iommu_domain_get_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
@@ -5450,7 +5493,7 @@ intel_iommu_domain_get_attr(struct iommu_domain *domain,
case IOMMU_DOMAIN_DMA:
switch (attr) {
case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
- *(int *)data = !intel_iommu_strict;
+ *(int *)data = domain_use_flush_queue();
return 0;
default:
return -ENODEV;
@@ -5478,6 +5521,57 @@ static bool risky_device(struct pci_dev *pdev)
return false;
}
+static void clflush_sync_map(struct dmar_domain *domain, unsigned long clf_pfn,
+ unsigned long clf_pages)
+{
+ struct dma_pte *first_pte = NULL, *pte = NULL;
+ unsigned long lvl_pages = 0;
+ int level = 0;
+
+ while (clf_pages > 0) {
+ if (!pte) {
+ level = 0;
+ pte = pfn_to_dma_pte(domain, clf_pfn, &level);
+ if (WARN_ON(!pte))
+ return;
+ first_pte = pte;
+ lvl_pages = lvl_to_nr_pages(level);
+ }
+
+ if (WARN_ON(!lvl_pages || clf_pages < lvl_pages))
+ return;
+
+ clf_pages -= lvl_pages;
+ clf_pfn += lvl_pages;
+ pte++;
+
+ if (!clf_pages || first_pte_in_page(pte) ||
+ (level > 1 && clf_pages < lvl_pages)) {
+ domain_flush_cache(domain, first_pte,
+ (void *)pte - (void *)first_pte);
+ pte = NULL;
+ }
+ }
+}
+
+static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long pages = aligned_nrpages(iova, size);
+ unsigned long pfn = iova >> VTD_PAGE_SHIFT;
+ struct intel_iommu *iommu;
+ int iommu_id;
+
+ if (!dmar_domain->iommu_coherency)
+ clflush_sync_map(dmar_domain, pfn, pages);
+
+ for_each_domain_iommu(iommu_id, dmar_domain) {
+ iommu = g_iommus[iommu_id];
+ __mapping_notify_one(iommu, dmar_domain, pfn, pages);
+ }
+}
+
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
@@ -5490,6 +5584,7 @@ const struct iommu_ops intel_iommu_ops = {
.aux_detach_dev = intel_iommu_aux_detach_device,
.aux_get_pasid = intel_iommu_aux_get_pasid,
.map = intel_iommu_map,
+ .iotlb_sync_map = intel_iommu_iotlb_sync_map,
.unmap = intel_iommu_unmap,
.flush_iotlb_all = intel_flush_iotlb_all,
.iotlb_sync = intel_iommu_tlb_sync,
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 685200a5cff0..611ef5243cb6 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -22,6 +22,7 @@
#include <asm/pci-direct.h>
#include "../irq_remapping.h"
+#include "cap_audit.h"
enum irq_mode {
IRQ_REMAPPING,
@@ -734,6 +735,9 @@ static int __init intel_prepare_irq_remapping(void)
if (dmar_table_init() < 0)
return -ENODEV;
+ if (intel_cap_audit(CAP_AUDIT_STATIC_IRQR, NULL))
+ goto error;
+
if (!dmar_ir_support())
return -ENODEV;
@@ -1439,6 +1443,10 @@ static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
int ret;
int eim = x2apic_enabled();
+ ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_IRQR, iommu);
+ if (ret)
+ return ret;
+
if (eim && !ecap_eim_support(iommu->ecap)) {
pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
iommu->reg_phys, iommu->ecap);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index b92af83b79bd..f26cb6195b2c 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -457,20 +457,6 @@ pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
}
static void
-iotlb_invalidation_with_pasid(struct intel_iommu *iommu, u16 did, u32 pasid)
-{
- struct qi_desc desc;
-
- desc.qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
- QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
- desc.qw1 = 0;
- desc.qw2 = 0;
- desc.qw3 = 0;
-
- qi_submit_sync(iommu, &desc, 1, 0);
-}
-
-static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
struct device *dev, u32 pasid)
{
@@ -514,7 +500,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
clflush_cache_range(pte, sizeof(*pte));
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
- iotlb_invalidation_with_pasid(iommu, did, pasid);
+ qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
/* Device IOTLB doesn't need to be flushed in caching mode. */
if (!cap_caching_mode(iommu->cap))
@@ -530,7 +516,7 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
if (cap_caching_mode(iommu->cap)) {
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
- iotlb_invalidation_with_pasid(iommu, did, pasid);
+ qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
} else {
iommu_flush_write_buffer(iommu);
}
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 97dfcffbf495..444c0bec221a 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -30,8 +30,8 @@
#define VCMD_VRSP_IP 0x1
#define VCMD_VRSP_SC(e) (((e) >> 1) & 0x3)
#define VCMD_VRSP_SC_SUCCESS 0
-#define VCMD_VRSP_SC_NO_PASID_AVAIL 1
-#define VCMD_VRSP_SC_INVALID_PASID 1
+#define VCMD_VRSP_SC_NO_PASID_AVAIL 2
+#define VCMD_VRSP_SC_INVALID_PASID 2
#define VCMD_VRSP_RESULT_PASID(e) (((e) >> 8) & 0xfffff)
#define VCMD_CMD_OPERAND(e) ((e) << 8)
/*
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 18a9f05df407..574a7e657a9a 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -123,53 +123,16 @@ static void __flush_svm_range_dev(struct intel_svm *svm,
unsigned long address,
unsigned long pages, int ih)
{
- struct qi_desc desc;
+ struct device_domain_info *info = get_domain_info(sdev->dev);
- if (pages == -1) {
- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
- QI_EIOTLB_DID(sdev->did) |
- QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
- QI_EIOTLB_TYPE;
- desc.qw1 = 0;
- } else {
- int mask = ilog2(__roundup_pow_of_two(pages));
-
- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
- QI_EIOTLB_DID(sdev->did) |
- QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
- QI_EIOTLB_TYPE;
- desc.qw1 = QI_EIOTLB_ADDR(address) |
- QI_EIOTLB_IH(ih) |
- QI_EIOTLB_AM(mask);
- }
- desc.qw2 = 0;
- desc.qw3 = 0;
- qi_submit_sync(sdev->iommu, &desc, 1, 0);
-
- if (sdev->dev_iotlb) {
- desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
- QI_DEV_EIOTLB_SID(sdev->sid) |
- QI_DEV_EIOTLB_QDEP(sdev->qdep) |
- QI_DEIOTLB_TYPE;
- if (pages == -1) {
- desc.qw1 = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) |
- QI_DEV_EIOTLB_SIZE;
- } else if (pages > 1) {
- /* The least significant zero bit indicates the size. So,
- * for example, an "address" value of 0x12345f000 will
- * flush from 0x123440000 to 0x12347ffff (256KiB). */
- unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
- unsigned long mask = __rounddown_pow_of_two(address ^ last);
-
- desc.qw1 = QI_DEV_EIOTLB_ADDR((address & ~mask) |
- (mask - 1)) | QI_DEV_EIOTLB_SIZE;
- } else {
- desc.qw1 = QI_DEV_EIOTLB_ADDR(address);
- }
- desc.qw2 = 0;
- desc.qw3 = 0;
- qi_submit_sync(sdev->iommu, &desc, 1, 0);
- }
+ if (WARN_ON(!pages))
+ return;
+
+ qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
+ if (info->ats_enabled)
+ qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
+ svm->pasid, sdev->qdep, address,
+ order_base_2(pages));
}
static void intel_flush_svm_range_dev(struct intel_svm *svm,
@@ -948,10 +911,8 @@ static irqreturn_t prq_event_thread(int irq, void *d)
u64 address;
handled = 1;
-
req = &iommu->prq[head / sizeof(*req)];
-
- result = QI_RESP_FAILURE;
+ result = QI_RESP_INVALID;
address = (u64)req->addr << VTD_PAGE_SHIFT;
if (!req->pasid_present) {
pr_err("%s: Page request without PASID: %08llx %08llx\n",
@@ -989,7 +950,6 @@ static irqreturn_t prq_event_thread(int irq, void *d)
rcu_read_unlock();
}
- result = QI_RESP_INVALID;
/* Since we're using init_mm.pgd directly, we should never take
* any faults on kernel addresses. */
if (!svm->mm)
@@ -1079,8 +1039,17 @@ prq_advance:
* Clear the page request overflow bit and wake up all threads that
* are waiting for the completion of this handling.
*/
- if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO)
- writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
+ if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
+ pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n",
+ iommu->name);
+ head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
+ tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
+ if (head == tail) {
+ writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
+ pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared",
+ iommu->name);
+ }
+ }
if (!completion_done(&iommu->prq_complete))
complete(&iommu->prq_complete);
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 1d92ac948db7..d4004bcf333a 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -44,26 +44,25 @@
/*
* We have 32 bits total; 12 bits resolved at level 1, 8 bits at level 2,
- * and 12 bits in a page. With some carefully-chosen coefficients we can
- * hide the ugly inconsistencies behind these macros and at least let the
- * rest of the code pretend to be somewhat sane.
+ * and 12 bits in a page.
+ * MediaTek extend 2 bits to reach 34bits, 14 bits at lvl1 and 8 bits at lvl2.
*/
#define ARM_V7S_ADDR_BITS 32
-#define _ARM_V7S_LVL_BITS(lvl) (16 - (lvl) * 4)
-#define ARM_V7S_LVL_SHIFT(lvl) (ARM_V7S_ADDR_BITS - (4 + 8 * (lvl)))
+#define _ARM_V7S_LVL_BITS(lvl, cfg) ((lvl) == 1 ? ((cfg)->ias - 20) : 8)
+#define ARM_V7S_LVL_SHIFT(lvl) ((lvl) == 1 ? 20 : 12)
#define ARM_V7S_TABLE_SHIFT 10
-#define ARM_V7S_PTES_PER_LVL(lvl) (1 << _ARM_V7S_LVL_BITS(lvl))
-#define ARM_V7S_TABLE_SIZE(lvl) \
- (ARM_V7S_PTES_PER_LVL(lvl) * sizeof(arm_v7s_iopte))
+#define ARM_V7S_PTES_PER_LVL(lvl, cfg) (1 << _ARM_V7S_LVL_BITS(lvl, cfg))
+#define ARM_V7S_TABLE_SIZE(lvl, cfg) \
+ (ARM_V7S_PTES_PER_LVL(lvl, cfg) * sizeof(arm_v7s_iopte))
#define ARM_V7S_BLOCK_SIZE(lvl) (1UL << ARM_V7S_LVL_SHIFT(lvl))
#define ARM_V7S_LVL_MASK(lvl) ((u32)(~0U << ARM_V7S_LVL_SHIFT(lvl)))
#define ARM_V7S_TABLE_MASK ((u32)(~0U << ARM_V7S_TABLE_SHIFT))
-#define _ARM_V7S_IDX_MASK(lvl) (ARM_V7S_PTES_PER_LVL(lvl) - 1)
-#define ARM_V7S_LVL_IDX(addr, lvl) ({ \
+#define _ARM_V7S_IDX_MASK(lvl, cfg) (ARM_V7S_PTES_PER_LVL(lvl, cfg) - 1)
+#define ARM_V7S_LVL_IDX(addr, lvl, cfg) ({ \
int _l = lvl; \
- ((u32)(addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l); \
+ ((addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l, cfg); \
})
/*
@@ -112,9 +111,10 @@
#define ARM_V7S_TEX_MASK 0x7
#define ARM_V7S_ATTR_TEX(val) (((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)
-/* MediaTek extend the two bits for PA 32bit/33bit */
+/* MediaTek extend the bits below for PA 32bit/33bit/34bit */
#define ARM_V7S_ATTR_MTK_PA_BIT32 BIT(9)
#define ARM_V7S_ATTR_MTK_PA_BIT33 BIT(4)
+#define ARM_V7S_ATTR_MTK_PA_BIT34 BIT(5)
/* *well, except for TEX on level 2 large pages, of course :( */
#define ARM_V7S_CONT_PAGE_TEX_SHIFT 6
@@ -194,6 +194,8 @@ static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
pte |= ARM_V7S_ATTR_MTK_PA_BIT32;
if (paddr & BIT_ULL(33))
pte |= ARM_V7S_ATTR_MTK_PA_BIT33;
+ if (paddr & BIT_ULL(34))
+ pte |= ARM_V7S_ATTR_MTK_PA_BIT34;
return pte;
}
@@ -218,6 +220,8 @@ static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl,
paddr |= BIT_ULL(32);
if (pte & ARM_V7S_ATTR_MTK_PA_BIT33)
paddr |= BIT_ULL(33);
+ if (pte & ARM_V7S_ATTR_MTK_PA_BIT34)
+ paddr |= BIT_ULL(34);
return paddr;
}
@@ -234,7 +238,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
struct device *dev = cfg->iommu_dev;
phys_addr_t phys;
dma_addr_t dma;
- size_t size = ARM_V7S_TABLE_SIZE(lvl);
+ size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg);
void *table = NULL;
if (lvl == 1)
@@ -280,7 +284,7 @@ static void __arm_v7s_free_table(void *table, int lvl,
{
struct io_pgtable_cfg *cfg = &data->iop.cfg;
struct device *dev = cfg->iommu_dev;
- size_t size = ARM_V7S_TABLE_SIZE(lvl);
+ size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg);
if (!cfg->coherent_walk)
dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
@@ -424,7 +428,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
arm_v7s_iopte *tblp;
size_t sz = ARM_V7S_BLOCK_SIZE(lvl);
- tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl);
+ tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl, cfg);
if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz,
sz, lvl, tblp) != sz))
return -EINVAL;
@@ -477,7 +481,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
int num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);
/* Find our entry at the current level */
- ptep += ARM_V7S_LVL_IDX(iova, lvl);
+ ptep += ARM_V7S_LVL_IDX(iova, lvl, cfg);
/* If we can install a leaf entry at this level, then do so */
if (num_entries)
@@ -519,7 +523,6 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
- struct io_pgtable *iop = &data->iop;
int ret;
if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
@@ -535,12 +538,7 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
* Synchronise all PTE updates for the new mapping before there's
* a chance for anything to kick off a table walk for the new iova.
*/
- if (iop->cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
- io_pgtable_tlb_flush_walk(iop, iova, size,
- ARM_V7S_BLOCK_SIZE(2));
- } else {
- wmb();
- }
+ wmb();
return ret;
}
@@ -550,7 +548,7 @@ static void arm_v7s_free_pgtable(struct io_pgtable *iop)
struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop);
int i;
- for (i = 0; i < ARM_V7S_PTES_PER_LVL(1); i++) {
+ for (i = 0; i < ARM_V7S_PTES_PER_LVL(1, &data->iop.cfg); i++) {
arm_v7s_iopte pte = data->pgd[i];
if (ARM_V7S_PTE_IS_TABLE(pte, 1))
@@ -602,9 +600,9 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
if (!tablep)
return 0; /* Bytes unmapped */
- num_ptes = ARM_V7S_PTES_PER_LVL(2);
+ num_ptes = ARM_V7S_PTES_PER_LVL(2, cfg);
num_entries = size >> ARM_V7S_LVL_SHIFT(2);
- unmap_idx = ARM_V7S_LVL_IDX(iova, 2);
+ unmap_idx = ARM_V7S_LVL_IDX(iova, 2, cfg);
pte = arm_v7s_prot_to_pte(arm_v7s_pte_to_prot(blk_pte, 1), 2, cfg);
if (num_entries > 1)
@@ -646,7 +644,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
if (WARN_ON(lvl > 2))
return 0;
- idx = ARM_V7S_LVL_IDX(iova, lvl);
+ idx = ARM_V7S_LVL_IDX(iova, lvl, &iop->cfg);
ptep += idx;
do {
pte[i] = READ_ONCE(ptep[i]);
@@ -717,7 +715,7 @@ static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
{
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
- if (WARN_ON(upper_32_bits(iova)))
+ if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
return 0;
return __arm_v7s_unmap(data, gather, iova, size, 1, data->pgd);
@@ -732,7 +730,7 @@ static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
u32 mask;
do {
- ptep += ARM_V7S_LVL_IDX(iova, ++lvl);
+ ptep += ARM_V7S_LVL_IDX(iova, ++lvl, &data->iop.cfg);
pte = READ_ONCE(*ptep);
ptep = iopte_deref(pte, lvl, data);
} while (ARM_V7S_PTE_IS_TABLE(pte, lvl));
@@ -751,15 +749,14 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
{
struct arm_v7s_io_pgtable *data;
- if (cfg->ias > ARM_V7S_ADDR_BITS)
+ if (cfg->ias > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS))
return NULL;
- if (cfg->oas > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS))
+ if (cfg->oas > (arm_v7s_is_mtk_enabled(cfg) ? 35 : ARM_V7S_ADDR_BITS))
return NULL;
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
- IO_PGTABLE_QUIRK_TLBI_ON_MAP |
IO_PGTABLE_QUIRK_ARM_MTK_EXT |
IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
@@ -775,8 +772,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
spin_lock_init(&data->split_lock);
data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
- ARM_V7S_TABLE_SIZE(2),
- ARM_V7S_TABLE_SIZE(2),
+ ARM_V7S_TABLE_SIZE(2, cfg),
+ ARM_V7S_TABLE_SIZE(2, cfg),
ARM_V7S_TABLE_SLAB_FLAGS, NULL);
if (!data->l2_tables)
goto out_free_data;
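To see what the reworked _ARM_V7S_LVL_BITS() means in practice, a small userspace sketch of the level-1 table geometry for the standard 32-bit IAS versus the MediaTek 34-bit case; struct demo_cfg and the shortened macro names here are local stand-ins for cfg and the ARM_V7S_* helpers, and arm_v7s_iopte is modelled as a 32-bit entry.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t arm_v7s_iopte;

struct demo_cfg { unsigned int ias; };

#define LVL_BITS(lvl, cfg)     ((lvl) == 1 ? ((cfg)->ias - 20) : 8)
#define PTES_PER_LVL(lvl, cfg) (1u << LVL_BITS(lvl, cfg))
#define TABLE_SIZE(lvl, cfg)   (PTES_PER_LVL(lvl, cfg) * sizeof(arm_v7s_iopte))

int main(void)
{
	struct demo_cfg std = { .ias = 32 }, mtk = { .ias = 34 };

	printf("ias=32: lvl1 %u entries, %zu bytes\n",
	       PTES_PER_LVL(1, &std), TABLE_SIZE(1, &std)); /* 4096, 16384 */
	printf("ias=34: lvl1 %u entries, %zu bytes\n",
	       PTES_PER_LVL(1, &mtk), TABLE_SIZE(1, &mtk)); /* 16384, 65536 */
	return 0;
}

With ias = 34 the level-1 table grows from 4096 to 16384 entries (16 KiB to 64 KiB) while level 2 keeps 256 entries, which is why ARM_V7S_TABLE_SIZE() and the index helpers now take the cfg argument.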
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 94394c81468f..6e9917ce980f 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -24,6 +24,9 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S
[ARM_V7S] = &io_pgtable_arm_v7s_init_fns,
#endif
+#ifdef CONFIG_AMD_IOMMU
+ [AMD_IOMMU_V1] = &io_pgtable_amd_iommu_v1_init_fns,
+#endif
};
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c